From ce2d44f7e8b2158fdbf04e2769a0118fd685278d Mon Sep 17 00:00:00 2001 From: zhangjianwei Date: Mon, 8 Aug 2022 14:57:44 +0800 Subject: [PATCH 1/5] merge patch-phytium-4.19.9 --- .../devicetree/bindings/arm/cpus.txt | 4 + .../devicetree/bindings/gpio/gpio-phytium.txt | 47 + .../phytium,d2000-ixic.txt | 29 + .../bindings/mailbox/phytium-mailbox.txt | 32 + .../bindings/pci/phytium,phytium-pcie-ep.txt | 21 + .../devicetree/bindings/spi/spi-phytium.txt | 24 + .../devicetree/bindings/vendor-prefixes.txt | 1 + arch/arm64/Kconfig | 1 + arch/arm64/Kconfig.platforms | 6 + arch/arm64/boot/dts/Makefile | 1 + arch/arm64/boot/dts/phytium/Makefile | 12 + .../boot/dts/phytium/d2000-devboard-dsk.dts | 73 + .../dts/phytium/d2000-generic-psci-soc.dtsi | 525 + .../phytium/ft1500a-16c-generic-psci-soc.dtsi | 511 + .../dts/phytium/ft1500a-devboard-16c-dsk.dts | 51 + .../dts/phytium/ft2000ahk-devboard-dsk.dts | 52 + .../ft2000ahk-generic-spintable-soc.dtsi | 253 + .../dts/phytium/ft2000ahke-devboard-dsk.dts | 68 + .../phytium/ft2000ahke-generic-psci-soc.dtsi | 312 + .../ft2000plus-MR-devboard-64c-dsk.dts | 136 + .../dts/phytium/ft2000plus-MR-psci-soc.dtsi | 1062 + .../ft2000plus-SR-devboard-64c-dsk.dts | 136 + .../dts/phytium/ft2000plus-SR-psci-soc.dtsi | 986 + .../dts/phytium/ft2004-devboard-d4-dsk.dts | 73 + .../dts/phytium/ft2004-generic-psci-soc.dtsi | 474 + .../boot/dts/phytium/ft2004c-devboard-dsk.dts | 74 + .../dts/phytium/ft2004c-generic-psci-soc.dtsi | 486 + arch/arm64/configs/d2000_defconfig | 6794 +++ arch/arm64/configs/defconfig | 13 +- arch/arm64/include/asm/cpucaps.h | 3 +- arch/arm64/include/asm/cputype.h | 4 + arch/arm64/kernel/cpufeature.c | 9 + arch/arm64/kernel/hibernate.c.rej | 10 + arch/arm64/lib/Makefile | 2 + arch/arm64/lib/crc32.S | 60 + block/bio.c | 2 +- drivers/acpi/acpi_apd.c | 7 + drivers/acpi/internal.h | 12 + drivers/acpi/irq.c | 62 +- drivers/acpi/pci_irq.c | 8 +- drivers/acpi/pci_link.c | 17 +- drivers/firmware/efi/efi.c | 142 +- 
drivers/firmware/efi/libstub/arm-stub.c | 27 + drivers/gpio/Kconfig | 26 + drivers/gpio/Makefile | 3 + drivers/gpio/gpio-phytium-core.c | 348 + drivers/gpio/gpio-phytium-core.h | 86 + drivers/gpio/gpio-phytium-pci.c | 199 + drivers/gpio/gpio-phytium-platform.c | 217 + drivers/gpu/drm/Kconfig | 2 + drivers/gpu/drm/Makefile | 1 + drivers/gpu/drm/phytium/Kconfig | 7 + drivers/gpu/drm/phytium/Makefile | 15 + drivers/gpu/drm/phytium/phytium_crtc.c | 720 + drivers/gpu/drm/phytium/phytium_crtc.h | 38 + drivers/gpu/drm/phytium/phytium_debugfs.c | 400 + drivers/gpu/drm/phytium/phytium_debugfs.h | 13 + drivers/gpu/drm/phytium/phytium_display_drv.c | 461 + drivers/gpu/drm/phytium/phytium_display_drv.h | 151 + drivers/gpu/drm/phytium/phytium_dp.c | 2615 + drivers/gpu/drm/phytium/phytium_dp.h | 153 + drivers/gpu/drm/phytium/phytium_fb.c | 123 + drivers/gpu/drm/phytium/phytium_fb.h | 24 + drivers/gpu/drm/phytium/phytium_fbdev.c | 158 + drivers/gpu/drm/phytium/phytium_fbdev.h | 13 + drivers/gpu/drm/phytium/phytium_gem.c | 281 + drivers/gpu/drm/phytium/phytium_gem.h | 42 + drivers/gpu/drm/phytium/phytium_panel.c | 421 + drivers/gpu/drm/phytium/phytium_panel.h | 46 + drivers/gpu/drm/phytium/phytium_pci.c | 284 + drivers/gpu/drm/phytium/phytium_pci.h | 23 + drivers/gpu/drm/phytium/phytium_plane.c | 627 + drivers/gpu/drm/phytium/phytium_plane.h | 46 + drivers/gpu/drm/phytium/phytium_reg.h | 357 + drivers/gpu/drm/phytium/x100_dc.c | 321 + drivers/gpu/drm/phytium/x100_dc.h | 30 + drivers/gpu/drm/phytium/x100_dp.c | 907 + drivers/gpu/drm/phytium/x100_dp.h | 13 + drivers/gpu/drm/phytium/x100_reg.h | 349 + drivers/i2c/busses/Kconfig | 15 + drivers/i2c/busses/Makefile | 3 + drivers/i2c/busses/i2c-designware-platdrv.c | 1 + drivers/i2c/busses/i2c-phytium-common.c | 152 + drivers/i2c/busses/i2c-phytium-core.h | 233 + drivers/i2c/busses/i2c-phytium-master.c | 498 + drivers/i2c/busses/i2c-phytium-pci.c | 237 + drivers/input/serio/Kconfig | 12 + drivers/input/serio/Makefile | 1 + 
drivers/input/serio/phytium-ps2.c | 186 + drivers/iommu/arm-smmu.c | 17 + drivers/iommu/iommu.c | 25 + drivers/irqchip/Kconfig | 17 + drivers/irqchip/Makefile | 2 + drivers/irqchip/irq-gic-phytium-2500-its.c | 4158 ++ drivers/irqchip/irq-gic-phytium-2500.c | 1881 + drivers/irqchip/irq-gic-v3-its.c | 429 +- drivers/irqchip/irq-gic-v3.c | 32 +- drivers/irqchip/irq-gic.c | 12 +- drivers/irqchip/irq-phytium-ixic.c | 267 + drivers/mailbox/Kconfig | 6 + drivers/mailbox/Makefile | 2 + drivers/mailbox/phytium_mailbox.c | 200 + drivers/mfd/Kconfig | 13 + drivers/mfd/Makefile | 2 + drivers/mfd/phytium_x100_i2s_lsd.c | 136 + drivers/mfd/phytium_x100_i2s_mmd.c | 190 + drivers/mmc/host/Kconfig | 18 + drivers/mmc/host/Makefile | 2 + drivers/mmc/host/phytium-mci-pci.c | 178 + drivers/mmc/host/phytium-mci.c | 1482 + drivers/mmc/host/phytium-mci.h | 349 + drivers/mmc/host/phytium-sdci.c | 1442 + drivers/mmc/host/phytium-sdci.h | 204 + drivers/mtd/nand/raw/Kconfig | 11 + drivers/mtd/nand/raw/Makefile | 3 + drivers/mtd/nand/raw/phytium_nand.c | 2117 + drivers/mtd/nand/raw/phytium_nand.h | 441 + drivers/mtd/nand/raw/phytium_nand_pci.c | 149 + drivers/mtd/spi-nor/Kconfig | 9 + drivers/mtd/spi-nor/Makefile | 1 + drivers/mtd/spi-nor/phytium-quadspi.c | 1006 + drivers/mtd/spi-nor/spi-nor.c | 1 + drivers/net/can/Kconfig | 24 + drivers/net/can/Makefile | 3 + drivers/net/can/phytium_can.c | 694 + drivers/net/can/phytium_can.h | 156 + drivers/net/can/phytium_can_pci.c | 107 + drivers/net/can/phytium_can_plat.c | 175 + drivers/net/can/usb/ems_usb.c | 1 + .../ethernet/stmicro/stmmac/dwmac-generic.c | 19 + .../net/ethernet/stmicro/stmmac/stmmac_main.c | 1 + .../ethernet/stmicro/stmmac/stmmac_platform.c | 297 +- .../ethernet/stmicro/stmmac/stmmac_platform.h | 2 + drivers/net/hamradio/6pack.c | 10 +- drivers/net/usb/sr9700.c | 2 +- drivers/pci/controller/Kconfig | 10 + drivers/pci/controller/Makefile | 2 + drivers/pci/controller/pcie-phytium-ep.c | 480 + drivers/pci/controller/pcie-phytium-ep.h | 
95 + .../pci/controller/pcie-phytium-register.h | 87 + drivers/pci/quirks.c | 1 + drivers/rtc/Kconfig | 10 + drivers/rtc/Makefile | 1 + drivers/rtc/rtc-phytium.c | 331 + drivers/scsi/xen-scsifront.c | 3 +- drivers/spi/Kconfig | 25 + drivers/spi/Makefile | 3 + drivers/spi/spi-phytium-pci.c | 124 + drivers/spi/spi-phytium-plat.c | 206 + drivers/spi/spi-phytium.c | 528 + drivers/spi/spi-phytium.h | 66 + drivers/tee/optee/Kconfig | 30 + drivers/tee/optee/core.c | 155 +- drivers/tty/serial/Kconfig | 11 + drivers/tty/serial/Makefile | 1 + drivers/tty/serial/phytium-uart.c | 922 + drivers/usb/core/hub.c | 5 + drivers/usb/gadget/legacy/inode.c | 10 +- drivers/usb/host/xhci-mem.c | 4 + drivers/usb/host/xhci-pci.c | 7 + drivers/usb/host/xhci.h | 1 + drivers/watchdog/dw_wdt.c | 44 +- drivers/xen/grant-table.c | 14 +- drivers/xen/xenbus/xenbus_client.c | 24 +- fs/file_table.c | 1 - fs/fuse/dev.c | 12 +- fs/fuse/file.c | 6 +- fs/fuse/fuse_i.h | 5 - include/acpi/acpi_drivers.h | 2 +- include/acpi/actbl2.h | 3 +- include/linux/acpi.h | 17 + include/linux/efi.h | 21 +- include/linux/irq.h | 13 + include/linux/irqchip/arm-gic-phytium-2500.h | 621 + include/linux/irqchip/arm-gic-v3.h | 4 +- include/linux/memblock.h | 3 + include/linux/pci_ids.h | 2 + include/net/esp.h | 2 - include/net/sock.h | 3 - include/sound/hdaudio.h | 1 + include/sound/hdmi-codec.h | 17 + include/uapi/linux/serial_core.h | 3 + include/xen/grant_table.h | 12 - kernel/dma/swiotlb.c | 11 +- kernel/irq/debugfs.c | 3 +- kernel/irq/internals.h | 10 +- kernel/irq/irqdesc.c | 6 + kernel/irq/resend.c | 106 +- kernel/locking/qrwlock.c | 7 +- lib/crc32.c | 11 +- mm/memblock.c | 47 +- mm/sparse.c | 4 +- net/core/sock.c | 3 +- net/ipv4/esp4.c | 5 - net/ipv6/esp6.c | 5 - net/sunrpc/xprt.c | 5 +- net/sunrpc/xprtsock.c | 12 +- patch/4.19.9-changelog | 336 + patch/patch-phytium-4.19.8_4.19.9 | 9753 ++++ patch/patch-phytium-4.19.9 | 45611 ++++++++++++++++ sound/hda/hdac_controller.c | 38 + sound/hda/hdac_stream.c | 8 + 
sound/pci/hda/Kconfig | 15 + sound/pci/hda/Makefile | 2 + sound/pci/hda/hda_controller.c | 6 + sound/pci/hda/hda_phytium.c | 1218 + sound/pci/hda/hda_phytium.h | 51 + sound/soc/Kconfig | 1 + sound/soc/Makefile | 1 + sound/soc/codecs/Kconfig | 9 + sound/soc/codecs/Makefile | 4 + sound/soc/codecs/es8336.c | 1093 + sound/soc/codecs/es8336.h | 161 + sound/soc/codecs/es8388.c | 819 + sound/soc/codecs/es8388.h | 290 + sound/soc/codecs/hdmi-codec.c | 42 + sound/soc/phytium/Kconfig | 31 + sound/soc/phytium/Makefile | 13 + sound/soc/phytium/local.h | 326 + sound/soc/phytium/phytium_i2s.c | 1345 + sound/soc/phytium/pmdk_dp.c | 227 + sound/soc/phytium/pmdk_es8336.c | 100 + sound/soc/phytium/pmdk_es8388.c | 174 + virt/kvm/arm/vgic/vgic-mmio-v3.c | 2 +- 224 files changed, 105237 insertions(+), 395 deletions(-) create mode 100644 Documentation/devicetree/bindings/gpio/gpio-phytium.txt create mode 100644 Documentation/devicetree/bindings/interrupt-controller/phytium,d2000-ixic.txt create mode 100644 Documentation/devicetree/bindings/mailbox/phytium-mailbox.txt create mode 100644 Documentation/devicetree/bindings/pci/phytium,phytium-pcie-ep.txt create mode 100644 Documentation/devicetree/bindings/spi/spi-phytium.txt create mode 100644 arch/arm64/boot/dts/phytium/Makefile create mode 100644 arch/arm64/boot/dts/phytium/d2000-devboard-dsk.dts create mode 100644 arch/arm64/boot/dts/phytium/d2000-generic-psci-soc.dtsi create mode 100644 arch/arm64/boot/dts/phytium/ft1500a-16c-generic-psci-soc.dtsi create mode 100644 arch/arm64/boot/dts/phytium/ft1500a-devboard-16c-dsk.dts create mode 100644 arch/arm64/boot/dts/phytium/ft2000ahk-devboard-dsk.dts create mode 100644 arch/arm64/boot/dts/phytium/ft2000ahk-generic-spintable-soc.dtsi create mode 100644 arch/arm64/boot/dts/phytium/ft2000ahke-devboard-dsk.dts create mode 100644 arch/arm64/boot/dts/phytium/ft2000ahke-generic-psci-soc.dtsi create mode 100644 arch/arm64/boot/dts/phytium/ft2000plus-MR-devboard-64c-dsk.dts create mode 100644 
arch/arm64/boot/dts/phytium/ft2000plus-MR-psci-soc.dtsi create mode 100644 arch/arm64/boot/dts/phytium/ft2000plus-SR-devboard-64c-dsk.dts create mode 100644 arch/arm64/boot/dts/phytium/ft2000plus-SR-psci-soc.dtsi create mode 100644 arch/arm64/boot/dts/phytium/ft2004-devboard-d4-dsk.dts create mode 100644 arch/arm64/boot/dts/phytium/ft2004-generic-psci-soc.dtsi create mode 100644 arch/arm64/boot/dts/phytium/ft2004c-devboard-dsk.dts create mode 100644 arch/arm64/boot/dts/phytium/ft2004c-generic-psci-soc.dtsi create mode 100644 arch/arm64/configs/d2000_defconfig create mode 100644 arch/arm64/kernel/hibernate.c.rej create mode 100644 arch/arm64/lib/crc32.S create mode 100644 drivers/gpio/gpio-phytium-core.c create mode 100644 drivers/gpio/gpio-phytium-core.h create mode 100644 drivers/gpio/gpio-phytium-pci.c create mode 100644 drivers/gpio/gpio-phytium-platform.c create mode 100644 drivers/gpu/drm/phytium/Kconfig create mode 100644 drivers/gpu/drm/phytium/Makefile create mode 100644 drivers/gpu/drm/phytium/phytium_crtc.c create mode 100644 drivers/gpu/drm/phytium/phytium_crtc.h create mode 100644 drivers/gpu/drm/phytium/phytium_debugfs.c create mode 100644 drivers/gpu/drm/phytium/phytium_debugfs.h create mode 100644 drivers/gpu/drm/phytium/phytium_display_drv.c create mode 100644 drivers/gpu/drm/phytium/phytium_display_drv.h create mode 100644 drivers/gpu/drm/phytium/phytium_dp.c create mode 100644 drivers/gpu/drm/phytium/phytium_dp.h create mode 100644 drivers/gpu/drm/phytium/phytium_fb.c create mode 100644 drivers/gpu/drm/phytium/phytium_fb.h create mode 100644 drivers/gpu/drm/phytium/phytium_fbdev.c create mode 100644 drivers/gpu/drm/phytium/phytium_fbdev.h create mode 100644 drivers/gpu/drm/phytium/phytium_gem.c create mode 100644 drivers/gpu/drm/phytium/phytium_gem.h create mode 100644 drivers/gpu/drm/phytium/phytium_panel.c create mode 100644 drivers/gpu/drm/phytium/phytium_panel.h create mode 100644 drivers/gpu/drm/phytium/phytium_pci.c create mode 100644 
drivers/gpu/drm/phytium/phytium_pci.h create mode 100644 drivers/gpu/drm/phytium/phytium_plane.c create mode 100644 drivers/gpu/drm/phytium/phytium_plane.h create mode 100644 drivers/gpu/drm/phytium/phytium_reg.h create mode 100644 drivers/gpu/drm/phytium/x100_dc.c create mode 100644 drivers/gpu/drm/phytium/x100_dc.h create mode 100644 drivers/gpu/drm/phytium/x100_dp.c create mode 100644 drivers/gpu/drm/phytium/x100_dp.h create mode 100644 drivers/gpu/drm/phytium/x100_reg.h create mode 100644 drivers/i2c/busses/i2c-phytium-common.c create mode 100644 drivers/i2c/busses/i2c-phytium-core.h create mode 100644 drivers/i2c/busses/i2c-phytium-master.c create mode 100644 drivers/i2c/busses/i2c-phytium-pci.c create mode 100644 drivers/input/serio/phytium-ps2.c create mode 100644 drivers/irqchip/irq-gic-phytium-2500-its.c create mode 100644 drivers/irqchip/irq-gic-phytium-2500.c create mode 100644 drivers/irqchip/irq-phytium-ixic.c create mode 100644 drivers/mailbox/phytium_mailbox.c create mode 100644 drivers/mfd/phytium_x100_i2s_lsd.c create mode 100644 drivers/mfd/phytium_x100_i2s_mmd.c create mode 100644 drivers/mmc/host/phytium-mci-pci.c create mode 100644 drivers/mmc/host/phytium-mci.c create mode 100644 drivers/mmc/host/phytium-mci.h create mode 100644 drivers/mmc/host/phytium-sdci.c create mode 100644 drivers/mmc/host/phytium-sdci.h create mode 100644 drivers/mtd/nand/raw/phytium_nand.c create mode 100644 drivers/mtd/nand/raw/phytium_nand.h create mode 100644 drivers/mtd/nand/raw/phytium_nand_pci.c create mode 100644 drivers/mtd/spi-nor/phytium-quadspi.c create mode 100644 drivers/net/can/phytium_can.c create mode 100644 drivers/net/can/phytium_can.h create mode 100644 drivers/net/can/phytium_can_pci.c create mode 100644 drivers/net/can/phytium_can_plat.c create mode 100644 drivers/pci/controller/pcie-phytium-ep.c create mode 100644 drivers/pci/controller/pcie-phytium-ep.h create mode 100644 drivers/pci/controller/pcie-phytium-register.h create mode 100644 
drivers/rtc/rtc-phytium.c create mode 100644 drivers/spi/spi-phytium-pci.c create mode 100644 drivers/spi/spi-phytium-plat.c create mode 100644 drivers/spi/spi-phytium.c create mode 100644 drivers/spi/spi-phytium.h create mode 100644 drivers/tty/serial/phytium-uart.c create mode 100644 include/linux/irqchip/arm-gic-phytium-2500.h create mode 100755 patch/4.19.9-changelog create mode 100755 patch/patch-phytium-4.19.8_4.19.9 create mode 100755 patch/patch-phytium-4.19.9 create mode 100644 sound/pci/hda/hda_phytium.c create mode 100644 sound/pci/hda/hda_phytium.h create mode 100644 sound/soc/codecs/es8336.c create mode 100644 sound/soc/codecs/es8336.h create mode 100644 sound/soc/codecs/es8388.c create mode 100644 sound/soc/codecs/es8388.h create mode 100644 sound/soc/phytium/Kconfig create mode 100644 sound/soc/phytium/Makefile create mode 100644 sound/soc/phytium/local.h create mode 100644 sound/soc/phytium/phytium_i2s.c create mode 100644 sound/soc/phytium/pmdk_dp.c create mode 100644 sound/soc/phytium/pmdk_es8336.c create mode 100644 sound/soc/phytium/pmdk_es8388.c diff --git a/Documentation/devicetree/bindings/arm/cpus.txt b/Documentation/devicetree/bindings/arm/cpus.txt index 96dfccc0faa8..046a47e49a94 100644 --- a/Documentation/devicetree/bindings/arm/cpus.txt +++ b/Documentation/devicetree/bindings/arm/cpus.txt @@ -184,6 +184,10 @@ described below. 
"nvidia,tegra132-denver" "nvidia,tegra186-denver" "nvidia,tegra194-carmel" + "phytium,ftc660" + "phytium,ftc661" + "phytium,ftc662" + "phytium,ftc663" "qcom,krait" "qcom,kryo" "qcom,kryo385" diff --git a/Documentation/devicetree/bindings/gpio/gpio-phytium.txt b/Documentation/devicetree/bindings/gpio/gpio-phytium.txt new file mode 100644 index 000000000000..77d4c6c03d00 --- /dev/null +++ b/Documentation/devicetree/bindings/gpio/gpio-phytium.txt @@ -0,0 +1,47 @@ +* Phytium GPIO controller + +Required properties: +- compatible : Should contain "phytium,gpio" +- reg : Address and length of the register set for the device. +- interrupts: Interrupt mapping for GPIO IRQ. +- gpio-controller : Marks the device node as a gpio controller. +- #gpio-cells : Should be 2. The first cell is the pin number and + the second cell is used to specify the gpio polarity: + 0 = active high + 1 = active low +- #address-cells : should be 1 (for addressing port subnodes). +- #size-cells : should be 0 (port subnodes). + +The GPIO controller has two ports, each of which are represented as child +nodes with the following properties: + +Required properties: +- compatible : "phytium,gpio-port" +- reg : The integer port index of the port, a single cell. + +Optional properties: +- nr-gpios : The number of pins in the port, a single cell. 
+ +Example: + +gpio: gpio@28004000 { + compatible = "phytium,gpio"; + reg = <0x0 0x28004000 0x0 0x1000>; + interrupts = ; + gpio-controller; + #gpio-cells = <2>; + #address-cells = <1>; + #size-cells = <0>; + + porta { + compatible = "phytium,gpio-port"; + reg = <0>; + nr-gpios = <8>; + }; + + portb { + compatible = "phytium,gpio-port"; + reg = <1>; + nr-gpios = <8>; + }; +}; diff --git a/Documentation/devicetree/bindings/interrupt-controller/phytium,d2000-ixic.txt b/Documentation/devicetree/bindings/interrupt-controller/phytium,d2000-ixic.txt new file mode 100644 index 000000000000..02fd7ce3b5b9 --- /dev/null +++ b/Documentation/devicetree/bindings/interrupt-controller/phytium,d2000-ixic.txt @@ -0,0 +1,29 @@ +Phytium INTx interrupt controller (IXIC) + +This is a pseudo interrupt controller to handle PCI legacy interrupts on +Phytium D2000 and FT-2000/4C SoC, which sits between the PCI INTx devices +and the GIC and forwards the 4 INTx input signals to 4 adjacent GICv3 SPIs. + +Required properties: + +- compatible : "phytium,d2000-ixic" + "phytium,ft2004c-ixic" +- reg : Specifies two regions of the register set, which + are called 'ctr' and 'hpb'. +- interrupt-controller : Identifies the node as an interrupt controller. +- #interrupt-cells : Specifies the number of cells needed to encode an + interrupt source. The value must be 3. +- intx-spi-base : The SPI number of the first SPI of the 4 adjacent + ones the IXIC forwards its interrupts to. 
+ +Example: + ixic: interrupt-controller@29000000 { + compatible = "phytium,d2000-ixic"; + reg-names = "ctr", "hpb"; + reg = <0x0 0x29000000 0x0 0x00060000>, + <0x0 0x29100000 0x0 0x00002000>; + interrupt-controller; + interrupt-parent = <&gic>; + #interrupt-cells = <3>; + intx-spi-base = <28>; + }; diff --git a/Documentation/devicetree/bindings/mailbox/phytium-mailbox.txt b/Documentation/devicetree/bindings/mailbox/phytium-mailbox.txt new file mode 100644 index 000000000000..4d6f5a44f6e4 --- /dev/null +++ b/Documentation/devicetree/bindings/mailbox/phytium-mailbox.txt @@ -0,0 +1,32 @@ +Phytium Mailbox Driver +====================== + +The Phytium mailbox controller has a channel/link to communicate +with the remote end. A link raises an interrupt for any received data. However, +there is no specified way of knowing if the sent data has been read by the +remote. This driver assumes the sender polls the STAT register and the remote +clears it after having read the data. + +Mailbox Device Node: +==================== + +Required properties: +-------------------- +- compatible: Shall be "phytium,mbox" +- reg: Contains the mailbox register address range (base + address and length) +- #mbox-cells Shall be 1 - the index of the channel needed. +- interrupts: Contains the interrupt information corresponding to + the link. + +Example: +-------- + +mbox: mailbox@2a000000 { + compatible = "phytium,mbox"; + reg = <0x0 0x2a000000 0x0 0x1000>; + #mbox-cells = <1>; + interrupts = <0 48 4>; + clocks = <&sycclk>; + clock-names = "apb_pclk"; +}; diff --git a/Documentation/devicetree/bindings/pci/phytium,phytium-pcie-ep.txt b/Documentation/devicetree/bindings/pci/phytium,phytium-pcie-ep.txt new file mode 100644 index 000000000000..2e40d1e6ee98 --- /dev/null +++ b/Documentation/devicetree/bindings/pci/phytium,phytium-pcie-ep.txt @@ -0,0 +1,21 @@ +* Phytium PCIe endpoint controller + +Required properties: +- compatible: Should contain "phytium,phytium-pcie-ep" to identify the IP used. 
+- reg: Should contain the controller register base address, AXI interface + region base address and hpb register base address respectively. +- reg-names: Must be "reg", "mem" and "hpb" respectively. +- max-outbound-regions: Set to maximum number of outbound regions. +- max-functions: Maximum number of functions that can be configured (default 1). + +Example: + +ep0: ep@0x29030000 { + compatible = "phytium,d2000-pcie-ep"; + reg = <0x0 0x29030000 0x0 0x10000>, + <0x11 0x00000000 0x1 0x00000000>, + <0x0 0x29101000 0x0 0x1000>; + reg-names = "reg", "mem", "hpb"; + max-outbound-regions = <3>; + max-functions = /bits/ 8 <1>; +}; diff --git a/Documentation/devicetree/bindings/spi/spi-phytium.txt b/Documentation/devicetree/bindings/spi/spi-phytium.txt new file mode 100644 index 000000000000..a674d192132c --- /dev/null +++ b/Documentation/devicetree/bindings/spi/spi-phytium.txt @@ -0,0 +1,24 @@ +Phytium SPI controller + +Required properties: +- compatible: should be "phytium,spi" +- #address-cells: see spi-bus.txt +- #size-cells: see spi-bus.txt +- reg: address and length of the spi master registers +- interrupts: should contain one interrupt +- clocks: spi clock phandle +- num-cs: see spi-bus.txt + +Optional properties: +- cs-gpios: see spi-bus.txt + +Example: + + +spi0: spi@2800c000 { + compatible = "phytium,spi"; + interrupts = ; + reg = <0x0 0x2800c000 0x0 0x1000>; + clocks = <&sysclk_48mhz>; + num-cs = <4>; +}; diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index 2c3fc512e746..807273c439bc 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt @@ -292,6 +292,7 @@ parade Parade Technologies Inc. pericom Pericom Technology Inc. pervasive Pervasive Displays, Inc. phytec PHYTEC Messtechnik GmbH +phytium Phytium Information Technology Co., Ltd. 
picochip Picochip Ltd pine64 Pine64 pixcir PIXCIR MICROELECTRONICS Co., Ltd diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 1fe3e5cb2927..6b3bdb2000d7 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -75,6 +75,7 @@ config ARM64 select CLONE_BACKWARDS select COMMON_CLK select CPU_PM if (SUSPEND || CPU_IDLE) + select CRC32 select DCACHE_WORD_ACCESS select DMA_DIRECT_OPS select EDAC_SUPPORT diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index 91c7ffad8541..7283daa25ab1 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -140,6 +140,12 @@ config ARCH_MVEBU - Armada 7K SoC Family - Armada 8K SoC Family +config ARCH_PHYTIUM + bool "Phytium SoC Family" + help + This enables support for Phytium ARMv8 SoC family. + select ARM_GIC_PHYTIUM_2500 + config ARCH_QCOM bool "Qualcomm Platforms" select GPIOLIB diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile index 4690364d584b..ff8820d78db5 100644 --- a/arch/arm64/boot/dts/Makefile +++ b/arch/arm64/boot/dts/Makefile @@ -16,6 +16,7 @@ subdir-y += lg subdir-y += marvell subdir-y += mediatek subdir-y += nvidia +subdir-y += phytium subdir-y += qcom subdir-y += realtek subdir-y += renesas diff --git a/arch/arm64/boot/dts/phytium/Makefile b/arch/arm64/boot/dts/phytium/Makefile new file mode 100644 index 000000000000..8a37a6a01ec2 --- /dev/null +++ b/arch/arm64/boot/dts/phytium/Makefile @@ -0,0 +1,12 @@ +dtb-$(CONFIG_ARCH_PHYTIUM) += ft2004-devboard-d4-dsk.dtb +dtb-$(CONFIG_ARCH_PHYTIUM) += ft2004c-devboard-dsk.dtb +dtb-$(CONFIG_ARCH_PHYTIUM) += ft1500a-devboard-16c-dsk.dtb +dtb-$(CONFIG_ARCH_PHYTIUM) += ft2000plus-SR-devboard-64c-dsk.dtb +dtb-$(CONFIG_ARCH_PHYTIUM) += ft2000plus-MR-devboard-64c-dsk.dtb +dtb-$(CONFIG_ARCH_PHYTIUM) += ft2000ahk-devboard-dsk.dtb +dtb-$(CONFIG_ARCH_PHYTIUM) += d2000-devboard-dsk.dtb +dtb-$(CONFIG_ARCH_PHYTIUM) += ft2000ahke-devboard-dsk.dtb + +always := $(dtb-y) +subdir-y := $(dts-dirs) +clean-files := *.dtb 
diff --git a/arch/arm64/boot/dts/phytium/d2000-devboard-dsk.dts b/arch/arm64/boot/dts/phytium/d2000-devboard-dsk.dts new file mode 100644 index 000000000000..5519213c53e3 --- /dev/null +++ b/arch/arm64/boot/dts/phytium/d2000-devboard-dsk.dts @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DTS file for Phytium D2000 devboard + * + * Copyright (C) 2020, Phytium Technology Co., Ltd. + */ + +/dts-v1/; +/memreserve/ 0x80000000 0x10000; + +#include "d2000-generic-psci-soc.dtsi" + +/{ + model = "D2000 Development Board"; + compatible = "phytium,d2000"; + #address-cells = <2>; + #size-cells = <2>; + + chosen { + stdout-path = "uart1:115200n8"; + }; + + memory@00{ + device_type = "memory"; + reg = <0x0 0x80000000 0x1 0x00000000>; + }; + + memory@01{ + device_type = "memory"; + reg = <0x20 0x00000000 0x1 0x00000000>; + }; + + firmware { + optee { + compatible = "linaro,optee-tz"; + method = "smc"; + }; + }; +}; + +&rtc0 { + status = "ok"; +}; + +&uart1 { + status = "ok"; +}; + +&gmac0 { + status = "ok"; + phy-mode = "rgmii-txid"; +}; + +&gmac1 { + status = "ok"; + phy-mode = "rgmii-txid"; +}; + +&spi0 { + status = "ok"; +}; + +&qspi { + status = "ok"; +}; + +&i2c0 { + status = "ok"; +}; + +&i2c1 { + status = "ok"; +}; diff --git a/arch/arm64/boot/dts/phytium/d2000-generic-psci-soc.dtsi b/arch/arm64/boot/dts/phytium/d2000-generic-psci-soc.dtsi new file mode 100644 index 000000000000..c7be57a2c2aa --- /dev/null +++ b/arch/arm64/boot/dts/phytium/d2000-generic-psci-soc.dtsi @@ -0,0 +1,525 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * dts file for Phytium D2000 SoC + * + * Copyright (C) 2020, Phytium Technology Co., Ltd. 
+ */ + +#include + +/ { + compatible = "phytium,d2000"; + interrupt-parent = <&gic>; + #address-cells = <2>; + #size-cells = <2>; + + aliases { + ethernet0 = &gmac0; + ethernet1 = &gmac1; + }; + + psci { + compatible = "arm,psci-1.0"; + method = "smc"; + cpu_suspend = <0xc4000001>; + cpu_off = <0x84000002>; + cpu_on = <0xc4000003>; + sys_poweroff = <0x84000008>; + sys_reset = <0x84000009>; + }; + + cpus { + #address-cells = <0x2>; + #size-cells = <0x0>; + + cpu0: cpu@0 { + device_type = "cpu"; + compatible = "phytium,ftc663", "arm,armv8"; + reg = <0x0 0x0>; + enable-method = "psci"; + numa-node-id = <0>; + clocks = <&scpi_dvfs 0>; + }; + + cpu1: cpu@1 { + device_type = "cpu"; + compatible = "phytium,ftc663", "arm,armv8"; + reg = <0x0 0x1>; + enable-method = "psci"; + numa-node-id = <0>; + clocks = <&scpi_dvfs 0>; + }; + + cpu2: cpu@100 { + device_type = "cpu"; + compatible = "phytium,ftc663", "arm,armv8"; + reg = <0x0 0x100>; + enable-method = "psci"; + numa-node-id = <0>; + clocks = <&scpi_dvfs 1>; + }; + + cpu3: cpu@101 { + device_type = "cpu"; + compatible = "phytium,ftc663", "arm,armv8"; + reg = <0x0 0x101>; + enable-method = "psci"; + numa-node-id = <0>; + clocks = <&scpi_dvfs 1>; + }; + + cpu4: cpu@200 { + device_type = "cpu"; + compatible = "phytium,ftc663", "arm,armv8"; + reg = <0x0 0x200>; + enable-method = "psci"; + numa-node-id = <0>; + clocks = <&scpi_dvfs 2>; + }; + + cpu5: cpu@201 { + device_type = "cpu"; + compatible = "phytium,ftc663", "arm,armv8"; + reg = <0x0 0x201>; + enable-method = "psci"; + numa-node-id = <0>; + clocks = <&scpi_dvfs 2>; + }; + + cpu6: cpu@300 { + device_type = "cpu"; + compatible = "phytium,ftc663", "arm,armv8"; + reg = <0x0 0x300>; + enable-method = "psci"; + numa-node-id = <0>; + clocks = <&scpi_dvfs 3>; + }; + + cpu7: cpu@301 { + device_type = "cpu"; + compatible = "phytium,ftc663", "arm,armv8"; + reg = <0x0 0x301>; + enable-method = "psci"; + numa-node-id = <0>; + clocks = <&scpi_dvfs 3>; + }; + }; + + gic: 
interrupt-controller@29900000 { + compatible = "arm,gic-v3"; + #interrupt-cells = <3>; + #address-cells = <2>; + #size-cells = <2>; + ranges; + interrupt-controller; + reg = <0x0 0x29a00000 0 0x20000>, /* GICD */ + <0x0 0x29b00000 0 0x100000>, /* GICR */ + <0x0 0x29c00000 0 0x10000>, /* GICC */ + <0x0 0x29c10000 0 0x10000>, /* GICH */ + <0x0 0x29c20000 0 0x10000>; /* GICV */ + interrupts = ; + + its: gic-its@29920000 { + compatible = "arm,gic-v3-its"; + msi-controller; + reg = <0x0 0x29a20000 0x0 0x20000>; + }; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupts = , + , + , + ; + clock-frequency = <48000000>; + }; + + pmu { + compatible = "arm,armv8-pmuv3"; + interrupts = ; + }; + + clocks { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + clk250mhz: clk250mhz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <250000000>; + }; + + sysclk_48mhz: clk48mhz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <48000000>; + }; + + sysclk_600mhz: clk600mhz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <600000000>; + }; + }; + + soc { + compatible = "simple-bus"; + #address-cells = <2>; + #size-cells = <2>; + dma-coherent; + ranges; + + gpio0: gpio@28004000 { + compatible = "phytium,gpio"; + reg = <0x0 0x28004000 0x0 0x1000>; + interrupts = ; + gpio-controller; + #gpio-cells = <2>; + #address-cells = <1>; + #size-cells = <0>; + + porta { + compatible = "phytium,gpio-port"; + reg = <0>; + nr-gpios = <8>; + }; + + portb { + compatible = "phytium,gpio-port"; + reg = <1>; + nr-gpios = <8>; + }; + }; + + gpio1: gpio@28005000 { + compatible = "phytium,gpio"; + reg = <0x0 0x28005000 0x0 0x1000>; + interrupts = ; + gpio-controller; + #gpio-cells = <2>; + #address-cells = <1>; + #size-cells = <0>; + + porta { + compatible = "phytium,gpio-port"; + reg = <0>; + nr-gpios = <8>; + }; + + portb { + compatible = "phytium,gpio-port"; + reg = <1>; + nr-gpios = <8>; + }; + }; + + uart0: 
uart@28000000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x28000000 0x0 0x1000>; + baud = <115200>; + reg-shift = <2>; + reg-io-width = <4>; + interrupts = ; + clocks = <&sysclk_48mhz &sysclk_48mhz>; + clock-names = "uartclk", "apb_pclk"; + }; + + uart1: uart@28001000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x28001000 0x0 0x1000>; + baud = <115200>; + reg-shift = <2>; + reg-io-width = <4>; + interrupts = ; + clocks = <&sysclk_48mhz &sysclk_48mhz>; + clock-names = "uartclk", "apb_pclk"; + }; + + uart2: uart@28002000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x28002000 0x0 0x1000>; + baud = <115200>; + reg-shift = <2>; + reg-io-width = <4>; + interrupts = ; + clocks = <&sysclk_48mhz &sysclk_48mhz>; + clock-names = "uartclk", "apb_pclk"; + }; + + uart3: uart@28003000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x28003000 0x0 0x1000>; + baud = <115200>; + reg-shift = <2>; + reg-io-width = <4>; + interrupts = ; + clocks = <&sysclk_48mhz &sysclk_48mhz>; + clock-names = "uartclk", "apb_pclk"; + }; + + sdci: sdci@28207c00 { + compatible = "phytium,sdci"; + reg = <0x0 0x28207c00 0x0 0x100>; + interrupts = , + , + ; + clocks = <&sysclk_600mhz &sysclk_600mhz>; + clock-names = "phytium_sdc_clk"; + no-sdio; + no-mmc; + no-dma-coherent; + }; + + watchdog0: watchdog@2800a000 { + compatible = "arm,sbsa-gwdt"; + reg = <0x0 0x2800b000 0x0 0x1000>, + <0x0 0x2800a000 0x0 0x1000>; + interrupts = ; + timeout-sec = <30>; + }; + + watchdog1: watchdog@28016000 { + compatible = "arm,sbsa-gwdt"; + reg = <0x0 0x28017000 0x0 0x1000>, + <0x0 0x28016000 0x0 0x1000>; + interrupts = ; + timeout-sec = <30>; + }; + + rtc0: rtc@2800d000 { + compatible = "phytium,rtc"; + reg = <0x0 0x2800d000 0x0 0x1000>; + clocks = <&sysclk_48mhz>; + clock-names = "rtc_pclk"; + interrupts = ; + status = "disabled"; + }; + + i2c0: i2c@28006000 { + compatible = "snps,designware-i2c"; + reg = <0x0 0x28006000 0x0 0x1000>; + interrupts = ; + clocks = 
<&sysclk_48mhz>; + status = "disabled"; + }; + + i2c1: i2c@28007000 { + compatible = "snps,designware-i2c"; + reg = <0x0 0x28007000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_48mhz>; + status = "disabled"; + }; + + i2c2: i2c@28008000 { + compatible = "snps,designware-i2c"; + reg = <0x0 0x28008000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_48mhz>; + status = "disabled"; + }; + + i2c3: i2c@28009000 { + compatible = "snps,designware-i2c"; + reg = <0x0 0x28009000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_48mhz>; + status = "disabled"; + }; + + spi0: spi@2800c000 { + compatible = "phytium,spi"; + interrupts = ; + reg = <0x0 0x2800c000 0x0 0x1000>; + clocks = <&sysclk_48mhz>; + num-cs = <4>; + }; + + spi1: spi@28013000 { + compatible = "phytium,spi"; + interrupts = ; + reg = <0x0 0x28013000 0x0 0x1000>; + clocks = <&sysclk_48mhz>; + num-cs = <4>; + }; + + qspi: qspi@28014000 { + compatible = "phytium,qspi"; + reg = <0x0 0x28014000 0x0 0x1000>, + <0x0 0x0 0x0 0x02000000>; + reg-names = "qspi", "qspi_mm"; + clocks = <&sysclk_600mhz>; + + flash@0 { + spi-rx-bus-width = <1>; + spi-max-frequency = <600000000>; + }; + }; + + phytium_axi_setup: stmmac-axi-config { + snps,wr_osr_lmt = <0>; + snps,rd_osr_lmt = <0>; + snps,blen = <0 0 0 0 16 8 4>; + }; + + gmac0: eth@2820c000 { + compatible = "snps,dwmac"; + reg = <0x0 0x2820c000 0x0 0x2000>; + interrupts = ; + interrupt-names = "macirq"; + clocks = <&clk250mhz>; + clock-names = "stmmaceth"; + status = "disabled"; + + snps,pbl = <16>; + snps,fixed-burst; + snps,axi-config = <&phytium_axi_setup>; + snps,force_sf_dma_mode; + snps,multicast-filter-bins = <64>; + snps,perfect-filter-entries = <128>; + tx-fifo-depth = <4096>; + rx-fifo-depth = <4096>; + max-frame-size = <9000>; + }; + + gmac1: eth@28210000 { + compatible = "snps,dwmac"; + reg = <0x0 0x28210000 0x0 0x2000>; + interrupts = ; + interrupt-names = "macirq"; + clocks = <&clk250mhz>; + clock-names = "stmmaceth"; + status = "disabled"; + + snps,pbl = 
<16>; + snps,fixed-burst; + snps,axi-config = <&phytium_axi_setup>; + snps,force_sf_dma_mode; + snps,multicast-filter-bins = <64>; + snps,perfect-filter-entries = <128>; + snps,rx-queues-to-use = <2>; + tx-fifo-depth = <4096>; + rx-fifo-depth = <4096>; + max-frame-size = <9000>; + }; + + can0: can@28207000 { + compatible = "phytium,can"; + reg = <0x0 0x28207000 0x0 0x400>; + interrupts = ; + clocks = <&sysclk_600mhz>; + clock-names = "phytium_can_clk"; + tx-fifo-depth = <0x40>; + rx-fifo-depth = <0x40>; + extend_brp; + }; + + can1: can@28207400 { + compatible = "phytium,can"; + reg = <0x0 0x28207400 0x0 0x400>; + interrupts = ; + clocks = <&sysclk_600mhz>; + clock-names = "phytium_can_clk"; + tx-fifo-depth = <0x40>; + rx-fifo-depth = <0x40>; + extend_brp; + }; + + can2: can@028207800 { + compatible = "phytium,can"; + reg = <0x0 0x28207800 0x0 0x400>; + interrupts = ; + clocks = <&sysclk_600mhz>; + clock-names = "phytium_can_clk"; + tx-fifo-depth = <0x40>; + rx-fifo-depth = <0x40>; + extend_brp; + }; + + hda: hda@28206000 { + compatible = "phytium,hda"; + reg = <0 0x28206000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_48mhz>; + clock-names = "phytium_hda_clk"; + }; + + mbox: mailbox@2a000000 { + compatible = "phytium,mbox"; + reg = <0x0 0x2a000000 0x0 0x1000>; + interrupts = ; + #mbox-cells = <1>; + clocks = <&sysclk_48mhz>; + clock-names = "apb_pclk"; + }; + + sram: sram@2a006000 { + compatible = "phytium,ft2004-sram-ns","mmio-sram"; + reg = <0x0 0x2a006000 0x0 0x2000>; + + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x0 0x2a006000 0x2000>; + + scpi_lpri: scpi-shmem@0 { + compatible = "phytium,ft2004-scpi-shmem"; + reg = <0x1000 0x800>; + }; + }; + + scpi_protocol: scpi { + compatible = "arm,scpi"; + mboxes = <&mbox 0>; + shmem = <&scpi_lpri>; + + clocks { + compatible = "arm,scpi-clocks"; + + scpi_dvfs: scpi_clocks@0 { + compatible = "arm,scpi-dvfs-clocks"; + #clock-cells = <1>; + clock-indices = <0>, <1>, <2>, <3>; + clock-output-names = 
"c0", "c1", "c2", "c3"; + }; + }; + + scpi_sensors: sensors { + compatible = "arm,scpi-sensors"; + #thermal-sensor-cells = <1>; + }; + }; + + ixic: interrupt-controller@29000000 { + compatible = "phytium,d2000-ixic"; + reg-names = "ctr", "hpb"; + reg = <0x0 0x29000000 0x0 0x00060000>, + <0x0 0x29100000 0x0 0x00002000>; + interrupt-controller; + interrupt-parent = <&gic>; + #interrupt-cells = <3>; + intx-spi-base = <28>; + }; + + pcie: pcie { + compatible = "pci-host-ecam-generic"; + device_type = "pci"; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + reg = <0x0 0x40000000 0x0 0x10000000>; + msi-parent = <&its>; + bus-range = <0x0 0xff>; + interrupt-map-mask = <0x0 0x0 0x0 0x7>; + interrupt-map = <0x0 0x0 0x0 0x1 &ixic GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x0 0x0 0x2 &ixic GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x0 0x0 0x3 &ixic GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x0 0x0 0x4 &ixic GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>; + ranges = <0x01000000 0x00 0x00000000 0x0 0x50000000 0x0 0x00f00000>, + <0x02000000 0x00 0x58000000 0x0 0x58000000 0x0 0x28000000>, + <0x03000000 0x10 0x00000000 0x10 0x00000000 0x10 0x00000000>; + }; + }; + +}; diff --git a/arch/arm64/boot/dts/phytium/ft1500a-16c-generic-psci-soc.dtsi b/arch/arm64/boot/dts/phytium/ft1500a-16c-generic-psci-soc.dtsi new file mode 100644 index 000000000000..a0c73fa3fbe8 --- /dev/null +++ b/arch/arm64/boot/dts/phytium/ft1500a-16c-generic-psci-soc.dtsi @@ -0,0 +1,511 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * dts file for FT-1500A SoC + * + * Copyright (C) 2019, Phytium Technology Co., Ltd. 
+ */ + +#include + +/ { + compatible = "phytium,ft1500a"; + interrupt-parent = <&gic>; + #address-cells = <2>; + #size-cells = <2>; + + aliases { + ethernet0 = &gmac0; + ethernet1 = &gmac1; + }; + + psci { + compatible = "arm,psci-1.0", "arm,psci-0.2", "arm,psci"; + method = "smc"; + cpu_suspend = <0xc4000001>; + cpu_off = <0x84000002>; + cpu_on = <0xc4000003>; + }; + + cpus { + #address-cells = <2>; + #size-cells = <0>; + + cpu-map { + cluster0 { + core0 { + cpu = <&cpu0>; + }; + core1 { + cpu = <&cpu1>; + }; + core2 { + cpu = <&cpu2>; + }; + core3 { + cpu = <&cpu3>; + }; + }; + + cluster1 { + core0 { + cpu = <&cpu4>; + }; + core1 { + cpu = <&cpu5>; + }; + core2 { + cpu = <&cpu6>; + }; + core3 { + cpu = <&cpu7>; + }; + }; + + cluster2 { + core0 { + cpu = <&cpu8>; + }; + core1 { + cpu = <&cpu9>; + }; + core2 { + cpu = <&cpu10>; + }; + core3 { + cpu = <&cpu11>; + }; + }; + + cluster3 { + core0 { + cpu = <&cpu12>; + }; + core1 { + cpu = <&cpu13>; + }; + core2 { + cpu = <&cpu14>; + }; + core3 { + cpu = <&cpu15>; + }; + }; + }; + + idle-states { + entry-method = "arm,psci"; + + CPU_SLEEP: cpu-sleep { + compatible = "arm,idle-state"; + local-timer-stop; + arm,psci-suspend-param = <0x0010000>; + entry-latency-us = <100>; + exit-latency-us = <100>; + min-residency-us = <200>; + }; + }; + + cpu0:cpu@0 { + device_type = "cpu"; + compatible = "phytium,ftc660", "arm,armv8"; + reg = <0x0 0x000>; + enable-method = "psci"; + cpu-idle-states = <&CPU_SLEEP>; + clocks = <&cpuclk 0>; + clock-latency = <10000>; + cooling-min-level = <0>; /* cooling options */ + cooling-max-level = <5>; + #cooling-cells = <2>; /* min followed by max */ + }; + + cpu1:cpu@1 { + device_type = "cpu"; + compatible = "phytium,ftc660", "arm,armv8"; + reg = <0x0 0x001>; + enable-method = "psci"; + cpu-idle-states = <&CPU_SLEEP>; + clocks = <&cpuclk 0>; + clock-latency = <10000>; + }; + + cpu2:cpu@2 { + device_type = "cpu"; + compatible = "phytium,ftc660", "arm,armv8"; + reg = <0x0 0x002>; + enable-method = 
"psci"; + cpu-idle-states = <&CPU_SLEEP>; + clocks = <&cpuclk 0>; + clock-latency = <10000>; + }; + + cpu3:cpu@3 { + device_type = "cpu"; + compatible = "phytium,ftc660", "arm,armv8"; + reg = <0x0 0x003>; + enable-method = "psci"; + cpu-idle-states = <&CPU_SLEEP>; + clocks = <&cpuclk 0>; + clock-latency = <10000>; + }; + + cpu4:cpu@100 { + device_type = "cpu"; + compatible = "phytium,ftc660", "arm,armv8"; + reg = <0x0 0x100>; + enable-method = "psci"; + cpu-idle-states = <&CPU_SLEEP>; + clocks = <&cpuclk 1>; + clock-latency = <10000>; + cooling-min-level = <0>; /* cooling options */ + cooling-max-level = <5>; + #cooling-cells = <2>; /* min followed by max */ + }; + + cpu5:cpu@101 { + device_type = "cpu"; + compatible = "phytium,ftc660", "arm,armv8"; + reg = <0x0 0x101>; + enable-method = "psci"; + cpu-idle-states = <&CPU_SLEEP>; + clocks = <&cpuclk 1>; + clock-latency = <10000>; + }; + + cpu6:cpu@102 { + device_type = "cpu"; + compatible = "phytium,ftc660", "arm,armv8"; + reg = <0x0 0x102>; + enable-method = "psci"; + cpu-idle-states = <&CPU_SLEEP>; + clocks = <&cpuclk 1>; + clock-latency = <10000>; + }; + + cpu7:cpu@103 { + device_type = "cpu"; + compatible = "phytium,ftc660", "arm,armv8"; + reg = <0x0 0x103>; + enable-method = "psci"; + cpu-idle-states = <&CPU_SLEEP>; + clocks = <&cpuclk 1>; + clock-latency = <10000>; + }; + + cpu8:cpu@200 { + device_type = "cpu"; + compatible = "phytium,ftc660", "arm,armv8"; + reg = <0x0 0x200>; + enable-method = "psci"; + cpu-idle-states = <&CPU_SLEEP>; + clocks = <&cpuclk 2>; + clock-latency = <10000>; + cooling-min-level = <0>; /* cooling options */ + cooling-max-level = <5>; + #cooling-cells = <2>; /* min followed by max */ + }; + + cpu9:cpu@201 { + device_type = "cpu"; + compatible = "phytium,ftc660", "arm,armv8"; + reg = <0x0 0x201>; + enable-method = "psci"; + cpu-idle-states = <&CPU_SLEEP>; + clocks = <&cpuclk 2>; + clock-latency = <10000>; + }; + + cpu10:cpu@202 { + device_type = "cpu"; + compatible = "phytium,ftc660", 
"arm,armv8"; + reg = <0x0 0x202>; + enable-method = "psci"; + cpu-idle-states = <&CPU_SLEEP>; + clocks = <&cpuclk 2>; + clock-latency = <10000>; + }; + + cpu11:cpu@203 { + device_type = "cpu"; + compatible = "phytium,ftc660", "arm,armv8"; + reg = <0x0 0x203>; + enable-method = "psci"; + cpu-idle-states = <&CPU_SLEEP>; + clocks = <&cpuclk 2>; + clock-latency = <10000>; + }; + + cpu12:cpu@300 { + device_type = "cpu"; + compatible = "phytium,ftc660", "arm,armv8"; + reg = <0x0 0x300>; + enable-method = "psci"; + cpu-idle-states = <&CPU_SLEEP>; + clocks = <&cpuclk 3>; + clock-latency = <10000>; + cooling-min-level = <0>; /* cooling options */ + cooling-max-level = <5>; + #cooling-cells = <2>; /* min followed by max */ + }; + + cpu13:cpu@301 { + device_type = "cpu"; + compatible = "phytium,ftc660", "arm,armv8"; + reg = <0x0 0x301>; + enable-method = "psci"; + cpu-idle-states = <&CPU_SLEEP>; + clocks = <&cpuclk 3>; + clock-latency = <10000>; + }; + + cpu14:cpu@302 { + device_type = "cpu"; + compatible = "phytium,ftc660", "arm,armv8"; + reg = <0x0 0x302>; + enable-method = "psci"; + cpu-idle-states = <&CPU_SLEEP>; + clocks = <&cpuclk 3>; + clock-latency = <10000>; + }; + + cpu15:cpu@303 { + device_type = "cpu"; + compatible = "phytium,ftc660", "arm,armv8"; + reg = <0x0 0x303>; + enable-method = "psci"; + cpu-idle-states = <&CPU_SLEEP>; + clocks = <&cpuclk 3>; + clock-latency = <10000>; + }; + }; + + gic: interrupt-controller@29800000 { + compatible = "arm,gic-v3"; + #interrupt-cells = <3>; + #address-cells = <2>; + #size-cells = <2>; + ranges; + interrupt-controller; + reg = <0x0 0x29800000 0 0x10000>, /* GICD */ + <0x0 0x29a00000 0 0x200000>, /* GICR */ + <0x0 0x29c00000 0 0x10000>, /* GICC */ + <0x0 0x29c10000 0 0x10000>, /* GICH */ + <0x0 0x29c20000 0 0x10000>; /* GICV */ + interrupts = ; + + its: gic-its@29820000 { + compatible = "arm,gic-v3-its"; + msi-controller; + reg = <0x0 0x29820000 0x0 0x20000>; + }; + }; + + timer { + compatible = "arm,armv8-timer"; + 
interrupts = , + , + , + ; + clock-frequency = <50000000>; + }; + + pmu { + compatible = "arm,armv8-pmuv3"; + interrupts = ; + }; + + clocks { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + /* 50 MHz reference crystal */ + refclk: refclk { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <50000000>; + }; + + clk_100mhz: clk_100mhz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clocks = <&refclk>; + clock-frequency = <100000000>; + }; + + cpuclk: cpuclk { + compatible = "phytium,1500a-cpu-clock"; + #clock-cells = <1>; + reg = <0x0 0x28100600 0x0 0x10>; + clocks = <&refclk>; + mode = <0x2>; /* 0: do not use pll, 1: partially use pll, 2: totally use pll */ + /*big-clock;*/ + clock-output-names = "cluster0-clk", + "cluster1-clk", + "cluster2-clk", + "cluster3-clk"; + }; + + gmacclk: gmacclk { + compatible = "phytium,1500a-gmac-clock"; + #clock-cells = <0>; + reg = <0x0 0x2810050c 0x0 0x4>; + clocks = <&refclk>; + clock-frequency = <500000000>; + clock-output-names = "gmac-clk"; + }; + }; + + soc { + compatible = "simple-bus"; + #address-cells = <2>; + #size-cells = <2>; + dma-coherent; + ranges; + + uart0: serial@28000000 { + compatible = "snps,dw-apb-uart"; + reg = <0x0 0x28000000 0x0 0x1000>; + clock-frequency = <50000000>; + interrupts = ; + reg-shift = <2>; + reg-io-width = <4>; + status = "disabled"; + }; + + uart1: serial@28001000 { + compatible = "snps,dw-apb-uart"; + reg = <0x0 0x28001000 0x0 0x1000>; + clock-frequency = <50000000>; + interrupts = ; + reg-shift = <2>; + reg-io-width = <4>; + status = "disabled"; + }; + + i2c0: i2c@28002000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,designware-i2c"; + reg = <0x0 0x28002000 0x0 0x1000>; + interrupts = ; + clock-frequency = <100000>; + clocks = <&clk_100mhz>; + status = "disabled"; + }; + + i2c1: i2c@28003000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,designware-i2c"; + reg = <0x0 0x28003000 0x0 0x1000>; + interrupts = ; 
+ clock-frequency = <100000>; + clocks = <&clk_100mhz>; + status = "disabled"; + }; + + wdt0: watchdog@28004000 { + compatible = "snps,dw-wdt"; + reg = <0x0 0x28004000 0x0 0x1000>; + clocks = <&refclk>; + interrupts = ; + status = "disabled"; + }; + + wdt1: watchdog@28005000 { + compatible = "snps,dw-wdt"; + reg = <0x0 0x28005000 0x0 0x1000>; + clocks = <&refclk>; + interrupts = ; + status = "disabled"; + }; + + gpio: gpio@28006000 { + compatible = "snps,dw-apb-gpio"; + reg = <0x0 0x28006000 0x0 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + porta: gpio-controller@0 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <2>; + snps,nr-gpios = <8>; + reg = <0>; + }; + portb: gpio-controller@1 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <2>; + snps,nr-gpios = <8>; + reg = <1>; + }; + portc: gpio-controller@2 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <2>; + snps,nr-gpios = <8>; + reg = <2>; + }; + portd: gpio-controller@3 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <2>; + snps,nr-gpios = <8>; + reg = <3>; + }; + }; + + gmac0: ethernet@28c00000 { + compatible = "snps,dwmac"; + reg = <0 0x28c00000 0x0 0x2000>; + interrupts = ; + interrupt-names = "macirq"; + clocks = <&gmacclk>; + clock-names = "stmmaceth"; + snps,pbl = <32>; + snps,fixed-burst; + snps,burst_len = <0xe>; + snps,force_sf_dma_mode; + snps,multicast-filter-bins = <64>; + snps,perfect-filter-entries = <1>; + max-frame-size = <9000>; + status = "disabled"; + }; + + gmac1: ethernet@28c02000 { + compatible = "snps,dwmac"; + reg = <0 0x28c02000 0x0 0x2000>; + interrupts = ; + interrupt-names = "macirq"; + clocks = <&gmacclk>; + clock-names = "stmmaceth"; + snps,pbl = <32>; + snps,fixed-burst; + snps,burst_len = <0xe>; + snps,force_sf_dma_mode; + snps,multicast-filter-bins = <64>; + snps,perfect-filter-entries = <1>; + max-frame-size = <9000>; + status = 
"disabled"; + }; + + pcie0: pcie-controller { + compatible = "pci-host-ecam-generic"; + device_type = "pci"; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + reg = <0 0x40000000 0 0x10000000>; + msi-parent = <&its>; + interrupt-map-mask = <0x0000 0x0 0x0 0x7>; + interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0x0 GIC_SPI 0x33 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x0 0x0 0x2 &gic 0x0 0x0 GIC_SPI 0x34 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x0 0x0 0x3 &gic 0x0 0x0 GIC_SPI 0x35 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x0 0x0 0x4 &gic 0x0 0x0 GIC_SPI 0x36 IRQ_TYPE_LEVEL_HIGH>; + ranges = <0x01000000 0x00 0x00000000 0x00 0x50000000 0x00 0x1000000>, + <0x02000000 0x00 0x60000000 0x00 0x60000000 0x00 0x20000000>, + <0x43000000 0x01 0x00000000 0x01 0x00000000 0x01 0x00000000>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/phytium/ft1500a-devboard-16c-dsk.dts b/arch/arm64/boot/dts/phytium/ft1500a-devboard-16c-dsk.dts new file mode 100644 index 000000000000..ed2127496e95 --- /dev/null +++ b/arch/arm64/boot/dts/phytium/ft1500a-devboard-16c-dsk.dts @@ -0,0 +1,51 @@ +/* + * DTS file for Phytium FT1500A Generic board + * + * Copyright (C) 2015, Phytium Technology Co., Ltd. + * + * This file is licensed under a dual GPLv2 or BSD license. 
+ */ + +/dts-v1/; +/memreserve/ 0x80000000 0x80000; + +#include "ft1500a-16c-generic-psci-soc.dtsi" + +/ { + model = "FT1500A-16CORE-DSK Development Board"; + compatible = "phytium,ft-1500a"; + + chosen { + linux,pci-probe-only = <1>; + stdout-path = "uart1:115200n8"; + }; + + memory { + device_type = "memory"; + reg = <0x0 0x80000000 0x0 0x80000000>; /* Updated by bootloader */ + }; +}; + +&uart1 { + status = "ok"; +}; + +&i2c0 { + status = "ok"; +}; + +&i2c1 { + status = "ok"; +}; + +&wdt0 { + status = "ok"; +}; + +&gmac0 { + phy-mode = "gmii"; +}; + +&gmac1 { + phy-mode = "gmii"; +}; diff --git a/arch/arm64/boot/dts/phytium/ft2000ahk-devboard-dsk.dts b/arch/arm64/boot/dts/phytium/ft2000ahk-devboard-dsk.dts new file mode 100644 index 000000000000..4afbbf9c827b --- /dev/null +++ b/arch/arm64/boot/dts/phytium/ft2000ahk-devboard-dsk.dts @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DTS file for Phytium FT-2000A/2 devboard (FT-2000A-HK-DSK series) + * + * Copyright (C) 2019, Phytium Techonlogy Co., Ltd. 
+ */ + +/dts-v1/; + +#include "ft2000ahk-generic-spintable-soc.dtsi" + +/ { + model = "FT-2000A-HK-DSK Development Board"; + compatible = "phytium,ft-2000ahk"; + #address-cells = <2>; + #size-cells = <2>; + + chosen { + linux,pci-probe-only = <1>; + }; + + memory { + device_type = "memory"; + reg = <0x0 0x80000000 0x0 0x80000000>; + }; +}; + +&i2c0 { + status = "ok"; + rtc@68 { + compatible = "dallas,ds1339"; + reg = <0x68>; + }; +}; + +&uart1 { + status = "ok"; +}; + +&gmac0 { + status = "ok"; + phy-mode = "rgmii"; +}; + +&gmac1 { + status = "ok"; + phy-mode = "rgmii"; +}; + +&gpio { + status = "ok"; +}; diff --git a/arch/arm64/boot/dts/phytium/ft2000ahk-generic-spintable-soc.dtsi b/arch/arm64/boot/dts/phytium/ft2000ahk-generic-spintable-soc.dtsi new file mode 100644 index 000000000000..fb587b664a83 --- /dev/null +++ b/arch/arm64/boot/dts/phytium/ft2000ahk-generic-spintable-soc.dtsi @@ -0,0 +1,253 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * dts file for FT-2000A/2 SoC + * + * Copyright (C) 2019, Phytium Technology Co., Ltd. 
+ */ + +#include + +/ { + compatible = "phytium,ft2000ahk"; + interrupt-parent = <&gic>; + #address-cells = <2>; + #size-cells = <2>; + + aliases { + ethernet0 = &gmac0; + ethernet1 = &gmac1; + }; + + cpus { + #address-cells = <2>; + #size-cells = <0>; + + cpu0: cpu@0 { + device_type = "cpu"; + compatible = "phytium,ftc661", "arm,armv8"; + reg = <0x0 0x0>; + enable-method = "spin-table"; + cpu-release-addr = <0x0 0x8007fff0>; + }; + + cpu1: cpu@1 { + device_type = "cpu"; + compatible = "phytium,ftc661", "arm,armv8"; + reg = <0x0 0x1>; + enable-method = "spin-table"; + cpu-release-addr = <0x0 0x8007fff0>; + }; + }; + + gic: interrupt-controller@71800000 { + compatible = "arm,gic-400"; + #interrupt-cells = <3>; + #address-cells = <2>; + #size-cells = <2>; + ranges; + interrupt-controller; + reg = <0x0 0x71801000 0x0 0x1000>, + <0x0 0x71802000 0x0 0x2000>, + <0x0 0x71804000 0x0 0x1000>, + <0x0 0x71805000 0x0 0x1000>; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupts = , + , + , + ; + clock-frequency = <50000000>; + }; + + pmu { + compatible = "arm,armv8-pmuv3"; + interrupts = , + ; + interrupt-affinity = <&cpu0 &cpu1>; + }; + + soc { + compatible = "simple-bus"; + #address-cells = <2>; + #size-cells = <2>; + dma-coherent; + ranges; + + clocks { + refclk: refclk { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <50000000>; + }; + + clk250mhz: clk250mhz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <250000000>; + }; + + clk500mhz: clk500mhz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <500000000>; + }; + }; + + uart0: uart@70000000 { + compatible = "snps,dw-apb-uart"; + reg = <0x0 0x70000000 0x0 0x1000>; + clock-frequency = <50000000>; + interrupts = ; + reg-shift = <2>; + reg-io-width = <4>; + status = "disabled"; + }; + + uart1: uart@70001000 { + compatible = "snps,dw-apb-uart"; + reg = <0x0 0x70001000 0x0 0x1000>; + clock-frequency = <50000000>; + interrupts = ; + reg-shift 
= <2>; + reg-io-width = <4>; + status = "disabled"; + }; + + i2c0: i2c@70002000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,designware-i2c"; + reg = <0x0 0x70002000 0x0 0x1000>; + interrupts = ; + clock-frequency = <100000>; + clocks = <&refclk>; + status = "disabled"; + }; + + i2c1: i2c@70003000 { + #address-cells = <01>; + #size-cells = <0>; + compatible = "snps,designware-i2c"; + reg = <0x0 0x70003000 0x0 0x1000>; + interrupts = ; + clock-frequency = <100000>; + clocks = <&refclk>; + status = "disabled"; + }; + + watchdog0: wd@70004000 { + compatible = "snps,dw-wdt"; + reg = <0x0 0x70004000 0x0 0x1000>; + clocks = <&refclk>; + interrupts = ; + status = "disabled"; + }; + + watchdog1: wd@70005000 { + compatible = "snps,dw-wdt"; + reg = <0x0 0x70005000 0x0 0x1000>; + clocks = <&refclk>; + interrupts = ; + status = "disabled"; + }; + + gpio: gpio@70006000 { + compatible = "snps,dw-apb-gpio"; + reg = <0x0 0x70006000 0x0 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + porta: gpio-controller@0 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <2>; + snps,nr-gpios = <8>; + reg = <0>; + }; + + portb: gpio-controller@1 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <2>; + snps,nr-gpios = <8>; + reg = <1>; + }; + + portc: gpio-controller@2 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <2>; + snps,nr-gpios = <8>; + reg = <2>; + }; + + portd: gpio-controller@3 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <2>; + snps,nr-gpios = <8>; + reg = <3>; + }; + }; + + gmac0: eth@70c00000 { + compatible = "snps,dwmac"; + reg = <0x0 0x70c00000 0x0 0x2000>; + interrupts = ; + interrupt-names = "macirq"; + clocks = <&clk500mhz>; + clock-names = "stmmaceth"; + status = "disabled"; + + snps,pbl = <16>; + snps,fixed-burst; + snps,burst_len = <14>; + snps,force_sf_dma_mode; + snps,multicast-filter-bins = <64>; + 
snps,perfect-filter-entries = <128>; + tx-fifo-depth = <4096>; + rx-fifo-depth = <4096>; + max-frame-size = <9000>; + }; + + gmac1: eth@70c10000 { + compatible = "snps,dwmac"; + reg = <0x0 0x70c10000 0x0 0x2000>; + interrupts = ; + interrupt-names = "macirq"; + clocks = <&clk500mhz>; + clock-names = "stmmaceth"; + status = "disabled"; + + snps,pbl = <16>; + snps,fixed-burst; + snps,burst_len = <14>; + snps,force_sf_dma_mode; + snps,multicast-filter-bins = <64>; + snps,perfect-filter-entries = <128>; + tx-fifo-depth = <4096>; + rx-fifo-depth = <4096>; + max-frame-size = <9000>; + }; + + pcie: pcie { + compatible = "pci-host-ecam-generic"; + device_type = "pci"; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + reg = <0x0 0x40000000 0x0 0x4000000>; + interrupt-map-mask = <0x0 0x0 0x0 0x7>; + interrupt-map = <0x00 0x0 0x0 0x1 &gic 0x00 0x00 GIC_SPI 0x17 IRQ_TYPE_LEVEL_HIGH>, + <0x00 0x0 0x0 0x2 &gic 0x00 0x00 GIC_SPI 0x16 IRQ_TYPE_LEVEL_HIGH>, + <0x00 0x0 0x0 0x3 &gic 0x00 0x00 GIC_SPI 0x15 IRQ_TYPE_LEVEL_HIGH>, + <0x00 0x0 0x0 0x4 &gic 0x00 0x00 GIC_SPI 0x14 IRQ_TYPE_LEVEL_HIGH>; + ranges = <0x01000000 0x0 0x00000000 0x0 0x44000000 0x0 0x01000000>, + <0x02000000 0x0 0x48000000 0x0 0x48000000 0x0 0x18000000>, + <0x03000000 0x1 0x00000000 0x1 0x00000000 0x1 0x00000000>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/phytium/ft2000ahke-devboard-dsk.dts b/arch/arm64/boot/dts/phytium/ft2000ahke-devboard-dsk.dts new file mode 100644 index 000000000000..beafd9b7fd12 --- /dev/null +++ b/arch/arm64/boot/dts/phytium/ft2000ahke-devboard-dsk.dts @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DTS file for Phytium FT-2000A/2 devboard (FT-2000A-HKE-DSK series) + * + * Copyright (C) 2021, Phytium Techonlogy Co., Ltd. 
+ */ + +/dts-v1/; + +#include "ft2000ahke-generic-psci-soc.dtsi" + +/ { + model = "FT-2000A-HKE-DSK Development Board"; + compatible = "phytium,ft-2000ahke"; + #address-cells = <2>; + #size-cells = <2>; + + chosen { + linux,pci-probe-only = <1>; + }; + + memory { + device_type = "memory"; + reg = <0x0 0x80000000 0x0 0x7C000000>; + }; +}; + +&i2c0 { + status = "ok"; + rtc@68 { + compatible = "dallas,ds1339"; + reg = <0x68>; + }; +}; + +&uart1 { + status = "ok"; +}; + +&gmac0 { + status = "ok"; + phy-mode = "rgmii"; +}; + +&gmac1 { + status = "ok"; + phy-mode = "rgmii"; +}; + +&gpio { + status = "ok"; +}; + +&spi0 { + status = "ok"; +}; + +&spi1 { + status = "ok"; +}; + +&can0 { + status = "ok"; +}; + +&can1 { + status = "ok"; +}; diff --git a/arch/arm64/boot/dts/phytium/ft2000ahke-generic-psci-soc.dtsi b/arch/arm64/boot/dts/phytium/ft2000ahke-generic-psci-soc.dtsi new file mode 100644 index 000000000000..dd7631e51d33 --- /dev/null +++ b/arch/arm64/boot/dts/phytium/ft2000ahke-generic-psci-soc.dtsi @@ -0,0 +1,312 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * dts file for FT-2000A/2 SoC + * + * Copyright (C) 2019, Phytium Technology Co., Ltd. 
+ */ + +#include + +/ { + compatible = "phytium,ft2000ahke"; + interrupt-parent = <&gic>; + #address-cells = <2>; + #size-cells = <2>; + + aliases { + ethernet0 = &gmac0; + ethernet1 = &gmac1; + }; + psci { + compatible = "arm,psci-1.0", "arm,psci-0.2", "arm,psci"; + method = "smc"; + cpu_suspend = <0xc4000001>; + cpu_off = <0x84000002>; + cpu_on = <0xc4000003>; + sys_poweroff = <0x84000008>; + sys_reset = <0x84000009>; + }; + + cpus { + #address-cells = <2>; + #size-cells = <0>; + + cpu0: cpu@0 { + device_type = "cpu"; + compatible = "phytium,ftc661", "arm,armv8"; + reg = <0x0 0x0>; + enable-method = "psci"; + }; + + cpu1: cpu@1 { + device_type = "cpu"; + compatible = "phytium,ftc661", "arm,armv8"; + reg = <0x0 0x1>; + enable-method = "psci"; + }; + }; + + gic: interrupt-controller@71800000 { + compatible = "arm,gic-400"; + #interrupt-cells = <3>; + #address-cells = <2>; + #size-cells = <2>; + ranges; + interrupt-controller; + reg = <0x0 0x71801000 0x0 0x1000>, + <0x0 0x71802000 0x0 0x2000>, + <0x0 0x71804000 0x0 0x1000>, + <0x0 0x71805000 0x0 0x1000>; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupts = , + , + , + ; + clock-frequency = <50000000>; + }; + + pmu { + compatible = "arm,armv8-pmuv3"; + interrupts = , + ; + interrupt-affinity = <&cpu0 &cpu1>; + }; + + soc { + compatible = "simple-bus"; + #address-cells = <2>; + #size-cells = <2>; + dma-coherent; + ranges; + + clocks { + refclk: refclk { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <50000000>; + }; + + clk250mhz: clk250mhz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <250000000>; + }; + + clk500mhz: clk500mhz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <500000000>; + }; + + sysclk_48mhz: clk48mhz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <48000000>; + }; + + sysclk_600mhz: clk600mhz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <600000000>; + }; 
+ }; + + uart0: uart@70000000 { + compatible = "snps,dw-apb-uart"; + reg = <0x0 0x70000000 0x0 0x1000>; + clock-frequency = <50000000>; + interrupts = ; + reg-shift = <2>; + reg-io-width = <4>; + status = "disabled"; + }; + + uart1: uart@70001000 { + compatible = "snps,dw-apb-uart"; + reg = <0x0 0x70001000 0x0 0x1000>; + clock-frequency = <50000000>; + interrupts = ; + reg-shift = <2>; + reg-io-width = <4>; + status = "disabled"; + }; + + uart2: uart@70007000 { + compatible = "snps,dw-apb-uart"; + reg = <0x0 0x70007000 0x0 0x1000>; + clock-frequency = <50000000>; + interrupts = ; + reg-shift = <2>; + reg-io-width = <4>; + status = "disabled"; + }; + + uart3: uart@70008000 { + compatible = "snps,dw-apb-uart"; + reg = <0x0 0x70008000 0x0 0x1000>; + clock-frequency = <50000000>; + interrupts = ; + reg-shift = <2>; + reg-io-width = <4>; + status = "disabled"; + }; + + i2c0: i2c@70002000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,designware-i2c"; + reg = <0x0 0x70002000 0x0 0x1000>; + interrupts = ; + clock-frequency = <100000>; + clocks = <&refclk>; + status = "disabled"; + }; + + i2c1: i2c@70003000 { + #address-cells = <01>; + #size-cells = <0>; + compatible = "snps,designware-i2c"; + reg = <0x0 0x70003000 0x0 0x1000>; + interrupts = ; + clock-frequency = <100000>; + clocks = <&refclk>; + status = "disabled"; + }; + + gpio: gpio@70006000 { + compatible = "snps,dw-apb-gpio"; + reg = <0x0 0x70006000 0x0 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + porta: gpio-controller@0 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <2>; + snps,nr-gpios = <8>; + reg = <0>; + }; + + portb: gpio-controller@1 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <2>; + snps,nr-gpios = <8>; + reg = <1>; + }; + + portc: gpio-controller@2 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <2>; + snps,nr-gpios = <8>; + reg = <2>; + }; + + portd: 
gpio-controller@3 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <2>; + snps,nr-gpios = <8>; + reg = <3>; + }; + }; + + gmac0: eth@70c00000 { + compatible = "snps,dwmac"; + reg = <0x0 0x70c00000 0x0 0x2000>; + interrupts = ; + interrupt-names = "macirq"; + clocks = <&clk500mhz>; + clock-names = "stmmaceth"; + status = "disabled"; + + snps,pbl = <16>; + snps,fixed-burst; + snps,burst_len = <14>; + snps,force_sf_dma_mode; + snps,multicast-filter-bins = <64>; + snps,perfect-filter-entries = <128>; + tx-fifo-depth = <4096>; + rx-fifo-depth = <4096>; + max-frame-size = <9000>; + }; + + gmac1: eth@70c10000 { + compatible = "snps,dwmac"; + reg = <0x0 0x70c10000 0x0 0x2000>; + interrupts = ; + interrupt-names = "macirq"; + clocks = <&clk500mhz>; + clock-names = "stmmaceth"; + status = "disabled"; + + snps,pbl = <16>; + snps,fixed-burst; + snps,burst_len = <14>; + snps,force_sf_dma_mode; + snps,multicast-filter-bins = <64>; + snps,perfect-filter-entries = <128>; + tx-fifo-depth = <4096>; + rx-fifo-depth = <4096>; + max-frame-size = <9000>; + }; + + spi0: spi@70009000 { + compatible = "phytium,spi"; + interrupts = ; + reg = <0x0 0x70009000 0x0 0x1000>; + clocks = <&sysclk_48mhz>; + num-cs = <4>; + }; + + spi1: spi@7000a000 { + compatible = "phytium,spi"; + interrupts = ; + reg = <0x0 0x7000a000 0x0 0x1000>; + clocks = <&sysclk_48mhz>; + num-cs = <4>; + }; + + can0: can@70014000 { + compatible = "phytium,can"; + reg = <0x0 0x70014000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_600mhz>; + clock-names = "phytium_can_clk"; + tx-fifo-depth = <0x40>; + rx-fifo-depth = <0x40>; + }; + + can1: can@70015000 { + compatible = "phytium,can"; + reg = <0x0 0x70015000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_600mhz>; + clock-names = "phytium_can_clk"; + tx-fifo-depth = <0x40>; + rx-fifo-depth = <0x40>; + }; + + pcie: pcie { + compatible = "pci-host-ecam-generic"; + device_type = "pci"; + #address-cells = <3>; + #size-cells = <2>; + 
#interrupt-cells = <1>; + reg = <0x0 0x40000000 0x0 0x4000000>; + interrupt-map-mask = <0x0 0x0 0x0 0x7>; + interrupt-map = <0x00 0x0 0x0 0x1 &gic 0x00 0x00 GIC_SPI 0x17 IRQ_TYPE_LEVEL_HIGH>, + <0x00 0x0 0x0 0x2 &gic 0x00 0x00 GIC_SPI 0x16 IRQ_TYPE_LEVEL_HIGH>, + <0x00 0x0 0x0 0x3 &gic 0x00 0x00 GIC_SPI 0x15 IRQ_TYPE_LEVEL_HIGH>, + <0x00 0x0 0x0 0x4 &gic 0x00 0x00 GIC_SPI 0x14 IRQ_TYPE_LEVEL_HIGH>; + ranges = <0x01000000 0x0 0x00000000 0x0 0x44000000 0x0 0x01000000>, + <0x02000000 0x0 0x48000000 0x0 0x48000000 0x0 0x18000000>, + <0x03000000 0x1 0x00000000 0x1 0x00000000 0x1 0x00000000>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/phytium/ft2000plus-MR-devboard-64c-dsk.dts b/arch/arm64/boot/dts/phytium/ft2000plus-MR-devboard-64c-dsk.dts new file mode 100644 index 000000000000..7c2ee8e60dce --- /dev/null +++ b/arch/arm64/boot/dts/phytium/ft2000plus-MR-devboard-64c-dsk.dts @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DTS file for Phytium FT-2000plus devboard. + * + * Copyright (C) 2019, Phytium Technology Co., Ltd. 
+ */ + +/dts-v1/; +/memreserve/ 0x0000000080000000 0x0000000000010000; + +#include "ft2000plus-MR-psci-soc.dtsi" + +/ { + model = "FT-2000plus Development Board"; + compatible = "phytium,ft-2000plus"; + + chosen { + linux,pci-probe-only = <1>; + }; + + /* NUMA Node-0 */ + memory@00 { + device_type = "memory"; + /* 0 - 512MiB (512MiB)*/ + reg = <0x00000000 0x00000000 0x0 0x20000000>; + numa-node-id = <0>; + }; + memory@01 { + device_type = "memory"; + /* 2GiB - 4GiB (2GiB) */ + reg = <0x00000000 0x80000000 0x0 0x80000000>; + numa-node-id = <0>; + }; + memory@02 { + device_type = "memory"; + /* 512GiB - 516GiB (4GiB) */ + reg = <0x00000080 0x00000000 0x1 0x00000000>; + numa-node-id = <0>; + }; + /* NUMA Node-1 */ + memory@10 { + device_type = "memory"; + /* 1024GiB - 1028GiB (4GiB) */ + reg = <0x00000100 0x00000000 0x1 0x00000000>; + numa-node-id = <1>; + }; + memory@11 { + device_type = "memory"; + /* 1536GiB - 1540GiB (4GiB) */ + reg = <0x00000180 0x00000000 0x1 0x00000000>; + numa-node-id = <1>; + }; + /* NUMA Node-2 */ + memory@20 { + device_type = "memory"; + /* 2048GiB - 2052GiB (4GiB) */ + reg = <0x00000200 0x00000000 0x1 0x00000000>; + numa-node-id = <2>; + }; + memory@21 { + device_type = "memory"; + /* 2560GiB - 2564GiB (4GiB) */ + reg = <0x00000280 0x00000000 0x1 0x00000000>; + numa-node-id = <2>; + }; + /* NUMA Node-3 */ + memory@30 { + device_type = "memory"; + /* 3072GiB - 3076GiB (4GiB) */ + reg = <0x00000300 0x00000000 0x1 0x00000000>; + numa-node-id = <3>; + }; + memory@31 { + device_type = "memory"; + /* 3584GiB - 3588GiB (4GiB) */ + reg = <0x00000380 0x00000000 0x1 0x00000000>; + numa-node-id = <3>; + }; + /* NUMA Node-4 */ + memory@40 { + device_type = "memory"; + /* 4096GiB - 4100GiB (4GiB) */ + reg = <0x00000400 0x00000000 0x1 0x00000000>; + numa-node-id = <4>; + }; + memory@41 { + device_type = "memory"; + /* 4608GiB - 4612GiB (4GiB) */ + reg = <0x00000480 0x00000000 0x1 0x00000000>; + numa-node-id = <4>; + }; + /* NUMA Node-5 */ + memory@50 { 
+ device_type = "memory"; + /* 5120GiB - 5124GiB (4GiB) */ + reg = <0x00000500 0x00000000 0x1 0x00000000>; + numa-node-id = <5>; + }; + memory@51 { + device_type = "memory"; + /* 5632GiB - 5636GiB (4GiB) */ + reg = <0x00000580 0x00000000 0x1 0x00000000>; + numa-node-id = <5>; + }; + /* NUMA Node-6 */ + memory@60 { + device_type = "memory"; + /* 6144GiB - 6148GiB (4GiB) */ + reg = <0x00000600 0x00000000 0x1 0x00000000>; + numa-node-id = <6>; + }; + memory@61 { + device_type = "memory"; + /* 6656GiB - 6660GiB (4GiB) */ + reg = <0x00000680 0x00000000 0x1 0x00000000>; + numa-node-id = <6>; + }; + /* NUMA Node-7 */ + memory@70 { + device_type = "memory"; + /* 7168GiB - 7172GiB (4GiB) */ + reg = <0x00000700 0x00000000 0x1 0x00000000>; + numa-node-id = <7>; + }; + memory@71 { + device_type = "memory"; + /* 7680GiB - 7684GiB (4GiB) */ + reg = <0x00000780 0x00000000 0x1 0x00000000>; + numa-node-id = <7>; + }; + +}; + +&uart1 { + status = "ok"; +}; diff --git a/arch/arm64/boot/dts/phytium/ft2000plus-MR-psci-soc.dtsi b/arch/arm64/boot/dts/phytium/ft2000plus-MR-psci-soc.dtsi new file mode 100644 index 000000000000..1e9da418b1d1 --- /dev/null +++ b/arch/arm64/boot/dts/phytium/ft2000plus-MR-psci-soc.dtsi @@ -0,0 +1,1062 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * dts file for FT-2000plus SoC + * + * Copyright (C) 2018-2019, Phytium Technology Co., Ltd. 
+ */ + +#include + +/ { + compatible = "phytium,ft2000plus"; + interrupt-parent = <&gic>; + #address-cells = <2>; + #size-cells = <2>; + + psci { + compatible = "arm,psci-1.0"; + method = "smc"; + cpu_suspend = <0xc4000001>; + cpu_off = <0x84000002>; + cpu_on = <0xc4000003>; + sys_poweroff = <0x84000008>; + sys_reset = <0x84000009>; + }; + + cpus { + #address-cells = <0x2>; + #size-cells = <0x0>; + + cpu-map { + cluster0 { + core0 { + cpu = <&cpu0>; + }; + core1 { + cpu = <&cpu1>; + }; + core2 { + cpu = <&cpu2>; + }; + core3 { + cpu = <&cpu3>; + }; + }; + + cluster1 { + core0 { + cpu = <&cpu4>; + }; + core1 { + cpu = <&cpu5>; + }; + core2 { + cpu = <&cpu6>; + }; + core3 { + cpu = <&cpu7>; + }; + }; + + cluster2 { + core0 { + cpu = <&cpu8>; + }; + core1 { + cpu = <&cpu9>; + }; + core2 { + cpu = <&cpu10>; + }; + core3 { + cpu = <&cpu11>; + }; + }; + + cluster3 { + core0 { + cpu = <&cpu12>; + }; + core1 { + cpu = <&cpu13>; + }; + core2 { + cpu = <&cpu14>; + }; + core3 { + cpu = <&cpu15>; + }; + }; + + cluster4 { + core0 { + cpu = <&cpu16>; + }; + core1 { + cpu = <&cpu17>; + }; + core2 { + cpu = <&cpu18>; + }; + core3 { + cpu = <&cpu19>; + }; + }; + + cluster5 { + core0 { + cpu = <&cpu20>; + }; + core1 { + cpu = <&cpu21>; + }; + core2 { + cpu = <&cpu22>; + }; + core3 { + cpu = <&cpu23>; + }; + }; + + cluster6 { + core0 { + cpu = <&cpu24>; + }; + core1 { + cpu = <&cpu25>; + }; + core2 { + cpu = <&cpu26>; + }; + core3 { + cpu = <&cpu27>; + }; + }; + + cluster7 { + core0 { + cpu = <&cpu28>; + }; + core1 { + cpu = <&cpu29>; + }; + core2 { + cpu = <&cpu30>; + }; + core3 { + cpu = <&cpu31>; + }; + }; + + cluster8 { + core0 { + cpu = <&cpu32>; + }; + core1 { + cpu = <&cpu33>; + }; + core2 { + cpu = <&cpu34>; + }; + core3 { + cpu = <&cpu35>; + }; + }; + + cluster9 { + core0 { + cpu = <&cpu36>; + }; + core1 { + cpu = <&cpu37>; + }; + core2 { + cpu = <&cpu38>; + }; + core3 { + cpu = <&cpu39>; + }; + }; + + cluster10 { + core0 { + cpu = <&cpu40>; + }; + core1 { + cpu = <&cpu41>; 
+ }; + core2 { + cpu = <&cpu42>; + }; + core3 { + cpu = <&cpu43>; + }; + }; + + cluster11 { + core0 { + cpu = <&cpu44>; + }; + core1 { + cpu = <&cpu45>; + }; + core2 { + cpu = <&cpu46>; + }; + core3 { + cpu = <&cpu47>; + }; + }; + + cluster12 { + core0 { + cpu = <&cpu48>; + }; + core1 { + cpu = <&cpu49>; + }; + core2 { + cpu = <&cpu50>; + }; + core3 { + cpu = <&cpu51>; + }; + }; + + cluster13 { + core0 { + cpu = <&cpu52>; + }; + core1 { + cpu = <&cpu53>; + }; + core2 { + cpu = <&cpu54>; + }; + core3 { + cpu = <&cpu55>; + }; + }; + + cluster14 { + core0 { + cpu = <&cpu56>; + }; + core1 { + cpu = <&cpu57>; + }; + core2 { + cpu = <&cpu58>; + }; + core3 { + cpu = <&cpu59>; + }; + }; + + cluster15 { + core0 { + cpu = <&cpu60>; + }; + core1 { + cpu = <&cpu61>; + }; + core2 { + cpu = <&cpu62>; + }; + core3 { + cpu = <&cpu63>; + }; + }; + }; + + cpu0: cpu@0 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x0>; + enable-method = "psci"; + numa-node-id = <0>; + }; + + cpu1: cpu@1 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x1>; + enable-method = "psci"; + numa-node-id = <0>; + }; + + cpu2: cpu@2 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x2>; + enable-method = "psci"; + numa-node-id = <0>; + }; + + cpu3: cpu@3 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x3>; + enable-method = "psci"; + numa-node-id = <0>; + }; + + cpu4: cpu@100 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x100>; + enable-method = "psci"; + numa-node-id = <0>; + }; + + cpu5: cpu@101 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x101>; + enable-method = "psci"; + numa-node-id = <0>; + }; + + cpu6: cpu@102 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x102>; + enable-method = "psci"; + numa-node-id = <0>; + }; + + cpu7: cpu@103 { + 
device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x103>; + enable-method = "psci"; + numa-node-id = <0>; + }; + + cpu8: cpu@200 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x200>; + enable-method = "psci"; + numa-node-id = <1>; + }; + + cpu9: cpu@201 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x201>; + enable-method = "psci"; + numa-node-id = <1>; + }; + + cpu10: cpu@202 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x202>; + enable-method = "psci"; + numa-node-id = <1>; + }; + + cpu11: cpu@203 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x203>; + enable-method = "psci"; + numa-node-id = <1>; + }; + + cpu12: cpu@300 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x300>; + enable-method = "psci"; + numa-node-id = <1>; + }; + + cpu13: cpu@301 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x301>; + enable-method = "psci"; + numa-node-id = <1>; + }; + + cpu14: cpu@302 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x302>; + enable-method = "psci"; + numa-node-id = <1>; + }; + + cpu15: cpu@303 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x303>; + enable-method = "psci"; + numa-node-id = <1>; + }; + + cpu16: cpu@400 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x400>; + enable-method = "psci"; + numa-node-id = <2>; + }; + + cpu17: cpu@401 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x401>; + enable-method = "psci"; + numa-node-id = <2>; + }; + + cpu18: cpu@402 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x402>; + enable-method = "psci"; + numa-node-id = <2>; + }; + + cpu19: cpu@403 { + device_type = "cpu"; + compatible 
= "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x403>; + enable-method = "psci"; + numa-node-id = <2>; + }; + + cpu20: cpu@500 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x500>; + enable-method = "psci"; + numa-node-id = <2>; + }; + + cpu21: cpu@501 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x501>; + enable-method = "psci"; + numa-node-id = <2>; + }; + + cpu22: cpu@502 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x502>; + enable-method = "psci"; + numa-node-id = <2>; + }; + + cpu23: cpu@503 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x503>; + enable-method = "psci"; + numa-node-id = <2>; + }; + + cpu24: cpu@600 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x600>; + enable-method = "psci"; + numa-node-id = <3>; + }; + + cpu25: cpu@601 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x601>; + enable-method = "psci"; + numa-node-id = <3>; + }; + + cpu26: cpu@602 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x602>; + enable-method = "psci"; + numa-node-id = <3>; + }; + + cpu27: cpu@603 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x603>; + enable-method = "psci"; + numa-node-id = <3>; + }; + + cpu28: cpu@700 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x700>; + enable-method = "psci"; + numa-node-id = <3>; + }; + + cpu29: cpu@701 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x701>; + enable-method = "psci"; + numa-node-id = <3>; + }; + + cpu30: cpu@702 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x702>; + enable-method = "psci"; + numa-node-id = <3>; + }; + + cpu31: cpu@703 { + device_type = "cpu"; + compatible = "phytium,ftc662", 
"arm,armv8"; + reg = <0x0 0x703>; + enable-method = "psci"; + numa-node-id = <3>; + }; + + cpu32: cpu@800 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x800>; + enable-method = "psci"; + numa-node-id = <4>; + }; + + cpu33: cpu@801 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x801>; + enable-method = "psci"; + numa-node-id = <4>; + }; + + cpu34: cpu@802 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x802>; + enable-method = "psci"; + numa-node-id = <4>; + }; + + cpu35: cpu@803 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x803>; + enable-method = "psci"; + numa-node-id = <4>; + }; + + cpu36: cpu@900 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x900>; + enable-method = "psci"; + numa-node-id = <4>; + }; + + cpu37: cpu@901 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x901>; + enable-method = "psci"; + numa-node-id = <4>; + }; + + cpu38: cpu@902 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x902>; + enable-method = "psci"; + numa-node-id = <4>; + }; + + cpu39: cpu@903 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x903>; + enable-method = "psci"; + numa-node-id = <4>; + }; + + cpu40: cpu@a00 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xa00>; + enable-method = "psci"; + numa-node-id = <5>; + }; + + cpu41: cpu@a01 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xa01>; + enable-method = "psci"; + numa-node-id = <5>; + }; + + cpu42: cpu@a02 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xa02>; + enable-method = "psci"; + numa-node-id = <5>; + }; + + cpu43: cpu@a03 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 
0xa03>; + enable-method = "psci"; + numa-node-id = <5>; + }; + + cpu44: cpu@b00 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xb00>; + enable-method = "psci"; + numa-node-id = <5>; + }; + + cpu45: cpu@b01 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xb01>; + enable-method = "psci"; + numa-node-id = <5>; + }; + + cpu46: cpu@b02 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xb02>; + enable-method = "psci"; + numa-node-id = <5>; + }; + + cpu47: cpu@b03 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xb03>; + enable-method = "psci"; + numa-node-id = <5>; + }; + + cpu48: cpu@c00 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xc00>; + enable-method = "psci"; + numa-node-id = <6>; + }; + + cpu49: cpu@c01 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xc01>; + enable-method = "psci"; + numa-node-id = <6>; + }; + + cpu50: cpu@c02 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xc02>; + enable-method = "psci"; + numa-node-id = <6>; + }; + + cpu51: cpu@c03 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xc03>; + enable-method = "psci"; + numa-node-id = <6>; + }; + + cpu52: cpu@d00 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xd00>; + enable-method = "psci"; + numa-node-id = <6>; + }; + + cpu53: cpu@d01 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xd01>; + enable-method = "psci"; + numa-node-id = <6>; + }; + + cpu54: cpu@d02 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xd02>; + enable-method = "psci"; + numa-node-id = <6>; + }; + + cpu55: cpu@d03 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xd03>; + enable-method = 
"psci"; + numa-node-id = <6>; + }; + + cpu56: cpu@e00 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xe00>; + enable-method = "psci"; + numa-node-id = <7>; + }; + + cpu57: cpu@e01 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xe01>; + enable-method = "psci"; + numa-node-id = <7>; + }; + + cpu58: cpu@e02 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xe02>; + enable-method = "psci"; + numa-node-id = <7>; + }; + + cpu59: cpu@e03 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xe03>; + enable-method = "psci"; + numa-node-id = <7>; + }; + + cpu60: cpu@f00 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xf00>; + enable-method = "psci"; + numa-node-id = <7>; + }; + + cpu61: cpu@f01 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xf01>; + enable-method = "psci"; + numa-node-id = <7>; + }; + + cpu62: cpu@f02 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xf02>; + enable-method = "psci"; + numa-node-id = <7>; + }; + + cpu63: cpu@f03 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xf03>; + enable-method = "psci"; + numa-node-id = <7>; + }; + }; + + distance-map { + compatible = "numa-distance-map-v1"; + distance-matrix = <0x0 0x0 0x0a>, + <0x0 0x1 0x14>, + <0x0 0x2 0x28>, + <0x0 0x3 0x1e>, + <0x0 0x4 0x14>, + <0x0 0x5 0x1e>, + <0x0 0x6 0x32>, + <0x0 0x7 0x28>, + <0x1 0x0 0x14>, + <0x1 0x1 0x0a>, + <0x1 0x2 0x1e>, + <0x1 0x3 0x14>, + <0x1 0x4 0x1e>, + <0x1 0x5 0x14>, + <0x1 0x6 0x28>, + <0x1 0x7 0x1e>, + <0x2 0x0 0x28>, + <0x2 0x1 0x1e>, + <0x2 0x2 0x0a>, + <0x2 0x3 0x14>, + <0x2 0x4 0x32>, + <0x2 0x5 0x28>, + <0x2 0x6 0x14>, + <0x2 0x7 0x1e>, + <0x3 0x0 0x1e>, + <0x3 0x1 0x14>, + <0x3 0x2 0x14>, + <0x3 0x3 0x0a>, + <0x3 0x4 0x28>, + <0x3 0x5 0x1e>, + <0x3 0x6 0x1e>, + <0x3 0x7 
0x14>, + <0x4 0x0 0x14>, + <0x4 0x1 0x1e>, + <0x4 0x2 0x32>, + <0x4 0x3 0x28>, + <0x4 0x4 0x0a>, + <0x4 0x5 0x14>, + <0x4 0x6 0x28>, + <0x4 0x7 0x1e>, + <0x5 0x0 0x1e>, + <0x5 0x1 0x14>, + <0x5 0x2 0x28>, + <0x5 0x3 0x1e>, + <0x5 0x4 0x14>, + <0x5 0x5 0x0a>, + <0x5 0x6 0x1e>, + <0x5 0x7 0x14>, + <0x6 0x0 0x32>, + <0x6 0x1 0x28>, + <0x6 0x2 0x14>, + <0x6 0x3 0x1e>, + <0x6 0x4 0x28>, + <0x6 0x5 0x1e>, + <0x6 0x6 0x0a>, + <0x6 0x7 0x14>, + <0x7 0x0 0x28>, + <0x7 0x1 0x1e>, + <0x7 0x2 0x1e>, + <0x7 0x3 0x14>, + <0x7 0x4 0x1e>, + <0x7 0x5 0x14>, + <0x7 0x6 0x14>, + <0x7 0x7 0x0a>; + }; + + + gic: interrupt-controller@8002a000000 { + compatible = "arm,gic-v3"; + #interrupt-cells = <3>; + #address-cells = <2>; + #size-cells = <2>; + ranges; + interrupt-controller; + reg = <0x0800 0x2a000000 0 0x10000>, /* GICD */ + <0x0800 0x2a800000 0 0x800000>, /* GICR */ + <0x0800 0x29c00000 0 0x10000>, /* GICC */ + <0x0800 0x29c10000 0 0x10000>, /* GICH */ + <0x0800 0x29c20000 0 0x10000>; /* GICV */ + interrupts = ; + + its: gic-its@8002a020000 { + compatible = "arm,gic-v3-its"; + msi-controller; + reg = <0x0800 0x2a020000 0x0 0x20000>; + }; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupts = , + , + , + ; + clock-frequency = <50000000>; + }; + + soc { + compatible = "simple-bus"; + #address-cells = <2>; + #size-cells = <2>; + dma-coherent; + ranges; + + uart0: serial@28000000 { + compatible = "snps,dw-apb-uart"; + reg = <0x800 0x28000000 0x0 0x1000>; + clock-frequency = <50000000>; + interrupts = ; + reg-shift = <2>; + reg-io-width = <4>; + status = "disabled"; + }; + + uart1: serial@28001000 { + compatible = "snps,dw-apb-uart"; + reg = <0x800 0x28001000 0x0 0x1000>; + clock-frequency = <50000000>; + interrupts = ; + reg-shift = <2>; + reg-io-width = <4>; + status = "disabled"; + }; + + gpio0:gpio@80028006000 { + compatible = "snps,dw-apb-gpio"; + reg = <0x800 0x28006000 0x0 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + status = "ok"; + + gpio-controller@0 { + 
compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <0x2>; + snps,nr-gpios = <0x8>; + reg = <0x0>; + }; + + gpio-controller@1 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <0x2>; + snps,nr-gpios = <0x8>; + reg = <0x1>; + }; + + gpio-controller@2 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <0x2>; + snps,nr-gpios = <0x8>; + reg = <0x2>; + }; + + gpio-controller@3 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <0x2>; + snps,nr-gpios = <0x8>; + reg = <0x3>; + }; + }; + + i2c0: i2c@80028002000 { + compatible = "snps,designware-i2c"; + reg = <0x800 0x28002000 0x0 0x1000>; + interrupts = ; + clock-frequency = <100000>; + status = "ok"; + }; + + i2c1: i2c@80028003000 { + compatible = "snps,designware-i2c"; + reg = <0x800 0x28003000 0x0 0x1000>; + interrupts = ; + clock-frequency = <100000>; + status = "ok"; + }; + + pcie0: peu0-c0 { + compatible = "pci-host-ecam-generic"; + device_type = "pci"; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + reg = <0x800 0x40000000 0 0x2000000>; + msi-parent = <&its>; + bus-range = <0 0x1f>; + interrupt-map-mask = <0x0 0x0 0x0 0x7>; + interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0x0 GIC_SPI 0x33 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x0 0x0 0x2 &gic 0x0 0x0 GIC_SPI 0x34 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x0 0x0 0x3 &gic 0x0 0x0 GIC_SPI 0x35 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x0 0x0 0x4 &gic 0x0 0x0 GIC_SPI 0x36 IRQ_TYPE_LEVEL_HIGH>; + ranges = <0x01000000 0x00 0x00000000 0x800 0x50000000 0x00 0x00300000>, + <0x02000000 0x00 0x60000000 0x800 0x60000000 0x00 0x08000000>, + <0x03000000 0x20 0x00000000 0x820 0x00000000 0x08 0x00000000>; + }; + + pcie1: peu0-c1 { + compatible = "pci-host-ecam-generic"; + device_type = "pci"; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + reg = <0x800 0x42000000 0 0x2000000>; + msi-parent = <&its>; + bus-range = <0x20 0x3f>; + interrupt-map-mask = <0x0 0x0 0x0 0x7>; + 
interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0x0 GIC_SPI 0x33 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x0 0x0 0x2 &gic 0x0 0x0 GIC_SPI 0x34 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x0 0x0 0x3 &gic 0x0 0x0 GIC_SPI 0x35 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x0 0x0 0x4 &gic 0x0 0x0 GIC_SPI 0x36 IRQ_TYPE_LEVEL_HIGH>; + ranges = <0x01000000 0x00 0x00300000 0x800 0x50300000 0x00 0x00300000>, + <0x02000000 0x00 0x68000000 0x800 0x68000000 0x00 0x04000000>, + <0x03000000 0x28 0x00000000 0x828 0x00000000 0x04 0x00000000>; + }; + + pcie2: peu0-c2 { + compatible = "pci-host-ecam-generic"; + device_type = "pci"; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + reg = <0x800 0x44000000 0 0x1000000>; + msi-parent = <&its>; + bus-range = <0x40 0x4f>; + interrupt-map-mask = <0x0 0x0 0x0 0x7>; + interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0x0 GIC_SPI 0x33 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x0 0x0 0x2 &gic 0x0 0x0 GIC_SPI 0x34 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x0 0x0 0x3 &gic 0x0 0x0 GIC_SPI 0x35 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x0 0x0 0x4 &gic 0x0 0x0 GIC_SPI 0x36 IRQ_TYPE_LEVEL_HIGH>; + ranges = <0x01000000 0x00 0x00600000 0x800 0x50600000 0x00 0x00300000>, + <0x02000000 0x00 0x6c000000 0x800 0x6c000000 0x00 0x02000000>, + <0x03000000 0x2c 0x00000000 0x82c 0x00000000 0x04 0x00000000>; + }; + + pcie3: peu1-c0 { + compatible = "pci-host-ecam-generic"; + device_type = "pci"; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + reg = <0x800 0x45000000 0 0x2000000>; + msi-parent = <&its>; + bus-range = <0x50 0x6f>; + interrupt-map-mask = <0x0 0x0 0x0 0x7>; + interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0x0 GIC_SPI 0x33 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x0 0x0 0x2 &gic 0x0 0x0 GIC_SPI 0x34 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x0 0x0 0x3 &gic 0x0 0x0 GIC_SPI 0x35 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x0 0x0 0x4 &gic 0x0 0x0 GIC_SPI 0x36 IRQ_TYPE_LEVEL_HIGH>; + ranges = <0x01000000 0x00 0x00900000 0x800 0x50900000 0x00 0x00300000>, + <0x02000000 0x00 0x6e000000 0x800 0x6e000000 0x00 0x0a000000>, + <0x03000000 0x20 
0x00000000 0x830 0x00000000 0x08 0x00000000>; + }; + + pcie4: peu1-c1 { + compatible = "pci-host-ecam-generic"; + device_type = "pci"; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + reg = <0x800 0x47000000 0 0x1000000>; + msi-parent = <&its>; + bus-range = <0x70 0x7f>; + interrupt-map-mask = <0x0 0x0 0x0 0x7>; + interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0x0 GIC_SPI 0x33 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x0 0x0 0x2 &gic 0x0 0x0 GIC_SPI 0x34 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x0 0x0 0x3 &gic 0x0 0x0 GIC_SPI 0x35 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x0 0x0 0x4 &gic 0x0 0x0 GIC_SPI 0x36 IRQ_TYPE_LEVEL_HIGH>; + ranges = <0x01000000 0x00 0x00c00000 0x800 0x50c00000 0x00 0x00300000>, + <0x02000000 0x00 0x78000000 0x800 0x78000000 0x00 0x08000000>, + <0x03000000 0x38 0x00000000 0x838 0x00000000 0x08 0x00000000>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/phytium/ft2000plus-SR-devboard-64c-dsk.dts b/arch/arm64/boot/dts/phytium/ft2000plus-SR-devboard-64c-dsk.dts new file mode 100644 index 000000000000..3e3e1e4a1c38 --- /dev/null +++ b/arch/arm64/boot/dts/phytium/ft2000plus-SR-devboard-64c-dsk.dts @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DTS file for Phytium FT-2000plus devboard. + * + * Copyright (C) 2019, Phytium Technology Co., Ltd. 
+ */ + +/dts-v1/; +/memreserve/ 0x0000000080000000 0x0000000000010000; + +#include "ft2000plus-SR-psci-soc.dtsi" + +/ { + model = "FT-2000plus Development Board"; + compatible = "phytium,ft-2000plus"; + + chosen { + linux,pci-probe-only = <1>; + }; + + /* NUMA Node-0 */ + memory@00 { + device_type = "memory"; + /* 0 - 512MiB (512MiB)*/ + reg = <0x00000000 0x00000000 0x0 0x20000000>; + numa-node-id = <0>; + }; + memory@01 { + device_type = "memory"; + /* 2GiB - 4GiB (2GiB) */ + reg = <0x00000000 0x80000000 0x0 0x80000000>; + numa-node-id = <0>; + }; + memory@02 { + device_type = "memory"; + /* 512GiB - 516GiB (4GiB) */ + reg = <0x00000080 0x00000000 0x1 0x00000000>; + numa-node-id = <0>; + }; + /* NUMA Node-1 */ + memory@10 { + device_type = "memory"; + /* 1024GiB - 1028GiB (4GiB) */ + reg = <0x00000100 0x00000000 0x1 0x00000000>; + numa-node-id = <1>; + }; + memory@11 { + device_type = "memory"; + /* 1536GiB - 1540GiB (4GiB) */ + reg = <0x00000180 0x00000000 0x1 0x00000000>; + numa-node-id = <1>; + }; + /* NUMA Node-2 */ + memory@20 { + device_type = "memory"; + /* 2048GiB - 2052GiB (4GiB) */ + reg = <0x00000200 0x00000000 0x1 0x00000000>; + numa-node-id = <2>; + }; + memory@21 { + device_type = "memory"; + /* 2560GiB - 2564GiB (4GiB) */ + reg = <0x00000280 0x00000000 0x1 0x00000000>; + numa-node-id = <2>; + }; + /* NUMA Node-3 */ + memory@30 { + device_type = "memory"; + /* 3072GiB - 3076GiB (4GiB) */ + reg = <0x00000300 0x00000000 0x1 0x00000000>; + numa-node-id = <3>; + }; + memory@31 { + device_type = "memory"; + /* 3584GiB - 3588GiB (4GiB) */ + reg = <0x00000380 0x00000000 0x1 0x00000000>; + numa-node-id = <3>; + }; + /* NUMA Node-4 */ + memory@40 { + device_type = "memory"; + /* 4096GiB - 4100GiB (4GiB) */ + reg = <0x00000400 0x00000000 0x1 0x00000000>; + numa-node-id = <4>; + }; + memory@41 { + device_type = "memory"; + /* 4608GiB - 4612GiB (4GiB) */ + reg = <0x00000480 0x00000000 0x1 0x00000000>; + numa-node-id = <4>; + }; + /* NUMA Node-5 */ + memory@50 { 
+ device_type = "memory"; + /* 5120GiB - 5124GiB (4GiB) */ + reg = <0x00000500 0x00000000 0x1 0x00000000>; + numa-node-id = <5>; + }; + memory@51 { + device_type = "memory"; + /* 5632GiB - 5636GiB (4GiB) */ + reg = <0x00000580 0x00000000 0x1 0x00000000>; + numa-node-id = <5>; + }; + /* NUMA Node-6 */ + memory@60 { + device_type = "memory"; + /* 6144GiB - 6148GiB (4GiB) */ + reg = <0x00000600 0x00000000 0x1 0x00000000>; + numa-node-id = <6>; + }; + memory@61 { + device_type = "memory"; + /* 6656GiB - 6660GiB (4GiB) */ + reg = <0x00000680 0x00000000 0x1 0x00000000>; + numa-node-id = <6>; + }; + /* NUMA Node-7 */ + memory@70 { + device_type = "memory"; + /* 7168GiB - 7172GiB (4GiB) */ + reg = <0x00000700 0x00000000 0x1 0x00000000>; + numa-node-id = <7>; + }; + memory@71 { + device_type = "memory"; + /* 7680GiB - 7684GiB (4GiB) */ + reg = <0x00000780 0x00000000 0x1 0x00000000>; + numa-node-id = <7>; + }; + +}; + +&uart1 { + status = "ok"; +}; diff --git a/arch/arm64/boot/dts/phytium/ft2000plus-SR-psci-soc.dtsi b/arch/arm64/boot/dts/phytium/ft2000plus-SR-psci-soc.dtsi new file mode 100644 index 000000000000..687df1601f3e --- /dev/null +++ b/arch/arm64/boot/dts/phytium/ft2000plus-SR-psci-soc.dtsi @@ -0,0 +1,986 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * dts file for FT-2000plus SoC + * + * Copyright (C) 2018-2019, Phytium Technology Co., Ltd. 
+ */ + +#include + +/ { + compatible = "phytium,ft2000plus"; + interrupt-parent = <&gic>; + #address-cells = <2>; + #size-cells = <2>; + + psci { + compatible = "arm,psci-1.0"; + method = "smc"; + cpu_suspend = <0xc4000001>; + cpu_off = <0x84000002>; + cpu_on = <0xc4000003>; + sys_poweroff = <0x84000008>; + sys_reset = <0x84000009>; + }; + + cpus { + #address-cells = <0x2>; + #size-cells = <0x0>; + + cpu-map { + cluster0 { + core0 { + cpu = <&cpu0>; + }; + core1 { + cpu = <&cpu1>; + }; + core2 { + cpu = <&cpu2>; + }; + core3 { + cpu = <&cpu3>; + }; + }; + + cluster1 { + core0 { + cpu = <&cpu4>; + }; + core1 { + cpu = <&cpu5>; + }; + core2 { + cpu = <&cpu6>; + }; + core3 { + cpu = <&cpu7>; + }; + }; + + cluster2 { + core0 { + cpu = <&cpu8>; + }; + core1 { + cpu = <&cpu9>; + }; + core2 { + cpu = <&cpu10>; + }; + core3 { + cpu = <&cpu11>; + }; + }; + + cluster3 { + core0 { + cpu = <&cpu12>; + }; + core1 { + cpu = <&cpu13>; + }; + core2 { + cpu = <&cpu14>; + }; + core3 { + cpu = <&cpu15>; + }; + }; + + cluster4 { + core0 { + cpu = <&cpu16>; + }; + core1 { + cpu = <&cpu17>; + }; + core2 { + cpu = <&cpu18>; + }; + core3 { + cpu = <&cpu19>; + }; + }; + + cluster5 { + core0 { + cpu = <&cpu20>; + }; + core1 { + cpu = <&cpu21>; + }; + core2 { + cpu = <&cpu22>; + }; + core3 { + cpu = <&cpu23>; + }; + }; + + cluster6 { + core0 { + cpu = <&cpu24>; + }; + core1 { + cpu = <&cpu25>; + }; + core2 { + cpu = <&cpu26>; + }; + core3 { + cpu = <&cpu27>; + }; + }; + + cluster7 { + core0 { + cpu = <&cpu28>; + }; + core1 { + cpu = <&cpu29>; + }; + core2 { + cpu = <&cpu30>; + }; + core3 { + cpu = <&cpu31>; + }; + }; + + cluster8 { + core0 { + cpu = <&cpu32>; + }; + core1 { + cpu = <&cpu33>; + }; + core2 { + cpu = <&cpu34>; + }; + core3 { + cpu = <&cpu35>; + }; + }; + + cluster9 { + core0 { + cpu = <&cpu36>; + }; + core1 { + cpu = <&cpu37>; + }; + core2 { + cpu = <&cpu38>; + }; + core3 { + cpu = <&cpu39>; + }; + }; + + cluster10 { + core0 { + cpu = <&cpu40>; + }; + core1 { + cpu = <&cpu41>; 
+ }; + core2 { + cpu = <&cpu42>; + }; + core3 { + cpu = <&cpu43>; + }; + }; + + cluster11 { + core0 { + cpu = <&cpu44>; + }; + core1 { + cpu = <&cpu45>; + }; + core2 { + cpu = <&cpu46>; + }; + core3 { + cpu = <&cpu47>; + }; + }; + + cluster12 { + core0 { + cpu = <&cpu48>; + }; + core1 { + cpu = <&cpu49>; + }; + core2 { + cpu = <&cpu50>; + }; + core3 { + cpu = <&cpu51>; + }; + }; + + cluster13 { + core0 { + cpu = <&cpu52>; + }; + core1 { + cpu = <&cpu53>; + }; + core2 { + cpu = <&cpu54>; + }; + core3 { + cpu = <&cpu55>; + }; + }; + + cluster14 { + core0 { + cpu = <&cpu56>; + }; + core1 { + cpu = <&cpu57>; + }; + core2 { + cpu = <&cpu58>; + }; + core3 { + cpu = <&cpu59>; + }; + }; + + cluster15 { + core0 { + cpu = <&cpu60>; + }; + core1 { + cpu = <&cpu61>; + }; + core2 { + cpu = <&cpu62>; + }; + core3 { + cpu = <&cpu63>; + }; + }; + }; + + cpu0: cpu@0 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x0>; + enable-method = "psci"; + numa-node-id = <0>; + }; + + cpu1: cpu@1 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x1>; + enable-method = "psci"; + numa-node-id = <0>; + }; + + cpu2: cpu@2 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x2>; + enable-method = "psci"; + numa-node-id = <0>; + }; + + cpu3: cpu@3 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x3>; + enable-method = "psci"; + numa-node-id = <0>; + }; + + cpu4: cpu@100 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x100>; + enable-method = "psci"; + numa-node-id = <0>; + }; + + cpu5: cpu@101 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x101>; + enable-method = "psci"; + numa-node-id = <0>; + }; + + cpu6: cpu@102 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x102>; + enable-method = "psci"; + numa-node-id = <0>; + }; + + cpu7: cpu@103 { + 
device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x103>; + enable-method = "psci"; + numa-node-id = <0>; + }; + + cpu8: cpu@200 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x200>; + enable-method = "psci"; + numa-node-id = <1>; + }; + + cpu9: cpu@201 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x201>; + enable-method = "psci"; + numa-node-id = <1>; + }; + + cpu10: cpu@202 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x202>; + enable-method = "psci"; + numa-node-id = <1>; + }; + + cpu11: cpu@203 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x203>; + enable-method = "psci"; + numa-node-id = <1>; + }; + + cpu12: cpu@300 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x300>; + enable-method = "psci"; + numa-node-id = <1>; + }; + + cpu13: cpu@301 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x301>; + enable-method = "psci"; + numa-node-id = <1>; + }; + + cpu14: cpu@302 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x302>; + enable-method = "psci"; + numa-node-id = <1>; + }; + + cpu15: cpu@303 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x303>; + enable-method = "psci"; + numa-node-id = <1>; + }; + + cpu16: cpu@400 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x400>; + enable-method = "psci"; + numa-node-id = <2>; + }; + + cpu17: cpu@401 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x401>; + enable-method = "psci"; + numa-node-id = <2>; + }; + + cpu18: cpu@402 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x402>; + enable-method = "psci"; + numa-node-id = <2>; + }; + + cpu19: cpu@403 { + device_type = "cpu"; + compatible 
= "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x403>; + enable-method = "psci"; + numa-node-id = <2>; + }; + + cpu20: cpu@500 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x500>; + enable-method = "psci"; + numa-node-id = <2>; + }; + + cpu21: cpu@501 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x501>; + enable-method = "psci"; + numa-node-id = <2>; + }; + + cpu22: cpu@502 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x502>; + enable-method = "psci"; + numa-node-id = <2>; + }; + + cpu23: cpu@503 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x503>; + enable-method = "psci"; + numa-node-id = <2>; + }; + + cpu24: cpu@600 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x600>; + enable-method = "psci"; + numa-node-id = <3>; + }; + + cpu25: cpu@601 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x601>; + enable-method = "psci"; + numa-node-id = <3>; + }; + + cpu26: cpu@602 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x602>; + enable-method = "psci"; + numa-node-id = <3>; + }; + + cpu27: cpu@603 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x603>; + enable-method = "psci"; + numa-node-id = <3>; + }; + + cpu28: cpu@700 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x700>; + enable-method = "psci"; + numa-node-id = <3>; + }; + + cpu29: cpu@701 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x701>; + enable-method = "psci"; + numa-node-id = <3>; + }; + + cpu30: cpu@702 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x702>; + enable-method = "psci"; + numa-node-id = <3>; + }; + + cpu31: cpu@703 { + device_type = "cpu"; + compatible = "phytium,ftc662", 
"arm,armv8"; + reg = <0x0 0x703>; + enable-method = "psci"; + numa-node-id = <3>; + }; + + cpu32: cpu@800 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x800>; + enable-method = "psci"; + numa-node-id = <4>; + }; + + cpu33: cpu@801 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x801>; + enable-method = "psci"; + numa-node-id = <4>; + }; + + cpu34: cpu@802 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x802>; + enable-method = "psci"; + numa-node-id = <4>; + }; + + cpu35: cpu@803 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x803>; + enable-method = "psci"; + numa-node-id = <4>; + }; + + cpu36: cpu@900 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x900>; + enable-method = "psci"; + numa-node-id = <4>; + }; + + cpu37: cpu@901 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x901>; + enable-method = "psci"; + numa-node-id = <4>; + }; + + cpu38: cpu@902 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x902>; + enable-method = "psci"; + numa-node-id = <4>; + }; + + cpu39: cpu@903 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x903>; + enable-method = "psci"; + numa-node-id = <4>; + }; + + cpu40: cpu@a00 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xa00>; + enable-method = "psci"; + numa-node-id = <5>; + }; + + cpu41: cpu@a01 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xa01>; + enable-method = "psci"; + numa-node-id = <5>; + }; + + cpu42: cpu@a02 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xa02>; + enable-method = "psci"; + numa-node-id = <5>; + }; + + cpu43: cpu@a03 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 
0xa03>; + enable-method = "psci"; + numa-node-id = <5>; + }; + + cpu44: cpu@b00 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xb00>; + enable-method = "psci"; + numa-node-id = <5>; + }; + + cpu45: cpu@b01 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xb01>; + enable-method = "psci"; + numa-node-id = <5>; + }; + + cpu46: cpu@b02 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xb02>; + enable-method = "psci"; + numa-node-id = <5>; + }; + + cpu47: cpu@b03 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xb03>; + enable-method = "psci"; + numa-node-id = <5>; + }; + + cpu48: cpu@c00 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xc00>; + enable-method = "psci"; + numa-node-id = <6>; + }; + + cpu49: cpu@c01 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xc01>; + enable-method = "psci"; + numa-node-id = <6>; + }; + + cpu50: cpu@c02 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xc02>; + enable-method = "psci"; + numa-node-id = <6>; + }; + + cpu51: cpu@c03 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xc03>; + enable-method = "psci"; + numa-node-id = <6>; + }; + + cpu52: cpu@d00 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xd00>; + enable-method = "psci"; + numa-node-id = <6>; + }; + + cpu53: cpu@d01 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xd01>; + enable-method = "psci"; + numa-node-id = <6>; + }; + + cpu54: cpu@d02 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xd02>; + enable-method = "psci"; + numa-node-id = <6>; + }; + + cpu55: cpu@d03 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xd03>; + enable-method = 
"psci"; + numa-node-id = <6>; + }; + + cpu56: cpu@e00 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xe00>; + enable-method = "psci"; + numa-node-id = <7>; + }; + + cpu57: cpu@e01 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xe01>; + enable-method = "psci"; + numa-node-id = <7>; + }; + + cpu58: cpu@e02 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xe02>; + enable-method = "psci"; + numa-node-id = <7>; + }; + + cpu59: cpu@e03 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xe03>; + enable-method = "psci"; + numa-node-id = <7>; + }; + + cpu60: cpu@f00 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xf00>; + enable-method = "psci"; + numa-node-id = <7>; + }; + + cpu61: cpu@f01 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xf01>; + enable-method = "psci"; + numa-node-id = <7>; + }; + + cpu62: cpu@f02 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xf02>; + enable-method = "psci"; + numa-node-id = <7>; + }; + + cpu63: cpu@f03 { + device_type = "cpu"; + compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xf03>; + enable-method = "psci"; + numa-node-id = <7>; + }; + }; + + distance-map { + compatible = "numa-distance-map-v1"; + distance-matrix = <0x0 0x0 0x0a>, + <0x0 0x1 0x14>, + <0x0 0x2 0x28>, + <0x0 0x3 0x1e>, + <0x0 0x4 0x14>, + <0x0 0x5 0x1e>, + <0x0 0x6 0x32>, + <0x0 0x7 0x28>, + <0x1 0x0 0x14>, + <0x1 0x1 0x0a>, + <0x1 0x2 0x1e>, + <0x1 0x3 0x14>, + <0x1 0x4 0x1e>, + <0x1 0x5 0x14>, + <0x1 0x6 0x28>, + <0x1 0x7 0x1e>, + <0x2 0x0 0x28>, + <0x2 0x1 0x1e>, + <0x2 0x2 0x0a>, + <0x2 0x3 0x14>, + <0x2 0x4 0x32>, + <0x2 0x5 0x28>, + <0x2 0x6 0x14>, + <0x2 0x7 0x1e>, + <0x3 0x0 0x1e>, + <0x3 0x1 0x14>, + <0x3 0x2 0x14>, + <0x3 0x3 0x0a>, + <0x3 0x4 0x28>, + <0x3 0x5 0x1e>, + <0x3 0x6 0x1e>, + <0x3 0x7 
0x14>, + <0x4 0x0 0x14>, + <0x4 0x1 0x1e>, + <0x4 0x2 0x32>, + <0x4 0x3 0x28>, + <0x4 0x4 0x0a>, + <0x4 0x5 0x14>, + <0x4 0x6 0x28>, + <0x4 0x7 0x1e>, + <0x5 0x0 0x1e>, + <0x5 0x1 0x14>, + <0x5 0x2 0x28>, + <0x5 0x3 0x1e>, + <0x5 0x4 0x14>, + <0x5 0x5 0x0a>, + <0x5 0x6 0x1e>, + <0x5 0x7 0x14>, + <0x6 0x0 0x32>, + <0x6 0x1 0x28>, + <0x6 0x2 0x14>, + <0x6 0x3 0x1e>, + <0x6 0x4 0x28>, + <0x6 0x5 0x1e>, + <0x6 0x6 0x0a>, + <0x6 0x7 0x14>, + <0x7 0x0 0x28>, + <0x7 0x1 0x1e>, + <0x7 0x2 0x1e>, + <0x7 0x3 0x14>, + <0x7 0x4 0x1e>, + <0x7 0x5 0x14>, + <0x7 0x6 0x14>, + <0x7 0x7 0x0a>; + }; + + + gic: interrupt-controller@8002a000000 { + compatible = "arm,gic-v3"; + #interrupt-cells = <3>; + #address-cells = <2>; + #size-cells = <2>; + ranges; + interrupt-controller; + reg = <0x0800 0x2a000000 0 0x10000>, /* GICD */ + <0x0800 0x2a800000 0 0x800000>, /* GICR */ + <0x0800 0x29c00000 0 0x10000>, /* GICC */ + <0x0800 0x29c10000 0 0x10000>, /* GICH */ + <0x0800 0x29c20000 0 0x10000>; /* GICV */ + interrupts = ; + + its: gic-its@8002a020000 { + compatible = "arm,gic-v3-its"; + msi-controller; + reg = <0x0800 0x2a020000 0x0 0x20000>; + }; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupts = , + , + , + ; + clock-frequency = <50000000>; + }; + + soc { + compatible = "simple-bus"; + #address-cells = <2>; + #size-cells = <2>; + dma-coherent; + ranges; + + uart0: serial@28000000 { + compatible = "snps,dw-apb-uart"; + reg = <0x800 0x28000000 0x0 0x1000>; + clock-frequency = <50000000>; + interrupts = ; + reg-shift = <2>; + reg-io-width = <4>; + status = "disabled"; + }; + + uart1: serial@28001000 { + compatible = "snps,dw-apb-uart"; + reg = <0x800 0x28001000 0x0 0x1000>; + clock-frequency = <50000000>; + interrupts = ; + reg-shift = <2>; + reg-io-width = <4>; + status = "disabled"; + }; + + gpio0:gpio@80028006000 { + compatible = "snps,dw-apb-gpio"; + reg = <0x800 0x28006000 0x0 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + status = "ok"; + + gpio-controller@0 { + 
compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <0x2>; + snps,nr-gpios = <0x8>; + reg = <0x0>; + }; + + gpio-controller@1 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <0x2>; + snps,nr-gpios = <0x8>; + reg = <0x1>; + }; + + gpio-controller@2 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <0x2>; + snps,nr-gpios = <0x8>; + reg = <0x2>; + }; + + gpio-controller@3 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <0x2>; + snps,nr-gpios = <0x8>; + reg = <0x3>; + }; + }; + + i2c0: i2c@80028002000 { + compatible = "snps,designware-i2c"; + reg = <0x800 0x28002000 0x0 0x1000>; + interrupts = ; + clock-frequency = <100000>; + status = "ok"; + }; + + i2c1: i2c@80028003000 { + compatible = "snps,designware-i2c"; + reg = <0x800 0x28003000 0x0 0x1000>; + interrupts = ; + clock-frequency = <100000>; + status = "ok"; + }; + + pcie0: peu0-c0 { + compatible = "pci-host-ecam-generic"; + device_type = "pci"; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + reg = <0x800 0x40000000 0 0x10000000>; + msi-parent = <&its>; + bus-range = <0 0xff>; + interrupt-map-mask = <0x0 0x0 0x0 0x7>; + interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0x0 GIC_SPI 0x33 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x0 0x0 0x2 &gic 0x0 0x0 GIC_SPI 0x34 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x0 0x0 0x3 &gic 0x0 0x0 GIC_SPI 0x35 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x0 0x0 0x4 &gic 0x0 0x0 GIC_SPI 0x36 IRQ_TYPE_LEVEL_HIGH>; + ranges = <0x01000000 0x00 0x00000000 0x800 0x50000000 0x00 0x00f00000>, + <0x02000000 0x00 0x60000000 0x800 0x60000000 0x00 0x20000000>, + <0x03000000 0x20 0x00000000 0x820 0x00000000 0x20 0x00000000>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/phytium/ft2004-devboard-d4-dsk.dts b/arch/arm64/boot/dts/phytium/ft2004-devboard-d4-dsk.dts new file mode 100644 index 000000000000..5bef2e886292 --- /dev/null +++ b/arch/arm64/boot/dts/phytium/ft2004-devboard-d4-dsk.dts @@ -0,0 +1,73 @@ +// 
SPDX-License-Identifier: GPL-2.0 +/* + * DTS file for phytium FT-2000/4 devboard (FT-2000/4-D4-DSK series) + * + * Copyright (C) 2018-2019, Phytium Technology Co., Ltd. + */ + +/dts-v1/; +/memreserve/ 0x80000000 0x10000; + +#include "ft2004-generic-psci-soc.dtsi" + +/{ + model = "FT-2000/4-D4-DSK Development Board"; + compatible = "phytium,ft-2004"; + #address-cells = <2>; + #size-cells = <2>; + + chosen { + stdout-path = "uart1:115200n8"; + }; + + memory@00{ + device_type = "memory"; + reg = <0x0 0x80000000 0x1 0x00000000>; + }; + + memory@01{ + device_type = "memory"; + reg = <0x20 0x00000000 0x1 0x00000000>; + }; + + firmware { + optee { + compatible = "linaro,optee-tz"; + method = "smc"; + }; + }; +}; + +&rtc0 { + status = "ok"; +}; + +&uart1 { + status = "ok"; +}; + +&gmac0 { + status = "ok"; + phy-mode = "rgmii-rxid"; +}; + +&gmac1 { + status = "ok"; + phy-mode = "rgmii-rxid"; +}; + +&spi0 { + status = "ok"; +}; + +&qspi { + status = "ok"; +}; + +&i2c0 { + status = "ok"; +}; + +&i2c1 { + status = "ok"; +}; diff --git a/arch/arm64/boot/dts/phytium/ft2004-generic-psci-soc.dtsi b/arch/arm64/boot/dts/phytium/ft2004-generic-psci-soc.dtsi new file mode 100644 index 000000000000..a6451654e82f --- /dev/null +++ b/arch/arm64/boot/dts/phytium/ft2004-generic-psci-soc.dtsi @@ -0,0 +1,474 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * dts file for FT-2000/4 SoC + * + * Copyright (C) 2018-2019, Phytium Technology Co., Ltd. 
+ */ + +#include + +/ { + compatible = "phytium,ft2004"; + interrupt-parent = <&gic>; + #address-cells = <2>; + #size-cells = <2>; + + aliases { + ethernet0 = &gmac0; + ethernet1 = &gmac1; + }; + + psci { + compatible = "arm,psci-1.0"; + method = "smc"; + cpu_suspend = <0xc4000001>; + cpu_off = <0x84000002>; + cpu_on = <0xc4000003>; + sys_poweroff = <0x84000008>; + sys_reset = <0x84000009>; + }; + + cpus { + #address-cells = <0x2>; + #size-cells = <0x0>; + + cpu0: cpu@0 { + device_type = "cpu"; + compatible = "phytium,ftc663", "arm,armv8"; + reg = <0x0 0x0>; + enable-method = "psci"; + numa-node-id = <0>; + clocks = <&scpi_dvfs 0>; + }; + + cpu1: cpu@1 { + device_type = "cpu"; + compatible = "phytium,ftc663", "arm,armv8"; + reg = <0x0 0x1>; + enable-method = "psci"; + numa-node-id = <0>; + clocks = <&scpi_dvfs 0>; + }; + + cpu2: cpu@100 { + device_type = "cpu"; + compatible = "phytium,ftc663", "arm,armv8"; + reg = <0x0 0x100>; + enable-method = "psci"; + numa-node-id = <0>; + clocks = <&scpi_dvfs 1>; + }; + + cpu3: cpu@101 { + device_type = "cpu"; + compatible = "phytium,ftc663", "arm,armv8"; + reg = <0x0 0x101>; + enable-method = "psci"; + numa-node-id = <0>; + clocks = <&scpi_dvfs 1>; + }; + }; + + gic: interrupt-controller@29900000 { + compatible = "arm,gic-v3"; + #interrupt-cells = <3>; + #address-cells = <2>; + #size-cells = <2>; + ranges; + interrupt-controller; + reg = <0x0 0x29900000 0 0x20000>, /* GICD */ + <0x0 0x29980000 0 0x80000>, /* GICR */ + <0x0 0x29c00000 0 0x10000>, /* GICC */ + <0x0 0x29c10000 0 0x10000>, /* GICH */ + <0x0 0x29c20000 0 0x10000>; /* GICV */ + interrupts = ; + + its: gic-its@29920000 { + compatible = "arm,gic-v3-its"; + msi-controller; + reg = <0x0 0x29920000 0x0 0x20000>; + }; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupts = , + , + , + ; + clock-frequency = <48000000>; + }; + + pmu { + compatible = "arm,armv8-pmuv3"; + interrupts = ; + }; + + clocks { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + 
clk250mhz: clk250mhz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <250000000>; + }; + + sysclk_48mhz: clk48mhz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <48000000>; + }; + + sysclk_600mhz: clk600mhz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <600000000>; + }; + }; + + soc { + compatible = "simple-bus"; + #address-cells = <2>; + #size-cells = <2>; + dma-coherent; + ranges; + + gpio0: gpio@28004000 { + compatible = "phytium,gpio"; + reg = <0x0 0x28004000 0x0 0x1000>; + interrupts = ; + gpio-controller; + #gpio-cells = <2>; + #address-cells = <1>; + #size-cells = <0>; + + porta { + compatible = "phytium,gpio-port"; + reg = <0>; + nr-gpios = <8>; + }; + + portb { + compatible = "phytium,gpio-port"; + reg = <1>; + nr-gpios = <8>; + }; + }; + + gpio1: gpio@28005000 { + compatible = "phytium,gpio"; + reg = <0x0 0x28005000 0x0 0x1000>; + interrupts = ; + gpio-controller; + #gpio-cells = <2>; + #address-cells = <1>; + #size-cells = <0>; + + porta { + compatible = "phytium,gpio-port"; + reg = <0>; + nr-gpios = <8>; + }; + + portb { + compatible = "phytium,gpio-port"; + reg = <1>; + nr-gpios = <8>; + }; + }; + + uart0: uart@28000000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x28000000 0x0 0x1000>; + baud = <115200>; + reg-shift = <2>; + reg-io-width = <4>; + interrupts = ; + clocks = <&sysclk_48mhz &sysclk_48mhz>; + clock-names = "uartclk", "apb_pclk"; + }; + + uart1: uart@28001000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x28001000 0x0 0x1000>; + baud = <115200>; + reg-shift = <2>; + reg-io-width = <4>; + interrupts = ; + clocks = <&sysclk_48mhz &sysclk_48mhz>; + clock-names = "uartclk", "apb_pclk"; + }; + + uart2: uart@28002000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x28002000 0x0 0x1000>; + baud = <115200>; + reg-shift = <2>; + reg-io-width = <4>; + interrupts = ; + clocks = <&sysclk_48mhz &sysclk_48mhz>; + clock-names = 
"uartclk", "apb_pclk"; + }; + + uart3: uart@28003000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x28003000 0x0 0x1000>; + baud = <115200>; + reg-shift = <2>; + reg-io-width = <4>; + interrupts = ; + clocks = <&sysclk_48mhz &sysclk_48mhz>; + clock-names = "uartclk", "apb_pclk"; + }; + + sdci: sdci@28207c00 { + compatible = "phytium,sdci"; + reg = <0x0 0x28207c00 0x0 0x100>; + interrupts = , + , + ; + clocks = <&sysclk_600mhz &sysclk_600mhz>; + clock-names = "phytium_sdc_clk"; + no-sdio; + no-mmc; + no-dma-coherent; + }; + + watchdog0: watchdog@2800a000 { + compatible = "arm,sbsa-gwdt"; + reg = <0x0 0x2800b000 0x0 0x1000>, + <0x0 0x2800a000 0x0 0x1000>; + interrupts = ; + timeout-sec = <30>; + }; + + watchdog1: watchdog@28016000 { + compatible = "arm,sbsa-gwdt"; + reg = <0x0 0x28017000 0x0 0x1000>, + <0x0 0x28016000 0x0 0x1000>; + interrupts = ; + timeout-sec = <30>; + }; + + rtc0: rtc@2800d000 { + compatible = "phytium,rtc"; + reg = <0x0 0x2800d000 0x0 0x1000>; + clocks = <&sysclk_48mhz>; + clock-names = "rtc_pclk"; + interrupts = ; + status = "disabled"; + }; + + i2c0: i2c@28006000 { + compatible = "snps,designware-i2c"; + reg = <0x0 0x28006000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_48mhz>; + status = "disabled"; + }; + + i2c1: i2c@28007000 { + compatible = "snps,designware-i2c"; + reg = <0x0 0x28007000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_48mhz>; + status = "disabled"; + }; + + i2c2: i2c@28008000 { + compatible = "snps,designware-i2c"; + reg = <0x0 0x28008000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_48mhz>; + status = "disabled"; + }; + + i2c3: i2c@28009000 { + compatible = "snps,designware-i2c"; + reg = <0x0 0x28009000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_48mhz>; + status = "disabled"; + }; + + spi0: spi@2800c000 { + compatible = "phytium,spi"; + interrupts = ; + reg = <0x0 0x2800c000 0x0 0x1000>; + clocks = <&sysclk_48mhz>; + num-cs = <4>; + }; + + spi1: spi@28013000 { + compatible = "phytium,spi"; + 
interrupts = ; + reg = <0x0 0x28013000 0x0 0x1000>; + clocks = <&sysclk_48mhz>; + num-cs = <4>; + }; + + qspi: qspi@28014000 { + compatible = "phytium,qspi"; + reg = <0x0 0x28014000 0x0 0x1000>, + <0x0 0x0 0x0 0x02000000>; + reg-names = "qspi", "qspi_mm"; + clocks = <&sysclk_600mhz>; + + flash@0 { + spi-rx-bus-width = <1>; + spi-max-frequency = <600000000>; + }; + }; + + pcie: pcie { + compatible = "pci-host-ecam-generic"; + device_type = "pci"; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + reg = <0x0 0x40000000 0x0 0x10000000>; + msi-parent = <&its>; + bus-range = <0x0 0xff>; + interrupt-map-mask = <0x0 0x0 0x0 0x7>; + interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0x0 GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x0 0x0 0x2 &gic 0x0 0x0 GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x0 0x0 0x3 &gic 0x0 0x0 GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x0 0x0 0x4 &gic 0x0 0x0 GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>; + ranges = <0x01000000 0x00 0x00000000 0x0 0x50000000 0x0 0x00f00000>, + <0x02000000 0x00 0x58000000 0x0 0x58000000 0x0 0x28000000>, + <0x03000000 0x10 0x00000000 0x10 0x00000000 0x10 0x00000000>; + }; + + phytium_axi_setup: stmmac-axi-config { + snps,wr_osr_lmt = <0>; + snps,rd_osr_lmt = <0>; + snps,blen = <0 0 0 0 16 8 4>; + }; + + gmac0: eth@2820c000 { + compatible = "snps,dwmac"; + reg = <0x0 0x2820c000 0x0 0x2000>; + interrupts = ; + interrupt-names = "macirq"; + clocks = <&clk250mhz>; + clock-names = "stmmaceth"; + status = "disabled"; + + snps,pbl = <16>; + snps,fixed-burst; + snps,axi-config = <&phytium_axi_setup>; + snps,force_sf_dma_mode; + snps,multicast-filter-bins = <64>; + snps,perfect-filter-entries = <128>; + tx-fifo-depth = <4096>; + rx-fifo-depth = <4096>; + max-frame-size = <9000>; + }; + + gmac1: eth@28210000 { + compatible = "snps,dwmac"; + reg = <0x0 0x28210000 0x0 0x2000>; + interrupts = ; + interrupt-names = "macirq"; + clocks = <&clk250mhz>; + clock-names = "stmmaceth"; + status = "disabled"; + + snps,pbl = <16>; + 
snps,fixed-burst; + snps,axi-config = <&phytium_axi_setup>; + snps,force_sf_dma_mode; + snps,multicast-filter-bins = <64>; + snps,perfect-filter-entries = <128>; + snps,rx-queues-to-use = <2>; + tx-fifo-depth = <4096>; + rx-fifo-depth = <4096>; + max-frame-size = <9000>; + }; + + can0: can@28207000 { + compatible = "phytium,can"; + reg = <0x0 0x28207000 0x0 0x400>; + interrupts = ; + clocks = <&sysclk_600mhz>; + clock-names = "phytium_can_clk"; + tx-fifo-depth = <0x40>; + rx-fifo-depth = <0x40>; + }; + + can1: can@28207400 { + compatible = "phytium,can"; + reg = <0x0 0x28207400 0x0 0x400>; + interrupts = ; + clocks = <&sysclk_600mhz>; + clock-names = "phytium_can_clk"; + tx-fifo-depth = <0x40>; + rx-fifo-depth = <0x40>; + }; + + can2: can@028207800 { + compatible = "phytium,can"; + reg = <0x0 0x28207800 0x0 0x400>; + interrupts = ; + clocks = <&sysclk_600mhz>; + clock-names = "phytium_can_clk"; + tx-fifo-depth = <0x40>; + rx-fifo-depth = <0x40>; + }; + + hda: hda@28206000 { + compatible = "phytium,hda"; + reg = <0 0x28206000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_48mhz>; + clock-names = "phytium_hda_clk"; + }; + + mbox: mailbox@2a000000 { + compatible = "phytium,mbox"; + reg = <0x0 0x2a000000 0x0 0x1000>; + interrupts = ; + #mbox-cells = <1>; + clocks = <&sysclk_48mhz>; + clock-names = "apb_pclk"; + }; + + sram: sram@2a006000 { + compatible = "phytium,ft2004-sram-ns","mmio-sram"; + reg = <0x0 0x2a006000 0x0 0x2000>; + + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x0 0x2a006000 0x2000>; + + scpi_lpri: scpi-shmem@0 { + compatible = "phytium,ft2004-scpi-shmem"; + reg = <0x1000 0x800>; + }; + }; + + scpi_protocol: scpi { + compatible = "arm,scpi"; + mboxes = <&mbox 0>; + shmem = <&scpi_lpri>; + + clocks { + compatible = "arm,scpi-clocks"; + + scpi_dvfs: scpi_clocks@0 { + compatible = "arm,scpi-dvfs-clocks"; + #clock-cells = <1>; + clock-indices = <0>, <1>; + clock-output-names = "c0", "c1"; + }; + }; + + scpi_sensors: sensors { + compatible = 
"arm,scpi-sensors"; + #thermal-sensor-cells = <1>; + }; + }; + }; +}; diff --git a/arch/arm64/boot/dts/phytium/ft2004c-devboard-dsk.dts b/arch/arm64/boot/dts/phytium/ft2004c-devboard-dsk.dts new file mode 100644 index 000000000000..d8f9c49d7aeb --- /dev/null +++ b/arch/arm64/boot/dts/phytium/ft2004c-devboard-dsk.dts @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DTS file for Phytium D2000 devboard + * + * Copyright (C) 2020, Phytium Technology Co., Ltd. + */ + +/dts-v1/; +/memreserve/ 0x80000000 0x10000; + +#include "ft2004c-generic-psci-soc.dtsi" + +/{ + model = "FT-2000/4C Development Board"; + compatible = "phytium,ft-2004c"; + #address-cells = <2>; + #size-cells = <2>; + + chosen { + stdout-path = "uart1:115200n8"; + }; + + memory@00{ + device_type = "memory"; + /* 4GiB-64MiB ~ 4GiB is reserved for PBF runtime */ + reg = <0x0 0x80000000 0x0 0x7c000000>; + }; + + memory@01{ + device_type = "memory"; + reg = <0x20 0x00000000 0x1 0x00000000>; + }; + + firmware { + optee { + compatible = "linaro,optee-tz"; + method = "smc"; + }; + }; +}; + +&rtc0 { + status = "ok"; +}; + +&uart1 { + status = "ok"; +}; + +&gmac0 { + status = "ok"; + phy-mode = "rgmii-txid"; +}; + +&gmac1 { + status = "ok"; + phy-mode = "rgmii-txid"; +}; + +&spi0 { + status = "ok"; +}; + +&qspi { + status = "ok"; +}; + +&i2c0 { + status = "ok"; +}; + +&i2c1 { + status = "ok"; +}; diff --git a/arch/arm64/boot/dts/phytium/ft2004c-generic-psci-soc.dtsi b/arch/arm64/boot/dts/phytium/ft2004c-generic-psci-soc.dtsi new file mode 100644 index 000000000000..9b09de80abf0 --- /dev/null +++ b/arch/arm64/boot/dts/phytium/ft2004c-generic-psci-soc.dtsi @@ -0,0 +1,486 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * dts file for Phytium D2000 SoC + * + * Copyright (C) 2020, Phytium Technology Co., Ltd. 
+ */ + +#include + +/ { + compatible = "phytium,ft2004c"; + interrupt-parent = <&gic>; + #address-cells = <2>; + #size-cells = <2>; + + aliases { + ethernet0 = &gmac0; + ethernet1 = &gmac1; + }; + + psci { + compatible = "arm,psci-1.0"; + method = "smc"; + cpu_suspend = <0xc4000001>; + cpu_off = <0x84000002>; + cpu_on = <0xc4000003>; + sys_poweroff = <0x84000008>; + sys_reset = <0x84000009>; + }; + + cpus { + #address-cells = <0x2>; + #size-cells = <0x0>; + + cpu0: cpu@0 { + device_type = "cpu"; + compatible = "phytium,ftc663", "arm,armv8"; + reg = <0x0 0x0>; + enable-method = "psci"; + numa-node-id = <0>; + clocks = <&scpi_dvfs 0>; + }; + + cpu1: cpu@1 { + device_type = "cpu"; + compatible = "phytium,ftc663", "arm,armv8"; + reg = <0x0 0x1>; + enable-method = "psci"; + numa-node-id = <0>; + clocks = <&scpi_dvfs 0>; + }; + + cpu2: cpu@100 { + device_type = "cpu"; + compatible = "phytium,ftc663", "arm,armv8"; + reg = <0x0 0x100>; + enable-method = "psci"; + numa-node-id = <0>; + clocks = <&scpi_dvfs 1>; + }; + + cpu3: cpu@101 { + device_type = "cpu"; + compatible = "phytium,ftc663", "arm,armv8"; + reg = <0x0 0x101>; + enable-method = "psci"; + numa-node-id = <0>; + clocks = <&scpi_dvfs 1>; + }; + }; + + gic: interrupt-controller@29900000 { + compatible = "arm,gic-v3"; + #interrupt-cells = <3>; + #address-cells = <2>; + #size-cells = <2>; + ranges; + interrupt-controller; + reg = <0x0 0x29a00000 0 0x20000>, /* GICD */ + <0x0 0x29b00000 0 0x100000>, /* GICR */ + <0x0 0x29c00000 0 0x10000>, /* GICC */ + <0x0 0x29c10000 0 0x10000>, /* GICH */ + <0x0 0x29c20000 0 0x10000>; /* GICV */ + interrupts = ; + + its: gic-its@29920000 { + compatible = "arm,gic-v3-its"; + msi-controller; + reg = <0x0 0x29a20000 0x0 0x20000>; + }; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupts = , + , + , + ; + clock-frequency = <48000000>; + }; + + pmu { + compatible = "arm,armv8-pmuv3"; + interrupts = ; + }; + + clocks { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + 
clk250mhz: clk250mhz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <250000000>; + }; + + sysclk_48mhz: clk48mhz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <48000000>; + }; + + sysclk_600mhz: clk600mhz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <600000000>; + }; + }; + + soc { + compatible = "simple-bus"; + #address-cells = <2>; + #size-cells = <2>; + dma-coherent; + ranges; + + gpio0: gpio@28004000 { + compatible = "phytium,gpio"; + reg = <0x0 0x28004000 0x0 0x1000>; + interrupts = ; + gpio-controller; + #gpio-cells = <2>; + #address-cells = <1>; + #size-cells = <0>; + + porta { + compatible = "phytium,gpio-port"; + reg = <0>; + nr-gpios = <8>; + }; + + portb { + compatible = "phytium,gpio-port"; + reg = <1>; + nr-gpios = <8>; + }; + }; + + gpio1: gpio@28005000 { + compatible = "phytium,gpio"; + reg = <0x0 0x28005000 0x0 0x1000>; + interrupts = ; + gpio-controller; + #gpio-cells = <2>; + #address-cells = <1>; + #size-cells = <0>; + + porta { + compatible = "phytium,gpio-port"; + reg = <0>; + nr-gpios = <8>; + }; + + portb { + compatible = "phytium,gpio-port"; + reg = <1>; + nr-gpios = <8>; + }; + }; + + uart0: uart@28000000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x28000000 0x0 0x1000>; + baud = <115200>; + reg-shift = <2>; + reg-io-width = <4>; + interrupts = ; + clocks = <&sysclk_48mhz &sysclk_48mhz>; + clock-names = "uartclk", "apb_pclk"; + }; + + uart1: uart@28001000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x28001000 0x0 0x1000>; + baud = <115200>; + reg-shift = <2>; + reg-io-width = <4>; + interrupts = ; + clocks = <&sysclk_48mhz &sysclk_48mhz>; + clock-names = "uartclk", "apb_pclk"; + }; + + uart2: uart@28002000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x28002000 0x0 0x1000>; + baud = <115200>; + reg-shift = <2>; + reg-io-width = <4>; + interrupts = ; + clocks = <&sysclk_48mhz &sysclk_48mhz>; + clock-names = 
"uartclk", "apb_pclk"; + }; + + uart3: uart@28003000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x28003000 0x0 0x1000>; + baud = <115200>; + reg-shift = <2>; + reg-io-width = <4>; + interrupts = ; + clocks = <&sysclk_48mhz &sysclk_48mhz>; + clock-names = "uartclk", "apb_pclk"; + }; + + sdci: sdci@28207c00 { + compatible = "phytium,sdci"; + reg = <0x0 0x28207c00 0x0 0x100>; + interrupts = , + , + ; + clocks = <&sysclk_600mhz &sysclk_600mhz>; + clock-names = "phytium_sdc_clk"; + no-sdio; + no-mmc; + no-dma-coherent; + }; + + watchdog0: watchdog@2800a000 { + compatible = "arm,sbsa-gwdt"; + reg = <0x0 0x2800b000 0x0 0x1000>, + <0x0 0x2800a000 0x0 0x1000>; + interrupts = ; + timeout-sec = <30>; + }; + + watchdog1: watchdog@28016000 { + compatible = "arm,sbsa-gwdt"; + reg = <0x0 0x28017000 0x0 0x1000>, + <0x0 0x28016000 0x0 0x1000>; + interrupts = ; + timeout-sec = <30>; + }; + + rtc0: rtc@2800d000 { + compatible = "phytium,rtc"; + reg = <0x0 0x2800d000 0x0 0x1000>; + clocks = <&sysclk_48mhz>; + clock-names = "rtc_pclk"; + interrupts = ; + status = "disabled"; + }; + + i2c0: i2c@28006000 { + compatible = "snps,designware-i2c"; + reg = <0x0 0x28006000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_48mhz>; + status = "disabled"; + }; + + i2c1: i2c@28007000 { + compatible = "snps,designware-i2c"; + reg = <0x0 0x28007000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_48mhz>; + status = "disabled"; + }; + + i2c2: i2c@28008000 { + compatible = "snps,designware-i2c"; + reg = <0x0 0x28008000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_48mhz>; + status = "disabled"; + }; + + i2c3: i2c@28009000 { + compatible = "snps,designware-i2c"; + reg = <0x0 0x28009000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_48mhz>; + status = "disabled"; + }; + + spi0: spi@2800c000 { + compatible = "phytium,spi"; + interrupts = ; + reg = <0x0 0x2800c000 0x0 0x1000>; + clocks = <&sysclk_48mhz>; + num-cs = <4>; + }; + + spi1: spi@28013000 { + compatible = "phytium,spi"; + 
interrupts = ; + reg = <0x0 0x28013000 0x0 0x1000>; + clocks = <&sysclk_48mhz>; + num-cs = <4>; + }; + + qspi: qspi@28014000 { + compatible = "phytium,qspi"; + reg = <0x0 0x28014000 0x0 0x1000>, + <0x0 0x0 0x0 0x02000000>; + reg-names = "qspi", "qspi_mm"; + clocks = <&sysclk_600mhz>; + + flash@0 { + spi-rx-bus-width = <1>; + spi-max-frequency = <600000000>; + }; + }; + + phytium_axi_setup: stmmac-axi-config { + snps,wr_osr_lmt = <0>; + snps,rd_osr_lmt = <0>; + snps,blen = <0 0 0 0 16 8 4>; + }; + + gmac0: eth@2820c000 { + compatible = "snps,dwmac"; + reg = <0x0 0x2820c000 0x0 0x2000>; + interrupts = ; + interrupt-names = "macirq"; + clocks = <&clk250mhz>; + clock-names = "stmmaceth"; + status = "disabled"; + + snps,pbl = <16>; + snps,fixed-burst; + snps,axi-config = <&phytium_axi_setup>; + snps,force_sf_dma_mode; + snps,multicast-filter-bins = <64>; + snps,perfect-filter-entries = <128>; + tx-fifo-depth = <4096>; + rx-fifo-depth = <4096>; + max-frame-size = <9000>; + }; + + gmac1: eth@28210000 { + compatible = "snps,dwmac"; + reg = <0x0 0x28210000 0x0 0x2000>; + interrupts = ; + interrupt-names = "macirq"; + clocks = <&clk250mhz>; + clock-names = "stmmaceth"; + status = "disabled"; + + snps,pbl = <16>; + snps,fixed-burst; + snps,axi-config = <&phytium_axi_setup>; + snps,force_sf_dma_mode; + snps,multicast-filter-bins = <64>; + snps,perfect-filter-entries = <128>; + snps,rx-queues-to-use = <2>; + tx-fifo-depth = <4096>; + rx-fifo-depth = <4096>; + max-frame-size = <9000>; + }; + + can0: can@28207000 { + compatible = "phytium,can"; + reg = <0x0 0x28207000 0x0 0x400>; + interrupts = ; + clocks = <&sysclk_600mhz>; + clock-names = "phytium_can_clk"; + tx-fifo-depth = <0x40>; + rx-fifo-depth = <0x40>; + }; + + can1: can@28207400 { + compatible = "phytium,can"; + reg = <0x0 0x28207400 0x0 0x400>; + interrupts = ; + clocks = <&sysclk_600mhz>; + clock-names = "phytium_can_clk"; + tx-fifo-depth = <0x40>; + rx-fifo-depth = <0x40>; + }; + + can2: can@028207800 { + compatible = 
"phytium,can"; + reg = <0x0 0x28207800 0x0 0x400>; + interrupts = ; + clocks = <&sysclk_600mhz>; + clock-names = "phytium_can_clk"; + tx-fifo-depth = <0x40>; + rx-fifo-depth = <0x40>; + }; + + hda: hda@28206000 { + compatible = "phytium,hda"; + reg = <0 0x28206000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_48mhz>; + clock-names = "phytium_hda_clk"; + }; + + mbox: mailbox@2a000000 { + compatible = "phytium,mbox"; + reg = <0x0 0x2a000000 0x0 0x1000>; + interrupts = ; + #mbox-cells = <1>; + clocks = <&sysclk_48mhz>; + clock-names = "apb_pclk"; + }; + + sram: sram@2a006000 { + compatible = "phytium,ft2004-sram-ns","mmio-sram"; + reg = <0x0 0x2a006000 0x0 0x2000>; + + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x0 0x2a006000 0x2000>; + + scpi_lpri: scpi-shmem@0 { + compatible = "phytium,ft2004-scpi-shmem"; + reg = <0x1000 0x800>; + }; + }; + + scpi_protocol: scpi { + compatible = "arm,scpi"; + mboxes = <&mbox 0>; + shmem = <&scpi_lpri>; + + clocks { + compatible = "arm,scpi-clocks"; + + scpi_dvfs: scpi_clocks@0 { + compatible = "arm,scpi-dvfs-clocks"; + #clock-cells = <1>; + clock-indices = <0>, <1>, <2>, <3>; + clock-output-names = "c0", "c1", "c2", "c3"; + }; + }; + + scpi_sensors: sensors { + compatible = "arm,scpi-sensors"; + #thermal-sensor-cells = <1>; + }; + }; + + ixic: interrupt-controller@29000000 { + compatible = "phytium,ft2004c-ixic"; + reg-names = "ctr", "hpb"; + reg = <0x0 0x29000000 0x0 0x00060000>, + <0x0 0x29100000 0x0 0x00002000>; + interrupt-controller; + interrupt-parent = <&gic>; + #interrupt-cells = <3>; + intx-spi-base = <28>; + }; + + pcie: pcie { + compatible = "pci-host-ecam-generic"; + device_type = "pci"; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + reg = <0x0 0x40000000 0x0 0x10000000>; + msi-parent = <&its>; + bus-range = <0x0 0xff>; + interrupt-map-mask = <0x0 0x0 0x0 0x7>; + interrupt-map = <0x0 0x0 0x0 0x1 &ixic GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x0 0x0 0x2 &ixic GIC_SPI 29 
IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x0 0x0 0x3 &ixic GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x0 0x0 0x4 &ixic GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>; + ranges = <0x01000000 0x00 0x00000000 0x0 0x50000000 0x0 0x00f00000>, + <0x02000000 0x00 0x58000000 0x0 0x58000000 0x0 0x28000000>, + <0x03000000 0x10 0x00000000 0x10 0x00000000 0x10 0x00000000>; + }; + }; + +}; diff --git a/arch/arm64/configs/d2000_defconfig b/arch/arm64/configs/d2000_defconfig new file mode 100644 index 000000000000..07e50fd7f13b --- /dev/null +++ b/arch/arm64/configs/d2000_defconfig @@ -0,0 +1,6794 @@ +# +# Automatically generated file; DO NOT EDIT. +# Linux/arm64 4.19.155 Kernel Configuration +# + +# +# Compiler: OHOS () clang version 10.0.1.480513 (llvm-project b2cadc87d64786377748bdb6bc5e6949492f01ab) +# +CONFIG_GCC_VERSION=0 +CONFIG_CC_IS_CLANG=y +CONFIG_CLANG_VERSION=100001 +CONFIG_CC_HAS_ASM_GOTO=y +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_EXTABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="" +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_CROSS_MEMORY_ATTACH=y +CONFIG_USELIB=y +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y +CONFIG_AUDIT_WATCH=y +CONFIG_AUDIT_TREE=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_SHOW_LEVEL=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_GENERIC_IRQ_CHIP=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GENERIC_MSI_IRQ_DOMAIN=y +CONFIG_HANDLE_DOMAIN_IRQ=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +CONFIG_GENERIC_IRQ_MULTI_HANDLER=y +CONFIG_ARCH_CLOCKSOURCE_DATA=y 
+CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_ARCH_HAS_TICK_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +CONFIG_NO_HZ_IDLE=y +# CONFIG_NO_HZ_FULL is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +# CONFIG_PREEMPT_NONE is not set +# CONFIG_PREEMPT_VOLUNTARY is not set +CONFIG_PREEMPT=y +CONFIG_PREEMPT_COUNT=y + +# +# CPU/Task time and stats accounting +# +CONFIG_TICK_CPU_ACCOUNTING=y +# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_SCHED_AVG_IRQ=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_PREEMPT_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_SRCU=y +CONFIG_TREE_SRCU=y +CONFIG_TASKS_RCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +CONFIG_BUILD_BIN2C=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 +CONFIG_GENERIC_SCHED_CLOCK=y +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_MEMCG_SWAP_ENABLED=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +# CONFIG_DEBUG_BLK_CGROUP is not set +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +# CONFIG_CFS_BANDWIDTH is not set +# CONFIG_RT_GROUP_SCHED is not set +CONFIG_CGROUP_PIDS=y +# CONFIG_CGROUP_RDMA is not set +# CONFIG_CGROUP_FREEZER is not set +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +# CONFIG_CGROUP_DEBUG is not set +CONFIG_NAMESPACES=y +# CONFIG_UTS_NS is not set +# CONFIG_IPC_NS is not 
set +CONFIG_USER_NS=y +# CONFIG_PID_NS is not set +CONFIG_NET_NS=y +# CONFIG_CHECKPOINT_RESTORE is not set +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SYSFS_DEPRECATED is not set +# CONFIG_RELAY is not set +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +# CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_BPF=y +CONFIG_EXPERT=y +CONFIG_UID16=y +CONFIG_MULTIUSER=y +# CONFIG_SGETMASK_SYSCALL is not set +CONFIG_SYSFS_SYSCALL=y +# CONFIG_SYSCTL_SYSCALL is not set +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_PRINTK_NMI=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +# CONFIG_BPF_SYSCALL is not set +# CONFIG_USERFAULTFD is not set +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y +CONFIG_RSEQ=y +# CONFIG_DEBUG_RSEQ is not set +# CONFIG_EMBEDDED is not set +CONFIG_HAVE_PERF_EVENTS=y +# CONFIG_PC104 is not set + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_SLUB_DEBUG=y +# CONFIG_SLUB_MEMCG_SYSFS_ON is not set +# CONFIG_COMPAT_BRK is not set +# CONFIG_SLAB is not set +CONFIG_SLUB=y +# CONFIG_SLOB is not set +CONFIG_SLAB_MERGE_DEFAULT=y +# CONFIG_SLAB_FREELIST_RANDOM is not set +# CONFIG_SLAB_FREELIST_HARDENED is not set +CONFIG_SLUB_CPU_PARTIAL=y +CONFIG_SYSTEM_DATA_VERIFICATION=y +CONFIG_PROFILING=y +CONFIG_ARM64=y +CONFIG_64BIT=y +CONFIG_MMU=y +CONFIG_ARM64_PAGE_SHIFT=12 +CONFIG_ARM64_CONT_SHIFT=4 +CONFIG_ARCH_MMAP_RND_BITS_MIN=18 +CONFIG_ARCH_MMAP_RND_BITS_MAX=33 
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ZONE_DMA32=y +CONFIG_HAVE_GENERIC_GUP=y +CONFIG_SMP=y +CONFIG_KERNEL_MODE_NEON=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_PGTABLE_LEVELS=4 +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_ARCH_PROC_KCORE_TEXT=y + +# +# Platform selection +# +# CONFIG_ARCH_ACTIONS is not set +CONFIG_ARCH_SUNXI=y +CONFIG_ARCH_ALPINE=y +CONFIG_ARCH_BCM2835=y +CONFIG_ARCH_BCM_IPROC=y +CONFIG_ARCH_BERLIN=y +CONFIG_ARCH_BRCMSTB=y +CONFIG_ARCH_EXYNOS=y +CONFIG_ARCH_K3=y +CONFIG_ARCH_LAYERSCAPE=y +CONFIG_ARCH_LG1K=y +CONFIG_ARCH_HISI=y +CONFIG_ARCH_MEDIATEK=y +CONFIG_ARCH_MESON=y +CONFIG_ARCH_MVEBU=y +CONFIG_ARCH_PHYTIUM=y +CONFIG_ARCH_QCOM=y +# CONFIG_ARCH_REALTEK is not set +CONFIG_ARCH_ROCKCHIP=y +CONFIG_ARCH_SEATTLE=y +CONFIG_ARCH_SHMOBILE=y +CONFIG_ARCH_SYNQUACER=y +CONFIG_ARCH_RENESAS=y +CONFIG_ARCH_R8A7795=y +CONFIG_ARCH_R8A7796=y +CONFIG_ARCH_R8A77965=y +CONFIG_ARCH_R8A77970=y +CONFIG_ARCH_R8A77980=y +CONFIG_ARCH_R8A77990=y +CONFIG_ARCH_R8A77995=y +CONFIG_ARCH_STRATIX10=y +CONFIG_ARCH_TEGRA=y +CONFIG_ARCH_SPRD=y +CONFIG_ARCH_THUNDER=y +CONFIG_ARCH_THUNDER2=y +CONFIG_ARCH_UNIPHIER=y +CONFIG_ARCH_VEXPRESS=y +CONFIG_ARCH_XGENE=y +CONFIG_ARCH_ZX=y +CONFIG_ARCH_ZYNQMP=y + +# +# Bus support +# +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI_DOMAINS_GENERIC=y +CONFIG_PCI_SYSCALL=y +# CONFIG_PCIEPORTBUS is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_IRQ_DOMAIN=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +# CONFIG_PCI_STUB is not set +# CONFIG_PCI_PF_STUB is not set +CONFIG_PCI_ATS=y +CONFIG_PCI_ECAM=y +CONFIG_PCI_IOV=y +# CONFIG_PCI_PRI is not set +# 
CONFIG_PCI_PASID is not set +CONFIG_HOTPLUG_PCI=y +# CONFIG_HOTPLUG_PCI_CPCI is not set +# CONFIG_HOTPLUG_PCI_SHPC is not set + +# +# PCI controller drivers +# +CONFIG_PCI_AARDVARK=y + +# +# Cadence PCIe controllers support +# +# CONFIG_PCIE_CADENCE_HOST is not set +# CONFIG_PCIE_XILINX_NWL is not set +# CONFIG_PCI_FTPCI100 is not set +CONFIG_PCI_TEGRA=y +CONFIG_PCIE_RCAR=y +CONFIG_PCI_HOST_COMMON=y +CONFIG_PCI_HOST_GENERIC=y +# CONFIG_PCIE_XILINX is not set +CONFIG_PCI_XGENE=y +CONFIG_PCI_XGENE_MSI=y +CONFIG_PCIE_IPROC=y +CONFIG_PCIE_IPROC_PLATFORM=y +CONFIG_PCIE_IPROC_MSI=y +CONFIG_PCI_HOST_THUNDER_PEM=y +CONFIG_PCI_HOST_THUNDER_ECAM=y +CONFIG_PCIE_ROCKCHIP=y +CONFIG_PCIE_ROCKCHIP_HOST=m +# CONFIG_PCIE_MEDIATEK is not set +# CONFIG_PCIE_MOBIVEIL is not set + +# +# DesignWare PCI Core Support +# +CONFIG_PCIE_DW=y +CONFIG_PCIE_DW_HOST=y +# CONFIG_PCIE_DW_PLAT_HOST is not set +CONFIG_PCI_LAYERSCAPE=y +CONFIG_PCI_HISI=y +CONFIG_PCIE_QCOM=y +CONFIG_PCIE_ARMADA_8K=y +CONFIG_PCIE_KIRIN=y +CONFIG_PCIE_HISI_STB=y + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set + +# +# Kernel Features +# + +# +# ARM errata workarounds via the alternatives framework +# +CONFIG_ARM64_ERRATUM_826319=y +CONFIG_ARM64_ERRATUM_827319=y +CONFIG_ARM64_ERRATUM_824069=y +CONFIG_ARM64_ERRATUM_819472=y +CONFIG_ARM64_ERRATUM_832075=y +CONFIG_ARM64_ERRATUM_834220=y +CONFIG_ARM64_ERRATUM_845719=y +CONFIG_ARM64_ERRATUM_843419=y +CONFIG_ARM64_ERRATUM_1024718=y +CONFIG_ARM64_ERRATUM_1463225=y +CONFIG_ARM64_ERRATUM_1542419=y +CONFIG_CAVIUM_ERRATUM_22375=y +CONFIG_CAVIUM_ERRATUM_23144=y +CONFIG_CAVIUM_ERRATUM_23154=y +CONFIG_CAVIUM_ERRATUM_27456=y +CONFIG_CAVIUM_ERRATUM_30115=y +CONFIG_QCOM_FALKOR_ERRATUM_1003=y +CONFIG_QCOM_FALKOR_ERRATUM_1009=y +CONFIG_QCOM_QDF2400_ERRATUM_0065=y +CONFIG_SOCIONEXT_SYNQUACER_PREITS=y +CONFIG_HISILICON_ERRATUM_161600802=y +CONFIG_QCOM_FALKOR_ERRATUM_E1041=y +CONFIG_ARM64_4K_PAGES=y +# 
CONFIG_ARM64_16K_PAGES is not set +# CONFIG_ARM64_64K_PAGES is not set +# CONFIG_ARM64_VA_BITS_39 is not set +CONFIG_ARM64_VA_BITS_48=y +CONFIG_ARM64_VA_BITS=48 +CONFIG_ARM64_PA_BITS_48=y +CONFIG_ARM64_PA_BITS=48 +# CONFIG_CPU_BIG_ENDIAN is not set +CONFIG_SCHED_MC=y +# CONFIG_SCHED_SMT is not set +CONFIG_NR_CPUS=64 +CONFIG_HOTPLUG_CPU=y +CONFIG_NUMA=y +CONFIG_NODES_SHIFT=2 +CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_HOLES_IN_ZONE=y +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +CONFIG_SCHED_HRTICK=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_HAVE_ARCH_PFN_VALID=y +CONFIG_HW_PERF_EVENTS=y +CONFIG_SYS_SUPPORTS_HUGETLBFS=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_SECCOMP=y +CONFIG_PARAVIRT=y +# CONFIG_PARAVIRT_TIME_ACCOUNTING is not set +CONFIG_KEXEC=y +CONFIG_CRASH_DUMP=y +CONFIG_XEN_DOM0=y +CONFIG_XEN=y +CONFIG_FORCE_MAX_ZONEORDER=11 +CONFIG_UNMAP_KERNEL_AT_EL0=y +CONFIG_HARDEN_BRANCH_PREDICTOR=y +CONFIG_HARDEN_EL2_VECTORS=y +CONFIG_ARM64_SSBD=y +# CONFIG_ARMV8_DEPRECATED is not set +# CONFIG_ARM64_SW_TTBR0_PAN is not set + +# +# ARMv8.1 architectural features +# +CONFIG_ARM64_HW_AFDBM=y +CONFIG_ARM64_PAN=y +CONFIG_ARM64_LSE_ATOMICS=y +CONFIG_ARM64_VHE=y + +# +# ARMv8.2 architectural features +# +CONFIG_ARM64_UAO=y +# CONFIG_ARM64_PMEM is not set +CONFIG_ARM64_RAS_EXTN=y +CONFIG_ARM64_SVE=y +CONFIG_ARM64_MODULE_PLTS=y +# CONFIG_RANDOMIZE_BASE is not set + +# +# Boot options +# +CONFIG_CMDLINE="" +# CONFIG_CMDLINE_FORCE is not set +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_COMPAT=y +CONFIG_SYSVIPC_COMPAT=y + +# +# Power management options +# +# CONFIG_SUSPEND is not set +CONFIG_HIBERNATE_CALLBACKS=y +CONFIG_HIBERNATION=y
+CONFIG_PM_STD_PARTITION="" +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_CLK=y +CONFIG_PM_GENERIC_DOMAINS=y +CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y +CONFIG_PM_GENERIC_DOMAINS_SLEEP=y +CONFIG_PM_GENERIC_DOMAINS_OF=y +CONFIG_CPU_PM=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_HIBERNATION_HEADER=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y + +# +# CPU Power Management +# + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +CONFIG_CPU_IDLE_MULTIPLE_DRIVERS=y +# CONFIG_CPU_IDLE_GOV_LADDER is not set +CONFIG_CPU_IDLE_GOV_MENU=y +CONFIG_DT_IDLE_STATES=y + +# +# ARM CPU Idle Drivers +# +CONFIG_ARM_CPUIDLE=y + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=m +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y + +# +# CPU frequency scaling drivers +# +CONFIG_CPUFREQ_DT=y +CONFIG_CPUFREQ_DT_PLATDEV=y +CONFIG_ARM_ARMADA_37XX_CPUFREQ=y +CONFIG_ARM_BIG_LITTLE_CPUFREQ=y +# CONFIG_ARM_DT_BL_CPUFREQ is not set +# CONFIG_ARM_SCPI_CPUFREQ is not set +CONFIG_ARM_BRCMSTB_AVS_CPUFREQ=y +# CONFIG_ARM_MEDIATEK_CPUFREQ is not set +# CONFIG_ARM_QCOM_CPUFREQ_KRYO is not set +CONFIG_ARM_TEGRA20_CPUFREQ=y +CONFIG_ARM_TEGRA124_CPUFREQ=y +CONFIG_ARM_TEGRA186_CPUFREQ=y +# CONFIG_QORIQ_CPUFREQ is not set + +# +# Firmware Drivers +# +CONFIG_ARM_PSCI_FW=y +# CONFIG_ARM_PSCI_CHECKER is not set +# CONFIG_ARM_SCMI_PROTOCOL is not set +# CONFIG_ARM_SCPI_PROTOCOL is not set
+# CONFIG_ARM_SCPI_POWER_DOMAIN is not set +# CONFIG_ARM_SDE_INTERFACE is not set +# CONFIG_FIRMWARE_MEMMAP is not set +CONFIG_RASPBERRYPI_FIRMWARE=y +# CONFIG_FW_CFG_SYSFS is not set +CONFIG_QCOM_SCM=y +CONFIG_QCOM_SCM_64=y +# CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT is not set +CONFIG_HAVE_ARM_SMCCC=y +# CONFIG_GOOGLE_FIRMWARE is not set +CONFIG_MESON_SM=y + + +#ACPI set +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_APEI=y +CONFIG_ACPI_APEI_GHES=y +CONFIG_ACPI_APEI_MEMORY_FAILURE=y +CONFIG_ACPI_APEI_EINJ=y + +# +# Tegra firmware driver +# +CONFIG_TEGRA_IVC=y +CONFIG_TEGRA_BPMP=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_KVM_VFIO=y +CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_HAVE_KVM_IRQ_BYPASS=y +CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE=y +CONFIG_IRQ_BYPASS_MANAGER=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_KVM_ARM_HOST=y +CONFIG_KVM_ARM_PMU=y +CONFIG_KVM_INDIRECT_VECTORS=y +# CONFIG_VHOST_NET is not set +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set +CONFIG_ARM64_CRYPTO=y +CONFIG_CRYPTO_SHA256_ARM64=y +CONFIG_CRYPTO_SHA512_ARM64=m +CONFIG_CRYPTO_SHA1_ARM64_CE=y +CONFIG_CRYPTO_SHA2_ARM64_CE=y +CONFIG_CRYPTO_SHA512_ARM64_CE=m +CONFIG_CRYPTO_SHA3_ARM64=m +CONFIG_CRYPTO_SM3_ARM64_CE=m +# CONFIG_CRYPTO_SM4_ARM64_CE is not set +CONFIG_CRYPTO_GHASH_ARM64_CE=y +CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m +CONFIG_CRYPTO_CRC32_ARM64_CE=m +CONFIG_CRYPTO_AES_ARM64=y +CONFIG_CRYPTO_AES_ARM64_CE=y +CONFIG_CRYPTO_AES_ARM64_CE_CCM=y +CONFIG_CRYPTO_AES_ARM64_CE_BLK=y +CONFIG_CRYPTO_AES_ARM64_NEON_BLK=m +CONFIG_CRYPTO_CHACHA20_NEON=m +CONFIG_CRYPTO_AES_ARM64_BS=m + +# +# General architecture-dependent options +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +# CONFIG_KPROBES is not set +CONFIG_JUMP_LABEL=y +# CONFIG_STATIC_KEYS_SELFTEST is not set
+CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_NMI=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_GENERIC_IDLE_POLL_SETUP=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_RCU_TABLE_FREE=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP_FILTER=y +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_CC_HAS_STACKPROTECTOR_NONE=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_ARCH_MMAP_RND_BITS=18 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=11 +CONFIG_CLONE_BACKWARDS=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_REFCOUNT_FULL=y +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +CONFIG_HAVE_GCC_PLUGINS=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is 
not set +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +# CONFIG_MODULE_SIG is not set +# CONFIG_MODULE_COMPRESS is not set +# CONFIG_TRIM_UNUSED_KSYMS is not set +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLK_SCSI_REQUEST=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +# CONFIG_BLK_DEV_ZONED is not set +# CONFIG_BLK_DEV_THROTTLING is not set +# CONFIG_BLK_CMDLINE_PARSER is not set +# CONFIG_BLK_WBT is not set +# CONFIG_BLK_CGROUP_IOLATENCY is not set +CONFIG_BLK_DEBUG_FS=y +# CONFIG_BLK_SED_OPAL is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_EFI_PARTITION=y +CONFIG_BLOCK_COMPAT=y +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_CFQ_GROUP_IOSCHED is not set +# CONFIG_DEFAULT_DEADLINE is not set +CONFIG_DEFAULT_CFQ=y +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="cfq" +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +# CONFIG_IOSCHED_BFQ is not set +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_ASN1=y +CONFIG_UNINLINE_SPIN_UNLOCK=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ELFCORE=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_BINFMT_SCRIPT=y +# CONFIG_BINFMT_MISC is not set +CONFIG_COREDUMP=y + +# +# Memory Management options +# +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_NEED_MULTIPLE_NODES=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y 
+CONFIG_HAVE_MEMBLOCK=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +CONFIG_NO_BOOTMEM=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +CONFIG_MEMORY_FAILURE=y +# CONFIG_HWPOISON_INJECT is not set +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +CONFIG_TRANSPARENT_HUGE_PAGECACHE=y +# CONFIG_CLEANCACHE is not set +# CONFIG_FRONTSWAP is not set +CONFIG_CMA=y +# CONFIG_CMA_DEBUG is not set +# CONFIG_CMA_DEBUGFS is not set +CONFIG_CMA_AREAS=7 +# CONFIG_ZPOOL is not set +# CONFIG_ZBUD is not set +# CONFIG_ZSMALLOC is not set +CONFIG_GENERIC_EARLY_IOREMAP=y +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set +# CONFIG_IDLE_PAGE_TRACKING is not set +CONFIG_FRAME_VECTOR=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_BENCHMARK is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +CONFIG_NET=y +CONFIG_NET_INGRESS=y + +# +# Networking options +# +CONFIG_PACKET=y +# CONFIG_PACKET_DIAG is not set +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=y +# CONFIG_TLS is not set +CONFIG_XFRM=y +CONFIG_XFRM_ALGO=y +# CONFIG_XFRM_USER is not set +# CONFIG_XFRM_INTERFACE is not set +# CONFIG_XFRM_SUB_POLICY is not set +# CONFIG_XFRM_MIGRATE is not set +# CONFIG_XFRM_STATISTICS is not set +CONFIG_NET_KEY=y +# CONFIG_NET_KEY_MIGRATE is not set +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +# CONFIG_IP_ROUTE_MULTIPATH is not set +# CONFIG_IP_ROUTE_VERBOSE is not set +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +# CONFIG_IP_PNP_RARP is not set +CONFIG_NET_IPIP=y +# CONFIG_NET_IPGRE_DEMUX is not set +CONFIG_NET_IP_TUNNEL=y +# CONFIG_IP_MROUTE is not set +# CONFIG_SYN_COOKIES is not set +CONFIG_NET_IPVTI=m +# CONFIG_NET_FOU is not 
set +# CONFIG_NET_FOU_IP_TUNNELS is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +CONFIG_INET_TUNNEL=y +CONFIG_INET_XFRM_MODE_TRANSPORT=y +CONFIG_INET_XFRM_MODE_TUNNEL=y +CONFIG_INET_XFRM_MODE_BEET=y +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_INET_UDP_DIAG is not set +# CONFIG_INET_RAW_DIAG is not set +# CONFIG_INET_DIAG_DESTROY is not set +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=m +# CONFIG_IPV6_ROUTER_PREF is not set +# CONFIG_IPV6_OPTIMISTIC_DAD is not set +# CONFIG_INET6_AH is not set +# CONFIG_INET6_ESP is not set +# CONFIG_INET6_IPCOMP is not set +# CONFIG_IPV6_MIP6 is not set +# CONFIG_IPV6_ILA is not set +CONFIG_INET6_XFRM_MODE_TRANSPORT=m +CONFIG_INET6_XFRM_MODE_TUNNEL=m +CONFIG_INET6_XFRM_MODE_BEET=m +# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set +# CONFIG_IPV6_VTI is not set +CONFIG_IPV6_SIT=m +# CONFIG_IPV6_SIT_6RD is not set +CONFIG_IPV6_NDISC_NODETYPE=y +# CONFIG_IPV6_TUNNEL is not set +CONFIG_IPV6_MULTIPLE_TABLES=y +# CONFIG_IPV6_SUBTREES is not set +# CONFIG_IPV6_MROUTE is not set +# CONFIG_IPV6_SEG6_LWTUNNEL is not set +# CONFIG_IPV6_SEG6_HMAC is not set +# CONFIG_NETLABEL is not set +# CONFIG_NETWORK_SECMARK is not set +CONFIG_NET_PTP_CLASSIFY=y +# CONFIG_NETWORK_PHY_TIMESTAMPING is not set +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=m + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_NETLINK=y +CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y +CONFIG_NETFILTER_NETLINK_ACCT=y +# CONFIG_NETFILTER_NETLINK_QUEUE is not set +# CONFIG_NETFILTER_NETLINK_LOG is not set +# CONFIG_NETFILTER_NETLINK_OSF is not set +CONFIG_NF_CONNTRACK=y +CONFIG_NF_LOG_COMMON=m +# CONFIG_NF_LOG_NETDEV is not set +# CONFIG_NF_CONNTRACK_MARK is not set +# CONFIG_NF_CONNTRACK_ZONES is not set +CONFIG_NF_CONNTRACK_PROCFS=y 
+CONFIG_NF_CONNTRACK_EVENTS=y +# CONFIG_NF_CONNTRACK_TIMEOUT is not set +# CONFIG_NF_CONNTRACK_TIMESTAMP is not set +# CONFIG_NF_CONNTRACK_LABELS is not set +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +# CONFIG_NF_CONNTRACK_AMANDA is not set +# CONFIG_NF_CONNTRACK_FTP is not set +# CONFIG_NF_CONNTRACK_H323 is not set +# CONFIG_NF_CONNTRACK_IRC is not set +# CONFIG_NF_CONNTRACK_NETBIOS_NS is not set +# CONFIG_NF_CONNTRACK_SNMP is not set +# CONFIG_NF_CONNTRACK_PPTP is not set +# CONFIG_NF_CONNTRACK_SANE is not set +# CONFIG_NF_CONNTRACK_SIP is not set +# CONFIG_NF_CONNTRACK_TFTP is not set +CONFIG_NF_CT_NETLINK=y +CONFIG_NF_NAT=m +CONFIG_NF_NAT_NEEDED=y +CONFIG_NF_NAT_PROTO_DCCP=y +CONFIG_NF_NAT_PROTO_UDPLITE=y +CONFIG_NF_NAT_PROTO_SCTP=y +# CONFIG_NF_TABLES is not set +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +# CONFIG_NETFILTER_XT_MARK is not set +# CONFIG_NETFILTER_XT_CONNMARK is not set + +# +# Xtables targets +# +# CONFIG_NETFILTER_XT_TARGET_AUDIT is not set +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +# CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set +# CONFIG_NETFILTER_XT_TARGET_CONNMARK is not set +# CONFIG_NETFILTER_XT_TARGET_DSCP is not set +# CONFIG_NETFILTER_XT_TARGET_HL is not set +# CONFIG_NETFILTER_XT_TARGET_HMARK is not set +# CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set +# CONFIG_NETFILTER_XT_TARGET_LED is not set +CONFIG_NETFILTER_XT_TARGET_LOG=m +# CONFIG_NETFILTER_XT_TARGET_MARK is not set +CONFIG_NETFILTER_XT_NAT=m +# CONFIG_NETFILTER_XT_TARGET_NETMAP is not set +# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set +# CONFIG_NETFILTER_XT_TARGET_NFQUEUE is not set +# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set +# CONFIG_NETFILTER_XT_TARGET_REDIRECT is not set +# CONFIG_NETFILTER_XT_TARGET_TEE is not set +# CONFIG_NETFILTER_XT_TARGET_TPROXY is not set +# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set +# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set + +# +# Xtables matches +# 
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +# CONFIG_NETFILTER_XT_MATCH_BPF is not set +# CONFIG_NETFILTER_XT_MATCH_CGROUP is not set +# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set +# CONFIG_NETFILTER_XT_MATCH_COMMENT is not set +# CONFIG_NETFILTER_XT_MATCH_CONNBYTES is not set +# CONFIG_NETFILTER_XT_MATCH_CONNLABEL is not set +# CONFIG_NETFILTER_XT_MATCH_CONNLIMIT is not set +# CONFIG_NETFILTER_XT_MATCH_CONNMARK is not set +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +# CONFIG_NETFILTER_XT_MATCH_CPU is not set +# CONFIG_NETFILTER_XT_MATCH_DCCP is not set +# CONFIG_NETFILTER_XT_MATCH_DEVGROUP is not set +# CONFIG_NETFILTER_XT_MATCH_DSCP is not set +# CONFIG_NETFILTER_XT_MATCH_ECN is not set +# CONFIG_NETFILTER_XT_MATCH_ESP is not set +# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set +# CONFIG_NETFILTER_XT_MATCH_HELPER is not set +# CONFIG_NETFILTER_XT_MATCH_HL is not set +# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set +# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set +# CONFIG_NETFILTER_XT_MATCH_L2TP is not set +# CONFIG_NETFILTER_XT_MATCH_LENGTH is not set +# CONFIG_NETFILTER_XT_MATCH_LIMIT is not set +# CONFIG_NETFILTER_XT_MATCH_MAC is not set +# CONFIG_NETFILTER_XT_MATCH_MARK is not set +# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set +# CONFIG_NETFILTER_XT_MATCH_NFACCT is not set +# CONFIG_NETFILTER_XT_MATCH_OSF is not set +# CONFIG_NETFILTER_XT_MATCH_OWNER is not set +# CONFIG_NETFILTER_XT_MATCH_POLICY is not set +# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set +# CONFIG_NETFILTER_XT_MATCH_PKTTYPE is not set +# CONFIG_NETFILTER_XT_MATCH_QUOTA is not set +# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set +# CONFIG_NETFILTER_XT_MATCH_REALM is not set +# CONFIG_NETFILTER_XT_MATCH_RECENT is not set +# CONFIG_NETFILTER_XT_MATCH_SCTP is not set +# CONFIG_NETFILTER_XT_MATCH_SOCKET is not set +# CONFIG_NETFILTER_XT_MATCH_STATE is not set +# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set +# CONFIG_NETFILTER_XT_MATCH_STRING is not set +# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set +# 
CONFIG_NETFILTER_XT_MATCH_TIME is not set +# CONFIG_NETFILTER_XT_MATCH_U32 is not set +# CONFIG_IP_SET is not set +# CONFIG_IP_VS is not set + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=y +# CONFIG_NF_SOCKET_IPV4 is not set +# CONFIG_NF_TPROXY_IPV4 is not set +# CONFIG_NF_DUP_IPV4 is not set +# CONFIG_NF_LOG_ARP is not set +CONFIG_NF_LOG_IPV4=m +CONFIG_NF_REJECT_IPV4=m +CONFIG_NF_NAT_IPV4=m +CONFIG_NF_NAT_MASQUERADE_IPV4=y +CONFIG_IP_NF_IPTABLES=y +# CONFIG_IP_NF_MATCH_AH is not set +# CONFIG_IP_NF_MATCH_ECN is not set +# CONFIG_IP_NF_MATCH_RPFILTER is not set +# CONFIG_IP_NF_MATCH_TTL is not set +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +# CONFIG_IP_NF_TARGET_SYNPROXY is not set +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +# CONFIG_IP_NF_TARGET_NETMAP is not set +# CONFIG_IP_NF_TARGET_REDIRECT is not set +CONFIG_IP_NF_MANGLE=m +# CONFIG_IP_NF_TARGET_CLUSTERIP is not set +# CONFIG_IP_NF_TARGET_ECN is not set +# CONFIG_IP_NF_TARGET_TTL is not set +# CONFIG_IP_NF_RAW is not set +# CONFIG_IP_NF_SECURITY is not set +CONFIG_IP_NF_ARPTABLES=y +# CONFIG_IP_NF_ARPFILTER is not set +# CONFIG_IP_NF_ARP_MANGLE is not set + +# +# IPv6: Netfilter Configuration +# +# CONFIG_NF_SOCKET_IPV6 is not set +# CONFIG_NF_TPROXY_IPV6 is not set +# CONFIG_NF_DUP_IPV6 is not set +CONFIG_NF_REJECT_IPV6=m +CONFIG_NF_LOG_IPV6=m +CONFIG_NF_NAT_IPV6=m +CONFIG_NF_NAT_MASQUERADE_IPV6=y +CONFIG_IP6_NF_IPTABLES=m +# CONFIG_IP6_NF_MATCH_AH is not set +# CONFIG_IP6_NF_MATCH_EUI64 is not set +# CONFIG_IP6_NF_MATCH_FRAG is not set +# CONFIG_IP6_NF_MATCH_OPTS is not set +# CONFIG_IP6_NF_MATCH_HL is not set +# CONFIG_IP6_NF_MATCH_IPV6HEADER is not set +# CONFIG_IP6_NF_MATCH_MH is not set +# CONFIG_IP6_NF_MATCH_RPFILTER is not set +# CONFIG_IP6_NF_MATCH_RT is not set +# CONFIG_IP6_NF_MATCH_SRH is not set +# CONFIG_IP6_NF_TARGET_HL is not set +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +# CONFIG_IP6_NF_TARGET_SYNPROXY is not set +CONFIG_IP6_NF_MANGLE=m +# 
CONFIG_IP6_NF_RAW is not set +# CONFIG_IP6_NF_SECURITY is not set +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +# CONFIG_IP6_NF_TARGET_NPT is not set +CONFIG_NF_DEFRAG_IPV6=y +# CONFIG_BRIDGE_NF_EBTABLES is not set +# CONFIG_BPFILTER is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_L2TP is not set +CONFIG_STP=m +CONFIG_GARP=m +CONFIG_MRP=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_HAVE_NET_DSA=y +# CONFIG_NET_DSA is not set +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +# CONFIG_DECNET is not set +CONFIG_LLC=m +# CONFIG_LLC2 is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +# CONFIG_6LOWPAN is not set +# CONFIG_IEEE802154 is not set +# CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set +CONFIG_DNS_RESOLVER=y +# CONFIG_BATMAN_ADV is not set +# CONFIG_OPENVSWITCH is not set +# CONFIG_VSOCKETS is not set +# CONFIG_NETLINK_DIAG is not set +# CONFIG_MPLS is not set +# CONFIG_NET_NSH is not set +# CONFIG_HSR is not set +# CONFIG_NET_SWITCHDEV is not set +# CONFIG_NET_L3_MASTER_DEV is not set +# CONFIG_QRTR is not set +# CONFIG_NET_NCSI is not set +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_XPS=y +# CONFIG_CGROUP_NET_PRIO is not set +# CONFIG_CGROUP_NET_CLASSID is not set +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_JIT=y +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_HAMRADIO is not set +CONFIG_CAN=y +CONFIG_CAN_RAW=y +CONFIG_CAN_BCM=y +CONFIG_CAN_GW=y + +# +# CAN Device Drivers +# +# CONFIG_CAN_VCAN is not set +# CONFIG_CAN_VXCAN is not set +# CONFIG_CAN_SLCAN is not set +CONFIG_CAN_DEV=y +CONFIG_CAN_CALC_BITTIMING=y +# CONFIG_CAN_GRCAN is not set +# CONFIG_CAN_XILINXCAN is not set +# CONFIG_CAN_C_CAN is not set +# CONFIG_CAN_CC770 is not set +# 
CONFIG_CAN_IFI_CANFD is not set +# CONFIG_CAN_M_CAN is not set +# CONFIG_CAN_PEAK_PCIEFD is not set +# CONFIG_CAN_RCAR is not set +# CONFIG_CAN_RCAR_CANFD is not set +# CONFIG_CAN_SJA1000 is not set +# CONFIG_CAN_SOFTING is not set + +# +# CAN SPI interfaces +# +# CONFIG_CAN_HI311X is not set +# CONFIG_CAN_MCP251X is not set + +# +# CAN USB interfaces +# +# CONFIG_CAN_8DEV_USB is not set +# CONFIG_CAN_EMS_USB is not set +# CONFIG_CAN_ESD_USB2 is not set +# CONFIG_CAN_GS_USB is not set +# CONFIG_CAN_KVASER_USB is not set +# CONFIG_CAN_MCBA_USB is not set +# CONFIG_CAN_PEAK_USB is not set +# CONFIG_CAN_UCAN is not set +# CONFIG_CAN_DEBUG_DEVICES is not set +CONFIG_BT=y +CONFIG_BT_BREDR=y +# CONFIG_BT_RFCOMM is not set +# CONFIG_BT_BNEP is not set +CONFIG_BT_HIDP=m +# CONFIG_BT_HS is not set +# CONFIG_BT_LE is not set +CONFIG_BT_LEDS=y +# CONFIG_BT_SELFTEST is not set +# CONFIG_BT_DEBUGFS is not set + +# +# Bluetooth device drivers +# +CONFIG_BT_INTEL=m +CONFIG_BT_BCM=m +CONFIG_BT_RTL=m +CONFIG_BT_HCIBTUSB=m +# CONFIG_BT_HCIBTUSB_AUTOSUSPEND is not set +CONFIG_BT_HCIBTUSB_BCM=y +CONFIG_BT_HCIBTUSB_RTL=y +# CONFIG_BT_HCIBTSDIO is not set +CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_SERDEV=y +CONFIG_BT_HCIUART_H4=y +# CONFIG_BT_HCIUART_NOKIA is not set +# CONFIG_BT_HCIUART_BCSP is not set +# CONFIG_BT_HCIUART_ATH3K is not set +CONFIG_BT_HCIUART_LL=y +# CONFIG_BT_HCIUART_3WIRE is not set +# CONFIG_BT_HCIUART_INTEL is not set +CONFIG_BT_HCIUART_BCM=y +# CONFIG_BT_HCIUART_QCA is not set +# CONFIG_BT_HCIUART_AG6XX is not set +# CONFIG_BT_HCIUART_MRVL is not set +# CONFIG_BT_HCIBCM203X is not set +# CONFIG_BT_HCIBPA10X is not set +# CONFIG_BT_HCIBFUSB is not set +# CONFIG_BT_HCIVHCI is not set +# CONFIG_BT_MRVL is not set +# CONFIG_BT_ATH3K is not set +# CONFIG_BT_MTKUART is not set +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_CFG80211=y +# CONFIG_NL80211_TESTMODE is not set +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not 
set +# CONFIG_CFG80211_CERTIFICATION_ONUS is not set +CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y +CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +CONFIG_CFG80211_CRDA_SUPPORT=y +# CONFIG_CFG80211_WEXT is not set +CONFIG_MAC80211=m +CONFIG_MAC80211_HAS_RC=y +CONFIG_MAC80211_RC_MINSTREL=y +CONFIG_MAC80211_RC_MINSTREL_HT=y +# CONFIG_MAC80211_RC_MINSTREL_VHT is not set +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" +# CONFIG_MAC80211_MESH is not set +CONFIG_MAC80211_LEDS=y +# CONFIG_MAC80211_DEBUGFS is not set +# CONFIG_MAC80211_MESSAGE_TRACING is not set +# CONFIG_MAC80211_DEBUG_MENU is not set +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +# CONFIG_WIMAX is not set +CONFIG_RFKILL=y +CONFIG_RFKILL_LEDS=y +CONFIG_RFKILL_INPUT=y +# CONFIG_RFKILL_GPIO is not set +CONFIG_NET_9P=y +CONFIG_NET_9P_VIRTIO=y +# CONFIG_NET_9P_XEN is not set +# CONFIG_NET_9P_DEBUG is not set +# CONFIG_CAIF is not set +# CONFIG_CEPH_LIB is not set +# CONFIG_NFC is not set +# CONFIG_PSAMPLE is not set +# CONFIG_NET_IFE is not set +# CONFIG_LWTUNNEL is not set +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +# CONFIG_NET_DEVLINK is not set +CONFIG_MAY_USE_DEVLINK=y +CONFIG_FAILOVER=y +CONFIG_HAVE_EBPF_JIT=y + +# +# Device Drivers +# +CONFIG_ARM_AMBA=y +CONFIG_TEGRA_AHB=y + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER=y +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_FW_LOADER_USER_HELPER is not set +CONFIG_WANT_DEV_COREDUMP=y +CONFIG_ALLOW_DEV_COREDUMP=y +CONFIG_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_SYS_HYPERVISOR=y +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y +CONFIG_SOC_BUS=y 
+CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=y +CONFIG_REGMAP_SPI=y +CONFIG_REGMAP_SPMI=y +CONFIG_REGMAP_MMIO=y +CONFIG_REGMAP_IRQ=y +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set +CONFIG_DMA_CMA=y + +# +# Default contiguous memory area size: +# +CONFIG_CMA_SIZE_MBYTES=32 +CONFIG_CMA_SIZE_SEL_MBYTES=y +# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set +# CONFIG_CMA_SIZE_SEL_MIN is not set +# CONFIG_CMA_SIZE_SEL_MAX is not set +CONFIG_CMA_ALIGNMENT=8 +CONFIG_GENERIC_ARCH_TOPOLOGY=y + +# +# Bus devices +# +CONFIG_BRCMSTB_GISB_ARB=y +CONFIG_HISILICON_LPC=y +CONFIG_QCOM_EBI2=y +CONFIG_SIMPLE_PM_BUS=y +CONFIG_SUN50I_DE2_BUS=y +CONFIG_SUNXI_RSB=y +# CONFIG_TEGRA_ACONNECT is not set +# CONFIG_TEGRA_GMI is not set +CONFIG_UNIPHIER_SYSTEM_BUS=y +CONFIG_VEXPRESS_CONFIG=y +# CONFIG_FSL_MC_BUS is not set +# CONFIG_CONNECTOR is not set +# CONFIG_GNSS is not set +CONFIG_MTD=y +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_REDBOOT_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +# CONFIG_MTD_AFS_PARTS is not set +CONFIG_MTD_OF_PARTS=y +# CONFIG_MTD_AR7_PARTS is not set + +# +# Partition parsers +# + +# +# User Modules And Translation Layers +# +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +# 
CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +CONFIG_MTD_M25P80=y +# CONFIG_MTD_MCHP23K256 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# CONFIG_MTD_ONENAND is not set +CONFIG_MTD_NAND_ECC=y +# CONFIG_MTD_NAND_ECC_SMC is not set +CONFIG_MTD_NAND=y +# CONFIG_MTD_NAND_ECC_BCH is not set +CONFIG_MTD_NAND_DENALI=y +# CONFIG_MTD_NAND_DENALI_PCI is not set +CONFIG_MTD_NAND_DENALI_DT=y +# CONFIG_MTD_NAND_GPIO is not set +# CONFIG_MTD_NAND_RICOH is not set +# CONFIG_MTD_NAND_DISKONCHIP is not set +# CONFIG_MTD_NAND_DOCG4 is not set +# CONFIG_MTD_NAND_CAFE is not set +CONFIG_MTD_NAND_MARVELL=y +# CONFIG_MTD_NAND_NANDSIM is not set +# CONFIG_MTD_NAND_BRCMNAND is not set +# CONFIG_MTD_NAND_PLATFORM is not set +# CONFIG_MTD_NAND_FSL_IFC is not set +# CONFIG_MTD_NAND_SUNXI is not set +# CONFIG_MTD_NAND_HISI504 is not set +CONFIG_MTD_NAND_QCOM=y +# CONFIG_MTD_NAND_MTK is not set +# CONFIG_MTD_NAND_TEGRA is not set +# CONFIG_MTD_SPI_NAND is not set + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +CONFIG_MTD_SPI_NOR=y +# CONFIG_MTD_MT81xx_NOR is not set +CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y +# CONFIG_SPI_CADENCE_QUADSPI is not set +# CONFIG_SPI_FSL_QUADSPI is not set +# CONFIG_SPI_HISI_SFC is not set +# CONFIG_MTD_UBI is not set +CONFIG_DTC=y +CONFIG_OF=y +# CONFIG_OF_UNITTEST is not set +CONFIG_OF_FLATTREE=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_KOBJ=y +CONFIG_OF_DYNAMIC=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_IRQ=y +CONFIG_OF_NET=y +CONFIG_OF_MDIO=y +CONFIG_OF_RESERVED_MEM=y +CONFIG_OF_RESOLVE=y +CONFIG_OF_OVERLAY=y +CONFIG_OF_NUMA=y +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_NULL_BLK is not set +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set 
+CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_DRBD is not set +CONFIG_BLK_DEV_NBD=m +# CONFIG_BLK_DEV_SKD is not set +# CONFIG_BLK_DEV_SX8 is not set +# CONFIG_BLK_DEV_RAM is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_XEN_BLKDEV_FRONTEND=y +# CONFIG_XEN_BLKDEV_BACKEND is not set +CONFIG_VIRTIO_BLK=y +# CONFIG_VIRTIO_BLK_SCSI is not set +# CONFIG_BLK_DEV_RBD is not set +# CONFIG_BLK_DEV_RSXX is not set + +# +# NVME Support +# +CONFIG_NVME_CORE=m +CONFIG_BLK_DEV_NVME=m +# CONFIG_NVME_MULTIPATH is not set +# CONFIG_NVME_FC is not set +# CONFIG_NVME_TARGET is not set + +# +# Misc devices +# +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_PHANTOM is not set +# CONFIG_SGI_IOC4 is not set +# CONFIG_TIFM_CORE is not set +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_HP_ILO is not set +# CONFIG_QCOM_COINCELL is not set +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +# CONFIG_USB_SWITCH_FSA9480 is not set +# CONFIG_LATTICE_ECP3_CONFIG is not set +CONFIG_SRAM=y +CONFIG_VEXPRESS_SYSCFG=y +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +CONFIG_EEPROM_AT25=m +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_EEPROM_93XX46 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +# CONFIG_CB710_CORE is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +# CONFIG_SENSORS_LIS3_SPI is not set +# CONFIG_SENSORS_LIS3_I2C is not set +# CONFIG_ALTERA_STAPL is not set + +# +# Intel MIC & related support +# + +# +# Intel MIC Bus 
Driver +# + +# +# SCIF Bus Driver +# + +# +# VOP Bus Driver +# + +# +# Intel MIC Host Driver +# + +# +# Intel MIC Card Driver +# + +# +# SCIF Driver +# + +# +# Intel MIC Coprocessor State Management (COSM) Drivers +# + +# +# VOP Driver +# +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_MISC_RTSX_PCI is not set +# CONFIG_MISC_RTSX_USB is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +# CONFIG_RAID_ATTRS is not set +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_MQ_DEFAULT=y +# CONFIG_SCSI_PROC_FS is not set + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +# CONFIG_CHR_DEV_ST is not set +# CONFIG_CHR_DEV_OSST is not set +# CONFIG_BLK_DEV_SR is not set +# CONFIG_CHR_DEV_SG is not set +# CONFIG_CHR_DEV_SCH is not set +# CONFIG_SCSI_CONSTANTS is not set +# CONFIG_SCSI_LOGGING is not set +# CONFIG_SCSI_SCAN_ASYNC is not set + +# +# SCSI Transports +# +# CONFIG_SCSI_SPI_ATTRS is not set +# CONFIG_SCSI_FC_ATTRS is not set +# CONFIG_SCSI_ISCSI_ATTRS is not set +CONFIG_SCSI_SAS_ATTRS=y +CONFIG_SCSI_SAS_LIBSAS=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y +# CONFIG_SCSI_SRP_ATTRS is not set +CONFIG_SCSI_LOWLEVEL=y +# CONFIG_ISCSI_TCP is not set +# CONFIG_ISCSI_BOOT_SYSFS is not set +# CONFIG_SCSI_CXGB3_ISCSI is not set +# CONFIG_SCSI_CXGB4_ISCSI is not set +# CONFIG_SCSI_BNX2_ISCSI is not set +# CONFIG_BE2ISCSI is not set +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +# CONFIG_SCSI_HPSA is not set +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +# CONFIG_SCSI_AACRAID is not set +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +CONFIG_SCSI_HISI_SAS=y +CONFIG_SCSI_HISI_SAS_PCI=y +# CONFIG_SCSI_MVSAS is not set +# CONFIG_SCSI_MVUMI is not set +# CONFIG_SCSI_ADVANSYS is not set +# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set +# CONFIG_MEGARAID_NEWGEN is not set +# CONFIG_MEGARAID_LEGACY is not set +# 
CONFIG_MEGARAID_SAS is not set +# CONFIG_SCSI_MPT3SAS is not set +# CONFIG_SCSI_MPT2SAS is not set +# CONFIG_SCSI_SMARTPQI is not set +CONFIG_SCSI_UFSHCD=m +# CONFIG_SCSI_UFSHCD_PCI is not set +CONFIG_SCSI_UFSHCD_PLATFORM=m +# CONFIG_SCSI_UFS_DWC_TC_PLATFORM is not set +CONFIG_SCSI_UFS_QCOM=m +CONFIG_SCSI_UFS_HISI=m +# CONFIG_SCSI_HPTIOP is not set +# CONFIG_XEN_SCSI_FRONTEND is not set +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +# CONFIG_SCSI_IPR is not set +# CONFIG_SCSI_QLOGIC_1280 is not set +# CONFIG_SCSI_QLA_ISCSI is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +# CONFIG_SCSI_DEBUG is not set +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set +# CONFIG_SCSI_VIRTIO is not set +# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set +# CONFIG_SCSI_DH is not set +# CONFIG_SCSI_OSD_INITIATOR is not set +CONFIG_HAVE_PATA_PLATFORM=y +CONFIG_ATA=y +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=y +CONFIG_SATA_MOBILE_LPM_POLICY=0 +CONFIG_SATA_AHCI_PLATFORM=y +# CONFIG_AHCI_BRCM is not set +CONFIG_AHCI_CEVA=y +# CONFIG_AHCI_MTK is not set +CONFIG_AHCI_MVEBU=y +# CONFIG_AHCI_SUNXI is not set +# CONFIG_AHCI_TEGRA is not set +CONFIG_AHCI_XGENE=y +CONFIG_AHCI_QORIQ=y +# CONFIG_SATA_AHCI_SEATTLE is not set +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +CONFIG_SATA_SIL24=y +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +# CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +# CONFIG_ATA_PIIX is not set +# CONFIG_SATA_DWC is not set +# CONFIG_SATA_MV is not set +# CONFIG_SATA_NV is not set +# 
CONFIG_SATA_PROMISE is not set +CONFIG_SATA_RCAR=y +# CONFIG_SATA_SIL is not set +# CONFIG_SATA_SIS is not set +# CONFIG_SATA_SVW is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is not set +# CONFIG_SATA_VITESSE is not set + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ARTOP is not set +# CONFIG_PATA_ATIIXP is not set +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set +# CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +# CONFIG_PATA_SCH is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +# CONFIG_PATA_SIS is not set +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +CONFIG_PATA_PLATFORM=y +CONFIG_PATA_OF_PLATFORM=y +# CONFIG_PATA_RZ1000 is not set + +# +# Generic fallback / legacy drivers +# +# CONFIG_ATA_GENERIC is not set +# CONFIG_PATA_LEGACY is not set +# CONFIG_MD is not set +# CONFIG_TARGET_CORE is not set +# CONFIG_FUSION is not set + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_FIREWIRE is not set +# CONFIG_FIREWIRE_NOSY is not set +CONFIG_NETDEVICES=y +CONFIG_MII=y +CONFIG_NET_CORE=y +# CONFIG_BONDING is 
not set +# CONFIG_DUMMY is not set +# CONFIG_EQUALIZER is not set +# CONFIG_NET_FC is not set +# CONFIG_NET_TEAM is not set +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +# CONFIG_IPVLAN is not set +# CONFIG_VXLAN is not set +# CONFIG_GENEVE is not set +# CONFIG_GTP is not set +# CONFIG_MACSEC is not set +# CONFIG_NETCONSOLE is not set +CONFIG_TUN=y +CONFIG_TAP=m +# CONFIG_TUN_VNET_CROSS_LE is not set +CONFIG_VETH=m +CONFIG_VIRTIO_NET=y +# CONFIG_NLMON is not set +# CONFIG_ARCNET is not set + +# +# CAIF transport drivers +# + +# +# Distributed Switch Architecture drivers +# +CONFIG_ETHERNET=y +CONFIG_NET_VENDOR_3COM=y +# CONFIG_VORTEX is not set +# CONFIG_TYPHOON is not set +CONFIG_NET_VENDOR_ADAPTEC=y +# CONFIG_ADAPTEC_STARFIRE is not set +CONFIG_NET_VENDOR_AGERE=y +# CONFIG_ET131X is not set +CONFIG_NET_VENDOR_ALACRITECH=y +# CONFIG_SLICOSS is not set +CONFIG_NET_VENDOR_ALLWINNER=y +CONFIG_SUN4I_EMAC=y +CONFIG_NET_VENDOR_ALTEON=y +# CONFIG_ACENIC is not set +# CONFIG_ALTERA_TSE is not set +CONFIG_NET_VENDOR_AMAZON=y +# CONFIG_ENA_ETHERNET is not set +CONFIG_NET_VENDOR_AMD=y +# CONFIG_AMD8111_ETH is not set +# CONFIG_PCNET32 is not set +CONFIG_AMD_XGBE=y +CONFIG_NET_XGENE=y +# CONFIG_NET_XGENE_V2 is not set +CONFIG_NET_VENDOR_AQUANTIA=y +CONFIG_NET_VENDOR_ARC=y +# CONFIG_EMAC_ROCKCHIP is not set +CONFIG_NET_VENDOR_ATHEROS=y +# CONFIG_ATL2 is not set +# CONFIG_ATL1 is not set +# CONFIG_ATL1E is not set +CONFIG_ATL1C=m +# CONFIG_ALX is not set +CONFIG_NET_VENDOR_AURORA=y +# CONFIG_AURORA_NB8800 is not set +CONFIG_NET_VENDOR_BROADCOM=y +# CONFIG_B44 is not set +# CONFIG_BCMGENET is not set +# CONFIG_BNX2 is not set +# CONFIG_CNIC is not set +# CONFIG_TIGON3 is not set +# CONFIG_BNX2X is not set +CONFIG_BGMAC=y +CONFIG_BGMAC_PLATFORM=y +# CONFIG_SYSTEMPORT is not set +# CONFIG_BNXT is not set +CONFIG_NET_VENDOR_BROCADE=y +# CONFIG_BNA is not set +CONFIG_NET_VENDOR_CADENCE=y +CONFIG_MACB=y +CONFIG_MACB_USE_HWSTAMP=y +# CONFIG_MACB_PCI is not set +CONFIG_NET_VENDOR_CAVIUM=y 
+CONFIG_THUNDER_NIC_PF=y +# CONFIG_THUNDER_NIC_VF is not set +CONFIG_THUNDER_NIC_BGX=y +CONFIG_THUNDER_NIC_RGX=y +CONFIG_CAVIUM_PTP=y +# CONFIG_LIQUIDIO is not set +# CONFIG_LIQUIDIO_VF is not set +CONFIG_NET_VENDOR_CHELSIO=y +# CONFIG_CHELSIO_T1 is not set +# CONFIG_CHELSIO_T3 is not set +# CONFIG_CHELSIO_T4 is not set +# CONFIG_CHELSIO_T4VF is not set +CONFIG_NET_VENDOR_CISCO=y +# CONFIG_ENIC is not set +CONFIG_NET_VENDOR_CORTINA=y +# CONFIG_GEMINI_ETHERNET is not set +# CONFIG_DNET is not set +CONFIG_NET_VENDOR_DEC=y +# CONFIG_NET_TULIP is not set +CONFIG_NET_VENDOR_DLINK=y +# CONFIG_DL2K is not set +# CONFIG_SUNDANCE is not set +CONFIG_NET_VENDOR_EMULEX=y +# CONFIG_BE2NET is not set +CONFIG_NET_VENDOR_EZCHIP=y +# CONFIG_EZCHIP_NPS_MANAGEMENT_ENET is not set +CONFIG_NET_VENDOR_FREESCALE=y +# CONFIG_FSL_FMAN is not set +# CONFIG_FSL_PQ_MDIO is not set +# CONFIG_FSL_XGMAC_MDIO is not set +# CONFIG_GIANFAR is not set +CONFIG_NET_VENDOR_HISILICON=y +CONFIG_HIX5HD2_GMAC=y +# CONFIG_HISI_FEMAC is not set +# CONFIG_HIP04_ETH is not set +CONFIG_HNS_MDIO=y +CONFIG_HNS=y +CONFIG_HNS_DSAF=y +CONFIG_HNS_ENET=y +# CONFIG_HNS3 is not set +CONFIG_NET_VENDOR_HP=y +# CONFIG_HP100 is not set +CONFIG_NET_VENDOR_HUAWEI=y +# CONFIG_HINIC is not set +CONFIG_NET_VENDOR_I825XX=y +CONFIG_NET_VENDOR_INTEL=y +# CONFIG_E100 is not set +# CONFIG_E1000 is not set +CONFIG_E1000E=y +CONFIG_IGB=y +CONFIG_IGB_HWMON=y +CONFIG_IGBVF=y +# CONFIG_IXGB is not set +# CONFIG_IXGBE is not set +# CONFIG_IXGBEVF is not set +# CONFIG_I40E is not set +# CONFIG_I40EVF is not set +# CONFIG_ICE is not set +# CONFIG_FM10K is not set +# CONFIG_JME is not set +CONFIG_NET_VENDOR_MARVELL=y +CONFIG_MVMDIO=y +CONFIG_MVNETA=y +CONFIG_MVPP2=y +# CONFIG_PXA168_ETH is not set +# CONFIG_SKGE is not set +CONFIG_SKY2=y +# CONFIG_SKY2_DEBUG is not set +# CONFIG_NET_VENDOR_MEDIATEK is not set +CONFIG_NET_VENDOR_MELLANOX=y +# CONFIG_MLX4_EN is not set +# CONFIG_MLX5_CORE is not set +# CONFIG_MLXSW_CORE is not set +# 
CONFIG_MLXFW is not set +CONFIG_NET_VENDOR_MICREL=y +# CONFIG_KS8842 is not set +# CONFIG_KS8851 is not set +# CONFIG_KS8851_MLL is not set +# CONFIG_KSZ884X_PCI is not set +CONFIG_NET_VENDOR_MICROCHIP=y +# CONFIG_ENC28J60 is not set +# CONFIG_ENCX24J600 is not set +# CONFIG_LAN743X is not set +CONFIG_NET_VENDOR_MICROSEMI=y +CONFIG_NET_VENDOR_MYRI=y +# CONFIG_MYRI10GE is not set +# CONFIG_FEALNX is not set +CONFIG_NET_VENDOR_NATSEMI=y +# CONFIG_NATSEMI is not set +# CONFIG_NS83820 is not set +CONFIG_NET_VENDOR_NETERION=y +# CONFIG_S2IO is not set +# CONFIG_VXGE is not set +CONFIG_NET_VENDOR_NETRONOME=y +# CONFIG_NFP is not set +CONFIG_NET_VENDOR_NI=y +CONFIG_NET_VENDOR_8390=y +# CONFIG_NE2K_PCI is not set +CONFIG_NET_VENDOR_NVIDIA=y +# CONFIG_FORCEDETH is not set +CONFIG_NET_VENDOR_OKI=y +# CONFIG_ETHOC is not set +CONFIG_NET_VENDOR_PACKET_ENGINES=y +# CONFIG_HAMACHI is not set +# CONFIG_YELLOWFIN is not set +CONFIG_NET_VENDOR_QLOGIC=y +# CONFIG_QLA3XXX is not set +# CONFIG_QLCNIC is not set +# CONFIG_QLGE is not set +# CONFIG_NETXEN_NIC is not set +# CONFIG_QED is not set +CONFIG_NET_VENDOR_QUALCOMM=y +# CONFIG_QCA7000_SPI is not set +# CONFIG_QCA7000_UART is not set +CONFIG_QCOM_EMAC=m +# CONFIG_RMNET is not set +CONFIG_NET_VENDOR_RDC=y +# CONFIG_R6040 is not set +CONFIG_NET_VENDOR_REALTEK=y +# CONFIG_8139CP is not set +# CONFIG_8139TOO is not set +# CONFIG_R8169 is not set +CONFIG_NET_VENDOR_RENESAS=y +# CONFIG_SH_ETH is not set +CONFIG_RAVB=y +CONFIG_NET_VENDOR_ROCKER=y +CONFIG_NET_VENDOR_SAMSUNG=y +# CONFIG_SXGBE_ETH is not set +CONFIG_NET_VENDOR_SEEQ=y +CONFIG_NET_VENDOR_SOLARFLARE=y +# CONFIG_SFC is not set +# CONFIG_SFC_FALCON is not set +CONFIG_NET_VENDOR_SILAN=y +# CONFIG_SC92031 is not set +CONFIG_NET_VENDOR_SIS=y +# CONFIG_SIS900 is not set +# CONFIG_SIS190 is not set +CONFIG_NET_VENDOR_SMSC=y +CONFIG_SMC91X=y +# CONFIG_EPIC100 is not set +CONFIG_SMSC911X=y +# CONFIG_SMSC9420 is not set +CONFIG_NET_VENDOR_SOCIONEXT=y +CONFIG_SNI_AVE=y 
+CONFIG_SNI_NETSEC=y +CONFIG_NET_VENDOR_STMICRO=y +CONFIG_STMMAC_ETH=y +CONFIG_STMMAC_PLATFORM=y +# CONFIG_DWMAC_DWC_QOS_ETH is not set +CONFIG_DWMAC_GENERIC=y +CONFIG_DWMAC_IPQ806X=y +CONFIG_DWMAC_MESON=y +CONFIG_DWMAC_ROCKCHIP=y +CONFIG_DWMAC_SOCFPGA=y +CONFIG_DWMAC_SUNXI=y +CONFIG_DWMAC_SUN8I=y +# CONFIG_STMMAC_PCI is not set +CONFIG_NET_VENDOR_SUN=y +# CONFIG_HAPPYMEAL is not set +# CONFIG_SUNGEM is not set +# CONFIG_CASSINI is not set +# CONFIG_NIU is not set +CONFIG_NET_VENDOR_SYNOPSYS=y +# CONFIG_DWC_XLGMAC is not set +CONFIG_NET_VENDOR_TEHUTI=y +# CONFIG_TEHUTI is not set +CONFIG_NET_VENDOR_TI=y +# CONFIG_TI_CPSW_ALE is not set +# CONFIG_TLAN is not set +CONFIG_NET_VENDOR_VIA=y +# CONFIG_VIA_RHINE is not set +# CONFIG_VIA_VELOCITY is not set +CONFIG_NET_VENDOR_WIZNET=y +# CONFIG_WIZNET_W5100 is not set +# CONFIG_WIZNET_W5300 is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +CONFIG_MDIO_DEVICE=y +CONFIG_MDIO_BUS=y +# CONFIG_MDIO_BCM_IPROC is not set +# CONFIG_MDIO_BCM_UNIMAC is not set +CONFIG_MDIO_BITBANG=y +CONFIG_MDIO_BUS_MUX=y +CONFIG_MDIO_BUS_MUX_BCM_IPROC=y +# CONFIG_MDIO_BUS_MUX_GPIO is not set +CONFIG_MDIO_BUS_MUX_MMIOREG=y +CONFIG_MDIO_CAVIUM=y +# CONFIG_MDIO_GPIO is not set +# CONFIG_MDIO_HISI_FEMAC is not set +# CONFIG_MDIO_MSCC_MIIM is not set +# CONFIG_MDIO_OCTEON is not set +CONFIG_MDIO_SUN4I=y +CONFIG_MDIO_THUNDER=y +CONFIG_MDIO_XGENE=y +CONFIG_PHYLINK=y +CONFIG_PHYLIB=y +CONFIG_SWPHY=y +# CONFIG_LED_TRIGGER_PHY is not set + +# +# MII PHY device drivers +# +# CONFIG_SFP is not set +# CONFIG_AMD_PHY is not set +# CONFIG_AQUANTIA_PHY is not set +# CONFIG_AX88796B_PHY is not set +CONFIG_AT803X_PHY=m +# CONFIG_BCM7XXX_PHY is not set +# CONFIG_BCM87XX_PHY is not set +# CONFIG_BROADCOM_PHY is not set +# CONFIG_CICADA_PHY is not set +# CONFIG_CORTINA_PHY is not set +# CONFIG_DAVICOM_PHY is not set +# CONFIG_DP83822_PHY is not set +# CONFIG_DP83TC811_PHY is not set +# CONFIG_DP83848_PHY is not set +# CONFIG_DP83867_PHY is not set 
+CONFIG_FIXED_PHY=y +# CONFIG_ICPLUS_PHY is not set +# CONFIG_INTEL_XWAY_PHY is not set +# CONFIG_LSI_ET1011C_PHY is not set +# CONFIG_LXT_PHY is not set +CONFIG_MARVELL_PHY=m +CONFIG_MARVELL_10G_PHY=m +CONFIG_MESON_GXL_PHY=m +CONFIG_MICREL_PHY=y +CONFIG_MICROCHIP_PHY=m +# CONFIG_MICROCHIP_T1_PHY is not set +# CONFIG_MICROSEMI_PHY is not set +# CONFIG_NATIONAL_PHY is not set +# CONFIG_QSEMI_PHY is not set +CONFIG_REALTEK_PHY=m +# CONFIG_RENESAS_PHY is not set +CONFIG_ROCKCHIP_PHY=y +# CONFIG_SMSC_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_TERANETICS_PHY is not set +# CONFIG_VITESSE_PHY is not set +# CONFIG_XILINX_GMII2RGMII is not set +# CONFIG_MICREL_KS8995MA is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +CONFIG_USB_NET_DRIVERS=y +# CONFIG_USB_CATC is not set +# CONFIG_USB_KAWETH is not set +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +CONFIG_USB_LAN78XX=m +CONFIG_USB_USBNET=m +CONFIG_USB_NET_AX8817X=m +CONFIG_USB_NET_AX88179_178A=m +CONFIG_USB_NET_CDCETHER=m +# CONFIG_USB_NET_CDC_EEM is not set +CONFIG_USB_NET_CDC_NCM=m +# CONFIG_USB_NET_HUAWEI_CDC_NCM is not set +# CONFIG_USB_NET_CDC_MBIM is not set +CONFIG_USB_NET_DM9601=m +# CONFIG_USB_NET_SR9700 is not set +CONFIG_USB_NET_SR9800=m +CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +# CONFIG_USB_NET_GL620A is not set +CONFIG_USB_NET_NET1080=m +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_MCS7830=m +# CONFIG_USB_NET_RNDIS_HOST is not set +CONFIG_USB_NET_CDC_SUBSET_ENABLE=m +CONFIG_USB_NET_CDC_SUBSET=m +# CONFIG_USB_ALI_M5632 is not set +# CONFIG_USB_AN2720 is not set +CONFIG_USB_BELKIN=y +CONFIG_USB_ARMLINUX=y +# CONFIG_USB_EPSON2888 is not set +# CONFIG_USB_KC2190 is not set +CONFIG_USB_NET_ZAURUS=m +# CONFIG_USB_NET_CX82310_ETH is not set +# CONFIG_USB_NET_KALMIA is not set +# CONFIG_USB_NET_QMI_WWAN is not set +# CONFIG_USB_HSO is not set +# CONFIG_USB_NET_INT51X1 is not set +# CONFIG_USB_IPHETH is not set +# CONFIG_USB_SIERRA_NET is not set +# CONFIG_USB_VL600 is 
not set +# CONFIG_USB_NET_CH9200 is not set +CONFIG_WLAN=y +# CONFIG_WIRELESS_WDS is not set +CONFIG_WLAN_VENDOR_ADMTEK=y +# CONFIG_ADM8211 is not set +CONFIG_ATH_COMMON=m +CONFIG_WLAN_VENDOR_ATH=y +# CONFIG_ATH_DEBUG is not set +# CONFIG_ATH5K is not set +# CONFIG_ATH5K_PCI is not set +# CONFIG_ATH9K is not set +# CONFIG_ATH9K_HTC is not set +# CONFIG_CARL9170 is not set +# CONFIG_ATH6KL is not set +# CONFIG_AR5523 is not set +# CONFIG_WIL6210 is not set +CONFIG_ATH10K=m +CONFIG_ATH10K_CE=y +CONFIG_ATH10K_PCI=m +# CONFIG_ATH10K_AHB is not set +# CONFIG_ATH10K_SDIO is not set +# CONFIG_ATH10K_USB is not set +# CONFIG_ATH10K_SNOC is not set +# CONFIG_ATH10K_DEBUG is not set +# CONFIG_ATH10K_DEBUGFS is not set +# CONFIG_WCN36XX is not set +CONFIG_WLAN_VENDOR_ATMEL=y +# CONFIG_ATMEL is not set +# CONFIG_AT76C50X_USB is not set +CONFIG_WLAN_VENDOR_BROADCOM=y +# CONFIG_B43 is not set +# CONFIG_B43LEGACY is not set +CONFIG_BRCMUTIL=m +# CONFIG_BRCMSMAC is not set +CONFIG_BRCMFMAC=m +CONFIG_BRCMFMAC_PROTO_BCDC=y +CONFIG_BRCMFMAC_SDIO=y +# CONFIG_BRCMFMAC_USB is not set +# CONFIG_BRCMFMAC_PCIE is not set +# CONFIG_BRCM_TRACING is not set +# CONFIG_BRCMDBG is not set +CONFIG_WLAN_VENDOR_CISCO=y +CONFIG_WLAN_VENDOR_INTEL=y +# CONFIG_IPW2100 is not set +# CONFIG_IPW2200 is not set +# CONFIG_IWL4965 is not set +# CONFIG_IWL3945 is not set +# CONFIG_IWLWIFI is not set +CONFIG_WLAN_VENDOR_INTERSIL=y +# CONFIG_HOSTAP is not set +# CONFIG_HERMES is not set +# CONFIG_P54_COMMON is not set +# CONFIG_PRISM54 is not set +CONFIG_WLAN_VENDOR_MARVELL=y +# CONFIG_LIBERTAS is not set +# CONFIG_LIBERTAS_THINFIRM is not set +CONFIG_MWIFIEX=m +# CONFIG_MWIFIEX_SDIO is not set +CONFIG_MWIFIEX_PCIE=m +# CONFIG_MWIFIEX_USB is not set +# CONFIG_MWL8K is not set +CONFIG_WLAN_VENDOR_MEDIATEK=y +# CONFIG_MT7601U is not set +# CONFIG_MT76x0U is not set +# CONFIG_MT76x2E is not set +# CONFIG_MT76x2U is not set +CONFIG_WLAN_VENDOR_RALINK=y +# CONFIG_RT2X00 is not set +CONFIG_WLAN_VENDOR_REALTEK=y +# 
CONFIG_RTL8180 is not set +# CONFIG_RTL8187 is not set +CONFIG_RTL_CARDS=m +# CONFIG_RTL8192CE is not set +# CONFIG_RTL8192SE is not set +# CONFIG_RTL8192DE is not set +# CONFIG_RTL8723AE is not set +# CONFIG_RTL8723BE is not set +# CONFIG_RTL8188EE is not set +# CONFIG_RTL8192EE is not set +# CONFIG_RTL8821AE is not set +# CONFIG_RTL8192CU is not set +# CONFIG_RTL8XXXU is not set +CONFIG_WLAN_VENDOR_RSI=y +# CONFIG_RSI_91X is not set +CONFIG_WLAN_VENDOR_ST=y +# CONFIG_CW1200 is not set +CONFIG_WLAN_VENDOR_TI=y +# CONFIG_WL1251 is not set +# CONFIG_WL12XX is not set +CONFIG_WL18XX=m +CONFIG_WLCORE=m +# CONFIG_WLCORE_SPI is not set +CONFIG_WLCORE_SDIO=m +CONFIG_WILINK_PLATFORM_DATA=y +CONFIG_WLAN_VENDOR_ZYDAS=y +# CONFIG_USB_ZD1201 is not set +# CONFIG_ZD1211RW is not set +CONFIG_WLAN_VENDOR_QUANTENNA=y +# CONFIG_QTNFMAC_PEARL_PCIE is not set +# CONFIG_MAC80211_HWSIM is not set +# CONFIG_USB_NET_RNDIS_WLAN is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set +CONFIG_XEN_NETDEV_FRONTEND=y +# CONFIG_XEN_NETDEV_BACKEND is not set +# CONFIG_VMXNET3 is not set +# CONFIG_NETDEVSIM is not set +CONFIG_NET_FAILOVER=y +# CONFIG_ISDN is not set +# CONFIG_NVM is not set + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_LEDS=y +# CONFIG_INPUT_FF_MEMLESS is not set +CONFIG_INPUT_POLLDEV=m +# CONFIG_INPUT_SPARSEKMAP is not set +CONFIG_INPUT_MATRIXKMAP=y + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +CONFIG_KEYBOARD_ADC=m +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +CONFIG_KEYBOARD_GPIO=y +# CONFIG_KEYBOARD_GPIO_POLLED is not set +# 
CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8323 is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_TEGRA is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_SH_KEYSC is not set +# CONFIG_KEYBOARD_SUN4I_LRADC is not set +# CONFIG_KEYBOARD_OMAP4 is not set +# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set +# CONFIG_KEYBOARD_XTKBD is not set +CONFIG_KEYBOARD_CROS_EC=y +# CONFIG_KEYBOARD_CAP11XX is not set +# CONFIG_KEYBOARD_BCM is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_PROPERTIES=y +# CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_AD7877 is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_ADC is not set +# CONFIG_TOUCHSCREEN_AR1021_I2C is not set +CONFIG_TOUCHSCREEN_ATMEL_MXT=m +# CONFIG_TOUCHSCREEN_ATMEL_MXT_T37 is not set +# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_BU21029 is not set +# CONFIG_TOUCHSCREEN_CHIPONE_ICN8318 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set +# CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_EGALAX is not set +# CONFIG_TOUCHSCREEN_EGALAX_SERIAL is not set +# CONFIG_TOUCHSCREEN_EXC3000 is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GOODIX is not set +# CONFIG_TOUCHSCREEN_HIDEEP is not set +# CONFIG_TOUCHSCREEN_ILI210X is not set +# 
CONFIG_TOUCHSCREEN_IPROC is not set +# CONFIG_TOUCHSCREEN_S6SY761 is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_EKTF2127 is not set +# CONFIG_TOUCHSCREEN_ELAN is not set +# CONFIG_TOUCHSCREEN_ELO is not set +# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set +# CONFIG_TOUCHSCREEN_WACOM_I2C is not set +# CONFIG_TOUCHSCREEN_MAX11801 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MMS114 is not set +# CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_IMX6UL_TSC is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_MK712 is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_PIXCIR is not set +# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set +# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC_SERIO is not set +# CONFIG_TOUCHSCREEN_TSC2004 is not set +# CONFIG_TOUCHSCREEN_TSC2005 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_RM_TS is not set +# CONFIG_TOUCHSCREEN_SILEAD is not set +# CONFIG_TOUCHSCREEN_SIS_I2C is not set +# CONFIG_TOUCHSCREEN_ST1232 is not set +# CONFIG_TOUCHSCREEN_STMFTS is not set +# CONFIG_TOUCHSCREEN_SUN4I is not set +# CONFIG_TOUCHSCREEN_SUR40 is not set +# CONFIG_TOUCHSCREEN_SURFACE3_SPI is not set +# CONFIG_TOUCHSCREEN_SX8654 is not set +# CONFIG_TOUCHSCREEN_TPS6507X is not set +# CONFIG_TOUCHSCREEN_ZET6223 is not set +# CONFIG_TOUCHSCREEN_ZFORCE is not set +# CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ATMEL_CAPTOUCH is not set +# CONFIG_INPUT_BMA150 is not set +# CONFIG_INPUT_E3X0_BUTTON is not set +CONFIG_INPUT_PM8941_PWRKEY=y +# CONFIG_INPUT_PM8XXX_VIBRATOR is not set +# CONFIG_INPUT_MMA8450 is not set +# CONFIG_INPUT_GP2A 
is not set +# CONFIG_INPUT_GPIO_BEEPER is not set +# CONFIG_INPUT_GPIO_DECODER is not set +# CONFIG_INPUT_ATI_REMOTE2 is not set +# CONFIG_INPUT_KEYSPAN_REMOTE is not set +# CONFIG_INPUT_KXTJ9 is not set +# CONFIG_INPUT_POWERMATE is not set +# CONFIG_INPUT_YEALINK is not set +# CONFIG_INPUT_CM109 is not set +# CONFIG_INPUT_REGULATOR_HAPTIC is not set +# CONFIG_INPUT_AXP20X_PEK is not set +CONFIG_INPUT_UINPUT=y +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_PWM_BEEPER is not set +# CONFIG_INPUT_PWM_VIBRA is not set +# CONFIG_INPUT_RK805_PWRKEY is not set +# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_IMS_PCU is not set +# CONFIG_INPUT_CMA3000 is not set +CONFIG_INPUT_XEN_KBDDEV_FRONTEND=y +# CONFIG_INPUT_SOC_BUTTON_ARRAY is not set +# CONFIG_INPUT_DRV260X_HAPTICS is not set +# CONFIG_INPUT_DRV2665_HAPTICS is not set +# CONFIG_INPUT_DRV2667_HAPTICS is not set +CONFIG_INPUT_HISI_POWERKEY=y +# CONFIG_RMI4_CORE is not set + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +# CONFIG_SERIO_SERPORT is not set +CONFIG_SERIO_AMBAKMI=y +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +# CONFIG_SERIO_RAW is not set +# CONFIG_SERIO_ALTERA_PS2 is not set +# CONFIG_SERIO_PS2MULT is not set +# CONFIG_SERIO_ARC_PS2 is not set +# CONFIG_SERIO_APBPS2 is not set +# CONFIG_SERIO_SUN4I_PS2 is not set +# CONFIG_SERIO_GPIO_PS2 is not set +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=16 +# CONFIG_SERIAL_NONSTANDARD is not set +# CONFIG_NOZOMI is not set +# CONFIG_N_GSM is not set +# CONFIG_TRACE_SINK is not set +# CONFIG_LDISC_AUTOLOAD is not set +CONFIG_DEVMEM=y + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y 
+CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=y +CONFIG_SERIAL_8250_NR_UARTS=4 +CONFIG_SERIAL_8250_RUNTIME_UARTS=4 +CONFIG_SERIAL_8250_EXTENDED=y +# CONFIG_SERIAL_8250_MANY_PORTS is not set +# CONFIG_SERIAL_8250_ASPEED_VUART is not set +CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set +# CONFIG_SERIAL_8250_RSA is not set +CONFIG_SERIAL_8250_BCM2835AUX=y +CONFIG_SERIAL_8250_FSL=y +CONFIG_SERIAL_8250_DW=y +# CONFIG_SERIAL_8250_RT288X is not set +CONFIG_SERIAL_8250_MT6577=y +CONFIG_SERIAL_8250_UNIPHIER=y +# CONFIG_SERIAL_8250_MOXA is not set +CONFIG_SERIAL_OF_PLATFORM=y + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_AMBA_PL010 is not set +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +# CONFIG_SERIAL_EARLYCON_ARM_SEMIHOST is not set +CONFIG_SERIAL_MESON=y +CONFIG_SERIAL_MESON_CONSOLE=y +CONFIG_SERIAL_SAMSUNG=y +CONFIG_SERIAL_SAMSUNG_UARTS_4=y +CONFIG_SERIAL_SAMSUNG_UARTS=4 +CONFIG_SERIAL_SAMSUNG_CONSOLE=y +CONFIG_SERIAL_TEGRA=y +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX310X is not set +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_SH_SCI=y +CONFIG_SERIAL_SH_SCI_NR_UARTS=18 +CONFIG_SERIAL_SH_SCI_CONSOLE=y +CONFIG_SERIAL_SH_SCI_EARLYCON=y +CONFIG_SERIAL_SH_SCI_DMA=y +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +# CONFIG_SERIAL_JSM is not set +CONFIG_SERIAL_MSM=y +CONFIG_SERIAL_MSM_CONSOLE=y +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_IFX6X60 is not set +CONFIG_SERIAL_XILINX_PS_UART=y +CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y +# CONFIG_SERIAL_ARC is not set +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set +# CONFIG_SERIAL_SPRD is not set 
+CONFIG_SERIAL_MVEBU_UART=y +CONFIG_SERIAL_MVEBU_CONSOLE=y +CONFIG_SERIAL_MCTRL_GPIO=y +CONFIG_SERIAL_DEV_BUS=y +CONFIG_SERIAL_DEV_CTRL_TTYPORT=y +# CONFIG_TTY_PRINTK is not set +CONFIG_HVC_DRIVER=y +CONFIG_HVC_IRQ=y +CONFIG_HVC_XEN=y +CONFIG_HVC_XEN_FRONTEND=y +# CONFIG_HVC_DCC is not set +CONFIG_VIRTIO_CONSOLE=y +# CONFIG_IPMI_HANDLER is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_APPLICOM is not set + +# +# PCMCIA character devices +# +# CONFIG_RAW_DRIVER is not set +CONFIG_TCG_TPM=y +# CONFIG_TCG_TIS is not set +# CONFIG_TCG_TIS_SPI is not set +# CONFIG_TCG_TIS_I2C_ATMEL is not set +CONFIG_TCG_TIS_I2C_INFINEON=y +# CONFIG_TCG_TIS_I2C_NUVOTON is not set +# CONFIG_TCG_ATMEL is not set +# CONFIG_TCG_XEN is not set +# CONFIG_TCG_VTPM_PROXY is not set +# CONFIG_TCG_TIS_ST33ZP24_I2C is not set +# CONFIG_TCG_TIS_ST33ZP24_SPI is not set +CONFIG_DEVPORT=y +# CONFIG_XILLYBUS is not set + +# +# I2C support +# +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +# CONFIG_I2C_COMPAT is not set +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX=y + +# +# Multiplexer I2C Chip support +# +# CONFIG_I2C_ARB_GPIO_CHALLENGE is not set +# CONFIG_I2C_MUX_GPIO is not set +# CONFIG_I2C_MUX_GPMUX is not set +# CONFIG_I2C_MUX_LTC4306 is not set +# CONFIG_I2C_MUX_PCA9541 is not set +CONFIG_I2C_MUX_PCA954x=y +# CONFIG_I2C_MUX_PINCTRL is not set +# CONFIG_I2C_MUX_REG is not set +# CONFIG_I2C_DEMUX_PINCTRL is not set +# CONFIG_I2C_MUX_MLXCPLD is not set +# CONFIG_I2C_HELPER_AUTO is not set +# CONFIG_I2C_SMBUS is not set + +# +# I2C Algorithms +# +CONFIG_I2C_ALGOBIT=y +# CONFIG_I2C_ALGOPCF is not set +# CONFIG_I2C_ALGOPCA is not set + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_HIX5HD2 is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_ISCH is not set +# CONFIG_I2C_PIIX4 is not set +# 
CONFIG_I2C_NFORCE2 is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +CONFIG_I2C_BCM2835=m +CONFIG_I2C_BCM_IPROC=y +CONFIG_I2C_BRCMSTB=y +# CONFIG_I2C_CADENCE is not set +# CONFIG_I2C_CBUS_GPIO is not set +CONFIG_I2C_DESIGNWARE_CORE=y +CONFIG_I2C_DESIGNWARE_PLATFORM=y +# CONFIG_I2C_DESIGNWARE_SLAVE is not set +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_EMEV2 is not set +CONFIG_I2C_EXYNOS5=y +# CONFIG_I2C_GPIO is not set +CONFIG_I2C_IMX=y +CONFIG_I2C_MESON=y +# CONFIG_I2C_MT65XX is not set +CONFIG_I2C_MV64XXX=y +# CONFIG_I2C_NOMADIK is not set +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_OMAP is not set +# CONFIG_I2C_PCA_PLATFORM is not set +CONFIG_I2C_PXA=y +# CONFIG_I2C_PXA_SLAVE is not set +CONFIG_I2C_QUP=y +# CONFIG_I2C_RIIC is not set +CONFIG_I2C_RK3X=y +CONFIG_I2C_SH_MOBILE=y +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_SPRD is not set +# CONFIG_I2C_SYNQUACER is not set +CONFIG_I2C_TEGRA=y +CONFIG_I2C_TEGRA_BPMP=y +# CONFIG_I2C_UNIPHIER is not set +CONFIG_I2C_UNIPHIER_F=y +# CONFIG_I2C_VERSATILE is not set +# CONFIG_I2C_THUNDERX is not set +# CONFIG_I2C_XILINX is not set +# CONFIG_I2C_XLP9XX is not set +CONFIG_I2C_RCAR=y + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_DIOLAN_U2C is not set +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set +# CONFIG_I2C_TINY_USB is not set + +# +# Other I2C/SMBus bus drivers +# +CONFIG_I2C_CROS_EC_TUNNEL=y +# CONFIG_I2C_XGENE_SLIMPRO is not set +CONFIG_I2C_ZX2967=y +# CONFIG_I2C_STUB is not set +CONFIG_I2C_SLAVE=y +# CONFIG_I2C_SLAVE_EEPROM is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y +CONFIG_SPI_MEM=y + +# +# SPI Master 
Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +CONFIG_SPI_ARMADA_3700=y +# CONFIG_SPI_AXI_SPI_ENGINE is not set +CONFIG_SPI_BCM2835=m +CONFIG_SPI_BCM2835AUX=m +CONFIG_SPI_BCM_QSPI=y +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_CADENCE is not set +# CONFIG_SPI_DESIGNWARE is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_FSL_SPI is not set +# CONFIG_SPI_FSL_DSPI is not set +CONFIG_SPI_MESON_SPICC=m +CONFIG_SPI_MESON_SPIFC=m +# CONFIG_SPI_MT65XX is not set +# CONFIG_SPI_OC_TINY is not set +CONFIG_SPI_ORION=y +CONFIG_SPI_PL022=y +# CONFIG_SPI_PXA2XX is not set +CONFIG_SPI_ROCKCHIP=y +# CONFIG_SPI_RSPI is not set +CONFIG_SPI_QUP=y +CONFIG_SPI_S3C64XX=y +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_SH_MSIOF is not set +# CONFIG_SPI_SH_HSPI is not set +# CONFIG_SPI_SPRD_ADI is not set +# CONFIG_SPI_SUN4I is not set +# CONFIG_SPI_SUN6I is not set +# CONFIG_SPI_TEGRA114 is not set +# CONFIG_SPI_TEGRA20_SFLASH is not set +# CONFIG_SPI_TEGRA20_SLINK is not set +# CONFIG_SPI_THUNDERX is not set +# CONFIG_SPI_UNIPHIER is not set +# CONFIG_SPI_XCOMM is not set +# CONFIG_SPI_XILINX is not set +# CONFIG_SPI_XLP is not set +# CONFIG_SPI_ZYNQMP_GQSPI is not set + +# +# SPI Protocol Masters +# +CONFIG_SPI_SPIDEV=m +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPI_SLAVE is not set +CONFIG_SPI_DYNAMIC=y +CONFIG_SPMI=y +CONFIG_SPMI_MSM_PMIC_ARB=y +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +# CONFIG_PPS_CLIENT_LDISC is not set +# CONFIG_PPS_CLIENT_GPIO is not set + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y +CONFIG_PTP_1588_CLOCK_DTE=y + +# +# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks. 
+# +CONFIG_PINCTRL=y +CONFIG_GENERIC_PINCTRL_GROUPS=y +CONFIG_PINMUX=y +CONFIG_GENERIC_PINMUX_FUNCTIONS=y +CONFIG_PINCONF=y +CONFIG_GENERIC_PINCONF=y +# CONFIG_DEBUG_PINCTRL is not set +# CONFIG_PINCTRL_AXP209 is not set +# CONFIG_PINCTRL_AMD is not set +# CONFIG_PINCTRL_MCP23S08 is not set +CONFIG_PINCTRL_ROCKCHIP=y +CONFIG_PINCTRL_SINGLE=y +# CONFIG_PINCTRL_SX150X is not set +CONFIG_PINCTRL_MAX77620=y +# CONFIG_PINCTRL_RK805 is not set +CONFIG_PINCTRL_BCM2835=y +CONFIG_PINCTRL_IPROC_GPIO=y +CONFIG_PINCTRL_NS2_MUX=y +# CONFIG_PINCTRL_AS370 is not set +# CONFIG_PINCTRL_BERLIN_BG4CT is not set +CONFIG_PINCTRL_MVEBU=y +CONFIG_PINCTRL_ARMADA_AP806=y +CONFIG_PINCTRL_ARMADA_CP110=y +CONFIG_PINCTRL_ARMADA_37XX=y +CONFIG_PINCTRL_MSM=y +# CONFIG_PINCTRL_APQ8064 is not set +# CONFIG_PINCTRL_APQ8084 is not set +# CONFIG_PINCTRL_IPQ4019 is not set +# CONFIG_PINCTRL_IPQ8064 is not set +CONFIG_PINCTRL_IPQ8074=y +# CONFIG_PINCTRL_MSM8660 is not set +# CONFIG_PINCTRL_MSM8960 is not set +# CONFIG_PINCTRL_MDM9615 is not set +# CONFIG_PINCTRL_MSM8X74 is not set +CONFIG_PINCTRL_MSM8916=y +CONFIG_PINCTRL_MSM8994=y +CONFIG_PINCTRL_MSM8996=y +# CONFIG_PINCTRL_MSM8998 is not set +CONFIG_PINCTRL_QCOM_SPMI_PMIC=y +# CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set +# CONFIG_PINCTRL_SDM845 is not set +CONFIG_PINCTRL_SAMSUNG=y +CONFIG_PINCTRL_EXYNOS=y +CONFIG_PINCTRL_EXYNOS_ARM64=y +CONFIG_PINCTRL_SH_PFC=y +CONFIG_PINCTRL_PFC_R8A7795=y +CONFIG_PINCTRL_PFC_R8A7796=y +CONFIG_PINCTRL_PFC_R8A77965=y +CONFIG_PINCTRL_PFC_R8A77970=y +CONFIG_PINCTRL_PFC_R8A77980=y +CONFIG_PINCTRL_PFC_R8A77990=y +CONFIG_PINCTRL_PFC_R8A77995=y +# CONFIG_PINCTRL_SPRD is not set +CONFIG_PINCTRL_SUNXI=y +CONFIG_PINCTRL_SUN8I_H3_R=y +CONFIG_PINCTRL_SUN50I_A64=y +CONFIG_PINCTRL_SUN50I_A64_R=y +CONFIG_PINCTRL_SUN50I_H5=y +CONFIG_PINCTRL_SUN50I_H6=y +CONFIG_PINCTRL_SUN50I_H6_R=y +CONFIG_PINCTRL_TEGRA=y +CONFIG_PINCTRL_TEGRA124=y +CONFIG_PINCTRL_TEGRA210=y +CONFIG_PINCTRL_TEGRA_XUSB=y +CONFIG_PINCTRL_UNIPHIER=y +# 
CONFIG_PINCTRL_UNIPHIER_LD4 is not set +# CONFIG_PINCTRL_UNIPHIER_PRO4 is not set +# CONFIG_PINCTRL_UNIPHIER_SLD8 is not set +# CONFIG_PINCTRL_UNIPHIER_PRO5 is not set +# CONFIG_PINCTRL_UNIPHIER_PXS2 is not set +# CONFIG_PINCTRL_UNIPHIER_LD6B is not set +CONFIG_PINCTRL_UNIPHIER_LD11=y +CONFIG_PINCTRL_UNIPHIER_LD20=y +CONFIG_PINCTRL_UNIPHIER_PXS3=y + +# +# MediaTek pinctrl drivers +# +CONFIG_EINT_MTK=y +CONFIG_PINCTRL_MTK=y +CONFIG_PINCTRL_MT2712=y +CONFIG_PINCTRL_MT7622=y +CONFIG_PINCTRL_MT8173=y +# CONFIG_PINCTRL_ZX296718 is not set +CONFIG_PINCTRL_MESON=y +CONFIG_PINCTRL_MESON_GXBB=y +CONFIG_PINCTRL_MESON_GXL=y +CONFIG_PINCTRL_MESON8_PMX=y +CONFIG_PINCTRL_MESON_AXG=y +CONFIG_PINCTRL_MESON_AXG_PMX=y +CONFIG_GPIOLIB=y +CONFIG_GPIOLIB_FASTPATH_LIMIT=512 +CONFIG_OF_GPIO=y +CONFIG_GPIOLIB_IRQCHIP=y +# CONFIG_DEBUG_GPIO is not set +# CONFIG_GPIO_SYSFS is not set +CONFIG_GPIO_GENERIC=y + +# +# Memory mapped GPIO drivers +# +# CONFIG_GPIO_74XX_MMIO is not set +# CONFIG_GPIO_ALTERA is not set +CONFIG_GPIO_RASPBERRYPI_EXP=y +CONFIG_GPIO_BRCMSTB=y +CONFIG_GPIO_DWAPB=y +# CONFIG_GPIO_EIC_SPRD is not set +# CONFIG_GPIO_EXAR is not set +# CONFIG_GPIO_FTGPIO010 is not set +CONFIG_GPIO_GENERIC_PLATFORM=y +# CONFIG_GPIO_GRGPIO is not set +# CONFIG_GPIO_HLWD is not set +CONFIG_GPIO_MB86S7X=y +# CONFIG_GPIO_MOCKUP is not set +# CONFIG_GPIO_MPC8XXX is not set +CONFIG_GPIO_MVEBU=y +CONFIG_GPIO_PL061=y +CONFIG_GPIO_RCAR=y +# CONFIG_GPIO_SPRD is not set +# CONFIG_GPIO_SYSCON is not set +CONFIG_GPIO_TEGRA=y +CONFIG_GPIO_TEGRA186=y +# CONFIG_GPIO_THUNDERX is not set +CONFIG_GPIO_UNIPHIER=y +CONFIG_GPIO_XGENE=y +CONFIG_GPIO_XGENE_SB=y +# CONFIG_GPIO_XILINX is not set +# CONFIG_GPIO_XLP is not set +# CONFIG_GPIO_ZYNQ is not set +# CONFIG_GPIO_ZX is not set + +# +# I2C GPIO expanders +# +# CONFIG_GPIO_ADP5588 is not set +# CONFIG_GPIO_ADNP is not set +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +CONFIG_GPIO_PCA953X=y +CONFIG_GPIO_PCA953X_IRQ=y +# CONFIG_GPIO_PCF857X 
is not set +# CONFIG_GPIO_TPIC2810 is not set + +# +# MFD GPIO expanders +# +# CONFIG_GPIO_BD9571MWV is not set +CONFIG_GPIO_MAX77620=y + +# +# PCI GPIO expanders +# +# CONFIG_GPIO_BT8XX is not set +# CONFIG_GPIO_PCI_IDIO_16 is not set +# CONFIG_GPIO_PCIE_IDIO_24 is not set +# CONFIG_GPIO_RDC321X is not set + +# +# SPI GPIO expanders +# +# CONFIG_GPIO_74X164 is not set +# CONFIG_GPIO_MAX3191X is not set +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_PISOSR is not set +# CONFIG_GPIO_XRA1403 is not set + +# +# USB GPIO expanders +# +# CONFIG_W1 is not set +CONFIG_POWER_AVS=y +CONFIG_ROCKCHIP_IODOMAIN=y +CONFIG_POWER_RESET=y +CONFIG_POWER_RESET_BRCMSTB=y +# CONFIG_POWER_RESET_GPIO is not set +# CONFIG_POWER_RESET_GPIO_RESTART is not set +# CONFIG_POWER_RESET_HISI is not set +CONFIG_POWER_RESET_MSM=y +# CONFIG_POWER_RESET_QCOM_PON is not set +# CONFIG_POWER_RESET_LTC2952 is not set +# CONFIG_POWER_RESET_RESTART is not set +CONFIG_POWER_RESET_VEXPRESS=y +CONFIG_POWER_RESET_XGENE=y +CONFIG_POWER_RESET_SYSCON=y +# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set +# CONFIG_POWER_RESET_ZX is not set +CONFIG_REBOOT_MODE=y +CONFIG_SYSCON_REBOOT_MODE=y +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_GENERIC_ADC_BATTERY is not set +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_LEGO_EV3 is not set +CONFIG_BATTERY_SBS=m +# CONFIG_CHARGER_SBS is not set +# CONFIG_MANAGER_SBS is not set +CONFIG_BATTERY_BQ27XXX=y +CONFIG_BATTERY_BQ27XXX_I2C=y +# CONFIG_BATTERY_BQ27XXX_DT_UPDATES_NVM is not set +# CONFIG_AXP20X_POWER is not set +# CONFIG_AXP288_FUEL_GAUGE is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_ISP1704 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# 
CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_MANAGER is not set +# CONFIG_CHARGER_LTC3651 is not set +# CONFIG_CHARGER_DETECTOR_MAX14656 is not set +# CONFIG_CHARGER_QCOM_SMBB is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24190 is not set +# CONFIG_CHARGER_BQ24257 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_BQ25890 is not set +# CONFIG_CHARGER_SMB347 is not set +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# CONFIG_CHARGER_RT9455 is not set +# CONFIG_CHARGER_CROS_USBPD is not set +CONFIG_HWMON=y +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +# CONFIG_SENSORS_AD7314 is not set +# CONFIG_SENSORS_AD7414 is not set +# CONFIG_SENSORS_AD7418 is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1029 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7310 is not set +# CONFIG_SENSORS_ADT7410 is not set +# CONFIG_SENSORS_ADT7411 is not set +# CONFIG_SENSORS_ADT7462 is not set +# CONFIG_SENSORS_ADT7470 is not set +# CONFIG_SENSORS_ADT7475 is not set +# CONFIG_SENSORS_ASC7621 is not set +# CONFIG_SENSORS_ARM_SCPI is not set +# CONFIG_SENSORS_ASPEED is not set +# CONFIG_SENSORS_ATXP1 is not set +# CONFIG_SENSORS_DS620 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_I5K_AMB is not set +# CONFIG_SENSORS_F71805F is not set +# CONFIG_SENSORS_F71882FG is not set +# CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_FTSTEUTATES is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_G760A is not set +# CONFIG_SENSORS_G762 is not set +# CONFIG_SENSORS_GPIO_FAN is not set +# CONFIG_SENSORS_HIH6130 is not set +# CONFIG_SENSORS_IIO_HWMON is not set +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_JC42 is not set +# CONFIG_SENSORS_POWR1220 is not set +# CONFIG_SENSORS_LINEAGE is not set +# CONFIG_SENSORS_LTC2945 is 
not set +# CONFIG_SENSORS_LTC2990 is not set +# CONFIG_SENSORS_LTC4151 is not set +# CONFIG_SENSORS_LTC4215 is not set +# CONFIG_SENSORS_LTC4222 is not set +# CONFIG_SENSORS_LTC4245 is not set +# CONFIG_SENSORS_LTC4260 is not set +# CONFIG_SENSORS_LTC4261 is not set +# CONFIG_SENSORS_MAX1111 is not set +# CONFIG_SENSORS_MAX16065 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_MAX1668 is not set +# CONFIG_SENSORS_MAX197 is not set +# CONFIG_SENSORS_MAX31722 is not set +# CONFIG_SENSORS_MAX6621 is not set +# CONFIG_SENSORS_MAX6639 is not set +# CONFIG_SENSORS_MAX6642 is not set +# CONFIG_SENSORS_MAX6650 is not set +# CONFIG_SENSORS_MAX6697 is not set +# CONFIG_SENSORS_MAX31790 is not set +# CONFIG_SENSORS_MCP3021 is not set +# CONFIG_SENSORS_TC654 is not set +# CONFIG_SENSORS_ADCXX is not set +# CONFIG_SENSORS_LM63 is not set +# CONFIG_SENSORS_LM70 is not set +# CONFIG_SENSORS_LM73 is not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +CONFIG_SENSORS_LM90=m +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_LM93 is not set +# CONFIG_SENSORS_LM95234 is not set +# CONFIG_SENSORS_LM95241 is not set +# CONFIG_SENSORS_LM95245 is not set +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_PC87427 is not set +# CONFIG_SENSORS_NTC_THERMISTOR is not set +# CONFIG_SENSORS_NCT6683 is not set +# CONFIG_SENSORS_NCT6775 is not set +# CONFIG_SENSORS_NCT7802 is not set +# CONFIG_SENSORS_NCT7904 is not set +# CONFIG_SENSORS_NPCM7XX is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_PMBUS is not set +# CONFIG_SENSORS_PWM_FAN is not set +CONFIG_SENSORS_RASPBERRYPI_HWMON=m +# CONFIG_SENSORS_SHT15 is not set +# CONFIG_SENSORS_SHT21 is not set +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHTC1 is not set +# CONFIG_SENSORS_SIS5595 is not set +# 
CONFIG_SENSORS_DME1737 is not set +# CONFIG_SENSORS_EMC1403 is not set +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC6W201 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47M192 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_SCH5627 is not set +# CONFIG_SENSORS_SCH5636 is not set +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_SMM665 is not set +# CONFIG_SENSORS_ADC128D818 is not set +# CONFIG_SENSORS_ADS1015 is not set +# CONFIG_SENSORS_ADS7828 is not set +# CONFIG_SENSORS_ADS7871 is not set +# CONFIG_SENSORS_AMC6821 is not set +# CONFIG_SENSORS_INA209 is not set +CONFIG_SENSORS_INA2XX=m +# CONFIG_SENSORS_INA3221 is not set +# CONFIG_SENSORS_TC74 is not set +# CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_TMP102 is not set +# CONFIG_SENSORS_TMP103 is not set +# CONFIG_SENSORS_TMP108 is not set +# CONFIG_SENSORS_TMP401 is not set +# CONFIG_SENSORS_TMP421 is not set +# CONFIG_SENSORS_VEXPRESS is not set +# CONFIG_SENSORS_VIA686A is not set +# CONFIG_SENSORS_VT1211 is not set +# CONFIG_SENSORS_VT8231 is not set +# CONFIG_SENSORS_W83773G is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83791D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83793 is not set +# CONFIG_SENSORS_W83795 is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83L786NG is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set +CONFIG_THERMAL=y +# CONFIG_THERMAL_STATISTICS is not set +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_OF=y +# CONFIG_THERMAL_WRITABLE_TRIPS is not set +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set +# CONFIG_THERMAL_GOV_FAIR_SHARE is not set +CONFIG_THERMAL_GOV_STEP_WISE=y +# CONFIG_THERMAL_GOV_BANG_BANG is not set +# 
CONFIG_THERMAL_GOV_USER_SPACE is not set +CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y +CONFIG_CPU_THERMAL=y +# CONFIG_CLOCK_THERMAL is not set +# CONFIG_DEVFREQ_THERMAL is not set +CONFIG_THERMAL_EMULATION=y +CONFIG_HISI_THERMAL=y +# CONFIG_MAX77620_THERMAL is not set +# CONFIG_QORIQ_THERMAL is not set +CONFIG_ROCKCHIP_THERMAL=m +# CONFIG_RCAR_THERMAL is not set +CONFIG_RCAR_GEN3_THERMAL=y +CONFIG_ARMADA_THERMAL=y + +# +# ACPI INT340X thermal drivers +# +CONFIG_MTK_THERMAL=y + +# +# Broadcom thermal drivers +# +CONFIG_BCM2835_THERMAL=m +CONFIG_BRCMSTB_THERMAL=m +CONFIG_BCM_NS_THERMAL=y + +# +# Samsung thermal drivers +# +CONFIG_EXYNOS_THERMAL=y + +# +# NVIDIA Tegra thermal drivers +# +# CONFIG_TEGRA_SOCTHERM is not set +CONFIG_TEGRA_BPMP_THERMAL=m +# CONFIG_QCOM_SPMI_TEMP_ALARM is not set +# CONFIG_GENERIC_ADC_THERMAL is not set + +# +# Qualcomm thermal drivers +# +CONFIG_QCOM_TSENS=y +# CONFIG_ZX2967_THERMAL is not set +CONFIG_UNIPHIER_THERMAL=y +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +# CONFIG_WATCHDOG_NOWAYOUT is not set +CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y +# CONFIG_WATCHDOG_SYSFS is not set + +# +# Watchdog Device Drivers +# +# CONFIG_SOFT_WATCHDOG is not set +# CONFIG_GPIO_WATCHDOG is not set +# CONFIG_XILINX_WATCHDOG is not set +# CONFIG_ZIIRAVE_WATCHDOG is not set +CONFIG_ARM_SP805_WATCHDOG=y +# CONFIG_ARM_SBSA_WATCHDOG is not set +# CONFIG_CADENCE_WATCHDOG is not set +CONFIG_HAVE_S3C2410_WATCHDOG=y +CONFIG_S3C2410_WATCHDOG=y +# CONFIG_DW_WATCHDOG is not set +# CONFIG_SUNXI_WATCHDOG is not set +# CONFIG_MAX63XX_WATCHDOG is not set +# CONFIG_MAX77620_WATCHDOG is not set +# CONFIG_IMX2_WDT is not set +# CONFIG_TEGRA_WATCHDOG is not set +# CONFIG_QCOM_WDT is not set +CONFIG_MESON_GXBB_WATCHDOG=m +CONFIG_MESON_WATCHDOG=m +# CONFIG_MEDIATEK_WATCHDOG is not set +CONFIG_RENESAS_WDT=y +# CONFIG_RENESAS_RZAWDT is not set +# CONFIG_ZX2967_WATCHDOG is not set +CONFIG_UNIPHIER_WATCHDOG=y +# CONFIG_SPRD_WATCHDOG is not set +# CONFIG_ALIM7101_WDT is not set +# 
CONFIG_I6300ESB_WDT is not set +CONFIG_BCM2835_WDT=y +# CONFIG_BCM7038_WDT is not set +# CONFIG_MEN_A21_WDT is not set +# CONFIG_XEN_WDT is not set + +# +# PCI-based Watchdog Cards +# +# CONFIG_PCIPCWATCHDOG is not set +# CONFIG_WDTPCI is not set + +# +# USB-based Watchdog Cards +# +# CONFIG_USBPCWATCHDOG is not set + +# +# Watchdog Pretimeout Governors +# +# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +# CONFIG_BCMA is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=y +# CONFIG_MFD_ACT8945A is not set +# CONFIG_MFD_SUN4I_GPADC is not set +# CONFIG_MFD_AS3711 is not set +# CONFIG_MFD_AS3722 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set +# CONFIG_MFD_ATMEL_FLEXCOM is not set +# CONFIG_MFD_ATMEL_HLCDC is not set +# CONFIG_MFD_BCM590XX is not set +CONFIG_MFD_BD9571MWV=y +# CONFIG_MFD_AC100 is not set +CONFIG_MFD_AXP20X=y +# CONFIG_MFD_AXP20X_I2C is not set +CONFIG_MFD_AXP20X_RSB=y +CONFIG_MFD_CROS_EC=y +CONFIG_MFD_CROS_EC_CHARDEV=m +# CONFIG_MFD_MADERA is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +CONFIG_MFD_EXYNOS_LPASS=m +# CONFIG_MFD_MC13XXX_SPI is not set +# CONFIG_MFD_MC13XXX_I2C is not set +CONFIG_MFD_HI6421_PMIC=y +CONFIG_MFD_HI655X_PMIC=y +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_LPC_ICH is not set +# CONFIG_LPC_SCH is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_MAX14577 is not set +CONFIG_MFD_MAX77620=y +# CONFIG_MFD_MAX77686 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77843 is not set +# CONFIG_MFD_MAX8907 is not set 
+# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_CPCAP is not set +# CONFIG_MFD_VIPERBOARD is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_QCOM_RPM is not set +CONFIG_MFD_SPMI_PMIC=y +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RC5T583 is not set +CONFIG_MFD_RK808=y +# CONFIG_MFD_RN5T618 is not set +CONFIG_MFD_SEC_CORE=y +# CONFIG_MFD_SI476X_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_SMSC is not set +# CONFIG_MFD_SC27XX_PMIC is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_STMPE is not set +# CONFIG_MFD_SUN6I_PRCM is not set +CONFIG_MFD_SYSCON=y +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set +# CONFIG_MFD_TPS65217 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TI_LP87565 is not set +# CONFIG_MFD_TPS65218 is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_MFD_TPS80031 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_VX855 is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_ROHM_BD718XX is not set 
+CONFIG_MFD_VEXPRESS_SYSREG=y +# CONFIG_RAVE_SP_CORE is not set +CONFIG_REGULATOR=y +# CONFIG_REGULATOR_DEBUG is not set +CONFIG_REGULATOR_FIXED_VOLTAGE=y +CONFIG_REGULATOR_VIRTUAL_CONSUMER=y +# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set +# CONFIG_REGULATOR_88PG86X is not set +# CONFIG_REGULATOR_ACT8865 is not set +# CONFIG_REGULATOR_AD5398 is not set +# CONFIG_REGULATOR_ANATOP is not set +CONFIG_REGULATOR_AXP20X=y +CONFIG_REGULATOR_BD9571MWV=y +# CONFIG_REGULATOR_DA9210 is not set +# CONFIG_REGULATOR_DA9211 is not set +CONFIG_REGULATOR_FAN53555=y +CONFIG_REGULATOR_GPIO=y +# CONFIG_REGULATOR_HI6421 is not set +CONFIG_REGULATOR_HI6421V530=y +CONFIG_REGULATOR_HI655X=y +# CONFIG_REGULATOR_ISL9305 is not set +# CONFIG_REGULATOR_ISL6271A is not set +# CONFIG_REGULATOR_LP3971 is not set +# CONFIG_REGULATOR_LP3972 is not set +# CONFIG_REGULATOR_LP872X is not set +# CONFIG_REGULATOR_LP8755 is not set +# CONFIG_REGULATOR_LTC3589 is not set +# CONFIG_REGULATOR_LTC3676 is not set +# CONFIG_REGULATOR_MAX1586 is not set +CONFIG_REGULATOR_MAX77620=y +# CONFIG_REGULATOR_MAX8649 is not set +# CONFIG_REGULATOR_MAX8660 is not set +# CONFIG_REGULATOR_MAX8952 is not set +# CONFIG_REGULATOR_MAX8973 is not set +# CONFIG_REGULATOR_MT6311 is not set +# CONFIG_REGULATOR_PFUZE100 is not set +# CONFIG_REGULATOR_PV88060 is not set +# CONFIG_REGULATOR_PV88080 is not set +# CONFIG_REGULATOR_PV88090 is not set +CONFIG_REGULATOR_PWM=y +CONFIG_REGULATOR_QCOM_SMD_RPM=y +CONFIG_REGULATOR_QCOM_SPMI=y +CONFIG_REGULATOR_RK808=y +# CONFIG_REGULATOR_S2MPA01 is not set +CONFIG_REGULATOR_S2MPS11=y +# CONFIG_REGULATOR_S5M8767 is not set +# CONFIG_REGULATOR_SY8106A is not set +# CONFIG_REGULATOR_TPS51632 is not set +# CONFIG_REGULATOR_TPS62360 is not set +# CONFIG_REGULATOR_TPS65023 is not set +# CONFIG_REGULATOR_TPS6507X is not set +# CONFIG_REGULATOR_TPS65132 is not set +# CONFIG_REGULATOR_TPS6524X is not set +CONFIG_REGULATOR_UNIPHIER=y +CONFIG_REGULATOR_VCTRL=m +# CONFIG_REGULATOR_VEXPRESS is not set 
+CONFIG_CEC_CORE=m +CONFIG_RC_CORE=m +CONFIG_RC_MAP=m +# CONFIG_LIRC is not set +CONFIG_RC_DECODERS=y +# CONFIG_IR_NEC_DECODER is not set +# CONFIG_IR_RC5_DECODER is not set +# CONFIG_IR_RC6_DECODER is not set +# CONFIG_IR_JVC_DECODER is not set +# CONFIG_IR_SONY_DECODER is not set +# CONFIG_IR_SANYO_DECODER is not set +# CONFIG_IR_SHARP_DECODER is not set +# CONFIG_IR_MCE_KBD_DECODER is not set +# CONFIG_IR_XMP_DECODER is not set +# CONFIG_IR_IMON_DECODER is not set +CONFIG_RC_DEVICES=y +# CONFIG_RC_ATI_REMOTE is not set +# CONFIG_IR_HIX5HD2 is not set +# CONFIG_IR_IMON is not set +# CONFIG_IR_IMON_RAW is not set +# CONFIG_IR_MCEUSB is not set +CONFIG_IR_MESON=m +# CONFIG_IR_MTK is not set +# CONFIG_IR_REDRAT3 is not set +# CONFIG_IR_STREAMZAP is not set +# CONFIG_IR_IGORPLUGUSB is not set +# CONFIG_IR_IGUANA is not set +# CONFIG_IR_TTUSBIR is not set +# CONFIG_RC_LOOPBACK is not set +# CONFIG_IR_GPIO_CIR is not set +# CONFIG_IR_SUNXI is not set +# CONFIG_IR_SERIAL is not set +# CONFIG_IR_SIR is not set +# CONFIG_IR_ZX is not set +CONFIG_MEDIA_SUPPORT=y + +# +# Multimedia core support +# +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_ANALOG_TV_SUPPORT=y +CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y +# CONFIG_MEDIA_RADIO_SUPPORT is not set +# CONFIG_MEDIA_SDR_SUPPORT is not set +# CONFIG_MEDIA_CEC_SUPPORT is not set +# CONFIG_MEDIA_CEC_RC is not set +CONFIG_MEDIA_CONTROLLER=y +# CONFIG_MEDIA_CONTROLLER_DVB is not set +CONFIG_VIDEO_DEV=y +CONFIG_VIDEO_V4L2_SUBDEV_API=y +CONFIG_VIDEO_V4L2=y +# CONFIG_VIDEO_ADV_DEBUG is not set +# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set +# CONFIG_VIDEO_PCI_SKELETON is not set +CONFIG_V4L2_MEM2MEM_DEV=m +CONFIG_DVB_CORE=y +# CONFIG_DVB_MMAP is not set +# CONFIG_DVB_NET is not set +CONFIG_DVB_MAX_ADAPTERS=16 +# CONFIG_DVB_DYNAMIC_MINORS is not set +# CONFIG_DVB_DEMUX_SECTION_LOSS_LOG is not set +# CONFIG_DVB_ULE_DEBUG is not set + +# +# Media drivers +# +CONFIG_MEDIA_USB_SUPPORT=y + +# +# Webcam devices +# +CONFIG_USB_VIDEO_CLASS=m 
+CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y +CONFIG_USB_GSPCA=m +# CONFIG_USB_M5602 is not set +# CONFIG_USB_STV06XX is not set +# CONFIG_USB_GL860 is not set +# CONFIG_USB_GSPCA_BENQ is not set +# CONFIG_USB_GSPCA_CONEX is not set +# CONFIG_USB_GSPCA_CPIA1 is not set +# CONFIG_USB_GSPCA_DTCS033 is not set +# CONFIG_USB_GSPCA_ETOMS is not set +# CONFIG_USB_GSPCA_FINEPIX is not set +# CONFIG_USB_GSPCA_JEILINJ is not set +# CONFIG_USB_GSPCA_JL2005BCD is not set +# CONFIG_USB_GSPCA_KINECT is not set +# CONFIG_USB_GSPCA_KONICA is not set +# CONFIG_USB_GSPCA_MARS is not set +# CONFIG_USB_GSPCA_MR97310A is not set +# CONFIG_USB_GSPCA_NW80X is not set +# CONFIG_USB_GSPCA_OV519 is not set +# CONFIG_USB_GSPCA_OV534 is not set +# CONFIG_USB_GSPCA_OV534_9 is not set +# CONFIG_USB_GSPCA_PAC207 is not set +# CONFIG_USB_GSPCA_PAC7302 is not set +# CONFIG_USB_GSPCA_PAC7311 is not set +# CONFIG_USB_GSPCA_SE401 is not set +# CONFIG_USB_GSPCA_SN9C2028 is not set +# CONFIG_USB_GSPCA_SN9C20X is not set +# CONFIG_USB_GSPCA_SONIXB is not set +# CONFIG_USB_GSPCA_SONIXJ is not set +# CONFIG_USB_GSPCA_SPCA500 is not set +# CONFIG_USB_GSPCA_SPCA501 is not set +# CONFIG_USB_GSPCA_SPCA505 is not set +# CONFIG_USB_GSPCA_SPCA506 is not set +# CONFIG_USB_GSPCA_SPCA508 is not set +# CONFIG_USB_GSPCA_SPCA561 is not set +# CONFIG_USB_GSPCA_SPCA1528 is not set +# CONFIG_USB_GSPCA_SQ905 is not set +# CONFIG_USB_GSPCA_SQ905C is not set +# CONFIG_USB_GSPCA_SQ930X is not set +# CONFIG_USB_GSPCA_STK014 is not set +# CONFIG_USB_GSPCA_STK1135 is not set +# CONFIG_USB_GSPCA_STV0680 is not set +# CONFIG_USB_GSPCA_SUNPLUS is not set +# CONFIG_USB_GSPCA_T613 is not set +# CONFIG_USB_GSPCA_TOPRO is not set +# CONFIG_USB_GSPCA_TOUPTEK is not set +# CONFIG_USB_GSPCA_TV8532 is not set +# CONFIG_USB_GSPCA_VC032X is not set +# CONFIG_USB_GSPCA_VICAM is not set +# CONFIG_USB_GSPCA_XIRLINK_CIT is not set +# CONFIG_USB_GSPCA_ZC3XX is not set +# CONFIG_USB_PWC is not set +# CONFIG_VIDEO_CPIA2 is not set +# CONFIG_USB_ZR364XX 
is not set +# CONFIG_USB_STKWEBCAM is not set +# CONFIG_USB_S2255 is not set +# CONFIG_VIDEO_USBTV is not set + +# +# Analog TV USB devices +# +# CONFIG_VIDEO_PVRUSB2 is not set +# CONFIG_VIDEO_HDPVR is not set +# CONFIG_VIDEO_USBVISION is not set +# CONFIG_VIDEO_STK1160_COMMON is not set +# CONFIG_VIDEO_GO7007 is not set + +# +# Analog/digital TV USB devices +# +# CONFIG_VIDEO_AU0828 is not set +# CONFIG_VIDEO_CX231XX is not set +# CONFIG_VIDEO_TM6000 is not set + +# +# Digital TV USB devices +# +# CONFIG_DVB_USB is not set +# CONFIG_DVB_USB_V2 is not set +# CONFIG_DVB_TTUSB_BUDGET is not set +# CONFIG_DVB_TTUSB_DEC is not set +# CONFIG_SMS_USB_DRV is not set +# CONFIG_DVB_B2C2_FLEXCOP_USB is not set +# CONFIG_DVB_AS102 is not set + +# +# Webcam, TV (analog/digital) USB devices +# +# CONFIG_VIDEO_EM28XX is not set +# CONFIG_MEDIA_PCI_SUPPORT is not set +# CONFIG_V4L_PLATFORM_DRIVERS is not set +CONFIG_V4L_MEM2MEM_DRIVERS=y +# CONFIG_VIDEO_MEDIATEK_VPU is not set +# CONFIG_VIDEO_MEM2MEM_DEINTERLACE is not set +# CONFIG_VIDEO_SAMSUNG_S5P_G2D is not set +CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m +CONFIG_VIDEO_SAMSUNG_S5P_MFC=m +CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC=m +# CONFIG_VIDEO_SH_VEU is not set +# CONFIG_VIDEO_RENESAS_FDP1 is not set +# CONFIG_VIDEO_RENESAS_JPU is not set +CONFIG_VIDEO_RENESAS_FCP=m +CONFIG_VIDEO_RENESAS_VSP1=m +# CONFIG_VIDEO_ROCKCHIP_RGA is not set +# CONFIG_VIDEO_QCOM_VENUS is not set +# CONFIG_V4L_TEST_DRIVERS is not set +# CONFIG_DVB_PLATFORM_DRIVERS is not set + +# +# Supported MMC/SDIO adapters +# +# CONFIG_SMS_SDIO_DRV is not set +# CONFIG_CYPRESS_FIRMWARE is not set +CONFIG_VIDEOBUF2_CORE=m +CONFIG_VIDEOBUF2_V4L2=m +CONFIG_VIDEOBUF2_MEMOPS=m +CONFIG_VIDEOBUF2_DMA_CONTIG=m +CONFIG_VIDEOBUF2_VMALLOC=m + +# +# Media ancillary drivers (tuners, sensors, i2c, spi, frontends) +# +CONFIG_MEDIA_SUBDRV_AUTOSELECT=y +CONFIG_MEDIA_ATTACH=y +CONFIG_VIDEO_IR_I2C=m + +# +# Audio decoders, processors and mixers +# + +# +# RDS decoders +# + +# +# Video decoders +# + 
+# +# Video and audio decoders +# + +# +# Video encoders +# + +# +# Camera sensor devices +# + +# +# Flash devices +# + +# +# Video improvement chips +# + +# +# Audio/Video compression chips +# + +# +# SDR tuner chips +# + +# +# Miscellaneous helper chips +# + +# +# Sensors used on soc_camera driver +# + +# +# Media SPI Adapters +# +# CONFIG_CXD2880_SPI_DRV is not set +CONFIG_MEDIA_TUNER=y +CONFIG_MEDIA_TUNER_SIMPLE=y +CONFIG_MEDIA_TUNER_TDA8290=y +CONFIG_MEDIA_TUNER_TDA827X=y +CONFIG_MEDIA_TUNER_TDA18271=y +CONFIG_MEDIA_TUNER_TDA9887=y +CONFIG_MEDIA_TUNER_MT20XX=y +CONFIG_MEDIA_TUNER_XC2028=y +CONFIG_MEDIA_TUNER_XC5000=y +CONFIG_MEDIA_TUNER_XC4000=y +CONFIG_MEDIA_TUNER_MC44S803=y + +# +# Multistandard (satellite) frontends +# + +# +# Multistandard (cable + terrestrial) frontends +# + +# +# DVB-S (satellite) frontends +# + +# +# DVB-T (terrestrial) frontends +# + +# +# DVB-C (cable) frontends +# + +# +# ATSC (North American/Korean Terrestrial/Cable DTV) frontends +# + +# +# ISDB-T (terrestrial) frontends +# + +# +# ISDB-S (satellite) & ISDB-T (terrestrial) frontends +# + +# +# Digital terrestrial only tuners/PLL +# + +# +# SEC control devices for DVB-S +# + +# +# Common Interface (EN50221) controller drivers +# + +# +# Tools to develop new frontends +# + +# +# Graphics support +# +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=16 +CONFIG_TEGRA_HOST1X=m +CONFIG_TEGRA_HOST1X_FIREWALL=y +CONFIG_DRM=y +CONFIG_DRM_MIPI_DSI=y +# CONFIG_DRM_DP_AUX_CHARDEV is not set +# CONFIG_DRM_DEBUG_MM is not set +# CONFIG_DRM_DEBUG_SELFTEST is not set +CONFIG_DRM_KMS_HELPER=y +CONFIG_DRM_KMS_FB_HELPER=y +CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FBDEV_OVERALLOC=100 +# CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM is not set +# CONFIG_DRM_LOAD_EDID_FIRMWARE is not set +# CONFIG_DRM_DP_CEC is not set +CONFIG_DRM_TTM=m +CONFIG_DRM_GEM_CMA_HELPER=y +CONFIG_DRM_KMS_CMA_HELPER=y +CONFIG_DRM_VM=y + +# +# I2C encoder or helper chips +# +CONFIG_DRM_I2C_CH7006=m +CONFIG_DRM_I2C_SIL164=m +# 
CONFIG_DRM_I2C_NXP_TDA998X is not set +# CONFIG_DRM_I2C_NXP_TDA9950 is not set +# CONFIG_DRM_HDLCD is not set +# CONFIG_DRM_MALI_DISPLAY is not set +# CONFIG_DRM_RADEON is not set +# CONFIG_DRM_AMDGPU is not set + +# +# ACP (Audio CoProcessor) Configuration +# + +# +# AMD Library routines +# +CONFIG_DRM_NOUVEAU=m +CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT=y +CONFIG_NOUVEAU_PLATFORM_DRIVER=y +CONFIG_NOUVEAU_DEBUG=5 +CONFIG_NOUVEAU_DEBUG_DEFAULT=3 +# CONFIG_NOUVEAU_DEBUG_MMU is not set +CONFIG_DRM_NOUVEAU_BACKLIGHT=y +# CONFIG_DRM_VGEM is not set +# CONFIG_DRM_VKMS is not set +CONFIG_DRM_EXYNOS=m + +# +# CRTCs +# +# CONFIG_DRM_EXYNOS_FIMD is not set +CONFIG_DRM_EXYNOS5433_DECON=y +CONFIG_DRM_EXYNOS7_DECON=y +# CONFIG_DRM_EXYNOS_MIXER is not set +# CONFIG_DRM_EXYNOS_VIDI is not set + +# +# Encoders and Bridges +# +CONFIG_DRM_EXYNOS_DSI=y +# CONFIG_DRM_EXYNOS_DP is not set +CONFIG_DRM_EXYNOS_HDMI=y +CONFIG_DRM_EXYNOS_MIC=y + +# +# Sub-drivers +# +# CONFIG_DRM_EXYNOS_G2D is not set +# CONFIG_DRM_EXYNOS_FIMC is not set +# CONFIG_DRM_EXYNOS_ROTATOR is not set +# CONFIG_DRM_EXYNOS_SCALER is not set +CONFIG_DRM_ROCKCHIP=m +CONFIG_ROCKCHIP_ANALOGIX_DP=y +CONFIG_ROCKCHIP_CDN_DP=y +CONFIG_ROCKCHIP_DW_HDMI=y +CONFIG_ROCKCHIP_DW_MIPI_DSI=y +CONFIG_ROCKCHIP_INNO_HDMI=y +# CONFIG_ROCKCHIP_LVDS is not set +# CONFIG_DRM_UDL is not set +# CONFIG_DRM_AST is not set +# CONFIG_DRM_MGAG200 is not set +# CONFIG_DRM_CIRRUS_QEMU is not set +CONFIG_DRM_RCAR_DU=m +# CONFIG_DRM_RCAR_DW_HDMI is not set +CONFIG_DRM_RCAR_LVDS=m +CONFIG_DRM_RCAR_VSP=y +# CONFIG_DRM_SUN4I is not set +# CONFIG_DRM_QXL is not set +# CONFIG_DRM_BOCHS is not set +# CONFIG_DRM_VIRTIO_GPU is not set +CONFIG_DRM_MSM=m +# CONFIG_DRM_MSM_REGISTER_LOGGING is not set +# CONFIG_DRM_MSM_GPU_SUDO is not set +CONFIG_DRM_MSM_HDMI_HDCP=y +CONFIG_DRM_MSM_DSI=y +CONFIG_DRM_MSM_DSI_PLL=y +CONFIG_DRM_MSM_DSI_28NM_PHY=y +CONFIG_DRM_MSM_DSI_20NM_PHY=y +CONFIG_DRM_MSM_DSI_28NM_8960_PHY=y +CONFIG_DRM_MSM_DSI_14NM_PHY=y 
+CONFIG_DRM_MSM_DSI_10NM_PHY=y +CONFIG_DRM_TEGRA=m +# CONFIG_DRM_TEGRA_DEBUG is not set +# CONFIG_DRM_TEGRA_STAGING is not set +CONFIG_DRM_PANEL=y + +# +# Display Panels +# +# CONFIG_DRM_PANEL_ARM_VERSATILE is not set +# CONFIG_DRM_PANEL_LVDS is not set +CONFIG_DRM_PANEL_SIMPLE=m +# CONFIG_DRM_PANEL_ILITEK_IL9322 is not set +# CONFIG_DRM_PANEL_ILITEK_ILI9881C is not set +# CONFIG_DRM_PANEL_INNOLUX_P079ZCA is not set +# CONFIG_DRM_PANEL_JDI_LT070ME05000 is not set +# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set +# CONFIG_DRM_PANEL_LG_LG4573 is not set +# CONFIG_DRM_PANEL_ORISETECH_OTM8009A is not set +# CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00 is not set +# CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN is not set +# CONFIG_DRM_PANEL_RAYDIUM_RM68200 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set +# CONFIG_DRM_PANEL_SEIKO_43WVF1G is not set +# CONFIG_DRM_PANEL_SHARP_LQ101R1SX01 is not set +# CONFIG_DRM_PANEL_SHARP_LS043T1LE01 is not set +# CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set +CONFIG_DRM_BRIDGE=y +CONFIG_DRM_PANEL_BRIDGE=y + +# +# Display Interface Bridges +# +# CONFIG_DRM_ANALOGIX_ANX78XX is not set +# CONFIG_DRM_CDNS_DSI is not set +# CONFIG_DRM_DUMB_VGA_DAC is not set +# CONFIG_DRM_LVDS_ENCODER is not set +# CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW is not set +# CONFIG_DRM_NXP_PTN3460 is not set +# CONFIG_DRM_PARADE_PS8622 is not set +# CONFIG_DRM_SIL_SII8620 is not set +# CONFIG_DRM_SII902X is not set +# CONFIG_DRM_SII9234 is not set +# CONFIG_DRM_THINE_THC63LVD1024 is not set +# CONFIG_DRM_TOSHIBA_TC358767 is not set +# CONFIG_DRM_TI_TFP410 is not set +CONFIG_DRM_ANALOGIX_DP=m +CONFIG_DRM_I2C_ADV7511=m +# CONFIG_DRM_I2C_ADV7511_AUDIO is not set +CONFIG_DRM_I2C_ADV7533=y +CONFIG_DRM_I2C_ADV7511_CEC=y +CONFIG_DRM_DW_HDMI=m +# CONFIG_DRM_DW_HDMI_AHB_AUDIO is not set +# CONFIG_DRM_DW_HDMI_I2S_AUDIO is not set +# CONFIG_DRM_DW_HDMI_CEC is not set 
+CONFIG_DRM_VC4=m +# CONFIG_DRM_VC4_HDMI_CEC is not set +# CONFIG_DRM_ARCPGU is not set +CONFIG_DRM_HISI_HIBMC=m +CONFIG_DRM_HISI_KIRIN=m +CONFIG_HISI_KIRIN_DW_DSI=m +# CONFIG_DRM_MEDIATEK is not set +# CONFIG_DRM_ZTE is not set +# CONFIG_DRM_MXSFB is not set +CONFIG_DRM_MESON=m +CONFIG_DRM_MESON_DW_HDMI=m +# CONFIG_DRM_TINYDRM is not set +# CONFIG_DRM_PL111 is not set +# CONFIG_DRM_XEN is not set +CONFIG_DRM_PHYTIUMVR_OCTOPUS=y +CONFIG_DRM_PHYTIUM=y +# CONFIG_DRM_LEGACY is not set +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y + +# +# Frame buffer Devices +# +CONFIG_FB_CMDLINE=y +CONFIG_FB_NOTIFY=y +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_IMAGEBLIT=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=y +CONFIG_FB_DEFERRED_IO=y +CONFIG_FB_BACKLIGHT=y +CONFIG_FB_MODE_HELPERS=y +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +CONFIG_FB_ARMCLCD=y +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SH_MOBILE_LCDC is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_XILINX is not set 
+# CONFIG_FB_VIRTUAL is not set +CONFIG_XEN_FBDEV_FRONTEND=y +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +# CONFIG_FB_SIMPLE is not set +# CONFIG_FB_SSD1307 is not set +# CONFIG_FB_SM712 is not set +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_LCD_CLASS_DEVICE=m +# CONFIG_LCD_L4F00242T03 is not set +# CONFIG_LCD_LMS283GF05 is not set +# CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI922X is not set +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# CONFIG_LCD_VGG2432A4 is not set +# CONFIG_LCD_PLATFORM is not set +# CONFIG_LCD_S6E63M0 is not set +# CONFIG_LCD_LD9040 is not set +# CONFIG_LCD_AMS369FG06 is not set +# CONFIG_LCD_LMS501KF03 is not set +# CONFIG_LCD_HX8357 is not set +# CONFIG_LCD_OTM3225A is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +CONFIG_BACKLIGHT_GENERIC=m +CONFIG_BACKLIGHT_PWM=m +# CONFIG_BACKLIGHT_PM8941_WLED is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3630A is not set +# CONFIG_BACKLIGHT_LM3639 is not set +CONFIG_BACKLIGHT_LP855X=m +# CONFIG_BACKLIGHT_GPIO is not set +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +CONFIG_VIDEOMODE_HELPERS=y +CONFIG_HDMI=y + +# +# Console display driver support +# +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +# CONFIG_LOGO_LINUX_CLUT224 is not set +CONFIG_LOGO_CUSTOM_CLUT224=y + +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_TIMER=y +CONFIG_SND_PCM=y +CONFIG_SND_PCM_ELD=y +CONFIG_SND_PCM_IEC958=y +CONFIG_SND_DMAENGINE_PCM=y +CONFIG_SND_JACK=y +CONFIG_SND_JACK_INPUT_DEV=y +# CONFIG_SND_OSSEMUL is not set 
+CONFIG_SND_PCM_TIMER=y +# CONFIG_SND_HRTIMER is not set +# CONFIG_SND_DYNAMIC_MINORS is not set +CONFIG_SND_SUPPORT_OLD_API=y +CONFIG_SND_PROC_FS=y +CONFIG_SND_VERBOSE_PROCFS=y +# CONFIG_SND_VERBOSE_PRINTK is not set +# CONFIG_SND_DEBUG is not set +# CONFIG_SND_SEQUENCER is not set +CONFIG_SND_DRIVERS=y +# CONFIG_SND_DUMMY is not set +# CONFIG_SND_ALOOP is not set +# CONFIG_SND_MTPAV is not set +# CONFIG_SND_SERIAL_U16550 is not set +# CONFIG_SND_MPU401 is not set +CONFIG_SND_PCI=y +# CONFIG_SND_AD1889 is not set +# CONFIG_SND_ATIIXP is not set +# CONFIG_SND_ATIIXP_MODEM is not set +# CONFIG_SND_AU8810 is not set +# CONFIG_SND_AU8820 is not set +# CONFIG_SND_AU8830 is not set +# CONFIG_SND_AW2 is not set +# CONFIG_SND_BT87X is not set +# CONFIG_SND_CA0106 is not set +# CONFIG_SND_CMIPCI is not set +# CONFIG_SND_OXYGEN is not set +# CONFIG_SND_CS4281 is not set +# CONFIG_SND_CS46XX is not set +# CONFIG_SND_CTXFI is not set +# CONFIG_SND_DARLA20 is not set +# CONFIG_SND_GINA20 is not set +# CONFIG_SND_LAYLA20 is not set +# CONFIG_SND_DARLA24 is not set +# CONFIG_SND_GINA24 is not set +# CONFIG_SND_LAYLA24 is not set +# CONFIG_SND_MONA is not set +# CONFIG_SND_MIA is not set +# CONFIG_SND_ECHO3G is not set +# CONFIG_SND_INDIGO is not set +# CONFIG_SND_INDIGOIO is not set +# CONFIG_SND_INDIGODJ is not set +# CONFIG_SND_INDIGOIOX is not set +# CONFIG_SND_INDIGODJX is not set +# CONFIG_SND_ENS1370 is not set +# CONFIG_SND_ENS1371 is not set +# CONFIG_SND_FM801 is not set +# CONFIG_SND_HDSP is not set +# CONFIG_SND_HDSPM is not set +# CONFIG_SND_ICE1724 is not set +# CONFIG_SND_INTEL8X0 is not set +# CONFIG_SND_INTEL8X0M is not set +# CONFIG_SND_KORG1212 is not set +# CONFIG_SND_LOLA is not set +# CONFIG_SND_LX6464ES is not set +# CONFIG_SND_MIXART is not set +# CONFIG_SND_NM256 is not set +# CONFIG_SND_PCXHR is not set +# CONFIG_SND_RIPTIDE is not set +# CONFIG_SND_RME32 is not set +# CONFIG_SND_RME96 is not set +# CONFIG_SND_RME9652 is not set +# CONFIG_SND_SE6X is not 
set +# CONFIG_SND_VIA82XX is not set +# CONFIG_SND_VIA82XX_MODEM is not set +# CONFIG_SND_VIRTUOSO is not set +# CONFIG_SND_VX222 is not set +# CONFIG_SND_YMFPCI is not set + +# +# HD-Audio +# +# CONFIG_SND_HDA_INTEL is not set +# CONFIG_SND_HDA_TEGRA is not set +CONFIG_SND_HDA_PREALLOC_SIZE=64 +CONFIG_SND_SPI=y +CONFIG_SND_USB=y +# CONFIG_SND_USB_AUDIO is not set +# CONFIG_SND_USB_UA101 is not set +# CONFIG_SND_USB_CAIAQ is not set +# CONFIG_SND_USB_6FIRE is not set +# CONFIG_SND_USB_HIFACE is not set +# CONFIG_SND_BCD2000 is not set +# CONFIG_SND_USB_POD is not set +# CONFIG_SND_USB_PODHD is not set +# CONFIG_SND_USB_TONEPORT is not set +# CONFIG_SND_USB_VARIAX is not set +CONFIG_SND_SOC=y +CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM=y +# CONFIG_SND_SOC_AMD_ACP is not set +# CONFIG_SND_ATMEL_SOC is not set +CONFIG_SND_BCM2835_SOC_I2S=m +# CONFIG_SND_DESIGNWARE_I2S is not set + +# +# SoC Audio for Freescale CPUs +# + +# +# Common SoC Audio options for Freescale CPUs: +# +# CONFIG_SND_SOC_FSL_ASRC is not set +# CONFIG_SND_SOC_FSL_SAI is not set +# CONFIG_SND_SOC_FSL_SSI is not set +# CONFIG_SND_SOC_FSL_SPDIF is not set +# CONFIG_SND_SOC_FSL_ESAI is not set +# CONFIG_SND_SOC_IMX_AUDMUX is not set +# CONFIG_SND_I2S_HI6210_I2S is not set +# CONFIG_SND_KIRKWOOD_SOC is not set +# CONFIG_SND_SOC_IMG is not set +# CONFIG_SND_SOC_MT2701 is not set +# CONFIG_SND_SOC_MT6797 is not set +# CONFIG_SND_SOC_MT8173 is not set + +# +# ASoC support for Amlogic platforms +# +# CONFIG_SND_MESON_AXG_FRDDR is not set +# CONFIG_SND_MESON_AXG_TODDR is not set +# CONFIG_SND_MESON_AXG_TDMIN is not set +# CONFIG_SND_MESON_AXG_TDMOUT is not set +# CONFIG_SND_MESON_AXG_SOUND_CARD is not set +# CONFIG_SND_MESON_AXG_SPDIFOUT is not set +# CONFIG_SND_SOC_QCOM is not set +CONFIG_SND_SOC_ROCKCHIP=m +CONFIG_SND_SOC_ROCKCHIP_I2S=m +# CONFIG_SND_SOC_ROCKCHIP_PDM is not set +CONFIG_SND_SOC_ROCKCHIP_SPDIF=m +# CONFIG_SND_SOC_ROCKCHIP_MAX98090 is not set +CONFIG_SND_SOC_ROCKCHIP_RT5645=m +# 
CONFIG_SND_SOC_RK3288_HDMI_ANALOG is not set +CONFIG_SND_SOC_RK3399_GRU_SOUND=m +CONFIG_SND_SOC_SAMSUNG=y +# CONFIG_SND_SAMSUNG_PCM is not set +# CONFIG_SND_SAMSUNG_SPDIF is not set +# CONFIG_SND_SAMSUNG_I2S is not set +# CONFIG_SND_SOC_SAMSUNG_SMDK_WM8994 is not set +# CONFIG_SND_SOC_SAMSUNG_SMDK_SPDIF is not set +# CONFIG_SND_SOC_SMDK_WM8994_PCM is not set +# CONFIG_SND_SOC_SNOW is not set +# CONFIG_SND_SOC_ODROID is not set +# CONFIG_SND_SOC_ARNDALE_RT5631_ALC5631 is not set + +# +# SoC Audio support for Renesas SoCs +# +# CONFIG_SND_SOC_SH4_FSI is not set +CONFIG_SND_SOC_RCAR=m + +# +# STMicroelectronics STM32 SOC audio support +# + +# +# Allwinner SoC Audio support +# +# CONFIG_SND_SUN4I_CODEC is not set +# CONFIG_SND_SUN8I_CODEC_ANALOG is not set +# CONFIG_SND_SUN4I_I2S is not set +# CONFIG_SND_SUN4I_SPDIF is not set +# CONFIG_SND_SOC_TEGRA is not set +# CONFIG_SND_SOC_UNIPHIER is not set +# CONFIG_SND_SOC_XTFPGA_I2S is not set +# CONFIG_ZX_SPDIF is not set +# CONFIG_ZX_I2S is not set +# CONFIG_ZX_TDM is not set +CONFIG_SND_SOC_I2C_AND_SPI=y + +# +# CODEC drivers +# +# CONFIG_SND_SOC_AC97_CODEC is not set +# CONFIG_SND_SOC_ADAU1701 is not set +# CONFIG_SND_SOC_ADAU1761_I2C is not set +# CONFIG_SND_SOC_ADAU1761_SPI is not set +# CONFIG_SND_SOC_ADAU7002 is not set +# CONFIG_SND_SOC_AK4104 is not set +# CONFIG_SND_SOC_AK4458 is not set +# CONFIG_SND_SOC_AK4554 is not set +CONFIG_SND_SOC_AK4613=m +# CONFIG_SND_SOC_AK4642 is not set +# CONFIG_SND_SOC_AK5386 is not set +# CONFIG_SND_SOC_AK5558 is not set +# CONFIG_SND_SOC_ALC5623 is not set +# CONFIG_SND_SOC_BD28623 is not set +# CONFIG_SND_SOC_BT_SCO is not set +# CONFIG_SND_SOC_CS35L32 is not set +# CONFIG_SND_SOC_CS35L33 is not set +# CONFIG_SND_SOC_CS35L34 is not set +# CONFIG_SND_SOC_CS35L35 is not set +# CONFIG_SND_SOC_CS42L42 is not set +# CONFIG_SND_SOC_CS42L51_I2C is not set +# CONFIG_SND_SOC_CS42L52 is not set +# CONFIG_SND_SOC_CS42L56 is not set +# CONFIG_SND_SOC_CS42L73 is not set +# 
CONFIG_SND_SOC_CS4265 is not set +# CONFIG_SND_SOC_CS4270 is not set +# CONFIG_SND_SOC_CS4271_I2C is not set +# CONFIG_SND_SOC_CS4271_SPI is not set +# CONFIG_SND_SOC_CS42XX8_I2C is not set +# CONFIG_SND_SOC_CS43130 is not set +# CONFIG_SND_SOC_CS4349 is not set +# CONFIG_SND_SOC_CS53L30 is not set +CONFIG_SND_SOC_DA7219=m +CONFIG_SND_SOC_DMIC=m +CONFIG_SND_SOC_HDMI_CODEC=y +# CONFIG_SND_SOC_ES7134 is not set +# CONFIG_SND_SOC_ES7241 is not set +# CONFIG_SND_SOC_ES8316 is not set +# CONFIG_SND_SOC_ES8328_I2C is not set +# CONFIG_SND_SOC_ES8328_SPI is not set +# CONFIG_SND_SOC_GTM601 is not set +# CONFIG_SND_SOC_INNO_RK3036 is not set +CONFIG_SND_SOC_MAX98357A=m +# CONFIG_SND_SOC_MAX98504 is not set +# CONFIG_SND_SOC_MAX9867 is not set +# CONFIG_SND_SOC_MAX98927 is not set +# CONFIG_SND_SOC_MAX98373 is not set +# CONFIG_SND_SOC_MAX9860 is not set +# CONFIG_SND_SOC_MSM8916_WCD_ANALOG is not set +# CONFIG_SND_SOC_MSM8916_WCD_DIGITAL is not set +# CONFIG_SND_SOC_PCM1681 is not set +# CONFIG_SND_SOC_PCM1789_I2C is not set +# CONFIG_SND_SOC_PCM179X_I2C is not set +# CONFIG_SND_SOC_PCM179X_SPI is not set +# CONFIG_SND_SOC_PCM186X_I2C is not set +# CONFIG_SND_SOC_PCM186X_SPI is not set +# CONFIG_SND_SOC_PCM3168A_I2C is not set +# CONFIG_SND_SOC_PCM3168A_SPI is not set +# CONFIG_SND_SOC_PCM512x_I2C is not set +# CONFIG_SND_SOC_PCM512x_SPI is not set +CONFIG_SND_SOC_RL6231=m +CONFIG_SND_SOC_RT5514=m +CONFIG_SND_SOC_RT5514_SPI=m +# CONFIG_SND_SOC_RT5616 is not set +# CONFIG_SND_SOC_RT5631 is not set +CONFIG_SND_SOC_RT5645=m +# CONFIG_SND_SOC_SGTL5000 is not set +# CONFIG_SND_SOC_SIMPLE_AMPLIFIER is not set +# CONFIG_SND_SOC_SIRF_AUDIO_CODEC is not set +# CONFIG_SND_SOC_SPDIF is not set +# CONFIG_SND_SOC_SSM2305 is not set +# CONFIG_SND_SOC_SSM2602_SPI is not set +# CONFIG_SND_SOC_SSM2602_I2C is not set +# CONFIG_SND_SOC_SSM4567 is not set +# CONFIG_SND_SOC_STA32X is not set +# CONFIG_SND_SOC_STA350 is not set +# CONFIG_SND_SOC_STI_SAS is not set +# CONFIG_SND_SOC_TAS2552 is 
not set +# CONFIG_SND_SOC_TAS5086 is not set +# CONFIG_SND_SOC_TAS571X is not set +# CONFIG_SND_SOC_TAS5720 is not set +# CONFIG_SND_SOC_TAS6424 is not set +# CONFIG_SND_SOC_TDA7419 is not set +# CONFIG_SND_SOC_TFA9879 is not set +# CONFIG_SND_SOC_TLV320AIC23_I2C is not set +# CONFIG_SND_SOC_TLV320AIC23_SPI is not set +# CONFIG_SND_SOC_TLV320AIC31XX is not set +# CONFIG_SND_SOC_TLV320AIC32X4_I2C is not set +# CONFIG_SND_SOC_TLV320AIC32X4_SPI is not set +# CONFIG_SND_SOC_TLV320AIC3X is not set +# CONFIG_SND_SOC_TS3A227E is not set +# CONFIG_SND_SOC_TSCS42XX is not set +# CONFIG_SND_SOC_TSCS454 is not set +# CONFIG_SND_SOC_WM8510 is not set +# CONFIG_SND_SOC_WM8523 is not set +# CONFIG_SND_SOC_WM8524 is not set +# CONFIG_SND_SOC_WM8580 is not set +# CONFIG_SND_SOC_WM8711 is not set +# CONFIG_SND_SOC_WM8728 is not set +# CONFIG_SND_SOC_WM8731 is not set +# CONFIG_SND_SOC_WM8737 is not set +# CONFIG_SND_SOC_WM8741 is not set +# CONFIG_SND_SOC_WM8750 is not set +# CONFIG_SND_SOC_WM8753 is not set +# CONFIG_SND_SOC_WM8770 is not set +# CONFIG_SND_SOC_WM8776 is not set +# CONFIG_SND_SOC_WM8782 is not set +# CONFIG_SND_SOC_WM8804_I2C is not set +# CONFIG_SND_SOC_WM8804_SPI is not set +# CONFIG_SND_SOC_WM8903 is not set +# CONFIG_SND_SOC_WM8960 is not set +# CONFIG_SND_SOC_WM8962 is not set +# CONFIG_SND_SOC_WM8974 is not set +# CONFIG_SND_SOC_WM8978 is not set +# CONFIG_SND_SOC_WM8985 is not set +# CONFIG_SND_SOC_ZX_AUD96P22 is not set +# CONFIG_SND_SOC_MAX9759 is not set +# CONFIG_SND_SOC_MT6351 is not set +# CONFIG_SND_SOC_NAU8540 is not set +# CONFIG_SND_SOC_NAU8810 is not set +# CONFIG_SND_SOC_NAU8824 is not set +# CONFIG_SND_SOC_TPA6130A2 is not set +CONFIG_SND_SIMPLE_CARD_UTILS=m +CONFIG_SND_SIMPLE_CARD=m +# CONFIG_SND_SIMPLE_SCU_CARD is not set +CONFIG_SND_AUDIO_GRAPH_CARD=m +# CONFIG_SND_AUDIO_GRAPH_SCU_CARD is not set +# CONFIG_SND_XEN_FRONTEND is not set + +# +# HID support +# +CONFIG_HID=y +# CONFIG_HID_BATTERY_STRENGTH is not set +# CONFIG_HIDRAW is not set +# 
CONFIG_UHID is not set +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +CONFIG_HID_A4TECH=y +# CONFIG_HID_ACCUTOUCH is not set +# CONFIG_HID_ACRUX is not set +CONFIG_HID_APPLE=y +# CONFIG_HID_APPLEIR is not set +# CONFIG_HID_ASUS is not set +# CONFIG_HID_AUREAL is not set +CONFIG_HID_BELKIN=y +# CONFIG_HID_BETOP_FF is not set +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +# CONFIG_HID_CORSAIR is not set +# CONFIG_HID_COUGAR is not set +# CONFIG_HID_PRODIKEYS is not set +# CONFIG_HID_CMEDIA is not set +CONFIG_HID_CYPRESS=y +# CONFIG_HID_DRAGONRISE is not set +# CONFIG_HID_EMS_FF is not set +# CONFIG_HID_ELAN is not set +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_ELO is not set +CONFIG_HID_EZKEY=y +# CONFIG_HID_GEMBIRD is not set +# CONFIG_HID_GFRM is not set +# CONFIG_HID_HOLTEK is not set +# CONFIG_HID_GOOGLE_HAMMER is not set +# CONFIG_HID_GT683R is not set +# CONFIG_HID_KEYTOUCH is not set +# CONFIG_HID_KYE is not set +# CONFIG_HID_UCLOGIC is not set +# CONFIG_HID_WALTOP is not set +# CONFIG_HID_GYRATION is not set +# CONFIG_HID_ICADE is not set +CONFIG_HID_ITE=y +# CONFIG_HID_JABRA is not set +# CONFIG_HID_TWINHAN is not set +CONFIG_HID_KENSINGTON=y +# CONFIG_HID_LCPOWER is not set +# CONFIG_HID_LED is not set +# CONFIG_HID_LENOVO is not set +CONFIG_HID_LOGITECH=y +# CONFIG_HID_LOGITECH_HIDPP is not set +# CONFIG_LOGITECH_FF is not set +# CONFIG_LOGIRUMBLEPAD2_FF is not set +# CONFIG_LOGIG940_FF is not set +# CONFIG_LOGIWHEELS_FF is not set +# CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_MAYFLASH is not set +CONFIG_HID_REDRAGON=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +# CONFIG_HID_MULTITOUCH is not set +# CONFIG_HID_NTI is not set +# CONFIG_HID_NTRIG is not set +# CONFIG_HID_ORTEK is not set +# CONFIG_HID_PANTHERLORD is not set +# CONFIG_HID_PENMOUNT is not set +# CONFIG_HID_PETALYNX is not set +# CONFIG_HID_PICOLCD is not set +# CONFIG_HID_PLANTRONICS is not set +# CONFIG_HID_PRIMAX is not set +# CONFIG_HID_RETRODE is not set +# CONFIG_HID_ROCCAT 
is not set +# CONFIG_HID_SAITEK is not set +# CONFIG_HID_SAMSUNG is not set +# CONFIG_HID_SONY is not set +# CONFIG_HID_SPEEDLINK is not set +# CONFIG_HID_STEAM is not set +# CONFIG_HID_STEELSERIES is not set +# CONFIG_HID_SUNPLUS is not set +# CONFIG_HID_RMI is not set +# CONFIG_HID_GREENASIA is not set +# CONFIG_HID_SMARTJOYPLUS is not set +# CONFIG_HID_TIVO is not set +# CONFIG_HID_TOPSEED is not set +# CONFIG_HID_THINGM is not set +# CONFIG_HID_THRUSTMASTER is not set +# CONFIG_HID_UDRAW_PS3 is not set +# CONFIG_HID_WACOM is not set +# CONFIG_HID_WIIMOTE is not set +# CONFIG_HID_XINMO is not set +# CONFIG_HID_ZEROPLUS is not set +# CONFIG_HID_ZYDACRON is not set +# CONFIG_HID_SENSOR_HUB is not set +# CONFIG_HID_ALPS is not set + +# +# USB HID support +# +CONFIG_USB_HID=y +# CONFIG_HID_PID is not set +# CONFIG_USB_HIDDEV is not set + +# +# I2C HID support +# +CONFIG_I2C_HID=m +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +CONFIG_USB_PCI=y +# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_DYNAMIC_MINORS is not set +CONFIG_USB_OTG=y +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +# CONFIG_USB_OTG_FSM is not set +# CONFIG_USB_LEDS_TRIGGER_USBPORT is not set +# CONFIG_USB_MON is not set +# CONFIG_USB_WUSB_CBAF is not set + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=y +# CONFIG_USB_XHCI_DBGCAP is not set +CONFIG_USB_XHCI_PCI=y +CONFIG_USB_XHCI_PLATFORM=y +# CONFIG_USB_XHCI_HISTB is not set +# CONFIG_USB_XHCI_MTK is not set +# CONFIG_USB_XHCI_MVEBU is not set +CONFIG_USB_XHCI_RCAR=y +CONFIG_USB_XHCI_TEGRA=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=y +CONFIG_USB_EHCI_HCD_ORION=y +# CONFIG_USB_EHCI_TEGRA is not set +CONFIG_USB_EHCI_EXYNOS=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +# 
CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_FOTG210_HCD is not set +# CONFIG_USB_MAX3421_HCD is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PCI=y +CONFIG_USB_OHCI_EXYNOS=y +CONFIG_USB_OHCI_HCD_PLATFORM=y +# CONFIG_USB_UHCI_HCD is not set +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_RENESAS_USBHS_HCD is not set +# CONFIG_USB_HCD_TEST_MODE is not set +CONFIG_USB_RENESAS_USBHS=m + +# +# USB Device Class drivers +# +# CONFIG_USB_ACM is not set +# CONFIG_USB_PRINTER is not set +# CONFIG_USB_WDM is not set +# CONFIG_USB_TMC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=y +# CONFIG_USB_STORAGE_DEBUG is not set +CONFIG_USB_STORAGE_REALTEK=y +CONFIG_REALTEK_AUTOPM=y +CONFIG_USB_STORAGE_DATAFAB=y +CONFIG_USB_STORAGE_FREECOM=y +CONFIG_USB_STORAGE_ISD200=y +CONFIG_USB_STORAGE_USBAT=y +CONFIG_USB_STORAGE_SDDR09=y +CONFIG_USB_STORAGE_SDDR55=y +CONFIG_USB_STORAGE_JUMPSHOT=y +CONFIG_USB_STORAGE_ALAUDA=y +CONFIG_USB_STORAGE_ONETOUCH=y +CONFIG_USB_STORAGE_KARMA=y +CONFIG_USB_STORAGE_CYPRESS_ATACB=y +CONFIG_USB_STORAGE_ENE_UB6250=y +CONFIG_USB_UAS=y + +# +# USB Imaging devices +# +# CONFIG_USB_MDC800 is not set +# CONFIG_USB_MICROTEK is not set +# CONFIG_USBIP_CORE is not set +# CONFIG_USB_MTU3 is not set +CONFIG_USB_MUSB_HDRC=y +# CONFIG_USB_MUSB_HOST is not set +# CONFIG_USB_MUSB_GADGET is not set +CONFIG_USB_MUSB_DUAL_ROLE=y + +# +# Platform Glue Layer +# +CONFIG_USB_MUSB_SUNXI=y + +# +# MUSB DMA mode +# +# CONFIG_MUSB_PIO_ONLY is not set +CONFIG_USB_DWC3=y +# CONFIG_USB_DWC3_ULPI is not set +# CONFIG_USB_DWC3_HOST is not set +# CONFIG_USB_DWC3_GADGET is not set +CONFIG_USB_DWC3_DUAL_ROLE=y + +# +# Platform Glue Driver Support +# +CONFIG_USB_DWC3_EXYNOS=y +CONFIG_USB_DWC3_HAPS=y +CONFIG_USB_DWC3_OF_SIMPLE=y +CONFIG_USB_DWC3_QCOM=y +CONFIG_USB_DWC2=y +# CONFIG_USB_DWC2_HOST is not 
set + +# +# Gadget/Dual-role mode requires USB Gadget support to be enabled +# +# CONFIG_USB_DWC2_PERIPHERAL is not set +CONFIG_USB_DWC2_DUAL_ROLE=y +# CONFIG_USB_DWC2_PCI is not set +# CONFIG_USB_DWC2_DEBUG is not set +# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set +CONFIG_USB_CHIPIDEA=y +CONFIG_USB_CHIPIDEA_OF=y +CONFIG_USB_CHIPIDEA_PCI=y +CONFIG_USB_CHIPIDEA_UDC=y +CONFIG_USB_CHIPIDEA_HOST=y +CONFIG_USB_ISP1760=y +CONFIG_USB_ISP1760_HCD=y +CONFIG_USB_ISP1761_UDC=y +# CONFIG_USB_ISP1760_HOST_ROLE is not set +# CONFIG_USB_ISP1760_GADGET_ROLE is not set +CONFIG_USB_ISP1760_DUAL_ROLE=y + +# +# USB port drivers +# +# CONFIG_USB_SERIAL is not set + +# +# USB Miscellaneous drivers +# +# CONFIG_USB_EMI62 is not set +# CONFIG_USB_EMI26 is not set +# CONFIG_USB_ADUTUX is not set +# CONFIG_USB_SEVSEG is not set +# CONFIG_USB_LEGOTOWER is not set +# CONFIG_USB_LCD is not set +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +# CONFIG_USB_IDMOUSE is not set +# CONFIG_USB_FTDI_ELAN is not set +# CONFIG_USB_APPLEDISPLAY is not set +# CONFIG_USB_SISUSBVGA is not set +# CONFIG_USB_LD is not set +# CONFIG_USB_TRANCEVIBRATOR is not set +# CONFIG_USB_IOWARRIOR is not set +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set +# CONFIG_USB_ISIGHTFW is not set +# CONFIG_USB_YUREX is not set +# CONFIG_USB_EZUSB_FX2 is not set +# CONFIG_USB_HUB_USB251XB is not set +CONFIG_USB_HSIC_USB3503=y +# CONFIG_USB_HSIC_USB4604 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set + +# +# USB Physical Layer drivers +# +CONFIG_USB_PHY=y +CONFIG_NOP_USB_XCEIV=y +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ISP1301 is not set +# CONFIG_USB_TEGRA_PHY is not set +CONFIG_USB_ULPI=y +CONFIG_USB_ULPI_VIEWPORT=y +CONFIG_USB_GADGET=y +# CONFIG_USB_GADGET_DEBUG is not set +# CONFIG_USB_GADGET_DEBUG_FILES is not set +# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=2 +CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2 + +# +# USB Peripheral Controller +# +# 
CONFIG_USB_FOTG210_UDC is not set +# CONFIG_USB_GR_UDC is not set +# CONFIG_USB_R8A66597 is not set +CONFIG_USB_RENESAS_USBHS_UDC=m +CONFIG_USB_RENESAS_USB3=m +# CONFIG_USB_PXA27X is not set +# CONFIG_USB_MV_UDC is not set +# CONFIG_USB_MV_U3D is not set +CONFIG_USB_SNP_CORE=y +CONFIG_USB_SNP_UDC_PLAT=y +# CONFIG_USB_M66592 is not set +CONFIG_USB_BDC_UDC=y + +# +# Platform Support +# +CONFIG_USB_BDC_PCI=y +# CONFIG_USB_AMD5536UDC is not set +# CONFIG_USB_NET2272 is not set +# CONFIG_USB_NET2280 is not set +# CONFIG_USB_GOKU is not set +# CONFIG_USB_EG20T is not set +# CONFIG_USB_GADGET_XILINX is not set +# CONFIG_USB_DUMMY_HCD is not set +CONFIG_USB_LIBCOMPOSITE=y +CONFIG_USB_U_ETHER=y +CONFIG_USB_F_NCM=y +CONFIG_USB_F_MASS_STORAGE=y +CONFIG_USB_F_FS=y +CONFIG_USB_CONFIGFS=y +# CONFIG_USB_CONFIGFS_SERIAL is not set +# CONFIG_USB_CONFIGFS_ACM is not set +# CONFIG_USB_CONFIGFS_OBEX is not set +CONFIG_USB_CONFIGFS_NCM=y +# CONFIG_USB_CONFIGFS_ECM is not set +# CONFIG_USB_CONFIGFS_ECM_SUBSET is not set +# CONFIG_USB_CONFIGFS_RNDIS is not set +# CONFIG_USB_CONFIGFS_EEM is not set +CONFIG_USB_CONFIGFS_MASS_STORAGE=y +# CONFIG_USB_CONFIGFS_F_LB_SS is not set +CONFIG_USB_CONFIGFS_F_FS=y +# CONFIG_USB_CONFIGFS_F_UAC1 is not set +# CONFIG_USB_CONFIGFS_F_UAC1_LEGACY is not set +# CONFIG_USB_CONFIGFS_F_UAC2 is not set +# CONFIG_USB_CONFIGFS_F_MIDI is not set +# CONFIG_USB_CONFIGFS_F_HID is not set +# CONFIG_USB_CONFIGFS_F_UVC is not set +# CONFIG_USB_CONFIGFS_F_PRINTER is not set +# CONFIG_TYPEC is not set +CONFIG_USB_ROLE_SWITCH=m +# CONFIG_USB_LED_TRIG is not set +CONFIG_USB_ULPI_BUS=y +# CONFIG_UWB is not set +CONFIG_MMC=y +CONFIG_PWRSEQ_EMMC=y +# CONFIG_PWRSEQ_SD8787 is not set +CONFIG_PWRSEQ_SIMPLE=y +CONFIG_MMC_BLOCK=y +CONFIG_MMC_BLOCK_MINORS=32 +# CONFIG_SDIO_UART is not set +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_ARMMMCI=y +CONFIG_MMC_QCOM_DML=y +CONFIG_MMC_SDHCI=y 
+CONFIG_MMC_SDHCI_IO_ACCESSORS=y +# CONFIG_MMC_SDHCI_PCI is not set +CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_SDHCI_OF_ARASAN=y +# CONFIG_MMC_SDHCI_OF_AT91 is not set +CONFIG_MMC_SDHCI_OF_ESDHC=y +# CONFIG_MMC_SDHCI_OF_DWCMSHC is not set +CONFIG_MMC_SDHCI_CADENCE=y +CONFIG_MMC_SDHCI_TEGRA=y +# CONFIG_MMC_SDHCI_PXAV3 is not set +CONFIG_MMC_SDHCI_F_SDH30=y +CONFIG_MMC_SDHCI_IPROC=y +CONFIG_MMC_MESON_GX=y +# CONFIG_MMC_MESON_MX_SDIO is not set +CONFIG_MMC_SDHCI_MSM=y +# CONFIG_MMC_TIFM_SD is not set +CONFIG_MMC_SPI=y +CONFIG_MMC_TMIO_CORE=y +CONFIG_MMC_SDHI=y +# CONFIG_MMC_SDHI_SYS_DMAC is not set +CONFIG_MMC_SDHI_INTERNAL_DMAC=y +# CONFIG_MMC_CB710 is not set +# CONFIG_MMC_VIA_SDMMC is not set +CONFIG_MMC_DW=y +CONFIG_MMC_DW_PLTFM=y +# CONFIG_MMC_DW_BLUEFIELD is not set +CONFIG_MMC_DW_EXYNOS=y +CONFIG_MMC_DW_HI3798CV200=y +CONFIG_MMC_DW_K3=y +# CONFIG_MMC_DW_PCI is not set +CONFIG_MMC_DW_ROCKCHIP=y +# CONFIG_MMC_DW_ZX is not set +# CONFIG_MMC_SH_MMCIF is not set +# CONFIG_MMC_VUB300 is not set +# CONFIG_MMC_USHC is not set +# CONFIG_MMC_USDHI6ROL0 is not set +CONFIG_MMC_SUNXI=y +CONFIG_MMC_CQHCI=y +# CONFIG_MMC_TOSHIBA_PCI is not set +CONFIG_MMC_BCM2835=y +# CONFIG_MMC_MTK is not set +CONFIG_MMC_SDHCI_BRCMSTB=y +CONFIG_MMC_SDHCI_XENON=y +# CONFIG_MMC_SDHCI_OMAP is not set +# CONFIG_MMC_PHYTIUM_MCI_PCI is not set +# CONFIG_MEMSTICK is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +# CONFIG_LEDS_CLASS_FLASH is not set +# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set + +# +# LED drivers +# +# CONFIG_LEDS_BCM6328 is not set +# CONFIG_LEDS_BCM6358 is not set +# CONFIG_LEDS_CR0014114 is not set +# CONFIG_LEDS_LM3530 is not set +# CONFIG_LEDS_LM3642 is not set +# CONFIG_LEDS_LM3692X is not set +# CONFIG_LEDS_PCA9532 is not set +CONFIG_LEDS_GPIO=y +# CONFIG_LEDS_LP3944 is not set +# CONFIG_LEDS_LP3952 is not set +# CONFIG_LEDS_LP5521 is not set +# CONFIG_LEDS_LP5523 is not set +# CONFIG_LEDS_LP5562 is not set +# CONFIG_LEDS_LP8501 is not set +# CONFIG_LEDS_LP8860 is not set +# 
CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_PCA963X is not set +# CONFIG_LEDS_DAC124S085 is not set +CONFIG_LEDS_PWM=y +# CONFIG_LEDS_REGULATOR is not set +# CONFIG_LEDS_BD2802 is not set +# CONFIG_LEDS_LT3593 is not set +# CONFIG_LEDS_TCA6507 is not set +# CONFIG_LEDS_TLC591XX is not set +# CONFIG_LEDS_LM355x is not set +# CONFIG_LEDS_IS31FL319X is not set +# CONFIG_LEDS_IS31FL32XX is not set + +# +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) +# +# CONFIG_LEDS_BLINKM is not set +CONFIG_LEDS_SYSCON=y +# CONFIG_LEDS_MLXREG is not set +# CONFIG_LEDS_USER is not set + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +# CONFIG_LEDS_TRIGGER_TIMER is not set +# CONFIG_LEDS_TRIGGER_ONESHOT is not set +CONFIG_LEDS_TRIGGER_DISK=y +# CONFIG_LEDS_TRIGGER_MTD is not set +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set +CONFIG_LEDS_TRIGGER_CPU=y +# CONFIG_LEDS_TRIGGER_ACTIVITY is not set +# CONFIG_LEDS_TRIGGER_GPIO is not set +CONFIG_LEDS_TRIGGER_DEFAULT_ON=y + +# +# iptables trigger is under Netfilter config (LED target) +# +# CONFIG_LEDS_TRIGGER_TRANSIENT is not set +# CONFIG_LEDS_TRIGGER_CAMERA is not set +CONFIG_LEDS_TRIGGER_PANIC=y +# CONFIG_LEDS_TRIGGER_NETDEV is not set +# CONFIG_ACCESSIBILITY is not set +# CONFIG_INFINIBAND is not set +CONFIG_EDAC_SUPPORT=y +CONFIG_EDAC=y +CONFIG_EDAC_LEGACY_SYSFS=y +# CONFIG_EDAC_DEBUG is not set +# CONFIG_EDAC_LAYERSCAPE is not set +# CONFIG_EDAC_THUNDERX is not set +# CONFIG_EDAC_ALTERA is not set +# CONFIG_EDAC_XGENE is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_ABB5ZES3 is not set +# 
CONFIG_RTC_DRV_ABX80X is not set +CONFIG_RTC_DRV_BRCMSTB=y +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_HYM8563 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +CONFIG_RTC_DRV_MAX77686=y +CONFIG_RTC_DRV_RK808=m +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_ISL12026 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8523 is not set +# CONFIG_RTC_DRV_PCF85063 is not set +# CONFIG_RTC_DRV_PCF85363 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8010 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set +# CONFIG_RTC_DRV_EM3027 is not set +# CONFIG_RTC_DRV_RV8803 is not set +CONFIG_RTC_DRV_S5M=y + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T93 is not set +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1302 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1343 is not set +# CONFIG_RTC_DRV_DS1347 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6916 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RX4581 is not set +# CONFIG_RTC_DRV_RX6110 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_PCF2123 is not set +# CONFIG_RTC_DRV_MCP795 is not set +CONFIG_RTC_I2C_AND_SPI=y + +# +# SPI and I2C RTC drivers +# +CONFIG_RTC_DRV_DS3232=y +CONFIG_RTC_DRV_DS3232_HWMON=y +# CONFIG_RTC_DRV_PCF2127 is not set +# CONFIG_RTC_DRV_RV3029C2 is not set + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1685_FAMILY is not set +# CONFIG_RTC_DRV_DS1742 is not set +# 
CONFIG_RTC_DRV_DS2404 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set +# CONFIG_RTC_DRV_ZYNQMP is not set +CONFIG_RTC_DRV_CROS_EC=y + +# +# on-CPU RTC drivers +# +CONFIG_HAVE_S3C_RTC=y +CONFIG_RTC_DRV_S3C=y +# CONFIG_RTC_DRV_SH is not set +# CONFIG_RTC_DRV_PL030 is not set +CONFIG_RTC_DRV_PL031=y +CONFIG_RTC_DRV_SUN6I=y +# CONFIG_RTC_DRV_MV is not set +CONFIG_RTC_DRV_ARMADA38X=y +# CONFIG_RTC_DRV_FTRTC010 is not set +# CONFIG_RTC_DRV_PM8XXX is not set +CONFIG_RTC_DRV_TEGRA=y +# CONFIG_RTC_DRV_SNVS is not set +# CONFIG_RTC_DRV_MT7622 is not set +CONFIG_RTC_DRV_XGENE=y +# CONFIG_RTC_DRV_R7301 is not set + +# +# HID Sensor RTC drivers +# +# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set +CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices +# +CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=y +CONFIG_DMA_ENGINE=y +CONFIG_DMA_VIRTUAL_CHANNELS=y +CONFIG_DMA_OF=y +# CONFIG_ALTERA_MSGDMA is not set +# CONFIG_AMBA_PL08X is not set +CONFIG_BCM_SBA_RAID=m +CONFIG_DMA_BCM2835=m +# CONFIG_DMA_SUN6I is not set +# CONFIG_DW_AXI_DMAC is not set +# CONFIG_FSL_EDMA is not set +# CONFIG_INTEL_IDMA64 is not set +CONFIG_K3_DMA=y +# CONFIG_MV_XOR is not set +CONFIG_MV_XOR_V2=y +CONFIG_PL330_DMA=y +# CONFIG_SPRD_DMA is not set +CONFIG_TEGRA20_APB_DMA=y +# CONFIG_TEGRA210_ADMA is not set +# CONFIG_XGENE_DMA is not set +# CONFIG_XILINX_DMA is not set +# CONFIG_XILINX_ZYNQMP_DMA is not set +# CONFIG_ZX_DMA is not set +# CONFIG_MTK_HSDMA is not set +CONFIG_QCOM_BAM_DMA=y +CONFIG_QCOM_HIDMA_MGMT=y +CONFIG_QCOM_HIDMA=y +# CONFIG_DW_DMAC is not set +# CONFIG_DW_DMAC_PCI is not set +CONFIG_RENESAS_DMA=y +CONFIG_SH_DMAE_BASE=y +# CONFIG_SH_DMAE is not set +CONFIG_RCAR_DMAC=y +CONFIG_RENESAS_USB_DMAC=m +# CONFIG_SUDMAC is not set + +# +# 
DMA Clients +# +# CONFIG_ASYNC_TX_DMA is not set +# CONFIG_DMATEST is not set +CONFIG_DMA_ENGINE_RAID=y + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +# CONFIG_AUXDISPLAY is not set +# CONFIG_UIO is not set +CONFIG_VFIO_IOMMU_TYPE1=y +CONFIG_VFIO_VIRQFD=y +CONFIG_VFIO=y +# CONFIG_VFIO_NOIOMMU is not set +CONFIG_VFIO_PCI=y +CONFIG_VFIO_PCI_MMAP=y +CONFIG_VFIO_PCI_INTX=y +# CONFIG_VFIO_PLATFORM is not set +# CONFIG_VFIO_MDEV is not set +# CONFIG_VIRT_DRIVERS is not set +CONFIG_VIRTIO=y +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTIO_BALLOON=y +# CONFIG_VIRTIO_INPUT is not set +CONFIG_VIRTIO_MMIO=y +# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set + +# +# Microsoft Hyper-V guest support +# + +# +# Xen driver support +# +CONFIG_XEN_BALLOON=y +CONFIG_XEN_SCRUB_PAGES_DEFAULT=y +CONFIG_XEN_DEV_EVTCHN=y +CONFIG_XEN_BACKEND=y +CONFIG_XENFS=y +CONFIG_XEN_COMPAT_XENFS=y +CONFIG_XEN_SYS_HYPERVISOR=y +CONFIG_XEN_XENBUS_FRONTEND=y +CONFIG_XEN_GNTDEV=y +CONFIG_XEN_GRANT_DEV_ALLOC=y +# CONFIG_XEN_GRANT_DMA_ALLOC is not set +CONFIG_SWIOTLB_XEN=y +# CONFIG_XEN_PVCALLS_FRONTEND is not set +# CONFIG_XEN_PVCALLS_BACKEND is not set +CONFIG_XEN_PRIVCMD=y +CONFIG_XEN_AUTO_XLATE=y +CONFIG_STAGING=y +# CONFIG_PRISM2_USB is not set +# CONFIG_COMEDI is not set +# CONFIG_RTL8192U is not set +# CONFIG_RTLLIB is not set +# CONFIG_RTL8723BS is not set +# CONFIG_R8712U is not set +# CONFIG_R8188EU is not set +# CONFIG_R8822BE is not set +# CONFIG_RTS5208 is not set +# CONFIG_VT6655 is not set +# CONFIG_VT6656 is not set + +# +# IIO staging drivers +# + +# +# Accelerometers +# +# CONFIG_ADIS16203 is not set +# CONFIG_ADIS16240 is not set + +# +# Analog to digital converters +# +# CONFIG_AD7606 is not set +# CONFIG_AD7780 is not set +# CONFIG_AD7816 is not set +# CONFIG_AD7192 is not set +# CONFIG_AD7280 is not set + +# +# Analog digital bi-direction converters +# +# CONFIG_ADT7316 is not set + +# +# Capacitance to digital converters 
+# +# CONFIG_AD7150 is not set +# CONFIG_AD7152 is not set +# CONFIG_AD7746 is not set + +# +# Direct Digital Synthesis +# +# CONFIG_AD9832 is not set +# CONFIG_AD9834 is not set + +# +# Network Analyzer, Impedance Converters +# +# CONFIG_AD5933 is not set + +# +# Active energy metering IC +# +# CONFIG_ADE7854 is not set + +# +# Resolver to digital converters +# +# CONFIG_AD2S90 is not set +# CONFIG_AD2S1210 is not set +# CONFIG_FB_SM750 is not set +# CONFIG_FB_XGI is not set +# CONFIG_USB_EMXX is not set + +# +# Speakup console speech +# +# CONFIG_SPEAKUP is not set +# CONFIG_MFD_NVEC is not set +# CONFIG_STAGING_MEDIA is not set + +# +# Android +# +CONFIG_ASHMEM=y +# CONFIG_ANDROID_VSOC is not set +CONFIG_ION=y +CONFIG_ION_SYSTEM_HEAP=y +# CONFIG_ION_CARVEOUT_HEAP is not set +# CONFIG_ION_CHUNK_HEAP is not set +CONFIG_ION_CMA_HEAP=y +# CONFIG_STAGING_BOARD is not set +# CONFIG_LTE_GDM724X is not set +# CONFIG_MTD_SPINAND_MT29F is not set +# CONFIG_DGNC is not set +# CONFIG_GS_FPGABOOT is not set +# CONFIG_UNISYSSPAR is not set +# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set +# CONFIG_FB_TFT is not set +# CONFIG_WILC1000_SDIO is not set +# CONFIG_WILC1000_SPI is not set +# CONFIG_MOST is not set +# CONFIG_KS7010 is not set +# CONFIG_GREYBUS is not set +CONFIG_BCM_VIDEOCORE=y +# CONFIG_BCM2835_VCHIQ is not set +# CONFIG_SND_BCM2835 is not set +# CONFIG_VIDEO_BCM2835 is not set +# CONFIG_PI433 is not set +# CONFIG_MTK_MMC is not set + +# +# Gasket devices +# +# CONFIG_STAGING_GASKET_FRAMEWORK is not set +# CONFIG_XIL_AXIS_FIFO is not set +# CONFIG_EROFS_FS is not set +CONFIG_HILOG=y +CONFIG_HILOG_BUFFER_SIZE=4096 +CONFIG_HIEVENT=y +# CONFIG_GOLDFISH is not set +CONFIG_CHROME_PLATFORMS=y +CONFIG_CROS_EC_CTL=m +CONFIG_CROS_EC_I2C=y +CONFIG_CROS_EC_SPI=y +CONFIG_CROS_EC_PROTO=y +CONFIG_CLKDEV_LOOKUP=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y + +# +# Common Clock Framework +# +CONFIG_COMMON_CLK_VERSATILE=y +CONFIG_CLK_SP810=y +CONFIG_CLK_VEXPRESS_OSC=y +# 
CONFIG_CLK_HSDK is not set +# CONFIG_COMMON_CLK_MAX77686 is not set +# CONFIG_COMMON_CLK_MAX9485 is not set +CONFIG_COMMON_CLK_RK808=y +CONFIG_COMMON_CLK_HI655X=y +# CONFIG_COMMON_CLK_SCPI is not set +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI514 is not set +# CONFIG_COMMON_CLK_SI544 is not set +# CONFIG_COMMON_CLK_SI570 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CDCE925 is not set +CONFIG_COMMON_CLK_CS2000_CP=y +CONFIG_COMMON_CLK_S2MPS11=y +CONFIG_CLK_QORIQ=y +# CONFIG_COMMON_CLK_XGENE is not set +CONFIG_COMMON_CLK_PWM=y +# CONFIG_COMMON_CLK_VC5 is not set +CONFIG_COMMON_CLK_IPROC=y +CONFIG_CLK_BCM_NS2=y +CONFIG_CLK_BCM_SR=y +CONFIG_COMMON_CLK_HI3516CV300=y +CONFIG_COMMON_CLK_HI3519=y +CONFIG_COMMON_CLK_HI3660=y +CONFIG_COMMON_CLK_HI3798CV200=y +CONFIG_COMMON_CLK_HI6220=y +CONFIG_RESET_HISI=y +CONFIG_STUB_CLK_HI6220=y +CONFIG_STUB_CLK_HI3660=y + +# +# Clock driver for MediaTek SoC +# +CONFIG_COMMON_CLK_MEDIATEK=y +CONFIG_COMMON_CLK_MT2712=y +# CONFIG_COMMON_CLK_MT2712_BDPSYS is not set +# CONFIG_COMMON_CLK_MT2712_IMGSYS is not set +# CONFIG_COMMON_CLK_MT2712_JPGDECSYS is not set +# CONFIG_COMMON_CLK_MT2712_MFGCFG is not set +# CONFIG_COMMON_CLK_MT2712_MMSYS is not set +# CONFIG_COMMON_CLK_MT2712_VDECSYS is not set +# CONFIG_COMMON_CLK_MT2712_VENCSYS is not set +CONFIG_COMMON_CLK_MT6797=y +# CONFIG_COMMON_CLK_MT6797_MMSYS is not set +# CONFIG_COMMON_CLK_MT6797_IMGSYS is not set +# CONFIG_COMMON_CLK_MT6797_VDECSYS is not set +# CONFIG_COMMON_CLK_MT6797_VENCSYS is not set +CONFIG_COMMON_CLK_MT7622=y +# CONFIG_COMMON_CLK_MT7622_ETHSYS is not set +# CONFIG_COMMON_CLK_MT7622_HIFSYS is not set +# CONFIG_COMMON_CLK_MT7622_AUDSYS is not set +CONFIG_COMMON_CLK_MT8173=y +CONFIG_COMMON_CLK_AMLOGIC=y +CONFIG_COMMON_CLK_MESON_AO=y +CONFIG_COMMON_CLK_REGMAP_MESON=y +CONFIG_COMMON_CLK_GXBB=y +CONFIG_COMMON_CLK_AXG=y +# CONFIG_COMMON_CLK_AXG_AUDIO is not set +CONFIG_ARMADA_37XX_CLK=y +CONFIG_ARMADA_AP806_SYSCON=y 
+CONFIG_ARMADA_CP110_SYSCON=y +CONFIG_QCOM_GDSC=y +CONFIG_QCOM_RPMCC=y +CONFIG_COMMON_CLK_QCOM=y +CONFIG_QCOM_A53PLL=y +CONFIG_QCOM_CLK_APCS_MSM8916=y +CONFIG_QCOM_CLK_SMD_RPM=y +# CONFIG_APQ_GCC_8084 is not set +# CONFIG_APQ_MMCC_8084 is not set +# CONFIG_IPQ_GCC_4019 is not set +# CONFIG_IPQ_GCC_806X is not set +# CONFIG_IPQ_LCC_806X is not set +CONFIG_IPQ_GCC_8074=y +# CONFIG_MSM_GCC_8660 is not set +CONFIG_MSM_GCC_8916=y +# CONFIG_MSM_GCC_8960 is not set +# CONFIG_MSM_LCC_8960 is not set +# CONFIG_MDM_GCC_9615 is not set +# CONFIG_MDM_LCC_9615 is not set +# CONFIG_MSM_MMCC_8960 is not set +# CONFIG_MSM_GCC_8974 is not set +# CONFIG_MSM_MMCC_8974 is not set +CONFIG_MSM_GCC_8994=y +CONFIG_MSM_GCC_8996=y +CONFIG_MSM_MMCC_8996=y +# CONFIG_MSM_GCC_8998 is not set +# CONFIG_SDM_GCC_845 is not set +# CONFIG_SDM_VIDEOCC_845 is not set +# CONFIG_SDM_DISPCC_845 is not set +# CONFIG_SPMI_PMIC_CLKDIV is not set +CONFIG_CLK_RENESAS=y +CONFIG_CLK_R8A7795=y +CONFIG_CLK_R8A7796=y +CONFIG_CLK_R8A77965=y +CONFIG_CLK_R8A77970=y +CONFIG_CLK_R8A77980=y +CONFIG_CLK_R8A77990=y +CONFIG_CLK_R8A77995=y +# CONFIG_CLK_R9A06G032 is not set +CONFIG_CLK_RCAR_GEN3_CPG=y +# CONFIG_CLK_RCAR_USB2_CLOCK_SEL is not set +CONFIG_CLK_RENESAS_CPG_MSSR=y +CONFIG_CLK_RENESAS_DIV6=y +CONFIG_COMMON_CLK_SAMSUNG=y +CONFIG_EXYNOS_ARM64_COMMON_CLK=y +CONFIG_EXYNOS_AUDSS_CLK_CON=y +CONFIG_SPRD_COMMON_CLK=y +CONFIG_SPRD_SC9860_CLK=y +CONFIG_SUNXI_CCU=y +CONFIG_SUN50I_A64_CCU=y +CONFIG_SUN50I_H6_CCU=y +CONFIG_SUN50I_H6_R_CCU=y +# CONFIG_SUN8I_A83T_CCU is not set +CONFIG_SUN8I_H3_CCU=y +# CONFIG_SUN8I_DE2_CCU is not set +CONFIG_SUN8I_R_CCU=y +CONFIG_CLK_TEGRA_BPMP=y +CONFIG_CLK_UNIPHIER=y +CONFIG_HWSPINLOCK=y +CONFIG_HWSPINLOCK_QCOM=y +# CONFIG_HWSPINLOCK_SPRD is not set + +# +# Clock Source drivers +# +CONFIG_TIMER_OF=y +CONFIG_TIMER_PROBE=y +CONFIG_CLKSRC_MMIO=y +CONFIG_DW_APB_TIMER=y +CONFIG_DW_APB_TIMER_OF=y +CONFIG_ROCKCHIP_TIMER=y +CONFIG_ARM_ARCH_TIMER=y +# CONFIG_ARM_ARCH_TIMER_EVTSTREAM is not set 
+CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND=y +# CONFIG_FSL_ERRATUM_A008585 is not set +CONFIG_HISILICON_ERRATUM_161010101=y +CONFIG_ARM64_ERRATUM_858921=y +CONFIG_SUN50I_ERRATUM_UNKNOWN1=y +CONFIG_ARM_TIMER_SP804=y +CONFIG_MTK_TIMER=y +CONFIG_SPRD_TIMER=y +CONFIG_CLKSRC_VERSATILE=y +CONFIG_MAILBOX=y +CONFIG_ARM_MHU=y +CONFIG_PLATFORM_MHU=y +# CONFIG_PL320_MBOX is not set +# CONFIG_ROCKCHIP_MBOX is not set +# CONFIG_ALTERA_MBOX is not set +CONFIG_BCM2835_MBOX=y +CONFIG_HI3660_MBOX=y +CONFIG_HI6220_MBOX=y +# CONFIG_MAILBOX_TEST is not set +CONFIG_QCOM_APCS_IPC=y +CONFIG_TEGRA_HSP_MBOX=y +# CONFIG_XGENE_SLIMPRO_MBOX is not set +# CONFIG_BCM_PDC_MBOX is not set +CONFIG_BCM_FLEXRM_MBOX=m +# CONFIG_MTK_CMDQ_MBOX is not set +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +CONFIG_IOMMU_IO_PGTABLE=y +CONFIG_IOMMU_IO_PGTABLE_LPAE=y +# CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST is not set +# CONFIG_IOMMU_IO_PGTABLE_ARMV7S is not set +# CONFIG_IOMMU_DEBUGFS is not set +# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set +CONFIG_IOMMU_IOVA=y +CONFIG_OF_IOMMU=y +CONFIG_IOMMU_DMA=y +CONFIG_ROCKCHIP_IOMMU=y +CONFIG_TEGRA_IOMMU_SMMU=y +# CONFIG_EXYNOS_IOMMU is not set +# CONFIG_IPMMU_VMSA is not set +CONFIG_ARM_SMMU=y +CONFIG_ARM_SMMU_V3=y +# CONFIG_MTK_IOMMU is not set +CONFIG_QCOM_IOMMU=y + +# +# Remoteproc drivers +# +CONFIG_REMOTEPROC=y +CONFIG_PHYTIUM_REMOTEPROC=y + +# +# Rpmsg drivers +# +CONFIG_RPMSG=y +# CONFIG_RPMSG_CHAR is not set +CONFIG_RPMSG_QCOM_GLINK_NATIVE=y +CONFIG_RPMSG_QCOM_GLINK_RPM=y +# CONFIG_RPMSG_QCOM_GLINK_SMEM is not set +CONFIG_RPMSG_QCOM_SMD=y +# CONFIG_RPMSG_VIRTIO is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# +CONFIG_MESON_GX_SOCINFO=y +CONFIG_MESON_GX_PM_DOMAINS=y +CONFIG_MESON_MX_SOCINFO=y + +# +# Broadcom SoC drivers +# +CONFIG_RASPBERRYPI_POWER=y +CONFIG_SOC_BRCMSTB=y +CONFIG_BRCMSTB_PM=y + +# +# NXP/Freescale QorIQ SoC drivers +# +# CONFIG_FSL_DPAA is not set +CONFIG_FSL_GUTS=y + 
+# +# i.MX SoC drivers +# + +# +# MediaTek SoC drivers +# +CONFIG_MTK_INFRACFG=y +# CONFIG_MTK_PMIC_WRAP is not set +CONFIG_MTK_SCPSYS=y + +# +# Qualcomm SoC drivers +# +# CONFIG_QCOM_COMMAND_DB is not set +# CONFIG_QCOM_GENI_SE is not set +# CONFIG_QCOM_GSBI is not set +# CONFIG_QCOM_LLCC is not set +CONFIG_QCOM_MDT_LOADER=m +# CONFIG_QCOM_RMTFS_MEM is not set +# CONFIG_QCOM_RPMH is not set +CONFIG_QCOM_SMEM=y +CONFIG_QCOM_SMD_RPM=y +CONFIG_QCOM_SMEM_STATE=y +CONFIG_QCOM_SMP2P=y +CONFIG_QCOM_SMSM=y +# CONFIG_QCOM_WCNSS_CTRL is not set +# CONFIG_QCOM_APR is not set +CONFIG_SOC_RENESAS=y +CONFIG_SYSC_R8A7795=y +CONFIG_SYSC_R8A7796=y +CONFIG_SYSC_R8A77965=y +CONFIG_SYSC_R8A77970=y +CONFIG_SYSC_R8A77980=y +CONFIG_SYSC_R8A77990=y +CONFIG_SYSC_R8A77995=y +CONFIG_RST_RCAR=y +CONFIG_SYSC_RCAR=y +CONFIG_ROCKCHIP_GRF=y +CONFIG_ROCKCHIP_PM_DOMAINS=y +CONFIG_SOC_SAMSUNG=y +CONFIG_EXYNOS_PMU=y +CONFIG_EXYNOS_PM_DOMAINS=y +CONFIG_SUNXI_SRAM=y +CONFIG_ARCH_TEGRA_132_SOC=y +CONFIG_ARCH_TEGRA_210_SOC=y +CONFIG_ARCH_TEGRA_186_SOC=y +CONFIG_ARCH_TEGRA_194_SOC=y +CONFIG_SOC_TEGRA_FUSE=y +CONFIG_SOC_TEGRA_FLOWCTRL=y +CONFIG_SOC_TEGRA_PMC=y +CONFIG_SOC_TEGRA_POWERGATE_BPMP=y +CONFIG_ARCH_K3_AM6_SOC=y +CONFIG_SOC_TI=y + +# +# Xilinx SoC drivers +# +# CONFIG_XILINX_VCU is not set +# CONFIG_SOC_ZTE is not set +CONFIG_PM_DEVFREQ=y + +# +# DEVFREQ Governors +# +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y +# CONFIG_DEVFREQ_GOV_PERFORMANCE is not set +# CONFIG_DEVFREQ_GOV_POWERSAVE is not set +# CONFIG_DEVFREQ_GOV_USERSPACE is not set +# CONFIG_DEVFREQ_GOV_PASSIVE is not set + +# +# DEVFREQ Drivers +# +# CONFIG_ARM_EXYNOS_BUS_DEVFREQ is not set +# CONFIG_ARM_RK3399_DMC_DEVFREQ is not set +# CONFIG_PM_DEVFREQ_EVENT is not set +CONFIG_EXTCON=y + +# +# Extcon Device Drivers +# +# CONFIG_EXTCON_ADC_JACK is not set +# CONFIG_EXTCON_GPIO is not set +# CONFIG_EXTCON_MAX3355 is not set +# CONFIG_EXTCON_QCOM_SPMI_MISC is not set +# CONFIG_EXTCON_RT8973A is not set +# CONFIG_EXTCON_SM5502 is not set 
+CONFIG_EXTCON_USB_GPIO=y +CONFIG_EXTCON_USBC_CROS_EC=y +CONFIG_MEMORY=y +# CONFIG_ARM_PL172_MPMC is not set +CONFIG_TEGRA_MC=y +CONFIG_IIO=y +CONFIG_IIO_BUFFER=y +# CONFIG_IIO_BUFFER_CB is not set +# CONFIG_IIO_BUFFER_HW_CONSUMER is not set +CONFIG_IIO_KFIFO_BUF=m +CONFIG_IIO_TRIGGERED_BUFFER=m +# CONFIG_IIO_CONFIGFS is not set +CONFIG_IIO_TRIGGER=y +CONFIG_IIO_CONSUMERS_PER_TRIGGER=2 +# CONFIG_IIO_SW_DEVICE is not set +# CONFIG_IIO_SW_TRIGGER is not set + +# +# Accelerometers +# +# CONFIG_ADIS16201 is not set +# CONFIG_ADIS16209 is not set +# CONFIG_ADXL345_I2C is not set +# CONFIG_ADXL345_SPI is not set +# CONFIG_BMA180 is not set +# CONFIG_BMA220 is not set +# CONFIG_BMC150_ACCEL is not set +# CONFIG_DA280 is not set +# CONFIG_DA311 is not set +# CONFIG_DMARD06 is not set +# CONFIG_DMARD09 is not set +# CONFIG_DMARD10 is not set +# CONFIG_IIO_CROS_EC_ACCEL_LEGACY is not set +# CONFIG_IIO_ST_ACCEL_3AXIS is not set +# CONFIG_KXSD9 is not set +# CONFIG_KXCJK1013 is not set +# CONFIG_MC3230 is not set +# CONFIG_MMA7455_I2C is not set +# CONFIG_MMA7455_SPI is not set +# CONFIG_MMA7660 is not set +# CONFIG_MMA8452 is not set +# CONFIG_MMA9551 is not set +# CONFIG_MMA9553 is not set +# CONFIG_MXC4005 is not set +# CONFIG_MXC6255 is not set +# CONFIG_SCA3000 is not set +# CONFIG_STK8312 is not set +# CONFIG_STK8BA50 is not set + +# +# Analog to digital converters +# +# CONFIG_AD7266 is not set +# CONFIG_AD7291 is not set +# CONFIG_AD7298 is not set +# CONFIG_AD7476 is not set +# CONFIG_AD7766 is not set +# CONFIG_AD7791 is not set +# CONFIG_AD7793 is not set +# CONFIG_AD7887 is not set +# CONFIG_AD7923 is not set +# CONFIG_AD799X is not set +# CONFIG_AXP20X_ADC is not set +# CONFIG_AXP288_ADC is not set +# CONFIG_BCM_IPROC_ADC is not set +# CONFIG_BERLIN2_ADC is not set +# CONFIG_CC10001_ADC is not set +# CONFIG_ENVELOPE_DETECTOR is not set +CONFIG_EXYNOS_ADC=y +# CONFIG_HI8435 is not set +# CONFIG_HX711 is not set +# CONFIG_INA2XX_ADC is not set +# CONFIG_LTC2471 is 
not set +# CONFIG_LTC2485 is not set +# CONFIG_LTC2497 is not set +# CONFIG_MAX1027 is not set +# CONFIG_MAX11100 is not set +# CONFIG_MAX1118 is not set +# CONFIG_MAX1363 is not set +# CONFIG_MAX9611 is not set +# CONFIG_MCP320X is not set +# CONFIG_MCP3422 is not set +# CONFIG_MEDIATEK_MT6577_AUXADC is not set +CONFIG_MESON_SARADC=y +# CONFIG_NAU7802 is not set +# CONFIG_QCOM_SPMI_IADC is not set +# CONFIG_QCOM_SPMI_VADC is not set +CONFIG_ROCKCHIP_SARADC=m +# CONFIG_SD_ADC_MODULATOR is not set +# CONFIG_TI_ADC081C is not set +# CONFIG_TI_ADC0832 is not set +# CONFIG_TI_ADC084S021 is not set +# CONFIG_TI_ADC12138 is not set +# CONFIG_TI_ADC108S102 is not set +# CONFIG_TI_ADC128S052 is not set +# CONFIG_TI_ADC161S626 is not set +# CONFIG_TI_ADS1015 is not set +# CONFIG_TI_ADS7950 is not set +# CONFIG_TI_ADS8688 is not set +# CONFIG_TI_TLC4541 is not set +# CONFIG_VF610_ADC is not set + +# +# Analog Front Ends +# +# CONFIG_IIO_RESCALE is not set + +# +# Amplifiers +# +# CONFIG_AD8366 is not set + +# +# Chemical Sensors +# +# CONFIG_ATLAS_PH_SENSOR is not set +# CONFIG_BME680 is not set +# CONFIG_CCS811 is not set +# CONFIG_IAQCORE is not set +# CONFIG_VZ89X is not set +CONFIG_IIO_CROS_EC_SENSORS_CORE=m +CONFIG_IIO_CROS_EC_SENSORS=m + +# +# Hid Sensor IIO Common +# + +# +# SSP Sensor Common +# +# CONFIG_IIO_SSP_SENSORHUB is not set + +# +# Counters +# + +# +# Digital to analog converters +# +# CONFIG_AD5064 is not set +# CONFIG_AD5360 is not set +# CONFIG_AD5380 is not set +# CONFIG_AD5421 is not set +# CONFIG_AD5446 is not set +# CONFIG_AD5449 is not set +# CONFIG_AD5592R is not set +# CONFIG_AD5593R is not set +# CONFIG_AD5504 is not set +# CONFIG_AD5624R_SPI is not set +# CONFIG_LTC2632 is not set +# CONFIG_AD5686_SPI is not set +# CONFIG_AD5696_I2C is not set +# CONFIG_AD5755 is not set +# CONFIG_AD5758 is not set +# CONFIG_AD5761 is not set +# CONFIG_AD5764 is not set +# CONFIG_AD5791 is not set +# CONFIG_AD7303 is not set +# CONFIG_AD8801 is not set +# 
CONFIG_DPOT_DAC is not set +# CONFIG_DS4424 is not set +# CONFIG_M62332 is not set +# CONFIG_MAX517 is not set +# CONFIG_MAX5821 is not set +# CONFIG_MCP4725 is not set +# CONFIG_MCP4922 is not set +# CONFIG_TI_DAC082S085 is not set +# CONFIG_TI_DAC5571 is not set +# CONFIG_VF610_DAC is not set + +# +# IIO dummy driver +# + +# +# Frequency Synthesizers DDS/PLL +# + +# +# Clock Generator/Distribution +# +# CONFIG_AD9523 is not set + +# +# Phase-Locked Loop (PLL) frequency synthesizers +# +# CONFIG_ADF4350 is not set + +# +# Digital gyroscope sensors +# +# CONFIG_ADIS16080 is not set +# CONFIG_ADIS16130 is not set +# CONFIG_ADIS16136 is not set +# CONFIG_ADIS16260 is not set +# CONFIG_ADXRS450 is not set +# CONFIG_BMG160 is not set +# CONFIG_MPU3050_I2C is not set +# CONFIG_IIO_ST_GYRO_3AXIS is not set +# CONFIG_ITG3200 is not set + +# +# Health Sensors +# + +# +# Heart Rate Monitors +# +# CONFIG_AFE4403 is not set +# CONFIG_AFE4404 is not set +# CONFIG_MAX30100 is not set +# CONFIG_MAX30102 is not set + +# +# Humidity sensors +# +# CONFIG_AM2315 is not set +# CONFIG_DHT11 is not set +# CONFIG_HDC100X is not set +# CONFIG_HTS221 is not set +# CONFIG_HTU21 is not set +# CONFIG_SI7005 is not set +# CONFIG_SI7020 is not set + +# +# Inertial measurement units +# +# CONFIG_ADIS16400 is not set +# CONFIG_ADIS16480 is not set +# CONFIG_BMI160_I2C is not set +# CONFIG_BMI160_SPI is not set +# CONFIG_KMX61 is not set +# CONFIG_INV_MPU6050_I2C is not set +# CONFIG_INV_MPU6050_SPI is not set +# CONFIG_IIO_ST_LSM6DSX is not set + +# +# Light sensors +# +# CONFIG_ADJD_S311 is not set +# CONFIG_AL3320A is not set +# CONFIG_APDS9300 is not set +# CONFIG_APDS9960 is not set +# CONFIG_BH1750 is not set +# CONFIG_BH1780 is not set +# CONFIG_CM32181 is not set +# CONFIG_CM3232 is not set +# CONFIG_CM3323 is not set +# CONFIG_CM3605 is not set +# CONFIG_CM36651 is not set +CONFIG_IIO_CROS_EC_LIGHT_PROX=m +# CONFIG_GP2AP020A00F is not set +# CONFIG_SENSORS_ISL29018 is not set +# 
CONFIG_SENSORS_ISL29028 is not set +# CONFIG_ISL29125 is not set +# CONFIG_JSA1212 is not set +# CONFIG_RPR0521 is not set +# CONFIG_LTR501 is not set +# CONFIG_LV0104CS is not set +# CONFIG_MAX44000 is not set +# CONFIG_OPT3001 is not set +# CONFIG_PA12203001 is not set +# CONFIG_SI1133 is not set +# CONFIG_SI1145 is not set +# CONFIG_STK3310 is not set +# CONFIG_ST_UVIS25 is not set +# CONFIG_TCS3414 is not set +# CONFIG_TCS3472 is not set +# CONFIG_SENSORS_TSL2563 is not set +# CONFIG_TSL2583 is not set +# CONFIG_TSL2772 is not set +# CONFIG_TSL4531 is not set +# CONFIG_US5182D is not set +# CONFIG_VCNL4000 is not set +# CONFIG_VEML6070 is not set +# CONFIG_VL6180 is not set +# CONFIG_ZOPT2201 is not set + +# +# Magnetometer sensors +# +# CONFIG_AK8974 is not set +# CONFIG_AK8975 is not set +# CONFIG_AK09911 is not set +# CONFIG_BMC150_MAGN_I2C is not set +# CONFIG_BMC150_MAGN_SPI is not set +# CONFIG_MAG3110 is not set +# CONFIG_MMC35240 is not set +# CONFIG_IIO_ST_MAGN_3AXIS is not set +# CONFIG_SENSORS_HMC5843_I2C is not set +# CONFIG_SENSORS_HMC5843_SPI is not set + +# +# Multiplexers +# +# CONFIG_IIO_MUX is not set + +# +# Inclinometer sensors +# + +# +# Triggers - standalone +# +# CONFIG_IIO_INTERRUPT_TRIGGER is not set +# CONFIG_IIO_SYSFS_TRIGGER is not set + +# +# Digital potentiometers +# +# CONFIG_AD5272 is not set +# CONFIG_DS1803 is not set +# CONFIG_MAX5481 is not set +# CONFIG_MAX5487 is not set +# CONFIG_MCP4018 is not set +# CONFIG_MCP4131 is not set +# CONFIG_MCP4531 is not set +# CONFIG_TPL0102 is not set + +# +# Digital potentiostats +# +# CONFIG_LMP91000 is not set + +# +# Pressure sensors +# +# CONFIG_ABP060MG is not set +# CONFIG_BMP280 is not set +CONFIG_IIO_CROS_EC_BARO=m +# CONFIG_HP03 is not set +# CONFIG_MPL115_I2C is not set +# CONFIG_MPL115_SPI is not set +# CONFIG_MPL3115 is not set +# CONFIG_MS5611 is not set +# CONFIG_MS5637 is not set +# CONFIG_IIO_ST_PRESS is not set +# CONFIG_T5403 is not set +# CONFIG_HP206C is not set +# 
CONFIG_ZPA2326 is not set + +# +# Lightning sensors +# +# CONFIG_AS3935 is not set + +# +# Proximity and distance sensors +# +# CONFIG_ISL29501 is not set +# CONFIG_LIDAR_LITE_V2 is not set +# CONFIG_RFD77402 is not set +# CONFIG_SRF04 is not set +# CONFIG_SX9500 is not set +# CONFIG_SRF08 is not set + +# +# Resolver to digital converters +# +# CONFIG_AD2S1200 is not set + +# +# Temperature sensors +# +# CONFIG_MAXIM_THERMOCOUPLE is not set +# CONFIG_MLX90614 is not set +# CONFIG_MLX90632 is not set +# CONFIG_TMP006 is not set +# CONFIG_TMP007 is not set +# CONFIG_TSYS01 is not set +# CONFIG_TSYS02D is not set +# CONFIG_NTB is not set +# CONFIG_VME_BUS is not set +CONFIG_PWM=y +CONFIG_PWM_SYSFS=y +CONFIG_PWM_BCM_IPROC=y +CONFIG_PWM_BCM2835=m +# CONFIG_PWM_BERLIN is not set +# CONFIG_PWM_BRCMSTB is not set +CONFIG_PWM_CROS_EC=m +# CONFIG_PWM_FSL_FTM is not set +# CONFIG_PWM_HIBVT is not set +CONFIG_PWM_MESON=m +# CONFIG_PWM_MTK_DISP is not set +# CONFIG_PWM_MEDIATEK is not set +# CONFIG_PWM_PCA9685 is not set +CONFIG_PWM_RCAR=m +# CONFIG_PWM_RENESAS_TPU is not set +CONFIG_PWM_ROCKCHIP=y +CONFIG_PWM_SAMSUNG=y +# CONFIG_PWM_SUN4I is not set +CONFIG_PWM_TEGRA=m +# CONFIG_PWM_ZX is not set + +# +# IRQ chip support +# +CONFIG_IRQCHIP=y +CONFIG_ARM_GIC=y +CONFIG_ARM_GIC_MAX_NR=1 +CONFIG_ARM_GIC_V2M=y +CONFIG_ARM_GIC_V3=y +CONFIG_ARM_GIC_V3_ITS=y +CONFIG_ARM_GIC_V3_ITS_PCI=y +CONFIG_ALPINE_MSI=y +CONFIG_BRCMSTB_L2_IRQ=y +CONFIG_DW_APB_ICTL=y +CONFIG_HISILICON_IRQ_MBIGEN=y +CONFIG_RENESAS_IRQC=y +CONFIG_MVEBU_GICP=y +CONFIG_MVEBU_ICU=y +CONFIG_MVEBU_ODMI=y +CONFIG_MVEBU_PIC=y +CONFIG_LS_SCFG_MSI=y +CONFIG_PARTITION_PERCPU=y +CONFIG_IRQ_UNIPHIER_AIDET=y +CONFIG_MESON_IRQ_GPIO=y +# CONFIG_QCOM_PDC is not set +CONFIG_PHYTIUM_IXIC=y +# CONFIG_IPACK_BUS is not set +CONFIG_ARCH_HAS_RESET_CONTROLLER=y +CONFIG_RESET_CONTROLLER=y +CONFIG_RESET_BERLIN=y +CONFIG_RESET_MESON=y +# CONFIG_RESET_MESON_AUDIO_ARB is not set +# CONFIG_RESET_QCOM_AOSS is not set +CONFIG_RESET_SIMPLE=y 
+CONFIG_RESET_SUNXI=y +# CONFIG_RESET_TI_SYSCON is not set +CONFIG_RESET_UNIPHIER=y +CONFIG_RESET_UNIPHIER_USB3=y +CONFIG_COMMON_RESET_HI3660=y +CONFIG_COMMON_RESET_HI6220=y +CONFIG_RESET_TEGRA_BPMP=y +# CONFIG_FMC is not set + +# +# PHY Subsystem +# +CONFIG_GENERIC_PHY=y +CONFIG_PHY_XGENE=y +CONFIG_PHY_SUN4I_USB=y +# CONFIG_PHY_SUN9I_USB is not set +CONFIG_PHY_MESON8B_USB2=y +CONFIG_PHY_MESON_GXL_USB2=y +CONFIG_PHY_MESON_GXL_USB3=y +# CONFIG_BCM_KONA_USB2_PHY is not set +# CONFIG_PHY_BCM_NS_USB2 is not set +# CONFIG_PHY_BCM_NS_USB3 is not set +CONFIG_PHY_NS2_PCIE=y +CONFIG_PHY_NS2_USB_DRD=y +CONFIG_PHY_BRCM_SATA=y +CONFIG_PHY_BRCM_USB=y +CONFIG_PHY_BCM_SR_PCIE=y +CONFIG_PHY_HI6220_USB=y +CONFIG_PHY_HISTB_COMBPHY=y +CONFIG_PHY_HISI_INNO_USB2=y +# CONFIG_PHY_BERLIN_SATA is not set +# CONFIG_PHY_BERLIN_USB is not set +CONFIG_PHY_MVEBU_CP110_COMPHY=y +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_PHY_MTK_TPHY is not set +# CONFIG_PHY_MTK_XSPHY is not set +# CONFIG_PHY_CPCAP_USB is not set +# CONFIG_PHY_MAPPHONE_MDM6600 is not set +# CONFIG_PHY_QCOM_APQ8064_SATA is not set +# CONFIG_PHY_QCOM_IPQ806X_SATA is not set +CONFIG_PHY_QCOM_QMP=m +# CONFIG_PHY_QCOM_QUSB2 is not set +CONFIG_PHY_QCOM_UFS=m +CONFIG_PHY_QCOM_USB_HS=y +# CONFIG_PHY_QCOM_USB_HSIC is not set +# CONFIG_PHY_RCAR_GEN2 is not set +# CONFIG_PHY_RCAR_GEN3_PCIE is not set +CONFIG_PHY_RCAR_GEN3_USB2=y +CONFIG_PHY_RCAR_GEN3_USB3=m +# CONFIG_PHY_ROCKCHIP_DP is not set +CONFIG_PHY_ROCKCHIP_EMMC=y +CONFIG_PHY_ROCKCHIP_INNO_USB2=y +CONFIG_PHY_ROCKCHIP_PCIE=m +CONFIG_PHY_ROCKCHIP_TYPEC=y +# CONFIG_PHY_ROCKCHIP_USB is not set +CONFIG_PHY_EXYNOS_DP_VIDEO=y +CONFIG_PHY_EXYNOS_MIPI_VIDEO=y +# CONFIG_PHY_EXYNOS_PCIE is not set +CONFIG_PHY_SAMSUNG_USB2=y +CONFIG_PHY_EXYNOS5_USBDRD=y +CONFIG_PHY_TEGRA_XUSB=y +# CONFIG_PHY_TUSB1210 is not set +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# CONFIG_ARM_CCI_PMU is not set +# 
CONFIG_ARM_CCN is not set +CONFIG_ARM_PMU=y +# CONFIG_ARM_DSU_PMU is not set +# CONFIG_XGENE_PMU is not set +# CONFIG_ARM_SPE_PMU is not set +CONFIG_RAS=y + +# +# Android +# +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ANDROID_BINDER_DEVICES="binder,hwbinder,vndbinder" +# CONFIG_ANDROID_BINDER_IPC_SELFTEST is not set +# CONFIG_LIBNVDIMM is not set +# CONFIG_DAX is not set +CONFIG_NVMEM=y +# CONFIG_MTK_EFUSE is not set +CONFIG_QCOM_QFPROM=y +CONFIG_ROCKCHIP_EFUSE=y +CONFIG_NVMEM_BCM_OCOTP=y +# CONFIG_NVMEM_SUNXI_SID is not set +CONFIG_UNIPHIER_EFUSE=y +CONFIG_MESON_EFUSE=m +# CONFIG_MESON_MX_EFUSE is not set + +# +# HW tracing support +# +# CONFIG_STM is not set +# CONFIG_INTEL_TH is not set +# CONFIG_FPGA is not set +# CONFIG_FSI is not set +CONFIG_TEE=y + +# +# TEE drivers +# +CONFIG_OPTEE=y +CONFIG_OPTEE_SHM_NUM_PRIV_PAGES=1 +CONFIG_PM_OPP=y +# CONFIG_SIOX is not set +CONFIG_DRIVERS_HDF=y +CONFIG_HDF_SUPPORT_LEVEL=2 +CONFIG_DRIVERS_HDF_PLATFORM=y +# CONFIG_DRIVERS_HDF_PLATFORM_MIPI_DSI is not set +# CONFIG_DRIVERS_HDF_PLATFORM_MIPI_CSI is not set +CONFIG_DRIVERS_HDF_PLATFORM_GPIO=y +CONFIG_DRIVERS_HDF_PLATFORM_I2C=y +# CONFIG_DRIVERS_HDF_PLATFORM_WATCHDOG is not set +# CONFIG_DRIVERS_HDF_PLATFORM_PWM is not set +# CONFIG_DRIVERS_HDF_PLATFORM_UART is not set +CONFIG_DRIVERS_HDF_PLATFORM_SDIO=y +# CONFIG_DRIVERS_HDF_PLATFORM_EMMC is not set +CONFIG_DRIVERS_HDF_PLATFORM_MMC=y +# CONFIG_DRIVERS_HDF_PLATFORM_SPI is not set +# CONFIG_DRIVERS_HDF_PLATFORM_RTC is not set +# CONFIG_PWM_HI35XX is not set +# CONFIG_DRIVERS_HDF_PLATFORM_REGULATOR is not set +# CONFIG_DRIVERS_HDF_TEST is not set +# CONFIG_DRIVERS_HDF_DISP is not set +# CONFIG_DRIVERS_HDF_INPUT is not set +# CONFIG_DRIVERS_HDF_WIFI is not set +# CONFIG_DRIVERS_HDF_NETDEV_EXT is not set +# CONFIG_DRIVERS_HDF_BT is not set +# CONFIG_DRIVERS_HDF_SENSOR is not set +CONFIG_DRIVERS_HDF_STORAGE=y +# CONFIG_DRIVERS_HDF_USB_PNP_NOTIFY is not set +# CONFIG_DRIVERS_HDF_USB_F_GENERIC is not set 
+CONFIG_DRIVERS_HDF_AUDIO=y +# CONFIG_DRIVERS_HDF_AUDIO_HI3516CODEC is not set +# CONFIG_DRIVERS_HDF_AUDIO_RK3568 is not set +# CONFIG_DRIVERS_HDF_AUDIO_TEST is not set +# CONFIG_DRIVERS_HDF_VIBRATOR is not set +# CONFIG_DRIVERS_HDF_DSOFTBUS is not set +# CONFIG_DRIVERS_HDF_LIGHT is not set +# CONFIG_SLIMBUS is not set +CONFIG_ACCESS_TOKENID=y + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +CONFIG_FS_IOMAP=y +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +CONFIG_EXT3_FS=y +# CONFIG_EXT3_FS_POSIX_ACL is not set +# CONFIG_EXT3_FS_SECURITY is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +# CONFIG_EXT4_FS_SECURITY is not set +# CONFIG_EXT4_ENCRYPTION is not set +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set +CONFIG_BTRFS_FS=m +CONFIG_BTRFS_FS_POSIX_ACL=y +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_BTRFS_FS_REF_VERIFY is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +# CONFIG_FS_DAX is not set +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +# CONFIG_EXPORTFS_BLOCK_OPS is not set +CONFIG_FILE_LOCKING=y +CONFIG_MANDATORY_FILE_LOCKING=y +# CONFIG_FS_ENCRYPTION is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +# CONFIG_QUOTA_NETLINK_INTERFACE is not set +CONFIG_PRINT_QUOTA_WARNING=y +# CONFIG_QUOTA_DEBUG is not set +# CONFIG_QFMT_V1 is not set +# CONFIG_QFMT_V2 is not set +CONFIG_QUOTACTL=y +CONFIG_AUTOFS4_FS=y +CONFIG_AUTOFS_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_OVERLAY_FS=m +# CONFIG_OVERLAY_FS_REDIRECT_DIR is not set +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y +# CONFIG_OVERLAY_FS_INDEX is not set +# 
CONFIG_OVERLAY_FS_XINO_AUTO is not set +# CONFIG_OVERLAY_FS_METACOPY is not set + +# +# Caches +# +# CONFIG_FSCACHE is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_FAT_DEFAULT_UTF8 is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +# CONFIG_PROC_KCORE is not set +CONFIG_PROC_VMCORE=y +# CONFIG_PROC_VMCORE_DEVICE_DUMP is not set +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +# CONFIG_PROC_CHILDREN is not set +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_MEMFD_CREATE=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +CONFIG_CONFIGFS_FS=y +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS2_FS is not set +# CONFIG_CRAMFS is not set +CONFIG_SQUASHFS=y +CONFIG_SQUASHFS_FILE_CACHE=y +# CONFIG_SQUASHFS_FILE_DIRECT is not set +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set +# CONFIG_SQUASHFS_XATTR is not set +# CONFIG_SQUASHFS_ZLIB is not set +# CONFIG_SQUASHFS_LZ4 is not set +# CONFIG_SQUASHFS_LZO is not set +CONFIG_SQUASHFS_XZ=y +# CONFIG_SQUASHFS_ZSTD is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +CONFIG_PSTORE=y 
+CONFIG_PSTORE_DEFLATE_COMPRESS=y +# CONFIG_PSTORE_LZO_COMPRESS is not set +# CONFIG_PSTORE_LZ4_COMPRESS is not set +# CONFIG_PSTORE_LZ4HC_COMPRESS is not set +# CONFIG_PSTORE_842_COMPRESS is not set +# CONFIG_PSTORE_ZSTD_COMPRESS is not set +CONFIG_PSTORE_COMPRESS=y +CONFIG_PSTORE_DEFLATE_COMPRESS_DEFAULT=y +CONFIG_PSTORE_COMPRESS_DEFAULT="deflate" +# CONFIG_PSTORE_CONSOLE is not set +# CONFIG_PSTORE_PMSG is not set +# CONFIG_PSTORE_RAM is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V2=y +CONFIG_NFS_V3=y +# CONFIG_NFS_V3_ACL is not set +CONFIG_NFS_V4=y +# CONFIG_NFS_SWAP is not set +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=y +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +# CONFIG_NFS_V4_1_MIGRATION is not set +CONFIG_NFS_V4_SECURITY_LABEL=y +CONFIG_ROOT_NFS=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +# CONFIG_NFSD is not set +CONFIG_GRACE_PERIOD=y +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=y +CONFIG_SUNRPC_GSS=y +CONFIG_SUNRPC_BACKCHANNEL=y +# CONFIG_SUNRPC_DEBUG is not set +# CONFIG_CEPH_FS is not set +# CONFIG_CIFS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_9P_FS=y +# CONFIG_9P_FS_POSIX_ACL is not set +# CONFIG_9P_FS_SECURITY is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# 
CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +# CONFIG_NLS_MAC_ROMAN is not set +# CONFIG_NLS_MAC_CELTIC is not set +# CONFIG_NLS_MAC_CENTEURO is not set +# CONFIG_NLS_MAC_CROATIAN is not set +# CONFIG_NLS_MAC_CYRILLIC is not set +# CONFIG_NLS_MAC_GAELIC is not set +# CONFIG_NLS_MAC_GREEK is not set +# CONFIG_NLS_MAC_ICELAND is not set +# CONFIG_NLS_MAC_INUIT is not set +# CONFIG_NLS_MAC_ROMANIAN is not set +# CONFIG_NLS_MAC_TURKISH is not set +# CONFIG_NLS_UTF8 is not set +# CONFIG_DLM is not set + +# +# Security options +# +CONFIG_KEYS=y +CONFIG_KEYS_COMPAT=y +# CONFIG_PERSISTENT_KEYRINGS is not set +# CONFIG_BIG_KEYS is not set +# CONFIG_TRUSTED_KEYS is not set +# CONFIG_ENCRYPTED_KEYS is not set +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +# CONFIG_SECURITY_NETWORK is not set +# CONFIG_SECURITY_PATH is not set +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y +# CONFIG_HARDENED_USERCOPY is not set +# CONFIG_FORTIFY_SOURCE is not set +# CONFIG_STATIC_USERMODEHELPER is not set +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_SECURITY_LOADPIN is not set +# CONFIG_SECURITY_YAMA is not set +CONFIG_INTEGRITY=y 
+# CONFIG_INTEGRITY_SIGNATURE is not set +CONFIG_INTEGRITY_AUDIT=y +# CONFIG_IMA is not set +# CONFIG_EVM is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_DEFAULT_SECURITY="" +CONFIG_XOR_BLOCKS=m +CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA=y +CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA=y +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=y +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_RSA=y +# CONFIG_CRYPTO_DH is not set +CONFIG_CRYPTO_ECDH=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +# CONFIG_CRYPTO_USER is not set +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +CONFIG_CRYPTO_GF128MUL=y +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +# CONFIG_CRYPTO_PCRYPT is not set +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_CRYPTO_CRYPTD=y +# CONFIG_CRYPTO_MCRYPTD is not set +CONFIG_CRYPTO_AUTHENC=m +# CONFIG_CRYPTO_TEST is not set +CONFIG_CRYPTO_SIMD=y +CONFIG_CRYPTO_ENGINE=m + +# +# Authenticated Encryption with Associated Data +# +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=m +# CONFIG_CRYPTO_CHACHA20POLY1305 is not set +# CONFIG_CRYPTO_AEGIS128 is not set +# CONFIG_CRYPTO_AEGIS128L is not set +# CONFIG_CRYPTO_AEGIS256 is not set +# CONFIG_CRYPTO_MORUS640 is not set +# CONFIG_CRYPTO_MORUS1280 is not set +CONFIG_CRYPTO_SEQIV=m +CONFIG_CRYPTO_ECHAINIV=y + +# +# Block modes +# +# CONFIG_CRYPTO_CBC is not set +# CONFIG_CRYPTO_CFB is not set +CONFIG_CRYPTO_CTR=m +# CONFIG_CRYPTO_CTS is not set +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set +# CONFIG_CRYPTO_KEYWRAP is not set + +# +# Hash modes +# +CONFIG_CRYPTO_CMAC=y +CONFIG_CRYPTO_HMAC=y +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC 
is not set + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32=y +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_GHASH=m +# CONFIG_CRYPTO_POLY1305 is not set +# CONFIG_CRYPTO_MD4 is not set +CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +CONFIG_CRYPTO_SHA3=m +CONFIG_CRYPTO_SM3=m +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +# CONFIG_CRYPTO_ANUBIS is not set +CONFIG_CRYPTO_ARC4=y +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +CONFIG_CRYPTO_CHACHA20=m +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_SM4 is not set +# CONFIG_CRYPTO_TEA is not set +# CONFIG_CRYPTO_TWOFISH is not set + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +# CONFIG_CRYPTO_LZO is not set +# CONFIG_CRYPTO_842 is not set +# CONFIG_CRYPTO_LZ4 is not set +# CONFIG_CRYPTO_LZ4HC is not set +# CONFIG_CRYPTO_ZSTD is not set + +# +# Random Number Generation +# +CONFIG_CRYPTO_ANSI_CPRNG=y +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +# CONFIG_CRYPTO_DRBG_HASH is not set +# CONFIG_CRYPTO_DRBG_CTR is not set +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +# CONFIG_CRYPTO_USER_API_HASH is not set +# CONFIG_CRYPTO_USER_API_SKCIPHER is not set +# CONFIG_CRYPTO_USER_API_RNG is not set +# CONFIG_CRYPTO_USER_API_AEAD is not set +CONFIG_CRYPTO_HASH_INFO=y +CONFIG_CRYPTO_HW=y +# CONFIG_CRYPTO_DEV_MARVELL_CESA is not set +# CONFIG_CRYPTO_DEV_FSL_CAAM is not set +# CONFIG_CRYPTO_DEV_EXYNOS_RNG is not set +# 
CONFIG_CRYPTO_DEV_S5P is not set +# CONFIG_CRYPTO_DEV_CCP is not set +# CONFIG_CAVIUM_CPT is not set +# CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set +# CONFIG_CRYPTO_DEV_CAVIUM_ZIP is not set +# CONFIG_CRYPTO_DEV_QCE is not set +# CONFIG_CRYPTO_DEV_QCOM_RNG is not set +# CONFIG_CRYPTO_DEV_ROCKCHIP is not set +CONFIG_CRYPTO_DEV_VIRTIO=m +CONFIG_CRYPTO_DEV_BCM_SPU=m +# CONFIG_CRYPTO_DEV_SAFEXCEL is not set +# CONFIG_CRYPTO_DEV_CCREE is not set +# CONFIG_CRYPTO_DEV_HISI_SEC is not set +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +CONFIG_PKCS7_MESSAGE_PARSER=y +# CONFIG_PKCS7_TEST_KEY is not set +# CONFIG_SIGNED_PE_FILE_VERIFICATION is not set + +# +# Certificates for signature checking +# +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" +# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set +# CONFIG_SECONDARY_TRUSTED_KEYRING is not set +# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set + +# +# Library routines +# +CONFIG_RAID6_PQ=m +CONFIG_BITREVERSE=y +CONFIG_HAVE_ARCH_BITREVERSE=y +CONFIG_RATIONAL=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y +CONFIG_INDIRECT_PIO=y +# CONFIG_CRC_CCITT is not set +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=y +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +# CONFIG_CRC64 is not set +# CONFIG_CRC4 is not set +CONFIG_CRC7=y +CONFIG_LIBCRC32C=y +# CONFIG_CRC8 is not set +CONFIG_XXHASH=m +CONFIG_AUDIT_GENERIC=y +CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y +CONFIG_AUDIT_COMPAT_GENERIC=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_ZSTD_COMPRESS=m +CONFIG_ZSTD_DECOMPRESS=m +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y 
+CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_RADIX_TREE_MULTIORDER=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_DMA_DIRECT_OPS=y +CONFIG_SWIOTLB=y +CONFIG_SGL_ALLOC=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_CLZ_TAB=y +# CONFIG_CORDIC is not set +# CONFIG_DDR is not set +# CONFIG_IRQ_POLL is not set +CONFIG_MPILIB=y +CONFIG_LIBFDT=y +CONFIG_OID_REGISTRY=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_SG_POOL=y +CONFIG_ARCH_HAS_SG_CHAIN=y +CONFIG_SBITMAP=y +# CONFIG_STRING_SELFTEST is not set + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=7 +# CONFIG_BOOT_PRINTK_DELAY is not set +CONFIG_DYNAMIC_DEBUG=y + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +# CONFIG_DEBUG_INFO_DWARF4 is not set +# CONFIG_GDB_SCRIPTS is not set +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=2048 +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_READABLE_ASM is not set +# CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_PAGE_OWNER is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +# CONFIG_DEBUG_SECTION_MISMATCH is not set +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_ARCH_WANT_FRAME_POINTERS=y +CONFIG_FRAME_POINTER=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y 
+CONFIG_DEBUG_KERNEL=y + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_RODATA_TEST is not set +# CONFIG_DEBUG_OBJECTS is not set +CONFIG_SLUB_DEBUG_ON=y +# CONFIG_SLUB_STATS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_STACK_USAGE is not set +CONFIG_DEBUG_VM=y +# CONFIG_DEBUG_VM_VMACACHE is not set +# CONFIG_DEBUG_VM_RB is not set +# CONFIG_DEBUG_VM_PGFLAGS is not set +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_DEBUG_MEMORY_INIT=y +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_ARCH_KASAN=y +# CONFIG_KASAN is not set +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set +# CONFIG_DEBUG_SHIRQ is not set + +# +# Debug Lockups and Hangs +# +# CONFIG_SOFTLOCKUP_DETECTOR is not set +# CONFIG_DETECT_HUNG_TASK is not set +# CONFIG_WQ_WATCHDOG is not set +# CONFIG_PANIC_ON_OOPS is not set +CONFIG_PANIC_ON_OOPS_VALUE=0 +CONFIG_PANIC_TIMEOUT=0 +# CONFIG_SCHED_DEBUG is not set +CONFIG_SCHED_INFO=y +# CONFIG_SCHEDSTATS is not set +# CONFIG_SCHED_STACK_END_CHECK is not set +# CONFIG_DEBUG_TIMEKEEPING is not set +# CONFIG_DEBUG_PREEMPT is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) 
+# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +CONFIG_DEBUG_MUTEXES=y +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_HAVE_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_BUGVERBOSE=y +# CONFIG_DEBUG_LIST is not set +# CONFIG_DEBUG_PI_LIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_RCU_PERF_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=21 +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACING_SUPPORT=y +# CONFIG_FTRACE is not set +# CONFIG_DMA_API_DEBUG is not set +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_LKDTM is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_TEST_SORT is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +CONFIG_ATOMIC64_SELFTEST=y +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_TEST_STRING_HELPERS is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_TEST_PRINTF is not set +# 
CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_BITFIELD is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_OVERFLOW is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_HASH is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_USER_COPY is not set +# CONFIG_TEST_BPF is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_KMOD is not set +CONFIG_MEMTEST=y +# CONFIG_BUG_ON_DATA_CORRUPTION is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +# CONFIG_UBSAN is not set +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y +CONFIG_STRICT_DEVMEM=y +# CONFIG_IO_STRICT_DEVMEM is not set +# CONFIG_ARM64_PTDUMP_DEBUGFS is not set +# CONFIG_PID_IN_CONTEXTIDR is not set +# CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET is not set +# CONFIG_DEBUG_WX is not set +# CONFIG_DEBUG_ALIGN_RODATA is not set +# CONFIG_ARM64_RELOC_TEST is not set +# CONFIG_CORESIGHT is not set + +# +# phytium audio snd config +# +CONFIG_SND_HDA=y +CONFIG_SND_HDA_PHYTIUM=y +CONFIG_SND_HDA_CODEC_REALTEK=y +CONFIG_SND_HDA_GENERIC=y +CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0 +CONFIG_SND_HDA_CORE=y +CONFIG_SND_SOC_PHYTIUM_I2S=y +CONFIG_SND_PMDK_ES8388=y +CONFIG_SND_PMDK_ES8336=y +CONFIG_SND_PMDK_DP=y +CONFIG_SND_SOC_ES8336=y +CONFIG_SND_SOC_ES8388=y diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 1a4f8b67bbe8..53871c8aa344 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -45,6 +45,7 @@ CONFIG_ARCH_HISI=y CONFIG_ARCH_MEDIATEK=y CONFIG_ARCH_MESON=y CONFIG_ARCH_MVEBU=y +CONFIG_ARCH_PHYTIUM=y CONFIG_ARCH_QCOM=y CONFIG_ARCH_ROCKCHIP=y CONFIG_ARCH_SEATTLE=y @@ -234,7 +235,8 @@ CONFIG_SMC91X=y CONFIG_SMSC911X=y CONFIG_SNI_AVE=y CONFIG_SNI_NETSEC=y -CONFIG_STMMAC_ETH=m +CONFIG_STMMAC_ETH=y +CONFIG_STMMAC_PLATFORM=y 
CONFIG_MDIO_BUS_MUX_MMIOREG=y CONFIG_AT803X_PHY=m CONFIG_MARVELL_PHY=m @@ -322,6 +324,8 @@ CONFIG_SPI_BCM2835AUX=m CONFIG_SPI_MESON_SPICC=m CONFIG_SPI_MESON_SPIFC=m CONFIG_SPI_ORION=y +CONFIG_SPI_PHYTIUM=y +CONFIG_SPI_PHYTIUM_QUADSPI=y CONFIG_SPI_PL022=y CONFIG_SPI_ROCKCHIP=y CONFIG_SPI_QUP=y @@ -347,6 +351,7 @@ CONFIG_GPIO_XGENE_SB=y CONFIG_GPIO_PCA953X=y CONFIG_GPIO_PCA953X_IRQ=y CONFIG_GPIO_MAX77620=y +CONFIG_GPIO_PHYTIUM=y CONFIG_POWER_AVS=y CONFIG_ROCKCHIP_IODOMAIN=y CONFIG_POWER_RESET_MSM=y @@ -477,6 +482,7 @@ CONFIG_SND_SOC_RT5514_SPI=m CONFIG_SND_SOC_RT5645=m CONFIG_SND_SIMPLE_CARD=m CONFIG_SND_AUDIO_GRAPH_CARD=m +CONFIG_SND_HDA_PHYTIUM=m CONFIG_I2C_HID=m CONFIG_USB=y CONFIG_USB_OTG=y @@ -529,6 +535,7 @@ CONFIG_MMC_DW_ROCKCHIP=y CONFIG_MMC_SUNXI=y CONFIG_MMC_BCM2835=y CONFIG_MMC_SDHCI_XENON=y +CONFIG_MMC_PHYTIUM_SDCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_GPIO=y @@ -554,6 +561,7 @@ CONFIG_RTC_DRV_SUN6I=y CONFIG_RTC_DRV_ARMADA38X=y CONFIG_RTC_DRV_TEGRA=y CONFIG_RTC_DRV_XGENE=y +CONFIG_RTC_DRV_PHYTIUM=y CONFIG_DMADEVICES=y CONFIG_DMA_BCM2835=m CONFIG_K3_DMA=y @@ -589,6 +597,7 @@ CONFIG_HWSPINLOCK_QCOM=y CONFIG_ARM_MHU=y CONFIG_PLATFORM_MHU=y CONFIG_BCM2835_MBOX=y +CONFIG_PHYTIUM_MBOX=y CONFIG_QCOM_APCS_IPC=y CONFIG_ROCKCHIP_IOMMU=y CONFIG_TEGRA_IOMMU_SMMU=y @@ -712,3 +721,5 @@ CONFIG_CRYPTO_AES_ARM64_CE_CCM=y CONFIG_CRYPTO_AES_ARM64_CE_BLK=y CONFIG_CRYPTO_CHACHA20_NEON=m CONFIG_CRYPTO_AES_ARM64_BS=m +CONFIG_CAN=y +CONFIG_CAN_PHYTIUM=y diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index df8fe8ecc37e..2cb65d4e509f 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -54,7 +54,8 @@ #define ARM64_WORKAROUND_1463225 33 #define ARM64_SSBS 34 #define ARM64_WORKAROUND_1542419 35 +#define ARM64_HAS_CRC32 36 -#define ARM64_NCAPS 36 +#define ARM64_NCAPS 37 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 
3cd936b1c79c..4ef7a35047be 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -69,6 +69,7 @@ #define ARM_CPU_IMP_QCOM 0x51 #define ARM_CPU_IMP_NVIDIA 0x4E #define ARM_CPU_IMP_HISI 0x48 +#define ARM_CPU_IMP_PHYTIUM 0x70 #define ARM_CPU_PART_AEM_V8 0xD0F #define ARM_CPU_PART_FOUNDATION 0xD00 @@ -98,6 +99,8 @@ #define NVIDIA_CPU_PART_DENVER 0x003 #define NVIDIA_CPU_PART_CARMEL 0x004 +#define PHYTIUM_CPU_PART_FTC662 0x662 + #define HISI_CPU_PART_TSV110 0xD01 #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) @@ -119,6 +122,7 @@ #define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO) #define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER) #define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL) +#define MIDR_PHYTIUM_FT2000PLUS MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_FTC662) #define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110) #ifndef __ASSEMBLY__ diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index de6fa9b4abfa..44ca0fe0f3f0 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1332,6 +1332,15 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .cpu_enable = cpu_enable_ssbs, }, #endif + { + .desc = "CRC32 instructions", + .capability = ARM64_HAS_CRC32, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_cpuid_feature, + .sys_reg = SYS_ID_AA64ISAR0_EL1, + .field_pos = ID_AA64ISAR0_CRC32_SHIFT, + .min_field_value = 1, + }, {}, }; diff --git a/arch/arm64/kernel/hibernate.c.rej b/arch/arm64/kernel/hibernate.c.rej new file mode 100644 index 000000000000..456ba4ce72d8 --- /dev/null +++ b/arch/arm64/kernel/hibernate.c.rej @@ -0,0 +1,10 @@ +diff a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c (rejected hunks) +@@ -214,7 +214,7 @@ static int create_safe_exec_page(void *src_start, size_t length, + } + + 
memcpy((void *)dst, src_start, length); +- flush_icache_range(dst, dst + length); ++ __flush_icache_range(dst, dst + length); + + pgdp = pgd_offset_raw(allocator(mask), dst_addr); + if (pgd_none(READ_ONCE(*pgdp))) { diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile index 5df2d611b77d..69ff9887f724 100644 --- a/arch/arm64/lib/Makefile +++ b/arch/arm64/lib/Makefile @@ -25,3 +25,5 @@ KCOV_INSTRUMENT_atomic_ll_sc.o := n UBSAN_SANITIZE_atomic_ll_sc.o := n lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o + +obj-$(CONFIG_CRC32) += crc32.o diff --git a/arch/arm64/lib/crc32.S b/arch/arm64/lib/crc32.S new file mode 100644 index 000000000000..5bc1e85b4e1c --- /dev/null +++ b/arch/arm64/lib/crc32.S @@ -0,0 +1,60 @@ +/* + * Accelerated CRC32(C) using AArch64 CRC instructions + * + * Copyright (C) 2016 - 2018 Linaro Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include + + .cpu generic+crc + + .macro __crc32, c +0: subs x2, x2, #16 + b.mi 8f + ldp x3, x4, [x1], #16 +CPU_BE( rev x3, x3 ) +CPU_BE( rev x4, x4 ) + crc32\c\()x w0, w0, x3 + crc32\c\()x w0, w0, x4 + b.ne 0b + ret + +8: tbz x2, #3, 4f + ldr x3, [x1], #8 +CPU_BE( rev x3, x3 ) + crc32\c\()x w0, w0, x3 +4: tbz x2, #2, 2f + ldr w3, [x1], #4 +CPU_BE( rev w3, w3 ) + crc32\c\()w w0, w0, w3 +2: tbz x2, #1, 1f + ldrh w3, [x1], #2 +CPU_BE( rev16 w3, w3 ) + crc32\c\()h w0, w0, w3 +1: tbz x2, #0, 0f + ldrb w3, [x1] + crc32\c\()b w0, w0, w3 +0: ret + .endm + + .align 5 +ENTRY(crc32_le) +alternative_if_not ARM64_HAS_CRC32 + b crc32_le_base +alternative_else_nop_endif + __crc32 +ENDPROC(crc32_le) + + .align 5 +ENTRY(__crc32c_le) +alternative_if_not ARM64_HAS_CRC32 + b __crc32c_le_base +alternative_else_nop_endif + __crc32 c +ENDPROC(__crc32c_le) diff --git a/block/bio.c b/block/bio.c index 2b4a505d2ee4..3d757055305f 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1528,7 +1528,7 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len, if (bytes > len) bytes = len; - page = alloc_page(q->bounce_gfp | __GFP_ZERO | gfp_mask); + page = alloc_page(q->bounce_gfp | gfp_mask); if (!page) goto cleanup; diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c index 2664452fa112..bed74949d039 100644 --- a/drivers/acpi/acpi_apd.c +++ b/drivers/acpi/acpi_apd.c @@ -162,6 +162,12 @@ static const struct apd_device_desc hip08_i2c_desc = { .setup = acpi_apd_setup, .fixed_clk_rate = 250000000, }; + +static const struct apd_device_desc phytium_i2c_desc = { + .setup = acpi_apd_setup, + .fixed_clk_rate = 200000000, +}; + static const struct apd_device_desc thunderx2_i2c_desc = { .setup = acpi_apd_setup, .fixed_clk_rate = 125000000, @@ -234,6 +240,7 @@ static const struct acpi_device_id acpi_apd_device_ids[] = { { "CAV9007", APD_ADDR(thunderx2_i2c_desc) }, { "HISI02A1", APD_ADDR(hip07_i2c_desc) }, { "HISI02A2", APD_ADDR(hip08_i2c_desc) }, + 
{ "PHYT0003", APD_ADDR(phytium_i2c_desc) }, #endif { } }; diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index f59d0b9e2683..6427dac45ec0 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -89,6 +89,18 @@ bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent); acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context); void acpi_scan_table_handler(u32 event, void *table, void *context); +#ifdef CONFIG_ACPI_GENERIC_GSI +int acpi_register_irq(struct device *dev, u32 hwirq, int trigger, + int polarity, struct fwnode_handle *fwnode); +#else +static inline +int acpi_register_irq(struct device *dev, u32 hwirq, int trigger, + int polarity, struct fwnode_handle *fwnode) +{ + return acpi_register_gsi(dev, hwirq, trigger, polarity); +} +#endif + /* -------------------------------------------------------------------------- Device Node Initialization / Removal -------------------------------------------------------------------------- */ diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c index 7c352cba0528..fc9b52ea4ad5 100644 --- a/drivers/acpi/irq.c +++ b/drivers/acpi/irq.c @@ -13,6 +13,8 @@ #include #include +#include "internal.h" + enum acpi_irq_model_id acpi_irq_model; static struct fwnode_handle *acpi_gsi_domain_id; @@ -41,6 +43,24 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) } EXPORT_SYMBOL_GPL(acpi_gsi_to_irq); +int acpi_register_irq(struct device *dev, u32 hwirq, int trigger, + int polarity, struct fwnode_handle *fwnode) +{ + struct irq_fwspec fwspec; + + if (!fwnode) { + dev_warn(dev, "No registered irqchip for hwirq %d\n", hwirq); + return -EINVAL; + } + + fwspec.fwnode = fwnode; + fwspec.param[0] = hwirq; + fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity); + fwspec.param_count = 2; + + return irq_create_fwspec_mapping(&fwspec); +} + /** * acpi_register_gsi() - Map a GSI to a linux IRQ number * @dev: device for which IRQ has to be mapped @@ -54,19 +74,7 @@ 
EXPORT_SYMBOL_GPL(acpi_gsi_to_irq); int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity) { - struct irq_fwspec fwspec; - - if (WARN_ON(!acpi_gsi_domain_id)) { - pr_warn("GSI: No registered irqchip, giving up\n"); - return -EINVAL; - } - - fwspec.fwnode = acpi_gsi_domain_id; - fwspec.param[0] = gsi; - fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity); - fwspec.param_count = 2; - - return irq_create_fwspec_mapping(&fwspec); + return acpi_register_irq(dev, gsi, trigger, polarity, acpi_gsi_domain_id); } EXPORT_SYMBOL_GPL(acpi_register_gsi); @@ -95,7 +103,7 @@ EXPORT_SYMBOL_GPL(acpi_unregister_gsi); * Return: * The referenced device fwhandle or NULL on failure */ -static struct fwnode_handle * +struct fwnode_handle * acpi_get_irq_source_fwhandle(const struct acpi_resource_source *source) { struct fwnode_handle *result; @@ -295,3 +303,29 @@ void __init acpi_set_irq_model(enum acpi_irq_model_id model, acpi_irq_model = model; acpi_gsi_domain_id = fwnode; } + +/** + * acpi_irq_create_hierarchy - Create a hierarchical IRQ domain with the default + * GSI domain as its parent. + * @flags: Irq domain flags associated with the domain + * @size: Size of the domain. 
+ * @fwnode: Optional fwnode of the interrupt controller + * @ops: Pointer to the interrupt domain callbacks + * @host_data: Controller private data pointer + */ +struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags, + unsigned int size, + struct fwnode_handle *fwnode, + const struct irq_domain_ops *ops, + void *host_data) +{ + struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id, + DOMAIN_BUS_ANY); + + if (!d) + return NULL; + + return irq_domain_create_hierarchy(d, flags, size, fwnode, ops, + host_data); +} +EXPORT_SYMBOL_GPL(acpi_irq_create_hierarchy); diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index 94ded9513c73..e926cddd9eba 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c @@ -35,6 +35,8 @@ #include #include +#include "internal.h" + #define PREFIX "ACPI: " #define _COMPONENT ACPI_PCI_COMPONENT @@ -423,6 +425,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev) char *link = NULL; char link_desc[16]; int rc; + struct fwnode_handle *rs_fwnode; pin = dev->pin; if (!pin) { @@ -451,7 +454,8 @@ int acpi_pci_irq_enable(struct pci_dev *dev) gsi = acpi_pci_link_allocate_irq(entry->link, entry->index, &triggering, &polarity, - &link); + &link, + &rs_fwnode); else gsi = entry->index; } else @@ -475,7 +479,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev) return 0; } - rc = acpi_register_gsi(&dev->dev, gsi, triggering, polarity); + rc = acpi_register_irq(&dev->dev, gsi, triggering, polarity, rs_fwnode); if (rc < 0) { dev_warn(&dev->dev, "PCI INT %c: failed to register GSI\n", pin_name(pin)); diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c index d5eec352a6e1..14010783eece 100644 --- a/drivers/acpi/pci_link.c +++ b/drivers/acpi/pci_link.c @@ -74,6 +74,7 @@ struct acpi_pci_link_irq { u8 resource_type; u8 possible_count; u32 possible[ACPI_PCI_LINK_MAX_POSSIBLE]; + struct acpi_resource_source resource_source; u8 initialized:1; u8 reserved:7; }; @@ -135,6 +136,8 @@ static acpi_status 
acpi_pci_link_check_possible(struct acpi_resource *resource, { struct acpi_resource_extended_irq *p = &resource->data.extended_irq; + struct acpi_resource_source *rs = + &link->irq.resource_source; if (!p || !p->interrupt_count) { printk(KERN_WARNING PREFIX "Blank _PRS EXT IRQ resource\n"); @@ -155,6 +158,12 @@ static acpi_status acpi_pci_link_check_possible(struct acpi_resource *resource, link->irq.triggering = p->triggering; link->irq.polarity = p->polarity; link->irq.resource_type = ACPI_RESOURCE_TYPE_EXTENDED_IRQ; + if (p->resource_source.string_length) { + rs->index = p->resource_source.index; + rs->string_length = p->resource_source.string_length; + rs->string_ptr = kstrdup(p->resource_source.string_ptr, + GFP_KERNEL); + } break; } default: @@ -341,7 +350,8 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq) resource->res.data.irq.sharable = ACPI_SHARED; resource->res.data.extended_irq.interrupt_count = 1; resource->res.data.extended_irq.interrupts[0] = irq; - /* ignore resource_source, it's optional */ + resource->res.data.extended_irq.resource_source = + link->irq.resource_source; break; default: printk(KERN_ERR PREFIX "Invalid Resource_type %d\n", link->irq.resource_type); @@ -627,7 +637,7 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link) * failure: return -1 */ int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering, - int *polarity, char **name) + int *polarity, char **name, struct fwnode_handle **rs_fwnode) { int result; struct acpi_device *device; @@ -671,6 +681,9 @@ int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering, *polarity = link->irq.polarity; if (name) *name = acpi_device_bid(link->device); + if (rs_fwnode) + *rs_fwnode = acpi_get_irq_source_fwhandle(&link->irq.resource_source); + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Link %s is referenced\n", acpi_device_bid(link->device))); diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 
a8180f9090fa..365571146674 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -52,7 +52,8 @@ struct efi __read_mostly efi = { .properties_table = EFI_INVALID_TABLE_ADDR, .mem_attr_table = EFI_INVALID_TABLE_ADDR, .rng_seed = EFI_INVALID_TABLE_ADDR, - .tpm_log = EFI_INVALID_TABLE_ADDR + .tpm_log = EFI_INVALID_TABLE_ADDR, + .mem_reserve = EFI_INVALID_TABLE_ADDR, }; EXPORT_SYMBOL(efi); @@ -489,6 +490,7 @@ static __initdata efi_config_table_type_t common_tables[] = { {EFI_MEMORY_ATTRIBUTES_TABLE_GUID, "MEMATTR", &efi.mem_attr_table}, {LINUX_EFI_RANDOM_SEED_TABLE_GUID, "RNG", &efi.rng_seed}, {LINUX_EFI_TPM_EVENT_LOG_GUID, "TPMEventLog", &efi.tpm_log}, + {LINUX_EFI_MEMRESERVE_TABLE_GUID, "MEMRESERVE", &efi.mem_reserve}, {NULL_GUID, NULL, NULL}, }; @@ -596,6 +598,41 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz, early_memunmap(tbl, sizeof(*tbl)); } + if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) { + unsigned long prsv = efi.mem_reserve; + + while (prsv) { + struct linux_efi_memreserve *rsv; + u8 *p; + int i; + + /* + * Just map a full page: that is what we will get + * anyway, and it permits us to map the entire entry + * before knowing its size. 
+ */ + p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE), + PAGE_SIZE); + if (p == NULL) { + pr_err("Could not map UEFI memreserve entry!\n"); + return -ENOMEM; + } + + rsv = (void *)(p + prsv % PAGE_SIZE); + + /* reserve the entry itself */ + memblock_reserve(prsv, EFI_MEMRESERVE_SIZE(rsv->size)); + + for (i = 0; i < atomic_read(&rsv->count); i++) { + memblock_reserve(rsv->entry[i].base, + rsv->entry[i].size); + } + + prsv = rsv->next; + early_memunmap(p, PAGE_SIZE); + } + } + return 0; } @@ -942,6 +979,109 @@ bool efi_is_table_address(unsigned long phys_addr) return false; } +static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock); +static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init; + +static int __init efi_memreserve_map_root(void) +{ + if (efi.mem_reserve == EFI_INVALID_TABLE_ADDR) + return -ENODEV; + + efi_memreserve_root = memremap(efi.mem_reserve, + sizeof(*efi_memreserve_root), + MEMREMAP_WB); + if (WARN_ON_ONCE(!efi_memreserve_root)) + return -ENOMEM; + return 0; +} + +static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size) +{ + struct resource *res, *parent; + + res = kzalloc(sizeof(struct resource), GFP_ATOMIC); + if (!res) + return -ENOMEM; + + res->name = "reserved"; + res->flags = IORESOURCE_MEM; + res->start = addr; + res->end = addr + size - 1; + + /* we expect a conflict with a 'System RAM' region */ + parent = request_resource_conflict(&iomem_resource, res); + return parent ? 
request_resource(parent, res) : 0; +} + +int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size) +{ + struct linux_efi_memreserve *rsv; + unsigned long prsv; + int rc, index; + + if (efi_memreserve_root == (void *)ULONG_MAX) + return -ENODEV; + + if (!efi_memreserve_root) { + rc = efi_memreserve_map_root(); + if (rc) + return rc; + } + + /* first try to find a slot in an existing linked list entry */ + for (prsv = efi_memreserve_root->next; prsv; prsv = rsv->next) { + rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB); + index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size); + if (index < rsv->size) { + rsv->entry[index].base = addr; + rsv->entry[index].size = size; + + memunmap(rsv); + return efi_mem_reserve_iomem(addr, size); + } + memunmap(rsv); + } + + /* no slot found - allocate a new linked list entry */ + rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC); + if (!rsv) + return -ENOMEM; + + rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K); + if (rc) { + free_page((unsigned long)rsv); + return rc; + } + + /* + * The memremap() call above assumes that a linux_efi_memreserve entry + * never crosses a page boundary, so let's ensure that this remains true + * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by + * using SZ_4K explicitly in the size calculation below. 
+ */ + rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K); + atomic_set(&rsv->count, 1); + rsv->entry[0].base = addr; + rsv->entry[0].size = size; + + spin_lock(&efi_mem_reserve_persistent_lock); + rsv->next = efi_memreserve_root->next; + efi_memreserve_root->next = __pa(rsv); + spin_unlock(&efi_mem_reserve_persistent_lock); + + return efi_mem_reserve_iomem(addr, size); +} + +static int __init efi_memreserve_root_init(void) +{ + if (efi_memreserve_root) + return 0; + if (efi_memreserve_map_root()) + efi_memreserve_root = (void *)ULONG_MAX; + return 0; +} +early_initcall(efi_memreserve_root_init); + #ifdef CONFIG_KEXEC static int update_efi_random_seed(struct notifier_block *nb, unsigned long code, void *unused) diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c index 6c09644d620e..296b3211f689 100644 --- a/drivers/firmware/efi/libstub/arm-stub.c +++ b/drivers/firmware/efi/libstub/arm-stub.c @@ -69,6 +69,31 @@ static struct screen_info *setup_graphics(efi_system_table_t *sys_table_arg) return si; } +void install_memreserve_table(efi_system_table_t *sys_table_arg) +{ + struct linux_efi_memreserve *rsv; + efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID; + efi_status_t status; + + status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv), + (void **)&rsv); + if (status != EFI_SUCCESS) { + pr_efi_err(sys_table_arg, "Failed to allocate memreserve entry!\n"); + return; + } + + rsv->next = 0; + rsv->size = 0; + atomic_set(&rsv->count, 0); + + status = efi_call_early(install_configuration_table, + &memreserve_table_guid, + rsv); + if (status != EFI_SUCCESS) + pr_efi_err(sys_table_arg, "Failed to install memreserve config table!\n"); +} + + /* * This function handles the architcture specific differences between arm and * arm64 regarding where the kernel image must be loaded and any memory that @@ -235,6 +260,8 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, } } + 
install_memreserve_table(sys_table); + new_fdt_addr = fdt_addr; status = allocate_new_fdt_and_exit_boot(sys_table, handle, &new_fdt_addr, efi_get_max_fdt_addr(dram_base), diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 2c34e9537f9e..6762d4c2ff77 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -79,6 +79,10 @@ config GPIO_GENERIC # put drivers in the right section, in alphabetical order +# This symbol is selected by both MMIO and PCI expanders +config GPIO_PHYTIUM_CORE + tristate + # This symbol is selected by both I2C and SPI expanders config GPIO_MAX730X tristate @@ -404,6 +408,16 @@ config GPIO_OMAP help Say yes here to enable GPIO support for TI OMAP SoCs. +config GPIO_PHYTIUM_PLAT + tristate "Phytium GPIO Platform support" + default y if ARCH_PHYTIUM + depends on ARM64 + select GPIO_PHYTIUM_CORE + select IRQ_DOMAIN + select GENERIC_IRQ_CHIP + help + Say yes here to enable GPIO support for Phytium SoCs. + config GPIO_PL061 bool "PrimeCell PL061 GPIO support" depends on ARM_AMBA @@ -1308,6 +1322,18 @@ config GPIO_PCIE_IDIO_24 Input filter control is not supported by this driver, and the input filters are deactivated by this driver. +config GPIO_PHYTIUM_PCI + tristate "Phytium GPIO PCI support" + select GPIO_PHYTIUM_CORE + select IRQ_DOMAIN + select GENERIC_IRQ_CHIP + help + Say Y here to support Phytium PCI GPIO controller on X100 chipset. + An interrupt is generated when any of the inputs change state + (low to high or high to low). + + This driver can be used for Phytium X100. 
+ config GPIO_RDC321X tristate "RDC R-321x GPIO support" select MFD_CORE diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index c256aff66a65..7e6462a25135 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -95,6 +95,9 @@ obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o obj-$(CONFIG_GPIO_MXS) += gpio-mxs.o obj-$(CONFIG_GPIO_OCTEON) += gpio-octeon.o obj-$(CONFIG_GPIO_OMAP) += gpio-omap.o +obj-$(CONFIG_GPIO_PHYTIUM_CORE) += gpio-phytium-core.o +obj-$(CONFIG_GPIO_PHYTIUM_PCI) += gpio-phytium-pci.o +obj-$(CONFIG_GPIO_PHYTIUM_PLAT) += gpio-phytium-platform.o obj-$(CONFIG_GPIO_PCA953X) += gpio-pca953x.o obj-$(CONFIG_GPIO_PCF857X) += gpio-pcf857x.o obj-$(CONFIG_GPIO_PCH) += gpio-pch.o diff --git a/drivers/gpio/gpio-phytium-core.c b/drivers/gpio/gpio-phytium-core.c new file mode 100644 index 000000000000..7c87b0868206 --- /dev/null +++ b/drivers/gpio/gpio-phytium-core.c @@ -0,0 +1,348 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2019, Phytium Corporation. + * Copyright (c) 2021, Phytium Corporation. 
+ */ + +#include +#include +#include +#include +#include + +#include "gpio-phytium-core.h" + +static int get_pin_location(struct phytium_gpio *gpio, unsigned int offset, + struct pin_loc *pl) +{ + int ret; + + if (offset < gpio->ngpio[0]) { + pl->port = 0; + pl->offset = offset; + ret = 0; + } else if (offset < (gpio->ngpio[0] + gpio->ngpio[1])) { + pl->port = 1; + pl->offset = offset - gpio->ngpio[0]; + ret = 0; + } else { + ret = -EINVAL; + } + + return ret; +} + +static void phytium_gpio_toggle_trigger(struct phytium_gpio *gpio, + unsigned int offset) +{ + struct gpio_chip *gc; + u32 pol; + int val; + + /* Only port A can provide interrupt source */ + if (offset >= gpio->ngpio[0]) + return; + + gc = &gpio->gc; + + pol = readl(gpio->regs + GPIO_INT_POLARITY); + /* Just read the current value right out of the data register */ + val = gc->get(gc, offset); + if (val) + pol &= ~BIT(offset); + else + pol |= BIT(offset); + + writel(pol, gpio->regs + GPIO_INT_POLARITY); +} + +int phytium_gpio_get(struct gpio_chip *gc, unsigned int offset) +{ + struct phytium_gpio *gpio = gpiochip_get_data(gc); + struct pin_loc loc; + void __iomem *dat; + + if (get_pin_location(gpio, offset, &loc)) + return -EINVAL; + + dat = gpio->regs + GPIO_EXT_PORTA + (loc.port * GPIO_PORT_STRIDE); + + return !!(readl(dat) & BIT(loc.offset)); +} +EXPORT_SYMBOL_GPL(phytium_gpio_get); + +void phytium_gpio_set(struct gpio_chip *gc, unsigned int offset, int value) +{ + struct phytium_gpio *gpio = gpiochip_get_data(gc); + struct pin_loc loc; + void __iomem *dr; + unsigned long flags; + u32 mask; + + if (get_pin_location(gpio, offset, &loc)) + return; + dr = gpio->regs + GPIO_SWPORTA_DR + (loc.port * GPIO_PORT_STRIDE); + + raw_spin_lock_irqsave(&gpio->lock, flags); + + if (value) + mask = readl(dr) | BIT(loc.offset); + else + mask = readl(dr) & ~BIT(loc.offset); + + writel(mask, dr); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); +} +EXPORT_SYMBOL_GPL(phytium_gpio_set); + +int 
phytium_gpio_direction_input(struct gpio_chip *gc, unsigned int offset) +{ + struct phytium_gpio *gpio = gpiochip_get_data(gc); + struct pin_loc loc; + unsigned long flags; + void __iomem *ddr; + + if (get_pin_location(gpio, offset, &loc)) + return -EINVAL; + ddr = gpio->regs + GPIO_SWPORTA_DDR + (loc.port * GPIO_PORT_STRIDE); + + raw_spin_lock_irqsave(&gpio->lock, flags); + + writel(readl(ddr) & ~(BIT(loc.offset)), ddr); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(phytium_gpio_direction_input); + +int phytium_gpio_direction_output(struct gpio_chip *gc, unsigned int offset, + int value) +{ + struct phytium_gpio *gpio = gpiochip_get_data(gc); + struct pin_loc loc; + unsigned long flags; + void __iomem *ddr; + + if (get_pin_location(gpio, offset, &loc)) + return -EINVAL; + ddr = gpio->regs + GPIO_SWPORTA_DDR + (loc.port * GPIO_PORT_STRIDE); + + raw_spin_lock_irqsave(&gpio->lock, flags); + + writel(readl(ddr) | BIT(loc.offset), ddr); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); + + phytium_gpio_set(gc, offset, value); + + return 0; +} +EXPORT_SYMBOL_GPL(phytium_gpio_direction_output); + +void phytium_gpio_irq_ack(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct phytium_gpio *gpio = gpiochip_get_data(gc); + u32 val = BIT(irqd_to_hwirq(d)); + + raw_spin_lock(&gpio->lock); + + writel(val, gpio->regs + GPIO_PORTA_EOI); + + raw_spin_unlock(&gpio->lock); +} +EXPORT_SYMBOL_GPL(phytium_gpio_irq_ack); + +void phytium_gpio_irq_mask(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct phytium_gpio *gpio = gpiochip_get_data(gc); + u32 val; + + /* Only port A can provide interrupt source */ + if (irqd_to_hwirq(d) >= gpio->ngpio[0]) + return; + + raw_spin_lock(&gpio->lock); + + val = readl(gpio->regs + GPIO_INTMASK); + val |= BIT(irqd_to_hwirq(d)); + writel(val, gpio->regs + GPIO_INTMASK); + + raw_spin_unlock(&gpio->lock); +} 
+EXPORT_SYMBOL_GPL(phytium_gpio_irq_mask); + +void phytium_gpio_irq_unmask(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct phytium_gpio *gpio = gpiochip_get_data(gc); + u32 val; + + /* Only port A can provide interrupt source */ + if (irqd_to_hwirq(d) >= gpio->ngpio[0]) + return; + + raw_spin_lock(&gpio->lock); + + val = readl(gpio->regs + GPIO_INTMASK); + val &= ~BIT(irqd_to_hwirq(d)); + writel(val, gpio->regs + GPIO_INTMASK); + + raw_spin_unlock(&gpio->lock); +} +EXPORT_SYMBOL_GPL(phytium_gpio_irq_unmask); + +int phytium_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct phytium_gpio *gpio = gpiochip_get_data(gc); + int hwirq = irqd_to_hwirq(d); + unsigned long flags, lvl, pol; + + if (hwirq < 0 || hwirq >= gpio->ngpio[0]) + return -EINVAL; + + if ((flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) && + (flow_type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))) { + dev_err(gc->parent, + "trying to configure line %d for both level and edge detection, choose one!\n", + hwirq); + return -EINVAL; + } + + raw_spin_lock_irqsave(&gpio->lock, flags); + + lvl = readl(gpio->regs + GPIO_INTTYPE_LEVEL); + pol = readl(gpio->regs + GPIO_INT_POLARITY); + + switch (flow_type) { + case IRQ_TYPE_EDGE_BOTH: + lvl |= BIT(hwirq); + phytium_gpio_toggle_trigger(gpio, hwirq); + irq_set_handler_locked(d, handle_edge_irq); + dev_dbg(gc->parent, "line %d: IRQ on both edges\n", hwirq); + break; + case IRQ_TYPE_EDGE_RISING: + lvl |= BIT(hwirq); + pol |= BIT(hwirq); + irq_set_handler_locked(d, handle_edge_irq); + dev_dbg(gc->parent, "line %d: IRQ on RISING edge\n", hwirq); + break; + case IRQ_TYPE_EDGE_FALLING: + lvl |= BIT(hwirq); + pol &= ~BIT(hwirq); + irq_set_handler_locked(d, handle_edge_irq); + dev_dbg(gc->parent, "line %d: IRQ on FALLING edge\n", hwirq); + break; + case IRQ_TYPE_LEVEL_HIGH: + lvl &= ~BIT(hwirq); + pol |= BIT(hwirq); + irq_set_handler_locked(d, 
handle_level_irq); + dev_dbg(gc->parent, "line %d: IRQ on HIGH level\n", hwirq); + break; + case IRQ_TYPE_LEVEL_LOW: + lvl &= ~BIT(hwirq); + pol &= ~BIT(hwirq); + irq_set_handler_locked(d, handle_level_irq); + dev_dbg(gc->parent, "line %d: IRQ on LOW level\n", hwirq); + break; + } + + writel(lvl, gpio->regs + GPIO_INTTYPE_LEVEL); + if (flow_type != IRQ_TYPE_EDGE_BOTH) + writel(pol, gpio->regs + GPIO_INT_POLARITY); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(phytium_gpio_irq_set_type); + +void phytium_gpio_irq_enable(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct phytium_gpio *gpio = gpiochip_get_data(gc); + unsigned long flags; + u32 val; + + /* Only port A can provide interrupt source */ + if (irqd_to_hwirq(d) >= gpio->ngpio[0]) + return; + + raw_spin_lock_irqsave(&gpio->lock, flags); + + val = readl(gpio->regs + GPIO_INTEN); + val |= BIT(irqd_to_hwirq(d)); + writel(val, gpio->regs + GPIO_INTEN); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); +} +EXPORT_SYMBOL_GPL(phytium_gpio_irq_enable); + +void phytium_gpio_irq_disable(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct phytium_gpio *gpio = gpiochip_get_data(gc); + unsigned long flags; + u32 val; + + /* Only port A can provide interrupt source */ + if (irqd_to_hwirq(d) >= gpio->ngpio[0]) + return; + + raw_spin_lock_irqsave(&gpio->lock, flags); + + val = readl(gpio->regs + GPIO_INTEN); + val &= ~BIT(irqd_to_hwirq(d)); + writel(val, gpio->regs + GPIO_INTEN); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); +} +EXPORT_SYMBOL_GPL(phytium_gpio_irq_disable); + +void phytium_gpio_irq_handler(struct irq_desc *desc) +{ + struct gpio_chip *gc = irq_desc_get_handler_data(desc); + struct phytium_gpio *gpio = gpiochip_get_data(gc); + struct irq_chip *irqchip = irq_desc_get_chip(desc); + unsigned long pending; + int offset; + + chained_irq_enter(irqchip, desc); + + pending = readl(gpio->regs + 
GPIO_INTSTATUS); + if (pending) { + for_each_set_bit(offset, &pending, gpio->ngpio[0]) { + int gpio_irq = irq_find_mapping(gc->irq.domain, + offset); + generic_handle_irq(gpio_irq); + + if ((irq_get_trigger_type(gpio_irq) & + IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) + phytium_gpio_toggle_trigger(gpio, offset); + } + } + + chained_irq_exit(irqchip, desc); +} +EXPORT_SYMBOL_GPL(phytium_gpio_irq_handler); + +int phytium_gpio_get_direction(struct gpio_chip *gc, unsigned int offset) +{ + struct phytium_gpio *gpio = gpiochip_get_data(gc); + struct pin_loc loc; + void __iomem *ddr; + + if (get_pin_location(gpio, offset, &loc)) + return -EINVAL; + ddr = gpio->regs + GPIO_SWPORTA_DDR + (loc.port * GPIO_PORT_STRIDE); + + return !(readl(ddr) & BIT(loc.offset)); +} +EXPORT_SYMBOL_GPL(phytium_gpio_get_direction); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Phytium GPIO Controller core"); diff --git a/drivers/gpio/gpio-phytium-core.h b/drivers/gpio/gpio-phytium-core.h new file mode 100644 index 000000000000..236c13530c4e --- /dev/null +++ b/drivers/gpio/gpio-phytium-core.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021, Phytium Corporation. 
+ */ + +#ifndef _GPIO_PHYTIUM_H +#define _GPIO_PHYTIUM_H + +#include +#include + +#include "gpiolib.h" + +#define GPIO_SWPORTA_DR 0x00 /* WR Port A Output Data Register */ +#define GPIO_SWPORTA_DDR 0x04 /* WR Port A Data Direction Register */ +#define GPIO_EXT_PORTA 0x08 /* RO Port A Input Data Register */ +#define GPIO_SWPORTB_DR 0x0c /* WR Port B Output Data Register */ +#define GPIO_SWPORTB_DDR 0x10 /* WR Port B Data Direction Register */ +#define GPIO_EXT_PORTB 0x14 /* RO Port B Input Data Register */ + +#define GPIO_INTEN 0x18 /* WR Port A Interrput Enable Register */ +#define GPIO_INTMASK 0x1c /* WR Port A Interrupt Mask Register */ +#define GPIO_INTTYPE_LEVEL 0x20 /* WR Port A Interrupt Level Register */ +#define GPIO_INT_POLARITY 0x24 /* WR Port A Interrupt Polarity Register */ +#define GPIO_INTSTATUS 0x28 /* RO Port A Interrupt Status Register */ +#define GPIO_RAW_INTSTATUS 0x2c /* RO Port A Raw Interrupt Status Register */ +#define GPIO_LS_SYNC 0x30 /* WR Level-sensitive Synchronization Enable Register */ +#define GPIO_DEBOUNCE 0x34 /* WR Debounce Enable Register */ +#define GPIO_PORTA_EOI 0x38 /* WO Port A Clear Interrupt Register */ + +#define MAX_NPORTS 2 +#define NGPIO_DEFAULT 8 +#define NGPIO_MAX 32 +#define GPIO_PORT_STRIDE (GPIO_EXT_PORTB - GPIO_EXT_PORTA) + +struct pin_loc { + unsigned int port; + unsigned int offset; +}; + +#ifdef CONFIG_PM_SLEEP +struct phytium_gpio_ctx { + u32 swporta_dr; + u32 swporta_ddr; + u32 ext_porta; + u32 swportb_dr; + u32 swportb_ddr; + u32 ext_portb; + u32 inten; + u32 intmask; + u32 inttype_level; + u32 int_polarity; + u32 intstatus; + u32 raw_intstatus; + u32 ls_sync; + u32 debounce; +}; +#endif + +struct phytium_gpio { + raw_spinlock_t lock; + void __iomem *regs; + struct gpio_chip gc; + unsigned int ngpio[2]; + int irq; +#ifdef CONFIG_PM_SLEEP + struct phytium_gpio_ctx ctx; +#endif +}; + +int phytium_gpio_get(struct gpio_chip *gc, unsigned int offset); +void phytium_gpio_set(struct gpio_chip *gc, unsigned int 
offset, int value); + +int phytium_gpio_get_direction(struct gpio_chip *gc, unsigned int offset); +int phytium_gpio_direction_input(struct gpio_chip *gc, unsigned int offset); +int phytium_gpio_direction_output(struct gpio_chip *gc, unsigned int offset, int value); + +void phytium_gpio_irq_ack(struct irq_data *d); +void phytium_gpio_irq_mask(struct irq_data *d); +void phytium_gpio_irq_unmask(struct irq_data *d); +int phytium_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type); +void phytium_gpio_irq_enable(struct irq_data *d); +void phytium_gpio_irq_disable(struct irq_data *d); +void phytium_gpio_irq_handler(struct irq_desc *desc); + +#endif diff --git a/drivers/gpio/gpio-phytium-pci.c b/drivers/gpio/gpio-phytium-pci.c new file mode 100644 index 000000000000..3a6d655d4cd8 --- /dev/null +++ b/drivers/gpio/gpio-phytium-pci.c @@ -0,0 +1,199 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021, Phytium Corporation. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "gpio-phytium-core.h" + +static struct irq_chip phytium_gpio_pci_irqchip = { + .name = "phytium_gpio_pci", + .irq_ack = phytium_gpio_irq_ack, + .irq_mask = phytium_gpio_irq_mask, + .irq_unmask = phytium_gpio_irq_unmask, + .irq_set_type = phytium_gpio_irq_set_type, + .irq_enable = phytium_gpio_irq_enable, + .irq_disable = phytium_gpio_irq_disable, +}; + +static int phytium_gpio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct device *dev = &pdev->dev; + struct phytium_gpio *gpio; + int err; + + gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL); + if (!gpio) + return -ENOMEM; + + pci_set_drvdata(pdev, gpio); + + err = pcim_enable_device(pdev); + if (err) { + dev_err(dev, "Failed to enable PCI device: err %d\n", err); + goto err0; + } + + err = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev)); + if (err) { + dev_err(dev, "Failed to iomap PCI device: err %d\n", err); + goto err0; + } + + gpio->regs = 
pcim_iomap_table(pdev)[0]; + if (!gpio->regs) { + dev_err(dev, "Cannot map PCI resource\n"); + err = -ENOMEM; + goto err0; + } + + err = pci_enable_msi(pdev); + if (err < 0) + goto err0; + + gpio->irq = pdev->irq; + if (gpio->irq < 0) + dev_warn(dev, "no irq is found.\n"); + + /* There is only one group of Pins at the moment. */ + gpio->ngpio[0] = NGPIO_MAX; + + /* irq_chip support */ + raw_spin_lock_init(&gpio->lock); + + gpio->gc.base = -1; + gpio->gc.get_direction = phytium_gpio_get_direction; + gpio->gc.direction_input = phytium_gpio_direction_input; + gpio->gc.direction_output = phytium_gpio_direction_output; + gpio->gc.get = phytium_gpio_get; + gpio->gc.set = phytium_gpio_set; + gpio->gc.ngpio = gpio->ngpio[0] + gpio->ngpio[1]; + gpio->gc.label = dev_name(dev); + gpio->gc.parent = dev; + gpio->gc.owner = THIS_MODULE; + + err = gpiochip_add_data(&gpio->gc, gpio); + if (err) { + dev_err(dev, "failed to register gpiochip\n"); + goto err1; + } + + err = gpiochip_irqchip_add(&gpio->gc, &phytium_gpio_pci_irqchip, + 0, handle_bad_irq, IRQ_TYPE_NONE); + if (err) { + dev_info(dev, "could not add irqchip\n"); + goto err1; + } + gpiochip_set_chained_irqchip(&gpio->gc, &phytium_gpio_pci_irqchip, + gpio->irq, + phytium_gpio_irq_handler); + + dev_info(dev, "Phytium PCI GPIO controller @%pa registered\n", + &gpio->regs); + + return 0; + +err1: + gpiochip_remove(&gpio->gc); +err0: + pci_set_drvdata(pdev, NULL); + return err; +} + +static void phytium_gpio_pci_remove(struct pci_dev *pdev) +{ + struct phytium_gpio *gpio = pci_get_drvdata(pdev); + + gpiochip_remove(&gpio->gc); + + pci_set_drvdata(pdev, NULL); +} + +static const struct pci_device_id phytium_gpio_pci_ids[] = { + { PCI_DEVICE(0x1DB7, 0xDC31) }, + { 0 } +}; +MODULE_DEVICE_TABLE(pci, phytium_gpio_pci_ids); + +#ifdef CONFIG_PM_SLEEP +static int phytium_gpio_pci_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct phytium_gpio *gpio = pci_get_drvdata(pdev); + unsigned long flags; + + 
raw_spin_lock_irqsave(&gpio->lock, flags); + + gpio->ctx.swporta_dr = readl(gpio->regs + GPIO_SWPORTA_DR); + gpio->ctx.swporta_ddr = readl(gpio->regs + GPIO_SWPORTA_DDR); + gpio->ctx.ext_porta = readl(gpio->regs + GPIO_EXT_PORTA); + gpio->ctx.swportb_dr = readl(gpio->regs + GPIO_SWPORTB_DR); + gpio->ctx.swportb_ddr = readl(gpio->regs + GPIO_SWPORTB_DDR); + gpio->ctx.ext_portb = readl(gpio->regs + GPIO_EXT_PORTB); + + gpio->ctx.inten = readl(gpio->regs + GPIO_INTEN); + gpio->ctx.intmask = readl(gpio->regs + GPIO_INTMASK); + gpio->ctx.inttype_level = readl(gpio->regs + GPIO_INTTYPE_LEVEL); + gpio->ctx.int_polarity = readl(gpio->regs + GPIO_INT_POLARITY); + gpio->ctx.debounce = readl(gpio->regs + GPIO_DEBOUNCE); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); + + return 0; +} + +static int phytium_gpio_pci_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct phytium_gpio *gpio = pci_get_drvdata(pdev); + unsigned long flags; + + raw_spin_lock_irqsave(&gpio->lock, flags); + + writel(gpio->ctx.swporta_dr, gpio->regs + GPIO_SWPORTA_DR); + writel(gpio->ctx.swporta_ddr, gpio->regs + GPIO_SWPORTA_DDR); + writel(gpio->ctx.ext_porta, gpio->regs + GPIO_EXT_PORTA); + writel(gpio->ctx.swportb_dr, gpio->regs + GPIO_SWPORTB_DR); + writel(gpio->ctx.swportb_ddr, gpio->regs + GPIO_SWPORTB_DDR); + writel(gpio->ctx.ext_portb, gpio->regs + GPIO_EXT_PORTB); + + writel(gpio->ctx.inten, gpio->regs + GPIO_INTEN); + writel(gpio->ctx.intmask, gpio->regs + GPIO_INTMASK); + writel(gpio->ctx.inttype_level, gpio->regs + GPIO_INTTYPE_LEVEL); + writel(gpio->ctx.int_polarity, gpio->regs + GPIO_INT_POLARITY); + writel(gpio->ctx.debounce, gpio->regs + GPIO_DEBOUNCE); + + writel(0xffffffff, gpio->regs + GPIO_PORTA_EOI); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(phytium_gpio_pci_pm_ops, + phytium_gpio_pci_suspend, + phytium_gpio_pci_resume); + +static struct pci_driver phytium_gpio_pci_driver = { + .name = 
"gpio-phytium-pci", + .id_table = phytium_gpio_pci_ids, + .probe = phytium_gpio_pci_probe, + .remove = phytium_gpio_pci_remove, + .driver = { + .pm = &phytium_gpio_pci_pm_ops, + }, +}; + +module_pci_driver(phytium_gpio_pci_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Cheng Quan "); +MODULE_DESCRIPTION("Phytium GPIO PCI Driver"); diff --git a/drivers/gpio/gpio-phytium-platform.c b/drivers/gpio/gpio-phytium-platform.c new file mode 100644 index 000000000000..e60f800e8a2c --- /dev/null +++ b/drivers/gpio/gpio-phytium-platform.c @@ -0,0 +1,217 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Support functions for Phytium GPIO + * + * Copyright (c) 2019, Phytium Corporation. + * Written by Chen Baozi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gpio-phytium-core.h" + +static struct irq_chip phytium_gpio_irqchip = { + .name = "phytium_gpio", + .irq_ack = phytium_gpio_irq_ack, + .irq_mask = phytium_gpio_irq_mask, + .irq_unmask = phytium_gpio_irq_unmask, + .irq_set_type = phytium_gpio_irq_set_type, + .irq_enable = phytium_gpio_irq_enable, + .irq_disable = phytium_gpio_irq_disable, +}; + +static const struct of_device_id phytium_gpio_of_match[] = { + { .compatible = "phytium,gpio", }, + { } +}; +MODULE_DEVICE_TABLE(of, phytium_gpio_of_match); + +static const struct acpi_device_id phytium_gpio_acpi_match[] = { + { "PHYT0001", 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, phytium_gpio_acpi_match); + +static int phytium_gpio_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct phytium_gpio *gpio; + struct fwnode_handle *fwnode; + int err; + + gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL); + if (!gpio) + return -ENOMEM; + + res = 
platform_get_resource(pdev, IORESOURCE_MEM, 0); + gpio->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(gpio->regs)) + return PTR_ERR(gpio->regs); + + gpio->irq = -ENXIO; + gpio->irq = platform_get_irq(pdev, 0); + if (gpio->irq < 0) + dev_warn(dev, "no irq is found.\n"); + + if (!device_get_child_node_count(dev)) + return -ENODEV; + + device_for_each_child_node(dev, fwnode) { + int idx; + + if (fwnode_property_read_u32(fwnode, "reg", &idx) || + idx >= MAX_NPORTS) { + dev_err(dev, "missing/invalid port index\n"); + fwnode_handle_put(fwnode); + return -EINVAL; + } + + if (fwnode_property_read_u32(fwnode, "nr-gpios", + &gpio->ngpio[idx])) { + dev_info(dev, + "failed to get number of gpios for Port%c\n", + idx ? 'B' : 'A'); + gpio->ngpio[idx] = NGPIO_DEFAULT; + } + } + + /* irq_chip support */ + raw_spin_lock_init(&gpio->lock); + + gpio->gc.base = -1; + gpio->gc.get_direction = phytium_gpio_get_direction; + gpio->gc.direction_input = phytium_gpio_direction_input; + gpio->gc.direction_output = phytium_gpio_direction_output; + gpio->gc.get = phytium_gpio_get; + gpio->gc.set = phytium_gpio_set; + gpio->gc.ngpio = gpio->ngpio[0] + gpio->ngpio[1]; + gpio->gc.label = dev_name(dev); + gpio->gc.parent = dev; + gpio->gc.owner = THIS_MODULE; + + err = gpiochip_add_data(&gpio->gc, gpio); + if (err) { + dev_err(dev, "failed to register gpiochip\n"); + goto err1; + } + + err = gpiochip_irqchip_add(&gpio->gc, &phytium_gpio_irqchip, + 0, handle_bad_irq, IRQ_TYPE_NONE); + if (err) { + dev_info(dev, "could not add irqchip\n"); + goto err0; + } + gpiochip_set_chained_irqchip(&gpio->gc, &phytium_gpio_irqchip, + gpio->irq, + phytium_gpio_irq_handler); + + platform_set_drvdata(pdev, gpio); + dev_info(dev, "Phytium GPIO controller @%pa registered\n", + &res->start); + + return 0; + +err1: + gpiochip_remove(&gpio->gc); +err0: + return err; +} + +static int phytium_gpio_remove(struct platform_device *pdev) +{ + struct phytium_gpio *gpio = platform_get_drvdata(pdev); + + 
gpiochip_remove(&gpio->gc); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int phytium_gpio_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct phytium_gpio *gpio = platform_get_drvdata(pdev); + unsigned long flags; + + raw_spin_lock_irqsave(&gpio->lock, flags); + + gpio->ctx.swporta_dr = readl(gpio->regs + GPIO_SWPORTA_DR); + gpio->ctx.swporta_ddr = readl(gpio->regs + GPIO_SWPORTA_DDR); + gpio->ctx.ext_porta = readl(gpio->regs + GPIO_EXT_PORTA); + gpio->ctx.swportb_dr = readl(gpio->regs + GPIO_SWPORTB_DR); + gpio->ctx.swportb_ddr = readl(gpio->regs + GPIO_SWPORTB_DDR); + gpio->ctx.ext_portb = readl(gpio->regs + GPIO_EXT_PORTB); + + gpio->ctx.inten = readl(gpio->regs + GPIO_INTEN); + gpio->ctx.intmask = readl(gpio->regs + GPIO_INTMASK); + gpio->ctx.inttype_level = readl(gpio->regs + GPIO_INTTYPE_LEVEL); + gpio->ctx.int_polarity = readl(gpio->regs + GPIO_INT_POLARITY); + gpio->ctx.debounce = readl(gpio->regs + GPIO_DEBOUNCE); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); + + return 0; +} + +static int phytium_gpio_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct phytium_gpio *gpio = platform_get_drvdata(pdev); + unsigned long flags; + + raw_spin_lock_irqsave(&gpio->lock, flags); + + writel(gpio->ctx.swporta_dr, gpio->regs + GPIO_SWPORTA_DR); + writel(gpio->ctx.swporta_ddr, gpio->regs + GPIO_SWPORTA_DDR); + writel(gpio->ctx.ext_porta, gpio->regs + GPIO_EXT_PORTA); + writel(gpio->ctx.swportb_dr, gpio->regs + GPIO_SWPORTB_DR); + writel(gpio->ctx.swportb_ddr, gpio->regs + GPIO_SWPORTB_DDR); + writel(gpio->ctx.ext_portb, gpio->regs + GPIO_EXT_PORTB); + + writel(gpio->ctx.inten, gpio->regs + GPIO_INTEN); + writel(gpio->ctx.intmask, gpio->regs + GPIO_INTMASK); + writel(gpio->ctx.inttype_level, gpio->regs + GPIO_INTTYPE_LEVEL); + writel(gpio->ctx.int_polarity, gpio->regs + GPIO_INT_POLARITY); + writel(gpio->ctx.debounce, gpio->regs + GPIO_DEBOUNCE); + + writel(0xffffffff, 
gpio->regs + GPIO_PORTA_EOI); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(phytium_gpio_pm_ops, phytium_gpio_suspend, + phytium_gpio_resume); + +static struct platform_driver phytium_gpio_driver = { + .driver = { + .name = "gpio-phytium-platform", + .pm = &phytium_gpio_pm_ops, + .of_match_table = of_match_ptr(phytium_gpio_of_match), + .acpi_match_table = ACPI_PTR(phytium_gpio_acpi_match), + }, + .probe = phytium_gpio_probe, + .remove = phytium_gpio_remove, +}; + +module_platform_driver(phytium_gpio_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Chen Baozi "); +MODULE_DESCRIPTION("Phytium GPIO driver"); diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index e44e567bd789..3eef585b6f64 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -335,6 +335,8 @@ source "drivers/gpu/drm/tve200/Kconfig" source "drivers/gpu/drm/xen/Kconfig" +source "drivers/gpu/drm/phytium/Kconfig" + # Keep legacy drivers last menuconfig DRM_LEGACY diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index a6771cef85e2..003ad8887229 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -106,4 +106,5 @@ obj-$(CONFIG_DRM_MXSFB) += mxsfb/ obj-$(CONFIG_DRM_TINYDRM) += tinydrm/ obj-$(CONFIG_DRM_PL111) += pl111/ obj-$(CONFIG_DRM_TVE200) += tve200/ +obj-$(CONFIG_DRM_PHYTIUM) += phytium/ obj-$(CONFIG_DRM_XEN) += xen/ diff --git a/drivers/gpu/drm/phytium/Kconfig b/drivers/gpu/drm/phytium/Kconfig new file mode 100644 index 000000000000..e3024feb69d0 --- /dev/null +++ b/drivers/gpu/drm/phytium/Kconfig @@ -0,0 +1,7 @@ +config DRM_PHYTIUM + tristate "DRM Support for Phytium Graphics Card" + depends on DRM + select DRM_KMS_HELPER + help + Choose this option if you have a phytium graphics card. + This driver provides kernel mode setting and buffer management to userspace. 
diff --git a/drivers/gpu/drm/phytium/Makefile b/drivers/gpu/drm/phytium/Makefile new file mode 100644 index 000000000000..104416fc4313 --- /dev/null +++ b/drivers/gpu/drm/phytium/Makefile @@ -0,0 +1,15 @@ +phytium-dc-drm-y := phytium_display_drv.o \ + phytium_plane.o \ + phytium_crtc.o \ + phytium_dp.o \ + phytium_fb.o \ + phytium_gem.o \ + phytium_fbdev.o \ + phytium_debugfs.o \ + x100_dp.o \ + phytium_panel.o \ + x100_dc.o \ + phytium_pci.o + +obj-$(CONFIG_DRM_PHYTIUM) += phytium-dc-drm.o +CFLAGS_REMOVE_phytium_crtc.o += -mgeneral-regs-only diff --git a/drivers/gpu/drm/phytium/phytium_crtc.c b/drivers/gpu/drm/phytium/phytium_crtc.c new file mode 100644 index 000000000000..796c046d0a73 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_crtc.c @@ -0,0 +1,720 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_crtc.h" +#include "phytium_plane.h" +#include "phytium_dp.h" +#include "x100_dc.h" +#include "phytium_reg.h" + +#define MAXKERNELSIZE 9 +#define SUBPIXELINDEXBITS 5 +#define SUBPIXELCOUNT (1 << SUBPIXELINDEXBITS) +#define SUBPIXELLOADCOUNT (SUBPIXELCOUNT / 2 + 1) +#define WEIGHTSTATECOUNT (((SUBPIXELLOADCOUNT * MAXKERNELSIZE + 1) & ~1) / 2) +#define KERNELTABLESIZE (SUBPIXELLOADCOUNT * MAXKERNELSIZE * sizeof(uint16_t)) +#define PHYALIGN(n, align) (((n) + ((align) - 1)) & ~((align) - 1)) +#define KERNELSTATES (PHYALIGN(KERNELTABLESIZE + 4, 8)) +#define PHYPI 3.14159265358979323846f + +#define MATH_Add(X, Y) (float)((X) + (Y)) +#define MATH_Multiply(X, Y) (float)((X) * (Y)) +#define MATH_Divide(X, Y) (float)((X) / (Y)) +#define MATH_DivideFromUInteger(X, Y) ((float)(X) / (float)(Y)) +#define MATH_I2Float(X) (float)(X) + +struct filter_blit_array { + uint8_t kernelSize; + uint32_t scaleFactor; + uint32_t *kernelStates; +}; + +static uint32_t dc_scaling_get_factor(uint32_t src_size, uint32_t 
dst_size) +{ + uint32_t factor = 0; + + factor = ((src_size - 1) << SCALE_FACTOR_SRC_OFFSET) / (dst_size - 1); + + return factor; +} + +static float dc_sint(float x) +{ + const float B = 1.2732395477; + const float C = -0.4052847346; + const float P = 0.2310792853; + float y; + + if (x < 0) + y = B*x - C*x*x; + else + y = B*x + C*x*x; + if (y < 0) + y = P * (y * (0 - y) - y) + y; + else + y = P * (y * y - y) + y; + return y; +} + +static float dc_sinc_filter(float x, int radius) +{ + float pit, pitd, f1, f2, result; + float f_radius = MATH_I2Float(radius); + + if (x == 0.0f) { + result = 1.0f; + } else if ((x < -f_radius) || (x > f_radius)) { + result = 0.0f; + } else { + pit = MATH_Multiply(PHYPI, x); + pitd = MATH_Divide(pit, f_radius); + f1 = MATH_Divide(dc_sint(pit), pit); + f2 = MATH_Divide(dc_sint(pitd), pitd); + result = MATH_Multiply(f1, f2); + } + + return result; +} + +static int dc_calculate_sync_table( + uint8_t kernel_size, + uint32_t src_size, + uint32_t dst_size, + struct filter_blit_array *kernel_info) +{ + uint32_t scale_factor; + float f_scale; + int kernel_half; + float f_subpixel_step; + float f_subpixel_offset; + uint32_t subpixel_pos; + int kernel_pos; + int padding; + uint16_t *kernel_array; + int range = 0; + + do { + /* Compute the scale factor. */ + scale_factor = dc_scaling_get_factor(src_size, dst_size); + + /* Same kernel size and ratio as before? */ + if ((kernel_info->kernelSize == kernel_size) && + (kernel_info->scaleFactor == kernel_size)) { + break; + } + + /* check the array */ + if (kernel_info->kernelStates == NULL) + break; + + /* Store new parameters. */ + kernel_info->kernelSize = kernel_size; + kernel_info->scaleFactor = scale_factor; + + /* Compute the scale factor. */ + f_scale = MATH_DivideFromUInteger(dst_size, src_size); + + /* Adjust the factor for magnification. */ + if (f_scale > 1.0f) + f_scale = 1.0f; + + /* Calculate the kernel half. 
*/ + kernel_half = (int) (kernel_info->kernelSize >> 1); + + /* Calculate the subpixel step. */ + f_subpixel_step = MATH_Divide(1.0f, MATH_I2Float(SUBPIXELCOUNT)); + + /* Init the subpixel offset. */ + f_subpixel_offset = 0.5f; + + /* Determine kernel padding size. */ + padding = (MAXKERNELSIZE - kernel_info->kernelSize) / 2; + + /* Set initial kernel array pointer. */ + kernel_array = (uint16_t *) (kernel_info->kernelStates + 1); + + /* Loop through each subpixel. */ + for (subpixel_pos = 0; subpixel_pos < SUBPIXELLOADCOUNT; subpixel_pos++) { + /* Define a temporary set of weights. */ + float fSubpixelSet[MAXKERNELSIZE]; + + /* Init the sum of all weights for the current subpixel. */ + float fWeightSum = 0.0f; + uint16_t weightSum = 0; + short int adjustCount, adjustFrom; + short int adjustment; + + /* Compute weights. */ + for (kernel_pos = 0; kernel_pos < MAXKERNELSIZE; kernel_pos++) { + /* Determine the current index. */ + int index = kernel_pos - padding; + + /* Pad with zeros. */ + if ((index < 0) || (index >= kernel_info->kernelSize)) { + fSubpixelSet[kernel_pos] = 0.0f; + } else { + if (kernel_info->kernelSize == 1) { + fSubpixelSet[kernel_pos] = 1.0f; + } else { + /* Compute the x position for filter function. */ + float fX = MATH_Add( + MATH_I2Float(index - kernel_half), + f_subpixel_offset); + fX = MATH_Multiply(fX, f_scale); + + /* Compute the weight. */ + fSubpixelSet[kernel_pos] = dc_sinc_filter(fX, + kernel_half); + } + + /* Update the sum of weights. */ + fWeightSum = MATH_Add(fWeightSum, + fSubpixelSet[kernel_pos]); + } + } + + /* Adjust weights so that the sum will be 1.0. */ + for (kernel_pos = 0; kernel_pos < MAXKERNELSIZE; kernel_pos++) { + /* Normalize the current weight. */ + float fWeight = MATH_Divide(fSubpixelSet[kernel_pos], + fWeightSum); + + /* Convert the weight to fixed point and store in the table. 
*/ + if (fWeight == 0.0f) + kernel_array[kernel_pos] = 0x0000; + else if (fWeight >= 1.0f) + kernel_array[kernel_pos] = 0x4000; + else if (fWeight <= -1.0f) + kernel_array[kernel_pos] = 0xC000; + else + kernel_array[kernel_pos] = + (int16_t) MATH_Multiply(fWeight, 16384.0f); + weightSum += kernel_array[kernel_pos]; + } + + /* Adjust the fixed point coefficients. */ + adjustCount = 0x4000 - weightSum; + if (adjustCount < 0) { + adjustCount = -adjustCount; + adjustment = -1; + } else { + adjustment = 1; + } + + adjustFrom = (MAXKERNELSIZE - adjustCount) / 2; + for (kernel_pos = 0; kernel_pos < adjustCount; kernel_pos++) { + range = (MAXKERNELSIZE*subpixel_pos + adjustFrom + kernel_pos) * + sizeof(uint16_t); + if ((range >= 0) && (range < KERNELTABLESIZE)) + kernel_array[adjustFrom + kernel_pos] += adjustment; + else + DRM_ERROR("%s failed\n", __func__); + } + + kernel_array += MAXKERNELSIZE; + + /* Advance to the next subpixel. */ + f_subpixel_offset = MATH_Add(f_subpixel_offset, -f_subpixel_step); + } + } while (0); + + return 0; +} + +static void phytium_dc_scaling_config(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct drm_display_mode *mode = &crtc->state->adjusted_mode; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; + uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + uint32_t scale_factor_x, scale_factor_y, i; + uint32_t kernelStates[128]; + struct filter_blit_array kernel_info_width; + void *tmp = NULL; + + if (mode->hdisplay != mode->crtc_hdisplay || mode->vdisplay != mode->crtc_vdisplay) { + phytium_crtc->src_width = mode->hdisplay; + phytium_crtc->src_height = mode->vdisplay; + phytium_crtc->dst_width = mode->crtc_hdisplay; + phytium_crtc->dst_height = mode->crtc_vdisplay; + + phytium_crtc->dst_x = (mode->crtc_hdisplay - phytium_crtc->dst_width) / 2; + phytium_crtc->dst_y = 
(mode->crtc_vdisplay - phytium_crtc->dst_height) / 2; + + scale_factor_x = dc_scaling_get_factor(phytium_crtc->src_width, + phytium_crtc->dst_width); + scale_factor_y = dc_scaling_get_factor(phytium_crtc->src_height, + phytium_crtc->dst_height); + if (scale_factor_y > (SCALE_FACTOR_Y_MAX << SCALE_FACTOR_SRC_OFFSET)) + scale_factor_y = (SCALE_FACTOR_Y_MAX << SCALE_FACTOR_SRC_OFFSET); + + phytium_writel_reg(priv, scale_factor_x & SCALE_FACTOR_X_MASK, + group_offset, PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_X); + phytium_writel_reg(priv, scale_factor_y & SCALE_FACTOR_Y_MASK, + group_offset, PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_Y); + phytium_writel_reg(priv, FRAMEBUFFER_TAP, + group_offset, PHYTIUM_DC_FRAMEBUFFER_SCALECONFIG); + + tmp = kmalloc(KERNELSTATES, GFP_KERNEL); + if (!tmp) { + DRM_ERROR("malloc %ld failed\n", KERNELSTATES); + return; + } + + memset(&kernel_info_width, 0, sizeof(struct filter_blit_array)); + kernel_info_width.kernelStates = tmp; + memset(kernel_info_width.kernelStates, 0, KERNELSTATES); + kernel_neon_begin(); + dc_calculate_sync_table(FRAMEBUFFER_HORIZONTAL_FILTER_TAP, + phytium_crtc->src_width, + phytium_crtc->dst_width, + &kernel_info_width); + memset(kernelStates, 0, sizeof(kernelStates)); + memcpy(kernelStates, kernel_info_width.kernelStates + 1, KERNELSTATES - 4); + kernel_neon_end(); + phytium_writel_reg(priv, HORI_FILTER_INDEX, + group_offset, PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER_INDEX); + for (i = 0; i < 128; i++) { + phytium_writel_reg(priv, kernelStates[i], + group_offset, PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER); + } + + memset(&kernel_info_width, 0, sizeof(struct filter_blit_array)); + kernel_info_width.kernelStates = tmp; + memset(kernel_info_width.kernelStates, 0, KERNELSTATES); + kernel_neon_begin(); + dc_calculate_sync_table(FRAMEBUFFER_FILTER_TAP, phytium_crtc->src_height, + phytium_crtc->dst_height, &kernel_info_width); + memset(kernelStates, 0, sizeof(kernelStates)); + memcpy(kernelStates, kernel_info_width.kernelStates + 1, KERNELSTATES 
- 4); + kernel_neon_end(); + phytium_writel_reg(priv, VERT_FILTER_INDEX, + group_offset, PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER_INDEX); + for (i = 0; i < 128; i++) + phytium_writel_reg(priv, kernelStates[i], + group_offset, PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER); + phytium_writel_reg(priv, INITIALOFFSET, + group_offset, PHYTIUM_DC_FRAMEBUFFER_INITIALOFFSET); + kfree(tmp); + phytium_crtc->scale_enable = true; + } else { + phytium_crtc->scale_enable = false; + } +} + +static void phytium_crtc_gamma_set(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; + uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + uint32_t config = 0; + struct drm_crtc_state *state = crtc->state; + struct drm_color_lut *lut; + int i; + + if (state->gamma_lut) { + if (WARN((state->gamma_lut->length/sizeof(struct drm_color_lut) != GAMMA_INDEX_MAX), + "gamma size is not match\n")) + return; + lut = (struct drm_color_lut *)state->gamma_lut->data; + for (i = 0; i < GAMMA_INDEX_MAX; i++) { + phytium_writel_reg(priv, i, group_offset, PHYTIUM_DC_GAMMA_INDEX); + config = ((lut[i].red >> 6) & GAMMA_RED_MASK) << GAMMA_RED_SHIFT; + config |= (((lut[i].green >> 6) & GAMMA_GREEN_MASK) << GAMMA_GREEN_SHIFT); + config |= (((lut[i].blue >> 6) & GAMMA_BLUE_MASK) << GAMMA_BLUE_SHIFT); + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_GAMMA_DATA); + } + } +} + +static void phytium_crtc_gamma_init(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; + uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + uint32_t config = 0; + uint16_t *red, *green, *blue; + int i; + + if (WARN((crtc->gamma_size != GAMMA_INDEX_MAX), "gamma size is not match\n")) + return; + + 
red = crtc->gamma_store; + green = red + crtc->gamma_size; + blue = green + crtc->gamma_size; + + for (i = 0; i < GAMMA_INDEX_MAX; i++) { + phytium_writel_reg(priv, i, group_offset, PHYTIUM_DC_GAMMA_INDEX); + config = ((*red++ >> 6) & GAMMA_RED_MASK) << GAMMA_RED_SHIFT; + config |= (((*green++ >> 6) & GAMMA_GREEN_MASK) << GAMMA_GREEN_SHIFT); + config |= (((*blue++ >> 6) & GAMMA_BLUE_MASK) << GAMMA_BLUE_SHIFT); + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_GAMMA_DATA); + } +} + +static void phytium_crtc_destroy(struct drm_crtc *crtc) +{ + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + + drm_crtc_cleanup(crtc); + kfree(phytium_crtc); +} + +struct drm_crtc_state * +phytium_crtc_atomic_duplicate_state(struct drm_crtc *crtc) +{ + struct phytium_crtc_state *phytium_crtc_state = NULL; + + phytium_crtc_state = kmemdup(crtc->state, sizeof(*phytium_crtc_state), + GFP_KERNEL); + if (!phytium_crtc_state) + return NULL; + __drm_atomic_helper_crtc_duplicate_state(crtc, + &phytium_crtc_state->base); + + return &phytium_crtc_state->base; +} + +void +phytium_crtc_atomic_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + struct phytium_crtc_state *phytium_crtc_state = + to_phytium_crtc_state(state); + + phytium_crtc_state = to_phytium_crtc_state(state); + __drm_atomic_helper_crtc_destroy_state(state); + kfree(phytium_crtc_state); +} + +static const struct drm_crtc_funcs phytium_crtc_funcs = { + .gamma_set = drm_atomic_helper_legacy_gamma_set, + .set_config = drm_atomic_helper_set_config, + .destroy = phytium_crtc_destroy, + .page_flip = drm_atomic_helper_page_flip, + .reset = drm_atomic_helper_crtc_reset, + .atomic_duplicate_state = phytium_crtc_atomic_duplicate_state, + .atomic_destroy_state = phytium_crtc_atomic_destroy_state, +}; + +static void +phytium_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + 
struct drm_display_mode *mode = &crtc->state->adjusted_mode; + struct drm_atomic_state *state = old_state->state; + struct drm_connector_state *new_conn_state; + struct drm_connector *conn; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; + uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + int config = 0, i = 0; + + for_each_new_connector_in_state(state, conn, new_conn_state, i) { + if (new_conn_state->crtc != crtc) + continue; + + switch (conn->display_info.bpc) { + case 10: + phytium_crtc->bpc = DP_RGB101010; + break; + case 6: + phytium_crtc->bpc = DP_RGB666; + break; + default: + phytium_crtc->bpc = DP_RGB888; + break; + } + } + + /* config pix clock */ + phytium_crtc->dc_hw_config_pix_clock(crtc, mode->clock); + + phytium_dc_scaling_config(crtc, old_state); + config = ((mode->crtc_hdisplay & HDISPLAY_END_MASK) << HDISPLAY_END_SHIFT) + | ((mode->crtc_htotal&HDISPLAY_TOTAL_MASK) << HDISPLAY_TOTAL_SHIFT); + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_HDISPLAY); + config = ((mode->crtc_hsync_start & HSYNC_START_MASK) << HSYNC_START_SHIFT) + | ((mode->crtc_hsync_end & HSYNC_END_MASK) << HSYNC_END_SHIFT) + | HSYNC_PULSE_ENABLED; + config |= (mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : HSYNC_NEGATIVE; + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_HSYNC); + config = ((mode->crtc_vdisplay & VDISPLAY_END_MASK) << VDISPLAY_END_SHIFT) + | ((mode->crtc_vtotal & VDISPLAY_TOTAL_MASK) << VDISPLAY_TOTAL_SHIFT); + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_VDISPLAY); + config = ((mode->crtc_vsync_start & VSYNC_START_MASK) << VSYNC_START_SHIFT) + | ((mode->crtc_vsync_end & VSYNC_END_MASK) << VSYNC_END_SHIFT) + | VSYNC_PULSE_ENABLED; + config |= (mode->flags & DRM_MODE_FLAG_PVSYNC) ? 
0 : VSYNC_NEGATIVE; + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_VSYNC); + config = PANEL_DATAENABLE_ENABLE | PANEL_DATA_ENABLE | PANEL_CLOCK_ENABLE; + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_PANEL_CONFIG); + config = phytium_crtc->bpc | OUTPUT_DP; + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_DP_CONFIG); + + config = phytium_readl_reg(priv, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + + if (crtc->state->active) + config |= FRAMEBUFFER_OUTPUT | FRAMEBUFFER_RESET; + else + config &= (~(FRAMEBUFFER_OUTPUT | FRAMEBUFFER_RESET)); + + if (phytium_crtc->scale_enable) + config |= FRAMEBUFFER_SCALE_ENABLE; + else + config &= (~FRAMEBUFFER_SCALE_ENABLE); + + config |= FRAMEBUFFER_GAMMA_ENABLE; + + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + drm_crtc_vblank_on(crtc); +} + +static void +phytium_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) +{ + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + + drm_crtc_vblank_off(crtc); + phytium_crtc->dc_hw_disable(crtc); +} + +static void phytium_crtc_update_timing_for_drm_display_mode(struct drm_display_mode *drm_mode, + const struct drm_display_mode *native_mode) +{ + if (native_mode->clock == drm_mode->clock && + native_mode->htotal == drm_mode->htotal && + native_mode->vtotal == drm_mode->vtotal) { + drm_mode->crtc_hdisplay = native_mode->crtc_hdisplay; + drm_mode->crtc_vdisplay = native_mode->crtc_vdisplay; + drm_mode->crtc_clock = native_mode->crtc_clock; + drm_mode->crtc_hblank_start = native_mode->crtc_hblank_start; + drm_mode->crtc_hblank_end = native_mode->crtc_hblank_end; + drm_mode->crtc_hsync_start = native_mode->crtc_hsync_start; + drm_mode->crtc_hsync_end = native_mode->crtc_hsync_end; + drm_mode->crtc_htotal = native_mode->crtc_htotal; + drm_mode->crtc_hskew = native_mode->crtc_hskew; + drm_mode->crtc_vblank_start = native_mode->crtc_vblank_start; + drm_mode->crtc_vblank_end = 
native_mode->crtc_vblank_end; + drm_mode->crtc_vsync_start = native_mode->crtc_vsync_start; + drm_mode->crtc_vsync_end = native_mode->crtc_vsync_end; + drm_mode->crtc_vtotal = native_mode->crtc_vtotal; + } +} + +static int +phytium_crtc_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state) +{ + struct drm_plane_state *new_plane_state = NULL; + int ret = 0; + struct drm_atomic_state *state = crtc_state->state; + struct drm_connector *connector; + struct drm_connector_state *new_con_state; + uint32_t i; + struct phytium_dp_device *phytium_dp = NULL; + + for_each_new_connector_in_state(state, connector, new_con_state, i) { + if (new_con_state->crtc == crtc) { + phytium_dp = connector_to_dp_device(connector); + break; + } + } + if (phytium_dp) + phytium_crtc_update_timing_for_drm_display_mode(&crtc_state->adjusted_mode, + &phytium_dp->native_mode); + + new_plane_state = drm_atomic_get_new_plane_state(crtc_state->state, + crtc->primary); + if (crtc_state->enable && new_plane_state && !new_plane_state->crtc) { + ret = -EINVAL; + goto fail; + } + + return 0; +fail: + return ret; +} + +static void +phytium_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe, config; + uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + + config = phytium_readl_reg(priv, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + if (config & FRAMEBUFFER_RESET) { + phytium_writel_reg(priv, config | FRAMEBUFFER_VALID_PENDING, + group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + } +} + +static void phytium_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + struct 
phytium_crtc_state *phytium_crtc_state = NULL; + int phys_pipe = phytium_crtc->phys_pipe, config; + uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + + DRM_DEBUG_KMS("crtc->state active:%d enable:%d\n", + crtc->state->active, crtc->state->enable); + phytium_crtc_state = to_phytium_crtc_state(crtc->state); + + if (crtc->state->color_mgmt_changed) + phytium_crtc_gamma_set(crtc); + + config = phytium_readl_reg(priv, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + phytium_writel_reg(priv, config&(~FRAMEBUFFER_VALID_PENDING), + group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + + if (crtc->state->event) { + DRM_DEBUG_KMS("vblank->refcount:%d\n", + atomic_read(&dev->vblank[0].refcount)); + spin_lock_irq(&dev->event_lock); + if (drm_crtc_vblank_get(crtc) == 0) + drm_crtc_arm_vblank_event(crtc, crtc->state->event); + else + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + spin_unlock_irq(&dev->event_lock); + } +} + +static enum drm_mode_status +phytium_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + + if (mode->crtc_clock > priv->info.crtc_clock_max) + return MODE_CLOCK_HIGH; + + if (mode->hdisplay > priv->info.hdisplay_max) + return MODE_BAD_HVALUE; + + if (mode->vdisplay > priv->info.vdisplay_max) + return MODE_BAD_VVALUE; + + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + return MODE_NO_INTERLACE; + + return MODE_OK; +} + +static const struct drm_crtc_helper_funcs phytium_crtc_helper_funcs = { + .mode_valid = phytium_crtc_mode_valid, + .atomic_check = phytium_crtc_atomic_check, + .atomic_begin = phytium_crtc_atomic_begin, + .atomic_flush = phytium_crtc_atomic_flush, + .atomic_enable = phytium_crtc_atomic_enable, + .atomic_disable = phytium_crtc_atomic_disable, +}; + +void phytium_crtc_resume(struct drm_device *drm_dev) +{ + struct drm_crtc *crtc; + + drm_for_each_crtc(crtc, drm_dev) { + 
phytium_crtc_gamma_init(crtc); + } +} + +int phytium_crtc_init(struct drm_device *dev, int phys_pipe) +{ + struct phytium_crtc *phytium_crtc; + struct phytium_crtc_state *phytium_crtc_state; + struct phytium_plane *phytium_primary_plane = NULL; + struct phytium_plane *phytium_cursor_plane = NULL; + struct phytium_display_private *priv = dev->dev_private; + int ret; + + phytium_crtc = kzalloc(sizeof(*phytium_crtc), GFP_KERNEL); + if (!phytium_crtc) { + ret = -ENOMEM; + goto failed_malloc_crtc; + } + + phytium_crtc_state = kzalloc(sizeof(*phytium_crtc_state), GFP_KERNEL); + if (!phytium_crtc_state) { + ret = -ENOMEM; + goto failed_malloc_crtc_state; + } + + phytium_crtc_state->base.crtc = &phytium_crtc->base; + phytium_crtc->base.state = &phytium_crtc_state->base; + phytium_crtc->phys_pipe = phys_pipe; + + if (IS_X100(priv)) { + phytium_crtc->dc_hw_config_pix_clock = x100_dc_hw_config_pix_clock; + phytium_crtc->dc_hw_disable = x100_dc_hw_disable; + priv->dc_reg_base[phys_pipe] = X100_DC_BASE(phys_pipe); + priv->dcreq_reg_base[phys_pipe] = X100_DCREQ_BASE(phys_pipe); + priv->address_transform_base = X100_ADDRESS_TRANSFORM_BASE; + } + + phytium_primary_plane = phytium_primary_plane_create(dev, phys_pipe); + if (IS_ERR(phytium_primary_plane)) { + ret = PTR_ERR(phytium_primary_plane); + DRM_ERROR("create primary plane failed, phys_pipe(%d)\n", phys_pipe); + goto failed_create_primary; + } + + phytium_cursor_plane = phytium_cursor_plane_create(dev, phys_pipe); + if (IS_ERR(phytium_cursor_plane)) { + ret = PTR_ERR(phytium_cursor_plane); + DRM_ERROR("create cursor plane failed, phys_pipe(%d)\n", phys_pipe); + goto failed_create_cursor; + } + + ret = drm_crtc_init_with_planes(dev, &phytium_crtc->base, + &phytium_primary_plane->base, + &phytium_cursor_plane->base, + &phytium_crtc_funcs, + "phys_pipe %d", phys_pipe); + + if (ret) { + DRM_ERROR("init crtc with plane failed, phys_pipe(%d)\n", phys_pipe); + goto failed_crtc_init; + } + drm_crtc_helper_add(&phytium_crtc->base, 
&phytium_crtc_helper_funcs); + drm_crtc_vblank_reset(&phytium_crtc->base); + drm_mode_crtc_set_gamma_size(&phytium_crtc->base, GAMMA_INDEX_MAX); + drm_crtc_enable_color_mgmt(&phytium_crtc->base, 0, false, GAMMA_INDEX_MAX); + phytium_crtc_gamma_init(&phytium_crtc->base); + + return 0; + +failed_crtc_init: +failed_create_cursor: + /* drm_mode_config_cleanup() will free any crtcs/planes already initialized */ +failed_create_primary: + kfree(phytium_crtc_state); +failed_malloc_crtc_state: + kfree(phytium_crtc); +failed_malloc_crtc: + return ret; +} diff --git a/drivers/gpu/drm/phytium/phytium_crtc.h b/drivers/gpu/drm/phytium/phytium_crtc.h new file mode 100644 index 000000000000..125a99b42660 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_crtc.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + +#ifndef __PHYTIUM_CRTC_H__ +#define __PHYTIUM_CRTC_H__ + +struct phytium_crtc { + struct drm_crtc base; + int phys_pipe; + unsigned int bpc; + + /* scale */ + uint32_t src_width; + uint32_t src_height; + uint32_t dst_width; + uint32_t dst_height; + uint32_t dst_x; + uint32_t dst_y; + bool scale_enable; + bool reserve[3]; + + void (*dc_hw_config_pix_clock)(struct drm_crtc *crtc, int clock); + void (*dc_hw_disable)(struct drm_crtc *crtc); +}; + +struct phytium_crtc_state { + struct drm_crtc_state base; +}; + +#define to_phytium_crtc(x) container_of(x, struct phytium_crtc, base) +#define to_phytium_crtc_state(x) container_of(x, struct phytium_crtc_state, base) + +void phytium_crtc_resume(struct drm_device *drm_dev); +int phytium_crtc_init(struct drm_device *dev, int pipe); +#endif /* __PHYTIUM_CRTC_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_debugfs.c b/drivers/gpu/drm/phytium/phytium_debugfs.c new file mode 100644 index 000000000000..b38deafcf874 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_debugfs.c @@ -0,0 +1,400 @@ +// SPDX-License-Identifier: GPL-2.0 +/* 
Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + +#include +#include + +#include "phytium_display_drv.h" +#include "phytium_dp.h" +#include "phytium_reg.h" + +static ssize_t +phytium_dp_register_write(struct file *filp, + const char __user *ubuf, + size_t len, + loff_t *ppos) +{ + char tmp[16]; + + if (len >= sizeof(tmp)) + return -EINVAL; + + memset(tmp, 0, sizeof(tmp)); + if (copy_from_user(tmp, ubuf, len)) + return -EFAULT; + tmp[len] = '\0'; + + return len; +} + +static int phytium_dp_register_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_M_VID, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_M_VID)); + seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_N_VID, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_N_VID)); + seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_TRANSFER_UNIT_SIZE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_TRANSFER_UNIT_SIZE)); + seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_DATA_COUNT, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_DATA_COUNT)); + seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_MAIN_LINK_HTOTAL, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HTOTAL)); + seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_MAIN_LINK_HRES, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HRES)); + seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_MAIN_LINK_HSWIDTH, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HSWIDTH)); + seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_MAIN_LINK_HSTART, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HSTART)); + seq_printf(m, 
"addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VTOTAL, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VTOTAL)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VRES, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VRES)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VSWIDTH, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VSWIDTH)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VSTART, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VSTART)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_POLARITY, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_POLARITY)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_MISC0, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_MISC0)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_MISC1, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_MISC1)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_USER_SYNC_POLARITY, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_USER_SYNC_POLARITY)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_VIDEO_STREAM_ENABLE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SECONDARY_STREAM_ENABLE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE)); + seq_puts(m, "audio:\n"); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_INPUT_SELECT, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_INPUT_SELECT)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_DIRECT_CLKDIV, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_DIRECT_CLKDIV)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CHANNEL_COUNT, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CHANNEL_COUNT)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CHANNEL_MAP, + 
phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CHANNEL_MAP)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_DATA_WINDOW, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_DATA_WINDOW)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_CATEGORY_CODE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_CATEGORY_CODE)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_MAUD, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_MAUD)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_NAUD, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_NAUD)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CLOCK_MODE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CLOCK_MODE)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_SOURCE_FORMAT, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_SOURCE_FORMAT)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_AUDIO_ENABLE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE)); + + return 0; +} + +static int phytium_dp_register_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_dp_register_show, inode->i_private); +} + +static const struct file_operations phytium_dp_register_fops = { + .owner = THIS_MODULE, + .open = phytium_dp_register_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = phytium_dp_register_write, +}; + +static ssize_t +phytium_dp_trigger_train_fail_write(struct file *filp, + const char __user *ubuf, + size_t len, + loff_t *ppos) +{ + struct seq_file *m = filp->private_data; + struct drm_connector 
*connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + char tmp[16]; + + if (len >= sizeof(tmp)) + return -EINVAL; + + memset(tmp, 0, sizeof(tmp)); + if (copy_from_user(tmp, ubuf, len)) + return -EFAULT; + tmp[len] = '\0'; + + if (kstrtouint(tmp, 10, &phytium_dp->trigger_train_fail) != 0) + return -EINVAL; + + return len; +} + +static int phytium_dp_trigger_train_fail_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + seq_printf(m, "trigger_train_fail: %d\n", phytium_dp->trigger_train_fail); + seq_printf(m, "train_retry_count: %d\n", phytium_dp->train_retry_count); + + return 0; +} + +static int phytium_dp_trigger_train_fail_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_dp_trigger_train_fail_show, inode->i_private); +} + +static const struct file_operations phytium_dp_trigger_train_fail_fops = { + .owner = THIS_MODULE, + .open = phytium_dp_trigger_train_fail_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = phytium_dp_trigger_train_fail_write, +}; + +static int phytium_edp_backlight_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + if (!phytium_dp->is_edp) + return -ENODEV; + + mutex_lock(&phytium_dp->panel.panel_lock); + seq_printf(m, "backlight: %s\n", phytium_dp->panel.backlight_enabled?"enabled":"disabled"); + mutex_unlock(&phytium_dp->panel.panel_lock); + + return 0; +} + +static int phytium_edp_backlight_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_edp_backlight_show, inode->i_private); +} + +static const struct file_operations phytium_edp_backlight_fops = { + .owner = THIS_MODULE, + .open = phytium_edp_backlight_open, + .read = seq_read, + .llseek = seq_lseek, + 
.release = single_release, +}; + +static int phytium_edp_power_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + if (!phytium_dp->is_edp) + return -ENODEV; + + mutex_lock(&phytium_dp->panel.panel_lock); + seq_printf(m, "power: %s\n", phytium_dp->panel.power_enabled?"enabled":"disabled"); + mutex_unlock(&phytium_dp->panel.panel_lock); + + return 0; +} + +static int phytium_edp_power_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_edp_power_show, inode->i_private); +} + +static const struct file_operations phytium_edp_power_fops = { + .owner = THIS_MODULE, + .open = phytium_edp_power_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +struct dpcd_block { + /* DPCD dump start address. */ + unsigned int offset; + /* DPCD dump end address, inclusive. If unset, .size will be used. */ + unsigned int end; + /* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */ + size_t size; + /* Only valid for eDP. 
*/ + bool edp; +}; + +static const struct dpcd_block phytium_dpcd_debug[] = { + { .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE }, + { .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS }, + { .offset = DP_DOWNSTREAM_PORT_0, .size = 16 }, + { .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET }, + { .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 }, + { .offset = DP_SET_POWER }, + { .offset = DP_EDP_DPCD_REV }, + { .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 }, + { .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB }, + { .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET }, + { .offset = DP_DEVICE_SERVICE_IRQ_VECTOR, .size = 1 }, + { .offset = DP_TEST_REQUEST, .end = DP_TEST_PATTERN }, +}; + +static int phytium_dpcd_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + uint8_t buf[16], i; + ssize_t err; + + if (connector->status != connector_status_connected) + return -ENODEV; + + for (i = 0; i < ARRAY_SIZE(phytium_dpcd_debug); i++) { + const struct dpcd_block *b = &phytium_dpcd_debug[i]; + size_t size = b->end ? 
b->end - b->offset + 1 : (b->size ?: 1); + + if (WARN_ON(size > sizeof(buf))) + continue; + + err = drm_dp_dpcd_read(&phytium_dp->aux, b->offset, buf, size); + if (err <= 0) { + DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n", + size, b->offset, err); + continue; + } + + seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf); + } + + return 0; +} + +static int phytium_dpcd_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_dpcd_show, inode->i_private); +} + +static const struct file_operations phytium_dpcd_fops = { + .owner = THIS_MODULE, + .open = phytium_dpcd_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static ssize_t +phytium_dp_state_write(struct file *filp, + const char __user *ubuf, + size_t len, + loff_t *ppos) +{ + char tmp[16]; + + if (len >= sizeof(tmp)) + return -EINVAL; + + memset(tmp, 0, sizeof(tmp)); + if (copy_from_user(tmp, ubuf, len)) + return -EFAULT; + tmp[len] = '\0'; + + return len; +} + +static int phytium_dp_state_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + seq_printf(m, "port number: %d\n", phytium_dp->port); + seq_printf(m, "source_max_lane_count: %d\n", phytium_dp->source_max_lane_count); + seq_printf(m, "max_source_rates: %d\n", + phytium_dp->source_rates[phytium_dp->num_source_rates-1]); + if (connector->status == connector_status_connected) { + seq_printf(m, "sink_max_lane_count: %d\n", phytium_dp->sink_max_lane_count); + seq_printf(m, "max_sink_rates: %d\n", + phytium_dp->sink_rates[phytium_dp->num_sink_rates-1]); + seq_printf(m, "link_rate: %d\n", phytium_dp->link_rate); + seq_printf(m, "link_lane_count: %d\n", phytium_dp->link_lane_count); + seq_printf(m, "train_set[0]: %d\n", phytium_dp->train_set[0]); + seq_printf(m, "has_audio: %s\n", phytium_dp->has_audio?"yes":"no"); + } + + return 0; +} + +static int 
phytium_dp_state_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_dp_state_show, inode->i_private); +} + +static const struct file_operations phytium_dp_state_fops = { + .owner = THIS_MODULE, + .open = phytium_dp_state_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = phytium_dp_state_write, +}; + +static const struct phytium_debugfs_connector_files { + const char *name; + const struct file_operations *fops; +} phytium_debugfs_connector_files[] = { + {"dp_state", &phytium_dp_state_fops}, + {"dpcd", &phytium_dpcd_fops}, + {"dp_register", &phytium_dp_register_fops}, + {"dp_trigger_train_fail", &phytium_dp_trigger_train_fail_fops}, +}; + +static const struct phytium_debugfs_connector_files phytium_edp_debugfs_connector_files[] = { + {"edp_power", &phytium_edp_power_fops}, + {"edp_backlight", &phytium_edp_backlight_fops}, +}; + +int phytium_debugfs_connector_add(struct drm_connector *connector) +{ + struct dentry *root = connector->debugfs_entry; + struct dentry *ent; + int i; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + if (!root) + return -ENODEV; + + for (i = 0; i < ARRAY_SIZE(phytium_debugfs_connector_files); i++) { + ent = debugfs_create_file(phytium_debugfs_connector_files[i].name, + 0644, + root, + connector, + phytium_debugfs_connector_files[i].fops); + if (!ent) + return -ENOMEM; + } + + if (phytium_dp->is_edp) + for (i = 0; i < ARRAY_SIZE(phytium_edp_debugfs_connector_files); i++) { + ent = debugfs_create_file(phytium_edp_debugfs_connector_files[i].name, + 0644, + root, + connector, + phytium_edp_debugfs_connector_files[i].fops); + if (!ent) + return -ENOMEM; + } + + return 0; +} diff --git a/drivers/gpu/drm/phytium/phytium_debugfs.h b/drivers/gpu/drm/phytium/phytium_debugfs.h new file mode 100644 index 000000000000..37ca93c18821 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_debugfs.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* 
Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + +#ifndef __PHYTIUM_DEBUGFS_H__ +#define __PHYTIUM_DEBUGFS_H__ + +int phytium_debugfs_register(struct phytium_display_private *priv); +int phytium_debugfs_connector_add(struct drm_connector *connector); + +#endif /* __PHYTIUM_DEBUGFS_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_display_drv.c b/drivers/gpu/drm/phytium/phytium_display_drv.c new file mode 100644 index 000000000000..49a66740388f --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_display_drv.c @@ -0,0 +1,461 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_plane.h" +#include "phytium_crtc.h" +#include "phytium_dp.h" +#include "phytium_gem.h" +#include "phytium_fb.h" +#include "phytium_fbdev.h" +#include "phytium_reg.h" +#include "phytium_pci.h" + +int dc_fake_mode_enable; +module_param(dc_fake_mode_enable, int, 0644); +MODULE_PARM_DESC(dc_fake_mode_enable, "Enable DC fake mode (0-disabled; 1-enabled; default-0)"); + +int dc_fast_training_check = 1; +module_param(dc_fast_training_check, int, 0644); +MODULE_PARM_DESC(dc_fast_training_check, "Check dp fast training (0-disabled; 1-enabled; default-1)"); + +int num_source_rates = 4; +module_param(num_source_rates, int, 0644); +MODULE_PARM_DESC(num_source_rates, "set the source max rates (1-1.62Gbps; 2-2.7Gbps; 3-5.4Gbps; 4-8.1Gbps; default-4)"); + +int source_max_lane_count = 4; +module_param(source_max_lane_count, int, 0644); +MODULE_PARM_DESC(source_max_lane_count, "set the source lane count (1-1lane; 2-2lane; 4-4lane; default-4)"); + +int link_dynamic_adjust; +module_param(link_dynamic_adjust, int, 0644); +MODULE_PARM_DESC(link_dynamic_adjust, "dynamic select the train pamameter according to the display mode (0-disabled; 1-enabled; default-1)"); + +int 
phytium_wait_cmd_done(struct phytium_display_private *priv, + uint32_t register_offset, + uint32_t request_bit, + uint32_t reply_bit) +{ + int timeout = 500, config = 0, ret = 0; + + do { + mdelay(1); + timeout--; + config = phytium_readl_reg(priv, 0, register_offset); + } while ((!(config & reply_bit)) && timeout); + + phytium_writel_reg(priv, config & (~request_bit), 0, register_offset); + + if (timeout == 0) { + DRM_ERROR("wait cmd reply timeout\n"); + ret = -EBUSY; + } else { + timeout = 500; + do { + mdelay(1); + timeout--; + config = phytium_readl_reg(priv, 0, register_offset); + } while ((config & reply_bit) && timeout); + if (timeout == 0) { + DRM_ERROR("clear cmd timeout\n"); + ret = -EBUSY; + } + } + mdelay(5); + + return ret; +} + +static void phytium_irq_preinstall(struct drm_device *dev) +{ + struct phytium_display_private *priv = dev->dev_private; + int i, status; + + for_each_pipe_masked(priv, i) { + status = phytium_readl_reg(priv, priv->dc_reg_base[i], PHYTIUM_DC_INT_STATUS); + phytium_writel_reg(priv, INT_DISABLE, priv->dc_reg_base[i], PHYTIUM_DC_INT_ENABLE); + } +} + +static void phytium_irq_uninstall(struct drm_device *dev) +{ + struct phytium_display_private *priv = dev->dev_private; + int i, status; + + for_each_pipe_masked(priv, i) { + status = phytium_readl_reg(priv, priv->dc_reg_base[i], PHYTIUM_DC_INT_STATUS); + phytium_writel_reg(priv, INT_DISABLE, priv->dc_reg_base[i], PHYTIUM_DC_INT_ENABLE); + } +} + +static irqreturn_t phytium_display_irq_handler(int irq, void *data) +{ + struct drm_device *dev = data; + struct phytium_display_private *priv = dev->dev_private; + bool enabled = 0; + int i = 0, virt_pipe = 0; + irqreturn_t ret = IRQ_NONE, ret1 = IRQ_NONE; + + for_each_pipe_masked(priv, i) { + enabled = phytium_readl_reg(priv, priv->dc_reg_base[i], PHYTIUM_DC_INT_STATUS); + if (enabled & INT_STATUS) { + virt_pipe = phytium_get_virt_pipe(priv, i); + if (virt_pipe < 0) + return IRQ_NONE; + drm_handle_vblank(dev, virt_pipe); + ret = 
IRQ_HANDLED; + if (priv->dc_hw_clear_msi_irq) + priv->dc_hw_clear_msi_irq(priv, i); + } + } + + ret1 = phytium_dp_hpd_irq_handler(priv); + if (ret == IRQ_HANDLED || ret1 == IRQ_HANDLED) + return IRQ_HANDLED; + + return IRQ_NONE; +} + +static int phytium_enable_vblank(struct drm_device *dev, unsigned int virt_pipe) +{ + struct phytium_display_private *priv = dev->dev_private; + int phys_pipe; + + phys_pipe = phytium_get_phys_pipe(priv, virt_pipe); + if (phys_pipe < 0) + return phys_pipe; + + phytium_writel_reg(priv, INT_ENABLE, priv->dc_reg_base[phys_pipe], PHYTIUM_DC_INT_ENABLE); + + return 0; +} + +static void phytium_disable_vblank(struct drm_device *dev, unsigned int virt_pipe) +{ + struct phytium_display_private *priv = dev->dev_private; + int phys_pipe; + + phys_pipe = phytium_get_phys_pipe(priv, virt_pipe); + if (phys_pipe >= 0) + phytium_writel_reg(priv, INT_DISABLE, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_INT_ENABLE); +} + +static const struct drm_mode_config_funcs phytium_mode_funcs = { + .fb_create = phytium_fb_create, + .output_poll_changed = drm_fb_helper_output_poll_changed, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +static void phytium_atomic_commit_tail(struct drm_atomic_state *state) +{ + struct drm_device *dev = state->dev; + + drm_atomic_helper_commit_modeset_disables(dev, state); + drm_atomic_helper_commit_planes(dev, state, false); + drm_atomic_helper_commit_modeset_enables(dev, state); + drm_atomic_helper_commit_hw_done(state); + drm_atomic_helper_wait_for_flip_done(dev, state); + drm_atomic_helper_cleanup_planes(dev, state); +} + +static struct drm_mode_config_helper_funcs phytium_mode_config_helpers = { + .atomic_commit_tail = phytium_atomic_commit_tail, +}; + +static int phytium_modeset_init(struct drm_device *dev) +{ + struct phytium_display_private *priv = dev->dev_private; + int i = 0, ret; + + drm_mode_config_init(dev); + dev->mode_config.min_width = 0; + dev->mode_config.min_height 
= 0; + dev->mode_config.max_width = 16384; + dev->mode_config.max_height = 16384; + dev->mode_config.cursor_width = 32; + dev->mode_config.cursor_height = 32; + + dev->mode_config.preferred_depth = 24; + dev->mode_config.prefer_shadow = 1; + dev->mode_config.allow_fb_modifiers = true; + + dev->mode_config.funcs = &phytium_mode_funcs; + dev->mode_config.helper_private = &phytium_mode_config_helpers; + + for_each_pipe_masked(priv, i) { + ret = phytium_crtc_init(dev, i); + if (ret) { + DRM_ERROR("phytium_crtc_init(pipe %d) return failed\n", i); + goto failed_crtc_init; + } + } + + for_each_pipe_masked(priv, i) { + ret = phytium_dp_init(dev, i); + if (ret) { + DRM_ERROR("phytium_dp_init(pipe %d) return failed\n", i); + goto failed_dp_init; + } + } + + drm_mode_config_reset(dev); + + return 0; +failed_dp_init: +failed_crtc_init: + drm_mode_config_cleanup(dev); + return ret; +} + +int phytium_get_virt_pipe(struct phytium_display_private *priv, int phys_pipe) +{ + int i = 0; + int virt_pipe = 0; + + for_each_pipe_masked(priv, i) { + if (i != phys_pipe) + virt_pipe++; + else + return virt_pipe; + } + + DRM_ERROR("%s %d failed\n", __func__, phys_pipe); + return -EINVAL; +} + +int phytium_get_phys_pipe(struct phytium_display_private *priv, int virt_pipe) +{ + int i = 0; + int tmp = 0; + + for_each_pipe_masked(priv, i) { + if (tmp != virt_pipe) + tmp++; + else + return i; + } + + DRM_ERROR("%s %d failed\n", __func__, virt_pipe); + return -EINVAL; +} + +static int phytium_display_load(struct drm_device *dev, unsigned long flags) +{ + struct phytium_display_private *priv = dev->dev_private; + int ret = 0; + + ret = drm_vblank_init(dev, priv->info.num_pipes); + if (ret) { + DRM_ERROR("vblank init failed\n"); + goto failed_vblank_init; + } + + ret = phytium_modeset_init(dev); + if (ret) { + DRM_ERROR("phytium_modeset_init failed\n"); + goto failed_modeset_init; + } + + if (priv->vram_support) + priv->vram_hw_init(priv); + + ret = drm_irq_install(dev, priv->irq); + if (ret) { + 
DRM_ERROR("install irq failed\n"); + goto failed_irq_install; + } + + ret = phytium_drm_fbdev_init(dev); + if (ret) + DRM_ERROR("failed to init dev\n"); + + return ret; + +failed_irq_install: + drm_mode_config_cleanup(dev); +failed_modeset_init: +failed_vblank_init: + return ret; +} + +static void phytium_display_unload(struct drm_device *dev) +{ + phytium_drm_fbdev_fini(dev); + drm_irq_uninstall(dev); + drm_mode_config_cleanup(dev); +} + +static const struct vm_operations_struct phytium_vm_ops = { + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +static const struct drm_ioctl_desc phytium_ioctls[] = { + /* for test, none so far */ +}; + +static const struct file_operations phytium_drm_driver_fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, + .compat_ioctl = drm_compat_ioctl, + .poll = drm_poll, + .read = drm_read, + .llseek = no_llseek, + .mmap = phytium_gem_mmap, +}; + +struct drm_driver phytium_display_drm_driver = { + .driver_features = DRIVER_HAVE_IRQ | + DRIVER_IRQ_SHARED | + DRIVER_PRIME | + DRIVER_MODESET | + DRIVER_ATOMIC | + DRIVER_GEM, + .load = phytium_display_load, + .unload = phytium_display_unload, + .lastclose = drm_fb_helper_lastclose, + .irq_handler = phytium_display_irq_handler, + .irq_preinstall = phytium_irq_preinstall, + .irq_uninstall = phytium_irq_uninstall, + .enable_vblank = phytium_enable_vblank, + .disable_vblank = phytium_disable_vblank, + .gem_free_object = phytium_gem_free_object, + .gem_vm_ops = &phytium_vm_ops, + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_export = drm_gem_prime_export, + .gem_prime_import = drm_gem_prime_import, + .gem_prime_get_sg_table = phytium_gem_prime_get_sg_table, + .gem_prime_import_sg_table = phytium_gem_prime_import_sg_table, + .gem_prime_vmap = phytium_gem_prime_vmap, + .gem_prime_vunmap = phytium_gem_prime_vunmap, + .gem_prime_mmap = phytium_gem_prime_mmap, + 
.dumb_create = phytium_gem_dumb_create, + .dumb_destroy = phytium_gem_dumb_destroy, + .ioctls = phytium_ioctls, + .num_ioctls = ARRAY_SIZE(phytium_ioctls), + .fops = &phytium_drm_driver_fops, + .name = DRV_NAME, + .desc = DRV_DESC, + .date = DRV_DATE, + .major = DRV_MAJOR, + .minor = DRV_MINOR, +}; + +static void phytium_display_shutdown(struct drm_device *dev) +{ + drm_atomic_helper_shutdown(dev); +} + +static int phytium_display_pm_suspend(struct drm_device *dev) +{ + struct drm_atomic_state *state; + struct phytium_display_private *priv = dev->dev_private; + int ret, ret1; + + phytium_dp_hpd_irq_setup(dev, false); + cancel_work_sync(&priv->hotplug_work); + drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1); + state = drm_atomic_helper_suspend(dev); + if (IS_ERR(state)) { + DRM_ERROR("drm_atomic_helper_suspend failed: %ld\n", PTR_ERR(state)); + ret = PTR_ERR(state); + goto suspend_failed; + } + dev->mode_config.suspend_state = state; + ret = phytium_gem_suspend(dev); + if (ret) { + DRM_ERROR("phytium_gem_suspend failed: %d\n", ret); + goto gem_suspend_failed; + } + + return 0; + +gem_suspend_failed: + ret1 = drm_atomic_helper_resume(dev, dev->mode_config.suspend_state); + if (ret1) + DRM_ERROR("Failed to resume (%d)\n", ret1); + dev->mode_config.suspend_state = NULL; +suspend_failed: + drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0); + phytium_dp_hpd_irq_setup(dev, true); + + return ret; +} + +static int phytium_display_pm_resume(struct drm_device *dev) +{ + struct phytium_display_private *priv = dev->dev_private; + int ret = 0; + + if (WARN_ON(!dev->mode_config.suspend_state)) + return -EINVAL; + + ret = phytium_dp_resume(dev); + if (ret) + return -EIO; + + phytium_crtc_resume(dev); + phytium_gem_resume(dev); + + if (priv->vram_support) + priv->vram_hw_init(priv); + + ret = drm_atomic_helper_resume(dev, dev->mode_config.suspend_state); + if (ret) { + DRM_ERROR("Failed to resume (%d)\n", ret); + return ret; + } + + dev->mode_config.suspend_state = NULL; 
+ drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0); + phytium_dp_hpd_irq_setup(dev, true); + + return 0; +} + +void phytium_display_private_init(struct phytium_display_private *priv, struct drm_device *dev) +{ + INIT_LIST_HEAD(&priv->gem_list_head); + spin_lock_init(&priv->hotplug_irq_lock); + INIT_WORK(&priv->hotplug_work, phytium_dp_hpd_work_func); + priv->dev = dev; + priv->display_shutdown = phytium_display_shutdown; + priv->display_pm_suspend = phytium_display_pm_suspend; + priv->display_pm_resume = phytium_display_pm_resume; +} + +static int __init phytium_display_init(void) +{ + int ret = 0; + + ret = pci_register_driver(&phytium_pci_driver); + + return ret; +} + +static void __exit phytium_display_exit(void) +{ + pci_unregister_driver(&phytium_pci_driver); +} + +module_init(phytium_display_init); +module_exit(phytium_display_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Yang Xun "); +MODULE_DESCRIPTION("Phytium Display Controller"); diff --git a/drivers/gpu/drm/phytium/phytium_display_drv.h b/drivers/gpu/drm/phytium/phytium_display_drv.h new file mode 100644 index 000000000000..9e052b805fcd --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_display_drv.h @@ -0,0 +1,151 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. 
+ */ + +#ifndef __PHYTIUM_DISPLAY_DRV_H__ +#define __PHYTIUM_DISPLAY_DRV_H__ + +#include +#include + +#define DEBUG_LOG 0 + +#define PHYTIUM_FORMAT_MAX_PLANE 3 +#define DP_MAX_DOWNSTREAM_PORTS 0x10 + +#define DRV_NAME "dc" +#define DRV_DESC "phytium dc" +#define DRV_DATE "20201220" +#define DRV_MAJOR 1 +#define DRV_MINOR 1 + +/* come from GPU */ +#define DRM_FORMAT_MOD_VENDOR_PHYTIUM 0x92 + +/* dc:mode0 8x8 16bpp gpu: FBCDC_8X8_V10 */ +#define DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC fourcc_mod_code(PHYTIUM, 21) +/* dc:mode3 8x4 32bpp gpu: FBCDC_16X4_v10 */ +#define DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC fourcc_mod_code(PHYTIUM, 22) + +#define PIPE_MASK_SHIFT 0x0 +#define PIPE_MASK_MASK 0x7 +#define EDP_MASK_SHIFT 0x3 +#define EDP_MASK_MASK 0x7 + +enum phytium_platform { + PHYTIUM_PLATFORM_UNINITIALIZED = 0, + PHYTIUM_PLATFORM_X100, +}; + +#define IS_PLATFORM(priv, p) ((priv)->info.platform_mask & BIT(p)) + +#define IS_X100(priv) IS_PLATFORM(priv, PHYTIUM_PLATFORM_X100) + +struct phytium_device_info { + unsigned char platform_mask; + unsigned char pipe_mask; + unsigned char num_pipes; + unsigned char total_pipes; + unsigned char edp_mask; + unsigned int crtc_clock_max; + unsigned int hdisplay_max; + unsigned int vdisplay_max; + unsigned int backlight_max; + unsigned long address_mask; +}; + +struct phytium_display_private { + /* hw */ + void __iomem *regs; + void __iomem *vram_addr; + struct phytium_device_info info; + bool vram_support; + bool reserve[3]; + uint32_t dc_reg_base[3]; + uint32_t dcreq_reg_base[3]; + uint32_t dp_reg_base[3]; + uint32_t address_transform_base; + uint32_t phy_access_base[3]; + + /* drm */ + struct drm_device *dev; + int irq; + + /* fb_dev */ + struct drm_fb_helper fbdev_helper; + struct phytium_gem_object *fbdev_phytium_gem; + + int save_reg[3]; + struct list_head gem_list_head; + + struct work_struct hotplug_work; + spinlock_t hotplug_irq_lock; + + void (*vram_hw_init)(struct phytium_display_private *priv); + void 
(*display_shutdown)(struct drm_device *dev); + int (*display_pm_suspend)(struct drm_device *dev); + int (*display_pm_resume)(struct drm_device *dev); + + void (*dc_hw_clear_msi_irq)(struct phytium_display_private *priv, uint32_t phys_pipe); + + int (*dc_hw_fb_format_check)(const struct drm_mode_fb_cmd2 *mode_cmd, int count); +}; + +static inline unsigned int +phytium_readl_reg(struct phytium_display_private *priv, uint32_t group_offset, uint32_t reg_offset) +{ + unsigned int data; + + data = readl(priv->regs + group_offset + reg_offset); +#if DEBUG_LOG + pr_info("Read 32'h%08x 32'h%08x\n", group_offset + reg_offset, data); +#endif + return data; +} + +static inline void +phytium_writel_reg(struct phytium_display_private *priv, uint32_t data, + uint32_t group_offset, uint32_t reg_offset) +{ + + writel(data, priv->regs + group_offset + reg_offset); +#if DEBUG_LOG + pr_info("Write 32'h%08x 32'h%08x\n", group_offset + reg_offset, data); +#endif +} + +static inline void +phytium_writeb_reg(struct phytium_display_private *priv, uint8_t data, + uint32_t group_offset, uint32_t reg_offset) +{ + writeb(data, priv->regs + group_offset + reg_offset); +#if DEBUG_LOG + pr_info("Write 32'h%08x 8'h%08x\n", group_offset + reg_offset, data); +#endif +} + +#define for_each_pipe(__dev_priv, __p) \ + for ((__p) = 0; (__p) < __dev_priv->info.total_pipes; (__p)++) + +#define for_each_pipe_masked(__dev_priv, __p) \ + for ((__p) = 0; (__p) < __dev_priv->info.total_pipes; (__p)++) \ + for_each_if((__dev_priv->info.pipe_mask) & BIT(__p)) + +int phytium_get_virt_pipe(struct phytium_display_private *priv, int phys_pipe); +int phytium_get_phys_pipe(struct phytium_display_private *priv, int virt_pipe); +int phytium_wait_cmd_done(struct phytium_display_private *priv, + uint32_t register_offset, + uint32_t request_bit, + uint32_t reply_bit); +void phytium_display_private_init(struct phytium_display_private *priv, struct drm_device *dev); + +extern struct drm_driver phytium_display_drm_driver; 
+extern int dc_fake_mode_enable; +extern int dc_fast_training_check; +extern int num_source_rates; +extern int source_max_lane_count; +extern int link_dynamic_adjust; + +#endif /* __PHYTIUM_DISPLAY_DRV_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_dp.c b/drivers/gpu/drm/phytium/phytium_dp.c new file mode 100644 index 000000000000..7c7284bac8ee --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_dp.c @@ -0,0 +1,2615 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_dp.h" +#include "phytium_debugfs.h" +#include "x100_dp.h" +#include "phytium_panel.h" +#include "phytium_reg.h" + +static void phytium_dp_aux_init(struct phytium_dp_device *phytium_dp); +static void handle_plugged_change(struct phytium_dp_device *phytium_dp, bool plugged); +static bool phytium_edp_init_connector(struct phytium_dp_device *phytium_dp); +static void phytium_edp_panel_poweroff(struct phytium_dp_device *phytium_dp); + +static int phytium_rate[] = {162000, 270000, 540000, 810000}; + +void phytium_phy_writel(struct phytium_dp_device *phytium_dp, uint32_t address, uint32_t data) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->phy_access_base[port]; + +#if DEBUG_LOG + pr_info("phy address write: 0x%x data:0x%x\n", address, data); +#endif + phytium_writel_reg(priv, address, group_offset, PHYTIUM_PHY_ACCESS_ADDRESS); + phytium_writel_reg(priv, data, group_offset, PHYTIUM_PHY_WRITE_DATA); + phytium_writel_reg(priv, ACCESS_WRITE, group_offset, PHYTIUM_PHY_ACCESS_CTRL); + udelay(10); +} + +uint32_t phytium_phy_readl(struct phytium_dp_device *phytium_dp, uint32_t address) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = 
dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->phy_access_base[port]; + uint32_t data; + + phytium_writel_reg(priv, address, group_offset, PHYTIUM_PHY_ACCESS_ADDRESS); + phytium_writel_reg(priv, ACCESS_READ, group_offset, PHYTIUM_PHY_ACCESS_CTRL); + udelay(10); + data = phytium_readl_reg(priv, group_offset, PHYTIUM_PHY_READ_DATA); +#if DEBUG_LOG + pr_info("phy address read: 0x%x data:0x%x\n", address, data); +#endif + + return data; +} + +static int +phytium_dp_hw_aux_transfer_write(struct phytium_dp_device *phytium_dp, struct drm_dp_aux_msg *msg) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + unsigned int i = 0, j = 0; + unsigned int cmd = 0; + unsigned int aux_status = 0, interrupt_status = 0; + unsigned char *data = msg->buffer; + int count_timeout = 0; + long ret = 0; + + for (i = 0; i < 3; i++) { + /* clear X100_DP_INTERRUPT_RAW_STATUS */ + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS); + phytium_writel_reg(priv, msg->address, group_offset, PHYTIUM_DP_AUX_ADDRESS); + for (j = 0; j < msg->size; j++) + phytium_writeb_reg(priv, data[j], group_offset, PHYTIUM_DP_AUX_WRITE_FIFO); + + cmd = ((msg->request & COMMAND_MASK) << COMMAND_SHIFT); + if (msg->size == 0) + cmd |= ADDRESS_ONLY; + else + cmd |= (msg->size-1) & BYTE_COUNT_MASK; + phytium_writel_reg(priv, cmd, group_offset, PHYTIUM_DP_AUX_COMMAND); + + count_timeout = 0; + do { + mdelay(5); + interrupt_status = phytium_readl_reg(priv, group_offset, + PHYTIUM_DP_INTERRUPT_RAW_STATUS); + aux_status = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_STATUS); + if ((aux_status & REPLY_RECEIVED) || (aux_status & REPLY_ERROR) + || (interrupt_status & REPLY_TIMEOUT)) { + DRM_DEBUG_KMS("aux wait exit\n"); + break; + } + count_timeout++; + } while (count_timeout < 6); + + phytium_readl_reg(priv, group_offset, 
PHYTIUM_DP_INTERRUPT_STATUS); + if (interrupt_status & REPLY_TIMEOUT) { + DRM_DEBUG_KMS("aux write reply timeout\n"); + continue; + } else if (aux_status & REPLY_ERROR) { + DRM_DEBUG_KMS("aux write reply error\n"); + continue; + } else if (aux_status & REPLY_RECEIVED) { + DRM_DEBUG_KMS("aux write reply received succussful\n"); + break; + } + } + + if (interrupt_status & REPLY_TIMEOUT) { + DRM_NOTE("aux(%d) write reply timeout\n", phytium_dp->port); + ret = -EIO; + goto out; + } else if (aux_status & REPLY_ERROR) { + DRM_ERROR("aux(%d) write reply error\n", phytium_dp->port); + ret = -EIO; + goto out; + } else if ((aux_status & REPLY_RECEIVED) != REPLY_RECEIVED) { + DRM_ERROR("aux(%d) write reply no response\n", phytium_dp->port); + ret = -EIO; + goto out; + } + + msg->reply = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_CODE); + ret = msg->size; +out: + return ret; +} + +static int +phytium_dp_hw_aux_transfer_read(struct phytium_dp_device *phytium_dp, struct drm_dp_aux_msg *msg) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + unsigned int i = 0; + unsigned int cmd = 0; + unsigned int aux_status = 0, interrupt_status = 0; + unsigned char *data = msg->buffer; + int count_timeout = 0; + long ret = 0; + + for (i = 0; i < 3; i++) { + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS); + phytium_writel_reg(priv, msg->address, group_offset, PHYTIUM_DP_AUX_ADDRESS); + cmd = ((msg->request & COMMAND_MASK) << COMMAND_SHIFT); + if (msg->size == 0) + cmd |= ADDRESS_ONLY; + else + cmd |= ((msg->size-1) & BYTE_COUNT_MASK); + phytium_writel_reg(priv, cmd, group_offset, PHYTIUM_DP_AUX_COMMAND); + + count_timeout = 0; + do { + mdelay(5); + interrupt_status = phytium_readl_reg(priv, group_offset, + PHYTIUM_DP_INTERRUPT_RAW_STATUS); + aux_status = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_STATUS); + if 
((aux_status & REPLY_RECEIVED) || (aux_status & REPLY_ERROR) + || (interrupt_status & REPLY_TIMEOUT)) { + DRM_DEBUG_KMS("aux wait exit\n"); + break; + } + count_timeout++; + } while (count_timeout < 6); + + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS); + if (interrupt_status & REPLY_TIMEOUT) { + DRM_DEBUG_KMS("aux read reply timeout\n"); + continue; + } else if (aux_status & REPLY_ERROR) { + DRM_DEBUG_KMS("aux read reply error\n"); + continue; + } else if (aux_status & REPLY_RECEIVED) { + DRM_DEBUG_KMS("aux read reply received succussful\n"); + break; + } + } + + if (interrupt_status & REPLY_TIMEOUT) { + DRM_NOTE("aux(%d) read reply timeout\n", phytium_dp->port); + ret = -EIO; + goto out; + } else if (aux_status & REPLY_ERROR) { + DRM_ERROR("aux(%d) read reply error\n", phytium_dp->port); + ret = -EIO; + goto out; + } else if ((aux_status & REPLY_RECEIVED) != REPLY_RECEIVED) { + DRM_ERROR("aux(%d) read reply no response\n", phytium_dp->port); + ret = -EIO; + goto out; + } + + msg->reply = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_CODE); + ret = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_DATA_COUNT); + + if (ret > msg->size) { + ret = msg->size; + } else if (ret != msg->size) { + DRM_DEBUG_KMS("aux read count error(ret:0x%lx != 0x%lx)\n", ret, msg->size); + ret = -EBUSY; + goto out; + } + + for (i = 0; i < ret; i++) + data[i] = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_DATA); + +out: + return ret; +} + +static void phytium_get_native_mode(struct phytium_dp_device *phytium_dp) +{ + struct drm_display_mode *t, *mode; + struct drm_connector *connector = &phytium_dp->connector; + struct drm_display_mode *native_mode = &phytium_dp->native_mode; + + list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { + if (mode->type & DRM_MODE_TYPE_PREFERRED) { + if (mode->hdisplay != native_mode->hdisplay || + mode->vdisplay != native_mode->vdisplay) { + memcpy(native_mode, mode, sizeof(*mode)); + 
drm_mode_set_crtcinfo(native_mode, 0); + } + break; + } + } + + if (&mode->head == &connector->probed_modes) + native_mode->clock = 0; +} + +static int phytium_connector_add_common_modes(struct phytium_dp_device *phytium_dp) +{ + int i = 0, ret = 0; + struct drm_device *dev = phytium_dp->dev; + struct drm_display_mode *mode = NULL, *current_mode = NULL; + struct drm_display_mode *native_mode = &phytium_dp->native_mode; + bool mode_existed = false; + struct mode_size { + char name[DRM_DISPLAY_MODE_LEN]; + int w; + int h; + } common_mode[] = { + { "640x480", 640, 480}, + { "800x600", 800, 600}, + { "1024x768", 1024, 768}, + { "1280x720", 1280, 720}, + { "1280x800", 1280, 800}, + {"1280x1024", 1280, 1024}, + { "1440x900", 1440, 900}, + {"1680x1050", 1680, 1050}, + {"1600x1200", 1600, 1200}, + {"1920x1080", 1920, 1080}, + {"1920x1200", 1920, 1200} + }; + + if (native_mode->clock == 0) + return ret; + + for (i = 0; i < ARRAY_SIZE(common_mode); i++) { + mode_existed = false; + + if (common_mode[i].w > native_mode->hdisplay || + common_mode[i].h > native_mode->vdisplay || + (common_mode[i].w == native_mode->hdisplay && + common_mode[i].h == native_mode->vdisplay)) + continue; + + list_for_each_entry(current_mode, &phytium_dp->connector.probed_modes, head) { + if (common_mode[i].w == current_mode->hdisplay && + common_mode[i].h == current_mode->vdisplay) { + mode_existed = true; + break; + } + } + + if (mode_existed) + continue; + + mode = drm_mode_duplicate(dev, native_mode); + if (mode == NULL) + continue; + + mode->hdisplay = common_mode[i].w; + mode->vdisplay = common_mode[i].h; + mode->type &= ~DRM_MODE_TYPE_PREFERRED; + strncpy(mode->name, common_mode[i].name, DRM_DISPLAY_MODE_LEN); + drm_mode_probed_add(&phytium_dp->connector, mode); + ret++; + } + + return ret; +} + +static int phytium_connector_get_modes(struct drm_connector *connector) +{ + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + struct edid *edid; + int ret = 0; + + edid = 
phytium_dp->detect_edid;
	if (edid && drm_edid_is_valid(edid)) {
		drm_connector_update_edid_property(connector, edid);
		ret = drm_add_edid_modes(connector, edid);
		/* remember the sink's preferred mode for later fake-mode use */
		phytium_get_native_mode(phytium_dp);
		if (dc_fake_mode_enable)
			ret += phytium_connector_add_common_modes(phytium_dp);
	} else {
		/* no (valid) EDID: fall back to a basic 640x480 set */
		ret = drm_add_modes_noedid(connector, 640, 480);
	}

	return ret;
}

/*
 * Validate a mode against the link bandwidth and a couple of known-bad
 * timings.  NOTE(review): this hook also *mutates* display_info
 * (bpc / color_formats) back to supported defaults as a side effect,
 * which is unusual for a mode_valid callback.
 */
static int
phytium_connector_mode_valid(struct drm_connector *connector,
			     struct drm_display_mode *mode)
{
	struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector);
	struct drm_display_info *display_info = &phytium_dp->connector.display_info;
	unsigned int requested, actual;

	/* only 6/8/10 bpc are handled; anything else is forced to 8 */
	switch (display_info->bpc) {
	case 10:
	case 6:
	case 8:
		break;
	default:
		DRM_INFO("not support bpc(%d)\n", display_info->bpc);
		display_info->bpc = 8;
		break;
	}

	if ((display_info->color_formats & DRM_COLOR_FORMAT_RGB444) == 0) {
		DRM_INFO("not support color_format(%d)\n", display_info->color_formats);
		display_info->color_formats = DRM_COLOR_FORMAT_RGB444;
	}

	/* required payload vs. 80% of the raw link bandwidth */
	requested = mode->clock * display_info->bpc * 3 / 1000;
	actual = phytium_dp->max_link_rate * phytium_dp->max_link_lane_count / 100;
	actual = actual * 8 / 10;
	if (requested >= actual) {
		DRM_DEBUG_KMS("requested=%d, actual=%d, clock=%d\n", requested, actual,
			      mode->clock);
		return MODE_CLOCK_HIGH;
	}

	/* rejected timings -- presumably hardware quirks; TODO confirm why */
	if ((mode->hdisplay == 1600) && (mode->vdisplay == 900))
		return MODE_BAD_HVALUE;

	if ((mode->hdisplay == 1024) && (mode->clock > 78000))
		return MODE_BAD_HVALUE;

	return MODE_OK;
}

/* connector helper hooks: mode probing and validation */
static const
struct drm_connector_helper_funcs phytium_connector_helper_funcs = {
	.get_modes = phytium_connector_get_modes,
	.mode_valid = phytium_connector_mode_valid,
	.best_encoder = drm_atomic_helper_best_encoder,
};

/*
 * Fill sink_rates[] with the standard DP rates up to the sink's
 * DP_MAX_LINK_RATE; num_sink_rates is the count actually stored.
 */
static void phytium_dp_set_sink_rates(struct phytium_dp_device *phytium_dp)
{
	static const int dp_rates[] = {162000, 270000, 540000, 810000};
	int i, max_rate;

	max_rate = drm_dp_bw_code_to_link_rate(phytium_dp->dpcd[DP_MAX_LINK_RATE]);
	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
		if (dp_rates[i] > max_rate)
			break;
		phytium_dp->sink_rates[i] = dp_rates[i];
	}
	phytium_dp->num_sink_rates = i;
}

/*
 * Intersect two ascending rate lists; returns the number of common rates
 * written to common_rates (capped at DP_MAX_SUPPORTED_RATES).
 */
static int get_common_rates(const int *source_rates, int source_len, const int *sink_rates,
			    int sink_len, int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}

/* compute source/sink common rates, falling back to RBR (162000) if empty */
static void phytium_dp_set_common_rates(struct phytium_dp_device *phytium_dp)
{
	WARN_ON(!phytium_dp->num_source_rates || !phytium_dp->num_sink_rates);

	phytium_dp->num_common_rates = get_common_rates(phytium_dp->source_rates,
							phytium_dp->num_source_rates,
							phytium_dp->sink_rates,
							phytium_dp->num_sink_rates,
							phytium_dp->common_rates);

	if (WARN_ON(phytium_dp->num_common_rates == 0)) {
		phytium_dp->common_rates[0] = 162000;
		phytium_dp->num_common_rates = 1;
	}
}

/*
 * Read and parse the sink's DPCD: capabilities, link rates, lane counts,
 * sink count, and (for branch devices newer than DPCD 1.0) the downstream
 * port descriptors.  Returns false on any AUX failure or implausible data.
 */
static bool phytium_dp_get_dpcd(struct phytium_dp_device *phytium_dp)
{
	int ret;
	unsigned char sink_count = 0;

	/* get dpcd capability, but don't check data error; so check revision */
	ret = drm_dp_dpcd_read(&phytium_dp->aux, 0x00, phytium_dp->dpcd,
			       sizeof(phytium_dp->dpcd));
	if (ret < 0) {
		DRM_ERROR("port %d get DPCD capability fail\n", phytium_dp->port);
		return false;
	}

	if (phytium_dp->dpcd[DP_DPCD_REV] == 0) {
		DRM_ERROR("DPCD data error: 0x%x\n", phytium_dp->dpcd[DP_DPCD_REV]);
		return false;
	}

	/* parse sink supported link rates / lane count */
	phytium_dp_set_sink_rates(phytium_dp);
	phytium_dp_set_common_rates(phytium_dp);
	phytium_dp->sink_max_lane_count = drm_dp_max_lane_count(phytium_dp->dpcd);
	phytium_dp->common_max_lane_count = min(phytium_dp->source_max_lane_count,
						phytium_dp->sink_max_lane_count);

	/* get dpcd sink count */
	if (drm_dp_dpcd_readb(&phytium_dp->aux, DP_SINK_COUNT, &sink_count) <= 0) {
		DRM_ERROR("get DPCD sink_count fail\n");
		return false;
	}

	phytium_dp->sink_count = DP_GET_SINK_COUNT(sink_count);
	if (!phytium_dp->sink_count) {
		DRM_ERROR("DPCD sink_count should not be zero\n");
		return false;
	}

	if (!drm_dp_is_branch(phytium_dp->dpcd))
		return true;

	/* DPCD 1.0 branch devices carry no downstream port descriptors */
	if (phytium_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true;

	/* get downstream port info for the branch device */
	ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_DOWNSTREAM_PORT_0,
			       phytium_dp->downstream_ports, DP_MAX_DOWNSTREAM_PORTS);
	if (ret < 0) {
		DRM_ERROR("get DPCD DFP fail\n");
		return false;
	}

	return true;
}

/*
 * Derive the connector status from the DPCD: branch devices with per-port
 * HPD report connected only while sink_count is non-zero.
 */
static enum drm_connector_status
phytium_dp_detect_dpcd(struct phytium_dp_device *phytium_dp)
{
	if (!phytium_dp_get_dpcd(phytium_dp))
		return connector_status_disconnected;

	if (!drm_dp_is_branch(phytium_dp->dpcd))
		return connector_status_connected;

	if (phytium_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		return phytium_dp->sink_count ?
connector_status_connected
			: connector_status_disconnected;
	}
	return connector_status_connected;
}

/*
 * Compute the voltage-swing / pre-emphasis request for the next training
 * iteration: take the maximum request across the active lanes, clamp at
 * level 3 (setting the MAX_*_REACHED flag), and apply the same setting to
 * all four train_set entries.
 */
static void phytium_get_adjust_train(struct phytium_dp_device *phytium_dp,
				     const uint8_t link_status[DP_LINK_STATUS_SIZE], uint8_t lane_count)
{
	unsigned char v = 0;
	unsigned char p = 0;
	int lane;
	unsigned char voltage_max;
	unsigned char preemph_max;

	/* find max value */
	for (lane = 0; lane < lane_count; lane++) {
		uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
		uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);

		if (this_v > v)
			v = this_v;
		if (this_p > p)
			p = this_p;
	}
	voltage_max = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	if (v >= voltage_max)
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;

	preemph_max = DP_TRAIN_PRE_EMPH_LEVEL_3;
	if (p >= preemph_max)
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	/* all four lanes get the same (maximum) drive settings */
	for (lane = 0; lane < 4; lane++)
		phytium_dp->train_set[lane] = v | p;
}

/* true if the given PHY test pattern requires 8b/10b coding enabled */
bool phytium_dp_coding_8b10b_need_enable(unsigned char test_pattern)
{
	switch (test_pattern) {
	case PHYTIUM_PHY_TP_D10_2:
	case PHYTIUM_PHY_TP_SYMBOL_ERROR:
	case PHYTIUM_PHY_TP_CP2520_1:
	case PHYTIUM_PHY_TP_CP2520_2:
	case PHYTIUM_PHY_TP_CP2520_3:
		return true;
	case PHYTIUM_PHY_TP_PRBS7:
	case PHYTIUM_PHY_TP_80BIT_CUSTOM:
		return false;
	default:
		return false;
	}
}

/* true if the given PHY test pattern requires scrambling enabled */
bool phytium_dp_scrambled_need_enable(unsigned char test_pattern)
{
	switch (test_pattern) {
	case PHYTIUM_PHY_TP_SYMBOL_ERROR:
	case PHYTIUM_PHY_TP_CP2520_1:
	case PHYTIUM_PHY_TP_CP2520_2:
	case PHYTIUM_PHY_TP_CP2520_3:
		return true;
	case PHYTIUM_PHY_TP_D10_2:
	case PHYTIUM_PHY_TP_PRBS7:
	case PHYTIUM_PHY_TP_80BIT_CUSTOM:
		return false;
	default:
		return false;
	}
}

/* thin dispatch to the SoC-specific PHY voltage/pre-emphasis setup */
static void phytium_dp_hw_set_lane_setting(struct phytium_dp_device *phytium_dp,
					   uint32_t link_rate,
					   uint8_t train_set)
{
	phytium_dp->funcs->dp_hw_set_phy_lane_setting(phytium_dp, link_rate, train_set);
}

/*
 * Program the source's lane count, link-rate BW code and enhanced-framing
 * setting into the controller, then configure the PHY for that lane/rate
 * combination, retrying the PHY call up to 3 times on failure.
 */
static void phytium_dp_hw_set_link(struct phytium_dp_device *phytium_dp,
				   uint8_t lane_count,
				   uint32_t link_rate)
{
	struct drm_device *dev = phytium_dp->dev;
	struct phytium_display_private *priv = dev->dev_private;
	int port = phytium_dp->port, ret = 0, retry = 3;
	uint32_t group_offset = priv->dp_reg_base[port];

	phytium_writel_reg(priv, lane_count,
			   group_offset, PHYTIUM_DP_LANE_COUNT_SET);
	phytium_writel_reg(priv,
			   drm_dp_link_rate_to_bw_code(link_rate),
			   group_offset, PHYTIUM_DP_LINK_BW_SET);

	if (drm_dp_enhanced_frame_cap(phytium_dp->dpcd))
		phytium_writel_reg(priv, ENHANCED_FRAME_ENABLE,
				   group_offset, PHYTIUM_DP_ENHANCED_FRAME_EN);
	else
		phytium_writel_reg(priv, ENHANCED_FRAME_DISABLE,
				   group_offset, PHYTIUM_DP_ENHANCED_FRAME_EN);

try_again:
	ret = phytium_dp->funcs->dp_hw_set_phy_lane_and_rate(phytium_dp, lane_count, link_rate);
	if ((ret < 0) && retry) {
		retry--;
		goto try_again;
	}
}

/*
 * Program a PHY link-quality test pattern on all active lanes, loading the
 * 80-bit custom pattern registers first when applicable.  Scrambling is
 * disabled for the D10.2 / PRBS7 / 80-bit-custom patterns.
 */
static void phytium_dp_hw_set_test_pattern(struct phytium_dp_device *phytium_dp,
					   uint8_t lane_count,
					   uint8_t test_pattern,
					   uint8_t *custom_pattern,
					   uint32_t custom_pattern_size)
{
	struct drm_device *dev = phytium_dp->dev;
	struct phytium_display_private *priv = dev->dev_private;
	int port = phytium_dp->port, val = 0, tmp = 0, i;
	uint32_t group_offset = priv->dp_reg_base[port];

	if ((test_pattern == PHYTIUM_PHY_TP_80BIT_CUSTOM)
	    && custom_pattern && (custom_pattern_size > 0)) {
		/* NOTE(review): raw int/short casts assume a suitably aligned,
		 * >= 10-byte pattern buffer -- confirm at the call sites */
		val = *(int *)custom_pattern;
		phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_CUSTOM_80BIT_PATTERN_0);
		val = *(int *)(custom_pattern + 4);
		phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_CUSTOM_80BIT_PATTERN_1);
		val = *(short int *)(custom_pattern + 8);
		phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_CUSTOM_80BIT_PATTERN_2);
	}

	if (test_pattern == PHYTIUM_PHY_TP_D10_2 || test_pattern == PHYTIUM_PHY_TP_PRBS7
	    || test_pattern == PHYTIUM_PHY_TP_80BIT_CUSTOM)
		phytium_writel_reg(priv, SCRAMBLING_DISABLE, group_offset,
				   PHYTIUM_DP_SCRAMBLING_DISABLE);
	else
		phytium_writel_reg(priv, SCRAMBLING_ENABLE, group_offset,
				   PHYTIUM_DP_SCRAMBLING_DISABLE);

	/* translate the PHYTIUM_PHY_TP_* value into the register encoding,
	 * replicated once per active lane */
	tmp = test_pattern - PHYTIUM_PHY_TP_NONE + TEST_PATTERN_NONE;
	val = 0;
	for (i = 0; i < lane_count; i++)
		val |= (tmp << (TEST_PATTERN_LANE_SHIFT * i));
	phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_LINK_QUAL_PATTERN_SET);
}

/*
 * Program the source training pattern register.  Scrambling is disabled
 * for TPS1/TPS2/TPS3 and re-enabled (with a scrambler reset) for TPS4 and
 * "training off".
 */
static void phytium_dp_hw_set_train_pattern(struct phytium_dp_device *phytium_dp,
					    uint8_t train_pattern)
{
	struct drm_device *dev = phytium_dp->dev;
	struct phytium_display_private *priv = dev->dev_private;
	int port = phytium_dp->port, tmp = 0;
	uint32_t group_offset = priv->dp_reg_base[port];

	/* Scrambling is disabled for TPS1/TPS2/3 and enabled for TPS4 */
	if (train_pattern == DP_TRAINING_PATTERN_4
	    || train_pattern == DP_TRAINING_PATTERN_DISABLE) {
		phytium_writel_reg(priv, SCRAMBLING_ENABLE, group_offset,
				   PHYTIUM_DP_SCRAMBLING_DISABLE);
		phytium_writel_reg(priv, SCRAMBLER_RESET, group_offset,
				   PHYTIUM_DP_FORCE_SCRAMBLER_RESET);
	} else {
		phytium_writel_reg(priv, SCRAMBLING_DISABLE, group_offset,
				   PHYTIUM_DP_SCRAMBLING_DISABLE);
	}
	switch (train_pattern) {
	case DP_TRAINING_PATTERN_DISABLE:
		tmp = TRAINING_OFF;
		break;
	case DP_TRAINING_PATTERN_1:
		tmp = TRAINING_PATTERN_1;
		break;
	case DP_TRAINING_PATTERN_2:
		tmp = TRAINING_PATTERN_2;
		break;
	case DP_TRAINING_PATTERN_3:
		tmp = TRAINING_PATTERN_3;
		break;
	case DP_TRAINING_PATTERN_4:
		tmp = TRAINING_PATTERN_4;
		break;
	default:
		tmp = TRAINING_OFF;
		break;
	}

	phytium_writel_reg(priv, tmp, group_offset, PHYTIUM_DP_TRAINING_PATTERN_SET);
}

/*
 * Re-program the audio M/N dividers and data window after a mode or link
 * change, preserving the current audio-enable and secondary-stream state
 * around the update.
 */
void phytium_dp_hw_enable_audio(struct phytium_dp_device *phytium_dp)
{
	struct drm_device *dev = phytium_dp->dev;
	struct phytium_display_private *priv = dev->dev_private;
	int port = phytium_dp->port;
	int config = 0, config1, data_window = 0;
	const struct dp_audio_n_m *n_m = NULL;
	uint32_t group_offset = priv->dp_reg_base[port];

	config = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE);
	/* mute while the dividers are re-programmed */
	phytium_writel_reg(priv, CHANNEL_MUTE_ENABLE, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE);

	data_window = 90*(phytium_dp->link_rate)/100
		*(phytium_dp->mode.htotal - phytium_dp->mode.hdisplay)
		/phytium_dp->mode.clock/4;

	phytium_writel_reg(priv, data_window, group_offset, PHYTIUM_DP_SEC_DATA_WINDOW);

	n_m = phytium_dp_audio_get_n_m(phytium_dp->link_rate, phytium_dp->audio_info.sample_rate);
	if (n_m == NULL) {
		DRM_NOTE("can not get n_m for link_rate(%d) and sample_rate(%d)\n",
			 phytium_dp->link_rate, phytium_dp->audio_info.sample_rate);
		phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_MAUD);
		phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_NAUD);
	} else {
		phytium_writel_reg(priv, n_m->m, group_offset, PHYTIUM_DP_SEC_MAUD);
		phytium_writel_reg(priv, n_m->n, group_offset, PHYTIUM_DP_SEC_NAUD);
	}

	/* bounce the secondary stream, then restore the saved enables */
	config1 = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE);
	phytium_writel_reg(priv, SECONDARY_STREAM_DISABLE,
			   group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE);
	phytium_writel_reg(priv, config1, group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE);
	phytium_writel_reg(priv, config, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE);
}

/* stop the secondary (audio) stream on this port */
static void phytium_dp_hw_audio_shutdown(struct phytium_dp_device *phytium_dp)
{
	struct drm_device *dev = phytium_dp->dev;
	struct phytium_display_private *priv = dev->dev_private;
	int port = phytium_dp->port;
	uint32_t group_offset = priv->dp_reg_base[port];

	phytium_writel_reg(priv, SECONDARY_STREAM_DISABLE,
			   group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE);
}

/* mute (enable=true) or unmute the audio channel */
static void phytium_dp_hw_audio_digital_mute(struct phytium_dp_device *phytium_dp, bool enable)
{
	struct phytium_display_private *priv = phytium_dp->dev->dev_private;
	int port = phytium_dp->port;
	uint32_t group_offset = priv->dp_reg_base[port];

	if (enable)
		phytium_writel_reg(priv, CHANNEL_MUTE_ENABLE,
				   group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE);
	else
		phytium_writel_reg(priv, SEC_AUDIO_ENABLE,
				   group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE);
}

/*
 * Configure the secondary (audio) stream for the given sample rate,
 * channel count and sample width, then enable it.
 * Returns 0 on success.  NOTE(review): the unsupported-rate/width paths
 * return `ret`, which is still 0 -- callers cannot distinguish failure;
 * consider setting -EINVAL on those paths.
 */
static int
phytium_dp_hw_audio_hw_params(struct phytium_dp_device *phytium_dp, struct audio_info audio_info)
{
	struct phytium_display_private *priv = phytium_dp->dev->dev_private;
	int port = phytium_dp->port;
	int ret = 0, data_window = 0;
	const struct dp_audio_n_m *n_m = NULL;
	uint32_t fs, ws, fs_accurac;
	uint32_t group_offset = priv->dp_reg_base[port];

	DRM_DEBUG_KMS("%s:set port%d sample_rate(%d) channels(%d) sample_width(%d)\n",
		      __func__, phytium_dp->port, audio_info.sample_rate,
		      audio_info.channels, audio_info.sample_width);

	phytium_writel_reg(priv, INPUT_SELECT_I2S, group_offset, PHYTIUM_DP_SEC_INPUT_SELECT);
	phytium_writel_reg(priv, APB_CLOCK/audio_info.sample_rate,
			   group_offset, PHYTIUM_DP_SEC_DIRECT_CLKDIV);
	phytium_writel_reg(priv, audio_info.channels & CHANNEL_MASK,
			   group_offset, PHYTIUM_DP_SEC_CHANNEL_COUNT);
	phytium_writel_reg(priv, CHANNEL_MAP_DEFAULT, group_offset, PHYTIUM_DP_SEC_CHANNEL_MAP);
	data_window = 90*(phytium_dp->link_rate)/100
		*(phytium_dp->mode.htotal - phytium_dp->mode.hdisplay)
		/phytium_dp->mode.clock/4;
	phytium_writel_reg(priv, data_window, group_offset, PHYTIUM_DP_SEC_DATA_WINDOW);
	phytium_writel_reg(priv, 0xb5, group_offset, PHYTIUM_DP_SEC_CS_CATEGORY_CODE);

	phytium_writel_reg(priv, CLOCK_MODE_SYNC, group_offset, PHYTIUM_DP_SEC_CLOCK_MODE);
	phytium_writel_reg(priv, CS_SOURCE_FORMAT_DEFAULT,
			   group_offset, PHYTIUM_DP_SEC_CS_SOURCE_FORMAT);

	switch (audio_info.sample_rate) {
	case 32000:
		fs = ORIG_FREQ_32000;
		fs_accurac = SAMPLING_FREQ_32000;
		break;
	case 44100:
		fs = ORIG_FREQ_44100;
		fs_accurac = SAMPLING_FREQ_44100;
		break;
	case 48000:
		fs = ORIG_FREQ_48000;
		fs_accurac = SAMPLING_FREQ_48000;
		break;
	case 96000:
		fs = ORIG_FREQ_96000;
		fs_accurac = SAMPLING_FREQ_96000;
		break;
	case 176400:
		fs = ORIG_FREQ_176400;
		fs_accurac = SAMPLING_FREQ_176400;
		break;
	case 192000:
		fs = ORIG_FREQ_192000;
		fs_accurac = SAMPLING_FREQ_192000;
		break;
	default:
		DRM_ERROR("dp not support sample_rate %d\n", audio_info.sample_rate);
		goto out;
	}

	switch (audio_info.sample_width) {
	case 16:
		ws = WORD_LENGTH_16;
		break;
	case 18:
		ws = WORD_LENGTH_18;
		break;
	case 20:
		ws = WORD_LENGTH_20;
		break;
	case 24:
		ws = WORD_LENGTH_24;
		break;
	default:
		DRM_ERROR("dp not support sample_width %d\n", audio_info.sample_width);
		goto out;
	}

	/*
	 * NOTE(review): the next statement is corrupted in this patch extract -
	 * text between "...ORIG_FREQ_MASK)<" and "link_rate, ..." was lost
	 * (it presumably shifted fs/ws/fs_accurac into the channel-status
	 * registers and then assigned
	 * n_m = phytium_dp_audio_get_n_m(phytium_dp->link_rate, ...)).
	 * Restore this span from the original driver source before building;
	 * the line below will not compile as-is.
	 */
	phytium_writel_reg(priv, ((fs&ORIG_FREQ_MASK)<link_rate, audio_info.sample_rate);
	if (n_m == NULL) {
		DRM_NOTE("can not get n_m for link_rate(%d) and sample_rate(%d)\n",
			 phytium_dp->link_rate, audio_info.sample_rate);
		phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_MAUD);
		phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_NAUD);

	} else {
		phytium_writel_reg(priv, n_m->m, group_offset, PHYTIUM_DP_SEC_MAUD);
		phytium_writel_reg(priv, n_m->n, group_offset, PHYTIUM_DP_SEC_NAUD);
	}
	phytium_writel_reg(priv, SECONDARY_STREAM_ENABLE,
			   group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE);
	phytium_dp->audio_info = audio_info;

	return 0;

out:
	phytium_writel_reg(priv, SECONDARY_STREAM_DISABLE,
			   group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE);

	return ret;
}

/* gate off the SST/MST source-0 video stream */
void phytium_dp_hw_disable_video(struct phytium_dp_device *phytium_dp)
{
	struct drm_device *dev = phytium_dp->dev;
	struct phytium_display_private *priv = dev->dev_private;
	int port = phytium_dp->port;
	uint32_t group_offset = priv->dp_reg_base[port];

	phytium_writel_reg(priv, SST_MST_SOURCE_0_DISABLE,
			   group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE);
}

/* query whether the video stream is currently enabled */
bool phytium_dp_hw_video_is_enable(struct phytium_dp_device *phytium_dp)
{
	struct drm_device *dev = phytium_dp->dev;
	struct phytium_display_private *priv = dev->dev_private;
	int port = phytium_dp->port, config;
	uint32_t group_offset =
priv->dp_reg_base[port];

	config = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE);
	return config ? true : false;
}

/* enable the SST/MST source-0 video stream and soft-reset the link */
void phytium_dp_hw_enable_video(struct phytium_dp_device *phytium_dp)
{
	struct drm_device *dev = phytium_dp->dev;
	struct phytium_display_private *priv = dev->dev_private;
	int port = phytium_dp->port;
	uint32_t group_offset = priv->dp_reg_base[port];

	phytium_writel_reg(priv, SST_MST_SOURCE_0_ENABLE,
			   group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE);
	phytium_writel_reg(priv, LINK_SOFT_RESET, group_offset, PHYTIUM_DP_SOFT_RESET);
}

/*
 * Program the main video stream: M/N video dividers, transfer-unit size,
 * per-line data count, main-stream attribute timings, sync polarities and
 * MISC0/MISC1 (colour depth / RGB format).
 */
void phytium_dp_hw_config_video(struct phytium_dp_device *phytium_dp)
{
	struct drm_device *dev = phytium_dp->dev;
	struct phytium_display_private *priv = dev->dev_private;
	int port = phytium_dp->port;
	uint32_t group_offset = priv->dp_reg_base[port];
	unsigned long link_bw, date_rate = 0;	/* sic: "date_rate" = data rate */
	struct drm_display_info *display_info = &phytium_dp->connector.display_info;
	unsigned char tu_size = 64;
	unsigned long data_per_tu = 0;
	int symbols_per_tu, frac_symbols_per_tu, symbol_count, udc, value;

	/* cal M/N and tu_size */
	phytium_writel_reg(priv, phytium_dp->mode.crtc_clock/10, group_offset, PHYTIUM_DP_M_VID);
	phytium_writel_reg(priv, phytium_dp->link_rate/10, group_offset, PHYTIUM_DP_N_VID);
	link_bw = phytium_dp->link_rate * phytium_dp->link_lane_count;
	date_rate = (phytium_dp->mode.crtc_clock * display_info->bpc * 3)/8;

	/* mul 10 for register setting (keeps one decimal digit of precision) */
	data_per_tu = 10*tu_size * date_rate/link_bw;
	symbols_per_tu = (data_per_tu/10)&0xff;
	frac_symbols_per_tu = (data_per_tu%10*16/10) & 0xf;
	phytium_writel_reg(priv, frac_symbols_per_tu<<24 | symbols_per_tu<<16 | tu_size,
			   group_offset, PHYTIUM_DP_TRANSFER_UNIT_SIZE);

	/* bytes per active line, spread across the active lanes */
	symbol_count = (phytium_dp->mode.crtc_hdisplay*display_info->bpc*3 + 7)/8;
	udc = (symbol_count + phytium_dp->link_lane_count - 1)/phytium_dp->link_lane_count;
	phytium_writel_reg(priv, udc, group_offset, PHYTIUM_DP_DATA_COUNT);

	/* config main stream attributes */
	phytium_writel_reg(priv, phytium_dp->mode.crtc_htotal,
			   group_offset, PHYTIUM_DP_MAIN_LINK_HTOTAL);
	phytium_writel_reg(priv, phytium_dp->mode.crtc_hdisplay,
			   group_offset, PHYTIUM_DP_MAIN_LINK_HRES);
	phytium_writel_reg(priv,
			   phytium_dp->mode.crtc_hsync_end - phytium_dp->mode.crtc_hsync_start,
			   group_offset, PHYTIUM_DP_MAIN_LINK_HSWIDTH);
	phytium_writel_reg(priv, phytium_dp->mode.crtc_htotal - phytium_dp->mode.crtc_hsync_start,
			   group_offset, PHYTIUM_DP_MAIN_LINK_HSTART);
	phytium_writel_reg(priv, phytium_dp->mode.crtc_vtotal,
			   group_offset, PHYTIUM_DP_MAIN_LINK_VTOTAL);
	phytium_writel_reg(priv, phytium_dp->mode.crtc_vdisplay,
			   group_offset, PHYTIUM_DP_MAIN_LINK_VRES);
	phytium_writel_reg(priv,
			   phytium_dp->mode.crtc_vsync_end - phytium_dp->mode.crtc_vsync_start,
			   group_offset, PHYTIUM_DP_MAIN_LINK_VSWIDTH);
	phytium_writel_reg(priv, phytium_dp->mode.crtc_vtotal - phytium_dp->mode.crtc_vsync_start,
			   group_offset, PHYTIUM_DP_MAIN_LINK_VSTART);

	/* sync polarities: a set *_POLARITY_LOW bit means active-low */
	value = 0;
	if (phytium_dp->mode.flags & DRM_MODE_FLAG_PHSYNC)
		value = value & (~HSYNC_POLARITY_LOW);
	else
		value = value | HSYNC_POLARITY_LOW;

	if (phytium_dp->mode.flags & DRM_MODE_FLAG_PVSYNC)
		value = value & (~VSYNC_POLARITY_LOW);
	else
		value = value | VSYNC_POLARITY_LOW;
	phytium_writel_reg(priv, value, group_offset, PHYTIUM_DP_MAIN_LINK_POLARITY);

	/* MISC0: colour depth + RGB format + synchronous clock */
	switch (display_info->bpc) {
	case 10:
		value = (MISC0_BIT_DEPTH_10BIT << MISC0_BIT_DEPTH_OFFSET);
		break;
	case 6:
		value = (MISC0_BIT_DEPTH_6BIT << MISC0_BIT_DEPTH_OFFSET);
		break;
	default:
		value = (MISC0_BIT_DEPTH_8BIT << MISC0_BIT_DEPTH_OFFSET);
		break;
	}
	value |= (MISC0_COMPONENT_FORMAT_RGB << MISC0_COMPONENT_FORMAT_SHIFT)
		| MISC0_SYNCHRONOUS_CLOCK;
	phytium_writel_reg(priv, value, group_offset, PHYTIUM_DP_MAIN_LINK_MISC0);
	phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_MAIN_LINK_MISC1);

	/* user-side sync polarities mirror the mode flags */
	value = USER_ODDEVEN_POLARITY_HIGH | USER_DATA_ENABLE_POLARITY_HIGH;
	if (phytium_dp->mode.flags & DRM_MODE_FLAG_PHSYNC)
		value = value | USER_HSYNC_POLARITY_HIGH;
	else
		value = value & (~USER_HSYNC_POLARITY_HIGH);
	if (phytium_dp->mode.flags & DRM_MODE_FLAG_PVSYNC)
		value = value | USER_VSYNC_POLARITY_HIGH;
	else
		value = value & (~USER_VSYNC_POLARITY_HIGH);
	phytium_writel_reg(priv, value, group_offset, PHYTIUM_DP_USER_SYNC_POLARITY);
}

/* disable the transmitter output, then soft-reset the link */
void phytium_dp_hw_disable_output(struct phytium_dp_device *phytium_dp)
{
	struct drm_device *dev = phytium_dp->dev;
	struct phytium_display_private *priv = dev->dev_private;
	int port = phytium_dp->port;
	uint32_t group_offset = priv->dp_reg_base[port];

	phytium_writel_reg(priv, TRANSMITTER_OUTPUT_DISABLE,
			   group_offset, PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE);
	phytium_writel_reg(priv, LINK_SOFT_RESET, group_offset, PHYTIUM_DP_SOFT_RESET);
}

/* soft-reset the link, then enable the transmitter output */
void phytium_dp_hw_enable_output(struct phytium_dp_device *phytium_dp)
{
	struct drm_device *dev = phytium_dp->dev;
	struct phytium_display_private *priv = dev->dev_private;
	int port = phytium_dp->port;
	uint32_t group_offset = priv->dp_reg_base[port];

	phytium_writel_reg(priv, LINK_SOFT_RESET, group_offset, PHYTIUM_DP_SOFT_RESET);
	phytium_writel_reg(priv, TRANSMITTER_OUTPUT_ENABLE,
			   group_offset, PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE);
}

/* enable virtual input source 0 for this port */
void phytium_dp_hw_enable_input_source(struct phytium_dp_device *phytium_dp)
{
	struct drm_device *dev = phytium_dp->dev;
	struct phytium_display_private *priv = dev->dev_private;
	int port = phytium_dp->port;
	uint32_t group_offset = priv->dp_reg_base[port];

	phytium_writel_reg(priv, VIRTUAL_SOURCE_0_ENABLE,
			   group_offset, PHYTIUM_INPUT_SOURCE_ENABLE);
}

/* disable virtual input source 0 for this port */
void phytium_dp_hw_disable_input_source(struct phytium_dp_device *phytium_dp)
{
	struct drm_device *dev = phytium_dp->dev;
	struct phytium_display_private *priv = dev->dev_private;
	int port = phytium_dp->port;

	phytium_writel_reg(priv, (~VIRTUAL_SOURCE_0_ENABLE)&VIRTUAL_SOURCE_0_ENABLE_MASK,
			   priv->dp_reg_base[port],
PHYTIUM_INPUT_SOURCE_ENABLE);
}

/* query whether the transmitter output is currently enabled */
bool phytium_dp_hw_output_is_enable(struct phytium_dp_device *phytium_dp)
{
	struct drm_device *dev = phytium_dp->dev;
	struct phytium_display_private *priv = dev->dev_private;
	int port = phytium_dp->port;
	uint32_t group_offset = priv->dp_reg_base[port];
	int config = 0;

	config = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE);
	return config ? true : false;
}

/*
 * Sample and latch the HPD state into dp_hpd_state: the raw interrupt
 * status supplies the event/irq edges, PHYTIUM_DP_SINK_HPD_STATE supplies
 * the current level.
 */
static void phytium_dp_hw_get_hpd_state(struct phytium_dp_device *phytium_dp)
{
	struct drm_device *dev = phytium_dp->dev;
	struct phytium_display_private *priv = dev->dev_private;
	int port = phytium_dp->port;
	uint32_t val = 0, raw_state = 0;
	uint32_t group_offset = priv->dp_reg_base[port];

	val = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_RAW_STATUS);

	/* maybe miss hpd, so used for clear PHYTIUM_DP_INTERRUPT_RAW_STATUS */
	phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS);
	raw_state = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SINK_HPD_STATE);
	if (val & HPD_EVENT)
		phytium_dp->dp_hpd_state.hpd_event_state = true;

	if (val & HPD_IRQ)
		phytium_dp->dp_hpd_state.hpd_irq_state = true;

	if (raw_state & HPD_CONNECT)
		phytium_dp->dp_hpd_state.hpd_raw_state = true;
	else
		phytium_dp->dp_hpd_state.hpd_raw_state = false;
}

/* mask or unmask the HPD event/irq interrupt sources for this port */
void phytium_dp_hw_hpd_irq_setup(struct phytium_dp_device *phytium_dp, bool enable)
{
	struct drm_device *dev = phytium_dp->dev;
	struct phytium_display_private *priv = dev->dev_private;
	int port = phytium_dp->port;
	uint32_t group_offset = priv->dp_reg_base[port];

	phytium_dp->dp_hpd_state.hpd_irq_enable = enable;
	if (enable)
		phytium_writel_reg(priv, HPD_OTHER_MASK, group_offset, PHYTIUM_DP_INTERRUPT_MASK);
	else
		phytium_writel_reg(priv, HPD_IRQ_MASK|HPD_EVENT_MASK|HPD_OTHER_MASK,
				   group_offset, PHYTIUM_DP_INTERRUPT_MASK);
}

/*
 * Reset and initialise the DP controller and PHY for this port, then set
 * the AUX clock divider and cache the spread-spectrum capability.
 * Returns 0 on success or the negative error from reset / PHY init.
 */
int phytium_dp_hw_init(struct phytium_dp_device *phytium_dp)
{
	struct drm_device *dev = phytium_dp->dev;
	struct phytium_display_private *priv = dev->dev_private;
	int port = phytium_dp->port, ret;
	uint32_t group_offset = priv->dp_reg_base[port];

	ret = phytium_dp->funcs->dp_hw_reset(phytium_dp);
	if (ret)
		goto out;
	ret = phytium_dp->funcs->dp_hw_init_phy(phytium_dp);
	if (ret)
		goto out;

	phytium_writel_reg(priv, AUX_CLK_DIVIDER, group_offset, PHYTIUM_DP_AUX_CLK_DIVIDER);
	phytium_dp->fast_train_support = false;
	phytium_dp->hw_spread_enable = phytium_dp->funcs->dp_hw_spread_is_enable(phytium_dp);

out:
	return ret;
}

/* pick the source rate table and the per-port maximum lane count */
static void phytium_dp_hw_set_source_rate_and_lane_count(struct phytium_dp_device *phytium_dp)
{
	phytium_dp->source_rates = phytium_rate;
	phytium_dp->num_source_rates = num_source_rates;

	/* ports 0/1 use the configured maximum; ports >= 2 are single-lane */
	if (phytium_dp->port == 0)
		phytium_dp->source_max_lane_count = source_max_lane_count;
	else if (phytium_dp->port == 1)
		phytium_dp->source_max_lane_count = source_max_lane_count;
	else if (phytium_dp->port == 2)
		phytium_dp->source_max_lane_count = 1;
	else
		phytium_dp->source_max_lane_count = 1;
}

/*
 * Read the sink's requested test lane count and link rate from the DPCD.
 * Returns 0 on success, or the (non-positive) AUX result on failure.
 */
static int phytium_dp_dpcd_get_tp_link(struct phytium_dp_device *phytium_dp,
				       uint8_t *test_lane_count,
				       uint32_t *test_link_rate)
{
	uint8_t test_link_bw;
	int ret;

	ret = drm_dp_dpcd_readb(&phytium_dp->aux, DP_TEST_LANE_COUNT,
				test_lane_count);
	if (ret <= 0) {
		DRM_DEBUG_KMS("test pattern Lane count read failed(%d)\n", ret);
		goto failed;
	}

	ret = drm_dp_dpcd_readb(&phytium_dp->aux, DP_TEST_LINK_RATE,
				&test_link_bw);
	if (ret <= 0) {
		DRM_DEBUG_KMS("test pattern link rate read failed(%d)\n", ret);
		goto failed;
	}
	*test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);

	return 0;
failed:
	return ret;
}

/*
 * Write the link configuration (BW code, lane count, enhanced framing,
 * downspread, 8b/10b coding) into the sink's DPCD.
 */
static int phytium_dp_dpcd_set_link(struct phytium_dp_device *phytium_dp,
				    uint8_t lane_count, uint32_t link_rate)
{
	uint8_t link_config[2];
	int ret = 0;

	link_config[0] = drm_dp_link_rate_to_bw_code(link_rate);
	link_config[1] = lane_count;
	if (drm_dp_enhanced_frame_cap(phytium_dp->dpcd)) {
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	}
	ret = drm_dp_dpcd_write(&phytium_dp->aux, DP_LINK_BW_SET, link_config, 2);
	if (ret < 0) {
		DRM_NOTE("write dpcd DP_LINK_BW_SET fail: ret:%d\n", ret);
		goto failed;
	}

	if (phytium_dp->hw_spread_enable)
		link_config[0] = DP_SPREAD_AMP_0_5;
	else
		link_config[0] = 0;
	link_config[1] = DP_SET_ANSI_8B10B;
	ret = drm_dp_dpcd_write(&phytium_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
	if (ret < 0) {
		DRM_ERROR("write DP_DOWNSPREAD_CTRL fail: ret:%d\n", ret);
		goto failed;
	}

	return 0;
failed:
	return ret;
}

/*
 * Configure the sink for a PHY test pattern: channel coding, scrambling,
 * and the per-lane link-quality pattern.
 */
static int phytium_dp_dpcd_set_test_pattern(struct phytium_dp_device *phytium_dp,
					    uint8_t test_pattern)
{
	unsigned char value;
	int ret;

	if (phytium_dp_coding_8b10b_need_enable(test_pattern))
		value = DP_SET_ANSI_8B10B;
	else
		value = 0;
	ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_MAIN_LINK_CHANNEL_CODING_SET, value);
	if (ret < 0) {
		DRM_ERROR("write DP_MAIN_LINK_CHANNEL_CODING_SET fail: ret:%d\n", ret);
		goto failed;
	}

	if (phytium_dp_scrambled_need_enable(test_pattern))
		value = DP_TRAINING_PATTERN_DISABLE;
	else
		value = (DP_TRAINING_PATTERN_DISABLE | DP_LINK_SCRAMBLING_DISABLE);

	ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_TRAINING_PATTERN_SET, value);
	if (ret < 0) {
		DRM_ERROR("write DP_TRAINING_PATTERN_SET fail: ret:%d\n", ret);
		goto failed;
	}

	/* NOTE(review): error string below says DP_TRAINING_PATTERN_SET but
	 * the write targets DP_LINK_QUAL_LANE0_SET (copy-paste) */
	ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_LINK_QUAL_LANE0_SET, test_pattern);
	if (ret < 0) {
		DRM_ERROR("write DP_TRAINING_PATTERN_SET fail: ret:%d\n", ret);
		goto failed;
	}

	return 0;
failed:
	return ret;
}

/*
 * Write the training pattern to the sink; scrambling stays enabled for
 * TPS4 and "disable", and is turned off for TPS1/2/3.
 */
static int phytium_dp_dpcd_set_train_pattern(struct phytium_dp_device *phytium_dp,
					     uint8_t train_pattern)
{
	uint8_t value;
	int ret;

	/* Scrambling is disabled for TPS1/2/3 and enabled for TPS4 */
	if (train_pattern == DP_TRAINING_PATTERN_4 || train_pattern == DP_TRAINING_PATTERN_DISABLE)
		value = train_pattern;
+ else + value = (train_pattern | DP_LINK_SCRAMBLING_DISABLE); + + ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_TRAINING_PATTERN_SET, value); + if (ret < 0) { + DRM_NOTE("write DP_TRAINING_PATTERN_SET fail: ret:%d\n", ret); + goto failed; + } + + return 0; +failed: + return ret; +} + +static int +phytium_dp_dpcd_set_lane_setting(struct phytium_dp_device *phytium_dp, uint8_t *train_set) +{ + int ret = 0; + + ret = drm_dp_dpcd_write(&phytium_dp->aux, DP_TRAINING_LANE0_SET, + phytium_dp->train_set, 4); + if (ret < 0) { + DRM_ERROR("write DP_TRAINING_LANE0_SET fail: ret:%d\n", ret); + return ret; + } + + return 0; +} + +static int +phytium_dp_dpcd_get_adjust_request(struct phytium_dp_device *phytium_dp, uint8_t lane_count) +{ + int ret = 0; + uint8_t link_status[DP_LINK_STATUS_SIZE]; + + ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS, + link_status, DP_LINK_STATUS_SIZE); + if (ret < 0) { + DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n"); + goto failed; + } + phytium_get_adjust_train(phytium_dp, link_status, lane_count); + + return 0; +failed: + return ret; +} + +void phytium_dp_dpcd_sink_dpms(struct phytium_dp_device *phytium_dp, int mode) +{ + int ret, i; + + if (phytium_dp->dpcd[DP_DPCD_REV] < 0x11) + return; + if (mode != DRM_MODE_DPMS_ON) { + ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_SET_POWER, DP_SET_POWER_D3); + } else { + for (i = 0; i < 3; i++) { + ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_SET_POWER, DP_SET_POWER_D0); + if (ret == 1) + break; + msleep(20); + } + } + + if (ret != 1) + DRM_DEBUG_KMS("failed to %s sink power state\n", + mode == DRM_MODE_DPMS_ON ? 
"enable" : "disable");
+}
+
+static bool phytium_dp_link_training_clock_recovery(struct phytium_dp_device *phytium_dp)
+{
+	int ret;
+	unsigned char voltage, max_vswing_tries;
+	int voltage_tries;
+
+	/* clear the test pattern */
+	phytium_dp_hw_set_test_pattern(phytium_dp, phytium_dp->link_lane_count,
+				       PHYTIUM_PHY_TP_NONE, NULL, 0);
+
+	/* config source and sink's link rate and lane count */
+	phytium_dp_hw_set_link(phytium_dp, phytium_dp->link_lane_count, phytium_dp->link_rate);
+	ret = phytium_dp_dpcd_set_link(phytium_dp, phytium_dp->link_lane_count,
+				       phytium_dp->link_rate);
+	if (ret < 0) {
+		DRM_NOTE("phytium_dp_dpcd_set_link failed(ret=%d)\n", ret);
+		return false;
+	}
+
+	/* config source's voltage swing and pre-emphasis(103-106) */
+	memset(phytium_dp->train_set, 0, sizeof(phytium_dp->train_set));
+	phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate,
+				       phytium_dp->train_set[0]);
+
+	/* config train pattern */
+	phytium_dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_1);
+	ret = phytium_dp_dpcd_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_1);
+	if (ret < 0) {
+		DRM_ERROR("phytium_dp_dpcd_set_train_pattern fail: ret:%d\n", ret);
+		return false;
+	}
+
+	/* config sink's voltage swing and pre-emphasis(103-106) */
+	ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set);
+	if (ret < 0) {
+		DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret);
+		return false;
+	}
+
+	voltage_tries = 1;
+	max_vswing_tries = 0;
+	for (;;) {
+		unsigned char link_status[DP_LINK_STATUS_SIZE];
+
+		drm_dp_link_train_clock_recovery_delay(phytium_dp->dpcd);
+
+		/* get link status 0x202-0x207 */
+		ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS,
+				       link_status, DP_LINK_STATUS_SIZE);
+		if (ret < 0) {
+			DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n");
+			return false;
+		}
+
+		if (drm_dp_clock_recovery_ok(link_status, phytium_dp->link_lane_count)) {
+			DRM_DEBUG_KMS("clock recovery ok\n");
+			return true;
+		}
+
+		
if (voltage_tries == 5) { + DRM_DEBUG_KMS("Same voltage tried 5 times\n"); + return false; + } + + if (max_vswing_tries == 1) { + DRM_DEBUG_KMS("Max Voltage Swing reached\n"); + return false; + } + + voltage = phytium_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; + + /* config source and sink's voltage swing and pre-emphasis(103-106) */ + phytium_get_adjust_train(phytium_dp, link_status, phytium_dp->link_lane_count); + phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate, + phytium_dp->train_set[0]); + ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret); + return false; + } + + if ((phytium_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) + ++voltage_tries; + else + voltage_tries = 1; + + if (phytium_dp->train_set[0] & DP_TRAIN_MAX_SWING_REACHED) + ++max_vswing_tries; + + DRM_DEBUG_KMS("try train_set:0x%x voltage_tries:%d max_vswing_tries:%d\n", + phytium_dp->train_set[0], voltage_tries, max_vswing_tries); + } +} + +static unsigned int phytium_dp_get_training_pattern(struct phytium_dp_device *phytium_dp) +{ + bool sink_tps3, sink_tps4; + + sink_tps4 = drm_dp_tps4_supported(phytium_dp->dpcd); + if (sink_tps4) + return DP_TRAINING_PATTERN_4; + else if (phytium_dp->link_rate == 810000) + DRM_DEBUG_KMS("8.1 Gbps link rate without sink TPS4 support\n"); + + sink_tps3 = drm_dp_tps3_supported(phytium_dp->dpcd); + if (sink_tps3) + return DP_TRAINING_PATTERN_3; + else if (phytium_dp->link_rate >= 540000) + DRM_DEBUG_KMS(">=5.4/6.48 Gbps link rate without sink TPS3 support\n"); + + return DP_TRAINING_PATTERN_2; +} + +static bool phytium_dp_link_training_channel_equalization(struct phytium_dp_device *phytium_dp) +{ + unsigned int training_pattern; + int tries, ret; + unsigned char link_status[DP_LINK_STATUS_SIZE]; + bool channel_eq = false; + + /* config source and sink's voltage swing and pre-emphasis(103-106), from clock recovery */ + 
phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate, + phytium_dp->train_set[0]); + ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret); + return channel_eq; + } + + /* config source and sink's train_pattern x */ + training_pattern = phytium_dp_get_training_pattern(phytium_dp); + phytium_dp_hw_set_train_pattern(phytium_dp, training_pattern); + ret = phytium_dp_dpcd_set_train_pattern(phytium_dp, training_pattern); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_train_pattern fail: ret:%d\n", ret); + return channel_eq; + } + + for (tries = 0; tries < 5; tries++) { + drm_dp_link_train_channel_eq_delay(phytium_dp->dpcd); + + /* get link status 0x202-0x207 */ + ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS, + link_status, DP_LINK_STATUS_SIZE); + if (ret < 0) { + DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n"); + break; + } + + /* Make sure clock is still ok */ + if (!drm_dp_clock_recovery_ok(link_status, phytium_dp->link_lane_count)) { + DRM_DEBUG_KMS("CR check failed, cannot continue channel equalization\n"); + break; + } + + if (drm_dp_channel_eq_ok(link_status, phytium_dp->link_lane_count)) { + channel_eq = true; + DRM_DEBUG_KMS("Channel EQ done. 
DP Training successful\n"); + break; + } + + /* config source and sink's voltage swing and pre-emphasis(103-106) */ + phytium_get_adjust_train(phytium_dp, link_status, phytium_dp->link_lane_count); + phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate, + phytium_dp->train_set[0]); + ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret); + break; + } + } + + /* Try 5 times, else fail and try at lower BW */ + if (tries == 5) + DRM_DEBUG_KMS("Channel equalization failed 5 times\n"); + + return channel_eq; +} + +static void phytium_dp_train_retry_work_fn(struct work_struct *work) +{ + struct phytium_dp_device *phytium_dp = train_retry_to_dp_device(work); + struct drm_connector *connector; + + connector = &phytium_dp->connector; + DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); + mutex_lock(&connector->dev->mode_config.mutex); + drm_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD); + mutex_unlock(&connector->dev->mode_config.mutex); + drm_kms_helper_hotplug_event(connector->dev); +} + +/* return index of rate in rates array, or -1 if not found */ +static int phytium_dp_rate_index(const int *rates, int len, int rate) +{ + int i; + + for (i = 0; i < len; i++) + if (rate == rates[i]) + return i; + + return -1; +} + +int phytium_dp_get_link_train_fallback_values(struct phytium_dp_device *phytium_dp) +{ + int index, ret = 0; + + if (phytium_dp->is_edp) { + phytium_dp->train_retry_count++; + DRM_INFO("Retrying Link training for eDP(%d) with same parameters\n", + phytium_dp->port); + goto out; + } else { + index = phytium_dp_rate_index(phytium_dp->common_rates, + phytium_dp->num_common_rates, + phytium_dp->link_rate); + if (index > 0) { + phytium_dp->link_rate = phytium_dp->common_rates[index - 1]; + } else if (phytium_dp->link_lane_count > 1) { + phytium_dp->link_rate = phytium_dp->max_link_rate; + 
phytium_dp->link_lane_count = phytium_dp->link_lane_count >> 1; + } else { + phytium_dp->train_retry_count++; + phytium_dp->link_rate = phytium_dp->max_link_rate; + phytium_dp->link_lane_count = phytium_dp->max_link_lane_count; + DRM_INFO("Retrying Link training for DP(%d) with maximal parameters\n", + phytium_dp->port); + ret = -1; + } + } + +out: + return ret; +} + +static int +phytium_dp_stop_link_train(struct phytium_dp_device *phytium_dp) +{ + int ret; + + /* config source and sink's train_pattern x: DP_TRAINING_PATTERN_DISABLE */ + phytium_dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_DISABLE); + + ret = phytium_dp_dpcd_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_DISABLE); + if (ret < 0) { + DRM_NOTE("phytium_dp_dpcd_set_train_pattern fail: ret:%d\n", ret); + return ret; + } + + return 0; +} + +int phytium_dp_start_link_train(struct phytium_dp_device *phytium_dp) +{ + int ret = 0; + + phytium_dp_hw_disable_output(phytium_dp); + phytium_dp_hw_disable_input_source(phytium_dp); + phytium_dp_hw_disable_video(phytium_dp); + phytium_dp_hw_enable_input_source(phytium_dp); + phytium_dp_hw_enable_output(phytium_dp); + phytium_dp_dpcd_sink_dpms(phytium_dp, DRM_MODE_DPMS_OFF); + phytium_dp_dpcd_sink_dpms(phytium_dp, DRM_MODE_DPMS_ON); + + if (!phytium_dp_link_training_clock_recovery(phytium_dp)) + goto failure_handling; + + if (!phytium_dp_link_training_channel_equalization(phytium_dp)) + goto failure_handling; + + ret = phytium_dp_stop_link_train(phytium_dp); + if (ret < 0) { + DRM_NOTE("phytium_dp_stop_link_train failed: ret = %d\n", ret); + goto out; + } + + if (phytium_dp->trigger_train_fail) { + phytium_dp->trigger_train_fail--; + goto failure_handling; + } + phytium_dp->train_retry_count = 0; + + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training Pass at Link Rate = %d, Lane count = %d\n", + phytium_dp->connector.base.id, + phytium_dp->connector.name, phytium_dp->link_rate, + phytium_dp->link_lane_count); + + return 0; + +failure_handling: + 
DRM_INFO("[CONNECTOR:%d:%s] Link Training failed at Link Rate = %d, Lane count = %d", + phytium_dp->connector.base.id, + phytium_dp->connector.name, + phytium_dp->link_rate, phytium_dp->link_lane_count); + + ret = phytium_dp_stop_link_train(phytium_dp); + if (ret < 0) { + DRM_NOTE("phytium_dp_stop_link_train failed: ret = %d\n", ret); + goto out; + } + + phytium_dp_get_link_train_fallback_values(phytium_dp); + if (phytium_dp->train_retry_count < 5) + schedule_work(&phytium_dp->train_retry_work); + else + DRM_ERROR("DP(%d) Link Training Unsuccessful, and stop Training\n", + phytium_dp->port); + +out: + return -1; +} + +static bool phytium_dp_needs_link_retrain(struct phytium_dp_device *phytium_dp) +{ + unsigned char link_status[DP_LINK_STATUS_SIZE]; + int ret = 0; + + /* get link status 0x202-0x207 */ + ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS, + link_status, DP_LINK_STATUS_SIZE); + if (ret < 0) { + DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n"); + return true; + } + + if ((phytium_dp->link_rate == 0) || (phytium_dp->link_lane_count == 0)) { + DRM_DEBUG_KMS("link_rate(%d) or lane_count(%d) is invalid\n", + phytium_dp->link_rate, phytium_dp->link_lane_count); + return true; + } + + /* Make sure clock is still ok */ + if (!drm_dp_clock_recovery_ok(link_status, phytium_dp->link_lane_count)) { + DRM_DEBUG_KMS("Clock recovery check failed\n"); + return true; + } + + if (!drm_dp_channel_eq_ok(link_status, phytium_dp->link_lane_count)) { + DRM_DEBUG_KMS("Channel EQ check failed\n"); + return true; + } + + if (!phytium_dp_hw_output_is_enable(phytium_dp)) { + DRM_DEBUG_KMS("check DP output enable failed\n"); + return true; + } + return false; +} + +static bool +phytium_dp_get_sink_irq(struct phytium_dp_device *phytium_dp, u8 *sink_irq_vector) +{ + return drm_dp_dpcd_readb(&phytium_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, + sink_irq_vector) == 1; +} + +static uint8_t phytium_dp_autotest_phy_pattern(struct phytium_dp_device *phytium_dp) +{ + union 
phytium_phy_tp phytium_phy_tp; + int ret; + unsigned char test_80_bit_pattern[ + (DP_TEST_80BIT_CUSTOM_PATTERN_79_72 - + DP_TEST_80BIT_CUSTOM_PATTERN_7_0)+1] = {0}; + unsigned char test_pattern; + + ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_TEST_PHY_PATTERN, + &phytium_phy_tp.raw, + sizeof(phytium_phy_tp)); + if (ret <= 0) { + DRM_DEBUG_KMS("Could not read DP_TEST_PHY_PATTERN\n"); + goto failed; + } + + test_pattern = phytium_phy_tp.bits.PATTERN; + + if (test_pattern == PHYTIUM_PHY_TP_80BIT_CUSTOM) { + ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_TEST_80BIT_CUSTOM_PATTERN_7_0, + test_80_bit_pattern, + sizeof(test_80_bit_pattern)); + if (ret <= 0) { + DRM_DEBUG_KMS("Could not read DP_TEST_PHY_PATTERN\n"); + goto failed; + } + } + + /* config source and sink's link rate and link count */ + ret = phytium_dp_dpcd_get_tp_link(phytium_dp, &phytium_dp->compliance.test_lane_count, + &phytium_dp->compliance.test_link_rate); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_get_tp_link fail: ret:%d\n", ret); + goto failed; + } + + phytium_dp_hw_set_link(phytium_dp, phytium_dp->compliance.test_lane_count, + phytium_dp->compliance.test_link_rate); + ret = phytium_dp_dpcd_set_link(phytium_dp, phytium_dp->compliance.test_lane_count, + phytium_dp->compliance.test_link_rate); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_link fail: ret:%d\n", ret); + goto failed_dpcd_set_link; + } + + /* config source and sink's lane setting: voltage swing and pre-emphasis */ + ret = phytium_dp_dpcd_get_adjust_request(phytium_dp, + phytium_dp->compliance.test_lane_count); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_get_adjust_request fail: ret:%d\n", ret); + goto failed_dpcd_get_adjust_request; + } + phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->compliance.test_link_rate, + phytium_dp->train_set[0]); + ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret); + goto 
failed_dpcd_set_lane_setting; + } + + /* config test pattern */ + phytium_dp_hw_set_test_pattern(phytium_dp, phytium_dp->compliance.test_lane_count, + test_pattern, test_80_bit_pattern, + sizeof(test_80_bit_pattern)); + ret = phytium_dp_dpcd_set_test_pattern(phytium_dp, test_pattern); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_test_pattern fail: ret:%d\n", ret); + goto failed_dpcd_set_tp; + } + + return DP_TEST_ACK; + +failed_dpcd_set_tp: + phytium_dp_hw_set_test_pattern(phytium_dp, phytium_dp->compliance.test_lane_count, + PHYTIUM_PHY_TP_NONE, test_80_bit_pattern, + sizeof(test_80_bit_pattern)); +failed_dpcd_set_link: +failed_dpcd_set_lane_setting: +failed_dpcd_get_adjust_request: +failed: + return DP_TEST_NAK; +} + +static void phytium_dp_handle_test_request(struct phytium_dp_device *phytium_dp) +{ + uint8_t response = DP_TEST_NAK; + uint8_t request = 0; + int status; + + status = drm_dp_dpcd_readb(&phytium_dp->aux, DP_TEST_REQUEST, &request); + if (status <= 0) { + DRM_DEBUG_KMS("Could not read test request from sink\n"); + goto update_status; + } + + switch (request) { + case DP_TEST_LINK_TRAINING: + case DP_TEST_LINK_VIDEO_PATTERN: + case DP_TEST_LINK_EDID_READ: + DRM_DEBUG_KMS("Not support test request '%02x'\n", request); + response = DP_TEST_NAK; + break; + case DP_TEST_LINK_PHY_TEST_PATTERN: + DRM_DEBUG_KMS("PHY_PATTERN test requested\n"); + response = phytium_dp_autotest_phy_pattern(phytium_dp); + break; + default: + DRM_DEBUG_KMS("Invalid test request '%02x'\n", request); + break; + } + +update_status: + status = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_TEST_RESPONSE, response); + if (status <= 0) + DRM_DEBUG_KMS("Could not write test response to sink\n"); + +} + +static void phytium_dp_unset_edid(struct drm_connector *connector) +{ + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + if (phytium_dp->detect_edid) + kfree(phytium_dp->detect_edid); + phytium_dp->detect_edid = NULL; + phytium_dp->has_audio = false; +} + 
+static enum drm_connector_status phytium_dp_set_edid(struct drm_connector *connector) +{ + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + phytium_dp_unset_edid(connector); + phytium_dp->detect_edid = drm_get_edid(connector, &phytium_dp->aux.ddc); + if (!phytium_dp->detect_edid) + return connector_status_disconnected; + + phytium_dp->has_audio = drm_detect_monitor_audio(phytium_dp->detect_edid); + + return connector_status_connected; +} + +static int phytium_dp_long_pulse(struct drm_connector *connector, bool hpd_raw_state) +{ + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + enum drm_connector_status status = connector->status; + bool video_enable = false; + uint32_t index = 0; + + if (phytium_dp->is_edp) + status = connector_status_connected; + else if (hpd_raw_state) { + if (!phytium_dp_needs_link_retrain(phytium_dp)) { + status = connector_status_connected; + goto out; + } + } else { + status = connector_status_disconnected; + goto out; + } + + if (!phytium_dp->is_edp) { + status = phytium_dp_detect_dpcd(phytium_dp); + if (status == connector_status_disconnected) + goto out; + + index = phytium_dp->num_common_rates-1; + phytium_dp->max_link_rate = phytium_dp->common_rates[index]; + phytium_dp->max_link_lane_count = phytium_dp->common_max_lane_count; + phytium_dp->link_rate = phytium_dp->max_link_rate; + phytium_dp->link_lane_count = phytium_dp->max_link_lane_count; + DRM_DEBUG_KMS("common_max_lane_count: %d, common_max_rate:%d\n", + phytium_dp->max_link_lane_count, phytium_dp->max_link_rate); + + video_enable = phytium_dp_hw_video_is_enable(phytium_dp); + phytium_dp_start_link_train(phytium_dp); + + status = phytium_dp_set_edid(connector); + if (status == connector_status_disconnected) + goto out; + + if (video_enable) { + mdelay(2); + phytium_dp_hw_enable_video(phytium_dp); + } + } + +out: + return status; +} + +static int phytium_dp_short_pulse(struct drm_connector *connector) +{ + struct 
phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + enum drm_connector_status status = connector->status; + u8 sink_irq_vector = 0; + bool video_enable = false; + + /* handle the test pattern */ + if (phytium_dp_get_sink_irq(phytium_dp, &sink_irq_vector) && + sink_irq_vector != 0) { + drm_dp_dpcd_writeb(&phytium_dp->aux, + DP_DEVICE_SERVICE_IRQ_VECTOR, + sink_irq_vector); + if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST) + phytium_dp_handle_test_request(phytium_dp); + if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ)) + DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); + } + if (!phytium_dp_needs_link_retrain(phytium_dp)) { + status = connector_status_connected; + goto out; + } + + video_enable = phytium_dp_hw_video_is_enable(phytium_dp); + phytium_dp_start_link_train(phytium_dp); + if (video_enable) { + mdelay(2); + phytium_dp_hw_enable_video(phytium_dp); + } + +out: + return status; +} + +void phytium_dp_hpd_poll_handler(struct phytium_display_private *priv) +{ + struct drm_device *dev = priv->dev; + struct drm_connector_list_iter conn_iter; + struct drm_connector *connector; + enum drm_connector_status old_status; + bool changed = false; + + mutex_lock(&dev->mode_config.mutex); + DRM_DEBUG_KMS("running encoder hotplug poll functions\n"); + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (connector->force) + continue; + old_status = connector->status; + connector->status = drm_helper_probe_detect(connector, NULL, false); + if (old_status != connector->status) { + const char *old, *new; + + old = drm_get_connector_status_name(old_status); + new = drm_get_connector_status_name(connector->status); + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", + connector->base.id, + connector->name, + old, new); + changed = true; + } + } + drm_connector_list_iter_end(&conn_iter); + mutex_unlock(&dev->mode_config.mutex); + + if (changed) + 
drm_kms_helper_hotplug_event(dev); +} + +void phytium_dp_hpd_irq_setup(struct drm_device *dev, bool enable) +{ + struct phytium_dp_device *phytium_dp; + struct drm_encoder *encoder; + struct phytium_display_private *priv = dev->dev_private; + bool handler = false; + bool hpd_raw_state_old = false; + + /* We might have missed any hotplugs that happened, so polling and handler */ + if (enable) { + spin_lock(&priv->hotplug_irq_lock); + + drm_for_each_encoder(encoder, dev) { + phytium_dp = encoder_to_dp_device(encoder); + if (!phytium_dp->dp_hpd_state.hpd_irq_enable) { + hpd_raw_state_old = phytium_dp->dp_hpd_state.hpd_raw_state; + phytium_dp_hw_get_hpd_state(phytium_dp); + if (phytium_dp->dp_hpd_state.hpd_event_state + || phytium_dp->dp_hpd_state.hpd_irq_state + || (hpd_raw_state_old != phytium_dp->dp_hpd_state.hpd_raw_state)) { + handler = true; + } + } + } + spin_unlock(&priv->hotplug_irq_lock); + if (handler) + phytium_dp_hpd_poll_handler(priv); + } + + drm_for_each_encoder(encoder, dev) { + phytium_dp = encoder_to_dp_device(encoder); + phytium_dp_hw_hpd_irq_setup(phytium_dp, enable); + } +} + +void phytium_dp_hpd_work_func(struct work_struct *work) +{ + struct phytium_display_private *priv = + container_of(work, struct phytium_display_private, hotplug_work); + struct drm_device *dev = priv->dev; + struct drm_connector_list_iter conn_iter; + struct drm_connector *connector; + enum drm_connector_status old_status; + bool changed = false; + + mutex_lock(&dev->mode_config.mutex); + DRM_DEBUG_KMS("running encoder hotplug work functions\n"); + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (connector->force) + continue; + old_status = connector->status; + connector->status = drm_helper_probe_detect(connector, NULL, false); + if (old_status != connector->status) { + const char *old, *new; + + old = drm_get_connector_status_name(old_status); + new = drm_get_connector_status_name(connector->status); + 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", + connector->base.id, + connector->name, + old, new); + changed = true; + } + } + drm_connector_list_iter_end(&conn_iter); + mutex_unlock(&dev->mode_config.mutex); + + if (changed) + drm_kms_helper_hotplug_event(dev); + + phytium_dp_hpd_irq_setup(dev, true); +} + +irqreturn_t phytium_dp_hpd_irq_handler(struct phytium_display_private *priv) +{ + struct drm_encoder *encoder = NULL; + struct phytium_dp_device *phytium_dp = NULL; + struct drm_device *dev = priv->dev; + bool handler = false; + + spin_lock(&priv->hotplug_irq_lock); + + drm_for_each_encoder(encoder, dev) { + phytium_dp = encoder_to_dp_device(encoder); + if (phytium_dp->dp_hpd_state.hpd_irq_enable) { + phytium_dp_hw_get_hpd_state(phytium_dp); + if (phytium_dp->dp_hpd_state.hpd_event_state + || phytium_dp->dp_hpd_state.hpd_irq_state) { + handler = true; + } + } + } + spin_unlock(&priv->hotplug_irq_lock); + + if (handler) { + phytium_dp_hpd_irq_setup(dev, false); + schedule_work(&priv->hotplug_work); + return IRQ_HANDLED; + } + return IRQ_NONE; +} + + +static void phytium_dp_fast_link_train_detect(struct phytium_dp_device *phytium_dp) +{ + phytium_dp->fast_train_support = !!(phytium_dp->dpcd[DP_MAX_DOWNSPREAD] + & DP_NO_AUX_HANDSHAKE_LINK_TRAINING); + DRM_DEBUG_KMS("fast link training %s\n", + phytium_dp->fast_train_support ? 
"supported" : "unsupported"); +} + +bool phytium_dp_fast_link_train(struct phytium_dp_device *phytium_dp) +{ + int ret = 0; + unsigned int training_pattern; + + /* clear the test pattern */ + phytium_dp_hw_set_test_pattern(phytium_dp, phytium_dp->link_lane_count, + PHYTIUM_PHY_TP_NONE, NULL, 0); + + /* config source and sink's link rate and lane count */ + phytium_dp_hw_set_link(phytium_dp, phytium_dp->link_lane_count, phytium_dp->link_rate); + + /* config source and sink's voltage swing and pre-emphasis(103-106) */ + phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate, + phytium_dp->train_set[0]); + + /* config train pattern */ + phytium_dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_1); + usleep_range(500, 600); + + training_pattern = phytium_dp_get_training_pattern(phytium_dp); + phytium_dp_hw_set_train_pattern(phytium_dp, training_pattern); + usleep_range(500, 600); + + phytium_dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_DISABLE); + + if (dc_fast_training_check) { + unsigned char link_status[DP_LINK_STATUS_SIZE]; + + ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS, + link_status, DP_LINK_STATUS_SIZE); + if (ret < 0) { + DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n"); + return false; + } + + if (!drm_dp_clock_recovery_ok(link_status, phytium_dp->link_lane_count)) { + DRM_DEBUG_KMS("check clock recovery failed\n"); + return false; + } + + if (!drm_dp_channel_eq_ok(link_status, phytium_dp->link_lane_count)) { + DRM_DEBUG_KMS("check channel equalization failed\n"); + return false; + } + } + + return true; +} + +static enum drm_connector_status +phytium_connector_detect(struct drm_connector *connector, bool force) +{ + enum drm_connector_status status = connector->status; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + bool hpd_event_state, hpd_irq_state, hpd_raw_state; + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + bool 
plugged = true; + + spin_lock_irq(&priv->hotplug_irq_lock); + hpd_event_state = phytium_dp->dp_hpd_state.hpd_event_state; + hpd_irq_state = phytium_dp->dp_hpd_state.hpd_irq_state; + hpd_raw_state = phytium_dp->dp_hpd_state.hpd_raw_state; + phytium_dp->dp_hpd_state.hpd_event_state = false; + phytium_dp->dp_hpd_state.hpd_irq_state = false; + spin_unlock_irq(&priv->hotplug_irq_lock); + + if (hpd_event_state) + status = phytium_dp_long_pulse(connector, hpd_raw_state); + + if (hpd_irq_state) + status = phytium_dp_short_pulse(connector); + + if (status == connector_status_unknown) + status = connector_status_disconnected; + + if ((!phytium_dp->is_edp) && (!hpd_raw_state)) + status = connector_status_disconnected; + + if (connector->status != status) { + if ((status == connector_status_connected) && phytium_dp->has_audio) + plugged = true; + else + plugged = false; + + handle_plugged_change(phytium_dp, plugged); + } + + return status; +} + +static void +phytium_connector_destroy(struct drm_connector *connector) +{ + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + drm_connector_cleanup(connector); + kfree(phytium_dp); +} + +static int +phytium_dp_connector_register(struct drm_connector *connector) +{ + int ret; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + phytium_dp_aux_init(phytium_dp); + if (phytium_dp->is_edp) { + phytium_edp_init_connector(phytium_dp); + ret = phytium_edp_backlight_device_register(phytium_dp); + if (ret) + DRM_ERROR("failed to register port(%d) backlight device(ret=%d)\n", + phytium_dp->port, ret); + } + + ret = phytium_debugfs_connector_add(connector); + if (ret) + DRM_ERROR("failed to register phytium connector debugfs(ret=%d)\n", ret); + + return 0; +} + +static void +phytium_dp_connector_unregister(struct drm_connector *connector) +{ + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + if (phytium_dp->is_edp) { + 
phytium_edp_backlight_device_unregister(phytium_dp); + phytium_edp_panel_poweroff(phytium_dp); + } + drm_dp_aux_unregister(&phytium_dp->aux); +} + +static const struct drm_connector_funcs phytium_connector_funcs = { + .dpms = drm_helper_connector_dpms, + .detect = phytium_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = phytium_connector_destroy, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .late_register = phytium_dp_connector_register, + .early_unregister = phytium_dp_connector_unregister, +}; + +static void phytium_dp_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted) +{ + struct phytium_dp_device *dp = encoder_to_dp_device(encoder); + + drm_mode_copy(&dp->mode, adjusted); +} + +static void phytium_edp_panel_poweron(struct phytium_dp_device *phytium_dp) +{ + phytium_panel_poweron(&phytium_dp->panel); +} + +static void phytium_edp_panel_poweroff(struct phytium_dp_device *phytium_dp) +{ + phytium_panel_poweroff(&phytium_dp->panel); +} + +static void phytium_edp_backlight_on(struct phytium_dp_device *phytium_dp) +{ + phytium_panel_enable_backlight(&phytium_dp->panel); +} + +static void phytium_edp_backlight_off(struct phytium_dp_device *phytium_dp) +{ + phytium_panel_disable_backlight(&phytium_dp->panel); +} + +static void phytium_encoder_disable(struct drm_encoder *encoder) +{ + struct phytium_dp_device *phytium_dp = encoder_to_dp_device(encoder); + + if (phytium_dp->is_edp) + phytium_edp_backlight_off(phytium_dp); + + phytium_dp_hw_disable_video(phytium_dp); + + mdelay(50); + + if (phytium_dp->is_edp) + phytium_edp_panel_poweroff(phytium_dp); +} + +void phytium_dp_adjust_link_train_parameter(struct phytium_dp_device *phytium_dp) +{ + struct drm_display_info *display_info = &phytium_dp->connector.display_info; + 
unsigned long link_bw, date_rate = 0, bs_limit, bs_request; + int rate = 0; + + bs_request = phytium_dp->mode.crtc_htotal/(phytium_dp->mode.crtc_clock/1000); + date_rate = (phytium_dp->mode.crtc_clock * display_info->bpc * 3)/8; + + for (;;) { + bs_limit = 8192 / (phytium_dp->link_rate/1000); + link_bw = phytium_dp->link_rate * phytium_dp->link_lane_count; + rate = 10 * date_rate / link_bw; + DRM_DEBUG_KMS("adjust link rate(%d), lane count(%d)\n", + phytium_dp->link_rate, phytium_dp->link_lane_count); + DRM_DEBUG_KMS("for crtc_clock(%d) bs_request(%ld) bs_limit(%ld) rate(%d)\n", + phytium_dp->mode.crtc_clock, bs_request, bs_limit, rate); + if ((link_dynamic_adjust && (bs_request < bs_limit) && rate < 10) || + ((!link_dynamic_adjust) && (rate < 10))) + break; + phytium_dp_get_link_train_fallback_values(phytium_dp); + } + + DRM_DEBUG_KMS("Try link training at Link Rate = %d, Lane count = %d\n", + phytium_dp->link_rate, phytium_dp->link_lane_count); +} + +static void phytium_encoder_enable(struct drm_encoder *encoder) +{ + struct phytium_dp_device *phytium_dp = encoder_to_dp_device(encoder); + int ret = 0; + + phytium_dp_hw_disable_video(phytium_dp); + + if (phytium_dp->is_edp) { + phytium_edp_panel_poweron(phytium_dp); + if (phytium_dp->fast_train_support) + phytium_dp_fast_link_train(phytium_dp); + else + ret = phytium_dp_start_link_train(phytium_dp); + mdelay(2); + phytium_dp_fast_link_train_detect(phytium_dp); + } else { + phytium_dp_adjust_link_train_parameter(phytium_dp); + ret = phytium_dp_start_link_train(phytium_dp); + mdelay(2); + } + + phytium_dp_hw_config_video(phytium_dp); + if (ret == 0) { + phytium_dp_hw_enable_video(phytium_dp); + if (phytium_dp->has_audio) + phytium_dp_hw_enable_audio(phytium_dp); + } + + if (phytium_dp->is_edp) { + phytium_edp_backlight_on(phytium_dp); + } +} + +static const struct drm_encoder_helper_funcs phytium_encoder_helper_funcs = { + .mode_set = phytium_dp_encoder_mode_set, + .disable = phytium_encoder_disable, + .enable = 
phytium_encoder_enable, +}; + +static const struct drm_encoder_funcs phytium_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +static const struct dp_audio_n_m phytium_dp_audio_n_m[] = { + { 32000, 162000, 1024, 10125 }, + { 44100, 162000, 784, 5625 }, + { 48000, 162000, 512, 3375 }, + { 64000, 162000, 2048, 10125 }, + { 88200, 162000, 1568, 5625 }, + { 96000, 162000, 1024, 3375 }, + { 128000, 162000, 4096, 10125 }, + { 176400, 162000, 3136, 5625 }, + { 192000, 162000, 2048, 3375 }, + { 32000, 270000, 1024, 16875 }, + { 44100, 270000, 784, 9375 }, + { 48000, 270000, 512, 5625 }, + { 64000, 270000, 2048, 16875 }, + { 88200, 270000, 1568, 9375 }, + { 96000, 270000, 1024, 5625 }, + { 128000, 270000, 4096, 16875 }, + { 176400, 270000, 3136, 9375 }, + { 192000, 270000, 2048, 5625 }, + { 32000, 540000, 1024, 33750 }, + { 44100, 540000, 784, 18750 }, + { 48000, 540000, 512, 11250 }, + { 64000, 540000, 2048, 33750 }, + { 88200, 540000, 1568, 18750 }, + { 96000, 540000, 1024, 11250 }, + { 128000, 540000, 4096, 33750 }, + { 176400, 540000, 3136, 18750 }, + { 192000, 540000, 2048, 11250 }, + { 32000, 810000, 1024, 50625 }, + { 44100, 810000, 784, 28125 }, + { 48000, 810000, 512, 16875 }, + { 64000, 810000, 2048, 50625 }, + { 88200, 810000, 1568, 28125 }, + { 96000, 810000, 1024, 16875 }, + { 128000, 810000, 4096, 50625 }, + { 176400, 810000, 3136, 28125 }, + { 192000, 810000, 2048, 16875 }, +}; + +static int phytium_dp_audio_get_eld(struct device *dev, void *data, u8 *buf, size_t len) +{ + struct phytium_dp_device *phytium_dp = data; + + memcpy(buf, phytium_dp->connector.eld, min(sizeof(phytium_dp->connector.eld), len)); + + return 0; +} + +static int phytium_dp_audio_digital_mute(struct device *dev, void *data, bool enable) +{ + struct phytium_dp_device *phytium_dp = data; + + phytium_dp_hw_audio_digital_mute(phytium_dp, enable); + + return 0; +} + +const struct dp_audio_n_m *phytium_dp_audio_get_n_m(int link_rate, int sample_rate) +{ + int i; + + for (i = 0; i < 
ARRAY_SIZE(phytium_dp_audio_n_m); i++) { + if (sample_rate == phytium_dp_audio_n_m[i].sample_rate + && link_rate == phytium_dp_audio_n_m[i].link_rate) + return &phytium_dp_audio_n_m[i]; + } + + return NULL; +} + +static int phytium_dp_audio_hw_params(struct device *dev, void *data, + struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params) +{ + struct phytium_dp_device *phytium_dp = data; + int ret = 0; + struct audio_info audio_info = { + .sample_width = params->sample_width, + .sample_rate = params->sample_rate, + .channels = params->channels, + }; + + if (daifmt->fmt != HDMI_I2S) { + DRM_ERROR("invalid audio format %d\n", daifmt->fmt); + ret = -EINVAL; + goto failed; + } + + ret = phytium_dp_hw_audio_hw_params(phytium_dp, audio_info); + +failed: + return ret; +} + +static void phytium_dp_audio_shutdown(struct device *dev, void *data) +{ + struct phytium_dp_device *phytium_dp = data; + + phytium_dp_hw_audio_shutdown(phytium_dp); +} + +static void handle_plugged_change(struct phytium_dp_device *phytium_dp, bool plugged) +{ + if (phytium_dp->plugged_cb && phytium_dp->codec_dev) + phytium_dp->plugged_cb(phytium_dp->codec_dev, plugged); +} + +static int phytium_dp_audio_hook_plugged_cb(struct device *dev, void *data, + hdmi_codec_plugged_cb fn, + struct device *codec_dev) +{ + struct phytium_dp_device *phytium_dp = data; + bool plugged; + + phytium_dp->plugged_cb = fn; + phytium_dp->codec_dev = codec_dev; + + if ((phytium_dp->connector.status == connector_status_connected) && phytium_dp->has_audio) + plugged = true; + else + plugged = false; + + handle_plugged_change(phytium_dp, plugged); + return 0; +} + + +static const struct hdmi_codec_ops phytium_audio_codec_ops = { + .hw_params = phytium_dp_audio_hw_params, + .audio_shutdown = phytium_dp_audio_shutdown, + .digital_mute = phytium_dp_audio_digital_mute, + .get_eld = phytium_dp_audio_get_eld, + .hook_plugged_cb = phytium_dp_audio_hook_plugged_cb, +}; + +static int phytium_dp_audio_codec_init(struct 
phytium_dp_device *phytium_dp) +{ + struct device *dev = phytium_dp->dev->dev; + struct hdmi_codec_pdata codec_data = { + .i2s = 1, + .spdif = 0, + .ops = &phytium_audio_codec_ops, + .max_i2s_channels = 2, + .data = phytium_dp, + }; + + phytium_dp->audio_pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME, + PLATFORM_DEVID_AUTO, + &codec_data, sizeof(codec_data)); + + return PTR_ERR_OR_ZERO(phytium_dp->audio_pdev); +} + +static long phytium_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) +{ + struct phytium_dp_device *phytium_dp = container_of(aux, struct phytium_dp_device, aux); + long ret = 0; + + DRM_DEBUG_KMS("msg->size: 0x%lx\n", msg->size); + + if (WARN_ON(msg->size > 16)) + return -E2BIG; + + switch (msg->request & ~DP_AUX_I2C_MOT) { + case DP_AUX_NATIVE_WRITE: + case DP_AUX_I2C_WRITE: + case DP_AUX_I2C_WRITE_STATUS_UPDATE: + ret = phytium_dp_hw_aux_transfer_write(phytium_dp, msg); + DRM_DEBUG_KMS("aux write reply:0x%x ret:0x%lx\n", msg->reply, ret); + break; + case DP_AUX_NATIVE_READ: + case DP_AUX_I2C_READ: + ret = phytium_dp_hw_aux_transfer_read(phytium_dp, msg); + DRM_DEBUG_KMS("aux read ret:0x%lx\n", ret); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static void phytium_dp_aux_init(struct phytium_dp_device *phytium_dp) +{ + drm_dp_aux_init(&phytium_dp->aux); + phytium_dp->aux.name = kasprintf(GFP_KERNEL, "dp-%d", phytium_dp->port); + phytium_dp->aux.transfer = phytium_dp_aux_transfer; +} + +int phytium_get_encoder_crtc_mask(struct phytium_dp_device *phytium_dp, int port) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int i, mask = 0; + + for_each_pipe_masked(priv, i) { + if (i != port) + mask++; + else + break; + } + + return BIT(mask); +} + +static bool phytium_dp_is_edp(struct phytium_dp_device *phytium_dp, int port) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + + if 
(priv->info.edp_mask & BIT(port)) + return true; + else + return false; +} + +static bool phytium_edp_init_connector(struct phytium_dp_device *phytium_dp) +{ + enum drm_connector_status status; + struct drm_connector *connector = &phytium_dp->connector; + + phytium_edp_panel_poweron(phytium_dp); + + status = phytium_dp_detect_dpcd(phytium_dp); + if (status == connector_status_disconnected) + return false; + + status = phytium_dp_set_edid(connector); + if (status == connector_status_disconnected) + return false; + + connector->status = status; + phytium_dp->max_link_rate = phytium_dp->common_rates[phytium_dp->num_common_rates-1]; + phytium_dp->max_link_lane_count = phytium_dp->common_max_lane_count; + phytium_dp->link_rate = phytium_dp->max_link_rate; + phytium_dp->link_lane_count = phytium_dp->max_link_lane_count; + DRM_DEBUG_KMS("common_max_lane_count: %d, common_max_rate:%d\n", + phytium_dp->max_link_lane_count, phytium_dp->max_link_rate); + + return true; +} + +int phytium_dp_resume(struct drm_device *drm_dev) +{ + struct phytium_dp_device *phytium_dp; + struct drm_encoder *encoder; + int ret = 0; + + drm_for_each_encoder(encoder, drm_dev) { + phytium_dp = encoder_to_dp_device(encoder); + if (phytium_dp->is_edp) { + phytium_edp_backlight_off(phytium_dp); + phytium_edp_panel_poweroff(phytium_dp); + } + ret = phytium_dp_hw_init(phytium_dp); + if (ret) { + DRM_ERROR("failed to initialize dp %d\n", phytium_dp->port); + return -EIO; + } + } + + return 0; +} + +int phytium_dp_init(struct drm_device *dev, int port) +{ + struct phytium_display_private *priv = dev->dev_private; + struct phytium_dp_device *phytium_dp = NULL; + int ret, type; + + DRM_DEBUG_KMS("%s: port %d\n", __func__, port); + phytium_dp = kzalloc(sizeof(*phytium_dp), GFP_KERNEL); + if (!phytium_dp) { + ret = -ENOMEM; + goto failed_malloc_dp; + } + + phytium_dp->dev = dev; + phytium_dp->port = port; + phytium_dp_hw_set_source_rate_and_lane_count(phytium_dp); + + if (IS_X100(priv)) { + 
x100_dp_func_register(phytium_dp); + priv->dp_reg_base[port] = X100_DP_BASE(port); + priv->phy_access_base[port] = X100_PHY_ACCESS_BASE(port); + } + + if (phytium_dp_is_edp(phytium_dp, port)) { + phytium_dp->is_edp = true; + type = DRM_MODE_CONNECTOR_eDP; + phytium_dp_panel_init_backlight_funcs(phytium_dp); + phytium_edp_backlight_off(phytium_dp); + phytium_edp_panel_poweroff(phytium_dp); + } else { + phytium_dp->is_edp = false; + type = DRM_MODE_CONNECTOR_DisplayPort; + } + + ret = phytium_dp_hw_init(phytium_dp); + if (ret) { + DRM_ERROR("failed to initialize dp %d\n", phytium_dp->port); + goto failed_init_dp; + } + + ret = drm_encoder_init(dev, &phytium_dp->encoder, + &phytium_encoder_funcs, + DRM_MODE_ENCODER_TMDS, "DP %d", port); + if (ret) { + DRM_ERROR("failed to initialize encoder with drm\n"); + goto failed_encoder_init; + } + drm_encoder_helper_add(&phytium_dp->encoder, &phytium_encoder_helper_funcs); + phytium_dp->encoder.possible_crtcs = phytium_get_encoder_crtc_mask(phytium_dp, port); + + phytium_dp->connector.dpms = DRM_MODE_DPMS_OFF; + phytium_dp->connector.polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; + ret = drm_connector_init(dev, &phytium_dp->connector, &phytium_connector_funcs, + type); + if (ret) { + DRM_ERROR("failed to initialize connector with drm\n"); + goto failed_connector_init; + } + drm_connector_helper_add(&phytium_dp->connector, &phytium_connector_helper_funcs); + drm_connector_attach_encoder(&phytium_dp->connector, &phytium_dp->encoder); + + ret = phytium_dp_audio_codec_init(phytium_dp); + if (ret) { + DRM_ERROR("failed to initialize audio codec\n"); + goto failed_connector_init; + } + + phytium_dp->train_retry_count = 0; + INIT_WORK(&phytium_dp->train_retry_work, phytium_dp_train_retry_work_fn); + drm_connector_register(&phytium_dp->connector); + + return 0; +failed_connector_init: +failed_encoder_init: +failed_init_dp: + kfree(phytium_dp); +failed_malloc_dp: + return ret; +} diff --git 
a/drivers/gpu/drm/phytium/phytium_dp.h b/drivers/gpu/drm/phytium/phytium_dp.h new file mode 100644 index 000000000000..e1cf6c8483ad --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_dp.h @@ -0,0 +1,153 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + +#ifndef __PHYTIUM_DP_H__ +#define __PHYTIUM_DP_H__ + +#include +#include +#include + +struct phytium_dp_device; + +#include "phytium_panel.h" + +struct audio_info { + int sample_rate; + int channels; + int sample_width; +}; + +struct dp_audio_n_m { + int sample_rate; + int link_rate; + u16 m; + u16 n; +}; + +struct phytium_dp_compliance { + unsigned long test_type; + uint32_t test_link_rate; + u8 test_lane_count; + bool test_active; + u8 reserve[2]; +}; + +struct phytium_dp_func { + int (*dp_hw_reset)(struct phytium_dp_device *phytium_dp); + bool (*dp_hw_spread_is_enable)(struct phytium_dp_device *phytium_dp); + int (*dp_hw_set_backlight)(struct phytium_dp_device *phytium_dp, uint32_t level); + uint32_t (*dp_hw_get_backlight)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_disable_backlight)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_enable_backlight)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_poweroff_panel)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_poweron_panel)(struct phytium_dp_device *phytium_dp); + int (*dp_hw_init_phy)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_set_phy_lane_setting)(struct phytium_dp_device *phytium_dp, + uint32_t link_rate, uint8_t train_set); + int (*dp_hw_set_phy_lane_and_rate)(struct phytium_dp_device *phytium_dp, + uint8_t link_lane_count, + uint32_t link_rate); +}; + +struct phytium_dp_hpd_state { + bool hpd_event_state; + bool hpd_irq_state; + bool hpd_raw_state; + bool hpd_irq_enable; +}; + +struct phytium_dp_device { + struct drm_device *dev; + struct drm_encoder encoder; + struct drm_connector connector; + int port; + struct drm_display_mode 
mode; + bool link_trained; + bool detect_done; + bool is_edp; + bool reserve0; + struct drm_dp_aux aux; + unsigned char dpcd[DP_RECEIVER_CAP_SIZE]; + uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]; + unsigned char downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; + unsigned char sink_count; + + int *source_rates; + int num_source_rates; + int sink_rates[DP_MAX_SUPPORTED_RATES]; + int num_sink_rates; + int common_rates[DP_MAX_SUPPORTED_RATES]; + int num_common_rates; + + int source_max_lane_count; + int sink_max_lane_count; + int common_max_lane_count; + + int max_link_rate; + int max_link_lane_count; + int link_rate; + int link_lane_count; + struct work_struct train_retry_work; + int train_retry_count; + uint32_t trigger_train_fail; + + unsigned char train_set[4]; + struct edid *detect_edid; + bool has_audio; + bool fast_train_support; + bool hw_spread_enable; + bool reserve[1]; + struct platform_device *audio_pdev; + struct audio_info audio_info; + hdmi_codec_plugged_cb plugged_cb; + struct device *codec_dev; + struct phytium_dp_compliance compliance; + struct phytium_dp_func *funcs; + struct phytium_dp_hpd_state dp_hpd_state; + + struct phytium_panel panel; + struct drm_display_mode native_mode; +}; + +union phytium_phy_tp { + struct { + /* DpcdPhyTestPatterns. This field is 2 bits for DP1.1 + * and 3 bits for DP1.2. 
+ */ + uint8_t PATTERN :3; + uint8_t RESERVED :5; + } bits; + uint8_t raw; +}; + +/* PHY test patterns + * The order of test patterns follows DPCD register PHY_TEST_PATTERN (0x248) + */ +enum phytium_dpcd_phy_tp { + PHYTIUM_PHY_TP_NONE = 0, + PHYTIUM_PHY_TP_D10_2, + PHYTIUM_PHY_TP_SYMBOL_ERROR, + PHYTIUM_PHY_TP_PRBS7, + PHYTIUM_PHY_TP_80BIT_CUSTOM, + PHYTIUM_PHY_TP_CP2520_1, + PHYTIUM_PHY_TP_CP2520_2, + PHYTIUM_PHY_TP_CP2520_3, +}; +#define encoder_to_dp_device(x) container_of(x, struct phytium_dp_device, encoder) +#define connector_to_dp_device(x) container_of(x, struct phytium_dp_device, connector) +#define panel_to_dp_device(x) container_of(x, struct phytium_dp_device, panel) +#define train_retry_to_dp_device(x) container_of(x, struct phytium_dp_device, train_retry_work) +void phytium_phy_writel(struct phytium_dp_device *phytium_dp, uint32_t address, uint32_t data); +uint32_t phytium_phy_readl(struct phytium_dp_device *phytium_dp, uint32_t address); + +int phytium_dp_init(struct drm_device *dev, int pipe); +int phytium_dp_resume(struct drm_device *drm_dev); +void phytium_dp_hpd_irq_setup(struct drm_device *dev, bool enable); +irqreturn_t phytium_dp_hpd_irq_handler(struct phytium_display_private *priv); +void phytium_dp_hpd_work_func(struct work_struct *work); +const struct dp_audio_n_m *phytium_dp_audio_get_n_m(int link_rate, int sample_rate); +#endif /* __PHYTIUM_DP_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_fb.c b/drivers/gpu/drm/phytium/phytium_fb.c new file mode 100644 index 000000000000..fecca2cd3b8c --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_fb.c @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include "phytium_display_drv.h" +#include "phytium_fb.h" +#include "phytium_gem.h" + +static int +phytium_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file_priv, + unsigned int *handle) +{ + struct phytium_framebuffer *phytium_fb = to_phytium_framebuffer(fb); + + return drm_gem_handle_create(file_priv, &phytium_fb->phytium_gem_obj[0]->base, handle); +} + +static void phytium_fb_destroy(struct drm_framebuffer *fb) +{ + struct phytium_framebuffer *phytium_fb = to_phytium_framebuffer(fb); + int i, num_planes; + struct drm_gem_object *obj = NULL; + + num_planes = drm_format_num_planes(fb->format->format); + + for (i = 0; i < num_planes; i++) { + obj = &phytium_fb->phytium_gem_obj[i]->base; + if (obj) + drm_gem_object_unreference_unlocked(obj); + } + + drm_framebuffer_cleanup(fb); + kfree(phytium_fb); +} + +static struct drm_framebuffer_funcs viv_fb_funcs = { + .create_handle = phytium_fb_create_handle, + .destroy = phytium_fb_destroy, +}; + +struct phytium_framebuffer * +phytium_fb_alloc(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd, + struct phytium_gem_object **phytium_gem_obj, unsigned int num_planes) +{ + struct phytium_framebuffer *phytium_fb; + int ret = 0, i; + + phytium_fb = kzalloc(sizeof(*phytium_fb), GFP_KERNEL); + if (!phytium_fb) + return ERR_PTR(-ENOMEM); + + drm_helper_mode_fill_fb_struct(dev, &phytium_fb->base, mode_cmd); + + ret = drm_framebuffer_init(dev, &phytium_fb->base, &viv_fb_funcs); + + if (ret) { + DRM_ERROR("Failed to initialize framebuffer: %d\n", ret); + kfree(phytium_fb); + return ERR_PTR(ret); + } + + for (i = 0; i < num_planes; i++) + phytium_fb->phytium_gem_obj[i] = phytium_gem_obj[i]; + + return phytium_fb; +} + +struct drm_framebuffer * +phytium_fb_create(struct drm_device *dev, struct drm_file *file_priv, + const struct drm_mode_fb_cmd2 *mode_cmd) +{ + int ret = 0, i, num_planes; + struct drm_gem_object *obj; + unsigned int hsub, vsub, size; + struct phytium_gem_object 
*phytium_gem_obj[PHYTIUM_FORMAT_MAX_PLANE] = {0}; + struct phytium_framebuffer *phytium_fb; + struct phytium_display_private *priv = dev->dev_private; + + hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format); + vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format); + num_planes = min(drm_format_num_planes(mode_cmd->pixel_format), PHYTIUM_FORMAT_MAX_PLANE); + for (i = 0; i < num_planes; i++) { + unsigned int height = mode_cmd->height / (i ? vsub : 1); + + size = height * mode_cmd->pitches[i] + mode_cmd->offsets[i]; + obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]); + if (!obj) { + DRM_ERROR("Failed to lookup GEM object\n"); + ret = -ENXIO; + goto error; + } + + if (obj->size < size) { + drm_gem_object_unreference_unlocked(obj); + ret = -EINVAL; + goto error; + } + + phytium_gem_obj[i] = to_phytium_gem_obj(obj); + + ret = priv->dc_hw_fb_format_check(mode_cmd, i); + if (ret < 0) + goto error; + } + + phytium_fb = phytium_fb_alloc(dev, mode_cmd, phytium_gem_obj, i); + if (IS_ERR(phytium_fb)) { + DRM_DEBUG_KMS("phytium_fb_alloc failed\n"); + ret = PTR_ERR(phytium_fb); + goto error; + } + + return &phytium_fb->base; +error: + for (i--; i >= 0; i--) + drm_gem_object_unreference_unlocked(&phytium_gem_obj[i]->base); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/phytium/phytium_fb.h b/drivers/gpu/drm/phytium/phytium_fb.h new file mode 100644 index 000000000000..c11c6c009b13 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_fb.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. 
+ */ + +#ifndef __PHYTIUM_FB_H__ +#define __PHYTIUM_FB_H__ + +struct phytium_framebuffer { + struct drm_framebuffer base; + struct phytium_gem_object *phytium_gem_obj[PHYTIUM_FORMAT_MAX_PLANE]; +}; + +#define to_phytium_framebuffer(fb) container_of(fb, struct phytium_framebuffer, base) + +struct phytium_framebuffer *phytium_fb_alloc(struct drm_device *dev, + const struct drm_mode_fb_cmd2 *mode_cmd, + struct phytium_gem_object **phytium_gem_obj, + unsigned int num_planes); + +struct drm_framebuffer *phytium_fb_create(struct drm_device *dev, struct drm_file *file_priv, + const struct drm_mode_fb_cmd2 *mode_cmd); +#endif /* __PHYTIUM_FB_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_fbdev.c b/drivers/gpu/drm/phytium/phytium_fbdev.c new file mode 100644 index 000000000000..8eb16b3d7c70 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_fbdev.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_gem.h" +#include "phytium_fb.h" + + +#define PHYTIUM_MAX_CONNECTOR 1 +#define helper_to_drm_private(x) container_of(x, struct phytium_display_private, fbdev_helper) + +static int phytium_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma) +{ + struct drm_fb_helper *helper = info->par; + struct phytium_display_private *priv = helper_to_drm_private(helper); + + return phytium_gem_mmap_obj(&priv->fbdev_phytium_gem->base, vma); +} + +static struct fb_ops phytium_fbdev_ops = { + .owner = THIS_MODULE, + DRM_FB_HELPER_DEFAULT_OPS, + .fb_mmap = phytium_fbdev_mmap, + .fb_fillrect = drm_fb_helper_cfb_fillrect, + .fb_copyarea = drm_fb_helper_cfb_copyarea, + .fb_imageblit = drm_fb_helper_cfb_imageblit, +}; + +static int +phytium_drm_fbdev_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) +{ + struct phytium_display_private *priv = 
helper_to_drm_private(helper); + struct drm_device *dev = helper->dev; + unsigned int bytes_per_pixel; + struct drm_mode_fb_cmd2 mode_cmd = {0}; + struct phytium_framebuffer *phytium_fb = NULL; + struct fb_info *fbi = NULL; + struct drm_framebuffer *fb = NULL; + size_t size = 0; + int ret = 0; + unsigned long offset; + + bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8); + mode_cmd.width = sizes->surface_width; + mode_cmd.height = sizes->surface_height; + mode_cmd.pitches[0] = ALIGN(sizes->surface_width * bytes_per_pixel, 128); + mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); + size = PAGE_ALIGN(mode_cmd.pitches[0] * mode_cmd.height); + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret < 0) { + DRM_ERROR("failed to get mutex lock\n"); + return ret; + } + + priv->fbdev_phytium_gem = phytium_gem_create_object(dev, size); + if (!priv->fbdev_phytium_gem) { + DRM_ERROR("failed to create gem object\n"); + return -ENOMEM; + } + mutex_unlock(&dev->struct_mutex); + + fbi = drm_fb_helper_alloc_fbi(helper); + if (IS_ERR(fbi)) { + DRM_DEV_ERROR(dev->dev, "Failed to create framebuffer info."); + ret = PTR_ERR(fbi); + goto out; + } + + phytium_fb = phytium_fb_alloc(dev, &mode_cmd, &priv->fbdev_phytium_gem, 1); + if (IS_ERR(phytium_fb)) { + DRM_DEV_ERROR(dev->dev, "Failed to alloc DRM framebuffer.\n"); + ret = PTR_ERR(phytium_fb); + goto out; + } + + helper->fb = &(phytium_fb->base); + fbi->par = helper; + fbi->flags = FBINFO_FLAG_DEFAULT; + fbi->fbops = &phytium_fbdev_ops; + + fb = helper->fb; + drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth); + drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height); + + offset = fbi->var.xoffset * bytes_per_pixel; + offset += fbi->var.yoffset * fb->pitches[0]; + dev->mode_config.fb_base = 0; + fbi->screen_base = priv->fbdev_phytium_gem->vaddr + offset; + fbi->screen_size = priv->fbdev_phytium_gem->base.size; + fbi->fix.smem_len = 
priv->fbdev_phytium_gem->base.size; + DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%pa offset=%ld size=%zu\n", fb->width, fb->height, + fb->format->depth, &priv->fbdev_phytium_gem->iova, offset, size); + fbi->skip_vt_switch = true; + + return 0; +out: + phytium_gem_free_object(&priv->fbdev_phytium_gem->base); + return ret; +} + +static const struct drm_fb_helper_funcs phytium_drm_fb_helper_funcs = { + .fb_probe = phytium_drm_fbdev_create, +}; + +int phytium_drm_fbdev_init(struct drm_device *dev) +{ + struct phytium_display_private *priv = dev->dev_private; + struct drm_fb_helper *helper; + int ret; + + if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector) + return -EINVAL; + + helper = &priv->fbdev_helper; + drm_fb_helper_prepare(dev, helper, &phytium_drm_fb_helper_funcs); + + ret = drm_fb_helper_init(dev, helper, PHYTIUM_MAX_CONNECTOR); + if (ret < 0) { + DRM_DEV_ERROR(dev->dev, "Failed to initialize drm fb helper -ret %d\n", ret); + return ret; + } + + ret = drm_fb_helper_single_add_all_connectors(helper); + if (ret < 0) { + DRM_DEV_ERROR(dev->dev, "Failed to add connectors - %d/\n", ret); + goto err_drm_fb_helper_fini; + } + + ret = drm_fb_helper_initial_config(helper, 32); + return 0; + +err_drm_fb_helper_fini: + drm_fb_helper_fini(helper); + return ret; +} + +void phytium_drm_fbdev_fini(struct drm_device *dev) +{ + struct phytium_display_private *priv = dev->dev_private; + struct drm_fb_helper *helper; + + helper = &priv->fbdev_helper; + drm_fb_helper_unregister_fbi(helper); + + if (helper->fb) + drm_framebuffer_put(helper->fb); + + drm_fb_helper_fini(helper); +} diff --git a/drivers/gpu/drm/phytium/phytium_fbdev.h b/drivers/gpu/drm/phytium/phytium_fbdev.h new file mode 100644 index 000000000000..d291d82c2706 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_fbdev.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. 
+ */ + +#ifndef _PHYTIUM_FBDEV_H +#define _PHYTIUM_FBDEV_H + +int phytium_drm_fbdev_init(struct drm_device *dev); +void phytium_drm_fbdev_fini(struct drm_device *dev); + +#endif /* _PHYTIUM_FBDEV_H */ diff --git a/drivers/gpu/drm/phytium/phytium_gem.c b/drivers/gpu/drm/phytium/phytium_gem.c new file mode 100644 index 000000000000..bd0b85e64bbc --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_gem.c @@ -0,0 +1,281 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_gem.h" + +struct sg_table * +phytium_gem_prime_get_sg_table(struct drm_gem_object *obj) +{ + struct phytium_gem_object *phytium_gem_obj = to_phytium_gem_obj(obj); + struct sg_table *sgt; + struct drm_device *dev = obj->dev; + int ret; + + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) { + DRM_DEBUG_KMS("malloc sgt fail\n"); + return ERR_PTR(-ENOMEM); + } + + ret = dma_get_sgtable_attrs(dev->dev, sgt, phytium_gem_obj->vaddr, + phytium_gem_obj->iova, phytium_gem_obj->size, + DMA_ATTR_WRITE_COMBINE); + if (ret) { + DRM_ERROR("failed to allocate sgt, %d\n", ret); + kfree(sgt); + return ERR_PTR(ret); + } + + return sgt; +} + +struct drm_gem_object * +phytium_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt) +{ + struct phytium_gem_object *phytium_gem_obj = NULL; + struct scatterlist *s; + dma_addr_t expected; + int ret, i; + + phytium_gem_obj = kzalloc(sizeof(*phytium_gem_obj), GFP_KERNEL); + if (!phytium_gem_obj) { + DRM_ERROR("failed to allocate phytium_gem_obj\n"); + ret = -ENOMEM; + goto failed_malloc; + } + + ret = drm_gem_object_init(dev, &phytium_gem_obj->base, attach->dmabuf->size); + if (ret) { + DRM_ERROR("failed to initialize drm gem object: %d\n", ret); + goto failed_object_init; + } + + expected = sg_dma_address(sgt->sgl); + 
for_each_sg(sgt->sgl, s, sgt->nents, i) { + if (sg_dma_address(s) != expected) { + DRM_ERROR("sg_table is not contiguous"); + ret = -EINVAL; + goto failed_check_continue; + } + expected = sg_dma_address(s) + sg_dma_len(s); + } + + phytium_gem_obj->iova = sg_dma_address(sgt->sgl); + phytium_gem_obj->sgt = sgt; + + return &phytium_gem_obj->base; +failed_check_continue: + drm_gem_object_release(&phytium_gem_obj->base); +failed_object_init: + kfree(phytium_gem_obj); +failed_malloc: + return ERR_PTR(ret); +} + +void *phytium_gem_prime_vmap(struct drm_gem_object *obj) +{ + struct phytium_gem_object *phytium_obj = to_phytium_gem_obj(obj); + + return phytium_obj->vaddr; +} + +void phytium_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) +{ +} + +int phytium_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) +{ + int ret = 0; + + ret = drm_gem_mmap_obj(obj, obj->size, vma); + if (ret < 0) + return ret; + + return phytium_gem_mmap_obj(obj, vma); +} + +int phytium_gem_suspend(struct drm_device *drm_dev) +{ + struct phytium_display_private *priv = drm_dev->dev_private; + struct phytium_gem_object *phytium_gem_obj = NULL; + + list_for_each_entry(phytium_gem_obj, &priv->gem_list_head, list) { + if (!phytium_gem_obj->is_vram) + continue; + + phytium_gem_obj->vaddr_save = vmalloc(phytium_gem_obj->size); + if (!phytium_gem_obj->vaddr_save) + goto malloc_failed; + + memcpy(phytium_gem_obj->vaddr_save, phytium_gem_obj->vaddr, phytium_gem_obj->size); + } + + return 0; +malloc_failed: + list_for_each_entry(phytium_gem_obj, &priv->gem_list_head, list) { + if (!phytium_gem_obj->is_vram) + continue; + + if (phytium_gem_obj->vaddr_save) { + vfree(phytium_gem_obj->vaddr_save); + phytium_gem_obj->vaddr_save = NULL; + } + } + return -ENOMEM; +} + +void phytium_gem_resume(struct drm_device *drm_dev) +{ + struct phytium_display_private *priv = drm_dev->dev_private; + struct phytium_gem_object *phytium_gem_obj = NULL; + + list_for_each_entry(phytium_gem_obj, 
&priv->gem_list_head, list) { + if (!phytium_gem_obj->is_vram) + continue; + + memcpy(phytium_gem_obj->vaddr, phytium_gem_obj->vaddr_save, phytium_gem_obj->size); + vfree(phytium_gem_obj->vaddr_save); + phytium_gem_obj->vaddr_save = NULL; + } +} + +void phytium_gem_free_object(struct drm_gem_object *obj) +{ + struct phytium_gem_object *phytium_gem_obj = to_phytium_gem_obj(obj); + struct drm_device *dev = obj->dev; + + DRM_DEBUG_KMS("free phytium_gem_obj iova:0x%pa size:0x%lx\n", + &phytium_gem_obj->iova, phytium_gem_obj->size); + if (phytium_gem_obj->vaddr) { + dma_free_attrs(dev->dev, phytium_gem_obj->size, phytium_gem_obj->vaddr, + phytium_gem_obj->iova, 0); + list_del(&phytium_gem_obj->list); + } + else if (obj->import_attach) + drm_prime_gem_destroy(obj, phytium_gem_obj->sgt); + drm_gem_object_release(obj); + kfree(phytium_gem_obj); +} + +int phytium_gem_mmap_obj(struct drm_gem_object *obj, struct vm_area_struct *vma) +{ + int ret = 0; + struct phytium_gem_object *phytium_gem_obj = to_phytium_gem_obj(obj); + + /* + * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the + * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map + * the whole buffer. 
+ */ + vma->vm_flags &= ~VM_PFNMAP; + vma->vm_pgoff = 0; + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + + if (phytium_gem_obj->is_vram) + ret = dma_mmap_attrs(obj->dev->dev, vma, phytium_gem_obj->vaddr, + phytium_gem_obj->iova, vma->vm_end - vma->vm_start, + DMA_ATTR_WRITE_COMBINE); + else + ret = dma_mmap_attrs(obj->dev->dev, vma, phytium_gem_obj->vaddr, + phytium_gem_obj->iova, vma->vm_end - vma->vm_start, 0); + if (ret) + drm_gem_vm_close(vma); + + return ret; +} + +int phytium_gem_mmap(struct file *filp, struct vm_area_struct *vma) +{ + int ret = 0; + + ret = drm_gem_mmap(filp, vma); + if (ret < 0) + return ret; + + return phytium_gem_mmap_obj(vma->vm_private_data, vma); +} + +int phytium_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev, uint32_t handle) +{ + return drm_gem_dumb_destroy(file, dev, handle); +} + +struct phytium_gem_object *phytium_gem_create_object(struct drm_device *dev, unsigned long size) +{ + struct phytium_gem_object *phytium_gem_obj = NULL; + struct phytium_display_private *priv = dev->dev_private; + int ret = 0; + + phytium_gem_obj = kzalloc(sizeof(*phytium_gem_obj), GFP_KERNEL); + if (!phytium_gem_obj) { + DRM_ERROR("failed to allocate phytium_gem_obj\n"); + ret = -ENOMEM; + goto error; + } + + ret = drm_gem_object_init(dev, &phytium_gem_obj->base, size); + if (ret) { + DRM_ERROR("failed to initialize drm gem object: %d\n", ret); + goto failed_object_init; + } + + phytium_gem_obj->vaddr = dma_alloc_attrs(dev->dev, size, &phytium_gem_obj->iova, + GFP_KERNEL, 0); + if (!phytium_gem_obj->vaddr) { + DRM_ERROR("fail to allocate buffer with size %lx\n", size); + ret = -ENOMEM; + goto failed_dma_alloc; + } + + phytium_gem_obj->size = size; + phytium_gem_obj->is_vram = priv->vram_support; + list_add_tail(&phytium_gem_obj->list, &priv->gem_list_head); + DRM_DEBUG_KMS("phytium_gem_obj iova:0x%pa size:0x%lx\n", + &phytium_gem_obj->iova, phytium_gem_obj->size); + return phytium_gem_obj; +failed_dma_alloc: + 
drm_gem_object_unreference_unlocked(&phytium_gem_obj->base); + return ERR_PTR(ret); +failed_object_init: + kfree(phytium_gem_obj); +error: + return ERR_PTR(ret); +} + +int phytium_gem_dumb_create(struct drm_file *file, struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + int size = 0; + struct phytium_gem_object *phytium_gem_obj = NULL; + int ret = 0; + + args->pitch = ALIGN(args->width*DIV_ROUND_UP(args->bpp, 8), 128); + args->size = args->pitch * args->height; + size = PAGE_ALIGN(args->size); + phytium_gem_obj = phytium_gem_create_object(dev, size); + if (IS_ERR(phytium_gem_obj)) + return PTR_ERR(phytium_gem_obj); + ret = drm_gem_handle_create(file, &phytium_gem_obj->base, &args->handle); + if (ret) { + DRM_ERROR("failed to drm_gem_handle_create\n"); + goto failed_gem_handle; + } + drm_gem_object_unreference_unlocked(&phytium_gem_obj->base); + + return 0; +failed_gem_handle: + phytium_gem_free_object(&phytium_gem_obj->base); + return ret; +} diff --git a/drivers/gpu/drm/phytium/phytium_gem.h b/drivers/gpu/drm/phytium/phytium_gem.h new file mode 100644 index 000000000000..b1d6b54ebf2f --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_gem.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. 
+ */ + +#ifndef __PHYTIUM_GEM_H__ +#define __PHYTIUM_GEM_H__ + +#include + +struct phytium_gem_object { + struct drm_gem_object base; + dma_addr_t iova; + void *vaddr; + unsigned long size; + struct sg_table *sgt; + bool is_vram; + bool reserve[3]; + struct list_head list; + void *vaddr_save; +}; + +#define to_phytium_gem_obj(obj) container_of(obj, struct phytium_gem_object, base) + +int phytium_gem_mmap_obj(struct drm_gem_object *obj, struct vm_area_struct *vma); +int phytium_gem_mmap(struct file *filp, struct vm_area_struct *vma); +void phytium_gem_free_object(struct drm_gem_object *obj); +struct sg_table *phytium_gem_prime_get_sg_table(struct drm_gem_object *obj); +struct drm_gem_object *phytium_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, struct sg_table *sgt); +void phytium_gem_free_object(struct drm_gem_object *obj); +int phytium_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev, unsigned int handle); +struct phytium_gem_object *phytium_gem_create_object(struct drm_device *dev, unsigned long size); +int phytium_gem_dumb_create(struct drm_file *file, struct drm_device *dev, + struct drm_mode_create_dumb *args); +void *phytium_gem_prime_vmap(struct drm_gem_object *obj); +void phytium_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); +int phytium_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); +int phytium_gem_suspend(struct drm_device *drm_dev); +void phytium_gem_resume(struct drm_device *drm_dev); +#endif /* __PHYTIUM_GEM_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_panel.c b/drivers/gpu/drm/phytium/phytium_panel.c new file mode 100644 index 000000000000..ed16ed15197d --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_panel.c @@ -0,0 +1,421 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_dp.h" +#include "phytium_panel.h" + +static int +phytium_dp_aux_set_backlight(struct phytium_panel *panel, unsigned int level) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + unsigned char vals[2] = { 0x0 }; + + vals[0] = level; + if (phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) { + vals[0] = (level & 0xFF00) >> 8; + vals[1] = (level & 0xFF); + } + + if (drm_dp_dpcd_write(&phytium_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, + vals, sizeof(vals)) < 0) { + DRM_DEBUG_KMS("Failed to write aux backlight level\n"); + return -EIO; + } + + return 0; +} + +static unsigned int phytium_dp_aux_get_backlight(struct phytium_panel *panel) +{ + unsigned char read_val[2] = { 0x0 }; + unsigned int level = 0; + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + if (drm_dp_dpcd_read(&phytium_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, + &read_val, sizeof(read_val)) < 0) { + DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n", + DP_EDP_BACKLIGHT_BRIGHTNESS_MSB); + return 0; + } + + level = read_val[0]; + if (phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) + level = (read_val[0] << 8 | read_val[1]); + + return level; +} + +static void set_aux_backlight_enable(struct phytium_panel *panel, bool enable) +{ + u8 reg_val = 0; + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + if (!(phytium_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP)) + return; + + if (drm_dp_dpcd_readb(&phytium_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER, + &reg_val) < 0) { + DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n", + DP_EDP_DISPLAY_CONTROL_REGISTER); + return; + } + + if (enable) + reg_val |= DP_EDP_BACKLIGHT_ENABLE; + else + reg_val &= ~(DP_EDP_BACKLIGHT_ENABLE); + + if (drm_dp_dpcd_writeb(&phytium_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER, + reg_val) != 1) { + DRM_DEBUG_KMS("Failed to %s aux 
backlight\n", + enable ? "enable" : "disable"); + } +} + +static void phytium_dp_aux_enable_backlight(struct phytium_panel *panel) +{ + unsigned char dpcd_buf, new_dpcd_buf, edp_backlight_mode; + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + if (drm_dp_dpcd_readb(&phytium_dp->aux, + DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) != 1) { + DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n", + DP_EDP_BACKLIGHT_MODE_SET_REGISTER); + return; + } + + new_dpcd_buf = dpcd_buf; + edp_backlight_mode = dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK; + + switch (edp_backlight_mode) { + case DP_EDP_BACKLIGHT_CONTROL_MODE_PWM: + case DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET: + case DP_EDP_BACKLIGHT_CONTROL_MODE_PRODUCT: + new_dpcd_buf &= ~DP_EDP_BACKLIGHT_CONTROL_MODE_MASK; + new_dpcd_buf |= DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD; + break; + + /* Do nothing when it is already DPCD mode */ + case DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD: + default: + break; + } + + if (new_dpcd_buf != dpcd_buf) { + if (drm_dp_dpcd_writeb(&phytium_dp->aux, + DP_EDP_BACKLIGHT_MODE_SET_REGISTER, new_dpcd_buf) < 0) { + DRM_DEBUG_KMS("Failed to write aux backlight mode\n"); + } + } + + set_aux_backlight_enable(panel, true); + phytium_dp_aux_set_backlight(panel, panel->level); +} + +static void phytium_dp_aux_disable_backlight(struct phytium_panel *panel) +{ + set_aux_backlight_enable(panel, false); +} + +static void phytium_dp_aux_setup_backlight(struct phytium_panel *panel) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + if (phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) + phytium_dp->panel.max = 0xFFFF; + else + phytium_dp->panel.max = 0xFF; + + phytium_dp->panel.min = 0; + phytium_dp->panel.level = phytium_dp_aux_get_backlight(panel); + phytium_dp->panel.backlight_enabled = (phytium_dp->panel.level != 0); +} + +static void phytium_dp_hw_poweron_panel(struct phytium_panel *panel) +{ + struct phytium_dp_device *phytium_dp = 
panel_to_dp_device(panel); + + phytium_dp->funcs->dp_hw_poweron_panel(phytium_dp); +} + +static void phytium_dp_hw_poweroff_panel(struct phytium_panel *panel) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + phytium_dp->funcs->dp_hw_poweroff_panel(phytium_dp); +} + +static int +phytium_dp_hw_set_backlight(struct phytium_panel *panel, uint32_t level) +{ + int ret; + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + ret = phytium_dp->funcs->dp_hw_set_backlight(phytium_dp, level); + + return ret; +} + +static uint32_t phytium_dp_hw_get_backlight(struct phytium_panel *panel) +{ + uint32_t ret; + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + ret = phytium_dp->funcs->dp_hw_get_backlight(phytium_dp); + + return ret; +} + +static void phytium_dp_hw_enable_backlight(struct phytium_panel *panel) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + phytium_dp->funcs->dp_hw_set_backlight(phytium_dp, phytium_dp->panel.level); + phytium_dp->funcs->dp_hw_enable_backlight(phytium_dp); +} + +static void phytium_dp_hw_disable_backlight(struct phytium_panel *panel) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + phytium_dp->funcs->dp_hw_disable_backlight(phytium_dp); +} + +static void phytium_dp_hw_setup_backlight(struct phytium_panel *panel) +{ + struct drm_device *dev = panel->dev; + struct phytium_display_private *priv = dev->dev_private; + + panel->max = priv->info.backlight_max; + panel->min = 0; + panel->level = phytium_dp_hw_get_backlight(panel); +} + +void phytium_dp_panel_init_backlight_funcs(struct phytium_dp_device *phytium_dp) +{ + if (phytium_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP && + (phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP) && + !(phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP)) { + DRM_DEBUG_KMS("AUX Backlight Control Supported!\n"); + phytium_dp->panel.setup_backlight = 
phytium_dp_aux_setup_backlight; + phytium_dp->panel.enable_backlight = phytium_dp_aux_enable_backlight; + phytium_dp->panel.disable_backlight = phytium_dp_aux_disable_backlight; + phytium_dp->panel.set_backlight = phytium_dp_aux_set_backlight; + phytium_dp->panel.get_backlight = phytium_dp_aux_get_backlight; + } else { + DRM_DEBUG_KMS("SE Backlight Control Supported!\n"); + phytium_dp->panel.setup_backlight = phytium_dp_hw_setup_backlight; + phytium_dp->panel.enable_backlight = phytium_dp_hw_enable_backlight; + phytium_dp->panel.disable_backlight = phytium_dp_hw_disable_backlight; + phytium_dp->panel.set_backlight = phytium_dp_hw_set_backlight; + phytium_dp->panel.get_backlight = phytium_dp_hw_get_backlight; + } + phytium_dp->panel.poweron = phytium_dp_hw_poweron_panel; + phytium_dp->panel.poweroff = phytium_dp_hw_poweroff_panel; + mutex_init(&phytium_dp->panel.panel_lock); + phytium_dp->panel.dev = phytium_dp->dev; + + /* Upper limits from eDP 1.3 spec */ + phytium_dp->panel.panel_power_up_delay = 210; /* t1_t3 */ + phytium_dp->panel.backlight_on_delay = 50; /* t7 */ + phytium_dp->panel.backlight_off_delay = 50; + phytium_dp->panel.panel_power_down_delay = 500; /* t10 */ + phytium_dp->panel.panel_power_cycle_delay = 510; /* t11 + t12 */ +} + +void phytium_dp_panel_release_backlight_funcs(struct phytium_dp_device *phytium_dp) +{ + phytium_dp->panel.setup_backlight = NULL; + phytium_dp->panel.enable_backlight = NULL; + phytium_dp->panel.disable_backlight = NULL; + phytium_dp->panel.set_backlight = NULL; + phytium_dp->panel.get_backlight = NULL; + phytium_dp->panel.poweron = NULL; + phytium_dp->panel.poweroff = NULL; +} + +void phytium_panel_enable_backlight(struct phytium_panel *panel) +{ + + if (panel->enable_backlight) { + mutex_lock(&panel->panel_lock); + msleep(panel->backlight_on_delay); + panel->enable_backlight(panel); + panel->backlight_enabled = true; + mutex_unlock(&panel->panel_lock); + } +} + +void phytium_panel_disable_backlight(struct phytium_panel 
*panel) +{ + if (panel->disable_backlight) { + mutex_lock(&panel->panel_lock); + panel->disable_backlight(panel); + panel->backlight_enabled = false; + msleep(panel->backlight_off_delay); + mutex_unlock(&panel->panel_lock); + } +} + +void phytium_panel_poweron(struct phytium_panel *panel) +{ + if (panel->poweron) { + mutex_lock(&panel->panel_lock); + panel->poweron(panel); + panel->power_enabled = true; + msleep(panel->panel_power_up_delay); + mutex_unlock(&panel->panel_lock); + } +} + +void phytium_panel_poweroff(struct phytium_panel *panel) +{ + if (panel->poweroff) { + mutex_lock(&panel->panel_lock); + msleep(panel->panel_power_down_delay); + panel->poweroff(panel); + panel->power_enabled = false; + mutex_unlock(&panel->panel_lock); + } +} + +static uint32_t phytium_scale(uint32_t source_val, + uint32_t source_min, uint32_t source_max, + uint32_t target_min, uint32_t target_max) +{ + uint64_t target_val; + + WARN_ON(source_min > source_max); + WARN_ON(target_min > target_max); + + /* defensive */ + source_val = clamp(source_val, source_min, source_max); + + /* avoid overflows */ + target_val = mul_u32_u32(source_val - source_min, target_max - target_min); + target_val = DIV_ROUND_CLOSEST_ULL(target_val, source_max - source_min); + target_val += target_min; + + return target_val; +} + +static inline uint32_t +phytium_scale_hw_to_user(struct phytium_panel *panel, uint32_t hw_level, uint32_t user_max) +{ + return phytium_scale(hw_level, panel->min, panel->max, + 0, user_max); +} + +static inline uint32_t +phytium_scale_user_to_hw(struct phytium_panel *panel, u32 user_level, u32 user_max) +{ + return phytium_scale(user_level, 0, user_max, + panel->min, panel->max); +} + +static int phytium_backlight_device_update_status(struct backlight_device *bd) +{ + struct phytium_panel *panel = bl_get_data(bd); + struct drm_device *dev = panel->dev; + uint32_t hw_level = 0; + int ret = 0; + + DRM_DEBUG_KMS("updating phytium_backlight, brightness=%d/%d\n", + 
bd->props.brightness, bd->props.max_brightness); + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + hw_level = phytium_scale_user_to_hw(panel, bd->props.brightness, bd->props.max_brightness); + + if ((panel->set_backlight) && (panel->backlight_enabled)) { + mutex_lock(&panel->panel_lock); + ret = panel->set_backlight(panel, hw_level); + panel->level = hw_level; + mutex_unlock(&panel->panel_lock); + } + drm_modeset_unlock(&dev->mode_config.connection_mutex); + + return ret; +} + +static int phytium_backlight_device_get_brightness(struct backlight_device *bd) +{ + struct phytium_panel *panel = bl_get_data(bd); + struct drm_device *dev = panel->dev; + uint32_t hw_level = 0; + int ret; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + if (panel->get_backlight && panel->backlight_enabled) { + mutex_lock(&panel->panel_lock); + hw_level = panel->get_backlight(panel); + panel->level = hw_level; + mutex_unlock(&panel->panel_lock); + } + drm_modeset_unlock(&dev->mode_config.connection_mutex); + ret = phytium_scale_hw_to_user(panel, hw_level, bd->props.max_brightness); + DRM_DEBUG_KMS("get phytium_backlight, brightness=%d/%d\n", + ret, bd->props.max_brightness); + + return ret; +} + +static const struct backlight_ops phytium_backlight_device_ops = { + .update_status = phytium_backlight_device_update_status, + .get_brightness = phytium_backlight_device_get_brightness, +}; + +int phytium_edp_backlight_device_register(struct phytium_dp_device *phytium_dp) +{ + struct backlight_properties props; + char bl_name[16]; + + if (phytium_dp->panel.setup_backlight) { + mutex_lock(&phytium_dp->panel.panel_lock); + phytium_dp->panel.setup_backlight(&phytium_dp->panel); + mutex_unlock(&phytium_dp->panel.panel_lock); + } else { + return -EINVAL; + } + + memset(&props, 0, sizeof(props)); + props.max_brightness = PHYTIUM_MAX_BL_LEVEL; + props.type = BACKLIGHT_RAW; + props.brightness = phytium_scale_hw_to_user(&phytium_dp->panel, phytium_dp->panel.level, + 
props.max_brightness); + snprintf(bl_name, sizeof(bl_name), "phytium_bl%d", phytium_dp->port); + + phytium_dp->panel.bl_device = + backlight_device_register(bl_name, + phytium_dp->connector.kdev, + &phytium_dp->panel, + &phytium_backlight_device_ops, + &props); + + if (IS_ERR(phytium_dp->panel.bl_device)) { + DRM_ERROR("Failed to register backlight: %ld\n", + PTR_ERR(phytium_dp->panel.bl_device)); + phytium_dp->panel.bl_device = NULL; + return -ENODEV; + } + + DRM_DEBUG_KMS("Connector %s backlight sysfs interface registered\n", + phytium_dp->connector.name); + + return 0; +} + +void phytium_edp_backlight_device_unregister(struct phytium_dp_device *phytium_dp) +{ + if (phytium_dp->panel.bl_device) { + backlight_device_unregister(phytium_dp->panel.bl_device); + phytium_dp->panel.bl_device = NULL; + } +} diff --git a/drivers/gpu/drm/phytium/phytium_panel.h b/drivers/gpu/drm/phytium/phytium_panel.h new file mode 100644 index 000000000000..e2d5f068064a --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_panel.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. 
+ */ + +#ifndef __PHYTIUM_PANEL_H__ +#define __PHYTIUM_PANEL_H__ +#include "phytium_dp.h" + +#define PHYTIUM_MAX_BL_LEVEL 0xFF + +struct phytium_panel { + struct drm_device *dev; + bool backlight_enabled; + bool power_enabled; + bool reserve1[2]; + unsigned int min; + unsigned int level; + unsigned int max; + struct backlight_device *bl_device; + void (*setup_backlight)(struct phytium_panel *panel); + uint32_t (*get_backlight)(struct phytium_panel *panel); + int (*set_backlight)(struct phytium_panel *panel, uint32_t level); + void (*disable_backlight)(struct phytium_panel *panel); + void (*enable_backlight)(struct phytium_panel *panel); + void (*poweron)(struct phytium_panel *panel); + void (*poweroff)(struct phytium_panel *panel); + struct mutex panel_lock; + uint32_t panel_power_up_delay; + uint32_t backlight_on_delay; + uint32_t backlight_off_delay; + uint32_t panel_power_down_delay; + uint32_t panel_power_cycle_delay; +}; + +void phytium_dp_panel_init_backlight_funcs(struct phytium_dp_device *phytium_dp); +void phytium_dp_panel_release_backlight_funcs(struct phytium_dp_device *phytium_dp); +int phytium_edp_backlight_device_register(struct phytium_dp_device *phytium_dp); +void phytium_edp_backlight_device_unregister(struct phytium_dp_device *phytium_dp); +void phytium_panel_enable_backlight(struct phytium_panel *panel); +void phytium_panel_disable_backlight(struct phytium_panel *panel); +void phytium_panel_poweron(struct phytium_panel *panel); +void phytium_panel_poweroff(struct phytium_panel *panel); + +#endif /* __PHYTIUM_PANEL_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_pci.c b/drivers/gpu/drm/phytium/phytium_pci.c new file mode 100644 index 000000000000..72fe10b242dd --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_pci.c @@ -0,0 +1,284 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. 
+ */ + +#include "phytium_display_drv.h" +#include "phytium_pci.h" +#include "phytium_dp.h" +#include "x100_dc.h" +#include "x100_dp.h" + +int dc_msi_enable; +module_param(dc_msi_enable, int, 0644); +MODULE_PARM_DESC(dc_msi_enable, "Enable DC msi interrupt (0-disabled; 1-enabled; default-0)"); + +void phytium_pci_vram_hw_init(struct phytium_display_private *priv) +{ + struct phytium_pci_private *pci_priv = to_pci_priv(priv); + + pci_priv->dc_hw_vram_init(priv, pci_priv->vram_addr, pci_priv->vram_size); +} + +int phytium_pci_vram_init(struct pci_dev *pdev, struct phytium_display_private *priv) +{ + resource_size_t vram_addr, vram_size; + int ret = 0; + struct phytium_pci_private *pci_priv = to_pci_priv(priv); + + vram_addr = pci_resource_start(pdev, 2); + vram_size = pci_resource_len(pdev, 2); + if ((vram_addr != 0) && (vram_size != 0)) { + DRM_DEBUG_KMS("vram_addr:0x%llx vram_size: 0x%llx\n", vram_addr, vram_size); + ret = dma_declare_coherent_memory(&pdev->dev, vram_addr, vram_addr, + vram_size, DMA_MEMORY_EXCLUSIVE); + if (ret) { + DRM_ERROR("pci bar2 vram declare fail\n"); + ret = -1; + goto failed_declare_memory; + } + pci_priv->vram_addr = vram_addr; + pci_priv->vram_size = vram_size; + priv->vram_support = true; + priv->vram_hw_init = phytium_pci_vram_hw_init; + } else { + DRM_DEBUG_KMS("not support vram\n"); + pci_priv->vram_addr = 0; + pci_priv->vram_size = 0; + priv->vram_support = false; + priv->vram_hw_init = NULL; + } + +failed_declare_memory: + return ret; +} + +void phytium_pci_vram_fini(struct pci_dev *pdev, struct phytium_display_private *priv) +{ + if (priv->vram_support) + dma_release_declared_memory(&pdev->dev); +} + +static struct phytium_display_private* +phytium_pci_private_init(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + struct phytium_display_private *priv = NULL; + struct phytium_pci_private *pci_priv = NULL; + struct phytium_device_info *phytium_info = (struct 
phytium_device_info *)ent->driver_data; + int i = 0; + resource_size_t io_addr, io_size; + + pci_priv = devm_kzalloc(&pdev->dev, sizeof(*pci_priv), GFP_KERNEL); + if (!pci_priv) { + DRM_ERROR("no memory to allocate for drm_display_private\n"); + goto failed_malloc_priv; + } + + memset(pci_priv, 0, sizeof(*pci_priv)); + priv = &pci_priv->base; + phytium_display_private_init(priv, dev); + + memcpy(&(priv->info), phytium_info, sizeof(struct phytium_device_info)); + DRM_DEBUG_KMS("priv->info.num_pipes :%d\n", priv->info.num_pipes); + priv->info.pipe_mask = ((pdev->subsystem_device >> PIPE_MASK_SHIFT) & PIPE_MASK_MASK); + priv->info.edp_mask = ((pdev->subsystem_device >> EDP_MASK_SHIFT) & EDP_MASK_MASK); + priv->info.num_pipes = 0; + for_each_pipe_masked(priv, i) + priv->info.num_pipes++; + if (priv->info.num_pipes == 0) { + DRM_ERROR("num_pipes is zero, so exit init\n"); + goto failed_init_numpipe; + } + + io_addr = pci_resource_start(pdev, 0); + io_size = pci_resource_len(pdev, 0); + priv->regs = ioremap(io_addr, io_size); + if (priv->regs == NULL) { + DRM_ERROR("pci bar0 ioremap fail, addr:0x%llx, size:0x%llx\n", io_addr, io_size); + goto failed_ioremap; + } + + priv->irq = pdev->irq; + if (IS_X100(priv)) { + pci_priv->dc_hw_vram_init = x100_dc_hw_vram_init; + priv->dc_hw_clear_msi_irq = x100_dc_hw_clear_msi_irq; + priv->dc_hw_fb_format_check = x100_dc_hw_fb_format_check; + } + + return priv; + +failed_ioremap: +failed_init_numpipe: + devm_kfree(&pdev->dev, pci_priv); +failed_malloc_priv: + return NULL; +} + +static void +phytium_pci_private_fini(struct pci_dev *pdev, struct phytium_display_private *priv) +{ + struct phytium_pci_private *pci_priv = to_pci_priv(priv); + + if (priv->regs) + iounmap(priv->regs); + + devm_kfree(&pdev->dev, pci_priv); +} + +static int phytium_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct phytium_display_private *priv = NULL; + struct drm_device *dev = NULL; + int ret = 0; + + dev = 
drm_dev_alloc(&phytium_display_drm_driver, &pdev->dev); + if (IS_ERR(dev)) { + DRM_ERROR("failed to allocate drm_device\n"); + return PTR_ERR(dev); + } + dev->pdev = pdev; + pci_set_drvdata(pdev, dev); + pci_set_master(pdev); + ret = pci_enable_device(pdev); + if (ret) { + DRM_ERROR("pci enbale device fail\n"); + goto failed_enable_device; + } + + if (dc_msi_enable) { + ret = pci_enable_msi(pdev); + if (ret) + DRM_ERROR("pci enbale msi fail\n"); + } + + dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); + + priv = phytium_pci_private_init(pdev, ent); + if (priv) + dev->dev_private = priv; + else + goto failed_pci_private_init; + + ret = phytium_pci_vram_init(pdev, priv); + if (ret) { + DRM_ERROR("failed to init pci vram\n"); + goto failed_pci_vram_init; + } + + ret = drm_dev_register(dev, 0); + if (ret) { + DRM_ERROR("failed to register drm dev\n"); + goto failed_register_drm; + } + + phytium_dp_hpd_irq_setup(dev, true); + + return 0; + +failed_register_drm: + phytium_pci_vram_fini(pdev, priv); +failed_pci_vram_init: + phytium_pci_private_fini(pdev, priv); +failed_pci_private_init: + if (pdev->msi_enabled) + pci_disable_msi(pdev); + pci_disable_device(pdev); +failed_enable_device: + pci_set_drvdata(pdev, NULL); + drm_dev_unref(dev); + + return -1; +} + +static void phytium_pci_remove(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + struct phytium_display_private *priv = dev->dev_private; + + phytium_dp_hpd_irq_setup(dev, false); + cancel_work_sync(&priv->hotplug_work); + drm_dev_unregister(dev); + phytium_pci_vram_fini(pdev, priv); + phytium_pci_private_fini(pdev, priv); + if (pdev->msi_enabled) + pci_disable_msi(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + drm_dev_unref(dev); +} + +static void phytium_pci_shutdown(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + struct phytium_display_private *priv = dev->dev_private; + + priv->display_shutdown(dev); +} + +static int 
phytium_pci_pm_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct phytium_display_private *priv = drm_dev->dev_private; + int ret = 0; + + ret = priv->display_pm_suspend(drm_dev); + if (ret < 0) + goto out; + + pci_save_state(pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3hot); + udelay(200); + +out: + return ret; +} + +static int phytium_pci_pm_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct phytium_display_private *priv = drm_dev->dev_private; + int ret = 0; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + ret = pci_enable_device(pdev); + if (ret) + return ret; + pci_set_master(pdev); + + return priv->display_pm_resume(drm_dev); +} + +static const struct dev_pm_ops phytium_pci_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(phytium_pci_pm_suspend, phytium_pci_pm_resume) +}; + +static const struct phytium_device_info x100_info = { + .platform_mask = BIT(PHYTIUM_PLATFORM_X100), + .total_pipes = 3, + .crtc_clock_max = X100_DC_PIX_CLOCK_MAX, + .hdisplay_max = x100_DC_HDISPLAY_MAX, + .vdisplay_max = X100_DC_VDISPLAY_MAX, + .address_mask = X100_DC_ADDRESS_MASK, + .backlight_max = X100_DP_BACKLIGHT_MAX, +}; + +static const struct pci_device_id phytium_display_pci_ids[] = { + { PCI_VDEVICE(PHYTIUM, 0xdc22), (kernel_ulong_t)&x100_info }, + { /* End: all zeroes */ } +}; +MODULE_DEVICE_TABLE(pci, phytium_display_pci_ids); + +struct pci_driver phytium_pci_driver = { + .name = "phytium_display_pci", + .id_table = phytium_display_pci_ids, + .probe = phytium_pci_probe, + .remove = phytium_pci_remove, + .shutdown = phytium_pci_shutdown, + .driver.pm = &phytium_pci_pm_ops, +}; diff --git a/drivers/gpu/drm/phytium/phytium_pci.h b/drivers/gpu/drm/phytium/phytium_pci.h new file mode 100644 index 000000000000..94e3a5e8e95c --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_pci.h @@ 
-0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + +#ifndef __PHYTIUM_PCI_H__ +#define __PHYTIUM_PCI_H__ + +#include "phytium_display_drv.h" + +struct phytium_pci_private { + struct phytium_display_private base; + resource_size_t vram_addr; + resource_size_t vram_size; + void (*dc_hw_vram_init)(struct phytium_display_private *priv, resource_size_t vram_addr, + resource_size_t vram_size); +}; + +#define to_pci_priv(priv) container_of(priv, struct phytium_pci_private, base) + +extern struct pci_driver phytium_pci_driver; +#endif /* __PHYTIUM_PCI_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_plane.c b/drivers/gpu/drm/phytium/phytium_plane.c new file mode 100644 index 000000000000..777bcd137293 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_plane.c @@ -0,0 +1,627 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include + +#include "phytium_display_drv.h" +#include "phytium_plane.h" +#include "phytium_fb.h" +#include "phytium_gem.h" +#include "phytium_crtc.h" +#include "x100_dc.h" +#include "phytium_reg.h" + +#define PHYTIUM_CURS_W_SIZE 32 +#define PHYTIUM_CURS_H_SIZE 32 + +void phytium_plane_destroy(struct drm_plane *plane) +{ + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + + drm_plane_cleanup(plane); + kfree(phytium_plane); +} + +/** + * phytium_plane_atomic_get_property - fetch plane property value + * @plane: plane to fetch property for + * @state: state containing the property value + * @property: property to look up + * @val: pointer to write property value into + * + * The DRM core does not store shadow copies of properties for + * atomic-capable drivers. This entrypoint is used to fetch + * the current value of a driver-specific plane property. 
+ */ +static int +phytium_plane_atomic_get_property(struct drm_plane *plane, + const struct drm_plane_state *state, + struct drm_property *property, + uint64_t *val) +{ + DRM_DEBUG_KMS("Unknown plane property [PROP:%d:%s]\n", property->base.id, property->name); + return -EINVAL; +} + +/** + * phytium_plane_atomic_set_property - set plane property value + * @plane: plane to set property for + * @state: state to update property value in + * @property: property to set + * @val: value to set property to + * + * Writes the specified property value for a plane into the provided atomic + * state object. + * + * Returns 0 on success, -EINVAL on unrecognized properties + */ +int +phytium_plane_atomic_set_property(struct drm_plane *plane, + struct drm_plane_state *state, + struct drm_property *property, + uint64_t val) +{ + DRM_DEBUG_KMS("Unknown plane property [PROP:%d:%s]\n", property->base.id, property->name); + return -EINVAL; +} + +struct drm_plane_state * +phytium_plane_atomic_duplicate_state(struct drm_plane *plane) +{ + struct drm_plane_state *state = NULL; + struct phytium_plane_state *phytium_state = NULL; + + phytium_state = kmemdup(plane->state, sizeof(*phytium_state), GFP_KERNEL); + + if (!phytium_state) + return NULL; + + state = &phytium_state->base; + if (state->fb) + drm_framebuffer_reference(state->fb); + + state->fence = NULL; + state->commit = NULL; + + return state; +} + +void +phytium_plane_atomic_destroy_state(struct drm_plane *plane, struct drm_plane_state *state) +{ + struct phytium_plane_state *phytium_state = to_phytium_plane_state(state); + + __drm_atomic_helper_plane_destroy_state(state); + kfree(phytium_state); +} + +const struct drm_plane_funcs phytium_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = phytium_plane_destroy, + .reset = drm_atomic_helper_plane_reset, + .atomic_get_property = phytium_plane_atomic_get_property, + .atomic_set_property = 
phytium_plane_atomic_set_property, + .atomic_duplicate_state = phytium_plane_atomic_duplicate_state, + .atomic_destroy_state = phytium_plane_atomic_destroy_state, +}; + +static int phytium_plane_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct dma_buf *dma_buf; + struct dma_fence *fence; + + if (!state->fb) + return 0; + dma_buf = to_phytium_framebuffer(state->fb)->phytium_gem_obj[0]->base.dma_buf; + if (dma_buf) { + fence = reservation_object_get_excl_rcu(dma_buf->resv); + drm_atomic_set_fence_for_plane(state, fence); + } + + return 0; +} + +static int +phytium_plane_atomic_check(struct drm_plane *plane, struct drm_plane_state *state) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct drm_framebuffer *fb = state->fb; + struct drm_crtc *crtc = state->crtc; + struct drm_crtc_state *crtc_state; + int src_x, src_y, src_w, src_h; + unsigned long base_offset; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + + if ((!fb) || (!crtc)) + return 0; + + crtc_state = drm_atomic_get_crtc_state(state->state, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + if (plane->type == DRM_PLANE_TYPE_CURSOR) { + src_w = state->src_w >> 16; + src_h = state->src_h >> 16; + if (phytium_crtc->scale_enable) + return -EINVAL; + if ((src_w != PHYTIUM_CURS_W_SIZE) || (src_h != PHYTIUM_CURS_W_SIZE)) { + DRM_INFO("Invalid cursor size(%d, %d)\n", src_w, src_h); + return -EINVAL; + } + } else if (plane->type == DRM_PLANE_TYPE_PRIMARY) { + src_x = state->src_x >> 16; + src_y = state->src_y >> 16; + src_w = state->src_w >> 16; + src_h = state->src_h >> 16; + + base_offset = src_x * fb->format->cpp[0] + src_y*fb->pitches[0]; + if (base_offset & (priv->info.address_mask)) { + DRM_ERROR("fb base address is not aligned by 0x%lx byte\n", + priv->info.address_mask); + return -EINVAL; + } + + if (src_w != state->crtc_w || src_h != state->crtc_h) { + DRM_ERROR("scale not support: 
crtc_w(0x%x)/h(0x%x) src_w(0x%x)/h(0x%x)\n", + state->crtc_w, state->crtc_h, src_w, src_h); + return -EINVAL; + } + + if ((state->crtc_x < 0) || (state->crtc_y < 0)) { + DRM_ERROR("crtc_x(0x%x)/y(0x%x) of drm plane state is invalid\n", + state->crtc_x, state->crtc_y); + return -EINVAL; + } + + if ((state->crtc_x + state->crtc_w > crtc_state->adjusted_mode.hdisplay) + || (state->crtc_y + state->crtc_h > crtc_state->adjusted_mode.vdisplay)) { + DRM_ERROR("plane out of crtc region\n"); + return -EINVAL; + } + } + + return 0; +} + +static void phytium_dc_get_plane_parameter(struct drm_plane *plane) +{ + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + struct drm_framebuffer *fb = plane->state->fb; + struct phytium_framebuffer *phytium_fb = to_phytium_framebuffer(fb); + struct phytium_gem_object *phytium_gem_obj = NULL; + int i, num_planes = 0; + + num_planes = drm_format_num_planes(fb->format->format); + for (i = 0; i < num_planes; i++) { + phytium_gem_obj = phytium_fb->phytium_gem_obj[i]; + phytium_plane->iova[i] = phytium_gem_obj->iova + fb->offsets[i]; + phytium_plane->size[i] = phytium_gem_obj->size - fb->offsets[i]; + + if (fb->modifier == DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC) + phytium_plane->tiling[i] = FRAMEBUFFER_TILE_MODE0; + else if (fb->modifier == DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC) + phytium_plane->tiling[i] = FRAMEBUFFER_TILE_MODE3; + else if (fb->modifier == DRM_FORMAT_MOD_LINEAR) + phytium_plane->tiling[i] = FRAMEBUFFER_LINEAR; + else + phytium_plane->tiling[i] = FRAMEBUFFER_LINEAR; + + if (i == 0) { + switch (fb->format->format) { + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_BGRA1010102: + phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB2101010; + break; + + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_BGRA8888: + phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB8888; + break; + + case DRM_FORMAT_XRGB8888: + 
case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_BGRX8888: + phytium_plane->format = FRAMEBUFFER_FORMAT_XRGB8888; + break; + + case DRM_FORMAT_ARGB4444: + case DRM_FORMAT_ABGR4444: + case DRM_FORMAT_RGBA4444: + case DRM_FORMAT_BGRA4444: + phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB4444; + break; + + case DRM_FORMAT_XRGB4444: + case DRM_FORMAT_XBGR4444: + case DRM_FORMAT_RGBX4444: + case DRM_FORMAT_BGRX4444: + phytium_plane->format = FRAMEBUFFER_FORMAT_XRGB4444; + break; + + case DRM_FORMAT_ARGB1555: + case DRM_FORMAT_ABGR1555: + case DRM_FORMAT_RGBA5551: + case DRM_FORMAT_BGRA5551: + phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB1555; + break; + + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_XBGR1555: + case DRM_FORMAT_RGBX5551: + case DRM_FORMAT_BGRX5551: + phytium_plane->format = FRAMEBUFFER_FORMAT_XRGB1555; + break; + + case DRM_FORMAT_RGB565: + case DRM_FORMAT_BGR565: + phytium_plane->format = FRAMEBUFFER_FORMAT_RGB565; + break; + + case DRM_FORMAT_YUYV: + phytium_plane->format = FRAMEBUFFER_FORMAT_YUYV; + break; + + case DRM_FORMAT_UYVY: + phytium_plane->format = FRAMEBUFFER_FORMAT_UYVY; + break; + case DRM_FORMAT_NV16: + phytium_plane->format = FRAMEBUFFER_FORMAT_NV16; + break; + case DRM_FORMAT_NV12: + phytium_plane->format = FRAMEBUFFER_FORMAT_NV12; + break; + case DRM_FORMAT_NV21: + phytium_plane->format = FRAMEBUFFER_FORMAT_NV12; + break; + default: + DRM_ERROR("unsupported pixel format (format = %d)\n", + fb->format->format); + return; + } + + switch (fb->format->format) { + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB4444: + case DRM_FORMAT_XRGB4444: + case DRM_FORMAT_ARGB1555: + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_RGB565: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_ARGB; + phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; + + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_XBGR8888: + case 
DRM_FORMAT_ABGR4444: + case DRM_FORMAT_XBGR4444: + case DRM_FORMAT_ABGR1555: + case DRM_FORMAT_XBGR1555: + case DRM_FORMAT_BGR565: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_ABGR; + phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; + + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_RGBA4444: + case DRM_FORMAT_RGBX4444: + case DRM_FORMAT_RGBA5551: + case DRM_FORMAT_RGBX5551: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_RGBA; + phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; + + case DRM_FORMAT_BGRA1010102: + case DRM_FORMAT_BGRA8888: + case DRM_FORMAT_BGRX8888: + case DRM_FORMAT_BGRA4444: + case DRM_FORMAT_BGRX4444: + case DRM_FORMAT_BGRA5551: + case DRM_FORMAT_BGRX5551: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_BGRA; + phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; + + case DRM_FORMAT_YUYV: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV12: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_ARGB; + phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; + + default: + DRM_ERROR("unsupported pixel format (format = %d)\n", + fb->format->format); + return; + } + } + } +} + +static void phytium_dc_primary_plane_update(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + struct drm_framebuffer *fb = plane->state->fb; + int phys_pipe = phytium_plane->phys_pipe; + int src_x, src_y, crtc_x, crtc_y, crtc_w, crtc_h; + unsigned long base_offset; + int config; + + src_x = plane->state->src_x >> 16; + src_y = plane->state->src_y >> 16; + crtc_x = plane->state->crtc_x; + crtc_y = plane->state->crtc_y; + crtc_w = plane->state->crtc_w; + crtc_h = plane->state->crtc_h; + + if (phytium_plane->dc_hw_update_dcreq) + phytium_plane->dc_hw_update_dcreq(plane); + 
phytium_plane->dc_hw_update_primary_hi_addr(plane); + + /* config dc */ + /* Y */ + base_offset = src_x * fb->format->cpp[0] + src_y*fb->pitches[0]; + phytium_writel_reg(priv, (phytium_plane->iova[0] + base_offset) & ADDRESS_MASK, + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_Y_ADDRESS); + phytium_writel_reg(priv, ALIGN(fb->pitches[0], 128), + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_Y_STRIDE); + + /* U */ + phytium_writel_reg(priv, phytium_plane->iova[1] & 0xffffffff, + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_U_ADDRESS); + phytium_writel_reg(priv, ALIGN(fb->pitches[1], 128), + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_U_STRIDE); + + /* V */ + phytium_writel_reg(priv, phytium_plane->iova[2] & 0xffffffff, + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_V_ADDRESS); + phytium_writel_reg(priv, ALIGN(fb->pitches[2], 128), + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_V_STRIDE); + + /* size */ + phytium_writel_reg(priv, (crtc_w & WIDTH_MASK) | ((crtc_h&HEIGHT_MASK) << HEIGHT_SHIFT), + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_SIZE); + /* config */ + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); + config &= ~(FRAMEBUFFER_FORMAT_MASK << FRAMEBUFFER_FORMAT_SHIFT); + config |= (phytium_plane->format << FRAMEBUFFER_FORMAT_SHIFT); + config &= ~(1 << FRAMEBUFFER_UVSWIZZLE_SHIFT); + config |= (phytium_plane->uv_swizzle << FRAMEBUFFER_UVSWIZZLE_SHIFT); + config &= ~(FRAMEBUFFER_SWIZZLE_MASK << FRAMEBUFFER_SWIZZLE_SHIFT); + config |= (phytium_plane->swizzle << FRAMEBUFFER_SWIZZLE_SHIFT); + config &= ~(FRAMEBUFFER_TILE_MODE_MASK << FRAMEBUFFER_TILE_MODE_SHIFT); + config |= (phytium_plane->tiling[0] << FRAMEBUFFER_TILE_MODE_SHIFT); + config &= (~FRAMEBUFFER_CLEAR); + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); +} + +static void phytium_dc_cursor_plane_update(struct drm_plane *plane) +{ + struct drm_device *dev 
= plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + struct drm_framebuffer *fb = plane->state->fb; + int phys_pipe = phytium_plane->phys_pipe; + int config; + unsigned long iova; + + phytium_plane->enable = 1; + phytium_plane->cursor_hot_x = fb->hot_x; + phytium_plane->cursor_hot_y = fb->hot_y; + phytium_plane->cursor_x = plane->state->crtc_x + fb->hot_x; + phytium_plane->cursor_y = plane->state->crtc_y + fb->hot_y; + + config = CURSOR_FORMAT_ARGB8888 | + ((phytium_plane->cursor_hot_y & CURSOR_HOT_Y_MASK) << CURSOR_HOT_Y_SHIFT) | + ((phytium_plane->cursor_hot_x & CURSOR_HOT_X_MASK) << CURSOR_HOT_X_SHIFT); + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], PHYTIUM_DC_CURSOR_CONFIG); + + config = ((phytium_plane->cursor_x & CURSOR_X_MASK) << CURSOR_X_SHIFT) | + ((phytium_plane->cursor_y & CURSOR_Y_MASK) << CURSOR_Y_SHIFT); + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_CURSOR_LOCATION); + iova = phytium_plane->iova[0]; + phytium_writel_reg(priv, iova & 0xffffffff, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_CURSOR_ADDRESS); + if (phytium_plane->dc_hw_update_cursor_hi_addr) + phytium_plane->dc_hw_update_cursor_hi_addr(plane, iova); +} + +static void phytium_plane_atomic_update(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct drm_framebuffer *fb, *old_fb; + + DRM_DEBUG_KMS("update plane: type=%d\n", plane->type); + if (!plane->state->crtc || !plane->state->fb) + return; + + fb = plane->state->fb; + old_fb = old_state->fb; + + if (fb) + drm_framebuffer_reference(fb); + if (old_fb) + drm_framebuffer_unreference(old_fb); + + phytium_dc_get_plane_parameter(plane); + + if (plane->type == DRM_PLANE_TYPE_PRIMARY) + phytium_dc_primary_plane_update(plane); + else if (plane->type == DRM_PLANE_TYPE_CURSOR) + phytium_dc_cursor_plane_update(plane); +} + +static void phytium_plane_atomic_disable(struct drm_plane *plane, + struct 
drm_plane_state *old_state) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + int phys_pipe = phytium_plane->phys_pipe; + int config; + struct drm_framebuffer *old_fb; + + old_fb = old_state->fb; + if (old_fb) + drm_framebuffer_unreference(old_fb); + + if (plane->type == DRM_PLANE_TYPE_PRIMARY) { + phytium_writel_reg(priv, CLEAR_VALUE_RED, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CLEARVALUE); + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); + config |= FRAMEBUFFER_CLEAR; + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); + } else if (plane->type == DRM_PLANE_TYPE_CURSOR) { + phytium_writel_reg(priv, CURSOR_FORMAT_DISABLED, + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_CURSOR_CONFIG); + } +} + +const struct drm_plane_helper_funcs phytium_plane_helper_funcs = { + .prepare_fb = phytium_plane_prepare_fb, + .atomic_check = phytium_plane_atomic_check, + .atomic_update = phytium_plane_atomic_update, + .atomic_disable = phytium_plane_atomic_disable, +}; + +struct phytium_plane *phytium_primary_plane_create(struct drm_device *dev, int phys_pipe) +{ + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = NULL; + struct phytium_plane_state *phytium_plane_state = NULL; + int ret = 0; + unsigned int flags = 0; + const uint32_t *formats = NULL; + uint32_t format_count; + const uint64_t *format_modifiers; + + phytium_plane = kzalloc(sizeof(*phytium_plane), GFP_KERNEL); + if (!phytium_plane) { + ret = -ENOMEM; + goto failed_malloc_plane; + } + + phytium_plane_state = kzalloc(sizeof(*phytium_plane_state), GFP_KERNEL); + if (!phytium_plane_state) { + ret = -ENOMEM; + goto failed_malloc_plane_state; + } + phytium_plane_state->base.plane = &phytium_plane->base; + phytium_plane_state->base.rotation = DRM_MODE_ROTATE_0; + 
phytium_plane->base.state = &phytium_plane_state->base; + phytium_plane->phys_pipe = phys_pipe; + + if (IS_X100(priv)) { + phytium_plane->dc_hw_plane_get_format = x100_dc_hw_plane_get_primary_format; + phytium_plane->dc_hw_update_dcreq = x100_dc_hw_update_dcreq; + phytium_plane->dc_hw_update_primary_hi_addr = x100_dc_hw_update_primary_hi_addr; + phytium_plane->dc_hw_update_cursor_hi_addr = NULL; + } + + phytium_plane->dc_hw_plane_get_format(&format_modifiers, &formats, &format_count); + ret = drm_universal_plane_init(dev, &phytium_plane->base, 0x0, + &phytium_plane_funcs, formats, + format_count, + format_modifiers, + DRM_PLANE_TYPE_PRIMARY, "primary %d", phys_pipe); + + if (ret) + goto failed_plane_init; + + flags = DRM_MODE_ROTATE_0; + drm_plane_create_rotation_property(&phytium_plane->base, DRM_MODE_ROTATE_0, flags); + drm_plane_helper_add(&phytium_plane->base, &phytium_plane_helper_funcs); + + return phytium_plane; +failed_plane_init: + kfree(phytium_plane_state); +failed_malloc_plane_state: + kfree(phytium_plane); +failed_malloc_plane: + return ERR_PTR(ret); +} + +struct phytium_plane *phytium_cursor_plane_create(struct drm_device *dev, int phys_pipe) +{ + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = NULL; + struct phytium_plane_state *phytium_plane_state = NULL; + int ret = 0; + unsigned int flags = 0; + const uint32_t *formats = NULL; + uint32_t format_count; + const uint64_t *format_modifiers; + + phytium_plane = kzalloc(sizeof(*phytium_plane), GFP_KERNEL); + if (!phytium_plane) { + ret = -ENOMEM; + goto failed_malloc_plane; + } + + phytium_plane_state = kzalloc(sizeof(*phytium_plane_state), GFP_KERNEL); + if (!phytium_plane_state) { + ret = -ENOMEM; + goto failed_malloc_plane_state; + } + phytium_plane_state->base.plane = &phytium_plane->base; + phytium_plane_state->base.rotation = DRM_MODE_ROTATE_0; + phytium_plane->base.state = &phytium_plane_state->base; + phytium_plane->phys_pipe = phys_pipe; + + if 
(IS_X100(priv)) { + phytium_plane->dc_hw_plane_get_format = x100_dc_hw_plane_get_cursor_format; + phytium_plane->dc_hw_update_dcreq = NULL; + phytium_plane->dc_hw_update_primary_hi_addr = NULL; + phytium_plane->dc_hw_update_cursor_hi_addr = NULL; + } + + phytium_plane->dc_hw_plane_get_format(&format_modifiers, &formats, &format_count); + ret = drm_universal_plane_init(dev, &phytium_plane->base, 0x0, + &phytium_plane_funcs, + formats, format_count, + format_modifiers, + DRM_PLANE_TYPE_CURSOR, "cursor %d", phys_pipe); + + if (ret) + goto failed_plane_init; + + flags = DRM_MODE_ROTATE_0; + drm_plane_create_rotation_property(&phytium_plane->base, DRM_MODE_ROTATE_0, flags); + drm_plane_helper_add(&phytium_plane->base, &phytium_plane_helper_funcs); + + return phytium_plane; +failed_plane_init: + kfree(phytium_plane_state); +failed_malloc_plane_state: + kfree(phytium_plane); +failed_malloc_plane: + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/phytium/phytium_plane.h b/drivers/gpu/drm/phytium/phytium_plane.h new file mode 100644 index 000000000000..41bb607d857e --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_plane.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. 
 */

#ifndef __PHYTIUM_PLANE_H__
#define __PHYTIUM_PLANE_H__

/* Driver-private wrapper around a DRM plane (primary or cursor). */
struct phytium_plane {
	struct drm_plane base;
	int phys_pipe;					/* physical pipe this plane belongs to */
	unsigned long iova[PHYTIUM_FORMAT_MAX_PLANE];	/* per-color-plane DMA addresses */
	unsigned long size[PHYTIUM_FORMAT_MAX_PLANE];	/* bytes available from each iova */
	unsigned int format;				/* hardware FRAMEBUFFER_FORMAT_* code */
	unsigned int tiling[PHYTIUM_FORMAT_MAX_PLANE];	/* hardware tile mode per color plane */
	unsigned int swizzle;				/* RGBA channel order code */
	unsigned int uv_swizzle;			/* UV channel order code */
	unsigned int rot_angle;

	/* only for cursor */
	bool enable;
	bool reserve[3];	/* NOTE(review): apparently padding; purpose unconfirmed */
	unsigned int cursor_x;
	unsigned int cursor_y;
	unsigned int cursor_hot_x;
	unsigned int cursor_hot_y;

	/* Hardware hooks, filled in per SoC generation at plane creation;
	 * entries may be NULL and must be checked before calling.
	 */
	void (*dc_hw_plane_get_format)(const uint64_t **format_modifiers,
				       const uint32_t **formats,
				       uint32_t *format_count);
	void (*dc_hw_update_dcreq)(struct drm_plane *plane);
	void (*dc_hw_update_primary_hi_addr)(struct drm_plane *plane);
	void (*dc_hw_update_cursor_hi_addr)(struct drm_plane *plane, uint64_t iova);
};

/* Driver-private atomic plane state (currently no extra fields). */
struct phytium_plane_state {
	struct drm_plane_state base;
};

#define to_phytium_plane(x) container_of(x, struct phytium_plane, base)
#define to_phytium_plane_state(x) container_of(x, struct phytium_plane_state, base)

struct phytium_plane *phytium_primary_plane_create(struct drm_device *dev, int pipe);
struct phytium_plane *phytium_cursor_plane_create(struct drm_device *dev, int pipe);
#endif /* __PHYTIUM_PLANE_H__ */
diff --git a/drivers/gpu/drm/phytium/phytium_reg.h b/drivers/gpu/drm/phytium/phytium_reg.h
new file mode 100644
index 000000000000..7d8e1183f158
--- /dev/null
+++ b/drivers/gpu/drm/phytium/phytium_reg.h
@@ -0,0 +1,357 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Phytium display drm driver
+ *
+ * Copyright (C) 2021 Phytium Technology Co., Ltd.
+ */ + +#ifndef __PHYTIUM_REG_H__ +#define __PHYTIUM_REG_H__ + +/******************************register base******************************************/ +#define X100_PIPE_BASE(pipe) (0x8000*pipe) +#define X100_DC_BASE(pipe) (X100_PIPE_BASE(pipe) + 0x0000) +#define X100_DCREQ_BASE(pipe) (X100_PIPE_BASE(pipe) + 0x2000) +#define X100_DP_BASE(pipe) (X100_PIPE_BASE(pipe) + 0x3000) +#define X100_ADDRESS_TRANSFORM_BASE 0x4000 +#define X100_PHY_ACCESS_BASE(pipe) (X100_PIPE_BASE(pipe) + 0x5000) +/******************************register base end******************************************/ + +/******************************dc register start******************************************/ +#define PHYTIUM_DC_FRAMEBUFFER_Y_ADDRESS 0x1400 + #define ADDRESS_MASK 0xffffff80 +#define PHYTIUM_DC_FRAMEBUFFER_Y_STRIDE 0x1408 +#define PHYTIUM_DC_PANEL_CONFIG 0x1418 + #define PANEL_DATAENABLE_ENABLE (1<<0) + #define PANEL_DATA_ENABLE (1<<4) + #define PANEL_CLOCK_ENABLE (1<<8) +#define PHYTIUM_DC_HDISPLAY 0x1430 + #define HDISPLAY_END_SHIFT 0 + #define HDISPLAY_END_MASK 0x7fff + #define HDISPLAY_TOTAL_SHIFT 16 + #define HDISPLAY_TOTAL_MASK 0x7fff +#define PHYTIUM_DC_HSYNC 0x1438 + #define HSYNC_START_SHIFT 0 + #define HSYNC_START_MASK 0x7fff + #define HSYNC_END_SHIFT 15 + #define HSYNC_END_MASK 0x7fff + #define HSYNC_PULSE_ENABLED (1<<30) + #define HSYNC_NEGATIVE (1<<31) +#define PHYTIUM_DC_VDISPLAY 0x1440 + #define VDISPLAY_END_SHIFT 0 + #define VDISPLAY_END_MASK 0x7fff + #define VDISPLAY_TOTAL_SHIFT 16 + #define VDISPLAY_TOTAL_MASK 0x7fff +#define PHYTIUM_DC_VSYNC 0x1448 + #define VSYNC_START_SHIFT 0 + #define VSYNC_START_MASK 0x7fff + #define VSYNC_END_SHIFT 15 + #define VSYNC_END_MASK 0x7fff + #define VSYNC_PULSE_ENABLED (1<<30) + #define VSYNC_NEGATIVE (1<<31) +#define PHYTIUM_DC_DISPLAY_CURRENT_LOCATION 0x1450 +#define PHYTIUM_DC_GAMMA_INDEX 0x1458 + #define GAMMA_INDEX_MAX 256 +#define PHYTIUM_DC_GAMMA_DATA 0x1460 + #define GAMMA_BLUE_SHIFT 0 + #define GAMMA_BLUE_MASK 0x3ff + #define 
GAMMA_GREEN_SHIFT 10 + #define GAMMA_GREEN_MASK 0x3ff + #define GAMMA_RED_SHIFT 20 + #define GAMMA_RED_MASK 0x3ff +#define PHYTIUM_DC_CURSOR_CONFIG 0x1468 + #define CURSOR_FORMAT_DISABLED 0x0 + #define CURSOR_FORMAT_MASKMODE 0x3 + #define CURSOR_FORMAT_ARGB8888 0x2 + #define CURSOR_FORMAT_MASK 0x3 + #define CURSOR_HOT_Y_SHIFT 8 + #define CURSOR_HOT_Y_MASK 0x1f + #define CURSOR_HOT_X_SHIFT 16 + #define CURSOR_HOT_X_MASK 0x1f +#define PHYTIUM_DC_CURSOR_ADDRESS 0x146c +#define PHYTIUM_DC_CURSOR_LOCATION 0x1470 + #define CURSOR_X_SHIFT 0 + #define CURSOR_X_MASK 0x7fff + #define CURSOR_Y_SHIFT 16 + #define CURSOR_Y_MASK 0x7fff +#define PHYTIUM_DC_CURSOR_BACKGROUND 0x1474 +#define PHYTIUM_DC_CURSOR_FOREGROUND 0x1478 +#define PHYTIUM_DC_INT_STATUS 0x147c + #define INT_STATUS 0x1 +#define PHYTIUM_DC_INT_ENABLE 0x1480 + #define INT_ENABLE 0x1 + #define INT_DISABLE 0x0 + +#define PHYTIUM_DC_FRAMEBUFFER_CONFIG 0x1518 + #define FRAMEBUFFER_OUTPUT BIT(0) + #define FRAMEBUFFER_GAMMA_ENABLE BIT(2) + #define FRAMEBUFFER_VALID_PENDING BIT(3) + #define FRAMEBUFFER_RESET BIT(4) + #define FRAMEBUFFER_PROGRESS BIT(6) + #define FRAMEBUFFER_ROT_ANGLE_SHIFT (11) + #define FRAMEBUFFER_ROT_ANGLE_MASK (0x7) + #define FRAMEBUFFER_ROT_ANGLE_ROT0 (0) + #define FRAMEBUFFER_ROT_ANGLE_FLIP_X (1) + #define FRAMEBUFFER_ROT_ANGLE_FLIP_Y (2) + #define FRAMEBUFFER_TILE_MODE_SHIFT (17) + #define FRAMEBUFFER_TILE_MODE_MASK (0x1f) + #define FRAMEBUFFER_LINEAR 0 + #define FRAMEBUFFER_TILE_MODE0 4 + #define FRAMEBUFFER_TILE_MODE3 7 + #define FRAMEBUFFER_FORMAT_SHIFT 26 + #define FRAMEBUFFER_FORMAT_MASK 0x3f + #define FRAMEBUFFER_FORMAT_XRGB4444 0x0 + #define FRAMEBUFFER_FORMAT_ARGB4444 0x1 + #define FRAMEBUFFER_FORMAT_XRGB1555 0x2 + #define FRAMEBUFFER_FORMAT_ARGB1555 0x3 + #define FRAMEBUFFER_FORMAT_RGB565 0x4 + #define FRAMEBUFFER_FORMAT_XRGB8888 0x5 + #define FRAMEBUFFER_FORMAT_ARGB8888 0x6 + #define FRAMEBUFFER_FORMAT_YUYV 0x7 + #define FRAMEBUFFER_FORMAT_UYVY 0x8 + #define FRAMEBUFFER_FORMAT_NV12 0x11 
+ #define FRAMEBUFFER_FORMAT_NV16 0x12 + #define FRAMEBUFFER_FORMAT_ARGB2101010 0x16 + #define FRAMEBUFFER_SWIZZLE_SHIFT 23 + #define FRAMEBUFFER_SWIZZLE_MASK 0x3 + #define FRAMEBUFFER_SWIZZLE_ARGB 0 + #define FRAMEBUFFER_SWIZZLE_RGBA 1 + #define FRAMEBUFFER_SWIZZLE_ABGR 2 + #define FRAMEBUFFER_SWIZZLE_BGRA 3 + #define FRAMEBUFFER_UVSWIZZLE_SHIFT 25 + #define FRAMEBUFFER_UVSWIZZLE_DISABLE 0 + #define FRAMEBUFFER_UVSWIZZLE_ENABLE 1 + #define FRAMEBUFFER_CLEAR BIT(8) + #define FRAMEBUFFER_SCALE_ENABLE BIT(22) +#define PHYTIUM_DC_FRAMEBUFFER_SCALECONFIG 0x1520 + #define FRAMEBUFFER_FILTER_TAP 3 + #define FRAMEBUFFER_HORIZONTAL_FILTER_TAP 3 + #define FRAMEBUFFER_TAP 0x33 +#define PHYTIUM_DC_FRAMEBUFFER_U_ADDRESS 0x1530 +#define PHYTIUM_DC_FRAMEBUFFER_V_ADDRESS 0x1538 +#define PHYTIUM_DC_OVERLAY_CONFIG 0x1540 + #define X100_DC_OVERLAY_ENABLE BIT(24) + +#define PHYTIUM_DC_FRAMEBUFFER_U_STRIDE 0x1800 +#define PHYTIUM_DC_FRAMEBUFFER_V_STRIDE 0x1808 +#define PHYTIUM_DC_FRAMEBUFFER_SIZE 0x1810 + #define WIDTH_SHIFT 0 + #define WIDTH_MASK 0x7fff + #define HEIGHT_SHIFT 15 + #define HEIGHT_MASK 0x7fff + +#define PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_X 0x1828 + #define SCALE_FACTOR_X_MASK 0x7fffffff +#define PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_Y 0x1830 + #define SCALE_FACTOR_Y_MASK 0x7fffffff + #define SCALE_FACTOR_Y_MAX 0x3 + #define SCALE_FACTOR_SRC_OFFSET 16 + +#define PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER_INDEX 0x1838 + #define HORI_FILTER_INDEX 0x0 +#define PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER 0x1a00 +#define PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER_INDEX 0x1a08 + #define VERT_FILTER_INDEX 0x0 +#define PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER 0x1a10 +#define PHYTIUM_DC_FRAMEBUFFER_CLEARVALUE 0x1a18 + #define CLEAR_VALUE_RED 0x00ff0000 + #define CLEAR_VALUE_GREEN 0x0000ff00 +#define PHYTIUM_DC_FRAMEBUFFER_INITIALOFFSET 0x1a20 + #define INITIALOFFSET (0x8000 | (0X8000 << 16)) +#define PHYTIUM_DC_DP_CONFIG 0x1cd0 + #define OUTPUT_DP (1<<3) + #define DP_RGB666 (0x1) + #define DP_RGB888 (0x2) + 
#define DP_RGB101010 (0x3) +/******************************dc register end********************************************/ + +/******************************phy access register****************************************/ +#define PHYTIUM_PHY_ACCESS_ADDRESS 0x0000 +#define PHYTIUM_PHY_WRITE_DATA 0x0004 +#define PHYTIUM_PHY_READ_DATA 0x0008 +#define PHYTIUM_PHY_ACCESS_CTRL 0x000c + #define ACCESS_WRITE (1<<0) + #define ACCESS_READ (1<<1) +/******************************phy access register end*************************************/ + +/******************************dp register start******************************************/ +#define PHYTIUM_DP_LINK_BW_SET 0x0000 +#define PHYTIUM_DP_LANE_COUNT_SET 0x0004 +#define PHYTIUM_DP_ENHANCED_FRAME_EN 0x0008 + #define ENHANCED_FRAME_ENABLE 0x1 + #define ENHANCED_FRAME_DISABLE 0x0 +#define PHYTIUM_DP_TRAINING_PATTERN_SET 0x000c + #define TRAINING_OFF 0x0 + #define TRAINING_PATTERN_1 0x1 + #define TRAINING_PATTERN_2 0x2 + #define TRAINING_PATTERN_3 0x3 + #define TRAINING_PATTERN_4 0x4 +#define PHYTIUM_DP_LINK_QUAL_PATTERN_SET 0x0010 + #define TEST_PATTERN_NONE 0x0 + #define TEST_PATTERN_D10_2 0x1 + #define TEST_PATTERN_SYMBOL_ERROR 0x2 + #define TEST_PATTERN_PRBS7 0x3 + #define TEST_PATTERN_80BIT_CUSTOM 0x4 + #define TEST_PATTERN_CP2520_1 0x5 + #define TEST_PATTERN_CP2520_2 0x6 + #define TEST_PATTERN_CP2520_3 0x7 + #define TEST_PATTERN_LANE_SHIFT 8 +#define PHYTIUM_DP_SCRAMBLING_DISABLE 0x0014 + #define SCRAMBLING_ENABLE 0x0 + #define SCRAMBLING_DISABLE 0x1 +#define PHYTIUM_DP_DOWNSPREAD_CTRL 0x0018 +#define PHYTIUM_DP_ALT_SCRAMBLER_RESET 0x001c +#define PHYTIUM_DP_HBR2_SCRAMBLER_RESET 0x0020 +#define PHYTIUM_DP_DISPLAYPORT_VERSION 0x0024 +#define PHYTIUM_DP_CUSTOM_80BIT_PATTERN_0 0x0030 +#define PHYTIUM_DP_CUSTOM_80BIT_PATTERN_1 0x0034 +#define PHYTIUM_DP_CUSTOM_80BIT_PATTERN_2 0x0038 +#define PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE 0x0080 + #define TRANSMITTER_OUTPUT_ENABLE BIT(0) + #define TRANSMITTER_OUTPUT_DISABLE 0 +#define 
PHYTIUM_DP_VIDEO_STREAM_ENABLE 0x0084 + #define SST_MST_SOURCE_0_ENABLE BIT(0) + #define SST_MST_SOURCE_0_ENABLE_MASK 0x1 + #define SST_MST_SOURCE_0_DISABLE 0 +#define PHYTIUM_DP_SECONDARY_STREAM_ENABLE 0x0088 + #define SECONDARY_STREAM_ENABLE 0x1 + #define SECONDARY_STREAM_DISABLE 0x0 +#define PHYTIUM_DP_SEC_DATA_WINDOW 0x008C +#define PHYTIUM_DP_SOFT_RESET 0x0090 + #define LINK_SOFT_RESET (0x1 << 0) + #define VIDEO_SOFT_RESET (0x1 << 1) +#define PHYTIUM_INPUT_SOURCE_ENABLE 0x0094 + #define VIRTUAL_SOURCE_0_ENABLE BIT(0) + #define VIRTUAL_SOURCE_0_ENABLE_MASK 0x1 +#define PHYTIUM_DP_FORCE_SCRAMBLER_RESET 0x00C0 + #define SCRAMBLER_RESET BIT(0) +#define PHYTIUM_DP_SOURCE_CONTROL_STATUS 0x00C4 +#define PHYTIUM_DP_DATA_CONTROL 0x00C8 +#define PHYTIUM_DP_CORE_CAPABILITY 0x00F8 +#define PHYTIUM_DP_CORE_ID 0x00FC +#define PHYTIUM_DP_AUX_COMMAND 0x0100 + #define BYTE_COUNT_MASK 0xf + #define COMMAND_SHIFT 8 + #define COMMAND_MASK 0xf + #define ADDRESS_ONLY (1<<12) +#define PHYTIUM_DP_AUX_WRITE_FIFO 0x0104 +#define PHYTIUM_DP_AUX_ADDRESS 0x0108 +#define PHYTIUM_DP_AUX_CLK_DIVIDER 0x010C + #define AUX_CLK_DIVIDER 48 +#define PHYTIUM_DP_SINK_HPD_STATE 0x0128 + #define HPD_CONNECT 0x1 + #define HPD_DISCONNECT 0x0 +#define PHYTIUM_DP_INTERRUPT_RAW_STATUS 0x0130 + #define REPLY_TIMEOUT (1<<3) + #define DP_STATUS_REQUEST_IN_PROGRESS (1<<1) + #define HPD_STATE (0<<1) +#define PHYTIUM_DP_AUX_REPLY_DATA 0x0134 +#define PHYTIUM_DP_AUX_REPLY_CODE 0x0138 + #define AUX_NATIVE_ACK (0x0<<0) + #define AUX_NATIVE_NACK (0x1<<0) + #define AUX_NATIVE_DEFER (0x2<<0) + #define AUX_NATIVE_MASK (0x3 << 0) + #define AUX_I2C_ACK (0x0<<2) + #define AUX_I2C_NACK (0x1<<2) + #define AUX_I2C_DEFER (0x2<<2) + #define AUX_I2C_MASK (0x3 << 2) +#define PHYTIUM_DP_INTERRUPT_STATUS 0x0140 + #define HPD_IRQ (1<<1) + #define HPD_EVENT (1<<0) +#define PHYTIUM_DP_INTERRUPT_MASK 0x0144 + #define HPD_IRQ_MASK (1<<1) + #define HPD_EVENT_MASK (1<<0) + #define HPD_OTHER_MASK 0x3c +#define 
PHYTIUM_DP_AUX_REPLY_DATA_COUNT 0x0148 +#define PHYTIUM_DP_AUX_STATUS 0x014C + #define REPLY_RECEIVED 0x1 + #define REPLY_IN_PROGRESS 0x2 + #define REQUEST_IN_PROGRESS 0x4 + #define REPLY_ERROR 0x8 +#define PHYTIUM_DP_AUX_TIMER 0x0158 +#define PHYTIUM_DP_MAIN_LINK_HTOTAL 0x0180 +#define PHYTIUM_DP_MAIN_LINK_VTOTAL 0x0184 +#define PHYTIUM_DP_MAIN_LINK_POLARITY 0x0188 + #define VSYNC_POLARITY_LOW BIT(1) + #define HSYNC_POLARITY_LOW BIT(0) +#define PHYTIUM_DP_MAIN_LINK_HSWIDTH 0x018C +#define PHYTIUM_DP_MAIN_LINK_VSWIDTH 0x0190 +#define PHYTIUM_DP_MAIN_LINK_HRES 0x0194 +#define PHYTIUM_DP_MAIN_LINK_VRES 0x0198 +#define PHYTIUM_DP_MAIN_LINK_HSTART 0x019C +#define PHYTIUM_DP_MAIN_LINK_VSTART 0x01A0 +#define PHYTIUM_DP_MAIN_LINK_MISC0 0x01A4 + #define MISC0_SYNCHRONOUS_CLOCK BIT(0) + #define MISC0_BIT_DEPTH_OFFSET 5 + #define MISC0_BIT_DEPTH_6BIT 0x0 + #define MISC0_BIT_DEPTH_8BIT 0x1 + #define MISC0_BIT_DEPTH_10BIT 0x2 + #define MISC0_COMPONENT_FORMAT_SHIFT 1 + #define MISC0_COMPONENT_FORMAT_RGB 0x0 +#define PHYTIUM_DP_MAIN_LINK_MISC1 0x01A8 +#define PHYTIUM_DP_M_VID 0x01AC +#define PHYTIUM_DP_TRANSFER_UNIT_SIZE 0x01B0 +#define PHYTIUM_DP_N_VID 0x01B4 +#define PHYTIUM_DP_USER_PIXEL_WIDTH 0x01B8 +#define PHYTIUM_DP_DATA_COUNT 0x01BC +#define PHYTIUM_DP_INTERLACED 0x01C0 +#define PHYTIUM_DP_USER_SYNC_POLARITY 0x01C4 + #define USER_ODDEVEN_POLARITY_HIGH BIT(3) + #define USER_DATA_ENABLE_POLARITY_HIGH BIT(2) + #define USER_VSYNC_POLARITY_HIGH BIT(1) + #define USER_HSYNC_POLARITY_HIGH BIT(0) +#define PHYTIUM_DP_USER_CONTROL 0x01C8 +#define PHYTIUM_EDP_CRC_ENABLE 0x01D0 +#define PHYTIUM_EDP_CRC_RED 0x01D4 +#define PHYTIUM_EDP_CRC_GREEN 0x01D8 +#define PHYTIUM_EDP_CRC_BLUE 0x01DC +#define PHYTIUM_DP_SEC_AUDIO_ENABLE 0x0300 + #define SEC_AUDIO_ENABLE BIT(0) + #define CHANNEL_MUTE_ENABLE BIT(1) +#define PHYTIUM_DP_SEC_INPUT_SELECT 0x0304 + #define INPUT_SELECT_I2S 0x0 +#define PHYTIUM_DP_SEC_CHANNEL_COUNT 0x0308 + #define CHANNEL_2 0x2 + #define CHANNEL_2_LFE 0x3 + #define 
CHANNEL_5_1 0x6 + #define CHANNEL_7_1 0x7 + #define CHANNEL_MASK 0xf +#define PHYTIUM_DP_SEC_DIRECT_CLKDIV 0x030c + #define APB_CLOCK 48000000 +#define PHYTIUM_DP_SEC_MAUD 0x0318 +#define PHYTIUM_DP_SEC_NAUD 0x031c +#define PHYTIUM_DP_SEC_CLOCK_MODE 0x0320 + #define CLOCK_MODE_SYNC 0x1 +#define PHYTIUM_DP_SEC_CS_SOURCE_FORMAT 0x0340 + #define CS_SOURCE_FORMAT_DEFAULT 0x0 +#define PHYTIUM_DP_SEC_CS_CATEGORY_CODE 0x0344 +#define PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ 0x0348 + #define ORIG_FREQ_32000 0xc + #define ORIG_FREQ_44100 0xf + #define ORIG_FREQ_48000 0xd + #define ORIG_FREQ_88200 0x7 + #define ORIG_FREQ_96000 0x5 + #define ORIG_FREQ_176400 0x3 + #define ORIG_FREQ_192000 0x1 + #define ORIG_FREQ_MASK 0xf + #define ORIG_FREQ_SHIFT 0 + #define WORD_LENGTH_16 0x4 + #define WORD_LENGTH_18 0x2 + #define WORD_LENGTH_20 0xc + #define WORD_LENGTH_24 0xd + #define WORD_LENGTH_MASK 0xf + #define WORD_LENGTH_SHIFT 4 +#define PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY 0x034c // not used + #define SAMPLING_FREQ_32000 0xc + #define SAMPLING_FREQ_44100 0x0 + #define SAMPLING_FREQ_48000 0x4 + #define SAMPLING_FREQ_88200 0x1 + #define SAMPLING_FREQ_96000 0x5 + #define SAMPLING_FREQ_176400 0x3 + #define SAMPLING_FREQ_192000 0x7 + #define SAMPLING_FREQ_MASK 0xf + #define SAMPLING_FREQ_SHIFT 4 +#define PHYTIUM_DP_SEC_CHANNEL_MAP 0x035C + #define CHANNEL_MAP_DEFAULT 0x87654321 +/******************************dp register end********************************************/ + +#endif /* __PHYTIUM_REG_H__ */ diff --git a/drivers/gpu/drm/phytium/x100_dc.c b/drivers/gpu/drm/phytium/x100_dc.c new file mode 100644 index 000000000000..06394c232dab --- /dev/null +++ b/drivers/gpu/drm/phytium/x100_dc.c @@ -0,0 +1,321 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "x100_reg.h" +#include "phytium_crtc.h" +#include "phytium_plane.h" +#include "phytium_fb.h" +#include "phytium_gem.h" + +static const unsigned int x100_primary_formats[] = { + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_ABGR2101010, + DRM_FORMAT_RGBA1010102, + DRM_FORMAT_BGRA1010102, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_ARGB4444, + DRM_FORMAT_ABGR4444, + DRM_FORMAT_RGBA4444, + DRM_FORMAT_BGRA4444, + DRM_FORMAT_XRGB4444, + DRM_FORMAT_XBGR4444, + DRM_FORMAT_RGBX4444, + DRM_FORMAT_BGRX4444, + DRM_FORMAT_ARGB1555, + DRM_FORMAT_ABGR1555, + DRM_FORMAT_RGBA5551, + DRM_FORMAT_BGRA5551, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_XBGR1555, + DRM_FORMAT_RGBX5551, + DRM_FORMAT_BGRX5551, + DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, + DRM_FORMAT_YUYV, + DRM_FORMAT_UYVY, +}; + +static uint64_t x100_primary_formats_modifiers[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC, + DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC, + DRM_FORMAT_MOD_INVALID +}; + +static const unsigned int x100_cursor_formats[] = { + DRM_FORMAT_ARGB8888, +}; + +void x100_dc_hw_vram_init(struct phytium_display_private *priv, resource_size_t vram_addr, + resource_size_t vram_size) +{ + uint32_t config; + uint32_t group_offset = priv->address_transform_base; + + config = phytium_readl_reg(priv, group_offset, + X100_GPU_ADDRESS_TRANSFORM_SRC_ADDR); + if (config) + phytium_writel_reg(priv, config, group_offset, + X100_GPU_ADDRESS_TRANSFORM_SRC_ADDR); + + config = phytium_readl_reg(priv, group_offset, + X100_GPU_ADDRESS_TRANSFORM_SIZE); + if (config) + phytium_writel_reg(priv, config, group_offset, + X100_GPU_ADDRESS_TRANSFORM_SIZE); + + config = phytium_readl_reg(priv, group_offset, + X100_GPU_ADDRESS_TRANSFORM_DST_ADDR); + if (config) + phytium_writel_reg(priv, 
config, group_offset, + X100_GPU_ADDRESS_TRANSFORM_DST_ADDR); + + phytium_writel_reg(priv, (vram_addr & SRC_ADDR_MASK) >> SRC_ADDR_OFFSET, + group_offset, X100_DC_ADDRESS_TRANSFORM_SRC_ADDR); + phytium_writel_reg(priv, (vram_size >> SIZE_OFFSET) | ADDRESS_TRANSFORM_ENABLE, + group_offset, X100_DC_ADDRESS_TRANSFORM_SIZE); + config = phytium_readl_reg(priv, group_offset, X100_DC_ADDRESS_TRANSFORM_DST_ADDR); + phytium_writel_reg(priv, config, group_offset, X100_DC_ADDRESS_TRANSFORM_DST_ADDR); +} + +void x100_dc_hw_clear_msi_irq(struct phytium_display_private *priv, uint32_t phys_pipe) +{ + phytium_writel_reg(priv, MSI_CLEAR, priv->dcreq_reg_base[phys_pipe], X100_DCREQ_MSI_CLEAR); +} + +void x100_dc_hw_config_pix_clock(struct drm_crtc *crtc, int clock) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; + uint32_t group_offset = priv->dcreq_reg_base[phys_pipe]; + int ret = 0; + + /* config pix clock */ + phytium_writel_reg(priv, FLAG_REQUEST | CMD_PIXEL_CLOCK | (clock & PIXEL_CLOCK_MASK), + group_offset, X100_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + X100_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to set pixel clock\n", __func__); +} + +void x100_dc_hw_disable(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int reset_timeout = 100; + int config = 0; + int phys_pipe = phytium_crtc->phys_pipe; + + // reset dc + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], X100_DC_CLOCK_CONTROL); + phytium_writel_reg(priv, config | SOFT_RESET, priv->dc_reg_base[phys_pipe], + X100_DC_CLOCK_CONTROL); + phytium_writel_reg(priv, 0, priv->dc_reg_base[phys_pipe], X100_DC_CLOCK_CONTROL); + do { + config = 
phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], X100_DC_CLOCK_IDLE); + if (config | IS_IDLE) + break; + mdelay(1); + reset_timeout--; + } while (reset_timeout); + + /* reset pix clock */ + x100_dc_hw_config_pix_clock(crtc, 0); + + // reset dc + reset_timeout = 100; + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], X100_DC_CLOCK_CONTROL); + phytium_writel_reg(priv, config | SOFT_RESET, priv->dc_reg_base[phys_pipe], + X100_DC_CLOCK_CONTROL); + phytium_writel_reg(priv, 0, priv->dc_reg_base[phys_pipe], X100_DC_CLOCK_CONTROL); + do { + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], X100_DC_CLOCK_IDLE); + if (config | IS_IDLE) + break; + mdelay(1); + reset_timeout--; + } while (reset_timeout); + + /* reset dcreq */ + phytium_writel_reg(priv, DCREQ_PLAN_A, priv->dcreq_reg_base[phys_pipe], X100_DCREQ_PLAN); + phytium_writel_reg(priv, 0, priv->dcreq_reg_base[phys_pipe], X100_DCREQ_CONTROL); + phytium_writel_reg(priv, DCREQ_RESET, priv->dcreq_reg_base[phys_pipe], X100_DCREQ_RESET); + msleep(20); + phytium_writel_reg(priv, (~DCREQ_RESET)&DCREQ_RESET_MASK, + priv->dcreq_reg_base[phys_pipe], X100_DCREQ_RESET); +} + +int x100_dc_hw_fb_format_check(const struct drm_mode_fb_cmd2 *mode_cmd, int count) +{ + int ret = 0; + + switch (mode_cmd->modifier[count]) { + case DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC: + switch (mode_cmd->pixel_format) { + case DRM_FORMAT_ARGB4444: + case DRM_FORMAT_ABGR4444: + case DRM_FORMAT_RGBA4444: + case DRM_FORMAT_BGRA4444: + case DRM_FORMAT_XRGB4444: + case DRM_FORMAT_XBGR4444: + case DRM_FORMAT_RGBX4444: + case DRM_FORMAT_BGRX4444: + case DRM_FORMAT_ARGB1555: + case DRM_FORMAT_ABGR1555: + case DRM_FORMAT_RGBA5551: + case DRM_FORMAT_BGRA5551: + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_XBGR1555: + case DRM_FORMAT_RGBX5551: + case DRM_FORMAT_BGRX5551: + case DRM_FORMAT_RGB565: + case DRM_FORMAT_BGR565: + case DRM_FORMAT_YUYV: + case DRM_FORMAT_UYVY: + break; + default: + DRM_ERROR("TILE_MODE0_FBCDC not support 
DRM_FORMAT %d", + mode_cmd->pixel_format); + ret = -EINVAL; + goto error; + } + break; + case DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC: + switch (mode_cmd->pixel_format) { + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_BGRA1010102: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_BGRA8888: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_BGRX8888: + break; + default: + DRM_ERROR("TILE_MODE3_FBCDC not support DRM_FORMAT %d", + mode_cmd->pixel_format); + ret = -EINVAL; + goto error; + } + break; + case DRM_FORMAT_MOD_LINEAR: + break; + default: + DRM_ERROR("unsupported fb modifier 0x%llx\n", mode_cmd->modifier[0]); + ret = -EINVAL; + goto error; + } + + return 0; +error: + return ret; +} + +void x100_dc_hw_plane_get_primary_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count) +{ + *format_modifiers = x100_primary_formats_modifiers; + *formats = x100_primary_formats; + *format_count = ARRAY_SIZE(x100_primary_formats); +} + +void x100_dc_hw_plane_get_cursor_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count) +{ + *format_modifiers = NULL; + *formats = x100_cursor_formats; + *format_count = ARRAY_SIZE(x100_cursor_formats); +} + +void x100_dc_hw_update_dcreq(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + int phys_pipe = phytium_plane->phys_pipe; + uint32_t group_offset = priv->dcreq_reg_base[phys_pipe]; + int config; + + if (phytium_plane->tiling[0] == FRAMEBUFFER_LINEAR) { + phytium_writel_reg(priv, DCREQ_MODE_LINEAR, + group_offset, X100_DCREQ_PLANE0_CONFIG); + } else { + config = DCREQ_NO_LOSSY; + if (phytium_plane->tiling[0] == FRAMEBUFFER_TILE_MODE0) + config |= 
DCREQ_TILE_TYPE_MODE0; + else if (phytium_plane->tiling[0] == FRAMEBUFFER_TILE_MODE3) + config |= DCREQ_TILE_TYPE_MODE3; + else + config |= DCREQ_TILE_TYPE_MODE0; + + switch (phytium_plane->format) { + case FRAMEBUFFER_FORMAT_ARGB8888: + case FRAMEBUFFER_FORMAT_XRGB8888: + config |= DCREQ_COLOURFORMAT_BGRA8888; + break; + case FRAMEBUFFER_FORMAT_ARGB2101010: + config |= DCREQ_COLOURFORMAT_ARGB2101010; + break; + case FRAMEBUFFER_FORMAT_XRGB4444: + case FRAMEBUFFER_FORMAT_ARGB4444: + config |= DCREQ_COLOURFORMAT_ARGB4444; + break; + case FRAMEBUFFER_FORMAT_XRGB1555: + case FRAMEBUFFER_FORMAT_ARGB1555: + config |= DCREQ_COLOURFORMAT_ARGB1555; + break; + case FRAMEBUFFER_FORMAT_RGB565: + config |= DCREQ_COLOURFORMAT_RGB565; + break; + case FRAMEBUFFER_FORMAT_YUYV: + config |= DCREQ_COLOURFORMAT_YUYV; + break; + case FRAMEBUFFER_FORMAT_UYVY: + config |= DCREQ_COLOURFORMAT_UYVY; + break; + } + config |= DCREQ_ARGBSWIZZLE_ARGB; + config |= DCREQ_MODE_TILE; + phytium_writel_reg(priv, phytium_plane->iova[0] & 0xffffffff, + group_offset, X100_DCREQ_PLANE0_ADDR_START); + phytium_writel_reg(priv, (phytium_plane->iova[0] + phytium_plane->size[0]) & + 0xffffffff, group_offset, X100_DCREQ_PLANE0_ADDR_END); + phytium_writel_reg(priv, config, group_offset, X100_DCREQ_PLANE0_CONFIG); + } +} + +void x100_dc_hw_update_primary_hi_addr(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + int phys_pipe = phytium_plane->phys_pipe; + + phytium_writel_reg(priv, (phytium_plane->iova[0] >> PREFIX_SHIFT) & PREFIX_MASK, + priv->dcreq_reg_base[phys_pipe], X100_DCREQ_PIX_DMA_PREFIX); +} diff --git a/drivers/gpu/drm/phytium/x100_dc.h b/drivers/gpu/drm/phytium/x100_dc.h new file mode 100644 index 000000000000..ae98b4ffe0cf --- /dev/null +++ b/drivers/gpu/drm/phytium/x100_dc.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm 
driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + +#ifndef __X100_DC_H__ +#define __X100_DC_H__ + +#define X100_DC_PIX_CLOCK_MAX (594000) +#define x100_DC_HDISPLAY_MAX 3840 +#define X100_DC_VDISPLAY_MAX 2160 +#define X100_DC_ADDRESS_MASK 0x3f + +extern void x100_dc_hw_vram_init(struct phytium_display_private *priv, + resource_size_t vram_addr, + resource_size_t vram_size); +extern void x100_dc_hw_clear_msi_irq(struct phytium_display_private *priv, uint32_t phys_pipe); +extern void x100_dc_hw_config_pix_clock(struct drm_crtc *crtc, int clock); +extern void x100_dc_hw_disable(struct drm_crtc *crtc); +extern int x100_dc_hw_fb_format_check(const struct drm_mode_fb_cmd2 *mode_cmd, int count); +extern void x100_dc_hw_plane_get_primary_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count); +extern void x100_dc_hw_plane_get_cursor_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count); +void x100_dc_hw_update_dcreq(struct drm_plane *plane); +void x100_dc_hw_update_primary_hi_addr(struct drm_plane *plane); +#endif /* __X100_DC_H__ */ diff --git a/drivers/gpu/drm/phytium/x100_dp.c b/drivers/gpu/drm/phytium/x100_dp.c new file mode 100644 index 000000000000..4cc390442461 --- /dev/null +++ b/drivers/gpu/drm/phytium/x100_dp.c @@ -0,0 +1,907 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. 
+ */ + +#include "phytium_display_drv.h" +#include "x100_reg.h" +#include "phytium_dp.h" +#include "x100_dp.h" + +/* [reg][ling_rate 1.62->8.1] */ +static int vco_val[12][4] = { + {0x0509, 0x0509, 0x0509, 0x0509}, // CP_PADJ + {0x0f00, 0x0f00, 0x0f00, 0x0f00}, // CP_IADJ + {0x0F08, 0x0F08, 0x0F08, 0x0F08}, // FILT_PADJ + {0x0061, 0x006C, 0x006C, 0x0051}, // INTDIV + {0x3333, 0x0000, 0x0000, 0x0000}, // FRACDIVL + {0x0000, 0x0000, 0x0000, 0x0000}, // FRACDIVH + {0x0042, 0x0048, 0x0048, 0x0036}, // HIGH_THR + {0x0002, 0x0002, 0x0002, 0x0002}, // PDIAG_CTRL + {0x0c5e, 0x0c5e, 0x0c5e, 0x0c5e}, // VCOCAL_PLLCNT_START + {0x00c7, 0x00c7, 0x00c7, 0x00c7}, // LOCK_PEFCNT + {0x00c7, 0x00c7, 0x00c7, 0x00c7}, // LOCK_PLLCNT_START + {0x0005, 0x0005, 0x0005, 0x0005}, // LOCK_PLLCNT_THR +}; + +static int mgnfs_val[4][4][4] = // [link_rate][swing][emphasis] +{ + /* 1.62Gbps */ + { + {0x0026, 0x001f, 0x0012, 0x0000}, + {0x0013, 0x0013, 0x0000, 0x0000}, + {0x0006, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 2.7Gbps */ + { + {0x0026, 0x001f, 0x0012, 0x0000}, + {0x0013, 0x0013, 0x0000, 0x0000}, + {0x0006, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 5.4Gbps */ + { + {0x0026, 0x0013, 0x005, 0x0000}, + {0x0018, 0x006, 0x0000, 0x0000}, + {0x000c, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 8.1Gbps */ + { + {0x0026, 0x0013, 0x005, 0x0000}, + {0x0013, 0x006, 0x0000, 0x0000}, + {0x0006, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, +}; + +static int cpost_val[4][4][4] = // [link_rate][swing][emphasis] +{ + /* 1.62Gbps */ + { + {0x0000, 0x0014, 0x0020, 0x002a}, + {0x0000, 0x0010, 0x001f, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 2.7Gbps */ + { + {0x0000, 0x0014, 0x0020, 0x002a}, + {0x0000, 0x0010, 0x001f, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 5.4Gbps */ + { + {0x0000, 0x0014, 0x0022, 
0x002e}, + {0x0000, 0x0013, 0x0020, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 8.1Gbps */ + { + {0x0000, 0x0014, 0x0022, 0x002e}, + {0x0000, 0x0013, 0x0020, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, +}; + +static int x100_dp_hw_set_phy_lane_and_rate(struct phytium_dp_device *phytium_dp, + uint8_t link_lane_count, + uint32_t link_rate) +{ + int port = phytium_dp->port%3; + int i = 0, data, tmp, tmp1, index = 0, mask; + int timeout = 500, ret = 0; + + if (port == 0 || port == 1) { + /* set pma powerdown */ + data = 0; + mask = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) { + data |= (A3_POWERDOWN3 << i*A3_POWERDOWN3_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (PLL_EN << i*PLL_EN_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (PLL_EN << i*PLL_EN_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (A0_ACTIVE << i*A0_ACTIVE_SHIFT); + mask |= (((1<port%3; + int voltage_swing = 0; + int pre_emphasis = 0, link_rate_index = 0; + + switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: + default: + voltage_swing = 0; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: + voltage_swing = 1; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: + voltage_swing = 2; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: + voltage_swing = 3; + break; + } + switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { + case DP_TRAIN_PRE_EMPH_LEVEL_0: + default: + pre_emphasis = 0; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_1: + pre_emphasis = 1; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_2: + pre_emphasis = 2; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_3: + pre_emphasis = 3; + break; + } + + switch (link_rate) { + case 810000: + link_rate_index = 3; + break; + case 540000: + link_rate_index = 2; + break; + case 270000: + link_rate_index = 1; + break; + case 162000: + link_rate_index = 0; + break; + default: + DRM_ERROR("phytium 
dp rate(%d) not support\n", link_rate); + link_rate_index = 2; + break; + } + + if (port == 0) { + phytium_phy_writel(phytium_dp, X100_PHY0_PLL0_TX_DIAG_ACYA, LOCK); + phytium_phy_writel(phytium_dp, X100_PHY0_PLL0_TX_TXCC_CTRL, TX_TXCC_CTRL); + phytium_phy_writel(phytium_dp, X100_PHY0_PLL0_TX_DRV, TX_DRV); + phytium_phy_writel(phytium_dp, X100_PHY0_PLL0_TX_MGNFS, + mgnfs_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, X100_PHY0_PLL0_TX_CPOST, + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, X100_PHY0_PLL0_TX_DIAG_ACYA, UNLOCK); + + } else if (port == 1) { + phytium_phy_writel(phytium_dp, X100_PHY0_PLL1_TX_DIAG_ACYA, LOCK); + phytium_phy_writel(phytium_dp, X100_PHY0_PLL1_TX_TXCC_CTRL, TX_TXCC_CTRL); + phytium_phy_writel(phytium_dp, X100_PHY0_PLL1_TX_DRV, TX_DRV); + phytium_phy_writel(phytium_dp, X100_PHY0_PLL1_TX_MGNFS, + mgnfs_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, X100_PHY0_PLL1_TX_CPOST, + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, X100_PHY0_PLL1_TX_CPOST1, + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, X100_PHY0_PLL1_TX_DIAG_ACYA, UNLOCK); + } else { + phytium_phy_writel(phytium_dp, X100_PHY1_PLL0_TX_DIAG_ACYA, LOCK); + phytium_phy_writel(phytium_dp, X100_PHY1_PLL0_TX_TXCC_CTRL, TX_TXCC_CTRL); + phytium_phy_writel(phytium_dp, X100_PHY1_PLL0_TX_DRV, TX_DRV); + phytium_phy_writel(phytium_dp, X100_PHY1_PLL0_TX_MGNFS, + mgnfs_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, X100_PHY1_PLL0_TX_CPOST, + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, X100_PHY1_PLL0_TX_DIAG_ACYA, UNLOCK); + } +} + +static int x100_dp_hw_init_phy(struct phytium_dp_device *phytium_dp) +{ + int port = phytium_dp->port; + int i = 0, data, tmp, mask; + int timeout = 500, ret = 0; + + if (port == 0 
|| port == 1) { + phytium_phy_writel(phytium_dp, X100_PHY0_APB_RESET, APB_RESET); + + phytium_phy_writel(phytium_dp, X100_PHY0_PIPE_RESET, RESET); + + /* config lane to dp mode */ + data = 0; + mask = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) { + data |= (LANE_BIT << i*LANE_BIT_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (LANE_MASTER << i*LANE_MASTER_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (PLL_EN << i*PLL_EN_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (BIT_20 << i*BIT_20_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (A0_ACTIVE << i*A0_ACTIVE_SHIFT); + mask |= (((1<dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dcreq_reg_base[port]; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | PANEL_POWER_ENABLE, + group_offset, X100_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + X100_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to poweron panel\n", __func__); +} + +static void x100_dp_hw_poweroff_panel(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dcreq_reg_base[port]; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | PANEL_POWER_DISABLE, + group_offset, X100_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + X100_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to poweroff panel\n", __func__); +} + +static void x100_dp_hw_enable_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, ret = 0; + uint32_t group_offset = 
priv->dcreq_reg_base[port]; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | BACKLIGHT_ENABLE, + group_offset, X100_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + X100_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to enable backlight\n", __func__); +} + +static void x100_dp_hw_disable_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dcreq_reg_base[port]; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | BACKLIGHT_DISABLE, + group_offset, X100_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + X100_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to disable backlight\n", __func__); +} + +static uint32_t x100_dp_hw_get_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int config; + uint32_t group_offset = priv->address_transform_base; + + config = phytium_readl_reg(priv, group_offset, X100_DC_ADDRESS_TRANSFORM_BACKLIGHT_VALUE); + return ((config >> BACKLIGHT_VALUE_SHIFT) & BACKLIGHT_VALUE_MASK); +} + +static int x100_dp_hw_set_backlight(struct phytium_dp_device *phytium_dp, uint32_t level) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dcreq_reg_base[port]; + int config = 0; + int ret = 0; + + if (level > X100_DP_BACKLIGHT_MAX) { + ret = -EINVAL; + goto out; + } + + config = FLAG_REQUEST | CMD_BACKLIGHT | ((level & BACKLIGHT_MASK) << BACKLIGHT_SHIFT); + phytium_writel_reg(priv, config, group_offset, X100_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + X100_DCREQ_CMD_REGISTER, + FLAG_REQUEST, 
FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to set backlight\n", __func__); + +out: + return ret; +} + +bool x100_dp_hw_spread_is_enable(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, config; + uint32_t group_offset = priv->address_transform_base; + + config = phytium_readl_reg(priv, group_offset, X100_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); + + return ((config & DP_SPREAD_ENABLE(port)) ? true:false); +} + +int x100_dp_hw_reset(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int timeout = 100, config, ret = 0; + uint32_t group_offset = priv->address_transform_base; + + config = phytium_readl_reg(priv, group_offset, X100_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); + config &= (~DC_DP_RESET_STATUS(port)); + + phytium_writel_reg(priv, config, group_offset, X100_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); + phytium_writel_reg(priv, FLAG_REQUEST | CMD_DC_DP_RESET, + priv->dcreq_reg_base[port], X100_DCREQ_CMD_REGISTER); + do { + mdelay(10); + timeout--; + config = phytium_readl_reg(priv, group_offset, + X100_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); + if (config & DC_DP_RESET_STATUS(port)) + break; + } while (timeout); + if (timeout == 0) { + DRM_ERROR("reset dc/dp pipe(%d) failed\n", port); + ret = -1; + } + + return ret; +} + +static struct phytium_dp_func x100_dp_funcs = { + .dp_hw_reset = x100_dp_hw_reset, + .dp_hw_spread_is_enable = x100_dp_hw_spread_is_enable, + .dp_hw_set_backlight = x100_dp_hw_set_backlight, + .dp_hw_get_backlight = x100_dp_hw_get_backlight, + .dp_hw_disable_backlight = x100_dp_hw_disable_backlight, + .dp_hw_enable_backlight = x100_dp_hw_enable_backlight, + .dp_hw_poweroff_panel = x100_dp_hw_poweroff_panel, + .dp_hw_poweron_panel = x100_dp_hw_poweron_panel, + .dp_hw_init_phy = x100_dp_hw_init_phy, + 
.dp_hw_set_phy_lane_setting = x100_dp_hw_set_phy_lane_setting, + .dp_hw_set_phy_lane_and_rate = x100_dp_hw_set_phy_lane_and_rate, +}; + +void x100_dp_func_register(struct phytium_dp_device *phytium_dp) +{ + phytium_dp->funcs = &x100_dp_funcs; +} diff --git a/drivers/gpu/drm/phytium/x100_dp.h b/drivers/gpu/drm/phytium/x100_dp.h new file mode 100644 index 000000000000..a7a0fc48a58b --- /dev/null +++ b/drivers/gpu/drm/phytium/x100_dp.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + +#ifndef __X100_DP_H__ +#define __X100_DP_H__ + +#define X100_DP_BACKLIGHT_MAX 100 + +void x100_dp_func_register(struct phytium_dp_device *phytium_dp); +#endif /* __X100_DP_H__ */ diff --git a/drivers/gpu/drm/phytium/x100_reg.h b/drivers/gpu/drm/phytium/x100_reg.h new file mode 100644 index 000000000000..130430e924b5 --- /dev/null +++ b/drivers/gpu/drm/phytium/x100_reg.h @@ -0,0 +1,349 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. 
+ */ + +#ifndef __X100_REG_H__ +#define __X100_REG_H__ + +#include "phytium_reg.h" + +/******************************dc register start******************************************/ +#define X100_DC_CLOCK_CONTROL 0x0000 + #define SOFT_RESET (1<<12) +#define X100_DC_CLOCK_IDLE 0x0004 + #define IS_IDLE (1<<16) +/******************************dc register end********************************************/ + +/******************************dcreq register start**************************************/ +#define X100_DCREQ_PLANE0_ADDR_START 0x00 +#define X100_DCREQ_PLANE0_ADDR_END 0x04 +#define X100_DCREQ_PLANE1_ADDR_START 0x08 +#define X100_DCREQ_PLANE1_ADDR_END 0x0c +#define X100_DCREQ_PLANE0_CONFIG 0x10 + #define DCREQ_NO_LOSSY (0 << 0) + #define DCREQ_LOSSY (1 << 0) + #define DCREQ_TILE_TYPE_MASK (0x3 << 1) + #define DCREQ_TILE_TYPE_MODE0 (0x1 << 1) + #define DCREQ_TILE_TYPE_MODE3 (0x2 << 1) + #define DCREQ_COLOURFORMAT_MASK (0x7f << 8) + #define DCREQ_COLOURFORMAT_RGB565 (0x5 << 8) + #define DCREQ_COLOURFORMAT_ARGB1555 (0x4 << 8) + #define DCREQ_COLOURFORMAT_ARGB4444 (0x02 << 8) + #define DCREQ_COLOURFORMAT_BGRA8888 (0x29 << 8) + #define DCREQ_COLOURFORMAT_ARGB2101010 (0xe << 8) + #define DCREQ_COLOURFORMAT_YUYV (0x59 << 8) + #define DCREQ_COLOURFORMAT_UYVY (0x5b << 8) + #define DCREQ_ARGBSWIZZLE_MASK (0xf << 4) + #define DCREQ_ARGBSWIZZLE_ARGB (0X0 << 4) + #define DCREQ_ARGBSWIZZLE_BGRA (0XC << 4) + #define DCREQ_MODE_MASK (1 << 16) + #define DCREQ_MODE_LINEAR (0 << 16) + #define DCREQ_MODE_TILE (1 << 16) +#define X100_DCREQ_PLANE1_CONFIG(pipe) 0x14 +#define X100_DCREQ_PLANE0_CLEAR_COLOR_L 0x18 +#define X100_DCREQ_PLANE0_CLEAR_COLOR_H 0x1C +#define X100_DCREQ_PLANE1_CLEAR_COLOR_L 0x20 +#define X100_DCREQ_PLANE1_CLEAR_COLOR_H 0x24 +#define X100_DCREQ_CMD_REGISTER 0x38 + #define FLAG_REPLY (1<<31) + #define FLAG_REQUEST (1<<30) + #define CMD_PIXEL_CLOCK (0x0 << 28) + #define CMD_BACKLIGHT (0x1 << 28) + #define CMD_DC_DP_RESET (0x3 << 28) + #define BACKLIGHT_SHIFT 21 + #define 
BACKLIGHT_MASK 0x7f + #define BACKLIGHT_MAX 100 + #define BACKLIGHT_ENABLE (101 << BACKLIGHT_SHIFT) + #define BACKLIGHT_DISABLE (102 << BACKLIGHT_SHIFT) + #define PANEL_POWER_ENABLE (103 << BACKLIGHT_SHIFT) + #define PANEL_POWER_DISABLE (104 << BACKLIGHT_SHIFT) + #define PIXEL_CLOCK_MASK (0x1fffff) +#define X100_DCREQ_FBCD_CLOCK_CONFIG 0x3c +#define X100_DCREQ_PIX_DMA_PREFIX 0x50 + #define PREFIX_MASK 0xff + #define PREFIX_SHIFT 32 +#define X100_DCREQ_FRAME_START 0x54 +#define X100_DCREQ_FILTER_CONFIG 0x58 +#define X100_DCREQ_CONTROL 0x5C + #define DC_REQ_ENABLE (1<<0) +#define X100_DCREQ_MSI_CLEAR 0x60 + #define MSI_CLEAR 0x0 +#define X100_DCREQ_RESET 0x68 + #define DCREQ_RESET (0x3 << 0) + #define DCREQ_RESET_MASK 0x3 +#define X100_DCREQ_PLAN 0x94 + #define DCREQ_PLAN_A 0x0 + #define DCREQ_PLAN_B 0X5 +/******************************dcreq register end**************************************/ + +/******************************address transform register start**************************/ +#define X100_GPU_ADDRESS_TRANSFORM_SRC_ADDR 0x0 +#define X100_GPU_ADDRESS_TRANSFORM_SIZE 0x4 +#define X100_GPU_ADDRESS_TRANSFORM_DST_ADDR 0x8 + +#define X100_DC_ADDRESS_TRANSFORM_SRC_ADDR 0x24 + #define SRC_ADDR_OFFSET 22 + #define SRC_ADDR_MASK 0xffffffffff +#define X100_DC_ADDRESS_TRANSFORM_SIZE 0x28 + #define ADDRESS_TRANSFORM_ENABLE (0x1 << 31) + #define SIZE_OFFSET 22 +#define X100_DC_ADDRESS_TRANSFORM_DST_ADDR 0x2c + #define DST_ADDR_OFFSET 22 +#define X100_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS 0x48 + #define DC_DP_RESET_STATUS(pipe) (1 << pipe) + #define DP_SPREAD_ENABLE(pipe) (0x8 << pipe) +#define X100_DC_ADDRESS_TRANSFORM_BACKLIGHT_VALUE 0x4c + #define BACKLIGHT_VALUE_MASK (0x7f) + #define BACKLIGHT_VALUE_SHIFT 16 +/******************************address transform register end**************************/ + +/******************************phy register start******************************************/ +/* self define */ +#define X100_PHY0_PIPE_RESET 0x40104 + #define RESET 0x0 + 
#define RESET_DEASSERT 0x1 +#define X100_PHY1_PIPE_RESET 0x100100 + #define PHY1_PIPE_RESET 0x0 + #define PHY1_PIPE_RESET_DEASSERT 0x4 + +#define X100_PHY1_EN_REFCLK 0x100070 + +#define X100_PHY0_MODE 0x40088 + #define LANE_BIT (0x3) + #define LANE_BIT_SHIFT 0x2 +#define X100_PHY1_SEL 0x100004 + #define PHY1_DP_LANE_BIT 0x1 + #define PHY1_DP_LANE_BIT_SHIFT 2 + +#define X100_PHY0_LINK_CFG 0x40044 + #define LANE_MASTER 0x1 + #define LANE_MASTER_SHIFT 1 + +#define X100_PHY0_PLL_EN 0x40010 + #define PLL_EN 0x1 + #define PLL_EN_SHIFT 1 +#define X100_PHY0_PMA_WIDTH 0x40020 + #define BIT_20 0x5 + #define BIT_20_SHIFT 4 + +#define X100_PHY0_PMA0_POWER 0x40014 +#define X100_PHY0_PMA1_POWER 0x40018 + #define A0_ACTIVE 0x1 + #define A0_ACTIVE_SHIFT 8 + #define A3_POWERDOWN3 0x8 + #define A3_POWERDOWN3_SHIFT 8 + +#define X100_PHY1_PMA_MISC 0x1000a0 + #define PHY1_PLL_EN 0x1 + #define PHY1_PLL_EN_MASK 1 + #define PHY1_PLL_EN_SHIFT 8 + #define PHY1_BIT_20 0x5 + #define PHY1_BIT_20_SHIFT 9 + #define PHY1_A0_ACTIVE 0x1 + #define PHY1_A0_ACTIVE_SHIFT 2 + #define PHY1_A0_ACTIVE_MASK 0x3f + #define PHY1_A3_POWERDOWN3 0x8 + #define PHY1_A3_POWERDOWN3_MASK 0x3f + #define PHY1_A3_POWERDOWN3_SHIFT 2 + +#define X100_PHY0_LINK_RESET 0x40108 + #define LINK_RESET 0x1 + #define LINK_RESET_MASK 0x1 + #define LINTK_RESET_SHIFT 0x1 + +#define X100_PHY0_APB_RESET 0x40100 + #define APB_RESET 0x1 +#define X100_PHY1_APB_RESET 0x100104 + #define PHY1_APB_RESET 0x4 + +/* phy origin register */ +#define X100_PHY0_PLL_CFG 0x30038 +#define X100_PHY1_PLL_CFG 0xb0038 + #define SINGLE_LINK 0x0 + #define DOUBLE_LINK 0x2 + +#define X100_PHY0_PMA_CONTROL 0x3800c +#define X100_PHY1_PMA_CONTROL 0xb800c + #define CONTROL_ENABLE 0x1 + #define CONTROL_ENABLE_MASK 0x1 + #define CONTROL_ENABLE_SHIFT 0x1 + +#define X100_PHY0_PMA_CONTROL2 0x38004 +#define X100_PHY1_PMA_CONTROL2 0xb8004 + #define PLL0_LOCK_DONE (0x1 << 6) + #define PLL1_LOCK_DONE (0x1 << 7) + +#define X100_PHY0_PLL0_CLK_SEL 0X684 +#define 
X100_PHY0_PLL1_CLK_SEL 0x704 +#define X100_PHY1_PLL_CLK_SEL 0X80684 + #define PLL_LINK_RATE_162000 0xf01 + #define PLL_LINK_RATE_270000 0x701 + #define PLL_LINK_RATE_540000 0x301 + #define PLL_LINK_RATE_810000 0x200 + +#define X100_PHY0_HSCLK0_SEL 0x18398 +#define X100_PHY0_HSCLK1_SEL 0x1a398 +#define X100_PHY1_HSCLK_SEL 0x90398 + #define HSCLK_LINK_0 0x0 + #define HSCLK_LINK_1 0x1 + +#define X100_PHY0_HSCLK0_DIV 0x1839c +#define X100_PHY0_HSCLK1_DIV 0x1a39c +#define X100_PHY1_HSCLK_DIV 0x9039c + #define HSCLK_LINK_RATE_162000 0x2 + #define HSCLK_LINK_RATE_270000 0x1 + #define HSCLK_LINK_RATE_540000 0x0 + #define HSCLK_LINK_RATE_810000 0x0 + +#define X100_PHY0_PLLDRC0_CTRL 0x18394 +#define X100_PHY0_PLLDRC1_CTRL 0x1a394 +#define X100_PHY1_PLLDRC_CTRL 0x90394 + #define PLLDRC_LINK0 0x1 + #define PLLDRC_LINK1 0x9 + +#define X100_PHY0_PLL0_DSM_M0 0x250 +#define X100_PHY1_PLL0_DSM_M0 0x80250 + #define PLL0_DSM_M0 0x4 +#define X100_PHY0_PLL0_VCOCAL_START 0x218 +#define X100_PHY1_PLL0_VCOCAL_START 0x80218 + #define PLL0_VCOCAL_START 0xc5e +#define X100_PHY0_PLL0_VCOCAL_CTRL 0x208 +#define X100_PHY1_PLL0_VCOCAL_CTRL 0x80208 + #define PLL0_VCOCAL_CTRL 0x3 + +#define X100_PHY0_PLL1_DSM_M0 0x350 + #define PLL1_DSM_M0 0x4 +#define X100_PHY0_PLL1_VCOCAL_START 0x318 + #define PLL1_VCOCAL_START 0xc5e +#define X100_PHY0_PLL1_VCOCAL_CTRL 0x308 + #define PLL1_VCOCAL_CTRL 0x3 + +#define X100_PHY0_PLL0_CP_PADJ 0x690 +#define X100_PHY0_PLL0_CP_IADJ 0x694 +#define X100_PHY0_PLL0_CP_FILT_PADJ 0x698 +#define X100_PHY0_PLL0_INTDIV 0x240 +#define X100_PHY0_PLL0_FRACDIVL 0x244 +#define X100_PHY0_PLL0_FRACDIVH 0x248 +#define X100_PHY0_PLL0_HIGH_THR 0x24c +#define X100_PHY0_PLL0_PDIAG_CTRL 0x680 +#define X100_PHY0_PLL0_VCOCAL_PLLCNT_START 0x220 +#define X100_PHY0_PLL0_LOCK_PEFCNT 0x270 +#define X100_PHY0_PLL0_LOCK_PLLCNT_START 0x278 +#define X100_PHY0_PLL0_LOCK_PLLCNT_THR 0x27c + +#define X100_PHY0_PLL1_CP_PADJ 0x710 +#define X100_PHY0_PLL1_CP_IADJ 0x714 +#define X100_PHY0_PLL1_CP_FILT_PADJ 
0x718 +#define X100_PHY0_PLL1_INTDIV 0x340 +#define X100_PHY0_PLL1_FRACDIVL 0x344 +#define X100_PHY0_PLL1_FRACDIVH 0x348 +#define X100_PHY0_PLL1_HIGH_THR 0x34c +#define X100_PHY0_PLL1_PDIAG_CTRL 0x700 +#define X100_PHY0_PLL1_VCOCAL_PLLCNT_START 0x320 +#define X100_PHY0_PLL1_LOCK_PEFCNT 0x370 +#define X100_PHY0_PLL1_LOCK_PLLCNT_START 0x378 +#define X100_PHY0_PLL1_LOCK_PLLCNT_THR 0x37c + +#define X100_PHY1_PLL0_CP_PADJ 0x80690 +#define X100_PHY1_PLL0_CP_IADJ 0x80694 +#define X100_PHY1_PLL0_CP_FILT_PADJ 0x80698 +#define X100_PHY1_PLL0_INTDIV 0x80240 +#define X100_PHY1_PLL0_FRACDIVL 0x80244 +#define X100_PHY1_PLL0_FRACDIVH 0x80248 +#define X100_PHY1_PLL0_HIGH_THR 0x8024c +#define X100_PHY1_PLL0_PDIAG_CTRL 0x80680 +#define X100_PHY1_PLL0_VCOCAL_PLLCNT_START 0x80220 +#define X100_PHY1_PLL0_LOCK_PEFCNT 0x80270 +#define X100_PHY1_PLL0_LOCK_PLLCNT_START 0x80278 +#define X100_PHY1_PLL0_LOCK_PLLCNT_THR 0x8027c + +#define X100_PHY0_PLL0_TX_PSC_A0 0x18400 +#define X100_PHY1_PLL0_TX_PSC_A0 0x90400 + #define PLL0_TX_PSC_A0 0xfb +#define X100_PHY0_PLL0_TX_PSC_A2 0x18408 +#define X100_PHY1_PLL0_TX_PSC_A2 0x90408 + #define PLL0_TX_PSC_A2 0x4aa +#define X100_PHY0_PLL0_TX_PSC_A3 0x1840c +#define X100_PHY1_PLL0_TX_PSC_A3 0x9040c + #define PLL0_TX_PSC_A3 0x4aa +#define X100_PHY0_PLL0_RX_PSC_A0 0x28000 +#define X100_PHY1_PLL0_RX_PSC_A0 0xa0000 + #define PLL0_RX_PSC_A0 0x0 +#define X100_PHY0_PLL0_RX_PSC_A2 0x28008 +#define X100_PHY1_PLL0_RX_PSC_A2 0xa0008 + #define PLL0_RX_PSC_A2 0x0 +#define X100_PHY0_PLL0_RX_PSC_A3 0x2800C +#define X100_PHY1_PLL0_RX_PSC_A3 0xa000C + #define PLL0_RX_PSC_A3 0x0 +#define X100_PHY0_PLL0_RX_PSC_CAL 0x28018 +#define X100_PHY1_PLL0_RX_PSC_CAL 0xa0018 + #define PLL0_RX_PSC_CAL 0x0 + +#define X100_PHY0_PLL1_TX_PSC_A0 0x1a400 + #define PLL1_TX_PSC_A0 0xfb +#define X100_PHY0_PLL1_TX_PSC_A2 0x1a408 + #define PLL1_TX_PSC_A2 0x4aa +#define X100_PHY0_PLL1_TX_PSC_A3 0x1a40c + #define PLL1_TX_PSC_A3 0x4aa +#define X100_PHY0_PLL1_RX_PSC_A0 0x2a000 + #define 
PLL1_RX_PSC_A0 0x0 +#define X100_PHY0_PLL1_RX_PSC_A2 0x2a008 + #define PLL1_RX_PSC_A2 0x0 +#define X100_PHY0_PLL1_RX_PSC_A3 0x2a00C + #define PLL1_RX_PSC_A3 0x0 +#define X100_PHY0_PLL1_RX_PSC_CAL 0x2a018 + #define PLL1_RX_PSC_CAL 0x0 + +#define X100_PHY0_PLL0_XCVR_CTRL 0x183a8 +#define X100_PHY1_PLL0_XCVR_CTRL 0x903a8 + #define PLL0_XCVR_CTRL 0xf +#define X100_PHY0_PLL1_XCVR_CTRL 0x1a3a8 + #define PLL1_XCVR_CTRL 0xf + +#define X100_PHY0_PLL0_RX_GCSM1_CTRL 0x28420 +#define X100_PHY1_PLL0_RX_GCSM1_CTRL 0xa0420 + #define PLL0_RX_GCSM1_CTRL 0x0 +#define X100_PHY0_PLL0_RX_GCSM2_CTRL 0x28440 +#define X100_PHY1_PLL0_RX_GCSM2_CTRL 0xa0440 + #define PLL0_RX_GCSM2_CTRL 0x0 +#define X100_PHY0_PLL0_RX_PERGCSM_CTRL 0x28460 +#define X100_PHY1_PLL0_RX_PERGCSM_CTRL 0xa0460 + #define PLL0_RX_PERGCSM_CTRL 0x0 + +#define X100_PHY0_PLL1_RX_GCSM1_CTRL 0x2a420 + #define PLL1_RX_GCSM1_CTRL 0x0 +#define X100_PHY0_PLL1_RX_GCSM2_CTRL 0x2a440 + #define PLL1_RX_GCSM2_CTRL 0x0 +#define X100_PHY0_PLL1_RX_PERGCSM_CTRL 0x2a460 + #define PLL1_RX_PERGCSM_CTRL 0x0 + +/* swing and emphasis */ +#define X100_PHY0_PLL0_TX_DIAG_ACYA 0x1879c +#define X100_PHY0_PLL1_TX_DIAG_ACYA 0x1a79c +#define X100_PHY1_PLL0_TX_DIAG_ACYA 0x9079c + #define LOCK 1 + #define UNLOCK 0 + +#define X100_PHY0_PLL0_TX_TXCC_CTRL 0x18100 +#define X100_PHY0_PLL1_TX_TXCC_CTRL 0x1a100 +#define X100_PHY1_PLL0_TX_TXCC_CTRL 0x90100 + #define TX_TXCC_CTRL 0x8a4 + +#define X100_PHY0_PLL0_TX_DRV 0x18318 +#define X100_PHY0_PLL1_TX_DRV 0x1a318 +#define X100_PHY1_PLL0_TX_DRV 0x90318 + #define TX_DRV 0x3 + +#define X100_PHY0_PLL0_TX_MGNFS 0x18140 +#define X100_PHY0_PLL1_TX_MGNFS 0x1a140 +#define X100_PHY1_PLL0_TX_MGNFS 0x90140 + +#define X100_PHY0_PLL0_TX_CPOST 0x18130 +#define X100_PHY0_PLL1_TX_CPOST 0x1a130 +#define X100_PHY0_PLL1_TX_CPOST1 0x1a13c +#define X100_PHY1_PLL0_TX_CPOST 0x90130 + +/******************************phy register end********************************************/ +#endif /* __X100_REG_H__ */ diff --git 
a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 017aec34a238..6c9a83ff041d 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -531,6 +531,21 @@ config I2C_DESIGNWARE_BAYTRAIL the platform firmware controlling it. You should say Y if running on a BayTrail system using the AXP288. +config I2C_PHYTIUM_CORE + tristate + +config I2C_PHYTIUM_PCI + tristate "Phytium I2C PCI" + depends on PCI && ARCH_PHYTIUM + select I2C_PHYTIUM_CORE + select I2C_SMBUS + help + If you say yes to this option, support will be included for the + Phytium I2C adapter. Only master mode is supported. + + This driver can also be built as a module. If so, the module + will be called i2c-phytium-pci. + config I2C_DIGICOLOR tristate "Conexant Digicolor I2C driver" depends on ARCH_DIGICOLOR diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index 18b26af82b1c..2ea9bbfddb30 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -53,6 +53,9 @@ i2c-designware-platform-$(CONFIG_I2C_DESIGNWARE_BAYTRAIL) += i2c-designware-bayt obj-$(CONFIG_I2C_DESIGNWARE_PCI) += i2c-designware-pci.o i2c-designware-pci-objs := i2c-designware-pcidrv.o obj-$(CONFIG_I2C_DIGICOLOR) += i2c-digicolor.o +obj-$(CONFIG_I2C_PHYTIUM_CORE) += i2c-phytium-core.o +i2c-phytium-core-objs := i2c-phytium-common.o i2c-phytium-master.o +obj-$(CONFIG_I2C_PHYTIUM_PCI) += i2c-phytium-pci.o obj-$(CONFIG_I2C_EFM32) += i2c-efm32.o obj-$(CONFIG_I2C_EG20T) += i2c-eg20t.o obj-$(CONFIG_I2C_EMEV2) += i2c-emev2.o diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index b5750fd85125..3818b7a0a847 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -151,6 +151,7 @@ static const struct acpi_device_id dw_i2c_acpi_match[] = { { "APMC0D0F", 0 }, { "HISI02A1", 0 }, { "HISI02A2", 0 }, + { "PHYT0003", 0 }, { } }; MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match); diff --git 
a/drivers/i2c/busses/i2c-phytium-common.c b/drivers/i2c/busses/i2c-phytium-common.c new file mode 100644 index 000000000000..7a6f0ca75299 --- /dev/null +++ b/drivers/i2c/busses/i2c-phytium-common.c @@ -0,0 +1,152 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Phytium I2C adapter driver. + * + * Based on the TI DAVINCI I2C adapter driver. + * + * Copyright (C) 2021,Phytium Technology Co.,Ltd. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "i2c-phytium-core.h" + +static char *abort_sources[] = { + [ABRT_7B_ADDR_NOACK] = + "slave address not acknowledged (7bit mode)", + [ABRT_10ADDR1_NOACK] = + "first address byte not acknowledged (10bit mode)", + [ABRT_10ADDR2_NOACK] = + "second address byte not acknowledged (10bit mode)", + [ABRT_TXDATA_NOACK] = + "data not acknowledged", + [ABRT_GCALL_NOACK] = + "no acknowledgement for a general call", + [ABRT_GCALL_READ] = + "read after general call", + [ABRT_SBYTE_ACKDET] = + "start byte acknowledged", + [ABRT_SBYTE_NORSTRT] = + "trying to send start byte when restart is disabled", + [ABRT_10B_RD_NORSTRT] = + "trying to read when restart is disabled (10bit mode)", + [ABRT_MASTER_DIS] = + "trying to use disabled adapter", + [ARB_LOST] = + "lost arbitration", + [ABRT_SLAVE_FLUSH_TXFIFO] = + "read command so flush old data in the TX FIFO", + [ABRT_SLAVE_ARBLOST] = + "slave lost the bus while transmitting data to a remote master", + [ABRT_SLAVE_RD_INTX] = + "incorrect slave-transmitter mode configuration", +}; + +u32 phytium_readl(struct phytium_i2c_dev *dev, int offset) +{ + return readl_relaxed(dev->base + offset); +} + +void phytium_writel(struct phytium_i2c_dev *dev, u32 b, int offset) +{ + writel_relaxed(b, dev->base + offset); +} + +void __i2c_phytium_disable(struct phytium_i2c_dev *dev) +{ + int timeout = 100; + + do { + __i2c_phytium_disable_nowait(dev); + if ((phytium_readl(dev, IC_ENABLE_STATUS) & 1) == 0) + return; + + /* + * Wait 10 times the 
 signaling period of the highest I2C + * transfer supported by the driver (for 400KHz this is + * 25us). + */ + usleep_range(25, 250); + } while (timeout--); + + dev_warn(dev->dev, "timeout in disabling adapter\n"); +} + +int i2c_phytium_wait_bus_not_busy(struct phytium_i2c_dev *dev) +{ + int timeout = 20; /* 20 ms */ + + while (phytium_readl(dev, IC_STATUS) & IC_STATUS_ACTIVITY) { + if (timeout <= 0) { + dev_warn(dev->dev, "timeout waiting for bus ready\n"); + i2c_recover_bus(&dev->adapter); + + if (phytium_readl(dev, IC_STATUS) & IC_STATUS_ACTIVITY) + return -ETIMEDOUT; + return 0; + } + timeout--; + usleep_range(1000, 1100); + } + + return 0; +} + +int i2c_phytium_handle_tx_abort(struct phytium_i2c_dev *dev) +{ + unsigned long abort_source = dev->abort_source; + int i; + + if (abort_source & IC_TX_ABRT_NOACK) { + for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources)) + dev_dbg(dev->dev, + "%s: %s\n", __func__, abort_sources[i]); + return -EREMOTEIO; + } + + for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources)) + dev_err(dev->dev, "%s: %s\n", __func__, abort_sources[i]); + + if (abort_source & IC_TX_ARB_LOST) + return -EAGAIN; + else if (abort_source & IC_TX_ABRT_GCALL_READ) + return -EINVAL; + else + return -EIO; + + return 0; +} + +u32 i2c_phytium_func(struct i2c_adapter *adapter) +{ + struct phytium_i2c_dev *dev = i2c_get_adapdata(adapter); + + return dev->functionality; +} + +void i2c_phytium_disable(struct phytium_i2c_dev *dev) +{ + /* Disable controller */ + __i2c_phytium_disable(dev); + + /* Disable all interrupts */ + phytium_writel(dev, 0, IC_INTR_MASK); + phytium_readl(dev, IC_CLR_INTR); +} + +void i2c_phytium_disable_int(struct phytium_i2c_dev *dev) +{ + phytium_writel(dev, 0, IC_INTR_MASK); +} + +MODULE_AUTHOR("Cheng Quan "); +MODULE_DESCRIPTION("Phytium I2C bus adapter core"); +MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/i2c-phytium-core.h b/drivers/i2c/busses/i2c-phytium-core.h new file mode 100644 index 
000000000000..651c298362a5 --- /dev/null +++ b/drivers/i2c/busses/i2c-phytium-core.h @@ -0,0 +1,233 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Phytium I2C adapter driver. + * + * Copyright (C) 2021, Phytium Technology Co.,Ltd. + */ + +#include +#include +#include + +#define IC_DEFAULT_FUNCTIONALITY (I2C_FUNC_I2C | \ + I2C_FUNC_SMBUS_BYTE | \ + I2C_FUNC_SMBUS_BYTE_DATA | \ + I2C_FUNC_SMBUS_WORD_DATA | \ + I2C_FUNC_SMBUS_BLOCK_DATA | \ + I2C_FUNC_SMBUS_I2C_BLOCK) + +#define IC_CON_MASTER 0x1 +#define IC_CON_SPEED_STD 0x2 +#define IC_CON_SPEED_FAST 0x4 +#define IC_CON_SPEED_HIGH 0x6 +#define IC_CON_SPEED_MASK 0x6 +#define IC_CON_10BITADDR_SLAVE 0x8 +#define IC_CON_10BITADDR_MASTER 0x10 +#define IC_CON_RESTART_EN 0x20 +#define IC_CON_SLAVE_DISABLE 0x40 +#define IC_CON_STOP_DET_IFADDRESSED 0x80 +#define IC_CON_TX_EMPTY_CTRL 0x100 +#define IC_CON_RX_FIFO_FULL_HLD_CTRL 0x200 + +#define IC_CON 0x0 +#define IC_TAR 0x4 +#define IC_SAR 0x8 +#define IC_DATA_CMD 0x10 +#define IC_SS_SCL_HCNT 0x14 +#define IC_SS_SCL_LCNT 0x18 +#define IC_FS_SCL_HCNT 0x1c +#define IC_FS_SCL_LCNT 0x20 +#define IC_HS_SCL_HCNT 0x24 +#define IC_HS_SCL_LCNT 0x28 +#define IC_INTR_STAT 0x2c +#define IC_INTR_MASK 0x30 +#define IC_RAW_INTR_STAT 0x34 +#define IC_RX_TL 0x38 +#define IC_TX_TL 0x3c +#define IC_CLR_INTR 0x40 +#define IC_CLR_RX_UNDER 0x44 +#define IC_CLR_RX_OVER 0x48 +#define IC_CLR_TX_OVER 0x4c +#define IC_CLR_RD_REQ 0x50 +#define IC_CLR_TX_ABRT 0x54 +#define IC_CLR_RX_DONE 0x58 +#define IC_CLR_ACTIVITY 0x5c +#define IC_CLR_STOP_DET 0x60 +#define IC_CLR_START_DET 0x64 +#define IC_CLR_GEN_CALL 0x68 +#define IC_ENABLE 0x6c +#define IC_STATUS 0x70 +#define IC_TXFLR 0x74 +#define IC_RXFLR 0x78 +#define IC_SDA_HOLD 0x7c +#define IC_TX_ABRT_SOURCE 0x80 +#define IC_ENABLE_STATUS 0x9c +#define IC_SMBCLK_LOW_MEXT 0xa8 +#define IC_SMBCLK_LOW_TIMEOUT 0xac +#define IC_SMBDAT_STUCK_TIMEOUT 0xb4 +#define IC_CLR_SMBCLK_EXT_LOW_TIMEOUT 0xbc +#define IC_CLR_SMBCLK_TMO_LOW_TIMEOUT 0xc0 +#define 
IC_CLR_SMBDAT_LOW_TIMEOUT 0xc4 +#define IC_CLR_SMBALERT_IN_N 0xd0 +#define IC_COMP_PARAM_1 0xf4 + +#define IC_INTR_RX_UNDER 0x001 +#define IC_INTR_RX_OVER 0x002 +#define IC_INTR_RX_FULL 0x004 +#define IC_INTR_TX_OVER 0x008 +#define IC_INTR_TX_EMPTY 0x010 +#define IC_INTR_RD_REQ 0x020 +#define IC_INTR_TX_ABRT 0x040 +#define IC_INTR_RX_DONE 0x080 +#define IC_INTR_ACTIVITY 0x100 +#define IC_INTR_STOP_DET 0x200 +#define IC_INTR_START_DET 0x400 +#define IC_INTR_GEN_CALL 0x800 +#define IC_INTR_SMBCLK_EXT_LOW_TIMEOUT 0x1000 +#define IC_INTR_SMBCLK_TMO_LOW_TIMEOUT 0x2000 +#define IC_INTR_SMBSDA_LOW_TIMEOUT 0x4000 +#define IC_INTR_SMBALERT_IN_N 0x20000 + +#define IC_INTR_DEFAULT_MASK (IC_INTR_RX_FULL | \ + IC_INTR_TX_ABRT | \ + IC_INTR_STOP_DET) +#define IC_INTR_MASTER_MASK (IC_INTR_DEFAULT_MASK | \ + IC_INTR_TX_EMPTY) +#define IC_INTR_SMBUS_MASK (IC_INTR_MASTER_MASK | \ + IC_INTR_SMBCLK_EXT_LOW_TIMEOUT | \ + IC_INTR_SMBCLK_TMO_LOW_TIMEOUT | \ + IC_INTR_SMBSDA_LOW_TIMEOUT) + +#define IC_STATUS_ACTIVITY 0x1 +#define IC_STATUS_TFE BIT(2) +#define IC_STATUS_MASTER_ACTIVITY BIT(5) +#define IC_STATUS_SLAVE_ACTIVITY BIT(6) + +#define IC_SDA_HOLD_RX_SHIFT 16 +#define IC_SDA_HOLD_RX_MASK GENMASK(23, IC_SDA_HOLD_RX_SHIFT) + +#define IC_ERR_TX_ABRT 0x1 + +#define IC_TAR_10BITADDR_MASTER BIT(12) + +#define IC_COMP_PARAM_1_SPEED_MODE_HIGH (BIT(2) | BIT(3)) +#define IC_COMP_PARAM_1_SPEED_MODE_MASK GENMASK(3, 2) + +#define STATUS_IDLE 0x0 +#define STATUS_WRITE_IN_PROGRESS 0x1 +#define STATUS_READ_IN_PROGRESS 0x2 + +#define ABRT_7B_ADDR_NOACK 0 +#define ABRT_10ADDR1_NOACK 1 +#define ABRT_10ADDR2_NOACK 2 +#define ABRT_TXDATA_NOACK 3 +#define ABRT_GCALL_NOACK 4 +#define ABRT_GCALL_READ 5 +#define ABRT_SBYTE_ACKDET 7 +#define ABRT_SBYTE_NORSTRT 9 +#define ABRT_10B_RD_NORSTRT 10 +#define ABRT_MASTER_DIS 11 +#define ARB_LOST 12 +#define ABRT_SLAVE_FLUSH_TXFIFO 13 +#define ABRT_SLAVE_ARBLOST 14 +#define ABRT_SLAVE_RD_INTX 15 + +#define IC_TX_ABRT_7B_ADDR_NOACK (1UL << ABRT_7B_ADDR_NOACK) 
+#define IC_TX_ABRT_10ADDR1_NOACK (1UL << ABRT_10ADDR1_NOACK) +#define IC_TX_ABRT_10ADDR2_NOACK (1UL << ABRT_10ADDR2_NOACK) +#define IC_TX_ABRT_TXDATA_NOACK (1UL << ABRT_TXDATA_NOACK) +#define IC_TX_ABRT_GCALL_NOACK (1UL << ABRT_GCALL_NOACK) +#define IC_TX_ABRT_GCALL_READ (1UL << ABRT_GCALL_READ) +#define IC_TX_ABRT_SBYTE_ACKDET (1UL << ABRT_SBYTE_ACKDET) +#define IC_TX_ABRT_SBYTE_NORSTRT (1UL << ABRT_SBYTE_NORSTRT) +#define IC_TX_ABRT_10B_RD_NORSTRT (1UL << ABRT_10B_RD_NORSTRT) +#define IC_TX_ABRT_MASTER_DIS (1UL << ABRT_MASTER_DIS) +#define IC_TX_ARB_LOST (1UL << ARB_LOST) +#define IC_RX_ABRT_SLAVE_RD_INTX (1UL << ABRT_SLAVE_RD_INTX) +#define IC_RX_ABRT_SLAVE_ARBLOST (1UL << ABRT_SLAVE_ARBLOST) +#define IC_RX_ABRT_SLAVE_FLUSH_TXFIFO (1UL << ABRT_SLAVE_FLUSH_TXFIFO) + +#define IC_TX_ABRT_NOACK (IC_TX_ABRT_7B_ADDR_NOACK | \ + IC_TX_ABRT_10ADDR1_NOACK | \ + IC_TX_ABRT_10ADDR2_NOACK | \ + IC_TX_ABRT_TXDATA_NOACK | \ + IC_TX_ABRT_GCALL_NOACK) +#define CONTROLLER_TYPE_IIC 0 +#define CONTROLLER_TYPE_SMBUS 1 + +struct phytium_i2c_dev { + struct device *dev; + void __iomem *base; + int irq; + u32 flags; + struct completion cmd_complete; + u32 (*get_clk_rate_khz)(struct phytium_i2c_dev *dev); + + struct i2c_adapter adapter; + struct i2c_client *ara; + struct i2c_smbus_alert_setup alert_data; + + struct phytium_pci_i2c *controller; + + unsigned int status; + int cmd_err; + u32 abort_source; + + struct i2c_msg *msgs; + int msgs_num; + int msg_write_idx; + int msg_read_idx; + int msg_err; + u32 tx_buf_len; + u8 *tx_buf; + u32 rx_buf_len; + u8 *rx_buf; + + u32 master_cfg; + u32 functionality; + unsigned int tx_fifo_depth; + unsigned int rx_fifo_depth; + int rx_outstanding; + + struct i2c_timings timings; + u32 sda_hold_time; + u16 ss_hcnt; + u16 ss_lcnt; + u16 fs_hcnt; + u16 fs_lcnt; + u16 fp_hcnt; + u16 fp_lcnt; + u16 hs_hcnt; + u16 hs_lcnt; + + void (*disable)(struct phytium_i2c_dev *dev); + void (*disable_int)(struct phytium_i2c_dev *dev); + int (*init)(struct 
phytium_i2c_dev *dev); +}; + +#define ACCESS_INTR_MASK 0x00000004 + +#define DEFAULT_CLOCK_FREQUENCY 48000000 + +u32 phytium_readl(struct phytium_i2c_dev *dev, int offset); +void phytium_writel(struct phytium_i2c_dev *dev, u32 b, int offset); +int i2c_phytium_wait_bus_not_busy(struct phytium_i2c_dev *dev); +int i2c_phytium_handle_tx_abort(struct phytium_i2c_dev *dev); +u32 i2c_phytium_func(struct i2c_adapter *adap); +void i2c_phytium_disable(struct phytium_i2c_dev *dev); +void i2c_phytium_disable_int(struct phytium_i2c_dev *dev); + +static inline void __i2c_phytium_enable(struct phytium_i2c_dev *dev) +{ + phytium_writel(dev, 1, IC_ENABLE); +} + +static inline void __i2c_phytium_disable_nowait(struct phytium_i2c_dev *dev) +{ + phytium_writel(dev, 0, IC_ENABLE); +} + +void __i2c_phytium_disable(struct phytium_i2c_dev *dev); + +extern int i2c_phytium_probe(struct phytium_i2c_dev *dev); + diff --git a/drivers/i2c/busses/i2c-phytium-master.c b/drivers/i2c/busses/i2c-phytium-master.c new file mode 100644 index 000000000000..8b82006db3ca --- /dev/null +++ b/drivers/i2c/busses/i2c-phytium-master.c @@ -0,0 +1,498 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium I2C adapter driver. + * + * Copyright (C) 2021, Phytium Technology Co., Ltd. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "i2c-phytium-core.h" + +static int i2c_phytium_init_master(struct phytium_i2c_dev *dev) +{ + /* Disable the adapter */ + __i2c_phytium_disable(dev); + + /* Write standard speed timing parameters */ + phytium_writel(dev, dev->ss_hcnt, IC_SS_SCL_HCNT); + phytium_writel(dev, dev->ss_lcnt, IC_SS_SCL_LCNT); + + /* Write fast mode/fast mode plus timing parameters */ + phytium_writel(dev, dev->fs_hcnt, IC_FS_SCL_HCNT); + phytium_writel(dev, dev->fs_lcnt, IC_FS_SCL_LCNT); + + /* Write high speed timing parameters if supported */ + if (dev->hs_hcnt && dev->hs_lcnt) { + phytium_writel(dev, dev->hs_hcnt, IC_HS_SCL_HCNT); + phytium_writel(dev, dev->hs_lcnt, IC_HS_SCL_LCNT); + } + + /* Write SDA hold time if supported */ + if (dev->sda_hold_time) + phytium_writel(dev, dev->sda_hold_time, IC_SDA_HOLD); + + /* Configure Tx/Rx FIFO threshold levels */ + phytium_writel(dev, dev->tx_fifo_depth >> 1, IC_TX_TL); + phytium_writel(dev, 0, IC_RX_TL); + + /* Configure the I2C master */ + phytium_writel(dev, dev->master_cfg, IC_CON); + + return 0; +} + +static void i2c_phytium_xfer_init(struct phytium_i2c_dev *dev) +{ + struct i2c_msg *msgs = dev->msgs; + u32 ic_con, ic_tar = 0; + + /* Disable the adapter */ + __i2c_phytium_disable(dev); + + /* If the slave address is 10-bit address, enable 10BITADDR */ + ic_con = phytium_readl(dev, IC_CON); + if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) { + ic_con |= IC_CON_10BITADDR_MASTER; + ic_tar = IC_TAR_10BITADDR_MASTER; + } else { + ic_con &= ~IC_CON_10BITADDR_MASTER; + } + + phytium_writel(dev, ic_con, IC_CON); + + /* + * Set the slave (target) address and enable 10-bit addressing mode + * if applicable. 
+ */ + phytium_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, IC_TAR); + + /* Enforce disabled interrupts */ + i2c_phytium_disable_int(dev); + + /* Enable the adapter */ + __i2c_phytium_enable(dev); + + /* Clear and enable interrupts */ + phytium_readl(dev, IC_CLR_INTR); + if (dev->ara) + phytium_writel(dev, IC_INTR_SMBUS_MASK, IC_INTR_MASK); + else + phytium_writel(dev, IC_INTR_MASTER_MASK, IC_INTR_MASK); +} + +static void i2c_phytium_xfer_msg(struct phytium_i2c_dev *dev) +{ + struct i2c_msg *msgs = dev->msgs; + u32 intr_mask; + int tx_limit, rx_limit; + u32 addr = msgs[dev->msg_write_idx].addr; + u32 buf_len = dev->tx_buf_len; + u8 *buf = dev->tx_buf; + bool need_restart = false; + + intr_mask = IC_INTR_MASTER_MASK; + + for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++) { + u32 flags = msgs[dev->msg_write_idx].flags; + + if (msgs[dev->msg_write_idx].addr != addr) { + dev_err(dev->dev, + "%s: invalid target address\n", __func__); + dev->msg_err = -EINVAL; + break; + } + + if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) { + /* new i2c_msg */ + buf = msgs[dev->msg_write_idx].buf; + buf_len = msgs[dev->msg_write_idx].len; + + if ((dev->master_cfg & IC_CON_RESTART_EN) && + (dev->msg_write_idx > 0)) + need_restart = true; + } + + tx_limit = dev->tx_fifo_depth - phytium_readl(dev, IC_TXFLR); + rx_limit = dev->tx_fifo_depth - phytium_readl(dev, IC_RXFLR); + + while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) { + u32 cmd = 0; + + if (dev->msg_write_idx == dev->msgs_num - 1 && + buf_len == 1 && !(flags & I2C_M_RECV_LEN)) + cmd |= BIT(9); + + if (need_restart) { + cmd |= BIT(10); + need_restart = false; + } + + if (msgs[dev->msg_write_idx].flags & I2C_M_RD) { + /* avoid rx buffer overrun */ + if (dev->rx_outstanding >= dev->rx_fifo_depth) + break; + + phytium_writel(dev, cmd | 0x100, IC_DATA_CMD); + rx_limit--; + dev->rx_outstanding++; + } else { + phytium_writel(dev, cmd | *buf++, IC_DATA_CMD); + } + tx_limit--; + buf_len--; + } + + dev->tx_buf = buf; + 
dev->tx_buf_len = buf_len; + + /* + * Because we don't know the buffer length in the + * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop + * the transaction here. + */ + if (buf_len > 0 || flags & I2C_M_RECV_LEN) { + /* more bytes to be written */ + dev->status |= STATUS_WRITE_IN_PROGRESS; + break; + } else { + dev->status &= ~STATUS_WRITE_IN_PROGRESS; + } + } + + if (dev->msg_write_idx == dev->msgs_num) + intr_mask &= ~IC_INTR_TX_EMPTY; + + if (dev->msg_err) + intr_mask = 0; + + phytium_writel(dev, intr_mask, IC_INTR_MASK); +} + +static u8 i2c_phytium_recv_len(struct phytium_i2c_dev *dev, u8 len) +{ + struct i2c_msg *msgs = dev->msgs; + u32 flags = msgs[dev->msg_read_idx].flags; + + /* + * Adjust the buffer length and mask the flag + * after receiving the first byte. + */ + len += (flags & I2C_CLIENT_PEC) ? 2 : 1; + dev->tx_buf_len = len - min_t(u8, len, dev->rx_outstanding); + msgs[dev->msg_read_idx].len = len; + msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN; + + return len; +} + +static void i2c_phytium_read(struct phytium_i2c_dev *dev) +{ + struct i2c_msg *msgs = dev->msgs; + int rx_valid; + + for (; dev->msg_read_idx < dev->msgs_num; dev->msg_read_idx++) { + u32 len; + u8 *buf; + + if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD)) + continue; + + if (!(dev->status & STATUS_READ_IN_PROGRESS)) { + len = msgs[dev->msg_read_idx].len; + buf = msgs[dev->msg_read_idx].buf; + } else { + len = dev->rx_buf_len; + buf = dev->rx_buf; + } + + rx_valid = phytium_readl(dev, IC_RXFLR); + + for (; len > 0 && rx_valid > 0; len--, rx_valid--) { + u32 flags = msgs[dev->msg_read_idx].flags; + + *buf = phytium_readl(dev, IC_DATA_CMD); + /* Ensure length byte is a valid value */ + if (flags & I2C_M_RECV_LEN && + *buf <= I2C_SMBUS_BLOCK_MAX && *buf > 0) { + len = i2c_phytium_recv_len(dev, *buf); + } + buf++; + dev->rx_outstanding--; + } + + if (len > 0) { + dev->status |= STATUS_READ_IN_PROGRESS; + dev->rx_buf_len = len; + dev->rx_buf = buf; + return; + } else + dev->status &= 
~STATUS_READ_IN_PROGRESS; + } +} + +static int i2c_phytium_xfer(struct i2c_adapter *adapter, struct i2c_msg msgs[], int num) +{ + struct phytium_i2c_dev *dev = i2c_get_adapdata(adapter); + int ret; + + dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num); + + pm_runtime_get_sync(dev->dev); + + reinit_completion(&dev->cmd_complete); + dev->msgs = msgs; + dev->msgs_num = num; + dev->cmd_err = 0; + dev->msg_write_idx = 0; + dev->msg_read_idx = 0; + dev->msg_err = 0; + dev->status = STATUS_IDLE; + dev->abort_source = 0; + dev->rx_outstanding = 0; + + ret = i2c_phytium_wait_bus_not_busy(dev); + if (ret < 0) + goto done; + + /* Start the transfers */ + i2c_phytium_xfer_init(dev); + + /* Wait for tx to complete */ + if (!wait_for_completion_timeout(&dev->cmd_complete, adapter->timeout)) { + dev_err(dev->dev, "controller timed out\n"); + i2c_recover_bus(&dev->adapter); + i2c_phytium_init_master(dev); + ret = -ETIMEDOUT; + goto done; + } + + __i2c_phytium_disable_nowait(dev); + + if (dev->msg_err) { + ret = dev->msg_err; + goto done; + } + + if (likely(!dev->cmd_err && !dev->status)) { + ret = num; + goto done; + } + + /* We have got an error */ + if (dev->cmd_err == IC_ERR_TX_ABRT) { + ret = i2c_phytium_handle_tx_abort(dev); + goto done; + } + + if (dev->status) + dev_err(dev->dev, "transfer terminated early.\n"); + + ret = -EIO; + +done: + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); + + return ret; +} + +static const struct i2c_algorithm i2c_phytium_algo = { + .master_xfer = i2c_phytium_xfer, + .functionality = i2c_phytium_func, +}; + +static const struct i2c_adapter_quirks i2c_phytium_quirks = { + .flags = I2C_AQ_NO_ZERO_LEN, +}; + +static u32 i2c_phytium_read_clear_intrbits(struct phytium_i2c_dev *dev) +{ + u32 stat; + + stat = phytium_readl(dev, IC_INTR_STAT); + + if (stat & IC_INTR_RX_UNDER) + phytium_readl(dev, IC_CLR_RX_UNDER); + if (stat & IC_INTR_RX_OVER) + phytium_readl(dev, IC_CLR_RX_OVER); + if (stat & IC_INTR_TX_OVER) + 
phytium_readl(dev, IC_CLR_TX_OVER); + if (stat & IC_INTR_RD_REQ) + phytium_readl(dev, IC_CLR_RD_REQ); + if (stat & IC_INTR_TX_ABRT) { + dev->abort_source = phytium_readl(dev, IC_TX_ABRT_SOURCE); + phytium_readl(dev, IC_CLR_TX_ABRT); + } + if (stat & IC_INTR_RX_DONE) + phytium_readl(dev, IC_CLR_RX_DONE); + if (stat & IC_INTR_ACTIVITY) + phytium_readl(dev, IC_CLR_ACTIVITY); + if (stat & IC_INTR_STOP_DET) + phytium_readl(dev, IC_CLR_STOP_DET); + if (stat & IC_INTR_START_DET) + phytium_readl(dev, IC_CLR_START_DET); + if (stat & IC_INTR_GEN_CALL) + phytium_readl(dev, IC_CLR_GEN_CALL); + if (stat & IC_INTR_SMBCLK_EXT_LOW_TIMEOUT) + phytium_readl(dev, IC_CLR_SMBCLK_EXT_LOW_TIMEOUT); + if (stat & IC_INTR_SMBCLK_TMO_LOW_TIMEOUT) + phytium_readl(dev, IC_CLR_SMBCLK_TMO_LOW_TIMEOUT); + if (stat & IC_INTR_SMBSDA_LOW_TIMEOUT) + phytium_readl(dev, IC_CLR_SMBDAT_LOW_TIMEOUT); + if (stat & IC_INTR_SMBALERT_IN_N) + phytium_readl(dev, IC_CLR_SMBALERT_IN_N); + + return stat; +} + +static int i2c_phytium_irq_handler_master(struct phytium_i2c_dev *dev) +{ + u32 stat; + + stat = i2c_phytium_read_clear_intrbits(dev); + + /* SMBus interrupt */ + if (dev->ara) { + if (stat & (IC_INTR_SMBCLK_EXT_LOW_TIMEOUT | IC_INTR_SMBCLK_TMO_LOW_TIMEOUT)) { + phytium_writel(dev, phytium_readl(dev, IC_ENABLE) & (~BIT(6)), + IC_ENABLE); + phytium_writel(dev, phytium_readl(dev, IC_ENABLE) | BIT(4), + IC_ENABLE); + goto abort; + } + + if (stat & IC_INTR_SMBSDA_LOW_TIMEOUT) { + phytium_writel(dev, phytium_readl(dev, IC_ENABLE) | BIT(6), + IC_ENABLE); + goto abort; + } + + if (stat & IC_INTR_SMBALERT_IN_N) + i2c_handle_smbus_alert(dev->ara); + } + + if (stat & IC_INTR_TX_ABRT) { + dev->cmd_err |= IC_ERR_TX_ABRT; + dev->status = STATUS_IDLE; + + /* Anytime TX_ABRT is set, the contents of the tx/rx + * buffers are flushed. Make sure to skip them. 
+ */ + phytium_writel(dev, 0, IC_INTR_MASK); + goto abort; + } + + if (stat & IC_INTR_RX_FULL) + i2c_phytium_read(dev); + + if (stat & IC_INTR_TX_EMPTY) + i2c_phytium_xfer_msg(dev); + +abort: + if ((stat & (IC_INTR_TX_ABRT | IC_INTR_STOP_DET)) || + dev->msg_err) + complete(&dev->cmd_complete); + else if (unlikely(dev->flags & ACCESS_INTR_MASK)) { + /* Workaround to trigger pending interrupt */ + stat = phytium_readl(dev, IC_INTR_MASK); + i2c_phytium_disable_int(dev); + phytium_writel(dev, stat, IC_INTR_MASK); + } + + return 0; +} + +static irqreturn_t i2c_phytium_isr(int this_irq, void *dev_id) +{ + struct phytium_i2c_dev *dev = dev_id; + u32 stat, enabled; + + enabled = phytium_readl(dev, IC_ENABLE); + stat = phytium_readl(dev, IC_RAW_INTR_STAT); + if (!enabled || !(stat & ~IC_INTR_ACTIVITY)) + return IRQ_NONE; + + i2c_phytium_irq_handler_master(dev); + + return IRQ_HANDLED; +} + +int i2c_phytium_probe(struct phytium_i2c_dev *dev) +{ + const char *mode_str; + struct i2c_adapter *adapter = &dev->adapter; + unsigned long irq_flags; + int ret; + + init_completion(&dev->cmd_complete); + + dev->init = i2c_phytium_init_master; + dev->disable = i2c_phytium_disable; + dev->disable_int = i2c_phytium_disable_int; + + switch (dev->master_cfg & IC_CON_SPEED_MASK) { + case IC_CON_SPEED_STD: + mode_str = "Standard Mode"; + break; + case IC_CON_SPEED_HIGH: + mode_str = "High Speed Mode"; + break; + default: + mode_str = "Fast Mode"; + } + dev_dbg(dev->dev, "Bus speed: %s\n", mode_str); + + ret = dev->init(dev); + if (ret) + return ret; + + /* XXX: should be initialized in firmware, remove it in future */ +#define DEFAULT_TIMEOUT (DEFAULT_CLOCK_FREQUENCY / 1000 * 35) + phytium_writel(dev, DEFAULT_TIMEOUT, IC_SMBCLK_LOW_MEXT); + phytium_writel(dev, DEFAULT_TIMEOUT, IC_SMBCLK_LOW_TIMEOUT); + phytium_writel(dev, DEFAULT_TIMEOUT, IC_SMBDAT_STUCK_TIMEOUT); + + snprintf(adapter->name, sizeof(adapter->name), "Phytium I2C adapter"); + adapter->retries = 3; + adapter->algo = 
&i2c_phytium_algo; + adapter->quirks = &i2c_phytium_quirks; + adapter->dev.parent = dev->dev; + i2c_set_adapdata(adapter, dev); + + irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND; + + i2c_phytium_disable_int(dev); + ret = devm_request_irq(dev->dev, dev->irq, i2c_phytium_isr, irq_flags, + dev_name(dev->dev), dev); + if (ret) { + dev_err(dev->dev, "failed to request irq %i: %d\n", dev->irq, ret); + return ret; + } + + /* + * Increment PM usage count during adapter registration in order to + * avoid possible spurious runtime suspend when adapter device is + * registered to the device core and immediate resume in case bus has + * registered I2C slaves that do I2C transfers in their probe. + */ + pm_runtime_get_noresume(dev->dev); + ret = i2c_add_numbered_adapter(adapter); + if (ret) + dev_err(dev->dev, "fail to add adapter: %d\n", ret); + pm_runtime_put_noidle(dev->dev); + + return ret; +} +EXPORT_SYMBOL_GPL(i2c_phytium_probe); + +MODULE_DESCRIPTION("Phytium I2C bus master adapter"); +MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/i2c-phytium-pci.c b/drivers/i2c/busses/i2c-phytium-pci.c new file mode 100644 index 000000000000..1d6514a4915c --- /dev/null +++ b/drivers/i2c/busses/i2c-phytium-pci.c @@ -0,0 +1,237 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * PCI driver for Phytium I2C adapter. + * + * Copyright (C) 2021,Phytium Technology Co.,Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "i2c-phytium-core.h" + +#define DRV_NAME "i2c-phytium-pci" + +enum phytium_pci_ctl_id_t { + octopus_i2c, +}; + +struct scl_sda_cfg { + u32 ss_hcnt; + u32 fs_hcnt; + u32 ss_lcnt; + u32 fs_lcnt; + u32 sda_hold; +}; + +struct phytium_pci_i2c { + u32 bus_num; + u32 bus_cfg; + u32 tx_fifo_depth; + u32 rx_fifo_depth; + u32 clk_khz; + u32 functionality; + u32 flags; + struct scl_sda_cfg *scl_sda_cfg; + int (*setup)(struct pci_dev *pdev, struct phytium_pci_i2c *c); +}; + +/* Octopus HCNT/LCNT/SDA hold time */ +static struct scl_sda_cfg octopus_config = { + .ss_hcnt = 0x190, + .ss_lcnt = 0x1d6, + .fs_hcnt = 0x3c, + .fs_lcnt = 0x82, + .sda_hold = 0x0, // XXX +}; + +static int octopus_setup(struct pci_dev *pdev, struct phytium_pci_i2c *c) +{ + struct phytium_i2c_dev *i2c = pci_get_drvdata(pdev); + + if (pdev->device == 0xdc32) { + /* + * Since we have already register the adapter, the dev->irq + * must be valid. 
+ */ + i2c->alert_data.irq = i2c->irq; + + i2c->ara = i2c_setup_smbus_alert(&i2c->adapter, &i2c->alert_data); + if (!i2c->ara) + return -ENODEV; + } + + return 0; +} + +static struct phytium_pci_i2c pci_ctrl_info[] = { + [octopus_i2c] = { + .bus_num = -1, + .bus_cfg = IC_CON_MASTER | IC_CON_SLAVE_DISABLE | + IC_CON_RESTART_EN | IC_CON_SPEED_FAST, + .tx_fifo_depth = 7, + .rx_fifo_depth = 7, + .functionality = I2C_FUNC_10BIT_ADDR, + .clk_khz = 48000000, + .scl_sda_cfg = &octopus_config, + .setup = octopus_setup, + }, +}; + +#ifdef CONFIG_PM +static int i2c_phytium_pci_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct phytium_i2c_dev *i_dev = pci_get_drvdata(pdev); + + i_dev->disable(i_dev); + + return 0; +} + +static int i2c_phytium_pci_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct phytium_i2c_dev *i_dev = pci_get_drvdata(pdev); + + return i_dev->init(i_dev); +} +#endif + +static UNIVERSAL_DEV_PM_OPS(i2c_phytium_pm_ops, i2c_phytium_pci_suspend, + i2c_phytium_pci_resume, NULL); + +static u32 i2c_phytium_get_clk_rate_khz(struct phytium_i2c_dev *dev) +{ + return dev->controller->clk_khz; +} + +static int i2c_phytium_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct phytium_i2c_dev *dev; + struct i2c_adapter *adapter; + struct phytium_pci_i2c *controller; + struct scl_sda_cfg *cfg; + int ret; + + if (id->driver_data >= ARRAY_SIZE(pci_ctrl_info)) { + dev_err(&pdev->dev, "%s: invalid driver data %ld\n", __func__, + id->driver_data); + ret = -EINVAL; + goto out; + } + + controller = &pci_ctrl_info[id->driver_data]; + + ret = pcim_enable_device(pdev); + if (ret) { + dev_err(&pdev->dev, "Failed to enable I2C PCI device (%d)\n", ret); + goto out; + } + + ret = pcim_iomap_regions(pdev, 0x1, pci_name(pdev)); + if (ret) { + dev_err(&pdev->dev, "I/O memory remapping failed\n"); + goto out; + } + + dev = devm_kzalloc(&pdev->dev, sizeof(struct phytium_i2c_dev), GFP_KERNEL); + if (!dev) { + 
ret = -ENOMEM; + goto out; + } + + dev->controller = controller; + dev->get_clk_rate_khz = i2c_phytium_get_clk_rate_khz; + dev->base = pcim_iomap_table(pdev)[0]; + dev->dev = &pdev->dev; + dev->irq = pdev->irq; + dev->flags |= controller->flags; + + dev->functionality = controller->functionality | IC_DEFAULT_FUNCTIONALITY; + dev->master_cfg = controller->bus_cfg; + if (controller->scl_sda_cfg) { + cfg = controller->scl_sda_cfg; + dev->ss_hcnt = cfg->ss_hcnt; + dev->fs_hcnt = cfg->fs_hcnt; + dev->ss_lcnt = cfg->ss_lcnt; + dev->fs_lcnt = cfg->fs_lcnt; + dev->sda_hold_time = cfg->sda_hold; + } + + pci_set_drvdata(pdev, dev); + + dev->tx_fifo_depth = controller->tx_fifo_depth; + dev->rx_fifo_depth = controller->rx_fifo_depth; + + adapter = &dev->adapter; + adapter->owner = THIS_MODULE; + adapter->class = 0; + ACPI_COMPANION_SET(&adapter->dev, ACPI_COMPANION(&pdev->dev)); + adapter->nr = controller->bus_num; + + ret = i2c_phytium_probe(dev); + if (ret) + goto out; + + if (controller->setup) { + ret = controller->setup(pdev, controller); + if (ret) + goto out; + } + + pm_runtime_set_autosuspend_delay(&pdev->dev, 1000); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); + pm_runtime_allow(&pdev->dev); + +out: + return ret; +} + +static void i2c_phytium_pci_remove(struct pci_dev *pdev) +{ + struct phytium_i2c_dev *dev = pci_get_drvdata(pdev); + + dev->disable(dev); + pm_runtime_forbid(&pdev->dev); + pm_runtime_get_noresume(&pdev->dev); + + i2c_del_adapter(&dev->adapter); +} + +static const struct pci_device_id i2_phytium_pci_ids[] = { + { PCI_VDEVICE(PHYTIUM, 0xdc32), octopus_i2c }, + { PCI_VDEVICE(PHYTIUM, 0xdc30), octopus_i2c }, + { } +}; +MODULE_DEVICE_TABLE(pci, i2_phytium_pci_ids); + +static struct pci_driver phytium_i2c_driver = { + .name = DRV_NAME, + .id_table = i2_phytium_pci_ids, + .probe = i2c_phytium_pci_probe, + .remove = i2c_phytium_pci_remove, + .driver = { + .pm = &i2c_phytium_pm_ops, + }, +}; + 
+module_pci_driver(phytium_i2c_driver); + +MODULE_ALIAS("i2c-phytium-pci"); +MODULE_AUTHOR("Cheng Quan "); +MODULE_DESCRIPTION("Phytium PCI I2C bus adapter"); +MODULE_LICENSE("GPL"); diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig index d90d9f1098ff..958312b2169c 100644 --- a/drivers/input/serio/Kconfig +++ b/drivers/input/serio/Kconfig @@ -39,6 +39,18 @@ config SERIO_I8042 To compile this driver as a module, choose M here: the module will be called i8042. +config SERIO_PHYTIUM_PS2 + depends on SERIO + tristate "PHYTIUM PS/2 (keyboard and mouse)" + default y if ARCH_PHYTIUM + depends on PCI + help + This selects support for the PS/2 Host Controller on + Phytium SoCs. + + To compile this driver as a module, choose M here: the + module will be called phytium-ps2. + config SERIO_SERPORT tristate "Serial port line discipline" default y diff --git a/drivers/input/serio/Makefile b/drivers/input/serio/Makefile index 67950a5ccb3f..c180361bdd12 100644 --- a/drivers/input/serio/Makefile +++ b/drivers/input/serio/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_SERIO) += serio.o obj-$(CONFIG_SERIO_I8042) += i8042.o +obj-$(CONFIG_SERIO_PHYTIUM_PS2) += phytium-ps2.o obj-$(CONFIG_SERIO_PARKBD) += parkbd.o obj-$(CONFIG_SERIO_SERPORT) += serport.o obj-$(CONFIG_SERIO_CT82C710) += ct82c710.o diff --git a/drivers/input/serio/phytium-ps2.c b/drivers/input/serio/phytium-ps2.c new file mode 100644 index 000000000000..cf4fe74fd0dd --- /dev/null +++ b/drivers/input/serio/phytium-ps2.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Phytium PS/2 keyboard controller driver. + * + * Copyright (C) 2021, Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "phytium_ps2_pci" + +#define REG_STAT 0x0 +#define REG_STAT_TX_TIMEOUT 0x1 +#define REG_STAT_RX_TIMEOUT 0x2 +#define REG_STAT_TX_FULL 0x4 +#define REG_CTRL 0x4 +#define REG_CTRL_RESET 0x1 +#define REG_CTRL_TX_TIMEOUT 0x2 +#define REG_CTRL_RX_TIMEOUT 0x4 +#define REG_CTRL_RX_INTR 0x8 +#define REG_INTR 0x8 +#define REG_INTR_TIMEOUT 0x1 +#define REG_INTR_RX 0x2 +#define REG_TX 0xc +#define REG_RX 0x10 +#define REG_TIMER_VAL 0x14 + +#define REG_CTRL_ENABLE (REG_CTRL_TX_TIMEOUT|REG_CTRL_RX_TIMEOUT|REG_CTRL_RX_INTR) +#define REG_DATA_PARITY 0x100 + +#define STAT_RX_COUNTER(stat) ((stat >> 8) & 0x1f) + +struct phytium_ps2_data { + void __iomem *base; + struct serio *io; + struct pci_dev *dev; +}; + +static irqreturn_t phytium_ps2_irq(int irq, void *devid) +{ + struct phytium_ps2_data *ps2if = devid; + u32 status, scancode, val = 0; + unsigned int flag; + int i, rxcount; + + status = readl(ps2if->base + REG_STAT); + if (!status) + return IRQ_NONE; + + /* Check if there is timeout interrupt */ + if (status & (REG_STAT_RX_TIMEOUT|REG_STAT_TX_TIMEOUT)) + val |= REG_INTR_TIMEOUT; + + rxcount = STAT_RX_COUNTER(status); + for (i = 0; i < rxcount; i++) { + scancode = readl(ps2if->base + REG_RX) & 0x1ff; + + if (rxcount <= 16 && scancode != 0x1ff) { + flag = ((scancode & REG_DATA_PARITY) ? 
SERIO_PARITY : 0); + serio_interrupt(ps2if->io, scancode & 0xff, flag); + } + } + + val |= REG_INTR_RX; + writel(val, ps2if->base + REG_INTR); + + return IRQ_HANDLED; +} + +int phytium_ps2_write(struct serio *serio, unsigned char val) +{ + struct phytium_ps2_data *ps2if = serio->port_data; + unsigned int stat; + + do { + stat = readl(ps2if->base + REG_STAT); + cpu_relax(); + } while (stat & REG_STAT_TX_FULL); + + writel(val, ps2if->base + REG_TX); + + return 0; +} + +int phytium_ps2_open(struct serio *io) +{ + struct phytium_ps2_data *ps2if = io->port_data; + + writel(REG_CTRL_RESET, ps2if->base + REG_CTRL); + /* Wait 4ms for the controller to be reset */ + usleep_range(4000, 6000); + writel(REG_CTRL_ENABLE, ps2if->base + REG_CTRL); + + return 0; +} + +void phytium_ps2_close(struct serio *io) +{ + struct phytium_ps2_data *ps2if = io->port_data; + + writel(0, ps2if->base + REG_CTRL); +} + +static int phytium_pci_ps2_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct phytium_ps2_data *ps2if; + struct serio *serio; + int ret; + + ret = pcim_enable_device(pdev); + if (ret) + goto out; + + ret = pcim_iomap_regions(pdev, 0x1, DRV_NAME); + if (ret) + goto out; + + ps2if = devm_kzalloc(&pdev->dev, sizeof(struct phytium_ps2_data), GFP_KERNEL); + serio = kzalloc(sizeof(struct serio), GFP_KERNEL); + if (!ps2if || !serio) { + ret = -ENOMEM; + goto free; + } + + serio->id.type = SERIO_8042; + serio->write = phytium_ps2_write; + serio->open = phytium_ps2_open; + serio->close = phytium_ps2_close; + strlcpy(serio->name, pci_name(pdev), sizeof(serio->name)); + strlcpy(serio->phys, dev_name(&pdev->dev), sizeof(serio->phys)); + serio->port_data = ps2if; + serio->dev.parent = &pdev->dev; + ps2if->io = serio; + ps2if->dev = pdev; + ps2if->base = pcim_iomap_table(pdev)[0]; + + ret = devm_request_irq(&pdev->dev, pdev->irq, phytium_ps2_irq, + IRQF_SHARED, DRV_NAME, ps2if); + if (ret) { + dev_err(&pdev->dev, "could not request IRQ %d\n", pdev->irq); + goto free; + } + 
+ pci_set_drvdata(pdev, ps2if); + serio_register_port(ps2if->io); + + return 0; + +free: + kfree(serio); +out: + return ret; +} + +static void phytium_pci_ps2_remove(struct pci_dev *pdev) +{ + struct phytium_ps2_data *ps2if = pci_get_drvdata(pdev); + + serio_unregister_port(ps2if->io); + pcim_iounmap_regions(pdev, 0x1); +} + +static const struct pci_device_id phytium_pci_ps2_ids[] = { + { PCI_VDEVICE(PHYTIUM, 0xdc34) }, + {}, +}; +MODULE_DEVICE_TABLE(pci, phytium_pci_ps2_ids); + +static struct pci_driver phytium_pci_ps2_driver = { + .name = DRV_NAME, + .id_table = phytium_pci_ps2_ids, + .probe = phytium_pci_ps2_probe, + .remove = phytium_pci_ps2_remove, +}; +module_pci_driver(phytium_pci_ps2_driver); + +MODULE_AUTHOR("Cheng Quan "); +MODULE_DESCRIPTION("Phytium PCI PS/2 controller driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 0c3b8f1c7225..56a4675cd659 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -53,9 +53,15 @@ #include +#include + #include "io-pgtable.h" #include "arm-smmu-regs.h" +#ifdef CONFIG_ARCH_PHYTIUM +#define FWID_READ(id) (((u16)(id) >> 3) | (((id) >> SMR_MASK_SHIFT | 0x7000) << SMR_MASK_SHIFT)) +#endif + /* * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU * global register space are still, in fact, using a hypervisor to mediate it @@ -1392,6 +1398,17 @@ static int arm_smmu_add_device(struct device *dev) return -ENODEV; } +#ifdef CONFIG_ARCH_PHYTIUM + /* FT2000PLUS workaround patch */ + if ((read_cpuid_id() & MIDR_CPU_MODEL_MASK) == MIDR_PHYTIUM_FT2000PLUS) { + int num = fwspec->num_ids; + for (i = 0; i < num; i++) { + u32 fwid = FWID_READ(fwspec->ids[i]); + iommu_fwspec_add_ids(dev, &fwid, 1); + } + } +#endif + ret = -EINVAL; for (i = 0; i < fwspec->num_ids; i++) { u16 sid = fwspec->ids[i]; diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 85ef6c9bc898..9c02fa383801 100644 --- a/drivers/iommu/iommu.c +++ 
b/drivers/iommu/iommu.c @@ -33,6 +33,9 @@ #include #include #include +#ifdef CONFIG_ARCH_PHYTIUM +#include +#endif static struct kset *iommu_group_kset; static DEFINE_IDA(iommu_group_ida); @@ -126,7 +129,19 @@ static int __init iommu_set_def_domain_type(char *str) if (ret) return ret; +#ifdef CONFIG_ARCH_PHYTIUM + /* + * Always set default iommu type to IOMMU_DOMAIN_IDENTITY + * on Phytium FT-2000+ SoC to avoid unnecessary troubles + * introduced by the SMMU workaround. + */ + if ((read_cpuid_id() & MIDR_CPU_MODEL_MASK) == MIDR_PHYTIUM_FT2000PLUS) + iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY; + else + iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA; +#else iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA; +#endif return 0; } early_param("iommu.passthrough", iommu_set_def_domain_type); @@ -1241,6 +1256,16 @@ int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops) bus->iommu_ops = ops; +#ifdef CONFIG_ARCH_PHYTIUM + /* + * Always set default iommu type to IOMMU_DOMAIN_IDENTITY + * on Phytium FT-2000+ SoC to avoid unnecessary troubles + * introduced by the SMMU workaround. 
+ */ + if ((read_cpuid_id() & MIDR_CPU_MODEL_MASK) == MIDR_PHYTIUM_FT2000PLUS) + iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY; +#endif + /* Do IOMMU specific setup for this bus-type */ err = iommu_bus_init(bus, ops); if (err) diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 8cb6800dbdfb..8a0d6bc66a50 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -57,6 +57,15 @@ config ARM_GIC_V3_ITS_FSL_MC depends on FSL_MC_BUS default ARM_GIC_V3_ITS +config ARM_GIC_PHYTIUM_2500 + bool + select IRQ_DOMAIN + select GENERIC_IRQ_MULTI_HANDLER + select IRQ_DOMAIN_HIERARCHY + select PARTITION_PERCPU + select GENERIC_IRQ_EFFECTIVE_AFF_MASK + select GENERIC_MSI_IRQ_DOMAIN + config ARM_NVIC bool select IRQ_DOMAIN @@ -371,6 +380,14 @@ config QCOM_PDC Power Domain Controller driver to manage and configure wakeup IRQs for Qualcomm Technologies Inc (QTI) mobile chips. +config PHYTIUM_IXIC + bool "Phytium D2000 SoC PCI Legacy Interrupt Controller" + depends on ARCH_PHYTIUM + select IRQ_DOMAIN + select IRQ_DOMAIN_HIERARCHY + help + This enables support PCI Legacy Interrupt on Phytium D2000 SoC. 
+ config SIFIVE_PLIC bool "SiFive Platform-Level Interrupt Controller" depends on RISCV diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index fbd1ec8070ef..e6fc39085149 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -31,6 +31,7 @@ obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-v3-mbi.o irq-gic-common.o obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o obj-$(CONFIG_ARM_GIC_V3_ITS_PCI) += irq-gic-v3-its-pci-msi.o obj-$(CONFIG_ARM_GIC_V3_ITS_FSL_MC) += irq-gic-v3-its-fsl-mc-msi.o +obj-$(CONFIG_ARM_GIC_PHYTIUM_2500) += irq-gic-phytium-2500.o irq-gic-phytium-2500-its.o obj-$(CONFIG_PARTITION_PERCPU) += irq-partition-percpu.o obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o obj-$(CONFIG_ARM_NVIC) += irq-nvic.o @@ -88,3 +89,4 @@ obj-$(CONFIG_GOLDFISH_PIC) += irq-goldfish-pic.o obj-$(CONFIG_NDS32) += irq-ativic32.o obj-$(CONFIG_QCOM_PDC) += qcom-pdc.o obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o +obj-$(CONFIG_PHYTIUM_IXIC) += irq-phytium-ixic.o diff --git a/drivers/irqchip/irq-gic-phytium-2500-its.c b/drivers/irqchip/irq-gic-phytium-2500-its.c new file mode 100644 index 000000000000..03be44d3782f --- /dev/null +++ b/drivers/irqchip/irq-gic-phytium-2500-its.c @@ -0,0 +1,4158 @@ +/* + * Copyright (C) 2020 Phytium Corporation. + * Author: Wang Yinfeng + * Chen Baozi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include "irq-gic-common.h" + +#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) +#define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1) +#define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2) +#define ITS_FLAGS_SAVE_SUSPEND_STATE (1ULL << 3) + +#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) +#define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1) + +static u32 lpi_id_bits; + +/* + * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to + * deal with (one configuration byte per interrupt). PENDBASE has to + * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI). + */ +#define LPI_NRBITS lpi_id_bits +#define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K) +#define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K) + +#define LPI_PROP_DEFAULT_PRIO 0xa0 + +/* + * Collection structure - just an ID, and a redistributor address to + * ping. We use one per CPU as a bag of interrupts assigned to this + * CPU. + */ +struct its_collection { + u64 target_address; + u16 col_id; +}; + +/* + * The ITS_BASER structure - contains memory information, cached + * value of BASER register configuration and ITS page size. + */ +struct its_baser { + void *base; + u64 val; + u32 order; + u32 psz; +}; + +struct its_device; + +/* + * The ITS structure - contains most of the infrastructure, with the + * top-level MSI domain, the command queue, the collections, and the + * list of devices writing to it. 
+ */ +struct its_node { + raw_spinlock_t lock; + struct list_head entry; + void __iomem *base; + phys_addr_t phys_base; + struct its_cmd_block *cmd_base; + struct its_cmd_block *cmd_write; + struct its_baser tables[GITS_BASER_NR_REGS]; + struct its_collection *collections; + struct fwnode_handle *fwnode_handle; + u64 (*get_msi_base)(struct its_device *its_dev); + u64 cbaser_save; + u32 ctlr_save; + struct list_head its_device_list; + u64 flags; + unsigned long list_nr; + u32 ite_size; + u32 device_ids; + int numa_node; + unsigned int msi_domain_flags; + u32 pre_its_base; /* for Socionext Synquacer */ + bool is_v4; + int vlpi_redist_offset; +}; + +#define ITS_ITT_ALIGN SZ_256 + +/* The maximum number of VPEID bits supported by VLPI commands */ +#define ITS_MAX_VPEID_BITS (16) +#define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS)) + +/* Convert page order to size in bytes */ +#define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o)) + +struct event_lpi_map { + unsigned long *lpi_map; + u16 *col_map; + irq_hw_number_t lpi_base; + int nr_lpis; + struct mutex vlpi_lock; + struct its_vm *vm; + struct its_vlpi_map *vlpi_maps; + int nr_vlpis; +}; + +/* + * The ITS view of a device - belongs to an ITS, owns an interrupt + * translation table, and a list of interrupts. If it some of its + * LPIs are injected into a guest (GICv4), the event_map.vm field + * indicates which one. 
+ */ +struct its_device { + struct list_head entry; + struct its_node *its; + struct event_lpi_map event_map; + void *itt; + u32 nr_ites; + u32 device_id; +}; + +static struct { + raw_spinlock_t lock; + struct its_device *dev; + struct its_vpe **vpes; + int next_victim; +} vpe_proxy; + +struct cpu_lpi_count { + atomic_t managed; + atomic_t unmanaged; +}; + +static DEFINE_PER_CPU(struct cpu_lpi_count, cpu_lpi_count); + +static LIST_HEAD(its_nodes); +static DEFINE_RAW_SPINLOCK(its_lock); +static struct rdists *gic_rdists; +static struct irq_domain *its_parent; + +static unsigned long its_list_map; +static u16 vmovp_seq_num; +static DEFINE_RAW_SPINLOCK(vmovp_lock); + +static DEFINE_IDA(its_vpeid_ida); + +#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist)) +#define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu)) +#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) +#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K) + +static struct its_collection *dev_event_to_col(struct its_device *its_dev, + u32 event) +{ + struct its_node *its = its_dev->its; + + return its->collections + its_dev->event_map.col_map[event]; +} + +static struct its_collection *valid_col(struct its_collection *col) +{ + if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(0, 15))) + return NULL; + + return col; +} + +static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe) +{ + if (valid_col(its->collections + vpe->col_idx)) + return vpe; + + return NULL; +} + +/* + * ITS command descriptors - parameters to be encoded in a command + * block. 
+ */ +struct its_cmd_desc { + union { + struct { + struct its_device *dev; + u32 event_id; + } its_inv_cmd; + + struct { + struct its_device *dev; + u32 event_id; + } its_clear_cmd; + + struct { + struct its_device *dev; + u32 event_id; + } its_int_cmd; + + struct { + struct its_device *dev; + int valid; + } its_mapd_cmd; + + struct { + struct its_collection *col; + int valid; + } its_mapc_cmd; + + struct { + struct its_device *dev; + u32 phys_id; + u32 event_id; + } its_mapti_cmd; + + struct { + struct its_device *dev; + struct its_collection *col; + u32 event_id; + } its_movi_cmd; + + struct { + struct its_device *dev; + u32 event_id; + } its_discard_cmd; + + struct { + struct its_collection *col; + } its_invall_cmd; + + struct { + struct its_vpe *vpe; + } its_vinvall_cmd; + + struct { + struct its_vpe *vpe; + struct its_collection *col; + bool valid; + } its_vmapp_cmd; + + struct { + struct its_vpe *vpe; + struct its_device *dev; + u32 virt_id; + u32 event_id; + bool db_enabled; + } its_vmapti_cmd; + + struct { + struct its_vpe *vpe; + struct its_device *dev; + u32 event_id; + bool db_enabled; + } its_vmovi_cmd; + + struct { + struct its_vpe *vpe; + struct its_collection *col; + u16 seq_num; + u16 its_list; + } its_vmovp_cmd; + }; +}; + +/* + * The ITS command block, which is what the ITS actually parses. 
+ */ +struct its_cmd_block { + u64 raw_cmd[4]; +}; + +#define ITS_CMD_QUEUE_SZ SZ_64K +#define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block)) + +typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *, + struct its_cmd_block *, + struct its_cmd_desc *); + +typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *, + struct its_cmd_block *, + struct its_cmd_desc *); + +static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l) +{ + u64 mask = GENMASK_ULL(h, l); + *raw_cmd &= ~mask; + *raw_cmd |= (val << l) & mask; +} + +static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr) +{ + its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0); +} + +static void its_encode_devid(struct its_cmd_block *cmd, u32 devid) +{ + its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32); +} + +static void its_encode_event_id(struct its_cmd_block *cmd, u32 id) +{ + its_mask_encode(&cmd->raw_cmd[1], id, 31, 0); +} + +static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id) +{ + its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32); +} + +static void its_encode_size(struct its_cmd_block *cmd, u8 size) +{ + its_mask_encode(&cmd->raw_cmd[1], size, 4, 0); +} + +static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr) +{ + its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8); +} + +static void its_encode_valid(struct its_cmd_block *cmd, int valid) +{ + its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63); +} + +static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr) +{ + its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16); +} + +static void its_encode_collection(struct its_cmd_block *cmd, u16 col) +{ + its_mask_encode(&cmd->raw_cmd[2], col, 15, 0); +} + +static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid) +{ + its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32); +} + +static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id) +{ + its_mask_encode(&cmd->raw_cmd[2], virt_id, 
31, 0); +} + +static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id) +{ + its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32); +} + +static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid) +{ + its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0); +} + +static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num) +{ + its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32); +} + +static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list) +{ + its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0); +} + +static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa) +{ + its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16); +} + +static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size) +{ + its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0); +} + +static inline void its_fixup_cmd(struct its_cmd_block *cmd) +{ + /* Let's fixup BE commands */ + cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]); + cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]); + cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]); + cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]); +} + +static struct its_collection *its_build_mapd_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + unsigned long itt_addr; + u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites); + + itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt); + itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN); + + its_encode_cmd(cmd, GITS_CMD_MAPD); + its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id); + its_encode_size(cmd, size - 1); + its_encode_itt(cmd, itt_addr); + its_encode_valid(cmd, desc->its_mapd_cmd.valid); + + its_fixup_cmd(cmd); + + return NULL; +} + +static struct its_collection *its_build_mapc_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + its_encode_cmd(cmd, GITS_CMD_MAPC); + its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); + its_encode_target(cmd, 
desc->its_mapc_cmd.col->target_address); + its_encode_valid(cmd, desc->its_mapc_cmd.valid); + + its_fixup_cmd(cmd); + + return desc->its_mapc_cmd.col; +} + +static struct its_collection *its_build_mapti_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_mapti_cmd.dev, + desc->its_mapti_cmd.event_id); + if (is_kdump_kernel()) + col->col_id = col->col_id % 65; + else + col->col_id = col->col_id % 64; + + its_encode_cmd(cmd, GITS_CMD_MAPTI); + its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_mapti_cmd.event_id); + its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id); + its_encode_collection(cmd, col->col_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_movi_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_movi_cmd.dev, + desc->its_movi_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_MOVI); + its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_movi_cmd.event_id); + its_encode_collection(cmd, desc->its_movi_cmd.col->col_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_discard_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_discard_cmd.dev, + desc->its_discard_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_DISCARD); + its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_discard_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_inv_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = 
dev_event_to_col(desc->its_inv_cmd.dev, + desc->its_inv_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_INV); + its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_inv_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_int_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_int_cmd.dev, + desc->its_int_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_INT); + its_encode_devid(cmd, desc->its_int_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_int_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_clear_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_clear_cmd.dev, + desc->its_clear_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_CLEAR); + its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_clear_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_invall_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + its_encode_cmd(cmd, GITS_CMD_INVALL); + its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); + + its_fixup_cmd(cmd); + + return NULL; +} + +static struct its_vpe *its_build_vinvall_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + its_encode_cmd(cmd, GITS_CMD_VINVALL); + its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vinvall_cmd.vpe); +} + +static struct its_vpe *its_build_vmapp_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + unsigned long vpt_addr; + u64 target; + + vpt_addr = 
virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page)); + target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset; + + its_encode_cmd(cmd, GITS_CMD_VMAPP); + its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id); + its_encode_valid(cmd, desc->its_vmapp_cmd.valid); + its_encode_target(cmd, target); + its_encode_vpt_addr(cmd, vpt_addr); + its_encode_vpt_size(cmd, LPI_NRBITS - 1); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vmapp_cmd.vpe); +} + +static struct its_vpe *its_build_vmapti_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + u32 db; + + if (desc->its_vmapti_cmd.db_enabled) + db = desc->its_vmapti_cmd.vpe->vpe_db_lpi; + else + db = 1023; + + its_encode_cmd(cmd, GITS_CMD_VMAPTI); + its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id); + its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id); + its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id); + its_encode_db_phys_id(cmd, db); + its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vmapti_cmd.vpe); +} + +static struct its_vpe *its_build_vmovi_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + u32 db; + + if (desc->its_vmovi_cmd.db_enabled) + db = desc->its_vmovi_cmd.vpe->vpe_db_lpi; + else + db = 1023; + + its_encode_cmd(cmd, GITS_CMD_VMOVI); + its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id); + its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id); + its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id); + its_encode_db_phys_id(cmd, db); + its_encode_db_valid(cmd, true); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vmovi_cmd.vpe); +} + +static struct its_vpe *its_build_vmovp_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + u64 target; + + target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset; + its_encode_cmd(cmd, GITS_CMD_VMOVP); + 
its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num); + its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list); + its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id); + its_encode_target(cmd, target); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vmovp_cmd.vpe); +} + +static u64 its_cmd_ptr_to_offset(struct its_node *its, + struct its_cmd_block *ptr) +{ + return (ptr - its->cmd_base) * sizeof(*ptr); +} + +static int its_queue_full(struct its_node *its) +{ + int widx; + int ridx; + + widx = its->cmd_write - its->cmd_base; + ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block); + + /* This is incredibly unlikely to happen, unless the ITS locks up. */ + if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx) + return 1; + + return 0; +} + +static struct its_cmd_block *its_allocate_entry(struct its_node *its) +{ + struct its_cmd_block *cmd; + u32 count = 1000000; /* 1s! */ + + while (its_queue_full(its)) { + count--; + if (!count) { + pr_err_ratelimited("ITS queue not draining\n"); + return NULL; + } + cpu_relax(); + udelay(1); + } + + cmd = its->cmd_write++; + + /* Handle queue wrapping */ + if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES)) + its->cmd_write = its->cmd_base; + + /* Clear command */ + cmd->raw_cmd[0] = 0; + cmd->raw_cmd[1] = 0; + cmd->raw_cmd[2] = 0; + cmd->raw_cmd[3] = 0; + + return cmd; +} + +static struct its_cmd_block *its_post_commands(struct its_node *its) +{ + u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write); + + writel_relaxed(wr, its->base + GITS_CWRITER); + + return its->cmd_write; +} + +static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd) +{ + /* + * Make sure the commands written to memory are observable by + * the ITS. 
+ */ + if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING) + gic_flush_dcache_to_poc(cmd, sizeof(*cmd)); + else + dsb(ishst); +} + +static int its_wait_for_range_completion(struct its_node *its, + struct its_cmd_block *from, + struct its_cmd_block *to) +{ + u64 rd_idx, from_idx, to_idx; + u32 count = 1000000; /* 1s! */ + + from_idx = its_cmd_ptr_to_offset(its, from); + to_idx = its_cmd_ptr_to_offset(its, to); + + while (1) { + rd_idx = readl_relaxed(its->base + GITS_CREADR); + + /* Direct case */ + if (from_idx < to_idx && rd_idx >= to_idx) + break; + + /* Wrapped case */ + if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx) + break; + + count--; + if (!count) { + pr_err_ratelimited("ITS queue timeout (%llu %llu %llu)\n", + from_idx, to_idx, rd_idx); + return -1; + } + cpu_relax(); + udelay(1); + } + + return 0; +} + +/* Warning, macro hell follows */ +#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn) \ +void name(struct its_node *its, \ + buildtype builder, \ + struct its_cmd_desc *desc) \ +{ \ + struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \ + synctype *sync_obj; \ + unsigned long flags; \ + \ + raw_spin_lock_irqsave(&its->lock, flags); \ + \ + cmd = its_allocate_entry(its); \ + if (!cmd) { /* We're soooooo screewed... 
*/ \ + raw_spin_unlock_irqrestore(&its->lock, flags); \ + return; \ + } \ + sync_obj = builder(its, cmd, desc); \ + its_flush_cmd(its, cmd); \ + \ + if (sync_obj) { \ + sync_cmd = its_allocate_entry(its); \ + if (!sync_cmd) \ + goto post; \ + \ + buildfn(its, sync_cmd, sync_obj); \ + its_flush_cmd(its, sync_cmd); \ + } \ + \ +post: \ + next_cmd = its_post_commands(its); \ + raw_spin_unlock_irqrestore(&its->lock, flags); \ + \ + if (its_wait_for_range_completion(its, cmd, next_cmd)) \ + pr_err_ratelimited("ITS cmd %ps failed\n", builder); \ +} + +static void its_build_sync_cmd(struct its_node *its, + struct its_cmd_block *sync_cmd, + struct its_collection *sync_col) +{ + its_encode_cmd(sync_cmd, GITS_CMD_SYNC); + its_encode_target(sync_cmd, sync_col->target_address); + + its_fixup_cmd(sync_cmd); +} + +static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t, + struct its_collection, its_build_sync_cmd) + +static void its_build_vsync_cmd(struct its_node *its, + struct its_cmd_block *sync_cmd, + struct its_vpe *sync_vpe) +{ + its_encode_cmd(sync_cmd, GITS_CMD_VSYNC); + its_encode_vpeid(sync_cmd, sync_vpe->vpe_id); + + its_fixup_cmd(sync_cmd); +} + +static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t, + struct its_vpe, its_build_vsync_cmd) + +static void its_send_int(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + desc.its_int_cmd.dev = dev; + desc.its_int_cmd.event_id = event_id; + + its_send_single_command(dev->its, its_build_int_cmd, &desc); +} + +static void its_send_clear(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + desc.its_clear_cmd.dev = dev; + desc.its_clear_cmd.event_id = event_id; + + its_send_single_command(dev->its, its_build_clear_cmd, &desc); +} + +static void its_send_inv(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + desc.its_inv_cmd.dev = dev; + desc.its_inv_cmd.event_id = event_id; + + its_send_single_command(dev->its, 
its_build_inv_cmd, &desc); +} + +static void its_send_mapd(struct its_device *dev, int valid) +{ + struct its_cmd_desc desc; + + desc.its_mapd_cmd.dev = dev; + desc.its_mapd_cmd.valid = !!valid; + + its_send_single_command(dev->its, its_build_mapd_cmd, &desc); +} + +static void its_send_mapc(struct its_node *its, struct its_collection *col, + int valid) +{ + struct its_cmd_desc desc; + + desc.its_mapc_cmd.col = col; + desc.its_mapc_cmd.valid = !!valid; + + its_send_single_command(its, its_build_mapc_cmd, &desc); +} + +static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id) +{ + struct its_cmd_desc desc; + + desc.its_mapti_cmd.dev = dev; + desc.its_mapti_cmd.phys_id = irq_id; + desc.its_mapti_cmd.event_id = id; + + its_send_single_command(dev->its, its_build_mapti_cmd, &desc); +} + +static void its_send_movi(struct its_device *dev, + struct its_collection *col, u32 id) +{ + struct its_cmd_desc desc; + + desc.its_movi_cmd.dev = dev; + desc.its_movi_cmd.col = col; + desc.its_movi_cmd.event_id = id; + + its_send_single_command(dev->its, its_build_movi_cmd, &desc); +} + +static void its_send_discard(struct its_device *dev, u32 id) +{ + struct its_cmd_desc desc; + + desc.its_discard_cmd.dev = dev; + desc.its_discard_cmd.event_id = id; + + its_send_single_command(dev->its, its_build_discard_cmd, &desc); +} + +static void its_send_invall(struct its_node *its, struct its_collection *col) +{ + struct its_cmd_desc desc; + + desc.its_invall_cmd.col = col; + + its_send_single_command(its, its_build_invall_cmd, &desc); +} + +static void its_send_vmapti(struct its_device *dev, u32 id) +{ + struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id]; + struct its_cmd_desc desc; + + desc.its_vmapti_cmd.vpe = map->vpe; + desc.its_vmapti_cmd.dev = dev; + desc.its_vmapti_cmd.virt_id = map->vintid; + desc.its_vmapti_cmd.event_id = id; + desc.its_vmapti_cmd.db_enabled = map->db_enabled; + + its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc); +} + +static void 
its_send_vmovi(struct its_device *dev, u32 id) +{ + struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id]; + struct its_cmd_desc desc; + + desc.its_vmovi_cmd.vpe = map->vpe; + desc.its_vmovi_cmd.dev = dev; + desc.its_vmovi_cmd.event_id = id; + desc.its_vmovi_cmd.db_enabled = map->db_enabled; + + its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc); +} + +static void its_send_vmapp(struct its_node *its, + struct its_vpe *vpe, bool valid) +{ + struct its_cmd_desc desc; + + desc.its_vmapp_cmd.vpe = vpe; + desc.its_vmapp_cmd.valid = valid; + desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx]; + + its_send_single_vcommand(its, its_build_vmapp_cmd, &desc); +} + +static void its_send_vmovp(struct its_vpe *vpe) +{ + struct its_cmd_desc desc; + struct its_node *its; + unsigned long flags; + int col_id = vpe->col_idx; + + desc.its_vmovp_cmd.vpe = vpe; + desc.its_vmovp_cmd.its_list = (u16)its_list_map; + + if (!its_list_map) { + its = list_first_entry(&its_nodes, struct its_node, entry); + desc.its_vmovp_cmd.seq_num = 0; + desc.its_vmovp_cmd.col = &its->collections[col_id]; + its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); + return; + } + + /* + * Yet another marvel of the architecture. If using the + * its_list "feature", we need to make sure that all ITSs + * receive all VMOVP commands in the same order. The only way + * to guarantee this is to make vmovp a serialization point. + * + * Wall <-- Head. 
+ */ + raw_spin_lock_irqsave(&vmovp_lock, flags); + + desc.its_vmovp_cmd.seq_num = vmovp_seq_num++; + + /* Emit VMOVPs */ + list_for_each_entry(its, &its_nodes, entry) { + if (!its->is_v4) + continue; + + if (!vpe->its_vm->vlpi_count[its->list_nr]) + continue; + + desc.its_vmovp_cmd.col = &its->collections[col_id]; + its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); + } + + raw_spin_unlock_irqrestore(&vmovp_lock, flags); +} + +static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe) +{ + struct its_cmd_desc desc; + + desc.its_vinvall_cmd.vpe = vpe; + its_send_single_vcommand(its, its_build_vinvall_cmd, &desc); +} + +/* + * irqchip functions - assumes MSI, mostly. + */ + +static inline u32 its_get_event_id(struct irq_data *d) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + return d->hwirq - its_dev->event_map.lpi_base; +} + +static void lpi_write_config(struct irq_data *d, u8 clr, u8 set) +{ + irq_hw_number_t hwirq; + void *va; + u8 *cfg; + + if (irqd_is_forwarded_to_vcpu(d)) { + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + struct its_vlpi_map *map; + + va = page_address(its_dev->event_map.vm->vprop_page); + map = &its_dev->event_map.vlpi_maps[event]; + hwirq = map->vintid; + + /* Remember the updated property */ + map->properties &= ~clr; + map->properties |= set | LPI_PROP_GROUP1; + } else { + va = gic_rdists->prop_table_va; + hwirq = d->hwirq; + } + + cfg = va + hwirq - 8192; + *cfg &= ~clr; + *cfg |= set | LPI_PROP_GROUP1; + + /* + * Make the above write visible to the redistributors. + * And yes, we're flushing exactly: One. Single. Byte. + * Humpf... 
+ */ + if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING) + gic_flush_dcache_to_poc(cfg, sizeof(*cfg)); + else + dsb(ishst); +} + +static void lpi_update_config(struct irq_data *d, u8 clr, u8 set) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + + lpi_write_config(d, clr, set); + its_send_inv(its_dev, its_get_event_id(d)); +} + +static void its_vlpi_set_doorbell(struct irq_data *d, bool enable) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + + if (its_dev->event_map.vlpi_maps[event].db_enabled == enable) + return; + + its_dev->event_map.vlpi_maps[event].db_enabled = enable; + + /* + * More fun with the architecture: + * + * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI + * value or to 1023, depending on the enable bit. But that + * would be issueing a mapping for an /existing/ DevID+EventID + * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI + * to the /same/ vPE, using this opportunity to adjust the + * doorbell. Mouahahahaha. We loves it, Precious. 
+ */ + its_send_vmovi(its_dev, event); +} + +static void its_mask_irq(struct irq_data *d) +{ + if (irqd_is_forwarded_to_vcpu(d)) + its_vlpi_set_doorbell(d, false); + + lpi_update_config(d, LPI_PROP_ENABLED, 0); +} + +static void its_unmask_irq(struct irq_data *d) +{ + if (irqd_is_forwarded_to_vcpu(d)) + its_vlpi_set_doorbell(d, true); + + lpi_update_config(d, 0, LPI_PROP_ENABLED); +} + +static __maybe_unused u32 its_read_lpi_count(struct irq_data *d, int cpu) +{ + if (irqd_affinity_is_managed(d)) + return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed); + + return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged); +} + +static void its_inc_lpi_count(struct irq_data *d, int cpu) +{ + if (irqd_affinity_is_managed(d)) + atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed); + else + atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged); +} + +static void its_dec_lpi_count(struct irq_data *d, int cpu) +{ + if (irqd_affinity_is_managed(d)) + atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed); + else + atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged); +} + +static unsigned int cpumask_pick_least_loaded(unsigned int target_cpu, struct irq_data *d, + const struct cpumask *cpu_mask) +{ + unsigned int cpu = target_cpu, tmp, target_skt, dest_skt; + int count = S32_MAX; + + target_skt = (cpu_logical_map(target_cpu) >> 16) & 0xff; + + for_each_cpu(tmp, cpu_mask) { + int this_count = its_read_lpi_count(d, tmp); + dest_skt = (cpu_logical_map(tmp) >> 16) & 0xff; + + if ((this_count < count) && (dest_skt == target_skt)) { + cpu = tmp; + count = this_count; + } + } + + return cpu; +} + +/* + * As suggested by Thomas Gleixner in: + * https://lore.kernel.org/r/87h80q2aoc.fsf@nanos.tec.linutronix.de + */ +static int its_select_cpu(unsigned int target_cpu, struct irq_data *d, + const struct cpumask *aff_mask) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + cpumask_var_t tmpmask; + int cpu = target_cpu, node; + + if 
(!alloc_cpumask_var(&tmpmask, GFP_ATOMIC)) + return -ENOMEM; + + node = its_dev->its->numa_node; + + if (!irqd_affinity_is_managed(d)) { + /* First try the NUMA node */ + if (node != NUMA_NO_NODE) { + /* + * Try the intersection of the affinity mask and the + * node mask (and the online mask, just to be safe). + */ + cpumask_and(tmpmask, cpumask_of_node(node), aff_mask); + cpumask_and(tmpmask, tmpmask, cpu_online_mask); + + /* + * Ideally, we would check if the mask is empty, and + * try again on the full node here. + * + * But it turns out that the way ACPI describes the + * affinity for ITSs only deals about memory, and + * not target CPUs, so it cannot describe a single + * ITS placed next to two NUMA nodes. + * + * Instead, just fallback on the online mask. This + * diverges from Thomas' suggestion above. + */ + cpu = cpumask_pick_least_loaded(target_cpu, d, tmpmask); + if (cpu < nr_cpu_ids) + goto out; + + /* If we can't cross sockets, give up */ + if ((its_dev->its->flags & + ITS_FLAGS_WORKAROUND_CAVIUM_23144)) + goto out; + + /* If the above failed, expand the search */ + } + + /* Try the intersection of the affinity and online masks */ + cpumask_and(tmpmask, aff_mask, cpu_online_mask); + + /* If that doesn't fly, the online mask is the last resort */ + if (cpumask_empty(tmpmask)) + cpumask_copy(tmpmask, cpu_online_mask); + + cpu = cpumask_pick_least_loaded(target_cpu, d, tmpmask); + } else { + cpumask_and(tmpmask, irq_data_get_affinity_mask(d), + cpu_online_mask); + + /* If we cannot cross sockets, limit the search to that node */ + if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) && + node != NUMA_NO_NODE) + cpumask_and(tmpmask, tmpmask, cpumask_of_node(node)); + + cpu = cpumask_pick_least_loaded(target_cpu, d, tmpmask); + } +out: + free_cpumask_var(tmpmask); + + pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, + cpumask_pr_args(aff_mask), cpu); + return cpu; +} + +#define MAX_MARS3_SKT_COUNT 8 + +static int its_cpumask_select(struct its_device 
*its_dev, + const struct cpumask *mask_val, + const struct cpumask *cpu_mask) +{ + unsigned int skt, skt_id, i; + phys_addr_t its_phys_base; + unsigned int cpu, cpus = 0; + + unsigned int skt_cpu_cnt[MAX_MARS3_SKT_COUNT] = {0}; + + its_phys_base = its_dev->its->phys_base; + skt_id = (its_phys_base >> 41) & 0x7; + + for (i = 0; i < nr_cpu_ids; i++) { + skt = (cpu_logical_map(i) >> 16) & 0xff; + if ((skt >= 0) && (skt < MAX_MARS3_SKT_COUNT)) { + if ((is_kdump_kernel()) && (skt_id == skt)) { + return i; + } + + skt_cpu_cnt[skt]++; + } + else if (skt != 0xff) + pr_err("socket address: %d is out of range.", skt); + } + + if (0 != skt_id) { + for (i = 0; i < skt_id; i++) + cpus += skt_cpu_cnt[i]; + } + + cpu = cpumask_any_and(mask_val, cpu_mask); + cpus = cpus + cpu % skt_cpu_cnt[skt_id]; + + return cpus; +} + +static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, + bool force) +{ + unsigned int cpu, target_cpu; + const struct cpumask *cpu_mask = cpu_online_mask; + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + struct its_collection *target_col; + u32 id = its_get_event_id(d); + int prev_cpu; + + /* A forwarded interrupt should use irq_set_vcpu_affinity */ + if (irqd_is_forwarded_to_vcpu(d)) + return -EINVAL; + + prev_cpu = its_dev->event_map.col_map[id]; + its_dec_lpi_count(d, prev_cpu); + + target_cpu = its_cpumask_select(its_dev, mask_val, cpu_mask); + if (!force) + cpu = its_select_cpu(target_cpu, d, mask_val); + else + cpu = cpumask_pick_least_loaded(target_cpu, d, mask_val); + + if (cpu < 0 || cpu >= nr_cpu_ids) + goto err; + + /* don't set the affinity when the target cpu is same as current one */ + if (cpu != prev_cpu) { + target_col = &its_dev->its->collections[cpu]; + its_send_movi(its_dev, target_col, id); + its_dev->event_map.col_map[id] = cpu; + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + } + + its_inc_lpi_count(d, cpu); + + return IRQ_SET_MASK_OK_DONE; + +err: + its_inc_lpi_count(d, prev_cpu); + return 
-EINVAL; +} + +static u64 its_irq_get_msi_base(struct its_device *its_dev) +{ + struct its_node *its = its_dev->its; + + return its->phys_base + GITS_TRANSLATER; +} + +static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + struct its_node *its; + u64 addr; + + its = its_dev->its; + addr = its->get_msi_base(its_dev); + + msg->address_lo = lower_32_bits(addr); + msg->address_hi = upper_32_bits(addr); + msg->data = its_get_event_id(d); +} + +static int its_irq_set_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool state) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + + if (which != IRQCHIP_STATE_PENDING) + return -EINVAL; + + if (state) + its_send_int(its_dev, event); + else + its_send_clear(its_dev, event); + + return 0; +} + +static void its_map_vm(struct its_node *its, struct its_vm *vm) +{ + unsigned long flags; + + /* Not using the ITS list? Everything is always mapped. */ + if (!its_list_map) + return; + + raw_spin_lock_irqsave(&vmovp_lock, flags); + + /* + * If the VM wasn't mapped yet, iterate over the vpes and get + * them mapped now. + */ + vm->vlpi_count[its->list_nr]++; + + if (vm->vlpi_count[its->list_nr] == 1) { + int i; + + for (i = 0; i < vm->nr_vpes; i++) { + struct its_vpe *vpe = vm->vpes[i]; + struct irq_data *d = irq_get_irq_data(vpe->irq); + + /* Map the VPE to the first possible CPU */ + vpe->col_idx = cpumask_first(cpu_online_mask); + its_send_vmapp(its, vpe, true); + its_send_vinvall(its, vpe); + irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); + } + } + + raw_spin_unlock_irqrestore(&vmovp_lock, flags); +} + +static void its_unmap_vm(struct its_node *its, struct its_vm *vm) +{ + unsigned long flags; + + /* Not using the ITS list? Everything is always mapped. 
*/ + if (!its_list_map) + return; + + raw_spin_lock_irqsave(&vmovp_lock, flags); + + if (!--vm->vlpi_count[its->list_nr]) { + int i; + + for (i = 0; i < vm->nr_vpes; i++) + its_send_vmapp(its, vm->vpes[i], false); + } + + raw_spin_unlock_irqrestore(&vmovp_lock, flags); +} + +static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + int ret = 0; + + if (!info->map) + return -EINVAL; + + mutex_lock(&its_dev->event_map.vlpi_lock); + + if (!its_dev->event_map.vm) { + struct its_vlpi_map *maps; + + maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps), + GFP_KERNEL); + if (!maps) { + ret = -ENOMEM; + goto out; + } + + its_dev->event_map.vm = info->map->vm; + its_dev->event_map.vlpi_maps = maps; + } else if (its_dev->event_map.vm != info->map->vm) { + ret = -EINVAL; + goto out; + } + + /* Get our private copy of the mapping information */ + its_dev->event_map.vlpi_maps[event] = *info->map; + + if (irqd_is_forwarded_to_vcpu(d)) { + /* Already mapped, move it around */ + its_send_vmovi(its_dev, event); + } else { + /* Ensure all the VPEs are mapped on this ITS */ + its_map_vm(its_dev->its, info->map->vm); + + /* + * Flag the interrupt as forwarded so that we can + * start poking the virtual property table. 
+ */ + irqd_set_forwarded_to_vcpu(d); + + /* Write out the property to the prop table */ + lpi_write_config(d, 0xff, info->map->properties); + + /* Drop the physical mapping */ + its_send_discard(its_dev, event); + + /* and install the virtual one */ + its_send_vmapti(its_dev, event); + + /* Increment the number of VLPIs */ + its_dev->event_map.nr_vlpis++; + } + +out: + mutex_unlock(&its_dev->event_map.vlpi_lock); + return ret; +} + +static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + int ret = 0; + + mutex_lock(&its_dev->event_map.vlpi_lock); + + if (!its_dev->event_map.vm || + !its_dev->event_map.vlpi_maps[event].vm) { + ret = -EINVAL; + goto out; + } + + /* Copy our mapping information to the incoming request */ + *info->map = its_dev->event_map.vlpi_maps[event]; + +out: + mutex_unlock(&its_dev->event_map.vlpi_lock); + return ret; +} + +static int its_vlpi_unmap(struct irq_data *d) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + int ret = 0; + + mutex_lock(&its_dev->event_map.vlpi_lock); + + if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) { + ret = -EINVAL; + goto out; + } + + /* Drop the virtual mapping */ + its_send_discard(its_dev, event); + + /* and restore the physical one */ + irqd_clr_forwarded_to_vcpu(d); + its_send_mapti(its_dev, d->hwirq, event); + lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO | + LPI_PROP_ENABLED | + LPI_PROP_GROUP1)); + + /* Potentially unmap the VM from this ITS */ + its_unmap_vm(its_dev->its, its_dev->event_map.vm); + + /* + * Drop the refcount and make the device available again if + * this was the last VLPI. 
+ */ + if (!--its_dev->event_map.nr_vlpis) { + its_dev->event_map.vm = NULL; + kfree(its_dev->event_map.vlpi_maps); + } + +out: + mutex_unlock(&its_dev->event_map.vlpi_lock); + return ret; +} + +static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + + if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) + return -EINVAL; + + if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI) + lpi_update_config(d, 0xff, info->config); + else + lpi_write_config(d, 0xff, info->config); + its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED)); + + return 0; +} + +static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + struct its_cmd_info *info = vcpu_info; + + /* Need a v4 ITS */ + if (!its_dev->its->is_v4) + return -EINVAL; + + /* Unmap request? */ + if (!info) + return its_vlpi_unmap(d); + + switch (info->cmd_type) { + case MAP_VLPI: + return its_vlpi_map(d, info); + + case GET_VLPI: + return its_vlpi_get(d, info); + + case PROP_UPDATE_VLPI: + case PROP_UPDATE_AND_INV_VLPI: + return its_vlpi_prop_update(d, info); + + default: + return -EINVAL; + } +} + +static int its_irq_retrigger(struct irq_data *d) +{ + return !its_irq_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true); +} + +static struct irq_chip its_irq_chip = { + .name = "ITS", + .irq_mask = its_mask_irq, + .irq_unmask = its_unmask_irq, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_affinity = its_set_affinity, + .irq_compose_msi_msg = its_irq_compose_msi_msg, + .irq_set_irqchip_state = its_irq_set_irqchip_state, + .irq_retrigger = its_irq_retrigger, + .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity, +}; + + +/* + * How we allocate LPIs: + * + * lpi_range_list contains ranges of LPIs that are to available to + * allocate from. 
To allocate LPIs, just pick the first range that + * fits the required allocation, and reduce it by the required + * amount. Once empty, remove the range from the list. + * + * To free a range of LPIs, add a free range to the list, sort it and + * merge the result if the new range happens to be adjacent to an + * already free block. + * + * The consequence of the above is that allocation is cost is low, but + * freeing is expensive. We assumes that freeing rarely occurs. + */ +#define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */ + +static DEFINE_MUTEX(lpi_range_lock); +static LIST_HEAD(lpi_range_list); + +struct lpi_range { + struct list_head entry; + u32 base_id; + u32 span; +}; + +static struct lpi_range *mk_lpi_range(u32 base, u32 span) +{ + struct lpi_range *range; + + range = kzalloc(sizeof(*range), GFP_KERNEL); + if (range) { + INIT_LIST_HEAD(&range->entry); + range->base_id = base; + range->span = span; + } + + return range; +} + +static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b) +{ + struct lpi_range *ra, *rb; + + ra = container_of(a, struct lpi_range, entry); + rb = container_of(b, struct lpi_range, entry); + + return rb->base_id - ra->base_id; +} + +static void merge_lpi_ranges(void) +{ + struct lpi_range *range, *tmp; + + list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) { + if (!list_is_last(&range->entry, &lpi_range_list) && + (tmp->base_id == (range->base_id + range->span))) { + tmp->base_id = range->base_id; + tmp->span += range->span; + list_del(&range->entry); + kfree(range); + } + } +} + +static int alloc_lpi_range(u32 nr_lpis, u32 *base) +{ + struct lpi_range *range, *tmp; + int err = -ENOSPC; + + mutex_lock(&lpi_range_lock); + + list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) { + if (range->span >= nr_lpis) { + *base = range->base_id; + range->base_id += nr_lpis; + range->span -= nr_lpis; + + if (range->span == 0) { + list_del(&range->entry); + kfree(range); + } + + err = 0; + break; + } + } + + 
mutex_unlock(&lpi_range_lock); + + pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis); + return err; +} + +static int free_lpi_range(u32 base, u32 nr_lpis) +{ + struct lpi_range *new; + int err = 0; + + mutex_lock(&lpi_range_lock); + + new = mk_lpi_range(base, nr_lpis); + if (!new) { + err = -ENOMEM; + goto out; + } + + list_add(&new->entry, &lpi_range_list); + list_sort(NULL, &lpi_range_list, lpi_range_cmp); + merge_lpi_ranges(); +out: + mutex_unlock(&lpi_range_lock); + return err; +} + +static int __init its_lpi_init(u32 id_bits) +{ + u32 lpis = (1UL << id_bits) - 8192; + u32 numlpis; + int err; + + numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer); + + if (numlpis > 2 && !WARN_ON(numlpis > lpis)) { + lpis = numlpis; + pr_info("ITS: Using hypervisor restricted LPI range [%u]\n", + lpis); + } + + /* + * Initializing the allocator is just the same as freeing the + * full range of LPIs. + */ + err = free_lpi_range(8192, lpis); + pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis); + return err; +} + +static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids) +{ + unsigned long *bitmap = NULL; + int err = 0; + + do { + err = alloc_lpi_range(nr_irqs, base); + if (!err) + break; + + nr_irqs /= 2; + } while (nr_irqs > 0); + + if (err) + goto out; + + bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof (long), GFP_ATOMIC); + if (!bitmap) + goto out; + + *nr_ids = nr_irqs; + +out: + if (!bitmap) + *base = *nr_ids = 0; + + return bitmap; +} + +static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids) +{ + WARN_ON(free_lpi_range(base, nr_ids)); + kfree(bitmap); +} + +static void gic_reset_prop_table(void *va) +{ + /* Priority 0xa0, Group-1, disabled */ + memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ); + + /* Make sure the GIC will observe the written configuration */ + gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ); +} + +static struct page *its_allocate_prop_table(gfp_t gfp_flags) +{ + struct page *prop_page; + + 
prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ)); + if (!prop_page) + return NULL; + + gic_reset_prop_table(page_address(prop_page)); + + return prop_page; +} + +static void its_free_prop_table(struct page *prop_page) +{ + free_pages((unsigned long)page_address(prop_page), + get_order(LPI_PROPBASE_SZ)); +} + +static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size) +{ + phys_addr_t start, end, addr_end; + u64 i; + + /* + * We don't bother checking for a kdump kernel as by + * construction, the LPI tables are out of this kernel's + * memory map. + */ + if (is_kdump_kernel()) + return true; + + addr_end = addr + size - 1; + + for_each_reserved_mem_region(i, &start, &end) { + if (addr >= start && addr_end <= end) + return true; + } + + /* Not found, not a good sign... */ + pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n", + &addr, &addr_end); + add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); + return false; +} + +static int gic_reserve_range(phys_addr_t addr, unsigned long size) +{ + if (efi_enabled(EFI_CONFIG_TABLES)) + return efi_mem_reserve_persistent(addr, size); + + return 0; +} +static int __init its_setup_lpi_prop_table(void) +{ + if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) { + u64 val; + + val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER); + lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1; + + gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12); + gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa, + LPI_PROPBASE_SZ, + MEMREMAP_WB); + gic_reset_prop_table(gic_rdists->prop_table_va); + } else { + struct page *page; + + lpi_id_bits = min_t(u32, + GICD_TYPER_ID_BITS(gic_rdists->gicd_typer), + ITS_MAX_LPI_NRBITS); + page = its_allocate_prop_table(GFP_NOWAIT); + if (!page) { + pr_err("Failed to allocate PROPBASE\n"); + return -ENOMEM; + } + + gic_rdists->prop_table_pa = page_to_phys(page); + gic_rdists->prop_table_va = page_address(page); + 
WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa,
					  LPI_PROPBASE_SZ));
	}

	pr_info("GICv-2500: using LPI property table @%pa\n",
		&gic_rdists->prop_table_pa);

	return its_lpi_init(lpi_id_bits);
}

/* Human-readable GITS_BASER table-type names, used only in log messages. */
static const char *its_base_type_string[] = {
	[GITS_BASER_TYPE_DEVICE]	= "Devices",
	[GITS_BASER_TYPE_VCPU]		= "Virtual CPUs",
	[GITS_BASER_TYPE_RESERVED3]	= "Reserved (3)",
	[GITS_BASER_TYPE_COLLECTION]	= "Interrupt Collections",
	[GITS_BASER_TYPE_RESERVED5]	= "Reserved (5)",
	[GITS_BASER_TYPE_RESERVED6]	= "Reserved (6)",
	[GITS_BASER_TYPE_RESERVED7]	= "Reserved (7)",
};

/* Read GITS_BASER<n>, where n is @baser's index within its->tables. */
static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
{
	u32 idx = baser - its->tables;

	return gits_read_baser(its->base + GITS_BASER + (idx << 3));
}

/*
 * Write @val to GITS_BASER<n> and cache in baser->val what the hardware
 * actually retained - unsupported fields may not stick, and callers
 * compare against this read-back value to detect that.
 */
static void its_write_baser(struct its_node *its, struct its_baser *baser,
			    u64 val)
{
	u32 idx = baser - its->tables;

	gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
	baser->val = its_read_baser(its, baser);
}

/*
 * Allocate backing memory for one GITS_BASER table and program the
 * register, negotiating page size, shareability and cacheability by
 * writing and reading back (retries on mismatch further below).
 */
static int its_setup_baser(struct its_node *its, struct its_baser *baser,
			   u64 cache, u64 shr, u32 psz, u32 order,
			   bool indirect)
{
	u64 val = its_read_baser(its, baser);
	u64 esz = GITS_BASER_ENTRY_SIZE(val);
	u64 type = GITS_BASER_TYPE(val);
	u64 baser_phys, tmp;
	u32 alloc_pages;
	void *base;

retry_alloc_baser:
	/* Clamp to the architectural maximum number of ITS pages */
	alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
	if (alloc_pages > GITS_BASER_PAGES_MAX) {
		pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
			&its->phys_base, its_base_type_string[type],
			alloc_pages, GITS_BASER_PAGES_MAX);
		alloc_pages = GITS_BASER_PAGES_MAX;
		order = get_order(GITS_BASER_PAGES_MAX * psz);
	}

	base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!base)
		return -ENOMEM;

	baser_phys = virt_to_phys(base);

	/* Check if the physical address of the memory is above 48bits */
	if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {

		/* 52bit PA is supported only when PageSize=64K 
*/ + if (psz != SZ_64K) { + pr_err("ITS: no 52bit PA support when psz=%d\n", psz); + free_pages((unsigned long)base, order); + return -ENXIO; + } + + /* Convert 52bit PA to 48bit field */ + baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys); + } + +retry_baser: + val = (baser_phys | + (type << GITS_BASER_TYPE_SHIFT) | + ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) | + ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) | + cache | + shr | + GITS_BASER_VALID); + + val |= indirect ? GITS_BASER_INDIRECT : 0x0; + + switch (psz) { + case SZ_4K: + val |= GITS_BASER_PAGE_SIZE_4K; + break; + case SZ_16K: + val |= GITS_BASER_PAGE_SIZE_16K; + break; + case SZ_64K: + val |= GITS_BASER_PAGE_SIZE_64K; + break; + } + + its_write_baser(its, baser, val); + tmp = baser->val; + + if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) { + /* + * Shareability didn't stick. Just use + * whatever the read reported, which is likely + * to be the only thing this redistributor + * supports. If that's zero, make it + * non-cacheable as well. + */ + shr = tmp & GITS_BASER_SHAREABILITY_MASK; + if (!shr) { + cache = GITS_BASER_nC; + gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order)); + } + goto retry_baser; + } + + if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) { + /* + * Page size didn't stick. Let's try a smaller + * size and retry. If we reach 4K, then + * something is horribly wrong... + */ + free_pages((unsigned long)base, order); + baser->base = NULL; + + switch (psz) { + case SZ_16K: + psz = SZ_4K; + goto retry_alloc_baser; + case SZ_64K: + psz = SZ_16K; + goto retry_alloc_baser; + } + } + + if (val != tmp) { + pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n", + &its->phys_base, its_base_type_string[type], + val, tmp); + free_pages((unsigned long)base, order); + return -ENXIO; + } + + baser->order = order; + baser->base = base; + baser->psz = psz; + tmp = indirect ? 
GITS_LVL1_ENTRY_SIZE : esz; + + pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n", + &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp), + its_base_type_string[type], + (unsigned long)virt_to_phys(base), + indirect ? "indirect" : "flat", (int)esz, + psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); + + return 0; +} + +static bool its_parse_indirect_baser(struct its_node *its, + struct its_baser *baser, + u32 psz, u32 *order, u32 ids) +{ + u64 tmp = its_read_baser(its, baser); + u64 type = GITS_BASER_TYPE(tmp); + u64 esz = GITS_BASER_ENTRY_SIZE(tmp); + u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb; + u32 new_order = *order; + bool indirect = false; + + /* No need to enable Indirection if memory requirement < (psz*2)bytes */ + if ((esz << ids) > (psz * 2)) { + /* + * Find out whether hw supports a single or two-level table by + * table by reading bit at offset '62' after writing '1' to it. + */ + its_write_baser(its, baser, val | GITS_BASER_INDIRECT); + indirect = !!(baser->val & GITS_BASER_INDIRECT); + + if (indirect) { + /* + * The size of the lvl2 table is equal to ITS page size + * which is 'psz'. For computing lvl1 table size, + * subtract ID bits that sparse lvl2 table from 'ids' + * which is reported by ITS hardware times lvl1 table + * entry size. + */ + ids -= ilog2(psz / (int)esz); + esz = GITS_LVL1_ENTRY_SIZE; + } + } + + /* + * Allocate as many entries as required to fit the + * range of device IDs that the ITS can grok... The ID + * space being incredibly sparse, this results in a + * massive waste of memory if two-level device table + * feature is not supported by hardware. 
+ */ + new_order = max_t(u32, get_order(esz << ids), new_order); + if (new_order >= MAX_ORDER) { + new_order = MAX_ORDER - 1; + ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz); + pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n", + &its->phys_base, its_base_type_string[type], + its->device_ids, ids); + } + + *order = new_order; + + return indirect; +} + +static void its_free_tables(struct its_node *its) +{ + int i; + + for (i = 0; i < GITS_BASER_NR_REGS; i++) { + if (its->tables[i].base) { + free_pages((unsigned long)its->tables[i].base, + its->tables[i].order); + its->tables[i].base = NULL; + } + } +} + +static int its_alloc_tables(struct its_node *its) +{ + u64 shr = GITS_BASER_InnerShareable; + u64 cache = GITS_BASER_RaWaWb; + u32 psz = SZ_64K; + int err, i; + + if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) + /* erratum 24313: ignore memory access type */ + cache = GITS_BASER_nCnB; + + for (i = 0; i < GITS_BASER_NR_REGS; i++) { + struct its_baser *baser = its->tables + i; + u64 val = its_read_baser(its, baser); + u64 type = GITS_BASER_TYPE(val); + u32 order = get_order(psz); + bool indirect = false; + + switch (type) { + case GITS_BASER_TYPE_NONE: + continue; + + case GITS_BASER_TYPE_DEVICE: + indirect = its_parse_indirect_baser(its, baser, + psz, &order, + its->device_ids); + break; + + case GITS_BASER_TYPE_VCPU: + indirect = its_parse_indirect_baser(its, baser, + psz, &order, + ITS_MAX_VPEID_BITS); + break; + } + + err = its_setup_baser(its, baser, cache, shr, psz, order, indirect); + if (err < 0) { + its_free_tables(its); + return err; + } + + /* Update settings which will be used for next BASERn */ + psz = baser->psz; + cache = baser->val & GITS_BASER_CACHEABILITY_MASK; + shr = baser->val & GITS_BASER_SHAREABILITY_MASK; + } + + return 0; +} + +static int its_alloc_collections(struct its_node *its) +{ + int i; + + its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections), + GFP_KERNEL); + if (!its->collections) + return -ENOMEM; + 
+ for (i = 0; i < nr_cpu_ids; i++) + its->collections[i].target_address = ~0ULL; + + return 0; +} + +static struct page *its_allocate_pending_table(gfp_t gfp_flags) +{ + struct page *pend_page; + + pend_page = alloc_pages(gfp_flags | __GFP_ZERO, + get_order(LPI_PENDBASE_SZ)); + if (!pend_page) + return NULL; + + /* Make sure the GIC will observe the zero-ed page */ + gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ); + + return pend_page; +} + +static void its_free_pending_table(struct page *pt) +{ + free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ)); +} + +/* + * Booting with kdump and LPIs enabled is generally fine. Any other + * case is wrong in the absence of firmware/EFI support. + */ +static bool enabled_lpis_allowed(void) +{ + phys_addr_t addr; + u64 val; + + /* Check whether the property table is in a reserved region */ + val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER); + addr = val & GENMASK_ULL(51, 12); + + return gic_check_reserved_range(addr, LPI_PROPBASE_SZ); +} + +static int __init allocate_lpi_tables(void) +{ + u64 val; + int err, cpu; + + /* + * If LPIs are enabled while we run this from the boot CPU, + * flag the RD tables as pre-allocated if the stars do align. + */ + val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR); + if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) { + gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED | + RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING); + pr_info("GICv3: Using preallocated redistributor tables\n"); + } + + err = its_setup_lpi_prop_table(); + if (err) + return err; + + /* + * We allocate all the pending tables anyway, as we may have a + * mix of RDs that have had LPIs enabled, and some that + * don't. We'll free the unused ones as each CPU comes online. 
+ */ + for_each_possible_cpu(cpu) { + struct page *pend_page; + + pend_page = its_allocate_pending_table(GFP_NOWAIT); + if (!pend_page) { + pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu); + return -ENOMEM; + } + + gic_data_rdist_cpu(cpu)->pend_page = pend_page; + } + + return 0; +} + +static void its_cpu_init_lpis(void) +{ + void __iomem *rbase = gic_data_rdist_rd_base(); + struct page *pend_page; + phys_addr_t paddr; + u64 val, tmp; + + if (gic_data_rdist()->lpi_enabled) + return; + + val = readl_relaxed(rbase + GICR_CTLR); + if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) && + (val & GICR_CTLR_ENABLE_LPIS)) { + /* + * Check that we get the same property table on all + * RDs. If we don't, this is hopeless. + */ + paddr = gicr_read_propbaser(rbase + GICR_PROPBASER); + paddr &= GENMASK_ULL(51, 12); + if (WARN_ON(gic_rdists->prop_table_pa != paddr)) + add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); + + paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER); + paddr &= GENMASK_ULL(51, 16); + + WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ)); + its_free_pending_table(gic_data_rdist()->pend_page); + gic_data_rdist()->pend_page = NULL; + + goto out; + } + + pend_page = gic_data_rdist()->pend_page; + paddr = page_to_phys(pend_page); + WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ)); + + /* set PROPBASE */ + val = (gic_rdists->prop_table_pa | + GICR_PROPBASER_InnerShareable | + GICR_PROPBASER_RaWaWb | + ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK)); + + gicr_write_propbaser(val, rbase + GICR_PROPBASER); + tmp = gicr_read_propbaser(rbase + GICR_PROPBASER); + + if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) { + if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) { + /* + * The HW reports non-shareable, we must + * remove the cacheability attributes as + * well. 
+ */ + val &= ~(GICR_PROPBASER_SHAREABILITY_MASK | + GICR_PROPBASER_CACHEABILITY_MASK); + val |= GICR_PROPBASER_nC; + gicr_write_propbaser(val, rbase + GICR_PROPBASER); + } + pr_info_once("GIC: using cache flushing for LPI property table\n"); + gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING; + } + + /* set PENDBASE */ + val = (page_to_phys(pend_page) | + GICR_PENDBASER_InnerShareable | + GICR_PENDBASER_RaWaWb); + + gicr_write_pendbaser(val, rbase + GICR_PENDBASER); + tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER); + + if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) { + /* + * The HW reports non-shareable, we must remove the + * cacheability attributes as well. + */ + val &= ~(GICR_PENDBASER_SHAREABILITY_MASK | + GICR_PENDBASER_CACHEABILITY_MASK); + val |= GICR_PENDBASER_nC; + gicr_write_pendbaser(val, rbase + GICR_PENDBASER); + } + + /* Enable LPIs */ + val = readl_relaxed(rbase + GICR_CTLR); + val |= GICR_CTLR_ENABLE_LPIS; + writel_relaxed(val, rbase + GICR_CTLR); + + /* Make sure the GIC has seen the above */ + dsb(sy); +out: + gic_data_rdist()->lpi_enabled = true; + pr_info("GICv-2500: CPU%d: using %s LPI pending table @%pa\n", + smp_processor_id(), + gic_data_rdist()->pend_page ? "allocated" : "reserved", + &paddr); +} + +static void its_cpu_init_collection(struct its_node *its) +{ + int cpu = smp_processor_id(); + unsigned long mpid, skt_id; + phys_addr_t its_phys_base; + u64 target; + + /* avoid cross node collections and its mapping */ + if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { + struct device_node *cpu_node; + + cpu_node = of_get_cpu_node(cpu, NULL); + if (its->numa_node != NUMA_NO_NODE && + its->numa_node != of_node_to_nid(cpu_node)) + return; + } + + mpid = cpu_logical_map(cpu); + its_phys_base = its->phys_base; + skt_id = (its_phys_base >> 41) & 0x7; + + /* + * We now have to bind each collection to its target + * redistributor. 
+ */ + if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) { + /* + * This ITS wants the physical address of the + * redistributor. + */ + target = gic_data_rdist()->phys_base; + } else { + /* This ITS wants a linear CPU number. */ + target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); + target = GICR_TYPER_CPU_NUMBER(target) << 16; + } + + /* Perform collection mapping */ + its->collections[cpu].target_address = target; + if (is_kdump_kernel()) + its->collections[cpu].col_id = cpu % 65; + else + its->collections[cpu].col_id = cpu % 64; + + its_send_mapc(its, &its->collections[cpu], 1); + its_send_invall(its, &its->collections[cpu]); +} + +static void its_cpu_init_collections(void) +{ + struct its_node *its; + + raw_spin_lock(&its_lock); + + list_for_each_entry(its, &its_nodes, entry) + its_cpu_init_collection(its); + + raw_spin_unlock(&its_lock); +} + +static struct its_device *its_find_device(struct its_node *its, u32 dev_id) +{ + struct its_device *its_dev = NULL, *tmp; + unsigned long flags; + + raw_spin_lock_irqsave(&its->lock, flags); + + list_for_each_entry(tmp, &its->its_device_list, entry) { + if (tmp->device_id == dev_id) { + its_dev = tmp; + break; + } + } + + raw_spin_unlock_irqrestore(&its->lock, flags); + + return its_dev; +} + +static struct its_baser *its_get_baser(struct its_node *its, u32 type) +{ + int i; + + for (i = 0; i < GITS_BASER_NR_REGS; i++) { + if (GITS_BASER_TYPE(its->tables[i].val) == type) + return &its->tables[i]; + } + + return NULL; +} + +static bool its_alloc_table_entry(struct its_baser *baser, u32 id) +{ + struct page *page; + u32 esz, idx; + __le64 *table; + + /* Don't allow device id that exceeds single, flat table limit */ + esz = GITS_BASER_ENTRY_SIZE(baser->val); + if (!(baser->val & GITS_BASER_INDIRECT)) + return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz)); + + /* Compute 1st level table index & check if that exceeds table limit */ + idx = id >> ilog2(baser->psz / esz); + if (idx >= 
(PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE)) + return false; + + table = baser->base; + + /* Allocate memory for 2nd level table */ + if (!table[idx]) { + page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(baser->psz)); + if (!page) + return false; + + /* Flush Lvl2 table to PoC if hw doesn't support coherency */ + if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) + gic_flush_dcache_to_poc(page_address(page), baser->psz); + + table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID); + + /* Flush Lvl1 entry to PoC if hw doesn't support coherency */ + if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) + gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE); + + /* Ensure updated table contents are visible to ITS hardware */ + dsb(sy); + } + + return true; +} + +static bool its_alloc_device_table(struct its_node *its, u32 dev_id) +{ + struct its_baser *baser; + + baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE); + + /* Don't allow device id that exceeds ITS hardware limit */ + if (!baser) + return (ilog2(dev_id) < its->device_ids); + + return its_alloc_table_entry(baser, dev_id); +} + +static bool its_alloc_vpe_table(u32 vpe_id) +{ + struct its_node *its; + + /* + * Make sure the L2 tables are allocated on *all* v4 ITSs. We + * could try and only do it on ITSs corresponding to devices + * that have interrupts targeted at this VPE, but the + * complexity becomes crazy (and you have tons of memory + * anyway, right?). 
+ */ + list_for_each_entry(its, &its_nodes, entry) { + struct its_baser *baser; + + if (!its->is_v4) + continue; + + baser = its_get_baser(its, GITS_BASER_TYPE_VCPU); + if (!baser) + return false; + + if (!its_alloc_table_entry(baser, vpe_id)) + return false; + } + + return true; +} + +static struct its_device *its_create_device(struct its_node *its, u32 dev_id, + int nvecs, bool alloc_lpis) +{ + struct its_device *dev; + unsigned long *lpi_map = NULL; + unsigned long flags; + u16 *col_map = NULL; + void *itt; + int lpi_base; + int nr_lpis; + int nr_ites; + int sz; + + if (!its_alloc_device_table(its, dev_id)) + return NULL; + + if (WARN_ON(!is_power_of_2(nvecs))) + nvecs = roundup_pow_of_two(nvecs); + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + /* + * Even if the device wants a single LPI, the ITT must be + * sized as a power of two (and you need at least one bit...). + */ + nr_ites = max(2, nvecs); + sz = nr_ites * its->ite_size; + sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; + itt = kzalloc(sz, GFP_KERNEL); + if (alloc_lpis) { + lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis); + if (lpi_map) + col_map = kcalloc(nr_lpis, sizeof(*col_map), + GFP_KERNEL); + } else { + col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL); + nr_lpis = 0; + lpi_base = 0; + } + + if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) { + kfree(dev); + kfree(itt); + kfree(lpi_map); + kfree(col_map); + return NULL; + } + + gic_flush_dcache_to_poc(itt, sz); + + dev->its = its; + dev->itt = itt; + dev->nr_ites = nr_ites; + dev->event_map.lpi_map = lpi_map; + dev->event_map.col_map = col_map; + dev->event_map.lpi_base = lpi_base; + dev->event_map.nr_lpis = nr_lpis; + mutex_init(&dev->event_map.vlpi_lock); + dev->device_id = dev_id; + INIT_LIST_HEAD(&dev->entry); + + raw_spin_lock_irqsave(&its->lock, flags); + list_add(&dev->entry, &its->its_device_list); + raw_spin_unlock_irqrestore(&its->lock, flags); + + /* Map device to its ITT */ + its_send_mapd(dev, 1); + + return dev; 
+} + +static void its_free_device(struct its_device *its_dev) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&its_dev->its->lock, flags); + list_del(&its_dev->entry); + raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); + kfree(its_dev->itt); + kfree(its_dev); +} + +static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq) +{ + int idx; + + idx = find_first_zero_bit(dev->event_map.lpi_map, + dev->event_map.nr_lpis); + if (idx == dev->event_map.nr_lpis) + return -ENOSPC; + + *hwirq = dev->event_map.lpi_base + idx; + set_bit(idx, dev->event_map.lpi_map); + + return 0; +} + +static int its_msi_prepare(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *info) +{ + struct its_node *its; + struct its_device *its_dev; + struct msi_domain_info *msi_info; + u32 dev_id; + + /* + * We ignore "dev" entierely, and rely on the dev_id that has + * been passed via the scratchpad. This limits this domain's + * usefulness to upper layers that definitely know that they + * are built on top of the ITS. + */ + dev_id = info->scratchpad[0].ul; + + msi_info = msi_get_domain_info(domain); + its = msi_info->data; + + if (!gic_rdists->has_direct_lpi && + vpe_proxy.dev && + vpe_proxy.dev->its == its && + dev_id == vpe_proxy.dev->device_id) { + /* Bad luck. Get yourself a better implementation */ + WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n", + dev_id); + return -EINVAL; + } + + its_dev = its_find_device(its, dev_id); + if (its_dev) { + /* + * We already have seen this ID, probably through + * another alias (PCI bridge of some sort). No need to + * create the device. 
+ */ + pr_debug("Reusing ITT for devID %x\n", dev_id); + goto out; + } + + its_dev = its_create_device(its, dev_id, nvec, true); + if (!its_dev) + return -ENOMEM; + + pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec)); +out: + info->scratchpad[0].ptr = its_dev; + return 0; +} + +static struct msi_domain_ops its_msi_domain_ops = { + .msi_prepare = its_msi_prepare, +}; + +static int its_irq_gic_domain_alloc(struct irq_domain *domain, + unsigned int virq, + irq_hw_number_t hwirq) +{ + struct irq_fwspec fwspec; + + if (irq_domain_get_of_node(domain->parent)) { + fwspec.fwnode = domain->parent->fwnode; + fwspec.param_count = 3; + fwspec.param[0] = GIC_IRQ_TYPE_LPI; + fwspec.param[1] = hwirq; + fwspec.param[2] = IRQ_TYPE_EDGE_RISING; + } else if (is_fwnode_irqchip(domain->parent->fwnode)) { + fwspec.fwnode = domain->parent->fwnode; + fwspec.param_count = 2; + fwspec.param[0] = hwirq; + fwspec.param[1] = IRQ_TYPE_EDGE_RISING; + } else { + return -EINVAL; + } + + return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); +} + +static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *args) +{ + msi_alloc_info_t *info = args; + struct its_device *its_dev = info->scratchpad[0].ptr; + irq_hw_number_t hwirq; + int err; + int i; + + for (i = 0; i < nr_irqs; i++) { + err = its_alloc_device_irq(its_dev, &hwirq); + if (err) + return err; + + err = its_irq_gic_domain_alloc(domain, virq + i, hwirq); + if (err) + return err; + + irq_domain_set_hwirq_and_chip(domain, virq + i, + hwirq, &its_irq_chip, its_dev); + irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i))); + pr_debug("ID:%d pID:%d vID:%d\n", + (int)(hwirq - its_dev->event_map.lpi_base), + (int) hwirq, virq + i); + } + + return 0; +} + +static int its_cpumask_first(struct its_device *its_dev, + const struct cpumask *cpu_mask) +{ + unsigned int skt, skt_id, i; + phys_addr_t its_phys_base; + unsigned int cpu, cpus = 0; + unsigned int 
skt_cpu_cnt[MAX_MARS3_SKT_COUNT] = {0}; + + its_phys_base = its_dev->its->phys_base; + skt_id = (its_phys_base >> 41) & 0x7; + + for (i = 0; i < nr_cpu_ids; i++) { + skt = (cpu_logical_map(i) >> 16) & 0xff; + if ((skt >= 0) && (skt < MAX_MARS3_SKT_COUNT)) { + if ((is_kdump_kernel()) && (skt_id == skt)) { + return i; + } + + skt_cpu_cnt[skt]++; + } + else if (0xff != skt ) + pr_err("socket address: %d is out of range.", skt); + } + + if (0 != skt_id) { + for (i = 0; i < skt_id; i++) + cpus += skt_cpu_cnt[i]; + } + + cpu = cpumask_first(cpu_mask); + if ((cpu > cpus) && (cpu < (cpus + skt_cpu_cnt[skt_id]))) + cpus = cpu; + + return cpus; +} + +static int its_irq_domain_activate(struct irq_domain *domain, + struct irq_data *d, bool reserve) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + const struct cpumask *cpu_mask = cpu_online_mask; + int cpu; + + /* get the cpu_mask of local node */ + if (its_dev->its->numa_node >= 0) + cpu_mask = cpumask_of_node(its_dev->its->numa_node); + + /* Bind the LPI to the first possible CPU */ + cpu = its_cpumask_first(its_dev, cpu_mask); + printk("its_irq_domain_activate: MAPTI irq %d hwirq %ld on cpu %d\n", + d->irq, d->hwirq, cpu); + + its_inc_lpi_count(d, cpu); + its_dev->event_map.col_map[event] = cpu; + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + + /* Map the GIC IRQ and event to the device */ + its_send_mapti(its_dev, d->hwirq, event); + return 0; +} + +static void its_irq_domain_deactivate(struct irq_domain *domain, + struct irq_data *d) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + + its_dec_lpi_count(d, its_dev->event_map.col_map[event]); + /* Stop the delivery of interrupts */ + its_send_discard(its_dev, event); +} + +static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + struct irq_data *d = irq_domain_get_irq_data(domain, virq); + struct its_device 
*its_dev = irq_data_get_irq_chip_data(d); + int i; + + for (i = 0; i < nr_irqs; i++) { + struct irq_data *data = irq_domain_get_irq_data(domain, + virq + i); + u32 event = its_get_event_id(data); + + /* Mark interrupt index as unused */ + clear_bit(event, its_dev->event_map.lpi_map); + + /* Nuke the entry in the domain */ + irq_domain_reset_irq_data(data); + } + + /* If all interrupts have been freed, start mopping the floor */ + if (bitmap_empty(its_dev->event_map.lpi_map, + its_dev->event_map.nr_lpis)) { + its_lpi_free(its_dev->event_map.lpi_map, + its_dev->event_map.lpi_base, + its_dev->event_map.nr_lpis); + kfree(its_dev->event_map.col_map); + + /* Unmap device/itt */ + its_send_mapd(its_dev, 0); + its_free_device(its_dev); + } + + irq_domain_free_irqs_parent(domain, virq, nr_irqs); +} + +static const struct irq_domain_ops its_domain_ops = { + .alloc = its_irq_domain_alloc, + .free = its_irq_domain_free, + .activate = its_irq_domain_activate, + .deactivate = its_irq_domain_deactivate, +}; + +/* + * This is insane. + * + * If a GICv4 doesn't implement Direct LPIs (which is extremely + * likely), the only way to perform an invalidate is to use a fake + * device to issue an INV command, implying that the LPI has first + * been mapped to some event on that device. Since this is not exactly + * cheap, we try to keep that mapping around as long as possible, and + * only issue an UNMAP if we're short on available slots. + * + * Broken by design(tm). + */ +static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe) +{ + /* Already unmapped? */ + if (vpe->vpe_proxy_event == -1) + return; + + its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event); + vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL; + + /* + * We don't track empty slots at all, so let's move the + * next_victim pointer if we can quickly reuse that slot + * instead of nuking an existing entry. Not clear that this is + * always a win though, and this might just generate a ripple + * effect... 
Let's just hope VPEs don't migrate too often. + */ + if (vpe_proxy.vpes[vpe_proxy.next_victim]) + vpe_proxy.next_victim = vpe->vpe_proxy_event; + + vpe->vpe_proxy_event = -1; +} + +static void its_vpe_db_proxy_unmap(struct its_vpe *vpe) +{ + if (!gic_rdists->has_direct_lpi) { + unsigned long flags; + + raw_spin_lock_irqsave(&vpe_proxy.lock, flags); + its_vpe_db_proxy_unmap_locked(vpe); + raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); + } +} + +static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe) +{ + /* Already mapped? */ + if (vpe->vpe_proxy_event != -1) + return; + + /* This slot was already allocated. Kick the other VPE out. */ + if (vpe_proxy.vpes[vpe_proxy.next_victim]) + its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]); + + /* Map the new VPE instead */ + vpe_proxy.vpes[vpe_proxy.next_victim] = vpe; + vpe->vpe_proxy_event = vpe_proxy.next_victim; + vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites; + + vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx; + its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event); +} + +static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to) +{ + unsigned long flags; + struct its_collection *target_col; + + if (gic_rdists->has_direct_lpi) { + void __iomem *rdbase; + + rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base; + gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); + while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) + cpu_relax(); + + return; + } + + raw_spin_lock_irqsave(&vpe_proxy.lock, flags); + + its_vpe_db_proxy_map_locked(vpe); + + target_col = &vpe_proxy.dev->its->collections[to]; + its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event); + vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to; + + raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); +} + +static int its_vpe_set_affinity(struct irq_data *d, + const struct cpumask *mask_val, + bool force) +{ + struct its_vpe *vpe = 
irq_data_get_irq_chip_data(d); + int cpu = cpumask_first(mask_val); + + /* + * Changing affinity is mega expensive, so let's be as lazy as + * we can and only do it if we really have to. Also, if mapped + * into the proxy device, we need to move the doorbell + * interrupt to its new location. + */ + if (vpe->col_idx != cpu) { + int from = vpe->col_idx; + + vpe->col_idx = cpu; + its_send_vmovp(vpe); + its_vpe_db_proxy_move(vpe, from, cpu); + } + + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + + return IRQ_SET_MASK_OK_DONE; +} + +static void its_vpe_schedule(struct its_vpe *vpe) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val; + + /* Schedule the VPE */ + val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) & + GENMASK_ULL(51, 12); + val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; + val |= GICR_VPROPBASER_RaWb; + val |= GICR_VPROPBASER_InnerShareable; + gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + + val = virt_to_phys(page_address(vpe->vpt_page)) & + GENMASK_ULL(51, 16); + val |= GICR_VPENDBASER_RaWaWb; + val |= GICR_VPENDBASER_NonShareable; + /* + * There is no good way of finding out if the pending table is + * empty as we can race against the doorbell interrupt very + * easily. So in the end, vpe->pending_last is only an + * indication that the vcpu has something pending, not one + * that the pending table is empty. A good implementation + * would be able to read its coarse map pretty quickly anyway, + * making this a tolerable issue. + */ + val |= GICR_VPENDBASER_PendingLast; + val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0; + val |= GICR_VPENDBASER_Valid; + gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); +} + +static void its_vpe_deschedule(struct its_vpe *vpe) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u32 count = 1000000; /* 1s! 
*/ + bool clean; + u64 val; + + /* We're being scheduled out */ + val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); + val &= ~GICR_VPENDBASER_Valid; + gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); + + do { + val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); + clean = !(val & GICR_VPENDBASER_Dirty); + if (!clean) { + count--; + cpu_relax(); + udelay(1); + } + } while (!clean && count); + + if (unlikely(!clean && !count)) { + pr_err_ratelimited("ITS virtual pending table not cleaning\n"); + vpe->idai = false; + vpe->pending_last = true; + } else { + vpe->idai = !!(val & GICR_VPENDBASER_IDAI); + vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); + } +} + +static void its_vpe_invall(struct its_vpe *vpe) +{ + struct its_node *its; + + list_for_each_entry(its, &its_nodes, entry) { + if (!its->is_v4) + continue; + + if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr]) + continue; + + /* + * Sending a VINVALL to a single ITS is enough, as all + * we need is to reach the redistributors. 
+ */ + its_send_vinvall(its, vpe); + return; + } +} + +static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_cmd_info *info = vcpu_info; + + switch (info->cmd_type) { + case SCHEDULE_VPE: + its_vpe_schedule(vpe); + return 0; + + case DESCHEDULE_VPE: + its_vpe_deschedule(vpe); + return 0; + + case INVALL_VPE: + its_vpe_invall(vpe); + return 0; + + default: + return -EINVAL; + } +} + +static void its_vpe_send_cmd(struct its_vpe *vpe, + void (*cmd)(struct its_device *, u32)) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&vpe_proxy.lock, flags); + + its_vpe_db_proxy_map_locked(vpe); + cmd(vpe_proxy.dev, vpe->vpe_proxy_event); + + raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); +} + +static void its_vpe_send_inv(struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + if (gic_rdists->has_direct_lpi) { + void __iomem *rdbase; + + rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; + gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR); + while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) + cpu_relax(); + } else { + its_vpe_send_cmd(vpe, its_send_inv); + } +} + +static void its_vpe_mask_irq(struct irq_data *d) +{ + /* + * We need to unmask the LPI, which is described by the parent + * irq_data. Instead of calling into the parent (which won't + * exactly do the right thing, let's simply use the + * parent_data pointer. Yes, I'm naughty. + */ + lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0); + its_vpe_send_inv(d); +} + +static void its_vpe_unmask_irq(struct irq_data *d) +{ + /* Same hack as above... 
*/ + lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED); + its_vpe_send_inv(d); +} + +static int its_vpe_set_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool state) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + if (which != IRQCHIP_STATE_PENDING) + return -EINVAL; + + if (gic_rdists->has_direct_lpi) { + void __iomem *rdbase; + + rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; + if (state) { + gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR); + } else { + gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); + while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) + cpu_relax(); + } + } else { + if (state) + its_vpe_send_cmd(vpe, its_send_int); + else + its_vpe_send_cmd(vpe, its_send_clear); + } + + return 0; +} + +static struct irq_chip its_vpe_irq_chip = { + .name = "GICv4-vpe", + .irq_mask = its_vpe_mask_irq, + .irq_unmask = its_vpe_unmask_irq, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_affinity = its_vpe_set_affinity, + .irq_set_irqchip_state = its_vpe_set_irqchip_state, + .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity, +}; + +static int its_vpe_id_alloc(void) +{ + return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL); +} + +static void its_vpe_id_free(u16 id) +{ + ida_simple_remove(&its_vpeid_ida, id); +} + +static int its_vpe_init(struct its_vpe *vpe) +{ + struct page *vpt_page; + int vpe_id; + + /* Allocate vpe_id */ + vpe_id = its_vpe_id_alloc(); + if (vpe_id < 0) + return vpe_id; + + /* Allocate VPT */ + vpt_page = its_allocate_pending_table(GFP_KERNEL); + if (!vpt_page) { + its_vpe_id_free(vpe_id); + return -ENOMEM; + } + + if (!its_alloc_vpe_table(vpe_id)) { + its_vpe_id_free(vpe_id); + its_free_pending_table(vpe->vpt_page); + return -ENOMEM; + } + + vpe->vpe_id = vpe_id; + vpe->vpt_page = vpt_page; + vpe->vpe_proxy_event = -1; + + return 0; +} + +static void its_vpe_teardown(struct its_vpe *vpe) +{ + its_vpe_db_proxy_unmap(vpe); + its_vpe_id_free(vpe->vpe_id); + 
its_free_pending_table(vpe->vpt_page); +} + +static void its_vpe_irq_domain_free(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs) +{ + struct its_vm *vm = domain->host_data; + int i; + + irq_domain_free_irqs_parent(domain, virq, nr_irqs); + + for (i = 0; i < nr_irqs; i++) { + struct irq_data *data = irq_domain_get_irq_data(domain, + virq + i); + struct its_vpe *vpe = irq_data_get_irq_chip_data(data); + + BUG_ON(vm != vpe->its_vm); + + clear_bit(data->hwirq, vm->db_bitmap); + its_vpe_teardown(vpe); + irq_domain_reset_irq_data(data); + } + + if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) { + its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis); + its_free_prop_table(vm->vprop_page); + } +} + +static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *args) +{ + struct its_vm *vm = args; + unsigned long *bitmap; + struct page *vprop_page; + int base, nr_ids, i, err = 0; + + BUG_ON(!vm); + + bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids); + if (!bitmap) + return -ENOMEM; + + if (nr_ids < nr_irqs) { + its_lpi_free(bitmap, base, nr_ids); + return -ENOMEM; + } + + vprop_page = its_allocate_prop_table(GFP_KERNEL); + if (!vprop_page) { + its_lpi_free(bitmap, base, nr_ids); + return -ENOMEM; + } + + vm->db_bitmap = bitmap; + vm->db_lpi_base = base; + vm->nr_db_lpis = nr_ids; + vm->vprop_page = vprop_page; + + for (i = 0; i < nr_irqs; i++) { + vm->vpes[i]->vpe_db_lpi = base + i; + err = its_vpe_init(vm->vpes[i]); + if (err) + break; + err = its_irq_gic_domain_alloc(domain, virq + i, + vm->vpes[i]->vpe_db_lpi); + if (err) + break; + irq_domain_set_hwirq_and_chip(domain, virq + i, i, + &its_vpe_irq_chip, vm->vpes[i]); + set_bit(i, bitmap); + } + + if (err) { + if (i > 0) + its_vpe_irq_domain_free(domain, virq, i - 1); + + its_lpi_free(bitmap, base, nr_ids); + its_free_prop_table(vprop_page); + } + + return err; +} + +static int its_vpe_irq_domain_activate(struct irq_domain 
*domain, + struct irq_data *d, bool reserve) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_node *its; + + /* If we use the list map, we issue VMAPP on demand... */ + if (its_list_map) + return 0; + + /* Map the VPE to the first possible CPU */ + vpe->col_idx = cpumask_first(cpu_online_mask); + + list_for_each_entry(its, &its_nodes, entry) { + if (!its->is_v4) + continue; + + its_send_vmapp(its, vpe, true); + its_send_vinvall(its, vpe); + } + + irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); + + return 0; +} + +static void its_vpe_irq_domain_deactivate(struct irq_domain *domain, + struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_node *its; + + /* + * If we use the list map, we unmap the VPE once no VLPIs are + * associated with the VM. + */ + if (its_list_map) + return; + + list_for_each_entry(its, &its_nodes, entry) { + if (!its->is_v4) + continue; + + its_send_vmapp(its, vpe, false); + } +} + +static const struct irq_domain_ops its_vpe_domain_ops = { + .alloc = its_vpe_irq_domain_alloc, + .free = its_vpe_irq_domain_free, + .activate = its_vpe_irq_domain_activate, + .deactivate = its_vpe_irq_domain_deactivate, +}; + +static int its_force_quiescent(void __iomem *base) +{ + u32 count = 1000000; /* 1s */ + u32 val; + + val = readl_relaxed(base + GITS_CTLR); + /* + * GIC architecture specification requires the ITS to be both + * disabled and quiescent for writes to GITS_BASER or + * GITS_CBASER to not have UNPREDICTABLE results. 
+ */ + if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE)) + return 0; + + /* Disable the generation of all interrupts to this ITS */ + val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe); + writel_relaxed(val, base + GITS_CTLR); + + /* Poll GITS_CTLR and wait until ITS becomes quiescent */ + while (1) { + val = readl_relaxed(base + GITS_CTLR); + if (val & GITS_CTLR_QUIESCENT) + return 0; + + count--; + if (!count) + return -EBUSY; + + cpu_relax(); + udelay(1); + } +} + +static bool __maybe_unused its_enable_quirk_cavium_22375(void *data) +{ + struct its_node *its = data; + + /* erratum 22375: only alloc 8MB table size */ + its->device_ids = 0x14; /* 20 bits, 8MB */ + its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; + + return true; +} + +static bool __maybe_unused its_enable_quirk_cavium_23144(void *data) +{ + struct its_node *its = data; + + its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; + + return true; +} + +static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data) +{ + struct its_node *its = data; + + /* On QDF2400, the size of the ITE is 16Bytes */ + its->ite_size = 16; + + return true; +} + +static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev) +{ + struct its_node *its = its_dev->its; + + /* + * The Socionext Synquacer SoC has a so-called 'pre-ITS', + * which maps 32-bit writes targeted at a separate window of + * size '4 << device_id_bits' onto writes to GITS_TRANSLATER + * with device ID taken from bits [device_id_bits + 1:2] of + * the window offset. 
+ */ + return its->pre_its_base + (its_dev->device_id << 2); +} + +static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data) +{ + struct its_node *its = data; + u32 pre_its_window[2]; + u32 ids; + + if (!fwnode_property_read_u32_array(its->fwnode_handle, + "socionext,synquacer-pre-its", + pre_its_window, + ARRAY_SIZE(pre_its_window))) { + + its->pre_its_base = pre_its_window[0]; + its->get_msi_base = its_irq_get_msi_base_pre_its; + + ids = ilog2(pre_its_window[1]) - 2; + if (its->device_ids > ids) + its->device_ids = ids; + + /* the pre-ITS breaks isolation, so disable MSI remapping */ + its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP; + return true; + } + return false; +} + +static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data) +{ + struct its_node *its = data; + + /* + * Hip07 insists on using the wrong address for the VLPI + * page. Trick it into doing the right thing... + */ + its->vlpi_redist_offset = SZ_128K; + return true; +} + +static const struct gic_quirk its_quirks[] = { +#ifdef CONFIG_CAVIUM_ERRATUM_22375 + { + .desc = "ITS: Cavium errata 22375, 24313", + .iidr = 0xa100034c, /* ThunderX pass 1.x */ + .mask = 0xffff0fff, + .init = its_enable_quirk_cavium_22375, + }, +#endif +#ifdef CONFIG_CAVIUM_ERRATUM_23144 + { + .desc = "ITS: Cavium erratum 23144", + .iidr = 0xa100034c, /* ThunderX pass 1.x */ + .mask = 0xffff0fff, + .init = its_enable_quirk_cavium_23144, + }, +#endif +#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065 + { + .desc = "ITS: QDF2400 erratum 0065", + .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */ + .mask = 0xffffffff, + .init = its_enable_quirk_qdf2400_e0065, + }, +#endif +#ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS + { + /* + * The Socionext Synquacer SoC incorporates ARM's own GIC-500 + * implementation, but with a 'pre-ITS' added that requires + * special handling in software. 
+ */ + .desc = "ITS: Socionext Synquacer pre-ITS", + .iidr = 0x0001143b, + .mask = 0xffffffff, + .init = its_enable_quirk_socionext_synquacer, + }, +#endif +#ifdef CONFIG_HISILICON_ERRATUM_161600802 + { + .desc = "ITS: Hip07 erratum 161600802", + .iidr = 0x00000004, + .mask = 0xffffffff, + .init = its_enable_quirk_hip07_161600802, + }, +#endif + { + } +}; + +static void its_enable_quirks(struct its_node *its) +{ + u32 iidr = readl_relaxed(its->base + GITS_IIDR); + + gic_enable_quirks(iidr, its_quirks, its); +} + +static int its_save_disable(void) +{ + struct its_node *its; + int err = 0; + + raw_spin_lock(&its_lock); + list_for_each_entry(its, &its_nodes, entry) { + void __iomem *base; + + if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE)) + continue; + + base = its->base; + its->ctlr_save = readl_relaxed(base + GITS_CTLR); + err = its_force_quiescent(base); + if (err) { + pr_err("ITS@%pa: failed to quiesce: %d\n", + &its->phys_base, err); + writel_relaxed(its->ctlr_save, base + GITS_CTLR); + goto err; + } + + its->cbaser_save = gits_read_cbaser(base + GITS_CBASER); + } + +err: + if (err) { + list_for_each_entry_continue_reverse(its, &its_nodes, entry) { + void __iomem *base; + + if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE)) + continue; + + base = its->base; + writel_relaxed(its->ctlr_save, base + GITS_CTLR); + } + } + raw_spin_unlock(&its_lock); + + return err; +} + +static void its_restore_enable(void) +{ + struct its_node *its; + int ret; + + raw_spin_lock(&its_lock); + list_for_each_entry(its, &its_nodes, entry) { + void __iomem *base; + int i; + + if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE)) + continue; + + base = its->base; + + /* + * Make sure that the ITS is disabled. If it fails to quiesce, + * don't restore it since writing to CBASER or BASER + * registers is undefined according to the GIC v3 ITS + * Specification. 
+ */ + ret = its_force_quiescent(base); + if (ret) { + pr_err("ITS@%pa: failed to quiesce on resume: %d\n", + &its->phys_base, ret); + continue; + } + + gits_write_cbaser(its->cbaser_save, base + GITS_CBASER); + + /* + * Writing CBASER resets CREADR to 0, so make CWRITER and + * cmd_write line up with it. + */ + its->cmd_write = its->cmd_base; + gits_write_cwriter(0, base + GITS_CWRITER); + + /* Restore GITS_BASER from the value cache. */ + for (i = 0; i < GITS_BASER_NR_REGS; i++) { + struct its_baser *baser = &its->tables[i]; + + if (!(baser->val & GITS_BASER_VALID)) + continue; + + its_write_baser(its, baser, baser->val); + } + writel_relaxed(its->ctlr_save, base + GITS_CTLR); + + /* + * Reinit the collection if it's stored in the ITS. This is + * indicated by the col_id being less than the HCC field. + * CID < HCC as specified in the GIC v3 Documentation. + */ + if (its->collections[smp_processor_id()].col_id < + GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER))) + its_cpu_init_collection(its); + } + raw_spin_unlock(&its_lock); +} + +static struct syscore_ops its_syscore_ops = { + .suspend = its_save_disable, + .resume = its_restore_enable, +}; + +static int its_init_domain(struct fwnode_handle *handle, struct its_node *its) +{ + struct irq_domain *inner_domain; + struct msi_domain_info *info; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its); + if (!inner_domain) { + kfree(info); + return -ENOMEM; + } + + inner_domain->parent = its_parent; + irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS); + inner_domain->flags |= its->msi_domain_flags; + info->ops = &its_msi_domain_ops; + info->data = its; + inner_domain->host_data = info; + + return 0; +} + +static int its_init_vpe_domain(void) +{ + struct its_node *its; + u32 devid; + int entries; + + if (gic_rdists->has_direct_lpi) { + pr_info("ITS: Using DirectLPI for VPE invalidation\n"); + return 0; + } + + /* 
Any ITS will do, even if not v4 */ + its = list_first_entry(&its_nodes, struct its_node, entry); + + entries = roundup_pow_of_two(nr_cpu_ids); + vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes), + GFP_KERNEL); + if (!vpe_proxy.vpes) { + pr_err("ITS: Can't allocate GICv4 proxy device array\n"); + return -ENOMEM; + } + + /* Use the last possible DevID */ + devid = GENMASK(its->device_ids - 1, 0); + vpe_proxy.dev = its_create_device(its, devid, entries, false); + if (!vpe_proxy.dev) { + kfree(vpe_proxy.vpes); + pr_err("ITS: Can't allocate GICv4 proxy device\n"); + return -ENOMEM; + } + + BUG_ON(entries > vpe_proxy.dev->nr_ites); + + raw_spin_lock_init(&vpe_proxy.lock); + vpe_proxy.next_victim = 0; + pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n", + devid, vpe_proxy.dev->nr_ites); + + return 0; +} + +static int __init its_compute_its_list_map(struct resource *res, + void __iomem *its_base) +{ + int its_number; + u32 ctlr; + + /* + * This is assumed to be done early enough that we're + * guaranteed to be single-threaded, hence no + * locking. Should this change, we should address + * this. 
+ */ + its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX); + if (its_number >= GICv4_ITS_LIST_MAX) { + pr_err("ITS@%pa: No ITSList entry available!\n", + &res->start); + return -EINVAL; + } + + ctlr = readl_relaxed(its_base + GITS_CTLR); + ctlr &= ~GITS_CTLR_ITS_NUMBER; + ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT; + writel_relaxed(ctlr, its_base + GITS_CTLR); + ctlr = readl_relaxed(its_base + GITS_CTLR); + if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) { + its_number = ctlr & GITS_CTLR_ITS_NUMBER; + its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT; + } + + if (test_and_set_bit(its_number, &its_list_map)) { + pr_err("ITS@%pa: Duplicate ITSList entry %d\n", + &res->start, its_number); + return -EINVAL; + } + + return its_number; +} + +static int __init its_probe_one(struct resource *res, + struct fwnode_handle *handle, int numa_node) +{ + struct its_node *its; + void __iomem *its_base; + u32 val, ctlr; + u64 baser, tmp, typer; + int err; + + its_base = ioremap(res->start, resource_size(res)); + if (!its_base) { + pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start); + return -ENOMEM; + } + + val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK; + if (val != 0x30 && val != 0x40) { + pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start); + err = -ENODEV; + goto out_unmap; + } + + err = its_force_quiescent(its_base); + if (err) { + pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start); + goto out_unmap; + } + + pr_info("ITS %pR\n", res); + + its = kzalloc(sizeof(*its), GFP_KERNEL); + if (!its) { + err = -ENOMEM; + goto out_unmap; + } + + raw_spin_lock_init(&its->lock); + INIT_LIST_HEAD(&its->entry); + INIT_LIST_HEAD(&its->its_device_list); + typer = gic_read_typer(its_base + GITS_TYPER); + its->base = its_base; + its->phys_base = res->start; + its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer); + its->device_ids = GITS_TYPER_DEVBITS(typer); + its->is_v4 = !!(typer & 
GITS_TYPER_VLPIS); + if (its->is_v4) { + if (!(typer & GITS_TYPER_VMOVP)) { + err = its_compute_its_list_map(res, its_base); + if (err < 0) + goto out_free_its; + + its->list_nr = err; + + pr_info("ITS@%pa: Using ITS number %d\n", + &res->start, err); + } else { + pr_info("ITS@%pa: Single VMOVP capable\n", &res->start); + } + } + + its->numa_node = numa_node; + + its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, + get_order(ITS_CMD_QUEUE_SZ)); + if (!its->cmd_base) { + err = -ENOMEM; + goto out_free_its; + } + its->cmd_write = its->cmd_base; + its->fwnode_handle = handle; + its->get_msi_base = its_irq_get_msi_base; + its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP; + + its_enable_quirks(its); + + err = its_alloc_tables(its); + if (err) + goto out_free_cmd; + + err = its_alloc_collections(its); + if (err) + goto out_free_tables; + + baser = (virt_to_phys(its->cmd_base) | + GITS_CBASER_RaWaWb | + GITS_CBASER_InnerShareable | + (ITS_CMD_QUEUE_SZ / SZ_4K - 1) | + GITS_CBASER_VALID); + + gits_write_cbaser(baser, its->base + GITS_CBASER); + tmp = gits_read_cbaser(its->base + GITS_CBASER); + + if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) { + if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) { + /* + * The HW reports non-shareable, we must + * remove the cacheability attributes as + * well. 
+ */ + baser &= ~(GITS_CBASER_SHAREABILITY_MASK | + GITS_CBASER_CACHEABILITY_MASK); + baser |= GITS_CBASER_nC; + gits_write_cbaser(baser, its->base + GITS_CBASER); + } + pr_info("ITS: using cache flushing for cmd queue\n"); + its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING; + } + + gits_write_cwriter(0, its->base + GITS_CWRITER); + ctlr = readl_relaxed(its->base + GITS_CTLR); + ctlr |= GITS_CTLR_ENABLE; + if (its->is_v4) + ctlr |= GITS_CTLR_ImDe; + writel_relaxed(ctlr, its->base + GITS_CTLR); + + if (GITS_TYPER_HCC(typer)) + its->flags |= ITS_FLAGS_SAVE_SUSPEND_STATE; + + err = its_init_domain(handle, its); + if (err) + goto out_free_tables; + + raw_spin_lock(&its_lock); + list_add(&its->entry, &its_nodes); + raw_spin_unlock(&its_lock); + + return 0; + +out_free_tables: + its_free_tables(its); +out_free_cmd: + free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ)); +out_free_its: + kfree(its); +out_unmap: + iounmap(its_base); + pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err); + return err; +} + +static bool gic_rdists_supports_plpis(void) +{ + return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS); +} + +static int redist_disable_lpis(void) +{ + void __iomem *rbase = gic_data_rdist_rd_base(); + u64 timeout = USEC_PER_SEC; + u64 val; + + if (!gic_rdists_supports_plpis()) { + pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); + return -ENXIO; + } + + val = readl_relaxed(rbase + GICR_CTLR); + if (!(val & GICR_CTLR_ENABLE_LPIS)) + return 0; + + /* + * If coming via a CPU hotplug event, we don't need to disable + * LPIs before trying to re-enable them. They are already + * configured and all is well in the world. + * + * If running with preallocated tables, there is nothing to do. + */ + if (gic_data_rdist()->lpi_enabled || + (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED)) + return 0; + + /* + * From that point on, we only try to do some damage control. 
+ */ + pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n", + smp_processor_id()); + add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); + + /* Disable LPIs */ + val &= ~GICR_CTLR_ENABLE_LPIS; + writel_relaxed(val, rbase + GICR_CTLR); + + /* Make sure any change to GICR_CTLR is observable by the GIC */ + dsb(sy); + + /* + * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs + * from 1 to 0 before programming GICR_PEND{PROP}BASER registers. + * Error out if we time out waiting for RWP to clear. + */ + while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) { + if (!timeout) { + pr_err("CPU%d: Timeout while disabling LPIs\n", + smp_processor_id()); + return -ETIMEDOUT; + } + udelay(1); + timeout--; + } + + /* + * After it has been written to 1, it is IMPLEMENTATION + * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be + * cleared to 0. Error out if clearing the bit failed. + */ + if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) { + pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id()); + return -EBUSY; + } + + return 0; +} + +int phytium_its_cpu_init(void) +{ + if (!list_empty(&its_nodes)) { + int ret; + + ret = redist_disable_lpis(); + if (ret) + return ret; + + its_cpu_init_lpis(); + its_cpu_init_collections(); + } + + return 0; +} + +static const struct of_device_id its_device_id[] = { + { .compatible = "arm,gic-v3-its", }, + {}, +}; + +static int __init its_of_probe(struct device_node *node) +{ + struct device_node *np; + struct resource res; + + for (np = of_find_matching_node(node, its_device_id); np; + np = of_find_matching_node(np, its_device_id)) { + if (!of_device_is_available(np)) + continue; + if (!of_property_read_bool(np, "msi-controller")) { + pr_warn("%pOF: no msi-controller property, ITS ignored\n", + np); + continue; + } + + if (of_address_to_resource(np, 0, &res)) { + pr_warn("%pOF: no regs?\n", np); + continue; + } + + its_probe_one(&res, &np->fwnode, of_node_to_nid(np)); + } + return 0; 
+} + +#ifdef CONFIG_ACPI + +#define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K) + +#ifdef CONFIG_ACPI_NUMA +struct its_srat_map { + /* numa node id */ + u32 numa_node; + /* GIC ITS ID */ + u32 its_id; +}; + +static struct its_srat_map *its_srat_maps __initdata; +static int its_in_srat __initdata; + +static int __init acpi_get_its_numa_node(u32 its_id) +{ + int i; + + for (i = 0; i < its_in_srat; i++) { + if (its_id == its_srat_maps[i].its_id) + return its_srat_maps[i].numa_node; + } + return NUMA_NO_NODE; +} + +static int __init gic_acpi_match_srat_its(struct acpi_subtable_header *header, + const unsigned long end) +{ + return 0; +} + +static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header, + const unsigned long end) +{ + int node; + struct acpi_srat_gic_its_affinity *its_affinity; + + its_affinity = (struct acpi_srat_gic_its_affinity *)header; + if (!its_affinity) + return -EINVAL; + + if (its_affinity->header.length < sizeof(*its_affinity)) { + pr_err("SRAT: Invalid header length %d in ITS affinity\n", + its_affinity->header.length); + return -EINVAL; + } + + node = acpi_map_pxm_to_node(its_affinity->proximity_domain); + + if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) { + pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node); + return 0; + } + + its_srat_maps[its_in_srat].numa_node = node; + its_srat_maps[its_in_srat].its_id = its_affinity->its_id; + its_in_srat++; + pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n", + its_affinity->proximity_domain, its_affinity->its_id, node); + + return 0; +} + +static void __init acpi_table_parse_srat_its(void) +{ + int count; + + count = acpi_table_parse_entries(ACPI_SIG_SRAT, + sizeof(struct acpi_table_srat), + ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, + gic_acpi_match_srat_its, 0); + if (count <= 0) + return; + + its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map), + GFP_KERNEL); + if (!its_srat_maps) { + pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n"); + return; + } + + 
acpi_table_parse_entries(ACPI_SIG_SRAT, + sizeof(struct acpi_table_srat), + ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, + gic_acpi_parse_srat_its, 0); +} + +/* free the its_srat_maps after ITS probing */ +static void __init acpi_its_srat_maps_free(void) +{ + kfree(its_srat_maps); +} +#else +static void __init acpi_table_parse_srat_its(void) { } +static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; } +static void __init acpi_its_srat_maps_free(void) { } +#endif + +static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header, + const unsigned long end) +{ + struct acpi_madt_generic_translator *its_entry; + struct fwnode_handle *dom_handle; + struct resource res; + int err; + + its_entry = (struct acpi_madt_generic_translator *)header; + memset(&res, 0, sizeof(res)); + res.start = its_entry->base_address; + res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1; + res.flags = IORESOURCE_MEM; + + dom_handle = irq_domain_alloc_fwnode((void *)its_entry->base_address); + if (!dom_handle) { + pr_err("ITS@%pa: Unable to allocate GIC-Phytium-2500 ITS domain token\n", + &res.start); + return -ENOMEM; + } + + err = iort_register_domain_token(its_entry->translation_id, res.start, + dom_handle); + if (err) { + pr_err("ITS@%pa: Unable to register GIC-Phytium-2500 ITS domain token (ITS ID %d) to IORT\n", + &res.start, its_entry->translation_id); + goto dom_err; + } + + err = its_probe_one(&res, dom_handle, + acpi_get_its_numa_node(its_entry->translation_id)); + if (!err) + return 0; + + iort_deregister_domain_token(its_entry->translation_id); +dom_err: + irq_domain_free_fwnode(dom_handle); + return err; +} + +static void __init its_acpi_probe(void) +{ + acpi_table_parse_srat_its(); + acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, + gic_acpi_parse_madt_its, 0); + acpi_its_srat_maps_free(); +} +#else +static void __init its_acpi_probe(void) { } +#endif + +int __init phytium_its_init(struct fwnode_handle *handle, struct rdists *rdists, 
+ struct irq_domain *parent_domain) +{ + struct device_node *of_node; + struct its_node *its; + bool has_v4 = false; + int err; + + its_parent = parent_domain; + of_node = to_of_node(handle); + if (of_node) + its_of_probe(of_node); + else + its_acpi_probe(); + + if (list_empty(&its_nodes)) { + pr_warn("ITS: No ITS available, not enabling LPIs\n"); + return -ENXIO; + } + + gic_rdists = rdists; + + err = allocate_lpi_tables(); + if (err) + return err; + + list_for_each_entry(its, &its_nodes, entry) + has_v4 |= its->is_v4; + + if (has_v4 & rdists->has_vlpis) { + if (its_init_vpe_domain() || + its_init_v4(parent_domain, &its_vpe_domain_ops)) { + rdists->has_vlpis = false; + pr_err("ITS: Disabling GICv4 support\n"); + } + } + + register_syscore_ops(&its_syscore_ops); + + return 0; +} diff --git a/drivers/irqchip/irq-gic-phytium-2500.c b/drivers/irqchip/irq-gic-phytium-2500.c new file mode 100644 index 000000000000..87635081c807 --- /dev/null +++ b/drivers/irqchip/irq-gic-phytium-2500.c @@ -0,0 +1,1881 @@ +/* + * Copyright (C) 2020 Phytium Corporation. + * Author: Wang Yinfeng + * Chen Baozi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#define pr_fmt(fmt) "GIC-2500: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "irq-gic-common.h" +#include + +#define MAX_MARS3_SOC_COUNT 8 +#define MARS3_ADDR_SKTID_SHIFT 41 + +struct gic_dist_desc { + void __iomem *dist_base; + phys_addr_t phys_base; + unsigned long size; +}; + +struct redist_region { + void __iomem *redist_base; + phys_addr_t phys_base; + bool single_redist; +}; + +static struct gic_dist_desc mars3_gic_dists[MAX_MARS3_SOC_COUNT] __read_mostly; + +static unsigned int mars3_sockets_bitmap = 0x1; + +#define mars3_irq_to_skt(hwirq) (((hwirq) - 32) % 8) + +struct gic_chip_data { + struct fwnode_handle *fwnode; + void __iomem *dist_base; + struct redist_region *redist_regions; + struct rdists rdists; + struct irq_domain *domain; + u64 redist_stride; + u32 nr_redist_regions; + bool has_rss; + unsigned int irq_nr; + struct partition_desc *ppi_descs[16]; +}; + +static struct gic_chip_data gic_data __read_mostly; +static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key); + +static struct gic_kvm_info gic_v3_kvm_info; +static DEFINE_PER_CPU(bool, has_rss); + +#define MPIDR_RS(mpidr) (((mpidr) & 0xF0UL) >> 4) +#define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist)) +#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) +#define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K) + +/* Our default, arbitrary priority value. Linux only uses one anyway. 
*/ +#define DEFAULT_PMR_VALUE 0xf0 + +static inline unsigned int gic_irq(struct irq_data *d) +{ + return d->hwirq; +} + +static inline int gic_irq_in_rdist(struct irq_data *d) +{ + return gic_irq(d) < 32; +} + +static inline void __iomem *gic_dist_base(struct irq_data *d) +{ + if (gic_irq_in_rdist(d)) /* SGI+PPI -> SGI_base for this CPU */ + return gic_data_rdist_sgi_base(); + + if (d->hwirq <= 1023) /* SPI -> dist_base */ + return gic_data.dist_base; + + return NULL; +} + +static void gic_do_wait_for_rwp(void __iomem *base) +{ + u32 count = 1000000; /* 1s! */ + + while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) { + count--; + if (!count) { + pr_err_ratelimited("RWP timeout, gone fishing\n"); + return; + } + cpu_relax(); + udelay(1); + }; +} + +/* Wait for completion of a distributor change */ +static void gic_dist_wait_for_rwp(void) +{ + gic_do_wait_for_rwp(gic_data.dist_base); +} + +/* Wait for completion of a redistributor change */ +static void gic_redist_wait_for_rwp(void) +{ + gic_do_wait_for_rwp(gic_data_rdist_rd_base()); +} + +#ifdef CONFIG_ARM64 + +static u64 __maybe_unused gic_read_iar(void) +{ + if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154)) + return gic_read_iar_cavium_thunderx(); + else + return gic_read_iar_common(); +} +#endif + +static void gic_enable_redist(bool enable) +{ + void __iomem *rbase; + u32 count = 1000000; /* 1s! 
*/ + u32 val; + unsigned long mpidr; + int i; + + rbase = gic_data_rdist_rd_base(); + + val = readl_relaxed(rbase + GICR_WAKER); + if (enable) + /* Wake up this CPU redistributor */ + val &= ~GICR_WAKER_ProcessorSleep; + else + val |= GICR_WAKER_ProcessorSleep; + writel_relaxed(val, rbase + GICR_WAKER); + + if (!enable) { /* Check that GICR_WAKER is writeable */ + val = readl_relaxed(rbase + GICR_WAKER); + if (!(val & GICR_WAKER_ProcessorSleep)) + return; /* No PM support in this redistributor */ + } + + while (--count) { + val = readl_relaxed(rbase + GICR_WAKER); + if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep)) + break; + cpu_relax(); + udelay(1); + }; + if (!count) + pr_err_ratelimited("redistributor failed to %s...\n", + enable ? "wakeup" : "sleep"); + + mpidr = (unsigned long)cpu_logical_map(smp_processor_id()); + + /* Either Aff0 or Aff1 is not zero */ + if (mpidr & 0xffff) + return; + + /* Skip 64 Redistributors */ + rbase = rbase + 64 * SZ_128K; + + for (i = 0; i < 4; i++) { + val = readl_relaxed(rbase + GICR_WAKER); + if (enable) + val &= ~GICR_WAKER_ProcessorSleep; + else + val |= GICR_WAKER_ProcessorSleep; + writel_relaxed(val, rbase + GICR_WAKER); + + if (!enable) { + val = readl_relaxed(rbase + GICR_WAKER); + if (!(val & GICR_WAKER_ProcessorSleep)) + return; + } + + count = 1000000; /* 1s! */ + while (--count) { + val = readl_relaxed(rbase + GICR_WAKER); + if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep)) + break; + cpu_relax(); + udelay(1); + }; + + if (!count) + pr_err_ratelimited("CPU MPIDR 0x%lx: redistributor %d failed to %s...\n", + mpidr, 64 + i, enable ? 
"wakeup" : "sleep"); + + rbase = rbase + SZ_128K; + } +} + +/* + * Routines to disable, enable, EOI and route interrupts + */ +static int gic_peek_irq(struct irq_data *d, u32 offset) +{ + u32 mask = 1 << (gic_irq(d) % 32); + void __iomem *base; + unsigned int skt; + + if (gic_irq_in_rdist(d)) + base = gic_data_rdist_sgi_base(); + else { + skt = mars3_irq_to_skt(gic_irq(d)); + base = mars3_gic_dists[skt].dist_base; + } + + return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask); +} + +static void gic_poke_irq(struct irq_data *d, u32 offset) +{ + u32 mask = 1 << (gic_irq(d) % 32); + void __iomem *base; + unsigned long mpidr; + void __iomem *rbase; + int i; + unsigned int skt; + + if (gic_irq_in_rdist(d)) { + base = gic_data_rdist_sgi_base(); + + writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4); + gic_redist_wait_for_rwp(); + + mpidr = (unsigned long)cpu_logical_map(smp_processor_id()); + + if ((mpidr & 0xffff) == 0) { + rbase = base + 64*SZ_128K; + + for (i = 0; i < 4; i++) { + writel_relaxed(mask, rbase + offset + (gic_irq(d) / 32) * 4); + gic_do_wait_for_rwp(rbase - SZ_64K); + rbase = rbase + SZ_128K; + } + } + } else { + skt = mars3_irq_to_skt(gic_irq(d)); + base = mars3_gic_dists[skt].dist_base; + writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4); + gic_do_wait_for_rwp(base); + } +} + +static void gic_mask_irq(struct irq_data *d) +{ + gic_poke_irq(d, GICD_ICENABLER); +} + +static void gic_eoimode1_mask_irq(struct irq_data *d) +{ + gic_mask_irq(d); + /* + * When masking a forwarded interrupt, make sure it is + * deactivated as well. + * + * This ensures that an interrupt that is getting + * disabled/masked will not get "stuck", because there is + * noone to deactivate it (guest is being terminated). 
+ */ + if (irqd_is_forwarded_to_vcpu(d)) + gic_poke_irq(d, GICD_ICACTIVER); +} + +static void gic_unmask_irq(struct irq_data *d) +{ + gic_poke_irq(d, GICD_ISENABLER); +} + +static int gic_irq_set_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, bool val) +{ + u32 reg; + + if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */ + return -EINVAL; + + switch (which) { + case IRQCHIP_STATE_PENDING: + reg = val ? GICD_ISPENDR : GICD_ICPENDR; + break; + + case IRQCHIP_STATE_ACTIVE: + reg = val ? GICD_ISACTIVER : GICD_ICACTIVER; + break; + + case IRQCHIP_STATE_MASKED: + reg = val ? GICD_ICENABLER : GICD_ISENABLER; + break; + + default: + return -EINVAL; + } + + gic_poke_irq(d, reg); + return 0; +} + +static int gic_irq_get_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, bool *val) +{ + if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */ + return -EINVAL; + + switch (which) { + case IRQCHIP_STATE_PENDING: + *val = gic_peek_irq(d, GICD_ISPENDR); + break; + + case IRQCHIP_STATE_ACTIVE: + *val = gic_peek_irq(d, GICD_ISACTIVER); + break; + + case IRQCHIP_STATE_MASKED: + *val = !gic_peek_irq(d, GICD_ISENABLER); + break; + + default: + return -EINVAL; + } + + return 0; +} + +static void gic_eoi_irq(struct irq_data *d) +{ + gic_write_eoir(gic_irq(d)); +} + +static void gic_eoimode1_eoi_irq(struct irq_data *d) +{ + /* + * No need to deactivate an LPI, or an interrupt that + * is is getting forwarded to a vcpu. 
+ */ + if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d)) + return; + gic_write_dir(gic_irq(d)); +} + +static int gic_set_type(struct irq_data *d, unsigned int type) +{ + unsigned int irq = gic_irq(d); + unsigned long mpidr; + int i; + void __iomem *base; + void __iomem *rbase; + unsigned int skt; + int ret; + + /* Interrupt configuration for SGIs can't be changed */ + if (irq < 16) + return -EINVAL; + + /* SPIs have restrictions on the supported types */ + if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH && + type != IRQ_TYPE_EDGE_RISING) + return -EINVAL; + + if (gic_irq_in_rdist(d)) { + base = gic_data_rdist_sgi_base(); + ret = gic_configure_irq(irq, type, base, gic_redist_wait_for_rwp); + + mpidr = (unsigned long)cpu_logical_map(smp_processor_id()); + + if ((mpidr & 0xffff) == 0) { + rbase = base + 64*SZ_128K; + + for (i = 0; i < 4; i++) { + ret = gic_configure_irq(irq, type, rbase, NULL); + gic_do_wait_for_rwp(rbase - SZ_64K); + rbase = rbase + SZ_128K; + } + } + } else { + skt = mars3_irq_to_skt(gic_irq(d)); + base = mars3_gic_dists[skt].dist_base; + ret = gic_configure_irq(irq, type, base, NULL); + gic_do_wait_for_rwp(base); + } + + return ret; +} + +static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) +{ + if (vcpu) + irqd_set_forwarded_to_vcpu(d); + else + irqd_clr_forwarded_to_vcpu(d); + return 0; +} + +static u64 gic_mpidr_to_affinity(unsigned long mpidr) +{ + u64 aff; + + aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 | + MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | + MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | + MPIDR_AFFINITY_LEVEL(mpidr, 0)); + + return aff; +} + +static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) +{ + u32 irqnr; + + do { + irqnr = gic_read_iar(); + + if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) { + int err; + + if (static_branch_likely(&supports_deactivate_key)) + gic_write_eoir(irqnr); + else + isb(); + + err = handle_domain_irq(gic_data.domain, irqnr, regs); + if (err) { + 
WARN_ONCE(true, "Unexpected interrupt received!\n"); + if (static_branch_likely(&supports_deactivate_key)) { + if (irqnr < 8192) + gic_write_dir(irqnr); + } else { + gic_write_eoir(irqnr); + } + } + continue; + } + if (irqnr < 16) { + gic_write_eoir(irqnr); + if (static_branch_likely(&supports_deactivate_key)) + gic_write_dir(irqnr); +#ifdef CONFIG_SMP + /* + * Unlike GICv2, we don't need an smp_rmb() here. + * The control dependency from gic_read_iar to + * the ISB in gic_write_eoir is enough to ensure + * that any shared data read by handle_IPI will + * be read after the ACK. + */ + handle_IPI(irqnr, regs); +#else + WARN_ONCE(true, "Unexpected SGI received!\n"); +#endif + continue; + } + } while (irqnr != ICC_IAR1_EL1_SPURIOUS); +} + +static void __init gic_dist_init(void) +{ + unsigned int i; + u64 affinity; + void __iomem *base; + unsigned int skt; + + for (skt = 0; skt < MAX_MARS3_SOC_COUNT; skt++) { + if ((((unsigned int)1 << skt) & mars3_sockets_bitmap) == 0) + continue; + + base = mars3_gic_dists[skt].dist_base; + + /* Disable the distributor */ + writel_relaxed(0, base + GICD_CTLR); + gic_do_wait_for_rwp(base); + + /* + * Configure SPIs as non-secure Group-1. This will only matter + * if the GIC only has a single security state. This will not + * do the right thing if the kernel is running in secure mode, + * but that's not the intended use case anyway. + */ + for (i = 32; i < gic_data.irq_nr; i += 32) + writel_relaxed(~0, base + GICD_IGROUPR + i / 8); + + gic_dist_config(base, gic_data.irq_nr, NULL); + gic_do_wait_for_rwp(base); + + /* Enable distributor with ARE, Group1 */ + writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1, + base + GICD_CTLR); + + /* + * Set all global interrupts to the boot CPU only. ARE must be + * enabled. 
+ */ + affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id())); + for (i = 32; i < gic_data.irq_nr; i++) + gic_write_irouter(affinity, base + GICD_IROUTER + i * 8); + } +} + +static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *)) +{ + int ret = -ENODEV; + int i; + + for (i = 0; i < gic_data.nr_redist_regions; i++) { + void __iomem *ptr = gic_data.redist_regions[i].redist_base; + u64 typer; + u32 reg; + + reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK; + if (reg != GIC_PIDR2_ARCH_GICv3 && + reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */ + pr_warn("No redistributor present @%p\n", ptr); + break; + } + + do { + typer = gic_read_typer(ptr + GICR_TYPER); + ret = fn(gic_data.redist_regions + i, ptr); + if (!ret) + return 0; + + if (gic_data.redist_regions[i].single_redist) + break; + + if (gic_data.redist_stride) { + ptr += gic_data.redist_stride; + } else { + ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */ + if (typer & GICR_TYPER_VLPIS) + ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */ + } + } while (!(typer & GICR_TYPER_LAST)); + } + + return ret ? -ENODEV : 0; +} + +static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr) +{ + unsigned long mpidr = cpu_logical_map(smp_processor_id()); + u64 typer; + u32 aff, aff2_skt, rdist_skt; + + /* + * Convert affinity to a 32bit value that can be matched to + * GICR_TYPER bits [63:32]. 
+ */ + aff = (MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | + MPIDR_AFFINITY_LEVEL(mpidr, 0)); + + aff2_skt = MPIDR_AFFINITY_LEVEL(mpidr, 2) & 0x7; + rdist_skt = (((u64)region->phys_base >> MARS3_ADDR_SKTID_SHIFT) & 0x7); + + if (aff2_skt != rdist_skt) + return 1; + + typer = gic_read_typer(ptr + GICR_TYPER); + if ((typer >> 32) == aff) { + u64 offset = ptr - region->redist_base; + gic_data_rdist_rd_base() = ptr; + gic_data_rdist()->phys_base = region->phys_base + offset; + + pr_info("CPU%d: found redistributor %lx region %d:%pa\n", + smp_processor_id(), mpidr, + (int)(region - gic_data.redist_regions), + &gic_data_rdist()->phys_base); + return 0; + } + + /* Try next one */ + return 1; +} + +static int gic_populate_rdist(void) +{ + if (gic_iterate_rdists(__gic_populate_rdist) == 0) + return 0; + + /* We couldn't even deal with ourselves... */ + WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n", + smp_processor_id(), + (unsigned long)cpu_logical_map(smp_processor_id())); + return -ENODEV; +} + +static int __gic_update_vlpi_properties(struct redist_region *region, + void __iomem *ptr) +{ + u64 typer = gic_read_typer(ptr + GICR_TYPER); + gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS); + gic_data.rdists.has_direct_lpi &= !!(typer & GICR_TYPER_DirectLPIS); + + return 1; +} + +static void gic_update_vlpi_properties(void) +{ + gic_iterate_rdists(__gic_update_vlpi_properties); + pr_info("%sVLPI support, %sdirect LPI support\n", + !gic_data.rdists.has_vlpis ? "no " : "", + !gic_data.rdists.has_direct_lpi ? "no " : ""); +} + +static void gic_cpu_sys_reg_init(void) +{ + int i, cpu = smp_processor_id(); + u64 mpidr = cpu_logical_map(cpu); + u64 need_rss = MPIDR_RS(mpidr); + bool group0; + u32 val, pribits; + + /* + * Need to check that the SRE bit has actually been set. If + * not, it means that SRE is disabled at EL2. We're going to + * die painfully, and there is nothing we can do about it. + * + * Kindly inform the luser. 
+ */
+ if (!gic_enable_sre())
+ pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
+
+ pribits = gic_read_ctlr();
+ pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
+ pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
+ pribits++;
+
+ /*
+ * Let's find out if Group0 is under control of EL3 or not by
+ * setting the highest possible, non-zero priority in PMR.
+ *
+ * If SCR_EL3.FIQ is set, the priority gets shifted down in
+ * order for the CPU interface to set bit 7, and keep the
+ * actual priority in the non-secure range. In the process, it
+ * loses the least significant bit and the actual priority
+ * becomes 0x80. Reading it back returns 0, indicating that
+ * we don't have access to Group0.
+ */
+ write_gicreg(BIT(8 - pribits), ICC_PMR_EL1);
+ val = read_gicreg(ICC_PMR_EL1);
+ group0 = val != 0;
+
+ /* Set priority mask register */
+ write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
+
+ /*
+ * Some firmwares hand over to the kernel with the BPR changed from
+ * its reset value (and with a value large enough to prevent
+ * any pre-emptive interrupts from working at all). Writing a zero
+ * to BPR restores its reset value.
+ */
+ gic_write_bpr1(0);
+
+ if (static_branch_likely(&supports_deactivate_key)) {
+ /* EOI drops priority only (mode 1) */
+ gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
+ } else {
+ /* EOI deactivates interrupt too (mode 0) */
+ gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
+ }
+
+ /* Always whack Group0 before Group1 */
+ if (group0) {
+ switch(pribits) {
+ case 8:
+ case 7:
+ write_gicreg(0, ICC_AP0R3_EL1);
+ write_gicreg(0, ICC_AP0R2_EL1);
+ /* Fall through */
+ case 6:
+ write_gicreg(0, ICC_AP0R1_EL1);
+ /* Fall through */
+ case 5:
+ case 4:
+ write_gicreg(0, ICC_AP0R0_EL1);
+ }
+
+ isb();
+ }
+
+ switch(pribits) {
+ case 8:
+ case 7:
+ write_gicreg(0, ICC_AP1R3_EL1);
+ write_gicreg(0, ICC_AP1R2_EL1);
+ /* Fall through */
+ case 6:
+ write_gicreg(0, ICC_AP1R1_EL1);
+ /* Fall through */
+ case 5:
+ case 4:
+ write_gicreg(0, ICC_AP1R0_EL1);
+ }
+
+ isb();
+
+ /* ... and let's hit the road...
*/ + gic_write_grpen1(1); + + /* Keep the RSS capability status in per_cpu variable */ + per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS); + + /* Check all the CPUs have capable of sending SGIs to other CPUs */ + for_each_online_cpu(i) { + bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu); + + need_rss |= MPIDR_RS(cpu_logical_map(i)); + if (need_rss && (!have_rss)) + pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n", + cpu, (unsigned long)mpidr, + i, (unsigned long)cpu_logical_map(i)); + } + + /** + * GIC spec says, when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0, + * writing ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED + * UNPREDICTABLE choice of : + * - The write is ignored. + * - The RS field is treated as 0. + */ + if (need_rss && (!gic_data.has_rss)) + pr_crit_once("RSS is required but GICD doesn't support it\n"); +} + +static bool gicv3_nolpi; + +static int __init gicv3_nolpi_cfg(char *buf) +{ + return strtobool(buf, &gicv3_nolpi); +} +early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg); + +static int gic_dist_supports_lpis(void) +{ + return !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) && !gicv3_nolpi; +} + +static void gic_cpu_init(void) +{ + void __iomem *rbase; + unsigned long mpidr; + int i; + + /* Register ourselves with the rest of the world */ + if (gic_populate_rdist()) + return; + + gic_enable_redist(true); + + rbase = gic_data_rdist_sgi_base(); + + /* Configure SGIs/PPIs as non-secure Group-1 */ + writel_relaxed(~0, rbase + GICR_IGROUPR0); + + gic_cpu_config(rbase, gic_redist_wait_for_rwp); + + mpidr = (unsigned long)cpu_logical_map(smp_processor_id()); + + if ((mpidr & 0xFFFF) == 0) { + rbase = rbase + 64*SZ_128K; + + for (i = 0; i < 4; i++) { + /* Configure SGIs/PPIs as non-secure Group-1 */ + writel_relaxed(~0, rbase + GICR_IGROUPR0); + + gic_cpu_config(rbase, NULL); + gic_do_wait_for_rwp(rbase - SZ_64K); + + rbase = rbase + SZ_128K; + } + } + + /* initialise system registers */ + 
gic_cpu_sys_reg_init(); +} + +#ifdef CONFIG_SMP + +#define MPIDR_TO_SGI_RS(mpidr) (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT) +#define MPIDR_TO_SGI_CLUSTER_ID(mpidr) ((mpidr) & ~0xFUL) + +static int gic_starting_cpu(unsigned int cpu) +{ + gic_cpu_init(); + + if (gic_dist_supports_lpis()) + phytium_its_cpu_init(); + + return 0; +} + +static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, + unsigned long cluster_id) +{ + int next_cpu, cpu = *base_cpu; + unsigned long mpidr = cpu_logical_map(cpu); + u16 tlist = 0; + + while (cpu < nr_cpu_ids) { + tlist |= 1 << (mpidr & 0xf); + + next_cpu = cpumask_next(cpu, mask); + if (next_cpu >= nr_cpu_ids) + goto out; + cpu = next_cpu; + + mpidr = cpu_logical_map(cpu); + + if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) { + cpu--; + goto out; + } + } +out: + *base_cpu = cpu; + return tlist; +} + +#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \ + (MPIDR_AFFINITY_LEVEL(cluster_id, level) \ + << ICC_SGI1R_AFFINITY_## level ##_SHIFT) + +static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq) +{ + u64 val; + + val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3) | + MPIDR_TO_SGI_AFFINITY(cluster_id, 2) | + irq << ICC_SGI1R_SGI_ID_SHIFT | + MPIDR_TO_SGI_AFFINITY(cluster_id, 1) | + MPIDR_TO_SGI_RS(cluster_id) | + tlist << ICC_SGI1R_TARGET_LIST_SHIFT); + + pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val); + gic_write_sgi1r(val); +} + +static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) +{ + int cpu; + + if (WARN_ON(irq >= 16)) + return; + + /* + * Ensure that stores to Normal memory are visible to the + * other CPUs before issuing the IPI. 
+ */ + wmb(); + + for_each_cpu(cpu, mask) { + u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu)); + u16 tlist; + + tlist = gic_compute_target_list(&cpu, mask, cluster_id); + gic_send_sgi(cluster_id, tlist, irq); + } + + /* Force the above writes to ICC_SGI1R_EL1 to be executed */ + isb(); +} + +static void gic_smp_init(void) +{ + set_smp_cross_call(gic_raise_softirq); + cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING, + "irqchip/arm/gic_phytium_2500:starting", + gic_starting_cpu, NULL); +} + +static int gic_cpumask_select(struct irq_data *d, const struct cpumask *mask_val) +{ + unsigned int skt, irq_skt, i; + unsigned int cpu, cpus = 0; + unsigned int skt_cpu_cnt[MAX_MARS3_SOC_COUNT] = {0}; + + irq_skt = mars3_irq_to_skt(gic_irq(d)); + + for (i = 0; i < nr_cpu_ids; i++) { + skt = (cpu_logical_map(i) >> 16) & 0xff; + if ((skt >= 0) && (skt < MAX_MARS3_SOC_COUNT)) { + if ((is_kdump_kernel()) && (irq_skt == skt)) { + return i; + } + + skt_cpu_cnt[skt]++; + } + else if (skt != 0xff) + pr_err("socket address: %d is out of range.", skt); + } + + if (0 != irq_skt) { + for (i = 0; i < irq_skt; i++) + cpus += skt_cpu_cnt[i]; + } + + cpu = cpumask_any_and(mask_val, cpu_online_mask); + cpus = cpus + cpu % skt_cpu_cnt[irq_skt]; + + return cpus; +} + +static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, + bool force) +{ + unsigned int cpu, skt; + void __iomem *reg; + int enabled; + u64 val; + + if (force) + cpu = cpumask_first(mask_val); + else + cpu = gic_cpumask_select(d, mask_val); + + if (cpu >= nr_cpu_ids) + return -EINVAL; + + if (gic_irq_in_rdist(d)) + return -EINVAL; + + /* If interrupt was enabled, disable it first */ + enabled = gic_peek_irq(d, GICD_ISENABLER); + if (enabled) + gic_mask_irq(d); + + skt = mars3_irq_to_skt(gic_irq(d)); + reg = mars3_gic_dists[skt].dist_base + GICD_IROUTER + (gic_irq(d) * 8); + val = gic_mpidr_to_affinity(cpu_logical_map(cpu)); + + gic_write_irouter(val, reg); + + /* + * If the interrupt was 
enabled, enabled it again. Otherwise, + * just wait for the distributor to have digested our changes. + */ + if (enabled) + gic_unmask_irq(d); + else + gic_dist_wait_for_rwp(); + + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + + return IRQ_SET_MASK_OK_DONE; +} +#else +#define gic_set_affinity NULL +#define gic_smp_init() do { } while(0) +#endif + +static int gic_retrigger(struct irq_data *data) +{ + return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true); +} + +#ifdef CONFIG_CPU_PM +/* Check whether it's single security state view */ +static bool gic_dist_security_disabled(void) +{ + return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS; +} + +static int gic_cpu_pm_notifier(struct notifier_block *self, + unsigned long cmd, void *v) +{ + if (cmd == CPU_PM_EXIT) { + if (gic_dist_security_disabled()) + gic_enable_redist(true); + gic_cpu_sys_reg_init(); + } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) { + gic_write_grpen1(0); + gic_enable_redist(false); + } + return NOTIFY_OK; +} + +static struct notifier_block gic_cpu_pm_notifier_block = { + .notifier_call = gic_cpu_pm_notifier, +}; + +static void gic_cpu_pm_init(void) +{ + cpu_pm_register_notifier(&gic_cpu_pm_notifier_block); +} + +#else +static inline void gic_cpu_pm_init(void) { } +#endif /* CONFIG_CPU_PM */ + +static struct irq_chip gic_chip = { + .name = "GIC-Phytium-2500", + .irq_mask = gic_mask_irq, + .irq_unmask = gic_unmask_irq, + .irq_eoi = gic_eoi_irq, + .irq_set_type = gic_set_type, + .irq_set_affinity = gic_set_affinity, + .irq_retrigger = gic_retrigger, + .irq_get_irqchip_state = gic_irq_get_irqchip_state, + .irq_set_irqchip_state = gic_irq_set_irqchip_state, + .flags = IRQCHIP_SET_TYPE_MASKED | + IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_MASK_ON_SUSPEND, +}; + +static struct irq_chip gic_eoimode1_chip = { + .name = "GIC-Phytium-2500", + .irq_mask = gic_eoimode1_mask_irq, + .irq_unmask = gic_unmask_irq, + .irq_eoi = gic_eoimode1_eoi_irq, + .irq_set_type = 
gic_set_type, + .irq_set_affinity = gic_set_affinity, + .irq_retrigger = gic_retrigger, + .irq_get_irqchip_state = gic_irq_get_irqchip_state, + .irq_set_irqchip_state = gic_irq_set_irqchip_state, + .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity, + .flags = IRQCHIP_SET_TYPE_MASKED | + IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_MASK_ON_SUSPEND, +}; + +#define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer)) + +static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hw) +{ + struct irq_chip *chip = &gic_chip; + struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq)); + + if (static_branch_likely(&supports_deactivate_key)) + chip = &gic_eoimode1_chip; + + /* SGIs are private to the core kernel */ + if (hw < 16) + return -EPERM; + /* Nothing here */ + if (hw >= gic_data.irq_nr && hw < 8192) + return -EPERM; + /* Off limits */ + if (hw >= GIC_ID_NR) + return -EPERM; + + /* PPIs */ + if (hw < 32) { + irq_set_percpu_devid(irq); + irq_domain_set_info(d, irq, hw, chip, d->host_data, + handle_percpu_devid_irq, NULL, NULL); + irq_set_status_flags(irq, IRQ_NOAUTOEN); + } + /* SPIs */ + if (hw >= 32 && hw < gic_data.irq_nr) { + irq_domain_set_info(d, irq, hw, chip, d->host_data, + handle_fasteoi_irq, NULL, NULL); + irq_set_probe(irq); + irqd_set_single_target(irqd); + } + /* LPIs */ + if (hw >= 8192 && hw < GIC_ID_NR) { + if (!gic_dist_supports_lpis()) + return -EPERM; + irq_domain_set_info(d, irq, hw, chip, d->host_data, + handle_fasteoi_irq, NULL, NULL); + } + + /* Prevents SW retriggers which mess up the ACK/EOI ordering */ + irqd_set_handle_enforce_irqctx(irqd); + return 0; +} + +#define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1) + +static int gic_irq_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int *type) +{ + if (is_of_node(fwspec->fwnode)) { + if (fwspec->param_count < 3) + return -EINVAL; + + switch (fwspec->param[0]) { + case 0: /* SPI */ + *hwirq = 
fwspec->param[1] + 32; + break; + case 1: /* PPI */ + case GIC_IRQ_TYPE_PARTITION: + *hwirq = fwspec->param[1] + 16; + break; + case GIC_IRQ_TYPE_LPI: /* LPI */ + *hwirq = fwspec->param[1]; + break; + default: + return -EINVAL; + } + + *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; + + /* + * Make it clear that broken DTs are... broken. + * Partitionned PPIs are an unfortunate exception. + */ + WARN_ON(*type == IRQ_TYPE_NONE && + fwspec->param[0] != GIC_IRQ_TYPE_PARTITION); + return 0; + } + + if (is_fwnode_irqchip(fwspec->fwnode)) { + if (fwspec->param_count != 2) + return -EINVAL; + + *hwirq = fwspec->param[0]; + *type = fwspec->param[1]; + + WARN_ON(*type == IRQ_TYPE_NONE); + return 0; + } + + return -EINVAL; +} + +static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + int i, ret; + irq_hw_number_t hwirq; + unsigned int type = IRQ_TYPE_NONE; + struct irq_fwspec *fwspec = arg; + + ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type); + if (ret) + return ret; + + for (i = 0; i < nr_irqs; i++) { + ret = gic_irq_domain_map(domain, virq + i, hwirq + i); + if (ret) + return ret; + } + + return 0; +} + +static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + int i; + + for (i = 0; i < nr_irqs; i++) { + struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); + irq_set_handler(virq + i, NULL); + irq_domain_reset_irq_data(d); + } +} + +static int gic_irq_domain_select(struct irq_domain *d, + struct irq_fwspec *fwspec, + enum irq_domain_bus_token bus_token) +{ + /* Not for us */ + if (fwspec->fwnode != d->fwnode) + return 0; + + /* If this is not DT, then we have a single domain */ + if (!is_of_node(fwspec->fwnode)) + return 1; + + /* + * If this is a PPI and we have a 4th (non-null) parameter, + * then we need to match the partition domain. 
+ */ + if (fwspec->param_count >= 4 && + fwspec->param[0] == 1 && fwspec->param[3] != 0) + return d == partition_get_domain(gic_data.ppi_descs[fwspec->param[1]]); + + return d == gic_data.domain; +} + +static const struct irq_domain_ops gic_irq_domain_ops = { + .translate = gic_irq_domain_translate, + .alloc = gic_irq_domain_alloc, + .free = gic_irq_domain_free, + .select = gic_irq_domain_select, +}; + +static int partition_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int *type) +{ + struct device_node *np; + int ret; + + np = of_find_node_by_phandle(fwspec->param[3]); + if (WARN_ON(!np)) + return -EINVAL; + + ret = partition_translate_id(gic_data.ppi_descs[fwspec->param[1]], + of_node_to_fwnode(np)); + if (ret < 0) + return ret; + + *hwirq = ret; + *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; + + return 0; +} + +static const struct irq_domain_ops partition_domain_ops = { + .translate = partition_domain_translate, + .select = gic_irq_domain_select, +}; + +static int __init gic_init_bases(void __iomem *dist_base, + struct redist_region *rdist_regs, + u32 nr_redist_regions, + u64 redist_stride, + struct fwnode_handle *handle) +{ + u32 typer; + int gic_irqs; + int err; + + if (!is_hyp_mode_available()) + static_branch_disable(&supports_deactivate_key); + + if (static_branch_likely(&supports_deactivate_key)) + pr_info("GIC: Using split EOI/Deactivate mode\n"); + + gic_data.fwnode = handle; + gic_data.dist_base = dist_base; + gic_data.redist_regions = rdist_regs; + gic_data.nr_redist_regions = nr_redist_regions; + gic_data.redist_stride = redist_stride; + + /* + * Find out how many interrupts are supported. 
+ * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI) + */ + typer = readl_relaxed(gic_data.dist_base + GICD_TYPER); + gic_data.rdists.gicd_typer = typer; + gic_irqs = GICD_TYPER_IRQS(typer); + if (gic_irqs > 1020) + gic_irqs = 1020; + gic_data.irq_nr = gic_irqs; + + gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops, + &gic_data); + irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED); + gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist)); + gic_data.rdists.has_vlpis = true; + gic_data.rdists.has_direct_lpi = true; + + if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) { + err = -ENOMEM; + goto out_free; + } + + gic_data.has_rss = !!(typer & GICD_TYPER_RSS); + pr_info("Distributor has %sRange Selector support\n", + gic_data.has_rss ? "" : "no "); + + set_handle_irq(gic_handle_irq); + + gic_update_vlpi_properties(); + + gic_smp_init(); + gic_dist_init(); + gic_cpu_init(); + gic_cpu_pm_init(); + + if (gic_dist_supports_lpis()) { + phytium_its_init(handle, &gic_data.rdists, gic_data.domain); + phytium_its_cpu_init(); + } + + return 0; + +out_free: + if (gic_data.domain) + irq_domain_remove(gic_data.domain); + free_percpu(gic_data.rdists.rdist); + return err; +} + +static int __init gic_validate_dist_version(void __iomem *dist_base) +{ + u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK; + + if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) + return -ENODEV; + + return 0; +} + +/* Create all possible partitions at boot time */ +static void __init gic_populate_ppi_partitions(struct device_node *gic_node) +{ + struct device_node *parts_node, *child_part; + int part_idx = 0, i; + int nr_parts; + struct partition_affinity *parts; + + parts_node = of_get_child_by_name(gic_node, "ppi-partitions"); + if (!parts_node) + return; + + nr_parts = of_get_child_count(parts_node); + + if (!nr_parts) + goto out_put_node; + + parts = kcalloc(nr_parts, sizeof(*parts), 
GFP_KERNEL); + if (WARN_ON(!parts)) + goto out_put_node; + + for_each_child_of_node(parts_node, child_part) { + struct partition_affinity *part; + int n; + + part = &parts[part_idx]; + + part->partition_id = of_node_to_fwnode(child_part); + + pr_info("GIC: PPI partition %s[%d] { ", + child_part->name, part_idx); + + n = of_property_count_elems_of_size(child_part, "affinity", + sizeof(u32)); + WARN_ON(n <= 0); + + for (i = 0; i < n; i++) { + int err, cpu; + u32 cpu_phandle; + struct device_node *cpu_node; + + err = of_property_read_u32_index(child_part, "affinity", + i, &cpu_phandle); + if (WARN_ON(err)) + continue; + + cpu_node = of_find_node_by_phandle(cpu_phandle); + if (WARN_ON(!cpu_node)) + continue; + + cpu = of_cpu_node_to_id(cpu_node); + if (WARN_ON(cpu < 0)) + continue; + + pr_cont("%pOF[%d] ", cpu_node, cpu); + + cpumask_set_cpu(cpu, &part->mask); + } + + pr_cont("}\n"); + part_idx++; + } + + for (i = 0; i < 16; i++) { + unsigned int irq; + struct partition_desc *desc; + struct irq_fwspec ppi_fwspec = { + .fwnode = gic_data.fwnode, + .param_count = 3, + .param = { + [0] = GIC_IRQ_TYPE_PARTITION, + [1] = i, + [2] = IRQ_TYPE_NONE, + }, + }; + + irq = irq_create_fwspec_mapping(&ppi_fwspec); + if (WARN_ON(!irq)) + continue; + desc = partition_create_desc(gic_data.fwnode, parts, nr_parts, + irq, &partition_domain_ops); + if (WARN_ON(!desc)) + continue; + + gic_data.ppi_descs[i] = desc; + } + +out_put_node: + of_node_put(parts_node); +} + +static void __init gic_of_setup_kvm_info(struct device_node *node) +{ + int ret; + struct resource r; + u32 gicv_idx; + + gic_v3_kvm_info.type = GIC_V3; + + gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0); + if (!gic_v3_kvm_info.maint_irq) + return; + + if (of_property_read_u32(node, "#redistributor-regions", + &gicv_idx)) + gicv_idx = 1; + + gicv_idx += 3; /* Also skip GICD, GICC, GICH */ + ret = of_address_to_resource(node, gicv_idx, &r); + if (!ret) + gic_v3_kvm_info.vcpu = r; + + gic_v3_kvm_info.has_v4 = 
gic_data.rdists.has_vlpis; + gic_set_kvm_info(&gic_v3_kvm_info); +} + +static int __init gic_of_init(struct device_node *node, struct device_node *parent) +{ + void __iomem *dist_base; + struct redist_region *rdist_regs; + u64 redist_stride; + u32 nr_redist_regions; + int err, i, skt; + struct resource res; + + dist_base = of_iomap(node, 0); + if (!dist_base) { + pr_err("%pOF: unable to map gic dist registers\n", node); + return -ENXIO; + } + + err = gic_validate_dist_version(dist_base); + if (err) { + pr_err("%pOF: no distributor detected, giving up\n", node); + goto out_unmap_dist; + } + + if (of_address_to_resource(node, 0, &res)) { + printk("Error: No GIC Distributor in FDT\n"); + goto out_unmap_dist; + } + + mars3_gic_dists[0].phys_base = res.start; + mars3_gic_dists[0].size = resource_size(&res); + mars3_gic_dists[0].dist_base = dist_base; + + if (of_property_read_u32(node, "#mars3-soc-bitmap", &mars3_sockets_bitmap)) + mars3_sockets_bitmap = 0x1; + + for (skt = 1; skt < MAX_MARS3_SOC_COUNT; skt++) { + if ((((unsigned int)1 << skt) & mars3_sockets_bitmap) == 0) + continue; + + mars3_gic_dists[skt].phys_base = ((unsigned long)skt << MARS3_ADDR_SKTID_SHIFT) | + mars3_gic_dists[0].phys_base; + mars3_gic_dists[skt].size = mars3_gic_dists[0].size; + mars3_gic_dists[skt].dist_base = ioremap(mars3_gic_dists[skt].phys_base, + mars3_gic_dists[skt].size); + } + + if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions)) + nr_redist_regions = 1; + + rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs), + GFP_KERNEL); + if (!rdist_regs) { + err = -ENOMEM; + goto out_unmap_dist; + } + + for (i = 0; i < nr_redist_regions; i++) { + struct resource res; + int ret; + + ret = of_address_to_resource(node, 1 + i, &res); + rdist_regs[i].redist_base = of_iomap(node, 1 + i); + if (ret || !rdist_regs[i].redist_base) { + pr_err("%pOF: couldn't map region %d\n", node, i); + err = -ENODEV; + goto out_unmap_rdist; + } + rdist_regs[i].phys_base = res.start; + } 
+ + if (of_property_read_u64(node, "redistributor-stride", &redist_stride)) + redist_stride = 0; + + err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions, + redist_stride, &node->fwnode); + if (err) + goto out_unmap_rdist; + + gic_populate_ppi_partitions(node); + + if (static_branch_likely(&supports_deactivate_key)) + gic_of_setup_kvm_info(node); + return 0; + +out_unmap_rdist: + for (i = 0; i < nr_redist_regions; i++) + if (rdist_regs[i].redist_base) + iounmap(rdist_regs[i].redist_base); + kfree(rdist_regs); +out_unmap_dist: + iounmap(dist_base); + return err; +} + +IRQCHIP_DECLARE(gic_phyt_2500, "arm,gic-phytium-2500", gic_of_init); + +#ifdef CONFIG_ACPI +static struct +{ + void __iomem *dist_base; + struct redist_region *redist_regs; + u32 nr_redist_regions; + bool single_redist; + u32 maint_irq; + int maint_irq_mode; + phys_addr_t vcpu_base; +} acpi_data __initdata; + +static int gic_mars3_sockets_bitmap(void) +{ + unsigned int skt, i; + int skt_bitmap = 0; + unsigned int skt_cpu_cnt[MAX_MARS3_SOC_COUNT] = {0}; + + for (i = 0; i < max_t(unsigned int, nr_cpu_ids, NR_CPUS); i++) { + skt = (cpu_logical_map(i) >> 16) & 0xff; + if ((skt >= 0) && (skt < MAX_MARS3_SOC_COUNT)) + skt_cpu_cnt[skt]++; + else if (skt != 0xff) + pr_err("socket address: %d is out of range.", skt); + } + + for (i = 0; i < MAX_MARS3_SOC_COUNT; i++) { + if (skt_cpu_cnt[i] > 0) + skt_bitmap |= (1 << i); + } + + return skt_bitmap; +} + +static void __init +gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base) +{ + static int count = 0; + + acpi_data.redist_regs[count].phys_base = phys_base; + acpi_data.redist_regs[count].redist_base = redist_base; + acpi_data.redist_regs[count].single_redist = acpi_data.single_redist; + count++; +} + +static int __init +gic_acpi_parse_madt_redist(struct acpi_subtable_header *header, + const unsigned long end) +{ + struct acpi_madt_generic_redistributor *redist = + (struct acpi_madt_generic_redistributor *)header; + void __iomem 
*redist_base; + + redist_base = ioremap(redist->base_address, redist->length); + if (!redist_base) { + pr_err("Couldn't map GICR region @%llx\n", redist->base_address); + return -ENOMEM; + } + + gic_acpi_register_redist(redist->base_address, redist_base); + return 0; +} + +static int __init +gic_acpi_parse_madt_gicc(struct acpi_subtable_header *header, + const unsigned long end) +{ + struct acpi_madt_generic_interrupt *gicc = + (struct acpi_madt_generic_interrupt *)header; + u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK; + u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2; + void __iomem *redist_base; + + /* GICC entry which has !ACPI_MADT_ENABLED is not unusable so skip */ + if (!(gicc->flags & ACPI_MADT_ENABLED)) + return 0; + + redist_base = ioremap(gicc->gicr_base_address, size); + if (!redist_base) + return -ENOMEM; + + gic_acpi_register_redist(gicc->gicr_base_address, redist_base); + return 0; +} + +static int __init gic_acpi_collect_gicr_base(void) +{ + acpi_tbl_entry_handler redist_parser; + enum acpi_madt_type type; + + if (acpi_data.single_redist) { + type = ACPI_MADT_TYPE_GENERIC_INTERRUPT; + redist_parser = gic_acpi_parse_madt_gicc; + } else { + type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR; + redist_parser = gic_acpi_parse_madt_redist; + } + + /* Collect redistributor base addresses in GICR entries */ + if (acpi_table_parse_madt(type, redist_parser, 0) > 0) + return 0; + + pr_info("No valid GICR entries exist\n"); + return -ENODEV; +} + +static int __init gic_acpi_match_gicr(struct acpi_subtable_header *header, + const unsigned long end) +{ + /* Subtable presence means that redist exists, that's it */ + return 0; +} + +static int __init gic_acpi_match_gicc(struct acpi_subtable_header *header, + const unsigned long end) +{ + struct acpi_madt_generic_interrupt *gicc = + (struct acpi_madt_generic_interrupt *)header; + + /* + * If GICC is enabled and has valid gicr base address, then it means + * GICR base is 
presented via GICC + */ + if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) + return 0; + + /* + * It's perfectly valid firmware can pass disabled GICC entry, driver + * should not treat as errors, skip the entry instead of probe fail. + */ + if (!(gicc->flags & ACPI_MADT_ENABLED)) + return 0; + + return -ENODEV; +} + +static int __init gic_acpi_count_gicr_regions(void) +{ + int count; + + /* + * Count how many redistributor regions we have. It is not allowed + * to mix redistributor description, GICR and GICC subtables have to be + * mutually exclusive. + */ + count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR, + gic_acpi_match_gicr, 0); + if (count > 0) { + acpi_data.single_redist = false; + return count; + } + + count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, + gic_acpi_match_gicc, 0); + if (count > 0) + acpi_data.single_redist = true; + + return count; +} + +static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header, + struct acpi_probe_entry *ape) +{ + struct acpi_madt_generic_distributor *dist; + int count; + + dist = (struct acpi_madt_generic_distributor *)header; + if (dist->version != ape->driver_data) + return false; + + /* We need to do that exercise anyway, the sooner the better */ + count = gic_acpi_count_gicr_regions(); + if (count <= 0) + return false; + + acpi_data.nr_redist_regions = count; + return true; +} + +static int __init gic_acpi_parse_virt_madt_gicc(struct acpi_subtable_header *header, + const unsigned long end) +{ + struct acpi_madt_generic_interrupt *gicc = + (struct acpi_madt_generic_interrupt *)header; + int maint_irq_mode; + static int first_madt = true; + + /* Skip unusable CPUs */ + if (!(gicc->flags & ACPI_MADT_ENABLED)) + return 0; + + maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ? 
+ ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE; + + if (first_madt) { + first_madt = false; + + acpi_data.maint_irq = gicc->vgic_interrupt; + acpi_data.maint_irq_mode = maint_irq_mode; + acpi_data.vcpu_base = gicc->gicv_base_address; + + return 0; + } + + /* + * The maintenance interrupt and GICV should be the same for every CPU + */ + if ((acpi_data.maint_irq != gicc->vgic_interrupt) || + (acpi_data.maint_irq_mode != maint_irq_mode) || + (acpi_data.vcpu_base != gicc->gicv_base_address)) + return -EINVAL; + + return 0; +} + +static bool __init gic_acpi_collect_virt_info(void) +{ + int count; + + count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, + gic_acpi_parse_virt_madt_gicc, 0); + + return (count > 0); +} + +#define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K) +#define ACPI_GICV2_VCTRL_MEM_SIZE (SZ_4K) +#define ACPI_GICV2_VCPU_MEM_SIZE (SZ_8K) + +static void __init gic_acpi_setup_kvm_info(void) +{ + int irq; + + if (!gic_acpi_collect_virt_info()) { + pr_warn("Unable to get hardware information used for virtualization\n"); + return; + } + + gic_v3_kvm_info.type = GIC_V3; + + irq = acpi_register_gsi(NULL, acpi_data.maint_irq, + acpi_data.maint_irq_mode, + ACPI_ACTIVE_HIGH); + if (irq <= 0) + return; + + gic_v3_kvm_info.maint_irq = irq; + + if (acpi_data.vcpu_base) { + struct resource *vcpu = &gic_v3_kvm_info.vcpu; + + vcpu->flags = IORESOURCE_MEM; + vcpu->start = acpi_data.vcpu_base; + vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1; + } + + gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis; + gic_set_kvm_info(&gic_v3_kvm_info); +} + +static int __init +gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end) +{ + struct acpi_madt_generic_distributor *dist; + struct fwnode_handle *domain_handle; + size_t size; + int i, err, skt; + + /* Get distributor base address */ + dist = (struct acpi_madt_generic_distributor *)header; + acpi_data.dist_base = ioremap(dist->base_address, + ACPI_GICV3_DIST_MEM_SIZE); + if (!acpi_data.dist_base) { + 
pr_err("Unable to map GICD registers\n"); + return -ENOMEM; + } + + err = gic_validate_dist_version(acpi_data.dist_base); + if (err) { + pr_err("No distributor detected at @%p, giving up\n", + acpi_data.dist_base); + goto out_dist_unmap; + } + + mars3_gic_dists[0].phys_base = dist->base_address; + mars3_gic_dists[0].size = ACPI_GICV3_DIST_MEM_SIZE; + mars3_gic_dists[0].dist_base = acpi_data.dist_base; + + mars3_sockets_bitmap = gic_mars3_sockets_bitmap(); + if (mars3_sockets_bitmap == 0) { + mars3_sockets_bitmap = 0x1; + pr_err("No socket, please check cpus MPIDR_AFFINITY_LEVEL!"); + } else + pr_info("mars3_sockets_bitmap = 0x%x\n", mars3_sockets_bitmap); + + for (skt = 1; skt < MAX_MARS3_SOC_COUNT; skt++) { + if ((((unsigned int)1 << skt) & mars3_sockets_bitmap) == 0) + continue; + + mars3_gic_dists[skt].phys_base = ((unsigned long)skt << MARS3_ADDR_SKTID_SHIFT) | + mars3_gic_dists[0].phys_base; + mars3_gic_dists[skt].size = mars3_gic_dists[0].size; + mars3_gic_dists[skt].dist_base = ioremap(mars3_gic_dists[skt].phys_base, + mars3_gic_dists[skt].size); + } + + size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions; + acpi_data.redist_regs = kzalloc(size, GFP_KERNEL); + if (!acpi_data.redist_regs) { + err = -ENOMEM; + goto out_dist_unmap; + } + + err = gic_acpi_collect_gicr_base(); + if (err) + goto out_redist_unmap; + + domain_handle = irq_domain_alloc_fwnode(acpi_data.dist_base); + if (!domain_handle) { + err = -ENOMEM; + goto out_redist_unmap; + } + + err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs, + acpi_data.nr_redist_regions, 0, domain_handle); + if (err) + goto out_fwhandle_free; + + acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle); + + if (static_branch_likely(&supports_deactivate_key)) + gic_acpi_setup_kvm_info(); + + return 0; + +out_fwhandle_free: + irq_domain_free_fwnode(domain_handle); +out_redist_unmap: + for (i = 0; i < acpi_data.nr_redist_regions; i++) + if (acpi_data.redist_regs[i].redist_base) + 
iounmap(acpi_data.redist_regs[i].redist_base); + kfree(acpi_data.redist_regs); +out_dist_unmap: + iounmap(acpi_data.dist_base); + return err; +} +IRQCHIP_ACPI_DECLARE(gic_phyt_2500, ACPI_MADT_TYPE_PHYTIUM_2500, + acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3, + gic_acpi_init); +#endif diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index d5cc32e80f5e..692b5790d3f2 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -19,13 +19,16 @@ #include #include #include +#include #include #include +#include #include #include #include #include #include +#include #include #include #include @@ -52,6 +55,7 @@ #define ITS_FLAGS_SAVE_SUSPEND_STATE (1ULL << 3) #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) +#define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1) static u32 lpi_id_bits; @@ -167,6 +171,13 @@ static struct { int next_victim; } vpe_proxy; +struct cpu_lpi_count { + atomic_t managed; + atomic_t unmanaged; +}; + +static DEFINE_PER_CPU(struct cpu_lpi_count, cpu_lpi_count); + static LIST_HEAD(its_nodes); static DEFINE_RAW_SPINLOCK(its_lock); static struct rdists *gic_rdists; @@ -179,6 +190,7 @@ static DEFINE_RAW_SPINLOCK(vmovp_lock); static DEFINE_IDA(its_vpeid_ida); #define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist)) +#define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu)) #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) #define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K) @@ -1062,7 +1074,7 @@ static inline u32 its_get_event_id(struct irq_data *d) static void lpi_write_config(struct irq_data *d, u8 clr, u8 set) { irq_hw_number_t hwirq; - struct page *prop_page; + void *va; u8 *cfg; if (irqd_is_forwarded_to_vcpu(d)) { @@ -1070,7 +1082,7 @@ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set) u32 event = its_get_event_id(d); struct its_vlpi_map *map; - prop_page = its_dev->event_map.vm->vprop_page; + va = 
page_address(its_dev->event_map.vm->vprop_page); map = &its_dev->event_map.vlpi_maps[event]; hwirq = map->vintid; @@ -1078,11 +1090,11 @@ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set) map->properties &= ~clr; map->properties |= set | LPI_PROP_GROUP1; } else { - prop_page = gic_rdists->prop_page; + va = gic_rdists->prop_table_va; hwirq = d->hwirq; } - cfg = page_address(prop_page) + hwirq - 8192; + cfg = va + hwirq - 8192; *cfg &= ~clr; *cfg |= set | LPI_PROP_GROUP1; @@ -1144,42 +1156,159 @@ static void its_unmask_irq(struct irq_data *d) lpi_update_config(d, 0, LPI_PROP_ENABLED); } +static __maybe_unused u32 its_read_lpi_count(struct irq_data *d, int cpu) +{ + if (irqd_affinity_is_managed(d)) + return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed); + + return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged); +} + +static void its_inc_lpi_count(struct irq_data *d, int cpu) +{ + if (irqd_affinity_is_managed(d)) + atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed); + else + atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged); +} + +static void its_dec_lpi_count(struct irq_data *d, int cpu) +{ + if (irqd_affinity_is_managed(d)) + atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed); + else + atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged); +} + +static unsigned int cpumask_pick_least_loaded(struct irq_data *d, + const struct cpumask *cpu_mask) +{ + unsigned int cpu = nr_cpu_ids, tmp; + int count = S32_MAX; + + for_each_cpu(tmp, cpu_mask) { + int this_count = its_read_lpi_count(d, tmp); + if (this_count < count) { + cpu = tmp; + count = this_count; + } + } + + return cpu; +} + +/* + * As suggested by Thomas Gleixner in: + * https://lore.kernel.org/r/87h80q2aoc.fsf@nanos.tec.linutronix.de + */ +static int its_select_cpu(struct irq_data *d, + const struct cpumask *aff_mask) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + cpumask_var_t tmpmask; + int cpu, node; + + if (!alloc_cpumask_var(&tmpmask, 
GFP_ATOMIC)) + return -ENOMEM; + + node = its_dev->its->numa_node; + + if (!irqd_affinity_is_managed(d)) { + /* First try the NUMA node */ + if (node != NUMA_NO_NODE) { + /* + * Try the intersection of the affinity mask and the + * node mask (and the online mask, just to be safe). + */ + cpumask_and(tmpmask, cpumask_of_node(node), aff_mask); + cpumask_and(tmpmask, tmpmask, cpu_online_mask); + + /* + * Ideally, we would check if the mask is empty, and + * try again on the full node here. + * + * But it turns out that the way ACPI describes the + * affinity for ITSs only deals about memory, and + * not target CPUs, so it cannot describe a single + * ITS placed next to two NUMA nodes. + * + * Instead, just fallback on the online mask. This + * diverges from Thomas' suggestion above. + */ + cpu = cpumask_pick_least_loaded(d, tmpmask); + if (cpu < nr_cpu_ids) + goto out; + + /* If we can't cross sockets, give up */ + if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)) + goto out; + + /* If the above failed, expand the search */ + } + + /* Try the intersection of the affinity and online masks */ + cpumask_and(tmpmask, aff_mask, cpu_online_mask); + + /* If that doesn't fly, the online mask is the last resort */ + if (cpumask_empty(tmpmask)) + cpumask_copy(tmpmask, cpu_online_mask); + + cpu = cpumask_pick_least_loaded(d, tmpmask); + } else { + cpumask_and(tmpmask, irq_data_get_affinity_mask(d), cpu_online_mask); + + /* If we cannot cross sockets, limit the search to that node */ + if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) && + node != NUMA_NO_NODE) + cpumask_and(tmpmask, tmpmask, cpumask_of_node(node)); + + cpu = cpumask_pick_least_loaded(d, tmpmask); + } +out: + free_cpumask_var(tmpmask); + + pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, cpumask_pr_args(aff_mask), cpu); + return cpu; +} + static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force) { - unsigned int cpu; - const struct cpumask *cpu_mask = 
cpu_online_mask; struct its_device *its_dev = irq_data_get_irq_chip_data(d); struct its_collection *target_col; u32 id = its_get_event_id(d); + int cpu, prev_cpu; /* A forwarded interrupt should use irq_set_vcpu_affinity */ if (irqd_is_forwarded_to_vcpu(d)) return -EINVAL; - /* lpi cannot be routed to a redistributor that is on a foreign node */ - if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { - if (its_dev->its->numa_node >= 0) { - cpu_mask = cpumask_of_node(its_dev->its->numa_node); - if (!cpumask_intersects(mask_val, cpu_mask)) - return -EINVAL; - } - } + prev_cpu = its_dev->event_map.col_map[id]; + its_dec_lpi_count(d, prev_cpu); - cpu = cpumask_any_and(mask_val, cpu_mask); + if (!force) + cpu = its_select_cpu(d, mask_val); + else + cpu = cpumask_pick_least_loaded(d, mask_val); - if (cpu >= nr_cpu_ids) - return -EINVAL; + if (cpu < 0 || cpu >= nr_cpu_ids) + goto err; /* don't set the affinity when the target cpu is same as current one */ - if (cpu != its_dev->event_map.col_map[id]) { + if (cpu != prev_cpu) { target_col = &its_dev->its->collections[cpu]; its_send_movi(its_dev, target_col, id); its_dev->event_map.col_map[id] = cpu; irq_data_update_effective_affinity(d, cpumask_of(cpu)); } + its_inc_lpi_count(d, cpu); + return IRQ_SET_MASK_OK_DONE; + +err: + its_inc_lpi_count(d, prev_cpu); + return -EINVAL; } static u64 its_irq_get_msi_base(struct its_device *its_dev) @@ -1202,7 +1331,8 @@ static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) msg->address_hi = upper_32_bits(addr); msg->data = its_get_event_id(d); - iommu_dma_map_msi_msg(d->irq, msg); + if ((read_cpuid_id() & MIDR_CPU_MODEL_MASK) != MIDR_PHYTIUM_FT2000PLUS) + iommu_dma_map_msi_msg(d->irq, msg); } static int its_irq_set_irqchip_state(struct irq_data *d, @@ -1446,6 +1576,11 @@ static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) } } +static int its_irq_retrigger(struct irq_data *d) +{ + return !its_irq_set_irqchip_state(d, 
IRQCHIP_STATE_PENDING, true); +} + static struct irq_chip its_irq_chip = { .name = "ITS", .irq_mask = its_mask_irq, @@ -1454,6 +1589,7 @@ static struct irq_chip its_irq_chip = { .irq_set_affinity = its_set_affinity, .irq_compose_msi_msg = its_irq_compose_msi_msg, .irq_set_irqchip_state = its_irq_set_irqchip_state, + .irq_retrigger = its_irq_retrigger, .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity, }; @@ -1634,6 +1770,15 @@ static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids) kfree(bitmap); } +static void gic_reset_prop_table(void *va) +{ + /* Priority 0xa0, Group-1, disabled */ + memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ); + + /* Make sure the GIC will observe the written configuration */ + gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ); +} + static struct page *its_allocate_prop_table(gfp_t gfp_flags) { struct page *prop_page; @@ -1642,13 +1787,7 @@ static struct page *its_allocate_prop_table(gfp_t gfp_flags) if (!prop_page) return NULL; - /* Priority 0xa0, Group-1, disabled */ - memset(page_address(prop_page), - LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, - LPI_PROPBASE_SZ); - - /* Make sure the GIC will observe the written configuration */ - gic_flush_dcache_to_poc(page_address(prop_page), LPI_PROPBASE_SZ); + gic_reset_prop_table(page_address(prop_page)); return prop_page; } @@ -1659,20 +1798,74 @@ static void its_free_prop_table(struct page *prop_page) get_order(LPI_PROPBASE_SZ)); } -static int __init its_alloc_lpi_tables(void) +static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size) { - phys_addr_t paddr; + phys_addr_t start, end, addr_end; + u64 i; - lpi_id_bits = min_t(u32, GICD_TYPER_ID_BITS(gic_rdists->gicd_typer), - ITS_MAX_LPI_NRBITS); - gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT); - if (!gic_rdists->prop_page) { - pr_err("Failed to allocate PROPBASE\n"); - return -ENOMEM; + /* + * We don't bother checking for a kdump kernel as by + * construction, the LPI tables are out of 
this kernel's + * memory map. + */ + if (is_kdump_kernel()) + return true; + + addr_end = addr + size - 1; + + for_each_reserved_mem_region(i, &start, &end) { + if (addr >= start && addr_end <= end) + return true; + } + + /* Not found, not a good sign... */ + pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n", + &addr, &addr_end); + add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); + return false; +} + +static int gic_reserve_range(phys_addr_t addr, unsigned long size) +{ + if (efi_enabled(EFI_CONFIG_TABLES)) + return efi_mem_reserve_persistent(addr, size); + + return 0; +} + +static int __init its_setup_lpi_prop_table(void) +{ + if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) { + u64 val; + + val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER); + lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1; + + gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12); + gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa, + LPI_PROPBASE_SZ, + MEMREMAP_WB); + gic_reset_prop_table(gic_rdists->prop_table_va); + } else { + struct page *page; + + lpi_id_bits = min_t(u32, + GICD_TYPER_ID_BITS(gic_rdists->gicd_typer), + ITS_MAX_LPI_NRBITS); + page = its_allocate_prop_table(GFP_NOWAIT); + if (!page) { + pr_err("Failed to allocate PROPBASE\n"); + return -ENOMEM; + } + + gic_rdists->prop_table_pa = page_to_phys(page); + gic_rdists->prop_table_va = page_address(page); + WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa, + LPI_PROPBASE_SZ)); } - paddr = page_to_phys(gic_rdists->prop_page); - pr_info("GIC: using LPI property table @%pa\n", &paddr); + pr_info("GICv3: using LPI property table @%pa\n", + &gic_rdists->prop_table_pa); return its_lpi_init(lpi_id_bits); } @@ -1963,12 +2156,9 @@ static int its_alloc_collections(struct its_node *its) static struct page *its_allocate_pending_table(gfp_t gfp_flags) { struct page *pend_page; - /* - * The pending pages have to be at least 64kB aligned, - * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below. 
- */ + pend_page = alloc_pages(gfp_flags | __GFP_ZERO, - get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K))); + get_order(LPI_PENDBASE_SZ)); if (!pend_page) return NULL; @@ -1980,10 +2170,66 @@ static struct page *its_allocate_pending_table(gfp_t gfp_flags) static void its_free_pending_table(struct page *pt) { - free_pages((unsigned long)page_address(pt), - get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K))); + free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ)); } +/* + * Booting with kdump and LPIs enabled is generally fine. Any other + * case is wrong in the absence of firmware/EFI support. + */ +static bool enabled_lpis_allowed(void) +{ + phys_addr_t addr; + u64 val; + + /* Check whether the property table is in a reserved region */ + val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER); + addr = val & GENMASK_ULL(51, 12); + + return gic_check_reserved_range(addr, LPI_PROPBASE_SZ); +} + +static int __init allocate_lpi_tables(void) +{ + u64 val; + int err, cpu; + + /* + * If LPIs are enabled while we run this from the boot CPU, + * flag the RD tables as pre-allocated if the stars do align. + */ + val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR); + if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) { + gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED | + RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING); + pr_info("GICv3: Using preallocated redistributor tables\n"); + } + + err = its_setup_lpi_prop_table(); + if (err) + return err; + + /* + * We allocate all the pending tables anyway, as we may have a + * mix of RDs that have had LPIs enabled, and some that + * don't. We'll free the unused ones as each CPU comes online. 
+ */ + for_each_possible_cpu(cpu) { + struct page *pend_page; + + pend_page = its_allocate_pending_table(GFP_NOWAIT); + if (!pend_page) { + pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu); + return -ENOMEM; + } + + gic_data_rdist_cpu(cpu)->pend_page = pend_page; + } + + return 0; +} + + static u64 its_clear_vpend_valid(void __iomem *vlpi_base) { u32 count = 1000000; /* 1s! */ @@ -2011,28 +2257,40 @@ static void its_cpu_init_lpis(void) { void __iomem *rbase = gic_data_rdist_rd_base(); struct page *pend_page; + phys_addr_t paddr; u64 val, tmp; - /* If we didn't allocate the pending table yet, do it now */ - pend_page = gic_data_rdist()->pend_page; - if (!pend_page) { - phys_addr_t paddr; + if (gic_data_rdist()->lpi_enabled) + return; + + val = readl_relaxed(rbase + GICR_CTLR); + if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) && + (val & GICR_CTLR_ENABLE_LPIS)) { + /* + * Check that we get the same property table on all + * RDs. If we don't, this is hopeless. + */ + paddr = gicr_read_propbaser(rbase + GICR_PROPBASER); + paddr &= GENMASK_ULL(51, 12); + if (WARN_ON(gic_rdists->prop_table_pa != paddr)) + add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); + + paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER); + paddr &= GENMASK_ULL(51, 16); + + WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ)); + its_free_pending_table(gic_data_rdist()->pend_page); + gic_data_rdist()->pend_page = NULL; + goto out; + } - pend_page = its_allocate_pending_table(GFP_NOWAIT); - if (!pend_page) { - pr_err("Failed to allocate PENDBASE for CPU%d\n", - smp_processor_id()); - return; - } + pend_page = gic_data_rdist()->pend_page; + paddr = page_to_phys(pend_page); + WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ)); - paddr = page_to_phys(pend_page); - pr_info("CPU%d: using LPI pending table @%pa\n", - smp_processor_id(), &paddr); - gic_data_rdist()->pend_page = pend_page; - } /* set PROPBASE */ - val = (page_to_phys(gic_rdists->prop_page) | + val = (gic_rdists->prop_table_pa 
| GICR_PROPBASER_InnerShareable | GICR_PROPBASER_RaWaWb | ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK)); @@ -2106,6 +2364,12 @@ static void its_cpu_init_lpis(void) /* Make sure the GIC has seen the above */ dsb(sy); +out: + gic_data_rdist()->lpi_enabled = true; + pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n", + smp_processor_id(), + gic_data_rdist()->pend_page ? "allocated" : "reserved", + &paddr); } static void its_cpu_init_collection(struct its_node *its) @@ -2490,22 +2754,13 @@ static int its_irq_domain_activate(struct irq_domain *domain, { struct its_device *its_dev = irq_data_get_irq_chip_data(d); u32 event = its_get_event_id(d); - const struct cpumask *cpu_mask = cpu_online_mask; int cpu; - /* get the cpu_mask of local node */ - if (its_dev->its->numa_node >= 0) - cpu_mask = cpumask_of_node(its_dev->its->numa_node); - - /* Bind the LPI to the first possible CPU */ - cpu = cpumask_first_and(cpu_mask, cpu_online_mask); - if (cpu >= nr_cpu_ids) { - if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) - return -EINVAL; - - cpu = cpumask_first(cpu_online_mask); - } + cpu = its_select_cpu(d, cpu_online_mask); + if (cpu < 0 || cpu >= nr_cpu_ids) + return -EINVAL; + its_inc_lpi_count(d, cpu); its_dev->event_map.col_map[event] = cpu; irq_data_update_effective_affinity(d, cpumask_of(cpu)); @@ -2520,6 +2775,7 @@ static void its_irq_domain_deactivate(struct irq_domain *domain, struct its_device *its_dev = irq_data_get_irq_chip_data(d); u32 event = its_get_event_id(d); + its_dec_lpi_count(d, its_dev->event_map.col_map[event]); /* Stop the delivery of interrupts */ its_send_discard(its_dev, event); } @@ -3594,16 +3850,6 @@ static int redist_disable_lpis(void) u64 timeout = USEC_PER_SEC; u64 val; - /* - * If coming via a CPU hotplug event, we don't need to disable - * LPIs before trying to re-enable them. They are already - * configured and all is well in the world. 
Detect this case - * by checking the allocation of the pending table for the - * current CPU. - */ - if (gic_data_rdist()->pend_page) - return 0; - if (!gic_rdists_supports_plpis()) { pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); return -ENXIO; @@ -3613,7 +3859,21 @@ static int redist_disable_lpis(void) if (!(val & GICR_CTLR_ENABLE_LPIS)) return 0; - pr_warn("CPU%d: Booted with LPIs enabled, memory probably corrupted\n", + /* + * If coming via a CPU hotplug event, we don't need to disable + * LPIs before trying to re-enable them. They are already + * configured and all is well in the world. + * + * If running with preallocated tables, there is nothing to do. + */ + if (gic_data_rdist()->lpi_enabled || + (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED)) + return 0; + + /* + * From that point on, we only try to do some damage control. + */ + pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n", smp_processor_id()); add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); @@ -3869,7 +4129,8 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, } gic_rdists = rdists; - err = its_alloc_lpi_tables(); + + err = allocate_lpi_tables(); if (err) return err; diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index ac888d7a0b00..6d9961b0523c 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -653,7 +653,9 @@ early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg); static int gic_dist_supports_lpis(void) { - return !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) && !gicv3_nolpi; + return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && + !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) && + !gicv3_nolpi); } static void gic_cpu_init(void) @@ -673,10 +675,6 @@ static void gic_cpu_init(void) gic_cpu_config(rbase, gic_redist_wait_for_rwp); - /* Give LPIs a spin */ - if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis()) - its_cpu_init(); - /* 
initialise system registers */ gic_cpu_sys_reg_init(); } @@ -689,6 +687,10 @@ static void gic_cpu_init(void) static int gic_starting_cpu(unsigned int cpu) { gic_cpu_init(); + + if (gic_dist_supports_lpis()) + its_cpu_init(); + return 0; } @@ -818,6 +820,11 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, #define gic_smp_init() do { } while(0) #endif +static int gic_retrigger(struct irq_data *data) +{ + return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true); +} + #ifdef CONFIG_CPU_PM /* Check whether it's single security state view */ static bool gic_dist_security_disabled(void) @@ -859,6 +866,7 @@ static struct irq_chip gic_chip = { .irq_eoi = gic_eoi_irq, .irq_set_type = gic_set_type, .irq_set_affinity = gic_set_affinity, + .irq_retrigger = gic_retrigger, .irq_get_irqchip_state = gic_irq_get_irqchip_state, .irq_set_irqchip_state = gic_irq_set_irqchip_state, .flags = IRQCHIP_SET_TYPE_MASKED | @@ -873,6 +881,7 @@ static struct irq_chip gic_eoimode1_chip = { .irq_eoi = gic_eoimode1_eoi_irq, .irq_set_type = gic_set_type, .irq_set_affinity = gic_set_affinity, + .irq_retrigger = gic_retrigger, .irq_get_irqchip_state = gic_irq_get_irqchip_state, .irq_set_irqchip_state = gic_irq_set_irqchip_state, .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity, @@ -887,6 +896,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { struct irq_chip *chip = &gic_chip; + struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq)); if (static_branch_likely(&supports_deactivate_key)) chip = &gic_eoimode1_chip; @@ -913,7 +923,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_domain_set_info(d, irq, hw, chip, d->host_data, handle_fasteoi_irq, NULL, NULL); irq_set_probe(irq); - irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq))); + irqd_set_single_target(irqd); } /* LPIs */ if (hw >= 8192 && hw < GIC_ID_NR) { @@ -923,6 +933,8 @@ static int 
gic_irq_domain_map(struct irq_domain *d, unsigned int irq, handle_fasteoi_irq, NULL, NULL); } + /* Prevents SW retriggers which mess up the ACK/EOI ordering */ + irqd_set_handle_enforce_irqctx(irqd); return 0; } @@ -1127,14 +1139,16 @@ static int __init gic_init_bases(void __iomem *dist_base, gic_update_vlpi_properties(); - if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis()) - its_init(handle, &gic_data.rdists, gic_data.domain); - gic_smp_init(); gic_dist_init(); gic_cpu_init(); gic_cpu_pm_init(); + if (gic_dist_supports_lpis()) { + its_init(handle, &gic_data.rdists, gic_data.domain); + its_cpu_init(); + } + return 0; out_free: diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index b5417012afc8..0f4ec68e3c6a 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -342,6 +342,11 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, } #endif +static int gic_retrigger(struct irq_data *data) +{ + return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true); +} + static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) { u32 irqstat, irqnr; @@ -412,6 +417,7 @@ static const struct irq_chip gic_chip = { .irq_unmask = gic_unmask_irq, .irq_eoi = gic_eoi_irq, .irq_set_type = gic_set_type, + .irq_retrigger = gic_retrigger, .irq_get_irqchip_state = gic_irq_get_irqchip_state, .irq_set_irqchip_state = gic_irq_set_irqchip_state, .flags = IRQCHIP_SET_TYPE_MASKED | @@ -964,6 +970,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { struct gic_chip_data *gic = d->host_data; + struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq)); if (hw < 32) { irq_set_percpu_devid(irq); @@ -974,8 +981,11 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data, handle_fasteoi_irq, NULL, NULL); irq_set_probe(irq); - 
irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq))); + irqd_set_single_target(irqd); } + + /* Prevents SW retriggers which mess up the ACK/EOI ordering */ + irqd_set_handle_enforce_irqctx(irqd); return 0; } diff --git a/drivers/irqchip/irq-phytium-ixic.c b/drivers/irqchip/irq-phytium-ixic.c new file mode 100644 index 000000000000..dd3d755b7b15 --- /dev/null +++ b/drivers/irqchip/irq-phytium-ixic.c @@ -0,0 +1,267 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for Phytium D2000 PCIe legacy INTx interrupt controller + * + * Copyright (c) 2020 Phytium Technology Co., Ltd. + * + * Author: Chen Baozi + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define NUM_IRQS 4 + +#define CTR_BANK_NUM 6 +#define CTR_BANK_SIZE 0x10000 +#define CTR_BANK_ISTATUS_LOCAL 0x184 + +#define HPB_INTX_STATUS_0 0x0 +#define HPB_INTX_STATUS_1 0x1000 + +struct ixic_irq_data { + void __iomem *ctr; + void __iomem *hpb; + u32 spi_base; +}; + +static void phytium_ixic_irq_eoi(struct irq_data *d) +{ + struct ixic_irq_data *data = irq_data_get_irq_chip_data(d); + unsigned int intx = irqd_to_hwirq(d); + u32 gstatus = readl(data->hpb) | (readl(data->hpb + HPB_INTX_STATUS_1) << 12); + u32 imask, istatus; + int i; + + WARN_ON(intx >= NUM_IRQS); + imask = 1 << (3 - intx); + istatus = (1 << intx) << 24; + for (i = 0; i < CTR_BANK_NUM; i++, gstatus >>= 4) { + if (gstatus & imask) + writel(istatus, data->ctr + CTR_BANK_SIZE*i + CTR_BANK_ISTATUS_LOCAL); + } + + irq_chip_eoi_parent(d); +} + +static struct irq_chip phytium_ixic_irq_chip = { + .name = "IXIU", + .irq_eoi = phytium_ixic_irq_eoi, + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_set_type = irq_chip_set_type_parent, + .irq_set_affinity = irq_chip_set_affinity_parent, + .flags = IRQCHIP_MASK_ON_SUSPEND, +}; + +static int phytium_ixic_translate(struct irq_domain *domain, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int 
*type) +{ + struct ixic_irq_data *info = domain->host_data; + + if (is_of_node(fwspec->fwnode)) { + if (fwspec->param_count != 3) + return -EINVAL; + + if (fwspec->param[0] != GIC_SPI) + return -EINVAL; /* No PPI should point to this domain */ + + *hwirq = fwspec->param[1] - info->spi_base; + *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; + } else { + if (fwspec->param_count != 2) + return -EINVAL; + *hwirq = fwspec->param[0] - info->spi_base; + *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; + } + + return 0; +} + +static int phytium_ixic_alloc(struct irq_domain *dom, unsigned int virq, + unsigned int nr_irqs, void *data) +{ + struct irq_fwspec *fwspec = data; + struct irq_fwspec parent_fwspec; + struct ixic_irq_data *info = dom->host_data; + irq_hw_number_t hwirq; + + /* We assume the device use the parent's format directly */ + parent_fwspec = *fwspec; + if (is_of_node(dom->parent->fwnode)) { + if (fwspec->param_count != 3) + return -EINVAL; /* Not GIC compliant */ + if (fwspec->param[0] != GIC_SPI) + return -EINVAL; /* No PPI should point to this domain */ + + /* Get the local hwirq of IXIC */ + hwirq = fwspec->param[1] - info->spi_base; + } else { + hwirq = fwspec->param[0] - info->spi_base; + } + WARN_ON(nr_irqs != 1); + irq_domain_set_hwirq_and_chip(dom, virq, hwirq, &phytium_ixic_irq_chip, info); + + parent_fwspec.fwnode = dom->parent->fwnode; + return irq_domain_alloc_irqs_parent(dom, virq, nr_irqs, &parent_fwspec); +} + +static const struct irq_domain_ops ixic_domain_ops = { + .translate = phytium_ixic_translate, + .alloc = phytium_ixic_alloc, + .free = irq_domain_free_irqs_common, +}; + +static struct ixic_irq_data *phytium_ixic_init(const struct fwnode_handle *fwnode, + struct resource *ctr, struct resource *hpb) +{ + struct ixic_irq_data *data; + int err; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return ERR_PTR(-ENOMEM); + + if (fwnode_property_read_u32_array(fwnode, "intx-spi-base", + &data->spi_base, 1)) { + err = -ENODEV; + goto 
out_free; + } + + data->ctr = ioremap(ctr->start, resource_size(ctr)); + if (!data->ctr) { + err = -ENODEV; + goto out_free; + } + + data->hpb = ioremap(hpb->start, resource_size(hpb)); + if (!data->hpb) { + err = -ENODEV; + goto out_free; + } + + return data; + +out_free: + kfree(data); + return ERR_PTR(err); +} + +static int __init phytium_ixic_dt_init(struct device_node *node, + struct device_node *parent) +{ + struct irq_domain *pd, *d; + struct ixic_irq_data *data; + struct resource ctr, hpb; + + if (!parent) { + pr_err("%pOF: no parent, giving up\n", node); + return -ENODEV; + } + + pd = irq_find_host(parent); + if (!pd) { + pr_err("%pOF: unable to obtain parent domain\n", node); + return -ENXIO; + } + + if (of_address_to_resource(node, 0, &ctr)) { + pr_err("%pOF: failed to parse 'ctr' memory resource\n", node); + return -ENXIO; + } + + if (of_address_to_resource(node, 1, &hpb)) { + pr_err("%pOF: failed to parse 'hpb' memory resource\n", node); + return -ENXIO; + } + + data = phytium_ixic_init(of_node_to_fwnode(node), &ctr, &hpb); + if (IS_ERR(data)) + return PTR_ERR(data); + + d = irq_domain_add_hierarchy(pd, 0, NUM_IRQS, node, &ixic_domain_ops, data); + if (!d) { + pr_err("%pOF: failed to allocate domain\n", node); + goto out_unmap; + } + + pr_info("%pOF: %d interrupts forwarded to %pOF\n", node, NUM_IRQS, parent); + + return 0; + +out_unmap: + iounmap(data->ctr); + iounmap(data->hpb); + kfree(data); + return -ENOMEM; +} +IRQCHIP_DECLARE(d2000_ixic, "phytium,d2000-ixic", phytium_ixic_dt_init); +IRQCHIP_DECLARE(ft2004c_ixic, "phytium,ft2004c-ixic", phytium_ixic_dt_init); + +#ifdef CONFIG_ACPI +static int phytium_ixic_acpi_probe(struct platform_device *pdev) +{ + struct irq_domain *domain; + struct ixic_irq_data *data; + struct resource *ctr, *hpb; + + ctr = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!ctr) { + dev_err(&pdev->dev, "failed to parse 'ctr' memory resource\n"); + return -ENXIO; + } + + hpb = platform_get_resource(pdev, IORESOURCE_MEM, 
1); + if (!hpb) { + dev_err(&pdev->dev, "failed to parse 'hpb' memory resource\n"); + return -ENXIO; + } + + data = phytium_ixic_init(dev_fwnode(&pdev->dev), ctr, hpb); + if (IS_ERR(data)) + return PTR_ERR(data); + + domain = acpi_irq_create_hierarchy(0, NUM_IRQS, dev_fwnode(&pdev->dev), + &ixic_domain_ops, data); + if (!domain) { + dev_err(&pdev->dev, "failed to create IRQ domain\n"); + goto out_unmap; + } + + dev_info(&pdev->dev, "%d interrupts forwarded\n", NUM_IRQS); + + return 0; + +out_unmap: + iounmap(data->ctr); + iounmap(data->hpb); + kfree(data); + return -ENOMEM; +} + +static const struct acpi_device_id phytium_ixic_acpi_ids[] = { + { "PHYT0013" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(acpi, phytium_ixic_acpi_ids); + +static struct platform_driver phytium_ixic_driver = { + .driver = { + .name = "phytium-ixic", + .acpi_match_table = phytium_ixic_acpi_ids, + }, + .probe = phytium_ixic_acpi_probe, +}; +builtin_platform_driver(phytium_ixic_driver); +#endif diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig index 841c005d8ebb..461e2397dc88 100644 --- a/drivers/mailbox/Kconfig +++ b/drivers/mailbox/Kconfig @@ -21,6 +21,12 @@ config IMX_MBOX help Mailbox implementation for i.MX Messaging Unit (MU). +config PHYTIUM_MBOX + tristate "Phytium SoC Mailbox Support" + depends on ARCH_PHYTIUM || COMPILE_TEST + help + This driver provides the support for the Phytium mailbox controller. 
+ config PLATFORM_MHU tristate "Platform MHU Mailbox" depends on OF diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile index c818b5d011ae..de3cbe3ffa44 100644 --- a/drivers/mailbox/Makefile +++ b/drivers/mailbox/Makefile @@ -9,6 +9,8 @@ obj-$(CONFIG_ARM_MHU) += arm_mhu.o obj-$(CONFIG_IMX_MBOX) += imx-mailbox.o +obj-$(CONFIG_PHYTIUM_MBOX) += phytium_mailbox.o + obj-$(CONFIG_PLATFORM_MHU) += platform_mhu.o obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o diff --git a/drivers/mailbox/phytium_mailbox.c b/drivers/mailbox/phytium_mailbox.c new file mode 100644 index 000000000000..c797d4b4769f --- /dev/null +++ b/drivers/mailbox/phytium_mailbox.c @@ -0,0 +1,200 @@ +/* + * Phytium SoC mailbox driver + * + * Copyright (c) 2020 Phytium Corporation. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define INTR_STAT 0x0 +#define INTR_SET 0x8 +#define INTR_CLR 0x10 + +#define TX_REG 0x100 + +#define NR_CHANS 1 + +struct phytium_mbox_link { + unsigned irq; + void __iomem *tx_reg; + void __iomem *rx_reg; +}; + +struct phytium_mbox { + void __iomem *base; + struct phytium_mbox_link mlink; + struct mbox_chan chan; + struct mbox_controller mbox; +}; + +static irqreturn_t phytium_mbox_rx_irq(int irq, void *ch) +{ + struct mbox_chan *chan = ch; + struct phytium_mbox_link *mlink = chan->con_priv; + u32 val; + + val = readl_relaxed(mlink->rx_reg + INTR_STAT); + if (!val) + return IRQ_NONE; + + mbox_chan_received_data(chan, (void *)&val); + + writel_relaxed(val, mlink->rx_reg + INTR_CLR); + + return IRQ_HANDLED; +} + +static int phytium_mbox_send_data(struct mbox_chan *chan, void *data) +{ + struct phytium_mbox_link *mlink = chan->con_priv; + u32 *arg = data; + + writel_relaxed(*arg, mlink->tx_reg + INTR_SET); + + return 0; +} + +static int phytium_mbox_startup(struct mbox_chan *chan) +{ + struct phytium_mbox_link *mlink = chan->con_priv; + u32 val; + int ret; + + val = readl_relaxed(mlink->tx_reg + INTR_STAT); + writel_relaxed(val, mlink->tx_reg + INTR_CLR); + + ret = request_irq(mlink->irq, phytium_mbox_rx_irq, + IRQF_SHARED, "phytium_mbox_link", chan); + if (ret) { + dev_err(chan->mbox->dev, + "Unable to acquire IRQ %d\n", mlink->irq); + } + + return ret; +} + +static void phytium_mbox_shutdown(struct mbox_chan *chan) +{ + struct phytium_mbox_link *mlink = chan->con_priv; + + free_irq(mlink->irq, chan); +} + +static bool phytium_mbox_last_tx_done(struct mbox_chan *chan) +{ + struct phytium_mbox_link *mlink = chan->con_priv; + u32 val = readl_relaxed(mlink->tx_reg + INTR_STAT); + + return (val == (u32)(1U << 31)); +} + +static const struct mbox_chan_ops phytium_mbox_ops = { + .send_data = phytium_mbox_send_data, + .startup = phytium_mbox_startup, + .shutdown = 
phytium_mbox_shutdown, + .last_tx_done = phytium_mbox_last_tx_done, +}; + +static const struct acpi_device_id phytium_mbox_acpi_match[] = { + { "PHYT0009", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, phytium_mbox_acpi_match); + +static const struct of_device_id phytium_mbox_of_match[] = { + { .compatible = "phytium,mbox", }, + { }, +}; +MODULE_DEVICE_TABLE(of, phytium_mbox_of_match); + +static int phytium_mbox_probe(struct platform_device *pdev) +{ + struct phytium_mbox *mbox; + struct resource *res; + int err, irq; + + /* Allocate memory for device */ + mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL); + if (!mbox) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + mbox->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(mbox->base)) { + dev_err(&pdev->dev, "ioremap base failed\n"); + return PTR_ERR(mbox->base); + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "cannot obtain irq\n"); + return irq; + } + + mbox->chan.con_priv = &mbox->mlink; + mbox->mlink.irq = irq; + mbox->mlink.rx_reg = mbox->base; + mbox->mlink.tx_reg = mbox->mlink.rx_reg + TX_REG; + + mbox->mbox.dev = &pdev->dev; + mbox->mbox.chans = &mbox->chan; + mbox->mbox.num_chans = NR_CHANS; + mbox->mbox.ops = &phytium_mbox_ops; + mbox->mbox.txdone_irq = false; + mbox->mbox.txdone_poll = true; + mbox->mbox.txpoll_period = 1; + + platform_set_drvdata(pdev, mbox); + + err = mbox_controller_register(&mbox->mbox); + if (err) { + dev_err(&pdev->dev, "Failed to register mailboxes %d\n", err); + goto fail; + } + + dev_info(&pdev->dev, "Phytium SoC Mailbox registered\n"); +fail: + return err; +} + +static int phytium_mbox_remove(struct platform_device *pdev) +{ + struct phytium_mbox *mbox = platform_get_drvdata(pdev); + + mbox_controller_unregister(&mbox->mbox); + + return 0; +} + +static struct platform_driver phytium_mbox_driver = { + .probe = phytium_mbox_probe, + .remove = phytium_mbox_remove, + .driver = { + .name = "phytium-mbox", + 
.of_match_table = of_match_ptr(phytium_mbox_of_match), + .acpi_match_table = ACPI_PTR(phytium_mbox_acpi_match), + }, +}; + +module_platform_driver(phytium_mbox_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Phytium SoC Mailbox Driver"); +MODULE_AUTHOR("Chen Baozi "); diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index dd938a5d0409..75b4e0eda51f 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -915,6 +915,19 @@ config UCB1400_CORE To compile this driver as a module, choose M here: the module will be called ucb1400_core. +config MFD_PHYTIUM_I2S_LSD + bool "PHYTIUM X100 I2S LSD MFD driver" + depends on (PCI && ARCH_PHYTIUM) + help + This enables support for the Phytium X100 LSD I2S controller. + +config MFD_PHYTIUM_I2S_MMD + bool "PHYTIUM X100 I2S MMD MFD driver" + depends on (PCI && ARCH_PHYTIUM) + help + This enables support for the Phytium X100 MMD I2S controllers + for Display Port. + config MFD_PM8XXX tristate "Qualcomm PM8xxx PMIC chips driver" depends on (ARM || HEXAGON || COMPILE_TEST) diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 5856a9489cbd..65a8aa1d1abf 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -241,3 +241,5 @@ obj-$(CONFIG_MFD_SC27XX_PMIC) += sprd-sc27xx-spi.o obj-$(CONFIG_RAVE_SP_CORE) += rave-sp.o obj-$(CONFIG_MFD_ROHM_BD718XX) += rohm-bd718x7.o +obj-$(CONFIG_MFD_PHYTIUM_I2S_LSD) += phytium_x100_i2s_lsd.o +obj-$(CONFIG_MFD_PHYTIUM_I2S_MMD) += phytium_x100_i2s_mmd.o diff --git a/drivers/mfd/phytium_x100_i2s_lsd.c b/drivers/mfd/phytium_x100_i2s_lsd.c new file mode 100644 index 000000000000..720501f35c52 --- /dev/null +++ b/drivers/mfd/phytium_x100_i2s_lsd.c @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium I2S LSD MFD driver over PCI bus + * + * Copyright (C) 2020-2021, Phytium Technology Co.,Ltd. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. 
This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + * + */ + +#include +#include +#include +#include + +struct phytium_x100_mfd { + struct device *dev; +}; + +struct pdata_x100_mfd { + struct device *dev; + char *name; + int clk_base; +}; + +static struct resource phytium_x100_i2s_res0[] = { + [0] = { + .flags = IORESOURCE_MEM, + }, + [1] = { + .flags = IORESOURCE_MEM, + }, + [2] = { + .flags = IORESOURCE_IRQ, + }, +}; + +static struct mfd_cell phytium_x100_mfd_cells[] = { + { + .id = 0, + .name = "phytium-i2s", + .of_compatible = "phytium,i2s", + .resources = phytium_x100_i2s_res0, + .num_resources = ARRAY_SIZE(phytium_x100_i2s_res0), + .ignore_resource_conflicts = true, + }, +}; + +static void phytium_x100_i2s_setup(struct pci_dev *pdev) +{ + struct mfd_cell *cell = &phytium_x100_mfd_cells[0]; + struct resource *res = (struct resource *)cell->resources; + struct pdata_x100_mfd *pdata; + + res[0].start = pci_resource_start(pdev, 0); + res[0].end = pci_resource_start(pdev, 0) + 0x0fff; + + res[1].start = pci_resource_start(pdev, 0) + 0x1000; + res[1].end = pci_resource_start(pdev, 0) + 0x1fff; + + res[2].start = pdev->irq; + res[2].end = pdev->irq; + + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + + pdata->dev = &pdev->dev; + pdata->name = "phytium-i2s-lsd"; + pdata->clk_base = 480000000; + + cell->platform_data = pdata; + cell->pdata_size = sizeof(*pdata); +} + +static int phytium_x100_mfd_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct phytium_x100_mfd *phytium_mfd; + int ret; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + pci_set_master(pdev); + + phytium_mfd = devm_kzalloc(&pdev->dev, sizeof(*phytium_mfd), GFP_KERNEL); + if (!phytium_mfd) + return -ENOMEM; + + phytium_mfd->dev = &pdev->dev; + dev_set_drvdata(&pdev->dev, phytium_mfd); + + phytium_x100_i2s_setup(pdev); + + ret = mfd_add_devices(&pdev->dev, 0, phytium_x100_mfd_cells, + 
+			      ARRAY_SIZE(phytium_x100_mfd_cells), NULL, 0,
+			      NULL);
+	if (ret)
+		return ret;
+ * + */ + +#include +#include +#include +#include + +struct phytium_x100_mfd { + struct device *dev; +}; + +struct pdata_x100_mfd { + struct device *dev; + char *name; + int clk_base; +}; + +static struct resource phytium_x100_i2s_res0[] = { + [0] = { + .flags = IORESOURCE_MEM, + }, + [1] = { + .flags = IORESOURCE_MEM, + }, + [2] = { + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource phytium_x100_i2s_res1[] = { + [0] = { + .flags = IORESOURCE_MEM, + }, + [1] = { + .flags = IORESOURCE_MEM, + }, + [2] = { + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource phytium_x100_i2s_res2[] = { + [0] = { + .flags = IORESOURCE_MEM, + }, + [1] = { + .flags = IORESOURCE_MEM, + }, + [2] = { + .flags = IORESOURCE_IRQ, + }, +}; + +static struct mfd_cell phytium_x100_mfd_cells[] = { + { + .id = 1, + .name = "phytium-i2s", + .of_compatible = "phytium,i2s", + .resources = phytium_x100_i2s_res0, + .num_resources = ARRAY_SIZE(phytium_x100_i2s_res0), + .ignore_resource_conflicts = true, + }, + { + .id = 2, + .name = "phytium-i2s", + .of_compatible = "phytium,i2s", + .resources = phytium_x100_i2s_res1, + .num_resources = ARRAY_SIZE(phytium_x100_i2s_res1), + .ignore_resource_conflicts = true, + }, + { + .id = 3, + .name = "phytium-i2s", + .of_compatible = "phytium,i2s", + .resources = phytium_x100_i2s_res2, + .num_resources = ARRAY_SIZE(phytium_x100_i2s_res2), + .ignore_resource_conflicts = true, + }, +}; + +static void phytium_x100_i2s_setup(struct pci_dev *pdev, int i) +{ + struct mfd_cell *cell = &phytium_x100_mfd_cells[i]; + struct resource *res = (struct resource *)cell->resources; + struct pdata_x100_mfd *pdata; + + res[0].start = pci_resource_start(pdev, 0) + 0x2000 * i + 0x1000; + res[0].end = pci_resource_start(pdev, 0) + 0x2000 * i + 0x1fff; + + res[1].start = pci_resource_start(pdev, 0) + 0x2000 * i; + res[1].end = pci_resource_start(pdev, 0) + 0x2000 * i + 0x0fff; + + res[2].start = pdev->irq; + res[2].end = pdev->irq; + + pdata = devm_kzalloc(&pdev->dev, 
sizeof(*pdata), GFP_KERNEL); + + pdata->dev = &pdev->dev; + pdata->clk_base = 600000000; + switch (i) { + case 0: + pdata->name = "phytium-i2s-dp0"; + break; + case 1: + pdata->name = "phytium-i2s-dp1"; + break; + case 2: + pdata->name = "phytium-i2s-dp2"; + break; + default: + break; + } + + cell->platform_data = pdata; + cell->pdata_size = sizeof(*pdata); +} + +static int phytium_x100_mfd_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct phytium_x100_mfd *phytium_mfd; + int i; + int ret; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + pci_set_master(pdev); + + phytium_mfd = devm_kzalloc(&pdev->dev, sizeof(*phytium_mfd), GFP_KERNEL); + if (!phytium_mfd) + return -ENOMEM; + + phytium_mfd->dev = &pdev->dev; + dev_set_drvdata(&pdev->dev, phytium_mfd); + + for (i = 0; i < 3; i++) + phytium_x100_i2s_setup(pdev, i); + + ret = mfd_add_devices(&pdev->dev, 0, phytium_x100_mfd_cells, + ARRAY_SIZE(phytium_x100_mfd_cells), NULL, 0, + NULL); + if (ret) + return 0; + + return 0; +} + + +static void phytium_x100_mfd_remove(struct pci_dev *pdev) +{ + mfd_remove_devices(&pdev->dev); +} + +static const struct pci_device_id phytium_x100_mfd_ids[] = { + { + .vendor = 0x1DB7, + .device = 0xDC23, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = 0x3, + .class_mask = 0, + }, + {}, +}; +MODULE_DEVICE_TABLE(pci, phytium_x100_mfd_ids); + +static struct pci_driver phytium_i2s_mmd_mfd_driver = { + .name = "phytium_x100_mfd_mmd", + .id_table = phytium_x100_mfd_ids, + .probe = phytium_x100_mfd_probe, + .remove = phytium_x100_mfd_remove, +}; + +module_pci_driver(phytium_i2s_mmd_mfd_driver); + +MODULE_AUTHOR("Yiqun Zhang "); +MODULE_DESCRIPTION("Phytium X100 MFD PCI driver for I2S-DP"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index b7f809aa40c2..153098fcad59 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -945,3 +945,21 @@ config MMC_SDHCI_OMAP If you have a 
controller with this interface, say Y or M here. If unsure, say N. + +config MMC_PHYTIUM_SDCI + tristate "Phytium FT SD Host Controller support" + depends on ARM64 + help + This selects support for the Phytium FT4C SD Host Controller + +config MMC_PHYTIUM_MCI_PCI + tristate "Phytium octopus PCI MultiMedia Card Interface support" + depends on ARCH_PHYTIUM + default y if ARCH_PHYTIUM + help + This selects support for the PCI MultiMedia Card Interface on Phytium + X100 chipset. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index ce8398e6f2c0..c6212a81bc4d 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -69,6 +69,8 @@ obj-$(CONFIG_MMC_SUNXI) += sunxi-mmc.o obj-$(CONFIG_MMC_USDHI6ROL0) += usdhi6rol0.o obj-$(CONFIG_MMC_TOSHIBA_PCI) += toshsd.o obj-$(CONFIG_MMC_BCM2835) += bcm2835.o +obj-$(CONFIG_MMC_PHYTIUM_SDCI) += phytium-sdci.o +obj-$(CONFIG_MMC_PHYTIUM_MCI_PCI) += phytium-mci-pci.o phytium-mci.o obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o obj-$(CONFIG_MMC_REALTEK_USB) += rtsx_usb_sdmmc.o diff --git a/drivers/mmc/host/phytium-mci-pci.c b/drivers/mmc/host/phytium-mci-pci.c new file mode 100644 index 000000000000..dda6089dce5f --- /dev/null +++ b/drivers/mmc/host/phytium-mci-pci.c @@ -0,0 +1,178 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Phytium Multimedia Card Interface PCI driver + * + * Copyright (C) 2020 Phytium Technology Co.,Ltd. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include "phytium-mci.h" + +static u32 sd_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | + MMC_CAP_CMD23 | MMC_CAP_4_BIT_DATA; +static u32 sd_caps2 = MMC_CAP2_NO_MMC; + +static u32 emmc_caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA | MMC_CAP_WAIT_WHILE_BUSY | + MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_HW_RESET | MMC_CAP_MMC_HIGHSPEED | + MMC_CAP_NONREMOVABLE; +static u32 emmc_caps2 = MMC_CAP2_NO_SDIO | MMC_CAP2_NO_SD; + +#define PCI_BAR_NO 0 + +#if defined CONFIG_PM && defined CONFIG_PM_SLEEP +static const struct dev_pm_ops phytium_mci_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(phytium_mci_suspend, + phytium_mci_resume) + SET_RUNTIME_PM_OPS(phytium_mci_runtime_suspend, + phytium_mci_runtime_resume, NULL) +}; +#else +#define phytium_mci_dev_pm_ops NULL +#endif + +static int +phytium_mci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) +{ + struct phytium_mci_host *host; + struct mmc_host *mmc; + int ret; + + ret = pcim_enable_device(pdev); + + if (ret) + return ret; + pci_set_master(pdev); + + mmc = mmc_alloc_host(sizeof(struct phytium_mci_host), &pdev->dev); + + if (!mmc) + return -ENOMEM; + + host = mmc_priv(mmc); + + pci_enable_msi(pdev); + + host->irq = pdev->irq; + host->irq_flags = IRQF_SHARED; + host->dev = &pdev->dev; + ret = pcim_iomap_regions(pdev, 1 << PCI_BAR_NO, pci_name(pdev)); + + if (ret) { + dev_err(&pdev->dev, "I/O memory remapping failed\n"); + goto host_free; + } + + host->base = pcim_iomap_table(pdev)[PCI_BAR_NO]; + host->is_use_dma = 1; + host->is_device_x100 = 1; + + if (pdev->devfn == 2) { + host->caps = emmc_caps; + host->caps2 = emmc_caps2; + } else { + host->caps = sd_caps; + host->caps2 = sd_caps2; + mmc->f_max = 25000000; /* stable frequency */ + } + + host->mmc = mmc; + host->clk_rate = MCI_CLK; + + dev_info(&pdev->dev, "%s %d: [bar %d] addr: 0x%llx size: 0x%llx km: 0x%llx devfn:%d\n", + __func__, __LINE__, PCI_BAR_NO, 
pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0), (uint64_t)host->base, pdev->devfn); + + dev_dbg(&pdev->dev, "%s %d:irq:0x%x\n", __func__, __LINE__, host->irq); + + ret = phytium_mci_common_probe(host); + + if (ret == MCI_REALEASE_MEM) { + ret = -ENOMEM; + goto release_mem; + } else if (ret) { + goto release; + } + pci_set_drvdata(pdev, mmc); + dev_info(&pdev->dev, "%s %d: probe phytium mci successful.\n", __func__, __LINE__); + return 0; + +release: + phytium_mci_deinit_hw(host); +release_mem: + + if (host->dma.adma_table) { + dma_free_coherent(&pdev->dev, + MAX_BD_NUM * sizeof(struct phytium_adma2_64_desc), + host->dma.adma_table, host->dma.adma_addr); + } +host_free: + mmc_free_host(mmc); + pci_disable_device(pdev); + return ret; +} + +static void phytium_mci_pci_remove(struct pci_dev *pdev) +{ + struct phytium_mci_host *host; + struct mmc_host *mmc; + + mmc = pci_get_drvdata(pdev); + if (!mmc) { + dev_info(&pdev->dev, "%s %d: mmc is null.\n", __func__, __LINE__); + return; + } + host = mmc_priv(mmc); + if (!host) { + dev_info(&pdev->dev, "%s %d: host is null.\n", __func__, __LINE__); + mmc_remove_host(mmc); + mmc_free_host(mmc); + return; + } + + del_timer(&host->hotplug_timer); + + mmc_remove_host(host->mmc); + + if (host->dma.adma_table) { + dma_free_coherent(&pdev->dev, + MAX_BD_NUM * sizeof(struct phytium_adma2_64_desc), + host->dma.adma_table, host->dma.adma_addr); + } + phytium_mci_deinit_hw(host); + mmc_free_host(mmc); + pci_set_drvdata(pdev, NULL); +} + +static const struct pci_device_id phytium_mci_pci_tbl[] = { + { + .vendor = 0x1DB7, + .device = 0xDC28, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = 0x5, + .class_mask = 0, + }, + {} +}; +MODULE_DEVICE_TABLE(pci, phytium_mci_pci_tbl); + +static struct pci_driver phytium_mci_pci_driver = { + .name = "phytium-mci-pci", + .id_table = phytium_mci_pci_tbl, + .probe = phytium_mci_pci_probe, + .remove = phytium_mci_pci_remove, + .driver = { + .pm = &phytium_mci_dev_pm_ops, + } +}; 
+module_pci_driver(phytium_mci_pci_driver); + +MODULE_DESCRIPTION("Phytium Multimedia Card Interface PCI driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Cheng Quan "); diff --git a/drivers/mmc/host/phytium-mci.c b/drivers/mmc/host/phytium-mci.c new file mode 100644 index 000000000000..7e9ba20df6ae --- /dev/null +++ b/drivers/mmc/host/phytium-mci.c @@ -0,0 +1,1482 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Driver for Phytium Multimedia Card Interface + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "phytium-mci.h" + +static const u32 cmd_ints_mask = MCI_INT_MASK_RE | MCI_INT_MASK_CMD | MCI_INT_MASK_RCRC | + MCI_INT_MASK_RTO | MCI_INT_MASK_HTO | MCI_RAW_INTS_HLE; + +static const u32 data_ints_mask = MCI_INT_MASK_DTO | MCI_INT_MASK_DCRC | MCI_INT_MASK_DRTO | + MCI_INT_MASK_SBE_BCI; +static const u32 cmd_err_ints_mask = MCI_INT_MASK_RTO | MCI_INT_MASK_RCRC | MCI_INT_MASK_RE | + MCI_INT_MASK_DCRC | MCI_INT_MASK_DRTO | + MCI_MASKED_INTS_SBE_BCI; + +static const u32 dmac_ints_mask = MCI_DMAC_INT_ENA_FBE | MCI_DMAC_INT_ENA_DU | + MCI_DMAC_INT_ENA_NIS | MCI_DMAC_INT_ENA_AIS; +static const u32 dmac_err_ints_mask = MCI_DMAC_INT_ENA_FBE | MCI_DMAC_INT_ENA_DU | + MCI_DMAC_INT_ENA_AIS; + +static void phytium_mci_cmd_next(struct phytium_mci_host *host, + struct mmc_request *mrq, + struct mmc_command *cmd); +static void phytium_mci_adma_reset(struct phytium_mci_host *host); +static void phytium_mci_send_cmd(struct phytium_mci_host *host, u32 cmd, u32 arg); +static bool phytium_mci_data_xfer_done(struct phytium_mci_host *host, u32 events, + struct mmc_request *mrq, struct mmc_data *data); +static void phytium_mci_init_adma_table(struct phytium_mci_host 
*host, + struct phytium_mci_dma *dma); +static void phytium_mci_init_hw(struct phytium_mci_host *host); +static int phytium_mci_get_cd(struct mmc_host *mmc); +static int phytium_mci_err_irq(struct phytium_mci_host *host, u32 dmac_events, u32 events); + +static void sdr_set_bits(void __iomem *reg, u32 bs) +{ + u32 val = readl(reg); + + val |= bs; + writel(val, reg); +} + +static void sdr_clr_bits(void __iomem *reg, u32 bs) +{ + u32 val = readl(reg); + + val &= ~bs; + writel(val, reg); +} + +static void phytium_mci_reset_hw(struct phytium_mci_host *host) +{ + sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_FIFO_RESET | MCI_CNTRL_DMA_RESET); + + while (readl(host->base + MCI_CNTRL) & (MCI_CNTRL_FIFO_RESET | MCI_CNTRL_DMA_RESET)) + cpu_relax(); + phytium_mci_send_cmd(host, MCI_CMD_UPD_CLK, 0); +} + +static void phytium_mci_update_external_clk(struct phytium_mci_host *host, u32 uhs_reg_value) +{ + writel(0, host->base + MCI_UHS_REG_EXT); + writel(uhs_reg_value, host->base + MCI_UHS_REG_EXT); + while (!(readl(host->base + MCI_CCLK_RDY) & 0x1)) + cpu_relax(); + +} + +static void phytium_mci_prepare_data(struct phytium_mci_host *host, + struct mmc_request *mrq) +{ + struct mmc_data *data = mrq->data; + + if (!(data->host_cookie & MCI_PREPARE_FLAG)) { + data->host_cookie |= MCI_PREPARE_FLAG; + data->sg_count = dma_map_sg(host->dev, data->sg, data->sg_len, + mmc_get_dma_dir(data)); + } +} + +static void phytium_mci_unprepare_data(struct phytium_mci_host *host, + struct mmc_request *mrq) +{ + struct mmc_data *data = mrq->data; + + if (data->host_cookie & MCI_ASYNC_FLAG) + return; + + if (data->host_cookie & MCI_PREPARE_FLAG) { + dma_unmap_sg(host->dev, data->sg, data->sg_len, mmc_get_dma_dir(data)); + data->host_cookie &= ~MCI_PREPARE_FLAG; + } +} + +static void phytium_mci_send_cmd(struct phytium_mci_host *host, u32 cmd, u32 arg) +{ + + writel(arg, host->base + MCI_CMDARG); + wmb(); /* drain writebuffer */ + + while (readl(host->base + MCI_STATUS) & MCI_STATUS_CARD_BUSY) + 
cpu_relax(); + + writel(MCI_CMD_START | cmd, host->base + MCI_CMD); + + while (readl(host->base + MCI_CMD) & MCI_CMD_START) + cpu_relax(); + +} + +static void phytium_mci_update_cmd11(struct phytium_mci_host *host, u32 cmd) +{ + writel(MCI_CMD_START | cmd, host->base + MCI_CMD); + + while (readl(host->base + MCI_CMD) & MCI_CMD_START) + cpu_relax(); +} + +static void phytium_mci_set_clk(struct phytium_mci_host *host, struct mmc_ios *ios) +{ + u32 div = 0xff, drv = 0, sample = 0; + unsigned long clk_rate; + u32 mci_cmd_bits = MCI_CMD_UPD_CLK; + u32 cmd_reg; + u32 cur_cmd_index; + u32 first_uhs_div, tmp_ext_reg; + + cmd_reg = readl(host->base + MCI_CMD); + cur_cmd_index = cmd_reg & 0x3F; + + if (cur_cmd_index == SD_SWITCH_VOLTAGE) + mci_cmd_bits |= MCI_CMD_VOLT_SWITCH; + if (ios->clock) { + if (host->current_ios_clk == ios->clock) + return; + + dev_dbg(host->dev, "will change clock, host->clk_rate: %ld, ios->clock: %d\n", + host->clk_rate, ios->clock); + + if (ios->clock >= 25000000) + tmp_ext_reg = 0x202; + else if (ios->clock == 400000) + tmp_ext_reg = 0x502; + else + tmp_ext_reg = 0x302; + + phytium_mci_update_external_clk(host, tmp_ext_reg); + sdr_clr_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); + + if (cur_cmd_index == SD_SWITCH_VOLTAGE) + phytium_mci_update_cmd11(host, mci_cmd_bits | cmd_reg); + else + phytium_mci_send_cmd(host, mci_cmd_bits, 0); + + clk_rate = host->clk_rate; + first_uhs_div = 1 + ((tmp_ext_reg >> 8)&0xFF); + div = clk_rate / (2 * first_uhs_div * ios->clock); + if (div > 2) { + sample = div / 2 + 1; + drv = sample - 1; + writel((sample << 16) | (drv << 8) | (div & 0xff), + host->base + MCI_CLKDIV); + } else if (div == 2) { + drv = 0; + sample = 1; + writel((drv << 8) | (sample << 16) | (div & 0xff), + host->base + MCI_CLKDIV); + } + + dev_dbg(host->dev, "UHS_REG_EXT ext: %x, CLKDIV: %x\n", + readl(host->base + MCI_UHS_REG_EXT), readl(host->base + MCI_CLKDIV)); + + sdr_set_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); + + if 
(cur_cmd_index == SD_SWITCH_VOLTAGE) + phytium_mci_update_cmd11(host, mci_cmd_bits | cmd_reg); + else + phytium_mci_send_cmd(host, mci_cmd_bits, 0); + + host->current_ios_clk = ios->clock; + + dev_dbg(host->dev, "host->clk_rate: %ld, ios->clock: %d\n", + host->clk_rate, ios->clock); + } else { + host->current_ios_clk = 0; + sdr_clr_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); + sdr_clr_bits(host->base + MCI_UHS_REG_EXT, MCI_EXT_CLK_ENABLE); + dev_dbg(host->dev, "host->clk_rate: %ld, ios->clock: %d\n", + host->clk_rate, ios->clock); + } +} + +static inline u32 +phytium_mci_cmd_find_resp(struct phytium_mci_host *host, + struct mmc_request *mrq, + struct mmc_command *cmd) +{ + u32 resp; + + switch (mmc_resp_type(cmd)) { + case MMC_RSP_R1: + case MMC_RSP_R1B: + resp = 0x5; + break; + + case MMC_RSP_R2: + resp = 0x7; + break; + + case MMC_RSP_R3: + resp = 0x1; + break; + + case MMC_RSP_NONE: + default: + resp = 0x0; + break; + } + + return resp; +} + +static inline +u32 phytium_mci_cmd_prepare_raw_cmd(struct phytium_mci_host *host, + struct mmc_request *mrq, + struct mmc_command *cmd) +{ + u32 opcode = cmd->opcode; + u32 resp = phytium_mci_cmd_find_resp(host, mrq, cmd); + u32 rawcmd = ((opcode & 0x3f) | ((resp & 0x7) << 6)); + + if (opcode == MMC_GO_INACTIVE_STATE || + (opcode == SD_IO_RW_DIRECT && ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT)) + rawcmd |= (0x1 << 14); + else if (opcode == SD_SWITCH_VOLTAGE) + rawcmd |= (0x1 << 28); + + if (test_and_clear_bit(MCI_CARD_NEED_INIT, &host->flags)) + rawcmd |= (0x1 << 15); + + if (cmd->data) { + struct mmc_data *data = cmd->data; + + rawcmd |= (0x1 << 9); + + if (data->flags & MMC_DATA_WRITE) + rawcmd |= (0x1 << 10); + } + + return (rawcmd | (0x1 << 29) | (0x1 << 31)); +} + +static inline void +phytium_mci_adma_write_desc(struct phytium_mci_host *host, + struct phytium_adma2_64_desc *desc, + dma_addr_t addr, u32 len, u32 attribute) +{ + desc->attribute = attribute; + desc->len = len; + desc->addr_lo = 
lower_32_bits(addr); + desc->addr_hi = upper_32_bits(addr); + dev_dbg(host->dev, "%s %d:addr_lo:0x%x ddr_hi:0x%x\n", __func__, + __LINE__, desc->addr_lo, desc->addr_hi); + + if ((attribute == 0x80000004) || (attribute == 0x8000000c)) { + desc->desc_lo = 0; + desc->desc_hi = 0; + } +} + +static void +phytium_mci_data_sg_write_2_admc_table(struct phytium_mci_host *host, struct mmc_data *data) +{ + struct phytium_adma2_64_desc *desc; + u32 dma_len, i; + dma_addr_t dma_address; + struct scatterlist *sg; + + phytium_mci_init_adma_table(host, &host->dma); + + desc = host->dma.adma_table; + for_each_sg(data->sg, sg, data->sg_count, i) { + dma_address = sg_dma_address(sg); + dma_len = sg_dma_len(sg); + + if (i == 0) { + if (sg_is_last(sg) || (data->sg_count == 1 && dma_len == SD_BLOCK_SIZE)) + phytium_mci_adma_write_desc(host, desc, dma_address, + dma_len, 0x8000000c); + else + phytium_mci_adma_write_desc(host, desc, dma_address, + dma_len, 0x8000001a); + } else if (sg_is_last(sg)) { + phytium_mci_adma_write_desc(host, desc, dma_address, + dma_len, 0x80000004); + } else { + phytium_mci_adma_write_desc(host, desc, dma_address, + dma_len, 0x80000012); + } + + desc++; + } +} + +static void +phytium_mci_data_sg_write_2_fifo(struct phytium_mci_host *host, struct mmc_data *data) +{ + struct scatterlist *sg; + u32 dma_len, i, j; + u32 *virt_addr; + + if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) { + writel(0x1<<10, host->base + MCI_CMD); + for_each_sg(data->sg, sg, data->sg_count, i) { + dma_len = sg_dma_len(sg); + virt_addr = sg_virt(data->sg); + for (j = 0; j < (dma_len / 4); j++) { + writel(*virt_addr, host->base + MCI_DATA); + virt_addr++; + } + } + } +} + +static void phytium_mci_restart_clk(struct phytium_mci_host *host) +{ + u32 clk_div, uhs; + + while (readl(host->base + MCI_CMD) & MCI_CMD_START) + cpu_relax(); + sdr_clr_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); + clk_div = readl(host->base + MCI_CLKDIV); + uhs = readl(host->base + MCI_UHS_REG_EXT); + writel(0, 
host->base + MCI_UHS_REG_EXT); + writel(uhs, host->base + MCI_UHS_REG_EXT); + while (!(readl(host->base + MCI_CCLK_RDY) & 0x1)) + cpu_relax(); + + writel(clk_div, host->base + MCI_CLKDIV); + sdr_set_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); + writel(MCI_CMD_START | MCI_CMD_UPD_CLK, host->base + MCI_CMD); + while (readl(host->base + MCI_CMD) & MCI_CMD_START) + cpu_relax(); +} + +static int +phytim_mci_start_multiple_write(struct phytium_mci_host *host, + struct mmc_request *mrq, u32 cnts, u32 offset) +{ + u32 rawcmd, cmd_status; + struct mmc_command *cmd = mrq->cmd; + u32 *rsp = cmd->resp; + unsigned long deadline_time; + + if (!(host->caps & MMC_CAP_NONREMOVABLE) && readl(host->base + MCI_CARD_DETECT)) + return -ESHUTDOWN; + + while ((readl(host->base + MCI_STATUS) & (MCI_STATUS_CARD_BUSY))) + cpu_relax(); + + writel(0xffffe, host->base + MCI_RAW_INTS); + rawcmd = phytium_mci_cmd_prepare_raw_cmd(host, mrq, cmd); + writel(mrq->data->blksz, host->base + MCI_BLKSIZ); + writel(cnts * mrq->data->blksz, host->base + MCI_BYTCNT); + writel(cmd->arg + offset, host->base + MCI_CMDARG); + writel(rawcmd, host->base + MCI_CMD); + deadline_time = jiffies + msecs_to_jiffies(200); + + cmd_status = readl(host->base + MCI_RAW_INTS); + while (!(cmd_status & MCI_MASKED_INTS_CMD)) { + if (!(host->caps & MMC_CAP_NONREMOVABLE) && readl(host->base + MCI_CARD_DETECT)) + return -ESHUTDOWN; + + cmd_status = readl(host->base + MCI_RAW_INTS); + if (cmd_err_ints_mask & cmd_status) + return -ESHUTDOWN; + + if (cmd_status & MCI_MASKED_INTS_CMD) + break; + + if (time_after(jiffies, deadline_time)) + return -ESHUTDOWN; + } + + if (cmd_status & MCI_MASKED_INTS_CMD) { + if (cmd->flags & MMC_RSP_136) { + rsp[3] = readl(host->base + MCI_RESP0); + rsp[2] = readl(host->base + MCI_RESP1); + rsp[1] = readl(host->base + MCI_RESP2); + rsp[0] = readl(host->base + MCI_RESP3); + } else { + rsp[0] = readl(host->base + MCI_RESP0); + } + } + deadline_time = jiffies + msecs_to_jiffies(1000); + while 
(!(cmd_status & MCI_MASKED_INTS_DTO)) { + if (!(host->caps & MMC_CAP_NONREMOVABLE) && readl(host->base + MCI_CARD_DETECT)) + return -ESHUTDOWN; + cmd_status = readl(host->base + MCI_RAW_INTS); + if (cmd_err_ints_mask & cmd_status) + return -ESHUTDOWN; + if (cmd_status & MCI_MASKED_INTS_DTO) + return 0; + if (time_after(jiffies, deadline_time)) + return -ESHUTDOWN; + } + return 0; +} + +static int +phytium_mci_start_sbc_stop_cmd(struct phytium_mci_host *host, struct mmc_request *mrq, + struct mmc_command *cmd, u32 arg) +{ + u32 rawcmd, cmd_status; + u32 *rsp = cmd->resp; + unsigned long deadline_time; + + writel(0xffffe, host->base + MCI_RAW_INTS); + + while ((readl(host->base + MCI_STATUS) & (MCI_STATUS_CARD_BUSY))) + cpu_relax(); + + rawcmd = phytium_mci_cmd_prepare_raw_cmd(host, mrq, cmd); + writel(arg, host->base + MCI_CMDARG); + writel(rawcmd, host->base + MCI_CMD); + + deadline_time = jiffies + msecs_to_jiffies(200); + cmd_status = readl(host->base + MCI_RAW_INTS); + while (!(cmd_status & MCI_MASKED_INTS_CMD)) { + if (!(host->caps & MMC_CAP_NONREMOVABLE) && readl(host->base + MCI_CARD_DETECT)) + return -ENOMEDIUM; + + cmd_status = readl(host->base + MCI_RAW_INTS); + if (cmd_err_ints_mask & cmd_status) + return -ETIMEDOUT; + + if (cmd_status & MCI_MASKED_INTS_CMD) + break; + + if (time_after(jiffies, deadline_time)) + return -ETIMEDOUT; + } + + if (cmd_status & MCI_MASKED_INTS_CMD) { + if (cmd->flags & MMC_RSP_136) { + rsp[3] = readl(host->base + MCI_RESP0); + rsp[2] = readl(host->base + MCI_RESP1); + rsp[1] = readl(host->base + MCI_RESP2); + rsp[0] = readl(host->base + MCI_RESP3); + } else { + rsp[0] = readl(host->base + MCI_RESP0); + } + } + + if (cmd_err_ints_mask & cmd_status) + return -ETIMEDOUT; + + return 0; +} + +static void +phytium_mci_start_write_multiple_non_dma(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + struct mmc_data *data = mrq->data; + u32 write_cnts, last_cnts; + u32 i, j, k, 
send_cnt_one_sg, block_offset; + int ret = 0, dma_len; + struct scatterlist *sg; + u32 *virt_addr = NULL; + + write_cnts = data->blocks / 4; + (data->blocks % 4) ? write_cnts++ : write_cnts; + last_cnts = data->blocks % 4; + if (!(host->caps & MMC_CAP_NONREMOVABLE) && readl(host->base + MCI_CARD_DETECT)) { + ret = -ENOMEDIUM; + goto write_err; + } + + dev_dbg(host->dev, "%s: cmd:%d, block counts:%d\n", + __func__, mrq->cmd->opcode, data->blocks); + + sdr_clr_bits(host->base + MCI_CNTRL, MCI_CNTRL_USE_INTERNAL_DMAC); + sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_FIFO_RESET); + while (readl(host->base + MCI_CNTRL) & MCI_CNTRL_FIFO_RESET) + cpu_relax(); + sdr_clr_bits(host->base + MCI_BUS_MODE, MCI_BUS_MODE_DE); + + if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) { + block_offset = 0; + for_each_sg(data->sg, sg, data->sg_count, i) { + /* Each SG data transfor starts */ + dma_len = sg_dma_len(sg); + send_cnt_one_sg = (dma_len / MCI_MAX_FIFO_CNT) + 1; + virt_addr = sg_virt(sg); + for (k = 0; k < send_cnt_one_sg; k++) { + if (dma_len && dma_len >= MCI_MAX_FIFO_CNT) { + /*first write sbc cmd*/ + ret = phytium_mci_start_sbc_stop_cmd(host, mrq, + mrq->sbc, 4); + if (ret) + goto write_err; + writel(0x1 << 10, host->base + MCI_CMD); + for (j = 0; j < (MCI_MAX_FIFO_CNT / 4); j++) { + writel(*virt_addr, host->base + MCI_DATA); + virt_addr++; + } + + /*second write cmd25 here*/ + ret = phytim_mci_start_multiple_write(host, mrq, 4, + block_offset); + if (ret) + goto write_err; + block_offset += 4; + dma_len -= MCI_MAX_FIFO_CNT; + } else if (dma_len > 0) { + /*first write sbc cmd*/ + last_cnts = dma_len / 512; + ret = phytium_mci_start_sbc_stop_cmd(host, mrq, mrq->sbc, + last_cnts); + if (ret) + goto write_err; + writel(0x1 << 10, host->base + MCI_CMD); + for (j = 0; j < (dma_len / 4); j++) { + writel(*virt_addr, host->base + MCI_DATA); + virt_addr++; + } + /*second write cmd25 here*/ + ret = phytim_mci_start_multiple_write(host, mrq, last_cnts, + block_offset); + if (ret) + goto 
write_err; + block_offset += last_cnts; + dma_len = 0; + } else { + dev_dbg(host->dev, "%s: sg %d end\n", __func__, i); + break; + } + } + } + } + +write_err: + host->data = NULL; + host->cmd = NULL; + host->mrq = NULL; + writel(0xffffe, host->base + MCI_RAW_INTS); + if (ret) { + data->bytes_xfered = 0; + if (ret == -ESHUTDOWN) { + sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_FIFO_RESET); + while (readl(host->base + MCI_CNTRL) & MCI_CNTRL_FIFO_RESET) + cpu_relax(); + + sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_CONTROLLER_RESET); + while (readl(host->base + MCI_STATUS) & MCI_STATUS_CARD_BUSY) + sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_CONTROLLER_RESET); + phytium_mci_restart_clk(host); + phytium_mci_start_sbc_stop_cmd(host, mrq, mrq->stop, mrq->stop->arg); + } + data->error = -ETIMEDOUT; + mrq->cmd->error = -ETIMEDOUT; + mmc_request_done(host->mmc, mrq); + return; + } + data->bytes_xfered = data->blocks * data->blksz; + mmc_request_done(host->mmc, mrq); +} + +static void +phytium_mci_start_data(struct phytium_mci_host *host, struct mmc_request *mrq, + struct mmc_command *cmd, struct mmc_data *data) +{ + bool read; + u32 rawcmd; + unsigned long flags; + + + WARN_ON(host->cmd); + host->cmd = cmd; + cmd->error = 0; + WARN_ON(host->data); + host->data = data; + read = data->flags & MMC_DATA_READ; + + if (!(host->caps & MMC_CAP_NONREMOVABLE) && readl(host->base + MCI_CARD_DETECT)) { + phytium_mci_err_irq(host, 0, MCI_INT_MASK_RTO); + return; + } + /* clear interrupts */ + writel(0xffffe, host->base + MCI_RAW_INTS); + + sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_FIFO_RESET | MCI_CNTRL_DMA_RESET); + + while (readl(host->base + MCI_CNTRL) & (MCI_CNTRL_FIFO_RESET | MCI_CNTRL_DMA_RESET)) + cpu_relax(); + + if (host->adtc_type == COMMOM_ADTC) + sdr_clr_bits(host->base + MCI_CNTRL, MCI_CNTRL_USE_INTERNAL_DMAC); + else + sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_USE_INTERNAL_DMAC); + wmb(); /* drain writebuffer */ + sdr_clr_bits(host->base + MCI_CNTRL, 
MCI_CNTRL_INT_ENABLE); + + rawcmd = phytium_mci_cmd_prepare_raw_cmd(host, mrq, cmd); + if (host->is_use_dma && host->adtc_type == BLOCK_RW_ADTC) + phytium_mci_data_sg_write_2_admc_table(host, data); + else + phytium_mci_data_sg_write_2_fifo(host, data); + + spin_lock_irqsave(&host->lock, flags); + sdr_set_bits(host->base + MCI_INT_MASK, cmd_ints_mask | data_ints_mask); + if (host->is_use_dma && host->adtc_type == BLOCK_RW_ADTC) { + sdr_set_bits(host->base + MCI_DMAC_INT_ENA, dmac_ints_mask); + /* Enable the IDMAC */ + sdr_set_bits(host->base + MCI_BUS_MODE, MCI_BUS_MODE_DE); + writel((u32)host->dma.adma_addr, host->base + MCI_DESC_LIST_ADDRL); + writel((u32)(host->dma.adma_addr >> 32), host->base + MCI_DESC_LIST_ADDRH); + } + writel(mrq->data->blksz, host->base + MCI_BLKSIZ); + writel(mrq->data->blocks * mrq->data->blksz, host->base + MCI_BYTCNT); + sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_INT_ENABLE); + writel(cmd->arg, host->base + MCI_CMDARG); + wmb(); /* drain writebuffer */ + writel(rawcmd, host->base + MCI_CMD); + spin_unlock_irqrestore(&host->lock, flags); +} + +static void phytium_mci_track_cmd_data(struct phytium_mci_host *host, + struct mmc_command *cmd, + struct mmc_data *data) +{ + if (host->error) + dev_dbg(host->dev, "%s: cmd=%d arg=%08X; host->error=0x%08X\n", + __func__, cmd->opcode, cmd->arg, host->error); +} + +static void phytium_mci_request_done(struct phytium_mci_host *host, struct mmc_request *mrq) +{ + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + host->mrq = NULL; + if (host->cmd) + host->cmd = NULL; + spin_unlock_irqrestore(&host->lock, flags); + phytium_mci_track_cmd_data(host, mrq->cmd, mrq->data); + + if (mrq->data) + phytium_mci_unprepare_data(host, mrq); + + mmc_request_done(host->mmc, mrq); +} + +static bool phytium_mci_cmd_done(struct phytium_mci_host *host, int events, + struct mmc_request *mrq, struct mmc_command *cmd) +{ + bool done = false; + unsigned long flags; + u32 *rsp = cmd->resp; + + if (!(events 
& (MCI_RAW_INTS_RCRC | MCI_RAW_INTS_RE | MCI_RAW_INTS_CMD | + MCI_RAW_INTS_RTO | MCI_INT_MASK_HTO))) { + dev_err(host->dev, "No interrupt generation:h%x\n", events); + return done; + } + + spin_lock_irqsave(&host->lock, flags); + done = !host->cmd; + host->cmd = NULL; + if (done) { + spin_unlock_irqrestore(&host->lock, flags); + return true; + } + sdr_clr_bits(host->base + MCI_INT_MASK, cmd_ints_mask); + spin_unlock_irqrestore(&host->lock, flags); + + if (cmd->flags & MMC_RSP_PRESENT) { + if (cmd->flags & MMC_RSP_136) { + rsp[3] = readl(host->base + MCI_RESP0); + rsp[2] = readl(host->base + MCI_RESP1); + rsp[1] = readl(host->base + MCI_RESP2); + rsp[0] = readl(host->base + MCI_RESP3); + } else { + rsp[0] = readl(host->base + MCI_RESP0); + } + + if (cmd->opcode == SD_SEND_RELATIVE_ADDR) + host->current_rca = rsp[0] & 0xFFFF0000; + } + if (!(events & (MCI_RAW_INTS_CMD | MCI_INT_MASK_HTO))) { + if (!(host->caps & MMC_CAP_NONREMOVABLE) && (events & MCI_RAW_INTS_RTO) + && readl(host->base + MCI_CARD_DETECT)) { + cmd->error = -ENOMEDIUM; + rsp[0] = 0; + } else if (events & MCI_RAW_INTS_RTO || + (cmd->opcode != MMC_SEND_TUNING_BLOCK && + cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)) { + cmd->error = -ETIMEDOUT; + } else if (events & MCI_RAW_INTS_RCRC) { + cmd->error = -EILSEQ; + } else { + cmd->error = -ETIMEDOUT; + } + } + phytium_mci_cmd_next(host, mrq, cmd); + return true; +} + +static void phytium_mci_start_command(struct phytium_mci_host *host, + struct mmc_request *mrq, + struct mmc_command *cmd) +{ + u32 rawcmd; + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + WARN_ON(host->cmd); + host->cmd = cmd; + cmd->error = 0; + writel(0xffffe, host->base + MCI_RAW_INTS); + + rawcmd = phytium_mci_cmd_prepare_raw_cmd(host, mrq, cmd); + spin_unlock_irqrestore(&host->lock, flags); + + if (!(host->caps & MMC_CAP_NONREMOVABLE) && readl(host->base + MCI_CARD_DETECT)) { + phytium_mci_cmd_done(host, MCI_RAW_INTS_RTO, mrq, cmd); + return; + } + + 
spin_lock_irqsave(&host->lock, flags); + sdr_set_bits(host->base + MCI_INT_MASK, cmd_ints_mask); + writel(cmd->arg, host->base + MCI_CMDARG); + writel(rawcmd, host->base + MCI_CMD); + spin_unlock_irqrestore(&host->lock, flags); +} + +static void +phytium_mci_cmd_next(struct phytium_mci_host *host, struct mmc_request *mrq, + struct mmc_command *cmd) +{ + if ((cmd->error && !(cmd->opcode == MMC_SEND_TUNING_BLOCK || + cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)) || + (mrq->sbc && mrq->sbc->error)) { + phytium_mci_request_done(host, mrq); + } else if (cmd == mrq->sbc) { + if ((mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK) || + (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) || + (mrq->cmd->opcode == MMC_READ_SINGLE_BLOCK) || + (mrq->cmd->opcode == MMC_WRITE_BLOCK)) { + dev_dbg(host->dev, "%s %d:sbc done and next cmd :%d length:%d\n", + __func__, __LINE__, mrq->cmd->opcode, mrq->data->sg->length); + phytium_mci_prepare_data(host, mrq); + if (host->is_use_dma) + host->adtc_type = BLOCK_RW_ADTC; + else + host->adtc_type = COMMOM_ADTC; + phytium_mci_start_data(host, mrq, mrq->cmd, mrq->data); + } else { + dev_err(host->dev, "%s %d:ERROR: cmd %d followers the SBC\n", + __func__, __LINE__, cmd->opcode); + } + } else if (!cmd->data) { + phytium_mci_request_done(host, mrq); + } +} + +static void phytium_mci_ops_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + + host->error = 0; + WARN_ON(host->mrq); + host->mrq = mrq; + + while ((readl(host->base + MCI_STATUS) & (MCI_STATUS_CARD_BUSY))) + cpu_relax(); + + dev_dbg(host->dev, "%s %d: cmd:%d arg:0x%x\n", __func__, __LINE__, + mrq->cmd->opcode, mrq->cmd->arg); + + if (host->is_device_x100 && mrq->sbc && mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) { + phytium_mci_start_write_multiple_non_dma(mmc, mrq); + return; + } + + if (mrq->sbc) { + phytium_mci_start_command(host, mrq, mrq->sbc); + return; + } + if (mrq->data) { + phytium_mci_prepare_data(host, mrq); + + if 
((mrq->data->sg->length >= 512) && host->is_use_dma && + ((mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK) || + (mrq->cmd->opcode == MMC_READ_SINGLE_BLOCK) || + (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) || + (mrq->cmd->opcode == MMC_WRITE_BLOCK) || + (mrq->cmd->opcode == SD_IO_RW_EXTENDED))) + + host->adtc_type = BLOCK_RW_ADTC; + else + host->adtc_type = COMMOM_ADTC; + + phytium_mci_start_data(host, mrq, mrq->cmd, mrq->data); + return; + } + phytium_mci_start_command(host, mrq, mrq->cmd); +} + +static void phytium_mci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + struct mmc_data *data = mrq->data; + + if (!data) + return; + + phytium_mci_prepare_data(host, mrq); + data->host_cookie |= MCI_ASYNC_FLAG; +} + +static void phytium_mci_post_req(struct mmc_host *mmc, struct mmc_request *mrq, + int err) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + struct mmc_data *data = mrq->data; + + if (!data) + return; + + if (data->host_cookie & MCI_ASYNC_FLAG) { + data->host_cookie &= ~MCI_ASYNC_FLAG; + phytium_mci_unprepare_data(host, mrq); + } +} + +static void phytium_mci_data_read_without_dma(struct phytium_mci_host *host, + struct mmc_data *data) +{ + u32 length, i, data_val, dma_len, tmp = 0; + u32 *virt_addr; + unsigned long flags; + struct scatterlist *sg; + + length = data->blocks * data->blksz; + + if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) { + spin_lock_irqsave(&host->lock, flags); + if (data->host_cookie & MCI_ASYNC_FLAG) { + tmp = MCI_ASYNC_FLAG; + phytium_mci_post_req(host->mmc, data->mrq, 0); + } else { + phytium_mci_unprepare_data(host, data->mrq); + } + + for_each_sg(data->sg, sg, data->sg_count, i) { + dma_len = sg_dma_len(sg); + virt_addr = sg_virt(data->sg); + + for (i = 0; i < (dma_len / 4); i++) { + data_val = readl(host->base + MCI_DATA); + memcpy(virt_addr, &data_val, 4); + ++virt_addr; + } + } + + if (tmp & MCI_ASYNC_FLAG) + phytium_mci_pre_req(host->mmc, data->mrq); + else + 
phytium_mci_prepare_data(host, data->mrq); + + spin_unlock_irqrestore(&host->lock, flags); + } + data->bytes_xfered = length; +} + +static void phytium_mci_data_xfer_next(struct phytium_mci_host *host, + struct mmc_request *mrq, + struct mmc_data *data) +{ + if (mmc_op_multi(mrq->cmd->opcode) && mrq->stop && + (data->error && !mrq->sbc)) { + while ((readl(host->base + MCI_STATUS) & (MCI_STATUS_CARD_BUSY))) + cpu_relax(); + phytium_mci_start_command(host, mrq, mrq->stop); + } else { + phytium_mci_request_done(host, mrq); + } +} + +static bool phytium_mci_data_xfer_done(struct phytium_mci_host *host, u32 events, + struct mmc_request *mrq, struct mmc_data *data) +{ + unsigned long flags; + bool done; + + unsigned int check_data = events & (MCI_RAW_INTS_DTO | MCI_RAW_INTS_RCRC | + MCI_RAW_INTS_DCRC | MCI_RAW_INTS_RE | + MCI_RAW_INTS_DRTO | MCI_RAW_INTS_EBE | + MCI_DMAC_STATUS_AIS | MCI_DMAC_STATUS_DU | + MCI_RAW_INTS_SBE_BCI | MCI_INT_MASK_RTO); + + spin_lock_irqsave(&host->lock, flags); + done = !host->data; + + if (check_data || host->data) + host->data = NULL; + spin_unlock_irqrestore(&host->lock, flags); + + if (done) + return true; + if (check_data) { + spin_lock_irqsave(&host->lock, flags); + sdr_clr_bits(host->base + MCI_DMAC_INT_ENA, dmac_ints_mask); + sdr_clr_bits(host->base + MCI_INT_MASK, data_ints_mask); + /* Stop the IDMAC running */ + sdr_clr_bits(host->base + MCI_BUS_MODE, MCI_BUS_MODE_DE); + dev_dbg(host->dev, "DMA stop\n"); + spin_unlock_irqrestore(&host->lock, flags); + + if (events & MCI_RAW_INTS_DTO) { + if (!host->is_use_dma || + (host->is_use_dma && host->adtc_type == COMMOM_ADTC && + (mrq->cmd->flags & MMC_CMD_MASK) == MMC_CMD_ADTC)) + phytium_mci_data_read_without_dma(host, data); + else + data->bytes_xfered = data->blocks * data->blksz; + } else { + data->bytes_xfered = 0; + if (!(host->caps & MMC_CAP_NONREMOVABLE) + && readl(host->base + MCI_CARD_DETECT) + && (events & cmd_err_ints_mask)) { + data->error = -ENOMEDIUM; + data->mrq->cmd->error = 
-ENOMEDIUM; + } else if (events & (MCI_RAW_INTS_DCRC | MCI_RAW_INTS_EBE | + MCI_RAW_INTS_SBE_BCI)) { + data->error = -EILSEQ; + } else { + data->error = -ETIMEDOUT; + } + } + + phytium_mci_data_xfer_next(host, mrq, data); + done = true; + } + return done; +} + +static int phytium_mci_card_busy(struct mmc_host *mmc) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + u32 status; + + status = readl(host->base + MCI_STATUS); + + return !!(status & MCI_STATUS_CARD_BUSY); +} + +static void __phytium_mci_enable_sdio_irq(struct phytium_mci_host *host, int enable) +{ + if (enable) + sdr_set_bits(host->base + MCI_INT_MASK, MCI_INT_MASK_SDIO); + else + sdr_clr_bits(host->base + MCI_INT_MASK, MCI_INT_MASK_SDIO); +} + +static void phytium_mci_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + + __phytium_mci_enable_sdio_irq(host, enable); +} + +static void hotplug_timer_func(struct timer_list *t) +{ + struct phytium_mci_host *host; + u32 status; + + host = from_timer(host, t, hotplug_timer); + if (!host) + return; + + status = readl(host->base + MCI_CARD_DETECT); + + if (status & 0x1) { + if (host->mmc->card) { + cancel_delayed_work(&host->mmc->detect); + mmc_detect_change(host->mmc, msecs_to_jiffies(100)); + } + } else { + cancel_delayed_work(&host->mmc->detect); + mmc_detect_change(host->mmc, msecs_to_jiffies(200)); + } +} + +static int phytium_mci_err_irq(struct phytium_mci_host *host, u32 dmac_events, u32 events) +{ + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + + mrq = host->mrq; + cmd = host->cmd; + data = host->data; + + if (cmd && (cmd == mrq->sbc)) { + phytium_mci_cmd_done(host, MCI_RAW_INTS_RTO, mrq, mrq->sbc); + } else if (cmd && (cmd == mrq->stop)) { + phytium_mci_cmd_done(host, MCI_RAW_INTS_RTO, mrq, mrq->stop); + } else if (data) { + data->error = -ETIMEDOUT; + if ((data->flags & MMC_DATA_READ) == MMC_DATA_READ || + (data->flags & MMC_DATA_WRITE) == MMC_DATA_WRITE) + 
phytium_mci_data_xfer_done(host, events | dmac_events, mrq, data); + } else if (cmd) { + phytium_mci_cmd_done(host, MCI_RAW_INTS_RTO, mrq, mrq->cmd); + } + + return 0; +} + +static irqreturn_t phytium_mci_irq(int irq, void *dev_id) +{ + struct phytium_mci_host *host = (struct phytium_mci_host *) dev_id; + unsigned long flags; + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + u32 events, event_mask, dmac_events, dmac_evt_mask; + + if (!host) + return IRQ_NONE; + writel(0, host->base + 0xfd0); + + spin_lock_irqsave(&host->lock, flags); + events = readl(host->base + MCI_RAW_INTS); + dmac_events = readl(host->base + MCI_DMAC_STATUS); + event_mask = readl(host->base + MCI_INT_MASK); + dmac_evt_mask = readl(host->base + MCI_DMAC_INT_ENA); + if ((!events) && (!(dmac_events&0x1fff))) { + spin_unlock_irqrestore(&host->lock, flags); + return IRQ_NONE; + } + dev_dbg(host->dev, "%s:events:%x,mask:0x%x,dmac_events:%x,dmac_mask:0x%x,cmd:%d\n", + __func__, events, event_mask, dmac_events, dmac_evt_mask, + host->mrq ? host->mrq->cmd->opcode : 255); + + mrq = host->mrq; + cmd = host->cmd; + data = host->data; + + if (((events & event_mask) & MCI_RAW_INTS_SDIO) && + ((events == 0x10001) || (events == 0x10000) || (events == 0x10040))) { + writel(events, host->base + MCI_RAW_INTS); + __phytium_mci_enable_sdio_irq(host, 0); + sdio_signal_irq(host->mmc); + spin_unlock_irqrestore(&host->lock, flags); + goto irq_out; + } + + writel(events, host->base + MCI_RAW_INTS); + writel(dmac_events, host->base + MCI_DMAC_STATUS); + spin_unlock_irqrestore(&host->lock, flags); + + if (((events & event_mask) == 0) && ((dmac_evt_mask & dmac_events) == 0)) + goto irq_out; + + if (((events & event_mask) & MCI_RAW_INTS_CD) && !(host->caps & MMC_CAP_NONREMOVABLE)) { + mod_timer(&host->hotplug_timer, jiffies + usecs_to_jiffies(20000)); + dev_dbg(host->dev, "sd status changed here ! 
status:[%d] [%s %d]", + readl(host->base + MCI_CARD_DETECT), __func__, __LINE__); + + if ((events & event_mask) == MCI_RAW_INTS_CD) + goto irq_out; + } + + if (!mrq) { + if (events & MCI_RAW_INTS_HLE) + dev_dbg(host->dev, + "%s: MRQ=NULL and HW write locked, events=%08x,event_mask=%08x\n", + __func__, events, event_mask); + else + dev_dbg(host->dev, "%s: MRQ=NULL events:%08X evt_mask=%08X,sd_status:%d\n", + __func__, events, event_mask, readl(host->base + MCI_CARD_DETECT)); + goto irq_out; + } + + if ((dmac_events & dmac_err_ints_mask) || (events & cmd_err_ints_mask)) { + dev_dbg(host->dev, "ERR:events:%x,mask:0x%x,dmac_evts:%x,dmac_mask:0x%x,cmd:%d\n", + events, event_mask, dmac_events, dmac_evt_mask, mrq->cmd->opcode); + phytium_mci_err_irq(host, dmac_events & dmac_err_ints_mask, + events & cmd_err_ints_mask); + goto irq_out; + } + + if ((events & MCI_MASKED_INTS_DTO) && (events & MCI_MASKED_INTS_CMD)) { + phytium_mci_cmd_done(host, events, mrq, cmd); + phytium_mci_data_xfer_done(host, (events & data_ints_mask) | + (dmac_events & dmac_ints_mask), mrq, data); + } else if (events & MCI_MASKED_INTS_CMD || + ((events & MCI_INT_MASK_HTO) && (cmd->opcode == SD_SWITCH_VOLTAGE))) { + phytium_mci_cmd_done(host, events, mrq, cmd); + } else if (events & MCI_MASKED_INTS_DTO) { + phytium_mci_data_xfer_done(host, (events & data_ints_mask) | + (dmac_events & dmac_ints_mask), mrq, data); + } + +irq_out: + return IRQ_HANDLED; +} + +static void phytium_mci_init_hw(struct phytium_mci_host *host) +{ + u32 val; + + sdr_set_bits(host->base + MCI_PWREN, MCI_PWREN_ENABLE); + sdr_set_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); + sdr_set_bits(host->base + MCI_UHS_REG_EXT, MCI_EXT_CLK_ENABLE); + sdr_clr_bits(host->base + MCI_UHS_REG, MCI_UHS_REG_VOLT); + + phytium_mci_reset_hw(host); + + if (host->mmc->caps & MMC_CAP_NONREMOVABLE) + sdr_set_bits(host->base + MCI_CARD_RESET, MCI_CARD_RESET_ENABLE); + else + sdr_clr_bits(host->base + MCI_CARD_RESET, MCI_CARD_RESET_ENABLE); + + 
writel(0, host->base + MCI_INT_MASK); + val = readl(host->base + MCI_RAW_INTS); + writel(val, host->base + MCI_RAW_INTS); + writel(0, host->base + MCI_DMAC_INT_ENA); + val = readl(host->base + MCI_DMAC_STATUS); + writel(val, host->base + MCI_DMAC_STATUS); + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE)) + writel(MCI_INT_MASK_CD, host->base + MCI_INT_MASK); + + sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_INT_ENABLE | + MCI_CNTRL_USE_INTERNAL_DMAC); + + writel(0xFFFFFFFF, host->base + MCI_TMOUT); + dev_info(host->dev, "init hardware done!"); + +} + +void phytium_mci_deinit_hw(struct phytium_mci_host *host) +{ + u32 val; + + sdr_clr_bits(host->base + MCI_PWREN, MCI_PWREN_ENABLE); + sdr_clr_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); + sdr_clr_bits(host->base + MCI_UHS_REG_EXT, MCI_EXT_CLK_ENABLE); + sdr_clr_bits(host->base + MCI_UHS_REG, MCI_UHS_REG_VOLT); + writel(0, host->base + MCI_INT_MASK); + val = readl(host->base + MCI_RAW_INTS); + writel(val, host->base + MCI_RAW_INTS); + writel(0, host->base + MCI_DMAC_INT_ENA); + val = readl(host->base + MCI_DMAC_STATUS); + writel(val, host->base + MCI_DMAC_STATUS); + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE)) + writel(MCI_INT_MASK_CD, host->base + MCI_INT_MASK); +} +EXPORT_SYMBOL_GPL(phytium_mci_deinit_hw); + +static void phytium_mci_adma_reset(struct phytium_mci_host *host) +{ + u32 bmod = readl(host->base + MCI_BUS_MODE); + + bmod |= MCI_BUS_MODE_SWR; + writel(bmod, host->base + MCI_BUS_MODE); +} + +static void phytium_mci_init_adma_table(struct phytium_mci_host *host, + struct phytium_mci_dma *dma) +{ + struct phytium_adma2_64_desc *adma_table = dma->adma_table; + dma_addr_t dma_addr; + int i; + + memset(adma_table, 0, sizeof(struct phytium_adma2_64_desc) * MAX_BD_NUM); + + for (i = 0; i < (MAX_BD_NUM - 1); i++) { + dma_addr = dma->adma_addr + sizeof(*adma_table) * (i + 1); + adma_table[i].desc_lo = lower_32_bits(dma_addr); + adma_table[i].desc_hi = upper_32_bits(dma_addr); + adma_table[i].attribute = 
0; + adma_table[i].NON1 = 0; + adma_table[i].len = 0; + adma_table[i].NON2 = 0; + } + + phytium_mci_adma_reset(host); +} + +static void phytium_mci_set_buswidth(struct phytium_mci_host *host, u32 width) +{ + u32 val; + + switch (width) { + case MMC_BUS_WIDTH_1: + val = MCI_BUS_1BITS; + break; + + case MMC_BUS_WIDTH_4: + val = MCI_BUS_4BITS; + break; + + case MMC_BUS_WIDTH_8: + val = MCI_BUS_8BITS; + break; + default: + val = MCI_BUS_4BITS; + break; + } + writel(val, host->base + MCI_CTYPE); + dev_dbg(host->dev, "Bus Width = %d, set value:0x%x\n", width, val); +} + +static void phytium_mci_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + + if (ios->timing == MMC_TIMING_MMC_DDR52 || ios->timing == MMC_TIMING_UHS_DDR50) + sdr_set_bits(host->base + MCI_UHS_REG, MCI_UHS_REG_DDR); + else + sdr_clr_bits(host->base + MCI_UHS_REG, MCI_UHS_REG_DDR); + + phytium_mci_set_buswidth(host, ios->bus_width); + + switch (ios->power_mode) { + case MMC_POWER_UP: + set_bit(MCI_CARD_NEED_INIT, &host->flags); + writel(MCI_POWER_ON, host->base + MCI_PWREN); + break; + + case MMC_POWER_ON: + break; + + case MMC_POWER_OFF: + writel(MCI_POWER_OFF, host->base + MCI_PWREN); + break; + + default: + break; + } + phytium_mci_set_clk(host, ios); +} + +static void phytium_mci_ack_sdio_irq(struct mmc_host *mmc) +{ + unsigned long flags; + struct phytium_mci_host *host = mmc_priv(mmc); + + spin_lock_irqsave(&host->lock, flags); + __phytium_mci_enable_sdio_irq(host, 1); + spin_unlock_irqrestore(&host->lock, flags); +} + +static int phytium_mci_get_cd(struct mmc_host *mmc) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + u32 status; + + if (mmc->caps & MMC_CAP_NONREMOVABLE) + return 1; + + status = readl(host->base + MCI_CARD_DETECT); + + if ((status & 0x1) == 0x1) + return 0; + + return 1; +} + +static int phytium_mci_ops_switch_volt(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + unsigned 
int is_voltage_180 = 0; + + is_voltage_180 = readl(host->base + MCI_UHS_REG); + if ((mmc->caps & MMC_CAP_NONREMOVABLE) && (ios->signal_voltage != MMC_SIGNAL_VOLTAGE_180)) + return -EINVAL; + + if ((ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) && (is_voltage_180 & 0x1)) + sdr_clr_bits(host->base + MCI_UHS_REG, MCI_UHS_REG_VOLT); + else if ((ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) && (!(is_voltage_180 & 0x1))) + sdr_set_bits(host->base + MCI_UHS_REG, MCI_UHS_REG_VOLT); + else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_120) + return -EINVAL; + return 0; +} + +static void phytium_mci_hw_reset(struct mmc_host *mmc) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + u32 reset_flag; + + if (host->is_use_dma) { + reset_flag = MCI_CNTRL_FIFO_RESET | MCI_CNTRL_DMA_RESET; + phytium_mci_adma_reset(host); + sdr_set_bits(host->base + MCI_CNTRL, reset_flag); + } else { + reset_flag = MCI_CNTRL_FIFO_RESET; + sdr_set_bits(host->base + MCI_CNTRL, reset_flag); + } + + while (readl(host->base + MCI_CNTRL) & reset_flag) + cpu_relax(); + + sdr_clr_bits(host->base + MCI_CARD_RESET, MCI_CARD_RESET_ENABLE); + udelay(5); + sdr_set_bits(host->base + MCI_CARD_RESET, MCI_CARD_RESET_ENABLE); + usleep_range(200, 300); +} + +#ifdef CONFIG_PM_SLEEP +int phytium_mci_suspend(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct phytium_mci_host *host = mmc_priv(mmc); + + phytium_mci_deinit_hw(host); + return 0; +} +EXPORT_SYMBOL(phytium_mci_suspend); + +int phytium_mci_resume(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct phytium_mci_host *host = mmc_priv(mmc); + + phytium_mci_init_hw(host); + return 0; +} +EXPORT_SYMBOL(phytium_mci_resume); + +#endif + +#ifdef CONFIG_PM +int phytium_mci_runtime_suspend(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct phytium_mci_host *host = mmc_priv(mmc); + + phytium_mci_deinit_hw(host); + return 0; +} +EXPORT_SYMBOL(phytium_mci_runtime_suspend); + +int 
phytium_mci_runtime_resume(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct phytium_mci_host *host = mmc_priv(mmc); + + phytium_mci_init_hw(host); + return 0; +} +EXPORT_SYMBOL(phytium_mci_runtime_resume); + +#endif + +static struct mmc_host_ops phytium_mci_ops = { + .post_req = phytium_mci_post_req, + .pre_req = phytium_mci_pre_req, + .request = phytium_mci_ops_request, + .set_ios = phytium_mci_ops_set_ios, + .get_cd = phytium_mci_get_cd, + .enable_sdio_irq = phytium_mci_enable_sdio_irq, + .ack_sdio_irq = phytium_mci_ack_sdio_irq, + .card_busy = phytium_mci_card_busy, + .start_signal_voltage_switch = phytium_mci_ops_switch_volt, + .hw_reset = phytium_mci_hw_reset, +}; + +int phytium_mci_common_probe(struct phytium_mci_host *host) +{ + struct mmc_host *mmc = host->mmc; + struct device *dev = host->dev; + int uhs_reg_value = 0x502; + int ret; + + dma_set_mask(dev, DMA_BIT_MASK(64)); + dma_set_coherent_mask(dev, DMA_BIT_MASK(64)); + + timer_setup(&host->hotplug_timer, hotplug_timer_func, 0); + + mmc->f_min = MCI_F_MIN; + if (!mmc->f_max) + mmc->f_max = MCI_F_MAX; + + mmc->ops = &phytium_mci_ops; + mmc->ocr_avail_sdio = MMC_VDD_32_33 | MMC_VDD_33_34; + mmc->ocr_avail_sd = MMC_VDD_32_33 | MMC_VDD_33_34; + mmc->ocr_avail_mmc = MMC_VDD_165_195; + mmc->caps |= host->caps; + + if (mmc->caps & MMC_CAP_SDIO_IRQ) { + mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; + dev_dbg(host->dev, "%s %d: MMC_CAP_SDIO_IRQ\n", __func__, __LINE__); + } + mmc->caps2 |= host->caps2; + if (host->is_use_dma) { + /* MMC core transfer sizes tunable parameters */ + mmc->max_segs = MAX_BD_NUM; + mmc->max_seg_size = 4 * 1024; + mmc->max_blk_size = 512; + mmc->max_req_size = 512 * 1024; + mmc->max_blk_count = mmc->max_req_size / 512; + host->dma.adma_table = dma_zalloc_coherent(host->dev, + MAX_BD_NUM * + sizeof(struct phytium_adma2_64_desc), + &host->dma.adma_addr, GFP_KERNEL); + if (!host->dma.adma_table) + return MCI_REALEASE_MEM; + + host->dma.desc_sz = ADMA2_64_DESC_SZ; + 
phytium_mci_init_adma_table(host, &host->dma); + } else { + mmc->max_segs = MAX_BD_NUM; + mmc->max_seg_size = 4 * 1024; + mmc->max_blk_size = 512; + mmc->max_req_size = 4 * 512; + mmc->max_blk_count = mmc->max_req_size / 512; + } + writel(MCI_SET_FIFOTH(0x2, 7, 8), host->base + MCI_FIFOTH); + sdr_clr_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); + phytium_mci_update_external_clk(host, uhs_reg_value); + + spin_lock_init(&host->lock); + + phytium_mci_init_hw(host); + ret = devm_request_irq(host->dev, host->irq, phytium_mci_irq, + host->irq_flags, "phytium-mci", host); + + if (ret) + return ret; + + ret = mmc_add_host(mmc); + + if (ret) { + dev_err(host->dev, "%s %d: mmc add host!\n", __func__, __LINE__); + return ret; + } + return 0; +} +EXPORT_SYMBOL(phytium_mci_common_probe); + +MODULE_DESCRIPTION("Phytium Multimedia Card Interface driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Cheng Quan "); diff --git a/drivers/mmc/host/phytium-mci.h b/drivers/mmc/host/phytium-mci.h new file mode 100644 index 000000000000..53ba351b6c0d --- /dev/null +++ b/drivers/mmc/host/phytium-mci.h @@ -0,0 +1,349 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Driver for Phytium Multimedia Card Interface + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. 
+ */ + +#ifndef __PHYTIUM_MCI_H +#define __PHYTIUM_MCI_H + +#include +#include +#include +#include +#include +#include + +/*------------------------------------------------------*/ +/* Common Definition */ +/*------------------------------------------------------*/ +#define MAX_BD_NUM 128 +#define SD_BLOCK_SIZE 512 + +#define MCI_BUS_1BITS 0x0 +#define MCI_BUS_4BITS 0x1 +#define MCI_BUS_8BITS (0x1 << 16) + +#define MCI_SD_DRV_VALUE 0 +#define MCI_SD_SAMP_VALUE_MAX 0 +#define MCI_SD_SAMP_VALUE_MIN 0 + +#define MCI_TIMEOUT_CMD_VALUE 0xFFFFFFFF +#define MCI_POWER_ON 1 +#define MCI_POWER_OFF 0 + +#define MCI_PREPARE_FLAG (0x1 << 0) +#define MCI_ASYNC_FLAG (0x1 << 1) +#define MCI_MMAP_FLAG (0x1 << 2) + +#define MCI_CMD_TIMEOUT (HZ/10 * 50) /* 100ms x5 */ +#define MCI_DATA_TIMEOUT (HZ * 10) /* 1000ms x5 */ + +#define MCI_CMD_TYPE_ADTC 0x2 + +#define MCI_F_MIN 400000 +#define MCI_F_MAX 50000000 + +#define MCI_CLK 1200000000 +#define MCI_REALEASE_MEM 0x1 +#define MCI_MAX_FIFO_CNT 0x800 + +/* FIFOTH register defines */ +#define MCI_SET_FIFOTH(m, r, t) (((m) & 0x7) << 28 | \ + ((r) & 0xFFF) << 16 | ((t) & 0xFFF)) +/* Card read threshold */ +#define MCI_SET_THLD(v, x) (((v) & 0xFFF) << 16 | (x)) +#define MCI_CARD_WR_THR_EN BIT(2) +#define MCI_CARD_RD_THR_EN BIT(0) + +/*----------------------------------------------------------------------*/ +/* Register Offset */ +/*----------------------------------------------------------------------*/ +#define MCI_CNTRL 0x00 /* the controller config reg */ +#define MCI_PWREN 0x04 /* the power enable reg */ +#define MCI_CLKDIV 0x08 /* the clock divider reg */ +#define MCI_CLKENA 0x10 /* the clock enable reg */ +#define MCI_TMOUT 0x14 /* the timeout reg */ +#define MCI_CTYPE 0x18 /* the card type reg */ +#define MCI_BLKSIZ 0x1C /* the block size reg */ +#define MCI_BYTCNT 0x20 /* the byte count reg */ +#define MCI_INT_MASK 0x24 /* the interrupt mask reg */ +#define MCI_CMDARG 0x28 /* the command argument reg */ +#define MCI_CMD 0x2C /* the 
command reg */ +#define MCI_RESP0 0x30 /* the response reg0 */ +#define MCI_RESP1 0x34 /* the response reg1 */ +#define MCI_RESP2 0x38 /* the response reg2 */ +#define MCI_RESP3 0X3C /* the response reg3 */ +#define MCI_MASKED_INTS 0x40 /* the masked interrupt status reg */ +#define MCI_RAW_INTS 0x44 /* the raw interrupt status reg */ +#define MCI_STATUS 0x48 /* the status reg */ +#define MCI_FIFOTH 0x4C /* the FIFO threshold watermark reg */ +#define MCI_CARD_DETECT 0x50 /* the card detect reg */ +#define MCI_CARD_WRTPRT 0x54 /* the card write protect reg */ +#define MCI_CCLK_RDY 0x58 /* first div is ready? 1:ready,0:not ready*/ +#define MCI_TRAN_CARD_CNT 0x5C /* the transferred CIU card byte count reg */ +#define MCI_TRAN_FIFO_CNT 0x60 /* the transferred host to FIFO byte count reg */ +#define MCI_DEBNCE 0x64 /* the debounce count reg */ +#define MCI_UID 0x68 /* the user ID reg */ +#define MCI_VID 0x6C /* the controller version ID reg */ +#define MCI_HWCONF 0x70 /* the hardware configuration reg */ +#define MCI_UHS_REG 0x74 /* the UHS-I reg */ +#define MCI_CARD_RESET 0x78 /* the card reset reg */ +#define MCI_BUS_MODE 0x80 /* the bus mode reg */ +#define MCI_DESC_LIST_ADDRL 0x88 /* the descriptor list low base address reg */ +#define MCI_DESC_LIST_ADDRH 0x8C /* the descriptor list high base address reg */ +#define MCI_DMAC_STATUS 0x90 /* the internal DMAC status reg */ +#define MCI_DMAC_INT_ENA 0x94 /* the internal DMAC interrupt enable reg */ +#define MCI_CUR_DESC_ADDRL 0x98 /* the current host descriptor low address reg */ +#define MCI_CUR_DESC_ADDRH 0x9C /* the current host descriptor high address reg */ +#define MCI_CUR_BUF_ADDRL 0xA0 /* the current buffer low address reg */ +#define MCI_CUR_BUF_ADDRH 0xA4 /* the current buffer high address reg */ +#define MCI_CARD_THRCTL 0x100 /* the card threshold control reg */ +#define MCI_UHS_REG_EXT 0x108 /* the UHS register extension */ +#define MCI_EMMC_DDR_REG 0x10C /* the EMMC DDR reg */ +#define MCI_ENABLE_SHIFT 
0x110 /* the enable phase shift reg */ +#define MCI_DATA 0x200 /* the data FIFO access */ + +/* Command register defines */ +#define MCI_CMD_START BIT(31) +#define MCI_CMD_USE_HOLD_REG BIT(29) +#define MCI_CMD_VOLT_SWITCH BIT(28) +#define MCI_CMD_CCS_EXP BIT(23) +#define MCI_CMD_CEATA_RD BIT(22) +#define MCI_CMD_UPD_CLK BIT(21) +#define MCI_CMD_INIT BIT(15) +#define MCI_CMD_STOP BIT(14) +#define MCI_CMD_PRV_DAT_WAIT BIT(13) +#define MCI_CMD_SEND_STOP BIT(12) +#define MCI_CMD_STRM_MODE BIT(11) +#define MCI_CMD_DAT_WR BIT(10) +#define MCI_CMD_DAT_EXP BIT(9) +#define MCI_CMD_RESP_CRC BIT(8) +#define MCI_CMD_RESP_LONG BIT(7) +#define MCI_CMD_RESP_EXP BIT(6) +#define MCI_CMD_INDX(n) ((n) & 0x1F) + +/*------------------------------------------------------*/ +/* Register Mask */ +/*------------------------------------------------------*/ +/* MCI_CNTRL mask */ +#define MCI_CNTRL_CONTROLLER_RESET (0x1 << 0) /* RW */ +#define MCI_CNTRL_FIFO_RESET (0x1 << 1) /* RW */ +#define MCI_CNTRL_DMA_RESET (0x1 << 2) /* RW */ +#define MCI_CNTRL_RES (0x1 << 3) /* */ +#define MCI_CNTRL_INT_ENABLE (0x1 << 4) /* RW */ +#define MCI_CNTRL_DMA_ENABLE (0x1 << 5) /* RW */ +#define MCI_CNTRL_READ_WAIT (0x1 << 6) /* RW */ +#define MCI_CNTRL_SEND_IRQ_RESPONSE (0x1 << 7) /* RW */ +#define MCI_CNTRL_ABORT_READ_DATA (0x1 << 8) /* RW */ +#define MCI_CNTRL_ENDIAN (0x1 << 11) /* RW */ +//#define MCI_CNTRL_CARD_VOLTAGE_A (0xF << 16) /* RW */ +//#define MCI_CNTRL_CARD_VOLTAGE_B (0xF << 20) /* RW */ +#define MCI_CNTRL_ENABLE_OD_PULLUP (0x1 << 24) /* RW */ +#define MCI_CNTRL_USE_INTERNAL_DMAC (0x1 << 25) /* RW */ + +/* MCI_PWREN mask */ +#define MCI_PWREN_ENABLE (0x1 << 0) /* RW */ + +/* MCI_CLKENA mask */ +#define MCI_CLKENA_CCLK_ENABLE (0x1 << 0) /* RW */ +#define MCI_CLKENA_CCLK_LOW_POWER (0x1 << 16) /* RW */ +#define MCI_EXT_CLK_ENABLE (0x1 << 1) + +/* MCI_INT_MASK mask */ +#define MCI_INT_MASK_CD (0x1 << 0) /* RW */ +#define MCI_INT_MASK_RE (0x1 << 1) /* RW */ +#define MCI_INT_MASK_CMD (0x1 << 2) /* RW 
*/ +#define MCI_INT_MASK_DTO (0x1 << 3) /* RW */ +#define MCI_INT_MASK_TXDR (0x1 << 4) /* RW */ +#define MCI_INT_MASK_RXDR (0x1 << 5) /* RW */ +#define MCI_INT_MASK_RCRC (0x1 << 6) /* RW */ +#define MCI_INT_MASK_DCRC (0x1 << 7) /* RW */ +#define MCI_INT_MASK_RTO (0x1 << 8) /* RW */ +#define MCI_INT_MASK_DRTO (0x1 << 9) /* RW */ +#define MCI_INT_MASK_HTO (0x1 << 10) /* RW */ +#define MCI_INT_MASK_FRUN (0x1 << 11) /* RW */ +#define MCI_INT_MASK_HLE (0x1 << 12) /* RW */ +#define MCI_INT_MASK_SBE_BCI (0x1 << 13) /* RW */ +#define MCI_INT_MASK_ACD (0x1 << 14) /* RW */ +#define MCI_INT_MASK_EBE (0x1 << 15) /* RW */ +#define MCI_INT_MASK_SDIO (0x1 << 16) /* RW */ + +/* MCI_MASKED_INTS mask */ +#define MCI_MASKED_INTS_CD (0x1 << 0) /* RO */ +#define MCI_MASKED_INTS_RE (0x1 << 1) /* RO */ +#define MCI_MASKED_INTS_CMD (0x1 << 2) /* RO */ +#define MCI_MASKED_INTS_DTO (0x1 << 3) /* RO */ +#define MCI_MASKED_INTS_TXDR (0x1 << 4) /* RO */ +#define MCI_MASKED_INTS_RXDR (0x1 << 5) /* RO */ +#define MCI_MASKED_INTS_RCRC (0x1 << 6) /* RO */ +#define MCI_MASKED_INTS_DCRC (0x1 << 7) /* RO */ +#define MCI_MASKED_INTS_RTO (0x1 << 8) /* RO */ +#define MCI_MASKED_INTS_DRTO (0x1 << 9) /* RO */ +#define MCI_MASKED_INTS_HTO (0x1 << 10) /* RO */ +#define MCI_MASKED_INTS_FRUN (0x1 << 11) /* RO */ +#define MCI_MASKED_INTS_HLE (0x1 << 12) /* RO */ +#define MCI_MASKED_INTS_SBE_BCI (0x1 << 13) /* RO */ +#define MCI_MASKED_INTS_ACD (0x1 << 14) /* RO */ +#define MCI_MASKED_INTS_EBE (0x1 << 15) /* RO */ +#define MCI_MASKED_INTS_SDIO (0x1 << 16) /* RO */ + +/* MCI_RAW_INTS mask */ +#define MCI_RAW_INTS_CD (0x1 << 0) /* W1C */ +#define MCI_RAW_INTS_RE (0x1 << 1) /* W1C */ +#define MCI_RAW_INTS_CMD (0x1 << 2) /* W1C */ +#define MCI_RAW_INTS_DTO (0x1 << 3) /* W1C */ +#define MCI_RAW_INTS_TXDR (0x1 << 4) /* W1C */ +#define MCI_RAW_INTS_RXDR (0x1 << 5) /* W1C */ +#define MCI_RAW_INTS_RCRC (0x1 << 6) /* W1C */ +#define MCI_RAW_INTS_DCRC (0x1 << 7) /* W1C */ +#define MCI_RAW_INTS_RTO (0x1 << 8) /* W1C */ 
+#define MCI_RAW_INTS_DRTO (0x1 << 9) /* W1C */ +#define MCI_RAW_INTS_HTO (0x1 << 10) /* W1C */ +#define MCI_RAW_INTS_FRUN (0x1 << 11) /* W1C */ +#define MCI_RAW_INTS_HLE (0x1 << 12) /* W1C */ +#define MCI_RAW_INTS_SBE_BCI (0x1 << 13) /* W1C */ +#define MCI_RAW_INTS_ACD (0x1 << 14) /* W1C */ +#define MCI_RAW_INTS_EBE (0x1 << 15) /* W1C */ +#define MCI_RAW_INTS_SDIO (0x1 << 16) /* W1C */ + +/* MCI_STATUS mask */ +#define MCI_STATUS_FIFO_RX (0x1 << 0) /* RO */ +#define MCI_STATUS_FIFO_TX (0x1 << 1) /* RO */ +#define MCI_STATUS_FIFO_EMPTY (0x1 << 2) /* RO */ +#define MCI_STATUS_FIFO_FULL (0x1 << 3) /* RO */ +#define MCI_STATUS_CARD_STATUS (0x1 << 8) /* RO */ +#define MCI_STATUS_CARD_BUSY (0x1 << 9) /* RO */ +#define MCI_STATUS_DATA_BUSY (0x1 << 10) /* RO */ +#define MCI_STATUS_DMA_ACK (0x1 << 31) /* RO */ +#define MCI_STATUS_DMA_REQ (0x1 << 32) /* RO */ + +/* MCI_UHS_REG mask */ +#define MCI_UHS_REG_VOLT (0x1 << 0) /* RW */ +#define MCI_UHS_REG_DDR (0x1 << 16) /* RW */ + +/* MCI_CARD_RESET mask */ +#define MCI_CARD_RESET_ENABLE (0x1 << 0) /* RW */ + +/* MCI_BUS_MODE mask */ +#define MCI_BUS_MODE_SWR (0x1 << 0) /* RW */ +#define MCI_BUS_MODE_FB (0x1 << 1) /* RW */ +#define MCI_BUS_MODE_DE (0x1 << 7) /* RW */ + +/* MCI_DMAC_STATUS mask */ +#define MCI_DMAC_STATUS_TI (0x1 << 0) /* RW */ +#define MCI_DMAC_STATUS_RI (0x1 << 1) /* RW */ +#define MCI_DMAC_STATUS_FBE (0x1 << 2) /* RW */ +#define MCI_DMAC_STATUS_DU (0x1 << 4) /* RW */ +#define MCI_DMAC_STATUS_NIS (0x1 << 8) /* RW */ +#define MCI_DMAC_STATUS_AIS (0x1 << 9) /* RW */ + +/* MCI_DMAC_INT_ENA mask */ +#define MCI_DMAC_INT_ENA_TI (0x1 << 0) /* RW */ +#define MCI_DMAC_INT_ENA_RI (0x1 << 1) /* RW */ +#define MCI_DMAC_INT_ENA_FBE (0x1 << 2) /* RW */ +#define MCI_DMAC_INT_ENA_DU (0x1 << 4) /* RW */ +#define MCI_DMAC_INT_ENA_CES (0x1 << 5) /* RW */ +#define MCI_DMAC_INT_ENA_NIS (0x1 << 8) /* RW */ +#define MCI_DMAC_INT_ENA_AIS (0x1 << 9) /* RW */ + +/* MCI_CARD_THRCTL mask */ +#define MCI_CARD_THRCTL_CARDRD (0x1 << 0) /* 
RW */ +#define MCI_CARD_THRCTL_BUSY_CLR (0x1 << 1) /* RW */ +#define MCI_CARD_THRCTL_CARDWR (0x1 << 2) /* RW */ + +/* MCI_UHS_REG_EXT mask */ +#define MCI_UHS_REG_EXT_MMC_VOLT (0x1 << 0) /* RW */ +#define MCI_UHS_REG_EXT_CLK_ENA (0x1 << 1) /* RW */ + +/* MCI_EMMC_DDR_REG mask */ +#define MCI_EMMC_DDR_CYCLE (0x1 << 0) /* RW */ + +/*--------------------------------------*/ +/* Structure Type */ +/*--------------------------------------*/ +/* Maximum segments assuming a 512KiB maximum requisition */ +/* size and a minimum4KiB page size. */ +#define MCI_MAX_SEGS 128 +/* ADMA2 64-bit DMA descriptor size */ +#define ADMA2_64_DESC_SZ 32 + +/* Each descriptor can transfer up to 4KB of data in chained mode */ +/*ADMA2 64-bit descriptor.*/ +struct phytium_adma2_64_desc { + u32 attribute; +#define IDMAC_DES0_DIC BIT(1) +#define IDMAC_DES0_LD BIT(2) +#define IDMAC_DES0_FD BIT(3) +#define IDMAC_DES0_CH BIT(4) +#define IDMAC_DES0_ER BIT(5) +#define IDMAC_DES0_CES BIT(30) +#define IDMAC_DES0_OWN BIT(31) + u32 NON1; + u32 len; + u32 NON2; + u32 addr_lo; /* Lower 32-bits of Buffer Address Pointer 1*/ + u32 addr_hi; /* Upper 32-bits of Buffer Address Pointer 1*/ + u32 desc_lo; /* Lower 32-bits of Next Descriptor Address */ + u32 desc_hi; /* Upper 32-bits of Next Descriptor Address */ +} __packed __aligned(4); + +struct phytium_mci_dma { + struct scatterlist *sg; /* I/O scatter list */ + /* ADMA descriptor table, pointer to adma_table array */ + struct phytium_adma2_64_desc *adma_table; + /* Mapped ADMA descr. 
table, the physical address of adma_table array */ + dma_addr_t adma_addr; + unsigned int desc_sz; /* ADMA descriptor size */ +}; + +enum adtc_t { + COMMOM_ADTC = 0, + BLOCK_RW_ADTC = 1 +}; + +struct phytium_mci_host { + struct device *dev; + struct mmc_host *mmc; + u32 caps; + u32 caps2; + spinlock_t lock; + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + int error; + void __iomem *base; /* host base address */ + void *adma_table1; + dma_addr_t adma_addr1; + struct phytium_mci_dma dma_rx; /* dma channel */ + struct phytium_mci_dma dma_tx; /* dma channel */ + struct phytium_mci_dma dma; /* dma channel */ + u64 dma_mask; + bool vqmmc_enabled; + u32 *sg_virt_addr; + enum adtc_t adtc_type; /* 0:common adtc cmd; 1:block r/w adtc cmd;*/ + struct timer_list hotplug_timer; + struct delayed_work req_timeout; + int irq; /* host interrupt */ + u32 current_rca; /*the current rca value*/ + u32 current_ios_clk; + u32 is_use_dma; + u32 is_device_x100; + struct clk *src_clk; /* phytium_mci source clock */ + unsigned long clk_rate; + unsigned long clk_div; + unsigned long irq_flags; + unsigned long flags; +#define MCI_CARD_NEED_INIT 1 + +}; + +int phytium_mci_common_probe(struct phytium_mci_host *host); +void phytium_mci_deinit_hw(struct phytium_mci_host *host); +int phytium_mci_runtime_suspend(struct device *dev); +int phytium_mci_runtime_resume(struct device *dev); +int phytium_mci_resume(struct device *dev); +int phytium_mci_suspend(struct device *dev); + +#endif /* __PHYTIUM_MCI_HW_H */ diff --git a/drivers/mmc/host/phytium-sdci.c b/drivers/mmc/host/phytium-sdci.c new file mode 100644 index 000000000000..effa50bc5a49 --- /dev/null +++ b/drivers/mmc/host/phytium-sdci.c @@ -0,0 +1,1442 @@ +/* + * File Name: phytium_sdci.c - Phytium FT SDCI dirver + * + * Copyright (C) 2019 Phytium Technology Co.,Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "phytium-sdci.h" + +static const u32 cmd_ints_mask = SDCI_SDCI_NORMAL_ISER_ECC_EN | SDCI_SDCI_NORMAL_ISER_EEI_EN; +static const u32 data_ints_mask = SDCI_BD_ISER_ETRS_EN; +static const u32 err_ints_mask = SDCI_ERROR_ISER_ECTE_EN | SDCI_ERROR_ISR_CCRCE_EN | + SDCI_ERROR_ISR_CIR_EN | SDCI_ERROR_ISR_CNR_EN; +static const u32 caps = MMC_CAP_ERASE; + +static void hotplug_timer_func(struct timer_list *t); +static bool phytium_sdci_private_send_cmd(struct phytium_sdci_host *host, + u32 cmd, u32 resp_type, u32 arg); +static bool phytium_sdci_cmd_done(struct phytium_sdci_host *host, int events, + struct mmc_request *mrq, + struct mmc_command *cmd); +static bool phytium_sdci_data_xfer_done(struct phytium_sdci_host *host, + u32 events, struct mmc_request *mrq, + struct mmc_data *data); +static void phytium_sdci_cmd_next(struct phytium_sdci_host *host, + struct mmc_request *mrq, + struct mmc_command *cmd); + +static int phytium_sdci_cmd13_process(struct phytium_sdci_host *host, + struct mmc_request *mrq, + struct mmc_data *data, + u32 wait_timeout_ms, + u32 send_once_time_ms); + +static int phytium_sd_error(struct phytium_sdci_host *host) +{ + int temp; + temp = readl(host->base + SDCI_NORMAL_ISR); + dev_err(host->dev, "[%s %d]SDCI_NORMAL_ISR:%x\n", __func__, __LINE__, temp); + temp = readl(host->base + SDCI_BD_ISR); + temp = readl(host->base + SDCI_ERROR_ISR); + dev_err(host->dev, "[%s %d]SDCI_ERROR_ISR:%x\n", __func__, __LINE__, temp); + temp = 
readl(host->base + SDCI_BD_ISR); + dev_err(host->dev, "[%s %d]SDCI_BD_ISR:%x\n", __func__, __LINE__, temp); + temp = readl(host->base + SDCI_RESP0); + dev_err(host->dev, "[%s %d]SDCI_RESP0:%x\n", __func__, __LINE__, temp); + + return 0; +} + +static void sdr_set_bits(void __iomem *reg, u32 bs) +{ + u32 val; + + val = readl(reg); + val |= bs; + + writel(val, reg); +} + +static void sdr_clr_bits(void __iomem *reg, u32 bs) +{ + u32 val; + + val = readl(reg); + val &= ~bs; + + writel(val, reg); +} + +static void phytium_sdci_reset_hw(struct phytium_sdci_host *host) +{ + sdr_set_bits(host->base + SDCI_SOFTWARE, + SDCI_SOFTWARE_SRST); + sdr_clr_bits(host->base + SDCI_SOFTWARE, + SDCI_SOFTWARE_SRST); + while (!(readl(host->base + SDCI_STATUS) & SDCI_STATUS_IDIE)) + cpu_relax(); +} + +static void phytium_sdci_prepare_data(struct phytium_sdci_host *host, + struct mmc_request *mrq) +{ + struct mmc_data *data = mrq->data; + bool read; + + read = (data->flags & MMC_DATA_READ) != 0; + data->sg_count = dma_map_sg(host->dev, data->sg, data->sg_len, + read ? DMA_FROM_DEVICE : DMA_TO_DEVICE); +} + +static void phytium_sdci_unprepare_data(struct phytium_sdci_host *host, + struct mmc_request *mrq) +{ + bool read; + struct mmc_data *data = mrq->data; + + read = (data->flags & MMC_DATA_READ) != 0; + dma_unmap_sg(host->dev, data->sg, data->sg_len, + read ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE); +} + +static void phytium_sdci_set_clk(struct phytium_sdci_host *host, + struct mmc_ios *ios) +{ + unsigned long clk_rate; + u32 div = 0xffffffff, div_reg; + + if (ios->clock) { + clk_rate = host->clk_rate; + div = ((clk_rate / (2 * ios->clock)) - 1); + div_reg = readl(host->base + SDCI_CLOCK_D); + if (div_reg == div) + return; + writel(div, host->base + SDCI_CLOCK_D); + writel(0, host->base + SDCI_SD_DRV); + writel(5, host->base + SDCI_SD_SAMP); + + sdr_set_bits(host->base + SDCI_SOFTWARE, SDCI_SOFTWARE_SRST); + sdr_clr_bits(host->base + SDCI_SOFTWARE, SDCI_SOFTWARE_SRST); + while (!(readl(host->base + SDCI_STATUS) & SDCI_STATUS_IDIE)) + cpu_relax(); + dev_dbg(host->dev, "host->clk_rate: %ld, ios->clock: %d\n", + host->clk_rate, ios->clock); + } +} + + +static inline u32 phytium_sdci_cmd_find_resp(struct phytium_sdci_host *host, + struct mmc_request *mrq, + struct mmc_command *cmd) +{ + u32 resp; + + switch (mmc_resp_type(cmd)) { + case MMC_RSP_R1: + resp = 0x2; + break; + case MMC_RSP_R1B: + resp = 0x2; + break; + case MMC_RSP_R2: + resp = 0x1; + break; + case MMC_RSP_R3: + resp = 0x3; + break; + case MMC_RSP_NONE: + default: + resp = 0x0; + break; + } + + return resp; +} + +static inline u32 phytium_sdci_cmd_prepare_raw_cmd(struct phytium_sdci_host *host, + struct mmc_request *mrq, struct mmc_command *cmd) +{ + /* + * rawcmd : + * trty << 14 | opcode << 8 | cmdw << 6 | cice << 4 | crce << 3 | resp + */ + u32 resp, rawcmd; + u32 opcode = cmd->opcode; + + resp = phytium_sdci_cmd_find_resp(host, mrq, cmd); + rawcmd = ((opcode << 8) | resp); + + if ((cmd->flags & MMC_CMD_MASK) == MMC_CMD_ADTC) { + rawcmd = (rawcmd | (SDCI_CMD_TYPE_ADTC << 14)); + } + + return rawcmd; +} + +static void +phytium_sdci_unexpected_error_handler(struct phytium_sdci_host *host, + struct mmc_request *mrq, + struct mmc_data *data, + int err_type) +{ + unsigned long flags; + int error; + + spin_lock_irqsave(&host->lock, flags); + host->mrq = NULL; + host->cmd 
= NULL; + host->data = NULL; + spin_unlock_irqrestore(&host->lock, flags); + + if (err_type & ERR_CARD_ABSENT) { + host->mmc->detect_change = 1; + dev_dbg(host->dev, "SD is absent when send cmd:%d\n", mrq->cmd->opcode); + } + + switch (err_type) { + case ERR_CARD_ABSENT: + error = -ENOMEDIUM; + break; + case ERR_TIMEOUT: + error = -ETIMEDOUT; + break; + case ERR_CMD_RESPONED: + error = -EIO; + break; + default: + error = -ETIMEDOUT; + break; + } + + if (data) { + data->error = error; + phytium_sdci_unprepare_data(host, mrq); + + if ((data->flags & MMC_DATA_READ) == MMC_DATA_READ || + (data->flags & MMC_DATA_WRITE) == MMC_DATA_WRITE) + phytium_sdci_data_xfer_done(host, SDCI_BD_ISR_TRS_R, mrq, data); + } else { + mrq->cmd->error = error; + } + + mmc_request_done(host->mmc, mrq); +} + +static bool phytium_sdci_start_data(struct phytium_sdci_host *host, struct mmc_request *mrq, + struct mmc_command *cmd, struct mmc_data *data) +{ + bool read, res; + u32 sg_dma_addrh, sg_dma_addrl; + u32 sd_block_addrh, sd_block_addrl; + u32 temp, timeout, sd_status; + u32 block_cnt = 0; + u32 sd_block_addr = cmd->arg; + u32 private_cmd, resp_type, arg; + u32 j, dma_len; + unsigned long deadline_time; + dma_addr_t dma_address; + struct scatterlist *sg; + int ret; + + WARN_ON(host->cmd); + host->cmd = cmd; + + WARN_ON(host->data); + host->data = data; + read = data->flags & MMC_DATA_READ; + + for_each_sg(data->sg, sg, data->sg_count, j) { + writel(0, host->base + SDCI_COMMAND); + + dma_address = sg_dma_address(sg); + sg_dma_addrh = (u32) (dma_address >> 32); + sg_dma_addrl = (u32) dma_address; + + dma_len = sg_dma_len(sg); + block_cnt = (dma_len / SD_BLOCK_SIZE); + + sd_block_addrh = 0; + sd_block_addrl = sd_block_addr; + + sdr_set_bits(host->base + SDCI_SOFTWARE, SDCI_SOFTWARE_BDRST); + sdr_clr_bits(host->base + SDCI_SOFTWARE, SDCI_SOFTWARE_BDRST); + writel(block_cnt, host->base + SDCI_BLK_CNT); + + if ((mrq->data->flags & MMC_DATA_READ) == MMC_DATA_READ) { + writel(sg_dma_addrl, 
host->base + SDCI_BD_RX); + writel(sg_dma_addrh, host->base + SDCI_BD_RX); + writel(sd_block_addrl, host->base + SDCI_BD_RX); + writel(sd_block_addrh, host->base + SDCI_BD_RX); + timeout = 100 * block_cnt; + } else { + timeout = 250 * block_cnt; + ret = phytium_sdci_cmd13_process(host, mrq, data, timeout, 1); + if (ret != SDCI_CMD13_OK) + return false; + + writel(sg_dma_addrl, host->base + SDCI_BD_TX); + writel(sg_dma_addrh, host->base + SDCI_BD_TX); + writel(sd_block_addrl, host->base + SDCI_BD_TX); + writel(sd_block_addrh, host->base + SDCI_BD_TX); + } + + deadline_time = jiffies + msecs_to_jiffies(timeout); + + temp = readl(host->base + SDCI_BD_ISR); + if ((mrq->data->flags & MMC_DATA_READ) == MMC_DATA_READ) { + while ((temp & SDCI_BD_ISR_TRS_R) != SDCI_BD_ISR_TRS_R) { + sd_status = readl(host->base + SDCI_STATUS); + if (sd_status & SDCI_STATUS_CDSL) { + phytium_sdci_unexpected_error_handler(host, mrq, data, + ERR_CARD_ABSENT); + if (temp & SDCI_BD_ISR_DAIS) + writel(1, host->base + SDCI_BD_ISR); + return false; + } + + temp = readl(host->base + SDCI_BD_ISR); + if (time_after(jiffies, deadline_time)) { + phytium_sdci_unexpected_error_handler(host, mrq, data, + ERR_TIMEOUT); + dev_err(host->dev, + "Read Data timeout:jiffies:0x%lx,dt_jiffies:" + "0x%lx, BD_isr_reg:0x%x,cmd:%d, REG_D0:0x%x\n", + jiffies, jiffies - deadline_time, temp, + cmd->opcode, readl(host->base + SDCI_STATUS)); + + return false; + } + } + } else { + while ((temp & SDCI_BD_ISR_TRS_W) != SDCI_BD_ISR_TRS_W) { + sd_status = readl(host->base + SDCI_STATUS); + if (sd_status & SDCI_STATUS_CDSL) { + phytium_sdci_unexpected_error_handler(host, mrq, data, + ERR_CARD_ABSENT); + dev_err(host->dev, "[%s][%d]: Card absent ! 
cmd(%d)\n", + __func__, __LINE__, mrq->cmd->opcode); + return false; + } + + temp = readl(host->base + SDCI_BD_ISR); + if (time_after(jiffies, deadline_time)) { + phytium_sdci_unexpected_error_handler(host, mrq, data, + ERR_TIMEOUT); + dev_err(host->dev, + "Write Date timeout: jiffies:0x%lx,dt_jiffies:" + "0x%lx,BD_isr_reg:0x%x\n", + jiffies, jiffies - deadline_time, temp); + return false; + } + } + } + writel(1, host->base + SDCI_BD_ISR); + writel(1, host->base + SDCI_NORMAL_ISR); + sd_block_addr = sd_block_addr + block_cnt; + + if (j < (data->sg_count - 1) && 1 < block_cnt) { + private_cmd = MMC_STOP_TRANSMISSION; + resp_type = 0x2; + arg = 0; + res = phytium_sdci_private_send_cmd(host, private_cmd, + resp_type, arg); + if (!res) { + sd_status = readl(host->base + SDCI_STATUS); + if (sd_status & SDCI_STATUS_CDSL) { + phytium_sdci_unexpected_error_handler(host, mrq, data, + ERR_CARD_ABSENT); + writel(1, host->base + SDCI_BD_ISR); + dev_err(host->dev, + "[%s][%d]:Card absent ! private_cmd(%d)\n", + __func__, __LINE__, private_cmd); + } else { + phytium_sdci_unexpected_error_handler(host, mrq, data, + ERR_CMD_RESPONED); + dev_err(host->dev, + "[%s][%d] cmd(%d) response errored\n", + __func__, __LINE__, mrq->cmd->opcode); + phytium_sd_error(host); + } + writel(1, host->base + SDCI_NORMAL_ISR); + return false; + } + writel(1, host->base + SDCI_NORMAL_ISR); + } + } + + host->is_multi_rw_only_one_blkcnt = false; + + if ((cmd->opcode == MMC_READ_MULTIPLE_BLOCK && block_cnt == 1) || + (cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK && block_cnt == 1)) + host->is_multi_rw_only_one_blkcnt = true; + + phytium_sdci_cmd_done(host, SDCI_NORMAL_ISR_CC, mrq, cmd); + if ((mrq->data->flags & MMC_DATA_READ) == MMC_DATA_READ) + phytium_sdci_data_xfer_done(host, SDCI_BD_ISR_TRS_R, + mrq, data); + else + phytium_sdci_data_xfer_done(host, SDCI_BD_ISR_TRS_W, + mrq, data); + + return true; +} + +static int phytium_sdci_auto_cmd_done(struct phytium_sdci_host *host, + int events, struct 
mmc_command *cmd) +{ + u32 *rsp = cmd->resp; + + rsp[0] = readl(host->base + SDCI_RESP0); + + if (events & SDCI_NORMAL_ISR_CC) + cmd->error = 0; + else { + phytium_sdci_reset_hw(host); + dev_err(host->dev, + "%s: AUTO_CMD%d arg=%08X; rsp %08X; cmd_error=%d\n", + __func__, cmd->opcode, cmd->arg, rsp[0], cmd->error); + } + + return cmd->error; +} + +static void phytium_sdci_track_cmd_data(struct phytium_sdci_host *host, + struct mmc_command *cmd, + struct mmc_data *data) +{ + if (host->error) + dev_dbg(host->dev, "%s: cmd=%d arg=%08X; host->error=0x%08X\n", + __func__, cmd->opcode, cmd->arg, host->error); +} + +static void phytium_sdci_request_done(struct phytium_sdci_host *host, + struct mmc_request *mrq) +{ + unsigned long flags; + + dev_dbg(host->dev, + "%s_%d:mrq->cmd->opcode:%d, mrq->cmd->arg:0x%x resp 0x%x 0x%x 0x%x 0x%x\n", + __func__, __LINE__, mrq->cmd->opcode, mrq->cmd->arg, + mrq->cmd->resp[0], mrq->cmd->resp[1], mrq->cmd->resp[2], + mrq->cmd->resp[3]); + + spin_lock_irqsave(&host->lock, flags); + host->mrq = NULL; + spin_unlock_irqrestore(&host->lock, flags); + + phytium_sdci_track_cmd_data(host, mrq->cmd, mrq->data); + if (mrq->data) + phytium_sdci_unprepare_data(host, mrq); + mmc_request_done(host->mmc, mrq); +} + +static bool +phytium_sdci_auto_command_done(struct phytium_sdci_host *host, int events, + struct mmc_request *mrq, struct mmc_command *cmd) +{ + u32 *rsp = cmd->resp; + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + host->cmd = NULL; + spin_unlock_irqrestore(&host->lock, flags); + + sdr_clr_bits(host->base + SDCI_NORMAL_ISER, cmd_ints_mask); + + rsp[0] = 0x900; + phytium_sdci_request_done(host, mrq); + return true; +} + +/* returns true if command is fully handled; returns false otherwise */ +static bool phytium_sdci_cmd_done(struct phytium_sdci_host *host, int events, + struct mmc_request *mrq, + struct mmc_command *cmd) +{ + bool done = false; + bool sbc_error; + unsigned long flags; + u32 *rsp = cmd->resp; + + if 
(mrq->sbc && cmd == mrq->cmd && + (events & SDCI_NORMAL_ISR_CC)) + phytium_sdci_auto_cmd_done(host, events, mrq->sbc); + + sbc_error = mrq->sbc && mrq->sbc->error; + + if (!sbc_error && !(events & (SDCI_NORMAL_ISR_CC | + SDCI_NORMAL_ISR_CR | + SDCI_NORMAL_ISR_TIMEOUT))) + return done; + + spin_lock_irqsave(&host->lock, flags); + done = !host->cmd; + host->cmd = NULL; + spin_unlock_irqrestore(&host->lock, flags); + + if (done) + return true; + + sdr_clr_bits(host->base + SDCI_NORMAL_ISER, cmd_ints_mask); + + if (cmd->flags & MMC_RSP_PRESENT) { + if (cmd->flags & MMC_RSP_136) { + rsp[0] = readl(host->base + SDCI_RESP0); + rsp[1] = readl(host->base + SDCI_RESP1); + rsp[2] = readl(host->base + SDCI_RESP2); + rsp[3] = readl(host->base + SDCI_RESP3); + } else + rsp[0] = readl(host->base + SDCI_RESP0); + + if(cmd->opcode == SD_SEND_RELATIVE_ADDR) + host->current_rca = rsp[0] & 0xFFFF0000; + } + + if (!sbc_error && + !(events & SDCI_NORMAL_ISR_CC) && + (events & SDCI_NORMAL_ISR_TIMEOUT)) + cmd->error = -ETIMEDOUT; + + if (cmd->error) + dev_dbg(host->dev, + "%s: cmd=%d arg=%08X; rsp %08X; cmd_error=%d\n", + __func__, cmd->opcode, cmd->arg, rsp[0], + cmd->error); + + phytium_sdci_cmd_next(host, mrq, cmd); + + return true; +} + +static bool set_databus_width(struct phytium_sdci_host *host) +{ + bool res; + u32 cmd, resp_type, arg; + + cmd = SD_APP_SET_BUS_WIDTH; + resp_type = 0x2; + arg = 0x2; + res = phytium_sdci_private_send_cmd(host, cmd, resp_type, arg); + if (!res) + return false; + + cmd = MMC_APP_CMD; + resp_type = 0x2; + arg = host->current_rca; + res = phytium_sdci_private_send_cmd(host, cmd, resp_type, arg); + if (!res) + return false; + + return true; +} + + +static void phytium_sdci_start_command(struct phytium_sdci_host *host, + struct mmc_request *mrq, + struct mmc_command *cmd) +{ + u32 rawcmd; + struct mmc_data *data = mrq->data; + dma_addr_t dma_adtc_buf; + u32 dma_bufh,dma_bufl; + u32 block_cnt = 0; + + WARN_ON(host->cmd); + host->cmd = cmd; + + cmd->error = 
0; + rawcmd = phytium_sdci_cmd_prepare_raw_cmd(host, mrq, cmd); + if (cmd->opcode == MMC_STOP_TRANSMISSION || + cmd->opcode == MMC_SEND_STATUS) + writel(1, host->base + SDCI_ERROR_ISR); + sdr_set_bits(host->base + SDCI_NORMAL_ISER, cmd_ints_mask); + writel(rawcmd, host->base + SDCI_COMMAND); + + if ((cmd->flags & MMC_CMD_MASK) == MMC_CMD_ADTC) { + WARN_ON(host->data); + host->data = data; + + dma_adtc_buf = host->dma_rx.bd_addr; + dma_bufh = (u32) (dma_adtc_buf >> 32); + dma_bufl = (u32) dma_adtc_buf; + block_cnt = mrq->data->blocks; + sdr_set_bits(host->base + SDCI_BD_ISER, data_ints_mask); + writel(block_cnt, host->base + SDCI_BLK_CNT); + + if ((mrq->data->flags & MMC_DATA_READ) == MMC_DATA_READ) { + writel(dma_bufl, host->base + SDCI_BD_RX); + writel(dma_bufh, host->base + SDCI_BD_RX); + writel(cmd->arg, host->base + SDCI_BD_RX); + writel(0, host->base + SDCI_BD_RX); + } else { + writel(dma_bufl, host->base + SDCI_BD_TX); + writel(dma_bufh, host->base + SDCI_BD_TX); + writel(cmd->arg, host->base + SDCI_BD_TX); + writel(0, host->base + SDCI_BD_TX); + } + } else { + writel(cmd->arg, host->base + SDCI_ARGUMENT); + } +} + +static void phytium_sdci_cmd_next(struct phytium_sdci_host *host, + struct mmc_request *mrq, + struct mmc_command *cmd) +{ + if (cmd->error || (mrq->sbc && mrq->sbc->error)) + phytium_sdci_request_done(host, mrq); + else if (cmd == mrq->sbc) + phytium_sdci_start_command(host, mrq, mrq->cmd); + else if (!cmd->data) + phytium_sdci_request_done(host, mrq); +} + +static int phytium_sdci_cmd13_process(struct phytium_sdci_host *host, + struct mmc_request *mrq, + struct mmc_data *data, + u32 wait_timeout_ms, + u32 send_once_time_ms) +{ + u32 private_cmd, resp_type, arg, temp, sd_status; + unsigned long deadline_time; + bool res; + + deadline_time = jiffies + msecs_to_jiffies(wait_timeout_ms); + + do { + private_cmd = MMC_SEND_STATUS; + resp_type = 0x2; + arg = host->current_rca; + + res = phytium_sdci_private_send_cmd(host, private_cmd, resp_type, arg); 
+ if (!res) { + sd_status = readl(host->base + SDCI_STATUS); + if (sd_status & SDCI_STATUS_CDSL) { + phytium_sdci_unexpected_error_handler(host, mrq, data, + ERR_CARD_ABSENT); + dev_err(host->dev, + "[%s][%d] Card absent! private_cmd(%d)\n", + __func__, __LINE__, private_cmd); + } else { + phytium_sdci_unexpected_error_handler(host, mrq, data, + ERR_CMD_RESPONED); + + dev_err(host->dev, + "[%s][%d] private_cmd(%d) response errored\n", + __func__, __LINE__, private_cmd); + phytium_sd_error(host); + } + writel(1, host->base + SDCI_BD_ISR); + return SDCI_CMD13_FAILED; + } + + temp = readl(host->base + SDCI_RESP0); + + if (time_after(jiffies, deadline_time)) { + + if (mrq->cmd->opcode == MMC_SEND_STATUS) + return SDCI_CMD13_OK; + + dev_err(host->dev, + "SD card is not in transfer mode,timeout:%d,rsp[0]:%x\n", + wait_timeout_ms, temp); + + phytium_sdci_unexpected_error_handler(host, mrq, data, + ERR_TIMEOUT); + phytium_sd_error(host); + return SDCI_CMD13_FAILED; + } + + writel(1, host->base + SDCI_NORMAL_ISR); + + if (CARD_TRAN_STATE != (temp & CARD_CURRENT_STATE) && send_once_time_ms) + mdelay(send_once_time_ms); + + } while (CARD_TRAN_STATE != (temp & CARD_CURRENT_STATE)); + + return SDCI_CMD13_OK; +} + +static void phytium_sdci_ops_request(struct mmc_host *mmc, + struct mmc_request *mrq) +{ + struct phytium_sdci_host *host = mmc_priv(mmc); + unsigned long flags; + bool res; + u32 status_sd; + int res_cmd13; + + host->error = 0; + WARN_ON(host->mrq); + host->mrq = mrq; + + dev_dbg(host->dev, + "phytium_sdci_ops_request:mrq->cmd->opcode:%d, mrq->cmd->arg:0x%x \n", + mrq->cmd->opcode, mrq->cmd->arg); + + if (mrq->cmd->opcode == MMC_SEND_STATUS && + (mrq->cmd->flags & MMC_CMD_MASK) != MMC_CMD_ADTC) { + u32 status = readl(host->base + SDCI_STATUS); + if (status & SDCI_STATUS_CDSL) { + phytium_sdci_unexpected_error_handler(host, mrq, NULL, + ERR_CARD_ABSENT); + return; + } else { + res_cmd13 = phytium_sdci_cmd13_process(host, mrq, NULL, 400, 5); + if (res_cmd13 == 
SDCI_CMD13_FAILED) + return; + } + } else if (mrq->cmd->opcode == MMC_STOP_TRANSMISSION) { + status_sd = readl(host->base + SDCI_STATUS); + if (status_sd & SDCI_STATUS_CDSL) { + phytium_sdci_unexpected_error_handler(host, mrq, NULL, + ERR_CARD_ABSENT); + return; + } + } + + if (mrq->data){ + phytium_sdci_prepare_data(host, mrq); + if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK || + mrq->cmd->opcode == MMC_READ_SINGLE_BLOCK || + mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK || + mrq->cmd->opcode == MMC_WRITE_BLOCK) { + host->adtc_type = BLOCK_RW_ADTC; + phytium_sdci_start_data(host, mrq, + mrq->cmd, mrq->data); + return; + } + host->adtc_type = COMMOM_ADTC; + } + + if (mrq->cmd->opcode == SD_IO_RW_DIRECT || + mrq->cmd->opcode == SD_IO_SEND_OP_COND) { + spin_lock_irqsave(&host->lock, flags); + host->mrq = NULL; + host->cmd = NULL; + spin_unlock_irqrestore(&host->lock, flags); + mrq->cmd->error = -EINVAL; + mmc_request_done(host->mmc, mrq); + + return; + } + + if (mrq->cmd->opcode == SD_APP_SEND_SCR) { + res = set_databus_width(host); + if (!res) { + phytium_sdci_unexpected_error_handler(host, mrq, NULL, ERR_CMD_RESPONED); + return; + } + } + + /* if SBC is required, we have HW option and SW option. 
+ * if HW option is enabled, and SBC does not have "special" flags, + * use HW option, otherwise use SW option + */ + if (mrq->sbc && + (!mmc_card_mmc(mmc->card) || (mrq->sbc->arg & 0xFFFF0000))) + phytium_sdci_start_command(host, mrq, mrq->sbc); + else + phytium_sdci_start_command(host, mrq, mrq->cmd); +} + +static void phytium_sdci_data_xfer_next(struct phytium_sdci_host *host, + struct mmc_request *mrq, + struct mmc_data *data) +{ + if (mmc_op_multi(mrq->cmd->opcode) && + mrq->stop && !mrq->stop->error && + !mrq->sbc && host->is_multi_rw_only_one_blkcnt) { + host->is_multi_rw_only_one_blkcnt = false; + phytium_sdci_auto_command_done(host, SDCI_NORMAL_ISR_CC, mrq, mrq->stop); + } else if (mmc_op_multi(mrq->cmd->opcode) && + mrq->stop && !mrq->stop->error && + !mrq->sbc) + phytium_sdci_start_command(host, mrq, mrq->stop); + else + phytium_sdci_request_done(host, mrq); +} + +static inline void get_data_buffer(struct mmc_data *data, + u32 *bytes, u32 **pointer) +{ + struct scatterlist *sg; + + sg = &data->sg[0]; + *bytes = sg->length; + *pointer = sg_virt(sg); +} + +static bool phytium_sdci_data_xfer_done(struct phytium_sdci_host *host, + u32 events, struct mmc_request *mrq, + struct mmc_data *data) +{ + struct mmc_command *stop = data->stop; + unsigned long flags; + bool done; + unsigned int check_data; + u32 sg_length,i; + u32 *sg_virt_addr; + + check_data = events & (SDCI_BD_ISR_TRS_R | SDCI_BD_ISR_TRS_W | SDCI_BD_ISR_EDTE); + + spin_lock_irqsave(&host->lock, flags); + done = !host->data; + if (check_data) + host->data = NULL; + spin_unlock_irqrestore(&host->lock, flags); + + if (done) + return true; + + if (check_data || (stop && stop->error)) { + sdr_clr_bits(host->base + SDCI_BD_ISER, data_ints_mask); + dev_dbg(host->dev, "DMA stop\n"); + + if (((events & SDCI_BD_ISR_TRS_R) || + (events & SDCI_BD_ISR_TRS_W)) && + (!stop || !stop->error)) { + if ((mrq->cmd->flags & MMC_CMD_MASK) == MMC_CMD_ADTC && + (host->adtc_type == COMMOM_ADTC)) { + get_data_buffer(data, 
&sg_length, + &host->sg_virt_addr); + sg_virt_addr = host->sg_virt_addr; + + for (i = 0; i < (sg_length/4); i++) { + *sg_virt_addr = host->dma_rx.buf[i]; + sg_virt_addr++; + } + } + data->bytes_xfered = data->blocks * data->blksz; + } else { + dev_dbg(host->dev, "interrupt events: %x\n", events); + phytium_sdci_reset_hw(host); + data->bytes_xfered = 0; + dev_dbg(host->dev, "%s: cmd=%d; blocks=%d", + __func__, mrq->cmd->opcode, data->blocks); + dev_dbg(host->dev, "data_error=%d xfer_size=%d\n", + (int)data->error, data->bytes_xfered); + } + + phytium_sdci_data_xfer_next(host, mrq, data); + done = true; + } + + return done; +} + + +static int phytium_sdci_card_busy(struct mmc_host *mmc) +{ + struct phytium_sdci_host *host = mmc_priv(mmc); + u32 status; + + /* check if any pin between dat[0:3] is low */ + status = readl(host->base + SDCI_STATUS); + if (((status >> 20) & 0xf) != 0xf) + return 1; + + return 0; +} + +static void phytium_sdci_request_timeout(struct work_struct *work) +{ + struct phytium_sdci_host *host; + + host = container_of(work, struct phytium_sdci_host, req_timeout.work); + dev_err(host->dev, "%s: aborting cmd/data/mrq\n", __func__); + if (host->mrq) { + dev_err(host->dev, "%s: aborting mrq=%p cmd=%d\n", __func__, + host->mrq, host->mrq->cmd->opcode); + if (host->cmd) { + dev_err(host->dev, "%s: aborting cmd=%d\n", + __func__, host->cmd->opcode); + phytium_sdci_cmd_done(host, SDCI_NORMAL_ISR_TIMEOUT, + host->mrq, host->cmd); + } else if (host->data) { + dev_err(host->dev, "%s: abort data: cmd%d; %d blocks\n", + __func__, host->mrq->cmd->opcode, + host->data->blocks); + phytium_sdci_data_xfer_done(host, SDCI_BD_ISR_EDTE, + host->mrq, host->data); + } + } +} + +static void hotplug_timer_func(struct timer_list *t) +{ + struct phytium_sdci_host *host; + u32 status; + + host = from_timer(host, t, hotplug_timer); + if (!host) + dev_err(host->dev, "%s: Not find host!\n", __func__); + status = readl(host->base + SDCI_STATUS); + + if (status & 
SDCI_STATUS_CDSL) { /* card absent */ + if (host->mmc->card) { + cancel_delayed_work(&host->mmc->detect); + mmc_detect_change(host->mmc, + msecs_to_jiffies(100)); + } + } else { /* card insert */ + cancel_delayed_work(&host->mmc->detect); + mmc_detect_change(host->mmc, msecs_to_jiffies(200)); + } +} + +static irqreturn_t phytium_sdci_irq(int irq, void *dev_id) +{ + struct phytium_sdci_host *host = (struct phytium_sdci_host *) dev_id; + unsigned long flags; + struct mmc_request *mrq; + struct mmc_command *cmd; + u32 events; + + if (!host) + return IRQ_NONE; + + spin_lock_irqsave(&host->lock, flags); + events = readl(host->base + SDCI_NORMAL_ISR); + /* clear interrupts */ + writel(1, host->base + SDCI_NORMAL_ISR); + + mrq = host->mrq; + cmd = host->cmd; + spin_unlock_irqrestore(&host->lock, flags); + + if (events & (SDCI_NORMAL_ISR_CR | SDCI_NORMAL_ISR_CI)) { + mod_timer(&host->hotplug_timer, + jiffies + usecs_to_jiffies(30000)); + goto irq_out; + } + + if (!(events & cmd_ints_mask)) + goto irq_out; + + if (!mrq) { + dev_err(host->dev, "%s: MRQ=NULL; events=%08X\n", + __func__, events); + WARN_ON(1); + goto irq_out; + } + + dev_dbg(host->dev, "%s: events=%08X\n", __func__, events); + + if (cmd) + phytium_sdci_cmd_done(host, events, mrq, cmd); + +irq_out: + return IRQ_HANDLED; +} + +static irqreturn_t phytium_sdci_dma_irq(int irq, void *dev_id) +{ + struct phytium_sdci_host *host = (struct phytium_sdci_host *) dev_id; + unsigned long flags; + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + u32 events; + + spin_lock_irqsave(&host->lock, flags); + events = readl(host->base + SDCI_BD_ISR); + writel(1, host->base + SDCI_BD_ISR); + + mrq = host->mrq; + cmd = host->cmd; + data = host->data; + spin_unlock_irqrestore(&host->lock, flags); + + if (!(events & data_ints_mask)) + goto dma_irq_out; + + if (!mrq) { + dev_err(host->dev, + "%s: MRQ=NULL; events=%08X\n", + __func__, events); + goto dma_irq_out; + } + + dev_dbg(host->dev, "%s: 
events=%08X\n", __func__, events); + + if (data) + phytium_sdci_data_xfer_done(host, events, mrq, data); + +dma_irq_out: + return IRQ_HANDLED; +} + +static irqreturn_t phytium_sdci_err_irq(int irq, void *dev_id) +{ + struct phytium_sdci_host *host = (struct phytium_sdci_host *) dev_id; + unsigned long flags; + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + u32 events; + + if (!host) + return IRQ_NONE; + + spin_lock_irqsave(&host->lock, flags); + events = readl(host->base + SDCI_ERROR_ISR); + mrq = host->mrq; + cmd = host->cmd; + data = host->data; + spin_unlock_irqrestore(&host->lock, flags); + + if (!(events&err_ints_mask)) + goto err_irq_out; + + if (!mrq) { + sdr_clr_bits(host->base + SDCI_NORMAL_ISER, SDCI_NORMAL_ISR_EI); + writel(1, host->base + SDCI_ERROR_ISR); + dev_err(host->dev, "%s: MRQ=NULL; events=%08X\n", __func__, events); + goto err_irq_out; + } + sdr_clr_bits(host->base + SDCI_NORMAL_ISER, SDCI_NORMAL_ISR_EI); + if (data) { + dev_err(host->dev, + "[%s][%d]: cmd(%d); %d read blocks, status:%x,flag:%x\n", + __func__, __LINE__, mrq->cmd->opcode, data->blocks, events, data->flags); + data->error = -ETIMEDOUT; + if ((data->flags & MMC_DATA_READ) == MMC_DATA_READ || + (data->flags & MMC_DATA_WRITE) == MMC_DATA_WRITE) + phytium_sdci_data_xfer_done(host, SDCI_BD_ISR_EDTE | SDCI_BD_ISR_TRS_R, + mrq, data); + mrq->cmd->error = -ETIMEDOUT; + mmc_request_done(host->mmc, mrq); + } else if (cmd) { + phytium_sdci_cmd_done(host, SDCI_NORMAL_ISR_TIMEOUT, mrq, cmd); + } + + writel(1, host->base + SDCI_NORMAL_ISR); + writel(1, host->base + SDCI_ERROR_ISR); +err_irq_out: + return IRQ_HANDLED; +} + +static void phytium_sdci_init_hw(struct phytium_sdci_host *host) +{ + u32 val; + + /* Reset */ + phytium_sdci_reset_hw(host); + + val = SDCI_SEN_CREFR_VAL | SDCI_SEN_DEBNCE_VAL; + writel(val, host->base + SDCI_SD_SEN); + + /* Disable and clear all interrupts */ + writel(0, host->base + SDCI_NORMAL_ISER); + writel(0, host->base + 
SDCI_ERROR_ISER); + writel(0, host->base + SDCI_BD_ISER); + + writel(1, host->base + SDCI_NORMAL_ISR); + writel(1, host->base + SDCI_ERROR_ISR); + writel(1, host->base + SDCI_BD_ISR); + + sdr_set_bits(host->base + SDCI_NORMAL_ISER, + SDCI_SDCI_NORMAL_ISER_ECI|SDCI_SDCI_NORMAL_ISER_ECR); + /* Configure default cmd timeout to 0.1(s)s = val/25M */ + val = SDCI_F_MAX / 10; + writel(val, host->base + SDCI_TIMEOUT_CMD); + writel(SDCI_TIMEOUT_DATA_VALUE, host->base + SDCI_TIMEOUT_DATA); + + val = 0x0F00; + writel(val,host->base + SDCI_CONTROLLER); + + dev_dbg(host->dev, "init hardware done!"); +} + +static void phytium_sdci_deinit_hw(struct phytium_sdci_host *host) +{ + /* Disable and clear all interrupts */ + writel(0, host->base + SDCI_NORMAL_ISER); + writel(0, host->base + SDCI_ERROR_ISER); + writel(0, host->base + SDCI_BD_ISER); + + writel(0, host->base + SDCI_NORMAL_ISR); + writel(0, host->base + SDCI_ERROR_ISR); + writel(0, host->base + SDCI_BD_ISR); +} + +static void phytium_sdci_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct phytium_sdci_host *host = mmc_priv(mmc); + + if (ios->bus_width == MMC_BUS_WIDTH_4) + mmc->caps = mmc->caps & (~MMC_CAP_4_BIT_DATA); + + /* Suspend/Resume will do power off/on */ + switch (ios->power_mode) { + case MMC_POWER_UP: + writel(SDCI_POWER_ON, host->base + SDCI_POWER); + break; + case MMC_POWER_ON: + phytium_sdci_set_clk(host, ios); + break; + case MMC_POWER_OFF: + writel(SDCI_POWER_OFF, host->base + SDCI_POWER); + break; + default: + break; + } +} + +static int phytium_sdci_get_cd(struct mmc_host *mmc) +{ + struct phytium_sdci_host *host = mmc_priv(mmc); + u32 status = readl(host->base + SDCI_STATUS); + + if (((status >> 19) & 0x1) == 0x1) + return 0; + + return 1; +} + +static void phytium_sdci_hw_reset(struct mmc_host *mmc) +{ + struct phytium_sdci_host *host = mmc_priv(mmc); + + sdr_set_bits(host->base + SDCI_SOFTWARE, SDCI_SOFTWARE_SRST); + sdr_clr_bits(host->base + SDCI_SOFTWARE, SDCI_SOFTWARE_SRST); + while 
(!(readl(host->base + SDCI_STATUS) & SDCI_STATUS_IDIE)) + cpu_relax(); +} + +static struct mmc_host_ops phytium_sdci_ops = { + .request = phytium_sdci_ops_request, + .set_ios = phytium_sdci_ops_set_ios, + .get_cd = phytium_sdci_get_cd, + .card_busy = phytium_sdci_card_busy, + .hw_reset = phytium_sdci_hw_reset, +}; + +static bool phytium_sdci_private_send_cmd(struct phytium_sdci_host *host, + u32 cmd, u32 resp_type,u32 arg) +{ + u32 temp, sd_cmd, sd_arg, sd_status; + unsigned long deadline_time; + + writel(1, host->base + SDCI_NORMAL_ISR); + writel(1, host->base + SDCI_ERROR_ISR); + + sd_cmd = (cmd << 8) | resp_type; + sd_arg = arg; + writel(sd_cmd, host->base + SDCI_COMMAND); + writel(sd_arg, host->base + SDCI_ARGUMENT); + + if (cmd == MMC_STOP_TRANSMISSION) + deadline_time = jiffies + msecs_to_jiffies(1000); + else + deadline_time = jiffies + msecs_to_jiffies(100); + + temp = readl(host->base + SDCI_NORMAL_ISR); + while ((temp & SDCI_NORMAL_ISR_CC) != SDCI_NORMAL_ISR_CC) { + sd_status = readl(host->base + SDCI_STATUS); + if (sd_status & SDCI_STATUS_CDSL) + return false; + + temp = readl(host->base + SDCI_NORMAL_ISR); + if (time_after(jiffies, deadline_time)) + return false; + + if (cmd == MMC_STOP_TRANSMISSION) + mdelay(1); + } + + return true; +} + +static int phytium_sdci_probe(struct platform_device *pdev) +{ + struct mmc_host *mmc; + struct phytium_sdci_host *host; + struct resource *res; + int ret; + const struct acpi_device_id *match; + struct device *dev = &pdev->dev; + + /* Allocate MMC host for this device */ + mmc = mmc_alloc_host(sizeof(struct phytium_sdci_host), &pdev->dev); + if (!mmc) + return -ENOMEM; + + host = mmc_priv(mmc); + ret = mmc_of_parse(mmc); + if (ret) + goto host_free; + + if (dev->of_node) { + host->src_clk = devm_clk_get(&pdev->dev, "phytium_sdc_clk"); + if (IS_ERR(host->src_clk)) { + ret = PTR_ERR(host->src_clk); + goto host_free; + } + + host->clk_rate = clk_get_rate(host->src_clk); + if (device_property_read_bool(dev, 
"no-dma-coherent")) + dev->archdata.dma_coherent = false; + } else if (has_acpi_companion(dev)) { + match = acpi_match_device(dev->driver->acpi_match_table, dev); + if (!match) { + dev_err(dev, "Error ACPI match data is missing\n"); + return -ENODEV; + } + + acpi_dma_configure(dev, DEV_DMA_NOT_SUPPORTED); + + host->clk_rate = 600000000; + } else { + dev_err(&pdev->dev, "No DT found\n"); + return -EINVAL; + } + + dma_set_mask(dev, DMA_BIT_MASK(40)); + dma_set_coherent_mask(dev, DMA_BIT_MASK(40)); + + timer_setup(&host->hotplug_timer, hotplug_timer_func, 0); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + host->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(host->base)) { + ret = PTR_ERR(host->base); + goto host_free; + } + + host->irq = platform_get_irq(pdev, 1); + if (host->irq < 0) { + ret = -EINVAL; + goto host_free; + } + + host->irq_err = platform_get_irq(pdev, 2); + if (host->irq_err < 0) { + ret = -EINVAL; + goto host_free; + } + + host->irq_bd = platform_get_irq(pdev, 0); + if (host->irq_bd < 0) { + ret = -EINVAL; + goto host_free; + } + + host->caps = caps; + host->dev = &pdev->dev; + host->mmc = mmc; + + if((4 * SDCI_F_MAX) > host->clk_rate) + host->clk_div = 1; + else + host->clk_div = ((host->clk_rate / (2 * SDCI_F_MAX)) - 1); + + /* Set host parameters to mmc */ + mmc->f_min = SDCI_F_MIN; + mmc->f_max = (host->clk_rate / ((host->clk_div + 1) * 2)); + mmc->ops = &phytium_sdci_ops; + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + + mmc->caps |= host->caps; + /* MMC core transfer sizes tunable parameters */ + mmc->max_segs = MAX_BD_NUM; + mmc->max_seg_size = 512 * 1024; + mmc->max_blk_size = 512; + mmc->max_req_size = 512 * 1024; + mmc->max_blk_count = mmc->max_req_size / 512; + + host->dma_rx.buf = dma_zalloc_coherent(&pdev->dev, + MAX_BD_NUM, + &host->dma_rx.bd_addr, + GFP_KERNEL); + if (!host->dma_rx.buf){ + ret = -ENOMEM; + goto release_mem; + } + + host->cmd_timeout = msecs_to_jiffies(100); + host->data_timeout = 
msecs_to_jiffies(250); + + INIT_DELAYED_WORK(&host->req_timeout, phytium_sdci_request_timeout); + spin_lock_init(&host->lock); + + platform_set_drvdata(pdev, mmc); + phytium_sdci_init_hw(host); + + ret = devm_request_irq(&pdev->dev, host->irq, phytium_sdci_irq, + IRQF_SHARED, pdev->name, host); + if (ret) + goto release; + + ret = devm_request_irq(&pdev->dev, host->irq_err, phytium_sdci_err_irq, + IRQF_SHARED, pdev->name, host); + if (ret) + goto release; + + ret = devm_request_irq(&pdev->dev, host->irq_bd, phytium_sdci_dma_irq, + IRQF_SHARED, pdev->name, host); + if (ret) + goto release; + + ret = mmc_add_host(mmc); + if (ret) + goto release; + + return 0; + +release: + platform_set_drvdata(pdev, NULL); + phytium_sdci_deinit_hw(host); +release_mem: + if (host->dma_rx.buf) + dma_free_coherent(&pdev->dev, MAX_BD_NUM, + host->dma_rx.buf, + host->dma_rx.bd_addr); +host_free: + mmc_free_host(mmc); + + return ret; +} + +static int phytium_sdci_remove(struct platform_device *pdev) +{ + struct mmc_host *mmc; + struct phytium_sdci_host *host; + + mmc = platform_get_drvdata(pdev); + host = mmc_priv(mmc); + + cancel_delayed_work_sync(&host->req_timeout); + platform_set_drvdata(pdev, NULL); + mmc_remove_host(host->mmc); + phytium_sdci_deinit_hw(host); + + if (host->dma_rx.buf) + dma_free_coherent(&pdev->dev, MAX_BD_NUM, + host->dma_rx.buf, host->dma_rx.bd_addr); + + mmc_free_host(host->mmc); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int phytium_sdci_suspend(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct phytium_sdci_host *host = mmc_priv(mmc); + + phytium_sdci_deinit_hw(host); + return 0; +} + +static int phytium_sdci_resume(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct phytium_sdci_host *host = mmc_priv(mmc); + + phytium_sdci_init_hw(host); + mmc->caps = mmc->caps | MMC_CAP_4_BIT_DATA; + + return 0; +} +#endif + +#ifdef CONFIG_PM +static int phytium_sdci_runtime_suspend(struct device *dev) +{ + struct 
mmc_host *mmc = dev_get_drvdata(dev); + struct phytium_sdci_host *host = mmc_priv(mmc); + + phytium_sdci_deinit_hw(host); + + return 0; +} + +static int phytium_sdci_runtime_resume(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct phytium_sdci_host *host = mmc_priv(mmc); + + phytium_sdci_init_hw(host); + + return 0; +} + +static const struct dev_pm_ops phytium_sdci_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(phytium_sdci_suspend, + phytium_sdci_resume) + SET_RUNTIME_PM_OPS(phytium_sdci_runtime_suspend, + phytium_sdci_runtime_resume, NULL) +}; +#else +#define phytium_sdci_dev_pm_ops NULL +#endif + +static const struct of_device_id phytium_sdci_of_ids[] = { + { .compatible = "phytium,sdci", }, + { } +}; +MODULE_DEVICE_TABLE(of, phytium_sdci_of_ids); + +#ifdef CONFIG_ACPI +static const struct acpi_device_id phytium_sdci_acpi_ids[] = { + { .id = "PHYT0005" }, + { } +}; + +MODULE_DEVICE_TABLE(acpi, phytium_sdci_acpi_ids); +#else +#define phytium_sdci_acpi_ids NULL +#endif + +static struct platform_driver phytium_sdci_driver = { + .probe = phytium_sdci_probe, + .remove = phytium_sdci_remove, + .driver = { + .name = "sdci-phytium", + .of_match_table = phytium_sdci_of_ids, + .acpi_match_table = phytium_sdci_acpi_ids, + .pm = &phytium_sdci_dev_pm_ops, + }, +}; + +module_platform_driver(phytium_sdci_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Cheng Quan "); +MODULE_AUTHOR("Chen Baozi "); +MODULE_DESCRIPTION("Phytium SD Card Interface driver"); diff --git a/drivers/mmc/host/phytium-sdci.h b/drivers/mmc/host/phytium-sdci.h new file mode 100644 index 000000000000..97afd4cb9288 --- /dev/null +++ b/drivers/mmc/host/phytium-sdci.h @@ -0,0 +1,204 @@ +/* + * File Name: phytium_sdci.h - Phytium FT SDCI driver + * + * Copyright (C) 2019 Phytium Technology Co.,Ltd.
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +/*---------------------------------------------------------------------------*/ +/* Common Definition */ +/*---------------------------------------------------------------------------*/ +#define MAX_BD_NUM 0x1000 +#define SD_BLOCK_SIZE 512 + +/*---------------------------------------------------------------------------*/ +/* Register Offset */ +/*---------------------------------------------------------------------------*/ +#define SDCI_CONTROLLER 0x00 /* controller config reg */ +#define SDCI_ARGUMENT 0x04 /* argument reg */ +#define SDCI_COMMAND 0x08 /* command reg */ +#define SDCI_CLOCK_D 0x0C /* clock divide reg */ +#define SDCI_SOFTWARE 0x10 /* controller reset reg */ +#define SDCI_POWER 0X14 /* power control reg */ +#define SDCI_TIMEOUT_CMD 0x18 /* cmd timeout config reg */ +#define SDCI_TIMEOUT_DATA 0x1C /* data timeout reg */ +#define SDCI_NORMAL_ISER 0x20 /* normal ISR config reg */ +#define SDCI_ERROR_ISER 0x24 /* error ISR config reg */ +#define SDCI_BD_ISER 0x28 /* BD ISR config reg */ +#define SDCI_CAPA 0x2C /* capability reg */ +#define SDCI_SD_DRV 0x30 /* SD card driving phase position reg */ +#define SDCI_SD_SAMP 0x34 /* SD card sampling phase position reg */ +#define SDCI_SD_SEN 0x38 /* SD card detection reg */ +#define SDCI_HDS_AXI 0x3C /* AXI boundary config reg */ +#define SDCI_BD_RX 0x40 /* BD rx addr reg */ +#define SDCI_BD_TX 0x60 /* BD tx addr reg */ +#define SDCI_BLK_CNT 0x80 /* r/w block num reg */ +#define SDCI_NORMAL_ISR 0xC0 /* normal ISR status reg */ +#define SDCI_ERROR_ISR 0xC4 /* error ISR status reg */ +#define SDCI_BD_ISR 0xC8 /* BD ISR status reg */ +#define SDCI_BD_STATUS 0xCC /* BD descriptor status reg */ +#define SDCI_STATUS 0xD0 /* status reg */ +#define
SDCI_BLOCK 0xD4 /* block len reg */ +#define SDCI_RESP0 0xE0 /* response reg0 */ +#define SDCI_RESP1 0xE4 /* response reg1 */ +#define SDCI_RESP2 0xE8 /* response reg2 */ +#define SDCI_RESP3 0XEC /* response reg3 */ + +/*---------------------------------------------------------------------------*/ +/* Register Mask */ +/*---------------------------------------------------------------------------*/ +/* SDCI_CONTROLLER mask */ +#define SDCI_CONTROLLER_ECRCWR (0x1 << 0) /* RW */ +#define SDCI_CONTROLLER_ECRCRD (0x1 << 1) /* RW */ +#define SDCI_CONTROLLER_RESEDE (0x1 << 2) /* RW */ +#define SDCI_CONTROLLER_PERMDR (0x3 << 8) /* RW */ +#define SDCI_CONTROLLER_PERMDX (0x3 << 10) /* RW */ + +/* SDCI_SOFTWARE mask */ +#define SDCI_SOFTWARE_SRST (0x1 << 0) /* RW */ +#define SDCI_SOFTWARE_SCRST (0x1 << 1) /* RW */ +#define SDCI_SOFTWARE_BDRST (0x1 << 2) /* RW */ +#define SDCI_SOFTWARE_CFCLF (0x1 << 3) /* RW */ +#define SDCI_SOFTWARE_SDRST (0x1 << 4) /* RW */ + +/* SDCI_NORMAL_ISER mask */ +#define SDCI_SDCI_NORMAL_ISER_ECC_EN (0x1 << 0) /* RW */ +#define SDCI_SDCI_NORMAL_ISER_ECR (0x1 << 1) /* RW */ +#define SDCI_SDCI_NORMAL_ISER_ECI (0x1 << 2) /* RW */ +#define SDCI_SDCI_NORMAL_ISER_EEI_EN (0x1 << 15) /* RW */ + +/* SDCI_NORMAL_ISR mask */ +#define SDCI_NORMAL_ISR_CC (0x1 << 0) /* R */ +#define SDCI_NORMAL_ISR_CR (0x1 << 1) /* R */ +#define SDCI_NORMAL_ISR_CI (0x1 << 2) /* R */ +#define SDCI_NORMAL_ISR_TIMEOUT (0x1 << 3) /* R */ +#define SDCI_NORMAL_ISR_EI (0x1 << 15) /* R */ + +/* SDCI_ERROR_ISER mask */ +#define SDCI_ERROR_ISER_ECTE_EN (0x1 << 0) /* RW */ +#define SDCI_ERROR_ISR_CCRCE_EN (0x1 << 1) /* RW */ +#define SDCI_ERROR_ISR_CIR_EN (0x1 << 3) /* RW */ +#define SDCI_ERROR_ISR_CNR_EN (0x1 << 4) /* RW */ +/* SDCI_ERROR_ISR mask */ +#define SDCI_ERROR_ISR_CTE (0x1 << 0) /* R */ +#define SDCI_ERROR_ISR_CCRCE (0x1 << 1) /* R */ +#define SDCI_ERROR_ISR_CIR (0x1 << 3) /* R */ +#define SDCI_ERROR_ISR_CNR (0x1 << 4) /* R */ + +/* SDCI_BD_ISER mask */ +#define 
SDCI_BD_ISER_ETRS_EN (0x1 << 8) /* RW */ +#define SDCI_BD_ISER_DATFRAX_EN (0x1 << 7) /* RW */ + +/* SDCI_BD_ISR mask */ +#define SDCI_BD_ISR_TRS_W (0x1 << 0) /* R */ +#define SDCI_BD_ISR_TRS_R (0x1 << 8) /* R */ +#define SDCI_BD_ISR_EDTE (0x1 << 3) /* R */ +#define SDCI_BD_ISR_DAIS (0x1 << 15) /* R */ +#define SDCI_BD_ISR_DATFRAX (0x1 << 7) /* R */ + +/* SDCI_HDS_AXI mask */ +#define SDCI_HDS_AXI_AWDOMAIN (0x1 << 0) /* RW */ +#define SDCI_HDS_AXI_ARDOMAIN (0x1 << 12) /* RW */ +#define SDCI_HDS_AXI_AWCACHE (0x6 << 24) /* RW */ +#define SDCI_HDS_AXI_ARCACHE (0xB << 28) /* RW */ + +/* SDCI_STATUS mask */ +#define SDCI_STATUS_CMD_BUSY (0x0 << 0) /* R */ +#define SDCI_STATUS_CMD_READY (0x1 << 0) /* R */ +#define SDCI_STATUS_IDIE (0x1 << 12) /* R */ +#define SDCI_CARD_BUSY_IN_PRG (0x1 << 20) /* R D0 BUSY:0,IDLE:1 */ + +/* SDCI_STATUS */ +#define SDCI_STATUS_CDSL (0x1 << 19) /* R */ + +/*---------------------------------------------------------------------------*/ +/* Register Value */ +/*---------------------------------------------------------------------------*/ +#define SDCI_SD_DRV_VALUE 0 +#define SDCI_SD_SAMP_VALUE_MAX 50 +#define SDCI_SD_SAMP_VALUE_MIN 0 + +#define SDCI_TIMEOUT_CMD_VALUE 0xFFFFFFFF +#define SDCI_TIMEOUT_DATA_VALUE 0xFFFFFFFF +#define SDCI_POWER_ON 1 +#define SDCI_POWER_OFF 0 + +#define SDCI_CMD_TIMEOUT 10 +#define SDCI_DAT_TIMEOUT 5000 + +#define SDCI_CMD_TYPE_ADTC 0x2 + +#define SDCI_F_MIN 400000 +#define SDCI_F_MAX 25000000 + +#define SDCI_SEN_CREFR_VAL (0x1 << 1) +#define SDCI_SEN_DEBNCE_VAL (0xB << 8) + +#define CARD_CURRENT_STATE (0xF << 9) +#define CARD_PRG_STATE (0x7 << 9) +#define CARD_TRAN_STATE (0x4 << 9) + +#define SDCI_CMD13_OK 1 +#define SDCI_CMD13_FAILED 0 + +#define ERR_TIMEOUT (0x1 << 0) +#define ERR_CARD_ABSENT (0x1 << 1) +#define ERR_CMD_RESPONED (0x1 << 2) + +/*---------------------------------------------------------------------------*/ +/* Structure Type */ 
+/*---------------------------------------------------------------------------*/ +struct phytium_sdci_dma { + struct scatterlist *sg; + u32 *buf; + dma_addr_t bd_addr; + size_t bytes; +}; + +typedef enum { + COMMOM_ADTC = 0, + BLOCK_RW_ADTC = 1 +} adtc_type_t; + +struct phytium_sdci_host { + struct device *dev; + struct mmc_host *mmc; + u32 caps; + spinlock_t lock; + + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + int error; + + void __iomem *base; + + struct phytium_sdci_dma dma_rx; + struct phytium_sdci_dma dma_tx; + + u32 *sg_virt_addr; + adtc_type_t adtc_type; + + struct timer_list hotplug_timer; + + struct delayed_work req_timeout; + u32 cmd_timeout; + u32 data_timeout; + + int irq; + int irq_err; + int irq_bd; + + struct clk *src_clk; + unsigned long clk_rate; + unsigned long clk_div; + unsigned long real_rate; + + u32 current_rca; + bool is_multi_rw_only_one_blkcnt; +}; + diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig index 5fc9a1bde4ac..95a4000ffd09 100644 --- a/drivers/mtd/nand/raw/Kconfig +++ b/drivers/mtd/nand/raw/Kconfig @@ -285,6 +285,17 @@ config MTD_NAND_ATMEL Enables support for NAND Flash / Smart Media Card interface on Atmel AT91 processors. +config MTD_NAND_PHYTIUM + tristate + +config MTD_NAND_PHYTIUM_PCI + tristate "Support Phytium NAND controller as a PCI device" + select MTD_NAND_PHYTIUM + depends on PCI + help + Enable the driver for NAND flash controller on Phytium X100 chipset, + using the Phytium NAND controller core. 
+ config MTD_NAND_MARVELL tristate "NAND controller support on Marvell boards" depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU || \ diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile index d5a5f9832b88..71794ec47ad6 100644 --- a/drivers/mtd/nand/raw/Makefile +++ b/drivers/mtd/nand/raw/Makefile @@ -58,6 +58,9 @@ obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o obj-$(CONFIG_MTD_NAND_MTK) += mtk_ecc.o mtk_nand.o obj-$(CONFIG_MTD_NAND_TEGRA) += tegra_nand.o +obj-$(CONFIG_MTD_NAND_PHYTIUM) += phytium_nand.o +obj-$(CONFIG_MTD_NAND_PHYTIUM_PCI) += phytium_nand_pci.o + nand-objs := nand_base.o nand_bbt.o nand_timings.o nand_ids.o nand-objs += nand_amd.o nand-objs += nand_hynix.o diff --git a/drivers/mtd/nand/raw/phytium_nand.c b/drivers/mtd/nand/raw/phytium_nand.c new file mode 100644 index 000000000000..e285dc35ec42 --- /dev/null +++ b/drivers/mtd/nand/raw/phytium_nand.c @@ -0,0 +1,2117 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Core driver for Phytium NAND flash controller + * + * Copyright (C) 2020-2021, Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "phytium_nand.h" + +u16 timing_asy_mode0[TIMING_ASY_NUM] = { /* x100 pass, sample: 1 */ + 0x03, 0x03, 0x28, 0x28, 0x03, 0x03, 0x06, 0x06, 0x28, 0x70, 0x30, 0x50}; +u16 timing_asy_mode1[TIMING_ASY_NUM] = { /* x100 pass, sample: 1 */ + 0x03, 0x03, 0x14, 0x14, 0x03, 0x03, 0x06, 0x06, 0x14, 0x70, 0x30, 0x28}; +u16 timing_asy_mode2[TIMING_ASY_NUM] = { /* x100 pass, sample: 7/8 (unlic) */ + 0x03, 0x03, 0x0D, 0x0D, 0x03, 0x03, 0x06, 0x06, 0x0D, 0x70, 0x20, 0x1A}; +u16 timing_asy_mode3[TIMING_ASY_NUM] = { /* x100 pass, sample: 4-7 */ + 0x03, 0x03, 0x0A, 0x0A, 0x03, 0x03, 0x06, 0x06, 0x0A, 0x70, 0x20, 0x14}; +u16 timing_asy_mode4[TIMING_ASY_NUM] = { /* x100 1.8v pass */ + 0x03, 0x03, 0x08, 0x08, 0x03, 0x03, 0x06, 0x06, 0x08, 0x70, 0x15, 0x10}; +u16 timing_asy_mode5[TIMING_ASY_NUM] = { /* x100 1.8v pass */ + 0x03, 0x03, 0x07, 0x07, 0x03, 0x03, 0x06, 0x06, 0x07, 0x20, 0x15, 0x0E}; +u16 timing_syn_mode0[TIMING_SYN_NUM] = { /* x100 1.8v pass */ + 0x20, 0x41, 0x05, 0x20, 0x10, 0x19, 0x62, 0x40, 0x38, 0x20, 0x00, 0x09, + 0x50, 0x20}; +u16 timing_syn_mode1[TIMING_SYN_NUM] = { /* x100 1.8v pass */ + 0x18, 0x32, 0x06, 0x18, 0x0C, 0x10, 0x76, 0x40, 0x2A, 0x1E, 0x00, 0x12, + 0x24, 0x18}; +u16 timing_syn_mode2[TIMING_SYN_NUM] = { /* x100 1.8v pass */ + 0x10, 0x0A, 0x04, 0x10, 0x08, 0x0A, 0x6E, 0x50, 0x1D, 0x10, 0x00, 0x0C, + 0x18, 0x10}; +u16 timing_syn_mode3[TIMING_SYN_NUM] = { /* x100 1.8v pass */ + 0x0C, 0x1A, 0x02, 0x0C, 0x06, 0x08, 0x78, 0x7C, 0x15, 0x0C, 0x00, 0x08, + 0x12, 0x0C}; +u16 timing_syn_mode4[TIMING_SYN_NUM] = { /* x100 1.8v failed */ + 0x08, 0x17, 0x05, 0x08, 0x04, 0x01, 0x73, 0x40, 0x0C, 0x08, 0x00, 0x06, + 0x0C, 0x10}; +u16 timing_tog_ddr_mode0[TIMING_TOG_NUM] = { /* 600M clk */ + 0x14, 0x0a, 0x08, 0x08, 0xc8, 0xc8, 0x08, 0x08, 0x20, 0x0a, 0x14, 0x08}; + +static u32 nfc_ecc_errover; +static u32 nfc_ecc_err; +static u32 nfc_irq_st; +static u32 
nfc_irq_en; +static u32 nfc_irq_complete; + +/* + * Internal helper to conditionnally apply a delay (from the above structure, + * most of the time). + */ +static void cond_delay(unsigned int ns) +{ + if (!ns) + return; + + if (ns < 10000) + ndelay(ns); + else + udelay(DIV_ROUND_UP(ns, 1000)); +} + +static inline struct phytium_nfc *to_phytium_nfc(struct nand_controller *ctrl) +{ + return container_of(ctrl, struct phytium_nfc, controller); +} + +static inline struct phytium_nand_chip *to_phytium_nand(struct nand_chip *chip) +{ + return container_of(chip, struct phytium_nand_chip, chip); +} + +static u32 phytium_read(struct phytium_nfc *nfc, u32 reg) +{ + return readl_relaxed(nfc->regs + reg); +} + +static void phytium_write(struct phytium_nfc *nfc, u32 reg, u32 value) +{ + return writel_relaxed(value, nfc->regs + reg); +} + +static inline int phytium_wait_busy(struct phytium_nfc *nfc) +{ + u32 status; + + if (nfc_ecc_errover) { + nfc_ecc_errover = 0; + return 0; + } + + return readl_relaxed_poll_timeout(nfc->regs + NDSR, status, + !(status & NDSR_BUSY), 10, 10000); +} + +static void phytium_nfc_disable_int(struct phytium_nfc *nfc, u32 int_mask) +{ + u32 reg; + + reg = phytium_read(nfc, NDIR_MASK); + phytium_write(nfc, NDIR_MASK, reg | int_mask); +} + +static void phytium_nfc_enable_int(struct phytium_nfc *nfc, u32 int_mask) +{ + u32 reg; + + reg = phytium_read(nfc, NDIR_MASK); + phytium_write(nfc, NDIR_MASK, reg & (~int_mask)); +} + +static void phytium_nfc_clear_int(struct phytium_nfc *nfc, u32 int_mask) +{ + phytium_write(nfc, NDIR_MASK, int_mask); +} + +static int phytium_nfc_cmd_correct(struct phytium_nfc_op *nfc_op) +{ + if (!nfc_op) + return -EINVAL; + + if (nfc_op->cmd_len == 0x01) { + nfc_op->cmd[1] = nfc_op->cmd[0]; + nfc_op->cmd[0] = 0; + } + + return 0; +} + +static int phytium_nfc_addr_correct(struct phytium_nfc_op *nfc_op) +{ + u32 len; + int i, j; + + if (!nfc_op) + return -EINVAL; + + len = nfc_op->addr_len > PHYTIUM_NFC_ADDR_MAX_LEN ? 
+ PHYTIUM_NFC_ADDR_MAX_LEN : nfc_op->addr_len; + + if (len == PHYTIUM_NFC_ADDR_MAX_LEN) + return 0; + + for (i = len-1, j = PHYTIUM_NFC_ADDR_MAX_LEN - 1; i >= 0; i--, j--) { + nfc_op->addr[j] = nfc_op->addr[i]; + nfc_op->addr[i] = 0; + } + + return 0; +} + +static void phytium_nfc_parse_instructions(struct nand_chip *chip, + const struct nand_subop *subop, + struct phytium_nfc_op *nfc_op) +{ + struct nand_op_instr *instr = NULL; + bool first_cmd = true; + u32 op_id; + int i; + + /* Reset the input structure as most of its fields will be OR'ed */ + memset(nfc_op, 0, sizeof(struct phytium_nfc_op)); + + for (op_id = 0; op_id < subop->ninstrs; op_id++) { + unsigned int offset, naddrs; + const u8 *addrs; + int len; + + instr = (struct nand_op_instr *)&subop->instrs[op_id]; + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + if (first_cmd) { + nfc_op->cmd[0] = instr->ctx.cmd.opcode; + } else { + nfc_op->cmd[1] = instr->ctx.cmd.opcode; + nfc_op->cmd_ctrl.nfc_ctrl.dbc = 1; + } + + nfc_op->cle_ale_delay_ns = instr->delay_ns; + first_cmd = false; + nfc_op->cmd_len++; + + break; + + case NAND_OP_ADDR_INSTR: + offset = nand_subop_get_addr_start_off(subop, op_id); + naddrs = nand_subop_get_num_addr_cyc(subop, op_id); + addrs = &instr->ctx.addr.addrs[offset]; + + nfc_op->cmd_ctrl.nfc_ctrl.addr_cyc = naddrs; + + for (i = 0; i < min_t(u32, PHYTIUM_NFC_ADDR_MAX_LEN, naddrs); i++) + nfc_op->addr[i] = addrs[i]; + + nfc_op->cle_ale_delay_ns = instr->delay_ns; + + nfc_op->addr_len = naddrs; + break; + + case NAND_OP_DATA_IN_INSTR: + nfc_op->data_instr = instr; + nfc_op->data_instr_idx = op_id; + nfc_op->cmd_ctrl.nfc_ctrl.dc = 1; + len = nand_subop_get_data_len(subop, op_id); + nfc_op->page_cnt = len; + nfc_op->data_delay_ns = instr->delay_ns; + + break; + + case NAND_OP_DATA_OUT_INSTR: + nfc_op->data_instr = instr; + nfc_op->data_instr_idx = op_id; + nfc_op->cmd_ctrl.nfc_ctrl.dc = 1; + len = nand_subop_get_data_len(subop, op_id); + nfc_op->page_cnt = len; + nfc_op->data_delay_ns = 
instr->delay_ns; + break; + + case NAND_OP_WAITRDY_INSTR: + nfc_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms; + nfc_op->rdy_delay_ns = instr->delay_ns; + break; + } + } +} + +int phytium_nfc_prepare_cmd(struct nand_chip *chip, + struct phytium_nfc_op *nfc_op, + enum dma_data_direction direction) +{ + struct phytium_nand_chip *phytium_nand = to_phytium_nand(chip); + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + int i; + + phytium_nfc_cmd_correct(nfc_op); + phytium_nfc_addr_correct(nfc_op); + + nfc_op->cmd_ctrl.nfc_ctrl.csel = phytium_nand->selected_die; + + for (i = 0; i < PHYTIUM_NFC_ADDR_MAX_LEN; i++) + nfc_op->mem_addr_first[i] = (nfc->dma_phy_addr >> (8 * i)) & 0xFF; + + return 0; +} +EXPORT_SYMBOL_GPL(phytium_nfc_prepare_cmd); + +static int phytium_nfc_cmd_dump(struct phytium_nfc *nfc, + struct phytium_nfc_op *nfc_op, u8 *buf) +{ + u8 *p; + u8 str[1024] = {0}; + int i; + + sprintf(str, "Phytium NFC cmd dump:\n"); + sprintf(str, "%s cmd0:%x, cmd1:%x, ctrl:%x, page_cnt:%d\n", + str, nfc_op->cmd[0], nfc_op->cmd[1], nfc_op->cmd_ctrl.ctrl, nfc_op->page_cnt); + + p = &nfc_op->addr[0]; + sprintf(str, "%s addr:%02x %02x %02x %02x %02x\n", + str, p[0], p[1], p[2], p[3], p[4]); + + p = &nfc_op->mem_addr_first[0]; + sprintf(str, "%s mem_addr_first:%02x %02x %02x %02x %02x\n", + str, p[0], p[1], p[2], p[3], p[4]); + + for (i = 0; i < PHYTIUM_NFC_DSP_SIZE; i++) + sprintf(str, "%s %02x", str, buf[i]); + + dev_info(nfc->dev, "%s\n", str); + + return 0; +} + +int phytium_nfc_data_dump(struct phytium_nfc *nfc, u8 *buf, u32 len) +{ + u8 str[1024] = {0}; + int i; + + len = len > 512 ? 
512 : len; + + sprintf(str, "Phytium NFC data dump: %d\n", len); + for (i = 0; i < len; i++) { + if (i && (i%128 == 0)) { + dev_info(nfc->dev, "next:\n%s\n", str); + memset(str, 0, 1024); + } + + if (i && (i%16 == 0)) + sprintf(str, "%s\n", str); + sprintf(str, "%s %02x", str, buf[i]); + } + + dev_info(nfc->dev, "%s\n", str); + + return 0; +} +EXPORT_SYMBOL_GPL(phytium_nfc_data_dump); + +int phytium_nfc_send_cmd(struct nand_chip *chip, + struct phytium_nfc_op *nfc_op) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + u32 value = 0; + + memset((u8 *)nfc->dsp_addr, 0, PAGE_SIZE); + memcpy((u8 *)nfc->dsp_addr, (u8 *)nfc_op, PHYTIUM_NFC_DSP_SIZE); + + phytium_nfc_cmd_dump(nfc, nfc_op, (u8 *)nfc->dsp_addr); + + if (phytium_wait_busy(nfc) != 0) { + dev_err(nfc->dev, "NFC was always busy\n"); + dev_err(nfc->dev, "NFC state: %x\n", phytium_read(nfc, NDSR)); + dev_err(nfc->dev, "NFC debug: %x\n", phytium_read(nfc, ND_DEBUG)); + return 0; + } + + reinit_completion(&nfc->complete); + + spin_lock(&nfc->spinlock); + value = nfc->dsp_phy_addr & 0xFFFFFFFF; + phytium_write(nfc, NDAR0, value); + + /* Don't modify NDAR1_DMA_RLEN & NDAR1_DMA_WLEN */ + value = phytium_read(nfc, NDAR1); + value |= NDAR1_H8((nfc->dsp_phy_addr >> 32) & 0xFF); + phytium_write(nfc, NDAR1, value); + + phytium_nfc_enable_int(nfc, NDIR_CMD_FINISH_MASK); + + value |= NDAR1_DMA_EN; + phytium_write(nfc, NDAR1, value); + spin_unlock(&nfc->spinlock); + + return 0; +} +EXPORT_SYMBOL_GPL(phytium_nfc_send_cmd); + +int phytium_nfc_prepare_cmd2(struct nand_chip *chip, + struct phytium_nfc_op *nfc_op, + enum dma_data_direction direction, + u32 cmd_num) +{ + struct phytium_nand_chip *phytium_nand = to_phytium_nand(chip); + int i; + + for (i = 0; i < cmd_num; i++) { + phytium_nfc_cmd_correct(nfc_op); + phytium_nfc_addr_correct(nfc_op); + nfc_op->cmd_ctrl.nfc_ctrl.csel = phytium_nand->selected_die; + nfc_op++; + } + + return 0; +} +EXPORT_SYMBOL_GPL(phytium_nfc_prepare_cmd2); + +int 
phytium_nfc_send_cmd2(struct nand_chip *chip, + struct phytium_nfc_op *nfc_op, + u32 cmd_num) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + u32 value = 0; + int i; + + memset((u8 *)nfc->dsp_addr, 0, PAGE_SIZE); + + for (i = 0; i < cmd_num; i++) { + memcpy((u8 *)nfc->dsp_addr + i*PHYTIUM_NFC_DSP_SIZE, + (u8 *)nfc_op, PHYTIUM_NFC_DSP_SIZE); + phytium_nfc_cmd_dump(nfc, nfc_op, (u8 *)nfc->dsp_addr + i*PHYTIUM_NFC_DSP_SIZE); + nfc_op++; + } + + if (phytium_wait_busy(nfc) != 0) { + dev_err(nfc->dev, "NFC was always busy\n"); + dev_err(nfc->dev, "NFC state: %x\n", phytium_read(nfc, NDSR)); + dev_err(nfc->dev, "NFC debug: %x\n", phytium_read(nfc, ND_DEBUG)); + return 0; + } + + reinit_completion(&nfc->complete); + + spin_lock(&nfc->spinlock); + value = nfc->dsp_phy_addr & 0xFFFFFFFF; + phytium_write(nfc, NDAR0, value); + + /* Don't modify NDAR1_DMA_RLEN & NDAR1_DMA_WLEN */ + value = phytium_read(nfc, NDAR1); + value |= NDAR1_H8((nfc->dsp_phy_addr >> 32) & 0xFF); + phytium_write(nfc, NDAR1, value); + + phytium_nfc_enable_int(nfc, NDIR_DMA_FINISH_MASK | NDIR_ECC_ERR_MASK); + + value |= NDAR1_DMA_EN; + phytium_write(nfc, NDAR1, value); + spin_unlock(&nfc->spinlock); + + return 0; +} +EXPORT_SYMBOL_GPL(phytium_nfc_send_cmd2); + +int phytium_nfc_wait_op(struct nand_chip *chip, + u32 timeout_ms) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + int ret; + + /* Timeout is expressed in ms */ + if (!timeout_ms) + timeout_ms = IRQ_TIMEOUT; + else if (timeout_ms > 1000) + timeout_ms = 1000; + else if (timeout_ms < 100) + timeout_ms = 100; + + ret = wait_for_completion_timeout(&nfc->complete, + msecs_to_jiffies(timeout_ms)); + if (!ret) { + dev_err(nfc->dev, "Timeout waiting for RB signal\n"); + dev_err(nfc->dev, "NFC state: %x\n", phytium_read(nfc, NDSR)); + dev_err(nfc->dev, "NFC irq state: %x, irq en:%x\n", + phytium_read(nfc, NDIR), phytium_read(nfc, NDIR_MASK)); + dev_err(nfc->dev, "NFC debug: %x\n", phytium_read(nfc, ND_DEBUG)); + + 
complete_release(&nfc->complete); + phytium_nfc_clear_int(nfc, NDIR_ALL_INT); + return -ETIMEDOUT; + } + + return 0; +} +EXPORT_SYMBOL_GPL(phytium_nfc_wait_op); + +static int phytium_nfc_xfer_data_pio(struct phytium_nfc *nfc, + const struct nand_subop *subop, + struct phytium_nfc_op *nfc_op) +{ + const struct nand_op_instr *instr = nfc_op->data_instr; + unsigned int op_id = nfc_op->data_instr_idx; + unsigned int len = nand_subop_get_data_len(subop, op_id); + unsigned int offset = nand_subop_get_data_start_off(subop, op_id); + bool reading = (instr->type == NAND_OP_DATA_IN_INSTR); + + if (reading) { + u8 *in = instr->ctx.data.buf.in + offset; + + memcpy(in, nfc->dma_buf, len); + + nfc->dma_offset = 0; + } else { + const u8 *out = instr->ctx.data.buf.out + offset; + + memcpy(nfc->dma_buf, out, len); + } + + return 0; +} + +static int memcpy_to_reg16(struct phytium_nfc *nfc, u32 reg, u16 *buf, size_t len) +{ + int i; + u32 val = 0; + + if (!nfc || !buf || (len >= 16)) + return -EINVAL; + + for (i = 0; i < len; i++) { + val = (val << 16) + buf[i]; + if (i % 2) { + phytium_write(nfc, reg, val); + val = 0; + reg += 4; + } + } + + return 0; +} + +int phytium_nfc_default_data_interface(struct phytium_nfc *nfc) +{ + int value; + + value = phytium_read(nfc, NDCR0); + value &= (~NDCR0_IN_MODE(3)); + value |= NDCR0_IN_MODE(nfc->inter_mode); + phytium_write(nfc, NDCR0, value); + + switch (nfc->inter_mode) { + case ASYN_SDR: + if (nfc->timing_mode == ASY_MODE4) { + memcpy_to_reg16(nfc, NDTR0, timing_asy_mode4, TIMING_ASY_NUM); + phytium_write(nfc, NDCR1, NDCR1_SAMPL_PHASE(4)); + } else if (nfc->timing_mode == ASY_MODE3) { + memcpy_to_reg16(nfc, NDTR0, timing_asy_mode3, TIMING_ASY_NUM); + phytium_write(nfc, NDCR1, NDCR1_SAMPL_PHASE(5)); + } else if (nfc->timing_mode == ASY_MODE2) { + memcpy_to_reg16(nfc, NDTR0, timing_asy_mode2, TIMING_ASY_NUM); + phytium_write(nfc, NDCR1, NDCR1_SAMPL_PHASE(3)); + } else if (nfc->timing_mode == ASY_MODE1) { + memcpy_to_reg16(nfc, NDTR0, 
timing_asy_mode1, TIMING_ASY_NUM); + phytium_write(nfc, NDCR1, NDCR1_SAMPL_PHASE(2)); + } else { + memcpy_to_reg16(nfc, NDTR0, timing_asy_mode0, TIMING_ASY_NUM); + phytium_write(nfc, NDCR1, NDCR1_SAMPL_PHASE(1)); + } + phytium_write(nfc, ND_INTERVAL_TIME, 0x01); + break; + case ONFI_DDR: + if (nfc->timing_mode == SYN_MODE4) { + memcpy_to_reg16(nfc, NDTR6, timing_syn_mode4, TIMING_SYN_NUM); + phytium_write(nfc, NDCR1, NDCR1_SAMPL_PHASE(0x0D)); + phytium_write(nfc, ND_INTERVAL_TIME, 0x30); + } else if (nfc->timing_mode == SYN_MODE3) { + memcpy_to_reg16(nfc, NDTR6, timing_syn_mode3, TIMING_SYN_NUM); + phytium_write(nfc, NDCR1, NDCR1_SAMPL_PHASE(0x05)); + phytium_write(nfc, ND_INTERVAL_TIME, 0x18); + } else if (nfc->timing_mode == SYN_MODE2) { + memcpy_to_reg16(nfc, NDTR6, timing_syn_mode2, TIMING_SYN_NUM); + phytium_write(nfc, NDCR1, NDCR1_SAMPL_PHASE(0x08)); + phytium_write(nfc, ND_INTERVAL_TIME, 0x20); + } else if (nfc->timing_mode == SYN_MODE1) { + memcpy_to_reg16(nfc, NDTR6, timing_syn_mode1, TIMING_SYN_NUM); + phytium_write(nfc, NDCR1, NDCR1_SAMPL_PHASE(0x12)); + phytium_write(nfc, ND_INTERVAL_TIME, 0x40); + } else { + memcpy_to_reg16(nfc, NDTR6, timing_syn_mode0, TIMING_SYN_NUM); + phytium_write(nfc, NDCR1, NDCR1_SAMPL_PHASE(0x12)); + phytium_write(nfc, ND_INTERVAL_TIME, 0x40); + } + break; + case TOG_ASYN_DDR: + phytium_write(nfc, NDCR1, NDCR1_SAMPL_PHASE(8)); + phytium_write(nfc, ND_INTERVAL_TIME, 0xC8); + memcpy_to_reg16(nfc, NDTR13, timing_tog_ddr_mode0, TIMING_TOG_NUM); + break; + } + + return 0; +} +EXPORT_SYMBOL_GPL(phytium_nfc_default_data_interface); + +static int phytium_nfc_naked_waitrdy_exec(struct nand_chip *chip, + const struct nand_subop *subop) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + struct phytium_nfc_op nfc_op; + enum dma_data_direction direction; + int ret = 0; + + phytium_nfc_parse_instructions(chip, subop, &nfc_op); + + dev_info(nfc->dev, "Phytium nand command 0x%02x 0x%02x.\n", + nfc_op.cmd[0], nfc_op.cmd[1]); + + 
switch (nfc_op.cmd[0]) { + case NAND_CMD_PARAM: + memset(nfc->dma_buf, 0, PAGE_SIZE); + direction = DMA_FROM_DEVICE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ_PARAM; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; + if (nfc->inter_pro == NAND_ONFI) + nfc_op.page_cnt = 3 * sizeof(struct nand_onfi_params); + else if (nfc->inter_pro == NAND_JEDEC) + nfc_op.page_cnt = 3 * sizeof(struct nand_jedec_params); + if (nfc_op.page_cnt) + nfc_op.cmd_ctrl.nfc_ctrl.dc = 1; + nfc->dma_offset = 0; + break; + case NAND_CMD_SET_FEATURES: + direction = DMA_TO_DEVICE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_SET_FTR; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; + nfc_op.cmd_ctrl.nfc_ctrl.dc = 1; + if (nfc->inter_mode != ASYN_SDR) { + dev_err(nfc->dev, "Not support SET_FEATURES command!\n"); + return 0; + } + break; + case NAND_CMD_GET_FEATURES: + direction = DMA_FROM_DEVICE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_GET_FTR; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; + nfc_op.cmd_ctrl.nfc_ctrl.dc = 1; + break; + case NAND_CMD_READ0: + if (nfc_op.cmd[1] == NAND_CMD_READSTART) { /* large page */ + direction = DMA_FROM_DEVICE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; + } else if (nfc_op.cmd[1] == NAND_CMD_SEQIN) { /* program page begin */ + nfc_op.cmd[0] = NAND_CMD_SEQIN; + nfc_op.cmd[1] = NAND_CMD_PAGEPROG; + direction = DMA_TO_DEVICE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_PAGE_PRO; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; + } else { /* small page */ + direction = DMA_FROM_DEVICE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; + } + break; + case NAND_CMD_RNDOUT: /* change read column */ + direction = DMA_FROM_DEVICE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_CH_READ_COL; + nfc_op.cmd_ctrl.nfc_ctrl.dc = 1; + break; + case NAND_CMD_READSTART: /* large page */ + direction = DMA_FROM_DEVICE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; + break; + case 
NAND_CMD_RNDOUTSTART: /* change read column */ + direction = DMA_FROM_DEVICE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_CH_READ_COL; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; + nfc->dma_offset = nfc_op.addr[1]; + nfc->dma_offset = (nfc->dma_offset << 8) + nfc_op.addr[0]; + break; + case NAND_CMD_SEQIN: /* program begin */ + if (nfc_op.cmd[0] == NAND_CMD_READ0) { + nfc_op.cmd[0] = NAND_CMD_SEQIN; + nfc_op.cmd[1] = NAND_CMD_PAGEPROG; + } + direction = DMA_TO_DEVICE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_PAGE_PRO; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; + break; + case NAND_CMD_RNDIN: /* change write column */ + direction = DMA_TO_DEVICE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_CH_WR_COL; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; + break; + case NAND_CMD_PAGEPROG: /* program end */ + nfc_op.cmd[0] = NAND_CMD_RNDIN; + nfc_op.cmd[1] = NAND_CMD_PAGEPROG; + direction = DMA_TO_DEVICE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_PAGE_PRO; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; + break; + default: + dev_err(nfc->dev, "Not support cmd %d.\n", nfc_op.cmd[1]); + ret = -EINVAL; + goto out; + } + + if ((nfc_op.data_instr) && (direction == DMA_TO_DEVICE)) + phytium_nfc_xfer_data_pio(nfc, subop, &nfc_op); + + phytium_nfc_prepare_cmd(chip, &nfc_op, direction); + phytium_nfc_send_cmd(chip, &nfc_op); + cond_delay(nfc_op.cle_ale_delay_ns); + nfc_op.rdy_timeout_ms = nfc_op.rdy_timeout_ms; + ret = phytium_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); + if (ret) + goto out; + + cond_delay(nfc_op.rdy_delay_ns); + + if ((nfc_op.data_instr) && (direction == DMA_FROM_DEVICE)) + phytium_nfc_xfer_data_pio(nfc, subop, &nfc_op); + +out: + return ret; +} + +static int phytium_nfc_read_id_type_exec(struct nand_chip *chip, + const struct nand_subop *subop) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + struct phytium_nfc_op nfc_op; + enum dma_data_direction direction; + u16 read_len = 0; + int ret; + u8 *buf = nfc->dma_buf; + + memset(nfc->dma_buf, 0, PAGE_SIZE); + direction = 
DMA_FROM_DEVICE; + + phytium_nfc_parse_instructions(chip, subop, &nfc_op); + read_len = nfc_op.page_cnt; + nfc_op.page_cnt = (read_len & 0x03) ? ((read_len & 0xFFFC) + 4) : read_len; + + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ_ID; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 0; + + phytium_nfc_prepare_cmd(chip, &nfc_op, direction); + phytium_nfc_send_cmd(chip, &nfc_op); + cond_delay(nfc_op.cle_ale_delay_ns); + ret = phytium_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); + if (ret) + return ret; + + cond_delay(nfc_op.rdy_delay_ns); + + if (!strncmp(nfc->dma_buf, "ONFI", 4)) { + nfc->inter_pro = NAND_ONFI; + } else if (!strncmp(nfc->dma_buf, "JEDEC", 5)) { + nfc->inter_pro = NAND_JEDEC; + if (buf[5] == 1) + nfc->inter_mode = ASYN_SDR; + else if (buf[5] == 2) + nfc->inter_mode = TOG_ASYN_DDR; + else if (buf[5] == 4) + nfc->inter_mode = ASYN_SDR; + } else { + nfc->inter_pro = NAND_OTHER; + } + + dev_info(nfc->dev, "Nand protocol: %d, interface mode: %d\n", + nfc->inter_pro, nfc->inter_mode); + + phytium_nfc_xfer_data_pio(nfc, subop, &nfc_op); + + return 0; +} + +static int phytium_nfc_read_status_exec(struct nand_chip *chip, + const struct nand_subop *subop) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + struct phytium_nfc_op nfc_op; + enum dma_data_direction direction; + u16 read_len = 0; + u32 timeout, count = 0; + int ret = 0; + + direction = DMA_FROM_DEVICE; + + phytium_nfc_parse_instructions(chip, subop, &nfc_op); + read_len = nfc_op.page_cnt; + nfc_op.page_cnt = (read_len & 0x03) ? ((read_len & 0xFFFC) + 4) : read_len; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ_STATUS; + phytium_nfc_prepare_cmd(chip, &nfc_op, direction); + +read_status_retry: + count++; + phytium_nfc_send_cmd(chip, &nfc_op); + cond_delay(nfc_op.cle_ale_delay_ns); + timeout = nfc_op.rdy_timeout_ms ? 
nfc_op.rdy_timeout_ms : 10; + ret = phytium_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); + if (ret) + goto out; + + phytium_nfc_xfer_data_pio(nfc, subop, &nfc_op); + + if (0xE0 != *(u8 *)(nfc->dma_buf)) { + dev_info(nfc->dev, "Retry to read status (%x)\n", *(u8 *)(nfc->dma_buf)); + + if (count < 5) + goto read_status_retry; + } + +out: + return ret; +} + +static int phytium_nfc_reset_cmd_type_exec(struct nand_chip *chip, + const struct nand_subop *subop) +{ + struct phytium_nfc_op nfc_op; + enum dma_data_direction direction; + + phytium_nfc_parse_instructions(chip, subop, &nfc_op); + + direction = DMA_NONE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_RESET; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; + phytium_nfc_prepare_cmd(chip, &nfc_op, direction); + phytium_nfc_send_cmd(chip, &nfc_op); + cond_delay(nfc_op.cle_ale_delay_ns); + + return phytium_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); +} + +static int phytium_nfc_erase_cmd_type_exec(struct nand_chip *chip, + const struct nand_subop *subop) +{ + struct phytium_nfc_op nfc_op; + enum dma_data_direction direction; + + phytium_nfc_parse_instructions(chip, subop, &nfc_op); + direction = DMA_NONE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_ERASE; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; + phytium_nfc_prepare_cmd(chip, &nfc_op, direction); + phytium_nfc_send_cmd(chip, &nfc_op); + cond_delay(nfc_op.cle_ale_delay_ns); + + return phytium_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); +} + +static int phytium_nfc_data_in_type_exec(struct nand_chip *chip, + const struct nand_subop *subop) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + struct phytium_nfc_op nfc_op; + struct nand_op_instr *instr; + unsigned int op_id; + unsigned int len; + unsigned int offset; + u8 *in = NULL; + + phytium_nfc_parse_instructions(chip, subop, &nfc_op); + if (nfc_op.data_instr->type != NAND_OP_DATA_IN_INSTR) { + dev_err(nfc->dev, "Phytium nfc instrs parser failed!\n"); + return -EINVAL; + } + + instr = nfc_op.data_instr; + op_id = 
nfc_op.data_instr_idx; + len = nand_subop_get_data_len(subop, op_id); + offset = nand_subop_get_data_start_off(subop, op_id); + in = instr->ctx.data.buf.in + offset; + + phytium_nfc_cmd_dump(nfc, &nfc_op, (u8 *)nfc->dsp_addr); + + memcpy(in, nfc->dma_buf + nfc->dma_offset, len); + nfc->dma_offset += len; + phytium_nfc_data_dump(nfc, in, len); + + return 0; +} + +static const struct nand_op_parser phytium_nfc_op_parser = NAND_OP_PARSER( + /* Naked commands not supported, use a function for each pattern */ + NAND_OP_PARSER_PATTERN( + phytium_nfc_read_id_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, PHYTIUM_NFC_ADDR_MAX_LEN), + NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)), + NAND_OP_PARSER_PATTERN( + phytium_nfc_erase_cmd_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, PHYTIUM_NFC_ADDR_MAX_LEN), + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)), + NAND_OP_PARSER_PATTERN( + phytium_nfc_read_status_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)), + NAND_OP_PARSER_PATTERN( + phytium_nfc_reset_cmd_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)), + NAND_OP_PARSER_PATTERN( + phytium_nfc_naked_waitrdy_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, PHYTIUM_NFC_ADDR_MAX_LEN), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(false), + NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)), + NAND_OP_PARSER_PATTERN( + phytium_nfc_naked_waitrdy_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, PHYTIUM_NFC_ADDR_MAX_LEN), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)), + NAND_OP_PARSER_PATTERN( + phytium_nfc_naked_waitrdy_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, PHYTIUM_NFC_ADDR_MAX_LEN), + NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 8), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)), + NAND_OP_PARSER_PATTERN( + 
phytium_nfc_naked_waitrdy_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, PHYTIUM_NFC_ADDR_MAX_LEN), + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)), + NAND_OP_PARSER_PATTERN( + phytium_nfc_data_in_type_exec, + NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)), + ); + +static int phytium_nfc_exec_op(struct nand_chip *chip, + const struct nand_operation *op, + bool check_only) +{ + return nand_op_parser_exec_op(chip, &phytium_nfc_op_parser, + op, check_only); +} + +static int phytium_nfc_reset(struct phytium_nfc *nfc) +{ + u32 value; + + phytium_write(nfc, NDIR_MASK, NDIR_ALL_INT); + phytium_write(nfc, NDSR, NDIR_ALL_INT); + + phytium_write(nfc, ND_ERR_CLR, 0x0F); + phytium_write(nfc, NDFIFO_CLR, 1); + + value = phytium_read(nfc, NDCR0); + phytium_write(nfc, NDCR0, value & ~(NDCR0_ECC_EN | NDCR0_SPARE_EN)); + + return 0; +} + +static void phytium_nfc_select_chip(struct mtd_info *mtd, int die_nr) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct phytium_nand_chip *phytium_nand = to_phytium_nand(chip); + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + + dev_dbg(nfc->dev, "Phytium nand selected chip %d\n", die_nr); + + if (chip == nfc->selected_chip && die_nr == phytium_nand->selected_die) + return; + + if (die_nr < 0 || die_nr >= phytium_nand->nsels) { + nfc->selected_chip = NULL; + phytium_nand->selected_die = -1; + return; + } + + phytium_nfc_reset(nfc); + + nfc->selected_chip = chip; + phytium_nand->selected_die = die_nr; +} + +static int phytium_nand_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + + if (section) + return -ERANGE; + + oobregion->length = chip->ecc.total; + oobregion->offset = mtd->oobsize - oobregion->length; + + return 0; +} + +static int phytium_nand_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct 
nand_chip *chip = mtd_to_nand(mtd); + + if (section) + return -ERANGE; + + /* + * Bootrom looks in bytes 0 & 5 for bad blocks for the + * 4KB page / 4bit BCH combination. + */ + if (mtd->writesize >= SZ_4K) + oobregion->offset = 6; + else + oobregion->offset = 2; + + oobregion->length = mtd->oobsize - chip->ecc.total - oobregion->offset; + + return 0; +} + +static const struct mtd_ooblayout_ops phytium_nand_ooblayout_ops = { + .ecc = phytium_nand_ooblayout_ecc, + .free = phytium_nand_ooblayout_free, +}; + +static void phytium_nfc_enable_hw_ecc(struct nand_chip *chip) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + u32 ndcr0 = phytium_read(nfc, NDCR0); + + if (!(ndcr0 & NDCR0_ECC_EN)) + phytium_write(nfc, NDCR0, ndcr0 | NDCR0_ECC_EN); +} + +static void phytium_nfc_disable_hw_ecc(struct nand_chip *chip) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + u32 ndcr0 = phytium_read(nfc, NDCR0); + + if (ndcr0 & NDCR0_ECC_EN) + phytium_write(nfc, NDCR0, ndcr0 & ~NDCR0_ECC_EN); +} + +static void nfc_irq_callback(struct work_struct *work) +{ + struct phytium_nfc *nfc = container_of(work, struct phytium_nfc, work); + + if (!nfc) + return; + + if (nfc_irq_complete) + complete_all(&nfc->complete); + + nfc_irq_st = 0; + nfc_irq_en = 0; + nfc_irq_complete = 0; +} + +static irqreturn_t phytium_nfc_isr(int irq, void *dev_id) +{ + struct phytium_nfc *nfc = dev_id; + u32 st = phytium_read(nfc, NDIR); + u32 ien = (~phytium_read(nfc, NDIR_MASK)) & NDIR_ALL_INT; + + if (!(st & ien)) + return IRQ_NONE; + + nfc_irq_st = st; + nfc_irq_en = ien; + phytium_nfc_disable_int(nfc, st & NDIR_ALL_INT); + phytium_write(nfc, 0xFD0, 0); + + if (st & (NDIR_CMD_FINISH | NDIR_DMA_FINISH)) { + if (st & NDIR_ECC_ERR) + nfc_ecc_err = 1; + phytium_write(nfc, NDIR, st); + nfc_irq_complete = 1; + } else if (st & (NDIR_FIFO_TIMEOUT | NDIR_PGFINISH)) { + phytium_write(nfc, NDIR, st); + phytium_nfc_enable_int(nfc, (~st) & (NDIR_DMA_FINISH_MASK | + NDIR_PGFINISH_MASK | + 
NDIR_FIFO_TIMEOUT_MASK | + NDIR_CMD_FINISH_MASK)); + nfc_irq_complete = 0; + } else if (st & NDIR_ECC_ERR) { + phytium_write(nfc, ND_ERR_CLR, 0x08); + phytium_write(nfc, NDIR, st); + phytium_write(nfc, NDFIFO_CLR, 0x01); + nfc_irq_complete = 1; + nfc_ecc_errover = 1; + } else { + phytium_write(nfc, NDIR, st); + nfc_irq_complete = 1; + } + + schedule_work(&nfc->work); + + return IRQ_HANDLED; +} + +static int phytium_nfc_hw_ecc_correct(struct nand_chip *chip, + char *buf, int len) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + u32 i, j, value, tmp; + int stat = 0; + + if (!buf) + return -EINVAL; + + for (i = 0; i < chip->ecc.steps; i++) { + for (j = 0; j < 2; j++) { + value = phytium_read(nfc, 0xB8 + 4 * (2 * i + j)); + dev_info(nfc->dev, "ECC_FLAG: offset:%x value:0x%08x\n", + 0xB8 + 4 * (2 * i + j), value); + + tmp = value & 0xFFFF; + if (tmp && (tmp <= 4096)) { + tmp--; + stat++; + dev_info(nfc->dev, "ECC_CORRECT %x %02x\n", + chip->ecc.size * i + (tmp >> 3), + buf[chip->ecc.size * i + (tmp >> 3)]); + dev_info(nfc->dev, "ECC_CORRECT xor %x %02x\n", + 0x01 << (tmp % 8), buf[chip->ecc.size * i + (tmp >> 3)]); + } else if (tmp > 4096) { + dev_info(nfc->dev, "ECC_CORRECT offset > 4096!\n"); + } + + tmp = (value >> 16) & 0xFFFF; + if (tmp && (tmp <= 4096)) { + tmp--; + stat++; + dev_info(nfc->dev, "ECC_CORRECT %x %02x\n", + chip->ecc.size * i + (tmp >> 3), + buf[chip->ecc.size * i + (tmp >> 3)]); + dev_info(nfc->dev, "ECC_CORRECT xor %x %02x\n", + chip->ecc.size * i + (tmp >> 3), + buf[chip->ecc.size * i + (tmp >> 3)]); + } else if (tmp > 4096) { + dev_info(nfc->dev, "ECC_CORRECT offset > 4096!\n"); + } + } + } + + return stat; +} + +static int phytium_nand_page_read(struct mtd_info *mtd, struct nand_chip *chip, + u8 *buf, u8 *oob_buf, int oob_len, int page, + bool read) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + struct phytium_nand_chip *phytium_nand = NULL; + const struct nand_sdr_timings *sdr = NULL; + struct 
phytium_nfc_op nfc_op; + enum dma_data_direction direction; + int ret = 0; + + memset(&nfc_op, 0, sizeof(nfc_op)); + phytium_nand = to_phytium_nand(chip); + sdr = nand_get_sdr_timings(&phytium_nand->chip.data_interface); + + memset(nfc->dma_buf, 0x0, mtd->writesize + mtd->oobsize); + direction = DMA_FROM_DEVICE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ; + nfc_op.cmd[0] = NAND_CMD_READ0; + nfc_op.cmd[1] = NAND_CMD_READSTART; + nfc_op.cmd_len = 2; + nfc_op.cle_ale_delay_ns = PSEC_TO_NSEC(sdr->tWB_max); + nfc_op.rdy_timeout_ms = PSEC_TO_MSEC(sdr->tR_max); + nfc_op.rdy_delay_ns = PSEC_TO_NSEC(sdr->tRR_min); + + nfc_op.cmd_ctrl.nfc_ctrl.dbc = 1; + nfc_op.addr[2] = page; + nfc_op.addr[3] = page >> 8; + nfc_op.addr[4] = page >> 16; + nfc_op.addr_len = 5; + nfc_op.cmd_ctrl.nfc_ctrl.addr_cyc = 0x05; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; + + nfc_op.page_cnt = mtd->writesize; + nfc_op.cmd_ctrl.nfc_ctrl.dc = 1; + nfc_op.cmd_ctrl.nfc_ctrl.ecc_en = 0; + + /* For data read/program */ + phytium_nfc_prepare_cmd(chip, &nfc_op, direction); + phytium_nfc_send_cmd(chip, &nfc_op); + cond_delay(nfc_op.cle_ale_delay_ns); + + ret = phytium_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); + if (ret) + return ret; + + if ((direction == DMA_FROM_DEVICE) && buf) + memcpy(buf, nfc->dma_buf, mtd->writesize); + + return ret; +} + +static int phytium_nand_oob_read(struct mtd_info *mtd, struct nand_chip *chip, + u8 *buf, u8 *oob_buf, int oob_len, int page, + bool read) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + struct phytium_nand_chip *phytium_nand = NULL; + const struct nand_sdr_timings *sdr = NULL; + struct phytium_nfc_op nfc_op; + enum dma_data_direction direction; + int ret = 0; + + memset(&nfc_op, 0, sizeof(nfc_op)); + phytium_nand = to_phytium_nand(chip); + sdr = nand_get_sdr_timings(&phytium_nand->chip.data_interface); + + memset(nfc->dma_buf, 0x00, mtd->writesize + mtd->oobsize); + direction = DMA_FROM_DEVICE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ; + 
nfc_op.cmd[0] = NAND_CMD_READ0; + nfc_op.cmd[1] = NAND_CMD_READSTART; + nfc_op.cmd_len = 2; + nfc_op.cle_ale_delay_ns = PSEC_TO_NSEC(sdr->tWB_max); + nfc_op.rdy_timeout_ms = PSEC_TO_MSEC(sdr->tR_max); + nfc_op.rdy_delay_ns = PSEC_TO_NSEC(sdr->tRR_min); + + nfc_op.cmd_ctrl.nfc_ctrl.dbc = 1; + nfc_op.addr[2] = page; + nfc_op.addr[3] = page >> 8; + nfc_op.addr[4] = page >> 16; + nfc_op.addr_len = 5; + nfc_op.cmd_ctrl.nfc_ctrl.addr_cyc = 0x05; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; + + nfc_op.page_cnt = oob_len; + nfc_op.cmd_ctrl.nfc_ctrl.dc = 1; + nfc_op.cmd_ctrl.nfc_ctrl.ecc_en = 0; + nfc_op.addr[0] = mtd->writesize & 0xFF; + nfc_op.addr[1] = (mtd->writesize >> 8) & 0xFF; + + /* For data read/program */ + phytium_nfc_prepare_cmd(chip, &nfc_op, direction); + phytium_nfc_send_cmd(chip, &nfc_op); + cond_delay(nfc_op.cle_ale_delay_ns); + + ret = phytium_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); + if (ret) + return ret; + + cond_delay(nfc_op.rdy_delay_ns); + + if (direction == DMA_FROM_DEVICE) + memcpy(oob_buf, nfc->dma_buf, oob_len); + + return ret; +} + +static int phytium_nand_get_ecc_total(struct mtd_info *mtd, + struct nand_ecc_ctrl *ecc) +{ + int ecc_total = 0; + + switch (mtd->writesize) { + case 0x200: + if (ecc->strength == 4) + ecc_total = 7; + else if (ecc->strength == 2) + ecc_total = 4; + break; + case 0x800: + if (ecc->strength == 4) + ecc_total = 0x1a; + else if (ecc->strength == 2) + ecc_total = 0xd; + break; + case 0x1000: + if (ecc->strength == 4) + ecc_total = 0x34; + else if (ecc->strength == 2) + ecc_total = 0x1a; + break; + case 0x2000: + if (ecc->strength == 4) + ecc_total = 0x68; + else if (ecc->strength == 2) + ecc_total = 0x34; + break; + case 0x4000: + if (ecc->strength == 4) + ecc_total = 0xd0; + else if (ecc->strength == 2) + ecc_total = 0x68; + break; + default: + break; + } + + return ecc_total; +} + +static int phytium_nand_page_read_hwecc(struct mtd_info *mtd, struct nand_chip *chip, + u8 *buf, u8 *oob_buf, int oob_len, int page, + bool 
read) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + struct phytium_nand_chip *phytium_nand = NULL; + const struct nand_sdr_timings *sdr = NULL; + struct phytium_nfc_op *nfc_op = NULL; + enum dma_data_direction direction; + u32 ecc_offset; + int max_bitflips = 0; + u32 nfc_state = 0; + int ret = 0; + int i; + + phytium_nand = to_phytium_nand(chip); + sdr = nand_get_sdr_timings(&phytium_nand->chip.data_interface); + + ecc_offset = phytium_nand->ecc.offset; + memset(nfc->dma_buf, 0x00, mtd->writesize + mtd->oobsize); + nfc_op = kzalloc(2 * sizeof(struct phytium_nfc_op), GFP_KERNEL); + if (!nfc_op) { + dev_err(nfc->dev, "Can't malloc space for phytium_nfc_op\n"); + return 0; + } + + nfc_op->cle_ale_delay_ns = PSEC_TO_NSEC(sdr->tWB_max); + nfc_op->rdy_timeout_ms = PSEC_TO_MSEC(sdr->tR_max); + nfc_op->rdy_delay_ns = PSEC_TO_NSEC(sdr->tRR_min); + + direction = DMA_FROM_DEVICE; + nfc_op->cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ; + nfc_op->cmd[0] = NAND_CMD_READ0; + nfc_op->cmd[1] = NAND_CMD_READSTART; + nfc_op->cmd_len = 2; + nfc_op->addr_len = 5; + nfc_op->cmd_ctrl.nfc_ctrl.dbc = 1; + nfc_op->addr[2] = page; + nfc_op->addr[3] = page >> 8; + nfc_op->addr[4] = page >> 16; + nfc_op->cmd_ctrl.nfc_ctrl.addr_cyc = 0x05; + nfc_op->cmd_ctrl.nfc_ctrl.dc = 1; + nfc_op->cmd_ctrl.nfc_ctrl.auto_rs = 1; + nfc_op->page_cnt = mtd->writesize; + nfc_op->cmd_ctrl.nfc_ctrl.nc = 1; + for (i = 0; i < PHYTIUM_NFC_ADDR_MAX_LEN; i++) + nfc_op->mem_addr_first[i] = (nfc->dma_phy_addr >> (8 * i)) & 0xFF; + + nfc_op++; + memcpy(nfc_op, nfc_op - 1, sizeof(struct phytium_nfc_op)); + nfc_op->cmd_ctrl.nfc_ctrl.cmd_type = TYPE_CH_READ_COL; + nfc_op->cmd[0] = NAND_CMD_RNDOUT; + nfc_op->cmd[1] = NAND_CMD_RNDOUTSTART; + memset(&nfc_op->addr, 0, PHYTIUM_NFC_ADDR_MAX_LEN); + nfc_op->addr_len = 2; + nfc_op->addr[0] = mtd->writesize + phytium_nand->ecc.offset; + nfc_op->addr[1] = (mtd->writesize + phytium_nand->ecc.offset) >> 8; + nfc_op->cmd_ctrl.nfc_ctrl.addr_cyc = 0x02; + nfc_op->page_cnt = 
phytium_nand_get_ecc_total(mtd, &chip->ecc); + nfc_op->cmd_ctrl.nfc_ctrl.nc = 0; + nfc_op->cmd_ctrl.nfc_ctrl.auto_rs = 0; + nfc_op->cmd_ctrl.nfc_ctrl.ecc_en = 1; + for (i = 0; i < PHYTIUM_NFC_ADDR_MAX_LEN; i++) + nfc_op->mem_addr_first[i] = + ((nfc->dma_phy_addr + mtd->writesize) >> (8 * i)) & 0xFF; + + nfc_op--; + phytium_nfc_prepare_cmd2(chip, nfc_op, direction, 2); + phytium_nfc_send_cmd2(chip, nfc_op, 2); + cond_delay(nfc_op->cle_ale_delay_ns); + + ret = phytium_nfc_wait_op(chip, nfc_op->rdy_timeout_ms); + if (ret) + return ret; + + cond_delay(nfc_op->rdy_delay_ns); + + if ((direction == DMA_FROM_DEVICE) && buf) { + nfc_state = phytium_read(nfc, NDSR); + if ((nfc_state & NDSR_ECC_ERROVER) || (nfc_ecc_errover == 1)) { + for (i = 0; i < mtd->writesize/16; i++) { + if (0xFF != *(u8 *)(nfc->dma_buf + i)) { + dev_info(nfc->dev, "NFC: NDSR_ECC_ERROVER %x\n", page); + mtd->ecc_stats.failed++; + mtd->ecc_stats.corrected += max_bitflips; + break; + } + } + } else if (nfc_state & NDSR_ECC_ERR) { + max_bitflips = phytium_nfc_hw_ecc_correct(chip, + nfc->dma_buf, mtd->writesize); + mtd->ecc_stats.corrected += max_bitflips; + dev_info(nfc->dev, "NFC: NDSR_ECC_ERR page:%x, bit:%d\n", + page, max_bitflips); + } + + memcpy(buf, nfc->dma_buf, mtd->writesize); + } + + return max_bitflips; +} + +static int phytium_nand_page_write(struct mtd_info *mtd, struct nand_chip *chip, + const u8 *buf, u8 *oob_buf, int oob_len, int page, + bool read) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + struct phytium_nand_chip *phytium_nand = NULL; + const struct nand_sdr_timings *sdr = NULL; + struct phytium_nfc_op nfc_op; + enum dma_data_direction direction; + int ret = 0; + + memset(&nfc_op, 0, sizeof(nfc_op)); + phytium_nand = to_phytium_nand(chip); + sdr = nand_get_sdr_timings(&phytium_nand->chip.data_interface); + + memcpy(nfc->dma_buf, buf, mtd->writesize); + direction = DMA_TO_DEVICE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_PAGE_PRO; + nfc_op.cmd[0] = NAND_CMD_SEQIN; 
+ nfc_op.cmd[1] = NAND_CMD_PAGEPROG; + nfc_op.cmd_len = 2; + nfc_op.addr_len = 5; + nfc_op.cle_ale_delay_ns = PSEC_TO_NSEC(sdr->tWB_max); + nfc_op.rdy_timeout_ms = PSEC_TO_MSEC(sdr->tPROG_max); + nfc_op.rdy_delay_ns = 0; + + nfc_op.cmd_ctrl.nfc_ctrl.dbc = 1; + nfc_op.addr[2] = page; + nfc_op.addr[3] = page >> 8; + nfc_op.addr[4] = page >> 16; + nfc_op.cmd_ctrl.nfc_ctrl.addr_cyc = 0x05; + nfc_op.cmd_ctrl.nfc_ctrl.dc = 1; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; + nfc_op.page_cnt = mtd->writesize; + + /* For data read/program */ + phytium_nfc_prepare_cmd(chip, &nfc_op, direction); + phytium_nfc_send_cmd(chip, &nfc_op); + cond_delay(nfc_op.cle_ale_delay_ns); + + ret = phytium_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); + if (ret) + goto out; + + cond_delay(nfc_op.rdy_delay_ns); +out: + return ret; +} + +static int phytium_nand_oob_write(struct mtd_info *mtd, struct nand_chip *chip, + u8 *buf, u8 *oob_buf, int oob_len, int page, + bool read) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + struct phytium_nand_chip *phytium_nand = NULL; + const struct nand_sdr_timings *sdr = NULL; + struct phytium_nfc_op nfc_op; + enum dma_data_direction direction; + int ret = 0; + + memset(&nfc_op, 0, sizeof(nfc_op)); + phytium_nand = to_phytium_nand(chip); + sdr = nand_get_sdr_timings(&phytium_nand->chip.data_interface); + + direction = DMA_TO_DEVICE; + nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_PAGE_PRO; + nfc_op.cmd[0] = NAND_CMD_SEQIN; + nfc_op.cmd[1] = NAND_CMD_PAGEPROG; + nfc_op.cmd_len = 2; + nfc_op.addr_len = 5; + nfc_op.cle_ale_delay_ns = PSEC_TO_NSEC(sdr->tWB_max); + nfc_op.rdy_timeout_ms = PSEC_TO_MSEC(sdr->tPROG_max); + nfc_op.rdy_delay_ns = 0; + + nfc_op.cmd_ctrl.nfc_ctrl.dbc = 1; + nfc_op.addr[2] = page; + nfc_op.addr[3] = page >> 8; + nfc_op.addr[4] = page >> 16; + nfc_op.cmd_ctrl.nfc_ctrl.addr_cyc = 0x05; + nfc_op.cmd_ctrl.nfc_ctrl.dc = 1; + nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; + + nfc_op.page_cnt = oob_len; + nfc_op.cmd_ctrl.nfc_ctrl.ecc_en = 0; + 
nfc_op.addr[0] = mtd->writesize & 0xFF; + nfc_op.addr[1] = (mtd->writesize >> 8) & 0xFF; + nfc_op.cmd_ctrl.nfc_ctrl.ecc_en = 0; + memcpy(nfc->dma_buf, oob_buf, mtd->oobsize); + + /* For data read/program */ + phytium_nfc_prepare_cmd(chip, &nfc_op, direction); + phytium_nfc_send_cmd(chip, &nfc_op); + cond_delay(nfc_op.cle_ale_delay_ns); + + ret = phytium_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); + if (ret) + goto out; + + cond_delay(nfc_op.rdy_delay_ns); +out: + return ret; +} + +static int phytium_nand_page_write_hwecc(struct mtd_info *mtd, struct nand_chip *chip, + const u8 *buf, u8 *oob_buf, int oob_len, int page, + bool read) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + struct phytium_nand_chip *phytium_nand = NULL; + const struct nand_sdr_timings *sdr = NULL; + struct phytium_nfc_op *nfc_op; + enum dma_data_direction direction; + u32 ecc_offset; + int ret = 0; + int i; + + phytium_nand = to_phytium_nand(chip); + sdr = nand_get_sdr_timings(&phytium_nand->chip.data_interface); + ecc_offset = phytium_nand->ecc.offset; + + nfc_op = kzalloc(2 * sizeof(struct phytium_nfc_op), GFP_KERNEL); + if (!nfc_op) { + dev_err(nfc->dev, "Can't malloc space for phytium_nfc_op\n"); + return 0; + } + + nfc_op->cle_ale_delay_ns = PSEC_TO_NSEC(sdr->tWB_max); + nfc_op->rdy_timeout_ms = PSEC_TO_MSEC(sdr->tR_max); + nfc_op->rdy_delay_ns = PSEC_TO_NSEC(sdr->tRR_min); + + direction = DMA_TO_DEVICE; + nfc_op->cmd_ctrl.nfc_ctrl.cmd_type = TYPE_CH_ROW_ADDR; + nfc_op->cmd[0] = NAND_CMD_SEQIN; + nfc_op->cmd_len = 1; + nfc_op->addr_len = 5; + nfc_op->cmd_ctrl.nfc_ctrl.dbc = 0; + nfc_op->addr[2] = page; + nfc_op->addr[3] = page >> 8; + nfc_op->addr[4] = page >> 16; + nfc_op->cmd_ctrl.nfc_ctrl.addr_cyc = 0x05; + nfc_op->cmd_ctrl.nfc_ctrl.auto_rs = 0; + nfc_op->cmd_ctrl.nfc_ctrl.nc = 1; + for (i = 0; i < PHYTIUM_NFC_ADDR_MAX_LEN; i++) + nfc_op->mem_addr_first[i] = (nfc->dma_phy_addr >> (8 * i)) & 0xFF; + + /* The first dsp must have data to transfer */ + memcpy(nfc->dma_buf, 
buf, mtd->writesize); + nfc_op->page_cnt = mtd->writesize; + nfc_op->cmd_ctrl.nfc_ctrl.dc = 1; + + nfc_op++; + memcpy(nfc_op, nfc_op - 1, sizeof(struct phytium_nfc_op)); + nfc_op->cmd_ctrl.nfc_ctrl.cmd_type = TYPE_PAGE_PRO; + nfc_op->cmd_ctrl.nfc_ctrl.dbc = 1; + nfc_op->cmd_ctrl.nfc_ctrl.auto_rs = 1; + nfc_op->cmd[0] = NAND_CMD_RNDIN; + nfc_op->cmd[1] = NAND_CMD_PAGEPROG; + memset(&nfc_op->addr, 0, PHYTIUM_NFC_ADDR_MAX_LEN); + nfc_op->addr_len = 2; + nfc_op->cmd_len = 2; + nfc_op->addr[0] = mtd->writesize + ecc_offset; + nfc_op->addr[1] = (mtd->writesize + ecc_offset) >> 8; + nfc_op->cmd_ctrl.nfc_ctrl.addr_cyc = 0x02; + nfc_op->page_cnt = phytium_nand_get_ecc_total(mtd, &chip->ecc); + nfc_op->cmd_ctrl.nfc_ctrl.nc = 0; + nfc_op->cmd_ctrl.nfc_ctrl.dc = 1; + nfc_op->cmd_ctrl.nfc_ctrl.ecc_en = 1; + for (i = 0; i < PHYTIUM_NFC_ADDR_MAX_LEN; i++) + nfc_op->mem_addr_first[i] = + ((nfc->dma_phy_addr + mtd->writesize + ecc_offset) >> (8 * i)) & 0xFF; + + /* when enable ECC, must offer ecc_offset of oob, but no oobdata */ + nfc_op--; + phytium_nfc_prepare_cmd2(chip, nfc_op, direction, 2); + phytium_nfc_send_cmd2(chip, nfc_op, 2); + cond_delay(nfc_op->cle_ale_delay_ns); + + ret = phytium_nfc_wait_op(chip, nfc_op->rdy_timeout_ms); + if (ret) + goto out; + + cond_delay(nfc_op->rdy_delay_ns); +out: + return ret; +} + +static int phytium_nfc_hw_ecc_bch_read_page_raw(struct mtd_info *mtd, + struct nand_chip *chip, + u8 *buf, int oob_required, + int page) +{ + u32 oob_len = oob_required ? 
mtd->oobsize : 0; + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + int ret; + + ret = phytium_nand_page_read(mtd, chip, buf, NULL, 0, page, true); + if (oob_required) + ret = phytium_nand_oob_read(mtd, chip, NULL, chip->oob_poi, + oob_len, page, true); + + phytium_nfc_data_dump(nfc, buf, mtd->writesize); + + return ret; +} + +static int phytium_nfc_hw_ecc_bch_read_oob_raw(struct mtd_info *mtd, + struct nand_chip *chip, int page) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + int ret; + + /* Invalidate page cache */ + chip->pagebuf = -1; + memset(chip->oob_poi, 0xFF, mtd->oobsize); + + ret = phytium_nand_oob_read(mtd, chip, NULL, chip->oob_poi, + mtd->oobsize, page, true); + + phytium_nfc_data_dump(nfc, chip->oob_poi, mtd->oobsize); + + return ret; +} + +static int phytium_nfc_hw_ecc_bch_read_page(struct mtd_info *mtd, + struct nand_chip *chip, + u8 *buf, int oob_required, + int page) +{ + int ret; + u32 oob_len = oob_required ? mtd->oobsize : 0; + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + struct phytium_nand_chip *phytium_nand = NULL; + + phytium_nand = to_phytium_nand(chip); + + phytium_nfc_enable_hw_ecc(chip); + ret = phytium_nand_page_read_hwecc(mtd, chip, buf, NULL, + 0, page, true); + phytium_nfc_disable_hw_ecc(chip); + + if (oob_required) { + oob_len = mtd->oobsize; + ret = phytium_nand_oob_read(mtd, chip, NULL, chip->oob_poi, + oob_len, page, true); + } + + phytium_nfc_data_dump(nfc, buf, mtd->writesize); + + return ret; +} + +static int phytium_nfc_hw_ecc_bch_read_oob(struct mtd_info *mtd, + struct nand_chip *chip, + int page) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + u32 oob_len = mtd->oobsize; + int ret; + + ret = phytium_nand_oob_read(mtd, chip, NULL, chip->oob_poi, + oob_len, page, true); + + phytium_nfc_data_dump(nfc, chip->oob_poi, oob_len); + + return ret; +} + +static int phytium_nfc_hw_ecc_bch_write_page_raw(struct mtd_info *mtd, + struct nand_chip *chip, + const u8 
*buf, + int oob_required, int page) +{ + void *oob_buf = oob_required ? chip->oob_poi : NULL; + + if (oob_required) + phytium_nand_oob_write(mtd, chip, NULL, oob_buf, + mtd->oobsize, page, false); + + return phytium_nand_page_write(mtd, chip, buf, NULL, + 0, page, false); +} + +static int phytium_nfc_hw_ecc_bch_write_page(struct mtd_info *mtd, + struct nand_chip *chip, + const u8 *buf, + int oob_required, int page) +{ + int ret; + void *oob_buf = oob_required ? chip->oob_poi : NULL; + u32 oob_len; + + if (oob_required) { + oob_len = mtd->oobsize; + phytium_nand_oob_write(mtd, chip, NULL, oob_buf, + oob_len, page, false); + } + + phytium_nfc_enable_hw_ecc(chip); + ret = phytium_nand_page_write_hwecc(mtd, chip, buf, NULL, + 0, page, false); + phytium_nfc_disable_hw_ecc(chip); + + return ret; +} + +static int phytium_nfc_hw_ecc_bch_write_oob_raw(struct mtd_info *mtd, + struct nand_chip *chip, int page) +{ + return phytium_nand_oob_write(mtd, chip, NULL, chip->oob_poi, + mtd->oobsize, page, false); +} + +static int phytium_nfc_hw_ecc_bch_write_oob(struct mtd_info *mtd, + struct nand_chip *chip, int page) +{ + struct phytium_nand_chip *phytium_nand = to_phytium_nand(chip); + u32 oob_len = mtd->oobsize - phytium_nand->ecc.length; + + return phytium_nand_oob_write(mtd, chip, NULL, chip->oob_poi, + oob_len, page, false); +} + +static int phytium_nand_hw_ecc_ctrl_init(struct mtd_info *mtd, + struct nand_ecc_ctrl *ecc) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + + if ((mtd->writesize + mtd->oobsize > MAX_CHUNK_SIZE)) + return -ENOTSUPP; + + chip->ecc.algo = NAND_ECC_BCH; + ecc->read_page_raw = phytium_nfc_hw_ecc_bch_read_page_raw; + ecc->read_page = phytium_nfc_hw_ecc_bch_read_page; + ecc->read_oob_raw = phytium_nfc_hw_ecc_bch_read_oob_raw; + ecc->read_oob = phytium_nfc_hw_ecc_bch_read_oob; + ecc->write_page_raw = phytium_nfc_hw_ecc_bch_write_page_raw; + ecc->write_page = phytium_nfc_hw_ecc_bch_write_page; + ecc->write_oob_raw = phytium_nfc_hw_ecc_bch_write_oob_raw; + 
ecc->write_oob = phytium_nfc_hw_ecc_bch_write_oob; + + return 0; +} + +static int phytium_nand_ecc_init(struct mtd_info *mtd, + struct nand_ecc_ctrl *ecc) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + int ret = 0; + + if (ecc->mode != NAND_ECC_NONE && (!ecc->size || !ecc->strength)) { + if (chip->ecc_step_ds && chip->ecc_strength_ds) { + ecc->size = chip->ecc_step_ds; + ecc->strength = chip->ecc_strength_ds; + } else { + ecc->size = 512; + ecc->strength = 1; + } + } + + mtd_set_ooblayout(mtd, &phytium_nand_ooblayout_ops); + + switch (ecc->mode) { + case NAND_ECC_HW: + ret = phytium_nand_hw_ecc_ctrl_init(mtd, ecc); + break; + case NAND_ECC_NONE: + ecc->read_page_raw = phytium_nfc_hw_ecc_bch_read_page_raw; + ecc->read_oob_raw = phytium_nfc_hw_ecc_bch_read_oob; + ecc->write_page_raw = phytium_nfc_hw_ecc_bch_write_page_raw; + ecc->write_oob_raw = phytium_nfc_hw_ecc_bch_write_oob_raw; + ecc->read_page = ecc->read_page_raw; + ecc->read_oob = ecc->read_oob_raw; + ecc->write_page = ecc->write_page_raw; + ecc->write_oob = ecc->write_oob_raw; + break; + case NAND_ECC_SOFT: + case NAND_ECC_ON_DIE: + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static u8 bbt_pattern[] = {'P', 'H', 'Y', 'b', 't', '0' }; +static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'Y', 'H', 'P' }; + +static struct nand_bbt_descr bbt_main_descr = { + .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE | + NAND_BBT_2BIT | NAND_BBT_VERSION, + .offs = 8, + .len = 6, + .veroffs = 14, + .maxblocks = 8, /* Last 8 blocks in each chip */ + .pattern = bbt_pattern +}; + +static struct nand_bbt_descr bbt_mirror_descr = { + .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE | + NAND_BBT_2BIT | NAND_BBT_VERSION, + .offs = 8, + .len = 6, + .veroffs = 14, + .maxblocks = 8, /* Last 8 blocks in each chip */ + .pattern = bbt_mirror_pattern +}; + +static int phytium_nand_attach_chip(struct nand_chip *chip) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + struct phytium_nand_chip 
*phytium_nand = to_phytium_nand(chip); + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + int ret = 0; + + if (nfc->caps->flash_bbt) + chip->bbt_options |= NAND_BBT_USE_FLASH; + + if (chip->bbt_options & NAND_BBT_USE_FLASH) { + /* + * We'll use a bad block table stored in-flash and don't + * allow writing the bad block marker to the flash. + */ + chip->bbt_options |= NAND_BBT_NO_OOB_BBM; + chip->bbt_td = &bbt_main_descr; + chip->bbt_md = &bbt_mirror_descr; + } + + if (chip->options & NAND_BUSWIDTH_16) + phytium_nand->ndcr |= NDCR0_WIDTH; + + /* + * On small page NANDs, only one cycle is needed to pass the + * column address. + */ + if (mtd->writesize <= 512) + phytium_nand->addr_cyc = 1; + else + phytium_nand->addr_cyc = 2; + + /* + * Now add the number of cycles needed to pass the row + * address. + * + * Addressing a chip using CS 2 or 3 should also need the third row + * cycle but due to inconsistance in the documentation and lack of + * hardware to test this situation, this case is not supported. 
+ */ + if (chip->options & NAND_ROW_ADDR_3) + phytium_nand->addr_cyc += 3; + else + phytium_nand->addr_cyc += 2; + + if (nfc->caps) { + if (chip->ecc.mode == NAND_ECC_HW) { + chip->ecc.size = nfc->caps->ecc_step_size; + chip->ecc.strength = nfc->caps->ecc_strength; + chip->ecc.bytes = 7; + } else { + chip->ecc.size = 512; + chip->ecc.strength = 1; + chip->ecc.bytes = 0; + } + chip->ecc.mode = NAND_ECC_HW; + } + + if (chip->ecc.strength == 0x04) + phytium_nand->ndcr |= NDCR0_ECC_STREN(4); + else if (chip->ecc.strength == 0x02) + phytium_nand->ndcr |= NDCR0_ECC_STREN(2); + else + phytium_nand->ndcr |= NDCR0_ECC_STREN(0); + + ret = phytium_nand_ecc_init(mtd, &chip->ecc); + if (ret) { + dev_err(nfc->dev, "ECC init failed: %d\n", ret); + goto out; + } + + /* + * Subpage write not available with hardware ECC, prohibit also + * subpage read as in userspace subpage access would still be + * allowed and subpage write, if used, would lead to numerous + * uncorrectable ECC errors. + */ + if (chip->ecc.mode == NAND_ECC_HW) + chip->options |= NAND_NO_SUBPAGE_WRITE; + + /* + * We keep the MTD name unchanged to avoid breaking platforms + * where the MTD cmdline parser is used and the bootloader + * has not been updated to use the new naming scheme. 
+ */ + if (nfc->caps->legacy_of_bindings) + mtd->name = "x100_nand-0"; + +out: + return ret; +} + +static const struct nand_controller_ops phytium_nand_controller_ops = { + .attach_chip = phytium_nand_attach_chip, +}; + +static void phytium_nand_chips_cleanup(struct phytium_nfc *nfc) +{ + struct phytium_nand_chip *entry, *temp; + + list_for_each_entry_safe(entry, temp, &nfc->chips, node) { + nand_release(nand_to_mtd(&entry->chip)); + list_del(&entry->node); + } +} + +static int phytium_nfc_init_dma(struct phytium_nfc *nfc) +{ + int ret; + + ret = dma_set_mask_and_coherent(nfc->dev, DMA_BIT_MASK(64)); + if (ret) + return ret; + + nfc->dsp_addr = dma_alloc_coherent(nfc->dev, PAGE_SIZE, + &nfc->dsp_phy_addr, GFP_KERNEL | GFP_DMA); + if (!nfc->dsp_addr) + return -ENOMEM; + + nfc->dma_buf = dma_alloc_coherent(nfc->dev, MAX_CHUNK_SIZE, + &nfc->dma_phy_addr, GFP_KERNEL | GFP_DMA); + if (!nfc->dma_buf) + return -ENOMEM; + + dev_info(nfc->dev, "NFC address dsp_phy_addr:%llx, dma_phy_addr:%llx\n", + nfc->dsp_phy_addr, nfc->dma_phy_addr); + + return 0; +} + +int phytium_nfc_init(struct phytium_nfc *nfc) +{ + u32 value; + + nfc->inter_mode = ASYN_SDR; + nfc->timing_mode = ASY_MODE0; + + phytium_write(nfc, NDCR1, NDCR1_SAMPL_PHASE(1)); + phytium_write(nfc, ND_INTERVAL_TIME, 1); + phytium_write(nfc, NDFIFO_LEVEL0, 4); + phytium_write(nfc, NDFIFO_LEVEL1, 4); + phytium_write(nfc, NDFIFO_CLR, 1); + phytium_write(nfc, ND_ERR_CLR, 1); + + /* Configure the DMA */ + phytium_nfc_init_dma(nfc); + + phytium_write(nfc, NDCR0, + NDCR0_IN_MODE(nfc->inter_mode) | NDCR0_ECC_STREN(4)); + + phytium_nfc_reset(nfc); + + value = phytium_read(nfc, NDCR0); + phytium_write(nfc, NDCR0, value | NDCR0_EN); + + nfc_ecc_errover = 0; + + return 0; +} +EXPORT_SYMBOL_GPL(phytium_nfc_init); + +static int phytium_nfc_setup_data_interface(struct mtd_info *mtd, int chipnr, + const struct nand_data_interface + *conf) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct phytium_nfc *nfc = 
to_phytium_nfc(chip->controller); + unsigned int period_ns = 2; + const struct nand_sdr_timings *sdr; + struct phytium_nfc_timings nfc_tmg; + int read_delay; + + sdr = nand_get_sdr_timings(conf); + if (IS_ERR(sdr)) + return PTR_ERR(sdr); + + nfc_tmg.tRP = TO_CYCLES(DIV_ROUND_UP(sdr->tRC_min, 2), period_ns) - 1; + nfc_tmg.tRH = nfc_tmg.tRP; + nfc_tmg.tWP = TO_CYCLES(DIV_ROUND_UP(sdr->tWC_min, 2), period_ns) - 1; + nfc_tmg.tWH = nfc_tmg.tWP; + nfc_tmg.tCS = TO_CYCLES(sdr->tCS_min, period_ns); + nfc_tmg.tCH = TO_CYCLES(sdr->tCH_min, period_ns) - 1; + nfc_tmg.tADL = TO_CYCLES(sdr->tADL_min, period_ns); + dev_info(nfc->dev, "[nfc_tmg]tRP: %d, tRH:%d, tWP:%d tWH:%d\n", + nfc_tmg.tRP, nfc_tmg.tRH, nfc_tmg.tWP, nfc_tmg.tWH); + dev_info(nfc->dev, "[nfc_tmg]tCS: %d, tCH:%d, tADL:%d\n", + nfc_tmg.tCS, nfc_tmg.tCH, nfc_tmg.tADL); + + read_delay = sdr->tRC_min >= 30000 ? + MIN_RD_DEL_CNT : MIN_RD_DEL_CNT + nfc_tmg.tRH; + + nfc_tmg.tAR = TO_CYCLES(sdr->tAR_min, period_ns); + nfc_tmg.tWHR = TO_CYCLES(max_t(int, sdr->tWHR_min, sdr->tCCS_min), + period_ns) - 2, + nfc_tmg.tRHW = TO_CYCLES(max_t(int, sdr->tRHW_min, sdr->tCCS_min), + period_ns); + dev_info(nfc->dev, "[nfc_tmg]tAR: %d, tWHR:%d, tRHW:%d\n", + nfc_tmg.tAR, nfc_tmg.tWHR, nfc_tmg.tRHW); + + nfc_tmg.tR = TO_CYCLES(sdr->tWB_max, period_ns); + + if (chipnr < 0) + return 0; + + if (nfc_tmg.tWP > 0x10) + nfc->timing_mode = ASY_MODE1; + else if (nfc_tmg.tWP < 0x0D) + nfc->timing_mode = ASY_MODE3; + + if (nfc->inter_mode == ONFI_DDR) + nfc->timing_mode = SYN_MODE3; + + phytium_nfc_default_data_interface(nfc); + + return 0; +} + +static int phytium_nand_chip_init(struct phytium_nfc *nfc) +{ + struct device *dev = nfc->dev; + struct phytium_nand_chip *phytium_nand; + struct mtd_info *mtd; + struct nand_chip *chip; + int ret; + + /* Alloc the nand chip structure */ + phytium_nand = devm_kzalloc(dev, sizeof(*phytium_nand), GFP_KERNEL); + if (!phytium_nand) + return -ENOMEM; + + phytium_nand->nsels = 1; + phytium_nand->selected_die = 
-1; + + chip = &phytium_nand->chip; + chip->controller = &nfc->controller; + chip->exec_op = phytium_nfc_exec_op; + chip->select_chip = phytium_nfc_select_chip; + chip->setup_data_interface = phytium_nfc_setup_data_interface; + phytium_nfc_default_data_interface(nfc); + + mtd = nand_to_mtd(chip); + mtd->dev.parent = dev; + mtd->owner = THIS_MODULE; + + /* + * Default to HW ECC engine mode. If the nand-ecc-mode property is given + * in the DT node, this entry will be overwritten in nand_scan_ident(). + */ + chip->ecc.mode = NAND_ECC_HW; + + chip->options |= NAND_BUSWIDTH_AUTO; + chip->options |= NAND_SKIP_BBTSCAN; + chip->bbt_options |= NAND_BBT_NO_OOB; + + ret = nand_scan(mtd, phytium_nand->nsels); + if (ret) { + dev_err(dev, "could not scan the nand chip\n"); + goto out; + } + + ret = mtd_device_register(mtd, nfc->caps->parts, nfc->caps->nr_parts - 1); + if (ret) { + dev_err(dev, "failed to register mtd device: %d\n", ret); + nand_release(mtd); + return ret; + } + + phytium_nand->ecc.length = phytium_nand_get_ecc_total(mtd, &chip->ecc); + phytium_nand->ecc.offset = mtd->oobsize - phytium_nand->ecc.length; + chip->ecc.total = phytium_nand_get_ecc_total(mtd, &chip->ecc); + + mtd_ooblayout_ecc(mtd, 0, &phytium_nand->ecc); + + dev_info(dev, "ooblayout ecc offset: %x, length: %x\n", + phytium_nand->ecc.offset, phytium_nand->ecc.length); + +out: + list_add_tail(&phytium_nand->node, &nfc->chips); + return 0; +} + +int phytium_nand_init(struct phytium_nfc *nfc) +{ + int ret; + + nand_controller_init(&nfc->controller); + nfc->controller.ops = &phytium_nand_controller_ops; + INIT_LIST_HEAD(&nfc->chips); + + init_completion(&nfc->complete); + + /* Init the controller and then probe the chips */ + ret = phytium_nfc_init(nfc); + if (ret) + goto out; + + ret = devm_request_irq(nfc->dev, nfc->irq, phytium_nfc_isr, + IRQF_SHARED, "phytium-nfc", nfc); + if (ret) + goto out; + + INIT_WORK(&nfc->work, nfc_irq_callback); + + ret = phytium_nand_chip_init(nfc); + if (ret) + goto out; + 
+ spin_lock_init(&nfc->spinlock); + +out: + return ret; +} +EXPORT_SYMBOL_GPL(phytium_nand_init); + +int phytium_nand_remove(struct phytium_nfc *nfc) +{ + phytium_nand_chips_cleanup(nfc); + + return 0; +} +EXPORT_SYMBOL_GPL(phytium_nand_remove); + +static int phytium_nfc_wait_ndrun(struct nand_chip *chip) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + int ret = 0; + u32 val; + + ret = readl_relaxed_poll_timeout(nfc->regs + NDSR, val, + (val & NDSR_RB) == 0, + 0, 100 * 1000); + if (ret) { + dev_err(nfc->dev, "Timeout on NAND controller run mode\n"); + ret = -EAGAIN; + } + + return ret; +} + +int phytium_nand_prepare(struct phytium_nfc *nfc) +{ + struct phytium_nand_chip *chip = NULL; + + list_for_each_entry(chip, &nfc->chips, node) + phytium_nfc_wait_ndrun(&chip->chip); + + return 0; +} +EXPORT_SYMBOL_GPL(phytium_nand_prepare); + +int phytium_nand_resume(struct phytium_nfc *nfc) +{ + nfc->selected_chip = NULL; + phytium_nfc_init(nfc); + phytium_nfc_default_data_interface(nfc); + + return 0; +} +EXPORT_SYMBOL_GPL(phytium_nand_resume); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Phytium NAND controller driver"); +MODULE_AUTHOR("Zhu Mingshuai "); diff --git a/drivers/mtd/nand/raw/phytium_nand.h b/drivers/mtd/nand/raw/phytium_nand.h new file mode 100644 index 000000000000..8a1c6d682c65 --- /dev/null +++ b/drivers/mtd/nand/raw/phytium_nand.h @@ -0,0 +1,441 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Phytium NAND flash controller driver + * + * Copyright (C) 2020-2021, Phytium Technology, Co., Ltd. 
+ */ +#ifndef PHYTIUM_NAND_H +#define PHYTIUM_NAND_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* NFC does not support transfers of larger chunks at a time */ +#define MAX_PAGE_NUM 16 +#define MAX_CHUNK_SIZE ((1024 + 76) * 16) + +#define POLL_PERIOD 0 +#define POLL_TIMEOUT 100000 +/* Interrupt maximum wait period in ms */ +#define IRQ_TIMEOUT 1000 + +/* Latency in clock cycles between SoC pins and NFC logic */ +#define MIN_RD_DEL_CNT 3 + +#define PHYTIUM_NFC_ADDR_MAX_LEN 5 +#define PHYTIUM_NFC_DSP_SIZE 16 + +/* NAND controller flash control register */ +#define NDCR0 0x00 +#define NDCR0_EN BIT(0) +#define NDCR0_WIDTH BIT(1) +#define NDCR0_IN_MODE(x) (min_t(u32, x, 0x3) << 2) +#define NDCR0_ECC_EN BIT(4) +#define NDCR0_ECC_STREN(x) (min_t(u32, x, 0x7) << 5) +#define NDCR0_SPARE_EN BIT(8) +#define NDCR0_SPARE_SIZE(x) (min_t(u32, x, 0xFFF) << 9) +#define NDCR0_GENERIC_FIELDS_MASK + +#define NDCR1 0x04 +#define NDCR1_SAMPL_PHASE(x) min_t(u32, x, 0xFFFF) + +#define NDAR0 0x08 + +#define NDAR1 0x0C +#define NDAR1_H8(x) min_t(u32, x, 0xFF) +#define NDAR1_DMA_EN BIT(8) +#define NDAR1_EMPTY(x) (min_t(u32, x, 0x7F) << 9) +#define NDAR1_DMA_RLEN(x) (min_t(u32, x, 0xFF) << 9) +#define NDAR1_DMA_WLEN(x) (min_t(u32, x, 0xFF) << 9) + +#define NDTR0 0x10 +#define NDTR0_TCS_TCLS(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR0_TCLS_TWP(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDTR1 0x14 +#define NDTR1_TWH(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR1_TWP(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDTR2 0x18 +#define NDTR2_TCH_TCLH(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR2_TCLH_TWH(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDTR3 0x1c +#define NDTR3_TDQ_EN(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR3_TCH_TWH(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDTR4 0x20 +#define NDTR4_TWHR_SMX(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR4_TREH(x) (min_t(u32, x, 0xFFFF) << 16) + +#define 
NDTR5 0x24 +#define NDTR5_TRC(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR5_TADL_SMX(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDTR6 0x28 +#define NDTR6_TCAD_TCS_SMX(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR6_RES(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDTR7 0x2c +#define NDTR7_TCK(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR7_TDQ_EN(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDTR8 0x30 +#define NDTR8_TCAD_TCK_SMX(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR8_HF_TCK(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDTR9 0x34 +#define NDTR9_TWHR(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR9_TCCS_TCALS_SMX(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDTR10 0x38 +#define NDTR10_TCK(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR10_MTCK(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDTR11 0x3c +#define NDTR11_TCK_TCALS(x) (min_t(u32, x, 0xFFFF) << 16) +#define NDTR11_RES(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDTR12 0x40 +#define NDTR12_TWRCK(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR12_RES(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDTR13 0x44 +#define NDTR13_TWRHCA(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR13_TRLCA(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDTR14 0x48 +#define NDTR14_TWRHCE(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR14_RES(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDTR15 0x4c +#define NDTR15_TCDQSS_TWPRE_TDS(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR15_HFTDSC(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDTR16 0x50 +#define NDTR16_TWPST_TDH(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR16_TWPSTH(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDTR17 0x54 +#define NDTR17_TCS_TRPRE(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR17_TRELDQS(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDTR18 0x58 +#define NDTR18_TRPST_TDQSRE(x) (min_t(u32, x, 0xFFFF) << 0) +#define NDTR18_RES(x) (min_t(u32, x, 0xFFFF) << 16) + +#define NDFIFO 0x5c +#define NDFIFO_REV (min_t(u32, x, 0) << 12) +#define NDFIFO_FULL BIT(11) +#define NDFIFO_EMP BIT(10) 
+#define NDFIFO_CNT(x) (min_t(u32, x, 0x3F) << 0) + +#define ND_INTERVAL_TIME 0x60 +#define NDCMD_INTERVAL_TIME 0x64 +#define NDFIFO_TIMEOUT 0x68 +#define NDFIFO_LEVEL0 0x6c +#define NDFIFO_LEVEL1 0x70 +#define NDWP 0x74 +#define NDFIFO_CLR 0x78 + +#define NDSR 0x7c +#define NDSR_BUSY BIT(0) +#define NDSR_DMA_BUSY BIT(1) +#define NDSR_DMA_PGFINISH BIT(2) +#define NDSR_DMA_FINISH BIT(3) +#define NDSR_FIFO_EMP BIT(4) +#define NDSR_FIFO_FULL BIT(5) +#define NDSR_FIFO_TIMEOUT BIT(6) +#define NDSR_CS(x) (min_t(u32, x, 0xF) << 7) +#define NDSR_CMD_PGFINISH BIT(11) +#define NDSR_PG_PGFINISH BIT(12) +#define NDSR_RE BIT(13) +#define NDSR_DQS BIT(14) +#define NDSR_RB BIT(15) +#define NDSR_ECC_BUSY BIT(16) +#define NDSR_ECC_FINISH BIT(17) +#define NDSR_ECC_RIGHT BIT(18) +#define NDSR_ECC_ERR BIT(19) +#define NDSR_ECC_ERROVER BIT(20) +#define NDSR_AXI_DSP_ERR BIT(21) +#define NDSR_AXI_RD_ERR BIT(22) +#define NDSR_AXI_WR_ERR BIT(23) + +#define NDIR_MASK 0x80 +#define NDIR_BUSY_MASK BIT(0) +#define NDIR_DMA_BUSY_MASK BIT(1) +#define NDIR_DMA_PGFINISH_MASK BIT(2) +#define NDIR_DMA_FINISH_MASK BIT(3) +#define NDIR_FIFO_EMP_MASK BIT(4) +#define NDIR_FIFO_FULL_MASK BIT(5) +#define NDIR_FIFO_TIMEOUT_MASK BIT(6) +#define NDIR_CMD_FINISH_MASK BIT(7) +#define NDIR_PGFINISH_MASK BIT(8) +#define NDIR_RE_MASK BIT(9) +#define NDIR_DQS_MASK BIT(10) +#define NDIR_RB_MASK BIT(11) +#define NDIR_ECC_FINISH_MASK BIT(12) +#define NDIR_ECC_ERR_MASK BIT(13) + +#define NDIR 0x84 +#define NDIR_ALL_INT GENMASK(13, 0) +#define NDIR_BUSY BIT(0) +#define NDIR_DMA_BUSY BIT(1) +#define NDIR_DMA_PGFINISH BIT(2) +#define NDIR_DMA_FINISH BIT(3) +#define NDIR_FIFO_EMP BIT(4) +#define NDIR_FIFO_FULL BIT(5) +#define NDIR_FIFO_TIMEOUT BIT(6) +#define NDIR_CMD_FINISH BIT(7) +#define NDIR_PGFINISH BIT(8) +#define NDIR_RE BIT(9) +#define NDIR_DQS BIT(10) +#define NDIR_RB BIT(11) +#define NDIR_ECC_FINISH BIT(12) +#define NDIR_ECC_ERR BIT(13) + +#define ND_DEBUG 0x88 + +#define ND_ERR_CLR 0x8c +#define ND_DSP_ERR_CLR 
BIT(0) +#define ND_AXI_RD_ERR_CLR BIT(1) +#define ND_AXI_WR_ERR_CLR BIT(2) +#define ND_ECC_ERR_CLR BIT(3) + +#define NDCR_PAGE_SZ(x) (x >= 2048 ? BIT(24) : 0) + +enum nand_inter_pro { + NAND_ONFI, + NAND_JEDEC, + NAND_OTHER, +}; + +enum nand_inter_mode { + ASYN_SDR, + ONFI_DDR, + TOG_ASYN_DDR, +}; + +enum asy_timing_mode { + ASY_MODE0, + ASY_MODE1, + ASY_MODE2, + ASY_MODE3, + ASY_MODE4, +}; + +enum onfi_syn_timing_mode { + SYN_MODE0 = 0x10, + SYN_MODE1, + SYN_MODE2, + SYN_MODE3, + SYN_MODE4, +}; + +/** + * NAND controller timings expressed in NAND Controller clock cycles + * + * @tRP: ND_nRE pulse width + * @tRH: ND_nRE high duration + * @tWP: ND_nWE pulse time + * @tWH: ND_nWE high duration + * @tCS: Enable signal setup time + * @tCH: Enable signal hold time + * @tADL: Address to write data delay + * @tAR: ND_ALE low to ND_nRE low delay + * @tWHR: ND_nWE high to ND_nRE low for status read + * @tRHW: ND_nRE high duration, read to write delay + * @tR: ND_nWE high to ND_nRE low for read + */ +struct phytium_nfc_timings { + u16 tRP; + u16 tRH; + u16 tWP; /* NDTR1_TWP */ + u16 tWH; /* NDTR1_TWH */ + u16 tCS; + u16 tCH; + u16 tADL; + u16 tAR; + u16 tWHR; + u16 tRHW; + u16 tR; +}; + +/** + * NAND chip structure: stores NAND chip device related information + * + * @chip: Base NAND chip structure + * @node: Used to store NAND chips into a list + * @ndcr: Controller register value for this NAND chip + * @ndtr0: Timing registers 0 value for this NAND chip + * @ndtr1: Timing registers 1 value for this NAND chip + * @selected_die: Current active CS + * @nsels: Number of CS lines required by the NAND chip + */ +struct phytium_nand_chip { + struct nand_chip chip; + struct list_head node; + u32 ndcr; + u32 ndtr0; + u32 ndtr1; + int addr_cyc; + int selected_die; + unsigned int nsels; + struct mtd_oob_region ecc; +}; + +/** + * NAND controller capabilities for distinction between compatible strings + * + * @max_cs_nb: Number of Chip Select lines available + * @max_rb_nb: Number of 
Ready/Busy lines available + * @legacy_of_bindings: Indicates if DT parsing must be done using the old + * fashion way + * @flash_bbt: + * @ecc_strength: + * @ecc_step_size: + * @parts: + * @nr_parts: + */ +struct phytium_nfc_caps { + unsigned int max_cs_nb; + unsigned int max_rb_nb; + bool legacy_of_bindings; + bool flash_bbt; + int ecc_strength; + int ecc_step_size; + struct mtd_partition *parts; + unsigned int nr_parts; +}; + +/** + * NAND controller structure: stores Marvell NAND controller information + * + * @controller: Base controller structure + * @dev: Parent device (used to print error messages) + * @regs: NAND controller registers + * @reg_clk: Regiters clock + * @complete: Completion object to wait for NAND controller events + * @chips: List containing all the NAND chips attached to + * this NAND controller + * @caps: NAND controller capabilities for each compatible string + * @dma_buf: 32-bit aligned buffer for DMA transfers (NFCv1 only) + */ +struct phytium_nfc { + struct nand_controller controller; + struct device *dev; + void __iomem *regs; + int irq; + struct completion complete; + struct list_head chips; + struct nand_chip *selected_chip; + struct phytium_nfc_caps *caps; + + void *dsp_addr; + dma_addr_t dsp_phy_addr; + + void *dma_buf; + u32 dma_offset; + dma_addr_t dma_phy_addr; + + enum nand_inter_pro inter_pro; + enum nand_inter_mode inter_mode; + u32 timing_mode; + + spinlock_t spinlock; + struct work_struct work; +}; + +/** + * Derives a duration in numbers of clock cycles. + * + * @ps: Duration in pico-seconds + * @period_ns: Clock period in nano-seconds + * + * Convert the duration in nano-seconds, then divide by the period and + * return the number of clock periods. 
+ */ +#define TO_CYCLES(ps, period_ns) (DIV_ROUND_UP(ps / 1000, period_ns)) +#define TO_CYCLES64(ps, period_ns) (DIV_ROUND_UP_ULL(div_u64(ps, 1000), \ + period_ns)) + +struct phytium_nfc_cmd_ctrl { + u16 csel:4; + u16 dbc:1; + u16 addr_cyc:3; + u16 nc:1; +#define TYPE_RESET 0x00 +#define TYPE_SET_FTR 0x01 +#define TYPE_GET_FTR 0x02 +#define TYPE_READ_ID 0x03 +#define TYPE_PAGE_PRO 0x04 +#define TYPE_ERASE 0x05 +#define TYPE_READ 0x06 +#define TYPE_TOGGLE 0x07 +#define TYPE_READ_PARAM 0x02 +#define TYPE_READ_STATUS 0x03 +#define TYPE_CH_READ_COL 0x03 +#define TYPE_CH_ROW_ADDR 0x01 +#define TYPE_CH_WR_COL 0x01 + u16 cmd_type:4; + u16 dc:1; + u16 auto_rs:1; + u16 ecc_en:1; +}; + +/** + * NAND driver structure filled during the parsing of the ->exec_op() subop + * subset of instructions. + * + * @cle_ale_delay_ns: Optional delay after the last CMD or ADDR cycle + * @rdy_timeout_ms: Timeout for waits on Ready/Busy pin + * @rdy_delay_ns: Optional delay after waiting for the RB pin + * @data_delay_ns: Optional delay after the data xfer + * @data_instr_idx: Index of the data instruction in the subop + * @data_instr: Pointer to the data instruction in the subop + */ +struct phytium_nfc_op { + u8 cmd[2]; + union { + u16 ctrl; + struct phytium_nfc_cmd_ctrl nfc_ctrl; + } cmd_ctrl; + u8 addr[PHYTIUM_NFC_ADDR_MAX_LEN]; + u16 page_cnt; + u8 mem_addr_first[PHYTIUM_NFC_ADDR_MAX_LEN]; + + u32 cmd_len; + u32 addr_len; + + u32 cle_ale_delay_ns; + u32 rdy_timeout_ms; + u32 rdy_delay_ns; + u32 data_delay_ns; + u32 data_instr_idx; + struct nand_op_instr *data_instr; +} __attribute__ ((__packed__)); + +#define TIMING_ASY_NUM 12 +#define TIMING_SYN_NUM 14 +#define TIMING_TOG_NUM 12 + +#define TMP_DMA_DEBUG 0 /* Temporary dma space */ + +int phytium_nand_init(struct phytium_nfc *nfc); +int phytium_nand_remove(struct phytium_nfc *nfc); +int phytium_nand_prepare(struct phytium_nfc *nfc); +int phytium_nand_suspend(struct phytium_nfc *nfc); +int phytium_nand_resume(struct phytium_nfc *nfc); + 
+#endif /* NAND_PHYTIUM_NAND_H */ diff --git a/drivers/mtd/nand/raw/phytium_nand_pci.c b/drivers/mtd/nand/raw/phytium_nand_pci.c new file mode 100644 index 000000000000..4f614910f34d --- /dev/null +++ b/drivers/mtd/nand/raw/phytium_nand_pci.c @@ -0,0 +1,149 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCI driver for Phytium NAND flash controller + * + * Copyright (C) 2021, Phytium Technology Co., Ltd. + */ +#include +#include +#include + +#include "phytium_nand.h" + +#define DRV_NAME "phytium_nand_pci" + +static struct mtd_partition partition_info[] = { + { + .name = "Flash partition 1", + .offset = 0x0000000, + .size = 0x4000000 }, + { + .name = "Flash partition 2", + .offset = 0x4000000, + .size = 0x8000000 }, + { + .name = "Flash partition 3", + .offset = 0x8000000, + .size = 0x10000000 }, + { + .name = "Flash partition 4", + .offset = 0x10000000, + .size = 0x12000000 }, + { + .name = "Flash partition 5", + .offset = 0x12000000, + .size = 0x14000000 }, +}; + +static struct phytium_nfc_caps x100_nfc_caps = { + .max_cs_nb = 2, + .max_rb_nb = 1, + .legacy_of_bindings = true, + .ecc_strength = 4, + .ecc_step_size = 512, + .nr_parts = 5, + .parts = partition_info, +}; + +static int phytium_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) +{ + struct phytium_nfc *nfc; + int ret; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + ret = pcim_iomap_regions(pdev, 0x1, pci_name(pdev)); + if (ret) { + dev_err(&pdev->dev, "I/O memory remapping failed\n"); + return ret; + } + + pci_set_master(pdev); + pci_try_set_mwi(pdev); + + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (ret) + return ret; + + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (ret) + return ret; + + nfc = devm_kzalloc(&pdev->dev, sizeof(struct phytium_nfc), + GFP_KERNEL); + if (!nfc) + return -ENOMEM; + + nfc->dev = &pdev->dev; + nfc->regs = pcim_iomap_table(pdev)[0]; + nfc->irq = pdev->irq; + nfc->caps = &x100_nfc_caps; + + ret = 
phytium_nand_init(nfc);
+	if (ret)
+		return ret;
+
+	pci_set_drvdata(pdev, nfc);
+
+	return ret;
+}
+
+static void phytium_pci_remove(struct pci_dev *pdev)
+{
+	struct phytium_nfc *nfc = pci_get_drvdata(pdev);
+	int ret;
+
+	ret = phytium_nand_remove(nfc);
+	if (ret)
+		dev_warn(&pdev->dev, "can't remove device properly: %d\n", ret);
+}
+
+/*
+ * PM .prepare hook: quiesce the NAND core before system suspend.
+ * Propagate the core-layer result so a failure aborts the suspend
+ * transition instead of being silently discarded (the previous code
+ * assigned 'ret' and then always returned 0).
+ */
+static int __maybe_unused phytium_nfc_prepare(struct device *dev)
+{
+	struct pci_dev *pci = to_pci_dev(dev);
+	struct phytium_nfc *nfc = pci_get_drvdata(pci);
+
+	return phytium_nand_prepare(nfc);
+}
+
+static int __maybe_unused phytium_nfc_resume(struct device *dev)
+{
+	struct pci_dev *pci = to_pci_dev(dev);
+	struct phytium_nfc *nfc = pci_get_drvdata(pci);
+
+	return phytium_nand_resume(nfc);
+}
+
+static const struct dev_pm_ops phytium_pci_dev_pm_ops = {
+	.prepare = phytium_nfc_prepare,
+	.resume = phytium_nfc_resume,
+};
+
+static const struct pci_device_id phytium_pci_id_table[] = {
+	{ PCI_VDEVICE(PHYTIUM, 0xdc29) },
+	{ }
+};
+MODULE_DEVICE_TABLE(pci, phytium_pci_id_table);
+
+static struct pci_driver phytium_pci_driver = {
+	.name = DRV_NAME,
+	.id_table = phytium_pci_id_table,
+	.probe = phytium_pci_probe,
+	.remove = phytium_pci_remove,
+	.driver = {
+		.pm = &phytium_pci_dev_pm_ops,
+	},
+};
+module_pci_driver(phytium_pci_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PCI driver for Phytium NAND controller");
+MODULE_AUTHOR("Zhu Mingshuai ");
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
index 37775fc09e09..ff2af58a9a4a 100644
--- a/drivers/mtd/spi-nor/Kconfig
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -7,6 +7,15 @@ menuconfig MTD_SPI_NOR
 if MTD_SPI_NOR
 
+config SPI_PHYTIUM_QUADSPI
+	tristate "Phytium Quad SPI Controller"
+	depends on ARCH_PHYTIUM || ARM
+	depends on OF && HAS_IOMEM
+	help
+	  This enables support for the Quad SPI controller in master mode.
+	  This driver does not support generic SPI.
The implementation only + supports SPI NOR. + config MTD_MT81xx_NOR tristate "Mediatek MT81xx SPI NOR flash controller" depends on HAS_IOMEM diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile index f4c61d282abd..ebc8ce095bd0 100644 --- a/drivers/mtd/spi-nor/Makefile +++ b/drivers/mtd/spi-nor/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o +obj-$(CONFIG_SPI_PHYTIUM_QUADSPI) += phytium-quadspi.o obj-$(CONFIG_SPI_ASPEED_SMC) += aspeed-smc.o obj-$(CONFIG_SPI_ATMEL_QUADSPI) += atmel-quadspi.o obj-$(CONFIG_SPI_CADENCE_QUADSPI) += cadence-quadspi.o diff --git a/drivers/mtd/spi-nor/phytium-quadspi.c b/drivers/mtd/spi-nor/phytium-quadspi.c new file mode 100644 index 000000000000..15502ecc295a --- /dev/null +++ b/drivers/mtd/spi-nor/phytium-quadspi.c @@ -0,0 +1,1006 @@ +/* + * Phytium SPI core controller driver. + * + * Copyright (c) 2019, Phytium Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define QSPI_FLASH_CAP_REG 0x000 +#define QSPI_RD_CFG_REG 0x004 +#define QSPI_WR_CFG_REG 0x008 +#define QSPI_FLUSH_REG 0x00C +#define QSPI_CMD_PORT_REG 0x010 +#define QSPI_ADDR_PORT_REG 0x014 +#define QSPI_HD_PORT_REG 0x018 +#define QSPI_LD_PORT_REG 0x01C +#define QSPI_FUN_SET_REG 0x020 +#define QSPI_WIP_REG 0x024 +#define QSPI_WP_REG 0x028 +#define QSPI_MODE_REG 0x02C + +#define QSPI_FLASH_CAP_NUM_SHIFT 3 +#define QSPI_FLASH_CAP_NUM_MASK (0x3 << QSPI_FLASH_CAP_NUM_SHIFT) +#define QSPI_FLASH_CAP_CAP_SHIFT 0 +#define QSPI_FLASH_CAP_CAP_MASK (0x7 << QSPI_FLASH_CAP_CAP_SHIFT) + +#define QSPI_RD_CFG_RD_CMD_SHIFT 24 +#define QSPI_RD_CFG_RD_CMD_MASK (0xFF << QSPI_RD_CFG_RD_CMD_SHIFT) +#define QSPI_RD_CFG_RD_THROUGH_SHIFT 23 +#define QSPI_RD_CFG_RD_THROUGH_MASK (0x01 << QSPI_RD_CFG_RD_THROUGH_SHIFT) +#define QSPI_RD_CFG_RD_TRANSFER_SHIFT 20 +#define QSPI_RD_CFG_RD_TRANSFER_MASK (0x07 << QSPI_RD_CFG_RD_TRANSFER_SHIFT) +#define QSPI_RD_CFG_RD_ADDR_SEL_SHIFT 19 +#define QSPI_RD_CFG_RD_ADDR_SEL_MASK (0x1 << QSPI_RD_CFG_RD_ADDR_SEL_SHIFT) +#define QSPI_RD_CFG_RD_LATENCY_SHIFT 18 +#define QSPI_RD_CFG_RD_LATENCY_MASK (0x1 << QSPI_RD_CFG_RD_LATENCY_SHIFT) +#define QSPI_RD_CFG_MODE_BYTE_SHIFT 17 +#define QSPI_RD_CFG_MODE_BYTE_MASK (0x1 << QSPI_RD_CFG_MODE_BYTE_SHIFT) +#define QSPI_RD_CFG_CMD_SIGN_SHIFT 9 +#define QSPI_RD_CFG_CMD_SIGN_MASK (0xFF << QSPI_RD_CFG_CMD_SIGN_SHIFT) +#define QSPI_RD_CFG_DUMMY_SHIFT 4 +#define QSPI_RD_CFG_DUMMY_MASK (0x1F << QSPI_RD_CFG_DUMMY_SHIFT) +#define QSPI_RD_CFG_D_BUFFER_SHIFT 3 +#define QSPI_RD_CFG_D_BUFFER_MASK (0x1 << QSPI_RD_CFG_D_BUFFER_SHIFT) +#define QSPI_RD_CFG_RD_SCK_SEL_SHIFT 0 +#define QSPI_RD_CFG_RD_SCK_SEL_MASK (0x3 << QSPI_RD_CFG_RD_SCK_SEL_SHIFT) + +#define QSPI_WR_CFG_WR_CMD_SHIFT 24 +#define QSPI_WR_CFG_WR_CMD_MASK (0xFF << QSPI_WR_CFG_WR_CMD_SHIFT) 
+#define QSPI_WR_CFG_WR_WAIT_SHIFT 9 +#define QSPI_WR_CFG_WR_WAIT_MASK (0x01 << QSPI_WR_CFG_WR_WAIT_SHIFT) +#define QSPI_WR_CFG_WR_THROUGH_SHIFT 8 +#define QSPI_WR_CFG_WR_THROUGH_MAS (0x01 << QSPI_WR_CFG_WR_THROUGH_SHIFT) +#define QSPI_WR_CFG_WR_TRANSFER_SHIFT 5 +#define QSPI_WR_CFG_WR_TRANSFER_MASK (0X7 << QSPI_WR_CFG_WR_TRANSFER_SHIFT) +#define QSPI_WR_CFG_WR_ADDR_SEL_SHIFT 4 +#define QSPI_WR_CFG_WR_ADDR_SEL_MASK (0x1 << QSPI_WR_CFG_WR_ADDR_SEL_SHIFT) +#define QSPI_WR_CFG_WR_MODE_SHIFT 3 +#define QSPI_WR_CFG_WR_MODE (0x01 << QSPI_WR_CFG_WR_MODE_SHIFT) +#define QSPI_WR_CFG_WR_SCK_SEL_SHIFT 0 +#define QSPI_WR_CFG_WR_SCK_SEL_MASK (0x7 << QSPI_WR_CFG_WR_SCK_SEL_SHIFT) + +#define QSPI_FLUSH_EN (0x1 << 0) + +#define QSPI_CMD_PORT_CMD_SHIFT 24 +#define QSPI_CMD_PORT_CMD_MASK (0xFF << QSPI_CMD_PORT_CMD_SHIFT) +#define QSPI_CMD_PORT_WAIT_SHIFT 22 +#define QSPI_CMD_PORT_WAIT_MASK (0x1 << QSPI_CMD_PORT_WAIT_SHIFT) +#define QSPI_CMD_PORT_THROUGH_SHIFT 21 +#define QSPI_CMD_PORT_THROUGH_MASK (0x1 << QSPI_CMD_PORT_THROUGH_SHIFT) +#define QSPI_CMD_PORT_CS_SHIFT 19 +#define QSPI_CMD_PORT_CS_MASK (0x3 << QSPI_CMD_PORT_CS_SHIFT) +#define QSPI_CMD_PORT_TRANSFER_SHIFT 16 +#define QSPI_CMD_PORT_TRANSFER_MASK (0x7 << QSPI_CMD_PORT_TRANSFER_SHIFT) +#define QSPI_CMD_PORT_CMD_ADDR_SHIFT 15 +#define QSPI_CMD_PORT_CMD_ADDR_MASK (0x1 << QSPI_CMD_PORT_CMD_ADDR_SHIFT) +#define QSPI_CMD_PORT_LATENCY_SHIFT 14 +#define QSPI_CMD_PORT_LATENCY_MASK (0x1 << QSPI_CMD_PORT_LATENCY_SHIFT) +#define QSPI_CMD_PORT_DATA_TRANSFER_SHIFT 13 +#define QSPI_CMD_PORT_DATA_TRANSFER_MASK (0x1 << 13) +#define QSPI_CMD_PORT_SEL_SHIFT 12 +#define QSPI_CMD_PORT_SEL_MASK (0x1 << QSPI_CMD_PORT_SEL_SHIFT) +#define QSPI_CMD_PORT_DUMMY_SHIFT 7 +#define QSPI_CMD_PORT_DUMMY_MASK (0x1F << QSPI_CMD_PORT_DUMMY_SHIFT) +#define QSPI_CMD_PORT_P_BUFFER_SHIFT 6 +#define QSPI_CMD_PORT_P_BUFFER_MASK (0x1 << QSPI_CMD_PORT_P_BUFFER_SHIFT) +#define QSPI_CMD_PORT_RW_NUM_SHIFT 3 +#define QSPI_CMD_PORT_RW_NUM_MASK (0x7 << 
QSPI_CMD_PORT_RW_NUM_SHIFT) +#define QSPI_CMD_PORT_SCK_SEL_SHIFT 0 +#define QSPI_CMD_PORT_SCK_SEL_MASK (0x7 << QSPI_CMD_PORT_SCK_SEL_SHIFT) + +#define QSPI_FUN_SET_HOLD_SHIFT 24 +#define QSPI_FUN_SET_HOLD_MASK (0xFF << QSPI_FUN_SET_HOLD_SHIFT) +#define QSPI_FUN_SET_SETUP_SHIFT 16 +#define QSPI_FUN_SET_SETUP_MASK (0xFF << QSPI_FUN_SET_SETUP_SHIFT) +#define QSPI_FUN_SET_DELAY_SHIFT 0 +#define QSPI_FUN_SET_DELAY_MASK (0xFFFF << QSPI_FUN_SET_DELAY_SHIFT) + +#define QSPI_WIP_W_CMD_SHIFT 24 +#define QSPI_WIP_W_CMD_MASK (0xFF << QSPI_WIP_W_CMD_SHIFT) +#define QSPI_WIP_W_TRANSFER_SHIFT 3 +#define QSPI_WIP_W_TRANSFER_MASK (0x3 << QSPI_WIP_W_TRANSFER_SHIFT) +#define QSPI_WIP_W_SCK_SEL_SHIFT 0 +#define QSPI_WIP_W_SCK_SEL_MASK (0x7 << QSPI_WIP_W_SCK_SEL_SHIFT) + +#define QSPI_WP_EN_SHIFT 17 +#define QSPI_WP_EN_MASK (0x1 << QSPI_WP_EN_SHIFT) +#define QSPI_WP_IO2_SHIFT 16 +#define QSPI_WP_IO2_MASK (0x1 << QSPI_WP_IO2_SHIFT) +#define QSPI_WP_HOLD_SHIFT 8 +#define QSPI_WP_HOLD_MASK (0xFF << QSPI_WP_HOLD_SHIFT) +#define QSPI_WP_SETUP_SHIFT 0 +#define QSPI_WP_SETUP_MASK (0xFF << QSPI_WP_SETUP_SHIFT) + +#define QSPI_MODE_VALID_SHIFT 8 +#define QSPI_MODE_VALID_MASK (0xFF << QSPI_MODE_VALID_SHIFT) +#define QSPI_MODE_SHIFT 0 +#define QSPI_MODE_MASK (0xFF << QSPI_MODE_SHIFT) + +#define FSIZE_VAL(size) (__fls(size) - 1) + +#define PHYTIUM_MAX_MMAP_S SZ_512M +#define PHYTIUM_MAX_NORCHIP 2 + +#define PHYTIUM_QSPI_FIFO_SZ 32 +#define PHYTIUM_QSPI_FIFO_TIMEOUT_US 50000 +#define PHYTIUM_QSPI_BUSY_TIMEOUT_US 100000 + +#define PHYTIUM_SCK_SEL 0x05 +#define PHYTIUM_CMD_SCK_SEL 0x07 + +#define PHYTIUM_FMODE_MM 0x01 +#define PHYTIUM_FMODE_IN 0x02 + +/* + * the codes of the different commands + */ +#define CMD_WRDI 0x04 +#define CMD_RDID 0x9F +#define CMD_RDSR 0x05 +#define CMD_WREN 0x06 +#define CMD_RDAR 0x65 +#define CMD_P4E 0x20 +#define CMD_4P4E 0x21 +#define CMD_BE 0x60 +#define CMD_4BE 0xC7 +#define CMD_READ 0x03 +#define CMD_FAST_READ 0x0B +#define CMD_QOR 0x6B +#define CMD_QIOR 0xEB +#define 
CMD_DDRFR 0x0D +#define CMD_DDRQIOQ 0xED +#define CMD_PP 0x02 +#define CMD_QPP 0x32 +#define CMD_SE 0xD8 +#define CMD_4FAST_READ 0x0C +#define CMD_4READ 0x13 +#define CMD_4QOR 0x6C +#define CMD_4QIOR 0xEC +#define CMD_4DDRFR 0x0E +#define CMD_4DDRQIOR 0xEE +#define CMD_4PP 0x12 +#define CMD_4QPP 0x34 +#define CMD_4SE 0xDC + +#define PHYTIUM_QSPI_1_1_1 0 +#define PHYTIUM_QSPI_1_1_2 1 +#define PHYTIUM_QSPI_1_1_4 2 +#define PHYTIUM_QSPI_1_2_2 3 +#define PHYTIUM_QSPI_1_4_4 4 +#define PHYTIUM_QSPI_2_2_2 5 +#define PHYTIUM_QSPI_4_4_4 6 + +struct phytium_qspi_flash { + struct spi_nor nor; + struct phytium_qspi *qspi; + u32 cs; + u32 fsize; + u32 presc; + u32 clk_div; + u32 read_mode; + bool registered; + u32 prefetch_limit; + u32 addr_width; + u32 read_cmd; +}; + +struct phytium_qspi { + struct device *dev; + void __iomem *io_base; + void __iomem *mm_base; + resource_size_t mm_size; + u32 nor_num; + struct clk *clk; + u32 clk_rate; + struct phytium_qspi_flash flash[PHYTIUM_MAX_NORCHIP]; + + spinlock_t spinlock; + + /* + * to protect device configuration, could be different between + * 2 flash access (bk1, bk2) + */ + struct mutex lock; +}; + +/* Need to enable p_buffer */ +static int memcpy_from_ftreg(struct phytium_qspi *qspi, u_char *buf, size_t len) +{ + int i; + u32 val = 0; + + if (!qspi || !buf) + return -EINVAL; + + for (i = 0; i < len; i++) { + if (0 == i % 4) + val = readl_relaxed(qspi->io_base + QSPI_LD_PORT_REG); + + buf[i] = (u_char) (val >> (i % 4) * 8) & 0xFF; + } + + return 0; +} + +/* Not to enable p_buffer */ +static int memcpy_to_ftreg(struct phytium_qspi *qspi, u_char *buf, size_t len) +{ + u32 val = 0; + + if (!qspi || !buf || (len >= 8)) + return -EINVAL; + + if (1 == len) { + val = buf[0]; + } else if (2 == len) { + val = buf[1]; + val = (val << 8) + buf[0]; + } else if (3 == len) { + val = buf[2]; + val = (val << 8) + buf[1]; + val = (val << 8) + buf[0]; + } else if (4 == len) { + val = buf[3]; + val = (val << 8) + buf[2]; + val = (val << 8) + 
buf[1]; + val = (val << 8) + buf[0]; + } + + writel_relaxed(val, qspi->io_base + QSPI_LD_PORT_REG); + + return 0; +} + +static int phytium_qspi_wait_cmd(struct phytium_qspi *qspi, + struct phytium_qspi_flash *flash) +{ + u32 cmd = 0; + u32 cnt = 0; + + cmd |= CMD_RDSR << QSPI_CMD_PORT_CMD_SHIFT; + cmd |= BIT(QSPI_CMD_PORT_DATA_TRANSFER_SHIFT); + cmd |= flash->cs << QSPI_CMD_PORT_CS_SHIFT; + + writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG); + + cnt = PHYTIUM_QSPI_BUSY_TIMEOUT_US / 10; + while (readl_relaxed(qspi->io_base + QSPI_LD_PORT_REG) & 0x01) { + udelay(10); + cnt--; + if (!cnt) { + dev_err(qspi->dev, "wait command process timeout\n"); + break; + } + } + + return !cnt; +} + +static int phytium_qspi_cmd_enable(struct phytium_qspi *qspi) +{ + u32 val = 0; + + writel_relaxed(val, qspi->io_base + QSPI_LD_PORT_REG); + + return 0; +} + +static int phytium_qspi_write_enable(struct phytium_qspi *qspi, + struct phytium_qspi_flash *flash) +{ + u32 cmd = 0; + + cmd = CMD_WREN << QSPI_CMD_PORT_CMD_SHIFT; + cmd |= PHYTIUM_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT; + cmd |= flash->cs << QSPI_CMD_PORT_CS_SHIFT; + + writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG); + phytium_qspi_cmd_enable(qspi); + + return 0; +} + +static int phytium_qspi_write_disable(struct phytium_qspi *qspi, + struct phytium_qspi_flash *flash) +{ + u32 cmd = 0; + + cmd = CMD_WRDI << QSPI_CMD_PORT_CMD_SHIFT; + cmd |= PHYTIUM_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT; + cmd |= flash->cs << QSPI_CMD_PORT_CS_SHIFT; + + writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG); + phytium_qspi_cmd_enable(qspi); + + return 0; +} + +static int phytium_qspi_read_flash_id(struct phytium_qspi *qspi, + struct phytium_qspi_flash *flash, u8 opcode, u8 *buf, int len) +{ + u32 cmd = 0; + unsigned long iflags; + + cmd = opcode << QSPI_CMD_PORT_CMD_SHIFT; + cmd |= BIT(QSPI_CMD_PORT_DATA_TRANSFER_SHIFT); + cmd |= BIT(QSPI_CMD_PORT_P_BUFFER_SHIFT); + cmd |= PHYTIUM_CMD_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT; + cmd |= 
flash->cs << QSPI_CMD_PORT_CS_SHIFT; + + writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG); + phytium_qspi_cmd_enable(qspi); + + spin_lock_irqsave(&qspi->spinlock, iflags); + memcpy_from_ftreg(qspi, buf, len); + spin_unlock_irqrestore(&qspi->spinlock, iflags); + + dev_dbg(qspi->dev, "read flash id:%x\n", *(u32 *)buf); + return 0; +} + +static int phytium_qspi_read_flash_sfdp(struct phytium_qspi *qspi, + struct phytium_qspi_flash *flash, u8 opcode, loff_t from, u8 *buf, int len) +{ + unsigned long iflags; + u32 cmd = 0; + + cmd = opcode << QSPI_CMD_PORT_CMD_SHIFT; + cmd |= BIT(QSPI_CMD_PORT_DATA_TRANSFER_SHIFT); + cmd |= BIT(QSPI_CMD_PORT_P_BUFFER_SHIFT); + cmd |= BIT(QSPI_CMD_PORT_CMD_ADDR_SHIFT); + cmd |= PHYTIUM_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT; + cmd |= flash->cs << QSPI_CMD_PORT_CS_SHIFT; + + writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG); + writel_relaxed(from, qspi->io_base + QSPI_ADDR_PORT_REG); + phytium_qspi_cmd_enable(qspi); + + spin_lock_irqsave(&qspi->spinlock, iflags); + memcpy_from_ftreg(qspi, buf, len); + spin_unlock_irqrestore(&qspi->spinlock, iflags); + + dev_dbg(qspi->dev, "read flash sfdp:0x%llx 0x%llx\n", + *(u64 *)buf, *(u64 *)(buf + 8)); + return 0; +} + +static int phytium_qspi_read_flash_sr1(struct phytium_qspi *qspi, + struct phytium_qspi_flash *flash, u8 opcode, u8 *buf, int len) +{ + u32 cmd = 0; + u32 val; + + cmd = opcode << QSPI_CMD_PORT_CMD_SHIFT; + cmd |= BIT(QSPI_CMD_PORT_DATA_TRANSFER_SHIFT); + cmd |= (len << QSPI_CMD_PORT_RW_NUM_SHIFT) & QSPI_CMD_PORT_RW_NUM_MASK; + cmd |= PHYTIUM_CMD_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT; + cmd |= flash->cs << QSPI_CMD_PORT_CS_SHIFT; + + writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG); + phytium_qspi_cmd_enable(qspi); + + val = readl_relaxed(qspi->io_base + QSPI_LD_PORT_REG); + buf[0] = (u8)val; + + return 0; +} + +static int phytium_qspi_read_reg(struct spi_nor *nor, + u8 opcode, u8 *buf, int len) +{ + struct phytium_qspi_flash *flash = nor->priv; + struct device *dev 
= flash->qspi->dev; + struct phytium_qspi *qspi = flash->qspi; + unsigned long iflags; + u32 cmd = 0; + + dev_dbg(dev, "read_reg: cmd:%#.2x buf:%pK len:%#x\n", opcode, buf, len); + + switch (opcode) { + case CMD_RDID: + phytium_qspi_read_flash_id(qspi, flash, opcode, buf, len); + return 0; + case CMD_RDSR: + phytium_qspi_read_flash_sr1(qspi, flash, opcode, buf, len); + return 0; + default: + break; + } + + cmd = opcode << QSPI_CMD_PORT_CMD_SHIFT; + cmd |= BIT(QSPI_CMD_PORT_DATA_TRANSFER_SHIFT); + cmd |= BIT(QSPI_CMD_PORT_P_BUFFER_SHIFT); + cmd |= PHYTIUM_CMD_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT; + cmd |= flash->cs << QSPI_CMD_PORT_CS_SHIFT; + + writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG); + phytium_qspi_cmd_enable(qspi); + + spin_lock_irqsave(&qspi->spinlock, iflags); + memcpy_from_ftreg(qspi, buf, len); + spin_unlock_irqrestore(&qspi->spinlock, iflags); + + return 0; +} + +static int phytium_qspi_write_reg(struct spi_nor *nor, u8 opcode, + u8 *buf, int len) +{ + struct phytium_qspi_flash *flash = nor->priv; + struct device *dev = flash->qspi->dev; + struct phytium_qspi *qspi = flash->qspi; + u32 cmd = 0; + + dev_dbg(dev, "write_reg: cmd:%#.2x buf:%pK len:%#x\n", + opcode, buf, len); + + switch(opcode){ + case CMD_WREN: + phytium_qspi_write_enable(qspi, flash); + return 0; + case CMD_WRDI: + phytium_qspi_write_disable(qspi, flash); + return 0; + default: + break; + } + + cmd = opcode << QSPI_CMD_PORT_CMD_SHIFT; + cmd |= PHYTIUM_CMD_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT; + cmd |= flash->cs << QSPI_CMD_PORT_CS_SHIFT; + + if ((len > 8) || (NULL == buf)) { + dev_err(dev, "data length exceed. 
commad %x, len:%d \n", opcode, len); + return -EINVAL; + } + else if(len > 0){ + cmd |= ((len - 1) << QSPI_CMD_PORT_RW_NUM_SHIFT) & QSPI_CMD_PORT_RW_NUM_MASK; + cmd |= BIT(QSPI_CMD_PORT_DATA_TRANSFER_SHIFT); + } + + writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG); + memcpy_to_ftreg(qspi, buf, len); + + return 0; +} + +static ssize_t phytium_qspi_read_tmp(struct phytium_qspi *qspi, u32 read_cmd, + loff_t from, size_t len, u_char *buf) +{ + u32 addr = (u32)from; + u64 val = 0; + + if (!qspi) + return -1; + + dev_dbg(qspi->dev, "read cmd:%x, addr:%x len:%zx\n", read_cmd, addr, len); + writel_relaxed(read_cmd, qspi->io_base + QSPI_RD_CFG_REG); + + memcpy_fromio(buf, qspi->mm_base + addr, len); + + val = *(u64 *)(buf); + dev_dbg(qspi->dev, "read val:%llx\n", val); + + return len; +} + +static ssize_t phytium_qspi_read(struct spi_nor *nor, loff_t from, size_t len, + u_char *buf) +{ + struct phytium_qspi_flash *flash = nor->priv; + struct phytium_qspi *qspi = flash->qspi; + u32 cmd = nor->read_opcode; + u32 addr = (u32)from; + + addr = addr + flash->cs * flash->fsize; + dev_dbg(qspi->dev, "read(%#.2x): buf:%pK from:%#.8x len:%#zx\n", + nor->read_opcode, buf, addr, len); + + cmd = cmd << QSPI_RD_CFG_RD_CMD_SHIFT; + cmd |= BIT(QSPI_RD_CFG_D_BUFFER_SHIFT); + cmd |= flash->clk_div << QSPI_CMD_PORT_SCK_SEL_SHIFT; + + cmd &= ~QSPI_RD_CFG_RD_TRANSFER_MASK; + cmd |= (flash->addr_width << QSPI_RD_CFG_RD_TRANSFER_SHIFT); + + switch (nor->read_opcode) { + case CMD_READ: + case CMD_FAST_READ: + case CMD_QIOR: + case CMD_QOR: + cmd &= ~QSPI_RD_CFG_RD_ADDR_SEL_MASK; + break; + case CMD_4READ: + case CMD_4FAST_READ: + case CMD_4QOR: + case CMD_4QIOR: + cmd |= BIT(QSPI_RD_CFG_RD_ADDR_SEL_SHIFT); + break; + case 0x5A: + cmd &= ~QSPI_RD_CFG_RD_ADDR_SEL_MASK; + phytium_qspi_read_flash_sfdp(qspi, flash, nor->read_opcode, from, buf, len); + return 0; + break; + default: + break; + } + + if((PHYTIUM_QSPI_1_1_4 == flash->addr_width) || + (PHYTIUM_QSPI_1_4_4 == flash->addr_width)) { + cmd 
|= BIT(QSPI_RD_CFG_RD_LATENCY_SHIFT);
+
+		cmd &= ~QSPI_RD_CFG_DUMMY_MASK;
+		cmd |= (0x07 << QSPI_RD_CFG_DUMMY_SHIFT);
+	}
+
+	dev_dbg(qspi->dev, "read(%#.2x): cmd:%#x\n", nor->read_opcode, cmd);
+	if (cmd != flash->read_cmd)
+		flash->read_cmd = cmd;
+
+	writel_relaxed(cmd, qspi->io_base + QSPI_RD_CFG_REG);
+
+	memcpy_fromio(buf, qspi->mm_base + addr, len);
+
+	return len;
+}
+
+/*
+ * Memory-mapped program path: configure WR_CFG for the program opcode,
+ * stream whole words through the MM window, then patch any unaligned
+ * tail with a read-modify-write of the last word.
+ */
+static ssize_t phytium_qspi_write(struct spi_nor *nor, loff_t to, size_t len,
+				  const u_char *buf)
+{
+	struct phytium_qspi_flash *flash = nor->priv;
+	struct device *dev = flash->qspi->dev;
+	struct phytium_qspi *qspi = flash->qspi;
+	u32 cmd = nor->program_opcode;
+	u32 addr = (u32)to;
+	int i;
+	u_char tmp[8] = {0};
+	size_t mask = 0x03;
+
+	addr = addr + flash->cs * flash->fsize;
+	dev_dbg(dev, "write(%#.2x): buf:%p to:%#.8x len:%#zx\n",
+		nor->program_opcode, buf, addr, len);
+
+	if (addr & 0x03) {
+		dev_err(dev, "Addr not four-byte aligned!\n");
+		return -EINVAL;
+	}
+
+	cmd = cmd << QSPI_WR_CFG_WR_CMD_SHIFT;
+	cmd |= BIT(QSPI_WR_CFG_WR_MODE_SHIFT);
+	cmd |= PHYTIUM_CMD_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT;
+
+	switch (nor->program_opcode) {
+	case CMD_PP:
+	case CMD_QPP:
+		cmd &= ~QSPI_WR_CFG_WR_ADDR_SEL_MASK;
+		break;
+	case CMD_4PP:
+	case CMD_4QPP:
+		cmd |= BIT(QSPI_WR_CFG_WR_ADDR_SEL_SHIFT);
+		break;
+	default:
+		/*
+		 * Report the program opcode that was rejected; the previous
+		 * code printed nor->erase_opcode here (copy-paste from the
+		 * erase routine), logging the wrong value.
+		 */
+		dev_err(qspi->dev, "Not support program command:%#x\n",
+			nor->program_opcode);
+		return -EINVAL;
+	}
+
+	dev_dbg(qspi->dev, "write cmd:%x\n", cmd);
+	writel_relaxed(cmd, qspi->io_base + QSPI_WR_CFG_REG);
+
+	for (i = 0; i < len/4; i++) {
+		writel_relaxed(*(u32 *)(buf + 4*i), qspi->mm_base + addr + 4*i);
+	}
+
+	if (len & mask) {
+		addr = addr + (len & ~mask);
+		phytium_qspi_read_tmp(qspi, flash->read_cmd, addr, 4, &tmp[0]);
+		memcpy(tmp, buf + (len & ~mask), len & mask);
+		writel_relaxed(*(u32 *)(tmp), qspi->mm_base + addr);
+	}
+
+	writel_relaxed(QSPI_FLUSH_EN, qspi->io_base + QSPI_FLUSH_REG);
+
+	phytium_qspi_wait_cmd(qspi, flash);
+
+	return len;
+}
+
+static int
phytium_qspi_erase(struct spi_nor *nor, loff_t offs) +{ + struct phytium_qspi_flash *flash = nor->priv; + struct device *dev = flash->qspi->dev; + struct phytium_qspi *qspi = flash->qspi; + u32 cmd = nor->erase_opcode; + u32 addr = (u32)offs; + + dev_dbg(dev, "erase(%#.2x):offs:%#x\n", nor->erase_opcode, (u32)offs); + + phytium_qspi_write_enable(qspi, flash); + cmd = cmd << QSPI_CMD_PORT_CMD_SHIFT; + cmd |= PHYTIUM_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT; + cmd |= flash->cs << QSPI_CMD_PORT_CS_SHIFT; + + /* s25fl256s1 not supoort D8, DC, 20, 21 */ + switch (nor->erase_opcode) { + case CMD_SE: + cmd &= ~QSPI_CMD_PORT_SEL_MASK; + cmd |= BIT(QSPI_CMD_PORT_CMD_ADDR_SHIFT); + writel_relaxed(addr, qspi->io_base + QSPI_ADDR_PORT_REG); + break; + case CMD_4SE: + cmd |= BIT(QSPI_CMD_PORT_SEL_SHIFT); + cmd |= BIT(QSPI_CMD_PORT_CMD_ADDR_SHIFT); + writel_relaxed(addr, qspi->io_base + QSPI_ADDR_PORT_REG); + break; + case CMD_P4E: + cmd &= ~QSPI_CMD_PORT_SEL_MASK; + cmd |= BIT(QSPI_CMD_PORT_CMD_ADDR_SHIFT); + writel_relaxed(addr, qspi->io_base + QSPI_ADDR_PORT_REG); + break; + case CMD_4P4E: + cmd |= BIT(QSPI_CMD_PORT_SEL_SHIFT); + cmd |= BIT(QSPI_CMD_PORT_CMD_ADDR_SHIFT); + writel_relaxed(addr, qspi->io_base + QSPI_ADDR_PORT_REG); + break; + case CMD_BE: + cmd &= ~QSPI_CMD_PORT_SEL_MASK; + break; + case CMD_4BE: + cmd |= BIT(QSPI_CMD_PORT_SEL_SHIFT); + break; + default: + dev_err(qspi->dev, "Not support erase command:%#x\n", + nor->erase_opcode); + return -EINVAL; + } + + writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG); + phytium_qspi_cmd_enable(qspi); + phytium_qspi_wait_cmd(qspi, flash); + + return 0; +} + +static int phytium_qspi_prep(struct spi_nor *nor, enum spi_nor_ops ops) +{ + struct phytium_qspi_flash *flash = nor->priv; + struct phytium_qspi *qspi = flash->qspi; + + mutex_lock(&qspi->lock); + return 0; +} + +static void phytium_qspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops) +{ + struct phytium_qspi_flash *flash = nor->priv; + struct phytium_qspi *qspi = 
flash->qspi; + + mutex_unlock(&qspi->lock); +} + +static int phytium_qspi_get_flash_size(struct phytium_qspi *qspi, u32 size) +{ + int ret = 0; + u32 value; + + switch (size) { + case SZ_4M: + value = 0; + break; + case SZ_8M: + value = 1; + break; + case SZ_16M: + value = 2; + break; + case SZ_32M: + value = 3; + break; + case SZ_64M: + value = 4; + break; + case SZ_128M: + value = 5; + break; + case SZ_256M: + value = 6; + break; + case SZ_512M: + value = 7; + break; + default: + value = 0; + + ret = -EINVAL; + return ret; + } + + return value; +} +static int phytium_qspi_flash_setup(struct phytium_qspi *qspi, + struct device_node *np) +{ + struct spi_nor_hwcaps hwcaps = { + .mask = SNOR_HWCAPS_READ | + SNOR_HWCAPS_READ_FAST | + SNOR_HWCAPS_PP, + }; + u32 width, presc; + u32 cs_num = 0; + u32 max_rate = 0; + u32 clk_div = 0; + u32 flash_cap = 0; + u32 addr_width = PHYTIUM_QSPI_1_1_1; + struct phytium_qspi_flash *flash; + struct mtd_info *mtd; + int ret; + + of_property_read_u32(np, "reg", &cs_num); + if (cs_num >= PHYTIUM_MAX_NORCHIP) + return -EINVAL; + + of_property_read_u32(np, "spi-max-frequency", &max_rate); + if (!max_rate) + return -EINVAL; + + of_property_read_u32(np, "spi-clk-div", &clk_div); + if (!clk_div) + clk_div = PHYTIUM_SCK_SEL; + + if (clk_div < 4) + return -EINVAL; + + presc = DIV_ROUND_UP(qspi->clk_rate, max_rate) - 1; + + of_property_read_u32(np, "spi-rx-bus-width", &width); + if (!width) + width = 1; + + if (width == 4) { + hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4; + addr_width = PHYTIUM_QSPI_1_1_4; + } else if (width == 2) { + hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2; + addr_width = PHYTIUM_QSPI_1_1_2; + } else if (width != 1) + return -EINVAL; + + flash = &qspi->flash[cs_num]; + flash->qspi = qspi; + flash->cs = cs_num; + flash->presc = presc; + flash->clk_div = clk_div; + flash->addr_width = addr_width; + + flash->nor.dev = qspi->dev; + spi_nor_set_flash_node(&flash->nor, np); + flash->nor.priv = flash; + mtd = &flash->nor.mtd; + + 
flash->nor.read = phytium_qspi_read;
+	flash->nor.write = phytium_qspi_write;
+	flash->nor.erase = phytium_qspi_erase;
+	flash->nor.read_reg = phytium_qspi_read_reg;
+	flash->nor.write_reg = phytium_qspi_write_reg;
+	flash->nor.prepare = phytium_qspi_prep;
+	flash->nor.unprepare = phytium_qspi_unprep;
+
+	ret = spi_nor_scan(&flash->nor, NULL, &hwcaps);
+	if (ret) {
+		dev_err(qspi->dev, "device scan failed\n");
+		return ret;
+	}
+
+	flash->fsize = mtd->size;
+	flash->prefetch_limit = mtd->size - PHYTIUM_QSPI_FIFO_SZ;
+
+	/*
+	 * phytium_qspi_get_flash_size() returns the FLASH_CAP size encoding
+	 * (0 for 4M through 7 for 512M) or -EINVAL for unsupported sizes.
+	 * 0 is a valid encoding, so only negative values indicate failure;
+	 * the previous 'ret < 1' test wrongly rejected 4M parts.
+	 */
+	ret = phytium_qspi_get_flash_size(flash->qspi, mtd->size);
+	if (ret < 0) {
+		dev_err(qspi->dev, "flash size invalid\n");
+		return ret;
+	}
+
+	flash_cap = cs_num << QSPI_FLASH_CAP_NUM_SHIFT;
+	flash_cap |= ret;
+	writel_relaxed(flash_cap, qspi->io_base + QSPI_FLASH_CAP_REG);
+
+	flash->read_mode = PHYTIUM_FMODE_MM;
+
+	ret = mtd_device_register(mtd, NULL, 0);
+	if (ret) {
+		dev_err(qspi->dev, "mtd device parse failed\n");
+		return ret;
+	}
+
+	flash->registered = true;
+
+	dev_dbg(qspi->dev, "read mm:%s %px cs:%d bus:%d clk-div:%d\n",
+		flash->read_mode == PHYTIUM_FMODE_MM ?
"yes" : "no", + qspi->mm_base, cs_num, width, clk_div); + + dev_dbg(qspi->dev, "mtd->size:%llx, mtd->erasesize:%x, fsize:%x\n", + mtd->size, mtd->erasesize, flash->fsize); + + return 0; +} + +static void phytium_qspi_mtd_free(struct phytium_qspi *qspi) +{ + int i; + + for (i = 0; i < PHYTIUM_MAX_NORCHIP; i++) + if (qspi->flash[i].registered) + mtd_device_unregister(&qspi->flash[i].nor.mtd); +} + +static ssize_t clk_div_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct phytium_qspi *qspi = dev_get_drvdata(dev); + struct phytium_qspi_flash *flash = &qspi->flash[0]; + + return sprintf(buf, "Flash 0 clk-div: %d\n", flash->clk_div); +} + +static ssize_t clk_div_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t size) +{ + struct phytium_qspi *qspi = dev_get_drvdata(dev); + struct phytium_qspi_flash *flash = &qspi->flash[0]; + long value; + char *token; + ssize_t status; + + token = strsep ((char **)&buf, " "); + if (!token) + return -EINVAL; + + status = kstrtol(token, 0, &value); + if (status) + return status; + + flash->clk_div = (u8)value; + + return size; +} +static DEVICE_ATTR_RW(clk_div); + +static struct attribute *phytium_qspi_attrs[] = { + &dev_attr_clk_div.attr, + NULL, +}; + +static struct attribute_group phytium_qspi_attr_group = { + .attrs = phytium_qspi_attrs, +}; + +static int phytium_qspi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *flash_np; + struct phytium_qspi *qspi; + struct resource *res; + int ret; + + qspi = devm_kzalloc(dev, sizeof(*qspi), GFP_KERNEL); + if (!qspi) + return -ENOMEM; + + qspi->nor_num = of_get_child_count(dev->of_node); + if (!qspi->nor_num || qspi->nor_num > PHYTIUM_MAX_NORCHIP) + return -ENODEV; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi"); + qspi->io_base = devm_ioremap_resource(dev, res); + if (IS_ERR(qspi->io_base)) + return PTR_ERR(qspi->io_base); + + res = 
platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mm"); + qspi->mm_base = devm_ioremap_resource(dev, res); + if (IS_ERR(qspi->mm_base)) + return PTR_ERR(qspi->mm_base); + + qspi->mm_size = resource_size(res); + + qspi->clk = devm_clk_get(dev, NULL); + if (IS_ERR(qspi->clk)) + return PTR_ERR(qspi->clk); + + qspi->clk_rate = clk_get_rate(qspi->clk); + if (!qspi->clk_rate) + return -EINVAL; + + ret = clk_prepare_enable(qspi->clk); + if (ret) { + dev_err(dev, "can not enable the clock\n"); + return ret; + } + + qspi->dev = dev; + platform_set_drvdata(pdev, qspi); + mutex_init(&qspi->lock); + spin_lock_init(&qspi->spinlock); + + for_each_available_child_of_node(dev->of_node, flash_np) { + ret = phytium_qspi_flash_setup(qspi, flash_np); + if (ret) { + dev_err(dev, "unable to setup flash chip\n"); + goto err_flash; + } + } + + ret = sysfs_create_group(&qspi->dev->kobj, &phytium_qspi_attr_group); + if (ret) { + dev_err(dev, "unable to create sysfs\n"); + goto err_flash; + } + + return 0; + +err_flash: + mutex_destroy(&qspi->lock); + phytium_qspi_mtd_free(qspi); + + clk_disable_unprepare(qspi->clk); + return ret; +} + +static int phytium_qspi_remove(struct platform_device *pdev) +{ + struct phytium_qspi *qspi = platform_get_drvdata(pdev); + + sysfs_remove_group(&qspi->dev->kobj, &phytium_qspi_attr_group); + + phytium_qspi_mtd_free(qspi); + mutex_destroy(&qspi->lock); + + clk_disable_unprepare(qspi->clk); + return 0; +} + +static const struct of_device_id phytium_qspi_match[] = { + {.compatible = "phytium,qspi"}, + { } +}; +MODULE_DEVICE_TABLE(of, phytium_qspi_match); + +static struct platform_driver phytium_qspi_driver = { + .probe = phytium_qspi_probe, + .remove = phytium_qspi_remove, + .driver = { + .name = "phytium-quadspi", + .of_match_table = phytium_qspi_match, + }, +}; + +module_platform_driver(phytium_qspi_driver); + +MODULE_AUTHOR("Mingshuai Zhu "); +MODULE_DESCRIPTION("Phytium QuadSPI driver"); +MODULE_LICENSE("GPL v2"); diff --git 
a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index ff641c06003a..441fb82c2893 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -1263,6 +1263,7 @@ static const struct flash_info spi_nor_ids[] = { /* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */ { "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, { "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "XM25QH128B", INFO(0x205018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, { }, }; diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index 7cdd0cead693..013503b2b506 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -119,6 +119,30 @@ config CAN_JANZ_ICAN3 This driver can also be built as a module. If so, the module will be called janz-ican3.ko. +config CAN_PHYTIUM_CORE + tristate "Phytium CAN controller core driver" + +config CAN_PHYTIUM_PLAT + tristate "Phytium CAN platform support" + select CAN_PHYTIUM_CORE + depends on ARCH_PHYTIUM || COMPILE_TEST + default y if ARCH_PHYTIUM + ---help--- + This driver supports for the on-chip Phytium CAN controller found on + FT-2000/4 and D2000 SoCs. + + To compile this driver as a module, choose M here. + +config CAN_PHYTIUM_PCI + tristate "Phytium CAN PCI support" + select CAN_PHYTIUM_CORE + depends on PCI || COMPILE_TEST + ---help--- + This driver is for Phytium CAN controller of X100 chipset which + is a PCI device. + + To compile this driver as a module, choose M here. 
+ config CAN_SUN4I tristate "Allwinner A10 CAN controller" depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile index 44922bf29b6a..373ddddad65f 100644 --- a/drivers/net/can/Makefile +++ b/drivers/net/can/Makefile @@ -28,6 +28,9 @@ obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o obj-$(CONFIG_CAN_MSCAN) += mscan/ obj-$(CONFIG_CAN_M_CAN) += m_can/ obj-$(CONFIG_CAN_PEAK_PCIEFD) += peak_canfd/ +obj-$(CONFIG_CAN_PHYTIUM_CORE) += phytium_can.o +obj-$(CONFIG_CAN_PHYTIUM_PLAT) += phytium_can_plat.o +obj-$(CONFIG_CAN_PHYTIUM_PCI) += phytium_can_pci.o obj-$(CONFIG_CAN_SJA1000) += sja1000/ obj-$(CONFIG_CAN_SUN4I) += sun4i_can.o obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o diff --git a/drivers/net/can/phytium_can.c b/drivers/net/can/phytium_can.c new file mode 100644 index 000000000000..30a3f52910b4 --- /dev/null +++ b/drivers/net/can/phytium_can.c @@ -0,0 +1,694 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Phytium CAN core controller driver + * + * Copyright (C) 2018-2021, Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "phytium_can.h" + +static void phytium_write_reg(const struct phytium_can_priv *priv, + enum phytium_can_reg reg, u32 val) +{ + writel(val, priv->reg_base + reg); +} + +static u32 phytium_read_reg(const struct phytium_can_priv *priv, + enum phytium_can_reg reg) +{ + return readl(priv->reg_base + reg); +} + +static void phytium_set_reg_bits(const struct phytium_can_priv *priv, + enum phytium_can_reg reg, u32 bs) +{ + u32 val = readl(priv->reg_base + reg); + + val |= bs; + writel(val, priv->reg_base + reg); +} + +static void phytium_clr_reg_bits(const struct phytium_can_priv *priv, + enum phytium_can_reg reg, u32 bs) +{ + u32 val = readl(priv->reg_base + reg); + + val &= ~bs; + writel(val, priv->reg_base + reg); +} + +static int phytium_set_bittiming(struct net_device *ndev) +{ + struct phytium_can_priv *priv = netdev_priv(ndev); + struct can_bittiming *bt = &priv->can.bittiming; + u32 btr; + u32 is_config_mode; + + /* Check whether Phytium CAN is in configuration mode. + * It cannot set bit timing if Phytium CAN is not in configuration mode. + */ + is_config_mode = (priv->read_reg(priv, FTCAN_CTRL) & + FTCAN_CTRL_XFER_MASK); + if (is_config_mode) { + netdev_alert(ndev, + "BUG! 
Cannot set bittiming - CAN is not in config mode\n"); + return -EPERM; + } + + /* Setting Baud Rate prescalar value in BRPR Register */ + btr = (bt->brp - 1) << 16; + + /* Setting Time Segment 1 in BTR Register */ + btr |= (bt->prop_seg - 1) << 2; + + btr |= (bt->phase_seg1 - 1) << 5; + + /* Setting Time Segment 2 in BTR Register */ + btr |= (bt->phase_seg2 - 1) << 8; + + /* Setting Synchronous jump width in BTR Register */ + btr |= (bt->sjw - 1); + + priv->write_reg(priv, FTCAN_DAT_RATE_CTRL, btr); + priv->write_reg(priv, FTCAN_ARB_RATE_CTRL, btr); + + netdev_dbg(ndev, "DAT=0x%08x, ARB=0x%08x\n", + priv->read_reg(priv, FTCAN_DAT_RATE_CTRL), + priv->read_reg(priv, FTCAN_ARB_RATE_CTRL)); + + return 0; +} + +static int phytium_can_start(struct net_device *ndev) +{ + struct phytium_can_priv *priv = netdev_priv(ndev); + int err; + + err = phytium_set_bittiming(ndev); + if (err < 0) + return err; + + /* Identifier mask enable */ + priv->set_reg_bits(priv, FTCAN_CTRL, FTCAN_CTRL_AIME_MASK); + priv->write_reg(priv, FTCAN_ACC_ID0_MASK, FTCAN_ACC_IDN_MASK); + priv->write_reg(priv, FTCAN_ACC_ID1_MASK, FTCAN_ACC_IDN_MASK); + priv->write_reg(priv, FTCAN_ACC_ID2_MASK, FTCAN_ACC_IDN_MASK); + priv->write_reg(priv, FTCAN_ACC_ID3_MASK, FTCAN_ACC_IDN_MASK); + + /* Enable interrupts */ + priv->write_reg(priv, FTCAN_INTR, FTCAN_INTR_EN); + + /*Enable Transfer*/ + priv->set_reg_bits(priv, FTCAN_CTRL, FTCAN_CTRL_XFER_MASK); + + netdev_dbg(ndev, "status:#x%08x\n", + priv->read_reg(priv, FTCAN_XFER_STS)); + + priv->can.state = CAN_STATE_ERROR_ACTIVE; + return 0; +} + +static int phytium_do_set_mode(struct net_device *ndev, enum can_mode mode) +{ + int ret; + + switch (mode) { + case CAN_MODE_START: + ret = phytium_can_start(ndev); + if (ret < 0) { + netdev_err(ndev, "xcan_chip_start failed!\n"); + return ret; + } + netif_wake_queue(ndev); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +static int phytium_can_start_xmit(struct sk_buff *skb, struct net_device 
*ndev) +{ + struct phytium_can_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + struct can_frame *cf = (struct can_frame *)skb->data; + u32 id, dlc, frame_head[2] = {0, 0}, data[8] = {0, 0}; + u32 tx_fifo_cnt; + unsigned long flags; + + if (can_dropped_invalid_skb(ndev, skb)) + return NETDEV_TX_OK; + + /* Check if the TX buffer is full */ + tx_fifo_cnt = (priv->read_reg(priv, FTCAN_FIFO_CNT) >> FTCAN_FIFO_CNT_TFN_SHIFT); + if (tx_fifo_cnt == priv->tx_max) { + netif_stop_queue(ndev); + netdev_err(ndev, "BUG!, TX FIFO full when queue awake!\n"); + return NETDEV_TX_BUSY; + } + + if (priv->tx_head == priv->tx_tail) { + priv->tx_head = 0; + priv->tx_tail = 0; + } + + /* Watch carefully on the bit sequence */ + if (cf->can_id & CAN_EFF_FLAG) { + /* Extended CAN ID format */ + id = ((cf->can_id & CAN_EFF_MASK) << FTCAN_IDR_ID2_SHIFT) & + FTCAN_IDR_ID2_MASK; + id |= (((cf->can_id & CAN_EFF_MASK) >> + (CAN_EFF_ID_BITS - CAN_SFF_ID_BITS)) << + FTCAN_IDR_ID1_SHIFT) & FTCAN_IDR_ID1_MASK; + + /* The substibute remote TX request bit should be "1" + * for extended frames as in the Xilinx CAN datasheet + */ + id |= FTCAN_IDR_IDE_MASK | FTCAN_IDR_SRR_MASK; + + if (cf->can_id & CAN_RTR_FLAG) + /* Extended frames remote TX request */ + id |= FTCAN_IDR_RTR_MASK; + + dlc = cf->can_dlc << FTCAN_IDR_EDLC_SHIFT; + + frame_head[0] = cpu_to_be32p(&id);//id; + frame_head[1] = cpu_to_be32p(&dlc);//dlc; + + /* Write the Frame to Phytium CAN TX FIFO */ + priv->write_reg(priv, FTCAN_TX_FIFO, frame_head[0]); + priv->write_reg(priv, FTCAN_TX_FIFO, frame_head[1]); + } else { + /* Standard CAN ID format */ + id = ((cf->can_id & CAN_SFF_MASK) << FTCAN_IDR_ID1_SHIFT) & + FTCAN_IDR_ID1_MASK; + + if (cf->can_id & CAN_RTR_FLAG) + /* Standard frames remote TX request */ + id |= FTCAN_IDR_SRR_MASK; + + dlc = ((cf->can_dlc << FTCAN_IDR_SDLC_SHIFT) | FTCAN_IDR_PAD_MASK); + id |= dlc; + + frame_head[0] = cpu_to_be32p(&id); + + /* Write the Frame to Xilinx CAN TX FIFO */ + 
priv->write_reg(priv, FTCAN_TX_FIFO, frame_head[0]); + } + + if (!(cf->can_id & CAN_RTR_FLAG)) { + if (cf->can_dlc > 0) { + data[0] = (*(__be32 *)(cf->data + 0)); + priv->write_reg(priv, FTCAN_TX_FIFO, data[0]); + } + if (cf->can_dlc > 4) { + data[1] = (*(__be32 *)(cf->data + 4)); + priv->write_reg(priv, FTCAN_TX_FIFO, data[1]); + } + stats->tx_bytes += cf->can_dlc; + } + + can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max); + priv->tx_head++; + + /* triggers tranmission */ + spin_lock_irqsave(&priv->lock, flags); + priv->clr_reg_bits(priv, FTCAN_CTRL, FTCAN_CTRL_XFER_MASK); + priv->set_reg_bits(priv, FTCAN_CTRL, FTCAN_CTRL_TXREQ_MASK); + priv->set_reg_bits(priv, FTCAN_CTRL, FTCAN_CTRL_TXREQ_MASK | FTCAN_CTRL_XFER_MASK); + netif_stop_queue(ndev); + spin_unlock_irqrestore(&priv->lock, flags); + + return NETDEV_TX_OK; +} + +static void phytium_can_rx(struct net_device *ndev) +{ + struct phytium_can_priv *priv = netdev_priv(ndev); + int data_cnt, i; + u32 buf[64]; + + data_cnt = priv->read_reg(priv, FTCAN_FIFO_CNT); + data_cnt &= 0x7F; + + for (i = 0; i < data_cnt; ++i) + buf[i] = priv->read_reg(priv, FTCAN_RX_FIFO); + + if (priv->is_kfifo_full_err) + return; + + if ((KFIFO_LEN - kfifo_len(&priv->rx_kfifo)) < (data_cnt * 4)) { + netdev_err(ndev, "RX kfifo is full,restart CAN controller!\n"); + priv->is_kfifo_full_err = true; + return; + } + + kfifo_in(&priv->rx_kfifo, buf, data_cnt * 4); + + cancel_delayed_work(&priv->can_frame_work); + schedule_delayed_work(&priv->can_frame_work, 0); +} + +static void phytium_err_interrupt(struct net_device *ndev, u32 isr) +{ + struct phytium_can_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + struct can_frame *cf; + struct sk_buff *skb; + u32 txerr = 0, rxerr = 0; + + skb = alloc_can_err_skb(ndev, &cf); + + rxerr = priv->read_reg(priv, FTCAN_ERR_CNT) & FTCAN_ERR_CNT_RFN_MASK; + txerr = ((priv->read_reg(priv, FTCAN_ERR_CNT) & + FTCAN_ERR_CNT_TFN_MASK) >> FTCAN_ERR_CNT_TFN_SHIFT); + + if (isr 
& FTCAN_INTR_BOIS_MASK) { + priv->can.state = CAN_STATE_BUS_OFF; + priv->can.can_stats.bus_off++; + /* Leave device in Config Mode in bus-off state */ + can_bus_off(ndev); + if (skb) + cf->can_id |= CAN_ERR_BUSOFF; + } else if ((isr & FTCAN_INTR_PEIS_MASK) == FTCAN_INTR_PEIS_MASK) { + priv->can.state = CAN_STATE_ERROR_PASSIVE; + priv->can.can_stats.error_passive++; + if (skb) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = (rxerr > 127) ? + CAN_ERR_CRTL_RX_PASSIVE : + CAN_ERR_CRTL_TX_PASSIVE; + cf->data[6] = txerr; + cf->data[7] = rxerr; + } + } else if (isr & FTCAN_INTR_PWIS_MASK) { + priv->can.state = CAN_STATE_ERROR_WARNING; + priv->can.can_stats.error_warning++; + if (skb) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] |= (txerr > rxerr) ? + CAN_ERR_CRTL_TX_WARNING : + CAN_ERR_CRTL_RX_WARNING; + cf->data[6] = txerr; + cf->data[7] = rxerr; + } + } + + /* Check for RX FIFO Overflow interrupt */ + if (isr & FTCAN_INTR_RFIS_MASK) { + stats->rx_over_errors++; + stats->rx_errors++; + + if (skb) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; + } + } + + if (skb) { + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); + } + + if ((isr & FTCAN_INTR_RFIS_MASK) && + (FTCAN_XFER_OVERLOAD_FRAM == + (priv->read_reg(priv, FTCAN_XFER_STS) & + FTCAN_XFER_FRAS_MASK))) + phytium_can_rx(ndev); +} + +static int +phytium_get_frame_from_kfifo(struct net_device *ndev, u8 *buf, int len) +{ + u32 id, dlc, net_dlc, is_standard_frame_flag; + u32 rdout_dlc, data[2] = {0, 0}; + struct can_frame *cf; + struct phytium_can_priv *priv; + struct net_device_stats *stats; + struct sk_buff *skb; + + memcpy(&id, buf, 4); + + id = be32_to_cpup(&id); + + if (id & FTCAN_IDR_IDE_MASK) { + /* Received an Extended format frame */ + memcpy(&dlc, buf + 4, 4); + dlc = (dlc >> 2) & 0xf; + + net_dlc = get_can_dlc(dlc); + if (net_dlc > 4 && len >= 16) + rdout_dlc = 16; + else if (net_dlc > 0 && len >= 12) + rdout_dlc = 12; + else if (net_dlc == 0 && len >= 8) + 
rdout_dlc = 8; + else + return 0; + + is_standard_frame_flag = 0; + } else { + /* Received a standard format frame */ + dlc = (id & FTCAN_IDR_DLC_MASK) >> FTCAN_IDR_SDLC_SHIFT; + net_dlc = get_can_dlc(dlc); + if (net_dlc > 4 && len >= 12) + rdout_dlc = 12; + else if (net_dlc > 0 && len >= 8) + rdout_dlc = 8; + else if (net_dlc == 0 && len >= 4) + rdout_dlc = 4; + else + return 0; + + is_standard_frame_flag = 1; + } + + if (unlikely(!ndev)) + return -1; + + priv = netdev_priv(ndev); + stats = &ndev->stats; + skb = alloc_can_skb(ndev, &cf); + if (unlikely(!skb)) { + stats->rx_dropped++; + return rdout_dlc; + } + /* Change Phytium CAN ID format to socketCAN ID format */ + if (id & FTCAN_IDR_IDE_MASK) { + cf->can_id = (id & FTCAN_IDR_ID1_MASK) >> 3; + cf->can_id |= (id & FTCAN_IDR_ID2_MASK) >> + FTCAN_IDR_ID2_SHIFT; + cf->can_id |= CAN_EFF_FLAG; + if (id & FTCAN_IDR_RTR_MASK) + cf->can_id |= CAN_RTR_FLAG; + } else { + cf->can_id = (id & FTCAN_IDR_ID1_MASK) >> + FTCAN_IDR_ID1_SHIFT; + if (id & FTCAN_IDR_SRR_MASK) + cf->can_id |= CAN_RTR_FLAG; + } + + cf->can_dlc = net_dlc; + + if (!(cf->can_id & CAN_RTR_FLAG)) { + if (cf->can_dlc > 0 && is_standard_frame_flag) { + memcpy(data, buf + 4, 4); + *(__be32 *)(cf->data) = (data[0]); + } else if (cf->can_dlc > 0 && !is_standard_frame_flag) { + memcpy(data, buf + 8, 4); + *(__be32 *)(cf->data) = (data[0]); + } + + if (cf->can_dlc > 4 && is_standard_frame_flag) { + memcpy(data + 1, buf + 8, 4); + *(__be32 *)(cf->data + 4) = (data[1]); + } else if (cf->can_dlc > 0 && !is_standard_frame_flag) { + memcpy(data + 1, buf + 12, 4); + *(__be32 *)(cf->data + 4) = (data[1]); + } + } + stats->rx_bytes += cf->can_dlc; + stats->rx_packets++; + + netif_receive_skb(skb); + return rdout_dlc; +} + +static void phytium_poll_kfifo(struct work_struct *work) +{ + u32 len, no_rd_len; + int rdout_len; + u8 *buffer; + struct phytium_can_priv *priv = container_of(work, + struct phytium_can_priv, + can_frame_work.work); + struct net_device *ndev = 
priv->ndev; + + len = kfifo_len(&priv->rx_kfifo); + if (!len) + return; + + buffer = kzalloc(len + 4 * 4, GFP_KERNEL); + if (!buffer) + return; + + if (priv->can_frame[0]) { + memcpy(buffer, priv->can_frame + 1, priv->can_frame[0]); + if (!kfifo_out(&priv->rx_kfifo, buffer + priv->can_frame[0], len)) + dev_err(priv->dev, "Kfifo_out error.\n"); + len += priv->can_frame[0]; + } else { + if (!kfifo_out(&priv->rx_kfifo, buffer, len)) + dev_err(priv->dev, "Kfifo_out error.\n"); + } + + no_rd_len = len; + do { + if (no_rd_len >= CAN_FRAM_MIN_IN_FIFO) { + rdout_len = phytium_get_frame_from_kfifo(ndev, buffer + (len - no_rd_len), + no_rd_len); + if (rdout_len == -1) { + priv->can_frame[0] = 0; + break; + } else if (!rdout_len) { + priv->can_frame[0] = no_rd_len; + memcpy(priv->can_frame + 1, + buffer + (len - no_rd_len), no_rd_len); + break; + } + + no_rd_len -= rdout_len; + if (!no_rd_len) { + /* clear unfinished data length stored in can_frame[0] */ + priv->can_frame[0] = 0; + break; + } + } else { + priv->can_frame[0] = no_rd_len; + memcpy(priv->can_frame + 1, + buffer + (len - no_rd_len), no_rd_len); + break; + } + } while (1); + + kfree(buffer); +} + +static void phytium_tx_interrupt(struct net_device *ndev, u32 isr) +{ + struct phytium_can_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + + while ((priv->tx_head - priv->tx_tail > 0) && + (isr & FTCAN_INTR_TEIS_MASK)) { + priv->set_reg_bits(priv, FTCAN_INTR, + FTCAN_INTR_TEIC_MASK | FTCAN_INTR_REIC_MASK); + can_get_echo_skb(ndev, priv->tx_tail % priv->tx_max); + priv->tx_tail++; + stats->tx_packets++; + isr = (priv->read_reg(priv, FTCAN_INTR) & + FTCAN_INTR_STATUS_MASK); + } + + priv->clr_reg_bits(priv, FTCAN_CTRL, FTCAN_CTRL_XFER_MASK); + priv->clr_reg_bits(priv, FTCAN_CTRL, FTCAN_CTRL_TXREQ_MASK); + priv->set_reg_bits(priv, FTCAN_CTRL, FTCAN_CTRL_XFER_MASK); + can_led_event(ndev, CAN_LED_EVENT_TX); + netif_wake_queue(ndev); +} + +static irqreturn_t phytium_can_irq(int irq, void 
*dev_id) +{ + struct net_device *ndev = (struct net_device *)dev_id; + struct phytium_can_priv *priv = netdev_priv(ndev); + u32 isr; + + /* Get the interrupt status from Phytium CAN */ + isr = (priv->read_reg(priv, FTCAN_INTR) & FTCAN_INTR_STATUS_MASK); + if (!isr) + return IRQ_NONE; + + /* Check for the type of error interrupt and Processing it */ + if (isr & (FTCAN_INTR_EIS_MASK | FTCAN_INTR_RFIS_MASK | + FTCAN_INTR_BOIS_MASK | FTCAN_INTR_PEIS_MASK)) { + if (isr & FTCAN_INTR_RFIS_MASK) { + priv->clr_reg_bits(priv, FTCAN_INTR, + FTCAN_INTR_EN); + priv->set_reg_bits(priv, FTCAN_INTR, + FTCAN_INTR_REIC_MASK | + FTCAN_INTR_TEIC_MASK); + } + + phytium_err_interrupt(ndev, isr); + + priv->set_reg_bits(priv, FTCAN_INTR, + (FTCAN_INTR_EIC_MASK | FTCAN_INTR_RFIC_MASK | + FTCAN_INTR_BOIC_MASK | FTCAN_INTR_PEIC_MASK)); + priv->set_reg_bits(priv, FTCAN_INTR, FTCAN_INTR_EN); + return IRQ_HANDLED; + } + + if ((isr & FTCAN_INTR_TEIS_MASK)) { + isr &= (~FTCAN_INTR_REIS_MASK); + phytium_tx_interrupt(ndev, isr); + } + + if (isr & (FTCAN_INTR_REIS_MASK)) { + priv->clr_reg_bits(priv, FTCAN_INTR, + FTCAN_INTR_REIE_MASK); + phytium_can_rx(ndev); + priv->set_reg_bits(priv, FTCAN_INTR, FTCAN_INTR_REIC_MASK); + priv->set_reg_bits(priv, FTCAN_INTR, + FTCAN_INTR_REIE_MASK); + } + + return IRQ_HANDLED; +} + +static void phytium_can_stop(struct net_device *ndev) +{ + struct phytium_can_priv *priv = netdev_priv(ndev); + u32 ier, data_cnt, i; + + /* Disable interrupts and leave the can in configuration mode */ + ier = (FTCAN_INTR_DIS & FTCAN_INTR_EN_MASK); + priv->clr_reg_bits(priv, FTCAN_INTR, ier); + + priv = netdev_priv(ndev); + + data_cnt = priv->read_reg(priv, FTCAN_FIFO_CNT); + data_cnt &= 0x7F; + for (i = 0; i < data_cnt; ++i) + priv->read_reg(priv, FTCAN_RX_FIFO); + + memset(priv->can_frame, 0, sizeof(priv->can_frame)); + priv->is_kfifo_full_err = false; + kfifo_reset(&priv->rx_kfifo); + /* Disable Transfer */ + priv->clr_reg_bits(priv, FTCAN_CTRL, FTCAN_CTRL_XFER_MASK); + 
priv->can.state = CAN_STATE_STOPPED; +} + +static int phytium_can_open(struct net_device *ndev) +{ + struct phytium_can_priv *priv = netdev_priv(ndev); + int ret; + + ret = request_irq(ndev->irq, phytium_can_irq, priv->irq_flags, + ndev->name, ndev); + if (ret < 0) { + netdev_err(ndev, "irq allocation for CAN failed\n"); + goto err; + } + + ret = open_candev(ndev); + if (ret) + goto err_irq; + + ret = phytium_can_start(ndev); + if (ret < 0) { + netdev_err(ndev, "failed to start!\n"); + goto err_candev; + } + + can_led_event(ndev, CAN_LED_EVENT_OPEN); + + netif_start_queue(ndev); + + return 0; + +err_candev: + close_candev(ndev); +err_irq: + free_irq(ndev->irq, ndev); +err: + return ret; +} + +static int phytium_can_close(struct net_device *ndev) +{ + netif_stop_queue(ndev); + phytium_can_stop(ndev); + free_irq(ndev->irq, ndev); + close_candev(ndev); + can_led_event(ndev, CAN_LED_EVENT_STOP); + + return 0; +} + +static int phytium_get_berr_counter(const struct net_device *ndev, struct can_berr_counter *bec) +{ + struct phytium_can_priv *priv = netdev_priv(ndev); + + bec->rxerr = priv->read_reg(priv, FTCAN_ERR_CNT) & FTCAN_ERR_CNT_RFN_MASK; + bec->txerr = ((priv->read_reg(priv, FTCAN_ERR_CNT) & + FTCAN_ERR_CNT_TFN_MASK) >> FTCAN_ERR_CNT_TFN_SHIFT); + + return 0; +} + +static const struct net_device_ops phytium_can_netdev_ops = { + .ndo_open = phytium_can_open, + .ndo_stop = phytium_can_close, + .ndo_start_xmit = phytium_can_start_xmit, + .ndo_change_mtu = can_change_mtu, +}; + +void register_phytium_can(struct phytium_can_priv *priv) +{ + int ret; + + priv->can.do_set_mode = phytium_do_set_mode; + priv->can.do_get_berr_counter = phytium_get_berr_counter; + + priv->ndev->netdev_ops = &phytium_can_netdev_ops; + + priv->write_reg = phytium_write_reg; + priv->read_reg = phytium_read_reg; + priv->set_reg_bits = phytium_set_reg_bits; + priv->clr_reg_bits = phytium_clr_reg_bits; + + if (kfifo_alloc(&priv->rx_kfifo, KFIFO_LEN, GFP_KERNEL)) { + dev_err(priv->dev, "failed to 
allocate kfifo\n"); + goto err; + } + + INIT_DELAYED_WORK(&priv->can_frame_work, phytium_poll_kfifo); + + ret = register_candev(priv->ndev); + if (ret) { + dev_err(priv->dev, "fail to register failed (err=%d)\n", ret); + goto err; + } + + devm_can_led_init(priv->ndev); + netdev_dbg(priv->ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n", + priv->reg_base, priv->ndev->irq, priv->can.clock.freq, priv->tx_max); + return; +err: + free_candev(priv->ndev); +} +EXPORT_SYMBOL_GPL(register_phytium_can); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Cheng Quan "); +MODULE_DESCRIPTION("Core driver for Phytium CAN controller"); diff --git a/drivers/net/can/phytium_can.h b/drivers/net/can/phytium_can.h new file mode 100644 index 000000000000..f1d9103690ee --- /dev/null +++ b/drivers/net/can/phytium_can.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* CAN bus driver for Phytium CAN controller. + * + * Copyright (C) 2021, Phytium Technology Co.,Ltd. + */ + +#ifndef PHYTIUM_CAN_H +#define PHYTIUM_CAN_H + +enum phytium_can_reg { + FTCAN_CTRL = 0x00, /* Global control register */ + FTCAN_INTR = 0x04, /* Interrupt register */ + FTCAN_ARB_RATE_CTRL = 0x08, /* Arbitration rate control register */ + FTCAN_DAT_RATE_CTRL = 0x0C, /* Data rate control register */ + FTCAN_ACC_ID0 = 0x10, /* Acceptance identifier0 register */ + FTCAN_ACC_ID1 = 0x14, /* Acceptance identifier1 register */ + FTCAN_ACC_ID2 = 0x18, /* Acceptance identifier2 register */ + FTCAN_ACC_ID3 = 0x1C, /* Acceptance identifier3 register */ + FTCAN_ACC_ID0_MASK = 0x20, /* Acceptance identifier0 mask register */ + FTCAN_ACC_ID1_MASK = 0x24, /* Acceptance identifier1 mask register */ + FTCAN_ACC_ID2_MASK = 0x28, /* Acceptance identifier2 mask register */ + FTCAN_ACC_ID3_MASK = 0x2C, /* Acceptance identifier3 mask register */ + FTCAN_XFER_STS = 0x30, /* Transfer status register */ + FTCAN_ERR_CNT = 0x34, /* Error counter register */ + FTCAN_FIFO_CNT = 0x38, /* FIFO counter register */ + FTCAN_DMA_CTRL = 0x3C, 
/* DMA request control register */ + FTCAN_TX_FIFO = 0x100, /* TX FIFO shadow register */ + FTCAN_RX_FIFO = 0x200, /* RX FIFO shadow register */ +}; + +/* FTCAN_CTRL mask */ +#define FTCAN_CTRL_XFER_MASK (0x1 << 0) /* Transfer enable */ +#define FTCAN_CTRL_TXREQ_MASK (0x1 << 1) /* Transmit request */ +#define FTCAN_CTRL_AIME_MASK (0x1 << 2) /* Acceptance identifier mask enable */ + +/* FTCAN_INTR mask */ +#define FTCAN_INTR_STATUS_MASK (0xFF << 0) /* the interrupt status */ +#define FTCAN_INTR_BOIS_MASK (0x1 << 0) /* Bus off interrupt status */ +#define FTCAN_INTR_PWIS_MASK (0x1 << 1) /* Passive warning interrupt status */ +#define FTCAN_INTR_PEIS_MASK (0x1 << 2) /* Passive error interrupt status */ +#define FTCAN_INTR_RFIS_MASK (0x1 << 3) /* RX FIFO full interrupt status */ +#define FTCAN_INTR_TFIS_MASK (0x1 << 4) /* TX FIFO empty interrupt status */ +#define FTCAN_INTR_REIS_MASK (0x1 << 5) /* RX frame end interrupt status */ +#define FTCAN_INTR_TEIS_MASK (0x1 << 6) /* TX frame end interrupt status */ +#define FTCAN_INTR_EIS_MASK (0x1 << 7) /* Error interrupt status */ + +#define FTCAN_INTR_EN_MASK (0xFF << 8) /* the interrupt enable */ +#define FTCAN_INTR_BOIE_MASK (0x1 << 8) /* Bus off interrupt enable */ +#define FTCAN_INTR_PWIE_MASK (0x1 << 9) /* Passive warning interrupt enable */ +#define FTCAN_INTR_PEIE_MASK (0x1 << 10) /* Passive error interrupt enable */ +#define FTCAN_INTR_RFIE_MASK (0x1 << 11) /* RX FIFO full interrupt enable */ +#define FTCAN_INTR_TFIE_MASK (0x1 << 12) /* TX FIFO empty interrupt enable */ +#define FTCAN_INTR_REIE_MASK (0x1 << 13) /* RX frame end interrupt enable */ +#define FTCAN_INTR_TEIE_MASK (0x1 << 14) /* TX frame end interrupt enable */ +#define FTCAN_INTR_EIE_MASK (0x1 << 15) /* Error interrupt enable */ + +#define FTCAN_INTR_BOIC_MASK (0x1 << 16) /* Bus off interrupt clear */ +#define FTCAN_INTR_PWIC_MASK (0x1 << 17) /* Passive warning interrupt clear */ +#define FTCAN_INTR_PEIC_MASK (0x1 << 18) /* Passive error interrupt clear 
*/ +#define FTCAN_INTR_RFIC_MASK (0x1 << 19) /* RX FIFO full interrupt clear */ +#define FTCAN_INTR_TFIC_MASK (0x1 << 20) /* TX FIFO empty interrupt clear */ +#define FTCAN_INTR_REIC_MASK (0x1 << 21) /* RX frame end interrupt clear */ +#define FTCAN_INTR_TEIC_MASK (0x1 << 22) /* TX frame end interrupt clear */ +#define FTCAN_INTR_EIC_MASK (0x1 << 23) /* Error interrupt clear */ + +#define FTCAN_XFER_XFERS_MASK (0x1 << 10) /* Transfer status, 1:idle,0:busy */ +#define FTCAN_XFER_FRAS_MASK (0x7) /* frame status */ +#define FTCAN_XFER_OVERLOAD_FRAM 0x3 + +/* FTCAN_ACC_ID(0-3)_MASK mask */ +#define FTCAN_ACC_IDN_MASK 0x1FFFFFFF + +/* FTCAN_ERR_CNT_OFFSET mask */ +#define FTCAN_ERR_CNT_RFN_MASK (0xFF << 0) /* Receive error counter */ +#define FTCAN_ERR_CNT_TFN_MASK (0xFF << 16) /* Transmit error counter */ + +/* FTCAN_FIFO_CNT_OFFSET mask */ +#define FTCAN_FIFO_CNT_RFN_MASK (0xFF << 0) /* Receive FIFO valid data number */ +#define FTCAN_FIFO_CNT_TFN_MASK (0xFF << 16)/* Transmit FIFO valid data number */ + +#define FTCAN_ERR_CNT_TFN_SHIFT 16 /* Tx Error Count shift */ +#define FTCAN_FIFO_CNT_TFN_SHIFT 16 /* Tx FIFO Count shift */ +#define FTCAN_IDR_ID1_SHIFT 21 /* Standard Messg Identifier */ +#define FTCAN_IDR_ID2_SHIFT 1 /* Extended Message Identifier */ +#define FTCAN_IDR_SDLC_SHIFT 14 +#define FTCAN_IDR_EDLC_SHIFT 26 + +#define FTCAN_IDR_ID2_MASK 0x0007FFFE /* Extended message ident */ +#define FTCAN_IDR_ID1_MASK 0xFFE00000 /* Standard msg identifier */ +#define FTCAN_IDR_IDE_MASK 0x00080000 /* Identifier extension */ +#define FTCAN_IDR_SRR_MASK 0x00100000 /* Substitute remote TXreq */ +#define FTCAN_IDR_RTR_MASK 0x00000001 /* Extended frames remote TX request */ +#define FTCAN_IDR_DLC_MASK 0x0003C000 /* Standard msg dlc */ +#define FTCAN_IDR_PAD_MASK 0x00003FFF /* Standard msg padding 1 */ + +#define FTCAN_INTR_EN (FTCAN_INTR_TEIE_MASK | \ + FTCAN_INTR_REIE_MASK | \ + FTCAN_INTR_RFIE_MASK) + +#define FTCAN_INTR_DIS 0x00000000 +#define FTCAN_NAPI_WEIGHT 64 + +#define 
KFIFO_LEN 4096 +#define CAN_FRAM_MIN_IN_FIFO 4 + +/* struct phytium_can_priv - This definition define CAN driver instance + * @can: CAN private data structure. + * @rx_kfifo: Received frame FIFO + * @can_frame_work: Poll data from kfifo + * @can_clk: Pointer to struct clk + * @tx_head: Tx CAN packets ready to send on the queue + * @tx_tail: Tx CAN packets successfully sended on the queue + * @tx_max: Maximum number packets the driver can send + * @read_reg: For reading data from CAN registers + * @write_reg: For writing data to CAN registers + * @set_reg_bits: For writing data to CAN registers bit + * @clr_reg_bits: For writing 0 to CAN registers bit + * @dev: Device data structure + * @ndev: Network device data structure + * @reg_base: Ioremapped address to registers + * @irq_flags: For request_irq() + * @lock: The spin lock flag + * @isr: The interrupt status + * @can_frame: Store unfinished data frame + * @is_kfifo_full_err: Full flag for kfifo + */ +struct phytium_can_priv { + struct can_priv can; + struct kfifo rx_kfifo; + struct delayed_work can_frame_work; + struct clk *can_clk; + + unsigned int tx_head; + unsigned int tx_tail; + unsigned int tx_max; + + u32 (*read_reg)(const struct phytium_can_priv *priv, enum phytium_can_reg reg); + void (*write_reg)(const struct phytium_can_priv *priv, enum phytium_can_reg reg, u32 val); + void (*set_reg_bits)(const struct phytium_can_priv *priv, enum phytium_can_reg reg, u32 bs); + void (*clr_reg_bits)(const struct phytium_can_priv *priv, enum phytium_can_reg reg, u32 bs); + + struct device *dev; + struct net_device *ndev; + + void __iomem *reg_base; + unsigned long irq_flags; + spinlock_t lock; /* lock for tx */ + + u32 isr; + u32 can_frame[4]; + u32 is_kfifo_full_err; +}; + +void register_phytium_can(struct phytium_can_priv *priv); + +#endif /* PHYTIUM_CAN_H */ diff --git a/drivers/net/can/phytium_can_pci.c b/drivers/net/can/phytium_can_pci.c new file mode 100644 index 000000000000..87c99a701680 --- /dev/null +++ 
b/drivers/net/can/phytium_can_pci.c @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* PCI CAN bus driver for Phytium CAN controller + * + * Copyright (C) 2021, Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "phytium_can.h" + +#define DRV_NAME "phytium_can_pci" + +#define TX_MAX 64 +#define CLK_FREQ 480000000 + +static const struct can_bittiming_const phytium_ext_bittiming_const = { + .name = "phytium_can_ext", + .tseg1_min = 1, + .tseg1_max = 16, + .tseg2_min = 1, + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 8192, + .brp_inc = 2, +}; + +static int phytium_can_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) +{ + struct net_device *ndev; + struct phytium_can_priv *priv; + int ret; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + ret = pcim_iomap_regions(pdev, 0x1, pci_name(pdev)); + if (ret) + return ret; + + ndev = alloc_candev(sizeof(struct phytium_can_priv), TX_MAX); + if (!ndev) + return -ENOMEM; + + priv = netdev_priv(ndev); + priv->dev = &pdev->dev; + + priv->reg_base = pcim_iomap_table(pdev)[0]; + + priv->tx_head = 0; + priv->tx_tail = 0; + priv->tx_max = TX_MAX; + priv->ndev = ndev; + priv->is_kfifo_full_err = false; + priv->can.bittiming_const = &phytium_ext_bittiming_const; + + ndev->irq = pdev->irq; + ndev->flags |= IFF_ECHO; /* We support local echo */ + priv->irq_flags = IRQF_SHARED; + + spin_lock_init(&priv->lock); + + pci_set_drvdata(pdev, ndev); + SET_NETDEV_DEV(ndev, &pdev->dev); + + priv->can.clock.freq = CLK_FREQ; + + register_phytium_can(priv); + + return ret; +} + +static void phytium_can_pci_remove(struct pci_dev *pdev) +{ + struct net_device *ndev = pci_get_drvdata(pdev); + struct phytium_can_priv *priv = netdev_priv(ndev); + + kfifo_free(&priv->rx_kfifo); + unregister_candev(ndev); + free_candev(ndev); +} + +static const struct pci_device_id phytium_pci_id_table[] = { + { PCI_VDEVICE(PHYTIUM, 0xdc2d) }, + {}, +}; 
+MODULE_DEVICE_TABLE(pci, phytium_pci_id_table); + +static struct pci_driver phytium_can_pci_driver = { + .name = DRV_NAME, + .id_table = phytium_pci_id_table, + .probe = phytium_can_pci_probe, + .remove = phytium_can_pci_remove, +}; + +module_pci_driver(phytium_can_pci_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Phytium can controller driver"); +MODULE_AUTHOR("Cheng Quan "); diff --git a/drivers/net/can/phytium_can_plat.c b/drivers/net/can/phytium_can_plat.c new file mode 100644 index 000000000000..9bf2d533ddf6 --- /dev/null +++ b/drivers/net/can/phytium_can_plat.c @@ -0,0 +1,175 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Platform CAN bus driver for Phytium CAN controller + * + * Copyright (C) 2018-2021, Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "phytium_can.h" + +#define DRV_NAME "phytium_can_plat" + +static const struct can_bittiming_const phytium_bittiming_const = { + .name = "phytium_can", + .tseg1_min = 1, + .tseg1_max = 16, + .tseg2_min = 1, + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 512, + .brp_inc = 2, +}; + +static const struct can_bittiming_const phytium_ext_bittiming_const = { + .name = "phytium_can_ext", + .tseg1_min = 1, + .tseg1_max = 16, + .tseg2_min = 1, + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 8192, + .brp_inc = 2, +}; + +static int phytium_can_probe(struct platform_device *pdev) +{ + struct resource *res; + struct net_device *ndev; + struct phytium_can_priv *priv; + u32 tx_max; + struct fwnode_handle *fwnode = dev_fwnode(&pdev->dev); + int ret; + + ret = fwnode_property_read_u32(fwnode, "tx-fifo-depth", &tx_max); + if (ret < 0) { + dev_err(&pdev->dev, "tx-fifo-depth get error.\n"); + goto err; + } + + ndev = alloc_candev(sizeof(struct phytium_can_priv), tx_max); + if (!ndev) { + ret = -ENOMEM; + goto err; + } + + priv = 
netdev_priv(ndev); + priv->dev = &pdev->dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->reg_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->reg_base)) + return PTR_ERR(priv->reg_base); + + priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING; + priv->tx_head = 0; + priv->tx_tail = 0; + priv->tx_max = tx_max; + priv->ndev = ndev; + priv->is_kfifo_full_err = false; + + if (fwnode_property_present(fwnode, "extend_brp")) + priv->can.bittiming_const = &phytium_ext_bittiming_const; + else + priv->can.bittiming_const = &phytium_bittiming_const; + + ndev->irq = platform_get_irq(pdev, 0); + ndev->flags |= IFF_ECHO; /* We support local echo */ + priv->irq_flags = IRQF_SHARED; + + spin_lock_init(&priv->lock); + + platform_set_drvdata(pdev, ndev); + SET_NETDEV_DEV(ndev, &pdev->dev); + + /* Getting the CAN can_clk info */ + if (pdev->dev.of_node) { + priv->can_clk = devm_clk_get(&pdev->dev, "phytium_can_clk"); + if (IS_ERR(priv->can_clk)) { + dev_err(&pdev->dev, "Device clock not found.\n"); + ret = PTR_ERR(priv->can_clk); + goto free; + } + + ret = clk_prepare_enable(priv->can_clk); + if (ret) + goto free; + + priv->can.clock.freq = clk_get_rate(priv->can_clk); + } else if (has_acpi_companion(&pdev->dev)) { + ret = fwnode_property_read_u32(fwnode, "clock-frequency", + &priv->can.clock.freq); + if (ret < 0) { + dev_err(&pdev->dev, "clock frequency get error.\n"); + goto free; + } + } + + register_phytium_can(priv); + + return 0; + +free: + free_candev(ndev); +err: + return ret; +} + +static int phytium_can_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct phytium_can_priv *priv = netdev_priv(ndev); + + kfifo_free(&priv->rx_kfifo); + unregister_candev(ndev); + free_candev(ndev); + + return 0; +} + +#ifdef CONFIG_ACPI +static const struct acpi_device_id phytium_can_acpi_match[] = { + { "PHYT000A", 0 }, + {} +}; +MODULE_DEVICE_TABLE(acpi, phytium_can_acpi_match); +#endif + +/* 
Match table for OF platform binding */ +static const struct of_device_id phytium_can_of_ids[] = { + { .compatible = "phytium,can", }, + { /* end of list */ }, +}; +MODULE_DEVICE_TABLE(of, phytium_can_of_ids); + +static struct platform_driver phytium_can_driver = { + .probe = phytium_can_probe, + .remove = phytium_can_remove, + .driver = { + .name = DRV_NAME, + .of_match_table = phytium_can_of_ids, + .acpi_match_table = ACPI_PTR(phytium_can_acpi_match), + }, +}; + +module_platform_driver(phytium_can_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Cheng Quan "); +MODULE_DESCRIPTION("Platform CAN bus driver for Phytium CAN Controller"); diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 7a92f640c379..b7dfd4109d24 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -823,6 +823,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne usb_unanchor_urb(urb); usb_free_coherent(dev->udev, size, buf, urb->transfer_dma); + dev_kfree_skb(skb); atomic_dec(&dev->active_tx_urbs); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c index fad503820e04..8241d670f6a1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c @@ -9,6 +9,7 @@ * warranty of any kind, whether express or implied. 
*/ +#include #include #include #include @@ -32,6 +33,12 @@ static int dwmac_generic_probe(struct platform_device *pdev) dev_err(&pdev->dev, "dt configuration failed\n"); return PTR_ERR(plat_dat); } + } else if (has_acpi_companion(&pdev->dev)) { + plat_dat = stmmac_probe_config_acpi(pdev, &stmmac_res.mac); + if (!plat_dat) { + dev_err(&pdev->dev, "acpi configuration failed\n"); + return -EINVAL; + } } else { plat_dat = dev_get_platdata(&pdev->dev); if (!plat_dat) { @@ -84,6 +91,17 @@ static const struct of_device_id dwmac_generic_match[] = { }; MODULE_DEVICE_TABLE(of, dwmac_generic_match); +#ifdef CONFIG_ACPI +static const struct acpi_device_id dwmac_acpi_ids[] = { + { .id = "PHYT0004" }, + {}, +}; + +MODULE_DEVICE_TABLE(acpi, dwmac_acpi_ids); +#else +#define dwmac_acpi_ids NULL +#endif + static struct platform_driver dwmac_generic_driver = { .probe = dwmac_generic_probe, .remove = stmmac_pltfr_remove, @@ -91,6 +109,7 @@ static struct platform_driver dwmac_generic_driver = { .name = STMMAC_RESOURCE_NAME, .pm = &stmmac_pltfr_pm_ops, .of_match_table = of_match_ptr(dwmac_generic_match), + .acpi_match_table = ACPI_PTR(dwmac_acpi_ids), }, }; module_platform_driver(dwmac_generic_driver); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 2872684906e1..f40477a2228b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -24,6 +24,7 @@ https://bugzilla.stlinux.com/ *******************************************************************************/ +#include #include #include #include diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 2b800ce1d5bf..a4485ea0d2b1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -18,6 +18,9 @@ Author: Giuseppe Cavallaro 
*******************************************************************************/ +#include +#include +#include #include #include #include @@ -607,6 +610,248 @@ void stmmac_remove_config_dt(struct platform_device *pdev, EXPORT_SYMBOL_GPL(stmmac_probe_config_dt); EXPORT_SYMBOL_GPL(stmmac_remove_config_dt); +#ifdef CONFIG_ACPI +/* + * Parse ACPI _DSD to setup AXI register + */ +static struct stmmac_axi * stmmac_axi_setup_acpi(struct platform_device *pdev) +{ + struct fwnode_handle *np = dev_fwnode(&(pdev->dev)); + struct stmmac_axi * axi; + + axi = devm_kzalloc(&pdev->dev, sizeof(*axi), GFP_KERNEL); + if (!axi) + return ERR_PTR(-ENOMEM); + + axi->axi_lpi_en = fwnode_property_read_bool(np, "snps,lpi_en"); + axi->axi_xit_frm = fwnode_property_read_bool(np, "snps,xit_frm"); + axi->axi_kbbe = fwnode_property_read_bool(np, "snps,axi_kbbe"); + axi->axi_fb = fwnode_property_read_bool(np, "snps,axi_fb"); + axi->axi_mb = fwnode_property_read_bool(np, "snps,axi_mb"); + axi->axi_rb = fwnode_property_read_bool(np, "snps,axi_rb"); + + if (fwnode_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt)) + axi->axi_wr_osr_lmt = 1; + if (fwnode_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt)) + axi->axi_rd_osr_lmt = 1; + fwnode_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN); + + return axi; +} + +/** + * Parse ACPI _DSD parameters for multiple queues configuration + */ +static void stmmac_mtl_setup_acpi(struct platform_device *pdev, + struct plat_stmmacenet_data *plat) +{ + plat->rx_queues_to_use = 1; + plat->tx_queues_to_use = 1; + + /** + * First Queue must always be in DCB mode. As MTL_QUEUE_DCB=1 we need + * to always set this, otherwise Queue will be classified as AVB + * (because MTL_QUEUE_AVB = 0). 
+ */ + plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB; + plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB; + + plat->rx_queues_cfg[0].use_prio = true; + + plat->rx_queues_cfg[0].pkt_route = 0x0; + + plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP; + plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP; + + plat->tx_queues_cfg[0].use_prio = true; +} + +static int stmmac_acpi_phy(struct plat_stmmacenet_data *plat, + struct fwnode_handle *np, struct device *dev) +{ + plat->mdio_bus_data = devm_kzalloc(dev, + sizeof(struct stmmac_mdio_bus_data), + GFP_KERNEL); + + return 0; +} + +int fw_get_phy_mode(struct fwnode_handle *np) +{ + const char *pm; + int err, i; + + err = fwnode_property_read_string(np, "phy-mode", &pm); + if (err < 0) + err = fwnode_property_read_string(np, "phy-connection-mode", &pm); + if (err < 0) + return err; + + for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) { + if (!strcasecmp(pm, phy_modes(i))) + return i; + } + + return -ENODEV; +} + +int stmmac_acpi_clock_setup(struct plat_stmmacenet_data *plat, + struct platform_device *pdev) +{ + struct fwnode_handle *np = dev_fwnode(&(pdev->dev)); + struct device * dev = &pdev->dev; + struct clk *clk = ERR_PTR(-ENODEV); + u64 clk_freq = 0; + int err; + + err = fwnode_property_read_u64(np, "clock-frequency", &clk_freq); + if (err < 0) + clk_freq = 125000000; /* default to 125MHz */ + + plat->stmmac_clk = devm_clk_get(dev, dev_name(dev)); + if (IS_ERR(plat->stmmac_clk)) { + clk = clk_register_fixed_rate(dev, dev_name(dev), NULL, 0, clk_freq); + if (IS_ERR(clk)) + return -1; + if (clk_register_clkdev(clk, dev_name(dev), dev_name(dev))) + return -1; + plat->stmmac_clk = clk; + } + clk_prepare_enable(plat->stmmac_clk); + + plat->pclk = devm_clk_get(dev, "pclk"); + if (IS_ERR(plat->pclk)) + plat->pclk = NULL; + clk_prepare_enable(plat->pclk); + + plat->clk_ptp_ref = devm_clk_get(dev, "ptp_ref"); + if (IS_ERR(plat->clk_ptp_ref)) { + plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk); + plat->clk_ptp_ref = NULL; + } + 
+ plat->stmmac_rst = devm_reset_control_get(dev,STMMAC_RESOURCE_NAME); + if (IS_ERR(plat->stmmac_rst)) { + dev_info(dev, "no reset control found\n"); + plat->stmmac_rst = NULL; + } + + return 0; +} + +/** + * Parse ACPI driver parameters + */ +struct plat_stmmacenet_data * +stmmac_probe_config_acpi(struct platform_device *pdev, const char **mac) +{ + struct fwnode_handle *np; + struct plat_stmmacenet_data *plat; + struct stmmac_dma_cfg *dma_cfg; + + plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); + if (!plat) + return ERR_PTR(-ENOMEM); + + np = dev_fwnode(&(pdev->dev)); + + plat->interface = fw_get_phy_mode(np); + + /* Get max speed of operation from device tree */ + if (fwnode_property_read_u32(np, "max-speed", &plat->max_speed)) + plat->max_speed = -1; + + if (fwnode_property_read_u32(np, "bus_id", &plat->bus_id)) + plat->bus_id = 2; + + /* Default to PHY auto-detection */ + plat->phy_addr = -1; + + /* "snps,phy-addr" is not a standard property. Mark it as deprecated + * and warn of its use. Remove this when PHY node support is added. + */ + if (fwnode_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0) + dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n"); + + if (stmmac_acpi_phy(plat, np, &pdev->dev)) + return ERR_PTR(-ENODEV); + + fwnode_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size); + fwnode_property_read_u32(np, "rx-fifo-depth", &plat->rx_fifo_size); + if (plat->tx_fifo_size == 0) + plat->tx_fifo_size = 0x10000; + if (plat->rx_fifo_size == 0) + plat->rx_fifo_size = 0x10000; + + plat->force_sf_dma_mode = + fwnode_property_read_bool(np, "snps,force_sf_dma_mode"); + plat->en_tx_lpi_clockgating = + fwnode_property_read_bool(np, "snps,en-tx-lpi-clockgating"); + + /* Set the maxmtu to a default of JUMBO_LEN in case the + * parameter is not present. 
+ */ + plat->maxmtu = JUMBO_LEN; + + /* Set default value for multicast hash bins */ + plat->multicast_filter_bins = HASH_TABLE_SIZE; + + /* Set default value for unicast filter entries */ + plat->unicast_filter_entries = 1; + + /* Only to "snps,dwmac" */ + fwnode_property_read_u32(np, "max-frame-size", &plat->maxmtu); + fwnode_property_read_u32(np, "snps,multicast-filter-bins", + &plat->multicast_filter_bins); + fwnode_property_read_u32(np, "snps,perfect-filter-entries", + &plat->unicast_filter_entries); + plat->unicast_filter_entries = dwmac1000_validate_ucast_entries( + plat->unicast_filter_entries); + plat->multicast_filter_bins = dwmac1000_validate_mcast_bins( + plat->multicast_filter_bins); + plat->has_gmac = 1; + plat->pmt = 1; + + dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL); + if (!dma_cfg) + return ERR_PTR(-ENOMEM); + plat->dma_cfg = dma_cfg; + + fwnode_property_read_u32(np, "snps,pbl", &dma_cfg->pbl); + if (!dma_cfg->pbl) + dma_cfg->pbl = DEFAULT_DMA_PBL; + + fwnode_property_read_u32(np, "snps,txpbl", &dma_cfg->txpbl); + fwnode_property_read_u32(np, "snps,rxpbl", &dma_cfg->rxpbl); + dma_cfg->pblx8 = !fwnode_property_read_bool(np, "snps,no-pbl-x8"); + + dma_cfg->aal = fwnode_property_read_bool(np, "snps,aal"); + dma_cfg->fixed_burst = fwnode_property_read_bool(np, "snps,fixed-burst"); + dma_cfg->mixed_burst = fwnode_property_read_bool(np, "snps,mixed-burst"); + + plat->force_thresh_dma_mode = fwnode_property_read_bool(np, "snps,force_thresh_dma_mode"); + if (plat->force_thresh_dma_mode) + plat->force_sf_dma_mode = 0; + + fwnode_property_read_u32(np, "snps,ps-speed", &plat->mac_port_sel_speed); + + plat->axi = stmmac_axi_setup_acpi(pdev); + + stmmac_mtl_setup_acpi(pdev, plat); + + stmmac_acpi_clock_setup(plat,pdev); + + return plat; +} +#else +struct plat_stmmacenet_data * +stmmac_probe_config_acpi(struct platform_device *pdev, const char **mac) +{ + return ERR_PTR(-EINVAL); +} +#endif /* CONFIG_ACPI */ 
+EXPORT_SYMBOL_GPL(stmmac_probe_config_acpi); + int stmmac_get_platform_resources(struct platform_device *pdev, struct stmmac_resources *stmmac_res) { @@ -617,33 +862,43 @@ int stmmac_get_platform_resources(struct platform_device *pdev, /* Get IRQ information early to have an ability to ask for deferred * probe if needed before we went too far with resource allocation. */ - stmmac_res->irq = platform_get_irq_byname(pdev, "macirq"); - if (stmmac_res->irq < 0) { - if (stmmac_res->irq != -EPROBE_DEFER) { - dev_err(&pdev->dev, - "MAC IRQ configuration information not found\n"); + if (pdev->dev.of_node) { + stmmac_res->irq = platform_get_irq_byname(pdev, "macirq"); + if (stmmac_res->irq < 0) { + if (stmmac_res->irq != -EPROBE_DEFER) { + dev_err(&pdev->dev, + "MAC IRQ configuration information not found\n"); + } + return stmmac_res->irq; } - return stmmac_res->irq; - } - /* On some platforms e.g. SPEAr the wake up irq differs from the mac irq - * The external wake up irq can be passed through the platform code - * named as "eth_wake_irq" - * - * In case the wake up interrupt is not passed from the platform - * so the driver will continue to use the mac irq (ndev->irq) - */ - stmmac_res->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); - if (stmmac_res->wol_irq < 0) { - if (stmmac_res->wol_irq == -EPROBE_DEFER) + /* On some platforms e.g. 
SPEAr the wake up irq differs from the mac irq + * The external wake up irq can be passed through the platform code + * named as "eth_wake_irq" + * + * In case the wake up interrupt is not passed from the platform + * so the driver will continue to use the mac irq (ndev->irq) + */ + stmmac_res->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); + if (stmmac_res->wol_irq < 0) { + if (stmmac_res->wol_irq == -EPROBE_DEFER) + return -EPROBE_DEFER; + stmmac_res->wol_irq = stmmac_res->irq; + } + + stmmac_res->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); + if (stmmac_res->lpi_irq == -EPROBE_DEFER) return -EPROBE_DEFER; + } else if (has_acpi_companion(&pdev->dev)) { + stmmac_res->irq = platform_get_irq(pdev, 0); + if (stmmac_res->irq < 0) + dev_err(&pdev->dev, + "MAC IRQ configuration information not found\n"); + stmmac_res->wol_irq = stmmac_res->irq; + stmmac_res->lpi_irq = -1; } - stmmac_res->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); - if (stmmac_res->lpi_irq == -EPROBE_DEFER) - return -EPROBE_DEFER; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); stmmac_res->addr = devm_ioremap_resource(&pdev->dev, res); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h index b72eb0de57b7..8e117ad0e42a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h @@ -23,6 +23,8 @@ struct plat_stmmacenet_data * stmmac_probe_config_dt(struct platform_device *pdev, const char **mac); +struct plat_stmmacenet_data * +stmmac_probe_config_acpi(struct platform_device *pdev, const char **mac); void stmmac_remove_config_dt(struct platform_device *pdev, struct plat_stmmacenet_data *plat); diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 68636b6b2c7a..8c636c493227 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -674,14 +674,14 @@ static void sixpack_close(struct 
tty_struct *tty) */ netif_stop_queue(sp->dev); - unregister_netdev(sp->dev); - del_timer_sync(&sp->tx_t); del_timer_sync(&sp->resync_t); /* Free all 6pack frame buffers. */ kfree(sp->rbuff); kfree(sp->xbuff); + + unregister_netdev(sp->dev); } /* Perform I/O control on an active 6pack channel. */ @@ -859,12 +859,6 @@ static void decode_data(struct sixpack *sp, unsigned char inbyte) return; } - if (sp->rx_count_cooked + 2 >= sizeof(sp->cooked_buf)) { - pr_err("6pack: cooked buffer overrun, data loss\n"); - sp->rx_count = 0; - return; - } - buf = sp->raw_buf; sp->cooked_buf[sp->rx_count_cooked++] = buf[0] | ((buf[1] << 2) & 0xc0); diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c index 83640628c47d..6ac232e52bf7 100644 --- a/drivers/net/usb/sr9700.c +++ b/drivers/net/usb/sr9700.c @@ -410,7 +410,7 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb) /* ignore the CRC length */ len = (skb->data[1] | (skb->data[2] << 8)) - 4; - if (len > ETH_FRAME_LEN || len > skb->len) + if (len > ETH_FRAME_LEN) return 0; /* the last packet of current skb */ diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig index 028b287466fb..a1a024fe78e3 100644 --- a/drivers/pci/controller/Kconfig +++ b/drivers/pci/controller/Kconfig @@ -278,5 +278,15 @@ config VMD To compile this driver as a module, choose M here: the module will be called vmd. +config PCIE_PHYTIUM_EP + tristate "Phytium PCIe endpoint controller" + depends on OF + depends on PCI_ENDPOINT + help + Say Y here if you want to support Phytium PCIe controller in + endpoint mode on Phytium SoC. The controller can act as Root Port + or End Point with different phytium firmware. But End Point mode only support + one physical function. 
+ source "drivers/pci/controller/dwc/Kconfig" endmenu diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile index d56a507495c5..45fbf1ff3354 100644 --- a/drivers/pci/controller/Makefile +++ b/drivers/pci/controller/Makefile @@ -29,6 +29,8 @@ obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o obj-$(CONFIG_PCIE_MOBIVEIL) += pcie-mobiveil.o obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o obj-$(CONFIG_VMD) += vmd.o +obj-$(CONFIG_PCIE_PHYTIUM_EP) += pcie-phytium-ep.o + # pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW obj-y += dwc/ diff --git a/drivers/pci/controller/pcie-phytium-ep.c b/drivers/pci/controller/pcie-phytium-ep.c new file mode 100644 index 000000000000..2c5989b9c544 --- /dev/null +++ b/drivers/pci/controller/pcie-phytium-ep.c @@ -0,0 +1,480 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium d2000 pcie endpoint driver + * + * Copyright (c) 2021 Phytium Limited. + * + * Author: + * Yang Xun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pcie-phytium-ep.h" +#include "pcie-phytium-register.h" + +#define PHYTIUM_PCIE_EP_IRQ_PCI_ADDR_NONE 0x0 +#define PHYTIUM_PCIE_EP_IRQ_PCI_ADDR_LEGACY 0x1 + +static int phytium_pcie_ep_write_header(struct pci_epc *epc, unsigned char fn, + struct pci_epf_header *hdr) +{ + struct phytium_pcie_ep *priv = epc_get_drvdata(epc); + u16 tmp = 0; + + phytium_pcie_writew(priv, fn, PHYTIUM_PCI_VENDOR_ID, hdr->vendorid); + phytium_pcie_writew(priv, fn, PHYTIUM_PCI_DEVICE_ID, hdr->deviceid); + phytium_pcie_writeb(priv, fn, PHYTIUM_PCI_REVISION_ID, hdr->revid); + phytium_pcie_writeb(priv, fn, PHYTIUM_PCI_CLASS_PROG, hdr->progif_code); + phytium_pcie_writew(priv, fn, PHYTIUM_PCI_CLASS_DEVICE, + hdr->subclass_code | (hdr->baseclass_code << 8)); + + phytium_pcie_writew(priv, fn, PHYTIUM_PCI_SUBSYS_VENDOR_ID, + hdr->subsys_vendor_id); + phytium_pcie_writew(priv, fn, PHYTIUM_PCI_SUBSYS_DEVICE_ID, + hdr->subsys_id); + + tmp = phytium_pcie_readw(priv, fn, PHYTIUM_PCI_INTERRUPT_PIN); + tmp = ((tmp & (~INTERRUPT_PIN_MASK)) | hdr->interrupt_pin); + phytium_pcie_writew(priv, fn, PHYTIUM_PCI_INTERRUPT_PIN, tmp); + + tmp = phytium_pcie_readw(priv, fn, PHYTIUM_PCI_MSIX_CAP); + phytium_pcie_writew(priv, fn, PHYTIUM_PCI_MSIX_CAP, MSIX_DISABLE); + + return 0; +} + +static int phytium_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, + struct pci_epf_bar *epf_bar) +{ + struct phytium_pcie_ep *priv = epc_get_drvdata(epc); + u64 sz = 0, sz_mask, atr_size; + int flags = epf_bar->flags; + u32 setting, src_addr0, src_addr1, trsl_addr0, trsl_addr1, trsl_param; + enum pci_barno barno = epf_bar->barno; + struct pci_epc_mem *mem = epc->mem; + + if ((flags & PCI_BASE_ADDRESS_MEM_TYPE_64) && (barno & 1)) { + dev_err(&epc->dev, "bar %d do not support mem64\n", barno); + return -EINVAL; + } + + if (barno & 1) { + dev_err(&epc->dev, "not support bar 1/3/5\n"); + return -EINVAL; + } + dev_dbg(epc->dev.parent, 
"set bar%d mapping address 0x%pa size 0x%lx\n", + barno, &(epf_bar->phys_addr), epf_bar->size); + + if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) { + setting = BAR_IO_TYPE; + sz = max_t(size_t, epf_bar->size, BAR_IO_MIN_APERTURE); + sz = 1 << fls64(sz - 1); + sz_mask = ~(sz - 1); + setting |= sz_mask; + trsl_param = TRSL_ID_IO; + } else { + setting = BAR_MEM_TYPE; + sz = max_t(size_t, epf_bar->size, BAR_MEM_MIN_APERTURE); + sz = 1 << fls64(sz - 1); + sz_mask = ~(sz - 1); + setting |= lower_32_bits(sz_mask); + + if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) + setting |= BAR_MEM_64BIT; + + if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH) + setting |= BAR_MEM_PREFETCHABLE; + + trsl_param = TRSL_ID_MASTER; + } + + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_BAR(barno), setting); + if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_BAR(barno + 1), + upper_32_bits(sz_mask)); + dev_dbg(epc->dev.parent, "set bar%d mapping address 0x%pa size 0x%llx 0x%x\n", + barno, &(epf_bar->phys_addr), sz, lower_32_bits(epf_bar->phys_addr)); + sz = ALIGN(sz, mem->page_size); + atr_size = fls64(sz - 1) - 1; + src_addr0 = ATR_IMPL | ((atr_size & ATR_SIZE_MASK) << ATR_SIZE_SHIFT); + src_addr1 = 0; + trsl_addr0 = (lower_32_bits(epf_bar->phys_addr) & TRSL_ADDR_32_12_MASK); + trsl_addr1 = upper_32_bits(epf_bar->phys_addr); + + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_WIN0_SRC_ADDR0(barno), + src_addr0); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_WIN0_SRC_ADDR1(barno), + src_addr1); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_WIN0_TRSL_ADDR0(barno), + trsl_addr0); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_WIN0_TRSL_ADDR1(barno), + trsl_addr1); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_WIN0_TRSL_PARAM(barno), + trsl_param); + + return 0; +} + +static void phytium_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, + struct pci_epf_bar *epf_bar) +{ + struct phytium_pcie_ep *priv = epc_get_drvdata(epc); + int flags = epf_bar->flags; + enum pci_barno 
barno = epf_bar->barno; + + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_BAR(barno), 0); + if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_BAR(barno + 1), 0); + + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_WIN0_SRC_ADDR0(barno), 0); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_WIN0_SRC_ADDR1(barno), 0); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_WIN0_TRSL_ADDR0(barno), 0); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_WIN0_TRSL_ADDR1(barno), 0); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_WIN0_TRSL_PARAM(barno), 0); +} + +static int phytium_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, + phys_addr_t addr, u64 pci_addr, + size_t size) +{ + struct phytium_pcie_ep *priv = epc_get_drvdata(epc); + u32 src_addr0, src_addr1, trsl_addr0, trsl_addr1, trsl_param, atr_size; + u64 sz = 0; + u32 r; + struct pci_epc_mem *mem = epc->mem; + + r = find_first_zero_bit(&priv->ob_region_map, + sizeof(priv->ob_region_map) * BITS_PER_LONG); + if (r >= priv->max_regions) { + dev_err(&epc->dev, "no free outbound region\n"); + return -EINVAL; + } + + dev_dbg(epc->dev.parent, "set slave %d: mapping address 0x%pa to pci 0x%llx, size 0x%zx\n", + r, &addr, pci_addr, size); + + sz = ALIGN(size, mem->page_size); + atr_size = fls64(sz - 1) - 1; + src_addr0 = ATR_IMPL | ((atr_size & ATR_SIZE_MASK) << ATR_SIZE_SHIFT); + src_addr0 |= (lower_32_bits(addr) & SRC_ADDR_32_12_MASK); + src_addr1 = upper_32_bits(addr); + trsl_addr0 = (lower_32_bits(pci_addr) & TRSL_ADDR_32_12_MASK); + trsl_addr1 = upper_32_bits(pci_addr); + trsl_param = TRSL_ID_PCIE_TR; + + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_SRC_ADDR0(r), + src_addr0); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_SRC_ADDR1(r), + src_addr1); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_TRSL_ADDR0(r), + trsl_addr0); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_TRSL_ADDR1(r), + trsl_addr1); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_TRSL_PARAM(r), + trsl_param); + set_bit(r, 
&priv->ob_region_map); + priv->ob_addr[r] = addr; + + return 0; +} + +static void phytium_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, + phys_addr_t addr) +{ + struct phytium_pcie_ep *priv = epc_get_drvdata(epc); + u32 r; + + for (r = 0; r < priv->max_regions; r++) + if (priv->ob_addr[r] == addr) + break; + + if (r == priv->max_regions) { + dev_err(&epc->dev, "used unmap addr 0x%pa\n", &addr); + return; + } + dev_dbg(epc->dev.parent, "set slave %d: unmapping address 0x%pa\n", r, &addr); + + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_SRC_ADDR0(r), 0); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_SRC_ADDR1(r), 0); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_TRSL_ADDR0(r), 0); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_TRSL_ADDR1(r), 0); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_TRSL_PARAM(r), 0); + priv->ob_addr[r] = 0; + clear_bit(r, &priv->ob_region_map); +} + +static int phytium_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 mmc) +{ + struct phytium_pcie_ep *priv = epc_get_drvdata(epc); + u16 flags = 0; + + flags = (mmc & MSI_NUM_MASK) << MSI_NUM_SHIFT; + flags &= ~MSI_MASK_SUPPORT; + phytium_pcie_writew(priv, fn, PHYTIUM_PCI_INTERRUPT_PIN, flags); + + return 0; +} + +static int phytium_pcie_ep_get_msi(struct pci_epc *epc, u8 fn) +{ + struct phytium_pcie_ep *priv = epc_get_drvdata(epc); + u16 flags, mme; + u32 cap = PHYTIUM_PCI_CF_MSI_BASE; + + flags = phytium_pcie_readw(priv, fn, cap + PCI_MSI_FLAGS); + if (!(flags & PCI_MSI_FLAGS_ENABLE)) + return -EINVAL; + + mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4; + + return mme; +} + +static int phytium_pcie_ep_send_msi_irq(struct phytium_pcie_ep *priv, u8 fn, + u8 interrupt_num) +{ + u32 cap = PHYTIUM_PCI_CF_MSI_BASE; + u16 flags, mme, data_mask, data; + u8 msi_count; + u64 pci_addr, pci_addr_mask = IRQ_MAPPING_SIZE - 1; + u32 src_addr0, src_addr1, trsl_addr0, trsl_addr1, trsl_param, atr_size; + + flags = phytium_pcie_readw(priv, fn, cap + PCI_MSI_FLAGS); + if (!(flags & 
PCI_MSI_FLAGS_ENABLE)) + return -EINVAL; + + mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4; + msi_count = 1 << mme; + if (!interrupt_num || interrupt_num > msi_count) + return -EINVAL; + + data_mask = msi_count - 1; + data = phytium_pcie_readw(priv, fn, cap + PCI_MSI_DATA_64); + data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask); + + /* Get the PCI address */ + pci_addr = phytium_pcie_readl(priv, fn, cap + PCI_MSI_ADDRESS_HI); + pci_addr <<= 32; + pci_addr |= phytium_pcie_readl(priv, fn, cap + PCI_MSI_ADDRESS_LO); + pci_addr &= GENMASK_ULL(63, 2); + + if (priv->irq_pci_addr != (pci_addr & ~pci_addr_mask) || (priv->irq_pci_fn != fn)) { + /* First region for IRQ writes. */ + atr_size = fls64(pci_addr_mask) - 1; + src_addr0 = ATR_IMPL | ((atr_size & ATR_SIZE_MASK) << ATR_SIZE_SHIFT); + src_addr0 |= (lower_32_bits(priv->irq_phys_addr) & SRC_ADDR_32_12_MASK); + src_addr1 = upper_32_bits(priv->irq_phys_addr); + trsl_addr0 = (lower_32_bits(pci_addr) & TRSL_ADDR_32_12_MASK); + trsl_addr1 = upper_32_bits(pci_addr); + trsl_param = TRSL_ID_PCIE_TR; + + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_SRC_ADDR0(0), + src_addr0); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_SRC_ADDR1(0), + src_addr1); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_TRSL_ADDR0(0), + trsl_addr0); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_TRSL_ADDR1(0), + trsl_addr1); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_TRSL_PARAM(0), + trsl_param); + priv->irq_pci_addr = (pci_addr & ~pci_addr_mask); + priv->irq_pci_fn = fn; + } + + dev_dbg(priv->epc->dev.parent, "send event %d\n", data); + writew(data, priv->irq_cpu_addr + (pci_addr & pci_addr_mask)); + + return 0; +} + +static int phytium_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, + enum pci_epc_irq_type type, + u16 interrupt_num) +{ + struct phytium_pcie_ep *priv = epc_get_drvdata(epc); + + switch (type) { + case PCI_EPC_IRQ_MSI: + return phytium_pcie_ep_send_msi_irq(priv, fn, interrupt_num); + + default: + break; + } 
+ + return -EINVAL; +} + +static int phytium_pcie_ep_start(struct pci_epc *epc) +{ + struct pci_epf *epf; + u32 cfg; + + cfg = BIT(0); + list_for_each_entry(epf, &epc->pci_epf, list) + cfg |= BIT(epf->func_no); + + list_for_each_entry(epf, &epc->pci_epf, list) + pci_epf_linkup(epf); + + return 0; +} + +static const struct pci_epc_ops phytium_pcie_epc_ops = { + .write_header = phytium_pcie_ep_write_header, + .set_bar = phytium_pcie_ep_set_bar, + .clear_bar = phytium_pcie_ep_clear_bar, + .map_addr = phytium_pcie_ep_map_addr, + .unmap_addr = phytium_pcie_ep_unmap_addr, + .set_msi = phytium_pcie_ep_set_msi, + .get_msi = phytium_pcie_ep_get_msi, + .raise_irq = phytium_pcie_ep_raise_irq, + .start = phytium_pcie_ep_start, +}; + + + +static int phytium_pcie_ep_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct phytium_pcie_ep *priv = NULL; + struct resource *res; + struct device_node *np = dev->of_node; + struct pci_epc *epc; + int ret = 0, value; + + dev_dbg(dev, "enter %s\n", __func__); + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg"); + priv->reg_base = devm_ioremap_resource(dev, res); + if (IS_ERR(priv->reg_base)) { + dev_err(dev, "missing \"reg\"\n"); + return PTR_ERR(priv->reg_base); + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem"); + if (!res) { + dev_err(dev, "missing \"mem\"\n"); + return -EINVAL; + } + priv->mem_res = res; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hpb"); + priv->hpb_base = devm_ioremap_resource(dev, res); + if (IS_ERR(priv->hpb_base)) { + dev_err(dev, "missing \"hpb\"\n"); + return PTR_ERR(priv->hpb_base); + } + + ret = of_property_read_u32(np, "max-outbound-regions", &priv->max_regions); + if (ret < 0) { + dev_err(dev, "missing \"max-outbound-regions\"\n"); + return ret; + } + dev_info(dev, "%s max-outbound-regions %d\n", __func__, priv->max_regions); + + priv->ob_addr 
= devm_kcalloc(dev, priv->max_regions, + sizeof(*priv->ob_addr), GFP_KERNEL); + if (!priv->ob_addr) + return -ENOMEM; + + platform_set_drvdata(pdev, priv); + + epc = devm_pci_epc_create(dev, &phytium_pcie_epc_ops); + if (IS_ERR(epc)) { + dev_err(dev, "failed to create epc device\n"); + return PTR_ERR(epc); + } + + priv->epc = epc; + epc_set_drvdata(epc, priv); + + if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0) + epc->max_functions = 1; + dev_info(dev, "%s epc->max_functions %d\n", __func__, epc->max_functions); + + + ret = pci_epc_mem_init(epc, priv->mem_res->start, + resource_size(priv->mem_res)); + if (ret < 0) { + dev_err(dev, "failed to initialize the memory space\n"); + return ret; + } + + priv->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &priv->irq_phys_addr, + SZ_4K); + if (!priv->irq_cpu_addr) { + dev_err(dev, "failed to reserve memory space for MSI\n"); + ret = -ENOMEM; + goto err_alloc_irq_mem; + } + priv->irq_pci_addr = PHYTIUM_PCIE_EP_IRQ_PCI_ADDR_NONE; + /* Reserve region 0 for IRQS */ + set_bit(0, &priv->ob_region_map); + + value = ((lower_32_bits(priv->mem_res->start) >> C0_PREF_VALUE_SHIFT) + & C0_PREF_BASE_MASK) << C0_PREF_BASE_SHIFT; + value |= (((lower_32_bits(priv->mem_res->end) >> C0_PREF_VALUE_SHIFT) + & C0_PREF_LIMIT_MASK) << C0_PREF_LIMIT_SHIFT); + phytium_hpb_writel(priv, PHYTIUM_HPB_C0_PREF_BASE_LIMIT, value); + + value = ((upper_32_bits(priv->mem_res->start) >> C0_PREF_UP32_VALUE_SHIFT) + & C0_PREF_BASE_UP32_MASK) << C0_PREF_BASE_UP32_SHIFT; + value |= (((upper_32_bits(priv->mem_res->end) >> C0_PREF_UP32_VALUE_SHIFT) + & C0_PREF_LIMIT_UP32_MASK) << C0_PREF_LIMIT_UP32_SHIFT); + phytium_hpb_writel(priv, PHYTIUM_HPB_C0_PREF_BASE_LIMIT_UP32, value); + + dev_dbg(dev, "exit %s successful\n", __func__); + return 0; + +err_alloc_irq_mem: + pci_epc_mem_exit(epc); + return ret; +} + +static int phytium_pcie_ep_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct phytium_pcie_ep *priv = 
dev_get_drvdata(dev); + struct pci_epc *epc = priv->epc; + + pci_epc_mem_exit(epc); + + return 0; +} + +static const struct of_device_id phytium_pcie_ep_of_match[] = { + { .compatible = "phytium,d2000-pcie-ep" }, + { }, +}; + +static struct platform_driver phytium_pcie_ep_driver = { + .driver = { + .name = "phytium-pcie-ep", + .of_match_table = phytium_pcie_ep_of_match, + }, + .probe = phytium_pcie_ep_probe, + .remove = phytium_pcie_ep_remove, +}; + +module_platform_driver(phytium_pcie_ep_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Yang Xun "); +MODULE_DESCRIPTION("Phytium Pcie Controller Endpoint driver"); diff --git a/drivers/pci/controller/pcie-phytium-ep.h b/drivers/pci/controller/pcie-phytium-ep.h new file mode 100644 index 000000000000..27d39a222f3c --- /dev/null +++ b/drivers/pci/controller/pcie-phytium-ep.h @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium d2000 pcie endpoint driver + * + * Copyright (c) 2021 Phytium Limited. + * + * Author: + * Yang Xun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __PCIE_PHYTIUM_EP_H__ +#define __PCIE_PHYTIUM_EP_H__ + +#include "pcie-phytium-register.h" + +#define IRQ_MAPPING_SIZE 0x1000 +struct phytium_pcie_ep { + void __iomem *reg_base; + struct resource *mem_res; + void __iomem *hpb_base; + unsigned int max_regions; + unsigned long ob_region_map; + phys_addr_t *ob_addr; + phys_addr_t irq_phys_addr; + void __iomem *irq_cpu_addr; + unsigned long irq_pci_addr; + u8 irq_pci_fn; + struct pci_epc *epc; +}; + +static inline void +phytium_pcie_writeb(struct phytium_pcie_ep *priv, u8 fn, u32 reg, u8 value) +{ + pr_debug("Write 32'h%08lx 32'h%08x\n", PHYTIUM_PCIE_FUNC_BASE(fn) + reg, value); + writeb(value, priv->reg_base + PHYTIUM_PCIE_FUNC_BASE(fn) + reg); +} + +static inline unsigned char +phytium_pcie_readb(struct phytium_pcie_ep *priv, u8 fn, u32 reg) +{ + unsigned char value; + + value = readb(priv->reg_base + PHYTIUM_PCIE_FUNC_BASE(fn) + reg); + pr_debug("Read 32'h%08lx 32'h%08x\n", PHYTIUM_PCIE_FUNC_BASE(fn) + reg, value); + + return value; +} + +static inline void +phytium_pcie_writew(struct phytium_pcie_ep *priv, u8 fn, u32 reg, u16 value) +{ + pr_debug("Write 32'h%08lx 32'h%08x\n", PHYTIUM_PCIE_FUNC_BASE(fn) + reg, value); + writew(value, priv->reg_base + PHYTIUM_PCIE_FUNC_BASE(fn) + reg); +} + +static inline unsigned short +phytium_pcie_readw(struct phytium_pcie_ep *priv, u8 fn, u32 reg) +{ + unsigned short value; + + value = readw(priv->reg_base + PHYTIUM_PCIE_FUNC_BASE(fn) + reg); + pr_debug("Read 32'h%08lx 32'h%08x\n", PHYTIUM_PCIE_FUNC_BASE(fn) + reg, value); + + return value; +} + +static inline void +phytium_pcie_writel(struct phytium_pcie_ep *priv, u8 fn, u32 reg, u32 value) +{ + pr_debug("Write 32'h%08lx 32'h%08x\n", PHYTIUM_PCIE_FUNC_BASE(fn) + reg, value); + writel(value, priv->reg_base + PHYTIUM_PCIE_FUNC_BASE(fn) + reg); +} + +static inline unsigned int +phytium_pcie_readl(struct phytium_pcie_ep *priv, u8 fn, u32 reg) +{ + unsigned int value; + + value = readl(priv->reg_base + 
PHYTIUM_PCIE_FUNC_BASE(fn) + reg); + pr_debug("Read 32'h%08lx 32'h%08x\n", PHYTIUM_PCIE_FUNC_BASE(fn) + reg, value); + + return value; +} + +static inline void +phytium_hpb_writel(struct phytium_pcie_ep *priv, u32 reg, u32 value) +{ + pr_debug("Write 32'h%08x 32'h%08x\n", reg, value); + writel(value, priv->hpb_base + reg); +} +#endif diff --git a/drivers/pci/controller/pcie-phytium-register.h b/drivers/pci/controller/pcie-phytium-register.h new file mode 100644 index 000000000000..7b90d416dd4b --- /dev/null +++ b/drivers/pci/controller/pcie-phytium-register.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium d2000 pcie endpoint driver + * + * Copyright (c) 2021 Phytium Limited. + * + * Author: + * Yang Xun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __PCIE_PHYTIUM_REGISTER_H__ +#define __PCIE_PHYTIUM_REGISTER_H__ + +#define PHYTIUM_PCIE_FUNC_BASE(fn) (((fn) << 14) & GENMASK(16, 14)) +#define PHYTIUM_PCI_VENDOR_ID 0x98 +#define PHYTIUM_PCI_DEVICE_ID 0x9a +#define PHYTIUM_PCI_REVISION_ID 0x9c +#define PHYTIUM_PCI_CLASS_PROG 0x9d +#define PHYTIUM_PCI_CLASS_DEVICE 0x9e +#define PHYTIUM_PCI_SUBSYS_VENDOR_ID 0xa0 +#define PHYTIUM_PCI_SUBSYS_DEVICE_ID 0xa2 +#define PHYTIUM_PCI_INTERRUPT_PIN 0xa8 +#define INTERRUPT_PIN_MASK 0x7 +#define MSI_DISABLE (1 << 3) +#define MSI_NUM_MASK (0x7) +#define MSI_NUM_SHIFT 4 +#define MSI_MASK_SUPPORT (1 << 7) +#define PHYTIUM_PCI_MSIX_CAP 0xaa + #define MSIX_DISABLE (0 << 15) + +#define PHYTIUM_PCI_BAR_0 0xe4 +#define PHYTIUM_PCI_BAR(bar_num) (0xe4 + bar_num * 4) +#define BAR_IO_TYPE (1 << 0) +#define BAR_MEM_TYPE (0 << 0) +#define BAR_MEM_64BIT (1 << 2) +#define BAR_MEM_PREFETCHABLE (1 << 3) +#define BAR_IO_MIN_APERTURE 4 +#define BAR_MEM_MIN_APERTURE 16 + + +#define PHYTIUM_PCI_WIN0_BASE 
0x600 +#define PHYTIUM_PCI_WIN0_SRC_ADDR0(table) (PHYTIUM_PCI_WIN0_BASE + 0X20 * table + 0x0) +#define ATR_IMPL 0x1 +#define ATR_SIZE_MASK 0x3f +#define ATR_SIZE_SHIFT 1 +#define ATR_SIZE_ALIGN 0x1000 +#define SRC_ADDR_32_12_MASK 0xfffff000 + +#define PHYTIUM_PCI_WIN0_SRC_ADDR1(table) (PHYTIUM_PCI_WIN0_BASE + 0X20 * table + 0x4) +#define PHYTIUM_PCI_WIN0_TRSL_ADDR0(table) (PHYTIUM_PCI_WIN0_BASE + 0X20 * table + 0x8) +#define TRSL_ADDR_32_12_MASK 0xfffff000 + +#define PHYTIUM_PCI_WIN0_TRSL_ADDR1(table) (PHYTIUM_PCI_WIN0_BASE + 0X20 * table + 0xc) +#define PHYTIUM_PCI_WIN0_TRSL_PARAM(table) (PHYTIUM_PCI_WIN0_BASE + 0X20 * table + 0x10) +#define TRSL_ID_IO 0x1 +#define TRSL_ID_MASTER 0x4 +#define TRSL_ID_PCIE_TR 0x0 + +#define PHYTIUM_PCI_SLAVE0_BASE 0x800 +#define PHYTIUM_PCI_SLAVE0_SRC_ADDR0(table) (PHYTIUM_PCI_SLAVE0_BASE + 0X20 * table + 0x0) +#define PHYTIUM_PCI_SLAVE0_SRC_ADDR1(table) (PHYTIUM_PCI_SLAVE0_BASE + 0X20 * table + 0x4) +#define PHYTIUM_PCI_SLAVE0_TRSL_ADDR0(table) (PHYTIUM_PCI_SLAVE0_BASE + 0X20 * table + 0x8) +#define PHYTIUM_PCI_SLAVE0_TRSL_ADDR1(table) (PHYTIUM_PCI_SLAVE0_BASE + 0X20 * table + 0xc) +#define PHYTIUM_PCI_SLAVE0_TRSL_PARAM(table) (PHYTIUM_PCI_SLAVE0_BASE + 0X20 * table + 0x10) + +#define PHYTIUM_PCI_CF_MSI_BASE 0x10e0 +#define PHYTIUM_PCI_CF_MSI_CONTROL 0x10e2 + +#define PHYTIUM_HPB_C0_PREF_BASE_LIMIT 0xa30 + #define C0_PREF_LIMIT_MASK 0xfff + #define C0_PREF_LIMIT_SHIFT 20 + #define C0_PREF_BASE_MASK 0xfff + #define C0_PREF_BASE_SHIFT 4 + #define C0_PREF_VALUE_SHIFT 20 +#define PHYTIUM_HPB_C0_PREF_BASE_LIMIT_UP32 0xa34 + #define C0_PREF_LIMIT_UP32_MASK 0xff + #define C0_PREF_LIMIT_UP32_SHIFT 8 + #define C0_PREF_BASE_UP32_MASK 0xff + #define C0_PREF_BASE_UP32_SHIFT 0 + #define C0_PREF_UP32_VALUE_SHIFT 0 +#endif + + diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index af2149632102..1b522f144944 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -5059,6 +5059,7 @@ 
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0142, quirk_no_ext_tags); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0144, quirk_no_ext_tags); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0420, quirk_no_ext_tags); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PHYTIUM, 0xdc3a, quirk_no_ext_tags); #ifdef CONFIG_PCI_ATS /* diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 28a4505a1bc8..355026f9cc76 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -1794,6 +1794,16 @@ config RTC_DRV_RTD119X If you say yes here, you get support for the RTD1295 SoC Real Time Clock. +config RTC_DRV_PHYTIUM + tristate "Phytium RTC" + depends on ARCH_PHYTIUM + default y if ARCH_PHYTIUM + help + Say yes here to support the Phytium SoC real time clock. + + This driver can also be built as a module, if so, the module + will be called "rtc-phytium". + comment "HID Sensor RTC drivers" config RTC_DRV_HID_SENSOR_TIME diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index 5ff2fc0c361a..289447233c6b 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -120,6 +120,7 @@ obj-$(CONFIG_RTC_DRV_PCF85363) += rtc-pcf85363.o obj-$(CONFIG_RTC_DRV_PCF8523) += rtc-pcf8523.o obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o obj-$(CONFIG_RTC_DRV_PCF8583) += rtc-pcf8583.o +obj-$(CONFIG_RTC_DRV_PHYTIUM) += rtc-phytium.o obj-$(CONFIG_RTC_DRV_PIC32) += rtc-pic32.o obj-$(CONFIG_RTC_DRV_PL030) += rtc-pl030.o obj-$(CONFIG_RTC_DRV_PL031) += rtc-pl031.o diff --git a/drivers/rtc/rtc-phytium.c b/drivers/rtc/rtc-phytium.c new file mode 100644 index 000000000000..97a560dfa3af --- /dev/null +++ b/drivers/rtc/rtc-phytium.c @@ -0,0 +1,331 @@ +/* + * Phytium Real Time Clock Driver + * + * Copyright (c) 2019, Phytium Technology Co., Ltd. 
+ * + * Chen Baozi + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define RTC_CMR 0x04 +#define RTC_AES_SEL 0x08 +#define RTC_AES_SEL_COUNTER 0x100 +#define RTC_CCR 0x0C +#define RTC_CCR_IE BIT(0) +#define RTC_CCR_MASK BIT(1) +#define RTC_CCR_EN BIT(2) +#define RTC_CCR_WEN BIT(3) +#define RTC_STAT 0x10 +#define RTC_STAT_BIT BIT(0) +#define RTC_RSTAT 0x14 +#define RTC_EOI 0x18 +#define RTC_VER 0x1C +#define RTC_CDR_LOW 0x20 +#define RTC_CCVR 0x24 +#define RTC_CLR_LOW 0x28 +#define RTC_CLR 0x2c +#define RTC_COUNTER_HB_OFFSET 15 +#define RTC_COUNTER_LB_MASK 0x7fff + +spinlock_t spinlock_phytium_rtc; + +struct phytium_rtc_dev { + struct rtc_device *rtc; + struct device *dev; + unsigned long alarm_time; + void __iomem *csr_base; + struct clk *clk; + unsigned int irq_wake; + unsigned int irq_enabled; +}; + +static int phytium_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + struct phytium_rtc_dev *pdata = dev_get_drvdata(dev); + + unsigned long counter = 0; + unsigned long tmp = 0; + + spin_lock(&spinlock_phytium_rtc); + writel(RTC_AES_SEL_COUNTER, pdata->csr_base + RTC_AES_SEL); + counter = readl(pdata->csr_base + RTC_CCVR); + tmp = readl(pdata->csr_base + RTC_CDR_LOW); + printk("%s_%d:counter:0x%lx\n", __func__, __LINE__, counter); + spin_unlock(&spinlock_phytium_rtc); + + rtc_time_to_tm(counter, tm); + return rtc_valid_tm(tm); +} + +static int phytium_rtc_set_mmss(struct device *dev, unsigned long secs) +{ + struct phytium_rtc_dev *pdata = dev_get_drvdata(dev); + unsigned long counter = 0; + unsigned long tmp = 0; + + spin_lock(&spinlock_phytium_rtc); + + writel(RTC_AES_SEL_COUNTER, pdata->csr_base + RTC_AES_SEL); + writel(0x00000000, 
pdata->csr_base + RTC_CLR_LOW); + writel((u32)secs, pdata->csr_base + RTC_CLR); + writel(RTC_AES_SEL_COUNTER, pdata->csr_base + RTC_AES_SEL); + counter = readl(pdata->csr_base + RTC_CLR); + tmp = readl(pdata->csr_base + RTC_CLR_LOW); + + spin_unlock(&spinlock_phytium_rtc); + + return 0; +} + +static int phytium_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct phytium_rtc_dev *pdata = dev_get_drvdata(dev); + + rtc_time_to_tm(pdata->alarm_time, &alrm->time); + alrm->enabled = readl(pdata->csr_base + RTC_CCR) & RTC_CCR_IE; + + return 0; +} + +static int phytium_rtc_alarm_irq_enable(struct device *dev, u32 enabled) +{ + struct phytium_rtc_dev *pdata = dev_get_drvdata(dev); + u32 ccr; + + ccr = readl(pdata->csr_base + RTC_CCR); + if (enabled) { + ccr &= ~RTC_CCR_MASK; + ccr |= RTC_CCR_IE; + } else { + ccr &= ~RTC_CCR_IE; + ccr |= RTC_CCR_MASK; + } + writel(ccr, pdata->csr_base + RTC_CCR); + + return 0; +} + +static int phytium_rtc_alarm_irq_enabled(struct device *dev) +{ + struct phytium_rtc_dev *pdata = dev_get_drvdata(dev); + + return readl(pdata->csr_base + RTC_CCR) & RTC_CCR_IE ? 
1: 0; +} + +static int phytium_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct phytium_rtc_dev *pdata = dev_get_drvdata(dev); + unsigned long rtc_time; + unsigned long alarm_time; + + rtc_time = readl(pdata->csr_base + RTC_CCVR); + rtc_tm_to_time(&alrm->time, &alarm_time); + + pdata->alarm_time = alarm_time; + writel((u32) pdata->alarm_time, pdata->csr_base + RTC_CMR); + + phytium_rtc_alarm_irq_enable(dev, alrm->enabled); + + return 0; +} + +static const struct rtc_class_ops phytium_rtc_ops = { + .read_time = phytium_rtc_read_time, + .set_mmss = phytium_rtc_set_mmss, + .read_alarm = phytium_rtc_read_alarm, + .set_alarm = phytium_rtc_set_alarm, + .alarm_irq_enable = phytium_rtc_alarm_irq_enable, +}; + +static irqreturn_t phytium_rtc_interrupt(int irq, void *id) +{ + struct phytium_rtc_dev *pdata = (struct phytium_rtc_dev *) id; + + /* Check if interrupt asserted */ + if (!(readl(pdata->csr_base + RTC_STAT) & RTC_STAT_BIT)) + return IRQ_NONE; + + /* Clear interrupt */ + readl(pdata->csr_base + RTC_EOI); + + rtc_update_irq(pdata->rtc, 1, RTC_IRQF | RTC_AF); + + return IRQ_HANDLED; +} + +static int phytium_rtc_probe(struct platform_device *pdev) +{ + struct phytium_rtc_dev *pdata; + struct resource *res; + int ret; + int irq; + + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + platform_set_drvdata(pdev, pdata); + pdata->dev = &pdev->dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pdata->csr_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pdata->csr_base)) + return PTR_ERR(pdata->csr_base); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "No IRQ resource\n"); + return irq; + } + ret = devm_request_irq(&pdev->dev, irq, phytium_rtc_interrupt, 0, + dev_name(&pdev->dev), pdata); + if (ret) { + dev_err(&pdev->dev, "Could not request IRQ\n"); + return ret; + } + +#ifndef CONFIG_ACPI + pdata->clk = devm_clk_get(&pdev->dev, NULL); + if 
(IS_ERR(pdata->clk)) { + dev_err(&pdev->dev, "Couldn't get the clock for RTC\n"); + return -ENODEV; + } + + ret = clk_prepare_enable(pdata->clk); + if (ret) + return ret; +#endif + + spin_lock_init(&spinlock_phytium_rtc); + + /* Turn on the clock and the crystal */ + writel(RTC_CCR_EN, pdata->csr_base + RTC_CCR); + + ret = device_init_wakeup(&pdev->dev, 1); + if (ret) { + clk_disable_unprepare(pdata->clk); + return ret; + } + + pdata->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, + &phytium_rtc_ops, THIS_MODULE); + if (IS_ERR(pdata->rtc)) { + clk_disable_unprepare(pdata->clk); + return PTR_ERR(pdata->rtc); + } + + /* HW does not support update faster than 1 seconds */ + pdata->rtc->uie_unsupported = 1; + + return 0; +} + +static int phytium_rtc_remove(struct platform_device *pdev) +{ + struct phytium_rtc_dev *pdata = platform_get_drvdata(pdev); + + phytium_rtc_alarm_irq_enable(&pdev->dev, 0); + device_init_wakeup(&pdev->dev, 0); + clk_disable_unprepare(pdata->clk); + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int phytium_rtc_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct phytium_rtc_dev *pdata = platform_get_drvdata(pdev); + int irq; + + /* + * If this RTC alarm will be used for waking the system up, + * don't disable it of course. Else we just disable the alarm + * and await suspension. 
+ */ + irq = platform_get_irq(pdev, 0); + if (device_may_wakeup(&pdev->dev)) { + if (!enable_irq_wake(irq)) + pdata->irq_wake = 1; + } else { + pdata->irq_enabled = phytium_rtc_alarm_irq_enabled(dev); + phytium_rtc_alarm_irq_enable(dev, 0); + clk_disable_unprepare(pdata->clk); + } + + return 0; +} + +static int phytium_rtc_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct phytium_rtc_dev *pdata = platform_get_drvdata(pdev); + int irq; + int rc; + + irq = platform_get_irq(pdev, 0); + if (device_may_wakeup(&pdev->dev)) { + if (pdata->irq_wake) { + disable_irq_wake(irq); + pdata->irq_wake = 0; + } + } else { + rc = clk_prepare_enable(pdata->clk); + if (rc) { + dev_err(dev, "Unable to enable clock error %d\n", rc); + return rc; + } + phytium_rtc_alarm_irq_enable(dev, pdata->irq_enabled); + } + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(phytium_rtc_pm_ops, phytium_rtc_suspend, phytium_rtc_resume); + +#ifdef CONFIG_OF +static const struct of_device_id phytium_rtc_of_match[] = { + { .compatible = "phytium,rtc" }, + { } +}; +MODULE_DEVICE_TABLE(of, phytium_rtc_of_match); +#endif + +#ifdef CONFIG_ACPI +static const struct acpi_device_id phytium_rtc_acpi_match[] = { + { "PHYT0002", 0 }, + { } +}; +#endif + +static struct platform_driver phytium_rtc_driver = { + .probe = phytium_rtc_probe, + .remove = phytium_rtc_remove, + .driver = { + .name = "phytium-rtc", + .pm = &phytium_rtc_pm_ops, + .of_match_table = of_match_ptr(phytium_rtc_of_match), + .acpi_match_table = ACPI_PTR(phytium_rtc_acpi_match), + }, +}; + +module_platform_driver(phytium_rtc_driver); + +MODULE_DESCRIPTION("Phytium RTC driver"); +MODULE_AUTHOR("Chen Baozi "); +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c index a1d822ae9fca..61389bdc7926 100644 --- a/drivers/scsi/xen-scsifront.c +++ b/drivers/scsi/xen-scsifront.c @@ -233,11 +233,12 @@ static void scsifront_gnttab_done(struct vscsifrnt_info *info, return; 
for (i = 0; i < shadow->nr_grants; i++) { - if (unlikely(!gnttab_try_end_foreign_access(shadow->gref[i]))) { + if (unlikely(gnttab_query_foreign_access(shadow->gref[i]))) { shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME "grant still in use by backend\n"); BUG(); } + gnttab_end_foreign_access(shadow->gref[i], 0, 0UL); } kfree(shadow->sg); diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 0a7fd56c1ed9..4a964c5d5e0a 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -454,6 +454,31 @@ config SPI_ORION This enables using the SPI master controller on the Orion and MVEBU chips. +config SPI_PHYTIUM + tristate + depends on ARCH_PHYTIUM || COMPILE_TEST + +config SPI_PHYTIUM_PLAT + tristate "Phytium SPI controller platform support" + select SPI_PHYTIUM + help + This selects a platform driver for Phytium SPI controller. + + If you say yes to this option, support will be included for + FT-2000/4 and D2000 families of SPI controller. + +config SPI_PHYTIUM_PCI + tristate "Phytium SPI controller PCI support" + depends on PCI + select SPI_PHYTIUM + help + This selects a PCI driver for Phytium SPI controller. + + If you say yes to this option, support will be included for + Phytium X100 chipset of SPI controller. + + If unsure, say N. 
+ config SPI_PIC32 tristate "Microchip PIC32 series SPI" depends on MACH_PIC32 || COMPILE_TEST diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index a90d55970036..01d74f2a1afe 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -35,6 +35,9 @@ obj-$(CONFIG_SPI_DESIGNWARE) += spi-dw.o obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o obj-$(CONFIG_SPI_DW_PCI) += spi-dw-midpci.o spi-dw-midpci-objs := spi-dw-pci.o spi-dw-mid.o +obj-$(CONFIG_SPI_PHYTIUM) += spi-phytium.o +obj-$(CONFIG_SPI_PHYTIUM_PLAT) += spi-phytium-plat.o +obj-$(CONFIG_SPI_PHYTIUM_PCI) += spi-phytium-pci.o obj-$(CONFIG_SPI_EFM32) += spi-efm32.o obj-$(CONFIG_SPI_EP93XX) += spi-ep93xx.o obj-$(CONFIG_SPI_FALCON) += spi-falcon.o diff --git a/drivers/spi/spi-phytium-pci.c b/drivers/spi/spi-phytium-pci.c new file mode 100644 index 000000000000..5a856b317fc8 --- /dev/null +++ b/drivers/spi/spi-phytium-pci.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium SPI core controller PCI driver. + * + * Copyright (c) 2019-2021, Phytium Corporation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "spi-phytium.h" + +#define DRIVER_NAME "phytium_spi_pci" + +static int phytium_spi_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct phytium_spi *fts; + int pci_bar = 0; + int ret; + + fts = devm_kzalloc(&pdev->dev, sizeof(struct phytium_spi), + GFP_KERNEL); + if (!fts) + return -ENOMEM; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + ret = pcim_iomap_regions(pdev, 1 << pci_bar, pci_name(pdev)); + if (ret) { + dev_err(&pdev->dev, "pci iomap failed?\n"); + return ret; + } + + fts->regs = pcim_iomap_table(pdev)[pci_bar]; + if (IS_ERR(fts->regs)) { + dev_err(&pdev->dev, "SPI region map failed\n"); + return PTR_ERR(fts->regs); + } + + fts->irq = pdev->irq; + if (fts->irq < 0) { + dev_err(&pdev->dev, "no irq resource?\n"); + return fts->irq; /* -ENXIO */ + } + + fts->bus_num = -1; + + fts->max_freq = 48000000; + + fts->num_cs = 4; + + fts->global_cs = 1; + + ret = phytium_spi_add_host(&pdev->dev, fts); + if (ret) + return ret; + + pci_set_drvdata(pdev, fts); + return 0; +} + +static void phytium_spi_pci_remove(struct pci_dev *pdev) +{ + struct phytium_spi *fts = pci_get_drvdata(pdev); + + phytium_spi_remove_host(fts); +} + + +#ifdef CONFIG_PM_SLEEP +static int spi_suspend(struct device *dev) +{ + struct spi_master *master = dev_get_drvdata(dev); + struct phytium_spi *fts = spi_master_get_devdata(master); + + return phytium_spi_suspend_host(fts); +} + +static int spi_resume(struct device *dev) +{ + struct spi_master *master = dev_get_drvdata(dev); + struct phytium_spi *fts = spi_master_get_devdata(master); + + return phytium_spi_resume_host(fts); +} +#endif + +static SIMPLE_DEV_PM_OPS(phytium_spi_pm_ops, spi_suspend, spi_resume); + +static const struct pci_device_id phytium_device_pci_tbl[] = { + { PCI_VDEVICE(PHYTIUM, 0xdc2c) }, + {}, +}; + +static struct pci_driver 
phytium_spi_pci_driver = { + .name = DRIVER_NAME, + .id_table = phytium_device_pci_tbl, + .probe = phytium_spi_pci_probe, + .remove = phytium_spi_pci_remove, + .driver = { + .pm = &phytium_spi_pm_ops, + } +}; + +module_pci_driver(phytium_spi_pci_driver); + +MODULE_AUTHOR("Yiqun Zhang "); +MODULE_DESCRIPTION("PCI Driver for Phytium SPI controller core"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/spi-phytium-plat.c b/drivers/spi/spi-phytium-plat.c new file mode 100644 index 000000000000..04c2b5d2fb95 --- /dev/null +++ b/drivers/spi/spi-phytium-plat.c @@ -0,0 +1,206 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium SPI core controller platform driver. + * + * Copyright (c) 2019-2021, Phytium Corporation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "spi-phytium.h" + +#define DRIVER_NAME "phytium_spi" + +struct phytium_spi_clk { + struct phytium_spi fts; + struct clk *clk; +}; + +static int phytium_spi_probe(struct platform_device *pdev) +{ + struct phytium_spi_clk *ftsc; + struct phytium_spi *fts; + struct resource *mem; + int ret; + int num_cs; + int cs_gpio; + int i; + + ftsc = devm_kzalloc(&pdev->dev, sizeof(struct phytium_spi_clk), + GFP_KERNEL); + if (!ftsc) + return -ENOMEM; + + fts = &ftsc->fts; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) { + dev_err(&pdev->dev, "no mem resource?\n"); + return -EINVAL; + } + + fts->regs = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(fts->regs)) { + dev_err(&pdev->dev, "SPI region map failed\n"); + return PTR_ERR(fts->regs); + } + + fts->irq = platform_get_irq(pdev, 0); + if (fts->irq < 0) { + dev_err(&pdev->dev, "no irq resource?\n"); + return fts->irq; /* -ENXIO */ + } + + if (pdev->dev.of_node) { + ftsc->clk = devm_clk_get(&pdev->dev, NULL); + + if (IS_ERR(ftsc->clk)) + return PTR_ERR(ftsc->clk); + ret = clk_prepare_enable(ftsc->clk); 
+ if (ret) + return ret; + + fts->max_freq = clk_get_rate(ftsc->clk); + } else if (has_acpi_companion(&pdev->dev)) { + fts->max_freq = 48000000; + } + + fts->bus_num = pdev->id; + device_property_read_u32(&pdev->dev, "reg-io-width", &fts->reg_io_width); + + num_cs = 4; + + device_property_read_u32(&pdev->dev, "num-cs", &num_cs); + + fts->num_cs = num_cs; + + if (pdev->dev.of_node) { + int i; + + for (i = 0; i < fts->num_cs; i++) { + cs_gpio = of_get_named_gpio(pdev->dev.of_node, + "cs-gpios", i); + + if (cs_gpio == -EPROBE_DEFER) { + ret = cs_gpio; + goto out; + } + + if (gpio_is_valid(cs_gpio)) { + ret = devm_gpio_request(&pdev->dev, cs_gpio, + dev_name(&pdev->dev)); + if (ret) + goto out; + } + } + } else if(has_acpi_companion(&pdev->dev)) { + int n; + int *cs; + struct gpio_desc *gpiod; + + n = gpiod_count(&pdev->dev, "cs"); + + cs = devm_kcalloc(&pdev->dev, n, sizeof(int), GFP_KERNEL); + fts->cs = cs; + + for (i = 0; i < n; i++) { + gpiod = devm_gpiod_get_index_optional(&pdev->dev, "cs", i, + GPIOD_OUT_LOW); + + if (IS_ERR(gpiod)) { + ret = PTR_ERR(gpiod); + goto out; + } + + cs_gpio = desc_to_gpio(gpiod); + cs[i] = cs_gpio; + } + } + + fts->global_cs = device_get_match_data(&pdev->dev); + + ret = phytium_spi_add_host(&pdev->dev, fts); + if (ret) + goto out; + + platform_set_drvdata(pdev, ftsc); + return 0; + +out: + clk_disable_unprepare(ftsc->clk); + return ret; +} + +static int phytium_spi_remove(struct platform_device *pdev) +{ + struct phytium_spi_clk *ftsc = platform_get_drvdata(pdev); + + phytium_spi_remove_host(&ftsc->fts); + clk_disable_unprepare(ftsc->clk); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int spi_suspend(struct device *dev) +{ + struct spi_master *master = dev_get_drvdata(dev); + struct phytium_spi *fts = spi_master_get_devdata(master); + + return phytium_spi_suspend_host(fts); +} + +static int spi_resume(struct device *dev) +{ + struct spi_master *master = dev_get_drvdata(dev); + struct phytium_spi *fts = 
spi_master_get_devdata(master); + + return phytium_spi_resume_host(fts); +} +#endif + +static SIMPLE_DEV_PM_OPS(phytium_spi_pm_ops, spi_suspend, spi_resume); + +static const struct of_device_id phytium_spi_of_match[] = { + { .compatible = "phytium,spi", .data = (void *)0 }, + { /* end of table */} +}; +MODULE_DEVICE_TABLE(of, phytium_spi_of_match); + +static const struct acpi_device_id phytium_spi_acpi_match[] = { + {"PHYT000E", 0}, + {} +}; +MODULE_DEVICE_TABLE(acpi, phytium_spi_acpi_match); + +static struct platform_driver phytium_spi_driver = { + .probe = phytium_spi_probe, + .remove = phytium_spi_remove, + .driver = { + .name = DRIVER_NAME, + .of_match_table = of_match_ptr(phytium_spi_of_match), + .acpi_match_table = ACPI_PTR(phytium_spi_acpi_match), + .pm = &phytium_spi_pm_ops, + }, +}; +module_platform_driver(phytium_spi_driver); + +MODULE_AUTHOR("Yiqun Zhang "); +MODULE_DESCRIPTION("Platform Driver for Phytium SPI controller core"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/spi-phytium.c b/drivers/spi/spi-phytium.c new file mode 100644 index 000000000000..aa1859ebe755 --- /dev/null +++ b/drivers/spi/spi-phytium.c @@ -0,0 +1,528 @@ +/* + * Phytium SPI core controller driver. + * + * Copyright (c) 2019-2021, Phytium Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "spi-phytium.h" + +static inline u32 phytium_readl(struct phytium_spi *fts, u32 offset) +{ + return __raw_readl(fts->regs + offset); +} + +static inline u16 phytium_readw(struct phytium_spi *fts, u32 offset) +{ + return __raw_readw(fts->regs + offset); +} + +static inline void phytium_writel(struct phytium_spi *fts, u32 offset, u32 val) +{ + __raw_writel(val, fts->regs + offset); +} + +static inline void phytium_writew(struct phytium_spi *fts, u32 offset, u16 val) +{ + __raw_writew(val, fts->regs + offset); +} + +static inline u32 phytium_read_io_reg(struct phytium_spi *fts, u32 offset) +{ + switch (fts->reg_io_width) { + case 2: + return phytium_readw(fts, offset); + case 4: + default: + return phytium_readl(fts, offset); + } +} + +static inline void phytium_write_io_reg(struct phytium_spi *fts, u32 offset, u32 val) +{ + switch (fts->reg_io_width) { + case 2: + phytium_writew(fts, offset, val); + break; + case 4: + default: + phytium_writel(fts, offset, val); + break; + } +} + +static inline void spi_enable_chip(struct phytium_spi *fts, int enable) +{ + phytium_writel(fts, SSIENR, (enable ? 
1 : 0)); +} + +static inline void spi_set_clk(struct phytium_spi *fts, u16 div) +{ + phytium_writel(fts, BAUDR, div); +} + +static inline void spi_mask_intr(struct phytium_spi *fts, u32 mask) +{ + u32 new_mask; + + new_mask = phytium_readl(fts, IMR) & ~mask; + phytium_writel(fts, IMR, new_mask); +} + +static inline void spi_umask_intr(struct phytium_spi *fts, u32 mask) +{ + u32 new_mask; + + new_mask = phytium_readl(fts, IMR) | mask; + phytium_writel(fts, IMR, new_mask); +} + +static inline void spi_global_cs(struct phytium_spi *fts) +{ + u32 global_cs_en, mask, setmask; + + mask = GENMASK(fts->num_cs-1, 0) << fts->num_cs; + setmask = ~GENMASK(fts->num_cs-1, 0); + global_cs_en = (phytium_readl(fts, GCSR) | mask) & setmask; + + phytium_writel(fts, GCSR, global_cs_en); +} + +static inline void spi_reset_chip(struct phytium_spi *fts) +{ + spi_enable_chip(fts, 0); + if (fts->global_cs) + spi_global_cs(fts); + spi_mask_intr(fts, 0xff); + spi_enable_chip(fts, 1); +} + +static inline void spi_shutdown_chip(struct phytium_spi *fts) +{ + spi_enable_chip(fts, 0); + spi_set_clk(fts, 0); +} + +struct phytium_spi_chip { + u8 poll_mode; + u8 type; + void (*cs_control)(u32 command); +}; + +struct chip_data { + u8 cs; + u8 tmode; + u8 type; + + u8 poll_mode; + + u16 clk_div; + u32 speed_hz; + void (*cs_control)(u32 command); +}; + +static void phytium_spi_set_cs(struct spi_device *spi, bool enable) +{ + struct phytium_spi *fts = spi_master_get_devdata(spi->master); + struct chip_data *chip = spi_get_ctldata(spi); + u32 origin; + + if (chip && chip->cs_control) + chip->cs_control(!enable); + + if (!enable) { + phytium_writel(fts, SER, BIT(spi->chip_select)); + if (fts->global_cs) { + origin = phytium_readl(fts, GCSR); + phytium_writel(fts, GCSR, origin | (1 << spi->chip_select)); + } + } else { + if (fts->global_cs) { + origin = phytium_readl(fts, GCSR); + phytium_writel(fts, GCSR, origin & ~(1 << spi->chip_select)); + } + } +} + +static inline u32 tx_max(struct phytium_spi *fts) 
+{ + u32 tx_left, tx_room, rxtx_gap; + + tx_left = (fts->tx_end - fts->tx) / fts->n_bytes; + tx_room = fts->fifo_len - phytium_readl(fts, TXFLR); + + rxtx_gap = ((fts->rx_end - fts->rx) - (fts->tx_end - fts->tx)) + / fts->n_bytes; + + return min3(tx_left, tx_room, (u32) (fts->fifo_len - rxtx_gap)); +} + +static inline u32 rx_max(struct phytium_spi *fts) +{ + u32 rx_left = (fts->rx_end - fts->rx) / fts->n_bytes; + + return min_t(u32, rx_left, phytium_readl(fts, RXFLR)); +} + +static void phytium_writer(struct phytium_spi *fts) +{ + u32 max = tx_max(fts); + u16 txw = 0; + + while (max--) { + if (fts->tx_end - fts->len) { + if (fts->n_bytes == 1) + txw = *(u8 *)(fts->tx); + else + txw = *(u16 *)(fts->tx); + } + phytium_write_io_reg(fts, DR, txw); + fts->tx += fts->n_bytes; + } +} + +static void phytium_reader(struct phytium_spi *fts) +{ + u32 max = rx_max(fts); + u16 rxw; + + while (max--) { + rxw = phytium_read_io_reg(fts, DR); + if (fts->rx_end - fts->len) { + if (fts->n_bytes == 1) + *(u8 *)(fts->rx) = rxw; + else + *(u16 *)(fts->rx) = rxw; + } + fts->rx += fts->n_bytes; + } +} + +static void int_error_stop(struct phytium_spi *fts, const char *msg) +{ + spi_reset_chip(fts); + + dev_err(&fts->master->dev, "%s\n", msg); + fts->master->cur_msg->status = -EIO; + spi_finalize_current_transfer(fts->master); +} + +static irqreturn_t interrupt_transfer(struct phytium_spi *fts) +{ + u16 irq_status = phytium_readl(fts, ISR); + + if (irq_status & (INT_TXOI | INT_RXOI | INT_RXUI)) { + phytium_readl(fts, ICR); + int_error_stop(fts, "interrupt_transfer: fifo overrun/underrun"); + return IRQ_HANDLED; + } + + phytium_reader(fts); + if (fts->rx_end == fts->rx) { + spi_mask_intr(fts, INT_TXEI); + spi_finalize_current_transfer(fts->master); + return IRQ_HANDLED; + } + if (irq_status & INT_TXEI) { + spi_mask_intr(fts, INT_TXEI); + phytium_writer(fts); + spi_umask_intr(fts, INT_TXEI); + } + + return IRQ_HANDLED; +} + +static irqreturn_t phytium_spi_irq(int irq, void *dev_id) +{ + 
struct spi_master *master = dev_id; + struct phytium_spi *fts = spi_master_get_devdata(master); + u16 irq_status = phytium_readl(fts, ISR) & 0x3f; + + if (!irq_status) + return IRQ_NONE; + + if (!master->cur_msg) { + spi_mask_intr(fts, INT_TXEI); + return IRQ_HANDLED; + } + + if (fts->transfer_handler) + return fts->transfer_handler(fts); + else + return IRQ_HANDLED; +} + +static int poll_transfer(struct phytium_spi *fts) +{ + do { + phytium_writer(fts); + phytium_reader(fts); + cpu_relax(); + } while (fts->rx_end > fts->rx); + + return 0; +} + +static int phytium_spi_transfer_one(struct spi_master *master, + struct spi_device *spi, struct spi_transfer *transfer) +{ + struct phytium_spi *fts = spi_master_get_devdata(master); + struct chip_data *chip = spi_get_ctldata(spi); + u8 imask = 0; + u16 txlevel = 0; + u16 clk_div; + u32 cr0; + + fts->tx = (void *)transfer->tx_buf; + fts->tx_end = fts->tx + transfer->len; + fts->rx = transfer->rx_buf; + fts->rx_end = fts->rx + transfer->len; + fts->len = transfer->len; + + spi_enable_chip(fts, 0); + + if (transfer->speed_hz != chip->speed_hz) { + clk_div = (fts->max_freq / transfer->speed_hz + 1) & 0xfffe; + + chip->speed_hz = transfer->speed_hz; + chip->clk_div = clk_div; + + spi_set_clk(fts, chip->clk_div); + } + + if (transfer->bits_per_word == 8) { + fts->n_bytes = 1; + } else if (transfer->bits_per_word == 16) { + fts->n_bytes = 2; + } else { + return -EINVAL; + } + + cr0 = (transfer->bits_per_word - 1) + | (chip->type << FRF_OFFSET) + | (spi->mode << MODE_OFFSET) + | (chip->tmode << TMOD_OFFSET); + + if (chip->cs_control) { + if (fts->rx && fts->tx) + chip->tmode = TMOD_TR; + else if (fts->rx) + chip->tmode = TMOD_RO; + else + chip->tmode = TMOD_TO; + + cr0 &= ~TMOD_MASK; + cr0 |= (chip->tmode << TMOD_OFFSET); + } + + phytium_writel(fts, CTRL0, cr0); + + spi_mask_intr(fts, 0xff); + + if (!chip->poll_mode) { + txlevel = min_t(u16, fts->fifo_len / 2, fts->len / fts->n_bytes); + phytium_writel(fts, TXFLTR, txlevel); + + 
imask |= INT_TXEI | INT_TXOI | + INT_RXUI | INT_RXOI; + spi_umask_intr(fts, imask); + + fts->transfer_handler = interrupt_transfer; + } + + spi_enable_chip(fts, 1); + + if (chip->poll_mode) + return poll_transfer(fts); + + return 1; +} + +static void phytium_spi_handle_err(struct spi_master *master, + struct spi_message *msg) +{ + struct phytium_spi *fts = spi_master_get_devdata(master); + + spi_reset_chip(fts); +} + +static int phytium_spi_setup(struct spi_device *spi) +{ + struct phytium_spi_chip *chip_info = NULL; + struct chip_data *chip; + int ret; + + chip = spi_get_ctldata(spi); + if (!chip) { + chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); + if (!chip) + return -ENOMEM; + spi_set_ctldata(spi, chip); + } + + chip_info = spi->controller_data; + + if (chip_info) { + if (chip_info->cs_control) + chip->cs_control = chip_info->cs_control; + + chip->poll_mode = chip_info->poll_mode; + chip->type = chip_info->type; + } + + chip->tmode = 0; + + if (gpio_is_valid(spi->cs_gpio)) { + ret = gpio_direction_output(spi->cs_gpio, + !(spi->mode & SPI_CS_HIGH)); + if (ret) + return ret; + } + + return 0; +} + +static void phytium_spi_cleanup(struct spi_device *spi) +{ + struct chip_data *chip = spi_get_ctldata(spi); + + kfree(chip); + spi_set_ctldata(spi, NULL); +} + +static void spi_hw_init(struct device *dev, struct phytium_spi *fts) +{ + spi_reset_chip(fts); + + if (!fts->fifo_len) { + u32 fifo; + + for (fifo = 1; fifo < 256; fifo++) { + phytium_writel(fts, TXFLTR, fifo); + if (fifo != phytium_readl(fts, TXFLTR)) + break; + } + phytium_writel(fts, TXFLTR, 0); + + fts->fifo_len = (fifo == 1) ? 
0 : fifo; + dev_dbg(dev, "Detected FIFO size: %u bytes\n", fts->fifo_len); + } +} + +int phytium_spi_add_host(struct device *dev, struct phytium_spi *fts) +{ + struct spi_master *master; + int ret; + + BUG_ON(fts == NULL); + + master = spi_alloc_master(dev, 0); + if (!master) + return -ENOMEM; + + fts->master = master; + snprintf(fts->name, sizeof(fts->name), "phytium_spi%d", fts->bus_num); + + ret = request_irq(fts->irq, phytium_spi_irq, IRQF_SHARED, fts->name, master); + if (ret < 0) { + dev_err(dev, "can not get IRQ\n"); + goto err_free_master; + } + + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP; + master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16); + master->bus_num = fts->bus_num; + master->num_chipselect = fts->num_cs; + master->setup = phytium_spi_setup; + master->cleanup = phytium_spi_cleanup; + master->set_cs = phytium_spi_set_cs; + master->transfer_one = phytium_spi_transfer_one; + master->handle_err = phytium_spi_handle_err; + master->max_speed_hz = fts->max_freq; + master->dev.of_node = dev->of_node; + master->dev.fwnode = dev->fwnode; + master->flags = SPI_MASTER_GPIO_SS; + master->cs_gpios = fts->cs; + + spi_hw_init(dev, fts); + + spi_master_set_devdata(master, fts); + ret = devm_spi_register_master(dev, master); + if (ret) { + dev_err(&master->dev, "problem registering spi master\n"); + goto err_exit; + } + + return 0; + +err_exit: + spi_enable_chip(fts, 0); + free_irq(fts->irq, master); +err_free_master: + spi_master_put(master); + return ret; +} +EXPORT_SYMBOL_GPL(phytium_spi_add_host); + +void phytium_spi_remove_host(struct phytium_spi *fts) +{ + spi_shutdown_chip(fts); + + free_irq(fts->irq, fts->master); +} +EXPORT_SYMBOL_GPL(phytium_spi_remove_host); + +int phytium_spi_suspend_host(struct phytium_spi *fts) +{ + int ret; + + ret = spi_controller_suspend(fts->master); + if (ret) + return ret; + + spi_shutdown_chip(fts); + return 0; +} +EXPORT_SYMBOL_GPL(phytium_spi_suspend_host); + +int phytium_spi_resume_host(struct 
phytium_spi *fts) +{ + int ret; + + spi_hw_init(&fts->master->dev, fts); + ret = spi_controller_resume(fts->master); + if (ret) + dev_err(&fts->master->dev, "fail to start queue (%d)\n", ret); + return ret; +} +EXPORT_SYMBOL_GPL(phytium_spi_resume_host); + +MODULE_AUTHOR("Zhu Mingshuai "); +MODULE_AUTHOR("Chen Baozi "); +MODULE_DESCRIPTION("Driver for Phytium SPI controller core"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/spi-phytium.h b/drivers/spi/spi-phytium.h new file mode 100644 index 000000000000..44b53ef4d1ea --- /dev/null +++ b/drivers/spi/spi-phytium.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef PHYTIUM_SPI_HEADER_H +#define PHYTIUM_SPI_HEADER_H + +#include +#include +#include + +#define CTRL0 0x00 +#define SSIENR 0x08 +#define SER 0x10 +#define BAUDR 0x14 +#define TXFLTR 0x18 +#define TXFLR 0x20 +#define RXFLR 0x24 +#define IMR 0x2c +#define ISR 0x30 +#define ICR 0x48 +#define DR 0x60 +#define GCSR 0x100 + +#define FRF_OFFSET 4 +#define MODE_OFFSET 6 +#define TMOD_OFFSET 8 + +#define TMOD_MASK (0x3 << TMOD_OFFSET) +#define TMOD_TR 0x0 +#define TMOD_TO 0x1 +#define TMOD_RO 0x2 + +#define INT_TXEI (1 << 0) +#define INT_TXOI (1 << 1) +#define INT_RXUI (1 << 2) +#define INT_RXOI (1 << 3) + +struct phytium_spi { + struct spi_master *master; + char name[16]; + + void __iomem *regs; + bool global_cs; + unsigned long paddr; + int irq; + u32 fifo_len; + u32 max_freq; + + u32 reg_io_width; + u16 bus_num; + u16 num_cs; + int *cs; + + size_t len; + void *tx; + void *tx_end; + void *rx; + void *rx_end; + u8 n_bytes; + irqreturn_t (*transfer_handler)(struct phytium_spi *fts); +}; + +extern int phytium_spi_add_host(struct device *dev, struct phytium_spi *fts); +extern void phytium_spi_remove_host(struct phytium_spi *fts); +extern int phytium_spi_suspend_host(struct phytium_spi *fts); +extern int phytium_spi_resume_host(struct phytium_spi *fts); + +#endif /* PHYTIUM_SPI_HEADER_H */ diff --git a/drivers/tee/optee/Kconfig 
b/drivers/tee/optee/Kconfig
index 3c1ec4e9ed29..f3be66ef85b7 100644
--- a/drivers/tee/optee/Kconfig
+++ b/drivers/tee/optee/Kconfig
@@ -14,3 +14,33 @@ config OPTEE_SHM_NUM_PRIV_PAGES
 	help
 	  This sets the number of private shared memory pages to be
 	  used by OP-TEE TEE driver.
+
+if OPTEE
+
+choice
+	prompt "Default conduit method"
+	default OPTEE_DEFAULT_METHOD_NONE
+	help
+	  This option sets the default conduit method for OP-TEE in case
+	  firmware misses "method" property. If in doubt, select "none"
+	  which depends on firmware to provide the value.
+
+config OPTEE_DEFAULT_METHOD_NONE
+	bool "none"
+	help
+	  There is no default conduit method used by the driver. Require
+	  firmware to provide the method explicitly.
+
+config OPTEE_DEFAULT_METHOD_HVC
+	bool "hvc"
+	help
+	  Use the "hvc" as default conduit method.
+
+config OPTEE_DEFAULT_METHOD_SMC
+	bool "smc"
+	help
+	  Use the "smc" as default conduit method.
+
+endchoice
+
+endif
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index 2f254f957b0a..640cfc9010e1 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -14,6 +14,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/acpi.h>
 #include <linux/arm-smccc.h>
 #include <linux/errno.h>
 #include <linux/io.h>
@@ -529,15 +530,23 @@ static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
 	arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
 }
 
-static optee_invoke_fn *get_invoke_func(struct device_node *np)
+#if defined(CONFIG_OPTEE_DEFAULT_METHOD_HVC)
+#define DEFAULT_CONDUIT_METHOD optee_smccc_hvc
+#elif defined(CONFIG_OPTEE_DEFAULT_METHOD_SMC)
+#define DEFAULT_CONDUIT_METHOD optee_smccc_smc
+#else
+#define DEFAULT_CONDUIT_METHOD ERR_PTR(-ENXIO)
+#endif
+
+static optee_invoke_fn *get_invoke_func(struct device *dev)
 {
 	const char *method;
 
-	pr_info("probing for conduit method from DT.\n");
+	pr_info("probing for conduit method.\n");
 
-	if (of_property_read_string(np, "method", &method)) {
+	if (device_property_read_string(dev, "method", &method)) {
 		pr_warn("missing \"method\" property\n");
- return ERR_PTR(-ENXIO); + return DEFAULT_CONDUIT_METHOD; } if (!strcmp("hvc", method)) @@ -549,7 +558,37 @@ static optee_invoke_fn *get_invoke_func(struct device_node *np) return ERR_PTR(-EINVAL); } -static struct optee *optee_probe(struct device_node *np) +static int optee_remove(struct platform_device *pdev) +{ + struct optee *optee = platform_get_drvdata(pdev); + + /* + * Ask OP-TEE to free all cached shared memory objects to decrease + * reference counters and also avoid wild pointers in secure world + * into the old shared memory range. + */ + optee_disable_shm_cache(optee); + + /* + * The two devices have to be unregistered before we can free the + * other resources. + */ + tee_device_unregister(optee->supp_teedev); + tee_device_unregister(optee->teedev); + + tee_shm_pool_free(optee->pool); + if (optee->memremaped_shm) + memunmap(optee->memremaped_shm); + optee_wait_queue_exit(&optee->wait_queue); + optee_supp_uninit(&optee->supp); + mutex_destroy(&optee->call_queue.mutex); + + kfree(optee); + + return 0; +} + +static int optee_probe(struct platform_device *pdev) { optee_invoke_fn *invoke_fn; struct tee_shm_pool *pool; @@ -559,25 +598,25 @@ static struct optee *optee_probe(struct device_node *np) u32 sec_caps; int rc; - invoke_fn = get_invoke_func(np); + invoke_fn = get_invoke_func(&pdev->dev); if (IS_ERR(invoke_fn)) - return (void *)invoke_fn; + return PTR_ERR(invoke_fn); if (!optee_msg_api_uid_is_optee_api(invoke_fn)) { pr_warn("api uid mismatch\n"); - return ERR_PTR(-EINVAL); + return -EINVAL; } optee_msg_get_os_revision(invoke_fn); if (!optee_msg_api_revision_is_compatible(invoke_fn)) { pr_warn("api revision mismatch\n"); - return ERR_PTR(-EINVAL); + return -EINVAL; } if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) { pr_warn("capabilities mismatch\n"); - return ERR_PTR(-EINVAL); + return -EINVAL; } /* @@ -589,7 +628,7 @@ static struct optee *optee_probe(struct device_node *np) pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm, 
sec_caps); if (IS_ERR(pool)) - return (void *)pool; + return PTR_ERR(pool); optee = kzalloc(sizeof(*optee), GFP_KERNEL); if (!optee) { @@ -631,8 +670,10 @@ static struct optee *optee_probe(struct device_node *np) optee_enable_shm_cache(optee); + platform_set_drvdata(pdev, optee); + pr_info("initialized driver\n"); - return optee; + return 0; err: if (optee) { /* @@ -648,83 +689,37 @@ static struct optee *optee_probe(struct device_node *np) tee_shm_pool_free(pool); if (memremaped_shm) memunmap(memremaped_shm); - return ERR_PTR(rc); -} - -static void optee_remove(struct optee *optee) -{ - /* - * Ask OP-TEE to free all cached shared memory objects to decrease - * reference counters and also avoid wild pointers in secure world - * into the old shared memory range. - */ - optee_disable_shm_cache(optee); - - /* - * The two devices has to be unregistered before we can free the - * other resources. - */ - tee_device_unregister(optee->supp_teedev); - tee_device_unregister(optee->teedev); - - tee_shm_pool_free(optee->pool); - if (optee->memremaped_shm) - memunmap(optee->memremaped_shm); - optee_wait_queue_exit(&optee->wait_queue); - optee_supp_uninit(&optee->supp); - mutex_destroy(&optee->call_queue.mutex); - - kfree(optee); + return rc; } -static const struct of_device_id optee_match[] = { +static const struct of_device_id optee_dt_match[] = { { .compatible = "linaro,optee-tz" }, {}, }; -static struct optee *optee_svc; - -static int __init optee_driver_init(void) -{ - struct device_node *fw_np; - struct device_node *np; - struct optee *optee; - - /* Node is supposed to be below /firmware */ - fw_np = of_find_node_by_name(NULL, "firmware"); - if (!fw_np) - return -ENODEV; - - np = of_find_matching_node(fw_np, optee_match); - if (!np || !of_device_is_available(np)) { - of_node_put(np); - return -ENODEV; - } - - optee = optee_probe(np); - of_node_put(np); - - if (IS_ERR(optee)) - return PTR_ERR(optee); - - optee_svc = optee; - - return 0; -} -module_init(optee_driver_init); - 
-static void __exit optee_driver_exit(void) -{ - struct optee *optee = optee_svc; - - optee_svc = NULL; - if (optee) - optee_remove(optee); -} -module_exit(optee_driver_exit); +MODULE_DEVICE_TABLE(of, optee_dt_match); +#ifdef CONFIG_ACPI +static const struct acpi_device_id optee_acpi_match[] = { + { "PHYT8003" }, + { } +}; +MODULE_DEVICE_TABLE(acpi, optee_acpi_match); +#endif + +static struct platform_driver optee_driver = { + .probe = optee_probe, + .remove = optee_remove, + .driver = { + .name = "optee", + .of_match_table = optee_dt_match, + .acpi_match_table = ACPI_PTR(optee_acpi_match), + }, +}; +module_platform_driver(optee_driver); MODULE_AUTHOR("Linaro"); MODULE_DESCRIPTION("OP-TEE driver"); MODULE_SUPPORTED_DEVICE(""); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:optee"); diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index cd13065095bc..a11e6c87fe32 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -74,6 +74,17 @@ config SERIAL_AMBA_PL011_CONSOLE your boot loader (lilo or loadlin) about how to pass options to the kernel at boot time.) +config SERIAL_PHYTIUM_PCI + tristate "Phytium PCI serial port support" + depends on PCI + select SERIAL_CORE + help + This driver supports the Phytium UART controller on PCI/PCIe adapters. + If you want to compile this driver into the kernel, say Y here. To + compile this driver as a module, choose M here. + + If unsure, say N. 
+ config SERIAL_EARLYCON_ARM_SEMIHOST bool "Early console using ARM semihosting" depends on ARM64 || ARM diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile index daac675612df..6d4cf6bc06ca 100644 --- a/drivers/tty/serial/Makefile +++ b/drivers/tty/serial/Makefile @@ -24,6 +24,7 @@ obj-$(CONFIG_SERIAL_8250) += 8250/ obj-$(CONFIG_SERIAL_AMBA_PL010) += amba-pl010.o obj-$(CONFIG_SERIAL_AMBA_PL011) += amba-pl011.o +obj-$(CONFIG_SERIAL_PHYTIUM_PCI) += phytium-uart.o obj-$(CONFIG_SERIAL_CLPS711X) += clps711x.o obj-$(CONFIG_SERIAL_PXA_NON8250) += pxa.o obj-$(CONFIG_SERIAL_PNX8XXX) += pnx8xxx_uart.o diff --git a/drivers/tty/serial/phytium-uart.c b/drivers/tty/serial/phytium-uart.c new file mode 100644 index 000000000000..bd59a97740c7 --- /dev/null +++ b/drivers/tty/serial/phytium-uart.c @@ -0,0 +1,922 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for Phytium PCI UART controller + * + * Copyright 2021 Phytium Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "phytium_uart" + +#define REG_DR 0x00 +#define REG_FR 0x18 +#define REG_IBRD 0x24 +#define REG_FBRD 0x28 +#define REG_LCRH_RX 0x2c +#define REG_LCRH_TX 0x2c +#define REG_CR 0x30 +#define REG_IFLS 0x34 +#define REG_IMSC 0x38 +#define REG_RIS 0x3c +#define REG_MIS 0x40 +#define REG_ICR 0x44 + +#define REG_DR_OE (1 << 11) +#define REG_DR_BE (1 << 10) +#define REG_DR_PE (1 << 9) +#define REG_DR_FE (1 << 8) + +#define REG_LCRH_SPS 0x80 +#define REG_LCRH_WLEN_8 0x60 +#define REG_LCRH_WLEN_7 0x40 +#define REG_LCRH_WLEN_6 0x20 +#define REG_LCRH_WLEN_5 0x00 +#define REG_LCRH_FEN 0x10 +#define REG_LCRH_STP2 0x08 +#define REG_LCRH_EPS 0x04 +#define REG_LCRH_PEN 0x02 +#define REG_LCRH_BRK 0x01 + +#define REG_FR_RI 0x100 +#define REG_FR_TXFE 0x080 +#define REG_FR_RXFF 0x040 +#define REG_FR_TXFF 0x020 +#define REG_FR_RXFE 0x010 +#define REG_FR_BUSY 0x008 +#define REG_FR_DCD 0x004 +#define REG_FR_DSR 0x002 +#define 
REG_FR_CTS 0x001 +#define REG_FR_TMSK (REG_FR_TXFF + REG_FR_BUSY) + +#define REG_CR_CTSEN 0x8000 /* CTS hardware flow control */ +#define REG_CR_RTSEN 0x4000 /* RTS hardware flow control */ +#define REG_CR_OUT2 0x2000 /* OUT2 */ +#define REG_CR_OUT1 0x1000 /* OUT1 */ +#define REG_CR_RTS 0x0800 /* RTS */ +#define REG_CR_DTR 0x0400 /* DTR */ +#define REG_CR_RXE 0x0200 /* receive enable */ +#define REG_CR_TXE 0x0100 /* transmit enable */ +#define REG_CR_LBE 0x0080 /* loopback enable */ +#define REG_CR_RTIE 0x0040 +#define REG_CR_TIE 0x0020 +#define REG_CR_RIE 0x0010 +#define REG_CR_MSIE 0x0008 +#define REG_CR_IIRLP 0x0004 /* SIR low power mode */ +#define REG_CR_SIREN 0x0002 /* SIR enable */ +#define REG_CR_UARTEN 0x0001 /* UART enable */ + +#define REG_IFLS_RX1_8 (0 << 3) +#define REG_IFLS_RX2_8 (1 << 3) +#define REG_IFLS_RX4_8 (2 << 3) +#define REG_IFLS_RX6_8 (3 << 3) +#define REG_IFLS_RX7_8 (4 << 3) +#define REG_IFLS_TX1_8 (0 << 0) +#define REG_IFLS_TX2_8 (1 << 0) +#define REG_IFLS_TX4_8 (2 << 0) +#define REG_IFLS_TX6_8 (3 << 0) + +#define REG_IMSC_OEIM (1 << 10) /* overrun error interrupt mask */ +#define REG_IMSC_BEIM (1 << 9) /* break error interrupt mask */ +#define REG_IMSC_PEIM (1 << 8) /* parity error interrupt mask */ +#define REG_IMSC_FEIM (1 << 7) /* framing error interrupt mask */ +#define REG_IMSC_RTIM (1 << 6) /* receive timeout interrupt mask */ +#define REG_IMSC_TXIM (1 << 5) /* transmit interrupt mask */ +#define REG_IMSC_RXIM (1 << 4) /* receive interrupt mask */ +#define REG_IMSC_DSRMIM (1 << 3) /* DSR interrupt mask */ +#define REG_IMSC_DCDMIM (1 << 2) /* DCD interrupt mask */ +#define REG_IMSC_CTSMIM (1 << 1) /* CTS interrupt mask */ +#define REG_IMSC_RIMIM (1 << 0) /* RI interrupt mask */ + +#define REG_ICR_OEIS (1 << 10) /* overrun error interrupt status */ +#define REG_ICR_BEIS (1 << 9) /* break error interrupt status */ +#define REG_ICR_PEIS (1 << 8) /* parity error interrupt status */ +#define REG_ICR_FEIS (1 << 7) /* framing error 
interrupt status */ +#define REG_ICR_RTIS (1 << 6) /* receive timeout interrupt status */ +#define REG_ICR_TXIS (1 << 5) /* transmit interrupt status */ +#define REG_ICR_RXIS (1 << 4) /* receive interrupt status */ +#define REG_ICR_DSRMIS (1 << 3) /* DSR interrupt status */ +#define REG_ICR_DCDMIS (1 << 2) /* DCD interrupt status */ +#define REG_ICR_CTSMIS (1 << 1) /* CTS interrupt status */ +#define REG_ICR_RIMIS (1 << 0) /* RI interrupt status */ + +#define UART_NR 12 + +#define UART_DR_ERROR (REG_DR_OE|REG_DR_BE|REG_DR_PE|REG_DR_FE) +#define UART_DUMMY_DR_RX (1 << 16) + +#define DEFAULT_UARTCLK 48000000 /* 48 MHz */ + +/* + * We wrap our port structure around the generic uart_port. + */ +struct phytium_uart_port { + struct uart_port port; + unsigned int im; /* interrupt mask */ + unsigned int old_status; + unsigned int old_cr; /* state during shutdown */ + char type[12]; +}; + +static unsigned int phytium_uart_read(const struct phytium_uart_port *pup, + unsigned int reg) +{ + void __iomem *addr = pup->port.membase + reg; + + return readl_relaxed(addr); +} + +static void phytium_uart_write(unsigned int val, const struct phytium_uart_port *pup, + unsigned int reg) +{ + void __iomem *addr = pup->port.membase + reg; + + writel_relaxed(val, addr); +} + +static int phytium_fifo_to_tty(struct phytium_uart_port *pup) +{ + u16 status; + unsigned int ch, flag, fifotaken; + + for (fifotaken = 0; fifotaken < 256; fifotaken++) { + status = phytium_uart_read(pup, REG_FR); + if (status & REG_FR_RXFE) + break; + + /* Take chars from the FIFO and update status */ + ch = phytium_uart_read(pup, REG_DR) | UART_DUMMY_DR_RX; + flag = TTY_NORMAL; + pup->port.icount.rx++; + + if (unlikely(ch & UART_DR_ERROR)) { + if (ch & REG_DR_BE) { + ch &= ~(REG_DR_FE | REG_DR_PE); + pup->port.icount.brk++; + if (uart_handle_break(&pup->port)) + continue; + } else if (ch & REG_DR_PE) + pup->port.icount.parity++; + else if (ch & REG_DR_FE) + pup->port.icount.frame++; + if (ch & REG_DR_OE) + 
pup->port.icount.overrun++; + + ch &= pup->port.read_status_mask; + + if (ch & REG_DR_BE) + flag = TTY_BREAK; + else if (ch & REG_DR_PE) + flag = TTY_PARITY; + else if (ch & REG_DR_FE) + flag = TTY_FRAME; + } + + if (uart_handle_sysrq_char(&pup->port, ch & 255)) + continue; + + uart_insert_char(&pup->port, ch, REG_DR_OE, ch, flag); + } + + return fifotaken; +} + +static void phytium_rx_chars(struct phytium_uart_port *pup) +__releases(&pup->port.lock) +__acquires(&pup->port.lock) +{ + phytium_fifo_to_tty(pup); + + spin_unlock(&pup->port.lock); + tty_flip_buffer_push(&pup->port.state->port); + spin_lock(&pup->port.lock); +} + +static void phytium_stop_tx(struct uart_port *port) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + + pup->im &= ~REG_IMSC_TXIM; + phytium_uart_write(pup->im, pup, REG_IMSC); +} + +static bool phytium_tx_char(struct phytium_uart_port *pup, unsigned char c, + bool from_irq) +{ + + if (unlikely(!from_irq) && + phytium_uart_read(pup, REG_FR) & REG_FR_TXFF) + return false; /* unable to transmit character */ + + phytium_uart_write(c, pup, REG_DR); + pup->port.icount.tx++; + + return true; +} + +static bool phytium_tx_chars(struct phytium_uart_port *pup, bool from_irq) +{ + struct circ_buf *xmit = &pup->port.state->xmit; + int count = pup->port.fifosize >> 1; + + if (pup->port.x_char) { + if (!phytium_tx_char(pup, pup->port.x_char, from_irq)) + return true; + pup->port.x_char = 0; + --count; + } + if (uart_circ_empty(xmit) || uart_tx_stopped(&pup->port)) { + phytium_stop_tx(&pup->port); + return false; + } + + do { + if (likely(from_irq) && count-- == 0) + break; + + if (!phytium_tx_char(pup, xmit->buf[xmit->tail], from_irq)) + break; + + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + } while (!uart_circ_empty(xmit)); + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&pup->port); + + if (uart_circ_empty(xmit)) { + phytium_stop_tx(&pup->port); + return false; + } + return 
true; +} + +static void phytium_modem_status(struct phytium_uart_port *pup) +{ + unsigned int status, delta; + + status = phytium_uart_read(pup, REG_FR) & (REG_FR_DCD|REG_FR_DSR|REG_FR_CTS); + + delta = status ^ pup->old_status; + pup->old_status = status; + + if (!delta) + return; + + if (delta & REG_FR_DCD) + uart_handle_dcd_change(&pup->port, status & REG_FR_DCD); + + if (delta & REG_FR_DSR) + pup->port.icount.dsr++; + + if (delta & REG_FR_CTS) + uart_handle_cts_change(&pup->port, status & REG_FR_CTS); + + wake_up_interruptible(&pup->port.state->port.delta_msr_wait); +} + +static irqreturn_t phytium_uart_interrupt(int irq, void *dev_id) +{ + struct phytium_uart_port *pup = dev_id; + unsigned long flags; + unsigned int status, pass_counter = 256; + int handled = 0; + + spin_lock_irqsave(&pup->port.lock, flags); + status = phytium_uart_read(pup, REG_RIS) & pup->im; + if (status) { + do { + phytium_uart_write(status & ~(REG_ICR_TXIS|REG_ICR_RTIS|REG_ICR_RXIS), + pup, REG_ICR); + + if (status & (REG_ICR_RTIS|REG_ICR_RXIS)) + phytium_rx_chars(pup); + + if (status & (REG_ICR_DSRMIS|REG_ICR_DCDMIS| + REG_ICR_CTSMIS|REG_ICR_RIMIS)) + phytium_modem_status(pup); + if (status & REG_ICR_TXIS) + phytium_tx_chars(pup, true); + + if (pass_counter-- == 0) + break; + + status = phytium_uart_read(pup, REG_RIS) & pup->im; + } while (status != 0); + handled = 1; + } + spin_unlock_irqrestore(&pup->port.lock, flags); + + return IRQ_RETVAL(handled); +} + +static unsigned int phytium_tx_empty(struct uart_port *port) +{ + unsigned int status; + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + + status = phytium_uart_read(pup, REG_FR) & (REG_FR_BUSY | REG_FR_TXFF); + + return status ? 
0 : TIOCSER_TEMT; +} + +static void phytium_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + unsigned int cr; + + cr = phytium_uart_read(pup, REG_CR); + +#define TIOCMBIT(tiocmbit, uartbit) \ + do { \ + if (mctrl & tiocmbit) \ + cr |= uartbit; \ + else \ + cr &= ~uartbit; \ + } while (0) + + TIOCMBIT(TIOCM_RTS, REG_CR_RTS); + TIOCMBIT(TIOCM_DTR, REG_CR_DTR); + TIOCMBIT(TIOCM_OUT1, REG_CR_OUT1); + TIOCMBIT(TIOCM_OUT2, REG_CR_OUT2); + TIOCMBIT(TIOCM_LOOP, REG_CR_LBE); + + if (port->status & UPSTAT_AUTORTS) { + /* We need to disable auto-RTS if we want to turn RTS off */ + TIOCMBIT(TIOCM_RTS, REG_CR_RTSEN); + } +#undef TIOCMBIT + + phytium_uart_write(cr, pup, REG_CR); +} + +static unsigned int phytium_get_mctrl(struct uart_port *port) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + unsigned int cr = 0; + unsigned int status = phytium_uart_read(pup, REG_FR); + +#define TIOCMBIT(uartbit, tiocmbit) \ + do { \ + if (status & uartbit) \ + cr |= tiocmbit; \ + } while (0) + + TIOCMBIT(REG_FR_DCD, TIOCM_CAR); + TIOCMBIT(REG_FR_DSR, TIOCM_DSR); + TIOCMBIT(REG_FR_CTS, TIOCM_CTS); + TIOCMBIT(REG_FR_RI, TIOCM_RNG); +#undef TIOCMBIT + return cr; +} + +static void phytium_start_tx(struct uart_port *port) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + + if (phytium_tx_chars(pup, false)) { + pup->im |= REG_IMSC_TXIM; + phytium_uart_write(pup->im, pup, REG_IMSC); + } +} + +static void phytium_stop_rx(struct uart_port *port) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + + pup->im &= ~(REG_IMSC_RXIM|REG_IMSC_RTIM|REG_IMSC_FEIM| + REG_IMSC_PEIM|REG_IMSC_BEIM|REG_IMSC_OEIM); + phytium_uart_write(pup->im, pup, REG_IMSC); +} + +static void phytium_enable_ms(struct uart_port *port) +{ + struct phytium_uart_port *pup = + container_of(port, struct 
phytium_uart_port, port); + + pup->im |= REG_IMSC_RIMIM|REG_IMSC_CTSMIM|REG_IMSC_DCDMIM|REG_IMSC_DSRMIM; + phytium_uart_write(pup->im, pup, REG_IMSC); +} + +static void phytium_break_ctl(struct uart_port *port, int break_state) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + unsigned long flags; + unsigned int lcr_h; + + spin_lock_irqsave(&pup->port.lock, flags); + lcr_h = phytium_uart_read(pup, REG_LCRH_TX); + if (break_state == -1) + lcr_h |= REG_LCRH_BRK; + else + lcr_h &= ~REG_LCRH_BRK; + phytium_uart_write(lcr_h, pup, REG_LCRH_TX); + spin_unlock_irqrestore(&pup->port.lock, flags); +} + +static int phytium_hwinit(struct uart_port *port) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + + /* XXX: more configurable setup method in future */ + pup->port.uartclk = DEFAULT_UARTCLK; + + /* Clear pending error and receive interrupts */ + phytium_uart_write(REG_ICR_OEIS | REG_ICR_BEIS | REG_ICR_PEIS | + REG_ICR_FEIS | REG_ICR_RTIS | REG_ICR_RXIS, + pup, REG_ICR); + + /* + * Save interrupts enable mask, and enable RX interrupts in case if + * the interrupt is used for NMI entry. + */ + pup->im = phytium_uart_read(pup, REG_IMSC); + phytium_uart_write(REG_IMSC_RTIM | REG_IMSC_RXIM, pup, REG_IMSC); + + return 0; +} + +static int phytium_uart_allocate_irq(struct phytium_uart_port *pup) +{ + phytium_uart_write(pup->im, pup, REG_IMSC); + + return request_irq(pup->port.irq, phytium_uart_interrupt, IRQF_SHARED, DRV_NAME, pup); +} + +static void phytium_enable_interrtups(struct phytium_uart_port *pup) +{ + unsigned int i; + + spin_lock_irq(&pup->port.lock); + + /* Clear out any spuriously appearing RX interrupts */ + phytium_uart_write(REG_ICR_RTIS | REG_ICR_RXIS, pup, REG_ICR); + + /* + * RXIS is asserted only when the RX FIFO transitions from below + * to above the trigger threshold. If the RX FIFO is already + * full to the threashold this can't happen and RXIS will now be + * stuck off. 
Drain the RX FIFO explicitly to fix this: + */ + for (i = 0; i < pup->port.fifosize * 2; i++) { + if (phytium_uart_read(pup, REG_FR) & REG_FR_RXFE) + break; + + phytium_uart_read(pup, REG_DR); + } + + pup->im = REG_IMSC_RTIM | REG_IMSC_RXIM; + phytium_uart_write(pup->im, pup, REG_IMSC); + spin_unlock_irq(&pup->port.lock); +} + +static int phytium_startup(struct uart_port *port) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + unsigned int cr; + int ret = 0; + + ret = phytium_hwinit(port); + if (ret) + goto out; + + ret = phytium_uart_allocate_irq(pup); + if (ret) + goto out; + + phytium_uart_write(REG_IFLS_RX4_8|REG_IFLS_TX4_8, pup, REG_IFLS); + + spin_lock_irq(&pup->port.lock); + + /* restore RTS and DTR */ + cr = pup->old_cr & (REG_CR_RTS | REG_CR_DTR); + cr |= REG_CR_UARTEN | REG_CR_RXE | REG_CR_TXE; + phytium_uart_write(cr, pup, REG_CR); + + spin_unlock_irq(&pup->port.lock); + + /* initialise the old status of the modem signals */ + pup->old_status = phytium_uart_read(pup, REG_FR) & (REG_FR_DCD|REG_FR_DSR|REG_FR_CTS); + + phytium_enable_interrtups(pup); + +out: + return ret; +} + +static void phytium_shutdown_channel(struct phytium_uart_port *pup, + unsigned int lcrh) +{ + unsigned long val; + + val = phytium_uart_read(pup, lcrh); + val &= ~(REG_LCRH_BRK | REG_LCRH_FEN); + phytium_uart_write(val, pup, lcrh); +} + +static void phytium_disable_uart(struct phytium_uart_port *pup) +{ + unsigned int cr; + + pup->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS); + spin_lock_irq(&pup->port.lock); + cr = phytium_uart_read(pup, REG_CR); + pup->old_cr = cr; + cr &= REG_CR_RTS | REG_CR_DTR; + cr |= REG_CR_UARTEN | REG_CR_TXE; + phytium_uart_write(cr, pup, REG_CR); + spin_unlock_irq(&pup->port.lock); + + /* + * disable break condition and fifos + */ + phytium_shutdown_channel(pup, REG_LCRH_RX); +} + +static void phytium_disable_interrupts(struct phytium_uart_port *pup) +{ + spin_lock_irq(&pup->port.lock); + + /* mask all 
interrupts and clear all pending ones */ + pup->im = 0; + phytium_uart_write(pup->im, pup, REG_IMSC); + phytium_uart_write(0xffff, pup, REG_ICR); + + spin_unlock_irq(&pup->port.lock); +} + +static void phytium_shutdown(struct uart_port *port) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + + phytium_disable_interrupts(pup); + + free_irq(pup->port.irq, pup); + + phytium_disable_uart(pup); + + if (pup->port.ops->flush_buffer) + pup->port.ops->flush_buffer(port); +} + +static void +phytium_setup_status_masks(struct uart_port *port, struct ktermios *termios) +{ + port->read_status_mask = REG_DR_OE | 255; + if (termios->c_iflag & INPCK) + port->read_status_mask |= REG_DR_FE | REG_DR_PE; + if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) + port->read_status_mask |= REG_DR_BE; + + /* + * Characters to ignore + */ + port->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= REG_DR_FE | REG_DR_PE; + if (termios->c_iflag & IGNBRK) { + port->ignore_status_mask |= REG_DR_BE; + /* + * If we're ignoring parity and break indicators, + * ignore overruns too (for real raw support). + */ + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= REG_DR_OE; + } + + /* + * Ignore all characters if CREAD is not set. + */ + if ((termios->c_cflag & CREAD) == 0) + port->ignore_status_mask |= UART_DUMMY_DR_RX; +} + +static void +phytium_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + unsigned int lcr_h, old_cr; + unsigned long flags; + unsigned int baud, quot; + + /* Ask the core to calculate the divisor for us. 
*/ + baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); + + if (baud > port->uartclk/16) + quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud); + else + quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud); + + switch (termios->c_cflag & CSIZE) { + case CS5: + lcr_h = REG_LCRH_WLEN_5; + break; + case CS6: + lcr_h = REG_LCRH_WLEN_6; + break; + case CS7: + lcr_h = REG_LCRH_WLEN_7; + break; + default: /* CS8 */ + lcr_h = REG_LCRH_WLEN_8; + break; + } + if (termios->c_cflag & CSTOPB) + lcr_h |= REG_LCRH_STP2; + if (termios->c_cflag & PARENB) { + lcr_h |= REG_LCRH_PEN; + if (!(termios->c_cflag & PARODD)) + lcr_h |= REG_LCRH_EPS; + if (termios->c_cflag & CMSPAR) + lcr_h |= REG_LCRH_SPS; + } + if (pup->port.fifosize > 1) + lcr_h |= REG_LCRH_FEN; + + spin_lock_irqsave(&port->lock, flags); + + /* + * Update the per-port timeout. + */ + uart_update_timeout(port, termios->c_cflag, baud); + + phytium_setup_status_masks(port, termios); + + if (UART_ENABLE_MS(port, termios->c_cflag)) + phytium_enable_ms(port); + + /* first, disable everything */ + old_cr = phytium_uart_read(pup, REG_CR); + phytium_uart_write(0, pup, REG_CR); + + if (termios->c_cflag & CRTSCTS) { + if (old_cr & REG_CR_RTS) + old_cr |= REG_CR_RTSEN; + + old_cr |= REG_CR_CTSEN; + port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS; + } else { + old_cr &= ~(REG_CR_CTSEN | REG_CR_RTSEN); + port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS); + } + + /* Set baud rate */ + phytium_uart_write(quot & 0x3f, pup, REG_FBRD); + phytium_uart_write(quot >> 6, pup, REG_IBRD); + + phytium_uart_write(lcr_h, pup, REG_LCRH_RX); + phytium_uart_write(old_cr, pup, REG_CR); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static const char *phytium_type(struct uart_port *port) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + return pup->port.type == PORT_PHYTIUM ? pup->type : NULL; +} + +static void phytium_release_port(struct uart_port *port) +{ + /* Nothing to release ... 
*/ +} + +static int phytium_request_port(struct uart_port *port) +{ + /* UARTs always present */ + return 0; +} + +static void phytium_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) { + port->type = PORT_PHYTIUM; + phytium_request_port(port); + } +} + +static int phytium_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + int ret = 0; + + if (ser->type != PORT_UNKNOWN && ser->type != PORT_PHYTIUM) + ret = -EINVAL; + if (ser->irq < 0 || ser->irq >= nr_irqs) + ret = -EINVAL; + if (ser->baud_base < 9600) + ret = -EINVAL; + + return ret; +} + +static const struct uart_ops phytium_uart_ops = { + .tx_empty = phytium_tx_empty, + .set_mctrl = phytium_set_mctrl, + .get_mctrl = phytium_get_mctrl, + .stop_tx = phytium_stop_tx, + .start_tx = phytium_start_tx, + .stop_rx = phytium_stop_rx, + .enable_ms = phytium_enable_ms, + .break_ctl = phytium_break_ctl, + .startup = phytium_startup, + .shutdown = phytium_shutdown, + .set_termios = phytium_set_termios, + .type = phytium_type, + .release_port = phytium_release_port, + .request_port = phytium_request_port, + .config_port = phytium_config_port, + .verify_port = phytium_verify_port, +}; + +static struct phytium_uart_port *uart_ports[UART_NR]; + +static struct uart_driver phytium_uart = { + .owner = THIS_MODULE, + .driver_name = DRV_NAME, + .dev_name = "ttyS", + .nr = UART_NR, +}; + +void phytium_unregister_port(struct phytium_uart_port *pup) +{ + int i; + bool busy = false; + + for (i = 0; i < ARRAY_SIZE(uart_ports); i++) { + if (uart_ports[i] == pup) + uart_ports[i] = NULL; + else if (uart_ports[i]) + busy = true; + } + + if (!busy) + uart_unregister_driver(&phytium_uart); +} + +static int phytium_find_free_port(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(uart_ports); i++) + if (uart_ports[i] == NULL) + return i; + + return -EBUSY; +} + +static int phytium_register_port(struct phytium_uart_port *pup) +{ + int rc; + + /* Ensure interrupts from this UART are masked and 
cleared */ + phytium_uart_write(0, pup, REG_IMSC); + phytium_uart_write(0xffff, pup, REG_ICR); + + if (!phytium_uart.state) { + rc = uart_register_driver(&phytium_uart); + if (rc < 0) { + dev_err(pup->port.dev, + "Failed to register Phytium PCI UART driver\n"); + return rc; + } + } + + rc = uart_add_one_port(&phytium_uart, &pup->port); + if (rc) + phytium_unregister_port(pup); + + return rc; +} + +static int phytium_uart_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct phytium_uart_port *pup; + int portnr, rc; + + portnr = phytium_find_free_port(); + if (portnr < 0) + return portnr; + + pup = devm_kzalloc(&pdev->dev, sizeof(struct phytium_uart_port), + GFP_KERNEL); + if (!pup) + return -ENOMEM; + + rc = pcim_enable_device(pdev); + if (rc) + return rc; + + rc = pcim_iomap_regions_request_all(pdev, 0x01, pci_name(pdev)); + if (rc) + return rc; + + pup->port.iotype = UPIO_MEM32; + pup->port.irq = pdev->irq; + pup->port.mapbase = pci_resource_start(pdev, 0); + pup->port.membase = pcim_iomap_table(pdev)[0]; + pup->port.ops = &phytium_uart_ops; + pup->port.dev = &pdev->dev; + pup->port.fifosize = 32; + pup->port.flags = UPF_BOOT_AUTOCONF; + pup->port.line = portnr; + + uart_ports[portnr] = pup; + + pup->old_cr = 0; + snprintf(pup->type, sizeof(pup->type), "pci-uart"); + + pci_set_drvdata(pdev, pup); + + return phytium_register_port(pup); +} + +static void phytium_uart_remove(struct pci_dev *pdev) +{ + struct phytium_uart_port *pup = pci_get_drvdata(pdev); + + uart_remove_one_port(&phytium_uart, &pup->port); + phytium_unregister_port(pup); +} + +#ifdef CONFIG_PM_SLEEP +static int phytium_uart_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct phytium_uart_port *pup = pci_get_drvdata(pdev); + + if (pup) + uart_suspend_port(&phytium_uart, &pup->port); + + return 0; +} + +static int phytium_uart_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct phytium_uart_port *pup = 
pci_get_drvdata(pdev); + + if (pup) + uart_resume_port(&phytium_uart, &pup->port); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(phytium_dev_pm_ops, phytium_uart_suspend, phytium_uart_resume); + +static const struct pci_device_id pci_ids[] = { + { PCI_VDEVICE(PHYTIUM, 0xdc2e) }, + { 0 } +}; +MODULE_DEVICE_TABLE(pci, pci_ids); + +static struct pci_driver phytium_uart_pci_driver = { + .name = DRV_NAME, + .probe = phytium_uart_probe, + .remove = phytium_uart_remove, + .driver = { + .pm = &phytium_dev_pm_ops, + }, + .id_table = pci_ids, +}; + +static int __init phytium_uart_init(void) +{ + pr_info("Serial: Phytium PCI UART driver\n"); + + return pci_register_driver(&phytium_uart_pci_driver); +} + +static void __exit phytium_uart_exit(void) +{ + pci_unregister_driver(&phytium_uart_pci_driver); +} + +module_init(phytium_uart_init); +module_exit(phytium_uart_exit); + +MODULE_AUTHOR("Chen Baozi "); +MODULE_DESCRIPTION("Phytium PCI serial port driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index fa28f23a4a33..c31d45d165e7 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -1134,6 +1134,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) ((portstatus & USB_PORT_STAT_LINK_STATE) == USB_SS_PORT_LS_POLLING)) need_debounce_delay = true; + + /* Make sure a warm-reset request is handled by port_event */ + if (type == HUB_RESUME && + hub_port_warm_reset_required(hub, port1, portstatus)) + set_bit(port1, hub->event_bits); /* Clear status-change flags; we'll debounce later */ if (portchange & USB_PORT_STAT_C_CONNECTION) { diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index 31ac2e8fd4c3..25d417ad9000 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -1814,9 +1814,8 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) spin_lock_irq (&dev->lock); value = -EINVAL; if (dev->buf) { - 
spin_unlock_irq(&dev->lock); kfree(kbuf); - return value; + goto fail; } dev->buf = kbuf; @@ -1863,8 +1862,8 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) value = usb_gadget_probe_driver(&gadgetfs_driver); if (value != 0) { - spin_lock_irq(&dev->lock); - goto fail; + kfree (dev->buf); + dev->buf = NULL; } else { /* at this point "good" hardware has for the first time * let the USB the host see us. alternatively, if users @@ -1881,9 +1880,6 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) return value; fail: - dev->config = NULL; - dev->hs_config = NULL; - dev->dev = NULL; spin_unlock_irq (&dev->lock); pr_debug ("%s: %s fail %zd, %p\n", shortname, __func__, value, dev); kfree (dev->buf); diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 9e87c282a743..67b19e76cd34 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -2148,6 +2148,10 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, if (rhub->min_rev < minor_revision) rhub->min_rev = minor_revision; + if (xhci->quirks & XHCI_SLOWDOWN_QUIRK) + if (major_revision == 0x03) + rhub->min_rev = 0; + /* Port offset and count in the third dword, see section 7.2 */ temp = readl(addr + 2); port_offset = XHCI_EXT_PORT_OFF(temp); diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 0c6b6f14b169..18f47158062f 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -44,6 +44,7 @@ #define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8 #define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0 #define PCI_DEVICE_ID_INTEL_CML_XHCI 0xa3af +#define PCI_DEVICE_ID_PHYTIUM_XHCI 0xdc27 #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9 #define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba @@ -228,6 +229,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->vendor == PCI_VENDOR_ID_VIA) xhci->quirks |= XHCI_RESET_ON_RESUME; + if (pdev->vendor == 
PCI_VENDOR_ID_PHYTIUM || + pdev->device == PCI_DEVICE_ID_PHYTIUM_XHCI) { + xhci->quirks |= XHCI_RESET_ON_RESUME; + xhci->quirks |= XHCI_SLOWDOWN_QUIRK; + } + /* See https://bugzilla.kernel.org/show_bug.cgi?id=79511 */ if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3432) diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 7a4195f8cd1c..89e49a3b5bd9 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1870,6 +1870,7 @@ struct xhci_hcd { #define XHCI_SUSPEND_DELAY BIT_ULL(30) #define XHCI_INTEL_USB_ROLE_SW BIT_ULL(31) #define XHCI_ZERO_64B_REGS BIT_ULL(32) +#define XHCI_SLOWDOWN_QUIRK BIT_ULL(33) #define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34) #define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35) #define XHCI_DISABLE_SPARSE BIT_ULL(38) diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c index 501aebb5b81f..472e8bd2536d 100644 --- a/drivers/watchdog/dw_wdt.c +++ b/drivers/watchdog/dw_wdt.c @@ -18,6 +18,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include #include #include #include @@ -54,7 +55,7 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started " struct dw_wdt { void __iomem *regs; struct clk *clk; - unsigned long rate; + u64 rate; struct watchdog_device wdd; struct reset_control *rst; /* Save/restore */ @@ -252,18 +253,32 @@ static int dw_wdt_drv_probe(struct platform_device *pdev) if (IS_ERR(dw_wdt->regs)) return PTR_ERR(dw_wdt->regs); - dw_wdt->clk = devm_clk_get(dev, NULL); - if (IS_ERR(dw_wdt->clk)) - return PTR_ERR(dw_wdt->clk); + if (dev->of_node) { + dw_wdt->clk = devm_clk_get(dev, NULL); + if (IS_ERR(dw_wdt->clk)) + return PTR_ERR(dw_wdt->clk); - ret = clk_prepare_enable(dw_wdt->clk); - if (ret) - return ret; + ret = clk_prepare_enable(dw_wdt->clk); + if (ret) + return ret; - dw_wdt->rate = clk_get_rate(dw_wdt->clk); - if (dw_wdt->rate == 0) { - ret = -EINVAL; - goto out_disable_clk; + dw_wdt->rate = clk_get_rate(dw_wdt->clk); + if (dw_wdt->rate == 0) { + ret = -EINVAL; + goto 
out_disable_clk; + } + } else if (has_acpi_companion(&pdev->dev)) { + /* + * When Driver probe with ACPI device, clock devices + * are not available, so watchdog rate get from + * clock-frequency property given in _DSD object. + */ + device_property_read_u64(dev, "clock-frequency", + &dw_wdt->rate); + if (dw_wdt->rate == 0) { + ret = -EINVAL; + goto out_disable_clk; + } } dw_wdt->rst = devm_reset_control_get_optional_shared(&pdev->dev, NULL); @@ -325,6 +340,12 @@ static int dw_wdt_drv_remove(struct platform_device *pdev) return 0; } +static const struct acpi_device_id dw_wdt_acpi_match[] = { + { "PHYT0014", 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, dw_wdt_acpi_match); + #ifdef CONFIG_OF static const struct of_device_id dw_wdt_of_match[] = { { .compatible = "snps,dw-wdt", }, @@ -339,6 +360,7 @@ static struct platform_driver dw_wdt_driver = { .driver = { .name = "dw_wdt", .of_match_table = of_match_ptr(dw_wdt_of_match), + .acpi_match_table = ACPI_PTR(dw_wdt_acpi_match), .pm = &dw_wdt_pm_ops, }, }; diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 9524806eb4b9..97341fa75458 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -436,21 +436,11 @@ static void gnttab_add_deferred(grant_ref_t ref, bool readonly, what, ref, page ? 
page_to_pfn(page) : -1); } -int gnttab_try_end_foreign_access(grant_ref_t ref) -{ - int ret = _gnttab_end_foreign_access_ref(ref, 0); - - if (ret) - put_free_entry(ref); - - return ret; -} -EXPORT_SYMBOL_GPL(gnttab_try_end_foreign_access); - void gnttab_end_foreign_access(grant_ref_t ref, int readonly, unsigned long page) { - if (gnttab_try_end_foreign_access(ref)) { + if (gnttab_end_foreign_access_ref(ref, readonly)) { + put_free_entry(ref); if (page != 0) put_page(virt_to_page(page)); } else diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c index 6dde323dabd4..e35bb6b87449 100644 --- a/drivers/xen/xenbus/xenbus_client.c +++ b/drivers/xen/xenbus/xenbus_client.c @@ -368,14 +368,7 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr, unsigned int nr_pages, grant_ref_t *grefs) { int err; - unsigned int i; - grant_ref_t gref_head; - - err = gnttab_alloc_grant_references(nr_pages, &gref_head); - if (err) { - xenbus_dev_fatal(dev, err, "granting access to ring page"); - return err; - } + int i, j; for (i = 0; i < nr_pages; i++) { unsigned long gfn; @@ -385,14 +378,23 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr, else gfn = virt_to_gfn(vaddr); - grefs[i] = gnttab_claim_grant_reference(&gref_head); - gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id, - gfn, 0); + err = gnttab_grant_foreign_access(dev->otherend_id, gfn, 0); + if (err < 0) { + xenbus_dev_fatal(dev, err, + "granting access to ring page"); + goto fail; + } + grefs[i] = err; vaddr = vaddr + XEN_PAGE_SIZE; } return 0; + +fail: + for (j = 0; j < i; j++) + gnttab_end_foreign_access_ref(grefs[j], 0); + return err; } EXPORT_SYMBOL_GPL(xenbus_grant_ring); diff --git a/fs/file_table.c b/fs/file_table.c index ba4418325c26..e49af4caf15d 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -365,7 +365,6 @@ void __fput_sync(struct file *file) } EXPORT_SYMBOL(fput); -EXPORT_SYMBOL(__fput_sync); void __init files_init(void) { diff --git 
a/fs/fuse/dev.c b/fs/fuse/dev.c index 0ff26588335e..1ff5a6b21db0 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -993,17 +993,7 @@ static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep, while (count) { if (cs->write && cs->pipebufs && page) { - /* - * Can't control lifetime of pipe buffers, so always - * copy user pages. - */ - if (cs->req->in.user_pages) { - err = fuse_copy_fill(cs); - if (err) - return err; - } else { - return fuse_ref_page(cs, page, offset, count); - } + return fuse_ref_page(cs, page, offset, count); } else if (!cs->len) { if (cs->move_pages && page && offset == 0 && count == PAGE_SIZE) { diff --git a/fs/fuse/file.c b/fs/fuse/file.c index d90d91232dfe..c1af038920dc 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1320,12 +1320,10 @@ static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii, (PAGE_SIZE - ret) & (PAGE_SIZE - 1); } - if (write) { - req->in.user_pages = 1; + if (write) req->in.argpages = 1; - } else { + else req->out.argpages = 1; - } *nbytesp = nbytes; diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 0981011c10c6..853d37ec81e0 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -174,11 +174,6 @@ struct fuse_in { /** True if the data for the last argument is in req->pages */ unsigned argpages:1; -#ifndef __GENKSYMS__ - /** True if direct write */ - unsigned user_pages:1; -#endif - /** Number of arguments */ unsigned numargs; diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h index 14499757338f..c524216e0363 100644 --- a/include/acpi/acpi_drivers.h +++ b/include/acpi/acpi_drivers.h @@ -81,7 +81,7 @@ int acpi_irq_penalty_init(void); int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering, - int *polarity, char **name); + int *polarity, char **name, struct fwnode_handle **rs_fwnode); int acpi_pci_link_free_irq(acpi_handle handle); /* ACPI PCI Device Binding (pci_bind.c) */ diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h index 
1d4ef0621174..1ab59ac16176 100644 --- a/include/acpi/actbl2.h +++ b/include/acpi/actbl2.h @@ -502,7 +502,8 @@ enum acpi_madt_type { ACPI_MADT_TYPE_GENERIC_MSI_FRAME = 13, ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR = 14, ACPI_MADT_TYPE_GENERIC_TRANSLATOR = 15, - ACPI_MADT_TYPE_RESERVED = 16 /* 16 and greater are reserved */ + ACPI_MADT_TYPE_RESERVED = 16, + ACPI_MADT_TYPE_PHYTIUM_2500 = 128 }; /* diff --git a/include/linux/acpi.h b/include/linux/acpi.h index cd412817654f..4e8d56daa5f0 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -23,6 +23,7 @@ #include #include /* for struct resource */ +#include #include #include #include @@ -323,6 +324,22 @@ int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi); void acpi_set_irq_model(enum acpi_irq_model_id model, struct fwnode_handle *fwnode); +struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags, + unsigned int size, + struct fwnode_handle *fwnode, + const struct irq_domain_ops *ops, + void *host_data); + +#ifdef CONFIG_ACPI_GENERIC_GSI +struct fwnode_handle *acpi_get_irq_source_fwhandle(const struct acpi_resource_source *source); +#else +static inline +struct fwnode_handle *acpi_get_irq_source_fwhandle(const struct acpi_resource_source *source) +{ + return NULL; +} +#endif + #ifdef CONFIG_X86_IO_APIC extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity); #else diff --git a/include/linux/efi.h b/include/linux/efi.h index 9a5d4b499271..3ec7b52f8994 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -566,7 +566,7 @@ typedef efi_status_t efi_get_variable_t (efi_char16_t *name, efi_guid_t *vendor, unsigned long *data_size, void *data); typedef efi_status_t efi_get_next_variable_t (unsigned long *name_size, efi_char16_t *name, efi_guid_t *vendor); -typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor, +typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor, u32 attr, unsigned long data_size, void *data); typedef efi_status_t 
efi_get_next_high_mono_count_t (u32 *count); @@ -672,6 +672,7 @@ void efi_native_runtime_setup(void); #define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f) #define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b) #define LINUX_EFI_TPM_EVENT_LOG_GUID EFI_GUID(0xb7799cb0, 0xeca2, 0x4943, 0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa) +#define LINUX_EFI_MEMRESERVE_TABLE_GUID EFI_GUID(0x888eb0c6, 0x8ede, 0x4ff5, 0xa8, 0xf0, 0x9a, 0xee, 0x5c, 0xb9, 0x77, 0xc2) typedef struct { efi_guid_t guid; @@ -957,6 +958,7 @@ extern struct efi { unsigned long mem_attr_table; /* memory attributes table */ unsigned long rng_seed; /* UEFI firmware random seed */ unsigned long tpm_log; /* TPM2 Event Log table */ + unsigned long mem_reserve; /* Linux EFI memreserve table */ efi_get_time_t *get_time; efi_set_time_t *set_time; efi_get_wakeup_time_t *get_wakeup_time; @@ -1045,6 +1047,7 @@ extern int __init efi_uart_console_only (void); extern u64 efi_mem_desc_end(efi_memory_desc_t *md); extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md); extern void efi_mem_reserve(phys_addr_t addr, u64 size); +extern int efi_mem_reserve_persistent(phys_addr_t addr, u64 size); extern void efi_initialize_iomem_resources(struct resource *code_resource, struct resource *data_resource, struct resource *bss_resource); extern void efi_reserve_boot_services(void); @@ -1707,4 +1710,20 @@ extern struct efi_runtime_work efi_rts_work; /* Workqueue to queue EFI Runtime Services */ extern struct workqueue_struct *efi_rts_wq; +struct linux_efi_memreserve { + int size; // allocated size of the array + atomic_t count; // number of entries used + phys_addr_t next; // pa of next struct instance + struct { + phys_addr_t base; + phys_addr_t size; + } entry[0]; +}; + +#define EFI_MEMRESERVE_SIZE(count) (sizeof(struct linux_efi_memreserve) + \ + (count) * sizeof(((struct 
linux_efi_memreserve *)0)->entry[0])) + +#define EFI_MEMRESERVE_COUNT(size) (((size) - sizeof(struct linux_efi_memreserve)) \ + / sizeof(((struct linux_efi_memreserve *)0)->entry[0])) + #endif /* _LINUX_EFI_H */ diff --git a/include/linux/irq.h b/include/linux/irq.h index a042faefb9b7..23ef1fe2366c 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -208,6 +208,8 @@ struct irq_data { * IRQD_SINGLE_TARGET - IRQ allows only a single affinity target * IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set * IRQD_CAN_RESERVE - Can use reservation mode + * IRQD_HANDLE_ENFORCE_IRQCTX - Enforce that handle_irq_*() is only invoked + * from actual interrupt context. * IRQD_MSI_NOMASK_QUIRK - Non-maskable MSI quirk for affinity change * required * IRQD_AFFINITY_ON_ACTIVATE - Affinity is set on activation. Don't call @@ -235,6 +237,7 @@ enum { IRQD_DEFAULT_TRIGGER_SET = (1 << 25), IRQD_CAN_RESERVE = (1 << 26), IRQD_MSI_NOMASK_QUIRK = (1 << 27), + IRQD_HANDLE_ENFORCE_IRQCTX = (1 << 28), IRQD_AFFINITY_ON_ACTIVATE = (1 << 29), }; @@ -305,6 +308,16 @@ static inline bool irqd_is_single_target(struct irq_data *d) return __irqd_to_state(d) & IRQD_SINGLE_TARGET; } +static inline void irqd_set_handle_enforce_irqctx(struct irq_data *d) +{ + __irqd_to_state(d) |= IRQD_HANDLE_ENFORCE_IRQCTX; +} + +static inline bool irqd_is_handle_enforce_irqctx(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_HANDLE_ENFORCE_IRQCTX; +} + static inline bool irqd_is_wakeup_set(struct irq_data *d) { return __irqd_to_state(d) & IRQD_WAKEUP_STATE; diff --git a/include/linux/irqchip/arm-gic-phytium-2500.h b/include/linux/irqchip/arm-gic-phytium-2500.h new file mode 100644 index 000000000000..2ae61f3293cc --- /dev/null +++ b/include/linux/irqchip/arm-gic-phytium-2500.h @@ -0,0 +1,621 @@ +/* + * Copyright (C) 2020 Phytium Corporation. 
+ * Author: Wang Yinfeng + * Chen Baozi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef __LINUX_IRQCHIP_ARM_GIC_PHYTIUM_2500_H +#define __LINUX_IRQCHIP_ARM_GIC_PHYTIUM_2500_H + +/* + * Distributor registers. We assume we're running non-secure, with ARE + * being set. Secure-only and non-ARE registers are not described. + */ +#define GICD_CTLR 0x0000 +#define GICD_TYPER 0x0004 +#define GICD_IIDR 0x0008 +#define GICD_STATUSR 0x0010 +#define GICD_SETSPI_NSR 0x0040 +#define GICD_CLRSPI_NSR 0x0048 +#define GICD_SETSPI_SR 0x0050 +#define GICD_CLRSPI_SR 0x0058 +#define GICD_SEIR 0x0068 +#define GICD_IGROUPR 0x0080 +#define GICD_ISENABLER 0x0100 +#define GICD_ICENABLER 0x0180 +#define GICD_ISPENDR 0x0200 +#define GICD_ICPENDR 0x0280 +#define GICD_ISACTIVER 0x0300 +#define GICD_ICACTIVER 0x0380 +#define GICD_IPRIORITYR 0x0400 +#define GICD_ICFGR 0x0C00 +#define GICD_IGRPMODR 0x0D00 +#define GICD_NSACR 0x0E00 +#define GICD_IROUTER 0x6000 +#define GICD_IDREGS 0xFFD0 +#define GICD_PIDR2 0xFFE8 + +/* + * Those registers are actually from GICv2, but the spec demands that they + * are implemented as RES0 if ARE is 1 (which we do in KVM's emulated GICv3). 
+ */ +#define GICD_ITARGETSR 0x0800 +#define GICD_SGIR 0x0F00 +#define GICD_CPENDSGIR 0x0F10 +#define GICD_SPENDSGIR 0x0F20 + +#define GICD_CTLR_RWP (1U << 31) +#define GICD_CTLR_DS (1U << 6) +#define GICD_CTLR_ARE_NS (1U << 4) +#define GICD_CTLR_ENABLE_G1A (1U << 1) +#define GICD_CTLR_ENABLE_G1 (1U << 0) + +#define GICD_IIDR_IMPLEMENTER_SHIFT 0 +#define GICD_IIDR_IMPLEMENTER_MASK (0xfff << GICD_IIDR_IMPLEMENTER_SHIFT) +#define GICD_IIDR_REVISION_SHIFT 12 +#define GICD_IIDR_REVISION_MASK (0xf << GICD_IIDR_REVISION_SHIFT) +#define GICD_IIDR_VARIANT_SHIFT 16 +#define GICD_IIDR_VARIANT_MASK (0xf << GICD_IIDR_VARIANT_SHIFT) +#define GICD_IIDR_PRODUCT_ID_SHIFT 24 +#define GICD_IIDR_PRODUCT_ID_MASK (0xff << GICD_IIDR_PRODUCT_ID_SHIFT) + + +/* + * In systems with a single security state (what we emulate in KVM) + * the meaning of the interrupt group enable bits is slightly different + */ +#define GICD_CTLR_ENABLE_SS_G1 (1U << 1) +#define GICD_CTLR_ENABLE_SS_G0 (1U << 0) + +#define GICD_TYPER_RSS (1U << 26) +#define GICD_TYPER_LPIS (1U << 17) +#define GICD_TYPER_MBIS (1U << 16) + +#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1) +#define GICD_TYPER_NUM_LPIS(typer) ((((typer) >> 11) & 0x1f) + 1) +#define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32) + +#define GICD_IROUTER_SPI_MODE_ONE (0U << 31) +#define GICD_IROUTER_SPI_MODE_ANY (1U << 31) + +#define GIC_PIDR2_ARCH_MASK 0xf0 +#define GIC_PIDR2_ARCH_GICv3 0x30 +#define GIC_PIDR2_ARCH_GICv4 0x40 + +#define GIC_V3_DIST_SIZE 0x10000 + +/* + * Re-Distributor registers, offsets from RD_base + */ +#define GICR_CTLR GICD_CTLR +#define GICR_IIDR 0x0004 +#define GICR_TYPER 0x0008 +#define GICR_STATUSR GICD_STATUSR +#define GICR_WAKER 0x0014 +#define GICR_SETLPIR 0x0040 +#define GICR_CLRLPIR 0x0048 +#define GICR_SEIR GICD_SEIR +#define GICR_PROPBASER 0x0070 +#define GICR_PENDBASER 0x0078 +#define GICR_INVLPIR 0x00A0 +#define GICR_INVALLR 0x00B0 +#define GICR_SYNCR 0x00C0 +#define GICR_MOVLPIR 0x0100 +#define 
GICR_MOVALLR 0x0110 +#define GICR_IDREGS GICD_IDREGS +#define GICR_PIDR2 GICD_PIDR2 + +#define GICR_CTLR_ENABLE_LPIS (1UL << 0) +#define GICR_CTLR_RWP (1UL << 3) + +#define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff) + +#define GICR_WAKER_ProcessorSleep (1U << 1) +#define GICR_WAKER_ChildrenAsleep (1U << 2) + +#define GIC_BASER_CACHE_nCnB 0ULL +#define GIC_BASER_CACHE_SameAsInner 0ULL +#define GIC_BASER_CACHE_nC 1ULL +#define GIC_BASER_CACHE_RaWt 2ULL +#define GIC_BASER_CACHE_RaWb 3ULL +#define GIC_BASER_CACHE_WaWt 4ULL +#define GIC_BASER_CACHE_WaWb 5ULL +#define GIC_BASER_CACHE_RaWaWt 6ULL +#define GIC_BASER_CACHE_RaWaWb 7ULL +#define GIC_BASER_CACHE_MASK 7ULL +#define GIC_BASER_NonShareable 0ULL +#define GIC_BASER_InnerShareable 1ULL +#define GIC_BASER_OuterShareable 2ULL +#define GIC_BASER_SHAREABILITY_MASK 3ULL + +#define GIC_BASER_CACHEABILITY(reg, inner_outer, type) \ + (GIC_BASER_CACHE_##type << reg##_##inner_outer##_CACHEABILITY_SHIFT) + +#define GIC_BASER_SHAREABILITY(reg, type) \ + (GIC_BASER_##type << reg##_SHAREABILITY_SHIFT) + +/* encode a size field of width @w containing @n - 1 units */ +#define GIC_ENCODE_SZ(n, w) (((unsigned long)(n) - 1) & GENMASK_ULL(((w) - 1), 0)) + +#define GICR_PROPBASER_SHAREABILITY_SHIFT (10) +#define GICR_PROPBASER_INNER_CACHEABILITY_SHIFT (7) +#define GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT (56) +#define GICR_PROPBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GICR_PROPBASER, SHAREABILITY_MASK) +#define GICR_PROPBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, MASK) +#define GICR_PROPBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, MASK) +#define GICR_PROPBASER_CACHEABILITY_MASK GICR_PROPBASER_INNER_CACHEABILITY_MASK + +#define GICR_PROPBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable) + +#define GICR_PROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nCnB) +#define GICR_PROPBASER_nC 
GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nC) +#define GICR_PROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWt) +#define GICR_PROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWt) +#define GICR_PROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWt) +#define GICR_PROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWb) +#define GICR_PROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWt) +#define GICR_PROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWb) + +#define GICR_PROPBASER_IDBITS_MASK (0x1f) +#define GICR_PROPBASER_ADDRESS(x) ((x) & GENMASK_ULL(51, 12)) +#define GICR_PENDBASER_ADDRESS(x) ((x) & GENMASK_ULL(51, 16)) + +#define GICR_PENDBASER_SHAREABILITY_SHIFT (10) +#define GICR_PENDBASER_INNER_CACHEABILITY_SHIFT (7) +#define GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT (56) +#define GICR_PENDBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GICR_PENDBASER, SHAREABILITY_MASK) +#define GICR_PENDBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, MASK) +#define GICR_PENDBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, MASK) +#define GICR_PENDBASER_CACHEABILITY_MASK GICR_PENDBASER_INNER_CACHEABILITY_MASK + +#define GICR_PENDBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable) + +#define GICR_PENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nCnB) +#define GICR_PENDBASER_nC GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nC) +#define GICR_PENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWt) +#define GICR_PENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWt) +#define GICR_PENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWt) +#define GICR_PENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWb) +#define GICR_PENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWaWt) +#define GICR_PENDBASER_RaWaWb 
GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWaWb) + +#define GICR_PENDBASER_PTZ BIT_ULL(62) + +/* + * Re-Distributor registers, offsets from SGI_base + */ +#define GICR_IGROUPR0 GICD_IGROUPR +#define GICR_ISENABLER0 GICD_ISENABLER +#define GICR_ICENABLER0 GICD_ICENABLER +#define GICR_ISPENDR0 GICD_ISPENDR +#define GICR_ICPENDR0 GICD_ICPENDR +#define GICR_ISACTIVER0 GICD_ISACTIVER +#define GICR_ICACTIVER0 GICD_ICACTIVER +#define GICR_IPRIORITYR0 GICD_IPRIORITYR +#define GICR_ICFGR0 GICD_ICFGR +#define GICR_IGRPMODR0 GICD_IGRPMODR +#define GICR_NSACR GICD_NSACR + +#define GICR_TYPER_PLPIS (1U << 0) +#define GICR_TYPER_VLPIS (1U << 1) +#define GICR_TYPER_DirectLPIS (1U << 3) +#define GICR_TYPER_LAST (1U << 4) + +#define GIC_V3_REDIST_SIZE 0x20000 + +#define LPI_PROP_GROUP1 (1 << 1) +#define LPI_PROP_ENABLED (1 << 0) + +/* + * Re-Distributor registers, offsets from VLPI_base + */ +#define GICR_VPROPBASER 0x0070 + +#define GICR_VPROPBASER_IDBITS_MASK 0x1f + +#define GICR_VPROPBASER_SHAREABILITY_SHIFT (10) +#define GICR_VPROPBASER_INNER_CACHEABILITY_SHIFT (7) +#define GICR_VPROPBASER_OUTER_CACHEABILITY_SHIFT (56) + +#define GICR_VPROPBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GICR_VPROPBASER, SHAREABILITY_MASK) +#define GICR_VPROPBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, MASK) +#define GICR_VPROPBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_VPROPBASER, OUTER, MASK) +#define GICR_VPROPBASER_CACHEABILITY_MASK \ + GICR_VPROPBASER_INNER_CACHEABILITY_MASK + +#define GICR_VPROPBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GICR_VPROPBASER, InnerShareable) + +#define GICR_VPROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nCnB) +#define GICR_VPROPBASER_nC GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nC) +#define GICR_VPROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWt) +#define GICR_VPROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWt) +#define GICR_VPROPBASER_WaWt 
GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWt) +#define GICR_VPROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWb) +#define GICR_VPROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWt) +#define GICR_VPROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWb) + +#define GICR_VPENDBASER 0x0078 + +#define GICR_VPENDBASER_SHAREABILITY_SHIFT (10) +#define GICR_VPENDBASER_INNER_CACHEABILITY_SHIFT (7) +#define GICR_VPENDBASER_OUTER_CACHEABILITY_SHIFT (56) +#define GICR_VPENDBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GICR_VPENDBASER, SHAREABILITY_MASK) +#define GICR_VPENDBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, MASK) +#define GICR_VPENDBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_VPENDBASER, OUTER, MASK) +#define GICR_VPENDBASER_CACHEABILITY_MASK \ + GICR_VPENDBASER_INNER_CACHEABILITY_MASK + +#define GICR_VPENDBASER_NonShareable \ + GIC_BASER_SHAREABILITY(GICR_VPENDBASER, NonShareable) + +#define GICR_VPENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nCnB) +#define GICR_VPENDBASER_nC GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nC) +#define GICR_VPENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt) +#define GICR_VPENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt) +#define GICR_VPENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWt) +#define GICR_VPENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWb) +#define GICR_VPENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWt) +#define GICR_VPENDBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWb) + +#define GICR_VPENDBASER_Dirty (1ULL << 60) +#define GICR_VPENDBASER_PendingLast (1ULL << 61) +#define GICR_VPENDBASER_IDAI (1ULL << 62) +#define GICR_VPENDBASER_Valid (1ULL << 63) + +/* + * ITS registers, offsets from ITS_base + */ +#define GITS_CTLR 0x0000 +#define GITS_IIDR 0x0004 +#define GITS_TYPER 0x0008 +#define 
GITS_CBASER 0x0080 +#define GITS_CWRITER 0x0088 +#define GITS_CREADR 0x0090 +#define GITS_BASER 0x0100 +#define GITS_IDREGS_BASE 0xffd0 +#define GITS_PIDR0 0xffe0 +#define GITS_PIDR1 0xffe4 +#define GITS_PIDR2 GICR_PIDR2 +#define GITS_PIDR4 0xffd0 +#define GITS_CIDR0 0xfff0 +#define GITS_CIDR1 0xfff4 +#define GITS_CIDR2 0xfff8 +#define GITS_CIDR3 0xfffc + +#define GITS_TRANSLATER 0x10040 + +#define GITS_CTLR_ENABLE (1U << 0) +#define GITS_CTLR_ImDe (1U << 1) +#define GITS_CTLR_ITS_NUMBER_SHIFT 4 +#define GITS_CTLR_ITS_NUMBER (0xFU << GITS_CTLR_ITS_NUMBER_SHIFT) +#define GITS_CTLR_QUIESCENT (1U << 31) + +#define GITS_TYPER_PLPIS (1UL << 0) +#define GITS_TYPER_VLPIS (1UL << 1) +#define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4 +#define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0x1f) + 1) +#define GITS_TYPER_IDBITS_SHIFT 8 +#define GITS_TYPER_DEVBITS_SHIFT 13 +#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1) +#define GITS_TYPER_PTA (1UL << 19) +#define GITS_TYPER_HCC_SHIFT 24 +#define GITS_TYPER_HCC(r) (((r) >> GITS_TYPER_HCC_SHIFT) & 0xff) +#define GITS_TYPER_VMOVP (1ULL << 37) + +#define GITS_IIDR_REV_SHIFT 12 +#define GITS_IIDR_REV_MASK (0xf << GITS_IIDR_REV_SHIFT) +#define GITS_IIDR_REV(r) (((r) >> GITS_IIDR_REV_SHIFT) & 0xf) +#define GITS_IIDR_PRODUCTID_SHIFT 24 + +#define GITS_CBASER_VALID (1ULL << 63) +#define GITS_CBASER_SHAREABILITY_SHIFT (10) +#define GITS_CBASER_INNER_CACHEABILITY_SHIFT (59) +#define GITS_CBASER_OUTER_CACHEABILITY_SHIFT (53) +#define GITS_CBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GITS_CBASER, SHAREABILITY_MASK) +#define GITS_CBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, MASK) +#define GITS_CBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GITS_CBASER, OUTER, MASK) +#define GITS_CBASER_CACHEABILITY_MASK GITS_CBASER_INNER_CACHEABILITY_MASK + +#define GITS_CBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GITS_CBASER, InnerShareable) + 
+#define GITS_CBASER_nCnB GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nCnB) +#define GITS_CBASER_nC GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nC) +#define GITS_CBASER_RaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWt) +#define GITS_CBASER_RaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWt) +#define GITS_CBASER_WaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWt) +#define GITS_CBASER_WaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWb) +#define GITS_CBASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWt) +#define GITS_CBASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWb) + +#define GITS_BASER_NR_REGS 8 + +#define GITS_BASER_VALID (1ULL << 63) +#define GITS_BASER_INDIRECT (1ULL << 62) + +#define GITS_BASER_INNER_CACHEABILITY_SHIFT (59) +#define GITS_BASER_OUTER_CACHEABILITY_SHIFT (53) +#define GITS_BASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GITS_BASER, INNER, MASK) +#define GITS_BASER_CACHEABILITY_MASK GITS_BASER_INNER_CACHEABILITY_MASK +#define GITS_BASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, MASK) +#define GITS_BASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GITS_BASER, SHAREABILITY_MASK) + +#define GITS_BASER_nCnB GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nCnB) +#define GITS_BASER_nC GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nC) +#define GITS_BASER_RaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWt) +#define GITS_BASER_RaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWt) +#define GITS_BASER_WaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWt) +#define GITS_BASER_WaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWb) +#define GITS_BASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWaWt) +#define GITS_BASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWaWb) + +#define GITS_BASER_TYPE_SHIFT (56) +#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7) +#define GITS_BASER_ENTRY_SIZE_SHIFT (48) +#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 
1) +#define GITS_BASER_ENTRY_SIZE_MASK GENMASK_ULL(52, 48) +#define GITS_BASER_PHYS_52_to_48(phys) \ + (((phys) & GENMASK_ULL(47, 16)) | (((phys) >> 48) & 0xf) << 12) +#define GITS_BASER_SHAREABILITY_SHIFT (10) +#define GITS_BASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable) +#define GITS_BASER_PAGE_SIZE_SHIFT (8) +#define GITS_BASER_PAGE_SIZE_4K (0ULL << GITS_BASER_PAGE_SIZE_SHIFT) +#define GITS_BASER_PAGE_SIZE_16K (1ULL << GITS_BASER_PAGE_SIZE_SHIFT) +#define GITS_BASER_PAGE_SIZE_64K (2ULL << GITS_BASER_PAGE_SIZE_SHIFT) +#define GITS_BASER_PAGE_SIZE_MASK (3ULL << GITS_BASER_PAGE_SIZE_SHIFT) +#define GITS_BASER_PAGES_MAX 256 +#define GITS_BASER_PAGES_SHIFT (0) +#define GITS_BASER_NR_PAGES(r) (((r) & 0xff) + 1) + +#define GITS_BASER_TYPE_NONE 0 +#define GITS_BASER_TYPE_DEVICE 1 +#define GITS_BASER_TYPE_VCPU 2 +#define GITS_BASER_TYPE_RESERVED3 3 +#define GITS_BASER_TYPE_COLLECTION 4 +#define GITS_BASER_TYPE_RESERVED5 5 +#define GITS_BASER_TYPE_RESERVED6 6 +#define GITS_BASER_TYPE_RESERVED7 7 + +#define GITS_LVL1_ENTRY_SIZE (8UL) + +/* + * ITS commands + */ +#define GITS_CMD_MAPD 0x08 +#define GITS_CMD_MAPC 0x09 +#define GITS_CMD_MAPTI 0x0a +#define GITS_CMD_MAPI 0x0b +#define GITS_CMD_MOVI 0x01 +#define GITS_CMD_DISCARD 0x0f +#define GITS_CMD_INV 0x0c +#define GITS_CMD_MOVALL 0x0e +#define GITS_CMD_INVALL 0x0d +#define GITS_CMD_INT 0x03 +#define GITS_CMD_CLEAR 0x04 +#define GITS_CMD_SYNC 0x05 + +/* + * GICv4 ITS specific commands + */ +#define GITS_CMD_GICv4(x) ((x) | 0x20) +#define GITS_CMD_VINVALL GITS_CMD_GICv4(GITS_CMD_INVALL) +#define GITS_CMD_VMAPP GITS_CMD_GICv4(GITS_CMD_MAPC) +#define GITS_CMD_VMAPTI GITS_CMD_GICv4(GITS_CMD_MAPTI) +#define GITS_CMD_VMOVI GITS_CMD_GICv4(GITS_CMD_MOVI) +#define GITS_CMD_VSYNC GITS_CMD_GICv4(GITS_CMD_SYNC) +/* VMOVP is the odd one, as it doesn't have a physical counterpart */ +#define GITS_CMD_VMOVP GITS_CMD_GICv4(2) + +/* + * ITS error numbers + */ +#define E_ITS_MOVI_UNMAPPED_INTERRUPT 0x010107 
+#define E_ITS_MOVI_UNMAPPED_COLLECTION 0x010109 +#define E_ITS_INT_UNMAPPED_INTERRUPT 0x010307 +#define E_ITS_CLEAR_UNMAPPED_INTERRUPT 0x010507 +#define E_ITS_MAPD_DEVICE_OOR 0x010801 +#define E_ITS_MAPD_ITTSIZE_OOR 0x010802 +#define E_ITS_MAPC_PROCNUM_OOR 0x010902 +#define E_ITS_MAPC_COLLECTION_OOR 0x010903 +#define E_ITS_MAPTI_UNMAPPED_DEVICE 0x010a04 +#define E_ITS_MAPTI_ID_OOR 0x010a05 +#define E_ITS_MAPTI_PHYSICALID_OOR 0x010a06 +#define E_ITS_INV_UNMAPPED_INTERRUPT 0x010c07 +#define E_ITS_INVALL_UNMAPPED_COLLECTION 0x010d09 +#define E_ITS_MOVALL_PROCNUM_OOR 0x010e01 +#define E_ITS_DISCARD_UNMAPPED_INTERRUPT 0x010f07 + +/* + * CPU interface registers + */ +#define ICC_CTLR_EL1_EOImode_SHIFT (1) +#define ICC_CTLR_EL1_EOImode_drop_dir (0U << ICC_CTLR_EL1_EOImode_SHIFT) +#define ICC_CTLR_EL1_EOImode_drop (1U << ICC_CTLR_EL1_EOImode_SHIFT) +#define ICC_CTLR_EL1_EOImode_MASK (1 << ICC_CTLR_EL1_EOImode_SHIFT) +#define ICC_CTLR_EL1_CBPR_SHIFT 0 +#define ICC_CTLR_EL1_CBPR_MASK (1 << ICC_CTLR_EL1_CBPR_SHIFT) +#define ICC_CTLR_EL1_PRI_BITS_SHIFT 8 +#define ICC_CTLR_EL1_PRI_BITS_MASK (0x7 << ICC_CTLR_EL1_PRI_BITS_SHIFT) +#define ICC_CTLR_EL1_ID_BITS_SHIFT 11 +#define ICC_CTLR_EL1_ID_BITS_MASK (0x7 << ICC_CTLR_EL1_ID_BITS_SHIFT) +#define ICC_CTLR_EL1_SEIS_SHIFT 14 +#define ICC_CTLR_EL1_SEIS_MASK (0x1 << ICC_CTLR_EL1_SEIS_SHIFT) +#define ICC_CTLR_EL1_A3V_SHIFT 15 +#define ICC_CTLR_EL1_A3V_MASK (0x1 << ICC_CTLR_EL1_A3V_SHIFT) +#define ICC_CTLR_EL1_RSS (0x1 << 18) +#define ICC_PMR_EL1_SHIFT 0 +#define ICC_PMR_EL1_MASK (0xff << ICC_PMR_EL1_SHIFT) +#define ICC_BPR0_EL1_SHIFT 0 +#define ICC_BPR0_EL1_MASK (0x7 << ICC_BPR0_EL1_SHIFT) +#define ICC_BPR1_EL1_SHIFT 0 +#define ICC_BPR1_EL1_MASK (0x7 << ICC_BPR1_EL1_SHIFT) +#define ICC_IGRPEN0_EL1_SHIFT 0 +#define ICC_IGRPEN0_EL1_MASK (1 << ICC_IGRPEN0_EL1_SHIFT) +#define ICC_IGRPEN1_EL1_SHIFT 0 +#define ICC_IGRPEN1_EL1_MASK (1 << ICC_IGRPEN1_EL1_SHIFT) +#define ICC_SRE_EL1_DIB (1U << 2) +#define ICC_SRE_EL1_DFB (1U << 1) +#define 
ICC_SRE_EL1_SRE (1U << 0) + +/* + * Hypervisor interface registers (SRE only) + */ +#define ICH_LR_VIRTUAL_ID_MASK ((1ULL << 32) - 1) + +#define ICH_LR_EOI (1ULL << 41) +#define ICH_LR_GROUP (1ULL << 60) +#define ICH_LR_HW (1ULL << 61) +#define ICH_LR_STATE (3ULL << 62) +#define ICH_LR_PENDING_BIT (1ULL << 62) +#define ICH_LR_ACTIVE_BIT (1ULL << 63) +#define ICH_LR_PHYS_ID_SHIFT 32 +#define ICH_LR_PHYS_ID_MASK (0x3ffULL << ICH_LR_PHYS_ID_SHIFT) +#define ICH_LR_PRIORITY_SHIFT 48 +#define ICH_LR_PRIORITY_MASK (0xffULL << ICH_LR_PRIORITY_SHIFT) + +/* These are for GICv2 emulation only */ +#define GICH_LR_VIRTUALID (0x3ffUL << 0) +#define GICH_LR_PHYSID_CPUID_SHIFT (10) +#define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT) + +#define ICH_MISR_EOI (1 << 0) +#define ICH_MISR_U (1 << 1) + +#define ICH_HCR_EN (1 << 0) +#define ICH_HCR_UIE (1 << 1) +#define ICH_HCR_NPIE (1 << 3) +#define ICH_HCR_TC (1 << 10) +#define ICH_HCR_TALL0 (1 << 11) +#define ICH_HCR_TALL1 (1 << 12) +#define ICH_HCR_EOIcount_SHIFT 27 +#define ICH_HCR_EOIcount_MASK (0x1f << ICH_HCR_EOIcount_SHIFT) + +#define ICH_VMCR_ACK_CTL_SHIFT 2 +#define ICH_VMCR_ACK_CTL_MASK (1 << ICH_VMCR_ACK_CTL_SHIFT) +#define ICH_VMCR_FIQ_EN_SHIFT 3 +#define ICH_VMCR_FIQ_EN_MASK (1 << ICH_VMCR_FIQ_EN_SHIFT) +#define ICH_VMCR_CBPR_SHIFT 4 +#define ICH_VMCR_CBPR_MASK (1 << ICH_VMCR_CBPR_SHIFT) +#define ICH_VMCR_EOIM_SHIFT 9 +#define ICH_VMCR_EOIM_MASK (1 << ICH_VMCR_EOIM_SHIFT) +#define ICH_VMCR_BPR1_SHIFT 18 +#define ICH_VMCR_BPR1_MASK (7 << ICH_VMCR_BPR1_SHIFT) +#define ICH_VMCR_BPR0_SHIFT 21 +#define ICH_VMCR_BPR0_MASK (7 << ICH_VMCR_BPR0_SHIFT) +#define ICH_VMCR_PMR_SHIFT 24 +#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT) +#define ICH_VMCR_ENG0_SHIFT 0 +#define ICH_VMCR_ENG0_MASK (1 << ICH_VMCR_ENG0_SHIFT) +#define ICH_VMCR_ENG1_SHIFT 1 +#define ICH_VMCR_ENG1_MASK (1 << ICH_VMCR_ENG1_SHIFT) + +#define ICH_VTR_PRI_BITS_SHIFT 29 +#define ICH_VTR_PRI_BITS_MASK (7 << ICH_VTR_PRI_BITS_SHIFT) +#define 
ICH_VTR_ID_BITS_SHIFT 23 +#define ICH_VTR_ID_BITS_MASK (7 << ICH_VTR_ID_BITS_SHIFT) +#define ICH_VTR_SEIS_SHIFT 22 +#define ICH_VTR_SEIS_MASK (1 << ICH_VTR_SEIS_SHIFT) +#define ICH_VTR_A3V_SHIFT 21 +#define ICH_VTR_A3V_MASK (1 << ICH_VTR_A3V_SHIFT) + +#define ICC_IAR1_EL1_SPURIOUS 0x3ff + +#define ICC_SRE_EL2_SRE (1 << 0) +#define ICC_SRE_EL2_ENABLE (1 << 3) + +#define ICC_SGI1R_TARGET_LIST_SHIFT 0 +#define ICC_SGI1R_TARGET_LIST_MASK (0xffff << ICC_SGI1R_TARGET_LIST_SHIFT) +#define ICC_SGI1R_AFFINITY_1_SHIFT 16 +#define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT) +#define ICC_SGI1R_SGI_ID_SHIFT 24 +#define ICC_SGI1R_SGI_ID_MASK (0xfULL << ICC_SGI1R_SGI_ID_SHIFT) +#define ICC_SGI1R_AFFINITY_2_SHIFT 32 +#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT) +#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40 +#define ICC_SGI1R_RS_SHIFT 44 +#define ICC_SGI1R_RS_MASK (0xfULL << ICC_SGI1R_RS_SHIFT) +#define ICC_SGI1R_AFFINITY_3_SHIFT 48 +#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT) + +#include + +#ifndef __ASSEMBLY__ + +/* + * We need a value to serve as a irq-type for LPIs. Choose one that will + * hopefully pique the interest of the reviewer. 
+ */ +#define GIC_IRQ_TYPE_LPI 0xa110c8ed + +struct rdists { + struct { + void __iomem *rd_base; + struct page *pend_page; + phys_addr_t phys_base; + bool lpi_enabled; + } __percpu *rdist; + phys_addr_t prop_table_pa; + void *prop_table_va; + u64 flags; + u32 gicd_typer; + bool has_vlpis; + bool has_direct_lpi; +}; + +struct irq_domain; +struct fwnode_handle; +int phytium_its_cpu_init(void); +int phytium_its_init(struct fwnode_handle *handle, struct rdists *rdists, + struct irq_domain *domain); + +static inline bool gic_enable_sre(void) +{ + u32 val; + + val = gic_read_sre(); + if (val & ICC_SRE_EL1_SRE) + return true; + + val |= ICC_SRE_EL1_SRE; + gic_write_sre(val); + val = gic_read_sre(); + + return !!(val & ICC_SRE_EL1_SRE); +} + +#endif + +#endif diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 1d21e98d6854..1f33daa5c674 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -585,8 +585,10 @@ struct rdists { void __iomem *rd_base; struct page *pend_page; phys_addr_t phys_base; + bool lpi_enabled; } __percpu *rdist; - struct page *prop_page; + phys_addr_t prop_table_pa; + void *prop_table_va; u64 flags; u32 gicd_typer; bool has_vlpis; diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 2acdd046df2d..1b06db7a6096 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -301,6 +301,9 @@ static inline int memblock_get_region_node(const struct memblock_region *r) } #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ +void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align, + phys_addr_t min_addr, phys_addr_t max_addr, + int nid); phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid); phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid); diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index c0dd2f749d3f..2019eca4cbdc 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h 
@@ -3118,4 +3118,6 @@ #define PCI_VENDOR_ID_NCUBE 0x10ff +#define PCI_VENDOR_ID_PHYTIUM 0x1db7 + #endif /* _LINUX_PCI_IDS_H */ diff --git a/include/net/esp.h b/include/net/esp.h index 465e38890ee9..117652eb6ea3 100644 --- a/include/net/esp.h +++ b/include/net/esp.h @@ -4,8 +4,6 @@ #include -#define ESP_SKB_FRAG_MAXSIZE (PAGE_SIZE << SKB_FRAG_PAGE_ORDER) - struct ip_esp_hdr; static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb) diff --git a/include/net/sock.h b/include/net/sock.h index d280da601c25..bc752237dff3 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -2512,9 +2512,6 @@ extern int sysctl_optmem_max; extern __u32 sysctl_wmem_default; extern __u32 sysctl_rmem_default; -#define SKB_FRAG_PAGE_ORDER get_order(32768) -DECLARE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key); - static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto) { /* Does this proto have per netns sysctl_wmem ? */ diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h index cd1773d0e08f..935f53b9b771 100644 --- a/include/sound/hdaudio.h +++ b/include/sound/hdaudio.h @@ -354,6 +354,7 @@ struct hdac_bus { bool align_bdle_4k:1; /* BDLE align 4K boundary */ bool reverse_assign:1; /* assign devices in reverse order */ bool corbrp_self_clear:1; /* CORBRP clears itself after reset */ + bool cmd_resend:1; /* command resend */ int bdl_pos_adj; /* BDL position adjustment */ diff --git a/include/sound/hdmi-codec.h b/include/sound/hdmi-codec.h index 9483c55f871b..1e3f2d0fe547 100644 --- a/include/sound/hdmi-codec.h +++ b/include/sound/hdmi-codec.h @@ -55,6 +55,9 @@ struct hdmi_codec_params { int channels; }; +typedef void (*hdmi_codec_plugged_cb)(struct device *dev, + bool plugged); + struct hdmi_codec_pdata; struct hdmi_codec_ops { /* @@ -96,6 +99,14 @@ struct hdmi_codec_ops { */ int (*get_dai_id)(struct snd_soc_component *comment, struct device_node *endpoint); + + /* + * Hook callback function to handle connector plug event. 
+ * Optional + */ + int (*hook_plugged_cb)(struct device *dev, void *data, + hdmi_codec_plugged_cb fn, + struct device *codec_dev); }; /* HDMI codec initalization data */ @@ -107,6 +118,12 @@ struct hdmi_codec_pdata { void *data; }; +struct snd_soc_component; +struct snd_soc_jack; + +int hdmi_codec_set_jack_detect(struct snd_soc_component *component, + struct snd_soc_jack *jack); + #define HDMI_CODEC_DRV_NAME "hdmi-audio-codec" #endif /* __HDMI_CODEC_H__ */ diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h index dce5f9dae121..998ca4e67c0e 100644 --- a/include/uapi/linux/serial_core.h +++ b/include/uapi/linux/serial_core.h @@ -281,4 +281,7 @@ /* MediaTek BTIF */ #define PORT_MTK_BTIF 117 +/* Phytium PCI UART */ +#define PORT_PHYTIUM 118 + #endif /* _UAPILINUX_SERIAL_CORE_H */ diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h index 7628ab25f686..a9978350b45b 100644 --- a/include/xen/grant_table.h +++ b/include/xen/grant_table.h @@ -97,22 +97,10 @@ int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly); * access has been ended, free the given page too. Access will be ended * immediately iff the grant entry is not in use, otherwise it will happen * some time later. page may be 0, in which case no freeing will occur. - * Note that the granted page might still be accessed (read or write) by the - * other side after gnttab_end_foreign_access() returns, so even if page was - * specified as 0 it is not allowed to just reuse the page for other - * purposes immediately. */ void gnttab_end_foreign_access(grant_ref_t ref, int readonly, unsigned long page); -/* - * End access through the given grant reference, iff the grant entry is - * no longer in use. In case of success ending foreign access, the - * grant reference is deallocated. - * Return 1 if the grant entry was freed, 0 if it is still in use. 
- */ -int gnttab_try_end_foreign_access(grant_ref_t ref); - int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn); unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref); diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 543597e33cff..2a8c41f12d45 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -583,15 +583,10 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, */ for (i = 0; i < nslots; i++) io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT); + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && + (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) + swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE); - /* - * When dir == DMA_FROM_DEVICE we could omit the copy from the orig - * to the tlb buffer, if we knew for sure the device will - * overwirte the entire current content. But we don't. Thus - * unconditional bounce may prevent leaking swiotlb content (i.e. - * kernel memory) to user-space. - */ - swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE); return tlb_addr; } diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c index b3f55dd581b0..9473b1d47199 100644 --- a/kernel/irq/debugfs.c +++ b/kernel/irq/debugfs.c @@ -209,8 +209,7 @@ static ssize_t irq_debug_write(struct file *file, const char __user *user_buf, err = -EINVAL; } else { desc->istate |= IRQS_PENDING; - check_irq_resend(desc); - err = 0; + err = check_irq_resend(desc); } raw_spin_unlock_irqrestore(&desc->lock, flags); diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 3f4618510d05..af2e9941f329 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -106,7 +106,7 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc); irqreturn_t handle_irq_event(struct irq_desc *desc); /* Resending of interrupts :*/ -void check_irq_resend(struct irq_desc *desc); +int check_irq_resend(struct irq_desc *desc); bool irq_wait_for_poll(struct irq_desc *desc); void __irq_wake_thread(struct irq_desc *desc, struct 
irqaction *action); @@ -420,6 +420,10 @@ static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc) { return desc->pending_mask; } +static inline bool handle_enforce_irqctx(struct irq_data *data) +{ + return irqd_is_handle_enforce_irqctx(data); +} bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear); #else /* CONFIG_GENERIC_PENDING_IRQ */ static inline bool irq_can_move_pcntxt(struct irq_data *data) @@ -446,6 +450,10 @@ static inline bool irq_fixup_move_pending(struct irq_desc *desc, bool fclear) { return false; } +static inline bool handle_enforce_irqctx(struct irq_data *data) +{ + return false; +} #endif /* !CONFIG_GENERIC_PENDING_IRQ */ #if !defined(CONFIG_IRQ_DOMAIN) || !defined(CONFIG_IRQ_DOMAIN_HIERARCHY) diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 26814a14013c..5742f706503b 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -633,9 +633,15 @@ void irq_init_desc(unsigned int irq) int generic_handle_irq(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); + struct irq_data *data; if (!desc) return -EINVAL; + + data = irq_desc_get_irq_data(desc); + if (WARN_ON_ONCE(!in_irq() && handle_enforce_irqctx(data))) + return -EPERM; + generic_handle_irq_desc(desc); return 0; } diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c index 98c04ca5fa43..7de48bc06c75 100644 --- a/kernel/irq/resend.c +++ b/kernel/irq/resend.c @@ -47,56 +47,88 @@ static void resend_irqs(unsigned long arg) /* Tasklet to handle resend: */ static DECLARE_TASKLET(resend_tasklet, resend_irqs, 0); +static int irq_sw_resend(struct irq_desc *desc) +{ + unsigned int irq = irq_desc_get_irq(desc); + + /* + * Validate whether this interrupt can be safely injected from + * non interrupt context + */ + if (handle_enforce_irqctx(&desc->irq_data)) + return -EINVAL; + + /* + * If the interrupt is running in the thread context of the parent + * irq we need to be careful, because we cannot trigger it + * directly. 
+ */ + if (irq_settings_is_nested_thread(desc)) { + /* + * If the parent_irq is valid, we retrigger the parent, + * otherwise we do nothing. + */ + if (!desc->parent_irq) + return -EINVAL; + irq = desc->parent_irq; + } + + /* Set it pending and activate the softirq: */ + set_bit(irq, irqs_resend); + tasklet_schedule(&resend_tasklet); + return 0; +} + +#else +static int irq_sw_resend(struct irq_desc *desc) +{ + return -EINVAL; +} +#endif + +static int try_retrigger(struct irq_desc *desc) +{ + if (desc->irq_data.chip->irq_retrigger) + return desc->irq_data.chip->irq_retrigger(&desc->irq_data); + +#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY + return irq_chip_retrigger_hierarchy(&desc->irq_data); +#else + return 0; #endif +} /* * IRQ resend * * Is called with interrupts disabled and desc->lock held. */ -void check_irq_resend(struct irq_desc *desc) +int check_irq_resend(struct irq_desc *desc) { + int err = 0; + /* - * We do not resend level type interrupts. Level type - * interrupts are resent by hardware when they are still - * active. Clear the pending bit so suspend/resume does not - * get confused. + * We do not resend level type interrupts. Level type interrupts + * are resent by hardware when they are still active. Clear the + * pending bit so suspend/resume does not get confused. */ if (irq_settings_is_level(desc)) { desc->istate &= ~IRQS_PENDING; - return; + return -EINVAL; } if (desc->istate & IRQS_REPLAY) - return; - if (desc->istate & IRQS_PENDING) { - desc->istate &= ~IRQS_PENDING; - desc->istate |= IRQS_REPLAY; + return -EBUSY; - if (!desc->irq_data.chip->irq_retrigger || - !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) { -#ifdef CONFIG_HARDIRQS_SW_RESEND - unsigned int irq = irq_desc_get_irq(desc); - - /* - * If the interrupt is running in the thread - * context of the parent irq we need to be - * careful, because we cannot trigger it - * directly. 
- */ - if (irq_settings_is_nested_thread(desc)) { - /* - * If the parent_irq is valid, we - * retrigger the parent, otherwise we - * do nothing. - */ - if (!desc->parent_irq) - return; - irq = desc->parent_irq; - } - /* Set it pending and activate the softirq: */ - set_bit(irq, irqs_resend); - tasklet_schedule(&resend_tasklet); -#endif - } - } + if (!(desc->istate & IRQS_PENDING)) + return 0; + + desc->istate &= ~IRQS_PENDING; + + if (!try_retrigger(desc)) + err = irq_sw_resend(desc); + + /* If the retrigger was successfull, mark it with the REPLAY bit */ + if (!err) + desc->istate |= IRQS_REPLAY; + return err; } diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c index c7471c3fb798..16c09cda3b02 100644 --- a/kernel/locking/qrwlock.c +++ b/kernel/locking/qrwlock.c @@ -70,6 +70,8 @@ EXPORT_SYMBOL(queued_read_lock_slowpath); */ void queued_write_lock_slowpath(struct qrwlock *lock) { + int cnts; + /* Put the writer into the wait queue */ arch_spin_lock(&lock->wait_lock); @@ -83,9 +85,8 @@ void queued_write_lock_slowpath(struct qrwlock *lock) /* When no more readers or writers, set the locked flag */ do { - atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING); - } while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING, - _QW_LOCKED) != _QW_WAITING); + cnts = atomic_cond_read_relaxed(&lock->cnts, VAL == _QW_WAITING); + } while (!atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)); unlock: arch_spin_unlock(&lock->wait_lock); } diff --git a/lib/crc32.c b/lib/crc32.c index 1a5d08470044..17acc6aa540b 100644 --- a/lib/crc32.c +++ b/lib/crc32.c @@ -183,21 +183,21 @@ static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p, } #if CRC_LE_BITS == 1 -u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) +u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len) { return crc32_le_generic(crc, p, len, NULL, CRC32_POLY_LE); } -u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len) +u32 __pure __weak 
__crc32c_le(u32 crc, unsigned char const *p, size_t len) { return crc32_le_generic(crc, p, len, NULL, CRC32C_POLY_LE); } #else -u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) +u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len) { return crc32_le_generic(crc, p, len, (const u32 (*)[256])crc32table_le, CRC32_POLY_LE); } -u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len) +u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len) { return crc32_le_generic(crc, p, len, (const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE); @@ -206,6 +206,9 @@ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len) EXPORT_SYMBOL(crc32_le); EXPORT_SYMBOL(__crc32c_le); +u32 crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le); +u32 __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le); + /* * This multiplies the polynomials x and y modulo the given modulus. * This follows the "little-endian" CRC convention that the lsbit diff --git a/mm/memblock.c b/mm/memblock.c index bb4e32c6b19e..20b874cff3eb 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -1349,7 +1349,7 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i static void * __init memblock_virt_alloc_internal( phys_addr_t size, phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, - int nid) + int nid, bool exact_nid) { phys_addr_t alloc; void *ptr; @@ -1377,7 +1377,7 @@ static void * __init memblock_virt_alloc_internal( if (alloc && !memblock_reserve(alloc, size)) goto done; - if (nid != NUMA_NO_NODE) { + if (nid != NUMA_NO_NODE && !exact_nid) { alloc = memblock_find_in_range_node(size, align, min_addr, max_addr, NUMA_NO_NODE, flags); @@ -1443,7 +1443,7 @@ void * __init memblock_virt_alloc_try_nid_raw( &max_addr, (void *)_RET_IP_); ptr = memblock_virt_alloc_internal(size, align, - min_addr, max_addr, nid); + min_addr, max_addr, nid, false); #ifdef CONFIG_DEBUG_VM if (ptr && size > 
0) memset(ptr, PAGE_POISON_PATTERN, size); @@ -1451,6 +1451,43 @@ void * __init memblock_virt_alloc_try_nid_raw( return ptr; } +/** + * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node + * without zeroing memory + * @size: size of memory block to be allocated in bytes + * @align: alignment of the region and block's size + * @min_addr: the lower bound of the memory region from where the allocation + * is preferred (phys address) + * @max_addr: the upper bound of the memory region from where the allocation + * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to + * allocate only from memory limited by memblock.current_limit value + * @nid: nid of the free area to find, %NUMA_NO_NODE for any node + * + * Public function, provides additional debug information (including caller + * info), if enabled. Does not zero allocated memory. + * + * Return: + * Virtual address of allocated memory block on success, NULL on failure. + */ +void * __init memblock_alloc_exact_nid_raw( + phys_addr_t size, phys_addr_t align, + phys_addr_t min_addr, phys_addr_t max_addr, + int nid) +{ + void *ptr; + + memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n", + __func__, (u64)size, (u64)align, nid, &min_addr, + &max_addr, (void *)_RET_IP_); + + ptr = memblock_virt_alloc_internal(size, align, + min_addr, max_addr, nid, true); + if (ptr && size > 0) + memset(ptr, PAGE_POISON_PATTERN, size); + + return ptr; +} + /** * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block * @size: size of memory block to be allocated in bytes @@ -1480,7 +1517,7 @@ void * __init memblock_virt_alloc_try_nid_nopanic( &max_addr, (void *)_RET_IP_); ptr = memblock_virt_alloc_internal(size, align, - min_addr, max_addr, nid); + min_addr, max_addr, nid, false); if (ptr) memset(ptr, 0, size); return ptr; @@ -1515,7 +1552,7 @@ void * __init memblock_virt_alloc_try_nid( __func__, (u64)size, (u64)align, nid, &min_addr, &max_addr, (void *)_RET_IP_); ptr = 
memblock_virt_alloc_internal(size, align, - min_addr, max_addr, nid); + min_addr, max_addr, nid, false); if (ptr) { memset(ptr, 0, size); return ptr; diff --git a/mm/sparse.c b/mm/sparse.c index 3b24ba903d9e..d2289723b350 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -17,6 +17,8 @@ #include #include +#include + /* * Permanent SPARSEMEM data: * @@ -405,7 +407,7 @@ static void __init sparse_buffer_init(unsigned long size, int nid) { WARN_ON(sparsemap_buf); /* forgot to call sparse_buffer_fini()? */ sparsemap_buf = - memblock_virt_alloc_try_nid_raw(size, PAGE_SIZE, + memblock_alloc_exact_nid_raw(size, section_map_size(), __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid); sparsemap_buf_end = sparsemap_buf + size; diff --git a/net/core/sock.c b/net/core/sock.c index b9cfe5589d38..d243a6f41267 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2189,7 +2189,8 @@ static void sk_leave_memory_pressure(struct sock *sk) } } -DEFINE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key); +/* On 32bit arches, an skb frag is limited to 2^15 */ +#define SKB_FRAG_PAGE_ORDER get_order(32768) /** * skb_page_frag_refill - check that a page_frag contains enough room diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index cde9a5fd4149..114f9def1ec5 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -276,7 +276,6 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info * struct page *page; struct sk_buff *trailer; int tailen = esp->tailen; - unsigned int allocsz; /* this is non-NULL only with UDP Encapsulation */ if (x->encap) { @@ -286,10 +285,6 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info * return err; } - allocsz = ALIGN(skb->data_len + tailen, L1_CACHE_BYTES); - if (allocsz > ESP_SKB_FRAG_MAXSIZE) - goto cow; - if (!skb_cloned(skb)) { if (tailen <= skb_tailroom(skb)) { nfrags = 1; diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 53987b1c227c..a7d996148eed 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ 
-242,11 +242,6 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info struct page *page; struct sk_buff *trailer; int tailen = esp->tailen; - unsigned int allocsz; - - allocsz = ALIGN(skb->data_len + tailen, L1_CACHE_BYTES); - if (allocsz > ESP_SKB_FRAG_MAXSIZE) - goto cow; if (!skb_cloned(skb)) { if (tailen <= skb_tailroom(skb)) { diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 7ca4bb09c48a..5e7c13aa66d0 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -768,7 +768,10 @@ void xprt_connect(struct rpc_task *task) if (!xprt_lock_write(xprt, task)) return; - if (!xprt_connected(xprt) && !test_bit(XPRT_CLOSE_WAIT, &xprt->state)) { + if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state)) + xprt->ops->close(xprt); + + if (!xprt_connected(xprt)) { task->tk_rqstp->rq_bytes_sent = 0; task->tk_timeout = task->tk_rqstp->rq_timeout; task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie; diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 917724875a64..9dc059dea689 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -557,7 +557,7 @@ static int xs_local_send_request(struct rpc_task *task) -status); /* fall through */ case -EPIPE: - xprt_force_disconnect(xprt); + xs_close(xprt); status = -ENOTCONN; } @@ -845,16 +845,6 @@ static void xs_reset_transport(struct sock_xprt *transport) if (sk == NULL) return; - /* - * Make sure we're calling this in a context from which it is safe - * to call __fput_sync(). In practice that means rpciod and the - * system workqueue. 
- */ - if (!(current->flags & PF_WQ_WORKER)) { - WARN_ON_ONCE(1); - set_bit(XPRT_CLOSE_WAIT, &xprt->state); - return; - } if (atomic_read(&transport->xprt.swapper)) sk_clear_memalloc(sk); diff --git a/patch/4.19.9-changelog b/patch/4.19.9-changelog new file mode 100755 index 000000000000..3467bcd2913f --- /dev/null +++ b/patch/4.19.9-changelog @@ -0,0 +1,336 @@ +2022-07-21 Linux内核 (版本:4.19.9) + + X100显示控制器驱动升级 + 相关文件 + drivers/gpu/drm/phytium/* + X100休眠与DMA问题修复 + 相关文件 + drivers/pci/quirks.c + 相关上游补丁 + b4aecf78083d8c6424657c1746c7c3de6e61669f + X100 USB控制器驱动升级 + 相关文件 + drivers/usb/host/xhci-mem.c + drivers/usb/host/xhci-pci.c + drivers/usb/host/xhci.h + 相关上游补丁 + 4fdc1790e6a9ef22399c6bc6e63b80f4609f3b7e + 22454b79e6de05fa61a2a72d00d2eed798abbb75 + X100 SDIO/CAN/I2S驱动缺陷修复 + 相关文件 + drivers/mmc/host/phytium-mci-pci.c + drivers/net/can/phytium_can_pci.c + sound/soc/phytium/phytium_i2s.c + 修复SPI控制器平台驱动在设备树下不正确工作问题 + 相关文件 + drivers/spi/spi-phytium-plat.c + S系列中断控制器驱动更新 + 相关文件 + drivers/irqchip/irq-gic-phytium-2500-its.c + drivers/irqchip/irq-gic-v3-its.c + FT-2000+ IOMMU修复 + 相关文件 + drivers/iommu/arm-smmu.c + CPU核设备树标识字符串更新 + Windows@KVM(ARM64)补丁移植 + 相关上游补丁 + 731532176716e2775a5d21115bb9c5c61e0cb704 + + 2021-12-20 Linux内核 (版本:4.19.8) + + 新增FT-2000/4C SoC支持 + 相关文件 + arch/arm64/boot/dts/phytium/Makefile + arch/arm64/boot/dts/phytium/ft2004c-devboard-dsk.dts + arch/arm64/boot/dts/phytium/ft2004c-generic-psci-soc.dtsi + Documentation/devicetree/bindings/interrupt-controller/phytium,d2000-ixic.txt + drivers/irqchip/irq-phytium-ixic.c + 新增飞腾PCI Vendor ID + 相关文件 + include/linux/pci_ids.h + 新增HDA控制器重传机制,规避链路传输失败问题 + 相关文件 + include/sound/hdaudio.h + sound/hda/hdac_controller.c + sound/pci/hda/hda_phytium.c + S2500中断控制器多队列中断分布问题修复以及更好的Kdump支持 + 相关文件 + drivers/irqchip/irq-gic-phytium-2500-its.c + drivers/irqchip/irq-gic-phytium-2500.c + 新增X100驱动支持 + CAN + 相关文件 + drivers/net/can/Kconfig + drivers/net/can/Makefile + drivers/net/can/phytium-can.c + drivers/net/can/phytium_can.c + 
drivers/net/can/phytium_can.h + drivers/net/can/phytium_can_pci.c + drivers/net/can/phytium_can_plat.c + GPIO + 相关文件 + drivers/gpio/Kconfig + drivers/gpio/Makefile + drivers/gpio/gpio-phytium-core.c + drivers/gpio/gpio-phytium-core.h + drivers/gpio/gpio-phytium-pci.c + drivers/gpio/gpio-phytium-platform.c + drivers/gpio/gpio-phytium.c + DC/DP + 相关文件 + drivers/gpu/drm/Kconfig + drivers/gpu/drm/Makefile + drivers/gpu/drm/phytium/Kconfig + drivers/gpu/drm/phytium/Makefile + drivers/gpu/drm/phytium/phytium_crtc.c + drivers/gpu/drm/phytium/phytium_crtc.h + drivers/gpu/drm/phytium/phytium_debugfs.c + drivers/gpu/drm/phytium/phytium_debugfs.h + drivers/gpu/drm/phytium/phytium_display_drv.c + drivers/gpu/drm/phytium/phytium_display_drv.h + drivers/gpu/drm/phytium/phytium_dp.c + drivers/gpu/drm/phytium/phytium_dp.h + drivers/gpu/drm/phytium/phytium_fb.c + drivers/gpu/drm/phytium/phytium_fb.h + drivers/gpu/drm/phytium/phytium_fbdev.c + drivers/gpu/drm/phytium/phytium_fbdev.h + drivers/gpu/drm/phytium/phytium_gem.c + drivers/gpu/drm/phytium/phytium_gem.h + drivers/gpu/drm/phytium/phytium_panel.c + drivers/gpu/drm/phytium/phytium_panel.h + drivers/gpu/drm/phytium/phytium_plane.c + drivers/gpu/drm/phytium/phytium_plane.h + drivers/gpu/drm/phytium/phytium_reg.h + drivers/gpu/drm/phytium/x100_dp.c + drivers/gpu/drm/phytium/x100_dp.h + 相关上游补丁 + 55c5cc63ab3277aa20637dc20f6528987ac23743 + 6fa5963c37a2e3335eba0b7455e35a01318ebc15 + I2C + 相关文件 + drivers/i2c/busses/Kconfig + drivers/i2c/busses/Makefile + drivers/i2c/busses/i2c-phytium-common.c + drivers/i2c/busses/i2c-phytium-core.h + drivers/i2c/busses/i2c-phytium-master.c + drivers/i2c/busses/i2c-phytium-pci.c + I2S音频子系统 + 相关文件 + drivers/mfd/Kconfig + drivers/mfd/Makefile + drivers/mfd/phytium_x100_i2s_lsd.c + drivers/mfd/phytium_x100_i2s_mmd.c + sound/soc/Kconfig + sound/soc/Makefile + sound/soc/codecs/Kconfig + sound/soc/codecs/Makefile + sound/soc/codecs/es8336.c + sound/soc/codecs/es8336.h + sound/soc/codecs/es8388.c + 
sound/soc/codecs/es8388.h + sound/soc/phytium/Kconfig + sound/soc/phytium/Makefile + sound/soc/phytium/local.h + sound/soc/phytium/phytium_i2s.c + sound/soc/phytium/pmdk_dp.c + sound/soc/phytium/pmdk_es8336.c + sound/soc/phytium/pmdk_es8388.c + MMCSD + 相关文件 + drivers/mmc/host/Kconfig + drivers/mmc/host/Makefile + drivers/mmc/host/phytium-mci-pci.c + drivers/mmc/host/phytium-mci.c + drivers/mmc/host/phytium-mci.h + NAND Flash Controller + 相关文件 + drivers/mtd/nand/raw/Kconfig + drivers/mtd/nand/raw/Makefile + drivers/mtd/nand/raw/phytium_nand.c + drivers/mtd/nand/raw/phytium_nand.h + drivers/mtd/nand/raw/phytium_nand_pci.c + PS/2 + 相关文件 + drivers/input/serio/Kconfig + drivers/input/serio/Makefile + drivers/input/serio/phytium-ps2.c + SPI + 相关文件 + Documentation/devicetree/bindings/spi/spi-phytium.txt + drivers/spi/Kconfig + drivers/spi/Makefile + drivers/spi/spi-phytium-pci.c + drivers/spi/spi-phytium-plat.c + drivers/spi/spi-phytium.c + drivers/spi/spi-phytium.h + UART + 相关文件 + include/uapi/linux/serial_core.h + drivers/tty/serial/Kconfig + drivers/tty/serial/Makefile + drivers/tty/serial/phytium-uart.c + USB3.0 Host + 相关文件 + drivers/usb/host/xhci-pci.c + + +2021-08-02 Linux内核 (版本:4.19.7) + 完善kdump & kexec功能 + 相关文件 + drivers/firmware/efi/efi.c + drivers/firmware/efi/libstub/arm-stub.c + drivers/irqchip/irq-gic-phytium-2500-its.c + drivers/irqchip/irq-gic-phytium-2500.c + drivers/irqchip/irq-gic-v3-its.c + drivers/irqchip/irq-gic-v3.c + drivers/irqchip/irq-gic.c + include/linux/efi.h + include/linux/irq.h + kernel/irq/debugfs.c + kernel/irq/internals.h + kernel/irq/irqdesc.c + kernel/irq/resend.c + 相关上游补丁 + 1b57d91b969cda1d2c3530f2e829ca366a9c7df7 + 5f774f5e12512b850a611aa99b4601d7eac50edb + 17f644e949ffb14e9c8870d99bc574066d8b685c + da90921acc62c71d27729ae211ccfda5370bf75b + 1f85b1f5e1f5541272abedc19ba7b6c5b564c228 + cd1752d34ef33d68d82ef9dcc699b4eaa17c07fc + c16816acd08697b02a53f56f8936497a9f6f6e7a + c5d6082d35e0bcc20a26a067ffcfddcb5257e580 + 
2f13ff1d1d5c0257c97ea76b86a2d9c99c44a4b9 + 5e2c9f9a627772672accd80fa15359c0de6aa894 + 3fb68faee8676900f896d1615442aeca36e5f940 + f842ca8e9c8a80d07f5589536311250d7d6018f9 + a23d3bb05ccbd815c79293d2207fedede0b3515d + b844470f22061e8cd646cb355e85d2f518b2c913 + 71e0940d52e107748b270213a01d3b1546657d74 + 新增D2000 PCIe EP驱动 + 相关文件 + drivers/pci/controller/Kconfig + drivers/pci/controller/Makefile + drivers/pci/controller/pcie-phytium-ep.c + drivers/pci/controller/pcie-phytium-ep.h + drivers/pci/controller/pcie-phytium-register.h + Documentation/devicetree/bindings/pci/phytium,phytium-pcie-ep.txt + SPI驱动删除重复GPIO片选注册 + 相关文件 + drivers/spi/spi-phytium.c + D2000 INTx中断控制器驱动S3/S4恢复bugfix + 相关文件 + drivers/irqchip/irq-phytium-ixic.c + 修复HDA驱动无CODEC连接时产生错误的大量中断的bug + 相关文件 + sound/pci/hda/hda_phytium.c + +2021-05-06 Linux内核 (版本:4.19.6) + + 新增FT-2000AHKE设备树 + arch/arm64/boot/dts/phytium/Makefile + arch/arm64/boot/dts/phytium/ft2000ahke-devboard-dsk.dts + arch/arm64/boot/dts/phytium/ft2000ahke-generic-psci-soc.dtsi + SPI驱动增加对片选固件描述的支持 + drivers/spi/Kconfig + drivers/spi/spi-phytium.c + QSPI驱动增加片选功能与bugfix + drivers/mtd/spi-nor/phytium-quadsip.c + FT-2000/4、D2000设备树新增加PMU节点; + arch/arm64/boot/dts/phytium/ft2004-generic-psci-soc.dtsi + arch/arm64/boot/dts/phytium/d2000-generic-psci-soc.dtsi + Backport & rework 中断控制器补丁以支持kexec + 相关文件 + drivers/irqchip/irq-gic-v3-its.c + drivers/irqchip/irq-gic-phytium-2500-its.c + 相关上游补丁 + d38a71c5452529fd3326b0ae488292e5fbd8d2a1 + adaab500dd81a59d2b3b0ce3e995db5b9e3ee8a4 + 053be4854f9bcceba99cdfa0c89acc4696852c3f + e1a2e2010ba9d3c765b2e37a7ae8b332564716f1 + c440a9d9d113b9b3cd99bb5096c4aa47d515e463 + c6e2ccb66d0c3b4fffc59932585e9f709ad59003 + +2021-01-21 Linux内核(版本:4.19.5) + + 新增D2000高性能桌面CPU支持:设备树与INTx中断控制器支持 + drivers/acpi/internal.h + drivers/acpi/irq.c + drivers/acpi/pci_irq.c + drivers/acpi/pci_link.c + drivers/irqchip/Kconfig + drivers/irqchip/Makefile + drivers/irqchip/irq-phytium-ixic.c + drivers/linux/acpi.h + 
FT-2000/4、D2000设备树中GMAC节点phy-mode更新 + 片上IO设备驱动改进(bug fix与设备稳定性): + CAN(适用于FT-2000/4、D2000) + drivers/net/can/phytium-can.c + SDC(适用于FT-2000/4、D2000) + drivers/mmc/host/phytium-sdci.c + drivers/mmc/host/phytium-sdci.h + HDA(适用于FT-2000/4、D2000) + sound/pci/hda/hda_phytium.c + 修复S2500多路服务器跨节点MSI中断问题,并改善了中断的实时性 + drivers/irqchip/irq-gic-phytium-2500-its.c + 移植适用于FTC661及后续CPU核心的CRC32指令支持补丁: + 9784d82db3eb3de7851e5a3f4a2481607de2452c + 86d0dd34eafffbc76a81aba6ae2d71927d3835a8 + 7481cddf29ede204b475facc40e6f65459939881 + 关键ARM64通用补丁backport: + d0b7a302d58abe24ed0f32a0672dd4c356bb73db + e02f5c1bb2283cfcee68f2f0feddcc06150f13aa + 11e37d357f6ba7a9af850a872396082cc0a0001f + +2020-10-15 Linux内核(版本:4.19.4) + 内核源码包:linux-phytium-4.19.4.tar.xz + 累计更新补丁(相对上游基础版本):patch-phytium-4.19.4.xz + 增量更新补丁(相对上次发布版本):patch-phytium-4.19.3~4.19.4.xz + Changelog + RTC设备ACPI ID升级至飞腾系统ACPI描述规范V1.1 + QSPI驱动提高了稳定性与兼容性 + 新增针对FT-2000+ SMMU的workaround代码 + 新增Phytium Mailbox驱动支持 + 新增设备树下的FT-2000/4 SCPI协议驱动支持 + 新增ACPI下FT-2000/4 OPTEE支持(需ACPI同步更新) + 改进了HDA驱动,修复了从S3/S4唤醒时死机的问题 + 改进了CAN驱动 + MD5校验码:md5sum-4.19.4 + 2020-07-28 Linux内核(版本:4.19.3) + 压缩包:linux-phytium-4.19.3.tar.xz + 累计更新补丁(相对上游基础版本):patch-phytium-4.19.3.xz + 增量更新补丁(相对上次发布版本):patch-phytium-4.19.2~4.19.3.xz + Changelog + 新增S2500多路服务器的支持 + FT-2000/4片上设备驱动内ACPI标识符更新至《飞腾系统ACPI描述规范V1.1》 + 添加了ACPI描述下I2C控制器输入时钟属性的参数读取支持 + 重构了GPIO驱动(需更新固件),支持GPIO中断 + MD5校验码:md5sum-4.19.3 + 2020-04-29 Linux内核(版本:4.19.2) + 压缩包:linux-phytium-4.19.2.tar.xz + 累计更新补丁(相对上游基础版本):patch-phytium-4.19.2.xz + 增量更新补丁(相对上次发布版本):patch-phytium-4.19.1~4.19.2.xz + Changelog + 增加了FT-2000+单根PCIe Host的设备树; + 补全了FT-2000/4设备树内的UART节点; + 修复了HDA、SDCI驱动的若干bug; + 添加FT-2000/4片上设备驱动至arm64架构的defconfig; + 添加SPI驱动对ACPI的支持; + MD5校验码:md5sum-4.19.2 + 2020-01-16 Linux内核(版本:4.19.1) + 压缩包:linux-phytium-4.19.1.tar.xz + 累计更新补丁(相对上游基础版本):patch-phytium-4.19.1.xz + MD5校验码:md5sum-4.19.1 + 参考补丁 + ACPI下STMMAC驱动32位DMA掩码补丁 + 补丁文件:stmmac-Add-32-bits-mask-for-DMA-buffers-allocation.patch + 禁用AT803x PHY休眠补丁 + 
补丁文件:phy-disable-hibernation-of.patch + FT-2000/4 SDCI AXI互联缓存一致性使能补丁 + 补丁文件:mmc-phytium-set-axi-boundary.patch + SCPI@FT-2000/4驱动ACPI使能(非标准实验性补丁) + 补丁文件:phytium-scpi-Add-ACPI-support-for-Phytium-SCPI.patch + + + + + diff --git a/patch/patch-phytium-4.19.8_4.19.9 b/patch/patch-phytium-4.19.8_4.19.9 new file mode 100755 index 000000000000..21598c6045e3 --- /dev/null +++ b/patch/patch-phytium-4.19.8_4.19.9 @@ -0,0 +1,9753 @@ +diff --git a/Documentation/devicetree/bindings/arm/cpus.txt b/Documentation/devicetree/bindings/arm/cpus.txt +index 96dfccc0faa8..046a47e49a94 100644 +--- a/Documentation/devicetree/bindings/arm/cpus.txt ++++ b/Documentation/devicetree/bindings/arm/cpus.txt +@@ -184,6 +184,10 @@ described below. + "nvidia,tegra132-denver" + "nvidia,tegra186-denver" + "nvidia,tegra194-carmel" ++ "phytium,ftc660" ++ "phytium,ftc661" ++ "phytium,ftc662" ++ "phytium,ftc663" + "qcom,krait" + "qcom,kryo" + "qcom,kryo385" +diff --git a/arch/arm64/boot/dts/phytium/d2000-generic-psci-soc.dtsi b/arch/arm64/boot/dts/phytium/d2000-generic-psci-soc.dtsi +index b5efc20cb34f..c7be57a2c2aa 100644 +--- a/arch/arm64/boot/dts/phytium/d2000-generic-psci-soc.dtsi ++++ b/arch/arm64/boot/dts/phytium/d2000-generic-psci-soc.dtsi +@@ -34,7 +34,7 @@ + + cpu0: cpu@0 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc663", "arm,armv8"; + reg = <0x0 0x0>; + enable-method = "psci"; + numa-node-id = <0>; +@@ -43,7 +43,7 @@ + + cpu1: cpu@1 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc663", "arm,armv8"; + reg = <0x0 0x1>; + enable-method = "psci"; + numa-node-id = <0>; +@@ -52,7 +52,7 @@ + + cpu2: cpu@100 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc663", "arm,armv8"; + reg = <0x0 0x100>; + enable-method = "psci"; + numa-node-id = <0>; +@@ -61,7 +61,7 @@ + + cpu3: cpu@101 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc663", "arm,armv8"; + reg = <0x0 0x101>; 
+ enable-method = "psci"; + numa-node-id = <0>; +@@ -70,7 +70,7 @@ + + cpu4: cpu@200 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc663", "arm,armv8"; + reg = <0x0 0x200>; + enable-method = "psci"; + numa-node-id = <0>; +@@ -79,7 +79,7 @@ + + cpu5: cpu@201 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc663", "arm,armv8"; + reg = <0x0 0x201>; + enable-method = "psci"; + numa-node-id = <0>; +@@ -88,7 +88,7 @@ + + cpu6: cpu@300 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc663", "arm,armv8"; + reg = <0x0 0x300>; + enable-method = "psci"; + numa-node-id = <0>; +@@ -97,7 +97,7 @@ + + cpu7: cpu@301 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc663", "arm,armv8"; + reg = <0x0 0x301>; + enable-method = "psci"; + numa-node-id = <0>; +@@ -413,6 +413,7 @@ + clock-names = "phytium_can_clk"; + tx-fifo-depth = <0x40>; + rx-fifo-depth = <0x40>; ++ extend_brp; + }; + + can1: can@28207400 { +@@ -423,6 +424,7 @@ + clock-names = "phytium_can_clk"; + tx-fifo-depth = <0x40>; + rx-fifo-depth = <0x40>; ++ extend_brp; + }; + + can2: can@028207800 { +@@ -433,6 +435,7 @@ + clock-names = "phytium_can_clk"; + tx-fifo-depth = <0x40>; + rx-fifo-depth = <0x40>; ++ extend_brp; + }; + + hda: hda@28206000 { +diff --git a/arch/arm64/boot/dts/phytium/ft1500a-16c-generic-psci-soc.dtsi b/arch/arm64/boot/dts/phytium/ft1500a-16c-generic-psci-soc.dtsi +index 5cff9c2f100f..a0c73fa3fbe8 100644 +--- a/arch/arm64/boot/dts/phytium/ft1500a-16c-generic-psci-soc.dtsi ++++ b/arch/arm64/boot/dts/phytium/ft1500a-16c-generic-psci-soc.dtsi +@@ -107,7 +107,7 @@ + + cpu0:cpu@0 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc660", "arm,armv8"; + reg = <0x0 0x000>; + enable-method = "psci"; + cpu-idle-states = <&CPU_SLEEP>; +@@ -120,7 +120,7 @@ + + cpu1:cpu@1 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc660", 
"arm,armv8"; + reg = <0x0 0x001>; + enable-method = "psci"; + cpu-idle-states = <&CPU_SLEEP>; +@@ -130,7 +130,7 @@ + + cpu2:cpu@2 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc660", "arm,armv8"; + reg = <0x0 0x002>; + enable-method = "psci"; + cpu-idle-states = <&CPU_SLEEP>; +@@ -140,7 +140,7 @@ + + cpu3:cpu@3 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc660", "arm,armv8"; + reg = <0x0 0x003>; + enable-method = "psci"; + cpu-idle-states = <&CPU_SLEEP>; +@@ -150,7 +150,7 @@ + + cpu4:cpu@100 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc660", "arm,armv8"; + reg = <0x0 0x100>; + enable-method = "psci"; + cpu-idle-states = <&CPU_SLEEP>; +@@ -163,7 +163,7 @@ + + cpu5:cpu@101 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc660", "arm,armv8"; + reg = <0x0 0x101>; + enable-method = "psci"; + cpu-idle-states = <&CPU_SLEEP>; +@@ -173,7 +173,7 @@ + + cpu6:cpu@102 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc660", "arm,armv8"; + reg = <0x0 0x102>; + enable-method = "psci"; + cpu-idle-states = <&CPU_SLEEP>; +@@ -183,7 +183,7 @@ + + cpu7:cpu@103 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc660", "arm,armv8"; + reg = <0x0 0x103>; + enable-method = "psci"; + cpu-idle-states = <&CPU_SLEEP>; +@@ -193,7 +193,7 @@ + + cpu8:cpu@200 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc660", "arm,armv8"; + reg = <0x0 0x200>; + enable-method = "psci"; + cpu-idle-states = <&CPU_SLEEP>; +@@ -206,7 +206,7 @@ + + cpu9:cpu@201 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc660", "arm,armv8"; + reg = <0x0 0x201>; + enable-method = "psci"; + cpu-idle-states = <&CPU_SLEEP>; +@@ -216,7 +216,7 @@ + + cpu10:cpu@202 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc660", "arm,armv8"; + reg = 
<0x0 0x202>; + enable-method = "psci"; + cpu-idle-states = <&CPU_SLEEP>; +@@ -226,7 +226,7 @@ + + cpu11:cpu@203 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc660", "arm,armv8"; + reg = <0x0 0x203>; + enable-method = "psci"; + cpu-idle-states = <&CPU_SLEEP>; +@@ -236,7 +236,7 @@ + + cpu12:cpu@300 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc660", "arm,armv8"; + reg = <0x0 0x300>; + enable-method = "psci"; + cpu-idle-states = <&CPU_SLEEP>; +@@ -249,7 +249,7 @@ + + cpu13:cpu@301 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc660", "arm,armv8"; + reg = <0x0 0x301>; + enable-method = "psci"; + cpu-idle-states = <&CPU_SLEEP>; +@@ -259,7 +259,7 @@ + + cpu14:cpu@302 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc660", "arm,armv8"; + reg = <0x0 0x302>; + enable-method = "psci"; + cpu-idle-states = <&CPU_SLEEP>; +@@ -269,7 +269,7 @@ + + cpu15:cpu@303 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc660", "arm,armv8"; + reg = <0x0 0x303>; + enable-method = "psci"; + cpu-idle-states = <&CPU_SLEEP>; +diff --git a/arch/arm64/boot/dts/phytium/ft2000ahk-generic-spintable-soc.dtsi b/arch/arm64/boot/dts/phytium/ft2000ahk-generic-spintable-soc.dtsi +index 81393fc55bec..fb587b664a83 100644 +--- a/arch/arm64/boot/dts/phytium/ft2000ahk-generic-spintable-soc.dtsi ++++ b/arch/arm64/boot/dts/phytium/ft2000ahk-generic-spintable-soc.dtsi +@@ -24,7 +24,7 @@ + + cpu0: cpu@0 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc661", "arm,armv8"; + reg = <0x0 0x0>; + enable-method = "spin-table"; + cpu-release-addr = <0x0 0x8007fff0>; +@@ -32,7 +32,7 @@ + + cpu1: cpu@1 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc661", "arm,armv8"; + reg = <0x0 0x1>; + enable-method = "spin-table"; + cpu-release-addr = <0x0 0x8007fff0>; +diff --git 
a/arch/arm64/boot/dts/phytium/ft2000ahke-generic-psci-soc.dtsi b/arch/arm64/boot/dts/phytium/ft2000ahke-generic-psci-soc.dtsi +index 3f3731e50efa..dd7631e51d33 100644 +--- a/arch/arm64/boot/dts/phytium/ft2000ahke-generic-psci-soc.dtsi ++++ b/arch/arm64/boot/dts/phytium/ft2000ahke-generic-psci-soc.dtsi +@@ -33,14 +33,14 @@ + + cpu0: cpu@0 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc661", "arm,armv8"; + reg = <0x0 0x0>; + enable-method = "psci"; + }; + + cpu1: cpu@1 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc661", "arm,armv8"; + reg = <0x0 0x1>; + enable-method = "psci"; + }; +diff --git a/arch/arm64/boot/dts/phytium/ft2000plus-MR-psci-soc.dtsi b/arch/arm64/boot/dts/phytium/ft2000plus-MR-psci-soc.dtsi +index 8ac066cd075a..1e9da418b1d1 100644 +--- a/arch/arm64/boot/dts/phytium/ft2000plus-MR-psci-soc.dtsi ++++ b/arch/arm64/boot/dts/phytium/ft2000plus-MR-psci-soc.dtsi +@@ -271,7 +271,7 @@ + + cpu0: cpu@0 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x0>; + enable-method = "psci"; + numa-node-id = <0>; +@@ -279,7 +279,7 @@ + + cpu1: cpu@1 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x1>; + enable-method = "psci"; + numa-node-id = <0>; +@@ -287,7 +287,7 @@ + + cpu2: cpu@2 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x2>; + enable-method = "psci"; + numa-node-id = <0>; +@@ -295,7 +295,7 @@ + + cpu3: cpu@3 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x3>; + enable-method = "psci"; + numa-node-id = <0>; +@@ -303,7 +303,7 @@ + + cpu4: cpu@100 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x100>; + enable-method = "psci"; + numa-node-id = <0>; +@@ -311,7 +311,7 
@@ + + cpu5: cpu@101 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x101>; + enable-method = "psci"; + numa-node-id = <0>; +@@ -319,7 +319,7 @@ + + cpu6: cpu@102 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x102>; + enable-method = "psci"; + numa-node-id = <0>; +@@ -327,7 +327,7 @@ + + cpu7: cpu@103 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x103>; + enable-method = "psci"; + numa-node-id = <0>; +@@ -335,7 +335,7 @@ + + cpu8: cpu@200 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x200>; + enable-method = "psci"; + numa-node-id = <1>; +@@ -343,7 +343,7 @@ + + cpu9: cpu@201 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x201>; + enable-method = "psci"; + numa-node-id = <1>; +@@ -351,7 +351,7 @@ + + cpu10: cpu@202 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x202>; + enable-method = "psci"; + numa-node-id = <1>; +@@ -359,7 +359,7 @@ + + cpu11: cpu@203 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x203>; + enable-method = "psci"; + numa-node-id = <1>; +@@ -367,7 +367,7 @@ + + cpu12: cpu@300 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x300>; + enable-method = "psci"; + numa-node-id = <1>; +@@ -375,7 +375,7 @@ + + cpu13: cpu@301 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x301>; + enable-method = "psci"; + numa-node-id = <1>; +@@ -383,7 +383,7 @@ + + cpu14: cpu@302 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", 
"arm,armv8"; + reg = <0x0 0x302>; + enable-method = "psci"; + numa-node-id = <1>; +@@ -391,7 +391,7 @@ + + cpu15: cpu@303 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x303>; + enable-method = "psci"; + numa-node-id = <1>; +@@ -399,7 +399,7 @@ + + cpu16: cpu@400 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x400>; + enable-method = "psci"; + numa-node-id = <2>; +@@ -407,7 +407,7 @@ + + cpu17: cpu@401 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x401>; + enable-method = "psci"; + numa-node-id = <2>; +@@ -415,7 +415,7 @@ + + cpu18: cpu@402 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x402>; + enable-method = "psci"; + numa-node-id = <2>; +@@ -423,7 +423,7 @@ + + cpu19: cpu@403 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x403>; + enable-method = "psci"; + numa-node-id = <2>; +@@ -431,7 +431,7 @@ + + cpu20: cpu@500 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x500>; + enable-method = "psci"; + numa-node-id = <2>; +@@ -439,7 +439,7 @@ + + cpu21: cpu@501 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x501>; + enable-method = "psci"; + numa-node-id = <2>; +@@ -447,7 +447,7 @@ + + cpu22: cpu@502 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x502>; + enable-method = "psci"; + numa-node-id = <2>; +@@ -455,7 +455,7 @@ + + cpu23: cpu@503 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x503>; + enable-method = "psci"; + numa-node-id = <2>; +@@ -463,7 +463,7 @@ + + 
cpu24: cpu@600 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x600>; + enable-method = "psci"; + numa-node-id = <3>; +@@ -471,7 +471,7 @@ + + cpu25: cpu@601 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x601>; + enable-method = "psci"; + numa-node-id = <3>; +@@ -479,7 +479,7 @@ + + cpu26: cpu@602 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x602>; + enable-method = "psci"; + numa-node-id = <3>; +@@ -487,7 +487,7 @@ + + cpu27: cpu@603 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x603>; + enable-method = "psci"; + numa-node-id = <3>; +@@ -495,7 +495,7 @@ + + cpu28: cpu@700 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x700>; + enable-method = "psci"; + numa-node-id = <3>; +@@ -503,7 +503,7 @@ + + cpu29: cpu@701 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x701>; + enable-method = "psci"; + numa-node-id = <3>; +@@ -511,7 +511,7 @@ + + cpu30: cpu@702 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x702>; + enable-method = "psci"; + numa-node-id = <3>; +@@ -519,7 +519,7 @@ + + cpu31: cpu@703 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x703>; + enable-method = "psci"; + numa-node-id = <3>; +@@ -527,7 +527,7 @@ + + cpu32: cpu@800 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x800>; + enable-method = "psci"; + numa-node-id = <4>; +@@ -535,7 +535,7 @@ + + cpu33: cpu@801 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", 
"arm,armv8"; + reg = <0x0 0x801>; + enable-method = "psci"; + numa-node-id = <4>; +@@ -543,7 +543,7 @@ + + cpu34: cpu@802 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x802>; + enable-method = "psci"; + numa-node-id = <4>; +@@ -551,7 +551,7 @@ + + cpu35: cpu@803 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x803>; + enable-method = "psci"; + numa-node-id = <4>; +@@ -559,7 +559,7 @@ + + cpu36: cpu@900 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x900>; + enable-method = "psci"; + numa-node-id = <4>; +@@ -567,7 +567,7 @@ + + cpu37: cpu@901 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x901>; + enable-method = "psci"; + numa-node-id = <4>; +@@ -575,7 +575,7 @@ + + cpu38: cpu@902 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x902>; + enable-method = "psci"; + numa-node-id = <4>; +@@ -583,7 +583,7 @@ + + cpu39: cpu@903 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x903>; + enable-method = "psci"; + numa-node-id = <4>; +@@ -591,7 +591,7 @@ + + cpu40: cpu@a00 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xa00>; + enable-method = "psci"; + numa-node-id = <5>; +@@ -599,7 +599,7 @@ + + cpu41: cpu@a01 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xa01>; + enable-method = "psci"; + numa-node-id = <5>; +@@ -607,7 +607,7 @@ + + cpu42: cpu@a02 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xa02>; + enable-method = "psci"; + numa-node-id = <5>; +@@ -615,7 +615,7 @@ + + 
cpu43: cpu@a03 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xa03>; + enable-method = "psci"; + numa-node-id = <5>; +@@ -623,7 +623,7 @@ + + cpu44: cpu@b00 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xb00>; + enable-method = "psci"; + numa-node-id = <5>; +@@ -631,7 +631,7 @@ + + cpu45: cpu@b01 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xb01>; + enable-method = "psci"; + numa-node-id = <5>; +@@ -639,7 +639,7 @@ + + cpu46: cpu@b02 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xb02>; + enable-method = "psci"; + numa-node-id = <5>; +@@ -647,7 +647,7 @@ + + cpu47: cpu@b03 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xb03>; + enable-method = "psci"; + numa-node-id = <5>; +@@ -655,7 +655,7 @@ + + cpu48: cpu@c00 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xc00>; + enable-method = "psci"; + numa-node-id = <6>; +@@ -663,7 +663,7 @@ + + cpu49: cpu@c01 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xc01>; + enable-method = "psci"; + numa-node-id = <6>; +@@ -671,7 +671,7 @@ + + cpu50: cpu@c02 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xc02>; + enable-method = "psci"; + numa-node-id = <6>; +@@ -679,7 +679,7 @@ + + cpu51: cpu@c03 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xc03>; + enable-method = "psci"; + numa-node-id = <6>; +@@ -687,7 +687,7 @@ + + cpu52: cpu@d00 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", 
"arm,armv8"; + reg = <0x0 0xd00>; + enable-method = "psci"; + numa-node-id = <6>; +@@ -695,7 +695,7 @@ + + cpu53: cpu@d01 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xd01>; + enable-method = "psci"; + numa-node-id = <6>; +@@ -703,7 +703,7 @@ + + cpu54: cpu@d02 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xd02>; + enable-method = "psci"; + numa-node-id = <6>; +@@ -711,7 +711,7 @@ + + cpu55: cpu@d03 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xd03>; + enable-method = "psci"; + numa-node-id = <6>; +@@ -719,7 +719,7 @@ + + cpu56: cpu@e00 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xe00>; + enable-method = "psci"; + numa-node-id = <7>; +@@ -727,7 +727,7 @@ + + cpu57: cpu@e01 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xe01>; + enable-method = "psci"; + numa-node-id = <7>; +@@ -735,7 +735,7 @@ + + cpu58: cpu@e02 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xe02>; + enable-method = "psci"; + numa-node-id = <7>; +@@ -743,7 +743,7 @@ + + cpu59: cpu@e03 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xe03>; + enable-method = "psci"; + numa-node-id = <7>; +@@ -751,7 +751,7 @@ + + cpu60: cpu@f00 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xf00>; + enable-method = "psci"; + numa-node-id = <7>; +@@ -759,7 +759,7 @@ + + cpu61: cpu@f01 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xf01>; + enable-method = "psci"; + numa-node-id = <7>; +@@ -767,7 +767,7 @@ + + 
cpu62: cpu@f02 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xf02>; + enable-method = "psci"; + numa-node-id = <7>; +@@ -775,7 +775,7 @@ + + cpu63: cpu@f03 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xf03>; + enable-method = "psci"; + numa-node-id = <7>; +diff --git a/arch/arm64/boot/dts/phytium/ft2000plus-SR-psci-soc.dtsi b/arch/arm64/boot/dts/phytium/ft2000plus-SR-psci-soc.dtsi +index e50f0e62154b..687df1601f3e 100644 +--- a/arch/arm64/boot/dts/phytium/ft2000plus-SR-psci-soc.dtsi ++++ b/arch/arm64/boot/dts/phytium/ft2000plus-SR-psci-soc.dtsi +@@ -271,7 +271,7 @@ + + cpu0: cpu@0 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x0>; + enable-method = "psci"; + numa-node-id = <0>; +@@ -279,7 +279,7 @@ + + cpu1: cpu@1 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x1>; + enable-method = "psci"; + numa-node-id = <0>; +@@ -287,7 +287,7 @@ + + cpu2: cpu@2 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x2>; + enable-method = "psci"; + numa-node-id = <0>; +@@ -295,7 +295,7 @@ + + cpu3: cpu@3 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x3>; + enable-method = "psci"; + numa-node-id = <0>; +@@ -303,7 +303,7 @@ + + cpu4: cpu@100 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x100>; + enable-method = "psci"; + numa-node-id = <0>; +@@ -311,7 +311,7 @@ + + cpu5: cpu@101 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x101>; + enable-method = "psci"; + numa-node-id = <0>; +@@ -319,7 +319,7 @@ + + cpu6: cpu@102 { + device_type = "cpu"; +- 
compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x102>; + enable-method = "psci"; + numa-node-id = <0>; +@@ -327,7 +327,7 @@ + + cpu7: cpu@103 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x103>; + enable-method = "psci"; + numa-node-id = <0>; +@@ -335,7 +335,7 @@ + + cpu8: cpu@200 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x200>; + enable-method = "psci"; + numa-node-id = <1>; +@@ -343,7 +343,7 @@ + + cpu9: cpu@201 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x201>; + enable-method = "psci"; + numa-node-id = <1>; +@@ -351,7 +351,7 @@ + + cpu10: cpu@202 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x202>; + enable-method = "psci"; + numa-node-id = <1>; +@@ -359,7 +359,7 @@ + + cpu11: cpu@203 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x203>; + enable-method = "psci"; + numa-node-id = <1>; +@@ -367,7 +367,7 @@ + + cpu12: cpu@300 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x300>; + enable-method = "psci"; + numa-node-id = <1>; +@@ -375,7 +375,7 @@ + + cpu13: cpu@301 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x301>; + enable-method = "psci"; + numa-node-id = <1>; +@@ -383,7 +383,7 @@ + + cpu14: cpu@302 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x302>; + enable-method = "psci"; + numa-node-id = <1>; +@@ -391,7 +391,7 @@ + + cpu15: cpu@303 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x303>; + enable-method = 
"psci"; + numa-node-id = <1>; +@@ -399,7 +399,7 @@ + + cpu16: cpu@400 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x400>; + enable-method = "psci"; + numa-node-id = <2>; +@@ -407,7 +407,7 @@ + + cpu17: cpu@401 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x401>; + enable-method = "psci"; + numa-node-id = <2>; +@@ -415,7 +415,7 @@ + + cpu18: cpu@402 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x402>; + enable-method = "psci"; + numa-node-id = <2>; +@@ -423,7 +423,7 @@ + + cpu19: cpu@403 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x403>; + enable-method = "psci"; + numa-node-id = <2>; +@@ -431,7 +431,7 @@ + + cpu20: cpu@500 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x500>; + enable-method = "psci"; + numa-node-id = <2>; +@@ -439,7 +439,7 @@ + + cpu21: cpu@501 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x501>; + enable-method = "psci"; + numa-node-id = <2>; +@@ -447,7 +447,7 @@ + + cpu22: cpu@502 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x502>; + enable-method = "psci"; + numa-node-id = <2>; +@@ -455,7 +455,7 @@ + + cpu23: cpu@503 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x503>; + enable-method = "psci"; + numa-node-id = <2>; +@@ -463,7 +463,7 @@ + + cpu24: cpu@600 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x600>; + enable-method = "psci"; + numa-node-id = <3>; +@@ -471,7 +471,7 @@ + + cpu25: cpu@601 { + device_type = "cpu"; +- compatible 
= "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x601>; + enable-method = "psci"; + numa-node-id = <3>; +@@ -479,7 +479,7 @@ + + cpu26: cpu@602 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x602>; + enable-method = "psci"; + numa-node-id = <3>; +@@ -487,7 +487,7 @@ + + cpu27: cpu@603 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x603>; + enable-method = "psci"; + numa-node-id = <3>; +@@ -495,7 +495,7 @@ + + cpu28: cpu@700 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x700>; + enable-method = "psci"; + numa-node-id = <3>; +@@ -503,7 +503,7 @@ + + cpu29: cpu@701 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x701>; + enable-method = "psci"; + numa-node-id = <3>; +@@ -511,7 +511,7 @@ + + cpu30: cpu@702 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x702>; + enable-method = "psci"; + numa-node-id = <3>; +@@ -519,7 +519,7 @@ + + cpu31: cpu@703 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x703>; + enable-method = "psci"; + numa-node-id = <3>; +@@ -527,7 +527,7 @@ + + cpu32: cpu@800 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x800>; + enable-method = "psci"; + numa-node-id = <4>; +@@ -535,7 +535,7 @@ + + cpu33: cpu@801 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x801>; + enable-method = "psci"; + numa-node-id = <4>; +@@ -543,7 +543,7 @@ + + cpu34: cpu@802 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x802>; + enable-method = "psci"; 
+ numa-node-id = <4>; +@@ -551,7 +551,7 @@ + + cpu35: cpu@803 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x803>; + enable-method = "psci"; + numa-node-id = <4>; +@@ -559,7 +559,7 @@ + + cpu36: cpu@900 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x900>; + enable-method = "psci"; + numa-node-id = <4>; +@@ -567,7 +567,7 @@ + + cpu37: cpu@901 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x901>; + enable-method = "psci"; + numa-node-id = <4>; +@@ -575,7 +575,7 @@ + + cpu38: cpu@902 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x902>; + enable-method = "psci"; + numa-node-id = <4>; +@@ -583,7 +583,7 @@ + + cpu39: cpu@903 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0x903>; + enable-method = "psci"; + numa-node-id = <4>; +@@ -591,7 +591,7 @@ + + cpu40: cpu@a00 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xa00>; + enable-method = "psci"; + numa-node-id = <5>; +@@ -599,7 +599,7 @@ + + cpu41: cpu@a01 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xa01>; + enable-method = "psci"; + numa-node-id = <5>; +@@ -607,7 +607,7 @@ + + cpu42: cpu@a02 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xa02>; + enable-method = "psci"; + numa-node-id = <5>; +@@ -615,7 +615,7 @@ + + cpu43: cpu@a03 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xa03>; + enable-method = "psci"; + numa-node-id = <5>; +@@ -623,7 +623,7 @@ + + cpu44: cpu@b00 { + device_type = "cpu"; +- compatible = 
"arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xb00>; + enable-method = "psci"; + numa-node-id = <5>; +@@ -631,7 +631,7 @@ + + cpu45: cpu@b01 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xb01>; + enable-method = "psci"; + numa-node-id = <5>; +@@ -639,7 +639,7 @@ + + cpu46: cpu@b02 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xb02>; + enable-method = "psci"; + numa-node-id = <5>; +@@ -647,7 +647,7 @@ + + cpu47: cpu@b03 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xb03>; + enable-method = "psci"; + numa-node-id = <5>; +@@ -655,7 +655,7 @@ + + cpu48: cpu@c00 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xc00>; + enable-method = "psci"; + numa-node-id = <6>; +@@ -663,7 +663,7 @@ + + cpu49: cpu@c01 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xc01>; + enable-method = "psci"; + numa-node-id = <6>; +@@ -671,7 +671,7 @@ + + cpu50: cpu@c02 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xc02>; + enable-method = "psci"; + numa-node-id = <6>; +@@ -679,7 +679,7 @@ + + cpu51: cpu@c03 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xc03>; + enable-method = "psci"; + numa-node-id = <6>; +@@ -687,7 +687,7 @@ + + cpu52: cpu@d00 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xd00>; + enable-method = "psci"; + numa-node-id = <6>; +@@ -695,7 +695,7 @@ + + cpu53: cpu@d01 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xd01>; + enable-method = "psci"; + 
numa-node-id = <6>; +@@ -703,7 +703,7 @@ + + cpu54: cpu@d02 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xd02>; + enable-method = "psci"; + numa-node-id = <6>; +@@ -711,7 +711,7 @@ + + cpu55: cpu@d03 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xd03>; + enable-method = "psci"; + numa-node-id = <6>; +@@ -719,7 +719,7 @@ + + cpu56: cpu@e00 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xe00>; + enable-method = "psci"; + numa-node-id = <7>; +@@ -727,7 +727,7 @@ + + cpu57: cpu@e01 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xe01>; + enable-method = "psci"; + numa-node-id = <7>; +@@ -735,7 +735,7 @@ + + cpu58: cpu@e02 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xe02>; + enable-method = "psci"; + numa-node-id = <7>; +@@ -743,7 +743,7 @@ + + cpu59: cpu@e03 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xe03>; + enable-method = "psci"; + numa-node-id = <7>; +@@ -751,7 +751,7 @@ + + cpu60: cpu@f00 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xf00>; + enable-method = "psci"; + numa-node-id = <7>; +@@ -759,7 +759,7 @@ + + cpu61: cpu@f01 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xf01>; + enable-method = "psci"; + numa-node-id = <7>; +@@ -767,7 +767,7 @@ + + cpu62: cpu@f02 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xf02>; + enable-method = "psci"; + numa-node-id = <7>; +@@ -775,7 +775,7 @@ + + cpu63: cpu@f03 { + device_type = "cpu"; +- compatible = 
"arm,armv8"; ++ compatible = "phytium,ftc662", "arm,armv8"; + reg = <0x0 0xf03>; + enable-method = "psci"; + numa-node-id = <7>; +diff --git a/arch/arm64/boot/dts/phytium/ft2004-generic-psci-soc.dtsi b/arch/arm64/boot/dts/phytium/ft2004-generic-psci-soc.dtsi +index 9d28335ddaef..a6451654e82f 100644 +--- a/arch/arm64/boot/dts/phytium/ft2004-generic-psci-soc.dtsi ++++ b/arch/arm64/boot/dts/phytium/ft2004-generic-psci-soc.dtsi +@@ -34,7 +34,7 @@ + + cpu0: cpu@0 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc663", "arm,armv8"; + reg = <0x0 0x0>; + enable-method = "psci"; + numa-node-id = <0>; +@@ -43,7 +43,7 @@ + + cpu1: cpu@1 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc663", "arm,armv8"; + reg = <0x0 0x1>; + enable-method = "psci"; + numa-node-id = <0>; +@@ -52,7 +52,7 @@ + + cpu2: cpu@100 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc663", "arm,armv8"; + reg = <0x0 0x100>; + enable-method = "psci"; + numa-node-id = <0>; +@@ -61,7 +61,7 @@ + + cpu3: cpu@101 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc663", "arm,armv8"; + reg = <0x0 0x101>; + enable-method = "psci"; + numa-node-id = <0>; +diff --git a/arch/arm64/boot/dts/phytium/ft2004c-generic-psci-soc.dtsi b/arch/arm64/boot/dts/phytium/ft2004c-generic-psci-soc.dtsi +index 231a406a2703..9b09de80abf0 100644 +--- a/arch/arm64/boot/dts/phytium/ft2004c-generic-psci-soc.dtsi ++++ b/arch/arm64/boot/dts/phytium/ft2004c-generic-psci-soc.dtsi +@@ -34,7 +34,7 @@ + + cpu0: cpu@0 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc663", "arm,armv8"; + reg = <0x0 0x0>; + enable-method = "psci"; + numa-node-id = <0>; +@@ -43,7 +43,7 @@ + + cpu1: cpu@1 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc663", "arm,armv8"; + reg = <0x0 0x1>; + enable-method = "psci"; + numa-node-id = <0>; +@@ -52,7 +52,7 @@ + + cpu2: cpu@100 { + 
device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc663", "arm,armv8"; + reg = <0x0 0x100>; + enable-method = "psci"; + numa-node-id = <0>; +@@ -61,7 +61,7 @@ + + cpu3: cpu@101 { + device_type = "cpu"; +- compatible = "arm,armv8"; ++ compatible = "phytium,ftc663", "arm,armv8"; + reg = <0x0 0x101>; + enable-method = "psci"; + numa-node-id = <0>; +diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c +index 6b2686d54411..29cdc99688f3 100644 +--- a/arch/arm64/kernel/hibernate.c ++++ b/arch/arm64/kernel/hibernate.c +@@ -214,7 +214,7 @@ static int create_safe_exec_page(void *src_start, size_t length, + } + + memcpy((void *)dst, src_start, length); +- flush_icache_range(dst, dst + length); ++ __flush_icache_range(dst, dst + length); + + pgdp = pgd_offset_raw(allocator(mask), dst_addr); + if (pgd_none(READ_ONCE(*pgdp))) { +diff --git a/drivers/gpu/drm/phytium/Kconfig b/drivers/gpu/drm/phytium/Kconfig +index b94add77357e..e3024feb69d0 100644 +--- a/drivers/gpu/drm/phytium/Kconfig ++++ b/drivers/gpu/drm/phytium/Kconfig +@@ -4,5 +4,4 @@ config DRM_PHYTIUM + select DRM_KMS_HELPER + help + Choose this option if you have a phytium graphics card. +- This driver provides kernel mode setting and +- buffer management to userspace. ++ This driver provides kernel mode setting and buffer management to userspace. 
+diff --git a/drivers/gpu/drm/phytium/Makefile b/drivers/gpu/drm/phytium/Makefile +index eb5b48794499..104416fc4313 100644 +--- a/drivers/gpu/drm/phytium/Makefile ++++ b/drivers/gpu/drm/phytium/Makefile +@@ -8,6 +8,8 @@ phytium-dc-drm-y := phytium_display_drv.o \ + phytium_debugfs.o \ + x100_dp.o \ + phytium_panel.o \ ++ x100_dc.o \ ++ phytium_pci.o + + obj-$(CONFIG_DRM_PHYTIUM) += phytium-dc-drm.o + CFLAGS_REMOVE_phytium_crtc.o += -mgeneral-regs-only +diff --git a/drivers/gpu/drm/phytium/phytium_crtc.c b/drivers/gpu/drm/phytium/phytium_crtc.c +index b78181a725e7..796c046d0a73 100644 +--- a/drivers/gpu/drm/phytium/phytium_crtc.c ++++ b/drivers/gpu/drm/phytium/phytium_crtc.c +@@ -1,15 +1,7 @@ + // SPDX-License-Identifier: GPL-2.0 +-/* Phytium X100 display drm driver ++/* Phytium display drm driver + * +- * Copyright (c) 2021 Phytium Limited. +- * +- * Author: +- * Yang Xun +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License as published by +- * the Free Software Foundation; either version 2 of the License, or +- * (at your option) any later version. ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. 
+ */ + + #include +@@ -17,10 +9,11 @@ + #include + #include + #include "phytium_display_drv.h" +-#include "phytium_reg.h" + #include "phytium_crtc.h" + #include "phytium_plane.h" + #include "phytium_dp.h" ++#include "x100_dc.h" ++#include "phytium_reg.h" + + #define MAXKERNELSIZE 9 + #define SUBPIXELINDEXBITS 5 +@@ -32,63 +25,11 @@ + #define KERNELSTATES (PHYALIGN(KERNELTABLESIZE + 4, 8)) + #define PHYPI 3.14159265358979323846f + +-#define PHYTIUM_MATH_Add(X, Y) (float)((X) + (Y)) +-#define PHYTIUM_MATH_Multiply(X, Y) (float)((X) * (Y)) +-#define PHYTIUM_MATH_Divide(X, Y) (float)((X) / (Y)) +-#define PHYTIUM_MATH_DivideFromUInteger(X, Y) ((float)(X) / (float)(Y)) +-#define PHYTIUM_MATH_I2Float(X) (float)(X) +- +-static int phytium_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, +- u16 *green, u16 *blue, uint32_t size, +- struct drm_modeset_acquire_ctx *ctx) +-{ +- return 0; +-} +- +-static void phytium_crtc_destroy(struct drm_crtc *crtc) +-{ +- struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); +- +- drm_crtc_cleanup(crtc); +- kfree(phytium_crtc); +-} +- +-struct drm_crtc_state * +-phytium_crtc_atomic_duplicate_state(struct drm_crtc *crtc) +-{ +- struct phytium_crtc_state *phytium_crtc_state = NULL; +- +- phytium_crtc_state = kmemdup(crtc->state, sizeof(*phytium_crtc_state), +- GFP_KERNEL); +- if (IS_ERR(phytium_crtc_state)) +- return NULL; +- __drm_atomic_helper_crtc_duplicate_state(crtc, +- &phytium_crtc_state->base); +- +- return &phytium_crtc_state->base; +-} +- +-void +-phytium_crtc_atomic_destroy_state(struct drm_crtc *crtc, +- struct drm_crtc_state *state) +-{ +- struct phytium_crtc_state *phytium_crtc_state = +- to_phytium_crtc_state(state); +- +- phytium_crtc_state = to_phytium_crtc_state(state); +- __drm_atomic_helper_crtc_destroy_state(state); +- kfree(phytium_crtc_state); +-} +- +-static const struct drm_crtc_funcs phytium_crtc_funcs = { +- .gamma_set = phytium_crtc_gamma_set, +- .set_config = drm_atomic_helper_set_config, +- .destroy = 
phytium_crtc_destroy, +- .page_flip = drm_atomic_helper_page_flip, +- .reset = drm_atomic_helper_crtc_reset, +- .atomic_duplicate_state = phytium_crtc_atomic_duplicate_state, +- .atomic_destroy_state = phytium_crtc_atomic_destroy_state, +-}; ++#define MATH_Add(X, Y) (float)((X) + (Y)) ++#define MATH_Multiply(X, Y) (float)((X) * (Y)) ++#define MATH_Divide(X, Y) (float)((X) / (Y)) ++#define MATH_DivideFromUInteger(X, Y) ((float)(X) / (float)(Y)) ++#define MATH_I2Float(X) (float)(X) + + struct filter_blit_array { + uint8_t kernelSize; +@@ -96,7 +37,7 @@ struct filter_blit_array { + uint32_t *kernelStates; + }; + +-uint32_t phytium_crtc_scaling_get_factor(uint32_t src_size, uint32_t dst_size) ++static uint32_t dc_scaling_get_factor(uint32_t src_size, uint32_t dst_size) + { + uint32_t factor = 0; + +@@ -105,7 +46,7 @@ uint32_t phytium_crtc_scaling_get_factor(uint32_t src_size, uint32_t dst_size) + return factor; + } + +-float phytium_sint(float x) ++static float dc_sint(float x) + { + const float B = 1.2732395477; + const float C = -0.4052847346; +@@ -123,27 +64,27 @@ float phytium_sint(float x) + return y; + } + +-float phytium_sinc_filter(float x, int radius) ++static float dc_sinc_filter(float x, int radius) + { + float pit, pitd, f1, f2, result; +- float f_radius = PHYTIUM_MATH_I2Float(radius); ++ float f_radius = MATH_I2Float(radius); + + if (x == 0.0f) { + result = 1.0f; + } else if ((x < -f_radius) || (x > f_radius)) { + result = 0.0f; + } else { +- pit = PHYTIUM_MATH_Multiply(PHYPI, x); +- pitd = PHYTIUM_MATH_Divide(pit, f_radius); +- f1 = PHYTIUM_MATH_Divide(phytium_sint(pit), pit); +- f2 = PHYTIUM_MATH_Divide(phytium_sint(pitd), pitd); +- result = PHYTIUM_MATH_Multiply(f1, f2); ++ pit = MATH_Multiply(PHYPI, x); ++ pitd = MATH_Divide(pit, f_radius); ++ f1 = MATH_Divide(dc_sint(pit), pit); ++ f2 = MATH_Divide(dc_sint(pitd), pitd); ++ result = MATH_Multiply(f1, f2); + } + + return result; + } + +-int calculate_sync_table( ++static int dc_calculate_sync_table( + 
uint8_t kernel_size, + uint32_t src_size, + uint32_t dst_size, +@@ -162,7 +103,7 @@ int calculate_sync_table( + + do { + /* Compute the scale factor. */ +- scale_factor = phytium_crtc_scaling_get_factor(src_size, dst_size); ++ scale_factor = dc_scaling_get_factor(src_size, dst_size); + + /* Same kernel size and ratio as before? */ + if ((kernel_info->kernelSize == kernel_size) && +@@ -179,7 +120,7 @@ int calculate_sync_table( + kernel_info->scaleFactor = scale_factor; + + /* Compute the scale factor. */ +- f_scale = PHYTIUM_MATH_DivideFromUInteger(dst_size, src_size); ++ f_scale = MATH_DivideFromUInteger(dst_size, src_size); + + /* Adjust the factor for magnification. */ + if (f_scale > 1.0f) +@@ -189,7 +130,7 @@ int calculate_sync_table( + kernel_half = (int) (kernel_info->kernelSize >> 1); + + /* Calculate the subpixel step. */ +- f_subpixel_step = PHYTIUM_MATH_Divide(1.0f, PHYTIUM_MATH_I2Float(SUBPIXELCOUNT)); ++ f_subpixel_step = MATH_Divide(1.0f, MATH_I2Float(SUBPIXELCOUNT)); + + /* Init the subpixel offset. */ + f_subpixel_offset = 0.5f; +@@ -224,18 +165,18 @@ int calculate_sync_table( + fSubpixelSet[kernel_pos] = 1.0f; + } else { + /* Compute the x position for filter function. */ +- float fX = PHYTIUM_MATH_Add( +- PHYTIUM_MATH_I2Float(index - kernel_half), ++ float fX = MATH_Add( ++ MATH_I2Float(index - kernel_half), + f_subpixel_offset); +- fX = PHYTIUM_MATH_Multiply(fX, f_scale); ++ fX = MATH_Multiply(fX, f_scale); + + /* Compute the weight. */ +- fSubpixelSet[kernel_pos] = phytium_sinc_filter(fX, ++ fSubpixelSet[kernel_pos] = dc_sinc_filter(fX, + kernel_half); + } + + /* Update the sum of weights. */ +- fWeightSum = PHYTIUM_MATH_Add(fWeightSum, ++ fWeightSum = MATH_Add(fWeightSum, + fSubpixelSet[kernel_pos]); + } + } +@@ -243,7 +184,7 @@ int calculate_sync_table( + /* Adjust weights so that the sum will be 1.0. */ + for (kernel_pos = 0; kernel_pos < MAXKERNELSIZE; kernel_pos++) { + /* Normalize the current weight. 
*/ +- float fWeight = PHYTIUM_MATH_Divide(fSubpixelSet[kernel_pos], ++ float fWeight = MATH_Divide(fSubpixelSet[kernel_pos], + fWeightSum); + + /* Convert the weight to fixed point and store in the table. */ +@@ -255,7 +196,7 @@ int calculate_sync_table( + kernel_array[kernel_pos] = 0xC000; + else + kernel_array[kernel_pos] = +- (int16_t) PHYTIUM_MATH_Multiply(fWeight, 16384.0f); ++ (int16_t) MATH_Multiply(fWeight, 16384.0f); + weightSum += kernel_array[kernel_pos]; + } + +@@ -281,21 +222,22 @@ int calculate_sync_table( + kernel_array += MAXKERNELSIZE; + + /* Advance to the next subpixel. */ +- f_subpixel_offset = PHYTIUM_MATH_Add(f_subpixel_offset, -f_subpixel_step); ++ f_subpixel_offset = MATH_Add(f_subpixel_offset, -f_subpixel_step); + } + } while (0); + + return 0; + } + +-void phytium_crtc_scaling_config(struct drm_crtc *crtc, ++static void phytium_dc_scaling_config(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) + { + struct drm_device *dev = crtc->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; ++ struct phytium_display_private *priv = dev->dev_private; + struct drm_display_mode *mode = &crtc->state->adjusted_mode; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; ++ uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + uint32_t scale_factor_x, scale_factor_y, i; + uint32_t kernelStates[128]; + struct filter_blit_array kernel_info_width; +@@ -310,22 +252,22 @@ void phytium_crtc_scaling_config(struct drm_crtc *crtc, + phytium_crtc->dst_x = (mode->crtc_hdisplay - phytium_crtc->dst_width) / 2; + phytium_crtc->dst_y = (mode->crtc_vdisplay - phytium_crtc->dst_height) / 2; + +- scale_factor_x = phytium_crtc_scaling_get_factor(phytium_crtc->src_width, ++ scale_factor_x = dc_scaling_get_factor(phytium_crtc->src_width, + phytium_crtc->dst_width); +- scale_factor_y = phytium_crtc_scaling_get_factor(phytium_crtc->src_height, ++ scale_factor_y = 
dc_scaling_get_factor(phytium_crtc->src_height, + phytium_crtc->dst_height); + if (scale_factor_y > (SCALE_FACTOR_Y_MAX << SCALE_FACTOR_SRC_OFFSET)) + scale_factor_y = (SCALE_FACTOR_Y_MAX << SCALE_FACTOR_SRC_OFFSET); + + phytium_writel_reg(priv, scale_factor_x & SCALE_FACTOR_X_MASK, +- PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_X(phys_pipe)); ++ group_offset, PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_X); + phytium_writel_reg(priv, scale_factor_y & SCALE_FACTOR_Y_MASK, +- PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_Y(phys_pipe)); ++ group_offset, PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_Y); + phytium_writel_reg(priv, FRAMEBUFFER_TAP, +- PHYTIUM_DC_FRAMEBUFFER_SCALECONFIG(phys_pipe)); ++ group_offset, PHYTIUM_DC_FRAMEBUFFER_SCALECONFIG); + + tmp = kmalloc(KERNELSTATES, GFP_KERNEL); +- if (IS_ERR(tmp)) { ++ if (!tmp) { + DRM_ERROR("malloc %ld failed\n", KERNELSTATES); + return; + } +@@ -334,34 +276,36 @@ void phytium_crtc_scaling_config(struct drm_crtc *crtc, + kernel_info_width.kernelStates = tmp; + memset(kernel_info_width.kernelStates, 0, KERNELSTATES); + kernel_neon_begin(); +- calculate_sync_table(FRAMEBUFFER_HORIZONTAL_FILTER_TAP, phytium_crtc->src_width, +- phytium_crtc->dst_width, &kernel_info_width); ++ dc_calculate_sync_table(FRAMEBUFFER_HORIZONTAL_FILTER_TAP, ++ phytium_crtc->src_width, ++ phytium_crtc->dst_width, ++ &kernel_info_width); + memset(kernelStates, 0, sizeof(kernelStates)); + memcpy(kernelStates, kernel_info_width.kernelStates + 1, KERNELSTATES - 4); + kernel_neon_end(); + phytium_writel_reg(priv, HORI_FILTER_INDEX, +- PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER_INDEX(phys_pipe)); ++ group_offset, PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER_INDEX); + for (i = 0; i < 128; i++) { + phytium_writel_reg(priv, kernelStates[i], +- PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER(phys_pipe)); ++ group_offset, PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER); + } + + memset(&kernel_info_width, 0, sizeof(struct filter_blit_array)); + kernel_info_width.kernelStates = tmp; + memset(kernel_info_width.kernelStates, 0, 
KERNELSTATES); + kernel_neon_begin(); +- calculate_sync_table(FRAMEBUFFER_FILTER_TAP, phytium_crtc->src_height, ++ dc_calculate_sync_table(FRAMEBUFFER_FILTER_TAP, phytium_crtc->src_height, + phytium_crtc->dst_height, &kernel_info_width); + memset(kernelStates, 0, sizeof(kernelStates)); + memcpy(kernelStates, kernel_info_width.kernelStates + 1, KERNELSTATES - 4); + kernel_neon_end(); + phytium_writel_reg(priv, VERT_FILTER_INDEX, +- PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER_INDEX(phys_pipe)); ++ group_offset, PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER_INDEX); + for (i = 0; i < 128; i++) + phytium_writel_reg(priv, kernelStates[i], +- PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER(phys_pipe)); ++ group_offset, PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER); + phytium_writel_reg(priv, INITIALOFFSET, +- PHYTIUM_DC_FRAMEBUFFER_INITIALOFFSET(phys_pipe)); ++ group_offset, PHYTIUM_DC_FRAMEBUFFER_INITIALOFFSET); + kfree(tmp); + phytium_crtc->scale_enable = true; + } else { +@@ -369,19 +313,118 @@ void phytium_crtc_scaling_config(struct drm_crtc *crtc, + } + } + ++static void phytium_crtc_gamma_set(struct drm_crtc *crtc) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); ++ int phys_pipe = phytium_crtc->phys_pipe; ++ uint32_t group_offset = priv->dc_reg_base[phys_pipe]; ++ uint32_t config = 0; ++ struct drm_crtc_state *state = crtc->state; ++ struct drm_color_lut *lut; ++ int i; ++ ++ if (state->gamma_lut) { ++ if (WARN((state->gamma_lut->length/sizeof(struct drm_color_lut) != GAMMA_INDEX_MAX), ++ "gamma size is not match\n")) ++ return; ++ lut = (struct drm_color_lut *)state->gamma_lut->data; ++ for (i = 0; i < GAMMA_INDEX_MAX; i++) { ++ phytium_writel_reg(priv, i, group_offset, PHYTIUM_DC_GAMMA_INDEX); ++ config = ((lut[i].red >> 6) & GAMMA_RED_MASK) << GAMMA_RED_SHIFT; ++ config |= (((lut[i].green >> 6) & GAMMA_GREEN_MASK) << GAMMA_GREEN_SHIFT); ++ config |= (((lut[i].blue >> 6) & GAMMA_BLUE_MASK) 
<< GAMMA_BLUE_SHIFT); ++ phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_GAMMA_DATA); ++ } ++ } ++} ++ ++static void phytium_crtc_gamma_init(struct drm_crtc *crtc) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); ++ int phys_pipe = phytium_crtc->phys_pipe; ++ uint32_t group_offset = priv->dc_reg_base[phys_pipe]; ++ uint32_t config = 0; ++ uint16_t *red, *green, *blue; ++ int i; ++ ++ if (WARN((crtc->gamma_size != GAMMA_INDEX_MAX), "gamma size is not match\n")) ++ return; ++ ++ red = crtc->gamma_store; ++ green = red + crtc->gamma_size; ++ blue = green + crtc->gamma_size; ++ ++ for (i = 0; i < GAMMA_INDEX_MAX; i++) { ++ phytium_writel_reg(priv, i, group_offset, PHYTIUM_DC_GAMMA_INDEX); ++ config = ((*red++ >> 6) & GAMMA_RED_MASK) << GAMMA_RED_SHIFT; ++ config |= (((*green++ >> 6) & GAMMA_GREEN_MASK) << GAMMA_GREEN_SHIFT); ++ config |= (((*blue++ >> 6) & GAMMA_BLUE_MASK) << GAMMA_BLUE_SHIFT); ++ phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_GAMMA_DATA); ++ } ++} ++ ++static void phytium_crtc_destroy(struct drm_crtc *crtc) ++{ ++ struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); ++ ++ drm_crtc_cleanup(crtc); ++ kfree(phytium_crtc); ++} ++ ++struct drm_crtc_state * ++phytium_crtc_atomic_duplicate_state(struct drm_crtc *crtc) ++{ ++ struct phytium_crtc_state *phytium_crtc_state = NULL; ++ ++ phytium_crtc_state = kmemdup(crtc->state, sizeof(*phytium_crtc_state), ++ GFP_KERNEL); ++ if (!phytium_crtc_state) ++ return NULL; ++ __drm_atomic_helper_crtc_duplicate_state(crtc, ++ &phytium_crtc_state->base); ++ ++ return &phytium_crtc_state->base; ++} ++ ++void ++phytium_crtc_atomic_destroy_state(struct drm_crtc *crtc, ++ struct drm_crtc_state *state) ++{ ++ struct phytium_crtc_state *phytium_crtc_state = ++ to_phytium_crtc_state(state); ++ ++ phytium_crtc_state = to_phytium_crtc_state(state); ++ 
__drm_atomic_helper_crtc_destroy_state(state); ++ kfree(phytium_crtc_state); ++} ++ ++static const struct drm_crtc_funcs phytium_crtc_funcs = { ++ .gamma_set = drm_atomic_helper_legacy_gamma_set, ++ .set_config = drm_atomic_helper_set_config, ++ .destroy = phytium_crtc_destroy, ++ .page_flip = drm_atomic_helper_page_flip, ++ .reset = drm_atomic_helper_crtc_reset, ++ .atomic_duplicate_state = phytium_crtc_atomic_duplicate_state, ++ .atomic_destroy_state = phytium_crtc_atomic_destroy_state, ++}; ++ + static void + phytium_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) + { + struct drm_device *dev = crtc->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; ++ struct phytium_display_private *priv = dev->dev_private; + struct drm_display_mode *mode = &crtc->state->adjusted_mode; + struct drm_atomic_state *state = old_state->state; + struct drm_connector_state *new_conn_state; + struct drm_connector *conn; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; +- int timeout = 100; ++ uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + int config = 0, i = 0; + + for_each_new_connector_in_state(state, conn, new_conn_state, i) { +@@ -402,44 +445,31 @@ phytium_crtc_atomic_enable(struct drm_crtc *crtc, + } + + /* config pix clock */ +- timeout = 100; +- phytium_writel_reg(priv, (mode->clock & PIX_CLOCK_MASK) | FLAG_REQUEST, +- PHYTIUM_DCREQ_PIX_CLOCK_CONFIG(phys_pipe)); ++ phytium_crtc->dc_hw_config_pix_clock(crtc, mode->clock); + +- do { +- mdelay(10); +- timeout--; +- config = phytium_readl_reg(priv, PHYTIUM_DCREQ_PIX_CLOCK_CONFIG(phys_pipe)); +- } while ((!(config & FLAG_REPLY)) && timeout); +- if (timeout == 0) +- DRM_ERROR("config pix clock(%d kHz) failed\n", mode->clock); +- phytium_writel_reg(priv, mode->clock & PIX_CLOCK_MASK, +- PHYTIUM_DCREQ_PIX_CLOCK_CONFIG(phys_pipe)); +- mdelay(20); +- +- phytium_crtc_scaling_config(crtc, old_state); ++ 
phytium_dc_scaling_config(crtc, old_state); + config = ((mode->crtc_hdisplay & HDISPLAY_END_MASK) << HDISPLAY_END_SHIFT) + | ((mode->crtc_htotal&HDISPLAY_TOTAL_MASK) << HDISPLAY_TOTAL_SHIFT); +- phytium_writel_reg(priv, config, PHYTIUM_DC_HDISPLAY(phys_pipe)); ++ phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_HDISPLAY); + config = ((mode->crtc_hsync_start & HSYNC_START_MASK) << HSYNC_START_SHIFT) + | ((mode->crtc_hsync_end & HSYNC_END_MASK) << HSYNC_END_SHIFT) + | HSYNC_PULSE_ENABLED; + config |= (mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : HSYNC_NEGATIVE; +- phytium_writel_reg(priv, config, PHYTIUM_DC_HSYNC(phys_pipe)); ++ phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_HSYNC); + config = ((mode->crtc_vdisplay & VDISPLAY_END_MASK) << VDISPLAY_END_SHIFT) + | ((mode->crtc_vtotal & VDISPLAY_TOTAL_MASK) << VDISPLAY_TOTAL_SHIFT); +- phytium_writel_reg(priv, config, PHYTIUM_DC_VDISPLAY(phys_pipe)); ++ phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_VDISPLAY); + config = ((mode->crtc_vsync_start & VSYNC_START_MASK) << VSYNC_START_SHIFT) + | ((mode->crtc_vsync_end & VSYNC_END_MASK) << VSYNC_END_SHIFT) + | VSYNC_PULSE_ENABLED; + config |= (mode->flags & DRM_MODE_FLAG_PVSYNC) ? 
0 : VSYNC_NEGATIVE; +- phytium_writel_reg(priv, config, PHYTIUM_DC_VSYNC(phys_pipe)); ++ phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_VSYNC); + config = PANEL_DATAENABLE_ENABLE | PANEL_DATA_ENABLE | PANEL_CLOCK_ENABLE; +- phytium_writel_reg(priv, config, PHYTIUM_DC_PANEL_CONFIG(phys_pipe)); ++ phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_PANEL_CONFIG); + config = phytium_crtc->bpc | OUTPUT_DP; +- phytium_writel_reg(priv, config, PHYTIUM_DC_DP_CONFIG(phys_pipe)); ++ phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_DP_CONFIG); + +- config = phytium_readl_reg(priv, PHYTIUM_DC_FRAMEBUFFER_CONFIG(phys_pipe)); ++ config = phytium_readl_reg(priv, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + + if (crtc->state->active) + config |= FRAMEBUFFER_OUTPUT | FRAMEBUFFER_RESET; +@@ -451,9 +481,9 @@ phytium_crtc_atomic_enable(struct drm_crtc *crtc, + else + config &= (~FRAMEBUFFER_SCALE_ENABLE); + +- phytium_writel_reg(priv, config, +- PHYTIUM_DC_FRAMEBUFFER_CONFIG(phys_pipe)); ++ config |= FRAMEBUFFER_GAMMA_ENABLE; + ++ phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + drm_crtc_vblank_on(crtc); + } + +@@ -461,59 +491,10 @@ static void + phytium_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) + { +- struct drm_device *dev = crtc->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); +- int reset_timeout = 100; +- int config = 0; +- int phys_pipe = phytium_crtc->phys_pipe; + + drm_crtc_vblank_off(crtc); +- +- config = phytium_readl_reg(priv, PHYTIUM_DC_CLOCK_CONTROL(phys_pipe)); +- phytium_writel_reg(priv, config | SOFT_RESET, PHYTIUM_DC_CLOCK_CONTROL(phys_pipe)); +- phytium_writel_reg(priv, 0, PHYTIUM_DC_CLOCK_CONTROL(phys_pipe)); +- do { +- config = phytium_readl_reg(priv, PHYTIUM_DC_CLOCK_IDLE(phys_pipe)); +- if (config | IS_IDLE) +- break; +- mdelay(1); +- reset_timeout--; +- } while (reset_timeout); +- +- 
/* reset pix clock */ +- reset_timeout = 100; +- phytium_writel_reg(priv, (0x0 & PIX_CLOCK_MASK) | FLAG_REQUEST, +- PHYTIUM_DCREQ_PIX_CLOCK_CONFIG(phys_pipe)); +- do { +- mdelay(10); +- reset_timeout--; +- config = phytium_readl_reg(priv, PHYTIUM_DCREQ_PIX_CLOCK_CONFIG(phys_pipe)); +- } while ((!(config & FLAG_REPLY)) && reset_timeout); +- if (reset_timeout == 0) +- DRM_ERROR("reset pix clock failed\n"); +- phytium_writel_reg(priv, 0x0 & PIX_CLOCK_MASK, +- PHYTIUM_DCREQ_PIX_CLOCK_CONFIG(phys_pipe)); +- mdelay(20); +- +- config = phytium_readl_reg(priv, PHYTIUM_DC_CLOCK_CONTROL(phys_pipe)); +- phytium_writel_reg(priv, config | SOFT_RESET, PHYTIUM_DC_CLOCK_CONTROL(phys_pipe)); +- phytium_writel_reg(priv, 0, PHYTIUM_DC_CLOCK_CONTROL(phys_pipe)); +- do { +- config = phytium_readl_reg(priv, PHYTIUM_DC_CLOCK_IDLE(phys_pipe)); +- if (config | IS_IDLE) +- break; +- mdelay(1); +- reset_timeout--; +- } while (reset_timeout); +- +- /* reset dcreq */ +- phytium_writel_reg(priv, DCREQ_PLAN_A, PHYTIUM_DCREQ_PLAN(phys_pipe)); +- phytium_writel_reg(priv, 0, PHYTIUM_DCREQ_CONTROL(phys_pipe)); +- phytium_writel_reg(priv, DCREQ_RESET, PHYTIUM_DCREQ_RESET(phys_pipe)); +- msleep(20); +- phytium_writel_reg(priv, (~DCREQ_RESET)&DCREQ_RESET_MASK, +- PHYTIUM_DCREQ_RESET(phys_pipe)); ++ phytium_crtc->dc_hw_disable(crtc); + } + + static void phytium_crtc_update_timing_for_drm_display_mode(struct drm_display_mode *drm_mode, +@@ -577,15 +558,15 @@ phytium_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) + { + struct drm_device *dev = crtc->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; ++ struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); +- int phys_pipe = phytium_crtc->phys_pipe; +- int config = 0; ++ int phys_pipe = phytium_crtc->phys_pipe, config; ++ uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + +- config = phytium_readl_reg(priv, 
PHYTIUM_DC_FRAMEBUFFER_CONFIG(phys_pipe)); ++ config = phytium_readl_reg(priv, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + if (config & FRAMEBUFFER_RESET) { + phytium_writel_reg(priv, config | FRAMEBUFFER_VALID_PENDING, +- PHYTIUM_DC_FRAMEBUFFER_CONFIG(phys_pipe)); ++ group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + } + } + +@@ -593,19 +574,23 @@ static void phytium_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) + { + struct drm_device *dev = crtc->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; ++ struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + struct phytium_crtc_state *phytium_crtc_state = NULL; +- int phys_pipe = phytium_crtc->phys_pipe; +- int config; ++ int phys_pipe = phytium_crtc->phys_pipe, config; ++ uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + + DRM_DEBUG_KMS("crtc->state active:%d enable:%d\n", + crtc->state->active, crtc->state->enable); + phytium_crtc_state = to_phytium_crtc_state(crtc->state); +- config = phytium_readl_reg(priv, PHYTIUM_DC_FRAMEBUFFER_CONFIG(phys_pipe)); + ++ if (crtc->state->color_mgmt_changed) ++ phytium_crtc_gamma_set(crtc); ++ ++ config = phytium_readl_reg(priv, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + phytium_writel_reg(priv, config&(~FRAMEBUFFER_VALID_PENDING), +- PHYTIUM_DC_FRAMEBUFFER_CONFIG(phys_pipe)); ++ group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); ++ + if (crtc->state->event) { + DRM_DEBUG_KMS("vblank->refcount:%d\n", + atomic_read(&dev->vblank[0].refcount)); +@@ -622,13 +607,16 @@ static void phytium_crtc_atomic_flush(struct drm_crtc *crtc, + static enum drm_mode_status + phytium_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode) + { +- if (mode->crtc_clock > PIX_CLOCK_MAX) ++ struct drm_device *dev = crtc->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ ++ if (mode->crtc_clock > priv->info.crtc_clock_max) + return 
MODE_CLOCK_HIGH; + +- if(mode->hdisplay > HDISPLAY_END_MAX) ++ if (mode->hdisplay > priv->info.hdisplay_max) + return MODE_BAD_HVALUE; + +- if(mode->vdisplay > VDISPLAY_END_MAX) ++ if (mode->vdisplay > priv->info.vdisplay_max) + return MODE_BAD_VVALUE; + + if (mode->flags & DRM_MODE_FLAG_INTERLACE) +@@ -646,22 +634,32 @@ static const struct drm_crtc_helper_funcs phytium_crtc_helper_funcs = { + .atomic_disable = phytium_crtc_atomic_disable, + }; + ++void phytium_crtc_resume(struct drm_device *drm_dev) ++{ ++ struct drm_crtc *crtc; ++ ++ drm_for_each_crtc(crtc, drm_dev) { ++ phytium_crtc_gamma_init(crtc); ++ } ++} ++ + int phytium_crtc_init(struct drm_device *dev, int phys_pipe) + { + struct phytium_crtc *phytium_crtc; + struct phytium_crtc_state *phytium_crtc_state; + struct phytium_plane *phytium_primary_plane = NULL; + struct phytium_plane *phytium_cursor_plane = NULL; ++ struct phytium_display_private *priv = dev->dev_private; + int ret; + + phytium_crtc = kzalloc(sizeof(*phytium_crtc), GFP_KERNEL); +- if (IS_ERR(phytium_crtc)) { ++ if (!phytium_crtc) { + ret = -ENOMEM; + goto failed_malloc_crtc; + } + + phytium_crtc_state = kzalloc(sizeof(*phytium_crtc_state), GFP_KERNEL); +- if (IS_ERR(phytium_crtc_state)) { ++ if (!phytium_crtc_state) { + ret = -ENOMEM; + goto failed_malloc_crtc_state; + } +@@ -670,6 +668,14 @@ int phytium_crtc_init(struct drm_device *dev, int phys_pipe) + phytium_crtc->base.state = &phytium_crtc_state->base; + phytium_crtc->phys_pipe = phys_pipe; + ++ if (IS_X100(priv)) { ++ phytium_crtc->dc_hw_config_pix_clock = x100_dc_hw_config_pix_clock; ++ phytium_crtc->dc_hw_disable = x100_dc_hw_disable; ++ priv->dc_reg_base[phys_pipe] = X100_DC_BASE(phys_pipe); ++ priv->dcreq_reg_base[phys_pipe] = X100_DCREQ_BASE(phys_pipe); ++ priv->address_transform_base = X100_ADDRESS_TRANSFORM_BASE; ++ } ++ + phytium_primary_plane = phytium_primary_plane_create(dev, phys_pipe); + if (IS_ERR(phytium_primary_plane)) { + ret = PTR_ERR(phytium_primary_plane); +@@ 
-696,6 +702,9 @@ int phytium_crtc_init(struct drm_device *dev, int phys_pipe) + } + drm_crtc_helper_add(&phytium_crtc->base, &phytium_crtc_helper_funcs); + drm_crtc_vblank_reset(&phytium_crtc->base); ++ drm_mode_crtc_set_gamma_size(&phytium_crtc->base, GAMMA_INDEX_MAX); ++ drm_crtc_enable_color_mgmt(&phytium_crtc->base, 0, false, GAMMA_INDEX_MAX); ++ phytium_crtc_gamma_init(&phytium_crtc->base); + + return 0; + +diff --git a/drivers/gpu/drm/phytium/phytium_crtc.h b/drivers/gpu/drm/phytium/phytium_crtc.h +index 9928289a36ad..125a99b42660 100644 +--- a/drivers/gpu/drm/phytium/phytium_crtc.h ++++ b/drivers/gpu/drm/phytium/phytium_crtc.h +@@ -1,15 +1,7 @@ + /* SPDX-License-Identifier: GPL-2.0 */ +-/* Phytium X100 display drm driver ++/* Phytium display drm driver + * +- * Copyright (c) 2021 Phytium Limited. +- * +- * Author: +- * Yang Xun +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License as published by +- * the Free Software Foundation; either version 2 of the License, or +- * (at your option) any later version. ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. 
+ */ + + #ifndef __PHYTIUM_CRTC_H__ +@@ -29,6 +21,9 @@ struct phytium_crtc { + uint32_t dst_y; + bool scale_enable; + bool reserve[3]; ++ ++ void (*dc_hw_config_pix_clock)(struct drm_crtc *crtc, int clock); ++ void (*dc_hw_disable)(struct drm_crtc *crtc); + }; + + struct phytium_crtc_state { +@@ -38,7 +33,6 @@ struct phytium_crtc_state { + #define to_phytium_crtc(x) container_of(x, struct phytium_crtc, base) + #define to_phytium_crtc_state(x) container_of(x, struct phytium_crtc_state, base) + ++void phytium_crtc_resume(struct drm_device *drm_dev); + int phytium_crtc_init(struct drm_device *dev, int pipe); + #endif /* __PHYTIUM_CRTC_H__ */ +- +- +diff --git a/drivers/gpu/drm/phytium/phytium_debugfs.c b/drivers/gpu/drm/phytium/phytium_debugfs.c +index 7fa58b5a06e7..b38deafcf874 100644 +--- a/drivers/gpu/drm/phytium/phytium_debugfs.c ++++ b/drivers/gpu/drm/phytium/phytium_debugfs.c +@@ -1,15 +1,7 @@ + // SPDX-License-Identifier: GPL-2.0 +-/* Phytium X100 display drm driver ++/* Phytium display drm driver + * +- * Copyright (c) 2021 Phytium Limited. +- * +- * Author: +- * Yang Xun +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License as published by +- * the Free Software Foundation; either version 2 of the License, or +- * (at your option) any later version. ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. 
+ */ + + #include +@@ -43,72 +35,73 @@ static int phytium_dp_register_show(struct seq_file *m, void *data) + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + struct drm_device *dev = phytium_dp->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; ++ struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; +- +- seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_M_VID(port), +- phytium_readl_reg(priv, PHYTIUM_DP_M_VID(port))); +- seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_N_VID(port), +- phytium_readl_reg(priv, PHYTIUM_DP_N_VID(port))); +- seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_TRANSFER_UNIT_SIZE(port), +- phytium_readl_reg(priv, PHYTIUM_DP_TRANSFER_UNIT_SIZE(port))); +- seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_DATA_COUNT(port), +- phytium_readl_reg(priv, PHYTIUM_DP_DATA_COUNT(port))); +- seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_MAIN_LINK_HTOTAL(port), +- phytium_readl_reg(priv, PHYTIUM_DP_MAIN_LINK_HTOTAL(port))); +- seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_MAIN_LINK_HRES(port), +- phytium_readl_reg(priv, PHYTIUM_DP_MAIN_LINK_HRES(port))); +- seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_MAIN_LINK_HSWIDTH(port), +- phytium_readl_reg(priv, PHYTIUM_DP_MAIN_LINK_HSWIDTH(port))); +- seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_MAIN_LINK_HSTART(port), +- phytium_readl_reg(priv, PHYTIUM_DP_MAIN_LINK_HSTART(port))); +- seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VTOTAL(port), +- phytium_readl_reg(priv, PHYTIUM_DP_MAIN_LINK_VTOTAL(port))); +- seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VRES(port), +- phytium_readl_reg(priv, PHYTIUM_DP_MAIN_LINK_VRES(port))); +- seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VSWIDTH(port), +- phytium_readl_reg(priv, PHYTIUM_DP_MAIN_LINK_VSWIDTH(port))); +- seq_printf(m, "addr:h'0x%08x h'0x%08x\n", 
PHYTIUM_DP_MAIN_LINK_VSTART(port), +- phytium_readl_reg(priv, PHYTIUM_DP_MAIN_LINK_VSTART(port))); +- seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_POLARITY(port), +- phytium_readl_reg(priv, PHYTIUM_DP_MAIN_LINK_POLARITY(port))); +- seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_MISC0(port), +- phytium_readl_reg(priv, PHYTIUM_DP_MAIN_LINK_MISC0(port))); +- seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_MISC1(port), +- phytium_readl_reg(priv, PHYTIUM_DP_MAIN_LINK_MISC1(port))); +- seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_USER_SYNC_POLARITY(port), +- phytium_readl_reg(priv, PHYTIUM_DP_USER_SYNC_POLARITY(port))); +- seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_VIDEO_STREAM_ENABLE(port), +- phytium_readl_reg(priv, PHYTIUM_DP_VIDEO_STREAM_ENABLE(port))); +- seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SECONDARY_STREAM_ENABLE(port), +- phytium_readl_reg(priv, PHYTIUM_DP_SECONDARY_STREAM_ENABLE(port))); ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ ++ seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_M_VID, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_M_VID)); ++ seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_N_VID, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_N_VID)); ++ seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_TRANSFER_UNIT_SIZE, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_TRANSFER_UNIT_SIZE)); ++ seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_DATA_COUNT, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_DATA_COUNT)); ++ seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_MAIN_LINK_HTOTAL, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HTOTAL)); ++ seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_MAIN_LINK_HRES, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HRES)); ++ seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_MAIN_LINK_HSWIDTH, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HSWIDTH)); ++ 
seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_MAIN_LINK_HSTART, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HSTART)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VTOTAL, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VTOTAL)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VRES, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VRES)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VSWIDTH, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VSWIDTH)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VSTART, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VSTART)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_POLARITY, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_POLARITY)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_MISC0, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_MISC0)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_MISC1, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_MISC1)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_USER_SYNC_POLARITY, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_USER_SYNC_POLARITY)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_VIDEO_STREAM_ENABLE, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SECONDARY_STREAM_ENABLE, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE)); + seq_puts(m, "audio:\n"); +- seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_INPUT_SELECT(port), +- phytium_readl_reg(priv, PHYTIUM_DP_SEC_INPUT_SELECT(port))); +- seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_DIRECT_CLKDIV(port), +- phytium_readl_reg(priv, PHYTIUM_DP_SEC_DIRECT_CLKDIV(port))); +- seq_printf(m, "addr:h'0x%08x h'0x%08x\n", 
PHYTIUM_DP_SEC_CHANNEL_COUNT(port), +- phytium_readl_reg(priv, PHYTIUM_DP_SEC_CHANNEL_COUNT(port))); +- seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CHANNEL_MAP(port), +- phytium_readl_reg(priv, PHYTIUM_DP_SEC_CHANNEL_MAP(port))); +- seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_DATA_WINDOW(port), +- phytium_readl_reg(priv, PHYTIUM_DP_SEC_DATA_WINDOW(port))); +- seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_CATEGORY_CODE(port), +- phytium_readl_reg(priv, PHYTIUM_DP_SEC_CS_CATEGORY_CODE(port))); +- seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_MAUD(port), +- phytium_readl_reg(priv, PHYTIUM_DP_SEC_MAUD(port))); +- seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_NAUD(port), +- phytium_readl_reg(priv, PHYTIUM_DP_SEC_NAUD(port))); +- seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CLOCK_MODE(port), +- phytium_readl_reg(priv, PHYTIUM_DP_SEC_CLOCK_MODE(port))); +- seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_SOURCE_FORMAT(port), +- phytium_readl_reg(priv, PHYTIUM_DP_SEC_CS_SOURCE_FORMAT(port))); +- seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ(port), +- phytium_readl_reg(priv, PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ(port))); +- seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY(port), +- phytium_readl_reg(priv, PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY(port))); +- seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_AUDIO_ENABLE(port), +- phytium_readl_reg(priv, PHYTIUM_DP_SEC_AUDIO_ENABLE(port))); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_INPUT_SELECT, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_INPUT_SELECT)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_DIRECT_CLKDIV, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_DIRECT_CLKDIV)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CHANNEL_COUNT, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CHANNEL_COUNT)); ++ 
seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CHANNEL_MAP, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CHANNEL_MAP)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_DATA_WINDOW, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_DATA_WINDOW)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_CATEGORY_CODE, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_CATEGORY_CODE)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_MAUD, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_MAUD)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_NAUD, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_NAUD)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CLOCK_MODE, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CLOCK_MODE)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_SOURCE_FORMAT, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_SOURCE_FORMAT)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_AUDIO_ENABLE, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE)); + + return 0; + } +diff --git a/drivers/gpu/drm/phytium/phytium_debugfs.h b/drivers/gpu/drm/phytium/phytium_debugfs.h +index dbf74ba52302..37ca93c18821 100644 +--- a/drivers/gpu/drm/phytium/phytium_debugfs.h ++++ b/drivers/gpu/drm/phytium/phytium_debugfs.h +@@ -1,21 +1,13 @@ + /* SPDX-License-Identifier: GPL-2.0 */ +-/* Phytium X100 display drm driver ++/* Phytium display drm driver + * +- * Copyright (c) 2021 Phytium Limited. 
+- * +- * Author: +- * Yang Xun +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License as published by +- * the Free Software Foundation; either version 2 of the License, or +- * (at your option) any later version. ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + + #ifndef __PHYTIUM_DEBUGFS_H__ + #define __PHYTIUM_DEBUGFS_H__ + +-int phytium_debugfs_register(struct phytium_display_drm_private *priv); ++int phytium_debugfs_register(struct phytium_display_private *priv); + int phytium_debugfs_connector_add(struct drm_connector *connector); + + #endif /* __PHYTIUM_DEBUGFS_H__ */ +diff --git a/drivers/gpu/drm/phytium/phytium_display_drv.c b/drivers/gpu/drm/phytium/phytium_display_drv.c +index b5d439d08aa7..49a66740388f 100644 +--- a/drivers/gpu/drm/phytium/phytium_display_drv.c ++++ b/drivers/gpu/drm/phytium/phytium_display_drv.c +@@ -1,15 +1,7 @@ + // SPDX-License-Identifier: GPL-2.0 +-/* Phytium X100 display drm driver ++/* Phytium display drm driver + * +- * Copyright (c) 2021 Phytium Limited. +- * +- * Author: +- * Yang Xun +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License as published by +- * the Free Software Foundation; either version 2 of the License, or +- * (at your option) any later version. ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. 
+ */ + + #include +@@ -20,17 +12,14 @@ + #include + #include + #include "phytium_display_drv.h" +-#include "phytium_reg.h" + #include "phytium_plane.h" + #include "phytium_crtc.h" + #include "phytium_dp.h" + #include "phytium_gem.h" + #include "phytium_fb.h" + #include "phytium_fbdev.h" +- +-static int dc_msi_enable; +-module_param(dc_msi_enable, int, 0644); +-MODULE_PARM_DESC(dc_msi_enable, "Enable DC msi interrupt (0-disabled; 1-enabled; default-0)"); ++#include "phytium_reg.h" ++#include "phytium_pci.h" + + int dc_fake_mode_enable; + module_param(dc_fake_mode_enable, int, 0644); +@@ -48,50 +37,85 @@ int source_max_lane_count = 4; + module_param(source_max_lane_count, int, 0644); + MODULE_PARM_DESC(source_max_lane_count, "set the source lane count (1-1lane; 2-2lane; 4-4lane; default-4)"); + +-int link_dynamic_adjust = 1; ++int link_dynamic_adjust; + module_param(link_dynamic_adjust, int, 0644); + MODULE_PARM_DESC(link_dynamic_adjust, "dynamic select the train pamameter according to the display mode (0-disabled; 1-enabled; default-1)"); + +-void phytium_irq_preinstall(struct drm_device *dev) ++int phytium_wait_cmd_done(struct phytium_display_private *priv, ++ uint32_t register_offset, ++ uint32_t request_bit, ++ uint32_t reply_bit) ++{ ++ int timeout = 500, config = 0, ret = 0; ++ ++ do { ++ mdelay(1); ++ timeout--; ++ config = phytium_readl_reg(priv, 0, register_offset); ++ } while ((!(config & reply_bit)) && timeout); ++ ++ phytium_writel_reg(priv, config & (~request_bit), 0, register_offset); ++ ++ if (timeout == 0) { ++ DRM_ERROR("wait cmd reply timeout\n"); ++ ret = -EBUSY; ++ } else { ++ timeout = 500; ++ do { ++ mdelay(1); ++ timeout--; ++ config = phytium_readl_reg(priv, 0, register_offset); ++ } while ((config & reply_bit) && timeout); ++ if (timeout == 0) { ++ DRM_ERROR("clear cmd timeout\n"); ++ ret = -EBUSY; ++ } ++ } ++ mdelay(5); ++ ++ return ret; ++} ++ ++static void phytium_irq_preinstall(struct drm_device *dev) + { +- struct 
phytium_display_drm_private *priv = dev->dev_private; ++ struct phytium_display_private *priv = dev->dev_private; + int i, status; + + for_each_pipe_masked(priv, i) { +- status = phytium_readl_reg(priv, PHYTIUM_DC_INT_STATUS(i)); +- phytium_writel_reg(priv, INT_DISABLE, PHYTIUM_DC_INT_ENABLE(i)); ++ status = phytium_readl_reg(priv, priv->dc_reg_base[i], PHYTIUM_DC_INT_STATUS); ++ phytium_writel_reg(priv, INT_DISABLE, priv->dc_reg_base[i], PHYTIUM_DC_INT_ENABLE); + } + } + + static void phytium_irq_uninstall(struct drm_device *dev) + { +- struct phytium_display_drm_private *priv = dev->dev_private; ++ struct phytium_display_private *priv = dev->dev_private; + int i, status; + + for_each_pipe_masked(priv, i) { +- status = phytium_readl_reg(priv, PHYTIUM_DC_INT_STATUS(i)); +- phytium_writel_reg(priv, INT_DISABLE, PHYTIUM_DC_INT_ENABLE(i)); ++ status = phytium_readl_reg(priv, priv->dc_reg_base[i], PHYTIUM_DC_INT_STATUS); ++ phytium_writel_reg(priv, INT_DISABLE, priv->dc_reg_base[i], PHYTIUM_DC_INT_ENABLE); + } + } + + static irqreturn_t phytium_display_irq_handler(int irq, void *data) + { + struct drm_device *dev = data; +- struct phytium_display_drm_private *priv = dev->dev_private; +- int status = 0; ++ struct phytium_display_private *priv = dev->dev_private; ++ bool enabled = 0; + int i = 0, virt_pipe = 0; + irqreturn_t ret = IRQ_NONE, ret1 = IRQ_NONE; + + for_each_pipe_masked(priv, i) { +- status = phytium_readl_reg(priv, PHYTIUM_DC_INT_STATUS(i)); +- +- if (status & INT_STATUS) { ++ enabled = phytium_readl_reg(priv, priv->dc_reg_base[i], PHYTIUM_DC_INT_STATUS); ++ if (enabled & INT_STATUS) { + virt_pipe = phytium_get_virt_pipe(priv, i); + if (virt_pipe < 0) + return IRQ_NONE; + drm_handle_vblank(dev, virt_pipe); + ret = IRQ_HANDLED; +- phytium_writel_reg(priv, MSI_CLEAR, PHYTIUM_DCREQ_MSI_CLEAR(i)); ++ if (priv->dc_hw_clear_msi_irq) ++ priv->dc_hw_clear_msi_irq(priv, i); + } + } + +@@ -104,84 +128,29 @@ static irqreturn_t phytium_display_irq_handler(int irq, void 
*data) + + static int phytium_enable_vblank(struct drm_device *dev, unsigned int virt_pipe) + { +- struct phytium_display_drm_private *priv = dev->dev_private; ++ struct phytium_display_private *priv = dev->dev_private; + int phys_pipe; + + phys_pipe = phytium_get_phys_pipe(priv, virt_pipe); + if (phys_pipe < 0) + return phys_pipe; +- phytium_writel_reg(priv, INT_ENABLE, PHYTIUM_DC_INT_ENABLE(phys_pipe)); ++ ++ phytium_writel_reg(priv, INT_ENABLE, priv->dc_reg_base[phys_pipe], PHYTIUM_DC_INT_ENABLE); + + return 0; + } + + static void phytium_disable_vblank(struct drm_device *dev, unsigned int virt_pipe) + { +- struct phytium_display_drm_private *priv = dev->dev_private; ++ struct phytium_display_private *priv = dev->dev_private; + int phys_pipe; + + phys_pipe = phytium_get_phys_pipe(priv, virt_pipe); + if (phys_pipe >= 0) +- phytium_writel_reg(priv, INT_DISABLE, PHYTIUM_DC_INT_ENABLE(phys_pipe)); ++ phytium_writel_reg(priv, INT_DISABLE, priv->dc_reg_base[phys_pipe], ++ PHYTIUM_DC_INT_ENABLE); + } + +-static const struct vm_operations_struct phytium_vm_ops = { +- .open = drm_gem_vm_open, +- .close = drm_gem_vm_close, +-}; +- +-static const struct drm_ioctl_desc phytium_ioctls[] = { +- /* for test, none so far */ +-}; +- +-static const struct file_operations phytium_drm_driver_fops = { +- .owner = THIS_MODULE, +- .open = drm_open, +- .release = drm_release, +- .unlocked_ioctl = drm_ioctl, +- .compat_ioctl = drm_compat_ioctl, +- .poll = drm_poll, +- .read = drm_read, +- .llseek = no_llseek, +- .mmap = phytium_gem_mmap, +-}; +- +-static struct drm_driver phytium_display_drm_driver = { +- .driver_features = DRIVER_HAVE_IRQ | +- DRIVER_IRQ_SHARED | +- DRIVER_PRIME | +- DRIVER_MODESET | +- DRIVER_ATOMIC | +- DRIVER_GEM, +- .lastclose = drm_fb_helper_lastclose, +- .irq_handler = phytium_display_irq_handler, +- .irq_preinstall = phytium_irq_preinstall, +- .irq_uninstall = phytium_irq_uninstall, +- .enable_vblank = phytium_enable_vblank, +- .disable_vblank = 
phytium_disable_vblank, +- .gem_free_object = phytium_gem_free_object, +- .gem_vm_ops = &phytium_vm_ops, +- .prime_handle_to_fd = drm_gem_prime_handle_to_fd, +- .prime_fd_to_handle = drm_gem_prime_fd_to_handle, +- .gem_prime_export = drm_gem_prime_export, +- .gem_prime_import = drm_gem_prime_import, +- .gem_prime_get_sg_table = phytium_gem_prime_get_sg_table, +- .gem_prime_import_sg_table = phytium_gem_prime_import_sg_table, +- .gem_prime_vmap = phytium_gem_prime_vmap, +- .gem_prime_vunmap = phytium_gem_prime_vunmap, +- .gem_prime_mmap = phytium_gem_prime_mmap, +- .dumb_create = phytium_gem_dumb_create, +- .dumb_destroy = phytium_gem_dumb_destroy, +- .ioctls = phytium_ioctls, +- .num_ioctls = ARRAY_SIZE(phytium_ioctls), +- .fops = &phytium_drm_driver_fops, +- .name = DRV_NAME, +- .desc = DRV_DESC, +- .date = DRV_DATE, +- .major = DRV_MAJOR, +- .minor = DRV_MINOR, +-}; +- + static const struct drm_mode_config_funcs phytium_mode_funcs = { + .fb_create = phytium_fb_create, + .output_poll_changed = drm_fb_helper_output_poll_changed, +@@ -207,7 +176,7 @@ static struct drm_mode_config_helper_funcs phytium_mode_config_helpers = { + + static int phytium_modeset_init(struct drm_device *dev) + { +- struct phytium_display_drm_private *priv = dev->dev_private; ++ struct phytium_display_private *priv = dev->dev_private; + int i = 0, ret; + + drm_mode_config_init(dev); +@@ -250,7 +219,7 @@ static int phytium_modeset_init(struct drm_device *dev) + return ret; + } + +-int phytium_get_virt_pipe(struct phytium_display_drm_private *priv, int phys_pipe) ++int phytium_get_virt_pipe(struct phytium_display_private *priv, int phys_pipe) + { + int i = 0; + int virt_pipe = 0; +@@ -266,7 +235,7 @@ int phytium_get_virt_pipe(struct phytium_display_drm_private *priv, int phys_pip + return -EINVAL; + } + +-int phytium_get_phys_pipe(struct phytium_display_drm_private *priv, int virt_pipe) ++int phytium_get_phys_pipe(struct phytium_display_private *priv, int virt_pipe) + { + int i = 0; + int tmp = 
0; +@@ -282,105 +251,10 @@ int phytium_get_phys_pipe(struct phytium_display_drm_private *priv, int virt_pip + return -EINVAL; + } + +-static int phytium_display_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ++static int phytium_display_load(struct drm_device *dev, unsigned long flags) + { +- struct phytium_display_drm_private *priv = NULL; +- struct drm_device *dev = NULL; +- struct phytium_device_info *phytium_info = (struct phytium_device_info *)ent->driver_data; +- int ret = 0, i = 0, config, timeout = 100; +- resource_size_t io_addr, io_size, vram_addr, vram_size; +- +- dev = drm_dev_alloc(&phytium_display_drm_driver, &pdev->dev); +- if (IS_ERR(dev)) { +- DRM_ERROR("failed to allocate drm_device\n"); +- return PTR_ERR(dev); +- } +- +- dev->pdev = pdev; +- pci_set_drvdata(pdev, dev); +- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); +- if (IS_ERR(priv)) { +- DRM_ERROR("no memory to allocate for drm_display_private\n"); +- goto failed_malloc_priv; +- } +- memset(priv, 0, sizeof(*priv)); +- INIT_LIST_HEAD(&priv->gem_list_head); +- spin_lock_init(&priv->hotplug_irq_lock); +- dev->dev_private = priv; +- priv->dev = dev; +- memcpy(&(priv->info), phytium_info, sizeof(struct phytium_device_info)); +- DRM_DEBUG_KMS("priv->info.num_pipes :%d\n", priv->info.num_pipes); +- priv->info.pipe_mask = ((pdev->subsystem_device >> PIPE_MASK_SHIFT) & PIPE_MASK_MASK); +- priv->info.edp_mask = ((pdev->subsystem_device >> EDP_MASK_SHIFT) & EDP_MASK_MASK); +- priv->info.num_pipes = 0; +- for_each_pipe_masked(priv, i) +- priv->info.num_pipes++; +- if (priv->info.num_pipes == 0) { +- DRM_ERROR("num_pipes is zero, so exit init\n"); +- goto failed_init_numpipe; +- } +- +- pci_set_master(pdev); +- ret = pci_enable_device(pdev); +- if (ret) { +- DRM_ERROR("pci enbale device fail\n"); +- goto failed_enable_device; +- } +- +- if (dc_msi_enable) { +- ret = pci_enable_msi(pdev); +- if (ret) +- DRM_ERROR("pci enbale msi fail\n"); +- } +- io_addr = 
pci_resource_start(pdev, 0); +- io_size = pci_resource_len(pdev, 0); +- priv->regs = ioremap(io_addr, io_size); +- if (priv->regs == NULL) { +- DRM_ERROR("pci bar0 ioremap fail, addr:0x%llx, size:0x%llx\n", io_addr, io_size); +- goto failed_ioremap; +- } +- +- vram_addr = pci_resource_start(pdev, 2); +- vram_size = pci_resource_len(pdev, 2); +- +- /* reset dc/dp */ +- for_each_pipe_masked(priv, i) { +- timeout = 100; +- phytium_writel_reg(priv, PHYTIUM_DC_DP_RESET_STATUS, 0); +- phytium_writel_reg(priv, CMD_DC_DP_RESET | FLAG_REQUEST, +- PHYTIUM_DCREQ_PIX_CLOCK_CONFIG(i)); +- do { +- mdelay(10); +- timeout--; +- config = phytium_readl_reg(priv, PHYTIUM_DC_DP_RESET_STATUS); +- if (config & DC_DP_RESET_STATUS(i)) +- break; +- } while (timeout); +- if (timeout == 0) { +- DRM_ERROR("reset dc/dp pipe(%d) failed\n", i); +- goto failed_reset; +- } +- } +- +- if ((vram_addr != 0) && (vram_size != 0)) { +- DRM_DEBUG_KMS("vram_addr:0x%llx vram_size: 0x%llx\n", vram_addr, vram_size); +- ret = dma_declare_coherent_memory(dev->dev, vram_addr, vram_addr, +- vram_size, DMA_MEMORY_EXCLUSIVE); +- if (ret) { +- DRM_ERROR("pci bar2 vram declare fail\n"); +- goto failed_declare_memory; +- } +- phytium_writel_reg(priv, (vram_addr & SRC_ADDR_MASK) >> SRC_ADDR_OFFSET, +- PHYTIUM_DC_ADDRESS_TRANSFORM_SRC_ADDR); +- phytium_writel_reg(priv, (vram_size >> SIZE_OFFSET) | ADDRESS_TRANSFORM_ENABLE, +- PHYTIUM_DC_ADDRESS_TRANSFORM_SIZE); +- config = phytium_readl_reg(priv, PHYTIUM_DC_ADDRESS_TRANSFORM_DST_ADDR); +- phytium_writel_reg(priv, config, PHYTIUM_DC_ADDRESS_TRANSFORM_DST_ADDR); +- priv->vram_support = true; +- } else { +- DRM_DEBUG_KMS("not support vram\n"); +- priv->vram_support = false; +- } ++ struct phytium_display_private *priv = dev->dev_private; ++ int ret = 0; + + ret = drm_vblank_init(dev, priv->info.num_pipes); + if (ret) { +@@ -394,116 +268,116 @@ static int phytium_display_probe(struct pci_dev *pdev, const struct pci_device_i + goto failed_modeset_init; + } + +- 
INIT_WORK(&priv->hotplug_work, phytium_dp_hpd_work_func); +- ret = drm_irq_install(dev, dev->pdev->irq); ++ if (priv->vram_support) ++ priv->vram_hw_init(priv); ++ ++ ret = drm_irq_install(dev, priv->irq); + if (ret) { + DRM_ERROR("install irq failed\n"); + goto failed_irq_install; + } + +- ret = drm_dev_register(dev, 0); +- if (ret) { +- DRM_ERROR("failed to register drm dev\n"); +- goto failed_register_drm; +- } +- + ret = phytium_drm_fbdev_init(dev); + if (ret) + DRM_ERROR("failed to init dev\n"); + +- phytium_dp_hpd_irq_setup(dev, true); +- +- return 0; ++ return ret; + +-failed_register_drm: +- drm_irq_uninstall(dev); + failed_irq_install: + drm_mode_config_cleanup(dev); + failed_modeset_init: + failed_vblank_init: +- dma_release_declared_memory(dev->dev); +-failed_declare_memory: +-failed_reset: +- iounmap(priv->regs); +-failed_ioremap: +- if (pdev->msi_enabled) +- pci_disable_msi(pdev); +- pci_disable_device(pdev); +-failed_enable_device: +-failed_init_numpipe: +- devm_kfree(&pdev->dev, priv); +-failed_malloc_priv: +- pci_set_drvdata(pdev, NULL); +- drm_dev_unref(dev); +- return -1; ++ return ret; + } + +-static void phytium_display_remove(struct pci_dev *pdev) ++static void phytium_display_unload(struct drm_device *dev) + { +- struct drm_device *dev = pci_get_drvdata(pdev); +- struct phytium_display_drm_private *priv = dev->dev_private; +- +- phytium_dp_hpd_irq_setup(dev, false); +- cancel_work_sync(&priv->hotplug_work); + phytium_drm_fbdev_fini(dev); +- drm_dev_unregister(dev); + drm_irq_uninstall(dev); + drm_mode_config_cleanup(dev); +- dma_release_declared_memory(dev->dev); +- iounmap(priv->regs); +- if (pdev->msi_enabled) +- pci_disable_msi(pdev); +- pci_disable_device(pdev); +- devm_kfree(&pdev->dev, priv); +- pci_set_drvdata(pdev, NULL); +- drm_dev_unref(dev); + } + +-static void phytium_display_shutdown(struct pci_dev *pdev) +-{ +- struct drm_device *dev = pci_get_drvdata(pdev); +- struct phytium_display_drm_private *priv = dev->dev_private; ++static 
const struct vm_operations_struct phytium_vm_ops = { ++ .open = drm_gem_vm_open, ++ .close = drm_gem_vm_close, ++}; ++ ++static const struct drm_ioctl_desc phytium_ioctls[] = { ++ /* for test, none so far */ ++}; ++ ++static const struct file_operations phytium_drm_driver_fops = { ++ .owner = THIS_MODULE, ++ .open = drm_open, ++ .release = drm_release, ++ .unlocked_ioctl = drm_ioctl, ++ .compat_ioctl = drm_compat_ioctl, ++ .poll = drm_poll, ++ .read = drm_read, ++ .llseek = no_llseek, ++ .mmap = phytium_gem_mmap, ++}; + ++struct drm_driver phytium_display_drm_driver = { ++ .driver_features = DRIVER_HAVE_IRQ | ++ DRIVER_IRQ_SHARED | ++ DRIVER_PRIME | ++ DRIVER_MODESET | ++ DRIVER_ATOMIC | ++ DRIVER_GEM, ++ .load = phytium_display_load, ++ .unload = phytium_display_unload, ++ .lastclose = drm_fb_helper_lastclose, ++ .irq_handler = phytium_display_irq_handler, ++ .irq_preinstall = phytium_irq_preinstall, ++ .irq_uninstall = phytium_irq_uninstall, ++ .enable_vblank = phytium_enable_vblank, ++ .disable_vblank = phytium_disable_vblank, ++ .gem_free_object = phytium_gem_free_object, ++ .gem_vm_ops = &phytium_vm_ops, ++ .prime_handle_to_fd = drm_gem_prime_handle_to_fd, ++ .prime_fd_to_handle = drm_gem_prime_fd_to_handle, ++ .gem_prime_export = drm_gem_prime_export, ++ .gem_prime_import = drm_gem_prime_import, ++ .gem_prime_get_sg_table = phytium_gem_prime_get_sg_table, ++ .gem_prime_import_sg_table = phytium_gem_prime_import_sg_table, ++ .gem_prime_vmap = phytium_gem_prime_vmap, ++ .gem_prime_vunmap = phytium_gem_prime_vunmap, ++ .gem_prime_mmap = phytium_gem_prime_mmap, ++ .dumb_create = phytium_gem_dumb_create, ++ .dumb_destroy = phytium_gem_dumb_destroy, ++ .ioctls = phytium_ioctls, ++ .num_ioctls = ARRAY_SIZE(phytium_ioctls), ++ .fops = &phytium_drm_driver_fops, ++ .name = DRV_NAME, ++ .desc = DRV_DESC, ++ .date = DRV_DATE, ++ .major = DRV_MAJOR, ++ .minor = DRV_MINOR, ++}; ++ ++static void phytium_display_shutdown(struct drm_device *dev) ++{ + 
drm_atomic_helper_shutdown(dev); +- phytium_dp_hpd_irq_setup(dev, false); +- cancel_work_sync(&priv->hotplug_work); +- phytium_drm_fbdev_fini(dev); +- drm_dev_unregister(dev); +- drm_irq_uninstall(dev); +- drm_mode_config_cleanup(dev); +- dma_release_declared_memory(dev->dev); +- iounmap(priv->regs); +- if (pdev->msi_enabled) +- pci_disable_msi(pdev); +- pci_disable_device(pdev); +- devm_kfree(&pdev->dev, priv); +- pci_set_drvdata(pdev, NULL); +- drm_dev_unref(dev); + } + +-static int phytium_display_pm_suspend(struct device *dev) ++static int phytium_display_pm_suspend(struct drm_device *dev) + { +- struct pci_dev *pdev = to_pci_dev(dev); +- struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct drm_atomic_state *state; +- struct phytium_display_drm_private *priv = drm_dev->dev_private; ++ struct phytium_display_private *priv = dev->dev_private; + int ret, ret1; + +- priv->save_reg[0] = phytium_readl_reg(priv, PHYTIUM_DC_ADDRESS_TRANSFORM_SRC_ADDR); +- priv->save_reg[1] = phytium_readl_reg(priv, PHYTIUM_DC_ADDRESS_TRANSFORM_SIZE); +- priv->save_reg[2] = phytium_readl_reg(priv, PHYTIUM_DC_ADDRESS_TRANSFORM_DST_ADDR); +- +- phytium_dp_hpd_irq_setup(drm_dev, false); ++ phytium_dp_hpd_irq_setup(dev, false); + cancel_work_sync(&priv->hotplug_work); +- drm_fb_helper_set_suspend_unlocked(drm_dev->fb_helper, 1); +- state = drm_atomic_helper_suspend(drm_dev); ++ drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1); ++ state = drm_atomic_helper_suspend(dev); + if (IS_ERR(state)) { + DRM_ERROR("drm_atomic_helper_suspend failed: %ld\n", PTR_ERR(state)); + ret = PTR_ERR(state); + goto suspend_failed; + } +- drm_dev->mode_config.suspend_state = state; +- ret = phytium_gem_suspend(drm_dev); ++ dev->mode_config.suspend_state = state; ++ ret = phytium_gem_suspend(dev); + if (ret) { + DRM_ERROR("phytium_gem_suspend failed: %d\n", ret); + goto gem_suspend_failed; +@@ -512,102 +386,71 @@ static int phytium_display_pm_suspend(struct device *dev) + return 0; + + 
gem_suspend_failed: +- ret1 = drm_atomic_helper_resume(drm_dev, drm_dev->mode_config.suspend_state); ++ ret1 = drm_atomic_helper_resume(dev, dev->mode_config.suspend_state); + if (ret1) + DRM_ERROR("Failed to resume (%d)\n", ret1); +- drm_dev->mode_config.suspend_state = NULL; ++ dev->mode_config.suspend_state = NULL; + suspend_failed: +- drm_fb_helper_set_suspend_unlocked(drm_dev->fb_helper, 0); +- phytium_dp_hpd_irq_setup(drm_dev, true); ++ drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0); ++ phytium_dp_hpd_irq_setup(dev, true); + + return ret; + } + +-static int phytium_display_pm_resume(struct device *dev) ++static int phytium_display_pm_resume(struct drm_device *dev) + { +- struct pci_dev *pdev = to_pci_dev(dev); +- struct drm_device *drm_dev = pci_get_drvdata(pdev); +- struct phytium_display_drm_private *priv = drm_dev->dev_private; +- int ret = 0, i = 0, config, timeout = 100; +- struct phytium_dp_device *phytium_dp; +- struct drm_encoder *encoder; +- +- if (WARN_ON(!drm_dev->mode_config.suspend_state)) ++ struct phytium_display_private *priv = dev->dev_private; ++ int ret = 0; ++ ++ if (WARN_ON(!dev->mode_config.suspend_state)) + return -EINVAL; + +- /* reset dc/dp */ +- for_each_pipe_masked(priv, i) { +- timeout = 100; +- phytium_writel_reg(priv, PHYTIUM_DC_DP_RESET_STATUS, 0); +- phytium_writel_reg(priv, CMD_DC_DP_RESET | FLAG_REQUEST, +- PHYTIUM_DCREQ_PIX_CLOCK_CONFIG(i)); +- do { +- mdelay(10); +- timeout--; +- config = phytium_readl_reg(priv, PHYTIUM_DC_DP_RESET_STATUS); +- if (config & DC_DP_RESET_STATUS(i)) +- break; +- } while (timeout); +- if (timeout == 0) { +- DRM_ERROR("reset dc/dp pipe(%d) failed\n", i); +- return -EIO; +- } +- } ++ ret = phytium_dp_resume(dev); ++ if (ret) ++ return -EIO; + +- phytium_gem_resume(drm_dev); +- phytium_writel_reg(priv, priv->save_reg[0], PHYTIUM_DC_ADDRESS_TRANSFORM_SRC_ADDR); +- phytium_writel_reg(priv, priv->save_reg[1], PHYTIUM_DC_ADDRESS_TRANSFORM_SIZE); +- phytium_writel_reg(priv, priv->save_reg[2], 
PHYTIUM_DC_ADDRESS_TRANSFORM_DST_ADDR); ++ phytium_crtc_resume(dev); ++ phytium_gem_resume(dev); + +- drm_for_each_encoder(encoder, drm_dev) { +- phytium_dp = encoder_to_dp_device(encoder); +- phytium_dp->funcs->dp_hw_init(phytium_dp); +- } ++ if (priv->vram_support) ++ priv->vram_hw_init(priv); + +- ret = drm_atomic_helper_resume(drm_dev, drm_dev->mode_config.suspend_state); ++ ret = drm_atomic_helper_resume(dev, dev->mode_config.suspend_state); + if (ret) { + DRM_ERROR("Failed to resume (%d)\n", ret); + return ret; + } + +- drm_dev->mode_config.suspend_state = NULL; +- drm_fb_helper_set_suspend_unlocked(drm_dev->fb_helper, 0); +- phytium_dp_hpd_irq_setup(drm_dev, true); ++ dev->mode_config.suspend_state = NULL; ++ drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0); ++ phytium_dp_hpd_irq_setup(dev, true); + + return 0; + } + +-static const struct dev_pm_ops phytium_display_pm_ops = { +- SET_SYSTEM_SLEEP_PM_OPS(phytium_display_pm_suspend, phytium_display_pm_resume) +-}; +- +-static const struct phytium_device_info phytium_pipe_info = { +- .total_pipes = 3, +-}; +- +-static const struct pci_device_id phytium_display_ids[] = { +- { PCI_VDEVICE(PHYTIUM, 0xdc22), (kernel_ulong_t)&phytium_pipe_info }, +- { /* End: all zeroes */ } +-}; +-MODULE_DEVICE_TABLE(pci, phytium_display_ids); +- +-static struct pci_driver phytium_display_driver = { +- .name = "phytium_display", +- .id_table = phytium_display_ids, +- .probe = phytium_display_probe, +- .remove = phytium_display_remove, +- .shutdown = phytium_display_shutdown, +- .driver.pm = &phytium_display_pm_ops, +-}; ++void phytium_display_private_init(struct phytium_display_private *priv, struct drm_device *dev) ++{ ++ INIT_LIST_HEAD(&priv->gem_list_head); ++ spin_lock_init(&priv->hotplug_irq_lock); ++ INIT_WORK(&priv->hotplug_work, phytium_dp_hpd_work_func); ++ priv->dev = dev; ++ priv->display_shutdown = phytium_display_shutdown; ++ priv->display_pm_suspend = phytium_display_pm_suspend; ++ priv->display_pm_resume = 
phytium_display_pm_resume; ++} + + static int __init phytium_display_init(void) + { +- return pci_register_driver(&phytium_display_driver); ++ int ret = 0; ++ ++ ret = pci_register_driver(&phytium_pci_driver); ++ ++ return ret; + } + + static void __exit phytium_display_exit(void) + { +- return pci_unregister_driver(&phytium_display_driver); ++ pci_unregister_driver(&phytium_pci_driver); + } + + module_init(phytium_display_init); +diff --git a/drivers/gpu/drm/phytium/phytium_display_drv.h b/drivers/gpu/drm/phytium/phytium_display_drv.h +index 028e46da0572..9e052b805fcd 100644 +--- a/drivers/gpu/drm/phytium/phytium_display_drv.h ++++ b/drivers/gpu/drm/phytium/phytium_display_drv.h +@@ -1,15 +1,7 @@ + /* SPDX-License-Identifier: GPL-2.0 */ +-/* Phytium X100 display drm driver ++/* Phytium display drm driver + * +- * Copyright (c) 2021 Phytium Limited. +- * +- * Author: +- * Yang Xun +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License as published by +- * the Free Software Foundation; either version 2 of the License, or +- * (at your option) any later version. ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. 
+ */ + + #ifndef __PHYTIUM_DISPLAY_DRV_H__ +@@ -42,23 +34,44 @@ + #define EDP_MASK_SHIFT 0x3 + #define EDP_MASK_MASK 0x7 + ++enum phytium_platform { ++ PHYTIUM_PLATFORM_UNINITIALIZED = 0, ++ PHYTIUM_PLATFORM_X100, ++}; ++ ++#define IS_PLATFORM(priv, p) ((priv)->info.platform_mask & BIT(p)) ++ ++#define IS_X100(priv) IS_PLATFORM(priv, PHYTIUM_PLATFORM_X100) ++ + struct phytium_device_info { ++ unsigned char platform_mask; + unsigned char pipe_mask; + unsigned char num_pipes; + unsigned char total_pipes; + unsigned char edp_mask; ++ unsigned int crtc_clock_max; ++ unsigned int hdisplay_max; ++ unsigned int vdisplay_max; ++ unsigned int backlight_max; ++ unsigned long address_mask; + }; + +-struct phytium_display_drm_private { ++struct phytium_display_private { + /* hw */ + void __iomem *regs; + void __iomem *vram_addr; + struct phytium_device_info info; + bool vram_support; + bool reserve[3]; ++ uint32_t dc_reg_base[3]; ++ uint32_t dcreq_reg_base[3]; ++ uint32_t dp_reg_base[3]; ++ uint32_t address_transform_base; ++ uint32_t phy_access_base[3]; + + /* drm */ + struct drm_device *dev; ++ int irq; + + /* fb_dev */ + struct drm_fb_helper fbdev_helper; +@@ -69,38 +82,47 @@ struct phytium_display_drm_private { + + struct work_struct hotplug_work; + spinlock_t hotplug_irq_lock; ++ ++ void (*vram_hw_init)(struct phytium_display_private *priv); ++ void (*display_shutdown)(struct drm_device *dev); ++ int (*display_pm_suspend)(struct drm_device *dev); ++ int (*display_pm_resume)(struct drm_device *dev); ++ ++ void (*dc_hw_clear_msi_irq)(struct phytium_display_private *priv, uint32_t phys_pipe); ++ ++ int (*dc_hw_fb_format_check)(const struct drm_mode_fb_cmd2 *mode_cmd, int count); + }; + + static inline unsigned int +-phytium_readl_reg(struct phytium_display_drm_private *priv, unsigned int offset) ++phytium_readl_reg(struct phytium_display_private *priv, uint32_t group_offset, uint32_t reg_offset) + { + unsigned int data; + +- data = readl(priv->regs + offset); ++ data = 
readl(priv->regs + group_offset + reg_offset); + #if DEBUG_LOG +- pr_info("Read 32'h%08x 32'h%08x\n", offset, data); ++ pr_info("Read 32'h%08x 32'h%08x\n", group_offset + reg_offset, data); + #endif + return data; + } + + static inline void +-phytium_writel_reg(struct phytium_display_drm_private *priv, unsigned int data, +- unsigned int offset) ++phytium_writel_reg(struct phytium_display_private *priv, uint32_t data, ++ uint32_t group_offset, uint32_t reg_offset) + { + +- writel(data, priv->regs + offset); ++ writel(data, priv->regs + group_offset + reg_offset); + #if DEBUG_LOG +- pr_info("Write 32'h%08x 32'h%08x\n", offset, data); ++ pr_info("Write 32'h%08x 32'h%08x\n", group_offset + reg_offset, data); + #endif + } + + static inline void +-phytium_writeb_reg(struct phytium_display_drm_private *priv, unsigned char data, +- unsigned int offset) ++phytium_writeb_reg(struct phytium_display_private *priv, uint8_t data, ++ uint32_t group_offset, uint32_t reg_offset) + { +- writeb(data, priv->regs + offset); ++ writeb(data, priv->regs + group_offset + reg_offset); + #if DEBUG_LOG +- pr_info("Write 32'h%08x 8'h%08x\n", offset, data); ++ pr_info("Write 32'h%08x 8'h%08x\n", group_offset + reg_offset, data); + #endif + } + +@@ -111,9 +133,15 @@ phytium_writeb_reg(struct phytium_display_drm_private *priv, unsigned char data, + for ((__p) = 0; (__p) < __dev_priv->info.total_pipes; (__p)++) \ + for_each_if((__dev_priv->info.pipe_mask) & BIT(__p)) + +-int phytium_get_virt_pipe(struct phytium_display_drm_private *priv, int phys_pipe); +-int phytium_get_phys_pipe(struct phytium_display_drm_private *priv, int virt_pipe); ++int phytium_get_virt_pipe(struct phytium_display_private *priv, int phys_pipe); ++int phytium_get_phys_pipe(struct phytium_display_private *priv, int virt_pipe); ++int phytium_wait_cmd_done(struct phytium_display_private *priv, ++ uint32_t register_offset, ++ uint32_t request_bit, ++ uint32_t reply_bit); ++void phytium_display_private_init(struct 
phytium_display_private *priv, struct drm_device *dev); + ++extern struct drm_driver phytium_display_drm_driver; + extern int dc_fake_mode_enable; + extern int dc_fast_training_check; + extern int num_source_rates; +diff --git a/drivers/gpu/drm/phytium/phytium_dp.c b/drivers/gpu/drm/phytium/phytium_dp.c +index 9ce50de88683..7c7284bac8ee 100644 +--- a/drivers/gpu/drm/phytium/phytium_dp.c ++++ b/drivers/gpu/drm/phytium/phytium_dp.c +@@ -1,15 +1,7 @@ + // SPDX-License-Identifier: GPL-2.0 +-/* Phytium X100 display drm driver ++/* Phytium display drm driver + * +- * Copyright (c) 2021 Phytium Limited. +- * +- * Author: +- * Yang Xun +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License as published by +- * the Free Software Foundation; either version 2 of the License, or +- * (at your option) any later version. ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + + #include +@@ -21,17 +13,212 @@ + #include + #include + #include "phytium_display_drv.h" +-#include "phytium_reg.h" + #include "phytium_dp.h" + #include "phytium_debugfs.h" + #include "x100_dp.h" + #include "phytium_panel.h" ++#include "phytium_reg.h" + + static void phytium_dp_aux_init(struct phytium_dp_device *phytium_dp); + static void handle_plugged_change(struct phytium_dp_device *phytium_dp, bool plugged); + static bool phytium_edp_init_connector(struct phytium_dp_device *phytium_dp); + static void phytium_edp_panel_poweroff(struct phytium_dp_device *phytium_dp); + ++static int phytium_rate[] = {162000, 270000, 540000, 810000}; ++ ++void phytium_phy_writel(struct phytium_dp_device *phytium_dp, uint32_t address, uint32_t data) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ uint32_t group_offset = priv->phy_access_base[port]; ++ ++#if DEBUG_LOG ++ pr_info("phy address write: 0x%x data:0x%x\n", address, data); ++#endif ++ 
phytium_writel_reg(priv, address, group_offset, PHYTIUM_PHY_ACCESS_ADDRESS); ++ phytium_writel_reg(priv, data, group_offset, PHYTIUM_PHY_WRITE_DATA); ++ phytium_writel_reg(priv, ACCESS_WRITE, group_offset, PHYTIUM_PHY_ACCESS_CTRL); ++ udelay(10); ++} ++ ++uint32_t phytium_phy_readl(struct phytium_dp_device *phytium_dp, uint32_t address) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ uint32_t group_offset = priv->phy_access_base[port]; ++ uint32_t data; ++ ++ phytium_writel_reg(priv, address, group_offset, PHYTIUM_PHY_ACCESS_ADDRESS); ++ phytium_writel_reg(priv, ACCESS_READ, group_offset, PHYTIUM_PHY_ACCESS_CTRL); ++ udelay(10); ++ data = phytium_readl_reg(priv, group_offset, PHYTIUM_PHY_READ_DATA); ++#if DEBUG_LOG ++ pr_info("phy address read: 0x%x data:0x%x\n", address, data); ++#endif ++ ++ return data; ++} ++ ++static int ++phytium_dp_hw_aux_transfer_write(struct phytium_dp_device *phytium_dp, struct drm_dp_aux_msg *msg) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ unsigned int i = 0, j = 0; ++ unsigned int cmd = 0; ++ unsigned int aux_status = 0, interrupt_status = 0; ++ unsigned char *data = msg->buffer; ++ int count_timeout = 0; ++ long ret = 0; ++ ++ for (i = 0; i < 3; i++) { ++ /* clear X100_DP_INTERRUPT_RAW_STATUS */ ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS); ++ phytium_writel_reg(priv, msg->address, group_offset, PHYTIUM_DP_AUX_ADDRESS); ++ for (j = 0; j < msg->size; j++) ++ phytium_writeb_reg(priv, data[j], group_offset, PHYTIUM_DP_AUX_WRITE_FIFO); ++ ++ cmd = ((msg->request & COMMAND_MASK) << COMMAND_SHIFT); ++ if (msg->size == 0) ++ cmd |= ADDRESS_ONLY; ++ else ++ cmd |= (msg->size-1) & BYTE_COUNT_MASK; ++ phytium_writel_reg(priv, cmd, group_offset, PHYTIUM_DP_AUX_COMMAND); ++ ++ 
count_timeout = 0; ++ do { ++ mdelay(5); ++ interrupt_status = phytium_readl_reg(priv, group_offset, ++ PHYTIUM_DP_INTERRUPT_RAW_STATUS); ++ aux_status = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_STATUS); ++ if ((aux_status & REPLY_RECEIVED) || (aux_status & REPLY_ERROR) ++ || (interrupt_status & REPLY_TIMEOUT)) { ++ DRM_DEBUG_KMS("aux wait exit\n"); ++ break; ++ } ++ count_timeout++; ++ } while (count_timeout < 6); ++ ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS); ++ if (interrupt_status & REPLY_TIMEOUT) { ++ DRM_DEBUG_KMS("aux write reply timeout\n"); ++ continue; ++ } else if (aux_status & REPLY_ERROR) { ++ DRM_DEBUG_KMS("aux write reply error\n"); ++ continue; ++ } else if (aux_status & REPLY_RECEIVED) { ++ DRM_DEBUG_KMS("aux write reply received succussful\n"); ++ break; ++ } ++ } ++ ++ if (interrupt_status & REPLY_TIMEOUT) { ++ DRM_NOTE("aux(%d) write reply timeout\n", phytium_dp->port); ++ ret = -EIO; ++ goto out; ++ } else if (aux_status & REPLY_ERROR) { ++ DRM_ERROR("aux(%d) write reply error\n", phytium_dp->port); ++ ret = -EIO; ++ goto out; ++ } else if ((aux_status & REPLY_RECEIVED) != REPLY_RECEIVED) { ++ DRM_ERROR("aux(%d) write reply no response\n", phytium_dp->port); ++ ret = -EIO; ++ goto out; ++ } ++ ++ msg->reply = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_CODE); ++ ret = msg->size; ++out: ++ return ret; ++} ++ ++static int ++phytium_dp_hw_aux_transfer_read(struct phytium_dp_device *phytium_dp, struct drm_dp_aux_msg *msg) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ unsigned int i = 0; ++ unsigned int cmd = 0; ++ unsigned int aux_status = 0, interrupt_status = 0; ++ unsigned char *data = msg->buffer; ++ int count_timeout = 0; ++ long ret = 0; ++ ++ for (i = 0; i < 3; i++) { ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS); ++ 
phytium_writel_reg(priv, msg->address, group_offset, PHYTIUM_DP_AUX_ADDRESS); ++ cmd = ((msg->request & COMMAND_MASK) << COMMAND_SHIFT); ++ if (msg->size == 0) ++ cmd |= ADDRESS_ONLY; ++ else ++ cmd |= ((msg->size-1) & BYTE_COUNT_MASK); ++ phytium_writel_reg(priv, cmd, group_offset, PHYTIUM_DP_AUX_COMMAND); ++ ++ count_timeout = 0; ++ do { ++ mdelay(5); ++ interrupt_status = phytium_readl_reg(priv, group_offset, ++ PHYTIUM_DP_INTERRUPT_RAW_STATUS); ++ aux_status = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_STATUS); ++ if ((aux_status & REPLY_RECEIVED) || (aux_status & REPLY_ERROR) ++ || (interrupt_status & REPLY_TIMEOUT)) { ++ DRM_DEBUG_KMS("aux wait exit\n"); ++ break; ++ } ++ count_timeout++; ++ } while (count_timeout < 6); ++ ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS); ++ if (interrupt_status & REPLY_TIMEOUT) { ++ DRM_DEBUG_KMS("aux read reply timeout\n"); ++ continue; ++ } else if (aux_status & REPLY_ERROR) { ++ DRM_DEBUG_KMS("aux read reply error\n"); ++ continue; ++ } else if (aux_status & REPLY_RECEIVED) { ++ DRM_DEBUG_KMS("aux read reply received succussful\n"); ++ break; ++ } ++ } ++ ++ if (interrupt_status & REPLY_TIMEOUT) { ++ DRM_NOTE("aux(%d) read reply timeout\n", phytium_dp->port); ++ ret = -EIO; ++ goto out; ++ } else if (aux_status & REPLY_ERROR) { ++ DRM_ERROR("aux(%d) read reply error\n", phytium_dp->port); ++ ret = -EIO; ++ goto out; ++ } else if ((aux_status & REPLY_RECEIVED) != REPLY_RECEIVED) { ++ DRM_ERROR("aux(%d) read reply no response\n", phytium_dp->port); ++ ret = -EIO; ++ goto out; ++ } ++ ++ msg->reply = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_CODE); ++ ret = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_DATA_COUNT); ++ ++ if (ret > msg->size) { ++ ret = msg->size; ++ } else if (ret != msg->size) { ++ DRM_DEBUG_KMS("aux read count error(ret:0x%lx != 0x%lx)\n", ret, msg->size); ++ ret = -EBUSY; ++ goto out; ++ } ++ ++ for (i = 0; i < ret; i++) ++ data[i] = 
phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_DATA); ++ ++out: ++ return ret; ++} ++ + static void phytium_get_native_mode(struct phytium_dp_device *phytium_dp) + { + struct drm_display_mode *t, *mode; +@@ -161,10 +348,7 @@ phytium_connector_mode_valid(struct drm_connector *connector, + } + + requested = mode->clock * display_info->bpc * 3 / 1000; +- if (link_dynamic_adjust) +- actual = phytium_dp->max_link_rate * phytium_dp->max_link_lane_count / 100; +- else +- actual = phytium_dp->link_rate * phytium_dp->link_lane_count / 100; ++ actual = phytium_dp->max_link_rate * phytium_dp->max_link_lane_count / 100; + actual = actual * 8 / 10; + if (requested >= actual) { + DRM_DEBUG_KMS("requested=%d, actual=%d, clock=%d\n", requested, actual, +@@ -172,7 +356,7 @@ phytium_connector_mode_valid(struct drm_connector *connector, + return MODE_CLOCK_HIGH; + } + +- if (mode->hdisplay == 1600) ++ if ((mode->hdisplay == 1600) && (mode->vdisplay == 900)) + return MODE_BAD_HVALUE; + + if ((mode->hdisplay == 1024) && (mode->clock > 78000)) +@@ -376,39 +560,545 @@ bool phytium_dp_scrambled_need_enable(unsigned char test_pattern) + } + + static void phytium_dp_hw_set_lane_setting(struct phytium_dp_device *phytium_dp, +- uint32_t link_rate, +- uint8_t train_set) ++ uint32_t link_rate, ++ uint8_t train_set) + { +- phytium_dp->funcs->dp_hw_set_lane_setting(phytium_dp, link_rate, train_set); ++ phytium_dp->funcs->dp_hw_set_phy_lane_setting(phytium_dp, link_rate, train_set); + } + + static void phytium_dp_hw_set_link(struct phytium_dp_device *phytium_dp, + uint8_t lane_count, + uint32_t link_rate) + { +- phytium_dp->funcs->dp_hw_set_link(phytium_dp, lane_count, link_rate); +-} ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port, ret = 0, retry = 3; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ ++ phytium_writel_reg(priv, lane_count, ++ group_offset, PHYTIUM_DP_LANE_COUNT_SET); ++ 
phytium_writel_reg(priv, ++ drm_dp_link_rate_to_bw_code(link_rate), ++ group_offset, PHYTIUM_DP_LINK_BW_SET); ++ ++ if (drm_dp_enhanced_frame_cap(phytium_dp->dpcd)) ++ phytium_writel_reg(priv, ENHANCED_FRAME_ENABLE, ++ group_offset, PHYTIUM_DP_ENHANCED_FRAME_EN); ++ else ++ phytium_writel_reg(priv, ENHANCED_FRAME_DISABLE, ++ group_offset, PHYTIUM_DP_ENHANCED_FRAME_EN); + ++try_again: ++ ret = phytium_dp->funcs->dp_hw_set_phy_lane_and_rate(phytium_dp, lane_count, link_rate); ++ if ((ret < 0) && retry) { ++ retry--; ++ goto try_again; ++ } ++} + + static void phytium_dp_hw_set_test_pattern(struct phytium_dp_device *phytium_dp, +- uint8_t lane_count, ++ uint8_t lane_count, + uint8_t test_pattern, + uint8_t *custom_pattern, + uint32_t custom_pattern_size) + { +- phytium_dp->funcs->dp_hw_set_test_pattern(phytium_dp, lane_count, test_pattern, +- custom_pattern, custom_pattern_size); ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port, val = 0, tmp = 0, i; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ ++ if ((test_pattern == PHYTIUM_PHY_TP_80BIT_CUSTOM) ++ && custom_pattern && (custom_pattern_size > 0)) { ++ val = *(int *)custom_pattern; ++ phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_CUSTOM_80BIT_PATTERN_0); ++ val = *(int *)(custom_pattern + 4); ++ phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_CUSTOM_80BIT_PATTERN_1); ++ val = *(short int *)(custom_pattern + 8); ++ phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_CUSTOM_80BIT_PATTERN_2); ++ } ++ ++ if (test_pattern == PHYTIUM_PHY_TP_D10_2 || test_pattern == PHYTIUM_PHY_TP_PRBS7 ++ || test_pattern == PHYTIUM_PHY_TP_80BIT_CUSTOM) ++ phytium_writel_reg(priv, SCRAMBLING_DISABLE, group_offset, ++ PHYTIUM_DP_SCRAMBLING_DISABLE); ++ else ++ phytium_writel_reg(priv, SCRAMBLING_ENABLE, group_offset, ++ PHYTIUM_DP_SCRAMBLING_DISABLE); ++ ++ tmp = test_pattern - PHYTIUM_PHY_TP_NONE + TEST_PATTERN_NONE; ++ val = 0; 
++ for (i = 0; i < lane_count; i++) ++ val |= (tmp << (TEST_PATTERN_LANE_SHIFT * i)); ++ phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_LINK_QUAL_PATTERN_SET); + } + + static void phytium_dp_hw_set_train_pattern(struct phytium_dp_device *phytium_dp, +- uint8_t test_pattern) ++ uint8_t train_pattern) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port, tmp = 0; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ ++ /* Scrambling is disabled for TPS1/TPS2/3 and enabled for TPS4 */ ++ if (train_pattern == DP_TRAINING_PATTERN_4 ++ || train_pattern == DP_TRAINING_PATTERN_DISABLE) { ++ phytium_writel_reg(priv, SCRAMBLING_ENABLE, group_offset, ++ PHYTIUM_DP_SCRAMBLING_DISABLE); ++ phytium_writel_reg(priv, SCRAMBLER_RESET, group_offset, ++ PHYTIUM_DP_FORCE_SCRAMBLER_RESET); ++ } else { ++ phytium_writel_reg(priv, SCRAMBLING_DISABLE, group_offset, ++ PHYTIUM_DP_SCRAMBLING_DISABLE); ++ } ++ switch (train_pattern) { ++ case DP_TRAINING_PATTERN_DISABLE: ++ tmp = TRAINING_OFF; ++ break; ++ case DP_TRAINING_PATTERN_1: ++ tmp = TRAINING_PATTERN_1; ++ break; ++ case DP_TRAINING_PATTERN_2: ++ tmp = TRAINING_PATTERN_2; ++ break; ++ case DP_TRAINING_PATTERN_3: ++ tmp = TRAINING_PATTERN_3; ++ break; ++ case DP_TRAINING_PATTERN_4: ++ tmp = TRAINING_PATTERN_4; ++ break; ++ default: ++ tmp = TRAINING_OFF; ++ break; ++ } ++ ++ phytium_writel_reg(priv, tmp, group_offset, PHYTIUM_DP_TRAINING_PATTERN_SET); ++} ++ ++void phytium_dp_hw_enable_audio(struct phytium_dp_device *phytium_dp) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ int config = 0, config1, data_window = 0; ++ const struct dp_audio_n_m *n_m = NULL; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ ++ config = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); ++ phytium_writel_reg(priv, CHANNEL_MUTE_ENABLE, 
group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); ++ ++ data_window = 90*(phytium_dp->link_rate)/100 ++ *(phytium_dp->mode.htotal - phytium_dp->mode.hdisplay) ++ /phytium_dp->mode.clock/4; ++ ++ phytium_writel_reg(priv, data_window, group_offset, PHYTIUM_DP_SEC_DATA_WINDOW); ++ ++ n_m = phytium_dp_audio_get_n_m(phytium_dp->link_rate, phytium_dp->audio_info.sample_rate); ++ if (n_m == NULL) { ++ DRM_NOTE("can not get n_m for link_rate(%d) and sample_rate(%d)\n", ++ phytium_dp->link_rate, phytium_dp->audio_info.sample_rate); ++ phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_MAUD); ++ phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_NAUD); ++ } else { ++ phytium_writel_reg(priv, n_m->m, group_offset, PHYTIUM_DP_SEC_MAUD); ++ phytium_writel_reg(priv, n_m->n, group_offset, PHYTIUM_DP_SEC_NAUD); ++ } ++ ++ config1 = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); ++ phytium_writel_reg(priv, SECONDARY_STREAM_DISABLE, ++ group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); ++ phytium_writel_reg(priv, config1, group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); ++ phytium_writel_reg(priv, config, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); ++} ++ ++static void phytium_dp_hw_audio_shutdown(struct phytium_dp_device *phytium_dp) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ ++ phytium_writel_reg(priv, SECONDARY_STREAM_DISABLE, ++ group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); ++} ++ ++static void phytium_dp_hw_audio_digital_mute(struct phytium_dp_device *phytium_dp, bool enable) ++{ ++ struct phytium_display_private *priv = phytium_dp->dev->dev_private; ++ int port = phytium_dp->port; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ ++ if (enable) ++ phytium_writel_reg(priv, CHANNEL_MUTE_ENABLE, ++ group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); ++ else ++ phytium_writel_reg(priv, 
SEC_AUDIO_ENABLE, ++ group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); ++} ++ ++static int ++phytium_dp_hw_audio_hw_params(struct phytium_dp_device *phytium_dp, struct audio_info audio_info) ++{ ++ struct phytium_display_private *priv = phytium_dp->dev->dev_private; ++ int port = phytium_dp->port; ++ int ret = 0, data_window = 0; ++ const struct dp_audio_n_m *n_m = NULL; ++ uint32_t fs, ws, fs_accurac; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ ++ DRM_DEBUG_KMS("%s:set port%d sample_rate(%d) channels(%d) sample_width(%d)\n", ++ __func__, phytium_dp->port, audio_info.sample_rate, ++ audio_info.channels, audio_info.sample_width); ++ ++ phytium_writel_reg(priv, INPUT_SELECT_I2S, group_offset, PHYTIUM_DP_SEC_INPUT_SELECT); ++ phytium_writel_reg(priv, APB_CLOCK/audio_info.sample_rate, ++ group_offset, PHYTIUM_DP_SEC_DIRECT_CLKDIV); ++ phytium_writel_reg(priv, audio_info.channels & CHANNEL_MASK, ++ group_offset, PHYTIUM_DP_SEC_CHANNEL_COUNT); ++ phytium_writel_reg(priv, CHANNEL_MAP_DEFAULT, group_offset, PHYTIUM_DP_SEC_CHANNEL_MAP); ++ data_window = 90*(phytium_dp->link_rate)/100 ++ *(phytium_dp->mode.htotal - phytium_dp->mode.hdisplay) ++ /phytium_dp->mode.clock/4; ++ phytium_writel_reg(priv, data_window, group_offset, PHYTIUM_DP_SEC_DATA_WINDOW); ++ phytium_writel_reg(priv, 0xb5, group_offset, PHYTIUM_DP_SEC_CS_CATEGORY_CODE); ++ ++ phytium_writel_reg(priv, CLOCK_MODE_SYNC, group_offset, PHYTIUM_DP_SEC_CLOCK_MODE); ++ phytium_writel_reg(priv, CS_SOURCE_FORMAT_DEFAULT, ++ group_offset, PHYTIUM_DP_SEC_CS_SOURCE_FORMAT); ++ ++ switch (audio_info.sample_rate) { ++ case 32000: ++ fs = ORIG_FREQ_32000; ++ fs_accurac = SAMPLING_FREQ_32000; ++ break; ++ case 44100: ++ fs = ORIG_FREQ_44100; ++ fs_accurac = SAMPLING_FREQ_44100; ++ break; ++ case 48000: ++ fs = ORIG_FREQ_48000; ++ fs_accurac = SAMPLING_FREQ_48000; ++ break; ++ case 96000: ++ fs = ORIG_FREQ_96000; ++ fs_accurac = SAMPLING_FREQ_96000; ++ break; ++ case 176400: ++ fs = ORIG_FREQ_176400; ++ fs_accurac = 
SAMPLING_FREQ_176400; ++ break; ++ case 192000: ++ fs = ORIG_FREQ_192000; ++ fs_accurac = SAMPLING_FREQ_192000; ++ break; ++ default: ++ DRM_ERROR("dp not support sample_rate %d\n", audio_info.sample_rate); ++ goto out; ++ } ++ ++ switch (audio_info.sample_width) { ++ case 16: ++ ws = WORD_LENGTH_16; ++ break; ++ case 18: ++ ws = WORD_LENGTH_18; ++ break; ++ case 20: ++ ws = WORD_LENGTH_20; ++ break; ++ case 24: ++ ws = WORD_LENGTH_24; ++ break; ++ default: ++ DRM_ERROR("dp not support sample_width %d\n", audio_info.sample_width); ++ goto out; ++ } ++ ++ phytium_writel_reg(priv, ((fs&ORIG_FREQ_MASK)<link_rate, audio_info.sample_rate); ++ if (n_m == NULL) { ++ DRM_NOTE("can not get n_m for link_rate(%d) and sample_rate(%d)\n", ++ phytium_dp->link_rate, audio_info.sample_rate); ++ phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_MAUD); ++ phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_NAUD); ++ ++ } else { ++ phytium_writel_reg(priv, n_m->m, group_offset, PHYTIUM_DP_SEC_MAUD); ++ phytium_writel_reg(priv, n_m->n, group_offset, PHYTIUM_DP_SEC_NAUD); ++ } ++ phytium_writel_reg(priv, SECONDARY_STREAM_ENABLE, ++ group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); ++ phytium_dp->audio_info = audio_info; ++ ++ return 0; ++ ++out: ++ phytium_writel_reg(priv, SECONDARY_STREAM_DISABLE, ++ group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); ++ ++ return ret; ++} ++ ++void phytium_dp_hw_disable_video(struct phytium_dp_device *phytium_dp) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ ++ phytium_writel_reg(priv, SST_MST_SOURCE_0_DISABLE, ++ group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE); ++} ++ ++bool phytium_dp_hw_video_is_enable(struct phytium_dp_device *phytium_dp) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port, config; ++ 
uint32_t group_offset = priv->dp_reg_base[port]; ++ ++ config = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE); ++ return config ? true : false; ++} ++ ++void phytium_dp_hw_enable_video(struct phytium_dp_device *phytium_dp) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ ++ phytium_writel_reg(priv, SST_MST_SOURCE_0_ENABLE, ++ group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE); ++ phytium_writel_reg(priv, LINK_SOFT_RESET, group_offset, PHYTIUM_DP_SOFT_RESET); ++} ++ ++void phytium_dp_hw_config_video(struct phytium_dp_device *phytium_dp) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ unsigned long link_bw, date_rate = 0; ++ struct drm_display_info *display_info = &phytium_dp->connector.display_info; ++ unsigned char tu_size = 64; ++ unsigned long data_per_tu = 0; ++ int symbols_per_tu, frac_symbols_per_tu, symbol_count, udc, value; ++ ++ /* cal M/N and tu_size */ ++ phytium_writel_reg(priv, phytium_dp->mode.crtc_clock/10, group_offset, PHYTIUM_DP_M_VID); ++ phytium_writel_reg(priv, phytium_dp->link_rate/10, group_offset, PHYTIUM_DP_N_VID); ++ link_bw = phytium_dp->link_rate * phytium_dp->link_lane_count; ++ date_rate = (phytium_dp->mode.crtc_clock * display_info->bpc * 3)/8; ++ ++ /* mul 10 for register setting */ ++ data_per_tu = 10*tu_size * date_rate/link_bw; ++ symbols_per_tu = (data_per_tu/10)&0xff; ++ frac_symbols_per_tu = (data_per_tu%10*16/10) & 0xf; ++ phytium_writel_reg(priv, frac_symbols_per_tu<<24 | symbols_per_tu<<16 | tu_size, ++ group_offset, PHYTIUM_DP_TRANSFER_UNIT_SIZE); ++ ++ symbol_count = (phytium_dp->mode.crtc_hdisplay*display_info->bpc*3 + 7)/8; ++ udc = (symbol_count + phytium_dp->link_lane_count - 1)/phytium_dp->link_lane_count; ++ 
phytium_writel_reg(priv, udc, group_offset, PHYTIUM_DP_DATA_COUNT); ++ ++ /* config main stream attributes */ ++ phytium_writel_reg(priv, phytium_dp->mode.crtc_htotal, ++ group_offset, PHYTIUM_DP_MAIN_LINK_HTOTAL); ++ phytium_writel_reg(priv, phytium_dp->mode.crtc_hdisplay, ++ group_offset, PHYTIUM_DP_MAIN_LINK_HRES); ++ phytium_writel_reg(priv, ++ phytium_dp->mode.crtc_hsync_end - phytium_dp->mode.crtc_hsync_start, ++ group_offset, PHYTIUM_DP_MAIN_LINK_HSWIDTH); ++ phytium_writel_reg(priv, phytium_dp->mode.crtc_htotal - phytium_dp->mode.crtc_hsync_start, ++ group_offset, PHYTIUM_DP_MAIN_LINK_HSTART); ++ phytium_writel_reg(priv, phytium_dp->mode.crtc_vtotal, ++ group_offset, PHYTIUM_DP_MAIN_LINK_VTOTAL); ++ phytium_writel_reg(priv, phytium_dp->mode.crtc_vdisplay, ++ group_offset, PHYTIUM_DP_MAIN_LINK_VRES); ++ phytium_writel_reg(priv, ++ phytium_dp->mode.crtc_vsync_end - phytium_dp->mode.crtc_vsync_start, ++ group_offset, PHYTIUM_DP_MAIN_LINK_VSWIDTH); ++ phytium_writel_reg(priv, phytium_dp->mode.crtc_vtotal - phytium_dp->mode.crtc_vsync_start, ++ group_offset, PHYTIUM_DP_MAIN_LINK_VSTART); ++ ++ value = 0; ++ if (phytium_dp->mode.flags & DRM_MODE_FLAG_PHSYNC) ++ value = value & (~HSYNC_POLARITY_LOW); ++ else ++ value = value | HSYNC_POLARITY_LOW; ++ ++ if (phytium_dp->mode.flags & DRM_MODE_FLAG_PVSYNC) ++ value = value & (~VSYNC_POLARITY_LOW); ++ else ++ value = value | VSYNC_POLARITY_LOW; ++ phytium_writel_reg(priv, value, group_offset, PHYTIUM_DP_MAIN_LINK_POLARITY); ++ ++ switch (display_info->bpc) { ++ case 10: ++ value = (MISC0_BIT_DEPTH_10BIT << MISC0_BIT_DEPTH_OFFSET); ++ break; ++ case 6: ++ value = (MISC0_BIT_DEPTH_6BIT << MISC0_BIT_DEPTH_OFFSET); ++ break; ++ default: ++ value = (MISC0_BIT_DEPTH_8BIT << MISC0_BIT_DEPTH_OFFSET); ++ break; ++ } ++ value |= (MISC0_COMPONENT_FORMAT_RGB << MISC0_COMPONENT_FORMAT_SHIFT) ++ | MISC0_SYNCHRONOUS_CLOCK; ++ phytium_writel_reg(priv, value, group_offset, PHYTIUM_DP_MAIN_LINK_MISC0); ++ phytium_writel_reg(priv, 0, 
group_offset, PHYTIUM_DP_MAIN_LINK_MISC1); ++ ++ value = USER_ODDEVEN_POLARITY_HIGH | USER_DATA_ENABLE_POLARITY_HIGH; ++ if (phytium_dp->mode.flags & DRM_MODE_FLAG_PHSYNC) ++ value = value | USER_HSYNC_POLARITY_HIGH; ++ else ++ value = value & (~USER_HSYNC_POLARITY_HIGH); ++ if (phytium_dp->mode.flags & DRM_MODE_FLAG_PVSYNC) ++ value = value | USER_VSYNC_POLARITY_HIGH; ++ else ++ value = value & (~USER_VSYNC_POLARITY_HIGH); ++ phytium_writel_reg(priv, value, group_offset, PHYTIUM_DP_USER_SYNC_POLARITY); ++} ++ ++void phytium_dp_hw_disable_output(struct phytium_dp_device *phytium_dp) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ ++ phytium_writel_reg(priv, TRANSMITTER_OUTPUT_DISABLE, ++ group_offset, PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE); ++ phytium_writel_reg(priv, LINK_SOFT_RESET, group_offset, PHYTIUM_DP_SOFT_RESET); ++} ++ ++void phytium_dp_hw_enable_output(struct phytium_dp_device *phytium_dp) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ ++ phytium_writel_reg(priv, LINK_SOFT_RESET, group_offset, PHYTIUM_DP_SOFT_RESET); ++ phytium_writel_reg(priv, TRANSMITTER_OUTPUT_ENABLE, ++ group_offset, PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE); ++} ++ ++void phytium_dp_hw_enable_input_source(struct phytium_dp_device *phytium_dp) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ ++ phytium_writel_reg(priv, VIRTUAL_SOURCE_0_ENABLE, ++ group_offset, PHYTIUM_INPUT_SOURCE_ENABLE); ++} ++ ++void phytium_dp_hw_disable_input_source(struct phytium_dp_device *phytium_dp) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct 
phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ ++ phytium_writel_reg(priv, (~VIRTUAL_SOURCE_0_ENABLE)&VIRTUAL_SOURCE_0_ENABLE_MASK, ++ priv->dp_reg_base[port], PHYTIUM_INPUT_SOURCE_ENABLE); ++} ++ ++bool phytium_dp_hw_output_is_enable(struct phytium_dp_device *phytium_dp) + { +- phytium_dp->funcs->dp_hw_set_train_pattern(phytium_dp, test_pattern); ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ int config = 0; ++ ++ config = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE); ++ return config ? true : false; + } + + static void phytium_dp_hw_get_hpd_state(struct phytium_dp_device *phytium_dp) + { +- phytium_dp->funcs->dp_hw_get_hpd_state(phytium_dp); ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ uint32_t val = 0, raw_state = 0; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ ++ val = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_RAW_STATUS); ++ ++ /* maybe miss hpd, so used for clear PHYTIUM_DP_INTERRUPT_RAW_STATUS */ ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS); ++ raw_state = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SINK_HPD_STATE); ++ if (val & HPD_EVENT) ++ phytium_dp->dp_hpd_state.hpd_event_state = true; ++ ++ if (val & HPD_IRQ) ++ phytium_dp->dp_hpd_state.hpd_irq_state = true; ++ ++ if (raw_state & HPD_CONNECT) ++ phytium_dp->dp_hpd_state.hpd_raw_state = true; ++ else ++ phytium_dp->dp_hpd_state.hpd_raw_state = false; ++} ++ ++void phytium_dp_hw_hpd_irq_setup(struct phytium_dp_device *phytium_dp, bool enable) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ ++ 
phytium_dp->dp_hpd_state.hpd_irq_enable = enable; ++ if (enable) ++ phytium_writel_reg(priv, HPD_OTHER_MASK, group_offset, PHYTIUM_DP_INTERRUPT_MASK); ++ else ++ phytium_writel_reg(priv, HPD_IRQ_MASK|HPD_EVENT_MASK|HPD_OTHER_MASK, ++ group_offset, PHYTIUM_DP_INTERRUPT_MASK); ++} ++ ++int phytium_dp_hw_init(struct phytium_dp_device *phytium_dp) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port, ret; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ ++ ret = phytium_dp->funcs->dp_hw_reset(phytium_dp); ++ if (ret) ++ goto out; ++ ret = phytium_dp->funcs->dp_hw_init_phy(phytium_dp); ++ if (ret) ++ goto out; ++ ++ phytium_writel_reg(priv, AUX_CLK_DIVIDER, group_offset, PHYTIUM_DP_AUX_CLK_DIVIDER); ++ phytium_dp->fast_train_support = false; ++ phytium_dp->hw_spread_enable = phytium_dp->funcs->dp_hw_spread_is_enable(phytium_dp); ++ ++out: ++ return ret; ++} ++ ++static void phytium_dp_hw_set_source_rate_and_lane_count(struct phytium_dp_device *phytium_dp) ++{ ++ phytium_dp->source_rates = phytium_rate; ++ phytium_dp->num_source_rates = num_source_rates; ++ ++ if (phytium_dp->port == 0) ++ phytium_dp->source_max_lane_count = source_max_lane_count; ++ else if (phytium_dp->port == 1) ++ phytium_dp->source_max_lane_count = source_max_lane_count; ++ else if (phytium_dp->port == 2) ++ phytium_dp->source_max_lane_count = 1; ++ else ++ phytium_dp->source_max_lane_count = 1; + } + + static int phytium_dp_dpcd_get_tp_link(struct phytium_dp_device *phytium_dp, +@@ -451,11 +1141,14 @@ static int phytium_dp_dpcd_set_link(struct phytium_dp_device *phytium_dp, + } + ret = drm_dp_dpcd_write(&phytium_dp->aux, DP_LINK_BW_SET, link_config, 2); + if (ret < 0) { +- DRM_ERROR("write dpcd DP_LINK_BW_SET fail: ret:%d\n", ret); ++ DRM_NOTE("write dpcd DP_LINK_BW_SET fail: ret:%d\n", ret); + goto failed; + } + +- link_config[0] = 0; ++ if (phytium_dp->hw_spread_enable) ++ link_config[0] = 
DP_SPREAD_AMP_0_5; ++ else ++ link_config[0] = 0; + link_config[1] = DP_SET_ANSI_8B10B; + ret = drm_dp_dpcd_write(&phytium_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2); + if (ret < 0) { +@@ -520,7 +1213,7 @@ static int phytium_dp_dpcd_set_train_pattern(struct phytium_dp_device *phytium_d + + ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_TRAINING_PATTERN_SET, value); + if (ret < 0) { +- DRM_ERROR("write DP_TRAINING_PATTERN_SET fail: ret:%d\n", ret); ++ DRM_NOTE("write DP_TRAINING_PATTERN_SET fail: ret:%d\n", ret); + goto failed; + } + +@@ -600,7 +1293,7 @@ static bool phytium_dp_link_training_clock_recovery(struct phytium_dp_device *ph + ret = phytium_dp_dpcd_set_link(phytium_dp, phytium_dp->link_lane_count, + phytium_dp->link_rate); + if (ret < 0) { +- DRM_ERROR("phytium_dp_dpcd_set_link failed(ret=%d)\n", ret); ++ DRM_NOTE("phytium_dp_dpcd_set_link failed(ret=%d)\n", ret); + return false; + } + +@@ -827,11 +1520,11 @@ phytium_dp_stop_link_train(struct phytium_dp_device *phytium_dp) + int ret; + + /* config source and sink's train_pattern x: DP_TRAINING_PATTERN_DISABLE */ +- phytium_dp->funcs->dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_DISABLE); ++ phytium_dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_DISABLE); + + ret = phytium_dp_dpcd_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_DISABLE); + if (ret < 0) { +- DRM_ERROR("phytium_dp_dpcd_set_train_pattern fail: ret:%d\n", ret); ++ DRM_NOTE("phytium_dp_dpcd_set_train_pattern fail: ret:%d\n", ret); + return ret; + } + +@@ -842,11 +1535,11 @@ int phytium_dp_start_link_train(struct phytium_dp_device *phytium_dp) + { + int ret = 0; + +- phytium_dp->funcs->dp_hw_disable_output(phytium_dp); +- phytium_dp->funcs->dp_hw_disable_input_source(phytium_dp); +- phytium_dp->funcs->dp_hw_disable_video(phytium_dp); +- phytium_dp->funcs->dp_hw_enable_input_source(phytium_dp); +- phytium_dp->funcs->dp_hw_enable_output(phytium_dp); ++ phytium_dp_hw_disable_output(phytium_dp); ++ 
phytium_dp_hw_disable_input_source(phytium_dp); ++ phytium_dp_hw_disable_video(phytium_dp); ++ phytium_dp_hw_enable_input_source(phytium_dp); ++ phytium_dp_hw_enable_output(phytium_dp); + phytium_dp_dpcd_sink_dpms(phytium_dp, DRM_MODE_DPMS_OFF); + phytium_dp_dpcd_sink_dpms(phytium_dp, DRM_MODE_DPMS_ON); + +@@ -858,7 +1551,7 @@ int phytium_dp_start_link_train(struct phytium_dp_device *phytium_dp) + + ret = phytium_dp_stop_link_train(phytium_dp); + if (ret < 0) { +- DRM_ERROR("phytium_dp_stop_link_train failed: ret = %d\n", ret); ++ DRM_NOTE("phytium_dp_stop_link_train failed: ret = %d\n", ret); + goto out; + } + +@@ -883,7 +1576,7 @@ int phytium_dp_start_link_train(struct phytium_dp_device *phytium_dp) + + ret = phytium_dp_stop_link_train(phytium_dp); + if (ret < 0) { +- DRM_ERROR("phytium_dp_stop_link_train failed: ret = %d\n", ret); ++ DRM_NOTE("phytium_dp_stop_link_train failed: ret = %d\n", ret); + goto out; + } + +@@ -928,7 +1621,7 @@ static bool phytium_dp_needs_link_retrain(struct phytium_dp_device *phytium_dp) + return true; + } + +- if (!phytium_dp->funcs->dp_hw_output_is_enable(phytium_dp)) { ++ if (!phytium_dp_hw_output_is_enable(phytium_dp)) { + DRM_DEBUG_KMS("check DP output enable failed\n"); + return true; + } +@@ -1065,7 +1758,7 @@ static void phytium_dp_unset_edid(struct drm_connector *connector) + { + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + +- if (!IS_ERR(phytium_dp->detect_edid)) ++ if (phytium_dp->detect_edid) + kfree(phytium_dp->detect_edid); + phytium_dp->detect_edid = NULL; + phytium_dp->has_audio = false; +@@ -1077,7 +1770,7 @@ static enum drm_connector_status phytium_dp_set_edid(struct drm_connector *conne + + phytium_dp_unset_edid(connector); + phytium_dp->detect_edid = drm_get_edid(connector, &phytium_dp->aux.ddc); +- if (IS_ERR(phytium_dp->detect_edid)) ++ if (!phytium_dp->detect_edid) + return connector_status_disconnected; + + phytium_dp->has_audio = drm_detect_monitor_audio(phytium_dp->detect_edid); 
+@@ -1117,7 +1810,7 @@ static int phytium_dp_long_pulse(struct drm_connector *connector, bool hpd_raw_s + DRM_DEBUG_KMS("common_max_lane_count: %d, common_max_rate:%d\n", + phytium_dp->max_link_lane_count, phytium_dp->max_link_rate); + +- video_enable = phytium_dp->funcs->dp_hw_video_is_enable(phytium_dp); ++ video_enable = phytium_dp_hw_video_is_enable(phytium_dp); + phytium_dp_start_link_train(phytium_dp); + + status = phytium_dp_set_edid(connector); +@@ -1126,7 +1819,7 @@ static int phytium_dp_long_pulse(struct drm_connector *connector, bool hpd_raw_s + + if (video_enable) { + mdelay(2); +- phytium_dp->funcs->dp_hw_enable_video(phytium_dp); ++ phytium_dp_hw_enable_video(phytium_dp); + } + } + +@@ -1157,18 +1850,18 @@ static int phytium_dp_short_pulse(struct drm_connector *connector) + goto out; + } + +- video_enable = phytium_dp->funcs->dp_hw_video_is_enable(phytium_dp); ++ video_enable = phytium_dp_hw_video_is_enable(phytium_dp); + phytium_dp_start_link_train(phytium_dp); + if (video_enable) { + mdelay(2); +- phytium_dp->funcs->dp_hw_enable_video(phytium_dp); ++ phytium_dp_hw_enable_video(phytium_dp); + } + + out: + return status; + } + +-void phytium_dp_hpd_poll_handler(struct phytium_display_drm_private *priv) ++void phytium_dp_hpd_poll_handler(struct phytium_display_private *priv) + { + struct drm_device *dev = priv->dev; + struct drm_connector_list_iter conn_iter; +@@ -1207,8 +1900,9 @@ void phytium_dp_hpd_irq_setup(struct drm_device *dev, bool enable) + { + struct phytium_dp_device *phytium_dp; + struct drm_encoder *encoder; +- struct phytium_display_drm_private *priv = dev->dev_private; ++ struct phytium_display_private *priv = dev->dev_private; + bool handler = false; ++ bool hpd_raw_state_old = false; + + /* We might have missed any hotplugs that happened, so polling and handler */ + if (enable) { +@@ -1217,9 +1911,11 @@ void phytium_dp_hpd_irq_setup(struct drm_device *dev, bool enable) + drm_for_each_encoder(encoder, dev) { + phytium_dp = 
encoder_to_dp_device(encoder); + if (!phytium_dp->dp_hpd_state.hpd_irq_enable) { ++ hpd_raw_state_old = phytium_dp->dp_hpd_state.hpd_raw_state; + phytium_dp_hw_get_hpd_state(phytium_dp); + if (phytium_dp->dp_hpd_state.hpd_event_state +- || phytium_dp->dp_hpd_state.hpd_irq_state) { ++ || phytium_dp->dp_hpd_state.hpd_irq_state ++ || (hpd_raw_state_old != phytium_dp->dp_hpd_state.hpd_raw_state)) { + handler = true; + } + } +@@ -1231,14 +1927,14 @@ void phytium_dp_hpd_irq_setup(struct drm_device *dev, bool enable) + + drm_for_each_encoder(encoder, dev) { + phytium_dp = encoder_to_dp_device(encoder); +- phytium_dp->funcs->dp_hw_hpd_irq_setup(phytium_dp, enable); ++ phytium_dp_hw_hpd_irq_setup(phytium_dp, enable); + } + } + + void phytium_dp_hpd_work_func(struct work_struct *work) + { +- struct phytium_display_drm_private *priv = +- container_of(work, struct phytium_display_drm_private, hotplug_work); ++ struct phytium_display_private *priv = ++ container_of(work, struct phytium_display_private, hotplug_work); + struct drm_device *dev = priv->dev; + struct drm_connector_list_iter conn_iter; + struct drm_connector *connector; +@@ -1274,7 +1970,7 @@ void phytium_dp_hpd_work_func(struct work_struct *work) + phytium_dp_hpd_irq_setup(dev, true); + } + +-irqreturn_t phytium_dp_hpd_irq_handler(struct phytium_display_drm_private *priv) ++irqreturn_t phytium_dp_hpd_irq_handler(struct phytium_display_private *priv) + { + struct drm_encoder *encoder = NULL; + struct phytium_dp_device *phytium_dp = NULL; +@@ -1369,7 +2065,7 @@ phytium_connector_detect(struct drm_connector *connector, bool force) + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + bool hpd_event_state, hpd_irq_state, hpd_raw_state; + struct drm_device *dev = phytium_dp->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; ++ struct phytium_display_private *priv = dev->dev_private; + bool plugged = true; + + spin_lock_irq(&priv->hotplug_irq_lock); +@@ -1386,6 +2082,12 @@ 
phytium_connector_detect(struct drm_connector *connector, bool force) + if (hpd_irq_state) + status = phytium_dp_short_pulse(connector); + ++ if (status == connector_status_unknown) ++ status = connector_status_disconnected; ++ ++ if ((!phytium_dp->is_edp) && (!hpd_raw_state)) ++ status = connector_status_disconnected; ++ + if (connector->status != status) { + if ((status == connector_status_connected) && phytium_dp->has_audio) + plugged = true; +@@ -1489,10 +2191,7 @@ static void phytium_encoder_disable(struct drm_encoder *encoder) + if (phytium_dp->is_edp) + phytium_edp_backlight_off(phytium_dp); + +- phytium_dp->funcs->dp_hw_disable_video(phytium_dp); +- +- if (phytium_dp->has_audio) +- phytium_dp->funcs->dp_hw_disable_audio(phytium_dp); ++ phytium_dp_hw_disable_video(phytium_dp); + + mdelay(50); + +@@ -1506,9 +2205,6 @@ void phytium_dp_adjust_link_train_parameter(struct phytium_dp_device *phytium_dp + unsigned long link_bw, date_rate = 0, bs_limit, bs_request; + int rate = 0; + +- if (!link_dynamic_adjust) +- return; +- + bs_request = phytium_dp->mode.crtc_htotal/(phytium_dp->mode.crtc_clock/1000); + date_rate = (phytium_dp->mode.crtc_clock * display_info->bpc * 3)/8; + +@@ -1516,14 +2212,18 @@ void phytium_dp_adjust_link_train_parameter(struct phytium_dp_device *phytium_dp + bs_limit = 8192 / (phytium_dp->link_rate/1000); + link_bw = phytium_dp->link_rate * phytium_dp->link_lane_count; + rate = 10 * date_rate / link_bw; +- +- if ((bs_request < bs_limit) && rate < 10) { +- DRM_DEBUG_KMS("Try link training at Link Rate = %d, Lane count = %d\n", +- phytium_dp->link_rate, phytium_dp->link_lane_count); ++ DRM_DEBUG_KMS("adjust link rate(%d), lane count(%d)\n", ++ phytium_dp->link_rate, phytium_dp->link_lane_count); ++ DRM_DEBUG_KMS("for crtc_clock(%d) bs_request(%ld) bs_limit(%ld) rate(%d)\n", ++ phytium_dp->mode.crtc_clock, bs_request, bs_limit, rate); ++ if ((link_dynamic_adjust && (bs_request < bs_limit) && rate < 10) || ++ ((!link_dynamic_adjust) && (rate < 
10))) + break; +- } + phytium_dp_get_link_train_fallback_values(phytium_dp); + } ++ ++ DRM_DEBUG_KMS("Try link training at Link Rate = %d, Lane count = %d\n", ++ phytium_dp->link_rate, phytium_dp->link_lane_count); + } + + static void phytium_encoder_enable(struct drm_encoder *encoder) +@@ -1531,7 +2231,7 @@ static void phytium_encoder_enable(struct drm_encoder *encoder) + struct phytium_dp_device *phytium_dp = encoder_to_dp_device(encoder); + int ret = 0; + +- phytium_dp->funcs->dp_hw_disable_video(phytium_dp); ++ phytium_dp_hw_disable_video(phytium_dp); + + if (phytium_dp->is_edp) { + phytium_edp_panel_poweron(phytium_dp); +@@ -1547,11 +2247,11 @@ static void phytium_encoder_enable(struct drm_encoder *encoder) + mdelay(2); + } + +- phytium_dp->funcs->dp_hw_config_video(phytium_dp); ++ phytium_dp_hw_config_video(phytium_dp); + if (ret == 0) { +- phytium_dp->funcs->dp_hw_enable_video(phytium_dp); ++ phytium_dp_hw_enable_video(phytium_dp); + if (phytium_dp->has_audio) +- phytium_dp->funcs->dp_hw_enable_audio(phytium_dp); ++ phytium_dp_hw_enable_audio(phytium_dp); + } + + if (phytium_dp->is_edp) { +@@ -1620,10 +2320,10 @@ static int phytium_dp_audio_get_eld(struct device *dev, void *data, u8 *buf, siz + static int phytium_dp_audio_digital_mute(struct device *dev, void *data, bool enable) + { + struct phytium_dp_device *phytium_dp = data; +- int ret; + +- ret = phytium_dp->funcs->dp_hw_audio_digital_mute(phytium_dp); +- return ret; ++ phytium_dp_hw_audio_digital_mute(phytium_dp, enable); ++ ++ return 0; + } + + const struct dp_audio_n_m *phytium_dp_audio_get_n_m(int link_rate, int sample_rate) +@@ -1657,7 +2357,7 @@ static int phytium_dp_audio_hw_params(struct device *dev, void *data, + goto failed; + } + +- ret = phytium_dp->funcs->dp_hw_audio_hw_params(phytium_dp, audio_info); ++ ret = phytium_dp_hw_audio_hw_params(phytium_dp, audio_info); + + failed: + return ret; +@@ -1667,7 +2367,7 @@ static void phytium_dp_audio_shutdown(struct device *dev, void *data) + { + 
struct phytium_dp_device *phytium_dp = data; + +- phytium_dp->funcs->dp_hw_audio_shutdown(phytium_dp); ++ phytium_dp_hw_audio_shutdown(phytium_dp); + } + + static void handle_plugged_change(struct phytium_dp_device *phytium_dp, bool plugged) +@@ -1677,8 +2377,8 @@ static void handle_plugged_change(struct phytium_dp_device *phytium_dp, bool plu + } + + static int phytium_dp_audio_hook_plugged_cb(struct device *dev, void *data, +- hdmi_codec_plugged_cb fn, +- struct device *codec_dev) ++ hdmi_codec_plugged_cb fn, ++ struct device *codec_dev) + { + struct phytium_dp_device *phytium_dp = data; + bool plugged; +@@ -1722,151 +2422,6 @@ static int phytium_dp_audio_codec_init(struct phytium_dp_device *phytium_dp) + return PTR_ERR_OR_ZERO(phytium_dp->audio_pdev); + } + +-static int +-phytium_dp_aux_transfer_write(struct phytium_dp_device *phytium_dp, struct drm_dp_aux_msg *msg) +-{ +- struct drm_device *dev = phytium_dp->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; +- int port = phytium_dp->port; +- unsigned int i = 0; +- unsigned int cmd = 0; +- unsigned int aux_status = 0, interrupt_status = 0; +- unsigned char *data = msg->buffer; +- int count_timeout = 0; +- long ret = 0; +- +- for (i = 0; i < 3; i++) { +- /* clear PHYTIUM_DP_INTERRUPT_RAW_STATUS */ +- phytium_readl_reg(priv, PHYTIUM_DP_INTERRUPT_STATUS(port)); +- phytium_writel_reg(priv, msg->address, PHYTIUM_DP_AUX_ADDRESS(port)); +- for (i = 0; i < msg->size; i++) +- phytium_writeb_reg(priv, data[i], PHYTIUM_DP_AUX_WRITE_FIFO(port)); +- +- cmd = ((msg->request & COMMAND_MASK) << COMMAND_SHIFT); +- if (msg->size == 0) +- cmd |= ADDRESS_ONLY; +- else +- cmd |= (msg->size-1) & BYTE_COUNT_MASK; +- phytium_writel_reg(priv, cmd, PHYTIUM_DP_AUX_COMMAND(port)); +- +- do { +- mdelay(5); +- interrupt_status = phytium_readl_reg(priv, +- PHYTIUM_DP_INTERRUPT_RAW_STATUS(port)); +- aux_status = phytium_readl_reg(priv, PHYTIUM_DP_AUX_STATUS(port)); +- if ((aux_status & REPLY_RECEIVED) || (aux_status & 
REPLY_ERROR) +- || (interrupt_status & REPLY_TIMEOUT)) { +- DRM_DEBUG_KMS("aux wait exit\n"); +- break; +- } +- count_timeout++; +- } while (count_timeout < 6); +- +- phytium_readl_reg(priv, PHYTIUM_DP_INTERRUPT_STATUS(port)); +- if (interrupt_status & REPLY_TIMEOUT) { +- DRM_DEBUG_KMS("aux write reply timeout\n"); +- continue; +- } else if (aux_status & REPLY_ERROR) { +- DRM_DEBUG_KMS("aux write reply error\n"); +- continue; +- } else if (aux_status & REPLY_RECEIVED) { +- DRM_DEBUG_KMS("aux write reply received succussful\n"); +- break; +- } +- } +- +- if (interrupt_status & REPLY_TIMEOUT) { +- DRM_ERROR("aux(%d) write reply timeout\n", phytium_dp->port); +- ret = -EIO; +- goto out; +- } else if (aux_status & REPLY_ERROR) { +- DRM_ERROR("aux(%d) write reply error\n", phytium_dp->port); +- ret = -EIO; +- goto out; +- } +- msg->reply = phytium_readl_reg(priv, PHYTIUM_DP_AUX_REPLY_CODE(port)); +- ret = msg->size; +-out: +- return ret; +-} +- +-static int +-phytium_dp_aux_transfer_read(struct phytium_dp_device *phytium_dp, struct drm_dp_aux_msg *msg) +-{ +- struct drm_device *dev = phytium_dp->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; +- int port = phytium_dp->port; +- unsigned int i = 0; +- unsigned int cmd = 0; +- unsigned int aux_status = 0, interrupt_status = 0; +- unsigned char *data = msg->buffer; +- int count_timeout = 0; +- long ret = 0; +- +- for (i = 0; i < 3; i++) { +- phytium_readl_reg(priv, PHYTIUM_DP_INTERRUPT_STATUS(port)); +- phytium_writel_reg(priv, msg->address, PHYTIUM_DP_AUX_ADDRESS(port)); +- cmd = ((msg->request & COMMAND_MASK) << COMMAND_SHIFT); +- if (msg->size == 0) +- cmd |= ADDRESS_ONLY; +- else +- cmd |= ((msg->size-1) & BYTE_COUNT_MASK); +- phytium_writel_reg(priv, cmd, PHYTIUM_DP_AUX_COMMAND(port)); +- +- do { +- mdelay(5); +- interrupt_status = phytium_readl_reg(priv, +- PHYTIUM_DP_INTERRUPT_RAW_STATUS(port)); +- aux_status = phytium_readl_reg(priv, PHYTIUM_DP_AUX_STATUS(port)); +- if ((aux_status & 
REPLY_RECEIVED) || (aux_status & REPLY_ERROR) +- || (interrupt_status & REPLY_TIMEOUT)) { +- DRM_DEBUG_KMS("aux wait exit\n"); +- break; +- } +- count_timeout++; +- } while (count_timeout < 6); +- +- phytium_readl_reg(priv, PHYTIUM_DP_INTERRUPT_STATUS(port)); +- if (interrupt_status & REPLY_TIMEOUT) { +- DRM_DEBUG_KMS("aux read reply timeout\n"); +- continue; +- } else if (aux_status & REPLY_ERROR) { +- DRM_DEBUG_KMS("aux read reply error\n"); +- continue; +- } else if (aux_status & REPLY_RECEIVED) { +- DRM_DEBUG_KMS("aux read reply received succussful\n"); +- break; +- } +- } +- +- if (interrupt_status & REPLY_TIMEOUT) { +- DRM_ERROR("aux(%d) read reply timeout\n", phytium_dp->port); +- ret = -EIO; +- goto out; +- } else if (aux_status & REPLY_ERROR) { +- DRM_ERROR("aux(%d) read reply error\n", phytium_dp->port); +- ret = -EIO; +- goto out; +- } +- +- msg->reply = phytium_readl_reg(priv, PHYTIUM_DP_AUX_REPLY_CODE(port)); +- ret = phytium_readl_reg(priv, PHYTIUM_DP_AUX_REPLY_DATA_COUNT(port)); +- +- if (ret > msg->size) { +- ret = msg->size; +- } else if (ret != msg->size) { +- DRM_DEBUG_KMS("aux read count error(ret:0x%lx != 0x%lx)\n", ret, msg->size); +- ret = -EBUSY; +- goto out; +- } +- +- for (i = 0; i < ret; i++) +- data[i] = phytium_readl_reg(priv, PHYTIUM_DP_AUX_REPLY_DATA(port)); +- +-out: +- return ret; +-} +- + static long phytium_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) + { + struct phytium_dp_device *phytium_dp = container_of(aux, struct phytium_dp_device, aux); +@@ -1881,12 +2436,12 @@ static long phytium_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_ms + case DP_AUX_NATIVE_WRITE: + case DP_AUX_I2C_WRITE: + case DP_AUX_I2C_WRITE_STATUS_UPDATE: +- ret = phytium_dp_aux_transfer_write(phytium_dp, msg); ++ ret = phytium_dp_hw_aux_transfer_write(phytium_dp, msg); + DRM_DEBUG_KMS("aux write reply:0x%x ret:0x%lx\n", msg->reply, ret); + break; + case DP_AUX_NATIVE_READ: + case DP_AUX_I2C_READ: +- ret = 
phytium_dp_aux_transfer_read(phytium_dp, msg); ++ ret = phytium_dp_hw_aux_transfer_read(phytium_dp, msg); + DRM_DEBUG_KMS("aux read ret:0x%lx\n", ret); + break; + default: +@@ -1907,7 +2462,7 @@ static void phytium_dp_aux_init(struct phytium_dp_device *phytium_dp) + int phytium_get_encoder_crtc_mask(struct phytium_dp_device *phytium_dp, int port) + { + struct drm_device *dev = phytium_dp->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; ++ struct phytium_display_private *priv = dev->dev_private; + int i, mask = 0; + + for_each_pipe_masked(priv, i) { +@@ -1923,7 +2478,7 @@ int phytium_get_encoder_crtc_mask(struct phytium_dp_device *phytium_dp, int port + static bool phytium_dp_is_edp(struct phytium_dp_device *phytium_dp, int port) + { + struct drm_device *dev = phytium_dp->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; ++ struct phytium_display_private *priv = dev->dev_private; + + if (priv->info.edp_mask & BIT(port)) + return true; +@@ -1936,13 +2491,6 @@ static bool phytium_edp_init_connector(struct phytium_dp_device *phytium_dp) + enum drm_connector_status status; + struct drm_connector *connector = &phytium_dp->connector; + +- /* Upper limits from eDP 1.3 spec */ +- phytium_dp->panel.panel_power_up_delay = 210; /* t1_t3 */ +- phytium_dp->panel.backlight_on_delay = 50; /* t7 */ +- phytium_dp->panel.backlight_off_delay = 50; +- phytium_dp->panel.panel_power_down_delay = 500; /* t10 */ +- phytium_dp->panel.panel_power_cycle_delay = 510; /* t11 + t12 */ +- phytium_dp_panel_init_backlight_funcs(phytium_dp); + phytium_edp_panel_poweron(phytium_dp); + + status = phytium_dp_detect_dpcd(phytium_dp); +@@ -1964,24 +2512,68 @@ static bool phytium_edp_init_connector(struct phytium_dp_device *phytium_dp) + return true; + } + ++int phytium_dp_resume(struct drm_device *drm_dev) ++{ ++ struct phytium_dp_device *phytium_dp; ++ struct drm_encoder *encoder; ++ int ret = 0; ++ ++ drm_for_each_encoder(encoder, drm_dev) { ++ phytium_dp = 
encoder_to_dp_device(encoder); ++ if (phytium_dp->is_edp) { ++ phytium_edp_backlight_off(phytium_dp); ++ phytium_edp_panel_poweroff(phytium_dp); ++ } ++ ret = phytium_dp_hw_init(phytium_dp); ++ if (ret) { ++ DRM_ERROR("failed to initialize dp %d\n", phytium_dp->port); ++ return -EIO; ++ } ++ } ++ ++ return 0; ++} + + int phytium_dp_init(struct drm_device *dev, int port) + { ++ struct phytium_display_private *priv = dev->dev_private; + struct phytium_dp_device *phytium_dp = NULL; + int ret, type; + + DRM_DEBUG_KMS("%s: port %d\n", __func__, port); + phytium_dp = kzalloc(sizeof(*phytium_dp), GFP_KERNEL); +- if (IS_ERR(phytium_dp)) { ++ if (!phytium_dp) { + ret = -ENOMEM; + goto failed_malloc_dp; + } + + phytium_dp->dev = dev; + phytium_dp->port = port; +- x100_dp_func_register(phytium_dp); ++ phytium_dp_hw_set_source_rate_and_lane_count(phytium_dp); ++ ++ if (IS_X100(priv)) { ++ x100_dp_func_register(phytium_dp); ++ priv->dp_reg_base[port] = X100_DP_BASE(port); ++ priv->phy_access_base[port] = X100_PHY_ACCESS_BASE(port); ++ } ++ ++ if (phytium_dp_is_edp(phytium_dp, port)) { ++ phytium_dp->is_edp = true; ++ type = DRM_MODE_CONNECTOR_eDP; ++ phytium_dp_panel_init_backlight_funcs(phytium_dp); ++ phytium_edp_backlight_off(phytium_dp); ++ phytium_edp_panel_poweroff(phytium_dp); ++ } else { ++ phytium_dp->is_edp = false; ++ type = DRM_MODE_CONNECTOR_DisplayPort; ++ } ++ ++ ret = phytium_dp_hw_init(phytium_dp); ++ if (ret) { ++ DRM_ERROR("failed to initialize dp %d\n", phytium_dp->port); ++ goto failed_init_dp; ++ } + +- phytium_dp->funcs->dp_hw_init(phytium_dp); + ret = drm_encoder_init(dev, &phytium_dp->encoder, + &phytium_encoder_funcs, + DRM_MODE_ENCODER_TMDS, "DP %d", port); +@@ -1994,14 +2586,6 @@ int phytium_dp_init(struct drm_device *dev, int port) + + phytium_dp->connector.dpms = DRM_MODE_DPMS_OFF; + phytium_dp->connector.polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; +- if (phytium_dp_is_edp(phytium_dp, port)) { +- phytium_dp->is_edp = true; 
+- type = DRM_MODE_CONNECTOR_eDP; +- } else { +- phytium_dp->is_edp = false; +- type = DRM_MODE_CONNECTOR_DisplayPort; +- } +- + ret = drm_connector_init(dev, &phytium_dp->connector, &phytium_connector_funcs, + type); + if (ret) { +@@ -2019,10 +2603,12 @@ int phytium_dp_init(struct drm_device *dev, int port) + + phytium_dp->train_retry_count = 0; + INIT_WORK(&phytium_dp->train_retry_work, phytium_dp_train_retry_work_fn); ++ drm_connector_register(&phytium_dp->connector); + + return 0; + failed_connector_init: + failed_encoder_init: ++failed_init_dp: + kfree(phytium_dp); + failed_malloc_dp: + return ret; +diff --git a/drivers/gpu/drm/phytium/phytium_dp.h b/drivers/gpu/drm/phytium/phytium_dp.h +index 81e8ed13a254..e1cf6c8483ad 100644 +--- a/drivers/gpu/drm/phytium/phytium_dp.h ++++ b/drivers/gpu/drm/phytium/phytium_dp.h +@@ -1,15 +1,7 @@ + /* SPDX-License-Identifier: GPL-2.0 */ +-/* Phytium X100 display drm driver ++/* Phytium display drm driver + * +- * Copyright (c) 2021 Phytium Limited. +- * +- * Author: +- * Yang Xun +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License as published by +- * the Free Software Foundation; either version 2 of the License, or +- * (at your option) any later version. ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. 
+ */ + + #ifndef __PHYTIUM_DP_H__ +@@ -45,39 +37,20 @@ struct phytium_dp_compliance { + }; + + struct phytium_dp_func { +- void (*dp_hw_set_test_pattern)(struct phytium_dp_device *phytium_dp, uint8_t test_pattern, +- uint8_t lane_count, uint8_t *custom_pattern, +- uint32_t custom_pattern_size); +- void (*dp_hw_set_link)(struct phytium_dp_device *phytium_dp, uint8_t link_lane_count, +- uint32_t link_rate); +- void (*dp_hw_set_lane_setting)(struct phytium_dp_device *phytium_dp, +- uint32_t link_rate, uint8_t train_set); +- void (*dp_hw_set_train_pattern)(struct phytium_dp_device *phytium_dp, +- uint8_t train_pattern); +- void (*dp_hw_init)(struct phytium_dp_device *phytium_dp); +- void (*dp_hw_enable_output)(struct phytium_dp_device *phytium_dp); +- void (*dp_hw_disable_output)(struct phytium_dp_device *phytium_dp); +- bool (*dp_hw_output_is_enable)(struct phytium_dp_device *phytium_dp); +- void (*dp_hw_enable_input_source)(struct phytium_dp_device *phytium_dp); +- void (*dp_hw_disable_input_source)(struct phytium_dp_device *phytium_dp); +- void (*dp_hw_get_hpd_state)(struct phytium_dp_device *phytium_dp); +- void (*dp_hw_hpd_irq_setup)(struct phytium_dp_device *phytium_dp, bool enable); +- void (*dp_hw_disable_video)(struct phytium_dp_device *phytium_dp); +- void (*dp_hw_enable_video)(struct phytium_dp_device *phytium_dp); +- bool (*dp_hw_video_is_enable)(struct phytium_dp_device *phytium_dp); +- void (*dp_hw_config_video)(struct phytium_dp_device *phytium_dp); +- void (*dp_hw_disable_audio)(struct phytium_dp_device *phytium_dp); +- void (*dp_hw_enable_audio)(struct phytium_dp_device *phytium_dp); +- void (*dp_hw_audio_shutdown)(struct phytium_dp_device *phytium_dp); +- int (*dp_hw_audio_digital_mute)(struct phytium_dp_device *phytium_dp); +- int (*dp_hw_audio_hw_params)(struct phytium_dp_device *phytium_dp, +- struct audio_info audio_info); +- void (*dp_hw_enable_backlight)(struct phytium_dp_device *phytium_dp); +- void (*dp_hw_disable_backlight)(struct 
phytium_dp_device *phytium_dp); +- uint32_t (*dp_hw_get_backlight)(struct phytium_dp_device *phytium_dp); ++ int (*dp_hw_reset)(struct phytium_dp_device *phytium_dp); ++ bool (*dp_hw_spread_is_enable)(struct phytium_dp_device *phytium_dp); + int (*dp_hw_set_backlight)(struct phytium_dp_device *phytium_dp, uint32_t level); +- void (*dp_hw_poweron_panel)(struct phytium_dp_device *phytium_dp); ++ uint32_t (*dp_hw_get_backlight)(struct phytium_dp_device *phytium_dp); ++ void (*dp_hw_disable_backlight)(struct phytium_dp_device *phytium_dp); ++ void (*dp_hw_enable_backlight)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_poweroff_panel)(struct phytium_dp_device *phytium_dp); ++ void (*dp_hw_poweron_panel)(struct phytium_dp_device *phytium_dp); ++ int (*dp_hw_init_phy)(struct phytium_dp_device *phytium_dp); ++ void (*dp_hw_set_phy_lane_setting)(struct phytium_dp_device *phytium_dp, ++ uint32_t link_rate, uint8_t train_set); ++ int (*dp_hw_set_phy_lane_and_rate)(struct phytium_dp_device *phytium_dp, ++ uint8_t link_lane_count, ++ uint32_t link_rate); + }; + + struct phytium_dp_hpd_state { +@@ -126,7 +99,8 @@ struct phytium_dp_device { + struct edid *detect_edid; + bool has_audio; + bool fast_train_support; +- bool reserve[2]; ++ bool hw_spread_enable; ++ bool reserve[1]; + struct platform_device *audio_pdev; + struct audio_info audio_info; + hdmi_codec_plugged_cb plugged_cb; +@@ -167,10 +141,13 @@ enum phytium_dpcd_phy_tp { + #define connector_to_dp_device(x) container_of(x, struct phytium_dp_device, connector) + #define panel_to_dp_device(x) container_of(x, struct phytium_dp_device, panel) + #define train_retry_to_dp_device(x) container_of(x, struct phytium_dp_device, train_retry_work) ++void phytium_phy_writel(struct phytium_dp_device *phytium_dp, uint32_t address, uint32_t data); ++uint32_t phytium_phy_readl(struct phytium_dp_device *phytium_dp, uint32_t address); ++ + int phytium_dp_init(struct drm_device *dev, int pipe); +-void phytium_dp_hw_init(struct 
phytium_dp_device *phytium_dp); ++int phytium_dp_resume(struct drm_device *drm_dev); + void phytium_dp_hpd_irq_setup(struct drm_device *dev, bool enable); +-irqreturn_t phytium_dp_hpd_irq_handler(struct phytium_display_drm_private *priv); ++irqreturn_t phytium_dp_hpd_irq_handler(struct phytium_display_private *priv); + void phytium_dp_hpd_work_func(struct work_struct *work); + const struct dp_audio_n_m *phytium_dp_audio_get_n_m(int link_rate, int sample_rate); + #endif /* __PHYTIUM_DP_H__ */ +diff --git a/drivers/gpu/drm/phytium/phytium_fb.c b/drivers/gpu/drm/phytium/phytium_fb.c +index 73855f1e37ec..fecca2cd3b8c 100644 +--- a/drivers/gpu/drm/phytium/phytium_fb.c ++++ b/drivers/gpu/drm/phytium/phytium_fb.c +@@ -1,21 +1,12 @@ + // SPDX-License-Identifier: GPL-2.0 +-/* Phytium X100 display drm driver ++/* Phytium display drm driver + * +- * Copyright (c) 2021 Phytium Limited. +- * +- * Author: +- * Yang Xun +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License as published by +- * the Free Software Foundation; either version 2 of the License, or +- * (at your option) any later version. ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. 
+ */ + + #include + #include + #include "phytium_display_drv.h" +-#include "phytium_reg.h" + #include "phytium_fb.h" + #include "phytium_gem.h" + +@@ -59,7 +50,7 @@ phytium_fb_alloc(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd + int ret = 0, i; + + phytium_fb = kzalloc(sizeof(*phytium_fb), GFP_KERNEL); +- if (IS_ERR(phytium_fb)) ++ if (!phytium_fb) + return ERR_PTR(-ENOMEM); + + drm_helper_mode_fill_fb_struct(dev, &phytium_fb->base, mode_cmd); +@@ -87,6 +78,7 @@ phytium_fb_create(struct drm_device *dev, struct drm_file *file_priv, + unsigned int hsub, vsub, size; + struct phytium_gem_object *phytium_gem_obj[PHYTIUM_FORMAT_MAX_PLANE] = {0}; + struct phytium_framebuffer *phytium_fb; ++ struct phytium_display_private *priv = dev->dev_private; + + hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format); + vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format); +@@ -96,7 +88,7 @@ phytium_fb_create(struct drm_device *dev, struct drm_file *file_priv, + + size = height * mode_cmd->pitches[i] + mode_cmd->offsets[i]; + obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]); +- if (IS_ERR(obj)) { ++ if (!obj) { + DRM_ERROR("Failed to lookup GEM object\n"); + ret = -ENXIO; + goto error; +@@ -109,69 +101,10 @@ phytium_fb_create(struct drm_device *dev, struct drm_file *file_priv, + } + + phytium_gem_obj[i] = to_phytium_gem_obj(obj); +- switch (mode_cmd->modifier[i]) { +- case DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC: +- switch (mode_cmd->pixel_format) { +- case DRM_FORMAT_ARGB4444: +- case DRM_FORMAT_ABGR4444: +- case DRM_FORMAT_RGBA4444: +- case DRM_FORMAT_BGRA4444: +- case DRM_FORMAT_XRGB4444: +- case DRM_FORMAT_XBGR4444: +- case DRM_FORMAT_RGBX4444: +- case DRM_FORMAT_BGRX4444: +- case DRM_FORMAT_ARGB1555: +- case DRM_FORMAT_ABGR1555: +- case DRM_FORMAT_RGBA5551: +- case DRM_FORMAT_BGRA5551: +- case DRM_FORMAT_XRGB1555: +- case DRM_FORMAT_XBGR1555: +- case DRM_FORMAT_RGBX5551: +- case DRM_FORMAT_BGRX5551: +- case DRM_FORMAT_RGB565: 
+- case DRM_FORMAT_BGR565: +- case DRM_FORMAT_YUYV: +- case DRM_FORMAT_UYVY: +- phytium_gem_obj[i]->tiling = FRAMEBUFFER_TILE_MODE0; +- break; +- default: +- DRM_ERROR("TILE_MODE0_FBCDC not support DRM_FORMAT %d", +- mode_cmd->pixel_format); +- ret = -EINVAL; +- goto error; +- } +- break; +- case DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC: +- switch (mode_cmd->pixel_format) { +- case DRM_FORMAT_ARGB2101010: +- case DRM_FORMAT_ABGR2101010: +- case DRM_FORMAT_RGBA1010102: +- case DRM_FORMAT_BGRA1010102: +- case DRM_FORMAT_ARGB8888: +- case DRM_FORMAT_ABGR8888: +- case DRM_FORMAT_RGBA8888: +- case DRM_FORMAT_BGRA8888: +- case DRM_FORMAT_XRGB8888: +- case DRM_FORMAT_XBGR8888: +- case DRM_FORMAT_RGBX8888: +- case DRM_FORMAT_BGRX8888: +- phytium_gem_obj[i]->tiling = FRAMEBUFFER_TILE_MODE3; +- break; +- default: +- DRM_ERROR("TILE_MODE3_FBCDC not support DRM_FORMAT %d", +- mode_cmd->pixel_format); +- ret = -EINVAL; +- goto error; +- } +- break; +- case DRM_FORMAT_MOD_LINEAR: +- phytium_gem_obj[i]->tiling = FRAMEBUFFER_LINEAR; +- break; +- default: +- DRM_ERROR("unsupported fb modifier 0x%llx\n", mode_cmd->modifier[0]); +- ret = -EINVAL; ++ ++ ret = priv->dc_hw_fb_format_check(mode_cmd, i); ++ if (ret < 0) + goto error; +- } + } + + phytium_fb = phytium_fb_alloc(dev, mode_cmd, phytium_gem_obj, i); +diff --git a/drivers/gpu/drm/phytium/phytium_fb.h b/drivers/gpu/drm/phytium/phytium_fb.h +index 7913d3364f21..c11c6c009b13 100644 +--- a/drivers/gpu/drm/phytium/phytium_fb.h ++++ b/drivers/gpu/drm/phytium/phytium_fb.h +@@ -1,15 +1,7 @@ + /* SPDX-License-Identifier: GPL-2.0 */ +-/* Phytium X100 display drm driver ++/* Phytium display drm driver + * +- * Copyright (c) 2021 Phytium Limited. +- * +- * Author: +- * Yang Xun +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License as published by +- * the Free Software Foundation; either version 2 of the License, or +- * (at your option) any later version. 
++ * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + + #ifndef __PHYTIUM_FB_H__ +diff --git a/drivers/gpu/drm/phytium/phytium_fbdev.c b/drivers/gpu/drm/phytium/phytium_fbdev.c +index f929ab418923..8eb16b3d7c70 100644 +--- a/drivers/gpu/drm/phytium/phytium_fbdev.c ++++ b/drivers/gpu/drm/phytium/phytium_fbdev.c +@@ -1,15 +1,7 @@ + // SPDX-License-Identifier: GPL-2.0 +-/* Phytium X100 display drm driver ++/* Phytium display drm driver + * +- * Copyright (c) 2021 Phytium Limited. +- * +- * Author: +- * Yang Xun +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License as published by +- * the Free Software Foundation; either version 2 of the License, or +- * (at your option) any later version. ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + + #include +@@ -23,12 +15,12 @@ + + + #define PHYTIUM_MAX_CONNECTOR 1 +-#define helper_to_drm_private(x) container_of(x, struct phytium_display_drm_private, fbdev_helper) ++#define helper_to_drm_private(x) container_of(x, struct phytium_display_private, fbdev_helper) + + static int phytium_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma) + { + struct drm_fb_helper *helper = info->par; +- struct phytium_display_drm_private *priv = helper_to_drm_private(helper); ++ struct phytium_display_private *priv = helper_to_drm_private(helper); + + return phytium_gem_mmap_obj(&priv->fbdev_phytium_gem->base, vma); + } +@@ -45,7 +37,7 @@ static struct fb_ops phytium_fbdev_ops = { + static int + phytium_drm_fbdev_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) + { +- struct phytium_display_drm_private *priv = helper_to_drm_private(helper); ++ struct phytium_display_private *priv = helper_to_drm_private(helper); + struct drm_device *dev = helper->dev; + unsigned int bytes_per_pixel; + struct drm_mode_fb_cmd2 mode_cmd = {0}; +@@ -70,7 +62,7 @@ phytium_drm_fbdev_create(struct drm_fb_helper *helper, struct 
drm_fb_helper_surf + } + + priv->fbdev_phytium_gem = phytium_gem_create_object(dev, size); +- if (priv->fbdev_phytium_gem == NULL) { ++ if (!priv->fbdev_phytium_gem) { + DRM_ERROR("failed to create gem object\n"); + return -ENOMEM; + } +@@ -121,7 +113,7 @@ static const struct drm_fb_helper_funcs phytium_drm_fb_helper_funcs = { + + int phytium_drm_fbdev_init(struct drm_device *dev) + { +- struct phytium_display_drm_private *priv = dev->dev_private; ++ struct phytium_display_private *priv = dev->dev_private; + struct drm_fb_helper *helper; + int ret; + +@@ -153,7 +145,7 @@ int phytium_drm_fbdev_init(struct drm_device *dev) + + void phytium_drm_fbdev_fini(struct drm_device *dev) + { +- struct phytium_display_drm_private *priv = dev->dev_private; ++ struct phytium_display_private *priv = dev->dev_private; + struct drm_fb_helper *helper; + + helper = &priv->fbdev_helper; +diff --git a/drivers/gpu/drm/phytium/phytium_fbdev.h b/drivers/gpu/drm/phytium/phytium_fbdev.h +index b61f92c7d162..d291d82c2706 100644 +--- a/drivers/gpu/drm/phytium/phytium_fbdev.h ++++ b/drivers/gpu/drm/phytium/phytium_fbdev.h +@@ -1,15 +1,7 @@ + /* SPDX-License-Identifier: GPL-2.0 */ +-/* Phytium X100 display drm driver ++/* Phytium display drm driver + * +- * Copyright (c) 2021 Phytium Limited. +- * +- * Author: +- * Yang Xun +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License as published by +- * the Free Software Foundation; either version 2 of the License, or +- * (at your option) any later version. ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. 
+ */ + + #ifndef _PHYTIUM_FBDEV_H +diff --git a/drivers/gpu/drm/phytium/phytium_gem.c b/drivers/gpu/drm/phytium/phytium_gem.c +index d8d6cae0dc05..bd0b85e64bbc 100644 +--- a/drivers/gpu/drm/phytium/phytium_gem.c ++++ b/drivers/gpu/drm/phytium/phytium_gem.c +@@ -1,15 +1,7 @@ + // SPDX-License-Identifier: GPL-2.0 +-/* Phytium X100 display drm driver ++/* Phytium display drm driver + * +- * Copyright (c) 2021 Phytium Limited. +- * +- * Author: +- * Yang Xun +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License as published by +- * the Free Software Foundation; either version 2 of the License, or +- * (at your option) any later version. ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + + #include +@@ -19,7 +11,6 @@ + #include + #include + #include "phytium_display_drv.h" +-#include "phytium_reg.h" + #include "phytium_gem.h" + + struct sg_table * +@@ -31,7 +22,7 @@ phytium_gem_prime_get_sg_table(struct drm_gem_object *obj) + int ret; + + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); +- if (IS_ERR(sgt)) { ++ if (!sgt) { + DRM_DEBUG_KMS("malloc sgt fail\n"); + return ERR_PTR(-ENOMEM); + } +@@ -59,7 +50,7 @@ phytium_gem_prime_import_sg_table(struct drm_device *dev, + int ret, i; + + phytium_gem_obj = kzalloc(sizeof(*phytium_gem_obj), GFP_KERNEL); +- if (IS_ERR(phytium_gem_obj)) { ++ if (!phytium_gem_obj) { + DRM_ERROR("failed to allocate phytium_gem_obj\n"); + ret = -ENOMEM; + goto failed_malloc; +@@ -117,7 +108,7 @@ int phytium_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vm + + int phytium_gem_suspend(struct drm_device *drm_dev) + { +- struct phytium_display_drm_private *priv = drm_dev->dev_private; ++ struct phytium_display_private *priv = drm_dev->dev_private; + struct phytium_gem_object *phytium_gem_obj = NULL; + + list_for_each_entry(phytium_gem_obj, &priv->gem_list_head, list) { +@@ -125,7 +116,7 @@ int phytium_gem_suspend(struct drm_device *drm_dev) + 
continue; + + phytium_gem_obj->vaddr_save = vmalloc(phytium_gem_obj->size); +- if (phytium_gem_obj->vaddr_save == NULL) ++ if (!phytium_gem_obj->vaddr_save) + goto malloc_failed; + + memcpy(phytium_gem_obj->vaddr_save, phytium_gem_obj->vaddr, phytium_gem_obj->size); +@@ -147,7 +138,7 @@ int phytium_gem_suspend(struct drm_device *drm_dev) + + void phytium_gem_resume(struct drm_device *drm_dev) + { +- struct phytium_display_drm_private *priv = drm_dev->dev_private; ++ struct phytium_display_private *priv = drm_dev->dev_private; + struct phytium_gem_object *phytium_gem_obj = NULL; + + list_for_each_entry(phytium_gem_obj, &priv->gem_list_head, list) { +@@ -224,11 +215,11 @@ int phytium_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev, uint + struct phytium_gem_object *phytium_gem_create_object(struct drm_device *dev, unsigned long size) + { + struct phytium_gem_object *phytium_gem_obj = NULL; +- struct phytium_display_drm_private *priv = dev->dev_private; ++ struct phytium_display_private *priv = dev->dev_private; + int ret = 0; + + phytium_gem_obj = kzalloc(sizeof(*phytium_gem_obj), GFP_KERNEL); +- if (IS_ERR(phytium_gem_obj)) { ++ if (!phytium_gem_obj) { + DRM_ERROR("failed to allocate phytium_gem_obj\n"); + ret = -ENOMEM; + goto error; +@@ -242,7 +233,7 @@ struct phytium_gem_object *phytium_gem_create_object(struct drm_device *dev, uns + + phytium_gem_obj->vaddr = dma_alloc_attrs(dev->dev, size, &phytium_gem_obj->iova, + GFP_KERNEL, 0); +- if (phytium_gem_obj->vaddr == NULL) { ++ if (!phytium_gem_obj->vaddr) { + DRM_ERROR("fail to allocate buffer with size %lx\n", size); + ret = -ENOMEM; + goto failed_dma_alloc; +diff --git a/drivers/gpu/drm/phytium/phytium_gem.h b/drivers/gpu/drm/phytium/phytium_gem.h +index 8930c7236549..b1d6b54ebf2f 100644 +--- a/drivers/gpu/drm/phytium/phytium_gem.h ++++ b/drivers/gpu/drm/phytium/phytium_gem.h +@@ -1,15 +1,7 @@ + /* SPDX-License-Identifier: GPL-2.0 */ +-/* Phytium X100 display drm driver ++/* Phytium display drm 
driver + * +- * Copyright (c) 2021 Phytium Limited. +- * +- * Author: +- * Yang Xun +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License as published by +- * the Free Software Foundation; either version 2 of the License, or +- * (at your option) any later version. ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + + #ifndef __PHYTIUM_GEM_H__ +@@ -23,7 +15,6 @@ struct phytium_gem_object { + void *vaddr; + unsigned long size; + struct sg_table *sgt; +- unsigned int tiling; + bool is_vram; + bool reserve[3]; + struct list_head list; +diff --git a/drivers/gpu/drm/phytium/phytium_panel.c b/drivers/gpu/drm/phytium/phytium_panel.c +index 278abea4c2d1..ed16ed15197d 100644 +--- a/drivers/gpu/drm/phytium/phytium_panel.c ++++ b/drivers/gpu/drm/phytium/phytium_panel.c +@@ -1,15 +1,7 @@ + // SPDX-License-Identifier: GPL-2.0 +-/* Phytium X100 display drm driver ++/* Phytium display drm driver + * +- * Copyright (c) 2021 Phytium Limited. +- * +- * Author: +- * Yang Xun +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License as published by +- * the Free Software Foundation; either version 2 of the License, or +- * (at your option) any later version. ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. 
+ */ + + #include +@@ -18,7 +10,6 @@ + #include + #include + #include "phytium_display_drv.h" +-#include "phytium_reg.h" + #include "phytium_dp.h" + #include "phytium_panel.h" + +@@ -201,9 +192,12 @@ static void phytium_dp_hw_disable_backlight(struct phytium_panel *panel) + + static void phytium_dp_hw_setup_backlight(struct phytium_panel *panel) + { +- panel->max = BACKLIGHT_MAX; ++ struct drm_device *dev = panel->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ ++ panel->max = priv->info.backlight_max; + panel->min = 0; +- panel->level = 10; ++ panel->level = phytium_dp_hw_get_backlight(panel); + } + + void phytium_dp_panel_init_backlight_funcs(struct phytium_dp_device *phytium_dp) +@@ -228,6 +222,14 @@ void phytium_dp_panel_init_backlight_funcs(struct phytium_dp_device *phytium_dp) + phytium_dp->panel.poweron = phytium_dp_hw_poweron_panel; + phytium_dp->panel.poweroff = phytium_dp_hw_poweroff_panel; + mutex_init(&phytium_dp->panel.panel_lock); ++ phytium_dp->panel.dev = phytium_dp->dev; ++ ++ /* Upper limits from eDP 1.3 spec */ ++ phytium_dp->panel.panel_power_up_delay = 210; /* t1_t3 */ ++ phytium_dp->panel.backlight_on_delay = 50; /* t7 */ ++ phytium_dp->panel.backlight_off_delay = 50; ++ phytium_dp->panel.panel_power_down_delay = 500; /* t10 */ ++ phytium_dp->panel.panel_power_cycle_delay = 510; /* t11 + t12 */ + } + + void phytium_dp_panel_release_backlight_funcs(struct phytium_dp_device *phytium_dp) +@@ -354,10 +356,13 @@ static int phytium_backlight_device_get_brightness(struct backlight_device *bd) + if (panel->get_backlight && panel->backlight_enabled) { + mutex_lock(&panel->panel_lock); + hw_level = panel->get_backlight(panel); ++ panel->level = hw_level; + mutex_unlock(&panel->panel_lock); + } + drm_modeset_unlock(&dev->mode_config.connection_mutex); + ret = phytium_scale_hw_to_user(panel, hw_level, bd->props.max_brightness); ++ DRM_DEBUG_KMS("get phytium_backlight, brightness=%d/%d\n", ++ ret, bd->props.max_brightness); + + return 
ret; + } +@@ -383,6 +388,8 @@ int phytium_edp_backlight_device_register(struct phytium_dp_device *phytium_dp) + memset(&props, 0, sizeof(props)); + props.max_brightness = PHYTIUM_MAX_BL_LEVEL; + props.type = BACKLIGHT_RAW; ++ props.brightness = phytium_scale_hw_to_user(&phytium_dp->panel, phytium_dp->panel.level, ++ props.max_brightness); + snprintf(bl_name, sizeof(bl_name), "phytium_bl%d", phytium_dp->port); + + phytium_dp->panel.bl_device = +@@ -399,7 +406,6 @@ int phytium_edp_backlight_device_register(struct phytium_dp_device *phytium_dp) + return -ENODEV; + } + +- phytium_dp->panel.dev = phytium_dp->dev; + DRM_DEBUG_KMS("Connector %s backlight sysfs interface registered\n", + phytium_dp->connector.name); + +diff --git a/drivers/gpu/drm/phytium/phytium_panel.h b/drivers/gpu/drm/phytium/phytium_panel.h +index a55f7bb3dc8a..e2d5f068064a 100644 +--- a/drivers/gpu/drm/phytium/phytium_panel.h ++++ b/drivers/gpu/drm/phytium/phytium_panel.h +@@ -1,15 +1,7 @@ + /* SPDX-License-Identifier: GPL-2.0 */ +-/* Phytium X100 display drm driver ++/* Phytium display drm driver + * +- * Copyright (c) 2021 Phytium Limited. +- * +- * Author: +- * Yang Xun +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License as published by +- * the Free Software Foundation; either version 2 of the License, or +- * (at your option) any later version. ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + + #ifndef __PHYTIUM_PANEL_H__ +diff --git a/drivers/gpu/drm/phytium/phytium_pci.c b/drivers/gpu/drm/phytium/phytium_pci.c +new file mode 100644 +index 000000000000..72fe10b242dd +--- /dev/null ++++ b/drivers/gpu/drm/phytium/phytium_pci.c +@@ -0,0 +1,284 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* Phytium display drm driver ++ * ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. 
++ */ ++ ++#include "phytium_display_drv.h" ++#include "phytium_pci.h" ++#include "phytium_dp.h" ++#include "x100_dc.h" ++#include "x100_dp.h" ++ ++int dc_msi_enable; ++module_param(dc_msi_enable, int, 0644); ++MODULE_PARM_DESC(dc_msi_enable, "Enable DC msi interrupt (0-disabled; 1-enabled; default-0)"); ++ ++void phytium_pci_vram_hw_init(struct phytium_display_private *priv) ++{ ++ struct phytium_pci_private *pci_priv = to_pci_priv(priv); ++ ++ pci_priv->dc_hw_vram_init(priv, pci_priv->vram_addr, pci_priv->vram_size); ++} ++ ++int phytium_pci_vram_init(struct pci_dev *pdev, struct phytium_display_private *priv) ++{ ++ resource_size_t vram_addr, vram_size; ++ int ret = 0; ++ struct phytium_pci_private *pci_priv = to_pci_priv(priv); ++ ++ vram_addr = pci_resource_start(pdev, 2); ++ vram_size = pci_resource_len(pdev, 2); ++ if ((vram_addr != 0) && (vram_size != 0)) { ++ DRM_DEBUG_KMS("vram_addr:0x%llx vram_size: 0x%llx\n", vram_addr, vram_size); ++ ret = dma_declare_coherent_memory(&pdev->dev, vram_addr, vram_addr, ++ vram_size, DMA_MEMORY_EXCLUSIVE); ++ if (ret) { ++ DRM_ERROR("pci bar2 vram declare fail\n"); ++ ret = -1; ++ goto failed_declare_memory; ++ } ++ pci_priv->vram_addr = vram_addr; ++ pci_priv->vram_size = vram_size; ++ priv->vram_support = true; ++ priv->vram_hw_init = phytium_pci_vram_hw_init; ++ } else { ++ DRM_DEBUG_KMS("not support vram\n"); ++ pci_priv->vram_addr = 0; ++ pci_priv->vram_size = 0; ++ priv->vram_support = false; ++ priv->vram_hw_init = NULL; ++ } ++ ++failed_declare_memory: ++ return ret; ++} ++ ++void phytium_pci_vram_fini(struct pci_dev *pdev, struct phytium_display_private *priv) ++{ ++ if (priv->vram_support) ++ dma_release_declared_memory(&pdev->dev); ++} ++ ++static struct phytium_display_private* ++phytium_pci_private_init(struct pci_dev *pdev, const struct pci_device_id *ent) ++{ ++ struct drm_device *dev = pci_get_drvdata(pdev); ++ struct phytium_display_private *priv = NULL; ++ struct phytium_pci_private *pci_priv = NULL; ++ 
struct phytium_device_info *phytium_info = (struct phytium_device_info *)ent->driver_data; ++ int i = 0; ++ resource_size_t io_addr, io_size; ++ ++ pci_priv = devm_kzalloc(&pdev->dev, sizeof(*pci_priv), GFP_KERNEL); ++ if (!pci_priv) { ++ DRM_ERROR("no memory to allocate for drm_display_private\n"); ++ goto failed_malloc_priv; ++ } ++ ++ memset(pci_priv, 0, sizeof(*pci_priv)); ++ priv = &pci_priv->base; ++ phytium_display_private_init(priv, dev); ++ ++ memcpy(&(priv->info), phytium_info, sizeof(struct phytium_device_info)); ++ DRM_DEBUG_KMS("priv->info.num_pipes :%d\n", priv->info.num_pipes); ++ priv->info.pipe_mask = ((pdev->subsystem_device >> PIPE_MASK_SHIFT) & PIPE_MASK_MASK); ++ priv->info.edp_mask = ((pdev->subsystem_device >> EDP_MASK_SHIFT) & EDP_MASK_MASK); ++ priv->info.num_pipes = 0; ++ for_each_pipe_masked(priv, i) ++ priv->info.num_pipes++; ++ if (priv->info.num_pipes == 0) { ++ DRM_ERROR("num_pipes is zero, so exit init\n"); ++ goto failed_init_numpipe; ++ } ++ ++ io_addr = pci_resource_start(pdev, 0); ++ io_size = pci_resource_len(pdev, 0); ++ priv->regs = ioremap(io_addr, io_size); ++ if (priv->regs == NULL) { ++ DRM_ERROR("pci bar0 ioremap fail, addr:0x%llx, size:0x%llx\n", io_addr, io_size); ++ goto failed_ioremap; ++ } ++ ++ priv->irq = pdev->irq; ++ if (IS_X100(priv)) { ++ pci_priv->dc_hw_vram_init = x100_dc_hw_vram_init; ++ priv->dc_hw_clear_msi_irq = x100_dc_hw_clear_msi_irq; ++ priv->dc_hw_fb_format_check = x100_dc_hw_fb_format_check; ++ } ++ ++ return priv; ++ ++failed_ioremap: ++failed_init_numpipe: ++ devm_kfree(&pdev->dev, pci_priv); ++failed_malloc_priv: ++ return NULL; ++} ++ ++static void ++phytium_pci_private_fini(struct pci_dev *pdev, struct phytium_display_private *priv) ++{ ++ struct phytium_pci_private *pci_priv = to_pci_priv(priv); ++ ++ if (priv->regs) ++ iounmap(priv->regs); ++ ++ devm_kfree(&pdev->dev, pci_priv); ++} ++ ++static int phytium_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ++{ ++ struct 
phytium_display_private *priv = NULL; ++ struct drm_device *dev = NULL; ++ int ret = 0; ++ ++ dev = drm_dev_alloc(&phytium_display_drm_driver, &pdev->dev); ++ if (IS_ERR(dev)) { ++ DRM_ERROR("failed to allocate drm_device\n"); ++ return PTR_ERR(dev); ++ } ++ dev->pdev = pdev; ++ pci_set_drvdata(pdev, dev); ++ pci_set_master(pdev); ++ ret = pci_enable_device(pdev); ++ if (ret) { ++ DRM_ERROR("pci enbale device fail\n"); ++ goto failed_enable_device; ++ } ++ ++ if (dc_msi_enable) { ++ ret = pci_enable_msi(pdev); ++ if (ret) ++ DRM_ERROR("pci enbale msi fail\n"); ++ } ++ ++ dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); ++ ++ priv = phytium_pci_private_init(pdev, ent); ++ if (priv) ++ dev->dev_private = priv; ++ else ++ goto failed_pci_private_init; ++ ++ ret = phytium_pci_vram_init(pdev, priv); ++ if (ret) { ++ DRM_ERROR("failed to init pci vram\n"); ++ goto failed_pci_vram_init; ++ } ++ ++ ret = drm_dev_register(dev, 0); ++ if (ret) { ++ DRM_ERROR("failed to register drm dev\n"); ++ goto failed_register_drm; ++ } ++ ++ phytium_dp_hpd_irq_setup(dev, true); ++ ++ return 0; ++ ++failed_register_drm: ++ phytium_pci_vram_fini(pdev, priv); ++failed_pci_vram_init: ++ phytium_pci_private_fini(pdev, priv); ++failed_pci_private_init: ++ if (pdev->msi_enabled) ++ pci_disable_msi(pdev); ++ pci_disable_device(pdev); ++failed_enable_device: ++ pci_set_drvdata(pdev, NULL); ++ drm_dev_unref(dev); ++ ++ return -1; ++} ++ ++static void phytium_pci_remove(struct pci_dev *pdev) ++{ ++ struct drm_device *dev = pci_get_drvdata(pdev); ++ struct phytium_display_private *priv = dev->dev_private; ++ ++ phytium_dp_hpd_irq_setup(dev, false); ++ cancel_work_sync(&priv->hotplug_work); ++ drm_dev_unregister(dev); ++ phytium_pci_vram_fini(pdev, priv); ++ phytium_pci_private_fini(pdev, priv); ++ if (pdev->msi_enabled) ++ pci_disable_msi(pdev); ++ pci_disable_device(pdev); ++ pci_set_drvdata(pdev, NULL); ++ drm_dev_unref(dev); ++} ++ ++static void phytium_pci_shutdown(struct pci_dev *pdev) ++{ ++ 
struct drm_device *dev = pci_get_drvdata(pdev); ++ struct phytium_display_private *priv = dev->dev_private; ++ ++ priv->display_shutdown(dev); ++} ++ ++static int phytium_pci_pm_suspend(struct device *dev) ++{ ++ struct pci_dev *pdev = to_pci_dev(dev); ++ struct drm_device *drm_dev = pci_get_drvdata(pdev); ++ struct phytium_display_private *priv = drm_dev->dev_private; ++ int ret = 0; ++ ++ ret = priv->display_pm_suspend(drm_dev); ++ if (ret < 0) ++ goto out; ++ ++ pci_save_state(pdev); ++ pci_disable_device(pdev); ++ pci_set_power_state(pdev, PCI_D3hot); ++ udelay(200); ++ ++out: ++ return ret; ++} ++ ++static int phytium_pci_pm_resume(struct device *dev) ++{ ++ struct pci_dev *pdev = to_pci_dev(dev); ++ struct drm_device *drm_dev = pci_get_drvdata(pdev); ++ struct phytium_display_private *priv = drm_dev->dev_private; ++ int ret = 0; ++ ++ pci_set_power_state(pdev, PCI_D0); ++ pci_restore_state(pdev); ++ ret = pci_enable_device(pdev); ++ if (ret) ++ return ret; ++ pci_set_master(pdev); ++ ++ return priv->display_pm_resume(drm_dev); ++} ++ ++static const struct dev_pm_ops phytium_pci_pm_ops = { ++ SET_SYSTEM_SLEEP_PM_OPS(phytium_pci_pm_suspend, phytium_pci_pm_resume) ++}; ++ ++static const struct phytium_device_info x100_info = { ++ .platform_mask = BIT(PHYTIUM_PLATFORM_X100), ++ .total_pipes = 3, ++ .crtc_clock_max = X100_DC_PIX_CLOCK_MAX, ++ .hdisplay_max = x100_DC_HDISPLAY_MAX, ++ .vdisplay_max = X100_DC_VDISPLAY_MAX, ++ .address_mask = X100_DC_ADDRESS_MASK, ++ .backlight_max = X100_DP_BACKLIGHT_MAX, ++}; ++ ++static const struct pci_device_id phytium_display_pci_ids[] = { ++ { PCI_VDEVICE(PHYTIUM, 0xdc22), (kernel_ulong_t)&x100_info }, ++ { /* End: all zeroes */ } ++}; ++MODULE_DEVICE_TABLE(pci, phytium_display_pci_ids); ++ ++struct pci_driver phytium_pci_driver = { ++ .name = "phytium_display_pci", ++ .id_table = phytium_display_pci_ids, ++ .probe = phytium_pci_probe, ++ .remove = phytium_pci_remove, ++ .shutdown = phytium_pci_shutdown, ++ .driver.pm = 
&phytium_pci_pm_ops, ++}; +diff --git a/drivers/gpu/drm/phytium/phytium_pci.h b/drivers/gpu/drm/phytium/phytium_pci.h +new file mode 100644 +index 000000000000..94e3a5e8e95c +--- /dev/null ++++ b/drivers/gpu/drm/phytium/phytium_pci.h +@@ -0,0 +1,23 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Phytium display drm driver ++ * ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. ++ */ ++ ++#ifndef __PHYTIUM_PCI_H__ ++#define __PHYTIUM_PCI_H__ ++ ++#include "phytium_display_drv.h" ++ ++struct phytium_pci_private { ++ struct phytium_display_private base; ++ resource_size_t vram_addr; ++ resource_size_t vram_size; ++ void (*dc_hw_vram_init)(struct phytium_display_private *priv, resource_size_t vram_addr, ++ resource_size_t vram_size); ++}; ++ ++#define to_pci_priv(priv) container_of(priv, struct phytium_pci_private, base) ++ ++extern struct pci_driver phytium_pci_driver; ++#endif /* __PHYTIUM_PCI_H__ */ +diff --git a/drivers/gpu/drm/phytium/phytium_plane.c b/drivers/gpu/drm/phytium/phytium_plane.c +index 47a57ff7264c..777bcd137293 100644 +--- a/drivers/gpu/drm/phytium/phytium_plane.c ++++ b/drivers/gpu/drm/phytium/phytium_plane.c +@@ -1,15 +1,7 @@ + // SPDX-License-Identifier: GPL-2.0 +-/* Phytium X100 display drm driver ++/* Phytium display drm driver + * +- * Copyright (c) 2021 Phytium Limited. +- * +- * Author: +- * Yang Xun +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License as published by +- * the Free Software Foundation; either version 2 of the License, or +- * (at your option) any later version. ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. 
+ */ + + #include +@@ -20,61 +12,16 @@ + #include + + #include "phytium_display_drv.h" +-#include "phytium_reg.h" + #include "phytium_plane.h" + #include "phytium_fb.h" + #include "phytium_gem.h" + #include "phytium_crtc.h" ++#include "x100_dc.h" ++#include "phytium_reg.h" + + #define PHYTIUM_CURS_W_SIZE 32 + #define PHYTIUM_CURS_H_SIZE 32 + +-static const unsigned int phytium_primary_formats[] = { +- DRM_FORMAT_ARGB2101010, +- DRM_FORMAT_ABGR2101010, +- DRM_FORMAT_RGBA1010102, +- DRM_FORMAT_BGRA1010102, +- DRM_FORMAT_ARGB8888, +- DRM_FORMAT_ABGR8888, +- DRM_FORMAT_RGBA8888, +- DRM_FORMAT_BGRA8888, +- DRM_FORMAT_XRGB8888, +- DRM_FORMAT_XBGR8888, +- DRM_FORMAT_RGBX8888, +- DRM_FORMAT_BGRX8888, +- DRM_FORMAT_ARGB4444, +- DRM_FORMAT_ABGR4444, +- DRM_FORMAT_RGBA4444, +- DRM_FORMAT_BGRA4444, +- DRM_FORMAT_XRGB4444, +- DRM_FORMAT_XBGR4444, +- DRM_FORMAT_RGBX4444, +- DRM_FORMAT_BGRX4444, +- DRM_FORMAT_ARGB1555, +- DRM_FORMAT_ABGR1555, +- DRM_FORMAT_RGBA5551, +- DRM_FORMAT_BGRA5551, +- DRM_FORMAT_XRGB1555, +- DRM_FORMAT_XBGR1555, +- DRM_FORMAT_RGBX5551, +- DRM_FORMAT_BGRX5551, +- DRM_FORMAT_RGB565, +- DRM_FORMAT_BGR565, +- DRM_FORMAT_YUYV, +- DRM_FORMAT_UYVY, +-}; +- +-static uint64_t phytium_primary_formats_modifiers[] = { +- DRM_FORMAT_MOD_LINEAR, +- DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC, +- DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC, +- DRM_FORMAT_MOD_INVALID +-}; +- +-static const unsigned int phytium_cursor_formats[] = { +- DRM_FORMAT_ARGB8888, +-}; +- + void phytium_plane_destroy(struct drm_plane *plane) + { + struct phytium_plane *phytium_plane = to_phytium_plane(plane); +@@ -134,13 +81,16 @@ phytium_plane_atomic_duplicate_state(struct drm_plane *plane) + + phytium_state = kmemdup(plane->state, sizeof(*phytium_state), GFP_KERNEL); + +- if (IS_ERR(phytium_state)) ++ if (!phytium_state) + return NULL; + + state = &phytium_state->base; + if (state->fb) + drm_framebuffer_reference(state->fb); + ++ state->fence = NULL; ++ state->commit = NULL; ++ + return state; + } + +@@ 
-149,8 +99,7 @@ phytium_plane_atomic_destroy_state(struct drm_plane *plane, struct drm_plane_sta + { + struct phytium_plane_state *phytium_state = to_phytium_plane_state(state); + +- if (state->fb) +- drm_framebuffer_unreference(state->fb); ++ __drm_atomic_helper_plane_destroy_state(state); + kfree(phytium_state); + } + +@@ -185,6 +134,8 @@ static int phytium_plane_prepare_fb(struct drm_plane *plane, + static int + phytium_plane_atomic_check(struct drm_plane *plane, struct drm_plane_state *state) + { ++ struct drm_device *dev = plane->dev; ++ struct phytium_display_private *priv = dev->dev_private; + struct drm_framebuffer *fb = state->fb; + struct drm_crtc *crtc = state->crtc; + struct drm_crtc_state *crtc_state; +@@ -215,8 +166,9 @@ phytium_plane_atomic_check(struct drm_plane *plane, struct drm_plane_state *stat + src_h = state->src_h >> 16; + + base_offset = src_x * fb->format->cpp[0] + src_y*fb->pitches[0]; +- if (base_offset & (~ADDRESS_MASK)) { +- DRM_ERROR("fb base address is not aligned by 128 byte\n"); ++ if (base_offset & (priv->info.address_mask)) { ++ DRM_ERROR("fb base address is not aligned by 0x%lx byte\n", ++ priv->info.address_mask); + return -EINVAL; + } + +@@ -242,48 +194,28 @@ phytium_plane_atomic_check(struct drm_plane *plane, struct drm_plane_state *stat + return 0; + } + +-static void phytium_plane_atomic_update(struct drm_plane *plane, +- struct drm_plane_state *old_state) ++static void phytium_dc_get_plane_parameter(struct drm_plane *plane) + { +- int i, num_planes = 0; +- struct drm_framebuffer *fb, *old_fb; +- struct drm_device *dev = plane->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); +- struct phytium_framebuffer *phytium_fb = NULL; +- struct phytium_gem_object *phytium_gem_obj; +- int config; +- int phys_pipe = phytium_plane->phys_pipe; +- int src_x, src_y, crtc_x, crtc_y, crtc_w, crtc_h; +- unsigned long base_offset; +- +- DRM_DEBUG_KMS("update 
plane: type=%d\n", plane->type); +- if (!plane->state->crtc || !plane->state->fb) +- return; +- +- src_x = plane->state->src_x >> 16; +- src_y = plane->state->src_y >> 16; +- crtc_x = plane->state->crtc_x; +- crtc_y = plane->state->crtc_y; +- crtc_w = plane->state->crtc_w; +- crtc_h = plane->state->crtc_h; +- fb = plane->state->fb; +- old_fb = old_state->fb; +- +- if (fb) { +- num_planes = drm_format_num_planes(fb->format->format); +- drm_framebuffer_reference(fb); +- } +- +- if (old_fb) +- drm_framebuffer_unreference(old_fb); ++ struct drm_framebuffer *fb = plane->state->fb; ++ struct phytium_framebuffer *phytium_fb = to_phytium_framebuffer(fb); ++ struct phytium_gem_object *phytium_gem_obj = NULL; ++ int i, num_planes = 0; + ++ num_planes = drm_format_num_planes(fb->format->format); + for (i = 0; i < num_planes; i++) { +- phytium_fb = to_phytium_framebuffer(fb); + phytium_gem_obj = phytium_fb->phytium_gem_obj[i]; + phytium_plane->iova[i] = phytium_gem_obj->iova + fb->offsets[i]; + phytium_plane->size[i] = phytium_gem_obj->size - fb->offsets[i]; +- phytium_plane->tiling[i] = phytium_gem_obj->tiling; ++ ++ if (fb->modifier == DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC) ++ phytium_plane->tiling[i] = FRAMEBUFFER_TILE_MODE0; ++ else if (fb->modifier == DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC) ++ phytium_plane->tiling[i] = FRAMEBUFFER_TILE_MODE3; ++ else if (fb->modifier == DRM_FORMAT_MOD_LINEAR) ++ phytium_plane->tiling[i] = FRAMEBUFFER_LINEAR; ++ else ++ phytium_plane->tiling[i] = FRAMEBUFFER_LINEAR; + + if (i == 0) { + switch (fb->format->format) { +@@ -348,7 +280,15 @@ static void phytium_plane_atomic_update(struct drm_plane *plane, + case DRM_FORMAT_UYVY: + phytium_plane->format = FRAMEBUFFER_FORMAT_UYVY; + break; +- ++ case DRM_FORMAT_NV16: ++ phytium_plane->format = FRAMEBUFFER_FORMAT_NV16; ++ break; ++ case DRM_FORMAT_NV12: ++ phytium_plane->format = FRAMEBUFFER_FORMAT_NV12; ++ break; ++ case DRM_FORMAT_NV21: ++ phytium_plane->format = FRAMEBUFFER_FORMAT_NV12; ++ 
break; + default: + DRM_ERROR("unsupported pixel format (format = %d)\n", + fb->format->format); +@@ -404,6 +344,8 @@ static void phytium_plane_atomic_update(struct drm_plane *plane, + + case DRM_FORMAT_YUYV: + case DRM_FORMAT_UYVY: ++ case DRM_FORMAT_NV16: ++ case DRM_FORMAT_NV12: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_ARGB; + phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; +@@ -415,136 +357,151 @@ static void phytium_plane_atomic_update(struct drm_plane *plane, + } + } + } ++} + +- if (plane->type == DRM_PLANE_TYPE_PRIMARY) { +- if (phytium_plane->tiling[0] == FRAMEBUFFER_LINEAR) { +- phytium_writel_reg(priv, DCREQ_MODE_LINEAR, +- PHYTIUM_DCREQ_PLANE0_CONFIG(phys_pipe)); +- } else { +- config = DCREQ_NO_LOSSY; +- if (phytium_plane->tiling[0] == FRAMEBUFFER_TILE_MODE0) +- config |= DCREQ_TILE_TYPE_MODE0; +- else if (phytium_plane->tiling[0] == FRAMEBUFFER_TILE_MODE3) +- config |= DCREQ_TILE_TYPE_MODE3; +- else +- config |= DCREQ_TILE_TYPE_MODE0; +- +- switch (phytium_plane->format) { +- case FRAMEBUFFER_FORMAT_ARGB8888: +- case FRAMEBUFFER_FORMAT_XRGB8888: +- config |= DCREQ_COLOURFORMAT_BGRA8888; +- break; +- case FRAMEBUFFER_FORMAT_ARGB2101010: +- config |= DCREQ_COLOURFORMAT_ARGB2101010; +- break; +- case FRAMEBUFFER_FORMAT_XRGB4444: +- case FRAMEBUFFER_FORMAT_ARGB4444: +- config |= DCREQ_COLOURFORMAT_ARGB4444; +- break; +- case FRAMEBUFFER_FORMAT_XRGB1555: +- case FRAMEBUFFER_FORMAT_ARGB1555: +- config |= DCREQ_COLOURFORMAT_ARGB1555; +- break; +- case FRAMEBUFFER_FORMAT_RGB565: +- config |= DCREQ_COLOURFORMAT_RGB565; +- break; +- case FRAMEBUFFER_FORMAT_YUYV: +- config |= DCREQ_COLOURFORMAT_YUYV; +- break; +- case FRAMEBUFFER_FORMAT_UYVY: +- config |= DCREQ_COLOURFORMAT_UYVY; +- break; +- } +- config |= DCREQ_ARGBSWIZZLE_ARGB; +- config |= DCREQ_MODE_TILE; +- phytium_writel_reg(priv, phytium_plane->iova[0] & 0xffffffff, +- PHYTIUM_DCREQ_PLANE0_ADDR_START(phys_pipe)); +- phytium_writel_reg(priv, (phytium_plane->iova[0] + 
phytium_plane->size[0]) & +- 0xffffffff, PHYTIUM_DCREQ_PLANE0_ADDR_END(phys_pipe)); +- phytium_writel_reg(priv, config, PHYTIUM_DCREQ_PLANE0_CONFIG(phys_pipe)); +- } ++static void phytium_dc_primary_plane_update(struct drm_plane *plane) ++{ ++ struct drm_device *dev = plane->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ struct phytium_plane *phytium_plane = to_phytium_plane(plane); ++ struct drm_framebuffer *fb = plane->state->fb; ++ int phys_pipe = phytium_plane->phys_pipe; ++ int src_x, src_y, crtc_x, crtc_y, crtc_w, crtc_h; ++ unsigned long base_offset; ++ int config; + +- /* config dc */ +- /* Y */ +- base_offset = src_x * fb->format->cpp[0] + src_y*fb->pitches[0]; +- phytium_writel_reg(priv, (phytium_plane->iova[0] >> PREFIX_SHIFT) & PREFIX_MASK, +- PHYTIUM_DCREQ_PIX_DMA_PREFIX(phys_pipe)); +- phytium_writel_reg(priv, (phytium_plane->iova[0] + base_offset) & ADDRESS_MASK, +- PHYTIUM_DC_FRAMEBUFFER_Y_ADDRESS(phys_pipe)); +- phytium_writel_reg(priv, ALIGN(fb->pitches[0], 128), +- PHYTIUM_DC_FRAMEBUFFER_Y_STRIDE(phys_pipe)); +- +- /* U */ +- phytium_writel_reg(priv, phytium_plane->iova[1] & 0xffffffff, +- PHYTIUM_DC_FRAMEBUFFER_U_ADDRESS(phys_pipe)); +- phytium_writel_reg(priv, ALIGN(fb->pitches[1], 128), +- PHYTIUM_DC_FRAMEBUFFER_U_STRIDE(phys_pipe)); +- +- /* V */ +- phytium_writel_reg(priv, phytium_plane->iova[2] & 0xffffffff, +- PHYTIUM_DC_FRAMEBUFFER_V_ADDRESS(phys_pipe)); +- phytium_writel_reg(priv, ALIGN(fb->pitches[2], 128), +- PHYTIUM_DC_FRAMEBUFFER_V_STRIDE(phys_pipe)); +- +- /* size */ +- phytium_writel_reg(priv, (crtc_w & WIDTH_MASK) | ((crtc_h&HEIGHT_MASK) +- << HEIGHT_SHIFT), PHYTIUM_DC_FRAMEBUFFER_SIZE(phys_pipe)); +- /* config */ +- config = phytium_readl_reg(priv, PHYTIUM_DC_FRAMEBUFFER_CONFIG(phys_pipe)); +- config &= ~(FRAMEBUFFER_FORMAT_MASK << FRAMEBUFFER_FORMAT_SHIFT); +- config |= (phytium_plane->format << FRAMEBUFFER_FORMAT_SHIFT); +- config &= ~(1 << FRAMEBUFFER_UVSWIZZLE_SHIFT); +- config |= 
(phytium_plane->uv_swizzle << FRAMEBUFFER_UVSWIZZLE_SHIFT); +- config &= ~(FRAMEBUFFER_SWIZZLE_MASK << FRAMEBUFFER_SWIZZLE_SHIFT); +- config |= (phytium_plane->swizzle << FRAMEBUFFER_SWIZZLE_SHIFT); +- config &= ~(FRAMEBUFFER_TILE_MODE_MASK << FRAMEBUFFER_TILE_MODE_SHIFT); +- config |= (phytium_plane->tiling[0] << FRAMEBUFFER_TILE_MODE_SHIFT); +- config &= (~FRAMEBUFFER_CLEAR); +- phytium_writel_reg(priv, config, PHYTIUM_DC_FRAMEBUFFER_CONFIG(phys_pipe)); +- } else if (plane->type == DRM_PLANE_TYPE_CURSOR) { +- phytium_plane->enable = 1; +- phytium_plane->cursor_hot_x = fb->hot_x; +- phytium_plane->cursor_hot_y = fb->hot_y; +- phytium_plane->cursor_x = plane->state->crtc_x + fb->hot_x; +- phytium_plane->cursor_y = plane->state->crtc_y + fb->hot_y; +- +- config = CURSOR_FORMAT_ARGB8888 | +- ((phytium_plane->cursor_hot_y & CURSOR_HOT_Y_MASK) << CURSOR_HOT_Y_SHIFT) | +- ((phytium_plane->cursor_hot_x & CURSOR_HOT_X_MASK) << CURSOR_HOT_X_SHIFT); +- +- phytium_writel_reg(priv, config, PHYTIUM_DC_CURSOR_CONFIG(phys_pipe)); +- phytium_writel_reg(priv, +- ((phytium_plane->cursor_x & CURSOR_X_MASK) << CURSOR_X_SHIFT) | +- ((phytium_plane->cursor_y & CURSOR_Y_MASK) << CURSOR_Y_SHIFT), +- PHYTIUM_DC_CURSOR_LOCATION(phys_pipe)); +- phytium_writel_reg(priv, phytium_plane->iova[0], +- PHYTIUM_DC_CURSOR_ADDRESS(phys_pipe)); +- } ++ src_x = plane->state->src_x >> 16; ++ src_y = plane->state->src_y >> 16; ++ crtc_x = plane->state->crtc_x; ++ crtc_y = plane->state->crtc_y; ++ crtc_w = plane->state->crtc_w; ++ crtc_h = plane->state->crtc_h; ++ ++ if (phytium_plane->dc_hw_update_dcreq) ++ phytium_plane->dc_hw_update_dcreq(plane); ++ phytium_plane->dc_hw_update_primary_hi_addr(plane); ++ ++ /* config dc */ ++ /* Y */ ++ base_offset = src_x * fb->format->cpp[0] + src_y*fb->pitches[0]; ++ phytium_writel_reg(priv, (phytium_plane->iova[0] + base_offset) & ADDRESS_MASK, ++ priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_Y_ADDRESS); ++ phytium_writel_reg(priv, ALIGN(fb->pitches[0], 128), 
++ priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_Y_STRIDE); ++ ++ /* U */ ++ phytium_writel_reg(priv, phytium_plane->iova[1] & 0xffffffff, ++ priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_U_ADDRESS); ++ phytium_writel_reg(priv, ALIGN(fb->pitches[1], 128), ++ priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_U_STRIDE); ++ ++ /* V */ ++ phytium_writel_reg(priv, phytium_plane->iova[2] & 0xffffffff, ++ priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_V_ADDRESS); ++ phytium_writel_reg(priv, ALIGN(fb->pitches[2], 128), ++ priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_V_STRIDE); ++ ++ /* size */ ++ phytium_writel_reg(priv, (crtc_w & WIDTH_MASK) | ((crtc_h&HEIGHT_MASK) << HEIGHT_SHIFT), ++ priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_SIZE); ++ /* config */ ++ config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], ++ PHYTIUM_DC_FRAMEBUFFER_CONFIG); ++ config &= ~(FRAMEBUFFER_FORMAT_MASK << FRAMEBUFFER_FORMAT_SHIFT); ++ config |= (phytium_plane->format << FRAMEBUFFER_FORMAT_SHIFT); ++ config &= ~(1 << FRAMEBUFFER_UVSWIZZLE_SHIFT); ++ config |= (phytium_plane->uv_swizzle << FRAMEBUFFER_UVSWIZZLE_SHIFT); ++ config &= ~(FRAMEBUFFER_SWIZZLE_MASK << FRAMEBUFFER_SWIZZLE_SHIFT); ++ config |= (phytium_plane->swizzle << FRAMEBUFFER_SWIZZLE_SHIFT); ++ config &= ~(FRAMEBUFFER_TILE_MODE_MASK << FRAMEBUFFER_TILE_MODE_SHIFT); ++ config |= (phytium_plane->tiling[0] << FRAMEBUFFER_TILE_MODE_SHIFT); ++ config &= (~FRAMEBUFFER_CLEAR); ++ phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], ++ PHYTIUM_DC_FRAMEBUFFER_CONFIG); ++} ++ ++static void phytium_dc_cursor_plane_update(struct drm_plane *plane) ++{ ++ struct drm_device *dev = plane->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ struct phytium_plane *phytium_plane = to_phytium_plane(plane); ++ struct drm_framebuffer *fb = plane->state->fb; ++ int phys_pipe = phytium_plane->phys_pipe; ++ int config; ++ unsigned long iova; ++ ++ phytium_plane->enable = 1; ++ 
phytium_plane->cursor_hot_x = fb->hot_x; ++ phytium_plane->cursor_hot_y = fb->hot_y; ++ phytium_plane->cursor_x = plane->state->crtc_x + fb->hot_x; ++ phytium_plane->cursor_y = plane->state->crtc_y + fb->hot_y; ++ ++ config = CURSOR_FORMAT_ARGB8888 | ++ ((phytium_plane->cursor_hot_y & CURSOR_HOT_Y_MASK) << CURSOR_HOT_Y_SHIFT) | ++ ((phytium_plane->cursor_hot_x & CURSOR_HOT_X_MASK) << CURSOR_HOT_X_SHIFT); ++ phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], PHYTIUM_DC_CURSOR_CONFIG); ++ ++ config = ((phytium_plane->cursor_x & CURSOR_X_MASK) << CURSOR_X_SHIFT) | ++ ((phytium_plane->cursor_y & CURSOR_Y_MASK) << CURSOR_Y_SHIFT); ++ phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], ++ PHYTIUM_DC_CURSOR_LOCATION); ++ iova = phytium_plane->iova[0]; ++ phytium_writel_reg(priv, iova & 0xffffffff, priv->dc_reg_base[phys_pipe], ++ PHYTIUM_DC_CURSOR_ADDRESS); ++ if (phytium_plane->dc_hw_update_cursor_hi_addr) ++ phytium_plane->dc_hw_update_cursor_hi_addr(plane, iova); ++} ++ ++static void phytium_plane_atomic_update(struct drm_plane *plane, ++ struct drm_plane_state *old_state) ++{ ++ struct drm_framebuffer *fb, *old_fb; ++ ++ DRM_DEBUG_KMS("update plane: type=%d\n", plane->type); ++ if (!plane->state->crtc || !plane->state->fb) ++ return; ++ ++ fb = plane->state->fb; ++ old_fb = old_state->fb; ++ ++ if (fb) ++ drm_framebuffer_reference(fb); ++ if (old_fb) ++ drm_framebuffer_unreference(old_fb); ++ ++ phytium_dc_get_plane_parameter(plane); ++ ++ if (plane->type == DRM_PLANE_TYPE_PRIMARY) ++ phytium_dc_primary_plane_update(plane); ++ else if (plane->type == DRM_PLANE_TYPE_CURSOR) ++ phytium_dc_cursor_plane_update(plane); + } + + static void phytium_plane_atomic_disable(struct drm_plane *plane, + struct drm_plane_state *old_state) + { + struct drm_device *dev = plane->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; ++ struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = 
to_phytium_plane(plane); +- struct drm_framebuffer *old_fb; + int phys_pipe = phytium_plane->phys_pipe; + int config; ++ struct drm_framebuffer *old_fb; + + old_fb = old_state->fb; + if (old_fb) + drm_framebuffer_unreference(old_fb); + + if (plane->type == DRM_PLANE_TYPE_PRIMARY) { +- phytium_writel_reg(priv, CLEAR_VALUE_RED, +- PHYTIUM_DC_FRAMEBUFFER_CLEARVALUE(phys_pipe)); +- config = phytium_readl_reg(priv, PHYTIUM_DC_FRAMEBUFFER_CONFIG(phys_pipe)); ++ phytium_writel_reg(priv, CLEAR_VALUE_RED, priv->dc_reg_base[phys_pipe], ++ PHYTIUM_DC_FRAMEBUFFER_CLEARVALUE); ++ config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], ++ PHYTIUM_DC_FRAMEBUFFER_CONFIG); + config |= FRAMEBUFFER_CLEAR; +- phytium_writel_reg(priv, config, PHYTIUM_DC_FRAMEBUFFER_CONFIG(phys_pipe)); ++ phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], ++ PHYTIUM_DC_FRAMEBUFFER_CONFIG); + } else if (plane->type == DRM_PLANE_TYPE_CURSOR) { + phytium_writel_reg(priv, CURSOR_FORMAT_DISABLED, +- PHYTIUM_DC_CURSOR_CONFIG(phys_pipe)); ++ priv->dc_reg_base[phys_pipe], PHYTIUM_DC_CURSOR_CONFIG); + } + } + +@@ -557,89 +514,114 @@ const struct drm_plane_helper_funcs phytium_plane_helper_funcs = { + + struct phytium_plane *phytium_primary_plane_create(struct drm_device *dev, int phys_pipe) + { +- struct phytium_plane *phytium_primary_plane = NULL; +- struct phytium_plane_state *phytium_primary_plane_state = NULL; ++ struct phytium_display_private *priv = dev->dev_private; ++ struct phytium_plane *phytium_plane = NULL; ++ struct phytium_plane_state *phytium_plane_state = NULL; + int ret = 0; + unsigned int flags = 0; ++ const uint32_t *formats = NULL; ++ uint32_t format_count; ++ const uint64_t *format_modifiers; + +- phytium_primary_plane = kzalloc(sizeof(*phytium_primary_plane), GFP_KERNEL); +- if (IS_ERR(phytium_primary_plane)) { ++ phytium_plane = kzalloc(sizeof(*phytium_plane), GFP_KERNEL); ++ if (!phytium_plane) { + ret = -ENOMEM; + goto failed_malloc_plane; + } + +- 
phytium_primary_plane_state = kzalloc(sizeof(*phytium_primary_plane_state), GFP_KERNEL); +- if (IS_ERR(phytium_primary_plane_state)) { ++ phytium_plane_state = kzalloc(sizeof(*phytium_plane_state), GFP_KERNEL); ++ if (!phytium_plane_state) { + ret = -ENOMEM; + goto failed_malloc_plane_state; + } +- phytium_primary_plane_state->base.plane = &phytium_primary_plane->base; +- phytium_primary_plane_state->base.rotation = DRM_MODE_ROTATE_0; +- phytium_primary_plane->base.state = &phytium_primary_plane_state->base; +- phytium_primary_plane->phys_pipe = phys_pipe; +- +- ret = drm_universal_plane_init(dev, &phytium_primary_plane->base, 0x0, +- &phytium_plane_funcs, phytium_primary_formats, +- ARRAY_SIZE(phytium_primary_formats), +- phytium_primary_formats_modifiers, ++ phytium_plane_state->base.plane = &phytium_plane->base; ++ phytium_plane_state->base.rotation = DRM_MODE_ROTATE_0; ++ phytium_plane->base.state = &phytium_plane_state->base; ++ phytium_plane->phys_pipe = phys_pipe; ++ ++ if (IS_X100(priv)) { ++ phytium_plane->dc_hw_plane_get_format = x100_dc_hw_plane_get_primary_format; ++ phytium_plane->dc_hw_update_dcreq = x100_dc_hw_update_dcreq; ++ phytium_plane->dc_hw_update_primary_hi_addr = x100_dc_hw_update_primary_hi_addr; ++ phytium_plane->dc_hw_update_cursor_hi_addr = NULL; ++ } ++ ++ phytium_plane->dc_hw_plane_get_format(&format_modifiers, &formats, &format_count); ++ ret = drm_universal_plane_init(dev, &phytium_plane->base, 0x0, ++ &phytium_plane_funcs, formats, ++ format_count, ++ format_modifiers, + DRM_PLANE_TYPE_PRIMARY, "primary %d", phys_pipe); + + if (ret) + goto failed_plane_init; + + flags = DRM_MODE_ROTATE_0; +- drm_plane_create_rotation_property(&phytium_primary_plane->base, DRM_MODE_ROTATE_0, flags); +- drm_plane_helper_add(&phytium_primary_plane->base, &phytium_plane_helper_funcs); ++ drm_plane_create_rotation_property(&phytium_plane->base, DRM_MODE_ROTATE_0, flags); ++ drm_plane_helper_add(&phytium_plane->base, &phytium_plane_helper_funcs); + +- 
return phytium_primary_plane; ++ return phytium_plane; + failed_plane_init: +- kfree(phytium_primary_plane_state); ++ kfree(phytium_plane_state); + failed_malloc_plane_state: +- kfree(phytium_primary_plane); ++ kfree(phytium_plane); + failed_malloc_plane: + return ERR_PTR(ret); + } + + struct phytium_plane *phytium_cursor_plane_create(struct drm_device *dev, int phys_pipe) + { +- struct phytium_plane *phytium_cursor_plane = NULL; +- struct phytium_plane_state *phytium_cursor_plane_state = NULL; ++ struct phytium_display_private *priv = dev->dev_private; ++ struct phytium_plane *phytium_plane = NULL; ++ struct phytium_plane_state *phytium_plane_state = NULL; + int ret = 0; + unsigned int flags = 0; ++ const uint32_t *formats = NULL; ++ uint32_t format_count; ++ const uint64_t *format_modifiers; + +- phytium_cursor_plane = kzalloc(sizeof(*phytium_cursor_plane), GFP_KERNEL); +- if (IS_ERR(phytium_cursor_plane)) { ++ phytium_plane = kzalloc(sizeof(*phytium_plane), GFP_KERNEL); ++ if (!phytium_plane) { + ret = -ENOMEM; + goto failed_malloc_plane; + } + +- phytium_cursor_plane_state = kzalloc(sizeof(*phytium_cursor_plane_state), GFP_KERNEL); +- if (IS_ERR(phytium_cursor_plane_state)) { ++ phytium_plane_state = kzalloc(sizeof(*phytium_plane_state), GFP_KERNEL); ++ if (!phytium_plane_state) { + ret = -ENOMEM; + goto failed_malloc_plane_state; + } +- phytium_cursor_plane_state->base.plane = &phytium_cursor_plane->base; +- phytium_cursor_plane_state->base.rotation = DRM_MODE_ROTATE_0; +- phytium_cursor_plane->base.state = &phytium_cursor_plane_state->base; +- phytium_cursor_plane->phys_pipe = phys_pipe; ++ phytium_plane_state->base.plane = &phytium_plane->base; ++ phytium_plane_state->base.rotation = DRM_MODE_ROTATE_0; ++ phytium_plane->base.state = &phytium_plane_state->base; ++ phytium_plane->phys_pipe = phys_pipe; ++ ++ if (IS_X100(priv)) { ++ phytium_plane->dc_hw_plane_get_format = x100_dc_hw_plane_get_cursor_format; ++ phytium_plane->dc_hw_update_dcreq = NULL; ++ 
phytium_plane->dc_hw_update_primary_hi_addr = NULL; ++ phytium_plane->dc_hw_update_cursor_hi_addr = NULL; ++ } + +- ret = drm_universal_plane_init(dev, &phytium_cursor_plane->base, 0x0, ++ phytium_plane->dc_hw_plane_get_format(&format_modifiers, &formats, &format_count); ++ ret = drm_universal_plane_init(dev, &phytium_plane->base, 0x0, + &phytium_plane_funcs, +- phytium_cursor_formats, ARRAY_SIZE(phytium_cursor_formats), +- NULL, DRM_PLANE_TYPE_CURSOR, "cursor %d", phys_pipe); ++ formats, format_count, ++ format_modifiers, ++ DRM_PLANE_TYPE_CURSOR, "cursor %d", phys_pipe); + + if (ret) + goto failed_plane_init; + + flags = DRM_MODE_ROTATE_0; +- drm_plane_create_rotation_property(&phytium_cursor_plane->base, DRM_MODE_ROTATE_0, flags); +- drm_plane_helper_add(&phytium_cursor_plane->base, &phytium_plane_helper_funcs); ++ drm_plane_create_rotation_property(&phytium_plane->base, DRM_MODE_ROTATE_0, flags); ++ drm_plane_helper_add(&phytium_plane->base, &phytium_plane_helper_funcs); + +- return phytium_cursor_plane; ++ return phytium_plane; + failed_plane_init: +- kfree(phytium_cursor_plane_state); ++ kfree(phytium_plane_state); + failed_malloc_plane_state: +- kfree(phytium_cursor_plane); ++ kfree(phytium_plane); + failed_malloc_plane: + return ERR_PTR(ret); + } +diff --git a/drivers/gpu/drm/phytium/phytium_plane.h b/drivers/gpu/drm/phytium/phytium_plane.h +index 2b9e2f365f13..41bb607d857e 100644 +--- a/drivers/gpu/drm/phytium/phytium_plane.h ++++ b/drivers/gpu/drm/phytium/phytium_plane.h +@@ -1,15 +1,7 @@ + /* SPDX-License-Identifier: GPL-2.0 */ +-/* Phytium X100 display drm driver ++/* Phytium display drm driver + * +- * Copyright (c) 2021 Phytium Limited. +- * +- * Author: +- * Yang Xun +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License as published by +- * the Free Software Foundation; either version 2 of the License, or +- * (at your option) any later version. 
++ * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + + #ifndef __PHYTIUM_PLANE_H__ +@@ -33,6 +25,13 @@ struct phytium_plane { + unsigned int cursor_y; + unsigned int cursor_hot_x; + unsigned int cursor_hot_y; ++ ++ void (*dc_hw_plane_get_format)(const uint64_t **format_modifiers, ++ const uint32_t **formats, ++ uint32_t *format_count); ++ void (*dc_hw_update_dcreq)(struct drm_plane *plane); ++ void (*dc_hw_update_primary_hi_addr)(struct drm_plane *plane); ++ void (*dc_hw_update_cursor_hi_addr)(struct drm_plane *plane, uint64_t iova); + }; + + struct phytium_plane_state { +diff --git a/drivers/gpu/drm/phytium/phytium_reg.h b/drivers/gpu/drm/phytium/phytium_reg.h +index 005a0416e5ff..7d8e1183f158 100644 +--- a/drivers/gpu/drm/phytium/phytium_reg.h ++++ b/drivers/gpu/drm/phytium/phytium_reg.h +@@ -1,69 +1,64 @@ + /* SPDX-License-Identifier: GPL-2.0 */ +-/* Phytium X100 display drm driver ++/* Phytium display drm driver + * +- * Copyright (c) 2021 Phytium Limited. +- * +- * Author: +- * Yang Xun +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License as published by +- * the Free Software Foundation; either version 2 of the License, or +- * (at your option) any later version. ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. 
+ */ + + #ifndef __PHYTIUM_REG_H__ + #define __PHYTIUM_REG_H__ + +-#define PHYTIUM_PIPE_BASE(pipe) (0x8000*pipe) +-#define PHYTIUM_DC_BASE(pipe) (PHYTIUM_PIPE_BASE(pipe) + 0x0000) +-#define PHYTIUM_DCREQ_BASE(pipe) (PHYTIUM_PIPE_BASE(pipe) + 0x2000) +-#define PHYTIUM_DP_BASE(pipe) (PHYTIUM_PIPE_BASE(pipe) + 0x3000) +-#define PHYTIUM_ADDRESS_TRANSFORM_BASE 0x4000 +-#define PHYTIUM_PHY_BASE(pipe) (PHYTIUM_PIPE_BASE(pipe) + 0x5000) ++/******************************register base******************************************/ ++#define X100_PIPE_BASE(pipe) (0x8000*pipe) ++#define X100_DC_BASE(pipe) (X100_PIPE_BASE(pipe) + 0x0000) ++#define X100_DCREQ_BASE(pipe) (X100_PIPE_BASE(pipe) + 0x2000) ++#define X100_DP_BASE(pipe) (X100_PIPE_BASE(pipe) + 0x3000) ++#define X100_ADDRESS_TRANSFORM_BASE 0x4000 ++#define X100_PHY_ACCESS_BASE(pipe) (X100_PIPE_BASE(pipe) + 0x5000) ++/******************************register base end******************************************/ + + /******************************dc register start******************************************/ +-#define PHYTIUM_DC_CLOCK_CONTROL(pipe) (PHYTIUM_DC_BASE(pipe) + 0x0000) +- #define SOFT_RESET (1<<12) +-#define PHYTIUM_DC_CLOCK_IDLE(pipe) (PHYTIUM_DC_BASE(pipe) + 0x0004) +- #define IS_IDLE (1<<16) +-#define PHYTIUM_DC_FRAMEBUFFER_Y_ADDRESS(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1400) ++#define PHYTIUM_DC_FRAMEBUFFER_Y_ADDRESS 0x1400 + #define ADDRESS_MASK 0xffffff80 +-#define PHYTIUM_DC_FRAMEBUFFER_Y_STRIDE(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1408) +-#define PHYTIUM_DC_PANEL_CONFIG(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1418) ++#define PHYTIUM_DC_FRAMEBUFFER_Y_STRIDE 0x1408 ++#define PHYTIUM_DC_PANEL_CONFIG 0x1418 + #define PANEL_DATAENABLE_ENABLE (1<<0) + #define PANEL_DATA_ENABLE (1<<4) + #define PANEL_CLOCK_ENABLE (1<<8) +-#define PHYTIUM_DC_HDISPLAY(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1430) ++#define PHYTIUM_DC_HDISPLAY 0x1430 + #define HDISPLAY_END_SHIFT 0 + #define HDISPLAY_END_MASK 0x7fff +- #define HDISPLAY_END_MAX 3840 + #define 
HDISPLAY_TOTAL_SHIFT 16 + #define HDISPLAY_TOTAL_MASK 0x7fff +-#define PHYTIUM_DC_HSYNC(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1438) ++#define PHYTIUM_DC_HSYNC 0x1438 + #define HSYNC_START_SHIFT 0 + #define HSYNC_START_MASK 0x7fff + #define HSYNC_END_SHIFT 15 + #define HSYNC_END_MASK 0x7fff + #define HSYNC_PULSE_ENABLED (1<<30) + #define HSYNC_NEGATIVE (1<<31) +-#define PHYTIUM_DC_VDISPLAY(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1440) ++#define PHYTIUM_DC_VDISPLAY 0x1440 + #define VDISPLAY_END_SHIFT 0 + #define VDISPLAY_END_MASK 0x7fff +- #define VDISPLAY_END_MAX 2160 + #define VDISPLAY_TOTAL_SHIFT 16 + #define VDISPLAY_TOTAL_MASK 0x7fff +-#define PHYTIUM_DC_VSYNC(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1448) ++#define PHYTIUM_DC_VSYNC 0x1448 + #define VSYNC_START_SHIFT 0 + #define VSYNC_START_MASK 0x7fff + #define VSYNC_END_SHIFT 15 + #define VSYNC_END_MASK 0x7fff + #define VSYNC_PULSE_ENABLED (1<<30) + #define VSYNC_NEGATIVE (1<<31) +-#define PHYTIUM_DC_DISPLAY_CURRENT_LOCATION(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1450) +-#define PHYTIUM_DC_GAMMA_INDEX(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1458) +-#define PHYTIUM_DC_GAMMA_DATA(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1460) +-#define PHYTIUM_DC_CURSOR_CONFIG(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1468) ++#define PHYTIUM_DC_DISPLAY_CURRENT_LOCATION 0x1450 ++#define PHYTIUM_DC_GAMMA_INDEX 0x1458 ++ #define GAMMA_INDEX_MAX 256 ++#define PHYTIUM_DC_GAMMA_DATA 0x1460 ++ #define GAMMA_BLUE_SHIFT 0 ++ #define GAMMA_BLUE_MASK 0x3ff ++ #define GAMMA_GREEN_SHIFT 10 ++ #define GAMMA_GREEN_MASK 0x3ff ++ #define GAMMA_RED_SHIFT 20 ++ #define GAMMA_RED_MASK 0x3ff ++#define PHYTIUM_DC_CURSOR_CONFIG 0x1468 + #define CURSOR_FORMAT_DISABLED 0x0 + #define CURSOR_FORMAT_MASKMODE 0x3 + #define CURSOR_FORMAT_ARGB8888 0x2 +@@ -72,23 +67,24 @@ + #define CURSOR_HOT_Y_MASK 0x1f + #define CURSOR_HOT_X_SHIFT 16 + #define CURSOR_HOT_X_MASK 0x1f +-#define PHYTIUM_DC_CURSOR_ADDRESS(pipe) (PHYTIUM_DC_BASE(pipe) + 0x146c) +-#define PHYTIUM_DC_CURSOR_LOCATION(pipe) (PHYTIUM_DC_BASE(pipe) + 
0x1470) ++#define PHYTIUM_DC_CURSOR_ADDRESS 0x146c ++#define PHYTIUM_DC_CURSOR_LOCATION 0x1470 + #define CURSOR_X_SHIFT 0 + #define CURSOR_X_MASK 0x7fff + #define CURSOR_Y_SHIFT 16 + #define CURSOR_Y_MASK 0x7fff +-#define PHYTIUM_DC_CURSOR_BACKGROUND(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1474) +-#define PHYTIUM_DC_CURSOR_FOREGROUND(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1478) +-#define PHYTIUM_DC_INT_STATUS(pipe) (PHYTIUM_DC_BASE(pipe) + 0x147c) ++#define PHYTIUM_DC_CURSOR_BACKGROUND 0x1474 ++#define PHYTIUM_DC_CURSOR_FOREGROUND 0x1478 ++#define PHYTIUM_DC_INT_STATUS 0x147c + #define INT_STATUS 0x1 +-#define PHYTIUM_DC_INT_ENABLE(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1480) ++#define PHYTIUM_DC_INT_ENABLE 0x1480 + #define INT_ENABLE 0x1 + #define INT_DISABLE 0x0 + +-#define PHYTIUM_DC_FRAMEBUFFER_CONFIG(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1518) ++#define PHYTIUM_DC_FRAMEBUFFER_CONFIG 0x1518 + #define FRAMEBUFFER_OUTPUT BIT(0) +- #define FRAMEBUFFER_VALID_PENDING (1<<3) ++ #define FRAMEBUFFER_GAMMA_ENABLE BIT(2) ++ #define FRAMEBUFFER_VALID_PENDING BIT(3) + #define FRAMEBUFFER_RESET BIT(4) + #define FRAMEBUFFER_PROGRESS BIT(6) + #define FRAMEBUFFER_ROT_ANGLE_SHIFT (11) +@@ -112,6 +108,8 @@ + #define FRAMEBUFFER_FORMAT_ARGB8888 0x6 + #define FRAMEBUFFER_FORMAT_YUYV 0x7 + #define FRAMEBUFFER_FORMAT_UYVY 0x8 ++ #define FRAMEBUFFER_FORMAT_NV12 0x11 ++ #define FRAMEBUFFER_FORMAT_NV16 0x12 + #define FRAMEBUFFER_FORMAT_ARGB2101010 0x16 + #define FRAMEBUFFER_SWIZZLE_SHIFT 23 + #define FRAMEBUFFER_SWIZZLE_MASK 0x3 +@@ -124,140 +122,70 @@ + #define FRAMEBUFFER_UVSWIZZLE_ENABLE 1 + #define FRAMEBUFFER_CLEAR BIT(8) + #define FRAMEBUFFER_SCALE_ENABLE BIT(22) +-#define PHYTIUM_DC_FRAMEBUFFER_SCALECONFIG(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1520) ++#define PHYTIUM_DC_FRAMEBUFFER_SCALECONFIG 0x1520 + #define FRAMEBUFFER_FILTER_TAP 3 + #define FRAMEBUFFER_HORIZONTAL_FILTER_TAP 3 + #define FRAMEBUFFER_TAP 0x33 +-#define PHYTIUM_DC_FRAMEBUFFER_U_ADDRESS(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1530) +-#define 
PHYTIUM_DC_FRAMEBUFFER_V_ADDRESS(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1538) +-#define PHYTIUM_DC_OVERLAY_CONFIG(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1540) +- #define PHYTIUM_DC_OVERLAY_ENABLE BIT(24) +- +-#define PHYTIUM_DC_FRAMEBUFFER_U_STRIDE(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1800) +-#define PHYTIUM_DC_FRAMEBUFFER_V_STRIDE(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1808) +-#define PHYTIUM_DC_FRAMEBUFFER_SIZE(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1810) ++#define PHYTIUM_DC_FRAMEBUFFER_U_ADDRESS 0x1530 ++#define PHYTIUM_DC_FRAMEBUFFER_V_ADDRESS 0x1538 ++#define PHYTIUM_DC_OVERLAY_CONFIG 0x1540 ++ #define X100_DC_OVERLAY_ENABLE BIT(24) ++ ++#define PHYTIUM_DC_FRAMEBUFFER_U_STRIDE 0x1800 ++#define PHYTIUM_DC_FRAMEBUFFER_V_STRIDE 0x1808 ++#define PHYTIUM_DC_FRAMEBUFFER_SIZE 0x1810 + #define WIDTH_SHIFT 0 + #define WIDTH_MASK 0x7fff + #define HEIGHT_SHIFT 15 + #define HEIGHT_MASK 0x7fff + +-#define PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_X(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1828) ++#define PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_X 0x1828 + #define SCALE_FACTOR_X_MASK 0x7fffffff +-#define PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_Y(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1830) ++#define PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_Y 0x1830 + #define SCALE_FACTOR_Y_MASK 0x7fffffff + #define SCALE_FACTOR_Y_MAX 0x3 + #define SCALE_FACTOR_SRC_OFFSET 16 + +-#define PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER_INDEX(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1838) ++#define PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER_INDEX 0x1838 + #define HORI_FILTER_INDEX 0x0 +-#define PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1a00) +-#define PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER_INDEX(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1a08) ++#define PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER 0x1a00 ++#define PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER_INDEX 0x1a08 + #define VERT_FILTER_INDEX 0x0 +-#define PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1a10) +- +-#define PHYTIUM_DC_FRAMEBUFFER_CLEARVALUE(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1a18) ++#define 
PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER 0x1a10 ++#define PHYTIUM_DC_FRAMEBUFFER_CLEARVALUE 0x1a18 + #define CLEAR_VALUE_RED 0x00ff0000 + #define CLEAR_VALUE_GREEN 0x0000ff00 +- +-#define PHYTIUM_DC_FRAMEBUFFER_INITIALOFFSET(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1a20) +- #define INITIALOFFSET (0x8000 | (0X8000 << 16)) +- +-#define PHYTIUM_DC_DP_CONFIG(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1cd0) ++#define PHYTIUM_DC_FRAMEBUFFER_INITIALOFFSET 0x1a20 ++ #define INITIALOFFSET (0x8000 | (0X8000 << 16)) ++#define PHYTIUM_DC_DP_CONFIG 0x1cd0 + #define OUTPUT_DP (1<<3) + #define DP_RGB666 (0x1) + #define DP_RGB888 (0x2) + #define DP_RGB101010 (0x3) + /******************************dc register end********************************************/ + +-/******************************dcreq register start**************************************/ +-#define PHYTIUM_DCREQ_PLANE0_ADDR_START(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x00) +-#define PHYTIUM_DCREQ_PLANE0_ADDR_END(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x04) +-#define PHYTIUM_DCREQ_PLANE1_ADDR_START(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x08) +-#define PHYTIUM_DCREQ_PLANE1_ADDR_END(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x0c) +-#define PHYTIUM_DCREQ_PLANE0_CONFIG(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x10) +- #define DCREQ_NO_LOSSY (0 << 0) +- #define DCREQ_LOSSY (1 << 0) +- #define DCREQ_TILE_TYPE_MASK (0x3 << 1) +- #define DCREQ_TILE_TYPE_MODE0 (0x1 << 1) +- #define DCREQ_TILE_TYPE_MODE3 (0x2 << 1) +- #define DCREQ_COLOURFORMAT_MASK (0x7f << 8) +- #define DCREQ_COLOURFORMAT_RGB565 (0x5 << 8) +- #define DCREQ_COLOURFORMAT_ARGB1555 (0x4 << 8) +- #define DCREQ_COLOURFORMAT_ARGB4444 (0x02 << 8) +- #define DCREQ_COLOURFORMAT_BGRA8888 (0x29 << 8) +- #define DCREQ_COLOURFORMAT_ARGB2101010 (0xe << 8) +- #define DCREQ_COLOURFORMAT_YUYV (0x59 << 8) +- #define DCREQ_COLOURFORMAT_UYVY (0x5b << 8) +- #define DCREQ_ARGBSWIZZLE_MASK (0xf << 4) +- #define DCREQ_ARGBSWIZZLE_ARGB (0X0 << 4) +- #define DCREQ_ARGBSWIZZLE_BGRA (0XC << 4) +- #define DCREQ_MODE_MASK (1 << 16) +- #define 
DCREQ_MODE_LINEAR (0 << 16) +- #define DCREQ_MODE_TILE (1 << 16) +-#define PHYTIUM_DCREQ_PLANE1_CONFIG(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x14) +-#define PHYTIUM_DCREQ_PLANE0_CLEAR_COLOR_L(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x18) +-#define PHYTIUM_DCREQ_PLANE0_CLEAR_COLOR_H(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x1C) +-#define PHYTIUM_DCREQ_PLANE1_CLEAR_COLOR_L(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x20) +-#define PHYTIUM_DCREQ_PLANE1_CLEAR_COLOR_H(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x24) +-#define PHYTIUM_DCREQ_PIX_CLOCK_CONFIG(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x38) +- #define FLAG_REPLY (1<<31) +- #define FLAG_REQUEST (1<<30) +- #define CMD_BACKLIGHT (0x1 << 28) +- #define CMD_DC_DP_RESET (0x3 << 28) +- #define BACKLIGHT_SHIFT 21 +- #define BACKLIGHT_MASK 0x7f +- #define BACKLIGHT_MAX 100 +- #define BACKLIGHT_ENABLE (101 << BACKLIGHT_SHIFT) +- #define BACKLIGHT_DISABLE (102 << BACKLIGHT_SHIFT) +- #define PANEL_POWER_ENABLE (103 << BACKLIGHT_SHIFT) +- #define PANEL_POWER_DISABLE (104 << BACKLIGHT_SHIFT) +- #define PIX_CLOCK_MASK (0x1fffff) +- #define PIX_CLOCK_MAX (594000) +-#define PHYTIUM_DCREQ_FBCD_CLOCK_CONFIG(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x3c) +-#define PHYTIUM_DCREQ_PIX_DMA_PREFIX(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x50) +- #define PREFIX_MASK 0xff +- #define PREFIX_SHIFT (32) +-#define PHYTIUM_DCREQ_FRAME_START(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x54) +-#define PHYTIUM_DCREQ_FILTER_CONFIG(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x58) +-#define PHYTIUM_DCREQ_CONTROL(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x5C) +- #define DC_REQ_ENABLE (1<<0) +-#define PHYTIUM_DCREQ_MSI_CLEAR(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x60) +- #define MSI_CLEAR 0x0 +- +-#define PHYTIUM_DCREQ_RESET(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x68) +- #define DCREQ_RESET (0x3 << 0) +- #define DCREQ_RESET_MASK 0x3 +-#define PHYTIUM_DCREQ_PLAN(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x94) +- #define DCREQ_PLAN_A 0x0 +- #define DCREQ_PLAN_B 0X5 +-/******************************dcreq register 
end**************************************/ +- +-/******************************address transform register start**************************/ +-#define PHYTIUM_DC_ADDRESS_TRANSFORM_SRC_ADDR (PHYTIUM_ADDRESS_TRANSFORM_BASE + 0x24) +- #define SRC_ADDR_OFFSET 22 +- #define SRC_ADDR_MASK 0xffffffffff +-#define PHYTIUM_DC_ADDRESS_TRANSFORM_SIZE (PHYTIUM_ADDRESS_TRANSFORM_BASE + 0x28) +- #define ADDRESS_TRANSFORM_ENABLE (0x1 << 31) +- #define SIZE_OFFSET 22 +-#define PHYTIUM_DC_ADDRESS_TRANSFORM_DST_ADDR (PHYTIUM_ADDRESS_TRANSFORM_BASE + 0x2c) +- #define DST_ADDR_OFFSET 22 +-#define PHYTIUM_DC_DP_RESET_STATUS (PHYTIUM_ADDRESS_TRANSFORM_BASE + 0x48) +- #define DC_DP_RESET_STATUS(pipe) (1 << pipe) +-/******************************address transform register end**************************/ ++/******************************phy access register****************************************/ ++#define PHYTIUM_PHY_ACCESS_ADDRESS 0x0000 ++#define PHYTIUM_PHY_WRITE_DATA 0x0004 ++#define PHYTIUM_PHY_READ_DATA 0x0008 ++#define PHYTIUM_PHY_ACCESS_CTRL 0x000c ++ #define ACCESS_WRITE (1<<0) ++ #define ACCESS_READ (1<<1) ++/******************************phy access register end*************************************/ + + /******************************dp register start******************************************/ +-#define PHYTIUM_DP_LINK_BW_SET(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0000) +-#define PHYTIUM_DP_LANE_COUNT_SET(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0004) +-#define PHYTIUM_DP_ENHANCED_FRAME_EN(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0008) ++#define PHYTIUM_DP_LINK_BW_SET 0x0000 ++#define PHYTIUM_DP_LANE_COUNT_SET 0x0004 ++#define PHYTIUM_DP_ENHANCED_FRAME_EN 0x0008 + #define ENHANCED_FRAME_ENABLE 0x1 + #define ENHANCED_FRAME_DISABLE 0x0 +-#define PHYTIUM_DP_TRAINING_PATTERN_SET(pipe) (PHYTIUM_DP_BASE(pipe) + 0x000c) ++#define PHYTIUM_DP_TRAINING_PATTERN_SET 0x000c + #define TRAINING_OFF 0x0 + #define TRAINING_PATTERN_1 0x1 + #define TRAINING_PATTERN_2 0x2 + #define TRAINING_PATTERN_3 0x3 + #define 
TRAINING_PATTERN_4 0x4 +-#define PHYTIUM_DP_LINK_QUAL_PATTERN_SET(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0010) ++#define PHYTIUM_DP_LINK_QUAL_PATTERN_SET 0x0010 + #define TEST_PATTERN_NONE 0x0 + #define TEST_PATTERN_D10_2 0x1 + #define TEST_PATTERN_SYMBOL_ERROR 0x2 +@@ -267,59 +195,57 @@ + #define TEST_PATTERN_CP2520_2 0x6 + #define TEST_PATTERN_CP2520_3 0x7 + #define TEST_PATTERN_LANE_SHIFT 8 +- +-#define PHYTIUM_DP_SCRAMBLING_DISABLE(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0014) ++#define PHYTIUM_DP_SCRAMBLING_DISABLE 0x0014 + #define SCRAMBLING_ENABLE 0x0 + #define SCRAMBLING_DISABLE 0x1 +-#define PHYTIUM_DP_DOWNSPREAD_CTRL(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0018) +-#define PHYTIUM_DP_ALT_SCRAMBLER_RESET(pipe) (PHYTIUM_DP_BASE(pipe) + 0x001c) +-#define PHYTIUM_DP_HBR2_SCRAMBLER_RESET(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0020) +-#define PHYTIUM_DP_DISPLAYPORT_VERSION(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0024) +-#define PHYTIUM_DP_LANE_REMAP (PHYTIUM_DP_BASE(pipe) + 0x002C) +-#define PHYTIUM_DP_CUSTOM_80BIT_PATTERN_0(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0030) +-#define PHYTIUM_DP_CUSTOM_80BIT_PATTERN_1(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0034) +-#define PHYTIUM_DP_CUSTOM_80BIT_PATTERN_2(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0038) +-#define PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0080) ++#define PHYTIUM_DP_DOWNSPREAD_CTRL 0x0018 ++#define PHYTIUM_DP_ALT_SCRAMBLER_RESET 0x001c ++#define PHYTIUM_DP_HBR2_SCRAMBLER_RESET 0x0020 ++#define PHYTIUM_DP_DISPLAYPORT_VERSION 0x0024 ++#define PHYTIUM_DP_CUSTOM_80BIT_PATTERN_0 0x0030 ++#define PHYTIUM_DP_CUSTOM_80BIT_PATTERN_1 0x0034 ++#define PHYTIUM_DP_CUSTOM_80BIT_PATTERN_2 0x0038 ++#define PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE 0x0080 + #define TRANSMITTER_OUTPUT_ENABLE BIT(0) + #define TRANSMITTER_OUTPUT_DISABLE 0 +-#define PHYTIUM_DP_VIDEO_STREAM_ENABLE(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0084) ++#define PHYTIUM_DP_VIDEO_STREAM_ENABLE 0x0084 + #define SST_MST_SOURCE_0_ENABLE BIT(0) + #define SST_MST_SOURCE_0_ENABLE_MASK 0x1 + 
#define SST_MST_SOURCE_0_DISABLE 0 +-#define PHYTIUM_DP_SECONDARY_STREAM_ENABLE(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0088) ++#define PHYTIUM_DP_SECONDARY_STREAM_ENABLE 0x0088 + #define SECONDARY_STREAM_ENABLE 0x1 + #define SECONDARY_STREAM_DISABLE 0x0 +-#define PHYTIUM_DP_SEC_DATA_WINDOW(pipe) (PHYTIUM_DP_BASE(pipe) + 0x008C) +-#define PHYTIUM_DP_SOFT_RESET(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0090) ++#define PHYTIUM_DP_SEC_DATA_WINDOW 0x008C ++#define PHYTIUM_DP_SOFT_RESET 0x0090 + #define LINK_SOFT_RESET (0x1 << 0) + #define VIDEO_SOFT_RESET (0x1 << 1) +-#define PHYTIUM_INPUT_SOURCE_ENABLE(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0094) ++#define PHYTIUM_INPUT_SOURCE_ENABLE 0x0094 + #define VIRTUAL_SOURCE_0_ENABLE BIT(0) + #define VIRTUAL_SOURCE_0_ENABLE_MASK 0x1 +-#define PHYTIUM_DP_FORCE_SCRAMBLER_RESET(pipe) (PHYTIUM_DP_BASE(pipe) + 0x00C0) ++#define PHYTIUM_DP_FORCE_SCRAMBLER_RESET 0x00C0 + #define SCRAMBLER_RESET BIT(0) +-#define PHYTIUM_DP_SOURCE_CONTROL_STATUS(pipe) (PHYTIUM_DP_BASE(pipe) + 0x00C4) +-#define PHYTIUM_DP_DATA_CONTROL(pipe) (PHYTIUM_DP_BASE(pipe) + 0x00C8) +-#define PHYTIUM_DP_CORE_CAPABILITY(pipe) (PHYTIUM_DP_BASE(pipe) + 0x00F8) +-#define PHYTIUM_DP_CORE_ID(pipe) (PHYTIUM_DP_BASE(pipe) + 0x00FC) +-#define PHYTIUM_DP_AUX_COMMAND(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0100) ++#define PHYTIUM_DP_SOURCE_CONTROL_STATUS 0x00C4 ++#define PHYTIUM_DP_DATA_CONTROL 0x00C8 ++#define PHYTIUM_DP_CORE_CAPABILITY 0x00F8 ++#define PHYTIUM_DP_CORE_ID 0x00FC ++#define PHYTIUM_DP_AUX_COMMAND 0x0100 + #define BYTE_COUNT_MASK 0xf + #define COMMAND_SHIFT 8 + #define COMMAND_MASK 0xf + #define ADDRESS_ONLY (1<<12) +-#define PHYTIUM_DP_AUX_WRITE_FIFO(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0104) +-#define PHYTIUM_DP_AUX_ADDRESS(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0108) +-#define PHYTIUM_DP_AUX_CLK_DIVIDER(pipe) (PHYTIUM_DP_BASE(pipe) + 0x010C) ++#define PHYTIUM_DP_AUX_WRITE_FIFO 0x0104 ++#define PHYTIUM_DP_AUX_ADDRESS 0x0108 ++#define PHYTIUM_DP_AUX_CLK_DIVIDER 0x010C + #define AUX_CLK_DIVIDER 48 
+-#define PHYTIUM_DP_SINK_HPD_STATE(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0128) ++#define PHYTIUM_DP_SINK_HPD_STATE 0x0128 + #define HPD_CONNECT 0x1 + #define HPD_DISCONNECT 0x0 +-#define PHYTIUM_DP_INTERRUPT_RAW_STATUS(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0130) ++#define PHYTIUM_DP_INTERRUPT_RAW_STATUS 0x0130 + #define REPLY_TIMEOUT (1<<3) +- #define PHYTIUM_DP_STATUS_REQUEST_IN_PROGRESS (1<<1) ++ #define DP_STATUS_REQUEST_IN_PROGRESS (1<<1) + #define HPD_STATE (0<<1) +-#define PHYTIUM_DP_AUX_REPLY_DATA(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0134) +-#define PHYTIUM_DP_AUX_REPLY_CODE(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0138) ++#define PHYTIUM_DP_AUX_REPLY_DATA 0x0134 ++#define PHYTIUM_DP_AUX_REPLY_CODE 0x0138 + #define AUX_NATIVE_ACK (0x0<<0) + #define AUX_NATIVE_NACK (0x1<<0) + #define AUX_NATIVE_DEFER (0x2<<0) +@@ -328,32 +254,32 @@ + #define AUX_I2C_NACK (0x1<<2) + #define AUX_I2C_DEFER (0x2<<2) + #define AUX_I2C_MASK (0x3 << 2) +-#define PHYTIUM_DP_INTERRUPT_STATUS(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0140) +- #define HPD_IRQ (1<<1) ++#define PHYTIUM_DP_INTERRUPT_STATUS 0x0140 ++ #define HPD_IRQ (1<<1) + #define HPD_EVENT (1<<0) +-#define PHYTIUM_DP_INTERRUPT_MASK(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0144) ++#define PHYTIUM_DP_INTERRUPT_MASK 0x0144 + #define HPD_IRQ_MASK (1<<1) + #define HPD_EVENT_MASK (1<<0) + #define HPD_OTHER_MASK 0x3c +-#define PHYTIUM_DP_AUX_REPLY_DATA_COUNT(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0148) +-#define PHYTIUM_DP_AUX_STATUS(pipe) (PHYTIUM_DP_BASE(pipe) + 0x014C) ++#define PHYTIUM_DP_AUX_REPLY_DATA_COUNT 0x0148 ++#define PHYTIUM_DP_AUX_STATUS 0x014C + #define REPLY_RECEIVED 0x1 + #define REPLY_IN_PROGRESS 0x2 + #define REQUEST_IN_PROGRESS 0x4 + #define REPLY_ERROR 0x8 +-#define PHYTIUM_DP_AUX_TIMER(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0158) +-#define PHYTIUM_DP_MAIN_LINK_HTOTAL(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0180) +-#define PHYTIUM_DP_MAIN_LINK_VTOTAL(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0184) +-#define PHYTIUM_DP_MAIN_LINK_POLARITY(pipe) (PHYTIUM_DP_BASE(pipe) + 
0x0188) ++#define PHYTIUM_DP_AUX_TIMER 0x0158 ++#define PHYTIUM_DP_MAIN_LINK_HTOTAL 0x0180 ++#define PHYTIUM_DP_MAIN_LINK_VTOTAL 0x0184 ++#define PHYTIUM_DP_MAIN_LINK_POLARITY 0x0188 + #define VSYNC_POLARITY_LOW BIT(1) + #define HSYNC_POLARITY_LOW BIT(0) +-#define PHYTIUM_DP_MAIN_LINK_HSWIDTH(pipe) (PHYTIUM_DP_BASE(pipe) + 0x018C) +-#define PHYTIUM_DP_MAIN_LINK_VSWIDTH(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0190) +-#define PHYTIUM_DP_MAIN_LINK_HRES(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0194) +-#define PHYTIUM_DP_MAIN_LINK_VRES(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0198) +-#define PHYTIUM_DP_MAIN_LINK_HSTART(pipe) (PHYTIUM_DP_BASE(pipe) + 0x019C) +-#define PHYTIUM_DP_MAIN_LINK_VSTART(pipe) (PHYTIUM_DP_BASE(pipe) + 0x01A0) +-#define PHYTIUM_DP_MAIN_LINK_MISC0(pipe) (PHYTIUM_DP_BASE(pipe) + 0x01A4) ++#define PHYTIUM_DP_MAIN_LINK_HSWIDTH 0x018C ++#define PHYTIUM_DP_MAIN_LINK_VSWIDTH 0x0190 ++#define PHYTIUM_DP_MAIN_LINK_HRES 0x0194 ++#define PHYTIUM_DP_MAIN_LINK_VRES 0x0198 ++#define PHYTIUM_DP_MAIN_LINK_HSTART 0x019C ++#define PHYTIUM_DP_MAIN_LINK_VSTART 0x01A0 ++#define PHYTIUM_DP_MAIN_LINK_MISC0 0x01A4 + #define MISC0_SYNCHRONOUS_CLOCK BIT(0) + #define MISC0_BIT_DEPTH_OFFSET 5 + #define MISC0_BIT_DEPTH_6BIT 0x0 +@@ -361,47 +287,44 @@ + #define MISC0_BIT_DEPTH_10BIT 0x2 + #define MISC0_COMPONENT_FORMAT_SHIFT 1 + #define MISC0_COMPONENT_FORMAT_RGB 0x0 +- +-#define PHYTIUM_DP_MAIN_LINK_MISC1(pipe) (PHYTIUM_DP_BASE(pipe) + 0x01A8) +-#define PHYTIUM_DP_M_VID(pipe) (PHYTIUM_DP_BASE(pipe) + 0x01AC) +-#define PHYTIUM_DP_TRANSFER_UNIT_SIZE(pipe) (PHYTIUM_DP_BASE(pipe) + 0x01B0) +-#define PHYTIUM_DP_N_VID(pipe) (PHYTIUM_DP_BASE(pipe) + 0x01B4) +-#define PHYTIUM_DP_USER_PIXEL_WIDTH(pipe) (PHYTIUM_DP_BASE(pipe) + 0x01B8) +-#define PHYTIUM_DP_DATA_COUNT(pipe) (PHYTIUM_DP_BASE(pipe) + 0x01BC) +-#define PHYTIUM_DP_INTERLACED(pipe) (PHYTIUM_DP_BASE(pipe) + 0x01C0) +-#define PHYTIUM_DP_USER_SYNC_POLARITY(pipe) (PHYTIUM_DP_BASE(pipe) + 0x01C4) +-#define USER_ODDEVEN_POLARITY_HIGH BIT(3) +-#define 
USER_DATA_ENABLE_POLARITY_HIGH BIT(2) +-#define USER_VSYNC_POLARITY_HIGH BIT(1) +-#define USER_HSYNC_POLARITY_HIGH BIT(0) +-#define PHYTIUM_DP_USER_CONTROL(pipe) (PHYTIUM_DP_BASE(pipe) + 0x01C8) +-#define PHYTIUM_EDP_CRC_ENABLE(pipe) (PHYTIUM_DP_BASE(pipe) + 0x01D0) +-#define PHYTIUM_EDP_CRC_RED(pipe) (PHYTIUM_DP_BASE(pipe) + 0x01D4) +-#define PHYTIUM_EDP_CRC_GREEN(pipe) (PHYTIUM_DP_BASE(pipe) + 0x01D8) +-#define PHYTIUM_EDP_CRC_BLUE(pipe) (PHYTIUM_DP_BASE(pipe) + 0x01DC) +- +-#define PHYTIUM_DP_SEC_AUDIO_ENABLE(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0300) +- #define SEC_AUDIO_ENABLE 1 +- #define SEC_AUDIO_DISABLE 0 +-#define PHYTIUM_DP_SEC_INPUT_SELECT(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0304) ++#define PHYTIUM_DP_MAIN_LINK_MISC1 0x01A8 ++#define PHYTIUM_DP_M_VID 0x01AC ++#define PHYTIUM_DP_TRANSFER_UNIT_SIZE 0x01B0 ++#define PHYTIUM_DP_N_VID 0x01B4 ++#define PHYTIUM_DP_USER_PIXEL_WIDTH 0x01B8 ++#define PHYTIUM_DP_DATA_COUNT 0x01BC ++#define PHYTIUM_DP_INTERLACED 0x01C0 ++#define PHYTIUM_DP_USER_SYNC_POLARITY 0x01C4 ++ #define USER_ODDEVEN_POLARITY_HIGH BIT(3) ++ #define USER_DATA_ENABLE_POLARITY_HIGH BIT(2) ++ #define USER_VSYNC_POLARITY_HIGH BIT(1) ++ #define USER_HSYNC_POLARITY_HIGH BIT(0) ++#define PHYTIUM_DP_USER_CONTROL 0x01C8 ++#define PHYTIUM_EDP_CRC_ENABLE 0x01D0 ++#define PHYTIUM_EDP_CRC_RED 0x01D4 ++#define PHYTIUM_EDP_CRC_GREEN 0x01D8 ++#define PHYTIUM_EDP_CRC_BLUE 0x01DC ++#define PHYTIUM_DP_SEC_AUDIO_ENABLE 0x0300 ++ #define SEC_AUDIO_ENABLE BIT(0) ++ #define CHANNEL_MUTE_ENABLE BIT(1) ++#define PHYTIUM_DP_SEC_INPUT_SELECT 0x0304 + #define INPUT_SELECT_I2S 0x0 +-#define PHYTIUM_DP_SEC_CHANNEL_COUNT(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0308) +- #define CHANNEL_MUTE 0x0 ++#define PHYTIUM_DP_SEC_CHANNEL_COUNT 0x0308 + #define CHANNEL_2 0x2 + #define CHANNEL_2_LFE 0x3 + #define CHANNEL_5_1 0x6 + #define CHANNEL_7_1 0x7 + #define CHANNEL_MASK 0xf +-#define PHYTIUM_DP_SEC_DIRECT_CLKDIV(pipe) (PHYTIUM_DP_BASE(pipe) + 0x030c) ++#define PHYTIUM_DP_SEC_DIRECT_CLKDIV 0x030c 
+ #define APB_CLOCK 48000000 +-#define PHYTIUM_DP_SEC_MAUD(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0318) +-#define PHYTIUM_DP_SEC_NAUD(pipe) (PHYTIUM_DP_BASE(pipe) + 0x031c) +-#define PHYTIUM_DP_SEC_CLOCK_MODE(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0320) ++#define PHYTIUM_DP_SEC_MAUD 0x0318 ++#define PHYTIUM_DP_SEC_NAUD 0x031c ++#define PHYTIUM_DP_SEC_CLOCK_MODE 0x0320 + #define CLOCK_MODE_SYNC 0x1 +-#define PHYTIUM_DP_SEC_CS_SOURCE_FORMAT(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0340) ++#define PHYTIUM_DP_SEC_CS_SOURCE_FORMAT 0x0340 + #define CS_SOURCE_FORMAT_DEFAULT 0x0 +-#define PHYTIUM_DP_SEC_CS_CATEGORY_CODE(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0344) +-#define PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0348) ++#define PHYTIUM_DP_SEC_CS_CATEGORY_CODE 0x0344 ++#define PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ 0x0348 + #define ORIG_FREQ_32000 0xc + #define ORIG_FREQ_44100 0xf + #define ORIG_FREQ_48000 0xd +@@ -417,7 +340,7 @@ + #define WORD_LENGTH_24 0xd + #define WORD_LENGTH_MASK 0xf + #define WORD_LENGTH_SHIFT 4 +-#define PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY(pipe) (PHYTIUM_DP_BASE(pipe) + 0x034c) // not used ++#define PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY 0x034c // not used + #define SAMPLING_FREQ_32000 0xc + #define SAMPLING_FREQ_44100 0x0 + #define SAMPLING_FREQ_48000 0x4 +@@ -427,260 +350,8 @@ + #define SAMPLING_FREQ_192000 0x7 + #define SAMPLING_FREQ_MASK 0xf + #define SAMPLING_FREQ_SHIFT 4 +- +-#define PHYTIUM_DP_SEC_CHANNEL_MAP(pipe) (PHYTIUM_DP_BASE(pipe) + 0x035C) ++#define PHYTIUM_DP_SEC_CHANNEL_MAP 0x035C + #define CHANNEL_MAP_DEFAULT 0x87654321 + /******************************dp register end********************************************/ + +-/******************************phy register start******************************************/ +-/* self define */ +-#define PHYTIUM_PHY_ACCESS_ADDRESS(pipe) (PHYTIUM_PHY_BASE(pipe) + 0x0000) +-#define PHYTIUM_PHY_WRITE_DATA(pipe) (PHYTIUM_PHY_BASE(pipe) + 0x0004) +-#define PHYTIUM_PHY_READ_DATA(pipe) 
(PHYTIUM_PHY_BASE(pipe) + 0x0008) +-#define PHYTIUM_PHY_ACCESS_CTRL(pipe) (PHYTIUM_PHY_BASE(pipe) + 0x000c) +- #define ACCESS_WRITE (1<<0) +- #define ACCESS_READ (1<<1) +-#define PHYTIUM_PHY0_PIPE_RESET 0x40104 +- #define RESET 0x0 +- #define RESET_DEASSERT 0x1 +-#define PHYTIUM_PHY1_PIPE_RESET 0x100100 +- #define PHY1_PIPE_RESET 0x0 +- #define PHY1_PIPE_RESET_DEASSERT 0x4 +- +-#define PHYTIUM_PHY1_EN_REFCLK 0x100070 +- +-#define PHYTIUM_PHY0_MODE 0x40088 +- #define LANE_BIT (0x3) +- #define LANE_BIT_SHIFT 0x2 +-#define PHYTIUM_PHY1_SEL 0x100004 +- #define PHY1_DP_LANE_BIT 0x1 +- #define PHY1_DP_LANE_BIT_SHIFT 2 +- +-#define PHYTIUM_PHY0_LINK_CFG 0x40044 +- #define LANE_MASTER 0x1 +- #define LANE_MASTER_SHIFT 1 +- +-#define PHYTUIM_PHY0_PLL_EN 0x40010 +- #define PLL_EN 0x1 +- #define PLL_EN_SHIFT 1 +-#define PHYTUIM_PHY0_PMA_WIDTH 0x40020 +- #define BIT_20 0x5 +- #define BIT_20_SHIFT 4 +- +-#define PHYTIUM_PHY0_PMA0_POWER 0x40014 +-#define PHYTIUM_PHY0_PMA1_POWER 0x40018 +- #define A0_ACTIVE 0x1 +- #define A0_ACTIVE_SHIFT 8 +- #define A3_POWERDOWN3 0x8 +- #define A3_POWERDOWN3_SHIFT 8 +- +-#define PHYTIUM_PHY1_PMA_MISC 0x1000a0 +- #define PHY1_PLL_EN 0x1 +- #define PHY1_PLL_EN_MASK 1 +- #define PHY1_PLL_EN_SHIFT 8 +- #define PHY1_BIT_20 0x5 +- #define PHY1_BIT_20_SHIFT 9 +- #define PHY1_A0_ACTIVE 0x1 +- #define PHY1_A0_ACTIVE_SHIFT 2 +- #define PHY1_A0_ACTIVE_MASK 0x3f +- #define PHY1_A3_POWERDOWN3 0x8 +- #define PHY1_A3_POWERDOWN3_MASK 0x3f +- #define PHY1_A3_POWERDOWN3_SHIFT 2 +- +-#define PHYTIUM_PHY0_LINK_RESET 0x40108 +- #define LINK_RESET 0x1 +- #define LINK_RESET_MASK 0x1 +- #define LINTK_RESET_SHIFT 0x1 +- +-#define PHYTIUM_PHY0_APB_RESET 0x40100 +- #define APB_RESET 0x1 +-#define PHYTIUM_PHY1_APB_RESET 0x100104 +- #define PHY1_APB_RESET 0x4 +- +-/* phy origin register */ +-#define PHYTIUM_PHY0_PLL_CFG 0x30038 +-#define PHYTIUM_PHY1_PLL_CFG 0xb0038 +- #define SINGLE_LINK 0x0 +- #define DOUBLE_LINK 0x2 +- +-#define PHYTIUM_PHY0_PMA_CONTROL 0x3800c +-#define 
PHYTIUM_PHY1_PMA_CONTROL 0xb800c +- #define CONTROL_ENABLE 0x1 +- #define CONTROL_ENABLE_MASK 0x1 +- #define CONTROL_ENABLE_SHIFT 0x1 +- +-#define PHYTIUM_PHY0_PMA_CONTROL2 0x38004 +-#define PHYTIUM_PHY1_PMA_CONTROL2 0xb8004 +- +-#define PHYTIUM_PHY0_PLL0_CLK_SEL 0X684 +-#define PHYTIUM_PHY0_PLL1_CLK_SEL 0x704 +-#define PHYTIUM_PHY1_PLL_CLK_SEL 0X80684 +- #define PLL_LINK_RATE_162000 0xf01 +- #define PLL_LINK_RATE_270000 0x701 +- #define PLL_LINK_RATE_540000 0x301 +- #define PLL_LINK_RATE_810000 0x200 +- +-#define PHYTIUM_PHY0_HSCLK0_SEL 0x18398 +-#define PHYTIUM_PHY0_HSCLK1_SEL 0x1a398 +-#define PHYTIUM_PHY1_HSCLK_SEL 0x90398 +- #define HSCLK_LINK_0 0x0 +- #define HSCLK_LINK_1 0x1 +- +-#define PHYTIUM_PHY0_HSCLK0_DIV 0x1839c +-#define PHYTIUM_PHY0_HSCLK1_DIV 0x1a39c +-#define PHYTIUM_PHY1_HSCLK_DIV 0x9039c +- #define HSCLK_LINK_RATE_162000 0x2 +- #define HSCLK_LINK_RATE_270000 0x1 +- #define HSCLK_LINK_RATE_540000 0x0 +- #define HSCLK_LINK_RATE_810000 0x0 +- +-#define PHYTIUM_PHY0_PLLDRC0_CTRL 0x18394 +-#define PHYTIUM_PHY0_PLLDRC1_CTRL 0x1a394 +-#define PHYTIUM_PHY1_PLLDRC_CTRL 0x90394 +- #define PLLDRC_LINK0 0x1 +- #define PLLDRC_LINK1 0x9 +- +-#define PHYTIUM_PHY0_PLL0_DSM_M0 0x250 +-#define PHYTIUM_PHY1_PLL0_DSM_M0 0x80250 +- #define PLL0_DSM_M0 0x4 +-#define PHYTIUM_PHY0_PLL0_VCOCAL_START 0x218 +-#define PHYTIUM_PHY1_PLL0_VCOCAL_START 0x80218 +- #define PLL0_VCOCAL_START 0xc5e +-#define PHYTIUM_PHY0_PLL0_VCOCAL_CTRL 0x208 +-#define PHYTIUM_PHY1_PLL0_VCOCAL_CTRL 0x80208 +- #define PLL0_VCOCAL_CTRL 0x3 +- +-#define PHYTIUM_PHY0_PLL1_DSM_M0 0x350 +- #define PLL1_DSM_M0 0x4 +-#define PHYTIUM_PHY0_PLL1_VCOCAL_START 0x318 +- #define PLL1_VCOCAL_START 0xc5e +-#define PHYTIUM_PHY0_PLL1_VCOCAL_CTRL 0x308 +- #define PLL1_VCOCAL_CTRL 0x3 +- +-#define PHYTIUM_PHY0_PLL0_CP_PADJ 0x690 +-#define PHYTIUM_PHY0_PLL0_CP_IADJ 0x694 +-#define PHYTIUM_PHY0_PLL0_CP_FILT_PADJ 0x698 +-#define PHYTIUM_PHY0_PLL0_INTDIV 0x240 +-#define PHYTIUM_PHY0_PLL0_FRACDIVL 0x244 +-#define 
PHYTIUM_PHY0_PLL0_FRACDIVH 0x248 +-#define PHYTIUM_PHY0_PLL0_HIGH_THR 0x24c +-#define PHYTIUM_PHY0_PLL0_PDIAG_CTRL 0x680 +-#define PHYTIUM_PHY0_PLL0_VCOCAL_PLLCNT_START 0x220 +-#define PHYTIUM_PHY0_PLL0_LOCK_PEFCNT 0x270 +-#define PHYTIUM_PHY0_PLL0_LOCK_PLLCNT_START 0x278 +-#define PHYTIUM_PHY0_PLL0_LOCK_PLLCNT_THR 0x27c +- +-#define PHYTIUM_PHY0_PLL1_CP_PADJ 0x710 +-#define PHYTIUM_PHY0_PLL1_CP_IADJ 0x714 +-#define PHYTIUM_PHY0_PLL1_CP_FILT_PADJ 0x718 +-#define PHYTIUM_PHY0_PLL1_INTDIV 0x340 +-#define PHYTIUM_PHY0_PLL1_FRACDIVL 0x344 +-#define PHYTIUM_PHY0_PLL1_FRACDIVH 0x348 +-#define PHYTIUM_PHY0_PLL1_HIGH_THR 0x34c +-#define PHYTIUM_PHY0_PLL1_PDIAG_CTRL 0x700 +-#define PHYTIUM_PHY0_PLL1_VCOCAL_PLLCNT_START 0x320 +-#define PHYTIUM_PHY0_PLL1_LOCK_PEFCNT 0x370 +-#define PHYTIUM_PHY0_PLL1_LOCK_PLLCNT_START 0x378 +-#define PHYTIUM_PHY0_PLL1_LOCK_PLLCNT_THR 0x37c +- +-#define PHYTIUM_PHY1_PLL0_CP_PADJ 0x80690 +-#define PHYTIUM_PHY1_PLL0_CP_IADJ 0x80694 +-#define PHYTIUM_PHY1_PLL0_CP_FILT_PADJ 0x80698 +-#define PHYTIUM_PHY1_PLL0_INTDIV 0x80240 +-#define PHYTIUM_PHY1_PLL0_FRACDIVL 0x80244 +-#define PHYTIUM_PHY1_PLL0_FRACDIVH 0x80248 +-#define PHYTIUM_PHY1_PLL0_HIGH_THR 0x8024c +-#define PHYTIUM_PHY1_PLL0_PDIAG_CTRL 0x80680 +-#define PHYTIUM_PHY1_PLL0_VCOCAL_PLLCNT_START 0x80220 +-#define PHYTIUM_PHY1_PLL0_LOCK_PEFCNT 0x80270 +-#define PHYTIUM_PHY1_PLL0_LOCK_PLLCNT_START 0x80278 +-#define PHYTIUM_PHY1_PLL0_LOCK_PLLCNT_THR 0x8027c +- +-#define PHYTIUM_PHY0_PLL0_TX_PSC_A0 0x18400 +-#define PHYTIUM_PHY1_PLL0_TX_PSC_A0 0x90400 +- #define PLL0_TX_PSC_A0 0xfb +-#define PHYTIUM_PHY0_PLL0_TX_PSC_A2 0x18408 +-#define PHYTIUM_PHY1_PLL0_TX_PSC_A2 0x90408 +- #define PLL0_TX_PSC_A2 0x4aa +-#define PHYTIUM_PHY0_PLL0_TX_PSC_A3 0x1840c +-#define PHYTIUM_PHY1_PLL0_TX_PSC_A3 0x9040c +- #define PLL0_TX_PSC_A3 0x4aa +-#define PHYTIUM_PHY0_PLL0_RX_PSC_A0 0x28000 +-#define PHYTIUM_PHY1_PLL0_RX_PSC_A0 0xa0000 +- #define PLL0_RX_PSC_A0 0x0 +-#define PHYTIUM_PHY0_PLL0_RX_PSC_A2 0x28008 
+-#define PHYTIUM_PHY1_PLL0_RX_PSC_A2 0xa0008 +- #define PLL0_RX_PSC_A2 0x0 +-#define PHYTIUM_PHY0_PLL0_RX_PSC_A3 0x2800C +-#define PHYTIUM_PHY1_PLL0_RX_PSC_A3 0xa000C +- #define PLL0_RX_PSC_A3 0x0 +-#define PHYTIUM_PHY0_PLL0_RX_PSC_CAL 0x28018 +-#define PHYTIUM_PHY1_PLL0_RX_PSC_CAL 0xa0018 +- #define PLL0_RX_PSC_CAL 0x0 +- +-#define PHYTIUM_PHY0_PLL1_TX_PSC_A0 0x1a400 +- #define PLL1_TX_PSC_A0 0xfb +-#define PHYTIUM_PHY0_PLL1_TX_PSC_A2 0x1a408 +- #define PLL1_TX_PSC_A2 0x4aa +-#define PHYTIUM_PHY0_PLL1_TX_PSC_A3 0x1a40c +- #define PLL1_TX_PSC_A3 0x4aa +-#define PHYTIUM_PHY0_PLL1_RX_PSC_A0 0x2a000 +- #define PLL1_RX_PSC_A0 0x0 +-#define PHYTIUM_PHY0_PLL1_RX_PSC_A2 0x2a008 +- #define PLL1_RX_PSC_A2 0x0 +-#define PHYTIUM_PHY0_PLL1_RX_PSC_A3 0x2a00C +- #define PLL1_RX_PSC_A3 0x0 +-#define PHYTIUM_PHY0_PLL1_RX_PSC_CAL 0x2a018 +- #define PLL1_RX_PSC_CAL 0x0 +- +-#define PHYTIUM_PHY0_PLL0_XCVR_CTRL 0x183a8 +-#define PHYTIUM_PHY1_PLL0_XCVR_CTRL 0x903a8 +- #define PLL0_XCVR_CTRL 0xf +-#define PHYTIUM_PHY0_PLL1_XCVR_CTRL 0x1a3a8 +- #define PLL1_XCVR_CTRL 0xf +- +-#define PHYTIUM_PHY0_PLL0_RX_GCSM1_CTRL 0x28420 +-#define PHYTIUM_PHY1_PLL0_RX_GCSM1_CTRL 0xa0420 +- #define PLL0_RX_GCSM1_CTRL 0x0 +-#define PHYTIUM_PHY0_PLL0_RX_GCSM2_CTRL 0x28440 +-#define PHYTIUM_PHY1_PLL0_RX_GCSM2_CTRL 0xa0440 +- #define PLL0_RX_GCSM2_CTRL 0x0 +-#define PHYTIUM_PHY0_PLL0_RX_PERGCSM_CTRL 0x28460 +-#define PHYTIUM_PHY1_PLL0_RX_PERGCSM_CTRL 0xa0460 +- #define PLL0_RX_PERGCSM_CTRL 0x0 +- +-#define PHYTIUM_PHY0_PLL1_RX_GCSM1_CTRL 0x2a420 +- #define PLL1_RX_GCSM1_CTRL 0x0 +-#define PHYTIUM_PHY0_PLL1_RX_GCSM2_CTRL 0x2a440 +- #define PLL1_RX_GCSM2_CTRL 0x0 +-#define PHYTIUM_PHY0_PLL1_RX_PERGCSM_CTRL 0x2a460 +- #define PLL1_RX_PERGCSM_CTRL 0x0 +- +-/* swing and emphasis */ +-#define PHYTIUM_PHY0_PLL0_TX_DIAG_ACYA 0x1879c +-#define PHYTIUM_PHY0_PLL1_TX_DIAG_ACYA 0x1a79c +-#define PHYTIUM_PHY1_PLL0_TX_DIAG_ACYA 0x9079c +- #define LOCK 1 +- #define UNLOCK 0 +- +-#define PHYTIUM_PHY0_PLL0_TX_TXCC_CTRL 
0x18100 +-#define PHYTIUM_PHY0_PLL1_TX_TXCC_CTRL 0x1a100 +-#define PHYTIUM_PHY1_PLL0_TX_TXCC_CTRL 0x90100 +- #define TX_TXCC_CTRL 0x8a4 +- +-#define PHYTIUM_PHY0_PLL0_TX_DRV 0x18318 +-#define PHYTIUM_PHY0_PLL1_TX_DRV 0x1a318 +-#define PHYTIUM_PHY1_PLL0_TX_DRV 0x90318 +- #define TX_DRV 0x3 +- +-#define PHYTIUM_PHY0_PLL0_TX_MGNFS 0x18140 +-#define PHYTIUM_PHY0_PLL1_TX_MGNFS 0x1a140 +-#define PHYTIUM_PHY1_PLL0_TX_MGNFS 0x90140 +- +-#define PHYTIUM_PHY0_PLL0_TX_CPOST 0x18130 +-#define PHYTIUM_PHY0_PLL1_TX_CPOST 0x1a130 +-#define PHYTIUM_PHY0_PLL1_TX_CPOST1 0x1a13c +-#define PHYTIUM_PHY1_PLL0_TX_CPOST 0x90130 +- +-/******************************phy register end********************************************/ + #endif /* __PHYTIUM_REG_H__ */ +diff --git a/drivers/gpu/drm/phytium/x100_dc.c b/drivers/gpu/drm/phytium/x100_dc.c +new file mode 100644 +index 000000000000..06394c232dab +--- /dev/null ++++ b/drivers/gpu/drm/phytium/x100_dc.c +@@ -0,0 +1,321 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* Phytium display drm driver ++ * ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include "phytium_display_drv.h" ++#include "x100_reg.h" ++#include "phytium_crtc.h" ++#include "phytium_plane.h" ++#include "phytium_fb.h" ++#include "phytium_gem.h" ++ ++static const unsigned int x100_primary_formats[] = { ++ DRM_FORMAT_ARGB2101010, ++ DRM_FORMAT_ABGR2101010, ++ DRM_FORMAT_RGBA1010102, ++ DRM_FORMAT_BGRA1010102, ++ DRM_FORMAT_ARGB8888, ++ DRM_FORMAT_ABGR8888, ++ DRM_FORMAT_RGBA8888, ++ DRM_FORMAT_BGRA8888, ++ DRM_FORMAT_XRGB8888, ++ DRM_FORMAT_XBGR8888, ++ DRM_FORMAT_RGBX8888, ++ DRM_FORMAT_BGRX8888, ++ DRM_FORMAT_ARGB4444, ++ DRM_FORMAT_ABGR4444, ++ DRM_FORMAT_RGBA4444, ++ DRM_FORMAT_BGRA4444, ++ DRM_FORMAT_XRGB4444, ++ DRM_FORMAT_XBGR4444, ++ DRM_FORMAT_RGBX4444, ++ DRM_FORMAT_BGRX4444, ++ DRM_FORMAT_ARGB1555, ++ DRM_FORMAT_ABGR1555, ++ DRM_FORMAT_RGBA5551, ++ DRM_FORMAT_BGRA5551, ++ DRM_FORMAT_XRGB1555, ++ DRM_FORMAT_XBGR1555, ++ DRM_FORMAT_RGBX5551, ++ DRM_FORMAT_BGRX5551, ++ DRM_FORMAT_RGB565, ++ DRM_FORMAT_BGR565, ++ DRM_FORMAT_YUYV, ++ DRM_FORMAT_UYVY, ++}; ++ ++static uint64_t x100_primary_formats_modifiers[] = { ++ DRM_FORMAT_MOD_LINEAR, ++ DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC, ++ DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC, ++ DRM_FORMAT_MOD_INVALID ++}; ++ ++static const unsigned int x100_cursor_formats[] = { ++ DRM_FORMAT_ARGB8888, ++}; ++ ++void x100_dc_hw_vram_init(struct phytium_display_private *priv, resource_size_t vram_addr, ++ resource_size_t vram_size) ++{ ++ uint32_t config; ++ uint32_t group_offset = priv->address_transform_base; ++ ++ config = phytium_readl_reg(priv, group_offset, ++ X100_GPU_ADDRESS_TRANSFORM_SRC_ADDR); ++ if (config) ++ phytium_writel_reg(priv, config, group_offset, ++ X100_GPU_ADDRESS_TRANSFORM_SRC_ADDR); ++ ++ config = phytium_readl_reg(priv, group_offset, ++ X100_GPU_ADDRESS_TRANSFORM_SIZE); ++ if (config) ++ phytium_writel_reg(priv, config, group_offset, ++ X100_GPU_ADDRESS_TRANSFORM_SIZE); ++ ++ config = phytium_readl_reg(priv, group_offset, ++ 
X100_GPU_ADDRESS_TRANSFORM_DST_ADDR); ++ if (config) ++ phytium_writel_reg(priv, config, group_offset, ++ X100_GPU_ADDRESS_TRANSFORM_DST_ADDR); ++ ++ phytium_writel_reg(priv, (vram_addr & SRC_ADDR_MASK) >> SRC_ADDR_OFFSET, ++ group_offset, X100_DC_ADDRESS_TRANSFORM_SRC_ADDR); ++ phytium_writel_reg(priv, (vram_size >> SIZE_OFFSET) | ADDRESS_TRANSFORM_ENABLE, ++ group_offset, X100_DC_ADDRESS_TRANSFORM_SIZE); ++ config = phytium_readl_reg(priv, group_offset, X100_DC_ADDRESS_TRANSFORM_DST_ADDR); ++ phytium_writel_reg(priv, config, group_offset, X100_DC_ADDRESS_TRANSFORM_DST_ADDR); ++} ++ ++void x100_dc_hw_clear_msi_irq(struct phytium_display_private *priv, uint32_t phys_pipe) ++{ ++ phytium_writel_reg(priv, MSI_CLEAR, priv->dcreq_reg_base[phys_pipe], X100_DCREQ_MSI_CLEAR); ++} ++ ++void x100_dc_hw_config_pix_clock(struct drm_crtc *crtc, int clock) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); ++ int phys_pipe = phytium_crtc->phys_pipe; ++ uint32_t group_offset = priv->dcreq_reg_base[phys_pipe]; ++ int ret = 0; ++ ++ /* config pix clock */ ++ phytium_writel_reg(priv, FLAG_REQUEST | CMD_PIXEL_CLOCK | (clock & PIXEL_CLOCK_MASK), ++ group_offset, X100_DCREQ_CMD_REGISTER); ++ ret = phytium_wait_cmd_done(priv, group_offset + X100_DCREQ_CMD_REGISTER, ++ FLAG_REQUEST, FLAG_REPLY); ++ if (ret < 0) ++ DRM_ERROR("%s: failed to set pixel clock\n", __func__); ++} ++ ++void x100_dc_hw_disable(struct drm_crtc *crtc) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); ++ int reset_timeout = 100; ++ int config = 0; ++ int phys_pipe = phytium_crtc->phys_pipe; ++ ++ // reset dc ++ config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], X100_DC_CLOCK_CONTROL); ++ phytium_writel_reg(priv, config | SOFT_RESET, priv->dc_reg_base[phys_pipe], ++ 
X100_DC_CLOCK_CONTROL); ++ phytium_writel_reg(priv, 0, priv->dc_reg_base[phys_pipe], X100_DC_CLOCK_CONTROL); ++ do { ++ config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], X100_DC_CLOCK_IDLE); ++ if (config | IS_IDLE) ++ break; ++ mdelay(1); ++ reset_timeout--; ++ } while (reset_timeout); ++ ++ /* reset pix clock */ ++ x100_dc_hw_config_pix_clock(crtc, 0); ++ ++ // reset dc ++ reset_timeout = 100; ++ config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], X100_DC_CLOCK_CONTROL); ++ phytium_writel_reg(priv, config | SOFT_RESET, priv->dc_reg_base[phys_pipe], ++ X100_DC_CLOCK_CONTROL); ++ phytium_writel_reg(priv, 0, priv->dc_reg_base[phys_pipe], X100_DC_CLOCK_CONTROL); ++ do { ++ config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], X100_DC_CLOCK_IDLE); ++ if (config | IS_IDLE) ++ break; ++ mdelay(1); ++ reset_timeout--; ++ } while (reset_timeout); ++ ++ /* reset dcreq */ ++ phytium_writel_reg(priv, DCREQ_PLAN_A, priv->dcreq_reg_base[phys_pipe], X100_DCREQ_PLAN); ++ phytium_writel_reg(priv, 0, priv->dcreq_reg_base[phys_pipe], X100_DCREQ_CONTROL); ++ phytium_writel_reg(priv, DCREQ_RESET, priv->dcreq_reg_base[phys_pipe], X100_DCREQ_RESET); ++ msleep(20); ++ phytium_writel_reg(priv, (~DCREQ_RESET)&DCREQ_RESET_MASK, ++ priv->dcreq_reg_base[phys_pipe], X100_DCREQ_RESET); ++} ++ ++int x100_dc_hw_fb_format_check(const struct drm_mode_fb_cmd2 *mode_cmd, int count) ++{ ++ int ret = 0; ++ ++ switch (mode_cmd->modifier[count]) { ++ case DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC: ++ switch (mode_cmd->pixel_format) { ++ case DRM_FORMAT_ARGB4444: ++ case DRM_FORMAT_ABGR4444: ++ case DRM_FORMAT_RGBA4444: ++ case DRM_FORMAT_BGRA4444: ++ case DRM_FORMAT_XRGB4444: ++ case DRM_FORMAT_XBGR4444: ++ case DRM_FORMAT_RGBX4444: ++ case DRM_FORMAT_BGRX4444: ++ case DRM_FORMAT_ARGB1555: ++ case DRM_FORMAT_ABGR1555: ++ case DRM_FORMAT_RGBA5551: ++ case DRM_FORMAT_BGRA5551: ++ case DRM_FORMAT_XRGB1555: ++ case DRM_FORMAT_XBGR1555: ++ case DRM_FORMAT_RGBX5551: ++ case 
DRM_FORMAT_BGRX5551: ++ case DRM_FORMAT_RGB565: ++ case DRM_FORMAT_BGR565: ++ case DRM_FORMAT_YUYV: ++ case DRM_FORMAT_UYVY: ++ break; ++ default: ++ DRM_ERROR("TILE_MODE0_FBCDC not support DRM_FORMAT %d", ++ mode_cmd->pixel_format); ++ ret = -EINVAL; ++ goto error; ++ } ++ break; ++ case DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC: ++ switch (mode_cmd->pixel_format) { ++ case DRM_FORMAT_ARGB2101010: ++ case DRM_FORMAT_ABGR2101010: ++ case DRM_FORMAT_RGBA1010102: ++ case DRM_FORMAT_BGRA1010102: ++ case DRM_FORMAT_ARGB8888: ++ case DRM_FORMAT_ABGR8888: ++ case DRM_FORMAT_RGBA8888: ++ case DRM_FORMAT_BGRA8888: ++ case DRM_FORMAT_XRGB8888: ++ case DRM_FORMAT_XBGR8888: ++ case DRM_FORMAT_RGBX8888: ++ case DRM_FORMAT_BGRX8888: ++ break; ++ default: ++ DRM_ERROR("TILE_MODE3_FBCDC not support DRM_FORMAT %d", ++ mode_cmd->pixel_format); ++ ret = -EINVAL; ++ goto error; ++ } ++ break; ++ case DRM_FORMAT_MOD_LINEAR: ++ break; ++ default: ++ DRM_ERROR("unsupported fb modifier 0x%llx\n", mode_cmd->modifier[0]); ++ ret = -EINVAL; ++ goto error; ++ } ++ ++ return 0; ++error: ++ return ret; ++} ++ ++void x100_dc_hw_plane_get_primary_format(const uint64_t **format_modifiers, ++ const uint32_t **formats, ++ uint32_t *format_count) ++{ ++ *format_modifiers = x100_primary_formats_modifiers; ++ *formats = x100_primary_formats; ++ *format_count = ARRAY_SIZE(x100_primary_formats); ++} ++ ++void x100_dc_hw_plane_get_cursor_format(const uint64_t **format_modifiers, ++ const uint32_t **formats, ++ uint32_t *format_count) ++{ ++ *format_modifiers = NULL; ++ *formats = x100_cursor_formats; ++ *format_count = ARRAY_SIZE(x100_cursor_formats); ++} ++ ++void x100_dc_hw_update_dcreq(struct drm_plane *plane) ++{ ++ struct drm_device *dev = plane->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ struct phytium_plane *phytium_plane = to_phytium_plane(plane); ++ int phys_pipe = phytium_plane->phys_pipe; ++ uint32_t group_offset = priv->dcreq_reg_base[phys_pipe]; ++ int config; ++ ++ if 
(phytium_plane->tiling[0] == FRAMEBUFFER_LINEAR) { ++ phytium_writel_reg(priv, DCREQ_MODE_LINEAR, ++ group_offset, X100_DCREQ_PLANE0_CONFIG); ++ } else { ++ config = DCREQ_NO_LOSSY; ++ if (phytium_plane->tiling[0] == FRAMEBUFFER_TILE_MODE0) ++ config |= DCREQ_TILE_TYPE_MODE0; ++ else if (phytium_plane->tiling[0] == FRAMEBUFFER_TILE_MODE3) ++ config |= DCREQ_TILE_TYPE_MODE3; ++ else ++ config |= DCREQ_TILE_TYPE_MODE0; ++ ++ switch (phytium_plane->format) { ++ case FRAMEBUFFER_FORMAT_ARGB8888: ++ case FRAMEBUFFER_FORMAT_XRGB8888: ++ config |= DCREQ_COLOURFORMAT_BGRA8888; ++ break; ++ case FRAMEBUFFER_FORMAT_ARGB2101010: ++ config |= DCREQ_COLOURFORMAT_ARGB2101010; ++ break; ++ case FRAMEBUFFER_FORMAT_XRGB4444: ++ case FRAMEBUFFER_FORMAT_ARGB4444: ++ config |= DCREQ_COLOURFORMAT_ARGB4444; ++ break; ++ case FRAMEBUFFER_FORMAT_XRGB1555: ++ case FRAMEBUFFER_FORMAT_ARGB1555: ++ config |= DCREQ_COLOURFORMAT_ARGB1555; ++ break; ++ case FRAMEBUFFER_FORMAT_RGB565: ++ config |= DCREQ_COLOURFORMAT_RGB565; ++ break; ++ case FRAMEBUFFER_FORMAT_YUYV: ++ config |= DCREQ_COLOURFORMAT_YUYV; ++ break; ++ case FRAMEBUFFER_FORMAT_UYVY: ++ config |= DCREQ_COLOURFORMAT_UYVY; ++ break; ++ } ++ config |= DCREQ_ARGBSWIZZLE_ARGB; ++ config |= DCREQ_MODE_TILE; ++ phytium_writel_reg(priv, phytium_plane->iova[0] & 0xffffffff, ++ group_offset, X100_DCREQ_PLANE0_ADDR_START); ++ phytium_writel_reg(priv, (phytium_plane->iova[0] + phytium_plane->size[0]) & ++ 0xffffffff, group_offset, X100_DCREQ_PLANE0_ADDR_END); ++ phytium_writel_reg(priv, config, group_offset, X100_DCREQ_PLANE0_CONFIG); ++ } ++} ++ ++void x100_dc_hw_update_primary_hi_addr(struct drm_plane *plane) ++{ ++ struct drm_device *dev = plane->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ struct phytium_plane *phytium_plane = to_phytium_plane(plane); ++ int phys_pipe = phytium_plane->phys_pipe; ++ ++ phytium_writel_reg(priv, (phytium_plane->iova[0] >> PREFIX_SHIFT) & PREFIX_MASK, ++ priv->dcreq_reg_base[phys_pipe], 
X100_DCREQ_PIX_DMA_PREFIX); ++} +diff --git a/drivers/gpu/drm/phytium/x100_dc.h b/drivers/gpu/drm/phytium/x100_dc.h +new file mode 100644 +index 000000000000..ae98b4ffe0cf +--- /dev/null ++++ b/drivers/gpu/drm/phytium/x100_dc.h +@@ -0,0 +1,30 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Phytium display drm driver ++ * ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. ++ */ ++ ++#ifndef __X100_DC_H__ ++#define __X100_DC_H__ ++ ++#define X100_DC_PIX_CLOCK_MAX (594000) ++#define x100_DC_HDISPLAY_MAX 3840 ++#define X100_DC_VDISPLAY_MAX 2160 ++#define X100_DC_ADDRESS_MASK 0x3f ++ ++extern void x100_dc_hw_vram_init(struct phytium_display_private *priv, ++ resource_size_t vram_addr, ++ resource_size_t vram_size); ++extern void x100_dc_hw_clear_msi_irq(struct phytium_display_private *priv, uint32_t phys_pipe); ++extern void x100_dc_hw_config_pix_clock(struct drm_crtc *crtc, int clock); ++extern void x100_dc_hw_disable(struct drm_crtc *crtc); ++extern int x100_dc_hw_fb_format_check(const struct drm_mode_fb_cmd2 *mode_cmd, int count); ++extern void x100_dc_hw_plane_get_primary_format(const uint64_t **format_modifiers, ++ const uint32_t **formats, ++ uint32_t *format_count); ++extern void x100_dc_hw_plane_get_cursor_format(const uint64_t **format_modifiers, ++ const uint32_t **formats, ++ uint32_t *format_count); ++void x100_dc_hw_update_dcreq(struct drm_plane *plane); ++void x100_dc_hw_update_primary_hi_addr(struct drm_plane *plane); ++#endif /* __X100_DC_H__ */ +diff --git a/drivers/gpu/drm/phytium/x100_dp.c b/drivers/gpu/drm/phytium/x100_dp.c +index 60bd3316f6f9..4cc390442461 100644 +--- a/drivers/gpu/drm/phytium/x100_dp.c ++++ b/drivers/gpu/drm/phytium/x100_dp.c +@@ -1,20 +1,13 @@ + // SPDX-License-Identifier: GPL-2.0 +-/* Phytium X100 display drm driver ++/* Phytium display drm driver + * +- * Copyright (c) 2021 Phytium Limited. 
+- * +- * Author: +- * Yang Xun +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License as published by +- * the Free Software Foundation; either version 2 of the License, or +- * (at your option) any later version. ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + + #include "phytium_display_drv.h" +-#include "phytium_reg.h" ++#include "x100_reg.h" + #include "phytium_dp.h" ++#include "x100_dp.h" + + /* [reg][ling_rate 1.62->8.1] */ + static int vco_val[12][4] = { +@@ -53,8 +46,8 @@ static int mgnfs_val[4][4][4] = // [link_rate][swing][emphasis] + /* 5.4Gbps */ + { + {0x0026, 0x0013, 0x005, 0x0000}, +- {0x0013, 0x006, 0x0000, 0x0000}, +- {0x0006, 0x0000, 0x0000, 0x0000}, ++ {0x0018, 0x006, 0x0000, 0x0000}, ++ {0x000c, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + +@@ -72,7 +65,7 @@ static int cpost_val[4][4][4] = // [link_rate][swing][emphasis] + /* 1.62Gbps */ + { + {0x0000, 0x0014, 0x0020, 0x002a}, +- {0x0000, 0x0014, 0x001f, 0x0000}, ++ {0x0000, 0x0010, 0x001f, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, +@@ -80,7 +73,7 @@ static int cpost_val[4][4][4] = // [link_rate][swing][emphasis] + /* 2.7Gbps */ + { + {0x0000, 0x0014, 0x0020, 0x002a}, +- {0x0000, 0x0014, 0x001f, 0x0000}, ++ {0x0000, 0x0010, 0x001f, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, +@@ -102,45 +95,13 @@ static int cpost_val[4][4][4] = // [link_rate][swing][emphasis] + }, + }; + +-static void x100_phy_writel(struct phytium_dp_device *phytium_dp, u32 address, u32 data) +-{ +- struct drm_device *dev = phytium_dp->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; +- int port = phytium_dp->port; +- +-#if DEBUG_LOG +- pr_info("phy address write: 0x%x data:0x%x\n", address, data); +-#endif +- phytium_writel_reg(priv, address, PHYTIUM_PHY_ACCESS_ADDRESS(port)); +- phytium_writel_reg(priv, 
data, PHYTIUM_PHY_WRITE_DATA(port)); +- phytium_writel_reg(priv, ACCESS_WRITE, PHYTIUM_PHY_ACCESS_CTRL(port)); +- udelay(10); +-} +- +-static u32 x100_phy_readl(struct phytium_dp_device *phytium_dp, u32 address) +-{ +- struct drm_device *dev = phytium_dp->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; +- int port = phytium_dp->port; +- u32 data; +- +- phytium_writel_reg(priv, address, PHYTIUM_PHY_ACCESS_ADDRESS(port)); +- phytium_writel_reg(priv, ACCESS_READ, PHYTIUM_PHY_ACCESS_CTRL(port)); +- udelay(10); +- data = phytium_readl_reg(priv, PHYTIUM_PHY_READ_DATA(port)); +-#if DEBUG_LOG +- pr_info("phy address read: 0x%x data:0x%x\n", address, data); +-#endif +- +- return data; +-} +- +-static void x100_dp_phy_set_lane_and_rate(struct phytium_dp_device *phytium_dp, ++static int x100_dp_hw_set_phy_lane_and_rate(struct phytium_dp_device *phytium_dp, + uint8_t link_lane_count, + uint32_t link_rate) + { + int port = phytium_dp->port%3; + int i = 0, data, tmp, tmp1, index = 0, mask; ++ int timeout = 500, ret = 0; + + if (port == 0 || port == 1) { + /* set pma powerdown */ +@@ -151,13 +112,13 @@ static void x100_dp_phy_set_lane_and_rate(struct phytium_dp_device *phytium_dp, + mask |= (((1<port%3; + int voltage_swing = 0; +@@ -563,47 +553,48 @@ static void x100_dp_phy_set_lane_setting(struct phytium_dp_device *phytium_dp, + } + + if (port == 0) { +- x100_phy_writel(phytium_dp, PHYTIUM_PHY0_PLL0_TX_DIAG_ACYA, LOCK); +- x100_phy_writel(phytium_dp, PHYTIUM_PHY0_PLL0_TX_TXCC_CTRL, TX_TXCC_CTRL); +- x100_phy_writel(phytium_dp, PHYTIUM_PHY0_PLL0_TX_DRV, TX_DRV); +- x100_phy_writel(phytium_dp, PHYTIUM_PHY0_PLL0_TX_MGNFS, ++ phytium_phy_writel(phytium_dp, X100_PHY0_PLL0_TX_DIAG_ACYA, LOCK); ++ phytium_phy_writel(phytium_dp, X100_PHY0_PLL0_TX_TXCC_CTRL, TX_TXCC_CTRL); ++ phytium_phy_writel(phytium_dp, X100_PHY0_PLL0_TX_DRV, TX_DRV); ++ phytium_phy_writel(phytium_dp, X100_PHY0_PLL0_TX_MGNFS, + mgnfs_val[link_rate_index][voltage_swing][pre_emphasis]); +- 
x100_phy_writel(phytium_dp, PHYTIUM_PHY0_PLL0_TX_CPOST, ++ phytium_phy_writel(phytium_dp, X100_PHY0_PLL0_TX_CPOST, + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); +- x100_phy_writel(phytium_dp, PHYTIUM_PHY0_PLL0_TX_DIAG_ACYA, UNLOCK); ++ phytium_phy_writel(phytium_dp, X100_PHY0_PLL0_TX_DIAG_ACYA, UNLOCK); + + } else if (port == 1) { +- x100_phy_writel(phytium_dp, PHYTIUM_PHY0_PLL1_TX_DIAG_ACYA, LOCK); +- x100_phy_writel(phytium_dp, PHYTIUM_PHY0_PLL1_TX_TXCC_CTRL, TX_TXCC_CTRL); +- x100_phy_writel(phytium_dp, PHYTIUM_PHY0_PLL1_TX_DRV, TX_DRV); +- x100_phy_writel(phytium_dp, PHYTIUM_PHY0_PLL1_TX_MGNFS, ++ phytium_phy_writel(phytium_dp, X100_PHY0_PLL1_TX_DIAG_ACYA, LOCK); ++ phytium_phy_writel(phytium_dp, X100_PHY0_PLL1_TX_TXCC_CTRL, TX_TXCC_CTRL); ++ phytium_phy_writel(phytium_dp, X100_PHY0_PLL1_TX_DRV, TX_DRV); ++ phytium_phy_writel(phytium_dp, X100_PHY0_PLL1_TX_MGNFS, + mgnfs_val[link_rate_index][voltage_swing][pre_emphasis]); +- x100_phy_writel(phytium_dp, PHYTIUM_PHY0_PLL1_TX_CPOST, ++ phytium_phy_writel(phytium_dp, X100_PHY0_PLL1_TX_CPOST, + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); +- x100_phy_writel(phytium_dp, PHYTIUM_PHY0_PLL1_TX_CPOST1, ++ phytium_phy_writel(phytium_dp, X100_PHY0_PLL1_TX_CPOST1, + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); +- x100_phy_writel(phytium_dp, PHYTIUM_PHY0_PLL1_TX_DIAG_ACYA, UNLOCK); ++ phytium_phy_writel(phytium_dp, X100_PHY0_PLL1_TX_DIAG_ACYA, UNLOCK); + } else { +- x100_phy_writel(phytium_dp, PHYTIUM_PHY1_PLL0_TX_DIAG_ACYA, LOCK); +- x100_phy_writel(phytium_dp, PHYTIUM_PHY1_PLL0_TX_TXCC_CTRL, TX_TXCC_CTRL); +- x100_phy_writel(phytium_dp, PHYTIUM_PHY1_PLL0_TX_DRV, TX_DRV); +- x100_phy_writel(phytium_dp, PHYTIUM_PHY1_PLL0_TX_MGNFS, ++ phytium_phy_writel(phytium_dp, X100_PHY1_PLL0_TX_DIAG_ACYA, LOCK); ++ phytium_phy_writel(phytium_dp, X100_PHY1_PLL0_TX_TXCC_CTRL, TX_TXCC_CTRL); ++ phytium_phy_writel(phytium_dp, X100_PHY1_PLL0_TX_DRV, TX_DRV); ++ phytium_phy_writel(phytium_dp, 
X100_PHY1_PLL0_TX_MGNFS, + mgnfs_val[link_rate_index][voltage_swing][pre_emphasis]); +- x100_phy_writel(phytium_dp, PHYTIUM_PHY1_PLL0_TX_CPOST, ++ phytium_phy_writel(phytium_dp, X100_PHY1_PLL0_TX_CPOST, + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); +- x100_phy_writel(phytium_dp, PHYTIUM_PHY1_PLL0_TX_DIAG_ACYA, UNLOCK); ++ phytium_phy_writel(phytium_dp, X100_PHY1_PLL0_TX_DIAG_ACYA, UNLOCK); + } + } + +-static void x100_dp_phy_init(struct phytium_dp_device *phytium_dp) ++static int x100_dp_hw_init_phy(struct phytium_dp_device *phytium_dp) + { + int port = phytium_dp->port; + int i = 0, data, tmp, mask; ++ int timeout = 500, ret = 0; + + if (port == 0 || port == 1) { +- x100_phy_writel(phytium_dp, PHYTIUM_PHY0_APB_RESET, APB_RESET); ++ phytium_phy_writel(phytium_dp, X100_PHY0_APB_RESET, APB_RESET); + +- x100_phy_writel(phytium_dp, PHYTIUM_PHY0_PIPE_RESET, RESET); ++ phytium_phy_writel(phytium_dp, X100_PHY0_PIPE_RESET, RESET); + + /* config lane to dp mode */ + data = 0; +@@ -614,9 +605,9 @@ static void x100_dp_phy_init(struct phytium_dp_device *phytium_dp) + } + mask = (mask << (port*LANE_BIT_SHIFT*4)); + data = (data << (port*LANE_BIT_SHIFT*4)); +- tmp = x100_phy_readl(phytium_dp, PHYTIUM_PHY0_MODE); ++ tmp = phytium_phy_readl(phytium_dp, X100_PHY0_MODE); + tmp = (tmp & (~mask)) | data; +- x100_phy_writel(phytium_dp, PHYTIUM_PHY0_MODE, tmp); ++ phytium_phy_writel(phytium_dp, X100_PHY0_MODE, tmp); + + /* config lane master or slave */ + data = 0; +@@ -627,9 +618,9 @@ static void x100_dp_phy_init(struct phytium_dp_device *phytium_dp) + } + mask = (mask << (port*LANE_MASTER_SHIFT*4)); + data = (data << (port*LANE_MASTER_SHIFT*4)); +- tmp = x100_phy_readl(phytium_dp, PHYTIUM_PHY0_LINK_CFG); ++ tmp = phytium_phy_readl(phytium_dp, X100_PHY0_LINK_CFG); + tmp = (tmp & (~mask)) | data; +- x100_phy_writel(phytium_dp, PHYTIUM_PHY0_LINK_CFG, tmp); ++ phytium_phy_writel(phytium_dp, X100_PHY0_LINK_CFG, tmp); + + /* pll clock enable */ + data = 0; +@@ -640,9 +631,9 @@ 
static void x100_dp_phy_init(struct phytium_dp_device *phytium_dp) + } + mask = (mask << (port*PLL_EN_SHIFT*4)); + data = (data << (port*PLL_EN_SHIFT*4)); +- tmp = x100_phy_readl(phytium_dp, PHYTUIM_PHY0_PLL_EN); ++ tmp = phytium_phy_readl(phytium_dp, X100_PHY0_PLL_EN); + tmp = (tmp & (~mask)) | data; +- x100_phy_writel(phytium_dp, PHYTUIM_PHY0_PLL_EN, tmp); ++ phytium_phy_writel(phytium_dp, X100_PHY0_PLL_EN, tmp); + + /* config input 20 bit */ + data = 0; +@@ -653,9 +644,9 @@ static void x100_dp_phy_init(struct phytium_dp_device *phytium_dp) + } + mask = (mask << (port*BIT_20_SHIFT*4)); + data = (data << (port*BIT_20_SHIFT*4)); +- tmp = x100_phy_readl(phytium_dp, PHYTUIM_PHY0_PMA_WIDTH); ++ tmp = phytium_phy_readl(phytium_dp, X100_PHY0_PMA_WIDTH); + tmp = (tmp & (~mask)) | data; +- x100_phy_writel(phytium_dp, PHYTUIM_PHY0_PMA_WIDTH, tmp); ++ phytium_phy_writel(phytium_dp, X100_PHY0_PMA_WIDTH, tmp); + + /* config lane active power state */ + data = 0; +@@ -665,13 +656,13 @@ static void x100_dp_phy_init(struct phytium_dp_device *phytium_dp) + mask |= (((1<dev; +- struct phytium_display_drm_private *priv = dev->dev_private; ++ struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; +- int timeout = 100, config = 0; ++ uint32_t group_offset = priv->dcreq_reg_base[port]; ++ int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | PANEL_POWER_ENABLE, +- PHYTIUM_DCREQ_PIX_CLOCK_CONFIG(port)); +- +- do { +- mdelay(10); +- timeout--; +- config = phytium_readl_reg(priv, PHYTIUM_DCREQ_PIX_CLOCK_CONFIG(port)); +- } while ((!(config & FLAG_REPLY)) && timeout); +- if (timeout == 0) +- DRM_ERROR("%s failed\n", __func__); +- +- phytium_writel_reg(priv, 0, +- PHYTIUM_DCREQ_PIX_CLOCK_CONFIG(port)); +- mdelay(20); ++ group_offset, X100_DCREQ_CMD_REGISTER); ++ ret = phytium_wait_cmd_done(priv, group_offset + X100_DCREQ_CMD_REGISTER, ++ FLAG_REQUEST, FLAG_REPLY); ++ if (ret < 0) ++ DRM_ERROR("%s: failed to poweron panel\n", __func__); + 
} + + static void x100_dp_hw_poweroff_panel(struct phytium_dp_device *phytium_dp) + { + struct drm_device *dev = phytium_dp->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; ++ struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; +- int timeout = 100, config = 0; ++ uint32_t group_offset = priv->dcreq_reg_base[port]; ++ int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | PANEL_POWER_DISABLE, +- PHYTIUM_DCREQ_PIX_CLOCK_CONFIG(port)); +- +- do { +- mdelay(10); +- timeout--; +- config = phytium_readl_reg(priv, PHYTIUM_DCREQ_PIX_CLOCK_CONFIG(port)); +- } while ((!(config & FLAG_REPLY)) && timeout); +- if (timeout == 0) +- DRM_ERROR("%s failed\n", __func__); +- +- phytium_writel_reg(priv, 0, +- PHYTIUM_DCREQ_PIX_CLOCK_CONFIG(port)); +- mdelay(20); ++ group_offset, X100_DCREQ_CMD_REGISTER); ++ ret = phytium_wait_cmd_done(priv, group_offset + X100_DCREQ_CMD_REGISTER, ++ FLAG_REQUEST, FLAG_REPLY); ++ if (ret < 0) ++ DRM_ERROR("%s: failed to poweroff panel\n", __func__); + } + + static void x100_dp_hw_enable_backlight(struct phytium_dp_device *phytium_dp) + { + struct drm_device *dev = phytium_dp->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; +- int port = phytium_dp->port; +- int timeout = 100, config = 0; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port, ret = 0; ++ uint32_t group_offset = priv->dcreq_reg_base[port]; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | BACKLIGHT_ENABLE, +- PHYTIUM_DCREQ_PIX_CLOCK_CONFIG(port)); +- +- do { +- mdelay(10); +- timeout--; +- config = phytium_readl_reg(priv, PHYTIUM_DCREQ_PIX_CLOCK_CONFIG(port)); +- } while ((!(config & FLAG_REPLY)) && timeout); +- if (timeout == 0) +- DRM_ERROR("%s failed\n", __func__); +- +- phytium_writel_reg(priv, 0, +- PHYTIUM_DCREQ_PIX_CLOCK_CONFIG(port)); +- mdelay(20); ++ group_offset, X100_DCREQ_CMD_REGISTER); ++ ret = phytium_wait_cmd_done(priv, 
group_offset + X100_DCREQ_CMD_REGISTER, ++ FLAG_REQUEST, FLAG_REPLY); ++ if (ret < 0) ++ DRM_ERROR("%s: failed to enable backlight\n", __func__); + } + + static void x100_dp_hw_disable_backlight(struct phytium_dp_device *phytium_dp) + { + struct drm_device *dev = phytium_dp->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; ++ struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; +- int timeout = 100, config = 0; ++ uint32_t group_offset = priv->dcreq_reg_base[port]; ++ int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | BACKLIGHT_DISABLE, +- PHYTIUM_DCREQ_PIX_CLOCK_CONFIG(port)); +- +- do { +- mdelay(10); +- timeout--; +- config = phytium_readl_reg(priv, PHYTIUM_DCREQ_PIX_CLOCK_CONFIG(port)); +- } while ((!(config & FLAG_REPLY)) && timeout); +- if (timeout == 0) +- DRM_ERROR("%s failed\n", __func__); +- +- phytium_writel_reg(priv, 0, +- PHYTIUM_DCREQ_PIX_CLOCK_CONFIG(port)); +- mdelay(20); ++ group_offset, X100_DCREQ_CMD_REGISTER); ++ ret = phytium_wait_cmd_done(priv, group_offset + X100_DCREQ_CMD_REGISTER, ++ FLAG_REQUEST, FLAG_REPLY); ++ if (ret < 0) ++ DRM_ERROR("%s: failed to disable backlight\n", __func__); + } + + static uint32_t x100_dp_hw_get_backlight(struct phytium_dp_device *phytium_dp) + { +- return phytium_dp->panel.level; ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int config; ++ uint32_t group_offset = priv->address_transform_base; ++ ++ config = phytium_readl_reg(priv, group_offset, X100_DC_ADDRESS_TRANSFORM_BACKLIGHT_VALUE); ++ return ((config >> BACKLIGHT_VALUE_SHIFT) & BACKLIGHT_VALUE_MASK); + } + + static int x100_dp_hw_set_backlight(struct phytium_dp_device *phytium_dp, uint32_t level) + { + struct drm_device *dev = phytium_dp->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; ++ struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; +- int timeout = 100, 
config = 0; ++ uint32_t group_offset = priv->dcreq_reg_base[port]; ++ int config = 0; + int ret = 0; + +- if (level > BACKLIGHT_MAX) { ++ if (level > X100_DP_BACKLIGHT_MAX) { + ret = -EINVAL; + goto out; + } + + config = FLAG_REQUEST | CMD_BACKLIGHT | ((level & BACKLIGHT_MASK) << BACKLIGHT_SHIFT); +- phytium_writel_reg(priv, config, PHYTIUM_DCREQ_PIX_CLOCK_CONFIG(port)); +- +- do { +- mdelay(10); +- timeout--; +- config = phytium_readl_reg(priv, PHYTIUM_DCREQ_PIX_CLOCK_CONFIG(port)); +- } while ((!(config & FLAG_REPLY)) && timeout); +- if (timeout == 0) { +- DRM_ERROR("%s failed\n", __func__); +- ret = -EIO; +- } +- +- phytium_writel_reg(priv, 0, +- PHYTIUM_DCREQ_PIX_CLOCK_CONFIG(port)); +- mdelay(20); ++ phytium_writel_reg(priv, config, group_offset, X100_DCREQ_CMD_REGISTER); ++ ret = phytium_wait_cmd_done(priv, group_offset + X100_DCREQ_CMD_REGISTER, ++ FLAG_REQUEST, FLAG_REPLY); ++ if (ret < 0) ++ DRM_ERROR("%s: failed to set backlight\n", __func__); + + out: + return ret; + } + +-static void x100_dp_hw_set_test_pattern(struct phytium_dp_device *phytium_dp, +- uint8_t lane_count, +- uint8_t test_pattern, +- uint8_t *custom_pattern, +- uint32_t custom_pattern_size) +-{ +- struct drm_device *dev = phytium_dp->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; +- int port = phytium_dp->port, val = 0, tmp = 0, i; +- +- if ((test_pattern == PHYTIUM_PHY_TP_80BIT_CUSTOM) +- && custom_pattern && (custom_pattern_size > 0)) { +- val = *(int *)custom_pattern; +- phytium_writel_reg(priv, val, PHYTIUM_DP_CUSTOM_80BIT_PATTERN_0(port)); +- val = *(int *)(custom_pattern + 4); +- phytium_writel_reg(priv, val, PHYTIUM_DP_CUSTOM_80BIT_PATTERN_1(port)); +- val = *(short int *)(custom_pattern + 8); +- phytium_writel_reg(priv, val, PHYTIUM_DP_CUSTOM_80BIT_PATTERN_2(port)); +- } +- +- if (test_pattern == PHYTIUM_PHY_TP_D10_2 || test_pattern == PHYTIUM_PHY_TP_PRBS7 +- || test_pattern == PHYTIUM_PHY_TP_80BIT_CUSTOM) +- phytium_writel_reg(priv, SCRAMBLING_DISABLE, 
PHYTIUM_DP_SCRAMBLING_DISABLE(port)); +- else +- phytium_writel_reg(priv, SCRAMBLING_ENABLE, PHYTIUM_DP_SCRAMBLING_DISABLE(port)); +- +- tmp = test_pattern - PHYTIUM_PHY_TP_NONE + TEST_PATTERN_NONE; +- val = 0; +- for (i = 0; i < lane_count; i++) +- val |= (tmp << (TEST_PATTERN_LANE_SHIFT * i)); +- phytium_writel_reg(priv, val, PHYTIUM_DP_LINK_QUAL_PATTERN_SET(port)); +-} +- +-static void x100_dp_hw_set_train_pattern(struct phytium_dp_device *phytium_dp, +- uint8_t train_pattern) ++bool x100_dp_hw_spread_is_enable(struct phytium_dp_device *phytium_dp) + { + struct drm_device *dev = phytium_dp->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; +- int port = phytium_dp->port, tmp = 0; +- +- /* Scrambling is disabled for TPS1/TPS2/3 and enabled for TPS4 */ +- if (train_pattern == DP_TRAINING_PATTERN_4 +- || train_pattern == DP_TRAINING_PATTERN_DISABLE) { +- phytium_writel_reg(priv, SCRAMBLING_ENABLE, PHYTIUM_DP_SCRAMBLING_DISABLE(port)); +- phytium_writel_reg(priv, SCRAMBLER_RESET, PHYTIUM_DP_FORCE_SCRAMBLER_RESET(port)); +- } else { +- phytium_writel_reg(priv, SCRAMBLING_DISABLE, PHYTIUM_DP_SCRAMBLING_DISABLE(port)); +- } +- switch (train_pattern) { +- case DP_TRAINING_PATTERN_DISABLE: +- tmp = TRAINING_OFF; +- break; +- case DP_TRAINING_PATTERN_1: +- tmp = TRAINING_PATTERN_1; +- break; +- case DP_TRAINING_PATTERN_2: +- tmp = TRAINING_PATTERN_2; +- break; +- case DP_TRAINING_PATTERN_3: +- tmp = TRAINING_PATTERN_3; +- break; +- case DP_TRAINING_PATTERN_4: +- tmp = TRAINING_PATTERN_4; +- break; +- default: +- tmp = TRAINING_OFF; +- break; +- } +- +- phytium_writel_reg(priv, tmp, PHYTIUM_DP_TRAINING_PATTERN_SET(port)); +-} +- +-static void x100_dp_hw_set_link(struct phytium_dp_device *phytium_dp, +- uint8_t link_lane_count, uint32_t link_rate) +-{ +- struct drm_device *dev = phytium_dp->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; +- int port = phytium_dp->port; +- +- phytium_writel_reg(priv, link_lane_count, +- 
PHYTIUM_DP_LANE_COUNT_SET(port)); +- phytium_writel_reg(priv, +- drm_dp_link_rate_to_bw_code(link_rate), +- PHYTIUM_DP_LINK_BW_SET(port)); +- +- if (drm_dp_enhanced_frame_cap(phytium_dp->dpcd)) +- phytium_writel_reg(priv, ENHANCED_FRAME_ENABLE, +- PHYTIUM_DP_ENHANCED_FRAME_EN(port)); +- else +- phytium_writel_reg(priv, ENHANCED_FRAME_DISABLE, +- PHYTIUM_DP_ENHANCED_FRAME_EN(port)); +- x100_dp_phy_set_lane_and_rate(phytium_dp, link_lane_count, link_rate); +-} +- +-void x100_dp_hw_set_lane_setting(struct phytium_dp_device *phytium_dp, +- uint32_t link_rate, uint8_t train_set) +-{ +- x100_dp_phy_set_lane_setting(phytium_dp, link_rate, train_set); +-} +- +-static int X100_rate[] = {162000, 270000, 540000, 810000}; +- +-static void x100_dp_set_source_rate_and_lane_count(struct phytium_dp_device *phytium_dp) +-{ +- phytium_dp->source_rates = X100_rate; +- phytium_dp->num_source_rates = num_source_rates; +- +- if (phytium_dp->port == 0) +- phytium_dp->source_max_lane_count = source_max_lane_count; +- else if (phytium_dp->port == 1) +- phytium_dp->source_max_lane_count = source_max_lane_count; +- else if (phytium_dp->port == 2) +- phytium_dp->source_max_lane_count = 1; +- else +- phytium_dp->source_max_lane_count = 1; +-} +- +-void x100_dp_hw_get_hpd_state(struct phytium_dp_device *phytium_dp) +-{ +- struct drm_device *dev = phytium_dp->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; +- int port = phytium_dp->port; +- uint32_t val = 0, raw_state = 0; +- +- val = phytium_readl_reg(priv, PHYTIUM_DP_INTERRUPT_RAW_STATUS(port)); +- +- /* maybe miss hpd, so used for clear PHYTIUM_DP_INTERRUPT_RAW_STATUS */ +- phytium_readl_reg(priv, PHYTIUM_DP_INTERRUPT_STATUS(port)); +- raw_state = phytium_readl_reg(priv, PHYTIUM_DP_SINK_HPD_STATE(port)); +- if (val & HPD_EVENT) +- phytium_dp->dp_hpd_state.hpd_event_state = true; +- +- if (val & HPD_IRQ) +- phytium_dp->dp_hpd_state.hpd_irq_state = true; +- +- if (raw_state & HPD_CONNECT) +- 
phytium_dp->dp_hpd_state.hpd_raw_state = true; +- else +- phytium_dp->dp_hpd_state.hpd_raw_state = false; +-} +- +-void x100_dp_hw_hpd_irq_setup(struct phytium_dp_device *phytium_dp, bool enable) +-{ +- struct drm_device *dev = phytium_dp->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; +- int port = phytium_dp->port; +- +- phytium_dp->dp_hpd_state.hpd_irq_enable = enable; +- if (enable) +- phytium_writel_reg(priv, HPD_OTHER_MASK, PHYTIUM_DP_INTERRUPT_MASK(port)); +- else +- phytium_writel_reg(priv, HPD_IRQ_MASK|HPD_EVENT_MASK|HPD_OTHER_MASK, +- PHYTIUM_DP_INTERRUPT_MASK(port)); +-} +- +-void x100_dp_hw_disable_audio(struct phytium_dp_device *phytium_dp) +-{ +- struct drm_device *dev = phytium_dp->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; +- int port = phytium_dp->port; +- +- phytium_writel_reg(priv, SECONDARY_STREAM_DISABLE, +- PHYTIUM_DP_SECONDARY_STREAM_ENABLE(port)); +-} +- +-void x100_dp_hw_enable_audio(struct phytium_dp_device *phytium_dp) +-{ +- struct drm_device *dev = phytium_dp->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; +- int port = phytium_dp->port; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port, config; ++ uint32_t group_offset = priv->address_transform_base; + +- phytium_writel_reg(priv, SECONDARY_STREAM_ENABLE, +- PHYTIUM_DP_SECONDARY_STREAM_ENABLE(port)); ++ config = phytium_readl_reg(priv, group_offset, X100_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); + ++ return ((config & DP_SPREAD_ENABLE(port)) ? 
true:false); + } + +-static void x100_dp_hw_audio_shutdown(struct phytium_dp_device *phytium_dp) ++int x100_dp_hw_reset(struct phytium_dp_device *phytium_dp) + { + struct drm_device *dev = phytium_dp->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; ++ struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; ++ int timeout = 100, config, ret = 0; ++ uint32_t group_offset = priv->address_transform_base; + +- phytium_writel_reg(priv, SEC_AUDIO_DISABLE, PHYTIUM_DP_SEC_AUDIO_ENABLE(port)); +-} +- +-static int x100_dp_hw_audio_digital_mute(struct phytium_dp_device *phytium_dp) +-{ +- struct phytium_display_drm_private *priv = phytium_dp->dev->dev_private; +- int port = phytium_dp->port; +- int ret = 0; +- +- if (phytium_readl_reg(priv, PHYTIUM_DP_SECONDARY_STREAM_ENABLE(port)) == 0) { +- ret = -ENODEV; +- goto out; +- } +- phytium_writel_reg(priv, CHANNEL_MUTE, PHYTIUM_DP_SEC_CHANNEL_COUNT(port)); ++ config = phytium_readl_reg(priv, group_offset, X100_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); ++ config &= (~DC_DP_RESET_STATUS(port)); + +-out: +- return ret; +-} +- +-int x100_dp_hw_audio_hw_params(struct phytium_dp_device *phytium_dp, struct audio_info audio_info) +-{ +- struct phytium_display_drm_private *priv = phytium_dp->dev->dev_private; +- int port = phytium_dp->port; +- int ret = 0, data_window = 0; +- const struct dp_audio_n_m *n_m = NULL; +- uint32_t fs, ws, fs_accurac; +- +- DRM_DEBUG_KMS("%s:set port%d sample_rate(%d) channels(%d) sample_width(%d)\n", +- __func__, phytium_dp->port, audio_info.sample_rate, +- audio_info.channels, audio_info.sample_width); +- +- phytium_writel_reg(priv, INPUT_SELECT_I2S, PHYTIUM_DP_SEC_INPUT_SELECT(port)); +- phytium_writel_reg(priv, APB_CLOCK/audio_info.sample_rate, +- PHYTIUM_DP_SEC_DIRECT_CLKDIV(port)); +- phytium_writel_reg(priv, audio_info.channels & CHANNEL_MASK, +- PHYTIUM_DP_SEC_CHANNEL_COUNT(port)); +- phytium_writel_reg(priv, CHANNEL_MAP_DEFAULT, 
PHYTIUM_DP_SEC_CHANNEL_MAP(port)); +- data_window = 90*(phytium_dp->link_rate)/100 +- *(phytium_dp->mode.htotal - phytium_dp->mode.hdisplay) +- /phytium_dp->mode.clock/4; +- phytium_writel_reg(priv, data_window, PHYTIUM_DP_SEC_DATA_WINDOW(port)); +- phytium_writel_reg(priv, 0xb5, PHYTIUM_DP_SEC_CS_CATEGORY_CODE(port)); +- n_m = phytium_dp_audio_get_n_m(phytium_dp->link_rate, audio_info.sample_rate); +- if (n_m == NULL) { +- DRM_NOTE("can not get n_m for link_rate(%d) and sample_rate(%d)\n", +- phytium_dp->link_rate, audio_info.sample_rate); +- goto out; +- } +- +- phytium_writel_reg(priv, n_m->m, PHYTIUM_DP_SEC_MAUD(port)); +- phytium_writel_reg(priv, n_m->n, PHYTIUM_DP_SEC_NAUD(port)); +- phytium_writel_reg(priv, CLOCK_MODE_SYNC, PHYTIUM_DP_SEC_CLOCK_MODE(port)); +- phytium_writel_reg(priv, CS_SOURCE_FORMAT_DEFAULT, +- PHYTIUM_DP_SEC_CS_SOURCE_FORMAT(port)); +- +- switch (audio_info.sample_rate) { +- case 32000: +- fs = ORIG_FREQ_32000; +- fs_accurac = SAMPLING_FREQ_32000; +- break; +- case 44100: +- fs = ORIG_FREQ_44100; +- fs_accurac = SAMPLING_FREQ_44100; +- break; +- case 48000: +- fs = ORIG_FREQ_48000; +- fs_accurac = SAMPLING_FREQ_48000; +- break; +- case 96000: +- fs = ORIG_FREQ_96000; +- fs_accurac = SAMPLING_FREQ_96000; +- break; +- case 176400: +- fs = ORIG_FREQ_176400; +- fs_accurac = SAMPLING_FREQ_176400; +- break; +- case 192000: +- fs = ORIG_FREQ_192000; +- fs_accurac = SAMPLING_FREQ_192000; +- break; +- default: +- DRM_ERROR("dp not support sample_rate %d\n", audio_info.sample_rate); +- goto out; +- } +- +- switch (audio_info.sample_width) { +- case 16: +- ws = WORD_LENGTH_16; +- break; +- case 18: +- ws = WORD_LENGTH_18; +- break; +- case 20: +- ws = WORD_LENGTH_20; +- break; +- case 24: +- ws = WORD_LENGTH_24; +- break; +- default: +- DRM_ERROR("dp not support sample_width %d\n", audio_info.sample_width); +- goto out; ++ phytium_writel_reg(priv, config, group_offset, X100_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); ++ phytium_writel_reg(priv, 
FLAG_REQUEST | CMD_DC_DP_RESET, ++ priv->dcreq_reg_base[port], X100_DCREQ_CMD_REGISTER); ++ do { ++ mdelay(10); ++ timeout--; ++ config = phytium_readl_reg(priv, group_offset, ++ X100_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); ++ if (config & DC_DP_RESET_STATUS(port)) ++ break; ++ } while (timeout); ++ if (timeout == 0) { ++ DRM_ERROR("reset dc/dp pipe(%d) failed\n", port); ++ ret = -1; + } + +- phytium_writel_reg(priv, ((fs&ORIG_FREQ_MASK)<audio_info = audio_info; +- +- return 0; +- +-out: +- phytium_writel_reg(priv, SEC_AUDIO_DISABLE, PHYTIUM_DP_SEC_AUDIO_ENABLE(port)); + return ret; + } + +-void x100_dp_hw_disable_video(struct phytium_dp_device *phytium_dp) +-{ +- struct drm_device *dev = phytium_dp->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; +- int port = phytium_dp->port; +- +- phytium_writel_reg(priv, SST_MST_SOURCE_0_DISABLE, +- PHYTIUM_DP_VIDEO_STREAM_ENABLE(port)); +-} +- +-void x100_dp_hw_enable_video(struct phytium_dp_device *phytium_dp) +-{ +- struct drm_device *dev = phytium_dp->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; +- int port = phytium_dp->port; +- +- phytium_writel_reg(priv, SST_MST_SOURCE_0_ENABLE, +- PHYTIUM_DP_VIDEO_STREAM_ENABLE(port)); +- phytium_writel_reg(priv, LINK_SOFT_RESET, PHYTIUM_DP_SOFT_RESET(port)); +-} +- +-bool x100_dp_hw_video_is_enable(struct phytium_dp_device *phytium_dp) +-{ +- struct drm_device *dev = phytium_dp->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; +- int port = phytium_dp->port; +- +- return phytium_readl_reg(priv, PHYTIUM_DP_VIDEO_STREAM_ENABLE(port)) ? 
true : false; +-} +- +-void x100_dp_hw_config_video(struct phytium_dp_device *phytium_dp) +-{ +- struct drm_device *dev = phytium_dp->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; +- int port = phytium_dp->port; +- unsigned long link_bw, date_rate = 0; +- struct drm_display_info *display_info = &phytium_dp->connector.display_info; +- unsigned char tu_size = 64; +- unsigned long data_per_tu = 0; +- int symbols_per_tu, frac_symbols_per_tu, symbol_count, udc, value; +- +- /* cal M/N and tu_size */ +- phytium_writel_reg(priv, phytium_dp->mode.crtc_clock/10, PHYTIUM_DP_M_VID(port)); +- phytium_writel_reg(priv, phytium_dp->link_rate/10, PHYTIUM_DP_N_VID(port)); +- link_bw = phytium_dp->link_rate * phytium_dp->link_lane_count; +- date_rate = (phytium_dp->mode.crtc_clock * display_info->bpc * 3)/8; +- +- /* mul 10 for register setting */ +- data_per_tu = 10*tu_size * date_rate/link_bw; +- symbols_per_tu = (data_per_tu/10)&0xff; +- frac_symbols_per_tu = (data_per_tu%10*16/10) & 0xf; +- phytium_writel_reg(priv, frac_symbols_per_tu<<24 | symbols_per_tu<<16 | tu_size, +- PHYTIUM_DP_TRANSFER_UNIT_SIZE(port)); +- +- symbol_count = (phytium_dp->mode.crtc_hdisplay*display_info->bpc*3 + 7)/8; +- udc = (symbol_count + phytium_dp->link_lane_count - 1)/phytium_dp->link_lane_count; +- phytium_writel_reg(priv, udc, PHYTIUM_DP_DATA_COUNT(port)); +- +- /* config main stream attributes */ +- phytium_writel_reg(priv, phytium_dp->mode.crtc_htotal, +- PHYTIUM_DP_MAIN_LINK_HTOTAL(port)); +- phytium_writel_reg(priv, phytium_dp->mode.crtc_hdisplay, +- PHYTIUM_DP_MAIN_LINK_HRES(port)); +- phytium_writel_reg(priv, +- phytium_dp->mode.crtc_hsync_end - phytium_dp->mode.crtc_hsync_start, +- PHYTIUM_DP_MAIN_LINK_HSWIDTH(port)); +- phytium_writel_reg(priv, phytium_dp->mode.crtc_htotal - phytium_dp->mode.crtc_hsync_start, +- PHYTIUM_DP_MAIN_LINK_HSTART(port)); +- phytium_writel_reg(priv, phytium_dp->mode.crtc_vtotal, +- PHYTIUM_DP_MAIN_LINK_VTOTAL(port)); +- 
phytium_writel_reg(priv, phytium_dp->mode.crtc_vdisplay, +- PHYTIUM_DP_MAIN_LINK_VRES(port)); +- phytium_writel_reg(priv, +- phytium_dp->mode.crtc_vsync_end - phytium_dp->mode.crtc_vsync_start, +- PHYTIUM_DP_MAIN_LINK_VSWIDTH(port)); +- phytium_writel_reg(priv, phytium_dp->mode.crtc_vtotal - phytium_dp->mode.crtc_vsync_start, +- PHYTIUM_DP_MAIN_LINK_VSTART(port)); +- +- value = 0; +- if (phytium_dp->mode.flags & DRM_MODE_FLAG_PHSYNC) +- value = value & (~HSYNC_POLARITY_LOW); +- else +- value = value | HSYNC_POLARITY_LOW; +- +- if (phytium_dp->mode.flags & DRM_MODE_FLAG_PVSYNC) +- value = value & (~VSYNC_POLARITY_LOW); +- else +- value = value | VSYNC_POLARITY_LOW; +- phytium_writel_reg(priv, value, PHYTIUM_DP_MAIN_LINK_POLARITY(port)); +- +- switch (display_info->bpc) { +- case 10: +- value = (MISC0_BIT_DEPTH_10BIT << MISC0_BIT_DEPTH_OFFSET); +- break; +- case 6: +- value = (MISC0_BIT_DEPTH_6BIT << MISC0_BIT_DEPTH_OFFSET); +- break; +- default: +- value = (MISC0_BIT_DEPTH_8BIT << MISC0_BIT_DEPTH_OFFSET); +- break; +- } +- value |= (MISC0_COMPONENT_FORMAT_RGB << MISC0_COMPONENT_FORMAT_SHIFT) +- | MISC0_SYNCHRONOUS_CLOCK; +- phytium_writel_reg(priv, value, PHYTIUM_DP_MAIN_LINK_MISC0(port)); +- phytium_writel_reg(priv, 0, PHYTIUM_DP_MAIN_LINK_MISC1(port)); +- +- value = USER_ODDEVEN_POLARITY_HIGH | USER_DATA_ENABLE_POLARITY_HIGH; +- if (phytium_dp->mode.flags & DRM_MODE_FLAG_PHSYNC) +- value = value | USER_HSYNC_POLARITY_HIGH; +- else +- value = value & (~USER_HSYNC_POLARITY_HIGH); +- if (phytium_dp->mode.flags & DRM_MODE_FLAG_PVSYNC) +- value = value | USER_VSYNC_POLARITY_HIGH; +- else +- value = value & (~USER_VSYNC_POLARITY_HIGH); +- phytium_writel_reg(priv, value, PHYTIUM_DP_USER_SYNC_POLARITY(port)); +-} +- +-void x100_dp_hw_disable_output(struct phytium_dp_device *phytium_dp) +-{ +- struct drm_device *dev = phytium_dp->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; +- int port = phytium_dp->port; +- +- phytium_writel_reg(priv, 
TRANSMITTER_OUTPUT_DISABLE, +- PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE(port)); +- phytium_writel_reg(priv, LINK_SOFT_RESET, PHYTIUM_DP_SOFT_RESET(port)); +-} +- +-void x100_dp_hw_enable_output(struct phytium_dp_device *phytium_dp) +-{ +- struct drm_device *dev = phytium_dp->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; +- int port = phytium_dp->port; +- +- phytium_writel_reg(priv, LINK_SOFT_RESET, PHYTIUM_DP_SOFT_RESET(port)); +- phytium_writel_reg(priv, TRANSMITTER_OUTPUT_ENABLE, +- PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE(port)); +-} +- +-void x100_dp_hw_enable_input_source(struct phytium_dp_device *phytium_dp) +-{ +- struct drm_device *dev = phytium_dp->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; +- int port = phytium_dp->port; +- +- phytium_writel_reg(priv, VIRTUAL_SOURCE_0_ENABLE, +- PHYTIUM_INPUT_SOURCE_ENABLE(port)); +-} +- +-void x100_dp_hw_disable_input_source(struct phytium_dp_device *phytium_dp) +-{ +- struct drm_device *dev = phytium_dp->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; +- int port = phytium_dp->port; +- +- phytium_writel_reg(priv, (~VIRTUAL_SOURCE_0_ENABLE)&VIRTUAL_SOURCE_0_ENABLE_MASK, +- PHYTIUM_INPUT_SOURCE_ENABLE(port)); +-} +- +-bool x100_dp_hw_output_is_enable(struct phytium_dp_device *phytium_dp) +-{ +- struct drm_device *dev = phytium_dp->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; +- int port = phytium_dp->port; +- +- return phytium_readl_reg(priv, PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE(port)) ? 
true : false; +-} +- +-void x100_dp_hw_init(struct phytium_dp_device *phytium_dp) +-{ +- struct drm_device *dev = phytium_dp->dev; +- struct phytium_display_drm_private *priv = dev->dev_private; +- int port = phytium_dp->port; +- +- x100_dp_set_source_rate_and_lane_count(phytium_dp); +- x100_dp_phy_init(phytium_dp); +- phytium_writel_reg(priv, AUX_CLK_DIVIDER, PHYTIUM_DP_AUX_CLK_DIVIDER(port)); +- phytium_dp->fast_train_support = false; +-} +- + static struct phytium_dp_func x100_dp_funcs = { +- .dp_hw_init = x100_dp_hw_init, +- .dp_hw_enable_output = x100_dp_hw_enable_output, +- .dp_hw_disable_output = x100_dp_hw_disable_output, +- .dp_hw_output_is_enable = x100_dp_hw_output_is_enable, +- .dp_hw_enable_input_source = x100_dp_hw_enable_input_source, +- .dp_hw_disable_input_source = x100_dp_hw_disable_input_source, +- .dp_hw_get_hpd_state = x100_dp_hw_get_hpd_state, +- .dp_hw_hpd_irq_setup = x100_dp_hw_hpd_irq_setup, +- .dp_hw_set_test_pattern = x100_dp_hw_set_test_pattern, +- .dp_hw_set_link = x100_dp_hw_set_link, +- .dp_hw_set_lane_setting = x100_dp_hw_set_lane_setting, +- .dp_hw_set_train_pattern = x100_dp_hw_set_train_pattern, +- .dp_hw_disable_video = x100_dp_hw_disable_video, +- .dp_hw_enable_video = x100_dp_hw_enable_video, +- .dp_hw_video_is_enable = x100_dp_hw_video_is_enable, +- .dp_hw_config_video = x100_dp_hw_config_video, +- .dp_hw_enable_audio = x100_dp_hw_enable_audio, +- .dp_hw_disable_audio = x100_dp_hw_disable_audio, +- .dp_hw_audio_shutdown = x100_dp_hw_audio_shutdown, +- .dp_hw_audio_digital_mute = x100_dp_hw_audio_digital_mute, +- .dp_hw_audio_hw_params = x100_dp_hw_audio_hw_params, +- .dp_hw_enable_backlight = x100_dp_hw_enable_backlight, +- .dp_hw_disable_backlight = x100_dp_hw_disable_backlight, +- .dp_hw_get_backlight = x100_dp_hw_get_backlight, ++ .dp_hw_reset = x100_dp_hw_reset, ++ .dp_hw_spread_is_enable = x100_dp_hw_spread_is_enable, + .dp_hw_set_backlight = x100_dp_hw_set_backlight, +- .dp_hw_poweron_panel = x100_dp_hw_poweron_panel, ++ 
.dp_hw_get_backlight = x100_dp_hw_get_backlight, ++ .dp_hw_disable_backlight = x100_dp_hw_disable_backlight, ++ .dp_hw_enable_backlight = x100_dp_hw_enable_backlight, + .dp_hw_poweroff_panel = x100_dp_hw_poweroff_panel, ++ .dp_hw_poweron_panel = x100_dp_hw_poweron_panel, ++ .dp_hw_init_phy = x100_dp_hw_init_phy, ++ .dp_hw_set_phy_lane_setting = x100_dp_hw_set_phy_lane_setting, ++ .dp_hw_set_phy_lane_and_rate = x100_dp_hw_set_phy_lane_and_rate, + }; + + void x100_dp_func_register(struct phytium_dp_device *phytium_dp) +diff --git a/drivers/gpu/drm/phytium/x100_dp.h b/drivers/gpu/drm/phytium/x100_dp.h +index 284c204e16b8..a7a0fc48a58b 100644 +--- a/drivers/gpu/drm/phytium/x100_dp.h ++++ b/drivers/gpu/drm/phytium/x100_dp.h +@@ -1,19 +1,13 @@ + /* SPDX-License-Identifier: GPL-2.0 */ +-/* Phytium X100 display drm driver ++/* Phytium display drm driver + * +- * Copyright (c) 2021 Phytium Limited. +- * +- * Author: +- * Yang Xun +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License as published by +- * the Free Software Foundation; either version 2 of the License, or +- * (at your option) any later version. ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + + #ifndef __X100_DP_H__ + #define __X100_DP_H__ + ++#define X100_DP_BACKLIGHT_MAX 100 ++ + void x100_dp_func_register(struct phytium_dp_device *phytium_dp); + #endif /* __X100_DP_H__ */ +diff --git a/drivers/gpu/drm/phytium/x100_reg.h b/drivers/gpu/drm/phytium/x100_reg.h +new file mode 100644 +index 000000000000..130430e924b5 +--- /dev/null ++++ b/drivers/gpu/drm/phytium/x100_reg.h +@@ -0,0 +1,349 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Phytium display drm driver ++ * ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. 
++ */ ++ ++#ifndef __X100_REG_H__ ++#define __X100_REG_H__ ++ ++#include "phytium_reg.h" ++ ++/******************************dc register start******************************************/ ++#define X100_DC_CLOCK_CONTROL 0x0000 ++ #define SOFT_RESET (1<<12) ++#define X100_DC_CLOCK_IDLE 0x0004 ++ #define IS_IDLE (1<<16) ++/******************************dc register end********************************************/ ++ ++/******************************dcreq register start**************************************/ ++#define X100_DCREQ_PLANE0_ADDR_START 0x00 ++#define X100_DCREQ_PLANE0_ADDR_END 0x04 ++#define X100_DCREQ_PLANE1_ADDR_START 0x08 ++#define X100_DCREQ_PLANE1_ADDR_END 0x0c ++#define X100_DCREQ_PLANE0_CONFIG 0x10 ++ #define DCREQ_NO_LOSSY (0 << 0) ++ #define DCREQ_LOSSY (1 << 0) ++ #define DCREQ_TILE_TYPE_MASK (0x3 << 1) ++ #define DCREQ_TILE_TYPE_MODE0 (0x1 << 1) ++ #define DCREQ_TILE_TYPE_MODE3 (0x2 << 1) ++ #define DCREQ_COLOURFORMAT_MASK (0x7f << 8) ++ #define DCREQ_COLOURFORMAT_RGB565 (0x5 << 8) ++ #define DCREQ_COLOURFORMAT_ARGB1555 (0x4 << 8) ++ #define DCREQ_COLOURFORMAT_ARGB4444 (0x02 << 8) ++ #define DCREQ_COLOURFORMAT_BGRA8888 (0x29 << 8) ++ #define DCREQ_COLOURFORMAT_ARGB2101010 (0xe << 8) ++ #define DCREQ_COLOURFORMAT_YUYV (0x59 << 8) ++ #define DCREQ_COLOURFORMAT_UYVY (0x5b << 8) ++ #define DCREQ_ARGBSWIZZLE_MASK (0xf << 4) ++ #define DCREQ_ARGBSWIZZLE_ARGB (0X0 << 4) ++ #define DCREQ_ARGBSWIZZLE_BGRA (0XC << 4) ++ #define DCREQ_MODE_MASK (1 << 16) ++ #define DCREQ_MODE_LINEAR (0 << 16) ++ #define DCREQ_MODE_TILE (1 << 16) ++#define X100_DCREQ_PLANE1_CONFIG(pipe) 0x14 ++#define X100_DCREQ_PLANE0_CLEAR_COLOR_L 0x18 ++#define X100_DCREQ_PLANE0_CLEAR_COLOR_H 0x1C ++#define X100_DCREQ_PLANE1_CLEAR_COLOR_L 0x20 ++#define X100_DCREQ_PLANE1_CLEAR_COLOR_H 0x24 ++#define X100_DCREQ_CMD_REGISTER 0x38 ++ #define FLAG_REPLY (1<<31) ++ #define FLAG_REQUEST (1<<30) ++ #define CMD_PIXEL_CLOCK (0x0 << 28) ++ #define CMD_BACKLIGHT (0x1 << 28) ++ #define CMD_DC_DP_RESET 
(0x3 << 28) ++ #define BACKLIGHT_SHIFT 21 ++ #define BACKLIGHT_MASK 0x7f ++ #define BACKLIGHT_MAX 100 ++ #define BACKLIGHT_ENABLE (101 << BACKLIGHT_SHIFT) ++ #define BACKLIGHT_DISABLE (102 << BACKLIGHT_SHIFT) ++ #define PANEL_POWER_ENABLE (103 << BACKLIGHT_SHIFT) ++ #define PANEL_POWER_DISABLE (104 << BACKLIGHT_SHIFT) ++ #define PIXEL_CLOCK_MASK (0x1fffff) ++#define X100_DCREQ_FBCD_CLOCK_CONFIG 0x3c ++#define X100_DCREQ_PIX_DMA_PREFIX 0x50 ++ #define PREFIX_MASK 0xff ++ #define PREFIX_SHIFT 32 ++#define X100_DCREQ_FRAME_START 0x54 ++#define X100_DCREQ_FILTER_CONFIG 0x58 ++#define X100_DCREQ_CONTROL 0x5C ++ #define DC_REQ_ENABLE (1<<0) ++#define X100_DCREQ_MSI_CLEAR 0x60 ++ #define MSI_CLEAR 0x0 ++#define X100_DCREQ_RESET 0x68 ++ #define DCREQ_RESET (0x3 << 0) ++ #define DCREQ_RESET_MASK 0x3 ++#define X100_DCREQ_PLAN 0x94 ++ #define DCREQ_PLAN_A 0x0 ++ #define DCREQ_PLAN_B 0X5 ++/******************************dcreq register end**************************************/ ++ ++/******************************address transform register start**************************/ ++#define X100_GPU_ADDRESS_TRANSFORM_SRC_ADDR 0x0 ++#define X100_GPU_ADDRESS_TRANSFORM_SIZE 0x4 ++#define X100_GPU_ADDRESS_TRANSFORM_DST_ADDR 0x8 ++ ++#define X100_DC_ADDRESS_TRANSFORM_SRC_ADDR 0x24 ++ #define SRC_ADDR_OFFSET 22 ++ #define SRC_ADDR_MASK 0xffffffffff ++#define X100_DC_ADDRESS_TRANSFORM_SIZE 0x28 ++ #define ADDRESS_TRANSFORM_ENABLE (0x1 << 31) ++ #define SIZE_OFFSET 22 ++#define X100_DC_ADDRESS_TRANSFORM_DST_ADDR 0x2c ++ #define DST_ADDR_OFFSET 22 ++#define X100_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS 0x48 ++ #define DC_DP_RESET_STATUS(pipe) (1 << pipe) ++ #define DP_SPREAD_ENABLE(pipe) (0x8 << pipe) ++#define X100_DC_ADDRESS_TRANSFORM_BACKLIGHT_VALUE 0x4c ++ #define BACKLIGHT_VALUE_MASK (0x7f) ++ #define BACKLIGHT_VALUE_SHIFT 16 ++/******************************address transform register end**************************/ ++ ++/******************************phy register 
start******************************************/ ++/* self define */ ++#define X100_PHY0_PIPE_RESET 0x40104 ++ #define RESET 0x0 ++ #define RESET_DEASSERT 0x1 ++#define X100_PHY1_PIPE_RESET 0x100100 ++ #define PHY1_PIPE_RESET 0x0 ++ #define PHY1_PIPE_RESET_DEASSERT 0x4 ++ ++#define X100_PHY1_EN_REFCLK 0x100070 ++ ++#define X100_PHY0_MODE 0x40088 ++ #define LANE_BIT (0x3) ++ #define LANE_BIT_SHIFT 0x2 ++#define X100_PHY1_SEL 0x100004 ++ #define PHY1_DP_LANE_BIT 0x1 ++ #define PHY1_DP_LANE_BIT_SHIFT 2 ++ ++#define X100_PHY0_LINK_CFG 0x40044 ++ #define LANE_MASTER 0x1 ++ #define LANE_MASTER_SHIFT 1 ++ ++#define X100_PHY0_PLL_EN 0x40010 ++ #define PLL_EN 0x1 ++ #define PLL_EN_SHIFT 1 ++#define X100_PHY0_PMA_WIDTH 0x40020 ++ #define BIT_20 0x5 ++ #define BIT_20_SHIFT 4 ++ ++#define X100_PHY0_PMA0_POWER 0x40014 ++#define X100_PHY0_PMA1_POWER 0x40018 ++ #define A0_ACTIVE 0x1 ++ #define A0_ACTIVE_SHIFT 8 ++ #define A3_POWERDOWN3 0x8 ++ #define A3_POWERDOWN3_SHIFT 8 ++ ++#define X100_PHY1_PMA_MISC 0x1000a0 ++ #define PHY1_PLL_EN 0x1 ++ #define PHY1_PLL_EN_MASK 1 ++ #define PHY1_PLL_EN_SHIFT 8 ++ #define PHY1_BIT_20 0x5 ++ #define PHY1_BIT_20_SHIFT 9 ++ #define PHY1_A0_ACTIVE 0x1 ++ #define PHY1_A0_ACTIVE_SHIFT 2 ++ #define PHY1_A0_ACTIVE_MASK 0x3f ++ #define PHY1_A3_POWERDOWN3 0x8 ++ #define PHY1_A3_POWERDOWN3_MASK 0x3f ++ #define PHY1_A3_POWERDOWN3_SHIFT 2 ++ ++#define X100_PHY0_LINK_RESET 0x40108 ++ #define LINK_RESET 0x1 ++ #define LINK_RESET_MASK 0x1 ++ #define LINTK_RESET_SHIFT 0x1 ++ ++#define X100_PHY0_APB_RESET 0x40100 ++ #define APB_RESET 0x1 ++#define X100_PHY1_APB_RESET 0x100104 ++ #define PHY1_APB_RESET 0x4 ++ ++/* phy origin register */ ++#define X100_PHY0_PLL_CFG 0x30038 ++#define X100_PHY1_PLL_CFG 0xb0038 ++ #define SINGLE_LINK 0x0 ++ #define DOUBLE_LINK 0x2 ++ ++#define X100_PHY0_PMA_CONTROL 0x3800c ++#define X100_PHY1_PMA_CONTROL 0xb800c ++ #define CONTROL_ENABLE 0x1 ++ #define CONTROL_ENABLE_MASK 0x1 ++ #define CONTROL_ENABLE_SHIFT 0x1 ++ ++#define 
X100_PHY0_PMA_CONTROL2 0x38004 ++#define X100_PHY1_PMA_CONTROL2 0xb8004 ++ #define PLL0_LOCK_DONE (0x1 << 6) ++ #define PLL1_LOCK_DONE (0x1 << 7) ++ ++#define X100_PHY0_PLL0_CLK_SEL 0X684 ++#define X100_PHY0_PLL1_CLK_SEL 0x704 ++#define X100_PHY1_PLL_CLK_SEL 0X80684 ++ #define PLL_LINK_RATE_162000 0xf01 ++ #define PLL_LINK_RATE_270000 0x701 ++ #define PLL_LINK_RATE_540000 0x301 ++ #define PLL_LINK_RATE_810000 0x200 ++ ++#define X100_PHY0_HSCLK0_SEL 0x18398 ++#define X100_PHY0_HSCLK1_SEL 0x1a398 ++#define X100_PHY1_HSCLK_SEL 0x90398 ++ #define HSCLK_LINK_0 0x0 ++ #define HSCLK_LINK_1 0x1 ++ ++#define X100_PHY0_HSCLK0_DIV 0x1839c ++#define X100_PHY0_HSCLK1_DIV 0x1a39c ++#define X100_PHY1_HSCLK_DIV 0x9039c ++ #define HSCLK_LINK_RATE_162000 0x2 ++ #define HSCLK_LINK_RATE_270000 0x1 ++ #define HSCLK_LINK_RATE_540000 0x0 ++ #define HSCLK_LINK_RATE_810000 0x0 ++ ++#define X100_PHY0_PLLDRC0_CTRL 0x18394 ++#define X100_PHY0_PLLDRC1_CTRL 0x1a394 ++#define X100_PHY1_PLLDRC_CTRL 0x90394 ++ #define PLLDRC_LINK0 0x1 ++ #define PLLDRC_LINK1 0x9 ++ ++#define X100_PHY0_PLL0_DSM_M0 0x250 ++#define X100_PHY1_PLL0_DSM_M0 0x80250 ++ #define PLL0_DSM_M0 0x4 ++#define X100_PHY0_PLL0_VCOCAL_START 0x218 ++#define X100_PHY1_PLL0_VCOCAL_START 0x80218 ++ #define PLL0_VCOCAL_START 0xc5e ++#define X100_PHY0_PLL0_VCOCAL_CTRL 0x208 ++#define X100_PHY1_PLL0_VCOCAL_CTRL 0x80208 ++ #define PLL0_VCOCAL_CTRL 0x3 ++ ++#define X100_PHY0_PLL1_DSM_M0 0x350 ++ #define PLL1_DSM_M0 0x4 ++#define X100_PHY0_PLL1_VCOCAL_START 0x318 ++ #define PLL1_VCOCAL_START 0xc5e ++#define X100_PHY0_PLL1_VCOCAL_CTRL 0x308 ++ #define PLL1_VCOCAL_CTRL 0x3 ++ ++#define X100_PHY0_PLL0_CP_PADJ 0x690 ++#define X100_PHY0_PLL0_CP_IADJ 0x694 ++#define X100_PHY0_PLL0_CP_FILT_PADJ 0x698 ++#define X100_PHY0_PLL0_INTDIV 0x240 ++#define X100_PHY0_PLL0_FRACDIVL 0x244 ++#define X100_PHY0_PLL0_FRACDIVH 0x248 ++#define X100_PHY0_PLL0_HIGH_THR 0x24c ++#define X100_PHY0_PLL0_PDIAG_CTRL 0x680 ++#define X100_PHY0_PLL0_VCOCAL_PLLCNT_START 0x220 
++#define X100_PHY0_PLL0_LOCK_PEFCNT 0x270 ++#define X100_PHY0_PLL0_LOCK_PLLCNT_START 0x278 ++#define X100_PHY0_PLL0_LOCK_PLLCNT_THR 0x27c ++ ++#define X100_PHY0_PLL1_CP_PADJ 0x710 ++#define X100_PHY0_PLL1_CP_IADJ 0x714 ++#define X100_PHY0_PLL1_CP_FILT_PADJ 0x718 ++#define X100_PHY0_PLL1_INTDIV 0x340 ++#define X100_PHY0_PLL1_FRACDIVL 0x344 ++#define X100_PHY0_PLL1_FRACDIVH 0x348 ++#define X100_PHY0_PLL1_HIGH_THR 0x34c ++#define X100_PHY0_PLL1_PDIAG_CTRL 0x700 ++#define X100_PHY0_PLL1_VCOCAL_PLLCNT_START 0x320 ++#define X100_PHY0_PLL1_LOCK_PEFCNT 0x370 ++#define X100_PHY0_PLL1_LOCK_PLLCNT_START 0x378 ++#define X100_PHY0_PLL1_LOCK_PLLCNT_THR 0x37c ++ ++#define X100_PHY1_PLL0_CP_PADJ 0x80690 ++#define X100_PHY1_PLL0_CP_IADJ 0x80694 ++#define X100_PHY1_PLL0_CP_FILT_PADJ 0x80698 ++#define X100_PHY1_PLL0_INTDIV 0x80240 ++#define X100_PHY1_PLL0_FRACDIVL 0x80244 ++#define X100_PHY1_PLL0_FRACDIVH 0x80248 ++#define X100_PHY1_PLL0_HIGH_THR 0x8024c ++#define X100_PHY1_PLL0_PDIAG_CTRL 0x80680 ++#define X100_PHY1_PLL0_VCOCAL_PLLCNT_START 0x80220 ++#define X100_PHY1_PLL0_LOCK_PEFCNT 0x80270 ++#define X100_PHY1_PLL0_LOCK_PLLCNT_START 0x80278 ++#define X100_PHY1_PLL0_LOCK_PLLCNT_THR 0x8027c ++ ++#define X100_PHY0_PLL0_TX_PSC_A0 0x18400 ++#define X100_PHY1_PLL0_TX_PSC_A0 0x90400 ++ #define PLL0_TX_PSC_A0 0xfb ++#define X100_PHY0_PLL0_TX_PSC_A2 0x18408 ++#define X100_PHY1_PLL0_TX_PSC_A2 0x90408 ++ #define PLL0_TX_PSC_A2 0x4aa ++#define X100_PHY0_PLL0_TX_PSC_A3 0x1840c ++#define X100_PHY1_PLL0_TX_PSC_A3 0x9040c ++ #define PLL0_TX_PSC_A3 0x4aa ++#define X100_PHY0_PLL0_RX_PSC_A0 0x28000 ++#define X100_PHY1_PLL0_RX_PSC_A0 0xa0000 ++ #define PLL0_RX_PSC_A0 0x0 ++#define X100_PHY0_PLL0_RX_PSC_A2 0x28008 ++#define X100_PHY1_PLL0_RX_PSC_A2 0xa0008 ++ #define PLL0_RX_PSC_A2 0x0 ++#define X100_PHY0_PLL0_RX_PSC_A3 0x2800C ++#define X100_PHY1_PLL0_RX_PSC_A3 0xa000C ++ #define PLL0_RX_PSC_A3 0x0 ++#define X100_PHY0_PLL0_RX_PSC_CAL 0x28018 ++#define X100_PHY1_PLL0_RX_PSC_CAL 0xa0018 ++ #define 
PLL0_RX_PSC_CAL 0x0 ++ ++#define X100_PHY0_PLL1_TX_PSC_A0 0x1a400 ++ #define PLL1_TX_PSC_A0 0xfb ++#define X100_PHY0_PLL1_TX_PSC_A2 0x1a408 ++ #define PLL1_TX_PSC_A2 0x4aa ++#define X100_PHY0_PLL1_TX_PSC_A3 0x1a40c ++ #define PLL1_TX_PSC_A3 0x4aa ++#define X100_PHY0_PLL1_RX_PSC_A0 0x2a000 ++ #define PLL1_RX_PSC_A0 0x0 ++#define X100_PHY0_PLL1_RX_PSC_A2 0x2a008 ++ #define PLL1_RX_PSC_A2 0x0 ++#define X100_PHY0_PLL1_RX_PSC_A3 0x2a00C ++ #define PLL1_RX_PSC_A3 0x0 ++#define X100_PHY0_PLL1_RX_PSC_CAL 0x2a018 ++ #define PLL1_RX_PSC_CAL 0x0 ++ ++#define X100_PHY0_PLL0_XCVR_CTRL 0x183a8 ++#define X100_PHY1_PLL0_XCVR_CTRL 0x903a8 ++ #define PLL0_XCVR_CTRL 0xf ++#define X100_PHY0_PLL1_XCVR_CTRL 0x1a3a8 ++ #define PLL1_XCVR_CTRL 0xf ++ ++#define X100_PHY0_PLL0_RX_GCSM1_CTRL 0x28420 ++#define X100_PHY1_PLL0_RX_GCSM1_CTRL 0xa0420 ++ #define PLL0_RX_GCSM1_CTRL 0x0 ++#define X100_PHY0_PLL0_RX_GCSM2_CTRL 0x28440 ++#define X100_PHY1_PLL0_RX_GCSM2_CTRL 0xa0440 ++ #define PLL0_RX_GCSM2_CTRL 0x0 ++#define X100_PHY0_PLL0_RX_PERGCSM_CTRL 0x28460 ++#define X100_PHY1_PLL0_RX_PERGCSM_CTRL 0xa0460 ++ #define PLL0_RX_PERGCSM_CTRL 0x0 ++ ++#define X100_PHY0_PLL1_RX_GCSM1_CTRL 0x2a420 ++ #define PLL1_RX_GCSM1_CTRL 0x0 ++#define X100_PHY0_PLL1_RX_GCSM2_CTRL 0x2a440 ++ #define PLL1_RX_GCSM2_CTRL 0x0 ++#define X100_PHY0_PLL1_RX_PERGCSM_CTRL 0x2a460 ++ #define PLL1_RX_PERGCSM_CTRL 0x0 ++ ++/* swing and emphasis */ ++#define X100_PHY0_PLL0_TX_DIAG_ACYA 0x1879c ++#define X100_PHY0_PLL1_TX_DIAG_ACYA 0x1a79c ++#define X100_PHY1_PLL0_TX_DIAG_ACYA 0x9079c ++ #define LOCK 1 ++ #define UNLOCK 0 ++ ++#define X100_PHY0_PLL0_TX_TXCC_CTRL 0x18100 ++#define X100_PHY0_PLL1_TX_TXCC_CTRL 0x1a100 ++#define X100_PHY1_PLL0_TX_TXCC_CTRL 0x90100 ++ #define TX_TXCC_CTRL 0x8a4 ++ ++#define X100_PHY0_PLL0_TX_DRV 0x18318 ++#define X100_PHY0_PLL1_TX_DRV 0x1a318 ++#define X100_PHY1_PLL0_TX_DRV 0x90318 ++ #define TX_DRV 0x3 ++ ++#define X100_PHY0_PLL0_TX_MGNFS 0x18140 ++#define X100_PHY0_PLL1_TX_MGNFS 0x1a140 ++#define 
X100_PHY1_PLL0_TX_MGNFS 0x90140 ++ ++#define X100_PHY0_PLL0_TX_CPOST 0x18130 ++#define X100_PHY0_PLL1_TX_CPOST 0x1a130 ++#define X100_PHY0_PLL1_TX_CPOST1 0x1a13c ++#define X100_PHY1_PLL0_TX_CPOST 0x90130 ++ ++/******************************phy register end********************************************/ ++#endif /* __X100_REG_H__ */ +diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c +index a4c5fdac8b50..a6718da51717 100644 +--- a/drivers/iommu/arm-smmu.c ++++ b/drivers/iommu/arm-smmu.c +@@ -58,6 +58,10 @@ + #include "io-pgtable.h" + #include "arm-smmu-regs.h" + ++#ifdef CONFIG_ARCH_PHYTIUM ++#define FWID_READ(id) (((u16)(id) >> 3) | (((id) >> SMR_MASK_SHIFT | 0x7000) << SMR_MASK_SHIFT)) ++#endif ++ + #define ARM_MMU500_ACTLR_CPRE (1 << 1) + + #define ARM_MMU500_ACR_CACHE_LOCK (1 << 26) +@@ -983,11 +987,6 @@ static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx) + if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid) + reg |= SMR_VALID; + writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx)); +- +- if ((read_cpuid_id() & MIDR_CPU_MODEL_MASK) == MIDR_PHYTIUM_FT2000PLUS) { +- u32 tmp = 0xf0000000 | ((reg & 0xffff) >> 3); +- writel_relaxed(tmp, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx + 32)); +- } + } + + static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx) +@@ -1001,9 +1000,6 @@ static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx) + smmu->smrs[idx].valid) + reg |= S2CR_EXIDVALID; + writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx)); +- +- if ((read_cpuid_id() & MIDR_CPU_MODEL_MASK) == MIDR_PHYTIUM_FT2000PLUS) +- writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx + 32)); + } + + static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx) +@@ -1386,6 +1382,17 @@ static int arm_smmu_add_device(struct device *dev) + return -ENODEV; + } + ++#ifdef CONFIG_ARCH_PHYTIUM ++ /* FT2000PLUS workaround patch */ ++ if ((read_cpuid_id() & MIDR_CPU_MODEL_MASK) == 
MIDR_PHYTIUM_FT2000PLUS) { ++ int num = fwspec->num_ids; ++ for (i = 0; i < num; i++) { ++ u32 fwid = FWID_READ(fwspec->ids[i]); ++ iommu_fwspec_add_ids(dev, &fwid, 1); ++ } ++ } ++#endif ++ + ret = -EINVAL; + for (i = 0; i < fwspec->num_ids; i++) { + u16 sid = fwspec->ids[i]; +diff --git a/drivers/irqchip/irq-gic-phytium-2500-its.c b/drivers/irqchip/irq-gic-phytium-2500-its.c +index c313d2b3d477..03be44d3782f 100644 +--- a/drivers/irqchip/irq-gic-phytium-2500-its.c ++++ b/drivers/irqchip/irq-gic-phytium-2500-its.c +@@ -2128,6 +2128,8 @@ static int its_alloc_tables(struct its_node *its) + indirect = its_parse_indirect_baser(its, baser, + psz, &order, + its->device_ids); ++ break; ++ + case GITS_BASER_TYPE_VCPU: + indirect = its_parse_indirect_baser(its, baser, + psz, &order, +diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c +index ee0dec17786a..c21b8b8cd934 100644 +--- a/drivers/irqchip/irq-gic-v3-its.c ++++ b/drivers/irqchip/irq-gic-v3-its.c +@@ -2077,6 +2077,8 @@ static int its_alloc_tables(struct its_node *its) + indirect = its_parse_indirect_baser(its, baser, + psz, &order, + its->device_ids); ++ break; ++ + case GITS_BASER_TYPE_VCPU: + indirect = its_parse_indirect_baser(its, baser, + psz, &order, +diff --git a/drivers/mmc/host/phytium-mci-pci.c b/drivers/mmc/host/phytium-mci-pci.c +index 904272e69629..dda6089dce5f 100644 +--- a/drivers/mmc/host/phytium-mci-pci.c ++++ b/drivers/mmc/host/phytium-mci-pci.c +@@ -15,8 +15,7 @@ + #include "phytium-mci.h" + + static u32 sd_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | +- MMC_CAP_CMD23 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR50| +- MMC_CAP_4_BIT_DATA; ++ MMC_CAP_CMD23 | MMC_CAP_4_BIT_DATA; + static u32 sd_caps2 = MMC_CAP2_NO_MMC; + + static u32 emmc_caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA | MMC_CAP_WAIT_WHILE_BUSY | +diff --git a/drivers/net/can/phytium_can_pci.c b/drivers/net/can/phytium_can_pci.c +index 40c7969f00e9..87c99a701680 
100644 +--- a/drivers/net/can/phytium_can_pci.c ++++ b/drivers/net/can/phytium_can_pci.c +@@ -17,7 +17,7 @@ + #define DRV_NAME "phytium_can_pci" + + #define TX_MAX 64 +-#define CLK_FREQ 600000000 ++#define CLK_FREQ 480000000 + + static const struct can_bittiming_const phytium_ext_bittiming_const = { + .name = "phytium_can_ext", +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c +index 6bc27b7fd452..4109a2fa1df6 100644 +--- a/drivers/pci/quirks.c ++++ b/drivers/pci/quirks.c +@@ -4858,6 +4858,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0142, quirk_no_ext_tags); + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0144, quirk_no_ext_tags); + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0420, quirk_no_ext_tags); + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PHYTIUM, 0xdc3a, quirk_no_ext_tags); + + #ifdef CONFIG_PCI_ATS + /* +diff --git a/drivers/spi/spi-phytium-plat.c b/drivers/spi/spi-phytium-plat.c +index 384b134ce039..04c2b5d2fb95 100644 +--- a/drivers/spi/spi-phytium-plat.c ++++ b/drivers/spi/spi-phytium-plat.c +@@ -109,11 +109,7 @@ static int phytium_spi_probe(struct platform_device *pdev) + goto out; + } + } +- } +- +- fts->global_cs = device_get_match_data(&pdev->dev); +- +- if (pdev->dev.fwnode) { ++ } else if(has_acpi_companion(&pdev->dev)) { + int n; + int *cs; + struct gpio_desc *gpiod; +@@ -137,6 +133,8 @@ static int phytium_spi_probe(struct platform_device *pdev) + } + } + ++ fts->global_cs = device_get_match_data(&pdev->dev); ++ + ret = phytium_spi_add_host(&pdev->dev, fts); + if (ret) + goto out; +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c +index 462ce49f683a..fe200d1a69fd 100644 +--- a/drivers/usb/core/hub.c ++++ b/drivers/usb/core/hub.c +@@ -107,6 +107,8 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem); + static void hub_release(struct kref *kref); + static int usb_reset_and_verify_device(struct usb_device *udev); + static int 
hub_port_disable(struct usb_hub *hub, int port1, int set_state); ++static bool hub_port_warm_reset_required(struct usb_hub *hub, int port1, ++ u16 portstatus); + + static inline char *portspeed(struct usb_hub *hub, int portstatus) + { +@@ -1111,6 +1113,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) + USB_PORT_FEAT_ENABLE); + } + ++ /* Make sure a warm-reset request is handled by port_event */ ++ if (type == HUB_RESUME && ++ hub_port_warm_reset_required(hub, port1, portstatus)) ++ set_bit(port1, hub->event_bits); ++ + /* Clear status-change flags; we'll debounce later */ + if (portchange & USB_PORT_STAT_C_CONNECTION) { + need_debounce_delay = true; +@@ -2846,7 +2853,9 @@ static int hub_port_reset(struct usb_hub *hub, int port1, + USB_PORT_FEAT_C_BH_PORT_RESET); + usb_clear_port_feature(hub->hdev, port1, + USB_PORT_FEAT_C_PORT_LINK_STATE); +- usb_clear_port_feature(hub->hdev, port1, ++ ++ if (udev) ++ usb_clear_port_feature(hub->hdev, port1, + USB_PORT_FEAT_C_CONNECTION); + + /* +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c +index b1f27aa38b10..dddc6b2facb9 100644 +--- a/drivers/usb/host/xhci-mem.c ++++ b/drivers/usb/host/xhci-mem.c +@@ -2137,6 +2137,10 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, + if (rhub->min_rev < minor_revision) + rhub->min_rev = minor_revision; + ++ if (xhci->quirks & XHCI_SLOWDOWN_QUIRK) ++ if (major_revision == 0x03) ++ rhub->min_rev = 0; ++ + /* Port offset and count in the third dword, see section 7.2 */ + temp = readl(addr + 2); + port_offset = XHCI_EXT_PORT_OFF(temp); +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c +index 9a10e3baca85..a722fd5100fd 100644 +--- a/drivers/usb/host/xhci-pci.c ++++ b/drivers/usb/host/xhci-pci.c +@@ -214,8 +214,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) + xhci->quirks |= XHCI_RESET_ON_RESUME; + + if (pdev->vendor == PCI_VENDOR_ID_PHYTIUM || +- pdev->device 
== PCI_DEVICE_ID_PHYTIUM_XHCI) ++ pdev->device == PCI_DEVICE_ID_PHYTIUM_XHCI) { + xhci->quirks |= XHCI_RESET_ON_RESUME; ++ xhci->quirks |= XHCI_SLOWDOWN_QUIRK; ++ } ++ + /* See https://bugzilla.kernel.org/show_bug.cgi?id=79511 */ + if (pdev->vendor == PCI_VENDOR_ID_VIA && + pdev->device == 0x3432) +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h +index 6230a578324c..bfae3b2765d4 100644 +--- a/drivers/usb/host/xhci.h ++++ b/drivers/usb/host/xhci.h +@@ -1846,6 +1846,7 @@ struct xhci_hcd { + #define XHCI_SUSPEND_DELAY BIT_ULL(30) + #define XHCI_INTEL_USB_ROLE_SW BIT_ULL(31) + #define XHCI_ZERO_64B_REGS BIT_ULL(32) ++#define XHCI_SLOWDOWN_QUIRK BIT_ULL(33) + + unsigned int num_active_eps; + unsigned int limit_active_eps; +diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c +index c7471c3fb798..16c09cda3b02 100644 +--- a/kernel/locking/qrwlock.c ++++ b/kernel/locking/qrwlock.c +@@ -70,6 +70,8 @@ EXPORT_SYMBOL(queued_read_lock_slowpath); + */ + void queued_write_lock_slowpath(struct qrwlock *lock) + { ++ int cnts; ++ + /* Put the writer into the wait queue */ + arch_spin_lock(&lock->wait_lock); + +@@ -83,9 +85,8 @@ void queued_write_lock_slowpath(struct qrwlock *lock) + + /* When no more readers or writers, set the locked flag */ + do { +- atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING); +- } while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING, +- _QW_LOCKED) != _QW_WAITING); ++ cnts = atomic_cond_read_relaxed(&lock->cnts, VAL == _QW_WAITING); ++ } while (!atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)); + unlock: + arch_spin_unlock(&lock->wait_lock); + } +diff --git a/sound/soc/phytium/phytium_i2s.c b/sound/soc/phytium/phytium_i2s.c +index e995a94c8cc4..9c6ab16d83f7 100644 +--- a/sound/soc/phytium/phytium_i2s.c ++++ b/sound/soc/phytium/phytium_i2s.c +@@ -850,17 +850,24 @@ static void phytium_pcm_free(struct snd_pcm *pcm) + + void snd_i2s_stream_cleanup(struct i2s_stream *azx_dev) + { ++ int cnt = 10; + if 
(azx_dev->sd_addr) { + if (azx_dev->direction == SNDRV_PCM_STREAM_PLAYBACK) { + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0), 0); +- udelay(50); ++ while (cnt--) { ++ if (i2s_read_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0)) == 0) ++ break; ++ } + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0), 2); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0), 0); + i2s_write_reg(azx_dev->sd_addr, DMA_BDLPL(0), 0); + i2s_write_reg(azx_dev->sd_addr, DMA_BDLPU(0), 0); + } else { + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(1), 0); +- udelay(50); ++ while (cnt--) { ++ if (i2s_read_reg(azx_dev->sd_addr, DMA_CHALX_CTL(1)) == 0) ++ break; ++ } + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(1), 2); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(1), 0); + i2s_write_reg(azx_dev->sd_addr, DMA_BDLPL(1), 0); +diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c +index a2a175b08b17..2b21d4fee771 100644 +--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c ++++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c +@@ -332,7 +332,7 @@ u64 vgic_sanitise_outer_cacheability(u64 field) + case GIC_BASER_CACHE_nC: + return field; + default: +- return GIC_BASER_CACHE_nC; ++ return GIC_BASER_CACHE_SameAsInner; + } + } + diff --git a/patch/patch-phytium-4.19.9 b/patch/patch-phytium-4.19.9 new file mode 100755 index 000000000000..69152f90cb01 --- /dev/null +++ b/patch/patch-phytium-4.19.9 @@ -0,0 +1,45611 @@ +diff --git a/Documentation/devicetree/bindings/arm/cpus.txt b/Documentation/devicetree/bindings/arm/cpus.txt +index 96dfccc0faa8..046a47e49a94 100644 +--- a/Documentation/devicetree/bindings/arm/cpus.txt ++++ b/Documentation/devicetree/bindings/arm/cpus.txt +@@ -184,6 +184,10 @@ described below. 
+ "nvidia,tegra132-denver" + "nvidia,tegra186-denver" + "nvidia,tegra194-carmel" ++ "phytium,ftc660" ++ "phytium,ftc661" ++ "phytium,ftc662" ++ "phytium,ftc663" + "qcom,krait" + "qcom,kryo" + "qcom,kryo385" +diff --git a/Documentation/devicetree/bindings/gpio/gpio-phytium.txt b/Documentation/devicetree/bindings/gpio/gpio-phytium.txt +new file mode 100644 +index 000000000000..77d4c6c03d00 +--- /dev/null ++++ b/Documentation/devicetree/bindings/gpio/gpio-phytium.txt +@@ -0,0 +1,47 @@ ++* Phytium GPIO controller ++ ++Required properties: ++- compatible : Should contain "phytium,gpio" ++- reg : Address and length of the register set for the device. ++- interrupts: Interrupt mapping for GPIO IRQ. ++- gpio-controller : Marks the device node as a gpio controller. ++- #gpio-cells : Should be 2. The first cell is the pin number and ++ the second cell is used to specify the gpio polarity: ++ 0 = active high ++ 1 = active low ++- #address-cells : should be 1 (for addressing port subnodes). ++- #size-cells : should be 0 (port subnodes). ++ ++The GPIO controller has two ports, each of which are represented as child ++nodes with the following properties: ++ ++Required properties: ++- compatible : "phytium,gpio-port" ++- reg : The integer port index of the port, a single cell. ++ ++Optional properties: ++- nr-gpios : The number of pins in the port, a single cell. 
++ ++Example: ++ ++gpio: gpio@28004000 { ++ compatible = "phytium,gpio"; ++ reg = <0x0 0x28004000 0x0 0x1000>; ++ interrupts = ; ++ gpio-controller; ++ #gpio-cells = <2>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ porta { ++ compatible = "phytium,gpio-port"; ++ reg = <0>; ++ nr-gpios = <8>; ++ }; ++ ++ portb { ++ compatible = "phytium,gpio-port"; ++ reg = <1>; ++ nr-gpios = <8>; ++ }; ++}; +diff --git a/Documentation/devicetree/bindings/interrupt-controller/phytium,d2000-ixic.txt b/Documentation/devicetree/bindings/interrupt-controller/phytium,d2000-ixic.txt +new file mode 100644 +index 000000000000..02fd7ce3b5b9 +--- /dev/null ++++ b/Documentation/devicetree/bindings/interrupt-controller/phytium,d2000-ixic.txt +@@ -0,0 +1,29 @@ ++Phytium INTx interrupt controller (IXIC) ++ ++This is a psuedo interrupt controller to handle PCI legacy interrupt on ++Phytium D2000 and FT-2000/4C SoC, which sits between the PCI INTx devices ++and the GIC and forwards the 4 INTx input signals to 4 adjacent GICv3 SPIs. ++ ++Required properties: ++ ++- compatible : "phytium,d2000-ixic" ++ "phytium,ft2004c-ixic" ++- reg : Specifies two regions of the register set, which ++ are called 'ctr' and 'hpb'. ++- interrupt-controller : Identifies the node as an interrupt controller. ++- #interrupt-cells : Specifies the number of cells needed to encode an ++ interrupt source. The value must be 3. ++- intx-spi-base : The SPI number of the first SPI of the 4 adjacent ++ ones the IXIC forwards its interrupts to. 
++ ++Example: ++ ixic: interrupt-controller@29000000 { ++ compatible = "phytium,d2000-ixic"; ++ reg-names = "ctr", "hpb"; ++ reg = <0x0 0x29000000 0x0 0x00060000>, ++ <0x0 0x29100000 0x0 0x00002000>; ++ interrupt-controller; ++ interrupt-parent = <&gic>; ++ #interrupt-cells = <3>; ++ intx-spi-base = <28>; ++ }; +diff --git a/Documentation/devicetree/bindings/mailbox/phytium-mailbox.txt b/Documentation/devicetree/bindings/mailbox/phytium-mailbox.txt +new file mode 100644 +index 000000000000..4d6f5a44f6e4 +--- /dev/null ++++ b/Documentation/devicetree/bindings/mailbox/phytium-mailbox.txt +@@ -0,0 +1,32 @@ ++Phytium Mailbox Driver ++====================== ++ ++The Phytium mailbox controller that has a channel/link to communicate ++with the remote end. A link raises interrupt for any received data. However, ++there is no specified way of knowing if the sent data has been read by the ++remote. This driver assumes the sender polls STAT register and the remote ++clears it after having read the data. ++ ++Mailbox Device Node: ++==================== ++ ++Required properties: ++-------------------- ++- compatible: Shall be "phytium,mbox" ++- reg: Contains the mailbox register address range (base ++ address and length) ++- #mbox-cells Shall be 1 - the index of the channel needed. ++- interrupts: Contains the interrupt information corresponding to ++ the link. 
++ ++Example: ++-------- ++ ++mbox: mailbox@2a000000 { ++ compatible = "phytium,mbox"; ++ reg = <0x0 0x2a000000 0x0 0x1000>; ++ #mbox-cells = <1>; ++ interrupts = <0 48 4>; ++ clocks = <&sycclk>; ++ clock-names = "apb_pclk"; ++}; +diff --git a/Documentation/devicetree/bindings/pci/phytium,phytium-pcie-ep.txt b/Documentation/devicetree/bindings/pci/phytium,phytium-pcie-ep.txt +new file mode 100644 +index 000000000000..2e40d1e6ee98 +--- /dev/null ++++ b/Documentation/devicetree/bindings/pci/phytium,phytium-pcie-ep.txt +@@ -0,0 +1,21 @@ ++* Phytium PCIe endpoint controller ++ ++Required properties: ++- compatible: Should contain "phytium,phytium-pcie-ep" to identify the IP used. ++- reg: Should contain the controller register base address, AXI interface ++ region base address and hpb register base address respectively. ++- reg-names: Must be "reg", "mem" and "hpb" respectively. ++- max-outbound-regions: Set to maximum number of outbound regions. ++- max-functions: Maximum number of functions that can be configured (default 1). 
++ ++Example: ++ ++ep0: ep@0x29030000 { ++ compatible = "phytium,d2000-pcie-ep"; ++ reg = <0x0 0x29030000 0x0 0x10000>, ++ <0x11 0x00000000 0x1 0x00000000>, ++ <0x0 0x29101000 0x0 0x1000>; ++ reg-names = "reg", "mem", "hpb"; ++ max-outbound-regions = <3>; ++ max-functions = /bits/ 8 <1>; ++}; +diff --git a/Documentation/devicetree/bindings/spi/spi-phytium.txt b/Documentation/devicetree/bindings/spi/spi-phytium.txt +new file mode 100644 +index 000000000000..a674d192132c +--- /dev/null ++++ b/Documentation/devicetree/bindings/spi/spi-phytium.txt +@@ -0,0 +1,24 @@ ++Phytium SPI controller ++ ++Required properties: ++- compatible: should be "phytium,spi" ++- #address-cells: see spi-bus.txt ++- #size-cells: see spi-bus.txt ++- reg: address and length of the spi master registers ++- interrupts: should contain one interrupt ++- clocks: spi clock phandle ++- num-cs: see spi-bus.txt ++ ++Optional properties: ++- cs-gpios: see spi-bus.txt ++ ++Example: ++ ++ ++spi0: spi@2800c000 { ++ compatible = "phytium,spi"; ++ interrupts = ; ++ reg = <0x0 0x2800c000 0x0 0x1000>; ++ clocks = <&sysclk_48mhz>; ++ num-cs = <4>; ++}; +diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt +index 2c3fc512e746..807273c439bc 100644 +--- a/Documentation/devicetree/bindings/vendor-prefixes.txt ++++ b/Documentation/devicetree/bindings/vendor-prefixes.txt +@@ -292,6 +292,7 @@ parade Parade Technologies Inc. + pericom Pericom Technology Inc. + pervasive Pervasive Displays, Inc. + phytec PHYTEC Messtechnik GmbH ++phytium Phytium Information Technology Co., Ltd. 
+ picochip Picochip Ltd + pine64 Pine64 + pixcir PIXCIR MICROELECTRONICS Co., Ltd +diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig +index 1b1a0e95c751..b4c1f1f55aec 100644 +--- a/arch/arm64/Kconfig ++++ b/arch/arm64/Kconfig +@@ -75,6 +75,7 @@ config ARM64 + select CLONE_BACKWARDS + select COMMON_CLK + select CPU_PM if (SUSPEND || CPU_IDLE) ++ select CRC32 + select DCACHE_WORD_ACCESS + select DMA_DIRECT_OPS + select EDAC_SUPPORT +diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms +index 393d2b524284..636822d28503 100644 +--- a/arch/arm64/Kconfig.platforms ++++ b/arch/arm64/Kconfig.platforms +@@ -139,6 +139,12 @@ config ARCH_MVEBU + - Armada 7K SoC Family + - Armada 8K SoC Family + ++config ARCH_PHYTIUM ++ bool "Phytium SoC Family" ++ help ++ This enables support for Phytium ARMv8 SoC family. ++ select ARM_GIC_PHYTIUM_2500 ++ + config ARCH_QCOM + bool "Qualcomm Platforms" + select GPIOLIB +diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile +index 4690364d584b..ff8820d78db5 100644 +--- a/arch/arm64/boot/dts/Makefile ++++ b/arch/arm64/boot/dts/Makefile +@@ -16,6 +16,7 @@ subdir-y += lg + subdir-y += marvell + subdir-y += mediatek + subdir-y += nvidia ++subdir-y += phytium + subdir-y += qcom + subdir-y += realtek + subdir-y += renesas +diff --git a/arch/arm64/boot/dts/phytium/Makefile b/arch/arm64/boot/dts/phytium/Makefile +new file mode 100644 +index 000000000000..8a37a6a01ec2 +--- /dev/null ++++ b/arch/arm64/boot/dts/phytium/Makefile +@@ -0,0 +1,12 @@ ++dtb-$(CONFIG_ARCH_PHYTIUM) += ft2004-devboard-d4-dsk.dtb ++dtb-$(CONFIG_ARCH_PHYTIUM) += ft2004c-devboard-dsk.dtb ++dtb-$(CONFIG_ARCH_PHYTIUM) += ft1500a-devboard-16c-dsk.dtb ++dtb-$(CONFIG_ARCH_PHYTIUM) += ft2000plus-SR-devboard-64c-dsk.dtb ++dtb-$(CONFIG_ARCH_PHYTIUM) += ft2000plus-MR-devboard-64c-dsk.dtb ++dtb-$(CONFIG_ARCH_PHYTIUM) += ft2000ahk-devboard-dsk.dtb ++dtb-$(CONFIG_ARCH_PHYTIUM) += d2000-devboard-dsk.dtb ++dtb-$(CONFIG_ARCH_PHYTIUM) += 
ft2000ahke-devboard-dsk.dtb ++ ++always := $(dtb-y) ++subdir-y := $(dts-dirs) ++clean-files := *.dtb +diff --git a/arch/arm64/boot/dts/phytium/d2000-devboard-dsk.dts b/arch/arm64/boot/dts/phytium/d2000-devboard-dsk.dts +new file mode 100644 +index 000000000000..5519213c53e3 +--- /dev/null ++++ b/arch/arm64/boot/dts/phytium/d2000-devboard-dsk.dts +@@ -0,0 +1,73 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * DTS file for Phytium D2000 devboard ++ * ++ * Copyright (C) 2020, Phytium Technology Co., Ltd. ++ */ ++ ++/dts-v1/; ++/memreserve/ 0x80000000 0x10000; ++ ++#include "d2000-generic-psci-soc.dtsi" ++ ++/{ ++ model = "D2000 Development Board"; ++ compatible = "phytium,d2000"; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ++ chosen { ++ stdout-path = "uart1:115200n8"; ++ }; ++ ++ memory@00{ ++ device_type = "memory"; ++ reg = <0x0 0x80000000 0x1 0x00000000>; ++ }; ++ ++ memory@01{ ++ device_type = "memory"; ++ reg = <0x20 0x00000000 0x1 0x00000000>; ++ }; ++ ++ firmware { ++ optee { ++ compatible = "linaro,optee-tz"; ++ method = "smc"; ++ }; ++ }; ++}; ++ ++&rtc0 { ++ status = "ok"; ++}; ++ ++&uart1 { ++ status = "ok"; ++}; ++ ++&gmac0 { ++ status = "ok"; ++ phy-mode = "rgmii-txid"; ++}; ++ ++&gmac1 { ++ status = "ok"; ++ phy-mode = "rgmii-txid"; ++}; ++ ++&spi0 { ++ status = "ok"; ++}; ++ ++&qspi { ++ status = "ok"; ++}; ++ ++&i2c0 { ++ status = "ok"; ++}; ++ ++&i2c1 { ++ status = "ok"; ++}; +diff --git a/arch/arm64/boot/dts/phytium/d2000-generic-psci-soc.dtsi b/arch/arm64/boot/dts/phytium/d2000-generic-psci-soc.dtsi +new file mode 100644 +index 000000000000..c7be57a2c2aa +--- /dev/null ++++ b/arch/arm64/boot/dts/phytium/d2000-generic-psci-soc.dtsi +@@ -0,0 +1,525 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * dts file for Phytium D2000 SoC ++ * ++ * Copyright (C) 2020, Phytium Technology Co., Ltd. 
++ */ ++ ++#include ++ ++/ { ++ compatible = "phytium,d2000"; ++ interrupt-parent = <&gic>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ++ aliases { ++ ethernet0 = &gmac0; ++ ethernet1 = &gmac1; ++ }; ++ ++ psci { ++ compatible = "arm,psci-1.0"; ++ method = "smc"; ++ cpu_suspend = <0xc4000001>; ++ cpu_off = <0x84000002>; ++ cpu_on = <0xc4000003>; ++ sys_poweroff = <0x84000008>; ++ sys_reset = <0x84000009>; ++ }; ++ ++ cpus { ++ #address-cells = <0x2>; ++ #size-cells = <0x0>; ++ ++ cpu0: cpu@0 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc663", "arm,armv8"; ++ reg = <0x0 0x0>; ++ enable-method = "psci"; ++ numa-node-id = <0>; ++ clocks = <&scpi_dvfs 0>; ++ }; ++ ++ cpu1: cpu@1 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc663", "arm,armv8"; ++ reg = <0x0 0x1>; ++ enable-method = "psci"; ++ numa-node-id = <0>; ++ clocks = <&scpi_dvfs 0>; ++ }; ++ ++ cpu2: cpu@100 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc663", "arm,armv8"; ++ reg = <0x0 0x100>; ++ enable-method = "psci"; ++ numa-node-id = <0>; ++ clocks = <&scpi_dvfs 1>; ++ }; ++ ++ cpu3: cpu@101 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc663", "arm,armv8"; ++ reg = <0x0 0x101>; ++ enable-method = "psci"; ++ numa-node-id = <0>; ++ clocks = <&scpi_dvfs 1>; ++ }; ++ ++ cpu4: cpu@200 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc663", "arm,armv8"; ++ reg = <0x0 0x200>; ++ enable-method = "psci"; ++ numa-node-id = <0>; ++ clocks = <&scpi_dvfs 2>; ++ }; ++ ++ cpu5: cpu@201 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc663", "arm,armv8"; ++ reg = <0x0 0x201>; ++ enable-method = "psci"; ++ numa-node-id = <0>; ++ clocks = <&scpi_dvfs 2>; ++ }; ++ ++ cpu6: cpu@300 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc663", "arm,armv8"; ++ reg = <0x0 0x300>; ++ enable-method = "psci"; ++ numa-node-id = <0>; ++ clocks = <&scpi_dvfs 3>; ++ }; ++ ++ cpu7: cpu@301 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc663", "arm,armv8"; ++ reg = <0x0 0x301>; ++ 
enable-method = "psci"; ++ numa-node-id = <0>; ++ clocks = <&scpi_dvfs 3>; ++ }; ++ }; ++ ++ gic: interrupt-controller@29900000 { ++ compatible = "arm,gic-v3"; ++ #interrupt-cells = <3>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ interrupt-controller; ++ reg = <0x0 0x29a00000 0 0x20000>, /* GICD */ ++ <0x0 0x29b00000 0 0x100000>, /* GICR */ ++ <0x0 0x29c00000 0 0x10000>, /* GICC */ ++ <0x0 0x29c10000 0 0x10000>, /* GICH */ ++ <0x0 0x29c20000 0 0x10000>; /* GICV */ ++ interrupts = ; ++ ++ its: gic-its@29920000 { ++ compatible = "arm,gic-v3-its"; ++ msi-controller; ++ reg = <0x0 0x29a20000 0x0 0x20000>; ++ }; ++ }; ++ ++ timer { ++ compatible = "arm,armv8-timer"; ++ interrupts = , ++ , ++ , ++ ; ++ clock-frequency = <48000000>; ++ }; ++ ++ pmu { ++ compatible = "arm,armv8-pmuv3"; ++ interrupts = ; ++ }; ++ ++ clocks { ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ ++ clk250mhz: clk250mhz { ++ compatible = "fixed-clock"; ++ #clock-cells = <0>; ++ clock-frequency = <250000000>; ++ }; ++ ++ sysclk_48mhz: clk48mhz { ++ compatible = "fixed-clock"; ++ #clock-cells = <0>; ++ clock-frequency = <48000000>; ++ }; ++ ++ sysclk_600mhz: clk600mhz { ++ compatible = "fixed-clock"; ++ #clock-cells = <0>; ++ clock-frequency = <600000000>; ++ }; ++ }; ++ ++ soc { ++ compatible = "simple-bus"; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ dma-coherent; ++ ranges; ++ ++ gpio0: gpio@28004000 { ++ compatible = "phytium,gpio"; ++ reg = <0x0 0x28004000 0x0 0x1000>; ++ interrupts = ; ++ gpio-controller; ++ #gpio-cells = <2>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ porta { ++ compatible = "phytium,gpio-port"; ++ reg = <0>; ++ nr-gpios = <8>; ++ }; ++ ++ portb { ++ compatible = "phytium,gpio-port"; ++ reg = <1>; ++ nr-gpios = <8>; ++ }; ++ }; ++ ++ gpio1: gpio@28005000 { ++ compatible = "phytium,gpio"; ++ reg = <0x0 0x28005000 0x0 0x1000>; ++ interrupts = ; ++ gpio-controller; ++ #gpio-cells = <2>; ++ #address-cells = <1>; ++ #size-cells = <0>; 
++ ++ porta { ++ compatible = "phytium,gpio-port"; ++ reg = <0>; ++ nr-gpios = <8>; ++ }; ++ ++ portb { ++ compatible = "phytium,gpio-port"; ++ reg = <1>; ++ nr-gpios = <8>; ++ }; ++ }; ++ ++ uart0: uart@28000000 { ++ compatible = "arm,pl011", "arm,primecell"; ++ reg = <0x0 0x28000000 0x0 0x1000>; ++ baud = <115200>; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ interrupts = ; ++ clocks = <&sysclk_48mhz &sysclk_48mhz>; ++ clock-names = "uartclk", "apb_pclk"; ++ }; ++ ++ uart1: uart@28001000 { ++ compatible = "arm,pl011", "arm,primecell"; ++ reg = <0x0 0x28001000 0x0 0x1000>; ++ baud = <115200>; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ interrupts = ; ++ clocks = <&sysclk_48mhz &sysclk_48mhz>; ++ clock-names = "uartclk", "apb_pclk"; ++ }; ++ ++ uart2: uart@28002000 { ++ compatible = "arm,pl011", "arm,primecell"; ++ reg = <0x0 0x28002000 0x0 0x1000>; ++ baud = <115200>; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ interrupts = ; ++ clocks = <&sysclk_48mhz &sysclk_48mhz>; ++ clock-names = "uartclk", "apb_pclk"; ++ }; ++ ++ uart3: uart@28003000 { ++ compatible = "arm,pl011", "arm,primecell"; ++ reg = <0x0 0x28003000 0x0 0x1000>; ++ baud = <115200>; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ interrupts = ; ++ clocks = <&sysclk_48mhz &sysclk_48mhz>; ++ clock-names = "uartclk", "apb_pclk"; ++ }; ++ ++ sdci: sdci@28207c00 { ++ compatible = "phytium,sdci"; ++ reg = <0x0 0x28207c00 0x0 0x100>; ++ interrupts = , ++ , ++ ; ++ clocks = <&sysclk_600mhz &sysclk_600mhz>; ++ clock-names = "phytium_sdc_clk"; ++ no-sdio; ++ no-mmc; ++ no-dma-coherent; ++ }; ++ ++ watchdog0: watchdog@2800a000 { ++ compatible = "arm,sbsa-gwdt"; ++ reg = <0x0 0x2800b000 0x0 0x1000>, ++ <0x0 0x2800a000 0x0 0x1000>; ++ interrupts = ; ++ timeout-sec = <30>; ++ }; ++ ++ watchdog1: watchdog@28016000 { ++ compatible = "arm,sbsa-gwdt"; ++ reg = <0x0 0x28017000 0x0 0x1000>, ++ <0x0 0x28016000 0x0 0x1000>; ++ interrupts = ; ++ timeout-sec = <30>; ++ }; ++ ++ rtc0: rtc@2800d000 { ++ compatible = 
"phytium,rtc"; ++ reg = <0x0 0x2800d000 0x0 0x1000>; ++ clocks = <&sysclk_48mhz>; ++ clock-names = "rtc_pclk"; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ i2c0: i2c@28006000 { ++ compatible = "snps,designware-i2c"; ++ reg = <0x0 0x28006000 0x0 0x1000>; ++ interrupts = ; ++ clocks = <&sysclk_48mhz>; ++ status = "disabled"; ++ }; ++ ++ i2c1: i2c@28007000 { ++ compatible = "snps,designware-i2c"; ++ reg = <0x0 0x28007000 0x0 0x1000>; ++ interrupts = ; ++ clocks = <&sysclk_48mhz>; ++ status = "disabled"; ++ }; ++ ++ i2c2: i2c@28008000 { ++ compatible = "snps,designware-i2c"; ++ reg = <0x0 0x28008000 0x0 0x1000>; ++ interrupts = ; ++ clocks = <&sysclk_48mhz>; ++ status = "disabled"; ++ }; ++ ++ i2c3: i2c@28009000 { ++ compatible = "snps,designware-i2c"; ++ reg = <0x0 0x28009000 0x0 0x1000>; ++ interrupts = ; ++ clocks = <&sysclk_48mhz>; ++ status = "disabled"; ++ }; ++ ++ spi0: spi@2800c000 { ++ compatible = "phytium,spi"; ++ interrupts = ; ++ reg = <0x0 0x2800c000 0x0 0x1000>; ++ clocks = <&sysclk_48mhz>; ++ num-cs = <4>; ++ }; ++ ++ spi1: spi@28013000 { ++ compatible = "phytium,spi"; ++ interrupts = ; ++ reg = <0x0 0x28013000 0x0 0x1000>; ++ clocks = <&sysclk_48mhz>; ++ num-cs = <4>; ++ }; ++ ++ qspi: qspi@28014000 { ++ compatible = "phytium,qspi"; ++ reg = <0x0 0x28014000 0x0 0x1000>, ++ <0x0 0x0 0x0 0x02000000>; ++ reg-names = "qspi", "qspi_mm"; ++ clocks = <&sysclk_600mhz>; ++ ++ flash@0 { ++ spi-rx-bus-width = <1>; ++ spi-max-frequency = <600000000>; ++ }; ++ }; ++ ++ phytium_axi_setup: stmmac-axi-config { ++ snps,wr_osr_lmt = <0>; ++ snps,rd_osr_lmt = <0>; ++ snps,blen = <0 0 0 0 16 8 4>; ++ }; ++ ++ gmac0: eth@2820c000 { ++ compatible = "snps,dwmac"; ++ reg = <0x0 0x2820c000 0x0 0x2000>; ++ interrupts = ; ++ interrupt-names = "macirq"; ++ clocks = <&clk250mhz>; ++ clock-names = "stmmaceth"; ++ status = "disabled"; ++ ++ snps,pbl = <16>; ++ snps,fixed-burst; ++ snps,axi-config = <&phytium_axi_setup>; ++ snps,force_sf_dma_mode; ++ 
snps,multicast-filter-bins = <64>; ++ snps,perfect-filter-entries = <128>; ++ tx-fifo-depth = <4096>; ++ rx-fifo-depth = <4096>; ++ max-frame-size = <9000>; ++ }; ++ ++ gmac1: eth@28210000 { ++ compatible = "snps,dwmac"; ++ reg = <0x0 0x28210000 0x0 0x2000>; ++ interrupts = ; ++ interrupt-names = "macirq"; ++ clocks = <&clk250mhz>; ++ clock-names = "stmmaceth"; ++ status = "disabled"; ++ ++ snps,pbl = <16>; ++ snps,fixed-burst; ++ snps,axi-config = <&phytium_axi_setup>; ++ snps,force_sf_dma_mode; ++ snps,multicast-filter-bins = <64>; ++ snps,perfect-filter-entries = <128>; ++ snps,rx-queues-to-use = <2>; ++ tx-fifo-depth = <4096>; ++ rx-fifo-depth = <4096>; ++ max-frame-size = <9000>; ++ }; ++ ++ can0: can@28207000 { ++ compatible = "phytium,can"; ++ reg = <0x0 0x28207000 0x0 0x400>; ++ interrupts = ; ++ clocks = <&sysclk_600mhz>; ++ clock-names = "phytium_can_clk"; ++ tx-fifo-depth = <0x40>; ++ rx-fifo-depth = <0x40>; ++ extend_brp; ++ }; ++ ++ can1: can@28207400 { ++ compatible = "phytium,can"; ++ reg = <0x0 0x28207400 0x0 0x400>; ++ interrupts = ; ++ clocks = <&sysclk_600mhz>; ++ clock-names = "phytium_can_clk"; ++ tx-fifo-depth = <0x40>; ++ rx-fifo-depth = <0x40>; ++ extend_brp; ++ }; ++ ++ can2: can@028207800 { ++ compatible = "phytium,can"; ++ reg = <0x0 0x28207800 0x0 0x400>; ++ interrupts = ; ++ clocks = <&sysclk_600mhz>; ++ clock-names = "phytium_can_clk"; ++ tx-fifo-depth = <0x40>; ++ rx-fifo-depth = <0x40>; ++ extend_brp; ++ }; ++ ++ hda: hda@28206000 { ++ compatible = "phytium,hda"; ++ reg = <0 0x28206000 0x0 0x1000>; ++ interrupts = ; ++ clocks = <&sysclk_48mhz>; ++ clock-names = "phytium_hda_clk"; ++ }; ++ ++ mbox: mailbox@2a000000 { ++ compatible = "phytium,mbox"; ++ reg = <0x0 0x2a000000 0x0 0x1000>; ++ interrupts = ; ++ #mbox-cells = <1>; ++ clocks = <&sysclk_48mhz>; ++ clock-names = "apb_pclk"; ++ }; ++ ++ sram: sram@2a006000 { ++ compatible = "phytium,ft2004-sram-ns","mmio-sram"; ++ reg = <0x0 0x2a006000 0x0 0x2000>; ++ ++ #address-cells = <1>; 
++ #size-cells = <1>; ++ ranges = <0x0 0x0 0x2a006000 0x2000>; ++ ++ scpi_lpri: scpi-shmem@0 { ++ compatible = "phytium,ft2004-scpi-shmem"; ++ reg = <0x1000 0x800>; ++ }; ++ }; ++ ++ scpi_protocol: scpi { ++ compatible = "arm,scpi"; ++ mboxes = <&mbox 0>; ++ shmem = <&scpi_lpri>; ++ ++ clocks { ++ compatible = "arm,scpi-clocks"; ++ ++ scpi_dvfs: scpi_clocks@0 { ++ compatible = "arm,scpi-dvfs-clocks"; ++ #clock-cells = <1>; ++ clock-indices = <0>, <1>, <2>, <3>; ++ clock-output-names = "c0", "c1", "c2", "c3"; ++ }; ++ }; ++ ++ scpi_sensors: sensors { ++ compatible = "arm,scpi-sensors"; ++ #thermal-sensor-cells = <1>; ++ }; ++ }; ++ ++ ixic: interrupt-controller@29000000 { ++ compatible = "phytium,d2000-ixic"; ++ reg-names = "ctr", "hpb"; ++ reg = <0x0 0x29000000 0x0 0x00060000>, ++ <0x0 0x29100000 0x0 0x00002000>; ++ interrupt-controller; ++ interrupt-parent = <&gic>; ++ #interrupt-cells = <3>; ++ intx-spi-base = <28>; ++ }; ++ ++ pcie: pcie { ++ compatible = "pci-host-ecam-generic"; ++ device_type = "pci"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ #interrupt-cells = <1>; ++ reg = <0x0 0x40000000 0x0 0x10000000>; ++ msi-parent = <&its>; ++ bus-range = <0x0 0xff>; ++ interrupt-map-mask = <0x0 0x0 0x0 0x7>; ++ interrupt-map = <0x0 0x0 0x0 0x1 &ixic GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>, ++ <0x0 0x0 0x0 0x2 &ixic GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>, ++ <0x0 0x0 0x0 0x3 &ixic GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>, ++ <0x0 0x0 0x0 0x4 &ixic GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>; ++ ranges = <0x01000000 0x00 0x00000000 0x0 0x50000000 0x0 0x00f00000>, ++ <0x02000000 0x00 0x58000000 0x0 0x58000000 0x0 0x28000000>, ++ <0x03000000 0x10 0x00000000 0x10 0x00000000 0x10 0x00000000>; ++ }; ++ }; ++ ++}; +diff --git a/arch/arm64/boot/dts/phytium/ft1500a-16c-generic-psci-soc.dtsi b/arch/arm64/boot/dts/phytium/ft1500a-16c-generic-psci-soc.dtsi +new file mode 100644 +index 000000000000..a0c73fa3fbe8 +--- /dev/null ++++ b/arch/arm64/boot/dts/phytium/ft1500a-16c-generic-psci-soc.dtsi +@@ -0,0 +1,511 
@@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * dts file for FT-1500A SoC ++ * ++ * Copyright (C) 2019, Phytium Technology Co., Ltd. ++ */ ++ ++#include ++ ++/ { ++ compatible = "phytium,ft1500a"; ++ interrupt-parent = <&gic>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ++ aliases { ++ ethernet0 = &gmac0; ++ ethernet1 = &gmac1; ++ }; ++ ++ psci { ++ compatible = "arm,psci-1.0", "arm,psci-0.2", "arm,psci"; ++ method = "smc"; ++ cpu_suspend = <0xc4000001>; ++ cpu_off = <0x84000002>; ++ cpu_on = <0xc4000003>; ++ }; ++ ++ cpus { ++ #address-cells = <2>; ++ #size-cells = <0>; ++ ++ cpu-map { ++ cluster0 { ++ core0 { ++ cpu = <&cpu0>; ++ }; ++ core1 { ++ cpu = <&cpu1>; ++ }; ++ core2 { ++ cpu = <&cpu2>; ++ }; ++ core3 { ++ cpu = <&cpu3>; ++ }; ++ }; ++ ++ cluster1 { ++ core0 { ++ cpu = <&cpu4>; ++ }; ++ core1 { ++ cpu = <&cpu5>; ++ }; ++ core2 { ++ cpu = <&cpu6>; ++ }; ++ core3 { ++ cpu = <&cpu7>; ++ }; ++ }; ++ ++ cluster2 { ++ core0 { ++ cpu = <&cpu8>; ++ }; ++ core1 { ++ cpu = <&cpu9>; ++ }; ++ core2 { ++ cpu = <&cpu10>; ++ }; ++ core3 { ++ cpu = <&cpu11>; ++ }; ++ }; ++ ++ cluster3 { ++ core0 { ++ cpu = <&cpu12>; ++ }; ++ core1 { ++ cpu = <&cpu13>; ++ }; ++ core2 { ++ cpu = <&cpu14>; ++ }; ++ core3 { ++ cpu = <&cpu15>; ++ }; ++ }; ++ }; ++ ++ idle-states { ++ entry-method = "arm,psci"; ++ ++ CPU_SLEEP: cpu-sleep { ++ compatible = "arm,idle-state"; ++ local-timer-stop; ++ arm,psci-suspend-param = <0x0010000>; ++ entry-latency-us = <100>; ++ exit-latency-us = <100>; ++ min-residency-us = <200>; ++ }; ++ }; ++ ++ cpu0:cpu@0 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc660", "arm,armv8"; ++ reg = <0x0 0x000>; ++ enable-method = "psci"; ++ cpu-idle-states = <&CPU_SLEEP>; ++ clocks = <&cpuclk 0>; ++ clock-latency = <10000>; ++ cooling-min-level = <0>; /* cooling options */ ++ cooling-max-level = <5>; ++ #cooling-cells = <2>; /* min followed by max */ ++ }; ++ ++ cpu1:cpu@1 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc660", "arm,armv8"; ++ reg = 
<0x0 0x001>; ++ enable-method = "psci"; ++ cpu-idle-states = <&CPU_SLEEP>; ++ clocks = <&cpuclk 0>; ++ clock-latency = <10000>; ++ }; ++ ++ cpu2:cpu@2 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc660", "arm,armv8"; ++ reg = <0x0 0x002>; ++ enable-method = "psci"; ++ cpu-idle-states = <&CPU_SLEEP>; ++ clocks = <&cpuclk 0>; ++ clock-latency = <10000>; ++ }; ++ ++ cpu3:cpu@3 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc660", "arm,armv8"; ++ reg = <0x0 0x003>; ++ enable-method = "psci"; ++ cpu-idle-states = <&CPU_SLEEP>; ++ clocks = <&cpuclk 0>; ++ clock-latency = <10000>; ++ }; ++ ++ cpu4:cpu@100 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc660", "arm,armv8"; ++ reg = <0x0 0x100>; ++ enable-method = "psci"; ++ cpu-idle-states = <&CPU_SLEEP>; ++ clocks = <&cpuclk 1>; ++ clock-latency = <10000>; ++ cooling-min-level = <0>; /* cooling options */ ++ cooling-max-level = <5>; ++ #cooling-cells = <2>; /* min followed by max */ ++ }; ++ ++ cpu5:cpu@101 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc660", "arm,armv8"; ++ reg = <0x0 0x101>; ++ enable-method = "psci"; ++ cpu-idle-states = <&CPU_SLEEP>; ++ clocks = <&cpuclk 1>; ++ clock-latency = <10000>; ++ }; ++ ++ cpu6:cpu@102 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc660", "arm,armv8"; ++ reg = <0x0 0x102>; ++ enable-method = "psci"; ++ cpu-idle-states = <&CPU_SLEEP>; ++ clocks = <&cpuclk 1>; ++ clock-latency = <10000>; ++ }; ++ ++ cpu7:cpu@103 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc660", "arm,armv8"; ++ reg = <0x0 0x103>; ++ enable-method = "psci"; ++ cpu-idle-states = <&CPU_SLEEP>; ++ clocks = <&cpuclk 1>; ++ clock-latency = <10000>; ++ }; ++ ++ cpu8:cpu@200 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc660", "arm,armv8"; ++ reg = <0x0 0x200>; ++ enable-method = "psci"; ++ cpu-idle-states = <&CPU_SLEEP>; ++ clocks = <&cpuclk 2>; ++ clock-latency = <10000>; ++ cooling-min-level = <0>; /* cooling options */ ++ cooling-max-level = <5>; ++ #cooling-cells = 
<2>; /* min followed by max */ ++ }; ++ ++ cpu9:cpu@201 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc660", "arm,armv8"; ++ reg = <0x0 0x201>; ++ enable-method = "psci"; ++ cpu-idle-states = <&CPU_SLEEP>; ++ clocks = <&cpuclk 2>; ++ clock-latency = <10000>; ++ }; ++ ++ cpu10:cpu@202 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc660", "arm,armv8"; ++ reg = <0x0 0x202>; ++ enable-method = "psci"; ++ cpu-idle-states = <&CPU_SLEEP>; ++ clocks = <&cpuclk 2>; ++ clock-latency = <10000>; ++ }; ++ ++ cpu11:cpu@203 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc660", "arm,armv8"; ++ reg = <0x0 0x203>; ++ enable-method = "psci"; ++ cpu-idle-states = <&CPU_SLEEP>; ++ clocks = <&cpuclk 2>; ++ clock-latency = <10000>; ++ }; ++ ++ cpu12:cpu@300 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc660", "arm,armv8"; ++ reg = <0x0 0x300>; ++ enable-method = "psci"; ++ cpu-idle-states = <&CPU_SLEEP>; ++ clocks = <&cpuclk 3>; ++ clock-latency = <10000>; ++ cooling-min-level = <0>; /* cooling options */ ++ cooling-max-level = <5>; ++ #cooling-cells = <2>; /* min followed by max */ ++ }; ++ ++ cpu13:cpu@301 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc660", "arm,armv8"; ++ reg = <0x0 0x301>; ++ enable-method = "psci"; ++ cpu-idle-states = <&CPU_SLEEP>; ++ clocks = <&cpuclk 3>; ++ clock-latency = <10000>; ++ }; ++ ++ cpu14:cpu@302 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc660", "arm,armv8"; ++ reg = <0x0 0x302>; ++ enable-method = "psci"; ++ cpu-idle-states = <&CPU_SLEEP>; ++ clocks = <&cpuclk 3>; ++ clock-latency = <10000>; ++ }; ++ ++ cpu15:cpu@303 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc660", "arm,armv8"; ++ reg = <0x0 0x303>; ++ enable-method = "psci"; ++ cpu-idle-states = <&CPU_SLEEP>; ++ clocks = <&cpuclk 3>; ++ clock-latency = <10000>; ++ }; ++ }; ++ ++ gic: interrupt-controller@29800000 { ++ compatible = "arm,gic-v3"; ++ #interrupt-cells = <3>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ 
interrupt-controller; ++ reg = <0x0 0x29800000 0 0x10000>, /* GICD */ ++ <0x0 0x29a00000 0 0x200000>, /* GICR */ ++ <0x0 0x29c00000 0 0x10000>, /* GICC */ ++ <0x0 0x29c10000 0 0x10000>, /* GICH */ ++ <0x0 0x29c20000 0 0x10000>; /* GICV */ ++ interrupts = ; ++ ++ its: gic-its@29820000 { ++ compatible = "arm,gic-v3-its"; ++ msi-controller; ++ reg = <0x0 0x29820000 0x0 0x20000>; ++ }; ++ }; ++ ++ timer { ++ compatible = "arm,armv8-timer"; ++ interrupts = , ++ , ++ , ++ ; ++ clock-frequency = <50000000>; ++ }; ++ ++ pmu { ++ compatible = "arm,armv8-pmuv3"; ++ interrupts = ; ++ }; ++ ++ clocks { ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ ++ /* 50 MHz reference crystal */ ++ refclk: refclk { ++ compatible = "fixed-clock"; ++ #clock-cells = <0>; ++ clock-frequency = <50000000>; ++ }; ++ ++ clk_100mhz: clk_100mhz { ++ compatible = "fixed-clock"; ++ #clock-cells = <0>; ++ clocks = <&refclk>; ++ clock-frequency = <100000000>; ++ }; ++ ++ cpuclk: cpuclk { ++ compatible = "phytium,1500a-cpu-clock"; ++ #clock-cells = <1>; ++ reg = <0x0 0x28100600 0x0 0x10>; ++ clocks = <&refclk>; ++ mode = <0x2>; /* 0: do not use pll, 1: partially use pll, 2: totally use pll */ ++ /*big-clock;*/ ++ clock-output-names = "cluster0-clk", ++ "cluster1-clk", ++ "cluster2-clk", ++ "cluster3-clk"; ++ }; ++ ++ gmacclk: gmacclk { ++ compatible = "phytium,1500a-gmac-clock"; ++ #clock-cells = <0>; ++ reg = <0x0 0x2810050c 0x0 0x4>; ++ clocks = <&refclk>; ++ clock-frequency = <500000000>; ++ clock-output-names = "gmac-clk"; ++ }; ++ }; ++ ++ soc { ++ compatible = "simple-bus"; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ dma-coherent; ++ ranges; ++ ++ uart0: serial@28000000 { ++ compatible = "snps,dw-apb-uart"; ++ reg = <0x0 0x28000000 0x0 0x1000>; ++ clock-frequency = <50000000>; ++ interrupts = ; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ status = "disabled"; ++ }; ++ ++ uart1: serial@28001000 { ++ compatible = "snps,dw-apb-uart"; ++ reg = <0x0 0x28001000 0x0 0x1000>; ++ 
clock-frequency = <50000000>; ++ interrupts = ; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ status = "disabled"; ++ }; ++ ++ i2c0: i2c@28002000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "snps,designware-i2c"; ++ reg = <0x0 0x28002000 0x0 0x1000>; ++ interrupts = ; ++ clock-frequency = <100000>; ++ clocks = <&clk_100mhz>; ++ status = "disabled"; ++ }; ++ ++ i2c1: i2c@28003000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "snps,designware-i2c"; ++ reg = <0x0 0x28003000 0x0 0x1000>; ++ interrupts = ; ++ clock-frequency = <100000>; ++ clocks = <&clk_100mhz>; ++ status = "disabled"; ++ }; ++ ++ wdt0: watchdog@28004000 { ++ compatible = "snps,dw-wdt"; ++ reg = <0x0 0x28004000 0x0 0x1000>; ++ clocks = <&refclk>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ wdt1: watchdog@28005000 { ++ compatible = "snps,dw-wdt"; ++ reg = <0x0 0x28005000 0x0 0x1000>; ++ clocks = <&refclk>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ gpio: gpio@28006000 { ++ compatible = "snps,dw-apb-gpio"; ++ reg = <0x0 0x28006000 0x0 0x1000>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ status = "disabled"; ++ porta: gpio-controller@0 { ++ compatible = "snps,dw-apb-gpio-port"; ++ gpio-controller; ++ #gpio-cells = <2>; ++ snps,nr-gpios = <8>; ++ reg = <0>; ++ }; ++ portb: gpio-controller@1 { ++ compatible = "snps,dw-apb-gpio-port"; ++ gpio-controller; ++ #gpio-cells = <2>; ++ snps,nr-gpios = <8>; ++ reg = <1>; ++ }; ++ portc: gpio-controller@2 { ++ compatible = "snps,dw-apb-gpio-port"; ++ gpio-controller; ++ #gpio-cells = <2>; ++ snps,nr-gpios = <8>; ++ reg = <2>; ++ }; ++ portd: gpio-controller@3 { ++ compatible = "snps,dw-apb-gpio-port"; ++ gpio-controller; ++ #gpio-cells = <2>; ++ snps,nr-gpios = <8>; ++ reg = <3>; ++ }; ++ }; ++ ++ gmac0: ethernet@28c00000 { ++ compatible = "snps,dwmac"; ++ reg = <0 0x28c00000 0x0 0x2000>; ++ interrupts = ; ++ interrupt-names = "macirq"; ++ clocks = <&gmacclk>; ++ clock-names = "stmmaceth"; ++ snps,pbl 
= <32>; ++ snps,fixed-burst; ++ snps,burst_len = <0xe>; ++ snps,force_sf_dma_mode; ++ snps,multicast-filter-bins = <64>; ++ snps,perfect-filter-entries = <1>; ++ max-frame-size = <9000>; ++ status = "disabled"; ++ }; ++ ++ gmac1: ethernet@28c02000 { ++ compatible = "snps,dwmac"; ++ reg = <0 0x28c02000 0x0 0x2000>; ++ interrupts = ; ++ interrupt-names = "macirq"; ++ clocks = <&gmacclk>; ++ clock-names = "stmmaceth"; ++ snps,pbl = <32>; ++ snps,fixed-burst; ++ snps,burst_len = <0xe>; ++ snps,force_sf_dma_mode; ++ snps,multicast-filter-bins = <64>; ++ snps,perfect-filter-entries = <1>; ++ max-frame-size = <9000>; ++ status = "disabled"; ++ }; ++ ++ pcie0: pcie-controller { ++ compatible = "pci-host-ecam-generic"; ++ device_type = "pci"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ #interrupt-cells = <1>; ++ reg = <0 0x40000000 0 0x10000000>; ++ msi-parent = <&its>; ++ interrupt-map-mask = <0x0000 0x0 0x0 0x7>; ++ interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0x0 GIC_SPI 0x33 IRQ_TYPE_LEVEL_HIGH>, ++ <0x0 0x0 0x0 0x2 &gic 0x0 0x0 GIC_SPI 0x34 IRQ_TYPE_LEVEL_HIGH>, ++ <0x0 0x0 0x0 0x3 &gic 0x0 0x0 GIC_SPI 0x35 IRQ_TYPE_LEVEL_HIGH>, ++ <0x0 0x0 0x0 0x4 &gic 0x0 0x0 GIC_SPI 0x36 IRQ_TYPE_LEVEL_HIGH>; ++ ranges = <0x01000000 0x00 0x00000000 0x00 0x50000000 0x00 0x1000000>, ++ <0x02000000 0x00 0x60000000 0x00 0x60000000 0x00 0x20000000>, ++ <0x43000000 0x01 0x00000000 0x01 0x00000000 0x01 0x00000000>; ++ }; ++ }; ++}; +diff --git a/arch/arm64/boot/dts/phytium/ft1500a-devboard-16c-dsk.dts b/arch/arm64/boot/dts/phytium/ft1500a-devboard-16c-dsk.dts +new file mode 100644 +index 000000000000..ed2127496e95 +--- /dev/null ++++ b/arch/arm64/boot/dts/phytium/ft1500a-devboard-16c-dsk.dts +@@ -0,0 +1,51 @@ ++/* ++ * DTS file for Phytium FT1500A Generic board ++ * ++ * Copyright (C) 2015, Phytium Technology Co., Ltd. ++ * ++ * This file is licensed under a dual GPLv2 or BSD license. 
++ */ ++ ++/dts-v1/; ++/memreserve/ 0x80000000 0x80000; ++ ++#include "ft1500a-16c-generic-psci-soc.dtsi" ++ ++/ { ++ model = "FT1500A-16CORE-DSK Development Board"; ++ compatible = "phytium,ft-1500a"; ++ ++ chosen { ++ linux,pci-probe-only = <1>; ++ stdout-path = "uart1:115200n8"; ++ }; ++ ++ memory { ++ device_type = "memory"; ++ reg = <0x0 0x80000000 0x0 0x80000000>; /* Updated by bootloader */ ++ }; ++}; ++ ++&uart1 { ++ status = "ok"; ++}; ++ ++&i2c0 { ++ status = "ok"; ++}; ++ ++&i2c1 { ++ status = "ok"; ++}; ++ ++&wdt0 { ++ status = "ok"; ++}; ++ ++&gmac0 { ++ phy-mode = "gmii"; ++}; ++ ++&gmac1 { ++ phy-mode = "gmii"; ++}; +diff --git a/arch/arm64/boot/dts/phytium/ft2000ahk-devboard-dsk.dts b/arch/arm64/boot/dts/phytium/ft2000ahk-devboard-dsk.dts +new file mode 100644 +index 000000000000..4afbbf9c827b +--- /dev/null ++++ b/arch/arm64/boot/dts/phytium/ft2000ahk-devboard-dsk.dts +@@ -0,0 +1,52 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * DTS file for Phytium FT-2000A/2 devboard (FT-2000A-HK-DSK series) ++ * ++ * Copyright (C) 2019, Phytium Techonlogy Co., Ltd. 
++ */ ++ ++/dts-v1/; ++ ++#include "ft2000ahk-generic-spintable-soc.dtsi" ++ ++/ { ++ model = "FT-2000A-HK-DSK Development Board"; ++ compatible = "phytium,ft-2000ahk"; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ++ chosen { ++ linux,pci-probe-only = <1>; ++ }; ++ ++ memory { ++ device_type = "memory"; ++ reg = <0x0 0x80000000 0x0 0x80000000>; ++ }; ++}; ++ ++&i2c0 { ++ status = "ok"; ++ rtc@68 { ++ compatible = "dallas,ds1339"; ++ reg = <0x68>; ++ }; ++}; ++ ++&uart1 { ++ status = "ok"; ++}; ++ ++&gmac0 { ++ status = "ok"; ++ phy-mode = "rgmii"; ++}; ++ ++&gmac1 { ++ status = "ok"; ++ phy-mode = "rgmii"; ++}; ++ ++&gpio { ++ status = "ok"; ++}; +diff --git a/arch/arm64/boot/dts/phytium/ft2000ahk-generic-spintable-soc.dtsi b/arch/arm64/boot/dts/phytium/ft2000ahk-generic-spintable-soc.dtsi +new file mode 100644 +index 000000000000..fb587b664a83 +--- /dev/null ++++ b/arch/arm64/boot/dts/phytium/ft2000ahk-generic-spintable-soc.dtsi +@@ -0,0 +1,253 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * dts file for FT-2000A/2 SoC ++ * ++ * Copyright (C) 2019, Phytium Technology Co., Ltd. 
++ */ ++ ++#include ++ ++/ { ++ compatible = "phytium,ft2000ahk"; ++ interrupt-parent = <&gic>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ++ aliases { ++ ethernet0 = &gmac0; ++ ethernet1 = &gmac1; ++ }; ++ ++ cpus { ++ #address-cells = <2>; ++ #size-cells = <0>; ++ ++ cpu0: cpu@0 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc661", "arm,armv8"; ++ reg = <0x0 0x0>; ++ enable-method = "spin-table"; ++ cpu-release-addr = <0x0 0x8007fff0>; ++ }; ++ ++ cpu1: cpu@1 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc661", "arm,armv8"; ++ reg = <0x0 0x1>; ++ enable-method = "spin-table"; ++ cpu-release-addr = <0x0 0x8007fff0>; ++ }; ++ }; ++ ++ gic: interrupt-controller@71800000 { ++ compatible = "arm,gic-400"; ++ #interrupt-cells = <3>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ interrupt-controller; ++ reg = <0x0 0x71801000 0x0 0x1000>, ++ <0x0 0x71802000 0x0 0x2000>, ++ <0x0 0x71804000 0x0 0x1000>, ++ <0x0 0x71805000 0x0 0x1000>; ++ }; ++ ++ timer { ++ compatible = "arm,armv8-timer"; ++ interrupts = , ++ , ++ , ++ ; ++ clock-frequency = <50000000>; ++ }; ++ ++ pmu { ++ compatible = "arm,armv8-pmuv3"; ++ interrupts = , ++ ; ++ interrupt-affinity = <&cpu0 &cpu1>; ++ }; ++ ++ soc { ++ compatible = "simple-bus"; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ dma-coherent; ++ ranges; ++ ++ clocks { ++ refclk: refclk { ++ compatible = "fixed-clock"; ++ #clock-cells = <0>; ++ clock-frequency = <50000000>; ++ }; ++ ++ clk250mhz: clk250mhz { ++ compatible = "fixed-clock"; ++ #clock-cells = <0>; ++ clock-frequency = <250000000>; ++ }; ++ ++ clk500mhz: clk500mhz { ++ compatible = "fixed-clock"; ++ #clock-cells = <0>; ++ clock-frequency = <500000000>; ++ }; ++ }; ++ ++ uart0: uart@70000000 { ++ compatible = "snps,dw-apb-uart"; ++ reg = <0x0 0x70000000 0x0 0x1000>; ++ clock-frequency = <50000000>; ++ interrupts = ; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ status = "disabled"; ++ }; ++ ++ uart1: uart@70001000 { ++ compatible = 
"snps,dw-apb-uart"; ++ reg = <0x0 0x70001000 0x0 0x1000>; ++ clock-frequency = <50000000>; ++ interrupts = ; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ status = "disabled"; ++ }; ++ ++ i2c0: i2c@70002000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "snps,designware-i2c"; ++ reg = <0x0 0x70002000 0x0 0x1000>; ++ interrupts = ; ++ clock-frequency = <100000>; ++ clocks = <&refclk>; ++ status = "disabled"; ++ }; ++ ++ i2c1: i2c@70003000 { ++ #address-cells = <01>; ++ #size-cells = <0>; ++ compatible = "snps,designware-i2c"; ++ reg = <0x0 0x70003000 0x0 0x1000>; ++ interrupts = ; ++ clock-frequency = <100000>; ++ clocks = <&refclk>; ++ status = "disabled"; ++ }; ++ ++ watchdog0: wd@70004000 { ++ compatible = "snps,dw-wdt"; ++ reg = <0x0 0x70004000 0x0 0x1000>; ++ clocks = <&refclk>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ watchdog1: wd@70005000 { ++ compatible = "snps,dw-wdt"; ++ reg = <0x0 0x70005000 0x0 0x1000>; ++ clocks = <&refclk>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ gpio: gpio@70006000 { ++ compatible = "snps,dw-apb-gpio"; ++ reg = <0x0 0x70006000 0x0 0x1000>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ status = "disabled"; ++ ++ porta: gpio-controller@0 { ++ compatible = "snps,dw-apb-gpio-port"; ++ gpio-controller; ++ #gpio-cells = <2>; ++ snps,nr-gpios = <8>; ++ reg = <0>; ++ }; ++ ++ portb: gpio-controller@1 { ++ compatible = "snps,dw-apb-gpio-port"; ++ gpio-controller; ++ #gpio-cells = <2>; ++ snps,nr-gpios = <8>; ++ reg = <1>; ++ }; ++ ++ portc: gpio-controller@2 { ++ compatible = "snps,dw-apb-gpio-port"; ++ gpio-controller; ++ #gpio-cells = <2>; ++ snps,nr-gpios = <8>; ++ reg = <2>; ++ }; ++ ++ portd: gpio-controller@3 { ++ compatible = "snps,dw-apb-gpio-port"; ++ gpio-controller; ++ #gpio-cells = <2>; ++ snps,nr-gpios = <8>; ++ reg = <3>; ++ }; ++ }; ++ ++ gmac0: eth@70c00000 { ++ compatible = "snps,dwmac"; ++ reg = <0x0 0x70c00000 0x0 0x2000>; ++ interrupts = ; ++ interrupt-names = "macirq"; ++ 
clocks = <&clk500mhz>; ++ clock-names = "stmmaceth"; ++ status = "disabled"; ++ ++ snps,pbl = <16>; ++ snps,fixed-burst; ++ snps,burst_len = <14>; ++ snps,force_sf_dma_mode; ++ snps,multicast-filter-bins = <64>; ++ snps,perfect-filter-entries = <128>; ++ tx-fifo-depth = <4096>; ++ rx-fifo-depth = <4096>; ++ max-frame-size = <9000>; ++ }; ++ ++ gmac1: eth@70c10000 { ++ compatible = "snps,dwmac"; ++ reg = <0x0 0x70c10000 0x0 0x2000>; ++ interrupts = ; ++ interrupt-names = "macirq"; ++ clocks = <&clk500mhz>; ++ clock-names = "stmmaceth"; ++ status = "disabled"; ++ ++ snps,pbl = <16>; ++ snps,fixed-burst; ++ snps,burst_len = <14>; ++ snps,force_sf_dma_mode; ++ snps,multicast-filter-bins = <64>; ++ snps,perfect-filter-entries = <128>; ++ tx-fifo-depth = <4096>; ++ rx-fifo-depth = <4096>; ++ max-frame-size = <9000>; ++ }; ++ ++ pcie: pcie { ++ compatible = "pci-host-ecam-generic"; ++ device_type = "pci"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ #interrupt-cells = <1>; ++ reg = <0x0 0x40000000 0x0 0x4000000>; ++ interrupt-map-mask = <0x0 0x0 0x0 0x7>; ++ interrupt-map = <0x00 0x0 0x0 0x1 &gic 0x00 0x00 GIC_SPI 0x17 IRQ_TYPE_LEVEL_HIGH>, ++ <0x00 0x0 0x0 0x2 &gic 0x00 0x00 GIC_SPI 0x16 IRQ_TYPE_LEVEL_HIGH>, ++ <0x00 0x0 0x0 0x3 &gic 0x00 0x00 GIC_SPI 0x15 IRQ_TYPE_LEVEL_HIGH>, ++ <0x00 0x0 0x0 0x4 &gic 0x00 0x00 GIC_SPI 0x14 IRQ_TYPE_LEVEL_HIGH>; ++ ranges = <0x01000000 0x0 0x00000000 0x0 0x44000000 0x0 0x01000000>, ++ <0x02000000 0x0 0x48000000 0x0 0x48000000 0x0 0x18000000>, ++ <0x03000000 0x1 0x00000000 0x1 0x00000000 0x1 0x00000000>; ++ }; ++ }; ++}; +diff --git a/arch/arm64/boot/dts/phytium/ft2000ahke-devboard-dsk.dts b/arch/arm64/boot/dts/phytium/ft2000ahke-devboard-dsk.dts +new file mode 100644 +index 000000000000..beafd9b7fd12 +--- /dev/null ++++ b/arch/arm64/boot/dts/phytium/ft2000ahke-devboard-dsk.dts +@@ -0,0 +1,68 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * DTS file for Phytium FT-2000A/2 devboard (FT-2000A-HKE-DSK series) ++ * ++ * Copyright 
(C) 2021, Phytium Techonlogy Co., Ltd. ++ */ ++ ++/dts-v1/; ++ ++#include "ft2000ahke-generic-psci-soc.dtsi" ++ ++/ { ++ model = "FT-2000A-HKE-DSK Development Board"; ++ compatible = "phytium,ft-2000ahke"; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ++ chosen { ++ linux,pci-probe-only = <1>; ++ }; ++ ++ memory { ++ device_type = "memory"; ++ reg = <0x0 0x80000000 0x0 0x7C000000>; ++ }; ++}; ++ ++&i2c0 { ++ status = "ok"; ++ rtc@68 { ++ compatible = "dallas,ds1339"; ++ reg = <0x68>; ++ }; ++}; ++ ++&uart1 { ++ status = "ok"; ++}; ++ ++&gmac0 { ++ status = "ok"; ++ phy-mode = "rgmii"; ++}; ++ ++&gmac1 { ++ status = "ok"; ++ phy-mode = "rgmii"; ++}; ++ ++&gpio { ++ status = "ok"; ++}; ++ ++&spi0 { ++ status = "ok"; ++}; ++ ++&spi1 { ++ status = "ok"; ++}; ++ ++&can0 { ++ status = "ok"; ++}; ++ ++&can1 { ++ status = "ok"; ++}; +diff --git a/arch/arm64/boot/dts/phytium/ft2000ahke-generic-psci-soc.dtsi b/arch/arm64/boot/dts/phytium/ft2000ahke-generic-psci-soc.dtsi +new file mode 100644 +index 000000000000..dd7631e51d33 +--- /dev/null ++++ b/arch/arm64/boot/dts/phytium/ft2000ahke-generic-psci-soc.dtsi +@@ -0,0 +1,312 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * dts file for FT-2000A/2 SoC ++ * ++ * Copyright (C) 2019, Phytium Technology Co., Ltd. 
++ */ ++ ++#include ++ ++/ { ++ compatible = "phytium,ft2000ahke"; ++ interrupt-parent = <&gic>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ++ aliases { ++ ethernet0 = &gmac0; ++ ethernet1 = &gmac1; ++ }; ++ psci { ++ compatible = "arm,psci-1.0", "arm,psci-0.2", "arm,psci"; ++ method = "smc"; ++ cpu_suspend = <0xc4000001>; ++ cpu_off = <0x84000002>; ++ cpu_on = <0xc4000003>; ++ sys_poweroff = <0x84000008>; ++ sys_reset = <0x84000009>; ++ }; ++ ++ cpus { ++ #address-cells = <2>; ++ #size-cells = <0>; ++ ++ cpu0: cpu@0 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc661", "arm,armv8"; ++ reg = <0x0 0x0>; ++ enable-method = "psci"; ++ }; ++ ++ cpu1: cpu@1 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc661", "arm,armv8"; ++ reg = <0x0 0x1>; ++ enable-method = "psci"; ++ }; ++ }; ++ ++ gic: interrupt-controller@71800000 { ++ compatible = "arm,gic-400"; ++ #interrupt-cells = <3>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ interrupt-controller; ++ reg = <0x0 0x71801000 0x0 0x1000>, ++ <0x0 0x71802000 0x0 0x2000>, ++ <0x0 0x71804000 0x0 0x1000>, ++ <0x0 0x71805000 0x0 0x1000>; ++ }; ++ ++ timer { ++ compatible = "arm,armv8-timer"; ++ interrupts = , ++ , ++ , ++ ; ++ clock-frequency = <50000000>; ++ }; ++ ++ pmu { ++ compatible = "arm,armv8-pmuv3"; ++ interrupts = , ++ ; ++ interrupt-affinity = <&cpu0 &cpu1>; ++ }; ++ ++ soc { ++ compatible = "simple-bus"; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ dma-coherent; ++ ranges; ++ ++ clocks { ++ refclk: refclk { ++ compatible = "fixed-clock"; ++ #clock-cells = <0>; ++ clock-frequency = <50000000>; ++ }; ++ ++ clk250mhz: clk250mhz { ++ compatible = "fixed-clock"; ++ #clock-cells = <0>; ++ clock-frequency = <250000000>; ++ }; ++ ++ clk500mhz: clk500mhz { ++ compatible = "fixed-clock"; ++ #clock-cells = <0>; ++ clock-frequency = <500000000>; ++ }; ++ ++ sysclk_48mhz: clk48mhz { ++ compatible = "fixed-clock"; ++ #clock-cells = <0>; ++ clock-frequency = <48000000>; ++ }; ++ ++ 
sysclk_600mhz: clk600mhz { ++ compatible = "fixed-clock"; ++ #clock-cells = <0>; ++ clock-frequency = <600000000>; ++ }; ++ }; ++ ++ uart0: uart@70000000 { ++ compatible = "snps,dw-apb-uart"; ++ reg = <0x0 0x70000000 0x0 0x1000>; ++ clock-frequency = <50000000>; ++ interrupts = ; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ status = "disabled"; ++ }; ++ ++ uart1: uart@70001000 { ++ compatible = "snps,dw-apb-uart"; ++ reg = <0x0 0x70001000 0x0 0x1000>; ++ clock-frequency = <50000000>; ++ interrupts = ; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ status = "disabled"; ++ }; ++ ++ uart2: uart@70007000 { ++ compatible = "snps,dw-apb-uart"; ++ reg = <0x0 0x70007000 0x0 0x1000>; ++ clock-frequency = <50000000>; ++ interrupts = ; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ status = "disabled"; ++ }; ++ ++ uart3: uart@70008000 { ++ compatible = "snps,dw-apb-uart"; ++ reg = <0x0 0x70008000 0x0 0x1000>; ++ clock-frequency = <50000000>; ++ interrupts = ; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ status = "disabled"; ++ }; ++ ++ i2c0: i2c@70002000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "snps,designware-i2c"; ++ reg = <0x0 0x70002000 0x0 0x1000>; ++ interrupts = ; ++ clock-frequency = <100000>; ++ clocks = <&refclk>; ++ status = "disabled"; ++ }; ++ ++ i2c1: i2c@70003000 { ++ #address-cells = <01>; ++ #size-cells = <0>; ++ compatible = "snps,designware-i2c"; ++ reg = <0x0 0x70003000 0x0 0x1000>; ++ interrupts = ; ++ clock-frequency = <100000>; ++ clocks = <&refclk>; ++ status = "disabled"; ++ }; ++ ++ gpio: gpio@70006000 { ++ compatible = "snps,dw-apb-gpio"; ++ reg = <0x0 0x70006000 0x0 0x1000>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ status = "disabled"; ++ ++ porta: gpio-controller@0 { ++ compatible = "snps,dw-apb-gpio-port"; ++ gpio-controller; ++ #gpio-cells = <2>; ++ snps,nr-gpios = <8>; ++ reg = <0>; ++ }; ++ ++ portb: gpio-controller@1 { ++ compatible = "snps,dw-apb-gpio-port"; ++ gpio-controller; ++ #gpio-cells = <2>; ++ 
snps,nr-gpios = <8>; ++ reg = <1>; ++ }; ++ ++ portc: gpio-controller@2 { ++ compatible = "snps,dw-apb-gpio-port"; ++ gpio-controller; ++ #gpio-cells = <2>; ++ snps,nr-gpios = <8>; ++ reg = <2>; ++ }; ++ ++ portd: gpio-controller@3 { ++ compatible = "snps,dw-apb-gpio-port"; ++ gpio-controller; ++ #gpio-cells = <2>; ++ snps,nr-gpios = <8>; ++ reg = <3>; ++ }; ++ }; ++ ++ gmac0: eth@70c00000 { ++ compatible = "snps,dwmac"; ++ reg = <0x0 0x70c00000 0x0 0x2000>; ++ interrupts = ; ++ interrupt-names = "macirq"; ++ clocks = <&clk500mhz>; ++ clock-names = "stmmaceth"; ++ status = "disabled"; ++ ++ snps,pbl = <16>; ++ snps,fixed-burst; ++ snps,burst_len = <14>; ++ snps,force_sf_dma_mode; ++ snps,multicast-filter-bins = <64>; ++ snps,perfect-filter-entries = <128>; ++ tx-fifo-depth = <4096>; ++ rx-fifo-depth = <4096>; ++ max-frame-size = <9000>; ++ }; ++ ++ gmac1: eth@70c10000 { ++ compatible = "snps,dwmac"; ++ reg = <0x0 0x70c10000 0x0 0x2000>; ++ interrupts = ; ++ interrupt-names = "macirq"; ++ clocks = <&clk500mhz>; ++ clock-names = "stmmaceth"; ++ status = "disabled"; ++ ++ snps,pbl = <16>; ++ snps,fixed-burst; ++ snps,burst_len = <14>; ++ snps,force_sf_dma_mode; ++ snps,multicast-filter-bins = <64>; ++ snps,perfect-filter-entries = <128>; ++ tx-fifo-depth = <4096>; ++ rx-fifo-depth = <4096>; ++ max-frame-size = <9000>; ++ }; ++ ++ spi0: spi@70009000 { ++ compatible = "phytium,spi"; ++ interrupts = ; ++ reg = <0x0 0x70009000 0x0 0x1000>; ++ clocks = <&sysclk_48mhz>; ++ num-cs = <4>; ++ }; ++ ++ spi1: spi@7000a000 { ++ compatible = "phytium,spi"; ++ interrupts = ; ++ reg = <0x0 0x7000a000 0x0 0x1000>; ++ clocks = <&sysclk_48mhz>; ++ num-cs = <4>; ++ }; ++ ++ can0: can@70014000 { ++ compatible = "phytium,can"; ++ reg = <0x0 0x70014000 0x0 0x1000>; ++ interrupts = ; ++ clocks = <&sysclk_600mhz>; ++ clock-names = "phytium_can_clk"; ++ tx-fifo-depth = <0x40>; ++ rx-fifo-depth = <0x40>; ++ }; ++ ++ can1: can@70015000 { ++ compatible = "phytium,can"; ++ reg = <0x0 0x70015000 
0x0 0x1000>; ++ interrupts = ; ++ clocks = <&sysclk_600mhz>; ++ clock-names = "phytium_can_clk"; ++ tx-fifo-depth = <0x40>; ++ rx-fifo-depth = <0x40>; ++ }; ++ ++ pcie: pcie { ++ compatible = "pci-host-ecam-generic"; ++ device_type = "pci"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ #interrupt-cells = <1>; ++ reg = <0x0 0x40000000 0x0 0x4000000>; ++ interrupt-map-mask = <0x0 0x0 0x0 0x7>; ++ interrupt-map = <0x00 0x0 0x0 0x1 &gic 0x00 0x00 GIC_SPI 0x17 IRQ_TYPE_LEVEL_HIGH>, ++ <0x00 0x0 0x0 0x2 &gic 0x00 0x00 GIC_SPI 0x16 IRQ_TYPE_LEVEL_HIGH>, ++ <0x00 0x0 0x0 0x3 &gic 0x00 0x00 GIC_SPI 0x15 IRQ_TYPE_LEVEL_HIGH>, ++ <0x00 0x0 0x0 0x4 &gic 0x00 0x00 GIC_SPI 0x14 IRQ_TYPE_LEVEL_HIGH>; ++ ranges = <0x01000000 0x0 0x00000000 0x0 0x44000000 0x0 0x01000000>, ++ <0x02000000 0x0 0x48000000 0x0 0x48000000 0x0 0x18000000>, ++ <0x03000000 0x1 0x00000000 0x1 0x00000000 0x1 0x00000000>; ++ }; ++ }; ++}; +diff --git a/arch/arm64/boot/dts/phytium/ft2000plus-MR-devboard-64c-dsk.dts b/arch/arm64/boot/dts/phytium/ft2000plus-MR-devboard-64c-dsk.dts +new file mode 100644 +index 000000000000..7c2ee8e60dce +--- /dev/null ++++ b/arch/arm64/boot/dts/phytium/ft2000plus-MR-devboard-64c-dsk.dts +@@ -0,0 +1,136 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * DTS file for Phytium FT-2000plus devboard. ++ * ++ * Copyright (C) 2019, Phytium Technology Co., Ltd. 
++ */ ++ ++/dts-v1/; ++/memreserve/ 0x0000000080000000 0x0000000000010000; ++ ++#include "ft2000plus-MR-psci-soc.dtsi" ++ ++/ { ++ model = "FT-2000plus Development Board"; ++ compatible = "phytium,ft-2000plus"; ++ ++ chosen { ++ linux,pci-probe-only = <1>; ++ }; ++ ++ /* NUMA Node-0 */ ++ memory@00 { ++ device_type = "memory"; ++ /* 0 - 512MiB (512MiB)*/ ++ reg = <0x00000000 0x00000000 0x0 0x20000000>; ++ numa-node-id = <0>; ++ }; ++ memory@01 { ++ device_type = "memory"; ++ /* 2GiB - 4GiB (2GiB) */ ++ reg = <0x00000000 0x80000000 0x0 0x80000000>; ++ numa-node-id = <0>; ++ }; ++ memory@02 { ++ device_type = "memory"; ++ /* 512GiB - 516GiB (4GiB) */ ++ reg = <0x00000080 0x00000000 0x1 0x00000000>; ++ numa-node-id = <0>; ++ }; ++ /* NUMA Node-1 */ ++ memory@10 { ++ device_type = "memory"; ++ /* 1024GiB - 1028GiB (4GiB) */ ++ reg = <0x00000100 0x00000000 0x1 0x00000000>; ++ numa-node-id = <1>; ++ }; ++ memory@11 { ++ device_type = "memory"; ++ /* 1536GiB - 1540GiB (4GiB) */ ++ reg = <0x00000180 0x00000000 0x1 0x00000000>; ++ numa-node-id = <1>; ++ }; ++ /* NUMA Node-2 */ ++ memory@20 { ++ device_type = "memory"; ++ /* 2048GiB - 2052GiB (4GiB) */ ++ reg = <0x00000200 0x00000000 0x1 0x00000000>; ++ numa-node-id = <2>; ++ }; ++ memory@21 { ++ device_type = "memory"; ++ /* 2560GiB - 2564GiB (4GiB) */ ++ reg = <0x00000280 0x00000000 0x1 0x00000000>; ++ numa-node-id = <2>; ++ }; ++ /* NUMA Node-3 */ ++ memory@30 { ++ device_type = "memory"; ++ /* 3072GiB - 3076GiB (4GiB) */ ++ reg = <0x00000300 0x00000000 0x1 0x00000000>; ++ numa-node-id = <3>; ++ }; ++ memory@31 { ++ device_type = "memory"; ++ /* 3584GiB - 3588GiB (4GiB) */ ++ reg = <0x00000380 0x00000000 0x1 0x00000000>; ++ numa-node-id = <3>; ++ }; ++ /* NUMA Node-4 */ ++ memory@40 { ++ device_type = "memory"; ++ /* 4096GiB - 4100GiB (4GiB) */ ++ reg = <0x00000400 0x00000000 0x1 0x00000000>; ++ numa-node-id = <4>; ++ }; ++ memory@41 { ++ device_type = "memory"; ++ /* 4608GiB - 4612GiB (4GiB) */ ++ reg = <0x00000480 
0x00000000 0x1 0x00000000>; ++ numa-node-id = <4>; ++ }; ++ /* NUMA Node-5 */ ++ memory@50 { ++ device_type = "memory"; ++ /* 5120GiB - 5124GiB (4GiB) */ ++ reg = <0x00000500 0x00000000 0x1 0x00000000>; ++ numa-node-id = <5>; ++ }; ++ memory@51 { ++ device_type = "memory"; ++ /* 5632GiB - 5636GiB (4GiB) */ ++ reg = <0x00000580 0x00000000 0x1 0x00000000>; ++ numa-node-id = <5>; ++ }; ++ /* NUMA Node-6 */ ++ memory@60 { ++ device_type = "memory"; ++ /* 6144GiB - 6148GiB (4GiB) */ ++ reg = <0x00000600 0x00000000 0x1 0x00000000>; ++ numa-node-id = <6>; ++ }; ++ memory@61 { ++ device_type = "memory"; ++ /* 6656GiB - 6660GiB (4GiB) */ ++ reg = <0x00000680 0x00000000 0x1 0x00000000>; ++ numa-node-id = <6>; ++ }; ++ /* NUMA Node-7 */ ++ memory@70 { ++ device_type = "memory"; ++ /* 7168GiB - 7172GiB (4GiB) */ ++ reg = <0x00000700 0x00000000 0x1 0x00000000>; ++ numa-node-id = <7>; ++ }; ++ memory@71 { ++ device_type = "memory"; ++ /* 7680GiB - 7684GiB (4GiB) */ ++ reg = <0x00000780 0x00000000 0x1 0x00000000>; ++ numa-node-id = <7>; ++ }; ++ ++}; ++ ++&uart1 { ++ status = "ok"; ++}; +diff --git a/arch/arm64/boot/dts/phytium/ft2000plus-MR-psci-soc.dtsi b/arch/arm64/boot/dts/phytium/ft2000plus-MR-psci-soc.dtsi +new file mode 100644 +index 000000000000..1e9da418b1d1 +--- /dev/null ++++ b/arch/arm64/boot/dts/phytium/ft2000plus-MR-psci-soc.dtsi +@@ -0,0 +1,1062 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * dts file for FT-2000plus SoC ++ * ++ * Copyright (C) 2018-2019, Phytium Technology Co., Ltd. 
++ */ ++ ++#include ++ ++/ { ++ compatible = "phytium,ft2000plus"; ++ interrupt-parent = <&gic>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ++ psci { ++ compatible = "arm,psci-1.0"; ++ method = "smc"; ++ cpu_suspend = <0xc4000001>; ++ cpu_off = <0x84000002>; ++ cpu_on = <0xc4000003>; ++ sys_poweroff = <0x84000008>; ++ sys_reset = <0x84000009>; ++ }; ++ ++ cpus { ++ #address-cells = <0x2>; ++ #size-cells = <0x0>; ++ ++ cpu-map { ++ cluster0 { ++ core0 { ++ cpu = <&cpu0>; ++ }; ++ core1 { ++ cpu = <&cpu1>; ++ }; ++ core2 { ++ cpu = <&cpu2>; ++ }; ++ core3 { ++ cpu = <&cpu3>; ++ }; ++ }; ++ ++ cluster1 { ++ core0 { ++ cpu = <&cpu4>; ++ }; ++ core1 { ++ cpu = <&cpu5>; ++ }; ++ core2 { ++ cpu = <&cpu6>; ++ }; ++ core3 { ++ cpu = <&cpu7>; ++ }; ++ }; ++ ++ cluster2 { ++ core0 { ++ cpu = <&cpu8>; ++ }; ++ core1 { ++ cpu = <&cpu9>; ++ }; ++ core2 { ++ cpu = <&cpu10>; ++ }; ++ core3 { ++ cpu = <&cpu11>; ++ }; ++ }; ++ ++ cluster3 { ++ core0 { ++ cpu = <&cpu12>; ++ }; ++ core1 { ++ cpu = <&cpu13>; ++ }; ++ core2 { ++ cpu = <&cpu14>; ++ }; ++ core3 { ++ cpu = <&cpu15>; ++ }; ++ }; ++ ++ cluster4 { ++ core0 { ++ cpu = <&cpu16>; ++ }; ++ core1 { ++ cpu = <&cpu17>; ++ }; ++ core2 { ++ cpu = <&cpu18>; ++ }; ++ core3 { ++ cpu = <&cpu19>; ++ }; ++ }; ++ ++ cluster5 { ++ core0 { ++ cpu = <&cpu20>; ++ }; ++ core1 { ++ cpu = <&cpu21>; ++ }; ++ core2 { ++ cpu = <&cpu22>; ++ }; ++ core3 { ++ cpu = <&cpu23>; ++ }; ++ }; ++ ++ cluster6 { ++ core0 { ++ cpu = <&cpu24>; ++ }; ++ core1 { ++ cpu = <&cpu25>; ++ }; ++ core2 { ++ cpu = <&cpu26>; ++ }; ++ core3 { ++ cpu = <&cpu27>; ++ }; ++ }; ++ ++ cluster7 { ++ core0 { ++ cpu = <&cpu28>; ++ }; ++ core1 { ++ cpu = <&cpu29>; ++ }; ++ core2 { ++ cpu = <&cpu30>; ++ }; ++ core3 { ++ cpu = <&cpu31>; ++ }; ++ }; ++ ++ cluster8 { ++ core0 { ++ cpu = <&cpu32>; ++ }; ++ core1 { ++ cpu = <&cpu33>; ++ }; ++ core2 { ++ cpu = <&cpu34>; ++ }; ++ core3 { ++ cpu = <&cpu35>; ++ }; ++ }; ++ ++ cluster9 { ++ core0 { ++ cpu = <&cpu36>; ++ }; ++ core1 { ++ cpu 
= <&cpu37>; ++ }; ++ core2 { ++ cpu = <&cpu38>; ++ }; ++ core3 { ++ cpu = <&cpu39>; ++ }; ++ }; ++ ++ cluster10 { ++ core0 { ++ cpu = <&cpu40>; ++ }; ++ core1 { ++ cpu = <&cpu41>; ++ }; ++ core2 { ++ cpu = <&cpu42>; ++ }; ++ core3 { ++ cpu = <&cpu43>; ++ }; ++ }; ++ ++ cluster11 { ++ core0 { ++ cpu = <&cpu44>; ++ }; ++ core1 { ++ cpu = <&cpu45>; ++ }; ++ core2 { ++ cpu = <&cpu46>; ++ }; ++ core3 { ++ cpu = <&cpu47>; ++ }; ++ }; ++ ++ cluster12 { ++ core0 { ++ cpu = <&cpu48>; ++ }; ++ core1 { ++ cpu = <&cpu49>; ++ }; ++ core2 { ++ cpu = <&cpu50>; ++ }; ++ core3 { ++ cpu = <&cpu51>; ++ }; ++ }; ++ ++ cluster13 { ++ core0 { ++ cpu = <&cpu52>; ++ }; ++ core1 { ++ cpu = <&cpu53>; ++ }; ++ core2 { ++ cpu = <&cpu54>; ++ }; ++ core3 { ++ cpu = <&cpu55>; ++ }; ++ }; ++ ++ cluster14 { ++ core0 { ++ cpu = <&cpu56>; ++ }; ++ core1 { ++ cpu = <&cpu57>; ++ }; ++ core2 { ++ cpu = <&cpu58>; ++ }; ++ core3 { ++ cpu = <&cpu59>; ++ }; ++ }; ++ ++ cluster15 { ++ core0 { ++ cpu = <&cpu60>; ++ }; ++ core1 { ++ cpu = <&cpu61>; ++ }; ++ core2 { ++ cpu = <&cpu62>; ++ }; ++ core3 { ++ cpu = <&cpu63>; ++ }; ++ }; ++ }; ++ ++ cpu0: cpu@0 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x0>; ++ enable-method = "psci"; ++ numa-node-id = <0>; ++ }; ++ ++ cpu1: cpu@1 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x1>; ++ enable-method = "psci"; ++ numa-node-id = <0>; ++ }; ++ ++ cpu2: cpu@2 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x2>; ++ enable-method = "psci"; ++ numa-node-id = <0>; ++ }; ++ ++ cpu3: cpu@3 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x3>; ++ enable-method = "psci"; ++ numa-node-id = <0>; ++ }; ++ ++ cpu4: cpu@100 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x100>; ++ enable-method = "psci"; ++ numa-node-id = <0>; ++ }; ++ ++ cpu5: cpu@101 { ++ device_type = "cpu"; ++ 
compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x101>; ++ enable-method = "psci"; ++ numa-node-id = <0>; ++ }; ++ ++ cpu6: cpu@102 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x102>; ++ enable-method = "psci"; ++ numa-node-id = <0>; ++ }; ++ ++ cpu7: cpu@103 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x103>; ++ enable-method = "psci"; ++ numa-node-id = <0>; ++ }; ++ ++ cpu8: cpu@200 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x200>; ++ enable-method = "psci"; ++ numa-node-id = <1>; ++ }; ++ ++ cpu9: cpu@201 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x201>; ++ enable-method = "psci"; ++ numa-node-id = <1>; ++ }; ++ ++ cpu10: cpu@202 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x202>; ++ enable-method = "psci"; ++ numa-node-id = <1>; ++ }; ++ ++ cpu11: cpu@203 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x203>; ++ enable-method = "psci"; ++ numa-node-id = <1>; ++ }; ++ ++ cpu12: cpu@300 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x300>; ++ enable-method = "psci"; ++ numa-node-id = <1>; ++ }; ++ ++ cpu13: cpu@301 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x301>; ++ enable-method = "psci"; ++ numa-node-id = <1>; ++ }; ++ ++ cpu14: cpu@302 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x302>; ++ enable-method = "psci"; ++ numa-node-id = <1>; ++ }; ++ ++ cpu15: cpu@303 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x303>; ++ enable-method = "psci"; ++ numa-node-id = <1>; ++ }; ++ ++ cpu16: cpu@400 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x400>; ++ enable-method = "psci"; ++ numa-node-id = 
<2>; ++ }; ++ ++ cpu17: cpu@401 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x401>; ++ enable-method = "psci"; ++ numa-node-id = <2>; ++ }; ++ ++ cpu18: cpu@402 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x402>; ++ enable-method = "psci"; ++ numa-node-id = <2>; ++ }; ++ ++ cpu19: cpu@403 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x403>; ++ enable-method = "psci"; ++ numa-node-id = <2>; ++ }; ++ ++ cpu20: cpu@500 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x500>; ++ enable-method = "psci"; ++ numa-node-id = <2>; ++ }; ++ ++ cpu21: cpu@501 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x501>; ++ enable-method = "psci"; ++ numa-node-id = <2>; ++ }; ++ ++ cpu22: cpu@502 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x502>; ++ enable-method = "psci"; ++ numa-node-id = <2>; ++ }; ++ ++ cpu23: cpu@503 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x503>; ++ enable-method = "psci"; ++ numa-node-id = <2>; ++ }; ++ ++ cpu24: cpu@600 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x600>; ++ enable-method = "psci"; ++ numa-node-id = <3>; ++ }; ++ ++ cpu25: cpu@601 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x601>; ++ enable-method = "psci"; ++ numa-node-id = <3>; ++ }; ++ ++ cpu26: cpu@602 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x602>; ++ enable-method = "psci"; ++ numa-node-id = <3>; ++ }; ++ ++ cpu27: cpu@603 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x603>; ++ enable-method = "psci"; ++ numa-node-id = <3>; ++ }; ++ ++ cpu28: cpu@700 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ 
reg = <0x0 0x700>; ++ enable-method = "psci"; ++ numa-node-id = <3>; ++ }; ++ ++ cpu29: cpu@701 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x701>; ++ enable-method = "psci"; ++ numa-node-id = <3>; ++ }; ++ ++ cpu30: cpu@702 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x702>; ++ enable-method = "psci"; ++ numa-node-id = <3>; ++ }; ++ ++ cpu31: cpu@703 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x703>; ++ enable-method = "psci"; ++ numa-node-id = <3>; ++ }; ++ ++ cpu32: cpu@800 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x800>; ++ enable-method = "psci"; ++ numa-node-id = <4>; ++ }; ++ ++ cpu33: cpu@801 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x801>; ++ enable-method = "psci"; ++ numa-node-id = <4>; ++ }; ++ ++ cpu34: cpu@802 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x802>; ++ enable-method = "psci"; ++ numa-node-id = <4>; ++ }; ++ ++ cpu35: cpu@803 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x803>; ++ enable-method = "psci"; ++ numa-node-id = <4>; ++ }; ++ ++ cpu36: cpu@900 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x900>; ++ enable-method = "psci"; ++ numa-node-id = <4>; ++ }; ++ ++ cpu37: cpu@901 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x901>; ++ enable-method = "psci"; ++ numa-node-id = <4>; ++ }; ++ ++ cpu38: cpu@902 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x902>; ++ enable-method = "psci"; ++ numa-node-id = <4>; ++ }; ++ ++ cpu39: cpu@903 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x903>; ++ enable-method = "psci"; ++ numa-node-id = <4>; ++ }; ++ ++ cpu40: cpu@a00 { ++ 
device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xa00>; ++ enable-method = "psci"; ++ numa-node-id = <5>; ++ }; ++ ++ cpu41: cpu@a01 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xa01>; ++ enable-method = "psci"; ++ numa-node-id = <5>; ++ }; ++ ++ cpu42: cpu@a02 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xa02>; ++ enable-method = "psci"; ++ numa-node-id = <5>; ++ }; ++ ++ cpu43: cpu@a03 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xa03>; ++ enable-method = "psci"; ++ numa-node-id = <5>; ++ }; ++ ++ cpu44: cpu@b00 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xb00>; ++ enable-method = "psci"; ++ numa-node-id = <5>; ++ }; ++ ++ cpu45: cpu@b01 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xb01>; ++ enable-method = "psci"; ++ numa-node-id = <5>; ++ }; ++ ++ cpu46: cpu@b02 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xb02>; ++ enable-method = "psci"; ++ numa-node-id = <5>; ++ }; ++ ++ cpu47: cpu@b03 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xb03>; ++ enable-method = "psci"; ++ numa-node-id = <5>; ++ }; ++ ++ cpu48: cpu@c00 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xc00>; ++ enable-method = "psci"; ++ numa-node-id = <6>; ++ }; ++ ++ cpu49: cpu@c01 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xc01>; ++ enable-method = "psci"; ++ numa-node-id = <6>; ++ }; ++ ++ cpu50: cpu@c02 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xc02>; ++ enable-method = "psci"; ++ numa-node-id = <6>; ++ }; ++ ++ cpu51: cpu@c03 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xc03>; ++ enable-method 
= "psci"; ++ numa-node-id = <6>; ++ }; ++ ++ cpu52: cpu@d00 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xd00>; ++ enable-method = "psci"; ++ numa-node-id = <6>; ++ }; ++ ++ cpu53: cpu@d01 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xd01>; ++ enable-method = "psci"; ++ numa-node-id = <6>; ++ }; ++ ++ cpu54: cpu@d02 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xd02>; ++ enable-method = "psci"; ++ numa-node-id = <6>; ++ }; ++ ++ cpu55: cpu@d03 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xd03>; ++ enable-method = "psci"; ++ numa-node-id = <6>; ++ }; ++ ++ cpu56: cpu@e00 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xe00>; ++ enable-method = "psci"; ++ numa-node-id = <7>; ++ }; ++ ++ cpu57: cpu@e01 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xe01>; ++ enable-method = "psci"; ++ numa-node-id = <7>; ++ }; ++ ++ cpu58: cpu@e02 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xe02>; ++ enable-method = "psci"; ++ numa-node-id = <7>; ++ }; ++ ++ cpu59: cpu@e03 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xe03>; ++ enable-method = "psci"; ++ numa-node-id = <7>; ++ }; ++ ++ cpu60: cpu@f00 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xf00>; ++ enable-method = "psci"; ++ numa-node-id = <7>; ++ }; ++ ++ cpu61: cpu@f01 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xf01>; ++ enable-method = "psci"; ++ numa-node-id = <7>; ++ }; ++ ++ cpu62: cpu@f02 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xf02>; ++ enable-method = "psci"; ++ numa-node-id = <7>; ++ }; ++ ++ cpu63: cpu@f03 { ++ device_type = "cpu"; ++ compatible = 
"phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xf03>; ++ enable-method = "psci"; ++ numa-node-id = <7>; ++ }; ++ }; ++ ++ distance-map { ++ compatible = "numa-distance-map-v1"; ++ distance-matrix = <0x0 0x0 0x0a>, ++ <0x0 0x1 0x14>, ++ <0x0 0x2 0x28>, ++ <0x0 0x3 0x1e>, ++ <0x0 0x4 0x14>, ++ <0x0 0x5 0x1e>, ++ <0x0 0x6 0x32>, ++ <0x0 0x7 0x28>, ++ <0x1 0x0 0x14>, ++ <0x1 0x1 0x0a>, ++ <0x1 0x2 0x1e>, ++ <0x1 0x3 0x14>, ++ <0x1 0x4 0x1e>, ++ <0x1 0x5 0x14>, ++ <0x1 0x6 0x28>, ++ <0x1 0x7 0x1e>, ++ <0x2 0x0 0x28>, ++ <0x2 0x1 0x1e>, ++ <0x2 0x2 0x0a>, ++ <0x2 0x3 0x14>, ++ <0x2 0x4 0x32>, ++ <0x2 0x5 0x28>, ++ <0x2 0x6 0x14>, ++ <0x2 0x7 0x1e>, ++ <0x3 0x0 0x1e>, ++ <0x3 0x1 0x14>, ++ <0x3 0x2 0x14>, ++ <0x3 0x3 0x0a>, ++ <0x3 0x4 0x28>, ++ <0x3 0x5 0x1e>, ++ <0x3 0x6 0x1e>, ++ <0x3 0x7 0x14>, ++ <0x4 0x0 0x14>, ++ <0x4 0x1 0x1e>, ++ <0x4 0x2 0x32>, ++ <0x4 0x3 0x28>, ++ <0x4 0x4 0x0a>, ++ <0x4 0x5 0x14>, ++ <0x4 0x6 0x28>, ++ <0x4 0x7 0x1e>, ++ <0x5 0x0 0x1e>, ++ <0x5 0x1 0x14>, ++ <0x5 0x2 0x28>, ++ <0x5 0x3 0x1e>, ++ <0x5 0x4 0x14>, ++ <0x5 0x5 0x0a>, ++ <0x5 0x6 0x1e>, ++ <0x5 0x7 0x14>, ++ <0x6 0x0 0x32>, ++ <0x6 0x1 0x28>, ++ <0x6 0x2 0x14>, ++ <0x6 0x3 0x1e>, ++ <0x6 0x4 0x28>, ++ <0x6 0x5 0x1e>, ++ <0x6 0x6 0x0a>, ++ <0x6 0x7 0x14>, ++ <0x7 0x0 0x28>, ++ <0x7 0x1 0x1e>, ++ <0x7 0x2 0x1e>, ++ <0x7 0x3 0x14>, ++ <0x7 0x4 0x1e>, ++ <0x7 0x5 0x14>, ++ <0x7 0x6 0x14>, ++ <0x7 0x7 0x0a>; ++ }; ++ ++ ++ gic: interrupt-controller@8002a000000 { ++ compatible = "arm,gic-v3"; ++ #interrupt-cells = <3>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ interrupt-controller; ++ reg = <0x0800 0x2a000000 0 0x10000>, /* GICD */ ++ <0x0800 0x2a800000 0 0x800000>, /* GICR */ ++ <0x0800 0x29c00000 0 0x10000>, /* GICC */ ++ <0x0800 0x29c10000 0 0x10000>, /* GICH */ ++ <0x0800 0x29c20000 0 0x10000>; /* GICV */ ++ interrupts = ; ++ ++ its: gic-its@8002a020000 { ++ compatible = "arm,gic-v3-its"; ++ msi-controller; ++ reg = <0x0800 0x2a020000 0x0 0x20000>; ++ }; ++ }; ++ 
++ timer { ++ compatible = "arm,armv8-timer"; ++ interrupts = , ++ , ++ , ++ ; ++ clock-frequency = <50000000>; ++ }; ++ ++ soc { ++ compatible = "simple-bus"; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ dma-coherent; ++ ranges; ++ ++ uart0: serial@28000000 { ++ compatible = "snps,dw-apb-uart"; ++ reg = <0x800 0x28000000 0x0 0x1000>; ++ clock-frequency = <50000000>; ++ interrupts = ; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ status = "disabled"; ++ }; ++ ++ uart1: serial@28001000 { ++ compatible = "snps,dw-apb-uart"; ++ reg = <0x800 0x28001000 0x0 0x1000>; ++ clock-frequency = <50000000>; ++ interrupts = ; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ status = "disabled"; ++ }; ++ ++ gpio0:gpio@80028006000 { ++ compatible = "snps,dw-apb-gpio"; ++ reg = <0x800 0x28006000 0x0 0x1000>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ status = "ok"; ++ ++ gpio-controller@0 { ++ compatible = "snps,dw-apb-gpio-port"; ++ gpio-controller; ++ #gpio-cells = <0x2>; ++ snps,nr-gpios = <0x8>; ++ reg = <0x0>; ++ }; ++ ++ gpio-controller@1 { ++ compatible = "snps,dw-apb-gpio-port"; ++ gpio-controller; ++ #gpio-cells = <0x2>; ++ snps,nr-gpios = <0x8>; ++ reg = <0x1>; ++ }; ++ ++ gpio-controller@2 { ++ compatible = "snps,dw-apb-gpio-port"; ++ gpio-controller; ++ #gpio-cells = <0x2>; ++ snps,nr-gpios = <0x8>; ++ reg = <0x2>; ++ }; ++ ++ gpio-controller@3 { ++ compatible = "snps,dw-apb-gpio-port"; ++ gpio-controller; ++ #gpio-cells = <0x2>; ++ snps,nr-gpios = <0x8>; ++ reg = <0x3>; ++ }; ++ }; ++ ++ i2c0: i2c@80028002000 { ++ compatible = "snps,designware-i2c"; ++ reg = <0x800 0x28002000 0x0 0x1000>; ++ interrupts = ; ++ clock-frequency = <100000>; ++ status = "ok"; ++ }; ++ ++ i2c1: i2c@80028003000 { ++ compatible = "snps,designware-i2c"; ++ reg = <0x800 0x28003000 0x0 0x1000>; ++ interrupts = ; ++ clock-frequency = <100000>; ++ status = "ok"; ++ }; ++ ++ pcie0: peu0-c0 { ++ compatible = "pci-host-ecam-generic"; ++ device_type = "pci"; ++ #address-cells = <3>; ++ 
#size-cells = <2>; ++ #interrupt-cells = <1>; ++ reg = <0x800 0x40000000 0 0x2000000>; ++ msi-parent = <&its>; ++ bus-range = <0 0x1f>; ++ interrupt-map-mask = <0x0 0x0 0x0 0x7>; ++ interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0x0 GIC_SPI 0x33 IRQ_TYPE_LEVEL_HIGH>, ++ <0x0 0x0 0x0 0x2 &gic 0x0 0x0 GIC_SPI 0x34 IRQ_TYPE_LEVEL_HIGH>, ++ <0x0 0x0 0x0 0x3 &gic 0x0 0x0 GIC_SPI 0x35 IRQ_TYPE_LEVEL_HIGH>, ++ <0x0 0x0 0x0 0x4 &gic 0x0 0x0 GIC_SPI 0x36 IRQ_TYPE_LEVEL_HIGH>; ++ ranges = <0x01000000 0x00 0x00000000 0x800 0x50000000 0x00 0x00300000>, ++ <0x02000000 0x00 0x60000000 0x800 0x60000000 0x00 0x08000000>, ++ <0x03000000 0x20 0x00000000 0x820 0x00000000 0x08 0x00000000>; ++ }; ++ ++ pcie1: peu0-c1 { ++ compatible = "pci-host-ecam-generic"; ++ device_type = "pci"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ #interrupt-cells = <1>; ++ reg = <0x800 0x42000000 0 0x2000000>; ++ msi-parent = <&its>; ++ bus-range = <0x20 0x3f>; ++ interrupt-map-mask = <0x0 0x0 0x0 0x7>; ++ interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0x0 GIC_SPI 0x33 IRQ_TYPE_LEVEL_HIGH>, ++ <0x0 0x0 0x0 0x2 &gic 0x0 0x0 GIC_SPI 0x34 IRQ_TYPE_LEVEL_HIGH>, ++ <0x0 0x0 0x0 0x3 &gic 0x0 0x0 GIC_SPI 0x35 IRQ_TYPE_LEVEL_HIGH>, ++ <0x0 0x0 0x0 0x4 &gic 0x0 0x0 GIC_SPI 0x36 IRQ_TYPE_LEVEL_HIGH>; ++ ranges = <0x01000000 0x00 0x00300000 0x800 0x50300000 0x00 0x00300000>, ++ <0x02000000 0x00 0x68000000 0x800 0x68000000 0x00 0x04000000>, ++ <0x03000000 0x28 0x00000000 0x828 0x00000000 0x04 0x00000000>; ++ }; ++ ++ pcie2: peu0-c2 { ++ compatible = "pci-host-ecam-generic"; ++ device_type = "pci"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ #interrupt-cells = <1>; ++ reg = <0x800 0x44000000 0 0x1000000>; ++ msi-parent = <&its>; ++ bus-range = <0x40 0x4f>; ++ interrupt-map-mask = <0x0 0x0 0x0 0x7>; ++ interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0x0 GIC_SPI 0x33 IRQ_TYPE_LEVEL_HIGH>, ++ <0x0 0x0 0x0 0x2 &gic 0x0 0x0 GIC_SPI 0x34 IRQ_TYPE_LEVEL_HIGH>, ++ <0x0 0x0 0x0 0x3 &gic 0x0 0x0 GIC_SPI 0x35 IRQ_TYPE_LEVEL_HIGH>, ++ 
<0x0 0x0 0x0 0x4 &gic 0x0 0x0 GIC_SPI 0x36 IRQ_TYPE_LEVEL_HIGH>; ++ ranges = <0x01000000 0x00 0x00600000 0x800 0x50600000 0x00 0x00300000>, ++ <0x02000000 0x00 0x6c000000 0x800 0x6c000000 0x00 0x02000000>, ++ <0x03000000 0x2c 0x00000000 0x82c 0x00000000 0x04 0x00000000>; ++ }; ++ ++ pcie3: peu1-c0 { ++ compatible = "pci-host-ecam-generic"; ++ device_type = "pci"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ #interrupt-cells = <1>; ++ reg = <0x800 0x45000000 0 0x2000000>; ++ msi-parent = <&its>; ++ bus-range = <0x50 0x6f>; ++ interrupt-map-mask = <0x0 0x0 0x0 0x7>; ++ interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0x0 GIC_SPI 0x33 IRQ_TYPE_LEVEL_HIGH>, ++ <0x0 0x0 0x0 0x2 &gic 0x0 0x0 GIC_SPI 0x34 IRQ_TYPE_LEVEL_HIGH>, ++ <0x0 0x0 0x0 0x3 &gic 0x0 0x0 GIC_SPI 0x35 IRQ_TYPE_LEVEL_HIGH>, ++ <0x0 0x0 0x0 0x4 &gic 0x0 0x0 GIC_SPI 0x36 IRQ_TYPE_LEVEL_HIGH>; ++ ranges = <0x01000000 0x00 0x00900000 0x800 0x50900000 0x00 0x00300000>, ++ <0x02000000 0x00 0x6e000000 0x800 0x6e000000 0x00 0x0a000000>, ++ <0x03000000 0x20 0x00000000 0x830 0x00000000 0x08 0x00000000>; ++ }; ++ ++ pcie4: peu1-c1 { ++ compatible = "pci-host-ecam-generic"; ++ device_type = "pci"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ #interrupt-cells = <1>; ++ reg = <0x800 0x47000000 0 0x1000000>; ++ msi-parent = <&its>; ++ bus-range = <0x70 0x7f>; ++ interrupt-map-mask = <0x0 0x0 0x0 0x7>; ++ interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0x0 GIC_SPI 0x33 IRQ_TYPE_LEVEL_HIGH>, ++ <0x0 0x0 0x0 0x2 &gic 0x0 0x0 GIC_SPI 0x34 IRQ_TYPE_LEVEL_HIGH>, ++ <0x0 0x0 0x0 0x3 &gic 0x0 0x0 GIC_SPI 0x35 IRQ_TYPE_LEVEL_HIGH>, ++ <0x0 0x0 0x0 0x4 &gic 0x0 0x0 GIC_SPI 0x36 IRQ_TYPE_LEVEL_HIGH>; ++ ranges = <0x01000000 0x00 0x00c00000 0x800 0x50c00000 0x00 0x00300000>, ++ <0x02000000 0x00 0x78000000 0x800 0x78000000 0x00 0x08000000>, ++ <0x03000000 0x38 0x00000000 0x838 0x00000000 0x08 0x00000000>; ++ }; ++ }; ++}; +diff --git a/arch/arm64/boot/dts/phytium/ft2000plus-SR-devboard-64c-dsk.dts 
b/arch/arm64/boot/dts/phytium/ft2000plus-SR-devboard-64c-dsk.dts +new file mode 100644 +index 000000000000..3e3e1e4a1c38 +--- /dev/null ++++ b/arch/arm64/boot/dts/phytium/ft2000plus-SR-devboard-64c-dsk.dts +@@ -0,0 +1,136 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * DTS file for Phytium FT-2000plus devboard. ++ * ++ * Copyright (C) 2019, Phytium Technology Co., Ltd. ++ */ ++ ++/dts-v1/; ++/memreserve/ 0x0000000080000000 0x0000000000010000; ++ ++#include "ft2000plus-SR-psci-soc.dtsi" ++ ++/ { ++ model = "FT-2000plus Development Board"; ++ compatible = "phytium,ft-2000plus"; ++ ++ chosen { ++ linux,pci-probe-only = <1>; ++ }; ++ ++ /* NUMA Node-0 */ ++ memory@00 { ++ device_type = "memory"; ++ /* 0 - 512MiB (512MiB)*/ ++ reg = <0x00000000 0x00000000 0x0 0x20000000>; ++ numa-node-id = <0>; ++ }; ++ memory@01 { ++ device_type = "memory"; ++ /* 2GiB - 4GiB (2GiB) */ ++ reg = <0x00000000 0x80000000 0x0 0x80000000>; ++ numa-node-id = <0>; ++ }; ++ memory@02 { ++ device_type = "memory"; ++ /* 512GiB - 516GiB (4GiB) */ ++ reg = <0x00000080 0x00000000 0x1 0x00000000>; ++ numa-node-id = <0>; ++ }; ++ /* NUMA Node-1 */ ++ memory@10 { ++ device_type = "memory"; ++ /* 1024GiB - 1028GiB (4GiB) */ ++ reg = <0x00000100 0x00000000 0x1 0x00000000>; ++ numa-node-id = <1>; ++ }; ++ memory@11 { ++ device_type = "memory"; ++ /* 1536GiB - 1540GiB (4GiB) */ ++ reg = <0x00000180 0x00000000 0x1 0x00000000>; ++ numa-node-id = <1>; ++ }; ++ /* NUMA Node-2 */ ++ memory@20 { ++ device_type = "memory"; ++ /* 2048GiB - 2052GiB (4GiB) */ ++ reg = <0x00000200 0x00000000 0x1 0x00000000>; ++ numa-node-id = <2>; ++ }; ++ memory@21 { ++ device_type = "memory"; ++ /* 2560GiB - 2564GiB (4GiB) */ ++ reg = <0x00000280 0x00000000 0x1 0x00000000>; ++ numa-node-id = <2>; ++ }; ++ /* NUMA Node-3 */ ++ memory@30 { ++ device_type = "memory"; ++ /* 3072GiB - 3076GiB (4GiB) */ ++ reg = <0x00000300 0x00000000 0x1 0x00000000>; ++ numa-node-id = <3>; ++ }; ++ memory@31 { ++ device_type = "memory"; ++ /* 3584GiB 
- 3588GiB (4GiB) */ ++ reg = <0x00000380 0x00000000 0x1 0x00000000>; ++ numa-node-id = <3>; ++ }; ++ /* NUMA Node-4 */ ++ memory@40 { ++ device_type = "memory"; ++ /* 4096GiB - 4100GiB (4GiB) */ ++ reg = <0x00000400 0x00000000 0x1 0x00000000>; ++ numa-node-id = <4>; ++ }; ++ memory@41 { ++ device_type = "memory"; ++ /* 4608GiB - 4612GiB (4GiB) */ ++ reg = <0x00000480 0x00000000 0x1 0x00000000>; ++ numa-node-id = <4>; ++ }; ++ /* NUMA Node-5 */ ++ memory@50 { ++ device_type = "memory"; ++ /* 5120GiB - 5124GiB (4GiB) */ ++ reg = <0x00000500 0x00000000 0x1 0x00000000>; ++ numa-node-id = <5>; ++ }; ++ memory@51 { ++ device_type = "memory"; ++ /* 5632GiB - 5636GiB (4GiB) */ ++ reg = <0x00000580 0x00000000 0x1 0x00000000>; ++ numa-node-id = <5>; ++ }; ++ /* NUMA Node-6 */ ++ memory@60 { ++ device_type = "memory"; ++ /* 6144GiB - 6148GiB (4GiB) */ ++ reg = <0x00000600 0x00000000 0x1 0x00000000>; ++ numa-node-id = <6>; ++ }; ++ memory@61 { ++ device_type = "memory"; ++ /* 6656GiB - 6660GiB (4GiB) */ ++ reg = <0x00000680 0x00000000 0x1 0x00000000>; ++ numa-node-id = <6>; ++ }; ++ /* NUMA Node-7 */ ++ memory@70 { ++ device_type = "memory"; ++ /* 7168GiB - 7172GiB (4GiB) */ ++ reg = <0x00000700 0x00000000 0x1 0x00000000>; ++ numa-node-id = <7>; ++ }; ++ memory@71 { ++ device_type = "memory"; ++ /* 7680GiB - 7684GiB (4GiB) */ ++ reg = <0x00000780 0x00000000 0x1 0x00000000>; ++ numa-node-id = <7>; ++ }; ++ ++}; ++ ++&uart1 { ++ status = "ok"; ++}; +diff --git a/arch/arm64/boot/dts/phytium/ft2000plus-SR-psci-soc.dtsi b/arch/arm64/boot/dts/phytium/ft2000plus-SR-psci-soc.dtsi +new file mode 100644 +index 000000000000..687df1601f3e +--- /dev/null ++++ b/arch/arm64/boot/dts/phytium/ft2000plus-SR-psci-soc.dtsi +@@ -0,0 +1,986 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * dts file for FT-2000plus SoC ++ * ++ * Copyright (C) 2018-2019, Phytium Technology Co., Ltd. 
++ */ ++ ++#include ++ ++/ { ++ compatible = "phytium,ft2000plus"; ++ interrupt-parent = <&gic>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ++ psci { ++ compatible = "arm,psci-1.0"; ++ method = "smc"; ++ cpu_suspend = <0xc4000001>; ++ cpu_off = <0x84000002>; ++ cpu_on = <0xc4000003>; ++ sys_poweroff = <0x84000008>; ++ sys_reset = <0x84000009>; ++ }; ++ ++ cpus { ++ #address-cells = <0x2>; ++ #size-cells = <0x0>; ++ ++ cpu-map { ++ cluster0 { ++ core0 { ++ cpu = <&cpu0>; ++ }; ++ core1 { ++ cpu = <&cpu1>; ++ }; ++ core2 { ++ cpu = <&cpu2>; ++ }; ++ core3 { ++ cpu = <&cpu3>; ++ }; ++ }; ++ ++ cluster1 { ++ core0 { ++ cpu = <&cpu4>; ++ }; ++ core1 { ++ cpu = <&cpu5>; ++ }; ++ core2 { ++ cpu = <&cpu6>; ++ }; ++ core3 { ++ cpu = <&cpu7>; ++ }; ++ }; ++ ++ cluster2 { ++ core0 { ++ cpu = <&cpu8>; ++ }; ++ core1 { ++ cpu = <&cpu9>; ++ }; ++ core2 { ++ cpu = <&cpu10>; ++ }; ++ core3 { ++ cpu = <&cpu11>; ++ }; ++ }; ++ ++ cluster3 { ++ core0 { ++ cpu = <&cpu12>; ++ }; ++ core1 { ++ cpu = <&cpu13>; ++ }; ++ core2 { ++ cpu = <&cpu14>; ++ }; ++ core3 { ++ cpu = <&cpu15>; ++ }; ++ }; ++ ++ cluster4 { ++ core0 { ++ cpu = <&cpu16>; ++ }; ++ core1 { ++ cpu = <&cpu17>; ++ }; ++ core2 { ++ cpu = <&cpu18>; ++ }; ++ core3 { ++ cpu = <&cpu19>; ++ }; ++ }; ++ ++ cluster5 { ++ core0 { ++ cpu = <&cpu20>; ++ }; ++ core1 { ++ cpu = <&cpu21>; ++ }; ++ core2 { ++ cpu = <&cpu22>; ++ }; ++ core3 { ++ cpu = <&cpu23>; ++ }; ++ }; ++ ++ cluster6 { ++ core0 { ++ cpu = <&cpu24>; ++ }; ++ core1 { ++ cpu = <&cpu25>; ++ }; ++ core2 { ++ cpu = <&cpu26>; ++ }; ++ core3 { ++ cpu = <&cpu27>; ++ }; ++ }; ++ ++ cluster7 { ++ core0 { ++ cpu = <&cpu28>; ++ }; ++ core1 { ++ cpu = <&cpu29>; ++ }; ++ core2 { ++ cpu = <&cpu30>; ++ }; ++ core3 { ++ cpu = <&cpu31>; ++ }; ++ }; ++ ++ cluster8 { ++ core0 { ++ cpu = <&cpu32>; ++ }; ++ core1 { ++ cpu = <&cpu33>; ++ }; ++ core2 { ++ cpu = <&cpu34>; ++ }; ++ core3 { ++ cpu = <&cpu35>; ++ }; ++ }; ++ ++ cluster9 { ++ core0 { ++ cpu = <&cpu36>; ++ }; ++ core1 { ++ cpu 
= <&cpu37>; ++ }; ++ core2 { ++ cpu = <&cpu38>; ++ }; ++ core3 { ++ cpu = <&cpu39>; ++ }; ++ }; ++ ++ cluster10 { ++ core0 { ++ cpu = <&cpu40>; ++ }; ++ core1 { ++ cpu = <&cpu41>; ++ }; ++ core2 { ++ cpu = <&cpu42>; ++ }; ++ core3 { ++ cpu = <&cpu43>; ++ }; ++ }; ++ ++ cluster11 { ++ core0 { ++ cpu = <&cpu44>; ++ }; ++ core1 { ++ cpu = <&cpu45>; ++ }; ++ core2 { ++ cpu = <&cpu46>; ++ }; ++ core3 { ++ cpu = <&cpu47>; ++ }; ++ }; ++ ++ cluster12 { ++ core0 { ++ cpu = <&cpu48>; ++ }; ++ core1 { ++ cpu = <&cpu49>; ++ }; ++ core2 { ++ cpu = <&cpu50>; ++ }; ++ core3 { ++ cpu = <&cpu51>; ++ }; ++ }; ++ ++ cluster13 { ++ core0 { ++ cpu = <&cpu52>; ++ }; ++ core1 { ++ cpu = <&cpu53>; ++ }; ++ core2 { ++ cpu = <&cpu54>; ++ }; ++ core3 { ++ cpu = <&cpu55>; ++ }; ++ }; ++ ++ cluster14 { ++ core0 { ++ cpu = <&cpu56>; ++ }; ++ core1 { ++ cpu = <&cpu57>; ++ }; ++ core2 { ++ cpu = <&cpu58>; ++ }; ++ core3 { ++ cpu = <&cpu59>; ++ }; ++ }; ++ ++ cluster15 { ++ core0 { ++ cpu = <&cpu60>; ++ }; ++ core1 { ++ cpu = <&cpu61>; ++ }; ++ core2 { ++ cpu = <&cpu62>; ++ }; ++ core3 { ++ cpu = <&cpu63>; ++ }; ++ }; ++ }; ++ ++ cpu0: cpu@0 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x0>; ++ enable-method = "psci"; ++ numa-node-id = <0>; ++ }; ++ ++ cpu1: cpu@1 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x1>; ++ enable-method = "psci"; ++ numa-node-id = <0>; ++ }; ++ ++ cpu2: cpu@2 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x2>; ++ enable-method = "psci"; ++ numa-node-id = <0>; ++ }; ++ ++ cpu3: cpu@3 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x3>; ++ enable-method = "psci"; ++ numa-node-id = <0>; ++ }; ++ ++ cpu4: cpu@100 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x100>; ++ enable-method = "psci"; ++ numa-node-id = <0>; ++ }; ++ ++ cpu5: cpu@101 { ++ device_type = "cpu"; ++ 
compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x101>; ++ enable-method = "psci"; ++ numa-node-id = <0>; ++ }; ++ ++ cpu6: cpu@102 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x102>; ++ enable-method = "psci"; ++ numa-node-id = <0>; ++ }; ++ ++ cpu7: cpu@103 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x103>; ++ enable-method = "psci"; ++ numa-node-id = <0>; ++ }; ++ ++ cpu8: cpu@200 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x200>; ++ enable-method = "psci"; ++ numa-node-id = <1>; ++ }; ++ ++ cpu9: cpu@201 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x201>; ++ enable-method = "psci"; ++ numa-node-id = <1>; ++ }; ++ ++ cpu10: cpu@202 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x202>; ++ enable-method = "psci"; ++ numa-node-id = <1>; ++ }; ++ ++ cpu11: cpu@203 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x203>; ++ enable-method = "psci"; ++ numa-node-id = <1>; ++ }; ++ ++ cpu12: cpu@300 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x300>; ++ enable-method = "psci"; ++ numa-node-id = <1>; ++ }; ++ ++ cpu13: cpu@301 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x301>; ++ enable-method = "psci"; ++ numa-node-id = <1>; ++ }; ++ ++ cpu14: cpu@302 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x302>; ++ enable-method = "psci"; ++ numa-node-id = <1>; ++ }; ++ ++ cpu15: cpu@303 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x303>; ++ enable-method = "psci"; ++ numa-node-id = <1>; ++ }; ++ ++ cpu16: cpu@400 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x400>; ++ enable-method = "psci"; ++ numa-node-id = 
<2>; ++ }; ++ ++ cpu17: cpu@401 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x401>; ++ enable-method = "psci"; ++ numa-node-id = <2>; ++ }; ++ ++ cpu18: cpu@402 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x402>; ++ enable-method = "psci"; ++ numa-node-id = <2>; ++ }; ++ ++ cpu19: cpu@403 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x403>; ++ enable-method = "psci"; ++ numa-node-id = <2>; ++ }; ++ ++ cpu20: cpu@500 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x500>; ++ enable-method = "psci"; ++ numa-node-id = <2>; ++ }; ++ ++ cpu21: cpu@501 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x501>; ++ enable-method = "psci"; ++ numa-node-id = <2>; ++ }; ++ ++ cpu22: cpu@502 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x502>; ++ enable-method = "psci"; ++ numa-node-id = <2>; ++ }; ++ ++ cpu23: cpu@503 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x503>; ++ enable-method = "psci"; ++ numa-node-id = <2>; ++ }; ++ ++ cpu24: cpu@600 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x600>; ++ enable-method = "psci"; ++ numa-node-id = <3>; ++ }; ++ ++ cpu25: cpu@601 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x601>; ++ enable-method = "psci"; ++ numa-node-id = <3>; ++ }; ++ ++ cpu26: cpu@602 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x602>; ++ enable-method = "psci"; ++ numa-node-id = <3>; ++ }; ++ ++ cpu27: cpu@603 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x603>; ++ enable-method = "psci"; ++ numa-node-id = <3>; ++ }; ++ ++ cpu28: cpu@700 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ 
reg = <0x0 0x700>; ++ enable-method = "psci"; ++ numa-node-id = <3>; ++ }; ++ ++ cpu29: cpu@701 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x701>; ++ enable-method = "psci"; ++ numa-node-id = <3>; ++ }; ++ ++ cpu30: cpu@702 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x702>; ++ enable-method = "psci"; ++ numa-node-id = <3>; ++ }; ++ ++ cpu31: cpu@703 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x703>; ++ enable-method = "psci"; ++ numa-node-id = <3>; ++ }; ++ ++ cpu32: cpu@800 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x800>; ++ enable-method = "psci"; ++ numa-node-id = <4>; ++ }; ++ ++ cpu33: cpu@801 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x801>; ++ enable-method = "psci"; ++ numa-node-id = <4>; ++ }; ++ ++ cpu34: cpu@802 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x802>; ++ enable-method = "psci"; ++ numa-node-id = <4>; ++ }; ++ ++ cpu35: cpu@803 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x803>; ++ enable-method = "psci"; ++ numa-node-id = <4>; ++ }; ++ ++ cpu36: cpu@900 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x900>; ++ enable-method = "psci"; ++ numa-node-id = <4>; ++ }; ++ ++ cpu37: cpu@901 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x901>; ++ enable-method = "psci"; ++ numa-node-id = <4>; ++ }; ++ ++ cpu38: cpu@902 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x902>; ++ enable-method = "psci"; ++ numa-node-id = <4>; ++ }; ++ ++ cpu39: cpu@903 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0x903>; ++ enable-method = "psci"; ++ numa-node-id = <4>; ++ }; ++ ++ cpu40: cpu@a00 { ++ 
device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xa00>; ++ enable-method = "psci"; ++ numa-node-id = <5>; ++ }; ++ ++ cpu41: cpu@a01 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xa01>; ++ enable-method = "psci"; ++ numa-node-id = <5>; ++ }; ++ ++ cpu42: cpu@a02 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xa02>; ++ enable-method = "psci"; ++ numa-node-id = <5>; ++ }; ++ ++ cpu43: cpu@a03 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xa03>; ++ enable-method = "psci"; ++ numa-node-id = <5>; ++ }; ++ ++ cpu44: cpu@b00 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xb00>; ++ enable-method = "psci"; ++ numa-node-id = <5>; ++ }; ++ ++ cpu45: cpu@b01 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xb01>; ++ enable-method = "psci"; ++ numa-node-id = <5>; ++ }; ++ ++ cpu46: cpu@b02 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xb02>; ++ enable-method = "psci"; ++ numa-node-id = <5>; ++ }; ++ ++ cpu47: cpu@b03 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xb03>; ++ enable-method = "psci"; ++ numa-node-id = <5>; ++ }; ++ ++ cpu48: cpu@c00 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xc00>; ++ enable-method = "psci"; ++ numa-node-id = <6>; ++ }; ++ ++ cpu49: cpu@c01 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xc01>; ++ enable-method = "psci"; ++ numa-node-id = <6>; ++ }; ++ ++ cpu50: cpu@c02 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xc02>; ++ enable-method = "psci"; ++ numa-node-id = <6>; ++ }; ++ ++ cpu51: cpu@c03 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xc03>; ++ enable-method 
= "psci"; ++ numa-node-id = <6>; ++ }; ++ ++ cpu52: cpu@d00 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xd00>; ++ enable-method = "psci"; ++ numa-node-id = <6>; ++ }; ++ ++ cpu53: cpu@d01 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xd01>; ++ enable-method = "psci"; ++ numa-node-id = <6>; ++ }; ++ ++ cpu54: cpu@d02 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xd02>; ++ enable-method = "psci"; ++ numa-node-id = <6>; ++ }; ++ ++ cpu55: cpu@d03 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xd03>; ++ enable-method = "psci"; ++ numa-node-id = <6>; ++ }; ++ ++ cpu56: cpu@e00 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xe00>; ++ enable-method = "psci"; ++ numa-node-id = <7>; ++ }; ++ ++ cpu57: cpu@e01 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xe01>; ++ enable-method = "psci"; ++ numa-node-id = <7>; ++ }; ++ ++ cpu58: cpu@e02 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xe02>; ++ enable-method = "psci"; ++ numa-node-id = <7>; ++ }; ++ ++ cpu59: cpu@e03 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xe03>; ++ enable-method = "psci"; ++ numa-node-id = <7>; ++ }; ++ ++ cpu60: cpu@f00 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xf00>; ++ enable-method = "psci"; ++ numa-node-id = <7>; ++ }; ++ ++ cpu61: cpu@f01 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xf01>; ++ enable-method = "psci"; ++ numa-node-id = <7>; ++ }; ++ ++ cpu62: cpu@f02 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xf02>; ++ enable-method = "psci"; ++ numa-node-id = <7>; ++ }; ++ ++ cpu63: cpu@f03 { ++ device_type = "cpu"; ++ compatible = 
"phytium,ftc662", "arm,armv8"; ++ reg = <0x0 0xf03>; ++ enable-method = "psci"; ++ numa-node-id = <7>; ++ }; ++ }; ++ ++ distance-map { ++ compatible = "numa-distance-map-v1"; ++ distance-matrix = <0x0 0x0 0x0a>, ++ <0x0 0x1 0x14>, ++ <0x0 0x2 0x28>, ++ <0x0 0x3 0x1e>, ++ <0x0 0x4 0x14>, ++ <0x0 0x5 0x1e>, ++ <0x0 0x6 0x32>, ++ <0x0 0x7 0x28>, ++ <0x1 0x0 0x14>, ++ <0x1 0x1 0x0a>, ++ <0x1 0x2 0x1e>, ++ <0x1 0x3 0x14>, ++ <0x1 0x4 0x1e>, ++ <0x1 0x5 0x14>, ++ <0x1 0x6 0x28>, ++ <0x1 0x7 0x1e>, ++ <0x2 0x0 0x28>, ++ <0x2 0x1 0x1e>, ++ <0x2 0x2 0x0a>, ++ <0x2 0x3 0x14>, ++ <0x2 0x4 0x32>, ++ <0x2 0x5 0x28>, ++ <0x2 0x6 0x14>, ++ <0x2 0x7 0x1e>, ++ <0x3 0x0 0x1e>, ++ <0x3 0x1 0x14>, ++ <0x3 0x2 0x14>, ++ <0x3 0x3 0x0a>, ++ <0x3 0x4 0x28>, ++ <0x3 0x5 0x1e>, ++ <0x3 0x6 0x1e>, ++ <0x3 0x7 0x14>, ++ <0x4 0x0 0x14>, ++ <0x4 0x1 0x1e>, ++ <0x4 0x2 0x32>, ++ <0x4 0x3 0x28>, ++ <0x4 0x4 0x0a>, ++ <0x4 0x5 0x14>, ++ <0x4 0x6 0x28>, ++ <0x4 0x7 0x1e>, ++ <0x5 0x0 0x1e>, ++ <0x5 0x1 0x14>, ++ <0x5 0x2 0x28>, ++ <0x5 0x3 0x1e>, ++ <0x5 0x4 0x14>, ++ <0x5 0x5 0x0a>, ++ <0x5 0x6 0x1e>, ++ <0x5 0x7 0x14>, ++ <0x6 0x0 0x32>, ++ <0x6 0x1 0x28>, ++ <0x6 0x2 0x14>, ++ <0x6 0x3 0x1e>, ++ <0x6 0x4 0x28>, ++ <0x6 0x5 0x1e>, ++ <0x6 0x6 0x0a>, ++ <0x6 0x7 0x14>, ++ <0x7 0x0 0x28>, ++ <0x7 0x1 0x1e>, ++ <0x7 0x2 0x1e>, ++ <0x7 0x3 0x14>, ++ <0x7 0x4 0x1e>, ++ <0x7 0x5 0x14>, ++ <0x7 0x6 0x14>, ++ <0x7 0x7 0x0a>; ++ }; ++ ++ ++ gic: interrupt-controller@8002a000000 { ++ compatible = "arm,gic-v3"; ++ #interrupt-cells = <3>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ interrupt-controller; ++ reg = <0x0800 0x2a000000 0 0x10000>, /* GICD */ ++ <0x0800 0x2a800000 0 0x800000>, /* GICR */ ++ <0x0800 0x29c00000 0 0x10000>, /* GICC */ ++ <0x0800 0x29c10000 0 0x10000>, /* GICH */ ++ <0x0800 0x29c20000 0 0x10000>; /* GICV */ ++ interrupts = ; ++ ++ its: gic-its@8002a020000 { ++ compatible = "arm,gic-v3-its"; ++ msi-controller; ++ reg = <0x0800 0x2a020000 0x0 0x20000>; ++ }; ++ }; ++ 
++ timer { ++ compatible = "arm,armv8-timer"; ++ interrupts = , ++ , ++ , ++ ; ++ clock-frequency = <50000000>; ++ }; ++ ++ soc { ++ compatible = "simple-bus"; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ dma-coherent; ++ ranges; ++ ++ uart0: serial@28000000 { ++ compatible = "snps,dw-apb-uart"; ++ reg = <0x800 0x28000000 0x0 0x1000>; ++ clock-frequency = <50000000>; ++ interrupts = ; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ status = "disabled"; ++ }; ++ ++ uart1: serial@28001000 { ++ compatible = "snps,dw-apb-uart"; ++ reg = <0x800 0x28001000 0x0 0x1000>; ++ clock-frequency = <50000000>; ++ interrupts = ; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ status = "disabled"; ++ }; ++ ++ gpio0:gpio@80028006000 { ++ compatible = "snps,dw-apb-gpio"; ++ reg = <0x800 0x28006000 0x0 0x1000>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ status = "ok"; ++ ++ gpio-controller@0 { ++ compatible = "snps,dw-apb-gpio-port"; ++ gpio-controller; ++ #gpio-cells = <0x2>; ++ snps,nr-gpios = <0x8>; ++ reg = <0x0>; ++ }; ++ ++ gpio-controller@1 { ++ compatible = "snps,dw-apb-gpio-port"; ++ gpio-controller; ++ #gpio-cells = <0x2>; ++ snps,nr-gpios = <0x8>; ++ reg = <0x1>; ++ }; ++ ++ gpio-controller@2 { ++ compatible = "snps,dw-apb-gpio-port"; ++ gpio-controller; ++ #gpio-cells = <0x2>; ++ snps,nr-gpios = <0x8>; ++ reg = <0x2>; ++ }; ++ ++ gpio-controller@3 { ++ compatible = "snps,dw-apb-gpio-port"; ++ gpio-controller; ++ #gpio-cells = <0x2>; ++ snps,nr-gpios = <0x8>; ++ reg = <0x3>; ++ }; ++ }; ++ ++ i2c0: i2c@80028002000 { ++ compatible = "snps,designware-i2c"; ++ reg = <0x800 0x28002000 0x0 0x1000>; ++ interrupts = ; ++ clock-frequency = <100000>; ++ status = "ok"; ++ }; ++ ++ i2c1: i2c@80028003000 { ++ compatible = "snps,designware-i2c"; ++ reg = <0x800 0x28003000 0x0 0x1000>; ++ interrupts = ; ++ clock-frequency = <100000>; ++ status = "ok"; ++ }; ++ ++ pcie0: peu0-c0 { ++ compatible = "pci-host-ecam-generic"; ++ device_type = "pci"; ++ #address-cells = <3>; ++ 
#size-cells = <2>; ++ #interrupt-cells = <1>; ++ reg = <0x800 0x40000000 0 0x10000000>; ++ msi-parent = <&its>; ++ bus-range = <0 0xff>; ++ interrupt-map-mask = <0x0 0x0 0x0 0x7>; ++ interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0x0 GIC_SPI 0x33 IRQ_TYPE_LEVEL_HIGH>, ++ <0x0 0x0 0x0 0x2 &gic 0x0 0x0 GIC_SPI 0x34 IRQ_TYPE_LEVEL_HIGH>, ++ <0x0 0x0 0x0 0x3 &gic 0x0 0x0 GIC_SPI 0x35 IRQ_TYPE_LEVEL_HIGH>, ++ <0x0 0x0 0x0 0x4 &gic 0x0 0x0 GIC_SPI 0x36 IRQ_TYPE_LEVEL_HIGH>; ++ ranges = <0x01000000 0x00 0x00000000 0x800 0x50000000 0x00 0x00f00000>, ++ <0x02000000 0x00 0x60000000 0x800 0x60000000 0x00 0x20000000>, ++ <0x03000000 0x20 0x00000000 0x820 0x00000000 0x20 0x00000000>; ++ }; ++ }; ++}; +diff --git a/arch/arm64/boot/dts/phytium/ft2004-devboard-d4-dsk.dts b/arch/arm64/boot/dts/phytium/ft2004-devboard-d4-dsk.dts +new file mode 100644 +index 000000000000..5bef2e886292 +--- /dev/null ++++ b/arch/arm64/boot/dts/phytium/ft2004-devboard-d4-dsk.dts +@@ -0,0 +1,73 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * DTS file for phytium FT-2000/4 devboard (FT-2000/4-D4-DSK series) ++ * ++ * Copyright (C) 2018-2019, Phytium Technology Co., Ltd. 
++ */ ++ ++/dts-v1/; ++/memreserve/ 0x80000000 0x10000; ++ ++#include "ft2004-generic-psci-soc.dtsi" ++ ++/{ ++ model = "FT-2000/4-D4-DSK Development Board"; ++ compatible = "phytium,ft-2004"; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ++ chosen { ++ stdout-path = "uart1:115200n8"; ++ }; ++ ++ memory@00{ ++ device_type = "memory"; ++ reg = <0x0 0x80000000 0x1 0x00000000>; ++ }; ++ ++ memory@01{ ++ device_type = "memory"; ++ reg = <0x20 0x00000000 0x1 0x00000000>; ++ }; ++ ++ firmware { ++ optee { ++ compatible = "linaro,optee-tz"; ++ method = "smc"; ++ }; ++ }; ++}; ++ ++&rtc0 { ++ status = "ok"; ++}; ++ ++&uart1 { ++ status = "ok"; ++}; ++ ++&gmac0 { ++ status = "ok"; ++ phy-mode = "rgmii-rxid"; ++}; ++ ++&gmac1 { ++ status = "ok"; ++ phy-mode = "rgmii-rxid"; ++}; ++ ++&spi0 { ++ status = "ok"; ++}; ++ ++&qspi { ++ status = "ok"; ++}; ++ ++&i2c0 { ++ status = "ok"; ++}; ++ ++&i2c1 { ++ status = "ok"; ++}; +diff --git a/arch/arm64/boot/dts/phytium/ft2004-generic-psci-soc.dtsi b/arch/arm64/boot/dts/phytium/ft2004-generic-psci-soc.dtsi +new file mode 100644 +index 000000000000..a6451654e82f +--- /dev/null ++++ b/arch/arm64/boot/dts/phytium/ft2004-generic-psci-soc.dtsi +@@ -0,0 +1,474 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * dts file for FT-2000/4 SoC ++ * ++ * Copyright (C) 2018-2019, Phytium Technology Co., Ltd. 
++ */ ++ ++#include ++ ++/ { ++ compatible = "phytium,ft2004"; ++ interrupt-parent = <&gic>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ++ aliases { ++ ethernet0 = &gmac0; ++ ethernet1 = &gmac1; ++ }; ++ ++ psci { ++ compatible = "arm,psci-1.0"; ++ method = "smc"; ++ cpu_suspend = <0xc4000001>; ++ cpu_off = <0x84000002>; ++ cpu_on = <0xc4000003>; ++ sys_poweroff = <0x84000008>; ++ sys_reset = <0x84000009>; ++ }; ++ ++ cpus { ++ #address-cells = <0x2>; ++ #size-cells = <0x0>; ++ ++ cpu0: cpu@0 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc663", "arm,armv8"; ++ reg = <0x0 0x0>; ++ enable-method = "psci"; ++ numa-node-id = <0>; ++ clocks = <&scpi_dvfs 0>; ++ }; ++ ++ cpu1: cpu@1 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc663", "arm,armv8"; ++ reg = <0x0 0x1>; ++ enable-method = "psci"; ++ numa-node-id = <0>; ++ clocks = <&scpi_dvfs 0>; ++ }; ++ ++ cpu2: cpu@100 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc663", "arm,armv8"; ++ reg = <0x0 0x100>; ++ enable-method = "psci"; ++ numa-node-id = <0>; ++ clocks = <&scpi_dvfs 1>; ++ }; ++ ++ cpu3: cpu@101 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc663", "arm,armv8"; ++ reg = <0x0 0x101>; ++ enable-method = "psci"; ++ numa-node-id = <0>; ++ clocks = <&scpi_dvfs 1>; ++ }; ++ }; ++ ++ gic: interrupt-controller@29900000 { ++ compatible = "arm,gic-v3"; ++ #interrupt-cells = <3>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ interrupt-controller; ++ reg = <0x0 0x29900000 0 0x20000>, /* GICD */ ++ <0x0 0x29980000 0 0x80000>, /* GICR */ ++ <0x0 0x29c00000 0 0x10000>, /* GICC */ ++ <0x0 0x29c10000 0 0x10000>, /* GICH */ ++ <0x0 0x29c20000 0 0x10000>; /* GICV */ ++ interrupts = ; ++ ++ its: gic-its@29920000 { ++ compatible = "arm,gic-v3-its"; ++ msi-controller; ++ reg = <0x0 0x29920000 0x0 0x20000>; ++ }; ++ }; ++ ++ timer { ++ compatible = "arm,armv8-timer"; ++ interrupts = , ++ , ++ , ++ ; ++ clock-frequency = <48000000>; ++ }; ++ ++ pmu { ++ compatible = "arm,armv8-pmuv3"; 
++ interrupts = ; ++ }; ++ ++ clocks { ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ ++ clk250mhz: clk250mhz { ++ compatible = "fixed-clock"; ++ #clock-cells = <0>; ++ clock-frequency = <250000000>; ++ }; ++ ++ sysclk_48mhz: clk48mhz { ++ compatible = "fixed-clock"; ++ #clock-cells = <0>; ++ clock-frequency = <48000000>; ++ }; ++ ++ sysclk_600mhz: clk600mhz { ++ compatible = "fixed-clock"; ++ #clock-cells = <0>; ++ clock-frequency = <600000000>; ++ }; ++ }; ++ ++ soc { ++ compatible = "simple-bus"; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ dma-coherent; ++ ranges; ++ ++ gpio0: gpio@28004000 { ++ compatible = "phytium,gpio"; ++ reg = <0x0 0x28004000 0x0 0x1000>; ++ interrupts = ; ++ gpio-controller; ++ #gpio-cells = <2>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ porta { ++ compatible = "phytium,gpio-port"; ++ reg = <0>; ++ nr-gpios = <8>; ++ }; ++ ++ portb { ++ compatible = "phytium,gpio-port"; ++ reg = <1>; ++ nr-gpios = <8>; ++ }; ++ }; ++ ++ gpio1: gpio@28005000 { ++ compatible = "phytium,gpio"; ++ reg = <0x0 0x28005000 0x0 0x1000>; ++ interrupts = ; ++ gpio-controller; ++ #gpio-cells = <2>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ porta { ++ compatible = "phytium,gpio-port"; ++ reg = <0>; ++ nr-gpios = <8>; ++ }; ++ ++ portb { ++ compatible = "phytium,gpio-port"; ++ reg = <1>; ++ nr-gpios = <8>; ++ }; ++ }; ++ ++ uart0: uart@28000000 { ++ compatible = "arm,pl011", "arm,primecell"; ++ reg = <0x0 0x28000000 0x0 0x1000>; ++ baud = <115200>; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ interrupts = ; ++ clocks = <&sysclk_48mhz &sysclk_48mhz>; ++ clock-names = "uartclk", "apb_pclk"; ++ }; ++ ++ uart1: uart@28001000 { ++ compatible = "arm,pl011", "arm,primecell"; ++ reg = <0x0 0x28001000 0x0 0x1000>; ++ baud = <115200>; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ interrupts = ; ++ clocks = <&sysclk_48mhz &sysclk_48mhz>; ++ clock-names = "uartclk", "apb_pclk"; ++ }; ++ ++ uart2: uart@28002000 { ++ compatible = 
"arm,pl011", "arm,primecell"; ++ reg = <0x0 0x28002000 0x0 0x1000>; ++ baud = <115200>; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ interrupts = ; ++ clocks = <&sysclk_48mhz &sysclk_48mhz>; ++ clock-names = "uartclk", "apb_pclk"; ++ }; ++ ++ uart3: uart@28003000 { ++ compatible = "arm,pl011", "arm,primecell"; ++ reg = <0x0 0x28003000 0x0 0x1000>; ++ baud = <115200>; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ interrupts = ; ++ clocks = <&sysclk_48mhz &sysclk_48mhz>; ++ clock-names = "uartclk", "apb_pclk"; ++ }; ++ ++ sdci: sdci@28207c00 { ++ compatible = "phytium,sdci"; ++ reg = <0x0 0x28207c00 0x0 0x100>; ++ interrupts = , ++ , ++ ; ++ clocks = <&sysclk_600mhz &sysclk_600mhz>; ++ clock-names = "phytium_sdc_clk"; ++ no-sdio; ++ no-mmc; ++ no-dma-coherent; ++ }; ++ ++ watchdog0: watchdog@2800a000 { ++ compatible = "arm,sbsa-gwdt"; ++ reg = <0x0 0x2800b000 0x0 0x1000>, ++ <0x0 0x2800a000 0x0 0x1000>; ++ interrupts = ; ++ timeout-sec = <30>; ++ }; ++ ++ watchdog1: watchdog@28016000 { ++ compatible = "arm,sbsa-gwdt"; ++ reg = <0x0 0x28017000 0x0 0x1000>, ++ <0x0 0x28016000 0x0 0x1000>; ++ interrupts = ; ++ timeout-sec = <30>; ++ }; ++ ++ rtc0: rtc@2800d000 { ++ compatible = "phytium,rtc"; ++ reg = <0x0 0x2800d000 0x0 0x1000>; ++ clocks = <&sysclk_48mhz>; ++ clock-names = "rtc_pclk"; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ i2c0: i2c@28006000 { ++ compatible = "snps,designware-i2c"; ++ reg = <0x0 0x28006000 0x0 0x1000>; ++ interrupts = ; ++ clocks = <&sysclk_48mhz>; ++ status = "disabled"; ++ }; ++ ++ i2c1: i2c@28007000 { ++ compatible = "snps,designware-i2c"; ++ reg = <0x0 0x28007000 0x0 0x1000>; ++ interrupts = ; ++ clocks = <&sysclk_48mhz>; ++ status = "disabled"; ++ }; ++ ++ i2c2: i2c@28008000 { ++ compatible = "snps,designware-i2c"; ++ reg = <0x0 0x28008000 0x0 0x1000>; ++ interrupts = ; ++ clocks = <&sysclk_48mhz>; ++ status = "disabled"; ++ }; ++ ++ i2c3: i2c@28009000 { ++ compatible = "snps,designware-i2c"; ++ reg = <0x0 0x28009000 0x0 0x1000>; 
++ interrupts = ; ++ clocks = <&sysclk_48mhz>; ++ status = "disabled"; ++ }; ++ ++ spi0: spi@2800c000 { ++ compatible = "phytium,spi"; ++ interrupts = ; ++ reg = <0x0 0x2800c000 0x0 0x1000>; ++ clocks = <&sysclk_48mhz>; ++ num-cs = <4>; ++ }; ++ ++ spi1: spi@28013000 { ++ compatible = "phytium,spi"; ++ interrupts = ; ++ reg = <0x0 0x28013000 0x0 0x1000>; ++ clocks = <&sysclk_48mhz>; ++ num-cs = <4>; ++ }; ++ ++ qspi: qspi@28014000 { ++ compatible = "phytium,qspi"; ++ reg = <0x0 0x28014000 0x0 0x1000>, ++ <0x0 0x0 0x0 0x02000000>; ++ reg-names = "qspi", "qspi_mm"; ++ clocks = <&sysclk_600mhz>; ++ ++ flash@0 { ++ spi-rx-bus-width = <1>; ++ spi-max-frequency = <600000000>; ++ }; ++ }; ++ ++ pcie: pcie { ++ compatible = "pci-host-ecam-generic"; ++ device_type = "pci"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ #interrupt-cells = <1>; ++ reg = <0x0 0x40000000 0x0 0x10000000>; ++ msi-parent = <&its>; ++ bus-range = <0x0 0xff>; ++ interrupt-map-mask = <0x0 0x0 0x0 0x7>; ++ interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0x0 GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>, ++ <0x0 0x0 0x0 0x2 &gic 0x0 0x0 GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>, ++ <0x0 0x0 0x0 0x3 &gic 0x0 0x0 GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>, ++ <0x0 0x0 0x0 0x4 &gic 0x0 0x0 GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>; ++ ranges = <0x01000000 0x00 0x00000000 0x0 0x50000000 0x0 0x00f00000>, ++ <0x02000000 0x00 0x58000000 0x0 0x58000000 0x0 0x28000000>, ++ <0x03000000 0x10 0x00000000 0x10 0x00000000 0x10 0x00000000>; ++ }; ++ ++ phytium_axi_setup: stmmac-axi-config { ++ snps,wr_osr_lmt = <0>; ++ snps,rd_osr_lmt = <0>; ++ snps,blen = <0 0 0 0 16 8 4>; ++ }; ++ ++ gmac0: eth@2820c000 { ++ compatible = "snps,dwmac"; ++ reg = <0x0 0x2820c000 0x0 0x2000>; ++ interrupts = ; ++ interrupt-names = "macirq"; ++ clocks = <&clk250mhz>; ++ clock-names = "stmmaceth"; ++ status = "disabled"; ++ ++ snps,pbl = <16>; ++ snps,fixed-burst; ++ snps,axi-config = <&phytium_axi_setup>; ++ snps,force_sf_dma_mode; ++ snps,multicast-filter-bins = <64>; ++ 
snps,perfect-filter-entries = <128>; ++ tx-fifo-depth = <4096>; ++ rx-fifo-depth = <4096>; ++ max-frame-size = <9000>; ++ }; ++ ++ gmac1: eth@28210000 { ++ compatible = "snps,dwmac"; ++ reg = <0x0 0x28210000 0x0 0x2000>; ++ interrupts = ; ++ interrupt-names = "macirq"; ++ clocks = <&clk250mhz>; ++ clock-names = "stmmaceth"; ++ status = "disabled"; ++ ++ snps,pbl = <16>; ++ snps,fixed-burst; ++ snps,axi-config = <&phytium_axi_setup>; ++ snps,force_sf_dma_mode; ++ snps,multicast-filter-bins = <64>; ++ snps,perfect-filter-entries = <128>; ++ snps,rx-queues-to-use = <2>; ++ tx-fifo-depth = <4096>; ++ rx-fifo-depth = <4096>; ++ max-frame-size = <9000>; ++ }; ++ ++ can0: can@28207000 { ++ compatible = "phytium,can"; ++ reg = <0x0 0x28207000 0x0 0x400>; ++ interrupts = ; ++ clocks = <&sysclk_600mhz>; ++ clock-names = "phytium_can_clk"; ++ tx-fifo-depth = <0x40>; ++ rx-fifo-depth = <0x40>; ++ }; ++ ++ can1: can@28207400 { ++ compatible = "phytium,can"; ++ reg = <0x0 0x28207400 0x0 0x400>; ++ interrupts = ; ++ clocks = <&sysclk_600mhz>; ++ clock-names = "phytium_can_clk"; ++ tx-fifo-depth = <0x40>; ++ rx-fifo-depth = <0x40>; ++ }; ++ ++ can2: can@028207800 { ++ compatible = "phytium,can"; ++ reg = <0x0 0x28207800 0x0 0x400>; ++ interrupts = ; ++ clocks = <&sysclk_600mhz>; ++ clock-names = "phytium_can_clk"; ++ tx-fifo-depth = <0x40>; ++ rx-fifo-depth = <0x40>; ++ }; ++ ++ hda: hda@28206000 { ++ compatible = "phytium,hda"; ++ reg = <0 0x28206000 0x0 0x1000>; ++ interrupts = ; ++ clocks = <&sysclk_48mhz>; ++ clock-names = "phytium_hda_clk"; ++ }; ++ ++ mbox: mailbox@2a000000 { ++ compatible = "phytium,mbox"; ++ reg = <0x0 0x2a000000 0x0 0x1000>; ++ interrupts = ; ++ #mbox-cells = <1>; ++ clocks = <&sysclk_48mhz>; ++ clock-names = "apb_pclk"; ++ }; ++ ++ sram: sram@2a006000 { ++ compatible = "phytium,ft2004-sram-ns","mmio-sram"; ++ reg = <0x0 0x2a006000 0x0 0x2000>; ++ ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ranges = <0x0 0x0 0x2a006000 0x2000>; ++ ++ scpi_lpri: 
scpi-shmem@0 { ++ compatible = "phytium,ft2004-scpi-shmem"; ++ reg = <0x1000 0x800>; ++ }; ++ }; ++ ++ scpi_protocol: scpi { ++ compatible = "arm,scpi"; ++ mboxes = <&mbox 0>; ++ shmem = <&scpi_lpri>; ++ ++ clocks { ++ compatible = "arm,scpi-clocks"; ++ ++ scpi_dvfs: scpi_clocks@0 { ++ compatible = "arm,scpi-dvfs-clocks"; ++ #clock-cells = <1>; ++ clock-indices = <0>, <1>; ++ clock-output-names = "c0", "c1"; ++ }; ++ }; ++ ++ scpi_sensors: sensors { ++ compatible = "arm,scpi-sensors"; ++ #thermal-sensor-cells = <1>; ++ }; ++ }; ++ }; ++}; +diff --git a/arch/arm64/boot/dts/phytium/ft2004c-devboard-dsk.dts b/arch/arm64/boot/dts/phytium/ft2004c-devboard-dsk.dts +new file mode 100644 +index 000000000000..d8f9c49d7aeb +--- /dev/null ++++ b/arch/arm64/boot/dts/phytium/ft2004c-devboard-dsk.dts +@@ -0,0 +1,74 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * DTS file for Phytium D2000 devboard ++ * ++ * Copyright (C) 2020, Phytium Technology Co., Ltd. ++ */ ++ ++/dts-v1/; ++/memreserve/ 0x80000000 0x10000; ++ ++#include "ft2004c-generic-psci-soc.dtsi" ++ ++/{ ++ model = "FT-2000/4C Development Board"; ++ compatible = "phytium,ft-2004c"; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ++ chosen { ++ stdout-path = "uart1:115200n8"; ++ }; ++ ++ memory@00{ ++ device_type = "memory"; ++ /* 4GiB-64MiB ~ 4GiB is reserved for PBF runtime */ ++ reg = <0x0 0x80000000 0x0 0x7c000000>; ++ }; ++ ++ memory@01{ ++ device_type = "memory"; ++ reg = <0x20 0x00000000 0x1 0x00000000>; ++ }; ++ ++ firmware { ++ optee { ++ compatible = "linaro,optee-tz"; ++ method = "smc"; ++ }; ++ }; ++}; ++ ++&rtc0 { ++ status = "ok"; ++}; ++ ++&uart1 { ++ status = "ok"; ++}; ++ ++&gmac0 { ++ status = "ok"; ++ phy-mode = "rgmii-txid"; ++}; ++ ++&gmac1 { ++ status = "ok"; ++ phy-mode = "rgmii-txid"; ++}; ++ ++&spi0 { ++ status = "ok"; ++}; ++ ++&qspi { ++ status = "ok"; ++}; ++ ++&i2c0 { ++ status = "ok"; ++}; ++ ++&i2c1 { ++ status = "ok"; ++}; +diff --git 
a/arch/arm64/boot/dts/phytium/ft2004c-generic-psci-soc.dtsi b/arch/arm64/boot/dts/phytium/ft2004c-generic-psci-soc.dtsi +new file mode 100644 +index 000000000000..9b09de80abf0 +--- /dev/null ++++ b/arch/arm64/boot/dts/phytium/ft2004c-generic-psci-soc.dtsi +@@ -0,0 +1,486 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * dts file for Phytium D2000 SoC ++ * ++ * Copyright (C) 2020, Phytium Technology Co., Ltd. ++ */ ++ ++#include ++ ++/ { ++ compatible = "phytium,ft2004c"; ++ interrupt-parent = <&gic>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ++ aliases { ++ ethernet0 = &gmac0; ++ ethernet1 = &gmac1; ++ }; ++ ++ psci { ++ compatible = "arm,psci-1.0"; ++ method = "smc"; ++ cpu_suspend = <0xc4000001>; ++ cpu_off = <0x84000002>; ++ cpu_on = <0xc4000003>; ++ sys_poweroff = <0x84000008>; ++ sys_reset = <0x84000009>; ++ }; ++ ++ cpus { ++ #address-cells = <0x2>; ++ #size-cells = <0x0>; ++ ++ cpu0: cpu@0 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc663", "arm,armv8"; ++ reg = <0x0 0x0>; ++ enable-method = "psci"; ++ numa-node-id = <0>; ++ clocks = <&scpi_dvfs 0>; ++ }; ++ ++ cpu1: cpu@1 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc663", "arm,armv8"; ++ reg = <0x0 0x1>; ++ enable-method = "psci"; ++ numa-node-id = <0>; ++ clocks = <&scpi_dvfs 0>; ++ }; ++ ++ cpu2: cpu@100 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc663", "arm,armv8"; ++ reg = <0x0 0x100>; ++ enable-method = "psci"; ++ numa-node-id = <0>; ++ clocks = <&scpi_dvfs 1>; ++ }; ++ ++ cpu3: cpu@101 { ++ device_type = "cpu"; ++ compatible = "phytium,ftc663", "arm,armv8"; ++ reg = <0x0 0x101>; ++ enable-method = "psci"; ++ numa-node-id = <0>; ++ clocks = <&scpi_dvfs 1>; ++ }; ++ }; ++ ++ gic: interrupt-controller@29900000 { ++ compatible = "arm,gic-v3"; ++ #interrupt-cells = <3>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ interrupt-controller; ++ reg = <0x0 0x29a00000 0 0x20000>, /* GICD */ ++ <0x0 0x29b00000 0 0x100000>, /* GICR */ ++ <0x0 0x29c00000 0 0x10000>, 
/* GICC */ ++ <0x0 0x29c10000 0 0x10000>, /* GICH */ ++ <0x0 0x29c20000 0 0x10000>; /* GICV */ ++ interrupts = ; ++ ++ its: gic-its@29920000 { ++ compatible = "arm,gic-v3-its"; ++ msi-controller; ++ reg = <0x0 0x29a20000 0x0 0x20000>; ++ }; ++ }; ++ ++ timer { ++ compatible = "arm,armv8-timer"; ++ interrupts = , ++ , ++ , ++ ; ++ clock-frequency = <48000000>; ++ }; ++ ++ pmu { ++ compatible = "arm,armv8-pmuv3"; ++ interrupts = ; ++ }; ++ ++ clocks { ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ ++ clk250mhz: clk250mhz { ++ compatible = "fixed-clock"; ++ #clock-cells = <0>; ++ clock-frequency = <250000000>; ++ }; ++ ++ sysclk_48mhz: clk48mhz { ++ compatible = "fixed-clock"; ++ #clock-cells = <0>; ++ clock-frequency = <48000000>; ++ }; ++ ++ sysclk_600mhz: clk600mhz { ++ compatible = "fixed-clock"; ++ #clock-cells = <0>; ++ clock-frequency = <600000000>; ++ }; ++ }; ++ ++ soc { ++ compatible = "simple-bus"; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ dma-coherent; ++ ranges; ++ ++ gpio0: gpio@28004000 { ++ compatible = "phytium,gpio"; ++ reg = <0x0 0x28004000 0x0 0x1000>; ++ interrupts = ; ++ gpio-controller; ++ #gpio-cells = <2>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ porta { ++ compatible = "phytium,gpio-port"; ++ reg = <0>; ++ nr-gpios = <8>; ++ }; ++ ++ portb { ++ compatible = "phytium,gpio-port"; ++ reg = <1>; ++ nr-gpios = <8>; ++ }; ++ }; ++ ++ gpio1: gpio@28005000 { ++ compatible = "phytium,gpio"; ++ reg = <0x0 0x28005000 0x0 0x1000>; ++ interrupts = ; ++ gpio-controller; ++ #gpio-cells = <2>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ porta { ++ compatible = "phytium,gpio-port"; ++ reg = <0>; ++ nr-gpios = <8>; ++ }; ++ ++ portb { ++ compatible = "phytium,gpio-port"; ++ reg = <1>; ++ nr-gpios = <8>; ++ }; ++ }; ++ ++ uart0: uart@28000000 { ++ compatible = "arm,pl011", "arm,primecell"; ++ reg = <0x0 0x28000000 0x0 0x1000>; ++ baud = <115200>; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ interrupts = ; ++ clocks = 
<&sysclk_48mhz &sysclk_48mhz>; ++ clock-names = "uartclk", "apb_pclk"; ++ }; ++ ++ uart1: uart@28001000 { ++ compatible = "arm,pl011", "arm,primecell"; ++ reg = <0x0 0x28001000 0x0 0x1000>; ++ baud = <115200>; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ interrupts = ; ++ clocks = <&sysclk_48mhz &sysclk_48mhz>; ++ clock-names = "uartclk", "apb_pclk"; ++ }; ++ ++ uart2: uart@28002000 { ++ compatible = "arm,pl011", "arm,primecell"; ++ reg = <0x0 0x28002000 0x0 0x1000>; ++ baud = <115200>; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ interrupts = ; ++ clocks = <&sysclk_48mhz &sysclk_48mhz>; ++ clock-names = "uartclk", "apb_pclk"; ++ }; ++ ++ uart3: uart@28003000 { ++ compatible = "arm,pl011", "arm,primecell"; ++ reg = <0x0 0x28003000 0x0 0x1000>; ++ baud = <115200>; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ interrupts = ; ++ clocks = <&sysclk_48mhz &sysclk_48mhz>; ++ clock-names = "uartclk", "apb_pclk"; ++ }; ++ ++ sdci: sdci@28207c00 { ++ compatible = "phytium,sdci"; ++ reg = <0x0 0x28207c00 0x0 0x100>; ++ interrupts = , ++ , ++ ; ++ clocks = <&sysclk_600mhz &sysclk_600mhz>; ++ clock-names = "phytium_sdc_clk"; ++ no-sdio; ++ no-mmc; ++ no-dma-coherent; ++ }; ++ ++ watchdog0: watchdog@2800a000 { ++ compatible = "arm,sbsa-gwdt"; ++ reg = <0x0 0x2800b000 0x0 0x1000>, ++ <0x0 0x2800a000 0x0 0x1000>; ++ interrupts = ; ++ timeout-sec = <30>; ++ }; ++ ++ watchdog1: watchdog@28016000 { ++ compatible = "arm,sbsa-gwdt"; ++ reg = <0x0 0x28017000 0x0 0x1000>, ++ <0x0 0x28016000 0x0 0x1000>; ++ interrupts = ; ++ timeout-sec = <30>; ++ }; ++ ++ rtc0: rtc@2800d000 { ++ compatible = "phytium,rtc"; ++ reg = <0x0 0x2800d000 0x0 0x1000>; ++ clocks = <&sysclk_48mhz>; ++ clock-names = "rtc_pclk"; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ i2c0: i2c@28006000 { ++ compatible = "snps,designware-i2c"; ++ reg = <0x0 0x28006000 0x0 0x1000>; ++ interrupts = ; ++ clocks = <&sysclk_48mhz>; ++ status = "disabled"; ++ }; ++ ++ i2c1: i2c@28007000 { ++ compatible = 
"snps,designware-i2c"; ++ reg = <0x0 0x28007000 0x0 0x1000>; ++ interrupts = ; ++ clocks = <&sysclk_48mhz>; ++ status = "disabled"; ++ }; ++ ++ i2c2: i2c@28008000 { ++ compatible = "snps,designware-i2c"; ++ reg = <0x0 0x28008000 0x0 0x1000>; ++ interrupts = ; ++ clocks = <&sysclk_48mhz>; ++ status = "disabled"; ++ }; ++ ++ i2c3: i2c@28009000 { ++ compatible = "snps,designware-i2c"; ++ reg = <0x0 0x28009000 0x0 0x1000>; ++ interrupts = ; ++ clocks = <&sysclk_48mhz>; ++ status = "disabled"; ++ }; ++ ++ spi0: spi@2800c000 { ++ compatible = "phytium,spi"; ++ interrupts = ; ++ reg = <0x0 0x2800c000 0x0 0x1000>; ++ clocks = <&sysclk_48mhz>; ++ num-cs = <4>; ++ }; ++ ++ spi1: spi@28013000 { ++ compatible = "phytium,spi"; ++ interrupts = ; ++ reg = <0x0 0x28013000 0x0 0x1000>; ++ clocks = <&sysclk_48mhz>; ++ num-cs = <4>; ++ }; ++ ++ qspi: qspi@28014000 { ++ compatible = "phytium,qspi"; ++ reg = <0x0 0x28014000 0x0 0x1000>, ++ <0x0 0x0 0x0 0x02000000>; ++ reg-names = "qspi", "qspi_mm"; ++ clocks = <&sysclk_600mhz>; ++ ++ flash@0 { ++ spi-rx-bus-width = <1>; ++ spi-max-frequency = <600000000>; ++ }; ++ }; ++ ++ phytium_axi_setup: stmmac-axi-config { ++ snps,wr_osr_lmt = <0>; ++ snps,rd_osr_lmt = <0>; ++ snps,blen = <0 0 0 0 16 8 4>; ++ }; ++ ++ gmac0: eth@2820c000 { ++ compatible = "snps,dwmac"; ++ reg = <0x0 0x2820c000 0x0 0x2000>; ++ interrupts = ; ++ interrupt-names = "macirq"; ++ clocks = <&clk250mhz>; ++ clock-names = "stmmaceth"; ++ status = "disabled"; ++ ++ snps,pbl = <16>; ++ snps,fixed-burst; ++ snps,axi-config = <&phytium_axi_setup>; ++ snps,force_sf_dma_mode; ++ snps,multicast-filter-bins = <64>; ++ snps,perfect-filter-entries = <128>; ++ tx-fifo-depth = <4096>; ++ rx-fifo-depth = <4096>; ++ max-frame-size = <9000>; ++ }; ++ ++ gmac1: eth@28210000 { ++ compatible = "snps,dwmac"; ++ reg = <0x0 0x28210000 0x0 0x2000>; ++ interrupts = ; ++ interrupt-names = "macirq"; ++ clocks = <&clk250mhz>; ++ clock-names = "stmmaceth"; ++ status = "disabled"; ++ ++ snps,pbl = 
<16>; ++ snps,fixed-burst; ++ snps,axi-config = <&phytium_axi_setup>; ++ snps,force_sf_dma_mode; ++ snps,multicast-filter-bins = <64>; ++ snps,perfect-filter-entries = <128>; ++ snps,rx-queues-to-use = <2>; ++ tx-fifo-depth = <4096>; ++ rx-fifo-depth = <4096>; ++ max-frame-size = <9000>; ++ }; ++ ++ can0: can@28207000 { ++ compatible = "phytium,can"; ++ reg = <0x0 0x28207000 0x0 0x400>; ++ interrupts = ; ++ clocks = <&sysclk_600mhz>; ++ clock-names = "phytium_can_clk"; ++ tx-fifo-depth = <0x40>; ++ rx-fifo-depth = <0x40>; ++ }; ++ ++ can1: can@28207400 { ++ compatible = "phytium,can"; ++ reg = <0x0 0x28207400 0x0 0x400>; ++ interrupts = ; ++ clocks = <&sysclk_600mhz>; ++ clock-names = "phytium_can_clk"; ++ tx-fifo-depth = <0x40>; ++ rx-fifo-depth = <0x40>; ++ }; ++ ++ can2: can@028207800 { ++ compatible = "phytium,can"; ++ reg = <0x0 0x28207800 0x0 0x400>; ++ interrupts = ; ++ clocks = <&sysclk_600mhz>; ++ clock-names = "phytium_can_clk"; ++ tx-fifo-depth = <0x40>; ++ rx-fifo-depth = <0x40>; ++ }; ++ ++ hda: hda@28206000 { ++ compatible = "phytium,hda"; ++ reg = <0 0x28206000 0x0 0x1000>; ++ interrupts = ; ++ clocks = <&sysclk_48mhz>; ++ clock-names = "phytium_hda_clk"; ++ }; ++ ++ mbox: mailbox@2a000000 { ++ compatible = "phytium,mbox"; ++ reg = <0x0 0x2a000000 0x0 0x1000>; ++ interrupts = ; ++ #mbox-cells = <1>; ++ clocks = <&sysclk_48mhz>; ++ clock-names = "apb_pclk"; ++ }; ++ ++ sram: sram@2a006000 { ++ compatible = "phytium,ft2004-sram-ns","mmio-sram"; ++ reg = <0x0 0x2a006000 0x0 0x2000>; ++ ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ranges = <0x0 0x0 0x2a006000 0x2000>; ++ ++ scpi_lpri: scpi-shmem@0 { ++ compatible = "phytium,ft2004-scpi-shmem"; ++ reg = <0x1000 0x800>; ++ }; ++ }; ++ ++ scpi_protocol: scpi { ++ compatible = "arm,scpi"; ++ mboxes = <&mbox 0>; ++ shmem = <&scpi_lpri>; ++ ++ clocks { ++ compatible = "arm,scpi-clocks"; ++ ++ scpi_dvfs: scpi_clocks@0 { ++ compatible = "arm,scpi-dvfs-clocks"; ++ #clock-cells = <1>; ++ clock-indices = <0>, 
<1>, <2>, <3>; ++ clock-output-names = "c0", "c1", "c2", "c3"; ++ }; ++ }; ++ ++ scpi_sensors: sensors { ++ compatible = "arm,scpi-sensors"; ++ #thermal-sensor-cells = <1>; ++ }; ++ }; ++ ++ ixic: interrupt-controller@29000000 { ++ compatible = "phytium,ft2004c-ixic"; ++ reg-names = "ctr", "hpb"; ++ reg = <0x0 0x29000000 0x0 0x00060000>, ++ <0x0 0x29100000 0x0 0x00002000>; ++ interrupt-controller; ++ interrupt-parent = <&gic>; ++ #interrupt-cells = <3>; ++ intx-spi-base = <28>; ++ }; ++ ++ pcie: pcie { ++ compatible = "pci-host-ecam-generic"; ++ device_type = "pci"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ #interrupt-cells = <1>; ++ reg = <0x0 0x40000000 0x0 0x10000000>; ++ msi-parent = <&its>; ++ bus-range = <0x0 0xff>; ++ interrupt-map-mask = <0x0 0x0 0x0 0x7>; ++ interrupt-map = <0x0 0x0 0x0 0x1 &ixic GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>, ++ <0x0 0x0 0x0 0x2 &ixic GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>, ++ <0x0 0x0 0x0 0x3 &ixic GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>, ++ <0x0 0x0 0x0 0x4 &ixic GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>; ++ ranges = <0x01000000 0x00 0x00000000 0x0 0x50000000 0x0 0x00f00000>, ++ <0x02000000 0x00 0x58000000 0x0 0x58000000 0x0 0x28000000>, ++ <0x03000000 0x10 0x00000000 0x10 0x00000000 0x10 0x00000000>; ++ }; ++ }; ++ ++}; +diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig +index db8d364f8476..3356b2806457 100644 +--- a/arch/arm64/configs/defconfig ++++ b/arch/arm64/configs/defconfig +@@ -45,6 +45,7 @@ CONFIG_ARCH_HISI=y + CONFIG_ARCH_MEDIATEK=y + CONFIG_ARCH_MESON=y + CONFIG_ARCH_MVEBU=y ++CONFIG_ARCH_PHYTIUM=y + CONFIG_ARCH_QCOM=y + CONFIG_ARCH_ROCKCHIP=y + CONFIG_ARCH_SEATTLE=y +@@ -234,7 +235,8 @@ CONFIG_SMC91X=y + CONFIG_SMSC911X=y + CONFIG_SNI_AVE=y + CONFIG_SNI_NETSEC=y +-CONFIG_STMMAC_ETH=m ++CONFIG_STMMAC_ETH=y ++CONFIG_STMMAC_PLATFORM=y + CONFIG_MDIO_BUS_MUX_MMIOREG=y + CONFIG_AT803X_PHY=m + CONFIG_MARVELL_PHY=m +@@ -322,6 +324,8 @@ CONFIG_SPI_BCM2835AUX=m + CONFIG_SPI_MESON_SPICC=m + CONFIG_SPI_MESON_SPIFC=m + 
CONFIG_SPI_ORION=y ++CONFIG_SPI_PHYTIUM=y ++CONFIG_SPI_PHYTIUM_QUADSPI=y + CONFIG_SPI_PL022=y + CONFIG_SPI_ROCKCHIP=y + CONFIG_SPI_QUP=y +@@ -347,6 +351,7 @@ CONFIG_GPIO_XGENE_SB=y + CONFIG_GPIO_PCA953X=y + CONFIG_GPIO_PCA953X_IRQ=y + CONFIG_GPIO_MAX77620=y ++CONFIG_GPIO_PHYTIUM=y + CONFIG_POWER_AVS=y + CONFIG_ROCKCHIP_IODOMAIN=y + CONFIG_POWER_RESET_MSM=y +@@ -476,6 +481,7 @@ CONFIG_SND_SOC_RT5514_SPI=m + CONFIG_SND_SOC_RT5645=m + CONFIG_SND_SIMPLE_CARD=m + CONFIG_SND_AUDIO_GRAPH_CARD=m ++CONFIG_SND_HDA_PHYTIUM=m + CONFIG_I2C_HID=m + CONFIG_USB=y + CONFIG_USB_OTG=y +@@ -528,6 +534,7 @@ CONFIG_MMC_DW_ROCKCHIP=y + CONFIG_MMC_SUNXI=y + CONFIG_MMC_BCM2835=y + CONFIG_MMC_SDHCI_XENON=y ++CONFIG_MMC_PHYTIUM_SDCI=y + CONFIG_NEW_LEDS=y + CONFIG_LEDS_CLASS=y + CONFIG_LEDS_GPIO=y +@@ -553,6 +560,7 @@ CONFIG_RTC_DRV_SUN6I=y + CONFIG_RTC_DRV_ARMADA38X=y + CONFIG_RTC_DRV_TEGRA=y + CONFIG_RTC_DRV_XGENE=y ++CONFIG_RTC_DRV_PHYTIUM=y + CONFIG_DMADEVICES=y + CONFIG_DMA_BCM2835=m + CONFIG_K3_DMA=y +@@ -588,6 +596,7 @@ CONFIG_HWSPINLOCK_QCOM=y + CONFIG_ARM_MHU=y + CONFIG_PLATFORM_MHU=y + CONFIG_BCM2835_MBOX=y ++CONFIG_PHYTIUM_MBOX=y + CONFIG_QCOM_APCS_IPC=y + CONFIG_ROCKCHIP_IOMMU=y + CONFIG_TEGRA_IOMMU_SMMU=y +@@ -711,3 +720,5 @@ CONFIG_CRYPTO_AES_ARM64_CE_CCM=y + CONFIG_CRYPTO_AES_ARM64_CE_BLK=y + CONFIG_CRYPTO_CHACHA20_NEON=m + CONFIG_CRYPTO_AES_ARM64_BS=m ++CONFIG_CAN=y ++CONFIG_CAN_PHYTIUM=y +diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h +index ae1f70450fb2..9932aca9704b 100644 +--- a/arch/arm64/include/asm/cpucaps.h ++++ b/arch/arm64/include/asm/cpucaps.h +@@ -51,7 +51,8 @@ + #define ARM64_SSBD 30 + #define ARM64_MISMATCHED_CACHE_TYPE 31 + #define ARM64_HAS_STAGE2_FWB 32 ++#define ARM64_HAS_CRC32 33 + +-#define ARM64_NCAPS 33 ++#define ARM64_NCAPS 34 + + #endif /* __ASM_CPUCAPS_H */ +diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h +index ea690b3562af..4d58045b40f7 100644 +--- a/arch/arm64/include/asm/cputype.h 
++++ b/arch/arm64/include/asm/cputype.h +@@ -76,6 +76,7 @@ + #define ARM_CPU_IMP_BRCM 0x42 + #define ARM_CPU_IMP_QCOM 0x51 + #define ARM_CPU_IMP_NVIDIA 0x4E ++#define ARM_CPU_IMP_PHYTIUM 0x70 + + #define ARM_CPU_PART_AEM_V8 0xD0F + #define ARM_CPU_PART_FOUNDATION 0xD00 +@@ -103,6 +104,8 @@ + #define NVIDIA_CPU_PART_DENVER 0x003 + #define NVIDIA_CPU_PART_CARMEL 0x004 + ++#define PHYTIUM_CPU_PART_FTC662 0x662 ++ + #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) + #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) + #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72) +@@ -120,6 +123,7 @@ + #define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO) + #define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER) + #define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL) ++#define MIDR_PHYTIUM_FT2000PLUS MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_FTC662) + + #ifndef __ASSEMBLY__ + +diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h +index 1bdeca8918a6..9f82d6b53851 100644 +--- a/arch/arm64/include/asm/pgtable.h ++++ b/arch/arm64/include/asm/pgtable.h +@@ -224,8 +224,10 @@ static inline void set_pte(pte_t *ptep, pte_t pte) + * Only if the new pte is valid and kernel, otherwise TLB maintenance + * or update_mmu_cache() have the necessary barriers. 
+ */ +- if (pte_valid_not_user(pte)) ++ if (pte_valid_not_user(pte)) { + dsb(ishst); ++ isb(); ++ } + } + + extern void __sync_icache_dcache(pte_t pteval); +@@ -432,6 +434,7 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) + { + WRITE_ONCE(*pmdp, pmd); + dsb(ishst); ++ isb(); + } + + static inline void pmd_clear(pmd_t *pmdp) +@@ -482,6 +485,7 @@ static inline void set_pud(pud_t *pudp, pud_t pud) + { + WRITE_ONCE(*pudp, pud); + dsb(ishst); ++ isb(); + } + + static inline void pud_clear(pud_t *pudp) +diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c +index e238b7932096..7626b80128f5 100644 +--- a/arch/arm64/kernel/cpufeature.c ++++ b/arch/arm64/kernel/cpufeature.c +@@ -1222,6 +1222,15 @@ static const struct arm64_cpu_capabilities arm64_features[] = { + .cpu_enable = cpu_enable_hw_dbm, + }, + #endif ++ { ++ .desc = "CRC32 instructions", ++ .capability = ARM64_HAS_CRC32, ++ .type = ARM64_CPUCAP_SYSTEM_FEATURE, ++ .matches = has_cpuid_feature, ++ .sys_reg = SYS_ID_AA64ISAR0_EL1, ++ .field_pos = ID_AA64ISAR0_CRC32_SHIFT, ++ .min_field_value = 1, ++ }, + {}, + }; + +diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c +index 6b2686d54411..29cdc99688f3 100644 +--- a/arch/arm64/kernel/hibernate.c ++++ b/arch/arm64/kernel/hibernate.c +@@ -214,7 +214,7 @@ static int create_safe_exec_page(void *src_start, size_t length, + } + + memcpy((void *)dst, src_start, length); +- flush_icache_range(dst, dst + length); ++ __flush_icache_range(dst, dst + length); + + pgdp = pgd_offset_raw(allocator(mask), dst_addr); + if (pgd_none(READ_ONCE(*pgdp))) { +diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile +index 68755fd70dcf..f28f91fd96a2 100644 +--- a/arch/arm64/lib/Makefile ++++ b/arch/arm64/lib/Makefile +@@ -25,3 +25,5 @@ KCOV_INSTRUMENT_atomic_ll_sc.o := n + UBSAN_SANITIZE_atomic_ll_sc.o := n + + lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o ++ ++obj-$(CONFIG_CRC32) += crc32.o +diff --git 
a/arch/arm64/lib/crc32.S b/arch/arm64/lib/crc32.S +new file mode 100644 +index 000000000000..5bc1e85b4e1c +--- /dev/null ++++ b/arch/arm64/lib/crc32.S +@@ -0,0 +1,60 @@ ++/* ++ * Accelerated CRC32(C) using AArch64 CRC instructions ++ * ++ * Copyright (C) 2016 - 2018 Linaro Ltd ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#include ++#include ++#include ++ ++ .cpu generic+crc ++ ++ .macro __crc32, c ++0: subs x2, x2, #16 ++ b.mi 8f ++ ldp x3, x4, [x1], #16 ++CPU_BE( rev x3, x3 ) ++CPU_BE( rev x4, x4 ) ++ crc32\c\()x w0, w0, x3 ++ crc32\c\()x w0, w0, x4 ++ b.ne 0b ++ ret ++ ++8: tbz x2, #3, 4f ++ ldr x3, [x1], #8 ++CPU_BE( rev x3, x3 ) ++ crc32\c\()x w0, w0, x3 ++4: tbz x2, #2, 2f ++ ldr w3, [x1], #4 ++CPU_BE( rev w3, w3 ) ++ crc32\c\()w w0, w0, w3 ++2: tbz x2, #1, 1f ++ ldrh w3, [x1], #2 ++CPU_BE( rev16 w3, w3 ) ++ crc32\c\()h w0, w0, w3 ++1: tbz x2, #0, 0f ++ ldrb w3, [x1] ++ crc32\c\()b w0, w0, w3 ++0: ret ++ .endm ++ ++ .align 5 ++ENTRY(crc32_le) ++alternative_if_not ARM64_HAS_CRC32 ++ b crc32_le_base ++alternative_else_nop_endif ++ __crc32 ++ENDPROC(crc32_le) ++ ++ .align 5 ++ENTRY(__crc32c_le) ++alternative_if_not ARM64_HAS_CRC32 ++ b __crc32c_le_base ++alternative_else_nop_endif ++ __crc32 c ++ENDPROC(__crc32c_le) +diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c +index 2664452fa112..bed74949d039 100644 +--- a/drivers/acpi/acpi_apd.c ++++ b/drivers/acpi/acpi_apd.c +@@ -162,6 +162,12 @@ static const struct apd_device_desc hip08_i2c_desc = { + .setup = acpi_apd_setup, + .fixed_clk_rate = 250000000, + }; ++ ++static const struct apd_device_desc phytium_i2c_desc = { ++ .setup = acpi_apd_setup, ++ .fixed_clk_rate = 200000000, ++}; ++ + static const struct apd_device_desc thunderx2_i2c_desc = { + .setup = acpi_apd_setup, + .fixed_clk_rate = 125000000, +@@ -234,6 +240,7 @@ static const struct 
acpi_device_id acpi_apd_device_ids[] = { + { "CAV9007", APD_ADDR(thunderx2_i2c_desc) }, + { "HISI02A1", APD_ADDR(hip07_i2c_desc) }, + { "HISI02A2", APD_ADDR(hip08_i2c_desc) }, ++ { "PHYT0003", APD_ADDR(phytium_i2c_desc) }, + #endif + { } + }; +diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h +index 530a3f675490..9647e5d038d5 100644 +--- a/drivers/acpi/internal.h ++++ b/drivers/acpi/internal.h +@@ -89,6 +89,18 @@ bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent); + acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context); + void acpi_scan_table_handler(u32 event, void *table, void *context); + ++#ifdef CONFIG_ACPI_GENERIC_GSI ++int acpi_register_irq(struct device *dev, u32 hwirq, int trigger, ++ int polarity, struct fwnode_handle *fwnode); ++#else ++static inline ++int acpi_register_irq(struct device *dev, u32 hwirq, int trigger, ++ int polarity, struct fwnode_handle *fwnode) ++{ ++ return acpi_register_gsi(dev, hwirq, trigger, polarity); ++} ++#endif ++ + /* -------------------------------------------------------------------------- + Device Node Initialization / Removal + -------------------------------------------------------------------------- */ +diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c +index 7c352cba0528..fc9b52ea4ad5 100644 +--- a/drivers/acpi/irq.c ++++ b/drivers/acpi/irq.c +@@ -13,6 +13,8 @@ + #include + #include + ++#include "internal.h" ++ + enum acpi_irq_model_id acpi_irq_model; + + static struct fwnode_handle *acpi_gsi_domain_id; +@@ -41,6 +43,24 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) + } + EXPORT_SYMBOL_GPL(acpi_gsi_to_irq); + ++int acpi_register_irq(struct device *dev, u32 hwirq, int trigger, ++ int polarity, struct fwnode_handle *fwnode) ++{ ++ struct irq_fwspec fwspec; ++ ++ if (!fwnode) { ++ dev_warn(dev, "No registered irqchip for hwirq %d\n", hwirq); ++ return -EINVAL; ++ } ++ ++ fwspec.fwnode = fwnode; ++ fwspec.param[0] = hwirq; ++ fwspec.param[1] = 
acpi_dev_get_irq_type(trigger, polarity); ++ fwspec.param_count = 2; ++ ++ return irq_create_fwspec_mapping(&fwspec); ++} ++ + /** + * acpi_register_gsi() - Map a GSI to a linux IRQ number + * @dev: device for which IRQ has to be mapped +@@ -54,19 +74,7 @@ EXPORT_SYMBOL_GPL(acpi_gsi_to_irq); + int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, + int polarity) + { +- struct irq_fwspec fwspec; +- +- if (WARN_ON(!acpi_gsi_domain_id)) { +- pr_warn("GSI: No registered irqchip, giving up\n"); +- return -EINVAL; +- } +- +- fwspec.fwnode = acpi_gsi_domain_id; +- fwspec.param[0] = gsi; +- fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity); +- fwspec.param_count = 2; +- +- return irq_create_fwspec_mapping(&fwspec); ++ return acpi_register_irq(dev, gsi, trigger, polarity, acpi_gsi_domain_id); + } + EXPORT_SYMBOL_GPL(acpi_register_gsi); + +@@ -95,7 +103,7 @@ EXPORT_SYMBOL_GPL(acpi_unregister_gsi); + * Return: + * The referenced device fwhandle or NULL on failure + */ +-static struct fwnode_handle * ++struct fwnode_handle * + acpi_get_irq_source_fwhandle(const struct acpi_resource_source *source) + { + struct fwnode_handle *result; +@@ -295,3 +303,29 @@ void __init acpi_set_irq_model(enum acpi_irq_model_id model, + acpi_irq_model = model; + acpi_gsi_domain_id = fwnode; + } ++ ++/** ++ * acpi_irq_create_hierarchy - Create a hierarchical IRQ domain with the default ++ * GSI domain as its parent. ++ * @flags: Irq domain flags associated with the domain ++ * @size: Size of the domain. 
++ * @fwnode: Optional fwnode of the interrupt controller ++ * @ops: Pointer to the interrupt domain callbacks ++ * @host_data: Controller private data pointer ++ */ ++struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags, ++ unsigned int size, ++ struct fwnode_handle *fwnode, ++ const struct irq_domain_ops *ops, ++ void *host_data) ++{ ++ struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id, ++ DOMAIN_BUS_ANY); ++ ++ if (!d) ++ return NULL; ++ ++ return irq_domain_create_hierarchy(d, flags, size, fwnode, ops, ++ host_data); ++} ++EXPORT_SYMBOL_GPL(acpi_irq_create_hierarchy); +diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c +index c576a6fe4ebb..4d53d0cf0a9d 100644 +--- a/drivers/acpi/pci_irq.c ++++ b/drivers/acpi/pci_irq.c +@@ -35,6 +35,8 @@ + #include + #include + ++#include "internal.h" ++ + #define PREFIX "ACPI: " + + #define _COMPONENT ACPI_PCI_COMPONENT +@@ -423,6 +425,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev) + char *link = NULL; + char link_desc[16]; + int rc; ++ struct fwnode_handle *rs_fwnode; + + pin = dev->pin; + if (!pin) { +@@ -451,7 +454,8 @@ int acpi_pci_irq_enable(struct pci_dev *dev) + gsi = acpi_pci_link_allocate_irq(entry->link, + entry->index, + &triggering, &polarity, +- &link); ++ &link, ++ &rs_fwnode); + else + gsi = entry->index; + } else +@@ -473,7 +477,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev) + return 0; + } + +- rc = acpi_register_gsi(&dev->dev, gsi, triggering, polarity); ++ rc = acpi_register_irq(&dev->dev, gsi, triggering, polarity, rs_fwnode); + if (rc < 0) { + dev_warn(&dev->dev, "PCI INT %c: failed to register GSI\n", + pin_name(pin)); +diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c +index d5eec352a6e1..14010783eece 100644 +--- a/drivers/acpi/pci_link.c ++++ b/drivers/acpi/pci_link.c +@@ -74,6 +74,7 @@ struct acpi_pci_link_irq { + u8 resource_type; + u8 possible_count; + u32 possible[ACPI_PCI_LINK_MAX_POSSIBLE]; ++ struct acpi_resource_source resource_source; + u8 
initialized:1; + u8 reserved:7; + }; +@@ -135,6 +136,8 @@ static acpi_status acpi_pci_link_check_possible(struct acpi_resource *resource, + { + struct acpi_resource_extended_irq *p = + &resource->data.extended_irq; ++ struct acpi_resource_source *rs = ++ &link->irq.resource_source; + if (!p || !p->interrupt_count) { + printk(KERN_WARNING PREFIX + "Blank _PRS EXT IRQ resource\n"); +@@ -155,6 +158,12 @@ static acpi_status acpi_pci_link_check_possible(struct acpi_resource *resource, + link->irq.triggering = p->triggering; + link->irq.polarity = p->polarity; + link->irq.resource_type = ACPI_RESOURCE_TYPE_EXTENDED_IRQ; ++ if (p->resource_source.string_length) { ++ rs->index = p->resource_source.index; ++ rs->string_length = p->resource_source.string_length; ++ rs->string_ptr = kstrdup(p->resource_source.string_ptr, ++ GFP_KERNEL); ++ } + break; + } + default: +@@ -341,7 +350,8 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq) + resource->res.data.irq.sharable = ACPI_SHARED; + resource->res.data.extended_irq.interrupt_count = 1; + resource->res.data.extended_irq.interrupts[0] = irq; +- /* ignore resource_source, it's optional */ ++ resource->res.data.extended_irq.resource_source = ++ link->irq.resource_source; + break; + default: + printk(KERN_ERR PREFIX "Invalid Resource_type %d\n", link->irq.resource_type); +@@ -627,7 +637,7 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link) + * failure: return -1 + */ + int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering, +- int *polarity, char **name) ++ int *polarity, char **name, struct fwnode_handle **rs_fwnode) + { + int result; + struct acpi_device *device; +@@ -671,6 +681,9 @@ int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering, + *polarity = link->irq.polarity; + if (name) + *name = acpi_device_bid(link->device); ++ if (rs_fwnode) ++ *rs_fwnode = acpi_get_irq_source_fwhandle(&link->irq.resource_source); ++ + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + 
"Link %s is referenced\n", + acpi_device_bid(link->device))); +diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c +index 2a29dd9c986d..e05bb452e3f6 100644 +--- a/drivers/firmware/efi/efi.c ++++ b/drivers/firmware/efi/efi.c +@@ -52,7 +52,8 @@ struct efi __read_mostly efi = { + .properties_table = EFI_INVALID_TABLE_ADDR, + .mem_attr_table = EFI_INVALID_TABLE_ADDR, + .rng_seed = EFI_INVALID_TABLE_ADDR, +- .tpm_log = EFI_INVALID_TABLE_ADDR ++ .tpm_log = EFI_INVALID_TABLE_ADDR, ++ .mem_reserve = EFI_INVALID_TABLE_ADDR, + }; + EXPORT_SYMBOL(efi); + +@@ -484,6 +485,7 @@ static __initdata efi_config_table_type_t common_tables[] = { + {EFI_MEMORY_ATTRIBUTES_TABLE_GUID, "MEMATTR", &efi.mem_attr_table}, + {LINUX_EFI_RANDOM_SEED_TABLE_GUID, "RNG", &efi.rng_seed}, + {LINUX_EFI_TPM_EVENT_LOG_GUID, "TPMEventLog", &efi.tpm_log}, ++ {LINUX_EFI_MEMRESERVE_TABLE_GUID, "MEMRESERVE", &efi.mem_reserve}, + {NULL_GUID, NULL, NULL}, + }; + +@@ -591,6 +593,41 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz, + early_memunmap(tbl, sizeof(*tbl)); + } + ++ if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) { ++ unsigned long prsv = efi.mem_reserve; ++ ++ while (prsv) { ++ struct linux_efi_memreserve *rsv; ++ u8 *p; ++ int i; ++ ++ /* ++ * Just map a full page: that is what we will get ++ * anyway, and it permits us to map the entire entry ++ * before knowing its size. 
++ */ ++ p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE), ++ PAGE_SIZE); ++ if (p == NULL) { ++ pr_err("Could not map UEFI memreserve entry!\n"); ++ return -ENOMEM; ++ } ++ ++ rsv = (void *)(p + prsv % PAGE_SIZE); ++ ++ /* reserve the entry itself */ ++ memblock_reserve(prsv, EFI_MEMRESERVE_SIZE(rsv->size)); ++ ++ for (i = 0; i < atomic_read(&rsv->count); i++) { ++ memblock_reserve(rsv->entry[i].base, ++ rsv->entry[i].size); ++ } ++ ++ prsv = rsv->next; ++ early_memunmap(p, PAGE_SIZE); ++ } ++ } ++ + return 0; + } + +@@ -937,6 +974,109 @@ bool efi_is_table_address(unsigned long phys_addr) + return false; + } + ++static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock); ++static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init; ++ ++static int __init efi_memreserve_map_root(void) ++{ ++ if (efi.mem_reserve == EFI_INVALID_TABLE_ADDR) ++ return -ENODEV; ++ ++ efi_memreserve_root = memremap(efi.mem_reserve, ++ sizeof(*efi_memreserve_root), ++ MEMREMAP_WB); ++ if (WARN_ON_ONCE(!efi_memreserve_root)) ++ return -ENOMEM; ++ return 0; ++} ++ ++static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size) ++{ ++ struct resource *res, *parent; ++ ++ res = kzalloc(sizeof(struct resource), GFP_ATOMIC); ++ if (!res) ++ return -ENOMEM; ++ ++ res->name = "reserved"; ++ res->flags = IORESOURCE_MEM; ++ res->start = addr; ++ res->end = addr + size - 1; ++ ++ /* we expect a conflict with a 'System RAM' region */ ++ parent = request_resource_conflict(&iomem_resource, res); ++ return parent ? 
request_resource(parent, res) : 0; ++} ++ ++int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size) ++{ ++ struct linux_efi_memreserve *rsv; ++ unsigned long prsv; ++ int rc, index; ++ ++ if (efi_memreserve_root == (void *)ULONG_MAX) ++ return -ENODEV; ++ ++ if (!efi_memreserve_root) { ++ rc = efi_memreserve_map_root(); ++ if (rc) ++ return rc; ++ } ++ ++ /* first try to find a slot in an existing linked list entry */ ++ for (prsv = efi_memreserve_root->next; prsv; prsv = rsv->next) { ++ rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB); ++ index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size); ++ if (index < rsv->size) { ++ rsv->entry[index].base = addr; ++ rsv->entry[index].size = size; ++ ++ memunmap(rsv); ++ return efi_mem_reserve_iomem(addr, size); ++ } ++ memunmap(rsv); ++ } ++ ++ /* no slot found - allocate a new linked list entry */ ++ rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC); ++ if (!rsv) ++ return -ENOMEM; ++ ++ rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K); ++ if (rc) { ++ free_page((unsigned long)rsv); ++ return rc; ++ } ++ ++ /* ++ * The memremap() call above assumes that a linux_efi_memreserve entry ++ * never crosses a page boundary, so let's ensure that this remains true ++ * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by ++ * using SZ_4K explicitly in the size calculation below. 
++ */ ++ rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K); ++ atomic_set(&rsv->count, 1); ++ rsv->entry[0].base = addr; ++ rsv->entry[0].size = size; ++ ++ spin_lock(&efi_mem_reserve_persistent_lock); ++ rsv->next = efi_memreserve_root->next; ++ efi_memreserve_root->next = __pa(rsv); ++ spin_unlock(&efi_mem_reserve_persistent_lock); ++ ++ return efi_mem_reserve_iomem(addr, size); ++} ++ ++static int __init efi_memreserve_root_init(void) ++{ ++ if (efi_memreserve_root) ++ return 0; ++ if (efi_memreserve_map_root()) ++ efi_memreserve_root = (void *)ULONG_MAX; ++ return 0; ++} ++early_initcall(efi_memreserve_root_init); ++ + #ifdef CONFIG_KEXEC + static int update_efi_random_seed(struct notifier_block *nb, + unsigned long code, void *unused) +diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c +index 6920033de6d4..5bcfa08e8bb1 100644 +--- a/drivers/firmware/efi/libstub/arm-stub.c ++++ b/drivers/firmware/efi/libstub/arm-stub.c +@@ -69,6 +69,31 @@ static struct screen_info *setup_graphics(efi_system_table_t *sys_table_arg) + return si; + } + ++void install_memreserve_table(efi_system_table_t *sys_table_arg) ++{ ++ struct linux_efi_memreserve *rsv; ++ efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID; ++ efi_status_t status; ++ ++ status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv), ++ (void **)&rsv); ++ if (status != EFI_SUCCESS) { ++ pr_efi_err(sys_table_arg, "Failed to allocate memreserve entry!\n"); ++ return; ++ } ++ ++ rsv->next = 0; ++ rsv->size = 0; ++ atomic_set(&rsv->count, 0); ++ ++ status = efi_call_early(install_configuration_table, ++ &memreserve_table_guid, ++ rsv); ++ if (status != EFI_SUCCESS) ++ pr_efi_err(sys_table_arg, "Failed to install memreserve config table!\n"); ++} ++ ++ + /* + * This function handles the architcture specific differences between arm and + * arm64 regarding where the kernel image must be loaded and any memory that +@@ -235,6 +260,8 @@ unsigned long efi_entry(void 
*handle, efi_system_table_t *sys_table, + } + } + ++ install_memreserve_table(sys_table); ++ + new_fdt_addr = fdt_addr; + status = allocate_new_fdt_and_exit_boot(sys_table, handle, + &new_fdt_addr, efi_get_max_fdt_addr(dram_base), +diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig +index 4f52c3a8ec99..cd79b1a3d563 100644 +--- a/drivers/gpio/Kconfig ++++ b/drivers/gpio/Kconfig +@@ -79,6 +79,10 @@ config GPIO_GENERIC + + # put drivers in the right section, in alphabetical order + ++# This symbol is selected by both MMIO and PCI expanders ++config GPIO_PHYTIUM_CORE ++ tristate ++ + # This symbol is selected by both I2C and SPI expanders + config GPIO_MAX730X + tristate +@@ -404,6 +408,16 @@ config GPIO_OMAP + help + Say yes here to enable GPIO support for TI OMAP SoCs. + ++config GPIO_PHYTIUM_PLAT ++ tristate "Phytium GPIO Platform support" ++ default y if ARCH_PHYTIUM ++ depends on ARM64 ++ select GPIO_PHYTIUM_CORE ++ select IRQ_DOMAIN ++ select GENERIC_IRQ_CHIP ++ help ++ Say yes here to enable GPIO support for Phytium SoCs. ++ + config GPIO_PL061 + bool "PrimeCell PL061 GPIO support" + depends on ARM_AMBA +@@ -1306,6 +1320,18 @@ config GPIO_PCIE_IDIO_24 + Input filter control is not supported by this driver, and the input + filters are deactivated by this driver. + ++config GPIO_PHYTIUM_PCI ++ tristate "Phytium GPIO PCI support" ++ select GPIO_PHYTIUM_CORE ++ select IRQ_DOMAIN ++ select GENERIC_IRQ_CHIP ++ help ++ Say Y here to support Phytium PCI GPIO controller on X100 chipset. ++ An interrupt is generated when any of the inputs change state ++ (low to high or high to low). ++ ++ This driver can be used for Phytium X100. 
++ + config GPIO_RDC321X + tristate "RDC R-321x GPIO support" + select MFD_CORE +diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile +index c256aff66a65..7e6462a25135 100644 +--- a/drivers/gpio/Makefile ++++ b/drivers/gpio/Makefile +@@ -95,6 +95,9 @@ obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o + obj-$(CONFIG_GPIO_MXS) += gpio-mxs.o + obj-$(CONFIG_GPIO_OCTEON) += gpio-octeon.o + obj-$(CONFIG_GPIO_OMAP) += gpio-omap.o ++obj-$(CONFIG_GPIO_PHYTIUM_CORE) += gpio-phytium-core.o ++obj-$(CONFIG_GPIO_PHYTIUM_PCI) += gpio-phytium-pci.o ++obj-$(CONFIG_GPIO_PHYTIUM_PLAT) += gpio-phytium-platform.o + obj-$(CONFIG_GPIO_PCA953X) += gpio-pca953x.o + obj-$(CONFIG_GPIO_PCF857X) += gpio-pcf857x.o + obj-$(CONFIG_GPIO_PCH) += gpio-pch.o +diff --git a/drivers/gpio/gpio-phytium-core.c b/drivers/gpio/gpio-phytium-core.c +new file mode 100644 +index 000000000000..7c87b0868206 +--- /dev/null ++++ b/drivers/gpio/gpio-phytium-core.c +@@ -0,0 +1,348 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (c) 2019, Phytium Corporation. ++ * Copyright (c) 2021, Phytium Corporation. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include "gpio-phytium-core.h" ++ ++static int get_pin_location(struct phytium_gpio *gpio, unsigned int offset, ++ struct pin_loc *pl) ++{ ++ int ret; ++ ++ if (offset < gpio->ngpio[0]) { ++ pl->port = 0; ++ pl->offset = offset; ++ ret = 0; ++ } else if (offset < (gpio->ngpio[0] + gpio->ngpio[1])) { ++ pl->port = 1; ++ pl->offset = offset - gpio->ngpio[0]; ++ ret = 0; ++ } else { ++ ret = -EINVAL; ++ } ++ ++ return ret; ++} ++ ++static void phytium_gpio_toggle_trigger(struct phytium_gpio *gpio, ++ unsigned int offset) ++{ ++ struct gpio_chip *gc; ++ u32 pol; ++ int val; ++ ++ /* Only port A can provide interrupt source */ ++ if (offset >= gpio->ngpio[0]) ++ return; ++ ++ gc = &gpio->gc; ++ ++ pol = readl(gpio->regs + GPIO_INT_POLARITY); ++ /* Just read the current value right out of the data register */ ++ val = gc->get(gc, offset); ++ if (val) ++ pol &= ~BIT(offset); ++ else ++ pol |= BIT(offset); ++ ++ writel(pol, gpio->regs + GPIO_INT_POLARITY); ++} ++ ++int phytium_gpio_get(struct gpio_chip *gc, unsigned int offset) ++{ ++ struct phytium_gpio *gpio = gpiochip_get_data(gc); ++ struct pin_loc loc; ++ void __iomem *dat; ++ ++ if (get_pin_location(gpio, offset, &loc)) ++ return -EINVAL; ++ ++ dat = gpio->regs + GPIO_EXT_PORTA + (loc.port * GPIO_PORT_STRIDE); ++ ++ return !!(readl(dat) & BIT(loc.offset)); ++} ++EXPORT_SYMBOL_GPL(phytium_gpio_get); ++ ++void phytium_gpio_set(struct gpio_chip *gc, unsigned int offset, int value) ++{ ++ struct phytium_gpio *gpio = gpiochip_get_data(gc); ++ struct pin_loc loc; ++ void __iomem *dr; ++ unsigned long flags; ++ u32 mask; ++ ++ if (get_pin_location(gpio, offset, &loc)) ++ return; ++ dr = gpio->regs + GPIO_SWPORTA_DR + (loc.port * GPIO_PORT_STRIDE); ++ ++ raw_spin_lock_irqsave(&gpio->lock, flags); ++ ++ if (value) ++ mask = readl(dr) | BIT(loc.offset); ++ else ++ mask = readl(dr) & ~BIT(loc.offset); ++ ++ writel(mask, dr); ++ ++ 
raw_spin_unlock_irqrestore(&gpio->lock, flags); ++} ++EXPORT_SYMBOL_GPL(phytium_gpio_set); ++ ++int phytium_gpio_direction_input(struct gpio_chip *gc, unsigned int offset) ++{ ++ struct phytium_gpio *gpio = gpiochip_get_data(gc); ++ struct pin_loc loc; ++ unsigned long flags; ++ void __iomem *ddr; ++ ++ if (get_pin_location(gpio, offset, &loc)) ++ return -EINVAL; ++ ddr = gpio->regs + GPIO_SWPORTA_DDR + (loc.port * GPIO_PORT_STRIDE); ++ ++ raw_spin_lock_irqsave(&gpio->lock, flags); ++ ++ writel(readl(ddr) & ~(BIT(loc.offset)), ddr); ++ ++ raw_spin_unlock_irqrestore(&gpio->lock, flags); ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(phytium_gpio_direction_input); ++ ++int phytium_gpio_direction_output(struct gpio_chip *gc, unsigned int offset, ++ int value) ++{ ++ struct phytium_gpio *gpio = gpiochip_get_data(gc); ++ struct pin_loc loc; ++ unsigned long flags; ++ void __iomem *ddr; ++ ++ if (get_pin_location(gpio, offset, &loc)) ++ return -EINVAL; ++ ddr = gpio->regs + GPIO_SWPORTA_DDR + (loc.port * GPIO_PORT_STRIDE); ++ ++ raw_spin_lock_irqsave(&gpio->lock, flags); ++ ++ writel(readl(ddr) | BIT(loc.offset), ddr); ++ ++ raw_spin_unlock_irqrestore(&gpio->lock, flags); ++ ++ phytium_gpio_set(gc, offset, value); ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(phytium_gpio_direction_output); ++ ++void phytium_gpio_irq_ack(struct irq_data *d) ++{ ++ struct gpio_chip *gc = irq_data_get_irq_chip_data(d); ++ struct phytium_gpio *gpio = gpiochip_get_data(gc); ++ u32 val = BIT(irqd_to_hwirq(d)); ++ ++ raw_spin_lock(&gpio->lock); ++ ++ writel(val, gpio->regs + GPIO_PORTA_EOI); ++ ++ raw_spin_unlock(&gpio->lock); ++} ++EXPORT_SYMBOL_GPL(phytium_gpio_irq_ack); ++ ++void phytium_gpio_irq_mask(struct irq_data *d) ++{ ++ struct gpio_chip *gc = irq_data_get_irq_chip_data(d); ++ struct phytium_gpio *gpio = gpiochip_get_data(gc); ++ u32 val; ++ ++ /* Only port A can provide interrupt source */ ++ if (irqd_to_hwirq(d) >= gpio->ngpio[0]) ++ return; ++ ++ raw_spin_lock(&gpio->lock); ++ ++ val = 
readl(gpio->regs + GPIO_INTMASK); ++ val |= BIT(irqd_to_hwirq(d)); ++ writel(val, gpio->regs + GPIO_INTMASK); ++ ++ raw_spin_unlock(&gpio->lock); ++} ++EXPORT_SYMBOL_GPL(phytium_gpio_irq_mask); ++ ++void phytium_gpio_irq_unmask(struct irq_data *d) ++{ ++ struct gpio_chip *gc = irq_data_get_irq_chip_data(d); ++ struct phytium_gpio *gpio = gpiochip_get_data(gc); ++ u32 val; ++ ++ /* Only port A can provide interrupt source */ ++ if (irqd_to_hwirq(d) >= gpio->ngpio[0]) ++ return; ++ ++ raw_spin_lock(&gpio->lock); ++ ++ val = readl(gpio->regs + GPIO_INTMASK); ++ val &= ~BIT(irqd_to_hwirq(d)); ++ writel(val, gpio->regs + GPIO_INTMASK); ++ ++ raw_spin_unlock(&gpio->lock); ++} ++EXPORT_SYMBOL_GPL(phytium_gpio_irq_unmask); ++ ++int phytium_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type) ++{ ++ struct gpio_chip *gc = irq_data_get_irq_chip_data(d); ++ struct phytium_gpio *gpio = gpiochip_get_data(gc); ++ int hwirq = irqd_to_hwirq(d); ++ unsigned long flags, lvl, pol; ++ ++ if (hwirq < 0 || hwirq >= gpio->ngpio[0]) ++ return -EINVAL; ++ ++ if ((flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) && ++ (flow_type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))) { ++ dev_err(gc->parent, ++ "trying to configure line %d for both level and edge detection, choose one!\n", ++ hwirq); ++ return -EINVAL; ++ } ++ ++ raw_spin_lock_irqsave(&gpio->lock, flags); ++ ++ lvl = readl(gpio->regs + GPIO_INTTYPE_LEVEL); ++ pol = readl(gpio->regs + GPIO_INT_POLARITY); ++ ++ switch (flow_type) { ++ case IRQ_TYPE_EDGE_BOTH: ++ lvl |= BIT(hwirq); ++ phytium_gpio_toggle_trigger(gpio, hwirq); ++ irq_set_handler_locked(d, handle_edge_irq); ++ dev_dbg(gc->parent, "line %d: IRQ on both edges\n", hwirq); ++ break; ++ case IRQ_TYPE_EDGE_RISING: ++ lvl |= BIT(hwirq); ++ pol |= BIT(hwirq); ++ irq_set_handler_locked(d, handle_edge_irq); ++ dev_dbg(gc->parent, "line %d: IRQ on RISING edge\n", hwirq); ++ break; ++ case IRQ_TYPE_EDGE_FALLING: ++ lvl |= BIT(hwirq); ++ pol &= ~BIT(hwirq); ++ 
irq_set_handler_locked(d, handle_edge_irq); ++ dev_dbg(gc->parent, "line %d: IRQ on FALLING edge\n", hwirq); ++ break; ++ case IRQ_TYPE_LEVEL_HIGH: ++ lvl &= ~BIT(hwirq); ++ pol |= BIT(hwirq); ++ irq_set_handler_locked(d, handle_level_irq); ++ dev_dbg(gc->parent, "line %d: IRQ on HIGH level\n", hwirq); ++ break; ++ case IRQ_TYPE_LEVEL_LOW: ++ lvl &= ~BIT(hwirq); ++ pol &= ~BIT(hwirq); ++ irq_set_handler_locked(d, handle_level_irq); ++ dev_dbg(gc->parent, "line %d: IRQ on LOW level\n", hwirq); ++ break; ++ } ++ ++ writel(lvl, gpio->regs + GPIO_INTTYPE_LEVEL); ++ if (flow_type != IRQ_TYPE_EDGE_BOTH) ++ writel(pol, gpio->regs + GPIO_INT_POLARITY); ++ ++ raw_spin_unlock_irqrestore(&gpio->lock, flags); ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(phytium_gpio_irq_set_type); ++ ++void phytium_gpio_irq_enable(struct irq_data *d) ++{ ++ struct gpio_chip *gc = irq_data_get_irq_chip_data(d); ++ struct phytium_gpio *gpio = gpiochip_get_data(gc); ++ unsigned long flags; ++ u32 val; ++ ++ /* Only port A can provide interrupt source */ ++ if (irqd_to_hwirq(d) >= gpio->ngpio[0]) ++ return; ++ ++ raw_spin_lock_irqsave(&gpio->lock, flags); ++ ++ val = readl(gpio->regs + GPIO_INTEN); ++ val |= BIT(irqd_to_hwirq(d)); ++ writel(val, gpio->regs + GPIO_INTEN); ++ ++ raw_spin_unlock_irqrestore(&gpio->lock, flags); ++} ++EXPORT_SYMBOL_GPL(phytium_gpio_irq_enable); ++ ++void phytium_gpio_irq_disable(struct irq_data *d) ++{ ++ struct gpio_chip *gc = irq_data_get_irq_chip_data(d); ++ struct phytium_gpio *gpio = gpiochip_get_data(gc); ++ unsigned long flags; ++ u32 val; ++ ++ /* Only port A can provide interrupt source */ ++ if (irqd_to_hwirq(d) >= gpio->ngpio[0]) ++ return; ++ ++ raw_spin_lock_irqsave(&gpio->lock, flags); ++ ++ val = readl(gpio->regs + GPIO_INTEN); ++ val &= ~BIT(irqd_to_hwirq(d)); ++ writel(val, gpio->regs + GPIO_INTEN); ++ ++ raw_spin_unlock_irqrestore(&gpio->lock, flags); ++} ++EXPORT_SYMBOL_GPL(phytium_gpio_irq_disable); ++ ++void phytium_gpio_irq_handler(struct irq_desc 
*desc) ++{ ++ struct gpio_chip *gc = irq_desc_get_handler_data(desc); ++ struct phytium_gpio *gpio = gpiochip_get_data(gc); ++ struct irq_chip *irqchip = irq_desc_get_chip(desc); ++ unsigned long pending; ++ int offset; ++ ++ chained_irq_enter(irqchip, desc); ++ ++ pending = readl(gpio->regs + GPIO_INTSTATUS); ++ if (pending) { ++ for_each_set_bit(offset, &pending, gpio->ngpio[0]) { ++ int gpio_irq = irq_find_mapping(gc->irq.domain, ++ offset); ++ generic_handle_irq(gpio_irq); ++ ++ if ((irq_get_trigger_type(gpio_irq) & ++ IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) ++ phytium_gpio_toggle_trigger(gpio, offset); ++ } ++ } ++ ++ chained_irq_exit(irqchip, desc); ++} ++EXPORT_SYMBOL_GPL(phytium_gpio_irq_handler); ++ ++int phytium_gpio_get_direction(struct gpio_chip *gc, unsigned int offset) ++{ ++ struct phytium_gpio *gpio = gpiochip_get_data(gc); ++ struct pin_loc loc; ++ void __iomem *ddr; ++ ++ if (get_pin_location(gpio, offset, &loc)) ++ return -EINVAL; ++ ddr = gpio->regs + GPIO_SWPORTA_DDR + (loc.port * GPIO_PORT_STRIDE); ++ ++ return !(readl(ddr) & BIT(loc.offset)); ++} ++EXPORT_SYMBOL_GPL(phytium_gpio_get_direction); ++ ++MODULE_LICENSE("GPL v2"); ++MODULE_DESCRIPTION("Phytium GPIO Controller core"); +diff --git a/drivers/gpio/gpio-phytium-core.h b/drivers/gpio/gpio-phytium-core.h +new file mode 100644 +index 000000000000..236c13530c4e +--- /dev/null ++++ b/drivers/gpio/gpio-phytium-core.h +@@ -0,0 +1,86 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright (C) 2021, Phytium Corporation. 
++ */ ++ ++#ifndef _GPIO_PHYTIUM_H ++#define _GPIO_PHYTIUM_H ++ ++#include ++#include ++ ++#include "gpiolib.h" ++ ++#define GPIO_SWPORTA_DR 0x00 /* WR Port A Output Data Register */ ++#define GPIO_SWPORTA_DDR 0x04 /* WR Port A Data Direction Register */ ++#define GPIO_EXT_PORTA 0x08 /* RO Port A Input Data Register */ ++#define GPIO_SWPORTB_DR 0x0c /* WR Port B Output Data Register */ ++#define GPIO_SWPORTB_DDR 0x10 /* WR Port B Data Direction Register */ ++#define GPIO_EXT_PORTB 0x14 /* RO Port B Input Data Register */ ++ ++#define GPIO_INTEN 0x18 /* WR Port A Interrput Enable Register */ ++#define GPIO_INTMASK 0x1c /* WR Port A Interrupt Mask Register */ ++#define GPIO_INTTYPE_LEVEL 0x20 /* WR Port A Interrupt Level Register */ ++#define GPIO_INT_POLARITY 0x24 /* WR Port A Interrupt Polarity Register */ ++#define GPIO_INTSTATUS 0x28 /* RO Port A Interrupt Status Register */ ++#define GPIO_RAW_INTSTATUS 0x2c /* RO Port A Raw Interrupt Status Register */ ++#define GPIO_LS_SYNC 0x30 /* WR Level-sensitive Synchronization Enable Register */ ++#define GPIO_DEBOUNCE 0x34 /* WR Debounce Enable Register */ ++#define GPIO_PORTA_EOI 0x38 /* WO Port A Clear Interrupt Register */ ++ ++#define MAX_NPORTS 2 ++#define NGPIO_DEFAULT 8 ++#define NGPIO_MAX 32 ++#define GPIO_PORT_STRIDE (GPIO_EXT_PORTB - GPIO_EXT_PORTA) ++ ++struct pin_loc { ++ unsigned int port; ++ unsigned int offset; ++}; ++ ++#ifdef CONFIG_PM_SLEEP ++struct phytium_gpio_ctx { ++ u32 swporta_dr; ++ u32 swporta_ddr; ++ u32 ext_porta; ++ u32 swportb_dr; ++ u32 swportb_ddr; ++ u32 ext_portb; ++ u32 inten; ++ u32 intmask; ++ u32 inttype_level; ++ u32 int_polarity; ++ u32 intstatus; ++ u32 raw_intstatus; ++ u32 ls_sync; ++ u32 debounce; ++}; ++#endif ++ ++struct phytium_gpio { ++ raw_spinlock_t lock; ++ void __iomem *regs; ++ struct gpio_chip gc; ++ unsigned int ngpio[2]; ++ int irq; ++#ifdef CONFIG_PM_SLEEP ++ struct phytium_gpio_ctx ctx; ++#endif ++}; ++ ++int phytium_gpio_get(struct gpio_chip *gc, unsigned int 
offset); ++void phytium_gpio_set(struct gpio_chip *gc, unsigned int offset, int value); ++ ++int phytium_gpio_get_direction(struct gpio_chip *gc, unsigned int offset); ++int phytium_gpio_direction_input(struct gpio_chip *gc, unsigned int offset); ++int phytium_gpio_direction_output(struct gpio_chip *gc, unsigned int offset, int value); ++ ++void phytium_gpio_irq_ack(struct irq_data *d); ++void phytium_gpio_irq_mask(struct irq_data *d); ++void phytium_gpio_irq_unmask(struct irq_data *d); ++int phytium_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type); ++void phytium_gpio_irq_enable(struct irq_data *d); ++void phytium_gpio_irq_disable(struct irq_data *d); ++void phytium_gpio_irq_handler(struct irq_desc *desc); ++ ++#endif +diff --git a/drivers/gpio/gpio-phytium-pci.c b/drivers/gpio/gpio-phytium-pci.c +new file mode 100644 +index 000000000000..3a6d655d4cd8 +--- /dev/null ++++ b/drivers/gpio/gpio-phytium-pci.c +@@ -0,0 +1,199 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (C) 2021, Phytium Corporation. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "gpio-phytium-core.h" ++ ++static struct irq_chip phytium_gpio_pci_irqchip = { ++ .name = "phytium_gpio_pci", ++ .irq_ack = phytium_gpio_irq_ack, ++ .irq_mask = phytium_gpio_irq_mask, ++ .irq_unmask = phytium_gpio_irq_unmask, ++ .irq_set_type = phytium_gpio_irq_set_type, ++ .irq_enable = phytium_gpio_irq_enable, ++ .irq_disable = phytium_gpio_irq_disable, ++}; ++ ++static int phytium_gpio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) ++{ ++ struct device *dev = &pdev->dev; ++ struct phytium_gpio *gpio; ++ int err; ++ ++ gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL); ++ if (!gpio) ++ return -ENOMEM; ++ ++ pci_set_drvdata(pdev, gpio); ++ ++ err = pcim_enable_device(pdev); ++ if (err) { ++ dev_err(dev, "Failed to enable PCI device: err %d\n", err); ++ goto err0; ++ } ++ ++ err = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev)); ++ if (err) { ++ dev_err(dev, "Failed to iomap PCI device: err %d\n", err); ++ goto err0; ++ } ++ ++ gpio->regs = pcim_iomap_table(pdev)[0]; ++ if (!gpio->regs) { ++ dev_err(dev, "Cannot map PCI resource\n"); ++ err = -ENOMEM; ++ goto err0; ++ } ++ ++ err = pci_enable_msi(pdev); ++ if (err < 0) ++ goto err0; ++ ++ gpio->irq = pdev->irq; ++ if (gpio->irq < 0) ++ dev_warn(dev, "no irq is found.\n"); ++ ++ /* There is only one group of Pins at the moment. 
*/ ++ gpio->ngpio[0] = NGPIO_MAX; ++ ++ /* irq_chip support */ ++ raw_spin_lock_init(&gpio->lock); ++ ++ gpio->gc.base = -1; ++ gpio->gc.get_direction = phytium_gpio_get_direction; ++ gpio->gc.direction_input = phytium_gpio_direction_input; ++ gpio->gc.direction_output = phytium_gpio_direction_output; ++ gpio->gc.get = phytium_gpio_get; ++ gpio->gc.set = phytium_gpio_set; ++ gpio->gc.ngpio = gpio->ngpio[0] + gpio->ngpio[1]; ++ gpio->gc.label = dev_name(dev); ++ gpio->gc.parent = dev; ++ gpio->gc.owner = THIS_MODULE; ++ ++ err = gpiochip_add_data(&gpio->gc, gpio); ++ if (err) { ++ dev_err(dev, "failed to register gpiochip\n"); ++ goto err1; ++ } ++ ++ err = gpiochip_irqchip_add(&gpio->gc, &phytium_gpio_pci_irqchip, ++ 0, handle_bad_irq, IRQ_TYPE_NONE); ++ if (err) { ++ dev_info(dev, "could not add irqchip\n"); ++ goto err1; ++ } ++ gpiochip_set_chained_irqchip(&gpio->gc, &phytium_gpio_pci_irqchip, ++ gpio->irq, ++ phytium_gpio_irq_handler); ++ ++ dev_info(dev, "Phytium PCI GPIO controller @%pa registered\n", ++ &gpio->regs); ++ ++ return 0; ++ ++err1: ++ gpiochip_remove(&gpio->gc); ++err0: ++ pci_set_drvdata(pdev, NULL); ++ return err; ++} ++ ++static void phytium_gpio_pci_remove(struct pci_dev *pdev) ++{ ++ struct phytium_gpio *gpio = pci_get_drvdata(pdev); ++ ++ gpiochip_remove(&gpio->gc); ++ ++ pci_set_drvdata(pdev, NULL); ++} ++ ++static const struct pci_device_id phytium_gpio_pci_ids[] = { ++ { PCI_DEVICE(0x1DB7, 0xDC31) }, ++ { 0 } ++}; ++MODULE_DEVICE_TABLE(pci, phytium_gpio_pci_ids); ++ ++#ifdef CONFIG_PM_SLEEP ++static int phytium_gpio_pci_suspend(struct device *dev) ++{ ++ struct pci_dev *pdev = to_pci_dev(dev); ++ struct phytium_gpio *gpio = pci_get_drvdata(pdev); ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&gpio->lock, flags); ++ ++ gpio->ctx.swporta_dr = readl(gpio->regs + GPIO_SWPORTA_DR); ++ gpio->ctx.swporta_ddr = readl(gpio->regs + GPIO_SWPORTA_DDR); ++ gpio->ctx.ext_porta = readl(gpio->regs + GPIO_EXT_PORTA); ++ gpio->ctx.swportb_dr = 
readl(gpio->regs + GPIO_SWPORTB_DR); ++ gpio->ctx.swportb_ddr = readl(gpio->regs + GPIO_SWPORTB_DDR); ++ gpio->ctx.ext_portb = readl(gpio->regs + GPIO_EXT_PORTB); ++ ++ gpio->ctx.inten = readl(gpio->regs + GPIO_INTEN); ++ gpio->ctx.intmask = readl(gpio->regs + GPIO_INTMASK); ++ gpio->ctx.inttype_level = readl(gpio->regs + GPIO_INTTYPE_LEVEL); ++ gpio->ctx.int_polarity = readl(gpio->regs + GPIO_INT_POLARITY); ++ gpio->ctx.debounce = readl(gpio->regs + GPIO_DEBOUNCE); ++ ++ raw_spin_unlock_irqrestore(&gpio->lock, flags); ++ ++ return 0; ++} ++ ++static int phytium_gpio_pci_resume(struct device *dev) ++{ ++ struct pci_dev *pdev = to_pci_dev(dev); ++ struct phytium_gpio *gpio = pci_get_drvdata(pdev); ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&gpio->lock, flags); ++ ++ writel(gpio->ctx.swporta_dr, gpio->regs + GPIO_SWPORTA_DR); ++ writel(gpio->ctx.swporta_ddr, gpio->regs + GPIO_SWPORTA_DDR); ++ writel(gpio->ctx.ext_porta, gpio->regs + GPIO_EXT_PORTA); ++ writel(gpio->ctx.swportb_dr, gpio->regs + GPIO_SWPORTB_DR); ++ writel(gpio->ctx.swportb_ddr, gpio->regs + GPIO_SWPORTB_DDR); ++ writel(gpio->ctx.ext_portb, gpio->regs + GPIO_EXT_PORTB); ++ ++ writel(gpio->ctx.inten, gpio->regs + GPIO_INTEN); ++ writel(gpio->ctx.intmask, gpio->regs + GPIO_INTMASK); ++ writel(gpio->ctx.inttype_level, gpio->regs + GPIO_INTTYPE_LEVEL); ++ writel(gpio->ctx.int_polarity, gpio->regs + GPIO_INT_POLARITY); ++ writel(gpio->ctx.debounce, gpio->regs + GPIO_DEBOUNCE); ++ ++ writel(0xffffffff, gpio->regs + GPIO_PORTA_EOI); ++ ++ raw_spin_unlock_irqrestore(&gpio->lock, flags); ++ ++ return 0; ++} ++#endif ++ ++static SIMPLE_DEV_PM_OPS(phytium_gpio_pci_pm_ops, ++ phytium_gpio_pci_suspend, ++ phytium_gpio_pci_resume); ++ ++static struct pci_driver phytium_gpio_pci_driver = { ++ .name = "gpio-phytium-pci", ++ .id_table = phytium_gpio_pci_ids, ++ .probe = phytium_gpio_pci_probe, ++ .remove = phytium_gpio_pci_remove, ++ .driver = { ++ .pm = &phytium_gpio_pci_pm_ops, ++ }, ++}; ++ 
++module_pci_driver(phytium_gpio_pci_driver); ++ ++MODULE_LICENSE("GPL v2"); ++MODULE_AUTHOR("Cheng Quan "); ++MODULE_DESCRIPTION("Phytium GPIO PCI Driver"); +diff --git a/drivers/gpio/gpio-phytium-platform.c b/drivers/gpio/gpio-phytium-platform.c +new file mode 100644 +index 000000000000..e60f800e8a2c +--- /dev/null ++++ b/drivers/gpio/gpio-phytium-platform.c +@@ -0,0 +1,217 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Support functions for Phytium GPIO ++ * ++ * Copyright (c) 2019, Phytium Corporation. ++ * Written by Chen Baozi ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "gpio-phytium-core.h" ++ ++static struct irq_chip phytium_gpio_irqchip = { ++ .name = "phytium_gpio", ++ .irq_ack = phytium_gpio_irq_ack, ++ .irq_mask = phytium_gpio_irq_mask, ++ .irq_unmask = phytium_gpio_irq_unmask, ++ .irq_set_type = phytium_gpio_irq_set_type, ++ .irq_enable = phytium_gpio_irq_enable, ++ .irq_disable = phytium_gpio_irq_disable, ++}; ++ ++static const struct of_device_id phytium_gpio_of_match[] = { ++ { .compatible = "phytium,gpio", }, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, phytium_gpio_of_match); ++ ++static const struct acpi_device_id phytium_gpio_acpi_match[] = { ++ { "PHYT0001", 0 }, ++ { } ++}; ++MODULE_DEVICE_TABLE(acpi, phytium_gpio_acpi_match); ++ ++static int phytium_gpio_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct resource *res; ++ struct phytium_gpio *gpio; ++ struct fwnode_handle *fwnode; ++ int err; ++ ++ gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL); ++ if (!gpio) ++ return -ENOMEM; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ gpio->regs = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(gpio->regs)) 
++ return PTR_ERR(gpio->regs); ++ ++ gpio->irq = -ENXIO; ++ gpio->irq = platform_get_irq(pdev, 0); ++ if (gpio->irq < 0) ++ dev_warn(dev, "no irq is found.\n"); ++ ++ if (!device_get_child_node_count(dev)) ++ return -ENODEV; ++ ++ device_for_each_child_node(dev, fwnode) { ++ int idx; ++ ++ if (fwnode_property_read_u32(fwnode, "reg", &idx) || ++ idx >= MAX_NPORTS) { ++ dev_err(dev, "missing/invalid port index\n"); ++ fwnode_handle_put(fwnode); ++ return -EINVAL; ++ } ++ ++ if (fwnode_property_read_u32(fwnode, "nr-gpios", ++ &gpio->ngpio[idx])) { ++ dev_info(dev, ++ "failed to get number of gpios for Port%c\n", ++ idx ? 'B' : 'A'); ++ gpio->ngpio[idx] = NGPIO_DEFAULT; ++ } ++ } ++ ++ /* irq_chip support */ ++ raw_spin_lock_init(&gpio->lock); ++ ++ gpio->gc.base = -1; ++ gpio->gc.get_direction = phytium_gpio_get_direction; ++ gpio->gc.direction_input = phytium_gpio_direction_input; ++ gpio->gc.direction_output = phytium_gpio_direction_output; ++ gpio->gc.get = phytium_gpio_get; ++ gpio->gc.set = phytium_gpio_set; ++ gpio->gc.ngpio = gpio->ngpio[0] + gpio->ngpio[1]; ++ gpio->gc.label = dev_name(dev); ++ gpio->gc.parent = dev; ++ gpio->gc.owner = THIS_MODULE; ++ ++ err = gpiochip_add_data(&gpio->gc, gpio); ++ if (err) { ++ dev_err(dev, "failed to register gpiochip\n"); ++ goto err1; ++ } ++ ++ err = gpiochip_irqchip_add(&gpio->gc, &phytium_gpio_irqchip, ++ 0, handle_bad_irq, IRQ_TYPE_NONE); ++ if (err) { ++ dev_info(dev, "could not add irqchip\n"); ++ goto err0; ++ } ++ gpiochip_set_chained_irqchip(&gpio->gc, &phytium_gpio_irqchip, ++ gpio->irq, ++ phytium_gpio_irq_handler); ++ ++ platform_set_drvdata(pdev, gpio); ++ dev_info(dev, "Phytium GPIO controller @%pa registered\n", ++ &res->start); ++ ++ return 0; ++ ++err1: ++ gpiochip_remove(&gpio->gc); ++err0: ++ return err; ++} ++ ++static int phytium_gpio_remove(struct platform_device *pdev) ++{ ++ struct phytium_gpio *gpio = platform_get_drvdata(pdev); ++ ++ gpiochip_remove(&gpio->gc); ++ ++ return 0; ++} ++ ++#ifdef 
CONFIG_PM_SLEEP ++static int phytium_gpio_suspend(struct device *dev) ++{ ++ struct platform_device *pdev = to_platform_device(dev); ++ struct phytium_gpio *gpio = platform_get_drvdata(pdev); ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&gpio->lock, flags); ++ ++ gpio->ctx.swporta_dr = readl(gpio->regs + GPIO_SWPORTA_DR); ++ gpio->ctx.swporta_ddr = readl(gpio->regs + GPIO_SWPORTA_DDR); ++ gpio->ctx.ext_porta = readl(gpio->regs + GPIO_EXT_PORTA); ++ gpio->ctx.swportb_dr = readl(gpio->regs + GPIO_SWPORTB_DR); ++ gpio->ctx.swportb_ddr = readl(gpio->regs + GPIO_SWPORTB_DDR); ++ gpio->ctx.ext_portb = readl(gpio->regs + GPIO_EXT_PORTB); ++ ++ gpio->ctx.inten = readl(gpio->regs + GPIO_INTEN); ++ gpio->ctx.intmask = readl(gpio->regs + GPIO_INTMASK); ++ gpio->ctx.inttype_level = readl(gpio->regs + GPIO_INTTYPE_LEVEL); ++ gpio->ctx.int_polarity = readl(gpio->regs + GPIO_INT_POLARITY); ++ gpio->ctx.debounce = readl(gpio->regs + GPIO_DEBOUNCE); ++ ++ raw_spin_unlock_irqrestore(&gpio->lock, flags); ++ ++ return 0; ++} ++ ++static int phytium_gpio_resume(struct device *dev) ++{ ++ struct platform_device *pdev = to_platform_device(dev); ++ struct phytium_gpio *gpio = platform_get_drvdata(pdev); ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&gpio->lock, flags); ++ ++ writel(gpio->ctx.swporta_dr, gpio->regs + GPIO_SWPORTA_DR); ++ writel(gpio->ctx.swporta_ddr, gpio->regs + GPIO_SWPORTA_DDR); ++ writel(gpio->ctx.ext_porta, gpio->regs + GPIO_EXT_PORTA); ++ writel(gpio->ctx.swportb_dr, gpio->regs + GPIO_SWPORTB_DR); ++ writel(gpio->ctx.swportb_ddr, gpio->regs + GPIO_SWPORTB_DDR); ++ writel(gpio->ctx.ext_portb, gpio->regs + GPIO_EXT_PORTB); ++ ++ writel(gpio->ctx.inten, gpio->regs + GPIO_INTEN); ++ writel(gpio->ctx.intmask, gpio->regs + GPIO_INTMASK); ++ writel(gpio->ctx.inttype_level, gpio->regs + GPIO_INTTYPE_LEVEL); ++ writel(gpio->ctx.int_polarity, gpio->regs + GPIO_INT_POLARITY); ++ writel(gpio->ctx.debounce, gpio->regs + GPIO_DEBOUNCE); ++ ++ writel(0xffffffff, 
gpio->regs + GPIO_PORTA_EOI); ++ ++ raw_spin_unlock_irqrestore(&gpio->lock, flags); ++ ++ return 0; ++} ++#endif ++ ++static SIMPLE_DEV_PM_OPS(phytium_gpio_pm_ops, phytium_gpio_suspend, ++ phytium_gpio_resume); ++ ++static struct platform_driver phytium_gpio_driver = { ++ .driver = { ++ .name = "gpio-phytium-platform", ++ .pm = &phytium_gpio_pm_ops, ++ .of_match_table = of_match_ptr(phytium_gpio_of_match), ++ .acpi_match_table = ACPI_PTR(phytium_gpio_acpi_match), ++ }, ++ .probe = phytium_gpio_probe, ++ .remove = phytium_gpio_remove, ++}; ++ ++module_platform_driver(phytium_gpio_driver); ++ ++MODULE_LICENSE("GPL v2"); ++MODULE_AUTHOR("Chen Baozi "); ++MODULE_DESCRIPTION("Phytium GPIO driver"); +diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig +index cb88528e7b10..d957fc735e2d 100644 +--- a/drivers/gpu/drm/Kconfig ++++ b/drivers/gpu/drm/Kconfig +@@ -315,6 +315,8 @@ source "drivers/gpu/drm/tve200/Kconfig" + + source "drivers/gpu/drm/xen/Kconfig" + ++source "drivers/gpu/drm/phytium/Kconfig" ++ + # Keep legacy drivers last + + menuconfig DRM_LEGACY +diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile +index a6771cef85e2..003ad8887229 100644 +--- a/drivers/gpu/drm/Makefile ++++ b/drivers/gpu/drm/Makefile +@@ -106,4 +106,5 @@ obj-$(CONFIG_DRM_MXSFB) += mxsfb/ + obj-$(CONFIG_DRM_TINYDRM) += tinydrm/ + obj-$(CONFIG_DRM_PL111) += pl111/ + obj-$(CONFIG_DRM_TVE200) += tve200/ ++obj-$(CONFIG_DRM_PHYTIUM) += phytium/ + obj-$(CONFIG_DRM_XEN) += xen/ +diff --git a/drivers/gpu/drm/phytium/Kconfig b/drivers/gpu/drm/phytium/Kconfig +new file mode 100644 +index 000000000000..e3024feb69d0 +--- /dev/null ++++ b/drivers/gpu/drm/phytium/Kconfig +@@ -0,0 +1,7 @@ ++config DRM_PHYTIUM ++ tristate "DRM Support for Phytium Graphics Card" ++ depends on DRM ++ select DRM_KMS_HELPER ++ help ++ Choose this option if you have a phytium graphics card. ++ This driver provides kernel mode setting and buffer management to userspace. 
+diff --git a/drivers/gpu/drm/phytium/Makefile b/drivers/gpu/drm/phytium/Makefile +new file mode 100644 +index 000000000000..104416fc4313 +--- /dev/null ++++ b/drivers/gpu/drm/phytium/Makefile +@@ -0,0 +1,15 @@ ++phytium-dc-drm-y := phytium_display_drv.o \ ++ phytium_plane.o \ ++ phytium_crtc.o \ ++ phytium_dp.o \ ++ phytium_fb.o \ ++ phytium_gem.o \ ++ phytium_fbdev.o \ ++ phytium_debugfs.o \ ++ x100_dp.o \ ++ phytium_panel.o \ ++ x100_dc.o \ ++ phytium_pci.o ++ ++obj-$(CONFIG_DRM_PHYTIUM) += phytium-dc-drm.o ++CFLAGS_REMOVE_phytium_crtc.o += -mgeneral-regs-only +diff --git a/drivers/gpu/drm/phytium/phytium_crtc.c b/drivers/gpu/drm/phytium/phytium_crtc.c +new file mode 100644 +index 000000000000..796c046d0a73 +--- /dev/null ++++ b/drivers/gpu/drm/phytium/phytium_crtc.c +@@ -0,0 +1,720 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* Phytium display drm driver ++ * ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. ++ */ ++ ++#include ++#include ++#include ++#include ++#include "phytium_display_drv.h" ++#include "phytium_crtc.h" ++#include "phytium_plane.h" ++#include "phytium_dp.h" ++#include "x100_dc.h" ++#include "phytium_reg.h" ++ ++#define MAXKERNELSIZE 9 ++#define SUBPIXELINDEXBITS 5 ++#define SUBPIXELCOUNT (1 << SUBPIXELINDEXBITS) ++#define SUBPIXELLOADCOUNT (SUBPIXELCOUNT / 2 + 1) ++#define WEIGHTSTATECOUNT (((SUBPIXELLOADCOUNT * MAXKERNELSIZE + 1) & ~1) / 2) ++#define KERNELTABLESIZE (SUBPIXELLOADCOUNT * MAXKERNELSIZE * sizeof(uint16_t)) ++#define PHYALIGN(n, align) (((n) + ((align) - 1)) & ~((align) - 1)) ++#define KERNELSTATES (PHYALIGN(KERNELTABLESIZE + 4, 8)) ++#define PHYPI 3.14159265358979323846f ++ ++#define MATH_Add(X, Y) (float)((X) + (Y)) ++#define MATH_Multiply(X, Y) (float)((X) * (Y)) ++#define MATH_Divide(X, Y) (float)((X) / (Y)) ++#define MATH_DivideFromUInteger(X, Y) ((float)(X) / (float)(Y)) ++#define MATH_I2Float(X) (float)(X) ++ ++struct filter_blit_array { ++ uint8_t kernelSize; ++ uint32_t scaleFactor; ++ uint32_t *kernelStates; ++}; ++ 
++static uint32_t dc_scaling_get_factor(uint32_t src_size, uint32_t dst_size) ++{ ++ uint32_t factor = 0; ++ ++ factor = ((src_size - 1) << SCALE_FACTOR_SRC_OFFSET) / (dst_size - 1); ++ ++ return factor; ++} ++ ++static float dc_sint(float x) ++{ ++ const float B = 1.2732395477; ++ const float C = -0.4052847346; ++ const float P = 0.2310792853; ++ float y; ++ ++ if (x < 0) ++ y = B*x - C*x*x; ++ else ++ y = B*x + C*x*x; ++ if (y < 0) ++ y = P * (y * (0 - y) - y) + y; ++ else ++ y = P * (y * y - y) + y; ++ return y; ++} ++ ++static float dc_sinc_filter(float x, int radius) ++{ ++ float pit, pitd, f1, f2, result; ++ float f_radius = MATH_I2Float(radius); ++ ++ if (x == 0.0f) { ++ result = 1.0f; ++ } else if ((x < -f_radius) || (x > f_radius)) { ++ result = 0.0f; ++ } else { ++ pit = MATH_Multiply(PHYPI, x); ++ pitd = MATH_Divide(pit, f_radius); ++ f1 = MATH_Divide(dc_sint(pit), pit); ++ f2 = MATH_Divide(dc_sint(pitd), pitd); ++ result = MATH_Multiply(f1, f2); ++ } ++ ++ return result; ++} ++ ++static int dc_calculate_sync_table( ++ uint8_t kernel_size, ++ uint32_t src_size, ++ uint32_t dst_size, ++ struct filter_blit_array *kernel_info) ++{ ++ uint32_t scale_factor; ++ float f_scale; ++ int kernel_half; ++ float f_subpixel_step; ++ float f_subpixel_offset; ++ uint32_t subpixel_pos; ++ int kernel_pos; ++ int padding; ++ uint16_t *kernel_array; ++ int range = 0; ++ ++ do { ++ /* Compute the scale factor. */ ++ scale_factor = dc_scaling_get_factor(src_size, dst_size); ++ ++ /* Same kernel size and ratio as before? */ ++ if ((kernel_info->kernelSize == kernel_size) && ++ (kernel_info->scaleFactor == kernel_size)) { ++ break; ++ } ++ ++ /* check the array */ ++ if (kernel_info->kernelStates == NULL) ++ break; ++ ++ /* Store new parameters. */ ++ kernel_info->kernelSize = kernel_size; ++ kernel_info->scaleFactor = scale_factor; ++ ++ /* Compute the scale factor. */ ++ f_scale = MATH_DivideFromUInteger(dst_size, src_size); ++ ++ /* Adjust the factor for magnification. 
*/ ++ if (f_scale > 1.0f) ++ f_scale = 1.0f; ++ ++ /* Calculate the kernel half. */ ++ kernel_half = (int) (kernel_info->kernelSize >> 1); ++ ++ /* Calculate the subpixel step. */ ++ f_subpixel_step = MATH_Divide(1.0f, MATH_I2Float(SUBPIXELCOUNT)); ++ ++ /* Init the subpixel offset. */ ++ f_subpixel_offset = 0.5f; ++ ++ /* Determine kernel padding size. */ ++ padding = (MAXKERNELSIZE - kernel_info->kernelSize) / 2; ++ ++ /* Set initial kernel array pointer. */ ++ kernel_array = (uint16_t *) (kernel_info->kernelStates + 1); ++ ++ /* Loop through each subpixel. */ ++ for (subpixel_pos = 0; subpixel_pos < SUBPIXELLOADCOUNT; subpixel_pos++) { ++ /* Define a temporary set of weights. */ ++ float fSubpixelSet[MAXKERNELSIZE]; ++ ++ /* Init the sum of all weights for the current subpixel. */ ++ float fWeightSum = 0.0f; ++ uint16_t weightSum = 0; ++ short int adjustCount, adjustFrom; ++ short int adjustment; ++ ++ /* Compute weights. */ ++ for (kernel_pos = 0; kernel_pos < MAXKERNELSIZE; kernel_pos++) { ++ /* Determine the current index. */ ++ int index = kernel_pos - padding; ++ ++ /* Pad with zeros. */ ++ if ((index < 0) || (index >= kernel_info->kernelSize)) { ++ fSubpixelSet[kernel_pos] = 0.0f; ++ } else { ++ if (kernel_info->kernelSize == 1) { ++ fSubpixelSet[kernel_pos] = 1.0f; ++ } else { ++ /* Compute the x position for filter function. */ ++ float fX = MATH_Add( ++ MATH_I2Float(index - kernel_half), ++ f_subpixel_offset); ++ fX = MATH_Multiply(fX, f_scale); ++ ++ /* Compute the weight. */ ++ fSubpixelSet[kernel_pos] = dc_sinc_filter(fX, ++ kernel_half); ++ } ++ ++ /* Update the sum of weights. */ ++ fWeightSum = MATH_Add(fWeightSum, ++ fSubpixelSet[kernel_pos]); ++ } ++ } ++ ++ /* Adjust weights so that the sum will be 1.0. */ ++ for (kernel_pos = 0; kernel_pos < MAXKERNELSIZE; kernel_pos++) { ++ /* Normalize the current weight. 
*/ ++ float fWeight = MATH_Divide(fSubpixelSet[kernel_pos], ++ fWeightSum); ++ ++ /* Convert the weight to fixed point and store in the table. */ ++ if (fWeight == 0.0f) ++ kernel_array[kernel_pos] = 0x0000; ++ else if (fWeight >= 1.0f) ++ kernel_array[kernel_pos] = 0x4000; ++ else if (fWeight <= -1.0f) ++ kernel_array[kernel_pos] = 0xC000; ++ else ++ kernel_array[kernel_pos] = ++ (int16_t) MATH_Multiply(fWeight, 16384.0f); ++ weightSum += kernel_array[kernel_pos]; ++ } ++ ++ /* Adjust the fixed point coefficients. */ ++ adjustCount = 0x4000 - weightSum; ++ if (adjustCount < 0) { ++ adjustCount = -adjustCount; ++ adjustment = -1; ++ } else { ++ adjustment = 1; ++ } ++ ++ adjustFrom = (MAXKERNELSIZE - adjustCount) / 2; ++ for (kernel_pos = 0; kernel_pos < adjustCount; kernel_pos++) { ++ range = (MAXKERNELSIZE*subpixel_pos + adjustFrom + kernel_pos) * ++ sizeof(uint16_t); ++ if ((range >= 0) && (range < KERNELTABLESIZE)) ++ kernel_array[adjustFrom + kernel_pos] += adjustment; ++ else ++ DRM_ERROR("%s failed\n", __func__); ++ } ++ ++ kernel_array += MAXKERNELSIZE; ++ ++ /* Advance to the next subpixel. 
*/ ++ f_subpixel_offset = MATH_Add(f_subpixel_offset, -f_subpixel_step); ++ } ++ } while (0); ++ ++ return 0; ++} ++ ++static void phytium_dc_scaling_config(struct drm_crtc *crtc, ++ struct drm_crtc_state *old_state) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ struct drm_display_mode *mode = &crtc->state->adjusted_mode; ++ struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); ++ int phys_pipe = phytium_crtc->phys_pipe; ++ uint32_t group_offset = priv->dc_reg_base[phys_pipe]; ++ uint32_t scale_factor_x, scale_factor_y, i; ++ uint32_t kernelStates[128]; ++ struct filter_blit_array kernel_info_width; ++ void *tmp = NULL; ++ ++ if (mode->hdisplay != mode->crtc_hdisplay || mode->vdisplay != mode->crtc_vdisplay) { ++ phytium_crtc->src_width = mode->hdisplay; ++ phytium_crtc->src_height = mode->vdisplay; ++ phytium_crtc->dst_width = mode->crtc_hdisplay; ++ phytium_crtc->dst_height = mode->crtc_vdisplay; ++ ++ phytium_crtc->dst_x = (mode->crtc_hdisplay - phytium_crtc->dst_width) / 2; ++ phytium_crtc->dst_y = (mode->crtc_vdisplay - phytium_crtc->dst_height) / 2; ++ ++ scale_factor_x = dc_scaling_get_factor(phytium_crtc->src_width, ++ phytium_crtc->dst_width); ++ scale_factor_y = dc_scaling_get_factor(phytium_crtc->src_height, ++ phytium_crtc->dst_height); ++ if (scale_factor_y > (SCALE_FACTOR_Y_MAX << SCALE_FACTOR_SRC_OFFSET)) ++ scale_factor_y = (SCALE_FACTOR_Y_MAX << SCALE_FACTOR_SRC_OFFSET); ++ ++ phytium_writel_reg(priv, scale_factor_x & SCALE_FACTOR_X_MASK, ++ group_offset, PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_X); ++ phytium_writel_reg(priv, scale_factor_y & SCALE_FACTOR_Y_MASK, ++ group_offset, PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_Y); ++ phytium_writel_reg(priv, FRAMEBUFFER_TAP, ++ group_offset, PHYTIUM_DC_FRAMEBUFFER_SCALECONFIG); ++ ++ tmp = kmalloc(KERNELSTATES, GFP_KERNEL); ++ if (!tmp) { ++ DRM_ERROR("malloc %ld failed\n", KERNELSTATES); ++ return; ++ } ++ ++ memset(&kernel_info_width, 0, 
sizeof(struct filter_blit_array)); ++ kernel_info_width.kernelStates = tmp; ++ memset(kernel_info_width.kernelStates, 0, KERNELSTATES); ++ kernel_neon_begin(); ++ dc_calculate_sync_table(FRAMEBUFFER_HORIZONTAL_FILTER_TAP, ++ phytium_crtc->src_width, ++ phytium_crtc->dst_width, ++ &kernel_info_width); ++ memset(kernelStates, 0, sizeof(kernelStates)); ++ memcpy(kernelStates, kernel_info_width.kernelStates + 1, KERNELSTATES - 4); ++ kernel_neon_end(); ++ phytium_writel_reg(priv, HORI_FILTER_INDEX, ++ group_offset, PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER_INDEX); ++ for (i = 0; i < 128; i++) { ++ phytium_writel_reg(priv, kernelStates[i], ++ group_offset, PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER); ++ } ++ ++ memset(&kernel_info_width, 0, sizeof(struct filter_blit_array)); ++ kernel_info_width.kernelStates = tmp; ++ memset(kernel_info_width.kernelStates, 0, KERNELSTATES); ++ kernel_neon_begin(); ++ dc_calculate_sync_table(FRAMEBUFFER_FILTER_TAP, phytium_crtc->src_height, ++ phytium_crtc->dst_height, &kernel_info_width); ++ memset(kernelStates, 0, sizeof(kernelStates)); ++ memcpy(kernelStates, kernel_info_width.kernelStates + 1, KERNELSTATES - 4); ++ kernel_neon_end(); ++ phytium_writel_reg(priv, VERT_FILTER_INDEX, ++ group_offset, PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER_INDEX); ++ for (i = 0; i < 128; i++) ++ phytium_writel_reg(priv, kernelStates[i], ++ group_offset, PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER); ++ phytium_writel_reg(priv, INITIALOFFSET, ++ group_offset, PHYTIUM_DC_FRAMEBUFFER_INITIALOFFSET); ++ kfree(tmp); ++ phytium_crtc->scale_enable = true; ++ } else { ++ phytium_crtc->scale_enable = false; ++ } ++} ++ ++static void phytium_crtc_gamma_set(struct drm_crtc *crtc) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); ++ int phys_pipe = phytium_crtc->phys_pipe; ++ uint32_t group_offset = priv->dc_reg_base[phys_pipe]; ++ uint32_t config = 0; ++ struct drm_crtc_state 
*state = crtc->state; ++ struct drm_color_lut *lut; ++ int i; ++ ++ if (state->gamma_lut) { ++ if (WARN((state->gamma_lut->length/sizeof(struct drm_color_lut) != GAMMA_INDEX_MAX), ++ "gamma size is not match\n")) ++ return; ++ lut = (struct drm_color_lut *)state->gamma_lut->data; ++ for (i = 0; i < GAMMA_INDEX_MAX; i++) { ++ phytium_writel_reg(priv, i, group_offset, PHYTIUM_DC_GAMMA_INDEX); ++ config = ((lut[i].red >> 6) & GAMMA_RED_MASK) << GAMMA_RED_SHIFT; ++ config |= (((lut[i].green >> 6) & GAMMA_GREEN_MASK) << GAMMA_GREEN_SHIFT); ++ config |= (((lut[i].blue >> 6) & GAMMA_BLUE_MASK) << GAMMA_BLUE_SHIFT); ++ phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_GAMMA_DATA); ++ } ++ } ++} ++ ++static void phytium_crtc_gamma_init(struct drm_crtc *crtc) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); ++ int phys_pipe = phytium_crtc->phys_pipe; ++ uint32_t group_offset = priv->dc_reg_base[phys_pipe]; ++ uint32_t config = 0; ++ uint16_t *red, *green, *blue; ++ int i; ++ ++ if (WARN((crtc->gamma_size != GAMMA_INDEX_MAX), "gamma size is not match\n")) ++ return; ++ ++ red = crtc->gamma_store; ++ green = red + crtc->gamma_size; ++ blue = green + crtc->gamma_size; ++ ++ for (i = 0; i < GAMMA_INDEX_MAX; i++) { ++ phytium_writel_reg(priv, i, group_offset, PHYTIUM_DC_GAMMA_INDEX); ++ config = ((*red++ >> 6) & GAMMA_RED_MASK) << GAMMA_RED_SHIFT; ++ config |= (((*green++ >> 6) & GAMMA_GREEN_MASK) << GAMMA_GREEN_SHIFT); ++ config |= (((*blue++ >> 6) & GAMMA_BLUE_MASK) << GAMMA_BLUE_SHIFT); ++ phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_GAMMA_DATA); ++ } ++} ++ ++static void phytium_crtc_destroy(struct drm_crtc *crtc) ++{ ++ struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); ++ ++ drm_crtc_cleanup(crtc); ++ kfree(phytium_crtc); ++} ++ ++struct drm_crtc_state * ++phytium_crtc_atomic_duplicate_state(struct drm_crtc *crtc) ++{ ++ struct 
phytium_crtc_state *phytium_crtc_state = NULL; ++ ++ phytium_crtc_state = kmemdup(crtc->state, sizeof(*phytium_crtc_state), ++ GFP_KERNEL); ++ if (!phytium_crtc_state) ++ return NULL; ++ __drm_atomic_helper_crtc_duplicate_state(crtc, ++ &phytium_crtc_state->base); ++ ++ return &phytium_crtc_state->base; ++} ++ ++void ++phytium_crtc_atomic_destroy_state(struct drm_crtc *crtc, ++ struct drm_crtc_state *state) ++{ ++ struct phytium_crtc_state *phytium_crtc_state = ++ to_phytium_crtc_state(state); ++ ++ phytium_crtc_state = to_phytium_crtc_state(state); ++ __drm_atomic_helper_crtc_destroy_state(state); ++ kfree(phytium_crtc_state); ++} ++ ++static const struct drm_crtc_funcs phytium_crtc_funcs = { ++ .gamma_set = drm_atomic_helper_legacy_gamma_set, ++ .set_config = drm_atomic_helper_set_config, ++ .destroy = phytium_crtc_destroy, ++ .page_flip = drm_atomic_helper_page_flip, ++ .reset = drm_atomic_helper_crtc_reset, ++ .atomic_duplicate_state = phytium_crtc_atomic_duplicate_state, ++ .atomic_destroy_state = phytium_crtc_atomic_destroy_state, ++}; ++ ++static void ++phytium_crtc_atomic_enable(struct drm_crtc *crtc, ++ struct drm_crtc_state *old_state) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ struct drm_display_mode *mode = &crtc->state->adjusted_mode; ++ struct drm_atomic_state *state = old_state->state; ++ struct drm_connector_state *new_conn_state; ++ struct drm_connector *conn; ++ struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); ++ int phys_pipe = phytium_crtc->phys_pipe; ++ uint32_t group_offset = priv->dc_reg_base[phys_pipe]; ++ int config = 0, i = 0; ++ ++ for_each_new_connector_in_state(state, conn, new_conn_state, i) { ++ if (new_conn_state->crtc != crtc) ++ continue; ++ ++ switch (conn->display_info.bpc) { ++ case 10: ++ phytium_crtc->bpc = DP_RGB101010; ++ break; ++ case 6: ++ phytium_crtc->bpc = DP_RGB666; ++ break; ++ default: ++ phytium_crtc->bpc = DP_RGB888; ++ break; ++ } ++ } ++ ++ 
/* config pix clock */ ++ phytium_crtc->dc_hw_config_pix_clock(crtc, mode->clock); ++ ++ phytium_dc_scaling_config(crtc, old_state); ++ config = ((mode->crtc_hdisplay & HDISPLAY_END_MASK) << HDISPLAY_END_SHIFT) ++ | ((mode->crtc_htotal&HDISPLAY_TOTAL_MASK) << HDISPLAY_TOTAL_SHIFT); ++ phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_HDISPLAY); ++ config = ((mode->crtc_hsync_start & HSYNC_START_MASK) << HSYNC_START_SHIFT) ++ | ((mode->crtc_hsync_end & HSYNC_END_MASK) << HSYNC_END_SHIFT) ++ | HSYNC_PULSE_ENABLED; ++ config |= (mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : HSYNC_NEGATIVE; ++ phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_HSYNC); ++ config = ((mode->crtc_vdisplay & VDISPLAY_END_MASK) << VDISPLAY_END_SHIFT) ++ | ((mode->crtc_vtotal & VDISPLAY_TOTAL_MASK) << VDISPLAY_TOTAL_SHIFT); ++ phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_VDISPLAY); ++ config = ((mode->crtc_vsync_start & VSYNC_START_MASK) << VSYNC_START_SHIFT) ++ | ((mode->crtc_vsync_end & VSYNC_END_MASK) << VSYNC_END_SHIFT) ++ | VSYNC_PULSE_ENABLED; ++ config |= (mode->flags & DRM_MODE_FLAG_PVSYNC) ? 
0 : VSYNC_NEGATIVE; ++ phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_VSYNC); ++ config = PANEL_DATAENABLE_ENABLE | PANEL_DATA_ENABLE | PANEL_CLOCK_ENABLE; ++ phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_PANEL_CONFIG); ++ config = phytium_crtc->bpc | OUTPUT_DP; ++ phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_DP_CONFIG); ++ ++ config = phytium_readl_reg(priv, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); ++ ++ if (crtc->state->active) ++ config |= FRAMEBUFFER_OUTPUT | FRAMEBUFFER_RESET; ++ else ++ config &= (~(FRAMEBUFFER_OUTPUT | FRAMEBUFFER_RESET)); ++ ++ if (phytium_crtc->scale_enable) ++ config |= FRAMEBUFFER_SCALE_ENABLE; ++ else ++ config &= (~FRAMEBUFFER_SCALE_ENABLE); ++ ++ config |= FRAMEBUFFER_GAMMA_ENABLE; ++ ++ phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); ++ drm_crtc_vblank_on(crtc); ++} ++ ++static void ++phytium_crtc_atomic_disable(struct drm_crtc *crtc, ++ struct drm_crtc_state *old_state) ++{ ++ struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); ++ ++ drm_crtc_vblank_off(crtc); ++ phytium_crtc->dc_hw_disable(crtc); ++} ++ ++static void phytium_crtc_update_timing_for_drm_display_mode(struct drm_display_mode *drm_mode, ++ const struct drm_display_mode *native_mode) ++{ ++ if (native_mode->clock == drm_mode->clock && ++ native_mode->htotal == drm_mode->htotal && ++ native_mode->vtotal == drm_mode->vtotal) { ++ drm_mode->crtc_hdisplay = native_mode->crtc_hdisplay; ++ drm_mode->crtc_vdisplay = native_mode->crtc_vdisplay; ++ drm_mode->crtc_clock = native_mode->crtc_clock; ++ drm_mode->crtc_hblank_start = native_mode->crtc_hblank_start; ++ drm_mode->crtc_hblank_end = native_mode->crtc_hblank_end; ++ drm_mode->crtc_hsync_start = native_mode->crtc_hsync_start; ++ drm_mode->crtc_hsync_end = native_mode->crtc_hsync_end; ++ drm_mode->crtc_htotal = native_mode->crtc_htotal; ++ drm_mode->crtc_hskew = native_mode->crtc_hskew; ++ drm_mode->crtc_vblank_start = 
native_mode->crtc_vblank_start; ++ drm_mode->crtc_vblank_end = native_mode->crtc_vblank_end; ++ drm_mode->crtc_vsync_start = native_mode->crtc_vsync_start; ++ drm_mode->crtc_vsync_end = native_mode->crtc_vsync_end; ++ drm_mode->crtc_vtotal = native_mode->crtc_vtotal; ++ } ++} ++ ++static int ++phytium_crtc_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state) ++{ ++ struct drm_plane_state *new_plane_state = NULL; ++ int ret = 0; ++ struct drm_atomic_state *state = crtc_state->state; ++ struct drm_connector *connector; ++ struct drm_connector_state *new_con_state; ++ uint32_t i; ++ struct phytium_dp_device *phytium_dp = NULL; ++ ++ for_each_new_connector_in_state(state, connector, new_con_state, i) { ++ if (new_con_state->crtc == crtc) { ++ phytium_dp = connector_to_dp_device(connector); ++ break; ++ } ++ } ++ if (phytium_dp) ++ phytium_crtc_update_timing_for_drm_display_mode(&crtc_state->adjusted_mode, ++ &phytium_dp->native_mode); ++ ++ new_plane_state = drm_atomic_get_new_plane_state(crtc_state->state, ++ crtc->primary); ++ if (crtc_state->enable && new_plane_state && !new_plane_state->crtc) { ++ ret = -EINVAL; ++ goto fail; ++ } ++ ++ return 0; ++fail: ++ return ret; ++} ++ ++static void ++phytium_crtc_atomic_begin(struct drm_crtc *crtc, ++ struct drm_crtc_state *old_crtc_state) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); ++ int phys_pipe = phytium_crtc->phys_pipe, config; ++ uint32_t group_offset = priv->dc_reg_base[phys_pipe]; ++ ++ config = phytium_readl_reg(priv, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); ++ if (config & FRAMEBUFFER_RESET) { ++ phytium_writel_reg(priv, config | FRAMEBUFFER_VALID_PENDING, ++ group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); ++ } ++} ++ ++static void phytium_crtc_atomic_flush(struct drm_crtc *crtc, ++ struct drm_crtc_state *old_crtc_state) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct 
phytium_display_private *priv = dev->dev_private; ++ struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); ++ struct phytium_crtc_state *phytium_crtc_state = NULL; ++ int phys_pipe = phytium_crtc->phys_pipe, config; ++ uint32_t group_offset = priv->dc_reg_base[phys_pipe]; ++ ++ DRM_DEBUG_KMS("crtc->state active:%d enable:%d\n", ++ crtc->state->active, crtc->state->enable); ++ phytium_crtc_state = to_phytium_crtc_state(crtc->state); ++ ++ if (crtc->state->color_mgmt_changed) ++ phytium_crtc_gamma_set(crtc); ++ ++ config = phytium_readl_reg(priv, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); ++ phytium_writel_reg(priv, config&(~FRAMEBUFFER_VALID_PENDING), ++ group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); ++ ++ if (crtc->state->event) { ++ DRM_DEBUG_KMS("vblank->refcount:%d\n", ++ atomic_read(&dev->vblank[0].refcount)); ++ spin_lock_irq(&dev->event_lock); ++ if (drm_crtc_vblank_get(crtc) == 0) ++ drm_crtc_arm_vblank_event(crtc, crtc->state->event); ++ else ++ drm_crtc_send_vblank_event(crtc, crtc->state->event); ++ crtc->state->event = NULL; ++ spin_unlock_irq(&dev->event_lock); ++ } ++} ++ ++static enum drm_mode_status ++phytium_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ ++ if (mode->crtc_clock > priv->info.crtc_clock_max) ++ return MODE_CLOCK_HIGH; ++ ++ if (mode->hdisplay > priv->info.hdisplay_max) ++ return MODE_BAD_HVALUE; ++ ++ if (mode->vdisplay > priv->info.vdisplay_max) ++ return MODE_BAD_VVALUE; ++ ++ if (mode->flags & DRM_MODE_FLAG_INTERLACE) ++ return MODE_NO_INTERLACE; ++ ++ return MODE_OK; ++} ++ ++static const struct drm_crtc_helper_funcs phytium_crtc_helper_funcs = { ++ .mode_valid = phytium_crtc_mode_valid, ++ .atomic_check = phytium_crtc_atomic_check, ++ .atomic_begin = phytium_crtc_atomic_begin, ++ .atomic_flush = phytium_crtc_atomic_flush, ++ .atomic_enable = phytium_crtc_atomic_enable, ++ .atomic_disable = 
phytium_crtc_atomic_disable, ++}; ++ ++void phytium_crtc_resume(struct drm_device *drm_dev) ++{ ++ struct drm_crtc *crtc; ++ ++ drm_for_each_crtc(crtc, drm_dev) { ++ phytium_crtc_gamma_init(crtc); ++ } ++} ++ ++int phytium_crtc_init(struct drm_device *dev, int phys_pipe) ++{ ++ struct phytium_crtc *phytium_crtc; ++ struct phytium_crtc_state *phytium_crtc_state; ++ struct phytium_plane *phytium_primary_plane = NULL; ++ struct phytium_plane *phytium_cursor_plane = NULL; ++ struct phytium_display_private *priv = dev->dev_private; ++ int ret; ++ ++ phytium_crtc = kzalloc(sizeof(*phytium_crtc), GFP_KERNEL); ++ if (!phytium_crtc) { ++ ret = -ENOMEM; ++ goto failed_malloc_crtc; ++ } ++ ++ phytium_crtc_state = kzalloc(sizeof(*phytium_crtc_state), GFP_KERNEL); ++ if (!phytium_crtc_state) { ++ ret = -ENOMEM; ++ goto failed_malloc_crtc_state; ++ } ++ ++ phytium_crtc_state->base.crtc = &phytium_crtc->base; ++ phytium_crtc->base.state = &phytium_crtc_state->base; ++ phytium_crtc->phys_pipe = phys_pipe; ++ ++ if (IS_X100(priv)) { ++ phytium_crtc->dc_hw_config_pix_clock = x100_dc_hw_config_pix_clock; ++ phytium_crtc->dc_hw_disable = x100_dc_hw_disable; ++ priv->dc_reg_base[phys_pipe] = X100_DC_BASE(phys_pipe); ++ priv->dcreq_reg_base[phys_pipe] = X100_DCREQ_BASE(phys_pipe); ++ priv->address_transform_base = X100_ADDRESS_TRANSFORM_BASE; ++ } ++ ++ phytium_primary_plane = phytium_primary_plane_create(dev, phys_pipe); ++ if (IS_ERR(phytium_primary_plane)) { ++ ret = PTR_ERR(phytium_primary_plane); ++ DRM_ERROR("create primary plane failed, phys_pipe(%d)\n", phys_pipe); ++ goto failed_create_primary; ++ } ++ ++ phytium_cursor_plane = phytium_cursor_plane_create(dev, phys_pipe); ++ if (IS_ERR(phytium_cursor_plane)) { ++ ret = PTR_ERR(phytium_cursor_plane); ++ DRM_ERROR("create cursor plane failed, phys_pipe(%d)\n", phys_pipe); ++ goto failed_create_cursor; ++ } ++ ++ ret = drm_crtc_init_with_planes(dev, &phytium_crtc->base, ++ &phytium_primary_plane->base, ++ 
&phytium_cursor_plane->base, ++ &phytium_crtc_funcs, ++ "phys_pipe %d", phys_pipe); ++ ++ if (ret) { ++ DRM_ERROR("init crtc with plane failed, phys_pipe(%d)\n", phys_pipe); ++ goto failed_crtc_init; ++ } ++ drm_crtc_helper_add(&phytium_crtc->base, &phytium_crtc_helper_funcs); ++ drm_crtc_vblank_reset(&phytium_crtc->base); ++ drm_mode_crtc_set_gamma_size(&phytium_crtc->base, GAMMA_INDEX_MAX); ++ drm_crtc_enable_color_mgmt(&phytium_crtc->base, 0, false, GAMMA_INDEX_MAX); ++ phytium_crtc_gamma_init(&phytium_crtc->base); ++ ++ return 0; ++ ++failed_crtc_init: ++failed_create_cursor: ++ /* drm_mode_config_cleanup() will free any crtcs/planes already initialized */ ++failed_create_primary: ++ kfree(phytium_crtc_state); ++failed_malloc_crtc_state: ++ kfree(phytium_crtc); ++failed_malloc_crtc: ++ return ret; ++} +diff --git a/drivers/gpu/drm/phytium/phytium_crtc.h b/drivers/gpu/drm/phytium/phytium_crtc.h +new file mode 100644 +index 000000000000..125a99b42660 +--- /dev/null ++++ b/drivers/gpu/drm/phytium/phytium_crtc.h +@@ -0,0 +1,38 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Phytium display drm driver ++ * ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. 
++ */ ++ ++#ifndef __PHYTIUM_CRTC_H__ ++#define __PHYTIUM_CRTC_H__ ++ ++struct phytium_crtc { ++ struct drm_crtc base; ++ int phys_pipe; ++ unsigned int bpc; ++ ++ /* scale */ ++ uint32_t src_width; ++ uint32_t src_height; ++ uint32_t dst_width; ++ uint32_t dst_height; ++ uint32_t dst_x; ++ uint32_t dst_y; ++ bool scale_enable; ++ bool reserve[3]; ++ ++ void (*dc_hw_config_pix_clock)(struct drm_crtc *crtc, int clock); ++ void (*dc_hw_disable)(struct drm_crtc *crtc); ++}; ++ ++struct phytium_crtc_state { ++ struct drm_crtc_state base; ++}; ++ ++#define to_phytium_crtc(x) container_of(x, struct phytium_crtc, base) ++#define to_phytium_crtc_state(x) container_of(x, struct phytium_crtc_state, base) ++ ++void phytium_crtc_resume(struct drm_device *drm_dev); ++int phytium_crtc_init(struct drm_device *dev, int pipe); ++#endif /* __PHYTIUM_CRTC_H__ */ +diff --git a/drivers/gpu/drm/phytium/phytium_debugfs.c b/drivers/gpu/drm/phytium/phytium_debugfs.c +new file mode 100644 +index 000000000000..b38deafcf874 +--- /dev/null ++++ b/drivers/gpu/drm/phytium/phytium_debugfs.c +@@ -0,0 +1,400 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* Phytium display drm driver ++ * ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. 
++ */ ++ ++#include ++#include ++ ++#include "phytium_display_drv.h" ++#include "phytium_dp.h" ++#include "phytium_reg.h" ++ ++static ssize_t ++phytium_dp_register_write(struct file *filp, ++ const char __user *ubuf, ++ size_t len, ++ loff_t *ppos) ++{ ++ char tmp[16]; ++ ++ if (len >= sizeof(tmp)) ++ return -EINVAL; ++ ++ memset(tmp, 0, sizeof(tmp)); ++ if (copy_from_user(tmp, ubuf, len)) ++ return -EFAULT; ++ tmp[len] = '\0'; ++ ++ return len; ++} ++ ++static int phytium_dp_register_show(struct seq_file *m, void *data) ++{ ++ struct drm_connector *connector = m->private; ++ struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ ++ seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_M_VID, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_M_VID)); ++ seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_N_VID, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_N_VID)); ++ seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_TRANSFER_UNIT_SIZE, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_TRANSFER_UNIT_SIZE)); ++ seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_DATA_COUNT, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_DATA_COUNT)); ++ seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_MAIN_LINK_HTOTAL, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HTOTAL)); ++ seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_MAIN_LINK_HRES, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HRES)); ++ seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_MAIN_LINK_HSWIDTH, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HSWIDTH)); ++ seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_MAIN_LINK_HSTART, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HSTART)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", 
PHYTIUM_DP_MAIN_LINK_VTOTAL, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VTOTAL)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VRES, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VRES)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VSWIDTH, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VSWIDTH)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VSTART, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VSTART)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_POLARITY, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_POLARITY)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_MISC0, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_MISC0)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_MISC1, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_MISC1)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_USER_SYNC_POLARITY, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_USER_SYNC_POLARITY)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_VIDEO_STREAM_ENABLE, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SECONDARY_STREAM_ENABLE, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE)); ++ seq_puts(m, "audio:\n"); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_INPUT_SELECT, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_INPUT_SELECT)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_DIRECT_CLKDIV, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_DIRECT_CLKDIV)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CHANNEL_COUNT, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CHANNEL_COUNT)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CHANNEL_MAP, ++ 
phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CHANNEL_MAP)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_DATA_WINDOW, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_DATA_WINDOW)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_CATEGORY_CODE, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_CATEGORY_CODE)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_MAUD, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_MAUD)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_NAUD, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_NAUD)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CLOCK_MODE, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CLOCK_MODE)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_SOURCE_FORMAT, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_SOURCE_FORMAT)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY)); ++ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_AUDIO_ENABLE, ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE)); ++ ++ return 0; ++} ++ ++static int phytium_dp_register_open(struct inode *inode, struct file *file) ++{ ++ return single_open(file, phytium_dp_register_show, inode->i_private); ++} ++ ++static const struct file_operations phytium_dp_register_fops = { ++ .owner = THIS_MODULE, ++ .open = phytium_dp_register_open, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ .release = single_release, ++ .write = phytium_dp_register_write, ++}; ++ ++static ssize_t ++phytium_dp_trigger_train_fail_write(struct file *filp, ++ const char __user *ubuf, ++ size_t len, ++ loff_t *ppos) ++{ ++ struct seq_file *m = 
filp->private_data; ++ struct drm_connector *connector = m->private; ++ struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); ++ char tmp[16]; ++ ++ if (len >= sizeof(tmp)) ++ return -EINVAL; ++ ++ memset(tmp, 0, sizeof(tmp)); ++ if (copy_from_user(tmp, ubuf, len)) ++ return -EFAULT; ++ tmp[len] = '\0'; ++ ++ if (kstrtouint(tmp, 10, &phytium_dp->trigger_train_fail) != 0) ++ return -EINVAL; ++ ++ return len; ++} ++ ++static int phytium_dp_trigger_train_fail_show(struct seq_file *m, void *data) ++{ ++ struct drm_connector *connector = m->private; ++ struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); ++ ++ seq_printf(m, "trigger_train_fail: %d\n", phytium_dp->trigger_train_fail); ++ seq_printf(m, "train_retry_count: %d\n", phytium_dp->train_retry_count); ++ ++ return 0; ++} ++ ++static int phytium_dp_trigger_train_fail_open(struct inode *inode, struct file *file) ++{ ++ return single_open(file, phytium_dp_trigger_train_fail_show, inode->i_private); ++} ++ ++static const struct file_operations phytium_dp_trigger_train_fail_fops = { ++ .owner = THIS_MODULE, ++ .open = phytium_dp_trigger_train_fail_open, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ .release = single_release, ++ .write = phytium_dp_trigger_train_fail_write, ++}; ++ ++static int phytium_edp_backlight_show(struct seq_file *m, void *data) ++{ ++ struct drm_connector *connector = m->private; ++ struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); ++ ++ if (!phytium_dp->is_edp) ++ return -ENODEV; ++ ++ mutex_lock(&phytium_dp->panel.panel_lock); ++ seq_printf(m, "backlight: %s\n", phytium_dp->panel.backlight_enabled?"enabled":"disabled"); ++ mutex_unlock(&phytium_dp->panel.panel_lock); ++ ++ return 0; ++} ++ ++static int phytium_edp_backlight_open(struct inode *inode, struct file *file) ++{ ++ return single_open(file, phytium_edp_backlight_show, inode->i_private); ++} ++ ++static const struct file_operations phytium_edp_backlight_fops = { ++ 
.owner = THIS_MODULE, ++ .open = phytium_edp_backlight_open, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ .release = single_release, ++}; ++ ++static int phytium_edp_power_show(struct seq_file *m, void *data) ++{ ++ struct drm_connector *connector = m->private; ++ struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); ++ ++ if (!phytium_dp->is_edp) ++ return -ENODEV; ++ ++ mutex_lock(&phytium_dp->panel.panel_lock); ++ seq_printf(m, "power: %s\n", phytium_dp->panel.power_enabled?"enabled":"disabled"); ++ mutex_unlock(&phytium_dp->panel.panel_lock); ++ ++ return 0; ++} ++ ++static int phytium_edp_power_open(struct inode *inode, struct file *file) ++{ ++ return single_open(file, phytium_edp_power_show, inode->i_private); ++} ++ ++static const struct file_operations phytium_edp_power_fops = { ++ .owner = THIS_MODULE, ++ .open = phytium_edp_power_open, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ .release = single_release, ++}; ++ ++struct dpcd_block { ++ /* DPCD dump start address. */ ++ unsigned int offset; ++ /* DPCD dump end address, inclusive. If unset, .size will be used. */ ++ unsigned int end; ++ /* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */ ++ size_t size; ++ /* Only valid for eDP. 
*/ ++ bool edp; ++}; ++ ++static const struct dpcd_block phytium_dpcd_debug[] = { ++ { .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE }, ++ { .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS }, ++ { .offset = DP_DOWNSTREAM_PORT_0, .size = 16 }, ++ { .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET }, ++ { .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 }, ++ { .offset = DP_SET_POWER }, ++ { .offset = DP_EDP_DPCD_REV }, ++ { .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 }, ++ { .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB }, ++ { .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET }, ++ { .offset = DP_DEVICE_SERVICE_IRQ_VECTOR, .size = 1 }, ++ { .offset = DP_TEST_REQUEST, .end = DP_TEST_PATTERN }, ++}; ++ ++static int phytium_dpcd_show(struct seq_file *m, void *data) ++{ ++ struct drm_connector *connector = m->private; ++ struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); ++ uint8_t buf[16], i; ++ ssize_t err; ++ ++ if (connector->status != connector_status_connected) ++ return -ENODEV; ++ ++ for (i = 0; i < ARRAY_SIZE(phytium_dpcd_debug); i++) { ++ const struct dpcd_block *b = &phytium_dpcd_debug[i]; ++ size_t size = b->end ? 
b->end - b->offset + 1 : (b->size ?: 1); ++ ++ if (WARN_ON(size > sizeof(buf))) ++ continue; ++ ++ err = drm_dp_dpcd_read(&phytium_dp->aux, b->offset, buf, size); ++ if (err <= 0) { ++ DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n", ++ size, b->offset, err); ++ continue; ++ } ++ ++ seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf); ++ } ++ ++ return 0; ++} ++ ++static int phytium_dpcd_open(struct inode *inode, struct file *file) ++{ ++ return single_open(file, phytium_dpcd_show, inode->i_private); ++} ++ ++static const struct file_operations phytium_dpcd_fops = { ++ .owner = THIS_MODULE, ++ .open = phytium_dpcd_open, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ .release = single_release, ++}; ++ ++static ssize_t ++phytium_dp_state_write(struct file *filp, ++ const char __user *ubuf, ++ size_t len, ++ loff_t *ppos) ++{ ++ char tmp[16]; ++ ++ if (len >= sizeof(tmp)) ++ return -EINVAL; ++ ++ memset(tmp, 0, sizeof(tmp)); ++ if (copy_from_user(tmp, ubuf, len)) ++ return -EFAULT; ++ tmp[len] = '\0'; ++ ++ return len; ++} ++ ++static int phytium_dp_state_show(struct seq_file *m, void *data) ++{ ++ struct drm_connector *connector = m->private; ++ struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); ++ ++ seq_printf(m, "port number: %d\n", phytium_dp->port); ++ seq_printf(m, "source_max_lane_count: %d\n", phytium_dp->source_max_lane_count); ++ seq_printf(m, "max_source_rates: %d\n", ++ phytium_dp->source_rates[phytium_dp->num_source_rates-1]); ++ if (connector->status == connector_status_connected) { ++ seq_printf(m, "sink_max_lane_count: %d\n", phytium_dp->sink_max_lane_count); ++ seq_printf(m, "max_sink_rates: %d\n", ++ phytium_dp->sink_rates[phytium_dp->num_sink_rates-1]); ++ seq_printf(m, "link_rate: %d\n", phytium_dp->link_rate); ++ seq_printf(m, "link_lane_count: %d\n", phytium_dp->link_lane_count); ++ seq_printf(m, "train_set[0]: %d\n", phytium_dp->train_set[0]); ++ seq_printf(m, "has_audio: %s\n", 
phytium_dp->has_audio?"yes":"no"); ++ } ++ ++ return 0; ++} ++ ++static int phytium_dp_state_open(struct inode *inode, struct file *file) ++{ ++ return single_open(file, phytium_dp_state_show, inode->i_private); ++} ++ ++static const struct file_operations phytium_dp_state_fops = { ++ .owner = THIS_MODULE, ++ .open = phytium_dp_state_open, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ .release = single_release, ++ .write = phytium_dp_state_write, ++}; ++ ++static const struct phytium_debugfs_connector_files { ++ const char *name; ++ const struct file_operations *fops; ++} phytium_debugfs_connector_files[] = { ++ {"dp_state", &phytium_dp_state_fops}, ++ {"dpcd", &phytium_dpcd_fops}, ++ {"dp_register", &phytium_dp_register_fops}, ++ {"dp_trigger_train_fail", &phytium_dp_trigger_train_fail_fops}, ++}; ++ ++static const struct phytium_debugfs_connector_files phytium_edp_debugfs_connector_files[] = { ++ {"edp_power", &phytium_edp_power_fops}, ++ {"edp_backlight", &phytium_edp_backlight_fops}, ++}; ++ ++int phytium_debugfs_connector_add(struct drm_connector *connector) ++{ ++ struct dentry *root = connector->debugfs_entry; ++ struct dentry *ent; ++ int i; ++ struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); ++ ++ if (!root) ++ return -ENODEV; ++ ++ for (i = 0; i < ARRAY_SIZE(phytium_debugfs_connector_files); i++) { ++ ent = debugfs_create_file(phytium_debugfs_connector_files[i].name, ++ 0644, ++ root, ++ connector, ++ phytium_debugfs_connector_files[i].fops); ++ if (!ent) ++ return -ENOMEM; ++ } ++ ++ if (phytium_dp->is_edp) ++ for (i = 0; i < ARRAY_SIZE(phytium_edp_debugfs_connector_files); i++) { ++ ent = debugfs_create_file(phytium_edp_debugfs_connector_files[i].name, ++ 0644, ++ root, ++ connector, ++ phytium_edp_debugfs_connector_files[i].fops); ++ if (!ent) ++ return -ENOMEM; ++ } ++ ++ return 0; ++} +diff --git a/drivers/gpu/drm/phytium/phytium_debugfs.h b/drivers/gpu/drm/phytium/phytium_debugfs.h +new file mode 100644 +index 
000000000000..37ca93c18821 +--- /dev/null ++++ b/drivers/gpu/drm/phytium/phytium_debugfs.h +@@ -0,0 +1,13 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Phytium display drm driver ++ * ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. ++ */ ++ ++#ifndef __PHYTIUM_DEBUGFS_H__ ++#define __PHYTIUM_DEBUGFS_H__ ++ ++int phytium_debugfs_register(struct phytium_display_private *priv); ++int phytium_debugfs_connector_add(struct drm_connector *connector); ++ ++#endif /* __PHYTIUM_DEBUGFS_H__ */ +diff --git a/drivers/gpu/drm/phytium/phytium_display_drv.c b/drivers/gpu/drm/phytium/phytium_display_drv.c +new file mode 100644 +index 000000000000..49a66740388f +--- /dev/null ++++ b/drivers/gpu/drm/phytium/phytium_display_drv.c +@@ -0,0 +1,461 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* Phytium display drm driver ++ * ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "phytium_display_drv.h" ++#include "phytium_plane.h" ++#include "phytium_crtc.h" ++#include "phytium_dp.h" ++#include "phytium_gem.h" ++#include "phytium_fb.h" ++#include "phytium_fbdev.h" ++#include "phytium_reg.h" ++#include "phytium_pci.h" ++ ++int dc_fake_mode_enable; ++module_param(dc_fake_mode_enable, int, 0644); ++MODULE_PARM_DESC(dc_fake_mode_enable, "Enable DC fake mode (0-disabled; 1-enabled; default-0)"); ++ ++int dc_fast_training_check = 1; ++module_param(dc_fast_training_check, int, 0644); ++MODULE_PARM_DESC(dc_fast_training_check, "Check dp fast training (0-disabled; 1-enabled; default-1)"); ++ ++int num_source_rates = 4; ++module_param(num_source_rates, int, 0644); ++MODULE_PARM_DESC(num_source_rates, "set the source max rates (1-1.62Gbps; 2-2.7Gbps; 3-5.4Gbps; 4-8.1Gbps; default-4)"); ++ ++int source_max_lane_count = 4; ++module_param(source_max_lane_count, int, 0644); ++MODULE_PARM_DESC(source_max_lane_count, "set the source lane count (1-1lane; 2-2lane; 4-4lane; default-4)"); ++ ++int 
link_dynamic_adjust; ++module_param(link_dynamic_adjust, int, 0644); ++MODULE_PARM_DESC(link_dynamic_adjust, "dynamic select the train pamameter according to the display mode (0-disabled; 1-enabled; default-1)"); ++ ++int phytium_wait_cmd_done(struct phytium_display_private *priv, ++ uint32_t register_offset, ++ uint32_t request_bit, ++ uint32_t reply_bit) ++{ ++ int timeout = 500, config = 0, ret = 0; ++ ++ do { ++ mdelay(1); ++ timeout--; ++ config = phytium_readl_reg(priv, 0, register_offset); ++ } while ((!(config & reply_bit)) && timeout); ++ ++ phytium_writel_reg(priv, config & (~request_bit), 0, register_offset); ++ ++ if (timeout == 0) { ++ DRM_ERROR("wait cmd reply timeout\n"); ++ ret = -EBUSY; ++ } else { ++ timeout = 500; ++ do { ++ mdelay(1); ++ timeout--; ++ config = phytium_readl_reg(priv, 0, register_offset); ++ } while ((config & reply_bit) && timeout); ++ if (timeout == 0) { ++ DRM_ERROR("clear cmd timeout\n"); ++ ret = -EBUSY; ++ } ++ } ++ mdelay(5); ++ ++ return ret; ++} ++ ++static void phytium_irq_preinstall(struct drm_device *dev) ++{ ++ struct phytium_display_private *priv = dev->dev_private; ++ int i, status; ++ ++ for_each_pipe_masked(priv, i) { ++ status = phytium_readl_reg(priv, priv->dc_reg_base[i], PHYTIUM_DC_INT_STATUS); ++ phytium_writel_reg(priv, INT_DISABLE, priv->dc_reg_base[i], PHYTIUM_DC_INT_ENABLE); ++ } ++} ++ ++static void phytium_irq_uninstall(struct drm_device *dev) ++{ ++ struct phytium_display_private *priv = dev->dev_private; ++ int i, status; ++ ++ for_each_pipe_masked(priv, i) { ++ status = phytium_readl_reg(priv, priv->dc_reg_base[i], PHYTIUM_DC_INT_STATUS); ++ phytium_writel_reg(priv, INT_DISABLE, priv->dc_reg_base[i], PHYTIUM_DC_INT_ENABLE); ++ } ++} ++ ++static irqreturn_t phytium_display_irq_handler(int irq, void *data) ++{ ++ struct drm_device *dev = data; ++ struct phytium_display_private *priv = dev->dev_private; ++ bool enabled = 0; ++ int i = 0, virt_pipe = 0; ++ irqreturn_t ret = IRQ_NONE, ret1 = IRQ_NONE; ++ 
++ for_each_pipe_masked(priv, i) { ++ enabled = phytium_readl_reg(priv, priv->dc_reg_base[i], PHYTIUM_DC_INT_STATUS); ++ if (enabled & INT_STATUS) { ++ virt_pipe = phytium_get_virt_pipe(priv, i); ++ if (virt_pipe < 0) ++ return IRQ_NONE; ++ drm_handle_vblank(dev, virt_pipe); ++ ret = IRQ_HANDLED; ++ if (priv->dc_hw_clear_msi_irq) ++ priv->dc_hw_clear_msi_irq(priv, i); ++ } ++ } ++ ++ ret1 = phytium_dp_hpd_irq_handler(priv); ++ if (ret == IRQ_HANDLED || ret1 == IRQ_HANDLED) ++ return IRQ_HANDLED; ++ ++ return IRQ_NONE; ++} ++ ++static int phytium_enable_vblank(struct drm_device *dev, unsigned int virt_pipe) ++{ ++ struct phytium_display_private *priv = dev->dev_private; ++ int phys_pipe; ++ ++ phys_pipe = phytium_get_phys_pipe(priv, virt_pipe); ++ if (phys_pipe < 0) ++ return phys_pipe; ++ ++ phytium_writel_reg(priv, INT_ENABLE, priv->dc_reg_base[phys_pipe], PHYTIUM_DC_INT_ENABLE); ++ ++ return 0; ++} ++ ++static void phytium_disable_vblank(struct drm_device *dev, unsigned int virt_pipe) ++{ ++ struct phytium_display_private *priv = dev->dev_private; ++ int phys_pipe; ++ ++ phys_pipe = phytium_get_phys_pipe(priv, virt_pipe); ++ if (phys_pipe >= 0) ++ phytium_writel_reg(priv, INT_DISABLE, priv->dc_reg_base[phys_pipe], ++ PHYTIUM_DC_INT_ENABLE); ++} ++ ++static const struct drm_mode_config_funcs phytium_mode_funcs = { ++ .fb_create = phytium_fb_create, ++ .output_poll_changed = drm_fb_helper_output_poll_changed, ++ .atomic_check = drm_atomic_helper_check, ++ .atomic_commit = drm_atomic_helper_commit, ++}; ++ ++static void phytium_atomic_commit_tail(struct drm_atomic_state *state) ++{ ++ struct drm_device *dev = state->dev; ++ ++ drm_atomic_helper_commit_modeset_disables(dev, state); ++ drm_atomic_helper_commit_planes(dev, state, false); ++ drm_atomic_helper_commit_modeset_enables(dev, state); ++ drm_atomic_helper_commit_hw_done(state); ++ drm_atomic_helper_wait_for_flip_done(dev, state); ++ drm_atomic_helper_cleanup_planes(dev, state); ++} ++ ++static struct 
drm_mode_config_helper_funcs phytium_mode_config_helpers = { ++ .atomic_commit_tail = phytium_atomic_commit_tail, ++}; ++ ++static int phytium_modeset_init(struct drm_device *dev) ++{ ++ struct phytium_display_private *priv = dev->dev_private; ++ int i = 0, ret; ++ ++ drm_mode_config_init(dev); ++ dev->mode_config.min_width = 0; ++ dev->mode_config.min_height = 0; ++ dev->mode_config.max_width = 16384; ++ dev->mode_config.max_height = 16384; ++ dev->mode_config.cursor_width = 32; ++ dev->mode_config.cursor_height = 32; ++ ++ dev->mode_config.preferred_depth = 24; ++ dev->mode_config.prefer_shadow = 1; ++ dev->mode_config.allow_fb_modifiers = true; ++ ++ dev->mode_config.funcs = &phytium_mode_funcs; ++ dev->mode_config.helper_private = &phytium_mode_config_helpers; ++ ++ for_each_pipe_masked(priv, i) { ++ ret = phytium_crtc_init(dev, i); ++ if (ret) { ++ DRM_ERROR("phytium_crtc_init(pipe %d) return failed\n", i); ++ goto failed_crtc_init; ++ } ++ } ++ ++ for_each_pipe_masked(priv, i) { ++ ret = phytium_dp_init(dev, i); ++ if (ret) { ++ DRM_ERROR("phytium_dp_init(pipe %d) return failed\n", i); ++ goto failed_dp_init; ++ } ++ } ++ ++ drm_mode_config_reset(dev); ++ ++ return 0; ++failed_dp_init: ++failed_crtc_init: ++ drm_mode_config_cleanup(dev); ++ return ret; ++} ++ ++int phytium_get_virt_pipe(struct phytium_display_private *priv, int phys_pipe) ++{ ++ int i = 0; ++ int virt_pipe = 0; ++ ++ for_each_pipe_masked(priv, i) { ++ if (i != phys_pipe) ++ virt_pipe++; ++ else ++ return virt_pipe; ++ } ++ ++ DRM_ERROR("%s %d failed\n", __func__, phys_pipe); ++ return -EINVAL; ++} ++ ++int phytium_get_phys_pipe(struct phytium_display_private *priv, int virt_pipe) ++{ ++ int i = 0; ++ int tmp = 0; ++ ++ for_each_pipe_masked(priv, i) { ++ if (tmp != virt_pipe) ++ tmp++; ++ else ++ return i; ++ } ++ ++ DRM_ERROR("%s %d failed\n", __func__, virt_pipe); ++ return -EINVAL; ++} ++ ++static int phytium_display_load(struct drm_device *dev, unsigned long flags) ++{ ++ struct 
phytium_display_private *priv = dev->dev_private; ++ int ret = 0; ++ ++ ret = drm_vblank_init(dev, priv->info.num_pipes); ++ if (ret) { ++ DRM_ERROR("vblank init failed\n"); ++ goto failed_vblank_init; ++ } ++ ++ ret = phytium_modeset_init(dev); ++ if (ret) { ++ DRM_ERROR("phytium_modeset_init failed\n"); ++ goto failed_modeset_init; ++ } ++ ++ if (priv->vram_support) ++ priv->vram_hw_init(priv); ++ ++ ret = drm_irq_install(dev, priv->irq); ++ if (ret) { ++ DRM_ERROR("install irq failed\n"); ++ goto failed_irq_install; ++ } ++ ++ ret = phytium_drm_fbdev_init(dev); ++ if (ret) ++ DRM_ERROR("failed to init dev\n"); ++ ++ return ret; ++ ++failed_irq_install: ++ drm_mode_config_cleanup(dev); ++failed_modeset_init: ++failed_vblank_init: ++ return ret; ++} ++ ++static void phytium_display_unload(struct drm_device *dev) ++{ ++ phytium_drm_fbdev_fini(dev); ++ drm_irq_uninstall(dev); ++ drm_mode_config_cleanup(dev); ++} ++ ++static const struct vm_operations_struct phytium_vm_ops = { ++ .open = drm_gem_vm_open, ++ .close = drm_gem_vm_close, ++}; ++ ++static const struct drm_ioctl_desc phytium_ioctls[] = { ++ /* for test, none so far */ ++}; ++ ++static const struct file_operations phytium_drm_driver_fops = { ++ .owner = THIS_MODULE, ++ .open = drm_open, ++ .release = drm_release, ++ .unlocked_ioctl = drm_ioctl, ++ .compat_ioctl = drm_compat_ioctl, ++ .poll = drm_poll, ++ .read = drm_read, ++ .llseek = no_llseek, ++ .mmap = phytium_gem_mmap, ++}; ++ ++struct drm_driver phytium_display_drm_driver = { ++ .driver_features = DRIVER_HAVE_IRQ | ++ DRIVER_IRQ_SHARED | ++ DRIVER_PRIME | ++ DRIVER_MODESET | ++ DRIVER_ATOMIC | ++ DRIVER_GEM, ++ .load = phytium_display_load, ++ .unload = phytium_display_unload, ++ .lastclose = drm_fb_helper_lastclose, ++ .irq_handler = phytium_display_irq_handler, ++ .irq_preinstall = phytium_irq_preinstall, ++ .irq_uninstall = phytium_irq_uninstall, ++ .enable_vblank = phytium_enable_vblank, ++ .disable_vblank = phytium_disable_vblank, ++ 
.gem_free_object = phytium_gem_free_object, ++ .gem_vm_ops = &phytium_vm_ops, ++ .prime_handle_to_fd = drm_gem_prime_handle_to_fd, ++ .prime_fd_to_handle = drm_gem_prime_fd_to_handle, ++ .gem_prime_export = drm_gem_prime_export, ++ .gem_prime_import = drm_gem_prime_import, ++ .gem_prime_get_sg_table = phytium_gem_prime_get_sg_table, ++ .gem_prime_import_sg_table = phytium_gem_prime_import_sg_table, ++ .gem_prime_vmap = phytium_gem_prime_vmap, ++ .gem_prime_vunmap = phytium_gem_prime_vunmap, ++ .gem_prime_mmap = phytium_gem_prime_mmap, ++ .dumb_create = phytium_gem_dumb_create, ++ .dumb_destroy = phytium_gem_dumb_destroy, ++ .ioctls = phytium_ioctls, ++ .num_ioctls = ARRAY_SIZE(phytium_ioctls), ++ .fops = &phytium_drm_driver_fops, ++ .name = DRV_NAME, ++ .desc = DRV_DESC, ++ .date = DRV_DATE, ++ .major = DRV_MAJOR, ++ .minor = DRV_MINOR, ++}; ++ ++static void phytium_display_shutdown(struct drm_device *dev) ++{ ++ drm_atomic_helper_shutdown(dev); ++} ++ ++static int phytium_display_pm_suspend(struct drm_device *dev) ++{ ++ struct drm_atomic_state *state; ++ struct phytium_display_private *priv = dev->dev_private; ++ int ret, ret1; ++ ++ phytium_dp_hpd_irq_setup(dev, false); ++ cancel_work_sync(&priv->hotplug_work); ++ drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1); ++ state = drm_atomic_helper_suspend(dev); ++ if (IS_ERR(state)) { ++ DRM_ERROR("drm_atomic_helper_suspend failed: %ld\n", PTR_ERR(state)); ++ ret = PTR_ERR(state); ++ goto suspend_failed; ++ } ++ dev->mode_config.suspend_state = state; ++ ret = phytium_gem_suspend(dev); ++ if (ret) { ++ DRM_ERROR("phytium_gem_suspend failed: %d\n", ret); ++ goto gem_suspend_failed; ++ } ++ ++ return 0; ++ ++gem_suspend_failed: ++ ret1 = drm_atomic_helper_resume(dev, dev->mode_config.suspend_state); ++ if (ret1) ++ DRM_ERROR("Failed to resume (%d)\n", ret1); ++ dev->mode_config.suspend_state = NULL; ++suspend_failed: ++ drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0); ++ phytium_dp_hpd_irq_setup(dev, true); 
++ ++ return ret; ++} ++ ++static int phytium_display_pm_resume(struct drm_device *dev) ++{ ++ struct phytium_display_private *priv = dev->dev_private; ++ int ret = 0; ++ ++ if (WARN_ON(!dev->mode_config.suspend_state)) ++ return -EINVAL; ++ ++ ret = phytium_dp_resume(dev); ++ if (ret) ++ return -EIO; ++ ++ phytium_crtc_resume(dev); ++ phytium_gem_resume(dev); ++ ++ if (priv->vram_support) ++ priv->vram_hw_init(priv); ++ ++ ret = drm_atomic_helper_resume(dev, dev->mode_config.suspend_state); ++ if (ret) { ++ DRM_ERROR("Failed to resume (%d)\n", ret); ++ return ret; ++ } ++ ++ dev->mode_config.suspend_state = NULL; ++ drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0); ++ phytium_dp_hpd_irq_setup(dev, true); ++ ++ return 0; ++} ++ ++void phytium_display_private_init(struct phytium_display_private *priv, struct drm_device *dev) ++{ ++ INIT_LIST_HEAD(&priv->gem_list_head); ++ spin_lock_init(&priv->hotplug_irq_lock); ++ INIT_WORK(&priv->hotplug_work, phytium_dp_hpd_work_func); ++ priv->dev = dev; ++ priv->display_shutdown = phytium_display_shutdown; ++ priv->display_pm_suspend = phytium_display_pm_suspend; ++ priv->display_pm_resume = phytium_display_pm_resume; ++} ++ ++static int __init phytium_display_init(void) ++{ ++ int ret = 0; ++ ++ ret = pci_register_driver(&phytium_pci_driver); ++ ++ return ret; ++} ++ ++static void __exit phytium_display_exit(void) ++{ ++ pci_unregister_driver(&phytium_pci_driver); ++} ++ ++module_init(phytium_display_init); ++module_exit(phytium_display_exit); ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Yang Xun "); ++MODULE_DESCRIPTION("Phytium Display Controller"); +diff --git a/drivers/gpu/drm/phytium/phytium_display_drv.h b/drivers/gpu/drm/phytium/phytium_display_drv.h +new file mode 100644 +index 000000000000..9e052b805fcd +--- /dev/null ++++ b/drivers/gpu/drm/phytium/phytium_display_drv.h +@@ -0,0 +1,151 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Phytium display drm driver ++ * ++ * Copyright (C) 2021 Phytium Technology Co., 
Ltd. ++ */ ++ ++#ifndef __PHYTIUM_DISPLAY_DRV_H__ ++#define __PHYTIUM_DISPLAY_DRV_H__ ++ ++#include ++#include ++ ++#define DEBUG_LOG 0 ++ ++#define PHYTIUM_FORMAT_MAX_PLANE 3 ++#define DP_MAX_DOWNSTREAM_PORTS 0x10 ++ ++#define DRV_NAME "dc" ++#define DRV_DESC "phytium dc" ++#define DRV_DATE "20201220" ++#define DRV_MAJOR 1 ++#define DRV_MINOR 1 ++ ++/* come from GPU */ ++#define DRM_FORMAT_MOD_VENDOR_PHYTIUM 0x92 ++ ++/* dc:mode0 8x8 16bpp gpu: FBCDC_8X8_V10 */ ++#define DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC fourcc_mod_code(PHYTIUM, 21) ++/* dc:mode3 8x4 32bpp gpu: FBCDC_16X4_v10 */ ++#define DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC fourcc_mod_code(PHYTIUM, 22) ++ ++#define PIPE_MASK_SHIFT 0x0 ++#define PIPE_MASK_MASK 0x7 ++#define EDP_MASK_SHIFT 0x3 ++#define EDP_MASK_MASK 0x7 ++ ++enum phytium_platform { ++ PHYTIUM_PLATFORM_UNINITIALIZED = 0, ++ PHYTIUM_PLATFORM_X100, ++}; ++ ++#define IS_PLATFORM(priv, p) ((priv)->info.platform_mask & BIT(p)) ++ ++#define IS_X100(priv) IS_PLATFORM(priv, PHYTIUM_PLATFORM_X100) ++ ++struct phytium_device_info { ++ unsigned char platform_mask; ++ unsigned char pipe_mask; ++ unsigned char num_pipes; ++ unsigned char total_pipes; ++ unsigned char edp_mask; ++ unsigned int crtc_clock_max; ++ unsigned int hdisplay_max; ++ unsigned int vdisplay_max; ++ unsigned int backlight_max; ++ unsigned long address_mask; ++}; ++ ++struct phytium_display_private { ++ /* hw */ ++ void __iomem *regs; ++ void __iomem *vram_addr; ++ struct phytium_device_info info; ++ bool vram_support; ++ bool reserve[3]; ++ uint32_t dc_reg_base[3]; ++ uint32_t dcreq_reg_base[3]; ++ uint32_t dp_reg_base[3]; ++ uint32_t address_transform_base; ++ uint32_t phy_access_base[3]; ++ ++ /* drm */ ++ struct drm_device *dev; ++ int irq; ++ ++ /* fb_dev */ ++ struct drm_fb_helper fbdev_helper; ++ struct phytium_gem_object *fbdev_phytium_gem; ++ ++ int save_reg[3]; ++ struct list_head gem_list_head; ++ ++ struct work_struct hotplug_work; ++ spinlock_t hotplug_irq_lock; ++ ++ 
void (*vram_hw_init)(struct phytium_display_private *priv); ++ void (*display_shutdown)(struct drm_device *dev); ++ int (*display_pm_suspend)(struct drm_device *dev); ++ int (*display_pm_resume)(struct drm_device *dev); ++ ++ void (*dc_hw_clear_msi_irq)(struct phytium_display_private *priv, uint32_t phys_pipe); ++ ++ int (*dc_hw_fb_format_check)(const struct drm_mode_fb_cmd2 *mode_cmd, int count); ++}; ++ ++static inline unsigned int ++phytium_readl_reg(struct phytium_display_private *priv, uint32_t group_offset, uint32_t reg_offset) ++{ ++ unsigned int data; ++ ++ data = readl(priv->regs + group_offset + reg_offset); ++#if DEBUG_LOG ++ pr_info("Read 32'h%08x 32'h%08x\n", group_offset + reg_offset, data); ++#endif ++ return data; ++} ++ ++static inline void ++phytium_writel_reg(struct phytium_display_private *priv, uint32_t data, ++ uint32_t group_offset, uint32_t reg_offset) ++{ ++ ++ writel(data, priv->regs + group_offset + reg_offset); ++#if DEBUG_LOG ++ pr_info("Write 32'h%08x 32'h%08x\n", group_offset + reg_offset, data); ++#endif ++} ++ ++static inline void ++phytium_writeb_reg(struct phytium_display_private *priv, uint8_t data, ++ uint32_t group_offset, uint32_t reg_offset) ++{ ++ writeb(data, priv->regs + group_offset + reg_offset); ++#if DEBUG_LOG ++ pr_info("Write 32'h%08x 8'h%08x\n", group_offset + reg_offset, data); ++#endif ++} ++ ++#define for_each_pipe(__dev_priv, __p) \ ++ for ((__p) = 0; (__p) < __dev_priv->info.total_pipes; (__p)++) ++ ++#define for_each_pipe_masked(__dev_priv, __p) \ ++ for ((__p) = 0; (__p) < __dev_priv->info.total_pipes; (__p)++) \ ++ for_each_if((__dev_priv->info.pipe_mask) & BIT(__p)) ++ ++int phytium_get_virt_pipe(struct phytium_display_private *priv, int phys_pipe); ++int phytium_get_phys_pipe(struct phytium_display_private *priv, int virt_pipe); ++int phytium_wait_cmd_done(struct phytium_display_private *priv, ++ uint32_t register_offset, ++ uint32_t request_bit, ++ uint32_t reply_bit); ++void 
phytium_display_private_init(struct phytium_display_private *priv, struct drm_device *dev); ++ ++extern struct drm_driver phytium_display_drm_driver; ++extern int dc_fake_mode_enable; ++extern int dc_fast_training_check; ++extern int num_source_rates; ++extern int source_max_lane_count; ++extern int link_dynamic_adjust; ++ ++#endif /* __PHYTIUM_DISPLAY_DRV_H__ */ +diff --git a/drivers/gpu/drm/phytium/phytium_dp.c b/drivers/gpu/drm/phytium/phytium_dp.c +new file mode 100644 +index 000000000000..7c7284bac8ee +--- /dev/null ++++ b/drivers/gpu/drm/phytium/phytium_dp.c +@@ -0,0 +1,2615 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* Phytium display drm driver ++ * ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "phytium_display_drv.h" ++#include "phytium_dp.h" ++#include "phytium_debugfs.h" ++#include "x100_dp.h" ++#include "phytium_panel.h" ++#include "phytium_reg.h" ++ ++static void phytium_dp_aux_init(struct phytium_dp_device *phytium_dp); ++static void handle_plugged_change(struct phytium_dp_device *phytium_dp, bool plugged); ++static bool phytium_edp_init_connector(struct phytium_dp_device *phytium_dp); ++static void phytium_edp_panel_poweroff(struct phytium_dp_device *phytium_dp); ++ ++static int phytium_rate[] = {162000, 270000, 540000, 810000}; ++ ++void phytium_phy_writel(struct phytium_dp_device *phytium_dp, uint32_t address, uint32_t data) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ uint32_t group_offset = priv->phy_access_base[port]; ++ ++#if DEBUG_LOG ++ pr_info("phy address write: 0x%x data:0x%x\n", address, data); ++#endif ++ phytium_writel_reg(priv, address, group_offset, PHYTIUM_PHY_ACCESS_ADDRESS); ++ phytium_writel_reg(priv, data, group_offset, PHYTIUM_PHY_WRITE_DATA); ++ phytium_writel_reg(priv, ACCESS_WRITE, group_offset, 
PHYTIUM_PHY_ACCESS_CTRL); ++ udelay(10); ++} ++ ++uint32_t phytium_phy_readl(struct phytium_dp_device *phytium_dp, uint32_t address) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ uint32_t group_offset = priv->phy_access_base[port]; ++ uint32_t data; ++ ++ phytium_writel_reg(priv, address, group_offset, PHYTIUM_PHY_ACCESS_ADDRESS); ++ phytium_writel_reg(priv, ACCESS_READ, group_offset, PHYTIUM_PHY_ACCESS_CTRL); ++ udelay(10); ++ data = phytium_readl_reg(priv, group_offset, PHYTIUM_PHY_READ_DATA); ++#if DEBUG_LOG ++ pr_info("phy address read: 0x%x data:0x%x\n", address, data); ++#endif ++ ++ return data; ++} ++ ++static int ++phytium_dp_hw_aux_transfer_write(struct phytium_dp_device *phytium_dp, struct drm_dp_aux_msg *msg) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ unsigned int i = 0, j = 0; ++ unsigned int cmd = 0; ++ unsigned int aux_status = 0, interrupt_status = 0; ++ unsigned char *data = msg->buffer; ++ int count_timeout = 0; ++ long ret = 0; ++ ++ for (i = 0; i < 3; i++) { ++ /* clear X100_DP_INTERRUPT_RAW_STATUS */ ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS); ++ phytium_writel_reg(priv, msg->address, group_offset, PHYTIUM_DP_AUX_ADDRESS); ++ for (j = 0; j < msg->size; j++) ++ phytium_writeb_reg(priv, data[j], group_offset, PHYTIUM_DP_AUX_WRITE_FIFO); ++ ++ cmd = ((msg->request & COMMAND_MASK) << COMMAND_SHIFT); ++ if (msg->size == 0) ++ cmd |= ADDRESS_ONLY; ++ else ++ cmd |= (msg->size-1) & BYTE_COUNT_MASK; ++ phytium_writel_reg(priv, cmd, group_offset, PHYTIUM_DP_AUX_COMMAND); ++ ++ count_timeout = 0; ++ do { ++ mdelay(5); ++ interrupt_status = phytium_readl_reg(priv, group_offset, ++ PHYTIUM_DP_INTERRUPT_RAW_STATUS); ++ aux_status = phytium_readl_reg(priv, group_offset, 
PHYTIUM_DP_AUX_STATUS); ++ if ((aux_status & REPLY_RECEIVED) || (aux_status & REPLY_ERROR) ++ || (interrupt_status & REPLY_TIMEOUT)) { ++ DRM_DEBUG_KMS("aux wait exit\n"); ++ break; ++ } ++ count_timeout++; ++ } while (count_timeout < 6); ++ ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS); ++ if (interrupt_status & REPLY_TIMEOUT) { ++ DRM_DEBUG_KMS("aux write reply timeout\n"); ++ continue; ++ } else if (aux_status & REPLY_ERROR) { ++ DRM_DEBUG_KMS("aux write reply error\n"); ++ continue; ++ } else if (aux_status & REPLY_RECEIVED) { ++ DRM_DEBUG_KMS("aux write reply received succussful\n"); ++ break; ++ } ++ } ++ ++ if (interrupt_status & REPLY_TIMEOUT) { ++ DRM_NOTE("aux(%d) write reply timeout\n", phytium_dp->port); ++ ret = -EIO; ++ goto out; ++ } else if (aux_status & REPLY_ERROR) { ++ DRM_ERROR("aux(%d) write reply error\n", phytium_dp->port); ++ ret = -EIO; ++ goto out; ++ } else if ((aux_status & REPLY_RECEIVED) != REPLY_RECEIVED) { ++ DRM_ERROR("aux(%d) write reply no response\n", phytium_dp->port); ++ ret = -EIO; ++ goto out; ++ } ++ ++ msg->reply = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_CODE); ++ ret = msg->size; ++out: ++ return ret; ++} ++ ++static int ++phytium_dp_hw_aux_transfer_read(struct phytium_dp_device *phytium_dp, struct drm_dp_aux_msg *msg) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ unsigned int i = 0; ++ unsigned int cmd = 0; ++ unsigned int aux_status = 0, interrupt_status = 0; ++ unsigned char *data = msg->buffer; ++ int count_timeout = 0; ++ long ret = 0; ++ ++ for (i = 0; i < 3; i++) { ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS); ++ phytium_writel_reg(priv, msg->address, group_offset, PHYTIUM_DP_AUX_ADDRESS); ++ cmd = ((msg->request & COMMAND_MASK) << COMMAND_SHIFT); ++ if (msg->size == 0) ++ cmd |= ADDRESS_ONLY; ++ else 
++ cmd |= ((msg->size-1) & BYTE_COUNT_MASK); ++ phytium_writel_reg(priv, cmd, group_offset, PHYTIUM_DP_AUX_COMMAND); ++ ++ count_timeout = 0; ++ do { ++ mdelay(5); ++ interrupt_status = phytium_readl_reg(priv, group_offset, ++ PHYTIUM_DP_INTERRUPT_RAW_STATUS); ++ aux_status = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_STATUS); ++ if ((aux_status & REPLY_RECEIVED) || (aux_status & REPLY_ERROR) ++ || (interrupt_status & REPLY_TIMEOUT)) { ++ DRM_DEBUG_KMS("aux wait exit\n"); ++ break; ++ } ++ count_timeout++; ++ } while (count_timeout < 6); ++ ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS); ++ if (interrupt_status & REPLY_TIMEOUT) { ++ DRM_DEBUG_KMS("aux read reply timeout\n"); ++ continue; ++ } else if (aux_status & REPLY_ERROR) { ++ DRM_DEBUG_KMS("aux read reply error\n"); ++ continue; ++ } else if (aux_status & REPLY_RECEIVED) { ++ DRM_DEBUG_KMS("aux read reply received succussful\n"); ++ break; ++ } ++ } ++ ++ if (interrupt_status & REPLY_TIMEOUT) { ++ DRM_NOTE("aux(%d) read reply timeout\n", phytium_dp->port); ++ ret = -EIO; ++ goto out; ++ } else if (aux_status & REPLY_ERROR) { ++ DRM_ERROR("aux(%d) read reply error\n", phytium_dp->port); ++ ret = -EIO; ++ goto out; ++ } else if ((aux_status & REPLY_RECEIVED) != REPLY_RECEIVED) { ++ DRM_ERROR("aux(%d) read reply no response\n", phytium_dp->port); ++ ret = -EIO; ++ goto out; ++ } ++ ++ msg->reply = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_CODE); ++ ret = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_DATA_COUNT); ++ ++ if (ret > msg->size) { ++ ret = msg->size; ++ } else if (ret != msg->size) { ++ DRM_DEBUG_KMS("aux read count error(ret:0x%lx != 0x%lx)\n", ret, msg->size); ++ ret = -EBUSY; ++ goto out; ++ } ++ ++ for (i = 0; i < ret; i++) ++ data[i] = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_DATA); ++ ++out: ++ return ret; ++} ++ ++static void phytium_get_native_mode(struct phytium_dp_device *phytium_dp) ++{ ++ struct 
drm_display_mode *t, *mode; ++ struct drm_connector *connector = &phytium_dp->connector; ++ struct drm_display_mode *native_mode = &phytium_dp->native_mode; ++ ++ list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { ++ if (mode->type & DRM_MODE_TYPE_PREFERRED) { ++ if (mode->hdisplay != native_mode->hdisplay || ++ mode->vdisplay != native_mode->vdisplay) { ++ memcpy(native_mode, mode, sizeof(*mode)); ++ drm_mode_set_crtcinfo(native_mode, 0); ++ } ++ break; ++ } ++ } ++ ++ if (&mode->head == &connector->probed_modes) ++ native_mode->clock = 0; ++} ++ ++static int phytium_connector_add_common_modes(struct phytium_dp_device *phytium_dp) ++{ ++ int i = 0, ret = 0; ++ struct drm_device *dev = phytium_dp->dev; ++ struct drm_display_mode *mode = NULL, *current_mode = NULL; ++ struct drm_display_mode *native_mode = &phytium_dp->native_mode; ++ bool mode_existed = false; ++ struct mode_size { ++ char name[DRM_DISPLAY_MODE_LEN]; ++ int w; ++ int h; ++ } common_mode[] = { ++ { "640x480", 640, 480}, ++ { "800x600", 800, 600}, ++ { "1024x768", 1024, 768}, ++ { "1280x720", 1280, 720}, ++ { "1280x800", 1280, 800}, ++ {"1280x1024", 1280, 1024}, ++ { "1440x900", 1440, 900}, ++ {"1680x1050", 1680, 1050}, ++ {"1600x1200", 1600, 1200}, ++ {"1920x1080", 1920, 1080}, ++ {"1920x1200", 1920, 1200} ++ }; ++ ++ if (native_mode->clock == 0) ++ return ret; ++ ++ for (i = 0; i < ARRAY_SIZE(common_mode); i++) { ++ mode_existed = false; ++ ++ if (common_mode[i].w > native_mode->hdisplay || ++ common_mode[i].h > native_mode->vdisplay || ++ (common_mode[i].w == native_mode->hdisplay && ++ common_mode[i].h == native_mode->vdisplay)) ++ continue; ++ ++ list_for_each_entry(current_mode, &phytium_dp->connector.probed_modes, head) { ++ if (common_mode[i].w == current_mode->hdisplay && ++ common_mode[i].h == current_mode->vdisplay) { ++ mode_existed = true; ++ break; ++ } ++ } ++ ++ if (mode_existed) ++ continue; ++ ++ mode = drm_mode_duplicate(dev, native_mode); ++ if (mode == NULL) ++ 
continue; ++ ++ mode->hdisplay = common_mode[i].w; ++ mode->vdisplay = common_mode[i].h; ++ mode->type &= ~DRM_MODE_TYPE_PREFERRED; ++ strncpy(mode->name, common_mode[i].name, DRM_DISPLAY_MODE_LEN); ++ drm_mode_probed_add(&phytium_dp->connector, mode); ++ ret++; ++ } ++ ++ return ret; ++} ++ ++static int phytium_connector_get_modes(struct drm_connector *connector) ++{ ++ struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); ++ struct edid *edid; ++ int ret = 0; ++ ++ edid = phytium_dp->detect_edid; ++ if (edid && drm_edid_is_valid(edid)) { ++ drm_connector_update_edid_property(connector, edid); ++ ret = drm_add_edid_modes(connector, edid); ++ phytium_get_native_mode(phytium_dp); ++ if (dc_fake_mode_enable) ++ ret += phytium_connector_add_common_modes(phytium_dp); ++ } else { ++ ret = drm_add_modes_noedid(connector, 640, 480); ++ } ++ ++ return ret; ++} ++ ++static int ++phytium_connector_mode_valid(struct drm_connector *connector, ++ struct drm_display_mode *mode) ++{ ++ struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); ++ struct drm_display_info *display_info = &phytium_dp->connector.display_info; ++ unsigned int requested, actual; ++ ++ switch (display_info->bpc) { ++ case 10: ++ case 6: ++ case 8: ++ break; ++ default: ++ DRM_INFO("not support bpc(%d)\n", display_info->bpc); ++ display_info->bpc = 8; ++ break; ++ } ++ ++ if ((display_info->color_formats & DRM_COLOR_FORMAT_RGB444) == 0) { ++ DRM_INFO("not support color_format(%d)\n", display_info->color_formats); ++ display_info->color_formats = DRM_COLOR_FORMAT_RGB444; ++ } ++ ++ requested = mode->clock * display_info->bpc * 3 / 1000; ++ actual = phytium_dp->max_link_rate * phytium_dp->max_link_lane_count / 100; ++ actual = actual * 8 / 10; ++ if (requested >= actual) { ++ DRM_DEBUG_KMS("requested=%d, actual=%d, clock=%d\n", requested, actual, ++ mode->clock); ++ return MODE_CLOCK_HIGH; ++ } ++ ++ if ((mode->hdisplay == 1600) && (mode->vdisplay == 900)) ++ return 
MODE_BAD_HVALUE; ++ ++ if ((mode->hdisplay == 1024) && (mode->clock > 78000)) ++ return MODE_BAD_HVALUE; ++ ++ return MODE_OK; ++} ++ ++static const ++struct drm_connector_helper_funcs phytium_connector_helper_funcs = { ++ .get_modes = phytium_connector_get_modes, ++ .mode_valid = phytium_connector_mode_valid, ++ .best_encoder = drm_atomic_helper_best_encoder, ++}; ++ ++static void phytium_dp_set_sink_rates(struct phytium_dp_device *phytium_dp) ++{ ++ static const int dp_rates[] = {162000, 270000, 540000, 810000}; ++ int i, max_rate; ++ ++ max_rate = drm_dp_bw_code_to_link_rate(phytium_dp->dpcd[DP_MAX_LINK_RATE]); ++ for (i = 0; i < ARRAY_SIZE(dp_rates); i++) { ++ if (dp_rates[i] > max_rate) ++ break; ++ phytium_dp->sink_rates[i] = dp_rates[i]; ++ } ++ phytium_dp->num_sink_rates = i; ++} ++ ++static int get_common_rates(const int *source_rates, int source_len, const int *sink_rates, ++ int sink_len, int *common_rates) ++{ ++ int i = 0, j = 0, k = 0; ++ ++ while (i < source_len && j < sink_len) { ++ if (source_rates[i] == sink_rates[j]) { ++ if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES)) ++ return k; ++ common_rates[k] = source_rates[i]; ++ ++k; ++ ++i; ++ ++j; ++ } else if (source_rates[i] < sink_rates[j]) { ++ ++i; ++ } else { ++ ++j; ++ } ++ } ++ return k; ++} ++ ++static void phytium_dp_set_common_rates(struct phytium_dp_device *phytium_dp) ++{ ++ WARN_ON(!phytium_dp->num_source_rates || !phytium_dp->num_sink_rates); ++ ++ phytium_dp->num_common_rates = get_common_rates(phytium_dp->source_rates, ++ phytium_dp->num_source_rates, ++ phytium_dp->sink_rates, ++ phytium_dp->num_sink_rates, ++ phytium_dp->common_rates); ++ ++ if (WARN_ON(phytium_dp->num_common_rates == 0)) { ++ phytium_dp->common_rates[0] = 162000; ++ phytium_dp->num_common_rates = 1; ++ } ++} ++ ++static bool phytium_dp_get_dpcd(struct phytium_dp_device *phytium_dp) ++{ ++ int ret; ++ unsigned char sink_count = 0; ++ ++ /* get dpcd capability,but don't check data error; so check revision */ ++ ret = 
drm_dp_dpcd_read(&phytium_dp->aux, 0x00, phytium_dp->dpcd, ++ sizeof(phytium_dp->dpcd)); ++ if (ret < 0) { ++ DRM_ERROR("port %d get DPCD capability fail\n", phytium_dp->port); ++ return false; ++ } ++ ++ if (phytium_dp->dpcd[DP_DPCD_REV] == 0) { ++ DRM_ERROR("DPCD data error: 0x%x\n", phytium_dp->dpcd[DP_DPCD_REV]); ++ return false; ++ } ++ ++ /* parse sink support link */ ++ phytium_dp_set_sink_rates(phytium_dp); ++ phytium_dp_set_common_rates(phytium_dp); ++ phytium_dp->sink_max_lane_count = drm_dp_max_lane_count(phytium_dp->dpcd); ++ phytium_dp->common_max_lane_count = min(phytium_dp->source_max_lane_count, ++ phytium_dp->sink_max_lane_count); ++ ++ /* get dpcd sink count */ ++ if (drm_dp_dpcd_readb(&phytium_dp->aux, DP_SINK_COUNT, &sink_count) <= 0) { ++ DRM_ERROR("get DPCD sink_count fail\n"); ++ return false; ++ } ++ ++ phytium_dp->sink_count = DP_GET_SINK_COUNT(sink_count); ++ if (!phytium_dp->sink_count) { ++ DRM_ERROR("DPCD sink_count should not be zero\n"); ++ return false; ++ } ++ ++ if (!drm_dp_is_branch(phytium_dp->dpcd)) ++ return true; ++ ++ if (phytium_dp->dpcd[DP_DPCD_REV] == 0x10) ++ return true; ++ ++ /* get downstream port for branch device */ ++ ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_DOWNSTREAM_PORT_0, ++ phytium_dp->downstream_ports, DP_MAX_DOWNSTREAM_PORTS); ++ if (ret < 0) { ++ DRM_ERROR("get DPCD DFP fail\n"); ++ return false; ++ } ++ ++ return true; ++} ++ ++static enum drm_connector_status ++phytium_dp_detect_dpcd(struct phytium_dp_device *phytium_dp) ++{ ++ if (!phytium_dp_get_dpcd(phytium_dp)) ++ return connector_status_disconnected; ++ ++ if (!drm_dp_is_branch(phytium_dp->dpcd)) ++ return connector_status_connected; ++ ++ if (phytium_dp->downstream_ports[0] & DP_DS_PORT_HPD) { ++ return phytium_dp->sink_count ? 
connector_status_connected ++ : connector_status_disconnected; ++ } ++ return connector_status_connected; ++} ++ ++static void phytium_get_adjust_train(struct phytium_dp_device *phytium_dp, ++ const uint8_t link_status[DP_LINK_STATUS_SIZE], uint8_t lane_count) ++{ ++ unsigned char v = 0; ++ unsigned char p = 0; ++ int lane; ++ unsigned char voltage_max; ++ unsigned char preemph_max; ++ ++ /* find max value */ ++ for (lane = 0; lane < lane_count; lane++) { ++ uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane); ++ uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); ++ ++ if (this_v > v) ++ v = this_v; ++ if (this_p > p) ++ p = this_p; ++ } ++ voltage_max = DP_TRAIN_VOLTAGE_SWING_LEVEL_3; ++ if (v >= voltage_max) ++ v = voltage_max | DP_TRAIN_MAX_SWING_REACHED; ++ ++ preemph_max = DP_TRAIN_PRE_EMPH_LEVEL_3; ++ if (p >= preemph_max) ++ p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; ++ ++ for (lane = 0; lane < 4; lane++) ++ phytium_dp->train_set[lane] = v | p; ++} ++ ++bool phytium_dp_coding_8b10b_need_enable(unsigned char test_pattern) ++{ ++ switch (test_pattern) { ++ case PHYTIUM_PHY_TP_D10_2: ++ case PHYTIUM_PHY_TP_SYMBOL_ERROR: ++ case PHYTIUM_PHY_TP_CP2520_1: ++ case PHYTIUM_PHY_TP_CP2520_2: ++ case PHYTIUM_PHY_TP_CP2520_3: ++ return true; ++ case PHYTIUM_PHY_TP_PRBS7: ++ case PHYTIUM_PHY_TP_80BIT_CUSTOM: ++ return false; ++ default: ++ return false; ++ } ++} ++ ++bool phytium_dp_scrambled_need_enable(unsigned char test_pattern) ++{ ++ switch (test_pattern) { ++ case PHYTIUM_PHY_TP_SYMBOL_ERROR: ++ case PHYTIUM_PHY_TP_CP2520_1: ++ case PHYTIUM_PHY_TP_CP2520_2: ++ case PHYTIUM_PHY_TP_CP2520_3: ++ return true; ++ case PHYTIUM_PHY_TP_D10_2: ++ case PHYTIUM_PHY_TP_PRBS7: ++ case PHYTIUM_PHY_TP_80BIT_CUSTOM: ++ return false; ++ default: ++ return false; ++ } ++} ++ ++static void phytium_dp_hw_set_lane_setting(struct phytium_dp_device *phytium_dp, ++ uint32_t link_rate, ++ uint8_t train_set) ++{ ++ 
phytium_dp->funcs->dp_hw_set_phy_lane_setting(phytium_dp, link_rate, train_set); ++} ++ ++static void phytium_dp_hw_set_link(struct phytium_dp_device *phytium_dp, ++ uint8_t lane_count, ++ uint32_t link_rate) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port, ret = 0, retry = 3; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ ++ phytium_writel_reg(priv, lane_count, ++ group_offset, PHYTIUM_DP_LANE_COUNT_SET); ++ phytium_writel_reg(priv, ++ drm_dp_link_rate_to_bw_code(link_rate), ++ group_offset, PHYTIUM_DP_LINK_BW_SET); ++ ++ if (drm_dp_enhanced_frame_cap(phytium_dp->dpcd)) ++ phytium_writel_reg(priv, ENHANCED_FRAME_ENABLE, ++ group_offset, PHYTIUM_DP_ENHANCED_FRAME_EN); ++ else ++ phytium_writel_reg(priv, ENHANCED_FRAME_DISABLE, ++ group_offset, PHYTIUM_DP_ENHANCED_FRAME_EN); ++ ++try_again: ++ ret = phytium_dp->funcs->dp_hw_set_phy_lane_and_rate(phytium_dp, lane_count, link_rate); ++ if ((ret < 0) && retry) { ++ retry--; ++ goto try_again; ++ } ++} ++ ++static void phytium_dp_hw_set_test_pattern(struct phytium_dp_device *phytium_dp, ++ uint8_t lane_count, ++ uint8_t test_pattern, ++ uint8_t *custom_pattern, ++ uint32_t custom_pattern_size) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port, val = 0, tmp = 0, i; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ ++ if ((test_pattern == PHYTIUM_PHY_TP_80BIT_CUSTOM) ++ && custom_pattern && (custom_pattern_size > 0)) { ++ val = *(int *)custom_pattern; ++ phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_CUSTOM_80BIT_PATTERN_0); ++ val = *(int *)(custom_pattern + 4); ++ phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_CUSTOM_80BIT_PATTERN_1); ++ val = *(short int *)(custom_pattern + 8); ++ phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_CUSTOM_80BIT_PATTERN_2); ++ } ++ ++ if (test_pattern == PHYTIUM_PHY_TP_D10_2 
|| test_pattern == PHYTIUM_PHY_TP_PRBS7 ++ || test_pattern == PHYTIUM_PHY_TP_80BIT_CUSTOM) ++ phytium_writel_reg(priv, SCRAMBLING_DISABLE, group_offset, ++ PHYTIUM_DP_SCRAMBLING_DISABLE); ++ else ++ phytium_writel_reg(priv, SCRAMBLING_ENABLE, group_offset, ++ PHYTIUM_DP_SCRAMBLING_DISABLE); ++ ++ tmp = test_pattern - PHYTIUM_PHY_TP_NONE + TEST_PATTERN_NONE; ++ val = 0; ++ for (i = 0; i < lane_count; i++) ++ val |= (tmp << (TEST_PATTERN_LANE_SHIFT * i)); ++ phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_LINK_QUAL_PATTERN_SET); ++} ++ ++static void phytium_dp_hw_set_train_pattern(struct phytium_dp_device *phytium_dp, ++ uint8_t train_pattern) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port, tmp = 0; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ ++ /* Scrambling is disabled for TPS1/TPS2/3 and enabled for TPS4 */ ++ if (train_pattern == DP_TRAINING_PATTERN_4 ++ || train_pattern == DP_TRAINING_PATTERN_DISABLE) { ++ phytium_writel_reg(priv, SCRAMBLING_ENABLE, group_offset, ++ PHYTIUM_DP_SCRAMBLING_DISABLE); ++ phytium_writel_reg(priv, SCRAMBLER_RESET, group_offset, ++ PHYTIUM_DP_FORCE_SCRAMBLER_RESET); ++ } else { ++ phytium_writel_reg(priv, SCRAMBLING_DISABLE, group_offset, ++ PHYTIUM_DP_SCRAMBLING_DISABLE); ++ } ++ switch (train_pattern) { ++ case DP_TRAINING_PATTERN_DISABLE: ++ tmp = TRAINING_OFF; ++ break; ++ case DP_TRAINING_PATTERN_1: ++ tmp = TRAINING_PATTERN_1; ++ break; ++ case DP_TRAINING_PATTERN_2: ++ tmp = TRAINING_PATTERN_2; ++ break; ++ case DP_TRAINING_PATTERN_3: ++ tmp = TRAINING_PATTERN_3; ++ break; ++ case DP_TRAINING_PATTERN_4: ++ tmp = TRAINING_PATTERN_4; ++ break; ++ default: ++ tmp = TRAINING_OFF; ++ break; ++ } ++ ++ phytium_writel_reg(priv, tmp, group_offset, PHYTIUM_DP_TRAINING_PATTERN_SET); ++} ++ ++void phytium_dp_hw_enable_audio(struct phytium_dp_device *phytium_dp) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct 
phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ int config = 0, config1, data_window = 0; ++ const struct dp_audio_n_m *n_m = NULL; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ ++ config = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); ++ phytium_writel_reg(priv, CHANNEL_MUTE_ENABLE, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); ++ ++ data_window = 90*(phytium_dp->link_rate)/100 ++ *(phytium_dp->mode.htotal - phytium_dp->mode.hdisplay) ++ /phytium_dp->mode.clock/4; ++ ++ phytium_writel_reg(priv, data_window, group_offset, PHYTIUM_DP_SEC_DATA_WINDOW); ++ ++ n_m = phytium_dp_audio_get_n_m(phytium_dp->link_rate, phytium_dp->audio_info.sample_rate); ++ if (n_m == NULL) { ++ DRM_NOTE("can not get n_m for link_rate(%d) and sample_rate(%d)\n", ++ phytium_dp->link_rate, phytium_dp->audio_info.sample_rate); ++ phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_MAUD); ++ phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_NAUD); ++ } else { ++ phytium_writel_reg(priv, n_m->m, group_offset, PHYTIUM_DP_SEC_MAUD); ++ phytium_writel_reg(priv, n_m->n, group_offset, PHYTIUM_DP_SEC_NAUD); ++ } ++ ++ config1 = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); ++ phytium_writel_reg(priv, SECONDARY_STREAM_DISABLE, ++ group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); ++ phytium_writel_reg(priv, config1, group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); ++ phytium_writel_reg(priv, config, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); ++} ++ ++static void phytium_dp_hw_audio_shutdown(struct phytium_dp_device *phytium_dp) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ ++ phytium_writel_reg(priv, SECONDARY_STREAM_DISABLE, ++ group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); ++} ++ ++static void phytium_dp_hw_audio_digital_mute(struct 
phytium_dp_device *phytium_dp, bool enable) ++{ ++ struct phytium_display_private *priv = phytium_dp->dev->dev_private; ++ int port = phytium_dp->port; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ ++ if (enable) ++ phytium_writel_reg(priv, CHANNEL_MUTE_ENABLE, ++ group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); ++ else ++ phytium_writel_reg(priv, SEC_AUDIO_ENABLE, ++ group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); ++} ++ ++static int ++phytium_dp_hw_audio_hw_params(struct phytium_dp_device *phytium_dp, struct audio_info audio_info) ++{ ++ struct phytium_display_private *priv = phytium_dp->dev->dev_private; ++ int port = phytium_dp->port; ++ int ret = 0, data_window = 0; ++ const struct dp_audio_n_m *n_m = NULL; ++ uint32_t fs, ws, fs_accurac; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ ++ DRM_DEBUG_KMS("%s:set port%d sample_rate(%d) channels(%d) sample_width(%d)\n", ++ __func__, phytium_dp->port, audio_info.sample_rate, ++ audio_info.channels, audio_info.sample_width); ++ ++ phytium_writel_reg(priv, INPUT_SELECT_I2S, group_offset, PHYTIUM_DP_SEC_INPUT_SELECT); ++ phytium_writel_reg(priv, APB_CLOCK/audio_info.sample_rate, ++ group_offset, PHYTIUM_DP_SEC_DIRECT_CLKDIV); ++ phytium_writel_reg(priv, audio_info.channels & CHANNEL_MASK, ++ group_offset, PHYTIUM_DP_SEC_CHANNEL_COUNT); ++ phytium_writel_reg(priv, CHANNEL_MAP_DEFAULT, group_offset, PHYTIUM_DP_SEC_CHANNEL_MAP); ++ data_window = 90*(phytium_dp->link_rate)/100 ++ *(phytium_dp->mode.htotal - phytium_dp->mode.hdisplay) ++ /phytium_dp->mode.clock/4; ++ phytium_writel_reg(priv, data_window, group_offset, PHYTIUM_DP_SEC_DATA_WINDOW); ++ phytium_writel_reg(priv, 0xb5, group_offset, PHYTIUM_DP_SEC_CS_CATEGORY_CODE); ++ ++ phytium_writel_reg(priv, CLOCK_MODE_SYNC, group_offset, PHYTIUM_DP_SEC_CLOCK_MODE); ++ phytium_writel_reg(priv, CS_SOURCE_FORMAT_DEFAULT, ++ group_offset, PHYTIUM_DP_SEC_CS_SOURCE_FORMAT); ++ ++ switch (audio_info.sample_rate) { ++ case 32000: ++ fs = ORIG_FREQ_32000; ++ fs_accurac = 
SAMPLING_FREQ_32000; ++ break; ++ case 44100: ++ fs = ORIG_FREQ_44100; ++ fs_accurac = SAMPLING_FREQ_44100; ++ break; ++ case 48000: ++ fs = ORIG_FREQ_48000; ++ fs_accurac = SAMPLING_FREQ_48000; ++ break; ++ case 96000: ++ fs = ORIG_FREQ_96000; ++ fs_accurac = SAMPLING_FREQ_96000; ++ break; ++ case 176400: ++ fs = ORIG_FREQ_176400; ++ fs_accurac = SAMPLING_FREQ_176400; ++ break; ++ case 192000: ++ fs = ORIG_FREQ_192000; ++ fs_accurac = SAMPLING_FREQ_192000; ++ break; ++ default: ++ DRM_ERROR("dp not support sample_rate %d\n", audio_info.sample_rate); ++ goto out; ++ } ++ ++ switch (audio_info.sample_width) { ++ case 16: ++ ws = WORD_LENGTH_16; ++ break; ++ case 18: ++ ws = WORD_LENGTH_18; ++ break; ++ case 20: ++ ws = WORD_LENGTH_20; ++ break; ++ case 24: ++ ws = WORD_LENGTH_24; ++ break; ++ default: ++ DRM_ERROR("dp not support sample_width %d\n", audio_info.sample_width); ++ goto out; ++ } ++ ++ phytium_writel_reg(priv, ((fs&ORIG_FREQ_MASK)<link_rate, audio_info.sample_rate); ++ if (n_m == NULL) { ++ DRM_NOTE("can not get n_m for link_rate(%d) and sample_rate(%d)\n", ++ phytium_dp->link_rate, audio_info.sample_rate); ++ phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_MAUD); ++ phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_NAUD); ++ ++ } else { ++ phytium_writel_reg(priv, n_m->m, group_offset, PHYTIUM_DP_SEC_MAUD); ++ phytium_writel_reg(priv, n_m->n, group_offset, PHYTIUM_DP_SEC_NAUD); ++ } ++ phytium_writel_reg(priv, SECONDARY_STREAM_ENABLE, ++ group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); ++ phytium_dp->audio_info = audio_info; ++ ++ return 0; ++ ++out: ++ phytium_writel_reg(priv, SECONDARY_STREAM_DISABLE, ++ group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); ++ ++ return ret; ++} ++ ++void phytium_dp_hw_disable_video(struct phytium_dp_device *phytium_dp) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ uint32_t group_offset = 
priv->dp_reg_base[port]; ++ ++ phytium_writel_reg(priv, SST_MST_SOURCE_0_DISABLE, ++ group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE); ++} ++ ++bool phytium_dp_hw_video_is_enable(struct phytium_dp_device *phytium_dp) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port, config; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ ++ config = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE); ++ return config ? true : false; ++} ++ ++void phytium_dp_hw_enable_video(struct phytium_dp_device *phytium_dp) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ ++ phytium_writel_reg(priv, SST_MST_SOURCE_0_ENABLE, ++ group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE); ++ phytium_writel_reg(priv, LINK_SOFT_RESET, group_offset, PHYTIUM_DP_SOFT_RESET); ++} ++ ++void phytium_dp_hw_config_video(struct phytium_dp_device *phytium_dp) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ unsigned long link_bw, date_rate = 0; ++ struct drm_display_info *display_info = &phytium_dp->connector.display_info; ++ unsigned char tu_size = 64; ++ unsigned long data_per_tu = 0; ++ int symbols_per_tu, frac_symbols_per_tu, symbol_count, udc, value; ++ ++ /* cal M/N and tu_size */ ++ phytium_writel_reg(priv, phytium_dp->mode.crtc_clock/10, group_offset, PHYTIUM_DP_M_VID); ++ phytium_writel_reg(priv, phytium_dp->link_rate/10, group_offset, PHYTIUM_DP_N_VID); ++ link_bw = phytium_dp->link_rate * phytium_dp->link_lane_count; ++ date_rate = (phytium_dp->mode.crtc_clock * display_info->bpc * 3)/8; ++ ++ /* mul 10 for register setting */ ++ data_per_tu = 10*tu_size * date_rate/link_bw; ++ symbols_per_tu = 
(data_per_tu/10)&0xff; ++ frac_symbols_per_tu = (data_per_tu%10*16/10) & 0xf; ++ phytium_writel_reg(priv, frac_symbols_per_tu<<24 | symbols_per_tu<<16 | tu_size, ++ group_offset, PHYTIUM_DP_TRANSFER_UNIT_SIZE); ++ ++ symbol_count = (phytium_dp->mode.crtc_hdisplay*display_info->bpc*3 + 7)/8; ++ udc = (symbol_count + phytium_dp->link_lane_count - 1)/phytium_dp->link_lane_count; ++ phytium_writel_reg(priv, udc, group_offset, PHYTIUM_DP_DATA_COUNT); ++ ++ /* config main stream attributes */ ++ phytium_writel_reg(priv, phytium_dp->mode.crtc_htotal, ++ group_offset, PHYTIUM_DP_MAIN_LINK_HTOTAL); ++ phytium_writel_reg(priv, phytium_dp->mode.crtc_hdisplay, ++ group_offset, PHYTIUM_DP_MAIN_LINK_HRES); ++ phytium_writel_reg(priv, ++ phytium_dp->mode.crtc_hsync_end - phytium_dp->mode.crtc_hsync_start, ++ group_offset, PHYTIUM_DP_MAIN_LINK_HSWIDTH); ++ phytium_writel_reg(priv, phytium_dp->mode.crtc_htotal - phytium_dp->mode.crtc_hsync_start, ++ group_offset, PHYTIUM_DP_MAIN_LINK_HSTART); ++ phytium_writel_reg(priv, phytium_dp->mode.crtc_vtotal, ++ group_offset, PHYTIUM_DP_MAIN_LINK_VTOTAL); ++ phytium_writel_reg(priv, phytium_dp->mode.crtc_vdisplay, ++ group_offset, PHYTIUM_DP_MAIN_LINK_VRES); ++ phytium_writel_reg(priv, ++ phytium_dp->mode.crtc_vsync_end - phytium_dp->mode.crtc_vsync_start, ++ group_offset, PHYTIUM_DP_MAIN_LINK_VSWIDTH); ++ phytium_writel_reg(priv, phytium_dp->mode.crtc_vtotal - phytium_dp->mode.crtc_vsync_start, ++ group_offset, PHYTIUM_DP_MAIN_LINK_VSTART); ++ ++ value = 0; ++ if (phytium_dp->mode.flags & DRM_MODE_FLAG_PHSYNC) ++ value = value & (~HSYNC_POLARITY_LOW); ++ else ++ value = value | HSYNC_POLARITY_LOW; ++ ++ if (phytium_dp->mode.flags & DRM_MODE_FLAG_PVSYNC) ++ value = value & (~VSYNC_POLARITY_LOW); ++ else ++ value = value | VSYNC_POLARITY_LOW; ++ phytium_writel_reg(priv, value, group_offset, PHYTIUM_DP_MAIN_LINK_POLARITY); ++ ++ switch (display_info->bpc) { ++ case 10: ++ value = (MISC0_BIT_DEPTH_10BIT << MISC0_BIT_DEPTH_OFFSET); ++ break; ++ 
case 6: ++ value = (MISC0_BIT_DEPTH_6BIT << MISC0_BIT_DEPTH_OFFSET); ++ break; ++ default: ++ value = (MISC0_BIT_DEPTH_8BIT << MISC0_BIT_DEPTH_OFFSET); ++ break; ++ } ++ value |= (MISC0_COMPONENT_FORMAT_RGB << MISC0_COMPONENT_FORMAT_SHIFT) ++ | MISC0_SYNCHRONOUS_CLOCK; ++ phytium_writel_reg(priv, value, group_offset, PHYTIUM_DP_MAIN_LINK_MISC0); ++ phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_MAIN_LINK_MISC1); ++ ++ value = USER_ODDEVEN_POLARITY_HIGH | USER_DATA_ENABLE_POLARITY_HIGH; ++ if (phytium_dp->mode.flags & DRM_MODE_FLAG_PHSYNC) ++ value = value | USER_HSYNC_POLARITY_HIGH; ++ else ++ value = value & (~USER_HSYNC_POLARITY_HIGH); ++ if (phytium_dp->mode.flags & DRM_MODE_FLAG_PVSYNC) ++ value = value | USER_VSYNC_POLARITY_HIGH; ++ else ++ value = value & (~USER_VSYNC_POLARITY_HIGH); ++ phytium_writel_reg(priv, value, group_offset, PHYTIUM_DP_USER_SYNC_POLARITY); ++} ++ ++void phytium_dp_hw_disable_output(struct phytium_dp_device *phytium_dp) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ ++ phytium_writel_reg(priv, TRANSMITTER_OUTPUT_DISABLE, ++ group_offset, PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE); ++ phytium_writel_reg(priv, LINK_SOFT_RESET, group_offset, PHYTIUM_DP_SOFT_RESET); ++} ++ ++void phytium_dp_hw_enable_output(struct phytium_dp_device *phytium_dp) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ ++ phytium_writel_reg(priv, LINK_SOFT_RESET, group_offset, PHYTIUM_DP_SOFT_RESET); ++ phytium_writel_reg(priv, TRANSMITTER_OUTPUT_ENABLE, ++ group_offset, PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE); ++} ++ ++void phytium_dp_hw_enable_input_source(struct phytium_dp_device *phytium_dp) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private 
*priv = dev->dev_private; ++ int port = phytium_dp->port; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ ++ phytium_writel_reg(priv, VIRTUAL_SOURCE_0_ENABLE, ++ group_offset, PHYTIUM_INPUT_SOURCE_ENABLE); ++} ++ ++void phytium_dp_hw_disable_input_source(struct phytium_dp_device *phytium_dp) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ ++ phytium_writel_reg(priv, (~VIRTUAL_SOURCE_0_ENABLE)&VIRTUAL_SOURCE_0_ENABLE_MASK, ++ priv->dp_reg_base[port], PHYTIUM_INPUT_SOURCE_ENABLE); ++} ++ ++bool phytium_dp_hw_output_is_enable(struct phytium_dp_device *phytium_dp) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ int config = 0; ++ ++ config = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE); ++ return config ? true : false; ++} ++ ++static void phytium_dp_hw_get_hpd_state(struct phytium_dp_device *phytium_dp) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ uint32_t val = 0, raw_state = 0; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ ++ val = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_RAW_STATUS); ++ ++ /* maybe miss hpd, so used for clear PHYTIUM_DP_INTERRUPT_RAW_STATUS */ ++ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS); ++ raw_state = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SINK_HPD_STATE); ++ if (val & HPD_EVENT) ++ phytium_dp->dp_hpd_state.hpd_event_state = true; ++ ++ if (val & HPD_IRQ) ++ phytium_dp->dp_hpd_state.hpd_irq_state = true; ++ ++ if (raw_state & HPD_CONNECT) ++ phytium_dp->dp_hpd_state.hpd_raw_state = true; ++ else ++ phytium_dp->dp_hpd_state.hpd_raw_state = false; ++} ++ ++void phytium_dp_hw_hpd_irq_setup(struct phytium_dp_device 
*phytium_dp, bool enable) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ ++ phytium_dp->dp_hpd_state.hpd_irq_enable = enable; ++ if (enable) ++ phytium_writel_reg(priv, HPD_OTHER_MASK, group_offset, PHYTIUM_DP_INTERRUPT_MASK); ++ else ++ phytium_writel_reg(priv, HPD_IRQ_MASK|HPD_EVENT_MASK|HPD_OTHER_MASK, ++ group_offset, PHYTIUM_DP_INTERRUPT_MASK); ++} ++ ++int phytium_dp_hw_init(struct phytium_dp_device *phytium_dp) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port, ret; ++ uint32_t group_offset = priv->dp_reg_base[port]; ++ ++ ret = phytium_dp->funcs->dp_hw_reset(phytium_dp); ++ if (ret) ++ goto out; ++ ret = phytium_dp->funcs->dp_hw_init_phy(phytium_dp); ++ if (ret) ++ goto out; ++ ++ phytium_writel_reg(priv, AUX_CLK_DIVIDER, group_offset, PHYTIUM_DP_AUX_CLK_DIVIDER); ++ phytium_dp->fast_train_support = false; ++ phytium_dp->hw_spread_enable = phytium_dp->funcs->dp_hw_spread_is_enable(phytium_dp); ++ ++out: ++ return ret; ++} ++ ++static void phytium_dp_hw_set_source_rate_and_lane_count(struct phytium_dp_device *phytium_dp) ++{ ++ phytium_dp->source_rates = phytium_rate; ++ phytium_dp->num_source_rates = num_source_rates; ++ ++ if (phytium_dp->port == 0) ++ phytium_dp->source_max_lane_count = source_max_lane_count; ++ else if (phytium_dp->port == 1) ++ phytium_dp->source_max_lane_count = source_max_lane_count; ++ else if (phytium_dp->port == 2) ++ phytium_dp->source_max_lane_count = 1; ++ else ++ phytium_dp->source_max_lane_count = 1; ++} ++ ++static int phytium_dp_dpcd_get_tp_link(struct phytium_dp_device *phytium_dp, ++ uint8_t *test_lane_count, ++ uint32_t *test_link_rate) ++{ ++ uint8_t test_link_bw; ++ int ret; ++ ++ ret = drm_dp_dpcd_readb(&phytium_dp->aux, DP_TEST_LANE_COUNT, ++ test_lane_count); ++ if (ret <= 0) { ++ 
DRM_DEBUG_KMS("test pattern Lane count read failed(%d)\n", ret); ++ goto failed; ++ } ++ ++ ret = drm_dp_dpcd_readb(&phytium_dp->aux, DP_TEST_LINK_RATE, ++ &test_link_bw); ++ if (ret <= 0) { ++ DRM_DEBUG_KMS("test pattern link rate read failed(%d)\n", ret); ++ goto failed; ++ } ++ *test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw); ++ ++ return 0; ++failed: ++ return ret; ++} ++ ++static int phytium_dp_dpcd_set_link(struct phytium_dp_device *phytium_dp, ++ uint8_t lane_count, uint32_t link_rate) ++{ ++ uint8_t link_config[2]; ++ int ret = 0; ++ ++ link_config[0] = drm_dp_link_rate_to_bw_code(link_rate); ++ link_config[1] = lane_count; ++ if (drm_dp_enhanced_frame_cap(phytium_dp->dpcd)) { ++ link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; ++ } ++ ret = drm_dp_dpcd_write(&phytium_dp->aux, DP_LINK_BW_SET, link_config, 2); ++ if (ret < 0) { ++ DRM_NOTE("write dpcd DP_LINK_BW_SET fail: ret:%d\n", ret); ++ goto failed; ++ } ++ ++ if (phytium_dp->hw_spread_enable) ++ link_config[0] = DP_SPREAD_AMP_0_5; ++ else ++ link_config[0] = 0; ++ link_config[1] = DP_SET_ANSI_8B10B; ++ ret = drm_dp_dpcd_write(&phytium_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2); ++ if (ret < 0) { ++ DRM_ERROR("write DP_DOWNSPREAD_CTRL fail: ret:%d\n", ret); ++ goto failed; ++ } ++ ++ return 0; ++failed: ++ return ret; ++} ++ ++static int phytium_dp_dpcd_set_test_pattern(struct phytium_dp_device *phytium_dp, ++ uint8_t test_pattern) ++{ ++ unsigned char value; ++ int ret; ++ ++ if (phytium_dp_coding_8b10b_need_enable(test_pattern)) ++ value = DP_SET_ANSI_8B10B; ++ else ++ value = 0; ++ ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_MAIN_LINK_CHANNEL_CODING_SET, value); ++ if (ret < 0) { ++ DRM_ERROR("write DP_MAIN_LINK_CHANNEL_CODING_SET fail: ret:%d\n", ret); ++ goto failed; ++ } ++ ++ if (phytium_dp_scrambled_need_enable(test_pattern)) ++ value = DP_TRAINING_PATTERN_DISABLE; ++ else ++ value = (DP_TRAINING_PATTERN_DISABLE | DP_LINK_SCRAMBLING_DISABLE); ++ ++ ret = 
drm_dp_dpcd_writeb(&phytium_dp->aux, DP_TRAINING_PATTERN_SET, value); ++ if (ret < 0) { ++ DRM_ERROR("write DP_TRAINING_PATTERN_SET fail: ret:%d\n", ret); ++ goto failed; ++ } ++ ++ ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_LINK_QUAL_LANE0_SET, test_pattern); ++ if (ret < 0) { ++ DRM_ERROR("write DP_TRAINING_PATTERN_SET fail: ret:%d\n", ret); ++ goto failed; ++ } ++ ++ return 0; ++failed: ++ return ret; ++} ++ ++static int phytium_dp_dpcd_set_train_pattern(struct phytium_dp_device *phytium_dp, ++ uint8_t train_pattern) ++{ ++ uint8_t value; ++ int ret; ++ ++ /* Scrambling is disabled for TPS1/2/3 and enabled for TPS4 */ ++ if (train_pattern == DP_TRAINING_PATTERN_4 || train_pattern == DP_TRAINING_PATTERN_DISABLE) ++ value = train_pattern; ++ else ++ value = (train_pattern | DP_LINK_SCRAMBLING_DISABLE); ++ ++ ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_TRAINING_PATTERN_SET, value); ++ if (ret < 0) { ++ DRM_NOTE("write DP_TRAINING_PATTERN_SET fail: ret:%d\n", ret); ++ goto failed; ++ } ++ ++ return 0; ++failed: ++ return ret; ++} ++ ++static int ++phytium_dp_dpcd_set_lane_setting(struct phytium_dp_device *phytium_dp, uint8_t *train_set) ++{ ++ int ret = 0; ++ ++ ret = drm_dp_dpcd_write(&phytium_dp->aux, DP_TRAINING_LANE0_SET, ++ phytium_dp->train_set, 4); ++ if (ret < 0) { ++ DRM_ERROR("write DP_TRAINING_LANE0_SET fail: ret:%d\n", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static int ++phytium_dp_dpcd_get_adjust_request(struct phytium_dp_device *phytium_dp, uint8_t lane_count) ++{ ++ int ret = 0; ++ uint8_t link_status[DP_LINK_STATUS_SIZE]; ++ ++ ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS, ++ link_status, DP_LINK_STATUS_SIZE); ++ if (ret < 0) { ++ DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n"); ++ goto failed; ++ } ++ phytium_get_adjust_train(phytium_dp, link_status, lane_count); ++ ++ return 0; ++failed: ++ return ret; ++} ++ ++void phytium_dp_dpcd_sink_dpms(struct phytium_dp_device *phytium_dp, int mode) ++{ ++ int ret, i; 
++ ++ if (phytium_dp->dpcd[DP_DPCD_REV] < 0x11) ++ return; ++ if (mode != DRM_MODE_DPMS_ON) { ++ ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_SET_POWER, DP_SET_POWER_D3); ++ } else { ++ for (i = 0; i < 3; i++) { ++ ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_SET_POWER, DP_SET_POWER_D0); ++ if (ret == 1) ++ break; ++ msleep(20); ++ } ++ } ++ ++ if (ret != 1) ++ DRM_DEBUG_KMS("failed to %s sink power state\n", ++ mode == DRM_MODE_DPMS_ON ? "enable" : "disable"); ++} ++ ++static bool phytium_dp_link_training_clock_recovery(struct phytium_dp_device *phytium_dp) ++{ ++ int ret; ++ unsigned char voltage, max_vswing_tries; ++ int voltage_tries; ++ ++ /* clear the test pattern */ ++ phytium_dp_hw_set_test_pattern(phytium_dp, phytium_dp->link_lane_count, ++ PHYTIUM_PHY_TP_NONE, NULL, 0); ++ ++ /* config source and sink's link rate and lane count */ ++ phytium_dp_hw_set_link(phytium_dp, phytium_dp->link_lane_count, phytium_dp->link_rate); ++ ret = phytium_dp_dpcd_set_link(phytium_dp, phytium_dp->link_lane_count, ++ phytium_dp->link_rate); ++ if (ret < 0) { ++ DRM_NOTE("phytium_dp_dpcd_set_link failed(ret=%d)\n", ret); ++ return false; ++ } ++ ++ /* config source's voltage swing and pre-emphasis(103-106) */ ++ memset(phytium_dp->train_set, 0, sizeof(phytium_dp->train_set)); ++ phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate, ++ phytium_dp->train_set[0]); ++ ++ /* config train pattern */ ++ phytium_dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_1); ++ ret = phytium_dp_dpcd_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_1); ++ if (ret < 0) { ++ DRM_ERROR("phytium_dp_dpcd_set_train_pattern fail: ret:%d\n", ret); ++ return false; ++ } ++ ++ /* config sink's voltage swing and pre-emphasis(103-106) */ ++ ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set); ++ if (ret < 0) { ++ DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret); ++ return false; ++ } ++ ++ voltage_tries = 1; ++ max_vswing_tries = 0; ++ for (;;) { 
++ unsigned char link_status[DP_LINK_STATUS_SIZE]; ++ ++ drm_dp_link_train_clock_recovery_delay(phytium_dp->dpcd); ++ ++ /* get link status 0x202-0x207 */ ++ ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS, ++ link_status, DP_LINK_STATUS_SIZE); ++ if (ret < 0) { ++ DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n"); ++ return false; ++ } ++ ++ if (drm_dp_clock_recovery_ok(link_status, phytium_dp->link_lane_count)) { ++ DRM_DEBUG_KMS("clock revorery ok\n"); ++ return true; ++ } ++ ++ if (voltage_tries == 5) { ++ DRM_DEBUG_KMS("Same voltage tried 5 times\n"); ++ return false; ++ } ++ ++ if (max_vswing_tries == 1) { ++ DRM_DEBUG_KMS("Max Voltage Swing reached\n"); ++ return false; ++ } ++ ++ voltage = phytium_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; ++ ++ /* config source and sink's voltage swing and pre-emphasis(103-106) */ ++ phytium_get_adjust_train(phytium_dp, link_status, phytium_dp->link_lane_count); ++ phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate, ++ phytium_dp->train_set[0]); ++ ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set); ++ if (ret < 0) { ++ DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret); ++ return false; ++ } ++ ++ if ((phytium_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) ++ ++voltage_tries; ++ else ++ voltage_tries = 1; ++ ++ if (phytium_dp->train_set[0] & DP_TRAIN_MAX_SWING_REACHED) ++ ++max_vswing_tries; ++ ++ DRM_DEBUG_KMS("try train_set:0x%x voltage_tries:%d max_vswing_tries:%d\n", ++ phytium_dp->train_set[0], voltage_tries, max_vswing_tries); ++ } ++} ++ ++static unsigned int phytium_dp_get_training_pattern(struct phytium_dp_device *phytium_dp) ++{ ++ bool sink_tps3, sink_tps4; ++ ++ sink_tps4 = drm_dp_tps4_supported(phytium_dp->dpcd); ++ if (sink_tps4) ++ return DP_TRAINING_PATTERN_4; ++ else if (phytium_dp->link_rate == 810000) ++ DRM_DEBUG_KMS("8.1 Gbps link rate without sink TPS4 support\n"); ++ ++ sink_tps3 = 
drm_dp_tps3_supported(phytium_dp->dpcd); ++ if (sink_tps3) ++ return DP_TRAINING_PATTERN_3; ++ else if (phytium_dp->link_rate >= 540000) ++ DRM_DEBUG_KMS(">=5.4/6.48 Gbps link rate without sink TPS3 support\n"); ++ ++ return DP_TRAINING_PATTERN_2; ++} ++ ++static bool phytium_dp_link_training_channel_equalization(struct phytium_dp_device *phytium_dp) ++{ ++ unsigned int training_pattern; ++ int tries, ret; ++ unsigned char link_status[DP_LINK_STATUS_SIZE]; ++ bool channel_eq = false; ++ ++ /* config source and sink's voltage swing and pre-emphasis(103-106), from clock recovery */ ++ phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate, ++ phytium_dp->train_set[0]); ++ ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set); ++ if (ret < 0) { ++ DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret); ++ return channel_eq; ++ } ++ ++ /* config source and sink's train_pattern x */ ++ training_pattern = phytium_dp_get_training_pattern(phytium_dp); ++ phytium_dp_hw_set_train_pattern(phytium_dp, training_pattern); ++ ret = phytium_dp_dpcd_set_train_pattern(phytium_dp, training_pattern); ++ if (ret < 0) { ++ DRM_ERROR("phytium_dp_dpcd_set_train_pattern fail: ret:%d\n", ret); ++ return channel_eq; ++ } ++ ++ for (tries = 0; tries < 5; tries++) { ++ drm_dp_link_train_channel_eq_delay(phytium_dp->dpcd); ++ ++ /* get link status 0x202-0x207 */ ++ ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS, ++ link_status, DP_LINK_STATUS_SIZE); ++ if (ret < 0) { ++ DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n"); ++ break; ++ } ++ ++ /* Make sure clock is still ok */ ++ if (!drm_dp_clock_recovery_ok(link_status, phytium_dp->link_lane_count)) { ++ DRM_DEBUG_KMS("CR check failed, cannot continue channel equalization\n"); ++ break; ++ } ++ ++ if (drm_dp_channel_eq_ok(link_status, phytium_dp->link_lane_count)) { ++ channel_eq = true; ++ DRM_DEBUG_KMS("Channel EQ done. 
DP Training successful\n"); ++ break; ++ } ++ ++ /* config source and sink's voltage swing and pre-emphasis(103-106) */ ++ phytium_get_adjust_train(phytium_dp, link_status, phytium_dp->link_lane_count); ++ phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate, ++ phytium_dp->train_set[0]); ++ ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set); ++ if (ret < 0) { ++ DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret); ++ break; ++ } ++ } ++ ++ /* Try 5 times, else fail and try at lower BW */ ++ if (tries == 5) ++ DRM_DEBUG_KMS("Channel equalization failed 5 times\n"); ++ ++ return channel_eq; ++} ++ ++static void phytium_dp_train_retry_work_fn(struct work_struct *work) ++{ ++ struct phytium_dp_device *phytium_dp = train_retry_to_dp_device(work); ++ struct drm_connector *connector; ++ ++ connector = &phytium_dp->connector; ++ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); ++ mutex_lock(&connector->dev->mode_config.mutex); ++ drm_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD); ++ mutex_unlock(&connector->dev->mode_config.mutex); ++ drm_kms_helper_hotplug_event(connector->dev); ++} ++ ++/* return index of rate in rates array, or -1 if not found */ ++static int phytium_dp_rate_index(const int *rates, int len, int rate) ++{ ++ int i; ++ ++ for (i = 0; i < len; i++) ++ if (rate == rates[i]) ++ return i; ++ ++ return -1; ++} ++ ++int phytium_dp_get_link_train_fallback_values(struct phytium_dp_device *phytium_dp) ++{ ++ int index, ret = 0; ++ ++ if (phytium_dp->is_edp) { ++ phytium_dp->train_retry_count++; ++ DRM_INFO("Retrying Link training for eDP(%d) with same parameters\n", ++ phytium_dp->port); ++ goto out; ++ } else { ++ index = phytium_dp_rate_index(phytium_dp->common_rates, ++ phytium_dp->num_common_rates, ++ phytium_dp->link_rate); ++ if (index > 0) { ++ phytium_dp->link_rate = phytium_dp->common_rates[index - 1]; ++ } else if (phytium_dp->link_lane_count > 1) { ++ 
phytium_dp->link_rate = phytium_dp->max_link_rate; ++ phytium_dp->link_lane_count = phytium_dp->link_lane_count >> 1; ++ } else { ++ phytium_dp->train_retry_count++; ++ phytium_dp->link_rate = phytium_dp->max_link_rate; ++ phytium_dp->link_lane_count = phytium_dp->max_link_lane_count; ++ DRM_INFO("Retrying Link training for DP(%d) with maximal parameters\n", ++ phytium_dp->port); ++ ret = -1; ++ } ++ } ++ ++out: ++ return ret; ++} ++ ++static int ++phytium_dp_stop_link_train(struct phytium_dp_device *phytium_dp) ++{ ++ int ret; ++ ++ /* config source and sink's train_pattern x: DP_TRAINING_PATTERN_DISABLE */ ++ phytium_dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_DISABLE); ++ ++ ret = phytium_dp_dpcd_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_DISABLE); ++ if (ret < 0) { ++ DRM_NOTE("phytium_dp_dpcd_set_train_pattern fail: ret:%d\n", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int phytium_dp_start_link_train(struct phytium_dp_device *phytium_dp) ++{ ++ int ret = 0; ++ ++ phytium_dp_hw_disable_output(phytium_dp); ++ phytium_dp_hw_disable_input_source(phytium_dp); ++ phytium_dp_hw_disable_video(phytium_dp); ++ phytium_dp_hw_enable_input_source(phytium_dp); ++ phytium_dp_hw_enable_output(phytium_dp); ++ phytium_dp_dpcd_sink_dpms(phytium_dp, DRM_MODE_DPMS_OFF); ++ phytium_dp_dpcd_sink_dpms(phytium_dp, DRM_MODE_DPMS_ON); ++ ++ if (!phytium_dp_link_training_clock_recovery(phytium_dp)) ++ goto failure_handling; ++ ++ if (!phytium_dp_link_training_channel_equalization(phytium_dp)) ++ goto failure_handling; ++ ++ ret = phytium_dp_stop_link_train(phytium_dp); ++ if (ret < 0) { ++ DRM_NOTE("phytium_dp_stop_link_train failed: ret = %d\n", ret); ++ goto out; ++ } ++ ++ if (phytium_dp->trigger_train_fail) { ++ phytium_dp->trigger_train_fail--; ++ goto failure_handling; ++ } ++ phytium_dp->train_retry_count = 0; ++ ++ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training Pass at Link Rate = %d, Lane count = %d\n", ++ phytium_dp->connector.base.id, ++ 
phytium_dp->connector.name, phytium_dp->link_rate, ++ phytium_dp->link_lane_count); ++ ++ return 0; ++ ++failure_handling: ++ DRM_INFO("[CONNECTOR:%d:%s] Link Training failed at Link Rate = %d, Lane count = %d", ++ phytium_dp->connector.base.id, ++ phytium_dp->connector.name, ++ phytium_dp->link_rate, phytium_dp->link_lane_count); ++ ++ ret = phytium_dp_stop_link_train(phytium_dp); ++ if (ret < 0) { ++ DRM_NOTE("phytium_dp_stop_link_train failed: ret = %d\n", ret); ++ goto out; ++ } ++ ++ phytium_dp_get_link_train_fallback_values(phytium_dp); ++ if (phytium_dp->train_retry_count < 5) ++ schedule_work(&phytium_dp->train_retry_work); ++ else ++ DRM_ERROR("DP(%d) Link Training Unsuccessful, and stop Training\n", ++ phytium_dp->port); ++ ++out: ++ return -1; ++} ++ ++static bool phytium_dp_needs_link_retrain(struct phytium_dp_device *phytium_dp) ++{ ++ unsigned char link_status[DP_LINK_STATUS_SIZE]; ++ int ret = 0; ++ ++ /* get link status 0x202-0x207 */ ++ ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS, ++ link_status, DP_LINK_STATUS_SIZE); ++ if (ret < 0) { ++ DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n"); ++ return true; ++ } ++ ++ if ((phytium_dp->link_rate == 0) || (phytium_dp->link_lane_count == 0)) { ++ DRM_DEBUG_KMS("link_rate(%d) or lane_count(%d) is invalid\n", ++ phytium_dp->link_rate, phytium_dp->link_lane_count); ++ return true; ++ } ++ ++ /* Make sure clock is still ok */ ++ if (!drm_dp_clock_recovery_ok(link_status, phytium_dp->link_lane_count)) { ++ DRM_DEBUG_KMS("Clock recovery check failed\n"); ++ return true; ++ } ++ ++ if (!drm_dp_channel_eq_ok(link_status, phytium_dp->link_lane_count)) { ++ DRM_DEBUG_KMS("Channel EQ check failed\n"); ++ return true; ++ } ++ ++ if (!phytium_dp_hw_output_is_enable(phytium_dp)) { ++ DRM_DEBUG_KMS("check DP output enable failed\n"); ++ return true; ++ } ++ return false; ++} ++ ++static bool ++phytium_dp_get_sink_irq(struct phytium_dp_device *phytium_dp, u8 *sink_irq_vector) ++{ ++ return 
drm_dp_dpcd_readb(&phytium_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, ++ sink_irq_vector) == 1; ++} ++ ++static uint8_t phytium_dp_autotest_phy_pattern(struct phytium_dp_device *phytium_dp) ++{ ++ union phytium_phy_tp phytium_phy_tp; ++ int ret; ++ unsigned char test_80_bit_pattern[ ++ (DP_TEST_80BIT_CUSTOM_PATTERN_79_72 - ++ DP_TEST_80BIT_CUSTOM_PATTERN_7_0)+1] = {0}; ++ unsigned char test_pattern; ++ ++ ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_TEST_PHY_PATTERN, ++ &phytium_phy_tp.raw, ++ sizeof(phytium_phy_tp)); ++ if (ret <= 0) { ++ DRM_DEBUG_KMS("Could not read DP_TEST_PHY_PATTERN\n"); ++ goto failed; ++ } ++ ++ test_pattern = phytium_phy_tp.bits.PATTERN; ++ ++ if (test_pattern == PHYTIUM_PHY_TP_80BIT_CUSTOM) { ++ ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_TEST_80BIT_CUSTOM_PATTERN_7_0, ++ test_80_bit_pattern, ++ sizeof(test_80_bit_pattern)); ++ if (ret <= 0) { ++ DRM_DEBUG_KMS("Could not read DP_TEST_PHY_PATTERN\n"); ++ goto failed; ++ } ++ } ++ ++ /* config source and sink's link rate and link count */ ++ ret = phytium_dp_dpcd_get_tp_link(phytium_dp, &phytium_dp->compliance.test_lane_count, ++ &phytium_dp->compliance.test_link_rate); ++ if (ret < 0) { ++ DRM_ERROR("phytium_dp_dpcd_get_tp_link fail: ret:%d\n", ret); ++ goto failed; ++ } ++ ++ phytium_dp_hw_set_link(phytium_dp, phytium_dp->compliance.test_lane_count, ++ phytium_dp->compliance.test_link_rate); ++ ret = phytium_dp_dpcd_set_link(phytium_dp, phytium_dp->compliance.test_lane_count, ++ phytium_dp->compliance.test_link_rate); ++ if (ret < 0) { ++ DRM_ERROR("phytium_dp_dpcd_set_link fail: ret:%d\n", ret); ++ goto failed_dpcd_set_link; ++ } ++ ++ /* config source and sink's lane setting: voltage swing and pre-emphasis */ ++ ret = phytium_dp_dpcd_get_adjust_request(phytium_dp, ++ phytium_dp->compliance.test_lane_count); ++ if (ret < 0) { ++ DRM_ERROR("phytium_dp_dpcd_get_adjust_request fail: ret:%d\n", ret); ++ goto failed_dpcd_get_adjust_request; ++ } ++ phytium_dp_hw_set_lane_setting(phytium_dp, 
phytium_dp->compliance.test_link_rate, ++ phytium_dp->train_set[0]); ++ ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set); ++ if (ret < 0) { ++ DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret); ++ goto failed_dpcd_set_lane_setting; ++ } ++ ++ /* config test pattern */ ++ phytium_dp_hw_set_test_pattern(phytium_dp, phytium_dp->compliance.test_lane_count, ++ test_pattern, test_80_bit_pattern, ++ sizeof(test_80_bit_pattern)); ++ ret = phytium_dp_dpcd_set_test_pattern(phytium_dp, test_pattern); ++ if (ret < 0) { ++ DRM_ERROR("phytium_dp_dpcd_set_test_pattern fail: ret:%d\n", ret); ++ goto failed_dpcd_set_tp; ++ } ++ ++ return DP_TEST_ACK; ++ ++failed_dpcd_set_tp: ++ phytium_dp_hw_set_test_pattern(phytium_dp, phytium_dp->compliance.test_lane_count, ++ PHYTIUM_PHY_TP_NONE, test_80_bit_pattern, ++ sizeof(test_80_bit_pattern)); ++failed_dpcd_set_link: ++failed_dpcd_set_lane_setting: ++failed_dpcd_get_adjust_request: ++failed: ++ return DP_TEST_NAK; ++} ++ ++static void phytium_dp_handle_test_request(struct phytium_dp_device *phytium_dp) ++{ ++ uint8_t response = DP_TEST_NAK; ++ uint8_t request = 0; ++ int status; ++ ++ status = drm_dp_dpcd_readb(&phytium_dp->aux, DP_TEST_REQUEST, &request); ++ if (status <= 0) { ++ DRM_DEBUG_KMS("Could not read test request from sink\n"); ++ goto update_status; ++ } ++ ++ switch (request) { ++ case DP_TEST_LINK_TRAINING: ++ case DP_TEST_LINK_VIDEO_PATTERN: ++ case DP_TEST_LINK_EDID_READ: ++ DRM_DEBUG_KMS("Not support test request '%02x'\n", request); ++ response = DP_TEST_NAK; ++ break; ++ case DP_TEST_LINK_PHY_TEST_PATTERN: ++ DRM_DEBUG_KMS("PHY_PATTERN test requested\n"); ++ response = phytium_dp_autotest_phy_pattern(phytium_dp); ++ break; ++ default: ++ DRM_DEBUG_KMS("Invalid test request '%02x'\n", request); ++ break; ++ } ++ ++update_status: ++ status = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_TEST_RESPONSE, response); ++ if (status <= 0) ++ DRM_DEBUG_KMS("Could not write test response to 
sink\n"); ++ ++} ++ ++static void phytium_dp_unset_edid(struct drm_connector *connector) ++{ ++ struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); ++ ++ if (phytium_dp->detect_edid) ++ kfree(phytium_dp->detect_edid); ++ phytium_dp->detect_edid = NULL; ++ phytium_dp->has_audio = false; ++} ++ ++static enum drm_connector_status phytium_dp_set_edid(struct drm_connector *connector) ++{ ++ struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); ++ ++ phytium_dp_unset_edid(connector); ++ phytium_dp->detect_edid = drm_get_edid(connector, &phytium_dp->aux.ddc); ++ if (!phytium_dp->detect_edid) ++ return connector_status_disconnected; ++ ++ phytium_dp->has_audio = drm_detect_monitor_audio(phytium_dp->detect_edid); ++ ++ return connector_status_connected; ++} ++ ++static int phytium_dp_long_pulse(struct drm_connector *connector, bool hpd_raw_state) ++{ ++ struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); ++ enum drm_connector_status status = connector->status; ++ bool video_enable = false; ++ uint32_t index = 0; ++ ++ if (phytium_dp->is_edp) ++ status = connector_status_connected; ++ else if (hpd_raw_state) { ++ if (!phytium_dp_needs_link_retrain(phytium_dp)) { ++ status = connector_status_connected; ++ goto out; ++ } ++ } else { ++ status = connector_status_disconnected; ++ goto out; ++ } ++ ++ if (!phytium_dp->is_edp) { ++ status = phytium_dp_detect_dpcd(phytium_dp); ++ if (status == connector_status_disconnected) ++ goto out; ++ ++ index = phytium_dp->num_common_rates-1; ++ phytium_dp->max_link_rate = phytium_dp->common_rates[index]; ++ phytium_dp->max_link_lane_count = phytium_dp->common_max_lane_count; ++ phytium_dp->link_rate = phytium_dp->max_link_rate; ++ phytium_dp->link_lane_count = phytium_dp->max_link_lane_count; ++ DRM_DEBUG_KMS("common_max_lane_count: %d, common_max_rate:%d\n", ++ phytium_dp->max_link_lane_count, phytium_dp->max_link_rate); ++ ++ video_enable = 
phytium_dp_hw_video_is_enable(phytium_dp); ++ phytium_dp_start_link_train(phytium_dp); ++ ++ status = phytium_dp_set_edid(connector); ++ if (status == connector_status_disconnected) ++ goto out; ++ ++ if (video_enable) { ++ mdelay(2); ++ phytium_dp_hw_enable_video(phytium_dp); ++ } ++ } ++ ++out: ++ return status; ++} ++ ++static int phytium_dp_short_pulse(struct drm_connector *connector) ++{ ++ struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); ++ enum drm_connector_status status = connector->status; ++ u8 sink_irq_vector = 0; ++ bool video_enable = false; ++ ++ /* handle the test pattern */ ++ if (phytium_dp_get_sink_irq(phytium_dp, &sink_irq_vector) && ++ sink_irq_vector != 0) { ++ drm_dp_dpcd_writeb(&phytium_dp->aux, ++ DP_DEVICE_SERVICE_IRQ_VECTOR, ++ sink_irq_vector); ++ if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST) ++ phytium_dp_handle_test_request(phytium_dp); ++ if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ)) ++ DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); ++ } ++ if (!phytium_dp_needs_link_retrain(phytium_dp)) { ++ status = connector_status_connected; ++ goto out; ++ } ++ ++ video_enable = phytium_dp_hw_video_is_enable(phytium_dp); ++ phytium_dp_start_link_train(phytium_dp); ++ if (video_enable) { ++ mdelay(2); ++ phytium_dp_hw_enable_video(phytium_dp); ++ } ++ ++out: ++ return status; ++} ++ ++void phytium_dp_hpd_poll_handler(struct phytium_display_private *priv) ++{ ++ struct drm_device *dev = priv->dev; ++ struct drm_connector_list_iter conn_iter; ++ struct drm_connector *connector; ++ enum drm_connector_status old_status; ++ bool changed = false; ++ ++ mutex_lock(&dev->mode_config.mutex); ++ DRM_DEBUG_KMS("running encoder hotplug poll functions\n"); ++ drm_connector_list_iter_begin(dev, &conn_iter); ++ drm_for_each_connector_iter(connector, &conn_iter) { ++ if (connector->force) ++ continue; ++ old_status = connector->status; ++ connector->status = drm_helper_probe_detect(connector, NULL, false); ++ if 
(old_status != connector->status) { ++ const char *old, *new; ++ ++ old = drm_get_connector_status_name(old_status); ++ new = drm_get_connector_status_name(connector->status); ++ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", ++ connector->base.id, ++ connector->name, ++ old, new); ++ changed = true; ++ } ++ } ++ drm_connector_list_iter_end(&conn_iter); ++ mutex_unlock(&dev->mode_config.mutex); ++ ++ if (changed) ++ drm_kms_helper_hotplug_event(dev); ++} ++ ++void phytium_dp_hpd_irq_setup(struct drm_device *dev, bool enable) ++{ ++ struct phytium_dp_device *phytium_dp; ++ struct drm_encoder *encoder; ++ struct phytium_display_private *priv = dev->dev_private; ++ bool handler = false; ++ bool hpd_raw_state_old = false; ++ ++ /* We might have missed any hotplugs that happened, so polling and handler */ ++ if (enable) { ++ spin_lock(&priv->hotplug_irq_lock); ++ ++ drm_for_each_encoder(encoder, dev) { ++ phytium_dp = encoder_to_dp_device(encoder); ++ if (!phytium_dp->dp_hpd_state.hpd_irq_enable) { ++ hpd_raw_state_old = phytium_dp->dp_hpd_state.hpd_raw_state; ++ phytium_dp_hw_get_hpd_state(phytium_dp); ++ if (phytium_dp->dp_hpd_state.hpd_event_state ++ || phytium_dp->dp_hpd_state.hpd_irq_state ++ || (hpd_raw_state_old != phytium_dp->dp_hpd_state.hpd_raw_state)) { ++ handler = true; ++ } ++ } ++ } ++ spin_unlock(&priv->hotplug_irq_lock); ++ if (handler) ++ phytium_dp_hpd_poll_handler(priv); ++ } ++ ++ drm_for_each_encoder(encoder, dev) { ++ phytium_dp = encoder_to_dp_device(encoder); ++ phytium_dp_hw_hpd_irq_setup(phytium_dp, enable); ++ } ++} ++ ++void phytium_dp_hpd_work_func(struct work_struct *work) ++{ ++ struct phytium_display_private *priv = ++ container_of(work, struct phytium_display_private, hotplug_work); ++ struct drm_device *dev = priv->dev; ++ struct drm_connector_list_iter conn_iter; ++ struct drm_connector *connector; ++ enum drm_connector_status old_status; ++ bool changed = false; ++ ++ mutex_lock(&dev->mode_config.mutex); ++ 
DRM_DEBUG_KMS("running encoder hotplug work functions\n"); ++ drm_connector_list_iter_begin(dev, &conn_iter); ++ drm_for_each_connector_iter(connector, &conn_iter) { ++ if (connector->force) ++ continue; ++ old_status = connector->status; ++ connector->status = drm_helper_probe_detect(connector, NULL, false); ++ if (old_status != connector->status) { ++ const char *old, *new; ++ ++ old = drm_get_connector_status_name(old_status); ++ new = drm_get_connector_status_name(connector->status); ++ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", ++ connector->base.id, ++ connector->name, ++ old, new); ++ changed = true; ++ } ++ } ++ drm_connector_list_iter_end(&conn_iter); ++ mutex_unlock(&dev->mode_config.mutex); ++ ++ if (changed) ++ drm_kms_helper_hotplug_event(dev); ++ ++ phytium_dp_hpd_irq_setup(dev, true); ++} ++ ++irqreturn_t phytium_dp_hpd_irq_handler(struct phytium_display_private *priv) ++{ ++ struct drm_encoder *encoder = NULL; ++ struct phytium_dp_device *phytium_dp = NULL; ++ struct drm_device *dev = priv->dev; ++ bool handler = false; ++ ++ spin_lock(&priv->hotplug_irq_lock); ++ ++ drm_for_each_encoder(encoder, dev) { ++ phytium_dp = encoder_to_dp_device(encoder); ++ if (phytium_dp->dp_hpd_state.hpd_irq_enable) { ++ phytium_dp_hw_get_hpd_state(phytium_dp); ++ if (phytium_dp->dp_hpd_state.hpd_event_state ++ || phytium_dp->dp_hpd_state.hpd_irq_state) { ++ handler = true; ++ } ++ } ++ } ++ spin_unlock(&priv->hotplug_irq_lock); ++ ++ if (handler) { ++ phytium_dp_hpd_irq_setup(dev, false); ++ schedule_work(&priv->hotplug_work); ++ return IRQ_HANDLED; ++ } ++ return IRQ_NONE; ++} ++ ++ ++static void phytium_dp_fast_link_train_detect(struct phytium_dp_device *phytium_dp) ++{ ++ phytium_dp->fast_train_support = !!(phytium_dp->dpcd[DP_MAX_DOWNSPREAD] ++ & DP_NO_AUX_HANDSHAKE_LINK_TRAINING); ++ DRM_DEBUG_KMS("fast link training %s\n", ++ phytium_dp->fast_train_support ? 
"supported" : "unsupported"); ++} ++ ++bool phytium_dp_fast_link_train(struct phytium_dp_device *phytium_dp) ++{ ++ int ret = 0; ++ unsigned int training_pattern; ++ ++ /* clear the test pattern */ ++ phytium_dp_hw_set_test_pattern(phytium_dp, phytium_dp->link_lane_count, ++ PHYTIUM_PHY_TP_NONE, NULL, 0); ++ ++ /* config source and sink's link rate and lane count */ ++ phytium_dp_hw_set_link(phytium_dp, phytium_dp->link_lane_count, phytium_dp->link_rate); ++ ++ /* config source and sink's voltage swing and pre-emphasis(103-106) */ ++ phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate, ++ phytium_dp->train_set[0]); ++ ++ /* config train pattern */ ++ phytium_dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_1); ++ usleep_range(500, 600); ++ ++ training_pattern = phytium_dp_get_training_pattern(phytium_dp); ++ phytium_dp_hw_set_train_pattern(phytium_dp, training_pattern); ++ usleep_range(500, 600); ++ ++ phytium_dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_DISABLE); ++ ++ if (dc_fast_training_check) { ++ unsigned char link_status[DP_LINK_STATUS_SIZE]; ++ ++ ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS, ++ link_status, DP_LINK_STATUS_SIZE); ++ if (ret < 0) { ++ DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n"); ++ return false; ++ } ++ ++ if (!drm_dp_clock_recovery_ok(link_status, phytium_dp->link_lane_count)) { ++ DRM_DEBUG_KMS("check clock recovery failed\n"); ++ return false; ++ } ++ ++ if (!drm_dp_channel_eq_ok(link_status, phytium_dp->link_lane_count)) { ++ DRM_DEBUG_KMS("check channel equalization failed\n"); ++ return false; ++ } ++ } ++ ++ return true; ++} ++ ++static enum drm_connector_status ++phytium_connector_detect(struct drm_connector *connector, bool force) ++{ ++ enum drm_connector_status status = connector->status; ++ struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); ++ bool hpd_event_state, hpd_irq_state, hpd_raw_state; ++ struct drm_device *dev = phytium_dp->dev; ++ 
struct phytium_display_private *priv = dev->dev_private; ++ bool plugged = true; ++ ++ spin_lock_irq(&priv->hotplug_irq_lock); ++ hpd_event_state = phytium_dp->dp_hpd_state.hpd_event_state; ++ hpd_irq_state = phytium_dp->dp_hpd_state.hpd_irq_state; ++ hpd_raw_state = phytium_dp->dp_hpd_state.hpd_raw_state; ++ phytium_dp->dp_hpd_state.hpd_event_state = false; ++ phytium_dp->dp_hpd_state.hpd_irq_state = false; ++ spin_unlock_irq(&priv->hotplug_irq_lock); ++ ++ if (hpd_event_state) ++ status = phytium_dp_long_pulse(connector, hpd_raw_state); ++ ++ if (hpd_irq_state) ++ status = phytium_dp_short_pulse(connector); ++ ++ if (status == connector_status_unknown) ++ status = connector_status_disconnected; ++ ++ if ((!phytium_dp->is_edp) && (!hpd_raw_state)) ++ status = connector_status_disconnected; ++ ++ if (connector->status != status) { ++ if ((status == connector_status_connected) && phytium_dp->has_audio) ++ plugged = true; ++ else ++ plugged = false; ++ ++ handle_plugged_change(phytium_dp, plugged); ++ } ++ ++ return status; ++} ++ ++static void ++phytium_connector_destroy(struct drm_connector *connector) ++{ ++ struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); ++ ++ drm_connector_cleanup(connector); ++ kfree(phytium_dp); ++} ++ ++static int ++phytium_dp_connector_register(struct drm_connector *connector) ++{ ++ int ret; ++ struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); ++ ++ phytium_dp_aux_init(phytium_dp); ++ if (phytium_dp->is_edp) { ++ phytium_edp_init_connector(phytium_dp); ++ ret = phytium_edp_backlight_device_register(phytium_dp); ++ if (ret) ++ DRM_ERROR("failed to register port(%d) backlight device(ret=%d)\n", ++ phytium_dp->port, ret); ++ } ++ ++ ret = phytium_debugfs_connector_add(connector); ++ if (ret) ++ DRM_ERROR("failed to register phytium connector debugfs(ret=%d)\n", ret); ++ ++ return 0; ++} ++ ++static void ++phytium_dp_connector_unregister(struct drm_connector *connector) ++{ ++ struct 
phytium_dp_device *phytium_dp = connector_to_dp_device(connector); ++ ++ if (phytium_dp->is_edp) { ++ phytium_edp_backlight_device_unregister(phytium_dp); ++ phytium_edp_panel_poweroff(phytium_dp); ++ } ++ drm_dp_aux_unregister(&phytium_dp->aux); ++} ++ ++static const struct drm_connector_funcs phytium_connector_funcs = { ++ .dpms = drm_helper_connector_dpms, ++ .detect = phytium_connector_detect, ++ .fill_modes = drm_helper_probe_single_connector_modes, ++ .destroy = phytium_connector_destroy, ++ .reset = drm_atomic_helper_connector_reset, ++ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, ++ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, ++ .late_register = phytium_dp_connector_register, ++ .early_unregister = phytium_dp_connector_unregister, ++}; ++ ++static void phytium_dp_encoder_mode_set(struct drm_encoder *encoder, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted) ++{ ++ struct phytium_dp_device *dp = encoder_to_dp_device(encoder); ++ ++ drm_mode_copy(&dp->mode, adjusted); ++} ++ ++static void phytium_edp_panel_poweron(struct phytium_dp_device *phytium_dp) ++{ ++ phytium_panel_poweron(&phytium_dp->panel); ++} ++ ++static void phytium_edp_panel_poweroff(struct phytium_dp_device *phytium_dp) ++{ ++ phytium_panel_poweroff(&phytium_dp->panel); ++} ++ ++static void phytium_edp_backlight_on(struct phytium_dp_device *phytium_dp) ++{ ++ phytium_panel_enable_backlight(&phytium_dp->panel); ++} ++ ++static void phytium_edp_backlight_off(struct phytium_dp_device *phytium_dp) ++{ ++ phytium_panel_disable_backlight(&phytium_dp->panel); ++} ++ ++static void phytium_encoder_disable(struct drm_encoder *encoder) ++{ ++ struct phytium_dp_device *phytium_dp = encoder_to_dp_device(encoder); ++ ++ if (phytium_dp->is_edp) ++ phytium_edp_backlight_off(phytium_dp); ++ ++ phytium_dp_hw_disable_video(phytium_dp); ++ ++ mdelay(50); ++ ++ if (phytium_dp->is_edp) ++ phytium_edp_panel_poweroff(phytium_dp); ++} ++ ++void 
phytium_dp_adjust_link_train_parameter(struct phytium_dp_device *phytium_dp) ++{ ++ struct drm_display_info *display_info = &phytium_dp->connector.display_info; ++ unsigned long link_bw, date_rate = 0, bs_limit, bs_request; ++ int rate = 0; ++ ++ bs_request = phytium_dp->mode.crtc_htotal/(phytium_dp->mode.crtc_clock/1000); ++ date_rate = (phytium_dp->mode.crtc_clock * display_info->bpc * 3)/8; ++ ++ for (;;) { ++ bs_limit = 8192 / (phytium_dp->link_rate/1000); ++ link_bw = phytium_dp->link_rate * phytium_dp->link_lane_count; ++ rate = 10 * date_rate / link_bw; ++ DRM_DEBUG_KMS("adjust link rate(%d), lane count(%d)\n", ++ phytium_dp->link_rate, phytium_dp->link_lane_count); ++ DRM_DEBUG_KMS("for crtc_clock(%d) bs_request(%ld) bs_limit(%ld) rate(%d)\n", ++ phytium_dp->mode.crtc_clock, bs_request, bs_limit, rate); ++ if ((link_dynamic_adjust && (bs_request < bs_limit) && rate < 10) || ++ ((!link_dynamic_adjust) && (rate < 10))) ++ break; ++ phytium_dp_get_link_train_fallback_values(phytium_dp); ++ } ++ ++ DRM_DEBUG_KMS("Try link training at Link Rate = %d, Lane count = %d\n", ++ phytium_dp->link_rate, phytium_dp->link_lane_count); ++} ++ ++static void phytium_encoder_enable(struct drm_encoder *encoder) ++{ ++ struct phytium_dp_device *phytium_dp = encoder_to_dp_device(encoder); ++ int ret = 0; ++ ++ phytium_dp_hw_disable_video(phytium_dp); ++ ++ if (phytium_dp->is_edp) { ++ phytium_edp_panel_poweron(phytium_dp); ++ if (phytium_dp->fast_train_support) ++ phytium_dp_fast_link_train(phytium_dp); ++ else ++ ret = phytium_dp_start_link_train(phytium_dp); ++ mdelay(2); ++ phytium_dp_fast_link_train_detect(phytium_dp); ++ } else { ++ phytium_dp_adjust_link_train_parameter(phytium_dp); ++ ret = phytium_dp_start_link_train(phytium_dp); ++ mdelay(2); ++ } ++ ++ phytium_dp_hw_config_video(phytium_dp); ++ if (ret == 0) { ++ phytium_dp_hw_enable_video(phytium_dp); ++ if (phytium_dp->has_audio) ++ phytium_dp_hw_enable_audio(phytium_dp); ++ } ++ ++ if (phytium_dp->is_edp) { ++ 
phytium_edp_backlight_on(phytium_dp); ++ } ++} ++ ++static const struct drm_encoder_helper_funcs phytium_encoder_helper_funcs = { ++ .mode_set = phytium_dp_encoder_mode_set, ++ .disable = phytium_encoder_disable, ++ .enable = phytium_encoder_enable, ++}; ++ ++static const struct drm_encoder_funcs phytium_encoder_funcs = { ++ .destroy = drm_encoder_cleanup, ++}; ++ ++static const struct dp_audio_n_m phytium_dp_audio_n_m[] = { ++ { 32000, 162000, 1024, 10125 }, ++ { 44100, 162000, 784, 5625 }, ++ { 48000, 162000, 512, 3375 }, ++ { 64000, 162000, 2048, 10125 }, ++ { 88200, 162000, 1568, 5625 }, ++ { 96000, 162000, 1024, 3375 }, ++ { 128000, 162000, 4096, 10125 }, ++ { 176400, 162000, 3136, 5625 }, ++ { 192000, 162000, 2048, 3375 }, ++ { 32000, 270000, 1024, 16875 }, ++ { 44100, 270000, 784, 9375 }, ++ { 48000, 270000, 512, 5625 }, ++ { 64000, 270000, 2048, 16875 }, ++ { 88200, 270000, 1568, 9375 }, ++ { 96000, 270000, 1024, 5625 }, ++ { 128000, 270000, 4096, 16875 }, ++ { 176400, 270000, 3136, 9375 }, ++ { 192000, 270000, 2048, 5625 }, ++ { 32000, 540000, 1024, 33750 }, ++ { 44100, 540000, 784, 18750 }, ++ { 48000, 540000, 512, 11250 }, ++ { 64000, 540000, 2048, 33750 }, ++ { 88200, 540000, 1568, 18750 }, ++ { 96000, 540000, 1024, 11250 }, ++ { 128000, 540000, 4096, 33750 }, ++ { 176400, 540000, 3136, 18750 }, ++ { 192000, 540000, 2048, 11250 }, ++ { 32000, 810000, 1024, 50625 }, ++ { 44100, 810000, 784, 28125 }, ++ { 48000, 810000, 512, 16875 }, ++ { 64000, 810000, 2048, 50625 }, ++ { 88200, 810000, 1568, 28125 }, ++ { 96000, 810000, 1024, 16875 }, ++ { 128000, 810000, 4096, 50625 }, ++ { 176400, 810000, 3136, 28125 }, ++ { 192000, 810000, 2048, 16875 }, ++}; ++ ++static int phytium_dp_audio_get_eld(struct device *dev, void *data, u8 *buf, size_t len) ++{ ++ struct phytium_dp_device *phytium_dp = data; ++ ++ memcpy(buf, phytium_dp->connector.eld, min(sizeof(phytium_dp->connector.eld), len)); ++ ++ return 0; ++} ++ ++static int phytium_dp_audio_digital_mute(struct 
device *dev, void *data, bool enable) ++{ ++ struct phytium_dp_device *phytium_dp = data; ++ ++ phytium_dp_hw_audio_digital_mute(phytium_dp, enable); ++ ++ return 0; ++} ++ ++const struct dp_audio_n_m *phytium_dp_audio_get_n_m(int link_rate, int sample_rate) ++{ ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(phytium_dp_audio_n_m); i++) { ++ if (sample_rate == phytium_dp_audio_n_m[i].sample_rate ++ && link_rate == phytium_dp_audio_n_m[i].link_rate) ++ return &phytium_dp_audio_n_m[i]; ++ } ++ ++ return NULL; ++} ++ ++static int phytium_dp_audio_hw_params(struct device *dev, void *data, ++ struct hdmi_codec_daifmt *daifmt, ++ struct hdmi_codec_params *params) ++{ ++ struct phytium_dp_device *phytium_dp = data; ++ int ret = 0; ++ struct audio_info audio_info = { ++ .sample_width = params->sample_width, ++ .sample_rate = params->sample_rate, ++ .channels = params->channels, ++ }; ++ ++ if (daifmt->fmt != HDMI_I2S) { ++ DRM_ERROR("invalid audio format %d\n", daifmt->fmt); ++ ret = -EINVAL; ++ goto failed; ++ } ++ ++ ret = phytium_dp_hw_audio_hw_params(phytium_dp, audio_info); ++ ++failed: ++ return ret; ++} ++ ++static void phytium_dp_audio_shutdown(struct device *dev, void *data) ++{ ++ struct phytium_dp_device *phytium_dp = data; ++ ++ phytium_dp_hw_audio_shutdown(phytium_dp); ++} ++ ++static void handle_plugged_change(struct phytium_dp_device *phytium_dp, bool plugged) ++{ ++ if (phytium_dp->plugged_cb && phytium_dp->codec_dev) ++ phytium_dp->plugged_cb(phytium_dp->codec_dev, plugged); ++} ++ ++static int phytium_dp_audio_hook_plugged_cb(struct device *dev, void *data, ++ hdmi_codec_plugged_cb fn, ++ struct device *codec_dev) ++{ ++ struct phytium_dp_device *phytium_dp = data; ++ bool plugged; ++ ++ phytium_dp->plugged_cb = fn; ++ phytium_dp->codec_dev = codec_dev; ++ ++ if ((phytium_dp->connector.status == connector_status_connected) && phytium_dp->has_audio) ++ plugged = true; ++ else ++ plugged = false; ++ ++ handle_plugged_change(phytium_dp, plugged); ++ return 0; ++} 
++ ++ ++static const struct hdmi_codec_ops phytium_audio_codec_ops = { ++ .hw_params = phytium_dp_audio_hw_params, ++ .audio_shutdown = phytium_dp_audio_shutdown, ++ .digital_mute = phytium_dp_audio_digital_mute, ++ .get_eld = phytium_dp_audio_get_eld, ++ .hook_plugged_cb = phytium_dp_audio_hook_plugged_cb, ++}; ++ ++static int phytium_dp_audio_codec_init(struct phytium_dp_device *phytium_dp) ++{ ++ struct device *dev = phytium_dp->dev->dev; ++ struct hdmi_codec_pdata codec_data = { ++ .i2s = 1, ++ .spdif = 0, ++ .ops = &phytium_audio_codec_ops, ++ .max_i2s_channels = 2, ++ .data = phytium_dp, ++ }; ++ ++ phytium_dp->audio_pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME, ++ PLATFORM_DEVID_AUTO, ++ &codec_data, sizeof(codec_data)); ++ ++ return PTR_ERR_OR_ZERO(phytium_dp->audio_pdev); ++} ++ ++static long phytium_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) ++{ ++ struct phytium_dp_device *phytium_dp = container_of(aux, struct phytium_dp_device, aux); ++ long ret = 0; ++ ++ DRM_DEBUG_KMS("msg->size: 0x%lx\n", msg->size); ++ ++ if (WARN_ON(msg->size > 16)) ++ return -E2BIG; ++ ++ switch (msg->request & ~DP_AUX_I2C_MOT) { ++ case DP_AUX_NATIVE_WRITE: ++ case DP_AUX_I2C_WRITE: ++ case DP_AUX_I2C_WRITE_STATUS_UPDATE: ++ ret = phytium_dp_hw_aux_transfer_write(phytium_dp, msg); ++ DRM_DEBUG_KMS("aux write reply:0x%x ret:0x%lx\n", msg->reply, ret); ++ break; ++ case DP_AUX_NATIVE_READ: ++ case DP_AUX_I2C_READ: ++ ret = phytium_dp_hw_aux_transfer_read(phytium_dp, msg); ++ DRM_DEBUG_KMS("aux read ret:0x%lx\n", ret); ++ break; ++ default: ++ ret = -EINVAL; ++ break; ++ } ++ ++ return ret; ++} ++ ++static void phytium_dp_aux_init(struct phytium_dp_device *phytium_dp) ++{ ++ drm_dp_aux_init(&phytium_dp->aux); ++ phytium_dp->aux.name = kasprintf(GFP_KERNEL, "dp-%d", phytium_dp->port); ++ phytium_dp->aux.transfer = phytium_dp_aux_transfer; ++} ++ ++int phytium_get_encoder_crtc_mask(struct phytium_dp_device *phytium_dp, int port) ++{ ++ struct 
drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int i, mask = 0; ++ ++ for_each_pipe_masked(priv, i) { ++ if (i != port) ++ mask++; ++ else ++ break; ++ } ++ ++ return BIT(mask); ++} ++ ++static bool phytium_dp_is_edp(struct phytium_dp_device *phytium_dp, int port) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ ++ if (priv->info.edp_mask & BIT(port)) ++ return true; ++ else ++ return false; ++} ++ ++static bool phytium_edp_init_connector(struct phytium_dp_device *phytium_dp) ++{ ++ enum drm_connector_status status; ++ struct drm_connector *connector = &phytium_dp->connector; ++ ++ phytium_edp_panel_poweron(phytium_dp); ++ ++ status = phytium_dp_detect_dpcd(phytium_dp); ++ if (status == connector_status_disconnected) ++ return false; ++ ++ status = phytium_dp_set_edid(connector); ++ if (status == connector_status_disconnected) ++ return false; ++ ++ connector->status = status; ++ phytium_dp->max_link_rate = phytium_dp->common_rates[phytium_dp->num_common_rates-1]; ++ phytium_dp->max_link_lane_count = phytium_dp->common_max_lane_count; ++ phytium_dp->link_rate = phytium_dp->max_link_rate; ++ phytium_dp->link_lane_count = phytium_dp->max_link_lane_count; ++ DRM_DEBUG_KMS("common_max_lane_count: %d, common_max_rate:%d\n", ++ phytium_dp->max_link_lane_count, phytium_dp->max_link_rate); ++ ++ return true; ++} ++ ++int phytium_dp_resume(struct drm_device *drm_dev) ++{ ++ struct phytium_dp_device *phytium_dp; ++ struct drm_encoder *encoder; ++ int ret = 0; ++ ++ drm_for_each_encoder(encoder, drm_dev) { ++ phytium_dp = encoder_to_dp_device(encoder); ++ if (phytium_dp->is_edp) { ++ phytium_edp_backlight_off(phytium_dp); ++ phytium_edp_panel_poweroff(phytium_dp); ++ } ++ ret = phytium_dp_hw_init(phytium_dp); ++ if (ret) { ++ DRM_ERROR("failed to initialize dp %d\n", phytium_dp->port); ++ return -EIO; ++ } ++ } ++ ++ return 0; ++} ++ ++int phytium_dp_init(struct 
drm_device *dev, int port) ++{ ++ struct phytium_display_private *priv = dev->dev_private; ++ struct phytium_dp_device *phytium_dp = NULL; ++ int ret, type; ++ ++ DRM_DEBUG_KMS("%s: port %d\n", __func__, port); ++ phytium_dp = kzalloc(sizeof(*phytium_dp), GFP_KERNEL); ++ if (!phytium_dp) { ++ ret = -ENOMEM; ++ goto failed_malloc_dp; ++ } ++ ++ phytium_dp->dev = dev; ++ phytium_dp->port = port; ++ phytium_dp_hw_set_source_rate_and_lane_count(phytium_dp); ++ ++ if (IS_X100(priv)) { ++ x100_dp_func_register(phytium_dp); ++ priv->dp_reg_base[port] = X100_DP_BASE(port); ++ priv->phy_access_base[port] = X100_PHY_ACCESS_BASE(port); ++ } ++ ++ if (phytium_dp_is_edp(phytium_dp, port)) { ++ phytium_dp->is_edp = true; ++ type = DRM_MODE_CONNECTOR_eDP; ++ phytium_dp_panel_init_backlight_funcs(phytium_dp); ++ phytium_edp_backlight_off(phytium_dp); ++ phytium_edp_panel_poweroff(phytium_dp); ++ } else { ++ phytium_dp->is_edp = false; ++ type = DRM_MODE_CONNECTOR_DisplayPort; ++ } ++ ++ ret = phytium_dp_hw_init(phytium_dp); ++ if (ret) { ++ DRM_ERROR("failed to initialize dp %d\n", phytium_dp->port); ++ goto failed_init_dp; ++ } ++ ++ ret = drm_encoder_init(dev, &phytium_dp->encoder, ++ &phytium_encoder_funcs, ++ DRM_MODE_ENCODER_TMDS, "DP %d", port); ++ if (ret) { ++ DRM_ERROR("failed to initialize encoder with drm\n"); ++ goto failed_encoder_init; ++ } ++ drm_encoder_helper_add(&phytium_dp->encoder, &phytium_encoder_helper_funcs); ++ phytium_dp->encoder.possible_crtcs = phytium_get_encoder_crtc_mask(phytium_dp, port); ++ ++ phytium_dp->connector.dpms = DRM_MODE_DPMS_OFF; ++ phytium_dp->connector.polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; ++ ret = drm_connector_init(dev, &phytium_dp->connector, &phytium_connector_funcs, ++ type); ++ if (ret) { ++ DRM_ERROR("failed to initialize connector with drm\n"); ++ goto failed_connector_init; ++ } ++ drm_connector_helper_add(&phytium_dp->connector, &phytium_connector_helper_funcs); ++ 
drm_connector_attach_encoder(&phytium_dp->connector, &phytium_dp->encoder); ++ ++ ret = phytium_dp_audio_codec_init(phytium_dp); ++ if (ret) { ++ DRM_ERROR("failed to initialize audio codec\n"); ++ goto failed_connector_init; ++ } ++ ++ phytium_dp->train_retry_count = 0; ++ INIT_WORK(&phytium_dp->train_retry_work, phytium_dp_train_retry_work_fn); ++ drm_connector_register(&phytium_dp->connector); ++ ++ return 0; ++failed_connector_init: ++failed_encoder_init: ++failed_init_dp: ++ kfree(phytium_dp); ++failed_malloc_dp: ++ return ret; ++} +diff --git a/drivers/gpu/drm/phytium/phytium_dp.h b/drivers/gpu/drm/phytium/phytium_dp.h +new file mode 100644 +index 000000000000..e1cf6c8483ad +--- /dev/null ++++ b/drivers/gpu/drm/phytium/phytium_dp.h +@@ -0,0 +1,153 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Phytium display drm driver ++ * ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. ++ */ ++ ++#ifndef __PHYTIUM_DP_H__ ++#define __PHYTIUM_DP_H__ ++ ++#include ++#include ++#include ++ ++struct phytium_dp_device; ++ ++#include "phytium_panel.h" ++ ++struct audio_info { ++ int sample_rate; ++ int channels; ++ int sample_width; ++}; ++ ++struct dp_audio_n_m { ++ int sample_rate; ++ int link_rate; ++ u16 m; ++ u16 n; ++}; ++ ++struct phytium_dp_compliance { ++ unsigned long test_type; ++ uint32_t test_link_rate; ++ u8 test_lane_count; ++ bool test_active; ++ u8 reserve[2]; ++}; ++ ++struct phytium_dp_func { ++ int (*dp_hw_reset)(struct phytium_dp_device *phytium_dp); ++ bool (*dp_hw_spread_is_enable)(struct phytium_dp_device *phytium_dp); ++ int (*dp_hw_set_backlight)(struct phytium_dp_device *phytium_dp, uint32_t level); ++ uint32_t (*dp_hw_get_backlight)(struct phytium_dp_device *phytium_dp); ++ void (*dp_hw_disable_backlight)(struct phytium_dp_device *phytium_dp); ++ void (*dp_hw_enable_backlight)(struct phytium_dp_device *phytium_dp); ++ void (*dp_hw_poweroff_panel)(struct phytium_dp_device *phytium_dp); ++ void (*dp_hw_poweron_panel)(struct phytium_dp_device 
*phytium_dp); ++ int (*dp_hw_init_phy)(struct phytium_dp_device *phytium_dp); ++ void (*dp_hw_set_phy_lane_setting)(struct phytium_dp_device *phytium_dp, ++ uint32_t link_rate, uint8_t train_set); ++ int (*dp_hw_set_phy_lane_and_rate)(struct phytium_dp_device *phytium_dp, ++ uint8_t link_lane_count, ++ uint32_t link_rate); ++}; ++ ++struct phytium_dp_hpd_state { ++ bool hpd_event_state; ++ bool hpd_irq_state; ++ bool hpd_raw_state; ++ bool hpd_irq_enable; ++}; ++ ++struct phytium_dp_device { ++ struct drm_device *dev; ++ struct drm_encoder encoder; ++ struct drm_connector connector; ++ int port; ++ struct drm_display_mode mode; ++ bool link_trained; ++ bool detect_done; ++ bool is_edp; ++ bool reserve0; ++ struct drm_dp_aux aux; ++ unsigned char dpcd[DP_RECEIVER_CAP_SIZE]; ++ uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]; ++ unsigned char downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; ++ unsigned char sink_count; ++ ++ int *source_rates; ++ int num_source_rates; ++ int sink_rates[DP_MAX_SUPPORTED_RATES]; ++ int num_sink_rates; ++ int common_rates[DP_MAX_SUPPORTED_RATES]; ++ int num_common_rates; ++ ++ int source_max_lane_count; ++ int sink_max_lane_count; ++ int common_max_lane_count; ++ ++ int max_link_rate; ++ int max_link_lane_count; ++ int link_rate; ++ int link_lane_count; ++ struct work_struct train_retry_work; ++ int train_retry_count; ++ uint32_t trigger_train_fail; ++ ++ unsigned char train_set[4]; ++ struct edid *detect_edid; ++ bool has_audio; ++ bool fast_train_support; ++ bool hw_spread_enable; ++ bool reserve[1]; ++ struct platform_device *audio_pdev; ++ struct audio_info audio_info; ++ hdmi_codec_plugged_cb plugged_cb; ++ struct device *codec_dev; ++ struct phytium_dp_compliance compliance; ++ struct phytium_dp_func *funcs; ++ struct phytium_dp_hpd_state dp_hpd_state; ++ ++ struct phytium_panel panel; ++ struct drm_display_mode native_mode; ++}; ++ ++union phytium_phy_tp { ++ struct { ++ /* DpcdPhyTestPatterns. 
This field is 2 bits for DP1.1 ++ * and 3 bits for DP1.2. ++ */ ++ uint8_t PATTERN :3; ++ uint8_t RESERVED :5; ++ } bits; ++ uint8_t raw; ++}; ++ ++/* PHY test patterns ++ * The order of test patterns follows DPCD register PHY_TEST_PATTERN (0x248) ++ */ ++enum phytium_dpcd_phy_tp { ++ PHYTIUM_PHY_TP_NONE = 0, ++ PHYTIUM_PHY_TP_D10_2, ++ PHYTIUM_PHY_TP_SYMBOL_ERROR, ++ PHYTIUM_PHY_TP_PRBS7, ++ PHYTIUM_PHY_TP_80BIT_CUSTOM, ++ PHYTIUM_PHY_TP_CP2520_1, ++ PHYTIUM_PHY_TP_CP2520_2, ++ PHYTIUM_PHY_TP_CP2520_3, ++}; ++#define encoder_to_dp_device(x) container_of(x, struct phytium_dp_device, encoder) ++#define connector_to_dp_device(x) container_of(x, struct phytium_dp_device, connector) ++#define panel_to_dp_device(x) container_of(x, struct phytium_dp_device, panel) ++#define train_retry_to_dp_device(x) container_of(x, struct phytium_dp_device, train_retry_work) ++void phytium_phy_writel(struct phytium_dp_device *phytium_dp, uint32_t address, uint32_t data); ++uint32_t phytium_phy_readl(struct phytium_dp_device *phytium_dp, uint32_t address); ++ ++int phytium_dp_init(struct drm_device *dev, int pipe); ++int phytium_dp_resume(struct drm_device *drm_dev); ++void phytium_dp_hpd_irq_setup(struct drm_device *dev, bool enable); ++irqreturn_t phytium_dp_hpd_irq_handler(struct phytium_display_private *priv); ++void phytium_dp_hpd_work_func(struct work_struct *work); ++const struct dp_audio_n_m *phytium_dp_audio_get_n_m(int link_rate, int sample_rate); ++#endif /* __PHYTIUM_DP_H__ */ +diff --git a/drivers/gpu/drm/phytium/phytium_fb.c b/drivers/gpu/drm/phytium/phytium_fb.c +new file mode 100644 +index 000000000000..fecca2cd3b8c +--- /dev/null ++++ b/drivers/gpu/drm/phytium/phytium_fb.c +@@ -0,0 +1,123 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* Phytium display drm driver ++ * ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. 
++ */ ++ ++#include ++#include ++#include "phytium_display_drv.h" ++#include "phytium_fb.h" ++#include "phytium_gem.h" ++ ++static int ++phytium_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file_priv, ++ unsigned int *handle) ++{ ++ struct phytium_framebuffer *phytium_fb = to_phytium_framebuffer(fb); ++ ++ return drm_gem_handle_create(file_priv, &phytium_fb->phytium_gem_obj[0]->base, handle); ++} ++ ++static void phytium_fb_destroy(struct drm_framebuffer *fb) ++{ ++ struct phytium_framebuffer *phytium_fb = to_phytium_framebuffer(fb); ++ int i, num_planes; ++ struct drm_gem_object *obj = NULL; ++ ++ num_planes = drm_format_num_planes(fb->format->format); ++ ++ for (i = 0; i < num_planes; i++) { ++ obj = &phytium_fb->phytium_gem_obj[i]->base; ++ if (obj) ++ drm_gem_object_unreference_unlocked(obj); ++ } ++ ++ drm_framebuffer_cleanup(fb); ++ kfree(phytium_fb); ++} ++ ++static struct drm_framebuffer_funcs viv_fb_funcs = { ++ .create_handle = phytium_fb_create_handle, ++ .destroy = phytium_fb_destroy, ++}; ++ ++struct phytium_framebuffer * ++phytium_fb_alloc(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd, ++ struct phytium_gem_object **phytium_gem_obj, unsigned int num_planes) ++{ ++ struct phytium_framebuffer *phytium_fb; ++ int ret = 0, i; ++ ++ phytium_fb = kzalloc(sizeof(*phytium_fb), GFP_KERNEL); ++ if (!phytium_fb) ++ return ERR_PTR(-ENOMEM); ++ ++ drm_helper_mode_fill_fb_struct(dev, &phytium_fb->base, mode_cmd); ++ ++ ret = drm_framebuffer_init(dev, &phytium_fb->base, &viv_fb_funcs); ++ ++ if (ret) { ++ DRM_ERROR("Failed to initialize framebuffer: %d\n", ret); ++ kfree(phytium_fb); ++ return ERR_PTR(ret); ++ } ++ ++ for (i = 0; i < num_planes; i++) ++ phytium_fb->phytium_gem_obj[i] = phytium_gem_obj[i]; ++ ++ return phytium_fb; ++} ++ ++struct drm_framebuffer * ++phytium_fb_create(struct drm_device *dev, struct drm_file *file_priv, ++ const struct drm_mode_fb_cmd2 *mode_cmd) ++{ ++ int ret = 0, i, num_planes; ++ struct 
drm_gem_object *obj; ++ unsigned int hsub, vsub, size; ++ struct phytium_gem_object *phytium_gem_obj[PHYTIUM_FORMAT_MAX_PLANE] = {0}; ++ struct phytium_framebuffer *phytium_fb; ++ struct phytium_display_private *priv = dev->dev_private; ++ ++ hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format); ++ vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format); ++ num_planes = min(drm_format_num_planes(mode_cmd->pixel_format), PHYTIUM_FORMAT_MAX_PLANE); ++ for (i = 0; i < num_planes; i++) { ++ unsigned int height = mode_cmd->height / (i ? vsub : 1); ++ ++ size = height * mode_cmd->pitches[i] + mode_cmd->offsets[i]; ++ obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]); ++ if (!obj) { ++ DRM_ERROR("Failed to lookup GEM object\n"); ++ ret = -ENXIO; ++ goto error; ++ } ++ ++ if (obj->size < size) { ++ drm_gem_object_unreference_unlocked(obj); ++ ret = -EINVAL; ++ goto error; ++ } ++ ++ phytium_gem_obj[i] = to_phytium_gem_obj(obj); ++ ++ ret = priv->dc_hw_fb_format_check(mode_cmd, i); ++ if (ret < 0) ++ { i++; goto error; } /* drop the reference taken for plane i too */ ++ } ++ ++ phytium_fb = phytium_fb_alloc(dev, mode_cmd, phytium_gem_obj, i); ++ if (IS_ERR(phytium_fb)) { ++ DRM_DEBUG_KMS("phytium_fb_alloc failed\n"); ++ ret = PTR_ERR(phytium_fb); ++ goto error; ++ } ++ ++ return &phytium_fb->base; ++error: ++ for (i--; i >= 0; i--) ++ drm_gem_object_unreference_unlocked(&phytium_gem_obj[i]->base); ++ ++ return ERR_PTR(ret); ++} +diff --git a/drivers/gpu/drm/phytium/phytium_fb.h b/drivers/gpu/drm/phytium/phytium_fb.h +new file mode 100644 +index 000000000000..c11c6c009b13 +--- /dev/null ++++ b/drivers/gpu/drm/phytium/phytium_fb.h +@@ -0,0 +1,24 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Phytium display drm driver ++ * ++ * Copyright (C) 2021 Phytium Technology Co., Ltd.
++ */ ++ ++#ifndef __PHYTIUM_FB_H__ ++#define __PHYTIUM_FB_H__ ++ ++struct phytium_framebuffer { ++ struct drm_framebuffer base; ++ struct phytium_gem_object *phytium_gem_obj[PHYTIUM_FORMAT_MAX_PLANE]; ++}; ++ ++#define to_phytium_framebuffer(fb) container_of(fb, struct phytium_framebuffer, base) ++ ++struct phytium_framebuffer *phytium_fb_alloc(struct drm_device *dev, ++ const struct drm_mode_fb_cmd2 *mode_cmd, ++ struct phytium_gem_object **phytium_gem_obj, ++ unsigned int num_planes); ++ ++struct drm_framebuffer *phytium_fb_create(struct drm_device *dev, struct drm_file *file_priv, ++ const struct drm_mode_fb_cmd2 *mode_cmd); ++#endif /* __PHYTIUM_FB_H__ */ +diff --git a/drivers/gpu/drm/phytium/phytium_fbdev.c b/drivers/gpu/drm/phytium/phytium_fbdev.c +new file mode 100644 +index 000000000000..8eb16b3d7c70 +--- /dev/null ++++ b/drivers/gpu/drm/phytium/phytium_fbdev.c +@@ -0,0 +1,158 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* Phytium display drm driver ++ * ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include "phytium_display_drv.h" ++#include "phytium_gem.h" ++#include "phytium_fb.h" ++ ++ ++#define PHYTIUM_MAX_CONNECTOR 1 ++#define helper_to_drm_private(x) container_of(x, struct phytium_display_private, fbdev_helper) ++ ++static int phytium_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma) ++{ ++ struct drm_fb_helper *helper = info->par; ++ struct phytium_display_private *priv = helper_to_drm_private(helper); ++ ++ return phytium_gem_mmap_obj(&priv->fbdev_phytium_gem->base, vma); ++} ++ ++static struct fb_ops phytium_fbdev_ops = { ++ .owner = THIS_MODULE, ++ DRM_FB_HELPER_DEFAULT_OPS, ++ .fb_mmap = phytium_fbdev_mmap, ++ .fb_fillrect = drm_fb_helper_cfb_fillrect, ++ .fb_copyarea = drm_fb_helper_cfb_copyarea, ++ .fb_imageblit = drm_fb_helper_cfb_imageblit, ++}; ++ ++static int ++phytium_drm_fbdev_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) ++{ ++ struct phytium_display_private *priv = helper_to_drm_private(helper); ++ struct drm_device *dev = helper->dev; ++ unsigned int bytes_per_pixel; ++ struct drm_mode_fb_cmd2 mode_cmd = {0}; ++ struct phytium_framebuffer *phytium_fb = NULL; ++ struct fb_info *fbi = NULL; ++ struct drm_framebuffer *fb = NULL; ++ size_t size = 0; ++ int ret = 0; ++ unsigned long offset; ++ ++ bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8); ++ mode_cmd.width = sizes->surface_width; ++ mode_cmd.height = sizes->surface_height; ++ mode_cmd.pitches[0] = ALIGN(sizes->surface_width * bytes_per_pixel, 128); ++ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); ++ size = PAGE_ALIGN(mode_cmd.pitches[0] * mode_cmd.height); ++ ++ ret = mutex_lock_interruptible(&dev->struct_mutex); ++ if (ret < 0) { ++ DRM_ERROR("failed to get mutex lock\n"); ++ return ret; ++ } ++ ++ priv->fbdev_phytium_gem = phytium_gem_create_object(dev, size); ++ if (IS_ERR(priv->fbdev_phytium_gem)) { ++ DRM_ERROR("failed to create
 gem object\n"); ++ mutex_unlock(&dev->struct_mutex); return PTR_ERR(priv->fbdev_phytium_gem); ++ } ++ mutex_unlock(&dev->struct_mutex); ++ ++ fbi = drm_fb_helper_alloc_fbi(helper); ++ if (IS_ERR(fbi)) { ++ DRM_DEV_ERROR(dev->dev, "Failed to create framebuffer info."); ++ ret = PTR_ERR(fbi); ++ goto out; ++ } ++ ++ phytium_fb = phytium_fb_alloc(dev, &mode_cmd, &priv->fbdev_phytium_gem, 1); ++ if (IS_ERR(phytium_fb)) { ++ DRM_DEV_ERROR(dev->dev, "Failed to alloc DRM framebuffer.\n"); ++ ret = PTR_ERR(phytium_fb); ++ goto out; ++ } ++ ++ helper->fb = &(phytium_fb->base); ++ fbi->par = helper; ++ fbi->flags = FBINFO_FLAG_DEFAULT; ++ fbi->fbops = &phytium_fbdev_ops; ++ ++ fb = helper->fb; ++ drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth); ++ drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height); ++ ++ offset = fbi->var.xoffset * bytes_per_pixel; ++ offset += fbi->var.yoffset * fb->pitches[0]; ++ dev->mode_config.fb_base = 0; ++ fbi->screen_base = priv->fbdev_phytium_gem->vaddr + offset; ++ fbi->screen_size = priv->fbdev_phytium_gem->base.size; ++ fbi->fix.smem_len = priv->fbdev_phytium_gem->base.size; ++ DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%pa offset=%ld size=%zu\n", fb->width, fb->height, ++ fb->format->depth, &priv->fbdev_phytium_gem->iova, offset, size); ++ fbi->skip_vt_switch = true; ++ ++ return 0; ++out: ++ phytium_gem_free_object(&priv->fbdev_phytium_gem->base); ++ return ret; ++} ++ ++static const struct drm_fb_helper_funcs phytium_drm_fb_helper_funcs = { ++ .fb_probe = phytium_drm_fbdev_create, ++}; ++ ++int phytium_drm_fbdev_init(struct drm_device *dev) ++{ ++ struct phytium_display_private *priv = dev->dev_private; ++ struct drm_fb_helper *helper; ++ int ret; ++ ++ if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector) ++ return -EINVAL; ++ ++ helper = &priv->fbdev_helper; ++ drm_fb_helper_prepare(dev, helper, &phytium_drm_fb_helper_funcs); ++ ++ ret = drm_fb_helper_init(dev, helper, PHYTIUM_MAX_CONNECTOR); ++ if (ret < 0) { ++ DRM_DEV_ERROR(dev->dev, "Failed to initialize
drm fb helper -ret %d\n", ret); ++ return ret; ++ } ++ ++ ret = drm_fb_helper_single_add_all_connectors(helper); ++ if (ret < 0) { ++ DRM_DEV_ERROR(dev->dev, "Failed to add connectors - %d\n", ret); ++ goto err_drm_fb_helper_fini; ++ } ++ ++ ret = drm_fb_helper_initial_config(helper, 32); ++ if (ret < 0) goto err_drm_fb_helper_fini; return 0; ++ ++err_drm_fb_helper_fini: ++ drm_fb_helper_fini(helper); ++ return ret; ++} ++ ++void phytium_drm_fbdev_fini(struct drm_device *dev) ++{ ++ struct phytium_display_private *priv = dev->dev_private; ++ struct drm_fb_helper *helper; ++ ++ helper = &priv->fbdev_helper; ++ drm_fb_helper_unregister_fbi(helper); ++ ++ if (helper->fb) ++ drm_framebuffer_put(helper->fb); ++ ++ drm_fb_helper_fini(helper); ++} +diff --git a/drivers/gpu/drm/phytium/phytium_fbdev.h b/drivers/gpu/drm/phytium/phytium_fbdev.h +new file mode 100644 +index 000000000000..d291d82c2706 +--- /dev/null ++++ b/drivers/gpu/drm/phytium/phytium_fbdev.h +@@ -0,0 +1,13 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Phytium display drm driver ++ * ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. ++ */ ++ ++#ifndef _PHYTIUM_FBDEV_H ++#define _PHYTIUM_FBDEV_H ++ ++int phytium_drm_fbdev_init(struct drm_device *dev); ++void phytium_drm_fbdev_fini(struct drm_device *dev); ++ ++#endif /* _PHYTIUM_FBDEV_H */ +diff --git a/drivers/gpu/drm/phytium/phytium_gem.c b/drivers/gpu/drm/phytium/phytium_gem.c +new file mode 100644 +index 000000000000..bd0b85e64bbc +--- /dev/null ++++ b/drivers/gpu/drm/phytium/phytium_gem.c +@@ -0,0 +1,281 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* Phytium display drm driver ++ * ++ * Copyright (C) 2021 Phytium Technology Co., Ltd.
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include "phytium_display_drv.h" ++#include "phytium_gem.h" ++ ++struct sg_table * ++phytium_gem_prime_get_sg_table(struct drm_gem_object *obj) ++{ ++ struct phytium_gem_object *phytium_gem_obj = to_phytium_gem_obj(obj); ++ struct sg_table *sgt; ++ struct drm_device *dev = obj->dev; ++ int ret; ++ ++ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); ++ if (!sgt) { ++ DRM_DEBUG_KMS("malloc sgt fail\n"); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ ret = dma_get_sgtable_attrs(dev->dev, sgt, phytium_gem_obj->vaddr, ++ phytium_gem_obj->iova, phytium_gem_obj->size, ++ DMA_ATTR_WRITE_COMBINE); ++ if (ret) { ++ DRM_ERROR("failed to allocate sgt, %d\n", ret); ++ kfree(sgt); ++ return ERR_PTR(ret); ++ } ++ ++ return sgt; ++} ++ ++struct drm_gem_object * ++phytium_gem_prime_import_sg_table(struct drm_device *dev, ++ struct dma_buf_attachment *attach, ++ struct sg_table *sgt) ++{ ++ struct phytium_gem_object *phytium_gem_obj = NULL; ++ struct scatterlist *s; ++ dma_addr_t expected; ++ int ret, i; ++ ++ phytium_gem_obj = kzalloc(sizeof(*phytium_gem_obj), GFP_KERNEL); ++ if (!phytium_gem_obj) { ++ DRM_ERROR("failed to allocate phytium_gem_obj\n"); ++ ret = -ENOMEM; ++ goto failed_malloc; ++ } ++ ++ ret = drm_gem_object_init(dev, &phytium_gem_obj->base, attach->dmabuf->size); ++ if (ret) { ++ DRM_ERROR("failed to initialize drm gem object: %d\n", ret); ++ goto failed_object_init; ++ } ++ ++ expected = sg_dma_address(sgt->sgl); ++ for_each_sg(sgt->sgl, s, sgt->nents, i) { ++ if (sg_dma_address(s) != expected) { ++ DRM_ERROR("sg_table is not contiguous"); ++ ret = -EINVAL; ++ goto failed_check_continue; ++ } ++ expected = sg_dma_address(s) + sg_dma_len(s); ++ } ++ ++ phytium_gem_obj->iova = sg_dma_address(sgt->sgl); ++ phytium_gem_obj->sgt = sgt; ++ ++ return &phytium_gem_obj->base; ++failed_check_continue: ++ drm_gem_object_release(&phytium_gem_obj->base); ++failed_object_init: ++ kfree(phytium_gem_obj); 
++failed_malloc: ++ return ERR_PTR(ret); ++} ++ ++void *phytium_gem_prime_vmap(struct drm_gem_object *obj) ++{ ++ struct phytium_gem_object *phytium_obj = to_phytium_gem_obj(obj); ++ ++ return phytium_obj->vaddr; ++} ++ ++void phytium_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) ++{ ++} ++ ++int phytium_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) ++{ ++ int ret = 0; ++ ++ ret = drm_gem_mmap_obj(obj, obj->size, vma); ++ if (ret < 0) ++ return ret; ++ ++ return phytium_gem_mmap_obj(obj, vma); ++} ++ ++int phytium_gem_suspend(struct drm_device *drm_dev) ++{ ++ struct phytium_display_private *priv = drm_dev->dev_private; ++ struct phytium_gem_object *phytium_gem_obj = NULL; ++ ++ list_for_each_entry(phytium_gem_obj, &priv->gem_list_head, list) { ++ if (!phytium_gem_obj->is_vram) ++ continue; ++ ++ phytium_gem_obj->vaddr_save = vmalloc(phytium_gem_obj->size); ++ if (!phytium_gem_obj->vaddr_save) ++ goto malloc_failed; ++ ++ memcpy(phytium_gem_obj->vaddr_save, phytium_gem_obj->vaddr, phytium_gem_obj->size); ++ } ++ ++ return 0; ++malloc_failed: ++ list_for_each_entry(phytium_gem_obj, &priv->gem_list_head, list) { ++ if (!phytium_gem_obj->is_vram) ++ continue; ++ ++ if (phytium_gem_obj->vaddr_save) { ++ vfree(phytium_gem_obj->vaddr_save); ++ phytium_gem_obj->vaddr_save = NULL; ++ } ++ } ++ return -ENOMEM; ++} ++ ++void phytium_gem_resume(struct drm_device *drm_dev) ++{ ++ struct phytium_display_private *priv = drm_dev->dev_private; ++ struct phytium_gem_object *phytium_gem_obj = NULL; ++ ++ list_for_each_entry(phytium_gem_obj, &priv->gem_list_head, list) { ++ if (!phytium_gem_obj->is_vram) ++ continue; ++ ++ memcpy(phytium_gem_obj->vaddr, phytium_gem_obj->vaddr_save, phytium_gem_obj->size); ++ vfree(phytium_gem_obj->vaddr_save); ++ phytium_gem_obj->vaddr_save = NULL; ++ } ++} ++ ++void phytium_gem_free_object(struct drm_gem_object *obj) ++{ ++ struct phytium_gem_object *phytium_gem_obj = to_phytium_gem_obj(obj); ++ struct drm_device 
*dev = obj->dev; ++ ++ DRM_DEBUG_KMS("free phytium_gem_obj iova:0x%pa size:0x%lx\n", ++ &phytium_gem_obj->iova, phytium_gem_obj->size); ++ if (phytium_gem_obj->vaddr) { ++ dma_free_attrs(dev->dev, phytium_gem_obj->size, phytium_gem_obj->vaddr, ++ phytium_gem_obj->iova, 0); ++ list_del(&phytium_gem_obj->list); ++ } ++ else if (obj->import_attach) ++ drm_prime_gem_destroy(obj, phytium_gem_obj->sgt); ++ drm_gem_object_release(obj); ++ kfree(phytium_gem_obj); ++} ++ ++int phytium_gem_mmap_obj(struct drm_gem_object *obj, struct vm_area_struct *vma) ++{ ++ int ret = 0; ++ struct phytium_gem_object *phytium_gem_obj = to_phytium_gem_obj(obj); ++ ++ /* ++ * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the ++ * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map ++ * the whole buffer. ++ */ ++ vma->vm_flags &= ~VM_PFNMAP; ++ vma->vm_pgoff = 0; ++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); ++ ++ if (phytium_gem_obj->is_vram) ++ ret = dma_mmap_attrs(obj->dev->dev, vma, phytium_gem_obj->vaddr, ++ phytium_gem_obj->iova, vma->vm_end - vma->vm_start, ++ DMA_ATTR_WRITE_COMBINE); ++ else ++ ret = dma_mmap_attrs(obj->dev->dev, vma, phytium_gem_obj->vaddr, ++ phytium_gem_obj->iova, vma->vm_end - vma->vm_start, 0); ++ if (ret) ++ drm_gem_vm_close(vma); ++ ++ return ret; ++} ++ ++int phytium_gem_mmap(struct file *filp, struct vm_area_struct *vma) ++{ ++ int ret = 0; ++ ++ ret = drm_gem_mmap(filp, vma); ++ if (ret < 0) ++ return ret; ++ ++ return phytium_gem_mmap_obj(vma->vm_private_data, vma); ++} ++ ++int phytium_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev, uint32_t handle) ++{ ++ return drm_gem_dumb_destroy(file, dev, handle); ++} ++ ++struct phytium_gem_object *phytium_gem_create_object(struct drm_device *dev, unsigned long size) ++{ ++ struct phytium_gem_object *phytium_gem_obj = NULL; ++ struct phytium_display_private *priv = dev->dev_private; ++ int ret = 0; ++ ++ phytium_gem_obj = kzalloc(sizeof(*phytium_gem_obj), 
GFP_KERNEL); ++ if (!phytium_gem_obj) { ++ DRM_ERROR("failed to allocate phytium_gem_obj\n"); ++ ret = -ENOMEM; ++ goto error; ++ } ++ ++ ret = drm_gem_object_init(dev, &phytium_gem_obj->base, size); ++ if (ret) { ++ DRM_ERROR("failed to initialize drm gem object: %d\n", ret); ++ goto failed_object_init; ++ } ++ ++ phytium_gem_obj->vaddr = dma_alloc_attrs(dev->dev, size, &phytium_gem_obj->iova, ++ GFP_KERNEL, 0); ++ if (!phytium_gem_obj->vaddr) { ++ DRM_ERROR("fail to allocate buffer with size %lx\n", size); ++ ret = -ENOMEM; ++ goto failed_dma_alloc; ++ } ++ ++ phytium_gem_obj->size = size; ++ phytium_gem_obj->is_vram = priv->vram_support; ++ list_add_tail(&phytium_gem_obj->list, &priv->gem_list_head); ++ DRM_DEBUG_KMS("phytium_gem_obj iova:0x%pa size:0x%lx\n", ++ &phytium_gem_obj->iova, phytium_gem_obj->size); ++ return phytium_gem_obj; ++failed_dma_alloc: ++ drm_gem_object_unreference_unlocked(&phytium_gem_obj->base); ++ return ERR_PTR(ret); ++failed_object_init: ++ kfree(phytium_gem_obj); ++error: ++ return ERR_PTR(ret); ++} ++ ++int phytium_gem_dumb_create(struct drm_file *file, struct drm_device *dev, ++ struct drm_mode_create_dumb *args) ++{ ++ int size = 0; ++ struct phytium_gem_object *phytium_gem_obj = NULL; ++ int ret = 0; ++ ++ args->pitch = ALIGN(args->width*DIV_ROUND_UP(args->bpp, 8), 128); ++ args->size = args->pitch * args->height; ++ size = PAGE_ALIGN(args->size); ++ phytium_gem_obj = phytium_gem_create_object(dev, size); ++ if (IS_ERR(phytium_gem_obj)) ++ return PTR_ERR(phytium_gem_obj); ++ ret = drm_gem_handle_create(file, &phytium_gem_obj->base, &args->handle); ++ if (ret) { ++ DRM_ERROR("failed to drm_gem_handle_create\n"); ++ goto failed_gem_handle; ++ } ++ drm_gem_object_unreference_unlocked(&phytium_gem_obj->base); ++ ++ return 0; ++failed_gem_handle: ++ phytium_gem_free_object(&phytium_gem_obj->base); ++ return ret; ++} +diff --git a/drivers/gpu/drm/phytium/phytium_gem.h b/drivers/gpu/drm/phytium/phytium_gem.h +new file mode 100644 +index 
000000000000..b1d6b54ebf2f +--- /dev/null ++++ b/drivers/gpu/drm/phytium/phytium_gem.h +@@ -0,0 +1,42 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Phytium display drm driver ++ * ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. ++ */ ++ ++#ifndef __PHYTIUM_GEM_H__ ++#define __PHYTIUM_GEM_H__ ++ ++#include ++ ++struct phytium_gem_object { ++ struct drm_gem_object base; ++ dma_addr_t iova; ++ void *vaddr; ++ unsigned long size; ++ struct sg_table *sgt; ++ bool is_vram; ++ bool reserve[3]; ++ struct list_head list; ++ void *vaddr_save; ++}; ++ ++#define to_phytium_gem_obj(obj) container_of(obj, struct phytium_gem_object, base) ++ ++int phytium_gem_mmap_obj(struct drm_gem_object *obj, struct vm_area_struct *vma); ++int phytium_gem_mmap(struct file *filp, struct vm_area_struct *vma); ++void phytium_gem_free_object(struct drm_gem_object *obj); ++struct sg_table *phytium_gem_prime_get_sg_table(struct drm_gem_object *obj); ++struct drm_gem_object *phytium_gem_prime_import_sg_table(struct drm_device *dev, ++ struct dma_buf_attachment *attach, struct sg_table *sgt); ++void phytium_gem_free_object(struct drm_gem_object *obj); ++int phytium_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev, unsigned int handle); ++struct phytium_gem_object *phytium_gem_create_object(struct drm_device *dev, unsigned long size); ++int phytium_gem_dumb_create(struct drm_file *file, struct drm_device *dev, ++ struct drm_mode_create_dumb *args); ++void *phytium_gem_prime_vmap(struct drm_gem_object *obj); ++void phytium_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); ++int phytium_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); ++int phytium_gem_suspend(struct drm_device *drm_dev); ++void phytium_gem_resume(struct drm_device *drm_dev); ++#endif /* __PHYTIUM_GEM_H__ */ +diff --git a/drivers/gpu/drm/phytium/phytium_panel.c b/drivers/gpu/drm/phytium/phytium_panel.c +new file mode 100644 +index 000000000000..ed16ed15197d +--- /dev/null ++++ 
b/drivers/gpu/drm/phytium/phytium_panel.c +@@ -0,0 +1,421 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* Phytium display drm driver ++ * ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include "phytium_display_drv.h" ++#include "phytium_dp.h" ++#include "phytium_panel.h" ++ ++static int ++phytium_dp_aux_set_backlight(struct phytium_panel *panel, unsigned int level) ++{ ++ struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); ++ unsigned char vals[2] = { 0x0 }; ++ ++ vals[0] = level; ++ if (phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) { ++ vals[0] = (level & 0xFF00) >> 8; ++ vals[1] = (level & 0xFF); ++ } ++ ++ if (drm_dp_dpcd_write(&phytium_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, ++ vals, sizeof(vals)) < 0) { ++ DRM_DEBUG_KMS("Failed to write aux backlight level\n"); ++ return -EIO; ++ } ++ ++ return 0; ++} ++ ++static unsigned int phytium_dp_aux_get_backlight(struct phytium_panel *panel) ++{ ++ unsigned char read_val[2] = { 0x0 }; ++ unsigned int level = 0; ++ struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); ++ ++ if (drm_dp_dpcd_read(&phytium_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, ++ &read_val, sizeof(read_val)) < 0) { ++ DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n", ++ DP_EDP_BACKLIGHT_BRIGHTNESS_MSB); ++ return 0; ++ } ++ ++ level = read_val[0]; ++ if (phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) ++ level = (read_val[0] << 8 | read_val[1]); ++ ++ return level; ++} ++ ++static void set_aux_backlight_enable(struct phytium_panel *panel, bool enable) ++{ ++ u8 reg_val = 0; ++ struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); ++ ++ if (!(phytium_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP)) ++ return; ++ ++ if (drm_dp_dpcd_readb(&phytium_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER, ++ &reg_val) < 0) { ++ DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n", ++ DP_EDP_DISPLAY_CONTROL_REGISTER); ++ 
return; ++ } ++ ++ if (enable) ++ reg_val |= DP_EDP_BACKLIGHT_ENABLE; ++ else ++ reg_val &= ~(DP_EDP_BACKLIGHT_ENABLE); ++ ++ if (drm_dp_dpcd_writeb(&phytium_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER, ++ reg_val) != 1) { ++ DRM_DEBUG_KMS("Failed to %s aux backlight\n", ++ enable ? "enable" : "disable"); ++ } ++} ++ ++static void phytium_dp_aux_enable_backlight(struct phytium_panel *panel) ++{ ++ unsigned char dpcd_buf, new_dpcd_buf, edp_backlight_mode; ++ struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); ++ ++ if (drm_dp_dpcd_readb(&phytium_dp->aux, ++ DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) != 1) { ++ DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n", ++ DP_EDP_BACKLIGHT_MODE_SET_REGISTER); ++ return; ++ } ++ ++ new_dpcd_buf = dpcd_buf; ++ edp_backlight_mode = dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK; ++ ++ switch (edp_backlight_mode) { ++ case DP_EDP_BACKLIGHT_CONTROL_MODE_PWM: ++ case DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET: ++ case DP_EDP_BACKLIGHT_CONTROL_MODE_PRODUCT: ++ new_dpcd_buf &= ~DP_EDP_BACKLIGHT_CONTROL_MODE_MASK; ++ new_dpcd_buf |= DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD; ++ break; ++ ++ /* Do nothing when it is already DPCD mode */ ++ case DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD: ++ default: ++ break; ++ } ++ ++ if (new_dpcd_buf != dpcd_buf) { ++ if (drm_dp_dpcd_writeb(&phytium_dp->aux, ++ DP_EDP_BACKLIGHT_MODE_SET_REGISTER, new_dpcd_buf) < 0) { ++ DRM_DEBUG_KMS("Failed to write aux backlight mode\n"); ++ } ++ } ++ ++ set_aux_backlight_enable(panel, true); ++ phytium_dp_aux_set_backlight(panel, panel->level); ++} ++ ++static void phytium_dp_aux_disable_backlight(struct phytium_panel *panel) ++{ ++ set_aux_backlight_enable(panel, false); ++} ++ ++static void phytium_dp_aux_setup_backlight(struct phytium_panel *panel) ++{ ++ struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); ++ ++ if (phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) ++ phytium_dp->panel.max = 0xFFFF; ++ else ++ phytium_dp->panel.max = 
0xFF; ++ ++ phytium_dp->panel.min = 0; ++ phytium_dp->panel.level = phytium_dp_aux_get_backlight(panel); ++ phytium_dp->panel.backlight_enabled = (phytium_dp->panel.level != 0); ++} ++ ++static void phytium_dp_hw_poweron_panel(struct phytium_panel *panel) ++{ ++ struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); ++ ++ phytium_dp->funcs->dp_hw_poweron_panel(phytium_dp); ++} ++ ++static void phytium_dp_hw_poweroff_panel(struct phytium_panel *panel) ++{ ++ struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); ++ ++ phytium_dp->funcs->dp_hw_poweroff_panel(phytium_dp); ++} ++ ++static int ++phytium_dp_hw_set_backlight(struct phytium_panel *panel, uint32_t level) ++{ ++ int ret; ++ struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); ++ ++ ret = phytium_dp->funcs->dp_hw_set_backlight(phytium_dp, level); ++ ++ return ret; ++} ++ ++static uint32_t phytium_dp_hw_get_backlight(struct phytium_panel *panel) ++{ ++ uint32_t ret; ++ struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); ++ ++ ret = phytium_dp->funcs->dp_hw_get_backlight(phytium_dp); ++ ++ return ret; ++} ++ ++static void phytium_dp_hw_enable_backlight(struct phytium_panel *panel) ++{ ++ struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); ++ ++ phytium_dp->funcs->dp_hw_set_backlight(phytium_dp, phytium_dp->panel.level); ++ phytium_dp->funcs->dp_hw_enable_backlight(phytium_dp); ++} ++ ++static void phytium_dp_hw_disable_backlight(struct phytium_panel *panel) ++{ ++ struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); ++ ++ phytium_dp->funcs->dp_hw_disable_backlight(phytium_dp); ++} ++ ++static void phytium_dp_hw_setup_backlight(struct phytium_panel *panel) ++{ ++ struct drm_device *dev = panel->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ ++ panel->max = priv->info.backlight_max; ++ panel->min = 0; ++ panel->level = phytium_dp_hw_get_backlight(panel); ++} ++ ++void phytium_dp_panel_init_backlight_funcs(struct 
phytium_dp_device *phytium_dp) ++{ ++ if (phytium_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP && ++ (phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP) && ++ !(phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP)) { ++ DRM_DEBUG_KMS("AUX Backlight Control Supported!\n"); ++ phytium_dp->panel.setup_backlight = phytium_dp_aux_setup_backlight; ++ phytium_dp->panel.enable_backlight = phytium_dp_aux_enable_backlight; ++ phytium_dp->panel.disable_backlight = phytium_dp_aux_disable_backlight; ++ phytium_dp->panel.set_backlight = phytium_dp_aux_set_backlight; ++ phytium_dp->panel.get_backlight = phytium_dp_aux_get_backlight; ++ } else { ++ DRM_DEBUG_KMS("SE Backlight Control Supported!\n"); ++ phytium_dp->panel.setup_backlight = phytium_dp_hw_setup_backlight; ++ phytium_dp->panel.enable_backlight = phytium_dp_hw_enable_backlight; ++ phytium_dp->panel.disable_backlight = phytium_dp_hw_disable_backlight; ++ phytium_dp->panel.set_backlight = phytium_dp_hw_set_backlight; ++ phytium_dp->panel.get_backlight = phytium_dp_hw_get_backlight; ++ } ++ phytium_dp->panel.poweron = phytium_dp_hw_poweron_panel; ++ phytium_dp->panel.poweroff = phytium_dp_hw_poweroff_panel; ++ mutex_init(&phytium_dp->panel.panel_lock); ++ phytium_dp->panel.dev = phytium_dp->dev; ++ ++ /* Upper limits from eDP 1.3 spec */ ++ phytium_dp->panel.panel_power_up_delay = 210; /* t1_t3 */ ++ phytium_dp->panel.backlight_on_delay = 50; /* t7 */ ++ phytium_dp->panel.backlight_off_delay = 50; ++ phytium_dp->panel.panel_power_down_delay = 500; /* t10 */ ++ phytium_dp->panel.panel_power_cycle_delay = 510; /* t11 + t12 */ ++} ++ ++void phytium_dp_panel_release_backlight_funcs(struct phytium_dp_device *phytium_dp) ++{ ++ phytium_dp->panel.setup_backlight = NULL; ++ phytium_dp->panel.enable_backlight = NULL; ++ phytium_dp->panel.disable_backlight = NULL; ++ phytium_dp->panel.set_backlight = NULL; ++ phytium_dp->panel.get_backlight = NULL; ++ phytium_dp->panel.poweron = NULL; ++ 
phytium_dp->panel.poweroff = NULL; ++} ++ ++void phytium_panel_enable_backlight(struct phytium_panel *panel) ++{ ++ ++ if (panel->enable_backlight) { ++ mutex_lock(&panel->panel_lock); ++ msleep(panel->backlight_on_delay); ++ panel->enable_backlight(panel); ++ panel->backlight_enabled = true; ++ mutex_unlock(&panel->panel_lock); ++ } ++} ++ ++void phytium_panel_disable_backlight(struct phytium_panel *panel) ++{ ++ if (panel->disable_backlight) { ++ mutex_lock(&panel->panel_lock); ++ panel->disable_backlight(panel); ++ panel->backlight_enabled = false; ++ msleep(panel->backlight_off_delay); ++ mutex_unlock(&panel->panel_lock); ++ } ++} ++ ++void phytium_panel_poweron(struct phytium_panel *panel) ++{ ++ if (panel->poweron) { ++ mutex_lock(&panel->panel_lock); ++ panel->poweron(panel); ++ panel->power_enabled = true; ++ msleep(panel->panel_power_up_delay); ++ mutex_unlock(&panel->panel_lock); ++ } ++} ++ ++void phytium_panel_poweroff(struct phytium_panel *panel) ++{ ++ if (panel->poweroff) { ++ mutex_lock(&panel->panel_lock); ++ msleep(panel->panel_power_down_delay); ++ panel->poweroff(panel); ++ panel->power_enabled = false; ++ mutex_unlock(&panel->panel_lock); ++ } ++} ++ ++static uint32_t phytium_scale(uint32_t source_val, ++ uint32_t source_min, uint32_t source_max, ++ uint32_t target_min, uint32_t target_max) ++{ ++ uint64_t target_val; ++ ++ WARN_ON(source_min > source_max); ++ WARN_ON(target_min > target_max); ++ ++ /* defensive */ ++ source_val = clamp(source_val, source_min, source_max); ++ ++ /* avoid overflows */ ++ target_val = mul_u32_u32(source_val - source_min, target_max - target_min); ++ target_val = DIV_ROUND_CLOSEST_ULL(target_val, source_max - source_min); ++ target_val += target_min; ++ ++ return target_val; ++} ++ ++static inline uint32_t ++phytium_scale_hw_to_user(struct phytium_panel *panel, uint32_t hw_level, uint32_t user_max) ++{ ++ return phytium_scale(hw_level, panel->min, panel->max, ++ 0, user_max); ++} ++ ++static inline uint32_t 
++phytium_scale_user_to_hw(struct phytium_panel *panel, u32 user_level, u32 user_max) ++{ ++ return phytium_scale(user_level, 0, user_max, ++ panel->min, panel->max); ++} ++ ++static int phytium_backlight_device_update_status(struct backlight_device *bd) ++{ ++ struct phytium_panel *panel = bl_get_data(bd); ++ struct drm_device *dev = panel->dev; ++ uint32_t hw_level = 0; ++ int ret = 0; ++ ++ DRM_DEBUG_KMS("updating phytium_backlight, brightness=%d/%d\n", ++ bd->props.brightness, bd->props.max_brightness); ++ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); ++ hw_level = phytium_scale_user_to_hw(panel, bd->props.brightness, bd->props.max_brightness); ++ ++ if ((panel->set_backlight) && (panel->backlight_enabled)) { ++ mutex_lock(&panel->panel_lock); ++ ret = panel->set_backlight(panel, hw_level); ++ panel->level = hw_level; ++ mutex_unlock(&panel->panel_lock); ++ } ++ drm_modeset_unlock(&dev->mode_config.connection_mutex); ++ ++ return ret; ++} ++ ++static int phytium_backlight_device_get_brightness(struct backlight_device *bd) ++{ ++ struct phytium_panel *panel = bl_get_data(bd); ++ struct drm_device *dev = panel->dev; ++ uint32_t hw_level = 0; ++ int ret; ++ ++ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); ++ if (panel->get_backlight && panel->backlight_enabled) { ++ mutex_lock(&panel->panel_lock); ++ hw_level = panel->get_backlight(panel); ++ panel->level = hw_level; ++ mutex_unlock(&panel->panel_lock); ++ } ++ drm_modeset_unlock(&dev->mode_config.connection_mutex); ++ ret = phytium_scale_hw_to_user(panel, hw_level, bd->props.max_brightness); ++ DRM_DEBUG_KMS("get phytium_backlight, brightness=%d/%d\n", ++ ret, bd->props.max_brightness); ++ ++ return ret; ++} ++ ++static const struct backlight_ops phytium_backlight_device_ops = { ++ .update_status = phytium_backlight_device_update_status, ++ .get_brightness = phytium_backlight_device_get_brightness, ++}; ++ ++int phytium_edp_backlight_device_register(struct phytium_dp_device 
*phytium_dp) ++{ ++ struct backlight_properties props; ++ char bl_name[16]; ++ ++ if (phytium_dp->panel.setup_backlight) { ++ mutex_lock(&phytium_dp->panel.panel_lock); ++ phytium_dp->panel.setup_backlight(&phytium_dp->panel); ++ mutex_unlock(&phytium_dp->panel.panel_lock); ++ } else { ++ return -EINVAL; ++ } ++ ++ memset(&props, 0, sizeof(props)); ++ props.max_brightness = PHYTIUM_MAX_BL_LEVEL; ++ props.type = BACKLIGHT_RAW; ++ props.brightness = phytium_scale_hw_to_user(&phytium_dp->panel, phytium_dp->panel.level, ++ props.max_brightness); ++ snprintf(bl_name, sizeof(bl_name), "phytium_bl%d", phytium_dp->port); ++ ++ phytium_dp->panel.bl_device = ++ backlight_device_register(bl_name, ++ phytium_dp->connector.kdev, ++ &phytium_dp->panel, ++ &phytium_backlight_device_ops, ++ &props); ++ ++ if (IS_ERR(phytium_dp->panel.bl_device)) { ++ DRM_ERROR("Failed to register backlight: %ld\n", ++ PTR_ERR(phytium_dp->panel.bl_device)); ++ phytium_dp->panel.bl_device = NULL; ++ return -ENODEV; ++ } ++ ++ DRM_DEBUG_KMS("Connector %s backlight sysfs interface registered\n", ++ phytium_dp->connector.name); ++ ++ return 0; ++} ++ ++void phytium_edp_backlight_device_unregister(struct phytium_dp_device *phytium_dp) ++{ ++ if (phytium_dp->panel.bl_device) { ++ backlight_device_unregister(phytium_dp->panel.bl_device); ++ phytium_dp->panel.bl_device = NULL; ++ } ++} +diff --git a/drivers/gpu/drm/phytium/phytium_panel.h b/drivers/gpu/drm/phytium/phytium_panel.h +new file mode 100644 +index 000000000000..e2d5f068064a +--- /dev/null ++++ b/drivers/gpu/drm/phytium/phytium_panel.h +@@ -0,0 +1,46 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Phytium display drm driver ++ * ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. 
++ */ ++ ++#ifndef __PHYTIUM_PANEL_H__ ++#define __PHYTIUM_PANEL_H__ ++#include "phytium_dp.h" ++ ++#define PHYTIUM_MAX_BL_LEVEL 0xFF ++ ++struct phytium_panel { ++ struct drm_device *dev; ++ bool backlight_enabled; ++ bool power_enabled; ++ bool reserve1[2]; ++ unsigned int min; ++ unsigned int level; ++ unsigned int max; ++ struct backlight_device *bl_device; ++ void (*setup_backlight)(struct phytium_panel *panel); ++ uint32_t (*get_backlight)(struct phytium_panel *panel); ++ int (*set_backlight)(struct phytium_panel *panel, uint32_t level); ++ void (*disable_backlight)(struct phytium_panel *panel); ++ void (*enable_backlight)(struct phytium_panel *panel); ++ void (*poweron)(struct phytium_panel *panel); ++ void (*poweroff)(struct phytium_panel *panel); ++ struct mutex panel_lock; ++ uint32_t panel_power_up_delay; ++ uint32_t backlight_on_delay; ++ uint32_t backlight_off_delay; ++ uint32_t panel_power_down_delay; ++ uint32_t panel_power_cycle_delay; ++}; ++ ++void phytium_dp_panel_init_backlight_funcs(struct phytium_dp_device *phytium_dp); ++void phytium_panel_release_backlight_funcs(struct phytium_dp_device *phytium_dp); ++int phytium_edp_backlight_device_register(struct phytium_dp_device *phytium_dp); ++void phytium_edp_backlight_device_unregister(struct phytium_dp_device *phytium_dp); ++void phytium_panel_enable_backlight(struct phytium_panel *panel); ++void phytium_panel_disable_backlight(struct phytium_panel *panel); ++void phytium_panel_poweron(struct phytium_panel *panel); ++void phytium_panel_poweroff(struct phytium_panel *panel); ++ ++#endif /* __PHYTIUM_PANEL_H__ */ +diff --git a/drivers/gpu/drm/phytium/phytium_pci.c b/drivers/gpu/drm/phytium/phytium_pci.c +new file mode 100644 +index 000000000000..72fe10b242dd +--- /dev/null ++++ b/drivers/gpu/drm/phytium/phytium_pci.c +@@ -0,0 +1,284 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* Phytium display drm driver ++ * ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. 
++ */ ++ ++#include "phytium_display_drv.h" ++#include "phytium_pci.h" ++#include "phytium_dp.h" ++#include "x100_dc.h" ++#include "x100_dp.h" ++ ++int dc_msi_enable; ++module_param(dc_msi_enable, int, 0644); ++MODULE_PARM_DESC(dc_msi_enable, "Enable DC msi interrupt (0-disabled; 1-enabled; default-0)"); ++ ++void phytium_pci_vram_hw_init(struct phytium_display_private *priv) ++{ ++ struct phytium_pci_private *pci_priv = to_pci_priv(priv); ++ ++ pci_priv->dc_hw_vram_init(priv, pci_priv->vram_addr, pci_priv->vram_size); ++} ++ ++int phytium_pci_vram_init(struct pci_dev *pdev, struct phytium_display_private *priv) ++{ ++ resource_size_t vram_addr, vram_size; ++ int ret = 0; ++ struct phytium_pci_private *pci_priv = to_pci_priv(priv); ++ ++ vram_addr = pci_resource_start(pdev, 2); ++ vram_size = pci_resource_len(pdev, 2); ++ if ((vram_addr != 0) && (vram_size != 0)) { ++ DRM_DEBUG_KMS("vram_addr:0x%llx vram_size: 0x%llx\n", vram_addr, vram_size); ++ ret = dma_declare_coherent_memory(&pdev->dev, vram_addr, vram_addr, ++ vram_size, DMA_MEMORY_EXCLUSIVE); ++ if (ret) { ++ DRM_ERROR("pci bar2 vram declare fail\n"); ++ ret = -1; ++ goto failed_declare_memory; ++ } ++ pci_priv->vram_addr = vram_addr; ++ pci_priv->vram_size = vram_size; ++ priv->vram_support = true; ++ priv->vram_hw_init = phytium_pci_vram_hw_init; ++ } else { ++ DRM_DEBUG_KMS("not support vram\n"); ++ pci_priv->vram_addr = 0; ++ pci_priv->vram_size = 0; ++ priv->vram_support = false; ++ priv->vram_hw_init = NULL; ++ } ++ ++failed_declare_memory: ++ return ret; ++} ++ ++void phytium_pci_vram_fini(struct pci_dev *pdev, struct phytium_display_private *priv) ++{ ++ if (priv->vram_support) ++ dma_release_declared_memory(&pdev->dev); ++} ++ ++static struct phytium_display_private* ++phytium_pci_private_init(struct pci_dev *pdev, const struct pci_device_id *ent) ++{ ++ struct drm_device *dev = pci_get_drvdata(pdev); ++ struct phytium_display_private *priv = NULL; ++ struct phytium_pci_private *pci_priv = NULL; ++ 
struct phytium_device_info *phytium_info = (struct phytium_device_info *)ent->driver_data; ++ int i = 0; ++ resource_size_t io_addr, io_size; ++ ++ pci_priv = devm_kzalloc(&pdev->dev, sizeof(*pci_priv), GFP_KERNEL); ++ if (!pci_priv) { ++ DRM_ERROR("no memory to allocate for drm_display_private\n"); ++ goto failed_malloc_priv; ++ } ++ ++ memset(pci_priv, 0, sizeof(*pci_priv)); ++ priv = &pci_priv->base; ++ phytium_display_private_init(priv, dev); ++ ++ memcpy(&(priv->info), phytium_info, sizeof(struct phytium_device_info)); ++ DRM_DEBUG_KMS("priv->info.num_pipes :%d\n", priv->info.num_pipes); ++ priv->info.pipe_mask = ((pdev->subsystem_device >> PIPE_MASK_SHIFT) & PIPE_MASK_MASK); ++ priv->info.edp_mask = ((pdev->subsystem_device >> EDP_MASK_SHIFT) & EDP_MASK_MASK); ++ priv->info.num_pipes = 0; ++ for_each_pipe_masked(priv, i) ++ priv->info.num_pipes++; ++ if (priv->info.num_pipes == 0) { ++ DRM_ERROR("num_pipes is zero, so exit init\n"); ++ goto failed_init_numpipe; ++ } ++ ++ io_addr = pci_resource_start(pdev, 0); ++ io_size = pci_resource_len(pdev, 0); ++ priv->regs = ioremap(io_addr, io_size); ++ if (priv->regs == NULL) { ++ DRM_ERROR("pci bar0 ioremap fail, addr:0x%llx, size:0x%llx\n", io_addr, io_size); ++ goto failed_ioremap; ++ } ++ ++ priv->irq = pdev->irq; ++ if (IS_X100(priv)) { ++ pci_priv->dc_hw_vram_init = x100_dc_hw_vram_init; ++ priv->dc_hw_clear_msi_irq = x100_dc_hw_clear_msi_irq; ++ priv->dc_hw_fb_format_check = x100_dc_hw_fb_format_check; ++ } ++ ++ return priv; ++ ++failed_ioremap: ++failed_init_numpipe: ++ devm_kfree(&pdev->dev, pci_priv); ++failed_malloc_priv: ++ return NULL; ++} ++ ++static void ++phytium_pci_private_fini(struct pci_dev *pdev, struct phytium_display_private *priv) ++{ ++ struct phytium_pci_private *pci_priv = to_pci_priv(priv); ++ ++ if (priv->regs) ++ iounmap(priv->regs); ++ ++ devm_kfree(&pdev->dev, pci_priv); ++} ++ ++static int phytium_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ++{ ++ struct 
phytium_display_private *priv = NULL; ++ struct drm_device *dev = NULL; ++ int ret = 0; ++ ++ dev = drm_dev_alloc(&phytium_display_drm_driver, &pdev->dev); ++ if (IS_ERR(dev)) { ++ DRM_ERROR("failed to allocate drm_device\n"); ++ return PTR_ERR(dev); ++ } ++ dev->pdev = pdev; ++ pci_set_drvdata(pdev, dev); ++ pci_set_master(pdev); ++ ret = pci_enable_device(pdev); ++ if (ret) { ++ DRM_ERROR("pci enbale device fail\n"); ++ goto failed_enable_device; ++ } ++ ++ if (dc_msi_enable) { ++ ret = pci_enable_msi(pdev); ++ if (ret) ++ DRM_ERROR("pci enbale msi fail\n"); ++ } ++ ++ dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); ++ ++ priv = phytium_pci_private_init(pdev, ent); ++ if (priv) ++ dev->dev_private = priv; ++ else ++ goto failed_pci_private_init; ++ ++ ret = phytium_pci_vram_init(pdev, priv); ++ if (ret) { ++ DRM_ERROR("failed to init pci vram\n"); ++ goto failed_pci_vram_init; ++ } ++ ++ ret = drm_dev_register(dev, 0); ++ if (ret) { ++ DRM_ERROR("failed to register drm dev\n"); ++ goto failed_register_drm; ++ } ++ ++ phytium_dp_hpd_irq_setup(dev, true); ++ ++ return 0; ++ ++failed_register_drm: ++ phytium_pci_vram_fini(pdev, priv); ++failed_pci_vram_init: ++ phytium_pci_private_fini(pdev, priv); ++failed_pci_private_init: ++ if (pdev->msi_enabled) ++ pci_disable_msi(pdev); ++ pci_disable_device(pdev); ++failed_enable_device: ++ pci_set_drvdata(pdev, NULL); ++ drm_dev_unref(dev); ++ ++ return -1; ++} ++ ++static void phytium_pci_remove(struct pci_dev *pdev) ++{ ++ struct drm_device *dev = pci_get_drvdata(pdev); ++ struct phytium_display_private *priv = dev->dev_private; ++ ++ phytium_dp_hpd_irq_setup(dev, false); ++ cancel_work_sync(&priv->hotplug_work); ++ drm_dev_unregister(dev); ++ phytium_pci_vram_fini(pdev, priv); ++ phytium_pci_private_fini(pdev, priv); ++ if (pdev->msi_enabled) ++ pci_disable_msi(pdev); ++ pci_disable_device(pdev); ++ pci_set_drvdata(pdev, NULL); ++ drm_dev_unref(dev); ++} ++ ++static void phytium_pci_shutdown(struct pci_dev *pdev) ++{ ++ 
struct drm_device *dev = pci_get_drvdata(pdev); ++ struct phytium_display_private *priv = dev->dev_private; ++ ++ priv->display_shutdown(dev); ++} ++ ++static int phytium_pci_pm_suspend(struct device *dev) ++{ ++ struct pci_dev *pdev = to_pci_dev(dev); ++ struct drm_device *drm_dev = pci_get_drvdata(pdev); ++ struct phytium_display_private *priv = drm_dev->dev_private; ++ int ret = 0; ++ ++ ret = priv->display_pm_suspend(drm_dev); ++ if (ret < 0) ++ goto out; ++ ++ pci_save_state(pdev); ++ pci_disable_device(pdev); ++ pci_set_power_state(pdev, PCI_D3hot); ++ udelay(200); ++ ++out: ++ return ret; ++} ++ ++static int phytium_pci_pm_resume(struct device *dev) ++{ ++ struct pci_dev *pdev = to_pci_dev(dev); ++ struct drm_device *drm_dev = pci_get_drvdata(pdev); ++ struct phytium_display_private *priv = drm_dev->dev_private; ++ int ret = 0; ++ ++ pci_set_power_state(pdev, PCI_D0); ++ pci_restore_state(pdev); ++ ret = pci_enable_device(pdev); ++ if (ret) ++ return ret; ++ pci_set_master(pdev); ++ ++ return priv->display_pm_resume(drm_dev); ++} ++ ++static const struct dev_pm_ops phytium_pci_pm_ops = { ++ SET_SYSTEM_SLEEP_PM_OPS(phytium_pci_pm_suspend, phytium_pci_pm_resume) ++}; ++ ++static const struct phytium_device_info x100_info = { ++ .platform_mask = BIT(PHYTIUM_PLATFORM_X100), ++ .total_pipes = 3, ++ .crtc_clock_max = X100_DC_PIX_CLOCK_MAX, ++ .hdisplay_max = X100_DC_HDISPLAY_MAX, ++ .vdisplay_max = X100_DC_VDISPLAY_MAX, ++ .address_mask = X100_DC_ADDRESS_MASK, ++ .backlight_max = X100_DP_BACKLIGHT_MAX, ++}; ++ ++static const struct pci_device_id phytium_display_pci_ids[] = { ++ { PCI_VDEVICE(PHYTIUM, 0xdc22), (kernel_ulong_t)&x100_info }, ++ { /* End: all zeroes */ } ++}; ++MODULE_DEVICE_TABLE(pci, phytium_display_pci_ids); ++ ++struct pci_driver phytium_pci_driver = { ++ .name = "phytium_display_pci", ++ .id_table = phytium_display_pci_ids, ++ .probe = phytium_pci_probe, ++ .remove = phytium_pci_remove, ++ .shutdown = phytium_pci_shutdown, ++ .driver.pm = 
&phytium_pci_pm_ops, ++}; +diff --git a/drivers/gpu/drm/phytium/phytium_pci.h b/drivers/gpu/drm/phytium/phytium_pci.h +new file mode 100644 +index 000000000000..94e3a5e8e95c +--- /dev/null ++++ b/drivers/gpu/drm/phytium/phytium_pci.h +@@ -0,0 +1,23 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Phytium display drm driver ++ * ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. ++ */ ++ ++#ifndef __PHYTIUM_PCI_H__ ++#define __PHYTIUM_PCI_H__ ++ ++#include "phytium_display_drv.h" ++ ++struct phytium_pci_private { ++ struct phytium_display_private base; ++ resource_size_t vram_addr; ++ resource_size_t vram_size; ++ void (*dc_hw_vram_init)(struct phytium_display_private *priv, resource_size_t vram_addr, ++ resource_size_t vram_size); ++}; ++ ++#define to_pci_priv(priv) container_of(priv, struct phytium_pci_private, base) ++ ++extern struct pci_driver phytium_pci_driver; ++#endif /* __PHYTIUM_PCI_H__ */ +diff --git a/drivers/gpu/drm/phytium/phytium_plane.c b/drivers/gpu/drm/phytium/phytium_plane.c +new file mode 100644 +index 000000000000..777bcd137293 +--- /dev/null ++++ b/drivers/gpu/drm/phytium/phytium_plane.c +@@ -0,0 +1,627 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* Phytium display drm driver ++ * ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "phytium_display_drv.h" ++#include "phytium_plane.h" ++#include "phytium_fb.h" ++#include "phytium_gem.h" ++#include "phytium_crtc.h" ++#include "x100_dc.h" ++#include "phytium_reg.h" ++ ++#define PHYTIUM_CURS_W_SIZE 32 ++#define PHYTIUM_CURS_H_SIZE 32 ++ ++void phytium_plane_destroy(struct drm_plane *plane) ++{ ++ struct phytium_plane *phytium_plane = to_phytium_plane(plane); ++ ++ drm_plane_cleanup(plane); ++ kfree(phytium_plane); ++} ++ ++/** ++ * phytium_plane_atomic_get_property - fetch plane property value ++ * @plane: plane to fetch property for ++ * @state: state containing the property value ++ * @property: property to look up ++ * @val: pointer to write property value into ++ * ++ * The DRM core does not store shadow copies of properties for ++ * atomic-capable drivers. This entrypoint is used to fetch ++ * the current value of a driver-specific plane property. ++ */ ++static int ++phytium_plane_atomic_get_property(struct drm_plane *plane, ++ const struct drm_plane_state *state, ++ struct drm_property *property, ++ uint64_t *val) ++{ ++ DRM_DEBUG_KMS("Unknown plane property [PROP:%d:%s]\n", property->base.id, property->name); ++ return -EINVAL; ++} ++ ++/** ++ * phytium_plane_atomic_set_property - set plane property value ++ * @plane: plane to set property for ++ * @state: state to update property value in ++ * @property: property to set ++ * @val: value to set property to ++ * ++ * Writes the specified property value for a plane into the provided atomic ++ * state object. 
++ * ++ * Returns 0 on success, -EINVAL on unrecognized properties ++ */ ++int ++phytium_plane_atomic_set_property(struct drm_plane *plane, ++ struct drm_plane_state *state, ++ struct drm_property *property, ++ uint64_t val) ++{ ++ DRM_DEBUG_KMS("Unknown plane property [PROP:%d:%s]\n", property->base.id, property->name); ++ return -EINVAL; ++} ++ ++struct drm_plane_state * ++phytium_plane_atomic_duplicate_state(struct drm_plane *plane) ++{ ++ struct drm_plane_state *state = NULL; ++ struct phytium_plane_state *phytium_state = NULL; ++ ++ phytium_state = kmemdup(plane->state, sizeof(*phytium_state), GFP_KERNEL); ++ ++ if (!phytium_state) ++ return NULL; ++ ++ state = &phytium_state->base; ++ if (state->fb) ++ drm_framebuffer_reference(state->fb); ++ ++ state->fence = NULL; ++ state->commit = NULL; ++ ++ return state; ++} ++ ++void ++phytium_plane_atomic_destroy_state(struct drm_plane *plane, struct drm_plane_state *state) ++{ ++ struct phytium_plane_state *phytium_state = to_phytium_plane_state(state); ++ ++ __drm_atomic_helper_plane_destroy_state(state); ++ kfree(phytium_state); ++} ++ ++const struct drm_plane_funcs phytium_plane_funcs = { ++ .update_plane = drm_atomic_helper_update_plane, ++ .disable_plane = drm_atomic_helper_disable_plane, ++ .destroy = phytium_plane_destroy, ++ .reset = drm_atomic_helper_plane_reset, ++ .atomic_get_property = phytium_plane_atomic_get_property, ++ .atomic_set_property = phytium_plane_atomic_set_property, ++ .atomic_duplicate_state = phytium_plane_atomic_duplicate_state, ++ .atomic_destroy_state = phytium_plane_atomic_destroy_state, ++}; ++ ++static int phytium_plane_prepare_fb(struct drm_plane *plane, ++ struct drm_plane_state *state) ++{ ++ struct dma_buf *dma_buf; ++ struct dma_fence *fence; ++ ++ if (!state->fb) ++ return 0; ++ dma_buf = to_phytium_framebuffer(state->fb)->phytium_gem_obj[0]->base.dma_buf; ++ if (dma_buf) { ++ fence = reservation_object_get_excl_rcu(dma_buf->resv); ++ drm_atomic_set_fence_for_plane(state, 
fence); ++ } ++ ++ return 0; ++} ++ ++static int ++phytium_plane_atomic_check(struct drm_plane *plane, struct drm_plane_state *state) ++{ ++ struct drm_device *dev = plane->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ struct drm_framebuffer *fb = state->fb; ++ struct drm_crtc *crtc = state->crtc; ++ struct drm_crtc_state *crtc_state; ++ int src_x, src_y, src_w, src_h; ++ unsigned long base_offset; ++ struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); ++ ++ if ((!fb) || (!crtc)) ++ return 0; ++ ++ crtc_state = drm_atomic_get_crtc_state(state->state, crtc); ++ if (IS_ERR(crtc_state)) ++ return PTR_ERR(crtc_state); ++ ++ if (plane->type == DRM_PLANE_TYPE_CURSOR) { ++ src_w = state->src_w >> 16; ++ src_h = state->src_h >> 16; ++ if (phytium_crtc->scale_enable) ++ return -EINVAL; ++ if ((src_w != PHYTIUM_CURS_W_SIZE) || (src_h != PHYTIUM_CURS_H_SIZE)) { ++ DRM_INFO("Invalid cursor size(%d, %d)\n", src_w, src_h); ++ return -EINVAL; ++ } ++ } else if (plane->type == DRM_PLANE_TYPE_PRIMARY) { ++ src_x = state->src_x >> 16; ++ src_y = state->src_y >> 16; ++ src_w = state->src_w >> 16; ++ src_h = state->src_h >> 16; ++ ++ base_offset = src_x * fb->format->cpp[0] + src_y*fb->pitches[0]; ++ if (base_offset & (priv->info.address_mask)) { ++ DRM_ERROR("fb base address is not aligned by 0x%lx byte\n", ++ priv->info.address_mask); ++ return -EINVAL; ++ } ++ ++ if (src_w != state->crtc_w || src_h != state->crtc_h) { ++ DRM_ERROR("scale not support: crtc_w(0x%x)/h(0x%x) src_w(0x%x)/h(0x%x)\n", ++ state->crtc_w, state->crtc_h, src_w, src_h); ++ return -EINVAL; ++ } ++ ++ if ((state->crtc_x < 0) || (state->crtc_y < 0)) { ++ DRM_ERROR("crtc_x(0x%x)/y(0x%x) of drm plane state is invalid\n", ++ state->crtc_x, state->crtc_y); ++ return -EINVAL; ++ } ++ ++ if ((state->crtc_x + state->crtc_w > crtc_state->adjusted_mode.hdisplay) ++ || (state->crtc_y + state->crtc_h > crtc_state->adjusted_mode.vdisplay)) { ++ DRM_ERROR("plane out of crtc region\n"); ++ return 
-EINVAL; ++ } ++ } ++ ++ return 0; ++} ++ ++static void phytium_dc_get_plane_parameter(struct drm_plane *plane) ++{ ++ struct phytium_plane *phytium_plane = to_phytium_plane(plane); ++ struct drm_framebuffer *fb = plane->state->fb; ++ struct phytium_framebuffer *phytium_fb = to_phytium_framebuffer(fb); ++ struct phytium_gem_object *phytium_gem_obj = NULL; ++ int i, num_planes = 0; ++ ++ num_planes = drm_format_num_planes(fb->format->format); ++ for (i = 0; i < num_planes; i++) { ++ phytium_gem_obj = phytium_fb->phytium_gem_obj[i]; ++ phytium_plane->iova[i] = phytium_gem_obj->iova + fb->offsets[i]; ++ phytium_plane->size[i] = phytium_gem_obj->size - fb->offsets[i]; ++ ++ if (fb->modifier == DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC) ++ phytium_plane->tiling[i] = FRAMEBUFFER_TILE_MODE0; ++ else if (fb->modifier == DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC) ++ phytium_plane->tiling[i] = FRAMEBUFFER_TILE_MODE3; ++ else if (fb->modifier == DRM_FORMAT_MOD_LINEAR) ++ phytium_plane->tiling[i] = FRAMEBUFFER_LINEAR; ++ else ++ phytium_plane->tiling[i] = FRAMEBUFFER_LINEAR; ++ ++ if (i == 0) { ++ switch (fb->format->format) { ++ case DRM_FORMAT_ARGB2101010: ++ case DRM_FORMAT_ABGR2101010: ++ case DRM_FORMAT_RGBA1010102: ++ case DRM_FORMAT_BGRA1010102: ++ phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB2101010; ++ break; ++ ++ case DRM_FORMAT_ARGB8888: ++ case DRM_FORMAT_ABGR8888: ++ case DRM_FORMAT_RGBA8888: ++ case DRM_FORMAT_BGRA8888: ++ phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB8888; ++ break; ++ ++ case DRM_FORMAT_XRGB8888: ++ case DRM_FORMAT_XBGR8888: ++ case DRM_FORMAT_RGBX8888: ++ case DRM_FORMAT_BGRX8888: ++ phytium_plane->format = FRAMEBUFFER_FORMAT_XRGB8888; ++ break; ++ ++ case DRM_FORMAT_ARGB4444: ++ case DRM_FORMAT_ABGR4444: ++ case DRM_FORMAT_RGBA4444: ++ case DRM_FORMAT_BGRA4444: ++ phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB4444; ++ break; ++ ++ case DRM_FORMAT_XRGB4444: ++ case DRM_FORMAT_XBGR4444: ++ case DRM_FORMAT_RGBX4444: ++ case DRM_FORMAT_BGRX4444: 
++ phytium_plane->format = FRAMEBUFFER_FORMAT_XRGB4444; ++ break; ++ ++ case DRM_FORMAT_ARGB1555: ++ case DRM_FORMAT_ABGR1555: ++ case DRM_FORMAT_RGBA5551: ++ case DRM_FORMAT_BGRA5551: ++ phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB1555; ++ break; ++ ++ case DRM_FORMAT_XRGB1555: ++ case DRM_FORMAT_XBGR1555: ++ case DRM_FORMAT_RGBX5551: ++ case DRM_FORMAT_BGRX5551: ++ phytium_plane->format = FRAMEBUFFER_FORMAT_XRGB1555; ++ break; ++ ++ case DRM_FORMAT_RGB565: ++ case DRM_FORMAT_BGR565: ++ phytium_plane->format = FRAMEBUFFER_FORMAT_RGB565; ++ break; ++ ++ case DRM_FORMAT_YUYV: ++ phytium_plane->format = FRAMEBUFFER_FORMAT_YUYV; ++ break; ++ ++ case DRM_FORMAT_UYVY: ++ phytium_plane->format = FRAMEBUFFER_FORMAT_UYVY; ++ break; ++ case DRM_FORMAT_NV16: ++ phytium_plane->format = FRAMEBUFFER_FORMAT_NV16; ++ break; ++ case DRM_FORMAT_NV12: ++ phytium_plane->format = FRAMEBUFFER_FORMAT_NV12; ++ break; ++ case DRM_FORMAT_NV21: ++ phytium_plane->format = FRAMEBUFFER_FORMAT_NV12; ++ break; ++ default: ++ DRM_ERROR("unsupported pixel format (format = %d)\n", ++ fb->format->format); ++ return; ++ } ++ ++ switch (fb->format->format) { ++ case DRM_FORMAT_ARGB2101010: ++ case DRM_FORMAT_ARGB8888: ++ case DRM_FORMAT_XRGB8888: ++ case DRM_FORMAT_ARGB4444: ++ case DRM_FORMAT_XRGB4444: ++ case DRM_FORMAT_ARGB1555: ++ case DRM_FORMAT_XRGB1555: ++ case DRM_FORMAT_RGB565: ++ phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_ARGB; ++ phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; ++ break; ++ ++ case DRM_FORMAT_ABGR2101010: ++ case DRM_FORMAT_ABGR8888: ++ case DRM_FORMAT_XBGR8888: ++ case DRM_FORMAT_ABGR4444: ++ case DRM_FORMAT_XBGR4444: ++ case DRM_FORMAT_ABGR1555: ++ case DRM_FORMAT_XBGR1555: ++ case DRM_FORMAT_BGR565: ++ phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_ABGR; ++ phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; ++ break; ++ ++ case DRM_FORMAT_RGBA1010102: ++ case DRM_FORMAT_RGBA8888: ++ case DRM_FORMAT_RGBX8888: ++ case DRM_FORMAT_RGBA4444: ++ case 
DRM_FORMAT_RGBX4444: ++ case DRM_FORMAT_RGBA5551: ++ case DRM_FORMAT_RGBX5551: ++ phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_RGBA; ++ phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; ++ break; ++ ++ case DRM_FORMAT_BGRA1010102: ++ case DRM_FORMAT_BGRA8888: ++ case DRM_FORMAT_BGRX8888: ++ case DRM_FORMAT_BGRA4444: ++ case DRM_FORMAT_BGRX4444: ++ case DRM_FORMAT_BGRA5551: ++ case DRM_FORMAT_BGRX5551: ++ phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_BGRA; ++ phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; ++ break; ++ ++ case DRM_FORMAT_YUYV: ++ case DRM_FORMAT_UYVY: ++ case DRM_FORMAT_NV16: ++ case DRM_FORMAT_NV12: ++ phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_ARGB; ++ phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; ++ break; ++ ++ default: ++ DRM_ERROR("unsupported pixel format (format = %d)\n", ++ fb->format->format); ++ return; ++ } ++ } ++ } ++} ++ ++static void phytium_dc_primary_plane_update(struct drm_plane *plane) ++{ ++ struct drm_device *dev = plane->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ struct phytium_plane *phytium_plane = to_phytium_plane(plane); ++ struct drm_framebuffer *fb = plane->state->fb; ++ int phys_pipe = phytium_plane->phys_pipe; ++ int src_x, src_y, crtc_x, crtc_y, crtc_w, crtc_h; ++ unsigned long base_offset; ++ int config; ++ ++ src_x = plane->state->src_x >> 16; ++ src_y = plane->state->src_y >> 16; ++ crtc_x = plane->state->crtc_x; ++ crtc_y = plane->state->crtc_y; ++ crtc_w = plane->state->crtc_w; ++ crtc_h = plane->state->crtc_h; ++ ++ if (phytium_plane->dc_hw_update_dcreq) ++ phytium_plane->dc_hw_update_dcreq(plane); ++ phytium_plane->dc_hw_update_primary_hi_addr(plane); ++ ++ /* config dc */ ++ /* Y */ ++ base_offset = src_x * fb->format->cpp[0] + src_y*fb->pitches[0]; ++ phytium_writel_reg(priv, (phytium_plane->iova[0] + base_offset) & ADDRESS_MASK, ++ priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_Y_ADDRESS); ++ phytium_writel_reg(priv, ALIGN(fb->pitches[0], 128), 
++ priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_Y_STRIDE); ++ ++ /* U */ ++ phytium_writel_reg(priv, phytium_plane->iova[1] & 0xffffffff, ++ priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_U_ADDRESS); ++ phytium_writel_reg(priv, ALIGN(fb->pitches[1], 128), ++ priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_U_STRIDE); ++ ++ /* V */ ++ phytium_writel_reg(priv, phytium_plane->iova[2] & 0xffffffff, ++ priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_V_ADDRESS); ++ phytium_writel_reg(priv, ALIGN(fb->pitches[2], 128), ++ priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_V_STRIDE); ++ ++ /* size */ ++ phytium_writel_reg(priv, (crtc_w & WIDTH_MASK) | ((crtc_h&HEIGHT_MASK) << HEIGHT_SHIFT), ++ priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_SIZE); ++ /* config */ ++ config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], ++ PHYTIUM_DC_FRAMEBUFFER_CONFIG); ++ config &= ~(FRAMEBUFFER_FORMAT_MASK << FRAMEBUFFER_FORMAT_SHIFT); ++ config |= (phytium_plane->format << FRAMEBUFFER_FORMAT_SHIFT); ++ config &= ~(1 << FRAMEBUFFER_UVSWIZZLE_SHIFT); ++ config |= (phytium_plane->uv_swizzle << FRAMEBUFFER_UVSWIZZLE_SHIFT); ++ config &= ~(FRAMEBUFFER_SWIZZLE_MASK << FRAMEBUFFER_SWIZZLE_SHIFT); ++ config |= (phytium_plane->swizzle << FRAMEBUFFER_SWIZZLE_SHIFT); ++ config &= ~(FRAMEBUFFER_TILE_MODE_MASK << FRAMEBUFFER_TILE_MODE_SHIFT); ++ config |= (phytium_plane->tiling[0] << FRAMEBUFFER_TILE_MODE_SHIFT); ++ config &= (~FRAMEBUFFER_CLEAR); ++ phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], ++ PHYTIUM_DC_FRAMEBUFFER_CONFIG); ++} ++ ++static void phytium_dc_cursor_plane_update(struct drm_plane *plane) ++{ ++ struct drm_device *dev = plane->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ struct phytium_plane *phytium_plane = to_phytium_plane(plane); ++ struct drm_framebuffer *fb = plane->state->fb; ++ int phys_pipe = phytium_plane->phys_pipe; ++ int config; ++ unsigned long iova; ++ ++ phytium_plane->enable = 1; ++ 
phytium_plane->cursor_hot_x = fb->hot_x; ++ phytium_plane->cursor_hot_y = fb->hot_y; ++ phytium_plane->cursor_x = plane->state->crtc_x + fb->hot_x; ++ phytium_plane->cursor_y = plane->state->crtc_y + fb->hot_y; ++ ++ config = CURSOR_FORMAT_ARGB8888 | ++ ((phytium_plane->cursor_hot_y & CURSOR_HOT_Y_MASK) << CURSOR_HOT_Y_SHIFT) | ++ ((phytium_plane->cursor_hot_x & CURSOR_HOT_X_MASK) << CURSOR_HOT_X_SHIFT); ++ phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], PHYTIUM_DC_CURSOR_CONFIG); ++ ++ config = ((phytium_plane->cursor_x & CURSOR_X_MASK) << CURSOR_X_SHIFT) | ++ ((phytium_plane->cursor_y & CURSOR_Y_MASK) << CURSOR_Y_SHIFT); ++ phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], ++ PHYTIUM_DC_CURSOR_LOCATION); ++ iova = phytium_plane->iova[0]; ++ phytium_writel_reg(priv, iova & 0xffffffff, priv->dc_reg_base[phys_pipe], ++ PHYTIUM_DC_CURSOR_ADDRESS); ++ if (phytium_plane->dc_hw_update_cursor_hi_addr) ++ phytium_plane->dc_hw_update_cursor_hi_addr(plane, iova); ++} ++ ++static void phytium_plane_atomic_update(struct drm_plane *plane, ++ struct drm_plane_state *old_state) ++{ ++ struct drm_framebuffer *fb, *old_fb; ++ ++ DRM_DEBUG_KMS("update plane: type=%d\n", plane->type); ++ if (!plane->state->crtc || !plane->state->fb) ++ return; ++ ++ fb = plane->state->fb; ++ old_fb = old_state->fb; ++ ++ if (fb) ++ drm_framebuffer_reference(fb); ++ if (old_fb) ++ drm_framebuffer_unreference(old_fb); ++ ++ phytium_dc_get_plane_parameter(plane); ++ ++ if (plane->type == DRM_PLANE_TYPE_PRIMARY) ++ phytium_dc_primary_plane_update(plane); ++ else if (plane->type == DRM_PLANE_TYPE_CURSOR) ++ phytium_dc_cursor_plane_update(plane); ++} ++ ++static void phytium_plane_atomic_disable(struct drm_plane *plane, ++ struct drm_plane_state *old_state) ++{ ++ struct drm_device *dev = plane->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ struct phytium_plane *phytium_plane = to_phytium_plane(plane); ++ int phys_pipe = phytium_plane->phys_pipe; ++ int 
config; ++ struct drm_framebuffer *old_fb; ++ ++ old_fb = old_state->fb; ++ if (old_fb) ++ drm_framebuffer_unreference(old_fb); ++ ++ if (plane->type == DRM_PLANE_TYPE_PRIMARY) { ++ phytium_writel_reg(priv, CLEAR_VALUE_RED, priv->dc_reg_base[phys_pipe], ++ PHYTIUM_DC_FRAMEBUFFER_CLEARVALUE); ++ config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], ++ PHYTIUM_DC_FRAMEBUFFER_CONFIG); ++ config |= FRAMEBUFFER_CLEAR; ++ phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], ++ PHYTIUM_DC_FRAMEBUFFER_CONFIG); ++ } else if (plane->type == DRM_PLANE_TYPE_CURSOR) { ++ phytium_writel_reg(priv, CURSOR_FORMAT_DISABLED, ++ priv->dc_reg_base[phys_pipe], PHYTIUM_DC_CURSOR_CONFIG); ++ } ++} ++ ++const struct drm_plane_helper_funcs phytium_plane_helper_funcs = { ++ .prepare_fb = phytium_plane_prepare_fb, ++ .atomic_check = phytium_plane_atomic_check, ++ .atomic_update = phytium_plane_atomic_update, ++ .atomic_disable = phytium_plane_atomic_disable, ++}; ++ ++struct phytium_plane *phytium_primary_plane_create(struct drm_device *dev, int phys_pipe) ++{ ++ struct phytium_display_private *priv = dev->dev_private; ++ struct phytium_plane *phytium_plane = NULL; ++ struct phytium_plane_state *phytium_plane_state = NULL; ++ int ret = 0; ++ unsigned int flags = 0; ++ const uint32_t *formats = NULL; ++ uint32_t format_count; ++ const uint64_t *format_modifiers; ++ ++ phytium_plane = kzalloc(sizeof(*phytium_plane), GFP_KERNEL); ++ if (!phytium_plane) { ++ ret = -ENOMEM; ++ goto failed_malloc_plane; ++ } ++ ++ phytium_plane_state = kzalloc(sizeof(*phytium_plane_state), GFP_KERNEL); ++ if (!phytium_plane_state) { ++ ret = -ENOMEM; ++ goto failed_malloc_plane_state; ++ } ++ phytium_plane_state->base.plane = &phytium_plane->base; ++ phytium_plane_state->base.rotation = DRM_MODE_ROTATE_0; ++ phytium_plane->base.state = &phytium_plane_state->base; ++ phytium_plane->phys_pipe = phys_pipe; ++ ++ if (IS_X100(priv)) { ++ phytium_plane->dc_hw_plane_get_format = 
x100_dc_hw_plane_get_primary_format; ++ phytium_plane->dc_hw_update_dcreq = x100_dc_hw_update_dcreq; ++ phytium_plane->dc_hw_update_primary_hi_addr = x100_dc_hw_update_primary_hi_addr; ++ phytium_plane->dc_hw_update_cursor_hi_addr = NULL; ++ } ++ ++ phytium_plane->dc_hw_plane_get_format(&format_modifiers, &formats, &format_count); ++ ret = drm_universal_plane_init(dev, &phytium_plane->base, 0x0, ++ &phytium_plane_funcs, formats, ++ format_count, ++ format_modifiers, ++ DRM_PLANE_TYPE_PRIMARY, "primary %d", phys_pipe); ++ ++ if (ret) ++ goto failed_plane_init; ++ ++ flags = DRM_MODE_ROTATE_0; ++ drm_plane_create_rotation_property(&phytium_plane->base, DRM_MODE_ROTATE_0, flags); ++ drm_plane_helper_add(&phytium_plane->base, &phytium_plane_helper_funcs); ++ ++ return phytium_plane; ++failed_plane_init: ++ kfree(phytium_plane_state); ++failed_malloc_plane_state: ++ kfree(phytium_plane); ++failed_malloc_plane: ++ return ERR_PTR(ret); ++} ++ ++struct phytium_plane *phytium_cursor_plane_create(struct drm_device *dev, int phys_pipe) ++{ ++ struct phytium_display_private *priv = dev->dev_private; ++ struct phytium_plane *phytium_plane = NULL; ++ struct phytium_plane_state *phytium_plane_state = NULL; ++ int ret = 0; ++ unsigned int flags = 0; ++ const uint32_t *formats = NULL; ++ uint32_t format_count; ++ const uint64_t *format_modifiers; ++ ++ phytium_plane = kzalloc(sizeof(*phytium_plane), GFP_KERNEL); ++ if (!phytium_plane) { ++ ret = -ENOMEM; ++ goto failed_malloc_plane; ++ } ++ ++ phytium_plane_state = kzalloc(sizeof(*phytium_plane_state), GFP_KERNEL); ++ if (!phytium_plane_state) { ++ ret = -ENOMEM; ++ goto failed_malloc_plane_state; ++ } ++ phytium_plane_state->base.plane = &phytium_plane->base; ++ phytium_plane_state->base.rotation = DRM_MODE_ROTATE_0; ++ phytium_plane->base.state = &phytium_plane_state->base; ++ phytium_plane->phys_pipe = phys_pipe; ++ ++ if (IS_X100(priv)) { ++ phytium_plane->dc_hw_plane_get_format = x100_dc_hw_plane_get_cursor_format; ++ 
phytium_plane->dc_hw_update_dcreq = NULL; ++ phytium_plane->dc_hw_update_primary_hi_addr = NULL; ++ phytium_plane->dc_hw_update_cursor_hi_addr = NULL; ++ } ++ ++ phytium_plane->dc_hw_plane_get_format(&format_modifiers, &formats, &format_count); ++ ret = drm_universal_plane_init(dev, &phytium_plane->base, 0x0, ++ &phytium_plane_funcs, ++ formats, format_count, ++ format_modifiers, ++ DRM_PLANE_TYPE_CURSOR, "cursor %d", phys_pipe); ++ ++ if (ret) ++ goto failed_plane_init; ++ ++ flags = DRM_MODE_ROTATE_0; ++ drm_plane_create_rotation_property(&phytium_plane->base, DRM_MODE_ROTATE_0, flags); ++ drm_plane_helper_add(&phytium_plane->base, &phytium_plane_helper_funcs); ++ ++ return phytium_plane; ++failed_plane_init: ++ kfree(phytium_plane_state); ++failed_malloc_plane_state: ++ kfree(phytium_plane); ++failed_malloc_plane: ++ return ERR_PTR(ret); ++} +diff --git a/drivers/gpu/drm/phytium/phytium_plane.h b/drivers/gpu/drm/phytium/phytium_plane.h +new file mode 100644 +index 000000000000..41bb607d857e +--- /dev/null ++++ b/drivers/gpu/drm/phytium/phytium_plane.h +@@ -0,0 +1,46 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Phytium display drm driver ++ * ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. 
++ */ ++ ++#ifndef __PHYTIUM_PLANE_H__ ++#define __PHYTIUM_PLANE_H__ ++ ++struct phytium_plane { ++ struct drm_plane base; ++ int phys_pipe; ++ unsigned long iova[PHYTIUM_FORMAT_MAX_PLANE]; ++ unsigned long size[PHYTIUM_FORMAT_MAX_PLANE]; ++ unsigned int format; ++ unsigned int tiling[PHYTIUM_FORMAT_MAX_PLANE]; ++ unsigned int swizzle; ++ unsigned int uv_swizzle; ++ unsigned int rot_angle; ++ ++ /* only for cursor */ ++ bool enable; ++ bool reserve[3]; ++ unsigned int cursor_x; ++ unsigned int cursor_y; ++ unsigned int cursor_hot_x; ++ unsigned int cursor_hot_y; ++ ++ void (*dc_hw_plane_get_format)(const uint64_t **format_modifiers, ++ const uint32_t **formats, ++ uint32_t *format_count); ++ void (*dc_hw_update_dcreq)(struct drm_plane *plane); ++ void (*dc_hw_update_primary_hi_addr)(struct drm_plane *plane); ++ void (*dc_hw_update_cursor_hi_addr)(struct drm_plane *plane, uint64_t iova); ++}; ++ ++struct phytium_plane_state { ++ struct drm_plane_state base; ++}; ++ ++#define to_phytium_plane(x) container_of(x, struct phytium_plane, base) ++#define to_phytium_plane_state(x) container_of(x, struct phytium_plane_state, base) ++ ++struct phytium_plane *phytium_primary_plane_create(struct drm_device *dev, int pipe); ++struct phytium_plane *phytium_cursor_plane_create(struct drm_device *dev, int pipe); ++#endif /* __PHYTIUM_PLANE_H__ */ +diff --git a/drivers/gpu/drm/phytium/phytium_reg.h b/drivers/gpu/drm/phytium/phytium_reg.h +new file mode 100644 +index 000000000000..7d8e1183f158 +--- /dev/null ++++ b/drivers/gpu/drm/phytium/phytium_reg.h +@@ -0,0 +1,357 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Phytium display drm driver ++ * ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. 
++ */ ++ ++#ifndef __PHYTIUM_REG_H__ ++#define __PHYTIUM_REG_H__ ++ ++/******************************register base******************************************/ ++#define X100_PIPE_BASE(pipe) (0x8000*pipe) ++#define X100_DC_BASE(pipe) (X100_PIPE_BASE(pipe) + 0x0000) ++#define X100_DCREQ_BASE(pipe) (X100_PIPE_BASE(pipe) + 0x2000) ++#define X100_DP_BASE(pipe) (X100_PIPE_BASE(pipe) + 0x3000) ++#define X100_ADDRESS_TRANSFORM_BASE 0x4000 ++#define X100_PHY_ACCESS_BASE(pipe) (X100_PIPE_BASE(pipe) + 0x5000) ++/******************************register base end******************************************/ ++ ++/******************************dc register start******************************************/ ++#define PHYTIUM_DC_FRAMEBUFFER_Y_ADDRESS 0x1400 ++ #define ADDRESS_MASK 0xffffff80 ++#define PHYTIUM_DC_FRAMEBUFFER_Y_STRIDE 0x1408 ++#define PHYTIUM_DC_PANEL_CONFIG 0x1418 ++ #define PANEL_DATAENABLE_ENABLE (1<<0) ++ #define PANEL_DATA_ENABLE (1<<4) ++ #define PANEL_CLOCK_ENABLE (1<<8) ++#define PHYTIUM_DC_HDISPLAY 0x1430 ++ #define HDISPLAY_END_SHIFT 0 ++ #define HDISPLAY_END_MASK 0x7fff ++ #define HDISPLAY_TOTAL_SHIFT 16 ++ #define HDISPLAY_TOTAL_MASK 0x7fff ++#define PHYTIUM_DC_HSYNC 0x1438 ++ #define HSYNC_START_SHIFT 0 ++ #define HSYNC_START_MASK 0x7fff ++ #define HSYNC_END_SHIFT 15 ++ #define HSYNC_END_MASK 0x7fff ++ #define HSYNC_PULSE_ENABLED (1<<30) ++ #define HSYNC_NEGATIVE (1<<31) ++#define PHYTIUM_DC_VDISPLAY 0x1440 ++ #define VDISPLAY_END_SHIFT 0 ++ #define VDISPLAY_END_MASK 0x7fff ++ #define VDISPLAY_TOTAL_SHIFT 16 ++ #define VDISPLAY_TOTAL_MASK 0x7fff ++#define PHYTIUM_DC_VSYNC 0x1448 ++ #define VSYNC_START_SHIFT 0 ++ #define VSYNC_START_MASK 0x7fff ++ #define VSYNC_END_SHIFT 15 ++ #define VSYNC_END_MASK 0x7fff ++ #define VSYNC_PULSE_ENABLED (1<<30) ++ #define VSYNC_NEGATIVE (1<<31) ++#define PHYTIUM_DC_DISPLAY_CURRENT_LOCATION 0x1450 ++#define PHYTIUM_DC_GAMMA_INDEX 0x1458 ++ #define GAMMA_INDEX_MAX 256 ++#define PHYTIUM_DC_GAMMA_DATA 0x1460 ++ #define 
GAMMA_BLUE_SHIFT 0 ++ #define GAMMA_BLUE_MASK 0x3ff ++ #define GAMMA_GREEN_SHIFT 10 ++ #define GAMMA_GREEN_MASK 0x3ff ++ #define GAMMA_RED_SHIFT 20 ++ #define GAMMA_RED_MASK 0x3ff ++#define PHYTIUM_DC_CURSOR_CONFIG 0x1468 ++ #define CURSOR_FORMAT_DISABLED 0x0 ++ #define CURSOR_FORMAT_MASKMODE 0x3 ++ #define CURSOR_FORMAT_ARGB8888 0x2 ++ #define CURSOR_FORMAT_MASK 0x3 ++ #define CURSOR_HOT_Y_SHIFT 8 ++ #define CURSOR_HOT_Y_MASK 0x1f ++ #define CURSOR_HOT_X_SHIFT 16 ++ #define CURSOR_HOT_X_MASK 0x1f ++#define PHYTIUM_DC_CURSOR_ADDRESS 0x146c ++#define PHYTIUM_DC_CURSOR_LOCATION 0x1470 ++ #define CURSOR_X_SHIFT 0 ++ #define CURSOR_X_MASK 0x7fff ++ #define CURSOR_Y_SHIFT 16 ++ #define CURSOR_Y_MASK 0x7fff ++#define PHYTIUM_DC_CURSOR_BACKGROUND 0x1474 ++#define PHYTIUM_DC_CURSOR_FOREGROUND 0x1478 ++#define PHYTIUM_DC_INT_STATUS 0x147c ++ #define INT_STATUS 0x1 ++#define PHYTIUM_DC_INT_ENABLE 0x1480 ++ #define INT_ENABLE 0x1 ++ #define INT_DISABLE 0x0 ++ ++#define PHYTIUM_DC_FRAMEBUFFER_CONFIG 0x1518 ++ #define FRAMEBUFFER_OUTPUT BIT(0) ++ #define FRAMEBUFFER_GAMMA_ENABLE BIT(2) ++ #define FRAMEBUFFER_VALID_PENDING BIT(3) ++ #define FRAMEBUFFER_RESET BIT(4) ++ #define FRAMEBUFFER_PROGRESS BIT(6) ++ #define FRAMEBUFFER_ROT_ANGLE_SHIFT (11) ++ #define FRAMEBUFFER_ROT_ANGLE_MASK (0x7) ++ #define FRAMEBUFFER_ROT_ANGLE_ROT0 (0) ++ #define FRAMEBUFFER_ROT_ANGLE_FLIP_X (1) ++ #define FRAMEBUFFER_ROT_ANGLE_FLIP_Y (2) ++ #define FRAMEBUFFER_TILE_MODE_SHIFT (17) ++ #define FRAMEBUFFER_TILE_MODE_MASK (0x1f) ++ #define FRAMEBUFFER_LINEAR 0 ++ #define FRAMEBUFFER_TILE_MODE0 4 ++ #define FRAMEBUFFER_TILE_MODE3 7 ++ #define FRAMEBUFFER_FORMAT_SHIFT 26 ++ #define FRAMEBUFFER_FORMAT_MASK 0x3f ++ #define FRAMEBUFFER_FORMAT_XRGB4444 0x0 ++ #define FRAMEBUFFER_FORMAT_ARGB4444 0x1 ++ #define FRAMEBUFFER_FORMAT_XRGB1555 0x2 ++ #define FRAMEBUFFER_FORMAT_ARGB1555 0x3 ++ #define FRAMEBUFFER_FORMAT_RGB565 0x4 ++ #define FRAMEBUFFER_FORMAT_XRGB8888 0x5 ++ #define FRAMEBUFFER_FORMAT_ARGB8888 0x6 
++ #define FRAMEBUFFER_FORMAT_YUYV 0x7 ++ #define FRAMEBUFFER_FORMAT_UYVY 0x8 ++ #define FRAMEBUFFER_FORMAT_NV12 0x11 ++ #define FRAMEBUFFER_FORMAT_NV16 0x12 ++ #define FRAMEBUFFER_FORMAT_ARGB2101010 0x16 ++ #define FRAMEBUFFER_SWIZZLE_SHIFT 23 ++ #define FRAMEBUFFER_SWIZZLE_MASK 0x3 ++ #define FRAMEBUFFER_SWIZZLE_ARGB 0 ++ #define FRAMEBUFFER_SWIZZLE_RGBA 1 ++ #define FRAMEBUFFER_SWIZZLE_ABGR 2 ++ #define FRAMEBUFFER_SWIZZLE_BGRA 3 ++ #define FRAMEBUFFER_UVSWIZZLE_SHIFT 25 ++ #define FRAMEBUFFER_UVSWIZZLE_DISABLE 0 ++ #define FRAMEBUFFER_UVSWIZZLE_ENABLE 1 ++ #define FRAMEBUFFER_CLEAR BIT(8) ++ #define FRAMEBUFFER_SCALE_ENABLE BIT(22) ++#define PHYTIUM_DC_FRAMEBUFFER_SCALECONFIG 0x1520 ++ #define FRAMEBUFFER_FILTER_TAP 3 ++ #define FRAMEBUFFER_HORIZONTAL_FILTER_TAP 3 ++ #define FRAMEBUFFER_TAP 0x33 ++#define PHYTIUM_DC_FRAMEBUFFER_U_ADDRESS 0x1530 ++#define PHYTIUM_DC_FRAMEBUFFER_V_ADDRESS 0x1538 ++#define PHYTIUM_DC_OVERLAY_CONFIG 0x1540 ++ #define X100_DC_OVERLAY_ENABLE BIT(24) ++ ++#define PHYTIUM_DC_FRAMEBUFFER_U_STRIDE 0x1800 ++#define PHYTIUM_DC_FRAMEBUFFER_V_STRIDE 0x1808 ++#define PHYTIUM_DC_FRAMEBUFFER_SIZE 0x1810 ++ #define WIDTH_SHIFT 0 ++ #define WIDTH_MASK 0x7fff ++ #define HEIGHT_SHIFT 15 ++ #define HEIGHT_MASK 0x7fff ++ ++#define PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_X 0x1828 ++ #define SCALE_FACTOR_X_MASK 0x7fffffff ++#define PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_Y 0x1830 ++ #define SCALE_FACTOR_Y_MASK 0x7fffffff ++ #define SCALE_FACTOR_Y_MAX 0x3 ++ #define SCALE_FACTOR_SRC_OFFSET 16 ++ ++#define PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER_INDEX 0x1838 ++ #define HORI_FILTER_INDEX 0x0 ++#define PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER 0x1a00 ++#define PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER_INDEX 0x1a08 ++ #define VERT_FILTER_INDEX 0x0 ++#define PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER 0x1a10 ++#define PHYTIUM_DC_FRAMEBUFFER_CLEARVALUE 0x1a18 ++ #define CLEAR_VALUE_RED 0x00ff0000 ++ #define CLEAR_VALUE_GREEN 0x0000ff00 ++#define PHYTIUM_DC_FRAMEBUFFER_INITIALOFFSET 0x1a20 ++ 
#define INITIALOFFSET (0x8000 | (0X8000 << 16)) ++#define PHYTIUM_DC_DP_CONFIG 0x1cd0 ++ #define OUTPUT_DP (1<<3) ++ #define DP_RGB666 (0x1) ++ #define DP_RGB888 (0x2) ++ #define DP_RGB101010 (0x3) ++/******************************dc register end********************************************/ ++ ++/******************************phy access register****************************************/ ++#define PHYTIUM_PHY_ACCESS_ADDRESS 0x0000 ++#define PHYTIUM_PHY_WRITE_DATA 0x0004 ++#define PHYTIUM_PHY_READ_DATA 0x0008 ++#define PHYTIUM_PHY_ACCESS_CTRL 0x000c ++ #define ACCESS_WRITE (1<<0) ++ #define ACCESS_READ (1<<1) ++/******************************phy access register end*************************************/ ++ ++/******************************dp register start******************************************/ ++#define PHYTIUM_DP_LINK_BW_SET 0x0000 ++#define PHYTIUM_DP_LANE_COUNT_SET 0x0004 ++#define PHYTIUM_DP_ENHANCED_FRAME_EN 0x0008 ++ #define ENHANCED_FRAME_ENABLE 0x1 ++ #define ENHANCED_FRAME_DISABLE 0x0 ++#define PHYTIUM_DP_TRAINING_PATTERN_SET 0x000c ++ #define TRAINING_OFF 0x0 ++ #define TRAINING_PATTERN_1 0x1 ++ #define TRAINING_PATTERN_2 0x2 ++ #define TRAINING_PATTERN_3 0x3 ++ #define TRAINING_PATTERN_4 0x4 ++#define PHYTIUM_DP_LINK_QUAL_PATTERN_SET 0x0010 ++ #define TEST_PATTERN_NONE 0x0 ++ #define TEST_PATTERN_D10_2 0x1 ++ #define TEST_PATTERN_SYMBOL_ERROR 0x2 ++ #define TEST_PATTERN_PRBS7 0x3 ++ #define TEST_PATTERN_80BIT_CUSTOM 0x4 ++ #define TEST_PATTERN_CP2520_1 0x5 ++ #define TEST_PATTERN_CP2520_2 0x6 ++ #define TEST_PATTERN_CP2520_3 0x7 ++ #define TEST_PATTERN_LANE_SHIFT 8 ++#define PHYTIUM_DP_SCRAMBLING_DISABLE 0x0014 ++ #define SCRAMBLING_ENABLE 0x0 ++ #define SCRAMBLING_DISABLE 0x1 ++#define PHYTIUM_DP_DOWNSPREAD_CTRL 0x0018 ++#define PHYTIUM_DP_ALT_SCRAMBLER_RESET 0x001c ++#define PHYTIUM_DP_HBR2_SCRAMBLER_RESET 0x0020 ++#define PHYTIUM_DP_DISPLAYPORT_VERSION 0x0024 ++#define PHYTIUM_DP_CUSTOM_80BIT_PATTERN_0 0x0030 ++#define 
PHYTIUM_DP_CUSTOM_80BIT_PATTERN_1 0x0034 ++#define PHYTIUM_DP_CUSTOM_80BIT_PATTERN_2 0x0038 ++#define PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE 0x0080 ++ #define TRANSMITTER_OUTPUT_ENABLE BIT(0) ++ #define TRANSMITTER_OUTPUT_DISABLE 0 ++#define PHYTIUM_DP_VIDEO_STREAM_ENABLE 0x0084 ++ #define SST_MST_SOURCE_0_ENABLE BIT(0) ++ #define SST_MST_SOURCE_0_ENABLE_MASK 0x1 ++ #define SST_MST_SOURCE_0_DISABLE 0 ++#define PHYTIUM_DP_SECONDARY_STREAM_ENABLE 0x0088 ++ #define SECONDARY_STREAM_ENABLE 0x1 ++ #define SECONDARY_STREAM_DISABLE 0x0 ++#define PHYTIUM_DP_SEC_DATA_WINDOW 0x008C ++#define PHYTIUM_DP_SOFT_RESET 0x0090 ++ #define LINK_SOFT_RESET (0x1 << 0) ++ #define VIDEO_SOFT_RESET (0x1 << 1) ++#define PHYTIUM_INPUT_SOURCE_ENABLE 0x0094 ++ #define VIRTUAL_SOURCE_0_ENABLE BIT(0) ++ #define VIRTUAL_SOURCE_0_ENABLE_MASK 0x1 ++#define PHYTIUM_DP_FORCE_SCRAMBLER_RESET 0x00C0 ++ #define SCRAMBLER_RESET BIT(0) ++#define PHYTIUM_DP_SOURCE_CONTROL_STATUS 0x00C4 ++#define PHYTIUM_DP_DATA_CONTROL 0x00C8 ++#define PHYTIUM_DP_CORE_CAPABILITY 0x00F8 ++#define PHYTIUM_DP_CORE_ID 0x00FC ++#define PHYTIUM_DP_AUX_COMMAND 0x0100 ++ #define BYTE_COUNT_MASK 0xf ++ #define COMMAND_SHIFT 8 ++ #define COMMAND_MASK 0xf ++ #define ADDRESS_ONLY (1<<12) ++#define PHYTIUM_DP_AUX_WRITE_FIFO 0x0104 ++#define PHYTIUM_DP_AUX_ADDRESS 0x0108 ++#define PHYTIUM_DP_AUX_CLK_DIVIDER 0x010C ++ #define AUX_CLK_DIVIDER 48 ++#define PHYTIUM_DP_SINK_HPD_STATE 0x0128 ++ #define HPD_CONNECT 0x1 ++ #define HPD_DISCONNECT 0x0 ++#define PHYTIUM_DP_INTERRUPT_RAW_STATUS 0x0130 ++ #define REPLY_TIMEOUT (1<<3) ++ #define DP_STATUS_REQUEST_IN_PROGRESS (1<<1) ++ #define HPD_STATE (0<<1) ++#define PHYTIUM_DP_AUX_REPLY_DATA 0x0134 ++#define PHYTIUM_DP_AUX_REPLY_CODE 0x0138 ++ #define AUX_NATIVE_ACK (0x0<<0) ++ #define AUX_NATIVE_NACK (0x1<<0) ++ #define AUX_NATIVE_DEFER (0x2<<0) ++ #define AUX_NATIVE_MASK (0x3 << 0) ++ #define AUX_I2C_ACK (0x0<<2) ++ #define AUX_I2C_NACK (0x1<<2) ++ #define AUX_I2C_DEFER (0x2<<2) ++ #define 
AUX_I2C_MASK (0x3 << 2) ++#define PHYTIUM_DP_INTERRUPT_STATUS 0x0140 ++ #define HPD_IRQ (1<<1) ++ #define HPD_EVENT (1<<0) ++#define PHYTIUM_DP_INTERRUPT_MASK 0x0144 ++ #define HPD_IRQ_MASK (1<<1) ++ #define HPD_EVENT_MASK (1<<0) ++ #define HPD_OTHER_MASK 0x3c ++#define PHYTIUM_DP_AUX_REPLY_DATA_COUNT 0x0148 ++#define PHYTIUM_DP_AUX_STATUS 0x014C ++ #define REPLY_RECEIVED 0x1 ++ #define REPLY_IN_PROGRESS 0x2 ++ #define REQUEST_IN_PROGRESS 0x4 ++ #define REPLY_ERROR 0x8 ++#define PHYTIUM_DP_AUX_TIMER 0x0158 ++#define PHYTIUM_DP_MAIN_LINK_HTOTAL 0x0180 ++#define PHYTIUM_DP_MAIN_LINK_VTOTAL 0x0184 ++#define PHYTIUM_DP_MAIN_LINK_POLARITY 0x0188 ++ #define VSYNC_POLARITY_LOW BIT(1) ++ #define HSYNC_POLARITY_LOW BIT(0) ++#define PHYTIUM_DP_MAIN_LINK_HSWIDTH 0x018C ++#define PHYTIUM_DP_MAIN_LINK_VSWIDTH 0x0190 ++#define PHYTIUM_DP_MAIN_LINK_HRES 0x0194 ++#define PHYTIUM_DP_MAIN_LINK_VRES 0x0198 ++#define PHYTIUM_DP_MAIN_LINK_HSTART 0x019C ++#define PHYTIUM_DP_MAIN_LINK_VSTART 0x01A0 ++#define PHYTIUM_DP_MAIN_LINK_MISC0 0x01A4 ++ #define MISC0_SYNCHRONOUS_CLOCK BIT(0) ++ #define MISC0_BIT_DEPTH_OFFSET 5 ++ #define MISC0_BIT_DEPTH_6BIT 0x0 ++ #define MISC0_BIT_DEPTH_8BIT 0x1 ++ #define MISC0_BIT_DEPTH_10BIT 0x2 ++ #define MISC0_COMPONENT_FORMAT_SHIFT 1 ++ #define MISC0_COMPONENT_FORMAT_RGB 0x0 ++#define PHYTIUM_DP_MAIN_LINK_MISC1 0x01A8 ++#define PHYTIUM_DP_M_VID 0x01AC ++#define PHYTIUM_DP_TRANSFER_UNIT_SIZE 0x01B0 ++#define PHYTIUM_DP_N_VID 0x01B4 ++#define PHYTIUM_DP_USER_PIXEL_WIDTH 0x01B8 ++#define PHYTIUM_DP_DATA_COUNT 0x01BC ++#define PHYTIUM_DP_INTERLACED 0x01C0 ++#define PHYTIUM_DP_USER_SYNC_POLARITY 0x01C4 ++ #define USER_ODDEVEN_POLARITY_HIGH BIT(3) ++ #define USER_DATA_ENABLE_POLARITY_HIGH BIT(2) ++ #define USER_VSYNC_POLARITY_HIGH BIT(1) ++ #define USER_HSYNC_POLARITY_HIGH BIT(0) ++#define PHYTIUM_DP_USER_CONTROL 0x01C8 ++#define PHYTIUM_EDP_CRC_ENABLE 0x01D0 ++#define PHYTIUM_EDP_CRC_RED 0x01D4 ++#define PHYTIUM_EDP_CRC_GREEN 0x01D8 ++#define 
PHYTIUM_EDP_CRC_BLUE 0x01DC ++#define PHYTIUM_DP_SEC_AUDIO_ENABLE 0x0300 ++ #define SEC_AUDIO_ENABLE BIT(0) ++ #define CHANNEL_MUTE_ENABLE BIT(1) ++#define PHYTIUM_DP_SEC_INPUT_SELECT 0x0304 ++ #define INPUT_SELECT_I2S 0x0 ++#define PHYTIUM_DP_SEC_CHANNEL_COUNT 0x0308 ++ #define CHANNEL_2 0x2 ++ #define CHANNEL_2_LFE 0x3 ++ #define CHANNEL_5_1 0x6 ++ #define CHANNEL_7_1 0x7 ++ #define CHANNEL_MASK 0xf ++#define PHYTIUM_DP_SEC_DIRECT_CLKDIV 0x030c ++ #define APB_CLOCK 48000000 ++#define PHYTIUM_DP_SEC_MAUD 0x0318 ++#define PHYTIUM_DP_SEC_NAUD 0x031c ++#define PHYTIUM_DP_SEC_CLOCK_MODE 0x0320 ++ #define CLOCK_MODE_SYNC 0x1 ++#define PHYTIUM_DP_SEC_CS_SOURCE_FORMAT 0x0340 ++ #define CS_SOURCE_FORMAT_DEFAULT 0x0 ++#define PHYTIUM_DP_SEC_CS_CATEGORY_CODE 0x0344 ++#define PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ 0x0348 ++ #define ORIG_FREQ_32000 0xc ++ #define ORIG_FREQ_44100 0xf ++ #define ORIG_FREQ_48000 0xd ++ #define ORIG_FREQ_88200 0x7 ++ #define ORIG_FREQ_96000 0x5 ++ #define ORIG_FREQ_176400 0x3 ++ #define ORIG_FREQ_192000 0x1 ++ #define ORIG_FREQ_MASK 0xf ++ #define ORIG_FREQ_SHIFT 0 ++ #define WORD_LENGTH_16 0x4 ++ #define WORD_LENGTH_18 0x2 ++ #define WORD_LENGTH_20 0xc ++ #define WORD_LENGTH_24 0xd ++ #define WORD_LENGTH_MASK 0xf ++ #define WORD_LENGTH_SHIFT 4 ++#define PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY 0x034c // not used ++ #define SAMPLING_FREQ_32000 0xc ++ #define SAMPLING_FREQ_44100 0x0 ++ #define SAMPLING_FREQ_48000 0x4 ++ #define SAMPLING_FREQ_88200 0x1 ++ #define SAMPLING_FREQ_96000 0x5 ++ #define SAMPLING_FREQ_176400 0x3 ++ #define SAMPLING_FREQ_192000 0x7 ++ #define SAMPLING_FREQ_MASK 0xf ++ #define SAMPLING_FREQ_SHIFT 4 ++#define PHYTIUM_DP_SEC_CHANNEL_MAP 0x035C ++ #define CHANNEL_MAP_DEFAULT 0x87654321 ++/******************************dp register end********************************************/ ++ ++#endif /* __PHYTIUM_REG_H__ */ +diff --git a/drivers/gpu/drm/phytium/x100_dc.c b/drivers/gpu/drm/phytium/x100_dc.c +new file mode 100644 +index 
000000000000..06394c232dab +--- /dev/null ++++ b/drivers/gpu/drm/phytium/x100_dc.c +@@ -0,0 +1,321 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* Phytium display drm driver ++ * ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. ++ */ ++ ++#include ++#include ++#include ++#include ++#include "phytium_display_drv.h" ++#include "x100_reg.h" ++#include "phytium_crtc.h" ++#include "phytium_plane.h" ++#include "phytium_fb.h" ++#include "phytium_gem.h" ++ ++static const unsigned int x100_primary_formats[] = { ++ DRM_FORMAT_ARGB2101010, ++ DRM_FORMAT_ABGR2101010, ++ DRM_FORMAT_RGBA1010102, ++ DRM_FORMAT_BGRA1010102, ++ DRM_FORMAT_ARGB8888, ++ DRM_FORMAT_ABGR8888, ++ DRM_FORMAT_RGBA8888, ++ DRM_FORMAT_BGRA8888, ++ DRM_FORMAT_XRGB8888, ++ DRM_FORMAT_XBGR8888, ++ DRM_FORMAT_RGBX8888, ++ DRM_FORMAT_BGRX8888, ++ DRM_FORMAT_ARGB4444, ++ DRM_FORMAT_ABGR4444, ++ DRM_FORMAT_RGBA4444, ++ DRM_FORMAT_BGRA4444, ++ DRM_FORMAT_XRGB4444, ++ DRM_FORMAT_XBGR4444, ++ DRM_FORMAT_RGBX4444, ++ DRM_FORMAT_BGRX4444, ++ DRM_FORMAT_ARGB1555, ++ DRM_FORMAT_ABGR1555, ++ DRM_FORMAT_RGBA5551, ++ DRM_FORMAT_BGRA5551, ++ DRM_FORMAT_XRGB1555, ++ DRM_FORMAT_XBGR1555, ++ DRM_FORMAT_RGBX5551, ++ DRM_FORMAT_BGRX5551, ++ DRM_FORMAT_RGB565, ++ DRM_FORMAT_BGR565, ++ DRM_FORMAT_YUYV, ++ DRM_FORMAT_UYVY, ++}; ++ ++static uint64_t x100_primary_formats_modifiers[] = { ++ DRM_FORMAT_MOD_LINEAR, ++ DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC, ++ DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC, ++ DRM_FORMAT_MOD_INVALID ++}; ++ ++static const unsigned int x100_cursor_formats[] = { ++ DRM_FORMAT_ARGB8888, ++}; ++ ++void x100_dc_hw_vram_init(struct phytium_display_private *priv, resource_size_t vram_addr, ++ resource_size_t vram_size) ++{ ++ uint32_t config; ++ uint32_t group_offset = priv->address_transform_base; ++ ++ config = phytium_readl_reg(priv, group_offset, ++ X100_GPU_ADDRESS_TRANSFORM_SRC_ADDR); ++ if (config) ++ phytium_writel_reg(priv, config, group_offset, ++ X100_GPU_ADDRESS_TRANSFORM_SRC_ADDR); ++ ++ config = 
phytium_readl_reg(priv, group_offset, ++ X100_GPU_ADDRESS_TRANSFORM_SIZE); ++ if (config) ++ phytium_writel_reg(priv, config, group_offset, ++ X100_GPU_ADDRESS_TRANSFORM_SIZE); ++ ++ config = phytium_readl_reg(priv, group_offset, ++ X100_GPU_ADDRESS_TRANSFORM_DST_ADDR); ++ if (config) ++ phytium_writel_reg(priv, config, group_offset, ++ X100_GPU_ADDRESS_TRANSFORM_DST_ADDR); ++ ++ phytium_writel_reg(priv, (vram_addr & SRC_ADDR_MASK) >> SRC_ADDR_OFFSET, ++ group_offset, X100_DC_ADDRESS_TRANSFORM_SRC_ADDR); ++ phytium_writel_reg(priv, (vram_size >> SIZE_OFFSET) | ADDRESS_TRANSFORM_ENABLE, ++ group_offset, X100_DC_ADDRESS_TRANSFORM_SIZE); ++ config = phytium_readl_reg(priv, group_offset, X100_DC_ADDRESS_TRANSFORM_DST_ADDR); ++ phytium_writel_reg(priv, config, group_offset, X100_DC_ADDRESS_TRANSFORM_DST_ADDR); ++} ++ ++void x100_dc_hw_clear_msi_irq(struct phytium_display_private *priv, uint32_t phys_pipe) ++{ ++ phytium_writel_reg(priv, MSI_CLEAR, priv->dcreq_reg_base[phys_pipe], X100_DCREQ_MSI_CLEAR); ++} ++ ++void x100_dc_hw_config_pix_clock(struct drm_crtc *crtc, int clock) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); ++ int phys_pipe = phytium_crtc->phys_pipe; ++ uint32_t group_offset = priv->dcreq_reg_base[phys_pipe]; ++ int ret = 0; ++ ++ /* config pix clock */ ++ phytium_writel_reg(priv, FLAG_REQUEST | CMD_PIXEL_CLOCK | (clock & PIXEL_CLOCK_MASK), ++ group_offset, X100_DCREQ_CMD_REGISTER); ++ ret = phytium_wait_cmd_done(priv, group_offset + X100_DCREQ_CMD_REGISTER, ++ FLAG_REQUEST, FLAG_REPLY); ++ if (ret < 0) ++ DRM_ERROR("%s: failed to set pixel clock\n", __func__); ++} ++ ++void x100_dc_hw_disable(struct drm_crtc *crtc) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); ++ int reset_timeout = 100; ++ int config = 0; ++ int 
phys_pipe = phytium_crtc->phys_pipe; ++ ++ // reset dc ++ config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], X100_DC_CLOCK_CONTROL); ++ phytium_writel_reg(priv, config | SOFT_RESET, priv->dc_reg_base[phys_pipe], ++ X100_DC_CLOCK_CONTROL); ++ phytium_writel_reg(priv, 0, priv->dc_reg_base[phys_pipe], X100_DC_CLOCK_CONTROL); ++ do { ++ config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], X100_DC_CLOCK_IDLE); ++ if (config | IS_IDLE) ++ break; ++ mdelay(1); ++ reset_timeout--; ++ } while (reset_timeout); ++ ++ /* reset pix clock */ ++ x100_dc_hw_config_pix_clock(crtc, 0); ++ ++ // reset dc ++ reset_timeout = 100; ++ config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], X100_DC_CLOCK_CONTROL); ++ phytium_writel_reg(priv, config | SOFT_RESET, priv->dc_reg_base[phys_pipe], ++ X100_DC_CLOCK_CONTROL); ++ phytium_writel_reg(priv, 0, priv->dc_reg_base[phys_pipe], X100_DC_CLOCK_CONTROL); ++ do { ++ config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], X100_DC_CLOCK_IDLE); ++ if (config | IS_IDLE) ++ break; ++ mdelay(1); ++ reset_timeout--; ++ } while (reset_timeout); ++ ++ /* reset dcreq */ ++ phytium_writel_reg(priv, DCREQ_PLAN_A, priv->dcreq_reg_base[phys_pipe], X100_DCREQ_PLAN); ++ phytium_writel_reg(priv, 0, priv->dcreq_reg_base[phys_pipe], X100_DCREQ_CONTROL); ++ phytium_writel_reg(priv, DCREQ_RESET, priv->dcreq_reg_base[phys_pipe], X100_DCREQ_RESET); ++ msleep(20); ++ phytium_writel_reg(priv, (~DCREQ_RESET)&DCREQ_RESET_MASK, ++ priv->dcreq_reg_base[phys_pipe], X100_DCREQ_RESET); ++} ++ ++int x100_dc_hw_fb_format_check(const struct drm_mode_fb_cmd2 *mode_cmd, int count) ++{ ++ int ret = 0; ++ ++ switch (mode_cmd->modifier[count]) { ++ case DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC: ++ switch (mode_cmd->pixel_format) { ++ case DRM_FORMAT_ARGB4444: ++ case DRM_FORMAT_ABGR4444: ++ case DRM_FORMAT_RGBA4444: ++ case DRM_FORMAT_BGRA4444: ++ case DRM_FORMAT_XRGB4444: ++ case DRM_FORMAT_XBGR4444: ++ case DRM_FORMAT_RGBX4444: ++ case 
DRM_FORMAT_BGRX4444: ++ case DRM_FORMAT_ARGB1555: ++ case DRM_FORMAT_ABGR1555: ++ case DRM_FORMAT_RGBA5551: ++ case DRM_FORMAT_BGRA5551: ++ case DRM_FORMAT_XRGB1555: ++ case DRM_FORMAT_XBGR1555: ++ case DRM_FORMAT_RGBX5551: ++ case DRM_FORMAT_BGRX5551: ++ case DRM_FORMAT_RGB565: ++ case DRM_FORMAT_BGR565: ++ case DRM_FORMAT_YUYV: ++ case DRM_FORMAT_UYVY: ++ break; ++ default: ++ DRM_ERROR("TILE_MODE0_FBCDC not support DRM_FORMAT %d", ++ mode_cmd->pixel_format); ++ ret = -EINVAL; ++ goto error; ++ } ++ break; ++ case DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC: ++ switch (mode_cmd->pixel_format) { ++ case DRM_FORMAT_ARGB2101010: ++ case DRM_FORMAT_ABGR2101010: ++ case DRM_FORMAT_RGBA1010102: ++ case DRM_FORMAT_BGRA1010102: ++ case DRM_FORMAT_ARGB8888: ++ case DRM_FORMAT_ABGR8888: ++ case DRM_FORMAT_RGBA8888: ++ case DRM_FORMAT_BGRA8888: ++ case DRM_FORMAT_XRGB8888: ++ case DRM_FORMAT_XBGR8888: ++ case DRM_FORMAT_RGBX8888: ++ case DRM_FORMAT_BGRX8888: ++ break; ++ default: ++ DRM_ERROR("TILE_MODE3_FBCDC not support DRM_FORMAT %d", ++ mode_cmd->pixel_format); ++ ret = -EINVAL; ++ goto error; ++ } ++ break; ++ case DRM_FORMAT_MOD_LINEAR: ++ break; ++ default: ++ DRM_ERROR("unsupported fb modifier 0x%llx\n", mode_cmd->modifier[0]); ++ ret = -EINVAL; ++ goto error; ++ } ++ ++ return 0; ++error: ++ return ret; ++} ++ ++void x100_dc_hw_plane_get_primary_format(const uint64_t **format_modifiers, ++ const uint32_t **formats, ++ uint32_t *format_count) ++{ ++ *format_modifiers = x100_primary_formats_modifiers; ++ *formats = x100_primary_formats; ++ *format_count = ARRAY_SIZE(x100_primary_formats); ++} ++ ++void x100_dc_hw_plane_get_cursor_format(const uint64_t **format_modifiers, ++ const uint32_t **formats, ++ uint32_t *format_count) ++{ ++ *format_modifiers = NULL; ++ *formats = x100_cursor_formats; ++ *format_count = ARRAY_SIZE(x100_cursor_formats); ++} ++ ++void x100_dc_hw_update_dcreq(struct drm_plane *plane) ++{ ++ struct drm_device *dev = plane->dev; ++ struct 
phytium_display_private *priv = dev->dev_private; ++ struct phytium_plane *phytium_plane = to_phytium_plane(plane); ++ int phys_pipe = phytium_plane->phys_pipe; ++ uint32_t group_offset = priv->dcreq_reg_base[phys_pipe]; ++ int config; ++ ++ if (phytium_plane->tiling[0] == FRAMEBUFFER_LINEAR) { ++ phytium_writel_reg(priv, DCREQ_MODE_LINEAR, ++ group_offset, X100_DCREQ_PLANE0_CONFIG); ++ } else { ++ config = DCREQ_NO_LOSSY; ++ if (phytium_plane->tiling[0] == FRAMEBUFFER_TILE_MODE0) ++ config |= DCREQ_TILE_TYPE_MODE0; ++ else if (phytium_plane->tiling[0] == FRAMEBUFFER_TILE_MODE3) ++ config |= DCREQ_TILE_TYPE_MODE3; ++ else ++ config |= DCREQ_TILE_TYPE_MODE0; ++ ++ switch (phytium_plane->format) { ++ case FRAMEBUFFER_FORMAT_ARGB8888: ++ case FRAMEBUFFER_FORMAT_XRGB8888: ++ config |= DCREQ_COLOURFORMAT_BGRA8888; ++ break; ++ case FRAMEBUFFER_FORMAT_ARGB2101010: ++ config |= DCREQ_COLOURFORMAT_ARGB2101010; ++ break; ++ case FRAMEBUFFER_FORMAT_XRGB4444: ++ case FRAMEBUFFER_FORMAT_ARGB4444: ++ config |= DCREQ_COLOURFORMAT_ARGB4444; ++ break; ++ case FRAMEBUFFER_FORMAT_XRGB1555: ++ case FRAMEBUFFER_FORMAT_ARGB1555: ++ config |= DCREQ_COLOURFORMAT_ARGB1555; ++ break; ++ case FRAMEBUFFER_FORMAT_RGB565: ++ config |= DCREQ_COLOURFORMAT_RGB565; ++ break; ++ case FRAMEBUFFER_FORMAT_YUYV: ++ config |= DCREQ_COLOURFORMAT_YUYV; ++ break; ++ case FRAMEBUFFER_FORMAT_UYVY: ++ config |= DCREQ_COLOURFORMAT_UYVY; ++ break; ++ } ++ config |= DCREQ_ARGBSWIZZLE_ARGB; ++ config |= DCREQ_MODE_TILE; ++ phytium_writel_reg(priv, phytium_plane->iova[0] & 0xffffffff, ++ group_offset, X100_DCREQ_PLANE0_ADDR_START); ++ phytium_writel_reg(priv, (phytium_plane->iova[0] + phytium_plane->size[0]) & ++ 0xffffffff, group_offset, X100_DCREQ_PLANE0_ADDR_END); ++ phytium_writel_reg(priv, config, group_offset, X100_DCREQ_PLANE0_CONFIG); ++ } ++} ++ ++void x100_dc_hw_update_primary_hi_addr(struct drm_plane *plane) ++{ ++ struct drm_device *dev = plane->dev; ++ struct phytium_display_private *priv = 
dev->dev_private; ++ struct phytium_plane *phytium_plane = to_phytium_plane(plane); ++ int phys_pipe = phytium_plane->phys_pipe; ++ ++ phytium_writel_reg(priv, (phytium_plane->iova[0] >> PREFIX_SHIFT) & PREFIX_MASK, ++ priv->dcreq_reg_base[phys_pipe], X100_DCREQ_PIX_DMA_PREFIX); ++} +diff --git a/drivers/gpu/drm/phytium/x100_dc.h b/drivers/gpu/drm/phytium/x100_dc.h +new file mode 100644 +index 000000000000..ae98b4ffe0cf +--- /dev/null ++++ b/drivers/gpu/drm/phytium/x100_dc.h +@@ -0,0 +1,30 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Phytium display drm driver ++ * ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. ++ */ ++ ++#ifndef __X100_DC_H__ ++#define __X100_DC_H__ ++ ++#define X100_DC_PIX_CLOCK_MAX (594000) ++#define x100_DC_HDISPLAY_MAX 3840 ++#define X100_DC_VDISPLAY_MAX 2160 ++#define X100_DC_ADDRESS_MASK 0x3f ++ ++extern void x100_dc_hw_vram_init(struct phytium_display_private *priv, ++ resource_size_t vram_addr, ++ resource_size_t vram_size); ++extern void x100_dc_hw_clear_msi_irq(struct phytium_display_private *priv, uint32_t phys_pipe); ++extern void x100_dc_hw_config_pix_clock(struct drm_crtc *crtc, int clock); ++extern void x100_dc_hw_disable(struct drm_crtc *crtc); ++extern int x100_dc_hw_fb_format_check(const struct drm_mode_fb_cmd2 *mode_cmd, int count); ++extern void x100_dc_hw_plane_get_primary_format(const uint64_t **format_modifiers, ++ const uint32_t **formats, ++ uint32_t *format_count); ++extern void x100_dc_hw_plane_get_cursor_format(const uint64_t **format_modifiers, ++ const uint32_t **formats, ++ uint32_t *format_count); ++void x100_dc_hw_update_dcreq(struct drm_plane *plane); ++void x100_dc_hw_update_primary_hi_addr(struct drm_plane *plane); ++#endif /* __X100_DC_H__ */ +diff --git a/drivers/gpu/drm/phytium/x100_dp.c b/drivers/gpu/drm/phytium/x100_dp.c +new file mode 100644 +index 000000000000..4cc390442461 +--- /dev/null ++++ b/drivers/gpu/drm/phytium/x100_dp.c +@@ -0,0 +1,907 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* 
Phytium display drm driver ++ * ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. ++ */ ++ ++#include "phytium_display_drv.h" ++#include "x100_reg.h" ++#include "phytium_dp.h" ++#include "x100_dp.h" ++ ++/* [reg][ling_rate 1.62->8.1] */ ++static int vco_val[12][4] = { ++ {0x0509, 0x0509, 0x0509, 0x0509}, // CP_PADJ ++ {0x0f00, 0x0f00, 0x0f00, 0x0f00}, // CP_IADJ ++ {0x0F08, 0x0F08, 0x0F08, 0x0F08}, // FILT_PADJ ++ {0x0061, 0x006C, 0x006C, 0x0051}, // INTDIV ++ {0x3333, 0x0000, 0x0000, 0x0000}, // FRACDIVL ++ {0x0000, 0x0000, 0x0000, 0x0000}, // FRACDIVH ++ {0x0042, 0x0048, 0x0048, 0x0036}, // HIGH_THR ++ {0x0002, 0x0002, 0x0002, 0x0002}, // PDIAG_CTRL ++ {0x0c5e, 0x0c5e, 0x0c5e, 0x0c5e}, // VCOCAL_PLLCNT_START ++ {0x00c7, 0x00c7, 0x00c7, 0x00c7}, // LOCK_PEFCNT ++ {0x00c7, 0x00c7, 0x00c7, 0x00c7}, // LOCK_PLLCNT_START ++ {0x0005, 0x0005, 0x0005, 0x0005}, // LOCK_PLLCNT_THR ++}; ++ ++static int mgnfs_val[4][4][4] = // [link_rate][swing][emphasis] ++{ ++ /* 1.62Gbps */ ++ { ++ {0x0026, 0x001f, 0x0012, 0x0000}, ++ {0x0013, 0x0013, 0x0000, 0x0000}, ++ {0x0006, 0x0000, 0x0000, 0x0000}, ++ {0x0000, 0x0000, 0x0000, 0x0000}, ++ }, ++ ++ /* 2.7Gbps */ ++ { ++ {0x0026, 0x001f, 0x0012, 0x0000}, ++ {0x0013, 0x0013, 0x0000, 0x0000}, ++ {0x0006, 0x0000, 0x0000, 0x0000}, ++ {0x0000, 0x0000, 0x0000, 0x0000}, ++ }, ++ ++ /* 5.4Gbps */ ++ { ++ {0x0026, 0x0013, 0x005, 0x0000}, ++ {0x0018, 0x006, 0x0000, 0x0000}, ++ {0x000c, 0x0000, 0x0000, 0x0000}, ++ {0x0000, 0x0000, 0x0000, 0x0000}, ++ }, ++ ++ /* 8.1Gbps */ ++ { ++ {0x0026, 0x0013, 0x005, 0x0000}, ++ {0x0013, 0x006, 0x0000, 0x0000}, ++ {0x0006, 0x0000, 0x0000, 0x0000}, ++ {0x0000, 0x0000, 0x0000, 0x0000}, ++ }, ++}; ++ ++static int cpost_val[4][4][4] = // [link_rate][swing][emphasis] ++{ ++ /* 1.62Gbps */ ++ { ++ {0x0000, 0x0014, 0x0020, 0x002a}, ++ {0x0000, 0x0010, 0x001f, 0x0000}, ++ {0x0000, 0x0013, 0x0000, 0x0000}, ++ {0x0000, 0x0000, 0x0000, 0x0000}, ++ }, ++ ++ /* 2.7Gbps */ ++ { ++ {0x0000, 0x0014, 0x0020, 0x002a}, ++ 
{0x0000, 0x0010, 0x001f, 0x0000}, ++ {0x0000, 0x0013, 0x0000, 0x0000}, ++ {0x0000, 0x0000, 0x0000, 0x0000}, ++ }, ++ ++ /* 5.4Gbps */ ++ { ++ {0x0000, 0x0014, 0x0022, 0x002e}, ++ {0x0000, 0x0013, 0x0020, 0x0000}, ++ {0x0000, 0x0013, 0x0000, 0x0000}, ++ {0x0000, 0x0000, 0x0000, 0x0000}, ++ }, ++ ++ /* 8.1Gbps */ ++ { ++ {0x0000, 0x0014, 0x0022, 0x002e}, ++ {0x0000, 0x0013, 0x0020, 0x0000}, ++ {0x0000, 0x0013, 0x0000, 0x0000}, ++ {0x0000, 0x0000, 0x0000, 0x0000}, ++ }, ++}; ++ ++static int x100_dp_hw_set_phy_lane_and_rate(struct phytium_dp_device *phytium_dp, ++ uint8_t link_lane_count, ++ uint32_t link_rate) ++{ ++ int port = phytium_dp->port%3; ++ int i = 0, data, tmp, tmp1, index = 0, mask; ++ int timeout = 500, ret = 0; ++ ++ if (port == 0 || port == 1) { ++ /* set pma powerdown */ ++ data = 0; ++ mask = 0; ++ for (i = 0; i < phytium_dp->source_max_lane_count; i++) { ++ data |= (A3_POWERDOWN3 << i*A3_POWERDOWN3_SHIFT); ++ mask |= (((1<source_max_lane_count; i++) { ++ data |= (PLL_EN << i*PLL_EN_SHIFT); ++ mask |= (((1<source_max_lane_count; i++) { ++ data |= (PLL_EN << i*PLL_EN_SHIFT); ++ mask |= (((1<source_max_lane_count; i++) { ++ data |= (A0_ACTIVE << i*A0_ACTIVE_SHIFT); ++ mask |= (((1<port%3; ++ int voltage_swing = 0; ++ int pre_emphasis = 0, link_rate_index = 0; ++ ++ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { ++ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: ++ default: ++ voltage_swing = 0; ++ break; ++ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: ++ voltage_swing = 1; ++ break; ++ case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: ++ voltage_swing = 2; ++ break; ++ case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: ++ voltage_swing = 3; ++ break; ++ } ++ switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { ++ case DP_TRAIN_PRE_EMPH_LEVEL_0: ++ default: ++ pre_emphasis = 0; ++ break; ++ case DP_TRAIN_PRE_EMPH_LEVEL_1: ++ pre_emphasis = 1; ++ break; ++ case DP_TRAIN_PRE_EMPH_LEVEL_2: ++ pre_emphasis = 2; ++ break; ++ case DP_TRAIN_PRE_EMPH_LEVEL_3: ++ pre_emphasis = 3; ++ break; ++ } ++ ++ 
switch (link_rate) { ++ case 810000: ++ link_rate_index = 3; ++ break; ++ case 540000: ++ link_rate_index = 2; ++ break; ++ case 270000: ++ link_rate_index = 1; ++ break; ++ case 162000: ++ link_rate_index = 0; ++ break; ++ default: ++ DRM_ERROR("phytium dp rate(%d) not support\n", link_rate); ++ link_rate_index = 2; ++ break; ++ } ++ ++ if (port == 0) { ++ phytium_phy_writel(phytium_dp, X100_PHY0_PLL0_TX_DIAG_ACYA, LOCK); ++ phytium_phy_writel(phytium_dp, X100_PHY0_PLL0_TX_TXCC_CTRL, TX_TXCC_CTRL); ++ phytium_phy_writel(phytium_dp, X100_PHY0_PLL0_TX_DRV, TX_DRV); ++ phytium_phy_writel(phytium_dp, X100_PHY0_PLL0_TX_MGNFS, ++ mgnfs_val[link_rate_index][voltage_swing][pre_emphasis]); ++ phytium_phy_writel(phytium_dp, X100_PHY0_PLL0_TX_CPOST, ++ cpost_val[link_rate_index][voltage_swing][pre_emphasis]); ++ phytium_phy_writel(phytium_dp, X100_PHY0_PLL0_TX_DIAG_ACYA, UNLOCK); ++ ++ } else if (port == 1) { ++ phytium_phy_writel(phytium_dp, X100_PHY0_PLL1_TX_DIAG_ACYA, LOCK); ++ phytium_phy_writel(phytium_dp, X100_PHY0_PLL1_TX_TXCC_CTRL, TX_TXCC_CTRL); ++ phytium_phy_writel(phytium_dp, X100_PHY0_PLL1_TX_DRV, TX_DRV); ++ phytium_phy_writel(phytium_dp, X100_PHY0_PLL1_TX_MGNFS, ++ mgnfs_val[link_rate_index][voltage_swing][pre_emphasis]); ++ phytium_phy_writel(phytium_dp, X100_PHY0_PLL1_TX_CPOST, ++ cpost_val[link_rate_index][voltage_swing][pre_emphasis]); ++ phytium_phy_writel(phytium_dp, X100_PHY0_PLL1_TX_CPOST1, ++ cpost_val[link_rate_index][voltage_swing][pre_emphasis]); ++ phytium_phy_writel(phytium_dp, X100_PHY0_PLL1_TX_DIAG_ACYA, UNLOCK); ++ } else { ++ phytium_phy_writel(phytium_dp, X100_PHY1_PLL0_TX_DIAG_ACYA, LOCK); ++ phytium_phy_writel(phytium_dp, X100_PHY1_PLL0_TX_TXCC_CTRL, TX_TXCC_CTRL); ++ phytium_phy_writel(phytium_dp, X100_PHY1_PLL0_TX_DRV, TX_DRV); ++ phytium_phy_writel(phytium_dp, X100_PHY1_PLL0_TX_MGNFS, ++ mgnfs_val[link_rate_index][voltage_swing][pre_emphasis]); ++ phytium_phy_writel(phytium_dp, X100_PHY1_PLL0_TX_CPOST, ++ 
cpost_val[link_rate_index][voltage_swing][pre_emphasis]); ++ phytium_phy_writel(phytium_dp, X100_PHY1_PLL0_TX_DIAG_ACYA, UNLOCK); ++ } ++} ++ ++static int x100_dp_hw_init_phy(struct phytium_dp_device *phytium_dp) ++{ ++ int port = phytium_dp->port; ++ int i = 0, data, tmp, mask; ++ int timeout = 500, ret = 0; ++ ++ if (port == 0 || port == 1) { ++ phytium_phy_writel(phytium_dp, X100_PHY0_APB_RESET, APB_RESET); ++ ++ phytium_phy_writel(phytium_dp, X100_PHY0_PIPE_RESET, RESET); ++ ++ /* config lane to dp mode */ ++ data = 0; ++ mask = 0; ++ for (i = 0; i < phytium_dp->source_max_lane_count; i++) { ++ data |= (LANE_BIT << i*LANE_BIT_SHIFT); ++ mask |= (((1<source_max_lane_count; i++) { ++ data |= (LANE_MASTER << i*LANE_MASTER_SHIFT); ++ mask |= (((1<source_max_lane_count; i++) { ++ data |= (PLL_EN << i*PLL_EN_SHIFT); ++ mask |= (((1<source_max_lane_count; i++) { ++ data |= (BIT_20 << i*BIT_20_SHIFT); ++ mask |= (((1<source_max_lane_count; i++) { ++ data |= (A0_ACTIVE << i*A0_ACTIVE_SHIFT); ++ mask |= (((1<dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ uint32_t group_offset = priv->dcreq_reg_base[port]; ++ int ret = 0; ++ ++ phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | PANEL_POWER_ENABLE, ++ group_offset, X100_DCREQ_CMD_REGISTER); ++ ret = phytium_wait_cmd_done(priv, group_offset + X100_DCREQ_CMD_REGISTER, ++ FLAG_REQUEST, FLAG_REPLY); ++ if (ret < 0) ++ DRM_ERROR("%s: failed to poweron panel\n", __func__); ++} ++ ++static void x100_dp_hw_poweroff_panel(struct phytium_dp_device *phytium_dp) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ uint32_t group_offset = priv->dcreq_reg_base[port]; ++ int ret = 0; ++ ++ phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | PANEL_POWER_DISABLE, ++ group_offset, X100_DCREQ_CMD_REGISTER); ++ ret = phytium_wait_cmd_done(priv, group_offset + X100_DCREQ_CMD_REGISTER, ++ 
FLAG_REQUEST, FLAG_REPLY); ++ if (ret < 0) ++ DRM_ERROR("%s: failed to poweroff panel\n", __func__); ++} ++ ++static void x100_dp_hw_enable_backlight(struct phytium_dp_device *phytium_dp) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port, ret = 0; ++ uint32_t group_offset = priv->dcreq_reg_base[port]; ++ ++ phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | BACKLIGHT_ENABLE, ++ group_offset, X100_DCREQ_CMD_REGISTER); ++ ret = phytium_wait_cmd_done(priv, group_offset + X100_DCREQ_CMD_REGISTER, ++ FLAG_REQUEST, FLAG_REPLY); ++ if (ret < 0) ++ DRM_ERROR("%s: failed to enable backlight\n", __func__); ++} ++ ++static void x100_dp_hw_disable_backlight(struct phytium_dp_device *phytium_dp) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ uint32_t group_offset = priv->dcreq_reg_base[port]; ++ int ret = 0; ++ ++ phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | BACKLIGHT_DISABLE, ++ group_offset, X100_DCREQ_CMD_REGISTER); ++ ret = phytium_wait_cmd_done(priv, group_offset + X100_DCREQ_CMD_REGISTER, ++ FLAG_REQUEST, FLAG_REPLY); ++ if (ret < 0) ++ DRM_ERROR("%s: failed to disable backlight\n", __func__); ++} ++ ++static uint32_t x100_dp_hw_get_backlight(struct phytium_dp_device *phytium_dp) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int config; ++ uint32_t group_offset = priv->address_transform_base; ++ ++ config = phytium_readl_reg(priv, group_offset, X100_DC_ADDRESS_TRANSFORM_BACKLIGHT_VALUE); ++ return ((config >> BACKLIGHT_VALUE_SHIFT) & BACKLIGHT_VALUE_MASK); ++} ++ ++static int x100_dp_hw_set_backlight(struct phytium_dp_device *phytium_dp, uint32_t level) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ uint32_t 
group_offset = priv->dcreq_reg_base[port]; ++ int config = 0; ++ int ret = 0; ++ ++ if (level > X100_DP_BACKLIGHT_MAX) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ config = FLAG_REQUEST | CMD_BACKLIGHT | ((level & BACKLIGHT_MASK) << BACKLIGHT_SHIFT); ++ phytium_writel_reg(priv, config, group_offset, X100_DCREQ_CMD_REGISTER); ++ ret = phytium_wait_cmd_done(priv, group_offset + X100_DCREQ_CMD_REGISTER, ++ FLAG_REQUEST, FLAG_REPLY); ++ if (ret < 0) ++ DRM_ERROR("%s: failed to set backlight\n", __func__); ++ ++out: ++ return ret; ++} ++ ++bool x100_dp_hw_spread_is_enable(struct phytium_dp_device *phytium_dp) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port, config; ++ uint32_t group_offset = priv->address_transform_base; ++ ++ config = phytium_readl_reg(priv, group_offset, X100_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); ++ ++ return ((config & DP_SPREAD_ENABLE(port)) ? true:false); ++} ++ ++int x100_dp_hw_reset(struct phytium_dp_device *phytium_dp) ++{ ++ struct drm_device *dev = phytium_dp->dev; ++ struct phytium_display_private *priv = dev->dev_private; ++ int port = phytium_dp->port; ++ int timeout = 100, config, ret = 0; ++ uint32_t group_offset = priv->address_transform_base; ++ ++ config = phytium_readl_reg(priv, group_offset, X100_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); ++ config &= (~DC_DP_RESET_STATUS(port)); ++ ++ phytium_writel_reg(priv, config, group_offset, X100_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); ++ phytium_writel_reg(priv, FLAG_REQUEST | CMD_DC_DP_RESET, ++ priv->dcreq_reg_base[port], X100_DCREQ_CMD_REGISTER); ++ do { ++ mdelay(10); ++ timeout--; ++ config = phytium_readl_reg(priv, group_offset, ++ X100_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); ++ if (config & DC_DP_RESET_STATUS(port)) ++ break; ++ } while (timeout); ++ if (timeout == 0) { ++ DRM_ERROR("reset dc/dp pipe(%d) failed\n", port); ++ ret = -1; ++ } ++ ++ return ret; ++} ++ ++static struct phytium_dp_func 
x100_dp_funcs = { ++ .dp_hw_reset = x100_dp_hw_reset, ++ .dp_hw_spread_is_enable = x100_dp_hw_spread_is_enable, ++ .dp_hw_set_backlight = x100_dp_hw_set_backlight, ++ .dp_hw_get_backlight = x100_dp_hw_get_backlight, ++ .dp_hw_disable_backlight = x100_dp_hw_disable_backlight, ++ .dp_hw_enable_backlight = x100_dp_hw_enable_backlight, ++ .dp_hw_poweroff_panel = x100_dp_hw_poweroff_panel, ++ .dp_hw_poweron_panel = x100_dp_hw_poweron_panel, ++ .dp_hw_init_phy = x100_dp_hw_init_phy, ++ .dp_hw_set_phy_lane_setting = x100_dp_hw_set_phy_lane_setting, ++ .dp_hw_set_phy_lane_and_rate = x100_dp_hw_set_phy_lane_and_rate, ++}; ++ ++void x100_dp_func_register(struct phytium_dp_device *phytium_dp) ++{ ++ phytium_dp->funcs = &x100_dp_funcs; ++} +diff --git a/drivers/gpu/drm/phytium/x100_dp.h b/drivers/gpu/drm/phytium/x100_dp.h +new file mode 100644 +index 000000000000..a7a0fc48a58b +--- /dev/null ++++ b/drivers/gpu/drm/phytium/x100_dp.h +@@ -0,0 +1,13 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Phytium display drm driver ++ * ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. ++ */ ++ ++#ifndef __X100_DP_H__ ++#define __X100_DP_H__ ++ ++#define X100_DP_BACKLIGHT_MAX 100 ++ ++void x100_dp_func_register(struct phytium_dp_device *phytium_dp); ++#endif /* __X100_DP_H__ */ +diff --git a/drivers/gpu/drm/phytium/x100_reg.h b/drivers/gpu/drm/phytium/x100_reg.h +new file mode 100644 +index 000000000000..130430e924b5 +--- /dev/null ++++ b/drivers/gpu/drm/phytium/x100_reg.h +@@ -0,0 +1,349 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Phytium display drm driver ++ * ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. 
++ */ ++ ++#ifndef __X100_REG_H__ ++#define __X100_REG_H__ ++ ++#include "phytium_reg.h" ++ ++/******************************dc register start******************************************/ ++#define X100_DC_CLOCK_CONTROL 0x0000 ++ #define SOFT_RESET (1<<12) ++#define X100_DC_CLOCK_IDLE 0x0004 ++ #define IS_IDLE (1<<16) ++/******************************dc register end********************************************/ ++ ++/******************************dcreq register start**************************************/ ++#define X100_DCREQ_PLANE0_ADDR_START 0x00 ++#define X100_DCREQ_PLANE0_ADDR_END 0x04 ++#define X100_DCREQ_PLANE1_ADDR_START 0x08 ++#define X100_DCREQ_PLANE1_ADDR_END 0x0c ++#define X100_DCREQ_PLANE0_CONFIG 0x10 ++ #define DCREQ_NO_LOSSY (0 << 0) ++ #define DCREQ_LOSSY (1 << 0) ++ #define DCREQ_TILE_TYPE_MASK (0x3 << 1) ++ #define DCREQ_TILE_TYPE_MODE0 (0x1 << 1) ++ #define DCREQ_TILE_TYPE_MODE3 (0x2 << 1) ++ #define DCREQ_COLOURFORMAT_MASK (0x7f << 8) ++ #define DCREQ_COLOURFORMAT_RGB565 (0x5 << 8) ++ #define DCREQ_COLOURFORMAT_ARGB1555 (0x4 << 8) ++ #define DCREQ_COLOURFORMAT_ARGB4444 (0x02 << 8) ++ #define DCREQ_COLOURFORMAT_BGRA8888 (0x29 << 8) ++ #define DCREQ_COLOURFORMAT_ARGB2101010 (0xe << 8) ++ #define DCREQ_COLOURFORMAT_YUYV (0x59 << 8) ++ #define DCREQ_COLOURFORMAT_UYVY (0x5b << 8) ++ #define DCREQ_ARGBSWIZZLE_MASK (0xf << 4) ++ #define DCREQ_ARGBSWIZZLE_ARGB (0X0 << 4) ++ #define DCREQ_ARGBSWIZZLE_BGRA (0XC << 4) ++ #define DCREQ_MODE_MASK (1 << 16) ++ #define DCREQ_MODE_LINEAR (0 << 16) ++ #define DCREQ_MODE_TILE (1 << 16) ++#define X100_DCREQ_PLANE1_CONFIG(pipe) 0x14 ++#define X100_DCREQ_PLANE0_CLEAR_COLOR_L 0x18 ++#define X100_DCREQ_PLANE0_CLEAR_COLOR_H 0x1C ++#define X100_DCREQ_PLANE1_CLEAR_COLOR_L 0x20 ++#define X100_DCREQ_PLANE1_CLEAR_COLOR_H 0x24 ++#define X100_DCREQ_CMD_REGISTER 0x38 ++ #define FLAG_REPLY (1<<31) ++ #define FLAG_REQUEST (1<<30) ++ #define CMD_PIXEL_CLOCK (0x0 << 28) ++ #define CMD_BACKLIGHT (0x1 << 28) ++ #define CMD_DC_DP_RESET 
(0x3 << 28) ++ #define BACKLIGHT_SHIFT 21 ++ #define BACKLIGHT_MASK 0x7f ++ #define BACKLIGHT_MAX 100 ++ #define BACKLIGHT_ENABLE (101 << BACKLIGHT_SHIFT) ++ #define BACKLIGHT_DISABLE (102 << BACKLIGHT_SHIFT) ++ #define PANEL_POWER_ENABLE (103 << BACKLIGHT_SHIFT) ++ #define PANEL_POWER_DISABLE (104 << BACKLIGHT_SHIFT) ++ #define PIXEL_CLOCK_MASK (0x1fffff) ++#define X100_DCREQ_FBCD_CLOCK_CONFIG 0x3c ++#define X100_DCREQ_PIX_DMA_PREFIX 0x50 ++ #define PREFIX_MASK 0xff ++ #define PREFIX_SHIFT 32 ++#define X100_DCREQ_FRAME_START 0x54 ++#define X100_DCREQ_FILTER_CONFIG 0x58 ++#define X100_DCREQ_CONTROL 0x5C ++ #define DC_REQ_ENABLE (1<<0) ++#define X100_DCREQ_MSI_CLEAR 0x60 ++ #define MSI_CLEAR 0x0 ++#define X100_DCREQ_RESET 0x68 ++ #define DCREQ_RESET (0x3 << 0) ++ #define DCREQ_RESET_MASK 0x3 ++#define X100_DCREQ_PLAN 0x94 ++ #define DCREQ_PLAN_A 0x0 ++ #define DCREQ_PLAN_B 0X5 ++/******************************dcreq register end**************************************/ ++ ++/******************************address transform register start**************************/ ++#define X100_GPU_ADDRESS_TRANSFORM_SRC_ADDR 0x0 ++#define X100_GPU_ADDRESS_TRANSFORM_SIZE 0x4 ++#define X100_GPU_ADDRESS_TRANSFORM_DST_ADDR 0x8 ++ ++#define X100_DC_ADDRESS_TRANSFORM_SRC_ADDR 0x24 ++ #define SRC_ADDR_OFFSET 22 ++ #define SRC_ADDR_MASK 0xffffffffff ++#define X100_DC_ADDRESS_TRANSFORM_SIZE 0x28 ++ #define ADDRESS_TRANSFORM_ENABLE (0x1 << 31) ++ #define SIZE_OFFSET 22 ++#define X100_DC_ADDRESS_TRANSFORM_DST_ADDR 0x2c ++ #define DST_ADDR_OFFSET 22 ++#define X100_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS 0x48 ++ #define DC_DP_RESET_STATUS(pipe) (1 << pipe) ++ #define DP_SPREAD_ENABLE(pipe) (0x8 << pipe) ++#define X100_DC_ADDRESS_TRANSFORM_BACKLIGHT_VALUE 0x4c ++ #define BACKLIGHT_VALUE_MASK (0x7f) ++ #define BACKLIGHT_VALUE_SHIFT 16 ++/******************************address transform register end**************************/ ++ ++/******************************phy register 
start******************************************/ ++/* self define */ ++#define X100_PHY0_PIPE_RESET 0x40104 ++ #define RESET 0x0 ++ #define RESET_DEASSERT 0x1 ++#define X100_PHY1_PIPE_RESET 0x100100 ++ #define PHY1_PIPE_RESET 0x0 ++ #define PHY1_PIPE_RESET_DEASSERT 0x4 ++ ++#define X100_PHY1_EN_REFCLK 0x100070 ++ ++#define X100_PHY0_MODE 0x40088 ++ #define LANE_BIT (0x3) ++ #define LANE_BIT_SHIFT 0x2 ++#define X100_PHY1_SEL 0x100004 ++ #define PHY1_DP_LANE_BIT 0x1 ++ #define PHY1_DP_LANE_BIT_SHIFT 2 ++ ++#define X100_PHY0_LINK_CFG 0x40044 ++ #define LANE_MASTER 0x1 ++ #define LANE_MASTER_SHIFT 1 ++ ++#define X100_PHY0_PLL_EN 0x40010 ++ #define PLL_EN 0x1 ++ #define PLL_EN_SHIFT 1 ++#define X100_PHY0_PMA_WIDTH 0x40020 ++ #define BIT_20 0x5 ++ #define BIT_20_SHIFT 4 ++ ++#define X100_PHY0_PMA0_POWER 0x40014 ++#define X100_PHY0_PMA1_POWER 0x40018 ++ #define A0_ACTIVE 0x1 ++ #define A0_ACTIVE_SHIFT 8 ++ #define A3_POWERDOWN3 0x8 ++ #define A3_POWERDOWN3_SHIFT 8 ++ ++#define X100_PHY1_PMA_MISC 0x1000a0 ++ #define PHY1_PLL_EN 0x1 ++ #define PHY1_PLL_EN_MASK 1 ++ #define PHY1_PLL_EN_SHIFT 8 ++ #define PHY1_BIT_20 0x5 ++ #define PHY1_BIT_20_SHIFT 9 ++ #define PHY1_A0_ACTIVE 0x1 ++ #define PHY1_A0_ACTIVE_SHIFT 2 ++ #define PHY1_A0_ACTIVE_MASK 0x3f ++ #define PHY1_A3_POWERDOWN3 0x8 ++ #define PHY1_A3_POWERDOWN3_MASK 0x3f ++ #define PHY1_A3_POWERDOWN3_SHIFT 2 ++ ++#define X100_PHY0_LINK_RESET 0x40108 ++ #define LINK_RESET 0x1 ++ #define LINK_RESET_MASK 0x1 ++ #define LINTK_RESET_SHIFT 0x1 ++ ++#define X100_PHY0_APB_RESET 0x40100 ++ #define APB_RESET 0x1 ++#define X100_PHY1_APB_RESET 0x100104 ++ #define PHY1_APB_RESET 0x4 ++ ++/* phy origin register */ ++#define X100_PHY0_PLL_CFG 0x30038 ++#define X100_PHY1_PLL_CFG 0xb0038 ++ #define SINGLE_LINK 0x0 ++ #define DOUBLE_LINK 0x2 ++ ++#define X100_PHY0_PMA_CONTROL 0x3800c ++#define X100_PHY1_PMA_CONTROL 0xb800c ++ #define CONTROL_ENABLE 0x1 ++ #define CONTROL_ENABLE_MASK 0x1 ++ #define CONTROL_ENABLE_SHIFT 0x1 ++ ++#define 
X100_PHY0_PMA_CONTROL2 0x38004 ++#define X100_PHY1_PMA_CONTROL2 0xb8004 ++ #define PLL0_LOCK_DONE (0x1 << 6) ++ #define PLL1_LOCK_DONE (0x1 << 7) ++ ++#define X100_PHY0_PLL0_CLK_SEL 0X684 ++#define X100_PHY0_PLL1_CLK_SEL 0x704 ++#define X100_PHY1_PLL_CLK_SEL 0X80684 ++ #define PLL_LINK_RATE_162000 0xf01 ++ #define PLL_LINK_RATE_270000 0x701 ++ #define PLL_LINK_RATE_540000 0x301 ++ #define PLL_LINK_RATE_810000 0x200 ++ ++#define X100_PHY0_HSCLK0_SEL 0x18398 ++#define X100_PHY0_HSCLK1_SEL 0x1a398 ++#define X100_PHY1_HSCLK_SEL 0x90398 ++ #define HSCLK_LINK_0 0x0 ++ #define HSCLK_LINK_1 0x1 ++ ++#define X100_PHY0_HSCLK0_DIV 0x1839c ++#define X100_PHY0_HSCLK1_DIV 0x1a39c ++#define X100_PHY1_HSCLK_DIV 0x9039c ++ #define HSCLK_LINK_RATE_162000 0x2 ++ #define HSCLK_LINK_RATE_270000 0x1 ++ #define HSCLK_LINK_RATE_540000 0x0 ++ #define HSCLK_LINK_RATE_810000 0x0 ++ ++#define X100_PHY0_PLLDRC0_CTRL 0x18394 ++#define X100_PHY0_PLLDRC1_CTRL 0x1a394 ++#define X100_PHY1_PLLDRC_CTRL 0x90394 ++ #define PLLDRC_LINK0 0x1 ++ #define PLLDRC_LINK1 0x9 ++ ++#define X100_PHY0_PLL0_DSM_M0 0x250 ++#define X100_PHY1_PLL0_DSM_M0 0x80250 ++ #define PLL0_DSM_M0 0x4 ++#define X100_PHY0_PLL0_VCOCAL_START 0x218 ++#define X100_PHY1_PLL0_VCOCAL_START 0x80218 ++ #define PLL0_VCOCAL_START 0xc5e ++#define X100_PHY0_PLL0_VCOCAL_CTRL 0x208 ++#define X100_PHY1_PLL0_VCOCAL_CTRL 0x80208 ++ #define PLL0_VCOCAL_CTRL 0x3 ++ ++#define X100_PHY0_PLL1_DSM_M0 0x350 ++ #define PLL1_DSM_M0 0x4 ++#define X100_PHY0_PLL1_VCOCAL_START 0x318 ++ #define PLL1_VCOCAL_START 0xc5e ++#define X100_PHY0_PLL1_VCOCAL_CTRL 0x308 ++ #define PLL1_VCOCAL_CTRL 0x3 ++ ++#define X100_PHY0_PLL0_CP_PADJ 0x690 ++#define X100_PHY0_PLL0_CP_IADJ 0x694 ++#define X100_PHY0_PLL0_CP_FILT_PADJ 0x698 ++#define X100_PHY0_PLL0_INTDIV 0x240 ++#define X100_PHY0_PLL0_FRACDIVL 0x244 ++#define X100_PHY0_PLL0_FRACDIVH 0x248 ++#define X100_PHY0_PLL0_HIGH_THR 0x24c ++#define X100_PHY0_PLL0_PDIAG_CTRL 0x680 ++#define X100_PHY0_PLL0_VCOCAL_PLLCNT_START 0x220 
++#define X100_PHY0_PLL0_LOCK_PEFCNT 0x270 ++#define X100_PHY0_PLL0_LOCK_PLLCNT_START 0x278 ++#define X100_PHY0_PLL0_LOCK_PLLCNT_THR 0x27c ++ ++#define X100_PHY0_PLL1_CP_PADJ 0x710 ++#define X100_PHY0_PLL1_CP_IADJ 0x714 ++#define X100_PHY0_PLL1_CP_FILT_PADJ 0x718 ++#define X100_PHY0_PLL1_INTDIV 0x340 ++#define X100_PHY0_PLL1_FRACDIVL 0x344 ++#define X100_PHY0_PLL1_FRACDIVH 0x348 ++#define X100_PHY0_PLL1_HIGH_THR 0x34c ++#define X100_PHY0_PLL1_PDIAG_CTRL 0x700 ++#define X100_PHY0_PLL1_VCOCAL_PLLCNT_START 0x320 ++#define X100_PHY0_PLL1_LOCK_PEFCNT 0x370 ++#define X100_PHY0_PLL1_LOCK_PLLCNT_START 0x378 ++#define X100_PHY0_PLL1_LOCK_PLLCNT_THR 0x37c ++ ++#define X100_PHY1_PLL0_CP_PADJ 0x80690 ++#define X100_PHY1_PLL0_CP_IADJ 0x80694 ++#define X100_PHY1_PLL0_CP_FILT_PADJ 0x80698 ++#define X100_PHY1_PLL0_INTDIV 0x80240 ++#define X100_PHY1_PLL0_FRACDIVL 0x80244 ++#define X100_PHY1_PLL0_FRACDIVH 0x80248 ++#define X100_PHY1_PLL0_HIGH_THR 0x8024c ++#define X100_PHY1_PLL0_PDIAG_CTRL 0x80680 ++#define X100_PHY1_PLL0_VCOCAL_PLLCNT_START 0x80220 ++#define X100_PHY1_PLL0_LOCK_PEFCNT 0x80270 ++#define X100_PHY1_PLL0_LOCK_PLLCNT_START 0x80278 ++#define X100_PHY1_PLL0_LOCK_PLLCNT_THR 0x8027c ++ ++#define X100_PHY0_PLL0_TX_PSC_A0 0x18400 ++#define X100_PHY1_PLL0_TX_PSC_A0 0x90400 ++ #define PLL0_TX_PSC_A0 0xfb ++#define X100_PHY0_PLL0_TX_PSC_A2 0x18408 ++#define X100_PHY1_PLL0_TX_PSC_A2 0x90408 ++ #define PLL0_TX_PSC_A2 0x4aa ++#define X100_PHY0_PLL0_TX_PSC_A3 0x1840c ++#define X100_PHY1_PLL0_TX_PSC_A3 0x9040c ++ #define PLL0_TX_PSC_A3 0x4aa ++#define X100_PHY0_PLL0_RX_PSC_A0 0x28000 ++#define X100_PHY1_PLL0_RX_PSC_A0 0xa0000 ++ #define PLL0_RX_PSC_A0 0x0 ++#define X100_PHY0_PLL0_RX_PSC_A2 0x28008 ++#define X100_PHY1_PLL0_RX_PSC_A2 0xa0008 ++ #define PLL0_RX_PSC_A2 0x0 ++#define X100_PHY0_PLL0_RX_PSC_A3 0x2800C ++#define X100_PHY1_PLL0_RX_PSC_A3 0xa000C ++ #define PLL0_RX_PSC_A3 0x0 ++#define X100_PHY0_PLL0_RX_PSC_CAL 0x28018 ++#define X100_PHY1_PLL0_RX_PSC_CAL 0xa0018 ++ #define 
PLL0_RX_PSC_CAL 0x0 ++ ++#define X100_PHY0_PLL1_TX_PSC_A0 0x1a400 ++ #define PLL1_TX_PSC_A0 0xfb ++#define X100_PHY0_PLL1_TX_PSC_A2 0x1a408 ++ #define PLL1_TX_PSC_A2 0x4aa ++#define X100_PHY0_PLL1_TX_PSC_A3 0x1a40c ++ #define PLL1_TX_PSC_A3 0x4aa ++#define X100_PHY0_PLL1_RX_PSC_A0 0x2a000 ++ #define PLL1_RX_PSC_A0 0x0 ++#define X100_PHY0_PLL1_RX_PSC_A2 0x2a008 ++ #define PLL1_RX_PSC_A2 0x0 ++#define X100_PHY0_PLL1_RX_PSC_A3 0x2a00C ++ #define PLL1_RX_PSC_A3 0x0 ++#define X100_PHY0_PLL1_RX_PSC_CAL 0x2a018 ++ #define PLL1_RX_PSC_CAL 0x0 ++ ++#define X100_PHY0_PLL0_XCVR_CTRL 0x183a8 ++#define X100_PHY1_PLL0_XCVR_CTRL 0x903a8 ++ #define PLL0_XCVR_CTRL 0xf ++#define X100_PHY0_PLL1_XCVR_CTRL 0x1a3a8 ++ #define PLL1_XCVR_CTRL 0xf ++ ++#define X100_PHY0_PLL0_RX_GCSM1_CTRL 0x28420 ++#define X100_PHY1_PLL0_RX_GCSM1_CTRL 0xa0420 ++ #define PLL0_RX_GCSM1_CTRL 0x0 ++#define X100_PHY0_PLL0_RX_GCSM2_CTRL 0x28440 ++#define X100_PHY1_PLL0_RX_GCSM2_CTRL 0xa0440 ++ #define PLL0_RX_GCSM2_CTRL 0x0 ++#define X100_PHY0_PLL0_RX_PERGCSM_CTRL 0x28460 ++#define X100_PHY1_PLL0_RX_PERGCSM_CTRL 0xa0460 ++ #define PLL0_RX_PERGCSM_CTRL 0x0 ++ ++#define X100_PHY0_PLL1_RX_GCSM1_CTRL 0x2a420 ++ #define PLL1_RX_GCSM1_CTRL 0x0 ++#define X100_PHY0_PLL1_RX_GCSM2_CTRL 0x2a440 ++ #define PLL1_RX_GCSM2_CTRL 0x0 ++#define X100_PHY0_PLL1_RX_PERGCSM_CTRL 0x2a460 ++ #define PLL1_RX_PERGCSM_CTRL 0x0 ++ ++/* swing and emphasis */ ++#define X100_PHY0_PLL0_TX_DIAG_ACYA 0x1879c ++#define X100_PHY0_PLL1_TX_DIAG_ACYA 0x1a79c ++#define X100_PHY1_PLL0_TX_DIAG_ACYA 0x9079c ++ #define LOCK 1 ++ #define UNLOCK 0 ++ ++#define X100_PHY0_PLL0_TX_TXCC_CTRL 0x18100 ++#define X100_PHY0_PLL1_TX_TXCC_CTRL 0x1a100 ++#define X100_PHY1_PLL0_TX_TXCC_CTRL 0x90100 ++ #define TX_TXCC_CTRL 0x8a4 ++ ++#define X100_PHY0_PLL0_TX_DRV 0x18318 ++#define X100_PHY0_PLL1_TX_DRV 0x1a318 ++#define X100_PHY1_PLL0_TX_DRV 0x90318 ++ #define TX_DRV 0x3 ++ ++#define X100_PHY0_PLL0_TX_MGNFS 0x18140 ++#define X100_PHY0_PLL1_TX_MGNFS 0x1a140 ++#define 
X100_PHY1_PLL0_TX_MGNFS 0x90140 ++ ++#define X100_PHY0_PLL0_TX_CPOST 0x18130 ++#define X100_PHY0_PLL1_TX_CPOST 0x1a130 ++#define X100_PHY0_PLL1_TX_CPOST1 0x1a13c ++#define X100_PHY1_PLL0_TX_CPOST 0x90130 ++ ++/******************************phy register end********************************************/ ++#endif /* __X100_REG_H__ */ +diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig +index 451d4ae50e66..ca1e2e109220 100644 +--- a/drivers/i2c/busses/Kconfig ++++ b/drivers/i2c/busses/Kconfig +@@ -529,6 +529,21 @@ config I2C_DESIGNWARE_BAYTRAIL + the platform firmware controlling it. You should say Y if running on + a BayTrail system using the AXP288. + ++config I2C_PHYTIUM_CORE ++ tristate ++ ++config I2C_PHYTIUM_PCI ++ tristate "Phytium I2C PCI" ++ depends on PCI && ARCH_PHYTIUM ++ select I2C_PHYTIUM_CORE ++ select I2C_SMBUS ++ help ++ If you say yes to this option, support will be included for the ++ Phytium I2C adapter. Only master mode is supported. ++ ++ This driver can also be built as a module. If so, the module ++ will be called i2c-phytium-pci. 
++ + config I2C_DIGICOLOR + tristate "Conexant Digicolor I2C driver" + depends on ARCH_DIGICOLOR +diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile +index 18b26af82b1c..2ea9bbfddb30 100644 +--- a/drivers/i2c/busses/Makefile ++++ b/drivers/i2c/busses/Makefile +@@ -53,6 +53,9 @@ i2c-designware-platform-$(CONFIG_I2C_DESIGNWARE_BAYTRAIL) += i2c-designware-bayt + obj-$(CONFIG_I2C_DESIGNWARE_PCI) += i2c-designware-pci.o + i2c-designware-pci-objs := i2c-designware-pcidrv.o + obj-$(CONFIG_I2C_DIGICOLOR) += i2c-digicolor.o ++obj-$(CONFIG_I2C_PHYTIUM_CORE) += i2c-phytium-core.o ++i2c-phytium-core-objs := i2c-phytium-common.o i2c-phytium-master.o ++obj-$(CONFIG_I2C_PHYTIUM_PCI) += i2c-phytium-pci.o + obj-$(CONFIG_I2C_EFM32) += i2c-efm32.o + obj-$(CONFIG_I2C_EG20T) += i2c-eg20t.o + obj-$(CONFIG_I2C_EMEV2) += i2c-emev2.o +diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c +index b5750fd85125..3818b7a0a847 100644 +--- a/drivers/i2c/busses/i2c-designware-platdrv.c ++++ b/drivers/i2c/busses/i2c-designware-platdrv.c +@@ -151,6 +151,7 @@ static const struct acpi_device_id dw_i2c_acpi_match[] = { + { "APMC0D0F", 0 }, + { "HISI02A1", 0 }, + { "HISI02A2", 0 }, ++ { "PHYT0003", 0 }, + { } + }; + MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match); +diff --git a/drivers/i2c/busses/i2c-phytium-common.c b/drivers/i2c/busses/i2c-phytium-common.c +new file mode 100644 +index 000000000000..7a6f0ca75299 +--- /dev/null ++++ b/drivers/i2c/busses/i2c-phytium-common.c +@@ -0,0 +1,152 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ++ * Phytium I2C adapter driver. ++ * ++ * Based on the TI DAVINCI I2C adapter driver. ++ * ++ * Copyright (C) 2021,Phytium Technology Co.,Ltd. 
++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "i2c-phytium-core.h" ++ ++static char *abort_sources[] = { ++ [ABRT_7B_ADDR_NOACK] = ++ "slave address not acknowledged (7bit mode)", ++ [ABRT_10ADDR1_NOACK] = ++ "first address byte not acknowledged (10bit mode)", ++ [ABRT_10ADDR2_NOACK] = ++ "second address byte not acknowledged (10bit mode)", ++ [ABRT_TXDATA_NOACK] = ++ "data not acknowledged", ++ [ABRT_GCALL_NOACK] = ++ "no acknowledgement for a general call", ++ [ABRT_GCALL_READ] = ++ "read after general call", ++ [ABRT_SBYTE_ACKDET] = ++ "start byte acknowledged", ++ [ABRT_SBYTE_NORSTRT] = ++ "trying to send start byte when restart is disabled", ++ [ABRT_10B_RD_NORSTRT] = ++ "trying to read when restart is disabled (10bit mode)", ++ [ABRT_MASTER_DIS] = ++ "trying to use disabled adapter", ++ [ARB_LOST] = ++ "lost arbitration", ++ [ABRT_SLAVE_FLUSH_TXFIFO] = ++ "read command so flush old data in the TX FIFO", ++ [ABRT_SLAVE_ARBLOST] = ++ "slave lost the bus while transmitting data to a remote master", ++ [ABRT_SLAVE_RD_INTX] = ++ "incorrect slave-transmitter mode configuration", ++}; ++ ++u32 phytium_readl(struct phytium_i2c_dev *dev, int offset) ++{ ++ return readl_relaxed(dev->base + offset); ++} ++ ++void phytium_writel(struct phytium_i2c_dev *dev, u32 b, int offset) ++{ ++ writel_relaxed(b, dev->base + offset); ++} ++ ++void __i2c_phytium_disable(struct phytium_i2c_dev *dev) ++{ ++ int timeout = 100; ++ ++ do { ++ __i2c_phytium_disable_nowait(dev); ++ if ((phytium_readl(dev, IC_ENABLE_STATUS) & 1) == 0) ++ return; ++ ++ /* ++ * Wait 10 times the signaling period of the highest I2C ++ * transfer supported by the driver (for 400KHz this is ++ * 25us). 
++ */ ++ usleep_range(25, 250); ++ } while (timeout--); ++ ++ dev_warn(dev->dev, "timeout in disabling adapter\n"); ++} ++ ++int i2c_phytium_wait_bus_not_busy(struct phytium_i2c_dev *dev) ++{ ++ int timeout = 20; /* 20 ms */ ++ ++ while (phytium_readl(dev, IC_STATUS) & IC_STATUS_ACTIVITY) { ++ if (timeout <= 0) { ++ dev_warn(dev->dev, "timeout waiting for bus ready\n"); ++ i2c_recover_bus(&dev->adapter); ++ ++ if (phytium_readl(dev, IC_STATUS) & IC_STATUS_ACTIVITY) ++ return -ETIMEDOUT; ++ return 0; ++ } ++ timeout--; ++ usleep_range(1000, 1100); ++ } ++ ++ return 0; ++} ++ ++int i2c_phytium_handle_tx_abort(struct phytium_i2c_dev *dev) ++{ ++ unsigned long abort_source = dev->abort_source; ++ int i; ++ ++ if (abort_source & IC_TX_ABRT_NOACK) { ++ for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources)) ++ dev_dbg(dev->dev, ++ "%s: %s\n", __func__, abort_sources[i]); ++ return -EREMOTEIO; ++ } ++ ++ for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources)) ++ dev_err(dev->dev, "%s: %s\n", __func__, abort_sources[i]); ++ ++ if (abort_source & IC_TX_ARB_LOST) ++ return -EAGAIN; ++ else if (abort_source & IC_TX_ABRT_GCALL_READ) ++ return -EINVAL; ++ else ++ return -EIO; ++ ++ return 0; ++} ++ ++u32 i2c_phytium_func(struct i2c_adapter *adapter) ++{ ++ struct phytium_i2c_dev *dev = i2c_get_adapdata(adapter); ++ ++ return dev->functionality; ++} ++ ++void i2c_phytium_disable(struct phytium_i2c_dev *dev) ++{ ++ /* Disable controller */ ++ __i2c_phytium_disable(dev); ++ ++ /* Disable all interupts */ ++ phytium_writel(dev, 0, IC_INTR_MASK); ++ phytium_readl(dev, IC_CLR_INTR); ++} ++ ++void i2c_phytium_disable_int(struct phytium_i2c_dev *dev) ++{ ++ phytium_writel(dev, 0, IC_INTR_MASK); ++} ++ ++MODULE_AUTHOR("Cheng Quan "); ++MODULE_DESCRIPTION("Phytium I2C bus adapter core"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/i2c/busses/i2c-phytium-core.h b/drivers/i2c/busses/i2c-phytium-core.h +new file mode 100644 +index 000000000000..651c298362a5 +--- /dev/null 
++++ b/drivers/i2c/busses/i2c-phytium-core.h +@@ -0,0 +1,233 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Phytium I2C adapter driver. ++ * ++ * Copyright (C) 2021, Phytium Technology Co.,Ltd. ++ */ ++ ++#include ++#include ++#include ++ ++#define IC_DEFAULT_FUNCTIONALITY (I2C_FUNC_I2C | \ ++ I2C_FUNC_SMBUS_BYTE | \ ++ I2C_FUNC_SMBUS_BYTE_DATA | \ ++ I2C_FUNC_SMBUS_WORD_DATA | \ ++ I2C_FUNC_SMBUS_BLOCK_DATA | \ ++ I2C_FUNC_SMBUS_I2C_BLOCK) ++ ++#define IC_CON_MASTER 0x1 ++#define IC_CON_SPEED_STD 0x2 ++#define IC_CON_SPEED_FAST 0x4 ++#define IC_CON_SPEED_HIGH 0x6 ++#define IC_CON_SPEED_MASK 0x6 ++#define IC_CON_10BITADDR_SLAVE 0x8 ++#define IC_CON_10BITADDR_MASTER 0x10 ++#define IC_CON_RESTART_EN 0x20 ++#define IC_CON_SLAVE_DISABLE 0x40 ++#define IC_CON_STOP_DET_IFADDRESSED 0x80 ++#define IC_CON_TX_EMPTY_CTRL 0x100 ++#define IC_CON_RX_FIFO_FULL_HLD_CTRL 0x200 ++ ++#define IC_CON 0x0 ++#define IC_TAR 0x4 ++#define IC_SAR 0x8 ++#define IC_DATA_CMD 0x10 ++#define IC_SS_SCL_HCNT 0x14 ++#define IC_SS_SCL_LCNT 0x18 ++#define IC_FS_SCL_HCNT 0x1c ++#define IC_FS_SCL_LCNT 0x20 ++#define IC_HS_SCL_HCNT 0x24 ++#define IC_HS_SCL_LCNT 0x28 ++#define IC_INTR_STAT 0x2c ++#define IC_INTR_MASK 0x30 ++#define IC_RAW_INTR_STAT 0x34 ++#define IC_RX_TL 0x38 ++#define IC_TX_TL 0x3c ++#define IC_CLR_INTR 0x40 ++#define IC_CLR_RX_UNDER 0x44 ++#define IC_CLR_RX_OVER 0x48 ++#define IC_CLR_TX_OVER 0x4c ++#define IC_CLR_RD_REQ 0x50 ++#define IC_CLR_TX_ABRT 0x54 ++#define IC_CLR_RX_DONE 0x58 ++#define IC_CLR_ACTIVITY 0x5c ++#define IC_CLR_STOP_DET 0x60 ++#define IC_CLR_START_DET 0x64 ++#define IC_CLR_GEN_CALL 0x68 ++#define IC_ENABLE 0x6c ++#define IC_STATUS 0x70 ++#define IC_TXFLR 0x74 ++#define IC_RXFLR 0x78 ++#define IC_SDA_HOLD 0x7c ++#define IC_TX_ABRT_SOURCE 0x80 ++#define IC_ENABLE_STATUS 0x9c ++#define IC_SMBCLK_LOW_MEXT 0xa8 ++#define IC_SMBCLK_LOW_TIMEOUT 0xac ++#define IC_SMBDAT_STUCK_TIMEOUT 0xb4 ++#define IC_CLR_SMBCLK_EXT_LOW_TIMEOUT 0xbc ++#define 
IC_CLR_SMBCLK_TMO_LOW_TIMEOUT 0xc0 ++#define IC_CLR_SMBDAT_LOW_TIMEOUT 0xc4 ++#define IC_CLR_SMBALERT_IN_N 0xd0 ++#define IC_COMP_PARAM_1 0xf4 ++ ++#define IC_INTR_RX_UNDER 0x001 ++#define IC_INTR_RX_OVER 0x002 ++#define IC_INTR_RX_FULL 0x004 ++#define IC_INTR_TX_OVER 0x008 ++#define IC_INTR_TX_EMPTY 0x010 ++#define IC_INTR_RD_REQ 0x020 ++#define IC_INTR_TX_ABRT 0x040 ++#define IC_INTR_RX_DONE 0x080 ++#define IC_INTR_ACTIVITY 0x100 ++#define IC_INTR_STOP_DET 0x200 ++#define IC_INTR_START_DET 0x400 ++#define IC_INTR_GEN_CALL 0x800 ++#define IC_INTR_SMBCLK_EXT_LOW_TIMEOUT 0x1000 ++#define IC_INTR_SMBCLK_TMO_LOW_TIMEOUT 0x2000 ++#define IC_INTR_SMBSDA_LOW_TIMEOUT 0x4000 ++#define IC_INTR_SMBALERT_IN_N 0x20000 ++ ++#define IC_INTR_DEFAULT_MASK (IC_INTR_RX_FULL | \ ++ IC_INTR_TX_ABRT | \ ++ IC_INTR_STOP_DET) ++#define IC_INTR_MASTER_MASK (IC_INTR_DEFAULT_MASK | \ ++ IC_INTR_TX_EMPTY) ++#define IC_INTR_SMBUS_MASK (IC_INTR_MASTER_MASK | \ ++ IC_INTR_SMBCLK_EXT_LOW_TIMEOUT | \ ++ IC_INTR_SMBCLK_TMO_LOW_TIMEOUT | \ ++ IC_INTR_SMBSDA_LOW_TIMEOUT) ++ ++#define IC_STATUS_ACTIVITY 0x1 ++#define IC_STATUS_TFE BIT(2) ++#define IC_STATUS_MASTER_ACTIVITY BIT(5) ++#define IC_STATUS_SLAVE_ACTIVITY BIT(6) ++ ++#define IC_SDA_HOLD_RX_SHIFT 16 ++#define IC_SDA_HOLD_RX_MASK GENMASK(23, IC_SDA_HOLD_RX_SHIFT) ++ ++#define IC_ERR_TX_ABRT 0x1 ++ ++#define IC_TAR_10BITADDR_MASTER BIT(12) ++ ++#define IC_COMP_PARAM_1_SPEED_MODE_HIGH (BIT(2) | BIT(3)) ++#define IC_COMP_PARAM_1_SPEED_MODE_MASK GENMASK(3, 2) ++ ++#define STATUS_IDLE 0x0 ++#define STATUS_WRITE_IN_PROGRESS 0x1 ++#define STATUS_READ_IN_PROGRESS 0x2 ++ ++#define ABRT_7B_ADDR_NOACK 0 ++#define ABRT_10ADDR1_NOACK 1 ++#define ABRT_10ADDR2_NOACK 2 ++#define ABRT_TXDATA_NOACK 3 ++#define ABRT_GCALL_NOACK 4 ++#define ABRT_GCALL_READ 5 ++#define ABRT_SBYTE_ACKDET 7 ++#define ABRT_SBYTE_NORSTRT 9 ++#define ABRT_10B_RD_NORSTRT 10 ++#define ABRT_MASTER_DIS 11 ++#define ARB_LOST 12 ++#define ABRT_SLAVE_FLUSH_TXFIFO 13 ++#define 
ABRT_SLAVE_ARBLOST 14 ++#define ABRT_SLAVE_RD_INTX 15 ++ ++#define IC_TX_ABRT_7B_ADDR_NOACK (1UL << ABRT_7B_ADDR_NOACK) ++#define IC_TX_ABRT_10ADDR1_NOACK (1UL << ABRT_10ADDR1_NOACK) ++#define IC_TX_ABRT_10ADDR2_NOACK (1UL << ABRT_10ADDR2_NOACK) ++#define IC_TX_ABRT_TXDATA_NOACK (1UL << ABRT_TXDATA_NOACK) ++#define IC_TX_ABRT_GCALL_NOACK (1UL << ABRT_GCALL_NOACK) ++#define IC_TX_ABRT_GCALL_READ (1UL << ABRT_GCALL_READ) ++#define IC_TX_ABRT_SBYTE_ACKDET (1UL << ABRT_SBYTE_ACKDET) ++#define IC_TX_ABRT_SBYTE_NORSTRT (1UL << ABRT_SBYTE_NORSTRT) ++#define IC_TX_ABRT_10B_RD_NORSTRT (1UL << ABRT_10B_RD_NORSTRT) ++#define IC_TX_ABRT_MASTER_DIS (1UL << ABRT_MASTER_DIS) ++#define IC_TX_ARB_LOST (1UL << ARB_LOST) ++#define IC_RX_ABRT_SLAVE_RD_INTX (1UL << ABRT_SLAVE_RD_INTX) ++#define IC_RX_ABRT_SLAVE_ARBLOST (1UL << ABRT_SLAVE_ARBLOST) ++#define IC_RX_ABRT_SLAVE_FLUSH_TXFIFO (1UL << ABRT_SLAVE_FLUSH_TXFIFO) ++ ++#define IC_TX_ABRT_NOACK (IC_TX_ABRT_7B_ADDR_NOACK | \ ++ IC_TX_ABRT_10ADDR1_NOACK | \ ++ IC_TX_ABRT_10ADDR2_NOACK | \ ++ IC_TX_ABRT_TXDATA_NOACK | \ ++ IC_TX_ABRT_GCALL_NOACK) ++#define CONTROLLER_TYPE_IIC 0 ++#define CONTROLLER_TYPE_SMBUS 1 ++ ++struct phytium_i2c_dev { ++ struct device *dev; ++ void __iomem *base; ++ int irq; ++ u32 flags; ++ struct completion cmd_complete; ++ u32 (*get_clk_rate_khz)(struct phytium_i2c_dev *dev); ++ ++ struct i2c_adapter adapter; ++ struct i2c_client *ara; ++ struct i2c_smbus_alert_setup alert_data; ++ ++ struct phytium_pci_i2c *controller; ++ ++ unsigned int status; ++ int cmd_err; ++ u32 abort_source; ++ ++ struct i2c_msg *msgs; ++ int msgs_num; ++ int msg_write_idx; ++ int msg_read_idx; ++ int msg_err; ++ u32 tx_buf_len; ++ u8 *tx_buf; ++ u32 rx_buf_len; ++ u8 *rx_buf; ++ ++ u32 master_cfg; ++ u32 functionality; ++ unsigned int tx_fifo_depth; ++ unsigned int rx_fifo_depth; ++ int rx_outstanding; ++ ++ struct i2c_timings timings; ++ u32 sda_hold_time; ++ u16 ss_hcnt; ++ u16 ss_lcnt; ++ u16 fs_hcnt; ++ u16 fs_lcnt; ++ u16 
fp_hcnt; ++ u16 fp_lcnt; ++ u16 hs_hcnt; ++ u16 hs_lcnt; ++ ++ void (*disable)(struct phytium_i2c_dev *dev); ++ void (*disable_int)(struct phytium_i2c_dev *dev); ++ int (*init)(struct phytium_i2c_dev *dev); ++}; ++ ++#define ACCESS_INTR_MASK 0x00000004 ++ ++#define DEFAULT_CLOCK_FREQUENCY 48000000 ++ ++u32 phytium_readl(struct phytium_i2c_dev *dev, int offset); ++void phytium_writel(struct phytium_i2c_dev *dev, u32 b, int offset); ++int i2c_phytium_wait_bus_not_busy(struct phytium_i2c_dev *dev); ++int i2c_phytium_handle_tx_abort(struct phytium_i2c_dev *dev); ++u32 i2c_phytium_func(struct i2c_adapter *adap); ++void i2c_phytium_disable(struct phytium_i2c_dev *dev); ++void i2c_phytium_disable_int(struct phytium_i2c_dev *dev); ++ ++static inline void __i2c_phytium_enable(struct phytium_i2c_dev *dev) ++{ ++ phytium_writel(dev, 1, IC_ENABLE); ++} ++ ++static inline void __i2c_phytium_disable_nowait(struct phytium_i2c_dev *dev) ++{ ++ phytium_writel(dev, 0, IC_ENABLE); ++} ++ ++void __i2c_phytium_disable(struct phytium_i2c_dev *dev); ++ ++extern int i2c_phytium_probe(struct phytium_i2c_dev *dev); ++ +diff --git a/drivers/i2c/busses/i2c-phytium-master.c b/drivers/i2c/busses/i2c-phytium-master.c +new file mode 100644 +index 000000000000..8b82006db3ca +--- /dev/null ++++ b/drivers/i2c/busses/i2c-phytium-master.c +@@ -0,0 +1,498 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Phytium I2C adapter driver. ++ * ++ * Copyright (C) 2021, Phytium Technology Co., Ltd. 
++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "i2c-phytium-core.h" ++ ++static int i2c_phytium_init_master(struct phytium_i2c_dev *dev) ++{ ++ /* Disable the adapter */ ++ __i2c_phytium_disable(dev); ++ ++ /* Write standard speed timing parameters */ ++ phytium_writel(dev, dev->ss_hcnt, IC_SS_SCL_HCNT); ++ phytium_writel(dev, dev->ss_lcnt, IC_SS_SCL_LCNT); ++ ++ /* Write fast mode/fast mode plus timing parameters */ ++ phytium_writel(dev, dev->fs_hcnt, IC_FS_SCL_HCNT); ++ phytium_writel(dev, dev->fs_lcnt, IC_FS_SCL_LCNT); ++ ++ /* Write high speed timing parameters if supported */ ++ if (dev->hs_hcnt && dev->hs_hcnt) { ++ phytium_writel(dev, dev->hs_hcnt, IC_HS_SCL_HCNT); ++ phytium_writel(dev, dev->hs_lcnt, IC_HS_SCL_LCNT); ++ } ++ ++ /* Write SDA hold time if supported */ ++ if (dev->sda_hold_time) ++ phytium_writel(dev, dev->sda_hold_time, IC_SDA_HOLD); ++ ++ /* Configure Tx/Rx FIFO threshold levels */ ++ phytium_writel(dev, dev->tx_fifo_depth >> 1, IC_TX_TL); ++ phytium_writel(dev, 0, IC_RX_TL); ++ ++ /* Configure the I2C master */ ++ phytium_writel(dev, dev->master_cfg, IC_CON); ++ ++ return 0; ++} ++ ++static void i2c_phytium_xfer_init(struct phytium_i2c_dev *dev) ++{ ++ struct i2c_msg *msgs = dev->msgs; ++ u32 ic_con, ic_tar = 0; ++ ++ /* Disable the adapter */ ++ __i2c_phytium_disable(dev); ++ ++ /* If the slave address is 10-bit address, enable 10BITADDR */ ++ ic_con = phytium_readl(dev, IC_CON); ++ if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) { ++ ic_con |= IC_CON_10BITADDR_MASTER; ++ ic_tar = IC_TAR_10BITADDR_MASTER; ++ } else { ++ ic_con &= ~IC_CON_10BITADDR_MASTER; ++ } ++ ++ phytium_writel(dev, ic_con, IC_CON); ++ ++ /* ++ * Set the slave (target) address and enable 10-bit addressing mode ++ * if applicable. 
++ */ ++ phytium_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, IC_TAR); ++ ++ /* Enforce disabled interrupts */ ++ i2c_phytium_disable_int(dev); ++ ++ /* Enable the adapter */ ++ __i2c_phytium_enable(dev); ++ ++ /* Clear and enable interrupts */ ++ phytium_readl(dev, IC_CLR_INTR); ++ if (dev->ara) ++ phytium_writel(dev, IC_INTR_SMBUS_MASK, IC_INTR_MASK); ++ else ++ phytium_writel(dev, IC_INTR_MASTER_MASK, IC_INTR_MASK); ++} ++ ++static void i2c_phytium_xfer_msg(struct phytium_i2c_dev *dev) ++{ ++ struct i2c_msg *msgs = dev->msgs; ++ u32 intr_mask; ++ int tx_limit, rx_limit; ++ u32 addr = msgs[dev->msg_write_idx].addr; ++ u32 buf_len = dev->tx_buf_len; ++ u8 *buf = dev->tx_buf; ++ bool need_restart = false; ++ ++ intr_mask = IC_INTR_MASTER_MASK; ++ ++ for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++) { ++ u32 flags = msgs[dev->msg_write_idx].flags; ++ ++ if (msgs[dev->msg_write_idx].addr != addr) { ++ dev_err(dev->dev, ++ "%s: invalid target address\n", __func__); ++ dev->msg_err = -EINVAL; ++ break; ++ } ++ ++ if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) { ++ /* new i2c_msg */ ++ buf = msgs[dev->msg_write_idx].buf; ++ buf_len = msgs[dev->msg_write_idx].len; ++ ++ if ((dev->master_cfg & IC_CON_RESTART_EN) && ++ (dev->msg_write_idx > 0)) ++ need_restart = true; ++ } ++ ++ tx_limit = dev->tx_fifo_depth - phytium_readl(dev, IC_TXFLR); ++ rx_limit = dev->tx_fifo_depth - phytium_readl(dev, IC_RXFLR); ++ ++ while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) { ++ u32 cmd = 0; ++ ++ if (dev->msg_write_idx == dev->msgs_num - 1 && ++ buf_len == 1 && !(flags & I2C_M_RECV_LEN)) ++ cmd |= BIT(9); ++ ++ if (need_restart) { ++ cmd |= BIT(10); ++ need_restart = false; ++ } ++ ++ if (msgs[dev->msg_write_idx].flags & I2C_M_RD) { ++ /* avoid rx buffer overrun */ ++ if (dev->rx_outstanding >= dev->rx_fifo_depth) ++ break; ++ ++ phytium_writel(dev, cmd | 0x100, IC_DATA_CMD); ++ rx_limit--; ++ dev->rx_outstanding++; ++ } else { ++ phytium_writel(dev, cmd | *buf++, 
IC_DATA_CMD); ++ } ++ tx_limit--; ++ buf_len--; ++ } ++ ++ dev->tx_buf = buf; ++ dev->tx_buf_len = buf_len; ++ ++ /* ++ * Because we don't know the buffer length in the ++ * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop ++ * the transaction here. ++ */ ++ if (buf_len > 0 || flags & I2C_M_RECV_LEN) { ++ /* more bytes to be written */ ++ dev->status |= STATUS_WRITE_IN_PROGRESS; ++ break; ++ } else { ++ dev->status &= ~STATUS_WRITE_IN_PROGRESS; ++ } ++ } ++ ++ if (dev->msg_write_idx == dev->msgs_num) ++ intr_mask &= ~IC_INTR_TX_EMPTY; ++ ++ if (dev->msg_err) ++ intr_mask = 0; ++ ++ phytium_writel(dev, intr_mask, IC_INTR_MASK); ++} ++ ++static u8 i2c_phytium_recv_len(struct phytium_i2c_dev *dev, u8 len) ++{ ++ struct i2c_msg *msgs = dev->msgs; ++ u32 flags = msgs[dev->msg_read_idx].flags; ++ ++ /* ++ * Adjust the buffer length and mask the flag ++ * after receiving the first byte. ++ */ ++ len += (flags & I2C_CLIENT_PEC) ? 2 : 1; ++ dev->tx_buf_len = len - min_t(u8, len, dev->rx_outstanding); ++ msgs[dev->msg_read_idx].len = len; ++ msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN; ++ ++ return len; ++} ++ ++static void i2c_phytium_read(struct phytium_i2c_dev *dev) ++{ ++ struct i2c_msg *msgs = dev->msgs; ++ int rx_valid; ++ ++ for (; dev->msg_read_idx < dev->msgs_num; dev->msg_read_idx++) { ++ u32 len; ++ u8 *buf; ++ ++ if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD)) ++ continue; ++ ++ if (!(dev->status & STATUS_READ_IN_PROGRESS)) { ++ len = msgs[dev->msg_read_idx].len; ++ buf = msgs[dev->msg_read_idx].buf; ++ } else { ++ len = dev->rx_buf_len; ++ buf = dev->rx_buf; ++ } ++ ++ rx_valid = phytium_readl(dev, IC_RXFLR); ++ ++ for (; len > 0 && rx_valid > 0; len--, rx_valid--) { ++ u32 flags = msgs[dev->msg_read_idx].flags; ++ ++ *buf = phytium_readl(dev, IC_DATA_CMD); ++ /* Ensure length byte is a valid value */ ++ if (flags & I2C_M_RECV_LEN && ++ *buf <= I2C_SMBUS_BLOCK_MAX && *buf > 0) { ++ len = i2c_phytium_recv_len(dev, *buf); ++ } ++ buf++; ++ 
dev->rx_outstanding--; ++ } ++ ++ if (len > 0) { ++ dev->status |= STATUS_READ_IN_PROGRESS; ++ dev->rx_buf_len = len; ++ dev->rx_buf = buf; ++ return; ++ } else ++ dev->status &= ~STATUS_READ_IN_PROGRESS; ++ } ++} ++ ++static int i2c_phytium_xfer(struct i2c_adapter *adapter, struct i2c_msg msgs[], int num) ++{ ++ struct phytium_i2c_dev *dev = i2c_get_adapdata(adapter); ++ int ret; ++ ++ dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num); ++ ++ pm_runtime_get_sync(dev->dev); ++ ++ reinit_completion(&dev->cmd_complete); ++ dev->msgs = msgs; ++ dev->msgs_num = num; ++ dev->cmd_err = 0; ++ dev->msg_write_idx = 0; ++ dev->msg_read_idx = 0; ++ dev->msg_err = 0; ++ dev->status = STATUS_IDLE; ++ dev->abort_source = 0; ++ dev->rx_outstanding = 0; ++ ++ ret = i2c_phytium_wait_bus_not_busy(dev); ++ if (ret < 0) ++ goto done; ++ ++ /* Start the transfers */ ++ i2c_phytium_xfer_init(dev); ++ ++ /* Wait for tx to complete */ ++ if (!wait_for_completion_timeout(&dev->cmd_complete, adapter->timeout)) { ++ dev_err(dev->dev, "controller timed out\n"); ++ i2c_recover_bus(&dev->adapter); ++ i2c_phytium_init_master(dev); ++ ret = -ETIMEDOUT; ++ goto done; ++ } ++ ++ __i2c_phytium_disable_nowait(dev); ++ ++ if (dev->msg_err) { ++ ret = dev->msg_err; ++ goto done; ++ } ++ ++ if (likely(!dev->cmd_err && !dev->status)) { ++ ret = num; ++ goto done; ++ } ++ ++ /* We have got an error */ ++ if (dev->cmd_err == IC_ERR_TX_ABRT) { ++ ret = i2c_phytium_handle_tx_abort(dev); ++ goto done; ++ } ++ ++ if (dev->status) ++ dev_err(dev->dev, "transfer terminated early.\n"); ++ ++ ret = -EIO; ++ ++done: ++ pm_runtime_mark_last_busy(dev->dev); ++ pm_runtime_put_autosuspend(dev->dev); ++ ++ return ret; ++} ++ ++static const struct i2c_algorithm i2c_phytium_algo = { ++ .master_xfer = i2c_phytium_xfer, ++ .functionality = i2c_phytium_func, ++}; ++ ++static const struct i2c_adapter_quirks i2c_phytium_quirks = { ++ .flags = I2C_AQ_NO_ZERO_LEN, ++}; ++ ++static u32 i2c_phytium_read_clear_intrbits(struct 
phytium_i2c_dev *dev) ++{ ++ u32 stat; ++ ++ stat = phytium_readl(dev, IC_INTR_STAT); ++ ++ if (stat & IC_INTR_RX_UNDER) ++ phytium_readl(dev, IC_CLR_RX_UNDER); ++ if (stat & IC_INTR_RX_OVER) ++ phytium_readl(dev, IC_CLR_RX_OVER); ++ if (stat & IC_INTR_TX_OVER) ++ phytium_readl(dev, IC_CLR_TX_OVER); ++ if (stat & IC_INTR_RD_REQ) ++ phytium_readl(dev, IC_CLR_RD_REQ); ++ if (stat & IC_INTR_TX_ABRT) { ++ dev->abort_source = phytium_readl(dev, IC_TX_ABRT_SOURCE); ++ phytium_readl(dev, IC_CLR_TX_ABRT); ++ } ++ if (stat & IC_INTR_RX_DONE) ++ phytium_readl(dev, IC_CLR_RX_DONE); ++ if (stat & IC_INTR_ACTIVITY) ++ phytium_readl(dev, IC_CLR_ACTIVITY); ++ if (stat & IC_INTR_STOP_DET) ++ phytium_readl(dev, IC_CLR_STOP_DET); ++ if (stat & IC_INTR_START_DET) ++ phytium_readl(dev, IC_CLR_START_DET); ++ if (stat & IC_INTR_GEN_CALL) ++ phytium_readl(dev, IC_CLR_GEN_CALL); ++ if (stat & IC_INTR_SMBCLK_EXT_LOW_TIMEOUT) ++ phytium_readl(dev, IC_CLR_SMBCLK_EXT_LOW_TIMEOUT); ++ if (stat & IC_INTR_SMBCLK_TMO_LOW_TIMEOUT) ++ phytium_readl(dev, IC_CLR_SMBCLK_TMO_LOW_TIMEOUT); ++ if (stat & IC_INTR_SMBSDA_LOW_TIMEOUT) ++ phytium_readl(dev, IC_CLR_SMBDAT_LOW_TIMEOUT); ++ if (stat & IC_INTR_SMBALERT_IN_N) ++ phytium_readl(dev, IC_CLR_SMBALERT_IN_N); ++ ++ return stat; ++} ++ ++static int i2c_phytium_irq_handler_master(struct phytium_i2c_dev *dev) ++{ ++ u32 stat; ++ ++ stat = i2c_phytium_read_clear_intrbits(dev); ++ ++ /* SMBus interrupt */ ++ if (dev->ara) { ++ if (stat & (IC_INTR_SMBCLK_EXT_LOW_TIMEOUT | IC_INTR_SMBCLK_TMO_LOW_TIMEOUT)) { ++ phytium_writel(dev, phytium_readl(dev, IC_ENABLE) & (~BIT(6)), ++ IC_ENABLE); ++ phytium_writel(dev, phytium_readl(dev, IC_ENABLE) | BIT(4), ++ IC_ENABLE); ++ goto abort; ++ } ++ ++ if (stat & IC_INTR_SMBSDA_LOW_TIMEOUT) { ++ phytium_writel(dev, phytium_readl(dev, IC_ENABLE) | BIT(6), ++ IC_ENABLE); ++ goto abort; ++ } ++ ++ if (stat & IC_INTR_SMBALERT_IN_N) ++ i2c_handle_smbus_alert(dev->ara); ++ } ++ ++ if (stat & IC_INTR_TX_ABRT) { ++ dev->cmd_err |= 
IC_ERR_TX_ABRT; ++ dev->status = STATUS_IDLE; ++ ++ /* Anytime TX_ABRT is set, the contents of the tx/rx ++ * buffers are flushed. Make sure to skip them. ++ */ ++ phytium_writel(dev, 0, IC_INTR_MASK); ++ goto abort; ++ } ++ ++ if (stat & IC_INTR_RX_FULL) ++ i2c_phytium_read(dev); ++ ++ if (stat & IC_INTR_TX_EMPTY) ++ i2c_phytium_xfer_msg(dev); ++ ++abort: ++ if ((stat & (IC_INTR_TX_ABRT | IC_INTR_STOP_DET)) || ++ dev->msg_err) ++ complete(&dev->cmd_complete); ++ else if (unlikely(dev->flags & ACCESS_INTR_MASK)) { ++ /* Workaround to trigger pending interrupt */ ++ stat = phytium_readl(dev, IC_INTR_MASK); ++ i2c_phytium_disable_int(dev); ++ phytium_writel(dev, stat, IC_INTR_MASK); ++ } ++ ++ return 0; ++} ++ ++static irqreturn_t i2c_phytium_isr(int this_irq, void *dev_id) ++{ ++ struct phytium_i2c_dev *dev = dev_id; ++ u32 stat, enabled; ++ ++ enabled = phytium_readl(dev, IC_ENABLE); ++ stat = phytium_readl(dev, IC_RAW_INTR_STAT); ++ if (!enabled || !(stat & ~IC_INTR_ACTIVITY)) ++ return IRQ_NONE; ++ ++ i2c_phytium_irq_handler_master(dev); ++ ++ return IRQ_HANDLED; ++} ++ ++int i2c_phytium_probe(struct phytium_i2c_dev *dev) ++{ ++ const char *mode_str; ++ struct i2c_adapter *adapter = &dev->adapter; ++ unsigned long irq_flags; ++ int ret; ++ ++ init_completion(&dev->cmd_complete); ++ ++ dev->init = i2c_phytium_init_master; ++ dev->disable = i2c_phytium_disable; ++ dev->disable_int = i2c_phytium_disable_int; ++ ++ switch (dev->master_cfg & IC_CON_SPEED_MASK) { ++ case IC_CON_SPEED_STD: ++ mode_str = "Standard Mode"; ++ break; ++ case IC_CON_SPEED_HIGH: ++ mode_str = "High Speed Mode"; ++ break; ++ default: ++ mode_str = "Fast Mode"; ++ } ++ dev_dbg(dev->dev, "Bus speed: %s\n", mode_str); ++ ++ ret = dev->init(dev); ++ if (ret) ++ return ret; ++ ++ /* XXX: should be initialized in firmware, remove it in future */ ++#define DEFAULT_TIMEOUT (DEFAULT_CLOCK_FREQUENCY / 1000 * 35) ++ phytium_writel(dev, DEFAULT_TIMEOUT, IC_SMBCLK_LOW_MEXT); ++ phytium_writel(dev, 
DEFAULT_TIMEOUT, IC_SMBCLK_LOW_TIMEOUT); ++ phytium_writel(dev, DEFAULT_TIMEOUT, IC_SMBDAT_STUCK_TIMEOUT); ++ ++ snprintf(adapter->name, sizeof(adapter->name), "Phytium I2C adapter"); ++ adapter->retries = 3; ++ adapter->algo = &i2c_phytium_algo; ++ adapter->quirks = &i2c_phytium_quirks; ++ adapter->dev.parent = dev->dev; ++ i2c_set_adapdata(adapter, dev); ++ ++ irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND; ++ ++ i2c_phytium_disable_int(dev); ++ ret = devm_request_irq(dev->dev, dev->irq, i2c_phytium_isr, irq_flags, ++ dev_name(dev->dev), dev); ++ if (ret) { ++ dev_err(dev->dev, "failed to request irq %i: %d\n", dev->irq, ret); ++ return ret; ++ } ++ ++ /* ++ * Increment PM usage count during adapter registration in order to ++ * avoid possible spurious runtime suspend when adapter device is ++ * registered to the device core and immediate resume in case bus has ++ * registered I2C slaves that do I2C transfers in their probe. ++ */ ++ pm_runtime_get_noresume(dev->dev); ++ ret = i2c_add_numbered_adapter(adapter); ++ if (ret) ++ dev_err(dev->dev, "fail to add adapter: %d\n", ret); ++ pm_runtime_put_noidle(dev->dev); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(i2c_phytium_probe); ++ ++MODULE_DESCRIPTION("Phytium I2C bus master adapter"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/i2c/busses/i2c-phytium-pci.c b/drivers/i2c/busses/i2c-phytium-pci.c +new file mode 100644 +index 000000000000..1d6514a4915c +--- /dev/null ++++ b/drivers/i2c/busses/i2c-phytium-pci.c +@@ -0,0 +1,237 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ++ * PCI driver for Phytium I2C adapter. ++ * ++ * Copyright (C) 2021,Phytium Technology Co.,Ltd. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "i2c-phytium-core.h" ++ ++#define DRV_NAME "i2c-phytium-pci" ++ ++enum phytium_pci_ctl_id_t { ++ octopus_i2c, ++}; ++ ++struct scl_sda_cfg { ++ u32 ss_hcnt; ++ u32 fs_hcnt; ++ u32 ss_lcnt; ++ u32 fs_lcnt; ++ u32 sda_hold; ++}; ++ ++struct phytium_pci_i2c { ++ u32 bus_num; ++ u32 bus_cfg; ++ u32 tx_fifo_depth; ++ u32 rx_fifo_depth; ++ u32 clk_khz; ++ u32 functionality; ++ u32 flags; ++ struct scl_sda_cfg *scl_sda_cfg; ++ int (*setup)(struct pci_dev *pdev, struct phytium_pci_i2c *c); ++}; ++ ++/* Octopus HCNT/LCNT/SDA hold time */ ++static struct scl_sda_cfg octopus_config = { ++ .ss_hcnt = 0x190, ++ .ss_lcnt = 0x1d6, ++ .fs_hcnt = 0x3c, ++ .fs_lcnt = 0x82, ++ .sda_hold = 0x0, // XXX ++}; ++ ++static int octopus_setup(struct pci_dev *pdev, struct phytium_pci_i2c *c) ++{ ++ struct phytium_i2c_dev *i2c = pci_get_drvdata(pdev); ++ ++ if (pdev->device == 0xdc32) { ++ /* ++ * Since we have already register the adapter, the dev->irq ++ * must be valid. 
++ */ ++ i2c->alert_data.irq = i2c->irq; ++ ++ i2c->ara = i2c_setup_smbus_alert(&i2c->adapter, &i2c->alert_data); ++ if (!i2c->ara) ++ return -ENODEV; ++ } ++ ++ return 0; ++} ++ ++static struct phytium_pci_i2c pci_ctrl_info[] = { ++ [octopus_i2c] = { ++ .bus_num = -1, ++ .bus_cfg = IC_CON_MASTER | IC_CON_SLAVE_DISABLE | ++ IC_CON_RESTART_EN | IC_CON_SPEED_FAST, ++ .tx_fifo_depth = 7, ++ .rx_fifo_depth = 7, ++ .functionality = I2C_FUNC_10BIT_ADDR, ++ .clk_khz = 48000000, ++ .scl_sda_cfg = &octopus_config, ++ .setup = octopus_setup, ++ }, ++}; ++ ++#ifdef CONFIG_PM ++static int i2c_phytium_pci_suspend(struct device *dev) ++{ ++ struct pci_dev *pdev = to_pci_dev(dev); ++ struct phytium_i2c_dev *i_dev = pci_get_drvdata(pdev); ++ ++ i_dev->disable(i_dev); ++ ++ return 0; ++} ++ ++static int i2c_phytium_pci_resume(struct device *dev) ++{ ++ struct pci_dev *pdev = to_pci_dev(dev); ++ struct phytium_i2c_dev *i_dev = pci_get_drvdata(pdev); ++ ++ return i_dev->init(i_dev); ++} ++#endif ++ ++static UNIVERSAL_DEV_PM_OPS(i2c_phytium_pm_ops, i2c_phytium_pci_suspend, ++ i2c_phytium_pci_resume, NULL); ++ ++static u32 i2c_phytium_get_clk_rate_khz(struct phytium_i2c_dev *dev) ++{ ++ return dev->controller->clk_khz; ++} ++ ++static int i2c_phytium_pci_probe(struct pci_dev *pdev, ++ const struct pci_device_id *id) ++{ ++ struct phytium_i2c_dev *dev; ++ struct i2c_adapter *adapter; ++ struct phytium_pci_i2c *controller; ++ struct scl_sda_cfg *cfg; ++ int ret; ++ ++ if (id->driver_data >= ARRAY_SIZE(pci_ctrl_info)) { ++ dev_err(&pdev->dev, "%s: invalid driver data %ld\n", __func__, ++ id->driver_data); ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ controller = &pci_ctrl_info[id->driver_data]; ++ ++ ret = pcim_enable_device(pdev); ++ if (ret) { ++ dev_err(&pdev->dev, "Failed to enable I2C PCI device (%d)\n", ret); ++ goto out; ++ } ++ ++ ret = pcim_iomap_regions(pdev, 0x1, pci_name(pdev)); ++ if (ret) { ++ dev_err(&pdev->dev, "I/O memory remapping failed\n"); ++ goto out; ++ } ++ ++ dev = 
devm_kzalloc(&pdev->dev, sizeof(struct phytium_i2c_dev), GFP_KERNEL); ++ if (!dev) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ dev->controller = controller; ++ dev->get_clk_rate_khz = i2c_phytium_get_clk_rate_khz; ++ dev->base = pcim_iomap_table(pdev)[0]; ++ dev->dev = &pdev->dev; ++ dev->irq = pdev->irq; ++ dev->flags |= controller->flags; ++ ++ dev->functionality = controller->functionality | IC_DEFAULT_FUNCTIONALITY; ++ dev->master_cfg = controller->bus_cfg; ++ if (controller->scl_sda_cfg) { ++ cfg = controller->scl_sda_cfg; ++ dev->ss_hcnt = cfg->ss_hcnt; ++ dev->fs_hcnt = cfg->fs_hcnt; ++ dev->ss_lcnt = cfg->ss_lcnt; ++ dev->fs_lcnt = cfg->fs_lcnt; ++ dev->sda_hold_time = cfg->sda_hold; ++ } ++ ++ pci_set_drvdata(pdev, dev); ++ ++ dev->tx_fifo_depth = controller->tx_fifo_depth; ++ dev->rx_fifo_depth = controller->rx_fifo_depth; ++ ++ adapter = &dev->adapter; ++ adapter->owner = THIS_MODULE; ++ adapter->class = 0; ++ ACPI_COMPANION_SET(&adapter->dev, ACPI_COMPANION(&pdev->dev)); ++ adapter->nr = controller->bus_num; ++ ++ ret = i2c_phytium_probe(dev); ++ if (ret) ++ goto out; ++ ++ if (controller->setup) { ++ ret = controller->setup(pdev, controller); ++ if (ret) ++ goto out; ++ } ++ ++ pm_runtime_set_autosuspend_delay(&pdev->dev, 1000); ++ pm_runtime_use_autosuspend(&pdev->dev); ++ pm_runtime_put_autosuspend(&pdev->dev); ++ pm_runtime_allow(&pdev->dev); ++ ++out: ++ return ret; ++} ++ ++static void i2c_phytium_pci_remove(struct pci_dev *pdev) ++{ ++ struct phytium_i2c_dev *dev = pci_get_drvdata(pdev); ++ ++ dev->disable(dev); ++ pm_runtime_forbid(&pdev->dev); ++ pm_runtime_get_noresume(&pdev->dev); ++ ++ i2c_del_adapter(&dev->adapter); ++} ++ ++static const struct pci_device_id i2_phytium_pci_ids[] = { ++ { PCI_VDEVICE(PHYTIUM, 0xdc32), octopus_i2c }, ++ { PCI_VDEVICE(PHYTIUM, 0xdc30), octopus_i2c }, ++ { } ++}; ++MODULE_DEVICE_TABLE(pci, i2_phytium_pci_ids); ++ ++static struct pci_driver phytium_i2c_driver = { ++ .name = DRV_NAME, ++ .id_table = 
i2_phytium_pci_ids, ++ .probe = i2c_phytium_pci_probe, ++ .remove = i2c_phytium_pci_remove, ++ .driver = { ++ .pm = &i2c_phytium_pm_ops, ++ }, ++}; ++ ++module_pci_driver(phytium_i2c_driver); ++ ++MODULE_ALIAS("i2c-phytium-pci"); ++MODULE_AUTHOR("Cheng Quan "); ++MODULE_DESCRIPTION("Phytium PCI I2C bus adapter"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig +index d90d9f1098ff..958312b2169c 100644 +--- a/drivers/input/serio/Kconfig ++++ b/drivers/input/serio/Kconfig +@@ -39,6 +39,18 @@ config SERIO_I8042 + To compile this driver as a module, choose M here: the + module will be called i8042. + ++config SERIO_PHYTIUM_PS2 ++ depends on SERIO ++ tristate "PHYTIUM PS/2 (keyboard and mouse)" ++ default y if ARCH_PHYTIUM ++ depends on PCI ++ help ++ This selects support for the PS/2 Host Controller on ++ Phytium SoCs. ++ ++ To compile this driver as a module, choose M here: the ++ module will be called phytium-ps2. ++ + config SERIO_SERPORT + tristate "Serial port line discipline" + default y +diff --git a/drivers/input/serio/Makefile b/drivers/input/serio/Makefile +index 67950a5ccb3f..c180361bdd12 100644 +--- a/drivers/input/serio/Makefile ++++ b/drivers/input/serio/Makefile +@@ -7,6 +7,7 @@ + + obj-$(CONFIG_SERIO) += serio.o + obj-$(CONFIG_SERIO_I8042) += i8042.o ++obj-$(CONFIG_SERIO_PHYTIUM_PS2) += phytium-ps2.o + obj-$(CONFIG_SERIO_PARKBD) += parkbd.o + obj-$(CONFIG_SERIO_SERPORT) += serport.o + obj-$(CONFIG_SERIO_CT82C710) += ct82c710.o +diff --git a/drivers/input/serio/phytium-ps2.c b/drivers/input/serio/phytium-ps2.c +new file mode 100644 +index 000000000000..cf4fe74fd0dd +--- /dev/null ++++ b/drivers/input/serio/phytium-ps2.c +@@ -0,0 +1,186 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* ++ * Phytium PS/2 keyboard controller driver. ++ * ++ * Copyright (C) 2021, Phytium Technology Co., Ltd. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define DRV_NAME "phytium_ps2_pci" ++ ++#define REG_STAT 0x0 ++#define REG_STAT_TX_TIMEOUT 0x1 ++#define REG_STAT_RX_TIMEOUT 0x2 ++#define REG_STAT_TX_FULL 0x4 ++#define REG_CTRL 0x4 ++#define REG_CTRL_RESET 0x1 ++#define REG_CTRL_TX_TIMEOUT 0x2 ++#define REG_CTRL_RX_TIMEOUT 0x4 ++#define REG_CTRL_RX_INTR 0x8 ++#define REG_INTR 0x8 ++#define REG_INTR_TIMEOUT 0x1 ++#define REG_INTR_RX 0x2 ++#define REG_TX 0xc ++#define REG_RX 0x10 ++#define REG_TIMER_VAL 0x14 ++ ++#define REG_CTRL_ENABLE (REG_CTRL_TX_TIMEOUT|REG_CTRL_RX_TIMEOUT|REG_CTRL_RX_INTR) ++#define REG_DATA_PARITY 0x100 ++ ++#define STAT_RX_COUNTER(stat) ((stat >> 8) & 0x1f) ++ ++struct phytium_ps2_data { ++ void __iomem *base; ++ struct serio *io; ++ struct pci_dev *dev; ++}; ++ ++static irqreturn_t phytium_ps2_irq(int irq, void *devid) ++{ ++ struct phytium_ps2_data *ps2if = devid; ++ u32 status, scancode, val = 0; ++ unsigned int flag; ++ int i, rxcount; ++ ++ status = readl(ps2if->base + REG_STAT); ++ if (!status) ++ return IRQ_NONE; ++ ++ /* Check if there is timeout interrupt */ ++ if (status & (REG_STAT_RX_TIMEOUT|REG_STAT_TX_TIMEOUT)) ++ val |= REG_INTR_TIMEOUT; ++ ++ rxcount = STAT_RX_COUNTER(status); ++ for (i = 0; i < rxcount; i++) { ++ scancode = readl(ps2if->base + REG_RX) & 0x1ff; ++ ++ if (rxcount <= 16 && scancode != 0x1ff) { ++ flag = ((scancode & REG_DATA_PARITY) ? 
SERIO_PARITY : 0); ++ serio_interrupt(ps2if->io, scancode & 0xff, flag); ++ } ++ } ++ ++ val |= REG_INTR_RX; ++ writel(val, ps2if->base + REG_INTR); ++ ++ return IRQ_HANDLED; ++} ++ ++int phytium_ps2_write(struct serio *serio, unsigned char val) ++{ ++ struct phytium_ps2_data *ps2if = serio->port_data; ++ unsigned int stat; ++ ++ do { ++ stat = readl(ps2if->base + REG_STAT); ++ cpu_relax(); ++ } while (stat & REG_STAT_TX_FULL); ++ ++ writel(val, ps2if->base + REG_TX); ++ ++ return 0; ++} ++ ++int phytium_ps2_open(struct serio *io) ++{ ++ struct phytium_ps2_data *ps2if = io->port_data; ++ ++ writel(REG_CTRL_RESET, ps2if->base + REG_CTRL); ++ /* Wait 4ms for the controller to be reset */ ++ usleep_range(4000, 6000); ++ writel(REG_CTRL_ENABLE, ps2if->base + REG_CTRL); ++ ++ return 0; ++} ++ ++void phytium_ps2_close(struct serio *io) ++{ ++ struct phytium_ps2_data *ps2if = io->port_data; ++ ++ writel(0, ps2if->base + REG_CTRL); ++} ++ ++static int phytium_pci_ps2_probe(struct pci_dev *pdev, const struct pci_device_id *id) ++{ ++ struct phytium_ps2_data *ps2if; ++ struct serio *serio; ++ int ret; ++ ++ ret = pcim_enable_device(pdev); ++ if (ret) ++ goto out; ++ ++ ret = pcim_iomap_regions(pdev, 0x1, DRV_NAME); ++ if (ret) ++ goto out; ++ ++ ps2if = devm_kzalloc(&pdev->dev, sizeof(struct phytium_ps2_data), GFP_KERNEL); ++ serio = kzalloc(sizeof(struct serio), GFP_KERNEL); ++ if (!ps2if || !serio) { ++ ret = -ENOMEM; ++ goto free; ++ } ++ ++ serio->id.type = SERIO_8042; ++ serio->write = phytium_ps2_write; ++ serio->open = phytium_ps2_open; ++ serio->close = phytium_ps2_close; ++ strlcpy(serio->name, pci_name(pdev), sizeof(serio->name)); ++ strlcpy(serio->phys, dev_name(&pdev->dev), sizeof(serio->phys)); ++ serio->port_data = ps2if; ++ serio->dev.parent = &pdev->dev; ++ ps2if->io = serio; ++ ps2if->dev = pdev; ++ ps2if->base = pcim_iomap_table(pdev)[0]; ++ ++ ret = devm_request_irq(&pdev->dev, pdev->irq, phytium_ps2_irq, ++ IRQF_SHARED, DRV_NAME, ps2if); ++ if (ret) { ++ 
dev_err(&pdev->dev, "could not request IRQ %d\n", pdev->irq); ++ goto free; ++ } ++ ++ pci_set_drvdata(pdev, ps2if); ++ serio_register_port(ps2if->io); ++ ++ return 0; ++ ++free: ++ kfree(serio); ++out: ++ return ret; ++} ++ ++static void phytium_pci_ps2_remove(struct pci_dev *pdev) ++{ ++ struct phytium_ps2_data *ps2if = pci_get_drvdata(pdev); ++ ++ serio_unregister_port(ps2if->io); ++ pcim_iounmap_regions(pdev, 0x1); ++} ++ ++static const struct pci_device_id phytium_pci_ps2_ids[] = { ++ { PCI_VDEVICE(PHYTIUM, 0xdc34) }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(pci, phytium_pci_ps2_ids); ++ ++static struct pci_driver phytium_pci_ps2_driver = { ++ .name = DRV_NAME, ++ .id_table = phytium_pci_ps2_ids, ++ .probe = phytium_pci_ps2_probe, ++ .remove = phytium_pci_ps2_remove, ++}; ++module_pci_driver(phytium_pci_ps2_driver); ++ ++MODULE_AUTHOR("Cheng Quan "); ++MODULE_DESCRIPTION("Phytium PCI PS/2 controller driver"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c +index fd1b80ef9490..a6718da51717 100644 +--- a/drivers/iommu/arm-smmu.c ++++ b/drivers/iommu/arm-smmu.c +@@ -53,9 +53,15 @@ + + #include + ++#include ++ + #include "io-pgtable.h" + #include "arm-smmu-regs.h" + ++#ifdef CONFIG_ARCH_PHYTIUM ++#define FWID_READ(id) (((u16)(id) >> 3) | (((id) >> SMR_MASK_SHIFT | 0x7000) << SMR_MASK_SHIFT)) ++#endif ++ + #define ARM_MMU500_ACTLR_CPRE (1 << 1) + + #define ARM_MMU500_ACR_CACHE_LOCK (1 << 26) +@@ -1376,6 +1382,17 @@ static int arm_smmu_add_device(struct device *dev) + return -ENODEV; + } + ++#ifdef CONFIG_ARCH_PHYTIUM ++ /* FT2000PLUS workaround patch */ ++ if ((read_cpuid_id() & MIDR_CPU_MODEL_MASK) == MIDR_PHYTIUM_FT2000PLUS) { ++ int num = fwspec->num_ids; ++ for (i = 0; i < num; i++) { ++ u32 fwid = FWID_READ(fwspec->ids[i]); ++ iommu_fwspec_add_ids(dev, &fwid, 1); ++ } ++ } ++#endif ++ + ret = -EINVAL; + for (i = 0; i < fwspec->num_ids; i++) { + u16 sid = fwspec->ids[i]; +diff --git a/drivers/iommu/iommu.c 
b/drivers/iommu/iommu.c +index 8c15c5980299..84dabdbd1c7c 100644 +--- a/drivers/iommu/iommu.c ++++ b/drivers/iommu/iommu.c +@@ -33,6 +33,9 @@ + #include + #include + #include ++#ifdef CONFIG_ARCH_PHYTIUM ++#include ++#endif + + static struct kset *iommu_group_kset; + static DEFINE_IDA(iommu_group_ida); +@@ -126,7 +129,19 @@ static int __init iommu_set_def_domain_type(char *str) + if (ret) + return ret; + ++#ifdef CONFIG_ARCH_PHYTIUM ++ /* ++ * Always set default iommu type to IOMMU_DOMAIN_IDENTITY ++ * on Phytium FT-2000+ SoC to avoid unnecessary troubles ++ * introduced by the SMMU workaround. ++ */ ++ if ((read_cpuid_id() & MIDR_CPU_MODEL_MASK) == MIDR_PHYTIUM_FT2000PLUS) ++ iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY; ++ else ++ iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA; ++#else + iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA; ++#endif + return 0; + } + early_param("iommu.passthrough", iommu_set_def_domain_type); +@@ -1238,6 +1253,16 @@ int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops) + + bus->iommu_ops = ops; + ++#ifdef CONFIG_ARCH_PHYTIUM ++ /* ++ * Always set default iommu type to IOMMU_DOMAIN_IDENTITY ++ * on Phytium FT-2000+ SoC to avoid unnecessary troubles ++ * introduced by the SMMU workaround. 
++ */ ++ if ((read_cpuid_id() & MIDR_CPU_MODEL_MASK) == MIDR_PHYTIUM_FT2000PLUS) ++ iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY; ++#endif ++ + /* Do IOMMU specific setup for this bus-type */ + err = iommu_bus_init(bus, ops); + if (err) +diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig +index 383e7b70221d..c29cd8f6e7df 100644 +--- a/drivers/irqchip/Kconfig ++++ b/drivers/irqchip/Kconfig +@@ -57,6 +57,15 @@ config ARM_GIC_V3_ITS_FSL_MC + depends on FSL_MC_BUS + default ARM_GIC_V3_ITS + ++config ARM_GIC_PHYTIUM_2500 ++ bool ++ select IRQ_DOMAIN ++ select GENERIC_IRQ_MULTI_HANDLER ++ select IRQ_DOMAIN_HIERARCHY ++ select PARTITION_PERCPU ++ select GENERIC_IRQ_EFFECTIVE_AFF_MASK ++ select GENERIC_MSI_IRQ_DOMAIN ++ + config ARM_NVIC + bool + select IRQ_DOMAIN +@@ -371,6 +380,14 @@ config QCOM_PDC + Power Domain Controller driver to manage and configure wakeup + IRQs for Qualcomm Technologies Inc (QTI) mobile chips. + ++config PHYTIUM_IXIC ++ bool "Phytium D2000 SoC PCI Legacy Interrupt Controller" ++ depends on ARCH_PHYTIUM ++ select IRQ_DOMAIN ++ select IRQ_DOMAIN_HIERARCHY ++ help ++ This enables support PCI Legacy Interrupt on Phytium D2000 SoC. 
++ + endmenu + + config SIFIVE_PLIC +diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile +index fbd1ec8070ef..e6fc39085149 100644 +--- a/drivers/irqchip/Makefile ++++ b/drivers/irqchip/Makefile +@@ -31,6 +31,7 @@ obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-v3-mbi.o irq-gic-common.o + obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o + obj-$(CONFIG_ARM_GIC_V3_ITS_PCI) += irq-gic-v3-its-pci-msi.o + obj-$(CONFIG_ARM_GIC_V3_ITS_FSL_MC) += irq-gic-v3-its-fsl-mc-msi.o ++obj-$(CONFIG_ARM_GIC_PHYTIUM_2500) += irq-gic-phytium-2500.o irq-gic-phytium-2500-its.o + obj-$(CONFIG_PARTITION_PERCPU) += irq-partition-percpu.o + obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o + obj-$(CONFIG_ARM_NVIC) += irq-nvic.o +@@ -88,3 +89,4 @@ obj-$(CONFIG_GOLDFISH_PIC) += irq-goldfish-pic.o + obj-$(CONFIG_NDS32) += irq-ativic32.o + obj-$(CONFIG_QCOM_PDC) += qcom-pdc.o + obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o ++obj-$(CONFIG_PHYTIUM_IXIC) += irq-phytium-ixic.o +diff --git a/drivers/irqchip/irq-gic-phytium-2500-its.c b/drivers/irqchip/irq-gic-phytium-2500-its.c +new file mode 100644 +index 000000000000..03be44d3782f +--- /dev/null ++++ b/drivers/irqchip/irq-gic-phytium-2500-its.c +@@ -0,0 +1,4158 @@ ++/* ++ * Copyright (C) 2020 Phytium Corporation. ++ * Author: Wang Yinfeng ++ * Chen Baozi ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see . 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++ ++#include ++#include ++#include ++ ++#include "irq-gic-common.h" ++ ++#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) ++#define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1) ++#define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2) ++#define ITS_FLAGS_SAVE_SUSPEND_STATE (1ULL << 3) ++ ++#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) ++#define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1) ++ ++static u32 lpi_id_bits; ++ ++/* ++ * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to ++ * deal with (one configuration byte per interrupt). PENDBASE has to ++ * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI). ++ */ ++#define LPI_NRBITS lpi_id_bits ++#define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K) ++#define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K) ++ ++#define LPI_PROP_DEFAULT_PRIO 0xa0 ++ ++/* ++ * Collection structure - just an ID, and a redistributor address to ++ * ping. We use one per CPU as a bag of interrupts assigned to this ++ * CPU. ++ */ ++struct its_collection { ++ u64 target_address; ++ u16 col_id; ++}; ++ ++/* ++ * The ITS_BASER structure - contains memory information, cached ++ * value of BASER register configuration and ITS page size. ++ */ ++struct its_baser { ++ void *base; ++ u64 val; ++ u32 order; ++ u32 psz; ++}; ++ ++struct its_device; ++ ++/* ++ * The ITS structure - contains most of the infrastructure, with the ++ * top-level MSI domain, the command queue, the collections, and the ++ * list of devices writing to it. 
++ */ ++struct its_node { ++ raw_spinlock_t lock; ++ struct list_head entry; ++ void __iomem *base; ++ phys_addr_t phys_base; ++ struct its_cmd_block *cmd_base; ++ struct its_cmd_block *cmd_write; ++ struct its_baser tables[GITS_BASER_NR_REGS]; ++ struct its_collection *collections; ++ struct fwnode_handle *fwnode_handle; ++ u64 (*get_msi_base)(struct its_device *its_dev); ++ u64 cbaser_save; ++ u32 ctlr_save; ++ struct list_head its_device_list; ++ u64 flags; ++ unsigned long list_nr; ++ u32 ite_size; ++ u32 device_ids; ++ int numa_node; ++ unsigned int msi_domain_flags; ++ u32 pre_its_base; /* for Socionext Synquacer */ ++ bool is_v4; ++ int vlpi_redist_offset; ++}; ++ ++#define ITS_ITT_ALIGN SZ_256 ++ ++/* The maximum number of VPEID bits supported by VLPI commands */ ++#define ITS_MAX_VPEID_BITS (16) ++#define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS)) ++ ++/* Convert page order to size in bytes */ ++#define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o)) ++ ++struct event_lpi_map { ++ unsigned long *lpi_map; ++ u16 *col_map; ++ irq_hw_number_t lpi_base; ++ int nr_lpis; ++ struct mutex vlpi_lock; ++ struct its_vm *vm; ++ struct its_vlpi_map *vlpi_maps; ++ int nr_vlpis; ++}; ++ ++/* ++ * The ITS view of a device - belongs to an ITS, owns an interrupt ++ * translation table, and a list of interrupts. If some of its ++ * LPIs are injected into a guest (GICv4), the event_map.vm field ++ * indicates which one. 
++ */ ++struct its_device { ++ struct list_head entry; ++ struct its_node *its; ++ struct event_lpi_map event_map; ++ void *itt; ++ u32 nr_ites; ++ u32 device_id; ++}; ++ ++static struct { ++ raw_spinlock_t lock; ++ struct its_device *dev; ++ struct its_vpe **vpes; ++ int next_victim; ++} vpe_proxy; ++ ++struct cpu_lpi_count { ++ atomic_t managed; ++ atomic_t unmanaged; ++}; ++ ++static DEFINE_PER_CPU(struct cpu_lpi_count, cpu_lpi_count); ++ ++static LIST_HEAD(its_nodes); ++static DEFINE_RAW_SPINLOCK(its_lock); ++static struct rdists *gic_rdists; ++static struct irq_domain *its_parent; ++ ++static unsigned long its_list_map; ++static u16 vmovp_seq_num; ++static DEFINE_RAW_SPINLOCK(vmovp_lock); ++ ++static DEFINE_IDA(its_vpeid_ida); ++ ++#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist)) ++#define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu)) ++#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) ++#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K) ++ ++static struct its_collection *dev_event_to_col(struct its_device *its_dev, ++ u32 event) ++{ ++ struct its_node *its = its_dev->its; ++ ++ return its->collections + its_dev->event_map.col_map[event]; ++} ++ ++static struct its_collection *valid_col(struct its_collection *col) ++{ ++ if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(0, 15))) ++ return NULL; ++ ++ return col; ++} ++ ++static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe) ++{ ++ if (valid_col(its->collections + vpe->col_idx)) ++ return vpe; ++ ++ return NULL; ++} ++ ++/* ++ * ITS command descriptors - parameters to be encoded in a command ++ * block. 
++ */ ++struct its_cmd_desc { ++ union { ++ struct { ++ struct its_device *dev; ++ u32 event_id; ++ } its_inv_cmd; ++ ++ struct { ++ struct its_device *dev; ++ u32 event_id; ++ } its_clear_cmd; ++ ++ struct { ++ struct its_device *dev; ++ u32 event_id; ++ } its_int_cmd; ++ ++ struct { ++ struct its_device *dev; ++ int valid; ++ } its_mapd_cmd; ++ ++ struct { ++ struct its_collection *col; ++ int valid; ++ } its_mapc_cmd; ++ ++ struct { ++ struct its_device *dev; ++ u32 phys_id; ++ u32 event_id; ++ } its_mapti_cmd; ++ ++ struct { ++ struct its_device *dev; ++ struct its_collection *col; ++ u32 event_id; ++ } its_movi_cmd; ++ ++ struct { ++ struct its_device *dev; ++ u32 event_id; ++ } its_discard_cmd; ++ ++ struct { ++ struct its_collection *col; ++ } its_invall_cmd; ++ ++ struct { ++ struct its_vpe *vpe; ++ } its_vinvall_cmd; ++ ++ struct { ++ struct its_vpe *vpe; ++ struct its_collection *col; ++ bool valid; ++ } its_vmapp_cmd; ++ ++ struct { ++ struct its_vpe *vpe; ++ struct its_device *dev; ++ u32 virt_id; ++ u32 event_id; ++ bool db_enabled; ++ } its_vmapti_cmd; ++ ++ struct { ++ struct its_vpe *vpe; ++ struct its_device *dev; ++ u32 event_id; ++ bool db_enabled; ++ } its_vmovi_cmd; ++ ++ struct { ++ struct its_vpe *vpe; ++ struct its_collection *col; ++ u16 seq_num; ++ u16 its_list; ++ } its_vmovp_cmd; ++ }; ++}; ++ ++/* ++ * The ITS command block, which is what the ITS actually parses. 
++ */ ++struct its_cmd_block { ++ u64 raw_cmd[4]; ++}; ++ ++#define ITS_CMD_QUEUE_SZ SZ_64K ++#define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block)) ++ ++typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *, ++ struct its_cmd_block *, ++ struct its_cmd_desc *); ++ ++typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *, ++ struct its_cmd_block *, ++ struct its_cmd_desc *); ++ ++static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l) ++{ ++ u64 mask = GENMASK_ULL(h, l); ++ *raw_cmd &= ~mask; ++ *raw_cmd |= (val << l) & mask; ++} ++ ++static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr) ++{ ++ its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0); ++} ++ ++static void its_encode_devid(struct its_cmd_block *cmd, u32 devid) ++{ ++ its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32); ++} ++ ++static void its_encode_event_id(struct its_cmd_block *cmd, u32 id) ++{ ++ its_mask_encode(&cmd->raw_cmd[1], id, 31, 0); ++} ++ ++static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id) ++{ ++ its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32); ++} ++ ++static void its_encode_size(struct its_cmd_block *cmd, u8 size) ++{ ++ its_mask_encode(&cmd->raw_cmd[1], size, 4, 0); ++} ++ ++static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr) ++{ ++ its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8); ++} ++ ++static void its_encode_valid(struct its_cmd_block *cmd, int valid) ++{ ++ its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63); ++} ++ ++static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr) ++{ ++ its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16); ++} ++ ++static void its_encode_collection(struct its_cmd_block *cmd, u16 col) ++{ ++ its_mask_encode(&cmd->raw_cmd[2], col, 15, 0); ++} ++ ++static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid) ++{ ++ its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32); ++} ++ ++static void its_encode_virt_id(struct 
its_cmd_block *cmd, u32 virt_id) ++{ ++ its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0); ++} ++ ++static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id) ++{ ++ its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32); ++} ++ ++static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid) ++{ ++ its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0); ++} ++ ++static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num) ++{ ++ its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32); ++} ++ ++static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list) ++{ ++ its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0); ++} ++ ++static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa) ++{ ++ its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16); ++} ++ ++static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size) ++{ ++ its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0); ++} ++ ++static inline void its_fixup_cmd(struct its_cmd_block *cmd) ++{ ++ /* Let's fixup BE commands */ ++ cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]); ++ cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]); ++ cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]); ++ cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]); ++} ++ ++static struct its_collection *its_build_mapd_cmd(struct its_node *its, ++ struct its_cmd_block *cmd, ++ struct its_cmd_desc *desc) ++{ ++ unsigned long itt_addr; ++ u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites); ++ ++ itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt); ++ itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN); ++ ++ its_encode_cmd(cmd, GITS_CMD_MAPD); ++ its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id); ++ its_encode_size(cmd, size - 1); ++ its_encode_itt(cmd, itt_addr); ++ its_encode_valid(cmd, desc->its_mapd_cmd.valid); ++ ++ its_fixup_cmd(cmd); ++ ++ return NULL; ++} ++ ++static struct its_collection *its_build_mapc_cmd(struct its_node *its, ++ struct its_cmd_block *cmd, ++ struct its_cmd_desc *desc) ++{ ++ 
its_encode_cmd(cmd, GITS_CMD_MAPC); ++ its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); ++ its_encode_target(cmd, desc->its_mapc_cmd.col->target_address); ++ its_encode_valid(cmd, desc->its_mapc_cmd.valid); ++ ++ its_fixup_cmd(cmd); ++ ++ return desc->its_mapc_cmd.col; ++} ++ ++static struct its_collection *its_build_mapti_cmd(struct its_node *its, ++ struct its_cmd_block *cmd, ++ struct its_cmd_desc *desc) ++{ ++ struct its_collection *col; ++ ++ col = dev_event_to_col(desc->its_mapti_cmd.dev, ++ desc->its_mapti_cmd.event_id); ++ if (is_kdump_kernel()) ++ col->col_id = col->col_id % 65; ++ else ++ col->col_id = col->col_id % 64; ++ ++ its_encode_cmd(cmd, GITS_CMD_MAPTI); ++ its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id); ++ its_encode_event_id(cmd, desc->its_mapti_cmd.event_id); ++ its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id); ++ its_encode_collection(cmd, col->col_id); ++ ++ its_fixup_cmd(cmd); ++ ++ return valid_col(col); ++} ++ ++static struct its_collection *its_build_movi_cmd(struct its_node *its, ++ struct its_cmd_block *cmd, ++ struct its_cmd_desc *desc) ++{ ++ struct its_collection *col; ++ ++ col = dev_event_to_col(desc->its_movi_cmd.dev, ++ desc->its_movi_cmd.event_id); ++ ++ its_encode_cmd(cmd, GITS_CMD_MOVI); ++ its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id); ++ its_encode_event_id(cmd, desc->its_movi_cmd.event_id); ++ its_encode_collection(cmd, desc->its_movi_cmd.col->col_id); ++ ++ its_fixup_cmd(cmd); ++ ++ return valid_col(col); ++} ++ ++static struct its_collection *its_build_discard_cmd(struct its_node *its, ++ struct its_cmd_block *cmd, ++ struct its_cmd_desc *desc) ++{ ++ struct its_collection *col; ++ ++ col = dev_event_to_col(desc->its_discard_cmd.dev, ++ desc->its_discard_cmd.event_id); ++ ++ its_encode_cmd(cmd, GITS_CMD_DISCARD); ++ its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id); ++ its_encode_event_id(cmd, desc->its_discard_cmd.event_id); ++ ++ its_fixup_cmd(cmd); ++ ++ return 
valid_col(col); ++} ++ ++static struct its_collection *its_build_inv_cmd(struct its_node *its, ++ struct its_cmd_block *cmd, ++ struct its_cmd_desc *desc) ++{ ++ struct its_collection *col; ++ ++ col = dev_event_to_col(desc->its_inv_cmd.dev, ++ desc->its_inv_cmd.event_id); ++ ++ its_encode_cmd(cmd, GITS_CMD_INV); ++ its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id); ++ its_encode_event_id(cmd, desc->its_inv_cmd.event_id); ++ ++ its_fixup_cmd(cmd); ++ ++ return valid_col(col); ++} ++ ++static struct its_collection *its_build_int_cmd(struct its_node *its, ++ struct its_cmd_block *cmd, ++ struct its_cmd_desc *desc) ++{ ++ struct its_collection *col; ++ ++ col = dev_event_to_col(desc->its_int_cmd.dev, ++ desc->its_int_cmd.event_id); ++ ++ its_encode_cmd(cmd, GITS_CMD_INT); ++ its_encode_devid(cmd, desc->its_int_cmd.dev->device_id); ++ its_encode_event_id(cmd, desc->its_int_cmd.event_id); ++ ++ its_fixup_cmd(cmd); ++ ++ return valid_col(col); ++} ++ ++static struct its_collection *its_build_clear_cmd(struct its_node *its, ++ struct its_cmd_block *cmd, ++ struct its_cmd_desc *desc) ++{ ++ struct its_collection *col; ++ ++ col = dev_event_to_col(desc->its_clear_cmd.dev, ++ desc->its_clear_cmd.event_id); ++ ++ its_encode_cmd(cmd, GITS_CMD_CLEAR); ++ its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id); ++ its_encode_event_id(cmd, desc->its_clear_cmd.event_id); ++ ++ its_fixup_cmd(cmd); ++ ++ return valid_col(col); ++} ++ ++static struct its_collection *its_build_invall_cmd(struct its_node *its, ++ struct its_cmd_block *cmd, ++ struct its_cmd_desc *desc) ++{ ++ its_encode_cmd(cmd, GITS_CMD_INVALL); ++ its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); ++ ++ its_fixup_cmd(cmd); ++ ++ return NULL; ++} ++ ++static struct its_vpe *its_build_vinvall_cmd(struct its_node *its, ++ struct its_cmd_block *cmd, ++ struct its_cmd_desc *desc) ++{ ++ its_encode_cmd(cmd, GITS_CMD_VINVALL); ++ its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id); ++ ++ 
its_fixup_cmd(cmd); ++ ++ return valid_vpe(its, desc->its_vinvall_cmd.vpe); ++} ++ ++static struct its_vpe *its_build_vmapp_cmd(struct its_node *its, ++ struct its_cmd_block *cmd, ++ struct its_cmd_desc *desc) ++{ ++ unsigned long vpt_addr; ++ u64 target; ++ ++ vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page)); ++ target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset; ++ ++ its_encode_cmd(cmd, GITS_CMD_VMAPP); ++ its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id); ++ its_encode_valid(cmd, desc->its_vmapp_cmd.valid); ++ its_encode_target(cmd, target); ++ its_encode_vpt_addr(cmd, vpt_addr); ++ its_encode_vpt_size(cmd, LPI_NRBITS - 1); ++ ++ its_fixup_cmd(cmd); ++ ++ return valid_vpe(its, desc->its_vmapp_cmd.vpe); ++} ++ ++static struct its_vpe *its_build_vmapti_cmd(struct its_node *its, ++ struct its_cmd_block *cmd, ++ struct its_cmd_desc *desc) ++{ ++ u32 db; ++ ++ if (desc->its_vmapti_cmd.db_enabled) ++ db = desc->its_vmapti_cmd.vpe->vpe_db_lpi; ++ else ++ db = 1023; ++ ++ its_encode_cmd(cmd, GITS_CMD_VMAPTI); ++ its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id); ++ its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id); ++ its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id); ++ its_encode_db_phys_id(cmd, db); ++ its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id); ++ ++ its_fixup_cmd(cmd); ++ ++ return valid_vpe(its, desc->its_vmapti_cmd.vpe); ++} ++ ++static struct its_vpe *its_build_vmovi_cmd(struct its_node *its, ++ struct its_cmd_block *cmd, ++ struct its_cmd_desc *desc) ++{ ++ u32 db; ++ ++ if (desc->its_vmovi_cmd.db_enabled) ++ db = desc->its_vmovi_cmd.vpe->vpe_db_lpi; ++ else ++ db = 1023; ++ ++ its_encode_cmd(cmd, GITS_CMD_VMOVI); ++ its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id); ++ its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id); ++ its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id); ++ its_encode_db_phys_id(cmd, db); ++ its_encode_db_valid(cmd, true); ++ ++ 
its_fixup_cmd(cmd); ++ ++ return valid_vpe(its, desc->its_vmovi_cmd.vpe); ++} ++ ++static struct its_vpe *its_build_vmovp_cmd(struct its_node *its, ++ struct its_cmd_block *cmd, ++ struct its_cmd_desc *desc) ++{ ++ u64 target; ++ ++ target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset; ++ its_encode_cmd(cmd, GITS_CMD_VMOVP); ++ its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num); ++ its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list); ++ its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id); ++ its_encode_target(cmd, target); ++ ++ its_fixup_cmd(cmd); ++ ++ return valid_vpe(its, desc->its_vmovp_cmd.vpe); ++} ++ ++static u64 its_cmd_ptr_to_offset(struct its_node *its, ++ struct its_cmd_block *ptr) ++{ ++ return (ptr - its->cmd_base) * sizeof(*ptr); ++} ++ ++static int its_queue_full(struct its_node *its) ++{ ++ int widx; ++ int ridx; ++ ++ widx = its->cmd_write - its->cmd_base; ++ ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block); ++ ++ /* This is incredibly unlikely to happen, unless the ITS locks up. */ ++ if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx) ++ return 1; ++ ++ return 0; ++} ++ ++static struct its_cmd_block *its_allocate_entry(struct its_node *its) ++{ ++ struct its_cmd_block *cmd; ++ u32 count = 1000000; /* 1s! 
*/ ++ ++ while (its_queue_full(its)) { ++ count--; ++ if (!count) { ++ pr_err_ratelimited("ITS queue not draining\n"); ++ return NULL; ++ } ++ cpu_relax(); ++ udelay(1); ++ } ++ ++ cmd = its->cmd_write++; ++ ++ /* Handle queue wrapping */ ++ if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES)) ++ its->cmd_write = its->cmd_base; ++ ++ /* Clear command */ ++ cmd->raw_cmd[0] = 0; ++ cmd->raw_cmd[1] = 0; ++ cmd->raw_cmd[2] = 0; ++ cmd->raw_cmd[3] = 0; ++ ++ return cmd; ++} ++ ++static struct its_cmd_block *its_post_commands(struct its_node *its) ++{ ++ u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write); ++ ++ writel_relaxed(wr, its->base + GITS_CWRITER); ++ ++ return its->cmd_write; ++} ++ ++static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd) ++{ ++ /* ++ * Make sure the commands written to memory are observable by ++ * the ITS. ++ */ ++ if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING) ++ gic_flush_dcache_to_poc(cmd, sizeof(*cmd)); ++ else ++ dsb(ishst); ++} ++ ++static int its_wait_for_range_completion(struct its_node *its, ++ struct its_cmd_block *from, ++ struct its_cmd_block *to) ++{ ++ u64 rd_idx, from_idx, to_idx; ++ u32 count = 1000000; /* 1s! 
*/ ++ ++ from_idx = its_cmd_ptr_to_offset(its, from); ++ to_idx = its_cmd_ptr_to_offset(its, to); ++ ++ while (1) { ++ rd_idx = readl_relaxed(its->base + GITS_CREADR); ++ ++ /* Direct case */ ++ if (from_idx < to_idx && rd_idx >= to_idx) ++ break; ++ ++ /* Wrapped case */ ++ if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx) ++ break; ++ ++ count--; ++ if (!count) { ++ pr_err_ratelimited("ITS queue timeout (%llu %llu %llu)\n", ++ from_idx, to_idx, rd_idx); ++ return -1; ++ } ++ cpu_relax(); ++ udelay(1); ++ } ++ ++ return 0; ++} ++ ++/* Warning, macro hell follows */ ++#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn) \ ++void name(struct its_node *its, \ ++ buildtype builder, \ ++ struct its_cmd_desc *desc) \ ++{ \ ++ struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \ ++ synctype *sync_obj; \ ++ unsigned long flags; \ ++ \ ++ raw_spin_lock_irqsave(&its->lock, flags); \ ++ \ ++ cmd = its_allocate_entry(its); \ ++ if (!cmd) { /* We're soooooo screewed... */ \ ++ raw_spin_unlock_irqrestore(&its->lock, flags); \ ++ return; \ ++ } \ ++ sync_obj = builder(its, cmd, desc); \ ++ its_flush_cmd(its, cmd); \ ++ \ ++ if (sync_obj) { \ ++ sync_cmd = its_allocate_entry(its); \ ++ if (!sync_cmd) \ ++ goto post; \ ++ \ ++ buildfn(its, sync_cmd, sync_obj); \ ++ its_flush_cmd(its, sync_cmd); \ ++ } \ ++ \ ++post: \ ++ next_cmd = its_post_commands(its); \ ++ raw_spin_unlock_irqrestore(&its->lock, flags); \ ++ \ ++ if (its_wait_for_range_completion(its, cmd, next_cmd)) \ ++ pr_err_ratelimited("ITS cmd %ps failed\n", builder); \ ++} ++ ++static void its_build_sync_cmd(struct its_node *its, ++ struct its_cmd_block *sync_cmd, ++ struct its_collection *sync_col) ++{ ++ its_encode_cmd(sync_cmd, GITS_CMD_SYNC); ++ its_encode_target(sync_cmd, sync_col->target_address); ++ ++ its_fixup_cmd(sync_cmd); ++} ++ ++static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t, ++ struct its_collection, its_build_sync_cmd) ++ ++static void 
its_build_vsync_cmd(struct its_node *its, ++ struct its_cmd_block *sync_cmd, ++ struct its_vpe *sync_vpe) ++{ ++ its_encode_cmd(sync_cmd, GITS_CMD_VSYNC); ++ its_encode_vpeid(sync_cmd, sync_vpe->vpe_id); ++ ++ its_fixup_cmd(sync_cmd); ++} ++ ++static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t, ++ struct its_vpe, its_build_vsync_cmd) ++ ++static void its_send_int(struct its_device *dev, u32 event_id) ++{ ++ struct its_cmd_desc desc; ++ ++ desc.its_int_cmd.dev = dev; ++ desc.its_int_cmd.event_id = event_id; ++ ++ its_send_single_command(dev->its, its_build_int_cmd, &desc); ++} ++ ++static void its_send_clear(struct its_device *dev, u32 event_id) ++{ ++ struct its_cmd_desc desc; ++ ++ desc.its_clear_cmd.dev = dev; ++ desc.its_clear_cmd.event_id = event_id; ++ ++ its_send_single_command(dev->its, its_build_clear_cmd, &desc); ++} ++ ++static void its_send_inv(struct its_device *dev, u32 event_id) ++{ ++ struct its_cmd_desc desc; ++ ++ desc.its_inv_cmd.dev = dev; ++ desc.its_inv_cmd.event_id = event_id; ++ ++ its_send_single_command(dev->its, its_build_inv_cmd, &desc); ++} ++ ++static void its_send_mapd(struct its_device *dev, int valid) ++{ ++ struct its_cmd_desc desc; ++ ++ desc.its_mapd_cmd.dev = dev; ++ desc.its_mapd_cmd.valid = !!valid; ++ ++ its_send_single_command(dev->its, its_build_mapd_cmd, &desc); ++} ++ ++static void its_send_mapc(struct its_node *its, struct its_collection *col, ++ int valid) ++{ ++ struct its_cmd_desc desc; ++ ++ desc.its_mapc_cmd.col = col; ++ desc.its_mapc_cmd.valid = !!valid; ++ ++ its_send_single_command(its, its_build_mapc_cmd, &desc); ++} ++ ++static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id) ++{ ++ struct its_cmd_desc desc; ++ ++ desc.its_mapti_cmd.dev = dev; ++ desc.its_mapti_cmd.phys_id = irq_id; ++ desc.its_mapti_cmd.event_id = id; ++ ++ its_send_single_command(dev->its, its_build_mapti_cmd, &desc); ++} ++ ++static void its_send_movi(struct its_device *dev, ++ struct its_collection 
*col, u32 id) ++{ ++ struct its_cmd_desc desc; ++ ++ desc.its_movi_cmd.dev = dev; ++ desc.its_movi_cmd.col = col; ++ desc.its_movi_cmd.event_id = id; ++ ++ its_send_single_command(dev->its, its_build_movi_cmd, &desc); ++} ++ ++static void its_send_discard(struct its_device *dev, u32 id) ++{ ++ struct its_cmd_desc desc; ++ ++ desc.its_discard_cmd.dev = dev; ++ desc.its_discard_cmd.event_id = id; ++ ++ its_send_single_command(dev->its, its_build_discard_cmd, &desc); ++} ++ ++static void its_send_invall(struct its_node *its, struct its_collection *col) ++{ ++ struct its_cmd_desc desc; ++ ++ desc.its_invall_cmd.col = col; ++ ++ its_send_single_command(its, its_build_invall_cmd, &desc); ++} ++ ++static void its_send_vmapti(struct its_device *dev, u32 id) ++{ ++ struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id]; ++ struct its_cmd_desc desc; ++ ++ desc.its_vmapti_cmd.vpe = map->vpe; ++ desc.its_vmapti_cmd.dev = dev; ++ desc.its_vmapti_cmd.virt_id = map->vintid; ++ desc.its_vmapti_cmd.event_id = id; ++ desc.its_vmapti_cmd.db_enabled = map->db_enabled; ++ ++ its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc); ++} ++ ++static void its_send_vmovi(struct its_device *dev, u32 id) ++{ ++ struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id]; ++ struct its_cmd_desc desc; ++ ++ desc.its_vmovi_cmd.vpe = map->vpe; ++ desc.its_vmovi_cmd.dev = dev; ++ desc.its_vmovi_cmd.event_id = id; ++ desc.its_vmovi_cmd.db_enabled = map->db_enabled; ++ ++ its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc); ++} ++ ++static void its_send_vmapp(struct its_node *its, ++ struct its_vpe *vpe, bool valid) ++{ ++ struct its_cmd_desc desc; ++ ++ desc.its_vmapp_cmd.vpe = vpe; ++ desc.its_vmapp_cmd.valid = valid; ++ desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx]; ++ ++ its_send_single_vcommand(its, its_build_vmapp_cmd, &desc); ++} ++ ++static void its_send_vmovp(struct its_vpe *vpe) ++{ ++ struct its_cmd_desc desc; ++ struct its_node *its; ++ unsigned long flags; 
++ int col_id = vpe->col_idx; ++ ++ desc.its_vmovp_cmd.vpe = vpe; ++ desc.its_vmovp_cmd.its_list = (u16)its_list_map; ++ ++ if (!its_list_map) { ++ its = list_first_entry(&its_nodes, struct its_node, entry); ++ desc.its_vmovp_cmd.seq_num = 0; ++ desc.its_vmovp_cmd.col = &its->collections[col_id]; ++ its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); ++ return; ++ } ++ ++ /* ++ * Yet another marvel of the architecture. If using the ++ * its_list "feature", we need to make sure that all ITSs ++ * receive all VMOVP commands in the same order. The only way ++ * to guarantee this is to make vmovp a serialization point. ++ * ++ * Wall <-- Head. ++ */ ++ raw_spin_lock_irqsave(&vmovp_lock, flags); ++ ++ desc.its_vmovp_cmd.seq_num = vmovp_seq_num++; ++ ++ /* Emit VMOVPs */ ++ list_for_each_entry(its, &its_nodes, entry) { ++ if (!its->is_v4) ++ continue; ++ ++ if (!vpe->its_vm->vlpi_count[its->list_nr]) ++ continue; ++ ++ desc.its_vmovp_cmd.col = &its->collections[col_id]; ++ its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); ++ } ++ ++ raw_spin_unlock_irqrestore(&vmovp_lock, flags); ++} ++ ++static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe) ++{ ++ struct its_cmd_desc desc; ++ ++ desc.its_vinvall_cmd.vpe = vpe; ++ its_send_single_vcommand(its, its_build_vinvall_cmd, &desc); ++} ++ ++/* ++ * irqchip functions - assumes MSI, mostly. 
++ */ ++ ++static inline u32 its_get_event_id(struct irq_data *d) ++{ ++ struct its_device *its_dev = irq_data_get_irq_chip_data(d); ++ return d->hwirq - its_dev->event_map.lpi_base; ++} ++ ++static void lpi_write_config(struct irq_data *d, u8 clr, u8 set) ++{ ++ irq_hw_number_t hwirq; ++ void *va; ++ u8 *cfg; ++ ++ if (irqd_is_forwarded_to_vcpu(d)) { ++ struct its_device *its_dev = irq_data_get_irq_chip_data(d); ++ u32 event = its_get_event_id(d); ++ struct its_vlpi_map *map; ++ ++ va = page_address(its_dev->event_map.vm->vprop_page); ++ map = &its_dev->event_map.vlpi_maps[event]; ++ hwirq = map->vintid; ++ ++ /* Remember the updated property */ ++ map->properties &= ~clr; ++ map->properties |= set | LPI_PROP_GROUP1; ++ } else { ++ va = gic_rdists->prop_table_va; ++ hwirq = d->hwirq; ++ } ++ ++ cfg = va + hwirq - 8192; ++ *cfg &= ~clr; ++ *cfg |= set | LPI_PROP_GROUP1; ++ ++ /* ++ * Make the above write visible to the redistributors. ++ * And yes, we're flushing exactly: One. Single. Byte. ++ * Humpf... ++ */ ++ if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING) ++ gic_flush_dcache_to_poc(cfg, sizeof(*cfg)); ++ else ++ dsb(ishst); ++} ++ ++static void lpi_update_config(struct irq_data *d, u8 clr, u8 set) ++{ ++ struct its_device *its_dev = irq_data_get_irq_chip_data(d); ++ ++ lpi_write_config(d, clr, set); ++ its_send_inv(its_dev, its_get_event_id(d)); ++} ++ ++static void its_vlpi_set_doorbell(struct irq_data *d, bool enable) ++{ ++ struct its_device *its_dev = irq_data_get_irq_chip_data(d); ++ u32 event = its_get_event_id(d); ++ ++ if (its_dev->event_map.vlpi_maps[event].db_enabled == enable) ++ return; ++ ++ its_dev->event_map.vlpi_maps[event].db_enabled = enable; ++ ++ /* ++ * More fun with the architecture: ++ * ++ * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI ++ * value or to 1023, depending on the enable bit. But that ++ * would be issuing a mapping for an /existing/ DevID+EventID 
Instead, let's issue a VMOVI ++ * to the /same/ vPE, using this opportunity to adjust the ++ * doorbell. Mouahahahaha. We loves it, Precious. ++ */ ++ its_send_vmovi(its_dev, event); ++} ++ ++static void its_mask_irq(struct irq_data *d) ++{ ++ if (irqd_is_forwarded_to_vcpu(d)) ++ its_vlpi_set_doorbell(d, false); ++ ++ lpi_update_config(d, LPI_PROP_ENABLED, 0); ++} ++ ++static void its_unmask_irq(struct irq_data *d) ++{ ++ if (irqd_is_forwarded_to_vcpu(d)) ++ its_vlpi_set_doorbell(d, true); ++ ++ lpi_update_config(d, 0, LPI_PROP_ENABLED); ++} ++ ++static __maybe_unused u32 its_read_lpi_count(struct irq_data *d, int cpu) ++{ ++ if (irqd_affinity_is_managed(d)) ++ return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed); ++ ++ return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged); ++} ++ ++static void its_inc_lpi_count(struct irq_data *d, int cpu) ++{ ++ if (irqd_affinity_is_managed(d)) ++ atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed); ++ else ++ atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged); ++} ++ ++static void its_dec_lpi_count(struct irq_data *d, int cpu) ++{ ++ if (irqd_affinity_is_managed(d)) ++ atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed); ++ else ++ atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged); ++} ++ ++static unsigned int cpumask_pick_least_loaded(unsigned int target_cpu, struct irq_data *d, ++ const struct cpumask *cpu_mask) ++{ ++ unsigned int cpu = target_cpu, tmp, target_skt, dest_skt; ++ int count = S32_MAX; ++ ++ target_skt = (cpu_logical_map(target_cpu) >> 16) & 0xff; ++ ++ for_each_cpu(tmp, cpu_mask) { ++ int this_count = its_read_lpi_count(d, tmp); ++ dest_skt = (cpu_logical_map(tmp) >> 16) & 0xff; ++ ++ if ((this_count < count) && (dest_skt == target_skt)) { ++ cpu = tmp; ++ count = this_count; ++ } ++ } ++ ++ return cpu; ++} ++ ++/* ++ * As suggested by Thomas Gleixner in: ++ * https://lore.kernel.org/r/87h80q2aoc.fsf@nanos.tec.linutronix.de ++ */ ++static int its_select_cpu(unsigned int 
target_cpu, struct irq_data *d, ++ const struct cpumask *aff_mask) ++{ ++ struct its_device *its_dev = irq_data_get_irq_chip_data(d); ++ cpumask_var_t tmpmask; ++ int cpu = target_cpu, node; ++ ++ if (!alloc_cpumask_var(&tmpmask, GFP_ATOMIC)) ++ return -ENOMEM; ++ ++ node = its_dev->its->numa_node; ++ ++ if (!irqd_affinity_is_managed(d)) { ++ /* First try the NUMA node */ ++ if (node != NUMA_NO_NODE) { ++ /* ++ * Try the intersection of the affinity mask and the ++ * node mask (and the online mask, just to be safe). ++ */ ++ cpumask_and(tmpmask, cpumask_of_node(node), aff_mask); ++ cpumask_and(tmpmask, tmpmask, cpu_online_mask); ++ ++ /* ++ * Ideally, we would check if the mask is empty, and ++ * try again on the full node here. ++ * ++ * But it turns out that the way ACPI describes the ++ * affinity for ITSs only deals about memory, and ++ * not target CPUs, so it cannot describe a single ++ * ITS placed next to two NUMA nodes. ++ * ++ * Instead, just fallback on the online mask. This ++ * diverges from Thomas' suggestion above. 
++ */ ++ cpu = cpumask_pick_least_loaded(target_cpu, d, tmpmask); ++ if (cpu < nr_cpu_ids) ++ goto out; ++ ++ /* If we can't cross sockets, give up */ ++ if ((its_dev->its->flags & ++ ITS_FLAGS_WORKAROUND_CAVIUM_23144)) ++ goto out; ++ ++ /* If the above failed, expand the search */ ++ } ++ ++ /* Try the intersection of the affinity and online masks */ ++ cpumask_and(tmpmask, aff_mask, cpu_online_mask); ++ ++ /* If that doesn't fly, the online mask is the last resort */ ++ if (cpumask_empty(tmpmask)) ++ cpumask_copy(tmpmask, cpu_online_mask); ++ ++ cpu = cpumask_pick_least_loaded(target_cpu, d, tmpmask); ++ } else { ++ cpumask_and(tmpmask, irq_data_get_affinity_mask(d), ++ cpu_online_mask); ++ ++ /* If we cannot cross sockets, limit the search to that node */ ++ if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) && ++ node != NUMA_NO_NODE) ++ cpumask_and(tmpmask, tmpmask, cpumask_of_node(node)); ++ ++ cpu = cpumask_pick_least_loaded(target_cpu, d, tmpmask); ++ } ++out: ++ free_cpumask_var(tmpmask); ++ ++ pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, ++ cpumask_pr_args(aff_mask), cpu); ++ return cpu; ++} ++ ++#define MAX_MARS3_SKT_COUNT 8 ++ ++static int its_cpumask_select(struct its_device *its_dev, ++ const struct cpumask *mask_val, ++ const struct cpumask *cpu_mask) ++{ ++ unsigned int skt, skt_id, i; ++ phys_addr_t its_phys_base; ++ unsigned int cpu, cpus = 0; ++ ++ unsigned int skt_cpu_cnt[MAX_MARS3_SKT_COUNT] = {0}; ++ ++ its_phys_base = its_dev->its->phys_base; ++ skt_id = (its_phys_base >> 41) & 0x7; ++ ++ for (i = 0; i < nr_cpu_ids; i++) { ++ skt = (cpu_logical_map(i) >> 16) & 0xff; ++ if ((skt >= 0) && (skt < MAX_MARS3_SKT_COUNT)) { ++ if ((is_kdump_kernel()) && (skt_id == skt)) { ++ return i; ++ } ++ ++ skt_cpu_cnt[skt]++; ++ } ++ else if (skt != 0xff) ++ pr_err("socket address: %d is out of range.", skt); ++ } ++ ++ if (0 != skt_id) { ++ for (i = 0; i < skt_id; i++) ++ cpus += skt_cpu_cnt[i]; ++ } ++ ++ cpu = cpumask_any_and(mask_val, cpu_mask); 
++ cpus = cpus + cpu % skt_cpu_cnt[skt_id]; ++ ++ return cpus; ++} ++ ++static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, ++ bool force) ++{ ++ unsigned int cpu, target_cpu; ++ const struct cpumask *cpu_mask = cpu_online_mask; ++ struct its_device *its_dev = irq_data_get_irq_chip_data(d); ++ struct its_collection *target_col; ++ u32 id = its_get_event_id(d); ++ int prev_cpu; ++ ++ /* A forwarded interrupt should use irq_set_vcpu_affinity */ ++ if (irqd_is_forwarded_to_vcpu(d)) ++ return -EINVAL; ++ ++ prev_cpu = its_dev->event_map.col_map[id]; ++ its_dec_lpi_count(d, prev_cpu); ++ ++ target_cpu = its_cpumask_select(its_dev, mask_val, cpu_mask); ++ if (!force) ++ cpu = its_select_cpu(target_cpu, d, mask_val); ++ else ++ cpu = cpumask_pick_least_loaded(target_cpu, d, mask_val); ++ ++ if (cpu < 0 || cpu >= nr_cpu_ids) ++ goto err; ++ ++ /* don't set the affinity when the target cpu is same as current one */ ++ if (cpu != prev_cpu) { ++ target_col = &its_dev->its->collections[cpu]; ++ its_send_movi(its_dev, target_col, id); ++ its_dev->event_map.col_map[id] = cpu; ++ irq_data_update_effective_affinity(d, cpumask_of(cpu)); ++ } ++ ++ its_inc_lpi_count(d, cpu); ++ ++ return IRQ_SET_MASK_OK_DONE; ++ ++err: ++ its_inc_lpi_count(d, prev_cpu); ++ return -EINVAL; ++} ++ ++static u64 its_irq_get_msi_base(struct its_device *its_dev) ++{ ++ struct its_node *its = its_dev->its; ++ ++ return its->phys_base + GITS_TRANSLATER; ++} ++ ++static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) ++{ ++ struct its_device *its_dev = irq_data_get_irq_chip_data(d); ++ struct its_node *its; ++ u64 addr; ++ ++ its = its_dev->its; ++ addr = its->get_msi_base(its_dev); ++ ++ msg->address_lo = lower_32_bits(addr); ++ msg->address_hi = upper_32_bits(addr); ++ msg->data = its_get_event_id(d); ++} ++ ++static int its_irq_set_irqchip_state(struct irq_data *d, ++ enum irqchip_irq_state which, ++ bool state) ++{ ++ struct its_device *its_dev = 
irq_data_get_irq_chip_data(d); ++ u32 event = its_get_event_id(d); ++ ++ if (which != IRQCHIP_STATE_PENDING) ++ return -EINVAL; ++ ++ if (state) ++ its_send_int(its_dev, event); ++ else ++ its_send_clear(its_dev, event); ++ ++ return 0; ++} ++ ++static void its_map_vm(struct its_node *its, struct its_vm *vm) ++{ ++ unsigned long flags; ++ ++ /* Not using the ITS list? Everything is always mapped. */ ++ if (!its_list_map) ++ return; ++ ++ raw_spin_lock_irqsave(&vmovp_lock, flags); ++ ++ /* ++ * If the VM wasn't mapped yet, iterate over the vpes and get ++ * them mapped now. ++ */ ++ vm->vlpi_count[its->list_nr]++; ++ ++ if (vm->vlpi_count[its->list_nr] == 1) { ++ int i; ++ ++ for (i = 0; i < vm->nr_vpes; i++) { ++ struct its_vpe *vpe = vm->vpes[i]; ++ struct irq_data *d = irq_get_irq_data(vpe->irq); ++ ++ /* Map the VPE to the first possible CPU */ ++ vpe->col_idx = cpumask_first(cpu_online_mask); ++ its_send_vmapp(its, vpe, true); ++ its_send_vinvall(its, vpe); ++ irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); ++ } ++ } ++ ++ raw_spin_unlock_irqrestore(&vmovp_lock, flags); ++} ++ ++static void its_unmap_vm(struct its_node *its, struct its_vm *vm) ++{ ++ unsigned long flags; ++ ++ /* Not using the ITS list? Everything is always mapped. 
*/ ++ if (!its_list_map) ++ return; ++ ++ raw_spin_lock_irqsave(&vmovp_lock, flags); ++ ++ if (!--vm->vlpi_count[its->list_nr]) { ++ int i; ++ ++ for (i = 0; i < vm->nr_vpes; i++) ++ its_send_vmapp(its, vm->vpes[i], false); ++ } ++ ++ raw_spin_unlock_irqrestore(&vmovp_lock, flags); ++} ++ ++static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) ++{ ++ struct its_device *its_dev = irq_data_get_irq_chip_data(d); ++ u32 event = its_get_event_id(d); ++ int ret = 0; ++ ++ if (!info->map) ++ return -EINVAL; ++ ++ mutex_lock(&its_dev->event_map.vlpi_lock); ++ ++ if (!its_dev->event_map.vm) { ++ struct its_vlpi_map *maps; ++ ++ maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps), ++ GFP_KERNEL); ++ if (!maps) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ its_dev->event_map.vm = info->map->vm; ++ its_dev->event_map.vlpi_maps = maps; ++ } else if (its_dev->event_map.vm != info->map->vm) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ /* Get our private copy of the mapping information */ ++ its_dev->event_map.vlpi_maps[event] = *info->map; ++ ++ if (irqd_is_forwarded_to_vcpu(d)) { ++ /* Already mapped, move it around */ ++ its_send_vmovi(its_dev, event); ++ } else { ++ /* Ensure all the VPEs are mapped on this ITS */ ++ its_map_vm(its_dev->its, info->map->vm); ++ ++ /* ++ * Flag the interrupt as forwarded so that we can ++ * start poking the virtual property table. 
++ */ ++ irqd_set_forwarded_to_vcpu(d); ++ ++ /* Write out the property to the prop table */ ++ lpi_write_config(d, 0xff, info->map->properties); ++ ++ /* Drop the physical mapping */ ++ its_send_discard(its_dev, event); ++ ++ /* and install the virtual one */ ++ its_send_vmapti(its_dev, event); ++ ++ /* Increment the number of VLPIs */ ++ its_dev->event_map.nr_vlpis++; ++ } ++ ++out: ++ mutex_unlock(&its_dev->event_map.vlpi_lock); ++ return ret; ++} ++ ++static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info) ++{ ++ struct its_device *its_dev = irq_data_get_irq_chip_data(d); ++ u32 event = its_get_event_id(d); ++ int ret = 0; ++ ++ mutex_lock(&its_dev->event_map.vlpi_lock); ++ ++ if (!its_dev->event_map.vm || ++ !its_dev->event_map.vlpi_maps[event].vm) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ /* Copy our mapping information to the incoming request */ ++ *info->map = its_dev->event_map.vlpi_maps[event]; ++ ++out: ++ mutex_unlock(&its_dev->event_map.vlpi_lock); ++ return ret; ++} ++ ++static int its_vlpi_unmap(struct irq_data *d) ++{ ++ struct its_device *its_dev = irq_data_get_irq_chip_data(d); ++ u32 event = its_get_event_id(d); ++ int ret = 0; ++ ++ mutex_lock(&its_dev->event_map.vlpi_lock); ++ ++ if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ /* Drop the virtual mapping */ ++ its_send_discard(its_dev, event); ++ ++ /* and restore the physical one */ ++ irqd_clr_forwarded_to_vcpu(d); ++ its_send_mapti(its_dev, d->hwirq, event); ++ lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO | ++ LPI_PROP_ENABLED | ++ LPI_PROP_GROUP1)); ++ ++ /* Potentially unmap the VM from this ITS */ ++ its_unmap_vm(its_dev->its, its_dev->event_map.vm); ++ ++ /* ++ * Drop the refcount and make the device available again if ++ * this was the last VLPI. 
++ */ ++ if (!--its_dev->event_map.nr_vlpis) { ++ its_dev->event_map.vm = NULL; ++ kfree(its_dev->event_map.vlpi_maps); ++ } ++ ++out: ++ mutex_unlock(&its_dev->event_map.vlpi_lock); ++ return ret; ++} ++ ++static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info) ++{ ++ struct its_device *its_dev = irq_data_get_irq_chip_data(d); ++ ++ if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) ++ return -EINVAL; ++ ++ if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI) ++ lpi_update_config(d, 0xff, info->config); ++ else ++ lpi_write_config(d, 0xff, info->config); ++ its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED)); ++ ++ return 0; ++} ++ ++static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) ++{ ++ struct its_device *its_dev = irq_data_get_irq_chip_data(d); ++ struct its_cmd_info *info = vcpu_info; ++ ++ /* Need a v4 ITS */ ++ if (!its_dev->its->is_v4) ++ return -EINVAL; ++ ++ /* Unmap request? */ ++ if (!info) ++ return its_vlpi_unmap(d); ++ ++ switch (info->cmd_type) { ++ case MAP_VLPI: ++ return its_vlpi_map(d, info); ++ ++ case GET_VLPI: ++ return its_vlpi_get(d, info); ++ ++ case PROP_UPDATE_VLPI: ++ case PROP_UPDATE_AND_INV_VLPI: ++ return its_vlpi_prop_update(d, info); ++ ++ default: ++ return -EINVAL; ++ } ++} ++ ++static int its_irq_retrigger(struct irq_data *d) ++{ ++ return !its_irq_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true); ++} ++ ++static struct irq_chip its_irq_chip = { ++ .name = "ITS", ++ .irq_mask = its_mask_irq, ++ .irq_unmask = its_unmask_irq, ++ .irq_eoi = irq_chip_eoi_parent, ++ .irq_set_affinity = its_set_affinity, ++ .irq_compose_msi_msg = its_irq_compose_msi_msg, ++ .irq_set_irqchip_state = its_irq_set_irqchip_state, ++ .irq_retrigger = its_irq_retrigger, ++ .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity, ++}; ++ ++ ++/* ++ * How we allocate LPIs: ++ * ++ * lpi_range_list contains ranges of LPIs that are to available to ++ * allocate from. 
To allocate LPIs, just pick the first range that ++ * fits the required allocation, and reduce it by the required ++ * amount. Once empty, remove the range from the list. ++ * ++ * To free a range of LPIs, add a free range to the list, sort it and ++ * merge the result if the new range happens to be adjacent to an ++ * already free block. ++ * ++ * The consequence of the above is that allocation is cost is low, but ++ * freeing is expensive. We assumes that freeing rarely occurs. ++ */ ++#define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */ ++ ++static DEFINE_MUTEX(lpi_range_lock); ++static LIST_HEAD(lpi_range_list); ++ ++struct lpi_range { ++ struct list_head entry; ++ u32 base_id; ++ u32 span; ++}; ++ ++static struct lpi_range *mk_lpi_range(u32 base, u32 span) ++{ ++ struct lpi_range *range; ++ ++ range = kzalloc(sizeof(*range), GFP_KERNEL); ++ if (range) { ++ INIT_LIST_HEAD(&range->entry); ++ range->base_id = base; ++ range->span = span; ++ } ++ ++ return range; ++} ++ ++static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b) ++{ ++ struct lpi_range *ra, *rb; ++ ++ ra = container_of(a, struct lpi_range, entry); ++ rb = container_of(b, struct lpi_range, entry); ++ ++ return rb->base_id - ra->base_id; ++} ++ ++static void merge_lpi_ranges(void) ++{ ++ struct lpi_range *range, *tmp; ++ ++ list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) { ++ if (!list_is_last(&range->entry, &lpi_range_list) && ++ (tmp->base_id == (range->base_id + range->span))) { ++ tmp->base_id = range->base_id; ++ tmp->span += range->span; ++ list_del(&range->entry); ++ kfree(range); ++ } ++ } ++} ++ ++static int alloc_lpi_range(u32 nr_lpis, u32 *base) ++{ ++ struct lpi_range *range, *tmp; ++ int err = -ENOSPC; ++ ++ mutex_lock(&lpi_range_lock); ++ ++ list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) { ++ if (range->span >= nr_lpis) { ++ *base = range->base_id; ++ range->base_id += nr_lpis; ++ range->span -= nr_lpis; ++ ++ if (range->span == 0) { ++ 
list_del(&range->entry); ++ kfree(range); ++ } ++ ++ err = 0; ++ break; ++ } ++ } ++ ++ mutex_unlock(&lpi_range_lock); ++ ++ pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis); ++ return err; ++} ++ ++static int free_lpi_range(u32 base, u32 nr_lpis) ++{ ++ struct lpi_range *new; ++ int err = 0; ++ ++ mutex_lock(&lpi_range_lock); ++ ++ new = mk_lpi_range(base, nr_lpis); ++ if (!new) { ++ err = -ENOMEM; ++ goto out; ++ } ++ ++ list_add(&new->entry, &lpi_range_list); ++ list_sort(NULL, &lpi_range_list, lpi_range_cmp); ++ merge_lpi_ranges(); ++out: ++ mutex_unlock(&lpi_range_lock); ++ return err; ++} ++ ++static int __init its_lpi_init(u32 id_bits) ++{ ++ u32 lpis = (1UL << id_bits) - 8192; ++ u32 numlpis; ++ int err; ++ ++ numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer); ++ ++ if (numlpis > 2 && !WARN_ON(numlpis > lpis)) { ++ lpis = numlpis; ++ pr_info("ITS: Using hypervisor restricted LPI range [%u]\n", ++ lpis); ++ } ++ ++ /* ++ * Initializing the allocator is just the same as freeing the ++ * full range of LPIs. 
++ */ ++ err = free_lpi_range(8192, lpis); ++ pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis); ++ return err; ++} ++ ++static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids) ++{ ++ unsigned long *bitmap = NULL; ++ int err = 0; ++ ++ do { ++ err = alloc_lpi_range(nr_irqs, base); ++ if (!err) ++ break; ++ ++ nr_irqs /= 2; ++ } while (nr_irqs > 0); ++ ++ if (err) ++ goto out; ++ ++ bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof (long), GFP_ATOMIC); ++ if (!bitmap) ++ goto out; ++ ++ *nr_ids = nr_irqs; ++ ++out: ++ if (!bitmap) ++ *base = *nr_ids = 0; ++ ++ return bitmap; ++} ++ ++static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids) ++{ ++ WARN_ON(free_lpi_range(base, nr_ids)); ++ kfree(bitmap); ++} ++ ++static void gic_reset_prop_table(void *va) ++{ ++ /* Priority 0xa0, Group-1, disabled */ ++ memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ); ++ ++ /* Make sure the GIC will observe the written configuration */ ++ gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ); ++} ++ ++static struct page *its_allocate_prop_table(gfp_t gfp_flags) ++{ ++ struct page *prop_page; ++ ++ prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ)); ++ if (!prop_page) ++ return NULL; ++ ++ gic_reset_prop_table(page_address(prop_page)); ++ ++ return prop_page; ++} ++ ++static void its_free_prop_table(struct page *prop_page) ++{ ++ free_pages((unsigned long)page_address(prop_page), ++ get_order(LPI_PROPBASE_SZ)); ++} ++ ++static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size) ++{ ++ phys_addr_t start, end, addr_end; ++ u64 i; ++ ++ /* ++ * We don't bother checking for a kdump kernel as by ++ * construction, the LPI tables are out of this kernel's ++ * memory map. ++ */ ++ if (is_kdump_kernel()) ++ return true; ++ ++ addr_end = addr + size - 1; ++ ++ for_each_reserved_mem_region(i, &start, &end) { ++ if (addr >= start && addr_end <= end) ++ return true; ++ } ++ ++ /* Not found, not a good sign... 
*/ ++ pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n", ++ &addr, &addr_end); ++ add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); ++ return false; ++} ++ ++static int gic_reserve_range(phys_addr_t addr, unsigned long size) ++{ ++ if (efi_enabled(EFI_CONFIG_TABLES)) ++ return efi_mem_reserve_persistent(addr, size); ++ ++ return 0; ++} ++static int __init its_setup_lpi_prop_table(void) ++{ ++ if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) { ++ u64 val; ++ ++ val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER); ++ lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1; ++ ++ gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12); ++ gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa, ++ LPI_PROPBASE_SZ, ++ MEMREMAP_WB); ++ gic_reset_prop_table(gic_rdists->prop_table_va); ++ } else { ++ struct page *page; ++ ++ lpi_id_bits = min_t(u32, ++ GICD_TYPER_ID_BITS(gic_rdists->gicd_typer), ++ ITS_MAX_LPI_NRBITS); ++ page = its_allocate_prop_table(GFP_NOWAIT); ++ if (!page) { ++ pr_err("Failed to allocate PROPBASE\n"); ++ return -ENOMEM; ++ } ++ ++ gic_rdists->prop_table_pa = page_to_phys(page); ++ gic_rdists->prop_table_va = page_address(page); ++ WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa, ++ LPI_PROPBASE_SZ)); ++ } ++ ++ pr_info("GICv-2500: using LPI property table @%pa\n", ++ &gic_rdists->prop_table_pa); ++ ++ return its_lpi_init(lpi_id_bits); ++} ++ ++static const char *its_base_type_string[] = { ++ [GITS_BASER_TYPE_DEVICE] = "Devices", ++ [GITS_BASER_TYPE_VCPU] = "Virtual CPUs", ++ [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)", ++ [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections", ++ [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)", ++ [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)", ++ [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)", ++}; ++ ++static u64 its_read_baser(struct its_node *its, struct its_baser *baser) ++{ ++ u32 idx = baser - its->tables; ++ ++ return gits_read_baser(its->base + GITS_BASER + (idx << 3)); ++} 
++ ++static void its_write_baser(struct its_node *its, struct its_baser *baser, ++ u64 val) ++{ ++ u32 idx = baser - its->tables; ++ ++ gits_write_baser(val, its->base + GITS_BASER + (idx << 3)); ++ baser->val = its_read_baser(its, baser); ++} ++ ++static int its_setup_baser(struct its_node *its, struct its_baser *baser, ++ u64 cache, u64 shr, u32 psz, u32 order, ++ bool indirect) ++{ ++ u64 val = its_read_baser(its, baser); ++ u64 esz = GITS_BASER_ENTRY_SIZE(val); ++ u64 type = GITS_BASER_TYPE(val); ++ u64 baser_phys, tmp; ++ u32 alloc_pages; ++ void *base; ++ ++retry_alloc_baser: ++ alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz); ++ if (alloc_pages > GITS_BASER_PAGES_MAX) { ++ pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n", ++ &its->phys_base, its_base_type_string[type], ++ alloc_pages, GITS_BASER_PAGES_MAX); ++ alloc_pages = GITS_BASER_PAGES_MAX; ++ order = get_order(GITS_BASER_PAGES_MAX * psz); ++ } ++ ++ base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); ++ if (!base) ++ return -ENOMEM; ++ ++ baser_phys = virt_to_phys(base); ++ ++ /* Check if the physical address of the memory is above 48bits */ ++ if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) { ++ ++ /* 52bit PA is supported only when PageSize=64K */ ++ if (psz != SZ_64K) { ++ pr_err("ITS: no 52bit PA support when psz=%d\n", psz); ++ free_pages((unsigned long)base, order); ++ return -ENXIO; ++ } ++ ++ /* Convert 52bit PA to 48bit field */ ++ baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys); ++ } ++ ++retry_baser: ++ val = (baser_phys | ++ (type << GITS_BASER_TYPE_SHIFT) | ++ ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) | ++ ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) | ++ cache | ++ shr | ++ GITS_BASER_VALID); ++ ++ val |= indirect ? 
GITS_BASER_INDIRECT : 0x0; ++ ++ switch (psz) { ++ case SZ_4K: ++ val |= GITS_BASER_PAGE_SIZE_4K; ++ break; ++ case SZ_16K: ++ val |= GITS_BASER_PAGE_SIZE_16K; ++ break; ++ case SZ_64K: ++ val |= GITS_BASER_PAGE_SIZE_64K; ++ break; ++ } ++ ++ its_write_baser(its, baser, val); ++ tmp = baser->val; ++ ++ if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) { ++ /* ++ * Shareability didn't stick. Just use ++ * whatever the read reported, which is likely ++ * to be the only thing this redistributor ++ * supports. If that's zero, make it ++ * non-cacheable as well. ++ */ ++ shr = tmp & GITS_BASER_SHAREABILITY_MASK; ++ if (!shr) { ++ cache = GITS_BASER_nC; ++ gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order)); ++ } ++ goto retry_baser; ++ } ++ ++ if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) { ++ /* ++ * Page size didn't stick. Let's try a smaller ++ * size and retry. If we reach 4K, then ++ * something is horribly wrong... ++ */ ++ free_pages((unsigned long)base, order); ++ baser->base = NULL; ++ ++ switch (psz) { ++ case SZ_16K: ++ psz = SZ_4K; ++ goto retry_alloc_baser; ++ case SZ_64K: ++ psz = SZ_16K; ++ goto retry_alloc_baser; ++ } ++ } ++ ++ if (val != tmp) { ++ pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n", ++ &its->phys_base, its_base_type_string[type], ++ val, tmp); ++ free_pages((unsigned long)base, order); ++ return -ENXIO; ++ } ++ ++ baser->order = order; ++ baser->base = base; ++ baser->psz = psz; ++ tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz; ++ ++ pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n", ++ &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp), ++ its_base_type_string[type], ++ (unsigned long)virt_to_phys(base), ++ indirect ? 
"indirect" : "flat", (int)esz, ++ psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); ++ ++ return 0; ++} ++ ++static bool its_parse_indirect_baser(struct its_node *its, ++ struct its_baser *baser, ++ u32 psz, u32 *order, u32 ids) ++{ ++ u64 tmp = its_read_baser(its, baser); ++ u64 type = GITS_BASER_TYPE(tmp); ++ u64 esz = GITS_BASER_ENTRY_SIZE(tmp); ++ u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb; ++ u32 new_order = *order; ++ bool indirect = false; ++ ++ /* No need to enable Indirection if memory requirement < (psz*2)bytes */ ++ if ((esz << ids) > (psz * 2)) { ++ /* ++ * Find out whether hw supports a single or two-level table by ++ * table by reading bit at offset '62' after writing '1' to it. ++ */ ++ its_write_baser(its, baser, val | GITS_BASER_INDIRECT); ++ indirect = !!(baser->val & GITS_BASER_INDIRECT); ++ ++ if (indirect) { ++ /* ++ * The size of the lvl2 table is equal to ITS page size ++ * which is 'psz'. For computing lvl1 table size, ++ * subtract ID bits that sparse lvl2 table from 'ids' ++ * which is reported by ITS hardware times lvl1 table ++ * entry size. ++ */ ++ ids -= ilog2(psz / (int)esz); ++ esz = GITS_LVL1_ENTRY_SIZE; ++ } ++ } ++ ++ /* ++ * Allocate as many entries as required to fit the ++ * range of device IDs that the ITS can grok... The ID ++ * space being incredibly sparse, this results in a ++ * massive waste of memory if two-level device table ++ * feature is not supported by hardware. 
++ */ ++ new_order = max_t(u32, get_order(esz << ids), new_order); ++ if (new_order >= MAX_ORDER) { ++ new_order = MAX_ORDER - 1; ++ ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz); ++ pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n", ++ &its->phys_base, its_base_type_string[type], ++ its->device_ids, ids); ++ } ++ ++ *order = new_order; ++ ++ return indirect; ++} ++ ++static void its_free_tables(struct its_node *its) ++{ ++ int i; ++ ++ for (i = 0; i < GITS_BASER_NR_REGS; i++) { ++ if (its->tables[i].base) { ++ free_pages((unsigned long)its->tables[i].base, ++ its->tables[i].order); ++ its->tables[i].base = NULL; ++ } ++ } ++} ++ ++static int its_alloc_tables(struct its_node *its) ++{ ++ u64 shr = GITS_BASER_InnerShareable; ++ u64 cache = GITS_BASER_RaWaWb; ++ u32 psz = SZ_64K; ++ int err, i; ++ ++ if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) ++ /* erratum 24313: ignore memory access type */ ++ cache = GITS_BASER_nCnB; ++ ++ for (i = 0; i < GITS_BASER_NR_REGS; i++) { ++ struct its_baser *baser = its->tables + i; ++ u64 val = its_read_baser(its, baser); ++ u64 type = GITS_BASER_TYPE(val); ++ u32 order = get_order(psz); ++ bool indirect = false; ++ ++ switch (type) { ++ case GITS_BASER_TYPE_NONE: ++ continue; ++ ++ case GITS_BASER_TYPE_DEVICE: ++ indirect = its_parse_indirect_baser(its, baser, ++ psz, &order, ++ its->device_ids); ++ break; ++ ++ case GITS_BASER_TYPE_VCPU: ++ indirect = its_parse_indirect_baser(its, baser, ++ psz, &order, ++ ITS_MAX_VPEID_BITS); ++ break; ++ } ++ ++ err = its_setup_baser(its, baser, cache, shr, psz, order, indirect); ++ if (err < 0) { ++ its_free_tables(its); ++ return err; ++ } ++ ++ /* Update settings which will be used for next BASERn */ ++ psz = baser->psz; ++ cache = baser->val & GITS_BASER_CACHEABILITY_MASK; ++ shr = baser->val & GITS_BASER_SHAREABILITY_MASK; ++ } ++ ++ return 0; ++} ++ ++static int its_alloc_collections(struct its_node *its) ++{ ++ int i; ++ ++ its->collections = kcalloc(nr_cpu_ids, 
sizeof(*its->collections), ++ GFP_KERNEL); ++ if (!its->collections) ++ return -ENOMEM; ++ ++ for (i = 0; i < nr_cpu_ids; i++) ++ its->collections[i].target_address = ~0ULL; ++ ++ return 0; ++} ++ ++static struct page *its_allocate_pending_table(gfp_t gfp_flags) ++{ ++ struct page *pend_page; ++ ++ pend_page = alloc_pages(gfp_flags | __GFP_ZERO, ++ get_order(LPI_PENDBASE_SZ)); ++ if (!pend_page) ++ return NULL; ++ ++ /* Make sure the GIC will observe the zero-ed page */ ++ gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ); ++ ++ return pend_page; ++} ++ ++static void its_free_pending_table(struct page *pt) ++{ ++ free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ)); ++} ++ ++/* ++ * Booting with kdump and LPIs enabled is generally fine. Any other ++ * case is wrong in the absence of firmware/EFI support. ++ */ ++static bool enabled_lpis_allowed(void) ++{ ++ phys_addr_t addr; ++ u64 val; ++ ++ /* Check whether the property table is in a reserved region */ ++ val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER); ++ addr = val & GENMASK_ULL(51, 12); ++ ++ return gic_check_reserved_range(addr, LPI_PROPBASE_SZ); ++} ++ ++static int __init allocate_lpi_tables(void) ++{ ++ u64 val; ++ int err, cpu; ++ ++ /* ++ * If LPIs are enabled while we run this from the boot CPU, ++ * flag the RD tables as pre-allocated if the stars do align. ++ */ ++ val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR); ++ if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) { ++ gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED | ++ RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING); ++ pr_info("GICv3: Using preallocated redistributor tables\n"); ++ } ++ ++ err = its_setup_lpi_prop_table(); ++ if (err) ++ return err; ++ ++ /* ++ * We allocate all the pending tables anyway, as we may have a ++ * mix of RDs that have had LPIs enabled, and some that ++ * don't. We'll free the unused ones as each CPU comes online. 
++ */ ++ for_each_possible_cpu(cpu) { ++ struct page *pend_page; ++ ++ pend_page = its_allocate_pending_table(GFP_NOWAIT); ++ if (!pend_page) { ++ pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu); ++ return -ENOMEM; ++ } ++ ++ gic_data_rdist_cpu(cpu)->pend_page = pend_page; ++ } ++ ++ return 0; ++} ++ ++static void its_cpu_init_lpis(void) ++{ ++ void __iomem *rbase = gic_data_rdist_rd_base(); ++ struct page *pend_page; ++ phys_addr_t paddr; ++ u64 val, tmp; ++ ++ if (gic_data_rdist()->lpi_enabled) ++ return; ++ ++ val = readl_relaxed(rbase + GICR_CTLR); ++ if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) && ++ (val & GICR_CTLR_ENABLE_LPIS)) { ++ /* ++ * Check that we get the same property table on all ++ * RDs. If we don't, this is hopeless. ++ */ ++ paddr = gicr_read_propbaser(rbase + GICR_PROPBASER); ++ paddr &= GENMASK_ULL(51, 12); ++ if (WARN_ON(gic_rdists->prop_table_pa != paddr)) ++ add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); ++ ++ paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER); ++ paddr &= GENMASK_ULL(51, 16); ++ ++ WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ)); ++ its_free_pending_table(gic_data_rdist()->pend_page); ++ gic_data_rdist()->pend_page = NULL; ++ ++ goto out; ++ } ++ ++ pend_page = gic_data_rdist()->pend_page; ++ paddr = page_to_phys(pend_page); ++ WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ)); ++ ++ /* set PROPBASE */ ++ val = (gic_rdists->prop_table_pa | ++ GICR_PROPBASER_InnerShareable | ++ GICR_PROPBASER_RaWaWb | ++ ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK)); ++ ++ gicr_write_propbaser(val, rbase + GICR_PROPBASER); ++ tmp = gicr_read_propbaser(rbase + GICR_PROPBASER); ++ ++ if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) { ++ if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) { ++ /* ++ * The HW reports non-shareable, we must ++ * remove the cacheability attributes as ++ * well. 
++ */ ++ val &= ~(GICR_PROPBASER_SHAREABILITY_MASK | ++ GICR_PROPBASER_CACHEABILITY_MASK); ++ val |= GICR_PROPBASER_nC; ++ gicr_write_propbaser(val, rbase + GICR_PROPBASER); ++ } ++ pr_info_once("GIC: using cache flushing for LPI property table\n"); ++ gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING; ++ } ++ ++ /* set PENDBASE */ ++ val = (page_to_phys(pend_page) | ++ GICR_PENDBASER_InnerShareable | ++ GICR_PENDBASER_RaWaWb); ++ ++ gicr_write_pendbaser(val, rbase + GICR_PENDBASER); ++ tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER); ++ ++ if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) { ++ /* ++ * The HW reports non-shareable, we must remove the ++ * cacheability attributes as well. ++ */ ++ val &= ~(GICR_PENDBASER_SHAREABILITY_MASK | ++ GICR_PENDBASER_CACHEABILITY_MASK); ++ val |= GICR_PENDBASER_nC; ++ gicr_write_pendbaser(val, rbase + GICR_PENDBASER); ++ } ++ ++ /* Enable LPIs */ ++ val = readl_relaxed(rbase + GICR_CTLR); ++ val |= GICR_CTLR_ENABLE_LPIS; ++ writel_relaxed(val, rbase + GICR_CTLR); ++ ++ /* Make sure the GIC has seen the above */ ++ dsb(sy); ++out: ++ gic_data_rdist()->lpi_enabled = true; ++ pr_info("GICv-2500: CPU%d: using %s LPI pending table @%pa\n", ++ smp_processor_id(), ++ gic_data_rdist()->pend_page ? "allocated" : "reserved", ++ &paddr); ++} ++ ++static void its_cpu_init_collection(struct its_node *its) ++{ ++ int cpu = smp_processor_id(); ++ unsigned long mpid, skt_id; ++ phys_addr_t its_phys_base; ++ u64 target; ++ ++ /* avoid cross node collections and its mapping */ ++ if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { ++ struct device_node *cpu_node; ++ ++ cpu_node = of_get_cpu_node(cpu, NULL); ++ if (its->numa_node != NUMA_NO_NODE && ++ its->numa_node != of_node_to_nid(cpu_node)) ++ return; ++ } ++ ++ mpid = cpu_logical_map(cpu); ++ its_phys_base = its->phys_base; ++ skt_id = (its_phys_base >> 41) & 0x7; ++ ++ /* ++ * We now have to bind each collection to its target ++ * redistributor. 
++ */ ++ if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) { ++ /* ++ * This ITS wants the physical address of the ++ * redistributor. ++ */ ++ target = gic_data_rdist()->phys_base; ++ } else { ++ /* This ITS wants a linear CPU number. */ ++ target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); ++ target = GICR_TYPER_CPU_NUMBER(target) << 16; ++ } ++ ++ /* Perform collection mapping */ ++ its->collections[cpu].target_address = target; ++ if (is_kdump_kernel()) ++ its->collections[cpu].col_id = cpu % 65; ++ else ++ its->collections[cpu].col_id = cpu % 64; ++ ++ its_send_mapc(its, &its->collections[cpu], 1); ++ its_send_invall(its, &its->collections[cpu]); ++} ++ ++static void its_cpu_init_collections(void) ++{ ++ struct its_node *its; ++ ++ raw_spin_lock(&its_lock); ++ ++ list_for_each_entry(its, &its_nodes, entry) ++ its_cpu_init_collection(its); ++ ++ raw_spin_unlock(&its_lock); ++} ++ ++static struct its_device *its_find_device(struct its_node *its, u32 dev_id) ++{ ++ struct its_device *its_dev = NULL, *tmp; ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&its->lock, flags); ++ ++ list_for_each_entry(tmp, &its->its_device_list, entry) { ++ if (tmp->device_id == dev_id) { ++ its_dev = tmp; ++ break; ++ } ++ } ++ ++ raw_spin_unlock_irqrestore(&its->lock, flags); ++ ++ return its_dev; ++} ++ ++static struct its_baser *its_get_baser(struct its_node *its, u32 type) ++{ ++ int i; ++ ++ for (i = 0; i < GITS_BASER_NR_REGS; i++) { ++ if (GITS_BASER_TYPE(its->tables[i].val) == type) ++ return &its->tables[i]; ++ } ++ ++ return NULL; ++} ++ ++static bool its_alloc_table_entry(struct its_baser *baser, u32 id) ++{ ++ struct page *page; ++ u32 esz, idx; ++ __le64 *table; ++ ++ /* Don't allow device id that exceeds single, flat table limit */ ++ esz = GITS_BASER_ENTRY_SIZE(baser->val); ++ if (!(baser->val & GITS_BASER_INDIRECT)) ++ return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz)); ++ ++ /* Compute 1st level table index & check if that exceeds 
table limit */ ++ idx = id >> ilog2(baser->psz / esz); ++ if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE)) ++ return false; ++ ++ table = baser->base; ++ ++ /* Allocate memory for 2nd level table */ ++ if (!table[idx]) { ++ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(baser->psz)); ++ if (!page) ++ return false; ++ ++ /* Flush Lvl2 table to PoC if hw doesn't support coherency */ ++ if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) ++ gic_flush_dcache_to_poc(page_address(page), baser->psz); ++ ++ table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID); ++ ++ /* Flush Lvl1 entry to PoC if hw doesn't support coherency */ ++ if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) ++ gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE); ++ ++ /* Ensure updated table contents are visible to ITS hardware */ ++ dsb(sy); ++ } ++ ++ return true; ++} ++ ++static bool its_alloc_device_table(struct its_node *its, u32 dev_id) ++{ ++ struct its_baser *baser; ++ ++ baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE); ++ ++ /* Don't allow device id that exceeds ITS hardware limit */ ++ if (!baser) ++ return (ilog2(dev_id) < its->device_ids); ++ ++ return its_alloc_table_entry(baser, dev_id); ++} ++ ++static bool its_alloc_vpe_table(u32 vpe_id) ++{ ++ struct its_node *its; ++ ++ /* ++ * Make sure the L2 tables are allocated on *all* v4 ITSs. We ++ * could try and only do it on ITSs corresponding to devices ++ * that have interrupts targeted at this VPE, but the ++ * complexity becomes crazy (and you have tons of memory ++ * anyway, right?). 
++ */ ++ list_for_each_entry(its, &its_nodes, entry) { ++ struct its_baser *baser; ++ ++ if (!its->is_v4) ++ continue; ++ ++ baser = its_get_baser(its, GITS_BASER_TYPE_VCPU); ++ if (!baser) ++ return false; ++ ++ if (!its_alloc_table_entry(baser, vpe_id)) ++ return false; ++ } ++ ++ return true; ++} ++ ++static struct its_device *its_create_device(struct its_node *its, u32 dev_id, ++ int nvecs, bool alloc_lpis) ++{ ++ struct its_device *dev; ++ unsigned long *lpi_map = NULL; ++ unsigned long flags; ++ u16 *col_map = NULL; ++ void *itt; ++ int lpi_base; ++ int nr_lpis; ++ int nr_ites; ++ int sz; ++ ++ if (!its_alloc_device_table(its, dev_id)) ++ return NULL; ++ ++ if (WARN_ON(!is_power_of_2(nvecs))) ++ nvecs = roundup_pow_of_two(nvecs); ++ ++ dev = kzalloc(sizeof(*dev), GFP_KERNEL); ++ /* ++ * Even if the device wants a single LPI, the ITT must be ++ * sized as a power of two (and you need at least one bit...). ++ */ ++ nr_ites = max(2, nvecs); ++ sz = nr_ites * its->ite_size; ++ sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; ++ itt = kzalloc(sz, GFP_KERNEL); ++ if (alloc_lpis) { ++ lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis); ++ if (lpi_map) ++ col_map = kcalloc(nr_lpis, sizeof(*col_map), ++ GFP_KERNEL); ++ } else { ++ col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL); ++ nr_lpis = 0; ++ lpi_base = 0; ++ } ++ ++ if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) { ++ kfree(dev); ++ kfree(itt); ++ kfree(lpi_map); ++ kfree(col_map); ++ return NULL; ++ } ++ ++ gic_flush_dcache_to_poc(itt, sz); ++ ++ dev->its = its; ++ dev->itt = itt; ++ dev->nr_ites = nr_ites; ++ dev->event_map.lpi_map = lpi_map; ++ dev->event_map.col_map = col_map; ++ dev->event_map.lpi_base = lpi_base; ++ dev->event_map.nr_lpis = nr_lpis; ++ mutex_init(&dev->event_map.vlpi_lock); ++ dev->device_id = dev_id; ++ INIT_LIST_HEAD(&dev->entry); ++ ++ raw_spin_lock_irqsave(&its->lock, flags); ++ list_add(&dev->entry, &its->its_device_list); ++ raw_spin_unlock_irqrestore(&its->lock, 
flags); ++ ++ /* Map device to its ITT */ ++ its_send_mapd(dev, 1); ++ ++ return dev; ++} ++ ++static void its_free_device(struct its_device *its_dev) ++{ ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&its_dev->its->lock, flags); ++ list_del(&its_dev->entry); ++ raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); ++ kfree(its_dev->itt); ++ kfree(its_dev); ++} ++ ++static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq) ++{ ++ int idx; ++ ++ idx = find_first_zero_bit(dev->event_map.lpi_map, ++ dev->event_map.nr_lpis); ++ if (idx == dev->event_map.nr_lpis) ++ return -ENOSPC; ++ ++ *hwirq = dev->event_map.lpi_base + idx; ++ set_bit(idx, dev->event_map.lpi_map); ++ ++ return 0; ++} ++ ++static int its_msi_prepare(struct irq_domain *domain, struct device *dev, ++ int nvec, msi_alloc_info_t *info) ++{ ++ struct its_node *its; ++ struct its_device *its_dev; ++ struct msi_domain_info *msi_info; ++ u32 dev_id; ++ ++ /* ++ * We ignore "dev" entierely, and rely on the dev_id that has ++ * been passed via the scratchpad. This limits this domain's ++ * usefulness to upper layers that definitely know that they ++ * are built on top of the ITS. ++ */ ++ dev_id = info->scratchpad[0].ul; ++ ++ msi_info = msi_get_domain_info(domain); ++ its = msi_info->data; ++ ++ if (!gic_rdists->has_direct_lpi && ++ vpe_proxy.dev && ++ vpe_proxy.dev->its == its && ++ dev_id == vpe_proxy.dev->device_id) { ++ /* Bad luck. Get yourself a better implementation */ ++ WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n", ++ dev_id); ++ return -EINVAL; ++ } ++ ++ its_dev = its_find_device(its, dev_id); ++ if (its_dev) { ++ /* ++ * We already have seen this ID, probably through ++ * another alias (PCI bridge of some sort). No need to ++ * create the device. 
++ */ ++ pr_debug("Reusing ITT for devID %x\n", dev_id); ++ goto out; ++ } ++ ++ its_dev = its_create_device(its, dev_id, nvec, true); ++ if (!its_dev) ++ return -ENOMEM; ++ ++ pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec)); ++out: ++ info->scratchpad[0].ptr = its_dev; ++ return 0; ++} ++ ++static struct msi_domain_ops its_msi_domain_ops = { ++ .msi_prepare = its_msi_prepare, ++}; ++ ++static int its_irq_gic_domain_alloc(struct irq_domain *domain, ++ unsigned int virq, ++ irq_hw_number_t hwirq) ++{ ++ struct irq_fwspec fwspec; ++ ++ if (irq_domain_get_of_node(domain->parent)) { ++ fwspec.fwnode = domain->parent->fwnode; ++ fwspec.param_count = 3; ++ fwspec.param[0] = GIC_IRQ_TYPE_LPI; ++ fwspec.param[1] = hwirq; ++ fwspec.param[2] = IRQ_TYPE_EDGE_RISING; ++ } else if (is_fwnode_irqchip(domain->parent->fwnode)) { ++ fwspec.fwnode = domain->parent->fwnode; ++ fwspec.param_count = 2; ++ fwspec.param[0] = hwirq; ++ fwspec.param[1] = IRQ_TYPE_EDGE_RISING; ++ } else { ++ return -EINVAL; ++ } ++ ++ return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); ++} ++ ++static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, ++ unsigned int nr_irqs, void *args) ++{ ++ msi_alloc_info_t *info = args; ++ struct its_device *its_dev = info->scratchpad[0].ptr; ++ irq_hw_number_t hwirq; ++ int err; ++ int i; ++ ++ for (i = 0; i < nr_irqs; i++) { ++ err = its_alloc_device_irq(its_dev, &hwirq); ++ if (err) ++ return err; ++ ++ err = its_irq_gic_domain_alloc(domain, virq + i, hwirq); ++ if (err) ++ return err; ++ ++ irq_domain_set_hwirq_and_chip(domain, virq + i, ++ hwirq, &its_irq_chip, its_dev); ++ irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i))); ++ pr_debug("ID:%d pID:%d vID:%d\n", ++ (int)(hwirq - its_dev->event_map.lpi_base), ++ (int) hwirq, virq + i); ++ } ++ ++ return 0; ++} ++ ++static int its_cpumask_first(struct its_device *its_dev, ++ const struct cpumask *cpu_mask) ++{ ++ unsigned int skt, skt_id, i; ++ phys_addr_t 
its_phys_base; ++ unsigned int cpu, cpus = 0; ++ unsigned int skt_cpu_cnt[MAX_MARS3_SKT_COUNT] = {0}; ++ ++ its_phys_base = its_dev->its->phys_base; ++ skt_id = (its_phys_base >> 41) & 0x7; ++ ++ for (i = 0; i < nr_cpu_ids; i++) { ++ skt = (cpu_logical_map(i) >> 16) & 0xff; ++ if ((skt >= 0) && (skt < MAX_MARS3_SKT_COUNT)) { ++ if ((is_kdump_kernel()) && (skt_id == skt)) { ++ return i; ++ } ++ ++ skt_cpu_cnt[skt]++; ++ } ++ else if (0xff != skt ) ++ pr_err("socket address: %d is out of range.", skt); ++ } ++ ++ if (0 != skt_id) { ++ for (i = 0; i < skt_id; i++) ++ cpus += skt_cpu_cnt[i]; ++ } ++ ++ cpu = cpumask_first(cpu_mask); ++ if ((cpu > cpus) && (cpu < (cpus + skt_cpu_cnt[skt_id]))) ++ cpus = cpu; ++ ++ return cpus; ++} ++ ++static int its_irq_domain_activate(struct irq_domain *domain, ++ struct irq_data *d, bool reserve) ++{ ++ struct its_device *its_dev = irq_data_get_irq_chip_data(d); ++ u32 event = its_get_event_id(d); ++ const struct cpumask *cpu_mask = cpu_online_mask; ++ int cpu; ++ ++ /* get the cpu_mask of local node */ ++ if (its_dev->its->numa_node >= 0) ++ cpu_mask = cpumask_of_node(its_dev->its->numa_node); ++ ++ /* Bind the LPI to the first possible CPU */ ++ cpu = its_cpumask_first(its_dev, cpu_mask); ++ printk("its_irq_domain_activate: MAPTI irq %d hwirq %ld on cpu %d\n", ++ d->irq, d->hwirq, cpu); ++ ++ its_inc_lpi_count(d, cpu); ++ its_dev->event_map.col_map[event] = cpu; ++ irq_data_update_effective_affinity(d, cpumask_of(cpu)); ++ ++ /* Map the GIC IRQ and event to the device */ ++ its_send_mapti(its_dev, d->hwirq, event); ++ return 0; ++} ++ ++static void its_irq_domain_deactivate(struct irq_domain *domain, ++ struct irq_data *d) ++{ ++ struct its_device *its_dev = irq_data_get_irq_chip_data(d); ++ u32 event = its_get_event_id(d); ++ ++ its_dec_lpi_count(d, its_dev->event_map.col_map[event]); ++ /* Stop the delivery of interrupts */ ++ its_send_discard(its_dev, event); ++} ++ ++static void its_irq_domain_free(struct irq_domain *domain, 
unsigned int virq, ++ unsigned int nr_irqs) ++{ ++ struct irq_data *d = irq_domain_get_irq_data(domain, virq); ++ struct its_device *its_dev = irq_data_get_irq_chip_data(d); ++ int i; ++ ++ for (i = 0; i < nr_irqs; i++) { ++ struct irq_data *data = irq_domain_get_irq_data(domain, ++ virq + i); ++ u32 event = its_get_event_id(data); ++ ++ /* Mark interrupt index as unused */ ++ clear_bit(event, its_dev->event_map.lpi_map); ++ ++ /* Nuke the entry in the domain */ ++ irq_domain_reset_irq_data(data); ++ } ++ ++ /* If all interrupts have been freed, start mopping the floor */ ++ if (bitmap_empty(its_dev->event_map.lpi_map, ++ its_dev->event_map.nr_lpis)) { ++ its_lpi_free(its_dev->event_map.lpi_map, ++ its_dev->event_map.lpi_base, ++ its_dev->event_map.nr_lpis); ++ kfree(its_dev->event_map.col_map); ++ ++ /* Unmap device/itt */ ++ its_send_mapd(its_dev, 0); ++ its_free_device(its_dev); ++ } ++ ++ irq_domain_free_irqs_parent(domain, virq, nr_irqs); ++} ++ ++static const struct irq_domain_ops its_domain_ops = { ++ .alloc = its_irq_domain_alloc, ++ .free = its_irq_domain_free, ++ .activate = its_irq_domain_activate, ++ .deactivate = its_irq_domain_deactivate, ++}; ++ ++/* ++ * This is insane. ++ * ++ * If a GICv4 doesn't implement Direct LPIs (which is extremely ++ * likely), the only way to perform an invalidate is to use a fake ++ * device to issue an INV command, implying that the LPI has first ++ * been mapped to some event on that device. Since this is not exactly ++ * cheap, we try to keep that mapping around as long as possible, and ++ * only issue an UNMAP if we're short on available slots. ++ * ++ * Broken by design(tm). ++ */ ++static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe) ++{ ++ /* Already unmapped? 
*/ ++ if (vpe->vpe_proxy_event == -1) ++ return; ++ ++ its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event); ++ vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL; ++ ++ /* ++ * We don't track empty slots at all, so let's move the ++ * next_victim pointer if we can quickly reuse that slot ++ * instead of nuking an existing entry. Not clear that this is ++ * always a win though, and this might just generate a ripple ++ * effect... Let's just hope VPEs don't migrate too often. ++ */ ++ if (vpe_proxy.vpes[vpe_proxy.next_victim]) ++ vpe_proxy.next_victim = vpe->vpe_proxy_event; ++ ++ vpe->vpe_proxy_event = -1; ++} ++ ++static void its_vpe_db_proxy_unmap(struct its_vpe *vpe) ++{ ++ if (!gic_rdists->has_direct_lpi) { ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&vpe_proxy.lock, flags); ++ its_vpe_db_proxy_unmap_locked(vpe); ++ raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); ++ } ++} ++ ++static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe) ++{ ++ /* Already mapped? */ ++ if (vpe->vpe_proxy_event != -1) ++ return; ++ ++ /* This slot was already allocated. Kick the other VPE out. 
*/ ++ if (vpe_proxy.vpes[vpe_proxy.next_victim]) ++ its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]); ++ ++ /* Map the new VPE instead */ ++ vpe_proxy.vpes[vpe_proxy.next_victim] = vpe; ++ vpe->vpe_proxy_event = vpe_proxy.next_victim; ++ vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites; ++ ++ vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx; ++ its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event); ++} ++ ++static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to) ++{ ++ unsigned long flags; ++ struct its_collection *target_col; ++ ++ if (gic_rdists->has_direct_lpi) { ++ void __iomem *rdbase; ++ ++ rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base; ++ gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); ++ while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) ++ cpu_relax(); ++ ++ return; ++ } ++ ++ raw_spin_lock_irqsave(&vpe_proxy.lock, flags); ++ ++ its_vpe_db_proxy_map_locked(vpe); ++ ++ target_col = &vpe_proxy.dev->its->collections[to]; ++ its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event); ++ vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to; ++ ++ raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); ++} ++ ++static int its_vpe_set_affinity(struct irq_data *d, ++ const struct cpumask *mask_val, ++ bool force) ++{ ++ struct its_vpe *vpe = irq_data_get_irq_chip_data(d); ++ int cpu = cpumask_first(mask_val); ++ ++ /* ++ * Changing affinity is mega expensive, so let's be as lazy as ++ * we can and only do it if we really have to. Also, if mapped ++ * into the proxy device, we need to move the doorbell ++ * interrupt to its new location. 
++ */ ++ if (vpe->col_idx != cpu) { ++ int from = vpe->col_idx; ++ ++ vpe->col_idx = cpu; ++ its_send_vmovp(vpe); ++ its_vpe_db_proxy_move(vpe, from, cpu); ++ } ++ ++ irq_data_update_effective_affinity(d, cpumask_of(cpu)); ++ ++ return IRQ_SET_MASK_OK_DONE; ++} ++ ++static void its_vpe_schedule(struct its_vpe *vpe) ++{ ++ void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); ++ u64 val; ++ ++ /* Schedule the VPE */ ++ val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) & ++ GENMASK_ULL(51, 12); ++ val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; ++ val |= GICR_VPROPBASER_RaWb; ++ val |= GICR_VPROPBASER_InnerShareable; ++ gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); ++ ++ val = virt_to_phys(page_address(vpe->vpt_page)) & ++ GENMASK_ULL(51, 16); ++ val |= GICR_VPENDBASER_RaWaWb; ++ val |= GICR_VPENDBASER_NonShareable; ++ /* ++ * There is no good way of finding out if the pending table is ++ * empty as we can race against the doorbell interrupt very ++ * easily. So in the end, vpe->pending_last is only an ++ * indication that the vcpu has something pending, not one ++ * that the pending table is empty. A good implementation ++ * would be able to read its coarse map pretty quickly anyway, ++ * making this a tolerable issue. ++ */ ++ val |= GICR_VPENDBASER_PendingLast; ++ val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0; ++ val |= GICR_VPENDBASER_Valid; ++ gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); ++} ++ ++static void its_vpe_deschedule(struct its_vpe *vpe) ++{ ++ void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); ++ u32 count = 1000000; /* 1s! 
*/ ++ bool clean; ++ u64 val; ++ ++ /* We're being scheduled out */ ++ val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); ++ val &= ~GICR_VPENDBASER_Valid; ++ gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); ++ ++ do { ++ val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); ++ clean = !(val & GICR_VPENDBASER_Dirty); ++ if (!clean) { ++ count--; ++ cpu_relax(); ++ udelay(1); ++ } ++ } while (!clean && count); ++ ++ if (unlikely(!clean && !count)) { ++ pr_err_ratelimited("ITS virtual pending table not cleaning\n"); ++ vpe->idai = false; ++ vpe->pending_last = true; ++ } else { ++ vpe->idai = !!(val & GICR_VPENDBASER_IDAI); ++ vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); ++ } ++} ++ ++static void its_vpe_invall(struct its_vpe *vpe) ++{ ++ struct its_node *its; ++ ++ list_for_each_entry(its, &its_nodes, entry) { ++ if (!its->is_v4) ++ continue; ++ ++ if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr]) ++ continue; ++ ++ /* ++ * Sending a VINVALL to a single ITS is enough, as all ++ * we need is to reach the redistributors. 
++ */ ++ its_send_vinvall(its, vpe); ++ return; ++ } ++} ++ ++static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) ++{ ++ struct its_vpe *vpe = irq_data_get_irq_chip_data(d); ++ struct its_cmd_info *info = vcpu_info; ++ ++ switch (info->cmd_type) { ++ case SCHEDULE_VPE: ++ its_vpe_schedule(vpe); ++ return 0; ++ ++ case DESCHEDULE_VPE: ++ its_vpe_deschedule(vpe); ++ return 0; ++ ++ case INVALL_VPE: ++ its_vpe_invall(vpe); ++ return 0; ++ ++ default: ++ return -EINVAL; ++ } ++} ++ ++static void its_vpe_send_cmd(struct its_vpe *vpe, ++ void (*cmd)(struct its_device *, u32)) ++{ ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&vpe_proxy.lock, flags); ++ ++ its_vpe_db_proxy_map_locked(vpe); ++ cmd(vpe_proxy.dev, vpe->vpe_proxy_event); ++ ++ raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); ++} ++ ++static void its_vpe_send_inv(struct irq_data *d) ++{ ++ struct its_vpe *vpe = irq_data_get_irq_chip_data(d); ++ ++ if (gic_rdists->has_direct_lpi) { ++ void __iomem *rdbase; ++ ++ rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; ++ gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR); ++ while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) ++ cpu_relax(); ++ } else { ++ its_vpe_send_cmd(vpe, its_send_inv); ++ } ++} ++ ++static void its_vpe_mask_irq(struct irq_data *d) ++{ ++ /* ++ * We need to unmask the LPI, which is described by the parent ++ * irq_data. Instead of calling into the parent (which won't ++ * exactly do the right thing, let's simply use the ++ * parent_data pointer. Yes, I'm naughty. ++ */ ++ lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0); ++ its_vpe_send_inv(d); ++} ++ ++static void its_vpe_unmask_irq(struct irq_data *d) ++{ ++ /* Same hack as above... 
*/ ++ lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED); ++ its_vpe_send_inv(d); ++} ++ ++static int its_vpe_set_irqchip_state(struct irq_data *d, ++ enum irqchip_irq_state which, ++ bool state) ++{ ++ struct its_vpe *vpe = irq_data_get_irq_chip_data(d); ++ ++ if (which != IRQCHIP_STATE_PENDING) ++ return -EINVAL; ++ ++ if (gic_rdists->has_direct_lpi) { ++ void __iomem *rdbase; ++ ++ rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; ++ if (state) { ++ gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR); ++ } else { ++ gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); ++ while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) ++ cpu_relax(); ++ } ++ } else { ++ if (state) ++ its_vpe_send_cmd(vpe, its_send_int); ++ else ++ its_vpe_send_cmd(vpe, its_send_clear); ++ } ++ ++ return 0; ++} ++ ++static struct irq_chip its_vpe_irq_chip = { ++ .name = "GICv4-vpe", ++ .irq_mask = its_vpe_mask_irq, ++ .irq_unmask = its_vpe_unmask_irq, ++ .irq_eoi = irq_chip_eoi_parent, ++ .irq_set_affinity = its_vpe_set_affinity, ++ .irq_set_irqchip_state = its_vpe_set_irqchip_state, ++ .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity, ++}; ++ ++static int its_vpe_id_alloc(void) ++{ ++ return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL); ++} ++ ++static void its_vpe_id_free(u16 id) ++{ ++ ida_simple_remove(&its_vpeid_ida, id); ++} ++ ++static int its_vpe_init(struct its_vpe *vpe) ++{ ++ struct page *vpt_page; ++ int vpe_id; ++ ++ /* Allocate vpe_id */ ++ vpe_id = its_vpe_id_alloc(); ++ if (vpe_id < 0) ++ return vpe_id; ++ ++ /* Allocate VPT */ ++ vpt_page = its_allocate_pending_table(GFP_KERNEL); ++ if (!vpt_page) { ++ its_vpe_id_free(vpe_id); ++ return -ENOMEM; ++ } ++ ++ if (!its_alloc_vpe_table(vpe_id)) { ++ its_vpe_id_free(vpe_id); ++ its_free_pending_table(vpe->vpt_page); ++ return -ENOMEM; ++ } ++ ++ vpe->vpe_id = vpe_id; ++ vpe->vpt_page = vpt_page; ++ vpe->vpe_proxy_event = -1; ++ ++ return 0; ++} ++ ++static void its_vpe_teardown(struct its_vpe 
*vpe) ++{ ++ its_vpe_db_proxy_unmap(vpe); ++ its_vpe_id_free(vpe->vpe_id); ++ its_free_pending_table(vpe->vpt_page); ++} ++ ++static void its_vpe_irq_domain_free(struct irq_domain *domain, ++ unsigned int virq, ++ unsigned int nr_irqs) ++{ ++ struct its_vm *vm = domain->host_data; ++ int i; ++ ++ irq_domain_free_irqs_parent(domain, virq, nr_irqs); ++ ++ for (i = 0; i < nr_irqs; i++) { ++ struct irq_data *data = irq_domain_get_irq_data(domain, ++ virq + i); ++ struct its_vpe *vpe = irq_data_get_irq_chip_data(data); ++ ++ BUG_ON(vm != vpe->its_vm); ++ ++ clear_bit(data->hwirq, vm->db_bitmap); ++ its_vpe_teardown(vpe); ++ irq_domain_reset_irq_data(data); ++ } ++ ++ if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) { ++ its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis); ++ its_free_prop_table(vm->vprop_page); ++ } ++} ++ ++static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, ++ unsigned int nr_irqs, void *args) ++{ ++ struct its_vm *vm = args; ++ unsigned long *bitmap; ++ struct page *vprop_page; ++ int base, nr_ids, i, err = 0; ++ ++ BUG_ON(!vm); ++ ++ bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids); ++ if (!bitmap) ++ return -ENOMEM; ++ ++ if (nr_ids < nr_irqs) { ++ its_lpi_free(bitmap, base, nr_ids); ++ return -ENOMEM; ++ } ++ ++ vprop_page = its_allocate_prop_table(GFP_KERNEL); ++ if (!vprop_page) { ++ its_lpi_free(bitmap, base, nr_ids); ++ return -ENOMEM; ++ } ++ ++ vm->db_bitmap = bitmap; ++ vm->db_lpi_base = base; ++ vm->nr_db_lpis = nr_ids; ++ vm->vprop_page = vprop_page; ++ ++ for (i = 0; i < nr_irqs; i++) { ++ vm->vpes[i]->vpe_db_lpi = base + i; ++ err = its_vpe_init(vm->vpes[i]); ++ if (err) ++ break; ++ err = its_irq_gic_domain_alloc(domain, virq + i, ++ vm->vpes[i]->vpe_db_lpi); ++ if (err) ++ break; ++ irq_domain_set_hwirq_and_chip(domain, virq + i, i, ++ &its_vpe_irq_chip, vm->vpes[i]); ++ set_bit(i, bitmap); ++ } ++ ++ if (err) { ++ if (i > 0) ++ its_vpe_irq_domain_free(domain, virq, i - 1); ++ 
++ its_lpi_free(bitmap, base, nr_ids); ++ its_free_prop_table(vprop_page); ++ } ++ ++ return err; ++} ++ ++static int its_vpe_irq_domain_activate(struct irq_domain *domain, ++ struct irq_data *d, bool reserve) ++{ ++ struct its_vpe *vpe = irq_data_get_irq_chip_data(d); ++ struct its_node *its; ++ ++ /* If we use the list map, we issue VMAPP on demand... */ ++ if (its_list_map) ++ return 0; ++ ++ /* Map the VPE to the first possible CPU */ ++ vpe->col_idx = cpumask_first(cpu_online_mask); ++ ++ list_for_each_entry(its, &its_nodes, entry) { ++ if (!its->is_v4) ++ continue; ++ ++ its_send_vmapp(its, vpe, true); ++ its_send_vinvall(its, vpe); ++ } ++ ++ irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); ++ ++ return 0; ++} ++ ++static void its_vpe_irq_domain_deactivate(struct irq_domain *domain, ++ struct irq_data *d) ++{ ++ struct its_vpe *vpe = irq_data_get_irq_chip_data(d); ++ struct its_node *its; ++ ++ /* ++ * If we use the list map, we unmap the VPE once no VLPIs are ++ * associated with the VM. ++ */ ++ if (its_list_map) ++ return; ++ ++ list_for_each_entry(its, &its_nodes, entry) { ++ if (!its->is_v4) ++ continue; ++ ++ its_send_vmapp(its, vpe, false); ++ } ++} ++ ++static const struct irq_domain_ops its_vpe_domain_ops = { ++ .alloc = its_vpe_irq_domain_alloc, ++ .free = its_vpe_irq_domain_free, ++ .activate = its_vpe_irq_domain_activate, ++ .deactivate = its_vpe_irq_domain_deactivate, ++}; ++ ++static int its_force_quiescent(void __iomem *base) ++{ ++ u32 count = 1000000; /* 1s */ ++ u32 val; ++ ++ val = readl_relaxed(base + GITS_CTLR); ++ /* ++ * GIC architecture specification requires the ITS to be both ++ * disabled and quiescent for writes to GITS_BASER or ++ * GITS_CBASER to not have UNPREDICTABLE results. 
++ */ ++ if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE)) ++ return 0; ++ ++ /* Disable the generation of all interrupts to this ITS */ ++ val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe); ++ writel_relaxed(val, base + GITS_CTLR); ++ ++ /* Poll GITS_CTLR and wait until ITS becomes quiescent */ ++ while (1) { ++ val = readl_relaxed(base + GITS_CTLR); ++ if (val & GITS_CTLR_QUIESCENT) ++ return 0; ++ ++ count--; ++ if (!count) ++ return -EBUSY; ++ ++ cpu_relax(); ++ udelay(1); ++ } ++} ++ ++static bool __maybe_unused its_enable_quirk_cavium_22375(void *data) ++{ ++ struct its_node *its = data; ++ ++ /* erratum 22375: only alloc 8MB table size */ ++ its->device_ids = 0x14; /* 20 bits, 8MB */ ++ its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; ++ ++ return true; ++} ++ ++static bool __maybe_unused its_enable_quirk_cavium_23144(void *data) ++{ ++ struct its_node *its = data; ++ ++ its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; ++ ++ return true; ++} ++ ++static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data) ++{ ++ struct its_node *its = data; ++ ++ /* On QDF2400, the size of the ITE is 16Bytes */ ++ its->ite_size = 16; ++ ++ return true; ++} ++ ++static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev) ++{ ++ struct its_node *its = its_dev->its; ++ ++ /* ++ * The Socionext Synquacer SoC has a so-called 'pre-ITS', ++ * which maps 32-bit writes targeted at a separate window of ++ * size '4 << device_id_bits' onto writes to GITS_TRANSLATER ++ * with device ID taken from bits [device_id_bits + 1:2] of ++ * the window offset. 
++ */ ++ return its->pre_its_base + (its_dev->device_id << 2); ++} ++ ++static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data) ++{ ++ struct its_node *its = data; ++ u32 pre_its_window[2]; ++ u32 ids; ++ ++ if (!fwnode_property_read_u32_array(its->fwnode_handle, ++ "socionext,synquacer-pre-its", ++ pre_its_window, ++ ARRAY_SIZE(pre_its_window))) { ++ ++ its->pre_its_base = pre_its_window[0]; ++ its->get_msi_base = its_irq_get_msi_base_pre_its; ++ ++ ids = ilog2(pre_its_window[1]) - 2; ++ if (its->device_ids > ids) ++ its->device_ids = ids; ++ ++ /* the pre-ITS breaks isolation, so disable MSI remapping */ ++ its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP; ++ return true; ++ } ++ return false; ++} ++ ++static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data) ++{ ++ struct its_node *its = data; ++ ++ /* ++ * Hip07 insists on using the wrong address for the VLPI ++ * page. Trick it into doing the right thing... ++ */ ++ its->vlpi_redist_offset = SZ_128K; ++ return true; ++} ++ ++static const struct gic_quirk its_quirks[] = { ++#ifdef CONFIG_CAVIUM_ERRATUM_22375 ++ { ++ .desc = "ITS: Cavium errata 22375, 24313", ++ .iidr = 0xa100034c, /* ThunderX pass 1.x */ ++ .mask = 0xffff0fff, ++ .init = its_enable_quirk_cavium_22375, ++ }, ++#endif ++#ifdef CONFIG_CAVIUM_ERRATUM_23144 ++ { ++ .desc = "ITS: Cavium erratum 23144", ++ .iidr = 0xa100034c, /* ThunderX pass 1.x */ ++ .mask = 0xffff0fff, ++ .init = its_enable_quirk_cavium_23144, ++ }, ++#endif ++#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065 ++ { ++ .desc = "ITS: QDF2400 erratum 0065", ++ .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */ ++ .mask = 0xffffffff, ++ .init = its_enable_quirk_qdf2400_e0065, ++ }, ++#endif ++#ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS ++ { ++ /* ++ * The Socionext Synquacer SoC incorporates ARM's own GIC-500 ++ * implementation, but with a 'pre-ITS' added that requires ++ * special handling in software. 
++ */ ++ .desc = "ITS: Socionext Synquacer pre-ITS", ++ .iidr = 0x0001143b, ++ .mask = 0xffffffff, ++ .init = its_enable_quirk_socionext_synquacer, ++ }, ++#endif ++#ifdef CONFIG_HISILICON_ERRATUM_161600802 ++ { ++ .desc = "ITS: Hip07 erratum 161600802", ++ .iidr = 0x00000004, ++ .mask = 0xffffffff, ++ .init = its_enable_quirk_hip07_161600802, ++ }, ++#endif ++ { ++ } ++}; ++ ++static void its_enable_quirks(struct its_node *its) ++{ ++ u32 iidr = readl_relaxed(its->base + GITS_IIDR); ++ ++ gic_enable_quirks(iidr, its_quirks, its); ++} ++ ++static int its_save_disable(void) ++{ ++ struct its_node *its; ++ int err = 0; ++ ++ raw_spin_lock(&its_lock); ++ list_for_each_entry(its, &its_nodes, entry) { ++ void __iomem *base; ++ ++ if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE)) ++ continue; ++ ++ base = its->base; ++ its->ctlr_save = readl_relaxed(base + GITS_CTLR); ++ err = its_force_quiescent(base); ++ if (err) { ++ pr_err("ITS@%pa: failed to quiesce: %d\n", ++ &its->phys_base, err); ++ writel_relaxed(its->ctlr_save, base + GITS_CTLR); ++ goto err; ++ } ++ ++ its->cbaser_save = gits_read_cbaser(base + GITS_CBASER); ++ } ++ ++err: ++ if (err) { ++ list_for_each_entry_continue_reverse(its, &its_nodes, entry) { ++ void __iomem *base; ++ ++ if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE)) ++ continue; ++ ++ base = its->base; ++ writel_relaxed(its->ctlr_save, base + GITS_CTLR); ++ } ++ } ++ raw_spin_unlock(&its_lock); ++ ++ return err; ++} ++ ++static void its_restore_enable(void) ++{ ++ struct its_node *its; ++ int ret; ++ ++ raw_spin_lock(&its_lock); ++ list_for_each_entry(its, &its_nodes, entry) { ++ void __iomem *base; ++ int i; ++ ++ if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE)) ++ continue; ++ ++ base = its->base; ++ ++ /* ++ * Make sure that the ITS is disabled. If it fails to quiesce, ++ * don't restore it since writing to CBASER or BASER ++ * registers is undefined according to the GIC v3 ITS ++ * Specification. 
++ */ ++ ret = its_force_quiescent(base); ++ if (ret) { ++ pr_err("ITS@%pa: failed to quiesce on resume: %d\n", ++ &its->phys_base, ret); ++ continue; ++ } ++ ++ gits_write_cbaser(its->cbaser_save, base + GITS_CBASER); ++ ++ /* ++ * Writing CBASER resets CREADR to 0, so make CWRITER and ++ * cmd_write line up with it. ++ */ ++ its->cmd_write = its->cmd_base; ++ gits_write_cwriter(0, base + GITS_CWRITER); ++ ++ /* Restore GITS_BASER from the value cache. */ ++ for (i = 0; i < GITS_BASER_NR_REGS; i++) { ++ struct its_baser *baser = &its->tables[i]; ++ ++ if (!(baser->val & GITS_BASER_VALID)) ++ continue; ++ ++ its_write_baser(its, baser, baser->val); ++ } ++ writel_relaxed(its->ctlr_save, base + GITS_CTLR); ++ ++ /* ++ * Reinit the collection if it's stored in the ITS. This is ++ * indicated by the col_id being less than the HCC field. ++ * CID < HCC as specified in the GIC v3 Documentation. ++ */ ++ if (its->collections[smp_processor_id()].col_id < ++ GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER))) ++ its_cpu_init_collection(its); ++ } ++ raw_spin_unlock(&its_lock); ++} ++ ++static struct syscore_ops its_syscore_ops = { ++ .suspend = its_save_disable, ++ .resume = its_restore_enable, ++}; ++ ++static int its_init_domain(struct fwnode_handle *handle, struct its_node *its) ++{ ++ struct irq_domain *inner_domain; ++ struct msi_domain_info *info; ++ ++ info = kzalloc(sizeof(*info), GFP_KERNEL); ++ if (!info) ++ return -ENOMEM; ++ ++ inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its); ++ if (!inner_domain) { ++ kfree(info); ++ return -ENOMEM; ++ } ++ ++ inner_domain->parent = its_parent; ++ irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS); ++ inner_domain->flags |= its->msi_domain_flags; ++ info->ops = &its_msi_domain_ops; ++ info->data = its; ++ inner_domain->host_data = info; ++ ++ return 0; ++} ++ ++static int its_init_vpe_domain(void) ++{ ++ struct its_node *its; ++ u32 devid; ++ int entries; ++ ++ if (gic_rdists->has_direct_lpi) { ++ 
pr_info("ITS: Using DirectLPI for VPE invalidation\n"); ++ return 0; ++ } ++ ++ /* Any ITS will do, even if not v4 */ ++ its = list_first_entry(&its_nodes, struct its_node, entry); ++ ++ entries = roundup_pow_of_two(nr_cpu_ids); ++ vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes), ++ GFP_KERNEL); ++ if (!vpe_proxy.vpes) { ++ pr_err("ITS: Can't allocate GICv4 proxy device array\n"); ++ return -ENOMEM; ++ } ++ ++ /* Use the last possible DevID */ ++ devid = GENMASK(its->device_ids - 1, 0); ++ vpe_proxy.dev = its_create_device(its, devid, entries, false); ++ if (!vpe_proxy.dev) { ++ kfree(vpe_proxy.vpes); ++ pr_err("ITS: Can't allocate GICv4 proxy device\n"); ++ return -ENOMEM; ++ } ++ ++ BUG_ON(entries > vpe_proxy.dev->nr_ites); ++ ++ raw_spin_lock_init(&vpe_proxy.lock); ++ vpe_proxy.next_victim = 0; ++ pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n", ++ devid, vpe_proxy.dev->nr_ites); ++ ++ return 0; ++} ++ ++static int __init its_compute_its_list_map(struct resource *res, ++ void __iomem *its_base) ++{ ++ int its_number; ++ u32 ctlr; ++ ++ /* ++ * This is assumed to be done early enough that we're ++ * guaranteed to be single-threaded, hence no ++ * locking. Should this change, we should address ++ * this. 
++ */ ++ its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX); ++ if (its_number >= GICv4_ITS_LIST_MAX) { ++ pr_err("ITS@%pa: No ITSList entry available!\n", ++ &res->start); ++ return -EINVAL; ++ } ++ ++ ctlr = readl_relaxed(its_base + GITS_CTLR); ++ ctlr &= ~GITS_CTLR_ITS_NUMBER; ++ ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT; ++ writel_relaxed(ctlr, its_base + GITS_CTLR); ++ ctlr = readl_relaxed(its_base + GITS_CTLR); ++ if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) { ++ its_number = ctlr & GITS_CTLR_ITS_NUMBER; ++ its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT; ++ } ++ ++ if (test_and_set_bit(its_number, &its_list_map)) { ++ pr_err("ITS@%pa: Duplicate ITSList entry %d\n", ++ &res->start, its_number); ++ return -EINVAL; ++ } ++ ++ return its_number; ++} ++ ++static int __init its_probe_one(struct resource *res, ++ struct fwnode_handle *handle, int numa_node) ++{ ++ struct its_node *its; ++ void __iomem *its_base; ++ u32 val, ctlr; ++ u64 baser, tmp, typer; ++ int err; ++ ++ its_base = ioremap(res->start, resource_size(res)); ++ if (!its_base) { ++ pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start); ++ return -ENOMEM; ++ } ++ ++ val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK; ++ if (val != 0x30 && val != 0x40) { ++ pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start); ++ err = -ENODEV; ++ goto out_unmap; ++ } ++ ++ err = its_force_quiescent(its_base); ++ if (err) { ++ pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start); ++ goto out_unmap; ++ } ++ ++ pr_info("ITS %pR\n", res); ++ ++ its = kzalloc(sizeof(*its), GFP_KERNEL); ++ if (!its) { ++ err = -ENOMEM; ++ goto out_unmap; ++ } ++ ++ raw_spin_lock_init(&its->lock); ++ INIT_LIST_HEAD(&its->entry); ++ INIT_LIST_HEAD(&its->its_device_list); ++ typer = gic_read_typer(its_base + GITS_TYPER); ++ its->base = its_base; ++ its->phys_base = res->start; ++ its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer); ++ its->device_ids 
= GITS_TYPER_DEVBITS(typer); ++ its->is_v4 = !!(typer & GITS_TYPER_VLPIS); ++ if (its->is_v4) { ++ if (!(typer & GITS_TYPER_VMOVP)) { ++ err = its_compute_its_list_map(res, its_base); ++ if (err < 0) ++ goto out_free_its; ++ ++ its->list_nr = err; ++ ++ pr_info("ITS@%pa: Using ITS number %d\n", ++ &res->start, err); ++ } else { ++ pr_info("ITS@%pa: Single VMOVP capable\n", &res->start); ++ } ++ } ++ ++ its->numa_node = numa_node; ++ ++ its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, ++ get_order(ITS_CMD_QUEUE_SZ)); ++ if (!its->cmd_base) { ++ err = -ENOMEM; ++ goto out_free_its; ++ } ++ its->cmd_write = its->cmd_base; ++ its->fwnode_handle = handle; ++ its->get_msi_base = its_irq_get_msi_base; ++ its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP; ++ ++ its_enable_quirks(its); ++ ++ err = its_alloc_tables(its); ++ if (err) ++ goto out_free_cmd; ++ ++ err = its_alloc_collections(its); ++ if (err) ++ goto out_free_tables; ++ ++ baser = (virt_to_phys(its->cmd_base) | ++ GITS_CBASER_RaWaWb | ++ GITS_CBASER_InnerShareable | ++ (ITS_CMD_QUEUE_SZ / SZ_4K - 1) | ++ GITS_CBASER_VALID); ++ ++ gits_write_cbaser(baser, its->base + GITS_CBASER); ++ tmp = gits_read_cbaser(its->base + GITS_CBASER); ++ ++ if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) { ++ if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) { ++ /* ++ * The HW reports non-shareable, we must ++ * remove the cacheability attributes as ++ * well. 
++ */ ++ baser &= ~(GITS_CBASER_SHAREABILITY_MASK | ++ GITS_CBASER_CACHEABILITY_MASK); ++ baser |= GITS_CBASER_nC; ++ gits_write_cbaser(baser, its->base + GITS_CBASER); ++ } ++ pr_info("ITS: using cache flushing for cmd queue\n"); ++ its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING; ++ } ++ ++ gits_write_cwriter(0, its->base + GITS_CWRITER); ++ ctlr = readl_relaxed(its->base + GITS_CTLR); ++ ctlr |= GITS_CTLR_ENABLE; ++ if (its->is_v4) ++ ctlr |= GITS_CTLR_ImDe; ++ writel_relaxed(ctlr, its->base + GITS_CTLR); ++ ++ if (GITS_TYPER_HCC(typer)) ++ its->flags |= ITS_FLAGS_SAVE_SUSPEND_STATE; ++ ++ err = its_init_domain(handle, its); ++ if (err) ++ goto out_free_tables; ++ ++ raw_spin_lock(&its_lock); ++ list_add(&its->entry, &its_nodes); ++ raw_spin_unlock(&its_lock); ++ ++ return 0; ++ ++out_free_tables: ++ its_free_tables(its); ++out_free_cmd: ++ free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ)); ++out_free_its: ++ kfree(its); ++out_unmap: ++ iounmap(its_base); ++ pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err); ++ return err; ++} ++ ++static bool gic_rdists_supports_plpis(void) ++{ ++ return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS); ++} ++ ++static int redist_disable_lpis(void) ++{ ++ void __iomem *rbase = gic_data_rdist_rd_base(); ++ u64 timeout = USEC_PER_SEC; ++ u64 val; ++ ++ if (!gic_rdists_supports_plpis()) { ++ pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); ++ return -ENXIO; ++ } ++ ++ val = readl_relaxed(rbase + GICR_CTLR); ++ if (!(val & GICR_CTLR_ENABLE_LPIS)) ++ return 0; ++ ++ /* ++ * If coming via a CPU hotplug event, we don't need to disable ++ * LPIs before trying to re-enable them. They are already ++ * configured and all is well in the world. ++ * ++ * If running with preallocated tables, there is nothing to do. 
++ */ ++ if (gic_data_rdist()->lpi_enabled || ++ (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED)) ++ return 0; ++ ++ /* ++ * From that point on, we only try to do some damage control. ++ */ ++ pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n", ++ smp_processor_id()); ++ add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); ++ ++ /* Disable LPIs */ ++ val &= ~GICR_CTLR_ENABLE_LPIS; ++ writel_relaxed(val, rbase + GICR_CTLR); ++ ++ /* Make sure any change to GICR_CTLR is observable by the GIC */ ++ dsb(sy); ++ ++ /* ++ * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs ++ * from 1 to 0 before programming GICR_PEND{PROP}BASER registers. ++ * Error out if we time out waiting for RWP to clear. ++ */ ++ while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) { ++ if (!timeout) { ++ pr_err("CPU%d: Timeout while disabling LPIs\n", ++ smp_processor_id()); ++ return -ETIMEDOUT; ++ } ++ udelay(1); ++ timeout--; ++ } ++ ++ /* ++ * After it has been written to 1, it is IMPLEMENTATION ++ * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be ++ * cleared to 0. Error out if clearing the bit failed. 
++ */ ++ if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) { ++ pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id()); ++ return -EBUSY; ++ } ++ ++ return 0; ++} ++ ++int phytium_its_cpu_init(void) ++{ ++ if (!list_empty(&its_nodes)) { ++ int ret; ++ ++ ret = redist_disable_lpis(); ++ if (ret) ++ return ret; ++ ++ its_cpu_init_lpis(); ++ its_cpu_init_collections(); ++ } ++ ++ return 0; ++} ++ ++static const struct of_device_id its_device_id[] = { ++ { .compatible = "arm,gic-v3-its", }, ++ {}, ++}; ++ ++static int __init its_of_probe(struct device_node *node) ++{ ++ struct device_node *np; ++ struct resource res; ++ ++ for (np = of_find_matching_node(node, its_device_id); np; ++ np = of_find_matching_node(np, its_device_id)) { ++ if (!of_device_is_available(np)) ++ continue; ++ if (!of_property_read_bool(np, "msi-controller")) { ++ pr_warn("%pOF: no msi-controller property, ITS ignored\n", ++ np); ++ continue; ++ } ++ ++ if (of_address_to_resource(np, 0, &res)) { ++ pr_warn("%pOF: no regs?\n", np); ++ continue; ++ } ++ ++ its_probe_one(&res, &np->fwnode, of_node_to_nid(np)); ++ } ++ return 0; ++} ++ ++#ifdef CONFIG_ACPI ++ ++#define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K) ++ ++#ifdef CONFIG_ACPI_NUMA ++struct its_srat_map { ++ /* numa node id */ ++ u32 numa_node; ++ /* GIC ITS ID */ ++ u32 its_id; ++}; ++ ++static struct its_srat_map *its_srat_maps __initdata; ++static int its_in_srat __initdata; ++ ++static int __init acpi_get_its_numa_node(u32 its_id) ++{ ++ int i; ++ ++ for (i = 0; i < its_in_srat; i++) { ++ if (its_id == its_srat_maps[i].its_id) ++ return its_srat_maps[i].numa_node; ++ } ++ return NUMA_NO_NODE; ++} ++ ++static int __init gic_acpi_match_srat_its(struct acpi_subtable_header *header, ++ const unsigned long end) ++{ ++ return 0; ++} ++ ++static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header, ++ const unsigned long end) ++{ ++ int node; ++ struct acpi_srat_gic_its_affinity *its_affinity; ++ ++ its_affinity = 
(struct acpi_srat_gic_its_affinity *)header; ++ if (!its_affinity) ++ return -EINVAL; ++ ++ if (its_affinity->header.length < sizeof(*its_affinity)) { ++ pr_err("SRAT: Invalid header length %d in ITS affinity\n", ++ its_affinity->header.length); ++ return -EINVAL; ++ } ++ ++ node = acpi_map_pxm_to_node(its_affinity->proximity_domain); ++ ++ if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) { ++ pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node); ++ return 0; ++ } ++ ++ its_srat_maps[its_in_srat].numa_node = node; ++ its_srat_maps[its_in_srat].its_id = its_affinity->its_id; ++ its_in_srat++; ++ pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n", ++ its_affinity->proximity_domain, its_affinity->its_id, node); ++ ++ return 0; ++} ++ ++static void __init acpi_table_parse_srat_its(void) ++{ ++ int count; ++ ++ count = acpi_table_parse_entries(ACPI_SIG_SRAT, ++ sizeof(struct acpi_table_srat), ++ ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, ++ gic_acpi_match_srat_its, 0); ++ if (count <= 0) ++ return; ++ ++ its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map), ++ GFP_KERNEL); ++ if (!its_srat_maps) { ++ pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n"); ++ return; ++ } ++ ++ acpi_table_parse_entries(ACPI_SIG_SRAT, ++ sizeof(struct acpi_table_srat), ++ ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, ++ gic_acpi_parse_srat_its, 0); ++} ++ ++/* free the its_srat_maps after ITS probing */ ++static void __init acpi_its_srat_maps_free(void) ++{ ++ kfree(its_srat_maps); ++} ++#else ++static void __init acpi_table_parse_srat_its(void) { } ++static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; } ++static void __init acpi_its_srat_maps_free(void) { } ++#endif ++ ++static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header, ++ const unsigned long end) ++{ ++ struct acpi_madt_generic_translator *its_entry; ++ struct fwnode_handle *dom_handle; ++ struct resource res; ++ int err; ++ ++ its_entry = (struct acpi_madt_generic_translator 
*)header; ++ memset(&res, 0, sizeof(res)); ++ res.start = its_entry->base_address; ++ res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1; ++ res.flags = IORESOURCE_MEM; ++ ++ dom_handle = irq_domain_alloc_fwnode((void *)its_entry->base_address); ++ if (!dom_handle) { ++ pr_err("ITS@%pa: Unable to allocate GIC-Phytium-2500 ITS domain token\n", ++ &res.start); ++ return -ENOMEM; ++ } ++ ++ err = iort_register_domain_token(its_entry->translation_id, res.start, ++ dom_handle); ++ if (err) { ++ pr_err("ITS@%pa: Unable to register GIC-Phytium-2500 ITS domain token (ITS ID %d) to IORT\n", ++ &res.start, its_entry->translation_id); ++ goto dom_err; ++ } ++ ++ err = its_probe_one(&res, dom_handle, ++ acpi_get_its_numa_node(its_entry->translation_id)); ++ if (!err) ++ return 0; ++ ++ iort_deregister_domain_token(its_entry->translation_id); ++dom_err: ++ irq_domain_free_fwnode(dom_handle); ++ return err; ++} ++ ++static void __init its_acpi_probe(void) ++{ ++ acpi_table_parse_srat_its(); ++ acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, ++ gic_acpi_parse_madt_its, 0); ++ acpi_its_srat_maps_free(); ++} ++#else ++static void __init its_acpi_probe(void) { } ++#endif ++ ++int __init phytium_its_init(struct fwnode_handle *handle, struct rdists *rdists, ++ struct irq_domain *parent_domain) ++{ ++ struct device_node *of_node; ++ struct its_node *its; ++ bool has_v4 = false; ++ int err; ++ ++ its_parent = parent_domain; ++ of_node = to_of_node(handle); ++ if (of_node) ++ its_of_probe(of_node); ++ else ++ its_acpi_probe(); ++ ++ if (list_empty(&its_nodes)) { ++ pr_warn("ITS: No ITS available, not enabling LPIs\n"); ++ return -ENXIO; ++ } ++ ++ gic_rdists = rdists; ++ ++ err = allocate_lpi_tables(); ++ if (err) ++ return err; ++ ++ list_for_each_entry(its, &its_nodes, entry) ++ has_v4 |= its->is_v4; ++ ++ if (has_v4 & rdists->has_vlpis) { ++ if (its_init_vpe_domain() || ++ its_init_v4(parent_domain, &its_vpe_domain_ops)) { ++ rdists->has_vlpis = false; ++ 
pr_err("ITS: Disabling GICv4 support\n"); ++ } ++ } ++ ++ register_syscore_ops(&its_syscore_ops); ++ ++ return 0; ++} +diff --git a/drivers/irqchip/irq-gic-phytium-2500.c b/drivers/irqchip/irq-gic-phytium-2500.c +new file mode 100644 +index 000000000000..87635081c807 +--- /dev/null ++++ b/drivers/irqchip/irq-gic-phytium-2500.c +@@ -0,0 +1,1881 @@ ++/* ++ * Copyright (C) 2020 Phytium Corporation. ++ * Author: Wang Yinfeng ++ * Chen Baozi ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see . 
++ */ ++ ++#define pr_fmt(fmt) "GIC-2500: " fmt ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++ ++#include "irq-gic-common.h" ++#include ++ ++#define MAX_MARS3_SOC_COUNT 8 ++#define MARS3_ADDR_SKTID_SHIFT 41 ++ ++struct gic_dist_desc { ++ void __iomem *dist_base; ++ phys_addr_t phys_base; ++ unsigned long size; ++}; ++ ++struct redist_region { ++ void __iomem *redist_base; ++ phys_addr_t phys_base; ++ bool single_redist; ++}; ++ ++static struct gic_dist_desc mars3_gic_dists[MAX_MARS3_SOC_COUNT] __read_mostly; ++ ++static unsigned int mars3_sockets_bitmap = 0x1; ++ ++#define mars3_irq_to_skt(hwirq) (((hwirq) - 32) % 8) ++ ++struct gic_chip_data { ++ struct fwnode_handle *fwnode; ++ void __iomem *dist_base; ++ struct redist_region *redist_regions; ++ struct rdists rdists; ++ struct irq_domain *domain; ++ u64 redist_stride; ++ u32 nr_redist_regions; ++ bool has_rss; ++ unsigned int irq_nr; ++ struct partition_desc *ppi_descs[16]; ++}; ++ ++static struct gic_chip_data gic_data __read_mostly; ++static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key); ++ ++static struct gic_kvm_info gic_v3_kvm_info; ++static DEFINE_PER_CPU(bool, has_rss); ++ ++#define MPIDR_RS(mpidr) (((mpidr) & 0xF0UL) >> 4) ++#define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist)) ++#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) ++#define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K) ++ ++/* Our default, arbitrary priority value. Linux only uses one anyway. 
*/ ++#define DEFAULT_PMR_VALUE 0xf0 ++ ++static inline unsigned int gic_irq(struct irq_data *d) ++{ ++ return d->hwirq; ++} ++ ++static inline int gic_irq_in_rdist(struct irq_data *d) ++{ ++ return gic_irq(d) < 32; ++} ++ ++static inline void __iomem *gic_dist_base(struct irq_data *d) ++{ ++ if (gic_irq_in_rdist(d)) /* SGI+PPI -> SGI_base for this CPU */ ++ return gic_data_rdist_sgi_base(); ++ ++ if (d->hwirq <= 1023) /* SPI -> dist_base */ ++ return gic_data.dist_base; ++ ++ return NULL; ++} ++ ++static void gic_do_wait_for_rwp(void __iomem *base) ++{ ++ u32 count = 1000000; /* 1s! */ ++ ++ while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) { ++ count--; ++ if (!count) { ++ pr_err_ratelimited("RWP timeout, gone fishing\n"); ++ return; ++ } ++ cpu_relax(); ++ udelay(1); ++ }; ++} ++ ++/* Wait for completion of a distributor change */ ++static void gic_dist_wait_for_rwp(void) ++{ ++ gic_do_wait_for_rwp(gic_data.dist_base); ++} ++ ++/* Wait for completion of a redistributor change */ ++static void gic_redist_wait_for_rwp(void) ++{ ++ gic_do_wait_for_rwp(gic_data_rdist_rd_base()); ++} ++ ++#ifdef CONFIG_ARM64 ++ ++static u64 __maybe_unused gic_read_iar(void) ++{ ++ if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154)) ++ return gic_read_iar_cavium_thunderx(); ++ else ++ return gic_read_iar_common(); ++} ++#endif ++ ++static void gic_enable_redist(bool enable) ++{ ++ void __iomem *rbase; ++ u32 count = 1000000; /* 1s! 
*/ ++ u32 val; ++ unsigned long mpidr; ++ int i; ++ ++ rbase = gic_data_rdist_rd_base(); ++ ++ val = readl_relaxed(rbase + GICR_WAKER); ++ if (enable) ++ /* Wake up this CPU redistributor */ ++ val &= ~GICR_WAKER_ProcessorSleep; ++ else ++ val |= GICR_WAKER_ProcessorSleep; ++ writel_relaxed(val, rbase + GICR_WAKER); ++ ++ if (!enable) { /* Check that GICR_WAKER is writeable */ ++ val = readl_relaxed(rbase + GICR_WAKER); ++ if (!(val & GICR_WAKER_ProcessorSleep)) ++ return; /* No PM support in this redistributor */ ++ } ++ ++ while (--count) { ++ val = readl_relaxed(rbase + GICR_WAKER); ++ if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep)) ++ break; ++ cpu_relax(); ++ udelay(1); ++ }; ++ if (!count) ++ pr_err_ratelimited("redistributor failed to %s...\n", ++ enable ? "wakeup" : "sleep"); ++ ++ mpidr = (unsigned long)cpu_logical_map(smp_processor_id()); ++ ++ /* Either Aff0 or Aff1 is not zero */ ++ if (mpidr & 0xffff) ++ return; ++ ++ /* Skip 64 Redistributors */ ++ rbase = rbase + 64 * SZ_128K; ++ ++ for (i = 0; i < 4; i++) { ++ val = readl_relaxed(rbase + GICR_WAKER); ++ if (enable) ++ val &= ~GICR_WAKER_ProcessorSleep; ++ else ++ val |= GICR_WAKER_ProcessorSleep; ++ writel_relaxed(val, rbase + GICR_WAKER); ++ ++ if (!enable) { ++ val = readl_relaxed(rbase + GICR_WAKER); ++ if (!(val & GICR_WAKER_ProcessorSleep)) ++ return; ++ } ++ ++ count = 1000000; /* 1s! */ ++ while (--count) { ++ val = readl_relaxed(rbase + GICR_WAKER); ++ if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep)) ++ break; ++ cpu_relax(); ++ udelay(1); ++ }; ++ ++ if (!count) ++ pr_err_ratelimited("CPU MPIDR 0x%lx: redistributor %d failed to %s...\n", ++ mpidr, 64 + i, enable ? 
"wakeup" : "sleep"); ++ ++ rbase = rbase + SZ_128K; ++ } ++} ++ ++/* ++ * Routines to disable, enable, EOI and route interrupts ++ */ ++static int gic_peek_irq(struct irq_data *d, u32 offset) ++{ ++ u32 mask = 1 << (gic_irq(d) % 32); ++ void __iomem *base; ++ unsigned int skt; ++ ++ if (gic_irq_in_rdist(d)) ++ base = gic_data_rdist_sgi_base(); ++ else { ++ skt = mars3_irq_to_skt(gic_irq(d)); ++ base = mars3_gic_dists[skt].dist_base; ++ } ++ ++ return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask); ++} ++ ++static void gic_poke_irq(struct irq_data *d, u32 offset) ++{ ++ u32 mask = 1 << (gic_irq(d) % 32); ++ void __iomem *base; ++ unsigned long mpidr; ++ void __iomem *rbase; ++ int i; ++ unsigned int skt; ++ ++ if (gic_irq_in_rdist(d)) { ++ base = gic_data_rdist_sgi_base(); ++ ++ writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4); ++ gic_redist_wait_for_rwp(); ++ ++ mpidr = (unsigned long)cpu_logical_map(smp_processor_id()); ++ ++ if ((mpidr & 0xffff) == 0) { ++ rbase = base + 64*SZ_128K; ++ ++ for (i = 0; i < 4; i++) { ++ writel_relaxed(mask, rbase + offset + (gic_irq(d) / 32) * 4); ++ gic_do_wait_for_rwp(rbase - SZ_64K); ++ rbase = rbase + SZ_128K; ++ } ++ } ++ } else { ++ skt = mars3_irq_to_skt(gic_irq(d)); ++ base = mars3_gic_dists[skt].dist_base; ++ writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4); ++ gic_do_wait_for_rwp(base); ++ } ++} ++ ++static void gic_mask_irq(struct irq_data *d) ++{ ++ gic_poke_irq(d, GICD_ICENABLER); ++} ++ ++static void gic_eoimode1_mask_irq(struct irq_data *d) ++{ ++ gic_mask_irq(d); ++ /* ++ * When masking a forwarded interrupt, make sure it is ++ * deactivated as well. ++ * ++ * This ensures that an interrupt that is getting ++ * disabled/masked will not get "stuck", because there is ++ * noone to deactivate it (guest is being terminated). 
++ */ ++ if (irqd_is_forwarded_to_vcpu(d)) ++ gic_poke_irq(d, GICD_ICACTIVER); ++} ++ ++static void gic_unmask_irq(struct irq_data *d) ++{ ++ gic_poke_irq(d, GICD_ISENABLER); ++} ++ ++static int gic_irq_set_irqchip_state(struct irq_data *d, ++ enum irqchip_irq_state which, bool val) ++{ ++ u32 reg; ++ ++ if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */ ++ return -EINVAL; ++ ++ switch (which) { ++ case IRQCHIP_STATE_PENDING: ++ reg = val ? GICD_ISPENDR : GICD_ICPENDR; ++ break; ++ ++ case IRQCHIP_STATE_ACTIVE: ++ reg = val ? GICD_ISACTIVER : GICD_ICACTIVER; ++ break; ++ ++ case IRQCHIP_STATE_MASKED: ++ reg = val ? GICD_ICENABLER : GICD_ISENABLER; ++ break; ++ ++ default: ++ return -EINVAL; ++ } ++ ++ gic_poke_irq(d, reg); ++ return 0; ++} ++ ++static int gic_irq_get_irqchip_state(struct irq_data *d, ++ enum irqchip_irq_state which, bool *val) ++{ ++ if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */ ++ return -EINVAL; ++ ++ switch (which) { ++ case IRQCHIP_STATE_PENDING: ++ *val = gic_peek_irq(d, GICD_ISPENDR); ++ break; ++ ++ case IRQCHIP_STATE_ACTIVE: ++ *val = gic_peek_irq(d, GICD_ISACTIVER); ++ break; ++ ++ case IRQCHIP_STATE_MASKED: ++ *val = !gic_peek_irq(d, GICD_ISENABLER); ++ break; ++ ++ default: ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static void gic_eoi_irq(struct irq_data *d) ++{ ++ gic_write_eoir(gic_irq(d)); ++} ++ ++static void gic_eoimode1_eoi_irq(struct irq_data *d) ++{ ++ /* ++ * No need to deactivate an LPI, or an interrupt that ++ * is is getting forwarded to a vcpu. 
++ */ ++ if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d)) ++ return; ++ gic_write_dir(gic_irq(d)); ++} ++ ++static int gic_set_type(struct irq_data *d, unsigned int type) ++{ ++ unsigned int irq = gic_irq(d); ++ unsigned long mpidr; ++ int i; ++ void __iomem *base; ++ void __iomem *rbase; ++ unsigned int skt; ++ int ret; ++ ++ /* Interrupt configuration for SGIs can't be changed */ ++ if (irq < 16) ++ return -EINVAL; ++ ++ /* SPIs have restrictions on the supported types */ ++ if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH && ++ type != IRQ_TYPE_EDGE_RISING) ++ return -EINVAL; ++ ++ if (gic_irq_in_rdist(d)) { ++ base = gic_data_rdist_sgi_base(); ++ ret = gic_configure_irq(irq, type, base, gic_redist_wait_for_rwp); ++ ++ mpidr = (unsigned long)cpu_logical_map(smp_processor_id()); ++ ++ if ((mpidr & 0xffff) == 0) { ++ rbase = base + 64*SZ_128K; ++ ++ for (i = 0; i < 4; i++) { ++ ret = gic_configure_irq(irq, type, rbase, NULL); ++ gic_do_wait_for_rwp(rbase - SZ_64K); ++ rbase = rbase + SZ_128K; ++ } ++ } ++ } else { ++ skt = mars3_irq_to_skt(gic_irq(d)); ++ base = mars3_gic_dists[skt].dist_base; ++ ret = gic_configure_irq(irq, type, base, NULL); ++ gic_do_wait_for_rwp(base); ++ } ++ ++ return ret; ++} ++ ++static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) ++{ ++ if (vcpu) ++ irqd_set_forwarded_to_vcpu(d); ++ else ++ irqd_clr_forwarded_to_vcpu(d); ++ return 0; ++} ++ ++static u64 gic_mpidr_to_affinity(unsigned long mpidr) ++{ ++ u64 aff; ++ ++ aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 | ++ MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | ++ MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | ++ MPIDR_AFFINITY_LEVEL(mpidr, 0)); ++ ++ return aff; ++} ++ ++static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) ++{ ++ u32 irqnr; ++ ++ do { ++ irqnr = gic_read_iar(); ++ ++ if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) { ++ int err; ++ ++ if (static_branch_likely(&supports_deactivate_key)) ++ gic_write_eoir(irqnr); ++ else ++ 
isb(); ++ ++ err = handle_domain_irq(gic_data.domain, irqnr, regs); ++ if (err) { ++ WARN_ONCE(true, "Unexpected interrupt received!\n"); ++ if (static_branch_likely(&supports_deactivate_key)) { ++ if (irqnr < 8192) ++ gic_write_dir(irqnr); ++ } else { ++ gic_write_eoir(irqnr); ++ } ++ } ++ continue; ++ } ++ if (irqnr < 16) { ++ gic_write_eoir(irqnr); ++ if (static_branch_likely(&supports_deactivate_key)) ++ gic_write_dir(irqnr); ++#ifdef CONFIG_SMP ++ /* ++ * Unlike GICv2, we don't need an smp_rmb() here. ++ * The control dependency from gic_read_iar to ++ * the ISB in gic_write_eoir is enough to ensure ++ * that any shared data read by handle_IPI will ++ * be read after the ACK. ++ */ ++ handle_IPI(irqnr, regs); ++#else ++ WARN_ONCE(true, "Unexpected SGI received!\n"); ++#endif ++ continue; ++ } ++ } while (irqnr != ICC_IAR1_EL1_SPURIOUS); ++} ++ ++static void __init gic_dist_init(void) ++{ ++ unsigned int i; ++ u64 affinity; ++ void __iomem *base; ++ unsigned int skt; ++ ++ for (skt = 0; skt < MAX_MARS3_SOC_COUNT; skt++) { ++ if ((((unsigned int)1 << skt) & mars3_sockets_bitmap) == 0) ++ continue; ++ ++ base = mars3_gic_dists[skt].dist_base; ++ ++ /* Disable the distributor */ ++ writel_relaxed(0, base + GICD_CTLR); ++ gic_do_wait_for_rwp(base); ++ ++ /* ++ * Configure SPIs as non-secure Group-1. This will only matter ++ * if the GIC only has a single security state. This will not ++ * do the right thing if the kernel is running in secure mode, ++ * but that's not the intended use case anyway. ++ */ ++ for (i = 32; i < gic_data.irq_nr; i += 32) ++ writel_relaxed(~0, base + GICD_IGROUPR + i / 8); ++ ++ gic_dist_config(base, gic_data.irq_nr, NULL); ++ gic_do_wait_for_rwp(base); ++ ++ /* Enable distributor with ARE, Group1 */ ++ writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1, ++ base + GICD_CTLR); ++ ++ /* ++ * Set all global interrupts to the boot CPU only. ARE must be ++ * enabled. 
++ */ ++ affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id())); ++ for (i = 32; i < gic_data.irq_nr; i++) ++ gic_write_irouter(affinity, base + GICD_IROUTER + i * 8); ++ } ++} ++ ++static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *)) ++{ ++ int ret = -ENODEV; ++ int i; ++ ++ for (i = 0; i < gic_data.nr_redist_regions; i++) { ++ void __iomem *ptr = gic_data.redist_regions[i].redist_base; ++ u64 typer; ++ u32 reg; ++ ++ reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK; ++ if (reg != GIC_PIDR2_ARCH_GICv3 && ++ reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */ ++ pr_warn("No redistributor present @%p\n", ptr); ++ break; ++ } ++ ++ do { ++ typer = gic_read_typer(ptr + GICR_TYPER); ++ ret = fn(gic_data.redist_regions + i, ptr); ++ if (!ret) ++ return 0; ++ ++ if (gic_data.redist_regions[i].single_redist) ++ break; ++ ++ if (gic_data.redist_stride) { ++ ptr += gic_data.redist_stride; ++ } else { ++ ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */ ++ if (typer & GICR_TYPER_VLPIS) ++ ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */ ++ } ++ } while (!(typer & GICR_TYPER_LAST)); ++ } ++ ++ return ret ? -ENODEV : 0; ++} ++ ++static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr) ++{ ++ unsigned long mpidr = cpu_logical_map(smp_processor_id()); ++ u64 typer; ++ u32 aff, aff2_skt, rdist_skt; ++ ++ /* ++ * Convert affinity to a 32bit value that can be matched to ++ * GICR_TYPER bits [63:32]. 
++ */ ++ aff = (MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | ++ MPIDR_AFFINITY_LEVEL(mpidr, 0)); ++ ++ aff2_skt = MPIDR_AFFINITY_LEVEL(mpidr, 2) & 0x7; ++ rdist_skt = (((u64)region->phys_base >> MARS3_ADDR_SKTID_SHIFT) & 0x7); ++ ++ if (aff2_skt != rdist_skt) ++ return 1; ++ ++ typer = gic_read_typer(ptr + GICR_TYPER); ++ if ((typer >> 32) == aff) { ++ u64 offset = ptr - region->redist_base; ++ gic_data_rdist_rd_base() = ptr; ++ gic_data_rdist()->phys_base = region->phys_base + offset; ++ ++ pr_info("CPU%d: found redistributor %lx region %d:%pa\n", ++ smp_processor_id(), mpidr, ++ (int)(region - gic_data.redist_regions), ++ &gic_data_rdist()->phys_base); ++ return 0; ++ } ++ ++ /* Try next one */ ++ return 1; ++} ++ ++static int gic_populate_rdist(void) ++{ ++ if (gic_iterate_rdists(__gic_populate_rdist) == 0) ++ return 0; ++ ++ /* We couldn't even deal with ourselves... */ ++ WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n", ++ smp_processor_id(), ++ (unsigned long)cpu_logical_map(smp_processor_id())); ++ return -ENODEV; ++} ++ ++static int __gic_update_vlpi_properties(struct redist_region *region, ++ void __iomem *ptr) ++{ ++ u64 typer = gic_read_typer(ptr + GICR_TYPER); ++ gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS); ++ gic_data.rdists.has_direct_lpi &= !!(typer & GICR_TYPER_DirectLPIS); ++ ++ return 1; ++} ++ ++static void gic_update_vlpi_properties(void) ++{ ++ gic_iterate_rdists(__gic_update_vlpi_properties); ++ pr_info("%sVLPI support, %sdirect LPI support\n", ++ !gic_data.rdists.has_vlpis ? "no " : "", ++ !gic_data.rdists.has_direct_lpi ? "no " : ""); ++} ++ ++static void gic_cpu_sys_reg_init(void) ++{ ++ int i, cpu = smp_processor_id(); ++ u64 mpidr = cpu_logical_map(cpu); ++ u64 need_rss = MPIDR_RS(mpidr); ++ bool group0; ++ u32 val, pribits; ++ ++ /* ++ * Need to check that the SRE bit has actually been set. If ++ * not, it means that SRE is disabled at EL2. We're going to ++ * die painfully, and there is nothing we can do about it. 
++ * ++ * Kindly inform the luser. ++ */ ++ if (!gic_enable_sre()) ++ pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n"); ++ ++ pribits = gic_read_ctlr(); ++ pribits &= ICC_CTLR_EL1_PRI_BITS_MASK; ++ pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT; ++ pribits++; ++ ++ /* ++ * Let's find out if Group0 is under control of EL3 or not by ++ * setting the highest possible, non-zero priority in PMR. ++ * ++ * If SCR_EL3.FIQ is set, the priority gets shifted down in ++ * order for the CPU interface to set bit 7, and keep the ++ * actual priority in the non-secure range. In the process, it ++ * looses the least significant bit and the actual priority ++ * becomes 0x80. Reading it back returns 0, indicating that ++ * we're don't have access to Group0. ++ */ ++ write_gicreg(BIT(8 - pribits), ICC_PMR_EL1); ++ val = read_gicreg(ICC_PMR_EL1); ++ group0 = val != 0; ++ ++ /* Set priority mask register */ ++ write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1); ++ ++ /* ++ * Some firmwares hand over to the kernel with the BPR changed from ++ * its reset value (and with a value large enough to prevent ++ * any pre-emptive interrupts from working at all). Writing a zero ++ * to BPR restores is reset value. 
++ */ ++ gic_write_bpr1(0); ++ ++ if (static_branch_likely(&supports_deactivate_key)) { ++ /* EOI drops priority only (mode 1) */ ++ gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop); ++ } else { ++ /* EOI deactivates interrupt too (mode 0) */ ++ gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir); ++ } ++ ++ /* Always whack Group0 before Group1 */ ++ if (group0) { ++ switch(pribits) { ++ case 8: ++ case 7: ++ write_gicreg(0, ICC_AP0R3_EL1); ++ write_gicreg(0, ICC_AP0R2_EL1); ++ case 6: ++ write_gicreg(0, ICC_AP0R1_EL1); ++ case 5: ++ case 4: ++ write_gicreg(0, ICC_AP0R0_EL1); ++ } ++ ++ isb(); ++ } ++ ++ switch(pribits) { ++ case 8: ++ case 7: ++ write_gicreg(0, ICC_AP1R3_EL1); ++ write_gicreg(0, ICC_AP1R2_EL1); ++ case 6: ++ write_gicreg(0, ICC_AP1R1_EL1); ++ case 5: ++ case 4: ++ write_gicreg(0, ICC_AP1R0_EL1); ++ } ++ ++ isb(); ++ ++ /* ... and let's hit the road... */ ++ gic_write_grpen1(1); ++ ++ /* Keep the RSS capability status in per_cpu variable */ ++ per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS); ++ ++ /* Check all the CPUs have capable of sending SGIs to other CPUs */ ++ for_each_online_cpu(i) { ++ bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu); ++ ++ need_rss |= MPIDR_RS(cpu_logical_map(i)); ++ if (need_rss && (!have_rss)) ++ pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n", ++ cpu, (unsigned long)mpidr, ++ i, (unsigned long)cpu_logical_map(i)); ++ } ++ ++ /** ++ * GIC spec says, when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0, ++ * writing ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED ++ * UNPREDICTABLE choice of : ++ * - The write is ignored. ++ * - The RS field is treated as 0. 
++ */ ++ if (need_rss && (!gic_data.has_rss)) ++ pr_crit_once("RSS is required but GICD doesn't support it\n"); ++} ++ ++static bool gicv3_nolpi; ++ ++static int __init gicv3_nolpi_cfg(char *buf) ++{ ++ return strtobool(buf, &gicv3_nolpi); ++} ++early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg); ++ ++static int gic_dist_supports_lpis(void) ++{ ++ return !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) && !gicv3_nolpi; ++} ++ ++static void gic_cpu_init(void) ++{ ++ void __iomem *rbase; ++ unsigned long mpidr; ++ int i; ++ ++ /* Register ourselves with the rest of the world */ ++ if (gic_populate_rdist()) ++ return; ++ ++ gic_enable_redist(true); ++ ++ rbase = gic_data_rdist_sgi_base(); ++ ++ /* Configure SGIs/PPIs as non-secure Group-1 */ ++ writel_relaxed(~0, rbase + GICR_IGROUPR0); ++ ++ gic_cpu_config(rbase, gic_redist_wait_for_rwp); ++ ++ mpidr = (unsigned long)cpu_logical_map(smp_processor_id()); ++ ++ if ((mpidr & 0xFFFF) == 0) { ++ rbase = rbase + 64*SZ_128K; ++ ++ for (i = 0; i < 4; i++) { ++ /* Configure SGIs/PPIs as non-secure Group-1 */ ++ writel_relaxed(~0, rbase + GICR_IGROUPR0); ++ ++ gic_cpu_config(rbase, NULL); ++ gic_do_wait_for_rwp(rbase - SZ_64K); ++ ++ rbase = rbase + SZ_128K; ++ } ++ } ++ ++ /* initialise system registers */ ++ gic_cpu_sys_reg_init(); ++} ++ ++#ifdef CONFIG_SMP ++ ++#define MPIDR_TO_SGI_RS(mpidr) (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT) ++#define MPIDR_TO_SGI_CLUSTER_ID(mpidr) ((mpidr) & ~0xFUL) ++ ++static int gic_starting_cpu(unsigned int cpu) ++{ ++ gic_cpu_init(); ++ ++ if (gic_dist_supports_lpis()) ++ phytium_its_cpu_init(); ++ ++ return 0; ++} ++ ++static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, ++ unsigned long cluster_id) ++{ ++ int next_cpu, cpu = *base_cpu; ++ unsigned long mpidr = cpu_logical_map(cpu); ++ u16 tlist = 0; ++ ++ while (cpu < nr_cpu_ids) { ++ tlist |= 1 << (mpidr & 0xf); ++ ++ next_cpu = cpumask_next(cpu, mask); ++ if (next_cpu >= nr_cpu_ids) ++ goto out; 
++ cpu = next_cpu; ++ ++ mpidr = cpu_logical_map(cpu); ++ ++ if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) { ++ cpu--; ++ goto out; ++ } ++ } ++out: ++ *base_cpu = cpu; ++ return tlist; ++} ++ ++#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \ ++ (MPIDR_AFFINITY_LEVEL(cluster_id, level) \ ++ << ICC_SGI1R_AFFINITY_## level ##_SHIFT) ++ ++static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq) ++{ ++ u64 val; ++ ++ val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3) | ++ MPIDR_TO_SGI_AFFINITY(cluster_id, 2) | ++ irq << ICC_SGI1R_SGI_ID_SHIFT | ++ MPIDR_TO_SGI_AFFINITY(cluster_id, 1) | ++ MPIDR_TO_SGI_RS(cluster_id) | ++ tlist << ICC_SGI1R_TARGET_LIST_SHIFT); ++ ++ pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val); ++ gic_write_sgi1r(val); ++} ++ ++static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) ++{ ++ int cpu; ++ ++ if (WARN_ON(irq >= 16)) ++ return; ++ ++ /* ++ * Ensure that stores to Normal memory are visible to the ++ * other CPUs before issuing the IPI. 
++ */ ++ wmb(); ++ ++ for_each_cpu(cpu, mask) { ++ u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu)); ++ u16 tlist; ++ ++ tlist = gic_compute_target_list(&cpu, mask, cluster_id); ++ gic_send_sgi(cluster_id, tlist, irq); ++ } ++ ++ /* Force the above writes to ICC_SGI1R_EL1 to be executed */ ++ isb(); ++} ++ ++static void gic_smp_init(void) ++{ ++ set_smp_cross_call(gic_raise_softirq); ++ cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING, ++ "irqchip/arm/gic_phytium_2500:starting", ++ gic_starting_cpu, NULL); ++} ++ ++static int gic_cpumask_select(struct irq_data *d, const struct cpumask *mask_val) ++{ ++ unsigned int skt, irq_skt, i; ++ unsigned int cpu, cpus = 0; ++ unsigned int skt_cpu_cnt[MAX_MARS3_SOC_COUNT] = {0}; ++ ++ irq_skt = mars3_irq_to_skt(gic_irq(d)); ++ ++ for (i = 0; i < nr_cpu_ids; i++) { ++ skt = (cpu_logical_map(i) >> 16) & 0xff; ++ if ((skt >= 0) && (skt < MAX_MARS3_SOC_COUNT)) { ++ if ((is_kdump_kernel()) && (irq_skt == skt)) { ++ return i; ++ } ++ ++ skt_cpu_cnt[skt]++; ++ } ++ else if (skt != 0xff) ++ pr_err("socket address: %d is out of range.", skt); ++ } ++ ++ if (0 != irq_skt) { ++ for (i = 0; i < irq_skt; i++) ++ cpus += skt_cpu_cnt[i]; ++ } ++ ++ cpu = cpumask_any_and(mask_val, cpu_online_mask); ++ cpus = cpus + cpu % skt_cpu_cnt[irq_skt]; ++ ++ return cpus; ++} ++ ++static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, ++ bool force) ++{ ++ unsigned int cpu, skt; ++ void __iomem *reg; ++ int enabled; ++ u64 val; ++ ++ if (force) ++ cpu = cpumask_first(mask_val); ++ else ++ cpu = gic_cpumask_select(d, mask_val); ++ ++ if (cpu >= nr_cpu_ids) ++ return -EINVAL; ++ ++ if (gic_irq_in_rdist(d)) ++ return -EINVAL; ++ ++ /* If interrupt was enabled, disable it first */ ++ enabled = gic_peek_irq(d, GICD_ISENABLER); ++ if (enabled) ++ gic_mask_irq(d); ++ ++ skt = mars3_irq_to_skt(gic_irq(d)); ++ reg = mars3_gic_dists[skt].dist_base + GICD_IROUTER + (gic_irq(d) * 8); ++ val = 
gic_mpidr_to_affinity(cpu_logical_map(cpu)); ++ ++ gic_write_irouter(val, reg); ++ ++ /* ++ * If the interrupt was enabled, enabled it again. Otherwise, ++ * just wait for the distributor to have digested our changes. ++ */ ++ if (enabled) ++ gic_unmask_irq(d); ++ else ++ gic_dist_wait_for_rwp(); ++ ++ irq_data_update_effective_affinity(d, cpumask_of(cpu)); ++ ++ return IRQ_SET_MASK_OK_DONE; ++} ++#else ++#define gic_set_affinity NULL ++#define gic_smp_init() do { } while(0) ++#endif ++ ++static int gic_retrigger(struct irq_data *data) ++{ ++ return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true); ++} ++ ++#ifdef CONFIG_CPU_PM ++/* Check whether it's single security state view */ ++static bool gic_dist_security_disabled(void) ++{ ++ return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS; ++} ++ ++static int gic_cpu_pm_notifier(struct notifier_block *self, ++ unsigned long cmd, void *v) ++{ ++ if (cmd == CPU_PM_EXIT) { ++ if (gic_dist_security_disabled()) ++ gic_enable_redist(true); ++ gic_cpu_sys_reg_init(); ++ } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) { ++ gic_write_grpen1(0); ++ gic_enable_redist(false); ++ } ++ return NOTIFY_OK; ++} ++ ++static struct notifier_block gic_cpu_pm_notifier_block = { ++ .notifier_call = gic_cpu_pm_notifier, ++}; ++ ++static void gic_cpu_pm_init(void) ++{ ++ cpu_pm_register_notifier(&gic_cpu_pm_notifier_block); ++} ++ ++#else ++static inline void gic_cpu_pm_init(void) { } ++#endif /* CONFIG_CPU_PM */ ++ ++static struct irq_chip gic_chip = { ++ .name = "GIC-Phytium-2500", ++ .irq_mask = gic_mask_irq, ++ .irq_unmask = gic_unmask_irq, ++ .irq_eoi = gic_eoi_irq, ++ .irq_set_type = gic_set_type, ++ .irq_set_affinity = gic_set_affinity, ++ .irq_retrigger = gic_retrigger, ++ .irq_get_irqchip_state = gic_irq_get_irqchip_state, ++ .irq_set_irqchip_state = gic_irq_set_irqchip_state, ++ .flags = IRQCHIP_SET_TYPE_MASKED | ++ IRQCHIP_SKIP_SET_WAKE | ++ IRQCHIP_MASK_ON_SUSPEND, ++}; ++ ++static 
struct irq_chip gic_eoimode1_chip = { ++ .name = "GIC-Phytium-2500", ++ .irq_mask = gic_eoimode1_mask_irq, ++ .irq_unmask = gic_unmask_irq, ++ .irq_eoi = gic_eoimode1_eoi_irq, ++ .irq_set_type = gic_set_type, ++ .irq_set_affinity = gic_set_affinity, ++ .irq_retrigger = gic_retrigger, ++ .irq_get_irqchip_state = gic_irq_get_irqchip_state, ++ .irq_set_irqchip_state = gic_irq_set_irqchip_state, ++ .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity, ++ .flags = IRQCHIP_SET_TYPE_MASKED | ++ IRQCHIP_SKIP_SET_WAKE | ++ IRQCHIP_MASK_ON_SUSPEND, ++}; ++ ++#define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer)) ++ ++static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, ++ irq_hw_number_t hw) ++{ ++ struct irq_chip *chip = &gic_chip; ++ struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq)); ++ ++ if (static_branch_likely(&supports_deactivate_key)) ++ chip = &gic_eoimode1_chip; ++ ++ /* SGIs are private to the core kernel */ ++ if (hw < 16) ++ return -EPERM; ++ /* Nothing here */ ++ if (hw >= gic_data.irq_nr && hw < 8192) ++ return -EPERM; ++ /* Off limits */ ++ if (hw >= GIC_ID_NR) ++ return -EPERM; ++ ++ /* PPIs */ ++ if (hw < 32) { ++ irq_set_percpu_devid(irq); ++ irq_domain_set_info(d, irq, hw, chip, d->host_data, ++ handle_percpu_devid_irq, NULL, NULL); ++ irq_set_status_flags(irq, IRQ_NOAUTOEN); ++ } ++ /* SPIs */ ++ if (hw >= 32 && hw < gic_data.irq_nr) { ++ irq_domain_set_info(d, irq, hw, chip, d->host_data, ++ handle_fasteoi_irq, NULL, NULL); ++ irq_set_probe(irq); ++ irqd_set_single_target(irqd); ++ } ++ /* LPIs */ ++ if (hw >= 8192 && hw < GIC_ID_NR) { ++ if (!gic_dist_supports_lpis()) ++ return -EPERM; ++ irq_domain_set_info(d, irq, hw, chip, d->host_data, ++ handle_fasteoi_irq, NULL, NULL); ++ } ++ ++ /* Prevents SW retriggers which mess up the ACK/EOI ordering */ ++ irqd_set_handle_enforce_irqctx(irqd); ++ return 0; ++} ++ ++#define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1) ++ ++static int 
gic_irq_domain_translate(struct irq_domain *d, ++ struct irq_fwspec *fwspec, ++ unsigned long *hwirq, ++ unsigned int *type) ++{ ++ if (is_of_node(fwspec->fwnode)) { ++ if (fwspec->param_count < 3) ++ return -EINVAL; ++ ++ switch (fwspec->param[0]) { ++ case 0: /* SPI */ ++ *hwirq = fwspec->param[1] + 32; ++ break; ++ case 1: /* PPI */ ++ case GIC_IRQ_TYPE_PARTITION: ++ *hwirq = fwspec->param[1] + 16; ++ break; ++ case GIC_IRQ_TYPE_LPI: /* LPI */ ++ *hwirq = fwspec->param[1]; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; ++ ++ /* ++ * Make it clear that broken DTs are... broken. ++ * Partitionned PPIs are an unfortunate exception. ++ */ ++ WARN_ON(*type == IRQ_TYPE_NONE && ++ fwspec->param[0] != GIC_IRQ_TYPE_PARTITION); ++ return 0; ++ } ++ ++ if (is_fwnode_irqchip(fwspec->fwnode)) { ++ if (fwspec->param_count != 2) ++ return -EINVAL; ++ ++ *hwirq = fwspec->param[0]; ++ *type = fwspec->param[1]; ++ ++ WARN_ON(*type == IRQ_TYPE_NONE); ++ return 0; ++ } ++ ++ return -EINVAL; ++} ++ ++static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, ++ unsigned int nr_irqs, void *arg) ++{ ++ int i, ret; ++ irq_hw_number_t hwirq; ++ unsigned int type = IRQ_TYPE_NONE; ++ struct irq_fwspec *fwspec = arg; ++ ++ ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type); ++ if (ret) ++ return ret; ++ ++ for (i = 0; i < nr_irqs; i++) { ++ ret = gic_irq_domain_map(domain, virq + i, hwirq + i); ++ if (ret) ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq, ++ unsigned int nr_irqs) ++{ ++ int i; ++ ++ for (i = 0; i < nr_irqs; i++) { ++ struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); ++ irq_set_handler(virq + i, NULL); ++ irq_domain_reset_irq_data(d); ++ } ++} ++ ++static int gic_irq_domain_select(struct irq_domain *d, ++ struct irq_fwspec *fwspec, ++ enum irq_domain_bus_token bus_token) ++{ ++ /* Not for us */ ++ if 
(fwspec->fwnode != d->fwnode) ++ return 0; ++ ++ /* If this is not DT, then we have a single domain */ ++ if (!is_of_node(fwspec->fwnode)) ++ return 1; ++ ++ /* ++ * If this is a PPI and we have a 4th (non-null) parameter, ++ * then we need to match the partition domain. ++ */ ++ if (fwspec->param_count >= 4 && ++ fwspec->param[0] == 1 && fwspec->param[3] != 0) ++ return d == partition_get_domain(gic_data.ppi_descs[fwspec->param[1]]); ++ ++ return d == gic_data.domain; ++} ++ ++static const struct irq_domain_ops gic_irq_domain_ops = { ++ .translate = gic_irq_domain_translate, ++ .alloc = gic_irq_domain_alloc, ++ .free = gic_irq_domain_free, ++ .select = gic_irq_domain_select, ++}; ++ ++static int partition_domain_translate(struct irq_domain *d, ++ struct irq_fwspec *fwspec, ++ unsigned long *hwirq, ++ unsigned int *type) ++{ ++ struct device_node *np; ++ int ret; ++ ++ np = of_find_node_by_phandle(fwspec->param[3]); ++ if (WARN_ON(!np)) ++ return -EINVAL; ++ ++ ret = partition_translate_id(gic_data.ppi_descs[fwspec->param[1]], ++ of_node_to_fwnode(np)); ++ if (ret < 0) ++ return ret; ++ ++ *hwirq = ret; ++ *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; ++ ++ return 0; ++} ++ ++static const struct irq_domain_ops partition_domain_ops = { ++ .translate = partition_domain_translate, ++ .select = gic_irq_domain_select, ++}; ++ ++static int __init gic_init_bases(void __iomem *dist_base, ++ struct redist_region *rdist_regs, ++ u32 nr_redist_regions, ++ u64 redist_stride, ++ struct fwnode_handle *handle) ++{ ++ u32 typer; ++ int gic_irqs; ++ int err; ++ ++ if (!is_hyp_mode_available()) ++ static_branch_disable(&supports_deactivate_key); ++ ++ if (static_branch_likely(&supports_deactivate_key)) ++ pr_info("GIC: Using split EOI/Deactivate mode\n"); ++ ++ gic_data.fwnode = handle; ++ gic_data.dist_base = dist_base; ++ gic_data.redist_regions = rdist_regs; ++ gic_data.nr_redist_regions = nr_redist_regions; ++ gic_data.redist_stride = redist_stride; ++ ++ /* ++ * Find out how 
many interrupts are supported. ++ * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI) ++ */ ++ typer = readl_relaxed(gic_data.dist_base + GICD_TYPER); ++ gic_data.rdists.gicd_typer = typer; ++ gic_irqs = GICD_TYPER_IRQS(typer); ++ if (gic_irqs > 1020) ++ gic_irqs = 1020; ++ gic_data.irq_nr = gic_irqs; ++ ++ gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops, ++ &gic_data); ++ irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED); ++ gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist)); ++ gic_data.rdists.has_vlpis = true; ++ gic_data.rdists.has_direct_lpi = true; ++ ++ if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) { ++ err = -ENOMEM; ++ goto out_free; ++ } ++ ++ gic_data.has_rss = !!(typer & GICD_TYPER_RSS); ++ pr_info("Distributor has %sRange Selector support\n", ++ gic_data.has_rss ? "" : "no "); ++ ++ set_handle_irq(gic_handle_irq); ++ ++ gic_update_vlpi_properties(); ++ ++ gic_smp_init(); ++ gic_dist_init(); ++ gic_cpu_init(); ++ gic_cpu_pm_init(); ++ ++ if (gic_dist_supports_lpis()) { ++ phytium_its_init(handle, &gic_data.rdists, gic_data.domain); ++ phytium_its_cpu_init(); ++ } ++ ++ return 0; ++ ++out_free: ++ if (gic_data.domain) ++ irq_domain_remove(gic_data.domain); ++ free_percpu(gic_data.rdists.rdist); ++ return err; ++} ++ ++static int __init gic_validate_dist_version(void __iomem *dist_base) ++{ ++ u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK; ++ ++ if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) ++ return -ENODEV; ++ ++ return 0; ++} ++ ++/* Create all possible partitions at boot time */ ++static void __init gic_populate_ppi_partitions(struct device_node *gic_node) ++{ ++ struct device_node *parts_node, *child_part; ++ int part_idx = 0, i; ++ int nr_parts; ++ struct partition_affinity *parts; ++ ++ parts_node = of_get_child_by_name(gic_node, "ppi-partitions"); ++ if (!parts_node) ++ return; ++ ++ nr_parts = 
of_get_child_count(parts_node); ++ ++ if (!nr_parts) ++ goto out_put_node; ++ ++ parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL); ++ if (WARN_ON(!parts)) ++ goto out_put_node; ++ ++ for_each_child_of_node(parts_node, child_part) { ++ struct partition_affinity *part; ++ int n; ++ ++ part = &parts[part_idx]; ++ ++ part->partition_id = of_node_to_fwnode(child_part); ++ ++ pr_info("GIC: PPI partition %s[%d] { ", ++ child_part->name, part_idx); ++ ++ n = of_property_count_elems_of_size(child_part, "affinity", ++ sizeof(u32)); ++ WARN_ON(n <= 0); ++ ++ for (i = 0; i < n; i++) { ++ int err, cpu; ++ u32 cpu_phandle; ++ struct device_node *cpu_node; ++ ++ err = of_property_read_u32_index(child_part, "affinity", ++ i, &cpu_phandle); ++ if (WARN_ON(err)) ++ continue; ++ ++ cpu_node = of_find_node_by_phandle(cpu_phandle); ++ if (WARN_ON(!cpu_node)) ++ continue; ++ ++ cpu = of_cpu_node_to_id(cpu_node); ++ if (WARN_ON(cpu < 0)) ++ continue; ++ ++ pr_cont("%pOF[%d] ", cpu_node, cpu); ++ ++ cpumask_set_cpu(cpu, &part->mask); ++ } ++ ++ pr_cont("}\n"); ++ part_idx++; ++ } ++ ++ for (i = 0; i < 16; i++) { ++ unsigned int irq; ++ struct partition_desc *desc; ++ struct irq_fwspec ppi_fwspec = { ++ .fwnode = gic_data.fwnode, ++ .param_count = 3, ++ .param = { ++ [0] = GIC_IRQ_TYPE_PARTITION, ++ [1] = i, ++ [2] = IRQ_TYPE_NONE, ++ }, ++ }; ++ ++ irq = irq_create_fwspec_mapping(&ppi_fwspec); ++ if (WARN_ON(!irq)) ++ continue; ++ desc = partition_create_desc(gic_data.fwnode, parts, nr_parts, ++ irq, &partition_domain_ops); ++ if (WARN_ON(!desc)) ++ continue; ++ ++ gic_data.ppi_descs[i] = desc; ++ } ++ ++out_put_node: ++ of_node_put(parts_node); ++} ++ ++static void __init gic_of_setup_kvm_info(struct device_node *node) ++{ ++ int ret; ++ struct resource r; ++ u32 gicv_idx; ++ ++ gic_v3_kvm_info.type = GIC_V3; ++ ++ gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0); ++ if (!gic_v3_kvm_info.maint_irq) ++ return; ++ ++ if (of_property_read_u32(node, "#redistributor-regions", ++ 
&gicv_idx)) ++ gicv_idx = 1; ++ ++ gicv_idx += 3; /* Also skip GICD, GICC, GICH */ ++ ret = of_address_to_resource(node, gicv_idx, &r); ++ if (!ret) ++ gic_v3_kvm_info.vcpu = r; ++ ++ gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis; ++ gic_set_kvm_info(&gic_v3_kvm_info); ++} ++ ++static int __init gic_of_init(struct device_node *node, struct device_node *parent) ++{ ++ void __iomem *dist_base; ++ struct redist_region *rdist_regs; ++ u64 redist_stride; ++ u32 nr_redist_regions; ++ int err, i, skt; ++ struct resource res; ++ ++ dist_base = of_iomap(node, 0); ++ if (!dist_base) { ++ pr_err("%pOF: unable to map gic dist registers\n", node); ++ return -ENXIO; ++ } ++ ++ err = gic_validate_dist_version(dist_base); ++ if (err) { ++ pr_err("%pOF: no distributor detected, giving up\n", node); ++ goto out_unmap_dist; ++ } ++ ++ if (of_address_to_resource(node, 0, &res)) { ++ printk("Error: No GIC Distributor in FDT\n"); ++ goto out_unmap_dist; ++ } ++ ++ mars3_gic_dists[0].phys_base = res.start; ++ mars3_gic_dists[0].size = resource_size(&res); ++ mars3_gic_dists[0].dist_base = dist_base; ++ ++ if (of_property_read_u32(node, "#mars3-soc-bitmap", &mars3_sockets_bitmap)) ++ mars3_sockets_bitmap = 0x1; ++ ++ for (skt = 1; skt < MAX_MARS3_SOC_COUNT; skt++) { ++ if ((((unsigned int)1 << skt) & mars3_sockets_bitmap) == 0) ++ continue; ++ ++ mars3_gic_dists[skt].phys_base = ((unsigned long)skt << MARS3_ADDR_SKTID_SHIFT) | ++ mars3_gic_dists[0].phys_base; ++ mars3_gic_dists[skt].size = mars3_gic_dists[0].size; ++ mars3_gic_dists[skt].dist_base = ioremap(mars3_gic_dists[skt].phys_base, ++ mars3_gic_dists[skt].size); ++ } ++ ++ if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions)) ++ nr_redist_regions = 1; ++ ++ rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs), ++ GFP_KERNEL); ++ if (!rdist_regs) { ++ err = -ENOMEM; ++ goto out_unmap_dist; ++ } ++ ++ for (i = 0; i < nr_redist_regions; i++) { ++ struct resource res; ++ int ret; ++ ++ ret = 
of_address_to_resource(node, 1 + i, &res); ++ rdist_regs[i].redist_base = of_iomap(node, 1 + i); ++ if (ret || !rdist_regs[i].redist_base) { ++ pr_err("%pOF: couldn't map region %d\n", node, i); ++ err = -ENODEV; ++ goto out_unmap_rdist; ++ } ++ rdist_regs[i].phys_base = res.start; ++ } ++ ++ if (of_property_read_u64(node, "redistributor-stride", &redist_stride)) ++ redist_stride = 0; ++ ++ err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions, ++ redist_stride, &node->fwnode); ++ if (err) ++ goto out_unmap_rdist; ++ ++ gic_populate_ppi_partitions(node); ++ ++ if (static_branch_likely(&supports_deactivate_key)) ++ gic_of_setup_kvm_info(node); ++ return 0; ++ ++out_unmap_rdist: ++ for (i = 0; i < nr_redist_regions; i++) ++ if (rdist_regs[i].redist_base) ++ iounmap(rdist_regs[i].redist_base); ++ kfree(rdist_regs); ++out_unmap_dist: ++ iounmap(dist_base); ++ return err; ++} ++ ++IRQCHIP_DECLARE(gic_phyt_2500, "arm,gic-phytium-2500", gic_of_init); ++ ++#ifdef CONFIG_ACPI ++static struct ++{ ++ void __iomem *dist_base; ++ struct redist_region *redist_regs; ++ u32 nr_redist_regions; ++ bool single_redist; ++ u32 maint_irq; ++ int maint_irq_mode; ++ phys_addr_t vcpu_base; ++} acpi_data __initdata; ++ ++static int gic_mars3_sockets_bitmap(void) ++{ ++ unsigned int skt, i; ++ int skt_bitmap = 0; ++ unsigned int skt_cpu_cnt[MAX_MARS3_SOC_COUNT] = {0}; ++ ++ for (i = 0; i < max_t(unsigned int, nr_cpu_ids, NR_CPUS); i++) { ++ skt = (cpu_logical_map(i) >> 16) & 0xff; ++ if ((skt >= 0) && (skt < MAX_MARS3_SOC_COUNT)) ++ skt_cpu_cnt[skt]++; ++ else if (skt != 0xff) ++ pr_err("socket address: %d is out of range.", skt); ++ } ++ ++ for (i = 0; i < MAX_MARS3_SOC_COUNT; i++) { ++ if (skt_cpu_cnt[i] > 0) ++ skt_bitmap |= (1 << i); ++ } ++ ++ return skt_bitmap; ++} ++ ++static void __init ++gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base) ++{ ++ static int count = 0; ++ ++ acpi_data.redist_regs[count].phys_base = phys_base; ++ 
acpi_data.redist_regs[count].redist_base = redist_base; ++ acpi_data.redist_regs[count].single_redist = acpi_data.single_redist; ++ count++; ++} ++ ++static int __init ++gic_acpi_parse_madt_redist(struct acpi_subtable_header *header, ++ const unsigned long end) ++{ ++ struct acpi_madt_generic_redistributor *redist = ++ (struct acpi_madt_generic_redistributor *)header; ++ void __iomem *redist_base; ++ ++ redist_base = ioremap(redist->base_address, redist->length); ++ if (!redist_base) { ++ pr_err("Couldn't map GICR region @%llx\n", redist->base_address); ++ return -ENOMEM; ++ } ++ ++ gic_acpi_register_redist(redist->base_address, redist_base); ++ return 0; ++} ++ ++static int __init ++gic_acpi_parse_madt_gicc(struct acpi_subtable_header *header, ++ const unsigned long end) ++{ ++ struct acpi_madt_generic_interrupt *gicc = ++ (struct acpi_madt_generic_interrupt *)header; ++ u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK; ++ u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? 
SZ_64K * 4 : SZ_64K * 2; ++ void __iomem *redist_base; ++ ++ /* GICC entry which has !ACPI_MADT_ENABLED is not unusable so skip */ ++ if (!(gicc->flags & ACPI_MADT_ENABLED)) ++ return 0; ++ ++ redist_base = ioremap(gicc->gicr_base_address, size); ++ if (!redist_base) ++ return -ENOMEM; ++ ++ gic_acpi_register_redist(gicc->gicr_base_address, redist_base); ++ return 0; ++} ++ ++static int __init gic_acpi_collect_gicr_base(void) ++{ ++ acpi_tbl_entry_handler redist_parser; ++ enum acpi_madt_type type; ++ ++ if (acpi_data.single_redist) { ++ type = ACPI_MADT_TYPE_GENERIC_INTERRUPT; ++ redist_parser = gic_acpi_parse_madt_gicc; ++ } else { ++ type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR; ++ redist_parser = gic_acpi_parse_madt_redist; ++ } ++ ++ /* Collect redistributor base addresses in GICR entries */ ++ if (acpi_table_parse_madt(type, redist_parser, 0) > 0) ++ return 0; ++ ++ pr_info("No valid GICR entries exist\n"); ++ return -ENODEV; ++} ++ ++static int __init gic_acpi_match_gicr(struct acpi_subtable_header *header, ++ const unsigned long end) ++{ ++ /* Subtable presence means that redist exists, that's it */ ++ return 0; ++} ++ ++static int __init gic_acpi_match_gicc(struct acpi_subtable_header *header, ++ const unsigned long end) ++{ ++ struct acpi_madt_generic_interrupt *gicc = ++ (struct acpi_madt_generic_interrupt *)header; ++ ++ /* ++ * If GICC is enabled and has valid gicr base address, then it means ++ * GICR base is presented via GICC ++ */ ++ if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) ++ return 0; ++ ++ /* ++ * It's perfectly valid firmware can pass disabled GICC entry, driver ++ * should not treat as errors, skip the entry instead of probe fail. ++ */ ++ if (!(gicc->flags & ACPI_MADT_ENABLED)) ++ return 0; ++ ++ return -ENODEV; ++} ++ ++static int __init gic_acpi_count_gicr_regions(void) ++{ ++ int count; ++ ++ /* ++ * Count how many redistributor regions we have. 
It is not allowed ++ * to mix redistributor description, GICR and GICC subtables have to be ++ * mutually exclusive. ++ */ ++ count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR, ++ gic_acpi_match_gicr, 0); ++ if (count > 0) { ++ acpi_data.single_redist = false; ++ return count; ++ } ++ ++ count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, ++ gic_acpi_match_gicc, 0); ++ if (count > 0) ++ acpi_data.single_redist = true; ++ ++ return count; ++} ++ ++static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header, ++ struct acpi_probe_entry *ape) ++{ ++ struct acpi_madt_generic_distributor *dist; ++ int count; ++ ++ dist = (struct acpi_madt_generic_distributor *)header; ++ if (dist->version != ape->driver_data) ++ return false; ++ ++ /* We need to do that exercise anyway, the sooner the better */ ++ count = gic_acpi_count_gicr_regions(); ++ if (count <= 0) ++ return false; ++ ++ acpi_data.nr_redist_regions = count; ++ return true; ++} ++ ++static int __init gic_acpi_parse_virt_madt_gicc(struct acpi_subtable_header *header, ++ const unsigned long end) ++{ ++ struct acpi_madt_generic_interrupt *gicc = ++ (struct acpi_madt_generic_interrupt *)header; ++ int maint_irq_mode; ++ static int first_madt = true; ++ ++ /* Skip unusable CPUs */ ++ if (!(gicc->flags & ACPI_MADT_ENABLED)) ++ return 0; ++ ++ maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ? 
++ ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE; ++ ++ if (first_madt) { ++ first_madt = false; ++ ++ acpi_data.maint_irq = gicc->vgic_interrupt; ++ acpi_data.maint_irq_mode = maint_irq_mode; ++ acpi_data.vcpu_base = gicc->gicv_base_address; ++ ++ return 0; ++ } ++ ++ /* ++ * The maintenance interrupt and GICV should be the same for every CPU ++ */ ++ if ((acpi_data.maint_irq != gicc->vgic_interrupt) || ++ (acpi_data.maint_irq_mode != maint_irq_mode) || ++ (acpi_data.vcpu_base != gicc->gicv_base_address)) ++ return -EINVAL; ++ ++ return 0; ++} ++ ++static bool __init gic_acpi_collect_virt_info(void) ++{ ++ int count; ++ ++ count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, ++ gic_acpi_parse_virt_madt_gicc, 0); ++ ++ return (count > 0); ++} ++ ++#define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K) ++#define ACPI_GICV2_VCTRL_MEM_SIZE (SZ_4K) ++#define ACPI_GICV2_VCPU_MEM_SIZE (SZ_8K) ++ ++static void __init gic_acpi_setup_kvm_info(void) ++{ ++ int irq; ++ ++ if (!gic_acpi_collect_virt_info()) { ++ pr_warn("Unable to get hardware information used for virtualization\n"); ++ return; ++ } ++ ++ gic_v3_kvm_info.type = GIC_V3; ++ ++ irq = acpi_register_gsi(NULL, acpi_data.maint_irq, ++ acpi_data.maint_irq_mode, ++ ACPI_ACTIVE_HIGH); ++ if (irq <= 0) ++ return; ++ ++ gic_v3_kvm_info.maint_irq = irq; ++ ++ if (acpi_data.vcpu_base) { ++ struct resource *vcpu = &gic_v3_kvm_info.vcpu; ++ ++ vcpu->flags = IORESOURCE_MEM; ++ vcpu->start = acpi_data.vcpu_base; ++ vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1; ++ } ++ ++ gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis; ++ gic_set_kvm_info(&gic_v3_kvm_info); ++} ++ ++static int __init ++gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end) ++{ ++ struct acpi_madt_generic_distributor *dist; ++ struct fwnode_handle *domain_handle; ++ size_t size; ++ int i, err, skt; ++ ++ /* Get distributor base address */ ++ dist = (struct acpi_madt_generic_distributor *)header; ++ acpi_data.dist_base = 
ioremap(dist->base_address, ++ ACPI_GICV3_DIST_MEM_SIZE); ++ if (!acpi_data.dist_base) { ++ pr_err("Unable to map GICD registers\n"); ++ return -ENOMEM; ++ } ++ ++ err = gic_validate_dist_version(acpi_data.dist_base); ++ if (err) { ++ pr_err("No distributor detected at @%p, giving up\n", ++ acpi_data.dist_base); ++ goto out_dist_unmap; ++ } ++ ++ mars3_gic_dists[0].phys_base = dist->base_address; ++ mars3_gic_dists[0].size = ACPI_GICV3_DIST_MEM_SIZE; ++ mars3_gic_dists[0].dist_base = acpi_data.dist_base; ++ ++ mars3_sockets_bitmap = gic_mars3_sockets_bitmap(); ++ if (mars3_sockets_bitmap == 0) { ++ mars3_sockets_bitmap = 0x1; ++ pr_err("No socket, please check cpus MPIDR_AFFINITY_LEVEL!"); ++ } else ++ pr_info("mars3_sockets_bitmap = 0x%x\n", mars3_sockets_bitmap); ++ ++ for (skt = 1; skt < MAX_MARS3_SOC_COUNT; skt++) { ++ if ((((unsigned int)1 << skt) & mars3_sockets_bitmap) == 0) ++ continue; ++ ++ mars3_gic_dists[skt].phys_base = ((unsigned long)skt << MARS3_ADDR_SKTID_SHIFT) | ++ mars3_gic_dists[0].phys_base; ++ mars3_gic_dists[skt].size = mars3_gic_dists[0].size; ++ mars3_gic_dists[skt].dist_base = ioremap(mars3_gic_dists[skt].phys_base, ++ mars3_gic_dists[skt].size); ++ } ++ ++ size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions; ++ acpi_data.redist_regs = kzalloc(size, GFP_KERNEL); ++ if (!acpi_data.redist_regs) { ++ err = -ENOMEM; ++ goto out_dist_unmap; ++ } ++ ++ err = gic_acpi_collect_gicr_base(); ++ if (err) ++ goto out_redist_unmap; ++ ++ domain_handle = irq_domain_alloc_fwnode(acpi_data.dist_base); ++ if (!domain_handle) { ++ err = -ENOMEM; ++ goto out_redist_unmap; ++ } ++ ++ err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs, ++ acpi_data.nr_redist_regions, 0, domain_handle); ++ if (err) ++ goto out_fwhandle_free; ++ ++ acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle); ++ ++ if (static_branch_likely(&supports_deactivate_key)) ++ gic_acpi_setup_kvm_info(); ++ ++ return 0; ++ ++out_fwhandle_free: ++ 
irq_domain_free_fwnode(domain_handle); ++out_redist_unmap: ++ for (i = 0; i < acpi_data.nr_redist_regions; i++) ++ if (acpi_data.redist_regs[i].redist_base) ++ iounmap(acpi_data.redist_regs[i].redist_base); ++ kfree(acpi_data.redist_regs); ++out_dist_unmap: ++ iounmap(acpi_data.dist_base); ++ return err; ++} ++IRQCHIP_ACPI_DECLARE(gic_phyt_2500, ACPI_MADT_TYPE_PHYTIUM_2500, ++ acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3, ++ gic_acpi_init); ++#endif +diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c +index c2df341ff6fa..c21b8b8cd934 100644 +--- a/drivers/irqchip/irq-gic-v3-its.c ++++ b/drivers/irqchip/irq-gic-v3-its.c +@@ -19,13 +19,16 @@ + #include + #include + #include ++#include + #include + #include ++#include + #include + #include + #include + #include + #include ++#include + #include + #include + #include +@@ -52,6 +55,7 @@ + #define ITS_FLAGS_SAVE_SUSPEND_STATE (1ULL << 3) + + #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) ++#define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1) + + static u32 lpi_id_bits; + +@@ -161,6 +165,13 @@ static struct { + int next_victim; + } vpe_proxy; + ++struct cpu_lpi_count { ++ atomic_t managed; ++ atomic_t unmanaged; ++}; ++ ++static DEFINE_PER_CPU(struct cpu_lpi_count, cpu_lpi_count); ++ + static LIST_HEAD(its_nodes); + static DEFINE_RAW_SPINLOCK(its_lock); + static struct rdists *gic_rdists; +@@ -173,6 +184,7 @@ static DEFINE_RAW_SPINLOCK(vmovp_lock); + static DEFINE_IDA(its_vpeid_ida); + + #define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist)) ++#define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu)) + #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) + #define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K) + +@@ -1028,7 +1040,7 @@ static inline u32 its_get_event_id(struct irq_data *d) + static void lpi_write_config(struct irq_data *d, u8 clr, u8 set) + { + irq_hw_number_t hwirq; +- struct page *prop_page; ++ void *va; + u8 *cfg; + + if 
(irqd_is_forwarded_to_vcpu(d)) { +@@ -1036,7 +1048,7 @@ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set) + u32 event = its_get_event_id(d); + struct its_vlpi_map *map; + +- prop_page = its_dev->event_map.vm->vprop_page; ++ va = page_address(its_dev->event_map.vm->vprop_page); + map = &its_dev->event_map.vlpi_maps[event]; + hwirq = map->vintid; + +@@ -1044,11 +1056,11 @@ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set) + map->properties &= ~clr; + map->properties |= set | LPI_PROP_GROUP1; + } else { +- prop_page = gic_rdists->prop_page; ++ va = gic_rdists->prop_table_va; + hwirq = d->hwirq; + } + +- cfg = page_address(prop_page) + hwirq - 8192; ++ cfg = va + hwirq - 8192; + *cfg &= ~clr; + *cfg |= set | LPI_PROP_GROUP1; + +@@ -1110,42 +1122,159 @@ static void its_unmask_irq(struct irq_data *d) + lpi_update_config(d, 0, LPI_PROP_ENABLED); + } + ++static __maybe_unused u32 its_read_lpi_count(struct irq_data *d, int cpu) ++{ ++ if (irqd_affinity_is_managed(d)) ++ return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed); ++ ++ return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged); ++} ++ ++static void its_inc_lpi_count(struct irq_data *d, int cpu) ++{ ++ if (irqd_affinity_is_managed(d)) ++ atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed); ++ else ++ atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged); ++} ++ ++static void its_dec_lpi_count(struct irq_data *d, int cpu) ++{ ++ if (irqd_affinity_is_managed(d)) ++ atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed); ++ else ++ atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged); ++} ++ ++static unsigned int cpumask_pick_least_loaded(struct irq_data *d, ++ const struct cpumask *cpu_mask) ++{ ++ unsigned int cpu = nr_cpu_ids, tmp; ++ int count = S32_MAX; ++ ++ for_each_cpu(tmp, cpu_mask) { ++ int this_count = its_read_lpi_count(d, tmp); ++ if (this_count < count) { ++ cpu = tmp; ++ count = this_count; ++ } ++ } ++ ++ return cpu; ++} ++ ++/* ++ * As suggested 
by Thomas Gleixner in: ++ * https://lore.kernel.org/r/87h80q2aoc.fsf@nanos.tec.linutronix.de ++ */ ++static int its_select_cpu(struct irq_data *d, ++ const struct cpumask *aff_mask) ++{ ++ struct its_device *its_dev = irq_data_get_irq_chip_data(d); ++ cpumask_var_t tmpmask; ++ int cpu, node; ++ ++ if (!alloc_cpumask_var(&tmpmask, GFP_ATOMIC)) ++ return -ENOMEM; ++ ++ node = its_dev->its->numa_node; ++ ++ if (!irqd_affinity_is_managed(d)) { ++ /* First try the NUMA node */ ++ if (node != NUMA_NO_NODE) { ++ /* ++ * Try the intersection of the affinity mask and the ++ * node mask (and the online mask, just to be safe). ++ */ ++ cpumask_and(tmpmask, cpumask_of_node(node), aff_mask); ++ cpumask_and(tmpmask, tmpmask, cpu_online_mask); ++ ++ /* ++ * Ideally, we would check if the mask is empty, and ++ * try again on the full node here. ++ * ++ * But it turns out that the way ACPI describes the ++ * affinity for ITSs only deals about memory, and ++ * not target CPUs, so it cannot describe a single ++ * ITS placed next to two NUMA nodes. ++ * ++ * Instead, just fallback on the online mask. This ++ * diverges from Thomas' suggestion above. 
++ */ ++ cpu = cpumask_pick_least_loaded(d, tmpmask); ++ if (cpu < nr_cpu_ids) ++ goto out; ++ ++ /* If we can't cross sockets, give up */ ++ if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)) ++ goto out; ++ ++ /* If the above failed, expand the search */ ++ } ++ ++ /* Try the intersection of the affinity and online masks */ ++ cpumask_and(tmpmask, aff_mask, cpu_online_mask); ++ ++ /* If that doesn't fly, the online mask is the last resort */ ++ if (cpumask_empty(tmpmask)) ++ cpumask_copy(tmpmask, cpu_online_mask); ++ ++ cpu = cpumask_pick_least_loaded(d, tmpmask); ++ } else { ++ cpumask_and(tmpmask, irq_data_get_affinity_mask(d), cpu_online_mask); ++ ++ /* If we cannot cross sockets, limit the search to that node */ ++ if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) && ++ node != NUMA_NO_NODE) ++ cpumask_and(tmpmask, tmpmask, cpumask_of_node(node)); ++ ++ cpu = cpumask_pick_least_loaded(d, tmpmask); ++ } ++out: ++ free_cpumask_var(tmpmask); ++ ++ pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, cpumask_pr_args(aff_mask), cpu); ++ return cpu; ++} ++ + static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, + bool force) + { +- unsigned int cpu; +- const struct cpumask *cpu_mask = cpu_online_mask; + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + struct its_collection *target_col; + u32 id = its_get_event_id(d); ++ int cpu, prev_cpu; + + /* A forwarded interrupt should use irq_set_vcpu_affinity */ + if (irqd_is_forwarded_to_vcpu(d)) + return -EINVAL; + +- /* lpi cannot be routed to a redistributor that is on a foreign node */ +- if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { +- if (its_dev->its->numa_node >= 0) { +- cpu_mask = cpumask_of_node(its_dev->its->numa_node); +- if (!cpumask_intersects(mask_val, cpu_mask)) +- return -EINVAL; +- } +- } ++ prev_cpu = its_dev->event_map.col_map[id]; ++ its_dec_lpi_count(d, prev_cpu); + +- cpu = cpumask_any_and(mask_val, cpu_mask); ++ if (!force) ++ cpu 
= its_select_cpu(d, mask_val); ++ else ++ cpu = cpumask_pick_least_loaded(d, mask_val); + +- if (cpu >= nr_cpu_ids) +- return -EINVAL; ++ if (cpu < 0 || cpu >= nr_cpu_ids) ++ goto err; + + /* don't set the affinity when the target cpu is same as current one */ +- if (cpu != its_dev->event_map.col_map[id]) { ++ if (cpu != prev_cpu) { + target_col = &its_dev->its->collections[cpu]; + its_send_movi(its_dev, target_col, id); + its_dev->event_map.col_map[id] = cpu; + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + } + ++ its_inc_lpi_count(d, cpu); ++ + return IRQ_SET_MASK_OK_DONE; ++ ++err: ++ its_inc_lpi_count(d, prev_cpu); ++ return -EINVAL; + } + + static u64 its_irq_get_msi_base(struct its_device *its_dev) +@@ -1168,7 +1297,8 @@ static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) + msg->address_hi = upper_32_bits(addr); + msg->data = its_get_event_id(d); + +- iommu_dma_map_msi_msg(d->irq, msg); ++ if ((read_cpuid_id() & MIDR_CPU_MODEL_MASK) != MIDR_PHYTIUM_FT2000PLUS) ++ iommu_dma_map_msi_msg(d->irq, msg); + } + + static int its_irq_set_irqchip_state(struct irq_data *d, +@@ -1412,6 +1542,11 @@ static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) + } + } + ++static int its_irq_retrigger(struct irq_data *d) ++{ ++ return !its_irq_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true); ++} ++ + static struct irq_chip its_irq_chip = { + .name = "ITS", + .irq_mask = its_mask_irq, +@@ -1420,6 +1555,7 @@ static struct irq_chip its_irq_chip = { + .irq_set_affinity = its_set_affinity, + .irq_compose_msi_msg = its_irq_compose_msi_msg, + .irq_set_irqchip_state = its_irq_set_irqchip_state, ++ .irq_retrigger = its_irq_retrigger, + .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity, + }; + +@@ -1597,6 +1733,15 @@ static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids) + kfree(bitmap); + } + ++static void gic_reset_prop_table(void *va) ++{ ++ /* Priority 0xa0, Group-1, disabled */ ++ memset(va, LPI_PROP_DEFAULT_PRIO 
| LPI_PROP_GROUP1, LPI_PROPBASE_SZ); ++ ++ /* Make sure the GIC will observe the written configuration */ ++ gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ); ++} ++ + static struct page *its_allocate_prop_table(gfp_t gfp_flags) + { + struct page *prop_page; +@@ -1605,13 +1750,7 @@ static struct page *its_allocate_prop_table(gfp_t gfp_flags) + if (!prop_page) + return NULL; + +- /* Priority 0xa0, Group-1, disabled */ +- memset(page_address(prop_page), +- LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, +- LPI_PROPBASE_SZ); +- +- /* Make sure the GIC will observe the written configuration */ +- gic_flush_dcache_to_poc(page_address(prop_page), LPI_PROPBASE_SZ); ++ gic_reset_prop_table(page_address(prop_page)); + + return prop_page; + } +@@ -1622,20 +1761,74 @@ static void its_free_prop_table(struct page *prop_page) + get_order(LPI_PROPBASE_SZ)); + } + +-static int __init its_alloc_lpi_tables(void) ++static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size) + { +- phys_addr_t paddr; ++ phys_addr_t start, end, addr_end; ++ u64 i; + +- lpi_id_bits = min_t(u32, GICD_TYPER_ID_BITS(gic_rdists->gicd_typer), +- ITS_MAX_LPI_NRBITS); +- gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT); +- if (!gic_rdists->prop_page) { +- pr_err("Failed to allocate PROPBASE\n"); +- return -ENOMEM; ++ /* ++ * We don't bother checking for a kdump kernel as by ++ * construction, the LPI tables are out of this kernel's ++ * memory map. ++ */ ++ if (is_kdump_kernel()) ++ return true; ++ ++ addr_end = addr + size - 1; ++ ++ for_each_reserved_mem_region(i, &start, &end) { ++ if (addr >= start && addr_end <= end) ++ return true; + } + +- paddr = page_to_phys(gic_rdists->prop_page); +- pr_info("GIC: using LPI property table @%pa\n", &paddr); ++ /* Not found, not a good sign... 
*/ ++ pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n", ++ &addr, &addr_end); ++ add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); ++ return false; ++} ++ ++static int gic_reserve_range(phys_addr_t addr, unsigned long size) ++{ ++ if (efi_enabled(EFI_CONFIG_TABLES)) ++ return efi_mem_reserve_persistent(addr, size); ++ ++ return 0; ++} ++ ++static int __init its_setup_lpi_prop_table(void) ++{ ++ if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) { ++ u64 val; ++ ++ val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER); ++ lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1; ++ ++ gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12); ++ gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa, ++ LPI_PROPBASE_SZ, ++ MEMREMAP_WB); ++ gic_reset_prop_table(gic_rdists->prop_table_va); ++ } else { ++ struct page *page; ++ ++ lpi_id_bits = min_t(u32, ++ GICD_TYPER_ID_BITS(gic_rdists->gicd_typer), ++ ITS_MAX_LPI_NRBITS); ++ page = its_allocate_prop_table(GFP_NOWAIT); ++ if (!page) { ++ pr_err("Failed to allocate PROPBASE\n"); ++ return -ENOMEM; ++ } ++ ++ gic_rdists->prop_table_pa = page_to_phys(page); ++ gic_rdists->prop_table_va = page_address(page); ++ WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa, ++ LPI_PROPBASE_SZ)); ++ } ++ ++ pr_info("GICv3: using LPI property table @%pa\n", ++ &gic_rdists->prop_table_pa); + + return its_lpi_init(lpi_id_bits); + } +@@ -1884,6 +2077,8 @@ static int its_alloc_tables(struct its_node *its) + indirect = its_parse_indirect_baser(its, baser, + psz, &order, + its->device_ids); ++ break; ++ + case GITS_BASER_TYPE_VCPU: + indirect = its_parse_indirect_baser(its, baser, + psz, &order, +@@ -1924,12 +2119,9 @@ static int its_alloc_collections(struct its_node *its) + static struct page *its_allocate_pending_table(gfp_t gfp_flags) + { + struct page *pend_page; +- /* +- * The pending pages have to be at least 64kB aligned, +- * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below. 
+- */ ++ + pend_page = alloc_pages(gfp_flags | __GFP_ZERO, +- get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K))); ++ get_order(LPI_PENDBASE_SZ)); + if (!pend_page) + return NULL; + +@@ -1941,36 +2133,103 @@ static struct page *its_allocate_pending_table(gfp_t gfp_flags) + + static void its_free_pending_table(struct page *pt) + { +- free_pages((unsigned long)page_address(pt), +- get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K))); ++ free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ)); ++} ++ ++/* ++ * Booting with kdump and LPIs enabled is generally fine. Any other ++ * case is wrong in the absence of firmware/EFI support. ++ */ ++static bool enabled_lpis_allowed(void) ++{ ++ phys_addr_t addr; ++ u64 val; ++ ++ /* Check whether the property table is in a reserved region */ ++ val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER); ++ addr = val & GENMASK_ULL(51, 12); ++ ++ return gic_check_reserved_range(addr, LPI_PROPBASE_SZ); ++} ++ ++static int __init allocate_lpi_tables(void) ++{ ++ u64 val; ++ int err, cpu; ++ ++ /* ++ * If LPIs are enabled while we run this from the boot CPU, ++ * flag the RD tables as pre-allocated if the stars do align. ++ */ ++ val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR); ++ if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) { ++ gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED | ++ RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING); ++ pr_info("GICv3: Using preallocated redistributor tables\n"); ++ } ++ ++ err = its_setup_lpi_prop_table(); ++ if (err) ++ return err; ++ ++ /* ++ * We allocate all the pending tables anyway, as we may have a ++ * mix of RDs that have had LPIs enabled, and some that ++ * don't. We'll free the unused ones as each CPU comes online. 
++ */ ++ for_each_possible_cpu(cpu) { ++ struct page *pend_page; ++ ++ pend_page = its_allocate_pending_table(GFP_NOWAIT); ++ if (!pend_page) { ++ pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu); ++ return -ENOMEM; ++ } ++ ++ gic_data_rdist_cpu(cpu)->pend_page = pend_page; ++ } ++ ++ return 0; + } + + static void its_cpu_init_lpis(void) + { + void __iomem *rbase = gic_data_rdist_rd_base(); + struct page *pend_page; ++ phys_addr_t paddr; + u64 val, tmp; + +- /* If we didn't allocate the pending table yet, do it now */ +- pend_page = gic_data_rdist()->pend_page; +- if (!pend_page) { +- phys_addr_t paddr; ++ if (gic_data_rdist()->lpi_enabled) ++ return; + +- pend_page = its_allocate_pending_table(GFP_NOWAIT); +- if (!pend_page) { +- pr_err("Failed to allocate PENDBASE for CPU%d\n", +- smp_processor_id()); +- return; +- } ++ val = readl_relaxed(rbase + GICR_CTLR); ++ if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) && ++ (val & GICR_CTLR_ENABLE_LPIS)) { ++ /* ++ * Check that we get the same property table on all ++ * RDs. If we don't, this is hopeless. 
++ */ ++ paddr = gicr_read_propbaser(rbase + GICR_PROPBASER); ++ paddr &= GENMASK_ULL(51, 12); ++ if (WARN_ON(gic_rdists->prop_table_pa != paddr)) ++ add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); ++ ++ paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER); ++ paddr &= GENMASK_ULL(51, 16); + +- paddr = page_to_phys(pend_page); +- pr_info("CPU%d: using LPI pending table @%pa\n", +- smp_processor_id(), &paddr); +- gic_data_rdist()->pend_page = pend_page; ++ WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ)); ++ its_free_pending_table(gic_data_rdist()->pend_page); ++ gic_data_rdist()->pend_page = NULL; ++ ++ goto out; + } + ++ pend_page = gic_data_rdist()->pend_page; ++ paddr = page_to_phys(pend_page); ++ WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ)); ++ + /* set PROPBASE */ +- val = (page_to_phys(gic_rdists->prop_page) | ++ val = (gic_rdists->prop_table_pa | + GICR_PROPBASER_InnerShareable | + GICR_PROPBASER_RaWaWb | + ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK)); +@@ -2020,6 +2279,12 @@ static void its_cpu_init_lpis(void) + + /* Make sure the GIC has seen the above */ + dsb(sy); ++out: ++ gic_data_rdist()->lpi_enabled = true; ++ pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n", ++ smp_processor_id(), ++ gic_data_rdist()->pend_page ? 
"allocated" : "reserved", ++ &paddr); + } + + static void its_cpu_init_collection(struct its_node *its) +@@ -2394,22 +2659,13 @@ static int its_irq_domain_activate(struct irq_domain *domain, + { + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); +- const struct cpumask *cpu_mask = cpu_online_mask; + int cpu; + +- /* get the cpu_mask of local node */ +- if (its_dev->its->numa_node >= 0) +- cpu_mask = cpumask_of_node(its_dev->its->numa_node); +- +- /* Bind the LPI to the first possible CPU */ +- cpu = cpumask_first_and(cpu_mask, cpu_online_mask); +- if (cpu >= nr_cpu_ids) { +- if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) +- return -EINVAL; +- +- cpu = cpumask_first(cpu_online_mask); +- } ++ cpu = its_select_cpu(d, cpu_online_mask); ++ if (cpu < 0 || cpu >= nr_cpu_ids) ++ return -EINVAL; + ++ its_inc_lpi_count(d, cpu); + its_dev->event_map.col_map[event] = cpu; + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + +@@ -2424,6 +2680,7 @@ static void its_irq_domain_deactivate(struct irq_domain *domain, + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + ++ its_dec_lpi_count(d, its_dev->event_map.col_map[event]); + /* Stop the delivery of interrupts */ + its_send_discard(its_dev, event); + } +@@ -3498,16 +3755,6 @@ static int redist_disable_lpis(void) + u64 timeout = USEC_PER_SEC; + u64 val; + +- /* +- * If coming via a CPU hotplug event, we don't need to disable +- * LPIs before trying to re-enable them. They are already +- * configured and all is well in the world. Detect this case +- * by checking the allocation of the pending table for the +- * current CPU. 
+- */ +- if (gic_data_rdist()->pend_page) +- return 0; +- + if (!gic_rdists_supports_plpis()) { + pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); + return -ENXIO; +@@ -3517,7 +3764,21 @@ static int redist_disable_lpis(void) + if (!(val & GICR_CTLR_ENABLE_LPIS)) + return 0; + +- pr_warn("CPU%d: Booted with LPIs enabled, memory probably corrupted\n", ++ /* ++ * If coming via a CPU hotplug event, we don't need to disable ++ * LPIs before trying to re-enable them. They are already ++ * configured and all is well in the world. ++ * ++ * If running with preallocated tables, there is nothing to do. ++ */ ++ if (gic_data_rdist()->lpi_enabled || ++ (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED)) ++ return 0; ++ ++ /* ++ * From that point on, we only try to do some damage control. ++ */ ++ pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n", + smp_processor_id()); + add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); + +@@ -3773,7 +4034,8 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, + } + + gic_rdists = rdists; +- err = its_alloc_lpi_tables(); ++ ++ err = allocate_lpi_tables(); + if (err) + return err; + +diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c +index d5912f1ec884..c37bbb719a3c 100644 +--- a/drivers/irqchip/irq-gic-v3.c ++++ b/drivers/irqchip/irq-gic-v3.c +@@ -653,7 +653,9 @@ early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg); + + static int gic_dist_supports_lpis(void) + { +- return !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) && !gicv3_nolpi; ++ return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && ++ !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) && ++ !gicv3_nolpi); + } + + static void gic_cpu_init(void) +@@ -673,10 +675,6 @@ static void gic_cpu_init(void) + + gic_cpu_config(rbase, gic_redist_wait_for_rwp); + +- /* Give LPIs a spin */ +- if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis()) +- its_cpu_init(); +- + /* 
initialise system registers */ + gic_cpu_sys_reg_init(); + } +@@ -689,6 +687,10 @@ static void gic_cpu_init(void) + static int gic_starting_cpu(unsigned int cpu) + { + gic_cpu_init(); ++ ++ if (gic_dist_supports_lpis()) ++ its_cpu_init(); ++ + return 0; + } + +@@ -818,6 +820,11 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, + #define gic_smp_init() do { } while(0) + #endif + ++static int gic_retrigger(struct irq_data *data) ++{ ++ return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true); ++} ++ + #ifdef CONFIG_CPU_PM + /* Check whether it's single security state view */ + static bool gic_dist_security_disabled(void) +@@ -859,6 +866,7 @@ static struct irq_chip gic_chip = { + .irq_eoi = gic_eoi_irq, + .irq_set_type = gic_set_type, + .irq_set_affinity = gic_set_affinity, ++ .irq_retrigger = gic_retrigger, + .irq_get_irqchip_state = gic_irq_get_irqchip_state, + .irq_set_irqchip_state = gic_irq_set_irqchip_state, + .flags = IRQCHIP_SET_TYPE_MASKED | +@@ -873,6 +881,7 @@ static struct irq_chip gic_eoimode1_chip = { + .irq_eoi = gic_eoimode1_eoi_irq, + .irq_set_type = gic_set_type, + .irq_set_affinity = gic_set_affinity, ++ .irq_retrigger = gic_retrigger, + .irq_get_irqchip_state = gic_irq_get_irqchip_state, + .irq_set_irqchip_state = gic_irq_set_irqchip_state, + .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity, +@@ -887,6 +896,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hw) + { + struct irq_chip *chip = &gic_chip; ++ struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq)); + + if (static_branch_likely(&supports_deactivate_key)) + chip = &gic_eoimode1_chip; +@@ -913,7 +923,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, + irq_domain_set_info(d, irq, hw, chip, d->host_data, + handle_fasteoi_irq, NULL, NULL); + irq_set_probe(irq); +- irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq))); ++ irqd_set_single_target(irqd); + } + /* 
LPIs */ + if (hw >= 8192 && hw < GIC_ID_NR) { +@@ -923,6 +933,8 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, + handle_fasteoi_irq, NULL, NULL); + } + ++ /* Prevents SW retriggers which mess up the ACK/EOI ordering */ ++ irqd_set_handle_enforce_irqctx(irqd); + return 0; + } + +@@ -1127,14 +1139,16 @@ static int __init gic_init_bases(void __iomem *dist_base, + + gic_update_vlpi_properties(); + +- if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis()) +- its_init(handle, &gic_data.rdists, gic_data.domain); +- + gic_smp_init(); + gic_dist_init(); + gic_cpu_init(); + gic_cpu_pm_init(); + ++ if (gic_dist_supports_lpis()) { ++ its_init(handle, &gic_data.rdists, gic_data.domain); ++ its_cpu_init(); ++ } ++ + return 0; + + out_free: +diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c +index ced10c44b68a..76e61392c3df 100644 +--- a/drivers/irqchip/irq-gic.c ++++ b/drivers/irqchip/irq-gic.c +@@ -350,6 +350,11 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, + } + #endif + ++static int gic_retrigger(struct irq_data *data) ++{ ++ return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true); ++} ++ + static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) + { + u32 irqstat, irqnr; +@@ -420,6 +425,7 @@ static const struct irq_chip gic_chip = { + .irq_unmask = gic_unmask_irq, + .irq_eoi = gic_eoi_irq, + .irq_set_type = gic_set_type, ++ .irq_retrigger = gic_retrigger, + .irq_get_irqchip_state = gic_irq_get_irqchip_state, + .irq_set_irqchip_state = gic_irq_set_irqchip_state, + .flags = IRQCHIP_SET_TYPE_MASKED | +@@ -972,6 +978,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hw) + { + struct gic_chip_data *gic = d->host_data; ++ struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq)); + + if (hw < 32) { + irq_set_percpu_devid(irq); +@@ -982,8 +989,11 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int 
irq, + irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data, + handle_fasteoi_irq, NULL, NULL); + irq_set_probe(irq); +- irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq))); ++ irqd_set_single_target(irqd); + } ++ ++ /* Prevents SW retriggers which mess up the ACK/EOI ordering */ ++ irqd_set_handle_enforce_irqctx(irqd); + return 0; + } + +diff --git a/drivers/irqchip/irq-phytium-ixic.c b/drivers/irqchip/irq-phytium-ixic.c +new file mode 100644 +index 000000000000..dd3d755b7b15 +--- /dev/null ++++ b/drivers/irqchip/irq-phytium-ixic.c +@@ -0,0 +1,267 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Driver for Phytium D2000 PCIe legacy INTx interrupt controller ++ * ++ * Copyright (c) 2020 Phytium Technology Co., Ltd. ++ * ++ * Author: Chen Baozi ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#define NUM_IRQS 4 ++ ++#define CTR_BANK_NUM 6 ++#define CTR_BANK_SIZE 0x10000 ++#define CTR_BANK_ISTATUS_LOCAL 0x184 ++ ++#define HPB_INTX_STATUS_0 0x0 ++#define HPB_INTX_STATUS_1 0x1000 ++ ++struct ixic_irq_data { ++ void __iomem *ctr; ++ void __iomem *hpb; ++ u32 spi_base; ++}; ++ ++static void phytium_ixic_irq_eoi(struct irq_data *d) ++{ ++ struct ixic_irq_data *data = irq_data_get_irq_chip_data(d); ++ unsigned int intx = irqd_to_hwirq(d); ++ u32 gstatus = readl(data->hpb) | (readl(data->hpb + HPB_INTX_STATUS_1) << 12); ++ u32 imask, istatus; ++ int i; ++ ++ WARN_ON(intx >= NUM_IRQS); ++ imask = 1 << (3 - intx); ++ istatus = (1 << intx) << 24; ++ for (i = 0; i < CTR_BANK_NUM; i++, gstatus >>= 4) { ++ if (gstatus & imask) ++ writel(istatus, data->ctr + CTR_BANK_SIZE*i + CTR_BANK_ISTATUS_LOCAL); ++ } ++ ++ irq_chip_eoi_parent(d); ++} ++ ++static struct irq_chip phytium_ixic_irq_chip = { ++ .name = "IXIU", ++ .irq_eoi = phytium_ixic_irq_eoi, ++ .irq_mask = irq_chip_mask_parent, ++ .irq_unmask = irq_chip_unmask_parent, ++ .irq_set_type = irq_chip_set_type_parent, ++ .irq_set_affinity 
= irq_chip_set_affinity_parent, ++ .flags = IRQCHIP_MASK_ON_SUSPEND, ++}; ++ ++static int phytium_ixic_translate(struct irq_domain *domain, ++ struct irq_fwspec *fwspec, ++ unsigned long *hwirq, ++ unsigned int *type) ++{ ++ struct ixic_irq_data *info = domain->host_data; ++ ++ if (is_of_node(fwspec->fwnode)) { ++ if (fwspec->param_count != 3) ++ return -EINVAL; ++ ++ if (fwspec->param[0] != GIC_SPI) ++ return -EINVAL; /* No PPI should point to this domain */ ++ ++ *hwirq = fwspec->param[1] - info->spi_base; ++ *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; ++ } else { ++ if (fwspec->param_count != 2) ++ return -EINVAL; ++ *hwirq = fwspec->param[0] - info->spi_base; ++ *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; ++ } ++ ++ return 0; ++} ++ ++static int phytium_ixic_alloc(struct irq_domain *dom, unsigned int virq, ++ unsigned int nr_irqs, void *data) ++{ ++ struct irq_fwspec *fwspec = data; ++ struct irq_fwspec parent_fwspec; ++ struct ixic_irq_data *info = dom->host_data; ++ irq_hw_number_t hwirq; ++ ++ /* We assume the device use the parent's format directly */ ++ parent_fwspec = *fwspec; ++ if (is_of_node(dom->parent->fwnode)) { ++ if (fwspec->param_count != 3) ++ return -EINVAL; /* Not GIC compliant */ ++ if (fwspec->param[0] != GIC_SPI) ++ return -EINVAL; /* No PPI should point to this domain */ ++ ++ /* Get the local hwirq of IXIC */ ++ hwirq = fwspec->param[1] - info->spi_base; ++ } else { ++ hwirq = fwspec->param[0] - info->spi_base; ++ } ++ WARN_ON(nr_irqs != 1); ++ irq_domain_set_hwirq_and_chip(dom, virq, hwirq, &phytium_ixic_irq_chip, info); ++ ++ parent_fwspec.fwnode = dom->parent->fwnode; ++ return irq_domain_alloc_irqs_parent(dom, virq, nr_irqs, &parent_fwspec); ++} ++ ++static const struct irq_domain_ops ixic_domain_ops = { ++ .translate = phytium_ixic_translate, ++ .alloc = phytium_ixic_alloc, ++ .free = irq_domain_free_irqs_common, ++}; ++ ++static struct ixic_irq_data *phytium_ixic_init(const struct fwnode_handle *fwnode, ++ struct resource 
*ctr, struct resource *hpb) ++{ ++ struct ixic_irq_data *data; ++ int err; ++ ++ data = kzalloc(sizeof(*data), GFP_KERNEL); ++ if (!data) ++ return ERR_PTR(-ENOMEM); ++ ++ if (fwnode_property_read_u32_array(fwnode, "intx-spi-base", ++ &data->spi_base, 1)) { ++ err = -ENODEV; ++ goto out_free; ++ } ++ ++ data->ctr = ioremap(ctr->start, resource_size(ctr)); ++ if (!data->ctr) { ++ err = -ENODEV; ++ goto out_free; ++ } ++ ++ data->hpb = ioremap(hpb->start, resource_size(hpb)); ++ if (!data->hpb) { ++ err = -ENODEV; ++ goto out_free; ++ } ++ ++ return data; ++ ++out_free: ++ kfree(data); ++ return ERR_PTR(err); ++} ++ ++static int __init phytium_ixic_dt_init(struct device_node *node, ++ struct device_node *parent) ++{ ++ struct irq_domain *pd, *d; ++ struct ixic_irq_data *data; ++ struct resource ctr, hpb; ++ ++ if (!parent) { ++ pr_err("%pOF: no parent, giving up\n", node); ++ return -ENODEV; ++ } ++ ++ pd = irq_find_host(parent); ++ if (!pd) { ++ pr_err("%pOF: unable to obtain parent domain\n", node); ++ return -ENXIO; ++ } ++ ++ if (of_address_to_resource(node, 0, &ctr)) { ++ pr_err("%pOF: failed to parse 'ctr' memory resource\n", node); ++ return -ENXIO; ++ } ++ ++ if (of_address_to_resource(node, 1, &hpb)) { ++ pr_err("%pOF: failed to parse 'hpb' memory resource\n", node); ++ return -ENXIO; ++ } ++ ++ data = phytium_ixic_init(of_node_to_fwnode(node), &ctr, &hpb); ++ if (IS_ERR(data)) ++ return PTR_ERR(data); ++ ++ d = irq_domain_add_hierarchy(pd, 0, NUM_IRQS, node, &ixic_domain_ops, data); ++ if (!d) { ++ pr_err("%pOF: failed to allocate domain\n", node); ++ goto out_unmap; ++ } ++ ++ pr_info("%pOF: %d interrupts forwarded to %pOF\n", node, NUM_IRQS, parent); ++ ++ return 0; ++ ++out_unmap: ++ iounmap(data->ctr); ++ iounmap(data->hpb); ++ kfree(data); ++ return -ENOMEM; ++} ++IRQCHIP_DECLARE(d2000_ixic, "phytium,d2000-ixic", phytium_ixic_dt_init); ++IRQCHIP_DECLARE(ft2004c_ixic, "phytium,ft2004c-ixic", phytium_ixic_dt_init); ++ ++#ifdef CONFIG_ACPI ++static int 
phytium_ixic_acpi_probe(struct platform_device *pdev) ++{ ++ struct irq_domain *domain; ++ struct ixic_irq_data *data; ++ struct resource *ctr, *hpb; ++ ++ ctr = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!ctr) { ++ dev_err(&pdev->dev, "failed to parse 'ctr' memory resource\n"); ++ return -ENXIO; ++ } ++ ++ hpb = platform_get_resource(pdev, IORESOURCE_MEM, 1); ++ if (!hpb) { ++ dev_err(&pdev->dev, "failed to parse 'hpb' memory resource\n"); ++ return -ENXIO; ++ } ++ ++ data = phytium_ixic_init(dev_fwnode(&pdev->dev), ctr, hpb); ++ if (IS_ERR(data)) ++ return PTR_ERR(data); ++ ++ domain = acpi_irq_create_hierarchy(0, NUM_IRQS, dev_fwnode(&pdev->dev), ++ &ixic_domain_ops, data); ++ if (!domain) { ++ dev_err(&pdev->dev, "failed to create IRQ domain\n"); ++ goto out_unmap; ++ } ++ ++ dev_info(&pdev->dev, "%d interrupts forwarded\n", NUM_IRQS); ++ ++ return 0; ++ ++out_unmap: ++ iounmap(data->ctr); ++ iounmap(data->hpb); ++ kfree(data); ++ return -ENOMEM; ++} ++ ++static const struct acpi_device_id phytium_ixic_acpi_ids[] = { ++ { "PHYT0013" }, ++ { /* sentinel */ } ++}; ++MODULE_DEVICE_TABLE(acpi, phytium_ixic_acpi_ids); ++ ++static struct platform_driver phytium_ixic_driver = { ++ .driver = { ++ .name = "phytium-ixic", ++ .acpi_match_table = phytium_ixic_acpi_ids, ++ }, ++ .probe = phytium_ixic_acpi_probe, ++}; ++builtin_platform_driver(phytium_ixic_driver); ++#endif +diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig +index 841c005d8ebb..461e2397dc88 100644 +--- a/drivers/mailbox/Kconfig ++++ b/drivers/mailbox/Kconfig +@@ -21,6 +21,12 @@ config IMX_MBOX + help + Mailbox implementation for i.MX Messaging Unit (MU). + ++config PHYTIUM_MBOX ++ tristate "Phytium SoC Mailbox Support" ++ depends on ARCH_PHYTIUM || COMPILE_TEST ++ help ++ This driver provides the support for the Phytium mailbox controller. 
++ + config PLATFORM_MHU + tristate "Platform MHU Mailbox" + depends on OF +diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile +index c818b5d011ae..de3cbe3ffa44 100644 +--- a/drivers/mailbox/Makefile ++++ b/drivers/mailbox/Makefile +@@ -9,6 +9,8 @@ obj-$(CONFIG_ARM_MHU) += arm_mhu.o + + obj-$(CONFIG_IMX_MBOX) += imx-mailbox.o + ++obj-$(CONFIG_PHYTIUM_MBOX) += phytium_mailbox.o ++ + obj-$(CONFIG_PLATFORM_MHU) += platform_mhu.o + + obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o +diff --git a/drivers/mailbox/phytium_mailbox.c b/drivers/mailbox/phytium_mailbox.c +new file mode 100644 +index 000000000000..c797d4b4769f +--- /dev/null ++++ b/drivers/mailbox/phytium_mailbox.c +@@ -0,0 +1,200 @@ ++/* ++ * Phytium SoC mailbox driver ++ * ++ * Copyright (c) 2020 Phytium Corporation. ++ * ++ * This program is free software: you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation, version 2 of the License. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define INTR_STAT 0x0 ++#define INTR_SET 0x8 ++#define INTR_CLR 0x10 ++ ++#define TX_REG 0x100 ++ ++#define NR_CHANS 1 ++ ++struct phytium_mbox_link { ++ unsigned irq; ++ void __iomem *tx_reg; ++ void __iomem *rx_reg; ++}; ++ ++struct phytium_mbox { ++ void __iomem *base; ++ struct phytium_mbox_link mlink; ++ struct mbox_chan chan; ++ struct mbox_controller mbox; ++}; ++ ++static irqreturn_t phytium_mbox_rx_irq(int irq, void *ch) ++{ ++ struct mbox_chan *chan = ch; ++ struct phytium_mbox_link *mlink = chan->con_priv; ++ u32 val; ++ ++ val = readl_relaxed(mlink->rx_reg + INTR_STAT); ++ if (!val) ++ return IRQ_NONE; ++ ++ mbox_chan_received_data(chan, (void *)&val); ++ ++ writel_relaxed(val, mlink->rx_reg + INTR_CLR); ++ ++ return IRQ_HANDLED; ++} ++ ++static int phytium_mbox_send_data(struct mbox_chan *chan, void *data) ++{ ++ struct phytium_mbox_link *mlink = chan->con_priv; ++ u32 *arg = data; ++ ++ writel_relaxed(*arg, mlink->tx_reg + INTR_SET); ++ ++ return 0; ++} ++ ++static int phytium_mbox_startup(struct mbox_chan *chan) ++{ ++ struct phytium_mbox_link *mlink = chan->con_priv; ++ u32 val; ++ int ret; ++ ++ val = readl_relaxed(mlink->tx_reg + INTR_STAT); ++ writel_relaxed(val, mlink->tx_reg + INTR_CLR); ++ ++ ret = request_irq(mlink->irq, phytium_mbox_rx_irq, ++ IRQF_SHARED, "phytium_mbox_link", chan); ++ if (ret) { ++ dev_err(chan->mbox->dev, ++ "Unable to acquire IRQ %d\n", mlink->irq); ++ } ++ ++ return ret; ++} ++ ++static void phytium_mbox_shutdown(struct mbox_chan *chan) ++{ ++ struct phytium_mbox_link *mlink = chan->con_priv; ++ ++ free_irq(mlink->irq, chan); ++} ++ ++static bool phytium_mbox_last_tx_done(struct mbox_chan *chan) ++{ ++ struct phytium_mbox_link *mlink = chan->con_priv; ++ u32 val = readl_relaxed(mlink->tx_reg + INTR_STAT); ++ ++ return (val == (u32)(1U << 31)); ++} ++ ++static const struct mbox_chan_ops phytium_mbox_ops = { ++ 
.send_data = phytium_mbox_send_data, ++ .startup = phytium_mbox_startup, ++ .shutdown = phytium_mbox_shutdown, ++ .last_tx_done = phytium_mbox_last_tx_done, ++}; ++ ++static const struct acpi_device_id phytium_mbox_acpi_match[] = { ++ { "PHYT0009", 0 }, ++ { }, ++}; ++MODULE_DEVICE_TABLE(acpi, phytium_mbox_acpi_match); ++ ++static const struct of_device_id phytium_mbox_of_match[] = { ++ { .compatible = "phytium,mbox", }, ++ { }, ++}; ++MODULE_DEVICE_TABLE(of, phytium_mbox_of_match); ++ ++static int phytium_mbox_probe(struct platform_device *pdev) ++{ ++ struct phytium_mbox *mbox; ++ struct resource *res; ++ int err, irq; ++ ++ /* Allocate memory for device */ ++ mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL); ++ if (!mbox) ++ return -ENOMEM; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ mbox->base = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(mbox->base)) { ++ dev_err(&pdev->dev, "ioremap base failed\n"); ++ return PTR_ERR(mbox->base); ++ } ++ ++ irq = platform_get_irq(pdev, 0); ++ if (irq < 0) { ++ dev_err(&pdev->dev, "cannot obtain irq\n"); ++ return irq; ++ } ++ ++ mbox->chan.con_priv = &mbox->mlink; ++ mbox->mlink.irq = irq; ++ mbox->mlink.rx_reg = mbox->base; ++ mbox->mlink.tx_reg = mbox->mlink.rx_reg + TX_REG; ++ ++ mbox->mbox.dev = &pdev->dev; ++ mbox->mbox.chans = &mbox->chan; ++ mbox->mbox.num_chans = NR_CHANS; ++ mbox->mbox.ops = &phytium_mbox_ops; ++ mbox->mbox.txdone_irq = false; ++ mbox->mbox.txdone_poll = true; ++ mbox->mbox.txpoll_period = 1; ++ ++ platform_set_drvdata(pdev, mbox); ++ ++ err = mbox_controller_register(&mbox->mbox); ++ if (err) { ++ dev_err(&pdev->dev, "Failed to register mailboxes %d\n", err); ++ goto fail; ++ } ++ ++ dev_info(&pdev->dev, "Phytium SoC Mailbox registered\n"); ++fail: ++ return err; ++} ++ ++static int phytium_mbox_remove(struct platform_device *pdev) ++{ ++ struct phytium_mbox *mbox = platform_get_drvdata(pdev); ++ ++ mbox_controller_unregister(&mbox->mbox); ++ ++ return 0; ++} 
++ ++static struct platform_driver phytium_mbox_driver = { ++ .probe = phytium_mbox_probe, ++ .remove = phytium_mbox_remove, ++ .driver = { ++ .name = "phytium-mbox", ++ .of_match_table = of_match_ptr(phytium_mbox_of_match), ++ .acpi_match_table = ACPI_PTR(phytium_mbox_acpi_match), ++ }, ++}; ++ ++module_platform_driver(phytium_mbox_driver); ++ ++MODULE_LICENSE("GPL v2"); ++MODULE_DESCRIPTION("Phytium SoC Mailbox Driver"); ++MODULE_AUTHOR("Chen Baozi "); +diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig +index 11841f4b7b2b..6a6a0e7fec43 100644 +--- a/drivers/mfd/Kconfig ++++ b/drivers/mfd/Kconfig +@@ -915,6 +915,19 @@ config UCB1400_CORE + To compile this driver as a module, choose M here: the + module will be called ucb1400_core. + ++config MFD_PHYTIUM_I2S_LSD ++ bool "PHYTIUM X100 I2S LSD MFD driver" ++ depends on (PCI && ARCH_PHYTIUM) ++ help ++ This enables support for the Phytium X100 LSD I2S controller. ++ ++config MFD_PHYTIUM_I2S_MMD ++ bool "PHYTIUM X100 I2S MMD MFD driver" ++ depends on (PCI && ARCH_PHYTIUM) ++ help ++ This enables support for the Phytium X100 MMD I2S controllers ++ for Display Port. 
++ + config MFD_PM8XXX + tristate "Qualcomm PM8xxx PMIC chips driver" + depends on (ARM || HEXAGON || COMPILE_TEST) +diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile +index 5856a9489cbd..65a8aa1d1abf 100644 +--- a/drivers/mfd/Makefile ++++ b/drivers/mfd/Makefile +@@ -241,3 +241,5 @@ obj-$(CONFIG_MFD_SC27XX_PMIC) += sprd-sc27xx-spi.o + obj-$(CONFIG_RAVE_SP_CORE) += rave-sp.o + obj-$(CONFIG_MFD_ROHM_BD718XX) += rohm-bd718x7.o + ++obj-$(CONFIG_MFD_PHYTIUM_I2S_LSD) += phytium_x100_i2s_lsd.o ++obj-$(CONFIG_MFD_PHYTIUM_I2S_MMD) += phytium_x100_i2s_mmd.o +diff --git a/drivers/mfd/phytium_x100_i2s_lsd.c b/drivers/mfd/phytium_x100_i2s_lsd.c +new file mode 100644 +index 000000000000..720501f35c52 +--- /dev/null ++++ b/drivers/mfd/phytium_x100_i2s_lsd.c +@@ -0,0 +1,136 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Phytium I2S LSD MFD driver over PCI bus ++ * ++ * Copyright (C) 2020-2021, Phytium Technology Co.,Ltd. ++ * ++ * This file is licensed under the terms of the GNU General Public ++ * License version 2. This program is licensed "as is" without any ++ * warranty of any kind, whether express or implied. 
++ * ++ */ ++ ++#include ++#include ++#include ++#include ++ ++struct phytium_x100_mfd { ++ struct device *dev; ++}; ++ ++struct pdata_x100_mfd { ++ struct device *dev; ++ char *name; ++ int clk_base; ++}; ++ ++static struct resource phytium_x100_i2s_res0[] = { ++ [0] = { ++ .flags = IORESOURCE_MEM, ++ }, ++ [1] = { ++ .flags = IORESOURCE_MEM, ++ }, ++ [2] = { ++ .flags = IORESOURCE_IRQ, ++ }, ++}; ++ ++static struct mfd_cell phytium_x100_mfd_cells[] = { ++ { ++ .id = 0, ++ .name = "phytium-i2s", ++ .of_compatible = "phytium,i2s", ++ .resources = phytium_x100_i2s_res0, ++ .num_resources = ARRAY_SIZE(phytium_x100_i2s_res0), ++ .ignore_resource_conflicts = true, ++ }, ++}; ++ ++static void phytium_x100_i2s_setup(struct pci_dev *pdev) ++{ ++ struct mfd_cell *cell = &phytium_x100_mfd_cells[0]; ++ struct resource *res = (struct resource *)cell->resources; ++ struct pdata_x100_mfd *pdata; ++ ++ res[0].start = pci_resource_start(pdev, 0); ++ res[0].end = pci_resource_start(pdev, 0) + 0x0fff; ++ ++ res[1].start = pci_resource_start(pdev, 0) + 0x1000; ++ res[1].end = pci_resource_start(pdev, 0) + 0x1fff; ++ ++ res[2].start = pdev->irq; ++ res[2].end = pdev->irq; ++ ++ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); ++ ++ pdata->dev = &pdev->dev; ++ pdata->name = "phytium-i2s-lsd"; ++ pdata->clk_base = 480000000; ++ ++ cell->platform_data = pdata; ++ cell->pdata_size = sizeof(*pdata); ++} ++ ++static int phytium_x100_mfd_probe(struct pci_dev *pdev, ++ const struct pci_device_id *id) ++{ ++ struct phytium_x100_mfd *phytium_mfd; ++ int ret; ++ ++ ret = pcim_enable_device(pdev); ++ if (ret) ++ return ret; ++ ++ pci_set_master(pdev); ++ ++ phytium_mfd = devm_kzalloc(&pdev->dev, sizeof(*phytium_mfd), GFP_KERNEL); ++ if (!phytium_mfd) ++ return -ENOMEM; ++ ++ phytium_mfd->dev = &pdev->dev; ++ dev_set_drvdata(&pdev->dev, phytium_mfd); ++ ++ phytium_x100_i2s_setup(pdev); ++ ++ ret = mfd_add_devices(&pdev->dev, 0, phytium_x100_mfd_cells, ++ 
ARRAY_SIZE(phytium_x100_mfd_cells), NULL, 0, ++ NULL); ++ if (ret) ++ return 0; ++ ++ return 0; ++} ++ ++ ++static void phytium_x100_mfd_remove(struct pci_dev *pdev) ++{ ++ mfd_remove_devices(&pdev->dev); ++} ++ ++static const struct pci_device_id phytium_x100_mfd_ids[] = { ++ { ++ .vendor = 0x1DB7, ++ .device = 0xDC2B, ++ .subvendor = PCI_ANY_ID, ++ .subdevice = PCI_ANY_ID, ++ .class = 0x3, ++ .class_mask = 0, ++ }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(pci, phytium_x100_mfd_ids); ++ ++static struct pci_driver phytium_i2s_lsd_mfd_driver = { ++ .name = "phytium_x100_mfd_i2s", ++ .id_table = phytium_x100_mfd_ids, ++ .probe = phytium_x100_mfd_probe, ++ .remove = phytium_x100_mfd_remove, ++}; ++ ++module_pci_driver(phytium_i2s_lsd_mfd_driver); ++ ++MODULE_AUTHOR("Yiqun Zhang "); ++MODULE_DESCRIPTION("Phytium X100 MFD PCI driver for I2S-LSD"); ++MODULE_LICENSE("GPL v2"); +diff --git a/drivers/mfd/phytium_x100_i2s_mmd.c b/drivers/mfd/phytium_x100_i2s_mmd.c +new file mode 100644 +index 000000000000..e327e5c6288c +--- /dev/null ++++ b/drivers/mfd/phytium_x100_i2s_mmd.c +@@ -0,0 +1,190 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Phytium I2S MMD MFD driver over PCI bus ++ * ++ * Copyright (C) 2020-2021, Phytium Technology Co.,Ltd. ++ * ++ * This file is licensed under the terms of the GNU General Public ++ * License version 2. This program is licensed "as is" without any ++ * warranty of any kind, whether express or implied. 
++ * ++ */ ++ ++#include ++#include ++#include ++#include ++ ++struct phytium_x100_mfd { ++ struct device *dev; ++}; ++ ++struct pdata_x100_mfd { ++ struct device *dev; ++ char *name; ++ int clk_base; ++}; ++ ++static struct resource phytium_x100_i2s_res0[] = { ++ [0] = { ++ .flags = IORESOURCE_MEM, ++ }, ++ [1] = { ++ .flags = IORESOURCE_MEM, ++ }, ++ [2] = { ++ .flags = IORESOURCE_IRQ, ++ }, ++}; ++ ++static struct resource phytium_x100_i2s_res1[] = { ++ [0] = { ++ .flags = IORESOURCE_MEM, ++ }, ++ [1] = { ++ .flags = IORESOURCE_MEM, ++ }, ++ [2] = { ++ .flags = IORESOURCE_IRQ, ++ }, ++}; ++ ++static struct resource phytium_x100_i2s_res2[] = { ++ [0] = { ++ .flags = IORESOURCE_MEM, ++ }, ++ [1] = { ++ .flags = IORESOURCE_MEM, ++ }, ++ [2] = { ++ .flags = IORESOURCE_IRQ, ++ }, ++}; ++ ++static struct mfd_cell phytium_x100_mfd_cells[] = { ++ { ++ .id = 1, ++ .name = "phytium-i2s", ++ .of_compatible = "phytium,i2s", ++ .resources = phytium_x100_i2s_res0, ++ .num_resources = ARRAY_SIZE(phytium_x100_i2s_res0), ++ .ignore_resource_conflicts = true, ++ }, ++ { ++ .id = 2, ++ .name = "phytium-i2s", ++ .of_compatible = "phytium,i2s", ++ .resources = phytium_x100_i2s_res1, ++ .num_resources = ARRAY_SIZE(phytium_x100_i2s_res1), ++ .ignore_resource_conflicts = true, ++ }, ++ { ++ .id = 3, ++ .name = "phytium-i2s", ++ .of_compatible = "phytium,i2s", ++ .resources = phytium_x100_i2s_res2, ++ .num_resources = ARRAY_SIZE(phytium_x100_i2s_res2), ++ .ignore_resource_conflicts = true, ++ }, ++}; ++ ++static void phytium_x100_i2s_setup(struct pci_dev *pdev, int i) ++{ ++ struct mfd_cell *cell = &phytium_x100_mfd_cells[i]; ++ struct resource *res = (struct resource *)cell->resources; ++ struct pdata_x100_mfd *pdata; ++ ++ res[0].start = pci_resource_start(pdev, 0) + 0x2000 * i + 0x1000; ++ res[0].end = pci_resource_start(pdev, 0) + 0x2000 * i + 0x1fff; ++ ++ res[1].start = pci_resource_start(pdev, 0) + 0x2000 * i; ++ res[1].end = pci_resource_start(pdev, 0) + 0x2000 * i + 0x0fff; ++ 
++ res[2].start = pdev->irq; ++ res[2].end = pdev->irq; ++ ++ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); ++ ++ pdata->dev = &pdev->dev; ++ pdata->clk_base = 600000000; ++ switch (i) { ++ case 0: ++ pdata->name = "phytium-i2s-dp0"; ++ break; ++ case 1: ++ pdata->name = "phytium-i2s-dp1"; ++ break; ++ case 2: ++ pdata->name = "phytium-i2s-dp2"; ++ break; ++ default: ++ break; ++ } ++ ++ cell->platform_data = pdata; ++ cell->pdata_size = sizeof(*pdata); ++} ++ ++static int phytium_x100_mfd_probe(struct pci_dev *pdev, ++ const struct pci_device_id *id) ++{ ++ struct phytium_x100_mfd *phytium_mfd; ++ int i; ++ int ret; ++ ++ ret = pcim_enable_device(pdev); ++ if (ret) ++ return ret; ++ ++ pci_set_master(pdev); ++ ++ phytium_mfd = devm_kzalloc(&pdev->dev, sizeof(*phytium_mfd), GFP_KERNEL); ++ if (!phytium_mfd) ++ return -ENOMEM; ++ ++ phytium_mfd->dev = &pdev->dev; ++ dev_set_drvdata(&pdev->dev, phytium_mfd); ++ ++ for (i = 0; i < 3; i++) ++ phytium_x100_i2s_setup(pdev, i); ++ ++ ret = mfd_add_devices(&pdev->dev, 0, phytium_x100_mfd_cells, ++ ARRAY_SIZE(phytium_x100_mfd_cells), NULL, 0, ++ NULL); ++ if (ret) ++ return 0; ++ ++ return 0; ++} ++ ++ ++static void phytium_x100_mfd_remove(struct pci_dev *pdev) ++{ ++ mfd_remove_devices(&pdev->dev); ++} ++ ++static const struct pci_device_id phytium_x100_mfd_ids[] = { ++ { ++ .vendor = 0x1DB7, ++ .device = 0xDC23, ++ .subvendor = PCI_ANY_ID, ++ .subdevice = PCI_ANY_ID, ++ .class = 0x3, ++ .class_mask = 0, ++ }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(pci, phytium_x100_mfd_ids); ++ ++static struct pci_driver phytium_i2s_mmd_mfd_driver = { ++ .name = "phytium_x100_mfd_mmd", ++ .id_table = phytium_x100_mfd_ids, ++ .probe = phytium_x100_mfd_probe, ++ .remove = phytium_x100_mfd_remove, ++}; ++ ++module_pci_driver(phytium_i2s_mmd_mfd_driver); ++ ++MODULE_AUTHOR("Yiqun Zhang "); ++MODULE_DESCRIPTION("Phytium X100 MFD PCI driver for I2S-DP"); ++MODULE_LICENSE("GPL v2"); +diff --git a/drivers/mmc/host/Kconfig 
b/drivers/mmc/host/Kconfig +index 694d0828215d..91800da7e277 100644 +--- a/drivers/mmc/host/Kconfig ++++ b/drivers/mmc/host/Kconfig +@@ -943,3 +943,21 @@ config MMC_SDHCI_OMAP + If you have a controller with this interface, say Y or M here. + + If unsure, say N. ++ ++config MMC_PHYTIUM_SDCI ++ tristate "Phytium FT SD Host Controller support" ++ depends on ARM64 ++ help ++ This selects support for the Phytium FT4C SD Host Controller ++ ++config MMC_PHYTIUM_MCI_PCI ++ tristate "Phytium octopus PCI MultiMedia Card Interface support" ++ depends on ARCH_PHYTIUM ++ default y if ARCH_PHYTIUM ++ help ++ This selects support for the PCI MultiMedia Card Interface on Phytium ++ X100 chipset. ++ ++ If you have a controller with this interface, say Y or M here. ++ ++ If unsure, say N. +diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile +index ce8398e6f2c0..c6212a81bc4d 100644 +--- a/drivers/mmc/host/Makefile ++++ b/drivers/mmc/host/Makefile +@@ -69,6 +69,8 @@ obj-$(CONFIG_MMC_SUNXI) += sunxi-mmc.o + obj-$(CONFIG_MMC_USDHI6ROL0) += usdhi6rol0.o + obj-$(CONFIG_MMC_TOSHIBA_PCI) += toshsd.o + obj-$(CONFIG_MMC_BCM2835) += bcm2835.o ++obj-$(CONFIG_MMC_PHYTIUM_SDCI) += phytium-sdci.o ++obj-$(CONFIG_MMC_PHYTIUM_MCI_PCI) += phytium-mci-pci.o phytium-mci.o + + obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o + obj-$(CONFIG_MMC_REALTEK_USB) += rtsx_usb_sdmmc.o +diff --git a/drivers/mmc/host/phytium-mci-pci.c b/drivers/mmc/host/phytium-mci-pci.c +new file mode 100644 +index 000000000000..dda6089dce5f +--- /dev/null ++++ b/drivers/mmc/host/phytium-mci-pci.c +@@ -0,0 +1,178 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* ++ * Phytium Multimedia Card Interface PCI driver ++ * ++ * Copyright (C) 2020 Phytium Technology Co.,Ltd. 
++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include "phytium-mci.h" ++ ++static u32 sd_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | ++ MMC_CAP_CMD23 | MMC_CAP_4_BIT_DATA; ++static u32 sd_caps2 = MMC_CAP2_NO_MMC; ++ ++static u32 emmc_caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA | MMC_CAP_WAIT_WHILE_BUSY | ++ MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_HW_RESET | MMC_CAP_MMC_HIGHSPEED | ++ MMC_CAP_NONREMOVABLE; ++static u32 emmc_caps2 = MMC_CAP2_NO_SDIO | MMC_CAP2_NO_SD; ++ ++#define PCI_BAR_NO 0 ++ ++#if defined CONFIG_PM && defined CONFIG_PM_SLEEP ++static const struct dev_pm_ops phytium_mci_dev_pm_ops = { ++ SET_SYSTEM_SLEEP_PM_OPS(phytium_mci_suspend, ++ phytium_mci_resume) ++ SET_RUNTIME_PM_OPS(phytium_mci_runtime_suspend, ++ phytium_mci_runtime_resume, NULL) ++}; ++#else ++#define phytium_mci_dev_pm_ops NULL ++#endif ++ ++static int ++phytium_mci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) ++{ ++ struct phytium_mci_host *host; ++ struct mmc_host *mmc; ++ int ret; ++ ++ ret = pcim_enable_device(pdev); ++ ++ if (ret) ++ return ret; ++ pci_set_master(pdev); ++ ++ mmc = mmc_alloc_host(sizeof(struct phytium_mci_host), &pdev->dev); ++ ++ if (!mmc) ++ return -ENOMEM; ++ ++ host = mmc_priv(mmc); ++ ++ pci_enable_msi(pdev); ++ ++ host->irq = pdev->irq; ++ host->irq_flags = IRQF_SHARED; ++ host->dev = &pdev->dev; ++ ret = pcim_iomap_regions(pdev, 1 << PCI_BAR_NO, pci_name(pdev)); ++ ++ if (ret) { ++ dev_err(&pdev->dev, "I/O memory remapping failed\n"); ++ goto host_free; ++ } ++ ++ host->base = pcim_iomap_table(pdev)[PCI_BAR_NO]; ++ host->is_use_dma = 1; ++ host->is_device_x100 = 1; ++ ++ if (pdev->devfn == 2) { ++ host->caps = emmc_caps; ++ host->caps2 = emmc_caps2; ++ } else { ++ host->caps = sd_caps; ++ host->caps2 = sd_caps2; ++ mmc->f_max = 25000000; /* stable frequency */ ++ } ++ ++ host->mmc = mmc; ++ host->clk_rate = MCI_CLK; ++ ++ dev_info(&pdev->dev, "%s %d: [bar %d] addr: 
0x%llx size: 0x%llx km: 0x%llx devfn:%d\n", ++ __func__, __LINE__, PCI_BAR_NO, pci_resource_start(pdev, 0), ++ pci_resource_len(pdev, 0), (uint64_t)host->base, pdev->devfn); ++ ++ dev_dbg(&pdev->dev, "%s %d:irq:0x%x\n", __func__, __LINE__, host->irq); ++ ++ ret = phytium_mci_common_probe(host); ++ ++ if (ret == MCI_REALEASE_MEM) { ++ ret = -ENOMEM; ++ goto release_mem; ++ } else if (ret) { ++ goto release; ++ } ++ pci_set_drvdata(pdev, mmc); ++ dev_info(&pdev->dev, "%s %d: probe phytium mci successful.\n", __func__, __LINE__); ++ return 0; ++ ++release: ++ phytium_mci_deinit_hw(host); ++release_mem: ++ ++ if (host->dma.adma_table) { ++ dma_free_coherent(&pdev->dev, ++ MAX_BD_NUM * sizeof(struct phytium_adma2_64_desc), ++ host->dma.adma_table, host->dma.adma_addr); ++ } ++host_free: ++ mmc_free_host(mmc); ++ pci_disable_device(pdev); ++ return ret; ++} ++ ++static void phytium_mci_pci_remove(struct pci_dev *pdev) ++{ ++ struct phytium_mci_host *host; ++ struct mmc_host *mmc; ++ ++ mmc = pci_get_drvdata(pdev); ++ if (!mmc) { ++ dev_info(&pdev->dev, "%s %d: mmc is null.\n", __func__, __LINE__); ++ return; ++ } ++ host = mmc_priv(mmc); ++ if (!host) { ++ dev_info(&pdev->dev, "%s %d: host is null.\n", __func__, __LINE__); ++ mmc_remove_host(mmc); ++ mmc_free_host(mmc); ++ return; ++ } ++ ++ del_timer(&host->hotplug_timer); ++ ++ mmc_remove_host(host->mmc); ++ ++ if (host->dma.adma_table) { ++ dma_free_coherent(&pdev->dev, ++ MAX_BD_NUM * sizeof(struct phytium_adma2_64_desc), ++ host->dma.adma_table, host->dma.adma_addr); ++ } ++ phytium_mci_deinit_hw(host); ++ mmc_free_host(mmc); ++ pci_set_drvdata(pdev, NULL); ++} ++ ++static const struct pci_device_id phytium_mci_pci_tbl[] = { ++ { ++ .vendor = 0x1DB7, ++ .device = 0xDC28, ++ .subvendor = PCI_ANY_ID, ++ .subdevice = PCI_ANY_ID, ++ .class = 0x5, ++ .class_mask = 0, ++ }, ++ {} ++}; ++MODULE_DEVICE_TABLE(pci, phytium_mci_pci_tbl); ++ ++static struct pci_driver phytium_mci_pci_driver = { ++ .name = "phytium-mci-pci", ++ 
.id_table = phytium_mci_pci_tbl, ++ .probe = phytium_mci_pci_probe, ++ .remove = phytium_mci_pci_remove, ++ .driver = { ++ .pm = &phytium_mci_dev_pm_ops, ++ } ++}; ++module_pci_driver(phytium_mci_pci_driver); ++ ++MODULE_DESCRIPTION("Phytium Multimedia Card Interface PCI driver"); ++MODULE_LICENSE("GPL v2"); ++MODULE_AUTHOR("Cheng Quan "); +diff --git a/drivers/mmc/host/phytium-mci.c b/drivers/mmc/host/phytium-mci.c +new file mode 100644 +index 000000000000..7e9ba20df6ae +--- /dev/null ++++ b/drivers/mmc/host/phytium-mci.c +@@ -0,0 +1,1482 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* ++ * Driver for Phytium Multimedia Card Interface ++ * ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "phytium-mci.h" ++ ++static const u32 cmd_ints_mask = MCI_INT_MASK_RE | MCI_INT_MASK_CMD | MCI_INT_MASK_RCRC | ++ MCI_INT_MASK_RTO | MCI_INT_MASK_HTO | MCI_RAW_INTS_HLE; ++ ++static const u32 data_ints_mask = MCI_INT_MASK_DTO | MCI_INT_MASK_DCRC | MCI_INT_MASK_DRTO | ++ MCI_INT_MASK_SBE_BCI; ++static const u32 cmd_err_ints_mask = MCI_INT_MASK_RTO | MCI_INT_MASK_RCRC | MCI_INT_MASK_RE | ++ MCI_INT_MASK_DCRC | MCI_INT_MASK_DRTO | ++ MCI_MASKED_INTS_SBE_BCI; ++ ++static const u32 dmac_ints_mask = MCI_DMAC_INT_ENA_FBE | MCI_DMAC_INT_ENA_DU | ++ MCI_DMAC_INT_ENA_NIS | MCI_DMAC_INT_ENA_AIS; ++static const u32 dmac_err_ints_mask = MCI_DMAC_INT_ENA_FBE | MCI_DMAC_INT_ENA_DU | ++ MCI_DMAC_INT_ENA_AIS; ++ ++static void phytium_mci_cmd_next(struct phytium_mci_host *host, ++ struct mmc_request *mrq, ++ struct mmc_command *cmd); ++static void phytium_mci_adma_reset(struct phytium_mci_host *host); ++static void phytium_mci_send_cmd(struct 
phytium_mci_host *host, u32 cmd, u32 arg); ++static bool phytium_mci_data_xfer_done(struct phytium_mci_host *host, u32 events, ++ struct mmc_request *mrq, struct mmc_data *data); ++static void phytium_mci_init_adma_table(struct phytium_mci_host *host, ++ struct phytium_mci_dma *dma); ++static void phytium_mci_init_hw(struct phytium_mci_host *host); ++static int phytium_mci_get_cd(struct mmc_host *mmc); ++static int phytium_mci_err_irq(struct phytium_mci_host *host, u32 dmac_events, u32 events); ++ ++static void sdr_set_bits(void __iomem *reg, u32 bs) ++{ ++ u32 val = readl(reg); ++ ++ val |= bs; ++ writel(val, reg); ++} ++ ++static void sdr_clr_bits(void __iomem *reg, u32 bs) ++{ ++ u32 val = readl(reg); ++ ++ val &= ~bs; ++ writel(val, reg); ++} ++ ++static void phytium_mci_reset_hw(struct phytium_mci_host *host) ++{ ++ sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_FIFO_RESET | MCI_CNTRL_DMA_RESET); ++ ++ while (readl(host->base + MCI_CNTRL) & (MCI_CNTRL_FIFO_RESET | MCI_CNTRL_DMA_RESET)) ++ cpu_relax(); ++ phytium_mci_send_cmd(host, MCI_CMD_UPD_CLK, 0); ++} ++ ++static void phytium_mci_update_external_clk(struct phytium_mci_host *host, u32 uhs_reg_value) ++{ ++ writel(0, host->base + MCI_UHS_REG_EXT); ++ writel(uhs_reg_value, host->base + MCI_UHS_REG_EXT); ++ while (!(readl(host->base + MCI_CCLK_RDY) & 0x1)) ++ cpu_relax(); ++ ++} ++ ++static void phytium_mci_prepare_data(struct phytium_mci_host *host, ++ struct mmc_request *mrq) ++{ ++ struct mmc_data *data = mrq->data; ++ ++ if (!(data->host_cookie & MCI_PREPARE_FLAG)) { ++ data->host_cookie |= MCI_PREPARE_FLAG; ++ data->sg_count = dma_map_sg(host->dev, data->sg, data->sg_len, ++ mmc_get_dma_dir(data)); ++ } ++} ++ ++static void phytium_mci_unprepare_data(struct phytium_mci_host *host, ++ struct mmc_request *mrq) ++{ ++ struct mmc_data *data = mrq->data; ++ ++ if (data->host_cookie & MCI_ASYNC_FLAG) ++ return; ++ ++ if (data->host_cookie & MCI_PREPARE_FLAG) { ++ dma_unmap_sg(host->dev, data->sg, data->sg_len, 
mmc_get_dma_dir(data)); ++ data->host_cookie &= ~MCI_PREPARE_FLAG; ++ } ++} ++ ++static void phytium_mci_send_cmd(struct phytium_mci_host *host, u32 cmd, u32 arg) ++{ ++ ++ writel(arg, host->base + MCI_CMDARG); ++ wmb(); /* drain writebuffer */ ++ ++ while (readl(host->base + MCI_STATUS) & MCI_STATUS_CARD_BUSY) ++ cpu_relax(); ++ ++ writel(MCI_CMD_START | cmd, host->base + MCI_CMD); ++ ++ while (readl(host->base + MCI_CMD) & MCI_CMD_START) ++ cpu_relax(); ++ ++} ++ ++static void phytium_mci_update_cmd11(struct phytium_mci_host *host, u32 cmd) ++{ ++ writel(MCI_CMD_START | cmd, host->base + MCI_CMD); ++ ++ while (readl(host->base + MCI_CMD) & MCI_CMD_START) ++ cpu_relax(); ++} ++ ++static void phytium_mci_set_clk(struct phytium_mci_host *host, struct mmc_ios *ios) ++{ ++ u32 div = 0xff, drv = 0, sample = 0; ++ unsigned long clk_rate; ++ u32 mci_cmd_bits = MCI_CMD_UPD_CLK; ++ u32 cmd_reg; ++ u32 cur_cmd_index; ++ u32 first_uhs_div, tmp_ext_reg; ++ ++ cmd_reg = readl(host->base + MCI_CMD); ++ cur_cmd_index = cmd_reg & 0x3F; ++ ++ if (cur_cmd_index == SD_SWITCH_VOLTAGE) ++ mci_cmd_bits |= MCI_CMD_VOLT_SWITCH; ++ if (ios->clock) { ++ if (host->current_ios_clk == ios->clock) ++ return; ++ ++ dev_dbg(host->dev, "will change clock, host->clk_rate: %ld, ios->clock: %d\n", ++ host->clk_rate, ios->clock); ++ ++ if (ios->clock >= 25000000) ++ tmp_ext_reg = 0x202; ++ else if (ios->clock == 400000) ++ tmp_ext_reg = 0x502; ++ else ++ tmp_ext_reg = 0x302; ++ ++ phytium_mci_update_external_clk(host, tmp_ext_reg); ++ sdr_clr_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); ++ ++ if (cur_cmd_index == SD_SWITCH_VOLTAGE) ++ phytium_mci_update_cmd11(host, mci_cmd_bits | cmd_reg); ++ else ++ phytium_mci_send_cmd(host, mci_cmd_bits, 0); ++ ++ clk_rate = host->clk_rate; ++ first_uhs_div = 1 + ((tmp_ext_reg >> 8)&0xFF); ++ div = clk_rate / (2 * first_uhs_div * ios->clock); ++ if (div > 2) { ++ sample = div / 2 + 1; ++ drv = sample - 1; ++ writel((sample << 16) | (drv << 8) | (div & 
0xff), ++ host->base + MCI_CLKDIV); ++ } else if (div == 2) { ++ drv = 0; ++ sample = 1; ++ writel((drv << 8) | (sample << 16) | (div & 0xff), ++ host->base + MCI_CLKDIV); ++ } ++ ++ dev_dbg(host->dev, "UHS_REG_EXT ext: %x, CLKDIV: %x\n", ++ readl(host->base + MCI_UHS_REG_EXT), readl(host->base + MCI_CLKDIV)); ++ ++ sdr_set_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); ++ ++ if (cur_cmd_index == SD_SWITCH_VOLTAGE) ++ phytium_mci_update_cmd11(host, mci_cmd_bits | cmd_reg); ++ else ++ phytium_mci_send_cmd(host, mci_cmd_bits, 0); ++ ++ host->current_ios_clk = ios->clock; ++ ++ dev_dbg(host->dev, "host->clk_rate: %ld, ios->clock: %d\n", ++ host->clk_rate, ios->clock); ++ } else { ++ host->current_ios_clk = 0; ++ sdr_clr_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); ++ sdr_clr_bits(host->base + MCI_UHS_REG_EXT, MCI_EXT_CLK_ENABLE); ++ dev_dbg(host->dev, "host->clk_rate: %ld, ios->clock: %d\n", ++ host->clk_rate, ios->clock); ++ } ++} ++ ++static inline u32 ++phytium_mci_cmd_find_resp(struct phytium_mci_host *host, ++ struct mmc_request *mrq, ++ struct mmc_command *cmd) ++{ ++ u32 resp; ++ ++ switch (mmc_resp_type(cmd)) { ++ case MMC_RSP_R1: ++ case MMC_RSP_R1B: ++ resp = 0x5; ++ break; ++ ++ case MMC_RSP_R2: ++ resp = 0x7; ++ break; ++ ++ case MMC_RSP_R3: ++ resp = 0x1; ++ break; ++ ++ case MMC_RSP_NONE: ++ default: ++ resp = 0x0; ++ break; ++ } ++ ++ return resp; ++} ++ ++static inline ++u32 phytium_mci_cmd_prepare_raw_cmd(struct phytium_mci_host *host, ++ struct mmc_request *mrq, ++ struct mmc_command *cmd) ++{ ++ u32 opcode = cmd->opcode; ++ u32 resp = phytium_mci_cmd_find_resp(host, mrq, cmd); ++ u32 rawcmd = ((opcode & 0x3f) | ((resp & 0x7) << 6)); ++ ++ if (opcode == MMC_GO_INACTIVE_STATE || ++ (opcode == SD_IO_RW_DIRECT && ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT)) ++ rawcmd |= (0x1 << 14); ++ else if (opcode == SD_SWITCH_VOLTAGE) ++ rawcmd |= (0x1 << 28); ++ ++ if (test_and_clear_bit(MCI_CARD_NEED_INIT, &host->flags)) ++ rawcmd |= (0x1 << 
15); ++ ++ if (cmd->data) { ++ struct mmc_data *data = cmd->data; ++ ++ rawcmd |= (0x1 << 9); ++ ++ if (data->flags & MMC_DATA_WRITE) ++ rawcmd |= (0x1 << 10); ++ } ++ ++ return (rawcmd | (0x1 << 29) | (0x1 << 31)); ++} ++ ++static inline void ++phytium_mci_adma_write_desc(struct phytium_mci_host *host, ++ struct phytium_adma2_64_desc *desc, ++ dma_addr_t addr, u32 len, u32 attribute) ++{ ++ desc->attribute = attribute; ++ desc->len = len; ++ desc->addr_lo = lower_32_bits(addr); ++ desc->addr_hi = upper_32_bits(addr); ++ dev_dbg(host->dev, "%s %d:addr_lo:0x%x ddr_hi:0x%x\n", __func__, ++ __LINE__, desc->addr_lo, desc->addr_hi); ++ ++ if ((attribute == 0x80000004) || (attribute == 0x8000000c)) { ++ desc->desc_lo = 0; ++ desc->desc_hi = 0; ++ } ++} ++ ++static void ++phytium_mci_data_sg_write_2_admc_table(struct phytium_mci_host *host, struct mmc_data *data) ++{ ++ struct phytium_adma2_64_desc *desc; ++ u32 dma_len, i; ++ dma_addr_t dma_address; ++ struct scatterlist *sg; ++ ++ phytium_mci_init_adma_table(host, &host->dma); ++ ++ desc = host->dma.adma_table; ++ for_each_sg(data->sg, sg, data->sg_count, i) { ++ dma_address = sg_dma_address(sg); ++ dma_len = sg_dma_len(sg); ++ ++ if (i == 0) { ++ if (sg_is_last(sg) || (data->sg_count == 1 && dma_len == SD_BLOCK_SIZE)) ++ phytium_mci_adma_write_desc(host, desc, dma_address, ++ dma_len, 0x8000000c); ++ else ++ phytium_mci_adma_write_desc(host, desc, dma_address, ++ dma_len, 0x8000001a); ++ } else if (sg_is_last(sg)) { ++ phytium_mci_adma_write_desc(host, desc, dma_address, ++ dma_len, 0x80000004); ++ } else { ++ phytium_mci_adma_write_desc(host, desc, dma_address, ++ dma_len, 0x80000012); ++ } ++ ++ desc++; ++ } ++} ++ ++static void ++phytium_mci_data_sg_write_2_fifo(struct phytium_mci_host *host, struct mmc_data *data) ++{ ++ struct scatterlist *sg; ++ u32 dma_len, i, j; ++ u32 *virt_addr; ++ ++ if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) { ++ writel(0x1<<10, host->base + MCI_CMD); ++ for_each_sg(data->sg, sg, 
data->sg_count, i) { ++ dma_len = sg_dma_len(sg); ++ virt_addr = sg_virt(data->sg); ++ for (j = 0; j < (dma_len / 4); j++) { ++ writel(*virt_addr, host->base + MCI_DATA); ++ virt_addr++; ++ } ++ } ++ } ++} ++ ++static void phytium_mci_restart_clk(struct phytium_mci_host *host) ++{ ++ u32 clk_div, uhs; ++ ++ while (readl(host->base + MCI_CMD) & MCI_CMD_START) ++ cpu_relax(); ++ sdr_clr_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); ++ clk_div = readl(host->base + MCI_CLKDIV); ++ uhs = readl(host->base + MCI_UHS_REG_EXT); ++ writel(0, host->base + MCI_UHS_REG_EXT); ++ writel(uhs, host->base + MCI_UHS_REG_EXT); ++ while (!(readl(host->base + MCI_CCLK_RDY) & 0x1)) ++ cpu_relax(); ++ ++ writel(clk_div, host->base + MCI_CLKDIV); ++ sdr_set_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); ++ writel(MCI_CMD_START | MCI_CMD_UPD_CLK, host->base + MCI_CMD); ++ while (readl(host->base + MCI_CMD) & MCI_CMD_START) ++ cpu_relax(); ++} ++ ++static int ++phytim_mci_start_multiple_write(struct phytium_mci_host *host, ++ struct mmc_request *mrq, u32 cnts, u32 offset) ++{ ++ u32 rawcmd, cmd_status; ++ struct mmc_command *cmd = mrq->cmd; ++ u32 *rsp = cmd->resp; ++ unsigned long deadline_time; ++ ++ if (!(host->caps & MMC_CAP_NONREMOVABLE) && readl(host->base + MCI_CARD_DETECT)) ++ return -ESHUTDOWN; ++ ++ while ((readl(host->base + MCI_STATUS) & (MCI_STATUS_CARD_BUSY))) ++ cpu_relax(); ++ ++ writel(0xffffe, host->base + MCI_RAW_INTS); ++ rawcmd = phytium_mci_cmd_prepare_raw_cmd(host, mrq, cmd); ++ writel(mrq->data->blksz, host->base + MCI_BLKSIZ); ++ writel(cnts * mrq->data->blksz, host->base + MCI_BYTCNT); ++ writel(cmd->arg + offset, host->base + MCI_CMDARG); ++ writel(rawcmd, host->base + MCI_CMD); ++ deadline_time = jiffies + msecs_to_jiffies(200); ++ ++ cmd_status = readl(host->base + MCI_RAW_INTS); ++ while (!(cmd_status & MCI_MASKED_INTS_CMD)) { ++ if (!(host->caps & MMC_CAP_NONREMOVABLE) && readl(host->base + MCI_CARD_DETECT)) ++ return -ESHUTDOWN; ++ ++ 
cmd_status = readl(host->base + MCI_RAW_INTS); ++ if (cmd_err_ints_mask & cmd_status) ++ return -ESHUTDOWN; ++ ++ if (cmd_status & MCI_MASKED_INTS_CMD) ++ break; ++ ++ if (time_after(jiffies, deadline_time)) ++ return -ESHUTDOWN; ++ } ++ ++ if (cmd_status & MCI_MASKED_INTS_CMD) { ++ if (cmd->flags & MMC_RSP_136) { ++ rsp[3] = readl(host->base + MCI_RESP0); ++ rsp[2] = readl(host->base + MCI_RESP1); ++ rsp[1] = readl(host->base + MCI_RESP2); ++ rsp[0] = readl(host->base + MCI_RESP3); ++ } else { ++ rsp[0] = readl(host->base + MCI_RESP0); ++ } ++ } ++ deadline_time = jiffies + msecs_to_jiffies(1000); ++ while (!(cmd_status & MCI_MASKED_INTS_DTO)) { ++ if (!(host->caps & MMC_CAP_NONREMOVABLE) && readl(host->base + MCI_CARD_DETECT)) ++ return -ESHUTDOWN; ++ cmd_status = readl(host->base + MCI_RAW_INTS); ++ if (cmd_err_ints_mask & cmd_status) ++ return -ESHUTDOWN; ++ if (cmd_status & MCI_MASKED_INTS_DTO) ++ return 0; ++ if (time_after(jiffies, deadline_time)) ++ return -ESHUTDOWN; ++ } ++ return 0; ++} ++ ++static int ++phytium_mci_start_sbc_stop_cmd(struct phytium_mci_host *host, struct mmc_request *mrq, ++ struct mmc_command *cmd, u32 arg) ++{ ++ u32 rawcmd, cmd_status; ++ u32 *rsp = cmd->resp; ++ unsigned long deadline_time; ++ ++ writel(0xffffe, host->base + MCI_RAW_INTS); ++ ++ while ((readl(host->base + MCI_STATUS) & (MCI_STATUS_CARD_BUSY))) ++ cpu_relax(); ++ ++ rawcmd = phytium_mci_cmd_prepare_raw_cmd(host, mrq, cmd); ++ writel(arg, host->base + MCI_CMDARG); ++ writel(rawcmd, host->base + MCI_CMD); ++ ++ deadline_time = jiffies + msecs_to_jiffies(200); ++ cmd_status = readl(host->base + MCI_RAW_INTS); ++ while (!(cmd_status & MCI_MASKED_INTS_CMD)) { ++ if (!(host->caps & MMC_CAP_NONREMOVABLE) && readl(host->base + MCI_CARD_DETECT)) ++ return -ENOMEDIUM; ++ ++ cmd_status = readl(host->base + MCI_RAW_INTS); ++ if (cmd_err_ints_mask & cmd_status) ++ return -ETIMEDOUT; ++ ++ if (cmd_status & MCI_MASKED_INTS_CMD) ++ break; ++ ++ if (time_after(jiffies, 
deadline_time)) ++ return -ETIMEDOUT; ++ } ++ ++ if (cmd_status & MCI_MASKED_INTS_CMD) { ++ if (cmd->flags & MMC_RSP_136) { ++ rsp[3] = readl(host->base + MCI_RESP0); ++ rsp[2] = readl(host->base + MCI_RESP1); ++ rsp[1] = readl(host->base + MCI_RESP2); ++ rsp[0] = readl(host->base + MCI_RESP3); ++ } else { ++ rsp[0] = readl(host->base + MCI_RESP0); ++ } ++ } ++ ++ if (cmd_err_ints_mask & cmd_status) ++ return -ETIMEDOUT; ++ ++ return 0; ++} ++ ++static void ++phytium_mci_start_write_multiple_non_dma(struct mmc_host *mmc, struct mmc_request *mrq) ++{ ++ struct phytium_mci_host *host = mmc_priv(mmc); ++ struct mmc_data *data = mrq->data; ++ u32 write_cnts, last_cnts; ++ u32 i, j, k, send_cnt_one_sg, block_offset; ++ int ret = 0, dma_len; ++ struct scatterlist *sg; ++ u32 *virt_addr = NULL; ++ ++ write_cnts = data->blocks / 4; ++ (data->blocks % 4) ? write_cnts++ : write_cnts; ++ last_cnts = data->blocks % 4; ++ if (!(host->caps & MMC_CAP_NONREMOVABLE) && readl(host->base + MCI_CARD_DETECT)) { ++ ret = -ENOMEDIUM; ++ goto write_err; ++ } ++ ++ dev_dbg(host->dev, "%s: cmd:%d, block counts:%d\n", ++ __func__, mrq->cmd->opcode, data->blocks); ++ ++ sdr_clr_bits(host->base + MCI_CNTRL, MCI_CNTRL_USE_INTERNAL_DMAC); ++ sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_FIFO_RESET); ++ while (readl(host->base + MCI_CNTRL) & MCI_CNTRL_FIFO_RESET) ++ cpu_relax(); ++ sdr_clr_bits(host->base + MCI_BUS_MODE, MCI_BUS_MODE_DE); ++ ++ if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) { ++ block_offset = 0; ++ for_each_sg(data->sg, sg, data->sg_count, i) { ++ /* Each SG data transfor starts */ ++ dma_len = sg_dma_len(sg); ++ send_cnt_one_sg = (dma_len / MCI_MAX_FIFO_CNT) + 1; ++ virt_addr = sg_virt(sg); ++ for (k = 0; k < send_cnt_one_sg; k++) { ++ if (dma_len && dma_len >= MCI_MAX_FIFO_CNT) { ++ /*first write sbc cmd*/ ++ ret = phytium_mci_start_sbc_stop_cmd(host, mrq, ++ mrq->sbc, 4); ++ if (ret) ++ goto write_err; ++ writel(0x1 << 10, host->base + MCI_CMD); ++ for (j = 0; j < 
(MCI_MAX_FIFO_CNT / 4); j++) { ++ writel(*virt_addr, host->base + MCI_DATA); ++ virt_addr++; ++ } ++ ++ /*second write cmd25 here*/ ++ ret = phytim_mci_start_multiple_write(host, mrq, 4, ++ block_offset); ++ if (ret) ++ goto write_err; ++ block_offset += 4; ++ dma_len -= MCI_MAX_FIFO_CNT; ++ } else if (dma_len > 0) { ++ /*first write sbc cmd*/ ++ last_cnts = dma_len / 512; ++ ret = phytium_mci_start_sbc_stop_cmd(host, mrq, mrq->sbc, ++ last_cnts); ++ if (ret) ++ goto write_err; ++ writel(0x1 << 10, host->base + MCI_CMD); ++ for (j = 0; j < (dma_len / 4); j++) { ++ writel(*virt_addr, host->base + MCI_DATA); ++ virt_addr++; ++ } ++ /*second write cmd25 here*/ ++ ret = phytim_mci_start_multiple_write(host, mrq, last_cnts, ++ block_offset); ++ if (ret) ++ goto write_err; ++ block_offset += last_cnts; ++ dma_len = 0; ++ } else { ++ dev_dbg(host->dev, "%s: sg %d end\n", __func__, i); ++ break; ++ } ++ } ++ } ++ } ++ ++write_err: ++ host->data = NULL; ++ host->cmd = NULL; ++ host->mrq = NULL; ++ writel(0xffffe, host->base + MCI_RAW_INTS); ++ if (ret) { ++ data->bytes_xfered = 0; ++ if (ret == -ESHUTDOWN) { ++ sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_FIFO_RESET); ++ while (readl(host->base + MCI_CNTRL) & MCI_CNTRL_FIFO_RESET) ++ cpu_relax(); ++ ++ sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_CONTROLLER_RESET); ++ while (readl(host->base + MCI_STATUS) & MCI_STATUS_CARD_BUSY) ++ sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_CONTROLLER_RESET); ++ phytium_mci_restart_clk(host); ++ phytium_mci_start_sbc_stop_cmd(host, mrq, mrq->stop, mrq->stop->arg); ++ } ++ data->error = -ETIMEDOUT; ++ mrq->cmd->error = -ETIMEDOUT; ++ mmc_request_done(host->mmc, mrq); ++ return; ++ } ++ data->bytes_xfered = data->blocks * data->blksz; ++ mmc_request_done(host->mmc, mrq); ++} ++ ++static void ++phytium_mci_start_data(struct phytium_mci_host *host, struct mmc_request *mrq, ++ struct mmc_command *cmd, struct mmc_data *data) ++{ ++ bool read; ++ u32 rawcmd; ++ unsigned long flags; ++ ++ 
++ WARN_ON(host->cmd); ++ host->cmd = cmd; ++ cmd->error = 0; ++ WARN_ON(host->data); ++ host->data = data; ++ read = data->flags & MMC_DATA_READ; ++ ++ if (!(host->caps & MMC_CAP_NONREMOVABLE) && readl(host->base + MCI_CARD_DETECT)) { ++ phytium_mci_err_irq(host, 0, MCI_INT_MASK_RTO); ++ return; ++ } ++ /* clear interrupts */ ++ writel(0xffffe, host->base + MCI_RAW_INTS); ++ ++ sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_FIFO_RESET | MCI_CNTRL_DMA_RESET); ++ ++ while (readl(host->base + MCI_CNTRL) & (MCI_CNTRL_FIFO_RESET | MCI_CNTRL_DMA_RESET)) ++ cpu_relax(); ++ ++ if (host->adtc_type == COMMOM_ADTC) ++ sdr_clr_bits(host->base + MCI_CNTRL, MCI_CNTRL_USE_INTERNAL_DMAC); ++ else ++ sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_USE_INTERNAL_DMAC); ++ wmb(); /* drain writebuffer */ ++ sdr_clr_bits(host->base + MCI_CNTRL, MCI_CNTRL_INT_ENABLE); ++ ++ rawcmd = phytium_mci_cmd_prepare_raw_cmd(host, mrq, cmd); ++ if (host->is_use_dma && host->adtc_type == BLOCK_RW_ADTC) ++ phytium_mci_data_sg_write_2_admc_table(host, data); ++ else ++ phytium_mci_data_sg_write_2_fifo(host, data); ++ ++ spin_lock_irqsave(&host->lock, flags); ++ sdr_set_bits(host->base + MCI_INT_MASK, cmd_ints_mask | data_ints_mask); ++ if (host->is_use_dma && host->adtc_type == BLOCK_RW_ADTC) { ++ sdr_set_bits(host->base + MCI_DMAC_INT_ENA, dmac_ints_mask); ++ /* Enable the IDMAC */ ++ sdr_set_bits(host->base + MCI_BUS_MODE, MCI_BUS_MODE_DE); ++ writel((u32)host->dma.adma_addr, host->base + MCI_DESC_LIST_ADDRL); ++ writel((u32)(host->dma.adma_addr >> 32), host->base + MCI_DESC_LIST_ADDRH); ++ } ++ writel(mrq->data->blksz, host->base + MCI_BLKSIZ); ++ writel(mrq->data->blocks * mrq->data->blksz, host->base + MCI_BYTCNT); ++ sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_INT_ENABLE); ++ writel(cmd->arg, host->base + MCI_CMDARG); ++ wmb(); /* drain writebuffer */ ++ writel(rawcmd, host->base + MCI_CMD); ++ spin_unlock_irqrestore(&host->lock, flags); ++} ++ ++static void phytium_mci_track_cmd_data(struct 
phytium_mci_host *host, ++ struct mmc_command *cmd, ++ struct mmc_data *data) ++{ ++ if (host->error) ++ dev_dbg(host->dev, "%s: cmd=%d arg=%08X; host->error=0x%08X\n", ++ __func__, cmd->opcode, cmd->arg, host->error); ++} ++ ++static void phytium_mci_request_done(struct phytium_mci_host *host, struct mmc_request *mrq) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&host->lock, flags); ++ host->mrq = NULL; ++ if (host->cmd) ++ host->cmd = NULL; ++ spin_unlock_irqrestore(&host->lock, flags); ++ phytium_mci_track_cmd_data(host, mrq->cmd, mrq->data); ++ ++ if (mrq->data) ++ phytium_mci_unprepare_data(host, mrq); ++ ++ mmc_request_done(host->mmc, mrq); ++} ++ ++static bool phytium_mci_cmd_done(struct phytium_mci_host *host, int events, ++ struct mmc_request *mrq, struct mmc_command *cmd) ++{ ++ bool done = false; ++ unsigned long flags; ++ u32 *rsp = cmd->resp; ++ ++ if (!(events & (MCI_RAW_INTS_RCRC | MCI_RAW_INTS_RE | MCI_RAW_INTS_CMD | ++ MCI_RAW_INTS_RTO | MCI_INT_MASK_HTO))) { ++ dev_err(host->dev, "No interrupt generation:h%x\n", events); ++ return done; ++ } ++ ++ spin_lock_irqsave(&host->lock, flags); ++ done = !host->cmd; ++ host->cmd = NULL; ++ if (done) { ++ spin_unlock_irqrestore(&host->lock, flags); ++ return true; ++ } ++ sdr_clr_bits(host->base + MCI_INT_MASK, cmd_ints_mask); ++ spin_unlock_irqrestore(&host->lock, flags); ++ ++ if (cmd->flags & MMC_RSP_PRESENT) { ++ if (cmd->flags & MMC_RSP_136) { ++ rsp[3] = readl(host->base + MCI_RESP0); ++ rsp[2] = readl(host->base + MCI_RESP1); ++ rsp[1] = readl(host->base + MCI_RESP2); ++ rsp[0] = readl(host->base + MCI_RESP3); ++ } else { ++ rsp[0] = readl(host->base + MCI_RESP0); ++ } ++ ++ if (cmd->opcode == SD_SEND_RELATIVE_ADDR) ++ host->current_rca = rsp[0] & 0xFFFF0000; ++ } ++ if (!(events & (MCI_RAW_INTS_CMD | MCI_INT_MASK_HTO))) { ++ if (!(host->caps & MMC_CAP_NONREMOVABLE) && (events & MCI_RAW_INTS_RTO) ++ && readl(host->base + MCI_CARD_DETECT)) { ++ cmd->error = -ENOMEDIUM; ++ rsp[0] = 0; ++ } else 
if (events & MCI_RAW_INTS_RTO || ++ (cmd->opcode != MMC_SEND_TUNING_BLOCK && ++ cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)) { ++ cmd->error = -ETIMEDOUT; ++ } else if (events & MCI_RAW_INTS_RCRC) { ++ cmd->error = -EILSEQ; ++ } else { ++ cmd->error = -ETIMEDOUT; ++ } ++ } ++ phytium_mci_cmd_next(host, mrq, cmd); ++ return true; ++} ++ ++static void phytium_mci_start_command(struct phytium_mci_host *host, ++ struct mmc_request *mrq, ++ struct mmc_command *cmd) ++{ ++ u32 rawcmd; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&host->lock, flags); ++ WARN_ON(host->cmd); ++ host->cmd = cmd; ++ cmd->error = 0; ++ writel(0xffffe, host->base + MCI_RAW_INTS); ++ ++ rawcmd = phytium_mci_cmd_prepare_raw_cmd(host, mrq, cmd); ++ spin_unlock_irqrestore(&host->lock, flags); ++ ++ if (!(host->caps & MMC_CAP_NONREMOVABLE) && readl(host->base + MCI_CARD_DETECT)) { ++ phytium_mci_cmd_done(host, MCI_RAW_INTS_RTO, mrq, cmd); ++ return; ++ } ++ ++ spin_lock_irqsave(&host->lock, flags); ++ sdr_set_bits(host->base + MCI_INT_MASK, cmd_ints_mask); ++ writel(cmd->arg, host->base + MCI_CMDARG); ++ writel(rawcmd, host->base + MCI_CMD); ++ spin_unlock_irqrestore(&host->lock, flags); ++} ++ ++static void ++phytium_mci_cmd_next(struct phytium_mci_host *host, struct mmc_request *mrq, ++ struct mmc_command *cmd) ++{ ++ if ((cmd->error && !(cmd->opcode == MMC_SEND_TUNING_BLOCK || ++ cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)) || ++ (mrq->sbc && mrq->sbc->error)) { ++ phytium_mci_request_done(host, mrq); ++ } else if (cmd == mrq->sbc) { ++ if ((mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK) || ++ (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) || ++ (mrq->cmd->opcode == MMC_READ_SINGLE_BLOCK) || ++ (mrq->cmd->opcode == MMC_WRITE_BLOCK)) { ++ dev_dbg(host->dev, "%s %d:sbc done and next cmd :%d length:%d\n", ++ __func__, __LINE__, mrq->cmd->opcode, mrq->data->sg->length); ++ phytium_mci_prepare_data(host, mrq); ++ if (host->is_use_dma) ++ host->adtc_type = BLOCK_RW_ADTC; ++ else ++ host->adtc_type = 
COMMOM_ADTC; ++ phytium_mci_start_data(host, mrq, mrq->cmd, mrq->data); ++ } else { ++ dev_err(host->dev, "%s %d:ERROR: cmd %d followers the SBC\n", ++ __func__, __LINE__, cmd->opcode); ++ } ++ } else if (!cmd->data) { ++ phytium_mci_request_done(host, mrq); ++ } ++} ++ ++static void phytium_mci_ops_request(struct mmc_host *mmc, struct mmc_request *mrq) ++{ ++ struct phytium_mci_host *host = mmc_priv(mmc); ++ ++ host->error = 0; ++ WARN_ON(host->mrq); ++ host->mrq = mrq; ++ ++ while ((readl(host->base + MCI_STATUS) & (MCI_STATUS_CARD_BUSY))) ++ cpu_relax(); ++ ++ dev_dbg(host->dev, "%s %d: cmd:%d arg:0x%x\n", __func__, __LINE__, ++ mrq->cmd->opcode, mrq->cmd->arg); ++ ++ if (host->is_device_x100 && mrq->sbc && mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) { ++ phytium_mci_start_write_multiple_non_dma(mmc, mrq); ++ return; ++ } ++ ++ if (mrq->sbc) { ++ phytium_mci_start_command(host, mrq, mrq->sbc); ++ return; ++ } ++ if (mrq->data) { ++ phytium_mci_prepare_data(host, mrq); ++ ++ if ((mrq->data->sg->length >= 512) && host->is_use_dma && ++ ((mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK) || ++ (mrq->cmd->opcode == MMC_READ_SINGLE_BLOCK) || ++ (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) || ++ (mrq->cmd->opcode == MMC_WRITE_BLOCK) || ++ (mrq->cmd->opcode == SD_IO_RW_EXTENDED))) ++ ++ host->adtc_type = BLOCK_RW_ADTC; ++ else ++ host->adtc_type = COMMOM_ADTC; ++ ++ phytium_mci_start_data(host, mrq, mrq->cmd, mrq->data); ++ return; ++ } ++ phytium_mci_start_command(host, mrq, mrq->cmd); ++} ++ ++static void phytium_mci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq) ++{ ++ struct phytium_mci_host *host = mmc_priv(mmc); ++ struct mmc_data *data = mrq->data; ++ ++ if (!data) ++ return; ++ ++ phytium_mci_prepare_data(host, mrq); ++ data->host_cookie |= MCI_ASYNC_FLAG; ++} ++ ++static void phytium_mci_post_req(struct mmc_host *mmc, struct mmc_request *mrq, ++ int err) ++{ ++ struct phytium_mci_host *host = mmc_priv(mmc); ++ struct mmc_data *data = mrq->data; ++ ++ if 
(!data) ++ return; ++ ++ if (data->host_cookie & MCI_ASYNC_FLAG) { ++ data->host_cookie &= ~MCI_ASYNC_FLAG; ++ phytium_mci_unprepare_data(host, mrq); ++ } ++} ++ ++static void phytium_mci_data_read_without_dma(struct phytium_mci_host *host, ++ struct mmc_data *data) ++{ ++ u32 length, i, data_val, dma_len, tmp = 0; ++ u32 *virt_addr; ++ unsigned long flags; ++ struct scatterlist *sg; ++ ++ length = data->blocks * data->blksz; ++ ++ if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) { ++ spin_lock_irqsave(&host->lock, flags); ++ if (data->host_cookie & MCI_ASYNC_FLAG) { ++ tmp = MCI_ASYNC_FLAG; ++ phytium_mci_post_req(host->mmc, data->mrq, 0); ++ } else { ++ phytium_mci_unprepare_data(host, data->mrq); ++ } ++ ++ for_each_sg(data->sg, sg, data->sg_count, i) { ++ dma_len = sg_dma_len(sg); ++ virt_addr = sg_virt(data->sg); ++ ++ for (i = 0; i < (dma_len / 4); i++) { ++ data_val = readl(host->base + MCI_DATA); ++ memcpy(virt_addr, &data_val, 4); ++ ++virt_addr; ++ } ++ } ++ ++ if (tmp & MCI_ASYNC_FLAG) ++ phytium_mci_pre_req(host->mmc, data->mrq); ++ else ++ phytium_mci_prepare_data(host, data->mrq); ++ ++ spin_unlock_irqrestore(&host->lock, flags); ++ } ++ data->bytes_xfered = length; ++} ++ ++static void phytium_mci_data_xfer_next(struct phytium_mci_host *host, ++ struct mmc_request *mrq, ++ struct mmc_data *data) ++{ ++ if (mmc_op_multi(mrq->cmd->opcode) && mrq->stop && ++ (data->error && !mrq->sbc)) { ++ while ((readl(host->base + MCI_STATUS) & (MCI_STATUS_CARD_BUSY))) ++ cpu_relax(); ++ phytium_mci_start_command(host, mrq, mrq->stop); ++ } else { ++ phytium_mci_request_done(host, mrq); ++ } ++} ++ ++static bool phytium_mci_data_xfer_done(struct phytium_mci_host *host, u32 events, ++ struct mmc_request *mrq, struct mmc_data *data) ++{ ++ unsigned long flags; ++ bool done; ++ ++ unsigned int check_data = events & (MCI_RAW_INTS_DTO | MCI_RAW_INTS_RCRC | ++ MCI_RAW_INTS_DCRC | MCI_RAW_INTS_RE | ++ MCI_RAW_INTS_DRTO | MCI_RAW_INTS_EBE | ++ MCI_DMAC_STATUS_AIS | 
MCI_DMAC_STATUS_DU | ++ MCI_RAW_INTS_SBE_BCI | MCI_INT_MASK_RTO); ++ ++ spin_lock_irqsave(&host->lock, flags); ++ done = !host->data; ++ ++ if (check_data || host->data) ++ host->data = NULL; ++ spin_unlock_irqrestore(&host->lock, flags); ++ ++ if (done) ++ return true; ++ if (check_data) { ++ spin_lock_irqsave(&host->lock, flags); ++ sdr_clr_bits(host->base + MCI_DMAC_INT_ENA, dmac_ints_mask); ++ sdr_clr_bits(host->base + MCI_INT_MASK, data_ints_mask); ++ /* Stop the IDMAC running */ ++ sdr_clr_bits(host->base + MCI_BUS_MODE, MCI_BUS_MODE_DE); ++ dev_dbg(host->dev, "DMA stop\n"); ++ spin_unlock_irqrestore(&host->lock, flags); ++ ++ if (events & MCI_RAW_INTS_DTO) { ++ if (!host->is_use_dma || ++ (host->is_use_dma && host->adtc_type == COMMOM_ADTC && ++ (mrq->cmd->flags & MMC_CMD_MASK) == MMC_CMD_ADTC)) ++ phytium_mci_data_read_without_dma(host, data); ++ else ++ data->bytes_xfered = data->blocks * data->blksz; ++ } else { ++ data->bytes_xfered = 0; ++ if (!(host->caps & MMC_CAP_NONREMOVABLE) ++ && readl(host->base + MCI_CARD_DETECT) ++ && (events & cmd_err_ints_mask)) { ++ data->error = -ENOMEDIUM; ++ data->mrq->cmd->error = -ENOMEDIUM; ++ } else if (events & (MCI_RAW_INTS_DCRC | MCI_RAW_INTS_EBE | ++ MCI_RAW_INTS_SBE_BCI)) { ++ data->error = -EILSEQ; ++ } else { ++ data->error = -ETIMEDOUT; ++ } ++ } ++ ++ phytium_mci_data_xfer_next(host, mrq, data); ++ done = true; ++ } ++ return done; ++} ++ ++static int phytium_mci_card_busy(struct mmc_host *mmc) ++{ ++ struct phytium_mci_host *host = mmc_priv(mmc); ++ u32 status; ++ ++ status = readl(host->base + MCI_STATUS); ++ ++ return !!(status & MCI_STATUS_CARD_BUSY); ++} ++ ++static void __phytium_mci_enable_sdio_irq(struct phytium_mci_host *host, int enable) ++{ ++ if (enable) ++ sdr_set_bits(host->base + MCI_INT_MASK, MCI_INT_MASK_SDIO); ++ else ++ sdr_clr_bits(host->base + MCI_INT_MASK, MCI_INT_MASK_SDIO); ++} ++ ++static void phytium_mci_enable_sdio_irq(struct mmc_host *mmc, int enable) ++{ ++ struct phytium_mci_host 
*host = mmc_priv(mmc); ++ ++ __phytium_mci_enable_sdio_irq(host, enable); ++} ++ ++static void hotplug_timer_func(struct timer_list *t) ++{ ++ struct phytium_mci_host *host; ++ u32 status; ++ ++ host = from_timer(host, t, hotplug_timer); ++ if (!host) ++ return; ++ ++ status = readl(host->base + MCI_CARD_DETECT); ++ ++ if (status & 0x1) { ++ if (host->mmc->card) { ++ cancel_delayed_work(&host->mmc->detect); ++ mmc_detect_change(host->mmc, msecs_to_jiffies(100)); ++ } ++ } else { ++ cancel_delayed_work(&host->mmc->detect); ++ mmc_detect_change(host->mmc, msecs_to_jiffies(200)); ++ } ++} ++ ++static int phytium_mci_err_irq(struct phytium_mci_host *host, u32 dmac_events, u32 events) ++{ ++ struct mmc_request *mrq; ++ struct mmc_command *cmd; ++ struct mmc_data *data; ++ ++ mrq = host->mrq; ++ cmd = host->cmd; ++ data = host->data; ++ ++ if (cmd && (cmd == mrq->sbc)) { ++ phytium_mci_cmd_done(host, MCI_RAW_INTS_RTO, mrq, mrq->sbc); ++ } else if (cmd && (cmd == mrq->stop)) { ++ phytium_mci_cmd_done(host, MCI_RAW_INTS_RTO, mrq, mrq->stop); ++ } else if (data) { ++ data->error = -ETIMEDOUT; ++ if ((data->flags & MMC_DATA_READ) == MMC_DATA_READ || ++ (data->flags & MMC_DATA_WRITE) == MMC_DATA_WRITE) ++ phytium_mci_data_xfer_done(host, events | dmac_events, mrq, data); ++ } else if (cmd) { ++ phytium_mci_cmd_done(host, MCI_RAW_INTS_RTO, mrq, mrq->cmd); ++ } ++ ++ return 0; ++} ++ ++static irqreturn_t phytium_mci_irq(int irq, void *dev_id) ++{ ++ struct phytium_mci_host *host = (struct phytium_mci_host *) dev_id; ++ unsigned long flags; ++ struct mmc_request *mrq; ++ struct mmc_command *cmd; ++ struct mmc_data *data; ++ u32 events, event_mask, dmac_events, dmac_evt_mask; ++ ++ if (!host) ++ return IRQ_NONE; ++ writel(0, host->base + 0xfd0); ++ ++ spin_lock_irqsave(&host->lock, flags); ++ events = readl(host->base + MCI_RAW_INTS); ++ dmac_events = readl(host->base + MCI_DMAC_STATUS); ++ event_mask = readl(host->base + MCI_INT_MASK); ++ dmac_evt_mask = readl(host->base + 
MCI_DMAC_INT_ENA); ++ if ((!events) && (!(dmac_events&0x1fff))) { ++ spin_unlock_irqrestore(&host->lock, flags); ++ return IRQ_NONE; ++ } ++ dev_dbg(host->dev, "%s:events:%x,mask:0x%x,dmac_events:%x,dmac_mask:0x%x,cmd:%d\n", ++ __func__, events, event_mask, dmac_events, dmac_evt_mask, ++ host->mrq ? host->mrq->cmd->opcode : 255); ++ ++ mrq = host->mrq; ++ cmd = host->cmd; ++ data = host->data; ++ ++ if (((events & event_mask) & MCI_RAW_INTS_SDIO) && ++ ((events == 0x10001) || (events == 0x10000) || (events == 0x10040))) { ++ writel(events, host->base + MCI_RAW_INTS); ++ __phytium_mci_enable_sdio_irq(host, 0); ++ sdio_signal_irq(host->mmc); ++ spin_unlock_irqrestore(&host->lock, flags); ++ goto irq_out; ++ } ++ ++ writel(events, host->base + MCI_RAW_INTS); ++ writel(dmac_events, host->base + MCI_DMAC_STATUS); ++ spin_unlock_irqrestore(&host->lock, flags); ++ ++ if (((events & event_mask) == 0) && ((dmac_evt_mask & dmac_events) == 0)) ++ goto irq_out; ++ ++ if (((events & event_mask) & MCI_RAW_INTS_CD) && !(host->caps & MMC_CAP_NONREMOVABLE)) { ++ mod_timer(&host->hotplug_timer, jiffies + usecs_to_jiffies(20000)); ++ dev_dbg(host->dev, "sd status changed here ! 
status:[%d] [%s %d]", ++ readl(host->base + MCI_CARD_DETECT), __func__, __LINE__); ++ ++ if ((events & event_mask) == MCI_RAW_INTS_CD) ++ goto irq_out; ++ } ++ ++ if (!mrq) { ++ if (events & MCI_RAW_INTS_HLE) ++ dev_dbg(host->dev, ++ "%s: MRQ=NULL and HW write locked, events=%08x,event_mask=%08x\n", ++ __func__, events, event_mask); ++ else ++ dev_dbg(host->dev, "%s: MRQ=NULL events:%08X evt_mask=%08X,sd_status:%d\n", ++ __func__, events, event_mask, readl(host->base + MCI_CARD_DETECT)); ++ goto irq_out; ++ } ++ ++ if ((dmac_events & dmac_err_ints_mask) || (events & cmd_err_ints_mask)) { ++ dev_dbg(host->dev, "ERR:events:%x,mask:0x%x,dmac_evts:%x,dmac_mask:0x%x,cmd:%d\n", ++ events, event_mask, dmac_events, dmac_evt_mask, mrq->cmd->opcode); ++ phytium_mci_err_irq(host, dmac_events & dmac_err_ints_mask, ++ events & cmd_err_ints_mask); ++ goto irq_out; ++ } ++ ++ if ((events & MCI_MASKED_INTS_DTO) && (events & MCI_MASKED_INTS_CMD)) { ++ phytium_mci_cmd_done(host, events, mrq, cmd); ++ phytium_mci_data_xfer_done(host, (events & data_ints_mask) | ++ (dmac_events & dmac_ints_mask), mrq, data); ++ } else if (events & MCI_MASKED_INTS_CMD || ++ ((events & MCI_INT_MASK_HTO) && (cmd->opcode == SD_SWITCH_VOLTAGE))) { ++ phytium_mci_cmd_done(host, events, mrq, cmd); ++ } else if (events & MCI_MASKED_INTS_DTO) { ++ phytium_mci_data_xfer_done(host, (events & data_ints_mask) | ++ (dmac_events & dmac_ints_mask), mrq, data); ++ } ++ ++irq_out: ++ return IRQ_HANDLED; ++} ++ ++static void phytium_mci_init_hw(struct phytium_mci_host *host) ++{ ++ u32 val; ++ ++ sdr_set_bits(host->base + MCI_PWREN, MCI_PWREN_ENABLE); ++ sdr_set_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); ++ sdr_set_bits(host->base + MCI_UHS_REG_EXT, MCI_EXT_CLK_ENABLE); ++ sdr_clr_bits(host->base + MCI_UHS_REG, MCI_UHS_REG_VOLT); ++ ++ phytium_mci_reset_hw(host); ++ ++ if (host->mmc->caps & MMC_CAP_NONREMOVABLE) ++ sdr_set_bits(host->base + MCI_CARD_RESET, MCI_CARD_RESET_ENABLE); ++ else ++ 
sdr_clr_bits(host->base + MCI_CARD_RESET, MCI_CARD_RESET_ENABLE); ++ ++ writel(0, host->base + MCI_INT_MASK); ++ val = readl(host->base + MCI_RAW_INTS); ++ writel(val, host->base + MCI_RAW_INTS); ++ writel(0, host->base + MCI_DMAC_INT_ENA); ++ val = readl(host->base + MCI_DMAC_STATUS); ++ writel(val, host->base + MCI_DMAC_STATUS); ++ if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE)) ++ writel(MCI_INT_MASK_CD, host->base + MCI_INT_MASK); ++ ++ sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_INT_ENABLE | ++ MCI_CNTRL_USE_INTERNAL_DMAC); ++ ++ writel(0xFFFFFFFF, host->base + MCI_TMOUT); ++ dev_info(host->dev, "init hardware done!"); ++ ++} ++ ++void phytium_mci_deinit_hw(struct phytium_mci_host *host) ++{ ++ u32 val; ++ ++ sdr_clr_bits(host->base + MCI_PWREN, MCI_PWREN_ENABLE); ++ sdr_clr_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); ++ sdr_clr_bits(host->base + MCI_UHS_REG_EXT, MCI_EXT_CLK_ENABLE); ++ sdr_clr_bits(host->base + MCI_UHS_REG, MCI_UHS_REG_VOLT); ++ writel(0, host->base + MCI_INT_MASK); ++ val = readl(host->base + MCI_RAW_INTS); ++ writel(val, host->base + MCI_RAW_INTS); ++ writel(0, host->base + MCI_DMAC_INT_ENA); ++ val = readl(host->base + MCI_DMAC_STATUS); ++ writel(val, host->base + MCI_DMAC_STATUS); ++ if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE)) ++ writel(MCI_INT_MASK_CD, host->base + MCI_INT_MASK); ++} ++EXPORT_SYMBOL_GPL(phytium_mci_deinit_hw); ++ ++static void phytium_mci_adma_reset(struct phytium_mci_host *host) ++{ ++ u32 bmod = readl(host->base + MCI_BUS_MODE); ++ ++ bmod |= MCI_BUS_MODE_SWR; ++ writel(bmod, host->base + MCI_BUS_MODE); ++} ++ ++static void phytium_mci_init_adma_table(struct phytium_mci_host *host, ++ struct phytium_mci_dma *dma) ++{ ++ struct phytium_adma2_64_desc *adma_table = dma->adma_table; ++ dma_addr_t dma_addr; ++ int i; ++ ++ memset(adma_table, 0, sizeof(struct phytium_adma2_64_desc) * MAX_BD_NUM); ++ ++ for (i = 0; i < (MAX_BD_NUM - 1); i++) { ++ dma_addr = dma->adma_addr + sizeof(*adma_table) * (i + 1); ++ 
adma_table[i].desc_lo = lower_32_bits(dma_addr); ++ adma_table[i].desc_hi = upper_32_bits(dma_addr); ++ adma_table[i].attribute = 0; ++ adma_table[i].NON1 = 0; ++ adma_table[i].len = 0; ++ adma_table[i].NON2 = 0; ++ } ++ ++ phytium_mci_adma_reset(host); ++} ++ ++static void phytium_mci_set_buswidth(struct phytium_mci_host *host, u32 width) ++{ ++ u32 val; ++ ++ switch (width) { ++ case MMC_BUS_WIDTH_1: ++ val = MCI_BUS_1BITS; ++ break; ++ ++ case MMC_BUS_WIDTH_4: ++ val = MCI_BUS_4BITS; ++ break; ++ ++ case MMC_BUS_WIDTH_8: ++ val = MCI_BUS_8BITS; ++ break; ++ default: ++ val = MCI_BUS_4BITS; ++ break; ++ } ++ writel(val, host->base + MCI_CTYPE); ++ dev_dbg(host->dev, "Bus Width = %d, set value:0x%x\n", width, val); ++} ++ ++static void phytium_mci_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) ++{ ++ struct phytium_mci_host *host = mmc_priv(mmc); ++ ++ if (ios->timing == MMC_TIMING_MMC_DDR52 || ios->timing == MMC_TIMING_UHS_DDR50) ++ sdr_set_bits(host->base + MCI_UHS_REG, MCI_UHS_REG_DDR); ++ else ++ sdr_clr_bits(host->base + MCI_UHS_REG, MCI_UHS_REG_DDR); ++ ++ phytium_mci_set_buswidth(host, ios->bus_width); ++ ++ switch (ios->power_mode) { ++ case MMC_POWER_UP: ++ set_bit(MCI_CARD_NEED_INIT, &host->flags); ++ writel(MCI_POWER_ON, host->base + MCI_PWREN); ++ break; ++ ++ case MMC_POWER_ON: ++ break; ++ ++ case MMC_POWER_OFF: ++ writel(MCI_POWER_OFF, host->base + MCI_PWREN); ++ break; ++ ++ default: ++ break; ++ } ++ phytium_mci_set_clk(host, ios); ++} ++ ++static void phytium_mci_ack_sdio_irq(struct mmc_host *mmc) ++{ ++ unsigned long flags; ++ struct phytium_mci_host *host = mmc_priv(mmc); ++ ++ spin_lock_irqsave(&host->lock, flags); ++ __phytium_mci_enable_sdio_irq(host, 1); ++ spin_unlock_irqrestore(&host->lock, flags); ++} ++ ++static int phytium_mci_get_cd(struct mmc_host *mmc) ++{ ++ struct phytium_mci_host *host = mmc_priv(mmc); ++ u32 status; ++ ++ if (mmc->caps & MMC_CAP_NONREMOVABLE) ++ return 1; ++ ++ status = readl(host->base + 
MCI_CARD_DETECT); ++ ++ if ((status & 0x1) == 0x1) ++ return 0; ++ ++ return 1; ++} ++ ++static int phytium_mci_ops_switch_volt(struct mmc_host *mmc, struct mmc_ios *ios) ++{ ++ struct phytium_mci_host *host = mmc_priv(mmc); ++ unsigned int is_voltage_180 = 0; ++ ++ is_voltage_180 = readl(host->base + MCI_UHS_REG); ++ if ((mmc->caps & MMC_CAP_NONREMOVABLE) && (ios->signal_voltage != MMC_SIGNAL_VOLTAGE_180)) ++ return -EINVAL; ++ ++ if ((ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) && (is_voltage_180 & 0x1)) ++ sdr_clr_bits(host->base + MCI_UHS_REG, MCI_UHS_REG_VOLT); ++ else if ((ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) && (!(is_voltage_180 & 0x1))) ++ sdr_set_bits(host->base + MCI_UHS_REG, MCI_UHS_REG_VOLT); ++ else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_120) ++ return -EINVAL; ++ return 0; ++} ++ ++static void phytium_mci_hw_reset(struct mmc_host *mmc) ++{ ++ struct phytium_mci_host *host = mmc_priv(mmc); ++ u32 reset_flag; ++ ++ if (host->is_use_dma) { ++ reset_flag = MCI_CNTRL_FIFO_RESET | MCI_CNTRL_DMA_RESET; ++ phytium_mci_adma_reset(host); ++ sdr_set_bits(host->base + MCI_CNTRL, reset_flag); ++ } else { ++ reset_flag = MCI_CNTRL_FIFO_RESET; ++ sdr_set_bits(host->base + MCI_CNTRL, reset_flag); ++ } ++ ++ while (readl(host->base + MCI_CNTRL) & reset_flag) ++ cpu_relax(); ++ ++ sdr_clr_bits(host->base + MCI_CARD_RESET, MCI_CARD_RESET_ENABLE); ++ udelay(5); ++ sdr_set_bits(host->base + MCI_CARD_RESET, MCI_CARD_RESET_ENABLE); ++ usleep_range(200, 300); ++} ++ ++#ifdef CONFIG_PM_SLEEP ++int phytium_mci_suspend(struct device *dev) ++{ ++ struct mmc_host *mmc = dev_get_drvdata(dev); ++ struct phytium_mci_host *host = mmc_priv(mmc); ++ ++ phytium_mci_deinit_hw(host); ++ return 0; ++} ++EXPORT_SYMBOL(phytium_mci_suspend); ++ ++int phytium_mci_resume(struct device *dev) ++{ ++ struct mmc_host *mmc = dev_get_drvdata(dev); ++ struct phytium_mci_host *host = mmc_priv(mmc); ++ ++ phytium_mci_init_hw(host); ++ return 0; ++} 
++EXPORT_SYMBOL(phytium_mci_resume); ++ ++#endif ++ ++#ifdef CONFIG_PM ++int phytium_mci_runtime_suspend(struct device *dev) ++{ ++ struct mmc_host *mmc = dev_get_drvdata(dev); ++ struct phytium_mci_host *host = mmc_priv(mmc); ++ ++ phytium_mci_deinit_hw(host); ++ return 0; ++} ++EXPORT_SYMBOL(phytium_mci_runtime_suspend); ++ ++int phytium_mci_runtime_resume(struct device *dev) ++{ ++ struct mmc_host *mmc = dev_get_drvdata(dev); ++ struct phytium_mci_host *host = mmc_priv(mmc); ++ ++ phytium_mci_init_hw(host); ++ return 0; ++} ++EXPORT_SYMBOL(phytium_mci_runtime_resume); ++ ++#endif ++ ++static struct mmc_host_ops phytium_mci_ops = { ++ .post_req = phytium_mci_post_req, ++ .pre_req = phytium_mci_pre_req, ++ .request = phytium_mci_ops_request, ++ .set_ios = phytium_mci_ops_set_ios, ++ .get_cd = phytium_mci_get_cd, ++ .enable_sdio_irq = phytium_mci_enable_sdio_irq, ++ .ack_sdio_irq = phytium_mci_ack_sdio_irq, ++ .card_busy = phytium_mci_card_busy, ++ .start_signal_voltage_switch = phytium_mci_ops_switch_volt, ++ .hw_reset = phytium_mci_hw_reset, ++}; ++ ++int phytium_mci_common_probe(struct phytium_mci_host *host) ++{ ++ struct mmc_host *mmc = host->mmc; ++ struct device *dev = host->dev; ++ int uhs_reg_value = 0x502; ++ int ret; ++ ++ dma_set_mask(dev, DMA_BIT_MASK(64)); ++ dma_set_coherent_mask(dev, DMA_BIT_MASK(64)); ++ ++ timer_setup(&host->hotplug_timer, hotplug_timer_func, 0); ++ ++ mmc->f_min = MCI_F_MIN; ++ if (!mmc->f_max) ++ mmc->f_max = MCI_F_MAX; ++ ++ mmc->ops = &phytium_mci_ops; ++ mmc->ocr_avail_sdio = MMC_VDD_32_33 | MMC_VDD_33_34; ++ mmc->ocr_avail_sd = MMC_VDD_32_33 | MMC_VDD_33_34; ++ mmc->ocr_avail_mmc = MMC_VDD_165_195; ++ mmc->caps |= host->caps; ++ ++ if (mmc->caps & MMC_CAP_SDIO_IRQ) { ++ mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; ++ dev_dbg(host->dev, "%s %d: MMC_CAP_SDIO_IRQ\n", __func__, __LINE__); ++ } ++ mmc->caps2 |= host->caps2; ++ if (host->is_use_dma) { ++ /* MMC core transfer sizes tunable parameters */ ++ mmc->max_segs = MAX_BD_NUM; 
++ mmc->max_seg_size = 4 * 1024; ++ mmc->max_blk_size = 512; ++ mmc->max_req_size = 512 * 1024; ++ mmc->max_blk_count = mmc->max_req_size / 512; ++ host->dma.adma_table = dma_zalloc_coherent(host->dev, ++ MAX_BD_NUM * ++ sizeof(struct phytium_adma2_64_desc), ++ &host->dma.adma_addr, GFP_KERNEL); ++ if (!host->dma.adma_table) ++ return MCI_REALEASE_MEM; ++ ++ host->dma.desc_sz = ADMA2_64_DESC_SZ; ++ phytium_mci_init_adma_table(host, &host->dma); ++ } else { ++ mmc->max_segs = MAX_BD_NUM; ++ mmc->max_seg_size = 4 * 1024; ++ mmc->max_blk_size = 512; ++ mmc->max_req_size = 4 * 512; ++ mmc->max_blk_count = mmc->max_req_size / 512; ++ } ++ writel(MCI_SET_FIFOTH(0x2, 7, 8), host->base + MCI_FIFOTH); ++ sdr_clr_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); ++ phytium_mci_update_external_clk(host, uhs_reg_value); ++ ++ spin_lock_init(&host->lock); ++ ++ phytium_mci_init_hw(host); ++ ret = devm_request_irq(host->dev, host->irq, phytium_mci_irq, ++ host->irq_flags, "phytium-mci", host); ++ ++ if (ret) ++ return ret; ++ ++ ret = mmc_add_host(mmc); ++ ++ if (ret) { ++ dev_err(host->dev, "%s %d: mmc add host!\n", __func__, __LINE__); ++ return ret; ++ } ++ return 0; ++} ++EXPORT_SYMBOL(phytium_mci_common_probe); ++ ++MODULE_DESCRIPTION("Phytium Multimedia Card Interface driver"); ++MODULE_LICENSE("GPL v2"); ++MODULE_AUTHOR("Cheng Quan "); +diff --git a/drivers/mmc/host/phytium-mci.h b/drivers/mmc/host/phytium-mci.h +new file mode 100644 +index 000000000000..53ba351b6c0d +--- /dev/null ++++ b/drivers/mmc/host/phytium-mci.h +@@ -0,0 +1,349 @@ ++/* SPDX-License-Identifier: GPL-2.0+ */ ++/* ++ * Driver for Phytium Multimedia Card Interface ++ * ++ * Copyright (C) 2021 Phytium Technology Co., Ltd. 
++ */ ++ ++#ifndef __PHYTIUM_MCI_H ++#define __PHYTIUM_MCI_H ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/*------------------------------------------------------*/ ++/* Common Definition */ ++/*------------------------------------------------------*/ ++#define MAX_BD_NUM 128 ++#define SD_BLOCK_SIZE 512 ++ ++#define MCI_BUS_1BITS 0x0 ++#define MCI_BUS_4BITS 0x1 ++#define MCI_BUS_8BITS (0x1 << 16) ++ ++#define MCI_SD_DRV_VALUE 0 ++#define MCI_SD_SAMP_VALUE_MAX 0 ++#define MCI_SD_SAMP_VALUE_MIN 0 ++ ++#define MCI_TIMEOUT_CMD_VALUE 0xFFFFFFFF ++#define MCI_POWER_ON 1 ++#define MCI_POWER_OFF 0 ++ ++#define MCI_PREPARE_FLAG (0x1 << 0) ++#define MCI_ASYNC_FLAG (0x1 << 1) ++#define MCI_MMAP_FLAG (0x1 << 2) ++ ++#define MCI_CMD_TIMEOUT (HZ/10 * 50) /* 100ms x5 */ ++#define MCI_DATA_TIMEOUT (HZ * 10) /* 1000ms x5 */ ++ ++#define MCI_CMD_TYPE_ADTC 0x2 ++ ++#define MCI_F_MIN 400000 ++#define MCI_F_MAX 50000000 ++ ++#define MCI_CLK 1200000000 ++#define MCI_REALEASE_MEM 0x1 ++#define MCI_MAX_FIFO_CNT 0x800 ++ ++/* FIFOTH register defines */ ++#define MCI_SET_FIFOTH(m, r, t) (((m) & 0x7) << 28 | \ ++ ((r) & 0xFFF) << 16 | ((t) & 0xFFF)) ++/* Card read threshold */ ++#define MCI_SET_THLD(v, x) (((v) & 0xFFF) << 16 | (x)) ++#define MCI_CARD_WR_THR_EN BIT(2) ++#define MCI_CARD_RD_THR_EN BIT(0) ++ ++/*----------------------------------------------------------------------*/ ++/* Register Offset */ ++/*----------------------------------------------------------------------*/ ++#define MCI_CNTRL 0x00 /* the controller config reg */ ++#define MCI_PWREN 0x04 /* the power enable reg */ ++#define MCI_CLKDIV 0x08 /* the clock divider reg */ ++#define MCI_CLKENA 0x10 /* the clock enable reg */ ++#define MCI_TMOUT 0x14 /* the timeout reg */ ++#define MCI_CTYPE 0x18 /* the card type reg */ ++#define MCI_BLKSIZ 0x1C /* the block size reg */ ++#define MCI_BYTCNT 0x20 /* the byte count reg */ ++#define MCI_INT_MASK 0x24 /* the interrupt mask reg */ ++#define MCI_CMDARG 
0x28 /* the command argument reg */ ++#define MCI_CMD 0x2C /* the command reg */ ++#define MCI_RESP0 0x30 /* the response reg0 */ ++#define MCI_RESP1 0x34 /* the response reg1 */ ++#define MCI_RESP2 0x38 /* the response reg2 */ ++#define MCI_RESP3 0X3C /* the response reg3 */ ++#define MCI_MASKED_INTS 0x40 /* the masked interrupt status reg */ ++#define MCI_RAW_INTS 0x44 /* the raw interrupt status reg */ ++#define MCI_STATUS 0x48 /* the status reg */ ++#define MCI_FIFOTH 0x4C /* the FIFO threshold watermark reg */ ++#define MCI_CARD_DETECT 0x50 /* the card detect reg */ ++#define MCI_CARD_WRTPRT 0x54 /* the card write protect reg */ ++#define MCI_CCLK_RDY 0x58 /* first div is ready? 1:ready,0:not ready*/ ++#define MCI_TRAN_CARD_CNT 0x5C /* the transferred CIU card byte count reg */ ++#define MCI_TRAN_FIFO_CNT 0x60 /* the transferred host to FIFO byte count reg */ ++#define MCI_DEBNCE 0x64 /* the debounce count reg */ ++#define MCI_UID 0x68 /* the user ID reg */ ++#define MCI_VID 0x6C /* the controller version ID reg */ ++#define MCI_HWCONF 0x70 /* the hardware configuration reg */ ++#define MCI_UHS_REG 0x74 /* the UHS-I reg */ ++#define MCI_CARD_RESET 0x78 /* the card reset reg */ ++#define MCI_BUS_MODE 0x80 /* the bus mode reg */ ++#define MCI_DESC_LIST_ADDRL 0x88 /* the descriptor list low base address reg */ ++#define MCI_DESC_LIST_ADDRH 0x8C /* the descriptor list high base address reg */ ++#define MCI_DMAC_STATUS 0x90 /* the internal DMAC status reg */ ++#define MCI_DMAC_INT_ENA 0x94 /* the internal DMAC interrupt enable reg */ ++#define MCI_CUR_DESC_ADDRL 0x98 /* the current host descriptor low address reg */ ++#define MCI_CUR_DESC_ADDRH 0x9C /* the current host descriptor high address reg */ ++#define MCI_CUR_BUF_ADDRL 0xA0 /* the current buffer low address reg */ ++#define MCI_CUR_BUF_ADDRH 0xA4 /* the current buffer high address reg */ ++#define MCI_CARD_THRCTL 0x100 /* the card threshold control reg */ ++#define MCI_UHS_REG_EXT 0x108 /* the UHS register 
extension */ ++#define MCI_EMMC_DDR_REG 0x10C /* the EMMC DDR reg */ ++#define MCI_ENABLE_SHIFT 0x110 /* the enable phase shift reg */ ++#define MCI_DATA 0x200 /* the data FIFO access */ ++ ++/* Command register defines */ ++#define MCI_CMD_START BIT(31) ++#define MCI_CMD_USE_HOLD_REG BIT(29) ++#define MCI_CMD_VOLT_SWITCH BIT(28) ++#define MCI_CMD_CCS_EXP BIT(23) ++#define MCI_CMD_CEATA_RD BIT(22) ++#define MCI_CMD_UPD_CLK BIT(21) ++#define MCI_CMD_INIT BIT(15) ++#define MCI_CMD_STOP BIT(14) ++#define MCI_CMD_PRV_DAT_WAIT BIT(13) ++#define MCI_CMD_SEND_STOP BIT(12) ++#define MCI_CMD_STRM_MODE BIT(11) ++#define MCI_CMD_DAT_WR BIT(10) ++#define MCI_CMD_DAT_EXP BIT(9) ++#define MCI_CMD_RESP_CRC BIT(8) ++#define MCI_CMD_RESP_LONG BIT(7) ++#define MCI_CMD_RESP_EXP BIT(6) ++#define MCI_CMD_INDX(n) ((n) & 0x1F) ++ ++/*------------------------------------------------------*/ ++/* Register Mask */ ++/*------------------------------------------------------*/ ++/* MCI_CNTRL mask */ ++#define MCI_CNTRL_CONTROLLER_RESET (0x1 << 0) /* RW */ ++#define MCI_CNTRL_FIFO_RESET (0x1 << 1) /* RW */ ++#define MCI_CNTRL_DMA_RESET (0x1 << 2) /* RW */ ++#define MCI_CNTRL_RES (0x1 << 3) /* */ ++#define MCI_CNTRL_INT_ENABLE (0x1 << 4) /* RW */ ++#define MCI_CNTRL_DMA_ENABLE (0x1 << 5) /* RW */ ++#define MCI_CNTRL_READ_WAIT (0x1 << 6) /* RW */ ++#define MCI_CNTRL_SEND_IRQ_RESPONSE (0x1 << 7) /* RW */ ++#define MCI_CNTRL_ABORT_READ_DATA (0x1 << 8) /* RW */ ++#define MCI_CNTRL_ENDIAN (0x1 << 11) /* RW */ ++//#define MCI_CNTRL_CARD_VOLTAGE_A (0xF << 16) /* RW */ ++//#define MCI_CNTRL_CARD_VOLTAGE_B (0xF << 20) /* RW */ ++#define MCI_CNTRL_ENABLE_OD_PULLUP (0x1 << 24) /* RW */ ++#define MCI_CNTRL_USE_INTERNAL_DMAC (0x1 << 25) /* RW */ ++ ++/* MCI_PWREN mask */ ++#define MCI_PWREN_ENABLE (0x1 << 0) /* RW */ ++ ++/* MCI_CLKENA mask */ ++#define MCI_CLKENA_CCLK_ENABLE (0x1 << 0) /* RW */ ++#define MCI_CLKENA_CCLK_LOW_POWER (0x1 << 16) /* RW */ ++#define MCI_EXT_CLK_ENABLE (0x1 << 1) ++ ++/* 
MCI_INT_MASK mask */ ++#define MCI_INT_MASK_CD (0x1 << 0) /* RW */ ++#define MCI_INT_MASK_RE (0x1 << 1) /* RW */ ++#define MCI_INT_MASK_CMD (0x1 << 2) /* RW */ ++#define MCI_INT_MASK_DTO (0x1 << 3) /* RW */ ++#define MCI_INT_MASK_TXDR (0x1 << 4) /* RW */ ++#define MCI_INT_MASK_RXDR (0x1 << 5) /* RW */ ++#define MCI_INT_MASK_RCRC (0x1 << 6) /* RW */ ++#define MCI_INT_MASK_DCRC (0x1 << 7) /* RW */ ++#define MCI_INT_MASK_RTO (0x1 << 8) /* RW */ ++#define MCI_INT_MASK_DRTO (0x1 << 9) /* RW */ ++#define MCI_INT_MASK_HTO (0x1 << 10) /* RW */ ++#define MCI_INT_MASK_FRUN (0x1 << 11) /* RW */ ++#define MCI_INT_MASK_HLE (0x1 << 12) /* RW */ ++#define MCI_INT_MASK_SBE_BCI (0x1 << 13) /* RW */ ++#define MCI_INT_MASK_ACD (0x1 << 14) /* RW */ ++#define MCI_INT_MASK_EBE (0x1 << 15) /* RW */ ++#define MCI_INT_MASK_SDIO (0x1 << 16) /* RW */ ++ ++/* MCI_MASKED_INTS mask */ ++#define MCI_MASKED_INTS_CD (0x1 << 0) /* RO */ ++#define MCI_MASKED_INTS_RE (0x1 << 1) /* RO */ ++#define MCI_MASKED_INTS_CMD (0x1 << 2) /* RO */ ++#define MCI_MASKED_INTS_DTO (0x1 << 3) /* RO */ ++#define MCI_MASKED_INTS_TXDR (0x1 << 4) /* RO */ ++#define MCI_MASKED_INTS_RXDR (0x1 << 5) /* RO */ ++#define MCI_MASKED_INTS_RCRC (0x1 << 6) /* RO */ ++#define MCI_MASKED_INTS_DCRC (0x1 << 7) /* RO */ ++#define MCI_MASKED_INTS_RTO (0x1 << 8) /* RO */ ++#define MCI_MASKED_INTS_DRTO (0x1 << 9) /* RO */ ++#define MCI_MASKED_INTS_HTO (0x1 << 10) /* RO */ ++#define MCI_MASKED_INTS_FRUN (0x1 << 11) /* RO */ ++#define MCI_MASKED_INTS_HLE (0x1 << 12) /* RO */ ++#define MCI_MASKED_INTS_SBE_BCI (0x1 << 13) /* RO */ ++#define MCI_MASKED_INTS_ACD (0x1 << 14) /* RO */ ++#define MCI_MASKED_INTS_EBE (0x1 << 15) /* RO */ ++#define MCI_MASKED_INTS_SDIO (0x1 << 16) /* RO */ ++ ++/* MCI_RAW_INTS mask */ ++#define MCI_RAW_INTS_CD (0x1 << 0) /* W1C */ ++#define MCI_RAW_INTS_RE (0x1 << 1) /* W1C */ ++#define MCI_RAW_INTS_CMD (0x1 << 2) /* W1C */ ++#define MCI_RAW_INTS_DTO (0x1 << 3) /* W1C */ ++#define MCI_RAW_INTS_TXDR (0x1 << 4) /* W1C 
*/ ++#define MCI_RAW_INTS_RXDR (0x1 << 5) /* W1C */ ++#define MCI_RAW_INTS_RCRC (0x1 << 6) /* W1C */ ++#define MCI_RAW_INTS_DCRC (0x1 << 7) /* W1C */ ++#define MCI_RAW_INTS_RTO (0x1 << 8) /* W1C */ ++#define MCI_RAW_INTS_DRTO (0x1 << 9) /* W1C */ ++#define MCI_RAW_INTS_HTO (0x1 << 10) /* W1C */ ++#define MCI_RAW_INTS_FRUN (0x1 << 11) /* W1C */ ++#define MCI_RAW_INTS_HLE (0x1 << 12) /* W1C */ ++#define MCI_RAW_INTS_SBE_BCI (0x1 << 13) /* W1C */ ++#define MCI_RAW_INTS_ACD (0x1 << 14) /* W1C */ ++#define MCI_RAW_INTS_EBE (0x1 << 15) /* W1C */ ++#define MCI_RAW_INTS_SDIO (0x1 << 16) /* W1C */ ++ ++/* MCI_STATUS mask */ ++#define MCI_STATUS_FIFO_RX (0x1 << 0) /* RO */ ++#define MCI_STATUS_FIFO_TX (0x1 << 1) /* RO */ ++#define MCI_STATUS_FIFO_EMPTY (0x1 << 2) /* RO */ ++#define MCI_STATUS_FIFO_FULL (0x1 << 3) /* RO */ ++#define MCI_STATUS_CARD_STATUS (0x1 << 8) /* RO */ ++#define MCI_STATUS_CARD_BUSY (0x1 << 9) /* RO */ ++#define MCI_STATUS_DATA_BUSY (0x1 << 10) /* RO */ ++#define MCI_STATUS_DMA_ACK (0x1 << 31) /* RO */ ++#define MCI_STATUS_DMA_REQ (0x1 << 32) /* RO */ ++ ++/* MCI_UHS_REG mask */ ++#define MCI_UHS_REG_VOLT (0x1 << 0) /* RW */ ++#define MCI_UHS_REG_DDR (0x1 << 16) /* RW */ ++ ++/* MCI_CARD_RESET mask */ ++#define MCI_CARD_RESET_ENABLE (0x1 << 0) /* RW */ ++ ++/* MCI_BUS_MODE mask */ ++#define MCI_BUS_MODE_SWR (0x1 << 0) /* RW */ ++#define MCI_BUS_MODE_FB (0x1 << 1) /* RW */ ++#define MCI_BUS_MODE_DE (0x1 << 7) /* RW */ ++ ++/* MCI_DMAC_STATUS mask */ ++#define MCI_DMAC_STATUS_TI (0x1 << 0) /* RW */ ++#define MCI_DMAC_STATUS_RI (0x1 << 1) /* RW */ ++#define MCI_DMAC_STATUS_FBE (0x1 << 2) /* RW */ ++#define MCI_DMAC_STATUS_DU (0x1 << 4) /* RW */ ++#define MCI_DMAC_STATUS_NIS (0x1 << 8) /* RW */ ++#define MCI_DMAC_STATUS_AIS (0x1 << 9) /* RW */ ++ ++/* MCI_DMAC_INT_ENA mask */ ++#define MCI_DMAC_INT_ENA_TI (0x1 << 0) /* RW */ ++#define MCI_DMAC_INT_ENA_RI (0x1 << 1) /* RW */ ++#define MCI_DMAC_INT_ENA_FBE (0x1 << 2) /* RW */ ++#define MCI_DMAC_INT_ENA_DU 
(0x1 << 4) /* RW */ ++#define MCI_DMAC_INT_ENA_CES (0x1 << 5) /* RW */ ++#define MCI_DMAC_INT_ENA_NIS (0x1 << 8) /* RW */ ++#define MCI_DMAC_INT_ENA_AIS (0x1 << 9) /* RW */ ++ ++/* MCI_CARD_THRCTL mask */ ++#define MCI_CARD_THRCTL_CARDRD (0x1 << 0) /* RW */ ++#define MCI_CARD_THRCTL_BUSY_CLR (0x1 << 1) /* RW */ ++#define MCI_CARD_THRCTL_CARDWR (0x1 << 2) /* RW */ ++ ++/* MCI_UHS_REG_EXT mask */ ++#define MCI_UHS_REG_EXT_MMC_VOLT (0x1 << 0) /* RW */ ++#define MCI_UHS_REG_EXT_CLK_ENA (0x1 << 1) /* RW */ ++ ++/* MCI_EMMC_DDR_REG mask */ ++#define MCI_EMMC_DDR_CYCLE (0x1 << 0) /* RW */ ++ ++/*--------------------------------------*/ ++/* Structure Type */ ++/*--------------------------------------*/ ++/* Maximum segments assuming a 512KiB maximum requisition */ ++/* size and a minimum4KiB page size. */ ++#define MCI_MAX_SEGS 128 ++/* ADMA2 64-bit DMA descriptor size */ ++#define ADMA2_64_DESC_SZ 32 ++ ++/* Each descriptor can transfer up to 4KB of data in chained mode */ ++/*ADMA2 64-bit descriptor.*/ ++struct phytium_adma2_64_desc { ++ u32 attribute; ++#define IDMAC_DES0_DIC BIT(1) ++#define IDMAC_DES0_LD BIT(2) ++#define IDMAC_DES0_FD BIT(3) ++#define IDMAC_DES0_CH BIT(4) ++#define IDMAC_DES0_ER BIT(5) ++#define IDMAC_DES0_CES BIT(30) ++#define IDMAC_DES0_OWN BIT(31) ++ u32 NON1; ++ u32 len; ++ u32 NON2; ++ u32 addr_lo; /* Lower 32-bits of Buffer Address Pointer 1*/ ++ u32 addr_hi; /* Upper 32-bits of Buffer Address Pointer 1*/ ++ u32 desc_lo; /* Lower 32-bits of Next Descriptor Address */ ++ u32 desc_hi; /* Upper 32-bits of Next Descriptor Address */ ++} __packed __aligned(4); ++ ++struct phytium_mci_dma { ++ struct scatterlist *sg; /* I/O scatter list */ ++ /* ADMA descriptor table, pointer to adma_table array */ ++ struct phytium_adma2_64_desc *adma_table; ++ /* Mapped ADMA descr. 
table, the physical address of adma_table array */ ++ dma_addr_t adma_addr; ++ unsigned int desc_sz; /* ADMA descriptor size */ ++}; ++ ++enum adtc_t { ++ COMMOM_ADTC = 0, ++ BLOCK_RW_ADTC = 1 ++}; ++ ++struct phytium_mci_host { ++ struct device *dev; ++ struct mmc_host *mmc; ++ u32 caps; ++ u32 caps2; ++ spinlock_t lock; ++ struct mmc_request *mrq; ++ struct mmc_command *cmd; ++ struct mmc_data *data; ++ int error; ++ void __iomem *base; /* host base address */ ++ void *adma_table1; ++ dma_addr_t adma_addr1; ++ struct phytium_mci_dma dma_rx; /* dma channel */ ++ struct phytium_mci_dma dma_tx; /* dma channel */ ++ struct phytium_mci_dma dma; /* dma channel */ ++ u64 dma_mask; ++ bool vqmmc_enabled; ++ u32 *sg_virt_addr; ++ enum adtc_t adtc_type; /* 0:common adtc cmd; 1:block r/w adtc cmd;*/ ++ struct timer_list hotplug_timer; ++ struct delayed_work req_timeout; ++ int irq; /* host interrupt */ ++ u32 current_rca; /*the current rca value*/ ++ u32 current_ios_clk; ++ u32 is_use_dma; ++ u32 is_device_x100; ++ struct clk *src_clk; /* phytium_mci source clock */ ++ unsigned long clk_rate; ++ unsigned long clk_div; ++ unsigned long irq_flags; ++ unsigned long flags; ++#define MCI_CARD_NEED_INIT 1 ++ ++}; ++ ++int phytium_mci_common_probe(struct phytium_mci_host *host); ++void phytium_mci_deinit_hw(struct phytium_mci_host *host); ++int phytium_mci_runtime_suspend(struct device *dev); ++int phytium_mci_runtime_resume(struct device *dev); ++int phytium_mci_resume(struct device *dev); ++int phytium_mci_suspend(struct device *dev); ++ ++#endif /* __PHYTIUM_MCI_HW_H */ +diff --git a/drivers/mmc/host/phytium-sdci.c b/drivers/mmc/host/phytium-sdci.c +new file mode 100644 +index 000000000000..effa50bc5a49 +--- /dev/null ++++ b/drivers/mmc/host/phytium-sdci.c +@@ -0,0 +1,1442 @@ ++/* ++ * File Name: phytium_sdci.c - Phytium FT SDCI dirver ++ * ++ * Copyright (C) 2019 Phytium Technology Co.,Ltd. 
++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "phytium-sdci.h" ++ ++static const u32 cmd_ints_mask = SDCI_SDCI_NORMAL_ISER_ECC_EN | SDCI_SDCI_NORMAL_ISER_EEI_EN; ++static const u32 data_ints_mask = SDCI_BD_ISER_ETRS_EN; ++static const u32 err_ints_mask = SDCI_ERROR_ISER_ECTE_EN | SDCI_ERROR_ISR_CCRCE_EN | ++ SDCI_ERROR_ISR_CIR_EN | SDCI_ERROR_ISR_CNR_EN; ++static const u32 caps = MMC_CAP_ERASE; ++ ++static void hotplug_timer_func(struct timer_list *t); ++static bool phytium_sdci_private_send_cmd(struct phytium_sdci_host *host, ++ u32 cmd, u32 resp_type, u32 arg); ++static bool phytium_sdci_cmd_done(struct phytium_sdci_host *host, int events, ++ struct mmc_request *mrq, ++ struct mmc_command *cmd); ++static bool phytium_sdci_data_xfer_done(struct phytium_sdci_host *host, ++ u32 events, struct mmc_request *mrq, ++ struct mmc_data *data); ++static void phytium_sdci_cmd_next(struct phytium_sdci_host *host, ++ struct mmc_request *mrq, ++ struct mmc_command *cmd); ++ ++static int phytium_sdci_cmd13_process(struct phytium_sdci_host *host, ++ struct mmc_request *mrq, ++ struct mmc_data *data, ++ u32 wait_timeout_ms, ++ u32 send_once_time_ms); ++ ++static int phytium_sd_error(struct phytium_sdci_host *host) ++{ ++ int temp; ++ temp = readl(host->base + SDCI_NORMAL_ISR); ++ dev_err(host->dev, "[%s %d]SDCI_NORMAL_ISR:%x\n", __func__, __LINE__, temp); ++ temp = readl(host->base + SDCI_BD_ISR); ++ temp = readl(host->base + SDCI_ERROR_ISR); ++ dev_err(host->dev, "[%s 
%d]SDCI_ERROR_ISR:%x\n", __func__, __LINE__, temp); ++ temp = readl(host->base + SDCI_BD_ISR); ++ dev_err(host->dev, "[%s %d]SDCI_BD_ISR:%x\n", __func__, __LINE__, temp); ++ temp = readl(host->base + SDCI_RESP0); ++ dev_err(host->dev, "[%s %d]SDCI_RESP0:%x\n", __func__, __LINE__, temp); ++ ++ return 0; ++} ++ ++static void sdr_set_bits(void __iomem *reg, u32 bs) ++{ ++ u32 val; ++ ++ val = readl(reg); ++ val |= bs; ++ ++ writel(val, reg); ++} ++ ++static void sdr_clr_bits(void __iomem *reg, u32 bs) ++{ ++ u32 val; ++ ++ val = readl(reg); ++ val &= ~bs; ++ ++ writel(val, reg); ++} ++ ++static void phytium_sdci_reset_hw(struct phytium_sdci_host *host) ++{ ++ sdr_set_bits(host->base + SDCI_SOFTWARE, ++ SDCI_SOFTWARE_SRST); ++ sdr_clr_bits(host->base + SDCI_SOFTWARE, ++ SDCI_SOFTWARE_SRST); ++ while (!(readl(host->base + SDCI_STATUS) & SDCI_STATUS_IDIE)) ++ cpu_relax(); ++} ++ ++static void phytium_sdci_prepare_data(struct phytium_sdci_host *host, ++ struct mmc_request *mrq) ++{ ++ struct mmc_data *data = mrq->data; ++ bool read; ++ ++ read = (data->flags & MMC_DATA_READ) != 0; ++ data->sg_count = dma_map_sg(host->dev, data->sg, data->sg_len, ++ read ? DMA_FROM_DEVICE : DMA_TO_DEVICE); ++} ++ ++static void phytium_sdci_unprepare_data(struct phytium_sdci_host *host, ++ struct mmc_request *mrq) ++{ ++ bool read; ++ struct mmc_data *data = mrq->data; ++ ++ read = (data->flags & MMC_DATA_READ) != 0; ++ dma_unmap_sg(host->dev, data->sg, data->sg_len, ++ read ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE); ++} ++ ++static void phytium_sdci_set_clk(struct phytium_sdci_host *host, ++ struct mmc_ios *ios) ++{ ++ unsigned long clk_rate; ++ u32 div = 0xffffffff, div_reg; ++ ++ if (ios->clock) { ++ clk_rate = host->clk_rate; ++ div = ((clk_rate / (2 * ios->clock)) - 1); ++ div_reg = readl(host->base + SDCI_CLOCK_D); ++ if (div_reg == div) ++ return; ++ writel(div, host->base + SDCI_CLOCK_D); ++ writel(0, host->base + SDCI_SD_DRV); ++ writel(5, host->base + SDCI_SD_SAMP); ++ ++ sdr_set_bits(host->base + SDCI_SOFTWARE, SDCI_SOFTWARE_SRST); ++ sdr_clr_bits(host->base + SDCI_SOFTWARE, SDCI_SOFTWARE_SRST); ++ while (!(readl(host->base + SDCI_STATUS) & SDCI_STATUS_IDIE)) ++ cpu_relax(); ++ dev_dbg(host->dev, "host->clk_rate: %ld, ios->clock: %d\n", ++ host->clk_rate, ios->clock); ++ } ++} ++ ++ ++static inline u32 phytium_sdci_cmd_find_resp(struct phytium_sdci_host *host, ++ struct mmc_request *mrq, ++ struct mmc_command *cmd) ++{ ++ u32 resp; ++ ++ switch (mmc_resp_type(cmd)) { ++ case MMC_RSP_R1: ++ resp = 0x2; ++ break; ++ case MMC_RSP_R1B: ++ resp = 0x2; ++ break; ++ case MMC_RSP_R2: ++ resp = 0x1; ++ break; ++ case MMC_RSP_R3: ++ resp = 0x3; ++ break; ++ case MMC_RSP_NONE: ++ default: ++ resp = 0x0; ++ break; ++ } ++ ++ return resp; ++} ++ ++static inline u32 phytium_sdci_cmd_prepare_raw_cmd(struct phytium_sdci_host *host, ++ struct mmc_request *mrq, struct mmc_command *cmd) ++{ ++ /* ++ * rawcmd : ++ * trty << 14 | opcode << 8 | cmdw << 6 | cice << 4 | crce << 3 | resp ++ */ ++ u32 resp, rawcmd; ++ u32 opcode = cmd->opcode; ++ ++ resp = phytium_sdci_cmd_find_resp(host, mrq, cmd); ++ rawcmd = ((opcode << 8) | resp); ++ ++ if ((cmd->flags & MMC_CMD_MASK) == MMC_CMD_ADTC) { ++ rawcmd = (rawcmd | (SDCI_CMD_TYPE_ADTC << 14)); ++ } ++ ++ return rawcmd; ++} ++ ++static void ++phytium_sdci_unexpected_error_handler(struct phytium_sdci_host *host, ++ struct mmc_request *mrq, ++ struct mmc_data *data, ++ int err_type) ++{ ++ unsigned long flags; ++ 
int error; ++ ++ spin_lock_irqsave(&host->lock, flags); ++ host->mrq = NULL; ++ host->cmd = NULL; ++ host->data = NULL; ++ spin_unlock_irqrestore(&host->lock, flags); ++ ++ if (err_type & ERR_CARD_ABSENT) { ++ host->mmc->detect_change = 1; ++ dev_dbg(host->dev, "SD is absent when send cmd:%d\n", mrq->cmd->opcode); ++ } ++ ++ switch (err_type) { ++ case ERR_CARD_ABSENT: ++ error = -ENOMEDIUM; ++ break; ++ case ERR_TIMEOUT: ++ error = -ETIMEDOUT; ++ break; ++ case ERR_CMD_RESPONED: ++ error = -EIO; ++ break; ++ default: ++ error = -ETIMEDOUT; ++ break; ++ } ++ ++ if (data) { ++ data->error = error; ++ phytium_sdci_unprepare_data(host, mrq); ++ ++ if ((data->flags & MMC_DATA_READ) == MMC_DATA_READ || ++ (data->flags & MMC_DATA_WRITE) == MMC_DATA_WRITE) ++ phytium_sdci_data_xfer_done(host, SDCI_BD_ISR_TRS_R, mrq, data); ++ } else { ++ mrq->cmd->error = error; ++ } ++ ++ mmc_request_done(host->mmc, mrq); ++} ++ ++static bool phytium_sdci_start_data(struct phytium_sdci_host *host, struct mmc_request *mrq, ++ struct mmc_command *cmd, struct mmc_data *data) ++{ ++ bool read, res; ++ u32 sg_dma_addrh, sg_dma_addrl; ++ u32 sd_block_addrh, sd_block_addrl; ++ u32 temp, timeout, sd_status; ++ u32 block_cnt = 0; ++ u32 sd_block_addr = cmd->arg; ++ u32 private_cmd, resp_type, arg; ++ u32 j, dma_len; ++ unsigned long deadline_time; ++ dma_addr_t dma_address; ++ struct scatterlist *sg; ++ int ret; ++ ++ WARN_ON(host->cmd); ++ host->cmd = cmd; ++ ++ WARN_ON(host->data); ++ host->data = data; ++ read = data->flags & MMC_DATA_READ; ++ ++ for_each_sg(data->sg, sg, data->sg_count, j) { ++ writel(0, host->base + SDCI_COMMAND); ++ ++ dma_address = sg_dma_address(sg); ++ sg_dma_addrh = (u32) (dma_address >> 32); ++ sg_dma_addrl = (u32) dma_address; ++ ++ dma_len = sg_dma_len(sg); ++ block_cnt = (dma_len / SD_BLOCK_SIZE); ++ ++ sd_block_addrh = 0; ++ sd_block_addrl = sd_block_addr; ++ ++ sdr_set_bits(host->base + SDCI_SOFTWARE, SDCI_SOFTWARE_BDRST); ++ sdr_clr_bits(host->base + 
SDCI_SOFTWARE, SDCI_SOFTWARE_BDRST); ++ writel(block_cnt, host->base + SDCI_BLK_CNT); ++ ++ if ((mrq->data->flags & MMC_DATA_READ) == MMC_DATA_READ) { ++ writel(sg_dma_addrl, host->base + SDCI_BD_RX); ++ writel(sg_dma_addrh, host->base + SDCI_BD_RX); ++ writel(sd_block_addrl, host->base + SDCI_BD_RX); ++ writel(sd_block_addrh, host->base + SDCI_BD_RX); ++ timeout = 100 * block_cnt; ++ } else { ++ timeout = 250 * block_cnt; ++ ret = phytium_sdci_cmd13_process(host, mrq, data, timeout, 1); ++ if (ret != SDCI_CMD13_OK) ++ return false; ++ ++ writel(sg_dma_addrl, host->base + SDCI_BD_TX); ++ writel(sg_dma_addrh, host->base + SDCI_BD_TX); ++ writel(sd_block_addrl, host->base + SDCI_BD_TX); ++ writel(sd_block_addrh, host->base + SDCI_BD_TX); ++ } ++ ++ deadline_time = jiffies + msecs_to_jiffies(timeout); ++ ++ temp = readl(host->base + SDCI_BD_ISR); ++ if ((mrq->data->flags & MMC_DATA_READ) == MMC_DATA_READ) { ++ while ((temp & SDCI_BD_ISR_TRS_R) != SDCI_BD_ISR_TRS_R) { ++ sd_status = readl(host->base + SDCI_STATUS); ++ if (sd_status & SDCI_STATUS_CDSL) { ++ phytium_sdci_unexpected_error_handler(host, mrq, data, ++ ERR_CARD_ABSENT); ++ if (temp & SDCI_BD_ISR_DAIS) ++ writel(1, host->base + SDCI_BD_ISR); ++ return false; ++ } ++ ++ temp = readl(host->base + SDCI_BD_ISR); ++ if (time_after(jiffies, deadline_time)) { ++ phytium_sdci_unexpected_error_handler(host, mrq, data, ++ ERR_TIMEOUT); ++ dev_err(host->dev, ++ "Read Data timeout:jiffies:0x%lx,dt_jiffies:" ++ "0x%lx, BD_isr_reg:0x%x,cmd:%d, REG_D0:0x%x\n", ++ jiffies, jiffies - deadline_time, temp, ++ cmd->opcode, readl(host->base + SDCI_STATUS)); ++ ++ return false; ++ } ++ } ++ } else { ++ while ((temp & SDCI_BD_ISR_TRS_W) != SDCI_BD_ISR_TRS_W) { ++ sd_status = readl(host->base + SDCI_STATUS); ++ if (sd_status & SDCI_STATUS_CDSL) { ++ phytium_sdci_unexpected_error_handler(host, mrq, data, ++ ERR_CARD_ABSENT); ++ dev_err(host->dev, "[%s][%d]: Card absent ! 
cmd(%d)\n", ++ __func__, __LINE__, mrq->cmd->opcode); ++ return false; ++ } ++ ++ temp = readl(host->base + SDCI_BD_ISR); ++ if (time_after(jiffies, deadline_time)) { ++ phytium_sdci_unexpected_error_handler(host, mrq, data, ++ ERR_TIMEOUT); ++ dev_err(host->dev, ++ "Write Date timeout: jiffies:0x%lx,dt_jiffies:" ++ "0x%lx,BD_isr_reg:0x%x\n", ++ jiffies, jiffies - deadline_time, temp); ++ return false; ++ } ++ } ++ } ++ writel(1, host->base + SDCI_BD_ISR); ++ writel(1, host->base + SDCI_NORMAL_ISR); ++ sd_block_addr = sd_block_addr + block_cnt; ++ ++ if (j < (data->sg_count - 1) && 1 < block_cnt) { ++ private_cmd = MMC_STOP_TRANSMISSION; ++ resp_type = 0x2; ++ arg = 0; ++ res = phytium_sdci_private_send_cmd(host, private_cmd, ++ resp_type, arg); ++ if (!res) { ++ sd_status = readl(host->base + SDCI_STATUS); ++ if (sd_status & SDCI_STATUS_CDSL) { ++ phytium_sdci_unexpected_error_handler(host, mrq, data, ++ ERR_CARD_ABSENT); ++ writel(1, host->base + SDCI_BD_ISR); ++ dev_err(host->dev, ++ "[%s][%d]:Card absent ! 
private_cmd(%d)\n", ++ __func__, __LINE__, private_cmd); ++ } else { ++ phytium_sdci_unexpected_error_handler(host, mrq, data, ++ ERR_CMD_RESPONED); ++ dev_err(host->dev, ++ "[%s][%d] cmd(%d) response errored\n", ++ __func__, __LINE__, mrq->cmd->opcode); ++ phytium_sd_error(host); ++ } ++ writel(1, host->base + SDCI_NORMAL_ISR); ++ return false; ++ } ++ writel(1, host->base + SDCI_NORMAL_ISR); ++ } ++ } ++ ++ host->is_multi_rw_only_one_blkcnt = false; ++ ++ if ((cmd->opcode == MMC_READ_MULTIPLE_BLOCK && block_cnt == 1) || ++ (cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK && block_cnt == 1)) ++ host->is_multi_rw_only_one_blkcnt = true; ++ ++ phytium_sdci_cmd_done(host, SDCI_NORMAL_ISR_CC, mrq, cmd); ++ if ((mrq->data->flags & MMC_DATA_READ) == MMC_DATA_READ) ++ phytium_sdci_data_xfer_done(host, SDCI_BD_ISR_TRS_R, ++ mrq, data); ++ else ++ phytium_sdci_data_xfer_done(host, SDCI_BD_ISR_TRS_W, ++ mrq, data); ++ ++ return true; ++} ++ ++static int phytium_sdci_auto_cmd_done(struct phytium_sdci_host *host, ++ int events, struct mmc_command *cmd) ++{ ++ u32 *rsp = cmd->resp; ++ ++ rsp[0] = readl(host->base + SDCI_RESP0); ++ ++ if (events & SDCI_NORMAL_ISR_CC) ++ cmd->error = 0; ++ else { ++ phytium_sdci_reset_hw(host); ++ dev_err(host->dev, ++ "%s: AUTO_CMD%d arg=%08X; rsp %08X; cmd_error=%d\n", ++ __func__, cmd->opcode, cmd->arg, rsp[0], cmd->error); ++ } ++ ++ return cmd->error; ++} ++ ++static void phytium_sdci_track_cmd_data(struct phytium_sdci_host *host, ++ struct mmc_command *cmd, ++ struct mmc_data *data) ++{ ++ if (host->error) ++ dev_dbg(host->dev, "%s: cmd=%d arg=%08X; host->error=0x%08X\n", ++ __func__, cmd->opcode, cmd->arg, host->error); ++} ++ ++static void phytium_sdci_request_done(struct phytium_sdci_host *host, ++ struct mmc_request *mrq) ++{ ++ unsigned long flags; ++ ++ dev_dbg(host->dev, ++ "%s_%d:mrq->cmd->opcode:%d, mrq->cmd->arg:0x%x resp 0x%x 0x%x 0x%x 0x%x\n", ++ __func__, __LINE__, mrq->cmd->opcode, mrq->cmd->arg, ++ mrq->cmd->resp[0], 
mrq->cmd->resp[1], mrq->cmd->resp[2], ++ mrq->cmd->resp[3]); ++ ++ spin_lock_irqsave(&host->lock, flags); ++ host->mrq = NULL; ++ spin_unlock_irqrestore(&host->lock, flags); ++ ++ phytium_sdci_track_cmd_data(host, mrq->cmd, mrq->data); ++ if (mrq->data) ++ phytium_sdci_unprepare_data(host, mrq); ++ mmc_request_done(host->mmc, mrq); ++} ++ ++static bool ++phytium_sdci_auto_command_done(struct phytium_sdci_host *host, int events, ++ struct mmc_request *mrq, struct mmc_command *cmd) ++{ ++ u32 *rsp = cmd->resp; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&host->lock, flags); ++ host->cmd = NULL; ++ spin_unlock_irqrestore(&host->lock, flags); ++ ++ sdr_clr_bits(host->base + SDCI_NORMAL_ISER, cmd_ints_mask); ++ ++ rsp[0] = 0x900; ++ phytium_sdci_request_done(host, mrq); ++ return true; ++} ++ ++/* returns true if command is fully handled; returns false otherwise */ ++static bool phytium_sdci_cmd_done(struct phytium_sdci_host *host, int events, ++ struct mmc_request *mrq, ++ struct mmc_command *cmd) ++{ ++ bool done = false; ++ bool sbc_error; ++ unsigned long flags; ++ u32 *rsp = cmd->resp; ++ ++ if (mrq->sbc && cmd == mrq->cmd && ++ (events & SDCI_NORMAL_ISR_CC)) ++ phytium_sdci_auto_cmd_done(host, events, mrq->sbc); ++ ++ sbc_error = mrq->sbc && mrq->sbc->error; ++ ++ if (!sbc_error && !(events & (SDCI_NORMAL_ISR_CC | ++ SDCI_NORMAL_ISR_CR | ++ SDCI_NORMAL_ISR_TIMEOUT))) ++ return done; ++ ++ spin_lock_irqsave(&host->lock, flags); ++ done = !host->cmd; ++ host->cmd = NULL; ++ spin_unlock_irqrestore(&host->lock, flags); ++ ++ if (done) ++ return true; ++ ++ sdr_clr_bits(host->base + SDCI_NORMAL_ISER, cmd_ints_mask); ++ ++ if (cmd->flags & MMC_RSP_PRESENT) { ++ if (cmd->flags & MMC_RSP_136) { ++ rsp[0] = readl(host->base + SDCI_RESP0); ++ rsp[1] = readl(host->base + SDCI_RESP1); ++ rsp[2] = readl(host->base + SDCI_RESP2); ++ rsp[3] = readl(host->base + SDCI_RESP3); ++ } else ++ rsp[0] = readl(host->base + SDCI_RESP0); ++ ++ if(cmd->opcode == SD_SEND_RELATIVE_ADDR) 
++ host->current_rca = rsp[0] & 0xFFFF0000; ++ } ++ ++ if (!sbc_error && ++ !(events & SDCI_NORMAL_ISR_CC) && ++ (events & SDCI_NORMAL_ISR_TIMEOUT)) ++ cmd->error = -ETIMEDOUT; ++ ++ if (cmd->error) ++ dev_dbg(host->dev, ++ "%s: cmd=%d arg=%08X; rsp %08X; cmd_error=%d\n", ++ __func__, cmd->opcode, cmd->arg, rsp[0], ++ cmd->error); ++ ++ phytium_sdci_cmd_next(host, mrq, cmd); ++ ++ return true; ++} ++ ++static bool set_databus_width(struct phytium_sdci_host *host) ++{ ++ bool res; ++ u32 cmd, resp_type, arg; ++ ++ cmd = SD_APP_SET_BUS_WIDTH; ++ resp_type = 0x2; ++ arg = 0x2; ++ res = phytium_sdci_private_send_cmd(host, cmd, resp_type, arg); ++ if (!res) ++ return false; ++ ++ cmd = MMC_APP_CMD; ++ resp_type = 0x2; ++ arg = host->current_rca; ++ res = phytium_sdci_private_send_cmd(host, cmd, resp_type, arg); ++ if (!res) ++ return false; ++ ++ return true; ++} ++ ++ ++static void phytium_sdci_start_command(struct phytium_sdci_host *host, ++ struct mmc_request *mrq, ++ struct mmc_command *cmd) ++{ ++ u32 rawcmd; ++ struct mmc_data *data = mrq->data; ++ dma_addr_t dma_adtc_buf; ++ u32 dma_bufh,dma_bufl; ++ u32 block_cnt = 0; ++ ++ WARN_ON(host->cmd); ++ host->cmd = cmd; ++ ++ cmd->error = 0; ++ rawcmd = phytium_sdci_cmd_prepare_raw_cmd(host, mrq, cmd); ++ if (cmd->opcode == MMC_STOP_TRANSMISSION || ++ cmd->opcode == MMC_SEND_STATUS) ++ writel(1, host->base + SDCI_ERROR_ISR); ++ sdr_set_bits(host->base + SDCI_NORMAL_ISER, cmd_ints_mask); ++ writel(rawcmd, host->base + SDCI_COMMAND); ++ ++ if ((cmd->flags & MMC_CMD_MASK) == MMC_CMD_ADTC) { ++ WARN_ON(host->data); ++ host->data = data; ++ ++ dma_adtc_buf = host->dma_rx.bd_addr; ++ dma_bufh = (u32) (dma_adtc_buf >> 32); ++ dma_bufl = (u32) dma_adtc_buf; ++ block_cnt = mrq->data->blocks; ++ sdr_set_bits(host->base + SDCI_BD_ISER, data_ints_mask); ++ writel(block_cnt, host->base + SDCI_BLK_CNT); ++ ++ if ((mrq->data->flags & MMC_DATA_READ) == MMC_DATA_READ) { ++ writel(dma_bufl, host->base + SDCI_BD_RX); ++ writel(dma_bufh, 
host->base + SDCI_BD_RX); ++ writel(cmd->arg, host->base + SDCI_BD_RX); ++ writel(0, host->base + SDCI_BD_RX); ++ } else { ++ writel(dma_bufl, host->base + SDCI_BD_TX); ++ writel(dma_bufh, host->base + SDCI_BD_TX); ++ writel(cmd->arg, host->base + SDCI_BD_TX); ++ writel(0, host->base + SDCI_BD_TX); ++ } ++ } else { ++ writel(cmd->arg, host->base + SDCI_ARGUMENT); ++ } ++} ++ ++static void phytium_sdci_cmd_next(struct phytium_sdci_host *host, ++ struct mmc_request *mrq, ++ struct mmc_command *cmd) ++{ ++ if (cmd->error || (mrq->sbc && mrq->sbc->error)) ++ phytium_sdci_request_done(host, mrq); ++ else if (cmd == mrq->sbc) ++ phytium_sdci_start_command(host, mrq, mrq->cmd); ++ else if (!cmd->data) ++ phytium_sdci_request_done(host, mrq); ++} ++ ++static int phytium_sdci_cmd13_process(struct phytium_sdci_host *host, ++ struct mmc_request *mrq, ++ struct mmc_data *data, ++ u32 wait_timeout_ms, ++ u32 send_once_time_ms) ++{ ++ u32 private_cmd, resp_type, arg, temp, sd_status; ++ unsigned long deadline_time; ++ bool res; ++ ++ deadline_time = jiffies + msecs_to_jiffies(wait_timeout_ms); ++ ++ do { ++ private_cmd = MMC_SEND_STATUS; ++ resp_type = 0x2; ++ arg = host->current_rca; ++ ++ res = phytium_sdci_private_send_cmd(host, private_cmd, resp_type, arg); ++ if (!res) { ++ sd_status = readl(host->base + SDCI_STATUS); ++ if (sd_status & SDCI_STATUS_CDSL) { ++ phytium_sdci_unexpected_error_handler(host, mrq, data, ++ ERR_CARD_ABSENT); ++ dev_err(host->dev, ++ "[%s][%d] Card absent! 
private_cmd(%d)\n", ++ __func__, __LINE__, private_cmd); ++ } else { ++ phytium_sdci_unexpected_error_handler(host, mrq, data, ++ ERR_CMD_RESPONED); ++ ++ dev_err(host->dev, ++ "[%s][%d] private_cmd(%d) response errored\n", ++ __func__, __LINE__, private_cmd); ++ phytium_sd_error(host); ++ } ++ writel(1, host->base + SDCI_BD_ISR); ++ return SDCI_CMD13_FAILED; ++ } ++ ++ temp = readl(host->base + SDCI_RESP0); ++ ++ if (time_after(jiffies, deadline_time)) { ++ ++ if (mrq->cmd->opcode == MMC_SEND_STATUS) ++ return SDCI_CMD13_OK; ++ ++ dev_err(host->dev, ++ "SD card is not in transfer mode,timeout:%d,rsp[0]:%x\n", ++ wait_timeout_ms, temp); ++ ++ phytium_sdci_unexpected_error_handler(host, mrq, data, ++ ERR_TIMEOUT); ++ phytium_sd_error(host); ++ return SDCI_CMD13_FAILED; ++ } ++ ++ writel(1, host->base + SDCI_NORMAL_ISR); ++ ++ if (CARD_TRAN_STATE != (temp & CARD_CURRENT_STATE) && send_once_time_ms) ++ mdelay(send_once_time_ms); ++ ++ } while (CARD_TRAN_STATE != (temp & CARD_CURRENT_STATE)); ++ ++ return SDCI_CMD13_OK; ++} ++ ++static void phytium_sdci_ops_request(struct mmc_host *mmc, ++ struct mmc_request *mrq) ++{ ++ struct phytium_sdci_host *host = mmc_priv(mmc); ++ unsigned long flags; ++ bool res; ++ u32 status_sd; ++ int res_cmd13; ++ ++ host->error = 0; ++ WARN_ON(host->mrq); ++ host->mrq = mrq; ++ ++ dev_dbg(host->dev, ++ "phytium_sdci_ops_request:mrq->cmd->opcode:%d, mrq->cmd->arg:0x%x \n", ++ mrq->cmd->opcode, mrq->cmd->arg); ++ ++ if (mrq->cmd->opcode == MMC_SEND_STATUS && ++ (mrq->cmd->flags & MMC_CMD_MASK) != MMC_CMD_ADTC) { ++ u32 status = readl(host->base + SDCI_STATUS); ++ if (status & SDCI_STATUS_CDSL) { ++ phytium_sdci_unexpected_error_handler(host, mrq, NULL, ++ ERR_CARD_ABSENT); ++ return; ++ } else { ++ res_cmd13 = phytium_sdci_cmd13_process(host, mrq, NULL, 400, 5); ++ if (res_cmd13 == SDCI_CMD13_FAILED) ++ return; ++ } ++ } else if (mrq->cmd->opcode == MMC_STOP_TRANSMISSION) { ++ status_sd = readl(host->base + SDCI_STATUS); ++ if (status_sd & 
SDCI_STATUS_CDSL) { ++ phytium_sdci_unexpected_error_handler(host, mrq, NULL, ++ ERR_CARD_ABSENT); ++ return; ++ } ++ } ++ ++ if (mrq->data){ ++ phytium_sdci_prepare_data(host, mrq); ++ if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK || ++ mrq->cmd->opcode == MMC_READ_SINGLE_BLOCK || ++ mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK || ++ mrq->cmd->opcode == MMC_WRITE_BLOCK) { ++ host->adtc_type = BLOCK_RW_ADTC; ++ phytium_sdci_start_data(host, mrq, ++ mrq->cmd, mrq->data); ++ return; ++ } ++ host->adtc_type = COMMOM_ADTC; ++ } ++ ++ if (mrq->cmd->opcode == SD_IO_RW_DIRECT || ++ mrq->cmd->opcode == SD_IO_SEND_OP_COND) { ++ spin_lock_irqsave(&host->lock, flags); ++ host->mrq = NULL; ++ host->cmd = NULL; ++ spin_unlock_irqrestore(&host->lock, flags); ++ mrq->cmd->error = -EINVAL; ++ mmc_request_done(host->mmc, mrq); ++ ++ return; ++ } ++ ++ if (mrq->cmd->opcode == SD_APP_SEND_SCR) { ++ res = set_databus_width(host); ++ if (!res) { ++ phytium_sdci_unexpected_error_handler(host, mrq, NULL, ERR_CMD_RESPONED); ++ return; ++ } ++ } ++ ++ /* if SBC is required, we have HW option and SW option. 
++ * if HW option is enabled, and SBC does not have "special" flags, ++ * use HW option, otherwise use SW option ++ */ ++ if (mrq->sbc && ++ (!mmc_card_mmc(mmc->card) || (mrq->sbc->arg & 0xFFFF0000))) ++ phytium_sdci_start_command(host, mrq, mrq->sbc); ++ else ++ phytium_sdci_start_command(host, mrq, mrq->cmd); ++} ++ ++static void phytium_sdci_data_xfer_next(struct phytium_sdci_host *host, ++ struct mmc_request *mrq, ++ struct mmc_data *data) ++{ ++ if (mmc_op_multi(mrq->cmd->opcode) && ++ mrq->stop && !mrq->stop->error && ++ !mrq->sbc && host->is_multi_rw_only_one_blkcnt) { ++ host->is_multi_rw_only_one_blkcnt = false; ++ phytium_sdci_auto_command_done(host, SDCI_NORMAL_ISR_CC, mrq, mrq->stop); ++ } else if (mmc_op_multi(mrq->cmd->opcode) && ++ mrq->stop && !mrq->stop->error && ++ !mrq->sbc) ++ phytium_sdci_start_command(host, mrq, mrq->stop); ++ else ++ phytium_sdci_request_done(host, mrq); ++} ++ ++static inline void get_data_buffer(struct mmc_data *data, ++ u32 *bytes, u32 **pointer) ++{ ++ struct scatterlist *sg; ++ ++ sg = &data->sg[0]; ++ *bytes = sg->length; ++ *pointer = sg_virt(sg); ++} ++ ++static bool phytium_sdci_data_xfer_done(struct phytium_sdci_host *host, ++ u32 events, struct mmc_request *mrq, ++ struct mmc_data *data) ++{ ++ struct mmc_command *stop = data->stop; ++ unsigned long flags; ++ bool done; ++ unsigned int check_data; ++ u32 sg_length,i; ++ u32 *sg_virt_addr; ++ ++ check_data = events & (SDCI_BD_ISR_TRS_R | SDCI_BD_ISR_TRS_W | SDCI_BD_ISR_EDTE); ++ ++ spin_lock_irqsave(&host->lock, flags); ++ done = !host->data; ++ if (check_data) ++ host->data = NULL; ++ spin_unlock_irqrestore(&host->lock, flags); ++ ++ if (done) ++ return true; ++ ++ if (check_data || (stop && stop->error)) { ++ sdr_clr_bits(host->base + SDCI_BD_ISER, data_ints_mask); ++ dev_dbg(host->dev, "DMA stop\n"); ++ ++ if (((events & SDCI_BD_ISR_TRS_R) || ++ (events & SDCI_BD_ISR_TRS_W)) && ++ (!stop || !stop->error)) { ++ if ((mrq->cmd->flags & MMC_CMD_MASK) == MMC_CMD_ADTC 
&& ++ (host->adtc_type == COMMOM_ADTC)) { ++ get_data_buffer(data, &sg_length, ++ &host->sg_virt_addr); ++ sg_virt_addr = host->sg_virt_addr; ++ ++ for (i = 0; i < (sg_length/4); i++) { ++ *sg_virt_addr = host->dma_rx.buf[i]; ++ sg_virt_addr++; ++ } ++ } ++ data->bytes_xfered = data->blocks * data->blksz; ++ } else { ++ dev_dbg(host->dev, "interrupt events: %x\n", events); ++ phytium_sdci_reset_hw(host); ++ data->bytes_xfered = 0; ++ dev_dbg(host->dev, "%s: cmd=%d; blocks=%d", ++ __func__, mrq->cmd->opcode, data->blocks); ++ dev_dbg(host->dev, "data_error=%d xfer_size=%d\n", ++ (int)data->error, data->bytes_xfered); ++ } ++ ++ phytium_sdci_data_xfer_next(host, mrq, data); ++ done = true; ++ } ++ ++ return done; ++} ++ ++ ++static int phytium_sdci_card_busy(struct mmc_host *mmc) ++{ ++ struct phytium_sdci_host *host = mmc_priv(mmc); ++ u32 status; ++ ++ /* check if any pin between dat[0:3] is low */ ++ status = readl(host->base + SDCI_STATUS); ++ if (((status >> 20) & 0xf) != 0xf) ++ return 1; ++ ++ return 0; ++} ++ ++static void phytium_sdci_request_timeout(struct work_struct *work) ++{ ++ struct phytium_sdci_host *host; ++ ++ host = container_of(work, struct phytium_sdci_host, req_timeout.work); ++ dev_err(host->dev, "%s: aborting cmd/data/mrq\n", __func__); ++ if (host->mrq) { ++ dev_err(host->dev, "%s: aborting mrq=%p cmd=%d\n", __func__, ++ host->mrq, host->mrq->cmd->opcode); ++ if (host->cmd) { ++ dev_err(host->dev, "%s: aborting cmd=%d\n", ++ __func__, host->cmd->opcode); ++ phytium_sdci_cmd_done(host, SDCI_NORMAL_ISR_TIMEOUT, ++ host->mrq, host->cmd); ++ } else if (host->data) { ++ dev_err(host->dev, "%s: abort data: cmd%d; %d blocks\n", ++ __func__, host->mrq->cmd->opcode, ++ host->data->blocks); ++ phytium_sdci_data_xfer_done(host, SDCI_BD_ISR_EDTE, ++ host->mrq, host->data); ++ } ++ } ++} ++ ++static void hotplug_timer_func(struct timer_list *t) ++{ ++ struct phytium_sdci_host *host; ++ u32 status; ++ ++ host = from_timer(host, t, hotplug_timer); ++ if 
(!host) ++ dev_err(host->dev, "%s: Not find host!\n", __func__); ++ status = readl(host->base + SDCI_STATUS); ++ ++ if (status & SDCI_STATUS_CDSL) { /* card absent */ ++ if (host->mmc->card) { ++ cancel_delayed_work(&host->mmc->detect); ++ mmc_detect_change(host->mmc, ++ msecs_to_jiffies(100)); ++ } ++ } else { /* card insert */ ++ cancel_delayed_work(&host->mmc->detect); ++ mmc_detect_change(host->mmc, msecs_to_jiffies(200)); ++ } ++} ++ ++static irqreturn_t phytium_sdci_irq(int irq, void *dev_id) ++{ ++ struct phytium_sdci_host *host = (struct phytium_sdci_host *) dev_id; ++ unsigned long flags; ++ struct mmc_request *mrq; ++ struct mmc_command *cmd; ++ u32 events; ++ ++ if (!host) ++ return IRQ_NONE; ++ ++ spin_lock_irqsave(&host->lock, flags); ++ events = readl(host->base + SDCI_NORMAL_ISR); ++ /* clear interrupts */ ++ writel(1, host->base + SDCI_NORMAL_ISR); ++ ++ mrq = host->mrq; ++ cmd = host->cmd; ++ spin_unlock_irqrestore(&host->lock, flags); ++ ++ if (events & (SDCI_NORMAL_ISR_CR | SDCI_NORMAL_ISR_CI)) { ++ mod_timer(&host->hotplug_timer, ++ jiffies + usecs_to_jiffies(30000)); ++ goto irq_out; ++ } ++ ++ if (!(events & cmd_ints_mask)) ++ goto irq_out; ++ ++ if (!mrq) { ++ dev_err(host->dev, "%s: MRQ=NULL; events=%08X\n", ++ __func__, events); ++ WARN_ON(1); ++ goto irq_out; ++ } ++ ++ dev_dbg(host->dev, "%s: events=%08X\n", __func__, events); ++ ++ if (cmd) ++ phytium_sdci_cmd_done(host, events, mrq, cmd); ++ ++irq_out: ++ return IRQ_HANDLED; ++} ++ ++static irqreturn_t phytium_sdci_dma_irq(int irq, void *dev_id) ++{ ++ struct phytium_sdci_host *host = (struct phytium_sdci_host *) dev_id; ++ unsigned long flags; ++ struct mmc_request *mrq; ++ struct mmc_command *cmd; ++ struct mmc_data *data; ++ u32 events; ++ ++ spin_lock_irqsave(&host->lock, flags); ++ events = readl(host->base + SDCI_BD_ISR); ++ writel(1, host->base + SDCI_BD_ISR); ++ ++ mrq = host->mrq; ++ cmd = host->cmd; ++ data = host->data; ++ spin_unlock_irqrestore(&host->lock, flags); ++ ++ if 
(!(events & data_ints_mask)) ++ goto dma_irq_out; ++ ++ if (!mrq) { ++ dev_err(host->dev, ++ "%s: MRQ=NULL; events=%08X\n", ++ __func__, events); ++ goto dma_irq_out; ++ } ++ ++ dev_dbg(host->dev, "%s: events=%08X\n", __func__, events); ++ ++ if (data) ++ phytium_sdci_data_xfer_done(host, events, mrq, data); ++ ++dma_irq_out: ++ return IRQ_HANDLED; ++} ++ ++static irqreturn_t phytium_sdci_err_irq(int irq, void *dev_id) ++{ ++ struct phytium_sdci_host *host = (struct phytium_sdci_host *) dev_id; ++ unsigned long flags; ++ struct mmc_request *mrq; ++ struct mmc_command *cmd; ++ struct mmc_data *data; ++ u32 events; ++ ++ if (!host) ++ return IRQ_NONE; ++ ++ spin_lock_irqsave(&host->lock, flags); ++ events = readl(host->base + SDCI_ERROR_ISR); ++ mrq = host->mrq; ++ cmd = host->cmd; ++ data = host->data; ++ spin_unlock_irqrestore(&host->lock, flags); ++ ++ if (!(events&err_ints_mask)) ++ goto err_irq_out; ++ ++ if (!mrq) { ++ sdr_clr_bits(host->base + SDCI_NORMAL_ISER, SDCI_NORMAL_ISR_EI); ++ writel(1, host->base + SDCI_ERROR_ISR); ++ dev_err(host->dev, "%s: MRQ=NULL; events=%08X\n", __func__, events); ++ goto err_irq_out; ++ } ++ sdr_clr_bits(host->base + SDCI_NORMAL_ISER, SDCI_NORMAL_ISR_EI); ++ if (data) { ++ dev_err(host->dev, ++ "[%s][%d]: cmd(%d); %d read blocks, status:%x,flag:%x\n", ++ __func__, __LINE__, mrq->cmd->opcode, data->blocks, events, data->flags); ++ data->error = -ETIMEDOUT; ++ if ((data->flags & MMC_DATA_READ) == MMC_DATA_READ || ++ (data->flags & MMC_DATA_WRITE) == MMC_DATA_WRITE) ++ phytium_sdci_data_xfer_done(host, SDCI_BD_ISR_EDTE | SDCI_BD_ISR_TRS_R, ++ mrq, data); ++ mrq->cmd->error = -ETIMEDOUT; ++ mmc_request_done(host->mmc, mrq); ++ } else if (cmd) { ++ phytium_sdci_cmd_done(host, SDCI_NORMAL_ISR_TIMEOUT, mrq, cmd); ++ } ++ ++ writel(1, host->base + SDCI_NORMAL_ISR); ++ writel(1, host->base + SDCI_ERROR_ISR); ++err_irq_out: ++ return IRQ_HANDLED; ++} ++ ++static void phytium_sdci_init_hw(struct phytium_sdci_host *host) ++{ ++ u32 val; ++ 
++ /* Reset */ ++ phytium_sdci_reset_hw(host); ++ ++ val = SDCI_SEN_CREFR_VAL | SDCI_SEN_DEBNCE_VAL; ++ writel(val, host->base + SDCI_SD_SEN); ++ ++ /* Disable and clear all interrupts */ ++ writel(0, host->base + SDCI_NORMAL_ISER); ++ writel(0, host->base + SDCI_ERROR_ISER); ++ writel(0, host->base + SDCI_BD_ISER); ++ ++ writel(1, host->base + SDCI_NORMAL_ISR); ++ writel(1, host->base + SDCI_ERROR_ISR); ++ writel(1, host->base + SDCI_BD_ISR); ++ ++ sdr_set_bits(host->base + SDCI_NORMAL_ISER, ++ SDCI_SDCI_NORMAL_ISER_ECI|SDCI_SDCI_NORMAL_ISER_ECR); ++ /* Configure default cmd timeout to 0.1(s)s = val/25M */ ++ val = SDCI_F_MAX / 10; ++ writel(val, host->base + SDCI_TIMEOUT_CMD); ++ writel(SDCI_TIMEOUT_DATA_VALUE, host->base + SDCI_TIMEOUT_DATA); ++ ++ val = 0x0F00; ++ writel(val,host->base + SDCI_CONTROLLER); ++ ++ dev_dbg(host->dev, "init hardware done!"); ++} ++ ++static void phytium_sdci_deinit_hw(struct phytium_sdci_host *host) ++{ ++ /* Disable and clear all interrupts */ ++ writel(0, host->base + SDCI_NORMAL_ISER); ++ writel(0, host->base + SDCI_ERROR_ISER); ++ writel(0, host->base + SDCI_BD_ISER); ++ ++ writel(0, host->base + SDCI_NORMAL_ISR); ++ writel(0, host->base + SDCI_ERROR_ISR); ++ writel(0, host->base + SDCI_BD_ISR); ++} ++ ++static void phytium_sdci_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) ++{ ++ struct phytium_sdci_host *host = mmc_priv(mmc); ++ ++ if (ios->bus_width == MMC_BUS_WIDTH_4) ++ mmc->caps = mmc->caps & (~MMC_CAP_4_BIT_DATA); ++ ++ /* Suspend/Resume will do power off/on */ ++ switch (ios->power_mode) { ++ case MMC_POWER_UP: ++ writel(SDCI_POWER_ON, host->base + SDCI_POWER); ++ break; ++ case MMC_POWER_ON: ++ phytium_sdci_set_clk(host, ios); ++ break; ++ case MMC_POWER_OFF: ++ writel(SDCI_POWER_OFF, host->base + SDCI_POWER); ++ break; ++ default: ++ break; ++ } ++} ++ ++static int phytium_sdci_get_cd(struct mmc_host *mmc) ++{ ++ struct phytium_sdci_host *host = mmc_priv(mmc); ++ u32 status = readl(host->base + SDCI_STATUS); 
++ ++ if (((status >> 19) & 0x1) == 0x1) ++ return 0; ++ ++ return 1; ++} ++ ++static void phytium_sdci_hw_reset(struct mmc_host *mmc) ++{ ++ struct phytium_sdci_host *host = mmc_priv(mmc); ++ ++ sdr_set_bits(host->base + SDCI_SOFTWARE, SDCI_SOFTWARE_SRST); ++ sdr_clr_bits(host->base + SDCI_SOFTWARE, SDCI_SOFTWARE_SRST); ++ while (!(readl(host->base + SDCI_STATUS) & SDCI_STATUS_IDIE)) ++ cpu_relax(); ++} ++ ++static struct mmc_host_ops phytium_sdci_ops = { ++ .request = phytium_sdci_ops_request, ++ .set_ios = phytium_sdci_ops_set_ios, ++ .get_cd = phytium_sdci_get_cd, ++ .card_busy = phytium_sdci_card_busy, ++ .hw_reset = phytium_sdci_hw_reset, ++}; ++ ++static bool phytium_sdci_private_send_cmd(struct phytium_sdci_host *host, ++ u32 cmd, u32 resp_type,u32 arg) ++{ ++ u32 temp, sd_cmd, sd_arg, sd_status; ++ unsigned long deadline_time; ++ ++ writel(1, host->base + SDCI_NORMAL_ISR); ++ writel(1, host->base + SDCI_ERROR_ISR); ++ ++ sd_cmd = (cmd << 8) | resp_type; ++ sd_arg = arg; ++ writel(sd_cmd, host->base + SDCI_COMMAND); ++ writel(sd_arg, host->base + SDCI_ARGUMENT); ++ ++ if (cmd == MMC_STOP_TRANSMISSION) ++ deadline_time = jiffies + msecs_to_jiffies(1000); ++ else ++ deadline_time = jiffies + msecs_to_jiffies(100); ++ ++ temp = readl(host->base + SDCI_NORMAL_ISR); ++ while ((temp & SDCI_NORMAL_ISR_CC) != SDCI_NORMAL_ISR_CC) { ++ sd_status = readl(host->base + SDCI_STATUS); ++ if (sd_status & SDCI_STATUS_CDSL) ++ return false; ++ ++ temp = readl(host->base + SDCI_NORMAL_ISR); ++ if (time_after(jiffies, deadline_time)) ++ return false; ++ ++ if (cmd == MMC_STOP_TRANSMISSION) ++ mdelay(1); ++ } ++ ++ return true; ++} ++ ++static int phytium_sdci_probe(struct platform_device *pdev) ++{ ++ struct mmc_host *mmc; ++ struct phytium_sdci_host *host; ++ struct resource *res; ++ int ret; ++ const struct acpi_device_id *match; ++ struct device *dev = &pdev->dev; ++ ++ /* Allocate MMC host for this device */ ++ mmc = mmc_alloc_host(sizeof(struct phytium_sdci_host), 
&pdev->dev); ++ if (!mmc) ++ return -ENOMEM; ++ ++ host = mmc_priv(mmc); ++ ret = mmc_of_parse(mmc); ++ if (ret) ++ goto host_free; ++ ++ if (dev->of_node) { ++ host->src_clk = devm_clk_get(&pdev->dev, "phytium_sdc_clk"); ++ if (IS_ERR(host->src_clk)) { ++ ret = PTR_ERR(host->src_clk); ++ goto host_free; ++ } ++ ++ host->clk_rate = clk_get_rate(host->src_clk); ++ if (device_property_read_bool(dev, "no-dma-coherent")) ++ dev->archdata.dma_coherent = false; ++ } else if (has_acpi_companion(dev)) { ++ match = acpi_match_device(dev->driver->acpi_match_table, dev); ++ if (!match) { ++ dev_err(dev, "Error ACPI match data is missing\n"); ++ return -ENODEV; ++ } ++ ++ acpi_dma_configure(dev, DEV_DMA_NOT_SUPPORTED); ++ ++ host->clk_rate = 600000000; ++ } else { ++ dev_err(&pdev->dev, "No DT found\n"); ++ return -EINVAL; ++ } ++ ++ dma_set_mask(dev, DMA_BIT_MASK(40)); ++ dma_set_coherent_mask(dev, DMA_BIT_MASK(40)); ++ ++ timer_setup(&host->hotplug_timer, hotplug_timer_func, 0); ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ host->base = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(host->base)) { ++ ret = PTR_ERR(host->base); ++ goto host_free; ++ } ++ ++ host->irq = platform_get_irq(pdev, 1); ++ if (host->irq < 0) { ++ ret = -EINVAL; ++ goto host_free; ++ } ++ ++ host->irq_err = platform_get_irq(pdev, 2); ++ if (host->irq_err < 0) { ++ ret = -EINVAL; ++ goto host_free; ++ } ++ ++ host->irq_bd = platform_get_irq(pdev, 0); ++ if (host->irq_bd < 0) { ++ ret = -EINVAL; ++ goto host_free; ++ } ++ ++ host->caps = caps; ++ host->dev = &pdev->dev; ++ host->mmc = mmc; ++ ++ if((4 * SDCI_F_MAX) > host->clk_rate) ++ host->clk_div = 1; ++ else ++ host->clk_div = ((host->clk_rate / (2 * SDCI_F_MAX)) - 1); ++ ++ /* Set host parameters to mmc */ ++ mmc->f_min = SDCI_F_MIN; ++ mmc->f_max = (host->clk_rate / ((host->clk_div + 1) * 2)); ++ mmc->ops = &phytium_sdci_ops; ++ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; ++ ++ mmc->caps |= host->caps; ++ /* MMC core 
transfer sizes tunable parameters */ ++ mmc->max_segs = MAX_BD_NUM; ++ mmc->max_seg_size = 512 * 1024; ++ mmc->max_blk_size = 512; ++ mmc->max_req_size = 512 * 1024; ++ mmc->max_blk_count = mmc->max_req_size / 512; ++ ++ host->dma_rx.buf = dma_zalloc_coherent(&pdev->dev, ++ MAX_BD_NUM, ++ &host->dma_rx.bd_addr, ++ GFP_KERNEL); ++ if (!host->dma_rx.buf){ ++ ret = -ENOMEM; ++ goto release_mem; ++ } ++ ++ host->cmd_timeout = msecs_to_jiffies(100); ++ host->data_timeout = msecs_to_jiffies(250); ++ ++ INIT_DELAYED_WORK(&host->req_timeout, phytium_sdci_request_timeout); ++ spin_lock_init(&host->lock); ++ ++ platform_set_drvdata(pdev, mmc); ++ phytium_sdci_init_hw(host); ++ ++ ret = devm_request_irq(&pdev->dev, host->irq, phytium_sdci_irq, ++ IRQF_SHARED, pdev->name, host); ++ if (ret) ++ goto release; ++ ++ ret = devm_request_irq(&pdev->dev, host->irq_err, phytium_sdci_err_irq, ++ IRQF_SHARED, pdev->name, host); ++ if (ret) ++ goto release; ++ ++ ret = devm_request_irq(&pdev->dev, host->irq_bd, phytium_sdci_dma_irq, ++ IRQF_SHARED, pdev->name, host); ++ if (ret) ++ goto release; ++ ++ ret = mmc_add_host(mmc); ++ if (ret) ++ goto release; ++ ++ return 0; ++ ++release: ++ platform_set_drvdata(pdev, NULL); ++ phytium_sdci_deinit_hw(host); ++release_mem: ++ if (host->dma_rx.buf) ++ dma_free_coherent(&pdev->dev, MAX_BD_NUM, ++ host->dma_rx.buf, ++ host->dma_rx.bd_addr); ++host_free: ++ mmc_free_host(mmc); ++ ++ return ret; ++} ++ ++static int phytium_sdci_remove(struct platform_device *pdev) ++{ ++ struct mmc_host *mmc; ++ struct phytium_sdci_host *host; ++ ++ mmc = platform_get_drvdata(pdev); ++ host = mmc_priv(mmc); ++ ++ cancel_delayed_work_sync(&host->req_timeout); ++ platform_set_drvdata(pdev, NULL); ++ mmc_remove_host(host->mmc); ++ phytium_sdci_deinit_hw(host); ++ ++ if (host->dma_rx.buf) ++ dma_free_coherent(&pdev->dev, MAX_BD_NUM, ++ host->dma_rx.buf, host->dma_rx.bd_addr); ++ ++ mmc_free_host(host->mmc); ++ ++ return 0; ++} ++ ++#ifdef CONFIG_PM_SLEEP ++static int 
phytium_sdci_suspend(struct device *dev) ++{ ++ struct mmc_host *mmc = dev_get_drvdata(dev); ++ struct phytium_sdci_host *host = mmc_priv(mmc); ++ ++ phytium_sdci_deinit_hw(host); ++ return 0; ++} ++ ++static int phytium_sdci_resume(struct device *dev) ++{ ++ struct mmc_host *mmc = dev_get_drvdata(dev); ++ struct phytium_sdci_host *host = mmc_priv(mmc); ++ ++ phytium_sdci_init_hw(host); ++ mmc->caps = mmc->caps | MMC_CAP_4_BIT_DATA; ++ ++ return 0; ++} ++#endif ++ ++#ifdef CONFIG_PM ++static int phytium_sdci_runtime_suspend(struct device *dev) ++{ ++ struct mmc_host *mmc = dev_get_drvdata(dev); ++ struct phytium_sdci_host *host = mmc_priv(mmc); ++ ++ phytium_sdci_deinit_hw(host); ++ ++ return 0; ++} ++ ++static int phytium_sdci_runtime_resume(struct device *dev) ++{ ++ struct mmc_host *mmc = dev_get_drvdata(dev); ++ struct phytium_sdci_host *host = mmc_priv(mmc); ++ ++ phytium_sdci_init_hw(host); ++ ++ return 0; ++} ++ ++static const struct dev_pm_ops phytium_sdci_dev_pm_ops = { ++ SET_SYSTEM_SLEEP_PM_OPS(phytium_sdci_suspend, ++ phytium_sdci_resume) ++ SET_RUNTIME_PM_OPS(phytium_sdci_runtime_suspend, ++ phytium_sdci_runtime_resume, NULL) ++}; ++#else ++#define phytium_sdci_dev_pm_ops NULL ++#endif ++ ++static const struct of_device_id phytium_sdci_of_ids[] = { ++ { .compatible = "phytium,sdci", }, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, phytium_sdci_of_ids); ++ ++#ifdef CONFIG_ACPI ++static const struct acpi_device_id phytium_sdci_acpi_ids[] = { ++ { .id = "PHYT0005" }, ++ { } ++}; ++ ++MODULE_DEVICE_TABLE(acpi, phytium_sdci_acpi_ids); ++#else ++#define phytium_sdci_acpi_ids NULL ++#endif ++ ++static struct platform_driver phytium_sdci_driver = { ++ .probe = phytium_sdci_probe, ++ .remove = phytium_sdci_remove, ++ .driver = { ++ .name = "sdci-phytium", ++ .of_match_table = phytium_sdci_of_ids, ++ .acpi_match_table = phytium_sdci_acpi_ids, ++ .pm = &phytium_sdci_dev_pm_ops, ++ }, ++}; ++ ++module_platform_driver(phytium_sdci_driver); ++ ++MODULE_LICENSE("GPL v2"); 
++MODULE_AUTHOR("Cheng Quan "); ++MODULE_AUTHOR("Chen Baozi "); ++MODULE_DESCRIPTION("Phytium SD Card Interface driver"); +diff --git a/drivers/mmc/host/phytium-sdci.h b/drivers/mmc/host/phytium-sdci.h +new file mode 100644 +index 000000000000..97afd4cb9288 +--- /dev/null ++++ b/drivers/mmc/host/phytium-sdci.h +@@ -0,0 +1,204 @@ ++/* ++ * File Name: phytium_sdci.h - Phytium FT SDCI dirver ++ * ++ * Copyright (C) 2019 Phytium Technology Co.,Ltd. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ */ ++ ++/*---------------------------------------------------------------------------*/ ++/* Common Definition */ ++/*---------------------------------------------------------------------------*/ ++#define MAX_BD_NUM 0x1000 ++#define SD_BLOCK_SIZE 512 ++ ++/*---------------------------------------------------------------------------*/ ++/* Register Offset */ ++/*---------------------------------------------------------------------------*/ ++#define SDCI_CONTROLLER 0x00 /* controller config reg */ ++#define SDCI_ARGUMENT 0x04 /* argument reg */ ++#define SDCI_COMMAND 0x08 /* command reg */ ++#define SDCI_CLOCK_D 0x0C /* clock divide reg */ ++#define SDCI_SOFTWARE 0x10 /* controller reset reg */ ++#define SDCI_POWER 0X14 /* POWRE CONTROL REG */ ++#define SDCI_TIMEOUT_CMD 0x18 /* cmd timeout config reg */ ++#define SDCI_TIMEOUT_DATA 0x1C /* data timeout reg */ ++#define SDCI_NORMAL_ISER 0x20 /* normal ISR config reg */ ++#define SDCI_ERROR_ISER 0x24 /* erroe ISR config reg */ ++#define SDCI_BD_ISER 0x28 /* BD ISR config reg */ ++#define SDCI_CAPA 0x2C /* BD ISR config reg */ ++#define SDCI_SD_DRV 0x30 /* SD card driving phase position reg */ ++#define SDCI_SD_SAMP 0x34 /* SD card sampling phase position reg */ ++#define SDCI_SD_SEN 0x38 /* SD card detection reg */ 
++#define SDCI_HDS_AXI 0x3C /* AXI boundary config reg */ ++#define SDCI_BD_RX 0x40 /* BD rx addr reg */ ++#define SDCI_BD_TX 0x60 /* BD tx addr reg */ ++#define SDCI_BLK_CNT 0x80 /* r/w block num reg */ ++#define SDCI_NORMAL_ISR 0xC0 /* normal ISR status reg */ ++#define SDCI_ERROR_ISR 0xC4 /* error ISR status reg */ ++#define SDCI_BD_ISR 0xC8 /* BD ISR status reg */ ++#define SDCI_BD_STATUS 0xCC /* BD descriptor status reg */ ++#define SDCI_STATUS 0xD0 /* status reg */ ++#define SDCI_BLOCK 0xD4 /* block len reg */ ++#define SDCI_RESP0 0xE0 /* response reg0 */ ++#define SDCI_RESP1 0xE4 /* response reg1 */ ++#define SDCI_RESP2 0xE8 /* response reg2 */ ++#define SDCI_RESP3 0XEC /* response reg3 */ ++ ++/*---------------------------------------------------------------------------*/ ++/* Register Mask */ ++/*---------------------------------------------------------------------------*/ ++/* SDCI_CONTROLLER mask */ ++#define SDCI_CONTROLLER_ECRCWR (0x1 << 0) /* RW */ ++#define SDCI_CONTROLLER_ECRCRD (0x1 << 1) /* RW */ ++#define SDCI_CONTROLLER_RESEDE (0x1 << 2) /* RW */ ++#define SDCI_CONTROLLER_PERMDR (0x3 << 8) /* RW */ ++#define SDCI_CONTROLLER_PERMDX (0x3 << 10) /* RW */ ++ ++/* SDCI_SOFTWARE mask */ ++#define SDCI_SOFTWARE_SRST (0x1 << 0) /* RW */ ++#define SDCI_SOFTWARE_SCRST (0x1 << 1) /* RW */ ++#define SDCI_SOFTWARE_BDRST (0x1 << 2) /* RW */ ++#define SDCI_SOFTWARE_CFCLF (0x1 << 3) /* RW */ ++#define SDCI_SOFTWARE_SDRST (0x1 << 4) /* RW */ ++ ++/* SDCI_NORMAL_ISER mask */ ++#define SDCI_SDCI_NORMAL_ISER_ECC_EN (0x1 << 0) /* RW */ ++#define SDCI_SDCI_NORMAL_ISER_ECR (0x1 << 1) /* RW */ ++#define SDCI_SDCI_NORMAL_ISER_ECI (0x1 << 2) /* RW */ ++#define SDCI_SDCI_NORMAL_ISER_EEI_EN (0x1 << 15) /* RW */ ++ ++/* SDCI_NORMAL_ISR mask */ ++#define SDCI_NORMAL_ISR_CC (0x1 << 0) /* R */ ++#define SDCI_NORMAL_ISR_CR (0x1 << 1) /* R */ ++#define SDCI_NORMAL_ISR_CI (0x1 << 2) /* R */ ++#define SDCI_NORMAL_ISR_TIMEOUT (0x1 << 3) /* R */ ++#define SDCI_NORMAL_ISR_EI (0x1 << 
15) /* R */ ++ ++/* SDCI_ERROR_ISER mask */ ++#define SDCI_ERROR_ISER_ECTE_EN (0x1 << 0) /* RW */ ++#define SDCI_ERROR_ISR_CCRCE_EN (0x1 << 1) /* RW */ ++#define SDCI_ERROR_ISR_CIR_EN (0x1 << 3) /* RW */ ++#define SDCI_ERROR_ISR_CNR_EN (0x1 << 4) /* RW */ ++/* SDCI_ERROR_ISR mask */ ++#define SDCI_ERROR_ISR_CTE (0x1 << 0) /* R */ ++#define SDCI_ERROR_ISR_CCRCE (0x1 << 1) /* R */ ++#define SDCI_ERROR_ISR_CIR (0x1 << 3) /* R */ ++#define SDCI_ERROR_ISR_CNR (0x1 << 4) /* R */ ++ ++/* SDCI_BD_ISER mask */ ++#define SDCI_BD_ISER_ETRS_EN (0x1 << 8) /* RW */ ++#define SDCI_BD_ISER_DATFRAX_EN (0x1 << 7) /* RW */ ++ ++/* SDCI_BD_ISR mask */ ++#define SDCI_BD_ISR_TRS_W (0x1 << 0) /* R */ ++#define SDCI_BD_ISR_TRS_R (0x1 << 8) /* R */ ++#define SDCI_BD_ISR_EDTE (0x1 << 3) /* R */ ++#define SDCI_BD_ISR_DAIS (0x1 << 15) /* R */ ++#define SDCI_BD_ISR_DATFRAX (0x1 << 7) /* R */ ++ ++/* SDCI_HDS_AXI mask */ ++#define SDCI_HDS_AXI_AWDOMAIN (0x1 << 0) /* RW */ ++#define SDCI_HDS_AXI_ARDOMAIN (0x1 << 12) /* RW */ ++#define SDCI_HDS_AXI_AWCACHE (0x6 << 24) /* RW */ ++#define SDCI_HDS_AXI_ARCACHE (0xB << 28) /* RW */ ++ ++/* SDCI_STATUS mask */ ++#define SDCI_STATUS_CMD_BUSY (0x0 << 0) /* R */ ++#define SDCI_STATUS_CMD_READY (0x1 << 0) /* R */ ++#define SDCI_STATUS_IDIE (0x1 << 12) /* R */ ++#define SDCI_CARD_BUSY_IN_PRG (0x1 << 20) /* R D0 BUSY:0,IDLE:1 */ ++ ++/* SDCI_STATUS */ ++#define SDCI_STATUS_CDSL (0x1 << 19) /* R */ ++ ++/*---------------------------------------------------------------------------*/ ++/* Register Value */ ++/*---------------------------------------------------------------------------*/ ++#define SDCI_SD_DRV_VALUE 0 ++#define SDCI_SD_SAMP_VALUE_MAX 50 ++#define SDCI_SD_SAMP_VALUE_MIN 0 ++ ++#define SDCI_TIMEOUT_CMD_VALUE 0xFFFFFFFF ++#define SDCI_TIMEOUT_DATA_VALUE 0xFFFFFFFF ++#define SDCI_POWER_ON 1 ++#define SDCI_POWER_OFF 0 ++ ++#define SDCI_CMD_TIMEOUT 10 ++#define SDCI_DAT_TIMEOUT 5000 ++ ++#define SDCI_CMD_TYPE_ADTC 0x2 ++ ++#define SDCI_F_MIN 400000 
++#define SDCI_F_MAX 25000000 ++ ++#define SDCI_SEN_CREFR_VAL (0x1 << 1) ++#define SDCI_SEN_DEBNCE_VAL (0xB << 8) ++ ++#define CARD_CURRENT_STATE (0xF << 9) ++#define CARD_PRG_STATE (0x7 << 9) ++#define CARD_TRAN_STATE (0x4 << 9) ++ ++#define SDCI_CMD13_OK 1 ++#define SDCI_CMD13_FAILED 0 ++ ++#define ERR_TIMEOUT (0x1 << 0) ++#define ERR_CARD_ABSENT (0x1 << 1) ++#define ERR_CMD_RESPONED (0x1 << 2) ++ ++/*---------------------------------------------------------------------------*/ ++/* Structure Type */ ++/*---------------------------------------------------------------------------*/ ++struct phytium_sdci_dma { ++ struct scatterlist *sg; ++ u32 *buf; ++ dma_addr_t bd_addr; ++ size_t bytes; ++}; ++ ++typedef enum { ++ COMMOM_ADTC = 0, ++ BLOCK_RW_ADTC = 1 ++} adtc_type_t; ++ ++struct phytium_sdci_host { ++ struct device *dev; ++ struct mmc_host *mmc; ++ u32 caps; ++ spinlock_t lock; ++ ++ struct mmc_request *mrq; ++ struct mmc_command *cmd; ++ struct mmc_data *data; ++ int error; ++ ++ void __iomem *base; ++ ++ struct phytium_sdci_dma dma_rx; ++ struct phytium_sdci_dma dma_tx; ++ ++ u32 *sg_virt_addr; ++ adtc_type_t adtc_type; ++ ++ struct timer_list hotplug_timer; ++ ++ struct delayed_work req_timeout; ++ u32 cmd_timeout; ++ u32 data_timeout; ++ ++ int irq; ++ int irq_err; ++ int irq_bd; ++ ++ struct clk *src_clk; ++ unsigned long clk_rate; ++ unsigned long clk_div; ++ unsigned long real_rate; ++ ++ u32 current_rca; ++ bool is_multi_rw_only_one_blkcnt; ++}; ++ +diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig +index 5fc9a1bde4ac..95a4000ffd09 100644 +--- a/drivers/mtd/nand/raw/Kconfig ++++ b/drivers/mtd/nand/raw/Kconfig +@@ -285,6 +285,17 @@ config MTD_NAND_ATMEL + Enables support for NAND Flash / Smart Media Card interface + on Atmel AT91 processors. 
+ ++config MTD_NAND_PHYTIUM ++ tristate ++ ++config MTD_NAND_PHYTIUM_PCI ++ tristate "Support Phytium NAND controller as a PCI device" ++ select MTD_NAND_PHYTIUM ++ depends on PCI ++ help ++ Enable the driver for NAND flash controller on Phytium X100 chipset, ++ using the Phytium NAND controller core. ++ + config MTD_NAND_MARVELL + tristate "NAND controller support on Marvell boards" + depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU || \ +diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile +index d5a5f9832b88..71794ec47ad6 100644 +--- a/drivers/mtd/nand/raw/Makefile ++++ b/drivers/mtd/nand/raw/Makefile +@@ -58,6 +58,9 @@ obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o + obj-$(CONFIG_MTD_NAND_MTK) += mtk_ecc.o mtk_nand.o + obj-$(CONFIG_MTD_NAND_TEGRA) += tegra_nand.o + ++obj-$(CONFIG_MTD_NAND_PHYTIUM) += phytium_nand.o ++obj-$(CONFIG_MTD_NAND_PHYTIUM_PCI) += phytium_nand_pci.o ++ + nand-objs := nand_base.o nand_bbt.o nand_timings.o nand_ids.o + nand-objs += nand_amd.o + nand-objs += nand_hynix.o +diff --git a/drivers/mtd/nand/raw/phytium_nand.c b/drivers/mtd/nand/raw/phytium_nand.c +new file mode 100644 +index 000000000000..e285dc35ec42 +--- /dev/null ++++ b/drivers/mtd/nand/raw/phytium_nand.c +@@ -0,0 +1,2117 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Core driver for Phytium NAND flash controller ++ * ++ * Copyright (C) 2020-2021, Phytium Technology Co., Ltd. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "phytium_nand.h" ++ ++u16 timing_asy_mode0[TIMING_ASY_NUM] = { /* x100 pass, sample: 1 */ ++ 0x03, 0x03, 0x28, 0x28, 0x03, 0x03, 0x06, 0x06, 0x28, 0x70, 0x30, 0x50}; ++u16 timing_asy_mode1[TIMING_ASY_NUM] = { /* x100 pass, sample: 1 */ ++ 0x03, 0x03, 0x14, 0x14, 0x03, 0x03, 0x06, 0x06, 0x14, 0x70, 0x30, 0x28}; ++u16 timing_asy_mode2[TIMING_ASY_NUM] = { /* x100 pass, sample: 7/8 (unlic) */ ++ 0x03, 0x03, 0x0D, 0x0D, 0x03, 0x03, 0x06, 0x06, 0x0D, 0x70, 0x20, 0x1A}; ++u16 timing_asy_mode3[TIMING_ASY_NUM] = { /* x100 pass, sample: 4-7 */ ++ 0x03, 0x03, 0x0A, 0x0A, 0x03, 0x03, 0x06, 0x06, 0x0A, 0x70, 0x20, 0x14}; ++u16 timing_asy_mode4[TIMING_ASY_NUM] = { /* x100 1.8v pass */ ++ 0x03, 0x03, 0x08, 0x08, 0x03, 0x03, 0x06, 0x06, 0x08, 0x70, 0x15, 0x10}; ++u16 timing_asy_mode5[TIMING_ASY_NUM] = { /* x100 1.8v pass */ ++ 0x03, 0x03, 0x07, 0x07, 0x03, 0x03, 0x06, 0x06, 0x07, 0x20, 0x15, 0x0E}; ++u16 timing_syn_mode0[TIMING_SYN_NUM] = { /* x100 1.8v pass */ ++ 0x20, 0x41, 0x05, 0x20, 0x10, 0x19, 0x62, 0x40, 0x38, 0x20, 0x00, 0x09, ++ 0x50, 0x20}; ++u16 timing_syn_mode1[TIMING_SYN_NUM] = { /* x100 1.8v pass */ ++ 0x18, 0x32, 0x06, 0x18, 0x0C, 0x10, 0x76, 0x40, 0x2A, 0x1E, 0x00, 0x12, ++ 0x24, 0x18}; ++u16 timing_syn_mode2[TIMING_SYN_NUM] = { /* x100 1.8v pass */ ++ 0x10, 0x0A, 0x04, 0x10, 0x08, 0x0A, 0x6E, 0x50, 0x1D, 0x10, 0x00, 0x0C, ++ 0x18, 0x10}; ++u16 timing_syn_mode3[TIMING_SYN_NUM] = { /* x100 1.8v pass */ ++ 0x0C, 0x1A, 0x02, 0x0C, 0x06, 0x08, 0x78, 0x7C, 0x15, 0x0C, 0x00, 0x08, ++ 0x12, 0x0C}; ++u16 timing_syn_mode4[TIMING_SYN_NUM] = { /* x100 1.8v failed */ ++ 0x08, 0x17, 0x05, 0x08, 0x04, 0x01, 0x73, 0x40, 0x0C, 0x08, 0x00, 0x06, ++ 0x0C, 0x10}; ++u16 timing_tog_ddr_mode0[TIMING_TOG_NUM] = { /* 600M clk */ ++ 0x14, 0x0a, 0x08, 0x08, 0xc8, 0xc8, 0x08, 0x08, 0x20, 0x0a, 0x14, 0x08}; ++ ++static u32 nfc_ecc_errover; ++static u32 
nfc_ecc_err; ++static u32 nfc_irq_st; ++static u32 nfc_irq_en; ++static u32 nfc_irq_complete; ++ ++/* ++ * Internal helper to conditionnally apply a delay (from the above structure, ++ * most of the time). ++ */ ++static void cond_delay(unsigned int ns) ++{ ++ if (!ns) ++ return; ++ ++ if (ns < 10000) ++ ndelay(ns); ++ else ++ udelay(DIV_ROUND_UP(ns, 1000)); ++} ++ ++static inline struct phytium_nfc *to_phytium_nfc(struct nand_controller *ctrl) ++{ ++ return container_of(ctrl, struct phytium_nfc, controller); ++} ++ ++static inline struct phytium_nand_chip *to_phytium_nand(struct nand_chip *chip) ++{ ++ return container_of(chip, struct phytium_nand_chip, chip); ++} ++ ++static u32 phytium_read(struct phytium_nfc *nfc, u32 reg) ++{ ++ return readl_relaxed(nfc->regs + reg); ++} ++ ++static void phytium_write(struct phytium_nfc *nfc, u32 reg, u32 value) ++{ ++ return writel_relaxed(value, nfc->regs + reg); ++} ++ ++static inline int phytium_wait_busy(struct phytium_nfc *nfc) ++{ ++ u32 status; ++ ++ if (nfc_ecc_errover) { ++ nfc_ecc_errover = 0; ++ return 0; ++ } ++ ++ return readl_relaxed_poll_timeout(nfc->regs + NDSR, status, ++ !(status & NDSR_BUSY), 10, 10000); ++} ++ ++static void phytium_nfc_disable_int(struct phytium_nfc *nfc, u32 int_mask) ++{ ++ u32 reg; ++ ++ reg = phytium_read(nfc, NDIR_MASK); ++ phytium_write(nfc, NDIR_MASK, reg | int_mask); ++} ++ ++static void phytium_nfc_enable_int(struct phytium_nfc *nfc, u32 int_mask) ++{ ++ u32 reg; ++ ++ reg = phytium_read(nfc, NDIR_MASK); ++ phytium_write(nfc, NDIR_MASK, reg & (~int_mask)); ++} ++ ++static void phytium_nfc_clear_int(struct phytium_nfc *nfc, u32 int_mask) ++{ ++ phytium_write(nfc, NDIR_MASK, int_mask); ++} ++ ++static int phytium_nfc_cmd_correct(struct phytium_nfc_op *nfc_op) ++{ ++ if (!nfc_op) ++ return -EINVAL; ++ ++ if (nfc_op->cmd_len == 0x01) { ++ nfc_op->cmd[1] = nfc_op->cmd[0]; ++ nfc_op->cmd[0] = 0; ++ } ++ ++ return 0; ++} ++ ++static int phytium_nfc_addr_correct(struct phytium_nfc_op 
*nfc_op) ++{ ++ u32 len; ++ int i, j; ++ ++ if (!nfc_op) ++ return -EINVAL; ++ ++ len = nfc_op->addr_len > PHYTIUM_NFC_ADDR_MAX_LEN ? ++ PHYTIUM_NFC_ADDR_MAX_LEN : nfc_op->addr_len; ++ ++ if (len == PHYTIUM_NFC_ADDR_MAX_LEN) ++ return 0; ++ ++ for (i = len-1, j = PHYTIUM_NFC_ADDR_MAX_LEN - 1; i >= 0; i--, j--) { ++ nfc_op->addr[j] = nfc_op->addr[i]; ++ nfc_op->addr[i] = 0; ++ } ++ ++ return 0; ++} ++ ++static void phytium_nfc_parse_instructions(struct nand_chip *chip, ++ const struct nand_subop *subop, ++ struct phytium_nfc_op *nfc_op) ++{ ++ struct nand_op_instr *instr = NULL; ++ bool first_cmd = true; ++ u32 op_id; ++ int i; ++ ++ /* Reset the input structure as most of its fields will be OR'ed */ ++ memset(nfc_op, 0, sizeof(struct phytium_nfc_op)); ++ ++ for (op_id = 0; op_id < subop->ninstrs; op_id++) { ++ unsigned int offset, naddrs; ++ const u8 *addrs; ++ int len; ++ ++ instr = (struct nand_op_instr *)&subop->instrs[op_id]; ++ ++ switch (instr->type) { ++ case NAND_OP_CMD_INSTR: ++ if (first_cmd) { ++ nfc_op->cmd[0] = instr->ctx.cmd.opcode; ++ } else { ++ nfc_op->cmd[1] = instr->ctx.cmd.opcode; ++ nfc_op->cmd_ctrl.nfc_ctrl.dbc = 1; ++ } ++ ++ nfc_op->cle_ale_delay_ns = instr->delay_ns; ++ first_cmd = false; ++ nfc_op->cmd_len++; ++ ++ break; ++ ++ case NAND_OP_ADDR_INSTR: ++ offset = nand_subop_get_addr_start_off(subop, op_id); ++ naddrs = nand_subop_get_num_addr_cyc(subop, op_id); ++ addrs = &instr->ctx.addr.addrs[offset]; ++ ++ nfc_op->cmd_ctrl.nfc_ctrl.addr_cyc = naddrs; ++ ++ for (i = 0; i < min_t(u32, PHYTIUM_NFC_ADDR_MAX_LEN, naddrs); i++) ++ nfc_op->addr[i] = addrs[i]; ++ ++ nfc_op->cle_ale_delay_ns = instr->delay_ns; ++ ++ nfc_op->addr_len = naddrs; ++ break; ++ ++ case NAND_OP_DATA_IN_INSTR: ++ nfc_op->data_instr = instr; ++ nfc_op->data_instr_idx = op_id; ++ nfc_op->cmd_ctrl.nfc_ctrl.dc = 1; ++ len = nand_subop_get_data_len(subop, op_id); ++ nfc_op->page_cnt = len; ++ nfc_op->data_delay_ns = instr->delay_ns; ++ ++ break; ++ ++ case 
NAND_OP_DATA_OUT_INSTR: ++ nfc_op->data_instr = instr; ++ nfc_op->data_instr_idx = op_id; ++ nfc_op->cmd_ctrl.nfc_ctrl.dc = 1; ++ len = nand_subop_get_data_len(subop, op_id); ++ nfc_op->page_cnt = len; ++ nfc_op->data_delay_ns = instr->delay_ns; ++ break; ++ ++ case NAND_OP_WAITRDY_INSTR: ++ nfc_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms; ++ nfc_op->rdy_delay_ns = instr->delay_ns; ++ break; ++ } ++ } ++} ++ ++int phytium_nfc_prepare_cmd(struct nand_chip *chip, ++ struct phytium_nfc_op *nfc_op, ++ enum dma_data_direction direction) ++{ ++ struct phytium_nand_chip *phytium_nand = to_phytium_nand(chip); ++ struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); ++ int i; ++ ++ phytium_nfc_cmd_correct(nfc_op); ++ phytium_nfc_addr_correct(nfc_op); ++ ++ nfc_op->cmd_ctrl.nfc_ctrl.csel = phytium_nand->selected_die; ++ ++ for (i = 0; i < PHYTIUM_NFC_ADDR_MAX_LEN; i++) ++ nfc_op->mem_addr_first[i] = (nfc->dma_phy_addr >> (8 * i)) & 0xFF; ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(phytium_nfc_prepare_cmd); ++ ++static int phytium_nfc_cmd_dump(struct phytium_nfc *nfc, ++ struct phytium_nfc_op *nfc_op, u8 *buf) ++{ ++ u8 *p; ++ u8 str[1024] = {0}; ++ int i; ++ ++ sprintf(str, "Phytium NFC cmd dump:\n"); ++ sprintf(str, "%s cmd0:%x, cmd1:%x, ctrl:%x, page_cnt:%d\n", ++ str, nfc_op->cmd[0], nfc_op->cmd[1], nfc_op->cmd_ctrl.ctrl, nfc_op->page_cnt); ++ ++ p = &nfc_op->addr[0]; ++ sprintf(str, "%s addr:%02x %02x %02x %02x %02x\n", ++ str, p[0], p[1], p[2], p[3], p[4]); ++ ++ p = &nfc_op->mem_addr_first[0]; ++ sprintf(str, "%s mem_addr_first:%02x %02x %02x %02x %02x\n", ++ str, p[0], p[1], p[2], p[3], p[4]); ++ ++ for (i = 0; i < PHYTIUM_NFC_DSP_SIZE; i++) ++ sprintf(str, "%s %02x", str, buf[i]); ++ ++ dev_info(nfc->dev, "%s\n", str); ++ ++ return 0; ++} ++ ++int phytium_nfc_data_dump(struct phytium_nfc *nfc, u8 *buf, u32 len) ++{ ++ u8 str[1024] = {0}; ++ int i; ++ ++ len = len > 512 ? 
512 : len; ++ ++ sprintf(str, "Phytium NFC data dump: %d\n", len); ++ for (i = 0; i < len; i++) { ++ if (i && (i%128 == 0)) { ++ dev_info(nfc->dev, "next:\n%s\n", str); ++ memset(str, 0, 1024); ++ } ++ ++ if (i && (i%16 == 0)) ++ sprintf(str, "%s\n", str); ++ sprintf(str, "%s %02x", str, buf[i]); ++ } ++ ++ dev_info(nfc->dev, "%s\n", str); ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(phytium_nfc_data_dump); ++ ++int phytium_nfc_send_cmd(struct nand_chip *chip, ++ struct phytium_nfc_op *nfc_op) ++{ ++ struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); ++ u32 value = 0; ++ ++ memset((u8 *)nfc->dsp_addr, 0, PAGE_SIZE); ++ memcpy((u8 *)nfc->dsp_addr, (u8 *)nfc_op, PHYTIUM_NFC_DSP_SIZE); ++ ++ phytium_nfc_cmd_dump(nfc, nfc_op, (u8 *)nfc->dsp_addr); ++ ++ if (phytium_wait_busy(nfc) != 0) { ++ dev_err(nfc->dev, "NFC was always busy\n"); ++ dev_err(nfc->dev, "NFC state: %x\n", phytium_read(nfc, NDSR)); ++ dev_err(nfc->dev, "NFC debug: %x\n", phytium_read(nfc, ND_DEBUG)); ++ return 0; ++ } ++ ++ reinit_completion(&nfc->complete); ++ ++ spin_lock(&nfc->spinlock); ++ value = nfc->dsp_phy_addr & 0xFFFFFFFF; ++ phytium_write(nfc, NDAR0, value); ++ ++ /* Don't modify NDAR1_DMA_RLEN & NDAR1_DMA_WLEN */ ++ value = phytium_read(nfc, NDAR1); ++ value |= NDAR1_H8((nfc->dsp_phy_addr >> 32) & 0xFF); ++ phytium_write(nfc, NDAR1, value); ++ ++ phytium_nfc_enable_int(nfc, NDIR_CMD_FINISH_MASK); ++ ++ value |= NDAR1_DMA_EN; ++ phytium_write(nfc, NDAR1, value); ++ spin_unlock(&nfc->spinlock); ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(phytium_nfc_send_cmd); ++ ++int phytium_nfc_prepare_cmd2(struct nand_chip *chip, ++ struct phytium_nfc_op *nfc_op, ++ enum dma_data_direction direction, ++ u32 cmd_num) ++{ ++ struct phytium_nand_chip *phytium_nand = to_phytium_nand(chip); ++ int i; ++ ++ for (i = 0; i < cmd_num; i++) { ++ phytium_nfc_cmd_correct(nfc_op); ++ phytium_nfc_addr_correct(nfc_op); ++ nfc_op->cmd_ctrl.nfc_ctrl.csel = phytium_nand->selected_die; ++ nfc_op++; ++ } ++ ++ return 0; ++} 
++EXPORT_SYMBOL_GPL(phytium_nfc_prepare_cmd2); ++ ++int phytium_nfc_send_cmd2(struct nand_chip *chip, ++ struct phytium_nfc_op *nfc_op, ++ u32 cmd_num) ++{ ++ struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); ++ u32 value = 0; ++ int i; ++ ++ memset((u8 *)nfc->dsp_addr, 0, PAGE_SIZE); ++ ++ for (i = 0; i < cmd_num; i++) { ++ memcpy((u8 *)nfc->dsp_addr + i*PHYTIUM_NFC_DSP_SIZE, ++ (u8 *)nfc_op, PHYTIUM_NFC_DSP_SIZE); ++ phytium_nfc_cmd_dump(nfc, nfc_op, (u8 *)nfc->dsp_addr + i*PHYTIUM_NFC_DSP_SIZE); ++ nfc_op++; ++ } ++ ++ if (phytium_wait_busy(nfc) != 0) { ++ dev_err(nfc->dev, "NFC was always busy\n"); ++ dev_err(nfc->dev, "NFC state: %x\n", phytium_read(nfc, NDSR)); ++ dev_err(nfc->dev, "NFC debug: %x\n", phytium_read(nfc, ND_DEBUG)); ++ return 0; ++ } ++ ++ reinit_completion(&nfc->complete); ++ ++ spin_lock(&nfc->spinlock); ++ value = nfc->dsp_phy_addr & 0xFFFFFFFF; ++ phytium_write(nfc, NDAR0, value); ++ ++ /* Don't modify NDAR1_DMA_RLEN & NDAR1_DMA_WLEN */ ++ value = phytium_read(nfc, NDAR1); ++ value |= NDAR1_H8((nfc->dsp_phy_addr >> 32) & 0xFF); ++ phytium_write(nfc, NDAR1, value); ++ ++ phytium_nfc_enable_int(nfc, NDIR_DMA_FINISH_MASK | NDIR_ECC_ERR_MASK); ++ ++ value |= NDAR1_DMA_EN; ++ phytium_write(nfc, NDAR1, value); ++ spin_unlock(&nfc->spinlock); ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(phytium_nfc_send_cmd2); ++ ++int phytium_nfc_wait_op(struct nand_chip *chip, ++ u32 timeout_ms) ++{ ++ struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); ++ int ret; ++ ++ /* Timeout is expressed in ms */ ++ if (!timeout_ms) ++ timeout_ms = IRQ_TIMEOUT; ++ else if (timeout_ms > 1000) ++ timeout_ms = 1000; ++ else if (timeout_ms < 100) ++ timeout_ms = 100; ++ ++ ret = wait_for_completion_timeout(&nfc->complete, ++ msecs_to_jiffies(timeout_ms)); ++ if (!ret) { ++ dev_err(nfc->dev, "Timeout waiting for RB signal\n"); ++ dev_err(nfc->dev, "NFC state: %x\n", phytium_read(nfc, NDSR)); ++ dev_err(nfc->dev, "NFC irq state: %x, irq en:%x\n", ++ 
phytium_read(nfc, NDIR), phytium_read(nfc, NDIR_MASK)); ++ dev_err(nfc->dev, "NFC debug: %x\n", phytium_read(nfc, ND_DEBUG)); ++ ++ complete_release(&nfc->complete); ++ phytium_nfc_clear_int(nfc, NDIR_ALL_INT); ++ return -ETIMEDOUT; ++ } ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(phytium_nfc_wait_op); ++ ++static int phytium_nfc_xfer_data_pio(struct phytium_nfc *nfc, ++ const struct nand_subop *subop, ++ struct phytium_nfc_op *nfc_op) ++{ ++ const struct nand_op_instr *instr = nfc_op->data_instr; ++ unsigned int op_id = nfc_op->data_instr_idx; ++ unsigned int len = nand_subop_get_data_len(subop, op_id); ++ unsigned int offset = nand_subop_get_data_start_off(subop, op_id); ++ bool reading = (instr->type == NAND_OP_DATA_IN_INSTR); ++ ++ if (reading) { ++ u8 *in = instr->ctx.data.buf.in + offset; ++ ++ memcpy(in, nfc->dma_buf, len); ++ ++ nfc->dma_offset = 0; ++ } else { ++ const u8 *out = instr->ctx.data.buf.out + offset; ++ ++ memcpy(nfc->dma_buf, out, len); ++ } ++ ++ return 0; ++} ++ ++static int memcpy_to_reg16(struct phytium_nfc *nfc, u32 reg, u16 *buf, size_t len) ++{ ++ int i; ++ u32 val = 0; ++ ++ if (!nfc || !buf || (len >= 16)) ++ return -EINVAL; ++ ++ for (i = 0; i < len; i++) { ++ val = (val << 16) + buf[i]; ++ if (i % 2) { ++ phytium_write(nfc, reg, val); ++ val = 0; ++ reg += 4; ++ } ++ } ++ ++ return 0; ++} ++ ++int phytium_nfc_default_data_interface(struct phytium_nfc *nfc) ++{ ++ int value; ++ ++ value = phytium_read(nfc, NDCR0); ++ value &= (~NDCR0_IN_MODE(3)); ++ value |= NDCR0_IN_MODE(nfc->inter_mode); ++ phytium_write(nfc, NDCR0, value); ++ ++ switch (nfc->inter_mode) { ++ case ASYN_SDR: ++ if (nfc->timing_mode == ASY_MODE4) { ++ memcpy_to_reg16(nfc, NDTR0, timing_asy_mode4, TIMING_ASY_NUM); ++ phytium_write(nfc, NDCR1, NDCR1_SAMPL_PHASE(4)); ++ } else if (nfc->timing_mode == ASY_MODE3) { ++ memcpy_to_reg16(nfc, NDTR0, timing_asy_mode3, TIMING_ASY_NUM); ++ phytium_write(nfc, NDCR1, NDCR1_SAMPL_PHASE(5)); ++ } else if (nfc->timing_mode == ASY_MODE2) 
{ ++ memcpy_to_reg16(nfc, NDTR0, timing_asy_mode2, TIMING_ASY_NUM); ++ phytium_write(nfc, NDCR1, NDCR1_SAMPL_PHASE(3)); ++ } else if (nfc->timing_mode == ASY_MODE1) { ++ memcpy_to_reg16(nfc, NDTR0, timing_asy_mode1, TIMING_ASY_NUM); ++ phytium_write(nfc, NDCR1, NDCR1_SAMPL_PHASE(2)); ++ } else { ++ memcpy_to_reg16(nfc, NDTR0, timing_asy_mode0, TIMING_ASY_NUM); ++ phytium_write(nfc, NDCR1, NDCR1_SAMPL_PHASE(1)); ++ } ++ phytium_write(nfc, ND_INTERVAL_TIME, 0x01); ++ break; ++ case ONFI_DDR: ++ if (nfc->timing_mode == SYN_MODE4) { ++ memcpy_to_reg16(nfc, NDTR6, timing_syn_mode4, TIMING_SYN_NUM); ++ phytium_write(nfc, NDCR1, NDCR1_SAMPL_PHASE(0x0D)); ++ phytium_write(nfc, ND_INTERVAL_TIME, 0x30); ++ } else if (nfc->timing_mode == SYN_MODE3) { ++ memcpy_to_reg16(nfc, NDTR6, timing_syn_mode3, TIMING_SYN_NUM); ++ phytium_write(nfc, NDCR1, NDCR1_SAMPL_PHASE(0x05)); ++ phytium_write(nfc, ND_INTERVAL_TIME, 0x18); ++ } else if (nfc->timing_mode == SYN_MODE2) { ++ memcpy_to_reg16(nfc, NDTR6, timing_syn_mode2, TIMING_SYN_NUM); ++ phytium_write(nfc, NDCR1, NDCR1_SAMPL_PHASE(0x08)); ++ phytium_write(nfc, ND_INTERVAL_TIME, 0x20); ++ } else if (nfc->timing_mode == SYN_MODE1) { ++ memcpy_to_reg16(nfc, NDTR6, timing_syn_mode1, TIMING_SYN_NUM); ++ phytium_write(nfc, NDCR1, NDCR1_SAMPL_PHASE(0x12)); ++ phytium_write(nfc, ND_INTERVAL_TIME, 0x40); ++ } else { ++ memcpy_to_reg16(nfc, NDTR6, timing_syn_mode0, TIMING_SYN_NUM); ++ phytium_write(nfc, NDCR1, NDCR1_SAMPL_PHASE(0x12)); ++ phytium_write(nfc, ND_INTERVAL_TIME, 0x40); ++ } ++ break; ++ case TOG_ASYN_DDR: ++ phytium_write(nfc, NDCR1, NDCR1_SAMPL_PHASE(8)); ++ phytium_write(nfc, ND_INTERVAL_TIME, 0xC8); ++ memcpy_to_reg16(nfc, NDTR13, timing_tog_ddr_mode0, TIMING_TOG_NUM); ++ break; ++ } ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(phytium_nfc_default_data_interface); ++ ++static int phytium_nfc_naked_waitrdy_exec(struct nand_chip *chip, ++ const struct nand_subop *subop) ++{ ++ struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); 
++ struct phytium_nfc_op nfc_op; ++ enum dma_data_direction direction; ++ int ret = 0; ++ ++ phytium_nfc_parse_instructions(chip, subop, &nfc_op); ++ ++ dev_info(nfc->dev, "Phytium nand command 0x%02x 0x%02x.\n", ++ nfc_op.cmd[0], nfc_op.cmd[1]); ++ ++ switch (nfc_op.cmd[0]) { ++ case NAND_CMD_PARAM: ++ memset(nfc->dma_buf, 0, PAGE_SIZE); ++ direction = DMA_FROM_DEVICE; ++ nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ_PARAM; ++ nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; ++ if (nfc->inter_pro == NAND_ONFI) ++ nfc_op.page_cnt = 3 * sizeof(struct nand_onfi_params); ++ else if (nfc->inter_pro == NAND_JEDEC) ++ nfc_op.page_cnt = 3 * sizeof(struct nand_jedec_params); ++ if (nfc_op.page_cnt) ++ nfc_op.cmd_ctrl.nfc_ctrl.dc = 1; ++ nfc->dma_offset = 0; ++ break; ++ case NAND_CMD_SET_FEATURES: ++ direction = DMA_TO_DEVICE; ++ nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_SET_FTR; ++ nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; ++ nfc_op.cmd_ctrl.nfc_ctrl.dc = 1; ++ if (nfc->inter_mode != ASYN_SDR) { ++ dev_err(nfc->dev, "Not support SET_FEATURES command!\n"); ++ return 0; ++ } ++ break; ++ case NAND_CMD_GET_FEATURES: ++ direction = DMA_FROM_DEVICE; ++ nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_GET_FTR; ++ nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; ++ nfc_op.cmd_ctrl.nfc_ctrl.dc = 1; ++ break; ++ case NAND_CMD_READ0: ++ if (nfc_op.cmd[1] == NAND_CMD_READSTART) { /* large page */ ++ direction = DMA_FROM_DEVICE; ++ nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ; ++ nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; ++ } else if (nfc_op.cmd[1] == NAND_CMD_SEQIN) { /* program page begin */ ++ nfc_op.cmd[0] = NAND_CMD_SEQIN; ++ nfc_op.cmd[1] = NAND_CMD_PAGEPROG; ++ direction = DMA_TO_DEVICE; ++ nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_PAGE_PRO; ++ nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; ++ } else { /* small page */ ++ direction = DMA_FROM_DEVICE; ++ nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ; ++ nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; ++ } ++ break; ++ case NAND_CMD_RNDOUT: /* change read column */ ++ direction = 
DMA_FROM_DEVICE; ++ nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_CH_READ_COL; ++ nfc_op.cmd_ctrl.nfc_ctrl.dc = 1; ++ break; ++ case NAND_CMD_READSTART: /* large page */ ++ direction = DMA_FROM_DEVICE; ++ nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ; ++ nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; ++ break; ++ case NAND_CMD_RNDOUTSTART: /* change read column */ ++ direction = DMA_FROM_DEVICE; ++ nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_CH_READ_COL; ++ nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; ++ nfc->dma_offset = nfc_op.addr[1]; ++ nfc->dma_offset = (nfc->dma_offset << 8) + nfc_op.addr[0]; ++ break; ++ case NAND_CMD_SEQIN: /* program begin */ ++ if (nfc_op.cmd[0] == NAND_CMD_READ0) { ++ nfc_op.cmd[0] = NAND_CMD_SEQIN; ++ nfc_op.cmd[1] = NAND_CMD_PAGEPROG; ++ } ++ direction = DMA_TO_DEVICE; ++ nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_PAGE_PRO; ++ nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; ++ break; ++ case NAND_CMD_RNDIN: /* change write column */ ++ direction = DMA_TO_DEVICE; ++ nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_CH_WR_COL; ++ nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; ++ break; ++ case NAND_CMD_PAGEPROG: /* program end */ ++ nfc_op.cmd[0] = NAND_CMD_RNDIN; ++ nfc_op.cmd[1] = NAND_CMD_PAGEPROG; ++ direction = DMA_TO_DEVICE; ++ nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_PAGE_PRO; ++ nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; ++ break; ++ default: ++ dev_err(nfc->dev, "Not support cmd %d.\n", nfc_op.cmd[1]); ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ if ((nfc_op.data_instr) && (direction == DMA_TO_DEVICE)) ++ phytium_nfc_xfer_data_pio(nfc, subop, &nfc_op); ++ ++ phytium_nfc_prepare_cmd(chip, &nfc_op, direction); ++ phytium_nfc_send_cmd(chip, &nfc_op); ++ cond_delay(nfc_op.cle_ale_delay_ns); ++ nfc_op.rdy_timeout_ms = nfc_op.rdy_timeout_ms; ++ ret = phytium_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); ++ if (ret) ++ goto out; ++ ++ cond_delay(nfc_op.rdy_delay_ns); ++ ++ if ((nfc_op.data_instr) && (direction == DMA_FROM_DEVICE)) ++ phytium_nfc_xfer_data_pio(nfc, subop, &nfc_op); ++ ++out: ++ return 
ret; ++} ++ ++static int phytium_nfc_read_id_type_exec(struct nand_chip *chip, ++ const struct nand_subop *subop) ++{ ++ struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); ++ struct phytium_nfc_op nfc_op; ++ enum dma_data_direction direction; ++ u16 read_len = 0; ++ int ret; ++ u8 *buf = nfc->dma_buf; ++ ++ memset(nfc->dma_buf, 0, PAGE_SIZE); ++ direction = DMA_FROM_DEVICE; ++ ++ phytium_nfc_parse_instructions(chip, subop, &nfc_op); ++ read_len = nfc_op.page_cnt; ++ nfc_op.page_cnt = (read_len & 0x03) ? ((read_len & 0xFFFC) + 4) : read_len; ++ ++ nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ_ID; ++ nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 0; ++ ++ phytium_nfc_prepare_cmd(chip, &nfc_op, direction); ++ phytium_nfc_send_cmd(chip, &nfc_op); ++ cond_delay(nfc_op.cle_ale_delay_ns); ++ ret = phytium_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); ++ if (ret) ++ return ret; ++ ++ cond_delay(nfc_op.rdy_delay_ns); ++ ++ if (!strncmp(nfc->dma_buf, "ONFI", 4)) { ++ nfc->inter_pro = NAND_ONFI; ++ } else if (!strncmp(nfc->dma_buf, "JEDEC", 5)) { ++ nfc->inter_pro = NAND_JEDEC; ++ if (buf[5] == 1) ++ nfc->inter_mode = ASYN_SDR; ++ else if (buf[5] == 2) ++ nfc->inter_mode = TOG_ASYN_DDR; ++ else if (buf[5] == 4) ++ nfc->inter_mode = ASYN_SDR; ++ } else { ++ nfc->inter_pro = NAND_OTHER; ++ } ++ ++ dev_info(nfc->dev, "Nand protocol: %d, interface mode: %d\n", ++ nfc->inter_pro, nfc->inter_mode); ++ ++ phytium_nfc_xfer_data_pio(nfc, subop, &nfc_op); ++ ++ return 0; ++} ++ ++static int phytium_nfc_read_status_exec(struct nand_chip *chip, ++ const struct nand_subop *subop) ++{ ++ struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); ++ struct phytium_nfc_op nfc_op; ++ enum dma_data_direction direction; ++ u16 read_len = 0; ++ u32 timeout, count = 0; ++ int ret = 0; ++ ++ direction = DMA_FROM_DEVICE; ++ ++ phytium_nfc_parse_instructions(chip, subop, &nfc_op); ++ read_len = nfc_op.page_cnt; ++ nfc_op.page_cnt = (read_len & 0x03) ? 
((read_len & 0xFFFC) + 4) : read_len; ++ nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ_STATUS; ++ phytium_nfc_prepare_cmd(chip, &nfc_op, direction); ++ ++read_status_retry: ++ count++; ++ phytium_nfc_send_cmd(chip, &nfc_op); ++ cond_delay(nfc_op.cle_ale_delay_ns); ++ timeout = nfc_op.rdy_timeout_ms ? nfc_op.rdy_timeout_ms : 10; ++ ret = phytium_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); ++ if (ret) ++ goto out; ++ ++ phytium_nfc_xfer_data_pio(nfc, subop, &nfc_op); ++ ++ if (0xE0 != *(u8 *)(nfc->dma_buf)) { ++ dev_info(nfc->dev, "Retry to read status (%x)\n", *(u8 *)(nfc->dma_buf)); ++ ++ if (count < 5) ++ goto read_status_retry; ++ } ++ ++out: ++ return ret; ++} ++ ++static int phytium_nfc_reset_cmd_type_exec(struct nand_chip *chip, ++ const struct nand_subop *subop) ++{ ++ struct phytium_nfc_op nfc_op; ++ enum dma_data_direction direction; ++ ++ phytium_nfc_parse_instructions(chip, subop, &nfc_op); ++ ++ direction = DMA_NONE; ++ nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_RESET; ++ nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; ++ phytium_nfc_prepare_cmd(chip, &nfc_op, direction); ++ phytium_nfc_send_cmd(chip, &nfc_op); ++ cond_delay(nfc_op.cle_ale_delay_ns); ++ ++ return phytium_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); ++} ++ ++static int phytium_nfc_erase_cmd_type_exec(struct nand_chip *chip, ++ const struct nand_subop *subop) ++{ ++ struct phytium_nfc_op nfc_op; ++ enum dma_data_direction direction; ++ ++ phytium_nfc_parse_instructions(chip, subop, &nfc_op); ++ direction = DMA_NONE; ++ nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_ERASE; ++ nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; ++ phytium_nfc_prepare_cmd(chip, &nfc_op, direction); ++ phytium_nfc_send_cmd(chip, &nfc_op); ++ cond_delay(nfc_op.cle_ale_delay_ns); ++ ++ return phytium_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); ++} ++ ++static int phytium_nfc_data_in_type_exec(struct nand_chip *chip, ++ const struct nand_subop *subop) ++{ ++ struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); ++ struct phytium_nfc_op nfc_op; 
/*
 * Pattern table mapping generic NAND operations onto the controller
 * helpers above.  Naked commands are not supported by the hardware, so
 * each recognised shape gets its own execution function.
 */
static const struct nand_op_parser phytium_nfc_op_parser = NAND_OP_PARSER(
	/* Naked commands not supported, use a function for each pattern */
	/* READID: CMD + ADDR + short DATA_IN */
	NAND_OP_PARSER_PATTERN(
		phytium_nfc_read_id_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, PHYTIUM_NFC_ADDR_MAX_LEN),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)),
	/* ERASE: CMD + ADDR + CMD + WAITRDY */
	NAND_OP_PARSER_PATTERN(
		phytium_nfc_erase_cmd_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, PHYTIUM_NFC_ADDR_MAX_LEN),
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
	/* READ STATUS: CMD + 1-byte DATA_IN */
	NAND_OP_PARSER_PATTERN(
		phytium_nfc_read_status_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
	/* RESET: CMD + WAITRDY */
	NAND_OP_PARSER_PATTERN(
		phytium_nfc_reset_cmd_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
	/* Generic command + wait + read data (e.g. PARAM page) */
	NAND_OP_PARSER_PATTERN(
		phytium_nfc_naked_waitrdy_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, PHYTIUM_NFC_ADDR_MAX_LEN),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)),
	/* Generic command + wait, no data */
	NAND_OP_PARSER_PATTERN(
		phytium_nfc_naked_waitrdy_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, PHYTIUM_NFC_ADDR_MAX_LEN),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
	/* Generic command with short write payload (e.g. SET_FEATURES) */
	NAND_OP_PARSER_PATTERN(
		phytium_nfc_naked_waitrdy_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, PHYTIUM_NFC_ADDR_MAX_LEN),
		NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 8),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
	/* Two-cycle command followed by a data read (large-page read) */
	NAND_OP_PARSER_PATTERN(
		phytium_nfc_naked_waitrdy_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, PHYTIUM_NFC_ADDR_MAX_LEN),
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)),
	/* Bare DATA_IN: drain previously transferred data */
	NAND_OP_PARSER_PATTERN(
		phytium_nfc_data_in_type_exec,
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)),
	);

/* ->exec_op() hook: dispatch through the pattern parser above. */
static int phytium_nfc_exec_op(struct nand_chip *chip,
			       const struct nand_operation *op,
			       bool check_only)
{
	return nand_op_parser_exec_op(chip, &phytium_nfc_op_parser,
				      op, check_only);
}

/*
 * Put the controller back into a known state: mask and acknowledge all
 * interrupts, clear error/FIFO state and disable ECC + spare handling.
 */
static int phytium_nfc_reset(struct phytium_nfc *nfc)
{
	u32 value;

	phytium_write(nfc, NDIR_MASK, NDIR_ALL_INT);
	phytium_write(nfc, NDSR, NDIR_ALL_INT);

	phytium_write(nfc, ND_ERR_CLR, 0x0F);
	phytium_write(nfc, NDFIFO_CLR, 1);

	value = phytium_read(nfc, NDCR0);
	phytium_write(nfc, NDCR0, value & ~(NDCR0_ECC_EN | NDCR0_SPARE_EN));

	return 0;
}

/*
 * ->select_chip() hook.  A negative or out-of-range die deselects;
 * switching to a new die resets the controller first.
 */
static void phytium_nfc_select_chip(struct mtd_info *mtd, int die_nr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct phytium_nand_chip *phytium_nand = to_phytium_nand(chip);
	struct phytium_nfc *nfc = to_phytium_nfc(chip->controller);

	dev_dbg(nfc->dev, "Phytium nand selected chip %d\n", die_nr);

	/* Already selected: nothing to do. */
	if (chip == nfc->selected_chip && die_nr == phytium_nand->selected_die)
		return;

	if (die_nr < 0 || die_nr >= phytium_nand->nsels) {
		nfc->selected_chip = NULL;
		phytium_nand->selected_die = -1;
		return;
	}

	phytium_nfc_reset(nfc);

	nfc->selected_chip = chip;
	phytium_nand->selected_die = die_nr;
}
/* OOB layout: ECC bytes occupy the tail of the spare area (section 0 only). */
static int phytium_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
				      struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	oobregion->length = chip->ecc.total;
	oobregion->offset = mtd->oobsize - oobregion->length;

	return 0;
}

/* OOB layout: free bytes between the bad-block marker and the ECC tail. */
static int phytium_nand_ooblayout_free(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	/*
	 * Bootrom looks in bytes 0 & 5 for bad blocks for the
	 * 4KB page / 4bit BCH combination.
	 */
	if (mtd->writesize >= SZ_4K)
		oobregion->offset = 6;
	else
		oobregion->offset = 2;

	oobregion->length = mtd->oobsize - chip->ecc.total - oobregion->offset;

	return 0;
}

static const struct mtd_ooblayout_ops phytium_nand_ooblayout_ops = {
	.ecc = phytium_nand_ooblayout_ecc,
	.free = phytium_nand_ooblayout_free,
};

/* Set the ECC enable bit in NDCR0 (read-modify-write, idempotent). */
static void phytium_nfc_enable_hw_ecc(struct nand_chip *chip)
{
	struct phytium_nfc *nfc = to_phytium_nfc(chip->controller);
	u32 ndcr0 = phytium_read(nfc, NDCR0);

	if (!(ndcr0 & NDCR0_ECC_EN))
		phytium_write(nfc, NDCR0, ndcr0 | NDCR0_ECC_EN);
}

/* Clear the ECC enable bit in NDCR0 (read-modify-write, idempotent). */
static void phytium_nfc_disable_hw_ecc(struct nand_chip *chip)
{
	struct phytium_nfc *nfc = to_phytium_nfc(chip->controller);
	u32 ndcr0 = phytium_read(nfc, NDCR0);

	if (ndcr0 & NDCR0_ECC_EN)
		phytium_write(nfc, NDCR0, ndcr0 & ~NDCR0_ECC_EN);
}

/*
 * Workqueue bottom half for the ISR below: completes waiters when the ISR
 * marked the operation finished, then clears the shared IRQ bookkeeping
 * flags (file-scope globals, written by the ISR).
 */
static void nfc_irq_callback(struct work_struct *work)
{
	struct phytium_nfc *nfc = container_of(work, struct phytium_nfc, work);

	if (!nfc)
		return;

	if (nfc_irq_complete)
		complete_all(&nfc->complete);

	nfc_irq_st = 0;
	nfc_irq_en = 0;
	nfc_irq_complete = 0;
}

/*
 * Interrupt handler.  Reads status (NDIR) against the enabled mask,
 * acknowledges the sources, and decides whether the pending operation is
 * complete; the heavier completion work is deferred to nfc_irq_callback().
 */
static irqreturn_t phytium_nfc_isr(int irq, void *dev_id)
{
	struct phytium_nfc *nfc = dev_id;
	u32 st = phytium_read(nfc, NDIR);
	u32 ien = (~phytium_read(nfc, NDIR_MASK)) & NDIR_ALL_INT;

	/* Not our interrupt: none of the enabled sources fired. */
	if (!(st & ien))
		return IRQ_NONE;

	nfc_irq_st = st;
	nfc_irq_en = ien;
	phytium_nfc_disable_int(nfc, st & NDIR_ALL_INT);
	/* NOTE(review): undocumented register 0xFD0 cleared here — confirm */
	phytium_write(nfc, 0xFD0, 0);

	if (st & (NDIR_CMD_FINISH | NDIR_DMA_FINISH)) {
		/* Command/DMA done; remember whether ECC flagged an error. */
		if (st & NDIR_ECC_ERR)
			nfc_ecc_err = 1;
		phytium_write(nfc, NDIR, st);
		nfc_irq_complete = 1;
	} else if (st & (NDIR_FIFO_TIMEOUT | NDIR_PGFINISH)) {
		/* Intermediate event: re-arm the remaining completion IRQs. */
		phytium_write(nfc, NDIR, st);
		phytium_nfc_enable_int(nfc, (~st) & (NDIR_DMA_FINISH_MASK |
						     NDIR_PGFINISH_MASK |
						     NDIR_FIFO_TIMEOUT_MASK |
						     NDIR_CMD_FINISH_MASK));
		nfc_irq_complete = 0;
	} else if (st & NDIR_ECC_ERR) {
		/* Uncorrectable-ECC path: clear error state and FIFO. */
		phytium_write(nfc, ND_ERR_CLR, 0x08);
		phytium_write(nfc, NDIR, st);
		phytium_write(nfc, NDFIFO_CLR, 0x01);
		nfc_irq_complete = 1;
		nfc_ecc_errover = 1;
	} else {
		phytium_write(nfc, NDIR, st);
		nfc_irq_complete = 1;
	}

	schedule_work(&nfc->work);

	return IRQ_HANDLED;
}

/*
 * Walk the per-step ECC flag registers and count flagged bit errors.
 * Each 32-bit flag word carries two 16-bit entries; a non-zero entry
 * <= 4096 encodes (bit position + 1) of a correctable error.
 *
 * Returns the number of flagged bitflips, or -EINVAL if @buf is NULL.
 *
 * NOTE(review): the flagged bits are logged but never actually XOR-flipped
 * in @buf — corrected data may still contain the error; confirm whether the
 * hardware writes back corrected data or a `buf[...] ^= BIT(tmp % 8)` is
 * missing here.
 */
static int phytium_nfc_hw_ecc_correct(struct nand_chip *chip,
				      char *buf, int len)
{
	struct phytium_nfc *nfc = to_phytium_nfc(chip->controller);
	u32 i, j, value, tmp;
	int stat = 0;

	if (!buf)
		return -EINVAL;

	for (i = 0; i < chip->ecc.steps; i++) {
		for (j = 0; j < 2; j++) {
			/* NOTE(review): 0xB8 = first ECC flag register offset */
			value = phytium_read(nfc, 0xB8 + 4 * (2 * i + j));
			dev_info(nfc->dev, "ECC_FLAG: offset:%x value:0x%08x\n",
				 0xB8 + 4 * (2 * i + j), value);

			tmp = value & 0xFFFF;
			if (tmp && (tmp <= 4096)) {
				tmp--;
				stat++;
				dev_info(nfc->dev, "ECC_CORRECT %x %02x\n",
					 chip->ecc.size * i + (tmp >> 3),
					 buf[chip->ecc.size * i + (tmp >> 3)]);
				dev_info(nfc->dev, "ECC_CORRECT xor %x %02x\n",
					 0x01 << (tmp % 8), buf[chip->ecc.size * i + (tmp >> 3)]);
			} else if (tmp > 4096) {
				dev_info(nfc->dev, "ECC_CORRECT offset > 4096!\n");
			}

			tmp = (value >> 16) & 0xFFFF;
			if (tmp && (tmp <= 4096)) {
				tmp--;
				stat++;
				dev_info(nfc->dev, "ECC_CORRECT %x %02x\n",
					 chip->ecc.size * i + (tmp >> 3),
					 buf[chip->ecc.size * i + (tmp >> 3)]);
				dev_info(nfc->dev, "ECC_CORRECT xor %x %02x\n",
					 chip->ecc.size * i + (tmp >> 3),
					 buf[chip->ecc.size * i + (tmp >> 3)]);
			} else if (tmp > 4096) {
				dev_info(nfc->dev, "ECC_CORRECT offset > 4096!\n");
			}
		}
	}

	return stat;
}
chip->ecc.size * i + (tmp >> 3), ++ buf[chip->ecc.size * i + (tmp >> 3)]); ++ dev_info(nfc->dev, "ECC_CORRECT xor %x %02x\n", ++ chip->ecc.size * i + (tmp >> 3), ++ buf[chip->ecc.size * i + (tmp >> 3)]); ++ } else if (tmp > 4096) { ++ dev_info(nfc->dev, "ECC_CORRECT offset > 4096!\n"); ++ } ++ } ++ } ++ ++ return stat; ++} ++ ++static int phytium_nand_page_read(struct mtd_info *mtd, struct nand_chip *chip, ++ u8 *buf, u8 *oob_buf, int oob_len, int page, ++ bool read) ++{ ++ struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); ++ struct phytium_nand_chip *phytium_nand = NULL; ++ const struct nand_sdr_timings *sdr = NULL; ++ struct phytium_nfc_op nfc_op; ++ enum dma_data_direction direction; ++ int ret = 0; ++ ++ memset(&nfc_op, 0, sizeof(nfc_op)); ++ phytium_nand = to_phytium_nand(chip); ++ sdr = nand_get_sdr_timings(&phytium_nand->chip.data_interface); ++ ++ memset(nfc->dma_buf, 0x0, mtd->writesize + mtd->oobsize); ++ direction = DMA_FROM_DEVICE; ++ nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ; ++ nfc_op.cmd[0] = NAND_CMD_READ0; ++ nfc_op.cmd[1] = NAND_CMD_READSTART; ++ nfc_op.cmd_len = 2; ++ nfc_op.cle_ale_delay_ns = PSEC_TO_NSEC(sdr->tWB_max); ++ nfc_op.rdy_timeout_ms = PSEC_TO_MSEC(sdr->tR_max); ++ nfc_op.rdy_delay_ns = PSEC_TO_NSEC(sdr->tRR_min); ++ ++ nfc_op.cmd_ctrl.nfc_ctrl.dbc = 1; ++ nfc_op.addr[2] = page; ++ nfc_op.addr[3] = page >> 8; ++ nfc_op.addr[4] = page >> 16; ++ nfc_op.addr_len = 5; ++ nfc_op.cmd_ctrl.nfc_ctrl.addr_cyc = 0x05; ++ nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; ++ ++ nfc_op.page_cnt = mtd->writesize; ++ nfc_op.cmd_ctrl.nfc_ctrl.dc = 1; ++ nfc_op.cmd_ctrl.nfc_ctrl.ecc_en = 0; ++ ++ /* For data read/program */ ++ phytium_nfc_prepare_cmd(chip, &nfc_op, direction); ++ phytium_nfc_send_cmd(chip, &nfc_op); ++ cond_delay(nfc_op.cle_ale_delay_ns); ++ ++ ret = phytium_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); ++ if (ret) ++ return ret; ++ ++ if ((direction == DMA_FROM_DEVICE) && buf) ++ memcpy(buf, nfc->dma_buf, mtd->writesize); ++ ++ return 
ret; ++} ++ ++static int phytium_nand_oob_read(struct mtd_info *mtd, struct nand_chip *chip, ++ u8 *buf, u8 *oob_buf, int oob_len, int page, ++ bool read) ++{ ++ struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); ++ struct phytium_nand_chip *phytium_nand = NULL; ++ const struct nand_sdr_timings *sdr = NULL; ++ struct phytium_nfc_op nfc_op; ++ enum dma_data_direction direction; ++ int ret = 0; ++ ++ memset(&nfc_op, 0, sizeof(nfc_op)); ++ phytium_nand = to_phytium_nand(chip); ++ sdr = nand_get_sdr_timings(&phytium_nand->chip.data_interface); ++ ++ memset(nfc->dma_buf, 0x00, mtd->writesize + mtd->oobsize); ++ direction = DMA_FROM_DEVICE; ++ nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ; ++ nfc_op.cmd[0] = NAND_CMD_READ0; ++ nfc_op.cmd[1] = NAND_CMD_READSTART; ++ nfc_op.cmd_len = 2; ++ nfc_op.cle_ale_delay_ns = PSEC_TO_NSEC(sdr->tWB_max); ++ nfc_op.rdy_timeout_ms = PSEC_TO_MSEC(sdr->tR_max); ++ nfc_op.rdy_delay_ns = PSEC_TO_NSEC(sdr->tRR_min); ++ ++ nfc_op.cmd_ctrl.nfc_ctrl.dbc = 1; ++ nfc_op.addr[2] = page; ++ nfc_op.addr[3] = page >> 8; ++ nfc_op.addr[4] = page >> 16; ++ nfc_op.addr_len = 5; ++ nfc_op.cmd_ctrl.nfc_ctrl.addr_cyc = 0x05; ++ nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; ++ ++ nfc_op.page_cnt = oob_len; ++ nfc_op.cmd_ctrl.nfc_ctrl.dc = 1; ++ nfc_op.cmd_ctrl.nfc_ctrl.ecc_en = 0; ++ nfc_op.addr[0] = mtd->writesize & 0xFF; ++ nfc_op.addr[1] = (mtd->writesize >> 8) & 0xFF; ++ ++ /* For data read/program */ ++ phytium_nfc_prepare_cmd(chip, &nfc_op, direction); ++ phytium_nfc_send_cmd(chip, &nfc_op); ++ cond_delay(nfc_op.cle_ale_delay_ns); ++ ++ ret = phytium_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); ++ if (ret) ++ return ret; ++ ++ cond_delay(nfc_op.rdy_delay_ns); ++ ++ if (direction == DMA_FROM_DEVICE) ++ memcpy(oob_buf, nfc->dma_buf, oob_len); ++ ++ return ret; ++} ++ ++static int phytium_nand_get_ecc_total(struct mtd_info *mtd, ++ struct nand_ecc_ctrl *ecc) ++{ ++ int ecc_total = 0; ++ ++ switch (mtd->writesize) { ++ case 0x200: ++ if (ecc->strength == 4) 
++ ecc_total = 7; ++ else if (ecc->strength == 2) ++ ecc_total = 4; ++ break; ++ case 0x800: ++ if (ecc->strength == 4) ++ ecc_total = 0x1a; ++ else if (ecc->strength == 2) ++ ecc_total = 0xd; ++ break; ++ case 0x1000: ++ if (ecc->strength == 4) ++ ecc_total = 0x34; ++ else if (ecc->strength == 2) ++ ecc_total = 0x1a; ++ break; ++ case 0x2000: ++ if (ecc->strength == 4) ++ ecc_total = 0x68; ++ else if (ecc->strength == 2) ++ ecc_total = 0x34; ++ break; ++ case 0x4000: ++ if (ecc->strength == 4) ++ ecc_total = 0xd0; ++ else if (ecc->strength == 2) ++ ecc_total = 0x68; ++ break; ++ default: ++ break; ++ } ++ ++ return ecc_total; ++} ++ ++static int phytium_nand_page_read_hwecc(struct mtd_info *mtd, struct nand_chip *chip, ++ u8 *buf, u8 *oob_buf, int oob_len, int page, ++ bool read) ++{ ++ struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); ++ struct phytium_nand_chip *phytium_nand = NULL; ++ const struct nand_sdr_timings *sdr = NULL; ++ struct phytium_nfc_op *nfc_op = NULL; ++ enum dma_data_direction direction; ++ u32 ecc_offset; ++ int max_bitflips = 0; ++ u32 nfc_state = 0; ++ int ret = 0; ++ int i; ++ ++ phytium_nand = to_phytium_nand(chip); ++ sdr = nand_get_sdr_timings(&phytium_nand->chip.data_interface); ++ ++ ecc_offset = phytium_nand->ecc.offset; ++ memset(nfc->dma_buf, 0x00, mtd->writesize + mtd->oobsize); ++ nfc_op = kzalloc(2 * sizeof(struct phytium_nfc_op), GFP_KERNEL); ++ if (!nfc_op) { ++ dev_err(nfc->dev, "Can't malloc space for phytium_nfc_op\n"); ++ return 0; ++ } ++ ++ nfc_op->cle_ale_delay_ns = PSEC_TO_NSEC(sdr->tWB_max); ++ nfc_op->rdy_timeout_ms = PSEC_TO_MSEC(sdr->tR_max); ++ nfc_op->rdy_delay_ns = PSEC_TO_NSEC(sdr->tRR_min); ++ ++ direction = DMA_FROM_DEVICE; ++ nfc_op->cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ; ++ nfc_op->cmd[0] = NAND_CMD_READ0; ++ nfc_op->cmd[1] = NAND_CMD_READSTART; ++ nfc_op->cmd_len = 2; ++ nfc_op->addr_len = 5; ++ nfc_op->cmd_ctrl.nfc_ctrl.dbc = 1; ++ nfc_op->addr[2] = page; ++ nfc_op->addr[3] = page >> 8; ++ 
nfc_op->addr[4] = page >> 16; ++ nfc_op->cmd_ctrl.nfc_ctrl.addr_cyc = 0x05; ++ nfc_op->cmd_ctrl.nfc_ctrl.dc = 1; ++ nfc_op->cmd_ctrl.nfc_ctrl.auto_rs = 1; ++ nfc_op->page_cnt = mtd->writesize; ++ nfc_op->cmd_ctrl.nfc_ctrl.nc = 1; ++ for (i = 0; i < PHYTIUM_NFC_ADDR_MAX_LEN; i++) ++ nfc_op->mem_addr_first[i] = (nfc->dma_phy_addr >> (8 * i)) & 0xFF; ++ ++ nfc_op++; ++ memcpy(nfc_op, nfc_op - 1, sizeof(struct phytium_nfc_op)); ++ nfc_op->cmd_ctrl.nfc_ctrl.cmd_type = TYPE_CH_READ_COL; ++ nfc_op->cmd[0] = NAND_CMD_RNDOUT; ++ nfc_op->cmd[1] = NAND_CMD_RNDOUTSTART; ++ memset(&nfc_op->addr, 0, PHYTIUM_NFC_ADDR_MAX_LEN); ++ nfc_op->addr_len = 2; ++ nfc_op->addr[0] = mtd->writesize + phytium_nand->ecc.offset; ++ nfc_op->addr[1] = (mtd->writesize + phytium_nand->ecc.offset) >> 8; ++ nfc_op->cmd_ctrl.nfc_ctrl.addr_cyc = 0x02; ++ nfc_op->page_cnt = phytium_nand_get_ecc_total(mtd, &chip->ecc); ++ nfc_op->cmd_ctrl.nfc_ctrl.nc = 0; ++ nfc_op->cmd_ctrl.nfc_ctrl.auto_rs = 0; ++ nfc_op->cmd_ctrl.nfc_ctrl.ecc_en = 1; ++ for (i = 0; i < PHYTIUM_NFC_ADDR_MAX_LEN; i++) ++ nfc_op->mem_addr_first[i] = ++ ((nfc->dma_phy_addr + mtd->writesize) >> (8 * i)) & 0xFF; ++ ++ nfc_op--; ++ phytium_nfc_prepare_cmd2(chip, nfc_op, direction, 2); ++ phytium_nfc_send_cmd2(chip, nfc_op, 2); ++ cond_delay(nfc_op->cle_ale_delay_ns); ++ ++ ret = phytium_nfc_wait_op(chip, nfc_op->rdy_timeout_ms); ++ if (ret) ++ return ret; ++ ++ cond_delay(nfc_op->rdy_delay_ns); ++ ++ if ((direction == DMA_FROM_DEVICE) && buf) { ++ nfc_state = phytium_read(nfc, NDSR); ++ if ((nfc_state & NDSR_ECC_ERROVER) || (nfc_ecc_errover == 1)) { ++ for (i = 0; i < mtd->writesize/16; i++) { ++ if (0xFF != *(u8 *)(nfc->dma_buf + i)) { ++ dev_info(nfc->dev, "NFC: NDSR_ECC_ERROVER %x\n", page); ++ mtd->ecc_stats.failed++; ++ mtd->ecc_stats.corrected += max_bitflips; ++ break; ++ } ++ } ++ } else if (nfc_state & NDSR_ECC_ERR) { ++ max_bitflips = phytium_nfc_hw_ecc_correct(chip, ++ nfc->dma_buf, mtd->writesize); ++ mtd->ecc_stats.corrected += 
max_bitflips; ++ dev_info(nfc->dev, "NFC: NDSR_ECC_ERR page:%x, bit:%d\n", ++ page, max_bitflips); ++ } ++ ++ memcpy(buf, nfc->dma_buf, mtd->writesize); ++ } ++ ++ return max_bitflips; ++} ++ ++static int phytium_nand_page_write(struct mtd_info *mtd, struct nand_chip *chip, ++ const u8 *buf, u8 *oob_buf, int oob_len, int page, ++ bool read) ++{ ++ struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); ++ struct phytium_nand_chip *phytium_nand = NULL; ++ const struct nand_sdr_timings *sdr = NULL; ++ struct phytium_nfc_op nfc_op; ++ enum dma_data_direction direction; ++ int ret = 0; ++ ++ memset(&nfc_op, 0, sizeof(nfc_op)); ++ phytium_nand = to_phytium_nand(chip); ++ sdr = nand_get_sdr_timings(&phytium_nand->chip.data_interface); ++ ++ memcpy(nfc->dma_buf, buf, mtd->writesize); ++ direction = DMA_TO_DEVICE; ++ nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_PAGE_PRO; ++ nfc_op.cmd[0] = NAND_CMD_SEQIN; ++ nfc_op.cmd[1] = NAND_CMD_PAGEPROG; ++ nfc_op.cmd_len = 2; ++ nfc_op.addr_len = 5; ++ nfc_op.cle_ale_delay_ns = PSEC_TO_NSEC(sdr->tWB_max); ++ nfc_op.rdy_timeout_ms = PSEC_TO_MSEC(sdr->tPROG_max); ++ nfc_op.rdy_delay_ns = 0; ++ ++ nfc_op.cmd_ctrl.nfc_ctrl.dbc = 1; ++ nfc_op.addr[2] = page; ++ nfc_op.addr[3] = page >> 8; ++ nfc_op.addr[4] = page >> 16; ++ nfc_op.cmd_ctrl.nfc_ctrl.addr_cyc = 0x05; ++ nfc_op.cmd_ctrl.nfc_ctrl.dc = 1; ++ nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; ++ nfc_op.page_cnt = mtd->writesize; ++ ++ /* For data read/program */ ++ phytium_nfc_prepare_cmd(chip, &nfc_op, direction); ++ phytium_nfc_send_cmd(chip, &nfc_op); ++ cond_delay(nfc_op.cle_ale_delay_ns); ++ ++ ret = phytium_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); ++ if (ret) ++ goto out; ++ ++ cond_delay(nfc_op.rdy_delay_ns); ++out: ++ return ret; ++} ++ ++static int phytium_nand_oob_write(struct mtd_info *mtd, struct nand_chip *chip, ++ u8 *buf, u8 *oob_buf, int oob_len, int page, ++ bool read) ++{ ++ struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); ++ struct phytium_nand_chip 
*phytium_nand = NULL; ++ const struct nand_sdr_timings *sdr = NULL; ++ struct phytium_nfc_op nfc_op; ++ enum dma_data_direction direction; ++ int ret = 0; ++ ++ memset(&nfc_op, 0, sizeof(nfc_op)); ++ phytium_nand = to_phytium_nand(chip); ++ sdr = nand_get_sdr_timings(&phytium_nand->chip.data_interface); ++ ++ direction = DMA_TO_DEVICE; ++ nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_PAGE_PRO; ++ nfc_op.cmd[0] = NAND_CMD_SEQIN; ++ nfc_op.cmd[1] = NAND_CMD_PAGEPROG; ++ nfc_op.cmd_len = 2; ++ nfc_op.addr_len = 5; ++ nfc_op.cle_ale_delay_ns = PSEC_TO_NSEC(sdr->tWB_max); ++ nfc_op.rdy_timeout_ms = PSEC_TO_MSEC(sdr->tPROG_max); ++ nfc_op.rdy_delay_ns = 0; ++ ++ nfc_op.cmd_ctrl.nfc_ctrl.dbc = 1; ++ nfc_op.addr[2] = page; ++ nfc_op.addr[3] = page >> 8; ++ nfc_op.addr[4] = page >> 16; ++ nfc_op.cmd_ctrl.nfc_ctrl.addr_cyc = 0x05; ++ nfc_op.cmd_ctrl.nfc_ctrl.dc = 1; ++ nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1; ++ ++ nfc_op.page_cnt = oob_len; ++ nfc_op.cmd_ctrl.nfc_ctrl.ecc_en = 0; ++ nfc_op.addr[0] = mtd->writesize & 0xFF; ++ nfc_op.addr[1] = (mtd->writesize >> 8) & 0xFF; ++ nfc_op.cmd_ctrl.nfc_ctrl.ecc_en = 0; ++ memcpy(nfc->dma_buf, oob_buf, mtd->oobsize); ++ ++ /* For data read/program */ ++ phytium_nfc_prepare_cmd(chip, &nfc_op, direction); ++ phytium_nfc_send_cmd(chip, &nfc_op); ++ cond_delay(nfc_op.cle_ale_delay_ns); ++ ++ ret = phytium_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); ++ if (ret) ++ goto out; ++ ++ cond_delay(nfc_op.rdy_delay_ns); ++out: ++ return ret; ++} ++ ++static int phytium_nand_page_write_hwecc(struct mtd_info *mtd, struct nand_chip *chip, ++ const u8 *buf, u8 *oob_buf, int oob_len, int page, ++ bool read) ++{ ++ struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); ++ struct phytium_nand_chip *phytium_nand = NULL; ++ const struct nand_sdr_timings *sdr = NULL; ++ struct phytium_nfc_op *nfc_op; ++ enum dma_data_direction direction; ++ u32 ecc_offset; ++ int ret = 0; ++ int i; ++ ++ phytium_nand = to_phytium_nand(chip); ++ sdr = 
nand_get_sdr_timings(&phytium_nand->chip.data_interface); ++ ecc_offset = phytium_nand->ecc.offset; ++ ++ nfc_op = kzalloc(2 * sizeof(struct phytium_nfc_op), GFP_KERNEL); ++ if (!nfc_op) { ++ dev_err(nfc->dev, "Can't malloc space for phytium_nfc_op\n"); ++ return 0; ++ } ++ ++ nfc_op->cle_ale_delay_ns = PSEC_TO_NSEC(sdr->tWB_max); ++ nfc_op->rdy_timeout_ms = PSEC_TO_MSEC(sdr->tR_max); ++ nfc_op->rdy_delay_ns = PSEC_TO_NSEC(sdr->tRR_min); ++ ++ direction = DMA_TO_DEVICE; ++ nfc_op->cmd_ctrl.nfc_ctrl.cmd_type = TYPE_CH_ROW_ADDR; ++ nfc_op->cmd[0] = NAND_CMD_SEQIN; ++ nfc_op->cmd_len = 1; ++ nfc_op->addr_len = 5; ++ nfc_op->cmd_ctrl.nfc_ctrl.dbc = 0; ++ nfc_op->addr[2] = page; ++ nfc_op->addr[3] = page >> 8; ++ nfc_op->addr[4] = page >> 16; ++ nfc_op->cmd_ctrl.nfc_ctrl.addr_cyc = 0x05; ++ nfc_op->cmd_ctrl.nfc_ctrl.auto_rs = 0; ++ nfc_op->cmd_ctrl.nfc_ctrl.nc = 1; ++ for (i = 0; i < PHYTIUM_NFC_ADDR_MAX_LEN; i++) ++ nfc_op->mem_addr_first[i] = (nfc->dma_phy_addr >> (8 * i)) & 0xFF; ++ ++ /* The first dsp must have data to transfer */ ++ memcpy(nfc->dma_buf, buf, mtd->writesize); ++ nfc_op->page_cnt = mtd->writesize; ++ nfc_op->cmd_ctrl.nfc_ctrl.dc = 1; ++ ++ nfc_op++; ++ memcpy(nfc_op, nfc_op - 1, sizeof(struct phytium_nfc_op)); ++ nfc_op->cmd_ctrl.nfc_ctrl.cmd_type = TYPE_PAGE_PRO; ++ nfc_op->cmd_ctrl.nfc_ctrl.dbc = 1; ++ nfc_op->cmd_ctrl.nfc_ctrl.auto_rs = 1; ++ nfc_op->cmd[0] = NAND_CMD_RNDIN; ++ nfc_op->cmd[1] = NAND_CMD_PAGEPROG; ++ memset(&nfc_op->addr, 0, PHYTIUM_NFC_ADDR_MAX_LEN); ++ nfc_op->addr_len = 2; ++ nfc_op->cmd_len = 2; ++ nfc_op->addr[0] = mtd->writesize + ecc_offset; ++ nfc_op->addr[1] = (mtd->writesize + ecc_offset) >> 8; ++ nfc_op->cmd_ctrl.nfc_ctrl.addr_cyc = 0x02; ++ nfc_op->page_cnt = phytium_nand_get_ecc_total(mtd, &chip->ecc); ++ nfc_op->cmd_ctrl.nfc_ctrl.nc = 0; ++ nfc_op->cmd_ctrl.nfc_ctrl.dc = 1; ++ nfc_op->cmd_ctrl.nfc_ctrl.ecc_en = 1; ++ for (i = 0; i < PHYTIUM_NFC_ADDR_MAX_LEN; i++) ++ nfc_op->mem_addr_first[i] = ++ ((nfc->dma_phy_addr + 
mtd->writesize + ecc_offset) >> (8 * i)) & 0xFF; ++ ++ /* when enable ECC, must offer ecc_offset of oob, but no oobdata */ ++ nfc_op--; ++ phytium_nfc_prepare_cmd2(chip, nfc_op, direction, 2); ++ phytium_nfc_send_cmd2(chip, nfc_op, 2); ++ cond_delay(nfc_op->cle_ale_delay_ns); ++ ++ ret = phytium_nfc_wait_op(chip, nfc_op->rdy_timeout_ms); ++ if (ret) ++ goto out; ++ ++ cond_delay(nfc_op->rdy_delay_ns); ++out: ++ return ret; ++} ++ ++static int phytium_nfc_hw_ecc_bch_read_page_raw(struct mtd_info *mtd, ++ struct nand_chip *chip, ++ u8 *buf, int oob_required, ++ int page) ++{ ++ u32 oob_len = oob_required ? mtd->oobsize : 0; ++ struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); ++ int ret; ++ ++ ret = phytium_nand_page_read(mtd, chip, buf, NULL, 0, page, true); ++ if (oob_required) ++ ret = phytium_nand_oob_read(mtd, chip, NULL, chip->oob_poi, ++ oob_len, page, true); ++ ++ phytium_nfc_data_dump(nfc, buf, mtd->writesize); ++ ++ return ret; ++} ++ ++static int phytium_nfc_hw_ecc_bch_read_oob_raw(struct mtd_info *mtd, ++ struct nand_chip *chip, int page) ++{ ++ struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); ++ int ret; ++ ++ /* Invalidate page cache */ ++ chip->pagebuf = -1; ++ memset(chip->oob_poi, 0xFF, mtd->oobsize); ++ ++ ret = phytium_nand_oob_read(mtd, chip, NULL, chip->oob_poi, ++ mtd->oobsize, page, true); ++ ++ phytium_nfc_data_dump(nfc, chip->oob_poi, mtd->oobsize); ++ ++ return ret; ++} ++ ++static int phytium_nfc_hw_ecc_bch_read_page(struct mtd_info *mtd, ++ struct nand_chip *chip, ++ u8 *buf, int oob_required, ++ int page) ++{ ++ int ret; ++ u32 oob_len = oob_required ? 
mtd->oobsize : 0; ++ struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); ++ struct phytium_nand_chip *phytium_nand = NULL; ++ ++ phytium_nand = to_phytium_nand(chip); ++ ++ phytium_nfc_enable_hw_ecc(chip); ++ ret = phytium_nand_page_read_hwecc(mtd, chip, buf, NULL, ++ 0, page, true); ++ phytium_nfc_disable_hw_ecc(chip); ++ ++ if (oob_required) { ++ oob_len = mtd->oobsize; ++ ret = phytium_nand_oob_read(mtd, chip, NULL, chip->oob_poi, ++ oob_len, page, true); ++ } ++ ++ phytium_nfc_data_dump(nfc, buf, mtd->writesize); ++ ++ return ret; ++} ++ ++static int phytium_nfc_hw_ecc_bch_read_oob(struct mtd_info *mtd, ++ struct nand_chip *chip, ++ int page) ++{ ++ struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); ++ u32 oob_len = mtd->oobsize; ++ int ret; ++ ++ ret = phytium_nand_oob_read(mtd, chip, NULL, chip->oob_poi, ++ oob_len, page, true); ++ ++ phytium_nfc_data_dump(nfc, chip->oob_poi, oob_len); ++ ++ return ret; ++} ++ ++static int phytium_nfc_hw_ecc_bch_write_page_raw(struct mtd_info *mtd, ++ struct nand_chip *chip, ++ const u8 *buf, ++ int oob_required, int page) ++{ ++ void *oob_buf = oob_required ? chip->oob_poi : NULL; ++ ++ if (oob_required) ++ phytium_nand_oob_write(mtd, chip, NULL, oob_buf, ++ mtd->oobsize, page, false); ++ ++ return phytium_nand_page_write(mtd, chip, buf, NULL, ++ 0, page, false); ++} ++ ++static int phytium_nfc_hw_ecc_bch_write_page(struct mtd_info *mtd, ++ struct nand_chip *chip, ++ const u8 *buf, ++ int oob_required, int page) ++{ ++ int ret; ++ void *oob_buf = oob_required ? 
chip->oob_poi : NULL; ++ u32 oob_len; ++ ++ if (oob_required) { ++ oob_len = mtd->oobsize; ++ phytium_nand_oob_write(mtd, chip, NULL, oob_buf, ++ oob_len, page, false); ++ } ++ ++ phytium_nfc_enable_hw_ecc(chip); ++ ret = phytium_nand_page_write_hwecc(mtd, chip, buf, NULL, ++ 0, page, false); ++ phytium_nfc_disable_hw_ecc(chip); ++ ++ return ret; ++} ++ ++static int phytium_nfc_hw_ecc_bch_write_oob_raw(struct mtd_info *mtd, ++ struct nand_chip *chip, int page) ++{ ++ return phytium_nand_oob_write(mtd, chip, NULL, chip->oob_poi, ++ mtd->oobsize, page, false); ++} ++ ++static int phytium_nfc_hw_ecc_bch_write_oob(struct mtd_info *mtd, ++ struct nand_chip *chip, int page) ++{ ++ struct phytium_nand_chip *phytium_nand = to_phytium_nand(chip); ++ u32 oob_len = mtd->oobsize - phytium_nand->ecc.length; ++ ++ return phytium_nand_oob_write(mtd, chip, NULL, chip->oob_poi, ++ oob_len, page, false); ++} ++ ++static int phytium_nand_hw_ecc_ctrl_init(struct mtd_info *mtd, ++ struct nand_ecc_ctrl *ecc) ++{ ++ struct nand_chip *chip = mtd_to_nand(mtd); ++ ++ if ((mtd->writesize + mtd->oobsize > MAX_CHUNK_SIZE)) ++ return -ENOTSUPP; ++ ++ chip->ecc.algo = NAND_ECC_BCH; ++ ecc->read_page_raw = phytium_nfc_hw_ecc_bch_read_page_raw; ++ ecc->read_page = phytium_nfc_hw_ecc_bch_read_page; ++ ecc->read_oob_raw = phytium_nfc_hw_ecc_bch_read_oob_raw; ++ ecc->read_oob = phytium_nfc_hw_ecc_bch_read_oob; ++ ecc->write_page_raw = phytium_nfc_hw_ecc_bch_write_page_raw; ++ ecc->write_page = phytium_nfc_hw_ecc_bch_write_page; ++ ecc->write_oob_raw = phytium_nfc_hw_ecc_bch_write_oob_raw; ++ ecc->write_oob = phytium_nfc_hw_ecc_bch_write_oob; ++ ++ return 0; ++} ++ ++static int phytium_nand_ecc_init(struct mtd_info *mtd, ++ struct nand_ecc_ctrl *ecc) ++{ ++ struct nand_chip *chip = mtd_to_nand(mtd); ++ int ret = 0; ++ ++ if (ecc->mode != NAND_ECC_NONE && (!ecc->size || !ecc->strength)) { ++ if (chip->ecc_step_ds && chip->ecc_strength_ds) { ++ ecc->size = chip->ecc_step_ds; ++ ecc->strength = 
chip->ecc_strength_ds; ++ } else { ++ ecc->size = 512; ++ ecc->strength = 1; ++ } ++ } ++ ++ mtd_set_ooblayout(mtd, &phytium_nand_ooblayout_ops); ++ ++ switch (ecc->mode) { ++ case NAND_ECC_HW: ++ ret = phytium_nand_hw_ecc_ctrl_init(mtd, ecc); ++ break; ++ case NAND_ECC_NONE: ++ ecc->read_page_raw = phytium_nfc_hw_ecc_bch_read_page_raw; ++ ecc->read_oob_raw = phytium_nfc_hw_ecc_bch_read_oob; ++ ecc->write_page_raw = phytium_nfc_hw_ecc_bch_write_page_raw; ++ ecc->write_oob_raw = phytium_nfc_hw_ecc_bch_write_oob_raw; ++ ecc->read_page = ecc->read_page_raw; ++ ecc->read_oob = ecc->read_oob_raw; ++ ecc->write_page = ecc->write_page_raw; ++ ecc->write_oob = ecc->write_oob_raw; ++ break; ++ case NAND_ECC_SOFT: ++ case NAND_ECC_ON_DIE: ++ default: ++ ret = -EINVAL; ++ break; ++ } ++ ++ return ret; ++} ++ ++static u8 bbt_pattern[] = {'P', 'H', 'Y', 'b', 't', '0' }; ++static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'Y', 'H', 'P' }; ++ ++static struct nand_bbt_descr bbt_main_descr = { ++ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE | ++ NAND_BBT_2BIT | NAND_BBT_VERSION, ++ .offs = 8, ++ .len = 6, ++ .veroffs = 14, ++ .maxblocks = 8, /* Last 8 blocks in each chip */ ++ .pattern = bbt_pattern ++}; ++ ++static struct nand_bbt_descr bbt_mirror_descr = { ++ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE | ++ NAND_BBT_2BIT | NAND_BBT_VERSION, ++ .offs = 8, ++ .len = 6, ++ .veroffs = 14, ++ .maxblocks = 8, /* Last 8 blocks in each chip */ ++ .pattern = bbt_mirror_pattern ++}; ++ ++static int phytium_nand_attach_chip(struct nand_chip *chip) ++{ ++ struct mtd_info *mtd = nand_to_mtd(chip); ++ struct phytium_nand_chip *phytium_nand = to_phytium_nand(chip); ++ struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); ++ int ret = 0; ++ ++ if (nfc->caps->flash_bbt) ++ chip->bbt_options |= NAND_BBT_USE_FLASH; ++ ++ if (chip->bbt_options & NAND_BBT_USE_FLASH) { ++ /* ++ * We'll use a bad block table stored in-flash and don't ++ * allow writing the 
bad block marker to the flash. ++ */ ++ chip->bbt_options |= NAND_BBT_NO_OOB_BBM; ++ chip->bbt_td = &bbt_main_descr; ++ chip->bbt_md = &bbt_mirror_descr; ++ } ++ ++ if (chip->options & NAND_BUSWIDTH_16) ++ phytium_nand->ndcr |= NDCR0_WIDTH; ++ ++ /* ++ * On small page NANDs, only one cycle is needed to pass the ++ * column address. ++ */ ++ if (mtd->writesize <= 512) ++ phytium_nand->addr_cyc = 1; ++ else ++ phytium_nand->addr_cyc = 2; ++ ++ /* ++ * Now add the number of cycles needed to pass the row ++ * address. ++ * ++ * Addressing a chip using CS 2 or 3 should also need the third row ++ * cycle but due to inconsistency in the documentation and lack of ++ * hardware to test this situation, this case is not supported. ++ */ ++ if (chip->options & NAND_ROW_ADDR_3) ++ phytium_nand->addr_cyc += 3; ++ else ++ phytium_nand->addr_cyc += 2; ++ ++ if (nfc->caps) { ++ if (chip->ecc.mode == NAND_ECC_HW) { ++ chip->ecc.size = nfc->caps->ecc_step_size; ++ chip->ecc.strength = nfc->caps->ecc_strength; ++ chip->ecc.bytes = 7; ++ } else { ++ chip->ecc.size = 512; ++ chip->ecc.strength = 1; ++ chip->ecc.bytes = 0; ++ } ++ chip->ecc.mode = NAND_ECC_HW; ++ } ++ ++ if (chip->ecc.strength == 0x04) ++ phytium_nand->ndcr |= NDCR0_ECC_STREN(4); ++ else if (chip->ecc.strength == 0x02) ++ phytium_nand->ndcr |= NDCR0_ECC_STREN(2); ++ else ++ phytium_nand->ndcr |= NDCR0_ECC_STREN(0); ++ ++ ret = phytium_nand_ecc_init(mtd, &chip->ecc); ++ if (ret) { ++ dev_err(nfc->dev, "ECC init failed: %d\n", ret); ++ goto out; ++ } ++ ++ /* ++ * Subpage write not available with hardware ECC, prohibit also ++ * subpage read as in userspace subpage access would still be ++ * allowed and subpage write, if used, would lead to numerous ++ * uncorrectable ECC errors. 
++ */ ++ if (chip->ecc.mode == NAND_ECC_HW) ++ chip->options |= NAND_NO_SUBPAGE_WRITE; ++ ++ /* ++ * We keep the MTD name unchanged to avoid breaking platforms ++ * where the MTD cmdline parser is used and the bootloader ++ * has not been updated to use the new naming scheme. ++ */ ++ if (nfc->caps->legacy_of_bindings) ++ mtd->name = "x100_nand-0"; ++ ++out: ++ return ret; ++} ++ ++static const struct nand_controller_ops phytium_nand_controller_ops = { ++ .attach_chip = phytium_nand_attach_chip, ++}; ++ ++static void phytium_nand_chips_cleanup(struct phytium_nfc *nfc) ++{ ++ struct phytium_nand_chip *entry, *temp; ++ ++ list_for_each_entry_safe(entry, temp, &nfc->chips, node) { ++ nand_release(nand_to_mtd(&entry->chip)); ++ list_del(&entry->node); ++ } ++} ++ ++static int phytium_nfc_init_dma(struct phytium_nfc *nfc) ++{ ++ int ret; ++ ++ ret = dma_set_mask_and_coherent(nfc->dev, DMA_BIT_MASK(64)); ++ if (ret) ++ return ret; ++ ++ nfc->dsp_addr = dma_alloc_coherent(nfc->dev, PAGE_SIZE, ++ &nfc->dsp_phy_addr, GFP_KERNEL | GFP_DMA); ++ if (!nfc->dsp_addr) ++ return -ENOMEM; ++ ++ nfc->dma_buf = dma_alloc_coherent(nfc->dev, MAX_CHUNK_SIZE, ++ &nfc->dma_phy_addr, GFP_KERNEL | GFP_DMA); ++ if (!nfc->dma_buf) ++ return -ENOMEM; ++ ++ dev_info(nfc->dev, "NFC address dsp_phy_addr:%llx, dma_phy_addr:%llx\n", ++ nfc->dsp_phy_addr, nfc->dma_phy_addr); ++ ++ return 0; ++} ++ ++int phytium_nfc_init(struct phytium_nfc *nfc) ++{ ++ u32 value; ++ ++ nfc->inter_mode = ASYN_SDR; ++ nfc->timing_mode = ASY_MODE0; ++ ++ phytium_write(nfc, NDCR1, NDCR1_SAMPL_PHASE(1)); ++ phytium_write(nfc, ND_INTERVAL_TIME, 1); ++ phytium_write(nfc, NDFIFO_LEVEL0, 4); ++ phytium_write(nfc, NDFIFO_LEVEL1, 4); ++ phytium_write(nfc, NDFIFO_CLR, 1); ++ phytium_write(nfc, ND_ERR_CLR, 1); ++ ++ /* Configure the DMA */ ++ phytium_nfc_init_dma(nfc); ++ ++ phytium_write(nfc, NDCR0, ++ NDCR0_IN_MODE(nfc->inter_mode) | NDCR0_ECC_STREN(4)); ++ ++ phytium_nfc_reset(nfc); ++ ++ value = phytium_read(nfc, NDCR0); ++ 
phytium_write(nfc, NDCR0, value | NDCR0_EN); ++ ++ nfc_ecc_errover = 0; ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(phytium_nfc_init); ++ ++static int phytium_nfc_setup_data_interface(struct mtd_info *mtd, int chipnr, ++ const struct nand_data_interface ++ *conf) ++{ ++ struct nand_chip *chip = mtd_to_nand(mtd); ++ struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); ++ unsigned int period_ns = 2; ++ const struct nand_sdr_timings *sdr; ++ struct phytium_nfc_timings nfc_tmg; ++ int read_delay; ++ ++ sdr = nand_get_sdr_timings(conf); ++ if (IS_ERR(sdr)) ++ return PTR_ERR(sdr); ++ ++ nfc_tmg.tRP = TO_CYCLES(DIV_ROUND_UP(sdr->tRC_min, 2), period_ns) - 1; ++ nfc_tmg.tRH = nfc_tmg.tRP; ++ nfc_tmg.tWP = TO_CYCLES(DIV_ROUND_UP(sdr->tWC_min, 2), period_ns) - 1; ++ nfc_tmg.tWH = nfc_tmg.tWP; ++ nfc_tmg.tCS = TO_CYCLES(sdr->tCS_min, period_ns); ++ nfc_tmg.tCH = TO_CYCLES(sdr->tCH_min, period_ns) - 1; ++ nfc_tmg.tADL = TO_CYCLES(sdr->tADL_min, period_ns); ++ dev_info(nfc->dev, "[nfc_tmg]tRP: %d, tRH:%d, tWP:%d tWH:%d\n", ++ nfc_tmg.tRP, nfc_tmg.tRH, nfc_tmg.tWP, nfc_tmg.tWH); ++ dev_info(nfc->dev, "[nfc_tmg]tCS: %d, tCH:%d, tADL:%d\n", ++ nfc_tmg.tCS, nfc_tmg.tCH, nfc_tmg.tADL); ++ ++ read_delay = sdr->tRC_min >= 30000 ? 
++ MIN_RD_DEL_CNT : MIN_RD_DEL_CNT + nfc_tmg.tRH; ++ ++ nfc_tmg.tAR = TO_CYCLES(sdr->tAR_min, period_ns); ++ nfc_tmg.tWHR = TO_CYCLES(max_t(int, sdr->tWHR_min, sdr->tCCS_min), ++ period_ns) - 2, ++ nfc_tmg.tRHW = TO_CYCLES(max_t(int, sdr->tRHW_min, sdr->tCCS_min), ++ period_ns); ++ dev_info(nfc->dev, "[nfc_tmg]tAR: %d, tWHR:%d, tRHW:%d\n", ++ nfc_tmg.tAR, nfc_tmg.tWHR, nfc_tmg.tRHW); ++ ++ nfc_tmg.tR = TO_CYCLES(sdr->tWB_max, period_ns); ++ ++ if (chipnr < 0) ++ return 0; ++ ++ if (nfc_tmg.tWP > 0x10) ++ nfc->timing_mode = ASY_MODE1; ++ else if (nfc_tmg.tWP < 0x0D) ++ nfc->timing_mode = ASY_MODE3; ++ ++ if (nfc->inter_mode == ONFI_DDR) ++ nfc->timing_mode = SYN_MODE3; ++ ++ phytium_nfc_default_data_interface(nfc); ++ ++ return 0; ++} ++ ++static int phytium_nand_chip_init(struct phytium_nfc *nfc) ++{ ++ struct device *dev = nfc->dev; ++ struct phytium_nand_chip *phytium_nand; ++ struct mtd_info *mtd; ++ struct nand_chip *chip; ++ int ret; ++ ++ /* Alloc the nand chip structure */ ++ phytium_nand = devm_kzalloc(dev, sizeof(*phytium_nand), GFP_KERNEL); ++ if (!phytium_nand) ++ return -ENOMEM; ++ ++ phytium_nand->nsels = 1; ++ phytium_nand->selected_die = -1; ++ ++ chip = &phytium_nand->chip; ++ chip->controller = &nfc->controller; ++ chip->exec_op = phytium_nfc_exec_op; ++ chip->select_chip = phytium_nfc_select_chip; ++ chip->setup_data_interface = phytium_nfc_setup_data_interface; ++ phytium_nfc_default_data_interface(nfc); ++ ++ mtd = nand_to_mtd(chip); ++ mtd->dev.parent = dev; ++ mtd->owner = THIS_MODULE; ++ ++ /* ++ * Default to HW ECC engine mode. If the nand-ecc-mode property is given ++ * in the DT node, this entry will be overwritten in nand_scan_ident(). 
++ */ ++ chip->ecc.mode = NAND_ECC_HW; ++ ++ chip->options |= NAND_BUSWIDTH_AUTO; ++ chip->options |= NAND_SKIP_BBTSCAN; ++ chip->bbt_options |= NAND_BBT_NO_OOB; ++ ++ ret = nand_scan(mtd, phytium_nand->nsels); ++ if (ret) { ++ dev_err(dev, "could not scan the nand chip\n"); ++ goto out; ++ } ++ ++ ret = mtd_device_register(mtd, nfc->caps->parts, nfc->caps->nr_parts - 1); ++ if (ret) { ++ dev_err(dev, "failed to register mtd device: %d\n", ret); ++ nand_release(mtd); ++ return ret; ++ } ++ ++ phytium_nand->ecc.length = phytium_nand_get_ecc_total(mtd, &chip->ecc); ++ phytium_nand->ecc.offset = mtd->oobsize - phytium_nand->ecc.length; ++ chip->ecc.total = phytium_nand_get_ecc_total(mtd, &chip->ecc); ++ ++ mtd_ooblayout_ecc(mtd, 0, &phytium_nand->ecc); ++ ++ dev_info(dev, "ooblayout ecc offset: %x, length: %x\n", ++ phytium_nand->ecc.offset, phytium_nand->ecc.length); ++ ++out: ++ list_add_tail(&phytium_nand->node, &nfc->chips); ++ return 0; ++} ++ ++int phytium_nand_init(struct phytium_nfc *nfc) ++{ ++ int ret; ++ ++ nand_controller_init(&nfc->controller); ++ nfc->controller.ops = &phytium_nand_controller_ops; ++ INIT_LIST_HEAD(&nfc->chips); ++ ++ init_completion(&nfc->complete); ++ ++ /* Init the controller and then probe the chips */ ++ ret = phytium_nfc_init(nfc); ++ if (ret) ++ goto out; ++ ++ ret = devm_request_irq(nfc->dev, nfc->irq, phytium_nfc_isr, ++ IRQF_SHARED, "phytium-nfc", nfc); ++ if (ret) ++ goto out; ++ ++ INIT_WORK(&nfc->work, nfc_irq_callback); ++ ++ ret = phytium_nand_chip_init(nfc); ++ if (ret) ++ goto out; ++ ++ spin_lock_init(&nfc->spinlock); ++ ++out: ++ return ret; ++} ++EXPORT_SYMBOL_GPL(phytium_nand_init); ++ ++int phytium_nand_remove(struct phytium_nfc *nfc) ++{ ++ phytium_nand_chips_cleanup(nfc); ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(phytium_nand_remove); ++ ++static int phytium_nfc_wait_ndrun(struct nand_chip *chip) ++{ ++ struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); ++ int ret = 0; ++ u32 val; ++ ++ ret = 
readl_relaxed_poll_timeout(nfc->regs + NDSR, val, ++ (val & NDSR_RB) == 0, ++ 0, 100 * 1000); ++ if (ret) { ++ dev_err(nfc->dev, "Timeout on NAND controller run mode\n"); ++ ret = -EAGAIN; ++ } ++ ++ return ret; ++} ++ ++int phytium_nand_prepare(struct phytium_nfc *nfc) ++{ ++ struct phytium_nand_chip *chip = NULL; ++ ++ list_for_each_entry(chip, &nfc->chips, node) ++ phytium_nfc_wait_ndrun(&chip->chip); ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(phytium_nand_prepare); ++ ++int phytium_nand_resume(struct phytium_nfc *nfc) ++{ ++ nfc->selected_chip = NULL; ++ phytium_nfc_init(nfc); ++ phytium_nfc_default_data_interface(nfc); ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(phytium_nand_resume); ++ ++MODULE_LICENSE("GPL v2"); ++MODULE_DESCRIPTION("Phytium NAND controller driver"); ++MODULE_AUTHOR("Zhu Mingshuai "); +diff --git a/drivers/mtd/nand/raw/phytium_nand.h b/drivers/mtd/nand/raw/phytium_nand.h +new file mode 100644 +index 000000000000..8a1c6d682c65 +--- /dev/null ++++ b/drivers/mtd/nand/raw/phytium_nand.h +@@ -0,0 +1,441 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Phytium NAND flash controller driver ++ * ++ * Copyright (C) 2020-2021, Phytium Technology, Co., Ltd. 
++ */ ++#ifndef PHYTIUM_NAND_H ++#define PHYTIUM_NAND_H ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* NFC does not support transfers of larger chunks at a time */ ++#define MAX_PAGE_NUM 16 ++#define MAX_CHUNK_SIZE ((1024 + 76) * 16) ++ ++#define POLL_PERIOD 0 ++#define POLL_TIMEOUT 100000 ++/* Interrupt maximum wait period in ms */ ++#define IRQ_TIMEOUT 1000 ++ ++/* Latency in clock cycles between SoC pins and NFC logic */ ++#define MIN_RD_DEL_CNT 3 ++ ++#define PHYTIUM_NFC_ADDR_MAX_LEN 5 ++#define PHYTIUM_NFC_DSP_SIZE 16 ++ ++/* NAND controller flash control register */ ++#define NDCR0 0x00 ++#define NDCR0_EN BIT(0) ++#define NDCR0_WIDTH BIT(1) ++#define NDCR0_IN_MODE(x) (min_t(u32, x, 0x3) << 2) ++#define NDCR0_ECC_EN BIT(4) ++#define NDCR0_ECC_STREN(x) (min_t(u32, x, 0x7) << 5) ++#define NDCR0_SPARE_EN BIT(8) ++#define NDCR0_SPARE_SIZE(x) (min_t(u32, x, 0xFFF) << 9) ++#define NDCR0_GENERIC_FIELDS_MASK ++ ++#define NDCR1 0x04 ++#define NDCR1_SAMPL_PHASE(x) min_t(u32, x, 0xFFFF) ++ ++#define NDAR0 0x08 ++ ++#define NDAR1 0x0C ++#define NDAR1_H8(x) min_t(u32, x, 0xFF) ++#define NDAR1_DMA_EN BIT(8) ++#define NDAR1_EMPTY(x) (min_t(u32, x, 0x7F) << 9) ++#define NDAR1_DMA_RLEN(x) (min_t(u32, x, 0xFF) << 9) ++#define NDAR1_DMA_WLEN(x) (min_t(u32, x, 0xFF) << 9) ++ ++#define NDTR0 0x10 ++#define NDTR0_TCS_TCLS(x) (min_t(u32, x, 0xFFFF) << 0) ++#define NDTR0_TCLS_TWP(x) (min_t(u32, x, 0xFFFF) << 16) ++ ++#define NDTR1 0x14 ++#define NDTR1_TWH(x) (min_t(u32, x, 0xFFFF) << 0) ++#define NDTR1_TWP(x) (min_t(u32, x, 0xFFFF) << 16) ++ ++#define NDTR2 0x18 ++#define NDTR2_TCH_TCLH(x) (min_t(u32, x, 0xFFFF) << 0) ++#define NDTR2_TCLH_TWH(x) (min_t(u32, x, 0xFFFF) << 16) ++ ++#define NDTR3 0x1c ++#define NDTR3_TDQ_EN(x) (min_t(u32, x, 0xFFFF) << 0) ++#define NDTR3_TCH_TWH(x) (min_t(u32, x, 0xFFFF) << 16) ++ ++#define NDTR4 0x20 ++#define NDTR4_TWHR_SMX(x) (min_t(u32, x, 
0xFFFF) << 0) ++#define NDTR4_TREH(x) (min_t(u32, x, 0xFFFF) << 16) ++ ++#define NDTR5 0x24 ++#define NDTR5_TRC(x) (min_t(u32, x, 0xFFFF) << 0) ++#define NDTR5_TADL_SMX(x) (min_t(u32, x, 0xFFFF) << 16) ++ ++#define NDTR6 0x28 ++#define NDTR6_TCAD_TCS_SMX(x) (min_t(u32, x, 0xFFFF) << 0) ++#define NDTR6_RES(x) (min_t(u32, x, 0xFFFF) << 16) ++ ++#define NDTR7 0x2c ++#define NDTR7_TCK(x) (min_t(u32, x, 0xFFFF) << 0) ++#define NDTR7_TDQ_EN(x) (min_t(u32, x, 0xFFFF) << 16) ++ ++#define NDTR8 0x30 ++#define NDTR8_TCAD_TCK_SMX(x) (min_t(u32, x, 0xFFFF) << 0) ++#define NDTR8_HF_TCK(x) (min_t(u32, x, 0xFFFF) << 16) ++ ++#define NDTR9 0x34 ++#define NDTR9_TWHR(x) (min_t(u32, x, 0xFFFF) << 0) ++#define NDTR9_TCCS_TCALS_SMX(x) (min_t(u32, x, 0xFFFF) << 16) ++ ++#define NDTR10 0x38 ++#define NDTR10_TCK(x) (min_t(u32, x, 0xFFFF) << 0) ++#define NDTR10_MTCK(x) (min_t(u32, x, 0xFFFF) << 16) ++ ++#define NDTR11 0x3c ++#define NDTR11_TCK_TCALS(x) (min_t(u32, x, 0xFFFF) << 16) ++#define NDTR11_RES(x) (min_t(u32, x, 0xFFFF) << 16) ++ ++#define NDTR12 0x40 ++#define NDTR12_TWRCK(x) (min_t(u32, x, 0xFFFF) << 0) ++#define NDTR12_RES(x) (min_t(u32, x, 0xFFFF) << 16) ++ ++#define NDTR13 0x44 ++#define NDTR13_TWRHCA(x) (min_t(u32, x, 0xFFFF) << 0) ++#define NDTR13_TRLCA(x) (min_t(u32, x, 0xFFFF) << 16) ++ ++#define NDTR14 0x48 ++#define NDTR14_TWRHCE(x) (min_t(u32, x, 0xFFFF) << 0) ++#define NDTR14_RES(x) (min_t(u32, x, 0xFFFF) << 16) ++ ++#define NDTR15 0x4c ++#define NDTR15_TCDQSS_TWPRE_TDS(x) (min_t(u32, x, 0xFFFF) << 0) ++#define NDTR15_HFTDSC(x) (min_t(u32, x, 0xFFFF) << 16) ++ ++#define NDTR16 0x50 ++#define NDTR16_TWPST_TDH(x) (min_t(u32, x, 0xFFFF) << 0) ++#define NDTR16_TWPSTH(x) (min_t(u32, x, 0xFFFF) << 16) ++ ++#define NDTR17 0x54 ++#define NDTR17_TCS_TRPRE(x) (min_t(u32, x, 0xFFFF) << 0) ++#define NDTR17_TRELDQS(x) (min_t(u32, x, 0xFFFF) << 16) ++ ++#define NDTR18 0x58 ++#define NDTR18_TRPST_TDQSRE(x) (min_t(u32, x, 0xFFFF) << 0) ++#define NDTR18_RES(x) (min_t(u32, x, 0xFFFF) << 
16) ++ ++#define NDFIFO 0x5c ++#define NDFIFO_REV (min_t(u32, x, 0) << 12) ++#define NDFIFO_FULL BIT(11) ++#define NDFIFO_EMP BIT(10) ++#define NDFIFO_CNT(x) (min_t(u32, x, 0x3F) << 0) ++ ++#define ND_INTERVAL_TIME 0x60 ++#define NDCMD_INTERVAL_TIME 0x64 ++#define NDFIFO_TIMEOUT 0x68 ++#define NDFIFO_LEVEL0 0x6c ++#define NDFIFO_LEVEL1 0x70 ++#define NDWP 0x74 ++#define NDFIFO_CLR 0x78 ++ ++#define NDSR 0x7c ++#define NDSR_BUSY BIT(0) ++#define NDSR_DMA_BUSY BIT(1) ++#define NDSR_DMA_PGFINISH BIT(2) ++#define NDSR_DMA_FINISH BIT(3) ++#define NDSR_FIFO_EMP BIT(4) ++#define NDSR_FIFO_FULL BIT(5) ++#define NDSR_FIFO_TIMEOUT BIT(6) ++#define NDSR_CS(x) (min_t(u32, x, 0xF) << 7) ++#define NDSR_CMD_PGFINISH BIT(11) ++#define NDSR_PG_PGFINISH BIT(12) ++#define NDSR_RE BIT(13) ++#define NDSR_DQS BIT(14) ++#define NDSR_RB BIT(15) ++#define NDSR_ECC_BUSY BIT(16) ++#define NDSR_ECC_FINISH BIT(17) ++#define NDSR_ECC_RIGHT BIT(18) ++#define NDSR_ECC_ERR BIT(19) ++#define NDSR_ECC_ERROVER BIT(20) ++#define NDSR_AXI_DSP_ERR BIT(21) ++#define NDSR_AXI_RD_ERR BIT(22) ++#define NDSR_AXI_WR_ERR BIT(23) ++ ++#define NDIR_MASK 0x80 ++#define NDIR_BUSY_MASK BIT(0) ++#define NDIR_DMA_BUSY_MASK BIT(1) ++#define NDIR_DMA_PGFINISH_MASK BIT(2) ++#define NDIR_DMA_FINISH_MASK BIT(3) ++#define NDIR_FIFO_EMP_MASK BIT(4) ++#define NDIR_FIFO_FULL_MASK BIT(5) ++#define NDIR_FIFO_TIMEOUT_MASK BIT(6) ++#define NDIR_CMD_FINISH_MASK BIT(7) ++#define NDIR_PGFINISH_MASK BIT(8) ++#define NDIR_RE_MASK BIT(9) ++#define NDIR_DQS_MASK BIT(10) ++#define NDIR_RB_MASK BIT(11) ++#define NDIR_ECC_FINISH_MASK BIT(12) ++#define NDIR_ECC_ERR_MASK BIT(13) ++ ++#define NDIR 0x84 ++#define NDIR_ALL_INT GENMASK(13, 0) ++#define NDIR_BUSY BIT(0) ++#define NDIR_DMA_BUSY BIT(1) ++#define NDIR_DMA_PGFINISH BIT(2) ++#define NDIR_DMA_FINISH BIT(3) ++#define NDIR_FIFO_EMP BIT(4) ++#define NDIR_FIFO_FULL BIT(5) ++#define NDIR_FIFO_TIMEOUT BIT(6) ++#define NDIR_CMD_FINISH BIT(7) ++#define NDIR_PGFINISH BIT(8) ++#define NDIR_RE 
BIT(9) ++#define NDIR_DQS BIT(10) ++#define NDIR_RB BIT(11) ++#define NDIR_ECC_FINISH BIT(12) ++#define NDIR_ECC_ERR BIT(13) ++ ++#define ND_DEBUG 0x88 ++ ++#define ND_ERR_CLR 0x8c ++#define ND_DSP_ERR_CLR BIT(0) ++#define ND_AXI_RD_ERR_CLR BIT(1) ++#define ND_AXI_WR_ERR_CLR BIT(2) ++#define ND_ECC_ERR_CLR BIT(3) ++ ++#define NDCR_PAGE_SZ(x) (x >= 2048 ? BIT(24) : 0) ++ ++enum nand_inter_pro { ++ NAND_ONFI, ++ NAND_JEDEC, ++ NAND_OTHER, ++}; ++ ++enum nand_inter_mode { ++ ASYN_SDR, ++ ONFI_DDR, ++ TOG_ASYN_DDR, ++}; ++ ++enum asy_timing_mode { ++ ASY_MODE0, ++ ASY_MODE1, ++ ASY_MODE2, ++ ASY_MODE3, ++ ASY_MODE4, ++}; ++ ++enum onfi_syn_timing_mode { ++ SYN_MODE0 = 0x10, ++ SYN_MODE1, ++ SYN_MODE2, ++ SYN_MODE3, ++ SYN_MODE4, ++}; ++ ++/** ++ * NAND controller timings expressed in NAND Controller clock cycles ++ * ++ * @tRP: ND_nRE pulse width ++ * @tRH: ND_nRE high duration ++ * @tWP: ND_nWE pulse time ++ * @tWH: ND_nWE high duration ++ * @tCS: Enable signal setup time ++ * @tCH: Enable signal hold time ++ * @tADL: Address to write data delay ++ * @tAR: ND_ALE low to ND_nRE low delay ++ * @tWHR: ND_nWE high to ND_nRE low for status read ++ * @tRHW: ND_nRE high duration, read to write delay ++ * @tR: ND_nWE high to ND_nRE low for read ++ */ ++struct phytium_nfc_timings { ++ u16 tRP; ++ u16 tRH; ++ u16 tWP; /* NDTR1_TWP */ ++ u16 tWH; /* NDTR1_TWH */ ++ u16 tCS; ++ u16 tCH; ++ u16 tADL; ++ u16 tAR; ++ u16 tWHR; ++ u16 tRHW; ++ u16 tR; ++}; ++ ++/** ++ * NAND chip structure: stores NAND chip device related information ++ * ++ * @chip: Base NAND chip structure ++ * @node: Used to store NAND chips into a list ++ * @ndcr: Controller register value for this NAND chip ++ * @ndtr0: Timing registers 0 value for this NAND chip ++ * @ndtr1: Timing registers 1 value for this NAND chip ++ * @selected_die: Current active CS ++ * @nsels: Number of CS lines required by the NAND chip ++ */ ++struct phytium_nand_chip { ++ struct nand_chip chip; ++ struct list_head node; ++ u32 ndcr; 
++ u32 ndtr0; ++ u32 ndtr1; ++ int addr_cyc; ++ int selected_die; ++ unsigned int nsels; ++ struct mtd_oob_region ecc; ++}; ++ ++/** ++ * NAND controller capabilities for distinction between compatible strings ++ * ++ * @max_cs_nb: Number of Chip Select lines available ++ * @max_rb_nb: Number of Ready/Busy lines available ++ * @legacy_of_bindings: Indicates if DT parsing must be done using the old ++ * fashion way ++ * @flash_bbt: ++ * @ecc_strength: ++ * @ecc_step_size: ++ * @parts: ++ * @nr_parts: ++ */ ++struct phytium_nfc_caps { ++ unsigned int max_cs_nb; ++ unsigned int max_rb_nb; ++ bool legacy_of_bindings; ++ bool flash_bbt; ++ int ecc_strength; ++ int ecc_step_size; ++ struct mtd_partition *parts; ++ unsigned int nr_parts; ++}; ++ ++/** ++ * NAND controller structure: stores Phytium NAND controller information ++ * ++ * @controller: Base controller structure ++ * @dev: Parent device (used to print error messages) ++ * @regs: NAND controller registers ++ * @reg_clk: Registers clock ++ * @complete: Completion object to wait for NAND controller events ++ * @chips: List containing all the NAND chips attached to ++ * this NAND controller ++ * @caps: NAND controller capabilities for each compatible string ++ * @dma_buf: 32-bit aligned buffer for DMA transfers (NFCv1 only) ++ */ ++struct phytium_nfc { ++ struct nand_controller controller; ++ struct device *dev; ++ void __iomem *regs; ++ int irq; ++ struct completion complete; ++ struct list_head chips; ++ struct nand_chip *selected_chip; ++ struct phytium_nfc_caps *caps; ++ ++ void *dsp_addr; ++ dma_addr_t dsp_phy_addr; ++ ++ void *dma_buf; ++ u32 dma_offset; ++ dma_addr_t dma_phy_addr; ++ ++ enum nand_inter_pro inter_pro; ++ enum nand_inter_mode inter_mode; ++ u32 timing_mode; ++ ++ spinlock_t spinlock; ++ struct work_struct work; ++}; ++ ++/** ++ * Derives a duration in numbers of clock cycles. 
++ * ++ * @ps: Duration in pico-seconds ++ * @period_ns: Clock period in nano-seconds ++ * ++ * Convert the duration in nano-seconds, then divide by the period and ++ * return the number of clock periods. ++ */ ++#define TO_CYCLES(ps, period_ns) (DIV_ROUND_UP(ps / 1000, period_ns)) ++#define TO_CYCLES64(ps, period_ns) (DIV_ROUND_UP_ULL(div_u64(ps, 1000), \ ++ period_ns)) ++ ++struct phytium_nfc_cmd_ctrl { ++ u16 csel:4; ++ u16 dbc:1; ++ u16 addr_cyc:3; ++ u16 nc:1; ++#define TYPE_RESET 0x00 ++#define TYPE_SET_FTR 0x01 ++#define TYPE_GET_FTR 0x02 ++#define TYPE_READ_ID 0x03 ++#define TYPE_PAGE_PRO 0x04 ++#define TYPE_ERASE 0x05 ++#define TYPE_READ 0x06 ++#define TYPE_TOGGLE 0x07 ++#define TYPE_READ_PARAM 0x02 ++#define TYPE_READ_STATUS 0x03 ++#define TYPE_CH_READ_COL 0x03 ++#define TYPE_CH_ROW_ADDR 0x01 ++#define TYPE_CH_WR_COL 0x01 ++ u16 cmd_type:4; ++ u16 dc:1; ++ u16 auto_rs:1; ++ u16 ecc_en:1; ++}; ++ ++/** ++ * NAND driver structure filled during the parsing of the ->exec_op() subop ++ * subset of instructions. 
++ * ++ * @cle_ale_delay_ns: Optional delay after the last CMD or ADDR cycle ++ * @rdy_timeout_ms: Timeout for waits on Ready/Busy pin ++ * @rdy_delay_ns: Optional delay after waiting for the RB pin ++ * @data_delay_ns: Optional delay after the data xfer ++ * @data_instr_idx: Index of the data instruction in the subop ++ * @data_instr: Pointer to the data instruction in the subop ++ */ ++struct phytium_nfc_op { ++ u8 cmd[2]; ++ union { ++ u16 ctrl; ++ struct phytium_nfc_cmd_ctrl nfc_ctrl; ++ } cmd_ctrl; ++ u8 addr[PHYTIUM_NFC_ADDR_MAX_LEN]; ++ u16 page_cnt; ++ u8 mem_addr_first[PHYTIUM_NFC_ADDR_MAX_LEN]; ++ ++ u32 cmd_len; ++ u32 addr_len; ++ ++ u32 cle_ale_delay_ns; ++ u32 rdy_timeout_ms; ++ u32 rdy_delay_ns; ++ u32 data_delay_ns; ++ u32 data_instr_idx; ++ struct nand_op_instr *data_instr; ++} __attribute__ ((__packed__)); ++ ++#define TIMING_ASY_NUM 12 ++#define TIMING_SYN_NUM 14 ++#define TIMING_TOG_NUM 12 ++ ++#define TMP_DMA_DEBUG 0 /* Temporary dma space */ ++ ++int phytium_nand_init(struct phytium_nfc *nfc); ++int phytium_nand_remove(struct phytium_nfc *nfc); ++int phytium_nand_prepare(struct phytium_nfc *nfc); ++int phytium_nand_suspend(struct phytium_nfc *nfc); ++int phytium_nand_resume(struct phytium_nfc *nfc); ++ ++#endif /* PHYTIUM_NAND_H */ +diff --git a/drivers/mtd/nand/raw/phytium_nand_pci.c b/drivers/mtd/nand/raw/phytium_nand_pci.c +new file mode 100644 +index 000000000000..4f614910f34d +--- /dev/null ++++ b/drivers/mtd/nand/raw/phytium_nand_pci.c +@@ -0,0 +1,149 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * PCI driver for Phytium NAND flash controller ++ * ++ * Copyright (C) 2021, Phytium Technology Co., Ltd. 
++ */ ++#include ++#include ++#include ++ ++#include "phytium_nand.h" ++ ++#define DRV_NAME "phytium_nand_pci" ++ ++static struct mtd_partition partition_info[] = { ++ { ++ .name = "Flash partition 1", ++ .offset = 0x0000000, ++ .size = 0x4000000 }, ++ { ++ .name = "Flash partition 2", ++ .offset = 0x4000000, ++ .size = 0x8000000 }, ++ { ++ .name = "Flash partition 3", ++ .offset = 0x8000000, ++ .size = 0x10000000 }, ++ { ++ .name = "Flash partition 4", ++ .offset = 0x10000000, ++ .size = 0x12000000 }, ++ { ++ .name = "Flash partition 5", ++ .offset = 0x12000000, ++ .size = 0x14000000 }, ++}; ++ ++static struct phytium_nfc_caps x100_nfc_caps = { ++ .max_cs_nb = 2, ++ .max_rb_nb = 1, ++ .legacy_of_bindings = true, ++ .ecc_strength = 4, ++ .ecc_step_size = 512, ++ .nr_parts = 5, ++ .parts = partition_info, ++}; ++ ++static int phytium_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) ++{ ++ struct phytium_nfc *nfc; ++ int ret; ++ ++ ret = pcim_enable_device(pdev); ++ if (ret) ++ return ret; ++ ++ ret = pcim_iomap_regions(pdev, 0x1, pci_name(pdev)); ++ if (ret) { ++ dev_err(&pdev->dev, "I/O memory remapping failed\n"); ++ return ret; ++ } ++ ++ pci_set_master(pdev); ++ pci_try_set_mwi(pdev); ++ ++ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); ++ if (ret) ++ return ret; ++ ++ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); ++ if (ret) ++ return ret; ++ ++ nfc = devm_kzalloc(&pdev->dev, sizeof(struct phytium_nfc), ++ GFP_KERNEL); ++ if (!nfc) ++ return -ENOMEM; ++ ++ nfc->dev = &pdev->dev; ++ nfc->regs = pcim_iomap_table(pdev)[0]; ++ nfc->irq = pdev->irq; ++ nfc->caps = &x100_nfc_caps; ++ ++ ret = phytium_nand_init(nfc); ++ if (ret) ++ return ret; ++ ++ pci_set_drvdata(pdev, nfc); ++ ++ return ret; ++} ++ ++static void phytium_pci_remove(struct pci_dev *pdev) ++{ ++ struct phytium_nfc *nfc = pci_get_drvdata(pdev); ++ int ret; ++ ++ ret = phytium_nand_remove(nfc); ++ if (ret) ++ dev_warn(&pdev->dev, "can't remove device properly: %d\n", ret); 
++} ++ ++static int __maybe_unused phytium_nfc_prepare(struct device *dev) ++{ ++ struct pci_dev *pci = to_pci_dev(dev); ++ struct phytium_nfc *nfc = pci_get_drvdata(pci); ++ int ret; ++ ++ ret = phytium_nand_prepare(nfc); ++ ++ return 0; ++} ++ ++static int __maybe_unused phytium_nfc_resume(struct device *dev) ++{ ++ struct pci_dev *pci = to_pci_dev(dev); ++ struct phytium_nfc *nfc = pci_get_drvdata(pci); ++ int ret; ++ ++ ret = phytium_nand_resume(nfc); ++ ++ return ret; ++} ++ ++static const struct dev_pm_ops phytium_pci_dev_pm_ops = { ++ .prepare = phytium_nfc_prepare, ++ .resume = phytium_nfc_resume, ++}; ++ ++static const struct pci_device_id phytium_pci_id_table[] = { ++ { PCI_VDEVICE(PHYTIUM, 0xdc29) }, ++ { } ++}; ++MODULE_DEVICE_TABLE(pci, phytium_pci_id_table); ++ ++static struct pci_driver phytium_pci_driver = { ++ .name = DRV_NAME, ++ .id_table = phytium_pci_id_table, ++ .probe = phytium_pci_probe, ++ .remove = phytium_pci_remove, ++ .driver = { ++ .pm = &phytium_pci_dev_pm_ops, ++ }, ++}; ++module_pci_driver(phytium_pci_driver); ++ ++MODULE_LICENSE("GPL v2"); ++MODULE_DESCRIPTION("PCI driver for Phytium NAND controller"); ++MODULE_AUTHOR("Zhu Mingshuai "); +diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig +index 6cc9c929ff57..9c2e9e8509b7 100644 +--- a/drivers/mtd/spi-nor/Kconfig ++++ b/drivers/mtd/spi-nor/Kconfig +@@ -7,6 +7,15 @@ menuconfig MTD_SPI_NOR + + if MTD_SPI_NOR + ++config SPI_PHYTIUM_QUADSPI ++ tristate "Phytium Quad SPI Controller" ++ depends on ARCH_PHYTIUM || ARM ++ depends on OF && HAS_IOMEM ++ help ++ This enables support for the Quad SPI controller in master mode. ++ This driver does not support generic SPI. The implementation only ++ supports SPI NOR. 
++ + config MTD_MT81xx_NOR + tristate "Mediatek MT81xx SPI NOR flash controller" + depends on HAS_IOMEM +diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile +index f4c61d282abd..ebc8ce095bd0 100644 +--- a/drivers/mtd/spi-nor/Makefile ++++ b/drivers/mtd/spi-nor/Makefile +@@ -1,5 +1,6 @@ + # SPDX-License-Identifier: GPL-2.0 + obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o ++obj-$(CONFIG_SPI_PHYTIUM_QUADSPI) += phytium-quadspi.o + obj-$(CONFIG_SPI_ASPEED_SMC) += aspeed-smc.o + obj-$(CONFIG_SPI_ATMEL_QUADSPI) += atmel-quadspi.o + obj-$(CONFIG_SPI_CADENCE_QUADSPI) += cadence-quadspi.o +diff --git a/drivers/mtd/spi-nor/phytium-quadspi.c b/drivers/mtd/spi-nor/phytium-quadspi.c +new file mode 100644 +index 000000000000..15502ecc295a +--- /dev/null ++++ b/drivers/mtd/spi-nor/phytium-quadspi.c +@@ -0,0 +1,1006 @@ ++/* ++ * Phytium SPI core controller driver. ++ * ++ * Copyright (c) 2019, Phytium Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms and conditions of the GNU General Public License, ++ * version 2, as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ * more details. 
++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define QSPI_FLASH_CAP_REG 0x000 ++#define QSPI_RD_CFG_REG 0x004 ++#define QSPI_WR_CFG_REG 0x008 ++#define QSPI_FLUSH_REG 0x00C ++#define QSPI_CMD_PORT_REG 0x010 ++#define QSPI_ADDR_PORT_REG 0x014 ++#define QSPI_HD_PORT_REG 0x018 ++#define QSPI_LD_PORT_REG 0x01C ++#define QSPI_FUN_SET_REG 0x020 ++#define QSPI_WIP_REG 0x024 ++#define QSPI_WP_REG 0x028 ++#define QSPI_MODE_REG 0x02C ++ ++#define QSPI_FLASH_CAP_NUM_SHIFT 3 ++#define QSPI_FLASH_CAP_NUM_MASK (0x3 << QSPI_FLASH_CAP_NUM_SHIFT) ++#define QSPI_FLASH_CAP_CAP_SHIFT 0 ++#define QSPI_FLASH_CAP_CAP_MASK (0x7 << QSPI_FLASH_CAP_CAP_SHIFT) ++ ++#define QSPI_RD_CFG_RD_CMD_SHIFT 24 ++#define QSPI_RD_CFG_RD_CMD_MASK (0xFF << QSPI_RD_CFG_RD_CMD_SHIFT) ++#define QSPI_RD_CFG_RD_THROUGH_SHIFT 23 ++#define QSPI_RD_CFG_RD_THROUGH_MASK (0x01 << QSPI_RD_CFG_RD_THROUGH_SHIFT) ++#define QSPI_RD_CFG_RD_TRANSFER_SHIFT 20 ++#define QSPI_RD_CFG_RD_TRANSFER_MASK (0x07 << QSPI_RD_CFG_RD_TRANSFER_SHIFT) ++#define QSPI_RD_CFG_RD_ADDR_SEL_SHIFT 19 ++#define QSPI_RD_CFG_RD_ADDR_SEL_MASK (0x1 << QSPI_RD_CFG_RD_ADDR_SEL_SHIFT) ++#define QSPI_RD_CFG_RD_LATENCY_SHIFT 18 ++#define QSPI_RD_CFG_RD_LATENCY_MASK (0x1 << QSPI_RD_CFG_RD_LATENCY_SHIFT) ++#define QSPI_RD_CFG_MODE_BYTE_SHIFT 17 ++#define QSPI_RD_CFG_MODE_BYTE_MASK (0x1 << QSPI_RD_CFG_MODE_BYTE_SHIFT) ++#define QSPI_RD_CFG_CMD_SIGN_SHIFT 9 ++#define QSPI_RD_CFG_CMD_SIGN_MASK (0xFF << QSPI_RD_CFG_CMD_SIGN_SHIFT) ++#define QSPI_RD_CFG_DUMMY_SHIFT 4 ++#define QSPI_RD_CFG_DUMMY_MASK (0x1F << QSPI_RD_CFG_DUMMY_SHIFT) ++#define QSPI_RD_CFG_D_BUFFER_SHIFT 3 ++#define QSPI_RD_CFG_D_BUFFER_MASK (0x1 << QSPI_RD_CFG_D_BUFFER_SHIFT) ++#define QSPI_RD_CFG_RD_SCK_SEL_SHIFT 0 ++#define QSPI_RD_CFG_RD_SCK_SEL_MASK (0x3 << QSPI_RD_CFG_RD_SCK_SEL_SHIFT) ++ ++#define QSPI_WR_CFG_WR_CMD_SHIFT 24 ++#define 
QSPI_WR_CFG_WR_CMD_MASK (0xFF << QSPI_WR_CFG_WR_CMD_SHIFT) ++#define QSPI_WR_CFG_WR_WAIT_SHIFT 9 ++#define QSPI_WR_CFG_WR_WAIT_MASK (0x01 << QSPI_WR_CFG_WR_WAIT_SHIFT) ++#define QSPI_WR_CFG_WR_THROUGH_SHIFT 8 ++#define QSPI_WR_CFG_WR_THROUGH_MAS (0x01 << QSPI_WR_CFG_WR_THROUGH_SHIFT) ++#define QSPI_WR_CFG_WR_TRANSFER_SHIFT 5 ++#define QSPI_WR_CFG_WR_TRANSFER_MASK (0X7 << QSPI_WR_CFG_WR_TRANSFER_SHIFT) ++#define QSPI_WR_CFG_WR_ADDR_SEL_SHIFT 4 ++#define QSPI_WR_CFG_WR_ADDR_SEL_MASK (0x1 << QSPI_WR_CFG_WR_ADDR_SEL_SHIFT) ++#define QSPI_WR_CFG_WR_MODE_SHIFT 3 ++#define QSPI_WR_CFG_WR_MODE (0x01 << QSPI_WR_CFG_WR_MODE_SHIFT) ++#define QSPI_WR_CFG_WR_SCK_SEL_SHIFT 0 ++#define QSPI_WR_CFG_WR_SCK_SEL_MASK (0x7 << QSPI_WR_CFG_WR_SCK_SEL_SHIFT) ++ ++#define QSPI_FLUSH_EN (0x1 << 0) ++ ++#define QSPI_CMD_PORT_CMD_SHIFT 24 ++#define QSPI_CMD_PORT_CMD_MASK (0xFF << QSPI_CMD_PORT_CMD_SHIFT) ++#define QSPI_CMD_PORT_WAIT_SHIFT 22 ++#define QSPI_CMD_PORT_WAIT_MASK (0x1 << QSPI_CMD_PORT_WAIT_SHIFT) ++#define QSPI_CMD_PORT_THROUGH_SHIFT 21 ++#define QSPI_CMD_PORT_THROUGH_MASK (0x1 << QSPI_CMD_PORT_THROUGH_SHIFT) ++#define QSPI_CMD_PORT_CS_SHIFT 19 ++#define QSPI_CMD_PORT_CS_MASK (0x3 << QSPI_CMD_PORT_CS_SHIFT) ++#define QSPI_CMD_PORT_TRANSFER_SHIFT 16 ++#define QSPI_CMD_PORT_TRANSFER_MASK (0x7 << QSPI_CMD_PORT_TRANSFER_SHIFT) ++#define QSPI_CMD_PORT_CMD_ADDR_SHIFT 15 ++#define QSPI_CMD_PORT_CMD_ADDR_MASK (0x1 << QSPI_CMD_PORT_CMD_ADDR_SHIFT) ++#define QSPI_CMD_PORT_LATENCY_SHIFT 14 ++#define QSPI_CMD_PORT_LATENCY_MASK (0x1 << QSPI_CMD_PORT_LATENCY_SHIFT) ++#define QSPI_CMD_PORT_DATA_TRANSFER_SHIFT 13 ++#define QSPI_CMD_PORT_DATA_TRANSFER_MASK (0x1 << 13) ++#define QSPI_CMD_PORT_SEL_SHIFT 12 ++#define QSPI_CMD_PORT_SEL_MASK (0x1 << QSPI_CMD_PORT_SEL_SHIFT) ++#define QSPI_CMD_PORT_DUMMY_SHIFT 7 ++#define QSPI_CMD_PORT_DUMMY_MASK (0x1F << QSPI_CMD_PORT_DUMMY_SHIFT) ++#define QSPI_CMD_PORT_P_BUFFER_SHIFT 6 ++#define QSPI_CMD_PORT_P_BUFFER_MASK (0x1 << QSPI_CMD_PORT_P_BUFFER_SHIFT) 
++#define QSPI_CMD_PORT_RW_NUM_SHIFT 3 ++#define QSPI_CMD_PORT_RW_NUM_MASK (0x7 << QSPI_CMD_PORT_RW_NUM_SHIFT) ++#define QSPI_CMD_PORT_SCK_SEL_SHIFT 0 ++#define QSPI_CMD_PORT_SCK_SEL_MASK (0x7 << QSPI_CMD_PORT_SCK_SEL_SHIFT) ++ ++#define QSPI_FUN_SET_HOLD_SHIFT 24 ++#define QSPI_FUN_SET_HOLD_MASK (0xFF << QSPI_FUN_SET_HOLD_SHIFT) ++#define QSPI_FUN_SET_SETUP_SHIFT 16 ++#define QSPI_FUN_SET_SETUP_MASK (0xFF << QSPI_FUN_SET_SETUP_SHIFT) ++#define QSPI_FUN_SET_DELAY_SHIFT 0 ++#define QSPI_FUN_SET_DELAY_MASK (0xFFFF << QSPI_FUN_SET_DELAY_SHIFT) ++ ++#define QSPI_WIP_W_CMD_SHIFT 24 ++#define QSPI_WIP_W_CMD_MASK (0xFF << QSPI_WIP_W_CMD_SHIFT) ++#define QSPI_WIP_W_TRANSFER_SHIFT 3 ++#define QSPI_WIP_W_TRANSFER_MASK (0x3 << QSPI_WIP_W_TRANSFER_SHIFT) ++#define QSPI_WIP_W_SCK_SEL_SHIFT 0 ++#define QSPI_WIP_W_SCK_SEL_MASK (0x7 << QSPI_WIP_W_SCK_SEL_SHIFT) ++ ++#define QSPI_WP_EN_SHIFT 17 ++#define QSPI_WP_EN_MASK (0x1 << QSPI_WP_EN_SHIFT) ++#define QSPI_WP_IO2_SHIFT 16 ++#define QSPI_WP_IO2_MASK (0x1 << QSPI_WP_IO2_SHIFT) ++#define QSPI_WP_HOLD_SHIFT 8 ++#define QSPI_WP_HOLD_MASK (0xFF << QSPI_WP_HOLD_SHIFT) ++#define QSPI_WP_SETUP_SHIFT 0 ++#define QSPI_WP_SETUP_MASK (0xFF << QSPI_WP_SETUP_SHIFT) ++ ++#define QSPI_MODE_VALID_SHIFT 8 ++#define QSPI_MODE_VALID_MASK (0xFF << QSPI_MODE_VALID_SHIFT) ++#define QSPI_MODE_SHIFT 0 ++#define QSPI_MODE_MASK (0xFF << QSPI_MODE_SHIFT) ++ ++#define FSIZE_VAL(size) (__fls(size) - 1) ++ ++#define PHYTIUM_MAX_MMAP_S SZ_512M ++#define PHYTIUM_MAX_NORCHIP 2 ++ ++#define PHYTIUM_QSPI_FIFO_SZ 32 ++#define PHYTIUM_QSPI_FIFO_TIMEOUT_US 50000 ++#define PHYTIUM_QSPI_BUSY_TIMEOUT_US 100000 ++ ++#define PHYTIUM_SCK_SEL 0x05 ++#define PHYTIUM_CMD_SCK_SEL 0x07 ++ ++#define PHYTIUM_FMODE_MM 0x01 ++#define PHYTIUM_FMODE_IN 0x02 ++ ++/* ++ * the codes of the different commands ++ */ ++#define CMD_WRDI 0x04 ++#define CMD_RDID 0x9F ++#define CMD_RDSR 0x05 ++#define CMD_WREN 0x06 ++#define CMD_RDAR 0x65 ++#define CMD_P4E 0x20 ++#define CMD_4P4E 0x21 
++#define CMD_BE 0x60 ++#define CMD_4BE 0xC7 ++#define CMD_READ 0x03 ++#define CMD_FAST_READ 0x0B ++#define CMD_QOR 0x6B ++#define CMD_QIOR 0xEB ++#define CMD_DDRFR 0x0D ++#define CMD_DDRQIOQ 0xED ++#define CMD_PP 0x02 ++#define CMD_QPP 0x32 ++#define CMD_SE 0xD8 ++#define CMD_4FAST_READ 0x0C ++#define CMD_4READ 0x13 ++#define CMD_4QOR 0x6C ++#define CMD_4QIOR 0xEC ++#define CMD_4DDRFR 0x0E ++#define CMD_4DDRQIOR 0xEE ++#define CMD_4PP 0x12 ++#define CMD_4QPP 0x34 ++#define CMD_4SE 0xDC ++ ++#define PHYTIUM_QSPI_1_1_1 0 ++#define PHYTIUM_QSPI_1_1_2 1 ++#define PHYTIUM_QSPI_1_1_4 2 ++#define PHYTIUM_QSPI_1_2_2 3 ++#define PHYTIUM_QSPI_1_4_4 4 ++#define PHYTIUM_QSPI_2_2_2 5 ++#define PHYTIUM_QSPI_4_4_4 6 ++ ++struct phytium_qspi_flash { ++ struct spi_nor nor; ++ struct phytium_qspi *qspi; ++ u32 cs; ++ u32 fsize; ++ u32 presc; ++ u32 clk_div; ++ u32 read_mode; ++ bool registered; ++ u32 prefetch_limit; ++ u32 addr_width; ++ u32 read_cmd; ++}; ++ ++struct phytium_qspi { ++ struct device *dev; ++ void __iomem *io_base; ++ void __iomem *mm_base; ++ resource_size_t mm_size; ++ u32 nor_num; ++ struct clk *clk; ++ u32 clk_rate; ++ struct phytium_qspi_flash flash[PHYTIUM_MAX_NORCHIP]; ++ ++ spinlock_t spinlock; ++ ++ /* ++ * to protect device configuration, could be different between ++ * 2 flash access (bk1, bk2) ++ */ ++ struct mutex lock; ++}; ++ ++/* Need to enable p_buffer */ ++static int memcpy_from_ftreg(struct phytium_qspi *qspi, u_char *buf, size_t len) ++{ ++ int i; ++ u32 val = 0; ++ ++ if (!qspi || !buf) ++ return -EINVAL; ++ ++ for (i = 0; i < len; i++) { ++ if (0 == i % 4) ++ val = readl_relaxed(qspi->io_base + QSPI_LD_PORT_REG); ++ ++ buf[i] = (u_char) (val >> (i % 4) * 8) & 0xFF; ++ } ++ ++ return 0; ++} ++ ++/* Not to enable p_buffer */ ++static int memcpy_to_ftreg(struct phytium_qspi *qspi, u_char *buf, size_t len) ++{ ++ u32 val = 0; ++ ++ if (!qspi || !buf || (len >= 8)) ++ return -EINVAL; ++ ++ if (1 == len) { ++ val = buf[0]; ++ } else if (2 == len) { 
++ val = buf[1]; ++ val = (val << 8) + buf[0]; ++ } else if (3 == len) { ++ val = buf[2]; ++ val = (val << 8) + buf[1]; ++ val = (val << 8) + buf[0]; ++ } else if (4 == len) { ++ val = buf[3]; ++ val = (val << 8) + buf[2]; ++ val = (val << 8) + buf[1]; ++ val = (val << 8) + buf[0]; ++ } ++ ++ writel_relaxed(val, qspi->io_base + QSPI_LD_PORT_REG); ++ ++ return 0; ++} ++ ++static int phytium_qspi_wait_cmd(struct phytium_qspi *qspi, ++ struct phytium_qspi_flash *flash) ++{ ++ u32 cmd = 0; ++ u32 cnt = 0; ++ ++ cmd |= CMD_RDSR << QSPI_CMD_PORT_CMD_SHIFT; ++ cmd |= BIT(QSPI_CMD_PORT_DATA_TRANSFER_SHIFT); ++ cmd |= flash->cs << QSPI_CMD_PORT_CS_SHIFT; ++ ++ writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG); ++ ++ cnt = PHYTIUM_QSPI_BUSY_TIMEOUT_US / 10; ++ while (readl_relaxed(qspi->io_base + QSPI_LD_PORT_REG) & 0x01) { ++ udelay(10); ++ cnt--; ++ if (!cnt) { ++ dev_err(qspi->dev, "wait command process timeout\n"); ++ break; ++ } ++ } ++ ++ return !cnt; ++} ++ ++static int phytium_qspi_cmd_enable(struct phytium_qspi *qspi) ++{ ++ u32 val = 0; ++ ++ writel_relaxed(val, qspi->io_base + QSPI_LD_PORT_REG); ++ ++ return 0; ++} ++ ++static int phytium_qspi_write_enable(struct phytium_qspi *qspi, ++ struct phytium_qspi_flash *flash) ++{ ++ u32 cmd = 0; ++ ++ cmd = CMD_WREN << QSPI_CMD_PORT_CMD_SHIFT; ++ cmd |= PHYTIUM_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT; ++ cmd |= flash->cs << QSPI_CMD_PORT_CS_SHIFT; ++ ++ writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG); ++ phytium_qspi_cmd_enable(qspi); ++ ++ return 0; ++} ++ ++static int phytium_qspi_write_disable(struct phytium_qspi *qspi, ++ struct phytium_qspi_flash *flash) ++{ ++ u32 cmd = 0; ++ ++ cmd = CMD_WRDI << QSPI_CMD_PORT_CMD_SHIFT; ++ cmd |= PHYTIUM_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT; ++ cmd |= flash->cs << QSPI_CMD_PORT_CS_SHIFT; ++ ++ writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG); ++ phytium_qspi_cmd_enable(qspi); ++ ++ return 0; ++} ++ ++static int phytium_qspi_read_flash_id(struct phytium_qspi 
*qspi, ++ struct phytium_qspi_flash *flash, u8 opcode, u8 *buf, int len) ++{ ++ u32 cmd = 0; ++ unsigned long iflags; ++ ++ cmd = opcode << QSPI_CMD_PORT_CMD_SHIFT; ++ cmd |= BIT(QSPI_CMD_PORT_DATA_TRANSFER_SHIFT); ++ cmd |= BIT(QSPI_CMD_PORT_P_BUFFER_SHIFT); ++ cmd |= PHYTIUM_CMD_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT; ++ cmd |= flash->cs << QSPI_CMD_PORT_CS_SHIFT; ++ ++ writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG); ++ phytium_qspi_cmd_enable(qspi); ++ ++ spin_lock_irqsave(&qspi->spinlock, iflags); ++ memcpy_from_ftreg(qspi, buf, len); ++ spin_unlock_irqrestore(&qspi->spinlock, iflags); ++ ++ dev_dbg(qspi->dev, "read flash id:%x\n", *(u32 *)buf); ++ return 0; ++} ++ ++static int phytium_qspi_read_flash_sfdp(struct phytium_qspi *qspi, ++ struct phytium_qspi_flash *flash, u8 opcode, loff_t from, u8 *buf, int len) ++{ ++ unsigned long iflags; ++ u32 cmd = 0; ++ ++ cmd = opcode << QSPI_CMD_PORT_CMD_SHIFT; ++ cmd |= BIT(QSPI_CMD_PORT_DATA_TRANSFER_SHIFT); ++ cmd |= BIT(QSPI_CMD_PORT_P_BUFFER_SHIFT); ++ cmd |= BIT(QSPI_CMD_PORT_CMD_ADDR_SHIFT); ++ cmd |= PHYTIUM_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT; ++ cmd |= flash->cs << QSPI_CMD_PORT_CS_SHIFT; ++ ++ writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG); ++ writel_relaxed(from, qspi->io_base + QSPI_ADDR_PORT_REG); ++ phytium_qspi_cmd_enable(qspi); ++ ++ spin_lock_irqsave(&qspi->spinlock, iflags); ++ memcpy_from_ftreg(qspi, buf, len); ++ spin_unlock_irqrestore(&qspi->spinlock, iflags); ++ ++ dev_dbg(qspi->dev, "read flash sfdp:0x%llx 0x%llx\n", ++ *(u64 *)buf, *(u64 *)(buf + 8)); ++ return 0; ++} ++ ++static int phytium_qspi_read_flash_sr1(struct phytium_qspi *qspi, ++ struct phytium_qspi_flash *flash, u8 opcode, u8 *buf, int len) ++{ ++ u32 cmd = 0; ++ u32 val; ++ ++ cmd = opcode << QSPI_CMD_PORT_CMD_SHIFT; ++ cmd |= BIT(QSPI_CMD_PORT_DATA_TRANSFER_SHIFT); ++ cmd |= (len << QSPI_CMD_PORT_RW_NUM_SHIFT) & QSPI_CMD_PORT_RW_NUM_MASK; ++ cmd |= PHYTIUM_CMD_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT; ++ cmd |= 
flash->cs << QSPI_CMD_PORT_CS_SHIFT; ++ ++ writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG); ++ phytium_qspi_cmd_enable(qspi); ++ ++ val = readl_relaxed(qspi->io_base + QSPI_LD_PORT_REG); ++ buf[0] = (u8)val; ++ ++ return 0; ++} ++ ++static int phytium_qspi_read_reg(struct spi_nor *nor, ++ u8 opcode, u8 *buf, int len) ++{ ++ struct phytium_qspi_flash *flash = nor->priv; ++ struct device *dev = flash->qspi->dev; ++ struct phytium_qspi *qspi = flash->qspi; ++ unsigned long iflags; ++ u32 cmd = 0; ++ ++ dev_dbg(dev, "read_reg: cmd:%#.2x buf:%pK len:%#x\n", opcode, buf, len); ++ ++ switch (opcode) { ++ case CMD_RDID: ++ phytium_qspi_read_flash_id(qspi, flash, opcode, buf, len); ++ return 0; ++ case CMD_RDSR: ++ phytium_qspi_read_flash_sr1(qspi, flash, opcode, buf, len); ++ return 0; ++ default: ++ break; ++ } ++ ++ cmd = opcode << QSPI_CMD_PORT_CMD_SHIFT; ++ cmd |= BIT(QSPI_CMD_PORT_DATA_TRANSFER_SHIFT); ++ cmd |= BIT(QSPI_CMD_PORT_P_BUFFER_SHIFT); ++ cmd |= PHYTIUM_CMD_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT; ++ cmd |= flash->cs << QSPI_CMD_PORT_CS_SHIFT; ++ ++ writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG); ++ phytium_qspi_cmd_enable(qspi); ++ ++ spin_lock_irqsave(&qspi->spinlock, iflags); ++ memcpy_from_ftreg(qspi, buf, len); ++ spin_unlock_irqrestore(&qspi->spinlock, iflags); ++ ++ return 0; ++} ++ ++static int phytium_qspi_write_reg(struct spi_nor *nor, u8 opcode, ++ u8 *buf, int len) ++{ ++ struct phytium_qspi_flash *flash = nor->priv; ++ struct device *dev = flash->qspi->dev; ++ struct phytium_qspi *qspi = flash->qspi; ++ u32 cmd = 0; ++ ++ dev_dbg(dev, "write_reg: cmd:%#.2x buf:%pK len:%#x\n", ++ opcode, buf, len); ++ ++ switch(opcode){ ++ case CMD_WREN: ++ phytium_qspi_write_enable(qspi, flash); ++ return 0; ++ case CMD_WRDI: ++ phytium_qspi_write_disable(qspi, flash); ++ return 0; ++ default: ++ break; ++ } ++ ++ cmd = opcode << QSPI_CMD_PORT_CMD_SHIFT; ++ cmd |= PHYTIUM_CMD_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT; ++ cmd |= flash->cs << 
QSPI_CMD_PORT_CS_SHIFT; ++ ++ if ((len > 8) || (NULL == buf)) { ++ dev_err(dev, "data length exceed. commad %x, len:%d \n", opcode, len); ++ return -EINVAL; ++ } ++ else if(len > 0){ ++ cmd |= ((len - 1) << QSPI_CMD_PORT_RW_NUM_SHIFT) & QSPI_CMD_PORT_RW_NUM_MASK; ++ cmd |= BIT(QSPI_CMD_PORT_DATA_TRANSFER_SHIFT); ++ } ++ ++ writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG); ++ memcpy_to_ftreg(qspi, buf, len); ++ ++ return 0; ++} ++ ++static ssize_t phytium_qspi_read_tmp(struct phytium_qspi *qspi, u32 read_cmd, ++ loff_t from, size_t len, u_char *buf) ++{ ++ u32 addr = (u32)from; ++ u64 val = 0; ++ ++ if (!qspi) ++ return -1; ++ ++ dev_dbg(qspi->dev, "read cmd:%x, addr:%x len:%zx\n", read_cmd, addr, len); ++ writel_relaxed(read_cmd, qspi->io_base + QSPI_RD_CFG_REG); ++ ++ memcpy_fromio(buf, qspi->mm_base + addr, len); ++ ++ val = *(u64 *)(buf); ++ dev_dbg(qspi->dev, "read val:%llx\n", val); ++ ++ return len; ++} ++ ++static ssize_t phytium_qspi_read(struct spi_nor *nor, loff_t from, size_t len, ++ u_char *buf) ++{ ++ struct phytium_qspi_flash *flash = nor->priv; ++ struct phytium_qspi *qspi = flash->qspi; ++ u32 cmd = nor->read_opcode; ++ u32 addr = (u32)from; ++ ++ addr = addr + flash->cs * flash->fsize; ++ dev_dbg(qspi->dev, "read(%#.2x): buf:%pK from:%#.8x len:%#zx\n", ++ nor->read_opcode, buf, addr, len); ++ ++ cmd = cmd << QSPI_RD_CFG_RD_CMD_SHIFT; ++ cmd |= BIT(QSPI_RD_CFG_D_BUFFER_SHIFT); ++ cmd |= flash->clk_div << QSPI_CMD_PORT_SCK_SEL_SHIFT; ++ ++ cmd &= ~QSPI_RD_CFG_RD_TRANSFER_MASK; ++ cmd |= (flash->addr_width << QSPI_RD_CFG_RD_TRANSFER_SHIFT); ++ ++ switch (nor->read_opcode) { ++ case CMD_READ: ++ case CMD_FAST_READ: ++ case CMD_QIOR: ++ case CMD_QOR: ++ cmd &= ~QSPI_RD_CFG_RD_ADDR_SEL_MASK; ++ break; ++ case CMD_4READ: ++ case CMD_4FAST_READ: ++ case CMD_4QOR: ++ case CMD_4QIOR: ++ cmd |= BIT(QSPI_RD_CFG_RD_ADDR_SEL_SHIFT); ++ break; ++ case 0x5A: ++ cmd &= ~QSPI_RD_CFG_RD_ADDR_SEL_MASK; ++ phytium_qspi_read_flash_sfdp(qspi, flash, 
nor->read_opcode, from, buf, len); ++ return 0; ++ break; ++ default: ++ break; ++ } ++ ++ if((PHYTIUM_QSPI_1_1_4 == flash->addr_width) || ++ (PHYTIUM_QSPI_1_4_4 == flash->addr_width)) { ++ cmd |= BIT(QSPI_RD_CFG_RD_LATENCY_SHIFT); ++ ++ cmd &= ~QSPI_RD_CFG_DUMMY_MASK; ++ cmd |= (0x07 << QSPI_RD_CFG_DUMMY_SHIFT); ++ } ++ ++ dev_dbg(qspi->dev, "read(%#.2x): cmd:%#x\n", nor->read_opcode, cmd); ++ if (cmd != flash->read_cmd) ++ flash->read_cmd = cmd; ++ ++ writel_relaxed(cmd, qspi->io_base + QSPI_RD_CFG_REG); ++ ++ memcpy_fromio(buf, qspi->mm_base + addr, len); ++ ++ return len; ++} ++ ++static ssize_t phytium_qspi_write(struct spi_nor *nor, loff_t to, size_t len, ++ const u_char *buf) ++{ ++ struct phytium_qspi_flash *flash = nor->priv; ++ struct device *dev = flash->qspi->dev; ++ struct phytium_qspi *qspi = flash->qspi; ++ u32 cmd = nor->program_opcode; ++ u32 addr = (u32)to; ++ int i; ++ u_char tmp[8] = {0}; ++ size_t mask = 0x03; ++ ++ addr = addr + flash->cs * flash->fsize; ++ dev_dbg(dev, "write(%#.2x): buf:%p to:%#.8x len:%#zx\n", ++ nor->program_opcode, buf, addr, len); ++ ++ if (addr & 0x03) { ++ dev_err(dev, "Addr not four-byte aligned!\n"); ++ return -EINVAL; ++ } ++ ++ cmd = cmd << QSPI_WR_CFG_WR_CMD_SHIFT; ++ cmd |= BIT(QSPI_WR_CFG_WR_MODE_SHIFT); ++ cmd |= PHYTIUM_CMD_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT; ++ ++ switch (nor->program_opcode) { ++ case CMD_PP: ++ case CMD_QPP: ++ cmd &= ~QSPI_WR_CFG_WR_ADDR_SEL_MASK; ++ break; ++ case CMD_4PP: ++ case CMD_4QPP: ++ cmd |= BIT(QSPI_WR_CFG_WR_ADDR_SEL_SHIFT); ++ break; ++ default: ++ dev_err(qspi->dev, "Not support program command:%#x\n", ++ nor->erase_opcode); ++ return -EINVAL; ++ } ++ ++ dev_dbg(qspi->dev, "write cmd:%x\n", cmd); ++ writel_relaxed(cmd, qspi->io_base + QSPI_WR_CFG_REG); ++ ++ for (i = 0; i < len/4; i++) { ++ writel_relaxed(*(u32 *)(buf + 4*i), qspi->mm_base + addr + 4*i); ++ } ++ ++ if (len & mask) { ++ addr = addr + (len & ~mask); ++ phytium_qspi_read_tmp(qspi, flash->read_cmd, addr, 4, 
&tmp[0]); ++ memcpy(tmp, buf + (len & ~mask), len & mask); ++ writel_relaxed(*(u32 *)(tmp), qspi->mm_base + addr); ++ } ++ ++ writel_relaxed(QSPI_FLUSH_EN, qspi->io_base + QSPI_FLUSH_REG); ++ ++ phytium_qspi_wait_cmd(qspi, flash); ++ ++ return len; ++} ++ ++static int phytium_qspi_erase(struct spi_nor *nor, loff_t offs) ++{ ++ struct phytium_qspi_flash *flash = nor->priv; ++ struct device *dev = flash->qspi->dev; ++ struct phytium_qspi *qspi = flash->qspi; ++ u32 cmd = nor->erase_opcode; ++ u32 addr = (u32)offs; ++ ++ dev_dbg(dev, "erase(%#.2x):offs:%#x\n", nor->erase_opcode, (u32)offs); ++ ++ phytium_qspi_write_enable(qspi, flash); ++ cmd = cmd << QSPI_CMD_PORT_CMD_SHIFT; ++ cmd |= PHYTIUM_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT; ++ cmd |= flash->cs << QSPI_CMD_PORT_CS_SHIFT; ++ ++ /* s25fl256s1 not supoort D8, DC, 20, 21 */ ++ switch (nor->erase_opcode) { ++ case CMD_SE: ++ cmd &= ~QSPI_CMD_PORT_SEL_MASK; ++ cmd |= BIT(QSPI_CMD_PORT_CMD_ADDR_SHIFT); ++ writel_relaxed(addr, qspi->io_base + QSPI_ADDR_PORT_REG); ++ break; ++ case CMD_4SE: ++ cmd |= BIT(QSPI_CMD_PORT_SEL_SHIFT); ++ cmd |= BIT(QSPI_CMD_PORT_CMD_ADDR_SHIFT); ++ writel_relaxed(addr, qspi->io_base + QSPI_ADDR_PORT_REG); ++ break; ++ case CMD_P4E: ++ cmd &= ~QSPI_CMD_PORT_SEL_MASK; ++ cmd |= BIT(QSPI_CMD_PORT_CMD_ADDR_SHIFT); ++ writel_relaxed(addr, qspi->io_base + QSPI_ADDR_PORT_REG); ++ break; ++ case CMD_4P4E: ++ cmd |= BIT(QSPI_CMD_PORT_SEL_SHIFT); ++ cmd |= BIT(QSPI_CMD_PORT_CMD_ADDR_SHIFT); ++ writel_relaxed(addr, qspi->io_base + QSPI_ADDR_PORT_REG); ++ break; ++ case CMD_BE: ++ cmd &= ~QSPI_CMD_PORT_SEL_MASK; ++ break; ++ case CMD_4BE: ++ cmd |= BIT(QSPI_CMD_PORT_SEL_SHIFT); ++ break; ++ default: ++ dev_err(qspi->dev, "Not support erase command:%#x\n", ++ nor->erase_opcode); ++ return -EINVAL; ++ } ++ ++ writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG); ++ phytium_qspi_cmd_enable(qspi); ++ phytium_qspi_wait_cmd(qspi, flash); ++ ++ return 0; ++} ++ ++static int phytium_qspi_prep(struct spi_nor 
*nor, enum spi_nor_ops ops) ++{ ++ struct phytium_qspi_flash *flash = nor->priv; ++ struct phytium_qspi *qspi = flash->qspi; ++ ++ mutex_lock(&qspi->lock); ++ return 0; ++} ++ ++static void phytium_qspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops) ++{ ++ struct phytium_qspi_flash *flash = nor->priv; ++ struct phytium_qspi *qspi = flash->qspi; ++ ++ mutex_unlock(&qspi->lock); ++} ++ ++static int phytium_qspi_get_flash_size(struct phytium_qspi *qspi, u32 size) ++{ ++ int ret = 0; ++ u32 value; ++ ++ switch (size) { ++ case SZ_4M: ++ value = 0; ++ break; ++ case SZ_8M: ++ value = 1; ++ break; ++ case SZ_16M: ++ value = 2; ++ break; ++ case SZ_32M: ++ value = 3; ++ break; ++ case SZ_64M: ++ value = 4; ++ break; ++ case SZ_128M: ++ value = 5; ++ break; ++ case SZ_256M: ++ value = 6; ++ break; ++ case SZ_512M: ++ value = 7; ++ break; ++ default: ++ value = 0; ++ ++ ret = -EINVAL; ++ return ret; ++ } ++ ++ return value; ++} ++static int phytium_qspi_flash_setup(struct phytium_qspi *qspi, ++ struct device_node *np) ++{ ++ struct spi_nor_hwcaps hwcaps = { ++ .mask = SNOR_HWCAPS_READ | ++ SNOR_HWCAPS_READ_FAST | ++ SNOR_HWCAPS_PP, ++ }; ++ u32 width, presc; ++ u32 cs_num = 0; ++ u32 max_rate = 0; ++ u32 clk_div = 0; ++ u32 flash_cap = 0; ++ u32 addr_width = PHYTIUM_QSPI_1_1_1; ++ struct phytium_qspi_flash *flash; ++ struct mtd_info *mtd; ++ int ret; ++ ++ of_property_read_u32(np, "reg", &cs_num); ++ if (cs_num >= PHYTIUM_MAX_NORCHIP) ++ return -EINVAL; ++ ++ of_property_read_u32(np, "spi-max-frequency", &max_rate); ++ if (!max_rate) ++ return -EINVAL; ++ ++ of_property_read_u32(np, "spi-clk-div", &clk_div); ++ if (!clk_div) ++ clk_div = PHYTIUM_SCK_SEL; ++ ++ if (clk_div < 4) ++ return -EINVAL; ++ ++ presc = DIV_ROUND_UP(qspi->clk_rate, max_rate) - 1; ++ ++ of_property_read_u32(np, "spi-rx-bus-width", &width); ++ if (!width) ++ width = 1; ++ ++ if (width == 4) { ++ hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4; ++ addr_width = PHYTIUM_QSPI_1_1_4; ++ } else if (width == 2) { ++ 
hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2; ++ addr_width = PHYTIUM_QSPI_1_1_2; ++ } else if (width != 1) ++ return -EINVAL; ++ ++ flash = &qspi->flash[cs_num]; ++ flash->qspi = qspi; ++ flash->cs = cs_num; ++ flash->presc = presc; ++ flash->clk_div = clk_div; ++ flash->addr_width = addr_width; ++ ++ flash->nor.dev = qspi->dev; ++ spi_nor_set_flash_node(&flash->nor, np); ++ flash->nor.priv = flash; ++ mtd = &flash->nor.mtd; ++ ++ flash->nor.read = phytium_qspi_read; ++ flash->nor.write = phytium_qspi_write; ++ flash->nor.erase = phytium_qspi_erase; ++ flash->nor.read_reg = phytium_qspi_read_reg; ++ flash->nor.write_reg = phytium_qspi_write_reg; ++ flash->nor.prepare = phytium_qspi_prep; ++ flash->nor.unprepare = phytium_qspi_unprep; ++ ++ ret = spi_nor_scan(&flash->nor, NULL, &hwcaps); ++ if (ret) { ++ dev_err(qspi->dev, "device scan failed\n"); ++ return ret; ++ } ++ ++ flash->fsize = mtd->size; ++ flash->prefetch_limit = mtd->size - PHYTIUM_QSPI_FIFO_SZ; ++ ++ ret = phytium_qspi_get_flash_size(flash->qspi, mtd->size); ++ if (ret < 1) { ++ dev_err(qspi->dev, "flash size invalid\n"); ++ return ret; ++ } ++ ++ flash_cap = cs_num << QSPI_FLASH_CAP_NUM_SHIFT; ++ flash_cap |= ret; ++ writel_relaxed(flash_cap, qspi->io_base + QSPI_FLASH_CAP_REG); ++ ++ flash->read_mode = PHYTIUM_FMODE_MM; ++ ++ ret = mtd_device_register(mtd, NULL, 0); ++ if (ret) { ++ dev_err(qspi->dev, "mtd device parse failed\n"); ++ return ret; ++ } ++ ++ flash->registered = true; ++ ++ dev_dbg(qspi->dev, "read mm:%s %px cs:%d bus:%d clk-div:%d\n", ++ flash->read_mode == PHYTIUM_FMODE_MM ? 
"yes" : "no", ++ qspi->mm_base, cs_num, width, clk_div); ++ ++ dev_dbg(qspi->dev, "mtd->size:%llx, mtd->erasesize:%x, fsize:%x\n", ++ mtd->size, mtd->erasesize, flash->fsize); ++ ++ return 0; ++} ++ ++static void phytium_qspi_mtd_free(struct phytium_qspi *qspi) ++{ ++ int i; ++ ++ for (i = 0; i < PHYTIUM_MAX_NORCHIP; i++) ++ if (qspi->flash[i].registered) ++ mtd_device_unregister(&qspi->flash[i].nor.mtd); ++} ++ ++static ssize_t clk_div_show(struct device *dev, struct device_attribute *attr, ++ char *buf) ++{ ++ struct phytium_qspi *qspi = dev_get_drvdata(dev); ++ struct phytium_qspi_flash *flash = &qspi->flash[0]; ++ ++ return sprintf(buf, "Flash 0 clk-div: %d\n", flash->clk_div); ++} ++ ++static ssize_t clk_div_store(struct device *dev, ++ struct device_attribute *attr, const char *buf, ++ size_t size) ++{ ++ struct phytium_qspi *qspi = dev_get_drvdata(dev); ++ struct phytium_qspi_flash *flash = &qspi->flash[0]; ++ long value; ++ char *token; ++ ssize_t status; ++ ++ token = strsep ((char **)&buf, " "); ++ if (!token) ++ return -EINVAL; ++ ++ status = kstrtol(token, 0, &value); ++ if (status) ++ return status; ++ ++ flash->clk_div = (u8)value; ++ ++ return size; ++} ++static DEVICE_ATTR_RW(clk_div); ++ ++static struct attribute *phytium_qspi_attrs[] = { ++ &dev_attr_clk_div.attr, ++ NULL, ++}; ++ ++static struct attribute_group phytium_qspi_attr_group = { ++ .attrs = phytium_qspi_attrs, ++}; ++ ++static int phytium_qspi_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct device_node *flash_np; ++ struct phytium_qspi *qspi; ++ struct resource *res; ++ int ret; ++ ++ qspi = devm_kzalloc(dev, sizeof(*qspi), GFP_KERNEL); ++ if (!qspi) ++ return -ENOMEM; ++ ++ qspi->nor_num = of_get_child_count(dev->of_node); ++ if (!qspi->nor_num || qspi->nor_num > PHYTIUM_MAX_NORCHIP) ++ return -ENODEV; ++ ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi"); ++ qspi->io_base = devm_ioremap_resource(dev, res); ++ if 
(IS_ERR(qspi->io_base)) ++ return PTR_ERR(qspi->io_base); ++ ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mm"); ++ qspi->mm_base = devm_ioremap_resource(dev, res); ++ if (IS_ERR(qspi->mm_base)) ++ return PTR_ERR(qspi->mm_base); ++ ++ qspi->mm_size = resource_size(res); ++ ++ qspi->clk = devm_clk_get(dev, NULL); ++ if (IS_ERR(qspi->clk)) ++ return PTR_ERR(qspi->clk); ++ ++ qspi->clk_rate = clk_get_rate(qspi->clk); ++ if (!qspi->clk_rate) ++ return -EINVAL; ++ ++ ret = clk_prepare_enable(qspi->clk); ++ if (ret) { ++ dev_err(dev, "can not enable the clock\n"); ++ return ret; ++ } ++ ++ qspi->dev = dev; ++ platform_set_drvdata(pdev, qspi); ++ mutex_init(&qspi->lock); ++ spin_lock_init(&qspi->spinlock); ++ ++ for_each_available_child_of_node(dev->of_node, flash_np) { ++ ret = phytium_qspi_flash_setup(qspi, flash_np); ++ if (ret) { ++ dev_err(dev, "unable to setup flash chip\n"); ++ goto err_flash; ++ } ++ } ++ ++ ret = sysfs_create_group(&qspi->dev->kobj, &phytium_qspi_attr_group); ++ if (ret) { ++ dev_err(dev, "unable to create sysfs\n"); ++ goto err_flash; ++ } ++ ++ return 0; ++ ++err_flash: ++ mutex_destroy(&qspi->lock); ++ phytium_qspi_mtd_free(qspi); ++ ++ clk_disable_unprepare(qspi->clk); ++ return ret; ++} ++ ++static int phytium_qspi_remove(struct platform_device *pdev) ++{ ++ struct phytium_qspi *qspi = platform_get_drvdata(pdev); ++ ++ sysfs_remove_group(&qspi->dev->kobj, &phytium_qspi_attr_group); ++ ++ phytium_qspi_mtd_free(qspi); ++ mutex_destroy(&qspi->lock); ++ ++ clk_disable_unprepare(qspi->clk); ++ return 0; ++} ++ ++static const struct of_device_id phytium_qspi_match[] = { ++ {.compatible = "phytium,qspi"}, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, phytium_qspi_match); ++ ++static struct platform_driver phytium_qspi_driver = { ++ .probe = phytium_qspi_probe, ++ .remove = phytium_qspi_remove, ++ .driver = { ++ .name = "phytium-quadspi", ++ .of_match_table = phytium_qspi_match, ++ }, ++}; ++ 
++module_platform_driver(phytium_qspi_driver); ++ ++MODULE_AUTHOR("Mingshuai Zhu "); ++MODULE_DESCRIPTION("Phytium QuadSPI driver"); ++MODULE_LICENSE("GPL v2"); +diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c +index f028277fb1ce..6bd2bd88f29c 100644 +--- a/drivers/mtd/spi-nor/spi-nor.c ++++ b/drivers/mtd/spi-nor/spi-nor.c +@@ -1263,6 +1263,7 @@ static const struct flash_info spi_nor_ids[] = { + /* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */ + { "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, ++ { "XM25QH128B", INFO(0x205018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { }, + }; + +diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig +index 7cdd0cead693..013503b2b506 100644 +--- a/drivers/net/can/Kconfig ++++ b/drivers/net/can/Kconfig +@@ -119,6 +119,30 @@ config CAN_JANZ_ICAN3 + This driver can also be built as a module. If so, the module will be + called janz-ican3.ko. + ++config CAN_PHYTIUM_CORE ++ tristate "Phytium CAN controller core driver" ++ ++config CAN_PHYTIUM_PLAT ++ tristate "Phytium CAN platform support" ++ select CAN_PHYTIUM_CORE ++ depends on ARCH_PHYTIUM || COMPILE_TEST ++ default y if ARCH_PHYTIUM ++ ---help--- ++ This driver supports for the on-chip Phytium CAN controller found on ++ FT-2000/4 and D2000 SoCs. ++ ++ To compile this driver as a module, choose M here. ++ ++config CAN_PHYTIUM_PCI ++ tristate "Phytium CAN PCI support" ++ select CAN_PHYTIUM_CORE ++ depends on PCI || COMPILE_TEST ++ ---help--- ++ This driver is for Phytium CAN controller of X100 chipset which ++ is a PCI device. ++ ++ To compile this driver as a module, choose M here. 
++ + config CAN_SUN4I + tristate "Allwinner A10 CAN controller" + depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST +diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile +index 44922bf29b6a..373ddddad65f 100644 +--- a/drivers/net/can/Makefile ++++ b/drivers/net/can/Makefile +@@ -28,6 +28,9 @@ obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o + obj-$(CONFIG_CAN_MSCAN) += mscan/ + obj-$(CONFIG_CAN_M_CAN) += m_can/ + obj-$(CONFIG_CAN_PEAK_PCIEFD) += peak_canfd/ ++obj-$(CONFIG_CAN_PHYTIUM_CORE) += phytium_can.o ++obj-$(CONFIG_CAN_PHYTIUM_PLAT) += phytium_can_plat.o ++obj-$(CONFIG_CAN_PHYTIUM_PCI) += phytium_can_pci.o + obj-$(CONFIG_CAN_SJA1000) += sja1000/ + obj-$(CONFIG_CAN_SUN4I) += sun4i_can.o + obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o +diff --git a/drivers/net/can/phytium_can.c b/drivers/net/can/phytium_can.c +new file mode 100644 +index 000000000000..30a3f52910b4 +--- /dev/null ++++ b/drivers/net/can/phytium_can.c +@@ -0,0 +1,694 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* Phytium CAN core controller driver ++ * ++ * Copyright (C) 2018-2021, Phytium Technology Co., Ltd. 
++ */ ++ ++#include <linux/clk.h> ++#include <linux/errno.h> ++#include <linux/init.h> ++#include <linux/interrupt.h> ++#include <linux/io.h> ++#include <linux/kernel.h> ++#include <linux/kfifo.h> ++#include <linux/module.h> ++#include <linux/netdevice.h> ++#include <linux/of.h> ++#include <linux/platform_device.h> ++#include <linux/skbuff.h> ++ ++#include <linux/can/dev.h> ++#include <linux/can/error.h> ++#include <linux/can/led.h> ++#include <linux/pm_runtime.h> ++ ++#include "phytium_can.h" ++ ++static void phytium_write_reg(const struct phytium_can_priv *priv, ++ enum phytium_can_reg reg, u32 val) ++{ ++ writel(val, priv->reg_base + reg); ++} ++ ++static u32 phytium_read_reg(const struct phytium_can_priv *priv, ++ enum phytium_can_reg reg) ++{ ++ return readl(priv->reg_base + reg); ++} ++ ++static void phytium_set_reg_bits(const struct phytium_can_priv *priv, ++ enum phytium_can_reg reg, u32 bs) ++{ ++ u32 val = readl(priv->reg_base + reg); ++ ++ val |= bs; ++ writel(val, priv->reg_base + reg); ++} ++ ++static void phytium_clr_reg_bits(const struct phytium_can_priv *priv, ++ enum phytium_can_reg reg, u32 bs) ++{ ++ u32 val = readl(priv->reg_base + reg); ++ ++ val &= ~bs; ++ writel(val, priv->reg_base + reg); ++} ++ ++static int phytium_set_bittiming(struct net_device *ndev) ++{ ++ struct phytium_can_priv *priv = netdev_priv(ndev); ++ struct can_bittiming *bt = &priv->can.bittiming; ++ u32 btr; ++ u32 is_config_mode; ++ ++ /* Check whether Phytium CAN is in configuration mode. ++ * It cannot set bit timing if Phytium CAN is not in configuration mode. ++ */ ++ is_config_mode = (priv->read_reg(priv, FTCAN_CTRL) & ++ FTCAN_CTRL_XFER_MASK); ++ if (is_config_mode) { ++ netdev_alert(ndev, ++ "BUG!
Cannot set bittiming - CAN is not in config mode\n"); ++ return -EPERM; ++ } ++ ++ /* Setting Baud Rate prescalar value in BRPR Register */ ++ btr = (bt->brp - 1) << 16; ++ ++ /* Setting Time Segment 1 in BTR Register */ ++ btr |= (bt->prop_seg - 1) << 2; ++ ++ btr |= (bt->phase_seg1 - 1) << 5; ++ ++ /* Setting Time Segment 2 in BTR Register */ ++ btr |= (bt->phase_seg2 - 1) << 8; ++ ++ /* Setting Synchronous jump width in BTR Register */ ++ btr |= (bt->sjw - 1); ++ ++ priv->write_reg(priv, FTCAN_DAT_RATE_CTRL, btr); ++ priv->write_reg(priv, FTCAN_ARB_RATE_CTRL, btr); ++ ++ netdev_dbg(ndev, "DAT=0x%08x, ARB=0x%08x\n", ++ priv->read_reg(priv, FTCAN_DAT_RATE_CTRL), ++ priv->read_reg(priv, FTCAN_ARB_RATE_CTRL)); ++ ++ return 0; ++} ++ ++static int phytium_can_start(struct net_device *ndev) ++{ ++ struct phytium_can_priv *priv = netdev_priv(ndev); ++ int err; ++ ++ err = phytium_set_bittiming(ndev); ++ if (err < 0) ++ return err; ++ ++ /* Identifier mask enable */ ++ priv->set_reg_bits(priv, FTCAN_CTRL, FTCAN_CTRL_AIME_MASK); ++ priv->write_reg(priv, FTCAN_ACC_ID0_MASK, FTCAN_ACC_IDN_MASK); ++ priv->write_reg(priv, FTCAN_ACC_ID1_MASK, FTCAN_ACC_IDN_MASK); ++ priv->write_reg(priv, FTCAN_ACC_ID2_MASK, FTCAN_ACC_IDN_MASK); ++ priv->write_reg(priv, FTCAN_ACC_ID3_MASK, FTCAN_ACC_IDN_MASK); ++ ++ /* Enable interrupts */ ++ priv->write_reg(priv, FTCAN_INTR, FTCAN_INTR_EN); ++ ++ /*Enable Transfer*/ ++ priv->set_reg_bits(priv, FTCAN_CTRL, FTCAN_CTRL_XFER_MASK); ++ ++ netdev_dbg(ndev, "status:#x%08x\n", ++ priv->read_reg(priv, FTCAN_XFER_STS)); ++ ++ priv->can.state = CAN_STATE_ERROR_ACTIVE; ++ return 0; ++} ++ ++static int phytium_do_set_mode(struct net_device *ndev, enum can_mode mode) ++{ ++ int ret; ++ ++ switch (mode) { ++ case CAN_MODE_START: ++ ret = phytium_can_start(ndev); ++ if (ret < 0) { ++ netdev_err(ndev, "xcan_chip_start failed!\n"); ++ return ret; ++ } ++ netif_wake_queue(ndev); ++ break; ++ default: ++ ret = -EOPNOTSUPP; ++ break; ++ } ++ ++ return ret; ++} ++ 
++static int phytium_can_start_xmit(struct sk_buff *skb, struct net_device *ndev) ++{ ++ struct phytium_can_priv *priv = netdev_priv(ndev); ++ struct net_device_stats *stats = &ndev->stats; ++ struct can_frame *cf = (struct can_frame *)skb->data; ++ u32 id, dlc, frame_head[2] = {0, 0}, data[8] = {0, 0}; ++ u32 tx_fifo_cnt; ++ unsigned long flags; ++ ++ if (can_dropped_invalid_skb(ndev, skb)) ++ return NETDEV_TX_OK; ++ ++ /* Check if the TX buffer is full */ ++ tx_fifo_cnt = (priv->read_reg(priv, FTCAN_FIFO_CNT) >> FTCAN_FIFO_CNT_TFN_SHIFT); ++ if (tx_fifo_cnt == priv->tx_max) { ++ netif_stop_queue(ndev); ++ netdev_err(ndev, "BUG!, TX FIFO full when queue awake!\n"); ++ return NETDEV_TX_BUSY; ++ } ++ ++ if (priv->tx_head == priv->tx_tail) { ++ priv->tx_head = 0; ++ priv->tx_tail = 0; ++ } ++ ++ /* Watch carefully on the bit sequence */ ++ if (cf->can_id & CAN_EFF_FLAG) { ++ /* Extended CAN ID format */ ++ id = ((cf->can_id & CAN_EFF_MASK) << FTCAN_IDR_ID2_SHIFT) & ++ FTCAN_IDR_ID2_MASK; ++ id |= (((cf->can_id & CAN_EFF_MASK) >> ++ (CAN_EFF_ID_BITS - CAN_SFF_ID_BITS)) << ++ FTCAN_IDR_ID1_SHIFT) & FTCAN_IDR_ID1_MASK; ++ ++ /* The substibute remote TX request bit should be "1" ++ * for extended frames as in the Xilinx CAN datasheet ++ */ ++ id |= FTCAN_IDR_IDE_MASK | FTCAN_IDR_SRR_MASK; ++ ++ if (cf->can_id & CAN_RTR_FLAG) ++ /* Extended frames remote TX request */ ++ id |= FTCAN_IDR_RTR_MASK; ++ ++ dlc = cf->can_dlc << FTCAN_IDR_EDLC_SHIFT; ++ ++ frame_head[0] = cpu_to_be32p(&id);//id; ++ frame_head[1] = cpu_to_be32p(&dlc);//dlc; ++ ++ /* Write the Frame to Phytium CAN TX FIFO */ ++ priv->write_reg(priv, FTCAN_TX_FIFO, frame_head[0]); ++ priv->write_reg(priv, FTCAN_TX_FIFO, frame_head[1]); ++ } else { ++ /* Standard CAN ID format */ ++ id = ((cf->can_id & CAN_SFF_MASK) << FTCAN_IDR_ID1_SHIFT) & ++ FTCAN_IDR_ID1_MASK; ++ ++ if (cf->can_id & CAN_RTR_FLAG) ++ /* Standard frames remote TX request */ ++ id |= FTCAN_IDR_SRR_MASK; ++ ++ dlc = ((cf->can_dlc << 
FTCAN_IDR_SDLC_SHIFT) | FTCAN_IDR_PAD_MASK); ++ id |= dlc; ++ ++ frame_head[0] = cpu_to_be32p(&id); ++ ++ /* Write the Frame to Xilinx CAN TX FIFO */ ++ priv->write_reg(priv, FTCAN_TX_FIFO, frame_head[0]); ++ } ++ ++ if (!(cf->can_id & CAN_RTR_FLAG)) { ++ if (cf->can_dlc > 0) { ++ data[0] = (*(__be32 *)(cf->data + 0)); ++ priv->write_reg(priv, FTCAN_TX_FIFO, data[0]); ++ } ++ if (cf->can_dlc > 4) { ++ data[1] = (*(__be32 *)(cf->data + 4)); ++ priv->write_reg(priv, FTCAN_TX_FIFO, data[1]); ++ } ++ stats->tx_bytes += cf->can_dlc; ++ } ++ ++ can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max); ++ priv->tx_head++; ++ ++ /* triggers tranmission */ ++ spin_lock_irqsave(&priv->lock, flags); ++ priv->clr_reg_bits(priv, FTCAN_CTRL, FTCAN_CTRL_XFER_MASK); ++ priv->set_reg_bits(priv, FTCAN_CTRL, FTCAN_CTRL_TXREQ_MASK); ++ priv->set_reg_bits(priv, FTCAN_CTRL, FTCAN_CTRL_TXREQ_MASK | FTCAN_CTRL_XFER_MASK); ++ netif_stop_queue(ndev); ++ spin_unlock_irqrestore(&priv->lock, flags); ++ ++ return NETDEV_TX_OK; ++} ++ ++static void phytium_can_rx(struct net_device *ndev) ++{ ++ struct phytium_can_priv *priv = netdev_priv(ndev); ++ int data_cnt, i; ++ u32 buf[64]; ++ ++ data_cnt = priv->read_reg(priv, FTCAN_FIFO_CNT); ++ data_cnt &= 0x7F; ++ ++ for (i = 0; i < data_cnt; ++i) ++ buf[i] = priv->read_reg(priv, FTCAN_RX_FIFO); ++ ++ if (priv->is_kfifo_full_err) ++ return; ++ ++ if ((KFIFO_LEN - kfifo_len(&priv->rx_kfifo)) < (data_cnt * 4)) { ++ netdev_err(ndev, "RX kfifo is full,restart CAN controller!\n"); ++ priv->is_kfifo_full_err = true; ++ return; ++ } ++ ++ kfifo_in(&priv->rx_kfifo, buf, data_cnt * 4); ++ ++ cancel_delayed_work(&priv->can_frame_work); ++ schedule_delayed_work(&priv->can_frame_work, 0); ++} ++ ++static void phytium_err_interrupt(struct net_device *ndev, u32 isr) ++{ ++ struct phytium_can_priv *priv = netdev_priv(ndev); ++ struct net_device_stats *stats = &ndev->stats; ++ struct can_frame *cf; ++ struct sk_buff *skb; ++ u32 txerr = 0, rxerr = 0; ++ ++ skb = 
alloc_can_err_skb(ndev, &cf); ++ ++ rxerr = priv->read_reg(priv, FTCAN_ERR_CNT) & FTCAN_ERR_CNT_RFN_MASK; ++ txerr = ((priv->read_reg(priv, FTCAN_ERR_CNT) & ++ FTCAN_ERR_CNT_TFN_MASK) >> FTCAN_ERR_CNT_TFN_SHIFT); ++ ++ if (isr & FTCAN_INTR_BOIS_MASK) { ++ priv->can.state = CAN_STATE_BUS_OFF; ++ priv->can.can_stats.bus_off++; ++ /* Leave device in Config Mode in bus-off state */ ++ can_bus_off(ndev); ++ if (skb) ++ cf->can_id |= CAN_ERR_BUSOFF; ++ } else if ((isr & FTCAN_INTR_PEIS_MASK) == FTCAN_INTR_PEIS_MASK) { ++ priv->can.state = CAN_STATE_ERROR_PASSIVE; ++ priv->can.can_stats.error_passive++; ++ if (skb) { ++ cf->can_id |= CAN_ERR_CRTL; ++ cf->data[1] = (rxerr > 127) ? ++ CAN_ERR_CRTL_RX_PASSIVE : ++ CAN_ERR_CRTL_TX_PASSIVE; ++ cf->data[6] = txerr; ++ cf->data[7] = rxerr; ++ } ++ } else if (isr & FTCAN_INTR_PWIS_MASK) { ++ priv->can.state = CAN_STATE_ERROR_WARNING; ++ priv->can.can_stats.error_warning++; ++ if (skb) { ++ cf->can_id |= CAN_ERR_CRTL; ++ cf->data[1] |= (txerr > rxerr) ? 
++ CAN_ERR_CRTL_TX_WARNING : ++ CAN_ERR_CRTL_RX_WARNING; ++ cf->data[6] = txerr; ++ cf->data[7] = rxerr; ++ } ++ } ++ ++ /* Check for RX FIFO Overflow interrupt */ ++ if (isr & FTCAN_INTR_RFIS_MASK) { ++ stats->rx_over_errors++; ++ stats->rx_errors++; ++ ++ if (skb) { ++ cf->can_id |= CAN_ERR_CRTL; ++ cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; ++ } ++ } ++ ++ if (skb) { ++ stats->rx_packets++; ++ stats->rx_bytes += cf->can_dlc; ++ netif_rx(skb); ++ } ++ ++ if ((isr & FTCAN_INTR_RFIS_MASK) && ++ (FTCAN_XFER_OVERLOAD_FRAM == ++ (priv->read_reg(priv, FTCAN_XFER_STS) & ++ FTCAN_XFER_FRAS_MASK))) ++ phytium_can_rx(ndev); ++} ++ ++static int ++phytium_get_frame_from_kfifo(struct net_device *ndev, u8 *buf, int len) ++{ ++ u32 id, dlc, net_dlc, is_standard_frame_flag; ++ u32 rdout_dlc, data[2] = {0, 0}; ++ struct can_frame *cf; ++ struct phytium_can_priv *priv; ++ struct net_device_stats *stats; ++ struct sk_buff *skb; ++ ++ memcpy(&id, buf, 4); ++ ++ id = be32_to_cpup(&id); ++ ++ if (id & FTCAN_IDR_IDE_MASK) { ++ /* Received an Extended format frame */ ++ memcpy(&dlc, buf + 4, 4); ++ dlc = (dlc >> 2) & 0xf; ++ ++ net_dlc = get_can_dlc(dlc); ++ if (net_dlc > 4 && len >= 16) ++ rdout_dlc = 16; ++ else if (net_dlc > 0 && len >= 12) ++ rdout_dlc = 12; ++ else if (net_dlc == 0 && len >= 8) ++ rdout_dlc = 8; ++ else ++ return 0; ++ ++ is_standard_frame_flag = 0; ++ } else { ++ /* Received a standard format frame */ ++ dlc = (id & FTCAN_IDR_DLC_MASK) >> FTCAN_IDR_SDLC_SHIFT; ++ net_dlc = get_can_dlc(dlc); ++ if (net_dlc > 4 && len >= 12) ++ rdout_dlc = 12; ++ else if (net_dlc > 0 && len >= 8) ++ rdout_dlc = 8; ++ else if (net_dlc == 0 && len >= 4) ++ rdout_dlc = 4; ++ else ++ return 0; ++ ++ is_standard_frame_flag = 1; ++ } ++ ++ if (unlikely(!ndev)) ++ return -1; ++ ++ priv = netdev_priv(ndev); ++ stats = &ndev->stats; ++ skb = alloc_can_skb(ndev, &cf); ++ if (unlikely(!skb)) { ++ stats->rx_dropped++; ++ return rdout_dlc; ++ } ++ /* Change Phytium CAN ID format to socketCAN ID 
format */ ++ if (id & FTCAN_IDR_IDE_MASK) { ++ cf->can_id = (id & FTCAN_IDR_ID1_MASK) >> 3; ++ cf->can_id |= (id & FTCAN_IDR_ID2_MASK) >> ++ FTCAN_IDR_ID2_SHIFT; ++ cf->can_id |= CAN_EFF_FLAG; ++ if (id & FTCAN_IDR_RTR_MASK) ++ cf->can_id |= CAN_RTR_FLAG; ++ } else { ++ cf->can_id = (id & FTCAN_IDR_ID1_MASK) >> ++ FTCAN_IDR_ID1_SHIFT; ++ if (id & FTCAN_IDR_SRR_MASK) ++ cf->can_id |= CAN_RTR_FLAG; ++ } ++ ++ cf->can_dlc = net_dlc; ++ ++ if (!(cf->can_id & CAN_RTR_FLAG)) { ++ if (cf->can_dlc > 0 && is_standard_frame_flag) { ++ memcpy(data, buf + 4, 4); ++ *(__be32 *)(cf->data) = (data[0]); ++ } else if (cf->can_dlc > 0 && !is_standard_frame_flag) { ++ memcpy(data, buf + 8, 4); ++ *(__be32 *)(cf->data) = (data[0]); ++ } ++ ++ if (cf->can_dlc > 4 && is_standard_frame_flag) { ++ memcpy(data + 1, buf + 8, 4); ++ *(__be32 *)(cf->data + 4) = (data[1]); ++ } else if (cf->can_dlc > 4 && !is_standard_frame_flag) { ++ memcpy(data + 1, buf + 12, 4); ++ *(__be32 *)(cf->data + 4) = (data[1]); ++ } ++ } ++ stats->rx_bytes += cf->can_dlc; ++ stats->rx_packets++; ++ ++ netif_receive_skb(skb); ++ return rdout_dlc; ++} ++ ++static void phytium_poll_kfifo(struct work_struct *work) ++{ ++ u32 len, no_rd_len; ++ int rdout_len; ++ u8 *buffer; ++ struct phytium_can_priv *priv = container_of(work, ++ struct phytium_can_priv, ++ can_frame_work.work); ++ struct net_device *ndev = priv->ndev; ++ ++ len = kfifo_len(&priv->rx_kfifo); ++ if (!len) ++ return; ++ ++ buffer = kzalloc(len + 4 * 4, GFP_KERNEL); ++ if (!buffer) ++ return; ++ ++ if (priv->can_frame[0]) { ++ memcpy(buffer, priv->can_frame + 1, priv->can_frame[0]); ++ if (!kfifo_out(&priv->rx_kfifo, buffer + priv->can_frame[0], len)) ++ dev_err(priv->dev, "Kfifo_out error.\n"); ++ len += priv->can_frame[0]; ++ } else { ++ if (!kfifo_out(&priv->rx_kfifo, buffer, len)) ++ dev_err(priv->dev, "Kfifo_out error.\n"); ++ } ++ ++ no_rd_len = len; ++ do { ++ if (no_rd_len >= CAN_FRAM_MIN_IN_FIFO) { ++ rdout_len = phytium_get_frame_from_kfifo(ndev, 
buffer + (len - no_rd_len), ++ no_rd_len); ++ if (rdout_len == -1) { ++ priv->can_frame[0] = 0; ++ break; ++ } else if (!rdout_len) { ++ priv->can_frame[0] = no_rd_len; ++ memcpy(priv->can_frame + 1, ++ buffer + (len - no_rd_len), no_rd_len); ++ break; ++ } ++ ++ no_rd_len -= rdout_len; ++ if (!no_rd_len) { ++ /* clear unfinished data length stored in can_frame[0] */ ++ priv->can_frame[0] = 0; ++ break; ++ } ++ } else { ++ priv->can_frame[0] = no_rd_len; ++ memcpy(priv->can_frame + 1, ++ buffer + (len - no_rd_len), no_rd_len); ++ break; ++ } ++ } while (1); ++ ++ kfree(buffer); ++} ++ ++static void phytium_tx_interrupt(struct net_device *ndev, u32 isr) ++{ ++ struct phytium_can_priv *priv = netdev_priv(ndev); ++ struct net_device_stats *stats = &ndev->stats; ++ ++ while ((priv->tx_head - priv->tx_tail > 0) && ++ (isr & FTCAN_INTR_TEIS_MASK)) { ++ priv->set_reg_bits(priv, FTCAN_INTR, ++ FTCAN_INTR_TEIC_MASK | FTCAN_INTR_REIC_MASK); ++ can_get_echo_skb(ndev, priv->tx_tail % priv->tx_max); ++ priv->tx_tail++; ++ stats->tx_packets++; ++ isr = (priv->read_reg(priv, FTCAN_INTR) & ++ FTCAN_INTR_STATUS_MASK); ++ } ++ ++ priv->clr_reg_bits(priv, FTCAN_CTRL, FTCAN_CTRL_XFER_MASK); ++ priv->clr_reg_bits(priv, FTCAN_CTRL, FTCAN_CTRL_TXREQ_MASK); ++ priv->set_reg_bits(priv, FTCAN_CTRL, FTCAN_CTRL_XFER_MASK); ++ can_led_event(ndev, CAN_LED_EVENT_TX); ++ netif_wake_queue(ndev); ++} ++ ++static irqreturn_t phytium_can_irq(int irq, void *dev_id) ++{ ++ struct net_device *ndev = (struct net_device *)dev_id; ++ struct phytium_can_priv *priv = netdev_priv(ndev); ++ u32 isr; ++ ++ /* Get the interrupt status from Phytium CAN */ ++ isr = (priv->read_reg(priv, FTCAN_INTR) & FTCAN_INTR_STATUS_MASK); ++ if (!isr) ++ return IRQ_NONE; ++ ++ /* Check for the type of error interrupt and Processing it */ ++ if (isr & (FTCAN_INTR_EIS_MASK | FTCAN_INTR_RFIS_MASK | ++ FTCAN_INTR_BOIS_MASK | FTCAN_INTR_PEIS_MASK)) { ++ if (isr & FTCAN_INTR_RFIS_MASK) { ++ priv->clr_reg_bits(priv, FTCAN_INTR, ++ 
FTCAN_INTR_EN); ++ priv->set_reg_bits(priv, FTCAN_INTR, ++ FTCAN_INTR_REIC_MASK | ++ FTCAN_INTR_TEIC_MASK); ++ } ++ ++ phytium_err_interrupt(ndev, isr); ++ ++ priv->set_reg_bits(priv, FTCAN_INTR, ++ (FTCAN_INTR_EIC_MASK | FTCAN_INTR_RFIC_MASK | ++ FTCAN_INTR_BOIC_MASK | FTCAN_INTR_PEIC_MASK)); ++ priv->set_reg_bits(priv, FTCAN_INTR, FTCAN_INTR_EN); ++ return IRQ_HANDLED; ++ } ++ ++ if ((isr & FTCAN_INTR_TEIS_MASK)) { ++ isr &= (~FTCAN_INTR_REIS_MASK); ++ phytium_tx_interrupt(ndev, isr); ++ } ++ ++ if (isr & (FTCAN_INTR_REIS_MASK)) { ++ priv->clr_reg_bits(priv, FTCAN_INTR, ++ FTCAN_INTR_REIE_MASK); ++ phytium_can_rx(ndev); ++ priv->set_reg_bits(priv, FTCAN_INTR, FTCAN_INTR_REIC_MASK); ++ priv->set_reg_bits(priv, FTCAN_INTR, ++ FTCAN_INTR_REIE_MASK); ++ } ++ ++ return IRQ_HANDLED; ++} ++ ++static void phytium_can_stop(struct net_device *ndev) ++{ ++ struct phytium_can_priv *priv = netdev_priv(ndev); ++ u32 ier, data_cnt, i; ++ ++ /* Disable interrupts and leave the can in configuration mode */ ++ ier = (FTCAN_INTR_DIS & FTCAN_INTR_EN_MASK); ++ priv->clr_reg_bits(priv, FTCAN_INTR, ier); ++ ++ priv = netdev_priv(ndev); ++ ++ data_cnt = priv->read_reg(priv, FTCAN_FIFO_CNT); ++ data_cnt &= 0x7F; ++ for (i = 0; i < data_cnt; ++i) ++ priv->read_reg(priv, FTCAN_RX_FIFO); ++ ++ memset(priv->can_frame, 0, sizeof(priv->can_frame)); ++ priv->is_kfifo_full_err = false; ++ kfifo_reset(&priv->rx_kfifo); ++ /* Disable Transfer */ ++ priv->clr_reg_bits(priv, FTCAN_CTRL, FTCAN_CTRL_XFER_MASK); ++ priv->can.state = CAN_STATE_STOPPED; ++} ++ ++static int phytium_can_open(struct net_device *ndev) ++{ ++ struct phytium_can_priv *priv = netdev_priv(ndev); ++ int ret; ++ ++ ret = request_irq(ndev->irq, phytium_can_irq, priv->irq_flags, ++ ndev->name, ndev); ++ if (ret < 0) { ++ netdev_err(ndev, "irq allocation for CAN failed\n"); ++ goto err; ++ } ++ ++ ret = open_candev(ndev); ++ if (ret) ++ goto err_irq; ++ ++ ret = phytium_can_start(ndev); ++ if (ret < 0) { ++ netdev_err(ndev, "failed 
to start!\n"); ++ goto err_candev; ++ } ++ ++ can_led_event(ndev, CAN_LED_EVENT_OPEN); ++ ++ netif_start_queue(ndev); ++ ++ return 0; ++ ++err_candev: ++ close_candev(ndev); ++err_irq: ++ free_irq(ndev->irq, ndev); ++err: ++ return ret; ++} ++ ++static int phytium_can_close(struct net_device *ndev) ++{ ++ netif_stop_queue(ndev); ++ phytium_can_stop(ndev); ++ free_irq(ndev->irq, ndev); ++ close_candev(ndev); ++ can_led_event(ndev, CAN_LED_EVENT_STOP); ++ ++ return 0; ++} ++ ++static int phytium_get_berr_counter(const struct net_device *ndev, struct can_berr_counter *bec) ++{ ++ struct phytium_can_priv *priv = netdev_priv(ndev); ++ ++ bec->rxerr = priv->read_reg(priv, FTCAN_ERR_CNT) & FTCAN_ERR_CNT_RFN_MASK; ++ bec->txerr = ((priv->read_reg(priv, FTCAN_ERR_CNT) & ++ FTCAN_ERR_CNT_TFN_MASK) >> FTCAN_ERR_CNT_TFN_SHIFT); ++ ++ return 0; ++} ++ ++static const struct net_device_ops phytium_can_netdev_ops = { ++ .ndo_open = phytium_can_open, ++ .ndo_stop = phytium_can_close, ++ .ndo_start_xmit = phytium_can_start_xmit, ++ .ndo_change_mtu = can_change_mtu, ++}; ++ ++void register_phytium_can(struct phytium_can_priv *priv) ++{ ++ int ret; ++ ++ priv->can.do_set_mode = phytium_do_set_mode; ++ priv->can.do_get_berr_counter = phytium_get_berr_counter; ++ ++ priv->ndev->netdev_ops = &phytium_can_netdev_ops; ++ ++ priv->write_reg = phytium_write_reg; ++ priv->read_reg = phytium_read_reg; ++ priv->set_reg_bits = phytium_set_reg_bits; ++ priv->clr_reg_bits = phytium_clr_reg_bits; ++ ++ if (kfifo_alloc(&priv->rx_kfifo, KFIFO_LEN, GFP_KERNEL)) { ++ dev_err(priv->dev, "failed to allocate kfifo\n"); ++ goto err; ++ } ++ ++ INIT_DELAYED_WORK(&priv->can_frame_work, phytium_poll_kfifo); ++ ++ ret = register_candev(priv->ndev); ++ if (ret) { ++ dev_err(priv->dev, "fail to register failed (err=%d)\n", ret); ++ goto err; ++ } ++ ++ devm_can_led_init(priv->ndev); ++ netdev_dbg(priv->ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n", ++ priv->reg_base, priv->ndev->irq, 
priv->can.clock.freq, priv->tx_max); ++ return; ++err: ++ free_candev(priv->ndev); ++} ++EXPORT_SYMBOL_GPL(register_phytium_can); ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Cheng Quan "); ++MODULE_DESCRIPTION("Core driver for Phytium CAN controller"); +diff --git a/drivers/net/can/phytium_can.h b/drivers/net/can/phytium_can.h +new file mode 100644 +index 000000000000..f1d9103690ee +--- /dev/null ++++ b/drivers/net/can/phytium_can.h +@@ -0,0 +1,156 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* CAN bus driver for Phytium CAN controller. ++ * ++ * Copyright (C) 2021, Phytium Technology Co.,Ltd. ++ */ ++ ++#ifndef PHYTIUM_CAN_H ++#define PHYTIUM_CAN_H ++ ++enum phytium_can_reg { ++ FTCAN_CTRL = 0x00, /* Global control register */ ++ FTCAN_INTR = 0x04, /* Interrupt register */ ++ FTCAN_ARB_RATE_CTRL = 0x08, /* Arbitration rate control register */ ++ FTCAN_DAT_RATE_CTRL = 0x0C, /* Data rate control register */ ++ FTCAN_ACC_ID0 = 0x10, /* Acceptance identifier0 register */ ++ FTCAN_ACC_ID1 = 0x14, /* Acceptance identifier1 register */ ++ FTCAN_ACC_ID2 = 0x18, /* Acceptance identifier2 register */ ++ FTCAN_ACC_ID3 = 0x1C, /* Acceptance identifier3 register */ ++ FTCAN_ACC_ID0_MASK = 0x20, /* Acceptance identifier0 mask register */ ++ FTCAN_ACC_ID1_MASK = 0x24, /* Acceptance identifier1 mask register */ ++ FTCAN_ACC_ID2_MASK = 0x28, /* Acceptance identifier2 mask register */ ++ FTCAN_ACC_ID3_MASK = 0x2C, /* Acceptance identifier3 mask register */ ++ FTCAN_XFER_STS = 0x30, /* Transfer status register */ ++ FTCAN_ERR_CNT = 0x34, /* Error counter register */ ++ FTCAN_FIFO_CNT = 0x38, /* FIFO counter register */ ++ FTCAN_DMA_CTRL = 0x3C, /* DMA request control register */ ++ FTCAN_TX_FIFO = 0x100, /* TX FIFO shadow register */ ++ FTCAN_RX_FIFO = 0x200, /* RX FIFO shadow register */ ++}; ++ ++/* FTCAN_CTRL mask */ ++#define FTCAN_CTRL_XFER_MASK (0x1 << 0) /* Transfer enable */ ++#define FTCAN_CTRL_TXREQ_MASK (0x1 << 1) /* Transmit request */ ++#define FTCAN_CTRL_AIME_MASK 
(0x1 << 2) /* Acceptance identifier mask enable */ ++ ++/* FTCAN_INTR mask */ ++#define FTCAN_INTR_STATUS_MASK (0xFF << 0) /* the interrupt status */ ++#define FTCAN_INTR_BOIS_MASK (0x1 << 0) /* Bus off interrupt status */ ++#define FTCAN_INTR_PWIS_MASK (0x1 << 1) /* Passive warning interrupt status */ ++#define FTCAN_INTR_PEIS_MASK (0x1 << 2) /* Passive error interrupt status */ ++#define FTCAN_INTR_RFIS_MASK (0x1 << 3) /* RX FIFO full interrupt status */ ++#define FTCAN_INTR_TFIS_MASK (0x1 << 4) /* TX FIFO empty interrupt status */ ++#define FTCAN_INTR_REIS_MASK (0x1 << 5) /* RX frame end interrupt status */ ++#define FTCAN_INTR_TEIS_MASK (0x1 << 6) /* TX frame end interrupt status */ ++#define FTCAN_INTR_EIS_MASK (0x1 << 7) /* Error interrupt status */ ++ ++#define FTCAN_INTR_EN_MASK (0xFF << 8) /* the interrupt enable */ ++#define FTCAN_INTR_BOIE_MASK (0x1 << 8) /* Bus off interrupt enable */ ++#define FTCAN_INTR_PWIE_MASK (0x1 << 9) /* Passive warning interrupt enable */ ++#define FTCAN_INTR_PEIE_MASK (0x1 << 10) /* Passive error interrupt enable */ ++#define FTCAN_INTR_RFIE_MASK (0x1 << 11) /* RX FIFO full interrupt enable */ ++#define FTCAN_INTR_TFIE_MASK (0x1 << 12) /* TX FIFO empty interrupt enable */ ++#define FTCAN_INTR_REIE_MASK (0x1 << 13) /* RX frame end interrupt enable */ ++#define FTCAN_INTR_TEIE_MASK (0x1 << 14) /* TX frame end interrupt enable */ ++#define FTCAN_INTR_EIE_MASK (0x1 << 15) /* Error interrupt enable */ ++ ++#define FTCAN_INTR_BOIC_MASK (0x1 << 16) /* Bus off interrupt clear */ ++#define FTCAN_INTR_PWIC_MASK (0x1 << 17) /* Passive warning interrupt clear */ ++#define FTCAN_INTR_PEIC_MASK (0x1 << 18) /* Passive error interrupt clear */ ++#define FTCAN_INTR_RFIC_MASK (0x1 << 19) /* RX FIFO full interrupt clear */ ++#define FTCAN_INTR_TFIC_MASK (0x1 << 20) /* TX FIFO empty interrupt clear */ ++#define FTCAN_INTR_REIC_MASK (0x1 << 21) /* RX frame end interrupt clear */ ++#define FTCAN_INTR_TEIC_MASK (0x1 << 22) /* TX frame end interrupt 
clear */ ++#define FTCAN_INTR_EIC_MASK (0x1 << 23) /* Error interrupt clear */ ++ ++#define FTCAN_XFER_XFERS_MASK (0x1 << 10) /* Transfer status, 1:idle,0:busy */ ++#define FTCAN_XFER_FRAS_MASK (0x7) /* frame status */ ++#define FTCAN_XFER_OVERLOAD_FRAM 0x3 ++ ++/* FTCAN_ACC_ID(0-3)_MASK mask */ ++#define FTCAN_ACC_IDN_MASK 0x1FFFFFFF ++ ++/* FTCAN_ERR_CNT_OFFSET mask */ ++#define FTCAN_ERR_CNT_RFN_MASK (0xFF << 0) /* Receive error counter */ ++#define FTCAN_ERR_CNT_TFN_MASK (0xFF << 16) /* Transmit error counter */ ++ ++/* FTCAN_FIFO_CNT_OFFSET mask */ ++#define FTCAN_FIFO_CNT_RFN_MASK (0xFF << 0) /* Receive FIFO valid data number */ ++#define FTCAN_FIFO_CNT_TFN_MASK (0xFF << 16)/* Transmit FIFO valid data number */ ++ ++#define FTCAN_ERR_CNT_TFN_SHIFT 16 /* Tx Error Count shift */ ++#define FTCAN_FIFO_CNT_TFN_SHIFT 16 /* Tx FIFO Count shift */ ++#define FTCAN_IDR_ID1_SHIFT 21 /* Standard Messg Identifier */ ++#define FTCAN_IDR_ID2_SHIFT 1 /* Extended Message Identifier */ ++#define FTCAN_IDR_SDLC_SHIFT 14 ++#define FTCAN_IDR_EDLC_SHIFT 26 ++ ++#define FTCAN_IDR_ID2_MASK 0x0007FFFE /* Extended message ident */ ++#define FTCAN_IDR_ID1_MASK 0xFFE00000 /* Standard msg identifier */ ++#define FTCAN_IDR_IDE_MASK 0x00080000 /* Identifier extension */ ++#define FTCAN_IDR_SRR_MASK 0x00100000 /* Substitute remote TXreq */ ++#define FTCAN_IDR_RTR_MASK 0x00000001 /* Extended frames remote TX request */ ++#define FTCAN_IDR_DLC_MASK 0x0003C000 /* Standard msg dlc */ ++#define FTCAN_IDR_PAD_MASK 0x00003FFF /* Standard msg padding 1 */ ++ ++#define FTCAN_INTR_EN (FTCAN_INTR_TEIE_MASK | \ ++ FTCAN_INTR_REIE_MASK | \ ++ FTCAN_INTR_RFIE_MASK) ++ ++#define FTCAN_INTR_DIS 0x00000000 ++#define FTCAN_NAPI_WEIGHT 64 ++ ++#define KFIFO_LEN 4096 ++#define CAN_FRAM_MIN_IN_FIFO 4 ++ ++/* struct phytium_can_priv - This definition define CAN driver instance ++ * @can: CAN private data structure. 
++ * @rx_kfifo: Received frame FIFO ++ * @can_frame_work: Poll data from kfifo ++ * @can_clk: Pointer to struct clk ++ * @tx_head: Tx CAN packets ready to send on the queue ++ * @tx_tail: Tx CAN packets successfully sended on the queue ++ * @tx_max: Maximum number packets the driver can send ++ * @read_reg: For reading data from CAN registers ++ * @write_reg: For writing data to CAN registers ++ * @set_reg_bits: For writing data to CAN registers bit ++ * @clr_reg_bits: For writing 0 to CAN registers bit ++ * @dev: Device data structure ++ * @ndev: Network device data structure ++ * @reg_base: Ioremapped address to registers ++ * @irq_flags: For request_irq() ++ * @lock: The spin lock flag ++ * @isr: The interrupt status ++ * @can_frame: Store unfinished data frame ++ * @is_kfifo_full_err: Full flag for kfifo ++ */ ++struct phytium_can_priv { ++ struct can_priv can; ++ struct kfifo rx_kfifo; ++ struct delayed_work can_frame_work; ++ struct clk *can_clk; ++ ++ unsigned int tx_head; ++ unsigned int tx_tail; ++ unsigned int tx_max; ++ ++ u32 (*read_reg)(const struct phytium_can_priv *priv, enum phytium_can_reg reg); ++ void (*write_reg)(const struct phytium_can_priv *priv, enum phytium_can_reg reg, u32 val); ++ void (*set_reg_bits)(const struct phytium_can_priv *priv, enum phytium_can_reg reg, u32 bs); ++ void (*clr_reg_bits)(const struct phytium_can_priv *priv, enum phytium_can_reg reg, u32 bs); ++ ++ struct device *dev; ++ struct net_device *ndev; ++ ++ void __iomem *reg_base; ++ unsigned long irq_flags; ++ spinlock_t lock; /* lock for tx */ ++ ++ u32 isr; ++ u32 can_frame[4]; ++ u32 is_kfifo_full_err; ++}; ++ ++void register_phytium_can(struct phytium_can_priv *priv); ++ ++#endif /* PHYTIUM_CAN_H */ +diff --git a/drivers/net/can/phytium_can_pci.c b/drivers/net/can/phytium_can_pci.c +new file mode 100644 +index 000000000000..87c99a701680 +--- /dev/null ++++ b/drivers/net/can/phytium_can_pci.c +@@ -0,0 +1,107 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* PCI CAN bus 
driver for Phytium CAN controller ++ * ++ * Copyright (C) 2021, Phytium Technology Co., Ltd. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "phytium_can.h" ++ ++#define DRV_NAME "phytium_can_pci" ++ ++#define TX_MAX 64 ++#define CLK_FREQ 480000000 ++ ++static const struct can_bittiming_const phytium_ext_bittiming_const = { ++ .name = "phytium_can_ext", ++ .tseg1_min = 1, ++ .tseg1_max = 16, ++ .tseg2_min = 1, ++ .tseg2_max = 8, ++ .sjw_max = 4, ++ .brp_min = 1, ++ .brp_max = 8192, ++ .brp_inc = 2, ++}; ++ ++static int phytium_can_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) ++{ ++ struct net_device *ndev; ++ struct phytium_can_priv *priv; ++ int ret; ++ ++ ret = pcim_enable_device(pdev); ++ if (ret) ++ return ret; ++ ++ ret = pcim_iomap_regions(pdev, 0x1, pci_name(pdev)); ++ if (ret) ++ return ret; ++ ++ ndev = alloc_candev(sizeof(struct phytium_can_priv), TX_MAX); ++ if (!ndev) ++ return -ENOMEM; ++ ++ priv = netdev_priv(ndev); ++ priv->dev = &pdev->dev; ++ ++ priv->reg_base = pcim_iomap_table(pdev)[0]; ++ ++ priv->tx_head = 0; ++ priv->tx_tail = 0; ++ priv->tx_max = TX_MAX; ++ priv->ndev = ndev; ++ priv->is_kfifo_full_err = false; ++ priv->can.bittiming_const = &phytium_ext_bittiming_const; ++ ++ ndev->irq = pdev->irq; ++ ndev->flags |= IFF_ECHO; /* We support local echo */ ++ priv->irq_flags = IRQF_SHARED; ++ ++ spin_lock_init(&priv->lock); ++ ++ pci_set_drvdata(pdev, ndev); ++ SET_NETDEV_DEV(ndev, &pdev->dev); ++ ++ priv->can.clock.freq = CLK_FREQ; ++ ++ register_phytium_can(priv); ++ ++ return ret; ++} ++ ++static void phytium_can_pci_remove(struct pci_dev *pdev) ++{ ++ struct net_device *ndev = pci_get_drvdata(pdev); ++ struct phytium_can_priv *priv = netdev_priv(ndev); ++ ++ unregister_candev(ndev); ++ kfifo_free(&priv->rx_kfifo); ++ free_candev(ndev); ++} ++ ++static const struct pci_device_id phytium_pci_id_table[] = { ++ { PCI_VDEVICE(PHYTIUM, 0xdc2d) }, ++ {}, ++}; 
++MODULE_DEVICE_TABLE(pci, phytium_pci_id_table); ++ ++static struct pci_driver phytium_can_pci_driver = { ++ .name = DRV_NAME, ++ .id_table = phytium_pci_id_table, ++ .probe = phytium_can_pci_probe, ++ .remove = phytium_can_pci_remove, ++}; ++ ++module_pci_driver(phytium_can_pci_driver); ++ ++MODULE_LICENSE("GPL v2"); ++MODULE_DESCRIPTION("Phytium can controller driver"); ++MODULE_AUTHOR("Cheng Quan "); +diff --git a/drivers/net/can/phytium_can_plat.c b/drivers/net/can/phytium_can_plat.c +new file mode 100644 +index 000000000000..9bf2d533ddf6 +--- /dev/null ++++ b/drivers/net/can/phytium_can_plat.c +@@ -0,0 +1,175 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* Platform CAN bus driver for Phytium CAN controller ++ * ++ * Copyright (C) 2018-2021, Phytium Technology Co., Ltd. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "phytium_can.h" ++ ++#define DRV_NAME "phytium_can_plat" ++ ++static const struct can_bittiming_const phytium_bittiming_const = { ++ .name = "phytium_can", ++ .tseg1_min = 1, ++ .tseg1_max = 16, ++ .tseg2_min = 1, ++ .tseg2_max = 8, ++ .sjw_max = 4, ++ .brp_min = 1, ++ .brp_max = 512, ++ .brp_inc = 2, ++}; ++ ++static const struct can_bittiming_const phytium_ext_bittiming_const = { ++ .name = "phytium_can_ext", ++ .tseg1_min = 1, ++ .tseg1_max = 16, ++ .tseg2_min = 1, ++ .tseg2_max = 8, ++ .sjw_max = 4, ++ .brp_min = 1, ++ .brp_max = 8192, ++ .brp_inc = 2, ++}; ++ ++static int phytium_can_probe(struct platform_device *pdev) ++{ ++ struct resource *res; ++ struct net_device *ndev; ++ struct phytium_can_priv *priv; ++ u32 tx_max; ++ struct fwnode_handle *fwnode = dev_fwnode(&pdev->dev); ++ int ret; ++ ++ ret = fwnode_property_read_u32(fwnode, "tx-fifo-depth", &tx_max); ++ if (ret < 0) { ++ dev_err(&pdev->dev, "tx-fifo-depth get error.\n"); ++ goto err; ++ } ++ ++ ndev = alloc_candev(sizeof(struct 
phytium_can_priv), tx_max); ++ if (!ndev) { ++ ret = -ENOMEM; ++ goto err; ++ } ++ ++ priv = netdev_priv(ndev); ++ priv->dev = &pdev->dev; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ priv->reg_base = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(priv->reg_base)) { ++ ret = PTR_ERR(priv->reg_base); goto free; } ++ ++ priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING; ++ priv->tx_head = 0; ++ priv->tx_tail = 0; ++ priv->tx_max = tx_max; ++ priv->ndev = ndev; ++ priv->is_kfifo_full_err = false; ++ ++ if (fwnode_property_present(fwnode, "extend_brp")) ++ priv->can.bittiming_const = &phytium_ext_bittiming_const; ++ else ++ priv->can.bittiming_const = &phytium_bittiming_const; ++ ++ ndev->irq = platform_get_irq(pdev, 0); ++ ndev->flags |= IFF_ECHO; /* We support local echo */ ++ priv->irq_flags = IRQF_SHARED; ++ ++ spin_lock_init(&priv->lock); ++ ++ platform_set_drvdata(pdev, ndev); ++ SET_NETDEV_DEV(ndev, &pdev->dev); ++ ++ /* Getting the CAN can_clk info */ ++ if (pdev->dev.of_node) { ++ priv->can_clk = devm_clk_get(&pdev->dev, "phytium_can_clk"); ++ if (IS_ERR(priv->can_clk)) { ++ dev_err(&pdev->dev, "Device clock not found.\n"); ++ ret = PTR_ERR(priv->can_clk); ++ goto free; ++ } ++ ++ ret = clk_prepare_enable(priv->can_clk); ++ if (ret) ++ goto free; ++ ++ priv->can.clock.freq = clk_get_rate(priv->can_clk); ++ } else if (has_acpi_companion(&pdev->dev)) { ++ ret = fwnode_property_read_u32(fwnode, "clock-frequency", ++ &priv->can.clock.freq); ++ if (ret < 0) { ++ dev_err(&pdev->dev, "clock frequency get error.\n"); ++ goto free; ++ } ++ } ++ ++ register_phytium_can(priv); ++ ++ return 0; ++ ++free: ++ free_candev(ndev); ++err: ++ return ret; ++} ++ ++static int phytium_can_remove(struct platform_device *pdev) ++{ ++ struct net_device *ndev = platform_get_drvdata(pdev); ++ struct phytium_can_priv *priv = netdev_priv(ndev); ++ ++ unregister_candev(ndev); ++ kfifo_free(&priv->rx_kfifo); ++ free_candev(ndev); ++ ++ return 0; ++} ++ ++#ifdef 
CONFIG_ACPI ++static const struct acpi_device_id phytium_can_acpi_match[] = { ++ { "PHYT000A", 0 }, ++ {} ++}; ++MODULE_DEVICE_TABLE(acpi, phytium_can_acpi_match); ++#endif ++ ++/* Match table for OF platform binding */ ++static const struct of_device_id phytium_can_of_ids[] = { ++ { .compatible = "phytium,can", }, ++ { /* end of list */ }, ++}; ++MODULE_DEVICE_TABLE(of, phytium_can_of_ids); ++ ++static struct platform_driver phytium_can_driver = { ++ .probe = phytium_can_probe, ++ .remove = phytium_can_remove, ++ .driver = { ++ .name = DRV_NAME, ++ .of_match_table = phytium_can_of_ids, ++ .acpi_match_table = ACPI_PTR(phytium_can_acpi_match), ++ }, ++}; ++ ++module_platform_driver(phytium_can_driver); ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Cheng Quan "); ++MODULE_DESCRIPTION("Platform CAN bus driver for Phytium CAN Controller"); +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c +index fad503820e04..8241d670f6a1 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c +@@ -9,6 +9,7 @@ + * warranty of any kind, whether express or implied. 
+ */ + ++#include + #include + #include + #include +@@ -32,6 +33,12 @@ static int dwmac_generic_probe(struct platform_device *pdev) + dev_err(&pdev->dev, "dt configuration failed\n"); + return PTR_ERR(plat_dat); + } ++ } else if (has_acpi_companion(&pdev->dev)) { ++ plat_dat = stmmac_probe_config_acpi(pdev, &stmmac_res.mac); ++ if (!plat_dat) { ++ dev_err(&pdev->dev, "acpi configuration failed\n"); ++ return -EINVAL; ++ } + } else { + plat_dat = dev_get_platdata(&pdev->dev); + if (!plat_dat) { +@@ -84,6 +91,17 @@ static const struct of_device_id dwmac_generic_match[] = { + }; + MODULE_DEVICE_TABLE(of, dwmac_generic_match); + ++#ifdef CONFIG_ACPI ++static const struct acpi_device_id dwmac_acpi_ids[] = { ++ { .id = "PHYT0004" }, ++ {}, ++}; ++ ++MODULE_DEVICE_TABLE(acpi, dwmac_acpi_ids); ++#else ++#define dwmac_acpi_ids NULL ++#endif ++ + static struct platform_driver dwmac_generic_driver = { + .probe = dwmac_generic_probe, + .remove = stmmac_pltfr_remove, +@@ -91,6 +109,7 @@ static struct platform_driver dwmac_generic_driver = { + .name = STMMAC_RESOURCE_NAME, + .pm = &stmmac_pltfr_pm_ops, + .of_match_table = of_match_ptr(dwmac_generic_match), ++ .acpi_match_table = ACPI_PTR(dwmac_acpi_ids), + }, + }; + module_platform_driver(dwmac_generic_driver); +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 75896d6ba6e2..0ddcfa4fdce7 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -24,6 +24,7 @@ + https://bugzilla.stlinux.com/ + *******************************************************************************/ + ++#include + #include + #include + #include +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +index 2b800ce1d5bf..a4485ea0d2b1 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c ++++ 
b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +@@ -18,6 +18,9 @@ + Author: Giuseppe Cavallaro + *******************************************************************************/ + ++#include ++#include ++#include + #include + #include + #include +@@ -607,6 +610,248 @@ void stmmac_remove_config_dt(struct platform_device *pdev, + EXPORT_SYMBOL_GPL(stmmac_probe_config_dt); + EXPORT_SYMBOL_GPL(stmmac_remove_config_dt); + ++#ifdef CONFIG_ACPI ++/* ++ * Parse ACPI _DSD to setup AXI register ++ */ ++static struct stmmac_axi * stmmac_axi_setup_acpi(struct platform_device *pdev) ++{ ++ struct fwnode_handle *np = dev_fwnode(&(pdev->dev)); ++ struct stmmac_axi * axi; ++ ++ axi = devm_kzalloc(&pdev->dev, sizeof(*axi), GFP_KERNEL); ++ if (!axi) ++ return ERR_PTR(-ENOMEM); ++ ++ axi->axi_lpi_en = fwnode_property_read_bool(np, "snps,lpi_en"); ++ axi->axi_xit_frm = fwnode_property_read_bool(np, "snps,xit_frm"); ++ axi->axi_kbbe = fwnode_property_read_bool(np, "snps,axi_kbbe"); ++ axi->axi_fb = fwnode_property_read_bool(np, "snps,axi_fb"); ++ axi->axi_mb = fwnode_property_read_bool(np, "snps,axi_mb"); ++ axi->axi_rb = fwnode_property_read_bool(np, "snps,axi_rb"); ++ ++ if (fwnode_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt)) ++ axi->axi_wr_osr_lmt = 1; ++ if (fwnode_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt)) ++ axi->axi_rd_osr_lmt = 1; ++ fwnode_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN); ++ ++ return axi; ++} ++ ++/** ++ * Parse ACPI _DSD parameters for multiple queues configuration ++ */ ++static void stmmac_mtl_setup_acpi(struct platform_device *pdev, ++ struct plat_stmmacenet_data *plat) ++{ ++ plat->rx_queues_to_use = 1; ++ plat->tx_queues_to_use = 1; ++ ++ /** ++ * First Queue must always be in DCB mode. As MTL_QUEUE_DCB=1 we need ++ * to always set this, otherwise Queue will be classified as AVB ++ * (because MTL_QUEUE_AVB = 0). 
++ */ ++ plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB; ++ plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB; ++ ++ plat->rx_queues_cfg[0].use_prio = true; ++ ++ plat->rx_queues_cfg[0].pkt_route = 0x0; ++ ++ plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP; ++ plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP; ++ ++ plat->tx_queues_cfg[0].use_prio = true; ++} ++ ++static int stmmac_acpi_phy(struct plat_stmmacenet_data *plat, ++ struct fwnode_handle *np, struct device *dev) ++{ ++ plat->mdio_bus_data = devm_kzalloc(dev, ++ sizeof(struct stmmac_mdio_bus_data), ++ GFP_KERNEL); ++ ++ return 0; ++} ++ ++int fw_get_phy_mode(struct fwnode_handle *np) ++{ ++ const char *pm; ++ int err, i; ++ ++ err = fwnode_property_read_string(np, "phy-mode", &pm); ++ if (err < 0) ++ err = fwnode_property_read_string(np, "phy-connection-mode", &pm); ++ if (err < 0) ++ return err; ++ ++ for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) { ++ if (!strcasecmp(pm, phy_modes(i))) ++ return i; ++ } ++ ++ return -ENODEV; ++} ++ ++int stmmac_acpi_clock_setup(struct plat_stmmacenet_data *plat, ++ struct platform_device *pdev) ++{ ++ struct fwnode_handle *np = dev_fwnode(&(pdev->dev)); ++ struct device * dev = &pdev->dev; ++ struct clk *clk = ERR_PTR(-ENODEV); ++ u64 clk_freq = 0; ++ int err; ++ ++ err = fwnode_property_read_u64(np, "clock-frequency", &clk_freq); ++ if (err < 0) ++ clk_freq = 125000000; /* default to 125MHz */ ++ ++ plat->stmmac_clk = devm_clk_get(dev, dev_name(dev)); ++ if (IS_ERR(plat->stmmac_clk)) { ++ clk = clk_register_fixed_rate(dev, dev_name(dev), NULL, 0, clk_freq); ++ if (IS_ERR(clk)) ++ return -1; ++ if (clk_register_clkdev(clk, dev_name(dev), dev_name(dev))) ++ return -1; ++ plat->stmmac_clk = clk; ++ } ++ clk_prepare_enable(plat->stmmac_clk); ++ ++ plat->pclk = devm_clk_get(dev, "pclk"); ++ if (IS_ERR(plat->pclk)) ++ plat->pclk = NULL; ++ clk_prepare_enable(plat->pclk); ++ ++ plat->clk_ptp_ref = devm_clk_get(dev, "ptp_ref"); ++ if (IS_ERR(plat->clk_ptp_ref)) { ++ 
plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk); ++ plat->clk_ptp_ref = NULL; ++ } ++ ++ plat->stmmac_rst = devm_reset_control_get(dev,STMMAC_RESOURCE_NAME); ++ if (IS_ERR(plat->stmmac_rst)) { ++ dev_info(dev, "no reset control found\n"); ++ plat->stmmac_rst = NULL; ++ } ++ ++ return 0; ++} ++ ++/** ++ * Parse ACPI driver parameters ++ */ ++struct plat_stmmacenet_data * ++stmmac_probe_config_acpi(struct platform_device *pdev, const char **mac) ++{ ++ struct fwnode_handle *np; ++ struct plat_stmmacenet_data *plat; ++ struct stmmac_dma_cfg *dma_cfg; ++ ++ plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); ++ if (!plat) ++ return ERR_PTR(-ENOMEM); ++ ++ np = dev_fwnode(&(pdev->dev)); ++ ++ plat->interface = fw_get_phy_mode(np); ++ ++ /* Get max speed of operation from device tree */ ++ if (fwnode_property_read_u32(np, "max-speed", &plat->max_speed)) ++ plat->max_speed = -1; ++ ++ if (fwnode_property_read_u32(np, "bus_id", &plat->bus_id)) ++ plat->bus_id = 2; ++ ++ /* Default to PHY auto-detection */ ++ plat->phy_addr = -1; ++ ++ /* "snps,phy-addr" is not a standard property. Mark it as deprecated ++ * and warn of its use. Remove this when PHY node support is added. ++ */ ++ if (fwnode_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0) ++ dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n"); ++ ++ if (stmmac_acpi_phy(plat, np, &pdev->dev)) ++ return ERR_PTR(-ENODEV); ++ ++ fwnode_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size); ++ fwnode_property_read_u32(np, "rx-fifo-depth", &plat->rx_fifo_size); ++ if (plat->tx_fifo_size == 0) ++ plat->tx_fifo_size = 0x10000; ++ if (plat->rx_fifo_size == 0) ++ plat->rx_fifo_size = 0x10000; ++ ++ plat->force_sf_dma_mode = ++ fwnode_property_read_bool(np, "snps,force_sf_dma_mode"); ++ plat->en_tx_lpi_clockgating = ++ fwnode_property_read_bool(np, "snps,en-tx-lpi-clockgating"); ++ ++ /* Set the maxmtu to a default of JUMBO_LEN in case the ++ * parameter is not present. 
++ */ ++ plat->maxmtu = JUMBO_LEN; ++ ++ /* Set default value for multicast hash bins */ ++ plat->multicast_filter_bins = HASH_TABLE_SIZE; ++ ++ /* Set default value for unicast filter entries */ ++ plat->unicast_filter_entries = 1; ++ ++ /* Only to "snps,dwmac" */ ++ fwnode_property_read_u32(np, "max-frame-size", &plat->maxmtu); ++ fwnode_property_read_u32(np, "snps,multicast-filter-bins", ++ &plat->multicast_filter_bins); ++ fwnode_property_read_u32(np, "snps,perfect-filter-entries", ++ &plat->unicast_filter_entries); ++ plat->unicast_filter_entries = dwmac1000_validate_ucast_entries( ++ plat->unicast_filter_entries); ++ plat->multicast_filter_bins = dwmac1000_validate_mcast_bins( ++ plat->multicast_filter_bins); ++ plat->has_gmac = 1; ++ plat->pmt = 1; ++ ++ dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL); ++ if (!dma_cfg) ++ return ERR_PTR(-ENOMEM); ++ plat->dma_cfg = dma_cfg; ++ ++ fwnode_property_read_u32(np, "snps,pbl", &dma_cfg->pbl); ++ if (!dma_cfg->pbl) ++ dma_cfg->pbl = DEFAULT_DMA_PBL; ++ ++ fwnode_property_read_u32(np, "snps,txpbl", &dma_cfg->txpbl); ++ fwnode_property_read_u32(np, "snps,rxpbl", &dma_cfg->rxpbl); ++ dma_cfg->pblx8 = !fwnode_property_read_bool(np, "snps,no-pbl-x8"); ++ ++ dma_cfg->aal = fwnode_property_read_bool(np, "snps,aal"); ++ dma_cfg->fixed_burst = fwnode_property_read_bool(np, "snps,fixed-burst"); ++ dma_cfg->mixed_burst = fwnode_property_read_bool(np, "snps,mixed-burst"); ++ ++ plat->force_thresh_dma_mode = fwnode_property_read_bool(np, "snps,force_thresh_dma_mode"); ++ if (plat->force_thresh_dma_mode) ++ plat->force_sf_dma_mode = 0; ++ ++ fwnode_property_read_u32(np, "snps,ps-speed", &plat->mac_port_sel_speed); ++ ++ plat->axi = stmmac_axi_setup_acpi(pdev); ++ ++ stmmac_mtl_setup_acpi(pdev, plat); ++ ++ stmmac_acpi_clock_setup(plat,pdev); ++ ++ return plat; ++} ++#else ++struct plat_stmmacenet_data * ++stmmac_probe_config_acpi(struct platform_device *pdev, const char **mac) ++{ ++ return ERR_PTR(-EINVAL); ++} 
++#endif /* CONFIG_ACPI */ ++EXPORT_SYMBOL_GPL(stmmac_probe_config_acpi); ++ + int stmmac_get_platform_resources(struct platform_device *pdev, + struct stmmac_resources *stmmac_res) + { +@@ -617,33 +862,43 @@ int stmmac_get_platform_resources(struct platform_device *pdev, + /* Get IRQ information early to have an ability to ask for deferred + * probe if needed before we went too far with resource allocation. + */ +- stmmac_res->irq = platform_get_irq_byname(pdev, "macirq"); +- if (stmmac_res->irq < 0) { +- if (stmmac_res->irq != -EPROBE_DEFER) { +- dev_err(&pdev->dev, +- "MAC IRQ configuration information not found\n"); ++ if (pdev->dev.of_node) { ++ stmmac_res->irq = platform_get_irq_byname(pdev, "macirq"); ++ if (stmmac_res->irq < 0) { ++ if (stmmac_res->irq != -EPROBE_DEFER) { ++ dev_err(&pdev->dev, ++ "MAC IRQ configuration information not found\n"); ++ } ++ return stmmac_res->irq; + } +- return stmmac_res->irq; +- } + +- /* On some platforms e.g. SPEAr the wake up irq differs from the mac irq +- * The external wake up irq can be passed through the platform code +- * named as "eth_wake_irq" +- * +- * In case the wake up interrupt is not passed from the platform +- * so the driver will continue to use the mac irq (ndev->irq) +- */ +- stmmac_res->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); +- if (stmmac_res->wol_irq < 0) { +- if (stmmac_res->wol_irq == -EPROBE_DEFER) ++ /* On some platforms e.g. 
SPEAr the wake up irq differs from the mac irq ++ * The external wake up irq can be passed through the platform code ++ * named as "eth_wake_irq" ++ * ++ * In case the wake up interrupt is not passed from the platform ++ * so the driver will continue to use the mac irq (ndev->irq) ++ */ ++ stmmac_res->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); ++ if (stmmac_res->wol_irq < 0) { ++ if (stmmac_res->wol_irq == -EPROBE_DEFER) ++ return -EPROBE_DEFER; ++ stmmac_res->wol_irq = stmmac_res->irq; ++ } ++ ++ stmmac_res->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); ++ if (stmmac_res->lpi_irq == -EPROBE_DEFER) + return -EPROBE_DEFER; ++ } else if (has_acpi_companion(&pdev->dev)) { ++ stmmac_res->irq = platform_get_irq(pdev, 0); ++ if (stmmac_res->irq < 0) ++ dev_err(&pdev->dev, ++ "MAC IRQ configuration information not found\n"); ++ + stmmac_res->wol_irq = stmmac_res->irq; ++ stmmac_res->lpi_irq = -1; + } + +- stmmac_res->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); +- if (stmmac_res->lpi_irq == -EPROBE_DEFER) +- return -EPROBE_DEFER; +- + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + stmmac_res->addr = devm_ioremap_resource(&pdev->dev, res); + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h +index b72eb0de57b7..8e117ad0e42a 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h +@@ -23,6 +23,8 @@ + + struct plat_stmmacenet_data * + stmmac_probe_config_dt(struct platform_device *pdev, const char **mac); ++struct plat_stmmacenet_data * ++stmmac_probe_config_acpi(struct platform_device *pdev, const char **mac); + void stmmac_remove_config_dt(struct platform_device *pdev, + struct plat_stmmacenet_data *plat); + +diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig +index 028b287466fb..a1a024fe78e3 100644 +--- a/drivers/pci/controller/Kconfig ++++ 
b/drivers/pci/controller/Kconfig +@@ -278,5 +278,15 @@ config VMD + To compile this driver as a module, choose M here: the + module will be called vmd. + ++config PCIE_PHYTIUM_EP ++ tristate "Phytium PCIe endpoint controller" ++ depends on OF ++ depends on PCI_ENDPOINT ++ help ++ Say Y here if you want to support Phytium PCIe controller in ++ endpoint mode on Phytium SoC. The controller can act as Root Port ++ or End Point with different phytium firmware. But End Point mode only support ++ one physical function. ++ + source "drivers/pci/controller/dwc/Kconfig" + endmenu +diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile +index d56a507495c5..45fbf1ff3354 100644 +--- a/drivers/pci/controller/Makefile ++++ b/drivers/pci/controller/Makefile +@@ -29,6 +29,8 @@ obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o + obj-$(CONFIG_PCIE_MOBIVEIL) += pcie-mobiveil.o + obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o + obj-$(CONFIG_VMD) += vmd.o ++obj-$(CONFIG_PCIE_PHYTIUM_EP) += pcie-phytium-ep.o ++ + # pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW + obj-y += dwc/ + +diff --git a/drivers/pci/controller/pcie-phytium-ep.c b/drivers/pci/controller/pcie-phytium-ep.c +new file mode 100644 +index 000000000000..2c5989b9c544 +--- /dev/null ++++ b/drivers/pci/controller/pcie-phytium-ep.c +@@ -0,0 +1,480 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* Phytium d2000 pcie endpoint driver ++ * ++ * Copyright (c) 2021 Phytium Limited. ++ * ++ * Author: ++ * Yang Xun ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "pcie-phytium-ep.h" ++#include "pcie-phytium-register.h" ++ ++#define PHYTIUM_PCIE_EP_IRQ_PCI_ADDR_NONE 0x0 ++#define PHYTIUM_PCIE_EP_IRQ_PCI_ADDR_LEGACY 0x1 ++ ++static int phytium_pcie_ep_write_header(struct pci_epc *epc, unsigned char fn, ++ struct pci_epf_header *hdr) ++{ ++ struct phytium_pcie_ep *priv = epc_get_drvdata(epc); ++ u16 tmp = 0; ++ ++ phytium_pcie_writew(priv, fn, PHYTIUM_PCI_VENDOR_ID, hdr->vendorid); ++ phytium_pcie_writew(priv, fn, PHYTIUM_PCI_DEVICE_ID, hdr->deviceid); ++ phytium_pcie_writeb(priv, fn, PHYTIUM_PCI_REVISION_ID, hdr->revid); ++ phytium_pcie_writeb(priv, fn, PHYTIUM_PCI_CLASS_PROG, hdr->progif_code); ++ phytium_pcie_writew(priv, fn, PHYTIUM_PCI_CLASS_DEVICE, ++ hdr->subclass_code | (hdr->baseclass_code << 8)); ++ ++ phytium_pcie_writew(priv, fn, PHYTIUM_PCI_SUBSYS_VENDOR_ID, ++ hdr->subsys_vendor_id); ++ phytium_pcie_writew(priv, fn, PHYTIUM_PCI_SUBSYS_DEVICE_ID, ++ hdr->subsys_id); ++ ++ tmp = phytium_pcie_readw(priv, fn, PHYTIUM_PCI_INTERRUPT_PIN); ++ tmp = ((tmp & (~INTERRUPT_PIN_MASK)) | hdr->interrupt_pin); ++ phytium_pcie_writew(priv, fn, PHYTIUM_PCI_INTERRUPT_PIN, tmp); ++ ++ tmp = phytium_pcie_readw(priv, fn, PHYTIUM_PCI_MSIX_CAP); ++ phytium_pcie_writew(priv, fn, PHYTIUM_PCI_MSIX_CAP, MSIX_DISABLE); ++ ++ return 0; ++} ++ ++static int phytium_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, ++ struct pci_epf_bar *epf_bar) ++{ ++ struct phytium_pcie_ep *priv = epc_get_drvdata(epc); ++ u64 sz = 0, sz_mask, atr_size; ++ int flags = epf_bar->flags; ++ u32 setting, src_addr0, src_addr1, trsl_addr0, trsl_addr1, trsl_param; ++ enum pci_barno barno = epf_bar->barno; ++ struct pci_epc_mem *mem = epc->mem; ++ ++ if ((flags & PCI_BASE_ADDRESS_MEM_TYPE_64) && (barno & 1)) { ++ dev_err(&epc->dev, "bar %d do not support mem64\n", barno); ++ return -EINVAL; ++ } ++ ++ if (barno & 1) { ++ dev_err(&epc->dev, "not support 
bar 1/3/5\n"); ++ return -EINVAL; ++ } ++ dev_dbg(epc->dev.parent, "set bar%d mapping address 0x%pa size 0x%lx\n", ++ barno, &(epf_bar->phys_addr), epf_bar->size); ++ ++ if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) { ++ setting = BAR_IO_TYPE; ++ sz = max_t(size_t, epf_bar->size, BAR_IO_MIN_APERTURE); ++ sz = 1 << fls64(sz - 1); ++ sz_mask = ~(sz - 1); ++ setting |= sz_mask; ++ trsl_param = TRSL_ID_IO; ++ } else { ++ setting = BAR_MEM_TYPE; ++ sz = max_t(size_t, epf_bar->size, BAR_MEM_MIN_APERTURE); ++ sz = 1 << fls64(sz - 1); ++ sz_mask = ~(sz - 1); ++ setting |= lower_32_bits(sz_mask); ++ ++ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ++ setting |= BAR_MEM_64BIT; ++ ++ if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH) ++ setting |= BAR_MEM_PREFETCHABLE; ++ ++ trsl_param = TRSL_ID_MASTER; ++ } ++ ++ phytium_pcie_writel(priv, fn, PHYTIUM_PCI_BAR(barno), setting); ++ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ++ phytium_pcie_writel(priv, fn, PHYTIUM_PCI_BAR(barno + 1), ++ upper_32_bits(sz_mask)); ++ dev_dbg(epc->dev.parent, "set bar%d mapping address 0x%pa size 0x%llx 0x%x\n", ++ barno, &(epf_bar->phys_addr), sz, lower_32_bits(epf_bar->phys_addr)); ++ sz = ALIGN(sz, mem->page_size); ++ atr_size = fls64(sz - 1) - 1; ++ src_addr0 = ATR_IMPL | ((atr_size & ATR_SIZE_MASK) << ATR_SIZE_SHIFT); ++ src_addr1 = 0; ++ trsl_addr0 = (lower_32_bits(epf_bar->phys_addr) & TRSL_ADDR_32_12_MASK); ++ trsl_addr1 = upper_32_bits(epf_bar->phys_addr); ++ ++ phytium_pcie_writel(priv, fn, PHYTIUM_PCI_WIN0_SRC_ADDR0(barno), ++ src_addr0); ++ phytium_pcie_writel(priv, fn, PHYTIUM_PCI_WIN0_SRC_ADDR1(barno), ++ src_addr1); ++ phytium_pcie_writel(priv, fn, PHYTIUM_PCI_WIN0_TRSL_ADDR0(barno), ++ trsl_addr0); ++ phytium_pcie_writel(priv, fn, PHYTIUM_PCI_WIN0_TRSL_ADDR1(barno), ++ trsl_addr1); ++ phytium_pcie_writel(priv, fn, PHYTIUM_PCI_WIN0_TRSL_PARAM(barno), ++ trsl_param); ++ ++ return 0; ++} ++ ++static void phytium_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, ++ struct 
pci_epf_bar *epf_bar) ++{ ++ struct phytium_pcie_ep *priv = epc_get_drvdata(epc); ++ int flags = epf_bar->flags; ++ enum pci_barno barno = epf_bar->barno; ++ ++ phytium_pcie_writel(priv, fn, PHYTIUM_PCI_BAR(barno), 0); ++ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ++ phytium_pcie_writel(priv, fn, PHYTIUM_PCI_BAR(barno + 1), 0); ++ ++ phytium_pcie_writel(priv, fn, PHYTIUM_PCI_WIN0_SRC_ADDR0(barno), 0); ++ phytium_pcie_writel(priv, fn, PHYTIUM_PCI_WIN0_SRC_ADDR1(barno), 0); ++ phytium_pcie_writel(priv, fn, PHYTIUM_PCI_WIN0_TRSL_ADDR0(barno), 0); ++ phytium_pcie_writel(priv, fn, PHYTIUM_PCI_WIN0_TRSL_ADDR1(barno), 0); ++ phytium_pcie_writel(priv, fn, PHYTIUM_PCI_WIN0_TRSL_PARAM(barno), 0); ++} ++ ++static int phytium_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, ++ phys_addr_t addr, u64 pci_addr, ++ size_t size) ++{ ++ struct phytium_pcie_ep *priv = epc_get_drvdata(epc); ++ u32 src_addr0, src_addr1, trsl_addr0, trsl_addr1, trsl_param, atr_size; ++ u64 sz = 0; ++ u32 r; ++ struct pci_epc_mem *mem = epc->mem; ++ ++ r = find_first_zero_bit(&priv->ob_region_map, ++ sizeof(priv->ob_region_map) * BITS_PER_LONG); ++ if (r >= priv->max_regions) { ++ dev_err(&epc->dev, "no free outbound region\n"); ++ return -EINVAL; ++ } ++ ++ dev_dbg(epc->dev.parent, "set slave %d: mapping address 0x%pa to pci 0x%llx, size 0x%zx\n", ++ r, &addr, pci_addr, size); ++ ++ sz = ALIGN(size, mem->page_size); ++ atr_size = fls64(sz - 1) - 1; ++ src_addr0 = ATR_IMPL | ((atr_size & ATR_SIZE_MASK) << ATR_SIZE_SHIFT); ++ src_addr0 |= (lower_32_bits(addr) & SRC_ADDR_32_12_MASK); ++ src_addr1 = upper_32_bits(addr); ++ trsl_addr0 = (lower_32_bits(pci_addr) & TRSL_ADDR_32_12_MASK); ++ trsl_addr1 = upper_32_bits(pci_addr); ++ trsl_param = TRSL_ID_PCIE_TR; ++ ++ phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_SRC_ADDR0(r), ++ src_addr0); ++ phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_SRC_ADDR1(r), ++ src_addr1); ++ phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_TRSL_ADDR0(r), ++ trsl_addr0); ++ 
phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_TRSL_ADDR1(r), ++ trsl_addr1); ++ phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_TRSL_PARAM(r), ++ trsl_param); ++ set_bit(r, &priv->ob_region_map); ++ priv->ob_addr[r] = addr; ++ ++ return 0; ++} ++ ++static void phytium_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, ++ phys_addr_t addr) ++{ ++ struct phytium_pcie_ep *priv = epc_get_drvdata(epc); ++ u32 r; ++ ++ for (r = 0; r < priv->max_regions; r++) ++ if (priv->ob_addr[r] == addr) ++ break; ++ ++ if (r == priv->max_regions) { ++ dev_err(&epc->dev, "used unmap addr 0x%pa\n", &addr); ++ return; ++ } ++ dev_dbg(epc->dev.parent, "set slave %d: unmapping address 0x%pa\n", r, &addr); ++ ++ phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_SRC_ADDR0(r), 0); ++ phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_SRC_ADDR1(r), 0); ++ phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_TRSL_ADDR0(r), 0); ++ phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_TRSL_ADDR1(r), 0); ++ phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_TRSL_PARAM(r), 0); ++ priv->ob_addr[r] = 0; ++ clear_bit(r, &priv->ob_region_map); ++} ++ ++static int phytium_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 mmc) ++{ ++ struct phytium_pcie_ep *priv = epc_get_drvdata(epc); ++ u16 flags = 0; ++ ++ flags = (mmc & MSI_NUM_MASK) << MSI_NUM_SHIFT; ++ flags &= ~MSI_MASK_SUPPORT; ++ phytium_pcie_writew(priv, fn, PHYTIUM_PCI_INTERRUPT_PIN, flags); ++ ++ return 0; ++} ++ ++static int phytium_pcie_ep_get_msi(struct pci_epc *epc, u8 fn) ++{ ++ struct phytium_pcie_ep *priv = epc_get_drvdata(epc); ++ u16 flags, mme; ++ u32 cap = PHYTIUM_PCI_CF_MSI_BASE; ++ ++ flags = phytium_pcie_readw(priv, fn, cap + PCI_MSI_FLAGS); ++ if (!(flags & PCI_MSI_FLAGS_ENABLE)) ++ return -EINVAL; ++ ++ mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4; ++ ++ return mme; ++} ++ ++static int phytium_pcie_ep_send_msi_irq(struct phytium_pcie_ep *priv, u8 fn, ++ u8 interrupt_num) ++{ ++ u32 cap = PHYTIUM_PCI_CF_MSI_BASE; ++ u16 flags, mme, data_mask, 
data; ++ u8 msi_count; ++ u64 pci_addr, pci_addr_mask = IRQ_MAPPING_SIZE - 1; ++ u32 src_addr0, src_addr1, trsl_addr0, trsl_addr1, trsl_param, atr_size; ++ ++ flags = phytium_pcie_readw(priv, fn, cap + PCI_MSI_FLAGS); ++ if (!(flags & PCI_MSI_FLAGS_ENABLE)) ++ return -EINVAL; ++ ++ mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4; ++ msi_count = 1 << mme; ++ if (!interrupt_num || interrupt_num > msi_count) ++ return -EINVAL; ++ ++ data_mask = msi_count - 1; ++ data = phytium_pcie_readw(priv, fn, cap + PCI_MSI_DATA_64); ++ data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask); ++ ++ /* Get the PCI address */ ++ pci_addr = phytium_pcie_readl(priv, fn, cap + PCI_MSI_ADDRESS_HI); ++ pci_addr <<= 32; ++ pci_addr |= phytium_pcie_readl(priv, fn, cap + PCI_MSI_ADDRESS_LO); ++ pci_addr &= GENMASK_ULL(63, 2); ++ ++ if (priv->irq_pci_addr != (pci_addr & ~pci_addr_mask) || (priv->irq_pci_fn != fn)) { ++ /* First region for IRQ writes. */ ++ atr_size = fls64(pci_addr_mask) - 1; ++ src_addr0 = ATR_IMPL | ((atr_size & ATR_SIZE_MASK) << ATR_SIZE_SHIFT); ++ src_addr0 |= (lower_32_bits(priv->irq_phys_addr) & SRC_ADDR_32_12_MASK); ++ src_addr1 = upper_32_bits(priv->irq_phys_addr); ++ trsl_addr0 = (lower_32_bits(pci_addr) & TRSL_ADDR_32_12_MASK); ++ trsl_addr1 = upper_32_bits(pci_addr); ++ trsl_param = TRSL_ID_PCIE_TR; ++ ++ phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_SRC_ADDR0(0), ++ src_addr0); ++ phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_SRC_ADDR1(0), ++ src_addr1); ++ phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_TRSL_ADDR0(0), ++ trsl_addr0); ++ phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_TRSL_ADDR1(0), ++ trsl_addr1); ++ phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_TRSL_PARAM(0), ++ trsl_param); ++ priv->irq_pci_addr = (pci_addr & ~pci_addr_mask); ++ priv->irq_pci_fn = fn; ++ } ++ ++ dev_dbg(priv->epc->dev.parent, "send event %d\n", data); ++ writew(data, priv->irq_cpu_addr + (pci_addr & pci_addr_mask)); ++ ++ return 0; ++} ++ ++static int 
phytium_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, ++ enum pci_epc_irq_type type, ++ u16 interrupt_num) ++{ ++ struct phytium_pcie_ep *priv = epc_get_drvdata(epc); ++ ++ switch (type) { ++ case PCI_EPC_IRQ_MSI: ++ return phytium_pcie_ep_send_msi_irq(priv, fn, interrupt_num); ++ ++ default: ++ break; ++ } ++ ++ return -EINVAL; ++} ++ ++static int phytium_pcie_ep_start(struct pci_epc *epc) ++{ ++ struct pci_epf *epf; ++ u32 cfg; ++ ++ cfg = BIT(0); ++ list_for_each_entry(epf, &epc->pci_epf, list) ++ cfg |= BIT(epf->func_no); ++ ++ list_for_each_entry(epf, &epc->pci_epf, list) ++ pci_epf_linkup(epf); ++ ++ return 0; ++} ++ ++static const struct pci_epc_ops phytium_pcie_epc_ops = { ++ .write_header = phytium_pcie_ep_write_header, ++ .set_bar = phytium_pcie_ep_set_bar, ++ .clear_bar = phytium_pcie_ep_clear_bar, ++ .map_addr = phytium_pcie_ep_map_addr, ++ .unmap_addr = phytium_pcie_ep_unmap_addr, ++ .set_msi = phytium_pcie_ep_set_msi, ++ .get_msi = phytium_pcie_ep_get_msi, ++ .raise_irq = phytium_pcie_ep_raise_irq, ++ .start = phytium_pcie_ep_start, ++}; ++ ++ ++ ++static int phytium_pcie_ep_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct phytium_pcie_ep *priv = NULL; ++ struct resource *res; ++ struct device_node *np = dev->of_node; ++ struct pci_epc *epc; ++ int ret = 0, value; ++ ++ dev_dbg(dev, "enter %s\n", __func__); ++ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); ++ if (!priv) ++ return -ENOMEM; ++ ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg"); ++ priv->reg_base = devm_ioremap_resource(dev, res); ++ if (IS_ERR(priv->reg_base)) { ++ dev_err(dev, "missing \"reg\"\n"); ++ return PTR_ERR(priv->reg_base); ++ } ++ ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem"); ++ if (!res) { ++ dev_err(dev, "missing \"mem\"\n"); ++ return -EINVAL; ++ } ++ priv->mem_res = res; ++ ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hpb"); ++ priv->hpb_base = devm_ioremap_resource(dev, res); 
++ if (IS_ERR(priv->hpb_base)) { ++ dev_err(dev, "missing \"hpb\"\n"); ++ return PTR_ERR(priv->hpb_base); ++ } ++ ++ ret = of_property_read_u32(np, "max-outbound-regions", &priv->max_regions); ++ if (ret < 0) { ++ dev_err(dev, "missing \"max-outbound-regions\"\n"); ++ return ret; ++ } ++ dev_info(dev, "%s max-outbound-regions %d\n", __func__, priv->max_regions); ++ ++ priv->ob_addr = devm_kcalloc(dev, priv->max_regions, ++ sizeof(*priv->ob_addr), GFP_KERNEL); ++ if (!priv->ob_addr) ++ return -ENOMEM; ++ ++ platform_set_drvdata(pdev, priv); ++ ++ epc = devm_pci_epc_create(dev, &phytium_pcie_epc_ops); ++ if (IS_ERR(epc)) { ++ dev_err(dev, "failed to create epc device\n"); ++ return PTR_ERR(epc); ++ } ++ ++ priv->epc = epc; ++ epc_set_drvdata(epc, priv); ++ ++ if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0) ++ epc->max_functions = 1; ++ dev_info(dev, "%s epc->max_functions %d\n", __func__, epc->max_functions); ++ ++ ++ ret = pci_epc_mem_init(epc, priv->mem_res->start, ++ resource_size(priv->mem_res)); ++ if (ret < 0) { ++ dev_err(dev, "failed to initialize the memory space\n"); ++ return ret; ++ } ++ ++ priv->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &priv->irq_phys_addr, ++ SZ_4K); ++ if (!priv->irq_cpu_addr) { ++ dev_err(dev, "failed to reserve memory space for MSI\n"); ++ ret = -ENOMEM; ++ goto err_alloc_irq_mem; ++ } ++ priv->irq_pci_addr = PHYTIUM_PCIE_EP_IRQ_PCI_ADDR_NONE; ++ /* Reserve region 0 for IRQS */ ++ set_bit(0, &priv->ob_region_map); ++ ++ value = ((lower_32_bits(priv->mem_res->start) >> C0_PREF_VALUE_SHIFT) ++ & C0_PREF_BASE_MASK) << C0_PREF_BASE_SHIFT; ++ value |= (((lower_32_bits(priv->mem_res->end) >> C0_PREF_VALUE_SHIFT) ++ & C0_PREF_LIMIT_MASK) << C0_PREF_LIMIT_SHIFT); ++ phytium_hpb_writel(priv, PHYTIUM_HPB_C0_PREF_BASE_LIMIT, value); ++ ++ value = ((upper_32_bits(priv->mem_res->start) >> C0_PREF_UP32_VALUE_SHIFT) ++ & C0_PREF_BASE_UP32_MASK) << C0_PREF_BASE_UP32_SHIFT; ++ value |= (((upper_32_bits(priv->mem_res->end) >> 
C0_PREF_UP32_VALUE_SHIFT) ++ & C0_PREF_LIMIT_UP32_MASK) << C0_PREF_LIMIT_UP32_SHIFT); ++ phytium_hpb_writel(priv, PHYTIUM_HPB_C0_PREF_BASE_LIMIT_UP32, value); ++ ++ dev_dbg(dev, "exit %s successful\n", __func__); ++ return 0; ++ ++err_alloc_irq_mem: ++ pci_epc_mem_exit(epc); ++ return ret; ++} ++ ++static int phytium_pcie_ep_remove(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct phytium_pcie_ep *priv = dev_get_drvdata(dev); ++ struct pci_epc *epc = priv->epc; ++ ++ pci_epc_mem_exit(epc); ++ ++ return 0; ++} ++ ++static const struct of_device_id phytium_pcie_ep_of_match[] = { ++ { .compatible = "phytium,d2000-pcie-ep" }, ++ { }, ++}; ++ ++static struct platform_driver phytium_pcie_ep_driver = { ++ .driver = { ++ .name = "phytium-pcie-ep", ++ .of_match_table = phytium_pcie_ep_of_match, ++ }, ++ .probe = phytium_pcie_ep_probe, ++ .remove = phytium_pcie_ep_remove, ++}; ++ ++module_platform_driver(phytium_pcie_ep_driver); ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Yang Xun "); ++MODULE_DESCRIPTION("Phytium Pcie Controller Endpoint driver"); +diff --git a/drivers/pci/controller/pcie-phytium-ep.h b/drivers/pci/controller/pcie-phytium-ep.h +new file mode 100644 +index 000000000000..27d39a222f3c +--- /dev/null ++++ b/drivers/pci/controller/pcie-phytium-ep.h +@@ -0,0 +1,95 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Phytium d2000 pcie endpoint driver ++ * ++ * Copyright (c) 2021 Phytium Limited. ++ * ++ * Author: ++ * Yang Xun ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. 
++ */ ++ ++#ifndef __PCIE_PHYTIUM_EP_H__ ++#define __PCIE_PHYTIUM_EP_H__ ++ ++#include "pcie-phytium-register.h" ++ ++#define IRQ_MAPPING_SIZE 0x1000 ++struct phytium_pcie_ep { ++ void __iomem *reg_base; ++ struct resource *mem_res; ++ void __iomem *hpb_base; ++ unsigned int max_regions; ++ unsigned long ob_region_map; ++ phys_addr_t *ob_addr; ++ phys_addr_t irq_phys_addr; ++ void __iomem *irq_cpu_addr; ++ unsigned long irq_pci_addr; ++ u8 irq_pci_fn; ++ struct pci_epc *epc; ++}; ++ ++static inline void ++phytium_pcie_writeb(struct phytium_pcie_ep *priv, u8 fn, u32 reg, u8 value) ++{ ++ pr_debug("Write 32'h%08lx 32'h%08x\n", PHYTIUM_PCIE_FUNC_BASE(fn) + reg, value); ++ writeb(value, priv->reg_base + PHYTIUM_PCIE_FUNC_BASE(fn) + reg); ++} ++ ++static inline unsigned char ++phytium_pcie_readb(struct phytium_pcie_ep *priv, u8 fn, u32 reg) ++{ ++ unsigned char value; ++ ++ value = readb(priv->reg_base + PHYTIUM_PCIE_FUNC_BASE(fn) + reg); ++ pr_debug("Read 32'h%08lx 32'h%08x\n", PHYTIUM_PCIE_FUNC_BASE(fn) + reg, value); ++ ++ return value; ++} ++ ++static inline void ++phytium_pcie_writew(struct phytium_pcie_ep *priv, u8 fn, u32 reg, u16 value) ++{ ++ pr_debug("Write 32'h%08lx 32'h%08x\n", PHYTIUM_PCIE_FUNC_BASE(fn) + reg, value); ++ writew(value, priv->reg_base + PHYTIUM_PCIE_FUNC_BASE(fn) + reg); ++} ++ ++static inline unsigned short ++phytium_pcie_readw(struct phytium_pcie_ep *priv, u8 fn, u32 reg) ++{ ++ unsigned short value; ++ ++ value = readw(priv->reg_base + PHYTIUM_PCIE_FUNC_BASE(fn) + reg); ++ pr_debug("Read 32'h%08lx 32'h%08x\n", PHYTIUM_PCIE_FUNC_BASE(fn) + reg, value); ++ ++ return value; ++} ++ ++static inline void ++phytium_pcie_writel(struct phytium_pcie_ep *priv, u8 fn, u32 reg, u32 value) ++{ ++ pr_debug("Write 32'h%08lx 32'h%08x\n", PHYTIUM_PCIE_FUNC_BASE(fn) + reg, value); ++ writel(value, priv->reg_base + PHYTIUM_PCIE_FUNC_BASE(fn) + reg); ++} ++ ++static inline unsigned int ++phytium_pcie_readl(struct phytium_pcie_ep *priv, u8 fn, u32 reg) ++{ ++ 
unsigned int value; ++ ++ value = readl(priv->reg_base + PHYTIUM_PCIE_FUNC_BASE(fn) + reg); ++ pr_debug("Read 32'h%08lx 32'h%08x\n", PHYTIUM_PCIE_FUNC_BASE(fn) + reg, value); ++ ++ return value; ++} ++ ++static inline void ++phytium_hpb_writel(struct phytium_pcie_ep *priv, u32 reg, u32 value) ++{ ++ pr_debug("Write 32'h%08x 32'h%08x\n", reg, value); ++ writel(value, priv->hpb_base + reg); ++} ++#endif +diff --git a/drivers/pci/controller/pcie-phytium-register.h b/drivers/pci/controller/pcie-phytium-register.h +new file mode 100644 +index 000000000000..7b90d416dd4b +--- /dev/null ++++ b/drivers/pci/controller/pcie-phytium-register.h +@@ -0,0 +1,87 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Phytium d2000 pcie endpoint driver ++ * ++ * Copyright (c) 2021 Phytium Limited. ++ * ++ * Author: ++ * Yang Xun ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. 
++ */ ++ ++#ifndef __PCIE_PHYTIUM_REGISTER_H__ ++#define __PCIE_PHYTIUM_REGISTER_H__ ++ ++#define PHYTIUM_PCIE_FUNC_BASE(fn) (((fn) << 14) & GENMASK(16, 14)) ++#define PHYTIUM_PCI_VENDOR_ID 0x98 ++#define PHYTIUM_PCI_DEVICE_ID 0x9a ++#define PHYTIUM_PCI_REVISION_ID 0x9c ++#define PHYTIUM_PCI_CLASS_PROG 0x9d ++#define PHYTIUM_PCI_CLASS_DEVICE 0x9e ++#define PHYTIUM_PCI_SUBSYS_VENDOR_ID 0xa0 ++#define PHYTIUM_PCI_SUBSYS_DEVICE_ID 0xa2 ++#define PHYTIUM_PCI_INTERRUPT_PIN 0xa8 ++#define INTERRUPT_PIN_MASK 0x7 ++#define MSI_DISABLE (1 << 3) ++#define MSI_NUM_MASK (0x7) ++#define MSI_NUM_SHIFT 4 ++#define MSI_MASK_SUPPORT (1 << 7) ++#define PHYTIUM_PCI_MSIX_CAP 0xaa ++ #define MSIX_DISABLE (0 << 15) ++ ++#define PHYTIUM_PCI_BAR_0 0xe4 ++#define PHYTIUM_PCI_BAR(bar_num) (0xe4 + bar_num * 4) ++#define BAR_IO_TYPE (1 << 0) ++#define BAR_MEM_TYPE (0 << 0) ++#define BAR_MEM_64BIT (1 << 2) ++#define BAR_MEM_PREFETCHABLE (1 << 3) ++#define BAR_IO_MIN_APERTURE 4 ++#define BAR_MEM_MIN_APERTURE 16 ++ ++ ++#define PHYTIUM_PCI_WIN0_BASE 0x600 ++#define PHYTIUM_PCI_WIN0_SRC_ADDR0(table) (PHYTIUM_PCI_WIN0_BASE + 0X20 * table + 0x0) ++#define ATR_IMPL 0x1 ++#define ATR_SIZE_MASK 0x3f ++#define ATR_SIZE_SHIFT 1 ++#define ATR_SIZE_ALIGN 0x1000 ++#define SRC_ADDR_32_12_MASK 0xfffff000 ++ ++#define PHYTIUM_PCI_WIN0_SRC_ADDR1(table) (PHYTIUM_PCI_WIN0_BASE + 0X20 * table + 0x4) ++#define PHYTIUM_PCI_WIN0_TRSL_ADDR0(table) (PHYTIUM_PCI_WIN0_BASE + 0X20 * table + 0x8) ++#define TRSL_ADDR_32_12_MASK 0xfffff000 ++ ++#define PHYTIUM_PCI_WIN0_TRSL_ADDR1(table) (PHYTIUM_PCI_WIN0_BASE + 0X20 * table + 0xc) ++#define PHYTIUM_PCI_WIN0_TRSL_PARAM(table) (PHYTIUM_PCI_WIN0_BASE + 0X20 * table + 0x10) ++#define TRSL_ID_IO 0x1 ++#define TRSL_ID_MASTER 0x4 ++#define TRSL_ID_PCIE_TR 0x0 ++ ++#define PHYTIUM_PCI_SLAVE0_BASE 0x800 ++#define PHYTIUM_PCI_SLAVE0_SRC_ADDR0(table) (PHYTIUM_PCI_SLAVE0_BASE + 0X20 * table + 0x0) ++#define PHYTIUM_PCI_SLAVE0_SRC_ADDR1(table) (PHYTIUM_PCI_SLAVE0_BASE + 0X20 * table + 
0x4) ++#define PHYTIUM_PCI_SLAVE0_TRSL_ADDR0(table) (PHYTIUM_PCI_SLAVE0_BASE + 0X20 * table + 0x8) ++#define PHYTIUM_PCI_SLAVE0_TRSL_ADDR1(table) (PHYTIUM_PCI_SLAVE0_BASE + 0X20 * table + 0xc) ++#define PHYTIUM_PCI_SLAVE0_TRSL_PARAM(table) (PHYTIUM_PCI_SLAVE0_BASE + 0X20 * table + 0x10) ++ ++#define PHYTIUM_PCI_CF_MSI_BASE 0x10e0 ++#define PHYTIUM_PCI_CF_MSI_CONTROL 0x10e2 ++ ++#define PHYTIUM_HPB_C0_PREF_BASE_LIMIT 0xa30 ++ #define C0_PREF_LIMIT_MASK 0xfff ++ #define C0_PREF_LIMIT_SHIFT 20 ++ #define C0_PREF_BASE_MASK 0xfff ++ #define C0_PREF_BASE_SHIFT 4 ++ #define C0_PREF_VALUE_SHIFT 20 ++#define PHYTIUM_HPB_C0_PREF_BASE_LIMIT_UP32 0xa34 ++ #define C0_PREF_LIMIT_UP32_MASK 0xff ++ #define C0_PREF_LIMIT_UP32_SHIFT 8 ++ #define C0_PREF_BASE_UP32_MASK 0xff ++ #define C0_PREF_BASE_UP32_SHIFT 0 ++ #define C0_PREF_UP32_VALUE_SHIFT 0 ++#endif ++ ++ +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c +index 6bc27b7fd452..4109a2fa1df6 100644 +--- a/drivers/pci/quirks.c ++++ b/drivers/pci/quirks.c +@@ -4858,6 +4858,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0142, quirk_no_ext_tags); + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0144, quirk_no_ext_tags); + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0420, quirk_no_ext_tags); + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PHYTIUM, 0xdc3a, quirk_no_ext_tags); + + #ifdef CONFIG_PCI_ATS + /* +diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig +index 7d7be60a2413..3e4effbf31ef 100644 +--- a/drivers/rtc/Kconfig ++++ b/drivers/rtc/Kconfig +@@ -1793,6 +1793,16 @@ config RTC_DRV_RTD119X + If you say yes here, you get support for the RTD1295 SoC + Real Time Clock. + ++config RTC_DRV_PHYTIUM ++ tristate "Phytium RTC" ++ depends on ARCH_PHYTIUM ++ default y if ARCH_PHYTIUM ++ help ++ Say yes here to support the Phytium SoC real time clock. 
++ ++ This driver can also be built as a module, if so, the module ++ will be called "rtc-phytium". ++ + comment "HID Sensor RTC drivers" + + config RTC_DRV_HID_SENSOR_TIME +diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile +index 5ff2fc0c361a..289447233c6b 100644 +--- a/drivers/rtc/Makefile ++++ b/drivers/rtc/Makefile +@@ -120,6 +120,7 @@ obj-$(CONFIG_RTC_DRV_PCF85363) += rtc-pcf85363.o + obj-$(CONFIG_RTC_DRV_PCF8523) += rtc-pcf8523.o + obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o + obj-$(CONFIG_RTC_DRV_PCF8583) += rtc-pcf8583.o ++obj-$(CONFIG_RTC_DRV_PHYTIUM) += rtc-phytium.o + obj-$(CONFIG_RTC_DRV_PIC32) += rtc-pic32.o + obj-$(CONFIG_RTC_DRV_PL030) += rtc-pl030.o + obj-$(CONFIG_RTC_DRV_PL031) += rtc-pl031.o +diff --git a/drivers/rtc/rtc-phytium.c b/drivers/rtc/rtc-phytium.c +new file mode 100644 +index 000000000000..97a560dfa3af +--- /dev/null ++++ b/drivers/rtc/rtc-phytium.c +@@ -0,0 +1,331 @@ ++/* ++ * Phytium Real Time Clock Driver ++ * ++ * Copyright (c) 2019, Phytium Technology Co., Ltd. ++ * ++ * Chen Baozi ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. 
++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define RTC_CMR 0x04 ++#define RTC_AES_SEL 0x08 ++#define RTC_AES_SEL_COUNTER 0x100 ++#define RTC_CCR 0x0C ++#define RTC_CCR_IE BIT(0) ++#define RTC_CCR_MASK BIT(1) ++#define RTC_CCR_EN BIT(2) ++#define RTC_CCR_WEN BIT(3) ++#define RTC_STAT 0x10 ++#define RTC_STAT_BIT BIT(0) ++#define RTC_RSTAT 0x14 ++#define RTC_EOI 0x18 ++#define RTC_VER 0x1C ++#define RTC_CDR_LOW 0x20 ++#define RTC_CCVR 0x24 ++#define RTC_CLR_LOW 0x28 ++#define RTC_CLR 0x2c ++#define RTC_COUNTER_HB_OFFSET 15 ++#define RTC_COUNTER_LB_MASK 0x7fff ++ ++spinlock_t spinlock_phytium_rtc; ++ ++struct phytium_rtc_dev { ++ struct rtc_device *rtc; ++ struct device *dev; ++ unsigned long alarm_time; ++ void __iomem *csr_base; ++ struct clk *clk; ++ unsigned int irq_wake; ++ unsigned int irq_enabled; ++}; ++ ++static int phytium_rtc_read_time(struct device *dev, struct rtc_time *tm) ++{ ++ struct phytium_rtc_dev *pdata = dev_get_drvdata(dev); ++ ++ unsigned long counter = 0; ++ unsigned long tmp = 0; ++ ++ spin_lock(&spinlock_phytium_rtc); ++ writel(RTC_AES_SEL_COUNTER, pdata->csr_base + RTC_AES_SEL); ++ counter = readl(pdata->csr_base + RTC_CCVR); ++ tmp = readl(pdata->csr_base + RTC_CDR_LOW); ++ printk("%s_%d:counter:0x%lx\n", __func__, __LINE__, counter); ++ spin_unlock(&spinlock_phytium_rtc); ++ ++ rtc_time_to_tm(counter, tm); ++ return rtc_valid_tm(tm); ++} ++ ++static int phytium_rtc_set_mmss(struct device *dev, unsigned long secs) ++{ ++ struct phytium_rtc_dev *pdata = dev_get_drvdata(dev); ++ unsigned long counter = 0; ++ unsigned long tmp = 0; ++ ++ spin_lock(&spinlock_phytium_rtc); ++ ++ writel(RTC_AES_SEL_COUNTER, pdata->csr_base + RTC_AES_SEL); ++ writel(0x00000000, pdata->csr_base + RTC_CLR_LOW); ++ writel((u32)secs, pdata->csr_base + RTC_CLR); ++ writel(RTC_AES_SEL_COUNTER, pdata->csr_base + RTC_AES_SEL); ++ counter = readl(pdata->csr_base + RTC_CLR); ++ tmp = 
readl(pdata->csr_base + RTC_CLR_LOW); ++ ++ spin_unlock(&spinlock_phytium_rtc); ++ ++ return 0; ++} ++ ++static int phytium_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) ++{ ++ struct phytium_rtc_dev *pdata = dev_get_drvdata(dev); ++ ++ rtc_time_to_tm(pdata->alarm_time, &alrm->time); ++ alrm->enabled = readl(pdata->csr_base + RTC_CCR) & RTC_CCR_IE; ++ ++ return 0; ++} ++ ++static int phytium_rtc_alarm_irq_enable(struct device *dev, u32 enabled) ++{ ++ struct phytium_rtc_dev *pdata = dev_get_drvdata(dev); ++ u32 ccr; ++ ++ ccr = readl(pdata->csr_base + RTC_CCR); ++ if (enabled) { ++ ccr &= ~RTC_CCR_MASK; ++ ccr |= RTC_CCR_IE; ++ } else { ++ ccr &= ~RTC_CCR_IE; ++ ccr |= RTC_CCR_MASK; ++ } ++ writel(ccr, pdata->csr_base + RTC_CCR); ++ ++ return 0; ++} ++ ++static int phytium_rtc_alarm_irq_enabled(struct device *dev) ++{ ++ struct phytium_rtc_dev *pdata = dev_get_drvdata(dev); ++ ++ return readl(pdata->csr_base + RTC_CCR) & RTC_CCR_IE ? 1: 0; ++} ++ ++static int phytium_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) ++{ ++ struct phytium_rtc_dev *pdata = dev_get_drvdata(dev); ++ unsigned long rtc_time; ++ unsigned long alarm_time; ++ ++ rtc_time = readl(pdata->csr_base + RTC_CCVR); ++ rtc_tm_to_time(&alrm->time, &alarm_time); ++ ++ pdata->alarm_time = alarm_time; ++ writel((u32) pdata->alarm_time, pdata->csr_base + RTC_CMR); ++ ++ phytium_rtc_alarm_irq_enable(dev, alrm->enabled); ++ ++ return 0; ++} ++ ++static const struct rtc_class_ops phytium_rtc_ops = { ++ .read_time = phytium_rtc_read_time, ++ .set_mmss = phytium_rtc_set_mmss, ++ .read_alarm = phytium_rtc_read_alarm, ++ .set_alarm = phytium_rtc_set_alarm, ++ .alarm_irq_enable = phytium_rtc_alarm_irq_enable, ++}; ++ ++static irqreturn_t phytium_rtc_interrupt(int irq, void *id) ++{ ++ struct phytium_rtc_dev *pdata = (struct phytium_rtc_dev *) id; ++ ++ /* Check if interrupt asserted */ ++ if (!(readl(pdata->csr_base + RTC_STAT) & RTC_STAT_BIT)) ++ return IRQ_NONE; ++ ++ /* Clear interrupt 
*/ ++ readl(pdata->csr_base + RTC_EOI); ++ ++ rtc_update_irq(pdata->rtc, 1, RTC_IRQF | RTC_AF); ++ ++ return IRQ_HANDLED; ++} ++ ++static int phytium_rtc_probe(struct platform_device *pdev) ++{ ++ struct phytium_rtc_dev *pdata; ++ struct resource *res; ++ int ret; ++ int irq; ++ ++ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); ++ if (!pdata) ++ return -ENOMEM; ++ platform_set_drvdata(pdev, pdata); ++ pdata->dev = &pdev->dev; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ pdata->csr_base = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(pdata->csr_base)) ++ return PTR_ERR(pdata->csr_base); ++ ++ irq = platform_get_irq(pdev, 0); ++ if (irq < 0) { ++ dev_err(&pdev->dev, "No IRQ resource\n"); ++ return irq; ++ } ++ ret = devm_request_irq(&pdev->dev, irq, phytium_rtc_interrupt, 0, ++ dev_name(&pdev->dev), pdata); ++ if (ret) { ++ dev_err(&pdev->dev, "Could not request IRQ\n"); ++ return ret; ++ } ++ ++#ifndef CONFIG_ACPI ++ pdata->clk = devm_clk_get(&pdev->dev, NULL); ++ if (IS_ERR(pdata->clk)) { ++ dev_err(&pdev->dev, "Couldn't get the clock for RTC\n"); ++ return -ENODEV; ++ } ++ ++ ret = clk_prepare_enable(pdata->clk); ++ if (ret) ++ return ret; ++#endif ++ ++ spin_lock_init(&spinlock_phytium_rtc); ++ ++ /* Turn on the clock and the crystal */ ++ writel(RTC_CCR_EN, pdata->csr_base + RTC_CCR); ++ ++ ret = device_init_wakeup(&pdev->dev, 1); ++ if (ret) { ++ clk_disable_unprepare(pdata->clk); ++ return ret; ++ } ++ ++ pdata->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, ++ &phytium_rtc_ops, THIS_MODULE); ++ if (IS_ERR(pdata->rtc)) { ++ clk_disable_unprepare(pdata->clk); ++ return PTR_ERR(pdata->rtc); ++ } ++ ++ /* HW does not support update faster than 1 seconds */ ++ pdata->rtc->uie_unsupported = 1; ++ ++ return 0; ++} ++ ++static int phytium_rtc_remove(struct platform_device *pdev) ++{ ++ struct phytium_rtc_dev *pdata = platform_get_drvdata(pdev); ++ ++ phytium_rtc_alarm_irq_enable(&pdev->dev, 0); ++ 
device_init_wakeup(&pdev->dev, 0); ++ clk_disable_unprepare(pdata->clk); ++ return 0; ++} ++ ++#ifdef CONFIG_PM_SLEEP ++static int phytium_rtc_suspend(struct device *dev) ++{ ++ struct platform_device *pdev = to_platform_device(dev); ++ struct phytium_rtc_dev *pdata = platform_get_drvdata(pdev); ++ int irq; ++ ++ /* ++ * If this RTC alarm will be used for waking the system up, ++ * don't disable it of course. Else we just disable the alarm ++ * and await suspension. ++ */ ++ irq = platform_get_irq(pdev, 0); ++ if (device_may_wakeup(&pdev->dev)) { ++ if (!enable_irq_wake(irq)) ++ pdata->irq_wake = 1; ++ } else { ++ pdata->irq_enabled = phytium_rtc_alarm_irq_enabled(dev); ++ phytium_rtc_alarm_irq_enable(dev, 0); ++ clk_disable_unprepare(pdata->clk); ++ } ++ ++ return 0; ++} ++ ++static int phytium_rtc_resume(struct device *dev) ++{ ++ struct platform_device *pdev = to_platform_device(dev); ++ struct phytium_rtc_dev *pdata = platform_get_drvdata(pdev); ++ int irq; ++ int rc; ++ ++ irq = platform_get_irq(pdev, 0); ++ if (device_may_wakeup(&pdev->dev)) { ++ if (pdata->irq_wake) { ++ disable_irq_wake(irq); ++ pdata->irq_wake = 0; ++ } ++ } else { ++ rc = clk_prepare_enable(pdata->clk); ++ if (rc) { ++ dev_err(dev, "Unable to enable clock error %d\n", rc); ++ return rc; ++ } ++ phytium_rtc_alarm_irq_enable(dev, pdata->irq_enabled); ++ } ++ ++ return 0; ++} ++#endif ++ ++static SIMPLE_DEV_PM_OPS(phytium_rtc_pm_ops, phytium_rtc_suspend, phytium_rtc_resume); ++ ++#ifdef CONFIG_OF ++static const struct of_device_id phytium_rtc_of_match[] = { ++ { .compatible = "phytium,rtc" }, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, phytium_rtc_of_match); ++#endif ++ ++#ifdef CONFIG_ACPI ++static const struct acpi_device_id phytium_rtc_acpi_match[] = { ++ { "PHYT0002", 0 }, ++ { } ++}; ++#endif ++ ++static struct platform_driver phytium_rtc_driver = { ++ .probe = phytium_rtc_probe, ++ .remove = phytium_rtc_remove, ++ .driver = { ++ .name = "phytium-rtc", ++ .pm = &phytium_rtc_pm_ops, ++ 
.of_match_table = of_match_ptr(phytium_rtc_of_match), ++ .acpi_match_table = ACPI_PTR(phytium_rtc_acpi_match), ++ }, ++}; ++ ++module_platform_driver(phytium_rtc_driver); ++ ++MODULE_DESCRIPTION("Phytium RTC driver"); ++MODULE_AUTHOR("Chen Baozi "); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig +index 671d078349cc..b6b253727784 100644 +--- a/drivers/spi/Kconfig ++++ b/drivers/spi/Kconfig +@@ -454,6 +454,31 @@ config SPI_ORION + This enables using the SPI master controller on the Orion + and MVEBU chips. + ++config SPI_PHYTIUM ++ tristate ++ depends on ARCH_PHYTIUM || COMPILE_TEST ++ ++config SPI_PHYTIUM_PLAT ++ tristate "Phytium SPI controller platform support" ++ select SPI_PHYTIUM ++ help ++ This selects a platform driver for Phytium SPI controller. ++ ++ If you say yes to this option, support will be included for ++ FT-2000/4 and D2000 families of SPI controller. ++ ++config SPI_PHYTIUM_PCI ++ tristate "Phytium SPI controller PCI support" ++ depends on PCI ++ select SPI_PHYTIUM ++ help ++ This selects a PCI driver for Phytium SPI controller. ++ ++ If you say yes to this option, support will be included for ++ Phytium X100 chipset of SPI controller. ++ ++ If unsure, say N. 
++ + config SPI_PIC32 + tristate "Microchip PIC32 series SPI" + depends on MACH_PIC32 || COMPILE_TEST +diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile +index a90d55970036..01d74f2a1afe 100644 +--- a/drivers/spi/Makefile ++++ b/drivers/spi/Makefile +@@ -35,6 +35,9 @@ obj-$(CONFIG_SPI_DESIGNWARE) += spi-dw.o + obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o + obj-$(CONFIG_SPI_DW_PCI) += spi-dw-midpci.o + spi-dw-midpci-objs := spi-dw-pci.o spi-dw-mid.o ++obj-$(CONFIG_SPI_PHYTIUM) += spi-phytium.o ++obj-$(CONFIG_SPI_PHYTIUM_PLAT) += spi-phytium-plat.o ++obj-$(CONFIG_SPI_PHYTIUM_PCI) += spi-phytium-pci.o + obj-$(CONFIG_SPI_EFM32) += spi-efm32.o + obj-$(CONFIG_SPI_EP93XX) += spi-ep93xx.o + obj-$(CONFIG_SPI_FALCON) += spi-falcon.o +diff --git a/drivers/spi/spi-phytium-pci.c b/drivers/spi/spi-phytium-pci.c +new file mode 100644 +index 000000000000..5a856b317fc8 +--- /dev/null ++++ b/drivers/spi/spi-phytium-pci.c +@@ -0,0 +1,124 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Phytium SPI core controller PCI driver. ++ * ++ * Copyright (c) 2019-2021, Phytium Corporation. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "spi-phytium.h" ++ ++#define DRIVER_NAME "phytium_spi_pci" ++ ++static int phytium_spi_pci_probe(struct pci_dev *pdev, ++ const struct pci_device_id *id) ++{ ++ struct phytium_spi *fts; ++ int pci_bar = 0; ++ int ret; ++ ++ fts = devm_kzalloc(&pdev->dev, sizeof(struct phytium_spi), ++ GFP_KERNEL); ++ if (!fts) ++ return -ENOMEM; ++ ++ ret = pcim_enable_device(pdev); ++ if (ret) ++ return ret; ++ ++ ret = pcim_iomap_regions(pdev, 1 << pci_bar, pci_name(pdev)); ++ if (ret) { ++ dev_err(&pdev->dev, "pci iomap failed?\n"); ++ return ret; ++ } ++ ++ fts->regs = pcim_iomap_table(pdev)[pci_bar]; ++ if (IS_ERR(fts->regs)) { ++ dev_err(&pdev->dev, "SPI region map failed\n"); ++ return PTR_ERR(fts->regs); ++ } ++ ++ fts->irq = pdev->irq; ++ if (fts->irq < 0) { ++ dev_err(&pdev->dev, "no irq resource?\n"); ++ return fts->irq; /* -ENXIO */ ++ } ++ ++ fts->bus_num = -1; ++ ++ fts->max_freq = 48000000; ++ ++ fts->num_cs = 4; ++ ++ fts->global_cs = 1; ++ ++ ret = phytium_spi_add_host(&pdev->dev, fts); ++ if (ret) ++ return ret; ++ ++ pci_set_drvdata(pdev, fts); ++ return 0; ++} ++ ++static void phytium_spi_pci_remove(struct pci_dev *pdev) ++{ ++ struct phytium_spi *fts = pci_get_drvdata(pdev); ++ ++ phytium_spi_remove_host(fts); ++} ++ ++ ++#ifdef CONFIG_PM_SLEEP ++static int spi_suspend(struct device *dev) ++{ ++ struct spi_master *master = dev_get_drvdata(dev); ++ struct phytium_spi *fts = spi_master_get_devdata(master); ++ ++ return phytium_spi_suspend_host(fts); ++} ++ ++static int spi_resume(struct device *dev) ++{ ++ struct spi_master *master = dev_get_drvdata(dev); ++ struct phytium_spi *fts = spi_master_get_devdata(master); ++ ++ return phytium_spi_resume_host(fts); ++} ++#endif ++ ++static SIMPLE_DEV_PM_OPS(phytium_spi_pm_ops, spi_suspend, spi_resume); ++ ++static const struct pci_device_id 
phytium_device_pci_tbl[] = { ++ { PCI_VDEVICE(PHYTIUM, 0xdc2c) }, ++ {}, ++}; ++ ++static struct pci_driver phytium_spi_pci_driver = { ++ .name = DRIVER_NAME, ++ .id_table = phytium_device_pci_tbl, ++ .probe = phytium_spi_pci_probe, ++ .remove = phytium_spi_pci_remove, ++ .driver = { ++ .pm = &phytium_spi_pm_ops, ++ } ++}; ++ ++module_pci_driver(phytium_spi_pci_driver); ++ ++MODULE_AUTHOR("Yiqun Zhang "); ++MODULE_DESCRIPTION("PCI Driver for Phytium SPI controller core"); ++MODULE_LICENSE("GPL v2"); +diff --git a/drivers/spi/spi-phytium-plat.c b/drivers/spi/spi-phytium-plat.c +new file mode 100644 +index 000000000000..04c2b5d2fb95 +--- /dev/null ++++ b/drivers/spi/spi-phytium-plat.c +@@ -0,0 +1,206 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Phytium SPI core controller platform driver. ++ * ++ * Copyright (c) 2019-2021, Phytium Corporation. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "spi-phytium.h" ++ ++#define DRIVER_NAME "phytium_spi" ++ ++struct phytium_spi_clk { ++ struct phytium_spi fts; ++ struct clk *clk; ++}; ++ ++static int phytium_spi_probe(struct platform_device *pdev) ++{ ++ struct phytium_spi_clk *ftsc; ++ struct phytium_spi *fts; ++ struct resource *mem; ++ int ret; ++ int num_cs; ++ int cs_gpio; ++ int i; ++ ++ ftsc = devm_kzalloc(&pdev->dev, sizeof(struct phytium_spi_clk), ++ GFP_KERNEL); ++ if (!ftsc) ++ return -ENOMEM; ++ ++ fts = &ftsc->fts; ++ ++ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!mem) { ++ dev_err(&pdev->dev, "no mem resource?\n"); ++ return -EINVAL; ++ } ++ ++ fts->regs = devm_ioremap_resource(&pdev->dev, mem); ++ if (IS_ERR(fts->regs)) { ++ dev_err(&pdev->dev, "SPI region map failed\n"); ++ return PTR_ERR(fts->regs); ++ } ++ ++ fts->irq = platform_get_irq(pdev, 0); ++ if (fts->irq < 0) { ++ dev_err(&pdev->dev, "no irq resource?\n"); ++ return 
fts->irq; /* -ENXIO */ ++ } ++ ++ if (pdev->dev.of_node) { ++ ftsc->clk = devm_clk_get(&pdev->dev, NULL); ++ ++ if (IS_ERR(ftsc->clk)) ++ return PTR_ERR(ftsc->clk); ++ ret = clk_prepare_enable(ftsc->clk); ++ if (ret) ++ return ret; ++ ++ fts->max_freq = clk_get_rate(ftsc->clk); ++ } else if (has_acpi_companion(&pdev->dev)) { ++ fts->max_freq = 48000000; ++ } ++ ++ fts->bus_num = pdev->id; ++ device_property_read_u32(&pdev->dev, "reg-io-width", &fts->reg_io_width); ++ ++ num_cs = 4; ++ ++ device_property_read_u32(&pdev->dev, "num-cs", &num_cs); ++ ++ fts->num_cs = num_cs; ++ ++ if (pdev->dev.of_node) { ++ int i; ++ ++ for (i = 0; i < fts->num_cs; i++) { ++ cs_gpio = of_get_named_gpio(pdev->dev.of_node, ++ "cs-gpios", i); ++ ++ if (cs_gpio == -EPROBE_DEFER) { ++ ret = cs_gpio; ++ goto out; ++ } ++ ++ if (gpio_is_valid(cs_gpio)) { ++ ret = devm_gpio_request(&pdev->dev, cs_gpio, ++ dev_name(&pdev->dev)); ++ if (ret) ++ goto out; ++ } ++ } ++ } else if(has_acpi_companion(&pdev->dev)) { ++ int n; ++ int *cs; ++ struct gpio_desc *gpiod; ++ ++ n = gpiod_count(&pdev->dev, "cs"); ++ ++ cs = devm_kcalloc(&pdev->dev, n, sizeof(int), GFP_KERNEL); ++ fts->cs = cs; ++ ++ for (i = 0; i < n; i++) { ++ gpiod = devm_gpiod_get_index_optional(&pdev->dev, "cs", i, ++ GPIOD_OUT_LOW); ++ ++ if (IS_ERR(gpiod)) { ++ ret = PTR_ERR(gpiod); ++ goto out; ++ } ++ ++ cs_gpio = desc_to_gpio(gpiod); ++ cs[i] = cs_gpio; ++ } ++ } ++ ++ fts->global_cs = device_get_match_data(&pdev->dev); ++ ++ ret = phytium_spi_add_host(&pdev->dev, fts); ++ if (ret) ++ goto out; ++ ++ platform_set_drvdata(pdev, ftsc); ++ return 0; ++ ++out: ++ clk_disable_unprepare(ftsc->clk); ++ return ret; ++} ++ ++static int phytium_spi_remove(struct platform_device *pdev) ++{ ++ struct phytium_spi_clk *ftsc = platform_get_drvdata(pdev); ++ ++ phytium_spi_remove_host(&ftsc->fts); ++ clk_disable_unprepare(ftsc->clk); ++ ++ return 0; ++} ++ ++#ifdef CONFIG_PM_SLEEP ++static int spi_suspend(struct device *dev) ++{ ++ struct 
spi_master *master = dev_get_drvdata(dev); ++ struct phytium_spi *fts = spi_master_get_devdata(master); ++ ++ return phytium_spi_suspend_host(fts); ++} ++ ++static int spi_resume(struct device *dev) ++{ ++ struct spi_master *master = dev_get_drvdata(dev); ++ struct phytium_spi *fts = spi_master_get_devdata(master); ++ ++ return phytium_spi_resume_host(fts); ++} ++#endif ++ ++static SIMPLE_DEV_PM_OPS(phytium_spi_pm_ops, spi_suspend, spi_resume); ++ ++static const struct of_device_id phytium_spi_of_match[] = { ++ { .compatible = "phytium,spi", .data = (void *)0 }, ++ { /* end of table */} ++}; ++MODULE_DEVICE_TABLE(of, phytium_spi_of_match); ++ ++static const struct acpi_device_id phytium_spi_acpi_match[] = { ++ {"PHYT000E", 0}, ++ {} ++}; ++MODULE_DEVICE_TABLE(acpi, phytium_spi_acpi_match); ++ ++static struct platform_driver phytium_spi_driver = { ++ .probe = phytium_spi_probe, ++ .remove = phytium_spi_remove, ++ .driver = { ++ .name = DRIVER_NAME, ++ .of_match_table = of_match_ptr(phytium_spi_of_match), ++ .acpi_match_table = ACPI_PTR(phytium_spi_acpi_match), ++ .pm = &phytium_spi_pm_ops, ++ }, ++}; ++module_platform_driver(phytium_spi_driver); ++ ++MODULE_AUTHOR("Yiqun Zhang "); ++MODULE_DESCRIPTION("Platform Driver for Phytium SPI controller core"); ++MODULE_LICENSE("GPL v2"); +diff --git a/drivers/spi/spi-phytium.c b/drivers/spi/spi-phytium.c +new file mode 100644 +index 000000000000..aa1859ebe755 +--- /dev/null ++++ b/drivers/spi/spi-phytium.c +@@ -0,0 +1,528 @@ ++/* ++ * Phytium SPI core controller driver. ++ * ++ * Copyright (c) 2019-2021, Phytium Corporation. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms and conditions of the GNU General Public License, ++ * version 2, as published by the Free Software Foundation. 
++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ * more details. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "spi-phytium.h" ++ ++static inline u32 phytium_readl(struct phytium_spi *fts, u32 offset) ++{ ++ return __raw_readl(fts->regs + offset); ++} ++ ++static inline u16 phytium_readw(struct phytium_spi *fts, u32 offset) ++{ ++ return __raw_readw(fts->regs + offset); ++} ++ ++static inline void phytium_writel(struct phytium_spi *fts, u32 offset, u32 val) ++{ ++ __raw_writel(val, fts->regs + offset); ++} ++ ++static inline void phytium_writew(struct phytium_spi *fts, u32 offset, u16 val) ++{ ++ __raw_writew(val, fts->regs + offset); ++} ++ ++static inline u32 phytium_read_io_reg(struct phytium_spi *fts, u32 offset) ++{ ++ switch (fts->reg_io_width) { ++ case 2: ++ return phytium_readw(fts, offset); ++ case 4: ++ default: ++ return phytium_readl(fts, offset); ++ } ++} ++ ++static inline void phytium_write_io_reg(struct phytium_spi *fts, u32 offset, u32 val) ++{ ++ switch (fts->reg_io_width) { ++ case 2: ++ phytium_writew(fts, offset, val); ++ break; ++ case 4: ++ default: ++ phytium_writel(fts, offset, val); ++ break; ++ } ++} ++ ++static inline void spi_enable_chip(struct phytium_spi *fts, int enable) ++{ ++ phytium_writel(fts, SSIENR, (enable ? 
1 : 0)); ++} ++ ++static inline void spi_set_clk(struct phytium_spi *fts, u16 div) ++{ ++ phytium_writel(fts, BAUDR, div); ++} ++ ++static inline void spi_mask_intr(struct phytium_spi *fts, u32 mask) ++{ ++ u32 new_mask; ++ ++ new_mask = phytium_readl(fts, IMR) & ~mask; ++ phytium_writel(fts, IMR, new_mask); ++} ++ ++static inline void spi_umask_intr(struct phytium_spi *fts, u32 mask) ++{ ++ u32 new_mask; ++ ++ new_mask = phytium_readl(fts, IMR) | mask; ++ phytium_writel(fts, IMR, new_mask); ++} ++ ++static inline void spi_global_cs(struct phytium_spi *fts) ++{ ++ u32 global_cs_en, mask, setmask; ++ ++ mask = GENMASK(fts->num_cs-1, 0) << fts->num_cs; ++ setmask = ~GENMASK(fts->num_cs-1, 0); ++ global_cs_en = (phytium_readl(fts, GCSR) | mask) & setmask; ++ ++ phytium_writel(fts, GCSR, global_cs_en); ++} ++ ++static inline void spi_reset_chip(struct phytium_spi *fts) ++{ ++ spi_enable_chip(fts, 0); ++ if (fts->global_cs) ++ spi_global_cs(fts); ++ spi_mask_intr(fts, 0xff); ++ spi_enable_chip(fts, 1); ++} ++ ++static inline void spi_shutdown_chip(struct phytium_spi *fts) ++{ ++ spi_enable_chip(fts, 0); ++ spi_set_clk(fts, 0); ++} ++ ++struct phytium_spi_chip { ++ u8 poll_mode; ++ u8 type; ++ void (*cs_control)(u32 command); ++}; ++ ++struct chip_data { ++ u8 cs; ++ u8 tmode; ++ u8 type; ++ ++ u8 poll_mode; ++ ++ u16 clk_div; ++ u32 speed_hz; ++ void (*cs_control)(u32 command); ++}; ++ ++static void phytium_spi_set_cs(struct spi_device *spi, bool enable) ++{ ++ struct phytium_spi *fts = spi_master_get_devdata(spi->master); ++ struct chip_data *chip = spi_get_ctldata(spi); ++ u32 origin; ++ ++ if (chip && chip->cs_control) ++ chip->cs_control(!enable); ++ ++ if (!enable) { ++ phytium_writel(fts, SER, BIT(spi->chip_select)); ++ if (fts->global_cs) { ++ origin = phytium_readl(fts, GCSR); ++ phytium_writel(fts, GCSR, origin | (1 << spi->chip_select)); ++ } ++ } else { ++ if (fts->global_cs) { ++ origin = phytium_readl(fts, GCSR); ++ phytium_writel(fts, GCSR, origin & ~(1 << 
spi->chip_select)); ++ } ++ } ++} ++ ++static inline u32 tx_max(struct phytium_spi *fts) ++{ ++ u32 tx_left, tx_room, rxtx_gap; ++ ++ tx_left = (fts->tx_end - fts->tx) / fts->n_bytes; ++ tx_room = fts->fifo_len - phytium_readl(fts, TXFLR); ++ ++ rxtx_gap = ((fts->rx_end - fts->rx) - (fts->tx_end - fts->tx)) ++ / fts->n_bytes; ++ ++ return min3(tx_left, tx_room, (u32) (fts->fifo_len - rxtx_gap)); ++} ++ ++static inline u32 rx_max(struct phytium_spi *fts) ++{ ++ u32 rx_left = (fts->rx_end - fts->rx) / fts->n_bytes; ++ ++ return min_t(u32, rx_left, phytium_readl(fts, RXFLR)); ++} ++ ++static void phytium_writer(struct phytium_spi *fts) ++{ ++ u32 max = tx_max(fts); ++ u16 txw = 0; ++ ++ while (max--) { ++ if (fts->tx_end - fts->len) { ++ if (fts->n_bytes == 1) ++ txw = *(u8 *)(fts->tx); ++ else ++ txw = *(u16 *)(fts->tx); ++ } ++ phytium_write_io_reg(fts, DR, txw); ++ fts->tx += fts->n_bytes; ++ } ++} ++ ++static void phytium_reader(struct phytium_spi *fts) ++{ ++ u32 max = rx_max(fts); ++ u16 rxw; ++ ++ while (max--) { ++ rxw = phytium_read_io_reg(fts, DR); ++ if (fts->rx_end - fts->len) { ++ if (fts->n_bytes == 1) ++ *(u8 *)(fts->rx) = rxw; ++ else ++ *(u16 *)(fts->rx) = rxw; ++ } ++ fts->rx += fts->n_bytes; ++ } ++} ++ ++static void int_error_stop(struct phytium_spi *fts, const char *msg) ++{ ++ spi_reset_chip(fts); ++ ++ dev_err(&fts->master->dev, "%s\n", msg); ++ fts->master->cur_msg->status = -EIO; ++ spi_finalize_current_transfer(fts->master); ++} ++ ++static irqreturn_t interrupt_transfer(struct phytium_spi *fts) ++{ ++ u16 irq_status = phytium_readl(fts, ISR); ++ ++ if (irq_status & (INT_TXOI | INT_RXOI | INT_RXUI)) { ++ phytium_readl(fts, ICR); ++ int_error_stop(fts, "interrupt_transfer: fifo overrun/underrun"); ++ return IRQ_HANDLED; ++ } ++ ++ phytium_reader(fts); ++ if (fts->rx_end == fts->rx) { ++ spi_mask_intr(fts, INT_TXEI); ++ spi_finalize_current_transfer(fts->master); ++ return IRQ_HANDLED; ++ } ++ if (irq_status & INT_TXEI) { ++ spi_mask_intr(fts, 
INT_TXEI); ++ phytium_writer(fts); ++ spi_umask_intr(fts, INT_TXEI); ++ } ++ ++ return IRQ_HANDLED; ++} ++ ++static irqreturn_t phytium_spi_irq(int irq, void *dev_id) ++{ ++ struct spi_master *master = dev_id; ++ struct phytium_spi *fts = spi_master_get_devdata(master); ++ u16 irq_status = phytium_readl(fts, ISR) & 0x3f; ++ ++ if (!irq_status) ++ return IRQ_NONE; ++ ++ if (!master->cur_msg) { ++ spi_mask_intr(fts, INT_TXEI); ++ return IRQ_HANDLED; ++ } ++ ++ if (fts->transfer_handler) ++ return fts->transfer_handler(fts); ++ else ++ return IRQ_HANDLED; ++} ++ ++static int poll_transfer(struct phytium_spi *fts) ++{ ++ do { ++ phytium_writer(fts); ++ phytium_reader(fts); ++ cpu_relax(); ++ } while (fts->rx_end > fts->rx); ++ ++ return 0; ++} ++ ++static int phytium_spi_transfer_one(struct spi_master *master, ++ struct spi_device *spi, struct spi_transfer *transfer) ++{ ++ struct phytium_spi *fts = spi_master_get_devdata(master); ++ struct chip_data *chip = spi_get_ctldata(spi); ++ u8 imask = 0; ++ u16 txlevel = 0; ++ u16 clk_div; ++ u32 cr0; ++ ++ fts->tx = (void *)transfer->tx_buf; ++ fts->tx_end = fts->tx + transfer->len; ++ fts->rx = transfer->rx_buf; ++ fts->rx_end = fts->rx + transfer->len; ++ fts->len = transfer->len; ++ ++ spi_enable_chip(fts, 0); ++ ++ if (transfer->speed_hz != chip->speed_hz) { ++ clk_div = (fts->max_freq / transfer->speed_hz + 1) & 0xfffe; ++ ++ chip->speed_hz = transfer->speed_hz; ++ chip->clk_div = clk_div; ++ ++ spi_set_clk(fts, chip->clk_div); ++ } ++ ++ if (transfer->bits_per_word == 8) { ++ fts->n_bytes = 1; ++ } else if (transfer->bits_per_word == 16) { ++ fts->n_bytes = 2; ++ } else { ++ return -EINVAL; ++ } ++ ++ cr0 = (transfer->bits_per_word - 1) ++ | (chip->type << FRF_OFFSET) ++ | (spi->mode << MODE_OFFSET) ++ | (chip->tmode << TMOD_OFFSET); ++ ++ if (chip->cs_control) { ++ if (fts->rx && fts->tx) ++ chip->tmode = TMOD_TR; ++ else if (fts->rx) ++ chip->tmode = TMOD_RO; ++ else ++ chip->tmode = TMOD_TO; ++ ++ cr0 &= ~TMOD_MASK; 
++ cr0 |= (chip->tmode << TMOD_OFFSET); ++ } ++ ++ phytium_writel(fts, CTRL0, cr0); ++ ++ spi_mask_intr(fts, 0xff); ++ ++ if (!chip->poll_mode) { ++ txlevel = min_t(u16, fts->fifo_len / 2, fts->len / fts->n_bytes); ++ phytium_writel(fts, TXFLTR, txlevel); ++ ++ imask |= INT_TXEI | INT_TXOI | ++ INT_RXUI | INT_RXOI; ++ spi_umask_intr(fts, imask); ++ ++ fts->transfer_handler = interrupt_transfer; ++ } ++ ++ spi_enable_chip(fts, 1); ++ ++ if (chip->poll_mode) ++ return poll_transfer(fts); ++ ++ return 1; ++} ++ ++static void phytium_spi_handle_err(struct spi_master *master, ++ struct spi_message *msg) ++{ ++ struct phytium_spi *fts = spi_master_get_devdata(master); ++ ++ spi_reset_chip(fts); ++} ++ ++static int phytium_spi_setup(struct spi_device *spi) ++{ ++ struct phytium_spi_chip *chip_info = NULL; ++ struct chip_data *chip; ++ int ret; ++ ++ chip = spi_get_ctldata(spi); ++ if (!chip) { ++ chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); ++ if (!chip) ++ return -ENOMEM; ++ spi_set_ctldata(spi, chip); ++ } ++ ++ chip_info = spi->controller_data; ++ ++ if (chip_info) { ++ if (chip_info->cs_control) ++ chip->cs_control = chip_info->cs_control; ++ ++ chip->poll_mode = chip_info->poll_mode; ++ chip->type = chip_info->type; ++ } ++ ++ chip->tmode = 0; ++ ++ if (gpio_is_valid(spi->cs_gpio)) { ++ ret = gpio_direction_output(spi->cs_gpio, ++ !(spi->mode & SPI_CS_HIGH)); ++ if (ret) ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static void phytium_spi_cleanup(struct spi_device *spi) ++{ ++ struct chip_data *chip = spi_get_ctldata(spi); ++ ++ kfree(chip); ++ spi_set_ctldata(spi, NULL); ++} ++ ++static void spi_hw_init(struct device *dev, struct phytium_spi *fts) ++{ ++ spi_reset_chip(fts); ++ ++ if (!fts->fifo_len) { ++ u32 fifo; ++ ++ for (fifo = 1; fifo < 256; fifo++) { ++ phytium_writel(fts, TXFLTR, fifo); ++ if (fifo != phytium_readl(fts, TXFLTR)) ++ break; ++ } ++ phytium_writel(fts, TXFLTR, 0); ++ ++ fts->fifo_len = (fifo == 1) ? 
0 : fifo; ++ dev_dbg(dev, "Detected FIFO size: %u bytes\n", fts->fifo_len); ++ } ++} ++ ++int phytium_spi_add_host(struct device *dev, struct phytium_spi *fts) ++{ ++ struct spi_master *master; ++ int ret; ++ ++ BUG_ON(fts == NULL); ++ ++ master = spi_alloc_master(dev, 0); ++ if (!master) ++ return -ENOMEM; ++ ++ fts->master = master; ++ snprintf(fts->name, sizeof(fts->name), "phytium_spi%d", fts->bus_num); ++ ++ ret = request_irq(fts->irq, phytium_spi_irq, IRQF_SHARED, fts->name, master); ++ if (ret < 0) { ++ dev_err(dev, "can not get IRQ\n"); ++ goto err_free_master; ++ } ++ ++ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP; ++ master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16); ++ master->bus_num = fts->bus_num; ++ master->num_chipselect = fts->num_cs; ++ master->setup = phytium_spi_setup; ++ master->cleanup = phytium_spi_cleanup; ++ master->set_cs = phytium_spi_set_cs; ++ master->transfer_one = phytium_spi_transfer_one; ++ master->handle_err = phytium_spi_handle_err; ++ master->max_speed_hz = fts->max_freq; ++ master->dev.of_node = dev->of_node; ++ master->dev.fwnode = dev->fwnode; ++ master->flags = SPI_MASTER_GPIO_SS; ++ master->cs_gpios = fts->cs; ++ ++ spi_hw_init(dev, fts); ++ ++ spi_master_set_devdata(master, fts); ++ ret = devm_spi_register_master(dev, master); ++ if (ret) { ++ dev_err(&master->dev, "problem registering spi master\n"); ++ goto err_exit; ++ } ++ ++ return 0; ++ ++err_exit: ++ spi_enable_chip(fts, 0); ++ free_irq(fts->irq, master); ++err_free_master: ++ spi_master_put(master); ++ return ret; ++} ++EXPORT_SYMBOL_GPL(phytium_spi_add_host); ++ ++void phytium_spi_remove_host(struct phytium_spi *fts) ++{ ++ spi_shutdown_chip(fts); ++ ++ free_irq(fts->irq, fts->master); ++} ++EXPORT_SYMBOL_GPL(phytium_spi_remove_host); ++ ++int phytium_spi_suspend_host(struct phytium_spi *fts) ++{ ++ int ret; ++ ++ ret = spi_controller_suspend(fts->master); ++ if (ret) ++ return ret; ++ ++ spi_shutdown_chip(fts); ++ return 0; ++} 
++EXPORT_SYMBOL_GPL(phytium_spi_suspend_host); ++ ++int phytium_spi_resume_host(struct phytium_spi *fts) ++{ ++ int ret; ++ ++ spi_hw_init(&fts->master->dev, fts); ++ ret = spi_controller_resume(fts->master); ++ if (ret) ++ dev_err(&fts->master->dev, "fail to start queue (%d)\n", ret); ++ return ret; ++} ++EXPORT_SYMBOL_GPL(phytium_spi_resume_host); ++ ++MODULE_AUTHOR("Zhu Mingshuai "); ++MODULE_AUTHOR("Chen Baozi "); ++MODULE_DESCRIPTION("Driver for Phytium SPI controller core"); ++MODULE_LICENSE("GPL v2"); +diff --git a/drivers/spi/spi-phytium.h b/drivers/spi/spi-phytium.h +new file mode 100644 +index 000000000000..44b53ef4d1ea +--- /dev/null ++++ b/drivers/spi/spi-phytium.h +@@ -0,0 +1,66 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++#ifndef PHYTIUM_SPI_HEADER_H ++#define PHYTIUM_SPI_HEADER_H ++ ++#include ++#include ++#include ++ ++#define CTRL0 0x00 ++#define SSIENR 0x08 ++#define SER 0x10 ++#define BAUDR 0x14 ++#define TXFLTR 0x18 ++#define TXFLR 0x20 ++#define RXFLR 0x24 ++#define IMR 0x2c ++#define ISR 0x30 ++#define ICR 0x48 ++#define DR 0x60 ++#define GCSR 0x100 ++ ++#define FRF_OFFSET 4 ++#define MODE_OFFSET 6 ++#define TMOD_OFFSET 8 ++ ++#define TMOD_MASK (0x3 << TMOD_OFFSET) ++#define TMOD_TR 0x0 ++#define TMOD_TO 0x1 ++#define TMOD_RO 0x2 ++ ++#define INT_TXEI (1 << 0) ++#define INT_TXOI (1 << 1) ++#define INT_RXUI (1 << 2) ++#define INT_RXOI (1 << 3) ++ ++struct phytium_spi { ++ struct spi_master *master; ++ char name[16]; ++ ++ void __iomem *regs; ++ bool global_cs; ++ unsigned long paddr; ++ int irq; ++ u32 fifo_len; ++ u32 max_freq; ++ ++ u32 reg_io_width; ++ u16 bus_num; ++ u16 num_cs; ++ int *cs; ++ ++ size_t len; ++ void *tx; ++ void *tx_end; ++ void *rx; ++ void *rx_end; ++ u8 n_bytes; ++ irqreturn_t (*transfer_handler)(struct phytium_spi *fts); ++}; ++ ++extern int phytium_spi_add_host(struct device *dev, struct phytium_spi *fts); ++extern void phytium_spi_remove_host(struct phytium_spi *fts); ++extern int phytium_spi_suspend_host(struct 
phytium_spi *fts);
++extern int phytium_spi_resume_host(struct phytium_spi *fts);
++
++#endif /* PHYTIUM_SPI_HEADER_H */
+diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig
+index 3c59e19029be..26d8a42cdc8b 100644
+--- a/drivers/tee/optee/Kconfig
++++ b/drivers/tee/optee/Kconfig
+@@ -13,3 +13,33 @@ config OPTEE_SHM_NUM_PRIV_PAGES
+ 	help
+ 	  This sets the number of private shared memory pages to be
+ 	  used by OP-TEE TEE driver.
++
++if OPTEE
++
++choice
++	prompt "Default conduit method"
++	default OPTEE_DEFAULT_METHOD_NONE
++	help
++	  This option sets the default conduit method for OP-TEE in case
++	  firmware misses "method" property. If in doubt, select "none"
++	  which depends on firmware to provide the value.
++
++config OPTEE_DEFAULT_METHOD_NONE
++	bool "none"
++	help
++	  There is no default conduit method used by the driver. Require
++	  firmware to provide the method explicitly.
++
++config OPTEE_DEFAULT_METHOD_HVC
++	bool "hvc"
++	help
++	  Use the "hvc" as default conduit method.
++
++config OPTEE_DEFAULT_METHOD_SMC
++	bool "smc"
++	help
++	  Use the "smc" as default conduit method.
++ ++endchoice ++ ++endif +diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c +index e1aafe842d66..51d20d234c61 100644 +--- a/drivers/tee/optee/core.c ++++ b/drivers/tee/optee/core.c +@@ -14,6 +14,7 @@ + + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + ++#include + #include + #include + #include +@@ -529,15 +530,23 @@ static void optee_smccc_hvc(unsigned long a0, unsigned long a1, + arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res); + } + +-static optee_invoke_fn *get_invoke_func(struct device_node *np) ++#if defined(CONFIG_OPTEE_DEFAULT_METHOD_HVC) ++#define DEFAULT_CONDUIT_METHOD optee_smccc_hvc ++#elif defined(CONFIG_OPTEE_DEFAULT_METHOD_SMC) ++#define DEFAULT_CONDUIT_METHOD optee_smccc_smc ++#else ++#define DEFAULT_CONDUIT_METHOD ERR_PTR(-ENXIO) ++#endif ++ ++static optee_invoke_fn *get_invoke_func(struct device *dev) + { + const char *method; + +- pr_info("probing for conduit method from DT.\n"); ++ pr_info("probing for conduit method.\n"); + +- if (of_property_read_string(np, "method", &method)) { ++ if (device_property_read_string(dev, "method", &method)) { + pr_warn("missing \"method\" property\n"); +- return ERR_PTR(-ENXIO); ++ return DEFAULT_CONDUIT_METHOD; + } + + if (!strcmp("hvc", method)) +@@ -549,7 +558,37 @@ static optee_invoke_fn *get_invoke_func(struct device_node *np) + return ERR_PTR(-EINVAL); + } + +-static struct optee *optee_probe(struct device_node *np) ++static int optee_remove(struct platform_device *pdev) ++{ ++ struct optee *optee = platform_get_drvdata(pdev); ++ ++ /* ++ * Ask OP-TEE to free all cached shared memory objects to decrease ++ * reference counters and also avoid wild pointers in secure world ++ * into the old shared memory range. ++ */ ++ optee_disable_shm_cache(optee); ++ ++ /* ++ * The two devices have to be unregistered before we can free the ++ * other resources. 
++ */ ++ tee_device_unregister(optee->supp_teedev); ++ tee_device_unregister(optee->teedev); ++ ++ tee_shm_pool_free(optee->pool); ++ if (optee->memremaped_shm) ++ memunmap(optee->memremaped_shm); ++ optee_wait_queue_exit(&optee->wait_queue); ++ optee_supp_uninit(&optee->supp); ++ mutex_destroy(&optee->call_queue.mutex); ++ ++ kfree(optee); ++ ++ return 0; ++} ++ ++static int optee_probe(struct platform_device *pdev) + { + optee_invoke_fn *invoke_fn; + struct tee_shm_pool *pool; +@@ -559,25 +598,25 @@ static struct optee *optee_probe(struct device_node *np) + u32 sec_caps; + int rc; + +- invoke_fn = get_invoke_func(np); ++ invoke_fn = get_invoke_func(&pdev->dev); + if (IS_ERR(invoke_fn)) +- return (void *)invoke_fn; ++ return PTR_ERR(invoke_fn); + + if (!optee_msg_api_uid_is_optee_api(invoke_fn)) { + pr_warn("api uid mismatch\n"); +- return ERR_PTR(-EINVAL); ++ return -EINVAL; + } + + optee_msg_get_os_revision(invoke_fn); + + if (!optee_msg_api_revision_is_compatible(invoke_fn)) { + pr_warn("api revision mismatch\n"); +- return ERR_PTR(-EINVAL); ++ return -EINVAL; + } + + if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) { + pr_warn("capabilities mismatch\n"); +- return ERR_PTR(-EINVAL); ++ return -EINVAL; + } + + /* +@@ -589,7 +628,7 @@ static struct optee *optee_probe(struct device_node *np) + + pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm, sec_caps); + if (IS_ERR(pool)) +- return (void *)pool; ++ return PTR_ERR(pool); + + optee = kzalloc(sizeof(*optee), GFP_KERNEL); + if (!optee) { +@@ -631,8 +670,10 @@ static struct optee *optee_probe(struct device_node *np) + + optee_enable_shm_cache(optee); + ++ platform_set_drvdata(pdev, optee); ++ + pr_info("initialized driver\n"); +- return optee; ++ return 0; + err: + if (optee) { + /* +@@ -648,81 +689,37 @@ static struct optee *optee_probe(struct device_node *np) + tee_shm_pool_free(pool); + if (memremaped_shm) + memunmap(memremaped_shm); +- return ERR_PTR(rc); +-} +- +-static void 
optee_remove(struct optee *optee) +-{ +- /* +- * Ask OP-TEE to free all cached shared memory objects to decrease +- * reference counters and also avoid wild pointers in secure world +- * into the old shared memory range. +- */ +- optee_disable_shm_cache(optee); +- +- /* +- * The two devices has to be unregistered before we can free the +- * other resources. +- */ +- tee_device_unregister(optee->supp_teedev); +- tee_device_unregister(optee->teedev); +- +- tee_shm_pool_free(optee->pool); +- if (optee->memremaped_shm) +- memunmap(optee->memremaped_shm); +- optee_wait_queue_exit(&optee->wait_queue); +- optee_supp_uninit(&optee->supp); +- mutex_destroy(&optee->call_queue.mutex); +- +- kfree(optee); ++ return rc; + } + +-static const struct of_device_id optee_match[] = { ++static const struct of_device_id optee_dt_match[] = { + { .compatible = "linaro,optee-tz" }, + {}, + }; ++MODULE_DEVICE_TABLE(of, optee_dt_match); + +-static struct optee *optee_svc; +- +-static int __init optee_driver_init(void) +-{ +- struct device_node *fw_np; +- struct device_node *np; +- struct optee *optee; +- +- /* Node is supposed to be below /firmware */ +- fw_np = of_find_node_by_name(NULL, "firmware"); +- if (!fw_np) +- return -ENODEV; +- +- np = of_find_matching_node(fw_np, optee_match); +- if (!np) +- return -ENODEV; +- +- optee = optee_probe(np); +- of_node_put(np); +- +- if (IS_ERR(optee)) +- return PTR_ERR(optee); +- +- optee_svc = optee; +- +- return 0; +-} +-module_init(optee_driver_init); +- +-static void __exit optee_driver_exit(void) +-{ +- struct optee *optee = optee_svc; +- +- optee_svc = NULL; +- if (optee) +- optee_remove(optee); +-} +-module_exit(optee_driver_exit); ++#ifdef CONFIG_ACPI ++static const struct acpi_device_id optee_acpi_match[] = { ++ { "PHYT8003" }, ++ { } ++}; ++MODULE_DEVICE_TABLE(acpi, optee_acpi_match); ++#endif ++ ++static struct platform_driver optee_driver = { ++ .probe = optee_probe, ++ .remove = optee_remove, ++ .driver = { ++ .name = "optee", ++ 
.of_match_table = optee_dt_match, ++ .acpi_match_table = ACPI_PTR(optee_acpi_match), ++ }, ++}; ++module_platform_driver(optee_driver); + + MODULE_AUTHOR("Linaro"); + MODULE_DESCRIPTION("OP-TEE driver"); + MODULE_SUPPORTED_DEVICE(""); + MODULE_VERSION("1.0"); + MODULE_LICENSE("GPL v2"); ++MODULE_ALIAS("platform:optee"); +diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig +index df8bd0c7b97d..a07eb0390506 100644 +--- a/drivers/tty/serial/Kconfig ++++ b/drivers/tty/serial/Kconfig +@@ -73,6 +73,17 @@ config SERIAL_AMBA_PL011_CONSOLE + your boot loader (lilo or loadlin) about how to pass options to the + kernel at boot time.) + ++config SERIAL_PHYTIUM_PCI ++ tristate "Phytium PCI serial port support" ++ depends on PCI ++ select SERIAL_CORE ++ help ++ This driver supports the Phytium UART controller on PCI/PCIe adapters. ++ If you want to compile this driver into the kernel, say Y here. To ++ compile this driver as a module, choose M here. ++ ++ If unsure, say N. ++ + config SERIAL_EARLYCON_ARM_SEMIHOST + bool "Early console using ARM semihosting" + depends on ARM64 || ARM +diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile +index daac675612df..6d4cf6bc06ca 100644 +--- a/drivers/tty/serial/Makefile ++++ b/drivers/tty/serial/Makefile +@@ -24,6 +24,7 @@ obj-$(CONFIG_SERIAL_8250) += 8250/ + + obj-$(CONFIG_SERIAL_AMBA_PL010) += amba-pl010.o + obj-$(CONFIG_SERIAL_AMBA_PL011) += amba-pl011.o ++obj-$(CONFIG_SERIAL_PHYTIUM_PCI) += phytium-uart.o + obj-$(CONFIG_SERIAL_CLPS711X) += clps711x.o + obj-$(CONFIG_SERIAL_PXA_NON8250) += pxa.o + obj-$(CONFIG_SERIAL_PNX8XXX) += pnx8xxx_uart.o +diff --git a/drivers/tty/serial/phytium-uart.c b/drivers/tty/serial/phytium-uart.c +new file mode 100644 +index 000000000000..bd59a97740c7 +--- /dev/null ++++ b/drivers/tty/serial/phytium-uart.c +@@ -0,0 +1,922 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Driver for Phytium PCI UART controller ++ * ++ * Copyright 2021 Phytium Ltd. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define DRV_NAME "phytium_uart" ++ ++#define REG_DR 0x00 ++#define REG_FR 0x18 ++#define REG_IBRD 0x24 ++#define REG_FBRD 0x28 ++#define REG_LCRH_RX 0x2c ++#define REG_LCRH_TX 0x2c ++#define REG_CR 0x30 ++#define REG_IFLS 0x34 ++#define REG_IMSC 0x38 ++#define REG_RIS 0x3c ++#define REG_MIS 0x40 ++#define REG_ICR 0x44 ++ ++#define REG_DR_OE (1 << 11) ++#define REG_DR_BE (1 << 10) ++#define REG_DR_PE (1 << 9) ++#define REG_DR_FE (1 << 8) ++ ++#define REG_LCRH_SPS 0x80 ++#define REG_LCRH_WLEN_8 0x60 ++#define REG_LCRH_WLEN_7 0x40 ++#define REG_LCRH_WLEN_6 0x20 ++#define REG_LCRH_WLEN_5 0x00 ++#define REG_LCRH_FEN 0x10 ++#define REG_LCRH_STP2 0x08 ++#define REG_LCRH_EPS 0x04 ++#define REG_LCRH_PEN 0x02 ++#define REG_LCRH_BRK 0x01 ++ ++#define REG_FR_RI 0x100 ++#define REG_FR_TXFE 0x080 ++#define REG_FR_RXFF 0x040 ++#define REG_FR_TXFF 0x020 ++#define REG_FR_RXFE 0x010 ++#define REG_FR_BUSY 0x008 ++#define REG_FR_DCD 0x004 ++#define REG_FR_DSR 0x002 ++#define REG_FR_CTS 0x001 ++#define REG_FR_TMSK (REG_FR_TXFF + REG_FR_BUSY) ++ ++#define REG_CR_CTSEN 0x8000 /* CTS hardware flow control */ ++#define REG_CR_RTSEN 0x4000 /* RTS hardware flow control */ ++#define REG_CR_OUT2 0x2000 /* OUT2 */ ++#define REG_CR_OUT1 0x1000 /* OUT1 */ ++#define REG_CR_RTS 0x0800 /* RTS */ ++#define REG_CR_DTR 0x0400 /* DTR */ ++#define REG_CR_RXE 0x0200 /* receive enable */ ++#define REG_CR_TXE 0x0100 /* transmit enable */ ++#define REG_CR_LBE 0x0080 /* loopback enable */ ++#define REG_CR_RTIE 0x0040 ++#define REG_CR_TIE 0x0020 ++#define REG_CR_RIE 0x0010 ++#define REG_CR_MSIE 0x0008 ++#define REG_CR_IIRLP 0x0004 /* SIR low power mode */ ++#define REG_CR_SIREN 0x0002 /* SIR enable */ ++#define REG_CR_UARTEN 0x0001 /* UART enable */ ++ ++#define REG_IFLS_RX1_8 (0 << 3) ++#define REG_IFLS_RX2_8 (1 << 3) ++#define REG_IFLS_RX4_8 (2 << 3) ++#define REG_IFLS_RX6_8 
(3 << 3) ++#define REG_IFLS_RX7_8 (4 << 3) ++#define REG_IFLS_TX1_8 (0 << 0) ++#define REG_IFLS_TX2_8 (1 << 0) ++#define REG_IFLS_TX4_8 (2 << 0) ++#define REG_IFLS_TX6_8 (3 << 0) ++ ++#define REG_IMSC_OEIM (1 << 10) /* overrun error interrupt mask */ ++#define REG_IMSC_BEIM (1 << 9) /* break error interrupt mask */ ++#define REG_IMSC_PEIM (1 << 8) /* parity error interrupt mask */ ++#define REG_IMSC_FEIM (1 << 7) /* framing error interrupt mask */ ++#define REG_IMSC_RTIM (1 << 6) /* receive timeout interrupt mask */ ++#define REG_IMSC_TXIM (1 << 5) /* transmit interrupt mask */ ++#define REG_IMSC_RXIM (1 << 4) /* receive interrupt mask */ ++#define REG_IMSC_DSRMIM (1 << 3) /* DSR interrupt mask */ ++#define REG_IMSC_DCDMIM (1 << 2) /* DCD interrupt mask */ ++#define REG_IMSC_CTSMIM (1 << 1) /* CTS interrupt mask */ ++#define REG_IMSC_RIMIM (1 << 0) /* RI interrupt mask */ ++ ++#define REG_ICR_OEIS (1 << 10) /* overrun error interrupt status */ ++#define REG_ICR_BEIS (1 << 9) /* break error interrupt status */ ++#define REG_ICR_PEIS (1 << 8) /* parity error interrupt status */ ++#define REG_ICR_FEIS (1 << 7) /* framing error interrupt status */ ++#define REG_ICR_RTIS (1 << 6) /* receive timeout interrupt status */ ++#define REG_ICR_TXIS (1 << 5) /* transmit interrupt status */ ++#define REG_ICR_RXIS (1 << 4) /* receive interrupt status */ ++#define REG_ICR_DSRMIS (1 << 3) /* DSR interrupt status */ ++#define REG_ICR_DCDMIS (1 << 2) /* DCD interrupt status */ ++#define REG_ICR_CTSMIS (1 << 1) /* CTS interrupt status */ ++#define REG_ICR_RIMIS (1 << 0) /* RI interrupt status */ ++ ++#define UART_NR 12 ++ ++#define UART_DR_ERROR (REG_DR_OE|REG_DR_BE|REG_DR_PE|REG_DR_FE) ++#define UART_DUMMY_DR_RX (1 << 16) ++ ++#define DEFAULT_UARTCLK 48000000 /* 48 MHz */ ++ ++/* ++ * We wrap our port structure around the generic uart_port. 
++ */ ++struct phytium_uart_port { ++ struct uart_port port; ++ unsigned int im; /* interrupt mask */ ++ unsigned int old_status; ++ unsigned int old_cr; /* state during shutdown */ ++ char type[12]; ++}; ++ ++static unsigned int phytium_uart_read(const struct phytium_uart_port *pup, ++ unsigned int reg) ++{ ++ void __iomem *addr = pup->port.membase + reg; ++ ++ return readl_relaxed(addr); ++} ++ ++static void phytium_uart_write(unsigned int val, const struct phytium_uart_port *pup, ++ unsigned int reg) ++{ ++ void __iomem *addr = pup->port.membase + reg; ++ ++ writel_relaxed(val, addr); ++} ++ ++static int phytium_fifo_to_tty(struct phytium_uart_port *pup) ++{ ++ u16 status; ++ unsigned int ch, flag, fifotaken; ++ ++ for (fifotaken = 0; fifotaken < 256; fifotaken++) { ++ status = phytium_uart_read(pup, REG_FR); ++ if (status & REG_FR_RXFE) ++ break; ++ ++ /* Take chars from the FIFO and update status */ ++ ch = phytium_uart_read(pup, REG_DR) | UART_DUMMY_DR_RX; ++ flag = TTY_NORMAL; ++ pup->port.icount.rx++; ++ ++ if (unlikely(ch & UART_DR_ERROR)) { ++ if (ch & REG_DR_BE) { ++ ch &= ~(REG_DR_FE | REG_DR_PE); ++ pup->port.icount.brk++; ++ if (uart_handle_break(&pup->port)) ++ continue; ++ } else if (ch & REG_DR_PE) ++ pup->port.icount.parity++; ++ else if (ch & REG_DR_FE) ++ pup->port.icount.frame++; ++ if (ch & REG_DR_OE) ++ pup->port.icount.overrun++; ++ ++ ch &= pup->port.read_status_mask; ++ ++ if (ch & REG_DR_BE) ++ flag = TTY_BREAK; ++ else if (ch & REG_DR_PE) ++ flag = TTY_PARITY; ++ else if (ch & REG_DR_FE) ++ flag = TTY_FRAME; ++ } ++ ++ if (uart_handle_sysrq_char(&pup->port, ch & 255)) ++ continue; ++ ++ uart_insert_char(&pup->port, ch, REG_DR_OE, ch, flag); ++ } ++ ++ return fifotaken; ++} ++ ++static void phytium_rx_chars(struct phytium_uart_port *pup) ++__releases(&pup->port.lock) ++__acquires(&pup->port.lock) ++{ ++ phytium_fifo_to_tty(pup); ++ ++ spin_unlock(&pup->port.lock); ++ tty_flip_buffer_push(&pup->port.state->port); ++ 
spin_lock(&pup->port.lock); ++} ++ ++static void phytium_stop_tx(struct uart_port *port) ++{ ++ struct phytium_uart_port *pup = ++ container_of(port, struct phytium_uart_port, port); ++ ++ pup->im &= ~REG_IMSC_TXIM; ++ phytium_uart_write(pup->im, pup, REG_IMSC); ++} ++ ++static bool phytium_tx_char(struct phytium_uart_port *pup, unsigned char c, ++ bool from_irq) ++{ ++ ++ if (unlikely(!from_irq) && ++ phytium_uart_read(pup, REG_FR) & REG_FR_TXFF) ++ return false; /* unable to transmit character */ ++ ++ phytium_uart_write(c, pup, REG_DR); ++ pup->port.icount.tx++; ++ ++ return true; ++} ++ ++static bool phytium_tx_chars(struct phytium_uart_port *pup, bool from_irq) ++{ ++ struct circ_buf *xmit = &pup->port.state->xmit; ++ int count = pup->port.fifosize >> 1; ++ ++ if (pup->port.x_char) { ++ if (!phytium_tx_char(pup, pup->port.x_char, from_irq)) ++ return true; ++ pup->port.x_char = 0; ++ --count; ++ } ++ if (uart_circ_empty(xmit) || uart_tx_stopped(&pup->port)) { ++ phytium_stop_tx(&pup->port); ++ return false; ++ } ++ ++ do { ++ if (likely(from_irq) && count-- == 0) ++ break; ++ ++ if (!phytium_tx_char(pup, xmit->buf[xmit->tail], from_irq)) ++ break; ++ ++ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); ++ } while (!uart_circ_empty(xmit)); ++ ++ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) ++ uart_write_wakeup(&pup->port); ++ ++ if (uart_circ_empty(xmit)) { ++ phytium_stop_tx(&pup->port); ++ return false; ++ } ++ return true; ++} ++ ++static void phytium_modem_status(struct phytium_uart_port *pup) ++{ ++ unsigned int status, delta; ++ ++ status = phytium_uart_read(pup, REG_FR) & (REG_FR_DCD|REG_FR_DSR|REG_FR_CTS); ++ ++ delta = status ^ pup->old_status; ++ pup->old_status = status; ++ ++ if (!delta) ++ return; ++ ++ if (delta & REG_FR_DCD) ++ uart_handle_dcd_change(&pup->port, status & REG_FR_DCD); ++ ++ if (delta & REG_FR_DSR) ++ pup->port.icount.dsr++; ++ ++ if (delta & REG_FR_CTS) ++ uart_handle_cts_change(&pup->port, status & REG_FR_CTS); ++ ++ 
wake_up_interruptible(&pup->port.state->port.delta_msr_wait); ++} ++ ++static irqreturn_t phytium_uart_interrupt(int irq, void *dev_id) ++{ ++ struct phytium_uart_port *pup = dev_id; ++ unsigned long flags; ++ unsigned int status, pass_counter = 256; ++ int handled = 0; ++ ++ spin_lock_irqsave(&pup->port.lock, flags); ++ status = phytium_uart_read(pup, REG_RIS) & pup->im; ++ if (status) { ++ do { ++ phytium_uart_write(status & ~(REG_ICR_TXIS|REG_ICR_RTIS|REG_ICR_RXIS), ++ pup, REG_ICR); ++ ++ if (status & (REG_ICR_RTIS|REG_ICR_RXIS)) ++ phytium_rx_chars(pup); ++ ++ if (status & (REG_ICR_DSRMIS|REG_ICR_DCDMIS| ++ REG_ICR_CTSMIS|REG_ICR_RIMIS)) ++ phytium_modem_status(pup); ++ if (status & REG_ICR_TXIS) ++ phytium_tx_chars(pup, true); ++ ++ if (pass_counter-- == 0) ++ break; ++ ++ status = phytium_uart_read(pup, REG_RIS) & pup->im; ++ } while (status != 0); ++ handled = 1; ++ } ++ spin_unlock_irqrestore(&pup->port.lock, flags); ++ ++ return IRQ_RETVAL(handled); ++} ++ ++static unsigned int phytium_tx_empty(struct uart_port *port) ++{ ++ unsigned int status; ++ struct phytium_uart_port *pup = ++ container_of(port, struct phytium_uart_port, port); ++ ++ status = phytium_uart_read(pup, REG_FR) & (REG_FR_BUSY | REG_FR_TXFF); ++ ++ return status ? 
0 : TIOCSER_TEMT; ++} ++ ++static void phytium_set_mctrl(struct uart_port *port, unsigned int mctrl) ++{ ++ struct phytium_uart_port *pup = ++ container_of(port, struct phytium_uart_port, port); ++ unsigned int cr; ++ ++ cr = phytium_uart_read(pup, REG_CR); ++ ++#define TIOCMBIT(tiocmbit, uartbit) \ ++ do { \ ++ if (mctrl & tiocmbit) \ ++ cr |= uartbit; \ ++ else \ ++ cr &= ~uartbit; \ ++ } while (0) ++ ++ TIOCMBIT(TIOCM_RTS, REG_CR_RTS); ++ TIOCMBIT(TIOCM_DTR, REG_CR_DTR); ++ TIOCMBIT(TIOCM_OUT1, REG_CR_OUT1); ++ TIOCMBIT(TIOCM_OUT2, REG_CR_OUT2); ++ TIOCMBIT(TIOCM_LOOP, REG_CR_LBE); ++ ++ if (port->status & UPSTAT_AUTORTS) { ++ /* We need to disable auto-RTS if we want to turn RTS off */ ++ TIOCMBIT(TIOCM_RTS, REG_CR_RTSEN); ++ } ++#undef TIOCMBIT ++ ++ phytium_uart_write(cr, pup, REG_CR); ++} ++ ++static unsigned int phytium_get_mctrl(struct uart_port *port) ++{ ++ struct phytium_uart_port *pup = ++ container_of(port, struct phytium_uart_port, port); ++ unsigned int cr = 0; ++ unsigned int status = phytium_uart_read(pup, REG_FR); ++ ++#define TIOCMBIT(uartbit, tiocmbit) \ ++ do { \ ++ if (status & uartbit) \ ++ cr |= tiocmbit; \ ++ } while (0) ++ ++ TIOCMBIT(REG_FR_DCD, TIOCM_CAR); ++ TIOCMBIT(REG_FR_DSR, TIOCM_DSR); ++ TIOCMBIT(REG_FR_CTS, TIOCM_CTS); ++ TIOCMBIT(REG_FR_RI, TIOCM_RNG); ++#undef TIOCMBIT ++ return cr; ++} ++ ++static void phytium_start_tx(struct uart_port *port) ++{ ++ struct phytium_uart_port *pup = ++ container_of(port, struct phytium_uart_port, port); ++ ++ if (phytium_tx_chars(pup, false)) { ++ pup->im |= REG_IMSC_TXIM; ++ phytium_uart_write(pup->im, pup, REG_IMSC); ++ } ++} ++ ++static void phytium_stop_rx(struct uart_port *port) ++{ ++ struct phytium_uart_port *pup = ++ container_of(port, struct phytium_uart_port, port); ++ ++ pup->im &= ~(REG_IMSC_RXIM|REG_IMSC_RTIM|REG_IMSC_FEIM| ++ REG_IMSC_PEIM|REG_IMSC_BEIM|REG_IMSC_OEIM); ++ phytium_uart_write(pup->im, pup, REG_IMSC); ++} ++ ++static void phytium_enable_ms(struct uart_port *port) ++{ 
++ struct phytium_uart_port *pup = ++ container_of(port, struct phytium_uart_port, port); ++ ++ pup->im |= REG_IMSC_RIMIM|REG_IMSC_CTSMIM|REG_IMSC_DCDMIM|REG_IMSC_DSRMIM; ++ phytium_uart_write(pup->im, pup, REG_IMSC); ++} ++ ++static void phytium_break_ctl(struct uart_port *port, int break_state) ++{ ++ struct phytium_uart_port *pup = ++ container_of(port, struct phytium_uart_port, port); ++ unsigned long flags; ++ unsigned int lcr_h; ++ ++ spin_lock_irqsave(&pup->port.lock, flags); ++ lcr_h = phytium_uart_read(pup, REG_LCRH_TX); ++ if (break_state == -1) ++ lcr_h |= REG_LCRH_BRK; ++ else ++ lcr_h &= ~REG_LCRH_BRK; ++ phytium_uart_write(lcr_h, pup, REG_LCRH_TX); ++ spin_unlock_irqrestore(&pup->port.lock, flags); ++} ++ ++static int phytium_hwinit(struct uart_port *port) ++{ ++ struct phytium_uart_port *pup = ++ container_of(port, struct phytium_uart_port, port); ++ ++ /* XXX: more configurable setup method in future */ ++ pup->port.uartclk = DEFAULT_UARTCLK; ++ ++ /* Clear pending error and receive interrupts */ ++ phytium_uart_write(REG_ICR_OEIS | REG_ICR_BEIS | REG_ICR_PEIS | ++ REG_ICR_FEIS | REG_ICR_RTIS | REG_ICR_RXIS, ++ pup, REG_ICR); ++ ++ /* ++ * Save interrupts enable mask, and enable RX interrupts in case if ++ * the interrupt is used for NMI entry. 
++ */ ++ pup->im = phytium_uart_read(pup, REG_IMSC); ++ phytium_uart_write(REG_IMSC_RTIM | REG_IMSC_RXIM, pup, REG_IMSC); ++ ++ return 0; ++} ++ ++static int phytium_uart_allocate_irq(struct phytium_uart_port *pup) ++{ ++ phytium_uart_write(pup->im, pup, REG_IMSC); ++ ++ return request_irq(pup->port.irq, phytium_uart_interrupt, IRQF_SHARED, DRV_NAME, pup); ++} ++ ++static void phytium_enable_interrtups(struct phytium_uart_port *pup) ++{ ++ unsigned int i; ++ ++ spin_lock_irq(&pup->port.lock); ++ ++ /* Clear out any spuriously appearing RX interrupts */ ++ phytium_uart_write(REG_ICR_RTIS | REG_ICR_RXIS, pup, REG_ICR); ++ ++ /* ++ * RXIS is asserted only when the RX FIFO transitions from below ++ * to above the trigger threshold. If the RX FIFO is already ++ * full to the threashold this can't happen and RXIS will now be ++ * stuck off. Drain the RX FIFO explicitly to fix this: ++ */ ++ for (i = 0; i < pup->port.fifosize * 2; i++) { ++ if (phytium_uart_read(pup, REG_FR) & REG_FR_RXFE) ++ break; ++ ++ phytium_uart_read(pup, REG_DR); ++ } ++ ++ pup->im = REG_IMSC_RTIM | REG_IMSC_RXIM; ++ phytium_uart_write(pup->im, pup, REG_IMSC); ++ spin_unlock_irq(&pup->port.lock); ++} ++ ++static int phytium_startup(struct uart_port *port) ++{ ++ struct phytium_uart_port *pup = ++ container_of(port, struct phytium_uart_port, port); ++ unsigned int cr; ++ int ret = 0; ++ ++ ret = phytium_hwinit(port); ++ if (ret) ++ goto out; ++ ++ ret = phytium_uart_allocate_irq(pup); ++ if (ret) ++ goto out; ++ ++ phytium_uart_write(REG_IFLS_RX4_8|REG_IFLS_TX4_8, pup, REG_IFLS); ++ ++ spin_lock_irq(&pup->port.lock); ++ ++ /* restore RTS and DTR */ ++ cr = pup->old_cr & (REG_CR_RTS | REG_CR_DTR); ++ cr |= REG_CR_UARTEN | REG_CR_RXE | REG_CR_TXE; ++ phytium_uart_write(cr, pup, REG_CR); ++ ++ spin_unlock_irq(&pup->port.lock); ++ ++ /* initialise the old status of the modem signals */ ++ pup->old_status = phytium_uart_read(pup, REG_FR) & (REG_FR_DCD|REG_FR_DSR|REG_FR_CTS); ++ ++ 
phytium_enable_interrtups(pup); ++ ++out: ++ return ret; ++} ++ ++static void phytium_shutdown_channel(struct phytium_uart_port *pup, ++ unsigned int lcrh) ++{ ++ unsigned long val; ++ ++ val = phytium_uart_read(pup, lcrh); ++ val &= ~(REG_LCRH_BRK | REG_LCRH_FEN); ++ phytium_uart_write(val, pup, lcrh); ++} ++ ++static void phytium_disable_uart(struct phytium_uart_port *pup) ++{ ++ unsigned int cr; ++ ++ pup->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS); ++ spin_lock_irq(&pup->port.lock); ++ cr = phytium_uart_read(pup, REG_CR); ++ pup->old_cr = cr; ++ cr &= REG_CR_RTS | REG_CR_DTR; ++ cr |= REG_CR_UARTEN | REG_CR_TXE; ++ phytium_uart_write(cr, pup, REG_CR); ++ spin_unlock_irq(&pup->port.lock); ++ ++ /* ++ * disable break condition and fifos ++ */ ++ phytium_shutdown_channel(pup, REG_LCRH_RX); ++} ++ ++static void phytium_disable_interrupts(struct phytium_uart_port *pup) ++{ ++ spin_lock_irq(&pup->port.lock); ++ ++ /* mask all interrupts and clear all pending ones */ ++ pup->im = 0; ++ phytium_uart_write(pup->im, pup, REG_IMSC); ++ phytium_uart_write(0xffff, pup, REG_ICR); ++ ++ spin_unlock_irq(&pup->port.lock); ++} ++ ++static void phytium_shutdown(struct uart_port *port) ++{ ++ struct phytium_uart_port *pup = ++ container_of(port, struct phytium_uart_port, port); ++ ++ phytium_disable_interrupts(pup); ++ ++ free_irq(pup->port.irq, pup); ++ ++ phytium_disable_uart(pup); ++ ++ if (pup->port.ops->flush_buffer) ++ pup->port.ops->flush_buffer(port); ++} ++ ++static void ++phytium_setup_status_masks(struct uart_port *port, struct ktermios *termios) ++{ ++ port->read_status_mask = REG_DR_OE | 255; ++ if (termios->c_iflag & INPCK) ++ port->read_status_mask |= REG_DR_FE | REG_DR_PE; ++ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) ++ port->read_status_mask |= REG_DR_BE; ++ ++ /* ++ * Characters to ignore ++ */ ++ port->ignore_status_mask = 0; ++ if (termios->c_iflag & IGNPAR) ++ port->ignore_status_mask |= REG_DR_FE | REG_DR_PE; ++ if (termios->c_iflag & IGNBRK) 
{ ++ port->ignore_status_mask |= REG_DR_BE; ++ /* ++ * If we're ignoring parity and break indicators, ++ * ignore overruns too (for real raw support). ++ */ ++ if (termios->c_iflag & IGNPAR) ++ port->ignore_status_mask |= REG_DR_OE; ++ } ++ ++ /* ++ * Ignore all characters if CREAD is not set. ++ */ ++ if ((termios->c_cflag & CREAD) == 0) ++ port->ignore_status_mask |= UART_DUMMY_DR_RX; ++} ++ ++static void ++phytium_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) ++{ ++ struct phytium_uart_port *pup = ++ container_of(port, struct phytium_uart_port, port); ++ unsigned int lcr_h, old_cr; ++ unsigned long flags; ++ unsigned int baud, quot; ++ ++ /* Ask the core to calculate the divisor for us. */ ++ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); ++ ++ if (baud > port->uartclk/16) ++ quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud); ++ else ++ quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud); ++ ++ switch (termios->c_cflag & CSIZE) { ++ case CS5: ++ lcr_h = REG_LCRH_WLEN_5; ++ break; ++ case CS6: ++ lcr_h = REG_LCRH_WLEN_6; ++ break; ++ case CS7: ++ lcr_h = REG_LCRH_WLEN_7; ++ break; ++ default: /* CS8 */ ++ lcr_h = REG_LCRH_WLEN_8; ++ break; ++ } ++ if (termios->c_cflag & CSTOPB) ++ lcr_h |= REG_LCRH_STP2; ++ if (termios->c_cflag & PARENB) { ++ lcr_h |= REG_LCRH_PEN; ++ if (!(termios->c_cflag & PARODD)) ++ lcr_h |= REG_LCRH_EPS; ++ if (termios->c_cflag & CMSPAR) ++ lcr_h |= REG_LCRH_SPS; ++ } ++ if (pup->port.fifosize > 1) ++ lcr_h |= REG_LCRH_FEN; ++ ++ spin_lock_irqsave(&port->lock, flags); ++ ++ /* ++ * Update the per-port timeout. 
++ */ ++ uart_update_timeout(port, termios->c_cflag, baud); ++ ++ phytium_setup_status_masks(port, termios); ++ ++ if (UART_ENABLE_MS(port, termios->c_cflag)) ++ phytium_enable_ms(port); ++ ++ /* first, disable everything */ ++ old_cr = phytium_uart_read(pup, REG_CR); ++ phytium_uart_write(0, pup, REG_CR); ++ ++ if (termios->c_cflag & CRTSCTS) { ++ if (old_cr & REG_CR_RTS) ++ old_cr |= REG_CR_RTSEN; ++ ++ old_cr |= REG_CR_CTSEN; ++ port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS; ++ } else { ++ old_cr &= ~(REG_CR_CTSEN | REG_CR_RTSEN); ++ port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS); ++ } ++ ++ /* Set baud rate */ ++ phytium_uart_write(quot & 0x3f, pup, REG_FBRD); ++ phytium_uart_write(quot >> 6, pup, REG_IBRD); ++ ++ phytium_uart_write(lcr_h, pup, REG_LCRH_RX); ++ phytium_uart_write(old_cr, pup, REG_CR); ++ ++ spin_unlock_irqrestore(&port->lock, flags); ++} ++ ++static const char *phytium_type(struct uart_port *port) ++{ ++ struct phytium_uart_port *pup = ++ container_of(port, struct phytium_uart_port, port); ++ return pup->port.type == PORT_PHYTIUM ? pup->type : NULL; ++} ++ ++static void phytium_release_port(struct uart_port *port) ++{ ++ /* Nothing to release ... 
*/ ++} ++ ++static int phytium_request_port(struct uart_port *port) ++{ ++ /* UARTs always present */ ++ return 0; ++} ++ ++static void phytium_config_port(struct uart_port *port, int flags) ++{ ++ if (flags & UART_CONFIG_TYPE) { ++ port->type = PORT_PHYTIUM; ++ phytium_request_port(port); ++ } ++} ++ ++static int phytium_verify_port(struct uart_port *port, struct serial_struct *ser) ++{ ++ int ret = 0; ++ ++ if (ser->type != PORT_UNKNOWN && ser->type != PORT_PHYTIUM) ++ ret = -EINVAL; ++ if (ser->irq < 0 || ser->irq >= nr_irqs) ++ ret = -EINVAL; ++ if (ser->baud_base < 9600) ++ ret = -EINVAL; ++ ++ return ret; ++} ++ ++static const struct uart_ops phytium_uart_ops = { ++ .tx_empty = phytium_tx_empty, ++ .set_mctrl = phytium_set_mctrl, ++ .get_mctrl = phytium_get_mctrl, ++ .stop_tx = phytium_stop_tx, ++ .start_tx = phytium_start_tx, ++ .stop_rx = phytium_stop_rx, ++ .enable_ms = phytium_enable_ms, ++ .break_ctl = phytium_break_ctl, ++ .startup = phytium_startup, ++ .shutdown = phytium_shutdown, ++ .set_termios = phytium_set_termios, ++ .type = phytium_type, ++ .release_port = phytium_release_port, ++ .request_port = phytium_request_port, ++ .config_port = phytium_config_port, ++ .verify_port = phytium_verify_port, ++}; ++ ++static struct phytium_uart_port *uart_ports[UART_NR]; ++ ++static struct uart_driver phytium_uart = { ++ .owner = THIS_MODULE, ++ .driver_name = DRV_NAME, ++ .dev_name = "ttyS", ++ .nr = UART_NR, ++}; ++ ++void phytium_unregister_port(struct phytium_uart_port *pup) ++{ ++ int i; ++ bool busy = false; ++ ++ for (i = 0; i < ARRAY_SIZE(uart_ports); i++) { ++ if (uart_ports[i] == pup) ++ uart_ports[i] = NULL; ++ else if (uart_ports[i]) ++ busy = true; ++ } ++ ++ if (!busy) ++ uart_unregister_driver(&phytium_uart); ++} ++ ++static int phytium_find_free_port(void) ++{ ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(uart_ports); i++) ++ if (uart_ports[i] == NULL) ++ return i; ++ ++ return -EBUSY; ++} ++ ++static int phytium_register_port(struct 
phytium_uart_port *pup) ++{ ++ int rc; ++ ++ /* Ensure interrupts from this UART are masked and cleared */ ++ phytium_uart_write(0, pup, REG_IMSC); ++ phytium_uart_write(0xffff, pup, REG_ICR); ++ ++ if (!phytium_uart.state) { ++ rc = uart_register_driver(&phytium_uart); ++ if (rc < 0) { ++ dev_err(pup->port.dev, ++ "Failed to register Phytium PCI UART driver\n"); ++ return rc; ++ } ++ } ++ ++ rc = uart_add_one_port(&phytium_uart, &pup->port); ++ if (rc) ++ phytium_unregister_port(pup); ++ ++ return rc; ++} ++ ++static int phytium_uart_probe(struct pci_dev *pdev, ++ const struct pci_device_id *id) ++{ ++ struct phytium_uart_port *pup; ++ int portnr, rc; ++ ++ portnr = phytium_find_free_port(); ++ if (portnr < 0) ++ return portnr; ++ ++ pup = devm_kzalloc(&pdev->dev, sizeof(struct phytium_uart_port), ++ GFP_KERNEL); ++ if (!pup) ++ return -ENOMEM; ++ ++ rc = pcim_enable_device(pdev); ++ if (rc) ++ return rc; ++ ++ rc = pcim_iomap_regions_request_all(pdev, 0x01, pci_name(pdev)); ++ if (rc) ++ return rc; ++ ++ pup->port.iotype = UPIO_MEM32; ++ pup->port.irq = pdev->irq; ++ pup->port.mapbase = pci_resource_start(pdev, 0); ++ pup->port.membase = pcim_iomap_table(pdev)[0]; ++ pup->port.ops = &phytium_uart_ops; ++ pup->port.dev = &pdev->dev; ++ pup->port.fifosize = 32; ++ pup->port.flags = UPF_BOOT_AUTOCONF; ++ pup->port.line = portnr; ++ ++ uart_ports[portnr] = pup; ++ ++ pup->old_cr = 0; ++ snprintf(pup->type, sizeof(pup->type), "pci-uart"); ++ ++ pci_set_drvdata(pdev, pup); ++ ++ return phytium_register_port(pup); ++} ++ ++static void phytium_uart_remove(struct pci_dev *pdev) ++{ ++ struct phytium_uart_port *pup = pci_get_drvdata(pdev); ++ ++ uart_remove_one_port(&phytium_uart, &pup->port); ++ phytium_unregister_port(pup); ++} ++ ++#ifdef CONFIG_PM_SLEEP ++static int phytium_uart_suspend(struct device *dev) ++{ ++ struct pci_dev *pdev = to_pci_dev(dev); ++ struct phytium_uart_port *pup = pci_get_drvdata(pdev); ++ ++ if (pup) ++ uart_suspend_port(&phytium_uart, 
&pup->port); ++ ++ return 0; ++} ++ ++static int phytium_uart_resume(struct device *dev) ++{ ++ struct pci_dev *pdev = to_pci_dev(dev); ++ struct phytium_uart_port *pup = pci_get_drvdata(pdev); ++ ++ if (pup) ++ uart_resume_port(&phytium_uart, &pup->port); ++ ++ return 0; ++} ++#endif ++ ++static SIMPLE_DEV_PM_OPS(phytium_dev_pm_ops, phytium_uart_suspend, phytium_uart_resume); ++ ++static const struct pci_device_id pci_ids[] = { ++ { PCI_VDEVICE(PHYTIUM, 0xdc2e) }, ++ { 0 } ++}; ++MODULE_DEVICE_TABLE(pci, pci_ids); ++ ++static struct pci_driver phytium_uart_pci_driver = { ++ .name = DRV_NAME, ++ .probe = phytium_uart_probe, ++ .remove = phytium_uart_remove, ++ .driver = { ++ .pm = &phytium_dev_pm_ops, ++ }, ++ .id_table = pci_ids, ++}; ++ ++static int __init phytium_uart_init(void) ++{ ++ pr_info("Serial: Phytium PCI UART driver\n"); ++ ++ return pci_register_driver(&phytium_uart_pci_driver); ++} ++ ++static void __exit phytium_uart_exit(void) ++{ ++ pci_unregister_driver(&phytium_uart_pci_driver); ++} ++ ++module_init(phytium_uart_init); ++module_exit(phytium_uart_exit); ++ ++MODULE_AUTHOR("Chen Baozi "); ++MODULE_DESCRIPTION("Phytium PCI serial port driver"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c +index 462ce49f683a..fe200d1a69fd 100644 +--- a/drivers/usb/core/hub.c ++++ b/drivers/usb/core/hub.c +@@ -107,6 +107,8 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem); + static void hub_release(struct kref *kref); + static int usb_reset_and_verify_device(struct usb_device *udev); + static int hub_port_disable(struct usb_hub *hub, int port1, int set_state); ++static bool hub_port_warm_reset_required(struct usb_hub *hub, int port1, ++ u16 portstatus); + + static inline char *portspeed(struct usb_hub *hub, int portstatus) + { +@@ -1111,6 +1113,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) + USB_PORT_FEAT_ENABLE); + } + ++ /* Make sure a warm-reset request is handled by port_event */ ++ if 
(type == HUB_RESUME && ++ hub_port_warm_reset_required(hub, port1, portstatus)) ++ set_bit(port1, hub->event_bits); ++ + /* Clear status-change flags; we'll debounce later */ + if (portchange & USB_PORT_STAT_C_CONNECTION) { + need_debounce_delay = true; +@@ -2846,7 +2853,9 @@ static int hub_port_reset(struct usb_hub *hub, int port1, + USB_PORT_FEAT_C_BH_PORT_RESET); + usb_clear_port_feature(hub->hdev, port1, + USB_PORT_FEAT_C_PORT_LINK_STATE); +- usb_clear_port_feature(hub->hdev, port1, ++ ++ if (udev) ++ usb_clear_port_feature(hub->hdev, port1, + USB_PORT_FEAT_C_CONNECTION); + + /* +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c +index b1f27aa38b10..dddc6b2facb9 100644 +--- a/drivers/usb/host/xhci-mem.c ++++ b/drivers/usb/host/xhci-mem.c +@@ -2137,6 +2137,10 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, + if (rhub->min_rev < minor_revision) + rhub->min_rev = minor_revision; + ++ if (xhci->quirks & XHCI_SLOWDOWN_QUIRK) ++ if (major_revision == 0x03) ++ rhub->min_rev = 0; ++ + /* Port offset and count in the third dword, see section 7.2 */ + temp = readl(addr + 2); + port_offset = XHCI_EXT_PORT_OFF(temp); +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c +index 51dd8e00c4f8..a722fd5100fd 100644 +--- a/drivers/usb/host/xhci-pci.c ++++ b/drivers/usb/host/xhci-pci.c +@@ -41,6 +41,7 @@ + #define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8 + #define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8 + #define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0 ++#define PCI_DEVICE_ID_PHYTIUM_XHCI 0xdc27 + + #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9 + #define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba +@@ -212,6 +213,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) + if (pdev->vendor == PCI_VENDOR_ID_VIA) + xhci->quirks |= XHCI_RESET_ON_RESUME; + ++ if (pdev->vendor == PCI_VENDOR_ID_PHYTIUM || ++ pdev->device == PCI_DEVICE_ID_PHYTIUM_XHCI) { ++ xhci->quirks |= XHCI_RESET_ON_RESUME; ++ xhci->quirks 
|= XHCI_SLOWDOWN_QUIRK; ++ } ++ + /* See https://bugzilla.kernel.org/show_bug.cgi?id=79511 */ + if (pdev->vendor == PCI_VENDOR_ID_VIA && + pdev->device == 0x3432) +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h +index 6230a578324c..bfae3b2765d4 100644 +--- a/drivers/usb/host/xhci.h ++++ b/drivers/usb/host/xhci.h +@@ -1846,6 +1846,7 @@ struct xhci_hcd { + #define XHCI_SUSPEND_DELAY BIT_ULL(30) + #define XHCI_INTEL_USB_ROLE_SW BIT_ULL(31) + #define XHCI_ZERO_64B_REGS BIT_ULL(32) ++#define XHCI_SLOWDOWN_QUIRK BIT_ULL(33) + + unsigned int num_active_eps; + unsigned int limit_active_eps; +diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c +index 501aebb5b81f..472e8bd2536d 100644 +--- a/drivers/watchdog/dw_wdt.c ++++ b/drivers/watchdog/dw_wdt.c +@@ -18,6 +18,7 @@ + + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + ++#include + #include + #include + #include +@@ -54,7 +55,7 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started " + struct dw_wdt { + void __iomem *regs; + struct clk *clk; +- unsigned long rate; ++ u64 rate; + struct watchdog_device wdd; + struct reset_control *rst; + /* Save/restore */ +@@ -252,18 +253,32 @@ static int dw_wdt_drv_probe(struct platform_device *pdev) + if (IS_ERR(dw_wdt->regs)) + return PTR_ERR(dw_wdt->regs); + +- dw_wdt->clk = devm_clk_get(dev, NULL); +- if (IS_ERR(dw_wdt->clk)) +- return PTR_ERR(dw_wdt->clk); ++ if (dev->of_node) { ++ dw_wdt->clk = devm_clk_get(dev, NULL); ++ if (IS_ERR(dw_wdt->clk)) ++ return PTR_ERR(dw_wdt->clk); + +- ret = clk_prepare_enable(dw_wdt->clk); +- if (ret) +- return ret; ++ ret = clk_prepare_enable(dw_wdt->clk); ++ if (ret) ++ return ret; + +- dw_wdt->rate = clk_get_rate(dw_wdt->clk); +- if (dw_wdt->rate == 0) { +- ret = -EINVAL; +- goto out_disable_clk; ++ dw_wdt->rate = clk_get_rate(dw_wdt->clk); ++ if (dw_wdt->rate == 0) { ++ ret = -EINVAL; ++ goto out_disable_clk; ++ } ++ } else if (has_acpi_companion(&pdev->dev)) { ++ /* ++ * When Driver probe with ACPI 
device, clock devices ++ * are not available, so watchdog rate get from ++ * clock-frequency property given in _DSD object. ++ */ ++ device_property_read_u64(dev, "clock-frequency", ++ &dw_wdt->rate); ++ if (dw_wdt->rate == 0) { ++ ret = -EINVAL; ++ goto out_disable_clk; ++ } + } + + dw_wdt->rst = devm_reset_control_get_optional_shared(&pdev->dev, NULL); +@@ -325,6 +340,12 @@ static int dw_wdt_drv_remove(struct platform_device *pdev) + return 0; + } + ++static const struct acpi_device_id dw_wdt_acpi_match[] = { ++ { "PHYT0014", 0 }, ++ { } ++}; ++MODULE_DEVICE_TABLE(acpi, dw_wdt_acpi_match); ++ + #ifdef CONFIG_OF + static const struct of_device_id dw_wdt_of_match[] = { + { .compatible = "snps,dw-wdt", }, +@@ -339,6 +360,7 @@ static struct platform_driver dw_wdt_driver = { + .driver = { + .name = "dw_wdt", + .of_match_table = of_match_ptr(dw_wdt_of_match), ++ .acpi_match_table = ACPI_PTR(dw_wdt_acpi_match), + .pm = &dw_wdt_pm_ops, + }, + }; +diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h +index 14499757338f..c524216e0363 100644 +--- a/include/acpi/acpi_drivers.h ++++ b/include/acpi/acpi_drivers.h +@@ -81,7 +81,7 @@ + + int acpi_irq_penalty_init(void); + int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering, +- int *polarity, char **name); ++ int *polarity, char **name, struct fwnode_handle **rs_fwnode); + int acpi_pci_link_free_irq(acpi_handle handle); + + /* ACPI PCI Device Binding (pci_bind.c) */ +diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h +index c50ef7e6b942..e764040a2e51 100644 +--- a/include/acpi/actbl2.h ++++ b/include/acpi/actbl2.h +@@ -502,7 +502,8 @@ enum acpi_madt_type { + ACPI_MADT_TYPE_GENERIC_MSI_FRAME = 13, + ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR = 14, + ACPI_MADT_TYPE_GENERIC_TRANSLATOR = 15, +- ACPI_MADT_TYPE_RESERVED = 16 /* 16 and greater are reserved */ ++ ACPI_MADT_TYPE_RESERVED = 16, ++ ACPI_MADT_TYPE_PHYTIUM_2500 = 128 + }; + + /* +diff --git a/include/drm/drm_cache.h 
b/include/drm/drm_cache.h +index bfe1639df02d..97fc498dc767 100644 +--- a/include/drm/drm_cache.h ++++ b/include/drm/drm_cache.h +@@ -47,6 +47,24 @@ static inline bool drm_arch_can_wc_memory(void) + return false; + #elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3) + return false; ++#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64) ++ /* ++ * The DRM driver stack is designed to work with cache coherent devices ++ * only, but permits an optimization to be enabled in some cases, where ++ * for some buffers, both the CPU and the GPU use uncached mappings, ++ * removing the need for DMA snooping and allocation in the CPU caches. ++ * ++ * The use of uncached GPU mappings relies on the correct implementation ++ * of the PCIe NoSnoop TLP attribute by the platform, otherwise the GPU ++ * will use cached mappings nonetheless. On x86 platforms, this does not ++ * seem to matter, as uncached CPU mappings will snoop the caches in any ++ * case. However, on ARM and arm64, enabling this optimization on a ++ * platform where NoSnoop is ignored results in loss of coherency, which ++ * breaks correct operation of the device. Since we have no way of ++ * detecting whether NoSnoop works or not, just disable this ++ * optimization entirely for ARM and arm64. 
++ */ ++ return false; + #else + return true; + #endif +diff --git a/include/linux/acpi.h b/include/linux/acpi.h +index de8d3d3fa651..b110b2fe460f 100644 +--- a/include/linux/acpi.h ++++ b/include/linux/acpi.h +@@ -23,6 +23,7 @@ + + #include + #include /* for struct resource */ ++#include + #include + #include + #include +@@ -323,6 +324,22 @@ int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi); + void acpi_set_irq_model(enum acpi_irq_model_id model, + struct fwnode_handle *fwnode); + ++struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags, ++ unsigned int size, ++ struct fwnode_handle *fwnode, ++ const struct irq_domain_ops *ops, ++ void *host_data); ++ ++#ifdef CONFIG_ACPI_GENERIC_GSI ++struct fwnode_handle *acpi_get_irq_source_fwhandle(const struct acpi_resource_source *source); ++#else ++static inline ++struct fwnode_handle *acpi_get_irq_source_fwhandle(const struct acpi_resource_source *source) ++{ ++ return NULL; ++} ++#endif ++ + #ifdef CONFIG_X86_IO_APIC + extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity); + #else +diff --git a/include/linux/efi.h b/include/linux/efi.h +index 401e4b254e30..104a88eb0d31 100644 +--- a/include/linux/efi.h ++++ b/include/linux/efi.h +@@ -566,7 +566,7 @@ typedef efi_status_t efi_get_variable_t (efi_char16_t *name, efi_guid_t *vendor, + unsigned long *data_size, void *data); + typedef efi_status_t efi_get_next_variable_t (unsigned long *name_size, efi_char16_t *name, + efi_guid_t *vendor); +-typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor, ++typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor, + u32 attr, unsigned long data_size, + void *data); + typedef efi_status_t efi_get_next_high_mono_count_t (u32 *count); +@@ -672,6 +672,7 @@ void efi_native_runtime_setup(void); + #define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f) + #define LINUX_EFI_RANDOM_SEED_TABLE_GUID 
EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b) + #define LINUX_EFI_TPM_EVENT_LOG_GUID EFI_GUID(0xb7799cb0, 0xeca2, 0x4943, 0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa) ++#define LINUX_EFI_MEMRESERVE_TABLE_GUID EFI_GUID(0x888eb0c6, 0x8ede, 0x4ff5, 0xa8, 0xf0, 0x9a, 0xee, 0x5c, 0xb9, 0x77, 0xc2) + + typedef struct { + efi_guid_t guid; +@@ -957,6 +958,7 @@ extern struct efi { + unsigned long mem_attr_table; /* memory attributes table */ + unsigned long rng_seed; /* UEFI firmware random seed */ + unsigned long tpm_log; /* TPM2 Event Log table */ ++ unsigned long mem_reserve; /* Linux EFI memreserve table */ + efi_get_time_t *get_time; + efi_set_time_t *set_time; + efi_get_wakeup_time_t *get_wakeup_time; +@@ -1041,6 +1043,7 @@ extern int __init efi_uart_console_only (void); + extern u64 efi_mem_desc_end(efi_memory_desc_t *md); + extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md); + extern void efi_mem_reserve(phys_addr_t addr, u64 size); ++extern int efi_mem_reserve_persistent(phys_addr_t addr, u64 size); + extern void efi_initialize_iomem_resources(struct resource *code_resource, + struct resource *data_resource, struct resource *bss_resource); + extern void efi_reserve_boot_services(void); +@@ -1662,4 +1665,20 @@ extern int efi_tpm_eventlog_init(void); + /* Workqueue to queue EFI Runtime Services */ + extern struct workqueue_struct *efi_rts_wq; + ++struct linux_efi_memreserve { ++ int size; // allocated size of the array ++ atomic_t count; // number of entries used ++ phys_addr_t next; // pa of next struct instance ++ struct { ++ phys_addr_t base; ++ phys_addr_t size; ++ } entry[0]; ++}; ++ ++#define EFI_MEMRESERVE_SIZE(count) (sizeof(struct linux_efi_memreserve) + \ ++ (count) * sizeof(((struct linux_efi_memreserve *)0)->entry[0])) ++ ++#define EFI_MEMRESERVE_COUNT(size) (((size) - sizeof(struct linux_efi_memreserve)) \ ++ / sizeof(((struct linux_efi_memreserve *)0)->entry[0])) ++ + #endif /* _LINUX_EFI_H 
*/ +diff --git a/include/linux/irq.h b/include/linux/irq.h +index 201de12a9957..913ae49081ea 100644 +--- a/include/linux/irq.h ++++ b/include/linux/irq.h +@@ -208,6 +208,8 @@ struct irq_data { + * IRQD_SINGLE_TARGET - IRQ allows only a single affinity target + * IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set + * IRQD_CAN_RESERVE - Can use reservation mode ++ * IRQD_HANDLE_ENFORCE_IRQCTX - Enforce that handle_irq_*() is only invoked ++ * from actual interrupt context. + */ + enum { + IRQD_TRIGGER_MASK = 0xf, +@@ -230,6 +232,7 @@ enum { + IRQD_SINGLE_TARGET = (1 << 24), + IRQD_DEFAULT_TRIGGER_SET = (1 << 25), + IRQD_CAN_RESERVE = (1 << 26), ++ IRQD_HANDLE_ENFORCE_IRQCTX = (1 << 28), + }; + + #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) +@@ -299,6 +302,16 @@ static inline bool irqd_is_single_target(struct irq_data *d) + return __irqd_to_state(d) & IRQD_SINGLE_TARGET; + } + ++static inline void irqd_set_handle_enforce_irqctx(struct irq_data *d) ++{ ++ __irqd_to_state(d) |= IRQD_HANDLE_ENFORCE_IRQCTX; ++} ++ ++static inline bool irqd_is_handle_enforce_irqctx(struct irq_data *d) ++{ ++ return __irqd_to_state(d) & IRQD_HANDLE_ENFORCE_IRQCTX; ++} ++ + static inline bool irqd_is_wakeup_set(struct irq_data *d) + { + return __irqd_to_state(d) & IRQD_WAKEUP_STATE; +diff --git a/include/linux/irqchip/arm-gic-phytium-2500.h b/include/linux/irqchip/arm-gic-phytium-2500.h +new file mode 100644 +index 000000000000..2ae61f3293cc +--- /dev/null ++++ b/include/linux/irqchip/arm-gic-phytium-2500.h +@@ -0,0 +1,621 @@ ++/* ++ * Copyright (C) 2020 Phytium Corporation. ++ * Author: Wang Yinfeng ++ * Chen Baozi ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see . ++ */ ++#ifndef __LINUX_IRQCHIP_ARM_GIC_PHYTIUM_2500_H ++#define __LINUX_IRQCHIP_ARM_GIC_PHYTIUM_2500_H ++ ++/* ++ * Distributor registers. We assume we're running non-secure, with ARE ++ * being set. Secure-only and non-ARE registers are not described. ++ */ ++#define GICD_CTLR 0x0000 ++#define GICD_TYPER 0x0004 ++#define GICD_IIDR 0x0008 ++#define GICD_STATUSR 0x0010 ++#define GICD_SETSPI_NSR 0x0040 ++#define GICD_CLRSPI_NSR 0x0048 ++#define GICD_SETSPI_SR 0x0050 ++#define GICD_CLRSPI_SR 0x0058 ++#define GICD_SEIR 0x0068 ++#define GICD_IGROUPR 0x0080 ++#define GICD_ISENABLER 0x0100 ++#define GICD_ICENABLER 0x0180 ++#define GICD_ISPENDR 0x0200 ++#define GICD_ICPENDR 0x0280 ++#define GICD_ISACTIVER 0x0300 ++#define GICD_ICACTIVER 0x0380 ++#define GICD_IPRIORITYR 0x0400 ++#define GICD_ICFGR 0x0C00 ++#define GICD_IGRPMODR 0x0D00 ++#define GICD_NSACR 0x0E00 ++#define GICD_IROUTER 0x6000 ++#define GICD_IDREGS 0xFFD0 ++#define GICD_PIDR2 0xFFE8 ++ ++/* ++ * Those registers are actually from GICv2, but the spec demands that they ++ * are implemented as RES0 if ARE is 1 (which we do in KVM's emulated GICv3). 
++ */ ++#define GICD_ITARGETSR 0x0800 ++#define GICD_SGIR 0x0F00 ++#define GICD_CPENDSGIR 0x0F10 ++#define GICD_SPENDSGIR 0x0F20 ++ ++#define GICD_CTLR_RWP (1U << 31) ++#define GICD_CTLR_DS (1U << 6) ++#define GICD_CTLR_ARE_NS (1U << 4) ++#define GICD_CTLR_ENABLE_G1A (1U << 1) ++#define GICD_CTLR_ENABLE_G1 (1U << 0) ++ ++#define GICD_IIDR_IMPLEMENTER_SHIFT 0 ++#define GICD_IIDR_IMPLEMENTER_MASK (0xfff << GICD_IIDR_IMPLEMENTER_SHIFT) ++#define GICD_IIDR_REVISION_SHIFT 12 ++#define GICD_IIDR_REVISION_MASK (0xf << GICD_IIDR_REVISION_SHIFT) ++#define GICD_IIDR_VARIANT_SHIFT 16 ++#define GICD_IIDR_VARIANT_MASK (0xf << GICD_IIDR_VARIANT_SHIFT) ++#define GICD_IIDR_PRODUCT_ID_SHIFT 24 ++#define GICD_IIDR_PRODUCT_ID_MASK (0xff << GICD_IIDR_PRODUCT_ID_SHIFT) ++ ++ ++/* ++ * In systems with a single security state (what we emulate in KVM) ++ * the meaning of the interrupt group enable bits is slightly different ++ */ ++#define GICD_CTLR_ENABLE_SS_G1 (1U << 1) ++#define GICD_CTLR_ENABLE_SS_G0 (1U << 0) ++ ++#define GICD_TYPER_RSS (1U << 26) ++#define GICD_TYPER_LPIS (1U << 17) ++#define GICD_TYPER_MBIS (1U << 16) ++ ++#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1) ++#define GICD_TYPER_NUM_LPIS(typer) ((((typer) >> 11) & 0x1f) + 1) ++#define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32) ++ ++#define GICD_IROUTER_SPI_MODE_ONE (0U << 31) ++#define GICD_IROUTER_SPI_MODE_ANY (1U << 31) ++ ++#define GIC_PIDR2_ARCH_MASK 0xf0 ++#define GIC_PIDR2_ARCH_GICv3 0x30 ++#define GIC_PIDR2_ARCH_GICv4 0x40 ++ ++#define GIC_V3_DIST_SIZE 0x10000 ++ ++/* ++ * Re-Distributor registers, offsets from RD_base ++ */ ++#define GICR_CTLR GICD_CTLR ++#define GICR_IIDR 0x0004 ++#define GICR_TYPER 0x0008 ++#define GICR_STATUSR GICD_STATUSR ++#define GICR_WAKER 0x0014 ++#define GICR_SETLPIR 0x0040 ++#define GICR_CLRLPIR 0x0048 ++#define GICR_SEIR GICD_SEIR ++#define GICR_PROPBASER 0x0070 ++#define GICR_PENDBASER 0x0078 ++#define GICR_INVLPIR 0x00A0 ++#define GICR_INVALLR 0x00B0 
++#define GICR_SYNCR 0x00C0 ++#define GICR_MOVLPIR 0x0100 ++#define GICR_MOVALLR 0x0110 ++#define GICR_IDREGS GICD_IDREGS ++#define GICR_PIDR2 GICD_PIDR2 ++ ++#define GICR_CTLR_ENABLE_LPIS (1UL << 0) ++#define GICR_CTLR_RWP (1UL << 3) ++ ++#define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff) ++ ++#define GICR_WAKER_ProcessorSleep (1U << 1) ++#define GICR_WAKER_ChildrenAsleep (1U << 2) ++ ++#define GIC_BASER_CACHE_nCnB 0ULL ++#define GIC_BASER_CACHE_SameAsInner 0ULL ++#define GIC_BASER_CACHE_nC 1ULL ++#define GIC_BASER_CACHE_RaWt 2ULL ++#define GIC_BASER_CACHE_RaWb 3ULL ++#define GIC_BASER_CACHE_WaWt 4ULL ++#define GIC_BASER_CACHE_WaWb 5ULL ++#define GIC_BASER_CACHE_RaWaWt 6ULL ++#define GIC_BASER_CACHE_RaWaWb 7ULL ++#define GIC_BASER_CACHE_MASK 7ULL ++#define GIC_BASER_NonShareable 0ULL ++#define GIC_BASER_InnerShareable 1ULL ++#define GIC_BASER_OuterShareable 2ULL ++#define GIC_BASER_SHAREABILITY_MASK 3ULL ++ ++#define GIC_BASER_CACHEABILITY(reg, inner_outer, type) \ ++ (GIC_BASER_CACHE_##type << reg##_##inner_outer##_CACHEABILITY_SHIFT) ++ ++#define GIC_BASER_SHAREABILITY(reg, type) \ ++ (GIC_BASER_##type << reg##_SHAREABILITY_SHIFT) ++ ++/* encode a size field of width @w containing @n - 1 units */ ++#define GIC_ENCODE_SZ(n, w) (((unsigned long)(n) - 1) & GENMASK_ULL(((w) - 1), 0)) ++ ++#define GICR_PROPBASER_SHAREABILITY_SHIFT (10) ++#define GICR_PROPBASER_INNER_CACHEABILITY_SHIFT (7) ++#define GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT (56) ++#define GICR_PROPBASER_SHAREABILITY_MASK \ ++ GIC_BASER_SHAREABILITY(GICR_PROPBASER, SHAREABILITY_MASK) ++#define GICR_PROPBASER_INNER_CACHEABILITY_MASK \ ++ GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, MASK) ++#define GICR_PROPBASER_OUTER_CACHEABILITY_MASK \ ++ GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, MASK) ++#define GICR_PROPBASER_CACHEABILITY_MASK GICR_PROPBASER_INNER_CACHEABILITY_MASK ++ ++#define GICR_PROPBASER_InnerShareable \ ++ GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable) ++ ++#define 
GICR_PROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nCnB) ++#define GICR_PROPBASER_nC GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nC) ++#define GICR_PROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWt) ++#define GICR_PROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWt) ++#define GICR_PROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWt) ++#define GICR_PROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWb) ++#define GICR_PROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWt) ++#define GICR_PROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWb) ++ ++#define GICR_PROPBASER_IDBITS_MASK (0x1f) ++#define GICR_PROPBASER_ADDRESS(x) ((x) & GENMASK_ULL(51, 12)) ++#define GICR_PENDBASER_ADDRESS(x) ((x) & GENMASK_ULL(51, 16)) ++ ++#define GICR_PENDBASER_SHAREABILITY_SHIFT (10) ++#define GICR_PENDBASER_INNER_CACHEABILITY_SHIFT (7) ++#define GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT (56) ++#define GICR_PENDBASER_SHAREABILITY_MASK \ ++ GIC_BASER_SHAREABILITY(GICR_PENDBASER, SHAREABILITY_MASK) ++#define GICR_PENDBASER_INNER_CACHEABILITY_MASK \ ++ GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, MASK) ++#define GICR_PENDBASER_OUTER_CACHEABILITY_MASK \ ++ GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, MASK) ++#define GICR_PENDBASER_CACHEABILITY_MASK GICR_PENDBASER_INNER_CACHEABILITY_MASK ++ ++#define GICR_PENDBASER_InnerShareable \ ++ GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable) ++ ++#define GICR_PENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nCnB) ++#define GICR_PENDBASER_nC GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nC) ++#define GICR_PENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWt) ++#define GICR_PENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWt) ++#define GICR_PENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWt) ++#define GICR_PENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWb) ++#define 
GICR_PENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWaWt) ++#define GICR_PENDBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWaWb) ++ ++#define GICR_PENDBASER_PTZ BIT_ULL(62) ++ ++/* ++ * Re-Distributor registers, offsets from SGI_base ++ */ ++#define GICR_IGROUPR0 GICD_IGROUPR ++#define GICR_ISENABLER0 GICD_ISENABLER ++#define GICR_ICENABLER0 GICD_ICENABLER ++#define GICR_ISPENDR0 GICD_ISPENDR ++#define GICR_ICPENDR0 GICD_ICPENDR ++#define GICR_ISACTIVER0 GICD_ISACTIVER ++#define GICR_ICACTIVER0 GICD_ICACTIVER ++#define GICR_IPRIORITYR0 GICD_IPRIORITYR ++#define GICR_ICFGR0 GICD_ICFGR ++#define GICR_IGRPMODR0 GICD_IGRPMODR ++#define GICR_NSACR GICD_NSACR ++ ++#define GICR_TYPER_PLPIS (1U << 0) ++#define GICR_TYPER_VLPIS (1U << 1) ++#define GICR_TYPER_DirectLPIS (1U << 3) ++#define GICR_TYPER_LAST (1U << 4) ++ ++#define GIC_V3_REDIST_SIZE 0x20000 ++ ++#define LPI_PROP_GROUP1 (1 << 1) ++#define LPI_PROP_ENABLED (1 << 0) ++ ++/* ++ * Re-Distributor registers, offsets from VLPI_base ++ */ ++#define GICR_VPROPBASER 0x0070 ++ ++#define GICR_VPROPBASER_IDBITS_MASK 0x1f ++ ++#define GICR_VPROPBASER_SHAREABILITY_SHIFT (10) ++#define GICR_VPROPBASER_INNER_CACHEABILITY_SHIFT (7) ++#define GICR_VPROPBASER_OUTER_CACHEABILITY_SHIFT (56) ++ ++#define GICR_VPROPBASER_SHAREABILITY_MASK \ ++ GIC_BASER_SHAREABILITY(GICR_VPROPBASER, SHAREABILITY_MASK) ++#define GICR_VPROPBASER_INNER_CACHEABILITY_MASK \ ++ GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, MASK) ++#define GICR_VPROPBASER_OUTER_CACHEABILITY_MASK \ ++ GIC_BASER_CACHEABILITY(GICR_VPROPBASER, OUTER, MASK) ++#define GICR_VPROPBASER_CACHEABILITY_MASK \ ++ GICR_VPROPBASER_INNER_CACHEABILITY_MASK ++ ++#define GICR_VPROPBASER_InnerShareable \ ++ GIC_BASER_SHAREABILITY(GICR_VPROPBASER, InnerShareable) ++ ++#define GICR_VPROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nCnB) ++#define GICR_VPROPBASER_nC GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nC) ++#define GICR_VPROPBASER_RaWt 
GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWt) ++#define GICR_VPROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWt) ++#define GICR_VPROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWt) ++#define GICR_VPROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWb) ++#define GICR_VPROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWt) ++#define GICR_VPROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWb) ++ ++#define GICR_VPENDBASER 0x0078 ++ ++#define GICR_VPENDBASER_SHAREABILITY_SHIFT (10) ++#define GICR_VPENDBASER_INNER_CACHEABILITY_SHIFT (7) ++#define GICR_VPENDBASER_OUTER_CACHEABILITY_SHIFT (56) ++#define GICR_VPENDBASER_SHAREABILITY_MASK \ ++ GIC_BASER_SHAREABILITY(GICR_VPENDBASER, SHAREABILITY_MASK) ++#define GICR_VPENDBASER_INNER_CACHEABILITY_MASK \ ++ GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, MASK) ++#define GICR_VPENDBASER_OUTER_CACHEABILITY_MASK \ ++ GIC_BASER_CACHEABILITY(GICR_VPENDBASER, OUTER, MASK) ++#define GICR_VPENDBASER_CACHEABILITY_MASK \ ++ GICR_VPENDBASER_INNER_CACHEABILITY_MASK ++ ++#define GICR_VPENDBASER_NonShareable \ ++ GIC_BASER_SHAREABILITY(GICR_VPENDBASER, NonShareable) ++ ++#define GICR_VPENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nCnB) ++#define GICR_VPENDBASER_nC GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nC) ++#define GICR_VPENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt) ++#define GICR_VPENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt) ++#define GICR_VPENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWt) ++#define GICR_VPENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWb) ++#define GICR_VPENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWt) ++#define GICR_VPENDBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWb) ++ ++#define GICR_VPENDBASER_Dirty (1ULL << 60) ++#define GICR_VPENDBASER_PendingLast (1ULL << 61) ++#define 
GICR_VPENDBASER_IDAI (1ULL << 62) ++#define GICR_VPENDBASER_Valid (1ULL << 63) ++ ++/* ++ * ITS registers, offsets from ITS_base ++ */ ++#define GITS_CTLR 0x0000 ++#define GITS_IIDR 0x0004 ++#define GITS_TYPER 0x0008 ++#define GITS_CBASER 0x0080 ++#define GITS_CWRITER 0x0088 ++#define GITS_CREADR 0x0090 ++#define GITS_BASER 0x0100 ++#define GITS_IDREGS_BASE 0xffd0 ++#define GITS_PIDR0 0xffe0 ++#define GITS_PIDR1 0xffe4 ++#define GITS_PIDR2 GICR_PIDR2 ++#define GITS_PIDR4 0xffd0 ++#define GITS_CIDR0 0xfff0 ++#define GITS_CIDR1 0xfff4 ++#define GITS_CIDR2 0xfff8 ++#define GITS_CIDR3 0xfffc ++ ++#define GITS_TRANSLATER 0x10040 ++ ++#define GITS_CTLR_ENABLE (1U << 0) ++#define GITS_CTLR_ImDe (1U << 1) ++#define GITS_CTLR_ITS_NUMBER_SHIFT 4 ++#define GITS_CTLR_ITS_NUMBER (0xFU << GITS_CTLR_ITS_NUMBER_SHIFT) ++#define GITS_CTLR_QUIESCENT (1U << 31) ++ ++#define GITS_TYPER_PLPIS (1UL << 0) ++#define GITS_TYPER_VLPIS (1UL << 1) ++#define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4 ++#define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0x1f) + 1) ++#define GITS_TYPER_IDBITS_SHIFT 8 ++#define GITS_TYPER_DEVBITS_SHIFT 13 ++#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1) ++#define GITS_TYPER_PTA (1UL << 19) ++#define GITS_TYPER_HCC_SHIFT 24 ++#define GITS_TYPER_HCC(r) (((r) >> GITS_TYPER_HCC_SHIFT) & 0xff) ++#define GITS_TYPER_VMOVP (1ULL << 37) ++ ++#define GITS_IIDR_REV_SHIFT 12 ++#define GITS_IIDR_REV_MASK (0xf << GITS_IIDR_REV_SHIFT) ++#define GITS_IIDR_REV(r) (((r) >> GITS_IIDR_REV_SHIFT) & 0xf) ++#define GITS_IIDR_PRODUCTID_SHIFT 24 ++ ++#define GITS_CBASER_VALID (1ULL << 63) ++#define GITS_CBASER_SHAREABILITY_SHIFT (10) ++#define GITS_CBASER_INNER_CACHEABILITY_SHIFT (59) ++#define GITS_CBASER_OUTER_CACHEABILITY_SHIFT (53) ++#define GITS_CBASER_SHAREABILITY_MASK \ ++ GIC_BASER_SHAREABILITY(GITS_CBASER, SHAREABILITY_MASK) ++#define GITS_CBASER_INNER_CACHEABILITY_MASK \ ++ GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, MASK) 
++#define GITS_CBASER_OUTER_CACHEABILITY_MASK \ ++ GIC_BASER_CACHEABILITY(GITS_CBASER, OUTER, MASK) ++#define GITS_CBASER_CACHEABILITY_MASK GITS_CBASER_INNER_CACHEABILITY_MASK ++ ++#define GITS_CBASER_InnerShareable \ ++ GIC_BASER_SHAREABILITY(GITS_CBASER, InnerShareable) ++ ++#define GITS_CBASER_nCnB GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nCnB) ++#define GITS_CBASER_nC GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nC) ++#define GITS_CBASER_RaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWt) ++#define GITS_CBASER_RaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWt) ++#define GITS_CBASER_WaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWt) ++#define GITS_CBASER_WaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWb) ++#define GITS_CBASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWt) ++#define GITS_CBASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWb) ++ ++#define GITS_BASER_NR_REGS 8 ++ ++#define GITS_BASER_VALID (1ULL << 63) ++#define GITS_BASER_INDIRECT (1ULL << 62) ++ ++#define GITS_BASER_INNER_CACHEABILITY_SHIFT (59) ++#define GITS_BASER_OUTER_CACHEABILITY_SHIFT (53) ++#define GITS_BASER_INNER_CACHEABILITY_MASK \ ++ GIC_BASER_CACHEABILITY(GITS_BASER, INNER, MASK) ++#define GITS_BASER_CACHEABILITY_MASK GITS_BASER_INNER_CACHEABILITY_MASK ++#define GITS_BASER_OUTER_CACHEABILITY_MASK \ ++ GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, MASK) ++#define GITS_BASER_SHAREABILITY_MASK \ ++ GIC_BASER_SHAREABILITY(GITS_BASER, SHAREABILITY_MASK) ++ ++#define GITS_BASER_nCnB GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nCnB) ++#define GITS_BASER_nC GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nC) ++#define GITS_BASER_RaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWt) ++#define GITS_BASER_RaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWt) ++#define GITS_BASER_WaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWt) ++#define GITS_BASER_WaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWb) ++#define GITS_BASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, 
RaWaWt) ++#define GITS_BASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWaWb) ++ ++#define GITS_BASER_TYPE_SHIFT (56) ++#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7) ++#define GITS_BASER_ENTRY_SIZE_SHIFT (48) ++#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1) ++#define GITS_BASER_ENTRY_SIZE_MASK GENMASK_ULL(52, 48) ++#define GITS_BASER_PHYS_52_to_48(phys) \ ++ (((phys) & GENMASK_ULL(47, 16)) | (((phys) >> 48) & 0xf) << 12) ++#define GITS_BASER_SHAREABILITY_SHIFT (10) ++#define GITS_BASER_InnerShareable \ ++ GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable) ++#define GITS_BASER_PAGE_SIZE_SHIFT (8) ++#define GITS_BASER_PAGE_SIZE_4K (0ULL << GITS_BASER_PAGE_SIZE_SHIFT) ++#define GITS_BASER_PAGE_SIZE_16K (1ULL << GITS_BASER_PAGE_SIZE_SHIFT) ++#define GITS_BASER_PAGE_SIZE_64K (2ULL << GITS_BASER_PAGE_SIZE_SHIFT) ++#define GITS_BASER_PAGE_SIZE_MASK (3ULL << GITS_BASER_PAGE_SIZE_SHIFT) ++#define GITS_BASER_PAGES_MAX 256 ++#define GITS_BASER_PAGES_SHIFT (0) ++#define GITS_BASER_NR_PAGES(r) (((r) & 0xff) + 1) ++ ++#define GITS_BASER_TYPE_NONE 0 ++#define GITS_BASER_TYPE_DEVICE 1 ++#define GITS_BASER_TYPE_VCPU 2 ++#define GITS_BASER_TYPE_RESERVED3 3 ++#define GITS_BASER_TYPE_COLLECTION 4 ++#define GITS_BASER_TYPE_RESERVED5 5 ++#define GITS_BASER_TYPE_RESERVED6 6 ++#define GITS_BASER_TYPE_RESERVED7 7 ++ ++#define GITS_LVL1_ENTRY_SIZE (8UL) ++ ++/* ++ * ITS commands ++ */ ++#define GITS_CMD_MAPD 0x08 ++#define GITS_CMD_MAPC 0x09 ++#define GITS_CMD_MAPTI 0x0a ++#define GITS_CMD_MAPI 0x0b ++#define GITS_CMD_MOVI 0x01 ++#define GITS_CMD_DISCARD 0x0f ++#define GITS_CMD_INV 0x0c ++#define GITS_CMD_MOVALL 0x0e ++#define GITS_CMD_INVALL 0x0d ++#define GITS_CMD_INT 0x03 ++#define GITS_CMD_CLEAR 0x04 ++#define GITS_CMD_SYNC 0x05 ++ ++/* ++ * GICv4 ITS specific commands ++ */ ++#define GITS_CMD_GICv4(x) ((x) | 0x20) ++#define GITS_CMD_VINVALL GITS_CMD_GICv4(GITS_CMD_INVALL) ++#define GITS_CMD_VMAPP 
GITS_CMD_GICv4(GITS_CMD_MAPC) ++#define GITS_CMD_VMAPTI GITS_CMD_GICv4(GITS_CMD_MAPTI) ++#define GITS_CMD_VMOVI GITS_CMD_GICv4(GITS_CMD_MOVI) ++#define GITS_CMD_VSYNC GITS_CMD_GICv4(GITS_CMD_SYNC) ++/* VMOVP is the odd one, as it doesn't have a physical counterpart */ ++#define GITS_CMD_VMOVP GITS_CMD_GICv4(2) ++ ++/* ++ * ITS error numbers ++ */ ++#define E_ITS_MOVI_UNMAPPED_INTERRUPT 0x010107 ++#define E_ITS_MOVI_UNMAPPED_COLLECTION 0x010109 ++#define E_ITS_INT_UNMAPPED_INTERRUPT 0x010307 ++#define E_ITS_CLEAR_UNMAPPED_INTERRUPT 0x010507 ++#define E_ITS_MAPD_DEVICE_OOR 0x010801 ++#define E_ITS_MAPD_ITTSIZE_OOR 0x010802 ++#define E_ITS_MAPC_PROCNUM_OOR 0x010902 ++#define E_ITS_MAPC_COLLECTION_OOR 0x010903 ++#define E_ITS_MAPTI_UNMAPPED_DEVICE 0x010a04 ++#define E_ITS_MAPTI_ID_OOR 0x010a05 ++#define E_ITS_MAPTI_PHYSICALID_OOR 0x010a06 ++#define E_ITS_INV_UNMAPPED_INTERRUPT 0x010c07 ++#define E_ITS_INVALL_UNMAPPED_COLLECTION 0x010d09 ++#define E_ITS_MOVALL_PROCNUM_OOR 0x010e01 ++#define E_ITS_DISCARD_UNMAPPED_INTERRUPT 0x010f07 ++ ++/* ++ * CPU interface registers ++ */ ++#define ICC_CTLR_EL1_EOImode_SHIFT (1) ++#define ICC_CTLR_EL1_EOImode_drop_dir (0U << ICC_CTLR_EL1_EOImode_SHIFT) ++#define ICC_CTLR_EL1_EOImode_drop (1U << ICC_CTLR_EL1_EOImode_SHIFT) ++#define ICC_CTLR_EL1_EOImode_MASK (1 << ICC_CTLR_EL1_EOImode_SHIFT) ++#define ICC_CTLR_EL1_CBPR_SHIFT 0 ++#define ICC_CTLR_EL1_CBPR_MASK (1 << ICC_CTLR_EL1_CBPR_SHIFT) ++#define ICC_CTLR_EL1_PRI_BITS_SHIFT 8 ++#define ICC_CTLR_EL1_PRI_BITS_MASK (0x7 << ICC_CTLR_EL1_PRI_BITS_SHIFT) ++#define ICC_CTLR_EL1_ID_BITS_SHIFT 11 ++#define ICC_CTLR_EL1_ID_BITS_MASK (0x7 << ICC_CTLR_EL1_ID_BITS_SHIFT) ++#define ICC_CTLR_EL1_SEIS_SHIFT 14 ++#define ICC_CTLR_EL1_SEIS_MASK (0x1 << ICC_CTLR_EL1_SEIS_SHIFT) ++#define ICC_CTLR_EL1_A3V_SHIFT 15 ++#define ICC_CTLR_EL1_A3V_MASK (0x1 << ICC_CTLR_EL1_A3V_SHIFT) ++#define ICC_CTLR_EL1_RSS (0x1 << 18) ++#define ICC_PMR_EL1_SHIFT 0 ++#define ICC_PMR_EL1_MASK (0xff << ICC_PMR_EL1_SHIFT) 
++#define ICC_BPR0_EL1_SHIFT 0 ++#define ICC_BPR0_EL1_MASK (0x7 << ICC_BPR0_EL1_SHIFT) ++#define ICC_BPR1_EL1_SHIFT 0 ++#define ICC_BPR1_EL1_MASK (0x7 << ICC_BPR1_EL1_SHIFT) ++#define ICC_IGRPEN0_EL1_SHIFT 0 ++#define ICC_IGRPEN0_EL1_MASK (1 << ICC_IGRPEN0_EL1_SHIFT) ++#define ICC_IGRPEN1_EL1_SHIFT 0 ++#define ICC_IGRPEN1_EL1_MASK (1 << ICC_IGRPEN1_EL1_SHIFT) ++#define ICC_SRE_EL1_DIB (1U << 2) ++#define ICC_SRE_EL1_DFB (1U << 1) ++#define ICC_SRE_EL1_SRE (1U << 0) ++ ++/* ++ * Hypervisor interface registers (SRE only) ++ */ ++#define ICH_LR_VIRTUAL_ID_MASK ((1ULL << 32) - 1) ++ ++#define ICH_LR_EOI (1ULL << 41) ++#define ICH_LR_GROUP (1ULL << 60) ++#define ICH_LR_HW (1ULL << 61) ++#define ICH_LR_STATE (3ULL << 62) ++#define ICH_LR_PENDING_BIT (1ULL << 62) ++#define ICH_LR_ACTIVE_BIT (1ULL << 63) ++#define ICH_LR_PHYS_ID_SHIFT 32 ++#define ICH_LR_PHYS_ID_MASK (0x3ffULL << ICH_LR_PHYS_ID_SHIFT) ++#define ICH_LR_PRIORITY_SHIFT 48 ++#define ICH_LR_PRIORITY_MASK (0xffULL << ICH_LR_PRIORITY_SHIFT) ++ ++/* These are for GICv2 emulation only */ ++#define GICH_LR_VIRTUALID (0x3ffUL << 0) ++#define GICH_LR_PHYSID_CPUID_SHIFT (10) ++#define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT) ++ ++#define ICH_MISR_EOI (1 << 0) ++#define ICH_MISR_U (1 << 1) ++ ++#define ICH_HCR_EN (1 << 0) ++#define ICH_HCR_UIE (1 << 1) ++#define ICH_HCR_NPIE (1 << 3) ++#define ICH_HCR_TC (1 << 10) ++#define ICH_HCR_TALL0 (1 << 11) ++#define ICH_HCR_TALL1 (1 << 12) ++#define ICH_HCR_EOIcount_SHIFT 27 ++#define ICH_HCR_EOIcount_MASK (0x1f << ICH_HCR_EOIcount_SHIFT) ++ ++#define ICH_VMCR_ACK_CTL_SHIFT 2 ++#define ICH_VMCR_ACK_CTL_MASK (1 << ICH_VMCR_ACK_CTL_SHIFT) ++#define ICH_VMCR_FIQ_EN_SHIFT 3 ++#define ICH_VMCR_FIQ_EN_MASK (1 << ICH_VMCR_FIQ_EN_SHIFT) ++#define ICH_VMCR_CBPR_SHIFT 4 ++#define ICH_VMCR_CBPR_MASK (1 << ICH_VMCR_CBPR_SHIFT) ++#define ICH_VMCR_EOIM_SHIFT 9 ++#define ICH_VMCR_EOIM_MASK (1 << ICH_VMCR_EOIM_SHIFT) ++#define ICH_VMCR_BPR1_SHIFT 18 ++#define ICH_VMCR_BPR1_MASK 
(7 << ICH_VMCR_BPR1_SHIFT) ++#define ICH_VMCR_BPR0_SHIFT 21 ++#define ICH_VMCR_BPR0_MASK (7 << ICH_VMCR_BPR0_SHIFT) ++#define ICH_VMCR_PMR_SHIFT 24 ++#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT) ++#define ICH_VMCR_ENG0_SHIFT 0 ++#define ICH_VMCR_ENG0_MASK (1 << ICH_VMCR_ENG0_SHIFT) ++#define ICH_VMCR_ENG1_SHIFT 1 ++#define ICH_VMCR_ENG1_MASK (1 << ICH_VMCR_ENG1_SHIFT) ++ ++#define ICH_VTR_PRI_BITS_SHIFT 29 ++#define ICH_VTR_PRI_BITS_MASK (7 << ICH_VTR_PRI_BITS_SHIFT) ++#define ICH_VTR_ID_BITS_SHIFT 23 ++#define ICH_VTR_ID_BITS_MASK (7 << ICH_VTR_ID_BITS_SHIFT) ++#define ICH_VTR_SEIS_SHIFT 22 ++#define ICH_VTR_SEIS_MASK (1 << ICH_VTR_SEIS_SHIFT) ++#define ICH_VTR_A3V_SHIFT 21 ++#define ICH_VTR_A3V_MASK (1 << ICH_VTR_A3V_SHIFT) ++ ++#define ICC_IAR1_EL1_SPURIOUS 0x3ff ++ ++#define ICC_SRE_EL2_SRE (1 << 0) ++#define ICC_SRE_EL2_ENABLE (1 << 3) ++ ++#define ICC_SGI1R_TARGET_LIST_SHIFT 0 ++#define ICC_SGI1R_TARGET_LIST_MASK (0xffff << ICC_SGI1R_TARGET_LIST_SHIFT) ++#define ICC_SGI1R_AFFINITY_1_SHIFT 16 ++#define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT) ++#define ICC_SGI1R_SGI_ID_SHIFT 24 ++#define ICC_SGI1R_SGI_ID_MASK (0xfULL << ICC_SGI1R_SGI_ID_SHIFT) ++#define ICC_SGI1R_AFFINITY_2_SHIFT 32 ++#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT) ++#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40 ++#define ICC_SGI1R_RS_SHIFT 44 ++#define ICC_SGI1R_RS_MASK (0xfULL << ICC_SGI1R_RS_SHIFT) ++#define ICC_SGI1R_AFFINITY_3_SHIFT 48 ++#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT) ++ ++#include ++ ++#ifndef __ASSEMBLY__ ++ ++/* ++ * We need a value to serve as a irq-type for LPIs. Choose one that will ++ * hopefully pique the interest of the reviewer. 
++ */ ++#define GIC_IRQ_TYPE_LPI 0xa110c8ed ++ ++struct rdists { ++ struct { ++ void __iomem *rd_base; ++ struct page *pend_page; ++ phys_addr_t phys_base; ++ bool lpi_enabled; ++ } __percpu *rdist; ++ phys_addr_t prop_table_pa; ++ void *prop_table_va; ++ u64 flags; ++ u32 gicd_typer; ++ bool has_vlpis; ++ bool has_direct_lpi; ++}; ++ ++struct irq_domain; ++struct fwnode_handle; ++int phytium_its_cpu_init(void); ++int phytium_its_init(struct fwnode_handle *handle, struct rdists *rdists, ++ struct irq_domain *domain); ++ ++static inline bool gic_enable_sre(void) ++{ ++ u32 val; ++ ++ val = gic_read_sre(); ++ if (val & ICC_SRE_EL1_SRE) ++ return true; ++ ++ val |= ICC_SRE_EL1_SRE; ++ gic_write_sre(val); ++ val = gic_read_sre(); ++ ++ return !!(val & ICC_SRE_EL1_SRE); ++} ++ ++#endif ++ ++#endif +diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h +index 8bdbb5f29494..c2a7b863fc2e 100644 +--- a/include/linux/irqchip/arm-gic-v3.h ++++ b/include/linux/irqchip/arm-gic-v3.h +@@ -585,8 +585,10 @@ struct rdists { + void __iomem *rd_base; + struct page *pend_page; + phys_addr_t phys_base; ++ bool lpi_enabled; + } __percpu *rdist; +- struct page *prop_page; ++ phys_addr_t prop_table_pa; ++ void *prop_table_va; + u64 flags; + u32 gicd_typer; + bool has_vlpis; +diff --git a/include/linux/memblock.h b/include/linux/memblock.h +index 516920549378..c3e5e5e8fa2a 100644 +--- a/include/linux/memblock.h ++++ b/include/linux/memblock.h +@@ -316,6 +316,9 @@ static inline int memblock_get_region_node(const struct memblock_region *r) + } + #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ + ++void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align, ++ phys_addr_t min_addr, phys_addr_t max_addr, ++ int nid); + phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid); + phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid); + +diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h +index 
d157983b84cf..60d9d8f5956a 100644 +--- a/include/linux/pci_ids.h ++++ b/include/linux/pci_ids.h +@@ -3086,4 +3086,6 @@ + + #define PCI_VENDOR_ID_NCUBE 0x10ff + ++#define PCI_VENDOR_ID_PHYTIUM 0x1db7 ++ + #endif /* _LINUX_PCI_IDS_H */ +diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h +index cd1773d0e08f..935f53b9b771 100644 +--- a/include/sound/hdaudio.h ++++ b/include/sound/hdaudio.h +@@ -354,6 +354,7 @@ struct hdac_bus { + bool align_bdle_4k:1; /* BDLE align 4K boundary */ + bool reverse_assign:1; /* assign devices in reverse order */ + bool corbrp_self_clear:1; /* CORBRP clears itself after reset */ ++ bool cmd_resend:1; /* command resend */ + + int bdl_pos_adj; /* BDL position adjustment */ + +diff --git a/include/sound/hdmi-codec.h b/include/sound/hdmi-codec.h +index 9483c55f871b..1e3f2d0fe547 100644 +--- a/include/sound/hdmi-codec.h ++++ b/include/sound/hdmi-codec.h +@@ -55,6 +55,9 @@ struct hdmi_codec_params { + int channels; + }; + ++typedef void (*hdmi_codec_plugged_cb)(struct device *dev, ++ bool plugged); ++ + struct hdmi_codec_pdata; + struct hdmi_codec_ops { + /* +@@ -96,6 +99,14 @@ struct hdmi_codec_ops { + */ + int (*get_dai_id)(struct snd_soc_component *comment, + struct device_node *endpoint); ++ ++ /* ++ * Hook callback function to handle connector plug event. 
++ * Optional ++ */ ++ int (*hook_plugged_cb)(struct device *dev, void *data, ++ hdmi_codec_plugged_cb fn, ++ struct device *codec_dev); + }; + + /* HDMI codec initalization data */ +@@ -107,6 +118,12 @@ struct hdmi_codec_pdata { + void *data; + }; + ++struct snd_soc_component; ++struct snd_soc_jack; ++ ++int hdmi_codec_set_jack_detect(struct snd_soc_component *component, ++ struct snd_soc_jack *jack); ++ + #define HDMI_CODEC_DRV_NAME "hdmi-audio-codec" + + #endif /* __HDMI_CODEC_H__ */ +diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h +index dce5f9dae121..998ca4e67c0e 100644 +--- a/include/uapi/linux/serial_core.h ++++ b/include/uapi/linux/serial_core.h +@@ -281,4 +281,7 @@ + /* MediaTek BTIF */ + #define PORT_MTK_BTIF 117 + ++/* Phytium PCI UART */ ++#define PORT_PHYTIUM 118 ++ + #endif /* _UAPILINUX_SERIAL_CORE_H */ +diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c +index 6f636136cccc..c733e9bd7396 100644 +--- a/kernel/irq/debugfs.c ++++ b/kernel/irq/debugfs.c +@@ -208,8 +208,7 @@ static ssize_t irq_debug_write(struct file *file, const char __user *user_buf, + err = -EINVAL; + } else { + desc->istate |= IRQS_PENDING; +- check_irq_resend(desc); +- err = 0; ++ err = check_irq_resend(desc); + } + + raw_spin_unlock_irqrestore(&desc->lock, flags); +diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h +index ca6afa267070..d37b77b3daf8 100644 +--- a/kernel/irq/internals.h ++++ b/kernel/irq/internals.h +@@ -101,7 +101,7 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc); + irqreturn_t handle_irq_event(struct irq_desc *desc); + + /* Resending of interrupts :*/ +-void check_irq_resend(struct irq_desc *desc); ++int check_irq_resend(struct irq_desc *desc); + bool irq_wait_for_poll(struct irq_desc *desc); + void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action); + +@@ -411,6 +411,10 @@ static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc) + { + return desc->pending_mask; + } 
++static inline bool handle_enforce_irqctx(struct irq_data *data) ++{ ++ return irqd_is_handle_enforce_irqctx(data); ++} + bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear); + #else /* CONFIG_GENERIC_PENDING_IRQ */ + static inline bool irq_can_move_pcntxt(struct irq_data *data) +@@ -437,6 +441,10 @@ static inline bool irq_fixup_move_pending(struct irq_desc *desc, bool fclear) + { + return false; + } ++static inline bool handle_enforce_irqctx(struct irq_data *data) ++{ ++ return false; ++} + #endif /* !CONFIG_GENERIC_PENDING_IRQ */ + + #if !defined(CONFIG_IRQ_DOMAIN) || !defined(CONFIG_IRQ_DOMAIN_HIERARCHY) +diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c +index 578d0e5f1b5b..2b767ee47b69 100644 +--- a/kernel/irq/irqdesc.c ++++ b/kernel/irq/irqdesc.c +@@ -618,9 +618,15 @@ void irq_init_desc(unsigned int irq) + int generic_handle_irq(unsigned int irq) + { + struct irq_desc *desc = irq_to_desc(irq); ++ struct irq_data *data; + + if (!desc) + return -EINVAL; ++ ++ data = irq_desc_get_irq_data(desc); ++ if (WARN_ON_ONCE(!in_irq() && handle_enforce_irqctx(data))) ++ return -EPERM; ++ + generic_handle_irq_desc(desc); + return 0; + } +diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c +index 95414ad3506a..f1e3f80b21e2 100644 +--- a/kernel/irq/resend.c ++++ b/kernel/irq/resend.c +@@ -45,56 +45,88 @@ static void resend_irqs(unsigned long arg) + /* Tasklet to handle resend: */ + static DECLARE_TASKLET(resend_tasklet, resend_irqs, 0); + ++static int irq_sw_resend(struct irq_desc *desc) ++{ ++ unsigned int irq = irq_desc_get_irq(desc); ++ ++ /* ++ * Validate whether this interrupt can be safely injected from ++ * non interrupt context ++ */ ++ if (handle_enforce_irqctx(&desc->irq_data)) ++ return -EINVAL; ++ ++ /* ++ * If the interrupt is running in the thread context of the parent ++ * irq we need to be careful, because we cannot trigger it ++ * directly. 
++ */ ++ if (irq_settings_is_nested_thread(desc)) { ++ /* ++ * If the parent_irq is valid, we retrigger the parent, ++ * otherwise we do nothing. ++ */ ++ if (!desc->parent_irq) ++ return -EINVAL; ++ irq = desc->parent_irq; ++ } ++ ++ /* Set it pending and activate the softirq: */ ++ set_bit(irq, irqs_resend); ++ tasklet_schedule(&resend_tasklet); ++ return 0; ++} ++ ++#else ++static int irq_sw_resend(struct irq_desc *desc) ++{ ++ return -EINVAL; ++} ++#endif ++ ++static int try_retrigger(struct irq_desc *desc) ++{ ++ if (desc->irq_data.chip->irq_retrigger) ++ return desc->irq_data.chip->irq_retrigger(&desc->irq_data); ++ ++#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY ++ return irq_chip_retrigger_hierarchy(&desc->irq_data); ++#else ++ return 0; + #endif ++} + + /* + * IRQ resend + * + * Is called with interrupts disabled and desc->lock held. + */ +-void check_irq_resend(struct irq_desc *desc) ++int check_irq_resend(struct irq_desc *desc) + { ++ int err = 0; ++ + /* +- * We do not resend level type interrupts. Level type +- * interrupts are resent by hardware when they are still +- * active. Clear the pending bit so suspend/resume does not +- * get confused. ++ * We do not resend level type interrupts. Level type interrupts ++ * are resent by hardware when they are still active. Clear the ++ * pending bit so suspend/resume does not get confused. + */ + if (irq_settings_is_level(desc)) { + desc->istate &= ~IRQS_PENDING; +- return; ++ return -EINVAL; + } + if (desc->istate & IRQS_REPLAY) +- return; +- if (desc->istate & IRQS_PENDING) { +- desc->istate &= ~IRQS_PENDING; +- desc->istate |= IRQS_REPLAY; ++ return -EBUSY; + +- if (!desc->irq_data.chip->irq_retrigger || +- !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) { +-#ifdef CONFIG_HARDIRQS_SW_RESEND +- unsigned int irq = irq_desc_get_irq(desc); +- +- /* +- * If the interrupt is running in the thread +- * context of the parent irq we need to be +- * careful, because we cannot trigger it +- * directly. 
+- */ +- if (irq_settings_is_nested_thread(desc)) { +- /* +- * If the parent_irq is valid, we +- * retrigger the parent, otherwise we +- * do nothing. +- */ +- if (!desc->parent_irq) +- return; +- irq = desc->parent_irq; +- } +- /* Set it pending and activate the softirq: */ +- set_bit(irq, irqs_resend); +- tasklet_schedule(&resend_tasklet); +-#endif +- } +- } ++ if (!(desc->istate & IRQS_PENDING)) ++ return 0; ++ ++ desc->istate &= ~IRQS_PENDING; ++ ++ if (!try_retrigger(desc)) ++ err = irq_sw_resend(desc); ++ ++ /* If the retrigger was successfull, mark it with the REPLAY bit */ ++ if (!err) ++ desc->istate |= IRQS_REPLAY; ++ return err; + } +diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c +index c7471c3fb798..16c09cda3b02 100644 +--- a/kernel/locking/qrwlock.c ++++ b/kernel/locking/qrwlock.c +@@ -70,6 +70,8 @@ EXPORT_SYMBOL(queued_read_lock_slowpath); + */ + void queued_write_lock_slowpath(struct qrwlock *lock) + { ++ int cnts; ++ + /* Put the writer into the wait queue */ + arch_spin_lock(&lock->wait_lock); + +@@ -83,9 +85,8 @@ void queued_write_lock_slowpath(struct qrwlock *lock) + + /* When no more readers or writers, set the locked flag */ + do { +- atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING); +- } while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING, +- _QW_LOCKED) != _QW_WAITING); ++ cnts = atomic_cond_read_relaxed(&lock->cnts, VAL == _QW_WAITING); ++ } while (!atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)); + unlock: + arch_spin_unlock(&lock->wait_lock); + } +diff --git a/lib/crc32.c b/lib/crc32.c +index a6c9afafc8c8..45b1d67a1767 100644 +--- a/lib/crc32.c ++++ b/lib/crc32.c +@@ -183,21 +183,21 @@ static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p, + } + + #if CRC_LE_BITS == 1 +-u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) ++u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len) + { + return crc32_le_generic(crc, p, len, NULL, CRC32_POLY_LE); + } 
+-u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len) ++u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len) + { + return crc32_le_generic(crc, p, len, NULL, CRC32C_POLY_LE); + } + #else +-u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) ++u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len) + { + return crc32_le_generic(crc, p, len, + (const u32 (*)[256])crc32table_le, CRC32_POLY_LE); + } +-u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len) ++u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len) + { + return crc32_le_generic(crc, p, len, + (const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE); +@@ -206,6 +206,9 @@ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len) + EXPORT_SYMBOL(crc32_le); + EXPORT_SYMBOL(__crc32c_le); + ++u32 crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le); ++u32 __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le); ++ + /* + * This multiplies the polynomials x and y modulo the given modulus. 
+ * This follows the "little-endian" CRC convention that the lsbit +diff --git a/mm/memblock.c b/mm/memblock.c +index 237944479d25..e5fbec3b69f1 100644 +--- a/mm/memblock.c ++++ b/mm/memblock.c +@@ -1349,7 +1349,7 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i + static void * __init memblock_virt_alloc_internal( + phys_addr_t size, phys_addr_t align, + phys_addr_t min_addr, phys_addr_t max_addr, +- int nid) ++ int nid, bool exact_nid) + { + phys_addr_t alloc; + void *ptr; +@@ -1377,7 +1377,7 @@ static void * __init memblock_virt_alloc_internal( + if (alloc && !memblock_reserve(alloc, size)) + goto done; + +- if (nid != NUMA_NO_NODE) { ++ if (nid != NUMA_NO_NODE && !exact_nid) { + alloc = memblock_find_in_range_node(size, align, min_addr, + max_addr, NUMA_NO_NODE, + flags); +@@ -1443,7 +1443,7 @@ void * __init memblock_virt_alloc_try_nid_raw( + &max_addr, (void *)_RET_IP_); + + ptr = memblock_virt_alloc_internal(size, align, +- min_addr, max_addr, nid); ++ min_addr, max_addr, nid, false); + #ifdef CONFIG_DEBUG_VM + if (ptr && size > 0) + memset(ptr, PAGE_POISON_PATTERN, size); +@@ -1451,6 +1451,43 @@ void * __init memblock_virt_alloc_try_nid_raw( + return ptr; + } + ++/** ++ * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node ++ * without zeroing memory ++ * @size: size of memory block to be allocated in bytes ++ * @align: alignment of the region and block's size ++ * @min_addr: the lower bound of the memory region from where the allocation ++ * is preferred (phys address) ++ * @max_addr: the upper bound of the memory region from where the allocation ++ * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to ++ * allocate only from memory limited by memblock.current_limit value ++ * @nid: nid of the free area to find, %NUMA_NO_NODE for any node ++ * ++ * Public function, provides additional debug information (including caller ++ * info), if enabled. Does not zero allocated memory. 
++ * ++ * Return: ++ * Virtual address of allocated memory block on success, NULL on failure. ++ */ ++void * __init memblock_alloc_exact_nid_raw( ++ phys_addr_t size, phys_addr_t align, ++ phys_addr_t min_addr, phys_addr_t max_addr, ++ int nid) ++{ ++ void *ptr; ++ ++ memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n", ++ __func__, (u64)size, (u64)align, nid, &min_addr, ++ &max_addr, (void *)_RET_IP_); ++ ++ ptr = memblock_virt_alloc_internal(size, align, ++ min_addr, max_addr, nid, true); ++ if (ptr && size > 0) ++ memset(ptr, PAGE_POISON_PATTERN, size); ++ ++ return ptr; ++} ++ + /** + * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block + * @size: size of memory block to be allocated in bytes +@@ -1480,7 +1517,7 @@ void * __init memblock_virt_alloc_try_nid_nopanic( + &max_addr, (void *)_RET_IP_); + + ptr = memblock_virt_alloc_internal(size, align, +- min_addr, max_addr, nid); ++ min_addr, max_addr, nid, false); + if (ptr) + memset(ptr, 0, size); + return ptr; +@@ -1515,7 +1552,7 @@ void * __init memblock_virt_alloc_try_nid( + __func__, (u64)size, (u64)align, nid, &min_addr, + &max_addr, (void *)_RET_IP_); + ptr = memblock_virt_alloc_internal(size, align, +- min_addr, max_addr, nid); ++ min_addr, max_addr, nid, false); + if (ptr) { + memset(ptr, 0, size); + return ptr; +diff --git a/mm/sparse.c b/mm/sparse.c +index 10b07eea9a6e..ecd34c6b6cb6 100644 +--- a/mm/sparse.c ++++ b/mm/sparse.c +@@ -17,6 +17,8 @@ + #include + #include + ++#include ++ + /* + * Permanent SPARSEMEM data: + * +@@ -405,7 +407,7 @@ static void __init sparse_buffer_init(unsigned long size, int nid) + { + WARN_ON(sparsemap_buf); /* forgot to call sparse_buffer_fini()? 
*/ + sparsemap_buf = +- memblock_virt_alloc_try_nid_raw(size, PAGE_SIZE, ++ memblock_alloc_exact_nid_raw(size, section_map_size(), + __pa(MAX_DMA_ADDRESS), + BOOTMEM_ALLOC_ACCESSIBLE, nid); + sparsemap_buf_end = sparsemap_buf + size; +diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c +index 74244d8e2909..d3abb492b3ba 100644 +--- a/sound/hda/hdac_controller.c ++++ b/sound/hda/hdac_controller.c +@@ -139,6 +139,9 @@ int snd_hdac_bus_send_cmd(struct hdac_bus *bus, unsigned int val) + { + unsigned int addr = azx_command_addr(val); + unsigned int wp, rp; ++ unsigned long timeout; ++ unsigned int rirb_wp; ++ int i = 0; + + spin_lock_irq(&bus->reg_lock); + +@@ -165,6 +168,41 @@ int snd_hdac_bus_send_cmd(struct hdac_bus *bus, unsigned int val) + bus->corb.buf[wp] = cpu_to_le32(val); + snd_hdac_chip_writew(bus, CORBWP, wp); + ++ if (bus->cmd_resend) { ++ timeout = jiffies + msecs_to_jiffies(1000); ++ udelay(80); ++ rirb_wp = snd_hdac_chip_readw(bus, RIRBWP); ++ while (rirb_wp == bus->rirb.wp) { ++ udelay(80); ++ rirb_wp = snd_hdac_chip_readw(bus, RIRBWP); ++ if (rirb_wp != bus->rirb.wp) ++ break; ++ if (i > 5) ++ break; ++ if (time_after(jiffies, timeout)) ++ break; ++ ++ /* add command to corb */ ++ wp = snd_hdac_chip_readw(bus, CORBWP); ++ if (wp == 0xffff) { ++ /* something wrong, controller likely turned to D3 */ ++ spin_unlock_irq(&bus->reg_lock); ++ return -EIO; ++ } ++ wp++; ++ wp %= AZX_MAX_CORB_ENTRIES; ++ ++ rp = snd_hdac_chip_readw(bus, CORBRP); ++ if (wp == rp) { ++ /* oops, it's full */ ++ spin_unlock_irq(&bus->reg_lock); ++ return -EAGAIN; ++ } ++ bus->corb.buf[wp] = cpu_to_le32(val); ++ snd_hdac_chip_writew(bus, CORBWP, wp); ++ i++; ++ } ++ } + spin_unlock_irq(&bus->reg_lock); + + return 0; +diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c +index eee422390d8e..38586457ee09 100644 +--- a/sound/hda/hdac_stream.c ++++ b/sound/hda/hdac_stream.c +@@ -51,7 +51,11 @@ void snd_hdac_stream_start(struct hdac_stream *azx_dev, bool 
fresh_start) + + trace_snd_hdac_stream_start(bus, azx_dev); + ++#ifdef CONFIG_SND_HDA_PHYTIUM ++ azx_dev->start_wallclk = snd_hdac_chip_readl(bus, WALLCLK) / 15; ++#else + azx_dev->start_wallclk = snd_hdac_chip_readl(bus, WALLCLK); ++#endif + if (!fresh_start) + azx_dev->start_wallclk -= azx_dev->period_wallclk; + +@@ -469,7 +473,11 @@ static u64 azx_cc_read(const struct cyclecounter *cc) + { + struct hdac_stream *azx_dev = container_of(cc, struct hdac_stream, cc); + ++#ifdef CONFIG_SND_HDA_PHYTIUM ++ return snd_hdac_chip_readl(azx_dev->bus, WALLCLK) / 25; ++#else + return snd_hdac_chip_readl(azx_dev->bus, WALLCLK); ++#endif + } + + static void azx_timecounter_init(struct hdac_stream *azx_dev, +diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig +index 4235907b7858..df95dad1233f 100644 +--- a/sound/pci/hda/Kconfig ++++ b/sound/pci/hda/Kconfig +@@ -21,6 +21,21 @@ config SND_HDA_INTEL + To compile this driver as a module, choose M here: the module + will be called snd-hda-intel. + ++config SND_HDA_PHYTIUM ++ tristate "PHYTIUM HD Audio" ++ depends on SOUND ++ select SND_HDA ++ help ++ Say Y here to support the HDA controller present in PHYTIUM ++ SoCs ++ ++ This options enables support for the HD Audio controller ++ present in some PHYTIUM SoCs, used to communicate audio ++ to the "High Definition Audio" codec. ++ ++ To compile this driver as a module, choose M here: the module ++ will be called snd-hda-phytium. 
++ + config SND_HDA_TEGRA + tristate "NVIDIA Tegra HD Audio" + depends on ARCH_TEGRA +diff --git a/sound/pci/hda/Makefile b/sound/pci/hda/Makefile +index b57432f00056..90e32c8ce07b 100644 +--- a/sound/pci/hda/Makefile ++++ b/sound/pci/hda/Makefile +@@ -1,5 +1,6 @@ + # SPDX-License-Identifier: GPL-2.0 + snd-hda-intel-objs := hda_intel.o ++snd-hda-phytium-objs := hda_phytium.o + snd-hda-tegra-objs := hda_tegra.o + + snd-hda-codec-y := hda_bind.o hda_codec.o hda_jack.o hda_auto_parser.o hda_sysfs.o +@@ -48,3 +49,4 @@ obj-$(CONFIG_SND_HDA_CODEC_HDMI) += snd-hda-codec-hdmi.o + # when built in kernel + obj-$(CONFIG_SND_HDA_INTEL) += snd-hda-intel.o + obj-$(CONFIG_SND_HDA_TEGRA) += snd-hda-tegra.o ++obj-$(CONFIG_SND_HDA_PHYTIUM) += snd-hda-phytium.o +diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c +index a12e594d4e3b..ff62cfa63c58 100644 +--- a/sound/pci/hda/hda_controller.c ++++ b/sound/pci/hda/hda_controller.c +@@ -28,6 +28,8 @@ + #include + #include + ++#include "hda_phytium.h" ++ + #ifdef CONFIG_X86 + /* for art-tsc conversion */ + #include +@@ -171,6 +173,10 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream) + snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid); + unsigned short ctls = spdif ? spdif->ctls : 0; + ++ struct hda_ft *hda; ++ hda = container_of(chip, struct hda_ft, chip); ++ hda->substream = substream; ++ + trace_azx_pcm_prepare(chip, azx_dev); + dsp_lock(azx_dev); + if (dsp_is_locked(azx_dev)) { +diff --git a/sound/pci/hda/hda_phytium.c b/sound/pci/hda/hda_phytium.c +new file mode 100644 +index 000000000000..8cb66a8023c0 +--- /dev/null ++++ b/sound/pci/hda/hda_phytium.c +@@ -0,0 +1,1218 @@ ++/* ++ * hda_phytium.c - Implementation of primary alsa driver code base ++ * for Intel HD Audio of Phytium. ++ * ++ * Copyright(c) 2018 Phytium Corporation. All rights reserved. 
++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the Free ++ * Software Foundation; either version 2 of the License, or (at your option) ++ * any later version. ++ * ++ * This program is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ * more details. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "hda_codec.h" ++#include "hda_controller.h" ++#include "hda_phytium.h" ++ ++#include "hda_intel_trace.h" ++ ++/* position fix mode */ ++enum { ++ POS_FIX_AUTO, ++ POS_FIX_LPIB, ++ POS_FIX_POSBUF, ++ POS_FIX_VIACOMBO, ++ POS_FIX_COMBO, ++}; ++ ++/* Define IN stream 0 FIFO size offset in VIA controller */ ++#define VIA_IN_STREAM0_FIFO_SIZE_OFFSET 0x90 ++ ++/* FT have 4 playback and 4 capture */ ++#define FT4C_NUM_CAPTURE 4 ++#define FT4C_NUM_PLAYBACK 4 ++ ++#define DWORD_BYTE_WIDTH 4 ++#define BYTE_BIT_WIDTH 8 ++ ++static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; ++static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; ++static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; ++static char *model[SNDRV_CARDS]; ++static int position_fix[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] = 1}; ++static int bdl_pos_adj[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] = -1}; ++static int probe_mask[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] = -1}; ++static int probe_only[SNDRV_CARDS]; ++static int jackpoll_ms[SNDRV_CARDS]; ++static int single_cmd = -1; ++static int enable_msi = -1; ++#ifdef CONFIG_SND_HDA_INPUT_BEEP ++static bool beep_mode[SNDRV_CARDS] = {[0 ... 
(SNDRV_CARDS-1)] = ++ CONFIG_SND_HDA_INPUT_BEEP_MODE}; ++#endif ++ ++module_param_array(index, int, NULL, 0444); ++MODULE_PARM_DESC(index, "Index value for Intel HD audio interface."); ++module_param_array(id, charp, NULL, 0444); ++MODULE_PARM_DESC(id, "ID string for Intel HD audio interface."); ++module_param_array(enable, bool, NULL, 0444); ++MODULE_PARM_DESC(enable, "Enable Intel HD audio interface."); ++module_param_array(model, charp, NULL, 0444); ++MODULE_PARM_DESC(model, "Use the given board model."); ++module_param_array(position_fix, int, NULL, 0444); ++MODULE_PARM_DESC(position_fix, "DMA pointer read method." ++ "(-1 = system default, 0 = auto, 1 = LPIB, 2 = POSBUF, 3 = VIACOMBO, 4 = COMBO)."); ++module_param_array(bdl_pos_adj, int, NULL, 0644); ++MODULE_PARM_DESC(bdl_pos_adj, "BDL position adjustment offset."); ++module_param_array(probe_mask, int, NULL, 0444); ++MODULE_PARM_DESC(probe_mask, "Bitmask to probe codecs (default = -1)."); ++module_param_array(probe_only, int, NULL, 0444); ++MODULE_PARM_DESC(probe_only, "Only probing and no codec initialization."); ++module_param_array(jackpoll_ms, int, NULL, 0444); ++MODULE_PARM_DESC(jackpoll_ms, "Ms between polling for jack events (default = 0, using unsol events only)"); ++module_param(single_cmd, bint, 0444); ++MODULE_PARM_DESC(single_cmd, "Use single command to communicate with codecs " ++ "(for debugging only)."); ++module_param(enable_msi, bint, 0444); ++MODULE_PARM_DESC(enable_msi, "Enable Message Signaled Interrupt (MSI)"); ++#ifdef CONFIG_SND_HDA_INPUT_BEEP ++module_param_array(beep_mode, bool, NULL, 0444); ++MODULE_PARM_DESC(beep_mode, "Select HDA Beep registration mode " ++ "(0=off, 1=on) (default=1)."); ++#endif ++ ++#define power_save 0 ++ ++static int align_buffer_size = -1; ++module_param(align_buffer_size, bint, 0644); ++MODULE_PARM_DESC(align_buffer_size, ++ "Force buffer and period sizes to be multiple of 128 bytes."); ++ ++/* driver types */ ++enum { ++ AZX_DRIVER_ICH, ++ AZX_DRIVER_PCH, 
++ AZX_DRIVER_SCH, ++ AZX_DRIVER_HDMI, ++ AZX_DRIVER_ATI, ++ AZX_DRIVER_ATIHDMI, ++ AZX_DRIVER_ATIHDMI_NS, ++ AZX_DRIVER_VIA, ++ AZX_DRIVER_SIS, ++ AZX_DRIVER_ULI, ++ AZX_DRIVER_NVIDIA, ++ AZX_DRIVER_TERA, ++ AZX_DRIVER_CTX, ++ AZX_DRIVER_CTHDA, ++ AZX_DRIVER_CMEDIA, ++ AZX_DRIVER_GENERIC, ++ AZX_DRIVER_FT, ++ AZX_NUM_DRIVERS, /* keep this as last entry */ ++}; ++ ++/* NOP for other archs */ ++static inline void mark_pages_wc(struct azx *chip, struct snd_dma_buffer *buf, ++ bool on) ++{ ++} ++ ++static inline void mark_runtime_wc(struct azx *chip, struct azx_dev *azx_dev, ++ struct snd_pcm_substream *substream, bool on) ++{ ++} ++ ++static int azx_acquire_irq(struct azx *chip, int do_disconnect); ++ ++/* calculate runtime delay from LPIB */ ++static int azx_get_delay_from_lpib(struct azx *chip, struct azx_dev *azx_dev, ++ unsigned int pos) ++{ ++ struct snd_pcm_substream *substream = azx_dev->core.substream; ++ int stream = substream->stream; ++ unsigned int lpib_pos = azx_get_pos_lpib(chip, azx_dev); ++ int delay; ++ ++ if (stream == SNDRV_PCM_STREAM_PLAYBACK) ++ delay = pos - lpib_pos; ++ else ++ delay = lpib_pos - pos; ++ if (delay < 0) { ++ if (delay >= azx_dev->core.delay_negative_threshold) ++ delay = 0; ++ else ++ delay += azx_dev->core.bufsize; ++ } ++ ++ if (delay >= azx_dev->core.period_bytes) { ++ dev_info(chip->card->dev, ++ "Unstable LPIB (%d >= %d); disabling LPIB delay counting\n", ++ delay, azx_dev->core.period_bytes); ++ delay = 0; ++ chip->driver_caps &= ~AZX_DCAPS_COUNT_LPIB_DELAY; ++ chip->get_delay[stream] = NULL; ++ } ++ ++ return bytes_to_frames(substream->runtime, delay); ++} ++ ++static int azx_position_ok(struct azx *chip, struct azx_dev *azx_dev); ++ ++/* called from IRQ */ ++static int azx_position_check(struct azx *chip, struct azx_dev *azx_dev) ++{ ++ struct hda_ft *hda = container_of(chip, struct hda_ft, chip); ++ int ok; ++ ++ ok = azx_position_ok(chip, azx_dev); ++ if (ok == 1) { ++ azx_dev->irq_pending = 0; ++ return ok; ++ } else 
if (ok == 0) { ++ /* bogus IRQ, process it later */ ++ azx_dev->irq_pending = 1; ++ schedule_work(&hda->irq_pending_work); ++ } ++ return 0; ++} ++ ++static int azx_ft_link_power(struct azx *chip, bool enable) ++{ ++ return 0; ++} ++ ++/* ++ * Check whether the current DMA position is acceptable for updating ++ * periods. Returns non-zero if it's OK. ++ * ++ * Many HD-audio controllers appear pretty inaccurate about ++ * the update-IRQ timing. The IRQ is issued before actually the ++ * data is processed. So, we need to process it afterwords in a ++ * workqueue. ++ */ ++static int azx_position_ok(struct azx *chip, struct azx_dev *azx_dev) ++{ ++ struct snd_pcm_substream *substream = azx_dev->core.substream; ++ int stream = substream->stream; ++ u32 wallclk; ++ unsigned int pos; ++ ++ wallclk = (azx_readl(chip, WALLCLK) - azx_dev->core.start_wallclk); ++ ++ if (wallclk < (azx_dev->core.period_wallclk * 2) / 3) ++ return -1; /* bogus (too early) interrupt */ ++ ++ if (chip->get_position[stream]) ++ pos = chip->get_position[stream](chip, azx_dev); ++ else { /* use the position buffer as default */ ++ pos = azx_get_pos_posbuf(chip, azx_dev); ++ if (!pos || pos == (u32)-1) { ++ dev_info(chip->card->dev, ++ "Invalid position buffer, using LPIB read method instead.\n"); ++ chip->get_position[stream] = azx_get_pos_lpib; ++ if (chip->get_position[0] == azx_get_pos_lpib && ++ chip->get_position[1] == azx_get_pos_lpib) ++ azx_bus(chip)->use_posbuf = false; ++ pos = azx_get_pos_lpib(chip, azx_dev); ++ chip->get_delay[stream] = NULL; ++ } else { ++ chip->get_position[stream] = azx_get_pos_posbuf; ++ if (chip->driver_caps & AZX_DCAPS_COUNT_LPIB_DELAY) ++ chip->get_delay[stream] = azx_get_delay_from_lpib; ++ } ++ } ++ ++ if (pos >= azx_dev->core.bufsize) ++ pos = 0; ++ ++ if (WARN_ONCE(!azx_dev->core.period_bytes, ++ "hda-ft: zero azx_dev->period_bytes")) ++ return -1; /* this shouldn't happen! 
*/ ++ if (wallclk < (azx_dev->core.period_wallclk * 5) / 4 && ++ pos % azx_dev->core.period_bytes > azx_dev->core.period_bytes / 2) ++ /* NG - it's below the first next period boundary */ ++ return chip->bdl_pos_adj ? 0 : -1; ++ ++ azx_dev->core.start_wallclk += wallclk; ++ ++ return 1; /* OK, it's fine */ ++} ++ ++/* The work for pending PCM period updates. */ ++static void azx_irq_pending_work(struct work_struct *work) ++{ ++ struct hda_ft *hda = container_of(work, struct hda_ft, irq_pending_work); ++ struct azx *chip = &hda->chip; ++ struct hdac_bus *bus = azx_bus(chip); ++ struct hdac_stream *s; ++ int pending, ok; ++ ++ if (!hda->irq_pending_warned) { ++ dev_info(chip->card->dev, ++ "IRQ timing workaround is activated for card #%d. Suggest a bigger bdl_pos_adj.\n", ++ chip->card->number); ++ hda->irq_pending_warned = 1; ++ } ++ ++ for (;;) { ++ pending = 0; ++ spin_lock_irq(&bus->reg_lock); ++ list_for_each_entry(s, &bus->stream_list, list) { ++ struct azx_dev *azx_dev = stream_to_azx_dev(s); ++ if (!azx_dev->irq_pending || ++ !s->substream || ++ !s->running) ++ continue; ++ ok = azx_position_ok(chip, azx_dev); ++ if (ok > 0) { ++ azx_dev->irq_pending = 0; ++ spin_unlock(&bus->reg_lock); ++ snd_pcm_period_elapsed(s->substream); ++ spin_lock(&bus->reg_lock); ++ } else if (ok < 0) { ++ pending = 0; /* too early */ ++ } else ++ pending++; ++ } ++ spin_unlock_irq(&bus->reg_lock); ++ if (!pending) ++ return; ++ msleep(1); ++ } ++} ++ ++/* clear irq_pending flags and assure no on-going workq */ ++static void azx_clear_irq_pending(struct azx *chip) ++{ ++ struct hdac_bus *bus = azx_bus(chip); ++ struct hdac_stream *s; ++ ++ spin_lock_irq(&bus->reg_lock); ++ list_for_each_entry(s, &bus->stream_list, list) { ++ struct azx_dev *azx_dev = stream_to_azx_dev(s); ++ azx_dev->irq_pending = 0; ++ } ++ spin_unlock_irq(&bus->reg_lock); ++} ++ ++static int azx_acquire_irq(struct azx *chip, int do_disconnect) ++{ ++ struct hdac_bus *bus = azx_bus(chip); ++ ++ struct hda_ft *hda = 
container_of(chip, struct hda_ft, chip); ++ struct platform_device *pdev = to_platform_device(hda->dev); ++ int irq_id = platform_get_irq(pdev, 0); ++ int err; ++ ++ err = devm_request_irq(chip->card->dev, irq_id, azx_interrupt, ++ IRQF_SHARED, KBUILD_MODNAME, chip); ++ if (err) { ++ dev_err(chip->card->dev, ++ "unable to request IRQ %d, disabling device\n", ++ irq_id); ++ if (do_disconnect) ++ snd_card_disconnect(chip->card); ++ return err; ++ } ++ bus->irq = irq_id; ++ ++ return 0; ++} ++ ++/* get the current DMA position with correction on VIA chips */ ++static unsigned int azx_via_get_position(struct azx *chip, ++ struct azx_dev *azx_dev) ++{ ++ unsigned int link_pos, mini_pos, bound_pos; ++ unsigned int mod_link_pos, mod_dma_pos, mod_mini_pos; ++ unsigned int fifo_size; ++ ++ link_pos = snd_hdac_stream_get_pos_lpib(azx_stream(azx_dev)); ++ if (azx_dev->core.substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { ++ /* Playback, no problem using link position */ ++ return link_pos; ++ } ++ ++ /* Capture */ ++ /* For new chipset, ++ * use mod to get the DMA position just like old chipset ++ */ ++ mod_dma_pos = le32_to_cpu(*azx_dev->core.posbuf); ++ mod_dma_pos %= azx_dev->core.period_bytes; ++ ++ /* azx_dev->fifo_size can't get FIFO size of in stream. ++ * Get from base address + offset. 
++ */ ++ fifo_size = readw(azx_bus(chip)->remap_addr + ++ VIA_IN_STREAM0_FIFO_SIZE_OFFSET); ++ ++ if (azx_dev->insufficient) { ++ /* Link position never gather than FIFO size */ ++ if (link_pos <= fifo_size) ++ return 0; ++ ++ azx_dev->insufficient = 0; ++ } ++ ++ if (link_pos <= fifo_size) ++ mini_pos = azx_dev->core.bufsize + link_pos - fifo_size; ++ else ++ mini_pos = link_pos - fifo_size; ++ ++ /* Find nearest previous boudary */ ++ mod_mini_pos = mini_pos % azx_dev->core.period_bytes; ++ mod_link_pos = link_pos % azx_dev->core.period_bytes; ++ if (mod_link_pos >= fifo_size) ++ bound_pos = link_pos - mod_link_pos; ++ else if (mod_dma_pos >= mod_mini_pos) ++ bound_pos = mini_pos - mod_mini_pos; ++ else { ++ bound_pos = mini_pos - mod_mini_pos + azx_dev->core.period_bytes; ++ if (bound_pos >= azx_dev->core.bufsize) ++ bound_pos = 0; ++ } ++ ++ /* Calculate real DMA position we want */ ++ return bound_pos + mod_dma_pos; ++} ++ ++#ifdef CONFIG_PM ++static DEFINE_MUTEX(card_list_lock); ++static LIST_HEAD(card_list); ++ ++static void azx_add_card_list(struct azx *chip) ++{ ++ struct hda_ft *hda = container_of(chip, struct hda_ft, chip); ++ mutex_lock(&card_list_lock); ++ list_add(&hda->list, &card_list); ++ mutex_unlock(&card_list_lock); ++} ++ ++static void azx_del_card_list(struct azx *chip) ++{ ++ struct hda_ft *hda = container_of(chip, struct hda_ft, chip); ++ mutex_lock(&card_list_lock); ++ list_del_init(&hda->list); ++ mutex_unlock(&card_list_lock); ++} ++ ++#else ++#define azx_add_card_list(chip) /* NOP */ ++#define azx_del_card_list(chip) /* NOP */ ++#endif /* CONFIG_PM */ ++ ++#if defined(CONFIG_PM_SLEEP) ++/* power management */ ++static int azx_suspend(struct device *dev) ++{ ++ struct snd_card *card = dev_get_drvdata(dev); ++ struct azx *chip; ++ struct hda_ft *hda; ++ struct hdac_bus *bus; ++ ++ if (!card) ++ return 0; ++ ++ chip = card->private_data; ++ hda = container_of(chip, struct hda_ft, chip); ++ if (chip->disabled || !chip->running) ++ return 0; 
++ ++ bus = azx_bus(chip); ++ snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); ++ azx_clear_irq_pending(chip); ++ azx_stop_chip(chip); ++ if (bus->irq >= 0) { ++ free_irq(bus->irq, chip); ++ bus->irq = -1; ++ } ++ ++ return 0; ++} ++ ++static int azx_resume(struct device *dev) ++{ ++ struct snd_card *card = dev_get_drvdata(dev); ++ struct azx *chip; ++ struct hda_ft *hda; ++ struct hdac_bus *bus; ++ int index; ++ struct snd_pcm_substream *substream; ++ struct azx_dev *azx_dev; ++ int err; ++ ++ if (!card) ++ return 0; ++ ++ chip = card->private_data; ++ hda = container_of(chip, struct hda_ft, chip); ++ bus = azx_bus(chip); ++ if (chip->disabled || !chip->running) ++ return 0; ++ ++ if (azx_acquire_irq(chip, 1) < 0) ++ return -EIO; ++ ++ index = chip->dev_index; ++ ++ snd_hdac_bus_exit_link_reset(bus); ++ usleep_range(1000, 1200); ++ ++ azx_init_chip(chip, 0); ++ ++ snd_power_change_state(card, SNDRV_CTL_POWER_D0); ++ ++ if (hda->substream && hda->substream->runtime) { ++ substream = hda->substream; ++ ++ if(substream->runtime->status->state == SNDRV_PCM_STATE_SUSPENDED){ ++ substream->runtime->status->state = substream->runtime->status->suspended_state; ++ err = substream->ops->prepare(substream); ++ if (err < 0) ++ return err; ++ } ++ ++ azx_dev = get_azx_dev(substream); ++ hda->substream = NULL; ++ } ++ ++ return 0; ++} ++#endif /* CONFIG_PM_SLEEP */ ++ ++#ifdef CONFIG_PM ++static int azx_runtime_suspend(struct device *dev) ++{ ++ struct snd_card *card = dev_get_drvdata(dev); ++ struct azx *chip; ++ struct hda_ft *hda; ++ ++ if (!card) ++ return 0; ++ ++ chip = card->private_data; ++ hda = container_of(chip, struct hda_ft, chip); ++ if (chip->disabled) ++ return 0; ++ ++ if (!azx_has_pm_runtime(chip)) ++ return 0; ++ ++ azx_stop_chip(chip); ++ azx_enter_link_reset(chip); ++ azx_clear_irq_pending(chip); ++ ++ return 0; ++} ++ ++static int azx_runtime_resume(struct device *dev) ++{ ++ struct snd_card *card = dev_get_drvdata(dev); ++ struct azx *chip; ++ struct 
hda_ft *hda; ++ struct hdac_bus *bus; ++ struct hda_codec *codec; ++ int status; ++ int index; ++ ++ if (!card) ++ return 0; ++ ++ chip = card->private_data; ++ hda = container_of(chip, struct hda_ft, chip); ++ bus = azx_bus(chip); ++ if (chip->disabled) ++ return 0; ++ ++ if (!azx_has_pm_runtime(chip)) ++ return 0; ++ ++ /* Read STATESTS before controller reset */ ++ status = azx_readw(chip, STATESTS); ++ ++ index = chip->dev_index; ++ ++ snd_hdac_bus_exit_link_reset(bus); ++ usleep_range(1000, 1200); ++ ++ azx_init_chip(chip, 0); ++ ++ if (status) { ++ list_for_each_codec(codec, &chip->bus) ++ if (status & (1 << codec->addr)) ++ schedule_delayed_work(&codec->jackpoll_work, ++ codec->jackpoll_interval); ++ } ++ ++ return 0; ++} ++ ++static int azx_runtime_idle(struct device *dev) ++{ ++ struct snd_card *card = dev_get_drvdata(dev); ++ struct azx *chip; ++ struct hda_ft *hda; ++ ++ if (!card) ++ return 0; ++ ++ chip = card->private_data; ++ hda = container_of(chip, struct hda_ft, chip); ++ if (chip->disabled) ++ return 0; ++ ++ if (!azx_has_pm_runtime(chip) || ++ azx_bus(chip)->codec_powered || !chip->running) ++ return -EBUSY; ++ ++ return 0; ++} ++ ++static const struct dev_pm_ops azx_pm = { ++ SET_SYSTEM_SLEEP_PM_OPS(azx_suspend, azx_resume) ++ SET_RUNTIME_PM_OPS(azx_runtime_suspend, azx_runtime_resume, azx_runtime_idle) ++}; ++ ++#define hda_ft_pm &azx_pm ++#else ++#define hda_ft_pm NULL ++#endif /* CONFIG_PM */ ++ ++static int azx_probe_continue(struct azx *chip); ++ ++/* ++ * destructor ++ */ ++static int azx_free(struct azx *chip) ++{ ++ struct hda_ft *hda = container_of(chip, struct hda_ft, chip); ++ struct hdac_bus *bus = azx_bus(chip); ++ struct platform_device *pdev = to_platform_device(hda->dev); ++ struct device *hddev = hda->dev; ++ struct resource *res; ++ resource_size_t size; ++ ++ if (azx_has_pm_runtime(chip) && chip->running) ++ pm_runtime_get_noresume(&pdev->dev); ++ ++ azx_del_card_list(chip); ++ ++ complete_all(&hda->probe_wait); ++ ++ if 
(bus->chip_init) { ++ azx_clear_irq_pending(chip); ++ azx_stop_all_streams(chip); ++ azx_stop_chip(chip); ++ } ++ ++ if (bus->irq >= 0) ++ free_irq(bus->irq, (void*)chip); ++ ++ devm_iounmap(hddev, bus->remap_addr); ++ ++ azx_free_stream_pages(chip); ++ azx_free_streams(chip); ++ snd_hdac_bus_exit(bus); ++ ++ if (chip->region_requested){ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ size = resource_size(res); ++ devm_release_mem_region(hddev, res->start, size); ++ } ++ ++ kfree(hda); ++ ++ return 0; ++} ++ ++static int azx_dev_disconnect(struct snd_device *device) ++{ ++ struct azx *chip = device->device_data; ++ ++ chip->bus.shutdown = 1; ++ return 0; ++} ++ ++static int azx_dev_free(struct snd_device *device) ++{ ++ return azx_free(device->device_data); ++} ++ ++static int check_position_fix(struct azx *chip, int fix) ++{ ++ switch (fix) { ++ case POS_FIX_AUTO: ++ case POS_FIX_LPIB: ++ case POS_FIX_POSBUF: ++ case POS_FIX_VIACOMBO: ++ case POS_FIX_COMBO: ++ return fix; ++ } ++ ++ if (chip->driver_caps & AZX_DCAPS_POSFIX_LPIB) { ++ dev_dbg(chip->card->dev, "Using LPIB position fix\n"); ++ return POS_FIX_LPIB; ++ } ++ return POS_FIX_AUTO; ++} ++ ++static void assign_position_fix(struct azx *chip, int fix) ++{ ++ static azx_get_pos_callback_t callbacks[] = { ++ [POS_FIX_AUTO] = NULL, ++ [POS_FIX_LPIB] = azx_get_pos_lpib, ++ [POS_FIX_POSBUF] = azx_get_pos_posbuf, ++ [POS_FIX_VIACOMBO] = azx_via_get_position, ++ [POS_FIX_COMBO] = azx_get_pos_lpib, ++ }; ++ ++ chip->get_position[0] = chip->get_position[1] = callbacks[fix]; ++ ++ /* combo mode uses LPIB only for playback */ ++ if (fix == POS_FIX_COMBO) ++ chip->get_position[1] = NULL; ++ ++ if (fix == POS_FIX_POSBUF && ++ (chip->driver_caps & AZX_DCAPS_COUNT_LPIB_DELAY)) { ++ chip->get_delay[0] = chip->get_delay[1] = ++ azx_get_delay_from_lpib; ++ } ++ ++} ++ ++#define AZX_FORCE_CODEC_MASK 0x100 ++ ++static void check_probe_mask(struct azx *chip, int dev) ++{ ++ chip->codec_probe_mask = probe_mask[dev]; ++ 
++ /* check forced option */ ++ if (chip->codec_probe_mask != -1 && ++ (chip->codec_probe_mask & AZX_FORCE_CODEC_MASK)) { ++ azx_bus(chip)->codec_mask = chip->codec_probe_mask & 0xff; ++ dev_info(chip->card->dev, "codec_mask forced to 0x%x\n", ++ (int)azx_bus(chip)->codec_mask); ++ } ++} ++ ++static void azx_probe_work(struct work_struct *work) ++{ ++ struct hda_ft *hda = container_of(work, struct hda_ft, probe_work); ++ azx_probe_continue(&hda->chip); ++} ++ ++/* ++ * constructor ++ */ ++static const struct hdac_io_ops axi_hda_io_ops; ++static const struct hda_controller_ops axi_hda_ops; ++ ++static int hda_ft_create(struct snd_card *card, struct platform_device *pdev, ++ int dev, unsigned int driver_caps, ++ struct azx **rchip) ++{ ++ static struct snd_device_ops ops = { ++ .dev_disconnect = azx_dev_disconnect, ++ .dev_free = azx_dev_free, ++ }; ++ struct hda_ft *hda; ++ struct azx *chip; ++ int err; ++ ++ *rchip = NULL; ++ ++ hda = devm_kzalloc(&pdev->dev, sizeof(*hda), GFP_KERNEL); ++ if (!hda) ++ return -ENOMEM; ++ hda->dev = &pdev->dev; ++ chip = &hda->chip; ++ mutex_init(&chip->open_mutex); ++ chip->card = card; ++ chip->ops = &axi_hda_ops; ++ chip->driver_caps = driver_caps; ++ chip->driver_type = driver_caps & 0xff; ++ chip->dev_index = dev; ++ chip->jackpoll_ms = jackpoll_ms; ++ INIT_LIST_HEAD(&chip->pcm_list); ++ INIT_WORK(&hda->irq_pending_work, azx_irq_pending_work); ++ INIT_LIST_HEAD(&hda->list); ++ ++ init_completion(&hda->probe_wait); ++ assign_position_fix(chip, check_position_fix(chip, position_fix[dev])); ++ check_probe_mask(chip, dev); ++ ++ if (single_cmd < 0) /* allow fallback to single_cmd at errors */ ++ chip->fallback_to_single_cmd = 0; ++ else /* explicitly set to single_cmd or not */ ++ chip->single_cmd = single_cmd; ++ ++ if (bdl_pos_adj[dev] < 0) { ++ switch (chip->driver_type) { ++ case AZX_DRIVER_FT: ++ bdl_pos_adj[dev] = 32; ++ break; ++ default: ++ bdl_pos_adj[dev] = 32; ++ break; ++ } ++ } ++ chip->bdl_pos_adj = bdl_pos_adj[dev]; 
++ ++ err = azx_bus_init(chip, model[dev], &axi_hda_io_ops); ++ if (err < 0) { ++ kfree(hda); ++ return err; ++ } ++ ++ if (chip->driver_type == AZX_DRIVER_NVIDIA) { ++ dev_dbg(chip->card->dev, "Enable delay in RIRB handling\n"); ++ chip->bus.needs_damn_long_delay = 1; ++ } ++ ++ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops); ++ if (err < 0) { ++ dev_err(card->dev, "Error creating device [card]!\n"); ++ azx_free(chip); ++ return err; ++ } ++ ++ /* continue probing in work context as may trigger request module */ ++ INIT_WORK(&hda->probe_work, azx_probe_work); ++ ++ *rchip = chip; ++ ++ return 0; ++} ++ ++static int azx_first_init(struct azx *chip) ++{ ++ struct hda_ft *hda = container_of(chip, struct hda_ft, chip); ++ struct platform_device *pdev = to_platform_device(hda->dev); ++ struct device *hddev = hda->dev; ++ ++ int dev = chip->dev_index; ++ bool full_reset; ++ ++ struct snd_card *card = chip->card; ++ struct hdac_bus *bus = azx_bus(chip); ++ int err; ++ unsigned short gcap; ++ unsigned int dma_bits = 64; ++ ++ struct resource *res; ++ const struct acpi_device_id *match; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ hda->regs = devm_ioremap_resource(hddev, res); ++ if (IS_ERR(hda->regs)) ++ return PTR_ERR(hda->regs); ++ chip->region_requested = 1; ++ ++ bus->addr = res->start; ++ bus->remap_addr = hda->regs; ++ if (bus->remap_addr == NULL) { ++ dev_err(card->dev, "ioremap error\n"); ++ return -ENXIO; ++ } ++ ++ bus->cmd_resend = 1; ++ ++ if (azx_acquire_irq(chip, 0) < 0) ++ return -EBUSY; ++ ++ synchronize_irq(bus->irq); ++ ++ gcap = azx_readw(chip, GCAP); ++ dev_dbg(card->dev, "chipset global capabilities = 0x%x\n", gcap); ++ ++ /* disable 64bit DMA address on some devices */ ++ if (chip->driver_caps & AZX_DCAPS_NO_64BIT) { ++ dev_dbg(card->dev, "Disabling 64bit DMA\n"); ++ gcap &= ~AZX_GCAP_64OK; ++ } ++ ++ /* disable buffer size rounding to 128-byte multiples if supported */ ++ if (align_buffer_size >= 0) ++ 
chip->align_buffer_size = !!align_buffer_size; ++ else { ++ if (chip->driver_caps & AZX_DCAPS_NO_ALIGN_BUFSIZE) ++ chip->align_buffer_size = 0; ++ else ++ chip->align_buffer_size = 1; ++ } ++ ++ if (hddev->of_node) { ++ ++ } else if (has_acpi_companion(hddev)){ ++ match = acpi_match_device(hddev->driver->acpi_match_table, hddev); ++ if (!match) { ++ dev_err(card->dev, "Error ACPI match data is missing\n"); ++ return -ENODEV; ++ } ++ acpi_dma_configure(hddev,DEV_DMA_NOT_SUPPORTED); ++ } ++ ++ /* allow 64bit DMA address if supported by H/W */ ++ if (!(gcap & AZX_GCAP_64OK)) ++ dma_bits = 32; ++ if (!dma_set_mask(hddev, DMA_BIT_MASK(dma_bits))) { ++ dma_set_coherent_mask(hddev, DMA_BIT_MASK(dma_bits)); ++ } else { ++ dma_set_mask(hddev, DMA_BIT_MASK(32)); ++ dma_set_coherent_mask(hddev, DMA_BIT_MASK(32)); ++ } ++ ++ /* read number of streams from GCAP register instead of using ++ * hardcoded value ++ */ ++ chip->capture_streams = (gcap >> 8) & 0x0f; ++ chip->playback_streams = (gcap >> 12) & 0x0f; ++ if (!chip->playback_streams && !chip->capture_streams) { ++ /* gcap didn't give any info, switching to old method */ ++ chip->playback_streams = FT4C_NUM_PLAYBACK; ++ chip->capture_streams = FT4C_NUM_CAPTURE; ++ } ++ chip->capture_index_offset = 0; ++ chip->playback_index_offset = chip->capture_streams; ++ chip->num_streams = chip->playback_streams + chip->capture_streams; ++ ++ /* initialize streams */ ++ err = azx_init_streams(chip); ++ if (err < 0) ++ return err; ++ ++ err = azx_alloc_stream_pages(chip); ++ if (err < 0) ++ return err; ++ ++ full_reset = (probe_only[dev] & 2) ? 
false : true; ++ azx_init_chip(chip, full_reset); ++ ++ /* codec detection */ ++ if (!azx_bus(chip)->codec_mask) { ++ dev_err(card->dev, "no codecs found!\n"); ++ return -ENODEV; ++ } ++ ++ strcpy(card->driver, "ft-hda"); ++ strcpy(card->shortname, "ft-hda"); ++ snprintf(card->longname, sizeof(card->longname), ++ "%s at 0x%lx irq %i", ++ card->shortname, bus->addr, bus->irq); ++ ++ return 0; ++} ++ ++/* ++ * HDA controller ops. ++ */ ++ ++/* APB register access. */ ++static void axi_azx_writel(u32 value, u32 __iomem *addr) ++{ ++ writel(value, addr); ++} ++ ++static u32 axi_azx_readl(u32 __iomem *addr) ++{ ++ return readl(addr); ++} ++ ++static void axi_azx_writew(u16 value, u16 __iomem *addr) ++{ ++ u32 data; ++ u32 offset; ++ ++ offset = (u64)addr & 0x03; ++ addr = (u16 __iomem *)((u64)addr & 0xFFFFFFFFFFFFFFFC); ++ data = readl(addr); ++ data &= ~(0xFFFF << offset * BYTE_BIT_WIDTH); ++ data |= (value << offset * BYTE_BIT_WIDTH); ++ writel(data, addr); ++} ++ ++static u16 axi_azx_readw(u16 __iomem *addr) ++{ ++ return readw(addr); ++} ++ ++static void axi_azx_writeb(u8 value, u8 __iomem *addr) ++{ ++ u32 data; ++ u32 offset; ++ ++ offset = (u64)addr & 0x03; ++ addr = (u8 __iomem *)((u64)addr & 0xFFFFFFFFFFFFFFFC); ++ data = readl(addr); ++ data &= ~(0xFF << offset * BYTE_BIT_WIDTH); ++ data |= (value << offset * BYTE_BIT_WIDTH); ++ writel(data, addr); ++} ++ ++static u8 axi_azx_readb(u8 __iomem *addr) ++{ ++ return readb(addr); ++} ++ ++/* DMA page allocation helpers. 
*/ ++static int dma_alloc_pages(struct hdac_bus *bus, ++ int type, ++ size_t size, ++ struct snd_dma_buffer *buf) ++{ ++ struct azx *chip = bus_to_azx(bus); ++ int err; ++ ++ err = snd_dma_alloc_pages(type, ++ bus->dev, ++ size, buf); ++ if (err < 0) ++ return err; ++ mark_pages_wc(chip, buf, true); ++ return 0; ++} ++ ++static void dma_free_pages(struct hdac_bus *bus, struct snd_dma_buffer *buf) ++{ ++ struct azx *chip = bus_to_azx(bus); ++ ++ mark_pages_wc(chip, buf, false); ++ snd_dma_free_pages(buf); ++} ++ ++static int substream_alloc_pages(struct azx *chip, ++ struct snd_pcm_substream *substream, ++ size_t size) ++{ ++ struct azx_dev *azx_dev = get_azx_dev(substream); ++ int ret; ++ ++ mark_runtime_wc(chip, azx_dev, substream, false); ++ ret = snd_pcm_lib_malloc_pages(substream, size); ++ if (ret < 0) ++ return ret; ++ ++ mark_runtime_wc(chip, azx_dev, substream, true); ++ return 0; ++} ++ ++static int substream_free_pages(struct azx *chip, ++ struct snd_pcm_substream *substream) ++{ ++ struct azx_dev *azx_dev = get_azx_dev(substream); ++ mark_runtime_wc(chip, azx_dev, substream, false); ++ return snd_pcm_lib_free_pages(substream); ++} ++ ++static void pcm_mmap_prepare(struct snd_pcm_substream *substream, ++ struct vm_area_struct *area) ++{ ++ ++} ++ ++static const struct hdac_io_ops axi_hda_io_ops = { ++ .reg_writel = axi_azx_writel, ++ .reg_readl = axi_azx_readl, ++ .reg_writew = axi_azx_writew, ++ .reg_readw = axi_azx_readw, ++ .reg_writeb = axi_azx_writeb, ++ .reg_readb = axi_azx_readb, ++ .dma_alloc_pages = dma_alloc_pages, ++ .dma_free_pages = dma_free_pages, ++}; ++ ++static const struct hda_controller_ops axi_hda_ops = { ++ .substream_alloc_pages = substream_alloc_pages, ++ .substream_free_pages = substream_free_pages, ++ .pcm_mmap_prepare = pcm_mmap_prepare, ++ .position_check = azx_position_check, ++ .link_power = azx_ft_link_power, ++}; ++ ++static int hda_ft_probe(struct platform_device *pdev) ++{ ++ const unsigned int driver_flags = 
AZX_DCAPS_SYNC_WRITE | AZX_DRIVER_FT; ++ static int dev; ++ struct snd_card *card; ++ struct hda_ft *hda; ++ struct azx *chip; ++ bool schedule_probe; ++ int err; ++ ++ if (dev >= SNDRV_CARDS) ++ return -ENODEV; ++ if (!enable[dev]) { ++ dev++; ++ return -ENOENT; ++ } ++ ++ err = snd_card_new(&pdev->dev, index[dev], id[dev], THIS_MODULE, ++ 0, &card); ++ if (err < 0) { ++ dev_err(&pdev->dev, "Error creating card!\n"); ++ return err; ++ } ++ ++ err = hda_ft_create(card, pdev,dev, driver_flags, &chip); ++ if (err < 0) ++ goto out_free; ++ card->private_data = chip; ++ hda = container_of(chip, struct hda_ft, chip); ++ ++ dev_set_drvdata(&pdev->dev, card); ++ ++ schedule_probe = !chip->disabled; ++ ++ if (schedule_probe) ++ schedule_work(&hda->probe_work); ++ ++ dev++; ++ if (chip->disabled) ++ complete_all(&hda->probe_wait); ++ return 0; ++ ++out_free: ++ snd_card_free(card); ++ return err; ++} ++ ++/* number of codec slots for each chipset: 0 = default slots (i.e. 4) */ ++static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] = { ++ [AZX_DRIVER_FT] = 4, ++}; ++ ++static int azx_probe_continue(struct azx *chip) ++{ ++ struct hda_ft *hda = container_of(chip, struct hda_ft, chip); ++ struct device *hddev = hda->dev; ++ int dev = chip->dev_index; ++ int err; ++ struct hdac_bus *bus = azx_bus(chip); ++ ++ hda->probe_continued = 1; ++ ++ err = azx_first_init(chip); ++ if (err < 0) ++ goto out_free; ++ ++#ifdef CONFIG_SND_HDA_INPUT_BEEP ++ chip->beep_mode = beep_mode[dev]; ++#endif ++ ++ /* create codec instances */ ++ err = azx_probe_codecs(chip, azx_max_codecs[chip->driver_type]); ++ if (err < 0) ++ goto out_free; ++ ++ if ((probe_only[dev] & 1) == 0) { ++ err = azx_codec_configure(chip); ++ if (err < 0) ++ goto out_free; ++ } ++ ++ err = snd_card_register(chip->card); ++ if (err < 0) ++ goto out_free; ++ ++ chip->running = 1; ++ azx_add_card_list(chip); ++ snd_hda_set_power_save(&chip->bus, power_save * 1000); ++ ++ if (azx_has_pm_runtime(chip)) ++ 
pm_runtime_put_noidle(hddev); ++ return err; ++ ++out_free: ++ free_irq(bus->irq, (void*)chip); ++ return err; ++} ++ ++static int hda_ft_remove(struct platform_device *pdev) ++{ ++ struct snd_card *card = dev_get_drvdata(&pdev->dev); ++ struct azx *chip; ++ struct hda_ft *hda; ++ ++ if (card) { ++ /* cancel the pending probing work */ ++ chip = card->private_data; ++ hda = container_of(chip, struct hda_ft, chip); ++ cancel_work_sync(&hda->probe_work); ++ ++ snd_card_free(card); ++ return 0; ++ } ++ return 0; ++} ++ ++static void hda_ft_shutdown(struct platform_device *pdev) ++{ ++ struct snd_card *card = dev_get_drvdata(&pdev->dev); ++ struct azx *chip; ++ ++ if (!card) ++ return; ++ chip = card->private_data; ++ if (chip && chip->running) ++ azx_stop_chip(chip); ++} ++ ++static const struct of_device_id hda_ft_of_match[] = { ++ { .compatible = "phytium,hda" }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, hda_ft_of_match); ++ ++#ifdef CONFIG_ACPI ++static const struct acpi_device_id hda_ft_acpi_match[] = { ++ { .id = "PHYT0006" }, ++ {} ++}; ++MODULE_DEVICE_TABLE(acpi, hda_ft_acpi_match); ++#else ++#define hda_ft_acpi_match NULL ++#endif ++ ++static struct platform_driver ft_platform_hda = { ++ .driver = { ++ .name = "ft-hda", ++ .pm = hda_ft_pm, ++ .of_match_table = hda_ft_of_match, ++ .acpi_match_table = hda_ft_acpi_match, ++ }, ++ .probe = hda_ft_probe, ++ .remove = hda_ft_remove, ++ .shutdown = hda_ft_shutdown, ++}; ++ ++module_platform_driver(ft_platform_hda); ++ ++MODULE_DESCRIPTION("FT HDA bus driver"); ++MODULE_LICENSE("GPL v2"); +diff --git a/sound/pci/hda/hda_phytium.h b/sound/pci/hda/hda_phytium.h +new file mode 100644 +index 000000000000..23412cb4c711 +--- /dev/null ++++ b/sound/pci/hda/hda_phytium.h +@@ -0,0 +1,51 @@ ++/* ++ * hda_ft.h - Implementation of primary alsa driver code base ++ * for Intel HD Audio of Phytium. ++ * ++ * Copyright(c) 2018 Phytium Corporation. All rights reserved. 
++ * ++ * Copyright(c) 2018 Leo Hou ++ * ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the Free ++ * Software Foundation; either version 2 of the License, or (at your option) ++ * any later version. ++ * ++ * This program is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ * more details. ++ * ++ * You should have received a copy of the GNU General Public License along with ++ * this program; if not, write to the Free Software Foundation, Inc., 59 ++ * Temple Place - Suite 330, Boston, MA 02111-1307, USA. ++ */ ++#ifndef __SOUND_HDA_PHYTIUM_H__ ++#define __SOUND_HDA_PHYTIUM_H__ ++ ++#include "hda_controller.h" ++ ++struct hda_ft { ++ struct azx chip; ++ struct snd_pcm_substream *substream; ++ struct device *dev; ++ void __iomem *regs; ++ ++ /* for pending irqs */ ++ struct work_struct irq_pending_work; ++ ++ /* sync probing */ ++ struct completion probe_wait; ++ struct work_struct probe_work; ++ ++ /* card list (for power_save trigger) */ ++ struct list_head list; ++ ++ /* extra flags */ ++ unsigned int irq_pending_warned:1; ++ unsigned int probe_continued:1; ++ ++}; ++ ++#endif +diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig +index 1cf11cf51e1d..32296faf39c5 100644 +--- a/sound/soc/Kconfig ++++ b/sound/soc/Kconfig +@@ -59,6 +59,7 @@ source "sound/soc/intel/Kconfig" + source "sound/soc/mediatek/Kconfig" + source "sound/soc/meson/Kconfig" + source "sound/soc/mxs/Kconfig" ++source "sound/soc/phytium/Kconfig" + source "sound/soc/pxa/Kconfig" + source "sound/soc/qcom/Kconfig" + source "sound/soc/rockchip/Kconfig" +diff --git a/sound/soc/Makefile b/sound/soc/Makefile +index 62a5f87c3cfc..2cdfbe1db8c2 100644 +--- a/sound/soc/Makefile ++++ b/sound/soc/Makefile +@@ -43,6 +43,7 @@ 
obj-$(CONFIG_SND_SOC) += mxs/ + obj-$(CONFIG_SND_SOC) += nuc900/ + obj-$(CONFIG_SND_SOC) += omap/ + obj-$(CONFIG_SND_SOC) += kirkwood/ ++obj-$(CONFIG_SND_SOC) += phytium/ + obj-$(CONFIG_SND_SOC) += pxa/ + obj-$(CONFIG_SND_SOC) += qcom/ + obj-$(CONFIG_SND_SOC) += rockchip/ +diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig +index efb095dbcd71..75e8f6389967 100644 +--- a/sound/soc/codecs/Kconfig ++++ b/sound/soc/codecs/Kconfig +@@ -606,6 +606,15 @@ config SND_SOC_ES8328_SPI + depends on SPI_MASTER + select SND_SOC_ES8328 + ++config SND_SOC_ES8336 ++ tristate "Everest Semi ES8336 CODEC" ++ depends on I2C ++ select GPIO_PHYTIUM_PCI ++ ++config SND_SOC_ES8388 ++ tristate "Everest Semi ES8388 CODEC" ++ depends on I2C ++ + config SND_SOC_GTM601 + tristate 'GTM601 UMTS modem audio codec' + +diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile +index 7ae7c85e8219..1ba362d2dd65 100644 +--- a/sound/soc/codecs/Makefile ++++ b/sound/soc/codecs/Makefile +@@ -76,6 +76,8 @@ snd-soc-es8316-objs := es8316.o + snd-soc-es8328-objs := es8328.o + snd-soc-es8328-i2c-objs := es8328-i2c.o + snd-soc-es8328-spi-objs := es8328-spi.o ++snd-soc-es8336-objs := es8336.o ++snd-soc-es8388-objs := es8388.o + snd-soc-gtm601-objs := gtm601.o + snd-soc-hdac-hdmi-objs := hdac_hdmi.o + snd-soc-ics43432-objs := ics43432.o +@@ -336,6 +338,8 @@ obj-$(CONFIG_SND_SOC_ES8316) += snd-soc-es8316.o + obj-$(CONFIG_SND_SOC_ES8328) += snd-soc-es8328.o + obj-$(CONFIG_SND_SOC_ES8328_I2C)+= snd-soc-es8328-i2c.o + obj-$(CONFIG_SND_SOC_ES8328_SPI)+= snd-soc-es8328-spi.o ++obj-$(CONFIG_SND_SOC_ES8336)+= snd-soc-es8336.o ++obj-$(CONFIG_SND_SOC_ES8388) += snd-soc-es8388.o + obj-$(CONFIG_SND_SOC_GTM601) += snd-soc-gtm601.o + obj-$(CONFIG_SND_SOC_HDAC_HDMI) += snd-soc-hdac-hdmi.o + obj-$(CONFIG_SND_SOC_ICS43432) += snd-soc-ics43432.o +diff --git a/sound/soc/codecs/es8336.c b/sound/soc/codecs/es8336.c +new file mode 100644 +index 000000000000..c381f4fc4df4 +--- /dev/null ++++ 
b/sound/soc/codecs/es8336.c +@@ -0,0 +1,1093 @@ ++/* ++ * es8336.c -- es8336 ALSA SoC audio driver ++ * Copyright Everest Semiconductor Co.,Ltd ++ * Phytium Information Technology Co.,Ltd ++ * ++ * Author: David Yang ++ * Yiqun Zhang ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "es8336.h" ++ ++#define INVALID_GPIO -1 ++#define GPIO_LOW 0 ++#define GPIO_HIGH 1 ++ ++static struct snd_soc_component *es8336_component; ++ ++static const struct reg_default es8336_reg_defaults[] = { ++ {0x00, 0x03}, {0x01, 0x03}, {0x02, 0x00}, {0x03, 0x20}, ++ {0x04, 0x11}, {0x05, 0x00}, {0x06, 0x11}, {0x07, 0x00}, ++ {0x08, 0x00}, {0x09, 0x01}, {0x0a, 0x00}, {0x0b, 0x00}, ++ {0x0c, 0xf8}, {0x0d, 0x3f}, {0x0e, 0x00}, {0x0f, 0x00}, ++ {0x10, 0x01}, {0x11, 0xfc}, {0x12, 0x28}, {0x13, 0x00}, ++ {0x14, 0x00}, {0x15, 0x33}, {0x16, 0x00}, {0x17, 0x00}, ++ {0x18, 0x88}, {0x19, 0x06}, {0x1a, 0x22}, {0x1b, 0x03}, ++ {0x1c, 0x0f}, {0x1d, 0x00}, {0x1e, 0x80}, {0x1f, 0x80}, ++ {0x20, 0x00}, {0x21, 0x00}, {0x22, 0xc0}, {0x23, 0x00}, ++ {0x24, 0x01}, {0x25, 0x08}, {0x26, 0x10}, {0x27, 0xc0}, ++ {0x28, 0x00}, {0x29, 0x1c}, {0x2a, 0x00}, {0x2b, 0xb0}, ++ {0x2c, 0x32}, {0x2d, 0x03}, {0x2e, 0x00}, {0x2f, 0x11}, ++ {0x30, 0x10}, {0x31, 0x00}, {0x32, 0x00}, {0x33, 0xc0}, ++ {0x34, 0xc0}, {0x35, 0x1f}, {0x36, 0xf7}, {0x37, 0xfd}, ++ {0x38, 0xff}, {0x39, 0x1f}, {0x3a, 0xf7}, {0x3b, 0xfd}, ++ {0x3c, 0xff}, {0x3d, 0x1f}, {0x3e, 0xf7}, {0x3f, 0xfd}, ++ {0x40, 0xff}, {0x41, 0x1f}, {0x42, 0xf7}, {0x43, 0xfd}, ++ {0x44, 0xff}, {0x45, 0x1f}, {0x46, 0xf7}, {0x47, 0xfd}, ++ {0x48, 0xff}, {0x49, 0x1f}, 
{0x4a, 0xf7}, {0x4b, 0xfd}, ++ {0x4c, 0xff}, {0x4d, 0x00}, {0x4e, 0x00}, {0x4f, 0xff}, ++ {0x50, 0x00}, {0x51, 0x00}, {0x52, 0x00}, {0x53, 0x00}, ++}; ++ ++/* codec private data */ ++struct es8336_priv { ++ struct regmap *regmap; ++ unsigned int dmic_amic; ++ unsigned int sysclk; ++ struct snd_pcm_hw_constraint_list *sysclk_constraints; ++ struct clk *mclk; ++ int debounce_time; ++ int hp_det_invert; ++ struct delayed_work work; ++ ++ int spk_ctl_gpio; ++ int hp_det_gpio; ++ bool muted; ++ bool hp_inserted; ++ bool spk_active_level; ++ ++ int pwr_count; ++}; ++ ++/* ++ * es8336_reset ++ * write value 0xff to reg0x00, the chip will be in reset mode ++ * then, writer 0x00 to reg0x00, unreset the chip ++ */ ++static int es8336_reset(struct snd_soc_component *component) ++{ ++ snd_soc_component_write(component, ES8336_RESET_REG00, 0x3F); ++ usleep_range(5000, 5500); ++ return snd_soc_component_write(component, ES8336_RESET_REG00, 0x03); ++} ++ ++static void es8336_enable_spk(struct es8336_priv *es8336, bool enable) ++{ ++ bool level; ++ ++ level = enable ? 
es8336->spk_active_level : !es8336->spk_active_level; ++ gpio_set_value(es8336->spk_ctl_gpio, level); ++} ++ ++static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -9600, 50, 1); ++static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -9600, 50, 1); ++static const DECLARE_TLV_DB_SCALE(hpmixer_gain_tlv, -1200, 150, 0); ++static const DECLARE_TLV_DB_SCALE(mic_bst_tlv, 0, 1200, 0); ++ ++static unsigned int linin_pga_tlv[] = { ++ TLV_DB_RANGE_HEAD(9), ++ 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0), ++ 1, 1, TLV_DB_SCALE_ITEM(300, 0, 0), ++ 2, 2, TLV_DB_SCALE_ITEM(600, 0, 0), ++ 3, 3, TLV_DB_SCALE_ITEM(900, 0, 0), ++ 4, 4, TLV_DB_SCALE_ITEM(1200, 0, 0), ++ 5, 5, TLV_DB_SCALE_ITEM(1500, 0, 0), ++ 6, 6, TLV_DB_SCALE_ITEM(1800, 0, 0), ++ 7, 7, TLV_DB_SCALE_ITEM(2100, 0, 0), ++ 8, 8, TLV_DB_SCALE_ITEM(2400, 0, 0), ++}; ++ ++static unsigned int hpout_vol_tlv[] = { ++ TLV_DB_RANGE_HEAD(1), ++ 0, 3, TLV_DB_SCALE_ITEM(-4800, 1200, 0), ++}; ++ ++static const char *const alc_func_txt[] = { "Off", "On" }; ++ ++static const struct soc_enum alc_func = ++ SOC_ENUM_SINGLE(ES8336_ADC_ALC1_REG29, 6, 2, alc_func_txt); ++ ++static const char *const ng_type_txt[] = { ++ "Constant PGA Gain", "Mute ADC Output" }; ++ ++static const struct soc_enum ng_type = ++ SOC_ENUM_SINGLE(ES8336_ADC_ALC6_REG2E, 6, 2, ng_type_txt); ++ ++static const char *const adcpol_txt[] = { "Normal", "Invert" }; ++ ++static const struct soc_enum adcpol = ++ SOC_ENUM_SINGLE(ES8336_ADC_MUTE_REG26, 1, 2, adcpol_txt); ++ ++static const char *const dacpol_txt[] = { ++ "Normal", "R Invert", "L Invert", "L + R Invert" }; ++ ++static const struct soc_enum dacpol = ++ SOC_ENUM_SINGLE(ES8336_DAC_SET1_REG30, 0, 4, dacpol_txt); ++ ++static const struct snd_kcontrol_new es8336_snd_controls[] = { ++ /* HP OUT VOLUME */ ++ SOC_DOUBLE_TLV("HP Playback Volume", ES8336_CPHP_ICAL_VOL_REG18, ++ 4, 0, 4, 1, hpout_vol_tlv), ++ /* HPMIXER VOLUME Control */ ++ SOC_DOUBLE_TLV("HPMixer Gain", ES8336_HPMIX_VOL_REG16, ++ 0, 4, 7, 0, hpmixer_gain_tlv), ++ ++ /* DAC 
Digital controls */ ++ SOC_DOUBLE_R_TLV("DAC Playback Volume", ES8336_DAC_VOLL_REG33, ++ ES8336_DAC_VOLR_REG34, 0, 0xC0, 1, dac_vol_tlv), ++ ++ SOC_SINGLE("Enable DAC Soft Ramp", ES8336_DAC_SET1_REG30, 4, 1, 1), ++ SOC_SINGLE("DAC Soft Ramp Rate", ES8336_DAC_SET1_REG30, 2, 4, 0), ++ ++ SOC_ENUM("Playback Polarity", dacpol), ++ SOC_SINGLE("DAC Notch Filter", ES8336_DAC_SET2_REG31, 6, 1, 0), ++ SOC_SINGLE("DAC Double Fs Mode", ES8336_DAC_SET2_REG31, 7, 1, 0), ++ SOC_SINGLE("DAC Volume Control-LeR", ES8336_DAC_SET2_REG31, 2, 1, 0), ++ SOC_SINGLE("DAC Stereo Enhancement", ES8336_DAC_SET3_REG32, 0, 7, 0), ++ ++ /* +20dB D2SE PGA Control */ ++ SOC_SINGLE_TLV("MIC Boost", ES8336_ADC_D2SEPGA_REG24, ++ 0, 1, 0, mic_bst_tlv), ++ /* 0-+24dB Lineinput PGA Control */ ++ SOC_SINGLE_TLV("Input PGA", ES8336_ADC_PGAGAIN_REG23, ++ 4, 8, 0, linin_pga_tlv), ++}; ++ ++/* Analog Input MUX */ ++static const char * const es8336_analog_in_txt[] = { ++ "lin1-rin1", ++ "lin2-rin2", ++ "lin1-rin1 with 20db Boost", ++ "lin2-rin2 with 20db Boost" ++}; ++ ++static const unsigned int es8336_analog_in_values[] = { 0, 1, 2, 3 }; ++ ++static const struct soc_enum es8336_analog_input_enum = ++ SOC_VALUE_ENUM_SINGLE(ES8336_ADC_PDN_LINSEL_REG22, 4, 3, ++ ARRAY_SIZE(es8336_analog_in_txt), ++ es8336_analog_in_txt, ++ es8336_analog_in_values); ++ ++static const struct snd_kcontrol_new es8336_analog_in_mux_controls = ++ SOC_DAPM_ENUM("Route", es8336_analog_input_enum); ++ ++/* Dmic MUX */ ++static const char * const es8336_dmic_txt[] = { ++ "dmic disable", ++ "dmic data at high level", ++ "dmic data at low level", ++}; ++ ++static const unsigned int es8336_dmic_values[] = { 0, 2, 3 }; ++ ++static const struct soc_enum es8336_dmic_src_enum = ++ SOC_VALUE_ENUM_SINGLE(ES8336_ADC_DMIC_REG25, 0, 3, ++ ARRAY_SIZE(es8336_dmic_txt), ++ es8336_dmic_txt, ++ es8336_dmic_values); ++ ++static const struct snd_kcontrol_new es8336_dmic_src_controls = ++ SOC_DAPM_ENUM("Route", es8336_dmic_src_enum); ++ ++/* hp mixer mux 
*/ ++static const char *const es8336_hpmux_texts[] = { ++ "lin1-rin1", ++ "lin2-rin2", ++ "lin-rin with Boost", ++ "lin-rin with Boost and PGA" ++}; ++ ++static const unsigned int es8336_hpmux_values[] = { 0, 1, 2, 3 }; ++ ++static const struct soc_enum es8336_left_hpmux_enum = ++ SOC_VALUE_ENUM_SINGLE(ES8336_HPMIX_SEL_REG13, 4, 7, ++ ARRAY_SIZE(es8336_hpmux_texts), ++ es8336_hpmux_texts, ++ es8336_hpmux_values); ++ ++static const struct snd_kcontrol_new es8336_left_hpmux_controls = ++ SOC_DAPM_ENUM("Route", es8336_left_hpmux_enum); ++ ++static const struct soc_enum es8336_right_hpmux_enum = ++ SOC_VALUE_ENUM_SINGLE(ES8336_HPMIX_SEL_REG13, 0, 7, ++ ARRAY_SIZE(es8336_hpmux_texts), ++ es8336_hpmux_texts, ++ es8336_hpmux_values); ++ ++static const struct snd_kcontrol_new es8336_right_hpmux_controls = ++ SOC_DAPM_ENUM("Route", es8336_right_hpmux_enum); ++ ++/* headphone Output Mixer */ ++static const struct snd_kcontrol_new es8336_out_left_mix[] = { ++ SOC_DAPM_SINGLE("LLIN Switch", ES8336_HPMIX_SWITCH_REG14, ++ 6, 1, 0), ++ SOC_DAPM_SINGLE("Left DAC Switch", ES8336_HPMIX_SWITCH_REG14, ++ 7, 1, 0), ++}; ++ ++static const struct snd_kcontrol_new es8336_out_right_mix[] = { ++ SOC_DAPM_SINGLE("RLIN Switch", ES8336_HPMIX_SWITCH_REG14, ++ 2, 1, 0), ++ SOC_DAPM_SINGLE("Right DAC Switch", ES8336_HPMIX_SWITCH_REG14, ++ 3, 1, 0), ++}; ++ ++/* DAC data source mux */ ++static const char * const es8336_dacsrc_texts[] = { ++ "LDATA TO LDAC, RDATA TO RDAC", ++ "LDATA TO LDAC, LDATA TO RDAC", ++ "RDATA TO LDAC, RDATA TO RDAC", ++ "RDATA TO LDAC, LDATA TO RDAC", ++}; ++ ++static const unsigned int es8336_dacsrc_values[] = { 0, 1, 2, 3 }; ++ ++static const struct soc_enum es8336_dacsrc_mux_enum = ++ SOC_VALUE_ENUM_SINGLE(ES8336_DAC_SET1_REG30, 6, 4, ++ ARRAY_SIZE(es8336_dacsrc_texts), ++ es8336_dacsrc_texts, ++ es8336_dacsrc_values); ++static const struct snd_kcontrol_new es8336_dacsrc_mux_controls = ++ SOC_DAPM_ENUM("Route", es8336_dacsrc_mux_enum); ++ ++static const struct 
snd_soc_dapm_widget es8336_dapm_widgets[] = { ++ /* Input Lines */ ++ SND_SOC_DAPM_INPUT("DMIC"), ++ SND_SOC_DAPM_INPUT("MIC1"), ++ SND_SOC_DAPM_INPUT("MIC2"), ++ ++ SND_SOC_DAPM_MICBIAS("micbias", SND_SOC_NOPM, ++ 0, 0), ++ /* Input MUX */ ++ SND_SOC_DAPM_MUX("Differential Mux", SND_SOC_NOPM, 0, 0, ++ &es8336_analog_in_mux_controls), ++ ++ SND_SOC_DAPM_PGA("Line input PGA", ES8336_ADC_PDN_LINSEL_REG22, ++ 7, 1, NULL, 0), ++ ++ /* ADCs */ ++ SND_SOC_DAPM_ADC("Mono ADC", NULL, ES8336_ADC_PDN_LINSEL_REG22, 6, 1), ++ ++ /* Dmic MUX */ ++ SND_SOC_DAPM_MUX("Digital Mic Mux", SND_SOC_NOPM, 0, 0, ++ &es8336_dmic_src_controls), ++ ++ /* Digital Interface */ ++ SND_SOC_DAPM_AIF_OUT("I2S OUT", "I2S1 Capture", 1, ++ ES8336_SDP_ADCFMT_REG0A, 6, 1), ++ ++ SND_SOC_DAPM_AIF_IN("I2S IN", "I2S1 Playback", 0, ++ SND_SOC_NOPM, 0, 0), ++ ++ /* DACs DATA SRC MUX */ ++ SND_SOC_DAPM_MUX("DAC SRC Mux", SND_SOC_NOPM, 0, 0, ++ &es8336_dacsrc_mux_controls), ++ /* DACs */ ++ SND_SOC_DAPM_DAC("Right DAC", NULL, ES8336_DAC_PDN_REG2F, 0, 1), ++ SND_SOC_DAPM_DAC("Left DAC", NULL, ES8336_DAC_PDN_REG2F, 4, 1), ++ ++ /* Headphone Output Side */ ++ /* hpmux for hp mixer */ ++ SND_SOC_DAPM_MUX("Left Hp mux", SND_SOC_NOPM, 0, 0, ++ &es8336_left_hpmux_controls), ++ SND_SOC_DAPM_MUX("Right Hp mux", SND_SOC_NOPM, 0, 0, ++ &es8336_right_hpmux_controls), ++ /* Output mixer */ ++ SND_SOC_DAPM_MIXER("Left Hp mixer", ES8336_HPMIX_PDN_REG15, ++ 4, 1, &es8336_out_left_mix[0], ++ ARRAY_SIZE(es8336_out_left_mix)), ++ SND_SOC_DAPM_MIXER("Right Hp mixer", ES8336_HPMIX_PDN_REG15, ++ 0, 1, &es8336_out_right_mix[0], ++ ARRAY_SIZE(es8336_out_right_mix)), ++ SND_SOC_DAPM_MIXER("Left Hp mixer", SND_SOC_NOPM, ++ 4, 1, &es8336_out_left_mix[0], ++ ARRAY_SIZE(es8336_out_left_mix)), ++ SND_SOC_DAPM_MIXER("Right Hp mixer", SND_SOC_NOPM, ++ 0, 1, &es8336_out_right_mix[0], ++ ARRAY_SIZE(es8336_out_right_mix)), ++ ++ /* Output charge pump */ ++ SND_SOC_DAPM_PGA("HPCP L", ES8336_CPHP_OUTEN_REG17, ++ 6, 0, NULL, 0), ++ 
SND_SOC_DAPM_PGA("HPCP R", ES8336_CPHP_OUTEN_REG17, ++ 2, 0, NULL, 0), ++ ++ /* Output Driver */ ++ SND_SOC_DAPM_PGA("HPVOL L", ES8336_CPHP_OUTEN_REG17, ++ 5, 0, NULL, 0), ++ SND_SOC_DAPM_PGA("HPVOL R", ES8336_CPHP_OUTEN_REG17, ++ 1, 0, NULL, 0), ++ /* Output Lines */ ++ SND_SOC_DAPM_OUTPUT("HPOL"), ++ SND_SOC_DAPM_OUTPUT("HPOR"), ++}; ++ ++static const struct snd_soc_dapm_route es8336_dapm_routes[] = { ++ /* ++ * record route map ++ */ ++ {"MIC1", NULL, "micbias"}, ++ {"MIC2", NULL, "micbias"}, ++ {"DMIC", NULL, "micbias"}, ++ ++ {"Differential Mux", "lin1-rin1", "MIC1"}, ++ {"Differential Mux", "lin2-rin2", "MIC2"}, ++ {"Differential Mux", "lin1-rin1 with 20db Boost", "MIC1"}, ++ {"Differential Mux", "lin2-rin2 with 20db Boost", "MIC2"}, ++ {"Line input PGA", NULL, "Differential Mux"}, ++ ++ {"Mono ADC", NULL, "Line input PGA"}, ++ ++ {"Digital Mic Mux", "dmic disable", "Mono ADC"}, ++ {"Digital Mic Mux", "dmic data at high level", "DMIC"}, ++ {"Digital Mic Mux", "dmic data at low level", "DMIC"}, ++ ++ {"I2S OUT", NULL, "Digital Mic Mux"}, ++ /* ++ * playback route map ++ */ ++ {"DAC SRC Mux", "LDATA TO LDAC, RDATA TO RDAC", "I2S IN"}, ++ {"DAC SRC Mux", "LDATA TO LDAC, LDATA TO RDAC", "I2S IN"}, ++ {"DAC SRC Mux", "RDATA TO LDAC, RDATA TO RDAC", "I2S IN"}, ++ {"DAC SRC Mux", "RDATA TO LDAC, LDATA TO RDAC", "I2S IN"}, ++ ++ {"Left DAC", NULL, "DAC SRC Mux"}, ++ {"Right DAC", NULL, "DAC SRC Mux"}, ++ ++ {"Left Hp mux", "lin1-rin1", "MIC1"}, ++ {"Left Hp mux", "lin2-rin2", "MIC2"}, ++ {"Left Hp mux", "lin-rin with Boost", "Differential Mux"}, ++ {"Left Hp mux", "lin-rin with Boost and PGA", "Line input PGA"}, ++ ++ {"Right Hp mux", "lin1-rin1", "MIC1"}, ++ {"Right Hp mux", "lin2-rin2", "MIC2"}, ++ {"Right Hp mux", "lin-rin with Boost", "Differential Mux"}, ++ {"Right Hp mux", "lin-rin with Boost and PGA", "Line input PGA"}, ++ ++ {"Left Hp mixer", "LLIN Switch", "Left Hp mux"}, ++ {"Left Hp mixer", "Left DAC Switch", "Left DAC"}, ++ ++ {"Right Hp mixer", "RLIN 
Switch", "Right Hp mux"}, ++ {"Right Hp mixer", "Right DAC Switch", "Right DAC"}, ++ ++ {"HPCP L", NULL, "Left Hp mixer"}, ++ {"HPCP R", NULL, "Right Hp mixer"}, ++ ++ {"HPVOL L", NULL, "HPCP L"}, ++ {"HPVOL R", NULL, "HPCP R"}, ++ ++ {"HPOL", NULL, "HPVOL L"}, ++ {"HPOR", NULL, "HPVOL R"}, ++}; ++ ++ ++/* The set of rates we can generate from the above for each SYSCLK */ ++ ++static unsigned int rates_12288[] = { ++ 8000, 12000, 16000, 24000, 24000, 32000, 48000, 96000, ++}; ++ ++static struct snd_pcm_hw_constraint_list constraints_12288 = { ++ .count = ARRAY_SIZE(rates_12288), ++ .list = rates_12288, ++}; ++ ++static unsigned int rates_112896[] = { ++ 8000, 11025, 22050, 44100, ++}; ++ ++static struct snd_pcm_hw_constraint_list constraints_112896 = { ++ .count = ARRAY_SIZE(rates_112896), ++ .list = rates_112896, ++}; ++ ++static unsigned int rates_12[] = { ++ 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000, ++ 48000, 88235, 96000, ++}; ++ ++static struct snd_pcm_hw_constraint_list constraints_12 = { ++ .count = ARRAY_SIZE(rates_12), ++ .list = rates_12, ++}; ++ ++/* ++ * Note that this should be called from init rather than from hw_params. 
++ */ ++static int es8336_set_dai_sysclk(struct snd_soc_dai *codec_dai, ++ int clk_id, unsigned int freq, int dir) ++{ ++ struct snd_soc_component *component = codec_dai->component; ++ struct es8336_priv *es8336 = snd_soc_component_get_drvdata(component); ++ ++ switch (freq) { ++ case 11289600: ++ case 18432000: ++ case 22579200: ++ case 36864000: ++ es8336->sysclk_constraints = &constraints_112896; ++ es8336->sysclk = freq; ++ return 0; ++ case 12288000: ++ case 19200000: ++ case 16934400: ++ case 24576000: ++ case 33868800: ++ es8336->sysclk_constraints = &constraints_12288; ++ es8336->sysclk = freq; ++ return 0; ++ case 12000000: ++ case 24000000: ++ es8336->sysclk_constraints = &constraints_12; ++ es8336->sysclk = freq; ++ return 0; ++ } ++ ++ return 0; ++} ++ ++static int es8336_set_dai_fmt(struct snd_soc_dai *codec_dai, ++ unsigned int fmt) ++{ ++ struct snd_soc_component *component = codec_dai->component; ++ u8 iface = 0; ++ u8 adciface = 0; ++ u8 daciface = 0; ++ ++ iface = snd_soc_component_read32(component, ES8336_IFACE); ++ adciface = snd_soc_component_read32(component, ES8336_ADC_IFACE); ++ daciface = snd_soc_component_read32(component, ES8336_DAC_IFACE); ++ ++ /* set master/slave audio interface */ ++ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { ++ case SND_SOC_DAIFMT_CBM_CFM: ++ iface |= 0x80; ++ break; ++ case SND_SOC_DAIFMT_CBS_CFS: ++ iface &= 0x7F; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ /* interface format */ ++ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { ++ case SND_SOC_DAIFMT_I2S: ++ adciface &= 0xFC; ++ daciface &= 0xFC; ++ break; ++ case SND_SOC_DAIFMT_RIGHT_J: ++ return -EINVAL; ++ case SND_SOC_DAIFMT_LEFT_J: ++ adciface &= 0xFC; ++ daciface &= 0xFC; ++ adciface |= 0x01; ++ daciface |= 0x01; ++ break; ++ case SND_SOC_DAIFMT_DSP_A: ++ adciface &= 0xDC; ++ daciface &= 0xDC; ++ adciface |= 0x03; ++ daciface |= 0x03; ++ break; ++ case SND_SOC_DAIFMT_DSP_B: ++ adciface &= 0xDC; ++ daciface &= 0xDC; ++ adciface |= 0x23; ++ daciface |= 
0x23; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ /* clock inversion */ ++ switch (fmt & SND_SOC_DAIFMT_INV_MASK) { ++ case SND_SOC_DAIFMT_NB_NF: ++ iface &= 0xDF; ++ adciface &= 0xDF; ++ daciface &= 0xDF; ++ break; ++ case SND_SOC_DAIFMT_IB_IF: ++ iface |= 0x20; ++ adciface |= 0x20; ++ daciface |= 0x20; ++ break; ++ case SND_SOC_DAIFMT_IB_NF: ++ iface |= 0x20; ++ adciface &= 0xDF; ++ daciface &= 0xDF; ++ break; ++ case SND_SOC_DAIFMT_NB_IF: ++ iface &= 0xDF; ++ adciface |= 0x20; ++ daciface |= 0x20; ++ break; ++ default: ++ return -EINVAL; ++ } ++ snd_soc_component_write(component, ES8336_IFACE, iface); ++ snd_soc_component_write(component, ES8336_ADC_IFACE, adciface); ++ snd_soc_component_write(component, ES8336_DAC_IFACE, daciface); ++ return 0; ++} ++ ++static int es8336_pcm_startup(struct snd_pcm_substream *substream, ++ struct snd_soc_dai *dai) ++{ ++ struct snd_soc_component *component = dai->component; ++ struct es8336_priv *es8336 = snd_soc_component_get_drvdata(component); ++ bool playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK); ++ ++ snd_soc_component_write(component, ES8336_RESET_REG00, 0xC0); ++ snd_soc_component_write(component, ES8336_SYS_PDN_REG0D, 0x00); ++ /* es8336: both playback and capture need dac mclk */ ++ snd_soc_component_update_bits(component, ES8336_CLKMGR_CLKSW_REG01, ++ ES8336_CLKMGR_MCLK_DIV_MASK | ++ ES8336_CLKMGR_DAC_MCLK_MASK, ++ ES8336_CLKMGR_MCLK_DIV_NML | ++ ES8336_CLKMGR_DAC_MCLK_EN); ++ es8336->pwr_count++; ++ ++ if (playback) { ++ snd_soc_component_write(component, ES8336_SYS_LP1_REG0E, 0x3F); ++ snd_soc_component_write(component, ES8336_SYS_LP2_REG0F, 0x1F); ++ snd_soc_component_write(component, ES8336_HPMIX_SWITCH_REG14, 0x88); ++ snd_soc_component_write(component, ES8336_HPMIX_PDN_REG15, 0x00); ++ snd_soc_component_write(component, ES8336_HPMIX_VOL_REG16, 0xBB); ++ snd_soc_component_write(component, ES8336_CPHP_PDN2_REG1A, 0x10); ++ snd_soc_component_write(component, ES8336_CPHP_LDOCTL_REG1B, 0x30); ++ 
snd_soc_component_write(component, ES8336_CPHP_PDN1_REG19, 0x02); ++ snd_soc_component_write(component, ES8336_DAC_PDN_REG2F, 0x00); ++ snd_soc_component_write(component, ES8336_CPHP_OUTEN_REG17, 0x66); ++ snd_soc_component_update_bits(component, ES8336_CLKMGR_CLKSW_REG01, ++ ES8336_CLKMGR_DAC_MCLK_MASK | ++ ES8336_CLKMGR_DAC_ANALOG_MASK, ++ ES8336_CLKMGR_DAC_MCLK_EN | ++ ES8336_CLKMGR_DAC_ANALOG_EN); ++ msleep(50); ++ } else { ++ snd_soc_component_update_bits(component, ++ ES8336_ADC_PDN_LINSEL_REG22, 0xC0, 0x20); ++ snd_soc_component_update_bits(component, ES8336_CLKMGR_CLKSW_REG01, ++ ES8336_CLKMGR_ADC_MCLK_MASK | ++ ES8336_CLKMGR_ADC_ANALOG_MASK, ++ ES8336_CLKMGR_ADC_MCLK_EN | ++ ES8336_CLKMGR_ADC_ANALOG_EN); ++ } ++ ++ return 0; ++} ++ ++static void es8336_pcm_shutdown(struct snd_pcm_substream *substream, ++ struct snd_soc_dai *dai) ++{ ++ struct snd_soc_component *component = dai->component; ++ struct es8336_priv *es8336 = snd_soc_component_get_drvdata(component); ++ bool playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK); ++ ++ if (playback) { ++ snd_soc_component_write(component, ES8336_CPHP_OUTEN_REG17, 0x00); ++ snd_soc_component_write(component, ES8336_DAC_PDN_REG2F, 0x11); ++ snd_soc_component_write(component, ES8336_CPHP_LDOCTL_REG1B, 0x03); ++ snd_soc_component_write(component, ES8336_CPHP_PDN2_REG1A, 0x22); ++ snd_soc_component_write(component, ES8336_CPHP_PDN1_REG19, 0x06); ++ snd_soc_component_write(component, ES8336_HPMIX_SWITCH_REG14, 0x00); ++ snd_soc_component_write(component, ES8336_HPMIX_PDN_REG15, 0x33); ++ snd_soc_component_write(component, ES8336_HPMIX_VOL_REG16, 0x00); ++ snd_soc_component_write(component, ES8336_SYS_PDN_REG0D, 0x00); ++ snd_soc_component_write(component, ES8336_SYS_LP1_REG0E, 0xFF); ++ snd_soc_component_write(component, ES8336_SYS_LP2_REG0F, 0xFF); ++ snd_soc_component_update_bits(component, ES8336_CLKMGR_CLKSW_REG01, ++ ES8336_CLKMGR_DAC_ANALOG_MASK, ++ ES8336_CLKMGR_DAC_ANALOG_DIS); ++ } else { ++ 
snd_soc_component_write(component, ES8336_ADC_PDN_LINSEL_REG22, 0xc0); ++ snd_soc_component_update_bits(component, ES8336_CLKMGR_CLKSW_REG01, ++ ES8336_CLKMGR_ADC_MCLK_MASK | ++ ES8336_CLKMGR_ADC_ANALOG_MASK, ++ ES8336_CLKMGR_ADC_MCLK_DIS | ++ ES8336_CLKMGR_ADC_ANALOG_DIS); ++ } ++ ++ if (--es8336->pwr_count == 0) { ++ if (!es8336->hp_inserted) ++ snd_soc_component_write(component, ES8336_SYS_PDN_REG0D, 0x3F); ++ snd_soc_component_write(component, ES8336_CLKMGR_CLKSW_REG01, 0xF3); ++ } ++} ++ ++ ++static int es8336_pcm_hw_params(struct snd_pcm_substream *substream, ++ struct snd_pcm_hw_params *params, ++ struct snd_soc_dai *dai) ++{ ++ struct snd_soc_component *component = dai->component; ++ int val = 0; ++ ++ switch (params_format(params)) { ++ case SNDRV_PCM_FORMAT_S16_LE: ++ val = ES8336_DACWL_16; ++ break; ++ case SNDRV_PCM_FORMAT_S20_3LE: ++ val = ES8336_DACWL_20; ++ break; ++ case SNDRV_PCM_FORMAT_S24_LE: ++ val = ES8336_DACWL_24; ++ break; ++ case SNDRV_PCM_FORMAT_S32_LE: ++ val = ES8336_DACWL_32; ++ break; ++ default: ++ val = ES8336_DACWL_16; ++ break; ++ } ++ ++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ++ snd_soc_component_update_bits(component, ES8336_SDP_DACFMT_REG0B, ++ ES8336_DACWL_MASK, val); ++ else ++ snd_soc_component_update_bits(component, ES8336_SDP_ADCFMT_REG0A, ++ ES8336_ADCWL_MASK, val); ++ ++ return 0; ++} ++ ++static int es8336_mute(struct snd_soc_dai *dai, int mute) ++{ ++ struct snd_soc_component *component = dai->component; ++ struct es8336_priv *es8336 = snd_soc_component_get_drvdata(component); ++ ++ es8336->muted = mute; ++ if (mute) { ++ es8336_enable_spk(es8336, false); ++ msleep(100); ++ snd_soc_component_write(component, ES8336_DAC_SET1_REG30, 0x20); ++ } else if (dai->playback_active) { ++ snd_soc_component_write(component, ES8336_DAC_SET1_REG30, 0x00); ++ msleep(130); ++ if (!es8336->hp_inserted) ++ es8336_enable_spk(es8336, true); ++ } ++ return 0; ++} ++ ++static int es8336_set_bias_level(struct snd_soc_component 
*component, ++ enum snd_soc_bias_level level) ++{ ++ struct es8336_priv *es8336 = snd_soc_component_get_drvdata(component); ++ ++ switch (level) { ++ case SND_SOC_BIAS_ON: ++ break; ++ ++ case SND_SOC_BIAS_PREPARE: ++ break; ++ ++ case SND_SOC_BIAS_STANDBY: ++ break; ++ ++ case SND_SOC_BIAS_OFF: ++ snd_soc_component_write(component, ES8336_CPHP_OUTEN_REG17, 0x00); ++ snd_soc_component_write(component, ES8336_DAC_PDN_REG2F, 0x11); ++ snd_soc_component_write(component, ES8336_CPHP_LDOCTL_REG1B, 0x03); ++ snd_soc_component_write(component, ES8336_CPHP_PDN2_REG1A, 0x22); ++ snd_soc_component_write(component, ES8336_CPHP_PDN1_REG19, 0x06); ++ snd_soc_component_write(component, ES8336_HPMIX_SWITCH_REG14, 0x00); ++ snd_soc_component_write(component, ES8336_HPMIX_PDN_REG15, 0x33); ++ snd_soc_component_write(component, ES8336_HPMIX_VOL_REG16, 0x00); ++ snd_soc_component_write(component, ES8336_ADC_PDN_LINSEL_REG22, 0xC0); ++ if (!es8336->hp_inserted) ++ snd_soc_component_write(component, ES8336_SYS_PDN_REG0D, 0x3F); ++ snd_soc_component_write(component, ES8336_SYS_LP1_REG0E, 0x3F); ++ snd_soc_component_write(component, ES8336_SYS_LP2_REG0F, 0x1F); ++ snd_soc_component_write(component, ES8336_RESET_REG00, 0x00); ++ break; ++ } ++ ++ return 0; ++} ++ ++#define es8336_RATES SNDRV_PCM_RATE_8000_96000 ++ ++#define es8336_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\ ++ SNDRV_PCM_FMTBIT_S24_LE) ++ ++static const struct snd_soc_dai_ops es8336_ops = { ++ .startup = es8336_pcm_startup, ++ .hw_params = es8336_pcm_hw_params, ++ .set_fmt = es8336_set_dai_fmt, ++ .set_sysclk = es8336_set_dai_sysclk, ++ .digital_mute = es8336_mute, ++ .shutdown = es8336_pcm_shutdown, ++}; ++ ++static struct snd_soc_dai_driver es8336_dai = { ++ .name = "es8336-hifi", ++ .playback = { ++ .stream_name = "Playback", ++ .channels_min = 1, ++ .channels_max = 2, ++ .rates = es8336_RATES, ++ .formats = es8336_FORMATS, ++ }, ++ .capture = { ++ .stream_name = "Capture", ++ .channels_min = 1, ++ 
.channels_max = 2, ++ .rates = es8336_RATES, ++ .formats = es8336_FORMATS, ++ }, ++ .ops = &es8336_ops, ++ .symmetric_rates = 1, ++}; ++ ++static int es8336_init_regs(struct snd_soc_component *component) ++{ ++ snd_soc_component_write(component, ES8336_RESET_REG00, 0x3f); ++ usleep_range(5000, 5500); ++ snd_soc_component_write(component, ES8336_RESET_REG00, 0x00); ++ snd_soc_component_write(component, ES8336_SYS_VMIDSEL_REG0C, 0xFF); ++ msleep(30); ++ snd_soc_component_write(component, ES8336_CLKMGR_CLKSEL_REG02, 0x08); ++ snd_soc_component_write(component, ES8336_CLKMGR_ADCOSR_REG03, 0x20); ++ snd_soc_component_write(component, ES8336_CLKMGR_ADCDIV1_REG04, 0x11); ++ snd_soc_component_write(component, ES8336_CLKMGR_ADCDIV2_REG05, 0x00); ++ snd_soc_component_write(component, ES8336_CLKMGR_DACDIV1_REG06, 0x11); ++ snd_soc_component_write(component, ES8336_CLKMGR_DACDIV2_REG07, 0x00); ++ snd_soc_component_write(component, ES8336_CLKMGR_CPDIV_REG08, 0x00); ++ snd_soc_component_write(component, ES8336_SDP_MS_BCKDIV_REG09, 0x04); ++ snd_soc_component_write(component, ES8336_CLKMGR_CLKSW_REG01, 0x7F); ++ snd_soc_component_write(component, ES8336_CAL_TYPE_REG1C, 0x0F); ++ snd_soc_component_write(component, ES8336_CAL_HPLIV_REG1E, 0x90); ++ snd_soc_component_write(component, ES8336_CAL_HPRIV_REG1F, 0x90); ++ snd_soc_component_write(component, ES8336_ADC_VOLUME_REG27, 0x00); ++ snd_soc_component_write(component, ES8336_ADC_PDN_LINSEL_REG22, 0xc0); ++ snd_soc_component_write(component, ES8336_ADC_D2SEPGA_REG24, 0x00); ++ snd_soc_component_write(component, ES8336_ADC_DMIC_REG25, 0x08); ++ snd_soc_component_write(component, ES8336_DAC_SET2_REG31, 0x20); ++ snd_soc_component_write(component, ES8336_DAC_SET3_REG32, 0x00); ++ snd_soc_component_write(component, ES8336_DAC_VOLL_REG33, 0x00); ++ snd_soc_component_write(component, ES8336_DAC_VOLR_REG34, 0x00); ++ snd_soc_component_write(component, ES8336_SDP_ADCFMT_REG0A, 0x00); ++ snd_soc_component_write(component, 
ES8336_SDP_DACFMT_REG0B, 0x00); ++ snd_soc_component_write(component, ES8336_SYS_VMIDLOW_REG10, 0x11); ++ snd_soc_component_write(component, ES8336_SYS_VSEL_REG11, 0xFC); ++ snd_soc_component_write(component, ES8336_SYS_REF_REG12, 0x28); ++ snd_soc_component_write(component, ES8336_SYS_LP1_REG0E, 0x04); ++ snd_soc_component_write(component, ES8336_SYS_LP2_REG0F, 0x0C); ++ snd_soc_component_write(component, ES8336_DAC_PDN_REG2F, 0x11); ++ snd_soc_component_write(component, ES8336_HPMIX_SEL_REG13, 0x00); ++ snd_soc_component_write(component, ES8336_HPMIX_SWITCH_REG14, 0x88); ++ snd_soc_component_write(component, ES8336_HPMIX_PDN_REG15, 0x00); ++ snd_soc_component_write(component, ES8336_HPMIX_VOL_REG16, 0xBB); ++ snd_soc_component_write(component, ES8336_CPHP_PDN2_REG1A, 0x10); ++ snd_soc_component_write(component, ES8336_CPHP_LDOCTL_REG1B, 0x30); ++ snd_soc_component_write(component, ES8336_CPHP_PDN1_REG19, 0x02); ++ snd_soc_component_write(component, ES8336_CPHP_ICAL_VOL_REG18, 0x00); ++ snd_soc_component_write(component, ES8336_GPIO_SEL_REG4D, 0x02); ++ snd_soc_component_write(component, ES8336_GPIO_DEBUNCE_INT_REG4E, 0x02); ++ snd_soc_component_write(component, ES8336_TESTMODE_REG50, 0xA0); ++ snd_soc_component_write(component, ES8336_TEST1_REG51, 0x00); ++ snd_soc_component_write(component, ES8336_TEST2_REG52, 0x00); ++ snd_soc_component_write(component, ES8336_SYS_PDN_REG0D, 0x00); ++ snd_soc_component_write(component, ES8336_RESET_REG00, 0xC0); ++ msleep(50); ++ snd_soc_component_write(component, ES8336_ADC_PGAGAIN_REG23, 0x60); ++ snd_soc_component_write(component, ES8336_ADC_D2SEPGA_REG24, 0x01); ++ /* adc ds mode, HPF enable */ ++ snd_soc_component_write(component, ES8336_ADC_DMIC_REG25, 0x08); ++ snd_soc_component_write(component, ES8336_ADC_ALC1_REG29, 0xcd); ++ snd_soc_component_write(component, ES8336_ADC_ALC2_REG2A, 0x08); ++ snd_soc_component_write(component, ES8336_ADC_ALC3_REG2B, 0xa0); ++ snd_soc_component_write(component, ES8336_ADC_ALC4_REG2C, 
0x05); ++ snd_soc_component_write(component, ES8336_ADC_ALC5_REG2D, 0x06); ++ snd_soc_component_write(component, ES8336_ADC_ALC6_REG2E, 0x61); ++ return 0; ++} ++ ++static int es8336_suspend(struct snd_soc_component *component) ++{ ++ return 0; ++} ++ ++static int es8336_resume(struct snd_soc_component *component) ++{ ++ struct es8336_priv *es8336 = snd_soc_component_get_drvdata(component); ++ int ret; ++ ++ es8336_reset(component); /* UPDATED BY DAVID,15-3-5 */ ++ ret = snd_soc_component_read32(component, ES8336_CLKMGR_ADCDIV2_REG05); ++ if (!ret) { ++ es8336_init_regs(component); ++ snd_soc_component_write(component, ES8336_GPIO_SEL_REG4D, 0x02); ++ /* max debance time, enable interrupt, low active */ ++ snd_soc_component_write(component, ES8336_GPIO_DEBUNCE_INT_REG4E, 0xf3); ++ /* es8336_set_bias_level(component, SND_SOC_BIAS_OFF); */ ++ snd_soc_component_write(component, ES8336_CPHP_OUTEN_REG17, 0x00); ++ snd_soc_component_write(component, ES8336_DAC_PDN_REG2F, 0x11); ++ snd_soc_component_write(component, ES8336_CPHP_LDOCTL_REG1B, 0x03); ++ snd_soc_component_write(component, ES8336_CPHP_PDN2_REG1A, 0x22); ++ snd_soc_component_write(component, ES8336_CPHP_PDN1_REG19, 0x06); ++ snd_soc_component_write(component, ES8336_HPMIX_SWITCH_REG14, 0x00); ++ snd_soc_component_write(component, ES8336_HPMIX_PDN_REG15, 0x33); ++ snd_soc_component_write(component, ES8336_HPMIX_VOL_REG16, 0x00); ++ if (!es8336->hp_inserted) ++ snd_soc_component_write(component, ES8336_SYS_PDN_REG0D, 0x3F); ++ snd_soc_component_write(component, ES8336_SYS_LP1_REG0E, 0xFF); ++ snd_soc_component_write(component, ES8336_SYS_LP2_REG0F, 0xFF); ++ snd_soc_component_write(component, ES8336_CLKMGR_CLKSW_REG01, 0xF3); ++ snd_soc_component_write(component, ES8336_ADC_PDN_LINSEL_REG22, 0xc0); ++ } ++ return 0; ++} ++ ++static irqreturn_t es8336_irq_handler(int irq, void *data) ++{ ++ struct es8336_priv *es8336 = data; ++ ++ queue_delayed_work(system_power_efficient_wq, &es8336->work, ++ 
msecs_to_jiffies(es8336->debounce_time)); ++ ++ return IRQ_HANDLED; ++} ++ ++static void hp_work(struct work_struct *work) ++{ ++ struct es8336_priv *es8336; ++ int enable; ++ ++ es8336 = container_of(work, struct es8336_priv, work.work); ++ enable = gpio_get_value(es8336->hp_det_gpio); ++ if (es8336->hp_det_invert) ++ enable = !enable; ++ ++ es8336->hp_inserted = enable ? true : false; ++ if (!es8336->muted) { ++ if (es8336->hp_inserted) ++ es8336_enable_spk(es8336, false); ++ else ++ es8336_enable_spk(es8336, true); ++ } ++} ++ ++static int es8336_probe(struct snd_soc_component *component) ++{ ++ struct es8336_priv *es8336 = snd_soc_component_get_drvdata(component); ++ int ret = 0; ++ ++ es8336_component = component; ++ ret = snd_soc_component_read32(component, ES8336_CLKMGR_ADCDIV2_REG05); ++ if (!ret) { ++ es8336_reset(component); /* UPDATED BY DAVID,15-3-5 */ ++ ret = snd_soc_component_read32(component, ES8336_CLKMGR_ADCDIV2_REG05); ++ if (!ret) { ++ es8336_init_regs(component); ++ snd_soc_component_write(component, ES8336_GPIO_SEL_REG4D, 0x02); ++ /* max debance time, enable interrupt, low active */ ++ snd_soc_component_write(component, ++ ES8336_GPIO_DEBUNCE_INT_REG4E, 0xf3); ++ ++ /* es8336_set_bias_level(codec, SND_SOC_BIAS_OFF); */ ++ snd_soc_component_write(component, ES8336_CPHP_OUTEN_REG17, 0x00); ++ snd_soc_component_write(component, ES8336_DAC_PDN_REG2F, 0x11); ++ snd_soc_component_write(component, ES8336_CPHP_LDOCTL_REG1B, 0x03); ++ snd_soc_component_write(component, ES8336_CPHP_PDN2_REG1A, 0x22); ++ snd_soc_component_write(component, ES8336_CPHP_PDN1_REG19, 0x06); ++ snd_soc_component_write(component, ES8336_HPMIX_SWITCH_REG14, 0x00); ++ snd_soc_component_write(component, ES8336_HPMIX_PDN_REG15, 0x33); ++ snd_soc_component_write(component, ES8336_HPMIX_VOL_REG16, 0x00); ++ if (!es8336->hp_inserted) ++ snd_soc_component_write(component, ES8336_SYS_PDN_REG0D, ++ 0x3F); ++ snd_soc_component_write(component, ES8336_SYS_LP1_REG0E, 0xFF); ++ 
snd_soc_component_write(component, ES8336_SYS_LP2_REG0F, 0xFF); ++ snd_soc_component_write(component, ES8336_CLKMGR_CLKSW_REG01, 0xF3); ++ snd_soc_component_write(component, ++ ES8336_ADC_PDN_LINSEL_REG22, 0xc0); ++ } ++ } ++ ++ return ret; ++} ++ ++static void es8336_remove(struct snd_soc_component *component) ++{ ++ es8336_set_bias_level(component, SND_SOC_BIAS_OFF); ++} ++ ++const struct regmap_config es8336_regmap_config = { ++ .reg_bits = 8, ++ .val_bits = 8, ++ .max_register = ES8336_TEST3_REG53, ++ .cache_type = REGCACHE_RBTREE, ++ .reg_defaults = es8336_reg_defaults, ++ .num_reg_defaults = ARRAY_SIZE(es8336_reg_defaults), ++}; ++ ++static const struct snd_soc_component_driver soc_component_dev_es8336 = { ++ .probe = es8336_probe, ++ .remove = es8336_remove, ++ .suspend = es8336_suspend, ++ .resume = es8336_resume, ++ .set_bias_level = es8336_set_bias_level, ++ ++ .controls = es8336_snd_controls, ++ .num_controls = ARRAY_SIZE(es8336_snd_controls), ++ .dapm_widgets = es8336_dapm_widgets, ++ .num_dapm_widgets = ARRAY_SIZE(es8336_dapm_widgets), ++ .dapm_routes = es8336_dapm_routes, ++ .num_dapm_routes = ARRAY_SIZE(es8336_dapm_routes), ++}; ++ ++static int es8336_i2c_probe(struct i2c_client *i2c, ++ const struct i2c_device_id *id) ++{ ++ struct es8336_priv *es8336; ++ struct gpio_desc *gpiod; ++ int ret = -1; ++ int hp_irq; ++ ++ es8336 = devm_kzalloc(&i2c->dev, sizeof(*es8336), GFP_KERNEL); ++ if (!es8336) ++ return -ENOMEM; ++ ++ es8336->debounce_time = 200; ++ es8336->hp_det_invert = 0; ++ es8336->pwr_count = 0; ++ es8336->hp_inserted = false; ++ es8336->muted = true; ++ ++ es8336->regmap = devm_regmap_init_i2c(i2c, &es8336_regmap_config); ++ if (IS_ERR(es8336->regmap)) { ++ ret = PTR_ERR(es8336->regmap); ++ dev_err(&i2c->dev, "Failed to init regmap: %d\n", ret); ++ return ret; ++ } ++ ++ i2c_set_clientdata(i2c, es8336); ++ ++ gpiod = devm_gpiod_get_index_optional(&i2c->dev, "sel", 0, ++ GPIOD_OUT_HIGH); ++ ++ if (!gpiod) { ++ dev_info(&i2c->dev, "Can not get 
spk_ctl_gpio\n"); ++ es8336->spk_ctl_gpio = INVALID_GPIO; ++ } else { ++ es8336->spk_ctl_gpio = desc_to_gpio(gpiod); ++ es8336->spk_active_level = 0; ++ es8336_enable_spk(es8336, false); ++ } ++ ++ gpiod = devm_gpiod_get_index_optional(&i2c->dev, "det", 0, ++ GPIOD_IN); ++ ++ if (!gpiod) { ++ dev_info(&i2c->dev, "Can not get hp_det_gpio\n"); ++ es8336->hp_det_gpio = INVALID_GPIO; ++ } else { ++ es8336->hp_det_gpio = desc_to_gpio(gpiod); ++ INIT_DELAYED_WORK(&es8336->work, hp_work); ++ es8336->hp_det_invert = 0; ++ hp_irq = gpio_to_irq(es8336->hp_det_gpio); ++ ret = devm_request_threaded_irq(&i2c->dev, hp_irq, NULL, ++ es8336_irq_handler, ++ IRQF_TRIGGER_FALLING | ++ IRQF_TRIGGER_RISING | ++ IRQF_ONESHOT, ++ "es8336_interrupt", es8336); ++ if (ret < 0) { ++ dev_err(&i2c->dev, "request_irq failed: %d\n", ret); ++ return ret; ++ } ++ ++ schedule_delayed_work(&es8336->work, ++ msecs_to_jiffies(es8336->debounce_time)); ++ } ++ ++ ret = snd_soc_register_component(&i2c->dev, ++ &soc_component_dev_es8336, ++ &es8336_dai, 1); ++ ++ return ret; ++} ++ ++static int es8336_i2c_remove(struct i2c_client *client) ++{ ++ kfree(i2c_get_clientdata(client)); ++ return 0; ++} ++ ++static void es8336_i2c_shutdown(struct i2c_client *client) ++{ ++ struct es8336_priv *es8336 = i2c_get_clientdata(client); ++ ++ if (es8336_component != NULL) { ++ es8336_enable_spk(es8336, false); ++ msleep(20); ++ es8336_set_bias_level(es8336_component, SND_SOC_BIAS_OFF); ++ } ++} ++ ++static const struct i2c_device_id es8336_i2c_id[] = { ++ {"es8336", 0}, ++ {"10ES8336:00", 0}, ++ {"10ES8336", 0}, ++ { } ++}; ++MODULE_DEVICE_TABLE(i2c, es8336_i2c_id); ++ ++static const struct of_device_id es8336_of_match[] = { ++ { .compatible = "everest,es8336", }, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, es8336_of_match); ++ ++static const struct acpi_device_id es8336_acpi_match[] = { ++ { "ESSX8336", 0}, ++ { } ++}; ++MODULE_DEVICE_TABLE(acpi, es8336_acpi_match); ++ ++static struct i2c_driver es8336_i2c_driver = { ++ 
.driver = { ++ .name = "es8336", ++ .of_match_table = es8336_of_match, ++ .acpi_match_table = es8336_acpi_match, ++ }, ++ .probe = es8336_i2c_probe, ++ .remove = es8336_i2c_remove, ++ .shutdown = es8336_i2c_shutdown, ++ .id_table = es8336_i2c_id, ++}; ++ ++module_i2c_driver(es8336_i2c_driver); ++MODULE_DESCRIPTION("ASoC es8336 driver"); ++MODULE_LICENSE("GPL"); +diff --git a/sound/soc/codecs/es8336.h b/sound/soc/codecs/es8336.h +new file mode 100644 +index 000000000000..d2c74c11ffd1 +--- /dev/null ++++ b/sound/soc/codecs/es8336.h +@@ -0,0 +1,161 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright Everest Semiconductor Co.,Ltd ++ * Phytium Information Technology Co.,Ltd ++ * ++ * Author: David Yang ++ * Yiqun Zhang ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ */ ++ ++#ifndef _ES8336_H ++#define _ES8336_H ++ ++/* ES8336 register space */ ++/* ++ * RESET Control ++ */ ++#define ES8336_RESET_REG00 0x00 ++/* ++ * Clock Managerment ++ */ ++#define ES8336_CLKMGR_CLKSW_REG01 0x01 ++#define ES8336_CLKMGR_CLKSEL_REG02 0x02 ++#define ES8336_CLKMGR_ADCOSR_REG03 0x03 ++#define ES8336_CLKMGR_ADCDIV1_REG04 0x04 ++#define ES8336_CLKMGR_ADCDIV2_REG05 0x05 ++#define ES8336_CLKMGR_DACDIV1_REG06 0x06 ++#define ES8336_CLKMGR_DACDIV2_REG07 0x07 ++#define ES8336_CLKMGR_CPDIV_REG08 0x08 ++/* ++ * SDP Control ++ */ ++#define ES8336_SDP_MS_BCKDIV_REG09 0x09 ++#define ES8336_SDP_ADCFMT_REG0A 0x0a ++#define ES8336_SDP_DACFMT_REG0B 0x0b ++/* ++ * System Control ++ */ ++#define ES8336_SYS_VMIDSEL_REG0C 0x0c ++#define ES8336_SYS_PDN_REG0D 0x0d ++#define ES8336_SYS_LP1_REG0E 0x0e ++#define ES8336_SYS_LP2_REG0F 0x0f ++#define ES8336_SYS_VMIDLOW_REG10 0x10 ++#define ES8336_SYS_VSEL_REG11 0x11 ++#define ES8336_SYS_REF_REG12 0x12 ++/* ++ * HP Mixer ++ */ ++#define ES8336_HPMIX_SEL_REG13 0x13 ++#define ES8336_HPMIX_SWITCH_REG14 
0x14 ++#define ES8336_HPMIX_PDN_REG15 0x15 ++#define ES8336_HPMIX_VOL_REG16 0x16 ++/* ++ * Charge Pump Headphone driver ++ */ ++#define ES8336_CPHP_OUTEN_REG17 0x17 ++#define ES8336_CPHP_ICAL_VOL_REG18 0x18 ++#define ES8336_CPHP_PDN1_REG19 0x19 ++#define ES8336_CPHP_PDN2_REG1A 0x1a ++#define ES8336_CPHP_LDOCTL_REG1B 0x1b ++/* ++ * Calibration ++ */ ++#define ES8336_CAL_TYPE_REG1C 0x1c ++#define ES8336_CAL_SET_REG1D 0x1d ++#define ES8336_CAL_HPLIV_REG1E 0x1e ++#define ES8336_CAL_HPRIV_REG1F 0x1f ++#define ES8336_CAL_HPLMV_REG20 0x20 ++#define ES8336_CAL_HPRMV_REG21 0x21 ++/* ++ * ADC Control ++ */ ++#define ES8336_ADC_PDN_LINSEL_REG22 0x22 ++#define ES8336_ADC_PGAGAIN_REG23 0x23 ++#define ES8336_ADC_D2SEPGA_REG24 0x24 ++#define ES8336_ADC_DMIC_REG25 0x25 ++#define ES8336_ADC_MUTE_REG26 0x26 ++#define ES8336_ADC_VOLUME_REG27 0x27 ++#define ES8336_ADC_ALC1_REG29 0x29 ++#define ES8336_ADC_ALC2_REG2A 0x2a ++#define ES8336_ADC_ALC3_REG2B 0x2b ++#define ES8336_ADC_ALC4_REG2C 0x2c ++#define ES8336_ADC_ALC5_REG2D 0x2d ++#define ES8336_ADC_ALC6_REG2E 0x2e ++/* ++ * DAC Control ++ */ ++#define ES8336_DAC_PDN_REG2F 0x2f ++#define ES8336_DAC_SET1_REG30 0x30 ++#define ES8336_DAC_SET2_REG31 0x31 ++#define ES8336_DAC_SET3_REG32 0x32 ++#define ES8336_DAC_VOLL_REG33 0x33 ++#define ES8336_DAC_VOLR_REG34 0x34 ++/* ++ * GPIO ++ */ ++#define ES8336_GPIO_SEL_REG4D 0x4D ++#define ES8336_GPIO_DEBUNCE_INT_REG4E 0x4E ++#define ES8336_GPIO_FLAG 0x4F ++/* ++ * TEST MODE ++ */ ++#define ES8336_TESTMODE_REG50 0x50 ++#define ES8336_TEST1_REG51 0x51 ++#define ES8336_TEST2_REG52 0x52 ++#define ES8336_TEST3_REG53 0x53 ++ ++#define ES8336_IFACE ES8336_SDP_MS_BCKDIV_REG09 ++#define ES8336_ADC_IFACE ES8336_SDP_ADCFMT_REG0A ++#define ES8336_DAC_IFACE ES8336_SDP_DACFMT_REG0B ++ ++#define ES8336_REGNUM 84 ++ ++/* REGISTER 0X01 CLOCK MANAGER */ ++#define ES8336_CLKMGR_MCLK_DIV_MASK (0X1<<7) ++#define ES8336_CLKMGR_MCLK_DIV_NML (0X0<<7) ++#define ES8336_CLKMGR_MCLK_DIV_1 (0X1<<7) ++#define 
ES8336_CLKMGR_ADC_MCLK_MASK (0X1<<3) ++#define ES8336_CLKMGR_ADC_MCLK_EN (0X1<<3) ++#define ES8336_CLKMGR_ADC_MCLK_DIS (0X0<<3) ++#define ES8336_CLKMGR_DAC_MCLK_MASK (0X1<<2) ++#define ES8336_CLKMGR_DAC_MCLK_EN (0X1<<2) ++#define ES8336_CLKMGR_DAC_MCLK_DIS (0X0<<2) ++#define ES8336_CLKMGR_ADC_ANALOG_MASK (0X1<<1) ++#define ES8336_CLKMGR_ADC_ANALOG_EN (0X1<<1) ++#define ES8336_CLKMGR_ADC_ANALOG_DIS (0X0<<1) ++#define ES8336_CLKMGR_DAC_ANALOG_MASK (0X1<<0) ++#define ES8336_CLKMGR_DAC_ANALOG_EN (0X1<<0) ++#define ES8336_CLKMGR_DAC_ANALOG_DIS (0X0<<0) ++ ++/* REGISTER 0X0A */ ++#define ES8336_ADCWL_MASK (0x7 << 2) ++#define ES8336_ADCWL_32 (0x4 << 2) ++#define ES8336_ADCWL_24 (0x0 << 2) ++#define ES8336_ADCWL_20 (0x1 << 2) ++#define ES8336_ADCWL_18 (0x2 << 2) ++#define ES8336_ADCWL_16 (0x3 << 2) ++#define ES8336_ADCFMT_MASK (0x3 << 0) ++#define ES8336_ADCFMT_I2S (0x0 << 0) ++#define ES8336_ADCWL_LEFT (0x1 << 0) ++#define ES8336_ADCWL_RIGHT (0x2 << 0) ++#define ES8336_ADCWL_PCM (0x3 << 0) ++ ++/* REGISTER 0X0B */ ++#define ES8336_DACWL_MASK (0x7 << 2) ++#define ES8336_DACWL_32 (0x4 << 2) ++#define ES8336_DACWL_24 (0x0 << 2) ++#define ES8336_DACWL_20 (0x1 << 2) ++#define ES8336_DACWL_18 (0x2 << 2) ++#define ES8336_DACWL_16 (0x3 << 2) ++#define ES8336_DACFMT_MASK (0x3 << 0) ++#define ES8336_DACFMT_I2S (0x0 << 0) ++#define ES8336_DACWL_LEFT (0x1 << 0) ++#define ES8336_DACWL_RIGHT (0x2 << 0) ++#define ES8336_DACWL_PCM (0x3 << 0) ++ ++#endif +diff --git a/sound/soc/codecs/es8388.c b/sound/soc/codecs/es8388.c +new file mode 100644 +index 000000000000..c4a4d19219b4 +--- /dev/null ++++ b/sound/soc/codecs/es8388.c +@@ -0,0 +1,819 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * es8388.c -- ES8388 ALSA SoC Audio driver ++ * ++ * Copyright 2021 Phytium Technology ++ * Author: Yiqun Zhang ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software 
Foundation. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "es8388.h" ++#include ++#include ++ ++static const unsigned int rates_12288[] = { ++ 8000, 12000, 16000, 24000, 32000, 48000, 96000, ++}; ++ ++static const int ratios_12288[] = { ++ 10, 7, 6, 4, 3, 2, 0, ++}; ++ ++static const struct snd_pcm_hw_constraint_list constraints_12288 = { ++ .count = ARRAY_SIZE(rates_12288), ++ .list = rates_12288, ++}; ++ ++static const unsigned int rates_11289[] = { ++ 8018, 11025, 22050, 44100, 88200, ++}; ++ ++static const int ratios_11289[] = { ++ 9, 7, 4, 2, 0, ++}; ++ ++static const struct snd_pcm_hw_constraint_list constraints_11289 = { ++ .count = ARRAY_SIZE(rates_11289), ++ .list = rates_11289, ++}; ++ ++#define ES8388_RATES (SNDRV_PCM_RATE_192000 | \ ++ SNDRV_PCM_RATE_96000 | \ ++ SNDRV_PCM_RATE_88200 | \ ++ SNDRV_PCM_RATE_8000_48000) ++#define ES8388_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \ ++ SNDRV_PCM_FMTBIT_S18_3LE | \ ++ SNDRV_PCM_FMTBIT_S20_3LE | \ ++ SNDRV_PCM_FMTBIT_S24_LE | \ ++ SNDRV_PCM_FMTBIT_S32_LE) ++ ++struct es8388_priv { ++ struct regmap *regmap; ++ struct clk *clk; ++ int playback_fs; ++ bool deemph; ++ int mclkdiv2; ++ const struct snd_pcm_hw_constraint_list *sysclk_constraints; ++ const int *mclk_ratios; ++ bool master; ++}; ++ ++/* ++ * ES8388 Controls ++ */ ++static const char * const adcpol_txt[] = {"Normal", "L Invert", "R Invert", ++ "L + R Invert"}; ++static SOC_ENUM_SINGLE_DECL(adcpol, ++ ES8388_ADCCONTROL6, 6, adcpol_txt); ++ ++static const DECLARE_TLV_DB_SCALE(play_tlv, -3000, 100, 0); ++static const DECLARE_TLV_DB_SCALE(dac_adc_tlv, -9600, 50, 0); ++static const DECLARE_TLV_DB_SCALE(pga_tlv, 0, 300, 0); ++static const DECLARE_TLV_DB_SCALE(bypass_tlv, -1500, 300, 0); ++static const DECLARE_TLV_DB_SCALE(mic_tlv, 0, 300, 0); ++ ++static const struct { ++ int rate; ++ unsigned int val; ++} deemph_settings[] = { ++ { 0, 
ES8388_DACCONTROL6_DEEMPH_OFF }, ++ { 32000, ES8388_DACCONTROL6_DEEMPH_32k }, ++ { 44100, ES8388_DACCONTROL6_DEEMPH_44_1k }, ++ { 48000, ES8388_DACCONTROL6_DEEMPH_48k }, ++}; ++ ++static int es8388_set_deemph(struct snd_soc_component *component) ++{ ++ struct es8388_priv *es8388 = snd_soc_component_get_drvdata(component); ++ int val, i, best; ++ ++ /* ++ * If we're using deemphasis select the nearest available sample ++ * rate. ++ */ ++ if (es8388->deemph) { ++ best = 0; ++ for (i = 1; i < ARRAY_SIZE(deemph_settings); i++) { ++ if (abs(deemph_settings[i].rate - es8388->playback_fs) < ++ abs(deemph_settings[best].rate - es8388->playback_fs)) ++ best = i; ++ } ++ ++ val = deemph_settings[best].val; ++ } else { ++ val = ES8388_DACCONTROL6_DEEMPH_OFF; ++ } ++ ++ dev_dbg(component->dev, "Set deemphasis %d\n", val); ++ ++ return snd_soc_component_update_bits(component, ES8388_DACCONTROL6, ++ ES8388_DACCONTROL6_DEEMPH_MASK, val); ++} ++ ++static int es8388_get_deemph(struct snd_kcontrol *kcontrol, ++ struct snd_ctl_elem_value *ucontrol) ++{ ++ struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol); ++ struct es8388_priv *es8388 = snd_soc_component_get_drvdata(component); ++ ++ ucontrol->value.integer.value[0] = es8388->deemph; ++ return 0; ++} ++ ++static int es8388_put_deemph(struct snd_kcontrol *kcontrol, ++ struct snd_ctl_elem_value *ucontrol) ++{ ++ struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol); ++ struct es8388_priv *es8388 = snd_soc_component_get_drvdata(component); ++ unsigned int deemph = ucontrol->value.integer.value[0]; ++ int ret; ++ ++ if (deemph > 1) ++ return -EINVAL; ++ ++ ret = es8388_set_deemph(component); ++ if (ret < 0) ++ return ret; ++ ++ es8388->deemph = deemph; ++ ++ return 0; ++} ++ ++static const struct snd_kcontrol_new es8388_snd_controls[] = { ++ SOC_DOUBLE_R_TLV("Capture Digital Volume", ++ ES8388_ADCCONTROL8, ES8388_ADCCONTROL9, ++ 0, 0xc0, 1, dac_adc_tlv), ++ ++ SOC_SINGLE_BOOL_EXT("DAC 
Deemphasis Switch", 0, ++ es8388_get_deemph, es8388_put_deemph), ++ ++ SOC_ENUM("Capture Polarity", adcpol), ++ ++ SOC_SINGLE_TLV("Left Mixer Left Bypass Volume", ++ ES8388_DACCONTROL17, 3, 7, 1, bypass_tlv), ++ SOC_SINGLE_TLV("Left Mixer Right Bypass Volume", ++ ES8388_DACCONTROL19, 3, 7, 1, bypass_tlv), ++ SOC_SINGLE_TLV("Right Mixer Left Bypass Volume", ++ ES8388_DACCONTROL18, 3, 7, 1, bypass_tlv), ++ SOC_SINGLE_TLV("Right Mixer Right Bypass Volume", ++ ES8388_DACCONTROL20, 3, 7, 1, bypass_tlv), ++ ++ SOC_DOUBLE_R_TLV("PCM Volume", ++ ES8388_LDACVOL, ES8388_RDACVOL, ++ 0, ES8388_DACVOL_MAX, 1, dac_adc_tlv), ++ ++ SOC_DOUBLE_R_TLV("Output 1 Playback Volume", ++ ES8388_LOUT1VOL, ES8388_ROUT1VOL, ++ 0, ES8388_OUT1VOL_MAX, 0, play_tlv), ++ ++ SOC_DOUBLE_R_TLV("Output 2 Playback Volume", ++ ES8388_LOUT2VOL, ES8388_ROUT2VOL, ++ 0, ES8388_OUT2VOL_MAX, 0, play_tlv), ++ ++ SOC_DOUBLE_TLV("Mic PGA Volume", ES8388_ADCCONTROL1, ++ 4, 0, 8, 0, mic_tlv), ++}; ++ ++/* ++ * DAPM Controls ++ */ ++static const char * const es8388_line_texts[] = { ++ "Line 1", "Line 2", "PGA", "Differential"}; ++ ++static const struct soc_enum es8388_lline_enum = ++ SOC_ENUM_SINGLE(ES8388_DACCONTROL16, 3, ++ ARRAY_SIZE(es8388_line_texts), ++ es8388_line_texts); ++static const struct snd_kcontrol_new es8388_left_line_controls = ++ SOC_DAPM_ENUM("Route", es8388_lline_enum); ++ ++static const struct soc_enum es8388_rline_enum = ++ SOC_ENUM_SINGLE(ES8388_DACCONTROL16, 0, ++ ARRAY_SIZE(es8388_line_texts), ++ es8388_line_texts); ++static const struct snd_kcontrol_new es8388_right_line_controls = ++ SOC_DAPM_ENUM("Route", es8388_lline_enum); ++ ++/* Left Mixer */ ++static const struct snd_kcontrol_new es8388_left_mixer_controls[] = { ++ SOC_DAPM_SINGLE("Playback Switch", ES8388_DACCONTROL17, 7, 1, 0), ++ SOC_DAPM_SINGLE("Left Bypass Switch", ES8388_DACCONTROL17, 6, 1, 0), ++ SOC_DAPM_SINGLE("Right Playback Switch", ES8388_DACCONTROL18, 7, 1, 0), ++ SOC_DAPM_SINGLE("Right Bypass Switch", 
ES8388_DACCONTROL18, 6, 1, 0), ++}; ++ ++/* Right Mixer */ ++static const struct snd_kcontrol_new es8388_right_mixer_controls[] = { ++ SOC_DAPM_SINGLE("Left Playback Switch", ES8388_DACCONTROL19, 7, 1, 0), ++ SOC_DAPM_SINGLE("Left Bypass Switch", ES8388_DACCONTROL19, 6, 1, 0), ++ SOC_DAPM_SINGLE("Playback Switch", ES8388_DACCONTROL20, 7, 1, 0), ++ SOC_DAPM_SINGLE("Right Bypass Switch", ES8388_DACCONTROL20, 6, 1, 0), ++}; ++ ++static const char * const es8388_pga_sel[] = { ++ "Line 1", "Line 2", "Line 3", "Differential"}; ++ ++/* Left PGA Mux */ ++static const struct soc_enum es8388_lpga_enum = ++ SOC_ENUM_SINGLE(ES8388_ADCCONTROL2, 6, ++ ARRAY_SIZE(es8388_pga_sel), ++ es8388_pga_sel); ++static const struct snd_kcontrol_new es8388_left_pga_controls = ++ SOC_DAPM_ENUM("Route", es8388_lpga_enum); ++ ++/* Right PGA Mux */ ++static const struct soc_enum es8388_rpga_enum = ++ SOC_ENUM_SINGLE(ES8388_ADCCONTROL2, 4, ++ ARRAY_SIZE(es8388_pga_sel), ++ es8388_pga_sel); ++static const struct snd_kcontrol_new es8388_right_pga_controls = ++ SOC_DAPM_ENUM("Route", es8388_rpga_enum); ++ ++/* Differential Mux */ ++static const char * const es8388_diff_sel[] = {"Line 1", "Line 2"}; ++static SOC_ENUM_SINGLE_DECL(diffmux, ++ ES8388_ADCCONTROL3, 7, es8388_diff_sel); ++static const struct snd_kcontrol_new es8388_diffmux_controls = ++ SOC_DAPM_ENUM("Route", diffmux); ++ ++/* Mono ADC Mux */ ++static const char * const es8388_mono_mux[] = {"Stereo", "Mono (Left)", ++ "Mono (Right)", "Digital Mono"}; ++static SOC_ENUM_SINGLE_DECL(monomux, ++ ES8388_ADCCONTROL3, 3, es8388_mono_mux); ++static const struct snd_kcontrol_new es8388_monomux_controls = ++ SOC_DAPM_ENUM("Route", monomux); ++ ++static const struct snd_soc_dapm_widget es8388_dapm_widgets[] = { ++ SND_SOC_DAPM_MUX("Differential Mux", SND_SOC_NOPM, 0, 0, ++ &es8388_diffmux_controls), ++ SND_SOC_DAPM_MUX("Left ADC Mux", SND_SOC_NOPM, 0, 0, ++ &es8388_monomux_controls), ++ SND_SOC_DAPM_MUX("Right ADC Mux", SND_SOC_NOPM, 0, 0, ++ 
&es8388_monomux_controls), ++ ++ SND_SOC_DAPM_MUX("Left PGA Mux", ES8388_ADCPOWER, ++ ES8388_ADCPOWER_AINL_OFF, 1, ++ &es8388_left_pga_controls), ++ SND_SOC_DAPM_MUX("Right PGA Mux", ES8388_ADCPOWER, ++ ES8388_ADCPOWER_AINR_OFF, 1, ++ &es8388_right_pga_controls), ++ ++ SND_SOC_DAPM_MUX("Left Line Mux", SND_SOC_NOPM, 0, 0, ++ &es8388_left_line_controls), ++ SND_SOC_DAPM_MUX("Right Line Mux", SND_SOC_NOPM, 0, 0, ++ &es8388_right_line_controls), ++ ++ SND_SOC_DAPM_ADC("Right ADC", "Right Capture", ES8388_ADCPOWER, ++ ES8388_ADCPOWER_ADCR_OFF, 1), ++ SND_SOC_DAPM_ADC("Left ADC", "Left Capture", ES8388_ADCPOWER, ++ ES8388_ADCPOWER_ADCL_OFF, 1), ++ ++ SND_SOC_DAPM_SUPPLY("DAC STM", ES8388_CHIPPOWER, ++ ES8388_CHIPPOWER_DACSTM_RESET, 1, NULL, 0), ++ SND_SOC_DAPM_SUPPLY("ADC STM", ES8388_CHIPPOWER, ++ ES8388_CHIPPOWER_ADCSTM_RESET, 1, NULL, 0), ++ ++ SND_SOC_DAPM_SUPPLY("DAC DIG", ES8388_CHIPPOWER, ++ ES8388_CHIPPOWER_DACDIG_OFF, 1, NULL, 0), ++ SND_SOC_DAPM_SUPPLY("ADC DIG", ES8388_CHIPPOWER, ++ ES8388_CHIPPOWER_ADCDIG_OFF, 1, NULL, 0), ++ ++ SND_SOC_DAPM_SUPPLY("DAC DLL", ES8388_CHIPPOWER, ++ ES8388_CHIPPOWER_DACDLL_OFF, 1, NULL, 0), ++ SND_SOC_DAPM_SUPPLY("ADC DLL", ES8388_CHIPPOWER, ++ ES8388_CHIPPOWER_ADCDLL_OFF, 1, NULL, 0), ++ ++ SND_SOC_DAPM_SUPPLY("ADC Vref", ES8388_CHIPPOWER, ++ ES8388_CHIPPOWER_ADCVREF_OFF, 1, NULL, 0), ++ SND_SOC_DAPM_SUPPLY("DAC Vref", ES8388_CHIPPOWER, ++ ES8388_CHIPPOWER_DACVREF_OFF, 1, NULL, 0), ++ ++ SND_SOC_DAPM_DAC("Right DAC", "Right Playback", ES8388_DACPOWER, ++ ES8388_DACPOWER_RDAC_OFF, 1), ++ SND_SOC_DAPM_DAC("Left DAC", "Left Playback", ES8388_DACPOWER, ++ ES8388_DACPOWER_LDAC_OFF, 1), ++ ++ SND_SOC_DAPM_MIXER("Left Mixer", SND_SOC_NOPM, 0, 0, ++ &es8388_left_mixer_controls[0], ++ ARRAY_SIZE(es8388_left_mixer_controls)), ++ SND_SOC_DAPM_MIXER("Right Mixer", SND_SOC_NOPM, 0, 0, ++ &es8388_right_mixer_controls[0], ++ ARRAY_SIZE(es8388_right_mixer_controls)), ++ ++ SND_SOC_DAPM_PGA("Right Out 2", ES8388_DACPOWER, ++ 
ES8388_DACPOWER_ROUT2_ON, 0, NULL, 0), ++ SND_SOC_DAPM_PGA("Left Out 2", ES8388_DACPOWER, ++ ES8388_DACPOWER_LOUT2_ON, 0, NULL, 0), ++ SND_SOC_DAPM_PGA("Right Out 1", ES8388_DACPOWER, ++ ES8388_DACPOWER_ROUT1_ON, 0, NULL, 0), ++ SND_SOC_DAPM_PGA("Left Out 1", ES8388_DACPOWER, ++ ES8388_DACPOWER_LOUT1_ON, 0, NULL, 0), ++ ++ SND_SOC_DAPM_OUTPUT("LOUT1"), ++ SND_SOC_DAPM_OUTPUT("ROUT1"), ++ SND_SOC_DAPM_OUTPUT("LOUT2"), ++ SND_SOC_DAPM_OUTPUT("ROUT2"), ++ ++ SND_SOC_DAPM_INPUT("LINPUT1"), ++ SND_SOC_DAPM_INPUT("LINPUT2"), ++ SND_SOC_DAPM_INPUT("RINPUT1"), ++ SND_SOC_DAPM_INPUT("RINPUT2"), ++}; ++ ++static const struct snd_soc_dapm_route es8388_dapm_routes[] = { ++ { "Left Line Mux", "Line 1", "LINPUT1" }, ++ { "Left Line Mux", "Line 2", "LINPUT2" }, ++ { "Left Line Mux", "PGA", "Left PGA Mux" }, ++ { "Left Line Mux", "Differential", "Differential Mux" }, ++ ++ { "Right Line Mux", "Line 1", "RINPUT1" }, ++ { "Right Line Mux", "Line 2", "RINPUT2" }, ++ { "Right Line Mux", "PGA", "Right PGA Mux" }, ++ { "Right Line Mux", "Differential", "Differential Mux" }, ++ ++ { "Left PGA Mux", "Line 1", "LINPUT1" }, ++ { "Left PGA Mux", "Line 2", "LINPUT2" }, ++ { "Left PGA Mux", "Differential", "Differential Mux" }, ++ ++ { "Right PGA Mux", "Line 1", "RINPUT1" }, ++ { "Right PGA Mux", "Line 2", "RINPUT2" }, ++ { "Right PGA Mux", "Differential", "Differential Mux" }, ++ ++ { "Differential Mux", "Line 1", "LINPUT1" }, ++ { "Differential Mux", "Line 1", "RINPUT1" }, ++ { "Differential Mux", "Line 2", "LINPUT2" }, ++ { "Differential Mux", "Line 2", "RINPUT2" }, ++ ++ { "Left ADC Mux", "Stereo", "Left PGA Mux" }, ++ { "Left ADC Mux", "Mono (Left)", "Left PGA Mux" }, ++ { "Left ADC Mux", "Digital Mono", "Left PGA Mux" }, ++ ++ { "Right ADC Mux", "Stereo", "Right PGA Mux" }, ++ { "Right ADC Mux", "Mono (Right)", "Right PGA Mux" }, ++ { "Right ADC Mux", "Digital Mono", "Right PGA Mux" }, ++ ++ { "Left ADC", NULL, "Left ADC Mux" }, ++ { "Right ADC", NULL, "Right ADC Mux" }, ++ ++ { "ADC 
DIG", NULL, "ADC STM" }, ++ { "ADC DIG", NULL, "ADC Vref" }, ++ { "ADC DIG", NULL, "ADC DLL" }, ++ ++ { "Left ADC", NULL, "ADC DIG" }, ++ { "Right ADC", NULL, "ADC DIG" }, ++ ++ { "Left Line Mux", "Line 1", "LINPUT1" }, ++ { "Left Line Mux", "Line 2", "LINPUT2" }, ++ { "Left Line Mux", "PGA", "Left PGA Mux" }, ++ { "Left Line Mux", "Differential", "Differential Mux" }, ++ ++ { "Right Line Mux", "Line 1", "RINPUT1" }, ++ { "Right Line Mux", "Line 2", "RINPUT2" }, ++ { "Right Line Mux", "PGA", "Right PGA Mux" }, ++ { "Right Line Mux", "Differential", "Differential Mux" }, ++ ++ { "Left Out 1", NULL, "Left DAC" }, ++ { "Right Out 1", NULL, "Right DAC" }, ++ { "Left Out 2", NULL, "Left DAC" }, ++ { "Right Out 2", NULL, "Right DAC" }, ++ ++ { "Left Mixer", "Playback Switch", "Left DAC" }, ++ { "Left Mixer", "Left Bypass Switch", "Left Line Mux" }, ++ { "Left Mixer", "Right Playback Switch", "Right DAC" }, ++ { "Left Mixer", "Right Bypass Switch", "Right Line Mux" }, ++ ++ { "Right Mixer", "Left Playback Switch", "Left DAC" }, ++ { "Right Mixer", "Left Bypass Switch", "Left Line Mux" }, ++ { "Right Mixer", "Playback Switch", "Right DAC" }, ++ { "Right Mixer", "Right Bypass Switch", "Right Line Mux" }, ++ ++ { "DAC DIG", NULL, "DAC STM" }, ++ { "DAC DIG", NULL, "DAC Vref" }, ++ { "DAC DIG", NULL, "DAC DLL" }, ++ ++ { "Left DAC", NULL, "DAC DIG" }, ++ { "Right DAC", NULL, "DAC DIG" }, ++ ++ { "Left Out 1", NULL, "Left Mixer" }, ++ { "LOUT1", NULL, "Left Out 1" }, ++ { "Right Out 1", NULL, "Right Mixer" }, ++ { "ROUT1", NULL, "Right Out 1" }, ++ ++ { "Left Out 2", NULL, "Left Mixer" }, ++ { "LOUT2", NULL, "Left Out 2" }, ++ { "Right Out 2", NULL, "Right Mixer" }, ++ { "ROUT2", NULL, "Right Out 2" }, ++}; ++ ++static int es8388_mute(struct snd_soc_dai *dai, int mute) ++{ ++ return snd_soc_component_update_bits(dai->component, ES8388_DACCONTROL3, ++ ES8388_DACCONTROL3_DACMUTE, ++ mute ? 
ES8388_DACCONTROL3_DACMUTE : 0); ++} ++ ++static int es8388_startup(struct snd_pcm_substream *substream, ++ struct snd_soc_dai *dai) ++{ ++ struct snd_soc_component *component = dai->component; ++ struct es8388_priv *es8388 = snd_soc_component_get_drvdata(component); ++ ++ if (es8388->master && es8388->sysclk_constraints) ++ snd_pcm_hw_constraint_list(substream->runtime, 0, ++ SNDRV_PCM_HW_PARAM_RATE, ++ es8388->sysclk_constraints); ++ ++ return 0; ++} ++ ++static int es8388_hw_params(struct snd_pcm_substream *substream, ++ struct snd_pcm_hw_params *params, ++ struct snd_soc_dai *dai) ++{ ++ struct snd_soc_component *component = dai->component; ++ struct es8388_priv *es8388 = snd_soc_component_get_drvdata(component); ++ int i; ++ int reg; ++ int wl; ++ int ratio; ++ ++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ++ reg = ES8388_DACCONTROL2; ++ else ++ reg = ES8388_ADCCONTROL5; ++ ++ if (es8388->master) { ++ if (!es8388->sysclk_constraints) { ++ dev_err(component->dev, "No MCLK configured\n"); ++ return -EINVAL; ++ } ++ ++ for (i = 0; i < es8388->sysclk_constraints->count; i++) ++ if (es8388->sysclk_constraints->list[i] == ++ params_rate(params)) ++ break; ++ ++ if (i == es8388->sysclk_constraints->count) { ++ dev_err(component->dev, ++ "LRCLK %d unsupported with current clock\n", ++ params_rate(params)); ++ return -EINVAL; ++ } ++ ratio = es8388->mclk_ratios[i]; ++ } else { ++ ratio = 0; ++ es8388->mclkdiv2 = 0; ++ } ++ ++ snd_soc_component_update_bits(component, ES8388_MASTERMODE, ++ ES8388_MASTERMODE_MCLKDIV2, ++ es8388->mclkdiv2 ? 
ES8388_MASTERMODE_MCLKDIV2 : 0); ++ ++ switch (params_width(params)) { ++ case 16: ++ wl = 3; ++ break; ++ case 18: ++ wl = 2; ++ break; ++ case 20: ++ wl = 1; ++ break; ++ case 24: ++ wl = 0; ++ break; ++ case 32: ++ wl = 4; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { ++ snd_soc_component_update_bits(component, ES8388_DACCONTROL1, ++ ES8388_DACCONTROL1_DACWL_MASK, ++ wl << ES8388_DACCONTROL1_DACWL_SHIFT); ++ ++ es8388->playback_fs = params_rate(params); ++ es8388_set_deemph(component); ++ } else ++ snd_soc_component_update_bits(component, ES8388_ADCCONTROL4, ++ ES8388_ADCCONTROL4_ADCWL_MASK, ++ wl << ES8388_ADCCONTROL4_ADCWL_SHIFT); ++ ++ return snd_soc_component_update_bits(component, reg, ES8388_RATEMASK, ratio); ++} ++ ++static int es8388_set_sysclk(struct snd_soc_dai *codec_dai, ++ int clk_id, unsigned int freq, int dir) ++{ ++ struct snd_soc_component *component = codec_dai->component; ++ struct es8388_priv *es8388 = snd_soc_component_get_drvdata(component); ++ int mclkdiv2 = 0; ++ ++ switch (freq) { ++ case 0: ++ es8388->sysclk_constraints = NULL; ++ es8388->mclk_ratios = NULL; ++ break; ++ case 22579200: ++ mclkdiv2 = 1; ++ /* fallthru */ ++ case 11289600: ++ es8388->sysclk_constraints = &constraints_11289; ++ es8388->mclk_ratios = ratios_11289; ++ break; ++ case 24576000: ++ mclkdiv2 = 1; ++ /* fallthru */ ++ case 12288000: ++ es8388->sysclk_constraints = &constraints_12288; ++ es8388->mclk_ratios = ratios_12288; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ es8388->mclkdiv2 = mclkdiv2; ++ return 0; ++} ++ ++static int es8388_set_dai_fmt(struct snd_soc_dai *codec_dai, ++ unsigned int fmt) ++{ ++ struct snd_soc_component *component = codec_dai->component; ++ struct es8388_priv *es8388 = snd_soc_component_get_drvdata(component); ++ u8 dac_mode = 0; ++ u8 adc_mode = 0; ++ ++ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { ++ case SND_SOC_DAIFMT_CBM_CFM: ++ /* Master serial port mode, with BCLK 
generated automatically */ ++ snd_soc_component_update_bits(component, ES8388_MASTERMODE, ++ ES8388_MASTERMODE_MSC, ++ ES8388_MASTERMODE_MSC); ++ es8388->master = true; ++ break; ++ case SND_SOC_DAIFMT_CBS_CFS: ++ /* Slave serial port mode */ ++ snd_soc_component_update_bits(component, ES8388_MASTERMODE, ++ ES8388_MASTERMODE_MSC, 0); ++ es8388->master = false; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ /* interface format */ ++ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { ++ case SND_SOC_DAIFMT_I2S: ++ dac_mode |= ES8388_DACCONTROL1_DACFORMAT_I2S; ++ adc_mode |= ES8388_ADCCONTROL4_ADCFORMAT_I2S; ++ break; ++ case SND_SOC_DAIFMT_RIGHT_J: ++ dac_mode |= ES8388_DACCONTROL1_DACFORMAT_RJUST; ++ adc_mode |= ES8388_ADCCONTROL4_ADCFORMAT_RJUST; ++ break; ++ case SND_SOC_DAIFMT_LEFT_J: ++ dac_mode |= ES8388_DACCONTROL1_DACFORMAT_LJUST; ++ adc_mode |= ES8388_ADCCONTROL4_ADCFORMAT_LJUST; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ /* clock inversion */ ++ if ((fmt & SND_SOC_DAIFMT_INV_MASK) != SND_SOC_DAIFMT_NB_NF) ++ return -EINVAL; ++ ++ snd_soc_component_update_bits(component, ES8388_DACCONTROL1, ++ ES8388_DACCONTROL1_DACFORMAT_MASK, dac_mode); ++ snd_soc_component_update_bits(component, ES8388_ADCCONTROL4, ++ ES8388_ADCCONTROL4_ADCFORMAT_MASK, adc_mode); ++ ++ return 0; ++} ++ ++static int es8388_set_bias_level(struct snd_soc_component *component, ++ enum snd_soc_bias_level level) ++{ ++ switch (level) { ++ case SND_SOC_BIAS_ON: ++ break; ++ ++ case SND_SOC_BIAS_PREPARE: ++ /* VREF, VMID=2x50k, digital enabled */ ++ snd_soc_component_write(component, ES8388_CHIPPOWER, 0); ++ snd_soc_component_update_bits(component, ES8388_CONTROL1, ++ ES8388_CONTROL1_VMIDSEL_MASK | ++ ES8388_CONTROL1_ENREF, ++ ES8388_CONTROL1_VMIDSEL_50k | ++ ES8388_CONTROL1_ENREF); ++ break; ++ ++ case SND_SOC_BIAS_STANDBY: ++ if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF) { ++ snd_soc_component_update_bits(component, ES8388_CONTROL1, ++ ES8388_CONTROL1_VMIDSEL_MASK 
| ++ ES8388_CONTROL1_ENREF, ++ ES8388_CONTROL1_VMIDSEL_5k | ++ ES8388_CONTROL1_ENREF); ++ ++ /* Charge caps */ ++ msleep(100); ++ } ++ ++ snd_soc_component_write(component, ES8388_CONTROL2, ++ ES8388_CONTROL2_OVERCURRENT_ON | ++ ES8388_CONTROL2_THERMAL_SHUTDOWN_ON); ++ ++ /* VREF, VMID=2*500k, digital stopped */ ++ snd_soc_component_update_bits(component, ES8388_CONTROL1, ++ ES8388_CONTROL1_VMIDSEL_MASK | ++ ES8388_CONTROL1_ENREF, ++ ES8388_CONTROL1_VMIDSEL_500k | ++ ES8388_CONTROL1_ENREF); ++ break; ++ ++ case SND_SOC_BIAS_OFF: ++ snd_soc_component_update_bits(component, ES8388_CONTROL1, ++ ES8388_CONTROL1_VMIDSEL_MASK | ++ ES8388_CONTROL1_ENREF, ++ 0); ++ break; ++ } ++ return 0; ++} ++ ++static const struct snd_soc_dai_ops es8388_dai_ops = { ++ .startup = es8388_startup, ++ .hw_params = es8388_hw_params, ++ .digital_mute = es8388_mute, ++ .set_sysclk = es8388_set_sysclk, ++ .set_fmt = es8388_set_dai_fmt, ++}; ++ ++static struct snd_soc_dai_driver es8388_dai = { ++ .name = "es8388-hifi", ++ .playback = { ++ .stream_name = "Playback", ++ .channels_min = 2, ++ .channels_max = 2, ++ .rates = ES8388_RATES, ++ .formats = ES8388_FORMATS, ++ }, ++ .capture = { ++ .stream_name = "Capture", ++ .channels_min = 2, ++ .channels_max = 2, ++ .rates = ES8388_RATES, ++ .formats = ES8388_FORMATS, ++ }, ++ .ops = &es8388_dai_ops, ++ .symmetric_rates = 1, ++}; ++ ++static int es8388_suspend(struct snd_soc_component *component) ++{ ++ return 0; ++} ++ ++static int es8388_resume(struct snd_soc_component *component) ++{ ++ struct regmap *regmap = dev_get_regmap(component->dev, NULL); ++ struct es8388_priv *es8388; ++ int ret; ++ ++ es8388 = snd_soc_component_get_drvdata(component); ++ ++ regcache_mark_dirty(regmap); ++ ret = regcache_sync(regmap); ++ if (ret) { ++ dev_err(component->dev, "unable to sync regcache\n"); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static int es8388_component_probe(struct snd_soc_component *component) ++{ ++ snd_soc_component_write(component, 
ES8388_ADCPOWER, 0xf0); ++ snd_soc_component_write(component, ES8388_CONTROL1, 0x30); ++ snd_soc_component_write(component, ES8388_DACCONTROL21, 0x80); ++ snd_soc_component_write(component, ES8388_ADCCONTROL10, 0xda); ++ ++ return 0; ++} ++ ++static void es8388_remove(struct snd_soc_component *component) ++{ ++} ++ ++const struct regmap_config es8388_regmap_config = { ++ .reg_bits = 8, ++ .val_bits = 8, ++ .max_register = ES8388_REG_MAX, ++ .cache_type = REGCACHE_RBTREE, ++ .use_single_rw = true, ++}; ++EXPORT_SYMBOL_GPL(es8388_regmap_config); ++ ++static const struct snd_soc_component_driver es8388_component_driver = { ++ .probe = es8388_component_probe, ++ .remove = es8388_remove, ++ .suspend = es8388_suspend, ++ .resume = es8388_resume, ++ .set_bias_level = es8388_set_bias_level, ++ .controls = es8388_snd_controls, ++ .num_controls = ARRAY_SIZE(es8388_snd_controls), ++ .dapm_widgets = es8388_dapm_widgets, ++ .num_dapm_widgets = ARRAY_SIZE(es8388_dapm_widgets), ++ .dapm_routes = es8388_dapm_routes, ++ .num_dapm_routes = ARRAY_SIZE(es8388_dapm_routes), ++ .suspend_bias_off = 1, ++ .idle_bias_on = 1, ++ .use_pmdown_time = 1, ++ .endianness = 1, ++ .non_legacy_dai_naming = 1, ++}; ++ ++int es8388_probe(struct device *dev, struct regmap *regmap) ++{ ++ struct es8388_priv *es8388; ++ ++ if (IS_ERR(regmap)) ++ return PTR_ERR(regmap); ++ ++ es8388 = devm_kzalloc(dev, sizeof(*es8388), GFP_KERNEL); ++ if (es8388 == NULL) ++ return -ENOMEM; ++ ++ es8388->regmap = regmap; ++ ++ dev_set_drvdata(dev, es8388); ++ ++ return devm_snd_soc_register_component(dev, ++ &es8388_component_driver, &es8388_dai, 1); ++} ++EXPORT_SYMBOL_GPL(es8388_probe); ++ ++static const struct i2c_device_id es8388_id[] = { ++ { "es8388", 0 }, ++ { } ++}; ++MODULE_DEVICE_TABLE(i2c, es8388_id); ++ ++static const struct of_device_id es8388_of_match[] = { ++ { .compatible = "everest,es8388", }, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, es8388_of_match); ++ ++static struct acpi_device_id es8388_acpi_match[] = { 
++ {"ESSX8388", 0 }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(acpi, es8388_acpi_match); ++ ++static int es8388_i2c_probe(struct i2c_client *i2c, ++ const struct i2c_device_id *id) ++{ ++ return es8388_probe(&i2c->dev, ++ devm_regmap_init_i2c(i2c, &es8388_regmap_config)); ++} ++ ++static struct i2c_driver es8388_i2c_driver = { ++ .driver = { ++ .name = "es8388", ++ .of_match_table = es8388_of_match, ++ .acpi_match_table = es8388_acpi_match, ++ }, ++ .probe = es8388_i2c_probe, ++ .id_table = es8388_id, ++}; ++ ++module_i2c_driver(es8388_i2c_driver); ++ ++MODULE_DESCRIPTION("ASoC ES8388 driver"); ++MODULE_AUTHOR("Yiqun Zhang "); ++MODULE_LICENSE("GPL"); +diff --git a/sound/soc/codecs/es8388.h b/sound/soc/codecs/es8388.h +new file mode 100644 +index 000000000000..5858a71261fb +--- /dev/null ++++ b/sound/soc/codecs/es8388.h +@@ -0,0 +1,290 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * es8388.h -- ES8388 ALSA SoC Audio driver ++ */ ++ ++#ifndef _ES8388_H ++#define _ES8388_H ++ ++#include ++ ++struct device; ++ ++extern const struct regmap_config es8388_regmap_config; ++int es8388_probe(struct device *dev, struct regmap *regmap); ++ ++#define ES8388_DACLVOL 46 ++#define ES8388_DACRVOL 47 ++#define ES8388_DACCTL 28 ++#define ES8388_RATEMASK (0x1f << 0) ++ ++#define ES8388_CONTROL1 0x00 ++#define ES8388_CONTROL1_VMIDSEL_OFF (0 << 0) ++#define ES8388_CONTROL1_VMIDSEL_50k (1 << 0) ++#define ES8388_CONTROL1_VMIDSEL_500k (2 << 0) ++#define ES8388_CONTROL1_VMIDSEL_5k (3 << 0) ++#define ES8388_CONTROL1_VMIDSEL_MASK (3 << 0) ++#define ES8388_CONTROL1_ENREF (1 << 2) ++#define ES8388_CONTROL1_SEQEN (1 << 3) ++#define ES8388_CONTROL1_SAMEFS (1 << 4) ++#define ES8388_CONTROL1_DACMCLK_ADC (0 << 5) ++#define ES8388_CONTROL1_DACMCLK_DAC (1 << 5) ++#define ES8388_CONTROL1_LRCM (1 << 6) ++#define ES8388_CONTROL1_SCP_RESET (1 << 7) ++ ++#define ES8388_CONTROL2 0x01 ++#define ES8388_CONTROL2_VREF_BUF_OFF (1 << 0) ++#define ES8388_CONTROL2_VREF_LOWPOWER (1 << 1) ++#define 
ES8388_CONTROL2_IBIASGEN_OFF (1 << 2) ++#define ES8388_CONTROL2_ANALOG_OFF (1 << 3) ++#define ES8388_CONTROL2_VREF_BUF_LOWPOWER (1 << 4) ++#define ES8388_CONTROL2_VCM_MOD_LOWPOWER (1 << 5) ++#define ES8388_CONTROL2_OVERCURRENT_ON (1 << 6) ++#define ES8388_CONTROL2_THERMAL_SHUTDOWN_ON (1 << 7) ++ ++#define ES8388_CHIPPOWER 0x02 ++#define ES8388_CHIPPOWER_DACVREF_OFF 0 ++#define ES8388_CHIPPOWER_ADCVREF_OFF 1 ++#define ES8388_CHIPPOWER_DACDLL_OFF 2 ++#define ES8388_CHIPPOWER_ADCDLL_OFF 3 ++#define ES8388_CHIPPOWER_DACSTM_RESET 4 ++#define ES8388_CHIPPOWER_ADCSTM_RESET 5 ++#define ES8388_CHIPPOWER_DACDIG_OFF 6 ++#define ES8388_CHIPPOWER_ADCDIG_OFF 7 ++ ++#define ES8388_ADCPOWER 0x03 ++#define ES8388_ADCPOWER_INT1_LOWPOWER 0 ++#define ES8388_ADCPOWER_FLASH_ADC_LOWPOWER 1 ++#define ES8388_ADCPOWER_ADC_BIAS_GEN_OFF 2 ++#define ES8388_ADCPOWER_MIC_BIAS_OFF 3 ++#define ES8388_ADCPOWER_ADCR_OFF 4 ++#define ES8388_ADCPOWER_ADCL_OFF 5 ++#define ES8388_ADCPOWER_AINR_OFF 6 ++#define ES8388_ADCPOWER_AINL_OFF 7 ++ ++#define ES8388_DACPOWER 0x04 ++#define ES8388_DACPOWER_OUT3_ON 0 ++#define ES8388_DACPOWER_MONO_ON 1 ++#define ES8388_DACPOWER_ROUT2_ON 2 ++#define ES8388_DACPOWER_LOUT2_ON 3 ++#define ES8388_DACPOWER_ROUT1_ON 4 ++#define ES8388_DACPOWER_LOUT1_ON 5 ++#define ES8388_DACPOWER_RDAC_OFF 6 ++#define ES8388_DACPOWER_LDAC_OFF 7 ++ ++#define ES8388_CHIPLOPOW1 0x05 ++#define ES8388_CHIPLOPOW2 0x06 ++#define ES8388_ANAVOLMANAG 0x07 ++ ++#define ES8388_MASTERMODE 0x08 ++#define ES8388_MASTERMODE_BCLKDIV (0 << 0) ++#define ES8388_MASTERMODE_BCLK_INV (1 << 5) ++#define ES8388_MASTERMODE_MCLKDIV2 (1 << 6) ++#define ES8388_MASTERMODE_MSC (1 << 7) ++ ++#define ES8388_ADCCONTROL1 0x09 ++#define ES8388_ADCCONTROL2 0x0a ++#define ES8388_ADCCONTROL3 0x0b ++ ++#define ES8388_ADCCONTROL4 0x0c ++#define ES8388_ADCCONTROL4_ADCFORMAT_MASK (3 << 0) ++#define ES8388_ADCCONTROL4_ADCFORMAT_I2S (0 << 0) ++#define ES8388_ADCCONTROL4_ADCFORMAT_LJUST (1 << 0) ++#define 
ES8388_ADCCONTROL4_ADCFORMAT_RJUST (2 << 0) ++#define ES8388_ADCCONTROL4_ADCFORMAT_PCM (3 << 0) ++#define ES8388_ADCCONTROL4_ADCWL_SHIFT 2 ++#define ES8388_ADCCONTROL4_ADCWL_MASK (7 << 2) ++#define ES8388_ADCCONTROL4_ADCLRP_I2S_POL_NORMAL (0 << 5) ++#define ES8388_ADCCONTROL4_ADCLRP_I2S_POL_INV (1 << 5) ++#define ES8388_ADCCONTROL4_ADCLRP_PCM_MSB_CLK2 (0 << 5) ++#define ES8388_ADCCONTROL4_ADCLRP_PCM_MSB_CLK1 (1 << 5) ++ ++#define ES8388_ADCCONTROL5 0x0d ++#define ES8388_ADCCONTROL5_RATEMASK (0x1f << 0) ++ ++#define ES8388_ADCCONTROL6 0x0e ++ ++#define ES8388_ADCCONTROL7 0x0f ++#define ES8388_ADCCONTROL7_ADC_MUTE (1 << 2) ++#define ES8388_ADCCONTROL7_ADC_LER (1 << 3) ++#define ES8388_ADCCONTROL7_ADC_ZERO_CROSS (1 << 4) ++#define ES8388_ADCCONTROL7_ADC_SOFT_RAMP (1 << 5) ++#define ES8388_ADCCONTROL7_ADC_RAMP_RATE_4 (0 << 6) ++#define ES8388_ADCCONTROL7_ADC_RAMP_RATE_8 (1 << 6) ++#define ES8388_ADCCONTROL7_ADC_RAMP_RATE_16 (2 << 6) ++#define ES8388_ADCCONTROL7_ADC_RAMP_RATE_32 (3 << 6) ++ ++#define ES8388_ADCCONTROL8 0x10 ++#define ES8388_ADCCONTROL9 0x11 ++#define ES8388_ADCCONTROL10 0x12 ++#define ES8388_ADCCONTROL11 0x13 ++#define ES8388_ADCCONTROL12 0x14 ++#define ES8388_ADCCONTROL13 0x15 ++#define ES8388_ADCCONTROL14 0x16 ++ ++#define ES8388_DACCONTROL1 0x17 ++#define ES8388_DACCONTROL1_DACFORMAT_MASK (3 << 1) ++#define ES8388_DACCONTROL1_DACFORMAT_I2S (0 << 1) ++#define ES8388_DACCONTROL1_DACFORMAT_LJUST (1 << 1) ++#define ES8388_DACCONTROL1_DACFORMAT_RJUST (2 << 1) ++#define ES8388_DACCONTROL1_DACFORMAT_PCM (3 << 1) ++#define ES8388_DACCONTROL1_DACWL_SHIFT 3 ++#define ES8388_DACCONTROL1_DACWL_MASK (7 << 3) ++#define ES8388_DACCONTROL1_DACLRP_I2S_POL_NORMAL (0 << 6) ++#define ES8388_DACCONTROL1_DACLRP_I2S_POL_INV (1 << 6) ++#define ES8388_DACCONTROL1_DACLRP_PCM_MSB_CLK2 (0 << 6) ++#define ES8388_DACCONTROL1_DACLRP_PCM_MSB_CLK1 (1 << 6) ++#define ES8388_DACCONTROL1_LRSWAP (1 << 7) ++ ++#define ES8388_DACCONTROL2 0x18 ++#define ES8388_DACCONTROL2_RATEMASK (0x1f << 
0) ++#define ES8388_DACCONTROL2_DOUBLESPEED (1 << 5) ++ ++#define ES8388_DACCONTROL3 0x19 ++#define ES8388_DACCONTROL3_AUTOMUTE (1 << 2) ++#define ES8388_DACCONTROL3_DACMUTE (1 << 2) ++#define ES8388_DACCONTROL3_LEFTGAINVOL (1 << 3) ++#define ES8388_DACCONTROL3_DACZEROCROSS (1 << 4) ++#define ES8388_DACCONTROL3_DACSOFTRAMP (1 << 5) ++#define ES8388_DACCONTROL3_DACRAMPRATE (3 << 6) ++ ++#define ES8388_LDACVOL 0x1a ++#define ES8388_LDACVOL_MASK (0 << 0) ++#define ES8388_LDACVOL_MAX (0xc0) ++ ++#define ES8388_RDACVOL 0x1b ++#define ES8388_RDACVOL_MASK (0 << 0) ++#define ES8388_RDACVOL_MAX (0xc0) ++ ++#define ES8388_DACVOL_MAX (0xc0) ++ ++#define ES8388_DACCONTROL4 0x1a ++#define ES8388_DACCONTROL5 0x1b ++ ++#define ES8388_DACCONTROL6 0x1c ++#define ES8388_DACCONTROL6_CLICKFREE (1 << 3) ++#define ES8388_DACCONTROL6_DAC_INVR (1 << 4) ++#define ES8388_DACCONTROL6_DAC_INVL (1 << 5) ++#define ES8388_DACCONTROL6_DEEMPH_MASK (3 << 6) ++#define ES8388_DACCONTROL6_DEEMPH_OFF (0 << 6) ++#define ES8388_DACCONTROL6_DEEMPH_32k (1 << 6) ++#define ES8388_DACCONTROL6_DEEMPH_44_1k (2 << 6) ++#define ES8388_DACCONTROL6_DEEMPH_48k (3 << 6) ++ ++#define ES8388_DACCONTROL7 0x1d ++#define ES8388_DACCONTROL7_VPP_SCALE_3p5 (0 << 0) ++#define ES8388_DACCONTROL7_VPP_SCALE_4p0 (1 << 0) ++#define ES8388_DACCONTROL7_VPP_SCALE_3p0 (2 << 0) ++#define ES8388_DACCONTROL7_VPP_SCALE_2p5 (3 << 0) ++#define ES8388_DACCONTROL7_SHELVING_STRENGTH (1 << 2) /* In eights */ ++#define ES8388_DACCONTROL7_MONO (1 << 5) ++#define ES8388_DACCONTROL7_ZEROR (1 << 6) ++#define ES8388_DACCONTROL7_ZEROL (1 << 7) ++ ++/* Shelving filter */ ++#define ES8388_DACCONTROL8 0x1e ++#define ES8388_DACCONTROL9 0x1f ++#define ES8388_DACCONTROL10 0x20 ++#define ES8388_DACCONTROL11 0x21 ++#define ES8388_DACCONTROL12 0x22 ++#define ES8388_DACCONTROL13 0x23 ++#define ES8388_DACCONTROL14 0x24 ++#define ES8388_DACCONTROL15 0x25 ++ ++#define ES8388_DACCONTROL16 0x26 ++#define ES8388_DACCONTROL16_RMIXSEL_RIN1 (0 << 0) ++#define 
ES8388_DACCONTROL16_RMIXSEL_RIN2 (1 << 0) ++#define ES8388_DACCONTROL16_RMIXSEL_RIN3 (2 << 0) ++#define ES8388_DACCONTROL16_RMIXSEL_RADC (3 << 0) ++#define ES8388_DACCONTROL16_LMIXSEL_LIN1 (0 << 3) ++#define ES8388_DACCONTROL16_LMIXSEL_LIN2 (1 << 3) ++#define ES8388_DACCONTROL16_LMIXSEL_LIN3 (2 << 3) ++#define ES8388_DACCONTROL16_LMIXSEL_LADC (3 << 3) ++ ++#define ES8388_DACCONTROL17 0x27 ++#define ES8388_DACCONTROL17_LI2LOVOL (7 << 3) ++#define ES8388_DACCONTROL17_LI2LO (1 << 6) ++#define ES8388_DACCONTROL17_LD2LO (1 << 7) ++ ++#define ES8388_DACCONTROL18 0x28 ++#define ES8388_DACCONTROL18_RI2LOVOL (7 << 3) ++#define ES8388_DACCONTROL18_RI2LO (1 << 6) ++#define ES8388_DACCONTROL18_RD2LO (1 << 7) ++ ++#define ES8388_DACCONTROL19 0x29 ++#define ES8388_DACCONTROL19_LI2ROVOL (7 << 3) ++#define ES8388_DACCONTROL19_LI2RO (1 << 6) ++#define ES8388_DACCONTROL19_LD2RO (1 << 7) ++ ++#define ES8388_DACCONTROL20 0x2a ++#define ES8388_DACCONTROL20_RI2ROVOL (7 << 3) ++#define ES8388_DACCONTROL20_RI2RO (1 << 6) ++#define ES8388_DACCONTROL20_RD2RO (1 << 7) ++ ++#define ES8388_DACCONTROL21 0x2b ++#define ES8388_DACCONTROL21_LI2MOVOL (7 << 3) ++#define ES8388_DACCONTROL21_LI2MO (1 << 6) ++#define ES8388_DACCONTROL21_LD2MO (1 << 7) ++ ++#define ES8388_DACCONTROL22 0x2c ++#define ES8388_DACCONTROL22_RI2MOVOL (7 << 3) ++#define ES8388_DACCONTROL22_RI2MO (1 << 6) ++#define ES8388_DACCONTROL22_RD2MO (1 << 7) ++ ++#define ES8388_DACCONTROL23 0x2d ++#define ES8388_DACCONTROL23_MOUTINV (1 << 1) ++#define ES8388_DACCONTROL23_HPSWPOL (1 << 2) ++#define ES8388_DACCONTROL23_HPSWEN (1 << 3) ++#define ES8388_DACCONTROL23_VROI_1p5k (0 << 4) ++#define ES8388_DACCONTROL23_VROI_40k (1 << 4) ++#define ES8388_DACCONTROL23_OUT3_VREF (0 << 5) ++#define ES8388_DACCONTROL23_OUT3_ROUT1 (1 << 5) ++#define ES8388_DACCONTROL23_OUT3_MONOOUT (2 << 5) ++#define ES8388_DACCONTROL23_OUT3_RIGHT_MIXER (3 << 5) ++#define ES8388_DACCONTROL23_ROUT2INV (1 << 7) ++ ++/* LOUT1 Amplifier */ ++#define ES8388_LOUT1VOL 0x2e 
++#define ES8388_LOUT1VOL_MASK (0 << 5) ++#define ES8388_LOUT1VOL_MAX (0x24) ++ ++/* ROUT1 Amplifier */ ++#define ES8388_ROUT1VOL 0x2f ++#define ES8388_ROUT1VOL_MASK (0 << 5) ++#define ES8388_ROUT1VOL_MAX (0x24) ++ ++#define ES8388_OUT1VOL_MAX (0x24) ++ ++/* LOUT2 Amplifier */ ++#define ES8388_LOUT2VOL 0x30 ++#define ES8388_LOUT2VOL_MASK (0 << 5) ++#define ES8388_LOUT2VOL_MAX (0x24) ++ ++/* ROUT2 Amplifier */ ++#define ES8388_ROUT2VOL 0x31 ++#define ES8388_ROUT2VOL_MASK (0 << 5) ++#define ES8388_ROUT2VOL_MAX (0x24) ++ ++#define ES8388_OUT2VOL_MAX (0x24) ++ ++/* Mono Out Amplifier */ ++#define ES8388_MONOOUTVOL 0x32 ++#define ES8388_MONOOUTVOL_MASK (0 << 5) ++#define ES8388_MONOOUTVOL_MAX (0x24) ++ ++#define ES8388_DACCONTROL29 0x33 ++#define ES8388_DACCONTROL30 0x34 ++ ++#define ES8388_SYSCLK 0 ++ ++#define ES8388_REG_MAX 0x35 ++ ++#define ES8388_1536FS 1536 ++#define ES8388_1024FS 1024 ++#define ES8388_768FS 768 ++#define ES8388_512FS 512 ++#define ES8388_384FS 384 ++#define ES8388_256FS 256 ++#define ES8388_128FS 128 ++ ++#endif +diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c +index d00734d31e04..9af31378077d 100644 +--- a/sound/soc/codecs/hdmi-codec.c ++++ b/sound/soc/codecs/hdmi-codec.c +@@ -17,6 +17,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -285,6 +286,8 @@ struct hdmi_codec_priv { + uint8_t eld[MAX_ELD_BYTES]; + struct snd_pcm_chmap *chmap_info; + unsigned int chmap_idx; ++ struct snd_soc_jack *jack; ++ unsigned int jack_status; + }; + + static const struct snd_soc_dapm_widget hdmi_widgets[] = { +@@ -698,6 +701,44 @@ static int hdmi_dai_probe(struct snd_soc_dai *dai) + return snd_soc_dapm_add_routes(dapm, &route, 1); + } + ++static void hdmi_codec_jack_report(struct hdmi_codec_priv *hcp, ++ unsigned int jack_status) ++{ ++ if (hcp->jack && jack_status != hcp->jack_status) { ++ snd_soc_jack_report(hcp->jack, jack_status, SND_JACK_LINEOUT); ++ hcp->jack_status = jack_status; ++ } ++} ++ 
++static void plugged_cb(struct device *dev, bool plugged) ++{ ++ struct hdmi_codec_priv *hcp = dev_get_drvdata(dev); ++ ++ if (plugged) ++ hdmi_codec_jack_report(hcp, SND_JACK_LINEOUT); ++ else ++ hdmi_codec_jack_report(hcp, 0); ++} ++ ++static int hdmi_codec_set_jack(struct snd_soc_component *component, ++ struct snd_soc_jack *jack, ++ void *data) ++{ ++ struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component); ++ int ret = -EOPNOTSUPP; ++ ++ if (hcp->hcd.ops->hook_plugged_cb) { ++ hcp->jack = jack; ++ ret = hcp->hcd.ops->hook_plugged_cb(component->dev->parent, ++ hcp->hcd.data, ++ plugged_cb, ++ component->dev); ++ if (ret) ++ hcp->jack = NULL; ++ } ++ return ret; ++} ++ + static const struct snd_soc_dai_driver hdmi_i2s_dai = { + .name = "i2s-hifi", + .id = DAI_ID_I2S, +@@ -749,6 +790,7 @@ static const struct snd_soc_component_driver hdmi_driver = { + .use_pmdown_time = 1, + .endianness = 1, + .non_legacy_dai_naming = 1, ++ .set_jack = hdmi_codec_set_jack, + }; + + static int hdmi_codec_probe(struct platform_device *pdev) +diff --git a/sound/soc/phytium/Kconfig b/sound/soc/phytium/Kconfig +new file mode 100644 +index 000000000000..4f9f12ce26b1 +--- /dev/null ++++ b/sound/soc/phytium/Kconfig +@@ -0,0 +1,31 @@ ++config SND_SOC_PHYTIUM_I2S ++ bool "Phytium I2S Device Driver" ++ depends on ARCH_PHYTIUM ++ help ++ Say Y or M if you want to add support for I2S driver for ++ Phytium I2S device . The device supports 2 channels each ++ for play and record. ++ ++config SND_PMDK_ES8388 ++ tristate "Phytium X100 machine support with ES8388" ++ depends on I2C && SND_SOC_PHYTIUM_I2S ++ select SND_SOC_ES8388 ++ help ++ Say Y if you want to add Phytium machine support for ++ ES8388 codecs. ++ ++config SND_PMDK_ES8336 ++ tristate "Phytium X100 machine support with ES8336" ++ depends on I2C && SND_SOC_PHYTIUM_I2S ++ select SND_SOC_ES8336 ++ help ++ Say Y if you want to add Phytium machine support for ++ ES8336 codecs. 
++ ++config SND_PMDK_DP ++ tristate "Phytium machine support with X100 DP" ++ depends on I2C && SND_SOC_PHYTIUM_I2S ++ select SND_SOC_HDMI_CODEC ++ help ++ Say Y if you want to add Phytium machine support for ++ Displayport on X100. +diff --git a/sound/soc/phytium/Makefile b/sound/soc/phytium/Makefile +new file mode 100644 +index 000000000000..db3c0659e844 +--- /dev/null ++++ b/sound/soc/phytium/Makefile +@@ -0,0 +1,13 @@ ++# PHYTIUM Platform Support ++ ++snd-soc-phytium-i2s-objs :=phytium_i2s.o ++obj-$(CONFIG_SND_SOC_PHYTIUM_I2S) += snd-soc-phytium-i2s.o ++ ++snd-soc-pmdk-es8388-objs :=pmdk_es8388.o ++obj-$(CONFIG_SND_PMDK_ES8388) += snd-soc-pmdk-es8388.o ++ ++snd-soc-pmdk-es8336-objs :=pmdk_es8336.o ++obj-$(CONFIG_SND_PMDK_ES8336) += snd-soc-pmdk-es8336.o ++ ++snd-soc-pmdk-dp-objs :=pmdk_dp.o ++obj-$(CONFIG_SND_PMDK_DP) += snd-soc-pmdk-dp.o +diff --git a/sound/soc/phytium/local.h b/sound/soc/phytium/local.h +new file mode 100644 +index 000000000000..43e989c36fd8 +--- /dev/null ++++ b/sound/soc/phytium/local.h +@@ -0,0 +1,326 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright (C) 2020-2021, Phytium Technology Co.,Ltd. ++ * ++ * This file is licensed under the terms of the GNU General Public ++ * License version 2. This program is licensed "as is" without any ++ * warranty of any kind, whether express or implied. 
++ */ ++ ++#ifndef __PHYTIUM_I2S_LOCAL_H ++#define __PHYTIUM_I2S_LOCAL_H ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* I2S clk setting*/ ++#define CLK_CFG0 0xc00 ++#define CLK_CFG1 0xc04 ++ ++/* common register for all channel */ ++#define I2S_IER 0x000 ++#define IRER 0x004 ++#define ITER 0x008 ++#define CER 0x00C ++ ++#define RXFFR 0x014 ++#define TXFFR 0x018 ++ ++/* Interrupt status register fields */ ++#define ISR_TXFO BIT(5) ++#define ISR_TXFE BIT(4) ++#define ISR_RXFO BIT(1) ++#define ISR_RXDA BIT(0) ++ ++/* I2STxRxRegisters for all channels */ ++#define LRBR_LTHR(x) (0x40 * x + 0x020) ++#define RRBR_RTHR(x) (0x40 * x + 0x024) ++#define RER(x) (0x40 * x + 0x028) ++ ++#define RCR(x) (0x40 * x + 0x030) ++ ++#define ISR(x) (0x40 * x + 0x038) ++#define IMR(x) (0x40 * x + 0x03C) ++#define ROR(x) (0x40 * x + 0x040) ++#define TOR(x) (0x40 * x + 0x044) ++#define RFCR(x) (0x40 * x + 0x048) ++#define TFCR(x) (0x40 * x + 0x04C) ++#define RFF(x) (0x40 * x + 0x050) ++#define TFF(x) (0x40 * x + 0x054) ++ ++/*enable txd and rxd block channel0~3 */ ++#define TER(x) (0x40 * x + 0x02C) ++#define CCR 0x010 ++#define TCR(x) (0x40 * x + 0x034) ++ ++ ++/* I2SCOMPRegisters */ ++#define I2S_COMP_PARAM_2 0x01F0 ++#define I2S_COMP_PARAM_1 0x01F4 ++#define I2S_COMP_VERSION 0x01F8 ++#define I2S_COMP_TYPE 0x01FC ++ ++/***I2S AND DMA***/ ++ ++#define DMA_GCAP 0x0024 ++ ++#define DMA_CHAL_CONFG1 0x0028 ++ ++#define DMA_CHAL_CONFG0 0x0004 ++#define DMA_MASK_INT 0x000c ++#define DMA_BDLPU(x) (0x40 * x + 0x0040) ++#define DMA_BDLPL(x) (0x40 * x + 0x0044) ++#define DMA_CHALX_DEV_ADDR(x) (0x40 * x + 0x0048) ++#define DMA_CHALX_CBL(x) (0x40 * x + 0x0054) ++#define DMA_CHALX_LVI(x) (0x40 * x + 0x004c) ++ ++#define DMA_CHALX_DSIZE(x) (0x40 * x + 0x0064) ++#define DMA_CHALX_DLENTH(x) (0x40 * x + 0x0068) ++#define DMA_CHALX_CTL(x) (0x40 * x + 0x0058) ++ ++ ++#define DMA_CTL 0x0000 ++ ++#define DMA_LPIB(x) (0x40 * x + 0x0050) ++ ++#define DMA_STS 0x0008 ++ 
++/****************/ ++ ++ ++/* max number of fragments - we may use more if allocating more pages for BDL */ ++#define BDL_SIZE 4096 ++#define AZX_MAX_BDL_ENTRIES (BDL_SIZE / 16) ++ ++/* ++ * Component parameter register fields - define the I2S block's ++ * configuration. ++ */ ++#define COMP1_TX_WORDSIZE_3(r) (((r) & GENMASK(27, 25)) >> 25) ++#define COMP1_TX_WORDSIZE_2(r) (((r) & GENMASK(24, 22)) >> 22) ++#define COMP1_TX_WORDSIZE_1(r) (((r) & GENMASK(21, 19)) >> 19) ++#define COMP1_TX_WORDSIZE_0(r) (((r) & GENMASK(18, 16)) >> 16) ++#define COMP1_TX_CHANNELS(r) (((r) & GENMASK(10, 9)) >> 9) ++#define COMP1_RX_CHANNELS(r) (((r) & GENMASK(8, 7)) >> 7) ++#define COMP1_RX_ENABLED(r) (((r) & BIT(6)) >> 6) ++#define COMP1_TX_ENABLED(r) (((r) & BIT(5)) >> 5) ++#define COMP1_MODE_EN(r) (((r) & BIT(4)) >> 4) ++#define COMP1_FIFO_DEPTH_GLOBAL(r) (((r) & GENMASK(3, 2)) >> 2) ++#define COMP1_APB_DATA_WIDTH(r) (((r) & GENMASK(1, 0)) >> 0) ++ ++#define COMP2_RX_WORDSIZE_3(r) (((r) & GENMASK(12, 10)) >> 10) ++#define COMP2_RX_WORDSIZE_2(r) (((r) & GENMASK(9, 7)) >> 7) ++#define COMP2_RX_WORDSIZE_1(r) (((r) & GENMASK(5, 3)) >> 3) ++#define COMP2_RX_WORDSIZE_0(r) (((r) & GENMASK(2, 0)) >> 0) ++ ++/* Number of entries in WORDSIZE and DATA_WIDTH parameter registers */ ++#define COMP_MAX_WORDSIZE (1 << 3) ++#define COMP_MAX_DATA_WIDTH (1 << 2) ++ ++#define MAX_CHANNEL_NUM 8 ++#define MIN_CHANNEL_NUM 2 ++ ++#define azx_bus(chip) (&(chip)->bus.core) ++#define bus_to_azx(_bus) container_of(_bus, struct azx, bus.core) ++ ++#define I2S_UNSOL_QUEUE_SIZE 64 ++#define I2S_MAX_CODECS 8 /* limit by controller side */ ++ ++ ++#define azx_stream(dev) (&(dev)->core) ++ ++struct i2sc_bus { ++ struct device *dev; ++ const struct i2s_bus_ops *ops; ++ const struct i2s_io_ops *io_ops; ++ const struct i2s_ext_bus_ops *ext_ops; ++ ++ /* h/w resources */ ++ unsigned long addr; ++ void __iomem *remap_addr; ++ int irq; ++ ++ /* codec linked list */ ++ struct list_head codec_list; ++ unsigned int 
num_codecs; ++ ++ unsigned int unsol_rp, unsol_wp; ++ struct work_struct unsol_work; ++ ++ struct snd_dma_buffer bdl0; ++ struct snd_dma_buffer bdl1; ++ ++ /* i2s_stream linked list */ ++ struct list_head stream_list; ++ ++ bool reverse_assign; /* assign devices in reverse order */ ++ ++ int bdl_pos_adj; /* BDL position adjustment */ ++ ++ /* locks */ ++ spinlock_t reg_lock; ++}; ++ ++struct i2s_bus { ++ struct i2sc_bus core; ++ ++ struct snd_card *card; ++ ++ struct pci_dev *pci; ++ ++ struct mutex prepare_mutex; ++}; ++ ++ ++/* ++ * i2s stream ++ */ ++struct i2s_stream { ++ struct i2sc_bus *bus; ++ struct snd_dma_buffer bdl; /* BDL buffer */ ++ __le32 *posbuf; /* position buffer pointer */ ++ int direction; /* playback / capture (SNDRV_PCM_STREAM_*) */ ++ ++ unsigned int bufsize; /* size of the play buffer in bytes */ ++ unsigned int period_bytes; /* size of the period in bytes */ ++ unsigned int frags; /* number for period in the play buffer */ ++ unsigned int fifo_size; /* FIFO size */ ++ ++ void __iomem *sd_addr; /* stream descriptor pointer */ ++ ++ u32 sd_int_sta_mask; /* stream int status mask */ ++ ++ /* pcm support */ ++ struct snd_pcm_substream *substream; /* assigned substream, ++ * set in PCM open ++ */ ++ unsigned int format_val; /* format value to be set in the ++ * controller and the codec ++ */ ++ unsigned char stream_tag; /* assigned stream */ ++ unsigned char index; /* stream index */ ++ int assigned_key; /* last device# key assigned to */ ++ ++ bool opened; ++ bool running; ++ bool prepared; ++ bool no_period_wakeup; ++ ++ int delay_negative_threshold; ++ ++ struct list_head list; ++ ++}; ++ ++ ++struct azx_dev { ++ struct i2s_stream core; ++ unsigned int irq_pending:1; ++}; ++ ++ ++ ++/* PCM setup */ ++static inline struct azx_dev *get_azx_dev(struct snd_pcm_substream *substream) ++{ ++ return substream->runtime->private_data; ++} ++ ++ ++#define AZX_MAX_CODECS HDA_MAX_CODECS ++#define AZX_DEFAULT_CODECS 4 ++ ++#define stream_to_azx_dev(s) 
container_of(s, struct azx_dev, core) ++ ++struct azx; ++ ++struct i2s_controller_ops { ++ int (*substream_alloc_pages)(struct azx *chip, ++ struct snd_pcm_substream *substream, ++ size_t size); ++ int (*substream_free_pages)(struct azx *chip, ++ struct snd_pcm_substream *substream); ++ int (*position_check)(struct azx *chip, struct azx_dev *azx_dev); ++}; ++ ++struct i2s_io_ops { ++ int (*dma_alloc_pages)(struct i2sc_bus *bus, int type, size_t size, ++ struct snd_dma_buffer *buf); ++ void (*dma_free_pages)(struct i2sc_bus *bus, ++ struct snd_dma_buffer *buf); ++}; ++ ++struct azx { ++ struct i2s_bus bus; ++ ++ struct snd_card *card; ++ struct pci_dev *pci; ++ int dev_index; ++ ++ int playback_streams; ++ int playback_index_offset; ++ int capture_streams; ++ int capture_index_offset; ++ int num_streams; ++ ++ /* Register interaction. */ ++ const struct i2s_controller_ops *ops; ++ ++ /* locks */ ++ struct mutex open_mutex; /* Prevents concurrent open/close operations */ ++ ++ /* PCM */ ++ struct list_head pcm_list; /* azx_pcm list */ ++ ++ /* flags */ ++ int bdl_pos_adj; ++ unsigned int running:1; ++ unsigned int region_requested:1; ++ unsigned int disabled:1; ++}; ++struct i2s_phytium { ++ struct azx chip; ++ struct snd_pcm_substream *substream; ++ struct device *dev; ++ struct device *pdev; ++ void __iomem *regs; ++ void __iomem *regs_db; ++ int irq_id; ++ ++ /* for pending irqs */ ++ struct work_struct irq_pending_work; ++ ++ /* sync probing */ ++ struct completion probe_wait; ++ struct work_struct probe_work; ++ ++ /* extra flags */ ++ unsigned int irq_pending_warned:1; ++ unsigned int probe_continued:1; ++ unsigned int i2s_dp:1; ++ ++ unsigned int i2s_reg_comp1; ++ unsigned int i2s_reg_comp2; ++ struct clk *clk; ++ unsigned int capability; ++ unsigned int quirks; ++ u32 fifo_th; ++ int active; ++ u32 xfer_resolution; ++ u32 ccr; ++ u32 clk_base; ++ ++ struct i2s_clk_config_data config; ++ ++ /*azx_dev*/ ++ struct i2s_stream core; ++}; ++ ++#define 
azx_alloc_stream_pages(chip) \ ++ snd_i2s_bus_alloc_stream_pages(azx_bus(chip)) ++ ++#endif +diff --git a/sound/soc/phytium/phytium_i2s.c b/sound/soc/phytium/phytium_i2s.c +new file mode 100644 +index 000000000000..9c6ab16d83f7 +--- /dev/null ++++ b/sound/soc/phytium/phytium_i2s.c +@@ -0,0 +1,1345 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Phytium I2S ASoc driver ++ * ++ * Copyright (C) 2020-2021, Phytium Technology Co.,Ltd. ++ * ++ * This file is licensed under the terms of the GNU General Public ++ * License version 2. This program is licensed "as is" without any ++ * warranty of any kind, whether express or implied. ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "local.h" ++ ++/* FT have 1 playback and 1 capture */ ++#define FT4C_NUM_CAPTURE 1 ++#define FT4C_NUM_PLAYBACK 1 ++ ++struct pdata_x100_mfd { ++ struct device *dev; ++ char *name; ++ int clk_base; ++}; ++ ++static inline void i2s_write_reg(void __iomem *io_base, int reg, u32 val) ++{ ++ writel(val, io_base + reg); ++} ++ ++static inline u32 i2s_read_reg(void __iomem *io_base, int reg) ++{ ++ return readl(io_base + reg); ++} ++ ++static inline void i2s_disable_channels(struct i2s_phytium *dev, u32 stream) ++{ ++ u32 i = 0; ++ ++ if (stream == SNDRV_PCM_STREAM_PLAYBACK) { ++ for (i = 0; i < 4; i++) ++ i2s_write_reg(dev->regs, TER(i), 0); ++ } else { ++ for (i = 0; i < 4; i++) ++ i2s_write_reg(dev->regs, RER(i), 0); ++ } ++} ++ ++static int substream_free_pages(struct azx *chip, ++ struct snd_pcm_substream *substream) ++{ ++ return snd_pcm_lib_free_pages(substream); ++} ++ ++static void stream_update(struct i2sc_bus *bus, struct i2s_stream *s) ++{ ++ struct azx *chip = bus_to_azx(bus); ++ ++ struct azx_dev *azx_dev = stream_to_azx_dev(s); ++ ++ /* check whether this IRQ is really acceptable */ ++ if 
(!chip->ops->position_check || ++ chip->ops->position_check(chip, azx_dev)) { ++ spin_unlock(&bus->reg_lock); ++ snd_pcm_period_elapsed(azx_stream(azx_dev)->substream); ++ spin_lock(&bus->reg_lock); ++ } ++ ++} ++ ++int snd_i2s_bus_handle_stream_irq(struct i2sc_bus *bus, unsigned int status, ++ void (*ack)(struct i2sc_bus *, ++ struct i2s_stream *)) ++{ ++ struct i2s_stream *azx_dev; ++ u32 sd_status, qc_sd_status; ++ int handled = 0; ++ ++ list_for_each_entry(azx_dev, &bus->stream_list, list) { ++ ++ if (status & azx_dev->sd_int_sta_mask) { ++ sd_status = i2s_read_reg(azx_dev->sd_addr, DMA_STS); ++ i2s_write_reg(azx_dev->sd_addr, DMA_STS, azx_dev->sd_int_sta_mask); ++ qc_sd_status = i2s_read_reg(azx_dev->sd_addr, DMA_STS); ++ handled |= 1 << azx_dev->index; ++ azx_dev->running = 1; ++ if (!azx_dev->substream || !azx_dev->running || ++ !(sd_status & 0xffffffff)) { ++ continue; ++ } ++ if (ack) ++ ack(bus, azx_dev); ++ } ++ } ++ return handled; ++} ++ ++irqreturn_t azx_i2s_interrupt(int irq, void *dev_id) ++{ ++ struct azx *chip = dev_id; ++ struct i2sc_bus *bus = azx_bus(chip); ++ u32 status; ++ bool active, handled = false; ++ int repeat = 0; /* count for avoiding endless loop */ ++ ++ spin_lock(&bus->reg_lock); ++ ++ if (chip->disabled) ++ goto unlock; ++ ++ do { ++ ++ status = i2s_read_reg(bus->remap_addr, DMA_STS); ++ ++ if (status == 0) ++ break; ++ ++ handled = true; ++ active = false; ++ if (snd_i2s_bus_handle_stream_irq(bus, status, stream_update)) ++ active = true; ++ ++ ++ } while (active && ++repeat < 1); ++ ++ unlock: ++ spin_unlock(&bus->reg_lock); ++ ++ return IRQ_RETVAL(handled); ++} ++ ++static int azx_acquire_irq(struct azx *chip, int do_disconnect) ++{ ++ struct i2sc_bus *bus = azx_bus(chip); ++ struct i2s_phytium *i2s = container_of(chip, struct i2s_phytium, chip); ++ int err; ++ ++ err = devm_request_irq(i2s->dev, i2s->irq_id, azx_i2s_interrupt, IRQF_SHARED, ++ "phytium i2s", chip); ++ ++ if (err < 0) { ++ dev_err(i2s->dev, "failed to request 
irq\n"); ++ return err; ++ } ++ ++ bus->irq = i2s->irq_id; ++ ++ return 0; ++} ++ ++static void i2s_start(struct i2s_phytium *dev, ++ struct snd_pcm_substream *substream) ++{ ++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ++ i2s_write_reg(dev->regs, ITER, 1); ++ else ++ i2s_write_reg(dev->regs, IRER, 1); ++ ++ /*enable the clock*/ ++ i2s_write_reg(dev->regs, CER, 1); ++ ++ /*enable the i2s*/ ++ i2s_write_reg(dev->regs, I2S_IER, 1); ++} ++ ++static void i2s_stop(struct i2s_phytium *dev, ++ struct snd_pcm_substream *substream) ++{ ++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ++ i2s_write_reg(dev->regs, ITER, 0); ++ else ++ i2s_write_reg(dev->regs, IRER, 0); ++ ++ if (!dev->active) { ++ i2s_write_reg(dev->regs, CER, 0); ++ i2s_write_reg(dev->regs, I2S_IER, 0); ++ } ++} ++ ++static void dw_i2s_config(struct i2s_phytium *dev, int stream) ++{ ++ i2s_disable_channels(dev, stream); ++ ++ if (stream == SNDRV_PCM_STREAM_PLAYBACK) { ++ i2s_write_reg(dev->regs, TCR(0), dev->xfer_resolution); ++ i2s_write_reg(dev->regs, TER(0), 1); ++ } else { ++ i2s_write_reg(dev->regs, RCR(0), dev->xfer_resolution); ++ i2s_write_reg(dev->regs, RER(0), 1); ++ } ++} ++ ++static int dw_i2s_hw_params(struct snd_pcm_substream *substream, ++ struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) ++{ ++ struct i2s_phytium *dev = snd_soc_dai_get_drvdata(dai); ++ struct i2s_clk_config_data *config = &dev->config; ++ u64 fix, point; ++ u32 cfg = 0; ++ ++ switch (params_format(params)) { ++ case SNDRV_PCM_FORMAT_S16_LE: ++ config->data_width = 16; ++ dev->ccr = 0x00; ++ dev->xfer_resolution = 0x02; ++ break; ++ ++ case SNDRV_PCM_FORMAT_S24_LE: ++ config->data_width = 24; ++ dev->ccr = 0x08; ++ dev->xfer_resolution = 0x04; ++ break; ++ ++ case SNDRV_PCM_FORMAT_S32_LE: ++ config->data_width = 32; ++ dev->ccr = 0x10; ++ dev->xfer_resolution = 0x05; ++ break; ++ ++ default: ++ dev_err(dev->dev, "phytium-i2s: unsupported PCM fmt"); ++ return -EINVAL; ++ } ++ ++ config->chan_nr = 
params_channels(params); ++ ++ switch (config->chan_nr) { ++ case EIGHT_CHANNEL_SUPPORT: ++ case SIX_CHANNEL_SUPPORT: ++ case FOUR_CHANNEL_SUPPORT: ++ case TWO_CHANNEL_SUPPORT: ++ break; ++ default: ++ dev_err(dev->dev, "channel not supported\n"); ++ return -EINVAL; ++ } ++ ++ dw_i2s_config(dev, substream->stream); ++ ++ i2s_write_reg(dev->regs, CCR, dev->ccr); ++ ++ config->sample_rate = params_rate(params); ++ if (dev->capability & DW_I2S_MASTER) { ++ fix = dev->clk_base / config->sample_rate / config->data_width / 32; ++ point = ((dev->clk_base / config->sample_rate) << 10) / config->data_width / 32; ++ point = (point - (fix << 10)) * 10; ++ cfg = ((u16) fix << 16) | (u16) point; ++ i2s_write_reg(dev->regs, CLK_CFG0, cfg); ++ i2s_write_reg(dev->regs, CLK_CFG1, 0xf); ++ } ++ return 0; ++} ++ ++static int dw_i2s_prepare(struct snd_pcm_substream *substream, ++ struct snd_soc_dai *dai) ++{ ++ struct i2s_phytium *dev = snd_soc_dai_get_drvdata(dai); ++ ++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ++ i2s_write_reg(dev->regs, TXFFR, 1); ++ else ++ i2s_write_reg(dev->regs, RXFFR, 1); ++ ++ return 0; ++} ++ ++static int dw_i2s_trigger(struct snd_pcm_substream *substream, ++ int cmd, struct snd_soc_dai *dai) ++{ ++ struct i2s_phytium *dev = snd_soc_dai_get_drvdata(dai); ++ int ret = 0; ++ ++ switch (cmd) { ++ case SNDRV_PCM_TRIGGER_START: ++ case SNDRV_PCM_TRIGGER_RESUME: ++ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: ++ dev->active++; ++ i2s_start(dev, substream); ++ break; ++ ++ case SNDRV_PCM_TRIGGER_STOP: ++ case SNDRV_PCM_TRIGGER_SUSPEND: ++ case SNDRV_PCM_TRIGGER_PAUSE_PUSH: ++ dev->active--; ++ i2s_stop(dev, substream); ++ break; ++ default: ++ ret = -EINVAL; ++ break; ++ } ++ ++ return ret; ++} ++ ++static int dw_i2s_set_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) ++{ ++ struct i2s_phytium *dev = snd_soc_dai_get_drvdata(cpu_dai); ++ int ret = 0; ++ ++ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { ++ case SND_SOC_DAIFMT_CBM_CFM: ++ if (dev->capability & 
DW_I2S_SLAVE) ++ ret = 0; ++ else ++ ret = -EINVAL; ++ break; ++ case SND_SOC_DAIFMT_CBS_CFS: ++ if (dev->capability & DW_I2S_MASTER) ++ ret = 0; ++ else ++ ret = -EINVAL; ++ break; ++ case SND_SOC_DAIFMT_CBM_CFS: ++ case SND_SOC_DAIFMT_CBS_CFM: ++ ret = -EINVAL; ++ break; ++ default: ++ dev_dbg(dev->dev, "phytium/i2s: Invalid master/slave format\n"); ++ ret = -EINVAL; ++ break; ++ } ++ return ret; ++} ++ ++static const struct snd_soc_dai_ops phytium_i2s_dai_ops = { ++ .hw_params = dw_i2s_hw_params, ++ .prepare = dw_i2s_prepare, ++ .trigger = dw_i2s_trigger, ++ .set_fmt = dw_i2s_set_fmt, ++}; ++ ++#ifdef CONFIG_PM ++static int phytium_i2s_suspend(struct snd_soc_dai *dai) ++{ ++ return 0; ++} ++ ++static int phytium_i2s_resume(struct snd_soc_dai *dai) ++{ ++ struct i2s_phytium *dev = snd_soc_dai_get_drvdata(dai); ++ if (dai->playback_active) ++ dw_i2s_config(dev, SNDRV_PCM_STREAM_PLAYBACK); ++ if (dai->capture_active) ++ dw_i2s_config(dev, SNDRV_PCM_STREAM_CAPTURE); ++ return 0; ++} ++#else ++#define phytium_i2s_suspend NULL ++#define phytium_i2s_resume NULL ++#endif ++ ++static struct snd_soc_dai_driver phytium_i2s_dai = { ++ .playback = { ++ .stream_name = "i2s-Playback", ++ .channels_min = 2, ++ .channels_max = 2, ++ .rates = SNDRV_PCM_RATE_8000_192000, ++ .formats = SNDRV_PCM_FMTBIT_S8 | ++ SNDRV_PCM_FMTBIT_S16_LE | ++ SNDRV_PCM_FMTBIT_S20_LE | ++ SNDRV_PCM_FMTBIT_S24_LE | ++ SNDRV_PCM_FMTBIT_S32_LE, ++ }, ++ .capture = { ++ .stream_name = "i2s-Capture", ++ .channels_min = 2, ++ .channels_max = 2, ++ .rates = SNDRV_PCM_RATE_8000_192000, ++ .formats = SNDRV_PCM_FMTBIT_S8 | ++ SNDRV_PCM_FMTBIT_S16_LE | ++ SNDRV_PCM_FMTBIT_S20_LE | ++ SNDRV_PCM_FMTBIT_S24_LE | ++ SNDRV_PCM_FMTBIT_S32_LE, ++ }, ++ .ops = &phytium_i2s_dai_ops, ++ .suspend = phytium_i2s_suspend, ++ .resume = phytium_i2s_resume, ++ .symmetric_rates = 1, ++}; ++ ++static const struct snd_pcm_hardware phytium_pcm_hardware = { ++ .info = SNDRV_PCM_INFO_INTERLEAVED | ++ SNDRV_PCM_INFO_MMAP | ++ 
SNDRV_PCM_INFO_MMAP_VALID | ++ SNDRV_PCM_INFO_BLOCK_TRANSFER, ++ .rates = SNDRV_PCM_RATE_8000 | ++ SNDRV_PCM_RATE_32000 | ++ SNDRV_PCM_RATE_44100 | ++ SNDRV_PCM_RATE_48000, ++ .rate_min = 8000, ++ .rate_max = 48000, ++ .formats = (SNDRV_PCM_FMTBIT_S8 | ++ SNDRV_PCM_FMTBIT_S16_LE | ++ SNDRV_PCM_FMTBIT_S20_LE | ++ SNDRV_PCM_FMTBIT_S24_LE | ++ SNDRV_PCM_FMTBIT_S32_LE), ++ .channels_min = 2, ++ .channels_max = 2, ++ .buffer_bytes_max = 4096*16, ++ .period_bytes_min = 1024, ++ .period_bytes_max = 4096*4, ++ .periods_min = 2, ++ .periods_max = 16, ++ .fifo_size = 16, ++}; ++ ++struct i2s_stream *snd_i2s_stream_assign(struct i2sc_bus *bus, ++ struct snd_pcm_substream *substream) ++{ ++ struct i2s_stream *azx_dev; ++ struct i2s_stream *res = NULL; ++ ++ /* make a non-zero unique key for the substream */ ++ int key = (substream->pcm->device << 16) | (substream->number << 2) | ++ (substream->stream + 1); ++ ++ list_for_each_entry(azx_dev, &bus->stream_list, list) { ++ if (azx_dev->direction != substream->stream) ++ continue; ++ ++ azx_dev->opened = 0; ++ ++ if (azx_dev->assigned_key == key) { ++ res = azx_dev; ++ break; ++ } ++ ++ if (!res || bus->reverse_assign) ++ res = azx_dev; ++ } ++ ++ if (res) { ++ spin_lock_irq(&bus->reg_lock); ++ res->opened = 1; ++ res->running = 0; ++ res->assigned_key = key; ++ res->substream = substream; ++ spin_unlock_irq(&bus->reg_lock); ++ } ++ ++ return res; ++} ++ ++/* assign a stream for the PCM */ ++static inline struct azx_dev * ++azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream) ++{ ++ struct i2s_stream *s; ++ ++ s = snd_i2s_stream_assign(azx_bus(chip), substream); ++ if (!s) ++ return NULL; ++ return stream_to_azx_dev(s); ++} ++ ++static int phytium_pcm_open(struct snd_pcm_substream *substream) ++{ ++ struct snd_soc_pcm_runtime *rtd = substream->private_data; ++ struct i2s_phytium *dev = snd_soc_dai_get_drvdata(rtd->cpu_dai); ++ ++ struct azx *chip = &dev->chip; ++ struct azx_dev *azx_dev; ++ struct 
snd_pcm_runtime *runtime = substream->runtime; ++ ++ azx_dev = azx_assign_device(chip, substream); ++ if (azx_dev == NULL) ++ return -EBUSY; ++ ++ snd_soc_set_runtime_hwparams(substream, &phytium_pcm_hardware); ++ snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); ++ runtime->private_data = dev; ++ ++ return 0; ++} ++ ++static int phytium_pcm_close(struct snd_pcm_substream *substream) ++{ ++ struct snd_soc_pcm_runtime *rtd = substream->private_data; ++ struct i2s_phytium *dev = snd_soc_dai_get_drvdata(rtd->cpu_dai); ++ struct azx *chip = &dev->chip; ++ struct azx_dev *azx_dev = get_azx_dev(substream); ++ ++ mutex_lock(&chip->open_mutex); ++ azx_stream(azx_dev)->opened = 0; ++ azx_stream(azx_dev)->running = 0; ++ azx_stream(azx_dev)->substream = NULL; ++ ++ mutex_unlock(&chip->open_mutex); ++ return 0; ++} ++ ++static int phytium_pcm_new(struct snd_soc_pcm_runtime *rtd) ++{ ++ struct i2s_phytium *i2s = snd_soc_dai_get_drvdata(rtd->cpu_dai); ++ size_t size = phytium_pcm_hardware.buffer_bytes_max; ++ ++ return snd_pcm_lib_preallocate_pages_for_all(rtd->pcm, ++ SNDRV_DMA_TYPE_DEV, ++ i2s->pdev, size, size); ++} ++ ++static const struct i2s_io_ops axi_i2s_io_ops; ++static const struct i2s_controller_ops axi_i2s_ops; ++ ++static int phytium_pcm_hw_params(struct snd_pcm_substream *substream, ++ struct snd_pcm_hw_params *hw_params) ++{ ++ struct snd_pcm_runtime *runtime = substream->runtime; ++ struct i2s_phytium *dev = runtime->private_data; ++ struct azx *chip = &dev->chip; ++ struct azx_dev *azx_dev = get_azx_dev(substream); ++ int ret; ++ ++ azx_dev->core.bufsize = 0; ++ azx_dev->core.period_bytes = 0; ++ azx_dev->core.format_val = 0; ++ ++ ret = chip->ops->substream_alloc_pages(chip, substream, ++ params_buffer_bytes(hw_params)); ++ ++ return ret; ++} ++/* ++ * set up a BDL entry ++ */ ++static int setup_bdle(struct i2sc_bus *bus, ++ struct snd_dma_buffer *dmab, ++ struct i2s_stream *azx_dev, __le32 **bdlp, ++ int ofs, int size, int with_ioc) ++{ ++ 
struct snd_pcm_substream *substream = azx_dev->substream; ++ struct snd_pcm_runtime *runtime = substream->runtime; ++ __le32 *bdl = *bdlp; ++ ++ dmab->addr = runtime->dma_addr; ++ while (size > 0) { ++ dma_addr_t addr; ++ int chunk; ++ ++ if (azx_dev->frags >= AZX_MAX_BDL_ENTRIES) ++ return -EINVAL; ++ ++ addr = snd_sgbuf_get_addr(dmab, ofs); ++ ++ /* program the address field of the BDL entry */ ++ bdl[0] = cpu_to_le32((u32)addr); ++ ++ bdl[1] = cpu_to_le32(upper_32_bits(addr)); ++ ++ /* program the size field of the BDL entry */ ++ chunk = snd_sgbuf_get_chunk_size(dmab, ofs, size); ++ ++ bdl[2] = cpu_to_le32(chunk); ++ ++ /* program the IOC to enable interrupt ++ * only when the whole fragment is processed ++ */ ++ size -= chunk; ++ bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01); ++ ++ bdl += 4; ++ azx_dev->frags++; ++ ofs += chunk; ++ } ++ *bdlp = bdl; ++ return ofs; ++} ++ ++int snd_i2s_stream_setup_periods(struct i2s_stream *azx_dev) ++{ ++ struct i2sc_bus *bus = azx_dev->bus; ++ struct snd_pcm_substream *substream = azx_dev->substream; ++ struct snd_pcm_runtime *runtime = substream->runtime; ++ __le32 *bdl; ++ int i, ofs, periods, period_bytes; ++ int pos_adj, pos_align; ++ ++ period_bytes = azx_dev->period_bytes; ++ periods = azx_dev->bufsize / period_bytes; ++ ++ /* program the initial BDL entries */ ++ bdl = (__le32 *)azx_dev->bdl.area; ++ ++ ofs = 0; ++ azx_dev->frags = 0; ++ ++ pos_adj = bus->bdl_pos_adj; ++ ++ if (!azx_dev->no_period_wakeup && pos_adj > 0) { ++ ++ pos_align = pos_adj; ++ pos_adj = (pos_adj * runtime->rate + 47999) / 48000; ++ if (!pos_adj) ++ pos_adj = pos_align; ++ else ++ pos_adj = ((pos_adj + pos_align - 1) / pos_align) * ++ pos_align; ++ pos_adj = frames_to_bytes(runtime, pos_adj); ++ if (pos_adj >= period_bytes) { ++ dev_warn(bus->dev, "Too big adjustment %d\n", ++ pos_adj); ++ pos_adj = 0; ++ } else { ++ ++ ofs = setup_bdle(bus, snd_pcm_get_dma_buf(substream), ++ azx_dev, ++ &bdl, ofs, pos_adj, true); ++ if (ofs < 0) ++ goto 
error; ++ } ++ } else ++ pos_adj = 0; ++ ++ for (i = 0; i < periods; i++) { ++ if (i == periods - 1 && pos_adj) ++ ofs = setup_bdle(bus, snd_pcm_get_dma_buf(substream), ++ azx_dev, &bdl, ofs, ++ period_bytes - pos_adj, 0); ++ else ++ ofs = setup_bdle(bus, snd_pcm_get_dma_buf(substream), ++ azx_dev, &bdl, ofs, ++ period_bytes, ++ !azx_dev->no_period_wakeup); ++ if (ofs < 0) ++ goto error; ++ } ++ return 0; ++ ++ error: ++ dev_err(bus->dev, "Too many BDL entries: buffer=%d, period=%d\n", ++ azx_dev->bufsize, period_bytes); ++ return -EINVAL; ++} ++ ++int snd_i2s_stream_set_params(struct i2s_stream *azx_dev, ++ unsigned int format_val) ++{ ++ unsigned int bufsize, period_bytes; ++ struct snd_pcm_substream *substream = azx_dev->substream; ++ struct snd_pcm_runtime *runtime; ++ int err; ++ ++ if (!substream) ++ return -EINVAL; ++ ++ runtime = substream->runtime; ++ bufsize = snd_pcm_lib_buffer_bytes(substream); ++ period_bytes = snd_pcm_lib_period_bytes(substream); ++ if (bufsize != azx_dev->bufsize || ++ period_bytes != azx_dev->period_bytes || ++ format_val != azx_dev->format_val || ++ runtime->no_period_wakeup != azx_dev->no_period_wakeup) { ++ ++ azx_dev->bufsize = bufsize; ++ azx_dev->period_bytes = period_bytes; ++ azx_dev->format_val = format_val; ++ azx_dev->no_period_wakeup = runtime->no_period_wakeup; ++ err = snd_i2s_stream_setup_periods(azx_dev); ++ if (err < 0) ++ return err; ++ } ++ ++ return 0; ++} ++ ++int snd_i2s_stream_setup(struct i2s_stream *azx_dev) ++{ ++ struct snd_pcm_runtime *runtime; ++ ++ if (azx_dev->substream) ++ runtime = azx_dev->substream->runtime; ++ else ++ runtime = NULL; ++ ++ i2s_write_reg(azx_dev->sd_addr, DMA_CHAL_CONFG0, 0x8180); ++ i2s_write_reg(azx_dev->sd_addr, DMA_MASK_INT, 0x80000003); ++ ++ if (azx_dev->direction == SNDRV_PCM_STREAM_PLAYBACK) { ++ i2s_write_reg(azx_dev->sd_addr, DMA_BDLPL(0), (u32)azx_dev->bdl.addr); ++ i2s_write_reg(azx_dev->sd_addr, DMA_BDLPU(0), upper_32_bits(azx_dev->bdl.addr)); ++ 
i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DEV_ADDR(0), 0x1c8); ++ i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CBL(0), azx_dev->bufsize); ++ i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_LVI(0), azx_dev->frags - 1); ++ i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DSIZE(0), 0x2);//0x2 ++ i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DLENTH(0), 0x0);//0x0 ++ } else { ++ i2s_write_reg(azx_dev->sd_addr, DMA_BDLPL(1), (u32)azx_dev->bdl.addr); ++ i2s_write_reg(azx_dev->sd_addr, DMA_BDLPU(1), upper_32_bits(azx_dev->bdl.addr)); ++ i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DEV_ADDR(1), 0x1c0); ++ i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CBL(1), azx_dev->bufsize); ++ i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_LVI(1), azx_dev->frags - 1); ++ i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DSIZE(1), 0x8);//0x8 ++ i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DLENTH(1), 0x0); ++ } ++ ++ if (runtime && runtime->period_size > 64) ++ azx_dev->delay_negative_threshold = ++ -frames_to_bytes(runtime, 64); ++ else ++ azx_dev->delay_negative_threshold = 0; ++ ++ return 0; ++} ++ ++static int phytium_pcm_prepare(struct snd_pcm_substream *substream) ++{ ++ struct snd_pcm_runtime *runtime = substream->runtime; ++ struct i2s_phytium *dev = runtime->private_data; ++ struct azx *chip = &dev->chip; ++ struct azx_dev *azx_dev = get_azx_dev(substream); ++ struct i2sc_bus *bus = azx_bus(chip); ++ struct i2s_stream *hstr_p; ++ struct i2s_phytium *i2s = runtime->private_data; ++ int err; ++ ++ i2s->substream = substream; ++ azx_dev->core.substream = substream; ++ azx_dev->core.sd_addr = i2s->regs_db; ++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { ++ azx_dev->core.bdl.area = bus->bdl0.area; ++ azx_dev->core.bdl.addr = bus->bdl0.addr; ++ } else { ++ azx_dev->core.bdl.area = bus->bdl1.area; ++ azx_dev->core.bdl.addr = bus->bdl1.addr; ++ } ++ ++ if (!substream) ++ return -EINVAL; ++ ++ hstr_p = azx_stream(azx_dev); ++ hstr_p->direction = substream->stream; ++ ++ err = snd_i2s_stream_set_params(azx_stream(azx_dev), 
0); ++ if (err < 0) ++ goto unlock; ++ ++ snd_i2s_stream_setup(azx_stream(azx_dev)); ++ ++ unlock: ++ if (!err) ++ azx_stream(azx_dev)->prepared = 1; ++ ++ return err; ++} ++ ++void snd_i2s_stream_clear(struct i2s_stream *azx_dev) ++{ ++ if (azx_dev->direction == SNDRV_PCM_STREAM_PLAYBACK) ++ i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0), 0x0); ++ else ++ i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(1), 0x0); ++ ++ azx_dev->running = false; ++} ++ ++void snd_i2s_stream_stop(struct i2s_stream *azx_dev) ++{ ++ snd_i2s_stream_clear(azx_dev); ++} ++ ++void snd_i2s_stream_start(struct i2s_stream *azx_dev, bool fresh_start) ++{ ++ if (azx_dev->direction == SNDRV_PCM_STREAM_PLAYBACK) ++ i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0), 0x1); ++ else ++ i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(1), 0x5); ++ ++ azx_dev->running = true; ++} ++ ++static int phytium_pcm_trigger(struct snd_pcm_substream *substream, int cmd) ++{ ++ struct snd_pcm_runtime *runtime = substream->runtime; ++ struct i2s_phytium *dev = runtime->private_data; ++ struct azx *chip = &dev->chip; ++ struct i2sc_bus *bus = azx_bus(chip); ++ struct azx_dev *azx_dev = get_azx_dev(substream); ++ struct snd_pcm_substream *s; ++ struct i2s_stream *hstr; ++ bool start; ++ int sbits = 0; ++ ++ hstr = azx_stream(azx_dev); ++ hstr->direction = substream->stream; ++ ++ switch (cmd) { ++ case SNDRV_PCM_TRIGGER_START: ++ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: ++ case SNDRV_PCM_TRIGGER_RESUME: ++ start = true; ++ break; ++ case SNDRV_PCM_TRIGGER_PAUSE_PUSH: ++ case SNDRV_PCM_TRIGGER_SUSPEND: ++ case SNDRV_PCM_TRIGGER_STOP: ++ start = false; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ snd_pcm_group_for_each_entry(s, substream) { ++ if (s->pcm->card != substream->pcm->card) ++ continue; ++ azx_dev = get_azx_dev(s); ++ sbits |= 1 << azx_dev->core.index; ++ snd_pcm_trigger_done(s, substream); ++ } ++ ++ spin_lock(&bus->reg_lock); ++ ++ snd_pcm_group_for_each_entry(s, substream) { ++ if (s->pcm->card != 
substream->pcm->card) ++ continue; ++ azx_dev = get_azx_dev(s); ++ if (start) ++ snd_i2s_stream_start(azx_stream(azx_dev), true); ++ else ++ snd_i2s_stream_stop(azx_stream(azx_dev)); ++ } ++ ++ i2s_write_reg(dev->regs_db, DMA_CTL, 0x1); ++ spin_unlock(&bus->reg_lock); ++ ++ return 0; ++} ++ ++static void phytium_pcm_free(struct snd_pcm *pcm) ++{ ++ snd_pcm_lib_preallocate_free_for_all(pcm); ++} ++ ++void snd_i2s_stream_cleanup(struct i2s_stream *azx_dev) ++{ ++ int cnt = 10; ++ if (azx_dev->sd_addr) { ++ if (azx_dev->direction == SNDRV_PCM_STREAM_PLAYBACK) { ++ i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0), 0); ++ while (cnt--) { ++ if (i2s_read_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0)) == 0) ++ break; ++ } ++ i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0), 2); ++ i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0), 0); ++ i2s_write_reg(azx_dev->sd_addr, DMA_BDLPL(0), 0); ++ i2s_write_reg(azx_dev->sd_addr, DMA_BDLPU(0), 0); ++ } else { ++ i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(1), 0); ++ while (cnt--) { ++ if (i2s_read_reg(azx_dev->sd_addr, DMA_CHALX_CTL(1)) == 0) ++ break; ++ } ++ i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(1), 2); ++ i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(1), 0); ++ i2s_write_reg(azx_dev->sd_addr, DMA_BDLPL(1), 0); ++ i2s_write_reg(azx_dev->sd_addr, DMA_BDLPU(1), 0); ++ } ++ } ++} ++ ++static int phytium_pcm_hw_free(struct snd_pcm_substream *substream) ++{ ++ struct snd_pcm_runtime *runtime = substream->runtime; ++ struct i2s_phytium *dev = runtime->private_data; ++ struct azx *chip = &dev->chip; ++ struct i2s_stream *hstr_p; ++ struct azx_dev *azx_dev = get_azx_dev(substream); ++ int err; ++ ++ hstr_p = azx_stream(azx_dev); ++ hstr_p->direction = substream->stream; ++ snd_i2s_stream_cleanup(azx_stream(azx_dev)); ++ ++ err = chip->ops->substream_free_pages(chip, substream); ++ azx_stream(azx_dev)->prepared = 0; ++ ++ return err; ++} ++ ++static snd_pcm_uframes_t phytium_pcm_pointer(struct snd_pcm_substream *substream) ++{ ++ 
struct snd_pcm_runtime *runtime = substream->runtime; ++ struct i2s_phytium *dev = runtime->private_data; ++ ++ int stream = substream->stream; ++ ++ u32 pos = i2s_read_reg(dev->regs_db, DMA_LPIB(stream)); ++ ++ return bytes_to_frames(substream->runtime, pos); ++} ++ ++static const struct snd_pcm_ops phytium_pcm_ops = { ++ .open = phytium_pcm_open, ++ .close = phytium_pcm_close, ++ .hw_params = phytium_pcm_hw_params, ++ .prepare = phytium_pcm_prepare, ++ .hw_free = phytium_pcm_hw_free, ++ .trigger = phytium_pcm_trigger, ++ .pointer = phytium_pcm_pointer, ++}; ++ ++static const struct snd_soc_component_driver phytium_i2s_component = { ++ .name = "phytium-i2s", ++ .pcm_new = phytium_pcm_new, ++ .pcm_free = phytium_pcm_free, ++ .ops = &phytium_pcm_ops, ++}; ++ ++/* Maximum bit resolution of a channel - not uniformly spaced */ ++static const u32 fifo_width[COMP_MAX_WORDSIZE] = { ++ 12, 16, 20, 24, 32, 0, 0, 0 ++}; ++ ++/* Width of (DMA) bus */ ++static const u32 bus_widths[COMP_MAX_DATA_WIDTH] = { ++ DMA_SLAVE_BUSWIDTH_1_BYTE, ++ DMA_SLAVE_BUSWIDTH_2_BYTES, ++ DMA_SLAVE_BUSWIDTH_4_BYTES, ++ DMA_SLAVE_BUSWIDTH_UNDEFINED ++}; ++ ++/* PCM format to support channel resolution */ ++static const u32 formats[COMP_MAX_WORDSIZE] = { ++ SNDRV_PCM_FMTBIT_S16_LE, ++ SNDRV_PCM_FMTBIT_S16_LE, ++ SNDRV_PCM_FMTBIT_S24_LE, ++ SNDRV_PCM_FMTBIT_S24_LE, ++ SNDRV_PCM_FMTBIT_S32_LE, ++ 0, ++ 0, ++ 0 ++}; ++ ++static int phytium_configure_dai(struct i2s_phytium *dev) ++{ ++ u32 comp1 = i2s_read_reg(dev->regs, dev->i2s_reg_comp1); ++ u32 comp2 = i2s_read_reg(dev->regs, dev->i2s_reg_comp2); ++ u32 fifo_depth = 1 << (1 + COMP1_FIFO_DEPTH_GLOBAL(comp1)); ++ u32 idx; ++ ++ if (COMP1_TX_ENABLED(comp1)) { ++ dev_dbg(dev->dev, " phytium: play supported\n"); ++ idx = COMP1_TX_WORDSIZE_0(comp1); ++ if (WARN_ON(idx >= ARRAY_SIZE(formats))) ++ return -EINVAL; ++ } ++ ++ if (COMP1_RX_ENABLED(comp1)) { ++ dev_dbg(dev->dev, "phytium: record supported\n"); ++ idx = COMP2_RX_WORDSIZE_0(comp2); ++ if 
(WARN_ON(idx >= ARRAY_SIZE(formats))) ++ return -EINVAL; ++ if (dev->quirks & DW_I2S_QUIRK_16BIT_IDX_OVERRIDE) ++ idx = 1; ++ } ++ ++ if (COMP1_MODE_EN(comp1)) { ++ dev_dbg(dev->dev, "phytium: i2s master mode supported\n"); ++ dev->capability |= DW_I2S_MASTER; ++ } else { ++ dev_dbg(dev->dev, "phytium: i2s slave mode supported\n"); ++ dev->capability |= DW_I2S_SLAVE; ++ } ++ ++ dev->fifo_th = fifo_depth / 2; ++ return 0; ++} ++ ++static int phytium_configure_dai_by_dt(struct i2s_phytium *dev) ++{ ++ u32 comp1 = i2s_read_reg(dev->regs, I2S_COMP_PARAM_1); ++ u32 comp2 = i2s_read_reg(dev->regs, I2S_COMP_PARAM_2); ++ u32 idx = COMP1_APB_DATA_WIDTH(comp1); ++ u32 idx2; ++ int ret; ++ ++ if (WARN_ON(idx >= ARRAY_SIZE(bus_widths))) ++ return -EINVAL; ++ ++ ret = phytium_configure_dai(dev); ++ if (ret < 0) ++ return ret; ++ ++ if (COMP1_TX_ENABLED(comp1)) { ++ idx2 = COMP1_TX_WORDSIZE_0(comp1); ++ dev->capability |= DWC_I2S_PLAY; ++ } ++ if (COMP1_RX_ENABLED(comp1)) { ++ idx2 = COMP2_RX_WORDSIZE_0(comp2); ++ dev->capability |= DWC_I2S_RECORD; ++ } ++ ++ return 0; ++} ++ ++static int dma_alloc_pages(struct i2sc_bus *bus, int type, size_t size, ++ struct snd_dma_buffer *buf) ++{ ++ int err; ++ ++ err = snd_dma_alloc_pages(type, bus->dev, size, buf); ++ if (err < 0) ++ return err; ++ ++ return 0; ++} ++ ++int snd_i2s_bus_alloc_stream_pages(struct i2sc_bus *bus) ++{ ++ struct i2s_stream *s; ++ int num_streams = 0; ++ int err; ++ ++ list_for_each_entry(s, &bus->stream_list, list) { ++ ++ /* allocate memory for the BDL for each stream */ ++ err = bus->io_ops->dma_alloc_pages(bus, SNDRV_DMA_TYPE_DEV, ++ BDL_SIZE, &s->bdl); ++ if (num_streams == 0) { ++ bus->bdl0.addr = s->bdl.addr; ++ bus->bdl0.area = s->bdl.area; ++ } else { ++ bus->bdl1.addr = s->bdl.addr; ++ bus->bdl1.area = s->bdl.area; ++ } ++ num_streams++; ++ if (err < 0) ++ return -ENOMEM; ++ } ++ ++ if (WARN_ON(!num_streams)) ++ return -EINVAL; ++ ++ return 0; ++} ++ ++static int stream_direction(struct azx *chip, 
unsigned char index) ++{ ++ if (index >= chip->playback_index_offset && ++ index < chip->playback_index_offset + chip->playback_streams) ++ return SNDRV_PCM_STREAM_PLAYBACK; ++ return SNDRV_PCM_STREAM_CAPTURE; ++ ++} ++ ++void snd_i2s_stream_init(struct i2sc_bus *bus, struct i2s_stream *azx_dev, ++ int idx, int direction, int tag) ++{ ++ azx_dev->bus = bus; ++ azx_dev->sd_addr = bus->remap_addr; ++ ++ if (idx == 0) ++ azx_dev->sd_int_sta_mask = 1 << idx; ++ else ++ azx_dev->sd_int_sta_mask = 1 << 8; ++ ++ azx_dev->index = idx; ++ azx_dev->direction = direction; ++ azx_dev->stream_tag = tag; ++ ++ list_add_tail(&azx_dev->list, &bus->stream_list); ++ ++} ++ ++int azx_i2s_init_streams(struct azx *chip) ++{ ++ int i; ++ ++ for (i = 0; i < chip->num_streams; i++) { ++ struct azx_dev *azx_dev = kzalloc(sizeof(*azx_dev), GFP_KERNEL); ++ int dir, tag; ++ ++ if (!azx_dev) ++ return -ENOMEM; ++ ++ dir = stream_direction(chip, i); ++ ++ tag = i + 1; ++ ++ snd_i2s_stream_init(azx_bus(chip), azx_stream(azx_dev), ++ i, dir, tag); ++ } ++ ++ return 0; ++} ++ ++static int azx_first_init(struct azx *chip) ++{ ++ struct i2s_phytium *i2s = container_of(chip, struct i2s_phytium, chip); ++ struct platform_device *pdev = to_platform_device(i2s->dev); ++ struct device *i2sdev = i2s->dev; ++ struct i2sc_bus *bus = azx_bus(chip); ++ struct resource *res; ++ int err; ++ unsigned int dma_bits = 64; ++ ++ chip->region_requested = 1; ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ bus->addr = res->start; ++ bus->remap_addr = i2s->regs_db; ++ bus->dev = i2s->pdev; ++ ++ if (bus->remap_addr == NULL) { ++ dev_err(i2sdev, "ioremap error\n"); ++ return -ENXIO; ++ } ++ ++ if (azx_acquire_irq(chip, 0) < 0) ++ return -EBUSY; ++ ++ synchronize_irq(bus->irq); ++ ++ if (!dma_set_mask(i2sdev, DMA_BIT_MASK(dma_bits))) { ++ err = dma_set_coherent_mask(i2sdev, DMA_BIT_MASK(dma_bits)); ++ } else { ++ err = dma_set_mask(i2sdev, DMA_BIT_MASK(32)); ++ err = dma_set_coherent_mask(i2sdev, 
DMA_BIT_MASK(32)); ++ } ++ ++ chip->playback_streams = FT4C_NUM_PLAYBACK; ++ chip->capture_streams = FT4C_NUM_CAPTURE; ++ ++ chip->playback_index_offset = 0; ++ chip->capture_index_offset = chip->playback_streams; ++ chip->num_streams = chip->playback_streams + chip->capture_streams; ++ ++ err = azx_i2s_init_streams(chip); ++ if (err < 0) ++ return err; ++ ++ err = azx_alloc_stream_pages(chip); ++ if (err < 0) ++ return err; ++ ++ return 0; ++} ++ ++static int azx_probe_continue(struct azx *chip) ++{ ++ struct i2s_phytium *i2s = container_of(chip, struct i2s_phytium, chip); ++ int err; ++ ++ i2s->probe_continued = 1; ++ ++ err = azx_first_init(chip); ++ if (err < 0) ++ goto out_free; ++ ++ chip->running = 1; ++ ++out_free: ++ return err; ++} ++ ++static void azx_probe_work(struct work_struct *work) ++{ ++ struct i2s_phytium *i2s = container_of(work, struct i2s_phytium, probe_work); ++ ++ azx_probe_continue(&i2s->chip); ++} ++ ++int azx_i2s_bus_init(struct azx *chip, ++ const struct i2s_io_ops *io_ops) ++{ ++ struct i2s_bus *bus = &chip->bus; ++ ++ bus->core.io_ops = io_ops; ++ ++ INIT_LIST_HEAD(&bus->core.stream_list); ++ bus->card = chip->card; ++ mutex_init(&bus->prepare_mutex); ++ bus->pci = chip->pci; ++ ++ bus->core.bdl_pos_adj = chip->bdl_pos_adj; ++ return 0; ++} ++ ++static int i2s_phytium_create(struct platform_device *pdev, ++ int dev, struct azx **rchip, struct i2s_phytium *i2s) ++{ ++ struct azx *chip; ++ int err; ++ ++ *rchip = NULL; ++ ++ if (!i2s) ++ return -ENOMEM; ++ chip = &i2s->chip; ++ ++ mutex_init(&chip->open_mutex); ++ ++ chip->ops = &axi_i2s_ops; ++ chip->dev_index = dev; ++ ++ INIT_LIST_HEAD(&chip->pcm_list); ++ init_completion(&i2s->probe_wait); ++ ++ chip->bdl_pos_adj = 32; ++ err = azx_i2s_bus_init(chip, &axi_i2s_io_ops); ++ if (err < 0) { ++ kfree(i2s); ++ return err; ++ } ++ ++ INIT_WORK(&i2s->probe_work, azx_probe_work); ++ *rchip = chip; ++ return 0; ++} ++ ++static int substream_alloc_pages(struct azx *chip, ++ struct 
snd_pcm_substream *substream, ++ size_t size) ++{ ++ int ret; ++ ++ ret = snd_pcm_lib_malloc_pages(substream, size); ++ if (ret < 0) ++ return ret; ++ ++ return 0; ++} ++ ++static void dma_free_pages(struct i2sc_bus *bus, ++ struct snd_dma_buffer *buf) ++{ ++ snd_dma_free_pages(buf); ++} ++ ++static const struct i2s_io_ops axi_i2s_io_ops = { ++ .dma_alloc_pages = dma_alloc_pages, ++ .dma_free_pages = dma_free_pages, ++}; ++ ++static const struct i2s_controller_ops axi_i2s_ops = { ++ .substream_alloc_pages = substream_alloc_pages, ++ .substream_free_pages = substream_free_pages, ++}; ++ ++ ++static int phytium_i2s_probe(struct platform_device *pdev) ++{ ++ struct i2s_phytium *i2s; ++ struct azx *chip; ++ struct resource *res; ++ struct pdata_x100_mfd *pdata; ++ struct snd_soc_dai_driver *dai_drv; ++ int err, ret; ++ int card_num = 1; ++ bool schedule_probe; ++ ++ i2s = devm_kzalloc(&pdev->dev, sizeof(*i2s), GFP_KERNEL); ++ if (!i2s) ++ return -ENOMEM; ++ ++ dai_drv = devm_kzalloc(&pdev->dev, sizeof(*dai_drv), GFP_KERNEL); ++ if (!dai_drv) ++ return -ENOMEM; ++ memcpy(dai_drv, &phytium_i2s_dai, sizeof(phytium_i2s_dai)); ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ i2s->regs = devm_ioremap_resource(&pdev->dev, res); ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); ++ i2s->regs_db = devm_ioremap_resource(&pdev->dev, res); ++ ++ if (IS_ERR(i2s->regs)) ++ return PTR_ERR(i2s->regs); ++ ++ i2s->irq_id = platform_get_irq(pdev, 0); ++ ++ if (i2s->irq_id < 0) ++ return i2s->irq_id; ++ ++ i2s->i2s_reg_comp1 = I2S_COMP_PARAM_1; ++ i2s->i2s_reg_comp2 = I2S_COMP_PARAM_2; ++ ++ ret = phytium_configure_dai_by_dt(i2s); ++ if (ret < 0) ++ return ret; ++ ++ err = i2s_phytium_create(pdev, card_num, &chip, i2s); ++ if (err < 0) ++ return err; ++ i2s = container_of(chip, struct i2s_phytium, chip); ++ schedule_probe = !chip->disabled; ++ ++ dev_set_drvdata(&pdev->dev, i2s); ++ ++ pdata = dev_get_platdata(&pdev->dev); ++ dai_drv->name = pdata->name; ++ i2s->dev 
= &pdev->dev; ++ i2s->pdev = pdata->dev; ++ i2s->clk_base = pdata->clk_base; ++ ++ ret = devm_snd_soc_register_component(&pdev->dev, &phytium_i2s_component, ++ dai_drv, 1); ++ if (ret != 0) ++ dev_err(&pdev->dev, "not able to register dai\n"); ++ ++ if (schedule_probe) ++ schedule_work(&i2s->probe_work); ++ ++ if (chip->disabled) ++ complete_all(&i2s->probe_wait); ++ ++ return 0; ++} ++ ++static int phytium_i2s_remove(struct platform_device *pdev) ++{ ++ pm_runtime_disable(&pdev->dev); ++ return 0; ++} ++ ++static struct platform_driver phytium_i2s_driver = { ++ .probe = phytium_i2s_probe, ++ .remove = phytium_i2s_remove, ++ .driver = { ++ .name = "phytium-i2s", ++ }, ++}; ++ ++module_platform_driver(phytium_i2s_driver); ++ ++MODULE_DESCRIPTION("Phytium I2S Driver"); ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Zhang Yiqun "); +diff --git a/sound/soc/phytium/pmdk_dp.c b/sound/soc/phytium/pmdk_dp.c +new file mode 100644 +index 000000000000..0e5a001e8126 +--- /dev/null ++++ b/sound/soc/phytium/pmdk_dp.c +@@ -0,0 +1,227 @@ ++/* ++ * pmdk_dp.c ++ * ++ * Copyright (c) 2021 Phytium Technology Co. Ltd ++ * Author: Yiqun Zhang ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. 
++ */ ++ ++#include ++#include ++#include ++#include ++ ++struct pmdk_dp_private { ++ struct snd_soc_jack jack0; ++ struct snd_soc_jack jack1; ++ struct snd_soc_jack jack2; ++}; ++ ++/* PMDK widgets */ ++static const struct snd_soc_dapm_widget pmdk_dp_dapm_widgets[] = { ++ SND_SOC_DAPM_LINE("DP", NULL), ++}; ++ ++/* PMDK control */ ++static const struct snd_kcontrol_new pmdk_controls[] = { ++ SOC_DAPM_PIN_SWITCH("DP"), ++}; ++ ++/* PMDK connections */ ++static const struct snd_soc_dapm_route pmdk_dp_audio_map[] = { ++ {"DP", NULL, "TX"}, ++}; ++ ++static struct snd_soc_jack_pin dp0_pins[] = { ++ { ++ .pin = "DP/HDMI 0", ++ .mask = SND_JACK_LINEOUT, ++ }, ++}; ++ ++static struct snd_soc_jack_pin dp1_pins[] = { ++ { ++ .pin = "DP/HDMI 1", ++ .mask = SND_JACK_LINEOUT, ++ }, ++}; ++ ++static struct snd_soc_jack_pin dp2_pins[] = { ++ { ++ .pin = "DP/HDMI 2", ++ .mask = SND_JACK_LINEOUT, ++ }, ++}; ++ ++#define SMDK_DAI_FMT (SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | \ ++ SND_SOC_DAIFMT_CBS_CFS) ++ ++static int pmdk_dp0_init(struct snd_soc_pcm_runtime *runtime) ++{ ++ struct snd_soc_card *card = runtime->card; ++ struct pmdk_dp_private *priv = snd_soc_card_get_drvdata(card); ++ struct snd_soc_component *component = runtime->codec_dai->component; ++ int ret; ++ ++ ret = snd_soc_card_jack_new(card, "DP/HDMI 0", ++ SND_JACK_LINEOUT, ++ &priv->jack0, dp0_pins, ++ ARRAY_SIZE(dp0_pins)); ++ if (ret) { ++ dev_err(card->dev, "Jack creation failed %d\n", ret); ++ return ret; ++ } ++ snd_soc_component_set_jack(component, &priv->jack0, NULL); ++ return ret; ++} ++ ++static int pmdk_dp1_init(struct snd_soc_pcm_runtime *runtime) ++{ ++ struct snd_soc_card *card = runtime->card; ++ struct pmdk_dp_private *priv = snd_soc_card_get_drvdata(card); ++ struct snd_soc_component *component = runtime->codec_dai->component; ++ int ret; ++ ++ ret = snd_soc_card_jack_new(card, "DP/HDMI 1", ++ SND_JACK_LINEOUT, ++ &priv->jack1, dp1_pins, ++ ARRAY_SIZE(dp1_pins)); ++ if (ret) { ++ 
dev_err(card->dev, "Jack creation failed %d\n", ret); ++ return ret; ++ } ++ snd_soc_component_set_jack(component, &priv->jack1, NULL); ++ return ret; ++} ++ ++static int pmdk_dp2_init(struct snd_soc_pcm_runtime *runtime) ++{ ++ struct snd_soc_card *card = runtime->card; ++ struct pmdk_dp_private *priv = snd_soc_card_get_drvdata(card); ++ struct snd_soc_component *component = runtime->codec_dai->component; ++ int ret; ++ ++ ret = snd_soc_card_jack_new(card, "DP/HDMI 2", ++ SND_JACK_LINEOUT, ++ &priv->jack2, dp2_pins, ++ ARRAY_SIZE(dp2_pins)); ++ if (ret) { ++ dev_err(card->dev, "Jack creation failed %d\n", ret); ++ return ret; ++ } ++ snd_soc_component_set_jack(component, &priv->jack2, NULL); ++ return ret; ++} ++ ++static struct snd_soc_dai_link pmdk_dai0 = { ++ .name = "Phytium dp0-audio", ++ .stream_name = "Playback", ++ .cpu_dai_name = "phytium-i2s-dp0", ++ .codec_dai_name = "i2s-hifi", ++ .platform_name = "snd-soc-dummy", ++ .codec_name = "hdmi-audio-codec.0.auto", ++ .dai_fmt = SMDK_DAI_FMT, ++ .init = pmdk_dp0_init, ++}; ++ ++static struct snd_soc_dai_link pmdk_dai1 = { ++ .name = "Phytium dp1-audio", ++ .stream_name = "Playback", ++ .cpu_dai_name = "phytium-i2s-dp1", ++ .codec_dai_name = "i2s-hifi", ++ .platform_name = "snd-soc-dummy", ++ .codec_name = "hdmi-audio-codec.1.auto", ++ .dai_fmt = SMDK_DAI_FMT, ++ .init = pmdk_dp1_init, ++}; ++ ++static struct snd_soc_dai_link pmdk_dai2 = { ++ .name = "Phytium dp2-audio", ++ .stream_name = "Playback", ++ .cpu_dai_name = "phytium-i2s-dp2", ++ .codec_dai_name = "i2s-hifi", ++ .platform_name = "snd-soc-dummy", ++ .codec_name = "hdmi-audio-codec.2.auto", ++ .dai_fmt = SMDK_DAI_FMT, ++ .init = pmdk_dp2_init, ++}; ++ ++static struct snd_soc_card pmdk = { ++ .name = "PMDK-I2S", ++ .owner = THIS_MODULE, ++ ++ .dapm_widgets = pmdk_dp_dapm_widgets, ++ .num_dapm_widgets = ARRAY_SIZE(pmdk_dp_dapm_widgets), ++ .controls = pmdk_controls, ++ .num_controls = ARRAY_SIZE(pmdk_controls), ++ .dapm_routes = pmdk_dp_audio_map, ++ 
.num_dapm_routes = ARRAY_SIZE(pmdk_dp_audio_map), ++}; ++ ++static int pmdk_sound_probe(struct platform_device *pdev) ++{ ++ struct snd_soc_card *card = &pmdk; ++ struct pmdk_dp_private *priv; ++ struct snd_soc_dai_link *pmdk_dai; ++ int num_dp = 2; ++ card->dev = &pdev->dev; ++ device_property_read_u32(&pdev->dev, "num-dp", &num_dp); ++ pmdk_dai = devm_kzalloc(&pdev->dev, num_dp * sizeof(*pmdk_dai), GFP_KERNEL); ++ if (!pmdk_dai) ++ return -ENOMEM; ++ ++ switch (num_dp) { ++ case 1: ++ pmdk_dai[0] = pmdk_dai0; ++ break; ++ case 2: ++ pmdk_dai[0] = pmdk_dai0; ++ pmdk_dai[1] = pmdk_dai1; ++ break; ++ case 3: ++ pmdk_dai[0] = pmdk_dai0; ++ pmdk_dai[1] = pmdk_dai1; ++ pmdk_dai[2] = pmdk_dai2; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ card->dai_link = pmdk_dai; ++ card->num_links = num_dp; ++ ++ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); ++ if (!priv) ++ return -ENOMEM; ++ ++ snd_soc_card_set_drvdata(card, priv); ++ ++ return devm_snd_soc_register_card(&pdev->dev, card); ++} ++ ++static const struct acpi_device_id pmdk_sound_acpi_match[] = { ++ { "PHYT8006", 0}, ++ { } ++}; ++MODULE_DEVICE_TABLE(acpi, pmdk_sound_acpi_match); ++ ++static struct platform_driver pmdk_sound_driver = { ++ .probe = pmdk_sound_probe, ++ .driver = { ++ .name = "pmdk_dp", ++ .acpi_match_table = pmdk_sound_acpi_match, ++#ifdef CONFIG_PM ++ .pm = &snd_soc_pm_ops, ++#endif ++ }, ++}; ++ ++module_platform_driver(pmdk_sound_driver); ++ ++MODULE_AUTHOR("Zhang Yiqun"); ++MODULE_DESCRIPTION("ALSA SoC PMDK DP"); ++MODULE_LICENSE("GPL"); +diff --git a/sound/soc/phytium/pmdk_es8336.c b/sound/soc/phytium/pmdk_es8336.c +new file mode 100644 +index 000000000000..56333513fbdf +--- /dev/null ++++ b/sound/soc/phytium/pmdk_es8336.c +@@ -0,0 +1,100 @@ ++/* ++ * pmdk_es8336.c ++ * ++ * Copyright (c) 2021 Phytium Techonology Co. 
Ltd ++ * Author: Zhang Yiqun ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ */ ++ ++#include ++#include ++#include ++#include ++ ++ ++/* PMDK widgets */ ++static const struct snd_soc_dapm_widget pmdk_es8336_dapm_widgets[] = { ++ SND_SOC_DAPM_HP("HP", NULL), ++ SND_SOC_DAPM_MIC("Int Mic", NULL), ++ SND_SOC_DAPM_MIC("Mic In", NULL), ++}; ++ ++/* PMDK control */ ++static const struct snd_kcontrol_new pmdk_controls[] = { ++ SOC_DAPM_PIN_SWITCH("HP"), ++ SOC_DAPM_PIN_SWITCH("Int Mic"), ++ SOC_DAPM_PIN_SWITCH("Mic In"), ++}; ++ ++/* PMDK connections */ ++static const struct snd_soc_dapm_route pmdk_es8336_audio_map[] = { ++ {"DMIC", NULL, "Int Mic"}, ++ {"MIC1", NULL, "Mic In"}, ++ {"MIC2", NULL, "Mic In"}, ++ ++ {"HP", NULL, "HPOL"}, ++ {"HP", NULL, "HPOR"}, ++}; ++ ++#define PMDK_DAI_FMT (SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | \ ++ SND_SOC_DAIFMT_CBS_CFS) ++ ++static struct snd_soc_dai_link pmdk_dai[] = { ++ { ++ .name = "ES8336 HIFI", ++ .stream_name = "ES8336 HIFI", ++ .cpu_dai_name = "phytium-i2s-lsd", ++ .codec_dai_name = "es8336-hifi", ++ .platform_name = "snd-soc-dummy", ++ .codec_name = "i2c-ESSX8336:00", ++ .dai_fmt = PMDK_DAI_FMT, ++ }, ++}; ++ ++static struct snd_soc_card pmdk = { ++ .name = "PMDK-I2S", ++ .owner = THIS_MODULE, ++ .dai_link = pmdk_dai, ++ .num_links = ARRAY_SIZE(pmdk_dai), ++ ++ .dapm_widgets = pmdk_es8336_dapm_widgets, ++ .num_dapm_widgets = ARRAY_SIZE(pmdk_es8336_dapm_widgets), ++ .controls = pmdk_controls, ++ .num_controls = ARRAY_SIZE(pmdk_controls), ++ .dapm_routes = pmdk_es8336_audio_map, ++ .num_dapm_routes = ARRAY_SIZE(pmdk_es8336_audio_map), ++}; ++ ++static int pmdk_sound_probe(struct platform_device *pdev) ++{ ++ struct snd_soc_card *card = &pmdk; ++ struct device *dev = &pdev->dev; ++ card->dev = dev; ++ 
return devm_snd_soc_register_card(&pdev->dev, card); ++} ++ ++static const struct acpi_device_id pmdk_sound_acpi_match[] = { ++ { "PHYT8005", 0}, ++ { } ++}; ++MODULE_DEVICE_TABLE(acpi, pmdk_sound_acpi_match); ++ ++static struct platform_driver pmdk_sound_driver = { ++ .probe = pmdk_sound_probe, ++ .driver = { ++ .name = "pmdk_es8336", ++ .acpi_match_table = pmdk_sound_acpi_match, ++#ifdef CONFIG_PM ++ .pm = &snd_soc_pm_ops, ++#endif ++ }, ++}; ++ ++module_platform_driver(pmdk_sound_driver); ++MODULE_AUTHOR("Zhang Yiqun "); ++MODULE_DESCRIPTION("ALSA SoC PMDK ES8336"); ++MODULE_LICENSE("GPL"); +diff --git a/sound/soc/phytium/pmdk_es8388.c b/sound/soc/phytium/pmdk_es8388.c +new file mode 100644 +index 000000000000..194a4dca7ea8 +--- /dev/null ++++ b/sound/soc/phytium/pmdk_es8388.c +@@ -0,0 +1,174 @@ ++/* ++ * pmdk_es8388.c ++ * ++ * Copyright (c) 2021 Phytium Techonology Co. Ltd ++ * Author: Zhang Yiqun ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++ ++static struct snd_soc_jack hs_jack; ++ ++/* Headset jack detection DAPM pins */ ++static struct snd_soc_jack_pin hs_jack_pins[] = { ++ { ++ .pin = "FrontIn", ++ .mask = SND_JACK_MICROPHONE, ++ }, ++ { ++ .pin = "RearIn", ++ .mask = SND_JACK_MICROPHONE, ++ .invert = 1 ++ }, ++ { ++ .pin = "Front", ++ .mask = SND_JACK_HEADPHONE, ++ }, ++ { ++ .pin = "Rear", ++ .mask = SND_JACK_HEADPHONE, ++ .invert = 1 ++ }, ++}; ++ ++/* Headset jack detection gpios */ ++static struct snd_soc_jack_gpio hs_jack_gpios[] = { ++ { ++ .name = "det", ++ .report = SND_JACK_HEADSET, ++ .debounce_time = 200, ++ .invert = 1, ++ }, ++}; ++ ++/* PMDK widgets */ ++static const struct snd_soc_dapm_widget pmdk_es8388_dapm_widgets[] = { ++ SND_SOC_DAPM_HP("Front", NULL), ++ SND_SOC_DAPM_HP("Rear", NULL), ++ ++ SND_SOC_DAPM_MIC("FrontIn", NULL), ++ SND_SOC_DAPM_MIC("RearIn", NULL), ++}; ++ ++/* PMDK control */ ++static const struct snd_kcontrol_new pmdk_controls[] = { ++ SOC_DAPM_PIN_SWITCH("Front"), ++ SOC_DAPM_PIN_SWITCH("Rear"), ++ SOC_DAPM_PIN_SWITCH("FrontIn"), ++ SOC_DAPM_PIN_SWITCH("RearIn"), ++}; ++ ++/* PMDK connections */ ++static const struct snd_soc_dapm_route pmdk_es8388_audio_map[] = { ++ {"LINPUT1", NULL, "FrontIn"}, ++ {"RINPUT1", NULL, "FrontIn"}, ++ ++ {"LINPUT2", NULL, "RearIn"}, ++ {"RINPUT2", NULL, "RearIn"}, ++ ++ {"Front", NULL, "LOUT1"}, ++ {"Front", NULL, "ROUT1"}, ++ ++ {"Rear", NULL, "LOUT2"}, ++ {"Rear", NULL, "ROUT2"}, ++}; ++ ++static int pmdk_es8388_init(struct snd_soc_pcm_runtime *rtd) ++{ ++ int ret; ++ ++ /* Jack detection API stuff */ ++ ret = snd_soc_card_jack_new(rtd->card, "Headset Jack", SND_JACK_HEADSET, ++ &hs_jack, hs_jack_pins, ++ ARRAY_SIZE(hs_jack_pins)); ++ if (ret) ++ goto err; ++ ++ ret = snd_soc_jack_add_gpios(&hs_jack, ARRAY_SIZE(hs_jack_gpios), ++ hs_jack_gpios); ++ if (ret) ++ goto err; ++ ++ return 0; ++ ++err: ++ return ret; ++} ++ ++#define PMDK_DAI_FMT (SND_SOC_DAIFMT_I2S | 
SND_SOC_DAIFMT_NB_NF | \ ++ SND_SOC_DAIFMT_CBS_CFS) ++ ++static struct snd_soc_dai_link pmdk_dai[] = { ++ { ++ .name = "ES8388 HIFI", ++ .stream_name = "ES8388 HIFI", ++ .cpu_dai_name = "phytium-i2s-lsd", ++ .codec_dai_name = "es8388-hifi", ++ .platform_name = "snd-soc-dummy", ++ .codec_name = "i2c-ESSX8388:00", ++ .dai_fmt = PMDK_DAI_FMT, ++ .init = pmdk_es8388_init, ++ }, ++}; ++ ++static struct snd_soc_card pmdk = { ++ .name = "PMDK-I2S", ++ .owner = THIS_MODULE, ++ .dai_link = pmdk_dai, ++ .num_links = ARRAY_SIZE(pmdk_dai), ++ ++ .dapm_widgets = pmdk_es8388_dapm_widgets, ++ .num_dapm_widgets = ARRAY_SIZE(pmdk_es8388_dapm_widgets), ++ .controls = pmdk_controls, ++ .num_controls = ARRAY_SIZE(pmdk_controls), ++ .dapm_routes = pmdk_es8388_audio_map, ++ .num_dapm_routes = ARRAY_SIZE(pmdk_es8388_audio_map), ++}; ++ ++static int pmdk_sound_probe(struct platform_device *pdev) ++{ ++ struct snd_soc_card *card = &pmdk; ++ struct device *dev = &pdev->dev; ++ int n; ++ ++ card->dev = dev; ++ hs_jack_gpios[0].gpiod_dev = dev; ++ n = gpiod_count(dev, "det"); ++ ++ if(n < 0) ++ pmdk_dai[0].init = NULL; ++ ++ return devm_snd_soc_register_card(&pdev->dev, card); ++} ++ ++static const struct acpi_device_id pmdk_sound_acpi_match[] = { ++ { "PHYT8004", 0}, ++ { } ++}; ++MODULE_DEVICE_TABLE(acpi, pmdk_sound_acpi_match); ++ ++static struct platform_driver pmdk_sound_driver = { ++ .probe = pmdk_sound_probe, ++ .driver = { ++ .name = "pmdk_es8388", ++ .acpi_match_table = pmdk_sound_acpi_match, ++#ifdef CONFIG_PM ++ .pm = &snd_soc_pm_ops, ++#endif ++ }, ++}; ++ ++module_platform_driver(pmdk_sound_driver); ++ ++MODULE_AUTHOR("Zhang Yiqun"); ++MODULE_DESCRIPTION("ALSA SoC PMDK ES8388"); ++MODULE_LICENSE("GPL"); +diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c +index a2a175b08b17..2b21d4fee771 100644 +--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c ++++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c +@@ -332,7 +332,7 @@ u64 vgic_sanitise_outer_cacheability(u64 field) + case 
GIC_BASER_CACHE_nC: + return field; + default: +- return GIC_BASER_CACHE_nC; ++ return GIC_BASER_CACHE_SameAsInner; + } + } + diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c index 74244d8e2909..d3abb492b3ba 100644 --- a/sound/hda/hdac_controller.c +++ b/sound/hda/hdac_controller.c @@ -139,6 +139,9 @@ int snd_hdac_bus_send_cmd(struct hdac_bus *bus, unsigned int val) { unsigned int addr = azx_command_addr(val); unsigned int wp, rp; + unsigned long timeout; + unsigned int rirb_wp; + int i = 0; spin_lock_irq(&bus->reg_lock); @@ -165,6 +168,41 @@ int snd_hdac_bus_send_cmd(struct hdac_bus *bus, unsigned int val) bus->corb.buf[wp] = cpu_to_le32(val); snd_hdac_chip_writew(bus, CORBWP, wp); + if (bus->cmd_resend) { + timeout = jiffies + msecs_to_jiffies(1000); + udelay(80); + rirb_wp = snd_hdac_chip_readw(bus, RIRBWP); + while (rirb_wp == bus->rirb.wp) { + udelay(80); + rirb_wp = snd_hdac_chip_readw(bus, RIRBWP); + if (rirb_wp != bus->rirb.wp) + break; + if (i > 5) + break; + if (time_after(jiffies, timeout)) + break; + + /* add command to corb */ + wp = snd_hdac_chip_readw(bus, CORBWP); + if (wp == 0xffff) { + /* something wrong, controller likely turned to D3 */ + spin_unlock_irq(&bus->reg_lock); + return -EIO; + } + wp++; + wp %= AZX_MAX_CORB_ENTRIES; + + rp = snd_hdac_chip_readw(bus, CORBRP); + if (wp == rp) { + /* oops, it's full */ + spin_unlock_irq(&bus->reg_lock); + return -EAGAIN; + } + bus->corb.buf[wp] = cpu_to_le32(val); + snd_hdac_chip_writew(bus, CORBWP, wp); + i++; + } + } spin_unlock_irq(&bus->reg_lock); return 0; diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c index eee422390d8e..38586457ee09 100644 --- a/sound/hda/hdac_stream.c +++ b/sound/hda/hdac_stream.c @@ -51,7 +51,11 @@ void snd_hdac_stream_start(struct hdac_stream *azx_dev, bool fresh_start) trace_snd_hdac_stream_start(bus, azx_dev); +#ifdef CONFIG_SND_HDA_PHYTIUM + azx_dev->start_wallclk = snd_hdac_chip_readl(bus, WALLCLK) / 15; +#else azx_dev->start_wallclk = 
snd_hdac_chip_readl(bus, WALLCLK); +#endif if (!fresh_start) azx_dev->start_wallclk -= azx_dev->period_wallclk; @@ -469,7 +473,11 @@ static u64 azx_cc_read(const struct cyclecounter *cc) { struct hdac_stream *azx_dev = container_of(cc, struct hdac_stream, cc); +#ifdef CONFIG_SND_HDA_PHYTIUM + return snd_hdac_chip_readl(azx_dev->bus, WALLCLK) / 25; +#else return snd_hdac_chip_readl(azx_dev->bus, WALLCLK); +#endif } static void azx_timecounter_init(struct hdac_stream *azx_dev, diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig index 4235907b7858..df95dad1233f 100644 --- a/sound/pci/hda/Kconfig +++ b/sound/pci/hda/Kconfig @@ -21,6 +21,21 @@ config SND_HDA_INTEL To compile this driver as a module, choose M here: the module will be called snd-hda-intel. +config SND_HDA_PHYTIUM + tristate "PHYTIUM HD Audio" + depends on SOUND + select SND_HDA + help + Say Y here to support the HDA controller present in PHYTIUM + SoCs + + This option enables support for the HD Audio controller + present in some PHYTIUM SoCs, used to communicate audio + to the "High Definition Audio" codec. + + To compile this driver as a module, choose M here: the module + will be called snd-hda-phytium. 
+ config SND_HDA_TEGRA tristate "NVIDIA Tegra HD Audio" depends on ARCH_TEGRA diff --git a/sound/pci/hda/Makefile b/sound/pci/hda/Makefile index b57432f00056..90e32c8ce07b 100644 --- a/sound/pci/hda/Makefile +++ b/sound/pci/hda/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 snd-hda-intel-objs := hda_intel.o +snd-hda-phytium-objs := hda_phytium.o snd-hda-tegra-objs := hda_tegra.o snd-hda-codec-y := hda_bind.o hda_codec.o hda_jack.o hda_auto_parser.o hda_sysfs.o @@ -48,3 +49,4 @@ obj-$(CONFIG_SND_HDA_CODEC_HDMI) += snd-hda-codec-hdmi.o # when built in kernel obj-$(CONFIG_SND_HDA_INTEL) += snd-hda-intel.o obj-$(CONFIG_SND_HDA_TEGRA) += snd-hda-tegra.o +obj-$(CONFIG_SND_HDA_PHYTIUM) += snd-hda-phytium.o diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c index 8198d2e53b7d..74c5dc04bbc8 100644 --- a/sound/pci/hda/hda_controller.c +++ b/sound/pci/hda/hda_controller.c @@ -28,6 +28,8 @@ #include #include +#include "hda_phytium.h" + #ifdef CONFIG_X86 /* for art-tsc conversion */ #include @@ -171,6 +173,10 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream) snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid); unsigned short ctls = spdif ? spdif->ctls : 0; + struct hda_ft *hda; + hda = container_of(chip, struct hda_ft, chip); + hda->substream = substream; + trace_azx_pcm_prepare(chip, azx_dev); dsp_lock(azx_dev); if (dsp_is_locked(azx_dev)) { diff --git a/sound/pci/hda/hda_phytium.c b/sound/pci/hda/hda_phytium.c new file mode 100644 index 000000000000..8cb66a8023c0 --- /dev/null +++ b/sound/pci/hda/hda_phytium.c @@ -0,0 +1,1218 @@ +/* + * hda_phytium.c - Implementation of primary alsa driver code base + * for Intel HD Audio of Phytium. + * + * Copyright(c) 2018 Phytium Corporation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "hda_codec.h" +#include "hda_controller.h" +#include "hda_phytium.h" + +#include "hda_intel_trace.h" + +/* position fix mode */ +enum { + POS_FIX_AUTO, + POS_FIX_LPIB, + POS_FIX_POSBUF, + POS_FIX_VIACOMBO, + POS_FIX_COMBO, +}; + +/* Define IN stream 0 FIFO size offset in VIA controller */ +#define VIA_IN_STREAM0_FIFO_SIZE_OFFSET 0x90 + +/* FT have 4 playback and 4 capture */ +#define FT4C_NUM_CAPTURE 4 +#define FT4C_NUM_PLAYBACK 4 + +#define DWORD_BYTE_WIDTH 4 +#define BYTE_BIT_WIDTH 8 + +static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; +static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; +static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; +static char *model[SNDRV_CARDS]; +static int position_fix[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] = 1}; +static int bdl_pos_adj[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] = -1}; +static int probe_mask[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] = -1}; +static int probe_only[SNDRV_CARDS]; +static int jackpoll_ms[SNDRV_CARDS]; +static int single_cmd = -1; +static int enable_msi = -1; +#ifdef CONFIG_SND_HDA_INPUT_BEEP +static bool beep_mode[SNDRV_CARDS] = {[0 ... 
(SNDRV_CARDS-1)] = + CONFIG_SND_HDA_INPUT_BEEP_MODE}; +#endif + +module_param_array(index, int, NULL, 0444); +MODULE_PARM_DESC(index, "Index value for Intel HD audio interface."); +module_param_array(id, charp, NULL, 0444); +MODULE_PARM_DESC(id, "ID string for Intel HD audio interface."); +module_param_array(enable, bool, NULL, 0444); +MODULE_PARM_DESC(enable, "Enable Intel HD audio interface."); +module_param_array(model, charp, NULL, 0444); +MODULE_PARM_DESC(model, "Use the given board model."); +module_param_array(position_fix, int, NULL, 0444); +MODULE_PARM_DESC(position_fix, "DMA pointer read method." + "(-1 = system default, 0 = auto, 1 = LPIB, 2 = POSBUF, 3 = VIACOMBO, 4 = COMBO)."); +module_param_array(bdl_pos_adj, int, NULL, 0644); +MODULE_PARM_DESC(bdl_pos_adj, "BDL position adjustment offset."); +module_param_array(probe_mask, int, NULL, 0444); +MODULE_PARM_DESC(probe_mask, "Bitmask to probe codecs (default = -1)."); +module_param_array(probe_only, int, NULL, 0444); +MODULE_PARM_DESC(probe_only, "Only probing and no codec initialization."); +module_param_array(jackpoll_ms, int, NULL, 0444); +MODULE_PARM_DESC(jackpoll_ms, "Ms between polling for jack events (default = 0, using unsol events only)"); +module_param(single_cmd, bint, 0444); +MODULE_PARM_DESC(single_cmd, "Use single command to communicate with codecs " + "(for debugging only)."); +module_param(enable_msi, bint, 0444); +MODULE_PARM_DESC(enable_msi, "Enable Message Signaled Interrupt (MSI)"); +#ifdef CONFIG_SND_HDA_INPUT_BEEP +module_param_array(beep_mode, bool, NULL, 0444); +MODULE_PARM_DESC(beep_mode, "Select HDA Beep registration mode " + "(0=off, 1=on) (default=1)."); +#endif + +#define power_save 0 + +static int align_buffer_size = -1; +module_param(align_buffer_size, bint, 0644); +MODULE_PARM_DESC(align_buffer_size, + "Force buffer and period sizes to be multiple of 128 bytes."); + +/* driver types */ +enum { + AZX_DRIVER_ICH, + AZX_DRIVER_PCH, + AZX_DRIVER_SCH, + AZX_DRIVER_HDMI, + 
AZX_DRIVER_ATI, + AZX_DRIVER_ATIHDMI, + AZX_DRIVER_ATIHDMI_NS, + AZX_DRIVER_VIA, + AZX_DRIVER_SIS, + AZX_DRIVER_ULI, + AZX_DRIVER_NVIDIA, + AZX_DRIVER_TERA, + AZX_DRIVER_CTX, + AZX_DRIVER_CTHDA, + AZX_DRIVER_CMEDIA, + AZX_DRIVER_GENERIC, + AZX_DRIVER_FT, + AZX_NUM_DRIVERS, /* keep this as last entry */ +}; + +/* NOP for other archs */ +static inline void mark_pages_wc(struct azx *chip, struct snd_dma_buffer *buf, + bool on) +{ +} + +static inline void mark_runtime_wc(struct azx *chip, struct azx_dev *azx_dev, + struct snd_pcm_substream *substream, bool on) +{ +} + +static int azx_acquire_irq(struct azx *chip, int do_disconnect); + +/* calculate runtime delay from LPIB */ +static int azx_get_delay_from_lpib(struct azx *chip, struct azx_dev *azx_dev, + unsigned int pos) +{ + struct snd_pcm_substream *substream = azx_dev->core.substream; + int stream = substream->stream; + unsigned int lpib_pos = azx_get_pos_lpib(chip, azx_dev); + int delay; + + if (stream == SNDRV_PCM_STREAM_PLAYBACK) + delay = pos - lpib_pos; + else + delay = lpib_pos - pos; + if (delay < 0) { + if (delay >= azx_dev->core.delay_negative_threshold) + delay = 0; + else + delay += azx_dev->core.bufsize; + } + + if (delay >= azx_dev->core.period_bytes) { + dev_info(chip->card->dev, + "Unstable LPIB (%d >= %d); disabling LPIB delay counting\n", + delay, azx_dev->core.period_bytes); + delay = 0; + chip->driver_caps &= ~AZX_DCAPS_COUNT_LPIB_DELAY; + chip->get_delay[stream] = NULL; + } + + return bytes_to_frames(substream->runtime, delay); +} + +static int azx_position_ok(struct azx *chip, struct azx_dev *azx_dev); + +/* called from IRQ */ +static int azx_position_check(struct azx *chip, struct azx_dev *azx_dev) +{ + struct hda_ft *hda = container_of(chip, struct hda_ft, chip); + int ok; + + ok = azx_position_ok(chip, azx_dev); + if (ok == 1) { + azx_dev->irq_pending = 0; + return ok; + } else if (ok == 0) { + /* bogus IRQ, process it later */ + azx_dev->irq_pending = 1; + 
schedule_work(&hda->irq_pending_work); + } + return 0; +} + +static int azx_ft_link_power(struct azx *chip, bool enable) +{ + return 0; +} + +/* + * Check whether the current DMA position is acceptable for updating + * periods. Returns non-zero if it's OK. + * + * Many HD-audio controllers appear pretty inaccurate about + * the update-IRQ timing. The IRQ is issued before actually the + * data is processed. So, we need to process it afterwards in a + * workqueue. + */ +static int azx_position_ok(struct azx *chip, struct azx_dev *azx_dev) +{ + struct snd_pcm_substream *substream = azx_dev->core.substream; + int stream = substream->stream; + u32 wallclk; + unsigned int pos; + + wallclk = (azx_readl(chip, WALLCLK) - azx_dev->core.start_wallclk); + + if (wallclk < (azx_dev->core.period_wallclk * 2) / 3) + return -1; /* bogus (too early) interrupt */ + + if (chip->get_position[stream]) + pos = chip->get_position[stream](chip, azx_dev); + else { /* use the position buffer as default */ + pos = azx_get_pos_posbuf(chip, azx_dev); + if (!pos || pos == (u32)-1) { + dev_info(chip->card->dev, + "Invalid position buffer, using LPIB read method instead.\n"); + chip->get_position[stream] = azx_get_pos_lpib; + if (chip->get_position[0] == azx_get_pos_lpib && + chip->get_position[1] == azx_get_pos_lpib) + azx_bus(chip)->use_posbuf = false; + pos = azx_get_pos_lpib(chip, azx_dev); + chip->get_delay[stream] = NULL; + } else { + chip->get_position[stream] = azx_get_pos_posbuf; + if (chip->driver_caps & AZX_DCAPS_COUNT_LPIB_DELAY) + chip->get_delay[stream] = azx_get_delay_from_lpib; + } + } + + if (pos >= azx_dev->core.bufsize) + pos = 0; + + if (WARN_ONCE(!azx_dev->core.period_bytes, + "hda-ft: zero azx_dev->period_bytes")) + return -1; /* this shouldn't happen! */ + if (wallclk < (azx_dev->core.period_wallclk * 5) / 4 && + pos % azx_dev->core.period_bytes > azx_dev->core.period_bytes / 2) + /* NG - it's below the first next period boundary */ + return chip->bdl_pos_adj ? 
0 : -1; + + azx_dev->core.start_wallclk += wallclk; + + return 1; /* OK, it's fine */ +} + +/* The work for pending PCM period updates. */ +static void azx_irq_pending_work(struct work_struct *work) +{ + struct hda_ft *hda = container_of(work, struct hda_ft, irq_pending_work); + struct azx *chip = &hda->chip; + struct hdac_bus *bus = azx_bus(chip); + struct hdac_stream *s; + int pending, ok; + + if (!hda->irq_pending_warned) { + dev_info(chip->card->dev, + "IRQ timing workaround is activated for card #%d. Suggest a bigger bdl_pos_adj.\n", + chip->card->number); + hda->irq_pending_warned = 1; + } + + for (;;) { + pending = 0; + spin_lock_irq(&bus->reg_lock); + list_for_each_entry(s, &bus->stream_list, list) { + struct azx_dev *azx_dev = stream_to_azx_dev(s); + if (!azx_dev->irq_pending || + !s->substream || + !s->running) + continue; + ok = azx_position_ok(chip, azx_dev); + if (ok > 0) { + azx_dev->irq_pending = 0; + spin_unlock(&bus->reg_lock); + snd_pcm_period_elapsed(s->substream); + spin_lock(&bus->reg_lock); + } else if (ok < 0) { + pending = 0; /* too early */ + } else + pending++; + } + spin_unlock_irq(&bus->reg_lock); + if (!pending) + return; + msleep(1); + } +} + +/* clear irq_pending flags and assure no on-going workq */ +static void azx_clear_irq_pending(struct azx *chip) +{ + struct hdac_bus *bus = azx_bus(chip); + struct hdac_stream *s; + + spin_lock_irq(&bus->reg_lock); + list_for_each_entry(s, &bus->stream_list, list) { + struct azx_dev *azx_dev = stream_to_azx_dev(s); + azx_dev->irq_pending = 0; + } + spin_unlock_irq(&bus->reg_lock); +} + +static int azx_acquire_irq(struct azx *chip, int do_disconnect) +{ + struct hdac_bus *bus = azx_bus(chip); + + struct hda_ft *hda = container_of(chip, struct hda_ft, chip); + struct platform_device *pdev = to_platform_device(hda->dev); + int irq_id = platform_get_irq(pdev, 0); + int err; + + err = devm_request_irq(chip->card->dev, irq_id, azx_interrupt, + IRQF_SHARED, KBUILD_MODNAME, chip); + if (err) { + 
dev_err(chip->card->dev, + "unable to request IRQ %d, disabling device\n", + irq_id); + if (do_disconnect) + snd_card_disconnect(chip->card); + return err; + } + bus->irq = irq_id; + + return 0; +} + +/* get the current DMA position with correction on VIA chips */ +static unsigned int azx_via_get_position(struct azx *chip, + struct azx_dev *azx_dev) +{ + unsigned int link_pos, mini_pos, bound_pos; + unsigned int mod_link_pos, mod_dma_pos, mod_mini_pos; + unsigned int fifo_size; + + link_pos = snd_hdac_stream_get_pos_lpib(azx_stream(azx_dev)); + if (azx_dev->core.substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + /* Playback, no problem using link position */ + return link_pos; + } + + /* Capture */ + /* For new chipset, + * use mod to get the DMA position just like old chipset + */ + mod_dma_pos = le32_to_cpu(*azx_dev->core.posbuf); + mod_dma_pos %= azx_dev->core.period_bytes; + + /* azx_dev->fifo_size can't get FIFO size of in stream. + * Get from base address + offset. + */ + fifo_size = readw(azx_bus(chip)->remap_addr + + VIA_IN_STREAM0_FIFO_SIZE_OFFSET); + + if (azx_dev->insufficient) { + /* Link position never greater than FIFO size */ + if (link_pos <= fifo_size) + return 0; + + azx_dev->insufficient = 0; + } + + if (link_pos <= fifo_size) + mini_pos = azx_dev->core.bufsize + link_pos - fifo_size; + else + mini_pos = link_pos - fifo_size; + + /* Find nearest previous boundary */ + mod_mini_pos = mini_pos % azx_dev->core.period_bytes; + mod_link_pos = link_pos % azx_dev->core.period_bytes; + if (mod_link_pos >= fifo_size) + bound_pos = link_pos - mod_link_pos; + else if (mod_dma_pos >= mod_mini_pos) + bound_pos = mini_pos - mod_mini_pos; + else { + bound_pos = mini_pos - mod_mini_pos + azx_dev->core.period_bytes; + if (bound_pos >= azx_dev->core.bufsize) + bound_pos = 0; + } + + /* Calculate real DMA position we want */ + return bound_pos + mod_dma_pos; +} + +#ifdef CONFIG_PM +static DEFINE_MUTEX(card_list_lock); +static LIST_HEAD(card_list); + +static void 
azx_add_card_list(struct azx *chip) +{ + struct hda_ft *hda = container_of(chip, struct hda_ft, chip); + mutex_lock(&card_list_lock); + list_add(&hda->list, &card_list); + mutex_unlock(&card_list_lock); +} + +static void azx_del_card_list(struct azx *chip) +{ + struct hda_ft *hda = container_of(chip, struct hda_ft, chip); + mutex_lock(&card_list_lock); + list_del_init(&hda->list); + mutex_unlock(&card_list_lock); +} + +#else +#define azx_add_card_list(chip) /* NOP */ +#define azx_del_card_list(chip) /* NOP */ +#endif /* CONFIG_PM */ + +#if defined(CONFIG_PM_SLEEP) +/* power management */ +static int azx_suspend(struct device *dev) +{ + struct snd_card *card = dev_get_drvdata(dev); + struct azx *chip; + struct hda_ft *hda; + struct hdac_bus *bus; + + if (!card) + return 0; + + chip = card->private_data; + hda = container_of(chip, struct hda_ft, chip); + if (chip->disabled || !chip->running) + return 0; + + bus = azx_bus(chip); + snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); + azx_clear_irq_pending(chip); + azx_stop_chip(chip); + if (bus->irq >= 0) { + free_irq(bus->irq, chip); + bus->irq = -1; + } + + return 0; +} + +static int azx_resume(struct device *dev) +{ + struct snd_card *card = dev_get_drvdata(dev); + struct azx *chip; + struct hda_ft *hda; + struct hdac_bus *bus; + int index; + struct snd_pcm_substream *substream; + struct azx_dev *azx_dev; + int err; + + if (!card) + return 0; + + chip = card->private_data; + hda = container_of(chip, struct hda_ft, chip); + bus = azx_bus(chip); + if (chip->disabled || !chip->running) + return 0; + + if (azx_acquire_irq(chip, 1) < 0) + return -EIO; + + index = chip->dev_index; + + snd_hdac_bus_exit_link_reset(bus); + usleep_range(1000, 1200); + + azx_init_chip(chip, 0); + + snd_power_change_state(card, SNDRV_CTL_POWER_D0); + + if (hda->substream && hda->substream->runtime) { + substream = hda->substream; + + if(substream->runtime->status->state == SNDRV_PCM_STATE_SUSPENDED){ + substream->runtime->status->state = 
substream->runtime->status->suspended_state; + err = substream->ops->prepare(substream); + if (err < 0) + return err; + } + + azx_dev = get_azx_dev(substream); + hda->substream = NULL; + } + + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM +static int azx_runtime_suspend(struct device *dev) +{ + struct snd_card *card = dev_get_drvdata(dev); + struct azx *chip; + struct hda_ft *hda; + + if (!card) + return 0; + + chip = card->private_data; + hda = container_of(chip, struct hda_ft, chip); + if (chip->disabled) + return 0; + + if (!azx_has_pm_runtime(chip)) + return 0; + + azx_stop_chip(chip); + azx_enter_link_reset(chip); + azx_clear_irq_pending(chip); + + return 0; +} + +static int azx_runtime_resume(struct device *dev) +{ + struct snd_card *card = dev_get_drvdata(dev); + struct azx *chip; + struct hda_ft *hda; + struct hdac_bus *bus; + struct hda_codec *codec; + int status; + int index; + + if (!card) + return 0; + + chip = card->private_data; + hda = container_of(chip, struct hda_ft, chip); + bus = azx_bus(chip); + if (chip->disabled) + return 0; + + if (!azx_has_pm_runtime(chip)) + return 0; + + /* Read STATESTS before controller reset */ + status = azx_readw(chip, STATESTS); + + index = chip->dev_index; + + snd_hdac_bus_exit_link_reset(bus); + usleep_range(1000, 1200); + + azx_init_chip(chip, 0); + + if (status) { + list_for_each_codec(codec, &chip->bus) + if (status & (1 << codec->addr)) + schedule_delayed_work(&codec->jackpoll_work, + codec->jackpoll_interval); + } + + return 0; +} + +static int azx_runtime_idle(struct device *dev) +{ + struct snd_card *card = dev_get_drvdata(dev); + struct azx *chip; + struct hda_ft *hda; + + if (!card) + return 0; + + chip = card->private_data; + hda = container_of(chip, struct hda_ft, chip); + if (chip->disabled) + return 0; + + if (!azx_has_pm_runtime(chip) || + azx_bus(chip)->codec_powered || !chip->running) + return -EBUSY; + + return 0; +} + +static const struct dev_pm_ops azx_pm = { + 
SET_SYSTEM_SLEEP_PM_OPS(azx_suspend, azx_resume) + SET_RUNTIME_PM_OPS(azx_runtime_suspend, azx_runtime_resume, azx_runtime_idle) +}; + +#define hda_ft_pm &azx_pm +#else +#define hda_ft_pm NULL +#endif /* CONFIG_PM */ + +static int azx_probe_continue(struct azx *chip); + +/* + * destructor + */ +static int azx_free(struct azx *chip) +{ + struct hda_ft *hda = container_of(chip, struct hda_ft, chip); + struct hdac_bus *bus = azx_bus(chip); + struct platform_device *pdev = to_platform_device(hda->dev); + struct device *hddev = hda->dev; + struct resource *res; + resource_size_t size; + + if (azx_has_pm_runtime(chip) && chip->running) + pm_runtime_get_noresume(&pdev->dev); + + azx_del_card_list(chip); + + complete_all(&hda->probe_wait); + + if (bus->chip_init) { + azx_clear_irq_pending(chip); + azx_stop_all_streams(chip); + azx_stop_chip(chip); + } + + if (bus->irq >= 0) + free_irq(bus->irq, (void*)chip); + + devm_iounmap(hddev, bus->remap_addr); + + azx_free_stream_pages(chip); + azx_free_streams(chip); + snd_hdac_bus_exit(bus); + + if (chip->region_requested){ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + size = resource_size(res); + devm_release_mem_region(hddev, res->start, size); + } + + kfree(hda); + + return 0; +} + +static int azx_dev_disconnect(struct snd_device *device) +{ + struct azx *chip = device->device_data; + + chip->bus.shutdown = 1; + return 0; +} + +static int azx_dev_free(struct snd_device *device) +{ + return azx_free(device->device_data); +} + +static int check_position_fix(struct azx *chip, int fix) +{ + switch (fix) { + case POS_FIX_AUTO: + case POS_FIX_LPIB: + case POS_FIX_POSBUF: + case POS_FIX_VIACOMBO: + case POS_FIX_COMBO: + return fix; + } + + if (chip->driver_caps & AZX_DCAPS_POSFIX_LPIB) { + dev_dbg(chip->card->dev, "Using LPIB position fix\n"); + return POS_FIX_LPIB; + } + return POS_FIX_AUTO; +} + +static void assign_position_fix(struct azx *chip, int fix) +{ + static azx_get_pos_callback_t callbacks[] = { + [POS_FIX_AUTO] 
= NULL, + [POS_FIX_LPIB] = azx_get_pos_lpib, + [POS_FIX_POSBUF] = azx_get_pos_posbuf, + [POS_FIX_VIACOMBO] = azx_via_get_position, + [POS_FIX_COMBO] = azx_get_pos_lpib, + }; + + chip->get_position[0] = chip->get_position[1] = callbacks[fix]; + + /* combo mode uses LPIB only for playback */ + if (fix == POS_FIX_COMBO) + chip->get_position[1] = NULL; + + if (fix == POS_FIX_POSBUF && + (chip->driver_caps & AZX_DCAPS_COUNT_LPIB_DELAY)) { + chip->get_delay[0] = chip->get_delay[1] = + azx_get_delay_from_lpib; + } + +} + +#define AZX_FORCE_CODEC_MASK 0x100 + +static void check_probe_mask(struct azx *chip, int dev) +{ + chip->codec_probe_mask = probe_mask[dev]; + + /* check forced option */ + if (chip->codec_probe_mask != -1 && + (chip->codec_probe_mask & AZX_FORCE_CODEC_MASK)) { + azx_bus(chip)->codec_mask = chip->codec_probe_mask & 0xff; + dev_info(chip->card->dev, "codec_mask forced to 0x%x\n", + (int)azx_bus(chip)->codec_mask); + } +} + +static void azx_probe_work(struct work_struct *work) +{ + struct hda_ft *hda = container_of(work, struct hda_ft, probe_work); + azx_probe_continue(&hda->chip); +} + +/* + * constructor + */ +static const struct hdac_io_ops axi_hda_io_ops; +static const struct hda_controller_ops axi_hda_ops; + +static int hda_ft_create(struct snd_card *card, struct platform_device *pdev, + int dev, unsigned int driver_caps, + struct azx **rchip) +{ + static struct snd_device_ops ops = { + .dev_disconnect = azx_dev_disconnect, + .dev_free = azx_dev_free, + }; + struct hda_ft *hda; + struct azx *chip; + int err; + + *rchip = NULL; + + hda = devm_kzalloc(&pdev->dev, sizeof(*hda), GFP_KERNEL); + if (!hda) + return -ENOMEM; + hda->dev = &pdev->dev; + chip = &hda->chip; + mutex_init(&chip->open_mutex); + chip->card = card; + chip->ops = &axi_hda_ops; + chip->driver_caps = driver_caps; + chip->driver_type = driver_caps & 0xff; + chip->dev_index = dev; + chip->jackpoll_ms = jackpoll_ms; + INIT_LIST_HEAD(&chip->pcm_list); + INIT_WORK(&hda->irq_pending_work, 
azx_irq_pending_work); + INIT_LIST_HEAD(&hda->list); + + init_completion(&hda->probe_wait); + assign_position_fix(chip, check_position_fix(chip, position_fix[dev])); + check_probe_mask(chip, dev); + + if (single_cmd < 0) /* allow fallback to single_cmd at errors */ + chip->fallback_to_single_cmd = 0; + else /* explicitly set to single_cmd or not */ + chip->single_cmd = single_cmd; + + if (bdl_pos_adj[dev] < 0) { + switch (chip->driver_type) { + case AZX_DRIVER_FT: + bdl_pos_adj[dev] = 32; + break; + default: + bdl_pos_adj[dev] = 32; + break; + } + } + chip->bdl_pos_adj = bdl_pos_adj[dev]; + + err = azx_bus_init(chip, model[dev], &axi_hda_io_ops); + if (err < 0) { + kfree(hda); + return err; + } + + if (chip->driver_type == AZX_DRIVER_NVIDIA) { + dev_dbg(chip->card->dev, "Enable delay in RIRB handling\n"); + chip->bus.needs_damn_long_delay = 1; + } + + err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops); + if (err < 0) { + dev_err(card->dev, "Error creating device [card]!\n"); + azx_free(chip); + return err; + } + + /* continue probing in work context as may trigger request module */ + INIT_WORK(&hda->probe_work, azx_probe_work); + + *rchip = chip; + + return 0; +} + +static int azx_first_init(struct azx *chip) +{ + struct hda_ft *hda = container_of(chip, struct hda_ft, chip); + struct platform_device *pdev = to_platform_device(hda->dev); + struct device *hddev = hda->dev; + + int dev = chip->dev_index; + bool full_reset; + + struct snd_card *card = chip->card; + struct hdac_bus *bus = azx_bus(chip); + int err; + unsigned short gcap; + unsigned int dma_bits = 64; + + struct resource *res; + const struct acpi_device_id *match; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + hda->regs = devm_ioremap_resource(hddev, res); + if (IS_ERR(hda->regs)) + return PTR_ERR(hda->regs); + chip->region_requested = 1; + + bus->addr = res->start; + bus->remap_addr = hda->regs; + if (bus->remap_addr == NULL) { + dev_err(card->dev, "ioremap error\n"); + return 
-ENXIO; + } + + bus->cmd_resend = 1; + + if (azx_acquire_irq(chip, 0) < 0) + return -EBUSY; + + synchronize_irq(bus->irq); + + gcap = azx_readw(chip, GCAP); + dev_dbg(card->dev, "chipset global capabilities = 0x%x\n", gcap); + + /* disable 64bit DMA address on some devices */ + if (chip->driver_caps & AZX_DCAPS_NO_64BIT) { + dev_dbg(card->dev, "Disabling 64bit DMA\n"); + gcap &= ~AZX_GCAP_64OK; + } + + /* disable buffer size rounding to 128-byte multiples if supported */ + if (align_buffer_size >= 0) + chip->align_buffer_size = !!align_buffer_size; + else { + if (chip->driver_caps & AZX_DCAPS_NO_ALIGN_BUFSIZE) + chip->align_buffer_size = 0; + else + chip->align_buffer_size = 1; + } + + if (hddev->of_node) { + + } else if (has_acpi_companion(hddev)){ + match = acpi_match_device(hddev->driver->acpi_match_table, hddev); + if (!match) { + dev_err(card->dev, "Error ACPI match data is missing\n"); + return -ENODEV; + } + acpi_dma_configure(hddev,DEV_DMA_NOT_SUPPORTED); + } + + /* allow 64bit DMA address if supported by H/W */ + if (!(gcap & AZX_GCAP_64OK)) + dma_bits = 32; + if (!dma_set_mask(hddev, DMA_BIT_MASK(dma_bits))) { + dma_set_coherent_mask(hddev, DMA_BIT_MASK(dma_bits)); + } else { + dma_set_mask(hddev, DMA_BIT_MASK(32)); + dma_set_coherent_mask(hddev, DMA_BIT_MASK(32)); + } + + /* read number of streams from GCAP register instead of using + * hardcoded value + */ + chip->capture_streams = (gcap >> 8) & 0x0f; + chip->playback_streams = (gcap >> 12) & 0x0f; + if (!chip->playback_streams && !chip->capture_streams) { + /* gcap didn't give any info, switching to old method */ + chip->playback_streams = FT4C_NUM_PLAYBACK; + chip->capture_streams = FT4C_NUM_CAPTURE; + } + chip->capture_index_offset = 0; + chip->playback_index_offset = chip->capture_streams; + chip->num_streams = chip->playback_streams + chip->capture_streams; + + /* initialize streams */ + err = azx_init_streams(chip); + if (err < 0) + return err; + + err = azx_alloc_stream_pages(chip); + if (err < 
0) + return err; + + full_reset = (probe_only[dev] & 2) ? false : true; + azx_init_chip(chip, full_reset); + + /* codec detection */ + if (!azx_bus(chip)->codec_mask) { + dev_err(card->dev, "no codecs found!\n"); + return -ENODEV; + } + + strcpy(card->driver, "ft-hda"); + strcpy(card->shortname, "ft-hda"); + snprintf(card->longname, sizeof(card->longname), + "%s at 0x%lx irq %i", + card->shortname, bus->addr, bus->irq); + + return 0; +} + +/* + * HDA controller ops. + */ + +/* APB register access. */ +static void axi_azx_writel(u32 value, u32 __iomem *addr) +{ + writel(value, addr); +} + +static u32 axi_azx_readl(u32 __iomem *addr) +{ + return readl(addr); +} + +static void axi_azx_writew(u16 value, u16 __iomem *addr) +{ + u32 data; + u32 offset; + + offset = (u64)addr & 0x03; + addr = (u16 __iomem *)((u64)addr & 0xFFFFFFFFFFFFFFFC); + data = readl(addr); + data &= ~(0xFFFF << offset * BYTE_BIT_WIDTH); + data |= (value << offset * BYTE_BIT_WIDTH); + writel(data, addr); +} + +static u16 axi_azx_readw(u16 __iomem *addr) +{ + return readw(addr); +} + +static void axi_azx_writeb(u8 value, u8 __iomem *addr) +{ + u32 data; + u32 offset; + + offset = (u64)addr & 0x03; + addr = (u8 __iomem *)((u64)addr & 0xFFFFFFFFFFFFFFFC); + data = readl(addr); + data &= ~(0xFF << offset * BYTE_BIT_WIDTH); + data |= (value << offset * BYTE_BIT_WIDTH); + writel(data, addr); +} + +static u8 axi_azx_readb(u8 __iomem *addr) +{ + return readb(addr); +} + +/* DMA page allocation helpers. 
*/ +static int dma_alloc_pages(struct hdac_bus *bus, + int type, + size_t size, + struct snd_dma_buffer *buf) +{ + struct azx *chip = bus_to_azx(bus); + int err; + + err = snd_dma_alloc_pages(type, + bus->dev, + size, buf); + if (err < 0) + return err; + mark_pages_wc(chip, buf, true); + return 0; +} + +static void dma_free_pages(struct hdac_bus *bus, struct snd_dma_buffer *buf) +{ + struct azx *chip = bus_to_azx(bus); + + mark_pages_wc(chip, buf, false); + snd_dma_free_pages(buf); +} + +static int substream_alloc_pages(struct azx *chip, + struct snd_pcm_substream *substream, + size_t size) +{ + struct azx_dev *azx_dev = get_azx_dev(substream); + int ret; + + mark_runtime_wc(chip, azx_dev, substream, false); + ret = snd_pcm_lib_malloc_pages(substream, size); + if (ret < 0) + return ret; + + mark_runtime_wc(chip, azx_dev, substream, true); + return 0; +} + +static int substream_free_pages(struct azx *chip, + struct snd_pcm_substream *substream) +{ + struct azx_dev *azx_dev = get_azx_dev(substream); + mark_runtime_wc(chip, azx_dev, substream, false); + return snd_pcm_lib_free_pages(substream); +} + +static void pcm_mmap_prepare(struct snd_pcm_substream *substream, + struct vm_area_struct *area) +{ + +} + +static const struct hdac_io_ops axi_hda_io_ops = { + .reg_writel = axi_azx_writel, + .reg_readl = axi_azx_readl, + .reg_writew = axi_azx_writew, + .reg_readw = axi_azx_readw, + .reg_writeb = axi_azx_writeb, + .reg_readb = axi_azx_readb, + .dma_alloc_pages = dma_alloc_pages, + .dma_free_pages = dma_free_pages, +}; + +static const struct hda_controller_ops axi_hda_ops = { + .substream_alloc_pages = substream_alloc_pages, + .substream_free_pages = substream_free_pages, + .pcm_mmap_prepare = pcm_mmap_prepare, + .position_check = azx_position_check, + .link_power = azx_ft_link_power, +}; + +static int hda_ft_probe(struct platform_device *pdev) +{ + const unsigned int driver_flags = AZX_DCAPS_SYNC_WRITE | AZX_DRIVER_FT; + static int dev; + struct snd_card *card; + struct 
hda_ft *hda; + struct azx *chip; + bool schedule_probe; + int err; + + if (dev >= SNDRV_CARDS) + return -ENODEV; + if (!enable[dev]) { + dev++; + return -ENOENT; + } + + err = snd_card_new(&pdev->dev, index[dev], id[dev], THIS_MODULE, + 0, &card); + if (err < 0) { + dev_err(&pdev->dev, "Error creating card!\n"); + return err; + } + + err = hda_ft_create(card, pdev,dev, driver_flags, &chip); + if (err < 0) + goto out_free; + card->private_data = chip; + hda = container_of(chip, struct hda_ft, chip); + + dev_set_drvdata(&pdev->dev, card); + + schedule_probe = !chip->disabled; + + if (schedule_probe) + schedule_work(&hda->probe_work); + + dev++; + if (chip->disabled) + complete_all(&hda->probe_wait); + return 0; + +out_free: + snd_card_free(card); + return err; +} + +/* number of codec slots for each chipset: 0 = default slots (i.e. 4) */ +static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] = { + [AZX_DRIVER_FT] = 4, +}; + +static int azx_probe_continue(struct azx *chip) +{ + struct hda_ft *hda = container_of(chip, struct hda_ft, chip); + struct device *hddev = hda->dev; + int dev = chip->dev_index; + int err; + struct hdac_bus *bus = azx_bus(chip); + + hda->probe_continued = 1; + + err = azx_first_init(chip); + if (err < 0) + goto out_free; + +#ifdef CONFIG_SND_HDA_INPUT_BEEP + chip->beep_mode = beep_mode[dev]; +#endif + + /* create codec instances */ + err = azx_probe_codecs(chip, azx_max_codecs[chip->driver_type]); + if (err < 0) + goto out_free; + + if ((probe_only[dev] & 1) == 0) { + err = azx_codec_configure(chip); + if (err < 0) + goto out_free; + } + + err = snd_card_register(chip->card); + if (err < 0) + goto out_free; + + chip->running = 1; + azx_add_card_list(chip); + snd_hda_set_power_save(&chip->bus, power_save * 1000); + + if (azx_has_pm_runtime(chip)) + pm_runtime_put_noidle(hddev); + return err; + +out_free: + free_irq(bus->irq, (void*)chip); + return err; +} + +static int hda_ft_remove(struct platform_device *pdev) +{ + struct snd_card *card = 
dev_get_drvdata(&pdev->dev); + struct azx *chip; + struct hda_ft *hda; + + if (card) { + /* cancel the pending probing work */ + chip = card->private_data; + hda = container_of(chip, struct hda_ft, chip); + cancel_work_sync(&hda->probe_work); + + snd_card_free(card); + return 0; + } + return 0; +} + +static void hda_ft_shutdown(struct platform_device *pdev) +{ + struct snd_card *card = dev_get_drvdata(&pdev->dev); + struct azx *chip; + + if (!card) + return; + chip = card->private_data; + if (chip && chip->running) + azx_stop_chip(chip); +} + +static const struct of_device_id hda_ft_of_match[] = { + { .compatible = "phytium,hda" }, + {}, +}; +MODULE_DEVICE_TABLE(of, hda_ft_of_match); + +#ifdef CONFIG_ACPI +static const struct acpi_device_id hda_ft_acpi_match[] = { + { .id = "PHYT0006" }, + {} +}; +MODULE_DEVICE_TABLE(acpi, hda_ft_acpi_match); +#else +#define hda_ft_acpi_match NULL +#endif + +static struct platform_driver ft_platform_hda = { + .driver = { + .name = "ft-hda", + .pm = hda_ft_pm, + .of_match_table = hda_ft_of_match, + .acpi_match_table = hda_ft_acpi_match, + }, + .probe = hda_ft_probe, + .remove = hda_ft_remove, + .shutdown = hda_ft_shutdown, +}; + +module_platform_driver(ft_platform_hda); + +MODULE_DESCRIPTION("FT HDA bus driver"); +MODULE_LICENSE("GPL v2"); diff --git a/sound/pci/hda/hda_phytium.h b/sound/pci/hda/hda_phytium.h new file mode 100644 index 000000000000..23412cb4c711 --- /dev/null +++ b/sound/pci/hda/hda_phytium.h @@ -0,0 +1,51 @@ +/* + * hda_ft.h - Implementation of primary alsa driver code base + * for Intel HD Audio of Phytium. + * + * Copyright(c) 2018 Phytium Corporation. All rights reserved. + * + * Copyright(c) 2018 Leo Hou + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef __SOUND_HDA_PHYTIUM_H__ +#define __SOUND_HDA_PHYTIUM_H__ + +#include "hda_controller.h" + +struct hda_ft { + struct azx chip; + struct snd_pcm_substream *substream; + struct device *dev; + void __iomem *regs; + + /* for pending irqs */ + struct work_struct irq_pending_work; + + /* sync probing */ + struct completion probe_wait; + struct work_struct probe_work; + + /* card list (for power_save trigger) */ + struct list_head list; + + /* extra flags */ + unsigned int irq_pending_warned:1; + unsigned int probe_continued:1; + +}; + +#endif diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig index 1cf11cf51e1d..32296faf39c5 100644 --- a/sound/soc/Kconfig +++ b/sound/soc/Kconfig @@ -59,6 +59,7 @@ source "sound/soc/intel/Kconfig" source "sound/soc/mediatek/Kconfig" source "sound/soc/meson/Kconfig" source "sound/soc/mxs/Kconfig" +source "sound/soc/phytium/Kconfig" source "sound/soc/pxa/Kconfig" source "sound/soc/qcom/Kconfig" source "sound/soc/rockchip/Kconfig" diff --git a/sound/soc/Makefile b/sound/soc/Makefile index 62a5f87c3cfc..2cdfbe1db8c2 100644 --- a/sound/soc/Makefile +++ b/sound/soc/Makefile @@ -43,6 +43,7 @@ obj-$(CONFIG_SND_SOC) += mxs/ obj-$(CONFIG_SND_SOC) += nuc900/ obj-$(CONFIG_SND_SOC) += omap/ obj-$(CONFIG_SND_SOC) += kirkwood/ +obj-$(CONFIG_SND_SOC) += phytium/ obj-$(CONFIG_SND_SOC) += pxa/ obj-$(CONFIG_SND_SOC) += qcom/ obj-$(CONFIG_SND_SOC) += rockchip/ diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index efb095dbcd71..75e8f6389967 100644 --- 
a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig @@ -606,6 +606,15 @@ config SND_SOC_ES8328_SPI depends on SPI_MASTER select SND_SOC_ES8328 +config SND_SOC_ES8336 + tristate "Everest Semi ES8336 CODEC" + depends on I2C + select GPIO_PHYTIUM_PCI + +config SND_SOC_ES8388 + tristate "Everest Semi ES8388 CODEC" + depends on I2C + config SND_SOC_GTM601 tristate 'GTM601 UMTS modem audio codec' diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile index 7ae7c85e8219..1ba362d2dd65 100644 --- a/sound/soc/codecs/Makefile +++ b/sound/soc/codecs/Makefile @@ -76,6 +76,8 @@ snd-soc-es8316-objs := es8316.o snd-soc-es8328-objs := es8328.o snd-soc-es8328-i2c-objs := es8328-i2c.o snd-soc-es8328-spi-objs := es8328-spi.o +snd-soc-es8336-objs := es8336.o +snd-soc-es8388-objs := es8388.o snd-soc-gtm601-objs := gtm601.o snd-soc-hdac-hdmi-objs := hdac_hdmi.o snd-soc-ics43432-objs := ics43432.o @@ -336,6 +338,8 @@ obj-$(CONFIG_SND_SOC_ES8316) += snd-soc-es8316.o obj-$(CONFIG_SND_SOC_ES8328) += snd-soc-es8328.o obj-$(CONFIG_SND_SOC_ES8328_I2C)+= snd-soc-es8328-i2c.o obj-$(CONFIG_SND_SOC_ES8328_SPI)+= snd-soc-es8328-spi.o +obj-$(CONFIG_SND_SOC_ES8336)+= snd-soc-es8336.o +obj-$(CONFIG_SND_SOC_ES8388) += snd-soc-es8388.o obj-$(CONFIG_SND_SOC_GTM601) += snd-soc-gtm601.o obj-$(CONFIG_SND_SOC_HDAC_HDMI) += snd-soc-hdac-hdmi.o obj-$(CONFIG_SND_SOC_ICS43432) += snd-soc-ics43432.o diff --git a/sound/soc/codecs/es8336.c b/sound/soc/codecs/es8336.c new file mode 100644 index 000000000000..c381f4fc4df4 --- /dev/null +++ b/sound/soc/codecs/es8336.c @@ -0,0 +1,1093 @@ +/* + * es8336.c -- es8336 ALSA SoC audio driver + * Copyright Everest Semiconductor Co.,Ltd + * Phytium Information Technology Co.,Ltd + * + * Author: David Yang + * Yiqun Zhang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "es8336.h" + +#define INVALID_GPIO -1 +#define GPIO_LOW 0 +#define GPIO_HIGH 1 + +static struct snd_soc_component *es8336_component; + +static const struct reg_default es8336_reg_defaults[] = { + {0x00, 0x03}, {0x01, 0x03}, {0x02, 0x00}, {0x03, 0x20}, + {0x04, 0x11}, {0x05, 0x00}, {0x06, 0x11}, {0x07, 0x00}, + {0x08, 0x00}, {0x09, 0x01}, {0x0a, 0x00}, {0x0b, 0x00}, + {0x0c, 0xf8}, {0x0d, 0x3f}, {0x0e, 0x00}, {0x0f, 0x00}, + {0x10, 0x01}, {0x11, 0xfc}, {0x12, 0x28}, {0x13, 0x00}, + {0x14, 0x00}, {0x15, 0x33}, {0x16, 0x00}, {0x17, 0x00}, + {0x18, 0x88}, {0x19, 0x06}, {0x1a, 0x22}, {0x1b, 0x03}, + {0x1c, 0x0f}, {0x1d, 0x00}, {0x1e, 0x80}, {0x1f, 0x80}, + {0x20, 0x00}, {0x21, 0x00}, {0x22, 0xc0}, {0x23, 0x00}, + {0x24, 0x01}, {0x25, 0x08}, {0x26, 0x10}, {0x27, 0xc0}, + {0x28, 0x00}, {0x29, 0x1c}, {0x2a, 0x00}, {0x2b, 0xb0}, + {0x2c, 0x32}, {0x2d, 0x03}, {0x2e, 0x00}, {0x2f, 0x11}, + {0x30, 0x10}, {0x31, 0x00}, {0x32, 0x00}, {0x33, 0xc0}, + {0x34, 0xc0}, {0x35, 0x1f}, {0x36, 0xf7}, {0x37, 0xfd}, + {0x38, 0xff}, {0x39, 0x1f}, {0x3a, 0xf7}, {0x3b, 0xfd}, + {0x3c, 0xff}, {0x3d, 0x1f}, {0x3e, 0xf7}, {0x3f, 0xfd}, + {0x40, 0xff}, {0x41, 0x1f}, {0x42, 0xf7}, {0x43, 0xfd}, + {0x44, 0xff}, {0x45, 0x1f}, {0x46, 0xf7}, {0x47, 0xfd}, + {0x48, 0xff}, {0x49, 0x1f}, {0x4a, 0xf7}, {0x4b, 0xfd}, + {0x4c, 0xff}, {0x4d, 0x00}, {0x4e, 0x00}, {0x4f, 0xff}, + {0x50, 0x00}, {0x51, 0x00}, {0x52, 0x00}, {0x53, 0x00}, +}; + +/* codec private data */ +struct es8336_priv { + struct regmap *regmap; + unsigned int dmic_amic; + unsigned int sysclk; + struct snd_pcm_hw_constraint_list *sysclk_constraints; + struct clk *mclk; + int debounce_time; + int hp_det_invert; + struct delayed_work work; + + int spk_ctl_gpio; + int hp_det_gpio; + bool muted; + bool 
hp_inserted; + bool spk_active_level; + + int pwr_count; +}; + +/* + * es8336_reset + * write value 0xff to reg0x00, the chip will be in reset mode + * then, writer 0x00 to reg0x00, unreset the chip + */ +static int es8336_reset(struct snd_soc_component *component) +{ + snd_soc_component_write(component, ES8336_RESET_REG00, 0x3F); + usleep_range(5000, 5500); + return snd_soc_component_write(component, ES8336_RESET_REG00, 0x03); +} + +static void es8336_enable_spk(struct es8336_priv *es8336, bool enable) +{ + bool level; + + level = enable ? es8336->spk_active_level : !es8336->spk_active_level; + gpio_set_value(es8336->spk_ctl_gpio, level); +} + +static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -9600, 50, 1); +static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -9600, 50, 1); +static const DECLARE_TLV_DB_SCALE(hpmixer_gain_tlv, -1200, 150, 0); +static const DECLARE_TLV_DB_SCALE(mic_bst_tlv, 0, 1200, 0); + +static unsigned int linin_pga_tlv[] = { + TLV_DB_RANGE_HEAD(9), + 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0), + 1, 1, TLV_DB_SCALE_ITEM(300, 0, 0), + 2, 2, TLV_DB_SCALE_ITEM(600, 0, 0), + 3, 3, TLV_DB_SCALE_ITEM(900, 0, 0), + 4, 4, TLV_DB_SCALE_ITEM(1200, 0, 0), + 5, 5, TLV_DB_SCALE_ITEM(1500, 0, 0), + 6, 6, TLV_DB_SCALE_ITEM(1800, 0, 0), + 7, 7, TLV_DB_SCALE_ITEM(2100, 0, 0), + 8, 8, TLV_DB_SCALE_ITEM(2400, 0, 0), +}; + +static unsigned int hpout_vol_tlv[] = { + TLV_DB_RANGE_HEAD(1), + 0, 3, TLV_DB_SCALE_ITEM(-4800, 1200, 0), +}; + +static const char *const alc_func_txt[] = { "Off", "On" }; + +static const struct soc_enum alc_func = + SOC_ENUM_SINGLE(ES8336_ADC_ALC1_REG29, 6, 2, alc_func_txt); + +static const char *const ng_type_txt[] = { + "Constant PGA Gain", "Mute ADC Output" }; + +static const struct soc_enum ng_type = + SOC_ENUM_SINGLE(ES8336_ADC_ALC6_REG2E, 6, 2, ng_type_txt); + +static const char *const adcpol_txt[] = { "Normal", "Invert" }; + +static const struct soc_enum adcpol = + SOC_ENUM_SINGLE(ES8336_ADC_MUTE_REG26, 1, 2, adcpol_txt); + +static const char *const 
dacpol_txt[] = { + "Normal", "R Invert", "L Invert", "L + R Invert" }; + +static const struct soc_enum dacpol = + SOC_ENUM_SINGLE(ES8336_DAC_SET1_REG30, 0, 4, dacpol_txt); + +static const struct snd_kcontrol_new es8336_snd_controls[] = { + /* HP OUT VOLUME */ + SOC_DOUBLE_TLV("HP Playback Volume", ES8336_CPHP_ICAL_VOL_REG18, + 4, 0, 4, 1, hpout_vol_tlv), + /* HPMIXER VOLUME Control */ + SOC_DOUBLE_TLV("HPMixer Gain", ES8336_HPMIX_VOL_REG16, + 0, 4, 7, 0, hpmixer_gain_tlv), + + /* DAC Digital controls */ + SOC_DOUBLE_R_TLV("DAC Playback Volume", ES8336_DAC_VOLL_REG33, + ES8336_DAC_VOLR_REG34, 0, 0xC0, 1, dac_vol_tlv), + + SOC_SINGLE("Enable DAC Soft Ramp", ES8336_DAC_SET1_REG30, 4, 1, 1), + SOC_SINGLE("DAC Soft Ramp Rate", ES8336_DAC_SET1_REG30, 2, 4, 0), + + SOC_ENUM("Playback Polarity", dacpol), + SOC_SINGLE("DAC Notch Filter", ES8336_DAC_SET2_REG31, 6, 1, 0), + SOC_SINGLE("DAC Double Fs Mode", ES8336_DAC_SET2_REG31, 7, 1, 0), + SOC_SINGLE("DAC Volume Control-LeR", ES8336_DAC_SET2_REG31, 2, 1, 0), + SOC_SINGLE("DAC Stereo Enhancement", ES8336_DAC_SET3_REG32, 0, 7, 0), + + /* +20dB D2SE PGA Control */ + SOC_SINGLE_TLV("MIC Boost", ES8336_ADC_D2SEPGA_REG24, + 0, 1, 0, mic_bst_tlv), + /* 0-+24dB Lineinput PGA Control */ + SOC_SINGLE_TLV("Input PGA", ES8336_ADC_PGAGAIN_REG23, + 4, 8, 0, linin_pga_tlv), +}; + +/* Analog Input MUX */ +static const char * const es8336_analog_in_txt[] = { + "lin1-rin1", + "lin2-rin2", + "lin1-rin1 with 20db Boost", + "lin2-rin2 with 20db Boost" +}; + +static const unsigned int es8336_analog_in_values[] = { 0, 1, 2, 3 }; + +static const struct soc_enum es8336_analog_input_enum = + SOC_VALUE_ENUM_SINGLE(ES8336_ADC_PDN_LINSEL_REG22, 4, 3, + ARRAY_SIZE(es8336_analog_in_txt), + es8336_analog_in_txt, + es8336_analog_in_values); + +static const struct snd_kcontrol_new es8336_analog_in_mux_controls = + SOC_DAPM_ENUM("Route", es8336_analog_input_enum); + +/* Dmic MUX */ +static const char * const es8336_dmic_txt[] = { + "dmic disable", + "dmic 
data at high level", + "dmic data at low level", +}; + +static const unsigned int es8336_dmic_values[] = { 0, 2, 3 }; + +static const struct soc_enum es8336_dmic_src_enum = + SOC_VALUE_ENUM_SINGLE(ES8336_ADC_DMIC_REG25, 0, 3, + ARRAY_SIZE(es8336_dmic_txt), + es8336_dmic_txt, + es8336_dmic_values); + +static const struct snd_kcontrol_new es8336_dmic_src_controls = + SOC_DAPM_ENUM("Route", es8336_dmic_src_enum); + +/* hp mixer mux */ +static const char *const es8336_hpmux_texts[] = { + "lin1-rin1", + "lin2-rin2", + "lin-rin with Boost", + "lin-rin with Boost and PGA" +}; + +static const unsigned int es8336_hpmux_values[] = { 0, 1, 2, 3 }; + +static const struct soc_enum es8336_left_hpmux_enum = + SOC_VALUE_ENUM_SINGLE(ES8336_HPMIX_SEL_REG13, 4, 7, + ARRAY_SIZE(es8336_hpmux_texts), + es8336_hpmux_texts, + es8336_hpmux_values); + +static const struct snd_kcontrol_new es8336_left_hpmux_controls = + SOC_DAPM_ENUM("Route", es8336_left_hpmux_enum); + +static const struct soc_enum es8336_right_hpmux_enum = + SOC_VALUE_ENUM_SINGLE(ES8336_HPMIX_SEL_REG13, 0, 7, + ARRAY_SIZE(es8336_hpmux_texts), + es8336_hpmux_texts, + es8336_hpmux_values); + +static const struct snd_kcontrol_new es8336_right_hpmux_controls = + SOC_DAPM_ENUM("Route", es8336_right_hpmux_enum); + +/* headphone Output Mixer */ +static const struct snd_kcontrol_new es8336_out_left_mix[] = { + SOC_DAPM_SINGLE("LLIN Switch", ES8336_HPMIX_SWITCH_REG14, + 6, 1, 0), + SOC_DAPM_SINGLE("Left DAC Switch", ES8336_HPMIX_SWITCH_REG14, + 7, 1, 0), +}; + +static const struct snd_kcontrol_new es8336_out_right_mix[] = { + SOC_DAPM_SINGLE("RLIN Switch", ES8336_HPMIX_SWITCH_REG14, + 2, 1, 0), + SOC_DAPM_SINGLE("Right DAC Switch", ES8336_HPMIX_SWITCH_REG14, + 3, 1, 0), +}; + +/* DAC data source mux */ +static const char * const es8336_dacsrc_texts[] = { + "LDATA TO LDAC, RDATA TO RDAC", + "LDATA TO LDAC, LDATA TO RDAC", + "RDATA TO LDAC, RDATA TO RDAC", + "RDATA TO LDAC, LDATA TO RDAC", +}; + +static const unsigned int 
es8336_dacsrc_values[] = { 0, 1, 2, 3 }; + +static const struct soc_enum es8336_dacsrc_mux_enum = + SOC_VALUE_ENUM_SINGLE(ES8336_DAC_SET1_REG30, 6, 4, + ARRAY_SIZE(es8336_dacsrc_texts), + es8336_dacsrc_texts, + es8336_dacsrc_values); +static const struct snd_kcontrol_new es8336_dacsrc_mux_controls = + SOC_DAPM_ENUM("Route", es8336_dacsrc_mux_enum); + +static const struct snd_soc_dapm_widget es8336_dapm_widgets[] = { + /* Input Lines */ + SND_SOC_DAPM_INPUT("DMIC"), + SND_SOC_DAPM_INPUT("MIC1"), + SND_SOC_DAPM_INPUT("MIC2"), + + SND_SOC_DAPM_MICBIAS("micbias", SND_SOC_NOPM, + 0, 0), + /* Input MUX */ + SND_SOC_DAPM_MUX("Differential Mux", SND_SOC_NOPM, 0, 0, + &es8336_analog_in_mux_controls), + + SND_SOC_DAPM_PGA("Line input PGA", ES8336_ADC_PDN_LINSEL_REG22, + 7, 1, NULL, 0), + + /* ADCs */ + SND_SOC_DAPM_ADC("Mono ADC", NULL, ES8336_ADC_PDN_LINSEL_REG22, 6, 1), + + /* Dmic MUX */ + SND_SOC_DAPM_MUX("Digital Mic Mux", SND_SOC_NOPM, 0, 0, + &es8336_dmic_src_controls), + + /* Digital Interface */ + SND_SOC_DAPM_AIF_OUT("I2S OUT", "I2S1 Capture", 1, + ES8336_SDP_ADCFMT_REG0A, 6, 1), + + SND_SOC_DAPM_AIF_IN("I2S IN", "I2S1 Playback", 0, + SND_SOC_NOPM, 0, 0), + + /* DACs DATA SRC MUX */ + SND_SOC_DAPM_MUX("DAC SRC Mux", SND_SOC_NOPM, 0, 0, + &es8336_dacsrc_mux_controls), + /* DACs */ + SND_SOC_DAPM_DAC("Right DAC", NULL, ES8336_DAC_PDN_REG2F, 0, 1), + SND_SOC_DAPM_DAC("Left DAC", NULL, ES8336_DAC_PDN_REG2F, 4, 1), + + /* Headphone Output Side */ + /* hpmux for hp mixer */ + SND_SOC_DAPM_MUX("Left Hp mux", SND_SOC_NOPM, 0, 0, + &es8336_left_hpmux_controls), + SND_SOC_DAPM_MUX("Right Hp mux", SND_SOC_NOPM, 0, 0, + &es8336_right_hpmux_controls), + /* Output mixer */ + SND_SOC_DAPM_MIXER("Left Hp mixer", ES8336_HPMIX_PDN_REG15, + 4, 1, &es8336_out_left_mix[0], + ARRAY_SIZE(es8336_out_left_mix)), + SND_SOC_DAPM_MIXER("Right Hp mixer", ES8336_HPMIX_PDN_REG15, + 0, 1, &es8336_out_right_mix[0], + ARRAY_SIZE(es8336_out_right_mix)), + SND_SOC_DAPM_MIXER("Left Hp mixer", 
SND_SOC_NOPM, + 4, 1, &es8336_out_left_mix[0], + ARRAY_SIZE(es8336_out_left_mix)), + SND_SOC_DAPM_MIXER("Right Hp mixer", SND_SOC_NOPM, + 0, 1, &es8336_out_right_mix[0], + ARRAY_SIZE(es8336_out_right_mix)), + + /* Output charge pump */ + SND_SOC_DAPM_PGA("HPCP L", ES8336_CPHP_OUTEN_REG17, + 6, 0, NULL, 0), + SND_SOC_DAPM_PGA("HPCP R", ES8336_CPHP_OUTEN_REG17, + 2, 0, NULL, 0), + + /* Output Driver */ + SND_SOC_DAPM_PGA("HPVOL L", ES8336_CPHP_OUTEN_REG17, + 5, 0, NULL, 0), + SND_SOC_DAPM_PGA("HPVOL R", ES8336_CPHP_OUTEN_REG17, + 1, 0, NULL, 0), + /* Output Lines */ + SND_SOC_DAPM_OUTPUT("HPOL"), + SND_SOC_DAPM_OUTPUT("HPOR"), +}; + +static const struct snd_soc_dapm_route es8336_dapm_routes[] = { + /* + * record route map + */ + {"MIC1", NULL, "micbias"}, + {"MIC2", NULL, "micbias"}, + {"DMIC", NULL, "micbias"}, + + {"Differential Mux", "lin1-rin1", "MIC1"}, + {"Differential Mux", "lin2-rin2", "MIC2"}, + {"Differential Mux", "lin1-rin1 with 20db Boost", "MIC1"}, + {"Differential Mux", "lin2-rin2 with 20db Boost", "MIC2"}, + {"Line input PGA", NULL, "Differential Mux"}, + + {"Mono ADC", NULL, "Line input PGA"}, + + {"Digital Mic Mux", "dmic disable", "Mono ADC"}, + {"Digital Mic Mux", "dmic data at high level", "DMIC"}, + {"Digital Mic Mux", "dmic data at low level", "DMIC"}, + + {"I2S OUT", NULL, "Digital Mic Mux"}, + /* + * playback route map + */ + {"DAC SRC Mux", "LDATA TO LDAC, RDATA TO RDAC", "I2S IN"}, + {"DAC SRC Mux", "LDATA TO LDAC, LDATA TO RDAC", "I2S IN"}, + {"DAC SRC Mux", "RDATA TO LDAC, RDATA TO RDAC", "I2S IN"}, + {"DAC SRC Mux", "RDATA TO LDAC, LDATA TO RDAC", "I2S IN"}, + + {"Left DAC", NULL, "DAC SRC Mux"}, + {"Right DAC", NULL, "DAC SRC Mux"}, + + {"Left Hp mux", "lin1-rin1", "MIC1"}, + {"Left Hp mux", "lin2-rin2", "MIC2"}, + {"Left Hp mux", "lin-rin with Boost", "Differential Mux"}, + {"Left Hp mux", "lin-rin with Boost and PGA", "Line input PGA"}, + + {"Right Hp mux", "lin1-rin1", "MIC1"}, + {"Right Hp mux", "lin2-rin2", "MIC2"}, + {"Right Hp 
mux", "lin-rin with Boost", "Differential Mux"}, + {"Right Hp mux", "lin-rin with Boost and PGA", "Line input PGA"}, + + {"Left Hp mixer", "LLIN Switch", "Left Hp mux"}, + {"Left Hp mixer", "Left DAC Switch", "Left DAC"}, + + {"Right Hp mixer", "RLIN Switch", "Right Hp mux"}, + {"Right Hp mixer", "Right DAC Switch", "Right DAC"}, + + {"HPCP L", NULL, "Left Hp mixer"}, + {"HPCP R", NULL, "Right Hp mixer"}, + + {"HPVOL L", NULL, "HPCP L"}, + {"HPVOL R", NULL, "HPCP R"}, + + {"HPOL", NULL, "HPVOL L"}, + {"HPOR", NULL, "HPVOL R"}, +}; + + +/* The set of rates we can generate from the above for each SYSCLK */ + +static unsigned int rates_12288[] = { + 8000, 12000, 16000, 24000, 24000, 32000, 48000, 96000, +}; + +static struct snd_pcm_hw_constraint_list constraints_12288 = { + .count = ARRAY_SIZE(rates_12288), + .list = rates_12288, +}; + +static unsigned int rates_112896[] = { + 8000, 11025, 22050, 44100, +}; + +static struct snd_pcm_hw_constraint_list constraints_112896 = { + .count = ARRAY_SIZE(rates_112896), + .list = rates_112896, +}; + +static unsigned int rates_12[] = { + 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000, + 48000, 88235, 96000, +}; + +static struct snd_pcm_hw_constraint_list constraints_12 = { + .count = ARRAY_SIZE(rates_12), + .list = rates_12, +}; + +/* + * Note that this should be called from init rather than from hw_params. 
+ */ +static int es8336_set_dai_sysclk(struct snd_soc_dai *codec_dai, + int clk_id, unsigned int freq, int dir) +{ + struct snd_soc_component *component = codec_dai->component; + struct es8336_priv *es8336 = snd_soc_component_get_drvdata(component); + + switch (freq) { + case 11289600: + case 18432000: + case 22579200: + case 36864000: + es8336->sysclk_constraints = &constraints_112896; + es8336->sysclk = freq; + return 0; + case 12288000: + case 19200000: + case 16934400: + case 24576000: + case 33868800: + es8336->sysclk_constraints = &constraints_12288; + es8336->sysclk = freq; + return 0; + case 12000000: + case 24000000: + es8336->sysclk_constraints = &constraints_12; + es8336->sysclk = freq; + return 0; + } + + return 0; +} + +static int es8336_set_dai_fmt(struct snd_soc_dai *codec_dai, + unsigned int fmt) +{ + struct snd_soc_component *component = codec_dai->component; + u8 iface = 0; + u8 adciface = 0; + u8 daciface = 0; + + iface = snd_soc_component_read32(component, ES8336_IFACE); + adciface = snd_soc_component_read32(component, ES8336_ADC_IFACE); + daciface = snd_soc_component_read32(component, ES8336_DAC_IFACE); + + /* set master/slave audio interface */ + switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { + case SND_SOC_DAIFMT_CBM_CFM: + iface |= 0x80; + break; + case SND_SOC_DAIFMT_CBS_CFS: + iface &= 0x7F; + break; + default: + return -EINVAL; + } + + /* interface format */ + switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { + case SND_SOC_DAIFMT_I2S: + adciface &= 0xFC; + daciface &= 0xFC; + break; + case SND_SOC_DAIFMT_RIGHT_J: + return -EINVAL; + case SND_SOC_DAIFMT_LEFT_J: + adciface &= 0xFC; + daciface &= 0xFC; + adciface |= 0x01; + daciface |= 0x01; + break; + case SND_SOC_DAIFMT_DSP_A: + adciface &= 0xDC; + daciface &= 0xDC; + adciface |= 0x03; + daciface |= 0x03; + break; + case SND_SOC_DAIFMT_DSP_B: + adciface &= 0xDC; + daciface &= 0xDC; + adciface |= 0x23; + daciface |= 0x23; + break; + default: + return -EINVAL; + } + + /* clock inversion */ + switch 
(fmt & SND_SOC_DAIFMT_INV_MASK) { + case SND_SOC_DAIFMT_NB_NF: + iface &= 0xDF; + adciface &= 0xDF; + daciface &= 0xDF; + break; + case SND_SOC_DAIFMT_IB_IF: + iface |= 0x20; + adciface |= 0x20; + daciface |= 0x20; + break; + case SND_SOC_DAIFMT_IB_NF: + iface |= 0x20; + adciface &= 0xDF; + daciface &= 0xDF; + break; + case SND_SOC_DAIFMT_NB_IF: + iface &= 0xDF; + adciface |= 0x20; + daciface |= 0x20; + break; + default: + return -EINVAL; + } + snd_soc_component_write(component, ES8336_IFACE, iface); + snd_soc_component_write(component, ES8336_ADC_IFACE, adciface); + snd_soc_component_write(component, ES8336_DAC_IFACE, daciface); + return 0; +} + +static int es8336_pcm_startup(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct snd_soc_component *component = dai->component; + struct es8336_priv *es8336 = snd_soc_component_get_drvdata(component); + bool playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK); + + snd_soc_component_write(component, ES8336_RESET_REG00, 0xC0); + snd_soc_component_write(component, ES8336_SYS_PDN_REG0D, 0x00); + /* es8336: both playback and capture need dac mclk */ + snd_soc_component_update_bits(component, ES8336_CLKMGR_CLKSW_REG01, + ES8336_CLKMGR_MCLK_DIV_MASK | + ES8336_CLKMGR_DAC_MCLK_MASK, + ES8336_CLKMGR_MCLK_DIV_NML | + ES8336_CLKMGR_DAC_MCLK_EN); + es8336->pwr_count++; + + if (playback) { + snd_soc_component_write(component, ES8336_SYS_LP1_REG0E, 0x3F); + snd_soc_component_write(component, ES8336_SYS_LP2_REG0F, 0x1F); + snd_soc_component_write(component, ES8336_HPMIX_SWITCH_REG14, 0x88); + snd_soc_component_write(component, ES8336_HPMIX_PDN_REG15, 0x00); + snd_soc_component_write(component, ES8336_HPMIX_VOL_REG16, 0xBB); + snd_soc_component_write(component, ES8336_CPHP_PDN2_REG1A, 0x10); + snd_soc_component_write(component, ES8336_CPHP_LDOCTL_REG1B, 0x30); + snd_soc_component_write(component, ES8336_CPHP_PDN1_REG19, 0x02); + snd_soc_component_write(component, ES8336_DAC_PDN_REG2F, 0x00); + 
snd_soc_component_write(component, ES8336_CPHP_OUTEN_REG17, 0x66); + snd_soc_component_update_bits(component, ES8336_CLKMGR_CLKSW_REG01, + ES8336_CLKMGR_DAC_MCLK_MASK | + ES8336_CLKMGR_DAC_ANALOG_MASK, + ES8336_CLKMGR_DAC_MCLK_EN | + ES8336_CLKMGR_DAC_ANALOG_EN); + msleep(50); + } else { + snd_soc_component_update_bits(component, + ES8336_ADC_PDN_LINSEL_REG22, 0xC0, 0x20); + snd_soc_component_update_bits(component, ES8336_CLKMGR_CLKSW_REG01, + ES8336_CLKMGR_ADC_MCLK_MASK | + ES8336_CLKMGR_ADC_ANALOG_MASK, + ES8336_CLKMGR_ADC_MCLK_EN | + ES8336_CLKMGR_ADC_ANALOG_EN); + } + + return 0; +} + +static void es8336_pcm_shutdown(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct snd_soc_component *component = dai->component; + struct es8336_priv *es8336 = snd_soc_component_get_drvdata(component); + bool playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK); + + if (playback) { + snd_soc_component_write(component, ES8336_CPHP_OUTEN_REG17, 0x00); + snd_soc_component_write(component, ES8336_DAC_PDN_REG2F, 0x11); + snd_soc_component_write(component, ES8336_CPHP_LDOCTL_REG1B, 0x03); + snd_soc_component_write(component, ES8336_CPHP_PDN2_REG1A, 0x22); + snd_soc_component_write(component, ES8336_CPHP_PDN1_REG19, 0x06); + snd_soc_component_write(component, ES8336_HPMIX_SWITCH_REG14, 0x00); + snd_soc_component_write(component, ES8336_HPMIX_PDN_REG15, 0x33); + snd_soc_component_write(component, ES8336_HPMIX_VOL_REG16, 0x00); + snd_soc_component_write(component, ES8336_SYS_PDN_REG0D, 0x00); + snd_soc_component_write(component, ES8336_SYS_LP1_REG0E, 0xFF); + snd_soc_component_write(component, ES8336_SYS_LP2_REG0F, 0xFF); + snd_soc_component_update_bits(component, ES8336_CLKMGR_CLKSW_REG01, + ES8336_CLKMGR_DAC_ANALOG_MASK, + ES8336_CLKMGR_DAC_ANALOG_DIS); + } else { + snd_soc_component_write(component, ES8336_ADC_PDN_LINSEL_REG22, 0xc0); + snd_soc_component_update_bits(component, ES8336_CLKMGR_CLKSW_REG01, + ES8336_CLKMGR_ADC_MCLK_MASK | + 
ES8336_CLKMGR_ADC_ANALOG_MASK, + ES8336_CLKMGR_ADC_MCLK_DIS | + ES8336_CLKMGR_ADC_ANALOG_DIS); + } + + if (--es8336->pwr_count == 0) { + if (!es8336->hp_inserted) + snd_soc_component_write(component, ES8336_SYS_PDN_REG0D, 0x3F); + snd_soc_component_write(component, ES8336_CLKMGR_CLKSW_REG01, 0xF3); + } +} + + +static int es8336_pcm_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct snd_soc_dai *dai) +{ + struct snd_soc_component *component = dai->component; + int val = 0; + + switch (params_format(params)) { + case SNDRV_PCM_FORMAT_S16_LE: + val = ES8336_DACWL_16; + break; + case SNDRV_PCM_FORMAT_S20_3LE: + val = ES8336_DACWL_20; + break; + case SNDRV_PCM_FORMAT_S24_LE: + val = ES8336_DACWL_24; + break; + case SNDRV_PCM_FORMAT_S32_LE: + val = ES8336_DACWL_32; + break; + default: + val = ES8336_DACWL_16; + break; + } + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + snd_soc_component_update_bits(component, ES8336_SDP_DACFMT_REG0B, + ES8336_DACWL_MASK, val); + else + snd_soc_component_update_bits(component, ES8336_SDP_ADCFMT_REG0A, + ES8336_ADCWL_MASK, val); + + return 0; +} + +static int es8336_mute(struct snd_soc_dai *dai, int mute) +{ + struct snd_soc_component *component = dai->component; + struct es8336_priv *es8336 = snd_soc_component_get_drvdata(component); + + es8336->muted = mute; + if (mute) { + es8336_enable_spk(es8336, false); + msleep(100); + snd_soc_component_write(component, ES8336_DAC_SET1_REG30, 0x20); + } else if (dai->playback_active) { + snd_soc_component_write(component, ES8336_DAC_SET1_REG30, 0x00); + msleep(130); + if (!es8336->hp_inserted) + es8336_enable_spk(es8336, true); + } + return 0; +} + +static int es8336_set_bias_level(struct snd_soc_component *component, + enum snd_soc_bias_level level) +{ + struct es8336_priv *es8336 = snd_soc_component_get_drvdata(component); + + switch (level) { + case SND_SOC_BIAS_ON: + break; + + case SND_SOC_BIAS_PREPARE: + break; + + case SND_SOC_BIAS_STANDBY: + 
break; + + case SND_SOC_BIAS_OFF: + snd_soc_component_write(component, ES8336_CPHP_OUTEN_REG17, 0x00); + snd_soc_component_write(component, ES8336_DAC_PDN_REG2F, 0x11); + snd_soc_component_write(component, ES8336_CPHP_LDOCTL_REG1B, 0x03); + snd_soc_component_write(component, ES8336_CPHP_PDN2_REG1A, 0x22); + snd_soc_component_write(component, ES8336_CPHP_PDN1_REG19, 0x06); + snd_soc_component_write(component, ES8336_HPMIX_SWITCH_REG14, 0x00); + snd_soc_component_write(component, ES8336_HPMIX_PDN_REG15, 0x33); + snd_soc_component_write(component, ES8336_HPMIX_VOL_REG16, 0x00); + snd_soc_component_write(component, ES8336_ADC_PDN_LINSEL_REG22, 0xC0); + if (!es8336->hp_inserted) + snd_soc_component_write(component, ES8336_SYS_PDN_REG0D, 0x3F); + snd_soc_component_write(component, ES8336_SYS_LP1_REG0E, 0x3F); + snd_soc_component_write(component, ES8336_SYS_LP2_REG0F, 0x1F); + snd_soc_component_write(component, ES8336_RESET_REG00, 0x00); + break; + } + + return 0; +} + +#define es8336_RATES SNDRV_PCM_RATE_8000_96000 + +#define es8336_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\ + SNDRV_PCM_FMTBIT_S24_LE) + +static const struct snd_soc_dai_ops es8336_ops = { + .startup = es8336_pcm_startup, + .hw_params = es8336_pcm_hw_params, + .set_fmt = es8336_set_dai_fmt, + .set_sysclk = es8336_set_dai_sysclk, + .digital_mute = es8336_mute, + .shutdown = es8336_pcm_shutdown, +}; + +static struct snd_soc_dai_driver es8336_dai = { + .name = "es8336-hifi", + .playback = { + .stream_name = "Playback", + .channels_min = 1, + .channels_max = 2, + .rates = es8336_RATES, + .formats = es8336_FORMATS, + }, + .capture = { + .stream_name = "Capture", + .channels_min = 1, + .channels_max = 2, + .rates = es8336_RATES, + .formats = es8336_FORMATS, + }, + .ops = &es8336_ops, + .symmetric_rates = 1, +}; + +static int es8336_init_regs(struct snd_soc_component *component) +{ + snd_soc_component_write(component, ES8336_RESET_REG00, 0x3f); + usleep_range(5000, 5500); + 
snd_soc_component_write(component, ES8336_RESET_REG00, 0x00); + snd_soc_component_write(component, ES8336_SYS_VMIDSEL_REG0C, 0xFF); + msleep(30); + snd_soc_component_write(component, ES8336_CLKMGR_CLKSEL_REG02, 0x08); + snd_soc_component_write(component, ES8336_CLKMGR_ADCOSR_REG03, 0x20); + snd_soc_component_write(component, ES8336_CLKMGR_ADCDIV1_REG04, 0x11); + snd_soc_component_write(component, ES8336_CLKMGR_ADCDIV2_REG05, 0x00); + snd_soc_component_write(component, ES8336_CLKMGR_DACDIV1_REG06, 0x11); + snd_soc_component_write(component, ES8336_CLKMGR_DACDIV2_REG07, 0x00); + snd_soc_component_write(component, ES8336_CLKMGR_CPDIV_REG08, 0x00); + snd_soc_component_write(component, ES8336_SDP_MS_BCKDIV_REG09, 0x04); + snd_soc_component_write(component, ES8336_CLKMGR_CLKSW_REG01, 0x7F); + snd_soc_component_write(component, ES8336_CAL_TYPE_REG1C, 0x0F); + snd_soc_component_write(component, ES8336_CAL_HPLIV_REG1E, 0x90); + snd_soc_component_write(component, ES8336_CAL_HPRIV_REG1F, 0x90); + snd_soc_component_write(component, ES8336_ADC_VOLUME_REG27, 0x00); + snd_soc_component_write(component, ES8336_ADC_PDN_LINSEL_REG22, 0xc0); + snd_soc_component_write(component, ES8336_ADC_D2SEPGA_REG24, 0x00); + snd_soc_component_write(component, ES8336_ADC_DMIC_REG25, 0x08); + snd_soc_component_write(component, ES8336_DAC_SET2_REG31, 0x20); + snd_soc_component_write(component, ES8336_DAC_SET3_REG32, 0x00); + snd_soc_component_write(component, ES8336_DAC_VOLL_REG33, 0x00); + snd_soc_component_write(component, ES8336_DAC_VOLR_REG34, 0x00); + snd_soc_component_write(component, ES8336_SDP_ADCFMT_REG0A, 0x00); + snd_soc_component_write(component, ES8336_SDP_DACFMT_REG0B, 0x00); + snd_soc_component_write(component, ES8336_SYS_VMIDLOW_REG10, 0x11); + snd_soc_component_write(component, ES8336_SYS_VSEL_REG11, 0xFC); + snd_soc_component_write(component, ES8336_SYS_REF_REG12, 0x28); + snd_soc_component_write(component, ES8336_SYS_LP1_REG0E, 0x04); + snd_soc_component_write(component, 
ES8336_SYS_LP2_REG0F, 0x0C); + snd_soc_component_write(component, ES8336_DAC_PDN_REG2F, 0x11); + snd_soc_component_write(component, ES8336_HPMIX_SEL_REG13, 0x00); + snd_soc_component_write(component, ES8336_HPMIX_SWITCH_REG14, 0x88); + snd_soc_component_write(component, ES8336_HPMIX_PDN_REG15, 0x00); + snd_soc_component_write(component, ES8336_HPMIX_VOL_REG16, 0xBB); + snd_soc_component_write(component, ES8336_CPHP_PDN2_REG1A, 0x10); + snd_soc_component_write(component, ES8336_CPHP_LDOCTL_REG1B, 0x30); + snd_soc_component_write(component, ES8336_CPHP_PDN1_REG19, 0x02); + snd_soc_component_write(component, ES8336_CPHP_ICAL_VOL_REG18, 0x00); + snd_soc_component_write(component, ES8336_GPIO_SEL_REG4D, 0x02); + snd_soc_component_write(component, ES8336_GPIO_DEBUNCE_INT_REG4E, 0x02); + snd_soc_component_write(component, ES8336_TESTMODE_REG50, 0xA0); + snd_soc_component_write(component, ES8336_TEST1_REG51, 0x00); + snd_soc_component_write(component, ES8336_TEST2_REG52, 0x00); + snd_soc_component_write(component, ES8336_SYS_PDN_REG0D, 0x00); + snd_soc_component_write(component, ES8336_RESET_REG00, 0xC0); + msleep(50); + snd_soc_component_write(component, ES8336_ADC_PGAGAIN_REG23, 0x60); + snd_soc_component_write(component, ES8336_ADC_D2SEPGA_REG24, 0x01); + /* adc ds mode, HPF enable */ + snd_soc_component_write(component, ES8336_ADC_DMIC_REG25, 0x08); + snd_soc_component_write(component, ES8336_ADC_ALC1_REG29, 0xcd); + snd_soc_component_write(component, ES8336_ADC_ALC2_REG2A, 0x08); + snd_soc_component_write(component, ES8336_ADC_ALC3_REG2B, 0xa0); + snd_soc_component_write(component, ES8336_ADC_ALC4_REG2C, 0x05); + snd_soc_component_write(component, ES8336_ADC_ALC5_REG2D, 0x06); + snd_soc_component_write(component, ES8336_ADC_ALC6_REG2E, 0x61); + return 0; +} + +static int es8336_suspend(struct snd_soc_component *component) +{ + return 0; +} + +static int es8336_resume(struct snd_soc_component *component) +{ + struct es8336_priv *es8336 = 
snd_soc_component_get_drvdata(component); + int ret; + + es8336_reset(component); /* UPDATED BY DAVID,15-3-5 */ + ret = snd_soc_component_read32(component, ES8336_CLKMGR_ADCDIV2_REG05); + if (!ret) { + es8336_init_regs(component); + snd_soc_component_write(component, ES8336_GPIO_SEL_REG4D, 0x02); + /* max debance time, enable interrupt, low active */ + snd_soc_component_write(component, ES8336_GPIO_DEBUNCE_INT_REG4E, 0xf3); + /* es8336_set_bias_level(component, SND_SOC_BIAS_OFF); */ + snd_soc_component_write(component, ES8336_CPHP_OUTEN_REG17, 0x00); + snd_soc_component_write(component, ES8336_DAC_PDN_REG2F, 0x11); + snd_soc_component_write(component, ES8336_CPHP_LDOCTL_REG1B, 0x03); + snd_soc_component_write(component, ES8336_CPHP_PDN2_REG1A, 0x22); + snd_soc_component_write(component, ES8336_CPHP_PDN1_REG19, 0x06); + snd_soc_component_write(component, ES8336_HPMIX_SWITCH_REG14, 0x00); + snd_soc_component_write(component, ES8336_HPMIX_PDN_REG15, 0x33); + snd_soc_component_write(component, ES8336_HPMIX_VOL_REG16, 0x00); + if (!es8336->hp_inserted) + snd_soc_component_write(component, ES8336_SYS_PDN_REG0D, 0x3F); + snd_soc_component_write(component, ES8336_SYS_LP1_REG0E, 0xFF); + snd_soc_component_write(component, ES8336_SYS_LP2_REG0F, 0xFF); + snd_soc_component_write(component, ES8336_CLKMGR_CLKSW_REG01, 0xF3); + snd_soc_component_write(component, ES8336_ADC_PDN_LINSEL_REG22, 0xc0); + } + return 0; +} + +static irqreturn_t es8336_irq_handler(int irq, void *data) +{ + struct es8336_priv *es8336 = data; + + queue_delayed_work(system_power_efficient_wq, &es8336->work, + msecs_to_jiffies(es8336->debounce_time)); + + return IRQ_HANDLED; +} + +static void hp_work(struct work_struct *work) +{ + struct es8336_priv *es8336; + int enable; + + es8336 = container_of(work, struct es8336_priv, work.work); + enable = gpio_get_value(es8336->hp_det_gpio); + if (es8336->hp_det_invert) + enable = !enable; + + es8336->hp_inserted = enable ? 
true : false; + if (!es8336->muted) { + if (es8336->hp_inserted) + es8336_enable_spk(es8336, false); + else + es8336_enable_spk(es8336, true); + } +} + +static int es8336_probe(struct snd_soc_component *component) +{ + struct es8336_priv *es8336 = snd_soc_component_get_drvdata(component); + int ret = 0; + + es8336_component = component; + ret = snd_soc_component_read32(component, ES8336_CLKMGR_ADCDIV2_REG05); + if (!ret) { + es8336_reset(component); /* UPDATED BY DAVID,15-3-5 */ + ret = snd_soc_component_read32(component, ES8336_CLKMGR_ADCDIV2_REG05); + if (!ret) { + es8336_init_regs(component); + snd_soc_component_write(component, ES8336_GPIO_SEL_REG4D, 0x02); + /* max debance time, enable interrupt, low active */ + snd_soc_component_write(component, + ES8336_GPIO_DEBUNCE_INT_REG4E, 0xf3); + + /* es8336_set_bias_level(codec, SND_SOC_BIAS_OFF); */ + snd_soc_component_write(component, ES8336_CPHP_OUTEN_REG17, 0x00); + snd_soc_component_write(component, ES8336_DAC_PDN_REG2F, 0x11); + snd_soc_component_write(component, ES8336_CPHP_LDOCTL_REG1B, 0x03); + snd_soc_component_write(component, ES8336_CPHP_PDN2_REG1A, 0x22); + snd_soc_component_write(component, ES8336_CPHP_PDN1_REG19, 0x06); + snd_soc_component_write(component, ES8336_HPMIX_SWITCH_REG14, 0x00); + snd_soc_component_write(component, ES8336_HPMIX_PDN_REG15, 0x33); + snd_soc_component_write(component, ES8336_HPMIX_VOL_REG16, 0x00); + if (!es8336->hp_inserted) + snd_soc_component_write(component, ES8336_SYS_PDN_REG0D, + 0x3F); + snd_soc_component_write(component, ES8336_SYS_LP1_REG0E, 0xFF); + snd_soc_component_write(component, ES8336_SYS_LP2_REG0F, 0xFF); + snd_soc_component_write(component, ES8336_CLKMGR_CLKSW_REG01, 0xF3); + snd_soc_component_write(component, + ES8336_ADC_PDN_LINSEL_REG22, 0xc0); + } + } + + return ret; +} + +static void es8336_remove(struct snd_soc_component *component) +{ + es8336_set_bias_level(component, SND_SOC_BIAS_OFF); +} + +const struct regmap_config es8336_regmap_config = { + 
.reg_bits = 8, + .val_bits = 8, + .max_register = ES8336_TEST3_REG53, + .cache_type = REGCACHE_RBTREE, + .reg_defaults = es8336_reg_defaults, + .num_reg_defaults = ARRAY_SIZE(es8336_reg_defaults), +}; + +static const struct snd_soc_component_driver soc_component_dev_es8336 = { + .probe = es8336_probe, + .remove = es8336_remove, + .suspend = es8336_suspend, + .resume = es8336_resume, + .set_bias_level = es8336_set_bias_level, + + .controls = es8336_snd_controls, + .num_controls = ARRAY_SIZE(es8336_snd_controls), + .dapm_widgets = es8336_dapm_widgets, + .num_dapm_widgets = ARRAY_SIZE(es8336_dapm_widgets), + .dapm_routes = es8336_dapm_routes, + .num_dapm_routes = ARRAY_SIZE(es8336_dapm_routes), +}; + +static int es8336_i2c_probe(struct i2c_client *i2c, + const struct i2c_device_id *id) +{ + struct es8336_priv *es8336; + struct gpio_desc *gpiod; + int ret = -1; + int hp_irq; + + es8336 = devm_kzalloc(&i2c->dev, sizeof(*es8336), GFP_KERNEL); + if (!es8336) + return -ENOMEM; + + es8336->debounce_time = 200; + es8336->hp_det_invert = 0; + es8336->pwr_count = 0; + es8336->hp_inserted = false; + es8336->muted = true; + + es8336->regmap = devm_regmap_init_i2c(i2c, &es8336_regmap_config); + if (IS_ERR(es8336->regmap)) { + ret = PTR_ERR(es8336->regmap); + dev_err(&i2c->dev, "Failed to init regmap: %d\n", ret); + return ret; + } + + i2c_set_clientdata(i2c, es8336); + + gpiod = devm_gpiod_get_index_optional(&i2c->dev, "sel", 0, + GPIOD_OUT_HIGH); + + if (!gpiod) { + dev_info(&i2c->dev, "Can not get spk_ctl_gpio\n"); + es8336->spk_ctl_gpio = INVALID_GPIO; + } else { + es8336->spk_ctl_gpio = desc_to_gpio(gpiod); + es8336->spk_active_level = 0; + es8336_enable_spk(es8336, false); + } + + gpiod = devm_gpiod_get_index_optional(&i2c->dev, "det", 0, + GPIOD_IN); + + if (!gpiod) { + dev_info(&i2c->dev, "Can not get hp_det_gpio\n"); + es8336->hp_det_gpio = INVALID_GPIO; + } else { + es8336->hp_det_gpio = desc_to_gpio(gpiod); + INIT_DELAYED_WORK(&es8336->work, hp_work); + 
es8336->hp_det_invert = 0; + hp_irq = gpio_to_irq(es8336->hp_det_gpio); + ret = devm_request_threaded_irq(&i2c->dev, hp_irq, NULL, + es8336_irq_handler, + IRQF_TRIGGER_FALLING | + IRQF_TRIGGER_RISING | + IRQF_ONESHOT, + "es8336_interrupt", es8336); + if (ret < 0) { + dev_err(&i2c->dev, "request_irq failed: %d\n", ret); + return ret; + } + + schedule_delayed_work(&es8336->work, + msecs_to_jiffies(es8336->debounce_time)); + } + + ret = snd_soc_register_component(&i2c->dev, + &soc_component_dev_es8336, + &es8336_dai, 1); + + return ret; +} + +static int es8336_i2c_remove(struct i2c_client *client) +{ + kfree(i2c_get_clientdata(client)); + return 0; +} + +static void es8336_i2c_shutdown(struct i2c_client *client) +{ + struct es8336_priv *es8336 = i2c_get_clientdata(client); + + if (es8336_component != NULL) { + es8336_enable_spk(es8336, false); + msleep(20); + es8336_set_bias_level(es8336_component, SND_SOC_BIAS_OFF); + } +} + +static const struct i2c_device_id es8336_i2c_id[] = { + {"es8336", 0}, + {"10ES8336:00", 0}, + {"10ES8336", 0}, + { } +}; +MODULE_DEVICE_TABLE(i2c, es8336_i2c_id); + +static const struct of_device_id es8336_of_match[] = { + { .compatible = "everest,es8336", }, + { } +}; +MODULE_DEVICE_TABLE(of, es8336_of_match); + +static const struct acpi_device_id es8336_acpi_match[] = { + { "ESSX8336", 0}, + { } +}; +MODULE_DEVICE_TABLE(acpi, es8336_acpi_match); + +static struct i2c_driver es8336_i2c_driver = { + .driver = { + .name = "es8336", + .of_match_table = es8336_of_match, + .acpi_match_table = es8336_acpi_match, + }, + .probe = es8336_i2c_probe, + .remove = es8336_i2c_remove, + .shutdown = es8336_i2c_shutdown, + .id_table = es8336_i2c_id, +}; + +module_i2c_driver(es8336_i2c_driver); +MODULE_DESCRIPTION("ASoC es8336 driver"); +MODULE_LICENSE("GPL"); diff --git a/sound/soc/codecs/es8336.h b/sound/soc/codecs/es8336.h new file mode 100644 index 000000000000..d2c74c11ffd1 --- /dev/null +++ b/sound/soc/codecs/es8336.h @@ -0,0 +1,161 @@ +/* 
SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright Everest Semiconductor Co.,Ltd + * Phytium Information Technology Co.,Ltd + * + * Author: David Yang + * Yiqun Zhang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef _ES8336_H +#define _ES8336_H + +/* ES8336 register space */ +/* + * RESET Control + */ +#define ES8336_RESET_REG00 0x00 +/* + * Clock Managerment + */ +#define ES8336_CLKMGR_CLKSW_REG01 0x01 +#define ES8336_CLKMGR_CLKSEL_REG02 0x02 +#define ES8336_CLKMGR_ADCOSR_REG03 0x03 +#define ES8336_CLKMGR_ADCDIV1_REG04 0x04 +#define ES8336_CLKMGR_ADCDIV2_REG05 0x05 +#define ES8336_CLKMGR_DACDIV1_REG06 0x06 +#define ES8336_CLKMGR_DACDIV2_REG07 0x07 +#define ES8336_CLKMGR_CPDIV_REG08 0x08 +/* + * SDP Control + */ +#define ES8336_SDP_MS_BCKDIV_REG09 0x09 +#define ES8336_SDP_ADCFMT_REG0A 0x0a +#define ES8336_SDP_DACFMT_REG0B 0x0b +/* + * System Control + */ +#define ES8336_SYS_VMIDSEL_REG0C 0x0c +#define ES8336_SYS_PDN_REG0D 0x0d +#define ES8336_SYS_LP1_REG0E 0x0e +#define ES8336_SYS_LP2_REG0F 0x0f +#define ES8336_SYS_VMIDLOW_REG10 0x10 +#define ES8336_SYS_VSEL_REG11 0x11 +#define ES8336_SYS_REF_REG12 0x12 +/* + * HP Mixer + */ +#define ES8336_HPMIX_SEL_REG13 0x13 +#define ES8336_HPMIX_SWITCH_REG14 0x14 +#define ES8336_HPMIX_PDN_REG15 0x15 +#define ES8336_HPMIX_VOL_REG16 0x16 +/* + * Charge Pump Headphone driver + */ +#define ES8336_CPHP_OUTEN_REG17 0x17 +#define ES8336_CPHP_ICAL_VOL_REG18 0x18 +#define ES8336_CPHP_PDN1_REG19 0x19 +#define ES8336_CPHP_PDN2_REG1A 0x1a +#define ES8336_CPHP_LDOCTL_REG1B 0x1b +/* + * Calibration + */ +#define ES8336_CAL_TYPE_REG1C 0x1c +#define ES8336_CAL_SET_REG1D 0x1d +#define ES8336_CAL_HPLIV_REG1E 0x1e +#define ES8336_CAL_HPRIV_REG1F 0x1f +#define ES8336_CAL_HPLMV_REG20 0x20 +#define ES8336_CAL_HPRMV_REG21 0x21 +/* + * ADC Control + */ +#define 
ES8336_ADC_PDN_LINSEL_REG22 0x22 +#define ES8336_ADC_PGAGAIN_REG23 0x23 +#define ES8336_ADC_D2SEPGA_REG24 0x24 +#define ES8336_ADC_DMIC_REG25 0x25 +#define ES8336_ADC_MUTE_REG26 0x26 +#define ES8336_ADC_VOLUME_REG27 0x27 +#define ES8336_ADC_ALC1_REG29 0x29 +#define ES8336_ADC_ALC2_REG2A 0x2a +#define ES8336_ADC_ALC3_REG2B 0x2b +#define ES8336_ADC_ALC4_REG2C 0x2c +#define ES8336_ADC_ALC5_REG2D 0x2d +#define ES8336_ADC_ALC6_REG2E 0x2e +/* + * DAC Control + */ +#define ES8336_DAC_PDN_REG2F 0x2f +#define ES8336_DAC_SET1_REG30 0x30 +#define ES8336_DAC_SET2_REG31 0x31 +#define ES8336_DAC_SET3_REG32 0x32 +#define ES8336_DAC_VOLL_REG33 0x33 +#define ES8336_DAC_VOLR_REG34 0x34 +/* + * GPIO + */ +#define ES8336_GPIO_SEL_REG4D 0x4D +#define ES8336_GPIO_DEBUNCE_INT_REG4E 0x4E +#define ES8336_GPIO_FLAG 0x4F +/* + * TEST MODE + */ +#define ES8336_TESTMODE_REG50 0x50 +#define ES8336_TEST1_REG51 0x51 +#define ES8336_TEST2_REG52 0x52 +#define ES8336_TEST3_REG53 0x53 + +#define ES8336_IFACE ES8336_SDP_MS_BCKDIV_REG09 +#define ES8336_ADC_IFACE ES8336_SDP_ADCFMT_REG0A +#define ES8336_DAC_IFACE ES8336_SDP_DACFMT_REG0B + +#define ES8336_REGNUM 84 + +/* REGISTER 0X01 CLOCK MANAGER */ +#define ES8336_CLKMGR_MCLK_DIV_MASK (0X1<<7) +#define ES8336_CLKMGR_MCLK_DIV_NML (0X0<<7) +#define ES8336_CLKMGR_MCLK_DIV_1 (0X1<<7) +#define ES8336_CLKMGR_ADC_MCLK_MASK (0X1<<3) +#define ES8336_CLKMGR_ADC_MCLK_EN (0X1<<3) +#define ES8336_CLKMGR_ADC_MCLK_DIS (0X0<<3) +#define ES8336_CLKMGR_DAC_MCLK_MASK (0X1<<2) +#define ES8336_CLKMGR_DAC_MCLK_EN (0X1<<2) +#define ES8336_CLKMGR_DAC_MCLK_DIS (0X0<<2) +#define ES8336_CLKMGR_ADC_ANALOG_MASK (0X1<<1) +#define ES8336_CLKMGR_ADC_ANALOG_EN (0X1<<1) +#define ES8336_CLKMGR_ADC_ANALOG_DIS (0X0<<1) +#define ES8336_CLKMGR_DAC_ANALOG_MASK (0X1<<0) +#define ES8336_CLKMGR_DAC_ANALOG_EN (0X1<<0) +#define ES8336_CLKMGR_DAC_ANALOG_DIS (0X0<<0) + +/* REGISTER 0X0A */ +#define ES8336_ADCWL_MASK (0x7 << 2) +#define ES8336_ADCWL_32 (0x4 << 2) +#define ES8336_ADCWL_24 (0x0 << 2) 
+#define ES8336_ADCWL_20 (0x1 << 2) +#define ES8336_ADCWL_18 (0x2 << 2) +#define ES8336_ADCWL_16 (0x3 << 2) +#define ES8336_ADCFMT_MASK (0x3 << 0) +#define ES8336_ADCFMT_I2S (0x0 << 0) +#define ES8336_ADCWL_LEFT (0x1 << 0) +#define ES8336_ADCWL_RIGHT (0x2 << 0) +#define ES8336_ADCWL_PCM (0x3 << 0) + +/* REGISTER 0X0B */ +#define ES8336_DACWL_MASK (0x7 << 2) +#define ES8336_DACWL_32 (0x4 << 2) +#define ES8336_DACWL_24 (0x0 << 2) +#define ES8336_DACWL_20 (0x1 << 2) +#define ES8336_DACWL_18 (0x2 << 2) +#define ES8336_DACWL_16 (0x3 << 2) +#define ES8336_DACFMT_MASK (0x3 << 0) +#define ES8336_DACFMT_I2S (0x0 << 0) +#define ES8336_DACWL_LEFT (0x1 << 0) +#define ES8336_DACWL_RIGHT (0x2 << 0) +#define ES8336_DACWL_PCM (0x3 << 0) + +#endif diff --git a/sound/soc/codecs/es8388.c b/sound/soc/codecs/es8388.c new file mode 100644 index 000000000000..c4a4d19219b4 --- /dev/null +++ b/sound/soc/codecs/es8388.c @@ -0,0 +1,819 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * es8388.c -- ES8388 ALSA SoC Audio driver + * + * Copyright 2021 Phytium Technology + * Author: Yiqun Zhang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "es8388.h" +#include +#include + +static const unsigned int rates_12288[] = { + 8000, 12000, 16000, 24000, 32000, 48000, 96000, +}; + +static const int ratios_12288[] = { + 10, 7, 6, 4, 3, 2, 0, +}; + +static const struct snd_pcm_hw_constraint_list constraints_12288 = { + .count = ARRAY_SIZE(rates_12288), + .list = rates_12288, +}; + +static const unsigned int rates_11289[] = { + 8018, 11025, 22050, 44100, 88200, +}; + +static const int ratios_11289[] = { + 9, 7, 4, 2, 0, +}; + +static const struct snd_pcm_hw_constraint_list constraints_11289 = { + .count = ARRAY_SIZE(rates_11289), + .list = rates_11289, +}; + +#define ES8388_RATES (SNDRV_PCM_RATE_192000 | \ + SNDRV_PCM_RATE_96000 | \ + SNDRV_PCM_RATE_88200 | \ + SNDRV_PCM_RATE_8000_48000) +#define ES8388_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \ + SNDRV_PCM_FMTBIT_S18_3LE | \ + SNDRV_PCM_FMTBIT_S20_3LE | \ + SNDRV_PCM_FMTBIT_S24_LE | \ + SNDRV_PCM_FMTBIT_S32_LE) + +struct es8388_priv { + struct regmap *regmap; + struct clk *clk; + int playback_fs; + bool deemph; + int mclkdiv2; + const struct snd_pcm_hw_constraint_list *sysclk_constraints; + const int *mclk_ratios; + bool master; +}; + +/* + * ES8388 Controls + */ +static const char * const adcpol_txt[] = {"Normal", "L Invert", "R Invert", + "L + R Invert"}; +static SOC_ENUM_SINGLE_DECL(adcpol, + ES8388_ADCCONTROL6, 6, adcpol_txt); + +static const DECLARE_TLV_DB_SCALE(play_tlv, -3000, 100, 0); +static const DECLARE_TLV_DB_SCALE(dac_adc_tlv, -9600, 50, 0); +static const DECLARE_TLV_DB_SCALE(pga_tlv, 0, 300, 0); +static const DECLARE_TLV_DB_SCALE(bypass_tlv, -1500, 300, 0); +static const DECLARE_TLV_DB_SCALE(mic_tlv, 0, 300, 0); + +static const struct { + int rate; + unsigned int val; +} deemph_settings[] = { + { 0, ES8388_DACCONTROL6_DEEMPH_OFF }, + { 32000, ES8388_DACCONTROL6_DEEMPH_32k }, + { 44100, 
ES8388_DACCONTROL6_DEEMPH_44_1k }, + { 48000, ES8388_DACCONTROL6_DEEMPH_48k }, +}; + +static int es8388_set_deemph(struct snd_soc_component *component) +{ + struct es8388_priv *es8388 = snd_soc_component_get_drvdata(component); + int val, i, best; + + /* + * If we're using deemphasis select the nearest available sample + * rate. + */ + if (es8388->deemph) { + best = 0; + for (i = 1; i < ARRAY_SIZE(deemph_settings); i++) { + if (abs(deemph_settings[i].rate - es8388->playback_fs) < + abs(deemph_settings[best].rate - es8388->playback_fs)) + best = i; + } + + val = deemph_settings[best].val; + } else { + val = ES8388_DACCONTROL6_DEEMPH_OFF; + } + + dev_dbg(component->dev, "Set deemphasis %d\n", val); + + return snd_soc_component_update_bits(component, ES8388_DACCONTROL6, + ES8388_DACCONTROL6_DEEMPH_MASK, val); +} + +static int es8388_get_deemph(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol); + struct es8388_priv *es8388 = snd_soc_component_get_drvdata(component); + + ucontrol->value.integer.value[0] = es8388->deemph; + return 0; +} + +static int es8388_put_deemph(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol); + struct es8388_priv *es8388 = snd_soc_component_get_drvdata(component); + unsigned int deemph = ucontrol->value.integer.value[0]; + int ret; + + if (deemph > 1) + return -EINVAL; + + ret = es8388_set_deemph(component); + if (ret < 0) + return ret; + + es8388->deemph = deemph; + + return 0; +} + +static const struct snd_kcontrol_new es8388_snd_controls[] = { + SOC_DOUBLE_R_TLV("Capture Digital Volume", + ES8388_ADCCONTROL8, ES8388_ADCCONTROL9, + 0, 0xc0, 1, dac_adc_tlv), + + SOC_SINGLE_BOOL_EXT("DAC Deemphasis Switch", 0, + es8388_get_deemph, es8388_put_deemph), + + SOC_ENUM("Capture Polarity", adcpol), + + SOC_SINGLE_TLV("Left Mixer Left Bypass Volume", + 
ES8388_DACCONTROL17, 3, 7, 1, bypass_tlv), + SOC_SINGLE_TLV("Left Mixer Right Bypass Volume", + ES8388_DACCONTROL19, 3, 7, 1, bypass_tlv), + SOC_SINGLE_TLV("Right Mixer Left Bypass Volume", + ES8388_DACCONTROL18, 3, 7, 1, bypass_tlv), + SOC_SINGLE_TLV("Right Mixer Right Bypass Volume", + ES8388_DACCONTROL20, 3, 7, 1, bypass_tlv), + + SOC_DOUBLE_R_TLV("PCM Volume", + ES8388_LDACVOL, ES8388_RDACVOL, + 0, ES8388_DACVOL_MAX, 1, dac_adc_tlv), + + SOC_DOUBLE_R_TLV("Output 1 Playback Volume", + ES8388_LOUT1VOL, ES8388_ROUT1VOL, + 0, ES8388_OUT1VOL_MAX, 0, play_tlv), + + SOC_DOUBLE_R_TLV("Output 2 Playback Volume", + ES8388_LOUT2VOL, ES8388_ROUT2VOL, + 0, ES8388_OUT2VOL_MAX, 0, play_tlv), + + SOC_DOUBLE_TLV("Mic PGA Volume", ES8388_ADCCONTROL1, + 4, 0, 8, 0, mic_tlv), +}; + +/* + * DAPM Controls + */ +static const char * const es8388_line_texts[] = { + "Line 1", "Line 2", "PGA", "Differential"}; + +static const struct soc_enum es8388_lline_enum = + SOC_ENUM_SINGLE(ES8388_DACCONTROL16, 3, + ARRAY_SIZE(es8388_line_texts), + es8388_line_texts); +static const struct snd_kcontrol_new es8388_left_line_controls = + SOC_DAPM_ENUM("Route", es8388_lline_enum); + +static const struct soc_enum es8388_rline_enum = + SOC_ENUM_SINGLE(ES8388_DACCONTROL16, 0, + ARRAY_SIZE(es8388_line_texts), + es8388_line_texts); +static const struct snd_kcontrol_new es8388_right_line_controls = + SOC_DAPM_ENUM("Route", es8388_lline_enum); + +/* Left Mixer */ +static const struct snd_kcontrol_new es8388_left_mixer_controls[] = { + SOC_DAPM_SINGLE("Playback Switch", ES8388_DACCONTROL17, 7, 1, 0), + SOC_DAPM_SINGLE("Left Bypass Switch", ES8388_DACCONTROL17, 6, 1, 0), + SOC_DAPM_SINGLE("Right Playback Switch", ES8388_DACCONTROL18, 7, 1, 0), + SOC_DAPM_SINGLE("Right Bypass Switch", ES8388_DACCONTROL18, 6, 1, 0), +}; + +/* Right Mixer */ +static const struct snd_kcontrol_new es8388_right_mixer_controls[] = { + SOC_DAPM_SINGLE("Left Playback Switch", ES8388_DACCONTROL19, 7, 1, 0), + SOC_DAPM_SINGLE("Left Bypass 
Switch", ES8388_DACCONTROL19, 6, 1, 0), + SOC_DAPM_SINGLE("Playback Switch", ES8388_DACCONTROL20, 7, 1, 0), + SOC_DAPM_SINGLE("Right Bypass Switch", ES8388_DACCONTROL20, 6, 1, 0), +}; + +static const char * const es8388_pga_sel[] = { + "Line 1", "Line 2", "Line 3", "Differential"}; + +/* Left PGA Mux */ +static const struct soc_enum es8388_lpga_enum = + SOC_ENUM_SINGLE(ES8388_ADCCONTROL2, 6, + ARRAY_SIZE(es8388_pga_sel), + es8388_pga_sel); +static const struct snd_kcontrol_new es8388_left_pga_controls = + SOC_DAPM_ENUM("Route", es8388_lpga_enum); + +/* Right PGA Mux */ +static const struct soc_enum es8388_rpga_enum = + SOC_ENUM_SINGLE(ES8388_ADCCONTROL2, 4, + ARRAY_SIZE(es8388_pga_sel), + es8388_pga_sel); +static const struct snd_kcontrol_new es8388_right_pga_controls = + SOC_DAPM_ENUM("Route", es8388_rpga_enum); + +/* Differential Mux */ +static const char * const es8388_diff_sel[] = {"Line 1", "Line 2"}; +static SOC_ENUM_SINGLE_DECL(diffmux, + ES8388_ADCCONTROL3, 7, es8388_diff_sel); +static const struct snd_kcontrol_new es8388_diffmux_controls = + SOC_DAPM_ENUM("Route", diffmux); + +/* Mono ADC Mux */ +static const char * const es8388_mono_mux[] = {"Stereo", "Mono (Left)", + "Mono (Right)", "Digital Mono"}; +static SOC_ENUM_SINGLE_DECL(monomux, + ES8388_ADCCONTROL3, 3, es8388_mono_mux); +static const struct snd_kcontrol_new es8388_monomux_controls = + SOC_DAPM_ENUM("Route", monomux); + +static const struct snd_soc_dapm_widget es8388_dapm_widgets[] = { + SND_SOC_DAPM_MUX("Differential Mux", SND_SOC_NOPM, 0, 0, + &es8388_diffmux_controls), + SND_SOC_DAPM_MUX("Left ADC Mux", SND_SOC_NOPM, 0, 0, + &es8388_monomux_controls), + SND_SOC_DAPM_MUX("Right ADC Mux", SND_SOC_NOPM, 0, 0, + &es8388_monomux_controls), + + SND_SOC_DAPM_MUX("Left PGA Mux", ES8388_ADCPOWER, + ES8388_ADCPOWER_AINL_OFF, 1, + &es8388_left_pga_controls), + SND_SOC_DAPM_MUX("Right PGA Mux", ES8388_ADCPOWER, + ES8388_ADCPOWER_AINR_OFF, 1, + &es8388_right_pga_controls), + + SND_SOC_DAPM_MUX("Left Line 
Mux", SND_SOC_NOPM, 0, 0, + &es8388_left_line_controls), + SND_SOC_DAPM_MUX("Right Line Mux", SND_SOC_NOPM, 0, 0, + &es8388_right_line_controls), + + SND_SOC_DAPM_ADC("Right ADC", "Right Capture", ES8388_ADCPOWER, + ES8388_ADCPOWER_ADCR_OFF, 1), + SND_SOC_DAPM_ADC("Left ADC", "Left Capture", ES8388_ADCPOWER, + ES8388_ADCPOWER_ADCL_OFF, 1), + + SND_SOC_DAPM_SUPPLY("DAC STM", ES8388_CHIPPOWER, + ES8388_CHIPPOWER_DACSTM_RESET, 1, NULL, 0), + SND_SOC_DAPM_SUPPLY("ADC STM", ES8388_CHIPPOWER, + ES8388_CHIPPOWER_ADCSTM_RESET, 1, NULL, 0), + + SND_SOC_DAPM_SUPPLY("DAC DIG", ES8388_CHIPPOWER, + ES8388_CHIPPOWER_DACDIG_OFF, 1, NULL, 0), + SND_SOC_DAPM_SUPPLY("ADC DIG", ES8388_CHIPPOWER, + ES8388_CHIPPOWER_ADCDIG_OFF, 1, NULL, 0), + + SND_SOC_DAPM_SUPPLY("DAC DLL", ES8388_CHIPPOWER, + ES8388_CHIPPOWER_DACDLL_OFF, 1, NULL, 0), + SND_SOC_DAPM_SUPPLY("ADC DLL", ES8388_CHIPPOWER, + ES8388_CHIPPOWER_ADCDLL_OFF, 1, NULL, 0), + + SND_SOC_DAPM_SUPPLY("ADC Vref", ES8388_CHIPPOWER, + ES8388_CHIPPOWER_ADCVREF_OFF, 1, NULL, 0), + SND_SOC_DAPM_SUPPLY("DAC Vref", ES8388_CHIPPOWER, + ES8388_CHIPPOWER_DACVREF_OFF, 1, NULL, 0), + + SND_SOC_DAPM_DAC("Right DAC", "Right Playback", ES8388_DACPOWER, + ES8388_DACPOWER_RDAC_OFF, 1), + SND_SOC_DAPM_DAC("Left DAC", "Left Playback", ES8388_DACPOWER, + ES8388_DACPOWER_LDAC_OFF, 1), + + SND_SOC_DAPM_MIXER("Left Mixer", SND_SOC_NOPM, 0, 0, + &es8388_left_mixer_controls[0], + ARRAY_SIZE(es8388_left_mixer_controls)), + SND_SOC_DAPM_MIXER("Right Mixer", SND_SOC_NOPM, 0, 0, + &es8388_right_mixer_controls[0], + ARRAY_SIZE(es8388_right_mixer_controls)), + + SND_SOC_DAPM_PGA("Right Out 2", ES8388_DACPOWER, + ES8388_DACPOWER_ROUT2_ON, 0, NULL, 0), + SND_SOC_DAPM_PGA("Left Out 2", ES8388_DACPOWER, + ES8388_DACPOWER_LOUT2_ON, 0, NULL, 0), + SND_SOC_DAPM_PGA("Right Out 1", ES8388_DACPOWER, + ES8388_DACPOWER_ROUT1_ON, 0, NULL, 0), + SND_SOC_DAPM_PGA("Left Out 1", ES8388_DACPOWER, + ES8388_DACPOWER_LOUT1_ON, 0, NULL, 0), + + SND_SOC_DAPM_OUTPUT("LOUT1"), + 
SND_SOC_DAPM_OUTPUT("ROUT1"), + SND_SOC_DAPM_OUTPUT("LOUT2"), + SND_SOC_DAPM_OUTPUT("ROUT2"), + + SND_SOC_DAPM_INPUT("LINPUT1"), + SND_SOC_DAPM_INPUT("LINPUT2"), + SND_SOC_DAPM_INPUT("RINPUT1"), + SND_SOC_DAPM_INPUT("RINPUT2"), +}; + +static const struct snd_soc_dapm_route es8388_dapm_routes[] = { + { "Left Line Mux", "Line 1", "LINPUT1" }, + { "Left Line Mux", "Line 2", "LINPUT2" }, + { "Left Line Mux", "PGA", "Left PGA Mux" }, + { "Left Line Mux", "Differential", "Differential Mux" }, + + { "Right Line Mux", "Line 1", "RINPUT1" }, + { "Right Line Mux", "Line 2", "RINPUT2" }, + { "Right Line Mux", "PGA", "Right PGA Mux" }, + { "Right Line Mux", "Differential", "Differential Mux" }, + + { "Left PGA Mux", "Line 1", "LINPUT1" }, + { "Left PGA Mux", "Line 2", "LINPUT2" }, + { "Left PGA Mux", "Differential", "Differential Mux" }, + + { "Right PGA Mux", "Line 1", "RINPUT1" }, + { "Right PGA Mux", "Line 2", "RINPUT2" }, + { "Right PGA Mux", "Differential", "Differential Mux" }, + + { "Differential Mux", "Line 1", "LINPUT1" }, + { "Differential Mux", "Line 1", "RINPUT1" }, + { "Differential Mux", "Line 2", "LINPUT2" }, + { "Differential Mux", "Line 2", "RINPUT2" }, + + { "Left ADC Mux", "Stereo", "Left PGA Mux" }, + { "Left ADC Mux", "Mono (Left)", "Left PGA Mux" }, + { "Left ADC Mux", "Digital Mono", "Left PGA Mux" }, + + { "Right ADC Mux", "Stereo", "Right PGA Mux" }, + { "Right ADC Mux", "Mono (Right)", "Right PGA Mux" }, + { "Right ADC Mux", "Digital Mono", "Right PGA Mux" }, + + { "Left ADC", NULL, "Left ADC Mux" }, + { "Right ADC", NULL, "Right ADC Mux" }, + + { "ADC DIG", NULL, "ADC STM" }, + { "ADC DIG", NULL, "ADC Vref" }, + { "ADC DIG", NULL, "ADC DLL" }, + + { "Left ADC", NULL, "ADC DIG" }, + { "Right ADC", NULL, "ADC DIG" }, + + { "Left Line Mux", "Line 1", "LINPUT1" }, + { "Left Line Mux", "Line 2", "LINPUT2" }, + { "Left Line Mux", "PGA", "Left PGA Mux" }, + { "Left Line Mux", "Differential", "Differential Mux" }, + + { "Right Line Mux", "Line 1", "RINPUT1" 
}, + { "Right Line Mux", "Line 2", "RINPUT2" }, + { "Right Line Mux", "PGA", "Right PGA Mux" }, + { "Right Line Mux", "Differential", "Differential Mux" }, + + { "Left Out 1", NULL, "Left DAC" }, + { "Right Out 1", NULL, "Right DAC" }, + { "Left Out 2", NULL, "Left DAC" }, + { "Right Out 2", NULL, "Right DAC" }, + + { "Left Mixer", "Playback Switch", "Left DAC" }, + { "Left Mixer", "Left Bypass Switch", "Left Line Mux" }, + { "Left Mixer", "Right Playback Switch", "Right DAC" }, + { "Left Mixer", "Right Bypass Switch", "Right Line Mux" }, + + { "Right Mixer", "Left Playback Switch", "Left DAC" }, + { "Right Mixer", "Left Bypass Switch", "Left Line Mux" }, + { "Right Mixer", "Playback Switch", "Right DAC" }, + { "Right Mixer", "Right Bypass Switch", "Right Line Mux" }, + + { "DAC DIG", NULL, "DAC STM" }, + { "DAC DIG", NULL, "DAC Vref" }, + { "DAC DIG", NULL, "DAC DLL" }, + + { "Left DAC", NULL, "DAC DIG" }, + { "Right DAC", NULL, "DAC DIG" }, + + { "Left Out 1", NULL, "Left Mixer" }, + { "LOUT1", NULL, "Left Out 1" }, + { "Right Out 1", NULL, "Right Mixer" }, + { "ROUT1", NULL, "Right Out 1" }, + + { "Left Out 2", NULL, "Left Mixer" }, + { "LOUT2", NULL, "Left Out 2" }, + { "Right Out 2", NULL, "Right Mixer" }, + { "ROUT2", NULL, "Right Out 2" }, +}; + +static int es8388_mute(struct snd_soc_dai *dai, int mute) +{ + return snd_soc_component_update_bits(dai->component, ES8388_DACCONTROL3, + ES8388_DACCONTROL3_DACMUTE, + mute ? 
ES8388_DACCONTROL3_DACMUTE : 0); +} + +static int es8388_startup(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct snd_soc_component *component = dai->component; + struct es8388_priv *es8388 = snd_soc_component_get_drvdata(component); + + if (es8388->master && es8388->sysclk_constraints) + snd_pcm_hw_constraint_list(substream->runtime, 0, + SNDRV_PCM_HW_PARAM_RATE, + es8388->sysclk_constraints); + + return 0; +} + +static int es8388_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct snd_soc_dai *dai) +{ + struct snd_soc_component *component = dai->component; + struct es8388_priv *es8388 = snd_soc_component_get_drvdata(component); + int i; + int reg; + int wl; + int ratio; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + reg = ES8388_DACCONTROL2; + else + reg = ES8388_ADCCONTROL5; + + if (es8388->master) { + if (!es8388->sysclk_constraints) { + dev_err(component->dev, "No MCLK configured\n"); + return -EINVAL; + } + + for (i = 0; i < es8388->sysclk_constraints->count; i++) + if (es8388->sysclk_constraints->list[i] == + params_rate(params)) + break; + + if (i == es8388->sysclk_constraints->count) { + dev_err(component->dev, + "LRCLK %d unsupported with current clock\n", + params_rate(params)); + return -EINVAL; + } + ratio = es8388->mclk_ratios[i]; + } else { + ratio = 0; + es8388->mclkdiv2 = 0; + } + + snd_soc_component_update_bits(component, ES8388_MASTERMODE, + ES8388_MASTERMODE_MCLKDIV2, + es8388->mclkdiv2 ? 
ES8388_MASTERMODE_MCLKDIV2 : 0); + + switch (params_width(params)) { + case 16: + wl = 3; + break; + case 18: + wl = 2; + break; + case 20: + wl = 1; + break; + case 24: + wl = 0; + break; + case 32: + wl = 4; + break; + default: + return -EINVAL; + } + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + snd_soc_component_update_bits(component, ES8388_DACCONTROL1, + ES8388_DACCONTROL1_DACWL_MASK, + wl << ES8388_DACCONTROL1_DACWL_SHIFT); + + es8388->playback_fs = params_rate(params); + es8388_set_deemph(component); + } else + snd_soc_component_update_bits(component, ES8388_ADCCONTROL4, + ES8388_ADCCONTROL4_ADCWL_MASK, + wl << ES8388_ADCCONTROL4_ADCWL_SHIFT); + + return snd_soc_component_update_bits(component, reg, ES8388_RATEMASK, ratio); +} + +static int es8388_set_sysclk(struct snd_soc_dai *codec_dai, + int clk_id, unsigned int freq, int dir) +{ + struct snd_soc_component *component = codec_dai->component; + struct es8388_priv *es8388 = snd_soc_component_get_drvdata(component); + int mclkdiv2 = 0; + + switch (freq) { + case 0: + es8388->sysclk_constraints = NULL; + es8388->mclk_ratios = NULL; + break; + case 22579200: + mclkdiv2 = 1; + /* fallthru */ + case 11289600: + es8388->sysclk_constraints = &constraints_11289; + es8388->mclk_ratios = ratios_11289; + break; + case 24576000: + mclkdiv2 = 1; + /* fallthru */ + case 12288000: + es8388->sysclk_constraints = &constraints_12288; + es8388->mclk_ratios = ratios_12288; + break; + default: + return -EINVAL; + } + + es8388->mclkdiv2 = mclkdiv2; + return 0; +} + +static int es8388_set_dai_fmt(struct snd_soc_dai *codec_dai, + unsigned int fmt) +{ + struct snd_soc_component *component = codec_dai->component; + struct es8388_priv *es8388 = snd_soc_component_get_drvdata(component); + u8 dac_mode = 0; + u8 adc_mode = 0; + + switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { + case SND_SOC_DAIFMT_CBM_CFM: + /* Master serial port mode, with BCLK generated automatically */ + snd_soc_component_update_bits(component, 
ES8388_MASTERMODE, + ES8388_MASTERMODE_MSC, + ES8388_MASTERMODE_MSC); + es8388->master = true; + break; + case SND_SOC_DAIFMT_CBS_CFS: + /* Slave serial port mode */ + snd_soc_component_update_bits(component, ES8388_MASTERMODE, + ES8388_MASTERMODE_MSC, 0); + es8388->master = false; + break; + default: + return -EINVAL; + } + + /* interface format */ + switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { + case SND_SOC_DAIFMT_I2S: + dac_mode |= ES8388_DACCONTROL1_DACFORMAT_I2S; + adc_mode |= ES8388_ADCCONTROL4_ADCFORMAT_I2S; + break; + case SND_SOC_DAIFMT_RIGHT_J: + dac_mode |= ES8388_DACCONTROL1_DACFORMAT_RJUST; + adc_mode |= ES8388_ADCCONTROL4_ADCFORMAT_RJUST; + break; + case SND_SOC_DAIFMT_LEFT_J: + dac_mode |= ES8388_DACCONTROL1_DACFORMAT_LJUST; + adc_mode |= ES8388_ADCCONTROL4_ADCFORMAT_LJUST; + break; + default: + return -EINVAL; + } + + /* clock inversion */ + if ((fmt & SND_SOC_DAIFMT_INV_MASK) != SND_SOC_DAIFMT_NB_NF) + return -EINVAL; + + snd_soc_component_update_bits(component, ES8388_DACCONTROL1, + ES8388_DACCONTROL1_DACFORMAT_MASK, dac_mode); + snd_soc_component_update_bits(component, ES8388_ADCCONTROL4, + ES8388_ADCCONTROL4_ADCFORMAT_MASK, adc_mode); + + return 0; +} + +static int es8388_set_bias_level(struct snd_soc_component *component, + enum snd_soc_bias_level level) +{ + switch (level) { + case SND_SOC_BIAS_ON: + break; + + case SND_SOC_BIAS_PREPARE: + /* VREF, VMID=2x50k, digital enabled */ + snd_soc_component_write(component, ES8388_CHIPPOWER, 0); + snd_soc_component_update_bits(component, ES8388_CONTROL1, + ES8388_CONTROL1_VMIDSEL_MASK | + ES8388_CONTROL1_ENREF, + ES8388_CONTROL1_VMIDSEL_50k | + ES8388_CONTROL1_ENREF); + break; + + case SND_SOC_BIAS_STANDBY: + if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF) { + snd_soc_component_update_bits(component, ES8388_CONTROL1, + ES8388_CONTROL1_VMIDSEL_MASK | + ES8388_CONTROL1_ENREF, + ES8388_CONTROL1_VMIDSEL_5k | + ES8388_CONTROL1_ENREF); + + /* Charge caps */ + msleep(100); + } + + 
snd_soc_component_write(component, ES8388_CONTROL2, + ES8388_CONTROL2_OVERCURRENT_ON | + ES8388_CONTROL2_THERMAL_SHUTDOWN_ON); + + /* VREF, VMID=2*500k, digital stopped */ + snd_soc_component_update_bits(component, ES8388_CONTROL1, + ES8388_CONTROL1_VMIDSEL_MASK | + ES8388_CONTROL1_ENREF, + ES8388_CONTROL1_VMIDSEL_500k | + ES8388_CONTROL1_ENREF); + break; + + case SND_SOC_BIAS_OFF: + snd_soc_component_update_bits(component, ES8388_CONTROL1, + ES8388_CONTROL1_VMIDSEL_MASK | + ES8388_CONTROL1_ENREF, + 0); + break; + } + return 0; +} + +static const struct snd_soc_dai_ops es8388_dai_ops = { + .startup = es8388_startup, + .hw_params = es8388_hw_params, + .digital_mute = es8388_mute, + .set_sysclk = es8388_set_sysclk, + .set_fmt = es8388_set_dai_fmt, +}; + +static struct snd_soc_dai_driver es8388_dai = { + .name = "es8388-hifi", + .playback = { + .stream_name = "Playback", + .channels_min = 2, + .channels_max = 2, + .rates = ES8388_RATES, + .formats = ES8388_FORMATS, + }, + .capture = { + .stream_name = "Capture", + .channels_min = 2, + .channels_max = 2, + .rates = ES8388_RATES, + .formats = ES8388_FORMATS, + }, + .ops = &es8388_dai_ops, + .symmetric_rates = 1, +}; + +static int es8388_suspend(struct snd_soc_component *component) +{ + return 0; +} + +static int es8388_resume(struct snd_soc_component *component) +{ + struct regmap *regmap = dev_get_regmap(component->dev, NULL); + struct es8388_priv *es8388; + int ret; + + es8388 = snd_soc_component_get_drvdata(component); + + regcache_mark_dirty(regmap); + ret = regcache_sync(regmap); + if (ret) { + dev_err(component->dev, "unable to sync regcache\n"); + return ret; + } + + return 0; +} + +static int es8388_component_probe(struct snd_soc_component *component) +{ + snd_soc_component_write(component, ES8388_ADCPOWER, 0xf0); + snd_soc_component_write(component, ES8388_CONTROL1, 0x30); + snd_soc_component_write(component, ES8388_DACCONTROL21, 0x80); + snd_soc_component_write(component, ES8388_ADCCONTROL10, 0xda); + + return 
0; +} + +static void es8388_remove(struct snd_soc_component *component) +{ +} + +const struct regmap_config es8388_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = ES8388_REG_MAX, + .cache_type = REGCACHE_RBTREE, + .use_single_rw = true, +}; +EXPORT_SYMBOL_GPL(es8388_regmap_config); + +static const struct snd_soc_component_driver es8388_component_driver = { + .probe = es8388_component_probe, + .remove = es8388_remove, + .suspend = es8388_suspend, + .resume = es8388_resume, + .set_bias_level = es8388_set_bias_level, + .controls = es8388_snd_controls, + .num_controls = ARRAY_SIZE(es8388_snd_controls), + .dapm_widgets = es8388_dapm_widgets, + .num_dapm_widgets = ARRAY_SIZE(es8388_dapm_widgets), + .dapm_routes = es8388_dapm_routes, + .num_dapm_routes = ARRAY_SIZE(es8388_dapm_routes), + .suspend_bias_off = 1, + .idle_bias_on = 1, + .use_pmdown_time = 1, + .endianness = 1, + .non_legacy_dai_naming = 1, +}; + +int es8388_probe(struct device *dev, struct regmap *regmap) +{ + struct es8388_priv *es8388; + + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + es8388 = devm_kzalloc(dev, sizeof(*es8388), GFP_KERNEL); + if (es8388 == NULL) + return -ENOMEM; + + es8388->regmap = regmap; + + dev_set_drvdata(dev, es8388); + + return devm_snd_soc_register_component(dev, + &es8388_component_driver, &es8388_dai, 1); +} +EXPORT_SYMBOL_GPL(es8388_probe); + +static const struct i2c_device_id es8388_id[] = { + { "es8388", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, es8388_id); + +static const struct of_device_id es8388_of_match[] = { + { .compatible = "everest,es8388", }, + { } +}; +MODULE_DEVICE_TABLE(of, es8388_of_match); + +static struct acpi_device_id es8388_acpi_match[] = { + {"ESSX8388", 0 }, + {}, +}; +MODULE_DEVICE_TABLE(acpi, es8388_acpi_match); + +static int es8388_i2c_probe(struct i2c_client *i2c, + const struct i2c_device_id *id) +{ + return es8388_probe(&i2c->dev, + devm_regmap_init_i2c(i2c, &es8388_regmap_config)); +} + +static struct i2c_driver 
es8388_i2c_driver = { + .driver = { + .name = "es8388", + .of_match_table = es8388_of_match, + .acpi_match_table = es8388_acpi_match, + }, + .probe = es8388_i2c_probe, + .id_table = es8388_id, +}; + +module_i2c_driver(es8388_i2c_driver); + +MODULE_DESCRIPTION("ASoC ES8388 driver"); +MODULE_AUTHOR("Yiqun Zhang "); +MODULE_LICENSE("GPL"); diff --git a/sound/soc/codecs/es8388.h b/sound/soc/codecs/es8388.h new file mode 100644 index 000000000000..5858a71261fb --- /dev/null +++ b/sound/soc/codecs/es8388.h @@ -0,0 +1,290 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * es8388.h -- ES8388 ALSA SoC Audio driver + */ + +#ifndef _ES8388_H +#define _ES8388_H + +#include + +struct device; + +extern const struct regmap_config es8388_regmap_config; +int es8388_probe(struct device *dev, struct regmap *regmap); + +#define ES8388_DACLVOL 46 +#define ES8388_DACRVOL 47 +#define ES8388_DACCTL 28 +#define ES8388_RATEMASK (0x1f << 0) + +#define ES8388_CONTROL1 0x00 +#define ES8388_CONTROL1_VMIDSEL_OFF (0 << 0) +#define ES8388_CONTROL1_VMIDSEL_50k (1 << 0) +#define ES8388_CONTROL1_VMIDSEL_500k (2 << 0) +#define ES8388_CONTROL1_VMIDSEL_5k (3 << 0) +#define ES8388_CONTROL1_VMIDSEL_MASK (3 << 0) +#define ES8388_CONTROL1_ENREF (1 << 2) +#define ES8388_CONTROL1_SEQEN (1 << 3) +#define ES8388_CONTROL1_SAMEFS (1 << 4) +#define ES8388_CONTROL1_DACMCLK_ADC (0 << 5) +#define ES8388_CONTROL1_DACMCLK_DAC (1 << 5) +#define ES8388_CONTROL1_LRCM (1 << 6) +#define ES8388_CONTROL1_SCP_RESET (1 << 7) + +#define ES8388_CONTROL2 0x01 +#define ES8388_CONTROL2_VREF_BUF_OFF (1 << 0) +#define ES8388_CONTROL2_VREF_LOWPOWER (1 << 1) +#define ES8388_CONTROL2_IBIASGEN_OFF (1 << 2) +#define ES8388_CONTROL2_ANALOG_OFF (1 << 3) +#define ES8388_CONTROL2_VREF_BUF_LOWPOWER (1 << 4) +#define ES8388_CONTROL2_VCM_MOD_LOWPOWER (1 << 5) +#define ES8388_CONTROL2_OVERCURRENT_ON (1 << 6) +#define ES8388_CONTROL2_THERMAL_SHUTDOWN_ON (1 << 7) + +#define ES8388_CHIPPOWER 0x02 +#define ES8388_CHIPPOWER_DACVREF_OFF 0 +#define 
ES8388_CHIPPOWER_ADCVREF_OFF 1 +#define ES8388_CHIPPOWER_DACDLL_OFF 2 +#define ES8388_CHIPPOWER_ADCDLL_OFF 3 +#define ES8388_CHIPPOWER_DACSTM_RESET 4 +#define ES8388_CHIPPOWER_ADCSTM_RESET 5 +#define ES8388_CHIPPOWER_DACDIG_OFF 6 +#define ES8388_CHIPPOWER_ADCDIG_OFF 7 + +#define ES8388_ADCPOWER 0x03 +#define ES8388_ADCPOWER_INT1_LOWPOWER 0 +#define ES8388_ADCPOWER_FLASH_ADC_LOWPOWER 1 +#define ES8388_ADCPOWER_ADC_BIAS_GEN_OFF 2 +#define ES8388_ADCPOWER_MIC_BIAS_OFF 3 +#define ES8388_ADCPOWER_ADCR_OFF 4 +#define ES8388_ADCPOWER_ADCL_OFF 5 +#define ES8388_ADCPOWER_AINR_OFF 6 +#define ES8388_ADCPOWER_AINL_OFF 7 + +#define ES8388_DACPOWER 0x04 +#define ES8388_DACPOWER_OUT3_ON 0 +#define ES8388_DACPOWER_MONO_ON 1 +#define ES8388_DACPOWER_ROUT2_ON 2 +#define ES8388_DACPOWER_LOUT2_ON 3 +#define ES8388_DACPOWER_ROUT1_ON 4 +#define ES8388_DACPOWER_LOUT1_ON 5 +#define ES8388_DACPOWER_RDAC_OFF 6 +#define ES8388_DACPOWER_LDAC_OFF 7 + +#define ES8388_CHIPLOPOW1 0x05 +#define ES8388_CHIPLOPOW2 0x06 +#define ES8388_ANAVOLMANAG 0x07 + +#define ES8388_MASTERMODE 0x08 +#define ES8388_MASTERMODE_BCLKDIV (0 << 0) +#define ES8388_MASTERMODE_BCLK_INV (1 << 5) +#define ES8388_MASTERMODE_MCLKDIV2 (1 << 6) +#define ES8388_MASTERMODE_MSC (1 << 7) + +#define ES8388_ADCCONTROL1 0x09 +#define ES8388_ADCCONTROL2 0x0a +#define ES8388_ADCCONTROL3 0x0b + +#define ES8388_ADCCONTROL4 0x0c +#define ES8388_ADCCONTROL4_ADCFORMAT_MASK (3 << 0) +#define ES8388_ADCCONTROL4_ADCFORMAT_I2S (0 << 0) +#define ES8388_ADCCONTROL4_ADCFORMAT_LJUST (1 << 0) +#define ES8388_ADCCONTROL4_ADCFORMAT_RJUST (2 << 0) +#define ES8388_ADCCONTROL4_ADCFORMAT_PCM (3 << 0) +#define ES8388_ADCCONTROL4_ADCWL_SHIFT 2 +#define ES8388_ADCCONTROL4_ADCWL_MASK (7 << 2) +#define ES8388_ADCCONTROL4_ADCLRP_I2S_POL_NORMAL (0 << 5) +#define ES8388_ADCCONTROL4_ADCLRP_I2S_POL_INV (1 << 5) +#define ES8388_ADCCONTROL4_ADCLRP_PCM_MSB_CLK2 (0 << 5) +#define ES8388_ADCCONTROL4_ADCLRP_PCM_MSB_CLK1 (1 << 5) + +#define ES8388_ADCCONTROL5 0x0d +#define 
ES8388_ADCCONTROL5_RATEMASK (0x1f << 0) + +#define ES8388_ADCCONTROL6 0x0e + +#define ES8388_ADCCONTROL7 0x0f +#define ES8388_ADCCONTROL7_ADC_MUTE (1 << 2) +#define ES8388_ADCCONTROL7_ADC_LER (1 << 3) +#define ES8388_ADCCONTROL7_ADC_ZERO_CROSS (1 << 4) +#define ES8388_ADCCONTROL7_ADC_SOFT_RAMP (1 << 5) +#define ES8388_ADCCONTROL7_ADC_RAMP_RATE_4 (0 << 6) +#define ES8388_ADCCONTROL7_ADC_RAMP_RATE_8 (1 << 6) +#define ES8388_ADCCONTROL7_ADC_RAMP_RATE_16 (2 << 6) +#define ES8388_ADCCONTROL7_ADC_RAMP_RATE_32 (3 << 6) + +#define ES8388_ADCCONTROL8 0x10 +#define ES8388_ADCCONTROL9 0x11 +#define ES8388_ADCCONTROL10 0x12 +#define ES8388_ADCCONTROL11 0x13 +#define ES8388_ADCCONTROL12 0x14 +#define ES8388_ADCCONTROL13 0x15 +#define ES8388_ADCCONTROL14 0x16 + +#define ES8388_DACCONTROL1 0x17 +#define ES8388_DACCONTROL1_DACFORMAT_MASK (3 << 1) +#define ES8388_DACCONTROL1_DACFORMAT_I2S (0 << 1) +#define ES8388_DACCONTROL1_DACFORMAT_LJUST (1 << 1) +#define ES8388_DACCONTROL1_DACFORMAT_RJUST (2 << 1) +#define ES8388_DACCONTROL1_DACFORMAT_PCM (3 << 1) +#define ES8388_DACCONTROL1_DACWL_SHIFT 3 +#define ES8388_DACCONTROL1_DACWL_MASK (7 << 3) +#define ES8388_DACCONTROL1_DACLRP_I2S_POL_NORMAL (0 << 6) +#define ES8388_DACCONTROL1_DACLRP_I2S_POL_INV (1 << 6) +#define ES8388_DACCONTROL1_DACLRP_PCM_MSB_CLK2 (0 << 6) +#define ES8388_DACCONTROL1_DACLRP_PCM_MSB_CLK1 (1 << 6) +#define ES8388_DACCONTROL1_LRSWAP (1 << 7) + +#define ES8388_DACCONTROL2 0x18 +#define ES8388_DACCONTROL2_RATEMASK (0x1f << 0) +#define ES8388_DACCONTROL2_DOUBLESPEED (1 << 5) + +#define ES8388_DACCONTROL3 0x19 +#define ES8388_DACCONTROL3_AUTOMUTE (1 << 2) +#define ES8388_DACCONTROL3_DACMUTE (1 << 2) +#define ES8388_DACCONTROL3_LEFTGAINVOL (1 << 3) +#define ES8388_DACCONTROL3_DACZEROCROSS (1 << 4) +#define ES8388_DACCONTROL3_DACSOFTRAMP (1 << 5) +#define ES8388_DACCONTROL3_DACRAMPRATE (3 << 6) + +#define ES8388_LDACVOL 0x1a +#define ES8388_LDACVOL_MASK (0 << 0) +#define ES8388_LDACVOL_MAX (0xc0) + +#define ES8388_RDACVOL 
0x1b +#define ES8388_RDACVOL_MASK (0 << 0) +#define ES8388_RDACVOL_MAX (0xc0) + +#define ES8388_DACVOL_MAX (0xc0) + +#define ES8388_DACCONTROL4 0x1a +#define ES8388_DACCONTROL5 0x1b + +#define ES8388_DACCONTROL6 0x1c +#define ES8388_DACCONTROL6_CLICKFREE (1 << 3) +#define ES8388_DACCONTROL6_DAC_INVR (1 << 4) +#define ES8388_DACCONTROL6_DAC_INVL (1 << 5) +#define ES8388_DACCONTROL6_DEEMPH_MASK (3 << 6) +#define ES8388_DACCONTROL6_DEEMPH_OFF (0 << 6) +#define ES8388_DACCONTROL6_DEEMPH_32k (1 << 6) +#define ES8388_DACCONTROL6_DEEMPH_44_1k (2 << 6) +#define ES8388_DACCONTROL6_DEEMPH_48k (3 << 6) + +#define ES8388_DACCONTROL7 0x1d +#define ES8388_DACCONTROL7_VPP_SCALE_3p5 (0 << 0) +#define ES8388_DACCONTROL7_VPP_SCALE_4p0 (1 << 0) +#define ES8388_DACCONTROL7_VPP_SCALE_3p0 (2 << 0) +#define ES8388_DACCONTROL7_VPP_SCALE_2p5 (3 << 0) +#define ES8388_DACCONTROL7_SHELVING_STRENGTH (1 << 2) /* In eights */ +#define ES8388_DACCONTROL7_MONO (1 << 5) +#define ES8388_DACCONTROL7_ZEROR (1 << 6) +#define ES8388_DACCONTROL7_ZEROL (1 << 7) + +/* Shelving filter */ +#define ES8388_DACCONTROL8 0x1e +#define ES8388_DACCONTROL9 0x1f +#define ES8388_DACCONTROL10 0x20 +#define ES8388_DACCONTROL11 0x21 +#define ES8388_DACCONTROL12 0x22 +#define ES8388_DACCONTROL13 0x23 +#define ES8388_DACCONTROL14 0x24 +#define ES8388_DACCONTROL15 0x25 + +#define ES8388_DACCONTROL16 0x26 +#define ES8388_DACCONTROL16_RMIXSEL_RIN1 (0 << 0) +#define ES8388_DACCONTROL16_RMIXSEL_RIN2 (1 << 0) +#define ES8388_DACCONTROL16_RMIXSEL_RIN3 (2 << 0) +#define ES8388_DACCONTROL16_RMIXSEL_RADC (3 << 0) +#define ES8388_DACCONTROL16_LMIXSEL_LIN1 (0 << 3) +#define ES8388_DACCONTROL16_LMIXSEL_LIN2 (1 << 3) +#define ES8388_DACCONTROL16_LMIXSEL_LIN3 (2 << 3) +#define ES8388_DACCONTROL16_LMIXSEL_LADC (3 << 3) + +#define ES8388_DACCONTROL17 0x27 +#define ES8388_DACCONTROL17_LI2LOVOL (7 << 3) +#define ES8388_DACCONTROL17_LI2LO (1 << 6) +#define ES8388_DACCONTROL17_LD2LO (1 << 7) + +#define ES8388_DACCONTROL18 0x28 +#define 
ES8388_DACCONTROL18_RI2LOVOL (7 << 3) +#define ES8388_DACCONTROL18_RI2LO (1 << 6) +#define ES8388_DACCONTROL18_RD2LO (1 << 7) + +#define ES8388_DACCONTROL19 0x29 +#define ES8388_DACCONTROL19_LI2ROVOL (7 << 3) +#define ES8388_DACCONTROL19_LI2RO (1 << 6) +#define ES8388_DACCONTROL19_LD2RO (1 << 7) + +#define ES8388_DACCONTROL20 0x2a +#define ES8388_DACCONTROL20_RI2ROVOL (7 << 3) +#define ES8388_DACCONTROL20_RI2RO (1 << 6) +#define ES8388_DACCONTROL20_RD2RO (1 << 7) + +#define ES8388_DACCONTROL21 0x2b +#define ES8388_DACCONTROL21_LI2MOVOL (7 << 3) +#define ES8388_DACCONTROL21_LI2MO (1 << 6) +#define ES8388_DACCONTROL21_LD2MO (1 << 7) + +#define ES8388_DACCONTROL22 0x2c +#define ES8388_DACCONTROL22_RI2MOVOL (7 << 3) +#define ES8388_DACCONTROL22_RI2MO (1 << 6) +#define ES8388_DACCONTROL22_RD2MO (1 << 7) + +#define ES8388_DACCONTROL23 0x2d +#define ES8388_DACCONTROL23_MOUTINV (1 << 1) +#define ES8388_DACCONTROL23_HPSWPOL (1 << 2) +#define ES8388_DACCONTROL23_HPSWEN (1 << 3) +#define ES8388_DACCONTROL23_VROI_1p5k (0 << 4) +#define ES8388_DACCONTROL23_VROI_40k (1 << 4) +#define ES8388_DACCONTROL23_OUT3_VREF (0 << 5) +#define ES8388_DACCONTROL23_OUT3_ROUT1 (1 << 5) +#define ES8388_DACCONTROL23_OUT3_MONOOUT (2 << 5) +#define ES8388_DACCONTROL23_OUT3_RIGHT_MIXER (3 << 5) +#define ES8388_DACCONTROL23_ROUT2INV (1 << 7) + +/* LOUT1 Amplifier */ +#define ES8388_LOUT1VOL 0x2e +#define ES8388_LOUT1VOL_MASK (0 << 5) +#define ES8388_LOUT1VOL_MAX (0x24) + +/* ROUT1 Amplifier */ +#define ES8388_ROUT1VOL 0x2f +#define ES8388_ROUT1VOL_MASK (0 << 5) +#define ES8388_ROUT1VOL_MAX (0x24) + +#define ES8388_OUT1VOL_MAX (0x24) + +/* LOUT2 Amplifier */ +#define ES8388_LOUT2VOL 0x30 +#define ES8388_LOUT2VOL_MASK (0 << 5) +#define ES8388_LOUT2VOL_MAX (0x24) + +/* ROUT2 Amplifier */ +#define ES8388_ROUT2VOL 0x31 +#define ES8388_ROUT2VOL_MASK (0 << 5) +#define ES8388_ROUT2VOL_MAX (0x24) + +#define ES8388_OUT2VOL_MAX (0x24) + +/* Mono Out Amplifier */ +#define ES8388_MONOOUTVOL 0x32 +#define 
ES8388_MONOOUTVOL_MASK (0 << 5) +#define ES8388_MONOOUTVOL_MAX (0x24) + +#define ES8388_DACCONTROL29 0x33 +#define ES8388_DACCONTROL30 0x34 + +#define ES8388_SYSCLK 0 + +#define ES8388_REG_MAX 0x35 + +#define ES8388_1536FS 1536 +#define ES8388_1024FS 1024 +#define ES8388_768FS 768 +#define ES8388_512FS 512 +#define ES8388_384FS 384 +#define ES8388_256FS 256 +#define ES8388_128FS 128 + +#endif diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c index 7994e8ddc7d2..44914e7986f5 100644 --- a/sound/soc/codecs/hdmi-codec.c +++ b/sound/soc/codecs/hdmi-codec.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -285,6 +286,8 @@ struct hdmi_codec_priv { uint8_t eld[MAX_ELD_BYTES]; struct snd_pcm_chmap *chmap_info; unsigned int chmap_idx; + struct snd_soc_jack *jack; + unsigned int jack_status; }; static const struct snd_soc_dapm_widget hdmi_widgets[] = { @@ -700,6 +703,44 @@ static int hdmi_dai_probe(struct snd_soc_dai *dai) return snd_soc_dapm_add_routes(dapm, &route, 1); } +static void hdmi_codec_jack_report(struct hdmi_codec_priv *hcp, + unsigned int jack_status) +{ + if (hcp->jack && jack_status != hcp->jack_status) { + snd_soc_jack_report(hcp->jack, jack_status, SND_JACK_LINEOUT); + hcp->jack_status = jack_status; + } +} + +static void plugged_cb(struct device *dev, bool plugged) +{ + struct hdmi_codec_priv *hcp = dev_get_drvdata(dev); + + if (plugged) + hdmi_codec_jack_report(hcp, SND_JACK_LINEOUT); + else + hdmi_codec_jack_report(hcp, 0); +} + +static int hdmi_codec_set_jack(struct snd_soc_component *component, + struct snd_soc_jack *jack, + void *data) +{ + struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component); + int ret = -EOPNOTSUPP; + + if (hcp->hcd.ops->hook_plugged_cb) { + hcp->jack = jack; + ret = hcp->hcd.ops->hook_plugged_cb(component->dev->parent, + hcp->hcd.data, + plugged_cb, + component->dev); + if (ret) + hcp->jack = NULL; + } + return ret; +} + static const struct 
snd_soc_dai_driver hdmi_i2s_dai = { .name = "i2s-hifi", .id = DAI_ID_I2S, @@ -751,6 +792,7 @@ static const struct snd_soc_component_driver hdmi_driver = { .use_pmdown_time = 1, .endianness = 1, .non_legacy_dai_naming = 1, + .set_jack = hdmi_codec_set_jack, }; static int hdmi_codec_probe(struct platform_device *pdev) diff --git a/sound/soc/phytium/Kconfig b/sound/soc/phytium/Kconfig new file mode 100644 index 000000000000..4f9f12ce26b1 --- /dev/null +++ b/sound/soc/phytium/Kconfig @@ -0,0 +1,31 @@ +config SND_SOC_PHYTIUM_I2S + bool "Phytium I2S Device Driver" + depends on ARCH_PHYTIUM + help + Say Y or M if you want to add support for I2S driver for + Phytium I2S device . The device supports 2 channels each + for play and record. + +config SND_PMDK_ES8388 + tristate "Phytium X100 machine support with ES8388" + depends on I2C && SND_SOC_PHYTIUM_I2S + select SND_SOC_ES8388 + help + Say Y if you want to add Phytium machine support for + ES8388 codecs. + +config SND_PMDK_ES8336 + tristate "Phytium X100 machine support with ES8336" + depends on I2C && SND_SOC_PHYTIUM_I2S + select SND_SOC_ES8336 + help + Say Y if you want to add Phytium machine support for + ES8336 codecs. + +config SND_PMDK_DP + tristate "Phytium machine support with X100 DP" + depends on I2C && SND_SOC_PHYTIUM_I2S + select SND_SOC_HDMI_CODEC + help + Say Y if you want to add Phytium machine support for + Displayport on X100. 
diff --git a/sound/soc/phytium/Makefile b/sound/soc/phytium/Makefile new file mode 100644 index 000000000000..db3c0659e844 --- /dev/null +++ b/sound/soc/phytium/Makefile @@ -0,0 +1,13 @@ +# PHYTIUM Platform Support + +snd-soc-phytium-i2s-objs :=phytium_i2s.o +obj-$(CONFIG_SND_SOC_PHYTIUM_I2S) += snd-soc-phytium-i2s.o + +snd-soc-pmdk-es8388-objs :=pmdk_es8388.o +obj-$(CONFIG_SND_PMDK_ES8388) += snd-soc-pmdk-es8388.o + +snd-soc-pmdk-es8336-objs :=pmdk_es8336.o +obj-$(CONFIG_SND_PMDK_ES8336) += snd-soc-pmdk-es8336.o + +snd-soc-pmdk-dp-objs :=pmdk_dp.o +obj-$(CONFIG_SND_PMDK_DP) += snd-soc-pmdk-dp.o diff --git a/sound/soc/phytium/local.h b/sound/soc/phytium/local.h new file mode 100644 index 000000000000..43e989c36fd8 --- /dev/null +++ b/sound/soc/phytium/local.h @@ -0,0 +1,326 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2021, Phytium Technology Co.,Ltd. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#ifndef __PHYTIUM_I2S_LOCAL_H +#define __PHYTIUM_I2S_LOCAL_H + +#include +#include +#include +#include +#include +#include + +/* I2S clk setting*/ +#define CLK_CFG0 0xc00 +#define CLK_CFG1 0xc04 + +/* common register for all channel */ +#define I2S_IER 0x000 +#define IRER 0x004 +#define ITER 0x008 +#define CER 0x00C + +#define RXFFR 0x014 +#define TXFFR 0x018 + +/* Interrupt status register fields */ +#define ISR_TXFO BIT(5) +#define ISR_TXFE BIT(4) +#define ISR_RXFO BIT(1) +#define ISR_RXDA BIT(0) + +/* I2STxRxRegisters for all channels */ +#define LRBR_LTHR(x) (0x40 * x + 0x020) +#define RRBR_RTHR(x) (0x40 * x + 0x024) +#define RER(x) (0x40 * x + 0x028) + +#define RCR(x) (0x40 * x + 0x030) + +#define ISR(x) (0x40 * x + 0x038) +#define IMR(x) (0x40 * x + 0x03C) +#define ROR(x) (0x40 * x + 0x040) +#define TOR(x) (0x40 * x + 0x044) +#define RFCR(x) (0x40 * x + 0x048) +#define TFCR(x) (0x40 * x + 0x04C) +#define RFF(x) (0x40 * x + 0x050) +#define TFF(x) (0x40 * x + 0x054) + +/*enable txd and rxd block channel0~3 */ +#define TER(x) (0x40 * x + 0x02C) +#define CCR 0x010 +#define TCR(x) (0x40 * x + 0x034) + + +/* I2SCOMPRegisters */ +#define I2S_COMP_PARAM_2 0x01F0 +#define I2S_COMP_PARAM_1 0x01F4 +#define I2S_COMP_VERSION 0x01F8 +#define I2S_COMP_TYPE 0x01FC + +/***I2S AND DMA***/ + +#define DMA_GCAP 0x0024 + +#define DMA_CHAL_CONFG1 0x0028 + +#define DMA_CHAL_CONFG0 0x0004 +#define DMA_MASK_INT 0x000c +#define DMA_BDLPU(x) (0x40 * x + 0x0040) +#define DMA_BDLPL(x) (0x40 * x + 0x0044) +#define DMA_CHALX_DEV_ADDR(x) (0x40 * x + 0x0048) +#define DMA_CHALX_CBL(x) (0x40 * x + 0x0054) +#define DMA_CHALX_LVI(x) (0x40 * x + 0x004c) + +#define DMA_CHALX_DSIZE(x) (0x40 * x + 0x0064) +#define DMA_CHALX_DLENTH(x) (0x40 * x + 0x0068) +#define DMA_CHALX_CTL(x) (0x40 * x + 0x0058) + + +#define DMA_CTL 0x0000 + +#define DMA_LPIB(x) (0x40 * x + 0x0050) + +#define DMA_STS 0x0008 + +/****************/ + + +/* max number of fragments - we may use more if allocating more pages for 
BDL */ +#define BDL_SIZE 4096 +#define AZX_MAX_BDL_ENTRIES (BDL_SIZE / 16) + +/* + * Component parameter register fields - define the I2S block's + * configuration. + */ +#define COMP1_TX_WORDSIZE_3(r) (((r) & GENMASK(27, 25)) >> 25) +#define COMP1_TX_WORDSIZE_2(r) (((r) & GENMASK(24, 22)) >> 22) +#define COMP1_TX_WORDSIZE_1(r) (((r) & GENMASK(21, 19)) >> 19) +#define COMP1_TX_WORDSIZE_0(r) (((r) & GENMASK(18, 16)) >> 16) +#define COMP1_TX_CHANNELS(r) (((r) & GENMASK(10, 9)) >> 9) +#define COMP1_RX_CHANNELS(r) (((r) & GENMASK(8, 7)) >> 7) +#define COMP1_RX_ENABLED(r) (((r) & BIT(6)) >> 6) +#define COMP1_TX_ENABLED(r) (((r) & BIT(5)) >> 5) +#define COMP1_MODE_EN(r) (((r) & BIT(4)) >> 4) +#define COMP1_FIFO_DEPTH_GLOBAL(r) (((r) & GENMASK(3, 2)) >> 2) +#define COMP1_APB_DATA_WIDTH(r) (((r) & GENMASK(1, 0)) >> 0) + +#define COMP2_RX_WORDSIZE_3(r) (((r) & GENMASK(12, 10)) >> 10) +#define COMP2_RX_WORDSIZE_2(r) (((r) & GENMASK(9, 7)) >> 7) +#define COMP2_RX_WORDSIZE_1(r) (((r) & GENMASK(5, 3)) >> 3) +#define COMP2_RX_WORDSIZE_0(r) (((r) & GENMASK(2, 0)) >> 0) + +/* Number of entries in WORDSIZE and DATA_WIDTH parameter registers */ +#define COMP_MAX_WORDSIZE (1 << 3) +#define COMP_MAX_DATA_WIDTH (1 << 2) + +#define MAX_CHANNEL_NUM 8 +#define MIN_CHANNEL_NUM 2 + +#define azx_bus(chip) (&(chip)->bus.core) +#define bus_to_azx(_bus) container_of(_bus, struct azx, bus.core) + +#define I2S_UNSOL_QUEUE_SIZE 64 +#define I2S_MAX_CODECS 8 /* limit by controller side */ + + +#define azx_stream(dev) (&(dev)->core) + +struct i2sc_bus { + struct device *dev; + const struct i2s_bus_ops *ops; + const struct i2s_io_ops *io_ops; + const struct i2s_ext_bus_ops *ext_ops; + + /* h/w resources */ + unsigned long addr; + void __iomem *remap_addr; + int irq; + + /* codec linked list */ + struct list_head codec_list; + unsigned int num_codecs; + + unsigned int unsol_rp, unsol_wp; + struct work_struct unsol_work; + + struct snd_dma_buffer bdl0; + struct snd_dma_buffer bdl1; + + /* i2s_stream 
linked list */ + struct list_head stream_list; + + bool reverse_assign; /* assign devices in reverse order */ + + int bdl_pos_adj; /* BDL position adjustment */ + + /* locks */ + spinlock_t reg_lock; +}; + +struct i2s_bus { + struct i2sc_bus core; + + struct snd_card *card; + + struct pci_dev *pci; + + struct mutex prepare_mutex; +}; + + +/* + * i2s stream + */ +struct i2s_stream { + struct i2sc_bus *bus; + struct snd_dma_buffer bdl; /* BDL buffer */ + __le32 *posbuf; /* position buffer pointer */ + int direction; /* playback / capture (SNDRV_PCM_STREAM_*) */ + + unsigned int bufsize; /* size of the play buffer in bytes */ + unsigned int period_bytes; /* size of the period in bytes */ + unsigned int frags; /* number for period in the play buffer */ + unsigned int fifo_size; /* FIFO size */ + + void __iomem *sd_addr; /* stream descriptor pointer */ + + u32 sd_int_sta_mask; /* stream int status mask */ + + /* pcm support */ + struct snd_pcm_substream *substream; /* assigned substream, + * set in PCM open + */ + unsigned int format_val; /* format value to be set in the + * controller and the codec + */ + unsigned char stream_tag; /* assigned stream */ + unsigned char index; /* stream index */ + int assigned_key; /* last device# key assigned to */ + + bool opened; + bool running; + bool prepared; + bool no_period_wakeup; + + int delay_negative_threshold; + + struct list_head list; + +}; + + +struct azx_dev { + struct i2s_stream core; + unsigned int irq_pending:1; +}; + + + +/* PCM setup */ +static inline struct azx_dev *get_azx_dev(struct snd_pcm_substream *substream) +{ + return substream->runtime->private_data; +} + + +#define AZX_MAX_CODECS HDA_MAX_CODECS +#define AZX_DEFAULT_CODECS 4 + +#define stream_to_azx_dev(s) container_of(s, struct azx_dev, core) + +struct azx; + +struct i2s_controller_ops { + int (*substream_alloc_pages)(struct azx *chip, + struct snd_pcm_substream *substream, + size_t size); + int (*substream_free_pages)(struct azx *chip, + struct 
snd_pcm_substream *substream); + int (*position_check)(struct azx *chip, struct azx_dev *azx_dev); +}; + +struct i2s_io_ops { + int (*dma_alloc_pages)(struct i2sc_bus *bus, int type, size_t size, + struct snd_dma_buffer *buf); + void (*dma_free_pages)(struct i2sc_bus *bus, + struct snd_dma_buffer *buf); +}; + +struct azx { + struct i2s_bus bus; + + struct snd_card *card; + struct pci_dev *pci; + int dev_index; + + int playback_streams; + int playback_index_offset; + int capture_streams; + int capture_index_offset; + int num_streams; + + /* Register interaction. */ + const struct i2s_controller_ops *ops; + + /* locks */ + struct mutex open_mutex; /* Prevents concurrent open/close operations */ + + /* PCM */ + struct list_head pcm_list; /* azx_pcm list */ + + /* flags */ + int bdl_pos_adj; + unsigned int running:1; + unsigned int region_requested:1; + unsigned int disabled:1; +}; +struct i2s_phytium { + struct azx chip; + struct snd_pcm_substream *substream; + struct device *dev; + struct device *pdev; + void __iomem *regs; + void __iomem *regs_db; + int irq_id; + + /* for pending irqs */ + struct work_struct irq_pending_work; + + /* sync probing */ + struct completion probe_wait; + struct work_struct probe_work; + + /* extra flags */ + unsigned int irq_pending_warned:1; + unsigned int probe_continued:1; + unsigned int i2s_dp:1; + + unsigned int i2s_reg_comp1; + unsigned int i2s_reg_comp2; + struct clk *clk; + unsigned int capability; + unsigned int quirks; + u32 fifo_th; + int active; + u32 xfer_resolution; + u32 ccr; + u32 clk_base; + + struct i2s_clk_config_data config; + + /*azx_dev*/ + struct i2s_stream core; +}; + +#define azx_alloc_stream_pages(chip) \ + snd_i2s_bus_alloc_stream_pages(azx_bus(chip)) + +#endif diff --git a/sound/soc/phytium/phytium_i2s.c b/sound/soc/phytium/phytium_i2s.c new file mode 100644 index 000000000000..9c6ab16d83f7 --- /dev/null +++ b/sound/soc/phytium/phytium_i2s.c @@ -0,0 +1,1345 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * 
Phytium I2S ASoc driver + * + * Copyright (C) 2020-2021, Phytium Technology Co.,Ltd. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "local.h" + +/* FT have 1 playback and 1 capture */ +#define FT4C_NUM_CAPTURE 1 +#define FT4C_NUM_PLAYBACK 1 + +struct pdata_x100_mfd { + struct device *dev; + char *name; + int clk_base; +}; + +static inline void i2s_write_reg(void __iomem *io_base, int reg, u32 val) +{ + writel(val, io_base + reg); +} + +static inline u32 i2s_read_reg(void __iomem *io_base, int reg) +{ + return readl(io_base + reg); +} + +static inline void i2s_disable_channels(struct i2s_phytium *dev, u32 stream) +{ + u32 i = 0; + + if (stream == SNDRV_PCM_STREAM_PLAYBACK) { + for (i = 0; i < 4; i++) + i2s_write_reg(dev->regs, TER(i), 0); + } else { + for (i = 0; i < 4; i++) + i2s_write_reg(dev->regs, RER(i), 0); + } +} + +static int substream_free_pages(struct azx *chip, + struct snd_pcm_substream *substream) +{ + return snd_pcm_lib_free_pages(substream); +} + +static void stream_update(struct i2sc_bus *bus, struct i2s_stream *s) +{ + struct azx *chip = bus_to_azx(bus); + + struct azx_dev *azx_dev = stream_to_azx_dev(s); + + /* check whether this IRQ is really acceptable */ + if (!chip->ops->position_check || + chip->ops->position_check(chip, azx_dev)) { + spin_unlock(&bus->reg_lock); + snd_pcm_period_elapsed(azx_stream(azx_dev)->substream); + spin_lock(&bus->reg_lock); + } + +} + +int snd_i2s_bus_handle_stream_irq(struct i2sc_bus *bus, unsigned int status, + void (*ack)(struct i2sc_bus *, + struct i2s_stream *)) +{ + struct i2s_stream *azx_dev; + u32 sd_status, qc_sd_status; + int handled = 0; + + 
list_for_each_entry(azx_dev, &bus->stream_list, list) { + + if (status & azx_dev->sd_int_sta_mask) { + sd_status = i2s_read_reg(azx_dev->sd_addr, DMA_STS); + i2s_write_reg(azx_dev->sd_addr, DMA_STS, azx_dev->sd_int_sta_mask); + qc_sd_status = i2s_read_reg(azx_dev->sd_addr, DMA_STS); + handled |= 1 << azx_dev->index; + azx_dev->running = 1; + if (!azx_dev->substream || !azx_dev->running || + !(sd_status & 0xffffffff)) { + continue; + } + if (ack) + ack(bus, azx_dev); + } + } + return handled; +} + +irqreturn_t azx_i2s_interrupt(int irq, void *dev_id) +{ + struct azx *chip = dev_id; + struct i2sc_bus *bus = azx_bus(chip); + u32 status; + bool active, handled = false; + int repeat = 0; /* count for avoiding endless loop */ + + spin_lock(&bus->reg_lock); + + if (chip->disabled) + goto unlock; + + do { + + status = i2s_read_reg(bus->remap_addr, DMA_STS); + + if (status == 0) + break; + + handled = true; + active = false; + if (snd_i2s_bus_handle_stream_irq(bus, status, stream_update)) + active = true; + + + } while (active && ++repeat < 1); + + unlock: + spin_unlock(&bus->reg_lock); + + return IRQ_RETVAL(handled); +} + +static int azx_acquire_irq(struct azx *chip, int do_disconnect) +{ + struct i2sc_bus *bus = azx_bus(chip); + struct i2s_phytium *i2s = container_of(chip, struct i2s_phytium, chip); + int err; + + err = devm_request_irq(i2s->dev, i2s->irq_id, azx_i2s_interrupt, IRQF_SHARED, + "phytium i2s", chip); + + if (err < 0) { + dev_err(i2s->dev, "failed to request irq\n"); + return err; + } + + bus->irq = i2s->irq_id; + + return 0; +} + +static void i2s_start(struct i2s_phytium *dev, + struct snd_pcm_substream *substream) +{ + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + i2s_write_reg(dev->regs, ITER, 1); + else + i2s_write_reg(dev->regs, IRER, 1); + + /*enable the clock*/ + i2s_write_reg(dev->regs, CER, 1); + + /*enable the i2s*/ + i2s_write_reg(dev->regs, I2S_IER, 1); +} + +static void i2s_stop(struct i2s_phytium *dev, + struct snd_pcm_substream 
*substream) +{ + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + i2s_write_reg(dev->regs, ITER, 0); + else + i2s_write_reg(dev->regs, IRER, 0); + + if (!dev->active) { + i2s_write_reg(dev->regs, CER, 0); + i2s_write_reg(dev->regs, I2S_IER, 0); + } +} + +static void dw_i2s_config(struct i2s_phytium *dev, int stream) +{ + i2s_disable_channels(dev, stream); + + if (stream == SNDRV_PCM_STREAM_PLAYBACK) { + i2s_write_reg(dev->regs, TCR(0), dev->xfer_resolution); + i2s_write_reg(dev->regs, TER(0), 1); + } else { + i2s_write_reg(dev->regs, RCR(0), dev->xfer_resolution); + i2s_write_reg(dev->regs, RER(0), 1); + } +} + +static int dw_i2s_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) +{ + struct i2s_phytium *dev = snd_soc_dai_get_drvdata(dai); + struct i2s_clk_config_data *config = &dev->config; + u64 fix, point; + u32 cfg = 0; + + switch (params_format(params)) { + case SNDRV_PCM_FORMAT_S16_LE: + config->data_width = 16; + dev->ccr = 0x00; + dev->xfer_resolution = 0x02; + break; + + case SNDRV_PCM_FORMAT_S24_LE: + config->data_width = 24; + dev->ccr = 0x08; + dev->xfer_resolution = 0x04; + break; + + case SNDRV_PCM_FORMAT_S32_LE: + config->data_width = 32; + dev->ccr = 0x10; + dev->xfer_resolution = 0x05; + break; + + default: + dev_err(dev->dev, "phytium-i2s: unsupported PCM fmt"); + return -EINVAL; + } + + config->chan_nr = params_channels(params); + + switch (config->chan_nr) { + case EIGHT_CHANNEL_SUPPORT: + case SIX_CHANNEL_SUPPORT: + case FOUR_CHANNEL_SUPPORT: + case TWO_CHANNEL_SUPPORT: + break; + default: + dev_err(dev->dev, "channel not supported\n"); + return -EINVAL; + } + + dw_i2s_config(dev, substream->stream); + + i2s_write_reg(dev->regs, CCR, dev->ccr); + + config->sample_rate = params_rate(params); + if (dev->capability & DW_I2S_MASTER) { + fix = dev->clk_base / config->sample_rate / config->data_width / 32; + point = ((dev->clk_base / config->sample_rate) << 10) / config->data_width / 32; + 
point = (point - (fix << 10)) * 10; + cfg = ((u16) fix << 16) | (u16) point; + i2s_write_reg(dev->regs, CLK_CFG0, cfg); + i2s_write_reg(dev->regs, CLK_CFG1, 0xf); + } + return 0; +} + +static int dw_i2s_prepare(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct i2s_phytium *dev = snd_soc_dai_get_drvdata(dai); + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + i2s_write_reg(dev->regs, TXFFR, 1); + else + i2s_write_reg(dev->regs, RXFFR, 1); + + return 0; +} + +static int dw_i2s_trigger(struct snd_pcm_substream *substream, + int cmd, struct snd_soc_dai *dai) +{ + struct i2s_phytium *dev = snd_soc_dai_get_drvdata(dai); + int ret = 0; + + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + case SNDRV_PCM_TRIGGER_RESUME: + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: + dev->active++; + i2s_start(dev, substream); + break; + + case SNDRV_PCM_TRIGGER_STOP: + case SNDRV_PCM_TRIGGER_SUSPEND: + case SNDRV_PCM_TRIGGER_PAUSE_PUSH: + dev->active--; + i2s_stop(dev, substream); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int dw_i2s_set_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) +{ + struct i2s_phytium *dev = snd_soc_dai_get_drvdata(cpu_dai); + int ret = 0; + + switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { + case SND_SOC_DAIFMT_CBM_CFM: + if (dev->capability & DW_I2S_SLAVE) + ret = 0; + else + ret = -EINVAL; + break; + case SND_SOC_DAIFMT_CBS_CFS: + if (dev->capability & DW_I2S_MASTER) + ret = 0; + else + ret = -EINVAL; + break; + case SND_SOC_DAIFMT_CBM_CFS: + case SND_SOC_DAIFMT_CBS_CFM: + ret = -EINVAL; + break; + default: + dev_dbg(dev->dev, "phytium/i2s: Invalid master/slave format\n"); + ret = -EINVAL; + break; + } + return ret; +} + +static const struct snd_soc_dai_ops phytium_i2s_dai_ops = { + .hw_params = dw_i2s_hw_params, + .prepare = dw_i2s_prepare, + .trigger = dw_i2s_trigger, + .set_fmt = dw_i2s_set_fmt, +}; + +#ifdef CONFIG_PM +static int phytium_i2s_suspend(struct snd_soc_dai *dai) +{ + return 0; +} + 
+static int phytium_i2s_resume(struct snd_soc_dai *dai) +{ + struct i2s_phytium *dev = snd_soc_dai_get_drvdata(dai); + if (dai->playback_active) + dw_i2s_config(dev, SNDRV_PCM_STREAM_PLAYBACK); + if (dai->capture_active) + dw_i2s_config(dev, SNDRV_PCM_STREAM_CAPTURE); + return 0; +} +#else +#define phytium_i2s_suspend NULL +#define phytium_i2s_resume NULL +#endif + +static struct snd_soc_dai_driver phytium_i2s_dai = { + .playback = { + .stream_name = "i2s-Playback", + .channels_min = 2, + .channels_max = 2, + .rates = SNDRV_PCM_RATE_8000_192000, + .formats = SNDRV_PCM_FMTBIT_S8 | + SNDRV_PCM_FMTBIT_S16_LE | + SNDRV_PCM_FMTBIT_S20_LE | + SNDRV_PCM_FMTBIT_S24_LE | + SNDRV_PCM_FMTBIT_S32_LE, + }, + .capture = { + .stream_name = "i2s-Capture", + .channels_min = 2, + .channels_max = 2, + .rates = SNDRV_PCM_RATE_8000_192000, + .formats = SNDRV_PCM_FMTBIT_S8 | + SNDRV_PCM_FMTBIT_S16_LE | + SNDRV_PCM_FMTBIT_S20_LE | + SNDRV_PCM_FMTBIT_S24_LE | + SNDRV_PCM_FMTBIT_S32_LE, + }, + .ops = &phytium_i2s_dai_ops, + .suspend = phytium_i2s_suspend, + .resume = phytium_i2s_resume, + .symmetric_rates = 1, +}; + +static const struct snd_pcm_hardware phytium_pcm_hardware = { + .info = SNDRV_PCM_INFO_INTERLEAVED | + SNDRV_PCM_INFO_MMAP | + SNDRV_PCM_INFO_MMAP_VALID | + SNDRV_PCM_INFO_BLOCK_TRANSFER, + .rates = SNDRV_PCM_RATE_8000 | + SNDRV_PCM_RATE_32000 | + SNDRV_PCM_RATE_44100 | + SNDRV_PCM_RATE_48000, + .rate_min = 8000, + .rate_max = 48000, + .formats = (SNDRV_PCM_FMTBIT_S8 | + SNDRV_PCM_FMTBIT_S16_LE | + SNDRV_PCM_FMTBIT_S20_LE | + SNDRV_PCM_FMTBIT_S24_LE | + SNDRV_PCM_FMTBIT_S32_LE), + .channels_min = 2, + .channels_max = 2, + .buffer_bytes_max = 4096*16, + .period_bytes_min = 1024, + .period_bytes_max = 4096*4, + .periods_min = 2, + .periods_max = 16, + .fifo_size = 16, +}; + +struct i2s_stream *snd_i2s_stream_assign(struct i2sc_bus *bus, + struct snd_pcm_substream *substream) +{ + struct i2s_stream *azx_dev; + struct i2s_stream *res = NULL; + + /* make a non-zero unique key for 
the substream */ + int key = (substream->pcm->device << 16) | (substream->number << 2) | + (substream->stream + 1); + + list_for_each_entry(azx_dev, &bus->stream_list, list) { + if (azx_dev->direction != substream->stream) + continue; + + azx_dev->opened = 0; + + if (azx_dev->assigned_key == key) { + res = azx_dev; + break; + } + + if (!res || bus->reverse_assign) + res = azx_dev; + } + + if (res) { + spin_lock_irq(&bus->reg_lock); + res->opened = 1; + res->running = 0; + res->assigned_key = key; + res->substream = substream; + spin_unlock_irq(&bus->reg_lock); + } + + return res; +} + +/* assign a stream for the PCM */ +static inline struct azx_dev * +azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream) +{ + struct i2s_stream *s; + + s = snd_i2s_stream_assign(azx_bus(chip), substream); + if (!s) + return NULL; + return stream_to_azx_dev(s); +} + +static int phytium_pcm_open(struct snd_pcm_substream *substream) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct i2s_phytium *dev = snd_soc_dai_get_drvdata(rtd->cpu_dai); + + struct azx *chip = &dev->chip; + struct azx_dev *azx_dev; + struct snd_pcm_runtime *runtime = substream->runtime; + + azx_dev = azx_assign_device(chip, substream); + if (azx_dev == NULL) + return -EBUSY; + + snd_soc_set_runtime_hwparams(substream, &phytium_pcm_hardware); + snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); + runtime->private_data = dev; + + return 0; +} + +static int phytium_pcm_close(struct snd_pcm_substream *substream) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct i2s_phytium *dev = snd_soc_dai_get_drvdata(rtd->cpu_dai); + struct azx *chip = &dev->chip; + struct azx_dev *azx_dev = get_azx_dev(substream); + + mutex_lock(&chip->open_mutex); + azx_stream(azx_dev)->opened = 0; + azx_stream(azx_dev)->running = 0; + azx_stream(azx_dev)->substream = NULL; + + mutex_unlock(&chip->open_mutex); + return 0; +} + +static int phytium_pcm_new(struct 
snd_soc_pcm_runtime *rtd) +{ + struct i2s_phytium *i2s = snd_soc_dai_get_drvdata(rtd->cpu_dai); + size_t size = phytium_pcm_hardware.buffer_bytes_max; + + return snd_pcm_lib_preallocate_pages_for_all(rtd->pcm, + SNDRV_DMA_TYPE_DEV, + i2s->pdev, size, size); +} + +static const struct i2s_io_ops axi_i2s_io_ops; +static const struct i2s_controller_ops axi_i2s_ops; + +static int phytium_pcm_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *hw_params) +{ + struct snd_pcm_runtime *runtime = substream->runtime; + struct i2s_phytium *dev = runtime->private_data; + struct azx *chip = &dev->chip; + struct azx_dev *azx_dev = get_azx_dev(substream); + int ret; + + azx_dev->core.bufsize = 0; + azx_dev->core.period_bytes = 0; + azx_dev->core.format_val = 0; + + ret = chip->ops->substream_alloc_pages(chip, substream, + params_buffer_bytes(hw_params)); + + return ret; +} +/* + * set up a BDL entry + */ +static int setup_bdle(struct i2sc_bus *bus, + struct snd_dma_buffer *dmab, + struct i2s_stream *azx_dev, __le32 **bdlp, + int ofs, int size, int with_ioc) +{ + struct snd_pcm_substream *substream = azx_dev->substream; + struct snd_pcm_runtime *runtime = substream->runtime; + __le32 *bdl = *bdlp; + + dmab->addr = runtime->dma_addr; + while (size > 0) { + dma_addr_t addr; + int chunk; + + if (azx_dev->frags >= AZX_MAX_BDL_ENTRIES) + return -EINVAL; + + addr = snd_sgbuf_get_addr(dmab, ofs); + + /* program the address field of the BDL entry */ + bdl[0] = cpu_to_le32((u32)addr); + + bdl[1] = cpu_to_le32(upper_32_bits(addr)); + + /* program the size field of the BDL entry */ + chunk = snd_sgbuf_get_chunk_size(dmab, ofs, size); + + bdl[2] = cpu_to_le32(chunk); + + /* program the IOC to enable interrupt + * only when the whole fragment is processed + */ + size -= chunk; + bdl[3] = (size || !with_ioc) ? 
0 : cpu_to_le32(0x01); + + bdl += 4; + azx_dev->frags++; + ofs += chunk; + } + *bdlp = bdl; + return ofs; +} + +int snd_i2s_stream_setup_periods(struct i2s_stream *azx_dev) +{ + struct i2sc_bus *bus = azx_dev->bus; + struct snd_pcm_substream *substream = azx_dev->substream; + struct snd_pcm_runtime *runtime = substream->runtime; + __le32 *bdl; + int i, ofs, periods, period_bytes; + int pos_adj, pos_align; + + period_bytes = azx_dev->period_bytes; + periods = azx_dev->bufsize / period_bytes; + + /* program the initial BDL entries */ + bdl = (__le32 *)azx_dev->bdl.area; + + ofs = 0; + azx_dev->frags = 0; + + pos_adj = bus->bdl_pos_adj; + + if (!azx_dev->no_period_wakeup && pos_adj > 0) { + + pos_align = pos_adj; + pos_adj = (pos_adj * runtime->rate + 47999) / 48000; + if (!pos_adj) + pos_adj = pos_align; + else + pos_adj = ((pos_adj + pos_align - 1) / pos_align) * + pos_align; + pos_adj = frames_to_bytes(runtime, pos_adj); + if (pos_adj >= period_bytes) { + dev_warn(bus->dev, "Too big adjustment %d\n", + pos_adj); + pos_adj = 0; + } else { + + ofs = setup_bdle(bus, snd_pcm_get_dma_buf(substream), + azx_dev, + &bdl, ofs, pos_adj, true); + if (ofs < 0) + goto error; + } + } else + pos_adj = 0; + + for (i = 0; i < periods; i++) { + if (i == periods - 1 && pos_adj) + ofs = setup_bdle(bus, snd_pcm_get_dma_buf(substream), + azx_dev, &bdl, ofs, + period_bytes - pos_adj, 0); + else + ofs = setup_bdle(bus, snd_pcm_get_dma_buf(substream), + azx_dev, &bdl, ofs, + period_bytes, + !azx_dev->no_period_wakeup); + if (ofs < 0) + goto error; + } + return 0; + + error: + dev_err(bus->dev, "Too many BDL entries: buffer=%d, period=%d\n", + azx_dev->bufsize, period_bytes); + return -EINVAL; +} + +int snd_i2s_stream_set_params(struct i2s_stream *azx_dev, + unsigned int format_val) +{ + unsigned int bufsize, period_bytes; + struct snd_pcm_substream *substream = azx_dev->substream; + struct snd_pcm_runtime *runtime; + int err; + + if (!substream) + return -EINVAL; + + runtime = 
substream->runtime; + bufsize = snd_pcm_lib_buffer_bytes(substream); + period_bytes = snd_pcm_lib_period_bytes(substream); + if (bufsize != azx_dev->bufsize || + period_bytes != azx_dev->period_bytes || + format_val != azx_dev->format_val || + runtime->no_period_wakeup != azx_dev->no_period_wakeup) { + + azx_dev->bufsize = bufsize; + azx_dev->period_bytes = period_bytes; + azx_dev->format_val = format_val; + azx_dev->no_period_wakeup = runtime->no_period_wakeup; + err = snd_i2s_stream_setup_periods(azx_dev); + if (err < 0) + return err; + } + + return 0; +} + +int snd_i2s_stream_setup(struct i2s_stream *azx_dev) +{ + struct snd_pcm_runtime *runtime; + + if (azx_dev->substream) + runtime = azx_dev->substream->runtime; + else + runtime = NULL; + + i2s_write_reg(azx_dev->sd_addr, DMA_CHAL_CONFG0, 0x8180); + i2s_write_reg(azx_dev->sd_addr, DMA_MASK_INT, 0x80000003); + + if (azx_dev->direction == SNDRV_PCM_STREAM_PLAYBACK) { + i2s_write_reg(azx_dev->sd_addr, DMA_BDLPL(0), (u32)azx_dev->bdl.addr); + i2s_write_reg(azx_dev->sd_addr, DMA_BDLPU(0), upper_32_bits(azx_dev->bdl.addr)); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DEV_ADDR(0), 0x1c8); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CBL(0), azx_dev->bufsize); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_LVI(0), azx_dev->frags - 1); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DSIZE(0), 0x2);//0x2 + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DLENTH(0), 0x0);//0x0 + } else { + i2s_write_reg(azx_dev->sd_addr, DMA_BDLPL(1), (u32)azx_dev->bdl.addr); + i2s_write_reg(azx_dev->sd_addr, DMA_BDLPU(1), upper_32_bits(azx_dev->bdl.addr)); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DEV_ADDR(1), 0x1c0); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CBL(1), azx_dev->bufsize); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_LVI(1), azx_dev->frags - 1); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DSIZE(1), 0x8);//0x8 + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DLENTH(1), 0x0); + } + + if (runtime && runtime->period_size > 64) + 
azx_dev->delay_negative_threshold = + -frames_to_bytes(runtime, 64); + else + azx_dev->delay_negative_threshold = 0; + + return 0; +} + +static int phytium_pcm_prepare(struct snd_pcm_substream *substream) +{ + struct snd_pcm_runtime *runtime = substream->runtime; + struct i2s_phytium *dev = runtime->private_data; + struct azx *chip = &dev->chip; + struct azx_dev *azx_dev = get_azx_dev(substream); + struct i2sc_bus *bus = azx_bus(chip); + struct i2s_stream *hstr_p; + struct i2s_phytium *i2s = runtime->private_data; + int err; + + i2s->substream = substream; + azx_dev->core.substream = substream; + azx_dev->core.sd_addr = i2s->regs_db; + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + azx_dev->core.bdl.area = bus->bdl0.area; + azx_dev->core.bdl.addr = bus->bdl0.addr; + } else { + azx_dev->core.bdl.area = bus->bdl1.area; + azx_dev->core.bdl.addr = bus->bdl1.addr; + } + + if (!substream) + return -EINVAL; + + hstr_p = azx_stream(azx_dev); + hstr_p->direction = substream->stream; + + err = snd_i2s_stream_set_params(azx_stream(azx_dev), 0); + if (err < 0) + goto unlock; + + snd_i2s_stream_setup(azx_stream(azx_dev)); + + unlock: + if (!err) + azx_stream(azx_dev)->prepared = 1; + + return err; +} + +void snd_i2s_stream_clear(struct i2s_stream *azx_dev) +{ + if (azx_dev->direction == SNDRV_PCM_STREAM_PLAYBACK) + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0), 0x0); + else + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(1), 0x0); + + azx_dev->running = false; +} + +void snd_i2s_stream_stop(struct i2s_stream *azx_dev) +{ + snd_i2s_stream_clear(azx_dev); +} + +void snd_i2s_stream_start(struct i2s_stream *azx_dev, bool fresh_start) +{ + if (azx_dev->direction == SNDRV_PCM_STREAM_PLAYBACK) + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0), 0x1); + else + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(1), 0x5); + + azx_dev->running = true; +} + +static int phytium_pcm_trigger(struct snd_pcm_substream *substream, int cmd) +{ + struct snd_pcm_runtime *runtime = 
substream->runtime; + struct i2s_phytium *dev = runtime->private_data; + struct azx *chip = &dev->chip; + struct i2sc_bus *bus = azx_bus(chip); + struct azx_dev *azx_dev = get_azx_dev(substream); + struct snd_pcm_substream *s; + struct i2s_stream *hstr; + bool start; + int sbits = 0; + + hstr = azx_stream(azx_dev); + hstr->direction = substream->stream; + + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: + case SNDRV_PCM_TRIGGER_RESUME: + start = true; + break; + case SNDRV_PCM_TRIGGER_PAUSE_PUSH: + case SNDRV_PCM_TRIGGER_SUSPEND: + case SNDRV_PCM_TRIGGER_STOP: + start = false; + break; + default: + return -EINVAL; + } + + snd_pcm_group_for_each_entry(s, substream) { + if (s->pcm->card != substream->pcm->card) + continue; + azx_dev = get_azx_dev(s); + sbits |= 1 << azx_dev->core.index; + snd_pcm_trigger_done(s, substream); + } + + spin_lock(&bus->reg_lock); + + snd_pcm_group_for_each_entry(s, substream) { + if (s->pcm->card != substream->pcm->card) + continue; + azx_dev = get_azx_dev(s); + if (start) + snd_i2s_stream_start(azx_stream(azx_dev), true); + else + snd_i2s_stream_stop(azx_stream(azx_dev)); + } + + i2s_write_reg(dev->regs_db, DMA_CTL, 0x1); + spin_unlock(&bus->reg_lock); + + return 0; +} + +static void phytium_pcm_free(struct snd_pcm *pcm) +{ + snd_pcm_lib_preallocate_free_for_all(pcm); +} + +void snd_i2s_stream_cleanup(struct i2s_stream *azx_dev) +{ + int cnt = 10; + if (azx_dev->sd_addr) { + if (azx_dev->direction == SNDRV_PCM_STREAM_PLAYBACK) { + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0), 0); + while (cnt--) { + if (i2s_read_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0)) == 0) + break; + } + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0), 2); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0), 0); + i2s_write_reg(azx_dev->sd_addr, DMA_BDLPL(0), 0); + i2s_write_reg(azx_dev->sd_addr, DMA_BDLPU(0), 0); + } else { + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(1), 0); + while (cnt--) { + if 
(i2s_read_reg(azx_dev->sd_addr, DMA_CHALX_CTL(1)) == 0) + break; + } + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(1), 2); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(1), 0); + i2s_write_reg(azx_dev->sd_addr, DMA_BDLPL(1), 0); + i2s_write_reg(azx_dev->sd_addr, DMA_BDLPU(1), 0); + } + } +} + +static int phytium_pcm_hw_free(struct snd_pcm_substream *substream) +{ + struct snd_pcm_runtime *runtime = substream->runtime; + struct i2s_phytium *dev = runtime->private_data; + struct azx *chip = &dev->chip; + struct i2s_stream *hstr_p; + struct azx_dev *azx_dev = get_azx_dev(substream); + int err; + + hstr_p = azx_stream(azx_dev); + hstr_p->direction = substream->stream; + snd_i2s_stream_cleanup(azx_stream(azx_dev)); + + err = chip->ops->substream_free_pages(chip, substream); + azx_stream(azx_dev)->prepared = 0; + + return err; +} + +static snd_pcm_uframes_t phytium_pcm_pointer(struct snd_pcm_substream *substream) +{ + struct snd_pcm_runtime *runtime = substream->runtime; + struct i2s_phytium *dev = runtime->private_data; + + int stream = substream->stream; + + u32 pos = i2s_read_reg(dev->regs_db, DMA_LPIB(stream)); + + return bytes_to_frames(substream->runtime, pos); +} + +static const struct snd_pcm_ops phytium_pcm_ops = { + .open = phytium_pcm_open, + .close = phytium_pcm_close, + .hw_params = phytium_pcm_hw_params, + .prepare = phytium_pcm_prepare, + .hw_free = phytium_pcm_hw_free, + .trigger = phytium_pcm_trigger, + .pointer = phytium_pcm_pointer, +}; + +static const struct snd_soc_component_driver phytium_i2s_component = { + .name = "phytium-i2s", + .pcm_new = phytium_pcm_new, + .pcm_free = phytium_pcm_free, + .ops = &phytium_pcm_ops, +}; + +/* Maximum bit resolution of a channel - not uniformly spaced */ +static const u32 fifo_width[COMP_MAX_WORDSIZE] = { + 12, 16, 20, 24, 32, 0, 0, 0 +}; + +/* Width of (DMA) bus */ +static const u32 bus_widths[COMP_MAX_DATA_WIDTH] = { + DMA_SLAVE_BUSWIDTH_1_BYTE, + DMA_SLAVE_BUSWIDTH_2_BYTES, + DMA_SLAVE_BUSWIDTH_4_BYTES, + 
DMA_SLAVE_BUSWIDTH_UNDEFINED +}; + +/* PCM format to support channel resolution */ +static const u32 formats[COMP_MAX_WORDSIZE] = { + SNDRV_PCM_FMTBIT_S16_LE, + SNDRV_PCM_FMTBIT_S16_LE, + SNDRV_PCM_FMTBIT_S24_LE, + SNDRV_PCM_FMTBIT_S24_LE, + SNDRV_PCM_FMTBIT_S32_LE, + 0, + 0, + 0 +}; + +static int phytium_configure_dai(struct i2s_phytium *dev) +{ + u32 comp1 = i2s_read_reg(dev->regs, dev->i2s_reg_comp1); + u32 comp2 = i2s_read_reg(dev->regs, dev->i2s_reg_comp2); + u32 fifo_depth = 1 << (1 + COMP1_FIFO_DEPTH_GLOBAL(comp1)); + u32 idx; + + if (COMP1_TX_ENABLED(comp1)) { + dev_dbg(dev->dev, " phytium: play supported\n"); + idx = COMP1_TX_WORDSIZE_0(comp1); + if (WARN_ON(idx >= ARRAY_SIZE(formats))) + return -EINVAL; + } + + if (COMP1_RX_ENABLED(comp1)) { + dev_dbg(dev->dev, "phytium: record supported\n"); + idx = COMP2_RX_WORDSIZE_0(comp2); + if (WARN_ON(idx >= ARRAY_SIZE(formats))) + return -EINVAL; + if (dev->quirks & DW_I2S_QUIRK_16BIT_IDX_OVERRIDE) + idx = 1; + } + + if (COMP1_MODE_EN(comp1)) { + dev_dbg(dev->dev, "phytium: i2s master mode supported\n"); + dev->capability |= DW_I2S_MASTER; + } else { + dev_dbg(dev->dev, "phytium: i2s slave mode supported\n"); + dev->capability |= DW_I2S_SLAVE; + } + + dev->fifo_th = fifo_depth / 2; + return 0; +} + +static int phytium_configure_dai_by_dt(struct i2s_phytium *dev) +{ + u32 comp1 = i2s_read_reg(dev->regs, I2S_COMP_PARAM_1); + u32 comp2 = i2s_read_reg(dev->regs, I2S_COMP_PARAM_2); + u32 idx = COMP1_APB_DATA_WIDTH(comp1); + u32 idx2; + int ret; + + if (WARN_ON(idx >= ARRAY_SIZE(bus_widths))) + return -EINVAL; + + ret = phytium_configure_dai(dev); + if (ret < 0) + return ret; + + if (COMP1_TX_ENABLED(comp1)) { + idx2 = COMP1_TX_WORDSIZE_0(comp1); + dev->capability |= DWC_I2S_PLAY; + } + if (COMP1_RX_ENABLED(comp1)) { + idx2 = COMP2_RX_WORDSIZE_0(comp2); + dev->capability |= DWC_I2S_RECORD; + } + + return 0; +} + +static int dma_alloc_pages(struct i2sc_bus *bus, int type, size_t size, + struct snd_dma_buffer *buf) +{ + 
int err; + + err = snd_dma_alloc_pages(type, bus->dev, size, buf); + if (err < 0) + return err; + + return 0; +} + +int snd_i2s_bus_alloc_stream_pages(struct i2sc_bus *bus) +{ + struct i2s_stream *s; + int num_streams = 0; + int err; + + list_for_each_entry(s, &bus->stream_list, list) { + + /* allocate memory for the BDL for each stream */ + err = bus->io_ops->dma_alloc_pages(bus, SNDRV_DMA_TYPE_DEV, + BDL_SIZE, &s->bdl); + if (num_streams == 0) { + bus->bdl0.addr = s->bdl.addr; + bus->bdl0.area = s->bdl.area; + } else { + bus->bdl1.addr = s->bdl.addr; + bus->bdl1.area = s->bdl.area; + } + num_streams++; + if (err < 0) + return -ENOMEM; + } + + if (WARN_ON(!num_streams)) + return -EINVAL; + + return 0; +} + +static int stream_direction(struct azx *chip, unsigned char index) +{ + if (index >= chip->playback_index_offset && + index < chip->playback_index_offset + chip->playback_streams) + return SNDRV_PCM_STREAM_PLAYBACK; + return SNDRV_PCM_STREAM_CAPTURE; + +} + +void snd_i2s_stream_init(struct i2sc_bus *bus, struct i2s_stream *azx_dev, + int idx, int direction, int tag) +{ + azx_dev->bus = bus; + azx_dev->sd_addr = bus->remap_addr; + + if (idx == 0) + azx_dev->sd_int_sta_mask = 1 << idx; + else + azx_dev->sd_int_sta_mask = 1 << 8; + + azx_dev->index = idx; + azx_dev->direction = direction; + azx_dev->stream_tag = tag; + + list_add_tail(&azx_dev->list, &bus->stream_list); + +} + +int azx_i2s_init_streams(struct azx *chip) +{ + int i; + + for (i = 0; i < chip->num_streams; i++) { + struct azx_dev *azx_dev = kzalloc(sizeof(*azx_dev), GFP_KERNEL); + int dir, tag; + + if (!azx_dev) + return -ENOMEM; + + dir = stream_direction(chip, i); + + tag = i + 1; + + snd_i2s_stream_init(azx_bus(chip), azx_stream(azx_dev), + i, dir, tag); + } + + return 0; +} + +static int azx_first_init(struct azx *chip) +{ + struct i2s_phytium *i2s = container_of(chip, struct i2s_phytium, chip); + struct platform_device *pdev = to_platform_device(i2s->dev); + struct device *i2sdev = i2s->dev; + 
struct i2sc_bus *bus = azx_bus(chip); + struct resource *res; + int err; + unsigned int dma_bits = 64; + + chip->region_requested = 1; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + bus->addr = res->start; + bus->remap_addr = i2s->regs_db; + bus->dev = i2s->pdev; + + if (bus->remap_addr == NULL) { + dev_err(i2sdev, "ioremap error\n"); + return -ENXIO; + } + + if (azx_acquire_irq(chip, 0) < 0) + return -EBUSY; + + synchronize_irq(bus->irq); + + if (!dma_set_mask(i2sdev, DMA_BIT_MASK(dma_bits))) { + err = dma_set_coherent_mask(i2sdev, DMA_BIT_MASK(dma_bits)); + } else { + err = dma_set_mask(i2sdev, DMA_BIT_MASK(32)); + err = dma_set_coherent_mask(i2sdev, DMA_BIT_MASK(32)); + } + + chip->playback_streams = FT4C_NUM_PLAYBACK; + chip->capture_streams = FT4C_NUM_CAPTURE; + + chip->playback_index_offset = 0; + chip->capture_index_offset = chip->playback_streams; + chip->num_streams = chip->playback_streams + chip->capture_streams; + + err = azx_i2s_init_streams(chip); + if (err < 0) + return err; + + err = azx_alloc_stream_pages(chip); + if (err < 0) + return err; + + return 0; +} + +static int azx_probe_continue(struct azx *chip) +{ + struct i2s_phytium *i2s = container_of(chip, struct i2s_phytium, chip); + int err; + + i2s->probe_continued = 1; + + err = azx_first_init(chip); + if (err < 0) + goto out_free; + + chip->running = 1; + +out_free: + return err; +} + +static void azx_probe_work(struct work_struct *work) +{ + struct i2s_phytium *i2s = container_of(work, struct i2s_phytium, probe_work); + + azx_probe_continue(&i2s->chip); +} + +int azx_i2s_bus_init(struct azx *chip, + const struct i2s_io_ops *io_ops) +{ + struct i2s_bus *bus = &chip->bus; + + bus->core.io_ops = io_ops; + + INIT_LIST_HEAD(&bus->core.stream_list); + bus->card = chip->card; + mutex_init(&bus->prepare_mutex); + bus->pci = chip->pci; + + bus->core.bdl_pos_adj = chip->bdl_pos_adj; + return 0; +} + +static int i2s_phytium_create(struct platform_device *pdev, + int dev, struct azx **rchip, 
struct i2s_phytium *i2s) +{ + struct azx *chip; + int err; + + *rchip = NULL; + + if (!i2s) + return -ENOMEM; + chip = &i2s->chip; + + mutex_init(&chip->open_mutex); + + chip->ops = &axi_i2s_ops; + chip->dev_index = dev; + + INIT_LIST_HEAD(&chip->pcm_list); + init_completion(&i2s->probe_wait); + + chip->bdl_pos_adj = 32; + err = azx_i2s_bus_init(chip, &axi_i2s_io_ops); + if (err < 0) { + kfree(i2s); + return err; + } + + INIT_WORK(&i2s->probe_work, azx_probe_work); + *rchip = chip; + return 0; +} + +static int substream_alloc_pages(struct azx *chip, + struct snd_pcm_substream *substream, + size_t size) +{ + int ret; + + ret = snd_pcm_lib_malloc_pages(substream, size); + if (ret < 0) + return ret; + + return 0; +} + +static void dma_free_pages(struct i2sc_bus *bus, + struct snd_dma_buffer *buf) +{ + snd_dma_free_pages(buf); +} + +static const struct i2s_io_ops axi_i2s_io_ops = { + .dma_alloc_pages = dma_alloc_pages, + .dma_free_pages = dma_free_pages, +}; + +static const struct i2s_controller_ops axi_i2s_ops = { + .substream_alloc_pages = substream_alloc_pages, + .substream_free_pages = substream_free_pages, +}; + + +static int phytium_i2s_probe(struct platform_device *pdev) +{ + struct i2s_phytium *i2s; + struct azx *chip; + struct resource *res; + struct pdata_x100_mfd *pdata; + struct snd_soc_dai_driver *dai_drv; + int err, ret; + int card_num = 1; + bool schedule_probe; + + i2s = devm_kzalloc(&pdev->dev, sizeof(*i2s), GFP_KERNEL); + if (!i2s) + return -ENOMEM; + + dai_drv = devm_kzalloc(&pdev->dev, sizeof(*dai_drv), GFP_KERNEL); + if (!dai_drv) + return -ENOMEM; + memcpy(dai_drv, &phytium_i2s_dai, sizeof(phytium_i2s_dai)); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + i2s->regs = devm_ioremap_resource(&pdev->dev, res); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + i2s->regs_db = devm_ioremap_resource(&pdev->dev, res); + + if (IS_ERR(i2s->regs)) + return PTR_ERR(i2s->regs); + + i2s->irq_id = platform_get_irq(pdev, 0); + + if 
(i2s->irq_id < 0) + return i2s->irq_id; + + i2s->i2s_reg_comp1 = I2S_COMP_PARAM_1; + i2s->i2s_reg_comp2 = I2S_COMP_PARAM_2; + + ret = phytium_configure_dai_by_dt(i2s); + if (ret < 0) + return ret; + + err = i2s_phytium_create(pdev, card_num, &chip, i2s); + if (err < 0) + return err; + i2s = container_of(chip, struct i2s_phytium, chip); + schedule_probe = !chip->disabled; + + dev_set_drvdata(&pdev->dev, i2s); + + pdata = dev_get_platdata(&pdev->dev); + dai_drv->name = pdata->name; + i2s->dev = &pdev->dev; + i2s->pdev = pdata->dev; + i2s->clk_base = pdata->clk_base; + + ret = devm_snd_soc_register_component(&pdev->dev, &phytium_i2s_component, + dai_drv, 1); + if (ret != 0) + dev_err(&pdev->dev, "not able to register dai\n"); + + if (schedule_probe) + schedule_work(&i2s->probe_work); + + if (chip->disabled) + complete_all(&i2s->probe_wait); + + return 0; +} + +static int phytium_i2s_remove(struct platform_device *pdev) +{ + pm_runtime_disable(&pdev->dev); + return 0; +} + +static struct platform_driver phytium_i2s_driver = { + .probe = phytium_i2s_probe, + .remove = phytium_i2s_remove, + .driver = { + .name = "phytium-i2s", + }, +}; + +module_platform_driver(phytium_i2s_driver); + +MODULE_DESCRIPTION("Phytium I2S Driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Zhang Yiqun "); diff --git a/sound/soc/phytium/pmdk_dp.c b/sound/soc/phytium/pmdk_dp.c new file mode 100644 index 000000000000..0e5a001e8126 --- /dev/null +++ b/sound/soc/phytium/pmdk_dp.c @@ -0,0 +1,227 @@ +/* + * pmdk_dp.c + * + * Copyright (c) 2021 Phytium Technology Co. Ltd + * Author: Yiqun Zhang + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include +#include +#include +#include + +struct pmdk_dp_private { + struct snd_soc_jack jack0; + struct snd_soc_jack jack1; + struct snd_soc_jack jack2; +}; + +/* PMDK widgets */ +static const struct snd_soc_dapm_widget pmdk_dp_dapm_widgets[] = { + SND_SOC_DAPM_LINE("DP", NULL), +}; + +/* PMDK control */ +static const struct snd_kcontrol_new pmdk_controls[] = { + SOC_DAPM_PIN_SWITCH("DP"), +}; + +/* PMDK connections */ +static const struct snd_soc_dapm_route pmdk_dp_audio_map[] = { + {"DP", NULL, "TX"}, +}; + +static struct snd_soc_jack_pin dp0_pins[] = { + { + .pin = "DP/HDMI 0", + .mask = SND_JACK_LINEOUT, + }, +}; + +static struct snd_soc_jack_pin dp1_pins[] = { + { + .pin = "DP/HDMI 1", + .mask = SND_JACK_LINEOUT, + }, +}; + +static struct snd_soc_jack_pin dp2_pins[] = { + { + .pin = "DP/HDMI 2", + .mask = SND_JACK_LINEOUT, + }, +}; + +#define SMDK_DAI_FMT (SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | \ + SND_SOC_DAIFMT_CBS_CFS) + +static int pmdk_dp0_init(struct snd_soc_pcm_runtime *runtime) +{ + struct snd_soc_card *card = runtime->card; + struct pmdk_dp_private *priv = snd_soc_card_get_drvdata(card); + struct snd_soc_component *component = runtime->codec_dai->component; + int ret; + + ret = snd_soc_card_jack_new(card, "DP/HDMI 0", + SND_JACK_LINEOUT, + &priv->jack0, dp0_pins, + ARRAY_SIZE(dp0_pins)); + if (ret) { + dev_err(card->dev, "Jack creation failed %d\n", ret); + return ret; + } + snd_soc_component_set_jack(component, &priv->jack0, NULL); + return ret; +} + +static int pmdk_dp1_init(struct snd_soc_pcm_runtime *runtime) +{ + struct snd_soc_card *card = runtime->card; + struct pmdk_dp_private *priv = snd_soc_card_get_drvdata(card); + struct snd_soc_component *component = runtime->codec_dai->component; + int ret; + + ret = snd_soc_card_jack_new(card, "DP/HDMI 1", + SND_JACK_LINEOUT, + &priv->jack1, dp1_pins, + ARRAY_SIZE(dp1_pins)); + if (ret) { + dev_err(card->dev, "Jack creation failed %d\n", ret); + return ret; + } + 
snd_soc_component_set_jack(component, &priv->jack1, NULL); + return ret; +} + +static int pmdk_dp2_init(struct snd_soc_pcm_runtime *runtime) +{ + struct snd_soc_card *card = runtime->card; + struct pmdk_dp_private *priv = snd_soc_card_get_drvdata(card); + struct snd_soc_component *component = runtime->codec_dai->component; + int ret; + + ret = snd_soc_card_jack_new(card, "DP/HDMI 2", + SND_JACK_LINEOUT, + &priv->jack2, dp2_pins, + ARRAY_SIZE(dp2_pins)); + if (ret) { + dev_err(card->dev, "Jack creation failed %d\n", ret); + return ret; + } + snd_soc_component_set_jack(component, &priv->jack2, NULL); + return ret; +} + +static struct snd_soc_dai_link pmdk_dai0 = { + .name = "Phytium dp0-audio", + .stream_name = "Playback", + .cpu_dai_name = "phytium-i2s-dp0", + .codec_dai_name = "i2s-hifi", + .platform_name = "snd-soc-dummy", + .codec_name = "hdmi-audio-codec.0.auto", + .dai_fmt = SMDK_DAI_FMT, + .init = pmdk_dp0_init, +}; + +static struct snd_soc_dai_link pmdk_dai1 = { + .name = "Phytium dp1-audio", + .stream_name = "Playback", + .cpu_dai_name = "phytium-i2s-dp1", + .codec_dai_name = "i2s-hifi", + .platform_name = "snd-soc-dummy", + .codec_name = "hdmi-audio-codec.1.auto", + .dai_fmt = SMDK_DAI_FMT, + .init = pmdk_dp1_init, +}; + +static struct snd_soc_dai_link pmdk_dai2 = { + .name = "Phytium dp2-audio", + .stream_name = "Playback", + .cpu_dai_name = "phytium-i2s-dp2", + .codec_dai_name = "i2s-hifi", + .platform_name = "snd-soc-dummy", + .codec_name = "hdmi-audio-codec.2.auto", + .dai_fmt = SMDK_DAI_FMT, + .init = pmdk_dp2_init, +}; + +static struct snd_soc_card pmdk = { + .name = "PMDK-I2S", + .owner = THIS_MODULE, + + .dapm_widgets = pmdk_dp_dapm_widgets, + .num_dapm_widgets = ARRAY_SIZE(pmdk_dp_dapm_widgets), + .controls = pmdk_controls, + .num_controls = ARRAY_SIZE(pmdk_controls), + .dapm_routes = pmdk_dp_audio_map, + .num_dapm_routes = ARRAY_SIZE(pmdk_dp_audio_map), +}; + +static int pmdk_sound_probe(struct platform_device *pdev) +{ + struct snd_soc_card *card 
= &pmdk; + struct pmdk_dp_private *priv; + struct snd_soc_dai_link *pmdk_dai; + int num_dp = 2; + card->dev = &pdev->dev; + device_property_read_u32(&pdev->dev, "num-dp", &num_dp); + pmdk_dai = devm_kzalloc(&pdev->dev, num_dp * sizeof(*pmdk_dai), GFP_KERNEL); + if (!pmdk_dai) + return -ENOMEM; + + switch (num_dp) { + case 1: + pmdk_dai[0] = pmdk_dai0; + break; + case 2: + pmdk_dai[0] = pmdk_dai0; + pmdk_dai[1] = pmdk_dai1; + break; + case 3: + pmdk_dai[0] = pmdk_dai0; + pmdk_dai[1] = pmdk_dai1; + pmdk_dai[2] = pmdk_dai2; + break; + default: + return -EINVAL; + } + + card->dai_link = pmdk_dai; + card->num_links = num_dp; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + snd_soc_card_set_drvdata(card, priv); + + return devm_snd_soc_register_card(&pdev->dev, card); +} + +static const struct acpi_device_id pmdk_sound_acpi_match[] = { + { "PHYT8006", 0}, + { } +}; +MODULE_DEVICE_TABLE(acpi, pmdk_sound_acpi_match); + +static struct platform_driver pmdk_sound_driver = { + .probe = pmdk_sound_probe, + .driver = { + .name = "pmdk_dp", + .acpi_match_table = pmdk_sound_acpi_match, +#ifdef CONFIG_PM + .pm = &snd_soc_pm_ops, +#endif + }, +}; + +module_platform_driver(pmdk_sound_driver); + +MODULE_AUTHOR("Zhang Yiqun"); +MODULE_DESCRIPTION("ALSA SoC PMDK DP"); +MODULE_LICENSE("GPL"); diff --git a/sound/soc/phytium/pmdk_es8336.c b/sound/soc/phytium/pmdk_es8336.c new file mode 100644 index 000000000000..56333513fbdf --- /dev/null +++ b/sound/soc/phytium/pmdk_es8336.c @@ -0,0 +1,100 @@ +/* + * pmdk_es8336.c + * + * Copyright (c) 2021 Phytium Techonology Co. Ltd + * Author: Zhang Yiqun + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include +#include +#include +#include + + +/* PMDK widgets */ +static const struct snd_soc_dapm_widget pmdk_es8336_dapm_widgets[] = { + SND_SOC_DAPM_HP("HP", NULL), + SND_SOC_DAPM_MIC("Int Mic", NULL), + SND_SOC_DAPM_MIC("Mic In", NULL), +}; + +/* PMDK control */ +static const struct snd_kcontrol_new pmdk_controls[] = { + SOC_DAPM_PIN_SWITCH("HP"), + SOC_DAPM_PIN_SWITCH("Int Mic"), + SOC_DAPM_PIN_SWITCH("Mic In"), +}; + +/* PMDK connections */ +static const struct snd_soc_dapm_route pmdk_es8336_audio_map[] = { + {"DMIC", NULL, "Int Mic"}, + {"MIC1", NULL, "Mic In"}, + {"MIC2", NULL, "Mic In"}, + + {"HP", NULL, "HPOL"}, + {"HP", NULL, "HPOR"}, +}; + +#define PMDK_DAI_FMT (SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | \ + SND_SOC_DAIFMT_CBS_CFS) + +static struct snd_soc_dai_link pmdk_dai[] = { + { + .name = "ES8336 HIFI", + .stream_name = "ES8336 HIFI", + .cpu_dai_name = "phytium-i2s-lsd", + .codec_dai_name = "es8336-hifi", + .platform_name = "snd-soc-dummy", + .codec_name = "i2c-ESSX8336:00", + .dai_fmt = PMDK_DAI_FMT, + }, +}; + +static struct snd_soc_card pmdk = { + .name = "PMDK-I2S", + .owner = THIS_MODULE, + .dai_link = pmdk_dai, + .num_links = ARRAY_SIZE(pmdk_dai), + + .dapm_widgets = pmdk_es8336_dapm_widgets, + .num_dapm_widgets = ARRAY_SIZE(pmdk_es8336_dapm_widgets), + .controls = pmdk_controls, + .num_controls = ARRAY_SIZE(pmdk_controls), + .dapm_routes = pmdk_es8336_audio_map, + .num_dapm_routes = ARRAY_SIZE(pmdk_es8336_audio_map), +}; + +static int pmdk_sound_probe(struct platform_device *pdev) +{ + struct snd_soc_card *card = &pmdk; + struct device *dev = &pdev->dev; + card->dev = dev; + return devm_snd_soc_register_card(&pdev->dev, card); +} + +static const struct acpi_device_id pmdk_sound_acpi_match[] = { + { "PHYT8005", 0}, + { } +}; +MODULE_DEVICE_TABLE(acpi, pmdk_sound_acpi_match); + +static struct platform_driver pmdk_sound_driver = { + .probe = pmdk_sound_probe, + .driver = { + .name = "pmdk_es8336", + .acpi_match_table = 
pmdk_sound_acpi_match, +#ifdef CONFIG_PM + .pm = &snd_soc_pm_ops, +#endif + }, +}; + +module_platform_driver(pmdk_sound_driver); +MODULE_AUTHOR("Zhang Yiqun "); +MODULE_DESCRIPTION("ALSA SoC PMDK ES8336"); +MODULE_LICENSE("GPL"); diff --git a/sound/soc/phytium/pmdk_es8388.c b/sound/soc/phytium/pmdk_es8388.c new file mode 100644 index 000000000000..194a4dca7ea8 --- /dev/null +++ b/sound/soc/phytium/pmdk_es8388.c @@ -0,0 +1,174 @@ +/* + * pmdk_es8388.c + * + * Copyright (c) 2021 Phytium Techonology Co. Ltd + * Author: Zhang Yiqun + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include +#include +#include +#include +#include + +static struct snd_soc_jack hs_jack; + +/* Headset jack detection DAPM pins */ +static struct snd_soc_jack_pin hs_jack_pins[] = { + { + .pin = "FrontIn", + .mask = SND_JACK_MICROPHONE, + }, + { + .pin = "RearIn", + .mask = SND_JACK_MICROPHONE, + .invert = 1 + }, + { + .pin = "Front", + .mask = SND_JACK_HEADPHONE, + }, + { + .pin = "Rear", + .mask = SND_JACK_HEADPHONE, + .invert = 1 + }, +}; + +/* Headset jack detection gpios */ +static struct snd_soc_jack_gpio hs_jack_gpios[] = { + { + .name = "det", + .report = SND_JACK_HEADSET, + .debounce_time = 200, + .invert = 1, + }, +}; + +/* PMDK widgets */ +static const struct snd_soc_dapm_widget pmdk_es8388_dapm_widgets[] = { + SND_SOC_DAPM_HP("Front", NULL), + SND_SOC_DAPM_HP("Rear", NULL), + + SND_SOC_DAPM_MIC("FrontIn", NULL), + SND_SOC_DAPM_MIC("RearIn", NULL), +}; + +/* PMDK control */ +static const struct snd_kcontrol_new pmdk_controls[] = { + SOC_DAPM_PIN_SWITCH("Front"), + SOC_DAPM_PIN_SWITCH("Rear"), + SOC_DAPM_PIN_SWITCH("FrontIn"), + SOC_DAPM_PIN_SWITCH("RearIn"), +}; + +/* PMDK connections */ +static const struct snd_soc_dapm_route pmdk_es8388_audio_map[] = { + {"LINPUT1", 
NULL, "FrontIn"}, + {"RINPUT1", NULL, "FrontIn"}, + + {"LINPUT2", NULL, "RearIn"}, + {"RINPUT2", NULL, "RearIn"}, + + {"Front", NULL, "LOUT1"}, + {"Front", NULL, "ROUT1"}, + + {"Rear", NULL, "LOUT2"}, + {"Rear", NULL, "ROUT2"}, +}; + +static int pmdk_es8388_init(struct snd_soc_pcm_runtime *rtd) +{ + int ret; + + /* Jack detection API stuff */ + ret = snd_soc_card_jack_new(rtd->card, "Headset Jack", SND_JACK_HEADSET, + &hs_jack, hs_jack_pins, + ARRAY_SIZE(hs_jack_pins)); + if (ret) + goto err; + + ret = snd_soc_jack_add_gpios(&hs_jack, ARRAY_SIZE(hs_jack_gpios), + hs_jack_gpios); + if (ret) + goto err; + + return 0; + +err: + return ret; +} + +#define PMDK_DAI_FMT (SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | \ + SND_SOC_DAIFMT_CBS_CFS) + +static struct snd_soc_dai_link pmdk_dai[] = { + { + .name = "ES8388 HIFI", + .stream_name = "ES8388 HIFI", + .cpu_dai_name = "phytium-i2s-lsd", + .codec_dai_name = "es8388-hifi", + .platform_name = "snd-soc-dummy", + .codec_name = "i2c-ESSX8388:00", + .dai_fmt = PMDK_DAI_FMT, + .init = pmdk_es8388_init, + }, +}; + +static struct snd_soc_card pmdk = { + .name = "PMDK-I2S", + .owner = THIS_MODULE, + .dai_link = pmdk_dai, + .num_links = ARRAY_SIZE(pmdk_dai), + + .dapm_widgets = pmdk_es8388_dapm_widgets, + .num_dapm_widgets = ARRAY_SIZE(pmdk_es8388_dapm_widgets), + .controls = pmdk_controls, + .num_controls = ARRAY_SIZE(pmdk_controls), + .dapm_routes = pmdk_es8388_audio_map, + .num_dapm_routes = ARRAY_SIZE(pmdk_es8388_audio_map), +}; + +static int pmdk_sound_probe(struct platform_device *pdev) +{ + struct snd_soc_card *card = &pmdk; + struct device *dev = &pdev->dev; + int n; + + card->dev = dev; + hs_jack_gpios[0].gpiod_dev = dev; + n = gpiod_count(dev, "det"); + + if(n < 0) + pmdk_dai[0].init = NULL; + + return devm_snd_soc_register_card(&pdev->dev, card); +} + +static const struct acpi_device_id pmdk_sound_acpi_match[] = { + { "PHYT8004", 0}, + { } +}; +MODULE_DEVICE_TABLE(acpi, pmdk_sound_acpi_match); + +static struct 
platform_driver pmdk_sound_driver = { + .probe = pmdk_sound_probe, + .driver = { + .name = "pmdk_es8388", + .acpi_match_table = pmdk_sound_acpi_match, +#ifdef CONFIG_PM + .pm = &snd_soc_pm_ops, +#endif + }, +}; + +module_platform_driver(pmdk_sound_driver); + +MODULE_AUTHOR("Zhang Yiqun"); +MODULE_DESCRIPTION("ALSA SoC PMDK ES8388"); +MODULE_LICENSE("GPL"); diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c index a2a175b08b17..2b21d4fee771 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v3.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c @@ -332,7 +332,7 @@ u64 vgic_sanitise_outer_cacheability(u64 field) case GIC_BASER_CACHE_nC: return field; default: - return GIC_BASER_CACHE_nC; + return GIC_BASER_CACHE_SameAsInner; } } -- Gitee From 32454647fb14d2c7dc70b9b752b4cd317f7ecbca Mon Sep 17 00:00:00 2001 From: zhangjianwei Date: Mon, 8 Aug 2022 15:08:01 +0800 Subject: [PATCH 2/5] update phytium dc driver,add x100 gpu driver --- drivers/gpu/drm/phytium/Kconfig | 9 + drivers/gpu/drm/phytium/Makefile | 9 +- drivers/gpu/drm/phytium/octopus/Kconfig | 30 + drivers/gpu/drm/phytium/octopus/Makefile | 14 + drivers/gpu/drm/phytium/octopus/allocmem.c | 435 + drivers/gpu/drm/phytium/octopus/allocmem.h | 181 + drivers/gpu/drm/phytium/octopus/cache_km.c | 2572 ++++++ drivers/gpu/drm/phytium/octopus/cache_km.h | 164 + drivers/gpu/drm/phytium/octopus/cache_ops.h | 57 + .../drm/phytium/octopus/client_cache_bridge.h | 83 + .../octopus/client_cache_direct_bridge.c | 120 + .../octopus/client_devicememhistory_bridge.h | 111 + .../client_devicememhistory_direct_bridge.c | 194 + .../phytium/octopus/client_htbuffer_bridge.h | 71 + .../octopus/client_htbuffer_direct_bridge.c | 85 + .../drm/phytium/octopus/client_mm_bridge.h | 241 + .../phytium/octopus/client_mm_direct_bridge.c | 732 ++ .../drm/phytium/octopus/client_pvrtl_bridge.h | 93 + .../octopus/client_pvrtl_direct_bridge.c | 175 + .../drm/phytium/octopus/client_ri_bridge.h | 89 + .../phytium/octopus/client_ri_direct_bridge.c 
| 182 + .../drm/phytium/octopus/client_sync_bridge.h | 102 + .../octopus/client_sync_direct_bridge.c | 262 + .../octopus/client_synctracking_bridge.h | 68 + .../client_synctracking_direct_bridge.c | 92 + .../drm/phytium/octopus/common_cache_bridge.h | 129 + .../drm/phytium/octopus/common_cmm_bridge.h | 114 + .../octopus/common_devicememhistory_bridge.h | 185 + .../drm/phytium/octopus/common_di_bridge.h | 153 + .../drm/phytium/octopus/common_dma_bridge.h | 123 + .../phytium/octopus/common_dmabuf_bridge.h | 127 + .../phytium/octopus/common_htbuffer_bridge.h | 104 + .../drm/phytium/octopus/common_mm_bridge.h | 781 ++ .../drm/phytium/octopus/common_pvrtl_bridge.h | 214 + .../octopus/common_rgxbreakpoint_bridge.h | 149 + .../phytium/octopus/common_rgxcmp_bridge.h | 230 + .../phytium/octopus/common_rgxfwdbg_bridge.h | 200 + .../phytium/octopus/common_rgxhwperf_bridge.h | 135 + .../octopus/common_rgxkicksync_bridge.h | 144 + .../phytium/octopus/common_rgxray_bridge.h | 129 + .../octopus/common_rgxregconfig_bridge.h | 146 + .../octopus/common_rgxsignals_bridge.h | 77 + .../phytium/octopus/common_rgxta3d_bridge.h | 403 + .../octopus/common_rgxtimerquery_bridge.h | 112 + .../phytium/octopus/common_rgxtq2_bridge.h | 228 + .../drm/phytium/octopus/common_rgxtq_bridge.h | 176 + .../drm/phytium/octopus/common_ri_bridge.h | 225 + .../phytium/octopus/common_srvcore_bridge.h | 369 + .../drm/phytium/octopus/common_sync_bridge.h | 254 + .../octopus/common_synctracking_bridge.h | 97 + .../gpu/drm/phytium/octopus/config_kernel.h | 165 + .../gpu/drm/phytium/octopus/config_kernel.mk | 49 + .../configs/rgxconfig_km_30.V.816.20.h | 127 + .../drm/phytium/octopus/connection_server.c | 557 ++ .../drm/phytium/octopus/connection_server.h | 134 + .../octopus/cores/rgxcore_km_30.3.816.20.h | 69 + .../gpu/drm/phytium/octopus/debug_common.c | 1643 ++++ .../gpu/drm/phytium/octopus/debug_common.h | 51 + drivers/gpu/drm/phytium/octopus/device.h | 532 ++ .../drm/phytium/octopus/device_connection.h | 123 
+ drivers/gpu/drm/phytium/octopus/devicemem.c | 2981 +++++++ drivers/gpu/drm/phytium/octopus/devicemem.h | 736 ++ .../drm/phytium/octopus/devicemem_heapcfg.c | 184 + .../drm/phytium/octopus/devicemem_heapcfg.h | 184 + .../octopus/devicemem_history_server.c | 1912 +++++ .../octopus/devicemem_history_server.h | 152 + .../gpu/drm/phytium/octopus/devicemem_pdump.h | 363 + .../drm/phytium/octopus/devicemem_server.c | 1796 ++++ .../drm/phytium/octopus/devicemem_server.h | 682 ++ .../phytium/octopus/devicemem_server_utils.h | 214 + .../drm/phytium/octopus/devicemem_typedefs.h | 142 + .../gpu/drm/phytium/octopus/devicemem_utils.c | 1174 +++ .../gpu/drm/phytium/octopus/devicemem_utils.h | 522 ++ drivers/gpu/drm/phytium/octopus/di_common.h | 228 + drivers/gpu/drm/phytium/octopus/di_impl_brg.c | 882 ++ drivers/gpu/drm/phytium/octopus/di_impl_brg.h | 92 + .../drm/phytium/octopus/di_impl_brg_intern.h | 61 + drivers/gpu/drm/phytium/octopus/di_server.c | 744 ++ drivers/gpu/drm/phytium/octopus/di_server.h | 183 + drivers/gpu/drm/phytium/octopus/dllist.h | 408 + drivers/gpu/drm/phytium/octopus/dma_flags.h | 50 + drivers/gpu/drm/phytium/octopus/dma_km.c | 413 + drivers/gpu/drm/phytium/octopus/dma_km.h | 83 + drivers/gpu/drm/phytium/octopus/dma_support.c | 335 + drivers/gpu/drm/phytium/octopus/dma_support.h | 112 + .../gpu/drm/phytium/octopus/env_connection.h | 90 + drivers/gpu/drm/phytium/octopus/event.c | 514 ++ drivers/gpu/drm/phytium/octopus/event.h | 54 + drivers/gpu/drm/phytium/octopus/fwload.c | 258 + drivers/gpu/drm/phytium/octopus/fwload.h | 158 + .../gpu/drm/phytium/octopus/fwtrace_string.h | 52 + drivers/gpu/drm/phytium/octopus/handle.c | 2300 +++++ drivers/gpu/drm/phytium/octopus/handle.h | 201 + drivers/gpu/drm/phytium/octopus/handle_idr.c | 440 + drivers/gpu/drm/phytium/octopus/handle_impl.h | 89 + .../gpu/drm/phytium/octopus/handle_types.h | 85 + drivers/gpu/drm/phytium/octopus/hash.c | 734 ++ drivers/gpu/drm/phytium/octopus/hash.h | 247 + 
drivers/gpu/drm/phytium/octopus/htb_debug.c | 1190 +++ drivers/gpu/drm/phytium/octopus/htb_debug.h | 64 + drivers/gpu/drm/phytium/octopus/htbserver.c | 886 ++ drivers/gpu/drm/phytium/octopus/htbserver.h | 251 + drivers/gpu/drm/phytium/octopus/htbuffer.c | 186 + drivers/gpu/drm/phytium/octopus/htbuffer.h | 135 + .../gpu/drm/phytium/octopus/htbuffer_init.h | 114 + drivers/gpu/drm/phytium/octopus/htbuffer_sf.h | 241 + .../gpu/drm/phytium/octopus/htbuffer_types.h | 118 + drivers/gpu/drm/phytium/octopus/img_3dtypes.h | 248 + drivers/gpu/drm/phytium/octopus/img_defs.h | 561 ++ drivers/gpu/drm/phytium/octopus/img_elf.h | 111 + drivers/gpu/drm/phytium/octopus/img_types.h | 298 + .../gpu/drm/phytium/octopus/img_types_check.h | 58 + drivers/gpu/drm/phytium/octopus/info_page.h | 99 + .../drm/phytium/octopus/info_page_client.h | 89 + .../gpu/drm/phytium/octopus/info_page_defs.h | 91 + .../gpu/drm/phytium/octopus/info_page_km.c | 138 + .../drm/phytium/octopus/interrupt_support.c | 151 + .../drm/phytium/octopus/interrupt_support.h | 103 + .../phytium/octopus/kernel_compatibility.h | 497 ++ .../octopus/kernel_config_compatibility.h | 54 + .../gpu/drm/phytium/octopus/kernel_nospec.h | 71 + .../gpu/drm/phytium/octopus/kernel_types.h | 137 + .../drm/phytium/octopus/km/rgx_bvnc_defs_km.h | 262 + .../phytium/octopus/km/rgx_bvnc_table_km.h | 429 + .../drm/phytium/octopus/km/rgx_cr_defs_km.h | 6710 +++++++++++++++ .../gpu/drm/phytium/octopus/km/rgxdefs_km.h | 386 + .../drm/phytium/octopus/km/rgxmmudefs_km.h | 275 + .../gpu/drm/phytium/octopus/km/rgxtbdefs_km.h | 520 ++ drivers/gpu/drm/phytium/octopus/km_apphint.c | 1616 ++++ drivers/gpu/drm/phytium/octopus/km_apphint.h | 99 + .../gpu/drm/phytium/octopus/km_apphint_defs.h | 178 + .../phytium/octopus/km_apphint_defs_common.h | 281 + drivers/gpu/drm/phytium/octopus/linkage.h | 52 + .../gpu/drm/phytium/octopus/linux_sw_sync.h | 66 + drivers/gpu/drm/phytium/octopus/lists.c | 60 + drivers/gpu/drm/phytium/octopus/lists.h | 355 + 
drivers/gpu/drm/phytium/octopus/lock.h | 425 + drivers/gpu/drm/phytium/octopus/lock_types.h | 92 + drivers/gpu/drm/phytium/octopus/log2.h | 417 + drivers/gpu/drm/phytium/octopus/mem_utils.c | 449 + drivers/gpu/drm/phytium/octopus/mmu_common.c | 4446 ++++++++++ drivers/gpu/drm/phytium/octopus/mmu_common.h | 776 ++ .../gpu/drm/phytium/octopus/module_common.c | 534 ++ .../gpu/drm/phytium/octopus/module_common.h | 67 + .../gpu/drm/phytium/octopus/multicore_defs.h | 53 + .../gpu/drm/phytium/octopus/opaque_types.h | 56 + .../gpu/drm/phytium/octopus/os_cpu_cache.h | 69 + .../drm/phytium/octopus/os_srvinit_param.h | 322 + .../drm/phytium/octopus/osconnection_server.c | 155 + .../drm/phytium/octopus/osconnection_server.h | 121 + drivers/gpu/drm/phytium/octopus/osdi_impl.h | 188 + drivers/gpu/drm/phytium/octopus/osfunc.c | 2492 ++++++ drivers/gpu/drm/phytium/octopus/osfunc.h | 1655 ++++ drivers/gpu/drm/phytium/octopus/osfunc_arm.c | 151 + .../gpu/drm/phytium/octopus/osfunc_arm64.c | 288 + .../gpu/drm/phytium/octopus/osfunc_common.h | 266 + .../gpu/drm/phytium/octopus/osfunc_riscv.c | 179 + drivers/gpu/drm/phytium/octopus/osfunc_x86.c | 134 + .../gpu/drm/phytium/octopus/oskm_apphint.h | 176 + drivers/gpu/drm/phytium/octopus/osmmap.h | 116 + drivers/gpu/drm/phytium/octopus/osmmap_stub.c | 146 + .../gpu/drm/phytium/octopus/ospvr_gputrace.h | 167 + drivers/gpu/drm/phytium/octopus/pci_support.c | 726 ++ drivers/gpu/drm/phytium/octopus/pci_support.h | 99 + drivers/gpu/drm/phytium/octopus/pdump.h | 232 + drivers/gpu/drm/phytium/octopus/pdump_km.h | 1149 +++ drivers/gpu/drm/phytium/octopus/pdump_mmu.h | 171 + .../gpu/drm/phytium/octopus/pdump_physmem.h | 243 + .../drm/phytium/octopus/pdump_symbolicaddr.h | 55 + drivers/gpu/drm/phytium/octopus/pdumpdefs.h | 246 + drivers/gpu/drm/phytium/octopus/pdumpdesc.h | 211 + drivers/gpu/drm/phytium/octopus/physheap.c | 562 ++ drivers/gpu/drm/phytium/octopus/physheap.h | 307 + .../gpu/drm/phytium/octopus/physheap_config.h | 142 + 
drivers/gpu/drm/phytium/octopus/physmem.c | 730 ++ drivers/gpu/drm/phytium/octopus/physmem.h | 249 + .../gpu/drm/phytium/octopus/physmem_dmabuf.c | 1279 +++ .../gpu/drm/phytium/octopus/physmem_dmabuf.h | 113 + .../gpu/drm/phytium/octopus/physmem_hostmem.c | 206 + .../gpu/drm/phytium/octopus/physmem_hostmem.h | 65 + drivers/gpu/drm/phytium/octopus/physmem_lma.c | 1753 ++++ drivers/gpu/drm/phytium/octopus/physmem_lma.h | 97 + .../gpu/drm/phytium/octopus/physmem_osmem.h | 129 + .../drm/phytium/octopus/physmem_osmem_linux.c | 3912 +++++++++ .../drm/phytium/octopus/physmem_osmem_linux.h | 49 + .../gpu/drm/phytium/octopus/physmem_test.c | 710 ++ .../gpu/drm/phytium/octopus/physmem_test.h | 51 + .../octopus/phytiumvr/buffer_attribs.h | 147 + .../octopus/phytiumvr/img_drm_fourcc.h | 137 + .../drm/phytium/octopus/phytiumvr/mem_types.h | 64 + .../octopus/phytiumvr/pvrsrv_sync_ext.h | 72 + drivers/gpu/drm/phytium/octopus/pmr.c | 3600 ++++++++ drivers/gpu/drm/phytium/octopus/pmr.h | 1137 +++ drivers/gpu/drm/phytium/octopus/pmr_impl.h | 553 ++ drivers/gpu/drm/phytium/octopus/pmr_os.c | 596 ++ drivers/gpu/drm/phytium/octopus/pmr_os.h | 62 + drivers/gpu/drm/phytium/octopus/power.c | 953 +++ drivers/gpu/drm/phytium/octopus/power.h | 414 + .../gpu/drm/phytium/octopus/private_data.h | 53 + drivers/gpu/drm/phytium/octopus/proc_stats.h | 135 + .../gpu/drm/phytium/octopus/process_stats.c | 3434 ++++++++ .../gpu/drm/phytium/octopus/process_stats.h | 254 + drivers/gpu/drm/phytium/octopus/pvr_bridge.h | 457 + .../gpu/drm/phytium/octopus/pvr_bridge_k.c | 590 ++ .../gpu/drm/phytium/octopus/pvr_bridge_k.h | 103 + .../gpu/drm/phytium/octopus/pvr_buffer_sync.c | 585 ++ .../gpu/drm/phytium/octopus/pvr_buffer_sync.h | 125 + .../phytium/octopus/pvr_buffer_sync_shared.h | 57 + .../phytium/octopus/pvr_counting_timeline.c | 307 + .../phytium/octopus/pvr_counting_timeline.h | 69 + drivers/gpu/drm/phytium/octopus/pvr_debug.c | 481 ++ drivers/gpu/drm/phytium/octopus/pvr_debug.h | 873 ++ 
drivers/gpu/drm/phytium/octopus/pvr_debugfs.c | 609 ++ drivers/gpu/drm/phytium/octopus/pvr_debugfs.h | 50 + .../gpu/drm/phytium/octopus/pvr_dicommon.h | 59 + .../gpu/drm/phytium/octopus/pvr_dma_resv.h | 70 + drivers/gpu/drm/phytium/octopus/pvr_drm.c | 319 + drivers/gpu/drm/phytium/octopus/pvr_drm.h | 83 + .../gpu/drm/phytium/octopus/pvr_drm_core.h | 76 + drivers/gpu/drm/phytium/octopus/pvr_drv.h | 101 + drivers/gpu/drm/phytium/octopus/pvr_dvfs.h | 136 + .../gpu/drm/phytium/octopus/pvr_dvfs_device.c | 670 ++ .../gpu/drm/phytium/octopus/pvr_dvfs_device.h | 58 + .../drm/phytium/octopus/pvr_fd_sync_kernel.h | 76 + drivers/gpu/drm/phytium/octopus/pvr_fence.c | 1046 +++ drivers/gpu/drm/phytium/octopus/pvr_fence.h | 233 + .../gpu/drm/phytium/octopus/pvr_fence_trace.h | 225 + .../gpu/drm/phytium/octopus/pvr_gputrace.c | 1244 +++ .../gpu/drm/phytium/octopus/pvr_intrinsics.h | 70 + .../gpu/drm/phytium/octopus/pvr_ion_stats.h | 80 + .../gpu/drm/phytium/octopus/pvr_linux_fence.h | 103 + .../gpu/drm/phytium/octopus/pvr_notifier.c | 531 ++ .../gpu/drm/phytium/octopus/pvr_notifier.h | 269 + drivers/gpu/drm/phytium/octopus/pvr_pci_drv.c | 243 + drivers/gpu/drm/phytium/octopus/pvr_procfs.h | 50 + .../gpu/drm/phytium/octopus/pvr_ricommon.h | 68 + .../gpu/drm/phytium/octopus/pvr_sw_fence.c | 199 + .../gpu/drm/phytium/octopus/pvr_sw_fence.h | 60 + drivers/gpu/drm/phytium/octopus/pvr_sync.h | 118 + .../gpu/drm/phytium/octopus/pvr_sync_file.c | 1123 +++ drivers/gpu/drm/phytium/octopus/pvr_uaccess.h | 99 + drivers/gpu/drm/phytium/octopus/pvr_vmap.h | 83 + drivers/gpu/drm/phytium/octopus/pvrmodule.h | 48 + drivers/gpu/drm/phytium/octopus/pvrsrv.c | 3286 +++++++ drivers/gpu/drm/phytium/octopus/pvrsrv.h | 551 ++ .../gpu/drm/phytium/octopus/pvrsrv_apphint.h | 71 + .../drm/phytium/octopus/pvrsrv_bridge_init.c | 511 ++ .../drm/phytium/octopus/pvrsrv_bridge_init.h | 57 + .../gpu/drm/phytium/octopus/pvrsrv_cleanup.h | 159 + .../gpu/drm/phytium/octopus/pvrsrv_device.h | 404 + 
.../drm/phytium/octopus/pvrsrv_device_types.h | 55 + .../gpu/drm/phytium/octopus/pvrsrv_devvar.h | 291 + .../gpu/drm/phytium/octopus/pvrsrv_error.c | 61 + .../gpu/drm/phytium/octopus/pvrsrv_error.h | 61 + .../gpu/drm/phytium/octopus/pvrsrv_errors.h | 407 + .../octopus/pvrsrv_memalloc_physheap.h | 85 + .../phytium/octopus/pvrsrv_memallocflags.h | 912 ++ .../octopus/pvrsrv_memallocflags_internal.h | 78 + drivers/gpu/drm/phytium/octopus/pvrsrv_pool.c | 260 + drivers/gpu/drm/phytium/octopus/pvrsrv_pool.h | 135 + .../gpu/drm/phytium/octopus/pvrsrv_sync_km.h | 65 + .../drm/phytium/octopus/pvrsrv_sync_server.h | 277 + .../gpu/drm/phytium/octopus/pvrsrv_tlcommon.h | 261 + .../drm/phytium/octopus/pvrsrv_tlstreams.h | 61 + drivers/gpu/drm/phytium/octopus/pvrsrvkm.mk | 149 + drivers/gpu/drm/phytium/octopus/pvrversion.h | 68 + drivers/gpu/drm/phytium/octopus/ra.c | 1898 +++++ drivers/gpu/drm/phytium/octopus/ra.h | 375 + drivers/gpu/drm/phytium/octopus/rgx_bridge.h | 242 + drivers/gpu/drm/phytium/octopus/rgx_common.h | 254 + .../gpu/drm/phytium/octopus/rgx_compat_bvnc.h | 140 + drivers/gpu/drm/phytium/octopus/rgx_fw_info.h | 135 + .../phytium/octopus/rgx_fwif_alignchecks.h | 184 + .../gpu/drm/phytium/octopus/rgx_fwif_hwperf.h | 119 + drivers/gpu/drm/phytium/octopus/rgx_fwif_km.h | 2292 +++++ .../phytium/octopus/rgx_fwif_resetframework.h | 74 + drivers/gpu/drm/phytium/octopus/rgx_fwif_sf.h | 898 ++ .../gpu/drm/phytium/octopus/rgx_fwif_shared.h | 322 + .../drm/phytium/octopus/rgx_heap_firmware.h | 126 + drivers/gpu/drm/phytium/octopus/rgx_heaps.h | 65 + drivers/gpu/drm/phytium/octopus/rgx_hwperf.h | 1772 ++++ .../drm/phytium/octopus/rgx_hwperf_table.c | 638 ++ .../drm/phytium/octopus/rgx_hwperf_table.h | 479 ++ .../drm/phytium/octopus/rgx_memallocflags.h | 58 + drivers/gpu/drm/phytium/octopus/rgx_meta.h | 385 + drivers/gpu/drm/phytium/octopus/rgx_mips.h | 374 + drivers/gpu/drm/phytium/octopus/rgx_options.h | 294 + .../drm/phytium/octopus/rgx_pdump_panics.h | 64 + 
drivers/gpu/drm/phytium/octopus/rgx_riscv.h | 250 + .../gpu/drm/phytium/octopus/rgx_tq_shared.h | 63 + drivers/gpu/drm/phytium/octopus/rgxapi_km.h | 309 + .../gpu/drm/phytium/octopus/rgxbreakpoint.c | 295 + .../gpu/drm/phytium/octopus/rgxbreakpoint.h | 141 + drivers/gpu/drm/phytium/octopus/rgxbvnc.c | 811 ++ drivers/gpu/drm/phytium/octopus/rgxbvnc.h | 90 + drivers/gpu/drm/phytium/octopus/rgxccb.c | 2723 ++++++ drivers/gpu/drm/phytium/octopus/rgxccb.h | 347 + drivers/gpu/drm/phytium/octopus/rgxcompute.c | 1411 +++ drivers/gpu/drm/phytium/octopus/rgxcompute.h | 183 + drivers/gpu/drm/phytium/octopus/rgxdebug.c | 3846 +++++++++ drivers/gpu/drm/phytium/octopus/rgxdebug.h | 227 + drivers/gpu/drm/phytium/octopus/rgxdevice.h | 824 ++ .../drm/phytium/octopus/rgxfw_log_helper.h | 79 + drivers/gpu/drm/phytium/octopus/rgxfwdbg.c | 282 + drivers/gpu/drm/phytium/octopus/rgxfwdbg.h | 113 + .../gpu/drm/phytium/octopus/rgxfwimageutils.c | 1066 +++ .../gpu/drm/phytium/octopus/rgxfwimageutils.h | 262 + .../drm/phytium/octopus/rgxfwtrace_strings.c | 56 + drivers/gpu/drm/phytium/octopus/rgxfwutils.c | 7548 +++++++++++++++++ drivers/gpu/drm/phytium/octopus/rgxfwutils.h | 1353 +++ .../gpu/drm/phytium/octopus/rgxheapconfig.h | 278 + drivers/gpu/drm/phytium/octopus/rgxhwperf.c | 391 + drivers/gpu/drm/phytium/octopus/rgxhwperf.h | 60 + .../drm/phytium/octopus/rgxhwperf_common.c | 3576 ++++++++ .../drm/phytium/octopus/rgxhwperf_common.h | 488 ++ drivers/gpu/drm/phytium/octopus/rgxinit.c | 4690 ++++++++++ drivers/gpu/drm/phytium/octopus/rgxinit.h | 306 + drivers/gpu/drm/phytium/octopus/rgxkicksync.c | 790 ++ drivers/gpu/drm/phytium/octopus/rgxkicksync.h | 129 + drivers/gpu/drm/phytium/octopus/rgxlayer.h | 509 ++ .../gpu/drm/phytium/octopus/rgxlayer_impl.c | 967 +++ .../gpu/drm/phytium/octopus/rgxlayer_impl.h | 61 + drivers/gpu/drm/phytium/octopus/rgxmem.c | 936 ++ drivers/gpu/drm/phytium/octopus/rgxmem.h | 147 + .../gpu/drm/phytium/octopus/rgxmipsmmuinit.c | 1007 +++ 
.../gpu/drm/phytium/octopus/rgxmipsmmuinit.h | 94 + drivers/gpu/drm/phytium/octopus/rgxmmuinit.c | 1272 +++ drivers/gpu/drm/phytium/octopus/rgxmmuinit.h | 61 + .../gpu/drm/phytium/octopus/rgxmulticore.c | 213 + .../gpu/drm/phytium/octopus/rgxmulticore.h | 54 + drivers/gpu/drm/phytium/octopus/rgxpmdefs.h | 5019 +++++++++++ drivers/gpu/drm/phytium/octopus/rgxpower.c | 1562 ++++ drivers/gpu/drm/phytium/octopus/rgxpower.h | 272 + drivers/gpu/drm/phytium/octopus/rgxray.c | 732 ++ drivers/gpu/drm/phytium/octopus/rgxray.h | 112 + .../gpu/drm/phytium/octopus/rgxregconfig.c | 315 + .../gpu/drm/phytium/octopus/rgxregconfig.h | 130 + drivers/gpu/drm/phytium/octopus/rgxshader.c | 313 + drivers/gpu/drm/phytium/octopus/rgxshader.h | 85 + drivers/gpu/drm/phytium/octopus/rgxsignals.c | 99 + drivers/gpu/drm/phytium/octopus/rgxsignals.h | 71 + drivers/gpu/drm/phytium/octopus/rgxsrvinit.c | 1423 ++++ .../gpu/drm/phytium/octopus/rgxstartstop.c | 851 ++ .../gpu/drm/phytium/octopus/rgxstartstop.h | 84 + .../gpu/drm/phytium/octopus/rgxsyncutils.c | 184 + .../gpu/drm/phytium/octopus/rgxsyncutils.h | 76 + drivers/gpu/drm/phytium/octopus/rgxta3d.c | 5481 ++++++++++++ drivers/gpu/drm/phytium/octopus/rgxta3d.h | 507 ++ .../gpu/drm/phytium/octopus/rgxtdmtransfer.c | 1325 +++ .../gpu/drm/phytium/octopus/rgxtdmtransfer.h | 132 + drivers/gpu/drm/phytium/octopus/rgxtimecorr.c | 645 ++ drivers/gpu/drm/phytium/octopus/rgxtimecorr.h | 269 + .../gpu/drm/phytium/octopus/rgxtimerquery.c | 243 + .../gpu/drm/phytium/octopus/rgxtimerquery.h | 123 + drivers/gpu/drm/phytium/octopus/rgxtransfer.c | 1712 ++++ drivers/gpu/drm/phytium/octopus/rgxtransfer.h | 153 + .../drm/phytium/octopus/rgxtransfer_shader.h | 61 + drivers/gpu/drm/phytium/octopus/rgxutils.c | 221 + drivers/gpu/drm/phytium/octopus/rgxutils.h | 185 + drivers/gpu/drm/phytium/octopus/ri_server.c | 2114 +++++ drivers/gpu/drm/phytium/octopus/ri_server.h | 106 + drivers/gpu/drm/phytium/octopus/ri_typedefs.h | 52 + 
.../drm/phytium/octopus/rogue_trace_events.h | 543 ++ .../drm/phytium/octopus/server_cache_bridge.c | 448 + .../drm/phytium/octopus/server_cmm_bridge.c | 410 + .../octopus/server_devicememhistory_bridge.c | 772 ++ .../drm/phytium/octopus/server_di_bridge.c | 594 ++ .../drm/phytium/octopus/server_dma_bridge.c | 469 + .../phytium/octopus/server_dmabuf_bridge.c | 492 ++ .../phytium/octopus/server_htbuffer_bridge.c | 322 + .../drm/phytium/octopus/server_mm_bridge.c | 3093 +++++++ .../drm/phytium/octopus/server_pvrtl_bridge.c | 785 ++ .../octopus/server_rgxbreakpoint_bridge.c | 372 + .../phytium/octopus/server_rgxcmp_bridge.c | 1013 +++ .../phytium/octopus/server_rgxfwdbg_bridge.c | 306 + .../phytium/octopus/server_rgxhwperf_bridge.c | 363 + .../octopus/server_rgxkicksync_bridge.c | 559 ++ .../phytium/octopus/server_rgxray_bridge.c | 600 ++ .../octopus/server_rgxregconfig_bridge.c | 241 + .../octopus/server_rgxsignals_bridge.c | 168 + .../phytium/octopus/server_rgxta3d_bridge.c | 2172 +++++ .../octopus/server_rgxtimerquery_bridge.c | 168 + .../phytium/octopus/server_rgxtq2_bridge.c | 1166 +++ .../drm/phytium/octopus/server_rgxtq_bridge.c | 1162 +++ .../drm/phytium/octopus/server_ri_bridge.c | 729 ++ .../phytium/octopus/server_srvcore_bridge.c | 1019 +++ .../drm/phytium/octopus/server_sync_bridge.c | 730 ++ .../octopus/server_synctracking_bridge.c | 317 + .../phytium/octopus/services_kernel_client.h | 267 + drivers/gpu/drm/phytium/octopus/services_km.h | 180 + drivers/gpu/drm/phytium/octopus/servicesext.h | 172 + drivers/gpu/drm/phytium/octopus/sofunc_pvr.h | 94 + drivers/gpu/drm/phytium/octopus/sofunc_rgx.h | 95 + drivers/gpu/drm/phytium/octopus/srvcore.c | 1421 ++++ drivers/gpu/drm/phytium/octopus/srvcore.h | 216 + drivers/gpu/drm/phytium/octopus/srvinit.h | 68 + drivers/gpu/drm/phytium/octopus/srvkm.h | 145 + drivers/gpu/drm/phytium/octopus/sync.c | 899 ++ drivers/gpu/drm/phytium/octopus/sync.h | 316 + .../gpu/drm/phytium/octopus/sync_checkpoint.c | 2979 +++++++ 
.../gpu/drm/phytium/octopus/sync_checkpoint.h | 665 ++ .../octopus/sync_checkpoint_external.h | 83 + .../phytium/octopus/sync_checkpoint_init.h | 82 + .../octopus/sync_checkpoint_internal.h | 270 + .../phytium/octopus/sync_fallback_server.h | 200 + .../gpu/drm/phytium/octopus/sync_internal.h | 127 + .../drm/phytium/octopus/sync_prim_internal.h | 84 + drivers/gpu/drm/phytium/octopus/sync_server.c | 1227 +++ drivers/gpu/drm/phytium/octopus/sync_server.h | 266 + drivers/gpu/drm/phytium/octopus/syscommon.h | 146 + drivers/gpu/drm/phytium/octopus/sysconfig.c | 976 +++ drivers/gpu/drm/phytium/octopus/sysconfig.h | 66 + .../gpu/drm/phytium/octopus/sysconfig_cmn.c | 132 + drivers/gpu/drm/phytium/octopus/sysinfo.h | 44 + .../gpu/drm/phytium/octopus/sysvalidation.h | 63 + drivers/gpu/drm/phytium/octopus/tlclient.c | 469 + drivers/gpu/drm/phytium/octopus/tlclient.h | 257 + drivers/gpu/drm/phytium/octopus/tlintern.c | 473 ++ drivers/gpu/drm/phytium/octopus/tlintern.h | 345 + drivers/gpu/drm/phytium/octopus/tlserver.c | 747 ++ drivers/gpu/drm/phytium/octopus/tlserver.h | 97 + drivers/gpu/drm/phytium/octopus/tlstream.c | 1622 ++++ drivers/gpu/drm/phytium/octopus/tlstream.h | 600 ++ .../gpu/drm/phytium/octopus/trace_events.c | 265 + .../gpu/drm/phytium/octopus/trace_events.h | 198 + .../drm/phytium/octopus/uniq_key_splay_tree.c | 280 + .../drm/phytium/octopus/uniq_key_splay_tree.h | 90 + drivers/gpu/drm/phytium/octopus/vmm_impl.h | 186 + .../gpu/drm/phytium/octopus/vmm_pvz_client.c | 138 + .../gpu/drm/phytium/octopus/vmm_pvz_client.h | 77 + .../gpu/drm/phytium/octopus/vmm_pvz_common.h | 65 + .../gpu/drm/phytium/octopus/vmm_pvz_server.c | 245 + .../gpu/drm/phytium/octopus/vmm_pvz_server.h | 121 + .../gpu/drm/phytium/octopus/vmm_type_stub.c | 119 + drivers/gpu/drm/phytium/octopus/vz_vm.h | 61 + drivers/gpu/drm/phytium/octopus/vz_vmm_pvz.c | 183 + drivers/gpu/drm/phytium/octopus/vz_vmm_pvz.h | 79 + drivers/gpu/drm/phytium/octopus/vz_vmm_vm.c | 223 + 
drivers/gpu/drm/phytium/phytium_crtc.c | 476 +- drivers/gpu/drm/phytium/phytium_crtc.h | 19 +- drivers/gpu/drm/phytium/phytium_debugfs.c | 84 +- drivers/gpu/drm/phytium/phytium_debugfs.h | 12 +- drivers/gpu/drm/phytium/phytium_display_drv.c | 81 +- drivers/gpu/drm/phytium/phytium_display_drv.h | 47 +- drivers/gpu/drm/phytium/phytium_dp.c | 820 +- drivers/gpu/drm/phytium/phytium_dp.h | 64 +- drivers/gpu/drm/phytium/phytium_fb.c | 12 +- drivers/gpu/drm/phytium/phytium_fb.h | 12 +- drivers/gpu/drm/phytium/phytium_fbdev.c | 12 +- drivers/gpu/drm/phytium/phytium_fbdev.h | 12 +- drivers/gpu/drm/phytium/phytium_gem.c | 12 +- drivers/gpu/drm/phytium/phytium_gem.h | 12 +- drivers/gpu/drm/phytium/phytium_panel.c | 12 +- drivers/gpu/drm/phytium/phytium_panel.h | 12 +- drivers/gpu/drm/phytium/phytium_pci.c | 29 +- drivers/gpu/drm/phytium/phytium_pci.h | 13 +- drivers/gpu/drm/phytium/phytium_plane.c | 358 +- drivers/gpu/drm/phytium/phytium_plane.h | 17 +- drivers/gpu/drm/phytium/phytium_reg.h | 613 +- drivers/gpu/drm/phytium/x100_dc.c | 800 +- drivers/gpu/drm/phytium/x100_dc.h | 25 +- drivers/gpu/drm/phytium/x100_dp.c | 1278 ++- drivers/gpu/drm/phytium/x100_dp.h | 12 +- drivers/gpu/drm/phytium/x100_reg.h | 411 +- 464 files changed, 225239 insertions(+), 2213 deletions(-) create mode 100644 drivers/gpu/drm/phytium/octopus/Kconfig create mode 100644 drivers/gpu/drm/phytium/octopus/Makefile create mode 100644 drivers/gpu/drm/phytium/octopus/allocmem.c create mode 100644 drivers/gpu/drm/phytium/octopus/allocmem.h create mode 100644 drivers/gpu/drm/phytium/octopus/cache_km.c create mode 100644 drivers/gpu/drm/phytium/octopus/cache_km.h create mode 100644 drivers/gpu/drm/phytium/octopus/cache_ops.h create mode 100644 drivers/gpu/drm/phytium/octopus/client_cache_bridge.h create mode 100644 drivers/gpu/drm/phytium/octopus/client_cache_direct_bridge.c create mode 100644 drivers/gpu/drm/phytium/octopus/client_devicememhistory_bridge.h create mode 100644 
drivers/gpu/drm/phytium/octopus/client_devicememhistory_direct_bridge.c create mode 100644 drivers/gpu/drm/phytium/octopus/client_htbuffer_bridge.h create mode 100644 drivers/gpu/drm/phytium/octopus/client_htbuffer_direct_bridge.c create mode 100644 drivers/gpu/drm/phytium/octopus/client_mm_bridge.h create mode 100644 drivers/gpu/drm/phytium/octopus/client_mm_direct_bridge.c create mode 100644 drivers/gpu/drm/phytium/octopus/client_pvrtl_bridge.h create mode 100644 drivers/gpu/drm/phytium/octopus/client_pvrtl_direct_bridge.c create mode 100644 drivers/gpu/drm/phytium/octopus/client_ri_bridge.h create mode 100644 drivers/gpu/drm/phytium/octopus/client_ri_direct_bridge.c create mode 100644 drivers/gpu/drm/phytium/octopus/client_sync_bridge.h create mode 100644 drivers/gpu/drm/phytium/octopus/client_sync_direct_bridge.c create mode 100644 drivers/gpu/drm/phytium/octopus/client_synctracking_bridge.h create mode 100644 drivers/gpu/drm/phytium/octopus/client_synctracking_direct_bridge.c create mode 100644 drivers/gpu/drm/phytium/octopus/common_cache_bridge.h create mode 100644 drivers/gpu/drm/phytium/octopus/common_cmm_bridge.h create mode 100644 drivers/gpu/drm/phytium/octopus/common_devicememhistory_bridge.h create mode 100644 drivers/gpu/drm/phytium/octopus/common_di_bridge.h create mode 100644 drivers/gpu/drm/phytium/octopus/common_dma_bridge.h create mode 100644 drivers/gpu/drm/phytium/octopus/common_dmabuf_bridge.h create mode 100644 drivers/gpu/drm/phytium/octopus/common_htbuffer_bridge.h create mode 100644 drivers/gpu/drm/phytium/octopus/common_mm_bridge.h create mode 100644 drivers/gpu/drm/phytium/octopus/common_pvrtl_bridge.h create mode 100644 drivers/gpu/drm/phytium/octopus/common_rgxbreakpoint_bridge.h create mode 100644 drivers/gpu/drm/phytium/octopus/common_rgxcmp_bridge.h create mode 100644 drivers/gpu/drm/phytium/octopus/common_rgxfwdbg_bridge.h create mode 100644 drivers/gpu/drm/phytium/octopus/common_rgxhwperf_bridge.h create mode 100644 
drivers/gpu/drm/phytium/octopus/common_rgxkicksync_bridge.h create mode 100644 drivers/gpu/drm/phytium/octopus/common_rgxray_bridge.h create mode 100644 drivers/gpu/drm/phytium/octopus/common_rgxregconfig_bridge.h create mode 100644 drivers/gpu/drm/phytium/octopus/common_rgxsignals_bridge.h create mode 100644 drivers/gpu/drm/phytium/octopus/common_rgxta3d_bridge.h create mode 100644 drivers/gpu/drm/phytium/octopus/common_rgxtimerquery_bridge.h create mode 100644 drivers/gpu/drm/phytium/octopus/common_rgxtq2_bridge.h create mode 100644 drivers/gpu/drm/phytium/octopus/common_rgxtq_bridge.h create mode 100644 drivers/gpu/drm/phytium/octopus/common_ri_bridge.h create mode 100644 drivers/gpu/drm/phytium/octopus/common_srvcore_bridge.h create mode 100644 drivers/gpu/drm/phytium/octopus/common_sync_bridge.h create mode 100644 drivers/gpu/drm/phytium/octopus/common_synctracking_bridge.h create mode 100644 drivers/gpu/drm/phytium/octopus/config_kernel.h create mode 100644 drivers/gpu/drm/phytium/octopus/config_kernel.mk create mode 100644 drivers/gpu/drm/phytium/octopus/configs/rgxconfig_km_30.V.816.20.h create mode 100644 drivers/gpu/drm/phytium/octopus/connection_server.c create mode 100644 drivers/gpu/drm/phytium/octopus/connection_server.h create mode 100644 drivers/gpu/drm/phytium/octopus/cores/rgxcore_km_30.3.816.20.h create mode 100644 drivers/gpu/drm/phytium/octopus/debug_common.c create mode 100644 drivers/gpu/drm/phytium/octopus/debug_common.h create mode 100644 drivers/gpu/drm/phytium/octopus/device.h create mode 100644 drivers/gpu/drm/phytium/octopus/device_connection.h create mode 100644 drivers/gpu/drm/phytium/octopus/devicemem.c create mode 100644 drivers/gpu/drm/phytium/octopus/devicemem.h create mode 100644 drivers/gpu/drm/phytium/octopus/devicemem_heapcfg.c create mode 100644 drivers/gpu/drm/phytium/octopus/devicemem_heapcfg.h create mode 100644 drivers/gpu/drm/phytium/octopus/devicemem_history_server.c create mode 100644 
drivers/gpu/drm/phytium/octopus/devicemem_history_server.h create mode 100644 drivers/gpu/drm/phytium/octopus/devicemem_pdump.h create mode 100644 drivers/gpu/drm/phytium/octopus/devicemem_server.c create mode 100644 drivers/gpu/drm/phytium/octopus/devicemem_server.h create mode 100644 drivers/gpu/drm/phytium/octopus/devicemem_server_utils.h create mode 100644 drivers/gpu/drm/phytium/octopus/devicemem_typedefs.h create mode 100644 drivers/gpu/drm/phytium/octopus/devicemem_utils.c create mode 100644 drivers/gpu/drm/phytium/octopus/devicemem_utils.h create mode 100644 drivers/gpu/drm/phytium/octopus/di_common.h create mode 100644 drivers/gpu/drm/phytium/octopus/di_impl_brg.c create mode 100644 drivers/gpu/drm/phytium/octopus/di_impl_brg.h create mode 100644 drivers/gpu/drm/phytium/octopus/di_impl_brg_intern.h create mode 100644 drivers/gpu/drm/phytium/octopus/di_server.c create mode 100644 drivers/gpu/drm/phytium/octopus/di_server.h create mode 100644 drivers/gpu/drm/phytium/octopus/dllist.h create mode 100644 drivers/gpu/drm/phytium/octopus/dma_flags.h create mode 100644 drivers/gpu/drm/phytium/octopus/dma_km.c create mode 100644 drivers/gpu/drm/phytium/octopus/dma_km.h create mode 100644 drivers/gpu/drm/phytium/octopus/dma_support.c create mode 100644 drivers/gpu/drm/phytium/octopus/dma_support.h create mode 100644 drivers/gpu/drm/phytium/octopus/env_connection.h create mode 100644 drivers/gpu/drm/phytium/octopus/event.c create mode 100644 drivers/gpu/drm/phytium/octopus/event.h create mode 100644 drivers/gpu/drm/phytium/octopus/fwload.c create mode 100644 drivers/gpu/drm/phytium/octopus/fwload.h create mode 100644 drivers/gpu/drm/phytium/octopus/fwtrace_string.h create mode 100644 drivers/gpu/drm/phytium/octopus/handle.c create mode 100644 drivers/gpu/drm/phytium/octopus/handle.h create mode 100644 drivers/gpu/drm/phytium/octopus/handle_idr.c create mode 100644 drivers/gpu/drm/phytium/octopus/handle_impl.h create mode 100644 
drivers/gpu/drm/phytium/octopus/handle_types.h create mode 100644 drivers/gpu/drm/phytium/octopus/hash.c create mode 100644 drivers/gpu/drm/phytium/octopus/hash.h create mode 100644 drivers/gpu/drm/phytium/octopus/htb_debug.c create mode 100644 drivers/gpu/drm/phytium/octopus/htb_debug.h create mode 100644 drivers/gpu/drm/phytium/octopus/htbserver.c create mode 100644 drivers/gpu/drm/phytium/octopus/htbserver.h create mode 100644 drivers/gpu/drm/phytium/octopus/htbuffer.c create mode 100644 drivers/gpu/drm/phytium/octopus/htbuffer.h create mode 100644 drivers/gpu/drm/phytium/octopus/htbuffer_init.h create mode 100644 drivers/gpu/drm/phytium/octopus/htbuffer_sf.h create mode 100644 drivers/gpu/drm/phytium/octopus/htbuffer_types.h create mode 100644 drivers/gpu/drm/phytium/octopus/img_3dtypes.h create mode 100644 drivers/gpu/drm/phytium/octopus/img_defs.h create mode 100644 drivers/gpu/drm/phytium/octopus/img_elf.h create mode 100644 drivers/gpu/drm/phytium/octopus/img_types.h create mode 100644 drivers/gpu/drm/phytium/octopus/img_types_check.h create mode 100644 drivers/gpu/drm/phytium/octopus/info_page.h create mode 100644 drivers/gpu/drm/phytium/octopus/info_page_client.h create mode 100644 drivers/gpu/drm/phytium/octopus/info_page_defs.h create mode 100644 drivers/gpu/drm/phytium/octopus/info_page_km.c create mode 100644 drivers/gpu/drm/phytium/octopus/interrupt_support.c create mode 100644 drivers/gpu/drm/phytium/octopus/interrupt_support.h create mode 100644 drivers/gpu/drm/phytium/octopus/kernel_compatibility.h create mode 100644 drivers/gpu/drm/phytium/octopus/kernel_config_compatibility.h create mode 100644 drivers/gpu/drm/phytium/octopus/kernel_nospec.h create mode 100644 drivers/gpu/drm/phytium/octopus/kernel_types.h create mode 100644 drivers/gpu/drm/phytium/octopus/km/rgx_bvnc_defs_km.h create mode 100644 drivers/gpu/drm/phytium/octopus/km/rgx_bvnc_table_km.h create mode 100644 drivers/gpu/drm/phytium/octopus/km/rgx_cr_defs_km.h create mode 100644 
drivers/gpu/drm/phytium/octopus/km/rgxdefs_km.h create mode 100644 drivers/gpu/drm/phytium/octopus/km/rgxmmudefs_km.h create mode 100644 drivers/gpu/drm/phytium/octopus/km/rgxtbdefs_km.h create mode 100644 drivers/gpu/drm/phytium/octopus/km_apphint.c create mode 100644 drivers/gpu/drm/phytium/octopus/km_apphint.h create mode 100644 drivers/gpu/drm/phytium/octopus/km_apphint_defs.h create mode 100644 drivers/gpu/drm/phytium/octopus/km_apphint_defs_common.h create mode 100644 drivers/gpu/drm/phytium/octopus/linkage.h create mode 100644 drivers/gpu/drm/phytium/octopus/linux_sw_sync.h create mode 100644 drivers/gpu/drm/phytium/octopus/lists.c create mode 100644 drivers/gpu/drm/phytium/octopus/lists.h create mode 100644 drivers/gpu/drm/phytium/octopus/lock.h create mode 100644 drivers/gpu/drm/phytium/octopus/lock_types.h create mode 100644 drivers/gpu/drm/phytium/octopus/log2.h create mode 100644 drivers/gpu/drm/phytium/octopus/mem_utils.c create mode 100644 drivers/gpu/drm/phytium/octopus/mmu_common.c create mode 100644 drivers/gpu/drm/phytium/octopus/mmu_common.h create mode 100644 drivers/gpu/drm/phytium/octopus/module_common.c create mode 100644 drivers/gpu/drm/phytium/octopus/module_common.h create mode 100644 drivers/gpu/drm/phytium/octopus/multicore_defs.h create mode 100644 drivers/gpu/drm/phytium/octopus/opaque_types.h create mode 100644 drivers/gpu/drm/phytium/octopus/os_cpu_cache.h create mode 100644 drivers/gpu/drm/phytium/octopus/os_srvinit_param.h create mode 100644 drivers/gpu/drm/phytium/octopus/osconnection_server.c create mode 100644 drivers/gpu/drm/phytium/octopus/osconnection_server.h create mode 100644 drivers/gpu/drm/phytium/octopus/osdi_impl.h create mode 100644 drivers/gpu/drm/phytium/octopus/osfunc.c create mode 100644 drivers/gpu/drm/phytium/octopus/osfunc.h create mode 100644 drivers/gpu/drm/phytium/octopus/osfunc_arm.c create mode 100644 drivers/gpu/drm/phytium/octopus/osfunc_arm64.c create mode 100644 
drivers/gpu/drm/phytium/octopus/osfunc_common.h create mode 100644 drivers/gpu/drm/phytium/octopus/osfunc_riscv.c create mode 100644 drivers/gpu/drm/phytium/octopus/osfunc_x86.c create mode 100644 drivers/gpu/drm/phytium/octopus/oskm_apphint.h create mode 100644 drivers/gpu/drm/phytium/octopus/osmmap.h create mode 100644 drivers/gpu/drm/phytium/octopus/osmmap_stub.c create mode 100644 drivers/gpu/drm/phytium/octopus/ospvr_gputrace.h create mode 100644 drivers/gpu/drm/phytium/octopus/pci_support.c create mode 100644 drivers/gpu/drm/phytium/octopus/pci_support.h create mode 100644 drivers/gpu/drm/phytium/octopus/pdump.h create mode 100644 drivers/gpu/drm/phytium/octopus/pdump_km.h create mode 100644 drivers/gpu/drm/phytium/octopus/pdump_mmu.h create mode 100644 drivers/gpu/drm/phytium/octopus/pdump_physmem.h create mode 100644 drivers/gpu/drm/phytium/octopus/pdump_symbolicaddr.h create mode 100644 drivers/gpu/drm/phytium/octopus/pdumpdefs.h create mode 100644 drivers/gpu/drm/phytium/octopus/pdumpdesc.h create mode 100644 drivers/gpu/drm/phytium/octopus/physheap.c create mode 100644 drivers/gpu/drm/phytium/octopus/physheap.h create mode 100644 drivers/gpu/drm/phytium/octopus/physheap_config.h create mode 100644 drivers/gpu/drm/phytium/octopus/physmem.c create mode 100644 drivers/gpu/drm/phytium/octopus/physmem.h create mode 100644 drivers/gpu/drm/phytium/octopus/physmem_dmabuf.c create mode 100644 drivers/gpu/drm/phytium/octopus/physmem_dmabuf.h create mode 100644 drivers/gpu/drm/phytium/octopus/physmem_hostmem.c create mode 100644 drivers/gpu/drm/phytium/octopus/physmem_hostmem.h create mode 100644 drivers/gpu/drm/phytium/octopus/physmem_lma.c create mode 100644 drivers/gpu/drm/phytium/octopus/physmem_lma.h create mode 100644 drivers/gpu/drm/phytium/octopus/physmem_osmem.h create mode 100644 drivers/gpu/drm/phytium/octopus/physmem_osmem_linux.c create mode 100644 drivers/gpu/drm/phytium/octopus/physmem_osmem_linux.h create mode 100644 
drivers/gpu/drm/phytium/octopus/physmem_test.c create mode 100644 drivers/gpu/drm/phytium/octopus/physmem_test.h create mode 100644 drivers/gpu/drm/phytium/octopus/phytiumvr/buffer_attribs.h create mode 100644 drivers/gpu/drm/phytium/octopus/phytiumvr/img_drm_fourcc.h create mode 100644 drivers/gpu/drm/phytium/octopus/phytiumvr/mem_types.h create mode 100644 drivers/gpu/drm/phytium/octopus/phytiumvr/pvrsrv_sync_ext.h create mode 100644 drivers/gpu/drm/phytium/octopus/pmr.c create mode 100644 drivers/gpu/drm/phytium/octopus/pmr.h create mode 100644 drivers/gpu/drm/phytium/octopus/pmr_impl.h create mode 100644 drivers/gpu/drm/phytium/octopus/pmr_os.c create mode 100644 drivers/gpu/drm/phytium/octopus/pmr_os.h create mode 100644 drivers/gpu/drm/phytium/octopus/power.c create mode 100644 drivers/gpu/drm/phytium/octopus/power.h create mode 100644 drivers/gpu/drm/phytium/octopus/private_data.h create mode 100644 drivers/gpu/drm/phytium/octopus/proc_stats.h create mode 100644 drivers/gpu/drm/phytium/octopus/process_stats.c create mode 100644 drivers/gpu/drm/phytium/octopus/process_stats.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_bridge.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_bridge_k.c create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_bridge_k.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_buffer_sync.c create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_buffer_sync.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_buffer_sync_shared.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_counting_timeline.c create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_counting_timeline.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_debug.c create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_debug.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_debugfs.c create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_debugfs.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_dicommon.h create mode 100644 
drivers/gpu/drm/phytium/octopus/pvr_dma_resv.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_drm.c create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_drm.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_drm_core.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_drv.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_dvfs.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_dvfs_device.c create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_dvfs_device.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_fd_sync_kernel.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_fence.c create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_fence.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_fence_trace.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_gputrace.c create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_intrinsics.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_ion_stats.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_linux_fence.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_notifier.c create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_notifier.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_pci_drv.c create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_procfs.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_ricommon.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_sw_fence.c create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_sw_fence.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_sync.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_sync_file.c create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_uaccess.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvr_vmap.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvrmodule.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvrsrv.c create mode 100644 drivers/gpu/drm/phytium/octopus/pvrsrv.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvrsrv_apphint.h 
create mode 100644 drivers/gpu/drm/phytium/octopus/pvrsrv_bridge_init.c create mode 100644 drivers/gpu/drm/phytium/octopus/pvrsrv_bridge_init.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvrsrv_cleanup.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvrsrv_device.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvrsrv_device_types.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvrsrv_devvar.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvrsrv_error.c create mode 100644 drivers/gpu/drm/phytium/octopus/pvrsrv_error.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvrsrv_errors.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvrsrv_memalloc_physheap.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvrsrv_memallocflags.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvrsrv_memallocflags_internal.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvrsrv_pool.c create mode 100644 drivers/gpu/drm/phytium/octopus/pvrsrv_pool.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvrsrv_sync_km.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvrsrv_sync_server.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvrsrv_tlcommon.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvrsrv_tlstreams.h create mode 100644 drivers/gpu/drm/phytium/octopus/pvrsrvkm.mk create mode 100644 drivers/gpu/drm/phytium/octopus/pvrversion.h create mode 100644 drivers/gpu/drm/phytium/octopus/ra.c create mode 100644 drivers/gpu/drm/phytium/octopus/ra.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgx_bridge.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgx_common.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgx_compat_bvnc.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgx_fw_info.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgx_fwif_alignchecks.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgx_fwif_hwperf.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgx_fwif_km.h create mode 100644 
drivers/gpu/drm/phytium/octopus/rgx_fwif_resetframework.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgx_fwif_sf.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgx_fwif_shared.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgx_heap_firmware.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgx_heaps.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgx_hwperf.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgx_hwperf_table.c create mode 100644 drivers/gpu/drm/phytium/octopus/rgx_hwperf_table.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgx_memallocflags.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgx_meta.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgx_mips.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgx_options.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgx_pdump_panics.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgx_riscv.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgx_tq_shared.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxapi_km.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxbreakpoint.c create mode 100644 drivers/gpu/drm/phytium/octopus/rgxbreakpoint.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxbvnc.c create mode 100644 drivers/gpu/drm/phytium/octopus/rgxbvnc.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxccb.c create mode 100644 drivers/gpu/drm/phytium/octopus/rgxccb.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxcompute.c create mode 100644 drivers/gpu/drm/phytium/octopus/rgxcompute.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxdebug.c create mode 100644 drivers/gpu/drm/phytium/octopus/rgxdebug.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxdevice.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxfw_log_helper.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxfwdbg.c create mode 100644 drivers/gpu/drm/phytium/octopus/rgxfwdbg.h create mode 100644 
drivers/gpu/drm/phytium/octopus/rgxfwimageutils.c create mode 100644 drivers/gpu/drm/phytium/octopus/rgxfwimageutils.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxfwtrace_strings.c create mode 100644 drivers/gpu/drm/phytium/octopus/rgxfwutils.c create mode 100644 drivers/gpu/drm/phytium/octopus/rgxfwutils.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxheapconfig.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxhwperf.c create mode 100644 drivers/gpu/drm/phytium/octopus/rgxhwperf.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxhwperf_common.c create mode 100644 drivers/gpu/drm/phytium/octopus/rgxhwperf_common.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxinit.c create mode 100644 drivers/gpu/drm/phytium/octopus/rgxinit.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxkicksync.c create mode 100644 drivers/gpu/drm/phytium/octopus/rgxkicksync.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxlayer.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxlayer_impl.c create mode 100644 drivers/gpu/drm/phytium/octopus/rgxlayer_impl.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxmem.c create mode 100644 drivers/gpu/drm/phytium/octopus/rgxmem.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxmipsmmuinit.c create mode 100644 drivers/gpu/drm/phytium/octopus/rgxmipsmmuinit.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxmmuinit.c create mode 100644 drivers/gpu/drm/phytium/octopus/rgxmmuinit.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxmulticore.c create mode 100644 drivers/gpu/drm/phytium/octopus/rgxmulticore.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxpmdefs.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxpower.c create mode 100644 drivers/gpu/drm/phytium/octopus/rgxpower.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxray.c create mode 100644 drivers/gpu/drm/phytium/octopus/rgxray.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxregconfig.c create 
mode 100644 drivers/gpu/drm/phytium/octopus/rgxregconfig.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxshader.c create mode 100644 drivers/gpu/drm/phytium/octopus/rgxshader.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxsignals.c create mode 100644 drivers/gpu/drm/phytium/octopus/rgxsignals.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxsrvinit.c create mode 100644 drivers/gpu/drm/phytium/octopus/rgxstartstop.c create mode 100644 drivers/gpu/drm/phytium/octopus/rgxstartstop.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxsyncutils.c create mode 100644 drivers/gpu/drm/phytium/octopus/rgxsyncutils.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxta3d.c create mode 100644 drivers/gpu/drm/phytium/octopus/rgxta3d.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxtdmtransfer.c create mode 100644 drivers/gpu/drm/phytium/octopus/rgxtdmtransfer.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxtimecorr.c create mode 100644 drivers/gpu/drm/phytium/octopus/rgxtimecorr.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxtimerquery.c create mode 100644 drivers/gpu/drm/phytium/octopus/rgxtimerquery.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxtransfer.c create mode 100644 drivers/gpu/drm/phytium/octopus/rgxtransfer.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxtransfer_shader.h create mode 100644 drivers/gpu/drm/phytium/octopus/rgxutils.c create mode 100644 drivers/gpu/drm/phytium/octopus/rgxutils.h create mode 100644 drivers/gpu/drm/phytium/octopus/ri_server.c create mode 100644 drivers/gpu/drm/phytium/octopus/ri_server.h create mode 100644 drivers/gpu/drm/phytium/octopus/ri_typedefs.h create mode 100644 drivers/gpu/drm/phytium/octopus/rogue_trace_events.h create mode 100644 drivers/gpu/drm/phytium/octopus/server_cache_bridge.c create mode 100644 drivers/gpu/drm/phytium/octopus/server_cmm_bridge.c create mode 100644 drivers/gpu/drm/phytium/octopus/server_devicememhistory_bridge.c create mode 100644 
drivers/gpu/drm/phytium/octopus/server_di_bridge.c create mode 100644 drivers/gpu/drm/phytium/octopus/server_dma_bridge.c create mode 100644 drivers/gpu/drm/phytium/octopus/server_dmabuf_bridge.c create mode 100644 drivers/gpu/drm/phytium/octopus/server_htbuffer_bridge.c create mode 100644 drivers/gpu/drm/phytium/octopus/server_mm_bridge.c create mode 100644 drivers/gpu/drm/phytium/octopus/server_pvrtl_bridge.c create mode 100644 drivers/gpu/drm/phytium/octopus/server_rgxbreakpoint_bridge.c create mode 100644 drivers/gpu/drm/phytium/octopus/server_rgxcmp_bridge.c create mode 100644 drivers/gpu/drm/phytium/octopus/server_rgxfwdbg_bridge.c create mode 100644 drivers/gpu/drm/phytium/octopus/server_rgxhwperf_bridge.c create mode 100644 drivers/gpu/drm/phytium/octopus/server_rgxkicksync_bridge.c create mode 100644 drivers/gpu/drm/phytium/octopus/server_rgxray_bridge.c create mode 100644 drivers/gpu/drm/phytium/octopus/server_rgxregconfig_bridge.c create mode 100644 drivers/gpu/drm/phytium/octopus/server_rgxsignals_bridge.c create mode 100644 drivers/gpu/drm/phytium/octopus/server_rgxta3d_bridge.c create mode 100644 drivers/gpu/drm/phytium/octopus/server_rgxtimerquery_bridge.c create mode 100644 drivers/gpu/drm/phytium/octopus/server_rgxtq2_bridge.c create mode 100644 drivers/gpu/drm/phytium/octopus/server_rgxtq_bridge.c create mode 100644 drivers/gpu/drm/phytium/octopus/server_ri_bridge.c create mode 100644 drivers/gpu/drm/phytium/octopus/server_srvcore_bridge.c create mode 100644 drivers/gpu/drm/phytium/octopus/server_sync_bridge.c create mode 100644 drivers/gpu/drm/phytium/octopus/server_synctracking_bridge.c create mode 100644 drivers/gpu/drm/phytium/octopus/services_kernel_client.h create mode 100644 drivers/gpu/drm/phytium/octopus/services_km.h create mode 100644 drivers/gpu/drm/phytium/octopus/servicesext.h create mode 100644 drivers/gpu/drm/phytium/octopus/sofunc_pvr.h create mode 100644 drivers/gpu/drm/phytium/octopus/sofunc_rgx.h create mode 100644 
drivers/gpu/drm/phytium/octopus/srvcore.c create mode 100644 drivers/gpu/drm/phytium/octopus/srvcore.h create mode 100644 drivers/gpu/drm/phytium/octopus/srvinit.h create mode 100644 drivers/gpu/drm/phytium/octopus/srvkm.h create mode 100644 drivers/gpu/drm/phytium/octopus/sync.c create mode 100644 drivers/gpu/drm/phytium/octopus/sync.h create mode 100644 drivers/gpu/drm/phytium/octopus/sync_checkpoint.c create mode 100644 drivers/gpu/drm/phytium/octopus/sync_checkpoint.h create mode 100644 drivers/gpu/drm/phytium/octopus/sync_checkpoint_external.h create mode 100644 drivers/gpu/drm/phytium/octopus/sync_checkpoint_init.h create mode 100644 drivers/gpu/drm/phytium/octopus/sync_checkpoint_internal.h create mode 100644 drivers/gpu/drm/phytium/octopus/sync_fallback_server.h create mode 100644 drivers/gpu/drm/phytium/octopus/sync_internal.h create mode 100644 drivers/gpu/drm/phytium/octopus/sync_prim_internal.h create mode 100644 drivers/gpu/drm/phytium/octopus/sync_server.c create mode 100644 drivers/gpu/drm/phytium/octopus/sync_server.h create mode 100644 drivers/gpu/drm/phytium/octopus/syscommon.h create mode 100644 drivers/gpu/drm/phytium/octopus/sysconfig.c create mode 100644 drivers/gpu/drm/phytium/octopus/sysconfig.h create mode 100644 drivers/gpu/drm/phytium/octopus/sysconfig_cmn.c create mode 100644 drivers/gpu/drm/phytium/octopus/sysinfo.h create mode 100644 drivers/gpu/drm/phytium/octopus/sysvalidation.h create mode 100644 drivers/gpu/drm/phytium/octopus/tlclient.c create mode 100644 drivers/gpu/drm/phytium/octopus/tlclient.h create mode 100644 drivers/gpu/drm/phytium/octopus/tlintern.c create mode 100644 drivers/gpu/drm/phytium/octopus/tlintern.h create mode 100644 drivers/gpu/drm/phytium/octopus/tlserver.c create mode 100644 drivers/gpu/drm/phytium/octopus/tlserver.h create mode 100644 drivers/gpu/drm/phytium/octopus/tlstream.c create mode 100644 drivers/gpu/drm/phytium/octopus/tlstream.h create mode 100644 drivers/gpu/drm/phytium/octopus/trace_events.c 
create mode 100644 drivers/gpu/drm/phytium/octopus/trace_events.h create mode 100644 drivers/gpu/drm/phytium/octopus/uniq_key_splay_tree.c create mode 100644 drivers/gpu/drm/phytium/octopus/uniq_key_splay_tree.h create mode 100644 drivers/gpu/drm/phytium/octopus/vmm_impl.h create mode 100644 drivers/gpu/drm/phytium/octopus/vmm_pvz_client.c create mode 100644 drivers/gpu/drm/phytium/octopus/vmm_pvz_client.h create mode 100644 drivers/gpu/drm/phytium/octopus/vmm_pvz_common.h create mode 100644 drivers/gpu/drm/phytium/octopus/vmm_pvz_server.c create mode 100644 drivers/gpu/drm/phytium/octopus/vmm_pvz_server.h create mode 100644 drivers/gpu/drm/phytium/octopus/vmm_type_stub.c create mode 100644 drivers/gpu/drm/phytium/octopus/vz_vm.h create mode 100644 drivers/gpu/drm/phytium/octopus/vz_vmm_pvz.c create mode 100644 drivers/gpu/drm/phytium/octopus/vz_vmm_pvz.h create mode 100644 drivers/gpu/drm/phytium/octopus/vz_vmm_vm.c mode change 100644 => 100755 drivers/gpu/drm/phytium/phytium_panel.c mode change 100644 => 100755 drivers/gpu/drm/phytium/phytium_panel.h mode change 100644 => 100755 drivers/gpu/drm/phytium/phytium_pci.c mode change 100644 => 100755 drivers/gpu/drm/phytium/phytium_pci.h mode change 100644 => 100755 drivers/gpu/drm/phytium/x100_dc.c mode change 100644 => 100755 drivers/gpu/drm/phytium/x100_dc.h mode change 100644 => 100755 drivers/gpu/drm/phytium/x100_reg.h diff --git a/drivers/gpu/drm/phytium/Kconfig b/drivers/gpu/drm/phytium/Kconfig index e3024feb69d0..36fc9069ebb7 100644 --- a/drivers/gpu/drm/phytium/Kconfig +++ b/drivers/gpu/drm/phytium/Kconfig @@ -1,3 +1,5 @@ +source "drivers/gpu/drm/phytium/octopus/Kconfig" + config DRM_PHYTIUM tristate "DRM Support for Phytium Graphics Card" depends on DRM @@ -5,3 +7,10 @@ config DRM_PHYTIUM help Choose this option if you have a phytium graphics card. This driver provides kernel mode setting and buffer management to userspace. 
+ +config PHYTIUM_PCI_DRIVER + bool "Support for Phytium Pcie Graphics Card" + depends on DRM_PHYTIUM + default y + help + Support for Phytium pcie driver, used for Pcie Graphics Card as found. diff --git a/drivers/gpu/drm/phytium/Makefile b/drivers/gpu/drm/phytium/Makefile index 104416fc4313..f4f1f3a15cb6 100644 --- a/drivers/gpu/drm/phytium/Makefile +++ b/drivers/gpu/drm/phytium/Makefile @@ -8,8 +8,11 @@ phytium-dc-drm-y := phytium_display_drv.o \ phytium_debugfs.o \ x100_dp.o \ phytium_panel.o \ - x100_dc.o \ - phytium_pci.o + x100_dc.o + +phytium-dc-drm-$(CONFIG_PHYTIUM_PCI_DRIVER) += phytium_pci.o obj-$(CONFIG_DRM_PHYTIUM) += phytium-dc-drm.o -CFLAGS_REMOVE_phytium_crtc.o += -mgeneral-regs-only +CFLAGS_REMOVE_x100_dc.o += -mgeneral-regs-only + +obj-$(CONFIG_DRM_PHYTIUMVR_OCTOPUS) += octopus/ diff --git a/drivers/gpu/drm/phytium/octopus/Kconfig b/drivers/gpu/drm/phytium/octopus/Kconfig new file mode 100644 index 000000000000..a5773f92b8ea --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/Kconfig @@ -0,0 +1,30 @@ +config DRM_PHYTIUMVR_OCTOPUS + tristate "PhytiumVR Octopus" + depends on HAS_IOMEM + depends on DRM + select DRM_KMS_HELPER + select SYNC_FILE + help + Driver for Phytium octopus 3D graphics hardware. + + Say Y or M here if your SoC contains a PhytiumVR Octopus GPU. For more + information, see . + +config DRM_PHYTIUMVR_DVFS + bool "Enable Phytium GPU DVFS features" + depends on DRM_PHYTIUMVR_OCTOPUS + select PM_DEVFREQ + select PM_OPP + select DEVFREQ_THERMAL + select DEVFREQ_GOV_SIMPLE_ONDEMAND + default n + help + Add additional DVFS featurs to the Phytium Octopus GPU driver. + +config DRM_PHYTIUMVR_OCTOPUS_DMA + bool "Enable DMA support for Phtium Octopus GPU" + depends on DRM_PHYTIUMVR_OCTOPUS + select CONFIG_PHYTIUM_AXI_DMAC + help + Add DMA support to accelarate memory copy between system memory and + GPU device memory. 
diff --git a/drivers/gpu/drm/phytium/octopus/Makefile b/drivers/gpu/drm/phytium/octopus/Makefile new file mode 100644 index 000000000000..631d2465cc82 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/Makefile @@ -0,0 +1,14 @@ +img_basedir := drivers/gpu/drm/phytium/octopus +include $(img_basedir)/config_kernel.mk + +obj-$(CONFIG_DRM_PHYTIUMVR_OCTOPUS) += pvrsrvkm.o + +ccflags-y += \ + -include config_kernel.h \ + -Iinclude/drm \ + -I$(img_basedir) \ + -I$(img_basedir)/km \ + -I$(img_basedir)/system \ + -D__linux__ + +include $(img_basedir)/pvrsrvkm.mk diff --git a/drivers/gpu/drm/phytium/octopus/allocmem.c b/drivers/gpu/drm/phytium/octopus/allocmem.c new file mode 100644 index 000000000000..0d19725d0de2 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/allocmem.c @@ -0,0 +1,435 @@ +/*************************************************************************/ /*! +@File +@Title Host memory management implementation for Linux +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include +#include +#include +#include + +#include "img_defs.h" +#include "allocmem.h" +#include "pvr_debug.h" +#include "process_stats.h" +#if defined(DEBUG) && defined(SUPPORT_VALIDATION) +#include "pvrsrv.h" +#endif +#include "osfunc.h" + +/* + * DEBUG_MEMSTATS_ALLOC_RECORD_VALUES needs to be different from + * DEBUG_MEMSTATS_VALUES defined in process_stats.h. + * The reason for this is that the file and line where the allocation happens + * are tracked from the OSAllocMem params. If DEBUG_MEMSTATS_VALUES were to be + * used, all OSAllocMem allocation statistics would point to allocmem.c, which + * is not expected behaviour. 
+ */ +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON) +#define DEBUG_MEMSTATS_ALLOC_RECORD_VALUES ,pvAllocFromFile, ui32AllocFromLine +#else +#define DEBUG_MEMSTATS_ALLOC_RECORD_VALUES +#endif + +/* + * When memory statistics are disabled, memory records are used instead. + * In order for these to work, the PID of the process that requested the + * allocation needs to be stored at the end of the kmalloc'd memory, making + * sure 4 extra bytes are allocated to fit the PID. + * + * There is no need for this extra allocation when memory statistics are + * enabled, since all allocations are tracked in DebugFS mem_area files. + */ +#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_ENABLE_MEMORY_STATS) +#define ALLOCMEM_MEMSTATS_PADDING sizeof(IMG_UINT32) +#else +#define ALLOCMEM_MEMSTATS_PADDING 0UL +#endif + +/* How many times kmalloc can fail before the allocation threshold is reduced */ +static const IMG_UINT32 g_ui32kmallocFailLimit = 10; +/* How many kmalloc failures happened since the last allocation threshold change */ +static IMG_UINT32 g_ui32kmallocFailCount = 0; +/* Current kmalloc threshold value in bytes */ +static IMG_UINT32 g_ui32kmallocThreshold = PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD; +/* Spinlock used so that the global variables above may not be modified by more than 1 thread at a time */ +static DEFINE_SPINLOCK(kmalloc_lock); + +#if defined(DEBUG) && defined(SUPPORT_VALIDATION) +static DEFINE_SPINLOCK(kmalloc_leak_lock); +static IMG_UINT32 g_ui32kmallocLeakCounter = 0; +#endif + +static inline void OSTryDecreaseKmallocThreshold(void) +{ + unsigned long flags; + spin_lock_irqsave(&kmalloc_lock, flags); + + g_ui32kmallocFailCount++; + + if (g_ui32kmallocFailCount >= g_ui32kmallocFailLimit) + { + g_ui32kmallocFailCount = 0; + if (g_ui32kmallocThreshold > PAGE_SIZE) + { + g_ui32kmallocThreshold >>= 1; + printk(KERN_INFO "Threshold is now set to %d\n", g_ui32kmallocThreshold); + } + } + + spin_unlock_irqrestore(&kmalloc_lock, flags); +} + 
+static inline void OSResetKmallocFailCount(void) +{ + unsigned long flags; + spin_lock_irqsave(&kmalloc_lock, flags); + + g_ui32kmallocFailCount = 0; + + spin_unlock_irqrestore(&kmalloc_lock, flags); +} + +static inline void _pvr_vfree(const void* pvAddr) +{ +#if defined(DEBUG) + /* Size harder to come by for vmalloc and since vmalloc allocates + * a whole number of pages, poison the minimum size known to have + * been allocated. + */ + OSCachedMemSet((void*)pvAddr, PVRSRV_POISON_ON_ALLOC_VALUE, + PAGE_SIZE); +#endif + vfree(pvAddr); +} + +static inline void _pvr_kfree(const void* pvAddr) +{ +#if defined(DEBUG) + /* Poison whole memory block */ + OSCachedMemSet((void*)pvAddr, PVRSRV_POISON_ON_ALLOC_VALUE, + ksize(pvAddr)); +#endif + kfree(pvAddr); +} + +static inline void _pvr_alloc_stats_add(void *pvAddr, IMG_UINT32 ui32Size DEBUG_MEMSTATS_PARAMS) +{ +#if !defined(PVRSRV_ENABLE_PROCESS_STATS) + PVR_UNREFERENCED_PARAMETER(pvAddr); +#else + if (!is_vmalloc_addr(pvAddr)) + { +#if defined(PVRSRV_ENABLE_MEMORY_STATS) + IMG_CPU_PHYADDR sCpuPAddr; + sCpuPAddr.uiAddr = 0; + + PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, + pvAddr, + sCpuPAddr, + ksize(pvAddr), + NULL, + OSGetCurrentClientProcessIDKM() + DEBUG_MEMSTATS_ALLOC_RECORD_VALUES); +#else + { + /* Store the PID in the final additional 4 bytes allocated */ + IMG_UINT32 *puiTemp = IMG_OFFSET_ADDR(pvAddr, ksize(pvAddr) - ALLOCMEM_MEMSTATS_PADDING); + *puiTemp = OSGetCurrentClientProcessIDKM(); + } + PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, ksize(pvAddr), OSGetCurrentClientProcessIDKM()); +#endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ + } + else + { +#if defined(PVRSRV_ENABLE_MEMORY_STATS) + IMG_CPU_PHYADDR sCpuPAddr; + sCpuPAddr.uiAddr = 0; + + PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMALLOC, + pvAddr, + sCpuPAddr, + ((ui32Size + PAGE_SIZE-1) & ~(PAGE_SIZE-1)), + NULL, + OSGetCurrentClientProcessIDKM() + DEBUG_MEMSTATS_ALLOC_RECORD_VALUES); +#else + 
PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC, + ((ui32Size + PAGE_SIZE-1) & ~(PAGE_SIZE-1)), + (IMG_UINT64)(uintptr_t) pvAddr, + OSGetCurrentClientProcessIDKM()); +#endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ + } +#endif /* !defined(PVRSRV_ENABLE_PROCESS_STATS) */ +} + +static inline void _pvr_alloc_stats_remove(void *pvAddr) +{ +#if !defined(PVRSRV_ENABLE_PROCESS_STATS) + PVR_UNREFERENCED_PARAMETER(pvAddr); +#else + if (!is_vmalloc_addr(pvAddr)) + { +#if !defined(PVRSRV_ENABLE_MEMORY_STATS) + { + IMG_UINT32 *puiTemp = IMG_OFFSET_ADDR(pvAddr, ksize(pvAddr) - ALLOCMEM_MEMSTATS_PADDING); + PVRSRVStatsDecrMemKAllocStat(ksize(pvAddr), *puiTemp); + } +#else + PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, + (IMG_UINT64)(uintptr_t) pvAddr, + OSGetCurrentClientProcessIDKM()); +#endif + } + else + { +#if !defined(PVRSRV_ENABLE_MEMORY_STATS) + PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC, + (IMG_UINT64)(uintptr_t) pvAddr); +#else + PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMALLOC, + (IMG_UINT64)(uintptr_t) pvAddr, + OSGetCurrentClientProcessIDKM()); +#endif + } +#endif /* !defined(PVRSRV_ENABLE_PROCESS_STATS) */ +} + +void *(OSAllocMem)(IMG_UINT32 ui32Size DEBUG_MEMSTATS_PARAMS) +{ + void *pvRet = NULL; + + if ((ui32Size + ALLOCMEM_MEMSTATS_PADDING) <= g_ui32kmallocThreshold) + { + pvRet = kmalloc(ui32Size + ALLOCMEM_MEMSTATS_PADDING, GFP_KERNEL); + if (pvRet == NULL) + { + OSTryDecreaseKmallocThreshold(); + } + else + { + OSResetKmallocFailCount(); + } + } + + if (pvRet == NULL) + { + pvRet = vmalloc(ui32Size); + } + + if (pvRet != NULL) + { + _pvr_alloc_stats_add(pvRet, ui32Size DEBUG_MEMSTATS_ALLOC_RECORD_VALUES); + } + + return pvRet; +} + +void *(OSAllocZMem)(IMG_UINT32 ui32Size DEBUG_MEMSTATS_PARAMS) +{ + void *pvRet = NULL; + + if ((ui32Size + ALLOCMEM_MEMSTATS_PADDING) <= g_ui32kmallocThreshold) + { + pvRet = kzalloc(ui32Size + ALLOCMEM_MEMSTATS_PADDING, GFP_KERNEL); + if (pvRet == NULL) + 
{ + OSTryDecreaseKmallocThreshold(); + } + else + { + OSResetKmallocFailCount(); + } + } + + if (pvRet == NULL) + { + pvRet = vzalloc(ui32Size); + } + + if (pvRet != NULL) + { + _pvr_alloc_stats_add(pvRet, ui32Size DEBUG_MEMSTATS_ALLOC_RECORD_VALUES); + } + + return pvRet; +} + +/* + * The parentheses around OSFreeMem prevent the macro in allocmem.h from + * applying, as it would break the function's definition. + */ +void (OSFreeMem)(void *pvMem) +{ +#if defined(DEBUG) && defined(SUPPORT_VALIDATION) + unsigned long flags; + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + + if (psPVRSRVData) + { + IMG_UINT32 ui32kmallocLeakMax = psPVRSRVData->sMemLeakIntervals.ui32OSAlloc; + + spin_lock_irqsave(&kmalloc_leak_lock, flags); + + g_ui32kmallocLeakCounter++; + if (ui32kmallocLeakMax && (g_ui32kmallocLeakCounter >= ui32kmallocLeakMax)) + { + g_ui32kmallocLeakCounter = 0; + spin_unlock_irqrestore(&kmalloc_leak_lock, flags); + + PVR_DPF((PVR_DBG_WARNING, + "%s: Skipped freeing of pointer 0x%p to trigger memory leak.", + __func__, + pvMem)); + return; + } + + spin_unlock_irqrestore(&kmalloc_leak_lock, flags); + } +#endif + if (pvMem != NULL) + { + _pvr_alloc_stats_remove(pvMem); + + if (!is_vmalloc_addr(pvMem)) + { + _pvr_kfree(pvMem); + } + else + { + _pvr_vfree(pvMem); + } + } +} + +void *OSAllocMemNoStats(IMG_UINT32 ui32Size) +{ + void *pvRet = NULL; + + if (ui32Size <= g_ui32kmallocThreshold) + { + pvRet = kmalloc(ui32Size, GFP_KERNEL); + if (pvRet == NULL) + { + OSTryDecreaseKmallocThreshold(); + } + else + { + OSResetKmallocFailCount(); + } + } + + if (pvRet == NULL) + { + pvRet = vmalloc(ui32Size); + } + + return pvRet; +} + +void *OSAllocZMemNoStats(IMG_UINT32 ui32Size) +{ + void *pvRet = NULL; + + if (ui32Size <= g_ui32kmallocThreshold) + { + pvRet = kzalloc(ui32Size, GFP_KERNEL); + if (pvRet == NULL) + { + OSTryDecreaseKmallocThreshold(); + } + else + { + OSResetKmallocFailCount(); + } + } + + if (pvRet == NULL) + { + pvRet = vzalloc(ui32Size); + } + + return 
pvRet; +} + +/* + * The parentheses around OSFreeMemNoStats prevent the macro in allocmem.h from + * applying, as it would break the function's definition. + */ +void (OSFreeMemNoStats)(void *pvMem) +{ +#if defined(DEBUG) && defined(SUPPORT_VALIDATION) + unsigned long flags; + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + + if (psPVRSRVData) + { + IMG_UINT32 ui32kmallocLeakMax = psPVRSRVData->sMemLeakIntervals.ui32OSAlloc; + + spin_lock_irqsave(&kmalloc_leak_lock, flags); + + g_ui32kmallocLeakCounter++; + if (ui32kmallocLeakMax && (g_ui32kmallocLeakCounter >= ui32kmallocLeakMax)) + { + g_ui32kmallocLeakCounter = 0; + spin_unlock_irqrestore(&kmalloc_leak_lock, flags); + + PVR_DPF((PVR_DBG_WARNING, + "%s: Skipped freeing of pointer 0x%p to trigger memory leak.", + __func__, + pvMem)); + return; + } + + spin_unlock_irqrestore(&kmalloc_leak_lock, flags); + } +#endif + if (pvMem != NULL) + { + if (!is_vmalloc_addr(pvMem)) + { + _pvr_kfree(pvMem); + } + else + { + _pvr_vfree(pvMem); + } + } +} diff --git a/drivers/gpu/drm/phytium/octopus/allocmem.h b/drivers/gpu/drm/phytium/octopus/allocmem.h new file mode 100644 index 000000000000..1cdba577eb28 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/allocmem.h @@ -0,0 +1,181 @@ +/*************************************************************************/ /*! +@File allocmem.h +@Title memory allocation header +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Memory-Allocation API definitions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef ALLOCMEM_H +#define ALLOCMEM_H + +#include "img_types.h" +#include "pvr_debug.h" + +#if defined(__cplusplus) +extern "C" { +#endif + +#if !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) || !defined(DEBUG) || !defined(PVRSRV_ENABLE_PROCESS_STATS) || !defined(PVRSRV_ENABLE_MEMORY_STATS) || defined(DOXYGEN) + +/**************************************************************************/ /*! +@Function OSAllocMem +@Description Allocates CPU memory. Contents are uninitialized. + If passed a size of zero, function should not assert, + but just return a NULL pointer. +@Input ui32Size Size of required allocation (in bytes) +@Return Pointer to allocated memory on success. + Otherwise NULL. + */ /**************************************************************************/ +void *OSAllocMem(IMG_UINT32 ui32Size); +#define OSAllocMem(_size) (OSAllocMem)((_size)) +/**************************************************************************/ /*! +@Function OSAllocZMem +@Description Allocates CPU memory and initializes the contents to zero. + If passed a size of zero, function should not assert, + but just return a NULL pointer. +@Input ui32Size Size of required allocation (in bytes) +@Return Pointer to allocated memory on success. + Otherwise NULL. + */ /**************************************************************************/ +void *OSAllocZMem(IMG_UINT32 ui32Size); +#define OSAllocZMem(_size) (OSAllocZMem)((_size)) + +#else +void *OSAllocMem(IMG_UINT32 ui32Size, void *pvAllocFromFile, IMG_UINT32 ui32AllocFromLine); +void *OSAllocZMem(IMG_UINT32 ui32Size, void *pvAllocFromFile, IMG_UINT32 ui32AllocFromLine); +#define OSAllocMem(_size) (OSAllocMem)((_size), (__FILE__), (__LINE__)) +#define OSAllocZMem(_size) (OSAllocZMem)((_size), (__FILE__), (__LINE__)) +#endif + +/**************************************************************************/ /*! 
+@Function OSAllocMemNoStats +@Description Allocates CPU memory. Contents are uninitialized. + If passed a size of zero, function should not assert, + but just return a NULL pointer. + The allocated memory is not accounted for by process stats. + Process stats are an optional feature (enabled only when + PVRSRV_ENABLE_PROCESS_STATS is defined) which track the amount + of memory allocated to help in debugging. Where this is not + required, OSAllocMem() and OSAllocMemNoStats() equate to + the same operation. +@Input ui32Size Size of required allocation (in bytes) +@Return Pointer to allocated memory on success. + Otherwise NULL. + */ /**************************************************************************/ +void *OSAllocMemNoStats(IMG_UINT32 ui32Size); + +/**************************************************************************/ /*! +@Function OSAllocZMemNoStats +@Description Allocates CPU memory and initializes the contents to zero. + If passed a size of zero, function should not assert, + but just return a NULL pointer. + The allocated memory is not accounted for by process stats. + Process stats are an optional feature (enabled only when + PVRSRV_ENABLE_PROCESS_STATS is defined) which track the amount + of memory allocated to help in debugging. Where this is not + required, OSAllocZMem() and OSAllocZMemNoStats() equate to + the same operation. +@Input ui32Size Size of required allocation (in bytes) +@Return Pointer to allocated memory on success. + Otherwise NULL. + */ /**************************************************************************/ +void *OSAllocZMemNoStats(IMG_UINT32 ui32Size); + +/**************************************************************************/ /*! +@Function OSFreeMem +@Description Frees previously allocated CPU memory. +@Input pvCpuVAddr Pointer to the memory to be freed. +@Return None. 
+ */ /**************************************************************************/ +void OSFreeMem(void *pvCpuVAddr); + +/**************************************************************************/ /*! +@Function OSFreeMemNoStats +@Description Frees previously allocated CPU memory. + The freed memory does not update the figures in process stats. + Process stats are an optional feature (enabled only when + PVRSRV_ENABLE_PROCESS_STATS is defined) which track the amount + of memory allocated to help in debugging. Where this is not + required, OSFreeMem() and OSFreeMemNoStats() equate to the + same operation. +@Input pvCpuVAddr Pointer to the memory to be freed. +@Return None. + */ /**************************************************************************/ +void OSFreeMemNoStats(void *pvCpuVAddr); + +/* + * These macros allow us to catch double-free bugs on DEBUG builds and + * prevent crashes on RELEASE builds. + */ + +/*! @cond Doxygen_Suppress */ +#if defined(DEBUG) +#define double_free_sentinel ((void *)&OSFreeMem) +#define ALLOCMEM_ASSERT(exp) PVR_ASSERT(exp) +#else +#define double_free_sentinel NULL +#define ALLOCMEM_ASSERT(exp) do {} while (0) +#endif +/*! @endcond */ + +/*! Frees memory allocated by OSAllocMem(). */ +#define OSFreeMem(_ptr) do { \ + ALLOCMEM_ASSERT((_ptr) != double_free_sentinel); \ + (OSFreeMem)(_ptr); \ + (_ptr) = double_free_sentinel; \ + MSC_SUPPRESS_4127 \ + } while (0) + +/*! Frees memory allocated by OSAllocMemNoStats(). 
*/ +#define OSFreeMemNoStats(_ptr) do { \ + ALLOCMEM_ASSERT((_ptr) != double_free_sentinel); \ + (OSFreeMemNoStats)(_ptr); \ + (_ptr) = double_free_sentinel; \ + MSC_SUPPRESS_4127 \ + } while (0) + +#if defined(__cplusplus) +} +#endif + +#endif /* ALLOCMEM_H */ + +/****************************************************************************** + End of file (allocmem.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/cache_km.c b/drivers/gpu/drm/phytium/octopus/cache_km.c new file mode 100644 index 000000000000..fa96147f8528 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/cache_km.c @@ -0,0 +1,2572 @@ +/*************************************************************************/ /*! +@File cache_km.c +@Title CPU d-cache maintenance operations framework +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements server side code for CPU d-cache maintenance taking + into account the idiosyncrasies of the various types of CPU + d-cache instruction-set architecture (ISA) maintenance + mechanisms. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#if defined(__linux__) +#include +#include +#include +#include +#include +#include +#endif + +#include "pmr.h" +#include "log2.h" +#include "device.h" +#include "pvrsrv.h" +#include "osfunc.h" +#include "cache_km.h" +#include "pvr_debug.h" +#include "lock_types.h" +#include "allocmem.h" +#include "process_stats.h" +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) +#include "ri_server.h" +#endif +#include "devicemem.h" +#include "pvrsrv_apphint.h" +#include "pvrsrv_sync_server.h" +#include "km_apphint_defs.h" +#include "km_apphint_defs_common.h" +#include "oskm_apphint.h" +#include "di_server.h" + +/* This header must always be included last */ +#if defined(__linux__) +#include "kernel_compatibility.h" +#endif + +/* Top-level file-local build definitions */ +#if defined(PVRSRV_ENABLE_CACHEOP_STATS) && defined(__linux__) +#define CACHEOP_DEBUG +#define CACHEOP_STATS_ITEMS_MAX 32 +#define INCR_WRAP(x) ((x+1) >= CACHEOP_STATS_ITEMS_MAX ? 0 : (x+1)) +#define DECR_WRAP(x) ((x-1) < 0 ? 
(CACHEOP_STATS_ITEMS_MAX-1) : (x-1)) +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) +/* Refer to CacheOpStatsExecLogHeader() for header item names */ +#define CACHEOP_RI_PRINTF_HEADER "%-8s %-10s %-10s %-5s %-16s %-16s %-10s %-10s %-18s %-18s %-12s" +#define CACHEOP_RI_PRINTF "%-8d %-10s %-10s %-5s 0x%-14llx 0x%-14llx 0x%-8llx 0x%-8llx %-18llu %-18llu 0x%-10x\n" +#else +#define CACHEOP_PRINTF_HEADER "%-8s %-10s %-10s %-5s %-10s %-10s %-18s %-18s %-12s" +#define CACHEOP_PRINTF "%-8d %-10s %-10s %-5s 0x%-8llx 0x%-8llx %-18llu %-18llu 0x%-10x\n" +#endif +#endif + +//#define CACHEOP_NO_CACHE_LINE_ALIGNED_ROUNDING /* Force OS page (not cache line) flush granularity */ +#define CACHEOP_PVR_ASSERT(x) /* Define as PVR_ASSERT(x), enable for swdev & testing */ +#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) +#define CACHEOP_THREAD_WAIT_TIMEOUT 0ULL /* Wait indefinitely */ +#else +#define CACHEOP_THREAD_WAIT_TIMEOUT 500000ULL /* Wait 500ms between wait unless woken-up on demand */ +#endif +#define CACHEOP_FENCE_WAIT_TIMEOUT 1000ULL /* Wait 1ms between wait events unless woken-up */ +#define CACHEOP_FENCE_RETRY_ABORT 1000ULL /* Fence retries that aborts fence operation */ +#define CACHEOP_SEQ_MIDPOINT (IMG_UINT32) 0x7FFFFFFF /* Where seqNum(s) are rebase, compared at */ +#define CACHEOP_ABORT_FENCE_ERROR_STRING "detected stalled client, retrying cacheop fence" +#define CACHEOP_DEVMEM_OOR_ERROR_STRING "cacheop device memory request is out of range" +#define CACHEOP_MAX_DEBUG_MESSAGE_LEN 160 + +typedef struct _CACHEOP_WORK_ITEM_ +{ + PMR *psPMR; + IMG_UINT32 ui32OpSeqNum; + IMG_DEVMEM_SIZE_T uiSize; + PVRSRV_CACHE_OP uiCacheOp; + IMG_DEVMEM_OFFSET_T uiOffset; + PVRSRV_TIMELINE iTimeline; + SYNC_TIMELINE_OBJ sSWTimelineObj; + PVRSRV_DEVICE_NODE *psDevNode; +#if defined(CACHEOP_DEBUG) + IMG_UINT64 ui64EnqueuedTime; + IMG_UINT64 ui64DequeuedTime; + IMG_UINT64 ui64ExecuteTime; + IMG_BOOL bDeferred; + IMG_BOOL bKMReq; + IMG_BOOL bUMF; + IMG_PID pid; +#if 
defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + RGXFWIF_DM eFenceOpType; +#endif +#endif +} CACHEOP_WORK_ITEM; + +typedef struct _CACHEOP_STATS_EXEC_ITEM_ +{ + IMG_PID pid; + IMG_UINT32 ui32OpSeqNum; + PVRSRV_CACHE_OP uiCacheOp; + IMG_DEVMEM_SIZE_T uiOffset; + IMG_DEVMEM_SIZE_T uiSize; + IMG_UINT64 ui64EnqueuedTime; + IMG_UINT64 ui64DequeuedTime; + IMG_UINT64 ui64ExecuteTime; + IMG_BOOL bIsFence; + IMG_BOOL bKMReq; + IMG_BOOL bUMF; + IMG_BOOL bDeferred; +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + IMG_DEV_VIRTADDR sDevVAddr; + IMG_DEV_PHYADDR sDevPAddr; + RGXFWIF_DM eFenceOpType; +#endif +} CACHEOP_STATS_EXEC_ITEM; + +typedef enum _CACHEOP_CONFIG_ +{ + CACHEOP_CONFIG_DEFAULT = 0, + /* cache flush mechanism types */ + CACHEOP_CONFIG_URBF = 4, + /* sw-emulated deferred flush mechanism */ + CACHEOP_CONFIG_KDF = 8, + /* pseudo configuration items */ + CACHEOP_CONFIG_LAST = 16, + CACHEOP_CONFIG_KLOG = 16, + CACHEOP_CONFIG_ALL = 31 +} CACHEOP_CONFIG; + +typedef struct _CACHEOP_WORK_QUEUE_ +{ +/* + * Init. state & primary device node framework + * is anchored on. + */ + IMG_BOOL bInit; +/* + MMU page size/shift & d-cache line size + */ + size_t uiPageSize; + IMG_UINT32 uiLineSize; + IMG_UINT32 uiLineShift; + IMG_UINT32 uiPageShift; + OS_CACHE_OP_ADDR_TYPE uiCacheOpAddrType; +/* + CacheOp deferred queueing protocol + + Implementation geared for performance, atomic counter based + - Value Space is 0 -> 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> n. + - Index Space is 0 -> 1 -> 2 -> 3 -> 0 -> 1 -> 2 -> 3 -> 0 -> m. + - Index = Value modulo CACHEOP_INDICES_LOG2_SIZE. + + Write counter never collides with read counter in index space + - Unless at start of day when both are initialised to zero. + - This means we sacrifice one entry when the queue is full. + - Incremented by producer + - Value space tracks total number of CacheOps queued. + - Index space identifies CacheOp CCB queue index. 
+ + Read counter increments towards write counter in value space + - Empty queue occurs when read equals write counter. + - Wrap-round logic handled by consumer as/when needed. + - Incremented by consumer + - Value space tracks total # of CacheOps executed. + - Index space identifies CacheOp CCB queue index. + + Total queued size adjusted up/down during write/read activity + - Counter might overflow but does not compromise framework. + */ + ATOMIC_T hReadCounter; + ATOMIC_T hWriteCounter; +/* + CacheOp sequence numbers + + hCommonSeqNum: + - Common sequence, numbers every CacheOp operation in both UM/KM. + - In KM + - Every deferred CacheOp (on behalf of UM) gets a unique seqNum. + - Last executed deferred CacheOp updates gsCwq.hCompletedSeqNum. + - Under debug, all CacheOp gets a unique seqNum for tracking. + - This includes all UM/KM synchronous non-deferred CacheOp(s) + - In UM + - CacheOp(s) discarding happens in both UM and KM space. + + hCompletedSeqNum: + - Tracks last executed KM/deferred RBF/Global CacheOp(s) + */ + ATOMIC_T hCommonSeqNum; + ATOMIC_T hCompletedSeqNum; +/* + CacheOp information page + + psInfoPagePMR: + - Single system-wide OS page that is multi-mapped in UM/KM. + - Mapped into clients using read-only memory protection. + - Mapped into server using read/write memory protection. + - Contains information pertaining to cache framework. + + pui32InfoPage: + - Server linear address pointer to said information page. + - Each info-page entry currently of sizeof(IMG_UINT32). 
+ */ + PMR *psInfoPagePMR; + IMG_UINT32 *pui32InfoPage; +/* + CacheOp deferred work-item queue + + CACHEOP_INDICES_LOG2_SIZE + */ +#define CACHEOP_INDICES_LOG2_SIZE (4) +#define CACHEOP_INDICES_MAX (1 << CACHEOP_INDICES_LOG2_SIZE) +#define CACHEOP_INDICES_MASK (CACHEOP_INDICES_MAX-1) + CACHEOP_WORK_ITEM asWorkItems[CACHEOP_INDICES_MAX]; +#if defined(CACHEOP_DEBUG) +/* + CacheOp statistics + */ + DI_ENTRY *psDIEntry; + IMG_HANDLE hStatsExecLock; + IMG_UINT32 ui32ServerASync; + IMG_UINT32 ui32ServerSyncVA; + IMG_UINT32 ui32ServerSync; + IMG_UINT32 ui32ServerRBF; + IMG_UINT32 ui32ServerDTL; + IMG_UINT32 ui32ClientSync; + IMG_UINT32 ui32ClientRBF; + IMG_UINT32 ui32TotalFenceOps; + IMG_UINT32 ui32TotalExecOps; + IMG_UINT32 ui32AvgExecTime; + IMG_UINT32 ui32AvgExecTimeRemainder; + IMG_UINT32 ui32AvgFenceTime; + IMG_UINT32 ui32AvgFenceTimeRemainder; + IMG_INT32 i32StatsExecWriteIdx; + CACHEOP_STATS_EXEC_ITEM asStatsExecuted[CACHEOP_STATS_ITEMS_MAX]; +#endif +/* + CacheOp (re)configuration + */ + DI_ENTRY *psConfigTune; + IMG_HANDLE hConfigLock; +/* + CacheOp deferred worker thread + + eConfig + - Runtime configuration + + hWorkerThread + - CacheOp thread handler + + hThreadWakeUpEvtObj + - Event object to drive CacheOp worker thread sleep/wake-ups. + + hClientWakeUpEvtObj + - Event object to unblock stalled clients waiting on queue. + */ + CACHEOP_CONFIG eConfig; + IMG_UINT32 ui32Config; + IMG_HANDLE hWorkerThread; + IMG_HANDLE hDeferredLock; + IMG_HANDLE hThreadWakeUpEvtObj; + IMG_HANDLE hClientWakeUpEvtObj; + IMG_UINT32 ui32FenceWaitTimeUs; + IMG_UINT32 ui32FenceRetryAbort; + IMG_BOOL bSupportsUMFlush; +} CACHEOP_WORK_QUEUE; + +/* Top-level CacheOp framework object */ +static CACHEOP_WORK_QUEUE gsCwq; + +#define CacheOpConfigSupports(e) ((gsCwq.eConfig & (e)) ? 
IMG_TRUE : IMG_FALSE) + +static INLINE IMG_UINT32 CacheOpIdxRead(ATOMIC_T *phCounter) +{ + IMG_UINT32 ui32Idx = OSAtomicRead(phCounter); + return ui32Idx & CACHEOP_INDICES_MASK; +} + +static INLINE IMG_UINT32 CacheOpIdxIncrement(ATOMIC_T *phCounter) +{ + IMG_UINT32 ui32Idx = OSAtomicIncrement(phCounter); + return ui32Idx & CACHEOP_INDICES_MASK; +} + +static INLINE IMG_UINT32 CacheOpIdxNext(ATOMIC_T *phCounter) +{ + IMG_UINT32 ui32Idx = OSAtomicRead(phCounter); + return ++ui32Idx & CACHEOP_INDICES_MASK; +} + +static INLINE IMG_UINT32 CacheOpIdxSpan(ATOMIC_T *phLhs, ATOMIC_T *phRhs) +{ + return OSAtomicRead(phLhs) - OSAtomicRead(phRhs); +} + +/* Callback to dump info of cacheop thread in debug_dump */ +static void CacheOpThreadDumpInfo(DUMPDEBUG_PRINTF_FUNC* pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + PVR_DUMPDEBUG_LOG(" Configuration: QSZ: %d, UKT: %d, KDFT: %d, " + "LINESIZE: %d, PGSIZE: %d, KDF: %s, " + "URBF: %s", + CACHEOP_INDICES_MAX, + gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD], + gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD], + gsCwq.pui32InfoPage[CACHEOP_INFO_LINESIZE], + gsCwq.pui32InfoPage[CACHEOP_INFO_PGSIZE], + gsCwq.eConfig & CACHEOP_CONFIG_KDF ? "Yes" : "No", + gsCwq.eConfig & CACHEOP_CONFIG_URBF ? 
"Yes" : "No" + ); + PVR_DUMPDEBUG_LOG(" Pending deferred CacheOp entries : %u", + CacheOpIdxSpan(&gsCwq.hWriteCounter, &gsCwq.hReadCounter)); +} + +#if defined(CACHEOP_DEBUG) +static INLINE void CacheOpStatsExecLogHeader(IMG_CHAR szBuffer[CACHEOP_MAX_DEBUG_MESSAGE_LEN]) +{ + OSSNPrintf(szBuffer, CACHEOP_MAX_DEBUG_MESSAGE_LEN, +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + CACHEOP_RI_PRINTF_HEADER, +#else + CACHEOP_PRINTF_HEADER, +#endif + "Pid", + "CacheOp", + " Type", + "Mode", +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + "DevVAddr", + "DevPAddr", +#endif + "Offset", + "Size", + "xTime (us)", + "qTime (us)", + "SeqNum"); +} + +static void CacheOpStatsExecLogWrite(CACHEOP_WORK_ITEM *psCacheOpWorkItem) +{ + IMG_UINT64 ui64ExecuteTime; + IMG_UINT64 ui64EnqueuedTime; + IMG_INT32 i32WriteOffset; + + if (!psCacheOpWorkItem->ui32OpSeqNum && !psCacheOpWorkItem->uiCacheOp) + { + /* This breaks the logic of read-out, so we do not queue items + with zero sequence number and no CacheOp */ + return; + } + else if (psCacheOpWorkItem->bKMReq && !CacheOpConfigSupports(CACHEOP_CONFIG_KLOG)) + { + /* KM logs spams the history due to frequency, this removes it completely */ + return; + } + + OSLockAcquire(gsCwq.hStatsExecLock); + + i32WriteOffset = gsCwq.i32StatsExecWriteIdx; + gsCwq.asStatsExecuted[i32WriteOffset].pid = psCacheOpWorkItem->pid; + gsCwq.i32StatsExecWriteIdx = INCR_WRAP(gsCwq.i32StatsExecWriteIdx); + gsCwq.asStatsExecuted[i32WriteOffset].bUMF = psCacheOpWorkItem->bUMF; + gsCwq.asStatsExecuted[i32WriteOffset].uiSize = psCacheOpWorkItem->uiSize; + gsCwq.asStatsExecuted[i32WriteOffset].bKMReq = psCacheOpWorkItem->bKMReq; + gsCwq.asStatsExecuted[i32WriteOffset].uiOffset = psCacheOpWorkItem->uiOffset; + gsCwq.asStatsExecuted[i32WriteOffset].uiCacheOp = psCacheOpWorkItem->uiCacheOp; + gsCwq.asStatsExecuted[i32WriteOffset].bDeferred = psCacheOpWorkItem->bDeferred; + gsCwq.asStatsExecuted[i32WriteOffset].ui32OpSeqNum = 
psCacheOpWorkItem->ui32OpSeqNum; + gsCwq.asStatsExecuted[i32WriteOffset].ui64ExecuteTime = psCacheOpWorkItem->ui64ExecuteTime; + gsCwq.asStatsExecuted[i32WriteOffset].ui64EnqueuedTime = psCacheOpWorkItem->ui64EnqueuedTime; + gsCwq.asStatsExecuted[i32WriteOffset].ui64DequeuedTime = psCacheOpWorkItem->ui64DequeuedTime; + /* During early system initialisation, only non-fence & non-PMR CacheOps are processed */ + gsCwq.asStatsExecuted[i32WriteOffset].bIsFence = gsCwq.bInit && !psCacheOpWorkItem->psPMR; + CACHEOP_PVR_ASSERT(gsCwq.asStatsExecuted[i32WriteOffset].pid); +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + if (gsCwq.bInit && psCacheOpWorkItem->psPMR) + { + IMG_CPU_PHYADDR sDevPAddr; + PVRSRV_ERROR eError; + IMG_BOOL bValid; + + /* Get more detailed information regarding the sub allocations that + PMR has from RI manager for process that requested the CacheOp */ + eError = RIDumpProcessListKM(psCacheOpWorkItem->psPMR, + gsCwq.asStatsExecuted[i32WriteOffset].pid, + gsCwq.asStatsExecuted[i32WriteOffset].uiOffset, + &gsCwq.asStatsExecuted[i32WriteOffset].sDevVAddr); + PVR_GOTO_IF_ERROR(eError, e0); + + /* (Re)lock here as some PMR might have not been locked */ + eError = PMRLockSysPhysAddresses(psCacheOpWorkItem->psPMR); + PVR_GOTO_IF_ERROR(eError, e0); + + eError = PMR_CpuPhysAddr(psCacheOpWorkItem->psPMR, + gsCwq.uiPageShift, + 1, + gsCwq.asStatsExecuted[i32WriteOffset].uiOffset, + &sDevPAddr, + &bValid); + if (eError != PVRSRV_OK) + { + eError = PMRUnlockSysPhysAddresses(psCacheOpWorkItem->psPMR); + PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses"); + goto e0; + } + + eError = PMRUnlockSysPhysAddresses(psCacheOpWorkItem->psPMR); + PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses"); + + gsCwq.asStatsExecuted[i32WriteOffset].sDevPAddr.uiAddr = sDevPAddr.uiAddr; + } + + if (gsCwq.asStatsExecuted[i32WriteOffset].bIsFence) + { + gsCwq.asStatsExecuted[i32WriteOffset].eFenceOpType = psCacheOpWorkItem->eFenceOpType; + } +#endif + + { + /* Convert 
timing from nanoseconds to microseconds */ + IMG_UINT64 ui64ExecuteTimeNs = gsCwq.asStatsExecuted[i32WriteOffset].ui64ExecuteTime; + IMG_UINT64 ui64EnqueuedTimeNs = gsCwq.asStatsExecuted[i32WriteOffset].ui64EnqueuedTime; + + do_div(ui64ExecuteTimeNs, 1000); + do_div(ui64EnqueuedTimeNs, 1000); + + ui64ExecuteTime = ui64ExecuteTimeNs; + ui64EnqueuedTime = ui64EnqueuedTimeNs; + } + + /* Coalesced deferred CacheOps do not contribute to statistics, + as both enqueue/execute time is identical for these CacheOps */ + if (!gsCwq.asStatsExecuted[i32WriteOffset].bIsFence) + { + /* Calculate the approximate cumulative moving average execution time. + * This calculation is based on standard equation: + * + * CMAnext = (new + count * CMAprev) / (count + 1) + * + * but in simplified form: + * + * CMAnext = CMAprev + (new - CMAprev) / (count + 1) + * + * this gets rid of multiplication and prevents overflow. + * + * Also to increase accuracy that we lose with integer division, + * we hold the moving remainder of the division and add it. + * + * CMAnext = CMAprev + (new - CMAprev + CMRprev) / (count + 1) + * + * Multiple tests proved it to be the best solution for approximating + * CMA using integers. 
+ * + */ + + IMG_UINT32 ui32Time = ui64ExecuteTime - ui64EnqueuedTime; + IMG_INT32 i32Div = (IMG_INT32)ui32Time - (IMG_INT32)gsCwq.ui32AvgExecTime + (IMG_INT32)gsCwq.ui32AvgExecTimeRemainder; + + gsCwq.ui32AvgExecTime += i32Div / (IMG_INT32)(gsCwq.ui32TotalExecOps + 1); + gsCwq.ui32AvgExecTimeRemainder = i32Div % (IMG_INT32)(gsCwq.ui32TotalExecOps + 1); + + gsCwq.ui32TotalExecOps++; + } + + if (!gsCwq.asStatsExecuted[i32WriteOffset].bKMReq) + { + /* This operation queues only UM CacheOp in per-PID process statistics database */ + PVRSRVStatsUpdateCacheOpStats(gsCwq.asStatsExecuted[i32WriteOffset].uiCacheOp, + gsCwq.asStatsExecuted[i32WriteOffset].ui32OpSeqNum, +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + gsCwq.asStatsExecuted[i32WriteOffset].sDevVAddr, + gsCwq.asStatsExecuted[i32WriteOffset].sDevPAddr, + gsCwq.asStatsExecuted[i32WriteOffset].eFenceOpType, +#endif + gsCwq.asStatsExecuted[i32WriteOffset].uiOffset, + gsCwq.asStatsExecuted[i32WriteOffset].uiSize, + ui64ExecuteTime-ui64EnqueuedTime, + gsCwq.asStatsExecuted[i32WriteOffset].bUMF, + gsCwq.asStatsExecuted[i32WriteOffset].bIsFence, + psCacheOpWorkItem->pid); + } + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) +e0: +#endif + OSLockRelease(gsCwq.hStatsExecLock); +} + +static int CacheOpStatsExecLogRead(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + IMG_CHAR *pszFlushype; + IMG_CHAR *pszCacheOpType; + IMG_CHAR *pszFlushSource; + IMG_INT32 i32ReadOffset; + IMG_INT32 i32WriteOffset; + IMG_UINT64 ui64EnqueuedTime; + IMG_UINT64 ui64DequeuedTime; + IMG_UINT64 ui64ExecuteTime; + IMG_CHAR szBuffer[CACHEOP_MAX_DEBUG_MESSAGE_LEN] = {0}; + PVR_UNREFERENCED_PARAMETER(pvData); + + OSLockAcquire(gsCwq.hStatsExecLock); + + DIPrintf(psEntry, + "Primary CPU d-cache architecture: LSZ: 0x%d, URBF: %s\n", + gsCwq.uiLineSize, + gsCwq.bSupportsUMFlush ? 
"Yes" : "No" + ); + + DIPrintf(psEntry, + "Configuration: QSZ: %d, UKT: %d, KDFT: %d, KDF: %s, URBF: %s\n", + CACHEOP_INDICES_MAX, + gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD], + gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD], + gsCwq.eConfig & CACHEOP_CONFIG_KDF ? "Yes" : "No", + gsCwq.eConfig & CACHEOP_CONFIG_URBF ? "Yes" : "No" + ); + + DIPrintf(psEntry, + "Summary: OP[F][TL] (tot.avg): %d.%d/%d.%d/%d, [KM][UM][A]SYNC: %d.%d/%d/%d, RBF (um/km): %d/%d\n", + gsCwq.ui32TotalExecOps, gsCwq.ui32AvgExecTime, gsCwq.ui32TotalFenceOps, gsCwq.ui32AvgFenceTime, gsCwq.ui32ServerDTL, + gsCwq.ui32ServerSync, gsCwq.ui32ServerSyncVA, gsCwq.ui32ClientSync, gsCwq.ui32ServerASync, + gsCwq.ui32ClientRBF, gsCwq.ui32ServerRBF + ); + + CacheOpStatsExecLogHeader(szBuffer); + DIPrintf(psEntry, "%s\n", szBuffer); + + i32WriteOffset = gsCwq.i32StatsExecWriteIdx; + for (i32ReadOffset = DECR_WRAP(i32WriteOffset); + i32ReadOffset != i32WriteOffset; + i32ReadOffset = DECR_WRAP(i32ReadOffset)) + { + if (!gsCwq.asStatsExecuted[i32ReadOffset].ui32OpSeqNum && + !gsCwq.asStatsExecuted[i32ReadOffset].uiCacheOp) + { + break; + } + + { + /* Convert from nano-seconds to micro-seconds */ + IMG_UINT64 ui64ExecuteTimeNs = gsCwq.asStatsExecuted[i32ReadOffset].ui64ExecuteTime; + IMG_UINT64 ui64EnqueuedTimeNs = gsCwq.asStatsExecuted[i32ReadOffset].ui64EnqueuedTime; + IMG_UINT64 ui64DequeuedTimeNs = gsCwq.asStatsExecuted[i32ReadOffset].ui64DequeuedTime; + + do_div(ui64ExecuteTimeNs, 1000); + do_div(ui64EnqueuedTimeNs, 1000); + do_div(ui64DequeuedTimeNs, 1000); + + ui64ExecuteTime = ui64ExecuteTimeNs; + ui64EnqueuedTime = ui64EnqueuedTimeNs; + ui64DequeuedTime = ui64DequeuedTimeNs; + } + + if (gsCwq.asStatsExecuted[i32ReadOffset].bIsFence) + { + IMG_CHAR *pszMode = ""; + IMG_CHAR *pszFenceType = ""; + pszCacheOpType = "Fence"; + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + switch (gsCwq.asStatsExecuted[i32ReadOffset].eFenceOpType) + { + case RGXFWIF_DM_GP: + pszFenceType = " GP "; + 
break; + + case RGXFWIF_DM_TDM: + pszFenceType = " TDM "; + break; + + case RGXFWIF_DM_GEOM: + pszFenceType = " GEOM"; + break; + + case RGXFWIF_DM_3D: + pszFenceType = " 3D "; + break; + + case RGXFWIF_DM_CDM: + pszFenceType = " CDM "; + break; + + default: + pszFenceType = " DM? "; + CACHEOP_PVR_ASSERT(0); + break; + } +#endif + + DIPrintf(psEntry, +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + CACHEOP_RI_PRINTF, +#else + CACHEOP_PRINTF, +#endif + gsCwq.asStatsExecuted[i32ReadOffset].pid, + pszCacheOpType, + pszFenceType, + pszMode, +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + 0ull, + 0ull, +#endif + gsCwq.asStatsExecuted[i32ReadOffset].uiOffset, + gsCwq.asStatsExecuted[i32ReadOffset].uiSize, + ui64ExecuteTime - ui64EnqueuedTime, + ui64DequeuedTime ? ui64DequeuedTime - ui64EnqueuedTime : 0, /* CacheOp might not have a valid DequeuedTime */ + gsCwq.asStatsExecuted[i32ReadOffset].ui32OpSeqNum); + } + else + { + IMG_DEVMEM_SIZE_T ui64NumOfPages; + + ui64NumOfPages = gsCwq.asStatsExecuted[i32ReadOffset].uiSize >> gsCwq.uiPageShift; + if (ui64NumOfPages <= PMR_MAX_TRANSLATION_STACK_ALLOC) + { + pszFlushype = "RBF.Fast"; + } + else + { + pszFlushype = "RBF.Slow"; + } + + if (gsCwq.asStatsExecuted[i32ReadOffset].bUMF) + { + pszFlushSource = " UM"; + } + else + { + /* + - Request originates directly from a KM thread or in KM (KM<), or + - Request originates from a UM thread and is KM deferred (KM+), or + */ + pszFlushSource = + gsCwq.asStatsExecuted[i32ReadOffset].bKMReq ? " KM<" : + gsCwq.asStatsExecuted[i32ReadOffset].bDeferred && gsCwq.asStatsExecuted[i32ReadOffset].ui64ExecuteTime ? " KM+" : + !gsCwq.asStatsExecuted[i32ReadOffset].ui64ExecuteTime ? 
" KM-" : " KM"; + } + + switch (gsCwq.asStatsExecuted[i32ReadOffset].uiCacheOp) + { + case PVRSRV_CACHE_OP_NONE: + pszCacheOpType = "None"; + break; + case PVRSRV_CACHE_OP_CLEAN: + pszCacheOpType = "Clean"; + break; + case PVRSRV_CACHE_OP_INVALIDATE: + pszCacheOpType = "Invalidate"; + break; + case PVRSRV_CACHE_OP_FLUSH: + pszCacheOpType = "Flush"; + break; + case PVRSRV_CACHE_OP_TIMELINE: + pszCacheOpType = "Timeline"; + pszFlushype = " "; + break; + default: + pszCacheOpType = "Unknown"; + gsCwq.asStatsExecuted[i32ReadOffset].ui32OpSeqNum = + (IMG_UINT32) gsCwq.asStatsExecuted[i32ReadOffset].uiCacheOp; + break; + } + + DIPrintf(psEntry, +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + CACHEOP_RI_PRINTF, +#else + CACHEOP_PRINTF, +#endif + gsCwq.asStatsExecuted[i32ReadOffset].pid, + pszCacheOpType, + pszFlushype, + pszFlushSource, +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + gsCwq.asStatsExecuted[i32ReadOffset].sDevVAddr.uiAddr, + gsCwq.asStatsExecuted[i32ReadOffset].sDevPAddr.uiAddr, +#endif + gsCwq.asStatsExecuted[i32ReadOffset].uiOffset, + gsCwq.asStatsExecuted[i32ReadOffset].uiSize, + ui64ExecuteTime - ui64EnqueuedTime, + ui64DequeuedTime ? 
ui64DequeuedTime - ui64EnqueuedTime : 0, /* CacheOp might not have a valid DequeuedTime */ + gsCwq.asStatsExecuted[i32ReadOffset].ui32OpSeqNum); + } + } + + OSLockRelease(gsCwq.hStatsExecLock); + + return 0; +} +#endif /* defined(CACHEOP_DEBUG) */ + +static INLINE void CacheOpStatsReset(void) +{ +#if defined(CACHEOP_DEBUG) + gsCwq.ui32TotalExecOps = 0; + gsCwq.ui32TotalFenceOps = 0; + gsCwq.ui32AvgExecTime = 0; + gsCwq.ui32AvgExecTimeRemainder = 0; + gsCwq.ui32AvgFenceTime = 0; + gsCwq.ui32AvgFenceTimeRemainder = 0; + gsCwq.ui32ClientRBF = 0; + gsCwq.ui32ClientSync = 0; + gsCwq.ui32ServerRBF = 0; + gsCwq.ui32ServerASync = 0; + gsCwq.ui32ServerSyncVA = 0; + gsCwq.ui32ServerSync = 0; + gsCwq.ui32ServerDTL = 0; + gsCwq.i32StatsExecWriteIdx = 0; + OSCachedMemSet(gsCwq.asStatsExecuted, 0, sizeof(gsCwq.asStatsExecuted)); +#endif +} + +static void CacheOpConfigUpdate(IMG_UINT32 ui32Config) +{ + OSLockAcquire(gsCwq.hConfigLock); + + /* Step 0, set the gsCwq.eConfig bits */ + if (!(ui32Config & (CACHEOP_CONFIG_LAST - 1))) + { + gsCwq.eConfig = CACHEOP_CONFIG_KDF; + if (gsCwq.bSupportsUMFlush) + { + gsCwq.eConfig |= CACHEOP_CONFIG_URBF; + } + } + else + { + if (ui32Config & CACHEOP_CONFIG_KDF) + { + gsCwq.eConfig |= CACHEOP_CONFIG_KDF; + } + else + { + gsCwq.eConfig &= ~CACHEOP_CONFIG_KDF; + } + + if (gsCwq.bSupportsUMFlush && (ui32Config & CACHEOP_CONFIG_URBF)) + { + gsCwq.eConfig |= CACHEOP_CONFIG_URBF; + } + else + { + gsCwq.eConfig &= ~CACHEOP_CONFIG_URBF; + } + } + + if (ui32Config & CACHEOP_CONFIG_KLOG) + { + /* Suppress logs from KM caller */ + gsCwq.eConfig |= CACHEOP_CONFIG_KLOG; + } + else + { + gsCwq.eConfig &= ~CACHEOP_CONFIG_KLOG; + } + + /* Step 1, set gsCwq.ui32Config based on gsCwq.eConfig */ + ui32Config = 0; + + if (gsCwq.eConfig & CACHEOP_CONFIG_KDF) + { + ui32Config |= CACHEOP_CONFIG_KDF; + } + if (gsCwq.eConfig & CACHEOP_CONFIG_URBF) + { + ui32Config |= CACHEOP_CONFIG_URBF; + } + if (gsCwq.eConfig & CACHEOP_CONFIG_KLOG) + { + ui32Config |= 
CACHEOP_CONFIG_KLOG; + } + gsCwq.ui32Config = ui32Config; + + + /* Step 3, in certain cases where a CacheOp/VA is provided, this threshold determines at what point + the optimisation due to the presence of said VA (i.e. us not having to remap the PMR pages in KM) + is clawed-back because of the overhead of maintaining such large request which might stalls the + user thread; so to hide this latency have these CacheOps executed on deferred CacheOp thread */ + gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD] = (IMG_UINT32)(PVR_DIRTY_BYTES_FLUSH_THRESHOLD >> 2); + + /* Step 4, if no UM support, all requests are done in KM so zero these forcing all client requests + to come down into the KM for maintenance */ + gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD] = 0; + + if (gsCwq.bSupportsUMFlush) + { + /* With URBF enabled we never go to the kernel */ + if (gsCwq.eConfig & CACHEOP_CONFIG_URBF) + { + gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD] = (IMG_UINT32)~0; + } + } + + /* Step 5, reset stats. */ + CacheOpStatsReset(); + + OSLockRelease(gsCwq.hConfigLock); +} + +static int CacheOpConfigRead(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + PVR_UNREFERENCED_PARAMETER(pvData); + DIPrintf(psEntry, + "KDF: %s, URBF: %s\n", + gsCwq.eConfig & CACHEOP_CONFIG_KDF ? "Yes" : "No", + gsCwq.eConfig & CACHEOP_CONFIG_URBF ? 
"Yes" : "No" + ); + return 0; +} + +static INLINE PVRSRV_ERROR CacheOpConfigQuery(const PVRSRV_DEVICE_NODE *psDevNode, + const void *psPrivate, + IMG_UINT32 *pui32Value) +{ + IMG_UINT32 ui32ID = (IMG_UINT32)(uintptr_t) psPrivate; + PVR_UNREFERENCED_PARAMETER(psDevNode); + + switch (ui32ID) + { + case APPHINT_ID_CacheOpConfig: + *pui32Value = gsCwq.ui32Config; + break; + + case APPHINT_ID_CacheOpUMKMThresholdSize: + *pui32Value = gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD]; + break; + + default: + break; + } + + return PVRSRV_OK; +} + +static INLINE PVRSRV_ERROR CacheOpConfigSet(const PVRSRV_DEVICE_NODE *psDevNode, + const void *psPrivate, + IMG_UINT32 ui32Value) +{ + IMG_UINT32 ui32ID = (IMG_UINT32)(uintptr_t) psPrivate; + PVR_UNREFERENCED_PARAMETER(psDevNode); + + switch (ui32ID) + { + case APPHINT_ID_CacheOpConfig: + CacheOpConfigUpdate(ui32Value & CACHEOP_CONFIG_ALL); + break; + + + case APPHINT_ID_CacheOpUMKMThresholdSize: + { + if (!ui32Value || !gsCwq.bSupportsUMFlush) + { + /* CPU ISA does not support UM flush, therefore every request goes down into + the KM, silently ignore request to adjust threshold */ + PVR_ASSERT(! 
gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD]); + break; + } + else if (ui32Value < gsCwq.uiPageSize) + { + /* Silently round-up to OS page size */ + ui32Value = gsCwq.uiPageSize; + } + + /* Align to OS page size */ + ui32Value &= ~(gsCwq.uiPageSize - 1); + + gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD] = ui32Value; + + break; + } + + default: + break; + } + + return PVRSRV_OK; +} + +static INLINE void CacheOpQItemRecycle(CACHEOP_WORK_ITEM *psCacheOpWorkItem) +{ + PVRSRV_ERROR eError; + eError = PMRUnlockSysPhysAddresses(psCacheOpWorkItem->psPMR); + PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses"); + /* Set to max as precaution should recycling this CacheOp index fail + to reset it, this is purely to safe-guard having to discard such + subsequent deferred CacheOps or signal the sw sync timeline */ + psCacheOpWorkItem->iTimeline = PVRSRV_NO_TIMELINE; + psCacheOpWorkItem->ui32OpSeqNum = (IMG_UINT32)~0; +#if defined(CACHEOP_DEBUG) + psCacheOpWorkItem->psPMR = (void *)(uintptr_t)~0; +#endif +} + +static INLINE void CacheOpQItemReadCheck(CACHEOP_WORK_ITEM *psCacheOpWorkItem) +{ +#if defined(CACHEOP_DEBUG) + CACHEOP_PVR_ASSERT(psCacheOpWorkItem->psPMR); + CACHEOP_PVR_ASSERT(psCacheOpWorkItem->psPMR != (void *)(uintptr_t)~0); + CACHEOP_PVR_ASSERT(psCacheOpWorkItem->ui32OpSeqNum != (IMG_UINT32)~0); +#else + PVR_UNREFERENCED_PARAMETER(psCacheOpWorkItem); +#endif +} + +static INLINE void CacheOpQItemWriteCheck(CACHEOP_WORK_ITEM *psCacheOpWorkItem) +{ +#if defined(CACHEOP_DEBUG) + CACHEOP_PVR_ASSERT(psCacheOpWorkItem->psPMR == (void *)(uintptr_t)~0); + CACHEOP_PVR_ASSERT(psCacheOpWorkItem->ui32OpSeqNum == (IMG_UINT32)~0); + CACHEOP_PVR_ASSERT(psCacheOpWorkItem->iTimeline == PVRSRV_NO_TIMELINE); +#else + PVR_UNREFERENCED_PARAMETER(psCacheOpWorkItem); +#endif +} + +static INLINE IMG_UINT32 CacheOpGetNextCommonSeqNum(void) +{ + IMG_UINT32 ui32SeqNum = OSAtomicIncrement(&gsCwq.hCommonSeqNum); + if (! 
ui32SeqNum) + { + ui32SeqNum = OSAtomicIncrement(&gsCwq.hCommonSeqNum); + } + return ui32SeqNum; +} + +static INLINE IMG_BOOL CacheOpFenceCheck(IMG_UINT32 ui32CompletedSeqNum, + IMG_UINT32 ui32FenceSeqNum) +{ + IMG_UINT32 ui32RebasedCompletedNum; + IMG_UINT32 ui32RebasedFenceNum; + IMG_UINT32 ui32Rebase; + + if (ui32FenceSeqNum == 0) + { + return IMG_TRUE; + } + + /* + The problem statement is how to compare two values on + a numerical sequentially incrementing timeline in the + presence of wrap around arithmetic semantics using a + single ui32 counter & atomic (increment) operations. + + The rationale for the solution here is to rebase the + incoming values to the sequence midpoint and perform + comparisons there; this allows us to handle overflow + or underflow wrap-round using only a single integer. + + NOTE: Here we assume that the absolute value of the + difference between the two incoming values in _not_ + greater than CACHEOP_SEQ_MIDPOINT. This assumption + holds as it implies that it is very _unlikely_ that 2 + billion CacheOp requests could have been made between + a single client's CacheOp request & the corresponding + fence check. This code sequence is hopefully a _more_ + hand optimised (branchless) version of this: + + x = ui32CompletedOpSeqNum + y = ui32FenceOpSeqNum + + if (|x - y| < CACHEOP_SEQ_MIDPOINT) + return (x - y) >= 0 ? true : false + else + return (y - x) >= 0 ? 
true : false + */ + ui32Rebase = CACHEOP_SEQ_MIDPOINT - ui32CompletedSeqNum; + + /* ui32Rebase could be either positive/negative, in + any case we still perform operation using unsigned + semantics as 2's complement notation always means + we end up with the correct result */ + ui32RebasedCompletedNum = ui32Rebase + ui32CompletedSeqNum; + ui32RebasedFenceNum = ui32Rebase + ui32FenceSeqNum; + + return (ui32RebasedCompletedNum >= ui32RebasedFenceNum); +} + +static INLINE PVRSRV_ERROR CacheOpTimelineBind(PVRSRV_DEVICE_NODE *psDevNode, + CACHEOP_WORK_ITEM *psCacheOpWorkItem, + PVRSRV_TIMELINE iTimeline) +{ + PVRSRV_ERROR eError; + + /* Always default the incoming CacheOp work-item to safe values */ + SyncClearTimelineObj(&psCacheOpWorkItem->sSWTimelineObj); + psCacheOpWorkItem->iTimeline = PVRSRV_NO_TIMELINE; + psCacheOpWorkItem->psDevNode = psDevNode; + if (iTimeline == PVRSRV_NO_TIMELINE) + { + return PVRSRV_OK; + } + + psCacheOpWorkItem->iTimeline = iTimeline; + eError = SyncSWGetTimelineObj(iTimeline, &psCacheOpWorkItem->sSWTimelineObj); + PVR_LOG_IF_ERROR(eError, "SyncSWGetTimelineObj"); + + return eError; +} + +static INLINE PVRSRV_ERROR CacheOpTimelineExec(CACHEOP_WORK_ITEM *psCacheOpWorkItem) +{ + PVRSRV_ERROR eError; + + if (psCacheOpWorkItem->iTimeline == PVRSRV_NO_TIMELINE) + { + return PVRSRV_OK; + } + CACHEOP_PVR_ASSERT(psCacheOpWorkItem->sSWTimelineObj.pvTlObj); + + eError = SyncSWTimelineAdvanceKM(psCacheOpWorkItem->psDevNode, + &psCacheOpWorkItem->sSWTimelineObj); + (void) SyncSWTimelineReleaseKM(&psCacheOpWorkItem->sSWTimelineObj); + + return eError; +} + +static INLINE void CacheOpExecRangeBased(PVRSRV_DEVICE_NODE *psDevNode, + PVRSRV_CACHE_OP uiCacheOp, + IMG_BYTE *pbCpuVirtAddr, + IMG_CPU_PHYADDR sCpuPhyAddr, + IMG_DEVMEM_OFFSET_T uiPgAlignedOffset, + IMG_DEVMEM_OFFSET_T uiCLAlignedStartOffset, + IMG_DEVMEM_OFFSET_T uiCLAlignedEndOffset) +{ + IMG_BYTE *pbCpuVirtAddrEnd; + IMG_BYTE *pbCpuVirtAddrStart; + IMG_CPU_PHYADDR sCpuPhyAddrEnd; + 
IMG_CPU_PHYADDR sCpuPhyAddrStart; + IMG_DEVMEM_SIZE_T uiRelFlushSize; + IMG_DEVMEM_OFFSET_T uiRelFlushOffset; + IMG_DEVMEM_SIZE_T uiNextPgAlignedOffset; + + /* These quantities allows us to perform cache operations + at cache-line granularity thereby ensuring we do not + perform more than is necessary */ + CACHEOP_PVR_ASSERT(uiPgAlignedOffset < uiCLAlignedEndOffset); + uiRelFlushSize = (IMG_DEVMEM_SIZE_T)gsCwq.uiPageSize; + uiRelFlushOffset = 0; + + if (uiCLAlignedStartOffset > uiPgAlignedOffset) + { + /* Zero unless initially starting at an in-page offset */ + uiRelFlushOffset = uiCLAlignedStartOffset - uiPgAlignedOffset; + uiRelFlushSize -= uiRelFlushOffset; + } + + /* uiRelFlushSize is gsCwq.uiPageSize unless current outstanding CacheOp + size is smaller. The 1st case handles in-page CacheOp range and + the 2nd case handles multiple-page CacheOp range with a last + CacheOp size that is less than gsCwq.uiPageSize */ + uiNextPgAlignedOffset = uiPgAlignedOffset + (IMG_DEVMEM_SIZE_T)gsCwq.uiPageSize; + if (uiNextPgAlignedOffset < uiPgAlignedOffset) + { + /* uiNextPgAlignedOffset is greater than uiCLAlignedEndOffset + by implication of this wrap-round; this only happens when + uiPgAlignedOffset is the last page aligned offset */ + uiRelFlushSize = uiRelFlushOffset ? + uiCLAlignedEndOffset - uiCLAlignedStartOffset : + uiCLAlignedEndOffset - uiPgAlignedOffset; + } + else + { + if (uiNextPgAlignedOffset > uiCLAlignedEndOffset) + { + uiRelFlushSize = uiRelFlushOffset ? 
+ uiCLAlignedEndOffset - uiCLAlignedStartOffset : + uiCLAlignedEndOffset - uiPgAlignedOffset; + } + } + + /* More efficient to request cache maintenance operation for full + relative range as opposed to multiple cache-aligned ranges */ + sCpuPhyAddrStart.uiAddr = sCpuPhyAddr.uiAddr + uiRelFlushOffset; + sCpuPhyAddrEnd.uiAddr = sCpuPhyAddrStart.uiAddr + uiRelFlushSize; + if (pbCpuVirtAddr) + { + pbCpuVirtAddrStart = pbCpuVirtAddr + uiRelFlushOffset; + pbCpuVirtAddrEnd = pbCpuVirtAddrStart + uiRelFlushSize; + } + else + { + /* Some OS/Env layer support functions expect NULL(s) */ + pbCpuVirtAddrStart = NULL; + pbCpuVirtAddrEnd = NULL; + } + + /* Perform requested CacheOp on the CPU data cache for successive cache + line worth of bytes up to page or in-page cache-line boundary */ + switch (uiCacheOp) + { + case PVRSRV_CACHE_OP_CLEAN: + OSCPUCacheCleanRangeKM(psDevNode, pbCpuVirtAddrStart, pbCpuVirtAddrEnd, + sCpuPhyAddrStart, sCpuPhyAddrEnd); + break; + case PVRSRV_CACHE_OP_INVALIDATE: + OSCPUCacheInvalidateRangeKM(psDevNode, pbCpuVirtAddrStart, pbCpuVirtAddrEnd, + sCpuPhyAddrStart, sCpuPhyAddrEnd); + break; + case PVRSRV_CACHE_OP_FLUSH: + OSCPUCacheFlushRangeKM(psDevNode, pbCpuVirtAddrStart, pbCpuVirtAddrEnd, + sCpuPhyAddrStart, sCpuPhyAddrEnd); + break; + default: + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid cache operation type %d", + __func__, uiCacheOp)); + break; + } + +#if defined(CACHEOP_DEBUG) + /* Tracks the number of kernel-mode cacheline maintenance instructions */ + gsCwq.ui32ServerRBF += (uiRelFlushSize & ((IMG_DEVMEM_SIZE_T)~(gsCwq.uiLineSize - 1))) >> gsCwq.uiLineShift; +#endif +} + +static INLINE void CacheOpExecRangeBasedVA(PVRSRV_DEVICE_NODE *psDevNode, + IMG_CPU_VIRTADDR pvAddress, + IMG_DEVMEM_SIZE_T uiSize, + PVRSRV_CACHE_OP uiCacheOp) +{ + IMG_CPU_PHYADDR sCpuPhyAddrUnused = + { IMG_CAST_TO_CPUPHYADDR_UINT(0xCAFEF00DDEADBEEFULL) }; + IMG_BYTE *pbEnd = (IMG_BYTE*)((uintptr_t)pvAddress + (uintptr_t)uiSize); + IMG_BYTE *pbStart = 
(IMG_BYTE*)((uintptr_t)pvAddress & ~((uintptr_t)gsCwq.uiLineSize-1)); + + /* + If the start/end address isn't aligned to cache line size, round it up to the + nearest multiple; this ensures that we flush all the cache lines affected by + unaligned start/end addresses. + */ + pbEnd = (IMG_BYTE *) PVR_ALIGN((uintptr_t)pbEnd, (uintptr_t)gsCwq.uiLineSize); + switch (uiCacheOp) + { + case PVRSRV_CACHE_OP_CLEAN: + OSCPUCacheCleanRangeKM(psDevNode, pbStart, pbEnd, sCpuPhyAddrUnused, sCpuPhyAddrUnused); + break; + case PVRSRV_CACHE_OP_INVALIDATE: + OSCPUCacheInvalidateRangeKM(psDevNode, pbStart, pbEnd, sCpuPhyAddrUnused, sCpuPhyAddrUnused); + break; + case PVRSRV_CACHE_OP_FLUSH: + OSCPUCacheFlushRangeKM(psDevNode, pbStart, pbEnd, sCpuPhyAddrUnused, sCpuPhyAddrUnused); + break; + default: + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid cache operation type %d", + __func__, uiCacheOp)); + break; + } + +#if defined(CACHEOP_DEBUG) + /* Tracks the number of kernel-mode cacheline maintenance instructions */ + gsCwq.ui32ServerRBF += (uiSize & ((IMG_DEVMEM_SIZE_T)~(gsCwq.uiLineSize - 1))) >> gsCwq.uiLineShift; +#endif +} + +static INLINE PVRSRV_ERROR CacheOpValidateUMVA(PMR *psPMR, + IMG_CPU_VIRTADDR pvAddress, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + PVRSRV_CACHE_OP uiCacheOp, + void **ppvOutAddress) +{ + PVRSRV_ERROR eError = PVRSRV_OK; +#if defined(__linux__) && !defined(CACHEFLUSH_NO_KMRBF_USING_UMVA) + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; +#endif + void __user *pvAddr; + + IMG_BOOL bReadOnlyInvalidate = + (uiCacheOp == PVRSRV_CACHE_OP_INVALIDATE) && + !PVRSRV_CHECK_CPU_WRITEABLE(PMR_Flags(psPMR)); + + if (!pvAddress || bReadOnlyInvalidate) + { + /* As pvAddress is optional, NULL is expected from UM/KM requests */ + /* Also don't allow invalidates for UMVA of read-only memory */ + pvAddr = NULL; + goto e0; + } + + + +#if !defined(__linux__) || defined(CACHEFLUSH_NO_KMRBF_USING_UMVA) + pvAddr = NULL; +#else + /* Validate VA, assume most 
basic address limit access_ok() check */ + pvAddr = (void __user *)(uintptr_t)((uintptr_t)pvAddress + uiOffset); + if (!access_ok(pvAddr, uiSize)) + { + pvAddr = NULL; + if (! mm) + { + /* Bad KM request, don't silently ignore */ + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_CPU_ADDR, e0); + } + } + else if (mm) + { + mmap_read_lock(mm); + vma = find_vma(mm, (unsigned long)(uintptr_t)pvAddr); + + if (!vma || + vma->vm_start > (unsigned long)(uintptr_t)pvAddr || + vma->vm_end < (unsigned long)(uintptr_t)pvAddr + uiSize || + vma->vm_private_data != psPMR) + { + /* + * Request range is not fully mapped or is not matching the PMR + * Ignore request's VA. + */ + pvAddr = NULL; + } + mmap_read_unlock(mm); + } +#endif + +e0: + *ppvOutAddress = (IMG_CPU_VIRTADDR __force) pvAddr; + return eError; +} + +static PVRSRV_ERROR CacheOpPMRExec (PMR *psPMR, + IMG_CPU_VIRTADDR pvAddress, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + PVRSRV_CACHE_OP uiCacheOp, + IMG_BOOL bIsRequestValidated) + +{ + IMG_HANDLE hPrivOut = NULL; + IMG_BOOL bPMRIsSparse; + IMG_UINT32 ui32PageIndex; + IMG_UINT32 ui32NumOfPages; + size_t uiOutSize; /* Effectively unused */ + PVRSRV_DEVICE_NODE *psDevNode; + IMG_DEVMEM_SIZE_T uiPgAlignedSize; + IMG_DEVMEM_OFFSET_T uiPgAlignedOffset; + IMG_DEVMEM_OFFSET_T uiCLAlignedEndOffset; + IMG_DEVMEM_OFFSET_T uiPgAlignedEndOffset; + IMG_DEVMEM_OFFSET_T uiCLAlignedStartOffset; + IMG_DEVMEM_OFFSET_T uiPgAlignedStartOffset; + IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC]; + IMG_CPU_PHYADDR asCpuPhyAddr[PMR_MAX_TRANSLATION_STACK_ALLOC]; + IMG_CPU_PHYADDR *psCpuPhyAddr = asCpuPhyAddr; + IMG_BOOL bIsPMRInfoValid = IMG_FALSE; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_BYTE *pbCpuVirtAddr = NULL; + IMG_BOOL *pbValid = abValid; + + if (uiCacheOp == PVRSRV_CACHE_OP_NONE || uiCacheOp == PVRSRV_CACHE_OP_TIMELINE) + { + return PVRSRV_OK; + } + + if (! 
bIsRequestValidated) + { + IMG_DEVMEM_SIZE_T uiLPhysicalSize; + + /* Need to validate parameters before proceeding */ + eError = PMR_PhysicalSize(psPMR, &uiLPhysicalSize); + PVR_LOG_RETURN_IF_ERROR(eError, "uiLPhysicalSize"); + + PVR_LOG_RETURN_IF_FALSE(((uiOffset+uiSize) <= uiLPhysicalSize), CACHEOP_DEVMEM_OOR_ERROR_STRING, PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE); + } + + eError = PMRLockSysPhysAddresses(psPMR); + PVR_LOG_RETURN_IF_ERROR(eError, "PMRLockSysPhysAddresses"); + + /* Fast track the request if a CPU VA is provided and CPU ISA supports VA only maintenance */ + eError = CacheOpValidateUMVA(psPMR, pvAddress, uiOffset, uiSize, uiCacheOp, (void**)&pbCpuVirtAddr); + if (eError == PVRSRV_OK) + { + pvAddress = pbCpuVirtAddr; + + if (pvAddress && gsCwq.uiCacheOpAddrType == OS_CACHE_OP_ADDR_TYPE_VIRTUAL) + { + CacheOpExecRangeBasedVA(PMR_DeviceNode(psPMR), pvAddress, uiSize, uiCacheOp); + + eError = PMRUnlockSysPhysAddresses(psPMR); + PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses"); +#if defined(CACHEOP_DEBUG) + gsCwq.ui32ServerSyncVA += 1; +#endif + return PVRSRV_OK; + } + else if (pvAddress) + { + /* Round down the incoming VA (if any) down to the nearest page aligned VA */ + pvAddress = (void*)((uintptr_t)pvAddress & ~((uintptr_t)gsCwq.uiPageSize-1)); +#if defined(CACHEOP_DEBUG) + gsCwq.ui32ServerSyncVA += 1; +#endif + } + } + else + { + /* + * This validation pathway has been added to accommodate any/all requests that might + * cause the kernel to Oops; essentially, KM requests should prevalidate cache maint. + * parameters but if this fails then we would rather fail gracefully than cause the + * kernel to Oops so instead we log the fact that an invalid KM virtual address was + * supplied and what action was taken to mitigate against kernel Oops(ing) if any. + */ + CACHEOP_PVR_ASSERT(pbCpuVirtAddr == NULL); + + if (gsCwq.uiCacheOpAddrType == OS_CACHE_OP_ADDR_TYPE_PHYSICAL) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Invalid vaddress 0x%p in CPU d-cache maint. 
op, using paddress", + __func__, + pvAddress)); + + /* We can still proceed as kernel/cpu uses CPU PA for d-cache maintenance */ + pvAddress = NULL; + } + else + { + /* + * The approach here is to attempt a reacquisition of the PMR kernel VA and see if + * said VA corresponds to the parameter VA, if so fail requested cache maint. op. + * cause this indicates some kind of internal, memory and/or meta-data corruption + * else we reissue the request using this (re)acquired alias PMR kernel VA. + */ + if (PMR_IsSparse(psPMR)) + { + eError = PMRAcquireSparseKernelMappingData(psPMR, + 0, + gsCwq.uiPageSize, + (void **)&pbCpuVirtAddr, + &uiOutSize, + &hPrivOut); + PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireSparseKernelMappingData", e0); + } + else + { + eError = PMRAcquireKernelMappingData(psPMR, + 0, + gsCwq.uiPageSize, + (void **)&pbCpuVirtAddr, + &uiOutSize, + &hPrivOut); + PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireKernelMappingData", e0); + } + + /* Here, we only compare these CPU virtual addresses at granularity of the OS page size */ + if ((uintptr_t)pbCpuVirtAddr == ((uintptr_t)pvAddress & ~((uintptr_t)gsCwq.uiPageSize-1))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid vaddress 0x%p in CPU d-cache maint. op, no alt. so failing request", + __func__, + pvAddress)); + + eError = PMRReleaseKernelMappingData(psPMR, hPrivOut); + PVR_LOG_GOTO_WITH_ERROR("PMRReleaseKernelMappingData", eError, PVRSRV_ERROR_INVALID_CPU_ADDR, e0); + } + else if (gsCwq.uiCacheOpAddrType == OS_CACHE_OP_ADDR_TYPE_VIRTUAL) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Bad vaddress 0x%p in CPU d-cache maint. 
op, using reacquired vaddress 0x%p", + __func__, + pvAddress, + pbCpuVirtAddr)); + + /* Note that this might still fail if there is kernel memory/meta-data corruption; + there is not much we can do here but at the least we will be informed of this + before the kernel Oops(ing) */ + CacheOpExecRangeBasedVA(PMR_DeviceNode(psPMR), pbCpuVirtAddr, uiSize, uiCacheOp); + + eError = PMRReleaseKernelMappingData(psPMR, hPrivOut); + PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData"); + + eError = PVRSRV_OK; + goto e0; + } + else + { + /* At this junction, we have exhausted every possible work-around possible but we do + know that VA reacquisition returned another/alias page-aligned VA; so with this + future expectation of PMRAcquireKernelMappingData(), we proceed */ + PVR_DPF((PVR_DBG_WARNING, + "%s: Bad vaddress %p in CPU d-cache maint. op, will use reacquired vaddress", + __func__, + pvAddress)); + + eError = PMRReleaseKernelMappingData(psPMR, hPrivOut); + PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData"); + + /* NULL this to force per-page reacquisition down-stream */ + pvAddress = NULL; + } + } + } + + /* NULL clobbered var., OK to proceed */ + pbCpuVirtAddr = NULL; + eError = PVRSRV_OK; + + /* Need this for kernel mapping */ + bPMRIsSparse = PMR_IsSparse(psPMR); + psDevNode = PMR_DeviceNode(psPMR); + + /* Round the incoming offset down to the nearest cache-line / page aligned-address */ + uiCLAlignedEndOffset = uiOffset + uiSize; + uiCLAlignedEndOffset = PVR_ALIGN(uiCLAlignedEndOffset, (IMG_DEVMEM_SIZE_T)gsCwq.uiLineSize); + uiCLAlignedStartOffset = (uiOffset & ~((IMG_DEVMEM_OFFSET_T)gsCwq.uiLineSize-1)); + + uiPgAlignedEndOffset = uiCLAlignedEndOffset; + uiPgAlignedEndOffset = PVR_ALIGN(uiPgAlignedEndOffset, (IMG_DEVMEM_SIZE_T)gsCwq.uiPageSize); + uiPgAlignedStartOffset = (uiOffset & ~((IMG_DEVMEM_OFFSET_T)gsCwq.uiPageSize-1)); + uiPgAlignedSize = uiPgAlignedEndOffset - uiPgAlignedStartOffset; + +#if defined(CACHEOP_NO_CACHE_LINE_ALIGNED_ROUNDING) + /* For 
internal debug if cache-line optimised + flushing is suspected of causing data corruption */ + uiCLAlignedStartOffset = uiPgAlignedStartOffset; + uiCLAlignedEndOffset = uiPgAlignedEndOffset; +#endif + + /* Type of allocation backing the PMR data */ + ui32NumOfPages = uiPgAlignedSize >> gsCwq.uiPageShift; + if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC) + { + /* The pbValid array is allocated first as it is needed in + both physical/virtual cache maintenance methods */ + pbValid = OSAllocZMem(ui32NumOfPages * sizeof(IMG_BOOL)); + if (! pbValid) + { + pbValid = abValid; + } + else if (gsCwq.uiCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_VIRTUAL) + { + psCpuPhyAddr = OSAllocZMem(ui32NumOfPages * sizeof(IMG_CPU_PHYADDR)); + if (! psCpuPhyAddr) + { + psCpuPhyAddr = asCpuPhyAddr; + OSFreeMem(pbValid); + pbValid = abValid; + } + } + } + + /* We always retrieve PMR data in bulk, up-front if number of pages is within + PMR_MAX_TRANSLATION_STACK_ALLOC limits else we check to ensure that a + dynamic buffer has been allocated to satisfy requests outside limits */ + if (ui32NumOfPages <= PMR_MAX_TRANSLATION_STACK_ALLOC || pbValid != abValid) + { + if (gsCwq.uiCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_VIRTUAL) + { + /* Look-up PMR CpuPhyAddr once, if possible */ + eError = PMR_CpuPhysAddr(psPMR, + gsCwq.uiPageShift, + ui32NumOfPages, + uiPgAlignedStartOffset, + psCpuPhyAddr, + pbValid); + if (eError == PVRSRV_OK) + { + bIsPMRInfoValid = IMG_TRUE; + } + } + else + { + /* Look-up PMR per-page validity once, if possible */ + eError = PMR_IsOffsetValid(psPMR, + gsCwq.uiPageShift, + ui32NumOfPages, + uiPgAlignedStartOffset, + pbValid); + bIsPMRInfoValid = (eError == PVRSRV_OK) ? IMG_TRUE : IMG_FALSE; + } + } + + /* For each (possibly non-contiguous) PMR page(s), carry out the requested cache maint. op. 
*/ + for (uiPgAlignedOffset = uiPgAlignedStartOffset, ui32PageIndex = 0; + uiPgAlignedOffset < uiPgAlignedEndOffset; + uiPgAlignedOffset += (IMG_DEVMEM_OFFSET_T) gsCwq.uiPageSize, ui32PageIndex += 1) + { + + if (! bIsPMRInfoValid) + { + /* Never cross page boundary without looking up corresponding PMR page physical + address and/or page validity if these were not looked-up, in bulk, up-front */ + ui32PageIndex = 0; + if (gsCwq.uiCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_VIRTUAL) + { + eError = PMR_CpuPhysAddr(psPMR, + gsCwq.uiPageShift, + 1, + uiPgAlignedOffset, + psCpuPhyAddr, + pbValid); + PVR_LOG_GOTO_IF_ERROR(eError, "PMR_CpuPhysAddr", e0); + } + else + { + eError = PMR_IsOffsetValid(psPMR, + gsCwq.uiPageShift, + 1, + uiPgAlignedOffset, + pbValid); + PVR_LOG_GOTO_IF_ERROR(eError, "PMR_IsOffsetValid", e0); + } + } + + /* Skip invalid PMR pages (i.e. sparse) */ + if (pbValid[ui32PageIndex] == IMG_FALSE) + { + CACHEOP_PVR_ASSERT(bPMRIsSparse); + continue; + } + + if (pvAddress) + { + /* The caller has supplied either a KM/UM CpuVA, so use it unconditionally */ + pbCpuVirtAddr = + (void *)(uintptr_t)((uintptr_t)pvAddress + (uintptr_t)(uiPgAlignedOffset-uiPgAlignedStartOffset)); + } + /* Skip CpuVA acquire if CacheOp can be maintained entirely using CpuPA */ + else if (gsCwq.uiCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_PHYSICAL) + { + if (bPMRIsSparse) + { + eError = + PMRAcquireSparseKernelMappingData(psPMR, + uiPgAlignedOffset, + gsCwq.uiPageSize, + (void **)&pbCpuVirtAddr, + &uiOutSize, + &hPrivOut); + PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireSparseKernelMappingData", e0); + } + else + { + eError = + PMRAcquireKernelMappingData(psPMR, + uiPgAlignedOffset, + gsCwq.uiPageSize, + (void **)&pbCpuVirtAddr, + &uiOutSize, + &hPrivOut); + PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireKernelMappingData", e0); + } + } + + /* Issue actual cache maintenance for PMR */ + CacheOpExecRangeBased(psDevNode, + uiCacheOp, + pbCpuVirtAddr, + (gsCwq.uiCacheOpAddrType != 
OS_CACHE_OP_ADDR_TYPE_VIRTUAL) ? + psCpuPhyAddr[ui32PageIndex] : psCpuPhyAddr[0], + uiPgAlignedOffset, + uiCLAlignedStartOffset, + uiCLAlignedEndOffset); + + if (! pvAddress) + { + /* The caller has not supplied either a KM/UM CpuVA, release mapping */ + if (gsCwq.uiCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_PHYSICAL) + { + eError = PMRReleaseKernelMappingData(psPMR, hPrivOut); + PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData"); + } + } + } + +e0: + if (psCpuPhyAddr != asCpuPhyAddr) + { + OSFreeMem(psCpuPhyAddr); + } + + if (pbValid != abValid) + { + OSFreeMem(pbValid); + } + + eError = PMRUnlockSysPhysAddresses(psPMR); + PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses"); + + return eError; +} + +static PVRSRV_ERROR CacheOpQListExecRangeBased(void) +{ + IMG_UINT32 ui32NumOfEntries; + PVRSRV_ERROR eError = PVRSRV_OK; + CACHEOP_WORK_ITEM *psCacheOpWorkItem = NULL; + + /* Take a snapshot of the current count of deferred entries at this junction */ + ui32NumOfEntries = CacheOpIdxSpan(&gsCwq.hWriteCounter, &gsCwq.hReadCounter); + if (! ui32NumOfEntries) + { + return PVRSRV_OK; + } +#if defined(CACHEOP_DEBUG) + CACHEOP_PVR_ASSERT(ui32NumOfEntries < CACHEOP_INDICES_MAX); +#endif + + while (ui32NumOfEntries) + { + if (! OSAtomicRead(&gsCwq.hReadCounter)) + { + /* Normally, the read-counter will trail the write counter until the write + counter wraps-round to zero. 
Under this condition we (re)calculate as the + read-counter too is wrapping around at this point */ + ui32NumOfEntries = CacheOpIdxSpan(&gsCwq.hWriteCounter, &gsCwq.hReadCounter); + } +#if defined(CACHEOP_DEBUG) + /* Something's gone horribly wrong if these 2 counters are identical at this point */ + CACHEOP_PVR_ASSERT(OSAtomicRead(&gsCwq.hWriteCounter) != OSAtomicRead(&gsCwq.hReadCounter)); +#endif + + /* Select the next pending deferred work-item for RBF cache maintenance */ + psCacheOpWorkItem = &gsCwq.asWorkItems[CacheOpIdxNext(&gsCwq.hReadCounter)]; + CacheOpQItemReadCheck(psCacheOpWorkItem); +#if defined(CACHEOP_DEBUG) + /* The time waiting in the queue to be serviced */ + psCacheOpWorkItem->ui64DequeuedTime = OSClockns64(); +#endif + + eError = CacheOpPMRExec(psCacheOpWorkItem->psPMR, + NULL, /* No UM virtual address */ + psCacheOpWorkItem->uiOffset, + psCacheOpWorkItem->uiSize, + psCacheOpWorkItem->uiCacheOp, + IMG_TRUE /* PMR is pre-validated */ + ); + if (eError != PVRSRV_OK) + { +#if defined(CACHEOP_DEBUG) +#define PID_FMTSPEC " PID:%u" +#define CACHE_OP_WORK_PID psCacheOpWorkItem->pid +#else +#define PID_FMTSPEC "%s" +#define CACHE_OP_WORK_PID "" +#endif + + PVR_LOG(("Deferred CacheOpPMRExec failed:" + PID_FMTSPEC + " PMR:%p" + " Offset:%" IMG_UINT64_FMTSPECX + " Size:%" IMG_UINT64_FMTSPECX + " CacheOp:%d," + " error: %d", + CACHE_OP_WORK_PID, + psCacheOpWorkItem->psPMR, + psCacheOpWorkItem->uiOffset, + psCacheOpWorkItem->uiSize, + psCacheOpWorkItem->uiCacheOp, + eError)); + +#undef PID_FMTSPEC +#undef CACHE_OP_WORK_PID + } + +#if defined(CACHEOP_DEBUG) + psCacheOpWorkItem->ui64ExecuteTime = OSClockns64(); + CacheOpStatsExecLogWrite(psCacheOpWorkItem); +#endif + + /* The currently executed CacheOp item updates gsCwq.hCompletedSeqNum. 
+ NOTE: This CacheOp item might be a discard item, if so its seqNum + still updates the gsCwq.hCompletedSeqNum */ + OSAtomicWrite(&gsCwq.hCompletedSeqNum, psCacheOpWorkItem->ui32OpSeqNum); + + /* If CacheOp is timeline(d), notify timeline waiters */ + eError = CacheOpTimelineExec(psCacheOpWorkItem); + PVR_LOG_IF_ERROR(eError, "CacheOpTimelineExec"); + + /* Indicate that this CCB work-item slot is now free for (re)use */ + CacheOpQItemRecycle(psCacheOpWorkItem); + (void) CacheOpIdxIncrement(&gsCwq.hReadCounter); + ui32NumOfEntries = ui32NumOfEntries - 1; + } + + return eError; +} + +static INLINE PVRSRV_ERROR CacheOpQListExec(void) +{ + PVRSRV_ERROR eError; + + eError = CacheOpQListExecRangeBased(); + PVR_LOG_IF_ERROR(eError, "CacheOpQListExecRangeBased"); + + /* Signal any waiting threads blocked on CacheOp fence checks update + completed sequence number to last queue work item */ + eError = OSEventObjectSignal(gsCwq.hClientWakeUpEvtObj); + PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); + + return eError; +} + +static void CacheOpThread(void *pvData) +{ + PVRSRV_DATA *psPVRSRVData = pvData; + IMG_HANDLE hOSEvent; + PVRSRV_ERROR eError; + + /* Open CacheOp thread event object, abort driver if event object open fails */ + eError = OSEventObjectOpen(gsCwq.hThreadWakeUpEvtObj, &hOSEvent); + PVR_LOG_IF_ERROR(eError, "OSEventObjectOpen"); + + /* While driver is in good state & loaded, perform pending cache maintenance */ + while ((psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) && gsCwq.bInit) + { + /* Sleep-wait here until when signalled for new queued CacheOp work items; + when woken-up, drain deferred queue completely before next event-wait */ + (void) OSEventObjectWaitKernel(hOSEvent, CACHEOP_THREAD_WAIT_TIMEOUT); + while (CacheOpIdxSpan(&gsCwq.hWriteCounter, &gsCwq.hReadCounter)) + { + eError = CacheOpQListExec(); + PVR_LOG_IF_ERROR(eError, "CacheOpQListExec"); + } + } + + eError = CacheOpQListExec(); + PVR_LOG_IF_ERROR(eError, "CacheOpQListExec"); + + 
eError = OSEventObjectClose(hOSEvent); + PVR_LOG_IF_ERROR(eError, "OSEventObjectClose"); +} + +static PVRSRV_ERROR CacheOpBatchExecTimeline(PVRSRV_DEVICE_NODE *psDevNode, + PVRSRV_TIMELINE iTimeline, + IMG_UINT32 ui32CurrentFenceSeqNum, + IMG_UINT32 *pui32NextFenceSeqNum) +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32NextIdx; + CACHEOP_WORK_ITEM sCacheOpWorkItem = {NULL}; + CACHEOP_WORK_ITEM *psCacheOpWorkItem = NULL; + + eError = CacheOpTimelineBind(psDevNode, &sCacheOpWorkItem, iTimeline); + PVR_LOG_RETURN_IF_ERROR(eError, "CacheOpTimelineBind"); + + OSLockAcquire(gsCwq.hDeferredLock); + + /* + Check if there is any deferred queueing space available and that nothing is + currently queued. This second check is required as Android where timelines + are used sets a timeline signalling deadline of 1000ms to signal timelines + else complains. So seeing we cannot be sure how long the CacheOp presently + in the queue would take we should not send this timeline down the queue as + well. + */ + ui32NextIdx = CacheOpIdxNext(&gsCwq.hWriteCounter); + if (!CacheOpIdxSpan(&gsCwq.hWriteCounter, &gsCwq.hReadCounter) && + CacheOpIdxRead(&gsCwq.hReadCounter) != ui32NextIdx) + { + psCacheOpWorkItem = &gsCwq.asWorkItems[ui32NextIdx]; + CacheOpQItemWriteCheck(psCacheOpWorkItem); + + psCacheOpWorkItem->sSWTimelineObj = sCacheOpWorkItem.sSWTimelineObj; + psCacheOpWorkItem->iTimeline = sCacheOpWorkItem.iTimeline; + psCacheOpWorkItem->psDevNode = sCacheOpWorkItem.psDevNode; + psCacheOpWorkItem->ui32OpSeqNum = CacheOpGetNextCommonSeqNum(); + psCacheOpWorkItem->uiCacheOp = PVRSRV_CACHE_OP_TIMELINE; + psCacheOpWorkItem->uiOffset = (IMG_DEVMEM_OFFSET_T)0; + psCacheOpWorkItem->uiSize = (IMG_DEVMEM_SIZE_T)0; + /* Defer timeline using information page PMR */ + psCacheOpWorkItem->psPMR = gsCwq.psInfoPagePMR; + eError = PMRLockSysPhysAddresses(psCacheOpWorkItem->psPMR); + PVR_LOG_GOTO_IF_ERROR(eError, "PMRLockSysPhysAddresses", e0); +#if defined(CACHEOP_DEBUG) + psCacheOpWorkItem->pid = 
OSGetCurrentClientProcessIDKM(); + psCacheOpWorkItem->ui64EnqueuedTime = OSClockns64(); + gsCwq.ui32ServerASync += 1; + gsCwq.ui32ServerDTL += 1; +#endif + + /* Mark index ready for cache maintenance */ + (void) CacheOpIdxIncrement(&gsCwq.hWriteCounter); + + OSLockRelease(gsCwq.hDeferredLock); + + eError = OSEventObjectSignal(gsCwq.hThreadWakeUpEvtObj); + PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); + } + else + { + + OSLockRelease(gsCwq.hDeferredLock); + + /* signal timeline. + * All ops with timelines and partial batches were executed synchronously. */ + eError = CacheOpTimelineExec(&sCacheOpWorkItem); + PVR_LOG_IF_ERROR(eError, "CacheOpTimelineExec"); + } + + return eError; +e0: + if (psCacheOpWorkItem) + { + /* Need to ensure we leave this CacheOp QItem in the proper recycled state */ + CacheOpQItemRecycle(psCacheOpWorkItem); + OSLockRelease(gsCwq.hDeferredLock); + } + + return eError; +} + +static PVRSRV_ERROR CacheOpBatchExecRangeBased(PVRSRV_DEVICE_NODE *psDevNode, + PMR **ppsPMR, + IMG_CPU_VIRTADDR *pvAddress, + IMG_DEVMEM_OFFSET_T *puiOffset, + IMG_DEVMEM_SIZE_T *puiSize, + PVRSRV_CACHE_OP *puiCacheOp, + IMG_UINT32 ui32NumCacheOps, + PVRSRV_TIMELINE uiTimeline, + IMG_UINT32 uiCurrentFenceSeqNum, + IMG_UINT32 *pui32NextFenceSeqNum) +{ + IMG_UINT32 ui32Idx; + IMG_UINT32 ui32NextIdx; + IMG_BOOL bBatchHasTimeline; + IMG_BOOL bCacheOpConfigKDF; + IMG_DEVMEM_SIZE_T uiLogicalSize; + IMG_BOOL bBatchForceSynchronous = IMG_FALSE; + PVRSRV_ERROR eError = PVRSRV_OK; + CACHEOP_WORK_ITEM *psCacheOpWorkItem = NULL; +#if defined(CACHEOP_DEBUG) + CACHEOP_WORK_ITEM sCacheOpWorkItem = {0}; + IMG_UINT32 ui32OpSeqNum = CacheOpGetNextCommonSeqNum(); + sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM(); +#endif + + /* Check if batch has an associated timeline update */ + bBatchHasTimeline = puiCacheOp[ui32NumCacheOps-1] & PVRSRV_CACHE_OP_TIMELINE; + puiCacheOp[ui32NumCacheOps-1] &= ~(PVRSRV_CACHE_OP_TIMELINE); + + /* Check if batch is forcing synchronous execution */ + 
bBatchForceSynchronous = puiCacheOp[ui32NumCacheOps-1] & PVRSRV_CACHE_OP_FORCE_SYNCHRONOUS; + puiCacheOp[ui32NumCacheOps-1] &= ~(PVRSRV_CACHE_OP_FORCE_SYNCHRONOUS); + + /* Check if config. supports kernel deferring of cacheops */ + bCacheOpConfigKDF = CacheOpConfigSupports(CACHEOP_CONFIG_KDF); + + /* + Client expects the next fence seqNum to be zero unless the server has deferred + at least one CacheOp in the submitted queue in which case the server informs + the client of the last CacheOp seqNum deferred in this batch. + */ + for (*pui32NextFenceSeqNum = 0, ui32Idx = 0; ui32Idx < ui32NumCacheOps; ui32Idx++) + { + /* Fail UM request, don't silently ignore */ + PVR_GOTO_IF_INVALID_PARAM(puiSize[ui32Idx], eError, e0); + + if (bCacheOpConfigKDF) + { + /* Check if there is deferred queueing space available */ + ui32NextIdx = CacheOpIdxNext(&gsCwq.hWriteCounter); + if (ui32NextIdx != CacheOpIdxRead(&gsCwq.hReadCounter)) + { + psCacheOpWorkItem = &gsCwq.asWorkItems[ui32NextIdx]; + } + } + + /* + Normally, we would like to defer client CacheOp(s) but we may not always be in a + position or is necessary to do so based on the following reasons: + 0 - There is currently no queueing space left to enqueue this CacheOp, this might + imply the system is queueing more requests than can be consumed by the CacheOp + thread in time. + 1 - Batch has timeline, action this now due to Android timeline signaling deadlines. + 2 - Batch is forced synchronous. Necessary on Android for batches scheduled in the + middle of add operation. Those cannot have timelines that client plans to add + during actual batch execution and thus make synchronization on Android tricky. + 3 - Configuration does not support deferring of cache maintenance operations so we + execute the batch synchronously/immediately. + 4 - CacheOp has an INVALIDATE, as this is used to transfer device memory buffer + ownership back to the processor, we cannot defer it so action it immediately. 
+ 5 - CacheOp size too small (single OS page size) to warrant overhead of deferment, + 6 - CacheOp size OK for deferment, but a client virtual address is supplied so we + might has well just take advantage of said VA & flush immediately in UM context. + 7 - Prevent DoS attack if a malicious client queues something very large, say 1GiB. + Here we upper bound this threshold to PVR_DIRTY_BYTES_FLUSH_THRESHOLD. + */ + if (!psCacheOpWorkItem || + bBatchHasTimeline || + bBatchForceSynchronous || + !bCacheOpConfigKDF || + puiCacheOp[ui32Idx] & PVRSRV_CACHE_OP_INVALIDATE || + (puiSize[ui32Idx] <= (IMG_DEVMEM_SIZE_T)gsCwq.uiPageSize) || + (pvAddress[ui32Idx] && puiSize[ui32Idx] < (IMG_DEVMEM_SIZE_T)gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD]) || + (puiSize[ui32Idx] >= (IMG_DEVMEM_SIZE_T)(gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD] << 2))) + { + /* When the CacheOp thread not keeping up, trash d-cache */ +#if defined(CACHEOP_DEBUG) + sCacheOpWorkItem.ui64EnqueuedTime = OSClockns64(); + gsCwq.ui32ServerSync += 1; +#endif + psCacheOpWorkItem = NULL; + + eError = CacheOpPMRExec(ppsPMR[ui32Idx], + pvAddress[ui32Idx], + puiOffset[ui32Idx], + puiSize[ui32Idx], + puiCacheOp[ui32Idx], + IMG_FALSE); + PVR_LOG_GOTO_IF_ERROR(eError, "CacheOpExecPMR", e0); + +#if defined(CACHEOP_DEBUG) + sCacheOpWorkItem.ui64ExecuteTime = OSClockns64(); + sCacheOpWorkItem.ui32OpSeqNum = ui32OpSeqNum; + sCacheOpWorkItem.psPMR = ppsPMR[ui32Idx]; + sCacheOpWorkItem.uiSize = puiSize[ui32Idx]; + sCacheOpWorkItem.uiOffset = puiOffset[ui32Idx]; + sCacheOpWorkItem.uiCacheOp = puiCacheOp[ui32Idx]; + CacheOpStatsExecLogWrite(&sCacheOpWorkItem); +#endif + + continue; + } + + /* Need to validate request parameters here before enqueing */ + eError = PMR_LogicalSize(ppsPMR[ui32Idx], &uiLogicalSize); + PVR_LOG_GOTO_IF_ERROR(eError, "PMR_LogicalSize", e0); + eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE; + PVR_LOG_GOTO_IF_FALSE(((puiOffset[ui32Idx]+puiSize[ui32Idx]) <= uiLogicalSize), 
CACHEOP_DEVMEM_OOR_ERROR_STRING, e0); + eError = PVRSRV_OK; + + OSLockAcquire(gsCwq.hDeferredLock); + + /* Select next item off the queue to defer with */ + ui32NextIdx = CacheOpIdxNext(&gsCwq.hWriteCounter); + if (ui32NextIdx != CacheOpIdxRead(&gsCwq.hReadCounter)) + { + psCacheOpWorkItem = &gsCwq.asWorkItems[ui32NextIdx]; + CacheOpQItemWriteCheck(psCacheOpWorkItem); + } + else + { + /* Retry, disable KDF for this batch */ + OSLockRelease(gsCwq.hDeferredLock); + bCacheOpConfigKDF = IMG_FALSE; + psCacheOpWorkItem = NULL; + ui32Idx = ui32Idx - 1; + continue; + } + + /* Timeline need to be looked-up (i.e. bind) in the user context + before deferring into the CacheOp thread kernel context */ + eError = CacheOpTimelineBind(psDevNode, psCacheOpWorkItem, PVRSRV_NO_TIMELINE); + PVR_LOG_GOTO_IF_ERROR(eError, "CacheOpTimelineBind", e1); + + /* Prepare & enqueue next deferred work item for CacheOp thread */ + psCacheOpWorkItem->ui32OpSeqNum = CacheOpGetNextCommonSeqNum(); + *pui32NextFenceSeqNum = psCacheOpWorkItem->ui32OpSeqNum; + psCacheOpWorkItem->uiCacheOp = puiCacheOp[ui32Idx]; + psCacheOpWorkItem->uiOffset = puiOffset[ui32Idx]; + psCacheOpWorkItem->uiSize = puiSize[ui32Idx]; + psCacheOpWorkItem->psPMR = ppsPMR[ui32Idx]; +#if defined(CACHEOP_DEBUG) + psCacheOpWorkItem->ui64EnqueuedTime = OSClockns64(); + psCacheOpWorkItem->pid = sCacheOpWorkItem.pid; + psCacheOpWorkItem->bDeferred = IMG_TRUE; + psCacheOpWorkItem->bKMReq = IMG_FALSE; + psCacheOpWorkItem->bUMF = IMG_FALSE; + gsCwq.ui32ServerASync += 1; +#endif + + /* Increment deferred size & mark index ready for cache maintenance */ + (void) CacheOpIdxIncrement(&gsCwq.hWriteCounter); + + OSLockRelease(gsCwq.hDeferredLock); + psCacheOpWorkItem = NULL; + } + + /* Signal the CacheOp thread to ensure these items get processed */ + eError = OSEventObjectSignal(gsCwq.hThreadWakeUpEvtObj); + PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); + +e1: + if (psCacheOpWorkItem) + { + /* Need to ensure we leave this CacheOp QItem in 
the proper recycled state */ + CacheOpQItemRecycle(psCacheOpWorkItem); + OSLockRelease(gsCwq.hDeferredLock); + } +e0: + if (bBatchHasTimeline) + { + PVRSRV_ERROR eError2; + eError2 = CacheOpBatchExecTimeline(psDevNode, uiTimeline, + uiCurrentFenceSeqNum, pui32NextFenceSeqNum); + eError = (eError2 == PVRSRV_ERROR_RETRY) ? eError2 : eError; + } + + return eError; +} + + +PVRSRV_ERROR CacheOpExec (PPVRSRV_DEVICE_NODE psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd, + PVRSRV_CACHE_OP uiCacheOp) +{ +#if defined(CACHEOP_DEBUG) + IMG_UINT64 ui64EnqueueTime = OSClockns64(); +#endif + + switch (uiCacheOp) + { + case PVRSRV_CACHE_OP_CLEAN: + OSCPUCacheCleanRangeKM(psDevNode, pvVirtStart, pvVirtEnd, sCPUPhysStart, sCPUPhysEnd); + break; + case PVRSRV_CACHE_OP_INVALIDATE: + OSCPUCacheInvalidateRangeKM(psDevNode, pvVirtStart, pvVirtEnd, sCPUPhysStart, sCPUPhysEnd); + break; + case PVRSRV_CACHE_OP_FLUSH: + OSCPUCacheFlushRangeKM(psDevNode, pvVirtStart, pvVirtEnd, sCPUPhysStart, sCPUPhysEnd); + break; + default: + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid cache operation type %d", + __func__, uiCacheOp)); + break; + } + +#if defined(CACHEOP_DEBUG) + if (CacheOpConfigSupports(CACHEOP_CONFIG_KLOG)) + { + CACHEOP_WORK_ITEM sCacheOpWorkItem = {0}; + + gsCwq.ui32ServerSync += 1; + gsCwq.ui32ServerRBF += + ((sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr) & ((IMG_DEVMEM_SIZE_T)~(gsCwq.uiLineSize - 1))) >> gsCwq.uiLineShift; + + sCacheOpWorkItem.uiOffset = 0; + sCacheOpWorkItem.bKMReq = IMG_TRUE; + sCacheOpWorkItem.uiCacheOp = uiCacheOp; + /* Use information page PMR for logging KM request */ + sCacheOpWorkItem.psPMR = gsCwq.psInfoPagePMR; + sCacheOpWorkItem.ui64EnqueuedTime = ui64EnqueueTime; + sCacheOpWorkItem.ui64ExecuteTime = OSClockns64(); + sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM(); + sCacheOpWorkItem.ui32OpSeqNum = CacheOpGetNextCommonSeqNum(); + sCacheOpWorkItem.uiSize = (sCPUPhysEnd.uiAddr - 
sCPUPhysStart.uiAddr); + + CacheOpStatsExecLogWrite(&sCacheOpWorkItem); + } +#endif + + return PVRSRV_OK; +} + +PVRSRV_ERROR CacheOpValExec(PMR *psPMR, + IMG_UINT64 uiAddress, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + PVRSRV_CACHE_OP uiCacheOp) +{ + PVRSRV_ERROR eError; + IMG_CPU_VIRTADDR pvAddress = (IMG_CPU_VIRTADDR)(uintptr_t)uiAddress; +#if defined(CACHEOP_DEBUG) + CACHEOP_WORK_ITEM sCacheOpWorkItem = {0}; + gsCwq.ui32ServerSync += 1; + sCacheOpWorkItem.psPMR = psPMR; + sCacheOpWorkItem.uiSize = uiSize; + sCacheOpWorkItem.uiOffset = uiOffset; + sCacheOpWorkItem.uiCacheOp = uiCacheOp; + sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM(); + sCacheOpWorkItem.ui32OpSeqNum = CacheOpGetNextCommonSeqNum(); + sCacheOpWorkItem.ui64EnqueuedTime = OSClockns64(); +#endif + + eError = CacheOpPMRExec(psPMR, + pvAddress, + uiOffset, + uiSize, + uiCacheOp, + IMG_FALSE); + PVR_LOG_GOTO_IF_ERROR(eError, "CacheOpPMRExec", e0); + +#if defined(CACHEOP_DEBUG) + sCacheOpWorkItem.ui64ExecuteTime = OSClockns64(); + CacheOpStatsExecLogWrite(&sCacheOpWorkItem); +#endif + +e0: + return eError; +} + +PVRSRV_ERROR CacheOpQueue (CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 ui32NumCacheOps, + PMR **ppsPMR, + IMG_UINT64 *puiAddress, + IMG_DEVMEM_OFFSET_T *puiOffset, + IMG_DEVMEM_SIZE_T *puiSize, + PVRSRV_CACHE_OP *puiCacheOp, + IMG_UINT32 ui32OpTimeline, + IMG_UINT32 uiCurrentFenceSeqNum, + IMG_UINT32 *pui32NextFenceSeqNum) +{ + PVRSRV_ERROR eError; + PVRSRV_TIMELINE uiTimeline = (PVRSRV_TIMELINE)ui32OpTimeline; + IMG_CPU_VIRTADDR *pvAddress = (IMG_CPU_VIRTADDR*)(uintptr_t)puiAddress; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + if (!gsCwq.bInit) + { + PVR_LOG(("CacheOp framework not initialised, failing request")); + return PVRSRV_ERROR_NOT_INITIALISED; + } + else if (! 
ui32NumCacheOps) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + /* Ensure any single timeline CacheOp request is processed immediately */ + else if (ui32NumCacheOps == 1 && puiCacheOp[0] == PVRSRV_CACHE_OP_TIMELINE) + { + eError = CacheOpBatchExecTimeline(psDevNode, uiTimeline, uiCurrentFenceSeqNum, pui32NextFenceSeqNum); + } + /* This is the default entry for all client requests */ + else + { + if (!(gsCwq.eConfig & (CACHEOP_CONFIG_LAST-1))) + { + /* default the configuration before execution */ + CacheOpConfigUpdate(CACHEOP_CONFIG_DEFAULT); + } + + eError = + CacheOpBatchExecRangeBased(psDevNode, + ppsPMR, + pvAddress, + puiOffset, + puiSize, + puiCacheOp, + ui32NumCacheOps, + uiTimeline, + uiCurrentFenceSeqNum, + pui32NextFenceSeqNum); + } + + return eError; +} + +PVRSRV_ERROR CacheOpFence (RGXFWIF_DM eFenceOpType, IMG_UINT32 ui32FenceOpSeqNum) +{ + IMG_HANDLE hOSEvent; + PVRSRV_ERROR eError2; + IMG_UINT32 ui32RetryAbort; + IMG_UINT32 ui32CompletedOpSeqNum; + PVRSRV_ERROR eError = PVRSRV_OK; +#if defined(CACHEOP_DEBUG) + IMG_UINT64 uiTimeNow; + CACHEOP_WORK_ITEM sCacheOpWorkItem = {0}; + sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM(); + sCacheOpWorkItem.ui32OpSeqNum = ui32FenceOpSeqNum; + sCacheOpWorkItem.ui64EnqueuedTime = OSClockns64(); + uiTimeNow = sCacheOpWorkItem.ui64EnqueuedTime; +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + sCacheOpWorkItem.eFenceOpType = eFenceOpType; +#endif + sCacheOpWorkItem.uiSize = (uintptr_t) OSAtomicRead(&gsCwq.hCompletedSeqNum); + sCacheOpWorkItem.uiOffset = 0; +#endif + PVR_UNREFERENCED_PARAMETER(eFenceOpType); + + /* If initial fence check fails, then wait-and-retry in loop */ + ui32CompletedOpSeqNum = OSAtomicRead(&gsCwq.hCompletedSeqNum); + if (CacheOpFenceCheck(ui32CompletedOpSeqNum, ui32FenceOpSeqNum)) + { +#if defined(CACHEOP_DEBUG) + sCacheOpWorkItem.uiSize = (uintptr_t) ui32CompletedOpSeqNum; +#endif + goto e0; + } + + /* Open CacheOp update event object, if event open fails return error */ + 
eError2 = OSEventObjectOpen(gsCwq.hClientWakeUpEvtObj, &hOSEvent); + PVR_LOG_GOTO_IF_ERROR(eError2, "OSEventObjectOpen", e0); + + /* Linear (i.e. use exponential?) back-off, upper bounds user wait */ + for (ui32RetryAbort = gsCwq.ui32FenceRetryAbort; ;--ui32RetryAbort) + { + /* (Re)read completed CacheOp sequence number before waiting */ + ui32CompletedOpSeqNum = OSAtomicRead(&gsCwq.hCompletedSeqNum); + if (CacheOpFenceCheck(ui32CompletedOpSeqNum, ui32FenceOpSeqNum)) + { +#if defined(CACHEOP_DEBUG) + sCacheOpWorkItem.uiSize = (uintptr_t) ui32CompletedOpSeqNum; +#endif + break; + } + + (void) OSEventObjectWaitTimeout(hOSEvent, gsCwq.ui32FenceWaitTimeUs); + + if (! ui32RetryAbort) + { +#if defined(CACHEOP_DEBUG) + sCacheOpWorkItem.uiSize = (uintptr_t) OSAtomicRead(&gsCwq.hCompletedSeqNum); + sCacheOpWorkItem.uiOffset = 0; + uiTimeNow = OSClockns64(); +#endif + PVR_LOG(("CacheOpFence() event: "CACHEOP_ABORT_FENCE_ERROR_STRING)); + eError = PVRSRV_ERROR_RETRY; + break; + } + else + { +#if defined(CACHEOP_DEBUG) + uiTimeNow = OSClockns64(); +#endif + } + } + + eError2 = OSEventObjectClose(hOSEvent); + PVR_LOG_IF_ERROR(eError2, "OSEventObjectClose"); + +e0: +#if defined(CACHEOP_DEBUG) + sCacheOpWorkItem.ui64ExecuteTime = uiTimeNow; + if (ui32FenceOpSeqNum) + { + IMG_UINT64 ui64TimeTakenNs = sCacheOpWorkItem.ui64ExecuteTime - sCacheOpWorkItem.ui64EnqueuedTime; + IMG_UINT32 ui32Time; + IMG_INT32 i32Div; + + do_div(ui64TimeTakenNs, 1000); + ui32Time = ui64TimeTakenNs; + + /* Only fences pending on CacheOps contribute towards statistics, + * Calculate the approximate cumulative moving average fence time. + * This calculation is based on standard equation: + * + * CMAnext = (new + count * CMAprev) / (count + 1) + * + * but in simplified form: + * + * CMAnext = CMAprev + (new - CMAprev) / (count + 1) + * + * this gets rid of multiplication and prevents overflow. 
+ * + * Also to increase accuracy that we lose with integer division, + * we hold the moving remainder of the division and add it. + * + * CMAnext = CMAprev + (new - CMAprev + CMRprev) / (count + 1) + * + * Multiple tests proved it to be the best solution for approximating + * CMA using integers. + * + */ + + i32Div = (IMG_INT32)ui32Time - (IMG_INT32)gsCwq.ui32AvgFenceTime + (IMG_INT32)gsCwq.ui32AvgFenceTimeRemainder; + + + gsCwq.ui32AvgFenceTime += i32Div / (IMG_INT32)(gsCwq.ui32TotalFenceOps + 1); + gsCwq.ui32AvgFenceTimeRemainder = i32Div % (IMG_INT32)(gsCwq.ui32TotalFenceOps + 1); + + + gsCwq.ui32TotalFenceOps++; + + } + CacheOpStatsExecLogWrite(&sCacheOpWorkItem); +#endif + + return eError; +} + +PVRSRV_ERROR CacheOpLog (PMR *psPMR, + IMG_UINT64 puiAddress, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT64 ui64EnqueuedTimeUs, + IMG_UINT64 ui64ExecuteTimeUs, + IMG_UINT32 ui32NumRBF, + PVRSRV_CACHE_OP uiCacheOp) +{ +#if defined(CACHEOP_DEBUG) + CACHEOP_WORK_ITEM sCacheOpWorkItem = {0}; + PVR_UNREFERENCED_PARAMETER(puiAddress); + + sCacheOpWorkItem.psPMR = psPMR; + sCacheOpWorkItem.uiSize = uiSize; + sCacheOpWorkItem.uiOffset = uiOffset; + sCacheOpWorkItem.uiCacheOp = uiCacheOp; + sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM(); + sCacheOpWorkItem.ui32OpSeqNum = CacheOpGetNextCommonSeqNum(); + + sCacheOpWorkItem.ui64EnqueuedTime = ui64EnqueuedTimeUs; + sCacheOpWorkItem.ui64ExecuteTime = ui64ExecuteTimeUs; + sCacheOpWorkItem.bUMF = IMG_TRUE; + gsCwq.ui32ClientRBF += ui32NumRBF; + gsCwq.ui32ClientSync += 1; + + CacheOpStatsExecLogWrite(&sCacheOpWorkItem); +#else + PVR_UNREFERENCED_PARAMETER(psPMR); + PVR_UNREFERENCED_PARAMETER(uiSize); + PVR_UNREFERENCED_PARAMETER(uiOffset); + PVR_UNREFERENCED_PARAMETER(uiCacheOp); + PVR_UNREFERENCED_PARAMETER(ui32NumRBF); + PVR_UNREFERENCED_PARAMETER(puiAddress); + PVR_UNREFERENCED_PARAMETER(ui64ExecuteTimeUs); + PVR_UNREFERENCED_PARAMETER(ui64EnqueuedTimeUs); +#endif + return PVRSRV_OK; +} + 
+PVRSRV_ERROR CacheOpInit2 (void) +{ + void *pvAppHintState = NULL; + IMG_UINT32 ui32AppHintDefault = PVRSRV_APPHINT_CACHEOPTHREADPRIORITY; + IMG_UINT32 ui32AppHintCacheOpThreadPriority; + + PVRSRV_ERROR eError; + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + + /* Create an event object for pending CacheOp work items */ + eError = OSEventObjectCreate("PVRSRV_CACHEOP_EVENTOBJECT", &gsCwq.hThreadWakeUpEvtObj); + PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", e0); + + /* Create an event object for updating pending fence checks on CacheOp */ + eError = OSEventObjectCreate("PVRSRV_CACHEOP_EVENTOBJECT", &gsCwq.hClientWakeUpEvtObj); + PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", e0); + + /* Appending work-items is not concurrent, lock protects against this */ + eError = OSLockCreate((POS_LOCK*)&gsCwq.hDeferredLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e0); + + /* Apphint read/write is not concurrent, so lock protects against this */ + eError = OSLockCreate((POS_LOCK*)&gsCwq.hConfigLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e0); + + gsCwq.ui32FenceWaitTimeUs = CACHEOP_FENCE_WAIT_TIMEOUT; + gsCwq.ui32FenceRetryAbort = CACHEOP_FENCE_RETRY_ABORT; + +#if defined(CACHEFLUSH_ISA_SUPPORTS_UM_FLUSH) + gsCwq.bSupportsUMFlush = IMG_TRUE; +#else + gsCwq.bSupportsUMFlush = IMG_FALSE; +#endif + + gsCwq.pui32InfoPage = psPVRSRVData->pui32InfoPage; + gsCwq.psInfoPagePMR = psPVRSRVData->psInfoPagePMR; + + /* Normally, platforms should use their default configurations, put exceptions here */ +#if defined(__i386__) || defined(__x86_64__) +#if !defined(TC_MEMORY_CONFIG) + CacheOpConfigUpdate(CACHEOP_CONFIG_URBF | CACHEOP_CONFIG_KDF); +#else + CacheOpConfigUpdate(CACHEOP_CONFIG_KDF); +#endif +#else /* defined(__x86__) */ + CacheOpConfigUpdate(CACHEOP_CONFIG_DEFAULT); +#endif + + /* Initialise the remaining occupants of the CacheOp information page */ + gsCwq.pui32InfoPage[CACHEOP_INFO_PGSIZE] = (IMG_UINT32)gsCwq.uiPageSize; + 
gsCwq.pui32InfoPage[CACHEOP_INFO_LINESIZE] = (IMG_UINT32)gsCwq.uiLineSize; + + /* Set before spawning thread */ + gsCwq.bInit = IMG_TRUE; + + OSCreateKMAppHintState(&pvAppHintState); + OSGetKMAppHintUINT32(pvAppHintState, CacheOpThreadPriority, + &ui32AppHintDefault, &ui32AppHintCacheOpThreadPriority); + OSFreeKMAppHintState(pvAppHintState); + + /* Create a thread which is used to execute the deferred CacheOp(s), + these are CacheOp(s) executed by the server on behalf of clients + asynchronously. All clients synchronise with the server before + submitting any HW operation (i.e. device kicks) to ensure that + client device work-load memory is coherent */ + eError = OSThreadCreatePriority(&gsCwq.hWorkerThread, + "pvr_cacheop", + CacheOpThread, + CacheOpThreadDumpInfo, + IMG_TRUE, + psPVRSRVData, + ui32AppHintCacheOpThreadPriority); + PVR_LOG_GOTO_IF_ERROR(eError, "OSThreadCreatePriority", e0); + { + DI_ITERATOR_CB sIterator = {.pfnShow = CacheOpConfigRead}; + /* Writing the unsigned integer binary encoding of CACHEOP_CONFIG + into this file cycles through avail. 
configuration(s) */ + eError = DICreateEntry("cacheop_config", NULL, &sIterator, NULL, + DI_ENTRY_TYPE_GENERIC, &gsCwq.psConfigTune); + PVR_LOG_GOTO_IF_FALSE(gsCwq.psConfigTune, "DICreateEntry", e0); + } + + /* Register the CacheOp framework (re)configuration handlers */ + PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_CacheOpConfig, + CacheOpConfigQuery, + CacheOpConfigSet, + APPHINT_OF_DRIVER_NO_DEVICE, + (void *) APPHINT_ID_CacheOpConfig); + + PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_CacheOpUMKMThresholdSize, + CacheOpConfigQuery, + CacheOpConfigSet, + APPHINT_OF_DRIVER_NO_DEVICE, + (void *) APPHINT_ID_CacheOpUMKMThresholdSize); + + return PVRSRV_OK; +e0: + CacheOpDeInit2(); + return eError; +} + +void CacheOpDeInit2 (void) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + gsCwq.bInit = IMG_FALSE; + + if (gsCwq.hThreadWakeUpEvtObj) + { + eError = OSEventObjectSignal(gsCwq.hThreadWakeUpEvtObj); + PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); + } + + if (gsCwq.hClientWakeUpEvtObj) + { + eError = OSEventObjectSignal(gsCwq.hClientWakeUpEvtObj); + PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); + } + + if (gsCwq.hWorkerThread) + { + LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US) + { + eError = OSThreadDestroy(gsCwq.hWorkerThread); + if (PVRSRV_OK == eError) + { + gsCwq.hWorkerThread = NULL; + break; + } + OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + PVR_LOG_IF_ERROR(eError, "OSThreadDestroy"); + gsCwq.hWorkerThread = NULL; + } + + if (gsCwq.hClientWakeUpEvtObj) + { + eError = OSEventObjectDestroy(gsCwq.hClientWakeUpEvtObj); + PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy"); + gsCwq.hClientWakeUpEvtObj = NULL; + } + + if (gsCwq.hThreadWakeUpEvtObj) + { + eError = OSEventObjectDestroy(gsCwq.hThreadWakeUpEvtObj); + PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy"); + gsCwq.hThreadWakeUpEvtObj = NULL; + } + + if (gsCwq.hConfigLock) + { + eError = OSLockDestroy(gsCwq.hConfigLock); + PVR_LOG_IF_ERROR(eError, 
"OSLockDestroy"); + gsCwq.hConfigLock = NULL; + } + + if (gsCwq.hDeferredLock) + { + eError = OSLockDestroy(gsCwq.hDeferredLock); + PVR_LOG_IF_ERROR(eError, "OSLockDestroy"); + gsCwq.hDeferredLock = NULL; + } + + if (gsCwq.psConfigTune) + { + DIDestroyEntry(gsCwq.psConfigTune); + gsCwq.psConfigTune = NULL; + } + + gsCwq.pui32InfoPage = NULL; + gsCwq.psInfoPagePMR = NULL; +} + +PVRSRV_ERROR CacheOpInit (void) +{ + IMG_UINT32 idx; + PVRSRV_ERROR eError = PVRSRV_OK; + + /* DDK initialisation is anticipated to be performed on the boot + processor (little core in big/little systems) though this may + not always be the case. If so, the value cached here is the + system wide safe (i.e. smallest) L1 d-cache line size value + on any/such platforms with mismatched d-cache line sizes */ + gsCwq.uiPageSize = OSGetPageSize(); + gsCwq.uiPageShift = OSGetPageShift(); + gsCwq.uiLineSize = OSCPUCacheAttributeSize(OS_CPU_CACHE_ATTRIBUTE_LINE_SIZE); + gsCwq.uiLineShift = ExactLog2(gsCwq.uiLineSize); + PVR_LOG_RETURN_IF_FALSE((gsCwq.uiLineSize && gsCwq.uiPageSize && gsCwq.uiPageShift), "", PVRSRV_ERROR_INIT_FAILURE); + gsCwq.uiCacheOpAddrType = OSCPUCacheOpAddressType(); + + /* More information regarding these atomic counters can be found + in the CACHEOP_WORK_QUEUE type definition at top of file */ + OSAtomicWrite(&gsCwq.hCompletedSeqNum, 0); + OSAtomicWrite(&gsCwq.hCommonSeqNum, 0); + OSAtomicWrite(&gsCwq.hWriteCounter, 0); + OSAtomicWrite(&gsCwq.hReadCounter, 0); + + for (idx = 0; idx < CACHEOP_INDICES_MAX; idx++) + { + gsCwq.asWorkItems[idx].iTimeline = PVRSRV_NO_TIMELINE; + gsCwq.asWorkItems[idx].psPMR = (void *)(uintptr_t)~0; + gsCwq.asWorkItems[idx].ui32OpSeqNum = (IMG_UINT32)~0; + } + + +#if defined(CACHEOP_DEBUG) + /* debugfs file read-out is not concurrent, so lock protects against this */ + eError = OSLockCreate((POS_LOCK*)&gsCwq.hStatsExecLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e0); + + gsCwq.i32StatsExecWriteIdx = 0; + OSCachedMemSet(gsCwq.asStatsExecuted, 
0, sizeof(gsCwq.asStatsExecuted)); + + { + DI_ITERATOR_CB sIterator = {.pfnShow = CacheOpStatsExecLogRead}; + /* File captures the most recent subset of CacheOp(s) executed */ + eError = DICreateEntry("cacheop_history", NULL, &sIterator, NULL, + DI_ENTRY_TYPE_GENERIC, &gsCwq.psDIEntry); + PVR_LOG_GOTO_IF_ERROR(eError, "DICreateEntry", e0); + } +e0: +#endif + return eError; +} + +void CacheOpDeInit (void) +{ +#if defined(CACHEOP_DEBUG) + if (gsCwq.hStatsExecLock) + { + (void) OSLockDestroy(gsCwq.hStatsExecLock); + gsCwq.hStatsExecLock = NULL; + } + + if (gsCwq.psDIEntry) + { + DIDestroyEntry(gsCwq.psDIEntry); + gsCwq.psDIEntry = NULL; + } +#endif +} diff --git a/drivers/gpu/drm/phytium/octopus/cache_km.h b/drivers/gpu/drm/phytium/octopus/cache_km.h new file mode 100644 index 000000000000..69cf094607a6 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/cache_km.h @@ -0,0 +1,164 @@ +/*************************************************************************/ /*! +@File cache_km.h +@Title CPU cache management header +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef CACHE_KM_H +#define CACHE_KM_H + +#if defined(__linux__) +#include <linux/version.h> +#else +#define KERNEL_VERSION +#endif + +#include "pvrsrv_error.h" +#include "os_cpu_cache.h" +#include "img_types.h" +#include "cache_ops.h" +#include "device.h" +#include "pmr.h" + +typedef IMG_UINT32 PVRSRV_CACHE_OP_ADDR_TYPE; /*!< Represents CPU address type required for CPU d-cache maintenance */ +#define PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL 0x1 /*!< Operation requires CPU virtual address only */ +#define PVRSRV_CACHE_OP_ADDR_TYPE_PHYSICAL 0x2 /*!< Operation requires CPU physical address only */ +#define PVRSRV_CACHE_OP_ADDR_TYPE_BOTH 0x3 /*!< Operation requires both CPU virtual & physical addresses */ + +#include "connection_server.h" + +/* + * CacheOpInit() & CacheOpDeInit() + * + * This must be called to initialise the KM cache maintenance framework. + * This is called early during the driver/module (un)loading phase. + */ +PVRSRV_ERROR CacheOpInit(void); +void CacheOpDeInit(void); + +/* + * CacheOpInit2() & CacheOpDeInit2() + * + * This must be called to initialise the UM cache maintenance framework. + * This is called when the driver is loaded/unloaded from the kernel. + */ +PVRSRV_ERROR CacheOpInit2(void); +void CacheOpDeInit2(void); + +/* + * CacheOpExec() + * + * This is the primary CPU data-cache maintenance interface and it is + * always guaranteed to be synchronous; the arguments supplied must be + * pre-validated for performance reasons else the d-cache maintenance + * operation might cause the underlying OS kernel to fault. + */ +PVRSRV_ERROR CacheOpExec(PPVRSRV_DEVICE_NODE psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd, + PVRSRV_CACHE_OP uiCacheOp); + +/* + * CacheOpValExec() + * + * Same as CacheOpExec(), except arguments are _Validated_ before being + * presented to the underlying OS kernel for CPU data-cache maintenance. 
+ * The uiAddress is the start CPU virtual address for the to-be d-cache + * maintained PMR, it can be NULL in which case a remap will be performed + * internally, if required for cache maintenance. This is primarily used + * as the services client bridge call handler for synchronous user-mode + * cache maintenance requests. + */ +PVRSRV_ERROR CacheOpValExec(PMR *psPMR, + IMG_UINT64 uiAddress, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + PVRSRV_CACHE_OP uiCacheOp); + +/* + * CacheOpQueue() + * + * This is the secondary cache maintenance interface and it is not + * guaranteed to be synchronous in that requests could be deferred + * and executed asynchronously. This interface is primarily meant + * as services client bridge call handler. Both uiInfoPgGFSeqNum + * and ui32[Current,Next]FenceSeqNum implements an internal client + * server queueing protocol so making use of this interface outside + * of services client is not recommended and should not be done. + */ +PVRSRV_ERROR CacheOpQueue(CONNECTION_DATA *psConnection, + PPVRSRV_DEVICE_NODE psDevNode, + IMG_UINT32 ui32OpCount, + PMR **ppsPMR, + IMG_UINT64 *puiAddress, + IMG_DEVMEM_OFFSET_T *puiOffset, + IMG_DEVMEM_SIZE_T *puiSize, + PVRSRV_CACHE_OP *puiCacheOp, + IMG_UINT32 ui32OpTimeline, + IMG_UINT32 uiCurrentFenceSeqNum, + IMG_UINT32 *puiNextFenceSeqNum); + +/* + * CacheOpFence() + * + * This is used for fencing for any client in-flight cache maintenance + * operations that might have been deferred by the use of CacheOpQueue(). + * This should be called before any subsequent HW device kicks to ensure + * device memory is coherent with the HW before the kick. + */ +PVRSRV_ERROR CacheOpFence(RGXFWIF_DM eOpType, IMG_UINT32 ui32OpSeqNum); + +/* + * CacheOpLog() + * + * This is used for logging client cache maintenance operations that + * was executed in user-space. 
+ */ +PVRSRV_ERROR CacheOpLog(PMR *psPMR, + IMG_UINT64 uiAddress, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT64 ui64QueuedTimeMs, + IMG_UINT64 ui64ExecuteTimeMs, + IMG_UINT32 ui32NumRBF, + PVRSRV_CACHE_OP uiCacheOp); + +#endif /* CACHE_KM_H */ diff --git a/drivers/gpu/drm/phytium/octopus/cache_ops.h b/drivers/gpu/drm/phytium/octopus/cache_ops.h new file mode 100644 index 000000000000..da1a47cf8c8a --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/cache_ops.h @@ -0,0 +1,57 @@ +/*************************************************************************/ /*! +@File +@Title Services cache management header +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Defines for cache management which are visible internally + and externally +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef CACHE_OPS_H +#define CACHE_OPS_H +#include "img_types.h" + +#define CACHE_BATCH_MAX (8) +#define MAX_DMA_OPS (34) +typedef IMG_UINT32 PVRSRV_CACHE_OP; /*!< Type represents cache maintenance operation */ +#define PVRSRV_CACHE_OP_NONE 0x0 /*!< No operation */ +#define PVRSRV_CACHE_OP_CLEAN 0x1 /*!< Flush w/o invalidate */ +#define PVRSRV_CACHE_OP_INVALIDATE 0x2 /*!< Invalidate w/o flush */ +#define PVRSRV_CACHE_OP_FLUSH 0x3 /*!< Flush w/ invalidate */ + +#endif /* CACHE_OPS_H */ diff --git a/drivers/gpu/drm/phytium/octopus/client_cache_bridge.h b/drivers/gpu/drm/phytium/octopus/client_cache_bridge.h new file mode 100644 index 000000000000..5cd848e53d8d --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/client_cache_bridge.h @@ -0,0 +1,83 @@ +/******************************************************************************* +@File +@Title Client bridge header for cache +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Exports the client bridge functions for cache +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef CLIENT_CACHE_BRIDGE_H +#define CLIENT_CACHE_BRIDGE_H + +#include "img_defs.h" +#include "pvrsrv_error.h" + +#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) +#include "pvr_bridge_client.h" +#include "pvr_bridge.h" +#endif + +#include "common_cache_bridge.h" + +IMG_INTERNAL PVRSRV_ERROR BridgeCacheOpQueue(IMG_HANDLE hBridge, + IMG_UINT32 ui32NumCacheOps, + IMG_HANDLE * phPMR, + IMG_UINT64 * pui64Address, + IMG_DEVMEM_OFFSET_T * puiOffset, + IMG_DEVMEM_SIZE_T * puiSize, + PVRSRV_CACHE_OP * piuCacheOp, + IMG_UINT32 ui32OpTimeline, + IMG_UINT32 ui32CurrentFenceSeqNum, + IMG_UINT32 * pui32NextFenceSeqNum); + +IMG_INTERNAL PVRSRV_ERROR BridgeCacheOpExec(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_UINT64 ui64Address, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, PVRSRV_CACHE_OP iuCacheOp); + +IMG_INTERNAL PVRSRV_ERROR BridgeCacheOpLog(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_UINT64 ui64Address, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + IMG_INT64 i64QueuedTimeUs, + IMG_INT64 i64ExecuteTimeUs, + IMG_INT32 i32NumRBF, PVRSRV_CACHE_OP iuCacheOp); + +#endif /* CLIENT_CACHE_BRIDGE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/client_cache_direct_bridge.c b/drivers/gpu/drm/phytium/octopus/client_cache_direct_bridge.c new file mode 100644 index 000000000000..6e4cf3417bef --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/client_cache_direct_bridge.c @@ -0,0 +1,120 @@ +/******************************************************************************* +@File +@Title Direct client bridge for cache +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements the client side of the bridge for cache + which is used in calls from Server context. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include "client_cache_bridge.h" +#include "img_defs.h" +#include "pvr_debug.h" + +/* Module specific includes */ +#include "cache_ops.h" + +#include "cache_km.h" + +IMG_INTERNAL PVRSRV_ERROR BridgeCacheOpQueue(IMG_HANDLE hBridge, + IMG_UINT32 ui32NumCacheOps, + IMG_HANDLE * phPMR, + IMG_UINT64 * pui64Address, + IMG_DEVMEM_OFFSET_T * puiOffset, + IMG_DEVMEM_SIZE_T * puiSize, + PVRSRV_CACHE_OP * piuCacheOp, + IMG_UINT32 ui32OpTimeline, + IMG_UINT32 ui32CurrentFenceSeqNum, + IMG_UINT32 * pui32NextFenceSeqNum) +{ + PVRSRV_ERROR eError; + PMR **psPMRInt; + + psPMRInt = (PMR **) phPMR; + + eError = + CacheOpQueue(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + ui32NumCacheOps, + psPMRInt, + pui64Address, + puiOffset, + puiSize, + piuCacheOp, ui32OpTimeline, ui32CurrentFenceSeqNum, pui32NextFenceSeqNum); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeCacheOpExec(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_UINT64 ui64Address, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, PVRSRV_CACHE_OP iuCacheOp) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = CacheOpValExec(psPMRInt, ui64Address, uiOffset, uiSize, iuCacheOp); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeCacheOpLog(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_UINT64 ui64Address, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + IMG_INT64 i64QueuedTimeUs, + IMG_INT64 i64ExecuteTimeUs, + IMG_INT32 i32NumRBF, PVRSRV_CACHE_OP iuCacheOp) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = + CacheOpLog(psPMRInt, + ui64Address, + uiOffset, uiSize, i64QueuedTimeUs, i64ExecuteTimeUs, i32NumRBF, iuCacheOp); + + return eError; +} diff --git a/drivers/gpu/drm/phytium/octopus/client_devicememhistory_bridge.h 
b/drivers/gpu/drm/phytium/octopus/client_devicememhistory_bridge.h new file mode 100644 index 000000000000..daad32221b02 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/client_devicememhistory_bridge.h @@ -0,0 +1,111 @@ +/******************************************************************************* +@File +@Title Client bridge header for devicememhistory +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Exports the client bridge functions for devicememhistory +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef CLIENT_DEVICEMEMHISTORY_BRIDGE_H +#define CLIENT_DEVICEMEMHISTORY_BRIDGE_H + +#include "img_defs.h" +#include "pvrsrv_error.h" + +#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) +#include "pvr_bridge_client.h" +#include "pvr_bridge.h" +#endif + +#include "common_devicememhistory_bridge.h" + +IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryMap(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_DEVMEM_SIZE_T uiOffset, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + const IMG_CHAR * puiText, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 * pui32AllocationIndexOut); + +IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryUnmap(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_DEVMEM_SIZE_T uiOffset, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + const IMG_CHAR * puiText, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 * pui32AllocationIndexOut); + +IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryMapVRange(IMG_HANDLE hBridge, + IMG_DEV_VIRTADDR sBaseDevVAddr, + IMG_UINT32 ui32ui32StartPage, + IMG_UINT32 ui32NumPages, + IMG_DEVMEM_SIZE_T uiAllocSize, + const IMG_CHAR * puiText, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 * pui32AllocationIndexOut); + +IMG_INTERNAL 
PVRSRV_ERROR BridgeDevicememHistoryUnmapVRange(IMG_HANDLE hBridge, + IMG_DEV_VIRTADDR sBaseDevVAddr, + IMG_UINT32 ui32ui32StartPage, + IMG_UINT32 ui32NumPages, + IMG_DEVMEM_SIZE_T uiAllocSize, + const IMG_CHAR * puiText, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 * pui32AllocationIndexOut); + +IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistorySparseChange(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_DEVMEM_SIZE_T uiOffset, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + const IMG_CHAR * puiText, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 * pui32AllocPageIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 * pui32FreePageIndices, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 * pui32AllocationIndexOut); + +#endif /* CLIENT_DEVICEMEMHISTORY_BRIDGE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/client_devicememhistory_direct_bridge.c b/drivers/gpu/drm/phytium/octopus/client_devicememhistory_direct_bridge.c new file mode 100644 index 000000000000..5a173ef63b34 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/client_devicememhistory_direct_bridge.c @@ -0,0 +1,194 @@ +/******************************************************************************* +@File +@Title Direct client bridge for devicememhistory +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements the client side of the bridge for devicememhistory + which is used in calls from Server context. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include "client_devicememhistory_bridge.h" +#include "img_defs.h" +#include "pvr_debug.h" + +/* Module specific includes */ +#include "img_types.h" +#include "img_defs.h" +#include "devicemem_typedefs.h" + +#include "devicemem_history_server.h" + +IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryMap(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_DEVMEM_SIZE_T uiOffset, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + const IMG_CHAR * puiText, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 * pui32AllocationIndexOut) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = + DevicememHistoryMapKM(psPMRInt, + uiOffset, + sDevVAddr, + uiSize, + puiText, + ui32Log2PageSize, ui32AllocationIndex, pui32AllocationIndexOut); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryUnmap(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_DEVMEM_SIZE_T uiOffset, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + const IMG_CHAR * puiText, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 * pui32AllocationIndexOut) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = + DevicememHistoryUnmapKM(psPMRInt, + uiOffset, + sDevVAddr, + uiSize, + puiText, + ui32Log2PageSize, ui32AllocationIndex, pui32AllocationIndexOut); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryMapVRange(IMG_HANDLE hBridge, + IMG_DEV_VIRTADDR sBaseDevVAddr, + IMG_UINT32 ui32ui32StartPage, + IMG_UINT32 ui32NumPages, + IMG_DEVMEM_SIZE_T uiAllocSize, + const IMG_CHAR * puiText, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 * pui32AllocationIndexOut) +{ + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = + 
DevicememHistoryMapVRangeKM(sBaseDevVAddr, + ui32ui32StartPage, + ui32NumPages, + uiAllocSize, + puiText, + ui32Log2PageSize, + ui32AllocationIndex, pui32AllocationIndexOut); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryUnmapVRange(IMG_HANDLE hBridge, + IMG_DEV_VIRTADDR sBaseDevVAddr, + IMG_UINT32 ui32ui32StartPage, + IMG_UINT32 ui32NumPages, + IMG_DEVMEM_SIZE_T uiAllocSize, + const IMG_CHAR * puiText, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 * pui32AllocationIndexOut) +{ + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = + DevicememHistoryUnmapVRangeKM(sBaseDevVAddr, + ui32ui32StartPage, + ui32NumPages, + uiAllocSize, + puiText, + ui32Log2PageSize, + ui32AllocationIndex, pui32AllocationIndexOut); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistorySparseChange(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_DEVMEM_SIZE_T uiOffset, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + const IMG_CHAR * puiText, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 * pui32AllocPageIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 * pui32FreePageIndices, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 * pui32AllocationIndexOut) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = + DevicememHistorySparseChangeKM(psPMRInt, + uiOffset, + sDevVAddr, + uiSize, + puiText, + ui32Log2PageSize, + ui32AllocPageCount, + pui32AllocPageIndices, + ui32FreePageCount, + pui32FreePageIndices, + ui32AllocationIndex, pui32AllocationIndexOut); + + return eError; +} diff --git a/drivers/gpu/drm/phytium/octopus/client_htbuffer_bridge.h b/drivers/gpu/drm/phytium/octopus/client_htbuffer_bridge.h new file mode 100644 index 000000000000..104b6f3c2c7e --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/client_htbuffer_bridge.h @@ -0,0 +1,71 @@ 
+/******************************************************************************* +@File +@Title Client bridge header for htbuffer +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Exports the client bridge functions for htbuffer +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef CLIENT_HTBUFFER_BRIDGE_H +#define CLIENT_HTBUFFER_BRIDGE_H + +#include "img_defs.h" +#include "pvrsrv_error.h" + +#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) +#include "pvr_bridge_client.h" +#include "pvr_bridge.h" +#endif + +#include "common_htbuffer_bridge.h" + +IMG_INTERNAL PVRSRV_ERROR BridgeHTBControl(IMG_HANDLE hBridge, + IMG_UINT32 ui32NumGroups, + IMG_UINT32 * pui32GroupEnable, + IMG_UINT32 ui32LogLevel, + IMG_UINT32 ui32EnablePID, + IMG_UINT32 ui32LogMode, IMG_UINT32 ui32OpMode); + +IMG_INTERNAL PVRSRV_ERROR BridgeHTBLog(IMG_HANDLE hBridge, + IMG_UINT32 ui32PID, + IMG_UINT32 ui32TID, + IMG_UINT64 ui64TimeStamp, + IMG_UINT32 ui32SF, + IMG_UINT32 ui32NumArgs, IMG_UINT32 * pui32Args); + +#endif /* CLIENT_HTBUFFER_BRIDGE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/client_htbuffer_direct_bridge.c b/drivers/gpu/drm/phytium/octopus/client_htbuffer_direct_bridge.c new file mode 100644 index 000000000000..5cc3435282fd --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/client_htbuffer_direct_bridge.c @@ -0,0 +1,85 @@ +/******************************************************************************* +@File +@Title Direct client bridge for htbuffer +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements the client side of the bridge for htbuffer + which is used in calls from Server context. 
+@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include "client_htbuffer_bridge.h" +#include "img_defs.h" +#include "pvr_debug.h" + +/* Module specific includes */ +#include "devicemem_typedefs.h" +#include "htbuffer_types.h" + +#include "htbserver.h" + +IMG_INTERNAL PVRSRV_ERROR BridgeHTBControl(IMG_HANDLE hBridge, + IMG_UINT32 ui32NumGroups, + IMG_UINT32 * pui32GroupEnable, + IMG_UINT32 ui32LogLevel, + IMG_UINT32 ui32EnablePID, + IMG_UINT32 ui32LogMode, IMG_UINT32 ui32OpMode) +{ + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = + HTBControlKM(ui32NumGroups, + pui32GroupEnable, ui32LogLevel, ui32EnablePID, ui32LogMode, ui32OpMode); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeHTBLog(IMG_HANDLE hBridge, + IMG_UINT32 ui32PID, + IMG_UINT32 ui32TID, + IMG_UINT64 ui64TimeStamp, + IMG_UINT32 ui32SF, + IMG_UINT32 ui32NumArgs, IMG_UINT32 * pui32Args) +{ + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = HTBLogKM(ui32PID, ui32TID, ui64TimeStamp, ui32SF, ui32NumArgs, pui32Args); + + return eError; +} diff --git a/drivers/gpu/drm/phytium/octopus/client_mm_bridge.h b/drivers/gpu/drm/phytium/octopus/client_mm_bridge.h new file mode 100644 index 000000000000..f79d4f99914e --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/client_mm_bridge.h @@ -0,0 +1,241 @@ +/******************************************************************************* +@File +@Title 
Client bridge header for mm +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Exports the client bridge functions for mm +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef CLIENT_MM_BRIDGE_H +#define CLIENT_MM_BRIDGE_H + +#include "img_defs.h" +#include "pvrsrv_error.h" + +#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) +#include "pvr_bridge_client.h" +#include "pvr_bridge.h" +#endif + +#include "common_mm_bridge.h" + +IMG_INTERNAL PVRSRV_ERROR BridgePMRExportPMR(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_HANDLE * phPMRExport, + IMG_UINT64 * pui64Size, + IMG_UINT32 * pui32Log2Contig, + IMG_UINT64 * pui64Password); + +IMG_INTERNAL PVRSRV_ERROR BridgePMRUnexportPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMRExport); + +IMG_INTERNAL PVRSRV_ERROR BridgePMRGetUID(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, IMG_UINT64 * pui64UID); + +IMG_INTERNAL PVRSRV_ERROR BridgePMRMakeLocalImportHandle(IMG_HANDLE hBridge, + IMG_HANDLE hBuffer, IMG_HANDLE * phExtMem); + +IMG_INTERNAL PVRSRV_ERROR BridgePMRUnmakeLocalImportHandle(IMG_HANDLE hBridge, IMG_HANDLE hExtMem); + +IMG_INTERNAL PVRSRV_ERROR BridgePMRImportPMR(IMG_HANDLE hBridge, + IMG_HANDLE hPMRExport, + IMG_UINT64 ui64uiPassword, + IMG_UINT64 ui64uiSize, + IMG_UINT32 ui32uiLog2Contig, IMG_HANDLE * phPMR); + +IMG_INTERNAL PVRSRV_ERROR BridgePMRLocalImportPMR(IMG_HANDLE hBridge, + IMG_HANDLE hExtHandle, + IMG_HANDLE * phPMR, + IMG_DEVMEM_SIZE_T * puiSize, + IMG_DEVMEM_ALIGN_T * puiAlign); + +IMG_INTERNAL PVRSRV_ERROR BridgePMRUnrefPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMR); + +IMG_INTERNAL 
PVRSRV_ERROR BridgePMRUnrefUnlockPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMR); + +IMG_INTERNAL PVRSRV_ERROR BridgePhysmemNewRamBackedPMR(IMG_HANDLE hBridge, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 * pui32MappingTable, + IMG_UINT32 ui32Log2PageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 ui32AnnotationLength, + const IMG_CHAR * puiAnnotation, + IMG_PID ui32PID, + IMG_HANDLE * phPMRPtr, + IMG_UINT32 ui32PDumpFlags); + +IMG_INTERNAL PVRSRV_ERROR BridgePhysmemNewRamBackedLockedPMR(IMG_HANDLE hBridge, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 * pui32MappingTable, + IMG_UINT32 ui32Log2PageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 ui32AnnotationLength, + const IMG_CHAR * puiAnnotation, + IMG_PID ui32PID, + IMG_HANDLE * phPMRPtr, + IMG_UINT32 ui32PDumpFlags); + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntPin(IMG_HANDLE hBridge, IMG_HANDLE hPMR); + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnpin(IMG_HANDLE hBridge, IMG_HANDLE hPMR); + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntPinValidate(IMG_HANDLE hBridge, + IMG_HANDLE hMapping, IMG_HANDLE hPMR); + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnpinInvalidate(IMG_HANDLE hBridge, + IMG_HANDLE hMapping, IMG_HANDLE hPMR); + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntCtxCreate(IMG_HANDLE hBridge, + IMG_BOOL bbKernelMemoryCtx, + IMG_HANDLE * phDevMemServerContext, + IMG_HANDLE * phPrivData, + IMG_UINT32 * pui32CPUCacheLineSize); + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntCtxDestroy(IMG_HANDLE hBridge, + IMG_HANDLE hDevmemServerContext); + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntHeapCreate(IMG_HANDLE hBridge, + IMG_HANDLE hDevmemCtx, + IMG_DEV_VIRTADDR sHeapBaseAddr, + IMG_DEVMEM_SIZE_T uiHeapLength, + IMG_UINT32 ui32Log2DataPageSize, + IMG_HANDLE * phDevmemHeapPtr); + +IMG_INTERNAL PVRSRV_ERROR 
BridgeDevmemIntHeapDestroy(IMG_HANDLE hBridge, IMG_HANDLE hDevmemHeap); + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntMapPMR(IMG_HANDLE hBridge, + IMG_HANDLE hDevmemServerHeap, + IMG_HANDLE hReservation, + IMG_HANDLE hPMR, + PVRSRV_MEMALLOCFLAGS_T uiMapFlags, + IMG_HANDLE * phMapping); + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnmapPMR(IMG_HANDLE hBridge, IMG_HANDLE hMapping); + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntReserveRange(IMG_HANDLE hBridge, + IMG_HANDLE hDevmemServerHeap, + IMG_DEV_VIRTADDR sAddress, + IMG_DEVMEM_SIZE_T uiLength, + IMG_HANDLE * phReservation); + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnreserveRange(IMG_HANDLE hBridge, + IMG_HANDLE hReservation); + +IMG_INTERNAL PVRSRV_ERROR BridgeChangeSparseMem(IMG_HANDLE hBridge, + IMG_HANDLE hSrvDevMemHeap, + IMG_HANDLE hPMR, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 * pui32AllocPageIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 * pui32FreePageIndices, + IMG_UINT32 ui32SparseFlags, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_UINT64 ui64CPUVAddr); + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntMapPages(IMG_HANDLE hBridge, + IMG_HANDLE hReservation, + IMG_HANDLE hPMR, + IMG_UINT32 ui32PageCount, + IMG_UINT32 ui32PhysicalPgOffset, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_DEV_VIRTADDR sDevVAddr); + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnmapPages(IMG_HANDLE hBridge, + IMG_HANDLE hReservation, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_UINT32 ui32PageCount); + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIsVDevAddrValid(IMG_HANDLE hBridge, + IMG_HANDLE hDevmemCtx, + IMG_DEV_VIRTADDR sAddress); + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemFlushDevSLCRange(IMG_HANDLE hBridge, + IMG_HANDLE hDevmemCtx, + IMG_DEV_VIRTADDR sAddress, + IMG_DEVMEM_SIZE_T uiSize, + IMG_BOOL bInvalidate); + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemInvalidateFBSCTable(IMG_HANDLE hBridge, + IMG_HANDLE hDevmemCtx, + IMG_UINT64 ui64FBSCEntries); + +IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapConfigCount(IMG_HANDLE 
hBridge, + IMG_UINT32 * pui32NumHeapConfigs); + +IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapCount(IMG_HANDLE hBridge, + IMG_UINT32 ui32HeapConfigIndex, + IMG_UINT32 * pui32NumHeaps); + +IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapConfigName(IMG_HANDLE hBridge, + IMG_UINT32 ui32HeapConfigIndex, + IMG_UINT32 ui32HeapConfigNameBufSz, + IMG_CHAR * puiHeapConfigName); + +IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapDetails(IMG_HANDLE hBridge, + IMG_UINT32 ui32HeapConfigIndex, + IMG_UINT32 ui32HeapIndex, + IMG_UINT32 ui32HeapNameBufSz, + IMG_CHAR * puiHeapNameOut, + IMG_DEV_VIRTADDR * psDevVAddrBase, + IMG_DEVMEM_SIZE_T * puiHeapLength, + IMG_DEVMEM_SIZE_T * puiReservedRegionLength, + IMG_UINT32 * pui32Log2DataPageSizeOut, + IMG_UINT32 * pui32Log2ImportAlignmentOut); + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntRegisterPFNotifyKM(IMG_HANDLE hBridge, + IMG_HANDLE hDevmemCtx, + IMG_UINT32 ui32PID, IMG_BOOL bRegister); + +IMG_INTERNAL PVRSRV_ERROR BridgeGetMaxDevMemSize(IMG_HANDLE hBridge, + IMG_DEVMEM_SIZE_T * puiLMASize, + IMG_DEVMEM_SIZE_T * puiUMASize); + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemGetFaultAddress(IMG_HANDLE hBridge, + IMG_HANDLE hDevmemCtx, + IMG_DEV_VIRTADDR * psFaultAddress); + +IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVUpdateOOMStats(IMG_HANDLE hBridge, + IMG_UINT32 ui32ui32StatType, IMG_PID ui32pid); + +#endif /* CLIENT_MM_BRIDGE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/client_mm_direct_bridge.c b/drivers/gpu/drm/phytium/octopus/client_mm_direct_bridge.c new file mode 100644 index 000000000000..cebe7dcdba3c --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/client_mm_direct_bridge.c @@ -0,0 +1,732 @@ +/******************************************************************************* +@File +@Title Direct client bridge for mm +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements the client side of the bridge for mm + which is used in calls from Server context. 
+@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include "client_mm_bridge.h" +#include "img_defs.h" +#include "pvr_debug.h" + +/* Module specific includes */ +#include "pvrsrv_memallocflags.h" +#include "devicemem_typedefs.h" + +#include "devicemem.h" +#include "devicemem_server.h" +#include "pmr.h" +#include "devicemem_heapcfg.h" +#include "physmem.h" +#include "devicemem_utils.h" +#include "process_stats.h" + +IMG_INTERNAL PVRSRV_ERROR BridgePMRExportPMR(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_HANDLE * phPMRExport, + IMG_UINT64 * pui64Size, + IMG_UINT32 * pui32Log2Contig, + IMG_UINT64 * pui64Password) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PMR_EXPORT *psPMRExportInt = NULL; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = PMRExportPMR(psPMRInt, &psPMRExportInt, pui64Size, pui32Log2Contig, pui64Password); + + *phPMRExport = psPMRExportInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgePMRUnexportPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMRExport) +{ + PVRSRV_ERROR eError; + PMR_EXPORT *psPMRExportInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRExportInt = (PMR_EXPORT *) hPMRExport; + + eError = PMRUnexportPMR(psPMRExportInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgePMRGetUID(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, IMG_UINT64 * pui64UID) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = 
(PMR *) hPMR; + + eError = PMRGetUID(psPMRInt, pui64UID); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgePMRMakeLocalImportHandle(IMG_HANDLE hBridge, + IMG_HANDLE hBuffer, IMG_HANDLE * phExtMem) +{ + PVRSRV_ERROR eError; + PMR *psBufferInt; + PMR *psExtMemInt = NULL; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psBufferInt = (PMR *) hBuffer; + + eError = PMRMakeLocalImportHandle(psBufferInt, &psExtMemInt); + + *phExtMem = psExtMemInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgePMRUnmakeLocalImportHandle(IMG_HANDLE hBridge, IMG_HANDLE hExtMem) +{ + PVRSRV_ERROR eError; + PMR *psExtMemInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psExtMemInt = (PMR *) hExtMem; + + eError = PMRUnmakeLocalImportHandle(psExtMemInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgePMRImportPMR(IMG_HANDLE hBridge, + IMG_HANDLE hPMRExport, + IMG_UINT64 ui64uiPassword, + IMG_UINT64 ui64uiSize, + IMG_UINT32 ui32uiLog2Contig, IMG_HANDLE * phPMR) +{ + PVRSRV_ERROR eError; + PMR_EXPORT *psPMRExportInt; + PMR *psPMRInt = NULL; + + psPMRExportInt = (PMR_EXPORT *) hPMRExport; + + eError = + PhysmemImportPMR(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + psPMRExportInt, + ui64uiPassword, ui64uiSize, ui32uiLog2Contig, &psPMRInt); + + *phPMR = psPMRInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgePMRLocalImportPMR(IMG_HANDLE hBridge, + IMG_HANDLE hExtHandle, + IMG_HANDLE * phPMR, + IMG_DEVMEM_SIZE_T * puiSize, + IMG_DEVMEM_ALIGN_T * puiAlign) +{ + PVRSRV_ERROR eError; + PMR *psExtHandleInt; + PMR *psPMRInt = NULL; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psExtHandleInt = (PMR *) hExtHandle; + + eError = PMRLocalImportPMR(psExtHandleInt, &psPMRInt, puiSize, puiAlign); + + *phPMR = psPMRInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgePMRUnrefPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMR) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = PMRUnrefPMR(psPMRInt); + + return eError; 
+} + +IMG_INTERNAL PVRSRV_ERROR BridgePMRUnrefUnlockPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMR) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = PMRUnrefUnlockPMR(psPMRInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgePhysmemNewRamBackedPMR(IMG_HANDLE hBridge, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 * pui32MappingTable, + IMG_UINT32 ui32Log2PageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 ui32AnnotationLength, + const IMG_CHAR * puiAnnotation, + IMG_PID ui32PID, + IMG_HANDLE * phPMRPtr, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + PMR *psPMRPtrInt = NULL; + + eError = + PhysmemNewRamBackedPMR_direct(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + uiSize, + uiChunkSize, + ui32NumPhysChunks, + ui32NumVirtChunks, + pui32MappingTable, + ui32Log2PageSize, + uiFlags, + ui32AnnotationLength, + puiAnnotation, ui32PID, &psPMRPtrInt, ui32PDumpFlags); + + *phPMRPtr = psPMRPtrInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgePhysmemNewRamBackedLockedPMR(IMG_HANDLE hBridge, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 * pui32MappingTable, + IMG_UINT32 ui32Log2PageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 ui32AnnotationLength, + const IMG_CHAR * puiAnnotation, + IMG_PID ui32PID, + IMG_HANDLE * phPMRPtr, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + PMR *psPMRPtrInt = NULL; + + eError = + PhysmemNewRamBackedLockedPMR(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + uiSize, + uiChunkSize, + ui32NumPhysChunks, + ui32NumVirtChunks, + pui32MappingTable, + ui32Log2PageSize, + uiFlags, + ui32AnnotationLength, + puiAnnotation, ui32PID, &psPMRPtrInt, ui32PDumpFlags); + + *phPMRPtr = psPMRPtrInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR 
BridgeDevmemIntPin(IMG_HANDLE hBridge, IMG_HANDLE hPMR) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = DevmemIntPin(psPMRInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnpin(IMG_HANDLE hBridge, IMG_HANDLE hPMR) +{ + PVRSRV_ERROR eError; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRInt = (PMR *) hPMR; + + eError = DevmemIntUnpin(psPMRInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntPinValidate(IMG_HANDLE hBridge, + IMG_HANDLE hMapping, IMG_HANDLE hPMR) +{ + PVRSRV_ERROR eError; + DEVMEMINT_MAPPING *psMappingInt; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psMappingInt = (DEVMEMINT_MAPPING *) hMapping; + psPMRInt = (PMR *) hPMR; + + eError = DevmemIntPinValidate(psMappingInt, psPMRInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnpinInvalidate(IMG_HANDLE hBridge, + IMG_HANDLE hMapping, IMG_HANDLE hPMR) +{ + PVRSRV_ERROR eError; + DEVMEMINT_MAPPING *psMappingInt; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psMappingInt = (DEVMEMINT_MAPPING *) hMapping; + psPMRInt = (PMR *) hPMR; + + eError = DevmemIntUnpinInvalidate(psMappingInt, psPMRInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntCtxCreate(IMG_HANDLE hBridge, + IMG_BOOL bbKernelMemoryCtx, + IMG_HANDLE * phDevMemServerContext, + IMG_HANDLE * phPrivData, + IMG_UINT32 * pui32CPUCacheLineSize) +{ + PVRSRV_ERROR eError; + DEVMEMINT_CTX *psDevMemServerContextInt = NULL; + IMG_HANDLE hPrivDataInt = NULL; + + eError = + DevmemIntCtxCreate(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + bbKernelMemoryCtx, + &psDevMemServerContextInt, &hPrivDataInt, pui32CPUCacheLineSize); + + *phDevMemServerContext = psDevMemServerContextInt; + *phPrivData = hPrivDataInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntCtxDestroy(IMG_HANDLE hBridge, + IMG_HANDLE hDevmemServerContext) +{ + PVRSRV_ERROR 
eError; + DEVMEMINT_CTX *psDevmemServerContextInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psDevmemServerContextInt = (DEVMEMINT_CTX *) hDevmemServerContext; + + eError = DevmemIntCtxDestroy(psDevmemServerContextInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntHeapCreate(IMG_HANDLE hBridge, + IMG_HANDLE hDevmemCtx, + IMG_DEV_VIRTADDR sHeapBaseAddr, + IMG_DEVMEM_SIZE_T uiHeapLength, + IMG_UINT32 ui32Log2DataPageSize, + IMG_HANDLE * phDevmemHeapPtr) +{ + PVRSRV_ERROR eError; + DEVMEMINT_CTX *psDevmemCtxInt; + DEVMEMINT_HEAP *psDevmemHeapPtrInt = NULL; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; + + eError = + DevmemIntHeapCreate(psDevmemCtxInt, + sHeapBaseAddr, + uiHeapLength, ui32Log2DataPageSize, &psDevmemHeapPtrInt); + + *phDevmemHeapPtr = psDevmemHeapPtrInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntHeapDestroy(IMG_HANDLE hBridge, IMG_HANDLE hDevmemHeap) +{ + PVRSRV_ERROR eError; + DEVMEMINT_HEAP *psDevmemHeapInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psDevmemHeapInt = (DEVMEMINT_HEAP *) hDevmemHeap; + + eError = DevmemIntHeapDestroy(psDevmemHeapInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntMapPMR(IMG_HANDLE hBridge, + IMG_HANDLE hDevmemServerHeap, + IMG_HANDLE hReservation, + IMG_HANDLE hPMR, + PVRSRV_MEMALLOCFLAGS_T uiMapFlags, + IMG_HANDLE * phMapping) +{ + PVRSRV_ERROR eError; + DEVMEMINT_HEAP *psDevmemServerHeapInt; + DEVMEMINT_RESERVATION *psReservationInt; + PMR *psPMRInt; + DEVMEMINT_MAPPING *psMappingInt = NULL; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap; + psReservationInt = (DEVMEMINT_RESERVATION *) hReservation; + psPMRInt = (PMR *) hPMR; + + eError = + DevmemIntMapPMR(psDevmemServerHeapInt, + psReservationInt, psPMRInt, uiMapFlags, &psMappingInt); + + *phMapping = psMappingInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnmapPMR(IMG_HANDLE 
hBridge, IMG_HANDLE hMapping) +{ + PVRSRV_ERROR eError; + DEVMEMINT_MAPPING *psMappingInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psMappingInt = (DEVMEMINT_MAPPING *) hMapping; + + eError = DevmemIntUnmapPMR(psMappingInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntReserveRange(IMG_HANDLE hBridge, + IMG_HANDLE hDevmemServerHeap, + IMG_DEV_VIRTADDR sAddress, + IMG_DEVMEM_SIZE_T uiLength, + IMG_HANDLE * phReservation) +{ + PVRSRV_ERROR eError; + DEVMEMINT_HEAP *psDevmemServerHeapInt; + DEVMEMINT_RESERVATION *psReservationInt = NULL; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap; + + eError = + DevmemIntReserveRange(psDevmemServerHeapInt, sAddress, uiLength, &psReservationInt); + + *phReservation = psReservationInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnreserveRange(IMG_HANDLE hBridge, IMG_HANDLE hReservation) +{ + PVRSRV_ERROR eError; + DEVMEMINT_RESERVATION *psReservationInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psReservationInt = (DEVMEMINT_RESERVATION *) hReservation; + + eError = DevmemIntUnreserveRange(psReservationInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeChangeSparseMem(IMG_HANDLE hBridge, + IMG_HANDLE hSrvDevMemHeap, + IMG_HANDLE hPMR, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 * pui32AllocPageIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 * pui32FreePageIndices, + IMG_UINT32 ui32SparseFlags, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_DEV_VIRTADDR sDevVAddr, IMG_UINT64 ui64CPUVAddr) +{ + PVRSRV_ERROR eError; + DEVMEMINT_HEAP *psSrvDevMemHeapInt; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSrvDevMemHeapInt = (DEVMEMINT_HEAP *) hSrvDevMemHeap; + psPMRInt = (PMR *) hPMR; + + eError = + DevmemIntChangeSparse(psSrvDevMemHeapInt, + psPMRInt, + ui32AllocPageCount, + pui32AllocPageIndices, + ui32FreePageCount, + pui32FreePageIndices, + ui32SparseFlags, uiFlags, sDevVAddr, ui64CPUVAddr); + + return eError; 
+} + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntMapPages(IMG_HANDLE hBridge, + IMG_HANDLE hReservation, + IMG_HANDLE hPMR, + IMG_UINT32 ui32PageCount, + IMG_UINT32 ui32PhysicalPgOffset, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_DEV_VIRTADDR sDevVAddr) +{ + PVRSRV_ERROR eError; + DEVMEMINT_RESERVATION *psReservationInt; + PMR *psPMRInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psReservationInt = (DEVMEMINT_RESERVATION *) hReservation; + psPMRInt = (PMR *) hPMR; + + eError = + DevmemIntMapPages(psReservationInt, + psPMRInt, ui32PageCount, ui32PhysicalPgOffset, uiFlags, sDevVAddr); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnmapPages(IMG_HANDLE hBridge, + IMG_HANDLE hReservation, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_UINT32 ui32PageCount) +{ + PVRSRV_ERROR eError; + DEVMEMINT_RESERVATION *psReservationInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psReservationInt = (DEVMEMINT_RESERVATION *) hReservation; + + eError = DevmemIntUnmapPages(psReservationInt, sDevVAddr, ui32PageCount); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIsVDevAddrValid(IMG_HANDLE hBridge, + IMG_HANDLE hDevmemCtx, + IMG_DEV_VIRTADDR sAddress) +{ + PVRSRV_ERROR eError; + DEVMEMINT_CTX *psDevmemCtxInt; + + psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; + + eError = + DevmemIntIsVDevAddrValid(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + psDevmemCtxInt, sAddress); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemFlushDevSLCRange(IMG_HANDLE hBridge, + IMG_HANDLE hDevmemCtx, + IMG_DEV_VIRTADDR sAddress, + IMG_DEVMEM_SIZE_T uiSize, + IMG_BOOL bInvalidate) +{ +#if defined(RGX_SRV_SLC_RANGEBASED_CFI_SUPPORTED) + PVRSRV_ERROR eError; + DEVMEMINT_CTX *psDevmemCtxInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; + + eError = DevmemIntFlushDevSLCRange(psDevmemCtxInt, sAddress, uiSize, bInvalidate); + + return eError; +#else + PVR_UNREFERENCED_PARAMETER(hBridge); + PVR_UNREFERENCED_PARAMETER(hDevmemCtx); + 
PVR_UNREFERENCED_PARAMETER(sAddress); + PVR_UNREFERENCED_PARAMETER(uiSize); + PVR_UNREFERENCED_PARAMETER(bInvalidate); + + return PVRSRV_ERROR_NOT_IMPLEMENTED; +#endif +} + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemInvalidateFBSCTable(IMG_HANDLE hBridge, + IMG_HANDLE hDevmemCtx, + IMG_UINT64 ui64FBSCEntries) +{ +#if defined(RGX_FEATURE_FBCDC) + PVRSRV_ERROR eError; + DEVMEMINT_CTX *psDevmemCtxInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; + + eError = DevmemIntInvalidateFBSCTable(psDevmemCtxInt, ui64FBSCEntries); + + return eError; +#else + PVR_UNREFERENCED_PARAMETER(hBridge); + PVR_UNREFERENCED_PARAMETER(hDevmemCtx); + PVR_UNREFERENCED_PARAMETER(ui64FBSCEntries); + + return PVRSRV_ERROR_NOT_IMPLEMENTED; +#endif +} + +IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapConfigCount(IMG_HANDLE hBridge, + IMG_UINT32 * pui32NumHeapConfigs) +{ + PVRSRV_ERROR eError; + + eError = + HeapCfgHeapConfigCount(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + pui32NumHeapConfigs); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapCount(IMG_HANDLE hBridge, + IMG_UINT32 ui32HeapConfigIndex, + IMG_UINT32 * pui32NumHeaps) +{ + PVRSRV_ERROR eError; + + eError = + HeapCfgHeapCount(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + ui32HeapConfigIndex, pui32NumHeaps); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapConfigName(IMG_HANDLE hBridge, + IMG_UINT32 ui32HeapConfigIndex, + IMG_UINT32 ui32HeapConfigNameBufSz, + IMG_CHAR * puiHeapConfigName) +{ + PVRSRV_ERROR eError; + + eError = + HeapCfgHeapConfigName(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + ui32HeapConfigIndex, ui32HeapConfigNameBufSz, puiHeapConfigName); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapDetails(IMG_HANDLE hBridge, + IMG_UINT32 ui32HeapConfigIndex, + IMG_UINT32 ui32HeapIndex, + IMG_UINT32 ui32HeapNameBufSz, + IMG_CHAR * puiHeapNameOut, + IMG_DEV_VIRTADDR * psDevVAddrBase, + IMG_DEVMEM_SIZE_T * puiHeapLength, + 
IMG_DEVMEM_SIZE_T * puiReservedRegionLength, + IMG_UINT32 * pui32Log2DataPageSizeOut, + IMG_UINT32 * pui32Log2ImportAlignmentOut) +{ + PVRSRV_ERROR eError; + + eError = + HeapCfgHeapDetails(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + ui32HeapConfigIndex, + ui32HeapIndex, + ui32HeapNameBufSz, + puiHeapNameOut, + psDevVAddrBase, + puiHeapLength, + puiReservedRegionLength, + pui32Log2DataPageSizeOut, pui32Log2ImportAlignmentOut); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntRegisterPFNotifyKM(IMG_HANDLE hBridge, + IMG_HANDLE hDevmemCtx, + IMG_UINT32 ui32PID, IMG_BOOL bRegister) +{ + PVRSRV_ERROR eError; + DEVMEMINT_CTX *psDevmemCtxInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; + + eError = DevmemIntRegisterPFNotifyKM(psDevmemCtxInt, ui32PID, bRegister); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeGetMaxDevMemSize(IMG_HANDLE hBridge, + IMG_DEVMEM_SIZE_T * puiLMASize, + IMG_DEVMEM_SIZE_T * puiUMASize) +{ + PVRSRV_ERROR eError; + + eError = + PVRSRVGetMaxDevMemSizeKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + puiLMASize, puiUMASize); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeDevmemGetFaultAddress(IMG_HANDLE hBridge, + IMG_HANDLE hDevmemCtx, + IMG_DEV_VIRTADDR * psFaultAddress) +{ + PVRSRV_ERROR eError; + DEVMEMINT_CTX *psDevmemCtxInt; + + psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; + + eError = + DevmemIntGetFaultAddress(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + psDevmemCtxInt, psFaultAddress); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVUpdateOOMStats(IMG_HANDLE hBridge, + IMG_UINT32 ui32ui32StatType, IMG_PID ui32pid) +{ +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = PVRSRVServerUpdateOOMStats(ui32ui32StatType, ui32pid); + + return eError; +#else + PVR_UNREFERENCED_PARAMETER(hBridge); + PVR_UNREFERENCED_PARAMETER(ui32ui32StatType); + 
PVR_UNREFERENCED_PARAMETER(ui32pid); + + return PVRSRV_ERROR_NOT_IMPLEMENTED; +#endif +} diff --git a/drivers/gpu/drm/phytium/octopus/client_pvrtl_bridge.h b/drivers/gpu/drm/phytium/octopus/client_pvrtl_bridge.h new file mode 100644 index 000000000000..3292c9ddddf7 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/client_pvrtl_bridge.h @@ -0,0 +1,93 @@ +/******************************************************************************* +@File +@Title Client bridge header for pvrtl +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Exports the client bridge functions for pvrtl +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef CLIENT_PVRTL_BRIDGE_H +#define CLIENT_PVRTL_BRIDGE_H + +#include "img_defs.h" +#include "pvrsrv_error.h" + +#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) +#include "pvr_bridge_client.h" +#include "pvr_bridge.h" +#endif + +#include "common_pvrtl_bridge.h" + +IMG_INTERNAL PVRSRV_ERROR BridgeTLOpenStream(IMG_HANDLE hBridge, + const IMG_CHAR * puiName, + IMG_UINT32 ui32Mode, + IMG_HANDLE * phSD, IMG_HANDLE * phTLPMR); + +IMG_INTERNAL PVRSRV_ERROR BridgeTLCloseStream(IMG_HANDLE hBridge, IMG_HANDLE hSD); + +IMG_INTERNAL PVRSRV_ERROR BridgeTLAcquireData(IMG_HANDLE hBridge, + IMG_HANDLE hSD, + IMG_UINT32 * pui32ReadOffset, + IMG_UINT32 * pui32ReadLen); + +IMG_INTERNAL PVRSRV_ERROR BridgeTLReleaseData(IMG_HANDLE hBridge, + IMG_HANDLE hSD, + IMG_UINT32 ui32ReadOffset, IMG_UINT32 ui32ReadLen); + +IMG_INTERNAL PVRSRV_ERROR BridgeTLDiscoverStreams(IMG_HANDLE hBridge, + const IMG_CHAR * puiNamePattern, + IMG_UINT32 ui32Size, + IMG_CHAR * puiStreams, + IMG_UINT32 * pui32NumFound); + +IMG_INTERNAL PVRSRV_ERROR BridgeTLReserveStream(IMG_HANDLE hBridge, + IMG_HANDLE hSD, + IMG_UINT32 * pui32BufferOffset, + IMG_UINT32 ui32Size, + IMG_UINT32 
ui32SizeMin, + IMG_UINT32 * pui32Available); + +IMG_INTERNAL PVRSRV_ERROR BridgeTLCommitStream(IMG_HANDLE hBridge, + IMG_HANDLE hSD, IMG_UINT32 ui32ReqSize); + +IMG_INTERNAL PVRSRV_ERROR BridgeTLWriteData(IMG_HANDLE hBridge, + IMG_HANDLE hSD, + IMG_UINT32 ui32Size, IMG_BYTE * pui8Data); + +#endif /* CLIENT_PVRTL_BRIDGE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/client_pvrtl_direct_bridge.c b/drivers/gpu/drm/phytium/octopus/client_pvrtl_direct_bridge.c new file mode 100644 index 000000000000..05e59344716f --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/client_pvrtl_direct_bridge.c @@ -0,0 +1,175 @@ +/******************************************************************************* +@File +@Title Direct client bridge for pvrtl +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements the client side of the bridge for pvrtl + which is used in calls from Server context. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include "client_pvrtl_bridge.h" +#include "img_defs.h" +#include "pvr_debug.h" + +/* Module specific includes */ +#include "devicemem_typedefs.h" +#include "pvrsrv_tlcommon.h" + +#include "tlserver.h" + +IMG_INTERNAL PVRSRV_ERROR BridgeTLOpenStream(IMG_HANDLE hBridge, + const IMG_CHAR * puiName, + IMG_UINT32 ui32Mode, + IMG_HANDLE * phSD, IMG_HANDLE * phTLPMR) +{ + PVRSRV_ERROR eError; + TL_STREAM_DESC *psSDInt = NULL; + PMR *psTLPMRInt = NULL; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = TLServerOpenStreamKM(puiName, ui32Mode, &psSDInt, &psTLPMRInt); + + *phSD = psSDInt; + *phTLPMR = psTLPMRInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeTLCloseStream(IMG_HANDLE hBridge, IMG_HANDLE hSD) +{ + PVRSRV_ERROR eError; + TL_STREAM_DESC *psSDInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSDInt = (TL_STREAM_DESC *) hSD; + + eError = TLServerCloseStreamKM(psSDInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeTLAcquireData(IMG_HANDLE hBridge, + IMG_HANDLE hSD, + IMG_UINT32 * pui32ReadOffset, + IMG_UINT32 * pui32ReadLen) +{ + PVRSRV_ERROR eError; + TL_STREAM_DESC *psSDInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSDInt = (TL_STREAM_DESC *) hSD; + + eError = TLServerAcquireDataKM(psSDInt, pui32ReadOffset, pui32ReadLen); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeTLReleaseData(IMG_HANDLE hBridge, + IMG_HANDLE hSD, + IMG_UINT32 ui32ReadOffset, IMG_UINT32 ui32ReadLen) +{ + PVRSRV_ERROR eError; + TL_STREAM_DESC *psSDInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSDInt = (TL_STREAM_DESC *) hSD; + + eError = TLServerReleaseDataKM(psSDInt, ui32ReadOffset, ui32ReadLen); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeTLDiscoverStreams(IMG_HANDLE hBridge, + const IMG_CHAR * puiNamePattern, + IMG_UINT32 ui32Size, + IMG_CHAR * puiStreams, IMG_UINT32 * pui32NumFound) +{ + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + 
eError = TLServerDiscoverStreamsKM(puiNamePattern, ui32Size, puiStreams, pui32NumFound); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeTLReserveStream(IMG_HANDLE hBridge, + IMG_HANDLE hSD, + IMG_UINT32 * pui32BufferOffset, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32SizeMin, IMG_UINT32 * pui32Available) +{ + PVRSRV_ERROR eError; + TL_STREAM_DESC *psSDInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSDInt = (TL_STREAM_DESC *) hSD; + + eError = + TLServerReserveStreamKM(psSDInt, + pui32BufferOffset, ui32Size, ui32SizeMin, pui32Available); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeTLCommitStream(IMG_HANDLE hBridge, + IMG_HANDLE hSD, IMG_UINT32 ui32ReqSize) +{ + PVRSRV_ERROR eError; + TL_STREAM_DESC *psSDInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSDInt = (TL_STREAM_DESC *) hSD; + + eError = TLServerCommitStreamKM(psSDInt, ui32ReqSize); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeTLWriteData(IMG_HANDLE hBridge, + IMG_HANDLE hSD, + IMG_UINT32 ui32Size, IMG_BYTE * pui8Data) +{ + PVRSRV_ERROR eError; + TL_STREAM_DESC *psSDInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSDInt = (TL_STREAM_DESC *) hSD; + + eError = TLServerWriteDataKM(psSDInt, ui32Size, pui8Data); + + return eError; +} diff --git a/drivers/gpu/drm/phytium/octopus/client_ri_bridge.h b/drivers/gpu/drm/phytium/octopus/client_ri_bridge.h new file mode 100644 index 000000000000..30b0c03527ba --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/client_ri_bridge.h @@ -0,0 +1,89 @@ +/******************************************************************************* +@File +@Title Client bridge header for ri +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Exports the client bridge functions for ri +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef CLIENT_RI_BRIDGE_H +#define CLIENT_RI_BRIDGE_H + +#include "img_defs.h" +#include "pvrsrv_error.h" + +#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) +#include "pvr_bridge_client.h" +#include "pvr_bridge.h" +#endif + +#include "common_ri_bridge.h" + +IMG_INTERNAL PVRSRV_ERROR BridgeRIWritePMREntry(IMG_HANDLE hBridge, IMG_HANDLE hPMRHandle); + +IMG_INTERNAL PVRSRV_ERROR BridgeRIWriteMEMDESCEntry(IMG_HANDLE hBridge, + IMG_HANDLE hPMRHandle, + IMG_UINT32 ui32TextBSize, + const IMG_CHAR * puiTextB, + IMG_UINT64 ui64Offset, + IMG_UINT64 ui64Size, + IMG_BOOL bIsImport, + IMG_BOOL bIsSuballoc, IMG_HANDLE * phRIHandle); + +IMG_INTERNAL PVRSRV_ERROR BridgeRIWriteProcListEntry(IMG_HANDLE hBridge, + IMG_UINT32 ui32TextBSize, + const IMG_CHAR * puiTextB, + IMG_UINT64 ui64Size, + IMG_UINT64 ui64DevVAddr, + IMG_HANDLE * phRIHandle); + +IMG_INTERNAL PVRSRV_ERROR BridgeRIUpdateMEMDESCAddr(IMG_HANDLE hBridge, + IMG_HANDLE hRIHandle, IMG_DEV_VIRTADDR sAddr); + +IMG_INTERNAL PVRSRV_ERROR BridgeRIDeleteMEMDESCEntry(IMG_HANDLE hBridge, IMG_HANDLE hRIHandle); + +IMG_INTERNAL PVRSRV_ERROR BridgeRIDumpList(IMG_HANDLE hBridge, IMG_HANDLE hPMRHandle); + +IMG_INTERNAL PVRSRV_ERROR BridgeRIDumpAll(IMG_HANDLE hBridge); + +IMG_INTERNAL PVRSRV_ERROR BridgeRIDumpProcess(IMG_HANDLE hBridge, IMG_PID ui32Pid); + +IMG_INTERNAL PVRSRV_ERROR BridgeRIWritePMREntryWithOwner(IMG_HANDLE hBridge, + IMG_HANDLE hPMRHandle, IMG_PID ui32Owner); + +#endif /* CLIENT_RI_BRIDGE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/client_ri_direct_bridge.c b/drivers/gpu/drm/phytium/octopus/client_ri_direct_bridge.c new file mode 100644 index 000000000000..f0580ad6a62f --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/client_ri_direct_bridge.c @@ -0,0 +1,182 @@ +/******************************************************************************* +@File +@Title Direct client bridge for ri +@Copyright Copyright (c) Phytium Information 
Technologies Ltd. All Rights Reserved +@Description Implements the client side of the bridge for ri + which is used in calls from Server context. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include "client_ri_bridge.h" +#include "img_defs.h" +#include "pvr_debug.h" + +/* Module specific includes */ +#include "ri_typedefs.h" + +#include "ri_server.h" + +IMG_INTERNAL PVRSRV_ERROR BridgeRIWritePMREntry(IMG_HANDLE hBridge, IMG_HANDLE hPMRHandle) +{ + PVRSRV_ERROR eError; + PMR *psPMRHandleInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRHandleInt = (PMR *) hPMRHandle; + + eError = RIWritePMREntryKM(psPMRHandleInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeRIWriteMEMDESCEntry(IMG_HANDLE hBridge, + IMG_HANDLE hPMRHandle, + IMG_UINT32 ui32TextBSize, + const IMG_CHAR * puiTextB, + IMG_UINT64 ui64Offset, + IMG_UINT64 ui64Size, + IMG_BOOL bIsImport, + IMG_BOOL bIsSuballoc, IMG_HANDLE * phRIHandle) +{ + PVRSRV_ERROR eError; + PMR *psPMRHandleInt; + RI_HANDLE psRIHandleInt = NULL; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRHandleInt = (PMR *) hPMRHandle; + + eError = + RIWriteMEMDESCEntryKM(psPMRHandleInt, + ui32TextBSize, + puiTextB, + ui64Offset, ui64Size, bIsImport, bIsSuballoc, &psRIHandleInt); + + *phRIHandle = psRIHandleInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeRIWriteProcListEntry(IMG_HANDLE hBridge, + IMG_UINT32 ui32TextBSize, + const IMG_CHAR * puiTextB, + IMG_UINT64 ui64Size, + IMG_UINT64 ui64DevVAddr, + IMG_HANDLE * phRIHandle) +{ + PVRSRV_ERROR eError; + RI_HANDLE psRIHandleInt = NULL; + 
PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = + RIWriteProcListEntryKM(ui32TextBSize, puiTextB, ui64Size, ui64DevVAddr, &psRIHandleInt); + + *phRIHandle = psRIHandleInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeRIUpdateMEMDESCAddr(IMG_HANDLE hBridge, + IMG_HANDLE hRIHandle, IMG_DEV_VIRTADDR sAddr) +{ + PVRSRV_ERROR eError; + RI_HANDLE psRIHandleInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psRIHandleInt = (RI_HANDLE) hRIHandle; + + eError = RIUpdateMEMDESCAddrKM(psRIHandleInt, sAddr); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeRIDeleteMEMDESCEntry(IMG_HANDLE hBridge, IMG_HANDLE hRIHandle) +{ + PVRSRV_ERROR eError; + RI_HANDLE psRIHandleInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psRIHandleInt = (RI_HANDLE) hRIHandle; + + eError = RIDeleteMEMDESCEntryKM(psRIHandleInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeRIDumpList(IMG_HANDLE hBridge, IMG_HANDLE hPMRHandle) +{ + PVRSRV_ERROR eError; + PMR *psPMRHandleInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRHandleInt = (PMR *) hPMRHandle; + + eError = RIDumpListKM(psPMRHandleInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeRIDumpAll(IMG_HANDLE hBridge) +{ + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = RIDumpAllKM(); + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeRIDumpProcess(IMG_HANDLE hBridge, IMG_PID ui32Pid) +{ + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = RIDumpProcessKM(ui32Pid); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeRIWritePMREntryWithOwner(IMG_HANDLE hBridge, + IMG_HANDLE hPMRHandle, IMG_PID ui32Owner) +{ + PVRSRV_ERROR eError; + PMR *psPMRHandleInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psPMRHandleInt = (PMR *) hPMRHandle; + + eError = RIWritePMREntryWithOwnerKM(psPMRHandleInt, ui32Owner); + + return eError; +} diff --git a/drivers/gpu/drm/phytium/octopus/client_sync_bridge.h b/drivers/gpu/drm/phytium/octopus/client_sync_bridge.h new file mode 100644 
index 000000000000..e78f22ba333e --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/client_sync_bridge.h @@ -0,0 +1,102 @@ +/******************************************************************************* +@File +@Title Client bridge header for sync +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Exports the client bridge functions for sync +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef CLIENT_SYNC_BRIDGE_H +#define CLIENT_SYNC_BRIDGE_H + +#include "img_defs.h" +#include "pvrsrv_error.h" + +#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) +#include "pvr_bridge_client.h" +#include "pvr_bridge.h" +#endif + +#include "common_sync_bridge.h" + +IMG_INTERNAL PVRSRV_ERROR BridgeAllocSyncPrimitiveBlock(IMG_HANDLE hBridge, + IMG_HANDLE * phSyncHandle, + IMG_UINT32 * pui32SyncPrimVAddr, + IMG_UINT32 * pui32SyncPrimBlockSize, + IMG_HANDLE * phhSyncPMR); + +IMG_INTERNAL PVRSRV_ERROR BridgeFreeSyncPrimitiveBlock(IMG_HANDLE hBridge, IMG_HANDLE hSyncHandle); + +IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimSet(IMG_HANDLE hBridge, + IMG_HANDLE hSyncHandle, + IMG_UINT32 ui32Index, IMG_UINT32 ui32Value); + +IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDump(IMG_HANDLE hBridge, + IMG_HANDLE hSyncHandle, IMG_UINT32 ui32Offset); + +IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDumpValue(IMG_HANDLE hBridge, + IMG_HANDLE hSyncHandle, + IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value); + +IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDumpPol(IMG_HANDLE hBridge, + IMG_HANDLE hSyncHandle, + IMG_UINT32 ui32Offset, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + PDUMP_FLAGS_T uiPDumpFlags); + +IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDumpCBP(IMG_HANDLE hBridge, + IMG_HANDLE hSyncHandle, + IMG_UINT32 ui32Offset, + IMG_DEVMEM_OFFSET_T uiWriteOffset, + 
IMG_DEVMEM_SIZE_T uiPacketSize, + IMG_DEVMEM_SIZE_T uiBufferSize); + +IMG_INTERNAL PVRSRV_ERROR BridgeSyncAllocEvent(IMG_HANDLE hBridge, + IMG_BOOL bServerSync, + IMG_UINT32 ui32FWAddr, + IMG_UINT32 ui32ClassNameSize, + const IMG_CHAR * puiClassName); + +IMG_INTERNAL PVRSRV_ERROR BridgeSyncFreeEvent(IMG_HANDLE hBridge, IMG_UINT32 ui32FWAddr); + +IMG_INTERNAL PVRSRV_ERROR BridgeSyncCheckpointSignalledPDumpPol(IMG_HANDLE hBridge, + PVRSRV_FENCE hFence); + +#endif /* CLIENT_SYNC_BRIDGE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/client_sync_direct_bridge.c b/drivers/gpu/drm/phytium/octopus/client_sync_direct_bridge.c new file mode 100644 index 000000000000..6be66f1ef1c6 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/client_sync_direct_bridge.c @@ -0,0 +1,262 @@ +/******************************************************************************* +@File +@Title Direct client bridge for sync +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements the client side of the bridge for sync + which is used in calls from Server context. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include "client_sync_bridge.h" +#include "img_defs.h" +#include "pvr_debug.h" + +/* Module specific includes */ +#include "pdump.h" +#include "pdumpdefs.h" +#include "devicemem_typedefs.h" +#include "pvrsrv_sync_km.h" +#include <powervr/pvrsrv_sync_ext.h> + +#include "sync.h" +#include "sync_server.h" +#include "pdump.h" +#include "pvrsrv_sync_km.h" +#include "sync_fallback_server.h" +#include "sync_checkpoint.h" + +IMG_INTERNAL PVRSRV_ERROR BridgeAllocSyncPrimitiveBlock(IMG_HANDLE hBridge, + IMG_HANDLE * phSyncHandle, + IMG_UINT32 * pui32SyncPrimVAddr, + IMG_UINT32 * pui32SyncPrimBlockSize, + IMG_HANDLE * phhSyncPMR) +{ + PVRSRV_ERROR eError; + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL; + PMR *pshSyncPMRInt = NULL; + + eError = + PVRSRVAllocSyncPrimitiveBlockKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + &psSyncHandleInt, + pui32SyncPrimVAddr, + pui32SyncPrimBlockSize, &pshSyncPMRInt); + + *phSyncHandle = psSyncHandleInt; + *phhSyncPMR = pshSyncPMRInt; + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeFreeSyncPrimitiveBlock(IMG_HANDLE hBridge, IMG_HANDLE hSyncHandle) +{ + PVRSRV_ERROR eError; + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle; + + eError = PVRSRVFreeSyncPrimitiveBlockKM(psSyncHandleInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimSet(IMG_HANDLE hBridge, + IMG_HANDLE hSyncHandle, + IMG_UINT32 ui32Index, IMG_UINT32 ui32Value) +{ + PVRSRV_ERROR eError; + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle; + + eError = PVRSRVSyncPrimSetKM(psSyncHandleInt, ui32Index, ui32Value); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDump(IMG_HANDLE hBridge, + IMG_HANDLE hSyncHandle, IMG_UINT32 ui32Offset) +{ +#if defined(PDUMP) + PVRSRV_ERROR eError; + SYNC_PRIMITIVE_BLOCK 
*psSyncHandleInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle; + + eError = PVRSRVSyncPrimPDumpKM(psSyncHandleInt, ui32Offset); + + return eError; +#else + PVR_UNREFERENCED_PARAMETER(hBridge); + PVR_UNREFERENCED_PARAMETER(hSyncHandle); + PVR_UNREFERENCED_PARAMETER(ui32Offset); + + return PVRSRV_ERROR_NOT_IMPLEMENTED; +#endif +} + +IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDumpValue(IMG_HANDLE hBridge, + IMG_HANDLE hSyncHandle, + IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value) +{ +#if defined(PDUMP) + PVRSRV_ERROR eError; + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle; + + eError = PVRSRVSyncPrimPDumpValueKM(psSyncHandleInt, ui32Offset, ui32Value); + + return eError; +#else + PVR_UNREFERENCED_PARAMETER(hBridge); + PVR_UNREFERENCED_PARAMETER(hSyncHandle); + PVR_UNREFERENCED_PARAMETER(ui32Offset); + PVR_UNREFERENCED_PARAMETER(ui32Value); + + return PVRSRV_ERROR_NOT_IMPLEMENTED; +#endif +} + +IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDumpPol(IMG_HANDLE hBridge, + IMG_HANDLE hSyncHandle, + IMG_UINT32 ui32Offset, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + PDUMP_FLAGS_T uiPDumpFlags) +{ +#if defined(PDUMP) + PVRSRV_ERROR eError; + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle; + + eError = + PVRSRVSyncPrimPDumpPolKM(psSyncHandleInt, + ui32Offset, ui32Value, ui32Mask, eOperator, uiPDumpFlags); + + return eError; +#else + PVR_UNREFERENCED_PARAMETER(hBridge); + PVR_UNREFERENCED_PARAMETER(hSyncHandle); + PVR_UNREFERENCED_PARAMETER(ui32Offset); + PVR_UNREFERENCED_PARAMETER(ui32Value); + PVR_UNREFERENCED_PARAMETER(ui32Mask); + PVR_UNREFERENCED_PARAMETER(eOperator); + PVR_UNREFERENCED_PARAMETER(uiPDumpFlags); + + return PVRSRV_ERROR_NOT_IMPLEMENTED; +#endif +} + +IMG_INTERNAL PVRSRV_ERROR 
BridgeSyncPrimPDumpCBP(IMG_HANDLE hBridge, + IMG_HANDLE hSyncHandle, + IMG_UINT32 ui32Offset, + IMG_DEVMEM_OFFSET_T uiWriteOffset, + IMG_DEVMEM_SIZE_T uiPacketSize, + IMG_DEVMEM_SIZE_T uiBufferSize) +{ +#if defined(PDUMP) + PVRSRV_ERROR eError; + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle; + + eError = + PVRSRVSyncPrimPDumpCBPKM(psSyncHandleInt, + ui32Offset, uiWriteOffset, uiPacketSize, uiBufferSize); + + return eError; +#else + PVR_UNREFERENCED_PARAMETER(hBridge); + PVR_UNREFERENCED_PARAMETER(hSyncHandle); + PVR_UNREFERENCED_PARAMETER(ui32Offset); + PVR_UNREFERENCED_PARAMETER(uiWriteOffset); + PVR_UNREFERENCED_PARAMETER(uiPacketSize); + PVR_UNREFERENCED_PARAMETER(uiBufferSize); + + return PVRSRV_ERROR_NOT_IMPLEMENTED; +#endif +} + +IMG_INTERNAL PVRSRV_ERROR BridgeSyncAllocEvent(IMG_HANDLE hBridge, + IMG_BOOL bServerSync, + IMG_UINT32 ui32FWAddr, + IMG_UINT32 ui32ClassNameSize, + const IMG_CHAR * puiClassName) +{ + PVRSRV_ERROR eError; + + eError = + PVRSRVSyncAllocEventKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + bServerSync, ui32FWAddr, ui32ClassNameSize, puiClassName); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeSyncFreeEvent(IMG_HANDLE hBridge, IMG_UINT32 ui32FWAddr) +{ + PVRSRV_ERROR eError; + + eError = PVRSRVSyncFreeEventKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), ui32FWAddr); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeSyncCheckpointSignalledPDumpPol(IMG_HANDLE hBridge, + PVRSRV_FENCE hFence) +{ +#if defined(PDUMP) + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(hBridge); + + eError = PVRSRVSyncCheckpointSignalledPDumpPolKM(hFence); + + return eError; +#else + PVR_UNREFERENCED_PARAMETER(hBridge); + PVR_UNREFERENCED_PARAMETER(hFence); + + return PVRSRV_ERROR_NOT_IMPLEMENTED; +#endif +} diff --git a/drivers/gpu/drm/phytium/octopus/client_synctracking_bridge.h b/drivers/gpu/drm/phytium/octopus/client_synctracking_bridge.h 
new file mode 100644 index 000000000000..53dc86377075 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/client_synctracking_bridge.h @@ -0,0 +1,68 @@ +/******************************************************************************* +@File +@Title Client bridge header for synctracking +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Exports the client bridge functions for synctracking +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef CLIENT_SYNCTRACKING_BRIDGE_H +#define CLIENT_SYNCTRACKING_BRIDGE_H + +#include "img_defs.h" +#include "pvrsrv_error.h" + +#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) +#include "pvr_bridge_client.h" +#include "pvr_bridge.h" +#endif + +#include "common_synctracking_bridge.h" + +IMG_INTERNAL PVRSRV_ERROR BridgeSyncRecordRemoveByHandle(IMG_HANDLE hBridge, IMG_HANDLE hhRecord); + +IMG_INTERNAL PVRSRV_ERROR BridgeSyncRecordAdd(IMG_HANDLE hBridge, + IMG_HANDLE * phhRecord, + IMG_HANDLE hhServerSyncPrimBlock, + IMG_UINT32 ui32ui32FwBlockAddr, + IMG_UINT32 ui32ui32SyncOffset, + IMG_BOOL bbServerSync, + IMG_UINT32 ui32ClassNameSize, + const IMG_CHAR * puiClassName); + +#endif /* CLIENT_SYNCTRACKING_BRIDGE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/client_synctracking_direct_bridge.c b/drivers/gpu/drm/phytium/octopus/client_synctracking_direct_bridge.c new file mode 100644 index 000000000000..5b1034c2db59 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/client_synctracking_direct_bridge.c @@ -0,0 +1,92 @@ +/******************************************************************************* +@File +@Title Direct client bridge for synctracking +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements the client side of the bridge for synctracking + which is used in calls from Server context. 
+@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include "client_synctracking_bridge.h" +#include "img_defs.h" +#include "pvr_debug.h" + +/* Module specific includes */ + +#include "sync.h" +#include "sync_server.h" + +IMG_INTERNAL PVRSRV_ERROR BridgeSyncRecordRemoveByHandle(IMG_HANDLE hBridge, IMG_HANDLE hhRecord) +{ + PVRSRV_ERROR eError; + SYNC_RECORD_HANDLE pshRecordInt; + PVR_UNREFERENCED_PARAMETER(hBridge); + + pshRecordInt = (SYNC_RECORD_HANDLE) hhRecord; + + eError = PVRSRVSyncRecordRemoveByHandleKM(pshRecordInt); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR BridgeSyncRecordAdd(IMG_HANDLE hBridge, + IMG_HANDLE * phhRecord, + IMG_HANDLE hhServerSyncPrimBlock, + IMG_UINT32 ui32ui32FwBlockAddr, + IMG_UINT32 ui32ui32SyncOffset, + IMG_BOOL bbServerSync, + IMG_UINT32 ui32ClassNameSize, + const IMG_CHAR * puiClassName) +{ + PVRSRV_ERROR eError; + SYNC_RECORD_HANDLE pshRecordInt = NULL; + SYNC_PRIMITIVE_BLOCK *pshServerSyncPrimBlockInt; + + pshServerSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK *) hhServerSyncPrimBlock; + + eError = + PVRSRVSyncRecordAddKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), + &pshRecordInt, + pshServerSyncPrimBlockInt, + ui32ui32FwBlockAddr, + ui32ui32SyncOffset, + bbServerSync, ui32ClassNameSize, puiClassName); + + *phhRecord = pshRecordInt; + return eError; +} diff --git a/drivers/gpu/drm/phytium/octopus/common_cache_bridge.h 
b/drivers/gpu/drm/phytium/octopus/common_cache_bridge.h new file mode 100644 index 000000000000..39fcf0479ffe --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/common_cache_bridge.h @@ -0,0 +1,129 @@ +/******************************************************************************* +@File +@Title Common bridge header for cache +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for cache +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef COMMON_CACHE_BRIDGE_H +#define COMMON_CACHE_BRIDGE_H + +#include <powervr/mem_types.h> + +#include "img_defs.h" +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "cache_ops.h" + +#define PVRSRV_BRIDGE_CACHE_CMD_FIRST 0 +#define PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE PVRSRV_BRIDGE_CACHE_CMD_FIRST+0 +#define PVRSRV_BRIDGE_CACHE_CACHEOPEXEC PVRSRV_BRIDGE_CACHE_CMD_FIRST+1 +#define PVRSRV_BRIDGE_CACHE_CACHEOPLOG PVRSRV_BRIDGE_CACHE_CMD_FIRST+2 +#define PVRSRV_BRIDGE_CACHE_CMD_LAST (PVRSRV_BRIDGE_CACHE_CMD_FIRST+2) + +/******************************************* + CacheOpQueue + *******************************************/ + +/* Bridge in structure for CacheOpQueue */ +typedef struct PVRSRV_BRIDGE_IN_CACHEOPQUEUE_TAG +{ + PVRSRV_CACHE_OP *piuCacheOp; + IMG_UINT64 *pui64Address; + IMG_DEVMEM_OFFSET_T *puiOffset; + IMG_DEVMEM_SIZE_T *puiSize; + IMG_HANDLE *phPMR; + IMG_UINT32 ui32CurrentFenceSeqNum; + IMG_UINT32 ui32NumCacheOps; + IMG_UINT32 ui32OpTimeline; +} __packed PVRSRV_BRIDGE_IN_CACHEOPQUEUE; + +/* Bridge out structure for CacheOpQueue */ +typedef struct PVRSRV_BRIDGE_OUT_CACHEOPQUEUE_TAG +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32NextFenceSeqNum; +} __packed PVRSRV_BRIDGE_OUT_CACHEOPQUEUE; + +/******************************************* + CacheOpExec + 
*******************************************/ + +/* Bridge in structure for CacheOpExec */ +typedef struct PVRSRV_BRIDGE_IN_CACHEOPEXEC_TAG +{ + IMG_UINT64 ui64Address; + IMG_DEVMEM_OFFSET_T uiOffset; + IMG_DEVMEM_SIZE_T uiSize; + IMG_HANDLE hPMR; + PVRSRV_CACHE_OP iuCacheOp; +} __packed PVRSRV_BRIDGE_IN_CACHEOPEXEC; + +/* Bridge out structure for CacheOpExec */ +typedef struct PVRSRV_BRIDGE_OUT_CACHEOPEXEC_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_CACHEOPEXEC; + +/******************************************* + CacheOpLog + *******************************************/ + +/* Bridge in structure for CacheOpLog */ +typedef struct PVRSRV_BRIDGE_IN_CACHEOPLOG_TAG +{ + IMG_INT64 i64ExecuteTimeUs; + IMG_INT64 i64QueuedTimeUs; + IMG_UINT64 ui64Address; + IMG_DEVMEM_OFFSET_T uiOffset; + IMG_DEVMEM_SIZE_T uiSize; + IMG_HANDLE hPMR; + IMG_INT32 i32NumRBF; + PVRSRV_CACHE_OP iuCacheOp; +} __packed PVRSRV_BRIDGE_IN_CACHEOPLOG; + +/* Bridge out structure for CacheOpLog */ +typedef struct PVRSRV_BRIDGE_OUT_CACHEOPLOG_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_CACHEOPLOG; + +#endif /* COMMON_CACHE_BRIDGE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/common_cmm_bridge.h b/drivers/gpu/drm/phytium/octopus/common_cmm_bridge.h new file mode 100644 index 000000000000..569632df4861 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/common_cmm_bridge.h @@ -0,0 +1,114 @@ +/******************************************************************************* +@File +@Title Common bridge header for cmm +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for cmm +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_CMM_BRIDGE_H +#define COMMON_CMM_BRIDGE_H + +#include <powervr/mem_types.h> + +#include "img_defs.h" +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "devicemem_typedefs.h" + +#define PVRSRV_BRIDGE_CMM_CMD_FIRST 0 +#define PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX PVRSRV_BRIDGE_CMM_CMD_FIRST+0 +#define PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX PVRSRV_BRIDGE_CMM_CMD_FIRST+1 +#define PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX PVRSRV_BRIDGE_CMM_CMD_FIRST+2 +#define PVRSRV_BRIDGE_CMM_CMD_LAST (PVRSRV_BRIDGE_CMM_CMD_FIRST+2) + +/******************************************* + DevmemIntExportCtx + *******************************************/ + +/* Bridge in structure for DevmemIntExportCtx */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX_TAG +{ + IMG_HANDLE hContext; + IMG_HANDLE hPMR; +} __packed PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX; + +/* Bridge out structure for DevmemIntExportCtx */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX_TAG +{ + IMG_HANDLE hContextExport; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX; + +/******************************************* + DevmemIntUnexportCtx + *******************************************/ + +/* Bridge in structure for DevmemIntUnexportCtx */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX_TAG +{ + IMG_HANDLE hContextExport; +} __packed PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX; + +/* Bridge out structure for DevmemIntUnexportCtx */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX; + +/******************************************* + DevmemIntAcquireRemoteCtx + *******************************************/ + +/* Bridge in structure for DevmemIntAcquireRemoteCtx */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX_TAG +{ + IMG_HANDLE hPMR; +} __packed PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX; + +/* Bridge out structure 
for DevmemIntAcquireRemoteCtx */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX_TAG +{ + IMG_HANDLE hContext; + IMG_HANDLE hPrivData; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX; + +#endif /* COMMON_CMM_BRIDGE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/common_devicememhistory_bridge.h b/drivers/gpu/drm/phytium/octopus/common_devicememhistory_bridge.h new file mode 100644 index 000000000000..7c66354b534e --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/common_devicememhistory_bridge.h @@ -0,0 +1,185 @@ +/******************************************************************************* +@File +@Title Common bridge header for devicememhistory +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for devicememhistory +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_DEVICEMEMHISTORY_BRIDGE_H +#define COMMON_DEVICEMEMHISTORY_BRIDGE_H + +#include <powervr/mem_types.h> + +#include "img_defs.h" +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "img_types.h" +#include "img_defs.h" +#include "devicemem_typedefs.h" + +#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST 0 +#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+0 +#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+1 +#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+2 +#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+3 +#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+4 +#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_LAST (PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+4) + +/******************************************* + DevicememHistoryMap + *******************************************/ + +/* Bridge in structure for DevicememHistoryMap */ +typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP_TAG +{ + IMG_DEV_VIRTADDR sDevVAddr; + IMG_DEVMEM_SIZE_T uiOffset; + IMG_DEVMEM_SIZE_T uiSize; + IMG_HANDLE hPMR; + const IMG_CHAR *puiText; + IMG_UINT32 ui32AllocationIndex; + IMG_UINT32 ui32Log2PageSize; +} __packed PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP; + +/* Bridge out structure for DevicememHistoryMap */ +typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP_TAG +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32AllocationIndexOut; +} __packed PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP; + +/******************************************* + DevicememHistoryUnmap + *******************************************/ + +/* Bridge in structure for DevicememHistoryUnmap */ +typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP_TAG +{ + IMG_DEV_VIRTADDR sDevVAddr; + 
IMG_DEVMEM_SIZE_T uiOffset; + IMG_DEVMEM_SIZE_T uiSize; + IMG_HANDLE hPMR; + const IMG_CHAR *puiText; + IMG_UINT32 ui32AllocationIndex; + IMG_UINT32 ui32Log2PageSize; +} __packed PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP; + +/* Bridge out structure for DevicememHistoryUnmap */ +typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP_TAG +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32AllocationIndexOut; +} __packed PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP; + +/******************************************* + DevicememHistoryMapVRange + *******************************************/ + +/* Bridge in structure for DevicememHistoryMapVRange */ +typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE_TAG +{ + IMG_DEV_VIRTADDR sBaseDevVAddr; + IMG_DEVMEM_SIZE_T uiAllocSize; + const IMG_CHAR *puiText; + IMG_UINT32 ui32AllocationIndex; + IMG_UINT32 ui32Log2PageSize; + IMG_UINT32 ui32NumPages; + IMG_UINT32 ui32ui32StartPage; +} __packed PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE; + +/* Bridge out structure for DevicememHistoryMapVRange */ +typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE_TAG +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32AllocationIndexOut; +} __packed PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE; + +/******************************************* + DevicememHistoryUnmapVRange + *******************************************/ + +/* Bridge in structure for DevicememHistoryUnmapVRange */ +typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE_TAG +{ + IMG_DEV_VIRTADDR sBaseDevVAddr; + IMG_DEVMEM_SIZE_T uiAllocSize; + const IMG_CHAR *puiText; + IMG_UINT32 ui32AllocationIndex; + IMG_UINT32 ui32Log2PageSize; + IMG_UINT32 ui32NumPages; + IMG_UINT32 ui32ui32StartPage; +} __packed PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE; + +/* Bridge out structure for DevicememHistoryUnmapVRange */ +typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE_TAG +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32AllocationIndexOut; +} __packed PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE; + 
+/******************************************* + DevicememHistorySparseChange + *******************************************/ + +/* Bridge in structure for DevicememHistorySparseChange */ +typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE_TAG +{ + IMG_DEV_VIRTADDR sDevVAddr; + IMG_DEVMEM_SIZE_T uiOffset; + IMG_DEVMEM_SIZE_T uiSize; + IMG_HANDLE hPMR; + IMG_UINT32 *pui32AllocPageIndices; + IMG_UINT32 *pui32FreePageIndices; + const IMG_CHAR *puiText; + IMG_UINT32 ui32AllocPageCount; + IMG_UINT32 ui32AllocationIndex; + IMG_UINT32 ui32FreePageCount; + IMG_UINT32 ui32Log2PageSize; +} __packed PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE; + +/* Bridge out structure for DevicememHistorySparseChange */ +typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE_TAG +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32AllocationIndexOut; +} __packed PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE; + +#endif /* COMMON_DEVICEMEMHISTORY_BRIDGE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/common_di_bridge.h b/drivers/gpu/drm/phytium/octopus/common_di_bridge.h new file mode 100644 index 000000000000..40b6bed28c82 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/common_di_bridge.h @@ -0,0 +1,153 @@ +/******************************************************************************* +@File +@Title Common bridge header for di +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for di +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_DI_BRIDGE_H +#define COMMON_DI_BRIDGE_H + +#include <powervr/mem_types.h> + +#include "img_defs.h" +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "pvrsrv_tlcommon.h" +#include "pvr_dicommon.h" + +#define PVRSRV_BRIDGE_DI_CMD_FIRST 0 +#define PVRSRV_BRIDGE_DI_DICREATECONTEXT PVRSRV_BRIDGE_DI_CMD_FIRST+0 +#define PVRSRV_BRIDGE_DI_DIDESTROYCONTEXT PVRSRV_BRIDGE_DI_CMD_FIRST+1 +#define PVRSRV_BRIDGE_DI_DIREADENTRY PVRSRV_BRIDGE_DI_CMD_FIRST+2 +#define PVRSRV_BRIDGE_DI_DIWRITEENTRY PVRSRV_BRIDGE_DI_CMD_FIRST+3 +#define PVRSRV_BRIDGE_DI_DILISTALLENTRIES PVRSRV_BRIDGE_DI_CMD_FIRST+4 +#define PVRSRV_BRIDGE_DI_CMD_LAST (PVRSRV_BRIDGE_DI_CMD_FIRST+4) + +/******************************************* + DICreateContext + *******************************************/ + +/* Bridge in structure for DICreateContext */ +typedef struct PVRSRV_BRIDGE_IN_DICREATECONTEXT_TAG +{ + IMG_CHAR *puiStreamName; +} __packed PVRSRV_BRIDGE_IN_DICREATECONTEXT; + +/* Bridge out structure for DICreateContext */ +typedef struct PVRSRV_BRIDGE_OUT_DICREATECONTEXT_TAG +{ + IMG_HANDLE hContext; + IMG_CHAR *puiStreamName; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DICREATECONTEXT; + +/******************************************* + DIDestroyContext + *******************************************/ + +/* Bridge in structure for DIDestroyContext */ +typedef struct PVRSRV_BRIDGE_IN_DIDESTROYCONTEXT_TAG +{ + IMG_HANDLE hContext; +} __packed PVRSRV_BRIDGE_IN_DIDESTROYCONTEXT; + +/* Bridge out structure for DIDestroyContext */ +typedef struct PVRSRV_BRIDGE_OUT_DIDESTROYCONTEXT_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DIDESTROYCONTEXT; + +/******************************************* + DIReadEntry + *******************************************/ + +/* Bridge in structure for DIReadEntry */ +typedef struct PVRSRV_BRIDGE_IN_DIREADENTRY_TAG +{ + IMG_UINT64 ui64Offset; + IMG_UINT64 ui64Size; + IMG_HANDLE 
hContext; + const IMG_CHAR *puiEntryPath; +} __packed PVRSRV_BRIDGE_IN_DIREADENTRY; + +/* Bridge out structure for DIReadEntry */ +typedef struct PVRSRV_BRIDGE_OUT_DIREADENTRY_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DIREADENTRY; + +/******************************************* + DIWriteEntry + *******************************************/ + +/* Bridge in structure for DIWriteEntry */ +typedef struct PVRSRV_BRIDGE_IN_DIWRITEENTRY_TAG +{ + IMG_UINT64 ui64ValueSize; + IMG_HANDLE hContext; + const IMG_CHAR *puiEntryPath; + const IMG_CHAR *puiValue; +} __packed PVRSRV_BRIDGE_IN_DIWRITEENTRY; + +/* Bridge out structure for DIWriteEntry */ +typedef struct PVRSRV_BRIDGE_OUT_DIWRITEENTRY_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DIWRITEENTRY; + +/******************************************* + DIListAllEntries + *******************************************/ + +/* Bridge in structure for DIListAllEntries */ +typedef struct PVRSRV_BRIDGE_IN_DILISTALLENTRIES_TAG +{ + IMG_HANDLE hContext; +} __packed PVRSRV_BRIDGE_IN_DILISTALLENTRIES; + +/* Bridge out structure for DIListAllEntries */ +typedef struct PVRSRV_BRIDGE_OUT_DILISTALLENTRIES_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DILISTALLENTRIES; + +#endif /* COMMON_DI_BRIDGE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/common_dma_bridge.h b/drivers/gpu/drm/phytium/octopus/common_dma_bridge.h new file mode 100644 index 000000000000..57a881c64676 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/common_dma_bridge.h @@ -0,0 +1,123 @@ +/******************************************************************************* +@File +@Title Common bridge header for dma +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for dma +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_DMA_BRIDGE_H +#define COMMON_DMA_BRIDGE_H + +#include <powervr/mem_types.h> + +#include "img_defs.h" +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "rgx_bridge.h" +#include "pvrsrv_sync_km.h" + +#define PVRSRV_BRIDGE_DMA_CMD_FIRST 0 +#define PVRSRV_BRIDGE_DMA_DMATRANSFER PVRSRV_BRIDGE_DMA_CMD_FIRST+0 +#define PVRSRV_BRIDGE_DMA_DMASPARSEMAPPINGTABLE PVRSRV_BRIDGE_DMA_CMD_FIRST+1 +#define PVRSRV_BRIDGE_DMA_DMADEVICEPARAMS PVRSRV_BRIDGE_DMA_CMD_FIRST+2 +#define PVRSRV_BRIDGE_DMA_CMD_LAST (PVRSRV_BRIDGE_DMA_CMD_FIRST+2) + +/******************************************* + DmaTransfer + *******************************************/ + +/* Bridge in structure for DmaTransfer */ +typedef struct PVRSRV_BRIDGE_IN_DMATRANSFER_TAG +{ + IMG_UINT64 *pui64Address; + IMG_DEVMEM_OFFSET_T *puiOffset; + IMG_DEVMEM_SIZE_T *puiSize; + IMG_HANDLE *phPMR; + PVRSRV_TIMELINE hUpdateTimeline; + IMG_UINT32 ui32NumDMAs; + IMG_UINT32 ui32uiFlags; +} __packed PVRSRV_BRIDGE_IN_DMATRANSFER; + +/* Bridge out structure for DmaTransfer */ +typedef struct PVRSRV_BRIDGE_OUT_DMATRANSFER_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DMATRANSFER; + +/******************************************* + DmaSparseMappingTable + *******************************************/ + +/* Bridge in structure for DmaSparseMappingTable */ +typedef struct PVRSRV_BRIDGE_IN_DMASPARSEMAPPINGTABLE_TAG +{ + IMG_DEVMEM_OFFSET_T uiOffset; + IMG_HANDLE hPMR; + IMG_BOOL *pbTable; + IMG_UINT32 ui32SizeInPages; +} __packed PVRSRV_BRIDGE_IN_DMASPARSEMAPPINGTABLE; + +/* Bridge out structure for DmaSparseMappingTable */ +typedef struct PVRSRV_BRIDGE_OUT_DMASPARSEMAPPINGTABLE_TAG +{ + IMG_BOOL *pbTable; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DMASPARSEMAPPINGTABLE; + +/******************************************* + DmaDeviceParams + *******************************************/ + +/* Bridge in structure for DmaDeviceParams */ 
+typedef struct PVRSRV_BRIDGE_IN_DMADEVICEPARAMS_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __packed PVRSRV_BRIDGE_IN_DMADEVICEPARAMS; + +/* Bridge out structure for DmaDeviceParams */ +typedef struct PVRSRV_BRIDGE_OUT_DMADEVICEPARAMS_TAG +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32DmaBuffAlign; + IMG_UINT32 ui32DmaTransferMult; +} __packed PVRSRV_BRIDGE_OUT_DMADEVICEPARAMS; + +#endif /* COMMON_DMA_BRIDGE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/common_dmabuf_bridge.h b/drivers/gpu/drm/phytium/octopus/common_dmabuf_bridge.h new file mode 100644 index 000000000000..853816a1e9bb --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/common_dmabuf_bridge.h @@ -0,0 +1,127 @@ +/******************************************************************************* +@File +@Title Common bridge header for dmabuf +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for dmabuf +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_DMABUF_BRIDGE_H +#define COMMON_DMABUF_BRIDGE_H + +#include <powervr/mem_types.h> + +#include "img_defs.h" +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "pvrsrv_memallocflags.h" + +#define PVRSRV_BRIDGE_DMABUF_CMD_FIRST 0 +#define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF PVRSRV_BRIDGE_DMABUF_CMD_FIRST+0 +#define PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF PVRSRV_BRIDGE_DMABUF_CMD_FIRST+1 +#define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF PVRSRV_BRIDGE_DMABUF_CMD_FIRST+2 +#define PVRSRV_BRIDGE_DMABUF_CMD_LAST (PVRSRV_BRIDGE_DMABUF_CMD_FIRST+2) + +/******************************************* + PhysmemImportDmaBuf + *******************************************/ + +/* Bridge in structure for PhysmemImportDmaBuf */ +typedef struct PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF_TAG +{ + const IMG_CHAR *puiName; + IMG_INT ifd; + IMG_UINT32 ui32NameSize; + PVRSRV_MEMALLOCFLAGS_T uiFlags; +} __packed PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF; + +/* Bridge out structure for PhysmemImportDmaBuf */ +typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF_TAG +{ + IMG_DEVMEM_ALIGN_T uiAlign; + IMG_DEVMEM_SIZE_T uiSize; + IMG_HANDLE hPMRPtr; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF; + +/******************************************* + PhysmemExportDmaBuf + *******************************************/ + +/* Bridge in structure for PhysmemExportDmaBuf */ +typedef struct PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF_TAG +{ + IMG_HANDLE hPMR; +} __packed PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF; + +/* Bridge out structure for PhysmemExportDmaBuf */ +typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF_TAG +{ + PVRSRV_ERROR eError; + IMG_INT iFd; +} __packed PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF; + +/******************************************* + PhysmemImportSparseDmaBuf + *******************************************/ + +/* Bridge in structure for PhysmemImportSparseDmaBuf */ +typedef struct 
PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF_TAG +{ + IMG_DEVMEM_SIZE_T uiChunkSize; + IMG_UINT32 *pui32MappingTable; + const IMG_CHAR *puiName; + IMG_INT ifd; + IMG_UINT32 ui32NameSize; + IMG_UINT32 ui32NumPhysChunks; + IMG_UINT32 ui32NumVirtChunks; + PVRSRV_MEMALLOCFLAGS_T uiFlags; +} __packed PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF; + +/* Bridge out structure for PhysmemImportSparseDmaBuf */ +typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF_TAG +{ + IMG_DEVMEM_ALIGN_T uiAlign; + IMG_DEVMEM_SIZE_T uiSize; + IMG_HANDLE hPMRPtr; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF; + +#endif /* COMMON_DMABUF_BRIDGE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/common_htbuffer_bridge.h b/drivers/gpu/drm/phytium/octopus/common_htbuffer_bridge.h new file mode 100644 index 000000000000..35b779f7440b --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/common_htbuffer_bridge.h @@ -0,0 +1,104 @@ +/******************************************************************************* +@File +@Title Common bridge header for htbuffer +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for htbuffer +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_HTBUFFER_BRIDGE_H +#define COMMON_HTBUFFER_BRIDGE_H + +#include <powervr/mem_types.h> + +#include "img_defs.h" +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "devicemem_typedefs.h" +#include "htbuffer_types.h" + +#define PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST 0 +#define PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+0 +#define PVRSRV_BRIDGE_HTBUFFER_HTBLOG PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+1 +#define PVRSRV_BRIDGE_HTBUFFER_CMD_LAST (PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+1) + +/******************************************* + HTBControl + *******************************************/ + +/* Bridge in structure for HTBControl */ +typedef struct PVRSRV_BRIDGE_IN_HTBCONTROL_TAG +{ + IMG_UINT32 *pui32GroupEnable; + IMG_UINT32 ui32EnablePID; + IMG_UINT32 ui32LogLevel; + IMG_UINT32 ui32LogMode; + IMG_UINT32 ui32NumGroups; + IMG_UINT32 ui32OpMode; +} __packed PVRSRV_BRIDGE_IN_HTBCONTROL; + +/* Bridge out structure for HTBControl */ +typedef struct PVRSRV_BRIDGE_OUT_HTBCONTROL_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_HTBCONTROL; + +/******************************************* + HTBLog + *******************************************/ + +/* Bridge in structure for HTBLog */ +typedef struct PVRSRV_BRIDGE_IN_HTBLOG_TAG +{ + IMG_UINT64 ui64TimeStamp; + IMG_UINT32 *pui32Args; + IMG_UINT32 ui32NumArgs; + IMG_UINT32 ui32PID; + IMG_UINT32 ui32SF; + IMG_UINT32 ui32TID; +} __packed PVRSRV_BRIDGE_IN_HTBLOG; + +/* Bridge out structure for HTBLog */ +typedef struct PVRSRV_BRIDGE_OUT_HTBLOG_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_HTBLOG; + +#endif /* COMMON_HTBUFFER_BRIDGE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/common_mm_bridge.h b/drivers/gpu/drm/phytium/octopus/common_mm_bridge.h new file mode 100644 index 000000000000..98898204aee4 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/common_mm_bridge.h @@ -0,0 +1,781 @@ 
+/******************************************************************************* +@File +@Title Common bridge header for mm +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for mm +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef COMMON_MM_BRIDGE_H +#define COMMON_MM_BRIDGE_H + +#include <powervr/mem_types.h> + +#include "img_defs.h" +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "pvrsrv_memallocflags.h" +#include "devicemem_typedefs.h" + +#define PVRSRV_BRIDGE_MM_CMD_FIRST 0 +#define PVRSRV_BRIDGE_MM_PMREXPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+0 +#define PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+1 +#define PVRSRV_BRIDGE_MM_PMRGETUID PVRSRV_BRIDGE_MM_CMD_FIRST+2 +#define PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE PVRSRV_BRIDGE_MM_CMD_FIRST+3 +#define PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE PVRSRV_BRIDGE_MM_CMD_FIRST+4 +#define PVRSRV_BRIDGE_MM_PMRIMPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+5 +#define PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+6 +#define PVRSRV_BRIDGE_MM_PMRUNREFPMR PVRSRV_BRIDGE_MM_CMD_FIRST+7 +#define PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR PVRSRV_BRIDGE_MM_CMD_FIRST+8 +#define PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR PVRSRV_BRIDGE_MM_CMD_FIRST+9 +#define PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR PVRSRV_BRIDGE_MM_CMD_FIRST+10 +#define PVRSRV_BRIDGE_MM_DEVMEMINTPIN PVRSRV_BRIDGE_MM_CMD_FIRST+11 +#define PVRSRV_BRIDGE_MM_DEVMEMINTUNPIN PVRSRV_BRIDGE_MM_CMD_FIRST+12 +#define PVRSRV_BRIDGE_MM_DEVMEMINTPINVALIDATE PVRSRV_BRIDGE_MM_CMD_FIRST+13 +#define PVRSRV_BRIDGE_MM_DEVMEMINTUNPININVALIDATE PVRSRV_BRIDGE_MM_CMD_FIRST+14 +#define 
PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE PVRSRV_BRIDGE_MM_CMD_FIRST+15 +#define PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY PVRSRV_BRIDGE_MM_CMD_FIRST+16 +#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE PVRSRV_BRIDGE_MM_CMD_FIRST+17 +#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY PVRSRV_BRIDGE_MM_CMD_FIRST+18 +#define PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR PVRSRV_BRIDGE_MM_CMD_FIRST+19 +#define PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR PVRSRV_BRIDGE_MM_CMD_FIRST+20 +#define PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE PVRSRV_BRIDGE_MM_CMD_FIRST+21 +#define PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE PVRSRV_BRIDGE_MM_CMD_FIRST+22 +#define PVRSRV_BRIDGE_MM_CHANGESPARSEMEM PVRSRV_BRIDGE_MM_CMD_FIRST+23 +#define PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES PVRSRV_BRIDGE_MM_CMD_FIRST+24 +#define PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES PVRSRV_BRIDGE_MM_CMD_FIRST+25 +#define PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID PVRSRV_BRIDGE_MM_CMD_FIRST+26 +#define PVRSRV_BRIDGE_MM_DEVMEMFLUSHDEVSLCRANGE PVRSRV_BRIDGE_MM_CMD_FIRST+27 +#define PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE PVRSRV_BRIDGE_MM_CMD_FIRST+28 +#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT PVRSRV_BRIDGE_MM_CMD_FIRST+29 +#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT PVRSRV_BRIDGE_MM_CMD_FIRST+30 +#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME PVRSRV_BRIDGE_MM_CMD_FIRST+31 +#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS PVRSRV_BRIDGE_MM_CMD_FIRST+32 +#define PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM PVRSRV_BRIDGE_MM_CMD_FIRST+33 +#define PVRSRV_BRIDGE_MM_GETMAXDEVMEMSIZE PVRSRV_BRIDGE_MM_CMD_FIRST+34 +#define PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS PVRSRV_BRIDGE_MM_CMD_FIRST+35 +#define PVRSRV_BRIDGE_MM_PVRSRVUPDATEOOMSTATS PVRSRV_BRIDGE_MM_CMD_FIRST+36 +#define PVRSRV_BRIDGE_MM_CMD_LAST (PVRSRV_BRIDGE_MM_CMD_FIRST+36) + +/******************************************* + PMRExportPMR + *******************************************/ + +/* Bridge in structure for PMRExportPMR */ +typedef struct PVRSRV_BRIDGE_IN_PMREXPORTPMR_TAG +{ + IMG_HANDLE hPMR; +} __packed 
PVRSRV_BRIDGE_IN_PMREXPORTPMR; + +/* Bridge out structure for PMRExportPMR */ +typedef struct PVRSRV_BRIDGE_OUT_PMREXPORTPMR_TAG +{ + IMG_UINT64 ui64Password; + IMG_UINT64 ui64Size; + IMG_HANDLE hPMRExport; + PVRSRV_ERROR eError; + IMG_UINT32 ui32Log2Contig; +} __packed PVRSRV_BRIDGE_OUT_PMREXPORTPMR; + +/******************************************* + PMRUnexportPMR + *******************************************/ + +/* Bridge in structure for PMRUnexportPMR */ +typedef struct PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR_TAG +{ + IMG_HANDLE hPMRExport; +} __packed PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR; + +/* Bridge out structure for PMRUnexportPMR */ +typedef struct PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR; + +/******************************************* + PMRGetUID + *******************************************/ + +/* Bridge in structure for PMRGetUID */ +typedef struct PVRSRV_BRIDGE_IN_PMRGETUID_TAG +{ + IMG_HANDLE hPMR; +} __packed PVRSRV_BRIDGE_IN_PMRGETUID; + +/* Bridge out structure for PMRGetUID */ +typedef struct PVRSRV_BRIDGE_OUT_PMRGETUID_TAG +{ + IMG_UINT64 ui64UID; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_PMRGETUID; + +/******************************************* + PMRMakeLocalImportHandle + *******************************************/ + +/* Bridge in structure for PMRMakeLocalImportHandle */ +typedef struct PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE_TAG +{ + IMG_HANDLE hBuffer; +} __packed PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE; + +/* Bridge out structure for PMRMakeLocalImportHandle */ +typedef struct PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE_TAG +{ + IMG_HANDLE hExtMem; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE; + +/******************************************* + PMRUnmakeLocalImportHandle + *******************************************/ + +/* Bridge in structure for PMRUnmakeLocalImportHandle */ +typedef struct PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE_TAG 
+{ + IMG_HANDLE hExtMem; +} __packed PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE; + +/* Bridge out structure for PMRUnmakeLocalImportHandle */ +typedef struct PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE; + +/******************************************* + PMRImportPMR + *******************************************/ + +/* Bridge in structure for PMRImportPMR */ +typedef struct PVRSRV_BRIDGE_IN_PMRIMPORTPMR_TAG +{ + IMG_UINT64 ui64uiPassword; + IMG_UINT64 ui64uiSize; + IMG_HANDLE hPMRExport; + IMG_UINT32 ui32uiLog2Contig; +} __packed PVRSRV_BRIDGE_IN_PMRIMPORTPMR; + +/* Bridge out structure for PMRImportPMR */ +typedef struct PVRSRV_BRIDGE_OUT_PMRIMPORTPMR_TAG +{ + IMG_HANDLE hPMR; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_PMRIMPORTPMR; + +/******************************************* + PMRLocalImportPMR + *******************************************/ + +/* Bridge in structure for PMRLocalImportPMR */ +typedef struct PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR_TAG +{ + IMG_HANDLE hExtHandle; +} __packed PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR; + +/* Bridge out structure for PMRLocalImportPMR */ +typedef struct PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR_TAG +{ + IMG_DEVMEM_ALIGN_T uiAlign; + IMG_DEVMEM_SIZE_T uiSize; + IMG_HANDLE hPMR; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR; + +/******************************************* + PMRUnrefPMR + *******************************************/ + +/* Bridge in structure for PMRUnrefPMR */ +typedef struct PVRSRV_BRIDGE_IN_PMRUNREFPMR_TAG +{ + IMG_HANDLE hPMR; +} __packed PVRSRV_BRIDGE_IN_PMRUNREFPMR; + +/* Bridge out structure for PMRUnrefPMR */ +typedef struct PVRSRV_BRIDGE_OUT_PMRUNREFPMR_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_PMRUNREFPMR; + +/******************************************* + PMRUnrefUnlockPMR + *******************************************/ + +/* Bridge in structure for PMRUnrefUnlockPMR */ 
+typedef struct PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR_TAG +{ + IMG_HANDLE hPMR; +} __packed PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR; + +/* Bridge out structure for PMRUnrefUnlockPMR */ +typedef struct PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR; + +/******************************************* + PhysmemNewRamBackedPMR + *******************************************/ + +/* Bridge in structure for PhysmemNewRamBackedPMR */ +typedef struct PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR_TAG +{ + IMG_DEVMEM_SIZE_T uiChunkSize; + IMG_DEVMEM_SIZE_T uiSize; + IMG_UINT32 *pui32MappingTable; + const IMG_CHAR *puiAnnotation; + IMG_UINT32 ui32AnnotationLength; + IMG_UINT32 ui32Log2PageSize; + IMG_UINT32 ui32NumPhysChunks; + IMG_UINT32 ui32NumVirtChunks; + IMG_UINT32 ui32PDumpFlags; + IMG_PID ui32PID; + PVRSRV_MEMALLOCFLAGS_T uiFlags; +} __packed PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR; + +/* Bridge out structure for PhysmemNewRamBackedPMR */ +typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR_TAG +{ + IMG_HANDLE hPMRPtr; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR; + +/******************************************* + PhysmemNewRamBackedLockedPMR + *******************************************/ + +/* Bridge in structure for PhysmemNewRamBackedLockedPMR */ +typedef struct PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR_TAG +{ + IMG_DEVMEM_SIZE_T uiChunkSize; + IMG_DEVMEM_SIZE_T uiSize; + IMG_UINT32 *pui32MappingTable; + const IMG_CHAR *puiAnnotation; + IMG_UINT32 ui32AnnotationLength; + IMG_UINT32 ui32Log2PageSize; + IMG_UINT32 ui32NumPhysChunks; + IMG_UINT32 ui32NumVirtChunks; + IMG_UINT32 ui32PDumpFlags; + IMG_PID ui32PID; + PVRSRV_MEMALLOCFLAGS_T uiFlags; +} __packed PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR; + +/* Bridge out structure for PhysmemNewRamBackedLockedPMR */ +typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR_TAG +{ + IMG_HANDLE hPMRPtr; + PVRSRV_ERROR eError; +} 
__packed PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR; + +/******************************************* + DevmemIntPin + *******************************************/ + +/* Bridge in structure for DevmemIntPin */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTPIN_TAG +{ + IMG_HANDLE hPMR; +} __packed PVRSRV_BRIDGE_IN_DEVMEMINTPIN; + +/* Bridge out structure for DevmemIntPin */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTPIN_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTPIN; + +/******************************************* + DevmemIntUnpin + *******************************************/ + +/* Bridge in structure for DevmemIntUnpin */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN_TAG +{ + IMG_HANDLE hPMR; +} __packed PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN; + +/* Bridge out structure for DevmemIntUnpin */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN; + +/******************************************* + DevmemIntPinValidate + *******************************************/ + +/* Bridge in structure for DevmemIntPinValidate */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE_TAG +{ + IMG_HANDLE hMapping; + IMG_HANDLE hPMR; +} __packed PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE; + +/* Bridge out structure for DevmemIntPinValidate */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE; + +/******************************************* + DevmemIntUnpinInvalidate + *******************************************/ + +/* Bridge in structure for DevmemIntUnpinInvalidate */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE_TAG +{ + IMG_HANDLE hMapping; + IMG_HANDLE hPMR; +} __packed PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE; + +/* Bridge out structure for DevmemIntUnpinInvalidate */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE_TAG +{ + PVRSRV_ERROR eError; +} __packed 
PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE; + +/******************************************* + DevmemIntCtxCreate + *******************************************/ + +/* Bridge in structure for DevmemIntCtxCreate */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE_TAG +{ + IMG_BOOL bbKernelMemoryCtx; +} __packed PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE; + +/* Bridge out structure for DevmemIntCtxCreate */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE_TAG +{ + IMG_HANDLE hDevMemServerContext; + IMG_HANDLE hPrivData; + PVRSRV_ERROR eError; + IMG_UINT32 ui32CPUCacheLineSize; +} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE; + +/******************************************* + DevmemIntCtxDestroy + *******************************************/ + +/* Bridge in structure for DevmemIntCtxDestroy */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY_TAG +{ + IMG_HANDLE hDevmemServerContext; +} __packed PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY; + +/* Bridge out structure for DevmemIntCtxDestroy */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY; + +/******************************************* + DevmemIntHeapCreate + *******************************************/ + +/* Bridge in structure for DevmemIntHeapCreate */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE_TAG +{ + IMG_DEV_VIRTADDR sHeapBaseAddr; + IMG_DEVMEM_SIZE_T uiHeapLength; + IMG_HANDLE hDevmemCtx; + IMG_UINT32 ui32Log2DataPageSize; +} __packed PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE; + +/* Bridge out structure for DevmemIntHeapCreate */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE_TAG +{ + IMG_HANDLE hDevmemHeapPtr; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE; + +/******************************************* + DevmemIntHeapDestroy + *******************************************/ + +/* Bridge in structure for DevmemIntHeapDestroy */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY_TAG +{ + 
IMG_HANDLE hDevmemHeap; +} __packed PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY; + +/* Bridge out structure for DevmemIntHeapDestroy */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY; + +/******************************************* + DevmemIntMapPMR + *******************************************/ + +/* Bridge in structure for DevmemIntMapPMR */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR_TAG +{ + IMG_HANDLE hDevmemServerHeap; + IMG_HANDLE hPMR; + IMG_HANDLE hReservation; + PVRSRV_MEMALLOCFLAGS_T uiMapFlags; +} __packed PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR; + +/* Bridge out structure for DevmemIntMapPMR */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR_TAG +{ + IMG_HANDLE hMapping; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR; + +/******************************************* + DevmemIntUnmapPMR + *******************************************/ + +/* Bridge in structure for DevmemIntUnmapPMR */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR_TAG +{ + IMG_HANDLE hMapping; +} __packed PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR; + +/* Bridge out structure for DevmemIntUnmapPMR */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR; + +/******************************************* + DevmemIntReserveRange + *******************************************/ + +/* Bridge in structure for DevmemIntReserveRange */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE_TAG +{ + IMG_DEV_VIRTADDR sAddress; + IMG_DEVMEM_SIZE_T uiLength; + IMG_HANDLE hDevmemServerHeap; +} __packed PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE; + +/* Bridge out structure for DevmemIntReserveRange */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE_TAG +{ + IMG_HANDLE hReservation; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE; + +/******************************************* + DevmemIntUnreserveRange 
+ *******************************************/ + +/* Bridge in structure for DevmemIntUnreserveRange */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE_TAG +{ + IMG_HANDLE hReservation; +} __packed PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE; + +/* Bridge out structure for DevmemIntUnreserveRange */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE; + +/******************************************* + ChangeSparseMem + *******************************************/ + +/* Bridge in structure for ChangeSparseMem */ +typedef struct PVRSRV_BRIDGE_IN_CHANGESPARSEMEM_TAG +{ + IMG_DEV_VIRTADDR sDevVAddr; + IMG_UINT64 ui64CPUVAddr; + IMG_HANDLE hPMR; + IMG_HANDLE hSrvDevMemHeap; + IMG_UINT32 *pui32AllocPageIndices; + IMG_UINT32 *pui32FreePageIndices; + IMG_UINT32 ui32AllocPageCount; + IMG_UINT32 ui32FreePageCount; + IMG_UINT32 ui32SparseFlags; + PVRSRV_MEMALLOCFLAGS_T uiFlags; +} __packed PVRSRV_BRIDGE_IN_CHANGESPARSEMEM; + +/* Bridge out structure for ChangeSparseMem */ +typedef struct PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM; + +/******************************************* + DevmemIntMapPages + *******************************************/ + +/* Bridge in structure for DevmemIntMapPages */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES_TAG +{ + IMG_DEV_VIRTADDR sDevVAddr; + IMG_HANDLE hPMR; + IMG_HANDLE hReservation; + IMG_UINT32 ui32PageCount; + IMG_UINT32 ui32PhysicalPgOffset; + PVRSRV_MEMALLOCFLAGS_T uiFlags; +} __packed PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES; + +/* Bridge out structure for DevmemIntMapPages */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES; + +/******************************************* + DevmemIntUnmapPages + *******************************************/ + +/* Bridge in structure for DevmemIntUnmapPages */ 
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES_TAG +{ + IMG_DEV_VIRTADDR sDevVAddr; + IMG_HANDLE hReservation; + IMG_UINT32 ui32PageCount; +} __packed PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES; + +/* Bridge out structure for DevmemIntUnmapPages */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES; + +/******************************************* + DevmemIsVDevAddrValid + *******************************************/ + +/* Bridge in structure for DevmemIsVDevAddrValid */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID_TAG +{ + IMG_DEV_VIRTADDR sAddress; + IMG_HANDLE hDevmemCtx; +} __packed PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID; + +/* Bridge out structure for DevmemIsVDevAddrValid */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID; + +/******************************************* + DevmemFlushDevSLCRange + *******************************************/ + +/* Bridge in structure for DevmemFlushDevSLCRange */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMFLUSHDEVSLCRANGE_TAG +{ + IMG_DEV_VIRTADDR sAddress; + IMG_DEVMEM_SIZE_T uiSize; + IMG_HANDLE hDevmemCtx; + IMG_BOOL bInvalidate; +} __packed PVRSRV_BRIDGE_IN_DEVMEMFLUSHDEVSLCRANGE; + +/* Bridge out structure for DevmemFlushDevSLCRange */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMFLUSHDEVSLCRANGE_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DEVMEMFLUSHDEVSLCRANGE; + +/******************************************* + DevmemInvalidateFBSCTable + *******************************************/ + +/* Bridge in structure for DevmemInvalidateFBSCTable */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINVALIDATEFBSCTABLE_TAG +{ + IMG_UINT64 ui64FBSCEntries; + IMG_HANDLE hDevmemCtx; +} __packed PVRSRV_BRIDGE_IN_DEVMEMINVALIDATEFBSCTABLE; + +/* Bridge out structure for DevmemInvalidateFBSCTable */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINVALIDATEFBSCTABLE_TAG 
+{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DEVMEMINVALIDATEFBSCTABLE; + +/******************************************* + HeapCfgHeapConfigCount + *******************************************/ + +/* Bridge in structure for HeapCfgHeapConfigCount */ +typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __packed PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT; + +/* Bridge out structure for HeapCfgHeapConfigCount */ +typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT_TAG +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32NumHeapConfigs; +} __packed PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT; + +/******************************************* + HeapCfgHeapCount + *******************************************/ + +/* Bridge in structure for HeapCfgHeapCount */ +typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT_TAG +{ + IMG_UINT32 ui32HeapConfigIndex; +} __packed PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT; + +/* Bridge out structure for HeapCfgHeapCount */ +typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT_TAG +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32NumHeaps; +} __packed PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT; + +/******************************************* + HeapCfgHeapConfigName + *******************************************/ + +/* Bridge in structure for HeapCfgHeapConfigName */ +typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME_TAG +{ + IMG_CHAR *puiHeapConfigName; + IMG_UINT32 ui32HeapConfigIndex; + IMG_UINT32 ui32HeapConfigNameBufSz; +} __packed PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME; + +/* Bridge out structure for HeapCfgHeapConfigName */ +typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME_TAG +{ + IMG_CHAR *puiHeapConfigName; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME; + +/******************************************* + HeapCfgHeapDetails + *******************************************/ + +/* Bridge in structure for HeapCfgHeapDetails */ +typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS_TAG +{ 
+ IMG_CHAR *puiHeapNameOut; + IMG_UINT32 ui32HeapConfigIndex; + IMG_UINT32 ui32HeapIndex; + IMG_UINT32 ui32HeapNameBufSz; +} __packed PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS; + +/* Bridge out structure for HeapCfgHeapDetails */ +typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS_TAG +{ + IMG_DEV_VIRTADDR sDevVAddrBase; + IMG_DEVMEM_SIZE_T uiHeapLength; + IMG_DEVMEM_SIZE_T uiReservedRegionLength; + IMG_CHAR *puiHeapNameOut; + PVRSRV_ERROR eError; + IMG_UINT32 ui32Log2DataPageSizeOut; + IMG_UINT32 ui32Log2ImportAlignmentOut; +} __packed PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS; + +/******************************************* + DevmemIntRegisterPFNotifyKM + *******************************************/ + +/* Bridge in structure for DevmemIntRegisterPFNotifyKM */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM_TAG +{ + IMG_HANDLE hDevmemCtx; + IMG_BOOL bRegister; + IMG_UINT32 ui32PID; +} __packed PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM; + +/* Bridge out structure for DevmemIntRegisterPFNotifyKM */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM; + +/******************************************* + GetMaxDevMemSize + *******************************************/ + +/* Bridge in structure for GetMaxDevMemSize */ +typedef struct PVRSRV_BRIDGE_IN_GETMAXDEVMEMSIZE_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __packed PVRSRV_BRIDGE_IN_GETMAXDEVMEMSIZE; + +/* Bridge out structure for GetMaxDevMemSize */ +typedef struct PVRSRV_BRIDGE_OUT_GETMAXDEVMEMSIZE_TAG +{ + IMG_DEVMEM_SIZE_T uiLMASize; + IMG_DEVMEM_SIZE_T uiUMASize; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_GETMAXDEVMEMSIZE; + +/******************************************* + DevmemGetFaultAddress + *******************************************/ + +/* Bridge in structure for DevmemGetFaultAddress */ +typedef struct PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS_TAG +{ + IMG_HANDLE hDevmemCtx; +} __packed 
PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS; + +/* Bridge out structure for DevmemGetFaultAddress */ +typedef struct PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS_TAG +{ + IMG_DEV_VIRTADDR sFaultAddress; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS; + +/******************************************* + PVRSRVUpdateOOMStats + *******************************************/ + +/* Bridge in structure for PVRSRVUpdateOOMStats */ +typedef struct PVRSRV_BRIDGE_IN_PVRSRVUPDATEOOMSTATS_TAG +{ + IMG_PID ui32pid; + IMG_UINT32 ui32ui32StatType; +} __packed PVRSRV_BRIDGE_IN_PVRSRVUPDATEOOMSTATS; + +/* Bridge out structure for PVRSRVUpdateOOMStats */ +typedef struct PVRSRV_BRIDGE_OUT_PVRSRVUPDATEOOMSTATS_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_PVRSRVUPDATEOOMSTATS; + +#endif /* COMMON_MM_BRIDGE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/common_pvrtl_bridge.h b/drivers/gpu/drm/phytium/octopus/common_pvrtl_bridge.h new file mode 100644 index 000000000000..cd14d683da24 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/common_pvrtl_bridge.h @@ -0,0 +1,214 @@ +/******************************************************************************* +@File +@Title Common bridge header for pvrtl +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for pvrtl +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_PVRTL_BRIDGE_H +#define COMMON_PVRTL_BRIDGE_H + +#include + +#include "img_defs.h" +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "devicemem_typedefs.h" +#include "pvrsrv_tlcommon.h" + +#define PVRSRV_BRIDGE_PVRTL_CMD_FIRST 0 +#define PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM PVRSRV_BRIDGE_PVRTL_CMD_FIRST+0 +#define PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM PVRSRV_BRIDGE_PVRTL_CMD_FIRST+1 +#define PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA PVRSRV_BRIDGE_PVRTL_CMD_FIRST+2 +#define PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA PVRSRV_BRIDGE_PVRTL_CMD_FIRST+3 +#define PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS PVRSRV_BRIDGE_PVRTL_CMD_FIRST+4 +#define PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM PVRSRV_BRIDGE_PVRTL_CMD_FIRST+5 +#define PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM PVRSRV_BRIDGE_PVRTL_CMD_FIRST+6 +#define PVRSRV_BRIDGE_PVRTL_TLWRITEDATA PVRSRV_BRIDGE_PVRTL_CMD_FIRST+7 +#define PVRSRV_BRIDGE_PVRTL_CMD_LAST (PVRSRV_BRIDGE_PVRTL_CMD_FIRST+7) + +/******************************************* + TLOpenStream + *******************************************/ + +/* Bridge in structure for TLOpenStream */ +typedef struct PVRSRV_BRIDGE_IN_TLOPENSTREAM_TAG +{ + const IMG_CHAR *puiName; + IMG_UINT32 ui32Mode; +} __packed PVRSRV_BRIDGE_IN_TLOPENSTREAM; + +/* Bridge out structure for TLOpenStream */ +typedef struct PVRSRV_BRIDGE_OUT_TLOPENSTREAM_TAG +{ + IMG_HANDLE hSD; + IMG_HANDLE hTLPMR; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_TLOPENSTREAM; + +/******************************************* + TLCloseStream + *******************************************/ + +/* Bridge in structure for TLCloseStream */ +typedef struct PVRSRV_BRIDGE_IN_TLCLOSESTREAM_TAG +{ + IMG_HANDLE hSD; +} __packed PVRSRV_BRIDGE_IN_TLCLOSESTREAM; + +/* Bridge out structure for TLCloseStream */ +typedef struct PVRSRV_BRIDGE_OUT_TLCLOSESTREAM_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_TLCLOSESTREAM; + 
+/******************************************* + TLAcquireData + *******************************************/ + +/* Bridge in structure for TLAcquireData */ +typedef struct PVRSRV_BRIDGE_IN_TLACQUIREDATA_TAG +{ + IMG_HANDLE hSD; +} __packed PVRSRV_BRIDGE_IN_TLACQUIREDATA; + +/* Bridge out structure for TLAcquireData */ +typedef struct PVRSRV_BRIDGE_OUT_TLACQUIREDATA_TAG +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32ReadLen; + IMG_UINT32 ui32ReadOffset; +} __packed PVRSRV_BRIDGE_OUT_TLACQUIREDATA; + +/******************************************* + TLReleaseData + *******************************************/ + +/* Bridge in structure for TLReleaseData */ +typedef struct PVRSRV_BRIDGE_IN_TLRELEASEDATA_TAG +{ + IMG_HANDLE hSD; + IMG_UINT32 ui32ReadLen; + IMG_UINT32 ui32ReadOffset; +} __packed PVRSRV_BRIDGE_IN_TLRELEASEDATA; + +/* Bridge out structure for TLReleaseData */ +typedef struct PVRSRV_BRIDGE_OUT_TLRELEASEDATA_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_TLRELEASEDATA; + +/******************************************* + TLDiscoverStreams + *******************************************/ + +/* Bridge in structure for TLDiscoverStreams */ +typedef struct PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS_TAG +{ + const IMG_CHAR *puiNamePattern; + IMG_CHAR *puiStreams; + IMG_UINT32 ui32Size; +} __packed PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS; + +/* Bridge out structure for TLDiscoverStreams */ +typedef struct PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS_TAG +{ + IMG_CHAR *puiStreams; + PVRSRV_ERROR eError; + IMG_UINT32 ui32NumFound; +} __packed PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS; + +/******************************************* + TLReserveStream + *******************************************/ + +/* Bridge in structure for TLReserveStream */ +typedef struct PVRSRV_BRIDGE_IN_TLRESERVESTREAM_TAG +{ + IMG_HANDLE hSD; + IMG_UINT32 ui32Size; + IMG_UINT32 ui32SizeMin; +} __packed PVRSRV_BRIDGE_IN_TLRESERVESTREAM; + +/* Bridge out structure for TLReserveStream */ +typedef struct 
PVRSRV_BRIDGE_OUT_TLRESERVESTREAM_TAG +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32Available; + IMG_UINT32 ui32BufferOffset; +} __packed PVRSRV_BRIDGE_OUT_TLRESERVESTREAM; + +/******************************************* + TLCommitStream + *******************************************/ + +/* Bridge in structure for TLCommitStream */ +typedef struct PVRSRV_BRIDGE_IN_TLCOMMITSTREAM_TAG +{ + IMG_HANDLE hSD; + IMG_UINT32 ui32ReqSize; +} __packed PVRSRV_BRIDGE_IN_TLCOMMITSTREAM; + +/* Bridge out structure for TLCommitStream */ +typedef struct PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM; + +/******************************************* + TLWriteData + *******************************************/ + +/* Bridge in structure for TLWriteData */ +typedef struct PVRSRV_BRIDGE_IN_TLWRITEDATA_TAG +{ + IMG_HANDLE hSD; + IMG_BYTE *pui8Data; + IMG_UINT32 ui32Size; +} __packed PVRSRV_BRIDGE_IN_TLWRITEDATA; + +/* Bridge out structure for TLWriteData */ +typedef struct PVRSRV_BRIDGE_OUT_TLWRITEDATA_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_TLWRITEDATA; + +#endif /* COMMON_PVRTL_BRIDGE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/common_rgxbreakpoint_bridge.h b/drivers/gpu/drm/phytium/octopus/common_rgxbreakpoint_bridge.h new file mode 100644 index 000000000000..ba6c1d6492a8 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/common_rgxbreakpoint_bridge.h @@ -0,0 +1,149 @@ +/******************************************************************************* +@File +@Title Common bridge header for rgxbreakpoint +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for rgxbreakpoint +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_RGXBREAKPOINT_BRIDGE_H +#define COMMON_RGXBREAKPOINT_BRIDGE_H + +#include + +#include "img_defs.h" +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "rgx_bridge.h" + +#define PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST 0 +#define PVRSRV_BRIDGE_RGXBREAKPOINT_RGXSETBREAKPOINT PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RGXBREAKPOINT_RGXCLEARBREAKPOINT PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+1 +#define PVRSRV_BRIDGE_RGXBREAKPOINT_RGXENABLEBREAKPOINT PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RGXBREAKPOINT_RGXDISABLEBREAKPOINT PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+3 +#define PVRSRV_BRIDGE_RGXBREAKPOINT_RGXOVERALLOCATEBPREGISTERS PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+4 +#define PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_LAST (PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+4) + +/******************************************* + RGXSetBreakpoint + *******************************************/ + +/* Bridge in structure for RGXSetBreakpoint */ +typedef struct PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT_TAG +{ + IMG_HANDLE hPrivData; + IMG_UINT32 eFWDataMaster; + IMG_UINT32 ui32BreakpointAddr; + IMG_UINT32 ui32DM; + IMG_UINT32 ui32HandlerAddr; +} __packed PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT; + +/* Bridge out structure for RGXSetBreakpoint */ +typedef struct PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT; + +/******************************************* + RGXClearBreakpoint + *******************************************/ + +/* Bridge in structure for RGXClearBreakpoint */ +typedef struct PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT_TAG +{ + IMG_HANDLE hPrivData; +} __packed PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT; + +/* Bridge out structure for RGXClearBreakpoint */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT; + 
+/******************************************* + RGXEnableBreakpoint + *******************************************/ + +/* Bridge in structure for RGXEnableBreakpoint */ +typedef struct PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT_TAG +{ + IMG_HANDLE hPrivData; +} __packed PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT; + +/* Bridge out structure for RGXEnableBreakpoint */ +typedef struct PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT; + +/******************************************* + RGXDisableBreakpoint + *******************************************/ + +/* Bridge in structure for RGXDisableBreakpoint */ +typedef struct PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT_TAG +{ + IMG_HANDLE hPrivData; +} __packed PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT; + +/* Bridge out structure for RGXDisableBreakpoint */ +typedef struct PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT; + +/******************************************* + RGXOverallocateBPRegisters + *******************************************/ + +/* Bridge in structure for RGXOverallocateBPRegisters */ +typedef struct PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS_TAG +{ + IMG_UINT32 ui32SharedRegs; + IMG_UINT32 ui32TempRegs; +} __packed PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS; + +/* Bridge out structure for RGXOverallocateBPRegisters */ +typedef struct PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS; + +#endif /* COMMON_RGXBREAKPOINT_BRIDGE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/common_rgxcmp_bridge.h b/drivers/gpu/drm/phytium/octopus/common_rgxcmp_bridge.h new file mode 100644 index 000000000000..890fb3daecad --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/common_rgxcmp_bridge.h @@ -0,0 +1,230 @@ +/******************************************************************************* +@File +@Title Common bridge 
header for rgxcmp +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for rgxcmp +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef COMMON_RGXCMP_BRIDGE_H +#define COMMON_RGXCMP_BRIDGE_H + +#include <powervr/mem_types.h> + +#include "img_defs.h" +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "rgx_bridge.h" +#include "pvrsrv_sync_km.h" + +#define PVRSRV_BRIDGE_RGXCMP_CMD_FIRST 0 +#define PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+1 +#define PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+3 +#define PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+4 +#define PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2 PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+5 +#define PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPROPERTY PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+6 +#define PVRSRV_BRIDGE_RGXCMP_RGXGETLASTDEVICEERROR PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+7 +#define PVRSRV_BRIDGE_RGXCMP_CMD_LAST (PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+7) + +/******************************************* + RGXCreateComputeContext + *******************************************/ + +/* Bridge in structure for RGXCreateComputeContext */ +typedef struct PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT_TAG +{ + IMG_UINT64 ui64RobustnessAddress; + IMG_HANDLE hPrivData; + IMG_BYTE *pui8FrameworkCmd; + IMG_BYTE
*pui8StaticComputeContextState; + IMG_UINT32 ui32ContextFlags; + IMG_UINT32 ui32FrameworkCmdSize; + IMG_UINT32 ui32MaxDeadlineMS; + IMG_UINT32 ui32PackedCCBSizeU88; + IMG_UINT32 ui32Priority; + IMG_UINT32 ui32StaticComputeContextStateSize; +} __packed PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT; + +/* Bridge out structure for RGXCreateComputeContext */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT_TAG +{ + IMG_HANDLE hComputeContext; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT; + +/******************************************* + RGXDestroyComputeContext + *******************************************/ + +/* Bridge in structure for RGXDestroyComputeContext */ +typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT_TAG +{ + IMG_HANDLE hComputeContext; +} __packed PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT; + +/* Bridge out structure for RGXDestroyComputeContext */ +typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT; + +/******************************************* + RGXFlushComputeData + *******************************************/ + +/* Bridge in structure for RGXFlushComputeData */ +typedef struct PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA_TAG +{ + IMG_HANDLE hComputeContext; +} __packed PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA; + +/* Bridge out structure for RGXFlushComputeData */ +typedef struct PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA; + +/******************************************* + RGXSetComputeContextPriority + *******************************************/ + +/* Bridge in structure for RGXSetComputeContextPriority */ +typedef struct PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY_TAG +{ + IMG_HANDLE hComputeContext; + IMG_UINT32 ui32Priority; +} __packed PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY; + +/* Bridge out structure for RGXSetComputeContextPriority */ 
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY; + +/******************************************* + RGXNotifyComputeWriteOffsetUpdate + *******************************************/ + +/* Bridge in structure for RGXNotifyComputeWriteOffsetUpdate */ +typedef struct PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE_TAG +{ + IMG_HANDLE hComputeContext; +} __packed PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE; + +/* Bridge out structure for RGXNotifyComputeWriteOffsetUpdate */ +typedef struct PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE; + +/******************************************* + RGXKickCDM2 + *******************************************/ + +/* Bridge in structure for RGXKickCDM2 */ +typedef struct PVRSRV_BRIDGE_IN_RGXKICKCDM2_TAG +{ + IMG_UINT64 ui64DeadlineInus; + IMG_HANDLE hComputeContext; + IMG_UINT32 *pui32ClientUpdateOffset; + IMG_UINT32 *pui32ClientUpdateValue; + IMG_UINT32 *pui32SyncPMRFlags; + IMG_BYTE *pui8DMCmd; + IMG_CHAR *puiUpdateFenceName; + IMG_HANDLE *phClientUpdateUFOSyncPrimBlock; + IMG_HANDLE *phSyncPMRs; + PVRSRV_FENCE hCheckFenceFd; + PVRSRV_TIMELINE hUpdateTimeline; + IMG_UINT32 ui32ClientCacheOpSeqNum; + IMG_UINT32 ui32ClientUpdateCount; + IMG_UINT32 ui32CmdSize; + IMG_UINT32 ui32ExtJobRef; + IMG_UINT32 ui32NumOfWorkgroups; + IMG_UINT32 ui32NumOfWorkitems; + IMG_UINT32 ui32PDumpFlags; + IMG_UINT32 ui32SyncPMRCount; +} __packed PVRSRV_BRIDGE_IN_RGXKICKCDM2; + +/* Bridge out structure for RGXKickCDM2 */ +typedef struct PVRSRV_BRIDGE_OUT_RGXKICKCDM2_TAG +{ + PVRSRV_ERROR eError; + PVRSRV_FENCE hUpdateFence; +} __packed PVRSRV_BRIDGE_OUT_RGXKICKCDM2; + +/******************************************* + RGXSetComputeContextProperty + *******************************************/ + +/* Bridge in structure for RGXSetComputeContextProperty */ 
+typedef struct PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY_TAG +{ + IMG_UINT64 ui64Input; + IMG_HANDLE hComputeContext; + IMG_UINT32 ui32Property; +} __packed PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY; + +/* Bridge out structure for RGXSetComputeContextProperty */ +typedef struct PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY_TAG +{ + IMG_UINT64 ui64Output; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY; + +/******************************************* + RGXGetLastDeviceError + *******************************************/ + +/* Bridge in structure for RGXGetLastDeviceError */ +typedef struct PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __packed PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR; + +/* Bridge out structure for RGXGetLastDeviceError */ +typedef struct PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR_TAG +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32Error; +} __packed PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR; + +#endif /* COMMON_RGXCMP_BRIDGE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/common_rgxfwdbg_bridge.h b/drivers/gpu/drm/phytium/octopus/common_rgxfwdbg_bridge.h new file mode 100644 index 000000000000..9a1590c75f0f --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/common_rgxfwdbg_bridge.h @@ -0,0 +1,200 @@ +/******************************************************************************* +@File +@Title Common bridge header for rgxfwdbg +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for rgxfwdbg +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_RGXFWDBG_BRIDGE_H +#define COMMON_RGXFWDBG_BRIDGE_H + +#include <powervr/mem_types.h> + +#include "img_defs.h" +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "devicemem_typedefs.h" +#include "rgx_bridge.h" +#include "pvrsrv_memallocflags.h" + +#define PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST 0 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+1 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSIDPRIORITY PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+3 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+4 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+5 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGWDGCONFIGURE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+6 +#define PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+7 +#define PVRSRV_BRIDGE_RGXFWDBG_CMD_LAST (PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+7) + +/******************************************* + RGXFWDebugSetFWLog + *******************************************/ + +/* Bridge in structure for RGXFWDebugSetFWLog */ +typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETFWLOG_TAG +{ + IMG_UINT32 ui32RGXFWLogType; +} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETFWLOG; + +/* Bridge out structure for RGXFWDebugSetFWLog */ +typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETFWLOG_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETFWLOG; + +/******************************************* + RGXFWDebugDumpFreelistPageList + *******************************************/ + +/* Bridge in structure for RGXFWDebugDumpFreelistPageList */ +typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGDUMPFREELISTPAGELIST_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __packed
PVRSRV_BRIDGE_IN_RGXFWDEBUGDUMPFREELISTPAGELIST; + +/* Bridge out structure for RGXFWDebugDumpFreelistPageList */ +typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST; + +/******************************************* + RGXFWDebugSetHCSDeadline + *******************************************/ + +/* Bridge in structure for RGXFWDebugSetHCSDeadline */ +typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETHCSDEADLINE_TAG +{ + IMG_UINT32 ui32RGXHCSDeadline; +} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETHCSDEADLINE; + +/* Bridge out structure for RGXFWDebugSetHCSDeadline */ +typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE; + +/******************************************* + RGXFWDebugSetOSidPriority + *******************************************/ + +/* Bridge in structure for RGXFWDebugSetOSidPriority */ +typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSIDPRIORITY_TAG +{ + IMG_UINT32 ui32OSid; + IMG_UINT32 ui32Priority; +} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSIDPRIORITY; + +/* Bridge out structure for RGXFWDebugSetOSidPriority */ +typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY; + +/******************************************* + RGXFWDebugSetOSNewOnlineState + *******************************************/ + +/* Bridge in structure for RGXFWDebugSetOSNewOnlineState */ +typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE_TAG +{ + IMG_UINT32 ui32OSNewState; + IMG_UINT32 ui32OSid; +} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE; + +/* Bridge out structure for RGXFWDebugSetOSNewOnlineState */ +typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE; + 
+/******************************************* + RGXFWDebugPHRConfigure + *******************************************/ + +/* Bridge in structure for RGXFWDebugPHRConfigure */ +typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGPHRCONFIGURE_TAG +{ + IMG_UINT32 ui32ui32PHRMode; +} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGPHRCONFIGURE; + +/* Bridge out structure for RGXFWDebugPHRConfigure */ +typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGPHRCONFIGURE_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGPHRCONFIGURE; + +/******************************************* + RGXFWDebugWdgConfigure + *******************************************/ + +/* Bridge in structure for RGXFWDebugWdgConfigure */ +typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGWDGCONFIGURE_TAG +{ + IMG_UINT32 ui32ui32WdgPeriodUs; +} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGWDGCONFIGURE; + +/* Bridge out structure for RGXFWDebugWdgConfigure */ +typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGWDGCONFIGURE_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGWDGCONFIGURE; + +/******************************************* + RGXCurrentTime + *******************************************/ + +/* Bridge in structure for RGXCurrentTime */ +typedef struct PVRSRV_BRIDGE_IN_RGXCURRENTTIME_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __packed PVRSRV_BRIDGE_IN_RGXCURRENTTIME; + +/* Bridge out structure for RGXCurrentTime */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCURRENTTIME_TAG +{ + IMG_UINT64 ui64Time; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXCURRENTTIME; + +#endif /* COMMON_RGXFWDBG_BRIDGE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/common_rgxhwperf_bridge.h b/drivers/gpu/drm/phytium/octopus/common_rgxhwperf_bridge.h new file mode 100644 index 000000000000..7e92dbf2b40c --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/common_rgxhwperf_bridge.h @@ -0,0 +1,135 @@ +/******************************************************************************* +@File +@Title Common bridge header for rgxhwperf 
+@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for rgxhwperf +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef COMMON_RGXHWPERF_BRIDGE_H +#define COMMON_RGXHWPERF_BRIDGE_H + +#include <powervr/mem_types.h> + +#include "img_defs.h" +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "rgx_bridge.h" +#include "rgx_hwperf.h" + +#define PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST 0 +#define PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGUREHWPERFBLOCKS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+1 +#define PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONTROLHWPERFBLOCKS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+3 +#define PVRSRV_BRIDGE_RGXHWPERF_CMD_LAST (PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+3) + +/******************************************* + RGXCtrlHWPerf + *******************************************/ + +/* Bridge in structure for RGXCtrlHWPerf */ +typedef struct PVRSRV_BRIDGE_IN_RGXCTRLHWPERF_TAG +{ + IMG_UINT64 ui64Mask; + IMG_BOOL bToggle; + IMG_UINT32 ui32StreamId; +} __packed PVRSRV_BRIDGE_IN_RGXCTRLHWPERF; + +/* Bridge out structure for RGXCtrlHWPerf */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF; + +/******************************************* + RGXConfigureHWPerfBlocks + *******************************************/ + +/* Bridge in structure for RGXConfigureHWPerfBlocks
*/ +typedef struct PVRSRV_BRIDGE_IN_RGXCONFIGUREHWPERFBLOCKS_TAG +{ + RGX_HWPERF_CONFIG_CNTBLK *psBlockConfigs; + IMG_UINT32 ui32ArrayLen; + IMG_UINT32 ui32CtrlWord; +} __packed PVRSRV_BRIDGE_IN_RGXCONFIGUREHWPERFBLOCKS; + +/* Bridge out structure for RGXConfigureHWPerfBlocks */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCONFIGUREHWPERFBLOCKS_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXCONFIGUREHWPERFBLOCKS; + +/******************************************* + RGXGetHWPerfBvncFeatureFlags + *******************************************/ + +/* Bridge in structure for RGXGetHWPerfBvncFeatureFlags */ +typedef struct PVRSRV_BRIDGE_IN_RGXGETHWPERFBVNCFEATUREFLAGS_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __packed PVRSRV_BRIDGE_IN_RGXGETHWPERFBVNCFEATUREFLAGS; + +/* Bridge out structure for RGXGetHWPerfBvncFeatureFlags */ +typedef struct PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS_TAG +{ + RGX_HWPERF_BVNC sBVNC; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS; + +/******************************************* + RGXControlHWPerfBlocks + *******************************************/ + +/* Bridge in structure for RGXControlHWPerfBlocks */ +typedef struct PVRSRV_BRIDGE_IN_RGXCONTROLHWPERFBLOCKS_TAG +{ + IMG_UINT16 *pui16BlockIDs; + IMG_BOOL bEnable; + IMG_UINT32 ui32ArrayLen; +} __packed PVRSRV_BRIDGE_IN_RGXCONTROLHWPERFBLOCKS; + +/* Bridge out structure for RGXControlHWPerfBlocks */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCONTROLHWPERFBLOCKS_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXCONTROLHWPERFBLOCKS; + +#endif /* COMMON_RGXHWPERF_BRIDGE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/common_rgxkicksync_bridge.h b/drivers/gpu/drm/phytium/octopus/common_rgxkicksync_bridge.h new file mode 100644 index 000000000000..f355b1a93ee3 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/common_rgxkicksync_bridge.h @@ -0,0 +1,144 @@ +/******************************************************************************* 
+@File +@Title Common bridge header for rgxkicksync +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for rgxkicksync +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef COMMON_RGXKICKSYNC_BRIDGE_H +#define COMMON_RGXKICKSYNC_BRIDGE_H + +#include <powervr/mem_types.h> + +#include "img_defs.h" +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "rgx_bridge.h" +#include "pvrsrv_sync_km.h" + +#define PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST 0 +#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+1 +#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC2 PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXSETKICKSYNCCONTEXTPROPERTY PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+3 +#define PVRSRV_BRIDGE_RGXKICKSYNC_CMD_LAST (PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+3) + +/******************************************* + RGXCreateKickSyncContext + *******************************************/ + +/* Bridge in structure for RGXCreateKickSyncContext */ +typedef struct PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT_TAG +{ + IMG_HANDLE hPrivData; + IMG_UINT32 ui32ContextFlags; + IMG_UINT32 ui32PackedCCBSizeU88; +} __packed PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT; + +/* Bridge out structure for RGXCreateKickSyncContext */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT_TAG +{ + IMG_HANDLE hKickSyncContext; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT; +
+/******************************************* + RGXDestroyKickSyncContext + *******************************************/ + +/* Bridge in structure for RGXDestroyKickSyncContext */ +typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT_TAG +{ + IMG_HANDLE hKickSyncContext; +} __packed PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT; + +/* Bridge out structure for RGXDestroyKickSyncContext */ +typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT; + +/******************************************* + RGXKickSync2 + *******************************************/ + +/* Bridge in structure for RGXKickSync2 */ +typedef struct PVRSRV_BRIDGE_IN_RGXKICKSYNC2_TAG +{ + IMG_HANDLE hKickSyncContext; + IMG_UINT32 *pui32UpdateDevVarOffset; + IMG_UINT32 *pui32UpdateValue; + IMG_CHAR *puiUpdateFenceName; + IMG_HANDLE *phUpdateUFODevVarBlock; + PVRSRV_FENCE hCheckFenceFD; + PVRSRV_TIMELINE hTimelineFenceFD; + IMG_UINT32 ui32ClientCacheOpSeqNum; + IMG_UINT32 ui32ClientUpdateCount; + IMG_UINT32 ui32ExtJobRef; +} __packed PVRSRV_BRIDGE_IN_RGXKICKSYNC2; + +/* Bridge out structure for RGXKickSync2 */ +typedef struct PVRSRV_BRIDGE_OUT_RGXKICKSYNC2_TAG +{ + PVRSRV_ERROR eError; + PVRSRV_FENCE hUpdateFenceFD; +} __packed PVRSRV_BRIDGE_OUT_RGXKICKSYNC2; + +/******************************************* + RGXSetKickSyncContextProperty + *******************************************/ + +/* Bridge in structure for RGXSetKickSyncContextProperty */ +typedef struct PVRSRV_BRIDGE_IN_RGXSETKICKSYNCCONTEXTPROPERTY_TAG +{ + IMG_UINT64 ui64Input; + IMG_HANDLE hKickSyncContext; + IMG_UINT32 ui32Property; +} __packed PVRSRV_BRIDGE_IN_RGXSETKICKSYNCCONTEXTPROPERTY; + +/* Bridge out structure for RGXSetKickSyncContextProperty */ +typedef struct PVRSRV_BRIDGE_OUT_RGXSETKICKSYNCCONTEXTPROPERTY_TAG +{ + IMG_UINT64 ui64Output; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXSETKICKSYNCCONTEXTPROPERTY; + +#endif /* 
COMMON_RGXKICKSYNC_BRIDGE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/common_rgxray_bridge.h b/drivers/gpu/drm/phytium/octopus/common_rgxray_bridge.h new file mode 100644 index 000000000000..9384b6f28b6f --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/common_rgxray_bridge.h @@ -0,0 +1,129 @@ +/******************************************************************************* +@File +@Title Common bridge header for rgxray +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for rgxray +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef COMMON_RGXRAY_BRIDGE_H +#define COMMON_RGXRAY_BRIDGE_H + +#include <powervr/mem_types.h> + +#include "img_defs.h" +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "rgx_bridge.h" +#include "pvrsrv_sync_km.h" + +#define PVRSRV_BRIDGE_RGXRAY_CMD_FIRST 0 +#define PVRSRV_BRIDGE_RGXRAY_RGXCREATERAYCONTEXT PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RGXRAY_RGXDESTROYRAYCONTEXT PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+1 +#define PVRSRV_BRIDGE_RGXRAY_RGXKICKRDM PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RGXRAY_CMD_LAST (PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+2) + +/******************************************* + RGXCreateRayContext + *******************************************/ + +/* Bridge in structure for RGXCreateRayContext */ +typedef struct PVRSRV_BRIDGE_IN_RGXCREATERAYCONTEXT_TAG +{ + IMG_HANDLE hPrivData; + IMG_BYTE *pui8sStaticRayContextState; + IMG_UINT32 ui32ContextFlags; + IMG_UINT32 ui32StaticRayContextStateSize; + IMG_UINT32 ui32ui32Priority; +} __packed PVRSRV_BRIDGE_IN_RGXCREATERAYCONTEXT; + +/* Bridge out structure for RGXCreateRayContext */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCREATERAYCONTEXT_TAG +{ + IMG_HANDLE
hRayContext; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXCREATERAYCONTEXT; + +/******************************************* + RGXDestroyRayContext + *******************************************/ + +/* Bridge in structure for RGXDestroyRayContext */ +typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYRAYCONTEXT_TAG +{ + IMG_HANDLE hRayContext; +} __packed PVRSRV_BRIDGE_IN_RGXDESTROYRAYCONTEXT; + +/* Bridge out structure for RGXDestroyRayContext */ +typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYRAYCONTEXT_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYRAYCONTEXT; + +/******************************************* + RGXKickRDM + *******************************************/ + +/* Bridge in structure for RGXKickRDM */ +typedef struct PVRSRV_BRIDGE_IN_RGXKICKRDM_TAG +{ + IMG_HANDLE hRayContext; + IMG_UINT32 *pui32ClientUpdateOffset; + IMG_UINT32 *pui32ClientUpdateValue; + IMG_BYTE *pui8DMCmd; + IMG_CHAR *puiUpdateFenceName; + IMG_HANDLE *phClientUpdateUFOSyncPrimBlock; + PVRSRV_FENCE hCheckFenceFd; + PVRSRV_TIMELINE hUpdateTimeline; + IMG_UINT32 ui32ClientCacheOpSeqNum; + IMG_UINT32 ui32ClientUpdateCount; + IMG_UINT32 ui32CmdSize; + IMG_UINT32 ui32ExtJobRef; + IMG_UINT32 ui32PDumpFlags; +} __packed PVRSRV_BRIDGE_IN_RGXKICKRDM; + +/* Bridge out structure for RGXKickRDM */ +typedef struct PVRSRV_BRIDGE_OUT_RGXKICKRDM_TAG +{ + PVRSRV_ERROR eError; + PVRSRV_FENCE hUpdateFence; +} __packed PVRSRV_BRIDGE_OUT_RGXKICKRDM; + +#endif /* COMMON_RGXRAY_BRIDGE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/common_rgxregconfig_bridge.h b/drivers/gpu/drm/phytium/octopus/common_rgxregconfig_bridge.h new file mode 100644 index 000000000000..ef02af828bb5 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/common_rgxregconfig_bridge.h @@ -0,0 +1,146 @@ +/******************************************************************************* +@File +@Title Common bridge header for rgxregconfig +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for rgxregconfig +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef COMMON_RGXREGCONFIG_BRIDGE_H +#define COMMON_RGXREGCONFIG_BRIDGE_H + +#include + +#include "img_defs.h" +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "rgx_bridge.h" + +#define PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST 0 +#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXSETREGCONFIGTYPE PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXADDREGCONFIG PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+1 +#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXCLEARREGCONFIG PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXENABLEREGCONFIG PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+3 +#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXDISABLEREGCONFIG PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+4 +#define PVRSRV_BRIDGE_RGXREGCONFIG_CMD_LAST (PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+4) + +/******************************************* + RGXSetRegConfigType + *******************************************/ + +/* Bridge in structure for RGXSetRegConfigType */ +typedef struct PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE_TAG +{ + IMG_UINT8 ui8RegPowerIsland; +} __packed PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE; + +/* Bridge out structure for RGXSetRegConfigType */ +typedef struct PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE; + +/******************************************* + RGXAddRegconfig + 
*******************************************/ + +/* Bridge in structure for RGXAddRegconfig */ +typedef struct PVRSRV_BRIDGE_IN_RGXADDREGCONFIG_TAG +{ + IMG_UINT64 ui64RegMask; + IMG_UINT64 ui64RegValue; + IMG_UINT32 ui32RegAddr; +} __packed PVRSRV_BRIDGE_IN_RGXADDREGCONFIG; + +/* Bridge out structure for RGXAddRegconfig */ +typedef struct PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG; + +/******************************************* + RGXClearRegConfig + *******************************************/ + +/* Bridge in structure for RGXClearRegConfig */ +typedef struct PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __packed PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG; + +/* Bridge out structure for RGXClearRegConfig */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG; + +/******************************************* + RGXEnableRegConfig + *******************************************/ + +/* Bridge in structure for RGXEnableRegConfig */ +typedef struct PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __packed PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG; + +/* Bridge out structure for RGXEnableRegConfig */ +typedef struct PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG; + +/******************************************* + RGXDisableRegConfig + *******************************************/ + +/* Bridge in structure for RGXDisableRegConfig */ +typedef struct PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __packed PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG; + +/* Bridge out structure for RGXDisableRegConfig */ +typedef struct PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG; + +#endif /* 
COMMON_RGXREGCONFIG_BRIDGE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/common_rgxsignals_bridge.h b/drivers/gpu/drm/phytium/octopus/common_rgxsignals_bridge.h new file mode 100644 index 000000000000..a9d779ab7d8a --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/common_rgxsignals_bridge.h @@ -0,0 +1,77 @@ +/******************************************************************************* +@File +@Title Common bridge header for rgxsignals +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for rgxsignals +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef COMMON_RGXSIGNALS_BRIDGE_H +#define COMMON_RGXSIGNALS_BRIDGE_H + +#include + +#include "img_defs.h" +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "rgx_bridge.h" + +#define PVRSRV_BRIDGE_RGXSIGNALS_CMD_FIRST 0 +#define PVRSRV_BRIDGE_RGXSIGNALS_RGXNOTIFYSIGNALUPDATE PVRSRV_BRIDGE_RGXSIGNALS_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RGXSIGNALS_CMD_LAST (PVRSRV_BRIDGE_RGXSIGNALS_CMD_FIRST+0) + +/******************************************* + RGXNotifySignalUpdate + *******************************************/ + +/* Bridge in structure for RGXNotifySignalUpdate */ +typedef struct PVRSRV_BRIDGE_IN_RGXNOTIFYSIGNALUPDATE_TAG +{ + IMG_DEV_VIRTADDR sDevSignalAddress; + IMG_HANDLE hPrivData; +} __packed PVRSRV_BRIDGE_IN_RGXNOTIFYSIGNALUPDATE; + +/* Bridge out structure for RGXNotifySignalUpdate */ +typedef struct PVRSRV_BRIDGE_OUT_RGXNOTIFYSIGNALUPDATE_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXNOTIFYSIGNALUPDATE; + +#endif /* COMMON_RGXSIGNALS_BRIDGE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/common_rgxta3d_bridge.h b/drivers/gpu/drm/phytium/octopus/common_rgxta3d_bridge.h new file mode 100644 
index 000000000000..c08db7ee9ac1 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/common_rgxta3d_bridge.h @@ -0,0 +1,403 @@ +/******************************************************************************* +@File +@Title Common bridge header for rgxta3d +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for rgxta3d +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef COMMON_RGXTA3D_BRIDGE_H +#define COMMON_RGXTA3D_BRIDGE_H + +#include + +#include "img_defs.h" +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "rgx_bridge.h" +#include "rgx_fwif_shared.h" +#include "devicemem_typedefs.h" +#include "pvrsrv_sync_km.h" + +#define PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST 0 +#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+1 +#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+3 +#define PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+4 +#define PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+5 +#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+6 +#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+7 +#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+8 +#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+9 +#define PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+10 +#define PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+11 +#define 
PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2 PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+12 +#define PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPROPERTY PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+13 +#define PVRSRV_BRIDGE_RGXTA3D_CMD_LAST (PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+13) + +/******************************************* + RGXCreateHWRTDataSet + *******************************************/ + +/* Bridge in structure for RGXCreateHWRTDataSet */ +typedef struct PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET_TAG +{ + IMG_DEV_VIRTADDR sPMDataAddr0; + IMG_DEV_VIRTADDR sPMDataAddr1; + IMG_DEV_VIRTADDR sPMSecureDataAddr0; + IMG_DEV_VIRTADDR sPMSecureDataAddr1; + IMG_DEV_VIRTADDR sTailPtrsDevVAddr; + IMG_DEV_VIRTADDR sVHeapTableDevVAddr; + IMG_UINT64 ui64PPPMultiSampleCtl; + IMG_HANDLE *phapsFreeLists; + IMG_UINT32 ui32ISPMergeLowerX; + IMG_UINT32 ui32ISPMergeLowerY; + IMG_UINT32 ui32ISPMergeScaleX; + IMG_UINT32 ui32ISPMergeScaleY; + IMG_UINT32 ui32ISPMergeUpperX; + IMG_UINT32 ui32ISPMergeUpperY; + IMG_UINT32 ui32PPPScreen; + IMG_UINT32 ui32RgnStride; + IMG_UINT32 ui32TEAA; + IMG_UINT32 ui32TEMTILE1; + IMG_UINT32 ui32TEMTILE2; + IMG_UINT32 ui32TEScreen; + IMG_UINT32 ui32TPCSize; + IMG_UINT32 ui32TPCStride; + IMG_UINT16 ui16MaxRTs; +} __packed PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET; + +/* Bridge out structure for RGXCreateHWRTDataSet */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET_TAG +{ + IMG_HANDLE hKmHwRTDataSet0; + IMG_HANDLE hKmHwRTDataSet1; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET; + +/******************************************* + RGXDestroyHWRTDataSet + *******************************************/ + +/* Bridge in structure for RGXDestroyHWRTDataSet */ +typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET_TAG +{ + IMG_HANDLE hKmHwRTDataSet; +} __packed PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET; + +/* Bridge out structure for RGXDestroyHWRTDataSet */ +typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET_TAG +{ + PVRSRV_ERROR eError; +} __packed 
PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET; + +/******************************************* + RGXCreateZSBuffer + *******************************************/ + +/* Bridge in structure for RGXCreateZSBuffer */ +typedef struct PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER_TAG +{ + IMG_HANDLE hPMR; + IMG_HANDLE hReservation; + PVRSRV_MEMALLOCFLAGS_T uiMapFlags; +} __packed PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER; + +/* Bridge out structure for RGXCreateZSBuffer */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER_TAG +{ + IMG_HANDLE hsZSBufferKM; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER; + +/******************************************* + RGXDestroyZSBuffer + *******************************************/ + +/* Bridge in structure for RGXDestroyZSBuffer */ +typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER_TAG +{ + IMG_HANDLE hsZSBufferMemDesc; +} __packed PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER; + +/* Bridge out structure for RGXDestroyZSBuffer */ +typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER; + +/******************************************* + RGXPopulateZSBuffer + *******************************************/ + +/* Bridge in structure for RGXPopulateZSBuffer */ +typedef struct PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER_TAG +{ + IMG_HANDLE hsZSBufferKM; +} __packed PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER; + +/* Bridge out structure for RGXPopulateZSBuffer */ +typedef struct PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER_TAG +{ + IMG_HANDLE hsPopulation; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER; + +/******************************************* + RGXUnpopulateZSBuffer + *******************************************/ + +/* Bridge in structure for RGXUnpopulateZSBuffer */ +typedef struct PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER_TAG +{ + IMG_HANDLE hsPopulation; +} __packed PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER; + +/* Bridge out structure for RGXUnpopulateZSBuffer */ 
+typedef struct PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER; + +/******************************************* + RGXCreateFreeList + *******************************************/ + +/* Bridge in structure for RGXCreateFreeList */ +typedef struct PVRSRV_BRIDGE_IN_RGXCREATEFREELIST_TAG +{ + IMG_DEV_VIRTADDR spsFreeListBaseDevVAddr; + IMG_DEV_VIRTADDR spsFreeListStateDevVAddr; + IMG_DEVMEM_OFFSET_T uiPMROffset; + IMG_DEVMEM_OFFSET_T uiPMRStateOffset; + IMG_HANDLE hMemCtxPrivData; + IMG_HANDLE hsFreeListPMR; + IMG_HANDLE hsFreeListStatePMR; + IMG_HANDLE hsGlobalFreeList; + IMG_BOOL bbFreeListCheck; + IMG_UINT32 ui32GrowFLPages; + IMG_UINT32 ui32GrowParamThreshold; + IMG_UINT32 ui32InitFLPages; + IMG_UINT32 ui32MaxFLPages; +} __packed PVRSRV_BRIDGE_IN_RGXCREATEFREELIST; + +/* Bridge out structure for RGXCreateFreeList */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST_TAG +{ + IMG_HANDLE hCleanupCookie; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST; + +/******************************************* + RGXDestroyFreeList + *******************************************/ + +/* Bridge in structure for RGXDestroyFreeList */ +typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST_TAG +{ + IMG_HANDLE hCleanupCookie; +} __packed PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST; + +/* Bridge out structure for RGXDestroyFreeList */ +typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST; + +/******************************************* + RGXCreateRenderContext + *******************************************/ + +/* Bridge in structure for RGXCreateRenderContext */ +typedef struct PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT_TAG +{ + IMG_UINT64 ui64RobustnessAddress; + IMG_HANDLE hPrivData; + IMG_BYTE *pui8FrameworkCmd; + IMG_BYTE *pui8StaticRenderContextState; + IMG_UINT32 ui32ContextFlags; + IMG_UINT32 ui32FrameworkCmdSize; + 
IMG_UINT32 ui32Max3DDeadlineMS; + IMG_UINT32 ui32MaxTADeadlineMS; + IMG_UINT32 ui32PackedCCBSizeU8888; + IMG_UINT32 ui32Priority; + IMG_UINT32 ui32StaticRenderContextStateSize; +} __packed PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT; + +/* Bridge out structure for RGXCreateRenderContext */ +typedef struct PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT_TAG +{ + IMG_HANDLE hRenderContext; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT; + +/******************************************* + RGXDestroyRenderContext + *******************************************/ + +/* Bridge in structure for RGXDestroyRenderContext */ +typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT_TAG +{ + IMG_HANDLE hCleanupCookie; +} __packed PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT; + +/* Bridge out structure for RGXDestroyRenderContext */ +typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT; + +/******************************************* + RGXSetRenderContextPriority + *******************************************/ + +/* Bridge in structure for RGXSetRenderContextPriority */ +typedef struct PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY_TAG +{ + IMG_HANDLE hRenderContext; + IMG_UINT32 ui32Priority; +} __packed PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY; + +/* Bridge out structure for RGXSetRenderContextPriority */ +typedef struct PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY; + +/******************************************* + RGXRenderContextStalled + *******************************************/ + +/* Bridge in structure for RGXRenderContextStalled */ +typedef struct PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED_TAG +{ + IMG_HANDLE hRenderContext; +} __packed PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED; + +/* Bridge out structure for RGXRenderContextStalled */ +typedef struct 
PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED; + +/******************************************* + RGXKickTA3D2 + *******************************************/ + +/* Bridge in structure for RGXKickTA3D2 */ +typedef struct PVRSRV_BRIDGE_IN_RGXKICKTA3D2_TAG +{ + IMG_UINT64 ui64Deadline; + IMG_HANDLE hKMHWRTDataSet; + IMG_HANDLE hMSAAScratchBuffer; + IMG_HANDLE hPRFenceUFOSyncPrimBlock; + IMG_HANDLE hRenderContext; + IMG_HANDLE hZSBuffer; + IMG_UINT32 *pui32Client3DUpdateSyncOffset; + IMG_UINT32 *pui32Client3DUpdateValue; + IMG_UINT32 *pui32ClientTAFenceSyncOffset; + IMG_UINT32 *pui32ClientTAFenceValue; + IMG_UINT32 *pui32ClientTAUpdateSyncOffset; + IMG_UINT32 *pui32ClientTAUpdateValue; + IMG_UINT32 *pui32SyncPMRFlags; + IMG_BYTE *pui83DCmd; + IMG_BYTE *pui83DPRCmd; + IMG_BYTE *pui8TACmd; + IMG_CHAR *puiUpdateFenceName; + IMG_CHAR *puiUpdateFenceName3D; + IMG_HANDLE *phClient3DUpdateSyncPrimBlock; + IMG_HANDLE *phClientTAFenceSyncPrimBlock; + IMG_HANDLE *phClientTAUpdateSyncPrimBlock; + IMG_HANDLE *phSyncPMRs; + IMG_BOOL bbAbort; + IMG_BOOL bbKick3D; + IMG_BOOL bbKickPR; + IMG_BOOL bbKickTA; + PVRSRV_FENCE hCheckFence; + PVRSRV_FENCE hCheckFence3D; + PVRSRV_TIMELINE hUpdateTimeline; + PVRSRV_TIMELINE hUpdateTimeline3D; + IMG_UINT32 ui323DCmdSize; + IMG_UINT32 ui323DPRCmdSize; + IMG_UINT32 ui32Client3DUpdateCount; + IMG_UINT32 ui32ClientCacheOpSeqNum; + IMG_UINT32 ui32ClientTAFenceCount; + IMG_UINT32 ui32ClientTAUpdateCount; + IMG_UINT32 ui32ExtJobRef; + IMG_UINT32 ui32FRFenceUFOSyncOffset; + IMG_UINT32 ui32FRFenceValue; + IMG_UINT32 ui32NumberOfDrawCalls; + IMG_UINT32 ui32NumberOfIndices; + IMG_UINT32 ui32NumberOfMRTs; + IMG_UINT32 ui32PDumpFlags; + IMG_UINT32 ui32RenderTargetSize; + IMG_UINT32 ui32SyncPMRCount; + IMG_UINT32 ui32TACmdSize; +} __packed PVRSRV_BRIDGE_IN_RGXKICKTA3D2; + +/* Bridge out structure for RGXKickTA3D2 */ +typedef struct PVRSRV_BRIDGE_OUT_RGXKICKTA3D2_TAG +{ + 
PVRSRV_ERROR eError; + PVRSRV_FENCE hUpdateFence; + PVRSRV_FENCE hUpdateFence3D; +} __packed PVRSRV_BRIDGE_OUT_RGXKICKTA3D2; + +/******************************************* + RGXSetRenderContextProperty + *******************************************/ + +/* Bridge in structure for RGXSetRenderContextProperty */ +typedef struct PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY_TAG +{ + IMG_UINT64 ui64Input; + IMG_HANDLE hRenderContext; + IMG_UINT32 ui32Property; +} __packed PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY; + +/* Bridge out structure for RGXSetRenderContextProperty */ +typedef struct PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY_TAG +{ + IMG_UINT64 ui64Output; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY; + +#endif /* COMMON_RGXTA3D_BRIDGE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/common_rgxtimerquery_bridge.h b/drivers/gpu/drm/phytium/octopus/common_rgxtimerquery_bridge.h new file mode 100644 index 000000000000..0631839bca74 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/common_rgxtimerquery_bridge.h @@ -0,0 +1,112 @@ +/******************************************************************************* +@File +@Title Common bridge header for rgxtimerquery +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for rgxtimerquery +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_RGXTIMERQUERY_BRIDGE_H +#define COMMON_RGXTIMERQUERY_BRIDGE_H + +#include + +#include "img_defs.h" +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "rgx_bridge.h" + +#define PVRSRV_BRIDGE_RGXTIMERQUERY_CMD_FIRST 0 +#define PVRSRV_BRIDGE_RGXTIMERQUERY_RGXBEGINTIMERQUERY PVRSRV_BRIDGE_RGXTIMERQUERY_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RGXTIMERQUERY_RGXENDTIMERQUERY PVRSRV_BRIDGE_RGXTIMERQUERY_CMD_FIRST+1 +#define PVRSRV_BRIDGE_RGXTIMERQUERY_RGXQUERYTIMER PVRSRV_BRIDGE_RGXTIMERQUERY_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RGXTIMERQUERY_CMD_LAST (PVRSRV_BRIDGE_RGXTIMERQUERY_CMD_FIRST+2) + +/******************************************* + RGXBeginTimerQuery + *******************************************/ + +/* Bridge in structure for RGXBeginTimerQuery */ +typedef struct PVRSRV_BRIDGE_IN_RGXBEGINTIMERQUERY_TAG +{ + IMG_UINT32 ui32QueryId; +} __packed PVRSRV_BRIDGE_IN_RGXBEGINTIMERQUERY; + +/* Bridge out structure for RGXBeginTimerQuery */ +typedef struct PVRSRV_BRIDGE_OUT_RGXBEGINTIMERQUERY_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXBEGINTIMERQUERY; + +/******************************************* + RGXEndTimerQuery + *******************************************/ + +/* Bridge in structure for RGXEndTimerQuery */ +typedef struct PVRSRV_BRIDGE_IN_RGXENDTIMERQUERY_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __packed PVRSRV_BRIDGE_IN_RGXENDTIMERQUERY; + +/* Bridge out structure for RGXEndTimerQuery */ +typedef struct PVRSRV_BRIDGE_OUT_RGXENDTIMERQUERY_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXENDTIMERQUERY; + +/******************************************* + RGXQueryTimer + *******************************************/ + +/* Bridge in structure for RGXQueryTimer */ +typedef struct PVRSRV_BRIDGE_IN_RGXQUERYTIMER_TAG +{ + IMG_UINT32 ui32QueryId; +} __packed PVRSRV_BRIDGE_IN_RGXQUERYTIMER; + +/* Bridge out structure for 
RGXQueryTimer */ +typedef struct PVRSRV_BRIDGE_OUT_RGXQUERYTIMER_TAG +{ + IMG_UINT64 ui64EndTime; + IMG_UINT64 ui64StartTime; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXQUERYTIMER; + +#endif /* COMMON_RGXTIMERQUERY_BRIDGE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/common_rgxtq2_bridge.h b/drivers/gpu/drm/phytium/octopus/common_rgxtq2_bridge.h new file mode 100644 index 000000000000..2fbf92231f17 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/common_rgxtq2_bridge.h @@ -0,0 +1,228 @@ +/******************************************************************************* +@File +@Title Common bridge header for rgxtq2 +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for rgxtq2 +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef COMMON_RGXTQ2_BRIDGE_H +#define COMMON_RGXTQ2_BRIDGE_H + +#include + +#include "img_defs.h" +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "rgx_bridge.h" +#include "pvrsrv_sync_km.h" + +#define PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST 0 +#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+0 +#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+1 +#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+2 +#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+3 +#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER2 PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+4 +#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMGETSHAREDMEMORY PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+5 +#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMRELEASESHAREDMEMORY PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+6 +#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPROPERTY PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+7 +#define PVRSRV_BRIDGE_RGXTQ2_CMD_LAST (PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+7) + +/******************************************* + RGXTDMCreateTransferContext + *******************************************/ + +/* Bridge in structure for RGXTDMCreateTransferContext */ +typedef struct PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT_TAG +{ + IMG_HANDLE hPrivData; + IMG_BYTE *pui8FrameworkCmd; + IMG_UINT32 ui32ContextFlags; + IMG_UINT32 ui32FrameworkCmdSize; + IMG_UINT32 ui32PackedCCBSizeU88; + IMG_UINT32 ui32Priority; +} __packed PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT; + +/* Bridge out structure for RGXTDMCreateTransferContext */ +typedef struct PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT_TAG +{ + IMG_HANDLE hTransferContext; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT; + +/******************************************* + RGXTDMDestroyTransferContext + 
*******************************************/ + +/* Bridge in structure for RGXTDMDestroyTransferContext */ +typedef struct PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT_TAG +{ + IMG_HANDLE hTransferContext; +} __packed PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT; + +/* Bridge out structure for RGXTDMDestroyTransferContext */ +typedef struct PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT; + +/******************************************* + RGXTDMSetTransferContextPriority + *******************************************/ + +/* Bridge in structure for RGXTDMSetTransferContextPriority */ +typedef struct PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY_TAG +{ + IMG_HANDLE hTransferContext; + IMG_UINT32 ui32Priority; +} __packed PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY; + +/* Bridge out structure for RGXTDMSetTransferContextPriority */ +typedef struct PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY; + +/******************************************* + RGXTDMNotifyWriteOffsetUpdate + *******************************************/ + +/* Bridge in structure for RGXTDMNotifyWriteOffsetUpdate */ +typedef struct PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE_TAG +{ + IMG_HANDLE hTransferContext; + IMG_UINT32 ui32PDumpFlags; +} __packed PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE; + +/* Bridge out structure for RGXTDMNotifyWriteOffsetUpdate */ +typedef struct PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE; + +/******************************************* + RGXTDMSubmitTransfer2 + *******************************************/ + +/* Bridge in structure for RGXTDMSubmitTransfer2 */ +typedef struct PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER2_TAG +{ + IMG_UINT64 ui64DeadlineInus; + IMG_HANDLE hTransferContext; + 
IMG_UINT32 *pui32SyncPMRFlags; + IMG_UINT32 *pui32UpdateSyncOffset; + IMG_UINT32 *pui32UpdateValue; + IMG_UINT8 *pui8FWCommand; + IMG_CHAR *puiUpdateFenceName; + IMG_HANDLE *phSyncPMRs; + IMG_HANDLE *phUpdateUFOSyncPrimBlock; + PVRSRV_FENCE hCheckFenceFD; + PVRSRV_TIMELINE hUpdateTimeline; + IMG_UINT32 ui32Characteristic1; + IMG_UINT32 ui32Characteristic2; + IMG_UINT32 ui32ClientCacheOpSeqNum; + IMG_UINT32 ui32ClientUpdateCount; + IMG_UINT32 ui32CommandSize; + IMG_UINT32 ui32ExternalJobReference; + IMG_UINT32 ui32PDumpFlags; + IMG_UINT32 ui32SyncPMRCount; +} __packed PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER2; + +/* Bridge out structure for RGXTDMSubmitTransfer2 */ +typedef struct PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER2_TAG +{ + PVRSRV_ERROR eError; + PVRSRV_FENCE hUpdateFence; +} __packed PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER2; + +/******************************************* + RGXTDMGetSharedMemory + *******************************************/ + +/* Bridge in structure for RGXTDMGetSharedMemory */ +typedef struct PVRSRV_BRIDGE_IN_RGXTDMGETSHAREDMEMORY_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __packed PVRSRV_BRIDGE_IN_RGXTDMGETSHAREDMEMORY; + +/* Bridge out structure for RGXTDMGetSharedMemory */ +typedef struct PVRSRV_BRIDGE_OUT_RGXTDMGETSHAREDMEMORY_TAG +{ + IMG_HANDLE hCLIPMRMem; + IMG_HANDLE hUSCPMRMem; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXTDMGETSHAREDMEMORY; + +/******************************************* + RGXTDMReleaseSharedMemory + *******************************************/ + +/* Bridge in structure for RGXTDMReleaseSharedMemory */ +typedef struct PVRSRV_BRIDGE_IN_RGXTDMRELEASESHAREDMEMORY_TAG +{ + IMG_HANDLE hPMRMem; +} __packed PVRSRV_BRIDGE_IN_RGXTDMRELEASESHAREDMEMORY; + +/* Bridge out structure for RGXTDMReleaseSharedMemory */ +typedef struct PVRSRV_BRIDGE_OUT_RGXTDMRELEASESHAREDMEMORY_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXTDMRELEASESHAREDMEMORY; + +/******************************************* + 
RGXTDMSetTransferContextProperty + *******************************************/ + +/* Bridge in structure for RGXTDMSetTransferContextProperty */ +typedef struct PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPROPERTY_TAG +{ + IMG_UINT64 ui64Input; + IMG_HANDLE hTransferContext; + IMG_UINT32 ui32Property; +} __packed PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPROPERTY; + +/* Bridge out structure for RGXTDMSetTransferContextProperty */ +typedef struct PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPROPERTY_TAG +{ + IMG_UINT64 ui64Output; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPROPERTY; + +#endif /* COMMON_RGXTQ2_BRIDGE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/common_rgxtq_bridge.h b/drivers/gpu/drm/phytium/octopus/common_rgxtq_bridge.h new file mode 100644 index 000000000000..055e3b0f4a92 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/common_rgxtq_bridge.h @@ -0,0 +1,176 @@ +/******************************************************************************* +@File +@Title Common bridge header for rgxtq +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for rgxtq +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/
+
+#ifndef COMMON_RGXTQ_BRIDGE_H
+#define COMMON_RGXTQ_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include "pvrsrv_sync_km.h"
+
+#define PVRSRV_BRIDGE_RGXTQ_CMD_FIRST 0
+#define PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2 PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPROPERTY PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXTQ_CMD_LAST (PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+4)
+
+/*******************************************
+ RGXCreateTransferContext
+ *******************************************/
+
+/* Bridge in structure for RGXCreateTransferContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT_TAG
+{
+ IMG_HANDLE hPrivData;
+ IMG_BYTE *pui8FrameworkCmd;
+ IMG_UINT32 ui32ContextFlags;
+ IMG_UINT32 ui32FrameworkCmdize;
+ IMG_UINT32 ui32PackedCCBSizeU8888;
+ IMG_UINT32 ui32Priority;
+} __packed PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT;
+
+/* Bridge out structure for RGXCreateTransferContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT_TAG
+{
+ IMG_HANDLE hCLIPMRMem;
+ IMG_HANDLE hTransferContext;
+ IMG_HANDLE hUSCPMRMem;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT;
+
+/*******************************************
+ RGXDestroyTransferContext
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyTransferContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT_TAG
+{
+ IMG_HANDLE hTransferContext;
+} __packed PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT;
+
+/* Bridge out structure for RGXDestroyTransferContext
*/ +typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT; + +/******************************************* + RGXSetTransferContextPriority + *******************************************/ + +/* Bridge in structure for RGXSetTransferContextPriority */ +typedef struct PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY_TAG +{ + IMG_HANDLE hTransferContext; + IMG_UINT32 ui32Priority; +} __packed PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY; + +/* Bridge out structure for RGXSetTransferContextPriority */ +typedef struct PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY; + +/******************************************* + RGXSubmitTransfer2 + *******************************************/ + +/* Bridge in structure for RGXSubmitTransfer2 */ +typedef struct PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER2_TAG +{ + IMG_HANDLE hTransferContext; + IMG_UINT32 *pui32ClientUpdateCount; + IMG_UINT32 *pui32CommandSize; + IMG_UINT32 *pui32SyncPMRFlags; + IMG_UINT32 *pui32TQPrepareFlags; + IMG_UINT32 **pui32UpdateSyncOffset; + IMG_UINT32 **pui32UpdateValue; + IMG_UINT8 **pui8FWCommand; + IMG_CHAR *puiUpdateFenceName; + IMG_HANDLE *phSyncPMRs; + IMG_HANDLE **phUpdateUFOSyncPrimBlock; + PVRSRV_TIMELINE h2DUpdateTimeline; + PVRSRV_TIMELINE h3DUpdateTimeline; + PVRSRV_FENCE hCheckFenceFD; + IMG_UINT32 ui32ClientCacheOpSeqNum; + IMG_UINT32 ui32ExtJobRef; + IMG_UINT32 ui32PrepareCount; + IMG_UINT32 ui32SyncPMRCount; +} __packed PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER2; + +/* Bridge out structure for RGXSubmitTransfer2 */ +typedef struct PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER2_TAG +{ + PVRSRV_ERROR eError; + PVRSRV_FENCE h2DUpdateFence; + PVRSRV_FENCE h3DUpdateFence; +} __packed PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER2; + +/******************************************* + RGXSetTransferContextProperty + *******************************************/ + 
+/* Bridge in structure for RGXSetTransferContextProperty */ +typedef struct PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPROPERTY_TAG +{ + IMG_UINT64 ui64Input; + IMG_HANDLE hTransferContext; + IMG_UINT32 ui32Property; +} __packed PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPROPERTY; + +/* Bridge out structure for RGXSetTransferContextProperty */ +typedef struct PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPROPERTY_TAG +{ + IMG_UINT64 ui64Output; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPROPERTY; + +#endif /* COMMON_RGXTQ_BRIDGE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/common_ri_bridge.h b/drivers/gpu/drm/phytium/octopus/common_ri_bridge.h new file mode 100644 index 000000000000..d1f2a9894981 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/common_ri_bridge.h @@ -0,0 +1,225 @@ +/******************************************************************************* +@File +@Title Common bridge header for ri +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for ri +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/
+
+#ifndef COMMON_RI_BRIDGE_H
+#define COMMON_RI_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "ri_typedefs.h"
+
+#define PVRSRV_BRIDGE_RI_CMD_FIRST 0
+#define PVRSRV_BRIDGE_RI_RIWRITEPMRENTRY PVRSRV_BRIDGE_RI_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RI_RIWRITEMEMDESCENTRY PVRSRV_BRIDGE_RI_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RI_RIWRITEPROCLISTENTRY PVRSRV_BRIDGE_RI_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCADDR PVRSRV_BRIDGE_RI_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RI_RIDELETEMEMDESCENTRY PVRSRV_BRIDGE_RI_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RI_RIDUMPLIST PVRSRV_BRIDGE_RI_CMD_FIRST+5
+#define PVRSRV_BRIDGE_RI_RIDUMPALL PVRSRV_BRIDGE_RI_CMD_FIRST+6
+#define PVRSRV_BRIDGE_RI_RIDUMPPROCESS PVRSRV_BRIDGE_RI_CMD_FIRST+7
+#define PVRSRV_BRIDGE_RI_RIWRITEPMRENTRYWITHOWNER PVRSRV_BRIDGE_RI_CMD_FIRST+8
+#define PVRSRV_BRIDGE_RI_CMD_LAST (PVRSRV_BRIDGE_RI_CMD_FIRST+8)
+
+/*******************************************
+ RIWritePMREntry
+ *******************************************/
+
+/* Bridge in structure for RIWritePMREntry */
+typedef struct PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY_TAG
+{
+ IMG_HANDLE hPMRHandle;
+} __packed PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY;
+
+/* Bridge out structure for RIWritePMREntry */
+typedef struct PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY;
+
+/*******************************************
+ RIWriteMEMDESCEntry
+ *******************************************/
+
+/* Bridge in structure for RIWriteMEMDESCEntry */
+typedef struct PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY_TAG
+{
+ IMG_UINT64 ui64Offset;
+ IMG_UINT64 ui64Size;
+ IMG_HANDLE hPMRHandle;
+ const IMG_CHAR *puiTextB;
+ IMG_BOOL bIsImport;
+ IMG_BOOL bIsSuballoc;
+ IMG_UINT32 ui32TextBSize;
+} __packed PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY;
+
+/* Bridge out structure for RIWriteMEMDESCEntry */
+typedef
struct PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY_TAG +{ + IMG_HANDLE hRIHandle; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY; + +/******************************************* + RIWriteProcListEntry + *******************************************/ + +/* Bridge in structure for RIWriteProcListEntry */ +typedef struct PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY_TAG +{ + IMG_UINT64 ui64DevVAddr; + IMG_UINT64 ui64Size; + const IMG_CHAR *puiTextB; + IMG_UINT32 ui32TextBSize; +} __packed PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY; + +/* Bridge out structure for RIWriteProcListEntry */ +typedef struct PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY_TAG +{ + IMG_HANDLE hRIHandle; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY; + +/******************************************* + RIUpdateMEMDESCAddr + *******************************************/ + +/* Bridge in structure for RIUpdateMEMDESCAddr */ +typedef struct PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR_TAG +{ + IMG_DEV_VIRTADDR sAddr; + IMG_HANDLE hRIHandle; +} __packed PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR; + +/* Bridge out structure for RIUpdateMEMDESCAddr */ +typedef struct PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR; + +/******************************************* + RIDeleteMEMDESCEntry + *******************************************/ + +/* Bridge in structure for RIDeleteMEMDESCEntry */ +typedef struct PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY_TAG +{ + IMG_HANDLE hRIHandle; +} __packed PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY; + +/* Bridge out structure for RIDeleteMEMDESCEntry */ +typedef struct PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY; + +/******************************************* + RIDumpList + *******************************************/ + +/* Bridge in structure for RIDumpList */ +typedef struct PVRSRV_BRIDGE_IN_RIDUMPLIST_TAG +{ + IMG_HANDLE 
hPMRHandle; +} __packed PVRSRV_BRIDGE_IN_RIDUMPLIST; + +/* Bridge out structure for RIDumpList */ +typedef struct PVRSRV_BRIDGE_OUT_RIDUMPLIST_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RIDUMPLIST; + +/******************************************* + RIDumpAll + *******************************************/ + +/* Bridge in structure for RIDumpAll */ +typedef struct PVRSRV_BRIDGE_IN_RIDUMPALL_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __packed PVRSRV_BRIDGE_IN_RIDUMPALL; + +/* Bridge out structure for RIDumpAll */ +typedef struct PVRSRV_BRIDGE_OUT_RIDUMPALL_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RIDUMPALL; + +/******************************************* + RIDumpProcess + *******************************************/ + +/* Bridge in structure for RIDumpProcess */ +typedef struct PVRSRV_BRIDGE_IN_RIDUMPPROCESS_TAG +{ + IMG_PID ui32Pid; +} __packed PVRSRV_BRIDGE_IN_RIDUMPPROCESS; + +/* Bridge out structure for RIDumpProcess */ +typedef struct PVRSRV_BRIDGE_OUT_RIDUMPPROCESS_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RIDUMPPROCESS; + +/******************************************* + RIWritePMREntryWithOwner + *******************************************/ + +/* Bridge in structure for RIWritePMREntryWithOwner */ +typedef struct PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER_TAG +{ + IMG_HANDLE hPMRHandle; + IMG_PID ui32Owner; +} __packed PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER; + +/* Bridge out structure for RIWritePMREntryWithOwner */ +typedef struct PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER; + +#endif /* COMMON_RI_BRIDGE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/common_srvcore_bridge.h b/drivers/gpu/drm/phytium/octopus/common_srvcore_bridge.h new file mode 100644 index 000000000000..ea08d54d3d37 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/common_srvcore_bridge.h @@ -0,0 +1,369 @@ 
+/******************************************************************************* +@File +@Title Common bridge header for srvcore +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for srvcore +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_SRVCORE_BRIDGE_H
+#define COMMON_SRVCORE_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pvrsrv_device_types.h"
+#include "cache_ops.h"
+
+#define PVRSRV_BRIDGE_SRVCORE_CMD_FIRST 0
+#define PVRSRV_BRIDGE_SRVCORE_CONNECT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+0
+#define PVRSRV_BRIDGE_SRVCORE_DISCONNECT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+1
+#define PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+2
+#define PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+3
+#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+4
+#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+5
+#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+6
+#define PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+7
+#define PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+8
+#define PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+9
+#define PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+10
+#define PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+11
+#define PVRSRV_BRIDGE_SRVCORE_GETMULTICOREINFO PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+12
+#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT
PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+13 +#define PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+14 +#define PVRSRV_BRIDGE_SRVCORE_ACQUIREINFOPAGE PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+15 +#define PVRSRV_BRIDGE_SRVCORE_RELEASEINFOPAGE PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+16 +#define PVRSRV_BRIDGE_SRVCORE_CMD_LAST (PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+16) + +/******************************************* + Connect + *******************************************/ + +/* Bridge in structure for Connect */ +typedef struct PVRSRV_BRIDGE_IN_CONNECT_TAG +{ + IMG_UINT32 ui32ClientBuildOptions; + IMG_UINT32 ui32ClientDDKBuild; + IMG_UINT32 ui32ClientDDKVersion; + IMG_UINT32 ui32Flags; +} __packed PVRSRV_BRIDGE_IN_CONNECT; + +/* Bridge out structure for Connect */ +typedef struct PVRSRV_BRIDGE_OUT_CONNECT_TAG +{ + IMG_UINT64 ui64PackedBvnc; + PVRSRV_ERROR eError; + IMG_UINT32 ui32CapabilityFlags; + IMG_UINT8 ui8KernelArch; +} __packed PVRSRV_BRIDGE_OUT_CONNECT; + +/******************************************* + Disconnect + *******************************************/ + +/* Bridge in structure for Disconnect */ +typedef struct PVRSRV_BRIDGE_IN_DISCONNECT_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __packed PVRSRV_BRIDGE_IN_DISCONNECT; + +/* Bridge out structure for Disconnect */ +typedef struct PVRSRV_BRIDGE_OUT_DISCONNECT_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DISCONNECT; + +/******************************************* + AcquireGlobalEventObject + *******************************************/ + +/* Bridge in structure for AcquireGlobalEventObject */ +typedef struct PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __packed PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT; + +/* Bridge out structure for AcquireGlobalEventObject */ +typedef struct PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT_TAG +{ + IMG_HANDLE hGlobalEventObject; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT; + 
+/******************************************* + ReleaseGlobalEventObject + *******************************************/ + +/* Bridge in structure for ReleaseGlobalEventObject */ +typedef struct PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT_TAG +{ + IMG_HANDLE hGlobalEventObject; +} __packed PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT; + +/* Bridge out structure for ReleaseGlobalEventObject */ +typedef struct PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT; + +/******************************************* + EventObjectOpen + *******************************************/ + +/* Bridge in structure for EventObjectOpen */ +typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN_TAG +{ + IMG_HANDLE hEventObject; +} __packed PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN; + +/* Bridge out structure for EventObjectOpen */ +typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN_TAG +{ + IMG_HANDLE hOSEvent; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN; + +/******************************************* + EventObjectWait + *******************************************/ + +/* Bridge in structure for EventObjectWait */ +typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT_TAG +{ + IMG_HANDLE hOSEventKM; +} __packed PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT; + +/* Bridge out structure for EventObjectWait */ +typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT; + +/******************************************* + EventObjectClose + *******************************************/ + +/* Bridge in structure for EventObjectClose */ +typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE_TAG +{ + IMG_HANDLE hOSEventKM; +} __packed PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE; + +/* Bridge out structure for EventObjectClose */ +typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE; + 
+/******************************************* + DumpDebugInfo + *******************************************/ + +/* Bridge in structure for DumpDebugInfo */ +typedef struct PVRSRV_BRIDGE_IN_DUMPDEBUGINFO_TAG +{ + IMG_UINT32 ui32VerbLevel; +} __packed PVRSRV_BRIDGE_IN_DUMPDEBUGINFO; + +/* Bridge out structure for DumpDebugInfo */ +typedef struct PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO; + +/******************************************* + GetDevClockSpeed + *******************************************/ + +/* Bridge in structure for GetDevClockSpeed */ +typedef struct PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __packed PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED; + +/* Bridge out structure for GetDevClockSpeed */ +typedef struct PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED_TAG +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32ClockSpeed; +} __packed PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED; + +/******************************************* + HWOpTimeout + *******************************************/ + +/* Bridge in structure for HWOpTimeout */ +typedef struct PVRSRV_BRIDGE_IN_HWOPTIMEOUT_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __packed PVRSRV_BRIDGE_IN_HWOPTIMEOUT; + +/* Bridge out structure for HWOpTimeout */ +typedef struct PVRSRV_BRIDGE_OUT_HWOPTIMEOUT_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_HWOPTIMEOUT; + +/******************************************* + AlignmentCheck + *******************************************/ + +/* Bridge in structure for AlignmentCheck */ +typedef struct PVRSRV_BRIDGE_IN_ALIGNMENTCHECK_TAG +{ + IMG_UINT32 *pui32AlignChecks; + IMG_UINT32 ui32AlignChecksSize; +} __packed PVRSRV_BRIDGE_IN_ALIGNMENTCHECK; + +/* Bridge out structure for AlignmentCheck */ +typedef struct PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK; + +/******************************************* + GetDeviceStatus + 
*******************************************/ + +/* Bridge in structure for GetDeviceStatus */ +typedef struct PVRSRV_BRIDGE_IN_GETDEVICESTATUS_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __packed PVRSRV_BRIDGE_IN_GETDEVICESTATUS; + +/* Bridge out structure for GetDeviceStatus */ +typedef struct PVRSRV_BRIDGE_OUT_GETDEVICESTATUS_TAG +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32DeviceSatus; +} __packed PVRSRV_BRIDGE_OUT_GETDEVICESTATUS; + +/******************************************* + GetMultiCoreInfo + *******************************************/ + +/* Bridge in structure for GetMultiCoreInfo */ +typedef struct PVRSRV_BRIDGE_IN_GETMULTICOREINFO_TAG +{ + IMG_UINT64 *pui64Caps; + IMG_UINT32 ui32CapsSize; +} __packed PVRSRV_BRIDGE_IN_GETMULTICOREINFO; + +/* Bridge out structure for GetMultiCoreInfo */ +typedef struct PVRSRV_BRIDGE_OUT_GETMULTICOREINFO_TAG +{ + IMG_UINT64 *pui64Caps; + PVRSRV_ERROR eError; + IMG_UINT32 ui32NumCores; +} __packed PVRSRV_BRIDGE_OUT_GETMULTICOREINFO; + +/******************************************* + EventObjectWaitTimeout + *******************************************/ + +/* Bridge in structure for EventObjectWaitTimeout */ +typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT_TAG +{ + IMG_UINT64 ui64uiTimeoutus; + IMG_HANDLE hOSEventKM; +} __packed PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT; + +/* Bridge out structure for EventObjectWaitTimeout */ +typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT; + +/******************************************* + FindProcessMemStats + *******************************************/ + +/* Bridge in structure for FindProcessMemStats */ +typedef struct PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS_TAG +{ + IMG_UINT32 *pui32MemStatsArray; + IMG_BOOL bbAllProcessStats; + IMG_UINT32 ui32ArrSize; + IMG_UINT32 ui32PID; +} __packed PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS; + +/* Bridge out structure for FindProcessMemStats */ +typedef 
struct PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS_TAG +{ + IMG_UINT32 *pui32MemStatsArray; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS; + +/******************************************* + AcquireInfoPage + *******************************************/ + +/* Bridge in structure for AcquireInfoPage */ +typedef struct PVRSRV_BRIDGE_IN_ACQUIREINFOPAGE_TAG +{ + IMG_UINT32 ui32EmptyStructPlaceholder; +} __packed PVRSRV_BRIDGE_IN_ACQUIREINFOPAGE; + +/* Bridge out structure for AcquireInfoPage */ +typedef struct PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE_TAG +{ + IMG_HANDLE hPMR; + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE; + +/******************************************* + ReleaseInfoPage + *******************************************/ + +/* Bridge in structure for ReleaseInfoPage */ +typedef struct PVRSRV_BRIDGE_IN_RELEASEINFOPAGE_TAG +{ + IMG_HANDLE hPMR; +} __packed PVRSRV_BRIDGE_IN_RELEASEINFOPAGE; + +/* Bridge out structure for ReleaseInfoPage */ +typedef struct PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE; + +#endif /* COMMON_SRVCORE_BRIDGE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/common_sync_bridge.h b/drivers/gpu/drm/phytium/octopus/common_sync_bridge.h new file mode 100644 index 000000000000..62baf70450fb --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/common_sync_bridge.h @@ -0,0 +1,254 @@ +/******************************************************************************* +@File +@Title Common bridge header for sync +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for sync +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/
+
+#ifndef COMMON_SYNC_BRIDGE_H
+#define COMMON_SYNC_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pdump.h"
+#include "pdumpdefs.h"
+#include "devicemem_typedefs.h"
+#include "pvrsrv_sync_km.h"
+#include <powervr/pvrsrv_sync_ext.h>
+
+#define PVRSRV_BRIDGE_SYNC_CMD_FIRST 0
+#define PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK PVRSRV_BRIDGE_SYNC_CMD_FIRST+0
+#define PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK PVRSRV_BRIDGE_SYNC_CMD_FIRST+1
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMSET PVRSRV_BRIDGE_SYNC_CMD_FIRST+2
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP PVRSRV_BRIDGE_SYNC_CMD_FIRST+3
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE PVRSRV_BRIDGE_SYNC_CMD_FIRST+4
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL PVRSRV_BRIDGE_SYNC_CMD_FIRST+5
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP PVRSRV_BRIDGE_SYNC_CMD_FIRST+6
+#define PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT PVRSRV_BRIDGE_SYNC_CMD_FIRST+7
+#define PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT PVRSRV_BRIDGE_SYNC_CMD_FIRST+8
+#define PVRSRV_BRIDGE_SYNC_SYNCCHECKPOINTSIGNALLEDPDUMPPOL PVRSRV_BRIDGE_SYNC_CMD_FIRST+9
+#define PVRSRV_BRIDGE_SYNC_CMD_LAST (PVRSRV_BRIDGE_SYNC_CMD_FIRST+9)
+
+/*******************************************
+ AllocSyncPrimitiveBlock
+ *******************************************/
+
+/* Bridge in structure for AllocSyncPrimitiveBlock */
+typedef struct PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK;
+
+/* Bridge out structure for AllocSyncPrimitiveBlock */
+typedef struct PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK_TAG
+{
+ IMG_HANDLE hSyncHandle;
+ IMG_HANDLE hhSyncPMR;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32SyncPrimBlockSize;
+ IMG_UINT32 ui32SyncPrimVAddr;
+} __packed PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK;
+
+/*******************************************
+ FreeSyncPrimitiveBlock
+ 
*******************************************/ + +/* Bridge in structure for FreeSyncPrimitiveBlock */ +typedef struct PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK_TAG +{ + IMG_HANDLE hSyncHandle; +} __packed PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK; + +/* Bridge out structure for FreeSyncPrimitiveBlock */ +typedef struct PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK; + +/******************************************* + SyncPrimSet + *******************************************/ + +/* Bridge in structure for SyncPrimSet */ +typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMSET_TAG +{ + IMG_HANDLE hSyncHandle; + IMG_UINT32 ui32Index; + IMG_UINT32 ui32Value; +} __packed PVRSRV_BRIDGE_IN_SYNCPRIMSET; + +/* Bridge out structure for SyncPrimSet */ +typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMSET_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_SYNCPRIMSET; + +/******************************************* + SyncPrimPDump + *******************************************/ + +/* Bridge in structure for SyncPrimPDump */ +typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP_TAG +{ + IMG_HANDLE hSyncHandle; + IMG_UINT32 ui32Offset; +} __packed PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP; + +/* Bridge out structure for SyncPrimPDump */ +typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP; + +/******************************************* + SyncPrimPDumpValue + *******************************************/ + +/* Bridge in structure for SyncPrimPDumpValue */ +typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE_TAG +{ + IMG_HANDLE hSyncHandle; + IMG_UINT32 ui32Offset; + IMG_UINT32 ui32Value; +} __packed PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE; + +/* Bridge out structure for SyncPrimPDumpValue */ +typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE; + +/******************************************* + 
SyncPrimPDumpPol + *******************************************/ + +/* Bridge in structure for SyncPrimPDumpPol */ +typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL_TAG +{ + IMG_HANDLE hSyncHandle; + PDUMP_POLL_OPERATOR eOperator; + IMG_UINT32 ui32Mask; + IMG_UINT32 ui32Offset; + IMG_UINT32 ui32Value; + PDUMP_FLAGS_T uiPDumpFlags; +} __packed PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL; + +/* Bridge out structure for SyncPrimPDumpPol */ +typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL; + +/******************************************* + SyncPrimPDumpCBP + *******************************************/ + +/* Bridge in structure for SyncPrimPDumpCBP */ +typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP_TAG +{ + IMG_DEVMEM_SIZE_T uiBufferSize; + IMG_DEVMEM_SIZE_T uiPacketSize; + IMG_DEVMEM_OFFSET_T uiWriteOffset; + IMG_HANDLE hSyncHandle; + IMG_UINT32 ui32Offset; +} __packed PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP; + +/* Bridge out structure for SyncPrimPDumpCBP */ +typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP; + +/******************************************* + SyncAllocEvent + *******************************************/ + +/* Bridge in structure for SyncAllocEvent */ +typedef struct PVRSRV_BRIDGE_IN_SYNCALLOCEVENT_TAG +{ + const IMG_CHAR *puiClassName; + IMG_BOOL bServerSync; + IMG_UINT32 ui32ClassNameSize; + IMG_UINT32 ui32FWAddr; +} __packed PVRSRV_BRIDGE_IN_SYNCALLOCEVENT; + +/* Bridge out structure for SyncAllocEvent */ +typedef struct PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT; + +/******************************************* + SyncFreeEvent + *******************************************/ + +/* Bridge in structure for SyncFreeEvent */ +typedef struct PVRSRV_BRIDGE_IN_SYNCFREEEVENT_TAG +{ + IMG_UINT32 ui32FWAddr; +} __packed PVRSRV_BRIDGE_IN_SYNCFREEEVENT; + +/* 
Bridge out structure for SyncFreeEvent */ +typedef struct PVRSRV_BRIDGE_OUT_SYNCFREEEVENT_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_SYNCFREEEVENT; + +/******************************************* + SyncCheckpointSignalledPDumpPol + *******************************************/ + +/* Bridge in structure for SyncCheckpointSignalledPDumpPol */ +typedef struct PVRSRV_BRIDGE_IN_SYNCCHECKPOINTSIGNALLEDPDUMPPOL_TAG +{ + PVRSRV_FENCE hFence; +} __packed PVRSRV_BRIDGE_IN_SYNCCHECKPOINTSIGNALLEDPDUMPPOL; + +/* Bridge out structure for SyncCheckpointSignalledPDumpPol */ +typedef struct PVRSRV_BRIDGE_OUT_SYNCCHECKPOINTSIGNALLEDPDUMPPOL_TAG +{ + PVRSRV_ERROR eError; +} __packed PVRSRV_BRIDGE_OUT_SYNCCHECKPOINTSIGNALLEDPDUMPPOL; + +#endif /* COMMON_SYNC_BRIDGE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/common_synctracking_bridge.h b/drivers/gpu/drm/phytium/octopus/common_synctracking_bridge.h new file mode 100644 index 000000000000..f41b6bc4a913 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/common_synctracking_bridge.h @@ -0,0 +1,97 @@ +/******************************************************************************* +@File +@Title Common bridge header for synctracking +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Declares common defines and structures used by both the client + and server side of the bridge for synctracking +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/
+
+#ifndef COMMON_SYNCTRACKING_BRIDGE_H
+#define COMMON_SYNCTRACKING_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#define PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST 0
+#define PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST+0
+#define PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST+1
+#define PVRSRV_BRIDGE_SYNCTRACKING_CMD_LAST (PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST+1)
+
+/*******************************************
+ SyncRecordRemoveByHandle
+ *******************************************/
+
+/* Bridge in structure for SyncRecordRemoveByHandle */
+typedef struct PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE_TAG
+{
+ IMG_HANDLE hhRecord;
+} __packed PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE;
+
+/* Bridge out structure for SyncRecordRemoveByHandle */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE;
+
+/*******************************************
+ SyncRecordAdd
+ *******************************************/
+
+/* Bridge in structure for SyncRecordAdd */
+typedef struct PVRSRV_BRIDGE_IN_SYNCRECORDADD_TAG
+{
+ IMG_HANDLE hhServerSyncPrimBlock;
+ const IMG_CHAR *puiClassName;
+ IMG_BOOL bbServerSync;
+ IMG_UINT32 ui32ClassNameSize;
+ IMG_UINT32 ui32ui32FwBlockAddr;
+ IMG_UINT32 ui32ui32SyncOffset;
+} __packed PVRSRV_BRIDGE_IN_SYNCRECORDADD;
+
+/* Bridge out structure for SyncRecordAdd */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCRECORDADD_TAG
+{
+ IMG_HANDLE hhRecord;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCRECORDADD;
+
+#endif /* COMMON_SYNCTRACKING_BRIDGE_H */
diff --git a/drivers/gpu/drm/phytium/octopus/common_synctracking_bridge.h b/drivers/gpu/drm/phytium/octopus/common_synctracking_bridge.h
new file mode 100644
index 000000000000..f41b6bc4a913
--- /dev/null
+++ 
b/drivers/gpu/drm/phytium/octopus/config_kernel.h @@ -0,0 +1,165 @@ +#define SUPPORT_RGXRAY_BRIDGE +#define PVRSRV_ENABLE_CCCB_GROW +#define RGX_FW_FILENAME "rgx.fw" +#define RGX_SH_FILENAME "rgx.sh" +#define PVR_BUILD_DIR "ft_pci" +#define PVR_BUILD_TYPE "release" +#define PVRSRV_MODNAME "pvrsrvkm" +#define PVRHMMU_MODNAME "" +#define PVRSYNC_MODNAME "pvr_sync" +#define SUPPORT_RGX 1 +#define DISPLAY_CONTROLLER drm_nulldisp +#define RELEASE +#define SUPPORT_PHYSMEM_TEST +#define RGX_BVNC_CORE_KM_HEADER "cores/rgxcore_km_30.3.816.20.h" +#define RGX_BNC_CONFIG_KM_HEADER "configs/rgxconfig_km_30.V.816.20.h" +#define PVRSRV_NEED_PVR_DPF +#define PVRSRV_APPHINT_FIRMWARE_HEAP_POLICY 5 +#define PVRSRV_POISON_ON_ALLOC_VALUE 0xd9 +#define PVRSRV_POISON_ON_FREE_VALUE 0x63 +#define RGX_NUM_OS_SUPPORTED 1 +#define RGX_OSID_0_DEFAULT_PRIORITY (1 - 0) +#define RGX_OSID_1_DEFAULT_PRIORITY (1 - 1) +#define RGX_OSID_2_DEFAULT_PRIORITY (1 - 2) +#define RGX_OSID_3_DEFAULT_PRIORITY (1 - 3) +#define RGX_OSID_4_DEFAULT_PRIORITY (1 - 4) +#define RGX_OSID_5_DEFAULT_PRIORITY (1 - 5) +#define RGX_OSID_6_DEFAULT_PRIORITY (1 - 6) +#define RGX_OSID_7_DEFAULT_PRIORITY (1 - 7) +#define RGX_HCS_DEFAULT_DEADLINE_MS 0xFFFFFFFFU +#define PVRSRV_APPHINT_DRIVERMODE 0x7FFFFFFF +#define RGX_FW_HEAP_SHIFT 25 +#define PVRSRV_VZ_BYPASS_HMMU +#define PVRSRV_APPHINT_CDMARBITRATIONOVERRIDE 0x00000000 +#ifdef CONFIG_DRM_PHYTIUMVR_DVFS +#define SUPPORT_LINUX_DVFS 1 +#endif +#define SUPPORT_POWMON_COMPONENT +#define SUPPORT_POWER_VALIDATION_VIA_DEBUGFS +#define PVR_LDM_DRIVER_REGISTRATION_NAME "pvrsrvkm" +#define PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN 256 +#define ION_DEFAULT_HEAP_NAME "ion_system_heap" +#define ION_DEFAULT_HEAP_ID_MASK (1 << ION_HEAP_TYPE_SYSTEM) +#define TRACK_FW_BOOT +#define PVRSRV_APPHINT_HWRDEBUGDUMPLIMIT APPHNT_BLDVAR_DBGDUMPLIMIT +#define PVRSRV_APPHINT_ENABLETRUSTEDDEVICEACECONFIG IMG_FALSE +#define PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE 0x4000 +#define 
PVRSRV_APPHINT_HWPERFCLIENTBUFFERSIZE 786432 +#define PVRSRV_APPHINT_ENABLESIGNATURECHECKS APPHNT_BLDVAR_ENABLESIGNATURECHECKS +#define PVRSRV_APPHINT_SIGNATURECHECKSBUFSIZE RGXFW_SIG_BUFFER_SIZE_MIN +#define PVRSRV_APPHINT_ENABLEFULLSYNCTRACKING IMG_FALSE +#define PVRSRV_APPHINT_ENABLEPAGEFAULTDEBUG APPHNT_BLDVAR_ENABLEPAGEFAULTDEBUG +#define PVRSRV_APPHINT_VALIDATEIRQ 0 +#define PVRSRV_APPHINT_DISABLECLOCKGATING 0 +#define PVRSRV_APPHINT_DISABLEDMOVERLAP 0 +#define PVRSRV_APPHINT_FABRICCOHERENCYOVERRIDE 2 +#define PVRSRV_APPHINT_ENABLEDMKILLINGRANDMODE 0 +#define PVRSRV_APPHINT_ENABLEFWCONTEXTSWITCH RGXFWIF_INICFG_OS_CTXSWITCH_DM_ALL +#define PVRSRV_APPHINT_ENABLERDPOWERISLAND RGX_RD_POWER_ISLAND_DEFAULT +#define PVRSRV_APPHINT_HWVALENABLESPUPOWERMASKCHANGE 0 +#define PVRSRV_APPHINT_HWVALAVAILABLESPUMASK 0xFFFFFFFF +#define PVRSRV_APPHINT_KILLINGCTL 0 +#define PVRSRV_APPHINT_CDMTDM_KILLINGCTL 0 +#define PVRSRV_APPHINT_ENABLERANDOMCONTEXTSWITCH 0 +#define PVRSRV_APPHINT_ENABLESOFTRESETCNTEXTSWITCH 0 +#define PVRSRV_APPHINT_FIRMWAREPERF FW_PERF_CONF_NONE +#define PVRSRV_APPHINT_FWCONTEXTSWITCHPROFILE RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN +#define PVRSRV_APPHINT_HWPERFDISABLECOUNTERFILTER 0 +#define PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB 2048 +#define PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB 2048 +#define PVRSRV_APPHINT_HWPERFHOSTTHREADTIMEOUTINMS 50 +#define PVRSRV_APPHINT_JONESDISABLEMASK 0 +#define PVRSRV_APPHINT_NEWFILTERINGMODE IMG_FALSE +#define PVRSRV_APPHINT_TRUNCATEMODE 0 +#define PVRSRV_APPHINT_EMUMAXFREQ 0 +#define PVRSRV_APPHINT_GPIOVALIDATIONMODE 0 +#define PVRSRV_APPHINT_RGXBVNC "" +#define PVRSRV_APPHINT_RISCVDMITEST 0 +#define PVRSRV_APPHINT_RCEDISABLEMASK 0x00000000 +#define RGXFW_SAFETY_WATCHDOG_PERIOD_IN_US 1000000 +#define PVRSRV_APPHINT_ISPSCHEDULINGLATENCYMODE 1 +#define PVRSRV_APPHINT_VALIDATESOCUSCTIMERS 0 +#define PVRSRV_APPHINT_SHGEOMPIPEMASK_OVERRIDE 0 +#define PVRSRV_APPHINT_CLEANUPTHREADPRIORITY 5 +#define PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY 0 
+#define PVRSRV_APPHINT_CACHEOPTHREADPRIORITY 1 +#define PVRSRV_APPHINT_ASSERTONHWRTRIGGER IMG_FALSE +#define PVRSRV_APPHINT_ASSERTOUTOFMEMORY IMG_FALSE +#define PVRSRV_APPHINT_CHECKMLIST APPHNT_BLDVAR_DEBUG +#define PVRSRV_APPHINT_DISABLEFEDLOGGING IMG_FALSE +#define PVRSRV_APPHINT_KCCB_SIZE_LOG2 7 +#define PVRSRV_APPHINT_ENABLEAPM RGX_ACTIVEPM_DEFAULT +#define PVRSRV_APPHINT_ENABLEHTBLOGGROUP 0 +#define PVRSRV_APPHINT_ENABLELOGGROUP RGXFWIF_LOG_TYPE_NONE +#define PVRSRV_APPHINT_FIRMWARELOGTYPE 0 +#define PVRSRV_APPHINT_FWTRACEBUFSIZEINDWORDS RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS +#define PVRSRV_APPHINT_FBCDCVERSIONOVERRIDE 0 +#define PVRSRV_APPHINT_HTBOPERATIONMODE HTB_OPMODE_DROPOLDEST +#define PVRSRV_APPHINT_HTBUFFERSIZE 64 +#define PVRSRV_APPHINT_ENABLEFTRACEGPU IMG_FALSE +#define PVRSRV_APPHINT_HWPERFFWFILTER 0 +#define PVRSRV_APPHINT_HWPERFHOSTFILTER 0 +#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_SERVICES 0 +#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_EGL 0 +#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENGLES 0 +#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENCL 0 +#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_VULKAN 0 +#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENGL 0 +#define PVRSRV_APPHINT_TIMECORRCLOCK 0 +#define PVRSRV_APPHINT_ENABLEFWPOISONONFREE IMG_FALSE +#define PVRSRV_APPHINT_FWPOISONONFREEVALUE 0xBD +#define PVRSRV_APPHINT_ZEROFREELIST IMG_FALSE +#define PVRSRV_APPHINT_GPUUNITSPOWERCHANGE IMG_FALSE +#define PVRSRV_APPHINT_DISABLEPDUMPPANIC IMG_FALSE +#define PVRSRV_APPHINT_CACHEOPCONFIG 0 +#define PVRSRV_APPHINT_CACHEOPUMKMHRESHOLDSIZE 0 +#define PVRSRV_APPHINT_IGNOREHWREPORTEDBVNC IMG_FALSE +#define PVRSRV_APPHINT_PHYSMEMTESTPASSES APPHNT_PHYSMEMTEST_ENABLE +#define PVRSRV_APPHINT_TESTSLRINTERVAL 0 +#define SOC_TIMER_FREQ 20 +#define PDVFS_COM_HOST 1 +#define PDVFS_COM_AP 2 +#define PDVFS_COM_PMC 3 +#define PDVFS_COM_IMG_CLKDIV 4 +#define PDVFS_COM PDVFS_COM_HOST +#define PVR_GPIO_MODE_GENERAL 1 +#define PVR_GPIO_MODE_POWMON_PIN 2 +#define PVR_GPIO_MODE 
PVR_GPIO_MODE_GENERAL +#define PVRSRV_ENABLE_PROCESS_STATS +#define PVR_ANNOTATION_MAX_LEN 63 +#define SUPPORT_DI_BRG_IMPL +#define PVR_LINUX_PHYSMEM_MAX_POOL_PAGES 10240 +#define PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES 20480 +#define PVR_DIRTY_BYTES_FLUSH_THRESHOLD 524288 +#define PVR_LINUX_HIGHORDER_ALLOCATION_THRESHOLD 256 +#define PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM 2 +#define PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD 16384 +#define SUPPORT_NATIVE_FENCE_SYNC +#define PVRSRV_STALLED_CCB_ACTION + +#ifdef CONFIG_DRM_PHYTIUMVR_OCTOPUS_DMA +#define SUPPORT_DMA_TRANSFER 1 +/*#define PVRSRV_DEBUG_DMA 1 */ +#endif + +#define UPDATE_FENCE_CHECKPOINT_COUNT 1 +#define PVR_DRM_NAME "pvr" +#define DEVICE_MEMSETCPY_ALIGN_IN_BYTES 16 +#define RGX_INITIAL_SLR_HOLDOFF_PERIOD_MS 0 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ3D 14 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ2D 14 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_CDM 13 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TA 15 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_3D 16 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_KICKSYNC 13 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TDM 14 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TQ3D 17 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TQ2D 17 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_CDM 15 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TA 16 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_3D 17 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_KICKSYNC 13 +#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TDM 17 +#define SUPPORT_BUFFER_SYNC 1 +#define SUPPORT_LMA_SUSPEND_TO_RAM 1 +#define SUPPORT_FWLOAD_ON_PROBE 1 diff --git a/drivers/gpu/drm/phytium/octopus/config_kernel.mk b/drivers/gpu/drm/phytium/octopus/config_kernel.mk new file mode 100644 index 000000000000..bf0595b2506b --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/config_kernel.mk @@ -0,0 +1,49 @@ +override HOST_PRIMARY_ARCH := host_aarch64 +override HOST_32BIT_ARCH := host_armhf +override HOST_FORCE_32BIT := +override HOST_ALL_ARCH := host_aarch64 
host_armhf +override TARGET_PRIMARY_ARCH := target_aarch64 +override TARGET_SECONDARY_ARCH := +override TARGET_ALL_ARCH := target_aarch64 +override TARGET_FORCE_32BIT := +override PVR_ARCH := octopus +override METAG_VERSION_NEEDED := 2.8.1.0.3 +override RISCV_VERSION_NEEDED := 1.0.1 +override KERNELDIR := /home/test/src/linux/ +override KERNEL_ID := 4.19.0.ft+ +override PVRSRV_MODULE_BASEDIR := /lib/modules/4.19.0.ft+/extra/ +override KERNEL_COMPONENTS := srvkm drm_nulldisp +override WINDOW_SYSTEM := xorg +override PVRSRV_MODNAME := pvrsrvkm +override PVRHMMU_MODNAME := +override PVRSYNC_MODNAME := pvr_sync +override PVR_BUILD_DIR := ft_pci +override PVR_BUILD_TYPE := release +override SUPPORT_RGX := 1 +override DISPLAY_CONTROLLER := drm_nulldisp +override PVR_SYSTEM := ft_pci +override PVR_LOADER := pvr_pci_drv +override BUILD := release +override SORT_BRIDGE_STRUCTS := 1 +override DEBUGLINK := 1 +override SUPPORT_PHYSMEM_TEST := 1 +override RGX_BNC := 30.V.816.20 +override RGX_NUM_OS_SUPPORTED := 1 +override VMM_TYPE := stub +override SUPPORT_POWMON_COMPONENT := 1 +override RGX_TIMECORR_CLOCK := mono +override PDVFS_COM_HOST := 1 +override PDVFS_COM_AP := 2 +override PDVFS_COM_PMC := 3 +override PDVFS_COM_IMG_CLKDIV := 4 +override PDVFS_COM := PDVFS_COM_HOST +override PVR_GPIO_MODE_GENERAL := 1 +override PVR_GPIO_MODE_POWMON_PIN := 2 +override PVR_GPIO_MODE := PVR_GPIO_MODE_GENERAL +override PVR_HANDLE_BACKEND := idr +override SUPPORT_DMABUF_BRIDGE := 1 +override SUPPORT_DI_BRG_IMPL := 1 +override SUPPORT_NATIVE_FENCE_SYNC := 1 +override SUPPORT_DMA_FENCE := 1 +override SUPPORT_BUFFER_SYNC := 1 +override SUPPORT_LMA_SUSPEND_TO_RAM := 1 diff --git a/drivers/gpu/drm/phytium/octopus/configs/rgxconfig_km_30.V.816.20.h b/drivers/gpu/drm/phytium/octopus/configs/rgxconfig_km_30.V.816.20.h new file mode 100644 index 000000000000..16d65049fc48 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/configs/rgxconfig_km_30.V.816.20.h @@ -0,0 +1,127 @@ 
+/*************************************************************************/ /*! +@Title RGX Configuration for BVNC 30.V.816.20 (kernel defines) +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXCONFIG_KM_30_V_816_20_H +#define RGXCONFIG_KM_30_V_816_20_H + +/***** Automatically generated file. Do not edit manually ********************/ + +/****************************************************************************** + * B.V.N.C Validation defines + *****************************************************************************/ +#define RGX_BNC_KM_B 30 +#define RGX_BNC_KM_N 816 +#define RGX_BNC_KM_C 20 + +/****************************************************************************** + * DDK Defines + *****************************************************************************/ +#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE +#define RGX_FEATURE_AXI_ACE +#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (4U) +#define RGX_FEATURE_CLUSTER_GROUPING +#define RGX_FEATURE_COMPUTE +#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE +#define RGX_FEATURE_COMPUTE_OVERLAP +#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS +#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL (4U) +#define RGX_FEATURE_COREID_PER_OS +#define RGX_FEATURE_DUST_POWER_ISLAND_S7 +#define RGX_FEATURE_ECC_RAMS (0U) +#define RGX_FEATURE_FASTRENDER_DM +#define RGX_FEATURE_FBCDC (4U) +#define RGX_FEATURE_FBCDC_ALGORITHM (4U) +#define RGX_FEATURE_FBCDC_ARCHITECTURE (6U) +#define RGX_FEATURE_GPU_VIRTUALISATION +#define RGX_FEATURE_GS_RTA_SUPPORT +#define RGX_FEATURE_LAYOUT_MARS (0U) +#define 
RGX_FEATURE_MAX_TPU_PER_SPU (2U) +#define RGX_FEATURE_META (MTP219) +#define RGX_FEATURE_META_COREMEM_BANKS (8U) +#define RGX_FEATURE_META_COREMEM_SIZE (96U) +#define RGX_FEATURE_META_DMA +#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (4U) +#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES +#define RGX_FEATURE_MMU_VERSION (4U) +#define RGX_FEATURE_NUM_CLUSTERS (2U) +#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8U) +#define RGX_FEATURE_NUM_ISP_PER_SPU (2U) +#define RGX_FEATURE_NUM_MEMBUS (1U) +#define RGX_FEATURE_NUM_OSIDS (8U) +#define RGX_FEATURE_NUM_SPU (1U) +#define RGX_FEATURE_PBE_CHECKSUM_2D +#define RGX_FEATURE_PBE_PER_SPU (1U) +#define RGX_FEATURE_PBVNC_COREID_REG +#define RGX_FEATURE_PDS_TEMPSIZE8 +#define RGX_FEATURE_PERFBUS +#define RGX_FEATURE_PERF_COUNTER_BATCH +#define RGX_FEATURE_PHYS_BUS_WIDTH (40U) +#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES +#define RGX_FEATURE_POWER_ISLAND_VERSION (1U) +#define RGX_FEATURE_RAY_TRACING_ARCH (0U) +#define RGX_FEATURE_RENDER_TARGET_XY_MAX (16384U) +#define RGX_FEATURE_S7_CACHE_HIERARCHY +#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE +#define RGX_FEATURE_SCALABLE_TE_ARCH (1U) +#define RGX_FEATURE_SCALABLE_VCE (1U) +#define RGX_FEATURE_SCALABLE_VDM_GPP +#define RGX_FEATURE_SIGNAL_SNOOPING +#define RGX_FEATURE_SLC_BANKS (2U) +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (1024U) +#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT /* Specifies the SLC is */ + /* customer-configurable. True SLC */ + /* size must be sourced from */ + /* register. 
*/ +#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (256U) +#define RGX_FEATURE_SLC_VIVT +#define RGX_FEATURE_SYS_BUS_SECURE_RESET +#define RGX_FEATURE_TDM_PDS_CHECKSUM +#define RGX_FEATURE_TESSELLATION +#define RGX_FEATURE_TILE_SIZE_X (32U) +#define RGX_FEATURE_TILE_SIZE_Y (32U) +#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS +#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS +#define RGX_FEATURE_VDM_DRAWINDIRECT +#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) +#define RGX_FEATURE_ZLS_CHECKSUM + +#endif /* RGXCONFIG_KM_30_V_816_20_H */ diff --git a/drivers/gpu/drm/phytium/octopus/connection_server.c b/drivers/gpu/drm/phytium/octopus/connection_server.c new file mode 100644 index 000000000000..16fc47d8fed9 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/connection_server.c @@ -0,0 +1,557 @@ +/*************************************************************************/ /*! +@File +@Title Server side connection management +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Handles connections coming from the client and the management + connection based information +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "handle.h" +#include "pvrsrv.h" +#include "connection_server.h" +#include "osconnection_server.h" +#include "allocmem.h" +#include "pvr_debug.h" +#include "sync_server.h" +#include "process_stats.h" +#include "pdump_km.h" +#include "osfunc.h" +#include "tlstream.h" + +/* PID associated with Connection currently being purged by Cleanup thread */ +static IMG_PID gCurrentPurgeConnectionPid; + +static PVRSRV_ERROR ConnectionDataDestroy(CONNECTION_DATA *psConnection) +{ + PVRSRV_ERROR eError; + PROCESS_HANDLE_BASE *psProcessHandleBase; + IMG_UINT64 ui64MaxBridgeTime; + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + + if (psPVRSRVData->bUnload) + { + /* driver is unloading so do not allow the bridge lock to be released */ + ui64MaxBridgeTime = 0; + } + else + { + ui64MaxBridgeTime = CONNECTION_DEFERRED_CLEANUP_TIMESLICE_NS; + } + + PVR_ASSERT(psConnection != NULL); + PVR_LOG_RETURN_IF_INVALID_PARAM(psConnection, "psConnection"); + + /* Close HWPerfClient stream here even though we created it in + * PVRSRVConnectKM(). */ + if (psConnection->hClientTLStream) + { + TLStreamClose(psConnection->hClientTLStream); + psConnection->hClientTLStream = NULL; + PVR_DPF((PVR_DBG_MESSAGE, "Destroyed private stream.")); + } + + /* Get process handle base to decrement the refcount */ + psProcessHandleBase = psConnection->psProcessHandleBase; + + if (psProcessHandleBase != NULL) + { + /* acquire the lock now to ensure unref and removal from the + * hash table is atomic. + * if the refcount becomes zero then the lock needs to be held + * until the entry is removed from the hash table. 
+ */ + OSLockAcquire(psPVRSRVData->hProcessHandleBase_Lock); + + /* In case the refcount becomes 0 we can remove the process handle base */ + if (OSAtomicDecrement(&psProcessHandleBase->iRefCount) == 0) + { + uintptr_t uiHashValue; + + uiHashValue = HASH_Remove(psPVRSRVData->psProcessHandleBase_Table, psConnection->pid); + OSLockRelease(psPVRSRVData->hProcessHandleBase_Lock); + + if (!uiHashValue) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to remove handle base from hash table.", + __func__)); + return PVRSRV_ERROR_UNABLE_TO_REMOVE_HASH_VALUE; + } + + eError = PVRSRVFreeKernelHandles(psProcessHandleBase->psHandleBase); + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVFreeKernelHandles"); + + eError = PVRSRVFreeHandleBase(psProcessHandleBase->psHandleBase, ui64MaxBridgeTime); + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVFreeHandleBase:1"); + + OSFreeMem(psProcessHandleBase); + } + else + { + OSLockRelease(psPVRSRVData->hProcessHandleBase_Lock); + } + + psConnection->psProcessHandleBase = NULL; + } + + /* Free handle base for this connection */ + if (psConnection->psHandleBase != NULL) + { + eError = PVRSRVFreeHandleBase(psConnection->psHandleBase, ui64MaxBridgeTime); + /* + * If we get PVRSRV_ERROR_RETRY we need to pass this back to the caller + * who will schedule a retry. + * Do not log this as it is an expected exception. + * This can occur if the Firmware is still processing a workload from + * the client when a tear-down request is received. + * Retrying will allow the in-flight work to be completed and the + * tear-down request can be completed when the FW is no longer busy. 
+ */ + if (PVRSRV_ERROR_RETRY == eError) + { + return eError; + } + else + { + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVFreeHandleBase:2"); + } + + psConnection->psHandleBase = NULL; + } + + if (psConnection->psSyncConnectionData != NULL) + { + SyncUnregisterConnection(psConnection->psSyncConnectionData); + psConnection->psSyncConnectionData = NULL; + } + + if (psConnection->psPDumpConnectionData != NULL) + { + PDumpUnregisterConnection(psConnection->psPDumpConnectionData); + psConnection->psPDumpConnectionData = NULL; + } + + /* Call environment specific connection data deinit function */ + if (psConnection->hOsPrivateData != NULL) + { + eError = OSConnectionPrivateDataDeInit(psConnection->hOsPrivateData); + PVR_LOG_RETURN_IF_ERROR(eError, "OSConnectionPrivateDataDeInit"); + + psConnection->hOsPrivateData = NULL; + } + + /* Close the PID stats entry as late as possible to catch all frees */ +#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + if (psConnection->hProcessStats != NULL) + { + PVRSRVStatsDeregisterProcess(psConnection->hProcessStats); + psConnection->hProcessStats = NULL; + } +#endif + + OSFreeMemNoStats(psConnection); + + return PVRSRV_OK; +} + +PVRSRV_ERROR PVRSRVCommonConnectionConnect(void **ppvPrivData, void *pvOSData) +{ + CONNECTION_DATA *psConnection; + PVRSRV_ERROR eError; + PROCESS_HANDLE_BASE *psProcessHandleBase; + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + + /* Allocate connection data area, no stats since process not registered yet */ + psConnection = OSAllocZMemNoStats(sizeof(*psConnection)); + PVR_LOG_RETURN_IF_NOMEM(psConnection, "psConnection"); + + /* Allocate process statistics as early as possible to catch all allocs */ +#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + eError = PVRSRVStatsRegisterProcess(&psConnection->hProcessStats); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVStatsRegisterProcess", failure); +#endif + + /* Call environment specific 
connection data init function */ + eError = OSConnectionPrivateDataInit(&psConnection->hOsPrivateData, pvOSData); + PVR_LOG_GOTO_IF_ERROR(eError, "OSConnectionPrivateDataInit", failure); + + psConnection->pid = OSGetCurrentClientProcessIDKM(); + psConnection->vpid = OSGetCurrentVirtualProcessID(); + psConnection->tid = (IMG_UINT32)OSGetCurrentClientThreadIDKM(); + OSStringLCopy(psConnection->pszProcName, OSGetCurrentClientProcessNameKM(), PVRSRV_CONNECTION_PROCESS_NAME_LEN); + +#if defined(SUPPORT_DMA_TRANSFER) + OSLockCreate(&psConnection->hDmaReqLock); + + eError = OSEventObjectCreate("Dma transfer cleanup event object", + &psConnection->hDmaEventObject); + PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", failure); + + OSAtomicWrite(&psConnection->ui32NumDmaTransfersInFlight, 0); + psConnection->bAcceptDmaRequests = IMG_TRUE; +#endif + +#if defined(DEBUG) || defined(PDUMP) + PVR_LOG(("%s connected", psConnection->pszProcName)); +#endif + + /* Register this connection with the sync core */ + eError = SyncRegisterConnection(&psConnection->psSyncConnectionData); + PVR_LOG_GOTO_IF_ERROR(eError, "SyncRegisterConnection", failure); + + /* + * Register this connection and Sync PDump callback with + * the pdump core. Pass in the Sync connection data. 
+ */ + eError = PDumpRegisterConnection(psConnection->psSyncConnectionData, + SyncConnectionPDumpSyncBlocks, + &psConnection->psPDumpConnectionData); + PVR_LOG_GOTO_IF_ERROR(eError, "PDumpRegisterConnection", failure); + + /* Allocate handle base for this connection */ + eError = PVRSRVAllocHandleBase(&psConnection->psHandleBase, + PVRSRV_HANDLE_BASE_TYPE_CONNECTION); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVAllocHandleBase", failure); + + /* Try to get process handle base if it already exists */ + OSLockAcquire(psPVRSRVData->hProcessHandleBase_Lock); + psProcessHandleBase = (PROCESS_HANDLE_BASE*) HASH_Retrieve(PVRSRVGetPVRSRVData()->psProcessHandleBase_Table, + psConnection->pid); + + /* In case there is none we are going to allocate one */ + if (psProcessHandleBase == NULL) + { + psProcessHandleBase = OSAllocZMem(sizeof(PROCESS_HANDLE_BASE)); + PVR_LOG_GOTO_IF_NOMEM(psProcessHandleBase, eError, failureLock); + + OSAtomicWrite(&psProcessHandleBase->iRefCount, 0); + + /* Allocate handle base for this process */ + eError = PVRSRVAllocHandleBase(&psProcessHandleBase->psHandleBase, + PVRSRV_HANDLE_BASE_TYPE_PROCESS); + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "PVRSRVAllocHandleBase"); + OSFreeMem(psProcessHandleBase); + goto failureLock; + } + + /* Insert the handle base into the global hash table */ + if (!HASH_Insert(PVRSRVGetPVRSRVData()->psProcessHandleBase_Table, + psConnection->pid, + (uintptr_t) psProcessHandleBase)) + { + PVRSRVFreeHandleBase(psProcessHandleBase->psHandleBase, 0); + + OSFreeMem(psProcessHandleBase); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_UNABLE_TO_INSERT_HASH_VALUE, failureLock); + } + } + OSAtomicIncrement(&psProcessHandleBase->iRefCount); + + OSLockRelease(psPVRSRVData->hProcessHandleBase_Lock); + + /* hConnectionsLock now resides in PVRSRV_DEVICE_NODE */ + { + PVRSRV_DEVICE_NODE *psDevNode = OSGetDevNode(psConnection); + + OSLockAcquire(psDevNode->hConnectionsLock); + dllist_add_to_tail(&psDevNode->sConnections, 
&psConnection->sConnectionListNode); + OSLockRelease(psDevNode->hConnectionsLock); + } + + psConnection->psProcessHandleBase = psProcessHandleBase; + + *ppvPrivData = psConnection; + + return PVRSRV_OK; + +failureLock: + OSLockRelease(psPVRSRVData->hProcessHandleBase_Lock); +failure: + ConnectionDataDestroy(psConnection); + + return eError; +} + +static PVRSRV_ERROR _CleanupThreadPurgeConnectionData(void *pvConnectionData) +{ + PVRSRV_ERROR eErrorConnection, eErrorKernel; + CONNECTION_DATA *psConnectionData = pvConnectionData; + + gCurrentPurgeConnectionPid = psConnectionData->pid; + + eErrorConnection = ConnectionDataDestroy(psConnectionData); + if (eErrorConnection != PVRSRV_OK) + { + if (eErrorConnection == PVRSRV_ERROR_RETRY) + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Failed to purge connection data %p " + "(deferring destruction)", + __func__, + psConnectionData)); + } + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Connection data %p deferred destruction finished", + __func__, + psConnectionData)); + } + + /* Check if possible resize the global handle base */ + eErrorKernel = PVRSRVPurgeHandles(KERNEL_HANDLE_BASE); + PVR_LOG_IF_ERROR(eErrorKernel, "PVRSRVPurgeHandles"); + + gCurrentPurgeConnectionPid = 0; + + return eErrorConnection; +} + +#if defined(SUPPORT_DMA_TRANSFER) +static void WaitForOutstandingDma(CONNECTION_DATA *psConnectionData) +{ + + PVRSRV_ERROR eError; + IMG_HANDLE hEvent; + IMG_UINT32 ui32Tries = 100; + +#if defined(DMA_VERBOSE) + PVR_DPF((PVR_DBG_ERROR, + "Waiting on %d DMA transfers in flight...", OSAtomicRead(&psConnectionData->ui32NumDmaTransfersInFlight))); +#endif + + eError = OSEventObjectOpen(psConnectionData->hDmaEventObject, &hEvent); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to open event object", __func__)); + return; + } + + while (OSAtomicRead(&psConnectionData->ui32NumDmaTransfersInFlight) != 0) + { + /* + #define DMA_TRANSFER_TIMEOUT_US (5000000ULL) + + This currently doesn't work properly. 
Wait time is not as requested. + Using OSSleepms instead + + OSEventObjectWaitKernel(hEvent, DMA_TRANSFER_TIMEOUT_US); + */ + OSSleepms(50); + if (!ui32Tries) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Timeout while waiting on outstanding DMA transfers!", __func__)); + break; + } + + ui32Tries--; + } + + OSEventObjectClose(hEvent); +} +#endif + +void PVRSRVCommonConnectionDisconnect(void *pvDataPtr) +{ + CONNECTION_DATA *psConnectionData = pvDataPtr; + PVRSRV_DEVICE_NODE *psDevNode = OSGetDevNode(psConnectionData); + + OSLockAcquire(psDevNode->hConnectionsLock); + dllist_remove_node(&psConnectionData->sConnectionListNode); + OSLockRelease(psDevNode->hConnectionsLock); + + /* Notify the PDump core if the pdump control client is disconnecting */ + if (psConnectionData->ui32ClientFlags & SRV_FLAGS_PDUMPCTRL) + { + PDumpDisconnectionNotify(); + } +#if defined(SUPPORT_DMA_TRANSFER) + OSLockAcquire(psConnectionData->hDmaReqLock); + + psConnectionData->bAcceptDmaRequests = IMG_FALSE; + + OSLockRelease(psConnectionData->hDmaReqLock); + + WaitForOutstandingDma(psConnectionData); + + OSEventObjectDestroy(psConnectionData->hDmaEventObject); + OSLockDestroy(psConnectionData->hDmaReqLock); +#endif + +#if defined(DEBUG) || defined(PDUMP) + PVR_LOG(("%s disconnected", psConnectionData->pszProcName)); +#endif + +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) + if (PVRSRVGetPVRSRVData()->eServicesState == PVRSRV_SERVICES_STATE_OK) +#endif + { + /* Defer the release of the connection data */ + psConnectionData->sCleanupThreadFn.pfnFree = _CleanupThreadPurgeConnectionData; + psConnectionData->sCleanupThreadFn.pvData = psConnectionData; + psConnectionData->sCleanupThreadFn.bDependsOnHW = IMG_FALSE; + CLEANUP_THREAD_SET_RETRY_COUNT(&psConnectionData->sCleanupThreadFn, + CLEANUP_THREAD_RETRY_COUNT_DEFAULT); + PVRSRVCleanupThreadAddWork(&psConnectionData->sCleanupThreadFn); + } +} + +IMG_PID PVRSRVGetPurgeConnectionPid(void) +{ + return gCurrentPurgeConnectionPid; +} + +/* Prefix for debug 
messages about Active Connections */ +#define DEBUG_DUMP_CONNECTION_FORMAT_STR " P%d-V%d-T%d-%s," +#define CONNECTIONS_PREFIX "Connections Device ID:%u(%d)" +#define MAX_CONNECTIONS_PREFIX (29) +#define MAX_DEBUG_DUMP_CONNECTION_STR_LEN (1+10+10+10+7+PVRSRV_CONNECTION_PROCESS_NAME_LEN) +#define MAX_DEBUG_DUMP_STRING_LEN (1+MAX_CONNECTIONS_PREFIX+(3*MAX_DEBUG_DUMP_CONNECTION_STR_LEN)) + +void PVRSRVConnectionDebugNotify(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + PDLLIST_NODE pNext, pNode; + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DEVICE_NODE *psDevNode; + IMG_BOOL bFoundConnections = IMG_FALSE; + + /* + * Connections are linked to each device-node within + * PVRSRV_DATA->psDeviceNodeList. Traverse this and display each connection + * within device node. + */ + for (psDevNode = psPVRSRVData->psDeviceNodeList; + psDevNode != NULL; + psDevNode = psDevNode->psNext) + { + /* We must check for an initialised device before accessing its mutex. + * The mutex is initialised as part of DeviceInitialize() which occurs + * on first access to the device node. 
+ */ + if (psDevNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE) + { + continue; + } + + bFoundConnections = IMG_TRUE; + + OSLockAcquire(psDevNode->hConnectionsLock); + if (dllist_is_empty(&psDevNode->sConnections)) + { + PVR_DUMPDEBUG_LOG(CONNECTIONS_PREFIX " No active connections", + (unsigned char)psDevNode->sDevId.ui32InternalID, + (unsigned char)psDevNode->sDevId.i32OsDeviceID); + } + else + { + IMG_CHAR sActiveConnections[MAX_DEBUG_DUMP_STRING_LEN]; + IMG_UINT16 i, uiPos = 0; + IMG_BOOL bPrinted = IMG_FALSE; + size_t uiSize = sizeof(sActiveConnections); + + IMG_CHAR szTmpConBuff[MAX_CONNECTIONS_PREFIX + 1]; + i = OSSNPrintf(szTmpConBuff, + MAX_CONNECTIONS_PREFIX, + CONNECTIONS_PREFIX, + (unsigned char)psDevNode->sDevId.ui32InternalID, + (unsigned char)psDevNode->sDevId.i32OsDeviceID); + OSStringLCopy(sActiveConnections+uiPos, szTmpConBuff, uiSize); + + /* Move the write offset to the end of the current string */ + uiPos += i; + /* Update the amount of remaining space available to copy into */ + uiSize -= i; + + dllist_foreach_node(&psDevNode->sConnections, pNode, pNext) + { + CONNECTION_DATA *sData = IMG_CONTAINER_OF(pNode, CONNECTION_DATA, sConnectionListNode); + + IMG_CHAR sTmpBuff[MAX_DEBUG_DUMP_CONNECTION_STR_LEN]; + i = OSSNPrintf(sTmpBuff, MAX_DEBUG_DUMP_CONNECTION_STR_LEN, + DEBUG_DUMP_CONNECTION_FORMAT_STR, sData->pid, sData->vpid, sData->tid, sData->pszProcName); + i = MIN(MAX_DEBUG_DUMP_CONNECTION_STR_LEN, i); + bPrinted = IMG_FALSE; + + OSStringLCopy(sActiveConnections+uiPos, sTmpBuff, uiSize); + + /* Move the write offset to the end of the current string */ + uiPos += i; + /* Update the amount of remaining space available to copy into */ + uiSize -= i; + + /* If there is not enough space to add another connection to this line, output the line */ + if (uiSize <= MAX_DEBUG_DUMP_CONNECTION_STR_LEN) + { + PVR_DUMPDEBUG_LOG("%s", sActiveConnections); + + /* + * Remove the "Connections:" prefix from the buffer. 
+ * Leave the subsequent buffer contents indented by the same + * amount to aid in interpreting the debug output. + */ + uiPos = sizeof(CONNECTIONS_PREFIX) - 1; + /* Reset the amount of space available to copy into */ + uiSize = MAX_DEBUG_DUMP_STRING_LEN - uiPos; + bPrinted = IMG_TRUE; + } + } + + /* Only print the current line if it hasn't already been printed */ + if (!bPrinted) + { + // Strip of the final comma + sActiveConnections[OSStringNLength(sActiveConnections, MAX_DEBUG_DUMP_STRING_LEN) - 1] = '\0'; + PVR_DUMPDEBUG_LOG("%s", sActiveConnections); + } +#undef MAX_DEBUG_DUMP_STRING_LEN +#undef MAX_DEBUG_DUMP_CONNECTIONS_PER_LINE + } + OSLockRelease(psDevNode->hConnectionsLock); + } + + /* Check to see if we have displayed anything from the loop above */ + if (bFoundConnections == IMG_FALSE) + { + PVR_DUMPDEBUG_LOG("Connections: No Devices: No active connections"); + } +} diff --git a/drivers/gpu/drm/phytium/octopus/connection_server.h b/drivers/gpu/drm/phytium/octopus/connection_server.h new file mode 100644 index 000000000000..2714d1df4d36 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/connection_server.h @@ -0,0 +1,134 @@ +/*************************************************************************/ /*! +@File +@Title Server side connection management +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description API for server side connection management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(CONNECTION_SERVER_H) +#define CONNECTION_SERVER_H + + +#include "img_types.h" +#include "img_defs.h" +#include "handle.h" +#include "pvrsrv_cleanup.h" + +/* Variable used to hold in memory the timeout for the current time slice*/ +extern IMG_UINT64 gui64TimesliceLimit; +/* Counter number of handle data freed during the current time slice */ +extern IMG_UINT32 gui32HandleDataFreeCounter; +/* Set the maximum time the freeing of the resources can keep the lock */ +#define CONNECTION_DEFERRED_CLEANUP_TIMESLICE_NS (3000 * 1000) /* 3ms */ + +typedef struct _CONNECTION_DATA_ +{ + PVRSRV_HANDLE_BASE *psHandleBase; + PROCESS_HANDLE_BASE *psProcessHandleBase; + struct _SYNC_CONNECTION_DATA_ *psSyncConnectionData; + struct _PDUMP_CONNECTION_DATA_ *psPDumpConnectionData; + + /* Holds the client flags supplied at connection time */ + IMG_UINT32 ui32ClientFlags; + + /* + * OS specific data can be stored via this handle. + * See osconnection_server.h for a generic mechanism + * for initialising this field. + */ + IMG_HANDLE hOsPrivateData; + +#define PVRSRV_CONNECTION_PROCESS_NAME_LEN (16) + IMG_PID pid; + IMG_PID vpid; + IMG_UINT32 tid; + IMG_CHAR pszProcName[PVRSRV_CONNECTION_PROCESS_NAME_LEN]; + + IMG_HANDLE hProcessStats; + + IMG_HANDLE hClientTLStream; + +#if defined(SUPPORT_GPUVIRT_VALIDATION) + /* + * Connection-based values per application which can be modified by the + * AppHint settings 'OSid, OSidReg, bOSidAxiProtReg' for each application. + * These control where the connection's memory allocation is sourced from. + * ui32OSid, ui32OSidReg range from 0..(GPUVIRT_VALIDATION_NUM_OS - 1). 
+ */ + IMG_UINT32 ui32OSid; + IMG_UINT32 ui32OSidReg; + IMG_BOOL bOSidAxiProtReg; +#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */ + +#if defined(SUPPORT_DMA_TRANSFER) + IMG_BOOL bAcceptDmaRequests; + ATOMIC_T ui32NumDmaTransfersInFlight; + POS_LOCK hDmaReqLock; + IMG_HANDLE hDmaEventObject; +#endif + /* Structure which is hooked into the cleanup thread work list */ + PVRSRV_CLEANUP_THREAD_WORK sCleanupThreadFn; + + DLLIST_NODE sConnectionListNode; + + /* List navigation for deferred freeing of connection data */ + struct _CONNECTION_DATA_ **ppsThis; + struct _CONNECTION_DATA_ *psNext; +} CONNECTION_DATA; + +#include "osconnection_server.h" + +PVRSRV_ERROR PVRSRVCommonConnectionConnect(void **ppvPrivData, void *pvOSData); +void PVRSRVCommonConnectionDisconnect(void *pvPrivData); + +IMG_PID PVRSRVGetPurgeConnectionPid(void); + +void PVRSRVConnectionDebugNotify(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile); + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVConnectionPrivateData) +#endif +static INLINE +IMG_HANDLE PVRSRVConnectionPrivateData(CONNECTION_DATA *psConnection) +{ + return (psConnection != NULL) ? psConnection->hOsPrivateData : NULL; +} + +#endif /* !defined(CONNECTION_SERVER_H) */ diff --git a/drivers/gpu/drm/phytium/octopus/cores/rgxcore_km_30.3.816.20.h b/drivers/gpu/drm/phytium/octopus/cores/rgxcore_km_30.3.816.20.h new file mode 100644 index 000000000000..d3bd388cf31e --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/cores/rgxcore_km_30.3.816.20.h @@ -0,0 +1,69 @@ +/*************************************************************************/ /*! +@Title RGX Core BVNC 30.3.816.20 +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RGXCORE_KM_30_3_816_20_H +#define RGXCORE_KM_30_3_816_20_H + +/* Automatically generated file (14/12/2020 09:01:31): Do not edit manually */ +/* CS: @5690709 */ + +/****************************************************************************** + * BVNC = 30.3.816.20 + *****************************************************************************/ +#define RGX_BVNC_KM_B 30 +#define RGX_BVNC_KM_V 3 +#define RGX_BVNC_KM_N 816 +#define RGX_BVNC_KM_C 20 + +/****************************************************************************** + * Errata + *****************************************************************************/ + + + + +/****************************************************************************** + * Enhancements + *****************************************************************************/ + + + +#endif /* RGXCORE_KM_30_3_816_20_H */ diff --git a/drivers/gpu/drm/phytium/octopus/debug_common.c b/drivers/gpu/drm/phytium/octopus/debug_common.c new file mode 100644 index 000000000000..c82d64d5bb9d --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/debug_common.c @@ -0,0 +1,1643 @@ +/*************************************************************************/ /*! +@File +@Title Debug Functionality +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Creates common debug info entries. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(__linux__) +#include +#endif /* #if !defined(__linux__) */ + +#include "debug_common.h" +#include "pvrsrv.h" +#include "di_server.h" +#include "lists.h" +#include "pvrversion.h" +#include "rgx_options.h" +#include "allocmem.h" +#include "rgxfwutils.h" + +#ifdef SUPPORT_RGX +#include "rgxdevice.h" +#include "rgxdebug.h" +#include "rgxinit.h" +static IMG_HANDLE ghGpuUtilUserDebugFS; +#endif + +static DI_ENTRY *gpsVersionDIEntry; +static DI_ENTRY *gpsStatusDIEntry; +static DI_ENTRY *gpsDumpDebugDIEntry; + +#ifdef SUPPORT_RGX +static DI_ENTRY *gpsFWTraceDIEntry; +#ifdef SUPPORT_POWER_VALIDATION_VIA_DEBUGFS +static DI_ENTRY *gpsPowMonDIEntry; +#endif /* SUPPORT_POWER_VALIDATION_VIA_DEBUGFS */ +#ifdef SUPPORT_FIRMWARE_GCOV +static DI_ENTRY *gpsFirmwareGcovDIEntry; +#endif /* SUPPORT_FIRMWARE_GCOV */ +#ifdef SUPPORT_POWER_SAMPLING_VIA_DEBUGFS +static DI_ENTRY *gpsPowerDataDIEntry; +#endif /* SUPPORT_POWER_SAMPLING_VIA_DEBUGFS */ +#ifdef SUPPORT_VALIDATION +static DI_ENTRY *gpsRGXRegsDIEntry; +#endif /* SUPPORT_VALIDATION */ +#endif /* SUPPORT_RGX */ +#ifdef SUPPORT_VALIDATION +static DI_ENTRY *gpsTestMemLeakDIEntry; +#endif /* SUPPORT_VALIDATION */ +#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) +static DI_ENTRY *gpsDebugLevelDIEntry; +#endif /* defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) */ + +/*************************************************************************/ /*! + Version DebugFS entry +*/ /**************************************************************************/ + +static void *_DebugVersionCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode, + va_list va) +{ + IMG_UINT64 *pui64CurrentPosition = va_arg(va, IMG_UINT64 *); + IMG_UINT64 ui64Position = va_arg(va, IMG_UINT64); + IMG_UINT64 ui64CurrentPosition = *pui64CurrentPosition; + + (*pui64CurrentPosition)++; + + return (ui64CurrentPosition == ui64Position) ? 
psDevNode : NULL; +} + +static void *_VersionDIStart(OSDI_IMPL_ENTRY *psEntry, IMG_UINT64 *pui64Pos) +{ + PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); + IMG_UINT64 uiCurrentPosition = 1; + + PVR_UNREFERENCED_PARAMETER(psEntry); + + if (psPVRSRVData == NULL) { + PVR_DPF((PVR_DBG_ERROR, "psPVRSRVData = NULL")); + return NULL; + } + + if (*pui64Pos == 0) + { + return DI_START_TOKEN; + } + + return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList, + _DebugVersionCompare_AnyVaCb, + &uiCurrentPosition, + *pui64Pos); +} + +static void _VersionDIStop(OSDI_IMPL_ENTRY *psEntry, void *pvPriv) +{ + PVR_UNREFERENCED_PARAMETER(psEntry); + PVR_UNREFERENCED_PARAMETER(pvPriv); +} + +static void *_VersionDINext(OSDI_IMPL_ENTRY *psEntry,void *pvPriv, + IMG_UINT64 *pui64Pos) +{ + PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); + IMG_UINT64 uiCurrentPosition = 1; + + (*pui64Pos)++; + + return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList, + _DebugVersionCompare_AnyVaCb, + &uiCurrentPosition, + *pui64Pos); +} + +#define DI_PRINT_VERSION_FMTSPEC \ + "%s Version: %u.%u @ %u (%s) build options: 0x%08x %s\n" +#define STR_DEBUG "debug" +#define STR_RELEASE "release" + +#define BUILD_OPT_LEN 80 + +static inline void _AppendOptionStr(IMG_CHAR pszBuildOptions[], const IMG_CHAR* str, OSDI_IMPL_ENTRY *psEntry, IMG_UINT32* pui32BuildOptionLen) +{ + IMG_UINT32 ui32BuildOptionLen = *pui32BuildOptionLen; + const IMG_UINT32 strLen = OSStringLength(str); + const IMG_UINT32 optStrLen = sizeof(IMG_CHAR) * (BUILD_OPT_LEN-1); + + if ((ui32BuildOptionLen + strLen) > optStrLen) + { + pszBuildOptions[ui32BuildOptionLen] = '\0'; + DIPrintf(psEntry, "%s\n", pszBuildOptions); + ui32BuildOptionLen = 0; + } + if (strLen < optStrLen) + { + OSStringLCopy(pszBuildOptions+ui32BuildOptionLen, str, strLen); + ui32BuildOptionLen += strLen - 1; + } + *pui32BuildOptionLen = ui32BuildOptionLen; +} + +static int _VersionDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvPriv) +{ + PVRSRV_DATA 
*psPVRSRVData = DIGetPrivData(psEntry); + + if (pvPriv == DI_START_TOKEN) + { + if (psPVRSRVData->sDriverInfo.bIsNoMatch) + { + const BUILD_INFO *psBuildInfo; + + psBuildInfo = &psPVRSRVData->sDriverInfo.sUMBuildInfo; + DIPrintf(psEntry, DI_PRINT_VERSION_FMTSPEC, + "UM Driver", + PVRVERSION_UNPACK_MAJ(psBuildInfo->ui32BuildVersion), + PVRVERSION_UNPACK_MIN(psBuildInfo->ui32BuildVersion), + psBuildInfo->ui32BuildRevision, + (psBuildInfo->ui32BuildType == BUILD_TYPE_DEBUG) ? + STR_DEBUG : STR_RELEASE, + psBuildInfo->ui32BuildOptions, + PVR_BUILD_DIR); + + psBuildInfo = &psPVRSRVData->sDriverInfo.sKMBuildInfo; + DIPrintf(psEntry, DI_PRINT_VERSION_FMTSPEC, + "KM Driver (" PVR_ARCH_NAME ")", + PVRVERSION_UNPACK_MAJ(psBuildInfo->ui32BuildVersion), + PVRVERSION_UNPACK_MIN(psBuildInfo->ui32BuildVersion), + psBuildInfo->ui32BuildRevision, + (psBuildInfo->ui32BuildType == BUILD_TYPE_DEBUG) ? + STR_DEBUG : STR_RELEASE, + psBuildInfo->ui32BuildOptions, + PVR_BUILD_DIR); + } + else + { + /* bIsNoMatch is `false` in one of the following cases: + * - UM & KM version parameters actually match. + * - A comparison between UM & KM has not been made yet, because no + * client ever connected. + * + * In both cases, available (KM) version info is the best output we + * can provide. 
+ */ + DIPrintf(psEntry, "Driver Version: %s (%s) (%s) build options: " + "0x%08lx %s\n", PVRVERSION_STRING, PVR_ARCH_NAME, + PVR_BUILD_TYPE, RGX_BUILD_OPTIONS_KM, PVR_BUILD_DIR); + } + } + else if (pvPriv != NULL) + { + PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *) pvPriv; + PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; +#ifdef SUPPORT_RGX + PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice; +#if defined(DEBUG) || defined(SUPPORT_VALIDATION) + IMG_CHAR pszBuildOptions[BUILD_OPT_LEN]; + IMG_UINT32 ui32BuildOptionLen = 0; + static const char* aszOptions[] = RGX_BUILD_OPTIONS_LIST; + int i = 0; +#endif +#endif /* SUPPORT_RGX */ + IMG_BOOL bFwVersionInfoPrinted = IMG_FALSE; + + DIPrintf(psEntry, "\nDevice Name: %s\n", psDevConfig->pszName); + + if (psDevConfig->pszVersion) + { + DIPrintf(psEntry, "Device Version: %s\n", + psDevConfig->pszVersion); + } + + if (psDevNode->pfnDeviceVersionString) + { + IMG_CHAR *pszVerStr; + + if (psDevNode->pfnDeviceVersionString(psDevNode, + &pszVerStr) == PVRSRV_OK) + { + DIPrintf(psEntry, "%s\n", pszVerStr); + + OSFreeMem(pszVerStr); + } + } + +#ifdef SUPPORT_RGX + /* print device's firmware version info */ + if (psDevInfo->psRGXFWIfOsInitMemDesc != NULL) + { + /* psDevInfo->psRGXFWIfOsInitMemDesc should be permanently mapped */ + if (psDevInfo->psRGXFWIfOsInit != NULL) + { + if (psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated) + { + const RGXFWIF_COMPCHECKS *psRGXCompChecks = + &psDevInfo->psRGXFWIfOsInit->sRGXCompChecks; + IMG_UINT32 ui32DDKVer = psRGXCompChecks->ui32DDKVersion; + + DIPrintf(psEntry, DI_PRINT_VERSION_FMTSPEC, + "Firmware", + PVRVERSION_UNPACK_MAJ(ui32DDKVer), + PVRVERSION_UNPACK_MIN(ui32DDKVer), + psRGXCompChecks->ui32DDKBuild, + ((psRGXCompChecks->ui32BuildOptions & + OPTIONS_DEBUG_MASK) ? 
STR_DEBUG : STR_RELEASE), + psRGXCompChecks->ui32BuildOptions, + PVR_BUILD_DIR); + bFwVersionInfoPrinted = IMG_TRUE; + +#if defined(DEBUG) || defined(SUPPORT_VALIDATION) + DIPrintf(psEntry, "Firmware Build Options:\n"); + + for (i = 0; i < ARRAY_SIZE(aszOptions); i++) + { + if ((psRGXCompChecks->ui32BuildOptions & 1<psDeviceNodeList, + _DebugPowerDataCompare_AnyVaCb, + &uiCurrentPosition, + *pui64Pos); +} + +static void _DebugPowerDataDIStop(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + PVR_UNREFERENCED_PARAMETER(psEntry); + PVR_UNREFERENCED_PARAMETER(pvData); +} + +static void *_DebugPowerDataDINext(OSDI_IMPL_ENTRY *psEntry, + void *pvData, + IMG_UINT64 *pui64Pos) +{ + PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); + IMG_UINT64 uiCurrentPosition = 0; + + PVR_UNREFERENCED_PARAMETER(pvData); + + (*pui64Pos)++; + + return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList, + _DebugPowerDataCompare_AnyVaCb, + &uiCurrentPosition, + *pui64Pos); +} + +static PVRSRV_ERROR SendPowerCounterCommand(PVRSRV_DEVICE_NODE* psDeviceNode, + RGXFWIF_COUNTER_DUMP_REQUEST eRequestType, + IMG_UINT32 *pui32kCCBCommandSlot) +{ + PVRSRV_ERROR eError; + + RGXFWIF_KCCB_CMD sCounterDumpCmd; + + sCounterDumpCmd.eCmdType = RGXFWIF_KCCB_CMD_COUNTER_DUMP; + sCounterDumpCmd.uCmdData.sCounterDumpConfigData.eCounterDumpRequest = eRequestType; + + eError = RGXScheduleCommandAndGetKCCBSlot(psDeviceNode->pvDevice, + RGXFWIF_DM_GP, + &sCounterDumpCmd, + 0, + PDUMP_FLAGS_CONTINUOUS, + pui32kCCBCommandSlot); + PVR_LOG_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot"); + + return eError; +} + +static void *_IsDevNodeNotInitialised(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + return psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_ACTIVE ? 
+ NULL : psDeviceNode; +} + +static void _SendPowerCounterCommand(PVRSRV_DEVICE_NODE* psDeviceNode, + va_list va) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + IMG_UINT32 ui32kCCBCommandSlot; + + OSLockAcquire(psDevInfo->hCounterDumpingLock); + + SendPowerCounterCommand(psDeviceNode, + va_arg(va, RGXFWIF_COUNTER_DUMP_REQUEST), + &ui32kCCBCommandSlot); + + OSLockRelease(psDevInfo->hCounterDumpingLock); +} + +static int _DebugPowerDataDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + IMG_UINT32 ui32kCCBCommandSlot; + PVRSRV_ERROR eError = PVRSRV_OK; + + if (pvData != NULL) + { + PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE) + { + PVR_DPF((PVR_DBG_ERROR, "Not all device nodes were initialised " + "when power counter data was requested!")); + return -EIO; + } + + OSLockAcquire(psDevInfo->hCounterDumpingLock); + + eError = SendPowerCounterCommand(psDeviceNode, + RGXFWIF_PWR_COUNTER_DUMP_SAMPLE, + &ui32kCCBCommandSlot); + + if (eError != PVRSRV_OK) + { + OSLockRelease(psDevInfo->hCounterDumpingLock); + return -EIO; + } + + /* Wait for FW complete completion */ + eError = RGXWaitForKCCBSlotUpdate(psDevInfo, + ui32kCCBCommandSlot, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "RGXWaitForKCCBSlotUpdate"); + OSLockRelease(psDevInfo->hCounterDumpingLock); + return -EIO; + } + + /* Read back the buffer */ + { + IMG_UINT32* pui32PowerBuffer; + IMG_UINT32 ui32NumOfRegs, ui32SamplePeriod; + IMG_UINT32 i, j; + + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psCounterBufferMemDesc, + (void**)&pui32PowerBuffer); + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "DevmemAcquireCpuVirtAddr"); + OSLockRelease(psDevInfo->hCounterDumpingLock); + return -EIO; + } + + ui32NumOfRegs = *pui32PowerBuffer++; + ui32SamplePeriod = *pui32PowerBuffer++; + + if (ui32NumOfRegs) + { + DIPrintf(psEntry, 
"Power counter data for device id: %d\n", + psDeviceNode->sDevId.i32OsDeviceID); + DIPrintf(psEntry, "Sample period: 0x%08x\n", ui32SamplePeriod); + + for (i = 0; i < ui32NumOfRegs; i++) + { + IMG_UINT32 ui32High, ui32Low; + IMG_UINT32 ui32RegOffset = *pui32PowerBuffer++; + IMG_UINT32 ui32NumOfInstances = *pui32PowerBuffer++; + + PVR_ASSERT(ui32NumOfInstances); + + DIPrintf(psEntry, "0x%08x:", ui32RegOffset); + + for (j = 0; j < ui32NumOfInstances; j++) + { + ui32Low = *pui32PowerBuffer++; + ui32High = *pui32PowerBuffer++; + + DIPrintf(psEntry, " 0x%016llx", + (IMG_UINT64) ui32Low | (IMG_UINT64) ui32High << 32); + } + + DIPrintf(psEntry, "\n"); + } + } + + DevmemReleaseCpuVirtAddr(psDevInfo->psCounterBufferMemDesc); + } + + OSLockRelease(psDevInfo->hCounterDumpingLock); + } + + return eError; +} + +static IMG_INT64 PowerDataSet(const IMG_CHAR __user *pcBuffer, + IMG_UINT64 ui64Count, IMG_UINT64 *pui64Pos, + void *pvData) +{ + PVRSRV_DATA* psPVRSRVData = (PVRSRV_DATA*) pvData; + RGXFWIF_COUNTER_DUMP_REQUEST eRequest; + + PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO); + PVR_RETURN_IF_FALSE(pui64Pos != NULL && *pui64Pos == 0, -EIO); + PVR_RETURN_IF_FALSE(ui64Count >= 1, -EINVAL); + PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL); + + if (List_PVRSRV_DEVICE_NODE_Any(psPVRSRVData->psDeviceNodeList, + _IsDevNodeNotInitialised)) + { + PVR_DPF((PVR_DBG_ERROR, "Not all device nodes were initialised when " + "power counter data was requested!")); + return -EIO; + } + + if (pcBuffer[0] == '1') + { + eRequest = RGXFWIF_PWR_COUNTER_DUMP_START; + } + else if (pcBuffer[0] == '0') + { + eRequest = RGXFWIF_PWR_COUNTER_DUMP_STOP; + } + else + { + return -EINVAL; + } + + List_PVRSRV_DEVICE_NODE_ForEach_va(psPVRSRVData->psDeviceNodeList, + _SendPowerCounterCommand, eRequest); + + *pui64Pos += ui64Count; + return ui64Count; +} + +#endif /* defined(SUPPORT_RGX) && defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) */ + 
+/*************************************************************************/ /*! + Status DebugFS entry +*/ /**************************************************************************/ + +static void *_DebugStatusCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode, + va_list va) +{ + IMG_UINT64 *pui64CurrentPosition = va_arg(va, IMG_UINT64 *); + IMG_UINT64 ui64Position = va_arg(va, IMG_UINT64); + IMG_UINT64 ui64CurrentPosition = *pui64CurrentPosition; + + (*pui64CurrentPosition)++; + + return (ui64CurrentPosition == ui64Position) ? psDevNode : NULL; +} + +static void *_DebugStatusDIStart(OSDI_IMPL_ENTRY *psEntry, IMG_UINT64 *pui64Pos) +{ + PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); + IMG_UINT64 uiCurrentPosition = 1; + + if (*pui64Pos == 0) + { + return DI_START_TOKEN; + } + + return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList, + _DebugStatusCompare_AnyVaCb, + &uiCurrentPosition, + *pui64Pos); +} + +static void _DebugStatusDIStop(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + PVR_UNREFERENCED_PARAMETER(psEntry); + PVR_UNREFERENCED_PARAMETER(pvData); +} + +static void *_DebugStatusDINext(OSDI_IMPL_ENTRY *psEntry, + void *pvData, + IMG_UINT64 *pui64Pos) +{ + PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); + IMG_UINT64 uiCurrentPosition = 1; + + PVR_UNREFERENCED_PARAMETER(pvData); + + (*pui64Pos)++; + + return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList, + _DebugStatusCompare_AnyVaCb, + &uiCurrentPosition, + *pui64Pos); +} + +static int _DebugStatusDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + if (pvData == DI_START_TOKEN) + { + PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); + + if (psPVRSRVData != NULL) + { + switch (psPVRSRVData->eServicesState) + { + case PVRSRV_SERVICES_STATE_OK: + DIPrintf(psEntry, "Driver Status: OK\n"); + break; + case PVRSRV_SERVICES_STATE_BAD: + DIPrintf(psEntry, "Driver Status: BAD\n"); + break; + case PVRSRV_SERVICES_STATE_UNDEFINED: + DIPrintf(psEntry, "Driver Status: UNDEFINED\n"); + 
break; + default: + DIPrintf(psEntry, "Driver Status: UNKNOWN (%d)\n", + psPVRSRVData->eServicesState); + break; + } + } + } + else if (pvData != NULL) + { + PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData; + IMG_CHAR *pszStatus = ""; + IMG_CHAR *pszReason = ""; + PVRSRV_DEVICE_HEALTH_STATUS eHealthStatus; + PVRSRV_DEVICE_HEALTH_REASON eHealthReason; + + /* Update the health status now if possible... */ + if (psDeviceNode->pfnUpdateHealthStatus) + { + psDeviceNode->pfnUpdateHealthStatus(psDeviceNode, IMG_FALSE); + } + eHealthStatus = OSAtomicRead(&psDeviceNode->eHealthStatus); + eHealthReason = OSAtomicRead(&psDeviceNode->eHealthReason); + + switch (eHealthStatus) + { + case PVRSRV_DEVICE_HEALTH_STATUS_OK: pszStatus = "OK"; break; + case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING: pszStatus = "NOT RESPONDING"; break; + case PVRSRV_DEVICE_HEALTH_STATUS_DEAD: pszStatus = "DEAD"; break; + case PVRSRV_DEVICE_HEALTH_STATUS_FAULT: pszStatus = "FAULT"; break; + case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED: pszStatus = "UNDEFINED"; break; + default: pszStatus = "UNKNOWN"; break; + } + + switch (eHealthReason) + { + case PVRSRV_DEVICE_HEALTH_REASON_NONE: pszReason = ""; break; + case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED: pszReason = " (Asserted)"; break; + case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING: pszReason = " (Poll failing)"; break; + case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS: pszReason = " (Global Event Object timeouts rising)"; break; + case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT: pszReason = " (KCCB offset invalid)"; break; + case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED: pszReason = " (KCCB stalled)"; break; + case PVRSRV_DEVICE_HEALTH_REASON_IDLING: pszReason = " (Idling)"; break; + case PVRSRV_DEVICE_HEALTH_REASON_RESTARTING: pszReason = " (Restarting)"; break; + case PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS: pszReason = " (Missing interrupts)"; break; + default: pszReason = " (Unknown reason)"; break; + } + + DIPrintf(psEntry, "Firmware 
Status: %s%s\n", pszStatus, pszReason); + if (PVRSRV_ERROR_LIMIT_REACHED) + { + DIPrintf(psEntry, "Server Errors: %d+\n", IMG_UINT32_MAX); + } + else + { + DIPrintf(psEntry, "Server Errors: %d\n", PVRSRV_KM_ERRORS); + } + + + /* Write other useful stats to aid the test cycle... */ + if (psDeviceNode->pvDevice != NULL) + { +#ifdef SUPPORT_RGX + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_HWRINFOBUF *psHWRInfoBuf = psDevInfo->psRGXFWIfHWRInfoBufCtl; + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + +#ifdef PVRSRV_DEBUG_LISR_EXECUTION + /* Show the detected #LISR, #MISR scheduled calls */ + DIPrintf(psEntry, "RGX #LISR: %llu\n", psDeviceNode->ui64nLISR); + DIPrintf(psEntry, "RGX #MISR: %llu\n", psDeviceNode->ui64nMISR); +#endif /* PVRSRV_DEBUG_LISR_EXECUTION */ + + /* Calculate the number of HWR events in total across all the DMs... */ + if (psHWRInfoBuf != NULL) + { + IMG_UINT32 ui32HWREventCount = 0; + IMG_UINT32 ui32CRREventCount = 0; + IMG_UINT32 ui32DMIndex; + + for (ui32DMIndex = 0; ui32DMIndex < RGXFWIF_DM_MAX; ui32DMIndex++) + { + ui32HWREventCount += psHWRInfoBuf->aui32HwrDmLockedUpCount[ui32DMIndex]; + ui32CRREventCount += psHWRInfoBuf->aui32HwrDmOverranCount[ui32DMIndex]; + } + + DIPrintf(psEntry, "HWR Event Count: %d\n", ui32HWREventCount); + DIPrintf(psEntry, "CRR Event Count: %d\n", ui32CRREventCount); +#ifdef PVRSRV_STALLED_CCB_ACTION + /* Write the number of Sync Lockup Recovery (SLR) events... */ + DIPrintf(psEntry, "SLR Event Count: %d\n", psDevInfo->psRGXFWIfFwOsData->ui32ForcedUpdatesRequested); +#endif /* PVRSRV_STALLED_CCB_ACTION */ + } + + /* Show error counts */ + DIPrintf(psEntry, "WGP Error Count: %d\n", psDevInfo->sErrorCounts.ui32WGPErrorCount); + DIPrintf(psEntry, "TRP Error Count: %d\n", psDevInfo->sErrorCounts.ui32TRPErrorCount); + + /* + * Guest drivers do not support the following functionality: + * - Perform actual on-chip fw tracing. + * - Collect actual on-chip GPU utilization stats. 
+ * - Perform actual on-chip GPU power/dvfs management. + * - As a result no more information can be provided. + */ + if (!PVRSRV_VZ_MODE_IS(GUEST)) + { + if (psFwSysData != NULL) + { + DIPrintf(psEntry, "FWF Event Count: %d\n", psFwSysData->ui32FWFaults); + } + + /* Write the number of APM events... */ + DIPrintf(psEntry, "APM Event Count: %d\n", psDevInfo->ui32ActivePMReqTotal); + + /* Write the current GPU Utilisation values... */ + if (psDevInfo->pfnGetGpuUtilStats && + eHealthStatus == PVRSRV_DEVICE_HEALTH_STATUS_OK) + { + RGXFWIF_GPU_UTIL_STATS sGpuUtilStats; + PVRSRV_ERROR eError = PVRSRV_OK; + + eError = psDevInfo->pfnGetGpuUtilStats(psDeviceNode, + ghGpuUtilUserDebugFS, + &sGpuUtilStats); + + if ((eError == PVRSRV_OK) && + ((IMG_UINT32)sGpuUtilStats.ui64GpuStatCumulative)) + { + IMG_UINT64 util; + IMG_UINT32 rem; + + util = 100 * sGpuUtilStats.ui64GpuStatActive; + util = OSDivide64(util, (IMG_UINT32)sGpuUtilStats.ui64GpuStatCumulative, &rem); + + DIPrintf(psEntry, "GPU Utilisation: %u%%\n", (IMG_UINT32)util); + } + else + { + DIPrintf(psEntry, "GPU Utilisation: -\n"); + } + } + } +#endif /* SUPPORT_RGX */ + } + } + + return 0; +} + +static IMG_INT64 DebugStatusSet(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, + IMG_UINT64 *pui64Pos, void *pvData) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + + PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO); + PVR_RETURN_IF_FALSE(pui64Pos != NULL && *pui64Pos == 0, -EIO); + PVR_RETURN_IF_FALSE(ui64Count >= 1, -EINVAL); + PVR_RETURN_IF_FALSE(pcBuffer[0] == 'k' || pcBuffer[0] == 'K', -EINVAL); + PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL); + + psPVRSRVData->eServicesState = PVRSRV_SERVICES_STATE_BAD; + + *pui64Pos += ui64Count; + return ui64Count; +} + +/*************************************************************************/ /*! 
+ Dump Debug DebugFS entry +*/ /**************************************************************************/ + +static void *_DebugDumpDebugCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode, va_list va) +{ + IMG_UINT64 *pui64CurrentPosition = va_arg(va, IMG_UINT64 *); + IMG_UINT64 ui64Position = va_arg(va, IMG_UINT64); + IMG_UINT64 ui64CurrentPosition = *pui64CurrentPosition; + + (*pui64CurrentPosition)++; + + return (ui64CurrentPosition == ui64Position) ? psDevNode : NULL; +} + +static void *_DebugDumpDebugDIStart(OSDI_IMPL_ENTRY *psEntry, + IMG_UINT64 *pui64Pos) +{ + PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); + IMG_UINT64 uiCurrentPosition = 1; + + if (*pui64Pos == 0) + { + return DI_START_TOKEN; + } + + return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList, + _DebugDumpDebugCompare_AnyVaCb, + &uiCurrentPosition, + *pui64Pos); +} + +static void _DebugDumpDebugDIStop(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + PVR_UNREFERENCED_PARAMETER(psEntry); + PVR_UNREFERENCED_PARAMETER(pvData); +} + +static void *_DebugDumpDebugDINext(OSDI_IMPL_ENTRY *psEntry, + void *pvData, + IMG_UINT64 *pui64Pos) +{ + PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); + IMG_UINT64 uiCurrentPosition = 1; + + PVR_UNREFERENCED_PARAMETER(pvData); + + (*pui64Pos)++; + + return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList, + _DebugDumpDebugCompare_AnyVaCb, + &uiCurrentPosition, + *pui64Pos); +} + +static void _DumpDebugDIPrintf(void *pvDumpDebugFile, + const IMG_CHAR *pszFormat, ...) 
+{ + OSDI_IMPL_ENTRY *psEntry = pvDumpDebugFile; + IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]; + va_list ArgList; + + va_start(ArgList, pszFormat); + vsnprintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, pszFormat, ArgList); + va_end(ArgList); + DIPrintf(psEntry, "%s\n", szBuffer); +} + +static int _DebugDumpDebugDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + if (pvData != NULL && pvData != DI_START_TOKEN) + { + PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData; + + if (psDeviceNode->pvDevice != NULL) + { + PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, + _DumpDebugDIPrintf, psEntry); + } + } + + return 0; +} + +#ifdef SUPPORT_RGX + +/*************************************************************************/ /*! + Firmware Trace DebugFS entry +*/ /**************************************************************************/ + +static void *_DebugFWTraceCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode, va_list va) +{ + IMG_UINT64 *pui64CurrentPosition = va_arg(va, IMG_UINT64 *); + IMG_UINT64 ui64Position = va_arg(va, IMG_UINT64); + IMG_UINT64 ui64CurrentPosition = *pui64CurrentPosition; + + (*pui64CurrentPosition)++; + + return (ui64CurrentPosition == ui64Position) ? 
psDevNode : NULL; +} + +static void *_DebugFWTraceDIStart(OSDI_IMPL_ENTRY *psEntry, IMG_UINT64 *pui64Pos) +{ + PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); + IMG_UINT64 uiCurrentPosition = 1; + + if (*pui64Pos == 0) + { + return DI_START_TOKEN; + } + + return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList, + _DebugFWTraceCompare_AnyVaCb, + &uiCurrentPosition, + *pui64Pos); +} + +static void _DebugFWTraceDIStop(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + PVR_UNREFERENCED_PARAMETER(psEntry); + PVR_UNREFERENCED_PARAMETER(pvData); +} + +static void *_DebugFWTraceDINext(OSDI_IMPL_ENTRY *psEntry, + void *pvData, + IMG_UINT64 *pui64Pos) +{ + PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); + IMG_UINT64 uiCurrentPosition = 1; + + PVR_UNREFERENCED_PARAMETER(pvData); + + (*pui64Pos)++; + + return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList, + _DebugFWTraceCompare_AnyVaCb, + &uiCurrentPosition, + *pui64Pos); +} + +static void _FWTraceDIPrintf(void *pvDumpDebugFile, + const IMG_CHAR *pszFormat, ...) 
+{ + OSDI_IMPL_ENTRY *psEntry = pvDumpDebugFile; + IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]; + va_list ArgList; + + va_start(ArgList, pszFormat); + vsnprintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, pszFormat, ArgList); + va_end(ArgList); + DIPrintf(psEntry, "%s\n", szBuffer); +} + +static int _DebugFWTraceDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + if (pvData != NULL && pvData != DI_START_TOKEN) + { + PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData; + + if (psDeviceNode->pvDevice != NULL) + { + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + RGXDumpFirmwareTrace(_FWTraceDIPrintf, psEntry, psDevInfo); + } + } + + return 0; +} + +#ifdef SUPPORT_FIRMWARE_GCOV + +static PVRSRV_RGXDEV_INFO *getPsDevInfo(OSDI_IMPL_ENTRY *psEntry) +{ + PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); + + if (psPVRSRVData != NULL) + { + if (psPVRSRVData->psDeviceNodeList != NULL) + { + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*)psPVRSRVData->psDeviceNodeList->pvDevice; + return psDevInfo; + } + } + return NULL; +} + +static void *_FirmwareGcovDIStart(OSDI_IMPL_ENTRY *psEntry, IMG_UINT64 *pui64Pos) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = getPsDevInfo(psEntry); + + if (psDevInfo != NULL) + { + if (psDevInfo->psFirmwareGcovBufferMemDesc != NULL) + { + void *pvCpuVirtAddr; + DevmemAcquireCpuVirtAddr(psDevInfo->psFirmwareGcovBufferMemDesc, &pvCpuVirtAddr); + return *pui64Pos ? 
NULL : pvCpuVirtAddr; + } + } + + return NULL; +} + +static void _FirmwareGcovDIStop(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = getPsDevInfo(psEntry); + + PVR_UNREFERENCED_PARAMETER(pvData); + + if (psDevInfo != NULL) + { + if (psDevInfo->psFirmwareGcovBufferMemDesc != NULL) + { + DevmemReleaseCpuVirtAddr(psDevInfo->psFirmwareGcovBufferMemDesc); + } + } +} + +static void *_FirmwareGcovDINext(OSDI_IMPL_ENTRY *psEntry, + void *pvData, + IMG_UINT64 *pui64Pos) +{ + PVR_UNREFERENCED_PARAMETER(psEntry); + PVR_UNREFERENCED_PARAMETER(pvData); + PVR_UNREFERENCED_PARAMETER(pui64Pos); + return NULL; +} + +static int _FirmwareGcovDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = getPsDevInfo(psEntry); + + if (psDevInfo != NULL) + { + DIWrite(psEntry, pvData, psDevInfo->ui32FirmwareGcovSize); + } + return 0; +} + +#endif /* SUPPORT_FIRMWARE_GCOV */ + +#ifdef SUPPORT_POWER_VALIDATION_VIA_DEBUGFS + +/*************************************************************************/ /*! + Power monitoring DebugFS entry +*/ /**************************************************************************/ + +static void *_PowMonCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode, va_list va) +{ + IMG_UINT64 *pui64CurrentPosition = va_arg(va, IMG_UINT64 *); + IMG_UINT64 ui64Position = va_arg(va, IMG_UINT64); + IMG_UINT64 ui64CurrentPosition = *pui64CurrentPosition; + + (*pui64CurrentPosition)++; + + return (ui64CurrentPosition == ui64Position) ? 
psDevNode : NULL; +} + +static void *_PowMonTraceDIStart(OSDI_IMPL_ENTRY *psEntry, IMG_UINT64 *pui64Pos) +{ + PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); + IMG_UINT64 uiCurrentPosition = 1; + + if (*pui64Pos == 0) + { + return DI_START_TOKEN; + } + + return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList, + _PowMonCompare_AnyVaCb, + &uiCurrentPosition, + *pui64Pos); +} + +static void _PowMonTraceDIStop(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + PVR_UNREFERENCED_PARAMETER(psEntry); + PVR_UNREFERENCED_PARAMETER(pvData); +} + +static void *_PowMonTraceDINext(OSDI_IMPL_ENTRY *psEntry, + void *pvData, + IMG_UINT64 *pui64Pos) +{ + PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); + IMG_UINT64 uiCurrentPosition = 1; + + PVR_UNREFERENCED_PARAMETER(pvData); + + (*pui64Pos)++; + + return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList, + _PowMonCompare_AnyVaCb, + &uiCurrentPosition, + *pui64Pos); +} + +static void _PowMonTraceDIPrintf(void *pvDumpDebugFile, + const IMG_CHAR *pszFormat, ...) 
+{ + OSDI_IMPL_ENTRY *psEntry = pvDumpDebugFile; + IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]; + va_list ArgList; + + va_start(ArgList, pszFormat); + vsnprintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, pszFormat, ArgList); + va_end(ArgList); + DIPrintf(psEntry, "%s\n", szBuffer); +} + +static int _PowMonTraceDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + if (pvData != NULL && pvData != DI_START_TOKEN) + { + PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData; + + if (psDeviceNode->pvDevice != NULL) + { + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + RGXDumpPowerMonitoring(_PowMonTraceDIPrintf, psEntry, psDevInfo); + } + } + + return 0; +} + +#endif /* SUPPORT_POWER_VALIDATION_VIA_DEBUGFS */ + +#ifdef SUPPORT_VALIDATION + +#ifndef SYS_RGX_DEV_UNMAPPED_FW_REG +#define SYS_RGX_DEV_UNMAPPED_FW_REG 0XFFFFFFFF +#endif +#define DI_RGXREGS_TIMEOUT_MS 1000 + +/*************************************************************************/ /*! + RGX Registers Dump DebugFS entry +*/ /**************************************************************************/ + +static IMG_INT64 _RgxRegsSeek(IMG_UINT64 ui64Offset, void *pvData) +{ + PVRSRV_DATA *psPVRSRVData = pvData; + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_LOG_RETURN_IF_FALSE(psPVRSRVData != NULL, "psPVRSRVData is NULL", -1); + + psDevInfo = psPVRSRVData->psDeviceNodeList->pvDevice; + + PVR_LOG_RETURN_IF_FALSE(ui64Offset <= (psDevInfo->ui32RegSize - 4), + "register offset is too big", -1); + + return ui64Offset; +} + +static IMG_INT64 _RgxRegsRead(IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, + IMG_UINT64 *pui64Pos, void *pvData) +{ + PVRSRV_DATA *psPVRSRVData = pvData; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT64 ui64RegVal = 0; + PVRSRV_RGXDEV_INFO *psDevInfo; + IMG_UINT64 ui64CompRes; + + PVR_LOG_RETURN_IF_FALSE(psPVRSRVData != NULL, + "psPVRSRVData is NULL", -ENXIO); + PVR_LOG_RETURN_IF_FALSE(ui64Count == 4 || ui64Count == 8, + "wrong RGX register size", -EIO); + PVR_LOG_RETURN_IF_FALSE(!(*pui64Pos 
& (ui64Count - 1)), + "register read offset isn't aligned", -EINVAL); + + psDevInfo = psPVRSRVData->psDeviceNodeList->pvDevice; + + if (*pui64Pos >= SYS_RGX_DEV_UNMAPPED_FW_REG) + { + if (!psDevInfo->bFirmwareInitialised) + { + PVR_DPF((PVR_DBG_ERROR, "RGX Register offset is above PCI mapped range but " + "Firmware isn't yet initialised\n")); + return -EIO; + } + + reinit_completion(&psDevInfo->sFwRegs.sRegComp); + + eError = RGXScheduleRgxRegCommand(psDevInfo, + 0x00, + ui64Count, + (IMG_UINT32) *pui64Pos, + IMG_FALSE); + + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "RGXScheduleRgxRegCommand"); + return -EIO; + } + + ui64CompRes = wait_for_completion_timeout(&psDevInfo->sFwRegs.sRegComp, + msecs_to_jiffies(DI_RGXREGS_TIMEOUT_MS)); + if (!ui64CompRes) + { + PVR_DPF((PVR_DBG_ERROR, "FW RGX Register access timeout %#x\n", + (IMG_UINT32) *pui64Pos)); + return -EIO; + } + + OSCachedMemCopy(pcBuffer, &psDevInfo->sFwRegs.ui64RegVal, ui64Count); + } + else + { + ui64RegVal = ui64Count == 4 ? 
+ OSReadHWReg32(psDevInfo->pvRegsBaseKM, *pui64Pos) : + OSReadHWReg64(psDevInfo->pvRegsBaseKM, *pui64Pos); + OSCachedMemCopy(pcBuffer, &ui64RegVal, ui64Count); + } + + return ui64Count; +} + +static IMG_INT64 _RgxRegsWrite(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, + IMG_UINT64 *pui64Pos, void *pvData) +{ + PVRSRV_DATA *psPVRSRVData = pvData; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT64 ui64RegVal = 0; + PVRSRV_RGXDEV_INFO *psDevInfo; + + /* ignore the '\0' character */ + ui64Count -= 1; + + PVR_LOG_RETURN_IF_FALSE(psPVRSRVData != NULL, + "psPVRSRVData == NULL", -ENXIO); + PVR_LOG_RETURN_IF_FALSE(ui64Count == 4 || ui64Count == 8, + "wrong RGX register size", -EIO); + PVR_LOG_RETURN_IF_FALSE(!(*pui64Pos & (ui64Count - 1)), + "register read offset isn't aligned", -EINVAL); + + psDevInfo = psPVRSRVData->psDeviceNodeList->pvDevice; + + if (*pui64Pos >= SYS_RGX_DEV_UNMAPPED_FW_REG) + { + if (!psDevInfo->bFirmwareInitialised) + { + PVR_DPF((PVR_DBG_ERROR, "RGX Register offset is above PCI mapped range but " + "Firmware isn't yet initialised\n")); + return -EIO; + } + + if (ui64Count == 4) + ui64RegVal = (IMG_UINT64) *((IMG_UINT32 *) pcBuffer); + else + ui64RegVal = *((IMG_UINT64 *) pcBuffer); + + eError = RGXScheduleRgxRegCommand(psDevInfo, + ui64RegVal, + ui64Count, + (IMG_UINT32) *pui64Pos, + IMG_TRUE); + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "RGXScheduleRgxRegCommand"); + return -EIO; + } + + } + else + { + if (ui64Count == 4) + { + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, *pui64Pos, + *((IMG_UINT32 *) (void *) pcBuffer)); + } + else + { + OSWriteHWReg64(psDevInfo->pvRegsBaseKM, *pui64Pos, + *((IMG_UINT64 *) (void *) pcBuffer)); + } + } + + return ui64Count; +} + +#endif /* SUPPORT_VALIDATION */ + +#endif /* SUPPORT_RGX */ + +#ifdef SUPPORT_VALIDATION + +static int TestMemLeakDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + + PVR_UNREFERENCED_PARAMETER(pvData); + + PVR_RETURN_IF_FALSE(pvData 
!= NULL, -EINVAL); + + DIPrintf(psEntry, "os: %s, %u\ngpu: %s, %u\nmmu: %s, %u\n", + psPVRSRVData->sMemLeakIntervals.ui32OSAlloc ? "enabled" : "disabled", + psPVRSRVData->sMemLeakIntervals.ui32OSAlloc, + psPVRSRVData->sMemLeakIntervals.ui32GPU ? "enabled" : "disabled", + psPVRSRVData->sMemLeakIntervals.ui32GPU, + psPVRSRVData->sMemLeakIntervals.ui32MMU ? "enabled" : "disabled", + psPVRSRVData->sMemLeakIntervals.ui32MMU); + + return 0; +} + +static IMG_INT64 TestMemLeakDISet(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, + IMG_UINT64 *pui64Pos, void *pvData) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + IMG_CHAR *pcTemp; + unsigned long ui32MemLeakInterval; + + PVR_UNREFERENCED_PARAMETER(pvData); + + PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO); + PVR_RETURN_IF_FALSE(pui64Pos != NULL && *pui64Pos == 0, -EIO); + PVR_RETURN_IF_FALSE(ui64Count <= 16, -EINVAL); + PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL); + + pcTemp = strchr(pcBuffer, ','); + + if (kstrtoul(pcTemp+1, 0, &ui32MemLeakInterval) != 0) + { + return -EINVAL; + } + + if (strncmp(pcBuffer, "os", pcTemp-pcBuffer) == 0) + { + psPVRSRVData->sMemLeakIntervals.ui32OSAlloc = ui32MemLeakInterval; + } + else if (strncmp(pcBuffer, "gpu", pcTemp-pcBuffer) == 0) + { + psPVRSRVData->sMemLeakIntervals.ui32GPU = ui32MemLeakInterval; + } + else if (strncmp(pcBuffer, "mmu", pcTemp-pcBuffer) == 0) + { + psPVRSRVData->sMemLeakIntervals.ui32MMU = ui32MemLeakInterval; + } + else + { + return -EINVAL; + } + + *pui64Pos += ui64Count; + return ui64Count; +} + +#endif /* SUPPORT_VALIDATION */ + +#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) + +/*************************************************************************/ /*! 
+ Debug level DebugFS entry +*/ /**************************************************************************/ + +static int DebugLevelDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + DIPrintf(psEntry, "%u\n", OSDebugLevel()); + + return 0; +} + +#ifndef __GNUC__ +static int __builtin_ffsl(long int x) +{ + for (size_t i = 0; i < sizeof(x) * 8; i++) + { + if (x & (1 << i)) + { + return i + 1; + } + } + return 0; +} +#endif /* __GNUC__ */ + +static IMG_INT64 DebugLevelSet(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, + IMG_UINT64 *pui64Pos, void *pvData) +{ + const IMG_UINT uiMaxBufferSize = 6; + IMG_UINT32 ui32Level; + + PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO); + PVR_RETURN_IF_FALSE(pui64Pos != NULL && *pui64Pos == 0, -EIO); + PVR_RETURN_IF_FALSE(ui64Count > 0 && ui64Count < uiMaxBufferSize, -EINVAL); + PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL); + + if (sscanf(pcBuffer, "%u", &ui32Level) == 0) + { + return -EINVAL; + } + + OSSetDebugLevel(ui32Level & ((1 << __builtin_ffsl(DBGPRIV_LAST)) - 1)); + + *pui64Pos += ui64Count; + return ui64Count; +} +#endif /* defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) */ + +PVRSRV_ERROR DebugCommonInit(void) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_ERROR eError; + + PVR_ASSERT(psPVRSRVData != NULL); + + /* + * The DebugFS entries are designed to work in a single device system but + * this function will be called multiple times in a multi-device system. + * Return an error in this case. 
+ */ + if (gpsVersionDIEntry) + { + return -EEXIST; + } + +#if defined(SUPPORT_RGX) && !defined(NO_HARDWARE) + if (SORgxGpuUtilStatsRegister(&ghGpuUtilUserDebugFS) != PVRSRV_OK) + { + return -ENOMEM; + } +#endif /* defined(SUPPORT_RGX) && !defined(NO_HARDWARE) */ + + { + DI_ITERATOR_CB sIterator = { + .pfnStart = _VersionDIStart, + .pfnStop = _VersionDIStop, + .pfnNext = _VersionDINext, + .pfnShow = _VersionDIShow + }; + + eError = DICreateEntry("version", NULL, &sIterator, psPVRSRVData, + DI_ENTRY_TYPE_GENERIC, &gpsVersionDIEntry); + PVR_GOTO_IF_ERROR(eError, return_error_); + } + + { + DI_ITERATOR_CB sIterator = { + .pfnStart = _DebugStatusDIStart, + .pfnStop = _DebugStatusDIStop, + .pfnNext = _DebugStatusDINext, + .pfnShow = _DebugStatusDIShow, + .pfnWrite = DebugStatusSet + }; + eError = DICreateEntry("status", NULL, &sIterator, psPVRSRVData, + DI_ENTRY_TYPE_GENERIC, &gpsStatusDIEntry); + PVR_GOTO_IF_ERROR(eError, return_error_); + } + + { + DI_ITERATOR_CB sIterator = { + .pfnStart = _DebugDumpDebugDIStart, + .pfnStop = _DebugDumpDebugDIStop, + .pfnNext = _DebugDumpDebugDINext, + .pfnShow = _DebugDumpDebugDIShow + }; + eError = DICreateEntry("debug_dump", NULL, &sIterator, psPVRSRVData, + DI_ENTRY_TYPE_GENERIC, &gpsDumpDebugDIEntry); + PVR_GOTO_IF_ERROR(eError, return_error_); + } + +#ifdef SUPPORT_RGX + if (! 
PVRSRV_VZ_MODE_IS(GUEST)) + { + { + DI_ITERATOR_CB sIterator = { + .pfnStart = _DebugFWTraceDIStart, + .pfnStop = _DebugFWTraceDIStop, + .pfnNext = _DebugFWTraceDINext, + .pfnShow = _DebugFWTraceDIShow + }; + eError = DICreateEntry("firmware_trace", NULL, &sIterator, + psPVRSRVData, DI_ENTRY_TYPE_GENERIC, + &gpsFWTraceDIEntry); + PVR_GOTO_IF_ERROR(eError, return_error_); + } + +#ifdef SUPPORT_POWER_VALIDATION_VIA_DEBUGFS + { + DI_ITERATOR_CB sIterator = { + .pfnStart = _PowMonTraceDIStart, + .pfnStop = _PowMonTraceDIStop, + .pfnNext = _PowMonTraceDINext, + .pfnShow = _PowMonTraceDIShow + }; + eError = DICreateEntry("power_mon", NULL, &sIterator, psPVRSRVData, + DI_ENTRY_TYPE_GENERIC, &gpsPowMonDIEntry); + PVR_GOTO_IF_ERROR(eError, return_error_); + } +#endif /* SUPPORT_POWER_VALIDATION_VIA_DEBUGFS */ + } + +#ifdef SUPPORT_FIRMWARE_GCOV + { + DI_ITERATOR_CB sIterator = { + .pfnStart = _FirmwareGcovDIStart, + .pfnStop = _FirmwareGcovDIStop, + .pfnNext = _FirmwareGcovDINext, + .pfnShow = _FirmwareGcovDIShow + }; + + eError = DICreateEntry("firmware_gcov", NULL, &sIterator, psPVRSRVData, + DI_ENTRY_TYPE_GENERIC, &gpsFirmwareGcovDIEntry); + PVR_GOTO_IF_ERROR(eError, return_error_); + } +#endif /* SUPPORT_FIRMWARE_GCOV */ + +#ifdef SUPPORT_POWER_SAMPLING_VIA_DEBUGFS + { + DI_ITERATOR_CB sIterator = { + .pfnStart = _DebugPowerDataDIStart, + .pfnStop = _DebugPowerDataDIStop, + .pfnNext = _DebugPowerDataDINext, + .pfnShow = _DebugPowerDataDIShow, + .pfnWrite = PowerDataSet + }; + eError = DICreateEntry("power_data", NULL, &sIterator, psPVRSRVData, + DI_ENTRY_TYPE_GENERIC, &gpsPowerDataDIEntry); + PVR_GOTO_IF_ERROR(eError, return_error_); + } +#endif /* SUPPORT_POWER_SAMPLING_VIA_DEBUGFS */ + +#ifdef SUPPORT_VALIDATION + { + DI_ITERATOR_CB sIterator = { + .pfnSeek = _RgxRegsSeek, + .pfnRead = _RgxRegsRead, + .pfnWrite = _RgxRegsWrite + }; + eError = DICreateEntry("rgxregs", NULL, &sIterator, psPVRSRVData, + DI_ENTRY_TYPE_RANDOM_ACCESS, &gpsRGXRegsDIEntry); + + 
PVR_GOTO_IF_ERROR(eError, return_error_); + } +#endif /* SUPPORT_VALIDATION */ +#endif /* SUPPORT_RGX */ + +#ifdef SUPPORT_VALIDATION + { + DI_ITERATOR_CB sIterator = { + .pfnShow = TestMemLeakDIShow, + .pfnWrite = TestMemLeakDISet + }; + eError = DICreateEntry("test_memleak", NULL, &sIterator, psPVRSRVData, + DI_ENTRY_TYPE_GENERIC, &gpsTestMemLeakDIEntry); + PVR_GOTO_IF_ERROR(eError, return_error_); + } +#endif /* SUPPORT_VALIDATION */ + +#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) + { + DI_ITERATOR_CB sIterator = { + .pfnShow = DebugLevelDIShow, + .pfnWrite = DebugLevelSet + }; + eError = DICreateEntry("debug_level", NULL, &sIterator, NULL, + DI_ENTRY_TYPE_GENERIC, &gpsDebugLevelDIEntry); + PVR_GOTO_IF_ERROR(eError, return_error_); + } +#endif /* defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) */ + + return PVRSRV_OK; + +return_error_: + DebugCommonDeInit(); + + return eError; +} + +void DebugCommonDeInit(void) +{ +#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) + if (gpsDebugLevelDIEntry != NULL) + { + DIDestroyEntry(gpsDebugLevelDIEntry); + } +#endif /* defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) */ + +#if defined(SUPPORT_RGX) && !defined(NO_HARDWARE) + if (ghGpuUtilUserDebugFS != NULL) + { + SORgxGpuUtilStatsUnregister(ghGpuUtilUserDebugFS); + ghGpuUtilUserDebugFS = NULL; + } +#endif /* defined(SUPPORT_RGX) && !defined(NO_HARDWARE) */ + +#ifdef SUPPORT_RGX + if (gpsFWTraceDIEntry != NULL) + { + DIDestroyEntry(gpsFWTraceDIEntry); + } + +#ifdef SUPPORT_POWER_VALIDATION_VIA_DEBUGFS + if (gpsPowMonDIEntry != NULL) + { + DIDestroyEntry(gpsPowMonDIEntry); + } +#endif /* SUPPORT_POWER_VALIDATION_VIA_DEBUGFS */ + +#ifdef SUPPORT_FIRMWARE_GCOV + if (gpsFirmwareGcovDIEntry != NULL) + { + DIDestroyEntry(gpsFirmwareGcovDIEntry); + } +#endif /* SUPPORT_FIRMWARE_GCOV */ + +#ifdef SUPPORT_POWER_SAMPLING_VIA_DEBUGFS + if (gpsPowerDataDIEntry != NULL) + { + DIDestroyEntry(gpsPowerDataDIEntry); + } +#endif /* SUPPORT_POWER_SAMPLING_VIA_DEBUGFS */ + 
+#ifdef SUPPORT_VALIDATION + if (gpsRGXRegsDIEntry != NULL) + { + DIDestroyEntry(gpsRGXRegsDIEntry); + } +#endif /* SUPPORT_VALIDATION */ +#endif /* SUPPORT_RGX */ + +#ifdef SUPPORT_VALIDATION + if (gpsTestMemLeakDIEntry != NULL) + { + DIDestroyEntry(gpsTestMemLeakDIEntry); + } +#endif /* SUPPORT_VALIDATION */ + + if (gpsDumpDebugDIEntry != NULL) + { + DIDestroyEntry(gpsDumpDebugDIEntry); + } + + if (gpsStatusDIEntry != NULL) + { + DIDestroyEntry(gpsStatusDIEntry); + } + + if (gpsVersionDIEntry != NULL) + { + DIDestroyEntry(gpsVersionDIEntry); + } +} diff --git a/drivers/gpu/drm/phytium/octopus/debug_common.h b/drivers/gpu/drm/phytium/octopus/debug_common.h new file mode 100644 index 000000000000..fb6982dd5be7 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/debug_common.h @@ -0,0 +1,51 @@ +/*************************************************************************/ /*! +@File +@Title Common debug definitions and functions. +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef DEBUG_COMMON_H +#define DEBUG_COMMON_H + +#include "pvrsrv_error.h" + +PVRSRV_ERROR DebugCommonInit(void); +void DebugCommonDeInit(void); + +#endif /* DEBUG_COMMON_H */ diff --git a/drivers/gpu/drm/phytium/octopus/device.h b/drivers/gpu/drm/phytium/octopus/device.h new file mode 100644 index 000000000000..29d80204c679 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/device.h @@ -0,0 +1,532 @@ +/**************************************************************************/ /*! +@File +@Title Common Device header +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Description Device related function templates and defines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /***************************************************************************/ + +#ifndef DEVICE_H +#define DEVICE_H + +#include "devicemem_heapcfg.h" +#include "mmu_common.h" +#include "ra.h" /* RA_ARENA */ +#include "pvrsrv_device.h" +#include "sync_checkpoint.h" +#include "srvkm.h" +#include "physheap.h" +#include "sync_internal.h" +#include "sysinfo.h" +#include "dllist.h" + +#include "rgx_bvnc_defs_km.h" + +#include "lock.h" + +#include "power.h" + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +#include "virt_validation_defs.h" +#endif + +typedef struct _PVRSRV_POWER_DEV_TAG_ *PPVRSRV_POWER_DEV; + +struct SYNC_RECORD; + +struct _CONNECTION_DATA_; + +/*************************************************************************/ /*! + @Function AllocUFOBlockCallback + @Description Device specific callback for allocation of a UFO block + + @Input psDeviceNode Pointer to device node to allocate + the UFO for. 
+ @Output ppsMemDesc Pointer to pointer for the memdesc of + the allocation + @Output pui32SyncAddr FW Base address of the UFO block + @Output puiSyncPrimBlockSize Size of the UFO block + + @Return PVRSRV_OK if allocation was successful +*/ /**************************************************************************/ +typedef PVRSRV_ERROR (*AllocUFOBlockCallback)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, + DEVMEM_MEMDESC **ppsMemDesc, + IMG_UINT32 *pui32SyncAddr, + IMG_UINT32 *puiSyncPrimBlockSize); + +/*************************************************************************/ /*! + @Function FreeUFOBlockCallback + @Description Device specific callback for freeing of a UFO + + @Input psDeviceNode Pointer to device node that the UFO block was + allocated from. + @Input psMemDesc Pointer to pointer for the memdesc of the UFO + block to free. +*/ /**************************************************************************/ +typedef void (*FreeUFOBlockCallback)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, + DEVMEM_MEMDESC *psMemDesc); + +typedef struct _PVRSRV_DEVICE_IDENTIFIER_ +{ + /* Pdump memory and register bank names */ + IMG_CHAR *pszPDumpDevName; + IMG_CHAR *pszPDumpRegName; + + /* Under Linux, this is the minor number of RenderNode corresponding to this Device */ + IMG_INT32 i32OsDeviceID; + /* Services layer enumeration of the device used in pvrdebug */ + IMG_UINT32 ui32InternalID; +} PVRSRV_DEVICE_IDENTIFIER; + +typedef struct _DEVICE_MEMORY_INFO_ +{ + /* Heap count. Doesn't include additional heaps from PVRSRVCreateDeviceMemHeap */ + IMG_UINT32 ui32HeapCount; + + /* Blueprints for creating new device memory contexts */ + IMG_UINT32 uiNumHeapConfigs; + DEVMEM_HEAP_CONFIG *psDeviceMemoryHeapConfigArray; + DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeap; +} DEVICE_MEMORY_INFO; + + +typedef struct _PG_HANDLE_ +{ + union + { + void *pvHandle; + IMG_UINT64 ui64Handle; + }u; + /* The allocation order is log2 value of the number of pages to allocate. 
+ * As such this is a correspondingly small value. E.g, for order 4 we + * are talking 2^4 * PAGE_SIZE contiguous allocation. + * DevPxAlloc API does not need to support orders higher than 4. + */ +#if defined(SUPPORT_GPUVIRT_VALIDATION) + IMG_BYTE uiOrder; /* Order of the corresponding allocation */ + IMG_BYTE uiOSid; /* OSid to use for allocation arena. + * Connection-specific. */ + IMG_BYTE uiPad1, + uiPad2; /* Spare */ +#else + IMG_BYTE uiOrder; /* Order of the corresponding allocation */ + IMG_BYTE uiPad1, + uiPad2, + uiPad3; /* Spare */ +#endif +} PG_HANDLE; + +#define MMU_BAD_PHYS_ADDR (0xbadbad00badULL) +#define DUMMY_PAGE ("DUMMY_PAGE") +#define DEV_ZERO_PAGE ("DEV_ZERO_PAGE") +#define PVR_DUMMY_PAGE_INIT_VALUE (0x0) +#define PVR_ZERO_PAGE_INIT_VALUE (0x0) + +typedef struct __DEFAULT_PAGE__ +{ + /*Page handle for the page allocated (UMA/LMA)*/ + PG_HANDLE sPageHandle; + POS_LOCK psPgLock; + ATOMIC_T atRefCounter; + /*Default page size in terms of log2 */ + IMG_UINT32 ui32Log2PgSize; + IMG_UINT64 ui64PgPhysAddr; +#if defined(PDUMP) + IMG_HANDLE hPdumpPg; +#endif +} PVRSRV_DEF_PAGE; + +typedef enum _PVRSRV_DEVICE_STATE_ +{ + PVRSRV_DEVICE_STATE_UNDEFINED = 0, + PVRSRV_DEVICE_STATE_INIT, + PVRSRV_DEVICE_STATE_ACTIVE, + PVRSRV_DEVICE_STATE_DEINIT, + PVRSRV_DEVICE_STATE_BAD, +} PVRSRV_DEVICE_STATE; + +typedef enum _PVRSRV_DEVICE_HEALTH_STATUS_ +{ + PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED = 0, + PVRSRV_DEVICE_HEALTH_STATUS_OK, + PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING, + PVRSRV_DEVICE_HEALTH_STATUS_DEAD, + PVRSRV_DEVICE_HEALTH_STATUS_FAULT +} PVRSRV_DEVICE_HEALTH_STATUS; + +typedef enum _PVRSRV_DEVICE_HEALTH_REASON_ +{ + PVRSRV_DEVICE_HEALTH_REASON_NONE = 0, + PVRSRV_DEVICE_HEALTH_REASON_ASSERTED, + PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING, + PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS, + PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT, + PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED, + PVRSRV_DEVICE_HEALTH_REASON_IDLING, + PVRSRV_DEVICE_HEALTH_REASON_RESTARTING, + 
PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS +} PVRSRV_DEVICE_HEALTH_REASON; + +typedef enum _PVRSRV_DEVICE_DEBUG_DUMP_STATUS_ +{ + PVRSRV_DEVICE_DEBUG_DUMP_NONE = 0, + PVRSRV_DEVICE_DEBUG_DUMP_CAPTURE +} PVRSRV_DEVICE_DEBUG_DUMP_STATUS; + +typedef struct _MMU_PX_SETUP_ +{ +#if defined(SUPPORT_GPUVIRT_VALIDATION) + PVRSRV_ERROR (*pfnDevPxAllocGPV)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, size_t uiSize, + PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr, + IMG_UINT32 ui32OSid, IMG_PID uiPid); +#endif + PVRSRV_ERROR (*pfnDevPxAlloc)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, size_t uiSize, + PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr, + IMG_PID uiPid); + + void (*pfnDevPxFree)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, PG_HANDLE *psMemHandle); + + PVRSRV_ERROR (*pfnDevPxMap)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, PG_HANDLE *pshMemHandle, + size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr, + void **pvPtr); + + void (*pfnDevPxUnMap)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, + PG_HANDLE *psMemHandle, void *pvPtr); + + PVRSRV_ERROR (*pfnDevPxClean)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, + PG_HANDLE *pshMemHandle, + IMG_UINT32 uiOffset, + IMG_UINT32 uiLength); + + IMG_UINT32 uiMMUPxLog2AllocGran; + + RA_ARENA *psPxRA; +} MMU_PX_SETUP; + +typedef struct _PVRSRV_DEVICE_NODE_ +{ + PVRSRV_DEVICE_IDENTIFIER sDevId; + + PVRSRV_DEVICE_STATE eDevState; + PVRSRV_DEVICE_FABRIC_TYPE eDevFabricType; + + ATOMIC_T eHealthStatus; /* Holds values from PVRSRV_DEVICE_HEALTH_STATUS */ + ATOMIC_T eHealthReason; /* Holds values from PVRSRV_DEVICE_HEALTH_REASON */ + ATOMIC_T eDebugDumpRequested; /* Holds values from PVRSRV_DEVICE_DEBUG_DUMP_STATUS */ + + IMG_HANDLE *hDebugTable; + + /* device specific MMU attributes */ + MMU_DEVICEATTRIBS *psMMUDevAttrs; + /* Device specific MMU firmware attributes, used only in some devices */ + MMU_DEVICEATTRIBS *psFirmwareMMUDevAttrs; + + MMU_PX_SETUP sDevMMUPxSetup; + + /* lock for power state transitions */ + POS_LOCK hPowerLock; + IMG_PID 
uiPwrLockOwnerPID; /* Only valid between lock and corresponding unlock + operations of hPowerLock */ + + /* current system device power state */ + PVRSRV_SYS_POWER_STATE eCurrentSysPowerState; + PPVRSRV_POWER_DEV psPowerDev; + + /* multicore configuration information */ + IMG_UINT32 ui32MultiCoreNumCores; /* total cores primary + secondaries. 0 for non-multi core */ + IMG_UINT32 ui32MultiCorePrimaryId; /* primary core id for this device */ + IMG_UINT64 *pui64MultiCoreCapabilities; /* capabilities for each core */ + + /* + callbacks the device must support: + */ + + PVRSRV_ERROR (*pfnDevSLCFlushRange)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, + MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + IMG_BOOL bInvalidate); + + PVRSRV_ERROR (*pfnInvalFBSCTable)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, + MMU_CONTEXT *psMMUContext, + IMG_UINT64 ui64FBSCEntries); + + PVRSRV_ERROR (*pfnValidateOrTweakPhysAddrs)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, + MMU_DEVICEATTRIBS *psDevAttrs, + IMG_UINT64 *pui64Addr); + + void (*pfnMMUCacheInvalidate)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, + MMU_CONTEXT *psMMUContext, + MMU_LEVEL eLevel, + IMG_BOOL bUnmap); + + PVRSRV_ERROR (*pfnMMUCacheInvalidateKick)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, + IMG_UINT32 *pui32NextMMUInvalidateUpdate); + + IMG_UINT32 (*pfnMMUCacheGetInvalidateCounter)(struct _PVRSRV_DEVICE_NODE_ *psDevNode); + + + void (*pfnDumpDebugInfo)(struct _PVRSRV_DEVICE_NODE_ *psDevNode); + + PVRSRV_ERROR (*pfnUpdateHealthStatus)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, + IMG_BOOL bIsTimerPoll); + +#if defined(SUPPORT_AUTOVZ) + void (*pfnUpdateAutoVzWatchdog)(struct _PVRSRV_DEVICE_NODE_ *psDevNode); +#endif + + PVRSRV_ERROR (*pfnValidationGPUUnitsPowerChange)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT32 ui32NewState); + + PVRSRV_ERROR (*pfnResetHWRLogs)(struct _PVRSRV_DEVICE_NODE_ *psDevNode); + + PVRSRV_ERROR (*pfnVerifyBVNC)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT64 ui64GivenBVNC, 
IMG_UINT64 ui64CoreIdMask); + + /* Method to drain device HWPerf packets from firmware buffer to host buffer */ + PVRSRV_ERROR (*pfnServiceHWPerf)(struct _PVRSRV_DEVICE_NODE_ *psDevNode); + + PVRSRV_ERROR (*pfnDeviceVersionString)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_CHAR **ppszVersionString); + + PVRSRV_ERROR (*pfnDeviceClockSpeed)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_PUINT32 pui32RGXClockSpeed); + + PVRSRV_ERROR (*pfnSoftReset)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT64 ui64ResetValue1, IMG_UINT64 ui64ResetValue2); + + PVRSRV_ERROR (*pfnAlignmentCheck)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT32 ui32FWAlignChecksSize, IMG_UINT32 aui32FWAlignChecks[]); + IMG_BOOL (*pfnCheckDeviceFeature)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT64 ui64FeatureMask); + + IMG_INT32 (*pfnGetDeviceFeatureValue)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, enum _RGX_FEATURE_WITH_VALUE_INDEX_ eFeatureIndex); + + PVRSRV_ERROR (*pfnGetMultiCoreInfo)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT32 ui32CapsSize, + IMG_UINT32 *pui32NumCores, IMG_UINT64 *pui64Caps); + + IMG_BOOL (*pfnHasFBCDCVersion31)(struct _PVRSRV_DEVICE_NODE_ *psDevNode); + + MMU_DEVICEATTRIBS* (*pfnGetMMUDeviceAttributes)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_BOOL bKernelMemoryCtx); + + PVRSRV_DEVICE_CONFIG *psDevConfig; + + /* device post-finalise compatibility check */ + PVRSRV_ERROR (*pfnInitDeviceCompatCheck) (struct _PVRSRV_DEVICE_NODE_*); + + /* initialise device-specific physheaps */ + PVRSRV_ERROR (*pfnPhysMemDeviceHeapsInit) (struct _PVRSRV_DEVICE_NODE_ *); + + /* initialise fw mmu, if FW not using GPU mmu, NULL otherwise. 
*/ + PVRSRV_ERROR (*pfnFwMMUInit) (struct _PVRSRV_DEVICE_NODE_ *); + + /* information about the device's address space and heaps */ + DEVICE_MEMORY_INFO sDevMemoryInfo; + + /* device's shared-virtual-memory heap max virtual address */ + IMG_UINT64 ui64GeneralSVMHeapTopVA; + + ATOMIC_T iNumClockSpeedChanges; + + /* private device information */ + void *pvDevice; + +#if defined(SUPPORT_GPUVIRT_VALIDATION) + RA_ARENA *psOSSharedArena; + RA_ARENA *psOSidSubArena[GPUVIRT_VALIDATION_NUM_OS]; + /* Number of supported OSid for this device node given available memory */ + IMG_UINT32 ui32NumOSId; +#endif + + /* FW_MAIN, FW_CONFIG and FW_GUEST heaps. Should be part of registered heaps? */ + PHYS_HEAP *psFWMainPhysHeap; + PHYS_HEAP *psFWCfgPhysHeap; + PHYS_HEAP *apsFWPremapPhysHeap[RGX_NUM_OS_SUPPORTED]; + + IMG_UINT32 ui32RegisteredPhysHeaps; + PHYS_HEAP **papsRegisteredPhysHeaps; + + /* PHYS_HEAP Mapping table to the platform's physical memory heap(s) + * used by this device. The physical heaps are created based on + * the PHYS_HEAP_CONFIG data from the platform's system layer at device + * creation time. + * + * The first entry (apsPhysHeap[PVRSRV_PHYS_HEAP_GPU_LOCAL]) will be used for allocations + * where the phys heap hint CPU_LOCAL flag is not set. Normally this will be an LMA heap + * (but the device configuration could specify a UMA heap here, if desired) + * The second entry (apsPhysHeap[PVRSRV_PHYS_HEAP_CPU_LOCAL]) will be used for allocations + * where the phys heap hint CPU_LOCAL flag is set. 
Normally this will be a UMA heap + * (but the configuration could specify an LMA heap here, if desired) + * The third entry (apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]) will be used for allocations + * where the memalloc flags phys heap hint is MAIN,CONFIG or RAW; this is used when virtualization is enabled + * The device configuration will always specify two physical heap IDs - in the event of the device + * only using one physical heap, both of these IDs will be the same, and hence both pointers below + * will also be the same; when virtualization is enabled the device configuration specifies + * three physical heap IDs, the last being for PVRSRV_PHYS_HEAP_FW_MAIN allocations + */ + PHYS_HEAP *apsPhysHeap[PVRSRV_PHYS_HEAP_LAST]; + + /* RA reserved for storing the MMU mappings of firmware. + * The memory backing up this RA must persist between driver or OS reboots */ + RA_ARENA *psFwMMUReservedMemArena; + + /* Flag indicating if the firmware has been initialised during the + * 1st boot of the Host driver according to the AutoVz life-cycle. 
*/ + IMG_BOOL bAutoVzFwIsUp; + + struct _PVRSRV_DEVICE_NODE_ *psNext; + struct _PVRSRV_DEVICE_NODE_ **ppsThis; + + /* Functions for notification about memory contexts */ + PVRSRV_ERROR (*pfnRegisterMemoryContext)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, + MMU_CONTEXT *psMMUContext, + IMG_HANDLE *hPrivData); + void (*pfnUnregisterMemoryContext)(IMG_HANDLE hPrivData); + + /* Functions for allocation/freeing of UFOs */ + AllocUFOBlockCallback pfnAllocUFOBlock; /*!< Callback for allocation of a block of UFO memory */ + FreeUFOBlockCallback pfnFreeUFOBlock; /*!< Callback for freeing of a block of UFO memory */ + + IMG_HANDLE hSyncServerNotify; + POS_LOCK hSyncServerListLock; + DLLIST_NODE sSyncServerSyncsList; + + IMG_HANDLE hSyncServerRecordNotify; + POS_LOCK hSyncServerRecordLock; + IMG_UINT32 ui32SyncServerRecordCount; + IMG_UINT32 ui32SyncServerRecordCountHighWatermark; + DLLIST_NODE sSyncServerRecordList; + struct SYNC_RECORD *apsSyncServerRecordsFreed[PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN]; + IMG_UINT32 uiSyncServerRecordFreeIdx; + + IMG_HANDLE hSyncCheckpointRecordNotify; + POS_LOCK hSyncCheckpointRecordLock; + IMG_UINT32 ui32SyncCheckpointRecordCount; + IMG_UINT32 ui32SyncCheckpointRecordCountHighWatermark; + DLLIST_NODE sSyncCheckpointRecordList; + struct SYNC_CHECKPOINT_RECORD *apsSyncCheckpointRecordsFreed[PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN]; + IMG_UINT32 uiSyncCheckpointRecordFreeIdx; + + IMG_HANDLE hSyncCheckpointNotify; + POS_SPINLOCK hSyncCheckpointListLock; /*!< Protects sSyncCheckpointSyncsList */ + DLLIST_NODE sSyncCheckpointSyncsList; + + PSYNC_CHECKPOINT_CONTEXT hSyncCheckpointContext; + PSYNC_PRIM_CONTEXT hSyncPrimContext; + + /* With this sync-prim we make sure the MMU cache is flushed + * before we free the page table memory */ + PVRSRV_CLIENT_SYNC_PRIM *psMMUCacheSyncPrim; + IMG_UINT32 ui32NextMMUInvalidateUpdate; + + IMG_HANDLE hCmdCompNotify; + IMG_HANDLE hDbgReqNotify; + IMG_HANDLE hHtbDbgReqNotify; + IMG_HANDLE hAppHintDbgReqNotify; + 
IMG_HANDLE hThreadsDbgReqNotify; + + PVRSRV_DEF_PAGE sDummyPage; + PVRSRV_DEF_PAGE sDevZeroPage; + + POSWR_LOCK hMemoryContextPageFaultNotifyListLock; + DLLIST_NODE sMemoryContextPageFaultNotifyListHead; + + /* System DMA capability */ + IMG_BOOL bHasSystemDMA; + IMG_HANDLE hDmaTxChan; + IMG_HANDLE hDmaRxChan; + +#if defined(PDUMP) + /* + * FBC clear color register default value to use. + */ + IMG_UINT64 ui64FBCClearColour; + + /* Device-level callback which is called when pdump.exe starts. + * Should be implemented in device-specific init code, e.g. rgxinit.c + */ + PVRSRV_ERROR (*pfnPDumpInitDevice)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); + /* device-level callback to return pdump ID associated to a memory context */ + IMG_UINT32 (*pfnMMUGetContextID)(IMG_HANDLE hDevMemContext); + + IMG_UINT8 *pui8DeferredSyncCPSignal; /*! Deferred fence events buffer */ + + IMG_UINT16 ui16SyncCPReadIdx; /*! Read index in the above deferred fence events buffer */ + + IMG_UINT16 ui16SyncCPWriteIdx; /*! Write index in the above deferred fence events buffer */ + + POS_LOCK hSyncCheckpointSignalLock; /*! Guards data shared between an sleepable-contexts */ + + void *pvSyncCPMISR; /*! MISR to emit pending/deferred fence signals */ + + void *hTransition; /*!< SyncCheckpoint PdumpTransition Cookie */ + + DLLIST_NODE sSyncCheckpointContextListHead; /*!< List head for the sync chkpt contexts */ + + POS_LOCK hSyncCheckpointContextListLock; /*! 
lock for accessing sync chkpt contexts list */ + +#endif + +#if defined(SUPPORT_VALIDATION) + POS_LOCK hValidationLock; +#endif + + POS_LOCK hConnectionsLock; /*!< Lock protecting sConnections */ + DLLIST_NODE sConnections; /*!< The list of currently active connection objects for this device node */ +#if defined(PVRSRV_DEBUG_LISR_EXECUTION) + IMG_UINT64 ui64nLISR; /*!< Number of LISR calls seen */ + IMG_UINT64 ui64nMISR; /*!< Number of MISR calls made */ +#endif +} PVRSRV_DEVICE_NODE; + +/* + * Macros to be used instead of calling directly the pfns since these macros + * will expand the feature passed as argument into the bitmask/index to work + * with the macros defined in rgx_bvnc_defs_km.h + */ +#define PVRSRV_IS_FEATURE_SUPPORTED(psDevNode, Feature) \ + psDevNode->pfnCheckDeviceFeature(psDevNode, RGX_FEATURE_##Feature##_BIT_MASK) +#define PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevNode, Feature) \ + psDevNode->pfnGetDeviceFeatureValue(psDevNode, RGX_FEATURE_##Feature##_IDX) + +PVRSRV_ERROR PVRSRVDeviceFinalise(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_BOOL bInitSuccessful); + +PVRSRV_ERROR PVRSRVDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode); + +PVRSRV_ERROR RGXClientConnectCompatCheck_ClientAgainstFW(PVRSRV_DEVICE_NODE * psDeviceNode, IMG_UINT32 ui32ClientBuildOptions); + + +#endif /* DEVICE_H */ + +/****************************************************************************** + End of file (device.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/device_connection.h b/drivers/gpu/drm/phytium/octopus/device_connection.h new file mode 100644 index 000000000000..359080b69d12 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/device_connection.h @@ -0,0 +1,123 @@ +/*************************************************************************/ /*! +@File device_connection.h +@Title +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Description +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(DEVICE_CONNECTION_H) +#define DEVICE_CONNECTION_H + +#include "img_types.h" +#include "img_defs.h" + +#if defined(__KERNEL__) +typedef struct _PVRSRV_DEVICE_NODE_ *SHARED_DEV_CONNECTION; +#else +#include "connection.h" +typedef const struct PVRSRV_DEV_CONNECTION_TAG *SHARED_DEV_CONNECTION; +#endif + +/****************************************************************************** + * Device capability flags and masks + * + * Following bitmask shows allocated ranges and values for our device + * capability settings: + * + * 31 27 23 19 15 11 7 3 0 + * |...|...|...|...|...|...|...|... + * ** CACHE_COHERENT [0x1..0x2] + * x PVRSRV_CACHE_COHERENT_DEVICE_FLAG + * x. PVRSRV_CACHE_COHERENT_CPU_FLAG + * *... NONMAPPABLE_MEMORY [0x8] + * x... PVRSRV_NONMAPPABLE_MEMORY_PRESENT_FLAG + * *.... PDUMP_IS_RECORDING [0x10] + * x.... PVRSRV_PDUMP_IS_RECORDING + * ***........ DEVMEM_SVM_ALLOC [0x100..0x400] + * x........ PVRSRV_DEVMEM_SVM_ALLOC_UNSUPPORTED + * x......... PVRSRV_DEVMEM_SVM_ALLOC_SUPPORTED + * x.......... PVRSRV_DEVMEM_SVM_ALLOC_CANFAIL + * *........... FBCDC_V3_1 [0x800] + * x........... FBCDC_V3_1_USED + * *............ PVRSRV_SYSTEM_DMA + * x............ PVRSRV_SYSTEM_DMA_USED + * |...|...|...|...|...|...|...|... 
+ *****************************************************************************/ + +/* Flag to be passed over the bridge during connection stating whether CPU cache coherent is available*/ +#define PVRSRV_CACHE_COHERENT_SHIFT (0) +#define PVRSRV_CACHE_COHERENT_DEVICE_FLAG (1U << PVRSRV_CACHE_COHERENT_SHIFT) +#define PVRSRV_CACHE_COHERENT_CPU_FLAG (2U << PVRSRV_CACHE_COHERENT_SHIFT) +#define PVRSRV_CACHE_COHERENT_EMULATE_FLAG (4U << PVRSRV_CACHE_COHERENT_SHIFT) +#define PVRSRV_CACHE_COHERENT_MASK (7U << PVRSRV_CACHE_COHERENT_SHIFT) + +/* Flag to be passed over the bridge during connection stating whether CPU non-mappable memory is present */ +#define PVRSRV_NONMAPPABLE_MEMORY_PRESENT_SHIFT (7) +#define PVRSRV_NONMAPPABLE_MEMORY_PRESENT_FLAG (1U << PVRSRV_NONMAPPABLE_MEMORY_PRESENT_SHIFT) + +/* Flag to be passed over the bridge to indicate PDump activity */ +#define PVRSRV_PDUMP_IS_RECORDING_SHIFT (4) +#define PVRSRV_PDUMP_IS_RECORDING (1U << PVRSRV_PDUMP_IS_RECORDING_SHIFT) + +/* Flag to be passed over the bridge during connection stating SVM allocation availability */ +#define PVRSRV_DEVMEM_SVM_ALLOC_SHIFT (8) +#define PVRSRV_DEVMEM_SVM_ALLOC_UNSUPPORTED (1U << PVRSRV_DEVMEM_SVM_ALLOC_SHIFT) +#define PVRSRV_DEVMEM_SVM_ALLOC_SUPPORTED (2U << PVRSRV_DEVMEM_SVM_ALLOC_SHIFT) +#define PVRSRV_DEVMEM_SVM_ALLOC_CANFAIL (4U << PVRSRV_DEVMEM_SVM_ALLOC_SHIFT) + +/* Flag to be passed over the bridge during connection stating whether GPU uses FBCDC v3.1 */ +#define PVRSRV_FBCDC_V3_1_USED_SHIFT (11) +#define PVRSRV_FBCDC_V3_1_USED (1U << PVRSRV_FBCDC_V3_1_USED_SHIFT) + +/* Flag to be passed over the bridge during connection stating whether System has + DMA transfer capability to and from device memory */ +#define PVRSRV_SYSTEM_DMA_SHIFT (12) +#define PVRSRV_SYSTEM_DMA_USED (1U << PVRSRV_SYSTEM_DMA_SHIFT) + +static INLINE IMG_HANDLE GetBridgeHandle(SHARED_DEV_CONNECTION hDevConnection) +{ +#if defined(__KERNEL__) + return hDevConnection; +#else + return hDevConnection->hServices; 
+#endif +} + + +#endif /* !defined(DEVICE_CONNECTION_H) */ diff --git a/drivers/gpu/drm/phytium/octopus/devicemem.c b/drivers/gpu/drm/phytium/octopus/devicemem.c new file mode 100644 index 000000000000..866d4c599217 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/devicemem.c @@ -0,0 +1,2981 @@ +/*************************************************************************/ /*! +@File +@Title Device Memory Management +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Front End (nominally Client side part, but now invokable + from server too) of device memory management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ /**************************************************************************/ + +#include "devicemem.h" +#include "img_types.h" +#include "img_defs.h" +#include "pvr_debug.h" +#include "pvrsrv_error.h" +#include "allocmem.h" +#include "ra.h" +#include "osfunc.h" +#include "osmmap.h" +#include "devicemem_utils.h" +#include "client_mm_bridge.h" +#include "client_cache_bridge.h" +#include "services_km.h" +#include "pvrsrv_memallocflags_internal.h" + +#if defined(PDUMP) +#if defined(__KERNEL__) +#include "pdump_km.h" +#else +#include "pdump_um.h" +#endif +#include "devicemem_pdump.h" +#endif +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) +#include "client_ri_bridge.h" +#endif +#include "client_devicememhistory_bridge.h" +#include "info_page_client.h" + +#include "rgx_heaps.h" +#if defined(__KERNEL__) +#include "pvrsrv.h" +#include "rgxdefs_km.h" +#include "rgx_bvnc_defs_km.h" +#include "device.h" +#include "rgxdevice.h" +#include "pvr_ricommon.h" +#include "pvrsrv_apphint.h" +#include "oskm_apphint.h" +#if defined(__linux__) +#include "linux/kernel.h" +#endif +#else +#include "rgxdefs.h" +#endif + +#if defined(__KERNEL__) && defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) +extern PVRSRV_ERROR RIDumpAllKM(void); +#endif + +#if 
defined(__KERNEL__) +#define GET_ERROR_STRING(eError) PVRSRVGetErrorString(eError) +#else +#define GET_ERROR_STRING(eError) PVRSRVGetErrorString(eError) +#endif + +#if defined(__KERNEL__) +/* Derive the virtual from the hPMR */ +static +IMG_UINT64 _GetPremappedVA(PMR *psPMR, PVRSRV_DEVICE_NODE *psDevNode) +{ + PVRSRV_ERROR eError; + IMG_UINT64 ui64OptionalMapAddress = DEVICEMEM_UTILS_NO_ADDRESS; + + IMG_DEV_PHYADDR sDevAddr; + IMG_BOOL bValid; + PVRSRV_PHYS_HEAP eFirstHeap = (PVRSRV_VZ_MODE_IS(GUEST) ? PVRSRV_PHYS_HEAP_FW_CONFIG : PVRSRV_PHYS_HEAP_FW_MAIN); + PHYS_HEAP *psPhysHeap = psDevNode->apsPhysHeap[eFirstHeap]; + IMG_DEV_PHYADDR sHeapAddr; + + eError = PhysHeapGetDevPAddr(psPhysHeap, &sHeapAddr); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapGetDevPAddr", fail); + +#if defined(PVR_PMR_TRANSLATE_UMA_ADDRESSES) +{ + if (PhysHeapGetType(psPhysHeap) == PHYS_HEAP_TYPE_UMA || + PhysHeapGetType(psPhysHeap) == PHYS_HEAP_TYPE_DMA) + { + IMG_DEV_PHYADDR sDevPAddrCorrected; + + PhysHeapCpuPAddrToDevPAddr(psPhysHeap, 1, &sDevPAddrCorrected, (IMG_CPU_PHYADDR *)&sHeapAddr); + sHeapAddr.uiAddr = sDevPAddrCorrected.uiAddr; + } +} +#endif + + eError = PMRLockSysPhysAddresses(psPMR); + PVR_LOG_GOTO_IF_ERROR(eError, "PMRLockSysPhysAddr", fail); + + eError = PMR_DevPhysAddr(psPMR, OSGetPageShift(), 1, 0, &sDevAddr, &bValid); + if (eError != PVRSRV_OK) + { + PVR_LOG_IF_ERROR(eError, "PMR_DevPhysAddr"); + eError = PMRUnlockSysPhysAddresses(psPMR); + PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddr"); + goto fail; + } + + eError = PMRUnlockSysPhysAddresses(psPMR); + PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddr"); + + ui64OptionalMapAddress = RGX_FIRMWARE_RAW_HEAP_BASE | (sDevAddr.uiAddr - sHeapAddr.uiAddr); + + PVR_DPF((PVR_DBG_ALLOC, "%s: sDevAddr.uiAddr = 0x%"IMG_UINT64_FMTSPECx" sHeapAddr.uiAddr = 0x%"IMG_UINT64_FMTSPECx" => ui64OptionalMapAddress = 0x%"IMG_UINT64_FMTSPECx, + __func__, sDevAddr.uiAddr, sHeapAddr.uiAddr, ui64OptionalMapAddress)); +fail: + return 
ui64OptionalMapAddress; +} +#endif + +/***************************************************************************** + * Sub allocation internals * + *****************************************************************************/ +static INLINE PVRSRV_MEMALLOCFLAGS_T +DevmemOverrideFlagsOrPassThrough(SHARED_DEV_CONNECTION hDevConnection, PVRSRV_MEMALLOCFLAGS_T uiFlags) +{ +#if defined(__KERNEL__) && defined(RGX_FEATURE_GPU_CPU_COHERENCY) + /* + * Override the requested memory flags of FW allocations only, + * non-FW allocations pass-through unmodified. + * + * On fully coherent platforms: + * - We upgrade uncached, CPU-only cached or GPU-only cached to + * full coherency. This gives caching improvements for free. + * + * On ace-lite platforms: + * - If the allocation is not CPU cached, then there is nothing + * for the GPU to snoop regardless of the GPU cache setting. + * - If the allocation is not GPU cached, then the SLC will not + * be used and will not snoop the CPU even if it is CPU cached. + * - Therefore only the GPU setting can be upgraded to coherent + * if it is already GPU cached incoherent and the CPU is cached. + * + * All other platforms: + * - Do not modify the allocation flags. + */ + if (PVRSRV_CHECK_FW_MAIN(uiFlags)) + { + PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDevConnection; + + if (PVRSRVSystemSnoopingOfDeviceCache(psDevNode->psDevConfig) && + PVRSRVSystemSnoopingOfCPUCache(psDevNode->psDevConfig)) + { + /* Clear existing flags, mark the allocation as fully coherent. 
*/ + uiFlags &= ~(PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK | PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK); + uiFlags |= PVRSRV_MEMALLOCFLAG_CACHE_COHERENT; + } + else if ((PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags) || PVRSRV_CHECK_CPU_CACHE_INCOHERENT(uiFlags)) && + (PVRSRV_CHECK_GPU_CACHE_INCOHERENT(uiFlags)) && + PVRSRVSystemSnoopingOfCPUCache(psDevNode->psDevConfig) && + psDevNode->eDevFabricType == PVRSRV_DEVICE_FABRIC_ACELITE) + { + /* Upgrade the allocation from GPU cached incoherent to GPU cached coherent. */ + uiFlags &= ~PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK; + uiFlags |= PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT; + } + } +#else + PVR_UNREFERENCED_PARAMETER(hDevConnection); +#endif + + return uiFlags; +} + +static INLINE void +CheckAnnotationLength(const IMG_CHAR *pszAnnotation) +{ + IMG_UINT32 length = OSStringLength(pszAnnotation); + + if (length >= DEVMEM_ANNOTATION_MAX_LEN) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Annotation \"%s\" has been truncated to %d characters from %d characters", + __func__, pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN - 1, length)); + } +} + +static PVRSRV_ERROR +AllocateDeviceMemory(SHARED_DEV_CONNECTION hDevConnection, + IMG_UINT32 uiLog2Quantum, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_DEVMEM_ALIGN_T uiAlign, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_BOOL bExportable, + const IMG_CHAR *pszAnnotation, + DEVMEM_IMPORT **ppsImport) +{ + DEVMEM_IMPORT *psImport; + PVRSRV_MEMALLOCFLAGS_T uiPMRFlags; + IMG_HANDLE hPMR; + PVRSRV_ERROR eError; + + eError = DevmemImportStructAlloc(hDevConnection, + &psImport); + PVR_GOTO_IF_ERROR(eError, failAlloc); + + /* check if shift value is not too big (sizeof(1ULL)) */ + PVR_ASSERT(uiLog2Quantum < sizeof(unsigned long long) * 8); + /* Check the size is a multiple of the quantum */ + PVR_ASSERT((uiSize & ((1ULL<psImport; + SHARED_DEV_CONNECTION hDevConnection; + IMG_HANDLE hPMR; + 
IMG_HANDLE hSrvDevMemHeap; + POS_LOCK hLock; + IMG_DEV_VIRTADDR sDevVAddr; + IMG_CPU_VIRTADDR pvCpuVAddr; + DEVMEM_PROPERTIES_T uiProperties; + + if (NULL == psImport) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Sparse memory import", __func__)); + goto e0; + } + + hDevConnection = psImport->hDevConnection; + hPMR = psImport->hPMR; + hLock = psImport->hLock; + sDevVAddr = psImport->sDeviceImport.sDevVAddr; + pvCpuVAddr = psImport->sCPUImport.pvCPUVAddr; + + if (NULL == hDevConnection) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Bridge handle", __func__)); + goto e0; + } + + if (NULL == hPMR) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid PMR handle", __func__)); + goto e0; + } + + if ((uiSparseFlags & SPARSE_RESIZE_BOTH) && (0 == sDevVAddr.uiAddr)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Device Virtual Map", __func__)); + goto e0; + } + + if ((uiSparseFlags & SPARSE_MAP_CPU_ADDR) && (NULL == pvCpuVAddr)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid CPU Virtual Map", __func__)); + goto e0; + } + + uiProperties = GetImportProperties(psMemDesc->psImport); + + if (uiProperties & DEVMEM_PROPERTIES_SECURE) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Secure buffers currently do not support sparse changes", + __func__)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto e0; + } + + if (uiProperties & DEVMEM_PROPERTIES_NO_LAYOUT_CHANGE) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: This memory descriptor doesn't support sparse changes", + __func__)); + eError = PVRSRV_ERROR_INVALID_REQUEST; + goto e0; + } + +#ifdef PVRSRV_UNMAP_ON_SPARSE_CHANGE + if (psMemDesc->sCPUMemDesc.ui32RefCount > 0) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: This memory descriptor is mapped more than once (refcnt: %u)into " + "CPU Address space.\nRelease all CPU maps of this object and retry...", + __func__, psMemDesc->sCPUMemDesc.ui32RefCount)); + eError = PVRSRV_ERROR_OBJECT_STILL_REFERENCED; + goto e0; + } +#endif + + hSrvDevMemHeap = psImport->sDeviceImport.psHeap->hDevMemServerHeap; + + OSLockAcquire(hLock); + + eError = 
BridgeChangeSparseMem(GetBridgeHandle(hDevConnection), + hSrvDevMemHeap, + hPMR, + ui32AllocPageCount, + paui32AllocPageIndices, + ui32FreePageCount, + pauiFreePageIndices, + uiSparseFlags, + psImport->uiFlags, + sDevVAddr, + (IMG_UINT64)((uintptr_t)pvCpuVAddr)); + + OSLockRelease(hLock); + + if (eError != PVRSRV_OK) + { + goto e0; + } + + if (GetInfoPageDebugFlags(psMemDesc->psImport->hDevConnection) & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) + { + BridgeDevicememHistorySparseChange(GetBridgeHandle(psMemDesc->psImport->hDevConnection), + psMemDesc->psImport->hPMR, + psMemDesc->uiOffset, + psMemDesc->sDeviceMemDesc.sDevVAddr, + psMemDesc->uiAllocSize, + psMemDesc->szText, + DevmemGetHeapLog2PageSize(psImport->sDeviceImport.psHeap), + ui32AllocPageCount, + paui32AllocPageIndices, + ui32FreePageCount, + pauiFreePageIndices, + psMemDesc->ui32AllocationIndex, + &psMemDesc->ui32AllocationIndex); + } + +e0: + return eError; +} + +static void +FreeDeviceMemory(DEVMEM_IMPORT *psImport) +{ + DevmemImportStructRelease(psImport); +} + +static PVRSRV_ERROR +SubAllocImportAlloc(RA_PERARENA_HANDLE hArena, + RA_LENGTH_T uiSize, + RA_FLAGS_T _flags, + const IMG_CHAR *pszAnnotation, + /* returned data */ + RA_BASE_T *puiBase, + RA_LENGTH_T *puiActualSize, + RA_PERISPAN_HANDLE *phImport) +{ + /* When suballocations need a new lump of memory, the RA calls + back here. Later, in the kernel, we must construct a new PMR + and a pairing between the new lump of virtual memory and the + PMR (whether or not such PMR is backed by physical memory) */ + DEVMEM_HEAP *psHeap; + DEVMEM_IMPORT *psImport; + IMG_DEVMEM_ALIGN_T uiAlign; + PVRSRV_ERROR eError; + IMG_UINT32 ui32MappingTable = 0; + PVRSRV_MEMALLOCFLAGS_T uiFlags = (PVRSRV_MEMALLOCFLAGS_T) _flags; + IMG_UINT64 ui64OptionalMapAddress = DEVICEMEM_UTILS_NO_ADDRESS; + + /* Per-arena private handle is, for us, the heap */ + psHeap = hArena; + + /* align to the l.s.b. of the size... e.g. 96kiB aligned to + 32kiB. 
NB: There is an argument to say that the RA should never + ask us for Non-power-of-2 size anyway, but I don't want to make + that restriction arbitrarily now */ + uiAlign = uiSize & ~(uiSize-1); + + /* Technically this is only required for guest drivers due to + fw heaps being pre-allocated and pre-mapped resulting in + a 1:1 (i.e. virtual : physical) offset correlation but we + force this behaviour for all drivers to maintain consistency + (i.e. heap->VA uiAlign <= heap->PA uiLog2Quantum) */ + if (uiAlign > (IMG_DEVMEM_ALIGN_T)(1ULL << psHeap->uiLog2Quantum)) + { + uiAlign = (IMG_DEVMEM_ALIGN_T)(1ULL << psHeap->uiLog2Quantum); + } + + /* The RA should not have invoked us with a size that is not a + multiple of the quantum anyway */ + PVR_ASSERT((uiSize & ((1ULL<uiLog2Quantum)-1)) == 0); + + eError = AllocateDeviceMemory(psHeap->psCtx->hDevConnection, + psHeap->uiLog2Quantum, + uiSize, + uiSize, + 1, + 1, + &ui32MappingTable, + uiAlign, + uiFlags, + IMG_FALSE, + "PMR sub-allocated", + &psImport); + PVR_GOTO_IF_ERROR(eError, failAlloc); + +#if defined(PDUMP) && defined(DEBUG) +#if defined(__KERNEL__) + PDUMPCOMMENTWITHFLAGS(PDUMP_CONT, + "Created PMR for sub-allocations with handle ID: 0x%p Annotation: \"%s\" (PID %u)", + psImport->hPMR, pszAnnotation, OSGetCurrentProcessID()); +#else + PDUMPCOMMENTF(psHeap->psCtx->hDevConnection, PDUMP_FLAGS_CONTINUOUS, + "Created PMR for sub-allocations with handle ID: %p Annotation: \"%s\" (PID %u)", + psImport->hPMR, pszAnnotation, OSGetCurrentProcessID()); +#endif +#else + PVR_UNREFERENCED_PARAMETER(pszAnnotation); +#endif + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psImport->hDevConnection), PVRSRV_BRIDGE_RI)) + { +#if defined(__KERNEL__) + PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)psHeap->psCtx->hDevConnection; + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDevNode->pvDevice; + + PVR_ASSERT(PVRSRV_CHECK_FW_MAIN(uiFlags)); + + /* If allocation is made by the 
Kernel from the firmware heap, account for it + * under the PVR_SYS_ALLOC_PID. + */ + if ((psHeap == psDevInfo->psFirmwareMainHeap) || (psHeap == psDevInfo->psFirmwareConfigHeap)) + { + eError = BridgeRIWritePMREntryWithOwner (GetBridgeHandle(psImport->hDevConnection), + psImport->hPMR, + PVR_SYS_ALLOC_PID); + PVR_LOG_IF_ERROR(eError, "BridgeRIWritePMREntryWithOwner"); + } + else +#endif + { + eError = BridgeRIWritePMREntry (GetBridgeHandle(psImport->hDevConnection), + psImport->hPMR); + PVR_LOG_IF_ERROR(eError, "BridgeRIWritePMREntry"); + } + } +#endif + +#if defined(__KERNEL__) + if (psHeap->bPremapped) + { + ui64OptionalMapAddress = _GetPremappedVA(psImport->hPMR, psHeap->psCtx->hDevConnection); + } +#endif + + /* + Suballocations always get mapped into the device was we need to + key the RA off something and as we can't export suballocations + there is no valid reason to request an allocation an not map it + */ + eError = DevmemImportStructDevMap(psHeap, + IMG_TRUE, + psImport, + ui64OptionalMapAddress); + PVR_GOTO_IF_ERROR(eError, failMap); + + OSLockAcquire(psImport->hLock); + /* Mark this import struct as zeroed so we can save some PDump LDBs + * and do not have to CPU map + mem set()*/ + if (uiFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) + { + psImport->uiProperties |= DEVMEM_PROPERTIES_IMPORT_IS_ZEROED; + } + else if (uiFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC) + { + psImport->uiProperties |= DEVMEM_PROPERTIES_IMPORT_IS_POISONED; + } + psImport->uiProperties |= DEVMEM_PROPERTIES_IMPORT_IS_CLEAN; + OSLockRelease(psImport->hLock); + + *puiBase = psImport->sDeviceImport.sDevVAddr.uiAddr; + *puiActualSize = uiSize; + *phImport = psImport; + + return PVRSRV_OK; + + /* error exit paths follow */ + +failMap: + FreeDeviceMemory(psImport); +failAlloc: + + return eError; +} + +static void +SubAllocImportFree(RA_PERARENA_HANDLE hArena, + RA_BASE_T uiBase, + RA_PERISPAN_HANDLE hImport) +{ + DEVMEM_IMPORT *psImport = hImport; +#if !defined(PVRSRV_NEED_PVR_ASSERT) + 
PVR_UNREFERENCED_PARAMETER(hArena); + PVR_UNREFERENCED_PARAMETER(uiBase); +#endif + + PVR_ASSERT(psImport != NULL); + PVR_ASSERT(hArena == psImport->sDeviceImport.psHeap); + PVR_ASSERT(uiBase == psImport->sDeviceImport.sDevVAddr.uiAddr); + + (void) DevmemImportStructDevUnmap(psImport); + (void) DevmemImportStructRelease(psImport); +} + +/***************************************************************************** + * Devmem context internals * + *****************************************************************************/ + +static PVRSRV_ERROR +PopulateContextFromBlueprint(struct DEVMEM_CONTEXT_TAG *psCtx, + DEVMEM_HEAPCFGID uiHeapBlueprintID) +{ + PVRSRV_ERROR eError; + PVRSRV_ERROR eError2; + struct DEVMEM_HEAP_TAG **ppsHeapArray; + IMG_UINT32 uiNumHeaps; + IMG_UINT32 uiHeapsToUnwindOnError; + IMG_UINT32 uiHeapIndex; + IMG_DEV_VIRTADDR sDevVAddrBase; + IMG_CHAR aszHeapName[DEVMEM_HEAPNAME_MAXLENGTH]; + IMG_DEVMEM_SIZE_T uiHeapLength; + IMG_DEVMEM_SIZE_T uiReservedRegionLength; + IMG_DEVMEM_LOG2ALIGN_T uiLog2DataPageSize; + IMG_DEVMEM_LOG2ALIGN_T uiLog2ImportAlignment; + + eError = DevmemHeapCount(psCtx->hDevConnection, + uiHeapBlueprintID, + &uiNumHeaps); + PVR_GOTO_IF_ERROR(eError, e0); + + if (uiNumHeaps == 0) + { + ppsHeapArray = NULL; + } + else + { + ppsHeapArray = OSAllocMem(sizeof(*ppsHeapArray) * uiNumHeaps); + PVR_GOTO_IF_NOMEM(ppsHeapArray, eError, e0); + } + + uiHeapsToUnwindOnError = 0; + + for (uiHeapIndex = 0; uiHeapIndex < uiNumHeaps; uiHeapIndex++) + { + eError = DevmemHeapDetails(psCtx->hDevConnection, + uiHeapBlueprintID, + uiHeapIndex, + &aszHeapName[0], + sizeof(aszHeapName), + &sDevVAddrBase, + &uiHeapLength, + &uiReservedRegionLength, + &uiLog2DataPageSize, + &uiLog2ImportAlignment); + PVR_GOTO_IF_ERROR(eError, e1); + + eError = DevmemCreateHeap(psCtx, + sDevVAddrBase, + uiHeapLength, + uiReservedRegionLength, + uiLog2DataPageSize, + uiLog2ImportAlignment, + aszHeapName, + uiHeapBlueprintID, + &ppsHeapArray[uiHeapIndex]); + 
PVR_GOTO_IF_ERROR(eError, e1); + + uiHeapsToUnwindOnError = uiHeapIndex + 1; + } + + psCtx->uiAutoHeapCount = uiNumHeaps; + psCtx->ppsAutoHeapArray = ppsHeapArray; + + PVR_ASSERT(psCtx->uiNumHeaps >= psCtx->uiAutoHeapCount); + PVR_ASSERT(psCtx->uiAutoHeapCount == uiNumHeaps); + + return PVRSRV_OK; + + /* error exit paths */ +e1: + for (uiHeapIndex = 0; uiHeapIndex < uiHeapsToUnwindOnError; uiHeapIndex++) + { + eError2 = DevmemDestroyHeap(ppsHeapArray[uiHeapIndex]); + PVR_ASSERT(eError2 == PVRSRV_OK); + } + + if (uiNumHeaps != 0) + { + OSFreeMem(ppsHeapArray); + } + +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +static PVRSRV_ERROR +UnpopulateContextFromBlueprint(struct DEVMEM_CONTEXT_TAG *psCtx) +{ + PVRSRV_ERROR eReturn = PVRSRV_OK; + PVRSRV_ERROR eError2; + IMG_UINT32 uiHeapIndex; + IMG_BOOL bDoCheck = IMG_TRUE; +#if defined(__KERNEL__) + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK) + { + bDoCheck = IMG_FALSE; + } +#endif + + for (uiHeapIndex = 0; uiHeapIndex < psCtx->uiAutoHeapCount; uiHeapIndex++) + { + if (!psCtx->ppsAutoHeapArray[uiHeapIndex]) + { + continue; + } + + eError2 = DevmemDestroyHeap(psCtx->ppsAutoHeapArray[uiHeapIndex]); + if (eError2 != PVRSRV_OK) + { + eReturn = eError2; + } + else + { + psCtx->ppsAutoHeapArray[uiHeapIndex] = NULL; + } + } + + if ((!bDoCheck || (eReturn == PVRSRV_OK)) && psCtx->ppsAutoHeapArray) + { + OSFreeMem(psCtx->ppsAutoHeapArray); + psCtx->ppsAutoHeapArray = NULL; + psCtx->uiAutoHeapCount = 0; + } + + return eReturn; +} + +/***************************************************************************** + * Devmem context functions * + *****************************************************************************/ + +IMG_INTERNAL PVRSRV_ERROR +DevmemCreateContext(SHARED_DEV_CONNECTION hDevConnection, + DEVMEM_HEAPCFGID uiHeapBlueprintID, + DEVMEM_CONTEXT **ppsCtxPtr) +{ + PVRSRV_ERROR eError; + DEVMEM_CONTEXT *psCtx; + /* handle to the 
server-side counterpart of the device memory + context (specifically, for handling mapping to device MMU) */ + IMG_HANDLE hDevMemServerContext; + IMG_HANDLE hPrivData; + IMG_BOOL bHeapCfgMetaId = (uiHeapBlueprintID == DEVMEM_HEAPCFG_META); + + PVR_GOTO_IF_NOMEM(ppsCtxPtr, eError, e0); + + psCtx = OSAllocMem(sizeof(*psCtx)); + PVR_GOTO_IF_NOMEM(psCtx, eError, e0); + + psCtx->uiNumHeaps = 0; + + psCtx->hDevConnection = hDevConnection; + + /* Create (server-side) Device Memory context */ + eError = BridgeDevmemIntCtxCreate(GetBridgeHandle(psCtx->hDevConnection), + bHeapCfgMetaId, + &hDevMemServerContext, + &hPrivData, + &psCtx->ui32CPUCacheLineSize); + PVR_GOTO_IF_ERROR(eError, e1); + + psCtx->hDevMemServerContext = hDevMemServerContext; + psCtx->hPrivData = hPrivData; + + /* automagic heap creation */ + psCtx->uiAutoHeapCount = 0; + + eError = PopulateContextFromBlueprint(psCtx, uiHeapBlueprintID); + PVR_GOTO_IF_ERROR(eError, e2); + + *ppsCtxPtr = psCtx; + + PVR_ASSERT(psCtx->uiNumHeaps == psCtx->uiAutoHeapCount); + return PVRSRV_OK; + + /* error exit paths follow */ + +e2: + PVR_ASSERT(psCtx->uiAutoHeapCount == 0); + PVR_ASSERT(psCtx->uiNumHeaps == 0); + BridgeDevmemIntCtxDestroy(GetBridgeHandle(psCtx->hDevConnection), hDevMemServerContext); + +e1: + OSFreeMem(psCtx); + +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemAcquireDevPrivData(DEVMEM_CONTEXT *psCtx, + IMG_HANDLE *hPrivData) +{ + PVRSRV_ERROR eError; + + PVR_GOTO_IF_INVALID_PARAM(psCtx, eError, e0); + PVR_GOTO_IF_INVALID_PARAM(hPrivData, eError, e0); + + *hPrivData = psCtx->hPrivData; + return PVRSRV_OK; + +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemReleaseDevPrivData(DEVMEM_CONTEXT *psCtx) +{ + PVRSRV_ERROR eError; + + PVR_GOTO_IF_INVALID_PARAM(psCtx, eError, e0); + return PVRSRV_OK; + +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + + +IMG_INTERNAL PVRSRV_ERROR +DevmemFindHeapByName(const struct 
DEVMEM_CONTEXT_TAG *psCtx, + const IMG_CHAR *pszHeapName, + struct DEVMEM_HEAP_TAG **ppsHeapRet) +{ + IMG_UINT32 uiHeapIndex; + + /* N.B. This func is only useful for finding "automagic" heaps by name */ + for (uiHeapIndex = 0; + uiHeapIndex < psCtx->uiAutoHeapCount; + uiHeapIndex++) + { + if (!OSStringNCompare(psCtx->ppsAutoHeapArray[uiHeapIndex]->pszName, pszHeapName, OSStringLength(psCtx->ppsAutoHeapArray[uiHeapIndex]->pszName) + 1)) + { + *ppsHeapRet = psCtx->ppsAutoHeapArray[uiHeapIndex]; + return PVRSRV_OK; + } + } + + return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_INDEX; +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemDestroyContext(DEVMEM_CONTEXT *psCtx) +{ + PVRSRV_ERROR eError; + IMG_BOOL bDoCheck = IMG_TRUE; + +#if defined(__KERNEL__) + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK) + { + bDoCheck = IMG_FALSE; + } +#endif + + PVR_RETURN_IF_INVALID_PARAM(psCtx); + + eError = UnpopulateContextFromBlueprint(psCtx); + if (bDoCheck && eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: UnpopulateContextFromBlueprint failed (%d) leaving %d heaps", + __func__, eError, psCtx->uiNumHeaps)); + goto e1; + } + + eError = BridgeDevmemIntCtxDestroy(GetBridgeHandle(psCtx->hDevConnection), + psCtx->hDevMemServerContext); + if (bDoCheck) + { + PVR_LOG_GOTO_IF_ERROR(eError, "BridgeDevMemIntCtxDestroy", e1); + + /* should be no more heaps left */ + if (psCtx->uiNumHeaps) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Additional heaps remain in DEVMEM_CONTEXT", + __func__)); + eError = PVRSRV_ERROR_DEVICEMEM_ADDITIONAL_HEAPS_IN_CONTEXT; + goto e1; + } + } + + OSCachedMemSet(psCtx, 0, sizeof(*psCtx)); + OSFreeMem(psCtx); + +e1: + return eError; +} + +/***************************************************************************** + * Devmem heap query functions * + *****************************************************************************/ + +IMG_INTERNAL PVRSRV_ERROR +DevmemHeapConfigCount(SHARED_DEV_CONNECTION 
hDevConnection, + IMG_UINT32 *puiNumHeapConfigsOut) +{ + PVRSRV_ERROR eError; + eError = BridgeHeapCfgHeapConfigCount(GetBridgeHandle(hDevConnection), + puiNumHeapConfigsOut); + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemHeapCount(SHARED_DEV_CONNECTION hDevConnection, + IMG_UINT32 uiHeapConfigIndex, + IMG_UINT32 *puiNumHeapsOut) +{ + PVRSRV_ERROR eError; + eError = BridgeHeapCfgHeapCount(GetBridgeHandle(hDevConnection), + uiHeapConfigIndex, + puiNumHeapsOut); + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemHeapConfigName(SHARED_DEV_CONNECTION hDevConnection, + IMG_UINT32 uiHeapConfigIndex, + IMG_CHAR *pszConfigNameOut, + IMG_UINT32 uiConfigNameBufSz) +{ + PVRSRV_ERROR eError; + eError = BridgeHeapCfgHeapConfigName(GetBridgeHandle(hDevConnection), + uiHeapConfigIndex, + uiConfigNameBufSz, + pszConfigNameOut); + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemHeapDetails(SHARED_DEV_CONNECTION hDevConnection, + IMG_UINT32 uiHeapConfigIndex, + IMG_UINT32 uiHeapIndex, + IMG_CHAR *pszHeapNameOut, + IMG_UINT32 uiHeapNameBufSz, + IMG_DEV_VIRTADDR *psDevVAddrBaseOut, + IMG_DEVMEM_SIZE_T *puiHeapLengthOut, + IMG_DEVMEM_SIZE_T *puiReservedRegionLengthOut, + IMG_UINT32 *puiLog2DataPageSizeOut, + IMG_UINT32 *puiLog2ImportAlignmentOut) +{ + PVRSRV_ERROR eError; + + eError = BridgeHeapCfgHeapDetails(GetBridgeHandle(hDevConnection), + uiHeapConfigIndex, + uiHeapIndex, + uiHeapNameBufSz, + pszHeapNameOut, + psDevVAddrBaseOut, + puiHeapLengthOut, + puiReservedRegionLengthOut, + puiLog2DataPageSizeOut, + puiLog2ImportAlignmentOut); + + VG_MARK_INITIALIZED(pszHeapNameOut, uiHeapNameBufSz); + + return eError; +} + +/***************************************************************************** + * Devmem heap functions * + *****************************************************************************/ + +IMG_INTERNAL PVRSRV_ERROR +DevmemGetHeapInt(DEVMEM_HEAP *psHeap, + IMG_HANDLE *phDevmemHeap) +{ + PVR_RETURN_IF_INVALID_PARAM(psHeap); + *phDevmemHeap = 
psHeap->hDevMemServerHeap; + return PVRSRV_OK; +} + +/* See devicemem.h for important notes regarding the arguments + to this function */ +IMG_INTERNAL PVRSRV_ERROR +DevmemCreateHeap(DEVMEM_CONTEXT *psCtx, + IMG_DEV_VIRTADDR sBaseAddress, + IMG_DEVMEM_SIZE_T uiLength, + IMG_DEVMEM_SIZE_T uiReservedRegionLength, + IMG_UINT32 ui32Log2Quantum, + IMG_UINT32 ui32Log2ImportAlignment, + const IMG_CHAR *pszName, + DEVMEM_HEAPCFGID uiHeapBlueprintID, + DEVMEM_HEAP **ppsHeapPtr) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_ERROR eError2; + DEVMEM_HEAP *psHeap; + /* handle to the server-side counterpart of the device memory heap + (specifically, for handling mapping to device MMU) */ + IMG_HANDLE hDevMemServerHeap; + IMG_UINT32 ui32Policy = RA_POLICY_DEFAULT, ui32PolicyVMRA; + + IMG_CHAR aszBuf[100]; + IMG_CHAR *pszStr; + IMG_UINT32 ui32pszStrSize; + + if (ppsHeapPtr == NULL || + uiReservedRegionLength % DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, e0); + } + + ui32PolicyVMRA = RA_POLICY_DEFAULT; + + PVR_ASSERT(uiReservedRegionLength + DEVMEM_HEAP_MINIMUM_SIZE <= uiLength); + + psHeap = OSAllocMem(sizeof(*psHeap)); + PVR_GOTO_IF_NOMEM(psHeap, eError, e0); + + /* Need to keep local copy of heap name, so caller may free theirs */ + ui32pszStrSize = OSStringLength(pszName) + 1; + pszStr = OSAllocMem(ui32pszStrSize); + PVR_GOTO_IF_NOMEM(pszStr, eError, e1); + OSStringLCopy(pszStr, pszName, ui32pszStrSize); + psHeap->pszName = pszStr; + + psHeap->uiSize = uiLength; + psHeap->uiReservedRegionSize = uiReservedRegionLength; + psHeap->sBaseAddress = sBaseAddress; + psHeap->bPremapped = IMG_FALSE; + OSAtomicWrite(&psHeap->hImportCount, 0); + + OSSNPrintf(aszBuf, sizeof(aszBuf), + "NDM heap '%s' (suballocs) ctx:%p", + pszName, psCtx); + ui32pszStrSize = OSStringLength(aszBuf) + 1; + pszStr = OSAllocMem(ui32pszStrSize); + PVR_GOTO_IF_NOMEM(pszStr, eError, e2); + OSStringLCopy(pszStr, aszBuf, ui32pszStrSize); + 
psHeap->pszSubAllocRAName = pszStr; + +#if defined(__KERNEL__) + if (uiHeapBlueprintID == DEVMEM_HEAPCFG_META) + { + void *pvAppHintState = NULL; + IMG_UINT32 ui32FirmwarePolicydefault = 0, ui32FirmwarePolicy=0; + OSCreateKMAppHintState(&pvAppHintState); + OSGetKMAppHintUINT32(pvAppHintState, DevMemFWHeapPolicy, + &ui32FirmwarePolicydefault, &ui32FirmwarePolicy); + ui32PolicyVMRA = ui32Policy = ui32FirmwarePolicy; + OSFreeKMAppHintState(pvAppHintState); + } +#endif + +#if defined(PDUMP) + /* The META heap is shared globally so a single physical memory import + * may be used to satisfy allocations of different processes. + * This is problematic when PDumping because the physical memory + * import used to satisfy a new allocation may actually have been + * imported (and thus the PDump MALLOC generated) before the PDump + * client was started, leading to the MALLOC being missing. + * + * This is solved by disabling splitting of imports for the META physmem + * RA, meaning that every firmware allocation gets its own import, thus + * ensuring the MALLOC is present for every allocation made within the + * pdump capture range + */ + if (uiHeapBlueprintID == DEVMEM_HEAPCFG_META) + { + ui32Policy |= RA_POLICY_NO_SPLIT; + } +#else + PVR_UNREFERENCED_PARAMETER(uiHeapBlueprintID); +#endif + + psHeap->psSubAllocRA = RA_Create(psHeap->pszSubAllocRAName, + /* Subsequent imports: */ + ui32Log2Quantum, + RA_LOCKCLASS_2, + SubAllocImportAlloc, + SubAllocImportFree, + (RA_PERARENA_HANDLE) psHeap, + ui32Policy); + if (psHeap->psSubAllocRA == NULL) + { + eError = PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA; + goto e3; + } + + psHeap->uiLog2ImportAlignment = ui32Log2ImportAlignment; + psHeap->uiLog2Quantum = ui32Log2Quantum; + + if (!OSStringNCompare(pszName, RGX_GENERAL_SVM_HEAP_IDENT, sizeof(RGX_GENERAL_SVM_HEAP_IDENT))) + { + /* The SVM heap normally starts out as this type though + it may transition to DEVMEM_HEAP_MANAGER_USER + on platforms with more processor virtual address + 
bits than device virtual address bits */ + psHeap->ui32HeapManagerFlags = DEVMEM_HEAP_MANAGER_KERNEL; + } + else if (uiReservedRegionLength != 0) + { + /* Heaps which specify reserved VA space range are dual managed: + * - sBaseAddress to (sBaseAddress+uiReservedRegionLength-1): User managed + * - (sBaseAddress+uiReservedRegionLength) to uiLength: RA managed + */ + psHeap->ui32HeapManagerFlags = DEVMEM_HEAP_MANAGER_DUAL_USER_RA; + } + else + { + /* Otherwise, heap manager is decided (USER or RA) at first map */ + psHeap->ui32HeapManagerFlags = DEVMEM_HEAP_MANAGER_UNKNOWN; + } + + /* Mark the heap to be managed by RA */ + if (!OSStringNCompare(pszName, RGX_VK_CAPT_REPLAY_HEAP_IDENT, + sizeof(RGX_VK_CAPT_REPLAY_HEAP_IDENT))) + { + psHeap->ui32HeapManagerFlags |= DEVMEM_HEAP_MANAGER_RA; + } + + OSSNPrintf(aszBuf, sizeof(aszBuf), + "NDM heap '%s' (QVM) ctx:%p", + pszName, psCtx); + ui32pszStrSize = OSStringLength(aszBuf) + 1; + pszStr = OSAllocMem(ui32pszStrSize); + if (pszStr == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto e4; + } + OSStringLCopy(pszStr, aszBuf, ui32pszStrSize); + psHeap->pszQuantizedVMRAName = pszStr; + + psHeap->psQuantizedVMRA = RA_Create(psHeap->pszQuantizedVMRAName, + /* Subsequent import: */ + 0, RA_LOCKCLASS_1, NULL, NULL, + (RA_PERARENA_HANDLE) psHeap, + ui32PolicyVMRA); + if (psHeap->psQuantizedVMRA == NULL) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA, e5); + } + + if (!RA_Add(psHeap->psQuantizedVMRA, + /* Make sure the VMRA doesn't allocate from reserved VAs */ + (RA_BASE_T)sBaseAddress.uiAddr + uiReservedRegionLength, + (RA_LENGTH_T)uiLength, + (RA_FLAGS_T)0, /* This RA doesn't use or need flags */ + NULL /* per ispan handle */)) + { + RA_Delete(psHeap->psQuantizedVMRA); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA, e5); + } + + psHeap->psCtx = psCtx; + + + /* Create server-side counterpart of Device Memory heap */ + eError = 
BridgeDevmemIntHeapCreate(GetBridgeHandle(psCtx->hDevConnection), + psCtx->hDevMemServerContext, + sBaseAddress, + uiLength, + ui32Log2Quantum, + &hDevMemServerHeap); + PVR_GOTO_IF_ERROR(eError, e6); + + psHeap->hDevMemServerHeap = hDevMemServerHeap; + + eError = OSLockCreate(&psHeap->hLock); + PVR_GOTO_IF_ERROR(eError, e7); + + psHeap->psCtx->uiNumHeaps++; + *ppsHeapPtr = psHeap; + +#if defined(PVRSRV_NEWDEVMEM_SUPPORT_MEM_TRACKING) + psHeap->psMemDescList = NULL; +#endif /* PVRSRV_NEWDEVMEM_SUPPORT_MEM_TRACKING */ + + return PVRSRV_OK; + + /* error exit paths */ +e7: + eError2 = BridgeDevmemIntHeapDestroy(GetBridgeHandle(psCtx->hDevConnection), + psHeap->hDevMemServerHeap); + PVR_ASSERT (eError2 == PVRSRV_OK); +e6: + if (psHeap->psQuantizedVMRA) + RA_Delete(psHeap->psQuantizedVMRA); +e5: + if (psHeap->pszQuantizedVMRAName) + OSFreeMem(psHeap->pszQuantizedVMRAName); +e4: + RA_Delete(psHeap->psSubAllocRA); +e3: + OSFreeMem(psHeap->pszSubAllocRAName); +e2: + OSFreeMem(psHeap->pszName); +e1: + OSFreeMem(psHeap); +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemGetHeapBaseDevVAddr(struct DEVMEM_HEAP_TAG *psHeap, + IMG_DEV_VIRTADDR *pDevVAddr) +{ + PVR_RETURN_IF_INVALID_PARAM(psHeap); + + *pDevVAddr = psHeap->sBaseAddress; + + return PVRSRV_OK; +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemExportalignAdjustSizeAndAlign(IMG_UINT32 uiLog2Quantum, + IMG_DEVMEM_SIZE_T *puiSize, + IMG_DEVMEM_ALIGN_T *puiAlign) +{ + IMG_DEVMEM_SIZE_T uiSize = *puiSize; + IMG_DEVMEM_ALIGN_T uiAlign = *puiAlign; + + /* Just in case someone changes definition of IMG_DEVMEM_ALIGN_T. */ + static_assert(sizeof(unsigned long long) == sizeof(uiAlign), + "invalid uiAlign size"); + /* This value is used for shifting so it cannot be greater than number + * of bits in unsigned long long (sizeof(1ULL)). Using greater value is + * undefined behaviour. 
*/ + if (uiLog2Quantum >= sizeof(unsigned long long) * 8) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if ((1ULL << uiLog2Quantum) > uiAlign) + { + uiAlign = 1ULL << uiLog2Quantum; + } + uiSize = (uiSize + uiAlign - 1) & ~(uiAlign - 1); + + *puiSize = uiSize; + *puiAlign = uiAlign; + + return PVRSRV_OK; +} + + +IMG_INTERNAL PVRSRV_ERROR +DevmemDestroyHeap(DEVMEM_HEAP *psHeap) +{ + PVRSRV_ERROR eError; + IMG_INT uiImportCount; +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) + IMG_BOOL bDoCheck = IMG_TRUE; +#if defined(__KERNEL__) + if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK) + { + bDoCheck = IMG_FALSE; + } +#endif +#endif + + PVR_RETURN_IF_INVALID_PARAM(psHeap); + + uiImportCount = OSAtomicRead(&psHeap->hImportCount); + if (uiImportCount > 0) + { + PVR_DPF((PVR_DBG_ERROR, "%d(%s) leaks remain", uiImportCount, psHeap->pszName)); +#if defined(__KERNEL__) +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + PVR_DPF((PVR_DBG_ERROR, "Details of remaining allocated device memory (for all processes):")); + RIDumpAllKM(); +#else + PVR_DPF((PVR_DBG_ERROR, "Compile with PVRSRV_ENABLE_GPU_MEMORY_INFO=1 to get a full " + "list of all driver allocations.")); +#endif +#endif +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) + if (bDoCheck) +#endif + { + return PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP; + } + } + + eError = BridgeDevmemIntHeapDestroy(GetBridgeHandle(psHeap->psCtx->hDevConnection), + psHeap->hDevMemServerHeap); +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) + if (bDoCheck) +#endif + { + PVR_LOG_RETURN_IF_ERROR(eError, "BridgeDevmemIntHeapDestroy"); + } + + PVR_ASSERT(psHeap->psCtx->uiNumHeaps > 0); + psHeap->psCtx->uiNumHeaps--; + + OSLockDestroy(psHeap->hLock); + + if (psHeap->psQuantizedVMRA) + { + RA_Delete(psHeap->psQuantizedVMRA); + } + if (psHeap->pszQuantizedVMRAName) + { + OSFreeMem(psHeap->pszQuantizedVMRAName); + } + + RA_Delete(psHeap->psSubAllocRA); + OSFreeMem(psHeap->pszSubAllocRAName); + + OSFreeMem(psHeap->pszName); + + 
OSCachedMemSet(psHeap, 0, sizeof(*psHeap)); + OSFreeMem(psHeap); + + return PVRSRV_OK; +} + +/***************************************************************************** + * Devmem allocation/free functions * + *****************************************************************************/ + +IMG_INTERNAL PVRSRV_ERROR +DevmemSubAllocateAndMap(IMG_UINT8 uiPreAllocMultiplier, + DEVMEM_HEAP *psHeap, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_ALIGN_T uiAlign, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + const IMG_CHAR *pszText, + DEVMEM_MEMDESC **ppsMemDescPtr, + IMG_DEV_VIRTADDR *psDevVirtAddr) +{ + PVRSRV_ERROR eError; + eError = DevmemSubAllocate(uiPreAllocMultiplier, + psHeap, + uiSize, + uiAlign, + uiFlags, + pszText, + ppsMemDescPtr); + PVR_GOTO_IF_ERROR(eError, fail_alloc); + + eError = DevmemMapToDevice(*ppsMemDescPtr, + psHeap, + psDevVirtAddr); + PVR_GOTO_IF_ERROR(eError, fail_map); + + return PVRSRV_OK; + +fail_map: + DevmemFree(*ppsMemDescPtr); +fail_alloc: + ppsMemDescPtr = NULL; + return eError; + +} + +static INLINE void _MemSet(void *pvMem, + IMG_UINT8 uiPattern, + IMG_DEVMEM_SIZE_T uiSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags) +{ + if (PVRSRV_CHECK_CPU_UNCACHED(uiFlags)) + { + OSDeviceMemSet(pvMem, uiPattern, uiSize); + } + else + { + /* it's safe to use OSCachedMemSet() for cached and wc memory */ + OSCachedMemSet(pvMem, uiPattern, uiSize); + } +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemSubAllocate(IMG_UINT8 uiPreAllocMultiplier, + DEVMEM_HEAP *psHeap, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_ALIGN_T uiAlign, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + const IMG_CHAR *pszText, + DEVMEM_MEMDESC **ppsMemDescPtr) +{ + RA_BASE_T uiAllocatedAddr = 0; + RA_LENGTH_T uiAllocatedSize; + RA_PERISPAN_HANDLE hImport; /* the "import" from which this sub-allocation came */ + PVRSRV_ERROR eError; + DEVMEM_MEMDESC *psMemDesc = NULL; + IMG_DEVMEM_OFFSET_T uiOffset = 0; + DEVMEM_IMPORT *psImport; + IMG_UINT32 ui32CPUCacheLineSize; + void *pvAddr = NULL; + + IMG_BOOL bImportClean; + IMG_BOOL 
bCPUCleanFlag = PVRSRV_CHECK_CPU_CACHE_CLEAN(uiFlags); + IMG_BOOL bZero = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags); + IMG_BOOL bPoisonOnAlloc = PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags); + IMG_BOOL bCPUCached = (PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags) || + PVRSRV_CHECK_CPU_CACHE_INCOHERENT(uiFlags)); + IMG_BOOL bGPUCached = (PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags) || + PVRSRV_CHECK_GPU_CACHE_INCOHERENT(uiFlags)); + IMG_BOOL bAlign = ! (PVRSRV_CHECK_NO_CACHE_LINE_ALIGN(uiFlags)); + PVRSRV_CACHE_OP eOp = PVRSRV_CACHE_OP_INVALIDATE; + IMG_UINT32 ui32CacheLineSize = 0; + DEVMEM_PROPERTIES_T uiProperties; + + if (uiFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC) + { + /* Deferred Allocation not supported on SubAllocs*/ + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, failParams); + } + + PVR_GOTO_IF_INVALID_PARAM(psHeap, eError, failParams); + PVR_GOTO_IF_INVALID_PARAM(psHeap->psCtx, eError, failParams); + PVR_GOTO_IF_INVALID_PARAM(ppsMemDescPtr, eError, failParams); + + uiFlags = DevmemOverrideFlagsOrPassThrough(psHeap->psCtx->hDevConnection, uiFlags); + +#if defined(__KERNEL__) + { + /* The hDevConnection holds two different types of pointers depending on the + * address space in which it is used. + * In this instance the variable points to the device node in server */ + PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)psHeap->psCtx->hDevConnection; + ui32CacheLineSize = GET_ROGUE_CACHE_LINE_SIZE(PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevNode, SLC_CACHE_LINE_SIZE_BITS)); + } +#else + ui32CacheLineSize = ROGUE_CACHE_LINE_SIZE; +#endif + + /* The following logic makes sure that any cached memory is aligned to both the CPU and GPU. + * To be aligned on both you have to take the Lowest Common Multiple (LCM) of the cache line sizes of each. + * As the possibilities are all powers of 2 then simply the largest number can be picked as the LCM. + * Therefore this algorithm just picks the highest from the CPU, GPU and given alignments. 
+ */ + ui32CPUCacheLineSize = psHeap->psCtx->ui32CPUCacheLineSize; + /* If the CPU cache line size is larger than the alignment given then it is the lowest common multiple + * Also checking if the allocation is going to be cached on the CPU + * Currently there is no check for the validity of the cache coherent option. + * In this case, the alignment could be applied but the mode could still fall back to uncached. + */ + if (bAlign && ui32CPUCacheLineSize > uiAlign && bCPUCached) + { + uiAlign = ui32CPUCacheLineSize; + } + + /* If the GPU cache line size is larger than the alignment given then it is the lowest common multiple + * Also checking if the allocation is going to be cached on the GPU via checking for any of the cached options. + * Currently there is no check for the validity of the cache coherent option. + * In this case, the alignment could be applied but the mode could still fall back to uncached. + */ + if (bAlign && ui32CacheLineSize > uiAlign && bGPUCached) + { + uiAlign = ui32CacheLineSize; + } + + eError = DevmemValidateParams(uiSize, + uiAlign, + &uiFlags); + PVR_GOTO_IF_ERROR(eError, failParams); + + eError = DevmemMemDescAlloc(&psMemDesc); + PVR_GOTO_IF_ERROR(eError, failMemDescAlloc); + + /* No request for exportable memory so use the RA */ + eError = RA_Alloc(psHeap->psSubAllocRA, + uiSize, + uiPreAllocMultiplier, + uiFlags, + uiAlign, + pszText, + &uiAllocatedAddr, + &uiAllocatedSize, + &hImport); + PVR_GOTO_IF_ERROR(eError, failDeviceMemAlloc); + + psImport = hImport; + + /* This assignment is assuming the RA returns an hImport where suballocations + * can be made from if uiSize is NOT a page multiple of the passed heap. + * + * So we check if uiSize is a page multiple and mark it as exportable + * if it is not. 
+ * */ + OSLockAcquire(psImport->hLock); + if (!(uiSize & ((1ULL << psHeap->uiLog2Quantum) - 1)) && + (uiPreAllocMultiplier == RA_NO_IMPORT_MULTIPLIER)) + { + psImport->uiProperties |= DEVMEM_PROPERTIES_EXPORTABLE; + } + psImport->uiProperties |= DEVMEM_PROPERTIES_SUBALLOCATABLE; + uiProperties = psImport->uiProperties; + OSLockRelease(psImport->hLock); + + uiOffset = uiAllocatedAddr - psImport->sDeviceImport.sDevVAddr.uiAddr; + +#if defined(PDUMP) && defined(DEBUG) +#if defined(__KERNEL__) + PDUMPCOMMENTWITHFLAGS(PDUMP_CONT, + "Suballocated %u Byte for \"%s\" from PMR with handle ID: 0x%p (PID %u)", + (IMG_UINT32) uiSize, pszText, psImport->hPMR, OSGetCurrentProcessID()); +#else + PDUMPCOMMENTF(psHeap->psCtx->hDevConnection, PDUMP_FLAGS_CONTINUOUS, + "Suballocated %u Byte for \"%s\" from PMR with handle ID: %p (PID %u)", + (IMG_UINT32) uiSize, + pszText, + psImport->hPMR, + OSGetCurrentProcessID()); +#endif +#endif + + DevmemMemDescInit(psMemDesc, + uiOffset, + psImport, + uiSize); + + bImportClean = ((uiProperties & DEVMEM_PROPERTIES_IMPORT_IS_CLEAN) != 0); + + /* Zero the memory */ + if (bZero) + { + /* Has the import been zeroed on allocation and were no suballocations returned to it so far? */ + bImportClean = bImportClean && ((uiProperties & DEVMEM_PROPERTIES_IMPORT_IS_ZEROED) != 0); + + if (!bImportClean) + { + eOp = PVRSRV_CACHE_OP_FLUSH; + + eError = DevmemAcquireCpuVirtAddr(psMemDesc, &pvAddr); + PVR_GOTO_IF_ERROR(eError, failMaintenance); + + /* uiSize is a 64-bit quantity whereas the 3rd argument + * to OSDeviceMemSet is a 32-bit quantity on 32-bit systems + * hence a compiler warning of implicit cast and loss of data. + * Added explicit cast and assert to remove warning. 
+ */ + PVR_ASSERT(uiSize < IMG_UINT32_MAX); + + _MemSet(pvAddr, 0, uiSize, uiFlags); + +#if defined(PDUMP) + DevmemPDumpLoadZeroMem(psMemDesc, 0, uiSize, PDUMP_FLAGS_CONTINUOUS); +#endif + } + } + else if (bPoisonOnAlloc) + { + /* Has the import been poisoned on allocation and were no suballocations returned to it so far? */ + bPoisonOnAlloc = (uiProperties & DEVMEM_PROPERTIES_IMPORT_IS_POISONED) != 0; + + if (!bPoisonOnAlloc) + { + eOp = PVRSRV_CACHE_OP_FLUSH; + + eError = DevmemAcquireCpuVirtAddr(psMemDesc, &pvAddr); + PVR_GOTO_IF_ERROR(eError, failMaintenance); + + _MemSet(pvAddr, PVRSRV_POISON_ON_ALLOC_VALUE, uiSize, uiFlags); + + bPoisonOnAlloc = IMG_TRUE; + } + } + + /* Flush or invalidate */ + if (bCPUCached && !bImportClean && (bZero || bCPUCleanFlag || bPoisonOnAlloc)) + { + /* BridgeCacheOpQueue _may_ be deferred so use BridgeCacheOpExec + to ensure this cache maintenance is actioned immediately */ + eError = BridgeCacheOpExec (GetBridgeHandle(psMemDesc->psImport->hDevConnection), + psMemDesc->psImport->hPMR, + (IMG_UINT64)(uintptr_t) + pvAddr - psMemDesc->uiOffset, + psMemDesc->uiOffset, + psMemDesc->uiAllocSize, + eOp); + PVR_GOTO_IF_ERROR(eError, failMaintenance); + } + + if (pvAddr) + { + DevmemReleaseCpuVirtAddr(psMemDesc); + pvAddr = NULL; + } + + /* copy the allocation descriptive name and size so it can be passed to DevicememHistory when + * the allocation gets mapped/unmapped + */ + CheckAnnotationLength(pszText); + OSStringLCopy(psMemDesc->szText, pszText, DEVMEM_ANNOTATION_MAX_LEN); + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psMemDesc->psImport->hDevConnection), PVRSRV_BRIDGE_RI)) + { + /* Attach RI information */ + eError = BridgeRIWriteMEMDESCEntry (GetBridgeHandle(psMemDesc->psImport->hDevConnection), + psMemDesc->psImport->hPMR, + OSStringNLength(psMemDesc->szText, DEVMEM_ANNOTATION_MAX_LEN), + psMemDesc->szText, + psMemDesc->uiOffset, + uiAllocatedSize, + IMG_FALSE, + IMG_TRUE, + 
&(psMemDesc->hRIHandle)); + PVR_LOG_IF_ERROR(eError, "BridgeRIWriteMEMDESCEntry"); + } +#else /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ + PVR_UNREFERENCED_PARAMETER (pszText); +#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ + + *ppsMemDescPtr = psMemDesc; + + return PVRSRV_OK; + + /* error exit paths follow */ + +failMaintenance: + if (pvAddr) + { + DevmemReleaseCpuVirtAddr(psMemDesc); + pvAddr = NULL; + } + DevmemMemDescRelease(psMemDesc); + psMemDesc = NULL; /* Make sure we don't do a discard after the release */ +failDeviceMemAlloc: + if (psMemDesc) + { + DevmemMemDescDiscard(psMemDesc); + } +failMemDescAlloc: +failParams: + PVR_ASSERT(eError != PVRSRV_OK); + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed! Error is %s. Allocation size: " IMG_DEVMEM_SIZE_FMTSPEC, + __func__, + PVRSRVGETERRORSTRING(eError), + uiSize)); + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemAllocateExportable(SHARED_DEV_CONNECTION hDevConnection, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_ALIGN_T uiAlign, + IMG_UINT32 uiLog2HeapPageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + const IMG_CHAR *pszText, + DEVMEM_MEMDESC **ppsMemDescPtr) +{ + PVRSRV_ERROR eError; + DEVMEM_MEMDESC *psMemDesc = NULL; + DEVMEM_IMPORT *psImport; + IMG_UINT32 ui32MappingTable = 0; + + eError = DevmemExportalignAdjustSizeAndAlign(uiLog2HeapPageSize, + &uiSize, + &uiAlign); + PVR_GOTO_IF_ERROR(eError, failParams); + + uiFlags = DevmemOverrideFlagsOrPassThrough(hDevConnection, uiFlags); + + eError = DevmemValidateParams(uiSize, + uiAlign, + &uiFlags); + PVR_GOTO_IF_ERROR(eError, failParams); + + eError = DevmemMemDescAlloc(&psMemDesc); + PVR_GOTO_IF_ERROR(eError, failMemDescAlloc); + + eError = AllocateDeviceMemory(hDevConnection, + uiLog2HeapPageSize, + uiSize, + uiSize, + 1, + 1, + &ui32MappingTable, + uiAlign, + uiFlags, + IMG_TRUE, + pszText, + &psImport); + PVR_GOTO_IF_ERROR(eError, failDeviceMemAlloc); + + DevmemMemDescInit(psMemDesc, + 0, + psImport, + uiSize); + + *ppsMemDescPtr = psMemDesc; + + /* 
copy the allocation descriptive name and size so it can be passed to DevicememHistory when + * the allocation gets mapped/unmapped + */ + CheckAnnotationLength(pszText); + OSStringLCopy(psMemDesc->szText, pszText, DEVMEM_ANNOTATION_MAX_LEN); + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psImport->hDevConnection), PVRSRV_BRIDGE_RI)) + { + eError = BridgeRIWritePMREntry (GetBridgeHandle(psImport->hDevConnection), + psImport->hPMR); + PVR_LOG_IF_ERROR(eError, "BridgeRIWritePMREntry"); + + /* Attach RI information */ + eError = BridgeRIWriteMEMDESCEntry (GetBridgeHandle(psImport->hDevConnection), + psImport->hPMR, + sizeof("^"), + "^", + psMemDesc->uiOffset, + uiSize, + IMG_FALSE, + IMG_FALSE, + &psMemDesc->hRIHandle); + PVR_LOG_IF_ERROR(eError, "BridgeRIWriteMEMDESCEntry"); + } +#else /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ + PVR_UNREFERENCED_PARAMETER (pszText); +#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ + + return PVRSRV_OK; + + /* error exit paths follow */ + +failDeviceMemAlloc: + DevmemMemDescDiscard(psMemDesc); + +failMemDescAlloc: +failParams: + PVR_ASSERT(eError != PVRSRV_OK); + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed! Error is %s. 
Allocation size: " IMG_DEVMEM_SIZE_FMTSPEC, + __func__, + PVRSRVGETERRORSTRING(eError), + uiSize)); + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemAllocateSparse(SHARED_DEV_CONNECTION hDevConnection, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_DEVMEM_ALIGN_T uiAlign, + IMG_UINT32 uiLog2HeapPageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + const IMG_CHAR *pszText, + DEVMEM_MEMDESC **ppsMemDescPtr) +{ + PVRSRV_ERROR eError; + DEVMEM_MEMDESC *psMemDesc = NULL; + DEVMEM_IMPORT *psImport; + + eError = DevmemExportalignAdjustSizeAndAlign(uiLog2HeapPageSize, + &uiSize, + &uiAlign); + PVR_GOTO_IF_ERROR(eError, failParams); + + uiFlags = DevmemOverrideFlagsOrPassThrough(hDevConnection, uiFlags); + + eError = DevmemValidateParams(uiSize, + uiAlign, + &uiFlags); + PVR_GOTO_IF_ERROR(eError, failParams); + + eError = DevmemMemDescAlloc(&psMemDesc); + PVR_GOTO_IF_ERROR(eError, failMemDescAlloc); + + eError = AllocateDeviceMemory(hDevConnection, + uiLog2HeapPageSize, + uiSize, + uiChunkSize, + ui32NumPhysChunks, + ui32NumVirtChunks, + pui32MappingTable, + uiAlign, + uiFlags, + IMG_TRUE, + pszText, + &psImport); + PVR_GOTO_IF_ERROR(eError, failDeviceMemAlloc); + + DevmemMemDescInit(psMemDesc, + 0, + psImport, + uiSize); + + /* copy the allocation descriptive name and size so it can be passed to DevicememHistory when + * the allocation gets mapped/unmapped + */ + CheckAnnotationLength(pszText); + OSStringLCopy(psMemDesc->szText, pszText, DEVMEM_ANNOTATION_MAX_LEN); + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psImport->hDevConnection), PVRSRV_BRIDGE_RI)) + { + eError = BridgeRIWritePMREntry (GetBridgeHandle(psImport->hDevConnection), + psImport->hPMR); + PVR_LOG_IF_ERROR(eError, "BridgeRIWritePMREntry"); + + /* Attach RI information */ + eError = BridgeRIWriteMEMDESCEntry 
(GetBridgeHandle(psMemDesc->psImport->hDevConnection), + psMemDesc->psImport->hPMR, + sizeof("^"), + "^", + psMemDesc->uiOffset, + uiSize, + IMG_FALSE, + IMG_FALSE, + &psMemDesc->hRIHandle); + PVR_LOG_IF_ERROR(eError, "BridgeRIWriteMEMDESCEntry"); + } +#else /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ + PVR_UNREFERENCED_PARAMETER (pszText); +#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ + + *ppsMemDescPtr = psMemDesc; + + return PVRSRV_OK; + + /* error exit paths follow */ + +failDeviceMemAlloc: + DevmemMemDescDiscard(psMemDesc); + +failMemDescAlloc: +failParams: + PVR_ASSERT(eError != PVRSRV_OK); + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed! Error is %s. Allocation size: " IMG_DEVMEM_SIZE_FMTSPEC, + __func__, + PVRSRVGETERRORSTRING(eError), + uiSize)); + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemMakeLocalImportHandle(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hServerHandle, + IMG_HANDLE *hLocalImportHandle) +{ + return BridgePMRMakeLocalImportHandle(GetBridgeHandle(hDevConnection), + hServerHandle, + hLocalImportHandle); +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemUnmakeLocalImportHandle(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hLocalImportHandle) +{ + return BridgePMRUnmakeLocalImportHandle(GetBridgeHandle(hDevConnection), hLocalImportHandle); +} + +/***************************************************************************** + * Devmem unsecure export functions * + *****************************************************************************/ + +#if defined(SUPPORT_INSECURE_EXPORT) + +static PVRSRV_ERROR +_Mapping_Export(DEVMEM_IMPORT *psImport, + DEVMEM_EXPORTHANDLE *phPMRExportHandlePtr, + DEVMEM_EXPORTKEY *puiExportKeyPtr, + DEVMEM_SIZE_T *puiSize, + DEVMEM_LOG2ALIGN_T *puiLog2Contig) +{ + /* Gets an export handle and key for the PMR used for this mapping */ + /* Can only be done if there are no suballocations for this mapping */ + + PVRSRV_ERROR eError; + DEVMEM_EXPORTHANDLE hPMRExportHandle; + DEVMEM_EXPORTKEY uiExportKey; 
+ IMG_DEVMEM_SIZE_T uiSize; + IMG_DEVMEM_LOG2ALIGN_T uiLog2Contig; + + PVR_GOTO_IF_INVALID_PARAM(psImport, eError, failParams); + + if ((GetImportProperties(psImport) & DEVMEM_PROPERTIES_EXPORTABLE) == 0) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION, failParams); + } + + eError = BridgePMRExportPMR(GetBridgeHandle(psImport->hDevConnection), + psImport->hPMR, + &hPMRExportHandle, + &uiSize, + &uiLog2Contig, + &uiExportKey); + PVR_GOTO_IF_ERROR(eError, failExport); + + PVR_ASSERT(uiSize == psImport->uiSize); + + *phPMRExportHandlePtr = hPMRExportHandle; + *puiExportKeyPtr = uiExportKey; + *puiSize = uiSize; + *puiLog2Contig = uiLog2Contig; + + return PVRSRV_OK; + + /* error exit paths follow */ + +failExport: +failParams: + + PVR_ASSERT(eError != PVRSRV_OK); + return eError; + +} + +static void +_Mapping_Unexport(DEVMEM_IMPORT *psImport, + DEVMEM_EXPORTHANDLE hPMRExportHandle) +{ + PVRSRV_ERROR eError; + + PVR_ASSERT (psImport != NULL); + + eError = BridgePMRUnexportPMR(GetBridgeHandle(psImport->hDevConnection), + hPMRExportHandle); + PVR_ASSERT(eError == PVRSRV_OK); +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemExport(DEVMEM_MEMDESC *psMemDesc, + DEVMEM_EXPORTCOOKIE *psExportCookie) +{ + /* Caller to provide storage for export cookie struct */ + PVRSRV_ERROR eError; + IMG_HANDLE hPMRExportHandle = 0; + IMG_UINT64 uiPMRExportPassword = 0; + IMG_DEVMEM_SIZE_T uiSize = 0; + IMG_DEVMEM_LOG2ALIGN_T uiLog2Contig = 0; + + PVR_GOTO_IF_INVALID_PARAM(psMemDesc, eError, e0); + PVR_GOTO_IF_INVALID_PARAM(psExportCookie, eError, e0); + + if (DEVMEM_PROPERTIES_EXPORTABLE != + (GetImportProperties(psMemDesc->psImport) & DEVMEM_PROPERTIES_EXPORTABLE)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: This Memory (0x%p) cannot be exported!...", + __func__, psMemDesc)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_REQUEST, e0); + } + + eError = _Mapping_Export(psMemDesc->psImport, + &hPMRExportHandle, + &uiPMRExportPassword, + &uiSize, + &uiLog2Contig); + if 
(eError != PVRSRV_OK) + { + psExportCookie->uiSize = 0; + goto e0; + } + + psExportCookie->hPMRExportHandle = hPMRExportHandle; + psExportCookie->uiPMRExportPassword = uiPMRExportPassword; + psExportCookie->uiSize = uiSize; + psExportCookie->uiLog2ContiguityGuarantee = uiLog2Contig; + + return PVRSRV_OK; + + /* error exit paths follow */ + +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +IMG_INTERNAL void +DevmemUnexport(DEVMEM_MEMDESC *psMemDesc, + DEVMEM_EXPORTCOOKIE *psExportCookie) +{ + _Mapping_Unexport(psMemDesc->psImport, + psExportCookie->hPMRExportHandle); + + psExportCookie->uiSize = 0; +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemImport(SHARED_DEV_CONNECTION hDevConnection, + DEVMEM_EXPORTCOOKIE *psCookie, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + DEVMEM_MEMDESC **ppsMemDescPtr) +{ + DEVMEM_MEMDESC *psMemDesc = NULL; + DEVMEM_IMPORT *psImport; + IMG_HANDLE hPMR; + PVRSRV_ERROR eError; + + PVR_GOTO_IF_INVALID_PARAM(ppsMemDescPtr, eError, failParams); + + eError = DevmemMemDescAlloc(&psMemDesc); + PVR_GOTO_IF_ERROR(eError, failMemDescAlloc); + + eError = DevmemImportStructAlloc(hDevConnection, + &psImport); + if (eError != PVRSRV_OK) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, failImportAlloc); + } + + /* Get a handle to the PMR (inc refcount) */ + eError = BridgePMRImportPMR(GetBridgeHandle(hDevConnection), + psCookie->hPMRExportHandle, + psCookie->uiPMRExportPassword, + psCookie->uiSize, /* not trusted - just for validation */ + psCookie->uiLog2ContiguityGuarantee, /* not trusted - just for validation */ + &hPMR); + PVR_GOTO_IF_ERROR(eError, failImport); + + DevmemImportStructInit(psImport, + psCookie->uiSize, + 1ULL << psCookie->uiLog2ContiguityGuarantee, + uiFlags, + hPMR, + DEVMEM_PROPERTIES_IMPORTED | + DEVMEM_PROPERTIES_EXPORTABLE); + + DevmemMemDescInit(psMemDesc, + 0, + psImport, + psImport->uiSize); + + *ppsMemDescPtr = psMemDesc; + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + if 
(PVRSRVIsBridgeEnabled(GetBridgeHandle(psMemDesc->psImport->hDevConnection), PVRSRV_BRIDGE_RI)) + { + /* Attach RI information */ + eError = BridgeRIWriteMEMDESCEntry (GetBridgeHandle(psMemDesc->psImport->hDevConnection), + psMemDesc->psImport->hPMR, + sizeof("^"), + "^", + psMemDesc->uiOffset, + psMemDesc->psImport->uiSize, + IMG_TRUE, + IMG_TRUE, + &psMemDesc->hRIHandle); + PVR_LOG_IF_ERROR(eError, "BridgeRIWriteMEMDESCEntry"); + } +#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ + + return PVRSRV_OK; + + /* error exit paths follow */ + +failImport: + DevmemImportDiscard(psImport); +failImportAlloc: + DevmemMemDescDiscard(psMemDesc); +failMemDescAlloc: +failParams: + PVR_ASSERT(eError != PVRSRV_OK); + + return eError; +} + +#endif /* SUPPORT_INSECURE_EXPORT */ + +/***************************************************************************** + * Common MemDesc functions * + *****************************************************************************/ +IMG_INTERNAL PVRSRV_ERROR +DevmemUnpin(DEVMEM_MEMDESC *psMemDesc) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + DEVMEM_IMPORT *psImport = psMemDesc->psImport; + DEVMEM_PROPERTIES_T uiProperties = GetImportProperties(psImport); + + if (uiProperties & DEVMEM_PROPERTIES_NO_LAYOUT_CHANGE) + { + eError = PVRSRV_ERROR_INVALID_REQUEST; + PVR_DPF((PVR_DBG_ERROR, + "%s: The passed allocation is not valid to unpin", + __func__)); + + goto e_exit; + } + + /* Stop if the allocation might have suballocations. */ + if (!(uiProperties & DEVMEM_PROPERTIES_EXPORTABLE)) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + PVR_DPF((PVR_DBG_ERROR, + "%s: The passed allocation is not valid to unpin because " + "there might be suballocations on it. 
Make sure you allocate a page multiple " + "of the heap when using PVRSRVAllocDeviceMem()", + __func__)); + + goto e_exit; + } + + /* Stop if the Import is still mapped to CPU */ + if (psImport->sCPUImport.ui32RefCount) + { + eError = PVRSRV_ERROR_STILL_MAPPED; + PVR_DPF((PVR_DBG_ERROR, + "%s: There are still %u references on the CPU mapping. " + "Please remove all CPU mappings before unpinning.", + __func__, + psImport->sCPUImport.ui32RefCount)); + + goto e_exit; + } + + /* Only unpin if it is not already unpinned + * Return PVRSRV_OK */ + if (uiProperties & DEVMEM_PROPERTIES_UNPINNED) + { + goto e_exit; + } + + /* Unpin it and invalidate mapping */ + if (psImport->sDeviceImport.bMapped) + { + eError = BridgeDevmemIntUnpinInvalidate(GetBridgeHandle(psImport->hDevConnection), + psImport->sDeviceImport.hMapping, + psImport->hPMR); + } + else + { + /* Or just unpin it */ + eError = BridgeDevmemIntUnpin(GetBridgeHandle(psImport->hDevConnection), + psImport->hPMR); + } + + /* Update flags and RI when call was successful */ + if (eError == PVRSRV_OK) + { + OSLockAcquire(psImport->hLock); + psImport->uiProperties |= DEVMEM_PROPERTIES_UNPINNED; + OSLockRelease(psImport->hLock); + } + else + { + /* Or just show what went wrong */ + PVR_DPF((PVR_DBG_ERROR, "%s: Unpin aborted because of error %d", + __func__, + eError)); + } + +e_exit: + return eError; +} + + +IMG_INTERNAL PVRSRV_ERROR +DevmemPin(DEVMEM_MEMDESC *psMemDesc) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + DEVMEM_IMPORT *psImport = psMemDesc->psImport; + DEVMEM_PROPERTIES_T uiProperties = GetImportProperties(psImport); + + if (uiProperties & DEVMEM_PROPERTIES_NO_LAYOUT_CHANGE) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_REQUEST, e_exit); + } + + /* Only pin if it is unpinned */ + if ((uiProperties & DEVMEM_PROPERTIES_UNPINNED) == 0) + { + goto e_exit; + } + + /* Pin it and make mapping valid */ + if (psImport->sDeviceImport.bMapped) + { + eError = 
BridgeDevmemIntPinValidate(GetBridgeHandle(psImport->hDevConnection), + psImport->sDeviceImport.hMapping, + psImport->hPMR); + } + else + { + /* Or just pin it */ + eError = BridgeDevmemIntPin(GetBridgeHandle(psImport->hDevConnection), + psImport->hPMR); + } + + if ((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_PMR_NEW_MEMORY)) + { + OSLockAcquire(psImport->hLock); + psImport->uiProperties &= ~DEVMEM_PROPERTIES_UNPINNED; + OSLockRelease(psImport->hLock); + } + else + { + /* Or just show what went wrong */ + PVR_DPF((PVR_DBG_ERROR, "%s: Pin aborted because of error %d", + __func__, + eError)); + } + +e_exit: + return eError; +} + + +IMG_INTERNAL PVRSRV_ERROR +DevmemGetSize(DEVMEM_MEMDESC *psMemDesc, IMG_DEVMEM_SIZE_T* puiSize) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + *puiSize = psMemDesc->uiAllocSize; + + return eError; +} + +IMG_INTERNAL void +DevmemGetAnnotation(DEVMEM_MEMDESC *psMemDesc, IMG_CHAR **pszAnnotation) +{ + /* + * It is expected that psMemDesc->szText is a valid NUL-terminated string, + * since DevmemMemDescAlloc uses OSAllocZMem to create the memdesc. 
+ */ + *pszAnnotation = psMemDesc->szText; +} + +/* + This function is called for freeing any class of memory + */ +IMG_INTERNAL IMG_BOOL +DevmemFree(DEVMEM_MEMDESC *psMemDesc) +{ + if (GetImportProperties(psMemDesc->psImport) & DEVMEM_PROPERTIES_SECURE) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Please use methods dedicated to secure buffers.", + __func__)); + return IMG_FALSE; + } + + return DevmemMemDescRelease(psMemDesc); +} + +IMG_INTERNAL IMG_BOOL +DevmemReleaseDevAddrAndFree(DEVMEM_MEMDESC *psMemDesc) +{ + DevmemReleaseDevVirtAddr(psMemDesc); + return DevmemFree(psMemDesc); +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemMapToDevice(DEVMEM_MEMDESC *psMemDesc, + DEVMEM_HEAP *psHeap, + IMG_DEV_VIRTADDR *psDevVirtAddr) +{ + DEVMEM_IMPORT *psImport; + IMG_DEV_VIRTADDR sDevVAddr; + PVRSRV_ERROR eError; + IMG_BOOL bMap = IMG_TRUE; + IMG_BOOL bDestroyed = IMG_FALSE; + IMG_UINT64 ui64OptionalMapAddress = DEVICEMEM_UTILS_NO_ADDRESS; + DEVMEM_PROPERTIES_T uiProperties = GetImportProperties(psMemDesc->psImport); + + /* Do not try to map unpinned memory */ + if (uiProperties & DEVMEM_PROPERTIES_UNPINNED) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_MAP_REQUEST, failFlags); + } + + OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock); + PVR_GOTO_IF_INVALID_PARAM(psHeap, eError, failParams); + + if (psMemDesc->sDeviceMemDesc.ui32RefCount != 0) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED, failCheck); + } + + /* Don't map memory for deferred allocations */ + if (psMemDesc->psImport->uiFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC) + { + PVR_ASSERT(uiProperties & DEVMEM_PROPERTIES_EXPORTABLE); + bMap = IMG_FALSE; + } + + DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", + __func__, + psMemDesc, + psMemDesc->sDeviceMemDesc.ui32RefCount, + psMemDesc->sDeviceMemDesc.ui32RefCount+1); + + psImport = psMemDesc->psImport; + DevmemMemDescAcquire(psMemDesc); + +#if defined(__KERNEL__) + if (psHeap->bPremapped) + { + ui64OptionalMapAddress = _GetPremappedVA(psImport->hPMR, 
psHeap->psCtx->hDevConnection); + } +#endif + + eError = DevmemImportStructDevMap(psHeap, + bMap, + psImport, + ui64OptionalMapAddress); + PVR_GOTO_IF_ERROR(eError, failMap); + + sDevVAddr.uiAddr = psImport->sDeviceImport.sDevVAddr.uiAddr; + sDevVAddr.uiAddr += psMemDesc->uiOffset; + psMemDesc->sDeviceMemDesc.sDevVAddr = sDevVAddr; + psMemDesc->sDeviceMemDesc.ui32RefCount++; + + *psDevVirtAddr = psMemDesc->sDeviceMemDesc.sDevVAddr; + + OSLockRelease(psMemDesc->sDeviceMemDesc.hLock); + + if (GetInfoPageDebugFlags(psMemDesc->psImport->hDevConnection) & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) + { + BridgeDevicememHistoryMap(GetBridgeHandle(psMemDesc->psImport->hDevConnection), + psMemDesc->psImport->hPMR, + psMemDesc->uiOffset, + psMemDesc->sDeviceMemDesc.sDevVAddr, + psMemDesc->uiAllocSize, + psMemDesc->szText, + DevmemGetHeapLog2PageSize(psHeap), + psMemDesc->ui32AllocationIndex, + &psMemDesc->ui32AllocationIndex); + } + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psImport->hDevConnection), PVRSRV_BRIDGE_RI)) + { + if (psMemDesc->hRIHandle) + { + eError = BridgeRIUpdateMEMDESCAddr(GetBridgeHandle(psImport->hDevConnection), + psMemDesc->hRIHandle, + psImport->sDeviceImport.sDevVAddr); + PVR_LOG_IF_ERROR(eError, "BridgeRIUpdateMEMDESCAddr"); + } + } +#endif + + return PVRSRV_OK; + +failMap: + bDestroyed = DevmemMemDescRelease(psMemDesc); +failCheck: +failParams: + if (!bDestroyed) + { + OSLockRelease(psMemDesc->sDeviceMemDesc.hLock); + } + PVR_ASSERT(eError != PVRSRV_OK); +failFlags: + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemMapToDeviceAddress(DEVMEM_MEMDESC *psMemDesc, + DEVMEM_HEAP *psHeap, + IMG_DEV_VIRTADDR sDevVirtAddr) +{ + DEVMEM_IMPORT *psImport; + IMG_DEV_VIRTADDR sDevVAddr; + PVRSRV_ERROR eError; + IMG_BOOL bMap = IMG_TRUE; + IMG_BOOL bDestroyed = IMG_FALSE; + DEVMEM_PROPERTIES_T uiProperties = GetImportProperties(psMemDesc->psImport); + + /* Do not try to map unpinned memory */ + if (uiProperties & 
DEVMEM_PROPERTIES_UNPINNED) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_MAP_REQUEST, failFlags); + } + + OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock); + PVR_GOTO_IF_INVALID_PARAM(psHeap, eError, failParams); + + if (psMemDesc->sDeviceMemDesc.ui32RefCount != 0) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED, failCheck); + } + + /* Don't map memory for deferred allocations */ + if (psMemDesc->psImport->uiFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC) + { + PVR_ASSERT(uiProperties & DEVMEM_PROPERTIES_EXPORTABLE); + bMap = IMG_FALSE; + } + + DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", + __func__, + psMemDesc, + psMemDesc->sDeviceMemDesc.ui32RefCount, + psMemDesc->sDeviceMemDesc.ui32RefCount+1); + + psImport = psMemDesc->psImport; + DevmemMemDescAcquire(psMemDesc); + + eError = DevmemImportStructDevMap(psHeap, + bMap, + psImport, + sDevVirtAddr.uiAddr); + PVR_GOTO_IF_ERROR(eError, failMap); + + sDevVAddr.uiAddr = psImport->sDeviceImport.sDevVAddr.uiAddr; + sDevVAddr.uiAddr += psMemDesc->uiOffset; + psMemDesc->sDeviceMemDesc.sDevVAddr = sDevVAddr; + psMemDesc->sDeviceMemDesc.ui32RefCount++; + + OSLockRelease(psMemDesc->sDeviceMemDesc.hLock); + + if (GetInfoPageDebugFlags(psMemDesc->psImport->hDevConnection) & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) + { + BridgeDevicememHistoryMap(GetBridgeHandle(psMemDesc->psImport->hDevConnection), + psMemDesc->psImport->hPMR, + psMemDesc->uiOffset, + psMemDesc->sDeviceMemDesc.sDevVAddr, + psMemDesc->uiAllocSize, + psMemDesc->szText, + DevmemGetHeapLog2PageSize(psHeap), + psMemDesc->ui32AllocationIndex, + &psMemDesc->ui32AllocationIndex); + } + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psImport->hDevConnection), PVRSRV_BRIDGE_RI)) + { + if (psMemDesc->hRIHandle) + { + eError = BridgeRIUpdateMEMDESCAddr(GetBridgeHandle(psImport->hDevConnection), + psMemDesc->hRIHandle, + psImport->sDeviceImport.sDevVAddr); + PVR_LOG_IF_ERROR(eError, 
"BridgeRIUpdateMEMDESCAddr"); + } + } +#endif + + return PVRSRV_OK; + +failMap: + bDestroyed = DevmemMemDescRelease(psMemDesc); +failCheck: +failParams: + if (!bDestroyed) + { + OSLockRelease(psMemDesc->sDeviceMemDesc.hLock); + } + PVR_ASSERT(eError != PVRSRV_OK); +failFlags: + return eError; +} + +IMG_INTERNAL IMG_DEV_VIRTADDR +DevmemGetDevVirtAddr(DEVMEM_MEMDESC *psMemDesc) +{ + if (psMemDesc->sDeviceMemDesc.ui32RefCount == 0) + { + PVR_LOG_ERROR(PVRSRV_ERROR_DEVICEMEM_NO_MAPPING, "DevmemGetDevVirtAddr"); + } + + PVR_ASSERT(psMemDesc->sDeviceMemDesc.sDevVAddr.uiAddr !=0 ); + + return psMemDesc->sDeviceMemDesc.sDevVAddr; +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemAcquireDevVirtAddr(DEVMEM_MEMDESC *psMemDesc, + IMG_DEV_VIRTADDR *psDevVirtAddr) +{ + PVRSRV_ERROR eError; + + /* Do not try to map unpinned memory */ + if (GetImportProperties(psMemDesc->psImport) & DEVMEM_PROPERTIES_UNPINNED) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_MAP_REQUEST, failCheck); + } + + OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock); + DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", + __func__, + psMemDesc, + psMemDesc->sDeviceMemDesc.ui32RefCount, + psMemDesc->sDeviceMemDesc.ui32RefCount+1); + + if (psMemDesc->sDeviceMemDesc.ui32RefCount == 0) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_NO_MAPPING, failRelease); + } + psMemDesc->sDeviceMemDesc.ui32RefCount++; + + *psDevVirtAddr = psMemDesc->sDeviceMemDesc.sDevVAddr; + OSLockRelease(psMemDesc->sDeviceMemDesc.hLock); + + return PVRSRV_OK; + +failRelease: + OSLockRelease(psMemDesc->sDeviceMemDesc.hLock); + PVR_ASSERT(eError != PVRSRV_OK); +failCheck: + return eError; +} + +IMG_INTERNAL void +DevmemReleaseDevVirtAddr(DEVMEM_MEMDESC *psMemDesc) +{ + PVR_ASSERT(psMemDesc != NULL); + + OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock); + DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", + __func__, + psMemDesc, + psMemDesc->sDeviceMemDesc.ui32RefCount, + psMemDesc->sDeviceMemDesc.ui32RefCount-1); + + 
PVR_ASSERT(psMemDesc->sDeviceMemDesc.ui32RefCount != 0); + + if (--psMemDesc->sDeviceMemDesc.ui32RefCount == 0) + { + if (GetInfoPageDebugFlags(psMemDesc->psImport->hDevConnection) & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) + { + BridgeDevicememHistoryUnmap(GetBridgeHandle(psMemDesc->psImport->hDevConnection), + psMemDesc->psImport->hPMR, + psMemDesc->uiOffset, + psMemDesc->sDeviceMemDesc.sDevVAddr, + psMemDesc->uiAllocSize, + psMemDesc->szText, + DevmemGetHeapLog2PageSize(psMemDesc->psImport->sDeviceImport.psHeap), + psMemDesc->ui32AllocationIndex, + &psMemDesc->ui32AllocationIndex); + } + + /* When device mapping destroyed, zero Dev VA so DevmemGetDevVirtAddr() + * returns 0 */ + if (DevmemImportStructDevUnmap(psMemDesc->psImport) == IMG_TRUE) + { + psMemDesc->sDeviceMemDesc.sDevVAddr.uiAddr = 0; + } + OSLockRelease(psMemDesc->sDeviceMemDesc.hLock); + + DevmemMemDescRelease(psMemDesc); + } + else + { + OSLockRelease(psMemDesc->sDeviceMemDesc.hLock); + } +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemAcquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc, + void **ppvCpuVirtAddr) +{ + PVRSRV_ERROR eError; + DEVMEM_PROPERTIES_T uiProperties; + + PVR_ASSERT(psMemDesc != NULL); + PVR_ASSERT(ppvCpuVirtAddr != NULL); + + uiProperties = GetImportProperties(psMemDesc->psImport); + + if (uiProperties & + (DEVMEM_PROPERTIES_UNPINNED | DEVMEM_PROPERTIES_SECURE)) + { +#if defined(SUPPORT_SECURITY_VALIDATION) + if (uiProperties & DEVMEM_PROPERTIES_SECURE) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Allocation is a secure buffer. " + "It should not be possible to map to CPU, but for security " + "validation this will be allowed for testing purposes, " + "as long as the buffer is pinned.", + __func__)); + } + + if (uiProperties & DEVMEM_PROPERTIES_UNPINNED) +#endif + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Allocation is currently unpinned or a secure buffer. 
" + "Not possible to map to CPU!", + __func__)); + return PVRSRV_ERROR_INVALID_MAP_REQUEST; + } + } + + if (uiProperties & DEVMEM_PROPERTIES_NO_CPU_MAPPING) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: CPU Mapping is not possible on this allocation!", + __func__)); + return PVRSRV_ERROR_INVALID_MAP_REQUEST; + } + + OSLockAcquire(psMemDesc->sCPUMemDesc.hLock); + DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", + __func__, + psMemDesc, + psMemDesc->sCPUMemDesc.ui32RefCount, + psMemDesc->sCPUMemDesc.ui32RefCount+1); + + if (psMemDesc->sCPUMemDesc.ui32RefCount++ == 0) + { + DEVMEM_IMPORT *psImport = psMemDesc->psImport; + IMG_UINT8 *pui8CPUVAddr; + + DevmemMemDescAcquire(psMemDesc); + eError = DevmemImportStructCPUMap(psImport); + PVR_GOTO_IF_ERROR(eError, failMap); + + pui8CPUVAddr = psImport->sCPUImport.pvCPUVAddr; + pui8CPUVAddr += psMemDesc->uiOffset; + psMemDesc->sCPUMemDesc.pvCPUVAddr = pui8CPUVAddr; + } + *ppvCpuVirtAddr = psMemDesc->sCPUMemDesc.pvCPUVAddr; + + VG_MARK_INITIALIZED(*ppvCpuVirtAddr, psMemDesc->psImport->uiSize); + + OSLockRelease(psMemDesc->sCPUMemDesc.hLock); + + return PVRSRV_OK; + +failMap: + PVR_ASSERT(eError != PVRSRV_OK); + psMemDesc->sCPUMemDesc.ui32RefCount--; + + if (!DevmemMemDescRelease(psMemDesc)) + { + OSLockRelease(psMemDesc->sCPUMemDesc.hLock); + } + return eError; +} + +IMG_INTERNAL void +DevmemReacquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc, + void **ppvCpuVirtAddr) +{ + PVR_ASSERT(psMemDesc != NULL); + PVR_ASSERT(ppvCpuVirtAddr != NULL); + + if (GetImportProperties(psMemDesc->psImport) & DEVMEM_PROPERTIES_NO_CPU_MAPPING) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: CPU UnMapping is not possible on this allocation!", + __func__)); + return; + } + + OSLockAcquire(psMemDesc->sCPUMemDesc.hLock); + DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", + __func__, + psMemDesc, + psMemDesc->sCPUMemDesc.ui32RefCount, + psMemDesc->sCPUMemDesc.ui32RefCount+1); + + *ppvCpuVirtAddr = NULL; + if (psMemDesc->sCPUMemDesc.ui32RefCount) + { + *ppvCpuVirtAddr = 
psMemDesc->sCPUMemDesc.pvCPUVAddr; + psMemDesc->sCPUMemDesc.ui32RefCount += 1; + } + + VG_MARK_INITIALIZED(*ppvCpuVirtAddr, psMemDesc->psImport->uiSize); + OSLockRelease(psMemDesc->sCPUMemDesc.hLock); +} + +IMG_INTERNAL void +DevmemReleaseCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc) +{ + PVR_ASSERT(psMemDesc != NULL); + + if (GetImportProperties(psMemDesc->psImport) & DEVMEM_PROPERTIES_NO_CPU_MAPPING) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: CPU UnMapping is not possible on this allocation!", + __func__)); + return; + } + + OSLockAcquire(psMemDesc->sCPUMemDesc.hLock); + DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", + __func__, + psMemDesc, + psMemDesc->sCPUMemDesc.ui32RefCount, + psMemDesc->sCPUMemDesc.ui32RefCount-1); + + PVR_ASSERT(psMemDesc->sCPUMemDesc.ui32RefCount != 0); + + if (--psMemDesc->sCPUMemDesc.ui32RefCount == 0) + { + OSLockRelease(psMemDesc->sCPUMemDesc.hLock); + DevmemImportStructCPUUnmap(psMemDesc->psImport); + DevmemMemDescRelease(psMemDesc); + } + else + { + OSLockRelease(psMemDesc->sCPUMemDesc.hLock); + } +} + +IMG_INTERNAL PVRSRV_ERROR +DevmemLocalGetImportHandle(DEVMEM_MEMDESC *psMemDesc, + IMG_HANDLE *phImport) +{ + if ((GetImportProperties(psMemDesc->psImport) & DEVMEM_PROPERTIES_EXPORTABLE) == 0) + { + return PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION; + } + + *phImport = psMemDesc->psImport->hPMR; + + return PVRSRV_OK; +} + +#if !defined(__KERNEL__) +IMG_INTERNAL PVRSRV_ERROR +DevmemGetImportUID(DEVMEM_MEMDESC *psMemDesc, + IMG_UINT64 *pui64UID) +{ + DEVMEM_IMPORT *psImport = psMemDesc->psImport; + PVRSRV_ERROR eError; + + if (!(GetImportProperties(psImport) & (DEVMEM_PROPERTIES_IMPORTED | + DEVMEM_PROPERTIES_EXPORTABLE))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: This Memory (0x%p) doesn't support the functionality requested...", + __func__, psMemDesc)); + return PVRSRV_ERROR_INVALID_REQUEST; + } + + eError = BridgePMRGetUID(GetBridgeHandle(psImport->hDevConnection), + psImport->hPMR, + pui64UID); + + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR 
+DevmemGetReservation(DEVMEM_MEMDESC *psMemDesc, + IMG_HANDLE *hReservation) +{ + DEVMEM_IMPORT *psImport; + + PVR_ASSERT(psMemDesc); + psImport = psMemDesc->psImport; + + PVR_ASSERT(psImport); + *hReservation = psImport->sDeviceImport.hReservation; + + return PVRSRV_OK; +} + +#endif /* !__KERNEL__ */ + +/* Kernel usage of this function will only work with + * memdescs of buffers allocated in the FW memory context + * that is created in the Server + */ +PVRSRV_ERROR +DevmemGetPMRData(DEVMEM_MEMDESC *psMemDesc, + IMG_HANDLE *phPMR, + IMG_DEVMEM_OFFSET_T *puiPMROffset) +{ + DEVMEM_IMPORT *psImport; + + PVR_ASSERT(psMemDesc); + *puiPMROffset = psMemDesc->uiOffset; + psImport = psMemDesc->psImport; + + PVR_ASSERT(psImport); + *phPMR = psImport->hPMR; + + return PVRSRV_OK; +} + +#if defined(__KERNEL__) +IMG_INTERNAL void +DevmemGetFlags(DEVMEM_MEMDESC *psMemDesc, + PVRSRV_MEMALLOCFLAGS_T *puiFlags) +{ + DEVMEM_IMPORT *psImport; + + PVR_ASSERT(psMemDesc); + psImport = psMemDesc->psImport; + + PVR_ASSERT(psImport); + *puiFlags = psImport->uiFlags; +} + +IMG_INTERNAL SHARED_DEV_CONNECTION +DevmemGetConnection(DEVMEM_MEMDESC *psMemDesc) +{ + return psMemDesc->psImport->hDevConnection; +} +#endif /* __KERNEL__ */ + +IMG_INTERNAL PVRSRV_ERROR +DevmemLocalImport(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hExtHandle, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + DEVMEM_MEMDESC **ppsMemDescPtr, + IMG_DEVMEM_SIZE_T *puiSizePtr, + const IMG_CHAR *pszAnnotation) +{ + DEVMEM_MEMDESC *psMemDesc = NULL; + DEVMEM_IMPORT *psImport; + IMG_DEVMEM_SIZE_T uiSize; + IMG_DEVMEM_ALIGN_T uiAlign; + IMG_HANDLE hPMR; + PVRSRV_ERROR eError; + + PVR_GOTO_IF_INVALID_PARAM(ppsMemDescPtr, eError, failParams); + + eError = DevmemMemDescAlloc(&psMemDesc); + PVR_GOTO_IF_ERROR(eError, failMemDescAlloc); + + eError = DevmemImportStructAlloc(hDevConnection, + &psImport); + if (eError != PVRSRV_OK) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, failImportAlloc); + } + + /* Get the PMR handle and 
its size from the server */ + eError = BridgePMRLocalImportPMR(GetBridgeHandle(hDevConnection), + hExtHandle, + &hPMR, + &uiSize, + &uiAlign); + PVR_GOTO_IF_ERROR(eError, failImport); + + DevmemImportStructInit(psImport, + uiSize, + uiAlign, + uiFlags, + hPMR, + DEVMEM_PROPERTIES_IMPORTED | + DEVMEM_PROPERTIES_EXPORTABLE); + + DevmemMemDescInit(psMemDesc, + 0, + psImport, + uiSize); + + *ppsMemDescPtr = psMemDesc; + if (puiSizePtr) + *puiSizePtr = uiSize; + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psMemDesc->psImport->hDevConnection), PVRSRV_BRIDGE_RI)) + { + /* Attach RI information. + * Set backed size to 0 since this allocation has been allocated + * by the same process and has been accounted for. */ + eError = BridgeRIWriteMEMDESCEntry (GetBridgeHandle(psMemDesc->psImport->hDevConnection), + psMemDesc->psImport->hPMR, + sizeof("^"), + "^", + psMemDesc->uiOffset, + psMemDesc->psImport->uiSize, + IMG_TRUE, + IMG_FALSE, + &(psMemDesc->hRIHandle)); + PVR_LOG_IF_ERROR(eError, "BridgeRIWriteMEMDESCEntry"); + } +#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ + + + /* Copy the allocation descriptive name and size so it can be passed + * to DevicememHistory when the allocation gets mapped/unmapped + */ + CheckAnnotationLength(pszAnnotation); + OSStringLCopy(psMemDesc->szText, pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN); + + return PVRSRV_OK; + +failImport: + DevmemImportDiscard(psImport); +failImportAlloc: + DevmemMemDescDiscard(psMemDesc); +failMemDescAlloc: +failParams: + PVR_ASSERT(eError != PVRSRV_OK); + + return eError; +} + +#if !defined(__KERNEL__) +IMG_INTERNAL PVRSRV_ERROR +DevmemIsDevVirtAddrValid(DEVMEM_CONTEXT *psContext, + IMG_DEV_VIRTADDR sDevVAddr) +{ + return BridgeDevmemIsVDevAddrValid(GetBridgeHandle(psContext->hDevConnection), + psContext->hDevMemServerContext, + sDevVAddr); +} + + +IMG_INTERNAL PVRSRV_ERROR +DevmemGetFaultAddress(DEVMEM_CONTEXT *psContext, + IMG_DEV_VIRTADDR *psFaultAddress) +{ + 
return BridgeDevmemGetFaultAddress(GetBridgeHandle(psContext->hDevConnection), + psContext->hDevMemServerContext, + psFaultAddress); +} +IMG_INTERNAL PVRSRV_ERROR +DevmemFlushDeviceSLCRange(DEVMEM_MEMDESC *psMemDesc, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + IMG_BOOL bInvalidate) +{ + DEVMEM_IMPORT *psImport = psMemDesc->psImport; + return BridgeDevmemFlushDevSLCRange(GetBridgeHandle(psImport->hDevConnection), + psImport->sDeviceImport.psHeap->psCtx->hDevMemServerContext, + sDevVAddr, + uiSize, + bInvalidate); +} + +#if defined(RGX_FEATURE_FBCDC) +IMG_INTERNAL PVRSRV_ERROR +DevmemInvalidateFBSCTable(DEVMEM_CONTEXT *psContext, + IMG_UINT64 ui64FBSCEntries) +{ + return BridgeDevmemInvalidateFBSCTable(GetBridgeHandle(psContext->hDevConnection), + psContext->hDevMemServerContext, + ui64FBSCEntries); +} +#endif + +#endif /* !__KERNEL__ */ + +IMG_INTERNAL IMG_UINT32 +DevmemGetHeapLog2PageSize(DEVMEM_HEAP *psHeap) +{ + return psHeap->uiLog2Quantum; +} + +IMG_INTERNAL IMG_DEVMEM_SIZE_T +DevmemGetHeapReservedSize(DEVMEM_HEAP *psHeap) +{ + return psHeap->uiReservedRegionSize; +} + +#if !defined(__KERNEL__) +/**************************************************************************/ /*! +@Function RegisterDevMemPFNotify +@Description Registers that the application wants to be signaled when a page + fault occurs. + +@Input psContext Memory context the process that would like to + be notified about. +@Input ui32PID The PID of the calling process. +@Input bRegister If true, register. If false, de-register. +@Return PVRSRV_ERROR: PVRSRV_OK on success. 
Otherwise, a PVRSRV_ + error code + */ /***************************************************************************/ +IMG_INTERNAL PVRSRV_ERROR +RegisterDevmemPFNotify(DEVMEM_CONTEXT *psContext, + IMG_UINT32 ui32PID, + IMG_BOOL bRegister) +{ + PVRSRV_ERROR eError; + + eError = BridgeDevmemIntRegisterPFNotifyKM(GetBridgeHandle(psContext->hDevConnection), + psContext->hDevMemServerContext, + ui32PID, + bRegister); + if (eError == PVRSRV_ERROR_BRIDGE_CALL_FAILED) + { + PVR_LOG_ERROR(eError, "BridgeDevmemIntRegisterPFNotifyKM"); + } + + return eError; +} +#endif /* !__KERNEL__ */ + +IMG_INTERNAL void +DevmemHeapSetPremapStatus(DEVMEM_HEAP *psHeap, IMG_BOOL IsPremapped) +{ + psHeap->bPremapped = IsPremapped; +} diff --git a/drivers/gpu/drm/phytium/octopus/devicemem.h b/drivers/gpu/drm/phytium/octopus/devicemem.h new file mode 100644 index 000000000000..96002b8a2c4e --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/devicemem.h @@ -0,0 +1,736 @@ +/*************************************************************************/ /*! +@File +@Title Device Memory Management core internal +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Services internal interface to core device memory management + functions that are shared between client and server code. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef SRVCLIENT_DEVICEMEM_H +#define SRVCLIENT_DEVICEMEM_H + +/****************************************************************************** + * * + * +------------+ +------------+ +--------------+ +--------------+ * + * | a sub- | | a sub- | | an | | allocation | * + * | allocation | | allocation | | allocation | | also mapped | * + * | | | | | in proc 1 | | into proc 2 | * + * +------------+ +------------+ +--------------+ +--------------+ * + * | | | | * + * +--------------+ +--------------+ +--------------+ * + * | page gran- | | page gran- | | page gran- | * + * | ular mapping | | ular mapping | | ular mapping | * + * +--------------+ +--------------+ +--------------+ * + * | | | * + * | | | * + * | | | * + * +--------------+ +--------------+ * + * | | | | * + * | A "P.M.R." | | A "P.M.R." | * + * | | | | * + * +--------------+ +--------------+ * + * * + ******************************************************************************/ + +/* + All device memory allocations are ultimately a view upon (not + necessarily the whole of) a "PMR". + + A PMR is a "Physical Memory Resource", which may be a + "pre-faulted" lump of physical memory, or it may be a + representation of some physical memory that will be instantiated + at some future time. + + PMRs always represent multiple of some power-of-2 "contiguity" + promised by the PMR, which will allow them to be mapped in whole + pages into the device MMU. As memory allocations may be smaller + than a page, these mappings may be suballocated and thus shared + between multiple allocations in one process. A PMR may also be + mapped simultaneously into multiple device memory contexts + (cross-process scenario), however, for security reasons, it is not + legal to share a PMR "both ways" at once, that is, mapped into + multiple processes and divided up amongst several suballocations. 
+ + This PMR terminology is introduced here for background + information, but is generally of little concern to the caller of + this API. This API handles suballocations and mappings, and the + caller thus deals primarily with MEMORY DESCRIPTORS representing + an allocation or suballocation, HEAPS representing ranges of + virtual addresses in a CONTEXT. +*/ + +/* + |<---------------------------context------------------------------>| + |<-------heap------->| |<-------heap------->|<-------heap------->| + |<-alloc->| | |<-alloc->|<-alloc->|| |<-alloc->| | +*/ + +#include "img_types.h" +#include "img_defs.h" +#include "devicemem_typedefs.h" +#include "pdumpdefs.h" +#include "pvrsrv_error.h" +#include "pvrsrv_memallocflags.h" + +#include "pdump.h" + +#include "device_connection.h" + + +typedef IMG_UINT32 DEVMEM_HEAPCFGID; +#define DEVMEM_HEAPCFG_FORCLIENTS 0 +#define DEVMEM_HEAPCFG_META 1 + + +/* + In order to call the server side functions, we need a bridge handle. + We abstract that here, as we may wish to change its form. + */ + +typedef IMG_HANDLE DEVMEM_BRIDGE_HANDLE; + +/*************************************************************************/ /*! +@Function DevmemUnpin +@Description This is the counterpart to DevmemPin(). It is meant to be + called before repinning an allocation. + + For a detailed description see client API documentation. + +@Input phMemDesc The MemDesc that is going to be unpinned. + +@Return PVRSRV_ERROR: PVRSRV_OK on success and the memory is + registered to be reclaimed. Error otherwise. +*/ /**************************************************************************/ +IMG_INTERNAL PVRSRV_ERROR +DevmemUnpin(DEVMEM_MEMDESC *psMemDesc); + +/*************************************************************************/ /*! +@Function DevmemPin +@Description This is the counterpart to DevmemUnpin(). It is meant to be + called after unpinning an allocation. + + For a detailed description see client API documentation. 
+ +@Input phMemDesc The MemDesc that is going to be pinned. + +@Return PVRSRV_ERROR: PVRSRV_OK on success and the allocation content + was successfully restored. + + PVRSRV_ERROR_PMR_NEW_MEMORY when the content + could not be restored and new physical memory + was allocated. + + A different error otherwise. +*/ /**************************************************************************/ +IMG_INTERNAL PVRSRV_ERROR +DevmemPin(DEVMEM_MEMDESC *psMemDesc); + +IMG_INTERNAL PVRSRV_ERROR +DevmemGetHeapInt(DEVMEM_HEAP *psHeap, + IMG_HANDLE *phDevmemHeap); + +IMG_INTERNAL PVRSRV_ERROR +DevmemGetSize(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_SIZE_T* puiSize); + +IMG_INTERNAL void +DevmemGetAnnotation(DEVMEM_MEMDESC *psMemDesc, + IMG_CHAR **pszAnnotation); + +/* + * DevmemCreateContext() + * + * Create a device memory context + * + * This must be called before any heap is created in this context + * + * Caller to provide bridge handle which will be recorded internally and used + * for all future operations on items from this memory context. Caller also + * to provide devicenode handle, as this is used for MMU configuration and + * also to determine the heap configuration for the auto-instantiated heaps. + * + * Note that when compiled in services/server, the hBridge is not used and + * is thrown away by the "fake" direct bridge. (This may change. It is + * recommended that NULL be passed for the handle for now.) + * + * hDeviceNode and uiHeapBlueprintID shall together dictate which heap-config + * to use. + * + * This will cause the server side counterpart to be created also. + * + * If you call DevmemCreateContext() (and the call succeeds) you are promising + * that you will later call Devmem_ContextDestroy(), except for abnormal + * process termination in which case it is expected it will be destroyed as + * part of handle clean up. + * + * Caller to provide storage for the pointer to the newly created + * NEWDEVMEM_CONTEXT object. 
+ */ +PVRSRV_ERROR +DevmemCreateContext(SHARED_DEV_CONNECTION hDevConnection, + DEVMEM_HEAPCFGID uiHeapBlueprintID, + DEVMEM_CONTEXT **ppsCtxPtr); + +/* + * DevmemAcquireDevPrivData() + * + * Acquire the device private data for this memory context + */ +PVRSRV_ERROR +DevmemAcquireDevPrivData(DEVMEM_CONTEXT *psCtx, + IMG_HANDLE *hPrivData); + +/* + * DevmemReleaseDevPrivData() + * + * Release the device private data for this memory context + */ +PVRSRV_ERROR +DevmemReleaseDevPrivData(DEVMEM_CONTEXT *psCtx); + +/* + * DevmemDestroyContext() + * + * Undoes that done by DevmemCreateContext() + */ +PVRSRV_ERROR +DevmemDestroyContext(DEVMEM_CONTEXT *psCtx); + +/* + * DevmemCreateHeap() + * + * Create a heap in the given context. + * + * N.B. Not intended to be called directly, though it can be. + * Normally, heaps are instantiated at context creation time according + * to the specified blueprint. See DevmemCreateContext() for details. + * + * This will cause MMU code to set up data structures for the heap, + * but may not cause page tables to be modified until allocations are + * made from the heap. + * + * uiReservedRegionLength Reserved address space for static VAs shared + * between clients and firmware + * + * The "Quantum" is both the device MMU page size to be configured for + * this heap, and the unit multiples of which "quantized" allocations + * are made (allocations smaller than this, known as "suballocations" + * will be made from a "sub alloc RA" and will "import" chunks + * according to this quantum) + * + * Where imported PMRs (or, for example, PMRs created by device class + * buffers) are mapped into this heap, it is important that the + * physical contiguity guarantee offered by the PMR is greater than or + * equal to the quantum size specified here, otherwise the attempt to + * map it will fail. 
"Normal" allocations via Devmem_Allocate + * shall automatically meet this requirement, as each "import" will + * trigger the creation of a PMR with the desired contiguity. The + * supported quantum sizes in that case shall be dictated by the OS + * specific implementation of PhysmemNewOSRamBackedPMR() (see) + */ +PVRSRV_ERROR +DevmemCreateHeap(DEVMEM_CONTEXT *psCtxPtr, + /* base and length of heap */ + IMG_DEV_VIRTADDR sBaseAddress, + IMG_DEVMEM_SIZE_T uiLength, + IMG_DEVMEM_SIZE_T uiReservedRegionLength, + /* log2 of allocation quantum, i.e. "page" size. + All allocations (that go to server side) are + multiples of this. We use a client-side RA to + make sub-allocations from this */ + IMG_UINT32 ui32Log2Quantum, + /* The minimum import alignment for this heap */ + IMG_UINT32 ui32Log2ImportAlignment, + /* Name of heap for debug */ + /* N.B. Okay to exist on caller's stack - this + func takes a copy if it needs it. */ + const IMG_CHAR *pszName, + DEVMEM_HEAPCFGID uiHeapBlueprintID, + DEVMEM_HEAP **ppsHeapPtr); +/* + * DevmemDestroyHeap() + * + * Reverses DevmemCreateHeap() + * + * N.B. All allocations must have been freed and all mappings must + * have been unmapped before invoking this call + */ +PVRSRV_ERROR +DevmemDestroyHeap(DEVMEM_HEAP *psHeap); + +/* + * DevmemExportalignAdjustSizeAndAlign() + * Compute the Size and Align passed to avoid suballocations + * (used when allocation with PVRSRV_MEMALLOCFLAG_EXPORTALIGN). + * + * Returns PVRSRV_ERROR_INVALID_PARAMS if uiLog2Quantum has invalid value. + */ +IMG_INTERNAL PVRSRV_ERROR +DevmemExportalignAdjustSizeAndAlign(IMG_UINT32 uiLog2Quantum, + IMG_DEVMEM_SIZE_T *puiSize, + IMG_DEVMEM_ALIGN_T *puiAlign); + +/* + * DevmemSubAllocate() + * + * Makes an allocation (possibly a "suballocation", as described + * below) of device virtual memory from this heap. + * + * The size and alignment of the allocation will be honoured by the RA + * that allocates the "suballocation". 
The resulting allocation will + * be mapped into GPU virtual memory and the physical memory to back + * it will exist, by the time this call successfully completes. + * + * The size must be a positive integer multiple of the alignment. + * (i.e. the alignment specifies the alignment of both the start and + * the end of the resulting allocation.) + * + * Allocations made via this API are routed through a "suballocation + * RA" which is responsible for ensuring that small allocations can be + * made without wasting physical memory in the server. Furthermore, + * such suballocations can be made entirely client side without + * needing to go to the server unless the allocation spills into a new + * page. + * + * Such suballocations cause many allocations to share the same "PMR". + * This happens only when the flags match exactly. + * + */ + +PVRSRV_ERROR +DevmemSubAllocate(IMG_UINT8 uiPreAllocMultiplier, + DEVMEM_HEAP *psHeap, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_ALIGN_T uiAlign, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + const IMG_CHAR *pszText, + DEVMEM_MEMDESC **ppsMemDescPtr); + +#define DevmemAllocate(...) 
\ + DevmemSubAllocate(DEVMEM_NO_PRE_ALLOCATE_MULTIPLIER, __VA_ARGS__) + +PVRSRV_ERROR +DevmemAllocateExportable(SHARED_DEV_CONNECTION hDevConnection, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_ALIGN_T uiAlign, + IMG_UINT32 uiLog2HeapPageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + const IMG_CHAR *pszText, + DEVMEM_MEMDESC **ppsMemDescPtr); + +PVRSRV_ERROR +DeviceMemChangeSparse(DEVMEM_MEMDESC *psMemDesc, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *paui32AllocPageIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pauiFreePageIndices, + SPARSE_MEM_RESIZE_FLAGS uiFlags); + +PVRSRV_ERROR +DevmemAllocateSparse(SHARED_DEV_CONNECTION hDevConnection, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_DEVMEM_ALIGN_T uiAlign, + IMG_UINT32 uiLog2HeapPageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + const IMG_CHAR *pszText, + DEVMEM_MEMDESC **ppsMemDescPtr); + +PVRSRV_ERROR +DevmemSubAllocateAndMap(IMG_UINT8 uiPreAllocMultiplier, + DEVMEM_HEAP *psHeap, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_ALIGN_T uiAlign, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + const IMG_CHAR *pszText, + DEVMEM_MEMDESC **ppsMemDescPtr, + IMG_DEV_VIRTADDR *psDevVirtAddr); + +#define DevmemAllocateAndMap(...) \ + DevmemSubAllocateAndMap(DEVMEM_NO_PRE_ALLOCATE_MULTIPLIER, __VA_ARGS__) + +/* + * DevmemFree() + * + * Reverses that done by DevmemSubAllocate() N.B. The underlying + * mapping and server side allocation _may_ not be torn down, for + * example, if the allocation has been exported, or if multiple + * allocations were suballocated from the same mapping, but this is + * properly refcounted, so the caller does not have to care. + */ + +IMG_BOOL +DevmemFree(DEVMEM_MEMDESC *psMemDesc); + +IMG_BOOL +DevmemReleaseDevAddrAndFree(DEVMEM_MEMDESC *psMemDesc); + +/* + DevmemMapToDevice: + + Map an allocation to the device it was allocated from. 
+ This function _must_ be called before any call to + DevmemAcquireDevVirtAddr is made as it binds the allocation + to the heap. + DevmemReleaseDevVirtAddr is used to release the reference + to the device mapping this function created, but it doesn't + mean that the memory will actually be unmapped from the + device as other references to the mapping obtained via + DevmemAcquireDevVirtAddr could still be active. +*/ +PVRSRV_ERROR DevmemMapToDevice(DEVMEM_MEMDESC *psMemDesc, + DEVMEM_HEAP *psHeap, + IMG_DEV_VIRTADDR *psDevVirtAddr); + +/* + DevmemMapToDeviceAddress: + + Same as DevmemMapToDevice but the caller chooses the address + to map to. +*/ +IMG_INTERNAL PVRSRV_ERROR +DevmemMapToDeviceAddress(DEVMEM_MEMDESC *psMemDesc, + DEVMEM_HEAP *psHeap, + IMG_DEV_VIRTADDR sDevVirtAddr); + +/* + DevmemGetDevVirtAddr + + Obtain the MemDesc's device virtual address. + This function _must_ be called after DevmemMapToDevice(Address) + and is expected to be used by functions which didn't allocate + the MemDesc but need to know its address. + It will PVR_ASSERT if no device mapping exists and 0 is returned. + */ +IMG_DEV_VIRTADDR +DevmemGetDevVirtAddr(DEVMEM_MEMDESC *psMemDesc); + +/* + DevmemAcquireDevVirtAddr + + Acquire the MemDesc's device virtual address. + This function _must_ be called after DevmemMapToDevice + and is expected to be used by functions which didn't allocate + the MemDesc but need to know its address + */ +PVRSRV_ERROR DevmemAcquireDevVirtAddr(DEVMEM_MEMDESC *psMemDesc, + IMG_DEV_VIRTADDR *psDevVirtAddrRet); + +/* + * DevmemReleaseDevVirtAddr() + * + * give up the licence to use the device virtual address that was + * acquired by "Acquire" or "MapToDevice" + */ +void +DevmemReleaseDevVirtAddr(DEVMEM_MEMDESC *psMemDesc); + +/* + * DevmemAcquireCpuVirtAddr() + * + * Acquires a license to use the cpu virtual address of this mapping. + * Note that the memory may not have been mapped into cpu virtual + * memory prior to this call.
On first "acquire" the memory will be + * mapped in (if it wasn't statically mapped in) and on last put it + * _may_ become unmapped. Later calling "Acquire" again, _may_ cause + * the memory to be mapped at a different address. + */ +PVRSRV_ERROR DevmemAcquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc, + void **ppvCpuVirtAddr); + +/* + * DevmemReacquireCpuVirtAddr() + * + * (Re)acquires license to use the cpu virtual address of this mapping + * if (and only if) there is already a pre-existing license to use the + * cpu virtual address for the mapping, returns NULL otherwise. + */ +void DevmemReacquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc, + void **ppvCpuVirtAddr); + +/* + * DevmemReleaseCpuVirtAddr() + * + * give up the licence to use the cpu virtual address that was granted + * with the "Get" call. + */ +void +DevmemReleaseCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc); + +#if defined(SUPPORT_INSECURE_EXPORT) +/* + * DevmemExport() + * + * Given a memory allocation allocated with DevmemAllocateExportable() + * create a "cookie" that can be passed intact by the caller's own choice + * of secure IPC to another process and used as the argument to "map" + * to map this memory into a heap in the target processes. N.B. This can + * also be used to map into multiple heaps in one process, though that's not + * the intention. + * + * Note, the caller must later call Unexport before freeing the + * memory.
+ */ +PVRSRV_ERROR DevmemExport(DEVMEM_MEMDESC *psMemDesc, + DEVMEM_EXPORTCOOKIE *psExportCookie); + + +void DevmemUnexport(DEVMEM_MEMDESC *psMemDesc, + DEVMEM_EXPORTCOOKIE *psExportCookie); + +PVRSRV_ERROR +DevmemImport(SHARED_DEV_CONNECTION hDevConnection, + DEVMEM_EXPORTCOOKIE *psCookie, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + DEVMEM_MEMDESC **ppsMemDescPtr); +#endif /* SUPPORT_INSECURE_EXPORT */ + +/* + * DevmemMakeLocalImportHandle() + * + * This is a "special case" function for making a server export cookie + * which went through the direct bridge into an export cookie that can + * be passed through the client bridge. + */ +PVRSRV_ERROR +DevmemMakeLocalImportHandle(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hServerExport, + IMG_HANDLE *hClientExport); + +/* + * DevmemUnmakeLocalImportHandle() + * + * Free any resource associated with the Make operation + */ +PVRSRV_ERROR +DevmemUnmakeLocalImportHandle(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hClientExport); + +/* + * + * The following set of functions is specific to the heap "blueprint" + * stuff, for automatic creation of heaps when a context is created + * + */ + + +/* Devmem_HeapConfigCount: returns the number of heap configs that + this device has. Note that there is no acquire/release semantics + required, as this data is guaranteed to be constant for the + lifetime of the device node */ +PVRSRV_ERROR +DevmemHeapConfigCount(SHARED_DEV_CONNECTION hDevConnection, + IMG_UINT32 *puiNumHeapConfigsOut); + +/* Devmem_HeapCount: returns the number of heaps that a given heap + config on this device has. Note that there is no acquire/release + semantics required, as this data is guaranteed to be constant for + the lifetime of the device node */ +PVRSRV_ERROR +DevmemHeapCount(SHARED_DEV_CONNECTION hDevConnection, + IMG_UINT32 uiHeapConfigIndex, + IMG_UINT32 *puiNumHeapsOut); +/* Devmem_HeapConfigName: return the name of the given heap config. 
+ The caller is to provide the storage for the returned string and + indicate the number of bytes (including null terminator) for such + string in the BufSz arg. Note that there is no acquire/release + semantics required, as this data is guaranteed to be constant for + the lifetime of the device node. + */ +PVRSRV_ERROR +DevmemHeapConfigName(SHARED_DEV_CONNECTION hsDevConnection, + IMG_UINT32 uiHeapConfigIndex, + IMG_CHAR *pszConfigNameOut, + IMG_UINT32 uiConfigNameBufSz); + +/* Devmem_HeapDetails: fetches all the metadata that is recorded in + this heap "blueprint". Namely: heap name (caller to provide + storage, and indicate buffer size (including null terminator) in + BufSz arg), device virtual address and length, log2 of data page + size (will be one of 12, 14, 16, 18, 20, 21, at time of writing). + Note that there is no acquire/release semantics required, as this + data is guaranteed to be constant for the lifetime of the device + node. */ +PVRSRV_ERROR +DevmemHeapDetails(SHARED_DEV_CONNECTION hDevConnection, + IMG_UINT32 uiHeapConfigIndex, + IMG_UINT32 uiHeapIndex, + IMG_CHAR *pszHeapNameOut, + IMG_UINT32 uiHeapNameBufSz, + IMG_DEV_VIRTADDR *psDevVAddrBaseOut, + IMG_DEVMEM_SIZE_T *puiHeapLengthOut, + IMG_DEVMEM_SIZE_T *puiReservedRegionLengthOut, + IMG_UINT32 *puiLog2DataPageSize, + IMG_UINT32 *puiLog2ImportAlignmentOut); + +/* + * Devmem_FindHeapByName() + * + * returns the heap handle for the named _automagic_ heap in this + * context. "automagic" heaps are those that are born with the + * context from a blueprint + */ +PVRSRV_ERROR +DevmemFindHeapByName(const DEVMEM_CONTEXT *psCtx, + const IMG_CHAR *pszHeapName, + DEVMEM_HEAP **ppsHeapRet); + +/* + * DevmemGetHeapBaseDevVAddr() + * + * returns the device virtual address of the base of the heap. 
+ */ + +PVRSRV_ERROR +DevmemGetHeapBaseDevVAddr(DEVMEM_HEAP *psHeap, + IMG_DEV_VIRTADDR *pDevVAddr); + +PVRSRV_ERROR +DevmemLocalGetImportHandle(DEVMEM_MEMDESC *psMemDesc, + IMG_HANDLE *phImport); + +PVRSRV_ERROR +DevmemGetImportUID(DEVMEM_MEMDESC *psMemDesc, + IMG_UINT64 *pui64UID); + +PVRSRV_ERROR +DevmemGetReservation(DEVMEM_MEMDESC *psMemDesc, + IMG_HANDLE *hReservation); + +IMG_INTERNAL PVRSRV_ERROR +DevmemGetPMRData(DEVMEM_MEMDESC *psMemDesc, + IMG_HANDLE *hPMR, + IMG_DEVMEM_OFFSET_T *puiPMROffset); + +IMG_INTERNAL void +DevmemGetFlags(DEVMEM_MEMDESC *psMemDesc, + PVRSRV_MEMALLOCFLAGS_T *puiFlags); + +IMG_INTERNAL SHARED_DEV_CONNECTION +DevmemGetConnection(DEVMEM_MEMDESC *psMemDesc); + +PVRSRV_ERROR +DevmemLocalImport(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hExtHandle, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + DEVMEM_MEMDESC **ppsMemDescPtr, + IMG_DEVMEM_SIZE_T *puiSizePtr, + const IMG_CHAR *pszAnnotation); + +IMG_INTERNAL PVRSRV_ERROR +DevmemIsDevVirtAddrValid(DEVMEM_CONTEXT *psContext, + IMG_DEV_VIRTADDR sDevVAddr); + +IMG_INTERNAL PVRSRV_ERROR +DevmemGetFaultAddress(DEVMEM_CONTEXT *psContext, + IMG_DEV_VIRTADDR *psFaultAddress); + +IMG_INTERNAL PVRSRV_ERROR +DevmemFlushDeviceSLCRange(DEVMEM_MEMDESC *psMemDesc, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + IMG_BOOL bInvalidate); + +IMG_INTERNAL PVRSRV_ERROR +DevmemInvalidateFBSCTable(DEVMEM_CONTEXT *psContext, + IMG_UINT64 ui64FBSCEntries); + +/* DevmemGetHeapLog2PageSize() + * + * Get the page size used for a certain heap. + */ +IMG_UINT32 +DevmemGetHeapLog2PageSize(DEVMEM_HEAP *psHeap); + +/* DevmemGetHeapReservedSize() + * + * Get the reserved size used for a certain heap. + */ +IMG_DEVMEM_SIZE_T +DevmemGetHeapReservedSize(DEVMEM_HEAP *psHeap); + +/*************************************************************************/ /*! +@Function RegisterDevMemPFNotify +@Description Registers that the application wants to be signaled when a page + fault occurs. 
+ +@Input psContext Memory context the process that would like to + be notified about. +@Input ui32PID The PID of the calling process. +@Input bRegister If true, register. If false, de-register. +@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_ + error code +*/ /**************************************************************************/ +IMG_INTERNAL PVRSRV_ERROR +RegisterDevmemPFNotify(DEVMEM_CONTEXT *psContext, + IMG_UINT32 ui32PID, + IMG_BOOL bRegister); + +/*************************************************************************/ /*! +@Function GetMaxDevMemSize +@Description Get the amount of device memory on current platform + (memory size in Bytes) +@Output puiLMASize LMA memory size +@Output puiUMASize UMA memory size +@Return Error code +*/ /**************************************************************************/ +IMG_INTERNAL PVRSRV_ERROR +GetMaxDevMemSize(SHARED_DEV_CONNECTION hDevConnection, + IMG_DEVMEM_SIZE_T *puiLMASize, + IMG_DEVMEM_SIZE_T *puiUMASize); + +/*************************************************************************/ /*! +@Function DevmemHeapSetPremapStatus +@Description In some special cases like virtualisation, a device memory heap + must be entirely backed by physical memory and mapped into the + device's virtual address space. This is done at context creation. + When objects are allocated from such a heap, the mapping part + must be skipped. The 'bPremapped' flag dictates if allocations + are to be mapped or not. 
+ +@Input psHeap Device memory heap to be updated +@Input IsPremapped The premapping status to be set +*/ /**************************************************************************/ +IMG_INTERNAL void +DevmemHeapSetPremapStatus(DEVMEM_HEAP *psHeap, IMG_BOOL IsPremapped); + +#endif /* #ifndef SRVCLIENT_DEVICEMEM_H */ diff --git a/drivers/gpu/drm/phytium/octopus/devicemem_heapcfg.c b/drivers/gpu/drm/phytium/octopus/devicemem_heapcfg.c new file mode 100644 index 000000000000..ee014f5c96be --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/devicemem_heapcfg.c @@ -0,0 +1,184 @@ +/*************************************************************************/ /*! +@File devicemem_heapcfg.c +@Title Device Heap Configuration Helper Functions +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Device memory management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /***************************************************************************/ + +/* our exported API */ +#include "devicemem_heapcfg.h" +#include "devicemem_utils.h" + +#include "device.h" +#include "img_types.h" +#include "img_defs.h" +#include "pvr_debug.h" +#include "pvrsrv_error.h" +#include "osfunc.h" + +#include "connection_server.h" + +static INLINE void _CheckBlueprintHeapAlignment(DEVMEM_HEAP_BLUEPRINT *psHeapBlueprint) +{ + IMG_UINT32 ui32OSPageSize = OSGetPageShift(); + + /* Any heap length should at least match OS page size at the minimum or + * a multiple of OS page size */ + if ((psHeapBlueprint->uiHeapLength < DEVMEM_HEAP_MINIMUM_SIZE) || + (psHeapBlueprint->uiHeapLength & (ui32OSPageSize - 1))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid Heap \"%s\" Size: " + "%"IMG_UINT64_FMTSPEC + "("IMG_DEVMEM_SIZE_FMTSPEC")", + __func__, + psHeapBlueprint->pszName, + psHeapBlueprint->uiHeapLength, + psHeapBlueprint->uiHeapLength)); + PVR_DPF((PVR_DBG_ERROR, + "Heap Size should always be a non-zero value and a " + "multiple of OS Page Size:%u(0x%x)", + ui32OSPageSize, ui32OSPageSize)); + PVR_ASSERT(psHeapBlueprint->uiHeapLength >= ui32OSPageSize); + } + + + PVR_ASSERT(psHeapBlueprint->uiReservedRegionLength % DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY == 0); +} + +void HeapCfgBlueprintInit(const IMG_CHAR *pszName, + IMG_UINT64 ui64HeapBaseAddr, + IMG_DEVMEM_SIZE_T uiHeapLength, + IMG_DEVMEM_SIZE_T uiReservedRegionLength, + IMG_UINT32 ui32Log2DataPageSize, + IMG_UINT32 uiLog2ImportAlignment, + DEVMEM_HEAP_BLUEPRINT *psHeapBlueprint) +{ + psHeapBlueprint->pszName = pszName; + psHeapBlueprint->sHeapBaseAddr.uiAddr = ui64HeapBaseAddr; + psHeapBlueprint->uiHeapLength = uiHeapLength; + psHeapBlueprint->uiReservedRegionLength = uiReservedRegionLength; + psHeapBlueprint->uiLog2DataPageSize = ui32Log2DataPageSize; + psHeapBlueprint->uiLog2ImportAlignment = uiLog2ImportAlignment; + + _CheckBlueprintHeapAlignment(psHeapBlueprint); +} + +PVRSRV_ERROR 
+HeapCfgHeapConfigCount(CONNECTION_DATA * psConnection, + const PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 *puiNumHeapConfigsOut) +{ + + PVR_UNREFERENCED_PARAMETER(psConnection); + + *puiNumHeapConfigsOut = psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs; + + return PVRSRV_OK; +} + +PVRSRV_ERROR +HeapCfgHeapCount(CONNECTION_DATA * psConnection, + const PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 uiHeapConfigIndex, + IMG_UINT32 *puiNumHeapsOut) +{ + if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs) + { + return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX; + } + + *puiNumHeapsOut = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].uiNumHeaps; + + return PVRSRV_OK; +} + +PVRSRV_ERROR +HeapCfgHeapConfigName(CONNECTION_DATA * psConnection, + const PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 uiHeapConfigIndex, + IMG_UINT32 uiHeapConfigNameBufSz, + IMG_CHAR *pszHeapConfigNameOut) +{ + if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs) + { + return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX; + } + + OSSNPrintf(pszHeapConfigNameOut, uiHeapConfigNameBufSz, "%s", psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].pszName); + + return PVRSRV_OK; +} + +PVRSRV_ERROR +HeapCfgHeapDetails(CONNECTION_DATA * psConnection, + const PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 uiHeapConfigIndex, + IMG_UINT32 uiHeapIndex, + IMG_UINT32 uiHeapNameBufSz, + IMG_CHAR *pszHeapNameOut, + IMG_DEV_VIRTADDR *psDevVAddrBaseOut, + IMG_DEVMEM_SIZE_T *puiHeapLengthOut, + IMG_DEVMEM_SIZE_T *puiReservedRegionLengthOut, + IMG_UINT32 *puiLog2DataPageSizeOut, + IMG_UINT32 *puiLog2ImportAlignmentOut) +{ + DEVMEM_HEAP_BLUEPRINT *psHeapBlueprint; + + if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs) + { + return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX; + } + + if (uiHeapIndex >= 
psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].uiNumHeaps) + { + return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_INDEX; + } + + psHeapBlueprint = &psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].psHeapBlueprintArray[uiHeapIndex]; + + OSSNPrintf(pszHeapNameOut, uiHeapNameBufSz, "%s", psHeapBlueprint->pszName); + *psDevVAddrBaseOut = psHeapBlueprint->sHeapBaseAddr; + *puiHeapLengthOut = psHeapBlueprint->uiHeapLength; + *puiReservedRegionLengthOut = psHeapBlueprint->uiReservedRegionLength; + *puiLog2DataPageSizeOut = psHeapBlueprint->uiLog2DataPageSize; + *puiLog2ImportAlignmentOut = psHeapBlueprint->uiLog2ImportAlignment; + + return PVRSRV_OK; +} diff --git a/drivers/gpu/drm/phytium/octopus/devicemem_heapcfg.h b/drivers/gpu/drm/phytium/octopus/devicemem_heapcfg.h new file mode 100644 index 000000000000..5eecfe0ca96e --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/devicemem_heapcfg.h @@ -0,0 +1,184 @@ +/**************************************************************************/ /*! +@File +@Title Device Heap Configuration Helper Functions +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Device memory management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /***************************************************************************/ + +#ifndef DEVICEMEMHEAPCFG_H +#define DEVICEMEMHEAPCFG_H + +#include + +#include "img_types.h" +#include "pvrsrv_error.h" + +/* + * Supported log2 page size values for RGX_GENERAL_NON_4K_HEAP_ID + */ +#define RGX_HEAP_4KB_PAGE_SHIFT (12U) +#define RGX_HEAP_16KB_PAGE_SHIFT (14U) +#define RGX_HEAP_64KB_PAGE_SHIFT (16U) +#define RGX_HEAP_256KB_PAGE_SHIFT (18U) +#define RGX_HEAP_1MB_PAGE_SHIFT (20U) +#define RGX_HEAP_2MB_PAGE_SHIFT (21U) + +struct _PVRSRV_DEVICE_NODE_; +struct _CONNECTION_DATA_; + + +/* + A "heap config" is a blueprint to be used for initial setting up of heaps + when a device memory context is created. + + We define a data structure to define this, but it's really down to the + caller to populate it. This is all expected to be in-kernel. We provide an + API that client code can use to enquire about the blueprint, such that it may + do the heap set-up during the context creation call on behalf of the user. +*/ + +/* Blueprint for a single heap */ +typedef struct _DEVMEM_HEAP_BLUEPRINT_ +{ + /* Name of this heap - for debug purposes, and perhaps for lookup + by name */ + const IMG_CHAR *pszName; + + /* Virtual address of the beginning of the heap. This _must_ be a + multiple of the data page size for the heap. It is + _recommended_ that it be coarser than that - especially, it + should begin on a boundary appropriate to the MMU for the + device. For Rogue, this is a Page Directory boundary, or 1GB + (virtual address a multiple of 0x0040000000). */ + IMG_DEV_VIRTADDR sHeapBaseAddr; + + /* Length of the heap. Given that the END address of the heap has + a similar restriction to that of the _beginning_ of the heap. + That is the heap length _must_ be a whole number of data pages. + Again, the recommendation is that it ends on a 1GB boundary. 
+ Again, this is not essential, but we do know that (at the time + of writing) the current implementation of mmu_common.c is such + that no two heaps may share a page directory, thus the + remaining virtual space would be wasted if the length were not + a multiple of 1GB */ + IMG_DEVMEM_SIZE_T uiHeapLength; + + /* VA space starting sHeapBaseAddr to uiReservedRegionLength-1 are reserved + for statically defined addresses (shared/known between clients and FW). + Services never maps allocations into this reserved address space _unless_ + explicitly requested via PVRSRVMapToDeviceAddress by passing sDevVirtAddr + which falls within this reserved range. Since this range is completely for + clients to manage (where allocations are page granular), it _must_ again be + a whole number of data pages. Additionally, another constraint enforces this + to be a multiple of DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY (which evaluates to + max page size supported) to support varied pages sizes */ + IMG_DEVMEM_SIZE_T uiReservedRegionLength; + + /* Data page size. This is the page size that is going to get + programmed into the MMU, so it needs to be a valid one for the + device. Importantly, the start address and length _must_ be + multiples of this page size. Note that the page size is + specified as the log 2 relative to 1 byte (e.g. 12 indicates + 4kB) */ + IMG_UINT32 uiLog2DataPageSize; + + /* Import alignment. 
Force imports to this heap to be + aligned to at least this value */ + IMG_UINT32 uiLog2ImportAlignment; + +} DEVMEM_HEAP_BLUEPRINT; + +void HeapCfgBlueprintInit(const IMG_CHAR *pszName, + IMG_UINT64 ui64HeapBaseAddr, + IMG_DEVMEM_SIZE_T uiHeapLength, + IMG_DEVMEM_SIZE_T uiReservedRegionLength, + IMG_UINT32 ui32Log2DataPageSize, + IMG_UINT32 uiLog2ImportAlignment, + DEVMEM_HEAP_BLUEPRINT *psHeapBlueprint); + +/* Entire named heap config */ +typedef struct _DEVMEM_HEAP_CONFIG_ +{ + /* Name of this heap config - for debug and maybe lookup */ + const IMG_CHAR *pszName; + + /* Number of heaps in this config */ + IMG_UINT32 uiNumHeaps; + + /* Array of individual heap blueprints as defined above */ + DEVMEM_HEAP_BLUEPRINT *psHeapBlueprintArray; +} DEVMEM_HEAP_CONFIG; + + +PVRSRV_ERROR +HeapCfgHeapConfigCount(struct _CONNECTION_DATA_ *psConnection, + const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, + IMG_UINT32 *puiNumHeapConfigsOut +); + +PVRSRV_ERROR +HeapCfgHeapCount(struct _CONNECTION_DATA_ *psConnection, + const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, + IMG_UINT32 uiHeapConfigIndex, + IMG_UINT32 *puiNumHeapsOut +); + +PVRSRV_ERROR +HeapCfgHeapConfigName(struct _CONNECTION_DATA_ *psConnection, + const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, + IMG_UINT32 uiHeapConfigIndex, + IMG_UINT32 uiHeapConfigNameBufSz, + IMG_CHAR *pszHeapConfigNameOut +); + +PVRSRV_ERROR +HeapCfgHeapDetails(struct _CONNECTION_DATA_ *psConnection, + const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, + IMG_UINT32 uiHeapConfigIndex, + IMG_UINT32 uiHeapIndex, + IMG_UINT32 uiHeapNameBufSz, + IMG_CHAR *pszHeapNameOut, + IMG_DEV_VIRTADDR *psDevVAddrBaseOut, + IMG_DEVMEM_SIZE_T *puiHeapLengthOut, + IMG_DEVMEM_SIZE_T *puiReservedRegionLengthOut, + IMG_UINT32 *puiLog2DataPageSizeOut, + IMG_UINT32 *puiLog2ImportAlignmentOut +); + +#endif diff --git a/drivers/gpu/drm/phytium/octopus/devicemem_history_server.c b/drivers/gpu/drm/phytium/octopus/devicemem_history_server.c new file mode 100644 index 
000000000000..9fd7dda3bb79 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/devicemem_history_server.c @@ -0,0 +1,1912 @@ +/*************************************************************************/ /*! +@File +@Title Devicemem history functions +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Devicemem history functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "allocmem.h" +#include "img_defs.h" +#include "pmr.h" +#include "pvrsrv.h" +#include "pvrsrv_device.h" +#include "pvr_debug.h" +#include "devicemem_server.h" +#include "lock.h" +#include "devicemem_history_server.h" +#include "pdump_km.h" +#include "di_server.h" + +#define ALLOCATION_LIST_NUM_ENTRIES 10000 + +/* data type to hold an allocation index. + * we make it 16 bits wide if possible + */ +#if ALLOCATION_LIST_NUM_ENTRIES <= 0xFFFF +typedef uint16_t ALLOC_INDEX_T; +#else +typedef uint32_t ALLOC_INDEX_T; +#endif + +/* a record describing a single allocation known to DeviceMemHistory. 
+ * this is an element in a doubly linked list of allocations + */ +typedef struct _RECORD_ALLOCATION_ +{ + /* time when this RECORD_ALLOCATION was created/initialised */ + IMG_UINT64 ui64CreationTime; + /* serial number of the PMR relating to this allocation */ + IMG_UINT64 ui64Serial; + /* base DevVAddr of this allocation */ + IMG_DEV_VIRTADDR sDevVAddr; + /* size in bytes of this allocation */ + IMG_DEVMEM_SIZE_T uiSize; + /* Log2 page size of this allocation's GPU pages */ + IMG_UINT32 ui32Log2PageSize; + /* Process ID (PID) this allocation belongs to */ + IMG_PID uiPID; + /* index of previous allocation in the list */ + ALLOC_INDEX_T ui32Prev; + /* index of next allocation in the list */ + ALLOC_INDEX_T ui32Next; + /* annotation/name of this allocation */ + IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN]; +} RECORD_ALLOCATION; + +/* each command in the circular buffer is prefixed with an 8-bit value + * denoting the command type + */ +typedef enum _COMMAND_TYPE_ +{ + COMMAND_TYPE_NONE, + COMMAND_TYPE_TIMESTAMP, + COMMAND_TYPE_MAP_ALL, + COMMAND_TYPE_UNMAP_ALL, + COMMAND_TYPE_MAP_RANGE, + COMMAND_TYPE_UNMAP_RANGE, + /* sentinel value */ + COMMAND_TYPE_COUNT, +} COMMAND_TYPE; + +/* Timestamp command: + * This command is inserted into the circular buffer to provide an updated + * timestamp. + * The nanosecond-accuracy timestamp is packed into a 56-bit integer, in order + * for the whole command to fit into 8 bytes. + */ +typedef struct _COMMAND_TIMESTAMP_ +{ + IMG_UINT8 aui8TimeNs[7]; +} COMMAND_TIMESTAMP; + +/* MAP_ALL command: + * This command denotes the allocation at the given index was wholly mapped + * in to the GPU MMU + */ +typedef struct _COMMAND_MAP_ALL_ +{ + ALLOC_INDEX_T uiAllocIndex; +} COMMAND_MAP_ALL; + +/* UNMAP_ALL command: + * This command denotes the allocation at the given index was wholly unmapped + * from the GPU MMU + * Note: COMMAND_MAP_ALL and COMMAND_UNMAP_ALL commands have the same layout. 
+ */ +typedef COMMAND_MAP_ALL COMMAND_UNMAP_ALL; + +/* packing attributes for the MAP_RANGE command */ +#define MAP_RANGE_MAX_START ((1 << 18) - 1) +#define MAP_RANGE_MAX_RANGE ((1 << 12) - 1) + +/* MAP_RANGE command: + * Denotes a range of pages within the given allocation being mapped. + * The range is expressed as [Page Index] + [Page Count] + * This information is packed into a 40-bit integer, in order to make + * the command size 8 bytes. + */ + +typedef struct _COMMAND_MAP_RANGE_ +{ + IMG_UINT8 aui8Data[5]; + ALLOC_INDEX_T uiAllocIndex; +} COMMAND_MAP_RANGE; + +/* UNMAP_RANGE command: + * Denotes a range of pages within the given allocation being mapped. + * The range is expressed as [Page Index] + [Page Count] + * This information is packed into a 40-bit integer, in order to make + * the command size 8 bytes. + * Note: COMMAND_MAP_RANGE and COMMAND_UNMAP_RANGE commands have the same layout. + */ +typedef COMMAND_MAP_RANGE COMMAND_UNMAP_RANGE; + +/* wrapper structure for a command */ +typedef struct _COMMAND_WRAPPER_ +{ + IMG_UINT8 ui8Type; + union { + COMMAND_TIMESTAMP sTimeStamp; + COMMAND_MAP_ALL sMapAll; + COMMAND_UNMAP_ALL sUnmapAll; + COMMAND_MAP_RANGE sMapRange; + COMMAND_UNMAP_RANGE sUnmapRange; + } u; +} COMMAND_WRAPPER; + +/* target size for the circular buffer of commands */ +#define CIRCULAR_BUFFER_SIZE_KB 2048 +/* turn the circular buffer target size into a number of commands */ +#define CIRCULAR_BUFFER_NUM_COMMANDS ((CIRCULAR_BUFFER_SIZE_KB * 1024) / sizeof(COMMAND_WRAPPER)) + +/* index value denoting the end of a list */ +#define END_OF_LIST 0xFFFFFFFF +#define ALLOC_INDEX_TO_PTR(idx) (&(gsDevicememHistoryData.sRecords.pasAllocations[idx])) +#define CHECK_ALLOC_INDEX(idx) (idx < ALLOCATION_LIST_NUM_ENTRIES) + +/* wrapper structure for the allocation records and the commands circular buffer */ +typedef struct _RECORDS_ +{ + RECORD_ALLOCATION *pasAllocations; + IMG_UINT32 ui32AllocationsListHead; + + IMG_UINT32 ui32Head; + IMG_UINT32 ui32Tail; + 
	COMMAND_WRAPPER *pasCircularBuffer;
} RECORDS;

typedef struct _DEVICEMEM_HISTORY_DATA_
{
	/* DI entry */
	DI_ENTRY *psDIEntry;

	RECORDS sRecords;
	POS_LOCK hLock;
} DEVICEMEM_HISTORY_DATA;

/* single module-wide instance; all records and the command ring live here */
static DEVICEMEM_HISTORY_DATA gsDevicememHistoryData;

/* gsDevicememHistoryData is static, hLock is NULL unless
 * EnablePageFaultDebug is set and DevicememHistoryInitKM()
 * was called.
 */
static void DevicememHistoryLock(void)
{
	/* no-op when the feature is disabled (hLock never created) */
	if (gsDevicememHistoryData.hLock)
	{
		OSLockAcquire(gsDevicememHistoryData.hLock);
	}
}

static void DevicememHistoryUnlock(void)
{
	if (gsDevicememHistoryData.hLock)
	{
		OSLockRelease(gsDevicememHistoryData.hLock);
	}
}

/* given a time stamp, calculate the age in nanoseconds.
 * ui64Now/ui64Then: current and past timestamps.
 * ui64Max: maximum representable timestamp value, used to
 * compute the age across a counter wrap.
 */
static IMG_UINT64 _CalculateAge(IMG_UINT64 ui64Now,
						IMG_UINT64 ui64Then,
						IMG_UINT64 ui64Max)
{
	if (ui64Now >= ui64Then)
	{
		/* no clock wrap */
		return ui64Now - ui64Then;
	}
	else
	{
		/* clock has wrapped */
		return (ui64Max - ui64Then) + ui64Now + 1;
	}
}

/* AcquireCBSlot:
 * Acquire the next slot in the circular buffer and
 * move the circular buffer head along by one
 * Returns a pointer to the acquired slot.
 * Note: when the buffer is full the oldest command is silently
 * overwritten (head simply wraps modulo the buffer size).
 */
static COMMAND_WRAPPER *AcquireCBSlot(void)
{
	COMMAND_WRAPPER *psSlot;

	psSlot = &gsDevicememHistoryData.sRecords.pasCircularBuffer[gsDevicememHistoryData.sRecords.ui32Head];

	gsDevicememHistoryData.sRecords.ui32Head =
		(gsDevicememHistoryData.sRecords.ui32Head + 1)
			% CIRCULAR_BUFFER_NUM_COMMANDS;

	return psSlot;
}

/* TimeStampPack:
 * Packs the given timestamp value into the COMMAND_TIMESTAMP structure.
 * This takes a 64-bit nanosecond timestamp and packs it in to a 56-bit
 * integer in the COMMAND_TIMESTAMP command.
 */
static void TimeStampPack(COMMAND_TIMESTAMP *psTimeStamp, IMG_UINT64 ui64Now)
{
	IMG_UINT32 i;

	/* store little-endian: byte 0 holds the least significant 8 bits.
	 * the top 8 bits of ui64Now are discarded (see TIME_STAMP_MASK below).
	 */
	for (i = 0; i < ARRAY_SIZE(psTimeStamp->aui8TimeNs); i++)
	{
		psTimeStamp->aui8TimeNs[i] = ui64Now & 0xFF;
		ui64Now >>= 8;
	}
}

/* packing a 64-bit nanosecond into a 7-byte integer loses the
 * top 8 bits of data. This must be taken into account when
 * comparing a full timestamp against an unpacked timestamp
 */
#define TIME_STAMP_MASK ((1LLU << 56) - 1)
/* NOTE(review): ns64 is not parenthesised in the expansion; safe for the
 * current callers (plain identifiers) but fragile for expression arguments.
 */
#define DO_TIME_STAMP_MASK(ns64) (ns64 & TIME_STAMP_MASK)

/* TimeStampUnpack:
 * Unpack the timestamp value from the given COMMAND_TIMESTAMP command
 */
static IMG_UINT64 TimeStampUnpack(COMMAND_TIMESTAMP *psTimeStamp)
{
	IMG_UINT64 ui64TimeNs = 0;
	IMG_UINT32 i;

	/* reassemble most-significant byte first; inverse of TimeStampPack */
	for (i = ARRAY_SIZE(psTimeStamp->aui8TimeNs); i > 0; i--)
	{
		ui64TimeNs <<= 8;
		ui64TimeNs |= (IMG_UINT64) psTimeStamp->aui8TimeNs[i - 1];
	}

	return ui64TimeNs;
}

#if defined(PDUMP)

/* emit a PDump comment describing the given allocation record */
static void EmitPDumpAllocation(IMG_UINT32 ui32AllocationIndex,
					RECORD_ALLOCATION *psAlloc)
{
	PDUMPCOMMENT("[SrvPFD] Allocation: %u"
			" Addr: " IMG_DEV_VIRTADDR_FMTSPEC
			" Size: " IMG_DEVMEM_SIZE_FMTSPEC
			" Page size: %u"
			" PID: %u"
			" Process: %s"
			" Name: %s",
			ui32AllocationIndex,
			psAlloc->sDevVAddr.uiAddr,
			psAlloc->uiSize,
			1U << psAlloc->ui32Log2PageSize,
			psAlloc->uiPID,
			OSGetCurrentClientProcessNameKM(),
			psAlloc->szName);
}

/* emit a PDump comment for a whole-allocation map or unmap event */
static void EmitPDumpMapUnmapAll(COMMAND_TYPE eType,
					IMG_UINT32 ui32AllocationIndex)
{
	const IMG_CHAR *pszOpName;

	switch (eType)
	{
		case COMMAND_TYPE_MAP_ALL:
			pszOpName = "MAP_ALL";
			break;
		case COMMAND_TYPE_UNMAP_ALL:
			pszOpName = "UNMAP_ALL";
			break;
		default:
			PVR_DPF((PVR_DBG_ERROR, "EmitPDumpMapUnmapAll: Invalid type: %u",
								eType));
			return;

	}

	PDUMPCOMMENT("[SrvPFD] Op: %s Allocation: %u",
				pszOpName,
				ui32AllocationIndex);
}

/* emit a PDump comment for a page-range map or unmap event */
static void EmitPDumpMapUnmapRange(COMMAND_TYPE eType,
					IMG_UINT32 ui32AllocationIndex,
					IMG_UINT32 ui32StartPage,
					IMG_UINT32 ui32Count)
{
	const IMG_CHAR *pszOpName;

	switch (eType)
	{
		case COMMAND_TYPE_MAP_RANGE:
			pszOpName = "MAP_RANGE";
			break;
		case COMMAND_TYPE_UNMAP_RANGE:
			pszOpName = "UNMAP_RANGE";
			break;
		default:
			PVR_DPF((PVR_DBG_ERROR, "EmitPDumpMapUnmapRange: Invalid type: %u",
								eType));
			return;
	}

	PDUMPCOMMENT("[SrvPFD] Op: %s Allocation: %u Start Page: %u Count: %u",
				pszOpName,
				ui32AllocationIndex,
				ui32StartPage,
				ui32Count);
}

#endif

/* InsertTimeStampCommand:
 * Insert a timestamp command into the circular buffer.
 */
static void InsertTimeStampCommand(IMG_UINT64 ui64Now)
{
	COMMAND_WRAPPER *psCommand;

	psCommand = AcquireCBSlot();

	psCommand->ui8Type = COMMAND_TYPE_TIMESTAMP;

	TimeStampPack(&psCommand->u.sTimeStamp, ui64Now);
}

/* InsertMapAllCommand:
 * Insert a "MAP_ALL" command for the given allocation into the circular buffer
 */
static void InsertMapAllCommand(IMG_UINT32 ui32AllocIndex)
{
	COMMAND_WRAPPER *psCommand;

	psCommand = AcquireCBSlot();

	psCommand->ui8Type = COMMAND_TYPE_MAP_ALL;
	psCommand->u.sMapAll.uiAllocIndex = ui32AllocIndex;

#if defined(PDUMP)
	EmitPDumpMapUnmapAll(COMMAND_TYPE_MAP_ALL, ui32AllocIndex);
#endif
}

/* InsertUnmapAllCommand:
 * Insert a "UNMAP_ALL" command for the given allocation into the circular buffer
 */
static void InsertUnmapAllCommand(IMG_UINT32 ui32AllocIndex)
{
	COMMAND_WRAPPER *psCommand;

	psCommand = AcquireCBSlot();

	psCommand->ui8Type = COMMAND_TYPE_UNMAP_ALL;
	psCommand->u.sUnmapAll.uiAllocIndex = ui32AllocIndex;

#if defined(PDUMP)
	EmitPDumpMapUnmapAll(COMMAND_TYPE_UNMAP_ALL, ui32AllocIndex);
#endif
}

/* MapRangePack:
 * Pack the given StartPage and Count values into the 40-bit representation
 * in the MAP_RANGE command.
 */
static void MapRangePack(COMMAND_MAP_RANGE *psMapRange,
						IMG_UINT32 ui32StartPage,
						IMG_UINT32 ui32Count)
{
	IMG_UINT64 ui64Data;
	IMG_UINT32 i;

	/* we must encode the data into 40 bits:
	 *   18 bits for the start page index
	 *   12 bits for the range
	 */
	PVR_ASSERT(ui32StartPage <= MAP_RANGE_MAX_START);
	PVR_ASSERT(ui32Count <= MAP_RANGE_MAX_RANGE);

	ui64Data = (((IMG_UINT64) ui32StartPage) << 12) | ui32Count;

	/* store little-endian into the 5-byte payload */
	for (i = 0; i < ARRAY_SIZE(psMapRange->aui8Data); i++)
	{
		psMapRange->aui8Data[i] = ui64Data & 0xFF;
		ui64Data >>= 8;
	}
}

/* MapRangeUnpack:
 * Unpack the StartPage and Count values from the 40-bit representation
 * in the MAP_RANGE command.
 */
static void MapRangeUnpack(COMMAND_MAP_RANGE *psMapRange,
						IMG_UINT32 *pui32StartPage,
						IMG_UINT32 *pui32Count)
{
	IMG_UINT64 ui64Data = 0;
	IMG_UINT32 i;

	/* inverse of MapRangePack: reassemble most-significant byte first */
	for (i = ARRAY_SIZE(psMapRange->aui8Data); i > 0; i--)
	{
		ui64Data <<= 8;
		ui64Data |= (IMG_UINT64) psMapRange->aui8Data[i - 1];
	}

	*pui32StartPage = (ui64Data >> 12);
	*pui32Count = ui64Data & ((1 << 12) - 1);
}

/* InsertMapRangeCommand:
 * Insert a MAP_RANGE command into the circular buffer with the given
 * StartPage and Count values.
 */
static void InsertMapRangeCommand(IMG_UINT32 ui32AllocIndex,
						IMG_UINT32 ui32StartPage,
						IMG_UINT32 ui32Count)
{
	COMMAND_WRAPPER *psCommand;

	psCommand = AcquireCBSlot();

	psCommand->ui8Type = COMMAND_TYPE_MAP_RANGE;
	psCommand->u.sMapRange.uiAllocIndex = ui32AllocIndex;

	MapRangePack(&psCommand->u.sMapRange, ui32StartPage, ui32Count);

#if defined(PDUMP)
	EmitPDumpMapUnmapRange(COMMAND_TYPE_MAP_RANGE,
						ui32AllocIndex,
						ui32StartPage,
						ui32Count);
#endif
}

/* InsertUnmapRangeCommand:
 * Insert a UNMAP_RANGE command into the circular buffer with the given
 * StartPage and Count values.
 */
static void InsertUnmapRangeCommand(IMG_UINT32 ui32AllocIndex,
						IMG_UINT32 ui32StartPage,
						IMG_UINT32 ui32Count)
{
	COMMAND_WRAPPER *psCommand;

	psCommand = AcquireCBSlot();

	psCommand->ui8Type = COMMAND_TYPE_UNMAP_RANGE;
	/* u.sMapRange is the same layout as u.sUnmapRange (see typedef) */
	psCommand->u.sMapRange.uiAllocIndex = ui32AllocIndex;

	MapRangePack(&psCommand->u.sMapRange, ui32StartPage, ui32Count);

#if defined(PDUMP)
	EmitPDumpMapUnmapRange(COMMAND_TYPE_UNMAP_RANGE,
						ui32AllocIndex,
						ui32StartPage,
						ui32Count);
#endif
}

/* InsertAllocationToList:
 * Helper function for the allocation list.
 * Inserts the given allocation at the head of the list, whose current head is
 * pointed to by pui32ListHead.
 * The list is a circular doubly-linked list held by index
 * (ui32Next/ui32Prev) inside the allocation records.
 */
static void InsertAllocationToList(IMG_UINT32 *pui32ListHead, IMG_UINT32 ui32Alloc)
{
	RECORD_ALLOCATION *psAlloc;

	psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);

	if (*pui32ListHead == END_OF_LIST)
	{
		/* list is currently empty, so just replace it */
		*pui32ListHead = ui32Alloc;
		psAlloc->ui32Next = psAlloc->ui32Prev = *pui32ListHead;
	}
	else
	{
		RECORD_ALLOCATION *psHeadAlloc;
		RECORD_ALLOCATION *psTailAlloc;

		psHeadAlloc = ALLOC_INDEX_TO_PTR(*pui32ListHead);
		psTailAlloc = ALLOC_INDEX_TO_PTR(psHeadAlloc->ui32Prev);

		/* make the new alloc point forwards to the previous head */
		psAlloc->ui32Next = *pui32ListHead;
		/* make the new alloc point backwards to the previous tail */
		psAlloc->ui32Prev = psHeadAlloc->ui32Prev;

		/* the head is now our new alloc */
		*pui32ListHead = ui32Alloc;

		/* the old head now points back to the new head */
		psHeadAlloc->ui32Prev = *pui32ListHead;

		/* the tail now points forward to the new head */
		psTailAlloc->ui32Next = ui32Alloc;
	}
}

/* insert into the module-wide MRU ("busy") list */
static void InsertAllocationToBusyList(IMG_UINT32 ui32Alloc)
{
	InsertAllocationToList(&gsDevicememHistoryData.sRecords.ui32AllocationsListHead, ui32Alloc);
}

/* RemoveAllocationFromList:
 * Helper function for the allocation list.
 * Removes the given allocation from the list, whose head is
 * pointed to by pui32ListHead
 */
static void RemoveAllocationFromList(IMG_UINT32 *pui32ListHead, IMG_UINT32 ui32Alloc)
{
	RECORD_ALLOCATION *psAlloc;

	psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);

	/* if this is the only element in the list then just make the list empty */
	if ((*pui32ListHead == ui32Alloc) && (psAlloc->ui32Next == ui32Alloc))
	{
		*pui32ListHead = END_OF_LIST;
	}
	else
	{
		RECORD_ALLOCATION *psPrev, *psNext;

		psPrev = ALLOC_INDEX_TO_PTR(psAlloc->ui32Prev);
		psNext = ALLOC_INDEX_TO_PTR(psAlloc->ui32Next);

		/* remove the allocation from the list */
		psPrev->ui32Next = psAlloc->ui32Next;
		psNext->ui32Prev = psAlloc->ui32Prev;

		/* if this allocation is the head then update the head */
		if (*pui32ListHead == ui32Alloc)
		{
			/* NOTE(review): the new head is set to ui32Prev (the tail),
			 * not ui32Next. Harmless for the only caller seen here
			 * (TouchBusyAllocation immediately re-inserts at the head),
			 * but verify intent before reusing this helper elsewhere.
			 */
			*pui32ListHead = psAlloc->ui32Prev;
		}
	}
}

/* remove from the module-wide MRU ("busy") list */
static void RemoveAllocationFromBusyList(IMG_UINT32 ui32Alloc)
{
	RemoveAllocationFromList(&gsDevicememHistoryData.sRecords.ui32AllocationsListHead, ui32Alloc);
}

/* TouchBusyAllocation:
 * Move the given allocation to the head of the list
 */
static void TouchBusyAllocation(IMG_UINT32 ui32Alloc)
{
	RemoveAllocationFromBusyList(ui32Alloc);
	InsertAllocationToBusyList(ui32Alloc);
}

/* GetOldestBusyAllocation:
 * Returns the index of the oldest allocation in the MRU list
 * (the head's ui32Prev, i.e. the tail), or END_OF_LIST if empty.
 */
static IMG_UINT32 GetOldestBusyAllocation(void)
{
	IMG_UINT32 ui32Alloc;
	RECORD_ALLOCATION *psAlloc;

	ui32Alloc = gsDevicememHistoryData.sRecords.ui32AllocationsListHead;

	if (ui32Alloc == END_OF_LIST)
	{
		return END_OF_LIST;
	}

	psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);

	return psAlloc->ui32Prev;
}

/* "free" slots are obtained by recycling the least-recently-used record */
static IMG_UINT32 GetFreeAllocation(void)
{
	IMG_UINT32 ui32Alloc;

	ui32Alloc = GetOldestBusyAllocation();

	return ui32Alloc;
}


/* InitialiseAllocation:
 * Initialise the given allocation structure with the given properties
 */
static void InitialiseAllocation(RECORD_ALLOCATION *psAlloc,
						const IMG_CHAR *pszName,
						IMG_UINT64 ui64Serial,
						IMG_PID uiPID,
						IMG_DEV_VIRTADDR sDevVAddr,
						IMG_DEVMEM_SIZE_T uiSize,
						IMG_UINT32 ui32Log2PageSize)
{
	OSStringLCopy(psAlloc->szName, pszName, sizeof(psAlloc->szName));
	psAlloc->ui64Serial = ui64Serial;
	psAlloc->uiPID = uiPID;
	psAlloc->sDevVAddr = sDevVAddr;
	psAlloc->uiSize = uiSize;
	psAlloc->ui32Log2PageSize = ui32Log2PageSize;
	psAlloc->ui64CreationTime = OSClockns64();
}

/* CreateAllocation:
 * Creates a new allocation with the given properties then outputs the
 * index of the allocation.
 * NOTE(review): bAutoPurge is not referenced in this body — presumably
 * consumed elsewhere or vestigial; confirm against the full file.
 */
static PVRSRV_ERROR CreateAllocation(const IMG_CHAR *pszName,
						IMG_UINT64 ui64Serial,
						IMG_PID uiPID,
						IMG_DEV_VIRTADDR sDevVAddr,
						IMG_DEVMEM_SIZE_T uiSize,
						IMG_UINT32 ui32Log2PageSize,
						IMG_BOOL bAutoPurge,
						IMG_UINT32 *puiAllocationIndex)
{
	IMG_UINT32 ui32Alloc;
	RECORD_ALLOCATION *psAlloc;

	/* recycles the oldest record; the history deliberately forgets
	 * the least-recently-used allocation when full
	 */
	ui32Alloc = GetFreeAllocation();

	psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);

	InitialiseAllocation(ALLOC_INDEX_TO_PTR(ui32Alloc),
						pszName,
						ui64Serial,
						uiPID,
						sDevVAddr,
						uiSize,
						ui32Log2PageSize);

	/* put the newly initialised allocation at the front of the MRU list */
	TouchBusyAllocation(ui32Alloc);

	*puiAllocationIndex = ui32Alloc;

#if defined(PDUMP)
	EmitPDumpAllocation(ui32Alloc, psAlloc);
#endif

	return PVRSRV_OK;
}

/* MatchAllocation:
 * Tests if the allocation at the given index matches the supplied properties.
 * Returns IMG_TRUE if it is a match, otherwise IMG_FALSE.
 */
static IMG_BOOL MatchAllocation(IMG_UINT32 ui32AllocationIndex,
						IMG_UINT64 ui64Serial,
						IMG_DEV_VIRTADDR sDevVAddr,
						IMG_DEVMEM_SIZE_T uiSize,
						const IMG_CHAR *pszName,
						IMG_UINT32 ui32Log2PageSize,
						IMG_PID uiPID)
{
	RECORD_ALLOCATION *psAlloc;

	psAlloc = ALLOC_INDEX_TO_PTR(ui32AllocationIndex);

	/* NOTE(review): uiPID is accepted but not part of the comparison
	 * below — confirm whether a PID check was intended here.
	 */
	return (psAlloc->ui64Serial == ui64Serial) &&
		(psAlloc->sDevVAddr.uiAddr == sDevVAddr.uiAddr) &&
		(psAlloc->uiSize == uiSize) &&
		(psAlloc->ui32Log2PageSize == ui32Log2PageSize) &&
		(OSStringNCompare(psAlloc->szName, pszName, DEVMEM_ANNOTATION_MAX_LEN) == 0);
}

/* FindOrCreateAllocation:
 * Convenience function.
 * Given a set of allocation properties (serial, DevVAddr, size, name, etc),
 * this function will look for an existing record of this allocation and
 * create the allocation if there is no existing record.
 * NOTE(review): bSparse is unused in this body.
 */
static PVRSRV_ERROR FindOrCreateAllocation(IMG_UINT32 ui32AllocationIndexHint,
						IMG_UINT64 ui64Serial,
						IMG_DEV_VIRTADDR sDevVAddr,
						IMG_DEVMEM_SIZE_T uiSize,
						const char *pszName,
						IMG_UINT32 ui32Log2PageSize,
						IMG_PID uiPID,
						IMG_BOOL bSparse,
						IMG_UINT32 *pui32AllocationIndexOut,
						IMG_BOOL *pbCreated)
{
	IMG_UINT32 ui32AllocationIndex;
	PVRSRV_ERROR eError;

	if (ui32AllocationIndexHint != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE)
	{
		IMG_BOOL bHaveAllocation;

		/* first, try to match against the index given by the client.
		 * if the caller provided a hint but the allocation record is no longer
		 * there, it must have been purged, so go ahead and create a new allocation
		 */
		bHaveAllocation = MatchAllocation(ui32AllocationIndexHint,
						ui64Serial,
						sDevVAddr,
						uiSize,
						pszName,
						ui32Log2PageSize,
						uiPID);
		if (bHaveAllocation)
		{
			*pbCreated = IMG_FALSE;
			*pui32AllocationIndexOut = ui32AllocationIndexHint;
			return PVRSRV_OK;
		}
	}

	/* if there is no record of the allocation then we
	 * create it now
	 */
	eError = CreateAllocation(pszName,
						ui64Serial,
						uiPID,
						sDevVAddr,
						uiSize,
						ui32Log2PageSize,
						IMG_TRUE,
						&ui32AllocationIndex);

	if (eError == PVRSRV_OK)
	{
		*pui32AllocationIndexOut = ui32AllocationIndex;
		*pbCreated = IMG_TRUE;
	}
	else
	{
		PVR_DPF((PVR_DBG_ERROR,
			"%s: Failed to create record for allocation %s",
			__func__,
			pszName));
	}

	return eError;
}

/* GenerateMapUnmapCommandsForSparsePMR:
 * Generate the MAP_RANGE or UNMAP_RANGE commands for the sparse PMR, using the PMR's
 * current mapping table
 *
 * PMR: The PMR whose mapping table to read.
 * ui32AllocIndex: The allocation to attribute the MAP_RANGE/UNMAP range commands to.
 * bMap: Set to TRUE for mapping or IMG_FALSE for unmapping
 *
 * This function goes through every page in the PMR's mapping table and looks for
 * virtually contiguous ranges to record as being mapped or unmapped.
 */
static void GenerateMapUnmapCommandsForSparsePMR(PMR *psPMR,
						IMG_UINT32 ui32AllocIndex,
						IMG_BOOL bMap)
{
	PMR_MAPPING_TABLE *psMappingTable;
	IMG_UINT32 ui32DonePages = 0;
	IMG_UINT32 ui32NumPages;
	IMG_UINT32 i;
	/* run-length state: a "run" is a maximal stretch of consecutive
	 * valid (mapped) virtual chunks
	 */
	IMG_BOOL bInARun = IMG_FALSE;
	IMG_UINT32 ui32CurrentStart = 0;
	IMG_UINT32 ui32RunCount = 0;

	psMappingTable = PMR_GetMappingTable(psPMR);
	ui32NumPages = psMappingTable->ui32NumPhysChunks;

	if (ui32NumPages == 0)
	{
		/* nothing to do */
		return;
	}

	for (i = 0; i < psMappingTable->ui32NumVirtChunks; i++)
	{
		if (psMappingTable->aui32Translation[i] != TRANSLATION_INVALID)
		{
			if (!bInARun)
			{
				bInARun = IMG_TRUE;
				ui32CurrentStart = i;
				ui32RunCount = 1;
			}
			else
			{
				ui32RunCount++;
			}
		}

		if (bInARun)
		{
			/* test if we need to end this current run and generate the command,
			 * either because the next page is not virtually contiguous
			 * to the current page, we have reached the maximum range,
			 * or this is the last page in the mapping table
			 */
			if ((psMappingTable->aui32Translation[i] == TRANSLATION_INVALID) ||
				(ui32RunCount == MAP_RANGE_MAX_RANGE) ||
				(i == (psMappingTable->ui32NumVirtChunks - 1)))
			{
				if (bMap)
				{
					InsertMapRangeCommand(ui32AllocIndex,
								ui32CurrentStart,
								ui32RunCount);
				}
				else
				{
					InsertUnmapRangeCommand(ui32AllocIndex,
								ui32CurrentStart,
								ui32RunCount);
				}

				ui32DonePages += ui32RunCount;

				/* early exit: all physically-backed pages accounted for */
				if (ui32DonePages == ui32NumPages)
				{
					break;
				}

				bInARun = IMG_FALSE;
			}
		}
	}

}

/* GenerateMapUnmapCommandsForChangeList:
 * Generate the MAP_RANGE or UNMAP_RANGE commands for the sparse PMR, using the
 * list of page change (page map or page unmap) indices given.
 *
 * ui32NumPages: Number of pages which have changed.
 * pui32PageList: List of indices of the pages which have changed.
 * ui32AllocIndex: The allocation to attribute the MAP_RANGE/UNMAP range commands to.
 * bMap: Set to TRUE for mapping or IMG_FALSE for unmapping
 *
 * This function goes through every page in the list and looks for
 * virtually contiguous ranges to record as being mapped or unmapped.
 */
static void GenerateMapUnmapCommandsForChangeList(IMG_UINT32 ui32NumPages,
						IMG_UINT32 *pui32PageList,
						IMG_UINT32 ui32AllocIndex,
						IMG_BOOL bMap)
{
	IMG_UINT32 i;
	IMG_BOOL bInARun = IMG_FALSE;
	IMG_UINT32 ui32CurrentStart = 0;
	IMG_UINT32 ui32RunCount = 0;

	for (i = 0; i < ui32NumPages; i++)
	{
		if (!bInARun)
		{
			bInARun = IMG_TRUE;
			ui32CurrentStart = pui32PageList[i];
		}

		ui32RunCount++;

		/* we flush if:
		 * - the next page in the list is not one greater than the current page
		 * - this is the last page in the list
		 * - we have reached the maximum range size
		 * (the first test below short-circuits before the [i + 1] read,
		 * so no out-of-bounds access on the final element)
		 */
		if ((i == (ui32NumPages - 1)) ||
			((pui32PageList[i] + 1) != pui32PageList[i + 1]) ||
			(ui32RunCount == MAP_RANGE_MAX_RANGE))
		{
			if (bMap)
			{
				InsertMapRangeCommand(ui32AllocIndex,
							ui32CurrentStart,
							ui32RunCount);
			}
			else
			{
				InsertUnmapRangeCommand(ui32AllocIndex,
							ui32CurrentStart,
							ui32RunCount);
			}

			bInARun = IMG_FALSE;
			ui32RunCount = 0;
		}
	}
}

/* DevicememHistoryMapKM:
 * Entry point for when an allocation is mapped into the MMU GPU
 *
 * psPMR: The PMR to which the allocation belongs.
 * ui32Offset: The offset within the PMR at which the allocation begins.
 * sDevVAddr: The DevVAddr at which the allocation begins.
 * szName: Annotation/name for the allocation.
 * ui32Log2PageSize: Page size of the allocation, expressed in log2 form.
 * ui32AllocationIndex: Allocation index as provided by the client.
 *                      We will use this as a short-cut to find the allocation
 *                      in our records.
 * pui32AllocationIndexOut: An updated allocation index for the client.
 *                          This may be a new value if we just created the
 *                          allocation record.
 */
PVRSRV_ERROR DevicememHistoryMapKM(PMR *psPMR,
						IMG_UINT32 ui32Offset,
						IMG_DEV_VIRTADDR sDevVAddr,
						IMG_DEVMEM_SIZE_T uiSize,
						const char szName[DEVMEM_ANNOTATION_MAX_LEN],
						IMG_UINT32 ui32Log2PageSize,
						IMG_UINT32 ui32AllocationIndex,
						IMG_UINT32 *pui32AllocationIndexOut)
{
	IMG_BOOL bSparse = PMR_IsSparse(psPMR);
	IMG_UINT64 ui64Serial;
	IMG_PID uiPID = OSGetCurrentClientProcessIDKM();
	PVRSRV_ERROR eError;
	IMG_BOOL bCreated;

	/* NOTE(review): ui32Offset is not referenced in this body. */
	if ((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) &&
		!CHECK_ALLOC_INDEX(ui32AllocationIndex))
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u",
								__func__,
								ui32AllocationIndex));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	PMRGetUID(psPMR, &ui64Serial);

	DevicememHistoryLock();

	eError = FindOrCreateAllocation(ui32AllocationIndex,
						ui64Serial,
						sDevVAddr,
						uiSize,
						szName,
						ui32Log2PageSize,
						uiPID,
						bSparse,
						&ui32AllocationIndex,
						&bCreated);

	if ((eError == PVRSRV_OK) && !bCreated)
	{
		/* touch the allocation so it goes to the head of our MRU list */
		TouchBusyAllocation(ui32AllocationIndex);
	}
	else if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)",
								__func__,
								szName,
								PVRSRVGETERRORSTRING(eError)));
		goto out_unlock;
	}

	if (!bSparse)
	{
		InsertMapAllCommand(ui32AllocationIndex);
	}
	else
	{
		GenerateMapUnmapCommandsForSparsePMR(psPMR,
						ui32AllocationIndex,
						IMG_TRUE);
	}

	InsertTimeStampCommand(OSClockns64());

	*pui32AllocationIndexOut = ui32AllocationIndex;

out_unlock:
	DevicememHistoryUnlock();

	return eError;
}

/* insert MAP_RANGE/UNMAP_RANGE commands covering [ui32StartPage,
 * ui32StartPage + ui32NumPages), split into chunks of at most
 * MAP_RANGE_MAX_RANGE pages. Pages beyond MAP_RANGE_MAX_START cannot
 * be encoded and are dropped with a warning.
 * NOTE(review): sBaseDevVAddr is not referenced in this body.
 */
static void VRangeInsertMapUnmapCommands(IMG_BOOL bMap,
						IMG_UINT32 ui32AllocationIndex,
						IMG_DEV_VIRTADDR sBaseDevVAddr,
						IMG_UINT32 ui32StartPage,
						IMG_UINT32 ui32NumPages,
						const IMG_CHAR *pszName)
{
	while (ui32NumPages > 0)
	{
		IMG_UINT32 ui32PagesToAdd;

		ui32PagesToAdd = MIN(ui32NumPages, MAP_RANGE_MAX_RANGE);

		if (ui32StartPage > MAP_RANGE_MAX_START)
		{
			PVR_DPF((PVR_DBG_WARNING, "Cannot record %s range beginning at page "
							"%u on allocation %s",
							bMap ? "map" : "unmap",
							ui32StartPage,
							pszName));
			return;
		}

		if (bMap)
		{
			InsertMapRangeCommand(ui32AllocationIndex,
						ui32StartPage,
						ui32PagesToAdd);
		}
		else
		{
			InsertUnmapRangeCommand(ui32AllocationIndex,
						ui32StartPage,
						ui32PagesToAdd);
		}

		ui32StartPage += ui32PagesToAdd;
		ui32NumPages -= ui32PagesToAdd;
	}
}

/* record the mapping of a virtual range not backed by a PMR
 * (serial 0 is used as the record's UID)
 */
PVRSRV_ERROR DevicememHistoryMapVRangeKM(IMG_DEV_VIRTADDR sBaseDevVAddr,
						IMG_UINT32 ui32StartPage,
						IMG_UINT32 ui32NumPages,
						IMG_DEVMEM_SIZE_T uiAllocSize,
						const IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN],
						IMG_UINT32 ui32Log2PageSize,
						IMG_UINT32 ui32AllocationIndex,
						IMG_UINT32 *pui32AllocationIndexOut)
{
	IMG_PID uiPID = OSGetCurrentClientProcessIDKM();
	PVRSRV_ERROR eError;
	IMG_BOOL bCreated;

	if ((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) &&
		!CHECK_ALLOC_INDEX(ui32AllocationIndex))
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u",
								__func__,
								ui32AllocationIndex));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	DevicememHistoryLock();

	eError = FindOrCreateAllocation(ui32AllocationIndex,
						0,
						sBaseDevVAddr,
						uiAllocSize,
						szName,
						ui32Log2PageSize,
						uiPID,
						IMG_FALSE,
						&ui32AllocationIndex,
						&bCreated);

	if ((eError == PVRSRV_OK) && !bCreated)
	{
		/* touch the allocation so it goes to the head of our MRU list */
		TouchBusyAllocation(ui32AllocationIndex);
	}
	else if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)",
								__func__,
								szName,
								PVRSRVGETERRORSTRING(eError)));
		goto out_unlock;
	}

	VRangeInsertMapUnmapCommands(IMG_TRUE,
						ui32AllocationIndex,
						sBaseDevVAddr,
						ui32StartPage,
						ui32NumPages,
						szName);

	*pui32AllocationIndexOut = ui32AllocationIndex;

out_unlock:
	DevicememHistoryUnlock();

	return eError;

}

PVRSRV_ERROR
/* record the unmapping of a virtual range not backed by a PMR;
 * mirror image of DevicememHistoryMapVRangeKM (differs only in the
 * bMap flag passed to VRangeInsertMapUnmapCommands)
 */
DevicememHistoryUnmapVRangeKM(IMG_DEV_VIRTADDR sBaseDevVAddr,
						IMG_UINT32 ui32StartPage,
						IMG_UINT32 ui32NumPages,
						IMG_DEVMEM_SIZE_T uiAllocSize,
						const IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN],
						IMG_UINT32 ui32Log2PageSize,
						IMG_UINT32 ui32AllocationIndex,
						IMG_UINT32 *pui32AllocationIndexOut)
{
	IMG_PID uiPID = OSGetCurrentClientProcessIDKM();
	PVRSRV_ERROR eError;
	IMG_BOOL bCreated;

	if ((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) &&
		!CHECK_ALLOC_INDEX(ui32AllocationIndex))
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u",
								__func__,
								ui32AllocationIndex));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	DevicememHistoryLock();

	eError = FindOrCreateAllocation(ui32AllocationIndex,
						0,
						sBaseDevVAddr,
						uiAllocSize,
						szName,
						ui32Log2PageSize,
						uiPID,
						IMG_FALSE,
						&ui32AllocationIndex,
						&bCreated);

	if ((eError == PVRSRV_OK) && !bCreated)
	{
		/* touch the allocation so it goes to the head of our MRU list */
		TouchBusyAllocation(ui32AllocationIndex);
	}
	else if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)",
								__func__,
								szName,
								PVRSRVGETERRORSTRING(eError)));
		goto out_unlock;
	}

	VRangeInsertMapUnmapCommands(IMG_FALSE,
						ui32AllocationIndex,
						sBaseDevVAddr,
						ui32StartPage,
						ui32NumPages,
						szName);

	*pui32AllocationIndexOut = ui32AllocationIndex;

out_unlock:
	DevicememHistoryUnlock();

	return eError;
}



/* DevicememHistoryUnmapKM:
 * Entry point for when an allocation is unmapped from the MMU GPU
 *
 * psPMR: The PMR to which the allocation belongs.
 * ui32Offset: The offset within the PMR at which the allocation begins.
 * sDevVAddr: The DevVAddr at which the allocation begins.
 * szName: Annotation/name for the allocation.
 * ui32Log2PageSize: Page size of the allocation, expressed in log2 form.
 * ui32AllocationIndex: Allocation index as provided by the client.
 *                      We will use this as a short-cut to find the allocation
 *                      in our records.
 * pui32AllocationIndexOut: An updated allocation index for the client.
 *                          This may be a new value if we just created the
 *                          allocation record.
 */
PVRSRV_ERROR DevicememHistoryUnmapKM(PMR *psPMR,
						IMG_UINT32 ui32Offset,
						IMG_DEV_VIRTADDR sDevVAddr,
						IMG_DEVMEM_SIZE_T uiSize,
						const char szName[DEVMEM_ANNOTATION_MAX_LEN],
						IMG_UINT32 ui32Log2PageSize,
						IMG_UINT32 ui32AllocationIndex,
						IMG_UINT32 *pui32AllocationIndexOut)
{
	IMG_BOOL bSparse = PMR_IsSparse(psPMR);
	IMG_UINT64 ui64Serial;
	IMG_PID uiPID = OSGetCurrentClientProcessIDKM();
	PVRSRV_ERROR eError;
	IMG_BOOL bCreated;

	if ((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) &&
		!CHECK_ALLOC_INDEX(ui32AllocationIndex))
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u",
								__func__,
								ui32AllocationIndex));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	PMRGetUID(psPMR, &ui64Serial);

	DevicememHistoryLock();

	eError = FindOrCreateAllocation(ui32AllocationIndex,
						ui64Serial,
						sDevVAddr,
						uiSize,
						szName,
						ui32Log2PageSize,
						uiPID,
						bSparse,
						&ui32AllocationIndex,
						&bCreated);

	if ((eError == PVRSRV_OK) && !bCreated)
	{
		/* touch the allocation so it goes to the head of our MRU list */
		TouchBusyAllocation(ui32AllocationIndex);
	}
	else if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)",
								__func__,
								szName,
								PVRSRVGETERRORSTRING(eError)));
		goto out_unlock;
	}

	if (!bSparse)
	{
		InsertUnmapAllCommand(ui32AllocationIndex);
	}
	else
	{
		GenerateMapUnmapCommandsForSparsePMR(psPMR,
						ui32AllocationIndex,
						IMG_FALSE);
	}

	InsertTimeStampCommand(OSClockns64());

	*pui32AllocationIndexOut = ui32AllocationIndex;

out_unlock:
	DevicememHistoryUnlock();

	return eError;
}

/* DevicememHistorySparseChangeKM:
 * Entry point for when a sparse allocation is changed, such that some of the
 * pages
 within the sparse allocation are mapped or unmapped.
 *
 * psPMR: The PMR to which the allocation belongs.
 * ui32Offset: The offset within the PMR at which the allocation begins.
 * sDevVAddr: The DevVAddr at which the allocation begins.
 * szName: Annotation/name for the allocation.
 * ui32Log2PageSize: Page size of the allocation, expressed in log2 form.
 * ui32AllocPageCount: Number of pages which have been mapped.
 * paui32AllocPageIndices: Indices of pages which have been mapped.
 * ui32FreePageCount: Number of pages which have been unmapped.
 * paui32FreePageIndices: Indices of pages which have been unmapped.
 * ui32AllocationIndex: Allocation index as provided by the client.
 *                      We will use this as a short-cut to find the allocation
 *                      in our records.
 * pui32AllocationIndexOut: An updated allocation index for the client.
 *                          This may be a new value if we just created the
 *                          allocation record.
 */
PVRSRV_ERROR DevicememHistorySparseChangeKM(PMR *psPMR,
						IMG_UINT32 ui32Offset,
						IMG_DEV_VIRTADDR sDevVAddr,
						IMG_DEVMEM_SIZE_T uiSize,
						const char szName[DEVMEM_ANNOTATION_MAX_LEN],
						IMG_UINT32 ui32Log2PageSize,
						IMG_UINT32 ui32AllocPageCount,
						IMG_UINT32 *paui32AllocPageIndices,
						IMG_UINT32 ui32FreePageCount,
						IMG_UINT32 *paui32FreePageIndices,
						IMG_UINT32 ui32AllocationIndex,
						IMG_UINT32 *pui32AllocationIndexOut)
{
	IMG_UINT64 ui64Serial;
	IMG_PID uiPID = OSGetCurrentClientProcessIDKM();
	PVRSRV_ERROR eError;
	IMG_BOOL bCreated;

	if ((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) &&
		!CHECK_ALLOC_INDEX(ui32AllocationIndex))
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u",
								__func__,
								ui32AllocationIndex));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	PMRGetUID(psPMR, &ui64Serial);

	DevicememHistoryLock();

	eError = FindOrCreateAllocation(ui32AllocationIndex,
						ui64Serial,
						sDevVAddr,
						uiSize,
						szName,
						ui32Log2PageSize,
						uiPID,
						IMG_TRUE /* bSparse */,
						&ui32AllocationIndex,
						&bCreated);

	if ((eError == PVRSRV_OK) && !bCreated)
	{
		/* touch the allocation so it goes to the head of our MRU list */
		TouchBusyAllocation(ui32AllocationIndex);
	}
	else if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)",
								__func__,
								szName,
								PVRSRVGETERRORSTRING(eError)));
		goto out_unlock;
	}

	/* record the newly-mapped pages, then the newly-unmapped pages */
	GenerateMapUnmapCommandsForChangeList(ui32AllocPageCount,
						paui32AllocPageIndices,
						ui32AllocationIndex,
						IMG_TRUE);

	GenerateMapUnmapCommandsForChangeList(ui32FreePageCount,
						paui32FreePageIndices,
						ui32AllocationIndex,
						IMG_FALSE);

	InsertTimeStampCommand(OSClockns64());

	*pui32AllocationIndexOut = ui32AllocationIndex;

out_unlock:
	DevicememHistoryUnlock();

	return eError;

}

/* CircularBufferIterateStart:
 * Initialise local state for iterating over the circular buffer.
 * Iteration proceeds backwards (newest command first) starting just
 * before the current head.
 */
static void CircularBufferIterateStart(IMG_UINT32 *pui32Head, IMG_UINT32 *pui32Iter)
{
	*pui32Head = gsDevicememHistoryData.sRecords.ui32Head;

	if (*pui32Head != 0)
	{
		*pui32Iter = *pui32Head - 1;
	}
	else
	{
		*pui32Iter = CIRCULAR_BUFFER_NUM_COMMANDS - 1;
	}
}

/* CircularBufferIteratePrevious:
 * Iterate to the previous item in the circular buffer.
 * This is called repeatedly to iterate over the whole circular buffer.
 */
static COMMAND_WRAPPER *CircularBufferIteratePrevious(IMG_UINT32 ui32Head,
						IMG_UINT32 *pui32Iter,
						COMMAND_TYPE *peType,
						IMG_BOOL *pbLast)
{
	IMG_UINT8 *pui8Header;
	COMMAND_WRAPPER *psOut = NULL;

	psOut = gsDevicememHistoryData.sRecords.pasCircularBuffer + *pui32Iter;

	/* the first byte of a COMMAND_WRAPPER is its ui8Type tag */
	pui8Header = (void *) psOut;

	/* Check the command looks valid.
	 * this condition should never happen, but check for it anyway
	 * and try to handle it
	 */
	if (*pui8Header >= COMMAND_TYPE_COUNT)
	{
		/* invalid header detected. Circular buffer corrupted?
		 */
		PVR_DPF((PVR_DBG_ERROR, "CircularBufferIteratePrevious: "
					"Invalid header: %u",
					*pui8Header));
		*pbLast = IMG_TRUE;
		return NULL;
	}

	*peType = *pui8Header;

	/* step backwards, wrapping to the top of the buffer at index 0 */
	if (*pui32Iter != 0)
	{
		(*pui32Iter)--;
	}
	else
	{
		*pui32Iter = CIRCULAR_BUFFER_NUM_COMMANDS - 1;
	}


	/* inform the caller this is the last command if either we have reached
	 * the head (where we started) or if we have reached an empty command,
	 * which means we have covered all populated entries
	 */
	if ((*pui32Iter == ui32Head) || (*peType == COMMAND_TYPE_NONE))
	{
		/* this is the final iteration */
		*pbLast = IMG_TRUE;
	}

	return psOut;
}

/* MapUnmapCommandGetInfo:
 * Helper function to get the address and mapping information from a MAP_ALL, UNMAP_ALL,
 * MAP_RANGE or UNMAP_RANGE command.
 * Outputs the inclusive [start, end] device-virtual range the command
 * covers, whether it was a map, and the allocation index it refers to.
 */
static void MapUnmapCommandGetInfo(COMMAND_WRAPPER *psCommand,
						COMMAND_TYPE eType,
						IMG_DEV_VIRTADDR *psDevVAddrStart,
						IMG_DEV_VIRTADDR *psDevVAddrEnd,
						IMG_BOOL *pbMap,
						IMG_UINT32 *pui32AllocIndex)
{
	if ((eType == COMMAND_TYPE_MAP_ALL) || ((eType == COMMAND_TYPE_UNMAP_ALL)))
	{
		COMMAND_MAP_ALL *psMapAll = &psCommand->u.sMapAll;
		RECORD_ALLOCATION *psAlloc;

		*pbMap = (eType == COMMAND_TYPE_MAP_ALL);
		*pui32AllocIndex = psMapAll->uiAllocIndex;

		psAlloc = ALLOC_INDEX_TO_PTR(psMapAll->uiAllocIndex);

		/* whole allocation: [base, base + size - 1] */
		*psDevVAddrStart = psAlloc->sDevVAddr;
		psDevVAddrEnd->uiAddr = psDevVAddrStart->uiAddr + psAlloc->uiSize - 1;
	}
	else if ((eType == COMMAND_TYPE_MAP_RANGE) || ((eType == COMMAND_TYPE_UNMAP_RANGE)))
	{
		COMMAND_MAP_RANGE *psMapRange = &psCommand->u.sMapRange;
		RECORD_ALLOCATION *psAlloc;
		IMG_UINT32 ui32StartPage, ui32Count;

		*pbMap = (eType == COMMAND_TYPE_MAP_RANGE);
		*pui32AllocIndex = psMapRange->uiAllocIndex;

		psAlloc = ALLOC_INDEX_TO_PTR(psMapRange->uiAllocIndex);

		MapRangeUnpack(psMapRange, &ui32StartPage, &ui32Count);

		/* page range: base + page-size-scaled offsets */
		psDevVAddrStart->uiAddr = psAlloc->sDevVAddr.uiAddr +
			((1ULL << psAlloc->ui32Log2PageSize) * ui32StartPage);

		psDevVAddrEnd->uiAddr = psDevVAddrStart->uiAddr +
			((1ULL << psAlloc->ui32Log2PageSize) * ui32Count) - 1;
	}
	else
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid command type: %u",
								__func__,
								eType));
	}
}

/* DevicememHistoryQuery:
 * Entry point for rgxdebug to look up addresses relating to a page fault
 */
IMG_BOOL DevicememHistoryQuery(DEVICEMEM_HISTORY_QUERY_IN *psQueryIn,
						DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut,
						IMG_UINT32 ui32PageSizeBytes,
						IMG_BOOL bMatchAnyAllocInPage)
{
	IMG_UINT32 ui32Head, ui32Iter;
	COMMAND_TYPE eType = COMMAND_TYPE_NONE;
	COMMAND_WRAPPER *psCommand = NULL;
	IMG_BOOL bLast = IMG_FALSE;
	IMG_UINT64 ui64StartTime = OSClockns64();
	IMG_UINT64 ui64TimeNs = 0;

	/* initialise the results count for the caller */
	psQueryOut->ui32NumResults = 0;

	DevicememHistoryLock();

	/* if the search is constrained to a particular PID then we
	 * first search the list of allocations to see if this
	 * PID is known to us
	 */
	if (psQueryIn->uiPID != DEVICEMEM_HISTORY_PID_ANY)
	{
		IMG_UINT32 ui32Alloc;
		ui32Alloc = gsDevicememHistoryData.sRecords.ui32AllocationsListHead;

		/* NOTE(review): ui32Alloc is never advanced inside this loop, so
		 * only the list head is ever examined before the head-reached test
		 * breaks out. Upstream versions of this scan advance via
		 * "ui32Alloc = psAlloc->ui32Next;" — a line may have been lost in
		 * this patch; verify against the reference source.
		 */
		while (ui32Alloc != END_OF_LIST)
		{
			RECORD_ALLOCATION *psAlloc;

			psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);

			if (psAlloc->uiPID == psQueryIn->uiPID)
			{
				goto found_pid;
			}

			if (ui32Alloc == gsDevicememHistoryData.sRecords.ui32AllocationsListHead)
			{
				/* gone through whole list */
				break;
			}
		}

		/* PID not found, so we do not have any suitable data for this
		 * page fault
		 */
		goto out_unlock;
	}

found_pid:

	CircularBufferIterateStart(&ui32Head, &ui32Iter);

	while (!bLast)
	{
		psCommand = CircularBufferIteratePrevious(ui32Head, &ui32Iter, &eType, &bLast);

		if (eType == COMMAND_TYPE_TIMESTAMP)
		{
			/* carry the most recent timestamp forward for the commands
			 * that follow (iterating backwards in time)
			 */
			ui64TimeNs = TimeStampUnpack(&psCommand->u.sTimeStamp);
			continue;
		}

		if ((eType == COMMAND_TYPE_MAP_ALL) ||
			(eType == COMMAND_TYPE_UNMAP_ALL) ||
			(eType == COMMAND_TYPE_MAP_RANGE) ||
(eType == COMMAND_TYPE_UNMAP_RANGE)) + { + RECORD_ALLOCATION *psAlloc; + IMG_DEV_VIRTADDR sAllocStartAddrOrig, sAllocEndAddrOrig; + IMG_DEV_VIRTADDR sAllocStartAddr, sAllocEndAddr; + IMG_BOOL bMap; + IMG_UINT32 ui32AllocIndex; + + MapUnmapCommandGetInfo(psCommand, + eType, + &sAllocStartAddrOrig, + &sAllocEndAddrOrig, + &bMap, + &ui32AllocIndex); + + sAllocStartAddr = sAllocStartAddrOrig; + sAllocEndAddr = sAllocEndAddrOrig; + + psAlloc = ALLOC_INDEX_TO_PTR(ui32AllocIndex); + + /* skip this command if we need to search within + * a particular PID, and this allocation is not from + * that PID + */ + if ((psQueryIn->uiPID != DEVICEMEM_HISTORY_PID_ANY) && + (psAlloc->uiPID != psQueryIn->uiPID)) + { + continue; + } + + /* if the allocation was created after this event, then this + * event must be for an old/removed allocation, so skip it + */ + if (DO_TIME_STAMP_MASK(psAlloc->ui64CreationTime) > ui64TimeNs) + { + continue; + } + + /* if the caller wants us to match any allocation in the + * same page as the allocation then tweak the real start/end + * addresses of the allocation here + */ + if (bMatchAnyAllocInPage) + { + sAllocStartAddr.uiAddr = sAllocStartAddr.uiAddr & ~(IMG_UINT64) (ui32PageSizeBytes - 1); + sAllocEndAddr.uiAddr = (sAllocEndAddr.uiAddr + ui32PageSizeBytes - 1) & ~(IMG_UINT64) (ui32PageSizeBytes - 1); + } + + if ((psQueryIn->sDevVAddr.uiAddr >= sAllocStartAddr.uiAddr) && + (psQueryIn->sDevVAddr.uiAddr < sAllocEndAddr.uiAddr)) + { + DEVICEMEM_HISTORY_QUERY_OUT_RESULT *psResult = &psQueryOut->sResults[psQueryOut->ui32NumResults]; + + OSStringLCopy(psResult->szString, psAlloc->szName, sizeof(psResult->szString)); + psResult->sBaseDevVAddr = psAlloc->sDevVAddr; + psResult->uiSize = psAlloc->uiSize; + psResult->bMap = bMap; + psResult->ui64Age = _CalculateAge(ui64StartTime, ui64TimeNs, TIME_STAMP_MASK); + psResult->ui64When = ui64TimeNs; + /* write the responsible PID in the placeholder */ + psResult->sProcessInfo.uiPID = psAlloc->uiPID; + + if ((eType == 
COMMAND_TYPE_MAP_ALL) || (eType == COMMAND_TYPE_UNMAP_ALL)) + { + psResult->bRange = IMG_FALSE; + psResult->bAll = IMG_TRUE; + } + else + { + psResult->bRange = IMG_TRUE; + MapRangeUnpack(&psCommand->u.sMapRange, + &psResult->ui32StartPage, + &psResult->ui32PageCount); + psResult->bAll = (psResult->ui32PageCount * (1U << psAlloc->ui32Log2PageSize)) + == psAlloc->uiSize; + psResult->sMapStartAddr = sAllocStartAddrOrig; + psResult->sMapEndAddr = sAllocEndAddrOrig; + } + + psQueryOut->ui32NumResults++; + + if (psQueryOut->ui32NumResults == DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS) + { + break; + } + } + } + } + +out_unlock: + DevicememHistoryUnlock(); + + return psQueryOut->ui32NumResults > 0; +} + +static void DeviceMemHistoryFmt(IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN], + IMG_PID uiPID, + const IMG_CHAR *pszName, + const IMG_CHAR *pszAction, + IMG_DEV_VIRTADDR sDevVAddrStart, + IMG_DEV_VIRTADDR sDevVAddrEnd, + IMG_UINT64 ui64TimeNs) +{ + + OSSNPrintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, + /* PID NAME MAP/UNMAP MIN-MAX SIZE AbsUS AgeUS*/ + "%04u %-40s %-10s " + IMG_DEV_VIRTADDR_FMTSPEC "-" IMG_DEV_VIRTADDR_FMTSPEC " " + "0x%08" IMG_UINT64_FMTSPECX " " + "%013" IMG_UINT64_FMTSPEC, /* 13 digits is over 2 hours of ns */ + uiPID, + pszName, + pszAction, + sDevVAddrStart.uiAddr, + sDevVAddrEnd.uiAddr, + sDevVAddrEnd.uiAddr - sDevVAddrStart.uiAddr + 1, + ui64TimeNs); +} + +static void DeviceMemHistoryFmtHeader(IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]) +{ + OSSNPrintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, + "%-4s %-40s %-6s %10s %10s %8s %13s", + "PID", + "NAME", + "ACTION", + "ADDR MIN", + "ADDR MAX", + "SIZE", + "ABS NS"); +} + +static const char *CommandTypeToString(COMMAND_TYPE eType) +{ + switch (eType) + { + case COMMAND_TYPE_MAP_ALL: + return "MapAll"; + case COMMAND_TYPE_UNMAP_ALL: + return "UnmapAll"; + case COMMAND_TYPE_MAP_RANGE: + return "MapRange"; + case COMMAND_TYPE_UNMAP_RANGE: + return "UnmapRange"; + case COMMAND_TYPE_TIMESTAMP: + return "TimeStamp"; 
+ default: + return "???"; + } +} + +static void DevicememHistoryPrintAll(OSDI_IMPL_ENTRY *psEntry) +{ + IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]; + IMG_UINT32 ui32Iter; + IMG_UINT32 ui32Head; + IMG_BOOL bLast = IMG_FALSE; + IMG_UINT64 ui64TimeNs = 0; + IMG_UINT64 ui64StartTime = OSClockns64(); + + DeviceMemHistoryFmtHeader(szBuffer); + DIPrintf(psEntry, "%s\n", szBuffer); + + CircularBufferIterateStart(&ui32Head, &ui32Iter); + + while (!bLast) + { + COMMAND_WRAPPER *psCommand; + COMMAND_TYPE eType = COMMAND_TYPE_NONE; + + psCommand = CircularBufferIteratePrevious(ui32Head, &ui32Iter, &eType, + &bLast); + + if (eType == COMMAND_TYPE_TIMESTAMP) + { + ui64TimeNs = TimeStampUnpack(&psCommand->u.sTimeStamp); + continue; + } + + + if ((eType == COMMAND_TYPE_MAP_ALL) || + (eType == COMMAND_TYPE_UNMAP_ALL) || + (eType == COMMAND_TYPE_MAP_RANGE) || + (eType == COMMAND_TYPE_UNMAP_RANGE)) + { + RECORD_ALLOCATION *psAlloc; + IMG_DEV_VIRTADDR sDevVAddrStart, sDevVAddrEnd; + IMG_BOOL bMap; + IMG_UINT32 ui32AllocIndex; + + MapUnmapCommandGetInfo(psCommand, + eType, + &sDevVAddrStart, + &sDevVAddrEnd, + &bMap, + &ui32AllocIndex); + + psAlloc = ALLOC_INDEX_TO_PTR(ui32AllocIndex); + + if (DO_TIME_STAMP_MASK(psAlloc->ui64CreationTime) > ui64TimeNs) + { + /* if this event relates to an allocation we + * are no longer tracking then do not print it + */ + continue; + } + + DeviceMemHistoryFmt(szBuffer, + psAlloc->uiPID, + psAlloc->szName, + CommandTypeToString(eType), + sDevVAddrStart, + sDevVAddrEnd, + ui64TimeNs); + + DIPrintf(psEntry, "%s\n", szBuffer); + } + } + + DIPrintf(psEntry, "\nTimestamp reference: %013" IMG_UINT64_FMTSPEC "\n", + ui64StartTime); +} + +static int DevicememHistoryPrintAllWrapper(OSDI_IMPL_ENTRY *psEntry, + void *pvData) +{ + PVR_UNREFERENCED_PARAMETER(pvData); + + DevicememHistoryLock(); + DevicememHistoryPrintAll(psEntry); + DevicememHistoryUnlock(); + + return 0; +} + +static PVRSRV_ERROR CreateRecords(void) +{ + 
gsDevicememHistoryData.sRecords.pasAllocations = + OSAllocMem(sizeof(RECORD_ALLOCATION) * ALLOCATION_LIST_NUM_ENTRIES); + + PVR_RETURN_IF_NOMEM(gsDevicememHistoryData.sRecords.pasAllocations); + + /* Allocated and initialise the circular buffer with zeros so every + * command is initialised as a command of type COMMAND_TYPE_NONE. */ + gsDevicememHistoryData.sRecords.pasCircularBuffer = + OSAllocZMem(sizeof(COMMAND_WRAPPER) * CIRCULAR_BUFFER_NUM_COMMANDS); + + if (gsDevicememHistoryData.sRecords.pasCircularBuffer == NULL) + { + OSFreeMem(gsDevicememHistoryData.sRecords.pasAllocations); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + return PVRSRV_OK; +} + +static void DestroyRecords(void) +{ + OSFreeMem(gsDevicememHistoryData.sRecords.pasCircularBuffer); + OSFreeMem(gsDevicememHistoryData.sRecords.pasAllocations); +} + +static void InitialiseRecords(void) +{ + IMG_UINT32 i; + + /* initialise the allocations list */ + + gsDevicememHistoryData.sRecords.pasAllocations[0].ui32Prev = ALLOCATION_LIST_NUM_ENTRIES - 1; + gsDevicememHistoryData.sRecords.pasAllocations[0].ui32Next = 1; + + for (i = 1; i < ALLOCATION_LIST_NUM_ENTRIES; i++) + { + gsDevicememHistoryData.sRecords.pasAllocations[i].ui32Prev = i - 1; + gsDevicememHistoryData.sRecords.pasAllocations[i].ui32Next = i + 1; + } + + gsDevicememHistoryData.sRecords.pasAllocations[ALLOCATION_LIST_NUM_ENTRIES - 1].ui32Next = 0; + + gsDevicememHistoryData.sRecords.ui32AllocationsListHead = 0; +} + +PVRSRV_ERROR DevicememHistoryInitKM(void) +{ + PVRSRV_ERROR eError; + DI_ITERATOR_CB sIterator = {.pfnShow = DevicememHistoryPrintAllWrapper}; + + eError = OSLockCreate(&gsDevicememHistoryData.hLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", err_lock); + + eError = CreateRecords(); + PVR_LOG_GOTO_IF_ERROR(eError, "CreateRecords", err_allocations); + + InitialiseRecords(); + + eError = DICreateEntry("devicemem_history", NULL, &sIterator, NULL, + DI_ENTRY_TYPE_GENERIC, + &gsDevicememHistoryData.psDIEntry); + 
PVR_LOG_GOTO_IF_ERROR(eError, "DICreateEntry", err_di_creation); + + return PVRSRV_OK; + +err_di_creation: + DestroyRecords(); +err_allocations: + OSLockDestroy(gsDevicememHistoryData.hLock); + gsDevicememHistoryData.hLock = NULL; +err_lock: + return eError; +} + +void DevicememHistoryDeInitKM(void) +{ + if (gsDevicememHistoryData.psDIEntry != NULL) + { + DIDestroyEntry(gsDevicememHistoryData.psDIEntry); + } + + DestroyRecords(); + + if (gsDevicememHistoryData.hLock != NULL) + { + OSLockDestroy(gsDevicememHistoryData.hLock); + gsDevicememHistoryData.hLock = NULL; + } +} diff --git a/drivers/gpu/drm/phytium/octopus/devicemem_history_server.h b/drivers/gpu/drm/phytium/octopus/devicemem_history_server.h new file mode 100644 index 000000000000..26d064aaa38c --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/devicemem_history_server.h @@ -0,0 +1,152 @@ +/*************************************************************************/ /*! +@File devicemem_history_server.h +@Title Resource Information abstraction +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Devicemem History functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef DEVICEMEM_HISTORY_SERVER_H +#define DEVICEMEM_HISTORY_SERVER_H + +#include "img_defs.h" +#include "pvrsrv_error.h" +#include "rgxmem.h" +#include "devicemem_utils.h" + +PVRSRV_ERROR DevicememHistoryInitKM(void); + +void DevicememHistoryDeInitKM(void); + +PVRSRV_ERROR DevicememHistoryMapKM(PMR *psPMR, + IMG_UINT32 ui32Offset, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + const char szName[DEVMEM_ANNOTATION_MAX_LEN], + IMG_UINT32 ui32PageSize, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 *pui32AllocationIndexOut); + +PVRSRV_ERROR DevicememHistoryUnmapKM(PMR *psPMR, + IMG_UINT32 ui32Offset, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + const char szName[DEVMEM_ANNOTATION_MAX_LEN], + IMG_UINT32 ui32PageSize, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 *pui32AllocationIndexOut); + +PVRSRV_ERROR DevicememHistoryMapVRangeKM(IMG_DEV_VIRTADDR sBaseDevVAddr, + IMG_UINT32 ui32StartPage, + IMG_UINT32 ui32NumPages, + IMG_DEVMEM_SIZE_T uiAllocSize, + const IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN], + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 *ui32AllocationIndexOut); + +PVRSRV_ERROR DevicememHistoryUnmapVRangeKM(IMG_DEV_VIRTADDR sBaseDevVAddr, + IMG_UINT32 ui32StartPage, + IMG_UINT32 ui32NumPages, + IMG_DEVMEM_SIZE_T uiAllocSize, + const IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN], + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32AllocationIndex, + IMG_UINT32 *ui32AllocationIndexOut); + +PVRSRV_ERROR DevicememHistorySparseChangeKM(PMR *psPMR, + IMG_UINT32 ui32Offset, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + const char szName[DEVMEM_ANNOTATION_MAX_LEN], + IMG_UINT32 ui32PageSize, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *paui32AllocPageIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pauiFreePageIndices, + IMG_UINT32 AllocationIndex, + IMG_UINT32 *pui32AllocationIndexOut); + +/* used when the 
PID does not matter */ +#define DEVICEMEM_HISTORY_PID_ANY 0xFFFFFFFE + +typedef struct _DEVICEMEM_HISTORY_QUERY_IN_ +{ + IMG_PID uiPID; + IMG_DEV_VIRTADDR sDevVAddr; +} DEVICEMEM_HISTORY_QUERY_IN; + +/* Store up to 4 results for a lookup. In the case of the faulting page being + * re-mapped between the page fault occurring on HW and the page fault analysis + * being done, the second result entry will show the allocation being unmapped. + * A further 2 entries are added to cater for multiple buffers in the same page. + */ +#define DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS 4 + +typedef struct _DEVICEMEM_HISTORY_QUERY_OUT_RESULT_ +{ + IMG_CHAR szString[DEVMEM_ANNOTATION_MAX_LEN]; + IMG_DEV_VIRTADDR sBaseDevVAddr; + IMG_DEVMEM_SIZE_T uiSize; + IMG_BOOL bMap; + IMG_BOOL bRange; + IMG_BOOL bAll; + IMG_UINT64 ui64When; + IMG_UINT64 ui64Age; + /* info for sparse map/unmap operations (i.e. bRange=IMG_TRUE) */ + IMG_UINT32 ui32StartPage; + IMG_UINT32 ui32PageCount; + IMG_DEV_VIRTADDR sMapStartAddr; + IMG_DEV_VIRTADDR sMapEndAddr; + RGXMEM_PROCESS_INFO sProcessInfo; +} DEVICEMEM_HISTORY_QUERY_OUT_RESULT; + +typedef struct _DEVICEMEM_HISTORY_QUERY_OUT_ +{ + IMG_UINT32 ui32NumResults; + /* result 0 is the newest */ + DEVICEMEM_HISTORY_QUERY_OUT_RESULT sResults[DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS]; +} DEVICEMEM_HISTORY_QUERY_OUT; + +IMG_BOOL +DevicememHistoryQuery(DEVICEMEM_HISTORY_QUERY_IN *psQueryIn, + DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut, + IMG_UINT32 ui32PageSizeBytes, + IMG_BOOL bMatchAnyAllocInPage); + +#endif diff --git a/drivers/gpu/drm/phytium/octopus/devicemem_pdump.h b/drivers/gpu/drm/phytium/octopus/devicemem_pdump.h new file mode 100644 index 000000000000..84757797fbdb --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/devicemem_pdump.h @@ -0,0 +1,363 @@ +/*************************************************************************/ /*! +@File +@Title Device Memory Management PDump internal +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Description Services internal interface to PDump device memory management + functions that are shared between client and server code. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef DEVICEMEM_PDUMP_H +#define DEVICEMEM_PDUMP_H + +#include "devicemem.h" +#include "pdumpdefs.h" +#include "pdump.h" + +#if defined(PDUMP) +/* + * DevmemPDumpLoadMem() + * + * takes a memory descriptor, offset, and size, and takes the current contents + * of the memory at that location and writes it to the prm pdump file, and + * emits a pdump LDB to load the data from that file. The intention here is + * that the contents of the simulated buffer upon pdump playback will be made + * to be the same as they are when this command is run, enabling pdump of + * cases where the memory has been modified externally, i.e. by the host cpu + * or by a third party. + */ +void +DevmemPDumpLoadMem(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + PDUMP_FLAGS_T uiPDumpFlags); + +/* + * DevmemPDumpLoadZeroMem() + * + * As DevmemPDumpLoadMem() but the PDump allocation will be populated with + * zeros from the zero page in the parameter stream + */ +void +DevmemPDumpLoadZeroMem(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + PDUMP_FLAGS_T uiPDumpFlags); + +/* + * DevmemPDumpLoadMemValue32() + * + * As above but dumps the value at a dword-aligned address in plain text to + * the pdump script2 file. Useful for patching a buffer at pdump playback by + * simply editing the script output file. 
+ * + * (The same functionality can be achieved by the above function but the + * binary PARAM file must be patched in that case.) + */ +IMG_INTERNAL void +DevmemPDumpLoadMemValue32(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT32 ui32Value, + PDUMP_FLAGS_T uiPDumpFlags); + +/* + * DevmemPDumpMemValue64() + * + * As above but dumps the 64bit-value at a dword-aligned address in plain text + * to the pdump script2 file. Useful for patching a buffer at pdump playback by + * simply editing the script output file. + * + * (The same functionality can be achieved by the above function but the + * binary PARAM file must be patched in that case.) + */ +IMG_INTERNAL void +DevmemPDumpLoadMemValue64(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT64 ui64Value, + PDUMP_FLAGS_T uiPDumpFlags); + +/* + * DevmemPDumpPageCatBaseToSAddr() + * + * Returns the symbolic address of a piece of memory represented by an offset + * into the mem descriptor. + */ +PVRSRV_ERROR +DevmemPDumpPageCatBaseToSAddr(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T *puiMemOffset, + IMG_CHAR *pszName, + IMG_UINT32 ui32Size); + +/* + * DevmemPDumpSaveToFile() + * + * Emits a pdump SAB to cause the current contents of the memory to be written + * to the given file during playback + */ +void +DevmemPDumpSaveToFile(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + const IMG_CHAR *pszFilename, + IMG_UINT32 uiFileOffset); + +/* + * DevmemPDumpSaveToFileVirtual() + * + * Emits a pdump SAB, just like DevmemPDumpSaveToFile(), but uses the virtual + * address and device MMU context to cause the pdump player to traverse the + * MMU page tables itself. 
+ */ +void +DevmemPDumpSaveToFileVirtual(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + const IMG_CHAR *pszFilename, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32PdumpFlags); + +/* + * DevmemPDumpDataDescriptor() + * + * Emits a pdump CMD:OutputData, using the virtual address and device MMU + * context. Provides more flexibility than a pdump SAB because metadata can + * be passed to an external pdump player library via the command header. + */ +void +DevmemPDumpDataDescriptor(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + const IMG_CHAR *pszFilename, + IMG_UINT32 ui32HeaderType, + IMG_UINT32 ui32ElementType, + IMG_UINT32 ui32ElementCount, + IMG_UINT32 ui32PdumpFlags); + + +/* + * + * DevmemPDumpDevmemPol32() + * + * Writes a PDump 'POL' command to wait for a masked 32-bit memory location to + * become the specified value. + */ +PVRSRV_ERROR +DevmemPDumpDevmemPol32(const DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + PDUMP_FLAGS_T ui32PDumpFlags); + +#if defined(__KERNEL__) +/* + * + * DevmemPDumpDevmemCheck32() + * + * Writes a PDump 'POL' command to run a single-shot check for a masked + * 32-bit memory location to match the specified value. + */ +PVRSRV_ERROR +DevmemPDumpDevmemCheck32(const DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + PDUMP_FLAGS_T ui32PDumpFlags); +#endif + +/* + * DevmemPDumpCBP() + * + * Polls for space in circular buffer. Reads the read offset from memory and + * waits until there is enough space to write the packet. 
+ * + * psMemDesc - MemDesc which contains the read offset + * uiReadOffset - Offset into MemDesc to the read offset + * uiWriteOffset - Current write offset + * uiPacketSize - Size of packet to write + * uiBufferSize - Size of circular buffer + */ +PVRSRV_ERROR +DevmemPDumpCBP(const DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiReadOffset, + IMG_DEVMEM_OFFSET_T uiWriteOffset, + IMG_DEVMEM_SIZE_T uiPacketSize, + IMG_DEVMEM_SIZE_T uiBufferSize); + +#else /* PDUMP */ + +#ifdef INLINE_IS_PRAGMA +#pragma inline(DevmemPDumpLoadMem) +#endif +static INLINE void +DevmemPDumpLoadMem(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psMemDesc); + PVR_UNREFERENCED_PARAMETER(uiOffset); + PVR_UNREFERENCED_PARAMETER(uiSize); + PVR_UNREFERENCED_PARAMETER(uiPDumpFlags); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(DevmemPDumpLoadMemValue32) +#endif +static INLINE void +DevmemPDumpLoadMemValue32(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT32 ui32Value, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psMemDesc); + PVR_UNREFERENCED_PARAMETER(uiOffset); + PVR_UNREFERENCED_PARAMETER(ui32Value); + PVR_UNREFERENCED_PARAMETER(uiPDumpFlags); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(DevmemPDumpLoadMemValue64) +#endif +static INLINE void +DevmemPDumpLoadMemValue64(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT64 ui64Value, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psMemDesc); + PVR_UNREFERENCED_PARAMETER(uiOffset); + PVR_UNREFERENCED_PARAMETER(ui64Value); + PVR_UNREFERENCED_PARAMETER(uiPDumpFlags); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(DevmemPDumpPageCatBaseToSAddr) +#endif +static INLINE PVRSRV_ERROR +DevmemPDumpPageCatBaseToSAddr(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T *puiMemOffset, + IMG_CHAR *pszName, + IMG_UINT32 ui32Size) +{ + PVR_UNREFERENCED_PARAMETER(psMemDesc); + 
PVR_UNREFERENCED_PARAMETER(puiMemOffset); + PVR_UNREFERENCED_PARAMETER(pszName); + PVR_UNREFERENCED_PARAMETER(ui32Size); + + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(DevmemPDumpSaveToFile) +#endif +static INLINE void +DevmemPDumpSaveToFile(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + const IMG_CHAR *pszFilename, + IMG_UINT32 uiFileOffset) +{ + PVR_UNREFERENCED_PARAMETER(psMemDesc); + PVR_UNREFERENCED_PARAMETER(uiOffset); + PVR_UNREFERENCED_PARAMETER(uiSize); + PVR_UNREFERENCED_PARAMETER(pszFilename); + PVR_UNREFERENCED_PARAMETER(uiFileOffset); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(DevmemPDumpSaveToFileVirtual) +#endif +static INLINE void +DevmemPDumpSaveToFileVirtual(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + const IMG_CHAR *pszFilename, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32PdumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psMemDesc); + PVR_UNREFERENCED_PARAMETER(uiOffset); + PVR_UNREFERENCED_PARAMETER(uiSize); + PVR_UNREFERENCED_PARAMETER(pszFilename); + PVR_UNREFERENCED_PARAMETER(ui32FileOffset); + PVR_UNREFERENCED_PARAMETER(ui32PdumpFlags); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(DevmemPDumpDevmemPol32) +#endif +static INLINE PVRSRV_ERROR +DevmemPDumpDevmemPol32(const DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + PDUMP_FLAGS_T ui32PDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psMemDesc); + PVR_UNREFERENCED_PARAMETER(uiOffset); + PVR_UNREFERENCED_PARAMETER(ui32Value); + PVR_UNREFERENCED_PARAMETER(ui32Mask); + PVR_UNREFERENCED_PARAMETER(eOperator); + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); + + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(DevmemPDumpCBP) +#endif +static INLINE PVRSRV_ERROR +DevmemPDumpCBP(const DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiReadOffset, + IMG_DEVMEM_OFFSET_T uiWriteOffset, + 
IMG_DEVMEM_SIZE_T uiPacketSize, + IMG_DEVMEM_SIZE_T uiBufferSize) +{ + PVR_UNREFERENCED_PARAMETER(psMemDesc); + PVR_UNREFERENCED_PARAMETER(uiReadOffset); + PVR_UNREFERENCED_PARAMETER(uiWriteOffset); + PVR_UNREFERENCED_PARAMETER(uiPacketSize); + PVR_UNREFERENCED_PARAMETER(uiBufferSize); + + return PVRSRV_OK; +} +#endif /* PDUMP */ +#endif /* DEVICEMEM_PDUMP_H */ diff --git a/drivers/gpu/drm/phytium/octopus/devicemem_server.c b/drivers/gpu/drm/phytium/octopus/devicemem_server.c new file mode 100644 index 000000000000..a0d052e11142 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/devicemem_server.c @@ -0,0 +1,1796 @@ +/*************************************************************************/ /*! +@File +@Title Device Memory Management +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Server-side component of the Device Memory Management. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +/* our exported API */ +#include "devicemem_server.h" +#include "devicemem_utils.h" +#include "devicemem.h" + +#include "device.h" /* For device node */ +#include "img_types.h" +#include "img_defs.h" +#include "pvr_debug.h" +#include "pvrsrv_error.h" + +#include "mmu_common.h" +#include "pdump_km.h" +#include "pmr.h" +#include "physmem.h" +#include "pdumpdesc.h" + +#include "allocmem.h" +#include "osfunc.h" +#include "lock.h" + +#define DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE (1 << 0) + +struct _DEVMEMINT_CTX_ +{ + PVRSRV_DEVICE_NODE *psDevNode; + + /* MMU common code needs to have a context. 
There's a one-to-one + correspondence between device memory context and MMU context, + but we have the abstraction here so that we don't need to care + what the MMU does with its context, and the MMU code need not + know about us at all. */ + MMU_CONTEXT *psMMUContext; + + ATOMIC_T hRefCount; + + /* This handle is for devices that require notification when a new + memory context is created and they need to store private data that + is associated with the context. */ + IMG_HANDLE hPrivData; + + /* Protects access to sProcessNotifyListHead */ + POSWR_LOCK hListLock; + + /* The following tracks UM applications that need to be notified of a + * page fault */ + DLLIST_NODE sProcessNotifyListHead; + /* The following is a node for the list of registered devmem contexts */ + DLLIST_NODE sPageFaultNotifyListElem; + + /* Device virtual address of a page fault on this context */ + IMG_DEV_VIRTADDR sFaultAddress; + + /* General purpose flags */ + IMG_UINT32 ui32Flags; +}; + +struct _DEVMEMINT_CTX_EXPORT_ +{ + DEVMEMINT_CTX *psDevmemCtx; + PMR *psPMR; + ATOMIC_T hRefCount; + DLLIST_NODE sNode; +}; + +struct _DEVMEMINT_HEAP_ +{ + struct _DEVMEMINT_CTX_ *psDevmemCtx; + IMG_UINT32 uiLog2PageSize; + ATOMIC_T hRefCount; +}; + +struct _DEVMEMINT_RESERVATION_ +{ + struct _DEVMEMINT_HEAP_ *psDevmemHeap; + IMG_DEV_VIRTADDR sBase; + IMG_DEVMEM_SIZE_T uiLength; +}; + +struct _DEVMEMINT_MAPPING_ +{ + struct _DEVMEMINT_RESERVATION_ *psReservation; + PMR *psPMR; + IMG_UINT32 uiNumPages; +}; + +struct _DEVMEMINT_PF_NOTIFY_ +{ + IMG_UINT32 ui32PID; + DLLIST_NODE sProcessNotifyListElem; +}; + +/*************************************************************************/ /*! +@Function DevmemIntCtxAcquire +@Description Acquire a reference to the provided device memory context. 
+@Return None +*/ /**************************************************************************/ +static INLINE void DevmemIntCtxAcquire(DEVMEMINT_CTX *psDevmemCtx) +{ + OSAtomicIncrement(&psDevmemCtx->hRefCount); +} + +/*************************************************************************/ /*! +@Function DevmemIntCtxRelease +@Description Release the reference to the provided device memory context. + If this is the last reference which was taken then the + memory context will be freed. +@Return None +*/ /**************************************************************************/ +static INLINE void DevmemIntCtxRelease(DEVMEMINT_CTX *psDevmemCtx) +{ + if (OSAtomicDecrement(&psDevmemCtx->hRefCount) == 0) + { + /* The last reference has gone, destroy the context */ + PVRSRV_DEVICE_NODE *psDevNode = psDevmemCtx->psDevNode; + DLLIST_NODE *psNode, *psNodeNext; + + /* If there are any PIDs registered for page fault notification. + * Loop through the registered PIDs and free each one */ + dllist_foreach_node(&(psDevmemCtx->sProcessNotifyListHead), psNode, psNodeNext) + { + DEVMEMINT_PF_NOTIFY *psNotifyNode = + IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem); + dllist_remove_node(psNode); + OSFreeMem(psNotifyNode); + } + + /* If this context is in the list registered for a debugger, remove + * from that list */ + if (dllist_node_is_in_list(&psDevmemCtx->sPageFaultNotifyListElem)) + { + dllist_remove_node(&psDevmemCtx->sPageFaultNotifyListElem); + } + + if (psDevNode->pfnUnregisterMemoryContext) + { + psDevNode->pfnUnregisterMemoryContext(psDevmemCtx->hPrivData); + } + MMU_ContextDestroy(psDevmemCtx->psMMUContext); + + OSWRLockDestroy(psDevmemCtx->hListLock); + + PVR_DPF((PVR_DBG_MESSAGE, "%s: Freed memory context %p", + __func__, psDevmemCtx)); + OSFreeMem(psDevmemCtx); + } +} + +/*************************************************************************/ /*! 
+@Function DevmemIntHeapAcquire +@Description Acquire a reference to the provided device memory heap. +@Return None +*/ /**************************************************************************/ +static INLINE void DevmemIntHeapAcquire(DEVMEMINT_HEAP *psDevmemHeap) +{ + OSAtomicIncrement(&psDevmemHeap->hRefCount); +} + +/*************************************************************************/ /*! +@Function DevmemIntHeapRelease +@Description Release the reference to the provided device memory heap. + If this is the last reference which was taken then the + memory context will be freed. +@Return None +*/ /**************************************************************************/ +static INLINE void DevmemIntHeapRelease(DEVMEMINT_HEAP *psDevmemHeap) +{ + OSAtomicDecrement(&psDevmemHeap->hRefCount); +} + +PVRSRV_ERROR +DevmemIntUnpin(PMR *psPMR) +{ + PVRSRV_ERROR eError; + + /* Unpin */ + eError = PMRUnpinPMR(psPMR, IMG_FALSE); + + return eError; +} + +PVRSRV_ERROR +DevmemIntUnpinInvalidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR) +{ + PVRSRV_ERROR eError; + + eError = PMRUnpinPMR(psPMR, IMG_TRUE); + PVR_GOTO_IF_ERROR(eError, e_exit); + + /* Invalidate mapping */ + eError = MMU_ChangeValidity(psDevmemMapping->psReservation->psDevmemHeap->psDevmemCtx->psMMUContext, + psDevmemMapping->psReservation->sBase, + psDevmemMapping->uiNumPages, + psDevmemMapping->psReservation->psDevmemHeap->uiLog2PageSize, + IMG_FALSE, /* !< Choose to invalidate PT entries */ + psPMR); + +e_exit: + return eError; +} + +PVRSRV_ERROR +DevmemIntPin(PMR *psPMR) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + /* Start the pinning */ + eError = PMRPinPMR(psPMR); + + return eError; +} + +PVRSRV_ERROR +DevmemIntPinValidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR) +{ + PVRSRV_ERROR eError; + PVRSRV_ERROR eErrorMMU = PVRSRV_OK; + IMG_UINT32 uiLog2PageSize = psDevmemMapping->psReservation->psDevmemHeap->uiLog2PageSize; + + /* Start the pinning */ + eError = PMRPinPMR(psPMR); + + if (eError 
== PVRSRV_OK) + { + /* Make mapping valid again */ + eErrorMMU = MMU_ChangeValidity(psDevmemMapping->psReservation->psDevmemHeap->psDevmemCtx->psMMUContext, + psDevmemMapping->psReservation->sBase, + psDevmemMapping->uiNumPages, + uiLog2PageSize, + IMG_TRUE, /* !< Choose to make PT entries valid again */ + psPMR); + } + else if (eError == PVRSRV_ERROR_PMR_NEW_MEMORY) + { + /* If we lost the physical backing we have to map it again because + * the old physical addresses are not valid anymore. */ + PMR_FLAGS_T uiFlags; + uiFlags = PMR_Flags(psPMR); + + eErrorMMU = MMU_MapPages(psDevmemMapping->psReservation->psDevmemHeap->psDevmemCtx->psMMUContext, + uiFlags, + psDevmemMapping->psReservation->sBase, + psPMR, + 0, + psDevmemMapping->uiNumPages, + NULL, + uiLog2PageSize); + } + + /* Just overwrite eError if the mappings failed. + * PMR_NEW_MEMORY has to be propagated to the user. */ + if (eErrorMMU != PVRSRV_OK) + { + eError = eErrorMMU; + } + + return eError; +} + +/*************************************************************************/ /*! +@Function DevmemServerGetImportHandle +@Description For given exportable memory descriptor returns PMR handle. +@Return Memory is exportable - Success + PVRSRV_ERROR failure code +*/ /**************************************************************************/ +PVRSRV_ERROR +DevmemServerGetImportHandle(DEVMEM_MEMDESC *psMemDesc, + IMG_HANDLE *phImport) +{ + PVRSRV_ERROR eError; + + if ((GetImportProperties(psMemDesc->psImport) & DEVMEM_PROPERTIES_EXPORTABLE) == 0) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION, e0); + } + + /* A new handle means a new import tracking the PMR. 
+ * Hence the source PMR memory layout should be marked fixed + * to make sure the importer view of the memory is the same as + * the exporter throughout its lifetime */ + PMR_SetLayoutFixed((PMR *)psMemDesc->psImport->hPMR, IMG_TRUE); + + *phImport = psMemDesc->psImport->hPMR; + return PVRSRV_OK; + +e0: + return eError; +} + +/*************************************************************************/ /*! +@Function DevmemServerGetHeapHandle +@Description For given reservation returns the Heap handle. +@Return PVRSRV_ERROR failure code +*/ /**************************************************************************/ +PVRSRV_ERROR +DevmemServerGetHeapHandle(DEVMEMINT_RESERVATION *psReservation, + IMG_HANDLE *phHeap) +{ + if (psReservation == NULL || phHeap == NULL) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + *phHeap = psReservation->psDevmemHeap; + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! +@Function DevmemServerGetContext +@Description For given heap returns the context. +@Return PVRSRV_ERROR failure code +*/ /**************************************************************************/ +PVRSRV_ERROR +DevmemServerGetContext(DEVMEMINT_HEAP *psDevmemHeap, + DEVMEMINT_CTX **ppsDevmemCtxPtr) +{ + if (psDevmemHeap == NULL || ppsDevmemCtxPtr == NULL) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + *ppsDevmemCtxPtr = psDevmemHeap->psDevmemCtx; + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! +@Function DevmemServerGetPrivData +@Description For given context returns the private data handle. 
+@Return PVRSRV_ERROR failure code +*/ /**************************************************************************/ +PVRSRV_ERROR +DevmemServerGetPrivData(DEVMEMINT_CTX *psDevmemCtx, + IMG_HANDLE *phPrivData) +{ + if (psDevmemCtx == NULL || phPrivData == NULL) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + *phPrivData = psDevmemCtx->hPrivData; + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! +@Function DevmemIntCtxCreate +@Description Creates and initialises a device memory context. +@Return valid Device Memory context handle - Success + PVRSRV_ERROR failure code +*/ /**************************************************************************/ +PVRSRV_ERROR +DevmemIntCtxCreate(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_BOOL bKernelMemoryCtx, + DEVMEMINT_CTX **ppsDevmemCtxPtr, + IMG_HANDLE *hPrivData, + IMG_UINT32 *pui32CPUCacheLineSize) +{ + PVRSRV_ERROR eError; + DEVMEMINT_CTX *psDevmemCtx; + IMG_HANDLE hPrivDataInt = NULL; + MMU_DEVICEATTRIBS *psMMUDevAttrs = psDeviceNode->pfnGetMMUDeviceAttributes(psDeviceNode, + bKernelMemoryCtx); + + PVR_DPF((PVR_DBG_MESSAGE, "%s", __func__)); + + /* + * Ensure that we are safe to perform unaligned accesses on memory + * we mark write-combine, as the compiler might generate + * instructions operating on this memory which require this + * assumption to be true. 
+ */ + PVR_ASSERT(OSIsWriteCombineUnalignedSafe()); + + /* allocate a Devmem context */ + psDevmemCtx = OSAllocMem(sizeof(*psDevmemCtx)); + PVR_LOG_GOTO_IF_NOMEM(psDevmemCtx, eError, fail_alloc); + + OSAtomicWrite(&psDevmemCtx->hRefCount, 1); + psDevmemCtx->psDevNode = psDeviceNode; + + /* Call down to MMU context creation */ + + eError = MMU_ContextCreate(psConnection, + psDeviceNode, + &psDevmemCtx->psMMUContext, + psMMUDevAttrs); + PVR_LOG_GOTO_IF_ERROR(eError, "MMU_ContextCreate", fail_mmucontext); + + if (psDeviceNode->pfnRegisterMemoryContext) + { + eError = psDeviceNode->pfnRegisterMemoryContext(psDeviceNode, psDevmemCtx->psMMUContext, &hPrivDataInt); + PVR_LOG_GOTO_IF_ERROR(eError, "pfnRegisterMemoryContext", fail_register); + } + + /* Store the private data as it is required to unregister the memory context */ + psDevmemCtx->hPrivData = hPrivDataInt; + *hPrivData = hPrivDataInt; + *ppsDevmemCtxPtr = psDevmemCtx; + + /* Pass the CPU cache line size through the bridge to the user mode as it can't be queried in user mode.*/ + *pui32CPUCacheLineSize = OSCPUCacheAttributeSize(OS_CPU_CACHE_ATTRIBUTE_LINE_SIZE); + + /* Initialise the PID notify list */ + OSWRLockCreate(&psDevmemCtx->hListLock); + dllist_init(&(psDevmemCtx->sProcessNotifyListHead)); + psDevmemCtx->sPageFaultNotifyListElem.psNextNode = NULL; + psDevmemCtx->sPageFaultNotifyListElem.psPrevNode = NULL; + + /* Initialise page fault address */ + psDevmemCtx->sFaultAddress.uiAddr = 0ULL; + + /* Initialise flags */ + psDevmemCtx->ui32Flags = 0; + + return PVRSRV_OK; + +fail_register: + MMU_ContextDestroy(psDevmemCtx->psMMUContext); +fail_mmucontext: + OSFreeMem(psDevmemCtx); +fail_alloc: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +/*************************************************************************/ /*! +@Function DevmemIntHeapCreate +@Description Creates and initialises a device memory heap. 
+@Return valid Device Memory heap handle - Success + PVRSRV_ERROR failure code +*/ /**************************************************************************/ +PVRSRV_ERROR +DevmemIntHeapCreate(DEVMEMINT_CTX *psDevmemCtx, + IMG_DEV_VIRTADDR sHeapBaseAddr, + IMG_DEVMEM_SIZE_T uiHeapLength, + IMG_UINT32 uiLog2DataPageSize, + DEVMEMINT_HEAP **ppsDevmemHeapPtr) +{ + DEVMEMINT_HEAP *psDevmemHeap; + + PVR_DPF((PVR_DBG_MESSAGE, "%s", __func__)); + + /* allocate a Devmem context */ + psDevmemHeap = OSAllocMem(sizeof(*psDevmemHeap)); + PVR_LOG_RETURN_IF_NOMEM(psDevmemHeap, "psDevmemHeap"); + + psDevmemHeap->psDevmemCtx = psDevmemCtx; + + DevmemIntCtxAcquire(psDevmemHeap->psDevmemCtx); + + OSAtomicWrite(&psDevmemHeap->hRefCount, 1); + + psDevmemHeap->uiLog2PageSize = uiLog2DataPageSize; + + *ppsDevmemHeapPtr = psDevmemHeap; + + return PVRSRV_OK; +} + +PVRSRV_ERROR DevmemIntAllocDefBackingPage(PVRSRV_DEVICE_NODE *psDevNode, + PVRSRV_DEF_PAGE *psDefPage, + IMG_INT uiInitValue, + IMG_CHAR *pcDefPageName, + IMG_BOOL bInitPage) +{ + IMG_UINT32 ui32RefCnt; + PVRSRV_ERROR eError = PVRSRV_OK; + + OSLockAcquire(psDefPage->psPgLock); + + /* We know there will not be 4G number of sparse PMR's */ + ui32RefCnt = OSAtomicIncrement(&psDefPage->atRefCounter); + + if (1 == ui32RefCnt) + { + IMG_DEV_PHYADDR sDevPhysAddr = {0}; + +#if defined(PDUMP) + PDUMPCOMMENT("Alloc %s page object", pcDefPageName); +#endif + + /* Allocate the dummy page required for sparse backing */ + eError = DevPhysMemAlloc(psDevNode, + (1 << psDefPage->ui32Log2PgSize), + 0, + uiInitValue, + bInitPage, +#if defined(PDUMP) + psDevNode->psMMUDevAttrs->pszMMUPxPDumpMemSpaceName, + pcDefPageName, + &psDefPage->hPdumpPg, +#endif + &psDefPage->sPageHandle, + &sDevPhysAddr); + if (PVRSRV_OK != eError) + { + OSAtomicDecrement(&psDefPage->atRefCounter); + } + else + { + psDefPage->ui64PgPhysAddr = sDevPhysAddr.uiAddr; + } + } + + OSLockRelease(psDefPage->psPgLock); + + return eError; +} + +void 
DevmemIntFreeDefBackingPage(PVRSRV_DEVICE_NODE *psDevNode, + PVRSRV_DEF_PAGE *psDefPage, + IMG_CHAR *pcDefPageName) +{ + IMG_UINT32 ui32RefCnt; + + ui32RefCnt = OSAtomicRead(&psDefPage->atRefCounter); + + /* For the cases where the dummy page allocation fails due to lack of memory + * The refcount can still be 0 even for a sparse allocation */ + if (0 != ui32RefCnt) + { + OSLockAcquire(psDefPage->psPgLock); + + /* We know there will not be 4G number of sparse PMR's */ + ui32RefCnt = OSAtomicDecrement(&psDefPage->atRefCounter); + + if (0 == ui32RefCnt) + { + PDUMPCOMMENT("Free %s page object", pcDefPageName); + + /* Free the dummy page when refcount reaches zero */ + DevPhysMemFree(psDevNode, +#if defined(PDUMP) + psDefPage->hPdumpPg, +#endif + &psDefPage->sPageHandle); + +#if defined(PDUMP) + psDefPage->hPdumpPg = NULL; +#endif + psDefPage->ui64PgPhysAddr = MMU_BAD_PHYS_ADDR; + } + + OSLockRelease(psDefPage->psPgLock); + } + +} + +PVRSRV_ERROR +DevmemIntMapPages(DEVMEMINT_RESERVATION *psReservation, + PMR *psPMR, + IMG_UINT32 ui32PageCount, + IMG_UINT32 ui32PhysicalPgOffset, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_DEV_VIRTADDR sDevVAddrBase) +{ + PVRSRV_ERROR eError; + + if (psReservation->psDevmemHeap->uiLog2PageSize > PMR_GetLog2Contiguity(psPMR)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Device heap and PMR have incompatible Log2Contiguity (%u - %u). 
" + "PMR contiguity must be a multiple of the heap contiguity!", + __func__, + psReservation->psDevmemHeap->uiLog2PageSize, + PMR_GetLog2Contiguity(psPMR))); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, e0); + } + + eError = MMU_MapPages(psReservation->psDevmemHeap->psDevmemCtx->psMMUContext, + uiFlags, + sDevVAddrBase, + psPMR, + ui32PhysicalPgOffset, + ui32PageCount, + NULL, + psReservation->psDevmemHeap->uiLog2PageSize); + +e0: + return eError; +} + +PVRSRV_ERROR +DevmemIntUnmapPages(DEVMEMINT_RESERVATION *psReservation, + IMG_DEV_VIRTADDR sDevVAddrBase, + IMG_UINT32 ui32PageCount) +{ + /* Unmap the pages and mark them invalid in the MMU PTE */ + MMU_UnmapPages(psReservation->psDevmemHeap->psDevmemCtx->psMMUContext, + 0, + sDevVAddrBase, + ui32PageCount, + NULL, + psReservation->psDevmemHeap->uiLog2PageSize, + 0); + + return PVRSRV_OK; +} + +PVRSRV_ERROR +DevmemIntMapPMR(DEVMEMINT_HEAP *psDevmemHeap, + DEVMEMINT_RESERVATION *psReservation, + PMR *psPMR, + PVRSRV_MEMALLOCFLAGS_T uiMapFlags, + DEVMEMINT_MAPPING **ppsMappingPtr) +{ + PVRSRV_ERROR eError; + DEVMEMINT_MAPPING *psMapping; + /* number of pages (device pages) that allocation spans */ + IMG_UINT32 ui32NumDevPages; + /* device virtual address of start of allocation */ + IMG_DEV_VIRTADDR sAllocationDevVAddr; + /* and its length */ + IMG_DEVMEM_SIZE_T uiAllocationSize; + IMG_UINT32 uiLog2HeapContiguity = psDevmemHeap->uiLog2PageSize; + IMG_BOOL bIsSparse = IMG_FALSE, bNeedBacking = IMG_FALSE; + PVRSRV_DEVICE_NODE *psDevNode; + PMR_FLAGS_T uiPMRFlags; + PVRSRV_DEF_PAGE *psDefPage; + IMG_CHAR *pszPageName; + + if (uiLog2HeapContiguity > PMR_GetLog2Contiguity(psPMR)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Device heap and PMR have incompatible contiguity (%u - %u). 
" + "Heap contiguity must be a multiple of the heap contiguity!", + __func__, + uiLog2HeapContiguity, + PMR_GetLog2Contiguity(psPMR) )); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, e0); + } + psDevNode = psDevmemHeap->psDevmemCtx->psDevNode; + + /* allocate memory to record the mapping info */ + psMapping = OSAllocMem(sizeof(*psMapping)); + PVR_LOG_GOTO_IF_NOMEM(psMapping, eError, e0); + + uiAllocationSize = psReservation->uiLength; + + ui32NumDevPages = 0xffffffffU & ( ( (uiAllocationSize - 1) >> uiLog2HeapContiguity) + 1); + PVR_ASSERT((IMG_DEVMEM_SIZE_T) ui32NumDevPages << uiLog2HeapContiguity == uiAllocationSize); + + eError = PMRLockSysPhysAddresses(psPMR); + PVR_GOTO_IF_ERROR(eError, e2); + + sAllocationDevVAddr = psReservation->sBase; + + /*Check if the PMR that needs to be mapped is sparse */ + bIsSparse = PMR_IsSparse(psPMR); + if (bIsSparse) + { + /*Get the flags*/ + uiPMRFlags = PMR_Flags(psPMR); + bNeedBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiPMRFlags); + + if (bNeedBacking) + { + IMG_INT uiInitValue; + + if (PVRSRV_IS_SPARSE_ZERO_BACKING_REQUIRED(uiPMRFlags)) + { + psDefPage = &psDevmemHeap->psDevmemCtx->psDevNode->sDevZeroPage; + uiInitValue = PVR_ZERO_PAGE_INIT_VALUE; + pszPageName = DEV_ZERO_PAGE; + } + else + { + psDefPage = &psDevmemHeap->psDevmemCtx->psDevNode->sDummyPage; + uiInitValue = PVR_DUMMY_PAGE_INIT_VALUE; + pszPageName = DUMMY_PAGE; + } + + /* Error is logged with in the function if any failures. + * As the allocation fails we need to fail the map request and + * return appropriate error + * + * Allocation of dummy/zero page is done after locking the pages for PMR physically + * By implementing this way, the best case path of dummy/zero page being most likely to be + * allocated after physically locking down pages, is considered. 
+ * If the dummy/zero page allocation fails, we do unlock the physical address and the impact + * is a bit more in on demand mode of operation */ + eError = DevmemIntAllocDefBackingPage(psDevNode, + psDefPage, + uiInitValue, + pszPageName, + IMG_TRUE); + PVR_GOTO_IF_ERROR(eError, e3); + } + + /* N.B. We pass mapping permission flags to MMU_MapPages and let + * it reject the mapping if the permissions on the PMR are not compatible. */ + eError = MMU_MapPages(psDevmemHeap->psDevmemCtx->psMMUContext, + uiMapFlags, + sAllocationDevVAddr, + psPMR, + 0, + ui32NumDevPages, + NULL, + uiLog2HeapContiguity); + PVR_GOTO_IF_ERROR(eError, e4); + } + else + { + eError = MMU_MapPMRFast(psDevmemHeap->psDevmemCtx->psMMUContext, + sAllocationDevVAddr, + psPMR, + (IMG_DEVMEM_SIZE_T) ui32NumDevPages << uiLog2HeapContiguity, + uiMapFlags, + uiLog2HeapContiguity); + PVR_GOTO_IF_ERROR(eError, e3); + } + + psMapping->psReservation = psReservation; + psMapping->uiNumPages = ui32NumDevPages; + psMapping->psPMR = psPMR; + + /* Don't bother with refcount on reservation, as a reservation + only ever holds one mapping, so we directly increment the + refcount on the heap instead */ + DevmemIntHeapAcquire(psMapping->psReservation->psDevmemHeap); + + *ppsMappingPtr = psMapping; + + return PVRSRV_OK; +e4: + if (bNeedBacking) + { + /*if the mapping failed, the allocated dummy ref count need + * to be handled accordingly */ + DevmemIntFreeDefBackingPage(psDevmemHeap->psDevmemCtx->psDevNode, + psDefPage, + pszPageName); + } +e3: + { + PVRSRV_ERROR eError1=PVRSRV_OK; + eError1 = PMRUnlockSysPhysAddresses(psPMR); + PVR_LOG_IF_ERROR(eError1, "PMRUnlockSysPhysAddresses"); + + *ppsMappingPtr = NULL; + } +e2: + OSFreeMem(psMapping); + +e0: + PVR_ASSERT (eError != PVRSRV_OK); + return eError; +} + + +PVRSRV_ERROR +DevmemIntUnmapPMR(DEVMEMINT_MAPPING *psMapping) +{ + PVRSRV_ERROR eError; + DEVMEMINT_HEAP *psDevmemHeap = psMapping->psReservation->psDevmemHeap; + /* device virtual address of start of allocation 
*/ + IMG_DEV_VIRTADDR sAllocationDevVAddr; + /* number of pages (device pages) that allocation spans */ + IMG_UINT32 ui32NumDevPages; + IMG_BOOL bIsSparse = IMG_FALSE, bNeedBacking = IMG_FALSE; + PMR_FLAGS_T uiPMRFlags; + + ui32NumDevPages = psMapping->uiNumPages; + sAllocationDevVAddr = psMapping->psReservation->sBase; + + /*Check if the PMR that needs to be mapped is sparse */ + bIsSparse = PMR_IsSparse(psMapping->psPMR); + + if (bIsSparse) + { + /*Get the flags*/ + uiPMRFlags = PMR_Flags(psMapping->psPMR); + bNeedBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiPMRFlags); + + if (bNeedBacking) + { + if (PVRSRV_IS_SPARSE_ZERO_BACKING_REQUIRED(uiPMRFlags)) + { + DevmemIntFreeDefBackingPage(psDevmemHeap->psDevmemCtx->psDevNode, + &psDevmemHeap->psDevmemCtx->psDevNode->sDevZeroPage, + DEV_ZERO_PAGE); + } + else + { + DevmemIntFreeDefBackingPage(psDevmemHeap->psDevmemCtx->psDevNode, + &psDevmemHeap->psDevmemCtx->psDevNode->sDummyPage, + DUMMY_PAGE); + } + } + + MMU_UnmapPages (psDevmemHeap->psDevmemCtx->psMMUContext, + 0, + sAllocationDevVAddr, + ui32NumDevPages, + NULL, + psMapping->psReservation->psDevmemHeap->uiLog2PageSize, + 0); + } + else + { + MMU_UnmapPMRFast(psDevmemHeap->psDevmemCtx->psMMUContext, + sAllocationDevVAddr, + ui32NumDevPages, + psMapping->psReservation->psDevmemHeap->uiLog2PageSize); + } + + + + eError = PMRUnlockSysPhysAddresses(psMapping->psPMR); + PVR_ASSERT(eError == PVRSRV_OK); + + /* Don't bother with refcount on reservation, as a reservation + only ever holds one mapping, so we directly decrement the + refcount on the heap instead */ + DevmemIntHeapRelease(psDevmemHeap); + + OSFreeMem(psMapping); + + return PVRSRV_OK; +} + + +PVRSRV_ERROR +DevmemIntReserveRange(DEVMEMINT_HEAP *psDevmemHeap, + IMG_DEV_VIRTADDR sAllocationDevVAddr, + IMG_DEVMEM_SIZE_T uiAllocationSize, + DEVMEMINT_RESERVATION **ppsReservationPtr) +{ + PVRSRV_ERROR eError; + DEVMEMINT_RESERVATION *psReservation; + + /* allocate memory to record the reservation info */ + 
psReservation = OSAllocMem(sizeof(*psReservation)); + PVR_LOG_GOTO_IF_NOMEM(psReservation, eError, e0); + + psReservation->sBase = sAllocationDevVAddr; + psReservation->uiLength = uiAllocationSize; + + eError = MMU_Alloc(psDevmemHeap->psDevmemCtx->psMMUContext, + uiAllocationSize, + &uiAllocationSize, + 0, /* IMG_UINT32 uiProtFlags */ + 0, /* alignment is n/a since we supply devvaddr */ + &sAllocationDevVAddr, + psDevmemHeap->uiLog2PageSize); + PVR_GOTO_IF_ERROR(eError, e1); + + /* since we supplied the virt addr, MMU_Alloc shouldn't have + chosen a new one for us */ + PVR_ASSERT(sAllocationDevVAddr.uiAddr == psReservation->sBase.uiAddr); + + DevmemIntHeapAcquire(psDevmemHeap); + + psReservation->psDevmemHeap = psDevmemHeap; + *ppsReservationPtr = psReservation; + + return PVRSRV_OK; + + /* + * error exit paths follow + */ + +e1: + OSFreeMem(psReservation); + +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +PVRSRV_ERROR +DevmemIntUnreserveRange(DEVMEMINT_RESERVATION *psReservation) +{ + IMG_DEV_VIRTADDR sBase = psReservation->sBase; + IMG_UINT32 uiLength = psReservation->uiLength; + IMG_UINT32 uiLog2DataPageSize = psReservation->psDevmemHeap->uiLog2PageSize; + + MMU_Free(psReservation->psDevmemHeap->psDevmemCtx->psMMUContext, + sBase, + uiLength, + uiLog2DataPageSize); + + DevmemIntHeapRelease(psReservation->psDevmemHeap); + OSFreeMem(psReservation); + + return PVRSRV_OK; +} + + +PVRSRV_ERROR +DevmemIntHeapDestroy(DEVMEMINT_HEAP *psDevmemHeap) +{ + if (OSAtomicRead(&psDevmemHeap->hRefCount) != 1) + { + PVR_DPF((PVR_DBG_ERROR, "BUG! 
%s called but has too many references (%d) " + "which probably means allocations have been made from the heap and not freed", + __func__, + OSAtomicRead(&psDevmemHeap->hRefCount))); + + /* + * Try again later when you've freed all the memory + * + * Note: + * We don't expect the application to retry (after all this call would + * succeed if the client had freed all the memory which it should have + * done before calling this function). However, given there should be + * an associated handle, when the handle base is destroyed it will free + * any allocations leaked by the client and then it will retry this call, + * which should then succeed. + */ + return PVRSRV_ERROR_RETRY; + } + + PVR_ASSERT(OSAtomicRead(&psDevmemHeap->hRefCount) == 1); + + DevmemIntCtxRelease(psDevmemHeap->psDevmemCtx); + + PVR_DPF((PVR_DBG_MESSAGE, "%s: Freed heap %p", __func__, psDevmemHeap)); + OSFreeMem(psDevmemHeap); + + return PVRSRV_OK; +} + +PVRSRV_ERROR +DevmemIntChangeSparse(DEVMEMINT_HEAP *psDevmemHeap, + PMR *psPMR, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices, + SPARSE_MEM_RESIZE_FLAGS uiSparseFlags, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_DEV_VIRTADDR sDevVAddrBase, + IMG_UINT64 sCpuVAddrBase) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + IMG_UINT32 uiLog2PMRContiguity = PMR_GetLog2Contiguity(psPMR); + IMG_UINT32 uiLog2HeapContiguity = psDevmemHeap->uiLog2PageSize; + IMG_UINT32 uiOrderDiff = uiLog2PMRContiguity - uiLog2HeapContiguity; + IMG_UINT32 uiPagesPerOrder = 1 << uiOrderDiff; + + IMG_UINT32 *pai32MapIndices = pai32AllocIndices; + IMG_UINT32 *pai32UnmapIndices = pai32FreeIndices; + IMG_UINT32 uiMapPageCount = ui32AllocPageCount; + IMG_UINT32 uiUnmapPageCount = ui32FreePageCount; + + /* Special case: + * Adjust indices if we map into a heap that uses smaller page sizes + * than the physical allocation itself. 
+ * The incoming parameters are all based on the page size of the PMR + * but the mapping functions expects parameters to be in terms of heap page sizes. */ + if (uiOrderDiff != 0) + { + IMG_UINT32 uiPgIdx, uiPgOffset; + + uiMapPageCount = (uiMapPageCount << uiOrderDiff); + uiUnmapPageCount = (uiUnmapPageCount << uiOrderDiff); + + pai32MapIndices = OSAllocMem(uiMapPageCount * sizeof(*pai32MapIndices)); + PVR_GOTO_IF_NOMEM(pai32MapIndices, eError, e0); + + pai32UnmapIndices = OSAllocMem(uiUnmapPageCount * sizeof(*pai32UnmapIndices)); + if (!pai32UnmapIndices) + { + OSFreeMem(pai32MapIndices); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, e0); + } + + /* Every chunk index needs to be translated from physical indices + * into heap based indices. */ + for (uiPgIdx = 0; uiPgIdx < ui32AllocPageCount; uiPgIdx++) + { + for (uiPgOffset = 0; uiPgOffset < uiPagesPerOrder; uiPgOffset++) + { + pai32MapIndices[uiPgIdx*uiPagesPerOrder + uiPgOffset] = + pai32AllocIndices[uiPgIdx]*uiPagesPerOrder + uiPgOffset; + } + } + + for (uiPgIdx = 0; uiPgIdx < ui32FreePageCount; uiPgIdx++) + { + for (uiPgOffset = 0; uiPgOffset < uiPagesPerOrder; uiPgOffset++) + { + pai32UnmapIndices[uiPgIdx*uiPagesPerOrder + uiPgOffset] = + pai32FreeIndices[uiPgIdx]*uiPagesPerOrder + uiPgOffset; + } + } + } + + /* + * The order of steps in which this request is done is given below. The order of + * operations is very important in this case: + * + * 1. The parameters are validated in function PMR_ChangeSparseMem below. + * A successful response indicates all the parameters are correct. + * In failure case we bail out from here without processing further. + * 2. On success, get the PMR specific operations done. this includes page alloc, page free + * and the corresponding PMR status changes. + * when this call fails, it is ensured that the state of the PMR before is + * not disturbed. If it succeeds, then we can go ahead with the subsequent steps. + * 3. 
Invalidate the GPU page table entries for the pages to be freed. + * 4. Write the GPU page table entries for the pages that got allocated. + * 5. Change the corresponding CPU space map. + * + * The above steps can be selectively controlled using flags. + */ + if (uiSparseFlags & (SPARSE_REMAP_MEM | SPARSE_RESIZE_BOTH)) + { + /* Do the PMR specific changes first */ + eError = PMR_ChangeSparseMem(psPMR, + ui32AllocPageCount, + pai32AllocIndices, + ui32FreePageCount, + pai32FreeIndices, + uiSparseFlags); + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Failed to do PMR specific changes.", + __func__)); + goto e1; + } + + /* Invalidate the page table entries for the free pages. + * Optimisation later would be not to touch the ones that gets re-mapped */ + if ((0 != ui32FreePageCount) && (uiSparseFlags & SPARSE_RESIZE_FREE)) + { + PMR_FLAGS_T uiPMRFlags; + + /*Get the flags*/ + uiPMRFlags = PMR_Flags(psPMR); + + if (SPARSE_REMAP_MEM != (uiSparseFlags & SPARSE_REMAP_MEM)) + { + /* Unmap the pages and mark them invalid in the MMU PTE */ + MMU_UnmapPages (psDevmemHeap->psDevmemCtx->psMMUContext, + uiFlags, + sDevVAddrBase, + uiUnmapPageCount, + pai32UnmapIndices, + uiLog2HeapContiguity, + uiPMRFlags); + } + } + + /* Wire the pages tables that got allocated */ + if ((0 != ui32AllocPageCount) && (uiSparseFlags & SPARSE_RESIZE_ALLOC)) + { + /* Map the pages and mark them Valid in the MMU PTE */ + eError = MMU_MapPages (psDevmemHeap->psDevmemCtx->psMMUContext, + uiFlags, + sDevVAddrBase, + psPMR, + 0, + uiMapPageCount, + pai32MapIndices, + uiLog2HeapContiguity); + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Failed to map alloc indices.", + __func__)); + goto e1; + } + } + + /* Currently only used for debug */ + if (SPARSE_REMAP_MEM == (uiSparseFlags & SPARSE_REMAP_MEM)) + { + eError = MMU_MapPages(psDevmemHeap->psDevmemCtx->psMMUContext, + uiFlags, + sDevVAddrBase, + psPMR, + 0, + uiMapPageCount, + pai32UnmapIndices, + 
uiLog2HeapContiguity); + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Failed to map Free indices.", + __func__)); + goto e1; + } + } + } + +#ifndef PVRSRV_UNMAP_ON_SPARSE_CHANGE + /* Do the changes in sparse on to the CPU virtual map accordingly */ + if (uiSparseFlags & SPARSE_MAP_CPU_ADDR) + { + if (sCpuVAddrBase != 0) + { + eError = PMR_ChangeSparseMemCPUMap(psPMR, + sCpuVAddrBase, + ui32AllocPageCount, + pai32AllocIndices, + ui32FreePageCount, + pai32FreeIndices); + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Failed to map to CPU addr space.", + __func__)); + /* Must go through e1, not e0: when uiOrderDiff != 0 the + * translated index arrays were allocated above and jumping + * straight to e0 would leak them */ + goto e1; + } + } + } +#endif + +e1: + /* Free the translated index arrays if (and only if) they were + * allocated by the order-difference adjustment above */ + if (pai32MapIndices != pai32AllocIndices) + { + OSFreeMem(pai32MapIndices); + } + if (pai32UnmapIndices != pai32FreeIndices) + { + OSFreeMem(pai32UnmapIndices); + } +e0: + return eError; +} + +/*************************************************************************/ /*! +@Function DevmemIntCtxDestroy +@Description Destroy that created by DevmemIntCtxCreate +@Input psDevmemCtx Device Memory context +@Return cannot fail. +*/ /**************************************************************************/ +PVRSRV_ERROR +DevmemIntCtxDestroy(DEVMEMINT_CTX *psDevmemCtx) +{ + /* + We can't determine if we should be freeing the context here + as a refcount!=1 could be due to either the fact that heap(s) + remain with allocations on them, or that this memory context + has been exported. + As the client couldn't do anything useful with this information + anyway and the fact that the refcount will ensure we only + free the context when _all_ references have been released + don't bother checking and just return OK regardless. 
+ */ + DevmemIntCtxRelease(psDevmemCtx); + return PVRSRV_OK; +} + +PVRSRV_ERROR DevmemIntIsVDevAddrValid(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + DEVMEMINT_CTX *psDevMemContext, + IMG_DEV_VIRTADDR sDevAddr) +{ + IMG_UINT32 i, j, uiLog2HeapPageSize = 0; + DEVICE_MEMORY_INFO *psDinfo = &psDevNode->sDevMemoryInfo; + DEVMEM_HEAP_CONFIG *psConfig = psDinfo->psDeviceMemoryHeapConfigArray; + + IMG_BOOL bFound = IMG_FALSE; + + for (i = 0; + i < psDinfo->uiNumHeapConfigs && !bFound; + i++) + { + for (j = 0; + j < psConfig[i].uiNumHeaps && !bFound; + j++) + { + IMG_DEV_VIRTADDR uiBase = + psConfig[i].psHeapBlueprintArray[j].sHeapBaseAddr; + IMG_DEVMEM_SIZE_T uiSize = + psConfig[i].psHeapBlueprintArray[j].uiHeapLength; + + if ((sDevAddr.uiAddr >= uiBase.uiAddr) && + (sDevAddr.uiAddr < (uiBase.uiAddr + uiSize))) + { + uiLog2HeapPageSize = + psConfig[i].psHeapBlueprintArray[j].uiLog2DataPageSize; + bFound = IMG_TRUE; + } + } + } + + if (uiLog2HeapPageSize == 0) + { + return PVRSRV_ERROR_INVALID_GPU_ADDR; + } + + return MMU_IsVDevAddrValid(psDevMemContext->psMMUContext, + uiLog2HeapPageSize, + sDevAddr) ? 
PVRSRV_OK : PVRSRV_ERROR_INVALID_GPU_ADDR; +} + +PVRSRV_ERROR +DevmemIntFlushDevSLCRange(DEVMEMINT_CTX *psDevMemContext, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + IMG_BOOL bInvalidate) +{ + PVRSRV_DEVICE_NODE *psDevNode = psDevMemContext->psDevNode; + MMU_CONTEXT *psMMUContext = psDevMemContext->psMMUContext; + + if (psDevNode->pfnDevSLCFlushRange) + { + return psDevNode->pfnDevSLCFlushRange(psDevNode, + psMMUContext, + sDevVAddr, + uiSize, + bInvalidate); + } + + return PVRSRV_ERROR_NOT_SUPPORTED; +} + +PVRSRV_ERROR +DevmemIntInvalidateFBSCTable(DEVMEMINT_CTX *psDevMemContext, + IMG_UINT64 ui64FBSCEntryMask) +{ + PVRSRV_DEVICE_NODE *psDevNode = psDevMemContext->psDevNode; + MMU_CONTEXT *psMMUContext = psDevMemContext->psMMUContext; + + if (psDevNode->pfnInvalFBSCTable) + { + return psDevNode->pfnInvalFBSCTable(psDevNode, + psMMUContext, + ui64FBSCEntryMask); + } + + return PVRSRV_ERROR_NOT_SUPPORTED; +} + +PVRSRV_ERROR DevmemIntGetFaultAddress(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + DEVMEMINT_CTX *psDevMemContext, + IMG_DEV_VIRTADDR *psFaultAddress) +{ + if ((psDevMemContext->ui32Flags & DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE) == 0) + { + return PVRSRV_ERROR_RESOURCE_UNAVAILABLE; + } + + *psFaultAddress = psDevMemContext->sFaultAddress; + psDevMemContext->ui32Flags &= ~DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE; + + return PVRSRV_OK; +} + +static POSWR_LOCK g_hExportCtxListLock; +static DLLIST_NODE g_sExportCtxList; + +PVRSRV_ERROR +DevmemIntInit(void) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + dllist_init(&g_sExportCtxList); + + eError = OSWRLockCreate(&g_hExportCtxListLock); + + return eError; +} + +PVRSRV_ERROR +DevmemIntDeInit(void) +{ + PVR_ASSERT(dllist_is_empty(&g_sExportCtxList)); + + OSWRLockDestroy(g_hExportCtxListLock); + + return PVRSRV_OK; +} + +PVRSRV_ERROR +DevmemIntExportCtx(DEVMEMINT_CTX *psContext, + PMR *psPMR, + DEVMEMINT_CTX_EXPORT **ppsContextExport) +{ + DEVMEMINT_CTX_EXPORT *psCtxExport; + + 
psCtxExport = OSAllocMem(sizeof(DEVMEMINT_CTX_EXPORT)); + PVR_LOG_RETURN_IF_NOMEM(psCtxExport, "psCtxExport"); + + DevmemIntCtxAcquire(psContext); + PMRRefPMR(psPMR); + /* Now that the source PMR is exported, the layout + * can't change as there could be outstanding importers + * This is to make sure both exporter and importers view of + * the memory is same */ + PMR_SetLayoutFixed(psPMR, IMG_TRUE); + psCtxExport->psDevmemCtx = psContext; + psCtxExport->psPMR = psPMR; + OSWRLockAcquireWrite(g_hExportCtxListLock); + dllist_add_to_tail(&g_sExportCtxList, &psCtxExport->sNode); + OSWRLockReleaseWrite(g_hExportCtxListLock); + + *ppsContextExport = psCtxExport; + + return PVRSRV_OK; +} + +PVRSRV_ERROR +DevmemIntUnexportCtx(DEVMEMINT_CTX_EXPORT *psContextExport) +{ + PMRUnrefPMR(psContextExport->psPMR); + DevmemIntCtxRelease(psContextExport->psDevmemCtx); + OSWRLockAcquireWrite(g_hExportCtxListLock); + dllist_remove_node(&psContextExport->sNode); + OSWRLockReleaseWrite(g_hExportCtxListLock); + OSFreeMem(psContextExport); + + /* Unable to find exported context, return error */ + return PVRSRV_OK; +} + +PVRSRV_ERROR +DevmemIntAcquireRemoteCtx(PMR *psPMR, + DEVMEMINT_CTX **ppsContext, + IMG_HANDLE *phPrivData) +{ + PDLLIST_NODE psListNode, psListNodeNext; + DEVMEMINT_CTX_EXPORT *psCtxExport; + + OSWRLockAcquireRead(g_hExportCtxListLock); + /* Find context from list using PMR as key */ + dllist_foreach_node(&g_sExportCtxList, psListNode, psListNodeNext) + { + psCtxExport = IMG_CONTAINER_OF(psListNode, DEVMEMINT_CTX_EXPORT, sNode); + if (psCtxExport->psPMR == psPMR) + { + OSWRLockReleaseRead(g_hExportCtxListLock); + DevmemIntCtxAcquire(psCtxExport->psDevmemCtx); + *ppsContext = psCtxExport->psDevmemCtx; + *phPrivData = psCtxExport->psDevmemCtx->hPrivData; + + /* PMR should have been already exported to import it + * If a PMR is exported, its immutable and the same is + * checked here */ + PVR_ASSERT(IMG_TRUE == PMR_IsMemLayoutFixed(psPMR)); + + return PVRSRV_OK; + } + } + 
OSWRLockReleaseRead(g_hExportCtxListLock); + + /* Unable to find exported context, return error */ + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire remote context. Could not retrieve context with given PMR", + __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; +} + +/*************************************************************************/ /*! +@Function DevmemIntRegisterPFNotify +@Description Registers a PID to be notified when a page fault occurs on a + specific device memory context. +@Input psDevmemCtx The context to be notified about. +@Input ui32PID The PID of the process that would like to be + notified. +@Input bRegister If true, register. If false, de-register. +@Return PVRSRV_ERROR. +*/ /**************************************************************************/ +PVRSRV_ERROR DevmemIntRegisterPFNotifyKM(DEVMEMINT_CTX *psDevmemCtx, + IMG_INT32 ui32PID, + IMG_BOOL bRegister) +{ + PVRSRV_DEVICE_NODE *psDevNode; + DLLIST_NODE *psNode, *psNodeNext; + DEVMEMINT_PF_NOTIFY *psNotifyNode; + IMG_BOOL bPresent = IMG_FALSE; + + PVR_LOG_RETURN_IF_INVALID_PARAM(psDevmemCtx, "psDevmemCtx"); + + psDevNode = psDevmemCtx->psDevNode; + + if (bRegister) + { + OSWRLockAcquireRead(psDevmemCtx->hListLock); + /* If this is the first PID in the list, the device memory context + * needs to be registered for notification */ + if (dllist_is_empty(&psDevmemCtx->sProcessNotifyListHead)) + { + OSWRLockReleaseRead(psDevmemCtx->hListLock); + dllist_add_to_tail(&psDevNode->sMemoryContextPageFaultNotifyListHead, + &psDevmemCtx->sPageFaultNotifyListElem); + } + else + { + OSWRLockReleaseRead(psDevmemCtx->hListLock); + } + } + + /* Loop through the registered PIDs and check whether this one is + * present */ + OSWRLockAcquireRead(psDevmemCtx->hListLock); + dllist_foreach_node(&(psDevmemCtx->sProcessNotifyListHead), psNode, psNodeNext) + { + psNotifyNode = IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem); + + if (psNotifyNode->ui32PID == ui32PID) + { + bPresent = 
IMG_TRUE; + break; + } + } + OSWRLockReleaseRead(psDevmemCtx->hListLock); + + if (bRegister) + { + if (bPresent) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Trying to register a PID that is already registered", + __func__)); + return PVRSRV_ERROR_PID_ALREADY_REGISTERED; + } + + psNotifyNode = OSAllocMem(sizeof(*psNotifyNode)); + if (psNotifyNode == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Unable to allocate memory for the notify list", + __func__)); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + psNotifyNode->ui32PID = ui32PID; + OSWRLockAcquireWrite(psDevmemCtx->hListLock); + dllist_add_to_tail(&(psDevmemCtx->sProcessNotifyListHead), &(psNotifyNode->sProcessNotifyListElem)); + OSWRLockReleaseWrite(psDevmemCtx->hListLock); + } + else + { + if (!bPresent) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Trying to unregister a PID that is not registered", + __func__)); + return PVRSRV_ERROR_PID_NOT_REGISTERED; + } + dllist_remove_node(psNode); + psNotifyNode = IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem); + OSFreeMem(psNotifyNode); + } + + if (!bRegister) + { + /* If the last process in the list is being unregistered, then also + * unregister the device memory context from the notify list. */ + OSWRLockAcquireWrite(psDevmemCtx->hListLock); + if (dllist_is_empty(&psDevmemCtx->sProcessNotifyListHead)) + { + dllist_remove_node(&psDevmemCtx->sPageFaultNotifyListElem); + } + OSWRLockReleaseWrite(psDevmemCtx->hListLock); + } + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! +@Function DevmemIntPFNotify +@Description Notifies any processes that have registered themselves to be + notified when a page fault happens on a specific device memory + context. +@Input *psDevNode The device node. +@Input ui64FaultedPCAddress The page catalogue address that faulted. +@Input sFaultAddress The address that triggered the fault. 
+@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR DevmemIntPFNotify(PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT64 ui64FaultedPCAddress, + IMG_DEV_VIRTADDR sFaultAddress) +{ + DLLIST_NODE *psNode, *psNodeNext; + DEVMEMINT_PF_NOTIFY *psNotifyNode; + PVRSRV_ERROR eError; + DEVMEMINT_CTX *psDevmemCtx = NULL; + IMG_BOOL bFailed = IMG_FALSE; + + OSWRLockAcquireRead(psDevNode->hMemoryContextPageFaultNotifyListLock); + if (dllist_is_empty(&(psDevNode->sMemoryContextPageFaultNotifyListHead))) + { + OSWRLockReleaseRead(psDevNode->hMemoryContextPageFaultNotifyListLock); + return PVRSRV_OK; + } + + dllist_foreach_node(&(psDevNode->sMemoryContextPageFaultNotifyListHead), psNode, psNodeNext) + { + DEVMEMINT_CTX *psThisContext = + IMG_CONTAINER_OF(psNode, DEVMEMINT_CTX, sPageFaultNotifyListElem); + IMG_DEV_PHYADDR sPCDevPAddr; + + eError = MMU_AcquireBaseAddr(psThisContext->psMMUContext, &sPCDevPAddr); + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "MMU_AcquireBaseAddr"); + OSWRLockReleaseRead(psDevNode->hMemoryContextPageFaultNotifyListLock); + return eError; + } + + if (sPCDevPAddr.uiAddr == ui64FaultedPCAddress) + { + psDevmemCtx = psThisContext; + break; + } + } + OSWRLockReleaseRead(psDevNode->hMemoryContextPageFaultNotifyListLock); + + if (psDevmemCtx == NULL) + { + /* Not found, just return */ + return PVRSRV_OK; + } + OSWRLockAcquireRead(psDevmemCtx->hListLock); + + /* + * Store the first occurrence of a page fault address, + * until that address is consumed by a client. 
+ */ + if ((psDevmemCtx->ui32Flags & DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE) == 0) + { + psDevmemCtx->sFaultAddress = sFaultAddress; + psDevmemCtx->ui32Flags |= DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE; + } + + /* Loop through each registered PID and send a signal to the process */ + dllist_foreach_node(&(psDevmemCtx->sProcessNotifyListHead), psNode, psNodeNext) + { + psNotifyNode = IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem); + + eError = OSDebugSignalPID(psNotifyNode->ui32PID); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Unable to signal process for PID: %u", + __func__, + psNotifyNode->ui32PID)); + + PVR_ASSERT(!"Unable to signal process"); + + bFailed = IMG_TRUE; + } + } + OSWRLockReleaseRead(psDevmemCtx->hListLock); + + if (bFailed) + { + return PVRSRV_ERROR_SIGNAL_FAILED; + } + + return PVRSRV_OK; +} + +#if defined(PDUMP) +IMG_UINT32 DevmemIntMMUContextID(DEVMEMINT_CTX *psDevMemContext) +{ + IMG_UINT32 ui32MMUContextID; + MMU_AcquirePDumpMMUContext(psDevMemContext->psMMUContext, &ui32MMUContextID, PDUMP_FLAGS_CONTINUOUS); + return ui32MMUContextID; +} + +PVRSRV_ERROR +DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx, + IMG_DEV_VIRTADDR sDevAddrStart, + IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT32 ui32ArraySize, + const IMG_CHAR *pszFilename, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + IMG_UINT32 uiPDumpMMUCtx; + + PVR_UNREFERENCED_PARAMETER(ui32ArraySize); + + eError = MMU_AcquirePDumpMMUContext(psDevmemCtx->psMMUContext, + &uiPDumpMMUCtx, + ui32PDumpFlags); + + PVR_ASSERT(eError == PVRSRV_OK); + + /* + The following SYSMEM refers to the 'MMU Context', hence it + should be the MMU context, not the PMR, that says what the PDump + MemSpace tag is? + From a PDump P.O.V. 
it doesn't matter which name space we use as long + as that MemSpace is used on the 'MMU Context' we're dumping from + */ + eError = PDumpMMUSAB(psDevmemCtx->psDevNode->sDevId.pszPDumpDevName, + uiPDumpMMUCtx, + sDevAddrStart, + uiSize, + pszFilename, + ui32FileOffset, + ui32PDumpFlags); + PVR_ASSERT(eError == PVRSRV_OK); + + MMU_ReleasePDumpMMUContext(psDevmemCtx->psMMUContext, ui32PDumpFlags); + return PVRSRV_OK; +} + + +PVRSRV_ERROR +DevmemIntPDumpBitmap(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32Width, + IMG_UINT32 ui32Height, + IMG_UINT32 ui32StrideInBytes, + IMG_DEV_VIRTADDR sDevBaseAddr, + DEVMEMINT_CTX *psDevMemContext, + IMG_UINT32 ui32Size, + PDUMP_PIXEL_FORMAT ePixelFormat, + IMG_UINT32 ui32AddrMode, + IMG_UINT32 ui32PDumpFlags) +{ + IMG_UINT32 ui32ContextID; + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + eError = MMU_AcquirePDumpMMUContext(psDevMemContext->psMMUContext, &ui32ContextID, ui32PDumpFlags); + + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "MMU_AcquirePDumpMMUContext"); + return PVRSRV_ERROR_FAILED_TO_ALLOC_MMUCONTEXT_ID; + } + + eError = PDumpBitmapKM(psDeviceNode, + pszFileName, + ui32FileOffset, + ui32Width, + ui32Height, + ui32StrideInBytes, + sDevBaseAddr, + ui32ContextID, + ui32Size, + ePixelFormat, + ui32AddrMode, + ui32PDumpFlags); + + /* Don't care about return value */ + MMU_ReleasePDumpMMUContext(psDevMemContext->psMMUContext, ui32PDumpFlags); + + return eError; +} + +PVRSRV_ERROR +DevmemIntPDumpImageDescriptor(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_CTX *psDevMemContext, + IMG_UINT32 ui32Size, + const IMG_CHAR *pszFileName, + IMG_DEV_VIRTADDR sData, + IMG_UINT32 ui32DataSize, + IMG_UINT32 ui32LogicalWidth, + IMG_UINT32 ui32LogicalHeight, + IMG_UINT32 ui32PhysicalWidth, + IMG_UINT32 ui32PhysicalHeight, + PDUMP_PIXEL_FORMAT ePixFmt, + IMG_MEMLAYOUT eMemLayout, + 
IMG_FB_COMPRESSION eFBCompression, + const IMG_UINT32 *paui32FBCClearColour, + PDUMP_FBC_SWIZZLE eFBCSwizzle, + IMG_DEV_VIRTADDR sHeader, + IMG_UINT32 ui32HeaderSize, + IMG_UINT32 ui32PDumpFlags) +{ + IMG_UINT32 ui32ContextID; + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(ui32Size); + + eError = MMU_AcquirePDumpMMUContext(psDevMemContext->psMMUContext, &ui32ContextID, ui32PDumpFlags); + PVR_LOG_RETURN_IF_ERROR(eError, "MMU_AcquirePDumpMMUContext"); + + eError = PDumpImageDescriptor(psDeviceNode, + ui32ContextID, + (IMG_CHAR *)pszFileName, + sData, + ui32DataSize, + ui32LogicalWidth, + ui32LogicalHeight, + ui32PhysicalWidth, + ui32PhysicalHeight, + ePixFmt, + eMemLayout, + eFBCompression, + paui32FBCClearColour, + eFBCSwizzle, + sHeader, + ui32HeaderSize, + ui32PDumpFlags); + PVR_LOG_IF_ERROR(eError, "PDumpImageDescriptor"); + + /* Don't care about return value */ + (void) MMU_ReleasePDumpMMUContext(psDevMemContext->psMMUContext, ui32PDumpFlags); + + return eError; +} + +PVRSRV_ERROR +DevmemIntPDumpDataDescriptor(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_CTX *psDevMemContext, + IMG_UINT32 ui32Size, + const IMG_CHAR *pszFileName, + IMG_DEV_VIRTADDR sData, + IMG_UINT32 ui32DataSize, + IMG_UINT32 ui32HeaderType, + IMG_UINT32 ui32ElementType, + IMG_UINT32 ui32ElementCount, + IMG_UINT32 ui32PDumpFlags) +{ + IMG_UINT32 ui32ContextID; + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(ui32Size); + + if ((ui32HeaderType != IBIN_HEADER_TYPE) && + (ui32HeaderType != DATA_HEADER_TYPE)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid header type (%u)", + __func__, + ui32HeaderType)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + eError = MMU_AcquirePDumpMMUContext(psDevMemContext->psMMUContext, &ui32ContextID, ui32PDumpFlags); + PVR_LOG_RETURN_IF_ERROR(eError, "MMU_AcquirePDumpMMUContext"); + + eError = PDumpDataDescriptor(psDeviceNode, + 
ui32ContextID, + (IMG_CHAR *)pszFileName, + sData, + ui32DataSize, + ui32HeaderType, + ui32ElementType, + ui32ElementCount, + ui32PDumpFlags); + PVR_LOG_IF_ERROR(eError, "PDumpDataDescriptor"); + + /* Don't care about return value */ + (void) MMU_ReleasePDumpMMUContext(psDevMemContext->psMMUContext, ui32PDumpFlags); + + return eError; +} + +#endif diff --git a/drivers/gpu/drm/phytium/octopus/devicemem_server.h b/drivers/gpu/drm/phytium/octopus/devicemem_server.h new file mode 100644 index 000000000000..7fd91576998c --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/devicemem_server.h @@ -0,0 +1,682 @@ +/*************************************************************************/ /*! +@File +@Title Device Memory Management +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Server side component for device memory management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef DEVICEMEM_SERVER_H +#define DEVICEMEM_SERVER_H + +#include "device.h" /* For device node */ +#include "img_types.h" +#include "img_defs.h" +#include "pvr_debug.h" +#include "pvrsrv_error.h" + +#include "connection_server.h" +#include "pmr.h" + +typedef struct _DEVMEMINT_CTX_ DEVMEMINT_CTX; +typedef struct _DEVMEMINT_CTX_EXPORT_ DEVMEMINT_CTX_EXPORT; +typedef struct _DEVMEMINT_HEAP_ DEVMEMINT_HEAP; + +typedef struct _DEVMEMINT_RESERVATION_ DEVMEMINT_RESERVATION; +typedef struct _DEVMEMINT_MAPPING_ DEVMEMINT_MAPPING; +typedef struct _DEVMEMINT_PF_NOTIFY_ DEVMEMINT_PF_NOTIFY; + + +/*************************************************************************/ /*! +@Function DevmemIntUnpin +@Description This is the counterpart to DevmemPin(). 
It is meant to be + called when the allocation is NOT mapped in the device virtual + space. + +@Input psPMR The physical memory to unpin. + +@Return PVRSRV_ERROR: PVRSRV_OK on success and the memory is + registered to be reclaimed. Error otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR DevmemIntUnpin(PMR *psPMR); + +/*************************************************************************/ /*! +@Function DevmemIntUnpinInvalidate +@Description This is the counterpart to DevmemIntPinValidate(). It is meant + to be called for allocations that ARE mapped in the device + virtual space and we have to invalidate the mapping. + +@Input psPMR The physical memory to unpin. + +@Return PVRSRV_ERROR: PVRSRV_OK on success and the memory is + registered to be reclaimed. Error otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR +DevmemIntUnpinInvalidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR); + +/*************************************************************************/ /*! +@Function DevmemIntPin +@Description This is the counterpart to DevmemIntUnpin(). + Is meant to be called if there is NO device mapping present. + +@Input psPMR The physical memory to pin. + +@Return PVRSRV_ERROR: PVRSRV_OK on success and the allocation content + was successfully restored. + + PVRSRV_ERROR_PMR_NEW_MEMORY when the content + could not be restored and new physical memory + was allocated. + + A different error otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR DevmemIntPin(PMR *psPMR); + +/*************************************************************************/ /*! +@Function DevmemIntPinValidate +@Description This is the counterpart to DevmemIntUnpinInvalidate(). + Is meant to be called if there is IS a device mapping present + that needs to be taken care of. 
+ +@Input psDevmemMapping The mapping structure used for the passed PMR. + +@Input psPMR The physical memory to pin. + +@Return PVRSRV_ERROR: PVRSRV_OK on success and the allocation content + was successfully restored. + + PVRSRV_ERROR_PMR_NEW_MEMORY when the content + could not be restored and new physical memory + was allocated. + + A different error otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR +DevmemIntPinValidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR); +/* + * DevmemServerGetImportHandle() + * + * For given exportable memory descriptor returns PMR handle + * + */ +PVRSRV_ERROR +DevmemServerGetImportHandle(DEVMEM_MEMDESC *psMemDesc, + IMG_HANDLE *phImport); + +/* + * DevmemServerGetHeapHandle() + * + * For given reservation returns the Heap handle + * + */ +PVRSRV_ERROR +DevmemServerGetHeapHandle(DEVMEMINT_RESERVATION *psReservation, + IMG_HANDLE *phHeap); + +/* + * DevmemServerGetContext() + * + * For given heap returns the context. + * + */ +PVRSRV_ERROR +DevmemServerGetContext(DEVMEMINT_HEAP *psDevmemHeap, + DEVMEMINT_CTX **ppsDevmemCtxPtr); + +/* + * DevmemServerGetPrivData() + * + * For given context returns the private data handle. + * + */ +PVRSRV_ERROR +DevmemServerGetPrivData(DEVMEMINT_CTX *psDevmemCtx, + IMG_HANDLE *phPrivData); + +/* + * DevmemIntAllocDefBackingPage + * + * This function allocates default backing page and initializes it + * with a given default value + * + */ +PVRSRV_ERROR DevmemIntAllocDefBackingPage(PVRSRV_DEVICE_NODE *psDevNode, + PVRSRV_DEF_PAGE *psDefPage, + IMG_INT uiInitValue, + IMG_CHAR *pcDefPageName, + IMG_BOOL bInitPage); +/* + * DevmemIntFreeDefBackingPage + * + * Frees a given page + */ +void DevmemIntFreeDefBackingPage(PVRSRV_DEVICE_NODE *psDevNode, + PVRSRV_DEF_PAGE *psDefPage, + IMG_CHAR *pcDefPageName); + + +/* + * DevmemIntCtxCreate() + * + * Create a Server-side Device Memory Context. 
This is usually the counterpart + * of the client side memory context, and indeed is usually created at the + * same time. + * + * You must have one of these before creating any heaps. + * + * All heaps must have been destroyed before calling + * DevmemIntCtxDestroy() + * + * If you call DevmemIntCtxCreate() (and it succeeds) you are promising to + * later call DevmemIntCtxDestroy() + * + * Note that this call will cause the device MMU code to do some work for + * creating the device memory context, but it does not guarantee that a page + * catalogue will have been created, as this may be deferred until the first + * allocation. + * + * Caller to provide storage for a pointer to the DEVMEM_CTX object that will + * be created by this call. + */ +PVRSRV_ERROR +DevmemIntCtxCreate(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + /* devnode / perproc etc */ + IMG_BOOL bKernelMemoryCtx, + DEVMEMINT_CTX **ppsDevmemCtxPtr, + IMG_HANDLE *hPrivData, + IMG_UINT32 *pui32CPUCacheLineSize); +/* + * DevmemIntCtxDestroy() + * + * Undoes a prior DevmemIntCtxCreate or DevmemIntCtxImport. + */ +PVRSRV_ERROR +DevmemIntCtxDestroy(DEVMEMINT_CTX *psDevmemCtx); + +/* + * DevmemIntHeapCreate() + * + * Creates a new heap in this device memory context. This will cause a call + * into the MMU code to allocate various data structures for managing this + * heap. It will not necessarily cause any page tables to be set up, as this + * can be deferred until first allocation. (i.e. we shouldn't care - it's up + * to the MMU code) + * + * Note that the data page size must be specified (as log 2). The data page + * size as specified here will be communicated to the mmu module, and thus may + * determine the page size configured in page directory entries for subsequent + * allocations from this heap. It is essential that the page size here is less + * than or equal to the "minimum contiguity guarantee" of any PMR that you + * subsequently attempt to map to this heap. 
+ * + * If you call DevmemIntHeapCreate() (and the call succeeds) you are promising + * that you shall subsequently call DevmemIntHeapDestroy() + * + * Caller to provide storage for a pointer to the DEVMEM_HEAP object that will + * be created by this call. + */ +PVRSRV_ERROR +DevmemIntHeapCreate(DEVMEMINT_CTX *psDevmemCtx, + IMG_DEV_VIRTADDR sHeapBaseAddr, + IMG_DEVMEM_SIZE_T uiHeapLength, + IMG_UINT32 uiLog2DataPageSize, + DEVMEMINT_HEAP **ppsDevmemHeapPtr); +/* + * DevmemIntHeapDestroy() + * + * Destroys a heap previously created with DevmemIntHeapCreate() + * + * All allocations from his heap must have been freed before this + * call. + */ +PVRSRV_ERROR +DevmemIntHeapDestroy(DEVMEMINT_HEAP *psDevmemHeap); + +/* + * DevmemIntMapPMR() + * + * Maps the given PMR to the virtual range previously allocated with + * DevmemIntReserveRange() + * + * If appropriate, the PMR must have had its physical backing committed, as + * this call will call into the MMU code to set up the page tables for this + * allocation, which shall in turn request the physical addresses from the + * PMR. Alternatively, the PMR implementation can choose to do so off the + * the back of the "lock" callback, which it will receive as a result + * (indirectly) of this call. + * + * This function makes no promise w.r.t. the circumstances that it can be + * called, and these would be "inherited" from the implementation of the PMR. + * For example if the PMR "lock" callback causes pages to be pinned at that + * time (which may cause scheduling or disk I/O etc.) then it would not be + * legal to "Map" the PMR in a context where scheduling events are disallowed. 
+ * + * If you call DevmemIntMapPMR() (and the call succeeds) then you are promising + * that you shall later call DevmemIntUnmapPMR() + */ +PVRSRV_ERROR +DevmemIntMapPMR(DEVMEMINT_HEAP *psDevmemHeap, + DEVMEMINT_RESERVATION *psReservation, + PMR *psPMR, + PVRSRV_MEMALLOCFLAGS_T uiMapFlags, + DEVMEMINT_MAPPING **ppsMappingPtr); +/* + * DevmemIntUnmapPMR() + * + * Reverses the mapping caused by DevmemIntMapPMR() + */ +PVRSRV_ERROR +DevmemIntUnmapPMR(DEVMEMINT_MAPPING *psMapping); + +/* DevmemIntMapPages() + * + * Maps an arbitrary amount of pages from a PMR to a reserved range + * + * @input psReservation Reservation handle for the range + * @input psPMR PMR that is mapped + * @input ui32PageCount Number of consecutive pages that are + * mapped + * @input ui32PhysicalPgOffset Logical offset in the PMR + * @input uiFlags Mapping flags + * @input sDevVAddrBase Virtual address base to start the + * mapping from + */ +PVRSRV_ERROR +DevmemIntMapPages(DEVMEMINT_RESERVATION *psReservation, + PMR *psPMR, + IMG_UINT32 ui32PageCount, + IMG_UINT32 ui32PhysicalPgOffset, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_DEV_VIRTADDR sDevVAddrBase); + +/* DevmemIntUnmapPages() + * + * Unmaps an arbitrary amount of pages from a reserved range + * + * @input psReservation Reservation handle for the range + * @input sDevVAddrBase Virtual address base to start from + * @input ui32PageCount Number of consecutive pages that are + * unmapped + */ +PVRSRV_ERROR +DevmemIntUnmapPages(DEVMEMINT_RESERVATION *psReservation, + IMG_DEV_VIRTADDR sDevVAddrBase, + IMG_UINT32 ui32PageCount); + +/* + * DevmemIntReserveRange() + * + * Indicates that the specified range should be reserved from the given heap. + * + * In turn causes the page tables to be allocated to cover the specified range. 
+ * + * If you call DevmemIntReserveRange() (and the call succeeds) then you are + * promising that you shall later call DevmemIntUnreserveRange() + */ +PVRSRV_ERROR +DevmemIntReserveRange(DEVMEMINT_HEAP *psDevmemHeap, + IMG_DEV_VIRTADDR sAllocationDevVAddr, + IMG_DEVMEM_SIZE_T uiAllocationSize, + DEVMEMINT_RESERVATION **ppsReservationPtr); +/* + * DevmemIntUnreserveRange() + * + * Undoes the state change caused by DevmemIntReserveRage() + */ +PVRSRV_ERROR +DevmemIntUnreserveRange(DEVMEMINT_RESERVATION *psDevmemReservation); + +/*************************************************************************/ /*! +@Function DevmemIntChangeSparse +@Description Changes the sparse allocations of a PMR by allocating and freeing + pages and changing their corresponding CPU and GPU mappings. + +@input psDevmemHeap Pointer to the heap we map on +@input psPMR The PMR we want to map +@input ui32AllocPageCount Number of pages to allocate +@input pai32AllocIndices The logical PMR indices where pages will + be allocated. May be NULL. +@input ui32FreePageCount Number of pages to free +@input pai32FreeIndices The logical PMR indices where pages will + be freed. May be NULL. +@input uiSparseFlags Flags passed in to determine which kind + of sparse change the user wanted. + See devicemem_typedefs.h for details. +@input uiFlags Memalloc flags for this virtual range. +@input sDevVAddrBase The base address of the virtual range of + this sparse allocation. +@input sCpuVAddrBase The CPU base address of this allocation. + May be 0 if not existing. 
+@Return PVRSRV_ERROR failure code +*/ /**************************************************************************/ +PVRSRV_ERROR +DevmemIntChangeSparse(DEVMEMINT_HEAP *psDevmemHeap, + PMR *psPMR, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices, + SPARSE_MEM_RESIZE_FLAGS uiSparseFlags, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_DEV_VIRTADDR sDevVAddrBase, + IMG_UINT64 sCpuVAddrBase); + +/* + * DevmemIntFlushDevSLCRange() + * + * Flush specified device context's virtual address range from SLC. + */ +PVRSRV_ERROR +DevmemIntFlushDevSLCRange(DEVMEMINT_CTX *psDevmemCtx, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + IMG_BOOL bInvalidate); + +/* + * DevmemIntRGXInvalidateFBSCTable() + * + * Invalidate selected FBSC table indices. + * + */ +PVRSRV_ERROR +DevmemIntInvalidateFBSCTable(DEVMEMINT_CTX *psDevmemCtx, + IMG_UINT64 ui64FBSCEntryMask); + +PVRSRV_ERROR +DevmemIntIsVDevAddrValid(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + DEVMEMINT_CTX *psDevMemContext, + IMG_DEV_VIRTADDR sDevAddr); + +PVRSRV_ERROR +DevmemIntGetFaultAddress(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + DEVMEMINT_CTX *psDevMemContext, + IMG_DEV_VIRTADDR *psFaultAddress); + +/*************************************************************************/ /*! +@Function DevmemIntRegisterPFNotifyKM +@Description Registers a PID to be notified when a page fault occurs on a + specific device memory context. +@Input psDevmemCtx The context to be notified about. +@Input ui32PID The PID of the process that would like to be + notified. +@Input bRegister If true, register. If false, de-register. 
+@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR +DevmemIntRegisterPFNotifyKM(DEVMEMINT_CTX *psDevmemCtx, + IMG_INT32 ui32PID, + IMG_BOOL bRegister); + +/*************************************************************************/ /*! +@Function DevmemIntPFNotify +@Description Notifies any processes that have registered themselves to be + notified when a page fault happens on a specific device memory + context. +@Input *psDevNode The device node. +@Input ui64FaultedPCAddress The page catalogue address that faulted. +@Input sFaultAddress The address that triggered the fault. +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR DevmemIntPFNotify(PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT64 ui64FaultedPCAddress, + IMG_DEV_VIRTADDR sFaultAddress); + +#if defined(PDUMP) +/* + * DevmemIntPDumpSaveToFileVirtual() + * + * Writes out PDump "SAB" commands with the data found in memory at + * the given virtual address. 
+ */ +PVRSRV_ERROR +DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx, + IMG_DEV_VIRTADDR sDevAddrStart, + IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT32 uiArraySize, + const IMG_CHAR *pszFilename, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32PDumpFlags); + +IMG_UINT32 +DevmemIntMMUContextID(DEVMEMINT_CTX *psDevMemContext); + +PVRSRV_ERROR +DevmemIntPDumpBitmap(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32Width, + IMG_UINT32 ui32Height, + IMG_UINT32 ui32StrideInBytes, + IMG_DEV_VIRTADDR sDevBaseAddr, + DEVMEMINT_CTX *psDevMemContext, + IMG_UINT32 ui32Size, + PDUMP_PIXEL_FORMAT ePixelFormat, + IMG_UINT32 ui32AddrMode, + IMG_UINT32 ui32PDumpFlags); + +PVRSRV_ERROR +DevmemIntPDumpImageDescriptor(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_CTX *psDevMemContext, + IMG_UINT32 ui32Size, + const IMG_CHAR *pszFileName, + IMG_DEV_VIRTADDR sData, + IMG_UINT32 ui32DataSize, + IMG_UINT32 ui32LogicalWidth, + IMG_UINT32 ui32LogicalHeight, + IMG_UINT32 ui32PhysicalWidth, + IMG_UINT32 ui32PhysicalHeight, + PDUMP_PIXEL_FORMAT ePixFmt, + IMG_MEMLAYOUT eMemLayout, + IMG_FB_COMPRESSION eFBCompression, + const IMG_UINT32 *paui32FBCClearColour, + PDUMP_FBC_SWIZZLE eFBCSwizzle, + IMG_DEV_VIRTADDR sHeader, + IMG_UINT32 ui32HeaderSize, + IMG_UINT32 ui32PDumpFlags); + +PVRSRV_ERROR +DevmemIntPDumpDataDescriptor(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_CTX *psDevMemContext, + IMG_UINT32 ui32Size, + const IMG_CHAR *pszFileName, + IMG_DEV_VIRTADDR sData, + IMG_UINT32 ui32DataSize, + IMG_UINT32 ui32HeaderType, + IMG_UINT32 ui32ElementType, + IMG_UINT32 ui32ElementCount, + IMG_UINT32 ui32PDumpFlags); +#else /* PDUMP */ + +#ifdef INLINE_IS_PRAGMA +#pragma inline(DevmemIntPDumpSaveToFileVirtual) +#endif +static INLINE PVRSRV_ERROR +DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx, + IMG_DEV_VIRTADDR sDevAddrStart, + 
IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT32 uiArraySize, + const IMG_CHAR *pszFilename, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32PDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psDevmemCtx); + PVR_UNREFERENCED_PARAMETER(sDevAddrStart); + PVR_UNREFERENCED_PARAMETER(uiSize); + PVR_UNREFERENCED_PARAMETER(uiArraySize); + PVR_UNREFERENCED_PARAMETER(pszFilename); + PVR_UNREFERENCED_PARAMETER(ui32FileOffset); + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(DevmemIntPDumpBitmap) +#endif +static INLINE PVRSRV_ERROR +DevmemIntPDumpBitmap(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32Width, + IMG_UINT32 ui32Height, + IMG_UINT32 ui32StrideInBytes, + IMG_DEV_VIRTADDR sDevBaseAddr, + DEVMEMINT_CTX *psDevMemContext, + IMG_UINT32 ui32Size, + PDUMP_PIXEL_FORMAT ePixelFormat, + IMG_UINT32 ui32AddrMode, + IMG_UINT32 ui32PDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(pszFileName); + PVR_UNREFERENCED_PARAMETER(ui32FileOffset); + PVR_UNREFERENCED_PARAMETER(ui32Width); + PVR_UNREFERENCED_PARAMETER(ui32Height); + PVR_UNREFERENCED_PARAMETER(ui32StrideInBytes); + PVR_UNREFERENCED_PARAMETER(sDevBaseAddr); + PVR_UNREFERENCED_PARAMETER(psDevMemContext); + PVR_UNREFERENCED_PARAMETER(ui32Size); + PVR_UNREFERENCED_PARAMETER(ePixelFormat); + PVR_UNREFERENCED_PARAMETER(ui32AddrMode); + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(DevmemIntPDumpImageDescriptor) +#endif +static INLINE PVRSRV_ERROR +DevmemIntPDumpImageDescriptor(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_CTX *psDevMemContext, + IMG_UINT32 ui32Size, + const IMG_CHAR *pszFileName, + IMG_DEV_VIRTADDR sData, + IMG_UINT32 ui32DataSize, + IMG_UINT32 ui32LogicalWidth, + IMG_UINT32 ui32LogicalHeight, + 
IMG_UINT32 ui32PhysicalWidth, + IMG_UINT32 ui32PhysicalHeight, + PDUMP_PIXEL_FORMAT ePixFmt, + IMG_MEMLAYOUT eMemLayout, + IMG_FB_COMPRESSION eFBCompression, + const IMG_UINT32 *paui32FBCClearColour, + PDUMP_FBC_SWIZZLE eFBCSwizzle, + IMG_DEV_VIRTADDR sHeader, + IMG_UINT32 ui32HeaderSize, + IMG_UINT32 ui32PDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(psDevMemContext); + PVR_UNREFERENCED_PARAMETER(ui32Size); + PVR_UNREFERENCED_PARAMETER(pszFileName); + PVR_UNREFERENCED_PARAMETER(sData); + PVR_UNREFERENCED_PARAMETER(ui32DataSize); + PVR_UNREFERENCED_PARAMETER(ui32LogicalWidth); + PVR_UNREFERENCED_PARAMETER(ui32LogicalHeight); + PVR_UNREFERENCED_PARAMETER(ui32PhysicalWidth); + PVR_UNREFERENCED_PARAMETER(ui32PhysicalHeight); + PVR_UNREFERENCED_PARAMETER(ePixFmt); + PVR_UNREFERENCED_PARAMETER(eMemLayout); + PVR_UNREFERENCED_PARAMETER(eFBCompression); + PVR_UNREFERENCED_PARAMETER(paui32FBCClearColour); + PVR_UNREFERENCED_PARAMETER(eFBCSwizzle); + PVR_UNREFERENCED_PARAMETER(sHeader); + PVR_UNREFERENCED_PARAMETER(ui32HeaderSize); + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(DevmemIntPDumpDataDescriptor) +#endif +static INLINE PVRSRV_ERROR +DevmemIntPDumpDataDescriptor(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_CTX *psDevMemContext, + IMG_UINT32 ui32Size, + const IMG_CHAR *pszFileName, + IMG_DEV_VIRTADDR sData, + IMG_UINT32 ui32DataSize, + IMG_UINT32 ui32ElementType, + IMG_UINT32 ui32ElementCount, + IMG_UINT32 ui32PDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(psDevMemContext); + PVR_UNREFERENCED_PARAMETER(ui32Size); + PVR_UNREFERENCED_PARAMETER(pszFileName); + PVR_UNREFERENCED_PARAMETER(sData); + PVR_UNREFERENCED_PARAMETER(ui32DataSize); + PVR_UNREFERENCED_PARAMETER(ui32ElementType); + 
PVR_UNREFERENCED_PARAMETER(ui32ElementCount); + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); + return PVRSRV_OK; +} + +#endif /* PDUMP */ + +PVRSRV_ERROR +DevmemIntInit(void); + +PVRSRV_ERROR +DevmemIntDeInit(void); + +PVRSRV_ERROR +DevmemIntExportCtx(DEVMEMINT_CTX *psContext, + PMR *psPMR, + DEVMEMINT_CTX_EXPORT **ppsContextExport); + +PVRSRV_ERROR +DevmemIntUnexportCtx(DEVMEMINT_CTX_EXPORT *psContextExport); + +PVRSRV_ERROR +DevmemIntAcquireRemoteCtx(PMR *psPMR, + DEVMEMINT_CTX **ppsContext, + IMG_HANDLE *phPrivData); + +#endif /* DEVICEMEM_SERVER_H */ diff --git a/drivers/gpu/drm/phytium/octopus/devicemem_server_utils.h b/drivers/gpu/drm/phytium/octopus/devicemem_server_utils.h new file mode 100644 index 000000000000..31d9779c8e2d --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/devicemem_server_utils.h @@ -0,0 +1,214 @@ +/**************************************************************************/ /*! +@File +@Title Device Memory Management +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Header file utilities that are specific to device memory functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /***************************************************************************/ + +#include "img_defs.h" +#include "img_types.h" +#include "device.h" +#include "pvrsrv_memallocflags.h" +#include "pvrsrv.h" + +static INLINE PVRSRV_ERROR DevmemCPUCacheMode(PVRSRV_DEVICE_NODE *psDeviceNode, + PVRSRV_MEMALLOCFLAGS_T ulFlags, + IMG_UINT32 *pui32Ret) +{ + IMG_UINT32 ui32CPUCacheMode = PVRSRV_CPU_CACHE_MODE(ulFlags); + IMG_UINT32 ui32Ret; + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_ASSERT(ui32CPUCacheMode == PVRSRV_CPU_CACHE_MODE(ulFlags)); + + switch (ui32CPUCacheMode) + { + case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED: + ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED; + break; + + case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC: + ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC; + break; + + case PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT: +#if defined(SAFETY_CRITICAL_BUILD) + ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC; +#else + ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_CACHED; +#endif + break; + + case PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT: + + /* + * If system has no coherency but coherency has been requested for CPU + * and GPU we currently fall back to write-combine. + * This avoids errors on arm64 when uncached is turned into ordered device memory + * and suffers from problems with unaligned access. + */ + if ( (PVRSRV_GPU_CACHE_MODE(ulFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT) && + !(PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig) && PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig)) ) + { + ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC; + } + else + { +#if defined(SAFETY_CRITICAL_BUILD) + ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC; +#else + ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_CACHED; +#endif + } + break; + + default: + PVR_LOG(("DevmemCPUCacheMode: Unknown CPU cache mode 0x%08x", ui32CPUCacheMode)); + PVR_ASSERT(0); + /* + We should never get here, but if we do then setting the mode + to uncached is the safest thing to do. 
+ */ + ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED; + eError = PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE; + break; + } + + *pui32Ret = ui32Ret; + + return eError; +} + +static INLINE PVRSRV_ERROR DevmemDeviceCacheMode(PVRSRV_DEVICE_NODE *psDeviceNode, + PVRSRV_MEMALLOCFLAGS_T ulFlags, + IMG_UINT32 *pui32Ret) +{ + IMG_UINT32 ui32DeviceCacheMode = PVRSRV_GPU_CACHE_MODE(ulFlags); + IMG_UINT32 ui32Ret; + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_ASSERT(ui32DeviceCacheMode == PVRSRV_GPU_CACHE_MODE(ulFlags)); + + switch (ui32DeviceCacheMode) + { + case PVRSRV_MEMALLOCFLAG_GPU_UNCACHED: + ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED; + break; + + case PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC: + ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC; + break; + + case PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT: +#if defined(SAFETY_CRITICAL_BUILD) + ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC; +#else + ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_CACHED; +#endif + break; + + case PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT: + + /* + * If system has no coherency but coherency has been requested for CPU + * and GPU we currently fall back to write-combine. + * This avoids errors on arm64 when uncached is turned into ordered device memory + * and suffers from problems with unaligned access. + */ + if ( (PVRSRV_CPU_CACHE_MODE(ulFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT) && + !(PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig) && PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig)) ) + { + ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC; + } + else + { +#if defined(SAFETY_CRITICAL_BUILD) + ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC; +#else + ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_CACHED; +#endif + } + break; + + default: + PVR_LOG(("DevmemDeviceCacheMode: Unknown device cache mode 0x%08x", ui32DeviceCacheMode)); + PVR_ASSERT(0); + /* + We should never get here, but if we do then setting the mode + to uncached is the safest thing to do. 
+ */ + ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED; + eError = PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE; + break; + } + + *pui32Ret = ui32Ret; + + return eError; +} + +static INLINE IMG_BOOL DevmemCPUCacheCoherency(PVRSRV_DEVICE_NODE *psDeviceNode, + PVRSRV_MEMALLOCFLAGS_T ulFlags) +{ + IMG_UINT32 ui32CPUCacheMode = PVRSRV_CPU_CACHE_MODE(ulFlags); + IMG_BOOL bRet = IMG_FALSE; + + PVR_ASSERT(ui32CPUCacheMode == PVRSRV_CPU_CACHE_MODE(ulFlags)); + + if (ui32CPUCacheMode == PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT) + { + bRet = PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig); + } + return bRet; +} + +static INLINE IMG_BOOL DevmemDeviceCacheCoherency(PVRSRV_DEVICE_NODE *psDeviceNode, + PVRSRV_MEMALLOCFLAGS_T ulFlags) +{ + IMG_UINT32 ui32DeviceCacheMode = PVRSRV_GPU_CACHE_MODE(ulFlags); + IMG_BOOL bRet = IMG_FALSE; + + PVR_ASSERT(ui32DeviceCacheMode == PVRSRV_GPU_CACHE_MODE(ulFlags)); + + if (ui32DeviceCacheMode == PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT) + { + bRet = PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig); + } + return bRet; +} diff --git a/drivers/gpu/drm/phytium/octopus/devicemem_typedefs.h b/drivers/gpu/drm/phytium/octopus/devicemem_typedefs.h new file mode 100644 index 000000000000..3ebabacd4d0a --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/devicemem_typedefs.h @@ -0,0 +1,142 @@ +/*************************************************************************/ /*! +@File +@Title Device Memory Management +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Client side part of device memory management -- this file + is forked from new_devmem_allocation.h as this one has to + reside in the top level include so that client code is able + to make use of the typedefs. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef DEVICEMEM_TYPEDEFS_H +#define DEVICEMEM_TYPEDEFS_H + +#include <powervr/mem_types.h> /* NOTE(review): angle-bracket header name was lost in extraction; verify against upstream */ +#include "img_types.h" +#include "pvrsrv_memallocflags.h" + +typedef struct DEVMEM_CONTEXT_TAG DEVMEM_CONTEXT; /*!< Convenience typedef for struct DEVMEM_CONTEXT_TAG */ +typedef struct DEVMEM_HEAP_TAG DEVMEM_HEAP; /*!< Convenience typedef for struct DEVMEM_HEAP_TAG */ +typedef struct DEVMEM_MEMDESC_TAG DEVMEM_MEMDESC; /*!< Convenience typedef for struct DEVMEM_MEMDESC_TAG */ +typedef struct DEVMEM_PAGELIST_TAG DEVMEM_PAGELIST; /*!< Convenience typedef for struct DEVMEM_PAGELIST_TAG */ + +typedef IMG_HANDLE DEVMEM_EXPORTHANDLE; /*!< Typedef for DeviceMem Export Handle */ +typedef IMG_UINT64 DEVMEM_EXPORTKEY; /*!< Typedef for DeviceMem Export Key */ +typedef IMG_DEVMEM_SIZE_T DEVMEM_SIZE_T; /*!< Typedef for DeviceMem SIZE_T */ +typedef IMG_DEVMEM_LOG2ALIGN_T DEVMEM_LOG2ALIGN_T; /*!< Typedef for DeviceMem LOG2 Alignment */ + +typedef struct DEVMEMX_PHYS_MEMDESC_TAG DEVMEMX_PHYSDESC; /*!< Convenience typedef for DevmemX physical */ +typedef struct DEVMEMX_VIRT_MEMDESC_TAG DEVMEMX_VIRTDESC; /*!< Convenience typedef for DevmemX virtual */ + +/*! calling code needs all the info in this struct, to be able to pass it around */ +typedef struct +{ + /*! A handle to the PMR. */ + IMG_HANDLE hPMRExportHandle; + /*! The "key" to prove we have authorisation to use this PMR */ + IMG_UINT64 uiPMRExportPassword; + /*! Size and alignment properties for this PMR. Note, these + numbers are not trusted in kernel, but we need to cache them + client-side in order to allocate from the VM arena. The kernel + will know the actual alignment and size of the PMR and thus + would prevent client code from breaching security here. Ditto + for physmem granularity (aka page size) if this is different + from alignment */ + IMG_DEVMEM_SIZE_T uiSize; + /*!
We call this "contiguity guarantee" to be more precise than + calling it "alignment" or "page size", terms which may seem + similar but have different emphasis. The number reported here + is the minimum contiguity guarantee from the creator of the + PMR. Now, there is no requirement to allocate that coarsely + from the RA. The alignment given to the RA simply needs to be + at least as coarse as the device page size for the heap we + ultimately intend to map into. What is important is that the + device MMU data page size is not greater than the minimum + contiguity guarantee from the PMR. This value is reported to + the client in order that it can choose to make early checks and + perhaps decide which heap (in a variable page size scenario) it + would be safe to map this PMR into. For convenience, the + client may choose to use this argument as the alignment of the + virtual range it chooses to allocate, but this is _not_ + necessary and in many cases would be able to get away with a + finer alignment, should the heap into which this PMR will be + mapped support it. */ + IMG_DEVMEM_LOG2ALIGN_T uiLog2ContiguityGuarantee; +} DEVMEM_EXPORTCOOKIE; + +/* Enum that describes the operation associated with changing sparse memory */ +typedef enum Resize { + SPARSE_RESIZE_NONE = 0, + + /* This should be set to indicate the change needs allocation */ + SPARSE_RESIZE_ALLOC = 1, + + /* This should be set to indicate the change needs free */ + SPARSE_RESIZE_FREE = 2, + + SPARSE_RESIZE_BOTH = ((IMG_UINT8)SPARSE_RESIZE_ALLOC | (IMG_UINT8)SPARSE_RESIZE_FREE), + + /* This should be set to silently swap underlying physical memory + * without disturbing its device or cpu virtual maps. + * This flag is not supported in the case of PDUMP and could lead to + * PDUMP panic when used.
+ */ + SPARSE_REMAP_MEM = 4, + + /* Should be set to get the sparse changes appear in cpu virtual map */ + SPARSE_MAP_CPU_ADDR = 8 +}SPARSE_MEM_RESIZE_FLAGS; + +/* To be used with all the sparse allocations that gets mapped to CPU Virtual + * space. The sparse allocation CPU mapping is torn down and re-mapped every + * time the sparse allocation layout changes. + */ +#define PVRSRV_UNMAP_ON_SPARSE_CHANGE 1 + +/* To use with DevmemSubAllocate() as the default factor if no over-allocation + * is desired. + */ +#define DEVMEM_NO_PRE_ALLOCATE_MULTIPLIER (1U) + +/* Defines the max length for PMR, MemDesc, Device memory History and RI debug + * annotations stored in memory, including the null terminator. + */ +#define DEVMEM_ANNOTATION_MAX_LEN (PVR_ANNOTATION_MAX_LEN + 1) + +#endif /* #ifndef DEVICEMEM_TYPEDEFS_H */ diff --git a/drivers/gpu/drm/phytium/octopus/devicemem_utils.c b/drivers/gpu/drm/phytium/octopus/devicemem_utils.c new file mode 100644 index 000000000000..d79692d31252 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/devicemem_utils.c @@ -0,0 +1,1174 @@ +/*************************************************************************/ /*! +@File +@Title Device Memory Management internal utility functions +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Utility functions used internally by device memory management + code. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ /**************************************************************************/ + +#include "allocmem.h" +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv_error.h" +#include "ra.h" +#include "devicemem_utils.h" +#include "client_mm_bridge.h" +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) +#include "client_ri_bridge.h" +#if defined(__KERNEL__) +#include "pvrsrv.h" +#else +#include "pvr_bridge_client.h" +#endif +#endif + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#include "proc_stats.h" +#endif + +/* + SVM heap management support functions for CPU (un)mapping + */ +#define DEVMEM_MAP_SVM_USER_MANAGED_RETRY 2 + +static inline PVRSRV_ERROR +DevmemCPUMapSVMKernelManaged(DEVMEM_HEAP *psHeap, + DEVMEM_IMPORT *psImport, + IMG_UINT64 *ui64MapAddress) +{ + PVRSRV_ERROR eError; + IMG_UINT64 ui64SvmMapAddr; + IMG_UINT64 ui64SvmMapAddrEnd; + IMG_UINT64 ui64SvmHeapAddrEnd; + + /* SVM heap management always has XXX_MANAGER_KERNEL unless we + have triggered the fall back code-path in which case we + should not be calling into this code-path */ + PVR_ASSERT(psHeap->ui32HeapManagerFlags == DEVMEM_HEAP_MANAGER_KERNEL); + + /* By acquiring the CPU virtual address here, it essentially + means we lock-down the virtual address for the duration + of the life-cycle of the allocation until a de-allocation + request comes in. Thus the allocation is guaranteed not to + change its virtual address on the CPU during its life-time. 
+ NOTE: Import might have already been CPU Mapped before now, + normally this is not a problem, see fall back */ + eError = DevmemImportStructCPUMap(psImport); + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "DevmemImportStructCPUMap"); + eError = PVRSRV_ERROR_DEVICEMEM_MAP_FAILED; + goto failSVM; + } + + /* Supplied kernel mmap virtual address is also device virtual address; + calculate the heap & kernel supplied mmap virtual address limits */ + ui64SvmMapAddr = (IMG_UINT64)(uintptr_t)psImport->sCPUImport.pvCPUVAddr; + ui64SvmHeapAddrEnd = psHeap->sBaseAddress.uiAddr + psHeap->uiSize; + ui64SvmMapAddrEnd = ui64SvmMapAddr + psImport->uiSize; + PVR_ASSERT(ui64SvmMapAddr != (IMG_UINT64)0); + + /* SVM limit test may fail if processor has more virtual address bits than device */ + if ((ui64SvmMapAddr >= ui64SvmHeapAddrEnd || ui64SvmMapAddrEnd > ui64SvmHeapAddrEnd) || + (ui64SvmMapAddr & ~(ui64SvmHeapAddrEnd - 1))) + { + /* Unmap incompatible SVM virtual address, this + may not release address if it was elsewhere + CPU Mapped before call into this function */ + DevmemImportStructCPUUnmap(psImport); + + /* Flag incompatible SVM mapping */ + eError = PVRSRV_ERROR_BAD_MAPPING; + goto failSVM; + } + + *ui64MapAddress = ui64SvmMapAddr; +failSVM: + /* either OK, MAP_FAILED or BAD_MAPPING */ + return eError; +} + +static inline void +DevmemCPUUnmapSVMKernelManaged(DEVMEM_HEAP *psHeap, DEVMEM_IMPORT *psImport) +{ + PVR_UNREFERENCED_PARAMETER(psHeap); + DevmemImportStructCPUUnmap(psImport); +} + +static inline PVRSRV_ERROR +DevmemCPUMapSVMUserManaged(DEVMEM_HEAP *psHeap, + DEVMEM_IMPORT *psImport, + IMG_UINT uiAlign, + IMG_UINT64 *ui64MapAddress) +{ + RA_LENGTH_T uiAllocatedSize; + RA_BASE_T uiAllocatedAddr; + IMG_UINT64 ui64SvmMapAddr; + IMG_UINT uiRetry = 0; + PVRSRV_ERROR eError; + + /* If SVM heap management has transitioned to XXX_MANAGER_USER, + this is essentially a fall back approach that ensures we + continue to satisfy SVM alloc. 
This approach is not without + hazards in that we may specify a virtual address that is + already in use by the user process */ + PVR_ASSERT(psHeap->ui32HeapManagerFlags == DEVMEM_HEAP_MANAGER_USER); + + /* Normally, for SVM heap allocations, CPUMap _must_ be done + before DevMap; ideally the initial CPUMap should be done by + SVM functions though this is not a hard requirement as long + as the prior elsewhere obtained CPUMap virtual address meets + SVM address requirements. This is a fall-back code-pathway + so we have to test that this assumption holds before we + progress any further */ + OSLockAcquire(psImport->sCPUImport.hLock); + + if (psImport->sCPUImport.ui32RefCount) + { + /* Already CPU Mapped SVM heap allocation, this prior elsewhere + obtained virtual address is responsible for the above + XXX_MANAGER_KERNEL failure. As we are not responsible for + this, we cannot progress any further so need to fail */ + PVR_DPF((PVR_DBG_ERROR, + "%s: Previously obtained CPU map address not SVM compatible" + , __func__)); + + /* Revert SVM heap to DEVMEM_HEAP_MANAGER_KERNEL */ + psHeap->ui32HeapManagerFlags = DEVMEM_HEAP_MANAGER_KERNEL; + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Reverting SVM heap back to kernel managed", + __func__)); + + OSLockRelease(psImport->sCPUImport.hLock); + + /* Do we need a more specific error code here */ + eError = PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED; + goto failSVM; + } + + OSLockRelease(psImport->sCPUImport.hLock); + + do + { + /* Next we proceed to instruct the kernel to use the RA_Alloc supplied + virtual address to map-in this SVM import suballocation; there is no + guarantee that this RA_Alloc virtual address may not collide with an + already in-use VMA range in the process */ + eError = RA_Alloc(psHeap->psQuantizedVMRA, + psImport->uiSize, + RA_NO_IMPORT_MULTIPLIER, + 0, /* flags: this RA doesn't use flags*/ + uiAlign, + "SVM_Virtual_Alloc", + &uiAllocatedAddr, + &uiAllocatedSize, + NULL /* don't care about per-import priv data */); + if 
(eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "RA_Alloc"); +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + if (eError == PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL) + { + PVRSRV_ERROR eErr; + eErr = BridgePVRSRVUpdateOOMStats(GetBridgeHandle(psHeap->psCtx->hDevConnection), + PVRSRV_PROCESS_STAT_TYPE_OOM_VIRTMEM_COUNT, + OSGetCurrentProcessID()); + PVR_LOG_IF_ERROR(eErr, "BridgePVRSRVUpdateOOMStats"); + } +#endif + goto failSVM; + } + + /* No reason for allocated virtual size to be different from + the PMR's size */ + psImport->sCPUImport.pvCPUVAddr = (void*)(uintptr_t)uiAllocatedAddr; + PVR_ASSERT(uiAllocatedSize == psImport->uiSize); + + /* Map the import or allocation using the RA_Alloc virtual address; + the kernel may fail the request if the supplied virtual address + is already in-use in which case we re-try using another virtual + address obtained from the RA_Alloc */ + eError = DevmemImportStructCPUMap(psImport); + if (eError != PVRSRV_OK) + { + /* For now we simply discard failed RA_Alloc() obtained virtual + address (i.e. 
plenty of virtual space), this prevents us from + re-using these and furthermore essentially blacklists these + addresses from future SVM consideration; We exit fall-back + attempt if retry exceeds the fall-back retry limit */ + if (uiRetry++ > DEVMEM_MAP_SVM_USER_MANAGED_RETRY) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Cannot find SVM compatible address, bad mapping", + __func__)); + eError = PVRSRV_ERROR_BAD_MAPPING; + goto failSVM; + } + } + else + { + /* Found compatible SVM virtual address, set as device virtual address */ + ui64SvmMapAddr = (IMG_UINT64)(uintptr_t)psImport->sCPUImport.pvCPUVAddr; + } + } while (eError != PVRSRV_OK); + + *ui64MapAddress = ui64SvmMapAddr; +failSVM: + return eError; +} + +static inline void +DevmemCPUUnmapSVMUserManaged(DEVMEM_HEAP *psHeap, DEVMEM_IMPORT *psImport) +{ + RA_BASE_T uiAllocatedAddr; + + /* We only free SVM compatible addresses, all addresses in + the blacklist are essentially excluded from future RA_Alloc */ + uiAllocatedAddr = psImport->sDeviceImport.sDevVAddr.uiAddr; + RA_Free(psHeap->psQuantizedVMRA, uiAllocatedAddr); + + DevmemImportStructCPUUnmap(psImport); +} + +static inline PVRSRV_ERROR +DevmemImportStructDevMapSVM(DEVMEM_HEAP *psHeap, + DEVMEM_IMPORT *psImport, + IMG_UINT uiAlign, + IMG_UINT64 *ui64MapAddress) +{ + PVRSRV_ERROR eError; + + switch (psHeap->ui32HeapManagerFlags) + { + case DEVMEM_HEAP_MANAGER_KERNEL: + eError = DevmemCPUMapSVMKernelManaged(psHeap, + psImport, + ui64MapAddress); + if (eError == PVRSRV_ERROR_BAD_MAPPING) + { + /* If the SVM map address is outside of SVM heap limits, + change heap type to DEVMEM_HEAP_MANAGER_USER */ + psHeap->ui32HeapManagerFlags = DEVMEM_HEAP_MANAGER_USER; + + PVR_DPF((PVR_DBG_WARNING, + "%s: Kernel managed SVM heap is now user managed", + __func__)); + + /* Retry using user managed fall-back approach */ + eError = DevmemCPUMapSVMUserManaged(psHeap, + psImport, + uiAlign, + ui64MapAddress); + } + break; + + case DEVMEM_HEAP_MANAGER_USER: + eError = 
DevmemCPUMapSVMUserManaged(psHeap, + psImport, + uiAlign, + ui64MapAddress); + break; + + default: + eError = PVRSRV_ERROR_INVALID_PARAMS; + break; + } + + return eError; +} + +static inline void +DevmemImportStructDevUnmapSVM(DEVMEM_HEAP *psHeap, DEVMEM_IMPORT *psImport) +{ + switch (psHeap->ui32HeapManagerFlags) + { + case DEVMEM_HEAP_MANAGER_KERNEL: + DevmemCPUUnmapSVMKernelManaged(psHeap, psImport); + break; + + case DEVMEM_HEAP_MANAGER_USER: + DevmemCPUUnmapSVMUserManaged(psHeap, psImport); + break; + + default: + break; + } +} + +/* + The Devmem import structure is the structure we use + to manage memory that is "imported" (which is page + granular) from the server into our process, this + includes allocations. + + This allows memory to be imported without requiring + any CPU or device mapping. Memory can then be mapped + into the device or CPU on demand, but neither is + required. + */ + +IMG_INTERNAL +void DevmemImportStructAcquire(DEVMEM_IMPORT *psImport) +{ + IMG_INT iRefCount = OSAtomicIncrement(&psImport->hRefCount); + PVR_UNREFERENCED_PARAMETER(iRefCount); + PVR_ASSERT(iRefCount != 1); + + DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", + __func__, + psImport, + iRefCount-1, + iRefCount); +} + +IMG_INTERNAL +IMG_BOOL DevmemImportStructRelease(DEVMEM_IMPORT *psImport) +{ + IMG_INT iRefCount = OSAtomicDecrement(&psImport->hRefCount); + PVR_ASSERT(iRefCount >= 0); + + DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", + __func__, + psImport, + iRefCount+1, + iRefCount); + + if (iRefCount == 0) + { + BridgePMRUnrefPMR(GetBridgeHandle(psImport->hDevConnection), + psImport->hPMR); + OSLockDestroy(psImport->sCPUImport.hLock); + OSLockDestroy(psImport->sDeviceImport.hLock); + OSLockDestroy(psImport->hLock); + OSFreeMem(psImport); + + return IMG_TRUE; + } + + return IMG_FALSE; +} + +IMG_INTERNAL +void DevmemImportDiscard(DEVMEM_IMPORT *psImport) +{ + PVR_ASSERT(OSAtomicRead(&psImport->hRefCount) == 0); + OSLockDestroy(psImport->sCPUImport.hLock); + 
OSLockDestroy(psImport->sDeviceImport.hLock); + OSLockDestroy(psImport->hLock); + OSFreeMem(psImport); +} + +IMG_INTERNAL +PVRSRV_ERROR DevmemMemDescAlloc(DEVMEM_MEMDESC **ppsMemDesc) +{ + DEVMEM_MEMDESC *psMemDesc; + PVRSRV_ERROR eError; + + /* Must be zeroed in case it needs to be freed before it is initialised */ + psMemDesc = OSAllocZMem(sizeof(DEVMEM_MEMDESC)); + PVR_GOTO_IF_NOMEM(psMemDesc, eError, failAlloc); + + eError = OSLockCreate(&psMemDesc->hLock); + PVR_GOTO_IF_ERROR(eError, failMDLock); + + eError = OSLockCreate(&psMemDesc->sDeviceMemDesc.hLock); + PVR_GOTO_IF_ERROR(eError, failDMDLock); + + eError = OSLockCreate(&psMemDesc->sCPUMemDesc.hLock); + PVR_GOTO_IF_ERROR(eError, failCMDLock); + + OSAtomicWrite(&psMemDesc->hRefCount, 0); + + *ppsMemDesc = psMemDesc; + + return PVRSRV_OK; + +failCMDLock: + OSLockDestroy(psMemDesc->sDeviceMemDesc.hLock); +failDMDLock: + OSLockDestroy(psMemDesc->hLock); +failMDLock: + OSFreeMem(psMemDesc); +failAlloc: + PVR_ASSERT(eError != PVRSRV_OK); + + return eError; +} + +/* + Init the MemDesc structure + */ +IMG_INTERNAL +void DevmemMemDescInit(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + DEVMEM_IMPORT *psImport, + IMG_DEVMEM_SIZE_T uiSize) +{ + DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", + __func__, + psMemDesc, + 0, + 1); + + psMemDesc->psImport = psImport; + psMemDesc->uiOffset = uiOffset; + + psMemDesc->sDeviceMemDesc.ui32RefCount = 0; + psMemDesc->sCPUMemDesc.ui32RefCount = 0; + psMemDesc->uiAllocSize = uiSize; + psMemDesc->hPrivData = NULL; + psMemDesc->ui32AllocationIndex = DEVICEMEM_HISTORY_ALLOC_INDEX_NONE; + + OSAtomicWrite(&psMemDesc->hRefCount, 1); +} + +IMG_INTERNAL +void DevmemMemDescAcquire(DEVMEM_MEMDESC *psMemDesc) +{ + IMG_INT iRefCount = 0; + + iRefCount = OSAtomicIncrement(&psMemDesc->hRefCount); + DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", + __func__, + psMemDesc, + iRefCount-1, + iRefCount); + + PVR_UNREFERENCED_PARAMETER(iRefCount); +} + +IMG_INTERNAL +IMG_BOOL 
DevmemMemDescRelease(DEVMEM_MEMDESC *psMemDesc) +{ + IMG_INT iRefCount; + PVR_ASSERT(psMemDesc != NULL); + + iRefCount = OSAtomicDecrement(&psMemDesc->hRefCount); + PVR_ASSERT(iRefCount >= 0); + + DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", + __func__, + psMemDesc, + iRefCount+1, + iRefCount); + + if (iRefCount == 0) + { +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psMemDesc->psImport->hDevConnection), PVRSRV_BRIDGE_RI) && + (psMemDesc->hRIHandle)) + { + PVRSRV_ERROR eError; + + eError = BridgeRIDeleteMEMDESCEntry(GetBridgeHandle(psMemDesc->psImport->hDevConnection), + psMemDesc->hRIHandle); + PVR_LOG_IF_ERROR(eError, "BridgeRIDeleteMEMDESCEntry"); + } +#endif + + OSLockAcquire(psMemDesc->psImport->hLock); + if (psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_SUBALLOCATABLE) + { + /* As soon as the first sub-allocation on the psImport is freed + * we might get dirty memory when reusing it. + * We have to delete the ZEROED, CLEAN & POISONED flag */ + + psMemDesc->psImport->uiProperties &= + ~(DEVMEM_PROPERTIES_IMPORT_IS_ZEROED | + DEVMEM_PROPERTIES_IMPORT_IS_CLEAN | + DEVMEM_PROPERTIES_IMPORT_IS_POISONED); + + OSLockRelease(psMemDesc->psImport->hLock); + + RA_Free(psMemDesc->psImport->sDeviceImport.psHeap->psSubAllocRA, + psMemDesc->psImport->sDeviceImport.sDevVAddr.uiAddr + + psMemDesc->uiOffset); + } + else + { + OSLockRelease(psMemDesc->psImport->hLock); + DevmemImportStructRelease(psMemDesc->psImport); + } + + OSLockDestroy(psMemDesc->sCPUMemDesc.hLock); + OSLockDestroy(psMemDesc->sDeviceMemDesc.hLock); + OSLockDestroy(psMemDesc->hLock); + OSFreeMem(psMemDesc); + + return IMG_TRUE; + } + + return IMG_FALSE; +} + +IMG_INTERNAL +void DevmemMemDescDiscard(DEVMEM_MEMDESC *psMemDesc) +{ + PVR_ASSERT(OSAtomicRead(&psMemDesc->hRefCount) == 0); + + OSLockDestroy(psMemDesc->sCPUMemDesc.hLock); + OSLockDestroy(psMemDesc->sDeviceMemDesc.hLock); + OSLockDestroy(psMemDesc->hLock); + OSFreeMem(psMemDesc); +} + + +IMG_INTERNAL 
+PVRSRV_ERROR DevmemValidateParams(IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_ALIGN_T uiAlign, + PVRSRV_MEMALLOCFLAGS_T *puiFlags) +{ + if ((*puiFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) && + (*puiFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Zero on Alloc and Poison on Alloc are mutually exclusive.", + __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (uiAlign & (uiAlign-1)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: The requested alignment is not a power of two.", + __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (uiSize == 0) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Please request a non-zero size value.", + __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* If zero flag is set we have to have write access to the page. */ + if (PVRSRV_CHECK_ZERO_ON_ALLOC(*puiFlags) || PVRSRV_CHECK_CPU_WRITEABLE(*puiFlags)) + { + (*puiFlags) |= PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_READABLE; + } + + return PVRSRV_OK; +} + +/* + Allocate and init an import structure + */ +IMG_INTERNAL +PVRSRV_ERROR DevmemImportStructAlloc(SHARED_DEV_CONNECTION hDevConnection, + DEVMEM_IMPORT **ppsImport) +{ + DEVMEM_IMPORT *psImport; + PVRSRV_ERROR eError; + + psImport = OSAllocMem(sizeof(*psImport)); + PVR_RETURN_IF_FALSE(psImport != NULL, PVRSRV_ERROR_OUT_OF_MEMORY); + + /* Setup some known bad values for things we don't have yet */ + psImport->sDeviceImport.hReservation = LACK_OF_RESERVATION_POISON; + psImport->sDeviceImport.hMapping = LACK_OF_MAPPING_POISON; + psImport->sDeviceImport.psHeap = NULL; + psImport->sDeviceImport.bMapped = IMG_FALSE; + + eError = OSLockCreate(&psImport->sDeviceImport.hLock); + PVR_GOTO_IF_ERROR(eError, failDIOSLockCreate); + + psImport->sCPUImport.hOSMMapData = NULL; + psImport->sCPUImport.pvCPUVAddr = NULL; + + eError = OSLockCreate(&psImport->sCPUImport.hLock); + PVR_GOTO_IF_ERROR(eError, failCIOSLockCreate); + + /* Set up common elements */ + psImport->hDevConnection = 
hDevConnection; + + /* Setup properties */ + psImport->uiProperties = 0; + + /* Setup refcounts */ + psImport->sDeviceImport.ui32RefCount = 0; + psImport->sCPUImport.ui32RefCount = 0; + OSAtomicWrite(&psImport->hRefCount, 0); + + /* Create the lock */ + eError = OSLockCreate(&psImport->hLock); + PVR_GOTO_IF_ERROR(eError, failILockAlloc); + + *ppsImport = psImport; + + return PVRSRV_OK; + +failILockAlloc: + OSLockDestroy(psImport->sCPUImport.hLock); +failCIOSLockCreate: + OSLockDestroy(psImport->sDeviceImport.hLock); +failDIOSLockCreate: + OSFreeMem(psImport); + PVR_ASSERT(eError != PVRSRV_OK); + + return eError; +} + +/* + Initialise the import structure + */ +IMG_INTERNAL +void DevmemImportStructInit(DEVMEM_IMPORT *psImport, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_ALIGN_T uiAlign, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_HANDLE hPMR, + DEVMEM_PROPERTIES_T uiProperties) +{ + DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", + __func__, + psImport, + 0, + 1); + + psImport->uiSize = uiSize; + psImport->uiAlign = uiAlign; + psImport->uiFlags = uiFlags; + psImport->hPMR = hPMR; + psImport->uiProperties = uiProperties; + OSAtomicWrite(&psImport->hRefCount, 1); +} + +/* Allocate the requested device virtual address region + * from the heap */ +static PVRSRV_ERROR DevmemReserveVARange(DEVMEM_HEAP *psHeap, + DEVMEM_SIZE_T uiSize, + IMG_UINT uiAlign, + RA_LENGTH_T *puiAllocatedSize, + IMG_UINT64 ui64OptionalMapAddress) +{ + PVRSRV_ERROR eError; + + /* Allocate space in the VM */ + eError = RA_Alloc_Range(psHeap->psQuantizedVMRA, + uiSize, + 0, + uiAlign, + ui64OptionalMapAddress, + puiAllocatedSize); + + if (PVRSRV_OK != eError) + { +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + if ((eError == PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL) || + (eError == PVRSRV_ERROR_RA_REQUEST_VIRT_ADDR_FAIL)) + { + PVRSRV_ERROR eErr; + eErr = BridgePVRSRVUpdateOOMStats(GetBridgeHandle(psHeap->psCtx->hDevConnection), + PVRSRV_PROCESS_STAT_TYPE_INVALID_VIRTMEM, + OSGetCurrentProcessID()); + PVR_LOG_IF_ERROR(eErr, 
"BridgePVRSRVUpdateOOMStats"); + } +#endif + return eError; + } + + /* No reason for the allocated virtual size to be different from + the PMR's size */ + PVR_ASSERT(*puiAllocatedSize == uiSize); + + return PVRSRV_OK; +} + +/* + Map an import to the device + */ +IMG_INTERNAL +PVRSRV_ERROR DevmemImportStructDevMap(DEVMEM_HEAP *psHeap, + IMG_BOOL bMap, + DEVMEM_IMPORT *psImport, + IMG_UINT64 ui64OptionalMapAddress) +{ + DEVMEM_DEVICE_IMPORT *psDeviceImport; + RA_BASE_T uiAllocatedAddr; + RA_LENGTH_T uiAllocatedSize; + IMG_DEV_VIRTADDR sBase; + IMG_HANDLE hReservation; + PVRSRV_ERROR eError; + IMG_UINT uiAlign; + IMG_BOOL bDestroyed = IMG_FALSE; + + /* Round the provided import alignment to the configured heap alignment */ + uiAlign = 1ULL << psHeap->uiLog2ImportAlignment; + uiAlign = (psImport->uiAlign + uiAlign - 1) & ~(uiAlign-1); + + psDeviceImport = &psImport->sDeviceImport; + + OSLockAcquire(psDeviceImport->hLock); + DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", + __func__, + psImport, + psDeviceImport->ui32RefCount, + psDeviceImport->ui32RefCount+1); + + if (psDeviceImport->ui32RefCount++ == 0) + { + DevmemImportStructAcquire(psImport); + + OSAtomicIncrement(&psHeap->hImportCount); + + if (PVRSRV_CHECK_SVM_ALLOC(psImport->uiFlags)) + { + /* SVM (shared virtual memory) imports or allocations always + need to acquire CPU virtual address first as address is + used to map the allocation into the device virtual address + space; i.e. the virtual address of the allocation for both + the CPU/GPU must be identical. 
*/ + eError = DevmemImportStructDevMapSVM(psHeap, + psImport, + uiAlign, + &ui64OptionalMapAddress); + PVR_GOTO_IF_ERROR(eError, failVMRAAlloc); + } + + if (ui64OptionalMapAddress == 0) + { + /* If heap is _completely_ managed by USER or KERNEL, we shouldn't + * be here, as this is RA manager code-path */ + if (psHeap->ui32HeapManagerFlags == DEVMEM_HEAP_MANAGER_USER || + psHeap->ui32HeapManagerFlags == DEVMEM_HEAP_MANAGER_KERNEL) + { + PVR_DPF((PVR_DBG_ERROR, + psHeap->ui32HeapManagerFlags == DEVMEM_HEAP_MANAGER_USER ? + "%s: Heap is user managed, please use PVRSRVMapToDeviceAddress().": + "%s: Heap is kernel managed, use right allocation flags (e.g. SVM).", + __func__)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, failVMRAAlloc); + } + + if (psHeap->ui32HeapManagerFlags == DEVMEM_HEAP_MANAGER_UNKNOWN) + { + /* Only set the heap manager (to RA) at first map when heap manager + * is unknown. It might be a dual heap (both, user and RA managed), + * in which case heap manager is set at creation time */ + psHeap->ui32HeapManagerFlags = DEVMEM_HEAP_MANAGER_RA; + } + + /* Allocate space in the VM */ + eError = RA_Alloc(psHeap->psQuantizedVMRA, + psImport->uiSize, + RA_NO_IMPORT_MULTIPLIER, + 0, /* flags: this RA doesn't use flags*/ + uiAlign, + "Virtual_Alloc", + &uiAllocatedAddr, + &uiAllocatedSize, + NULL /* don't care about per-import priv data */ + ); + if (PVRSRV_OK != eError) + { +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + if (eError == PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL) + { + PVRSRV_ERROR eErr; + eErr = BridgePVRSRVUpdateOOMStats(GetBridgeHandle(psHeap->psCtx->hDevConnection), + PVRSRV_PROCESS_STAT_TYPE_OOM_VIRTMEM_COUNT, + OSGetCurrentProcessID()); + PVR_LOG_IF_ERROR(eErr, "BridgePVRSRVUpdateOOMStats"); + } +#endif + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_OUT_OF_DEVICE_VM, failVMRAAlloc); + } + + /* No reason for the allocated virtual size to be different from + the PMR's size */ + PVR_ASSERT(uiAllocatedSize == psImport->uiSize); + + 
sBase.uiAddr = uiAllocatedAddr; + + } + else + { + IMG_UINT64 ui64ValidEndAddr; + + /* Ensure supplied ui64OptionalMapAddress is within heap range */ + ui64ValidEndAddr = psHeap->sBaseAddress.uiAddr + psHeap->uiSize; + if ((ui64OptionalMapAddress + psImport->uiSize > ui64ValidEndAddr) || + (ui64OptionalMapAddress < psHeap->sBaseAddress.uiAddr)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: ui64OptionalMapAddress %p is outside of heap limits <%p:%p>." + , __func__ + , (void*)(uintptr_t)ui64OptionalMapAddress + , (void*)(uintptr_t)psHeap->sBaseAddress.uiAddr + , (void*)(uintptr_t)ui64ValidEndAddr)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, failVMRAAlloc); + } + + switch (psHeap->ui32HeapManagerFlags) + { + case DEVMEM_HEAP_MANAGER_UNKNOWN: + /* DEVMEM_HEAP_MANAGER_USER can apply to _any_ heap and can only + * be determined here. This heap type transitions from + * DEVMEM_HEAP_MANAGER_UNKNOWN to DEVMEM_HEAP_MANAGER_USER on + * 1st alloc. */ + psHeap->ui32HeapManagerFlags = DEVMEM_HEAP_MANAGER_USER; + break; + + case DEVMEM_HEAP_MANAGER_USER: + case DEVMEM_HEAP_MANAGER_KERNEL: + if (! psHeap->uiSize) + { + PVR_DPF((PVR_DBG_ERROR, + psHeap->ui32HeapManagerFlags == DEVMEM_HEAP_MANAGER_USER ? + "%s: Heap DEVMEM_HEAP_MANAGER_USER is disabled.": + "%s: Heap DEVMEM_HEAP_MANAGER_KERNEL is disabled." 
+ , __func__)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_HEAP, failVMRAAlloc); + } + break; + + case DEVMEM_HEAP_MANAGER_DUAL_USER_RA: + /* When the heap is dual managed, ensure supplied ui64OptionalMapAddress + * and import size are within heap address space range */ + if (ui64OptionalMapAddress + psImport->uiSize <= + psHeap->sBaseAddress.uiAddr + psHeap->uiReservedRegionSize) + { + break; + } + else + { + /* Allocate requested VM range */ + eError = DevmemReserveVARange(psHeap, + psImport->uiSize, + uiAlign, + &uiAllocatedSize, + ui64OptionalMapAddress); + if (eError != PVRSRV_OK) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_VA_ALLOC_FAILED, failVMRAAlloc); + } + + } + break; + case DEVMEM_HEAP_MANAGER_RA: + /* Allocate requested VM range */ + eError = DevmemReserveVARange(psHeap, + psImport->uiSize, + uiAlign, + &uiAllocatedSize, + ui64OptionalMapAddress); + if (eError != PVRSRV_OK) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_VA_ALLOC_FAILED, failVMRAAlloc); + } + break; + + default: + break; + } + + if (ui64OptionalMapAddress & ((1 << psHeap->uiLog2Quantum) - 1)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid address to map to. Please provide an " + "address aligned to a page multiple of the heap." + , __func__)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, failVMRAAlloc); + } + + if (psImport->uiSize & ((1 << psHeap->uiLog2Quantum) - 1)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid heap to map to. " + "Please choose a heap that can handle smaller page sizes." 
+ , __func__)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, failVMRAAlloc); + } + + uiAllocatedAddr = ui64OptionalMapAddress; + uiAllocatedSize = psImport->uiSize; + sBase.uiAddr = uiAllocatedAddr; + } + + if (psHeap->bPremapped) + { + /* no virtual address reservation and mapping are required for memory that's already mapped */ + psDeviceImport->hReservation = LACK_OF_RESERVATION_POISON; + psDeviceImport->hMapping = LACK_OF_MAPPING_POISON; + } + else + { + /* Setup page tables for the allocated VM space */ + eError = BridgeDevmemIntReserveRange(GetBridgeHandle(psHeap->psCtx->hDevConnection), + psHeap->hDevMemServerHeap, + sBase, + uiAllocatedSize, + &hReservation); + PVR_GOTO_IF_ERROR(eError, failReserve); + + if (bMap) + { + PVRSRV_MEMALLOCFLAGS_T uiMapFlags; + + uiMapFlags = psImport->uiFlags & PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK; + + /* Actually map the PMR to allocated VM space */ + eError = BridgeDevmemIntMapPMR(GetBridgeHandle(psHeap->psCtx->hDevConnection), + psHeap->hDevMemServerHeap, + hReservation, + psImport->hPMR, + uiMapFlags, + &psDeviceImport->hMapping); + PVR_GOTO_IF_ERROR(eError, failMap); + + psDeviceImport->bMapped = IMG_TRUE; + } + + psDeviceImport->hReservation = hReservation; + } + + /* Setup device mapping specific parts of the mapping info */ + psDeviceImport->sDevVAddr.uiAddr = uiAllocatedAddr; + psDeviceImport->psHeap = psHeap; + } + else + { + /* + Check that we've been asked to map it into the + same heap 2nd time around + */ + if (psHeap != psDeviceImport->psHeap) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_HEAP, failParams); + } + } + OSLockRelease(psDeviceImport->hLock); + + return PVRSRV_OK; + +failMap: + if (!psHeap->bPremapped) + { + BridgeDevmemIntUnreserveRange(GetBridgeHandle(psHeap->psCtx->hDevConnection), + hReservation); + } +failReserve: + if (ui64OptionalMapAddress == 0) + { + RA_Free(psHeap->psQuantizedVMRA, + uiAllocatedAddr); + } +failVMRAAlloc: + if ((ui64OptionalMapAddress) && 
PVRSRV_CHECK_SVM_ALLOC(psImport->uiFlags)) + { + DevmemImportStructDevUnmapSVM(psHeap, psImport); + } + bDestroyed = DevmemImportStructRelease(psImport); + OSAtomicDecrement(&psHeap->hImportCount); +failParams: + if (!bDestroyed) + { + psDeviceImport->ui32RefCount--; + OSLockRelease(psDeviceImport->hLock); + } + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +/* + Unmap an import from the Device + */ +IMG_INTERNAL +IMG_BOOL DevmemImportStructDevUnmap(DEVMEM_IMPORT *psImport) +{ + PVRSRV_ERROR eError; + DEVMEM_DEVICE_IMPORT *psDeviceImport; + + psDeviceImport = &psImport->sDeviceImport; + + OSLockAcquire(psDeviceImport->hLock); + DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", + __func__, + psImport, + psDeviceImport->ui32RefCount, + psDeviceImport->ui32RefCount-1); + + if (--psDeviceImport->ui32RefCount == 0) + { + DEVMEM_HEAP *psHeap = psDeviceImport->psHeap; + + if (!psHeap->bPremapped) + { + if (psDeviceImport->bMapped) + { + eError = BridgeDevmemIntUnmapPMR(GetBridgeHandle(psImport->hDevConnection), + psDeviceImport->hMapping); + PVR_ASSERT(eError == PVRSRV_OK); + } + + eError = BridgeDevmemIntUnreserveRange(GetBridgeHandle(psImport->hDevConnection), + psDeviceImport->hReservation); + PVR_ASSERT(eError == PVRSRV_OK); + } + + psDeviceImport->bMapped = IMG_FALSE; + psDeviceImport->hMapping = LACK_OF_MAPPING_POISON; + psDeviceImport->hReservation = LACK_OF_RESERVATION_POISON; + + /* DEVMEM_HEAP_MANAGER_RA can also come from a dual managed heap in which case, + we need to check if the allocated VA falls within RA managed range */ + if ((psHeap->ui32HeapManagerFlags & DEVMEM_HEAP_MANAGER_RA) && + psDeviceImport->sDevVAddr.uiAddr >= (psHeap->sBaseAddress.uiAddr + psHeap->uiReservedRegionSize) && + psDeviceImport->sDevVAddr.uiAddr < (psHeap->sBaseAddress.uiAddr + psHeap->uiSize)) + { + RA_Free(psHeap->psQuantizedVMRA, psDeviceImport->sDevVAddr.uiAddr); + } + + if (PVRSRV_CHECK_SVM_ALLOC(psImport->uiFlags)) + { + DevmemImportStructDevUnmapSVM(psHeap, psImport); + } + + 
OSLockRelease(psDeviceImport->hLock); + + DevmemImportStructRelease(psImport); + + OSAtomicDecrement(&psHeap->hImportCount); + + return IMG_TRUE; + } + else + { + OSLockRelease(psDeviceImport->hLock); + return IMG_FALSE; + } +} + +/* + Map an import into the CPU + */ +IMG_INTERNAL +PVRSRV_ERROR DevmemImportStructCPUMap(DEVMEM_IMPORT *psImport) +{ + PVRSRV_ERROR eError; + DEVMEM_CPU_IMPORT *psCPUImport; + size_t uiMappingLength; + + psCPUImport = &psImport->sCPUImport; + + OSLockAcquire(psCPUImport->hLock); + DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", + __func__, + psImport, + psCPUImport->ui32RefCount, + psCPUImport->ui32RefCount+1); + + if (psCPUImport->ui32RefCount++ == 0) + { + DevmemImportStructAcquire(psImport); + + eError = OSMMapPMR(GetBridgeHandle(psImport->hDevConnection), + psImport->hPMR, + psImport->uiSize, + psImport->uiFlags, + &psCPUImport->hOSMMapData, + &psCPUImport->pvCPUVAddr, + &uiMappingLength); + PVR_GOTO_IF_ERROR(eError, failMap); + + /* MappingLength might be rounded up to page size */ + PVR_ASSERT(uiMappingLength >= psImport->uiSize); + } + OSLockRelease(psCPUImport->hLock); + + return PVRSRV_OK; + +failMap: + psCPUImport->ui32RefCount--; + if (!DevmemImportStructRelease(psImport)) + { + OSLockRelease(psCPUImport->hLock); + } + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +/* + Unmap an import from the CPU + */ +IMG_INTERNAL +void DevmemImportStructCPUUnmap(DEVMEM_IMPORT *psImport) +{ + DEVMEM_CPU_IMPORT *psCPUImport; + + psCPUImport = &psImport->sCPUImport; + + OSLockAcquire(psCPUImport->hLock); + DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", + __func__, + psImport, + psCPUImport->ui32RefCount, + psCPUImport->ui32RefCount-1); + + if (--psCPUImport->ui32RefCount == 0) + { + /* psImport->uiSize is a 64-bit quantity whereas the 5th + * argument to OSUnmapPMR is a 32-bit quantity on 32-bit systems + * hence a compiler warning of implicit cast and loss of data. + * Added explicit cast and assert to remove warning. 
+ */ +#if defined(__linux__) && defined(__i386__) + PVR_ASSERT(psImport->uiSizehDevConnection), + psImport->hPMR, + psCPUImport->hOSMMapData, + psCPUImport->pvCPUVAddr, + (size_t)psImport->uiSize); + + psCPUImport->hOSMMapData = NULL; + psCPUImport->pvCPUVAddr = NULL; + + OSLockRelease(psCPUImport->hLock); + + DevmemImportStructRelease(psImport); + } + else + { + OSLockRelease(psCPUImport->hLock); + } +} diff --git a/drivers/gpu/drm/phytium/octopus/devicemem_utils.h b/drivers/gpu/drm/phytium/octopus/devicemem_utils.h new file mode 100644 index 000000000000..33cba9a460f4 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/devicemem_utils.h @@ -0,0 +1,522 @@ +/*************************************************************************/ /*! +@File +@Title Device Memory Management internal utility functions +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Utility functions used internally by device memory management + code. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef DEVICEMEM_UTILS_H +#define DEVICEMEM_UTILS_H + +#include "devicemem.h" +#include "img_types.h" +#include "pvrsrv_error.h" +#include "pvr_debug.h" +#include "allocmem.h" +#include "ra.h" +#include "osfunc.h" +#include "lock.h" +#include "osmmap.h" + +#define DEVMEM_HEAPNAME_MAXLENGTH 160 + +/* + * Reserved VA space of a heap must always be multiple of DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY, + * this check is validated in the DDK. Note this is only reserving "Virtual Address" space and + * physical allocations (and mappings thereon) should only be done as much as required (to avoid + * wastage). + * Granularity has been chosen to support the max possible practically used OS page size. 
+ */ +#define DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY 0x10000 /* 64KB is MAX anticipated OS page size */ + +/* + * VA heap size should be at least OS page size. This check is validated in the DDK. + */ +#define DEVMEM_HEAP_MINIMUM_SIZE 0x10000 /* 64KB is MAX anticipated OS page size */ + +#if defined(DEVMEM_DEBUG) && defined(REFCOUNT_DEBUG) +#define DEVMEM_REFCOUNT_PRINT(fmt, ...) PVRSRVDebugPrintf(PVR_DBG_ERROR, __FILE__, __LINE__, fmt, __VA_ARGS__) +#else +#define DEVMEM_REFCOUNT_PRINT(fmt, ...) +#endif + +/* If we need a "hMapping" but we don't have a server-side mapping, we poison + * the entry with this value so that it's easily recognised in the debugger. + * Note that this is potentially a valid handle, but then so is NULL, which is + * no better, indeed worse, as it's not obvious in the debugger. The value + * doesn't matter. We _never_ use it (and because it's valid, we never assert + * it isn't this) but it's nice to have a value in the source code that we can + * grep for if things go wrong. + */ +#define LACK_OF_MAPPING_POISON ((IMG_HANDLE)0x6116dead) +#define LACK_OF_RESERVATION_POISON ((IMG_HANDLE)0x7117dead) + +#define DEVICEMEM_HISTORY_ALLOC_INDEX_NONE 0xFFFFFFFF + +struct DEVMEM_CONTEXT_TAG +{ + + SHARED_DEV_CONNECTION hDevConnection; + + /* Number of heaps that have been created in this context + * (regardless of whether they have allocations) + */ + IMG_UINT32 uiNumHeaps; + + /* Each "DEVMEM_CONTEXT" has a counterpart in the server, which + * is responsible for handling the mapping into device MMU. + * We have a handle to that here. + */ + IMG_HANDLE hDevMemServerContext; + + /* Number of automagically created heaps in this context, + * i.e. 
those that are born at context creation time from the + * chosen "heap config" or "blueprint" + */ + IMG_UINT32 uiAutoHeapCount; + + /* Pointer to array of such heaps */ + struct DEVMEM_HEAP_TAG **ppsAutoHeapArray; + + /* The cache line size for use when allocating memory, + * as it is not queryable on the client side + */ + IMG_UINT32 ui32CPUCacheLineSize; + + /* Private data handle for device specific data */ + IMG_HANDLE hPrivData; +}; + +/* Flags that record how a heaps virtual address space is managed. */ +#define DEVMEM_HEAP_MANAGER_UNKNOWN 0 +/* Heap VAs assigned by the client of Services APIs, heap's RA not used at all. */ +#define DEVMEM_HEAP_MANAGER_USER (1U << 0) +/* Heap VAs managed by the OSs kernel, VA from CPU mapping call used */ +#define DEVMEM_HEAP_MANAGER_KERNEL (1U << 1) +/* Heap VAs managed by the heap's own RA */ +#define DEVMEM_HEAP_MANAGER_RA (1U << 2) +/* Heap VAs managed jointly by Services and the client of Services. + * The reserved region of the heap is managed explicitly by the client of Services + * The non-reserved region of the heap is managed by the heap's own RA */ +#define DEVMEM_HEAP_MANAGER_DUAL_USER_RA (DEVMEM_HEAP_MANAGER_USER | DEVMEM_HEAP_MANAGER_RA) + +struct DEVMEM_HEAP_TAG +{ + /* Name of heap - for debug and lookup purposes. */ + IMG_CHAR *pszName; + + /* Number of live imports in the heap */ + ATOMIC_T hImportCount; + + /* Base address and size of heap, required by clients due to some + * requesters not being full range + */ + IMG_DEV_VIRTADDR sBaseAddress; + DEVMEM_SIZE_T uiSize; + + DEVMEM_SIZE_T uiReservedRegionSize; /* uiReservedRegionLength in DEVMEM_HEAP_BLUEPRINT */ + + /* The heap manager, describing if the space is managed by the user, an RA, + * kernel or combination */ + IMG_UINT32 ui32HeapManagerFlags; + + /* This RA is for managing sub-allocations within the imports (PMRs) + * within the heap's virtual space. RA only used in DevmemSubAllocate() + * to track sub-allocated buffers. 
+ * + * Resource Span - a PMR import added when the RA calls the + * imp_alloc CB (SubAllocImportAlloc) which returns the + * PMR import and size (span length). + * Resource - an allocation/buffer i.e. a MemDesc. Resource size represents + * the size of the sub-allocation. + */ + RA_ARENA *psSubAllocRA; + IMG_CHAR *pszSubAllocRAName; + + /* The psQuantizedVMRA is for the coarse allocation (PMRs) of virtual + * space from the heap. + * + * Resource Span - the heap's VM space from base to base+length, + * only one is added at heap creation. + * Resource - a PMR import associated with the heap. Dynamic number + * as memory is allocated/freed from or mapped/unmapped to + * the heap. Resource size follows PMR logical size. + */ + RA_ARENA *psQuantizedVMRA; + IMG_CHAR *pszQuantizedVMRAName; + + /* We also need to store a copy of the quantum size in order to feed + * this down to the server. + */ + IMG_UINT32 uiLog2Quantum; + + /* Store a copy of the minimum import alignment */ + IMG_UINT32 uiLog2ImportAlignment; + + /* The parent memory context for this heap */ + struct DEVMEM_CONTEXT_TAG *psCtx; + + /* Lock to protect this structure */ + POS_LOCK hLock; + + /* Each "DEVMEM_HEAP" has a counterpart in the server, which is + * responsible for handling the mapping into device MMU. + * We have a handle to that here. + */ + IMG_HANDLE hDevMemServerHeap; + + /* This heap is fully allocated and premapped into the device address space. + * Used in virtualisation for firmware heaps of Guest and optionally Host drivers. */ + IMG_BOOL bPremapped; +}; + +typedef IMG_UINT32 DEVMEM_PROPERTIES_T; /*!< Typedef for Devicemem properties */ +#define DEVMEM_PROPERTIES_EXPORTABLE (1UL<<0) /*!< Is it exportable? */ +#define DEVMEM_PROPERTIES_IMPORTED (1UL<<1) /*!< Is it imported from another process? */ +#define DEVMEM_PROPERTIES_SUBALLOCATABLE (1UL<<2) /*!< Is it suballocatable? */ +#define DEVMEM_PROPERTIES_UNPINNED (1UL<<3) /*!< Is it currently pinned? 
*/ +#define DEVMEM_PROPERTIES_IMPORT_IS_ZEROED (1UL<<4) /*!< Is the memory fully zeroed? */ +#define DEVMEM_PROPERTIES_IMPORT_IS_CLEAN (1UL<<5) /*!< Is the memory clean, i.e. not been used before? */ +#define DEVMEM_PROPERTIES_SECURE (1UL<<6) /*!< Is it a special secure buffer? No CPU maps allowed! */ +#define DEVMEM_PROPERTIES_IMPORT_IS_POISONED (1UL<<7) /*!< Is the memory fully poisoned? */ +#define DEVMEM_PROPERTIES_NO_CPU_MAPPING (1UL<<8) /* No CPU Mapping is allowed, RW attributes + are further derived from allocation memory flags */ +#define DEVMEM_PROPERTIES_NO_LAYOUT_CHANGE (1UL<<9) /* No sparse resizing allowed, once a memory + layout is chosen, no change allowed later, + This includes pinning and unpinning */ + + +typedef struct DEVMEM_DEVICE_IMPORT_TAG +{ + DEVMEM_HEAP *psHeap; /*!< Heap this import is bound to */ + IMG_DEV_VIRTADDR sDevVAddr; /*!< Device virtual address of the import */ + IMG_UINT32 ui32RefCount; /*!< Refcount of the device virtual address */ + IMG_HANDLE hReservation; /*!< Device memory reservation handle */ + IMG_HANDLE hMapping; /*!< Device mapping handle */ + IMG_BOOL bMapped; /*!< This is import mapped? 
*/ + POS_LOCK hLock; /*!< Lock to protect the device import */ +} DEVMEM_DEVICE_IMPORT; + +typedef struct DEVMEM_CPU_IMPORT_TAG +{ + void *pvCPUVAddr; /*!< CPU virtual address of the import */ + IMG_UINT32 ui32RefCount; /*!< Refcount of the CPU virtual address */ + IMG_HANDLE hOSMMapData; /*!< CPU mapping handle */ + POS_LOCK hLock; /*!< Lock to protect the CPU import */ +} DEVMEM_CPU_IMPORT; + +typedef struct DEVMEM_IMPORT_TAG +{ + SHARED_DEV_CONNECTION hDevConnection; + IMG_DEVMEM_ALIGN_T uiAlign; /*!< Alignment of the PMR */ + DEVMEM_SIZE_T uiSize; /*!< Size of import */ + ATOMIC_T hRefCount; /*!< Refcount for this import */ + DEVMEM_PROPERTIES_T uiProperties; /*!< Stores properties of an import like if + it is exportable, pinned or suballocatable */ + IMG_HANDLE hPMR; /*!< Handle to the PMR */ + PVRSRV_MEMALLOCFLAGS_T uiFlags; /*!< Flags for this import */ + POS_LOCK hLock; /*!< Lock to protect the import */ + + DEVMEM_DEVICE_IMPORT sDeviceImport; /*!< Device specifics of the import */ + DEVMEM_CPU_IMPORT sCPUImport; /*!< CPU specifics of the import */ +} DEVMEM_IMPORT; + +typedef struct DEVMEM_DEVICE_MEMDESC_TAG +{ + IMG_DEV_VIRTADDR sDevVAddr; /*!< Device virtual address of the allocation */ + IMG_UINT32 ui32RefCount; /*!< Refcount of the device virtual address */ + POS_LOCK hLock; /*!< Lock to protect device memdesc */ +} DEVMEM_DEVICE_MEMDESC; + +typedef struct DEVMEM_CPU_MEMDESC_TAG +{ + void *pvCPUVAddr; /*!< CPU virtual address of the import */ + IMG_UINT32 ui32RefCount; /*!< Refcount of the device CPU address */ + POS_LOCK hLock; /*!< Lock to protect CPU memdesc */ +} DEVMEM_CPU_MEMDESC; + +struct DEVMEM_MEMDESC_TAG +{ + DEVMEM_IMPORT *psImport; /*!< Import this memdesc is on */ + IMG_DEVMEM_OFFSET_T uiOffset; /*!< Offset into import where our allocation starts */ + IMG_DEVMEM_SIZE_T uiAllocSize; /*!< Size of the allocation */ + ATOMIC_T hRefCount; /*!< Refcount of the memdesc */ + POS_LOCK hLock; /*!< Lock to protect memdesc */ + IMG_HANDLE hPrivData; 
+ + DEVMEM_DEVICE_MEMDESC sDeviceMemDesc; /*!< Device specifics of the memdesc */ + DEVMEM_CPU_MEMDESC sCPUMemDesc; /*!< CPU specifics of the memdesc */ + + IMG_CHAR szText[DEVMEM_ANNOTATION_MAX_LEN]; /*!< Annotation for this memdesc */ + + IMG_UINT32 ui32AllocationIndex; + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + IMG_HANDLE hRIHandle; /*!< Handle to RI information */ +#endif +}; + +/* The physical descriptor used to store handles and information of device + * physical allocations. + */ +struct DEVMEMX_PHYS_MEMDESC_TAG +{ + IMG_UINT32 uiNumPages; /*!< Number of pages that the import has*/ + IMG_UINT32 uiLog2PageSize; /*!< Page size */ + ATOMIC_T hRefCount; /*!< Refcount of the memdesc */ + PVRSRV_MEMALLOCFLAGS_T uiFlags; /*!< Flags for this import */ + IMG_HANDLE hPMR; /*!< Handle to the PMR */ + DEVMEM_CPU_IMPORT sCPUImport; /*!< CPU specifics of the memdesc */ + DEVMEM_BRIDGE_HANDLE hBridge; /*!< Bridge connection for the server */ + void *pvUserData; /*!< User data */ +}; + +/* The virtual descriptor used to store handles and information of a device + * virtual range and the mappings to it. + */ +struct DEVMEMX_VIRT_MEMDESC_TAG +{ + IMG_UINT32 uiNumPages; /*!< Number of pages that the import has*/ + PVRSRV_MEMALLOCFLAGS_T uiFlags; /*!< Flags for this import */ + DEVMEMX_PHYSDESC **apsPhysDescTable; /*!< Table to store links to physical descs */ + DEVMEM_DEVICE_IMPORT sDeviceImport; /*!< Device specifics of the memdesc */ + + IMG_CHAR szText[DEVMEM_ANNOTATION_MAX_LEN]; /*!< Annotation for this virt memdesc */ + IMG_UINT32 ui32AllocationIndex; /*!< To track mappings in this range */ + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + IMG_HANDLE hRIHandle; /*!< Handle to RI information */ +#endif +}; + +#define DEVICEMEM_UTILS_NO_ADDRESS 0 + +/****************************************************************************** +@Function DevmemValidateParams +@Description Check if flags are conflicting and if align is a size multiple. 
+ +@Input uiSize Size of the import. +@Input uiAlign Alignment of the import. +@Input puiFlags Pointer to the flags for the import. +@return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR DevmemValidateParams(IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_ALIGN_T uiAlign, + PVRSRV_MEMALLOCFLAGS_T *puiFlags); + +/****************************************************************************** +@Function DevmemImportStructAlloc +@Description Allocates memory for an import struct. Does not allocate a PMR! + Create locks for CPU and Devmem mappings. + +@Input hDevConnection Connection to use for calls from the import. +@Input ppsImport The import to allocate. +@return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR DevmemImportStructAlloc(SHARED_DEV_CONNECTION hDevConnection, + DEVMEM_IMPORT **ppsImport); + +/****************************************************************************** +@Function DevmemImportStructInit +@Description Initialises the import struct with the given parameters. + Set it's refcount to 1! + +@Input psImport The import to initialise. +@Input uiSize Size of the import. +@Input uiAlign Alignment of allocations in the import. +@Input uiMapFlags +@Input hPMR Reference to the PMR of this import struct. +@Input uiProperties Properties of the import. Is it exportable, + imported, suballocatable, unpinned? 
+******************************************************************************/ +void DevmemImportStructInit(DEVMEM_IMPORT *psImport, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_ALIGN_T uiAlign, + PVRSRV_MEMALLOCFLAGS_T uiMapFlags, + IMG_HANDLE hPMR, + DEVMEM_PROPERTIES_T uiProperties); + +/****************************************************************************** +@Function DevmemImportStructDevMap +@Description NEVER call after the last DevmemMemDescRelease() + Maps the PMR referenced by the import struct to the device's + virtual address space. + Does nothing but increase the cpu mapping refcount if the + import struct was already mapped. + +@Input psHeap The heap to map to. +@Input bMap Caller can choose if the import should be really + mapped in the page tables or if just a virtual range + should be reserved and the refcounts increased. +@Input psImport The import we want to map. +@Input uiOptionalMapAddress An optional address to map to. + Pass DEVICEMEM_UTILS_NO_ADDRESS if not used. +@return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR DevmemImportStructDevMap(DEVMEM_HEAP *psHeap, + IMG_BOOL bMap, + DEVMEM_IMPORT *psImport, + IMG_UINT64 uiOptionalMapAddress); + +/****************************************************************************** +@Function DevmemImportStructDevUnmap +@Description Unmaps the PMR referenced by the import struct from the + device's virtual address space. + If this was not the last remaining CPU mapping on the import + struct only the cpu mapping refcount is decreased. +@return A boolean to signify if the import was unmapped. 
+******************************************************************************/ +IMG_BOOL DevmemImportStructDevUnmap(DEVMEM_IMPORT *psImport); + +/****************************************************************************** +@Function DevmemImportStructCPUMap +@Description NEVER call after the last DevmemMemDescRelease() + Maps the PMR referenced by the import struct to the CPU's + virtual address space. + Does nothing but increase the cpu mapping refcount if the + import struct was already mapped. +@return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR DevmemImportStructCPUMap(DEVMEM_IMPORT *psImport); + +/****************************************************************************** +@Function DevmemImportStructCPUUnmap +@Description Unmaps the PMR referenced by the import struct from the CPU's + virtual address space. + If this was not the last remaining CPU mapping on the import + struct only the cpu mapping refcount is decreased. +******************************************************************************/ +void DevmemImportStructCPUUnmap(DEVMEM_IMPORT *psImport); + + +/****************************************************************************** +@Function DevmemImportStructAcquire +@Description Acquire an import struct by increasing it's refcount. +******************************************************************************/ +void DevmemImportStructAcquire(DEVMEM_IMPORT *psImport); + +/****************************************************************************** +@Function DevmemImportStructRelease +@Description Reduces the refcount of the import struct. + Destroys the import in the case it was the last reference. + Destroys underlying PMR if this import was the last reference + to it. +@return A boolean to signal if the import was destroyed. True = yes. 
+******************************************************************************/ +IMG_BOOL DevmemImportStructRelease(DEVMEM_IMPORT *psImport); + +/****************************************************************************** +@Function DevmemImportDiscard +@Description Discard a created, but uninitialised import structure. + This must only be called before DevmemImportStructInit + after which DevmemImportStructRelease must be used to + "free" the import structure. +******************************************************************************/ +void DevmemImportDiscard(DEVMEM_IMPORT *psImport); + +/****************************************************************************** +@Function DevmemMemDescAlloc +@Description Allocates a MemDesc and creates its various locks. + Zero the allocated memory. +@return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR DevmemMemDescAlloc(DEVMEM_MEMDESC **ppsMemDesc); + +/****************************************************************************** +@Function DevmemMemDescInit +@Description Sets the given offset and import struct fields in the MemDesc. + Initialises refcount to 1 and other values to 0. + +@Input psMemDesc MemDesc to initialise. +@Input uiOffset Offset in the import structure. +@Input psImport Import the MemDesc is on. +@Input uiAllocSize Size of the allocation +******************************************************************************/ +void DevmemMemDescInit(DEVMEM_MEMDESC *psMemDesc, + IMG_DEVMEM_OFFSET_T uiOffset, + DEVMEM_IMPORT *psImport, + IMG_DEVMEM_SIZE_T uiAllocSize); + +/****************************************************************************** +@Function DevmemMemDescAcquire +@Description Acquires the MemDesc by increasing its refcount. 
+******************************************************************************/ +void DevmemMemDescAcquire(DEVMEM_MEMDESC *psMemDesc); + +/****************************************************************************** +@Function DevmemMemDescRelease +@Description Releases the MemDesc by reducing its refcount. + Destroy the MemDesc if its refcount is 0. + Destroy the import struct the MemDesc is on if that was the + last MemDesc on the import, probably following the destruction + of the underlying PMR. +@return A boolean to signal if the MemDesc was destroyed. True = yes. +******************************************************************************/ +IMG_BOOL DevmemMemDescRelease(DEVMEM_MEMDESC *psMemDesc); + +/****************************************************************************** +@Function DevmemMemDescDiscard +@Description Discard a created, but uninitialised MemDesc structure. + This must only be called before DevmemMemDescInit after + which DevmemMemDescRelease must be used to "free" the + MemDesc structure. +******************************************************************************/ +void DevmemMemDescDiscard(DEVMEM_MEMDESC *psMemDesc); + + +/****************************************************************************** +@Function GetImportProperties +@Description Atomically read psImport->uiProperties + It's possible that another thread modifies uiProperties + immediately after this function returns, making its result + stale. So, it's recommended to use this function only to + check if certain non-volatile flags were set. 
+******************************************************************************/ +static INLINE DEVMEM_PROPERTIES_T GetImportProperties(DEVMEM_IMPORT *psImport) +{ + DEVMEM_PROPERTIES_T uiProperties; + + OSLockAcquire(psImport->hLock); + uiProperties = psImport->uiProperties; + OSLockRelease(psImport->hLock); + return uiProperties; +} + +#endif /* DEVICEMEM_UTILS_H */ diff --git a/drivers/gpu/drm/phytium/octopus/di_common.h b/drivers/gpu/drm/phytium/octopus/di_common.h new file mode 100644 index 000000000000..d3661da85b99 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/di_common.h @@ -0,0 +1,228 @@ +/*************************************************************************/ /*! +@File +@Title Common types for Debug Info framework. +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef DI_COMMON_H +#define DI_COMMON_H + +#include "img_types.h" + +/* Token that signals that a header should be printed. */ +#define DI_START_TOKEN ((void *) 1) + +/* This is a public handle to an entry. */ +typedef struct DI_GROUP DI_GROUP; +typedef struct DI_ENTRY DI_ENTRY; +typedef struct OSDI_IMPL_ENTRY OSDI_IMPL_ENTRY; + +/*! Debug Info entries types. */ +typedef enum DI_ENTRY_TYPE +{ + DI_ENTRY_TYPE_GENERIC, /*!< generic entry type, implements + start/stop/next/show iterator + interface */ + DI_ENTRY_TYPE_RANDOM_ACCESS, /*!< random access entry, implements + seek/read iterator interface */ +} DI_ENTRY_TYPE; + +/*! @Function DI_PFN_START + * + * @Description + * Start operation returns first entry and passes it to Show operation. 
+ * + * @Input psEntry pointer to the implementation entry + * @InOut pui64Pos current data position in the entry + * + * @Return pointer to data that will be passed to the other iterator + * functions in pvData argument + */ +typedef void *(*DI_PFN_START)(OSDI_IMPL_ENTRY *psEntry, IMG_UINT64 *pui64Pos); + +/*! @Function DI_PFN_STOP + * + * @Description + * Stop operations is called after iterator reaches end of data. + * + * If pvData was allocated in pfnStart it should be freed here. + * + * @Input psEntry pointer to the implementation entry + * @Input pvData pointer to data returned from pfnStart/pfnNext + */ +typedef void (*DI_PFN_STOP)(OSDI_IMPL_ENTRY *psEntry, void *pvData); + +/*! @Function DI_PFN_NEXT + * + * @Description + * Next returns next data entry and passes it to Show operation. + * + * @Input psEntry pointer to the implementation entry + * @Input pvData pointer to data returned from pfnStart/pfnNext + * @InOut pui64Pos current data position in the entry + */ +typedef void *(*DI_PFN_NEXT)(OSDI_IMPL_ENTRY *psEntry, void *pvData, + IMG_UINT64 *pui64Pos); + +/*! @Function DI_PFN_SHOW + * + * @Description + * Outputs the data element. + * + * @Input psEntry pointer to the implementation entry + * @Input pvData pointer to data returned from pfnStart/pfnNext + */ +typedef int (*DI_PFN_SHOW)(OSDI_IMPL_ENTRY *psEntry, void *pvData); + +/*! @Function DI_PFN_SEEK + * + * @Description + * Changes position of the entry data pointer + * + * @Input uiOffset new entry offset (absolute) + * @Input pvData private data provided during entry creation + */ +typedef IMG_INT64 (*DI_PFN_SEEK)(IMG_UINT64 ui64Offset, void *pvData); + +/*! @Function DI_PFN_READ + * + * @Description + * Retrieves data from the entry from position previously set by Seek. 
+ * + * @Input pszBuffer output buffer + * @Input ui64Count length of the output buffer + * @InOut pui64Pos pointer to the current position in the entry + * @Input pvData private data provided during entry creation + */ +typedef IMG_INT64 (*DI_PFN_READ)(IMG_CHAR *pszBuffer, IMG_UINT64 ui64Count, + IMG_UINT64 *pui64Pos, void *pvData); + +/*! @Function DI_PFN_WRITE + * + * @Description + * Handle writes operation to the entry. + * + * @Input pszBuffer NUL-terminated buffer containing written data + * @Input ui64Count length of the data in pszBuffer (length of the buffer) + * @InOut pui64Pos pointer to the current position in the entry + * @Input pvData private data provided during entry creation + */ +typedef IMG_INT64 (*DI_PFN_WRITE)(const IMG_CHAR *pszBuffer, + IMG_UINT64 ui64Count, IMG_UINT64 *pui64Pos, + void *pvData); + +/*! Debug info entry iterator. + * + * This covers all entry types: GENERIC and RANDOM_ACCESS. + * + * The GENERIC entry type + * + * The GENERIC type should implement either a full set of following callbacks: + * pfnStart, pfnStop, pfnNext and pfnShow, or pfnShow only. If only pfnShow + * callback is given the framework will use default handlers in place of the + * other ones. + * + * e.g. for generic entry: + * + * struct sIter = { + * .pfnStart = StartCb, .pfnStop = StopCb, pfnNext = NextCb, + * .pfnShow = ShowCb + * }; + * + * The use case for implementing pfnShow only is if the data for the given + * entry is short and can be printed in one go because the pfnShow callback + * will be called only once. + * + * e.g. for one-shot print generic entry: + * + * struct sIter = { + * .pfnShow = SingleShowCb + * }; + * + * The DICreateEntry() function will return error if DI_ENTRY_TYPE_GENERIC + * type is used and invalid combination of callbacks is given. + * + * The RANDOM_ACCESS entry + * + * The RANDOM_ACCESS type should implement either both pfnSeek and pfnRead + * or pfnRead only callbacks. + * + * e.g. 
of seekable and readable random access entry: + * + * struct sIter = { + * .pfnSeek = SeekCb, .pfnRead = ReadCb + * }; + * + * The DICreateEntry() function will return error if DI_ENTRY_TYPE_RANDOM_ACCESS + * type is used and invalid combination of callbacks is given. + * + * Writing to file (optional) + * + * The iterator allows also to pass a pfnWrite callback that allows implementing + * write operation on the entry. The write operation is entry type agnostic + * which means that it can be defined for both GENERIC and RANDOM_ACCESS + * entries. + * + * e.g. for writable one-shot print generic entry + * + * struct sIter = { + * .pfnShow = SingleShowCb, .pfnWrite = WriteCb + * }; + */ +typedef struct DI_ITERATOR_CB +{ + /* Generic entry interface. */ + + DI_PFN_START pfnStart; /*!< Starts iteration and returns first element + of entry's data. */ + DI_PFN_STOP pfnStop; /*!< Stops iteration. */ + DI_PFN_NEXT pfnNext; /*!< Returns next element of entry's data. */ + DI_PFN_SHOW pfnShow; /*!< Shows current data element of an entry. */ + + /* Optional random access entry interface. */ + + DI_PFN_SEEK pfnSeek; /*!< Sets data pointer in an entry. */ + DI_PFN_READ pfnRead; /*!< Reads data from an entry. */ + + /* Optional writing to entry interface. */ + + DI_PFN_WRITE pfnWrite; /*!< Performs write operation on an entry. */ +} DI_ITERATOR_CB; + +#endif /* DI_COMMON_H */ diff --git a/drivers/gpu/drm/phytium/octopus/di_impl_brg.c b/drivers/gpu/drm/phytium/octopus/di_impl_brg.c new file mode 100644 index 000000000000..acd5add6634c --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/di_impl_brg.c @@ -0,0 +1,882 @@ +/*************************************************************************/ /*! +@File +@Title OS agnostic implementation of Debug Info interface. +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements osdi_impl.h API to provide access to driver's + debug data via pvrdebug. 
+@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "allocmem.h" +#include "hash.h" +#include "img_defs.h" +#include "img_types.h" +#include "lock.h" +#include "osfunc_common.h" +#include "osfunc.h" /* for thread */ +#include "tlstream.h" +#include "dllist.h" + +#include "osdi_impl.h" +#include "di_impl_brg.h" +#include "di_impl_brg_intern.h" +#include "pvr_dicommon.h" +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) +#include "pvrsrv.h" +#endif + +#define ENTRIES_TABLE_INIT_SIZE 64 +#define STREAM_BUFFER_SIZE 0x4000 /* 16KB */ +#define STREAM_LINE_LENGTH 512 + +#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) +#define WRITER_THREAD_SLEEP_TIMEOUT 0ull +#else +#define WRITER_THREAD_SLEEP_TIMEOUT 28800000000ull +#endif +#define WRITER_THREAD_DESTROY_TIMEOUT 100000ull +#define WRITER_THREAD_DESTROY_RETRIES 10u + +#define WRITE_RETRY_COUNT 10 /* retry a write to a TL buffer 10 times */ +#define WRITE_RETRY_WAIT_TIME 100 /* wait 100ms between write retries */ + +typedef enum THREAD_STATE +{ + THREAD_STATE_NULL, + THREAD_STATE_ALIVE, + THREAD_STATE_TERMINATED, +} THREAD_STATE; + +static struct DIIB_IMPL +{ + HASH_TABLE *psEntriesTable; /*!< Table of entries. */ + POS_LOCK psEntriesLock; /*!< Protects psEntriesTable. */ + IMG_HANDLE hWriterThread; + IMG_HANDLE hWriterEventObject; + ATOMIC_T eThreadState; + + DLLIST_NODE sWriterQueue; + POS_LOCK psWriterLock; /*!< Protects sWriterQueue. 
*/ +} *_g_psImpl; + +struct DIIB_GROUP +{ + const IMG_CHAR *pszName; + struct DIIB_GROUP *psParentGroup; +}; + +struct DIIB_ENTRY +{ + struct DIIB_GROUP *psParentGroup; + OSDI_IMPL_ENTRY sImplEntry; + DI_ITERATOR_CB sIterCb; + DI_ENTRY_TYPE eType; + IMG_CHAR pszFullPath[DI_IMPL_BRG_PATH_LEN]; + void *pvPrivData; + + POS_LOCK hLock; /*!< Protects access to entry's iterator. */ +}; + +struct DI_CONTEXT_TAG +{ + IMG_HANDLE hStream; + ATOMIC_T iRefCnt; + IMG_BOOL bClientConnected; /*!< Indicated that the client is or is not + connected to the DI. */ +}; + +struct DIIB_WORK_ITEM +{ + DI_CONTEXT *psContext; + DIIB_ENTRY *psEntry; + IMG_UINT64 ui64Size; + IMG_UINT64 ui64Offset; + + DLLIST_NODE sQueueElement; +}; + +/* Declaring function here to avoid dependencies that are introduced by + * including osfunc.h. */ +IMG_INT32 OSStringNCompare(const IMG_CHAR *pStr1, const IMG_CHAR *pStr2, + size_t uiSize); + +/* djb2 hash function is public domain */ +static IMG_UINT32 _Hash(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen) +{ + IMG_CHAR *pszStr = pKey; + IMG_UINT32 ui32Hash = 5381, ui32Char; + + PVR_UNREFERENCED_PARAMETER(uKeySize); + PVR_UNREFERENCED_PARAMETER(uHashTabLen); + + while ((ui32Char = *pszStr++) != '\0') + { + ui32Hash = ((ui32Hash << 5) + ui32Hash) + ui32Char; /* hash * 33 + c */ + } + + return ui32Hash; +} + +static IMG_BOOL _Compare(size_t uKeySize, void *pKey1, void *pKey2) +{ + IMG_CHAR *pszKey1 = pKey1, *pszKey2 = pKey2; + + return OSStringNCompare(pszKey1, pszKey2, uKeySize) == 0; +} + +/* ----- native callbacks interface ----------------------------------------- */ + +static void _WriteWithRetires(void *pvNativeHandle, const IMG_CHAR *pszStr, + IMG_UINT uiLen) +{ + PVRSRV_ERROR eError; + IMG_INT iRetry = 0; + IMG_UINT32 ui32Flags = TL_FLAG_NO_WRITE_FAILED; + + do + { + /* Try to write to the buffer but don't inject MOST_RECENT_WRITE_FAILED + * packet in case of failure because we're going to retry. 
*/ + eError = TLStreamWriteRetFlags(pvNativeHandle, (IMG_UINT8 *) pszStr, + uiLen, &ui32Flags); + if (eError == PVRSRV_ERROR_STREAM_FULL) + { + // wait to give the client a change to read + OSSleepms(WRITE_RETRY_WAIT_TIME); + } + } + while (eError == PVRSRV_ERROR_STREAM_FULL && iRetry++ < WRITE_RETRY_COUNT); + + /* One last try to write to the buffer. In this case upon failure + * a MOST_RECENT_WRITE_FAILED packet will be inject to the buffer to + * indicate data loss. */ + if (eError == PVRSRV_ERROR_STREAM_FULL) + { + eError = TLStreamWrite(pvNativeHandle, (IMG_UINT8 *) pszStr, uiLen); + } + + PVR_LOG_IF_ERROR(eError, "TLStreamWrite"); +} + +__printf(2, 0) +static void _VPrintf(void *pvNativeHandle, const IMG_CHAR *pszFmt, + va_list pArgs) +{ + IMG_CHAR pcBuffer[STREAM_LINE_LENGTH]; + IMG_UINT uiLen = OSVSNPrintf(pcBuffer, sizeof(pcBuffer) - 1, pszFmt, pArgs); + pcBuffer[uiLen] = '\0'; + + _WriteWithRetires(pvNativeHandle, pcBuffer, uiLen + 1); +} + +static void _Puts(void *pvNativeHandle, const IMG_CHAR *pszStr) +{ + _WriteWithRetires(pvNativeHandle, pszStr, OSStringLength(pszStr) + 1); +} + +static IMG_BOOL _HasOverflowed(void *pvNativeHandle) +{ + PVR_UNREFERENCED_PARAMETER(pvNativeHandle); + return IMG_FALSE; +} + +static OSDI_IMPL_ENTRY_CB _g_sEntryCallbacks = { + .pfnVPrintf = _VPrintf, + .pfnPuts = _Puts, + .pfnHasOverflowed = _HasOverflowed, +}; + +/* ----- entry operations --------------------------------------------------- */ + +static PVRSRV_ERROR _ContextUnrefAndMaybeDestroy(DI_CONTEXT *psContext) +{ + if (OSAtomicDecrement(&psContext->iRefCnt) == 0) + { + TLStreamClose(psContext->hStream); + OSFreeMem(psContext); + } + + return PVRSRV_OK; +} + +static IMG_INT64 _ReadGeneric(const DI_CONTEXT *psContext, DIIB_ENTRY *psEntry) +{ + IMG_INT64 iRet = 0; + IMG_UINT64 ui64Pos = 0; + DI_ITERATOR_CB *psIter = &psEntry->sIterCb; + OSDI_IMPL_ENTRY *psImplEntry = &psEntry->sImplEntry; + PVRSRV_ERROR eError; + + if (psIter->pfnStart != NULL) + { + /* this is a full 
sequence of the operation */ + void *pvData = psIter->pfnStart(psImplEntry, &ui64Pos); + + while (pvData != NULL && psContext->bClientConnected) + { + iRet = psIter->pfnShow(psImplEntry, pvData); + if (iRet < 0) + { + break; + } + + pvData = psIter->pfnNext(psImplEntry, pvData, &ui64Pos); + } + + psIter->pfnStop(psImplEntry, pvData); + } + else if (psIter->pfnShow != NULL) + { + /* this is a simplified sequence of the operation */ + iRet = psIter->pfnShow(psImplEntry, NULL); + } + + eError = TLStreamMarkEOS(psImplEntry->pvNative, IMG_FALSE); + PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamMarkEOS", return_error_); + + return iRet; + +return_error_: + return -1; +} + +static IMG_INT64 _ReadRndAccess(DIIB_ENTRY *psEntry, IMG_UINT64 ui64Count, + IMG_UINT64 *pui64Pos, void *pvData) +{ + PVRSRV_ERROR eError; + IMG_UINT8 *pui8Buffer; + IMG_HANDLE hStream = psEntry->sImplEntry.pvNative; + + if (psEntry->sIterCb.pfnRead == NULL) + { + return -1; + } + + eError = TLStreamReserve(hStream, &pui8Buffer, ui64Count); + PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamReserve", return_error_); + + psEntry->sIterCb.pfnRead((IMG_CHAR *) pui8Buffer, ui64Count, pui64Pos, + pvData); + + eError = TLStreamCommit(hStream, ui64Count); + PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamCommit", return_error_); + + eError = TLStreamMarkEOS(psEntry->sImplEntry.pvNative, IMG_FALSE); + PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamMarkEOS", return_error_); + + return 0; + +return_error_: + return -1; +} + +static void _WriterThread(void *pvArg) +{ + PVRSRV_ERROR eError; + IMG_HANDLE hEvent; + DLLIST_NODE *psNode; + + eError = OSEventObjectOpen(_g_psImpl->hWriterEventObject, &hEvent); + PVR_LOG_RETURN_VOID_IF_ERROR(eError, "OSEventObjectOpen"); + +#ifdef PVRSRV_FORCE_UNLOAD_IF_BAD_STATE + while (PVRSRVGetPVRSRVData()->eServicesState == PVRSRV_SERVICES_STATE_OK && + OSAtomicRead(&_g_psImpl->eThreadState) == THREAD_STATE_ALIVE) +#else + while (OSAtomicRead(&_g_psImpl->eThreadState) == THREAD_STATE_ALIVE) +#endif + { + struct 
DIIB_WORK_ITEM *psItem = NULL; + + OSLockAcquire(_g_psImpl->psWriterLock); + /* Get element from list tail so that we always get the oldest element + * (elements are added to head). */ + while ((psNode = dllist_get_prev_node(&_g_psImpl->sWriterQueue)) != NULL) + { + IMG_INT64 i64Ret; + DIIB_ENTRY *psEntry; + OSDI_IMPL_ENTRY *psImplEntry; + + dllist_remove_node(psNode); + OSLockRelease(_g_psImpl->psWriterLock); + + psItem = IMG_CONTAINER_OF(psNode, struct DIIB_WORK_ITEM, + sQueueElement); + + psEntry = psItem->psEntry; + psImplEntry = &psItem->psEntry->sImplEntry; + + /* if client has already disconnected we can just drop this item */ + if (psItem->psContext->bClientConnected) + { + + PVR_ASSERT(psItem->psContext->hStream != NULL); + + psImplEntry->pvNative = psItem->psContext->hStream; + + if (psEntry->eType == DI_ENTRY_TYPE_GENERIC) + { + i64Ret = _ReadGeneric(psItem->psContext, psEntry); + PVR_LOG_IF_FALSE(i64Ret >= 0, "generic access read operation " + "failed"); + } + else if (psEntry->eType == DI_ENTRY_TYPE_RANDOM_ACCESS) + { + IMG_UINT64 ui64Pos = psItem->ui64Offset; + + i64Ret = _ReadRndAccess(psEntry, psItem->ui64Size, &ui64Pos, + psEntry->pvPrivData); + PVR_LOG_IF_FALSE(i64Ret >= 0, "random access read operation " + "failed"); + } + else + { + PVR_ASSERT(psEntry->eType == DI_ENTRY_TYPE_GENERIC || + psEntry->eType == DI_ENTRY_TYPE_RANDOM_ACCESS); + } + + psImplEntry->pvNative = NULL; + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "client reading entry \"%s\" has " + "disconnected", psEntry->pszFullPath)); + } + + _ContextUnrefAndMaybeDestroy(psItem->psContext); + OSFreeMemNoStats(psItem); + + OSLockAcquire(_g_psImpl->psWriterLock); + } + OSLockRelease(_g_psImpl->psWriterLock); + + eError = OSEventObjectWaitKernel(hEvent, WRITER_THREAD_SLEEP_TIMEOUT); + if (eError != PVRSRV_OK && eError != PVRSRV_ERROR_TIMEOUT) + { + PVR_LOG_ERROR(eError, "OSEventObjectWaitKernel"); + } + } + + OSLockAcquire(_g_psImpl->psWriterLock); + /* clear the queue if there are any items 
pending */ + while ((psNode = dllist_get_prev_node(&_g_psImpl->sWriterQueue)) != NULL) + { + struct DIIB_WORK_ITEM *psItem = IMG_CONTAINER_OF(psNode, + struct DIIB_WORK_ITEM, + sQueueElement); + + dllist_remove_node(psNode); + _ContextUnrefAndMaybeDestroy(psItem->psContext); + OSFreeMem(psItem); + } + OSLockRelease(_g_psImpl->psWriterLock); + + eError = OSEventObjectClose(hEvent); + PVR_LOG_IF_ERROR(eError, "OSEventObjectClose"); + + OSAtomicWrite(&_g_psImpl->eThreadState, THREAD_STATE_TERMINATED); +} + +/* ----- DI internal API ---------------------------------------------------- */ + +DIIB_ENTRY *DIImplBrgFind(const IMG_CHAR *pszPath) +{ + DIIB_ENTRY *psEntry; + + OSLockAcquire(_g_psImpl->psEntriesLock); + psEntry = (void *) HASH_Retrieve_Extended(_g_psImpl->psEntriesTable, + (IMG_CHAR *) pszPath); + OSLockRelease(_g_psImpl->psEntriesLock); + + return psEntry; +} + +/* ----- DI bridge interface ------------------------------------------------ */ + +static PVRSRV_ERROR _CreateStream(IMG_CHAR *pszStreamName, IMG_HANDLE *phStream) +{ + IMG_UINT32 iRet; + IMG_HANDLE hStream; + PVRSRV_ERROR eError; + + /* for now only one stream can be created. Should we be able to create + * per context stream? 
*/ + iRet = OSSNPrintf(pszStreamName, PRVSRVTL_MAX_STREAM_NAME_SIZE, + "di_stream_%x", OSGetCurrentClientProcessIDKM()); + if (iRet >= PRVSRVTL_MAX_STREAM_NAME_SIZE) + { + /* this check is superfluous because it can never happen but in case + * someone changes the definition of PRVSRVTL_MAX_STREAM_NAME_SIZE + * handle this case */ + pszStreamName[0] = '\0'; + return PVRSRV_ERROR_INTERNAL_ERROR; + } + + eError = TLStreamCreate(&hStream, pszStreamName, STREAM_BUFFER_SIZE, + TL_OPMODE_DROP_NEWER, NULL, NULL, NULL, NULL); + PVR_RETURN_IF_ERROR(eError); + + *phStream = hStream; + + return PVRSRV_OK; +} + +PVRSRV_ERROR DICreateContextKM(IMG_CHAR *pszStreamName, DI_CONTEXT **ppsContext) +{ + PVRSRV_ERROR eError; + DI_CONTEXT *psContext; + IMG_HANDLE hStream = NULL; + THREAD_STATE eTState; + + PVR_LOG_RETURN_IF_INVALID_PARAM(ppsContext != NULL, "ppsContext"); + PVR_LOG_RETURN_IF_INVALID_PARAM(pszStreamName != NULL, "pszStreamName"); + + psContext = OSAllocMem(sizeof(*psContext)); + PVR_LOG_GOTO_IF_NOMEM(psContext, eError, return_); + + eError = _CreateStream(pszStreamName, &hStream); + PVR_LOG_GOTO_IF_ERROR(eError, "_CreateStream", free_desc_); + + psContext->hStream = hStream; + /* indicated to the write thread if the client is still connected and + * waiting for the data */ + psContext->bClientConnected = IMG_TRUE; + OSAtomicWrite(&psContext->iRefCnt, 1); + + eTState = OSAtomicCompareExchange(&_g_psImpl->eThreadState, + THREAD_STATE_NULL, + THREAD_STATE_ALIVE); + + /* if the thread has not been started yet do it */ + if (eTState == THREAD_STATE_NULL) + { + PVR_ASSERT(_g_psImpl->hWriterThread == NULL); + + eError = OSThreadCreate(&_g_psImpl->hWriterThread, "di_writer", + _WriterThread, NULL, IMG_FALSE, NULL); + PVR_LOG_GOTO_IF_ERROR(eError, "OSThreadCreate", free_close_stream_); + } + + *ppsContext = psContext; + + return PVRSRV_OK; + +free_close_stream_: + TLStreamClose(psContext->hStream); + OSAtomicWrite(&_g_psImpl->eThreadState, THREAD_STATE_TERMINATED); +free_desc_: 
+ OSFreeMem(psContext); +return_: + return eError; +} + +PVRSRV_ERROR DIDestroyContextKM(DI_CONTEXT *psContext) +{ + PVR_LOG_RETURN_IF_INVALID_PARAM(psContext != NULL, "psContext"); + + /* pass the information to the write thread that the client has + * disconnected */ + psContext->bClientConnected = IMG_FALSE; + + return _ContextUnrefAndMaybeDestroy(psContext); +} + +PVRSRV_ERROR DIReadEntryKM(DI_CONTEXT *psContext, const IMG_CHAR *pszEntryPath, + IMG_UINT64 ui64Offset, IMG_UINT64 ui64Size) +{ + PVRSRV_ERROR eError; + struct DIIB_WORK_ITEM *psItem; + + PVR_LOG_RETURN_IF_INVALID_PARAM(psContext != NULL, "psContext"); + PVR_LOG_RETURN_IF_INVALID_PARAM(pszEntryPath != NULL, "pszEntryPath"); + + /* 'no stats' to avoid acquiring the process stats locks */ + psItem = OSAllocMemNoStats(sizeof(*psItem)); + PVR_LOG_GOTO_IF_NOMEM(psItem, eError, return_); + + psItem->psContext = psContext; + psItem->psEntry = DIImplBrgFind(pszEntryPath); + PVR_LOG_GOTO_IF_FALSE_VA(psItem->psEntry != NULL, free_item_, + "entry %s does not exist", pszEntryPath); + psItem->ui64Size = ui64Size; + psItem->ui64Offset = ui64Offset; + + /* increment ref count on the context so that it doesn't get freed + * before it gets processed by the writer thread. 
*/ + OSAtomicIncrement(&psContext->iRefCnt); + + OSLockAcquire(_g_psImpl->psWriterLock); + dllist_add_to_head(&_g_psImpl->sWriterQueue, &psItem->sQueueElement); + OSLockRelease(_g_psImpl->psWriterLock); + + eError = OSEventObjectSignal(_g_psImpl->hWriterEventObject); + PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); + + return PVRSRV_OK; + +free_item_: + eError = PVRSRV_ERROR_NOT_FOUND; + OSFreeMemNoStats(psItem); +return_: + return eError; +} + +PVRSRV_ERROR DIWriteEntryKM(DI_CONTEXT *psContext, const IMG_CHAR *pszEntryPath, + IMG_UINT64 ui64ValueSize, const IMG_CHAR *pszValue) +{ + DIIB_ENTRY *psEntry; + DI_PFN_WRITE pfnEntryPuts; + IMG_INT64 i64Length = 0; + + PVR_LOG_RETURN_IF_INVALID_PARAM(psContext != NULL, "psContext"); + PVR_LOG_RETURN_IF_INVALID_PARAM(pszEntryPath != NULL, "pszEntryPath"); + PVR_LOG_RETURN_IF_INVALID_PARAM(pszValue != NULL, "pszValue"); + + psEntry = DIImplBrgFind(pszEntryPath); + PVR_LOG_RETURN_IF_FALSE_VA(psEntry != NULL, PVRSRV_ERROR_NOT_FOUND, + "entry %s does not exist", pszEntryPath); + + pfnEntryPuts = psEntry->sIterCb.pfnWrite; + if (pfnEntryPuts != NULL) + { + i64Length = pfnEntryPuts(pszValue, ui64ValueSize, (IMG_UINT64*)&i64Length, psEntry->pvPrivData); + + /* To deal with -EINVAL being returned */ + PVR_LOG_RETURN_IF_INVALID_PARAM(i64Length >= 0, pszValue); + } + else + { + PVR_LOG_MSG(PVR_DBG_WARNING, "Unable to write to Entry. 
Write callback not enabled"); + return PVRSRV_ERROR_INVALID_REQUEST; + } + return PVRSRV_OK; +} + +static PVRSRV_ERROR _listName(uintptr_t k, + uintptr_t v, + void* hStream) +{ + PVRSRV_ERROR eError; + DIIB_ENTRY *psEntry; + IMG_UINT32 ui32Size; + IMG_CHAR aszName[DI_IMPL_BRG_PATH_LEN]; + + psEntry = (DIIB_ENTRY*) v; + PVR_ASSERT(psEntry != NULL); + PVR_UNREFERENCED_PARAMETER(k); + + ui32Size = OSSNPrintf(aszName, DI_IMPL_BRG_PATH_LEN, "%s\n", psEntry->pszFullPath); + PVR_LOG_IF_FALSE(ui32Size > 5, "ui32Size too small, Error suspected!"); + eError = TLStreamWrite(hStream, (IMG_UINT8 *)aszName, ui32Size+1); + + return eError; +} + + +PVRSRV_ERROR DIListAllEntriesKM(DI_CONTEXT *psContext) +{ + PVRSRV_ERROR eError; + + PVR_LOG_RETURN_IF_INVALID_PARAM(psContext != NULL, "psContext"); + + eError = HASH_Iterate(_g_psImpl->psEntriesTable, _listName, psContext->hStream); + PVR_LOG_IF_ERROR(eError, "HASH_Iterate_Extended"); + + eError = TLStreamMarkEOS(psContext->hStream, IMG_FALSE); + return eError; +} + +/* ----- DI implementation interface ---------------------------------------- */ + +static PVRSRV_ERROR _Init(void) +{ + PVRSRV_ERROR eError; + + _g_psImpl = OSAllocMem(sizeof(*_g_psImpl)); + PVR_LOG_GOTO_IF_NOMEM(_g_psImpl, eError, return_); + + _g_psImpl->psEntriesTable = HASH_Create_Extended(ENTRIES_TABLE_INIT_SIZE, + DI_IMPL_BRG_PATH_LEN, + _Hash, _Compare); + PVR_LOG_GOTO_IF_NOMEM(_g_psImpl->psEntriesTable, eError, free_impl_); + + eError = OSLockCreate(&_g_psImpl->psEntriesLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSCreateLock", free_table_); + + eError = OSLockCreate(&_g_psImpl->psWriterLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSCreateLock", free_entries_lock_); + + eError = OSEventObjectCreate("DI_WRITER_EO", + &_g_psImpl->hWriterEventObject); + PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", free_writer_lock_); + + _g_psImpl->hWriterThread = NULL; + OSAtomicWrite(&_g_psImpl->eThreadState, THREAD_STATE_NULL); + + dllist_init(&_g_psImpl->sWriterQueue); + + return 
PVRSRV_OK; + +free_writer_lock_: + OSLockDestroy(_g_psImpl->psWriterLock); +free_entries_lock_: + OSLockDestroy(_g_psImpl->psEntriesLock); +free_table_: + HASH_Delete_Extended(_g_psImpl->psEntriesTable, IMG_FALSE); +free_impl_: + OSFreeMem(_g_psImpl); + _g_psImpl = NULL; +return_: + return eError; +} + +static void _DeInit(void) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + THREAD_STATE eTState; + + eTState = OSAtomicCompareExchange(&_g_psImpl->eThreadState, + THREAD_STATE_ALIVE, + THREAD_STATE_TERMINATED); + + if (eTState == THREAD_STATE_ALIVE) + { + if (_g_psImpl->hWriterEventObject != NULL) + { + eError = OSEventObjectSignal(_g_psImpl->hWriterEventObject); + PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); + } + + LOOP_UNTIL_TIMEOUT(WRITER_THREAD_DESTROY_TIMEOUT) + { + eError = OSThreadDestroy(_g_psImpl->hWriterThread); + if (eError == PVRSRV_OK) + { + break; + } + OSWaitus(WRITER_THREAD_DESTROY_TIMEOUT/WRITER_THREAD_DESTROY_RETRIES); + } END_LOOP_UNTIL_TIMEOUT(); + + PVR_LOG_IF_ERROR(eError, "OSThreadDestroy"); + } + + if (_g_psImpl->hWriterEventObject != NULL) + { + eError = OSEventObjectDestroy(_g_psImpl->hWriterEventObject); + PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy"); + } + + HASH_Delete_Extended(_g_psImpl->psEntriesTable, IMG_FALSE); + OSLockDestroy(_g_psImpl->psWriterLock); + OSLockDestroy(_g_psImpl->psEntriesLock); + OSFreeMem(_g_psImpl); + _g_psImpl = NULL; +} + +/* Recursively traverses the ancestors list up to the root group and + * appends their names preceded by "/" to the path in reverse order + * (root group's name first and psGroup group's name last). + * Returns current offset in the path (the current path length without the + * NUL character). If there is no more space in the path returns -1 + * to indicate an error (the path is too long to fit into the buffer). 
*/ +static IMG_INT _BuildGroupPath(IMG_CHAR *pszPath, const DIIB_GROUP *psGroup) +{ + IMG_INT iOff; + + if (psGroup == NULL) + { + return 0; + } + + PVR_ASSERT(pszPath != NULL); + + iOff = _BuildGroupPath(pszPath, psGroup->psParentGroup); + PVR_RETURN_IF_FALSE(iOff != -1, -1); + + iOff += OSStringLCopy(pszPath + iOff, "/", + DI_IMPL_BRG_PATH_LEN - iOff); + PVR_RETURN_IF_FALSE(iOff < DI_IMPL_BRG_PATH_LEN, -1); + + iOff += OSStringLCopy(pszPath + iOff, psGroup->pszName, + DI_IMPL_BRG_PATH_LEN - iOff); + PVR_RETURN_IF_FALSE(iOff < DI_IMPL_BRG_PATH_LEN, -1); + + return iOff; +} + +static PVRSRV_ERROR _BuildEntryPath(IMG_CHAR *pszPath, const IMG_CHAR *pszName, + const DIIB_GROUP *psGroup) +{ + IMG_INT iOff = _BuildGroupPath(pszPath, psGroup); + PVR_RETURN_IF_FALSE(iOff != -1, PVRSRV_ERROR_INVALID_OFFSET); + + iOff += OSStringLCopy(pszPath + iOff, "/", DI_IMPL_BRG_PATH_LEN - iOff); + PVR_RETURN_IF_FALSE(iOff < DI_IMPL_BRG_PATH_LEN, + PVRSRV_ERROR_INVALID_OFFSET); + + iOff += OSStringLCopy(pszPath + iOff, pszName, DI_IMPL_BRG_PATH_LEN - iOff); + PVR_RETURN_IF_FALSE(iOff < DI_IMPL_BRG_PATH_LEN, + PVRSRV_ERROR_INVALID_OFFSET); + + return PVRSRV_OK; +} + +static PVRSRV_ERROR _CreateEntry(const IMG_CHAR *pszName, + DI_ENTRY_TYPE eType, + const DI_ITERATOR_CB *psIterCb, + void *pvPrivData, + void *pvParentGroup, + void **pvEntry) +{ + DIIB_GROUP *psParentGroup = pvParentGroup; + DIIB_ENTRY *psEntry; + PVRSRV_ERROR eError; + + PVR_LOG_RETURN_IF_INVALID_PARAM(pvEntry != NULL, "pvEntry"); + PVR_LOG_RETURN_IF_INVALID_PARAM(pvParentGroup != NULL, "pvParentGroup"); + + switch (eType) + { + case DI_ENTRY_TYPE_GENERIC: + break; + case DI_ENTRY_TYPE_RANDOM_ACCESS: + break; + default: + PVR_DPF((PVR_DBG_ERROR, "eType invalid in %s()", __func__)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, return_); + } + + psEntry = OSAllocMem(sizeof(*psEntry)); + PVR_LOG_GOTO_IF_NOMEM(psEntry, eError, return_); + + eError = OSLockCreate(&psEntry->hLock); + PVR_LOG_GOTO_IF_ERROR(eError, 
"OSLockCreate", free_entry_); + + psEntry->eType = eType; + psEntry->sIterCb = *psIterCb; + psEntry->pvPrivData = pvPrivData; + psEntry->psParentGroup = psParentGroup; + psEntry->pszFullPath[0] = '\0'; + + psEntry->sImplEntry.pvPrivData = pvPrivData; + psEntry->sImplEntry.pvNative = NULL; + psEntry->sImplEntry.psCb = &_g_sEntryCallbacks; + + eError = _BuildEntryPath(psEntry->pszFullPath, pszName, + psEntry->psParentGroup); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s() failed in _BuildEntryPath() for \"%s\" " + "entry", __func__, pszName)); + goto destroy_lock_; + } + + OSLockAcquire(_g_psImpl->psEntriesLock); + eError = HASH_Insert_Extended(_g_psImpl->psEntriesTable, + psEntry->pszFullPath, + (uintptr_t) psEntry) ? + PVRSRV_OK : PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE; + OSLockRelease(_g_psImpl->psEntriesLock); + PVR_LOG_GOTO_IF_ERROR(eError, "HASH_Insert_Extended failed", destroy_lock_); + + *pvEntry = psEntry; + + return PVRSRV_OK; + +destroy_lock_: + OSLockDestroy(psEntry->hLock); +free_entry_: + OSFreeMem(psEntry); +return_: + return eError; +} + +static void _DestroyEntry(void *pvEntry) +{ + DIIB_ENTRY *psEntry = pvEntry; + PVR_ASSERT(psEntry != NULL); + + OSLockAcquire(_g_psImpl->psEntriesLock); + HASH_Remove_Extended(_g_psImpl->psEntriesTable, psEntry->pszFullPath); + OSLockRelease(_g_psImpl->psEntriesLock); + + OSLockDestroy(psEntry->hLock); + OSFreeMem(psEntry); +} + +static PVRSRV_ERROR _CreateGroup(const IMG_CHAR *pszName, + void *pvParentGroup, + void **ppvGroup) +{ + DIIB_GROUP *psNewGroup; + + PVR_LOG_RETURN_IF_INVALID_PARAM(pszName != NULL, "pszName"); + PVR_LOG_RETURN_IF_INVALID_PARAM(ppvGroup != NULL, "ppvGroup"); + + psNewGroup = OSAllocMem(sizeof(*psNewGroup)); + PVR_LOG_RETURN_IF_NOMEM(psNewGroup, "OSAllocMem"); + + psNewGroup->pszName = pszName; + psNewGroup->psParentGroup = pvParentGroup; + + *ppvGroup = psNewGroup; + + return PVRSRV_OK; +} + +static void _DestroyGroup(void *pvGroup) +{ + DIIB_GROUP *psGroup = pvGroup; + 
PVR_ASSERT(psGroup != NULL); + + OSFreeMem(psGroup); +} + +PVRSRV_ERROR PVRDIImplBrgRegister(void) +{ + OSDI_IMPL_CB sImplCb = { + .pfnInit = _Init, + .pfnDeInit = _DeInit, + .pfnCreateEntry = _CreateEntry, + .pfnDestroyEntry = _DestroyEntry, + .pfnCreateGroup = _CreateGroup, + .pfnDestroyGroup = _DestroyGroup + }; + + return DIRegisterImplementation("impl_brg", &sImplCb); +} diff --git a/drivers/gpu/drm/phytium/octopus/di_impl_brg.h b/drivers/gpu/drm/phytium/octopus/di_impl_brg.h new file mode 100644 index 000000000000..eb5dd0796d6e --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/di_impl_brg.h @@ -0,0 +1,92 @@ +/*************************************************************************/ /*! +@File +@Title OS agnostic implementation of Debug Info interface. +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef PVR_IMPL_BRG_H +#define PVR_IMPL_BRG_H + +#include "pvrsrv_error.h" + +typedef struct DI_CONTEXT_TAG DI_CONTEXT; +typedef struct DI_ENTRY_DESC DI_ENTRY_DESC; + +PVRSRV_ERROR PVRDIImplBrgRegister(void); + +/*! @Function DICreateContextKM + * + * @Description + * Creates DI context which among others also creates a TL stream for reading + * entries. 
+ * + * @Output pszStreamName: name of the TL stream created in this context + * @Output ppsContext: pointer to the new context + * + * @Return PVRSRV_ERROR error code + * PVRSRV_OK in case of a success + * PVRSRV_ERROR_INVALID_PARAMS if any of the parameters is invalid + * PVRSRV_ERROR_OUT_OF_MEMORY if any of the memory allocations failed + * error codes returned by TLStreamCreate() + */ +PVRSRV_ERROR DICreateContextKM(IMG_CHAR *pszStreamName, + DI_CONTEXT **ppsContext); + +/*! @Function DIDestroyContextKM + * + * @Description + * Destroy the DI context and all underlying dependencies. + * + * @Input psContext: pointer to the context + * + * @Return PVRSRV_ERROR error code + * PVRSRV_OK in case of a success + * PVRSRV_ERROR_INVALID_PARAMS if invalid context pointer given + */ +PVRSRV_ERROR DIDestroyContextKM(DI_CONTEXT *psContext); + +PVRSRV_ERROR DIReadEntryKM(DI_CONTEXT *psContext, const IMG_CHAR *pszEntryPath, + IMG_UINT64 ui64Offset, IMG_UINT64 ui64Size); + +PVRSRV_ERROR DIWriteEntryKM(DI_CONTEXT *psContext, const IMG_CHAR *pszEntryPath, + IMG_UINT64 ui64ValueSize, const IMG_CHAR *pszValue); + +PVRSRV_ERROR DIListAllEntriesKM(DI_CONTEXT *psContext); + +#endif /* PVR_IMPL_BRG_H */ diff --git a/drivers/gpu/drm/phytium/octopus/di_impl_brg_intern.h b/drivers/gpu/drm/phytium/octopus/di_impl_brg_intern.h new file mode 100644 index 000000000000..62facc99ad35 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/di_impl_brg_intern.h @@ -0,0 +1,61 @@ +/*************************************************************************/ /*! +@File +@Title OS agnostic implementation of Debug Info internal interface. +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef PVR_IMPL_BRG_INTERN_H +#define PVR_IMPL_BRG_INTERN_H + +typedef struct DIIB_GROUP DIIB_GROUP; +typedef struct DIIB_ENTRY DIIB_ENTRY; + +/*! @Function DIImplBrgFind + * + * @Description + * Retrieves an entry based on a given path. + * + * @Input pszPath: Full entry path in form of + * /rootGroup/.../parentGroup/entryName. + * + * @Return Returns entry object if exists or NULL otherwise. + */ +DIIB_ENTRY *DIImplBrgFind(const IMG_CHAR *pszPath); + +#endif /* PVR_IMPL_BRG_INTERN_H */ diff --git a/drivers/gpu/drm/phytium/octopus/di_server.c b/drivers/gpu/drm/phytium/octopus/di_server.c new file mode 100644 index 000000000000..0357336cf670 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/di_server.c @@ -0,0 +1,744 @@ +/*************************************************************************/ /*! +@File +@Title Debug Info framework functions and types. +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "di_server.h" +#include "osdi_impl.h" +#include "pvrsrv_error.h" +#include "dllist.h" +#include "lock.h" +#include "allocmem.h" + +#define ROOT_GROUP_NAME PVR_DRM_NAME + +/*! Implementation object. */ +typedef struct DI_IMPL +{ + const IMG_CHAR *pszName; /*pszName = ROOT_GROUP_NAME; + + dllist_init(&_g_psRootGroup->sListNode); + dllist_init(&_g_psRootGroup->sGroupList); + dllist_init(&_g_psRootGroup->sEntryList); + dllist_init(&_g_psRootGroup->sNativeHandleList); + + return PVRSRV_OK; + +destroy_lock_: + OSLockDestroy(_g_hLock); +return_: + return eError; +} + +/* Destroys the whole tree of group and entries for a given group as a root. 
*/ +static void _DeInitGroupRecursively(DI_GROUP *psGroup) +{ + DLLIST_NODE *psThis, *psNext; + + dllist_foreach_node(&psGroup->sEntryList, psThis, psNext) + { + DI_ENTRY *psThisEntry = IMG_CONTAINER_OF(psThis, DI_ENTRY, sListNode); + DIDestroyEntry(psThisEntry); + } + + dllist_foreach_node(&psGroup->sGroupList, psThis, psNext) + { + DI_GROUP *psThisGroup = IMG_CONTAINER_OF(psThis, DI_GROUP, sListNode); + + _DeInitGroupRecursively(psThisGroup); + } + + DIDestroyGroup(psGroup); +} + +void DIDeInit(void) +{ + DLLIST_NODE *psThis, *psNext; + + OSLockAcquire(_g_hLock); + + if (!dllist_is_empty(&_g_psRootGroup->sGroupList) || + !dllist_is_empty(&_g_psRootGroup->sEntryList)) + { + PVR_DPF((PVR_DBG_WARNING, "%s: entries or groups still exist during " + "de-initialisation process, destroying all", __func__)); + } + + _DeInitGroupRecursively(_g_psRootGroup); + _g_psRootGroup = NULL; + + /* Remove all of the implementations. */ + dllist_foreach_node(&_g_sImpls, psThis, psNext) + { + DI_IMPL *psDiImpl = IMG_CONTAINER_OF(psThis, DI_IMPL, sListNode); + + if (psDiImpl->bInitialised) + { + psDiImpl->sCb.pfnDeInit(); + psDiImpl->bInitialised = IMG_FALSE; + } + + dllist_remove_node(&psDiImpl->sListNode); + OSFreeMem(psDiImpl); + } + + OSLockRelease(_g_hLock); + + /* all resources freed so free the lock itself too */ + + OSLockDestroy(_g_hLock); +} + +static IMG_BOOL _ValidateIteratorCb(const DI_ITERATOR_CB *psIterCb, + DI_ENTRY_TYPE eType) +{ + IMG_UINT32 uiFlags = 0; + + if (psIterCb == NULL) + { + return IMG_FALSE; + } + + if (eType == DI_ENTRY_TYPE_GENERIC) + { + uiFlags |= psIterCb->pfnShow != NULL ? BIT(0) : 0; + uiFlags |= psIterCb->pfnStart != NULL ? BIT(1) : 0; + uiFlags |= psIterCb->pfnStop != NULL ? BIT(2) : 0; + uiFlags |= psIterCb->pfnNext != NULL ? 
BIT(3) : 0; + + /* either only pfnShow or all callbacks need to be set */ + if (uiFlags != BIT(0) && !BITMASK_HAS(uiFlags, 0x0f)) + { + return IMG_FALSE; + } + } + else if (eType == DI_ENTRY_TYPE_RANDOM_ACCESS) + { + uiFlags |= psIterCb->pfnRead != NULL ? BIT(0) : 0; + uiFlags |= psIterCb->pfnSeek != NULL ? BIT(1) : 0; + + /* either only pfnRead or all callbacks need to be set */ + if (uiFlags != BIT(0) && !BITMASK_HAS(uiFlags, 0x03)) + { + return IMG_FALSE; + } + } + else + { + return IMG_FALSE; + } + + return IMG_TRUE; +} + +static PVRSRV_ERROR _CreateNativeEntry(DI_ENTRY *psEntry, + const DI_NATIVE_HANDLE *psNativeParent) +{ + PVRSRV_ERROR eError; + DI_IMPL *psImpl = psNativeParent->psDiImpl; + + DI_NATIVE_HANDLE *psNativeEntry = OSAllocMem(sizeof(*psNativeEntry)); + PVR_LOG_GOTO_IF_NOMEM(psNativeEntry, eError, return_); + + eError = psImpl->sCb.pfnCreateEntry(psEntry->pszName, + psEntry->eType, + &psEntry->sIterCb, + psEntry->pvPrivData, + psNativeParent->pvHandle, + &psNativeEntry->pvHandle); + PVR_LOG_GOTO_IF_ERROR(eError, "psImpl->sCb.pfnCreateGroup", free_memory_); + + psNativeEntry->psDiImpl = psImpl; + + dllist_add_to_head(&psEntry->sNativeHandleList, &psNativeEntry->sListNode); + + return PVRSRV_OK; + +free_memory_: + OSFreeMem(psNativeEntry); +return_: + return eError; +} + +static void _DestroyNativeEntry(DI_NATIVE_HANDLE *psNativeEntry) +{ + dllist_remove_node(&psNativeEntry->sListNode); + OSFreeMem(psNativeEntry); +} + +PVRSRV_ERROR DICreateEntry(const IMG_CHAR *pszName, + DI_GROUP *psGroup, + const DI_ITERATOR_CB *psIterCb, + void *pvPriv, + DI_ENTRY_TYPE eType, + DI_ENTRY **ppsEntry) +{ + PVRSRV_ERROR eError; + DLLIST_NODE *psThis, *psNext; + DI_ENTRY *psEntry; + + PVR_LOG_RETURN_IF_INVALID_PARAM(pszName != NULL, "pszName"); + PVR_LOG_RETURN_IF_INVALID_PARAM(_ValidateIteratorCb(psIterCb, eType), + "psIterCb"); + PVR_LOG_RETURN_IF_INVALID_PARAM(ppsEntry != NULL, "psEntry"); + + psEntry = OSAllocMem(sizeof(*psEntry)); + 
PVR_LOG_RETURN_IF_NOMEM(psEntry, "OSAllocMem"); + + if (psGroup == NULL) + { + psGroup = _g_psRootGroup; + } + + psEntry->pszName = pszName; + psEntry->pvPrivData = pvPriv; + psEntry->eType = eType; + psEntry->sIterCb = *psIterCb; + dllist_init(&psEntry->sNativeHandleList); + + OSLockAcquire(_g_hLock); + + dllist_add_to_tail(&psGroup->sEntryList, &psEntry->sListNode); + + /* Iterate over all of the native handles of parent group to create + * the entry for every registered implementation. */ + dllist_foreach_node(&psGroup->sNativeHandleList, psThis, psNext) + { + DI_NATIVE_HANDLE *psNativeGroup = + IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE, sListNode); + + eError = _CreateNativeEntry(psEntry, psNativeGroup); + PVR_GOTO_IF_ERROR(eError, cleanup_); + } + + OSLockRelease(_g_hLock); + + *ppsEntry = psEntry; + + return PVRSRV_OK; + +cleanup_: + OSLockRelease(_g_hLock); + + /* Something went wrong so if there were any native entries created remove + * them from the list, free them and free the DI entry itself. */ + dllist_foreach_node(&psEntry->sNativeHandleList, psThis, psNext) + { + DI_NATIVE_HANDLE *psNativeEntry = + IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE, sListNode); + + _DestroyNativeEntry(psNativeEntry); + } + + OSFreeMem(psEntry); + + return eError; +} + +void DIDestroyEntry(DI_ENTRY *psEntry) +{ + DLLIST_NODE *psThis, *psNext; + + PVR_LOG_RETURN_VOID_IF_FALSE(psEntry != NULL, + "psEntry invalid in DIDestroyEntry()"); + + /* Iterate through all of the native entries of the DI entry, remove + * them from the list and then destroy them. After that, destroy the + * DI entry itself. */ + dllist_foreach_node(&psEntry->sNativeHandleList, psThis, psNext) + { + DI_NATIVE_HANDLE *psNative = IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE, + sListNode); + + /* The implementation must ensure that entry is not removed if any + * operations are being executed on the entry. 
If this is the case + * the implementation should block until all of them are finished + * and prevent any further operations. + * This will guarantee proper synchronisation between the DI framework + * and underlying implementations and prevent destruction/access + * races. */ + psNative->psDiImpl->sCb.pfnDestroyEntry(psNative->pvHandle); + dllist_remove_node(&psNative->sListNode); + OSFreeMem(psNative); + } + + dllist_remove_node(&psEntry->sListNode); + + OSFreeMem(psEntry); +} + +static PVRSRV_ERROR _CreateNativeGroup(DI_GROUP *psGroup, + const DI_NATIVE_HANDLE *psNativeParent, + DI_NATIVE_HANDLE **ppsNativeGroup) +{ + PVRSRV_ERROR eError; + DI_IMPL *psImpl = psNativeParent->psDiImpl; + + DI_NATIVE_HANDLE *psNativeGroup = OSAllocMem(sizeof(*psNativeGroup)); + PVR_LOG_GOTO_IF_NOMEM(psNativeGroup, eError, return_); + + eError = psImpl->sCb.pfnCreateGroup(psGroup->pszName, + psNativeParent->pvHandle, + &psNativeGroup->pvHandle); + PVR_LOG_GOTO_IF_ERROR(eError, "psImpl->sCb.pfnCreateGroup", free_memory_); + + psNativeGroup->psDiImpl = psImpl; + + dllist_add_to_head(&psGroup->sNativeHandleList, &psNativeGroup->sListNode); + + *ppsNativeGroup = psNativeGroup; + + return PVRSRV_OK; + +free_memory_: + OSFreeMem(psNativeGroup); +return_: + return eError; +} + +static void _DestroyNativeGroup(DI_NATIVE_HANDLE *psNativeEntry) +{ + dllist_remove_node(&psNativeEntry->sListNode); + OSFreeMem(psNativeEntry); +} + +PVRSRV_ERROR DICreateGroup(const IMG_CHAR *pszName, + DI_GROUP *psParent, + DI_GROUP **ppsGroup) +{ + PVRSRV_ERROR eError; + DLLIST_NODE *psThis, *psNext; + DI_GROUP *psGroup; + + PVR_LOG_RETURN_IF_INVALID_PARAM(pszName != NULL, "pszName"); + PVR_LOG_RETURN_IF_INVALID_PARAM(ppsGroup != NULL, "ppsDiGroup"); + + psGroup = OSAllocMem(sizeof(*psGroup)); + PVR_LOG_RETURN_IF_NOMEM(psGroup, "OSAllocMem"); + + if (psParent == NULL) + { + psParent = _g_psRootGroup; + } + + psGroup->pszName = pszName; + psGroup->psParent = psParent; + dllist_init(&psGroup->sGroupList); + 
dllist_init(&psGroup->sEntryList); + dllist_init(&psGroup->sNativeHandleList); + + OSLockAcquire(_g_hLock); + + dllist_add_to_tail(&psParent->sGroupList, &psGroup->sListNode); + + /* Iterate over all of the native handles of parent group to create + * the group for every registered implementation. */ + dllist_foreach_node(&psParent->sNativeHandleList, psThis, psNext) + { + DI_NATIVE_HANDLE *psNativeGroup = NULL, *psNativeParent = + IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE, sListNode); + + eError = _CreateNativeGroup(psGroup, psNativeParent, &psNativeGroup); + PVR_GOTO_IF_ERROR(eError, cleanup_); + } + + OSLockRelease(_g_hLock); + + *ppsGroup = psGroup; + + return PVRSRV_OK; + +cleanup_: + OSLockRelease(_g_hLock); + + /* Something went wrong so if there were any native groups created remove + * them from the list, free them and free the DI group itself. */ + dllist_foreach_node(&psGroup->sNativeHandleList, psThis, psNext) + { + DI_NATIVE_HANDLE *psNativeGroup = + IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE, sListNode); + + dllist_remove_node(&psNativeGroup->sListNode); + OSFreeMem(psNativeGroup); + } + + OSFreeMem(psGroup); + + return eError; +} + +void DIDestroyGroup(DI_GROUP *psGroup) +{ + DLLIST_NODE *psThis, *psNext; + + PVR_LOG_RETURN_VOID_IF_FALSE(psGroup != NULL, + "psGroup invalid in DIDestroyGroup()"); + + /* Iterate through all of the native groups of the DI group, remove + * them from the list and then destroy them. After that destroy the + * DI group itself. 
*/ + dllist_foreach_node(&psGroup->sNativeHandleList, psThis, psNext) + { + DI_NATIVE_HANDLE *psNative = IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE, + sListNode); + + psNative->psDiImpl->sCb.pfnDestroyGroup(psNative->pvHandle); + dllist_remove_node(&psNative->sListNode); + OSFreeMem(psNative); + } + + dllist_remove_node(&psGroup->sListNode); + + OSFreeMem(psGroup); +} + +void *DIGetPrivData(const OSDI_IMPL_ENTRY *psEntry) +{ + PVR_ASSERT(psEntry != NULL); + + return psEntry->pvPrivData; +} + +void DIPrintf(const OSDI_IMPL_ENTRY *psEntry, const IMG_CHAR *pszFmt, ...) +{ + va_list args; + + PVR_ASSERT(psEntry != NULL); + PVR_ASSERT(psEntry->psCb != NULL); + PVR_ASSERT(psEntry->psCb->pfnVPrintf != NULL); + PVR_ASSERT(psEntry->pvNative != NULL); + + va_start(args, pszFmt); + psEntry->psCb->pfnVPrintf(psEntry->pvNative, pszFmt, args); + va_end(args); +} + +void DIPuts(const OSDI_IMPL_ENTRY *psEntry, const IMG_CHAR *pszStr) +{ + PVR_ASSERT(psEntry != NULL); + PVR_ASSERT(psEntry->psCb != NULL); + PVR_ASSERT(psEntry->psCb->pfnPuts != NULL); + PVR_ASSERT(psEntry->pvNative != NULL); + + psEntry->psCb->pfnPuts(psEntry->pvNative, pszStr); +} + +IMG_BOOL DIHasOverflowed(const OSDI_IMPL_ENTRY *psEntry) +{ + PVR_ASSERT(psEntry != NULL); + PVR_ASSERT(psEntry->psCb != NULL); + PVR_ASSERT(psEntry->psCb->pfnHasOverflowed != NULL); + PVR_ASSERT(psEntry->pvNative != NULL); + + return psEntry->psCb->pfnHasOverflowed(psEntry->pvNative); +} + +/* ---- OS implementation API ---------------------------------------------- */ + +static IMG_BOOL _ValidateImplCb(const OSDI_IMPL_CB *psImplCb) +{ + PVR_GOTO_IF_FALSE(psImplCb->pfnInit != NULL, failed_); + PVR_GOTO_IF_FALSE(psImplCb->pfnDeInit != NULL, failed_); + PVR_GOTO_IF_FALSE(psImplCb->pfnCreateGroup != NULL, failed_); + PVR_GOTO_IF_FALSE(psImplCb->pfnDestroyGroup != NULL, failed_); + PVR_GOTO_IF_FALSE(psImplCb->pfnCreateEntry != NULL, failed_); + PVR_GOTO_IF_FALSE(psImplCb->pfnDestroyEntry != NULL, failed_); + + return IMG_TRUE; + +failed_: + 
return IMG_FALSE; +} + +/* Walks the tree of groups and entries and create all of the native handles + * for the given implementation for all of the already existing groups and + * entries. */ +static PVRSRV_ERROR _InitNativeHandlesRecursively(DI_IMPL *psImpl, + DI_GROUP *psGroup, + DI_NATIVE_HANDLE *psNativeParent) +{ + PVRSRV_ERROR eError; + DLLIST_NODE *psThis, *psNext; + DI_NATIVE_HANDLE *psNativeGroup; + + psNativeGroup = OSAllocMem(sizeof(*psNativeGroup)); + PVR_LOG_RETURN_IF_NOMEM(psNativeGroup, "OSAllocMem"); + + eError = psImpl->sCb.pfnCreateGroup(psGroup->pszName, + psNativeParent ? psNativeParent->pvHandle : NULL, + &psNativeGroup->pvHandle); + PVR_LOG_GOTO_IF_ERROR(eError, "psImpl->sCb.pfnCreateGroup", free_memory_); + + psNativeGroup->psDiImpl = psImpl; + + dllist_add_to_head(&psGroup->sNativeHandleList, + &psNativeGroup->sListNode); + + dllist_foreach_node(&psGroup->sGroupList, psThis, psNext) + { + DI_GROUP *psThisGroup = IMG_CONTAINER_OF(psThis, DI_GROUP, sListNode); + + // and then walk the new group + eError = _InitNativeHandlesRecursively(psImpl, psThisGroup, + psNativeGroup); + PVR_LOG_RETURN_IF_ERROR(eError, "_InitNativeHandlesRecursively"); + } + + dllist_foreach_node(&psGroup->sEntryList, psThis, psNext) + { + DI_ENTRY *psThisEntry = IMG_CONTAINER_OF(psThis, DI_ENTRY, sListNode); + + eError = _CreateNativeEntry(psThisEntry, psNativeGroup); + PVR_LOG_RETURN_IF_ERROR(eError, "_CreateNativeEntry"); + } + + return PVRSRV_OK; + +free_memory_: + OSFreeMem(psNativeGroup); + + return eError; +} + +/* Walks the tree of groups and entries and destroys all of the native handles + * for the given implementation. 
*/ +static void _DeInitNativeHandlesRecursively(DI_IMPL *psImpl, DI_GROUP *psGroup) +{ + DLLIST_NODE *psThis, *psNext; + + dllist_foreach_node(&psGroup->sEntryList, psThis, psNext) + { + DI_ENTRY *psThisEntry = IMG_CONTAINER_OF(psThis, DI_ENTRY, sListNode); + + // free all of the native entries that belong to this implementation + dllist_foreach_node(&psThisEntry->sNativeHandleList, psThis, psNext) + { + DI_NATIVE_HANDLE *psNativeEntry = + IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE, sListNode); + + if (psNativeEntry->psDiImpl == psImpl) + { + _DestroyNativeEntry(psNativeEntry); + // there can be only one entry on the list for a given + // implementation + break; + } + } + } + + dllist_foreach_node(&psGroup->sGroupList, psThis, psNext) + { + DI_GROUP *psThisGroup = IMG_CONTAINER_OF(psThis, DI_GROUP, sListNode); + + // and then walk the new group + _DeInitNativeHandlesRecursively(psImpl, psThisGroup); + } + + // free all of the native entries that belong to this implementation + dllist_foreach_node(&psGroup->sNativeHandleList, psThis, psNext) + { + DI_NATIVE_HANDLE *psNativeGroup = + IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE, sListNode); + + if (psNativeGroup->psDiImpl == psImpl) + { + _DestroyNativeGroup(psNativeGroup); + // there can be only one entry on the list for a given + // implementation + break; + } + } +} + +static PVRSRV_ERROR _InitImpl(DI_IMPL *psImpl) +{ + PVRSRV_ERROR eError; + // DI_NATIVE_HANDLE *psNativeGroup; + + eError = psImpl->sCb.pfnInit(); + PVR_LOG_GOTO_IF_ERROR(eError, "psImpl->pfnInit()", return_); + + /* if the implementation is being created after any groups or entries + * have been created we need to walk the current tree and create + * native groups and entries for all of the existing ones */ + eError = _InitNativeHandlesRecursively(psImpl, _g_psRootGroup, NULL); + PVR_LOG_GOTO_IF_ERROR(eError, "_InitNativeHandlesRecursively", + free_native_handles_and_deinit_); + + psImpl->bInitialised = IMG_TRUE; + + return PVRSRV_OK; + 
+free_native_handles_and_deinit_: + /* something went wrong so we need to walk the tree and remove all of the + * native entries and groups that we've created before we can destroy + * the implementation */ + _DeInitNativeHandlesRecursively(psImpl, _g_psRootGroup); + psImpl->sCb.pfnDeInit(); +return_: + return eError; +} + +PVRSRV_ERROR DIRegisterImplementation(const IMG_CHAR *pszName, + const OSDI_IMPL_CB *psImplCb) +{ + DI_IMPL *psImpl; + PVRSRV_ERROR eError; + + PVR_LOG_RETURN_IF_INVALID_PARAM(pszName != NULL, "pszName"); + PVR_LOG_RETURN_IF_INVALID_PARAM(_ValidateImplCb(psImplCb), "psImplCb"); + /* if root group does not exist it can mean 2 things: + * - DIInit() was not called so initialisation order is incorrect and needs + * to be fixed + * - DIInit() failed but if that happens we should never make it here */ + PVR_ASSERT(_g_psRootGroup != NULL); + + psImpl = OSAllocMem(sizeof(*psImpl)); + PVR_LOG_RETURN_IF_NOMEM(psImpl, "OSAllocMem"); + + psImpl->pszName = pszName; + psImpl->sCb = *psImplCb; + + OSLockAcquire(_g_hLock); + + eError = _InitImpl(psImpl); + if (eError != PVRSRV_OK) + { + /* implementation could not be initialised so remove it from the + * list, free the memory and forget about it */ + + PVR_DPF((PVR_DBG_ERROR, "%s: could not initialise \"%s\" debug " + "info implementation, discarding", __func__, + psImpl->pszName)); + + goto free_impl_; + } + + psImpl->bInitialised = IMG_TRUE; + + dllist_add_to_tail(&_g_sImpls, &psImpl->sListNode); + + OSLockRelease(_g_hLock); + + return PVRSRV_OK; + +free_impl_: + OSLockRelease(_g_hLock); + + OSFreeMem(psImpl); + + return eError; +} diff --git a/drivers/gpu/drm/phytium/octopus/di_server.h b/drivers/gpu/drm/phytium/octopus/di_server.h new file mode 100644 index 000000000000..e5b640b63567 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/di_server.h @@ -0,0 +1,183 @@ +/*************************************************************************/ /*! 
+@File +@Title Functions for creating Debug Info groups and entries. +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef DI_SERVER_H +#define DI_SERVER_H + +#include + +#include "di_common.h" +#include "pvrsrv_error.h" +#include "img_defs.h" + +/*! @Function DIInit + * + * @Description + * Initialises Debug Info framework. This function will create common resources + * for the framework. + * + * Note: This function must be called before first call to + * DIRegisterImplementation() all of the implementations. + */ +PVRSRV_ERROR DIInit(void); + +/*! @Function DIDeInit + * + * @Description + * De-initialises Debug Info framework. This function will call pfnDeInit() + * on each implementation and clean up common resources. + * + * In case some of the entries and groups have not been cleaned up this function + * will also perform recursive sweep and remove all entries and group for + * all implementations. + */ +void DIDeInit(void); + +/*! @Function DICreateEntry + * + * @Description + * Creates debug info entry. Depending on different implementations the entry + * might be for example a DebugFS file or something totally different. + * + * The entry will belong to a parent group if provided or to the root group + * if not. 
+ * + * @Input pszName: name of the new entry + * @Input psDiGroup: parent group, if NULL entry will belong to the root group + * @Input psIterCb: implementation of the iterator for the entry + * @Input psPriv: private data that will be passed to the iterator operations + * @Input eType: type of the entry + * + * @Output ppsEntry: handle to the newly created entry + * + * @Return PVRSRV_ERROR error code + */ +PVRSRV_ERROR DICreateEntry(const IMG_CHAR *pszName, + DI_GROUP *psGroup, + const DI_ITERATOR_CB *psIterCb, + void *psPriv, + DI_ENTRY_TYPE eType, + DI_ENTRY **ppsEntry); + +/*! @Function DIDestroyEntry + * + * @Description + * Destroys debug info entry. + * + * @Input psEntry: handle to the entry + */ +void DIDestroyEntry(DI_ENTRY *psEntry); + +/*! @Function DICreateGroup + * + * @Description + * Creates debug info group. Depending on different implementations the group + * might be for example a DebugFS directory or something totally different. + * + * The group will belong to a parent group if provided or to the root group + * if not. + * + * @Input pszName: name of the new entry + * @Input psParent: parent group, if NULL entry will belong to the root group + * + * @Output ppsGroup: handle to the newly created entry + * + * @Return PVRSRV_ERROR error code + */ +PVRSRV_ERROR DICreateGroup(const IMG_CHAR *pszName, + DI_GROUP *psParent, + DI_GROUP **ppsGroup); + +/*! @Function DIDestroyGroup + * + * @Description + * Destroys debug info group. + * + * @Input psGroup: handle to the group + */ +void DIDestroyGroup(DI_GROUP *psGroup); + +/*! @Function DIGetPrivData + * + * @Description + * Retrieves private data from psEntry. The data is either passed during + * entry creation via psPriv parameter of DICreateEntry() function + * or by explicitly setting it with DIGetPrivData() function. 
+ *
+ * @Input psEntry pointer to OSDI_IMPL_ENTRY object
+ *
+ * @Returns pointer to the private data (can be NULL if private data
+ * has not been specified)
+ */
+void *DIGetPrivData(const OSDI_IMPL_ENTRY *psEntry);
+
+/*! @Function DIPrintf
+ *
+ * @Description
+ * Prints formatted string to the DI entry.
+ *
+ * @Input psEntry pointer to OSDI_IMPL_ENTRY object
+ * @Input pszFmt NUL-terminated format string
+ */
+void DIPrintf(const OSDI_IMPL_ENTRY *psEntry, const IMG_CHAR *pszFmt, ...)
+	__printf(2, 3);
+
+/*! @Function DIPuts
+ *
+ * @Description
+ * Prints a string to the DI entry.
+ *
+ * @Input psEntry pointer to OSDI_IMPL_ENTRY object
+ * @Input pszStr NUL-terminated string
+ */
+void DIPuts(const OSDI_IMPL_ENTRY *psEntry, const IMG_CHAR *pszStr);
+
+/*! @Function DIHasOverflowed
+ *
+ * @Description
+ * Checks if the DI buffer has overflowed.
+ *
+ * @Return IMG_TRUE if buffer overflowed
+ */
+IMG_BOOL DIHasOverflowed(const OSDI_IMPL_ENTRY *psEntry);
+
+#endif /* DI_SERVER_H */
diff --git a/drivers/gpu/drm/phytium/octopus/dllist.h b/drivers/gpu/drm/phytium/octopus/dllist.h
new file mode 100644
index 000000000000..1d9b7eb9e709
--- /dev/null
+++ b/drivers/gpu/drm/phytium/octopus/dllist.h
@@ -0,0 +1,408 @@
+/*************************************************************************/ /*!
+@File
+@Title          Double linked list header
+@Copyright      Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved
+@Description    Double linked list interface
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef DLLIST_H +#define DLLIST_H + +#include "img_types.h" +#include "img_defs.h" + +/*! + Pointer to a linked list node +*/ +typedef struct DLLIST_NODE_ *PDLLIST_NODE; + + +/*! + Node in a linked list +*/ +/* + * Note: the following structure's size is architecture-dependent and clients + * may need to create a mirror of the structure definition if it needs to be + * used in a structure shared between host and device. + * Consider such clients if any changes are made to this structure. + */ +typedef struct DLLIST_NODE_ +{ + struct DLLIST_NODE_ *psPrevNode; + struct DLLIST_NODE_ *psNextNode; +} DLLIST_NODE; + + +/*! + Static initialiser +*/ +#define DECLARE_DLLIST(n) \ +DLLIST_NODE (n) = {&(n), &(n)} + +/*************************************************************************/ /*! +@Function dllist_foreach_node + +@Description Walk through all the nodes on the list. + Safe against removal of (node). + +@Input list_head List node to start the operation +@Input node Current list node +@Input next Node after the current one + +*/ +/*****************************************************************************/ +#define dllist_foreach_node(list_head, node, next) \ + for ((node) = (list_head)->psNextNode, (next) = (node)->psNextNode; \ + (node) != (list_head); \ + (node) = (next), (next) = (node)->psNextNode) + +#define dllist_foreach_node_backwards(list_head, node, prev) \ + for ((node) = (list_head)->psPrevNode, (prev) = (node)->psPrevNode; \ + (node) != (list_head); \ + (node) = (prev), (prev) = (node)->psPrevNode) + + +/*************************************************************************/ /*! +@Function dllist_foreach + +@Description Simplification of dllist_foreach_node. + Walk through all the nodes on the list. + Safe against removal of currently-iterated node. + + Adds utility-macro dllist_cur() to typecast the current node. 
+ +@Input list_head List node to start the operation + +*/ +/*****************************************************************************/ +#define dllist_foreach(list_head) \ + for (DLLIST_NODE *_DllNode = (list_head).psNextNode, *_DllNext = _DllNode->psNextNode; \ + _DllNode != &(list_head); \ + _DllNode = _DllNext, _DllNext = _DllNode->psNextNode) + +#define dllist_foreach_backwards(list_head) \ + for (DLLIST_NODE *_DllNode = (list_head).psPrevNode, *_DllPrev = _DllNode->psPrevNode; \ + _DllNode != &(list_head); \ + _DllNode = _DllPrev, _DllPrev = _DllNode->psPrevNode) + +#define dllist_cur(type, member) IMG_CONTAINER_OF(_DllNode, type, member) + +/*************************************************************************/ /*! +@Function dllist_init + +@Description Initialize a new double linked list + +@Input psListHead List head Node + +*/ +/*****************************************************************************/ +static INLINE +void dllist_init(PDLLIST_NODE psListHead) +{ + psListHead->psPrevNode = psListHead; + psListHead->psNextNode = psListHead; +} + +/*************************************************************************/ /*! +@Function dllist_is_empty + +@Description Returns whether the list is empty + +@Input psListHead List head Node + +*/ +/*****************************************************************************/ +static INLINE +bool dllist_is_empty(PDLLIST_NODE psListHead) +{ + return ((psListHead->psPrevNode == psListHead) + && (psListHead->psNextNode == psListHead)); +} + +/*************************************************************************/ /*! 
+@Function dllist_add_to_head + +@Description Add psNewNode to head of list psListHead + +@Input psListHead Head Node +@Input psNewNode New Node + +*/ +/*****************************************************************************/ +static INLINE +void dllist_add_to_head(PDLLIST_NODE psListHead, PDLLIST_NODE psNewNode) +{ + PDLLIST_NODE psTmp; + + psTmp = psListHead->psNextNode; + + psListHead->psNextNode = psNewNode; + psNewNode->psNextNode = psTmp; + + psTmp->psPrevNode = psNewNode; + psNewNode->psPrevNode = psListHead; +} + + +/*************************************************************************/ /*! +@Function dllist_add_to_tail + +@Description Add psNewNode to tail of list psListHead + +@Input psListHead Head Node +@Input psNewNode New Node + +*/ +/*****************************************************************************/ +static INLINE +void dllist_add_to_tail(PDLLIST_NODE psListHead, PDLLIST_NODE psNewNode) +{ + PDLLIST_NODE psTmp; + + psTmp = psListHead->psPrevNode; + + psListHead->psPrevNode = psNewNode; + psNewNode->psPrevNode = psTmp; + + psTmp->psNextNode = psNewNode; + psNewNode->psNextNode = psListHead; +} + +/*************************************************************************/ /*! +@Function dllist_node_is_in_list + +@Description Returns true if psNode is in a list + +@Input psNode List node + +*/ +/*****************************************************************************/ +static INLINE +bool dllist_node_is_in_list(PDLLIST_NODE psNode) +{ + return (psNode->psNextNode != NULL); +} + +/*************************************************************************/ /*! +@Function dllist_get_next_node + +@Description Returns the list node after psListHead or NULL psListHead is + the only element in the list. 
+ +@Input psListHead List node to start the operation + +*/ +/*****************************************************************************/ +static INLINE +PDLLIST_NODE dllist_get_next_node(PDLLIST_NODE psListHead) +{ + if (psListHead->psNextNode == psListHead) + { + return NULL; + } + else + { + return psListHead->psNextNode; + } +} + +/*************************************************************************/ /*! +@Function dllist_get_prev_node + +@Description Returns the list node preceding psListHead or NULL if + psListHead is the only element in the list. + +@Input psListHead List node to start the operation + +*/ +/*****************************************************************************/ +static INLINE +PDLLIST_NODE dllist_get_prev_node(PDLLIST_NODE psListHead) +{ + if (psListHead->psPrevNode == psListHead) + { + return NULL; + } + else + { + return psListHead->psPrevNode; + } +} + +/*************************************************************************/ /*! +@Function dllist_remove_node + +@Description Removes psListNode from the list where it currently belongs + +@Input psListNode List node to be removed + +*/ +/*****************************************************************************/ +static INLINE +void dllist_remove_node(PDLLIST_NODE psListNode) +{ + psListNode->psNextNode->psPrevNode = psListNode->psPrevNode; + psListNode->psPrevNode->psNextNode = psListNode->psNextNode; + + /* Clear the node to show it's not in a list */ + psListNode->psPrevNode = NULL; + psListNode->psNextNode = NULL; +} + +/*************************************************************************/ /*! +@Function dllist_replace_head + +@Description Moves the list from psOldHead to psNewHead + +@Input psOldHead List node to be replaced. Will become a + head node of an empty list. +@Input psNewHead List node to be inserted. Must be an + empty list head. 
+ +*/ +/*****************************************************************************/ +static INLINE +void dllist_replace_head(PDLLIST_NODE psOldHead, PDLLIST_NODE psNewHead) +{ + if (dllist_is_empty(psOldHead)) + { + psNewHead->psNextNode = psNewHead; + psNewHead->psPrevNode = psNewHead; + } + else + { + /* Change the neighbouring nodes */ + psOldHead->psNextNode->psPrevNode = psNewHead; + psOldHead->psPrevNode->psNextNode = psNewHead; + + /* Copy the old data to the new node */ + psNewHead->psNextNode = psOldHead->psNextNode; + psNewHead->psPrevNode = psOldHead->psPrevNode; + + /* Remove links to the previous list */ + psOldHead->psNextNode = psOldHead; + psOldHead->psPrevNode = psOldHead; + } +} + +/**************************************************************************/ /*! +@Function dllist_insert_list_at_head + +@Description Inserts psInHead list into the head of the psOutHead list. + After this operation psOutHead will contain psInHead at the + head of the list and the remaining elements that were + already in psOutHead will be places after the psInList (so + at a tail of the original list). + +@Input psOutHead List node psInHead will be inserted to. +@Input psInHead List node to be inserted to psOutHead. + After this operation this becomes an empty list. +*/ /***************************************************************************/ +static INLINE +void dllist_insert_list_at_head(PDLLIST_NODE psOutHead, PDLLIST_NODE psInHead) +{ + PDLLIST_NODE psInHeadNextNode = psInHead->psNextNode; + PDLLIST_NODE psOutHeadNextNode = psOutHead->psNextNode; + + if (!dllist_is_empty(psInHead)) + { + psOutHead->psNextNode = psInHeadNextNode; + psInHeadNextNode->psPrevNode = psOutHead; + + psInHead->psPrevNode->psNextNode = psOutHeadNextNode; + psOutHeadNextNode->psPrevNode = psInHead->psPrevNode; + + dllist_init(psInHead); + } + } + +/*************************************************************************/ /*! 
+@Description Pointer to a dllist comparison callback function. +@Input psNode Pointer to a node in a dllist. +@Input psNext Pointer to psNode's next neighbour. +*/ /**************************************************************************/ +typedef bool (*DLLIST_CMP_CB)(const DLLIST_NODE *psNode, const DLLIST_NODE *psNext); + +/*************************************************************************/ /*! +@Function dllist_sort + +@Description Insert-sorts the List in place + The cmpr function passes the current and next node, + From which the user writes the function responsible + for choosing to swap order or not. + The function returns true if a swap is required + +@Input psListHead List Head to be sorted. + +@Input cmpr Function pointer to use for sorting + +*/ +/*****************************************************************************/ +static INLINE void dllist_sort(PDLLIST_NODE psListHead, + DLLIST_CMP_CB cmpr) +{ + DLLIST_NODE *node, *next; + DLLIST_NODE sTempHead; + + dllist_init(&sTempHead); + + dllist_foreach_node(psListHead, node, next) + { + dllist_remove_node(node); + dllist_add_to_head(&sTempHead, node); + } + + while (!dllist_is_empty(&sTempHead)) + { + DLLIST_NODE *psSmallestNode = NULL; + + dllist_foreach_node(&sTempHead, node, next) + { + if (!psSmallestNode || cmpr(psSmallestNode, node)) + { + psSmallestNode = node; + } + } + + dllist_remove_node(psSmallestNode); + dllist_add_to_tail(psListHead, psSmallestNode); + } +} + +#endif /* DLLIST_H */ diff --git a/drivers/gpu/drm/phytium/octopus/dma_flags.h b/drivers/gpu/drm/phytium/octopus/dma_flags.h new file mode 100644 index 000000000000..1a82763ae9bd --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/dma_flags.h @@ -0,0 +1,50 @@ +/*************************************************************************/ /*! +@File +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef DMA_FLAGS_H +#define DMA_FLAGS_H + +/* these should match flags in pvrsrv_dma.h */ +#define DMA_FLAG_MEM_TO_DEV (1U<<0) +#define DMA_FLAG_DEV_TO_MEM (0U<<0) + +#define DMA_FLAG_SYNCHRONOUS (1U<<1) + +#endif /* DMA_FLAGS_H */ diff --git a/drivers/gpu/drm/phytium/octopus/dma_km.c b/drivers/gpu/drm/phytium/octopus/dma_km.c new file mode 100644 index 000000000000..074b77d16f60 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/dma_km.c @@ -0,0 +1,413 @@ +/*************************************************************************/ /*! +@File dma_km.c +@Title kernel side of dma transfer scheduling +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements server side code for allowing DMA transfers between + cpu and device memory. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#if defined(__linux__) +#include +#include +#include +#include +#include +#include +#endif + +#include "pmr.h" +#include "log2.h" +#include "device.h" +#include "pvrsrv.h" +#include "osfunc.h" +#include "dma_km.h" +#include "pvr_debug.h" +#include "lock_types.h" +#include "allocmem.h" +#include "process_stats.h" +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) +#include "ri_server.h" +#endif +#include "devicemem.h" +#include "pvrsrv_apphint.h" +#include "pvrsrv_sync_server.h" +#include "km_apphint_defs.h" +#include "di_server.h" +#include "dma_flags.h" + +/* This header must always be included last */ +#if defined(__linux__) +#include "kernel_compatibility.h" +#endif + +typedef struct _SERVER_CLEANUP_DATA_ +{ + PVRSRV_DEVICE_NODE *psDevNode; + CONNECTION_DATA *psConnection; + IMG_UINT32 uiNumDMA; + IMG_UINT32 uiCount; + SYNC_TIMELINE_OBJ sTimelineObject; + void* pvChan; + PMR** ppsPMR; +} SERVER_CLEANUP_DATA; + +#if !defined(NO_HARDWARE) +static void Cleanup(void* pvCleanupData, IMG_BOOL bAdvanceTimeline) +{ + IMG_UINT i; + PVRSRV_ERROR eError; + SERVER_CLEANUP_DATA* psCleanupData = (SERVER_CLEANUP_DATA*) pvCleanupData; + +#if defined(DMA_VERBOSE) + PVR_DPF((PVR_DBG_ERROR, "Server Cleanup thread entry (%p)", pvCleanupData)); +#endif + + for (i=0; iuiCount; i++) + { + eError = PMRUnlockSysPhysAddresses(psCleanupData->ppsPMR[i]); + PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses"); + } + + /* Advance timeline */ + if (psCleanupData->sTimelineObject.pvTlObj && bAdvanceTimeline) + { + eError = SyncSWTimelineAdvanceKM(psCleanupData->psDevNode, &psCleanupData->sTimelineObject); + PVR_LOG_IF_ERROR(eError, "SyncSWTimelineAdvanceKM"); + eError = SyncSWTimelineReleaseKM(&psCleanupData->sTimelineObject); + PVR_LOG_IF_ERROR(eError, "SyncSWTimelineReleaseKM"); + } + + OSAtomicDecrement(&psCleanupData->psConnection->ui32NumDmaTransfersInFlight); +#if defined(DMA_VERBOSE) + 
PVR_DPF((PVR_DBG_ERROR, "Decremented to %d", OSAtomicRead(&psCleanupData->psConnection->ui32NumDmaTransfersInFlight))); +#endif + eError = OSEventObjectSignal(psCleanupData->psConnection->hDmaEventObject); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: OSEventObjectSignal failed: %s", + __func__, PVRSRVGetErrorString(eError))); + } + + + OSFreeMem(psCleanupData->ppsPMR); + OSFreeMem(psCleanupData); +} +#endif /* !defined(NO_HARDWARE) */ + +IMG_EXPORT PVRSRV_ERROR +PVRSRVInitialiseDMA(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; + + if (psDevConfig->bHasDma) + { + + PVR_ASSERT(psDevConfig->pfnSlaveDMAGetChan != NULL); + PVR_ASSERT(psDevConfig->pfnSlaveDMAFreeChan != NULL); + PVR_ASSERT(psDevConfig->pszDmaTxChanName != NULL); + PVR_ASSERT(psDevConfig->pszDmaRxChanName != NULL); + + psDeviceNode->hDmaTxChan = + psDevConfig->pfnSlaveDMAGetChan(psDevConfig, + psDevConfig->pszDmaTxChanName); + if (!psDeviceNode->hDmaTxChan) + { + return PVRSRV_ERROR_RESOURCE_UNAVAILABLE; + } + psDeviceNode->hDmaRxChan = + psDevConfig->pfnSlaveDMAGetChan(psDevConfig, + psDevConfig->pszDmaRxChanName); + if (!psDeviceNode->hDmaRxChan) + { + psDevConfig->pfnSlaveDMAFreeChan(psDevConfig, psDeviceNode->hDmaTxChan); + return PVRSRV_ERROR_RESOURCE_UNAVAILABLE; + } + psDeviceNode->bHasSystemDMA = true; + } + + return PVRSRV_OK; +} + +IMG_EXPORT void +PVRSRVDeInitialiseDMA(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + if (psDeviceNode->bHasSystemDMA) + { + PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; + + psDevConfig->pfnSlaveDMAFreeChan(psDevConfig, psDeviceNode->hDmaRxChan); + psDevConfig->pfnSlaveDMAFreeChan(psDevConfig, psDeviceNode->hDmaTxChan); + } +} + +IMG_EXPORT PVRSRV_ERROR +DmaDeviceParams(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 *ui32DmaBuffAlign, + IMG_UINT32 *ui32DmaTransferMult) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; + + *ui32DmaBuffAlign = 
psDevConfig->ui32DmaAlignment; + *ui32DmaTransferMult = psDevConfig->ui32DmaTransferUnit; + + return PVRSRV_OK; +} + +IMG_EXPORT PVRSRV_ERROR +DmaSparseMappingTable(PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT32 ui32SizeInPages, + IMG_BOOL *pbTable) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_DEV_PHYADDR *psDevPhyAddr; + IMG_BOOL *pbValid; + + psDevPhyAddr = OSAllocZMem(ui32SizeInPages * sizeof(IMG_CPU_PHYADDR)); + PVR_LOG_GOTO_IF_NOMEM(psDevPhyAddr, eError, err1); + + pbValid = OSAllocZMem(ui32SizeInPages * sizeof(IMG_BOOL)); + PVR_LOG_GOTO_IF_NOMEM(pbValid, eError, err2); + + eError = PMRLockSysPhysAddresses(psPMR); + PVR_LOG_GOTO_IF_ERROR(eError, "PMRLockSysPhysAddresses", err3); + + eError = PMR_DevPhysAddr(psPMR, + OSGetPageShift(), + ui32SizeInPages, + uiOffset, + psDevPhyAddr, + pbValid); + PVR_LOG_GOTO_IF_ERROR(eError, "PMR_DevPhysAddr", err3); + + PMRUnlockSysPhysAddresses(psPMR); + + memcpy(pbTable, pbValid, ui32SizeInPages * sizeof(IMG_BOOL)); + +err3: + OSFreeMem(pbValid); +err2: + OSFreeMem(psDevPhyAddr); +err1: + return eError; +} + +IMG_EXPORT PVRSRV_ERROR +DmaTransfer(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 uiNumDMAs, + PMR** ppsPMR, + IMG_UINT64 *puiAddress, + IMG_DEVMEM_OFFSET_T *puiOffset, + IMG_DEVMEM_SIZE_T *puiSize, + IMG_UINT32 uiFlags, + PVRSRV_TIMELINE iUpdateFenceTimeline) +{ + + PVRSRV_ERROR eError = PVRSRV_OK; +#if defined(NO_HARDWARE) + /* On nohw the kernel call just advances the timeline to signal completion */ + + SYNC_TIMELINE_OBJ sSwTimeline = {NULL, PVRSRV_NO_TIMELINE}; + + if (iUpdateFenceTimeline != PVRSRV_NO_TIMELINE) + { + eError = SyncSWGetTimelineObj(iUpdateFenceTimeline, &sSwTimeline); + PVR_LOG_RETURN_IF_ERROR(eError, "SyncSWGetTimelineObj"); + + eError = SyncSWTimelineAdvanceKM(psDevNode, &sSwTimeline); + PVR_LOG_RETURN_IF_ERROR(eError, "SyncSWTimelineAdvanceKM"); + + eError = SyncSWTimelineReleaseKM(&sSwTimeline); + PVR_LOG_RETURN_IF_ERROR(eError, "SyncSWTimelineReleaseKM"); 
+ } + + return PVRSRV_OK; + +#else + IMG_DEV_PHYADDR *psDevPhyAddr; + IMG_DMA_ADDR *psDmaAddr; + IMG_BOOL *pbValid; + IMG_UINT32 i; + PVRSRV_DEVICE_CONFIG* psDevConfig = psDevNode->psDevConfig; + void* pvChan = NULL; + SERVER_CLEANUP_DATA* psServerData; + void* pvOSData; + + OSLockAcquire(psConnection->hDmaReqLock); + + if (!psConnection->bAcceptDmaRequests) + { + OSLockRelease(psConnection->hDmaReqLock); + return PVRSRV_OK; + } + + OSAtomicIncrement(&psConnection->ui32NumDmaTransfersInFlight); +#if defined(DMA_VERBOSE) + PVR_DPF((PVR_DBG_ERROR, "Incremented to %d", OSAtomicRead(&psConnection->ui32NumDmaTransfersInFlight))); +#endif + psServerData = OSAllocZMem(sizeof(SERVER_CLEANUP_DATA)); + PVR_LOG_GOTO_IF_NOMEM(psServerData, eError, e0); + + pvChan = uiFlags & (DMA_FLAG_MEM_TO_DEV) ? psDevNode->hDmaTxChan : psDevNode->hDmaRxChan; + if (!pvChan) + { + eError = PVRSRV_ERROR_RESOURCE_UNAVAILABLE; + PVR_LOG_GOTO_IF_ERROR(eError, "Error acquiring DMA channel", e1); + } + + if (iUpdateFenceTimeline != PVRSRV_NO_TIMELINE) + { + eError = SyncSWGetTimelineObj(iUpdateFenceTimeline, &psServerData->sTimelineObject); + PVR_LOG_GOTO_IF_ERROR(eError, "SyncSWGetTimelineObj", e1); + } + + psServerData->uiCount = 0; + psServerData->psDevNode = psDevNode; + psServerData->psConnection = psConnection; + psServerData->pvChan = pvChan; + psServerData->ppsPMR = OSAllocZMem(sizeof(PMR*) * uiNumDMAs); + PVR_LOG_GOTO_IF_NOMEM(psServerData->ppsPMR, eError, e2); + + eError = OSDmaAllocData(psDevNode, uiNumDMAs, &pvOSData); + PVR_LOG_GOTO_IF_ERROR(eError, "OSDmaAllocData failed", e3); + + for (i=0; i> OSGetPageShift(); + + psDmaAddr = OSAllocZMem(ui32SizeInPages * sizeof(IMG_DMA_ADDR)); + PVR_LOG_GOTO_IF_NOMEM(psDmaAddr, eError, loop_e0); + + psDevPhyAddr = OSAllocZMem(ui32SizeInPages * sizeof(IMG_CPU_PHYADDR)); + PVR_LOG_GOTO_IF_NOMEM(psDevPhyAddr, eError, loop_e1); + + pbValid = OSAllocZMem(ui32SizeInPages * sizeof(IMG_BOOL)); + PVR_LOG_GOTO_IF_NOMEM(pbValid, eError, loop_e2); + + eError = 
PMRLockSysPhysAddresses(ppsPMR[i]); + PVR_LOG_GOTO_IF_ERROR(eError, "PMRLockSysPhysAddresses", loop_e3); + + psServerData->ppsPMR[i] = ppsPMR[i]; + + eError = PMR_DevPhysAddr(ppsPMR[i], + OSGetPageShift(), + ui32SizeInPages, + puiOffset[i], + psDevPhyAddr, + pbValid); + PVR_LOG_GOTO_IF_ERROR(eError, "PMR_DevPhysAddr", loop_e4); + + psDevConfig->pfnDevPhysAddr2DmaAddr(psDevConfig, + psDmaAddr, + psDevPhyAddr, + pbValid, + ui32SizeInPages, + PMR_IsSparse(ppsPMR[i])); + + if (!PMR_IsSparse(ppsPMR[i])) + { + eError = OSDmaPrepareTransfer(psDevNode, + pvChan, + &psDmaAddr[0], (IMG_UINT64*)puiAddress[i], + puiSize[i], (uiFlags & DMA_FLAG_MEM_TO_DEV), pvOSData, + psServerData, Cleanup, (i == 0)); + PVR_LOG_GOTO_IF_ERROR(eError, "OSDmaPrepareTransfer", loop_e4); + psServerData->uiCount++; + + } + else + { + eError = OSDmaPrepareTransferSparse(psDevNode, pvChan, + psDmaAddr, pbValid, + (IMG_UINT64*)puiAddress[i], puiSize[i], + uiOffsetInPage, ui32SizeInPages, + (uiFlags & DMA_FLAG_MEM_TO_DEV), + pvOSData, psServerData, + Cleanup, (i == 0)); + PVR_LOG_GOTO_IF_ERROR(eError, "OSDmaPrepareTransferSparse", loop_e4); + psServerData->uiCount++; + } + + OSFreeMem(pbValid); + OSFreeMem(psDevPhyAddr); + OSFreeMem(psDmaAddr); + + continue; + +loop_e4: + PMRUnlockSysPhysAddresses(ppsPMR[i]); +loop_e3: + OSFreeMem(pbValid); +loop_e2: + OSFreeMem(psDevPhyAddr); +loop_e1: + OSFreeMem(psDmaAddr); +loop_e0: + break; + } + + if (psServerData->uiCount == uiNumDMAs) + { + OSDmaSubmitTransfer(psDevNode, pvOSData, pvChan, (uiFlags & DMA_FLAG_SYNCHRONOUS)); + } + else + { + /* One of the transfers could not be programmed, roll back */ + OSDmaForceCleanup(psDevNode, pvChan, pvOSData, psServerData, Cleanup); + } + OSLockRelease(psConnection->hDmaReqLock); + return eError; + +e3: + OSFreeMem(psServerData->ppsPMR); +e2: + if (iUpdateFenceTimeline != PVRSRV_NO_TIMELINE) + { + SyncSWTimelineReleaseKM(&psServerData->sTimelineObject); + } +e1: + OSFreeMem(psServerData); +e0: + 
OSLockRelease(psConnection->hDmaReqLock); + return eError; +#endif +} diff --git a/drivers/gpu/drm/phytium/octopus/dma_km.h b/drivers/gpu/drm/phytium/octopus/dma_km.h new file mode 100644 index 000000000000..5347cdc300ee --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/dma_km.h @@ -0,0 +1,83 @@ +/*************************************************************************/ /*! +@File dma_km.h +@Title DMA transfer module header +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef DMA_KM_H +#define DMA_KM_H + +#if defined(__linux__) +#include +#else +#define KERNEL_VERSION +#endif + +#include "pvrsrv_error.h" +#include "img_types.h" +#include "cache_ops.h" +#include "device.h" +#include "pmr.h" +#include "pvrsrv_sync_km.h" +#include "connection_server.h" + +PVRSRV_ERROR DmaDeviceParams(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 *ui32DmaBuffAlign, + IMG_UINT32 *ui32DmaTransferMult); + +PVRSRV_ERROR DmaSparseMappingTable(PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT32 ui32SizeInPages, + IMG_BOOL *pbTable); + +PVRSRV_ERROR DmaTransfer(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 uiNumDMAs, + PMR** ppsPMR, + IMG_UINT64 *puiAddress, + IMG_DEVMEM_OFFSET_T *puiOffset, + IMG_DEVMEM_SIZE_T *puiSize, + IMG_BOOL bMemToDev, + PVRSRV_TIMELINE iUpdateTimeline); + +PVRSRV_ERROR PVRSRVInitialiseDMA(PVRSRV_DEVICE_NODE *psDeviceNode); +void PVRSRVDeInitialiseDMA(PVRSRV_DEVICE_NODE *psDeviceNode); + +#endif /* DMA_KM_H */ diff --git a/drivers/gpu/drm/phytium/octopus/dma_support.c b/drivers/gpu/drm/phytium/octopus/dma_support.c new file mode 100644 index 000000000000..061d095ccecb --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/dma_support.c @@ -0,0 +1,335 @@ 
+/*************************************************************************/ /*! +@File dma_support.c +@Title System DMA support +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Provides a contiguous memory allocator (i.e. DMA allocator); + APIs are used for allocation/ioremapping (DMA/PA <-> CPU/VA) +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#if defined(__linux__) +#include +#include +#endif + +#include "allocmem.h" +#include "dma_support.h" + +#define DMA_MAX_IOREMAP_ENTRIES 8 +static IMG_BOOL gbEnableDmaIoRemapping = IMG_FALSE; +static DMA_ALLOC gsDmaIoRemapArray[DMA_MAX_IOREMAP_ENTRIES] = {{0}}; +static IMG_UINT32 gsDmaIoRemapRef[DMA_MAX_IOREMAP_ENTRIES] = {0}; + +/*! +****************************************************************************** + @Function SysDmaAllocMem + + @Description Allocates physically contiguous memory + + @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_ + error code + ******************************************************************************/ +PVRSRV_ERROR SysDmaAllocMem(DMA_ALLOC *psDmaAlloc) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + if (psDmaAlloc != NULL && psDmaAlloc->pvOSDevice != NULL) + { +#if defined(__linux__) + psDmaAlloc->pvVirtAddr = + dma_alloc_coherent((struct device *)psDmaAlloc->pvOSDevice, + (size_t) psDmaAlloc->ui64Size, + (dma_addr_t *)&psDmaAlloc->sBusAddr.uiAddr, + GFP_KERNEL); + PVR_LOG_RETURN_IF_FALSE((NULL != psDmaAlloc->pvVirtAddr), "dma_alloc_coherent() failed", PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES); +#else + #error "Provide OS implementation of DMA allocation"; +#endif + } + + return eError; +} + +/*! 
+****************************************************************************** + @Function SysDmaFreeMem + + @Description Free physically contiguous memory + + @Return void + ******************************************************************************/ +void SysDmaFreeMem(DMA_ALLOC *psDmaAlloc) +{ + if (psDmaAlloc && psDmaAlloc->pvVirtAddr) + { +#if defined(__linux__) + dma_free_coherent((struct device *)psDmaAlloc->pvOSDevice, + (size_t) psDmaAlloc->ui64Size, + psDmaAlloc->pvVirtAddr, + (dma_addr_t )psDmaAlloc->sBusAddr.uiAddr); +#else + #error "Provide OS implementation of DMA deallocation"; +#endif + } +} + +/*! +****************************************************************************** + @Function SysDmaRegisterForIoRemapping + + @Description Registers DMA_ALLOC for manual I/O remapping + + @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_ + error code + ******************************************************************************/ +PVRSRV_ERROR SysDmaRegisterForIoRemapping(DMA_ALLOC *psDmaAlloc) +{ + IMG_UINT32 ui32Idx; + PVRSRV_ERROR eError = PVRSRV_ERROR_TOO_FEW_BUFFERS; + + if (psDmaAlloc == NULL || + psDmaAlloc->ui64Size == 0 || + psDmaAlloc->sBusAddr.uiAddr == 0) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + else if (psDmaAlloc->pvVirtAddr == NULL) + { + /* Check if an I/O remap entry already exists for this request */ + for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx) + { + if (gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr && + gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr <= psDmaAlloc->sBusAddr.uiAddr && + gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr + gsDmaIoRemapArray[ui32Idx].ui64Size >= psDmaAlloc->sBusAddr.uiAddr + psDmaAlloc->ui64Size) + { + PVR_ASSERT(gsDmaIoRemapArray[ui32Idx].pvVirtAddr); + break; + } + } + + if (ui32Idx < DMA_MAX_IOREMAP_ENTRIES) + { + IMG_UINT64 ui64Offset; + ui64Offset = psDmaAlloc->sBusAddr.uiAddr - gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr; + psDmaAlloc->pvVirtAddr = 
gsDmaIoRemapArray[ui32Idx].pvVirtAddr + (uintptr_t)ui64Offset; + gsDmaIoRemapRef[ui32Idx] += 1; + return PVRSRV_OK; + } + + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Check if there is a free I/O remap table entry for this request */ + for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx) + { + if (gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr == 0) + { + PVR_ASSERT(gsDmaIoRemapArray[ui32Idx].pvVirtAddr == NULL); + PVR_ASSERT(gsDmaIoRemapArray[ui32Idx].ui64Size == 0); + break; + } + } + + if (ui32Idx >= DMA_MAX_IOREMAP_ENTRIES) + { + return eError; + } + + gsDmaIoRemapArray[ui32Idx].ui64Size = psDmaAlloc->ui64Size; + gsDmaIoRemapArray[ui32Idx].sBusAddr = psDmaAlloc->sBusAddr; + gsDmaIoRemapArray[ui32Idx].pvVirtAddr = psDmaAlloc->pvVirtAddr; + gsDmaIoRemapRef[ui32Idx] += 1; + + PVR_LOG(("DMA: register I/O remap: VA: 0x%p, PA: 0x%llx, Size: 0x%llx", + psDmaAlloc->pvVirtAddr, + psDmaAlloc->sBusAddr.uiAddr, + psDmaAlloc->ui64Size)); + + gbEnableDmaIoRemapping = IMG_TRUE; + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + @Function SysDmaDeregisterForIoRemapping + + @Description Deregisters DMA_ALLOC from manual I/O remapping + + @Return void + ******************************************************************************/ +void SysDmaDeregisterForIoRemapping(DMA_ALLOC *psDmaAlloc) +{ + IMG_UINT32 ui32Idx; + + if (psDmaAlloc == NULL || + psDmaAlloc->ui64Size == 0 || + psDmaAlloc->pvVirtAddr == NULL || + psDmaAlloc->sBusAddr.uiAddr == 0) + { + return; + } + + /* Remove specified entry from the list of I/O remap entries */ + for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx) + { + if (gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr && + gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr <= psDmaAlloc->sBusAddr.uiAddr && + gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr + gsDmaIoRemapArray[ui32Idx].ui64Size >= psDmaAlloc->sBusAddr.uiAddr + psDmaAlloc->ui64Size) + { + if (! 
--gsDmaIoRemapRef[ui32Idx]) + { + PVR_LOG(("DMA: deregister I/O remap: VA: 0x%p, PA: 0x%llx, Size: 0x%llx", + gsDmaIoRemapArray[ui32Idx].pvVirtAddr, + gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr, + gsDmaIoRemapArray[ui32Idx].ui64Size)); + + gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr = 0; + gsDmaIoRemapArray[ui32Idx].pvVirtAddr = NULL; + gsDmaIoRemapArray[ui32Idx].ui64Size = 0; + } + + break; + } + } + + /* Check if no other I/O remap entries exists for remapping */ + for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx) + { + if (gsDmaIoRemapArray[ui32Idx].pvVirtAddr != NULL) + { + break; + } + } + + if (ui32Idx == DMA_MAX_IOREMAP_ENTRIES) + { + /* No entries found so disable remapping */ + gbEnableDmaIoRemapping = IMG_FALSE; + } +} + +/*! +****************************************************************************** + @Function SysDmaDevPAddrToCpuVAddr + + @Description Maps a DMA_ALLOC physical address to CPU virtual address + + @Return IMG_CPU_VIRTADDR on success. Otherwise, a NULL + ******************************************************************************/ +IMG_CPU_VIRTADDR SysDmaDevPAddrToCpuVAddr(IMG_UINT64 uiAddr, IMG_UINT64 ui64Size) +{ + IMG_CPU_VIRTADDR pvDMAVirtAddr = NULL; + DMA_ALLOC *psHeapDmaAlloc; + IMG_UINT32 ui32Idx; + + if (gbEnableDmaIoRemapping == IMG_FALSE) + { + return pvDMAVirtAddr; + } + + for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx) + { + psHeapDmaAlloc = &gsDmaIoRemapArray[ui32Idx]; + if (psHeapDmaAlloc->sBusAddr.uiAddr && uiAddr >= psHeapDmaAlloc->sBusAddr.uiAddr) + { + IMG_UINT64 uiSpan = psHeapDmaAlloc->ui64Size; + IMG_UINT64 uiOffset = uiAddr - psHeapDmaAlloc->sBusAddr.uiAddr; + + if (uiOffset < uiSpan) + { + PVR_ASSERT((uiOffset+ui64Size-1) < uiSpan); + pvDMAVirtAddr = psHeapDmaAlloc->pvVirtAddr + uiOffset; + + PVR_DPF((PVR_DBG_MESSAGE, + "DMA: remap: PA: 0x%llx => VA: 0x%p", + uiAddr, pvDMAVirtAddr)); + + break; + } + } + } + + return pvDMAVirtAddr; +} + +/*! 
+****************************************************************************** + @Function SysDmaCpuVAddrToDevPAddr + + @Description Maps a DMA_ALLOC CPU virtual address to physical address + + @Return Non-zero value on success. Otherwise, a 0 + ******************************************************************************/ +IMG_UINT64 SysDmaCpuVAddrToDevPAddr(IMG_CPU_VIRTADDR pvDMAVirtAddr) +{ + IMG_UINT64 uiAddr = 0; + DMA_ALLOC *psHeapDmaAlloc; + IMG_UINT32 ui32Idx; + + if (gbEnableDmaIoRemapping == IMG_FALSE) + { + return uiAddr; + } + + for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx) + { + psHeapDmaAlloc = &gsDmaIoRemapArray[ui32Idx]; + if (psHeapDmaAlloc->pvVirtAddr && pvDMAVirtAddr >= psHeapDmaAlloc->pvVirtAddr) + { + IMG_UINT64 uiSpan = psHeapDmaAlloc->ui64Size; + IMG_UINT64 uiOffset = pvDMAVirtAddr - psHeapDmaAlloc->pvVirtAddr; + + if (uiOffset < uiSpan) + { + uiAddr = psHeapDmaAlloc->sBusAddr.uiAddr + uiOffset; + + PVR_DPF((PVR_DBG_MESSAGE, + "DMA: remap: VA: 0x%p => PA: 0x%llx", + pvDMAVirtAddr, uiAddr)); + + break; + } + } + } + + return uiAddr; +} + +/****************************************************************************** + End of file (dma_support.c) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/dma_support.h b/drivers/gpu/drm/phytium/octopus/dma_support.h new file mode 100644 index 000000000000..46989d8641f9 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/dma_support.h @@ -0,0 +1,112 @@ +/*************************************************************************/ /*! +@File dma_support.h +@Title Device contiguous memory allocator and I/O re-mapper +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Description This header provides a contiguous memory allocator API; mainly + used for allocating / ioremapping (DMA/PA <-> CPU/VA) +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef DMA_SUPPORT_H +#define DMA_SUPPORT_H + +#include "osfunc.h" +#include "pvrsrv.h" + +typedef struct _DMA_ALLOC_ +{ + IMG_UINT64 ui64Size; + IMG_CPU_VIRTADDR pvVirtAddr; + IMG_DEV_PHYADDR sBusAddr; + void *pvOSDevice; +} DMA_ALLOC; + +/*! +******************************************************************************* + @Function SysDmaAllocMem + @Description Allocates physically contiguous memory + @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code +******************************************************************************/ +PVRSRV_ERROR SysDmaAllocMem(DMA_ALLOC *psDmaAlloc); + +/*! +******************************************************************************* + @Function SysDmaFreeMem + @Description Free physically contiguous memory + @Return void +******************************************************************************/ +void SysDmaFreeMem(DMA_ALLOC *psCmaAlloc); + +/*! +******************************************************************************* + @Function SysDmaRegisterForIoRemapping + @Description Registers DMA_ALLOC for manual I/O remapping + @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code +******************************************************************************/ +PVRSRV_ERROR SysDmaRegisterForIoRemapping(DMA_ALLOC *psPhysHeapDmaAlloc); + +/*! 
+******************************************************************************* + @Function SysDmaDeregisterForIoRemapping + @Description Deregisters DMA_ALLOC from manual I/O remapping + @Return void +******************************************************************************/ +void SysDmaDeregisterForIoRemapping(DMA_ALLOC *psPhysHeapDmaAlloc); + +/*! +******************************************************************************* + @Function SysDmaDevPAddrToCpuVAddr + @Description Maps a DMA_ALLOC physical address to CPU virtual address + @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code +******************************************************************************/ +IMG_CPU_VIRTADDR +SysDmaDevPAddrToCpuVAddr(IMG_UINT64 uiAddr, IMG_UINT64 ui64Size); + +/*! +******************************************************************************* + @Function SysDmaCpuVAddrToDevPAddr + @Description Maps a DMA_ALLOC CPU virtual address to physical address + @Return Non-zero value on success. Otherwise, a 0 +******************************************************************************/ +IMG_UINT64 SysDmaCpuVAddrToDevPAddr(IMG_CPU_VIRTADDR pvDMAVirtAddr); + +#endif /* DMA_SUPPORT_H */ + +/****************************************************************************** + End of file (dma_support.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/env_connection.h b/drivers/gpu/drm/phytium/octopus/env_connection.h new file mode 100644 index 000000000000..90be0497cc02 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/env_connection.h @@ -0,0 +1,90 @@ +/*************************************************************************/ /*! +@File +@Title Server side connection management +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Description Linux specific server side connection management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(ENV_CONNECTION_H) +#define ENV_CONNECTION_H + +#include +#include +#include + +#include "handle.h" +#include "pvr_debug.h" +#include "device.h" + +#if defined(SUPPORT_ION) +#include PVR_ANDROID_ION_HEADER +#include "ion_sys.h" +#include "allocmem.h" +#endif + +typedef struct _ENV_CONNECTION_PRIVATE_DATA_ +{ + struct file *psFile; + PVRSRV_DEVICE_NODE *psDevNode; +} ENV_CONNECTION_PRIVATE_DATA; + +#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) +#define ION_CLIENT_NAME_SIZE 50 + +typedef struct _ENV_ION_CONNECTION_DATA_ +{ + IMG_CHAR azIonClientName[ION_CLIENT_NAME_SIZE]; + struct ion_device *psIonDev; + struct ion_client *psIonClient; +} ENV_ION_CONNECTION_DATA; +#endif + +typedef struct _ENV_CONNECTION_DATA_ +{ + pid_t owner; + + struct file *psFile; + PVRSRV_DEVICE_NODE *psDevNode; + +#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) + ENV_ION_CONNECTION_DATA *psIonData; +#endif +} ENV_CONNECTION_DATA; + +#endif /* !defined(ENV_CONNECTION_H) */ diff --git a/drivers/gpu/drm/phytium/octopus/event.c b/drivers/gpu/drm/phytium/octopus/event.c new file mode 100644 index 000000000000..2f00ce7858be --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/event.c @@ -0,0 +1,514 @@ +/*************************************************************************/ /*! 
+@File +@Title Event Object +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0) +#include +#endif +#include +#include +#include +#include +#include +#include + +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv_error.h" +#include "allocmem.h" +#include "event.h" +#include "pvr_debug.h" +#include "pvrsrv.h" +#include "pvr_bridge_k.h" + +#include "osfunc.h" + +/* Uncomment to enable event object stats that are useful for debugging. + * The stats can be gotten at any time (during lifetime of event object) + * using OSEventObjectDumpdebugInfo API */ +// #define LINUX_EVENT_OBJECT_STATS + + +typedef struct PVRSRV_LINUX_EVENT_OBJECT_LIST_TAG +{ + rwlock_t sLock; + /* Counts how many times event object was signalled i.e. how many times + * LinuxEventObjectSignal() was called on a given event object. + * Used for detecting pending signals. + * Note that this is in no way related to OS signals. 
*/ + atomic_t sEventSignalCount; + struct list_head sList; +} PVRSRV_LINUX_EVENT_OBJECT_LIST; + + +typedef struct PVRSRV_LINUX_EVENT_OBJECT_TAG +{ + IMG_UINT32 ui32EventSignalCountPrevious; +#if defined(DEBUG) + IMG_UINT ui32Stats; +#endif + +#ifdef LINUX_EVENT_OBJECT_STATS + POS_LOCK hLock; + IMG_UINT32 ui32ScheduleAvoided; + IMG_UINT32 ui32ScheduleCalled; + IMG_UINT32 ui32ScheduleSleptFully; + IMG_UINT32 ui32ScheduleSleptPartially; + IMG_UINT32 ui32ScheduleReturnedImmediately; +#endif + wait_queue_head_t sWait; + struct list_head sList; + PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList; +} PVRSRV_LINUX_EVENT_OBJECT; + +/*! +****************************************************************************** + + @Function LinuxEventObjectListCreate + + @Description + + Linux wait object list creation + + @Output hOSEventKM : Pointer to the event object list handle + + @Return PVRSRV_ERROR : Error code + +******************************************************************************/ +PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList) +{ + PVRSRV_LINUX_EVENT_OBJECT_LIST *psEvenObjectList; + + psEvenObjectList = OSAllocMem(sizeof(*psEvenObjectList)); + if (psEvenObjectList == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectCreate: failed to allocate memory for event list")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + INIT_LIST_HEAD(&psEvenObjectList->sList); + + rwlock_init(&psEvenObjectList->sLock); + atomic_set(&psEvenObjectList->sEventSignalCount, 0); + + *phEventObjectList = (IMG_HANDLE *) psEvenObjectList; + + return PVRSRV_OK; +} + +/*! 
+*******************************************************************************
+
+ @Function	LinuxEventObjectListDestroy
+
+ @Description
+
+ Linux wait object list destruction
+
+ @Input    hEventObjectList : Event object list handle
+
+ @Return   PVRSRV_ERROR : Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList)
+{
+	PVRSRV_LINUX_EVENT_OBJECT_LIST *psEvenObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST *) hEventObjectList;
+
+	if (psEvenObjectList)
+	{
+		if (!list_empty(&psEvenObjectList->sList))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectListDestroy: Event List is not empty"));
+			return PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT;
+		}
+		OSFreeMem(psEvenObjectList);
+		/*not nulling pointer, copy on stack*/
+	}
+	return PVRSRV_OK;
+}
+
+
+/*!
+******************************************************************************
+
+ @Function	LinuxEventObjectDelete
+
+ @Description
+
+ Linux wait object removal
+
+ @Input    hOSEventObject : Event object handle
+
+ @Return   PVRSRV_ERROR : Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObject)
+{
+	if (hOSEventObject)
+	{
+		PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)hOSEventObject;
+		PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = psLinuxEventObject->psLinuxEventObjectList;
+
+		write_lock_bh(&psLinuxEventObjectList->sLock);
+		list_del(&psLinuxEventObject->sList);
+		write_unlock_bh(&psLinuxEventObjectList->sLock);
+
+#ifdef LINUX_EVENT_OBJECT_STATS
+		OSLockDestroy(psLinuxEventObject->hLock);
+#endif
+
+#if defined(DEBUG)
+//		PVR_DPF((PVR_DBG_MESSAGE, "LinuxEventObjectDelete: Event object waits: %u", psLinuxEventObject->ui32Stats));
+#endif
+
+		OSFreeMem(psLinuxEventObject);
+		/*not nulling pointer, copy on stack*/
+
+		return PVRSRV_OK;
+	}
+	return PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT;
+}
+
+/*! +****************************************************************************** + + @Function LinuxEventObjectAdd + + @Description + + Linux wait object addition + + @Input hOSEventObjectList : Event object list handle + @Output phOSEventObject : Pointer to the event object handle + + @Return PVRSRV_ERROR : Error code + +******************************************************************************/ +PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject) + { + PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject; + PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList; + + /* allocate completion variable */ + psLinuxEventObject = OSAllocMem(sizeof(*psLinuxEventObject)); + if (psLinuxEventObject == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: failed to allocate memory")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + INIT_LIST_HEAD(&psLinuxEventObject->sList); + + /* Start with the timestamp at which event object was added to the list */ + psLinuxEventObject->ui32EventSignalCountPrevious = atomic_read(&psLinuxEventObjectList->sEventSignalCount); + +#ifdef LINUX_EVENT_OBJECT_STATS + PVR_LOG_RETURN_IF_ERROR(OSLockCreate(&psLinuxEventObject->hLock), "OSLockCreate"); + psLinuxEventObject->ui32ScheduleAvoided = 0; + psLinuxEventObject->ui32ScheduleCalled = 0; + psLinuxEventObject->ui32ScheduleSleptFully = 0; + psLinuxEventObject->ui32ScheduleSleptPartially = 0; + psLinuxEventObject->ui32ScheduleReturnedImmediately = 0; +#endif + +#if defined(DEBUG) + psLinuxEventObject->ui32Stats = 0; +#endif + init_waitqueue_head(&psLinuxEventObject->sWait); + + psLinuxEventObject->psLinuxEventObjectList = psLinuxEventObjectList; + + write_lock_bh(&psLinuxEventObjectList->sLock); + list_add(&psLinuxEventObject->sList, &psLinuxEventObjectList->sList); + write_unlock_bh(&psLinuxEventObjectList->sLock); + + *phOSEventObject = psLinuxEventObject; + + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function LinuxEventObjectSignal + + @Description + + Linux wait object signaling function + + @Input hOSEventObjectList : Event object list handle + + @Return PVRSRV_ERROR : Error code + +******************************************************************************/ +PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList) +{ + PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject; + PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList; + struct list_head *psListEntry, *psListEntryTemp, *psList; + psList = &psLinuxEventObjectList->sList; + + /* Move the timestamp ahead for this call, so a potential "Wait" from any + * EventObject/s doesn't wait for the signal to occur before returning. Early + * setting/incrementing of timestamp reduces the window where a concurrent + * "Wait" call might block while "this" Signal call is being processed */ + atomic_inc(&psLinuxEventObjectList->sEventSignalCount); + + read_lock_bh(&psLinuxEventObjectList->sLock); + list_for_each_safe(psListEntry, psListEntryTemp, psList) + { + psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)list_entry(psListEntry, PVRSRV_LINUX_EVENT_OBJECT, sList); + wake_up_interruptible(&psLinuxEventObject->sWait); + } + read_unlock_bh(&psLinuxEventObjectList->sLock); + + return PVRSRV_OK; +} + +static void _TryToFreeze(void) +{ + /* if we reach zero it means that all of the threads called try_to_freeze */ + LinuxBridgeNumActiveKernelThreadsDecrement(); + + /* Returns true if the thread was frozen, should we do anything with this + * information? What do we return? Which one is the error case? 
*/ + try_to_freeze(); + + LinuxBridgeNumActiveKernelThreadsIncrement(); +} + +void LinuxEventObjectDumpDebugInfo(IMG_HANDLE hOSEventObject) +{ +#ifdef LINUX_EVENT_OBJECT_STATS + PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)hOSEventObject; + + OSLockAcquire(psLinuxEventObject->hLock); + PVR_LOG(("%s: EvObj(%p) schedule: Avoided(%u) Called(%u) ReturnedImmediately(%u) SleptFully(%u) SleptPartially(%u)", + __func__, psLinuxEventObject, psLinuxEventObject->ui32ScheduleAvoided, + psLinuxEventObject->ui32ScheduleCalled, psLinuxEventObject->ui32ScheduleReturnedImmediately, + psLinuxEventObject->ui32ScheduleSleptFully, psLinuxEventObject->ui32ScheduleSleptPartially)); + OSLockRelease(psLinuxEventObject->hLock); +#else + PVR_LOG(("%s: LINUX_EVENT_OBJECT_STATS disabled!", __func__)); +#endif +} + +/*! +****************************************************************************** + + @Function LinuxEventObjectWait + + @Description + + Linux wait object routine + + @Input hOSEventObject : Event object handle + + @Input ui64Timeoutus : Time out value in usec + + @Return PVRSRV_ERROR : Error code + +******************************************************************************/ +PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject, + IMG_UINT64 ui64Timeoutus, + IMG_BOOL bFreezable) +{ + IMG_UINT32 ui32EventSignalCount; + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + IMG_UINT32 ui32Remainder; + long timeOutJiffies; +#ifdef LINUX_EVENT_OBJECT_STATS + long totalTimeoutJiffies; + IMG_BOOL bScheduleCalled = IMG_FALSE; +#endif + + DEFINE_WAIT(sWait); + + PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *) hOSEventObject; + PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = psLinuxEventObject->psLinuxEventObjectList; + + /* Check if the driver is good shape */ + if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK) + { + return PVRSRV_ERROR_TIMEOUT; + } + + /* usecs_to_jiffies only takes an uint. 
So if our timeout is bigger than an + * uint use the msec version. With such a long timeout we really don't need + * the high resolution of usecs. */ + if (ui64Timeoutus > 0xffffffffULL) + timeOutJiffies = msecs_to_jiffies(OSDivide64(ui64Timeoutus, 1000, &ui32Remainder)); + else + timeOutJiffies = usecs_to_jiffies(ui64Timeoutus); + +#ifdef LINUX_EVENT_OBJECT_STATS + totalTimeoutJiffies = timeOutJiffies; +#endif + + do + { + prepare_to_wait(&psLinuxEventObject->sWait, &sWait, TASK_INTERRUPTIBLE); + ui32EventSignalCount = (IMG_UINT32) atomic_read(&psLinuxEventObjectList->sEventSignalCount); + + if (psLinuxEventObject->ui32EventSignalCountPrevious != ui32EventSignalCount) + { + /* There is a pending event signal i.e. LinuxEventObjectSignal() + * was called on the event object since the last time we checked. + * Return without waiting. */ + break; + } + + if (signal_pending(current)) + { + /* There is an OS signal pending so return. + * This allows to kill/interrupt user space processes which + * are waiting on this event object. 
*/ + break; + } + +#ifdef LINUX_EVENT_OBJECT_STATS + bScheduleCalled = IMG_TRUE; +#endif + timeOutJiffies = schedule_timeout(timeOutJiffies); + + if (bFreezable) + { + _TryToFreeze(); + } + +#if defined(DEBUG) + psLinuxEventObject->ui32Stats++; +#endif + + + } while (timeOutJiffies); + + finish_wait(&psLinuxEventObject->sWait, &sWait); + + psLinuxEventObject->ui32EventSignalCountPrevious = ui32EventSignalCount; + +#ifdef LINUX_EVENT_OBJECT_STATS + OSLockAcquire(psLinuxEventObject->hLock); + if (bScheduleCalled) + { + psLinuxEventObject->ui32ScheduleCalled++; + if (totalTimeoutJiffies == timeOutJiffies) + { + psLinuxEventObject->ui32ScheduleReturnedImmediately++; + } + else if (timeOutJiffies == 0) + { + psLinuxEventObject->ui32ScheduleSleptFully++; + } + else + { + psLinuxEventObject->ui32ScheduleSleptPartially++; + } + } + else + { + psLinuxEventObject->ui32ScheduleAvoided++; + } + OSLockRelease(psLinuxEventObject->hLock); +#endif + + if (signal_pending(current)) + { + return PVRSRV_ERROR_INTERRUPTED; + } + else + { + return timeOutJiffies ? 
PVRSRV_OK : PVRSRV_ERROR_TIMEOUT; + } +} + +#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) + +PVRSRV_ERROR LinuxEventObjectWaitUntilSignalled(IMG_HANDLE hOSEventObject) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + + DEFINE_WAIT(sWait); + + PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = + (PVRSRV_LINUX_EVENT_OBJECT *) hOSEventObject; + PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = + psLinuxEventObject->psLinuxEventObjectList; + + /* Check if the driver is in good shape */ + if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK) + { + return PVRSRV_ERROR_TIMEOUT; + } + + prepare_to_wait(&psLinuxEventObject->sWait, &sWait, TASK_INTERRUPTIBLE); + + if (psLinuxEventObject->ui32EventSignalCountPrevious != + (IMG_UINT32) atomic_read(&psLinuxEventObjectList->sEventSignalCount)) + { + /* There is a pending signal, so return without waiting */ + goto finish; + } + + schedule(); + + _TryToFreeze(); + +finish: + finish_wait(&psLinuxEventObject->sWait, &sWait); + + psLinuxEventObject->ui32EventSignalCountPrevious = + (IMG_UINT32) atomic_read(&psLinuxEventObjectList->sEventSignalCount); + + return PVRSRV_OK; +} + +#endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */ diff --git a/drivers/gpu/drm/phytium/octopus/event.h b/drivers/gpu/drm/phytium/octopus/event.h new file mode 100644 index 000000000000..712f78f3e96d --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/event.h @@ -0,0 +1,54 @@ +/*************************************************************************/ /*! +@File +@Title Event Object +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList); +PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList); +PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject); +PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObject); +PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList); +PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject, + IMG_UINT64 ui64Timeoutus, + IMG_BOOL bFreezable); +#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) +PVRSRV_ERROR LinuxEventObjectWaitUntilSignalled(IMG_HANDLE hOSEventObject); +#endif +void LinuxEventObjectDumpDebugInfo(IMG_HANDLE hOSEventObject); diff --git a/drivers/gpu/drm/phytium/octopus/fwload.c b/drivers/gpu/drm/phytium/octopus/fwload.c new file mode 100644 index 000000000000..aca3899774f7 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/fwload.c @@ -0,0 +1,258 @@ +/*************************************************************************/ /*! +@File +@Title Services firmware load and access routines for Linux +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Device specific functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include +#include +#include +#include + +#include "device.h" +#include "module_common.h" +#include "fwload.h" +#include "pvr_debug.h" +#include "srvkm.h" + +#if defined(RGX_FW_SIGNED) + +#include +#include +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) +#include +#else +#define PKEY_ID_PKCS7 2 +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0) */ + +#include "signfw.h" +#endif /* RGX_FW_SIGNED */ + +struct OS_FW_IMAGE_t +{ + const struct firmware *psFW; + size_t uSignatureSize; +}; + +#if defined(RGX_FW_SIGNED) + +static int OSCheckSignature(const struct FirmwareSignatureHeader *psHeader, size_t uSize) +{ + if (be32_to_cpu(psHeader->ui32SignatureLen) >= uSize - sizeof(*psHeader)) + { + return -EBADMSG; + } + + if (psHeader->ui8IDType != PKEY_ID_PKCS7) + { + return -ENOPKG; + } + + if (psHeader->ui8Algo != 0 || psHeader->ui8HashAlgo != 0 || + psHeader->ui8SignerLen != 0 || psHeader->ui8KeyIDLen != 0 || + psHeader->__ui8Padding[0] != 0 || psHeader->__ui8Padding[1] != 0 || + psHeader->__ui8Padding[2] != 0) + { + return -EBADMSG; + } + + return 0; +} + +bool OSVerifyFirmware(OS_FW_IMAGE *psFWImage) +{ + const struct firmware *psFW = psFWImage->psFW; + const u8 *pui8FWData = psFW->data; + size_t uFWSize = psFW->size; + uint32_t ui32MagicLen = sizeof(MODULE_SIG_STRING) - 1; + struct FirmwareSignatureHeader sHeader; + int err; + + if (uFWSize <= ui32MagicLen) + { + return false; + } + + /* + * Linux Kernel's sign-file utility is primarily intended for signing + * modules, and so appends the MODULE_SIG_STRING magic at the end of + * the signature. Only proceed with verification if this magic is found. 
+ */ + if (memcmp(pui8FWData + uFWSize - ui32MagicLen, MODULE_SIG_STRING, ui32MagicLen) != 0) + { + return false; + } + + uFWSize -= ui32MagicLen; + if (uFWSize <= sizeof(sHeader)) + { + return false; + } + + /* + * After the magic, a header is placed which informs about the digest / + * crypto algorithm etc. Copy that header and ensure that it has valid + * contents (We only support RSA Crypto, SHA Hash, X509 certificate and + * PKCS#7 signature). + */ + memcpy(&sHeader, pui8FWData + (uFWSize - sizeof(sHeader)), sizeof(sHeader)); + if (OSCheckSignature(&sHeader, uFWSize) != 0) + { + return false; + } + + /* + * As all information is now extracted, we can go ahead and ask PKCS + * module to verify the sign. + */ + uFWSize -= be32_to_cpu(sHeader.ui32SignatureLen) + sizeof(sHeader); + err = verify_pkcs7_signature(pui8FWData, uFWSize, pui8FWData + uFWSize, + be32_to_cpu(sHeader.ui32SignatureLen), NULL, + VERIFYING_UNSPECIFIED_SIGNATURE, NULL, NULL); + if (err == 0) + { + psFWImage->uSignatureSize = psFW->size - uFWSize; + PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware Successfully Verified", + __func__)); + return true; + } + + PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Verification Failed (%d)", + __func__, err)); + return false; +} + +#else /* defined(RGX_FW_SIGNED) */ + +inline bool OSVerifyFirmware(OS_FW_IMAGE *psFWImage) +{ + return true; +} + +#endif /* defined(RGX_FW_SIGNED) */ + +PVRSRV_ERROR +OSLoadFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, const IMG_CHAR *pszBVNCString, + bool (*pfnVerifyFirmware)(OS_FW_IMAGE*), OS_FW_IMAGE **ppsFWImage) +{ + const struct firmware *psFW = NULL; + OS_FW_IMAGE *psFWImage; + IMG_INT32 res; + PVRSRV_ERROR eError; + + res = request_firmware(&psFW, pszBVNCString, psDeviceNode->psDevConfig->pvOSDevice); + if (res != 0) + { + PVR_DPF((PVR_DBG_WARNING, "%s: request_firmware('%s') failed (%d)", + __func__, pszBVNCString, res)); + + release_firmware(psFW); + if (res == -ENOENT) + { + PVR_DPF((PVR_DBG_ERROR, "%s: request_firmware('%s') failed 
(%d) (ERROR_NOT_FOUND)", + __func__, pszBVNCString, res)); + eError = PVRSRV_ERROR_NOT_FOUND; + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: request_firmware('%s') failed (%d) (ERROR_NOT_READY)", + __func__, pszBVNCString, res)); + eError = PVRSRV_ERROR_NOT_READY; + } + goto err_exit; + } + + psFWImage = OSAllocZMem(sizeof(*psFWImage)); + if (psFWImage == NULL) + { + PVR_DPF((PVR_DBG_WARNING, "%s: OSAllocZMem('%s') failed.", + __func__, pszBVNCString)); + + release_firmware(psFW); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_exit; + } + + psFWImage->psFW = psFW; + if (pfnVerifyFirmware != NULL && !pfnVerifyFirmware(psFWImage)) + { + release_firmware(psFW); + OSFreeMem(psFWImage); + eError = PVRSRV_ERROR_NOT_AUTHENTICATED; + goto err_exit; + } + + *ppsFWImage = psFWImage; + return PVRSRV_OK; + +err_exit: + *ppsFWImage = NULL; + return eError; +} + +void +OSUnloadFirmware(OS_FW_IMAGE *psFWImage) +{ + const struct firmware *psFW = psFWImage->psFW; + + release_firmware(psFW); + OSFreeMem(psFWImage); +} + +size_t +OSFirmwareSize(OS_FW_IMAGE *psFWImage) +{ + const struct firmware *psFW = psFWImage->psFW; + return psFW->size - psFWImage->uSignatureSize; +} + +const void * +OSFirmwareData(OS_FW_IMAGE *psFWImage) +{ + const struct firmware *psFW = psFWImage->psFW; + + return psFW->data; +} + +/****************************************************************************** + End of file (fwload.c) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/fwload.h b/drivers/gpu/drm/phytium/octopus/fwload.h new file mode 100644 index 000000000000..d149f74eac65 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/fwload.h @@ -0,0 +1,158 @@ +/*************************************************************************/ /*! +@File +@Title Services RGX OS Interface for loading the firmware +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Description This file defines the OS interface through which the RGX + device initialisation code in the kernel/server will obtain + the RGX firmware binary image. The API is used during the + initialisation of an RGX device via the + PVRSRVCommonDeviceInitialise() + call sequence. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef FWLOAD_H +#define FWLOAD_H + +#include "img_defs.h" +#include "device_connection.h" +#include "device.h" + +/*! Opaque type handle defined and known to the OS layer implementation of this + * fwload.h OS API. This private data is allocated in the implementation of + * OSLoadFirmware() and contains whatever data and information needed to be + * able to acquire and return the firmware binary image to the Services + * kernel/server during initialisation. + * It is no longer required and may be freed when OSUnloadFirmware() is called. + */ +typedef struct OS_FW_IMAGE_t OS_FW_IMAGE; + +#if defined(__linux__) + +bool OSVerifyFirmware(OS_FW_IMAGE* psFWImage); + +#endif + +/*************************************************************************/ /*! +@Function OSLoadFirmware +@Description The OS implementation must load or acquire the firmware (FW) + image binary needed by the driver stack. + A handle to the common layer device node is given to identify + which device instance in the system is being initialised. The + BVNC string is also supplied so that the implementation knows + which FW image to retrieve since each FW image only supports one + GPU type/revision. + The calling server code supports multiple GPU types and revisions + and will detect the specific GPU type and revision before calling + this API. 
It will also have runtime configuration of the VZ mode, + hence this API must be able to retrieve different FW binary + images based on the pszBVNCString given. The purpose of the end + platform/system is key to understand which FW images must be + available to the kernel server. + On exit the implementation must return a pointer to some private + data it uses to hold the FW image information and data. It will + be passed onto later API calls by the kernel server code. + NULL should be returned if the FW image could not be retrieved. + The format of the BVNC string is as follows ([x] denotes + optional field): + "rgx.fw[.signed].B.V[p].N.C[.vz]" + The implementation must first try to load the FW identified + by the pszBVpNCString parameter. If this is not available then it + should drop back to retrieving the FW identified by the + pszBVNCString parameter. The fields in the string are: + B, V, N, C are all unsigned integer identifying type/revision. + [.signed] is present when RGX_FW_SIGNED=1 is defined in the + server build. + [p] denotes a provisional (pre-silicon) GPU configuration. + [.vz] is present when the kernel server is loaded on the HOST + of a virtualised platform. See the DriverMode server + AppHint for details. + +@Input psDeviceNode Device instance identifier. +@Input pszBVNCString Identifier string of the FW image to + be loaded/acquired in production driver. +@Input pfnVerifyFirmware Callback which checks validity of FW image. +@Output ppsFWImage Ptr to private data on success, + NULL otherwise. +@Return PVRSRV_ERROR PVRSRV_OK on success, + PVRSRV_ERROR_NOT_READY if filesystem is not + ready/initialised, + PVRSRV_ERROR_NOT_FOUND if no suitable FW + image could be found + PVRSRV_ERROR_OUT_OF_MEMORY if unable to alloc + memory for FW image + PVRSRV_ERROR_NOT_AUTHENTICATED if FW image + cannot be verified. 
+*/ /**************************************************************************/ +PVRSRV_ERROR OSLoadFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, + const IMG_CHAR *pszBVNCString, + bool (*pfnVerifyFirmware)(OS_FW_IMAGE*), + OS_FW_IMAGE **ppsFWImage); + +/*************************************************************************/ /*! +@Function OSFirmwareData +@Description This function returns a pointer to the start of the FW image + binary data held in memory. It must remain valid until + OSUnloadFirmware() is called. +@Input psFWImage Private data opaque handle +@Return void* Ptr to FW binary image to start on GPU. +*/ /**************************************************************************/ +const void* OSFirmwareData(OS_FW_IMAGE *psFWImage); + +/*************************************************************************/ /*! +@Function OSFirmwareSize +@Description This function returns the size of the FW image binary data. +@Input psFWImage Private data opaque handle +@Return size_t Size in bytes of the firmware binary image +*/ /**************************************************************************/ +size_t OSFirmwareSize(OS_FW_IMAGE *psFWImage); + +/*************************************************************************/ /*! +@Function OSUnloadFirmware +@Description This is called when the server has completed firmware + initialisation and no longer needs the private data, possibly + allocated by OSLoadFirmware(). 
+@Input psFWImage Private data opaque handle +*/ /**************************************************************************/ +void OSUnloadFirmware(OS_FW_IMAGE *psFWImage); + +#endif /* FWLOAD_H */ + +/****************************************************************************** + End of file (fwload.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/fwtrace_string.h b/drivers/gpu/drm/phytium/octopus/fwtrace_string.h new file mode 100644 index 000000000000..b0d4ab1b2561 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/fwtrace_string.h @@ -0,0 +1,52 @@ +/*************************************************************************/ /*! +@File fwtrace_string.h +@Title RGX Firmware trace strings for KM +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Platform Generic +@Description This file defines SFs tuple. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef KM_TRACE_STRING_H +#define KM_TRACE_STRING_H + +#include "rgx_fwif_sf.h" + +extern const RGXKM_STID_FMT SFs[]; +extern const IMG_UINT32 g_ui32SFsCount; + +#endif /* KM_TRACE_STRING_H */ diff --git a/drivers/gpu/drm/phytium/octopus/handle.c b/drivers/gpu/drm/phytium/octopus/handle.c new file mode 100644 index 000000000000..a8bb7da6112e --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/handle.c @@ -0,0 +1,2300 @@ +/*************************************************************************/ /*! +@File +@Title Resource Handle Manager +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Description Provide resource handle management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /***************************************************************************/ + +/* See handle.h for a description of the handle API. */ + +/* + * The implementation supports movable handle structures, allowing the address + * of a handle structure to change without having to fix up pointers in + * any of the handle structures. For example, the linked list mechanism + * used to link subhandles together uses handle array indices rather than + * pointers to the structures themselves. + */ + +#if defined(__linux__) +#include +#else +#include +#endif + +#include "img_defs.h" +#include "handle.h" +#include "handle_impl.h" +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvrsrv.h" + +#define HANDLE_HASH_TAB_INIT_SIZE 32 + +#define TEST_FLAG(v, f) BITMASK_HAS(v, f) +#define TEST_ALLOC_FLAG(psHandleData, f) BITMASK_HAS((psHandleData)->eFlag, f) + + +/* Linked list structure. 
Used for both the list head and list items */ +typedef struct _HANDLE_LIST_ +{ + IMG_HANDLE hPrev; + IMG_HANDLE hNext; + IMG_HANDLE hParent; +} HANDLE_LIST; + +typedef struct _HANDLE_DATA_ +{ + /* The handle that represents this structure */ + IMG_HANDLE hHandle; + + /* Handle type */ + PVRSRV_HANDLE_TYPE eType; + + /* Flags specified when the handle was allocated */ + PVRSRV_HANDLE_ALLOC_FLAG eFlag; + + /* Pointer to the data that the handle represents */ + void *pvData; + + /* + * Callback specified at handle allocation time to + * release/destroy/free the data represented by the + * handle when it's reference count reaches 0. This + * should always be NULL for subhandles. + */ + PFN_HANDLE_RELEASE pfnReleaseData; + + /* List head for subhandles of this handle */ + HANDLE_LIST sChildren; + + /* List entry for sibling subhandles */ + HANDLE_LIST sSiblings; + + /* Reference count. The pfnReleaseData callback gets called when the + * reference count hits zero + */ + IMG_UINT32 ui32RefCount; + +#if defined(PVRSRV_DEBUG_HANDLE_LOCK) + /* Store the handle base used for this handle, so we + * can later access the handle base lock (or check if + * it has been already acquired) + */ + PVRSRV_HANDLE_BASE *psBase; +#endif + +} HANDLE_DATA; + +struct _HANDLE_BASE_ +{ + /* Pointer to a handle implementations base structure */ + HANDLE_IMPL_BASE *psImplBase; + + /* + * Pointer to handle hash table. + * The hash table is used to do reverse lookups, converting data + * pointers to handles. + */ + HASH_TABLE *psHashTab; + + /* Type specific (connection/global/process) Lock handle */ + POS_LOCK hLock; + + /* Can be connection, process, global */ + PVRSRV_HANDLE_BASE_TYPE eType; +}; + +/* + * The key for the handle hash table is an array of three elements, the + * pointer to the resource, the resource type and the parent handle (or + * NULL if there is no parent). The eHandKey enumeration gives the + * array indices of the elements making up the key. 
+ */ +enum eHandKey +{ + HAND_KEY_DATA = 0, + HAND_KEY_TYPE, + HAND_KEY_PARENT, + HAND_KEY_LEN /* Must be last item in list */ +}; + +/* HAND_KEY is the type of the hash table key */ +typedef uintptr_t HAND_KEY[HAND_KEY_LEN]; + +typedef struct FREE_HANDLE_DATA_TAG +{ + PVRSRV_HANDLE_BASE *psBase; + PVRSRV_HANDLE_TYPE eHandleFreeType; + /* timing data (ns) to release bridge lock upon the deadline */ + IMG_UINT64 ui64TimeStart; + IMG_UINT64 ui64MaxBridgeTime; +} FREE_HANDLE_DATA; + +typedef struct FREE_KERNEL_HANDLE_DATA_TAG +{ + PVRSRV_HANDLE_BASE *psBase; + HANDLE_DATA *psProcessHandleData; + IMG_HANDLE hKernelHandle; +} FREE_KERNEL_HANDLE_DATA; + +/* Stores a pointer to the function table of the handle back-end in use */ +static HANDLE_IMPL_FUNCTAB const *gpsHandleFuncs; + +static POS_LOCK gKernelHandleLock; +static IMG_BOOL gbLockInitialised = IMG_FALSE; + +void LockHandle(PVRSRV_HANDLE_BASE *psBase) +{ + OSLockAcquire(psBase->hLock); +} + +void UnlockHandle(PVRSRV_HANDLE_BASE *psBase) +{ + OSLockRelease(psBase->hLock); +} + +/* + * Kernel handle base structure. This is used for handles that are not + * allocated on behalf of a particular process. + */ +PVRSRV_HANDLE_BASE *gpsKernelHandleBase = NULL; + +/* Increase the reference count on the given handle. + * The handle lock must already be acquired. + * Returns: the reference count after the increment + */ +static inline IMG_UINT32 _HandleRef(HANDLE_DATA *psHandleData) +{ +#if defined(PVRSRV_DEBUG_HANDLE_LOCK) + if (!OSLockIsLocked(psHandleData->psBase->hLock)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Handle lock is not locked", __func__)); + OSDumpStack(); + } +#endif + psHandleData->ui32RefCount++; + return psHandleData->ui32RefCount; +} + +/* Decrease the reference count on the given handle. + * The handle lock must already be acquired. 
+ * Returns: the reference count after the decrement + */ +static inline IMG_UINT32 _HandleUnref(HANDLE_DATA *psHandleData) +{ +#if defined(PVRSRV_DEBUG_HANDLE_LOCK) + if (!OSLockIsLocked(psHandleData->psBase->hLock)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Handle lock is not locked", __func__)); + OSDumpStack(); + } +#endif + PVR_ASSERT(psHandleData->ui32RefCount > 0); + psHandleData->ui32RefCount--; + + return psHandleData->ui32RefCount; +} + +#if defined(PVRSRV_NEED_PVR_DPF) +static const IMG_CHAR *HandleTypeToString(PVRSRV_HANDLE_TYPE eType) +{ + #define HANDLETYPE(x) \ + case PVRSRV_HANDLE_TYPE_##x: \ + return #x; + switch (eType) + { + #include "handle_types.h" + #undef HANDLETYPE + + default: + return "INVALID"; + } +} + +static const IMG_CHAR *HandleBaseTypeToString(PVRSRV_HANDLE_BASE_TYPE eType) +{ + #define HANDLEBASETYPE(x) \ + case PVRSRV_HANDLE_BASE_TYPE_##x: \ + return #x; + switch (eType) + { + HANDLEBASETYPE(CONNECTION); + HANDLEBASETYPE(PROCESS); + HANDLEBASETYPE(GLOBAL); + #undef HANDLEBASETYPE + + default: + return "INVALID"; + } +} +#endif + +/*! +******************************************************************************* + @Function GetHandleData + @Description Get the handle data structure for a given handle + @Input psBase - pointer to handle base structure + hHandle - handle from client + eType - handle type or PVRSRV_HANDLE_TYPE_NONE if the handle + type is not to be checked. 
+ @Output ppsHandleData - pointer to a pointer to the handle data struct + @Return Error code or PVRSRV_OK +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(GetHandleData) +#endif +static INLINE +PVRSRV_ERROR GetHandleData(PVRSRV_HANDLE_BASE *psBase, + HANDLE_DATA **ppsHandleData, + IMG_HANDLE hHandle, + PVRSRV_HANDLE_TYPE eType) +{ + HANDLE_DATA *psHandleData; + PVRSRV_ERROR eError; + + eError = gpsHandleFuncs->pfnGetHandleData(psBase->psImplBase, + hHandle, + (void **)&psHandleData); + PVR_RETURN_IF_ERROR(eError); + + /* + * Unless PVRSRV_HANDLE_TYPE_NONE was passed in to this function, + * check handle is of the correct type. + */ + if (unlikely(eType != PVRSRV_HANDLE_TYPE_NONE && eType != psHandleData->eType)) + { + PVR_DPF((PVR_DBG_ERROR, + "GetHandleData: Type mismatch. Lookup request: Handle %p, type: %s (%u) but stored handle is type %s (%u)", + hHandle, + HandleTypeToString(eType), + eType, + HandleTypeToString(psHandleData->eType), + psHandleData->eType)); + return PVRSRV_ERROR_HANDLE_TYPE_MISMATCH; + } + + /* Return the handle structure */ + *ppsHandleData = psHandleData; + + return PVRSRV_OK; +} + +/*! +******************************************************************************* + @Function HandleListInit + @Description Initialise a linked list structure embedded in a handle + structure. + @Input hHandle - handle containing the linked list structure + psList - pointer to linked list structure + hParent - parent handle or NULL +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(HandleListInit) +#endif +static INLINE +void HandleListInit(IMG_HANDLE hHandle, HANDLE_LIST *psList, IMG_HANDLE hParent) +{ + psList->hPrev = hHandle; + psList->hNext = hHandle; + psList->hParent = hParent; +} + +/*! 
+******************************************************************************* + @Function InitParentList + @Description Initialise the children list head in a handle structure. + The children are the subhandles of this handle. + @Input psHandleData - pointer to handle data structure +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(InitParentList) +#endif +static INLINE +void InitParentList(HANDLE_DATA *psHandleData) +{ + IMG_HANDLE hParent = psHandleData->hHandle; + + HandleListInit(hParent, &psHandleData->sChildren, hParent); +} + +/*! +******************************************************************************* + + @Function InitChildEntry + @Description Initialise the child list entry in a handle structure. The list + entry is used to link together subhandles of a given handle. + @Input psHandleData - pointer to handle data structure +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(InitChildEntry) +#endif +static INLINE +void InitChildEntry(HANDLE_DATA *psHandleData) +{ + HandleListInit(psHandleData->hHandle, &psHandleData->sSiblings, NULL); +} + +/*! +******************************************************************************* + @Function HandleListIsEmpty + @Description Determine whether a given linked list is empty. + @Input hHandle - handle containing the list head + psList - pointer to the list head + @Return IMG_TRUE if the list is empty, IMG_FALSE if it isn't. +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(HandleListIsEmpty) +#endif +static INLINE +IMG_BOOL HandleListIsEmpty(IMG_HANDLE hHandle, HANDLE_LIST *psList) /* Instead of passing in the handle can we not just do (psList->hPrev == psList->hNext) ? IMG_TRUE : IMG_FALSE ??? 
*/ +{ + IMG_BOOL bIsEmpty; + + bIsEmpty = (IMG_BOOL)(psList->hNext == hHandle); + +#ifdef DEBUG + { + IMG_BOOL bIsEmpty2; + + bIsEmpty2 = (IMG_BOOL)(psList->hPrev == hHandle); + PVR_ASSERT(bIsEmpty == bIsEmpty2); + } +#endif + + return bIsEmpty; +} + +#ifdef DEBUG +/*! +******************************************************************************* + @Function NoChildren + @Description Determine whether a handle has any subhandles + @Input psHandleData - pointer to handle data structure + @Return IMG_TRUE if the handle has no subhandles, IMG_FALSE if it does. +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(NoChildren) +#endif +static INLINE +IMG_BOOL NoChildren(HANDLE_DATA *psHandleData) +{ + PVR_ASSERT(psHandleData->sChildren.hParent == psHandleData->hHandle); + + return HandleListIsEmpty(psHandleData->hHandle, &psHandleData->sChildren); +} + +/*! +******************************************************************************* + @Function NoParent + @Description Determine whether a handle is a subhandle + @Input psHandleData - pointer to handle data structure + @Return IMG_TRUE if the handle is not a subhandle, IMG_FALSE if it is. +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(NoParent) +#endif +static INLINE +IMG_BOOL NoParent(HANDLE_DATA *psHandleData) +{ + if (HandleListIsEmpty(psHandleData->hHandle, &psHandleData->sSiblings)) + { + PVR_ASSERT(psHandleData->sSiblings.hParent == NULL); + + return IMG_TRUE; + } + + PVR_ASSERT(psHandleData->sSiblings.hParent != NULL); + return IMG_FALSE; +} +#endif /*DEBUG*/ + +/*! +******************************************************************************* + @Function ParentHandle + @Description Determine the parent of a handle + @Input psHandleData - pointer to handle data structure + @Return Parent handle, or NULL if the handle is not a subhandle. 
+******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(ParentHandle) +#endif +static INLINE +IMG_HANDLE ParentHandle(HANDLE_DATA *psHandleData) +{ + return psHandleData->sSiblings.hParent; +} + +/* + * GetHandleListFromHandleAndOffset is used to generate either a + * pointer to the subhandle list head, or a pointer to the linked list + * structure of an item on a subhandle list. + * The list head is itself on the list, but is at a different offset + * in the handle structure to the linked list structure for items on + * the list. The two linked list structures are differentiated by + * the third parameter, containing the parent handle. The parent field + * in the list head structure references the handle structure that contains + * it. For items on the list, the parent field in the linked list structure + * references the parent handle, which will be different from the handle + * containing the linked list structure. + */ +#ifdef INLINE_IS_PRAGMA +#pragma inline(GetHandleListFromHandleAndOffset) +#endif +static INLINE +HANDLE_LIST *GetHandleListFromHandleAndOffset(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE hEntry, + IMG_HANDLE hParent, + size_t uiParentOffset, + size_t uiEntryOffset) +{ + HANDLE_DATA *psHandleData = NULL; + + PVR_ASSERT(psBase != NULL); + + if (GetHandleData(psBase, &psHandleData, hEntry, + PVRSRV_HANDLE_TYPE_NONE) != PVRSRV_OK) + { + return NULL; + } + + if (hEntry == hParent) + { + return (HANDLE_LIST *)IMG_OFFSET_ADDR(psHandleData, uiParentOffset); + } + else + { + return (HANDLE_LIST *)IMG_OFFSET_ADDR(psHandleData, uiEntryOffset); + } +} + +/*! +******************************************************************************* + @Function HandleListInsertBefore + @Description Insert a handle before a handle currently on the list. 
+ @Input hEntry - handle to be inserted after + psEntry - pointer to handle structure to be inserted after + uiParentOffset - offset to list head struct in handle structure + hNewEntry - handle to be inserted + psNewEntry - pointer to handle structure of item to be inserted + uiEntryOffset - offset of list item struct in handle structure + hParent - parent handle of hNewEntry + @Return Error code or PVRSRV_OK +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(HandleListInsertBefore) +#endif +static INLINE +PVRSRV_ERROR HandleListInsertBefore(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE hEntry, + HANDLE_LIST *psEntry, + size_t uiParentOffset, + IMG_HANDLE hNewEntry, + HANDLE_LIST *psNewEntry, + size_t uiEntryOffset, + IMG_HANDLE hParent) +{ + HANDLE_LIST *psPrevEntry; + + PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase"); + PVR_LOG_RETURN_IF_INVALID_PARAM(psEntry != NULL, "psEntry"); + PVR_LOG_RETURN_IF_INVALID_PARAM(psNewEntry != NULL, "psNewEntry"); + + psPrevEntry = GetHandleListFromHandleAndOffset(psBase, + psEntry->hPrev, + hParent, + uiParentOffset, + uiEntryOffset); + if (psPrevEntry == NULL) + { + return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE; + } + + PVR_ASSERT(psNewEntry->hParent == NULL); + PVR_ASSERT(hEntry == psPrevEntry->hNext); + +#if defined(DEBUG) + { + HANDLE_LIST *psParentList; + + psParentList = GetHandleListFromHandleAndOffset(psBase, + hParent, + hParent, + uiParentOffset, + uiParentOffset); + PVR_ASSERT(psParentList && psParentList->hParent == hParent); + } +#endif /* defined(DEBUG) */ + + psNewEntry->hPrev = psEntry->hPrev; + psEntry->hPrev = hNewEntry; + + psNewEntry->hNext = hEntry; + psPrevEntry->hNext = hNewEntry; + + psNewEntry->hParent = hParent; + + return PVRSRV_OK; +} + +/*! 
+******************************************************************************* + @Function AdoptChild + @Description Assign a subhandle to a handle + @Input psParentData - pointer to handle structure of parent handle + psChildData - pointer to handle structure of child subhandle + @Return Error code or PVRSRV_OK +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(AdoptChild) +#endif +static INLINE +PVRSRV_ERROR AdoptChild(PVRSRV_HANDLE_BASE *psBase, + HANDLE_DATA *psParentData, + HANDLE_DATA *psChildData) +{ + IMG_HANDLE hParent = psParentData->sChildren.hParent; + + PVR_ASSERT(hParent == psParentData->hHandle); + + return HandleListInsertBefore(psBase, + hParent, + &psParentData->sChildren, + offsetof(HANDLE_DATA, sChildren), + psChildData->hHandle, + &psChildData->sSiblings, + offsetof(HANDLE_DATA, sSiblings), + hParent); +} + +/*! +******************************************************************************* + @Function HandleListRemove + @Description Remove a handle from a list + @Input hEntry - handle to be removed + psEntry - pointer to handle structure of item to be removed + uiEntryOffset - offset of list item struct in handle structure + uiParentOffset - offset to list head struct in handle structure + @Return Error code or PVRSRV_OK +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(HandleListRemove) +#endif +static INLINE +PVRSRV_ERROR HandleListRemove(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE hEntry, + HANDLE_LIST *psEntry, + size_t uiEntryOffset, + size_t uiParentOffset) +{ + PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase"); + PVR_LOG_RETURN_IF_INVALID_PARAM(psEntry != NULL, "psEntry"); + + if (!HandleListIsEmpty(hEntry, psEntry)) + { + HANDLE_LIST *psPrev; + HANDLE_LIST *psNext; + + psPrev = GetHandleListFromHandleAndOffset(psBase, + psEntry->hPrev, + psEntry->hParent, + uiParentOffset, + 
uiEntryOffset); + if (psPrev == NULL) + { + return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE; + } + + psNext = GetHandleListFromHandleAndOffset(psBase, + psEntry->hNext, + psEntry->hParent, + uiParentOffset, + uiEntryOffset); + if (psNext == NULL) + { + return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE; + } + + /* + * The list head is on the list, and we don't want to + * remove it. + */ + PVR_ASSERT(psEntry->hParent != NULL); + + psPrev->hNext = psEntry->hNext; + psNext->hPrev = psEntry->hPrev; + + HandleListInit(hEntry, psEntry, NULL); + } + + return PVRSRV_OK; +} + +/*! +******************************************************************************* + @Function UnlinkFromParent + @Description Remove a subhandle from its parents list + @Input psHandleData - pointer to handle data structure of child + subhandle. + @Return Error code or PVRSRV_OK +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(UnlinkFromParent) +#endif +static INLINE +PVRSRV_ERROR UnlinkFromParent(PVRSRV_HANDLE_BASE *psBase, + HANDLE_DATA *psHandleData) +{ + return HandleListRemove(psBase, + psHandleData->hHandle, + &psHandleData->sSiblings, + offsetof(HANDLE_DATA, sSiblings), + offsetof(HANDLE_DATA, sChildren)); +} + +/*! 
+******************************************************************************* + @Function HandleListIterate + @Description Iterate over the items in a list + @Input psHead - pointer to list head + uiParentOffset - offset to list head struct in handle structure + uiEntryOffset - offset of list item struct in handle structure + pfnIterFunc - function to be called for each handle in the list + @Return Error code or PVRSRV_OK +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(HandleListIterate) +#endif +static INLINE +PVRSRV_ERROR HandleListIterate(PVRSRV_HANDLE_BASE *psBase, + HANDLE_LIST *psHead, + size_t uiParentOffset, + size_t uiEntryOffset, + PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, IMG_HANDLE)) +{ + IMG_HANDLE hHandle = psHead->hNext; + IMG_HANDLE hParent = psHead->hParent; + IMG_HANDLE hNext; + + PVR_ASSERT(psHead->hParent != NULL); + + /* + * Follow the next chain from the list head until we reach + * the list head again, which signifies the end of the list. + */ + while (hHandle != hParent) + { + HANDLE_LIST *psEntry; + PVRSRV_ERROR eError; + + psEntry = GetHandleListFromHandleAndOffset(psBase, + hHandle, + hParent, + uiParentOffset, + uiEntryOffset); + if (psEntry == NULL) + { + return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE; + } + + PVR_ASSERT(psEntry->hParent == psHead->hParent); + + /* + * Get the next index now, in case the list item is + * modified by the iteration function. + */ + hNext = psEntry->hNext; + + eError = (*pfnIterFunc)(psBase, hHandle); + PVR_RETURN_IF_ERROR(eError); + + hHandle = hNext; + } + + return PVRSRV_OK; +} + +/*! 
+******************************************************************************* + @Function IterateOverChildren + @Description Iterate over the subhandles of a parent handle + @Input psParentData - pointer to parent handle structure + pfnIterFunc - function to be called for each subhandle + @Return Error code or PVRSRV_OK +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(IterateOverChildren) +#endif +static INLINE +PVRSRV_ERROR IterateOverChildren(PVRSRV_HANDLE_BASE *psBase, + HANDLE_DATA *psParentData, + PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, IMG_HANDLE)) +{ + return HandleListIterate(psBase, + &psParentData->sChildren, + offsetof(HANDLE_DATA, sChildren), + offsetof(HANDLE_DATA, sSiblings), + pfnIterFunc); +} + +/*! +******************************************************************************* + @Function ParentIfPrivate + @Description Return the parent handle if the handle was allocated with + PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE, else return NULL. + @Input psHandleData - pointer to handle data structure + @Return Parent handle or NULL +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(ParentIfPrivate) +#endif +static INLINE +IMG_HANDLE ParentIfPrivate(HANDLE_DATA *psHandleData) +{ + return TEST_ALLOC_FLAG(psHandleData, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ? + ParentHandle(psHandleData) : NULL; +} + +/*! 
+******************************************************************************* + @Function InitKey + @Description Initialise a hash table key for the current process + @Input aKey - pointer to key + psBase - pointer to handle base structure + pvData - pointer to the resource the handle represents + eType - type of resource +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(InitKey) +#endif +static INLINE +void InitKey(HAND_KEY aKey, + PVRSRV_HANDLE_BASE *psBase, + void *pvData, + PVRSRV_HANDLE_TYPE eType, + IMG_HANDLE hParent) +{ + PVR_UNREFERENCED_PARAMETER(psBase); + + aKey[HAND_KEY_DATA] = (uintptr_t)pvData; + aKey[HAND_KEY_TYPE] = (uintptr_t)eType; + aKey[HAND_KEY_PARENT] = (uintptr_t)hParent; +} + +static PVRSRV_ERROR FreeHandleWrapper(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle); + +/*! +******************************************************************************* + @Function FreeHandle + @Description Free a handle data structure. 
+ @Input psBase - Pointer to handle base structure + hHandle - Handle to be freed + eType - Type of the handle to be freed + ppvData - Location for data associated with the freed handle + @Output ppvData - Points to the data associated with the freed handle + @Return PVRSRV_OK or PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR FreeHandle(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE hHandle, + PVRSRV_HANDLE_TYPE eType, + void **ppvData) +{ + HANDLE_DATA *psHandleData = NULL; + HANDLE_DATA *psReleasedHandleData; + PVRSRV_ERROR eError; + + eError = GetHandleData(psBase, &psHandleData, hHandle, eType); + PVR_LOG_RETURN_IF_ERROR(eError, "GetHandleData"); + + if (_HandleUnref(psHandleData) > 0) + { + /* this handle still has references so do not destroy it + * or the underlying object yet + */ + return PVRSRV_OK; + } + + /* Call the release data callback for each reference on the handle */ + if (psHandleData->pfnReleaseData != NULL) + { + eError = psHandleData->pfnReleaseData(psHandleData->pvData); + if (eError == PVRSRV_ERROR_RETRY) + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s: " + "Got retry while calling release data callback for %p (type = %d)", + __func__, + hHandle, + (IMG_UINT32)psHandleData->eType)); + + /* the caller should retry, so retain a reference on the handle */ + _HandleRef(psHandleData); + + return eError; + } + else if (eError != PVRSRV_OK) + { + return eError; + } + } + + if (!TEST_ALLOC_FLAG(psHandleData, PVRSRV_HANDLE_ALLOC_FLAG_MULTI)) + { + HAND_KEY aKey; + IMG_HANDLE hRemovedHandle; + + InitKey(aKey, psBase, psHandleData->pvData, psHandleData->eType, ParentIfPrivate(psHandleData)); + + hRemovedHandle = (IMG_HANDLE)HASH_Remove_Extended(psBase->psHashTab, aKey); + + PVR_ASSERT(hRemovedHandle != NULL); + PVR_ASSERT(hRemovedHandle == psHandleData->hHandle); + PVR_UNREFERENCED_PARAMETER(hRemovedHandle); + } + + eError = UnlinkFromParent(psBase, psHandleData); + PVR_LOG_RETURN_IF_ERROR(eError, 
if (likely(eError == PVRSRV_OK))
+******************************************************************************* + @Function AllocHandle + @Description Allocate a new handle + @Input phHandle - location for new handle + pvData - pointer to resource to be associated with the handle + eType - the type of resource + hParent - parent handle or NULL + pfnReleaseData - Function to release resource at handle release + time + @Output phHandle - points to new handle + @Return Error code or PVRSRV_OK +******************************************************************************/ +static PVRSRV_ERROR AllocHandle(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE *phHandle, + void *pvData, + PVRSRV_HANDLE_TYPE eType, + PVRSRV_HANDLE_ALLOC_FLAG eFlag, + IMG_HANDLE hParent, + PFN_HANDLE_RELEASE pfnReleaseData) +{ + HANDLE_DATA *psNewHandleData; + IMG_HANDLE hHandle; + PVRSRV_ERROR eError; + + /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */ + PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); + PVR_ASSERT(psBase != NULL && psBase->psHashTab != NULL); + PVR_ASSERT(gpsHandleFuncs); + + if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI)) + { + /* Handle must not already exist */ + PVR_ASSERT(FindHandle(psBase, pvData, eType, hParent) == NULL); + } + + psNewHandleData = OSAllocZMem(sizeof(*psNewHandleData)); + PVR_LOG_RETURN_IF_NOMEM(psNewHandleData, "OSAllocZMem"); + + eError = gpsHandleFuncs->pfnAcquireHandle(psBase->psImplBase, &hHandle, + psNewHandleData); + PVR_LOG_GOTO_IF_ERROR(eError, "pfnAcquireHandle", + ErrorFreeHandleData); + + /* + * If a data pointer can be associated with multiple handles, we + * don't put the handle in the hash table, as the data pointer + * may not map to a unique handle + */ + if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI)) + { + HAND_KEY aKey; + + /* Initialise hash key */ + InitKey(aKey, psBase, pvData, eType, hParent); + + /* Put the new handle in the hash table */ + eError = HASH_Insert_Extended(psBase->psHashTab, aKey, (uintptr_t)hHandle) ? 
+ PVRSRV_OK : PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE; + PVR_LOG_GOTO_IF_FALSE(eError == PVRSRV_OK, "couldn't add handle to hash table", + ErrorReleaseHandle); + } + + psNewHandleData->hHandle = hHandle; + psNewHandleData->eType = eType; + psNewHandleData->eFlag = eFlag; + psNewHandleData->pvData = pvData; + psNewHandleData->pfnReleaseData = pfnReleaseData; + psNewHandleData->ui32RefCount = 1; + + InitParentList(psNewHandleData); +#if defined(DEBUG) + PVR_ASSERT(NoChildren(psNewHandleData)); +#endif + + InitChildEntry(psNewHandleData); +#if defined(DEBUG) + PVR_ASSERT(NoParent(psNewHandleData)); +#endif + +#if defined(PVRSRV_DEBUG_HANDLE_LOCK) + psNewHandleData->psBase = psBase; +#endif + + /* Return the new handle to the client */ + *phHandle = psNewHandleData->hHandle; + + return PVRSRV_OK; + +ErrorReleaseHandle: + (void)gpsHandleFuncs->pfnReleaseHandle(psBase->psImplBase, hHandle, NULL); + +ErrorFreeHandleData: + OSFreeMem(psNewHandleData); + + return eError; +} + +/*! +******************************************************************************* + @Function PVRSRVAllocHandle + @Description Allocate a handle + @Input psBase - pointer to handle base structure + pvData - pointer to resource to be associated with the handle + eType - the type of resource + pfnReleaseData - Function to release resource at handle release + time + @Output phHandle - points to new handle + @Return Error code or PVRSRV_OK +******************************************************************************/ +PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE *phHandle, + void *pvData, + PVRSRV_HANDLE_TYPE eType, + PVRSRV_HANDLE_ALLOC_FLAG eFlag, + PFN_HANDLE_RELEASE pfnReleaseData) +{ + PVRSRV_ERROR eError; + + LockHandle(psBase); + eError = PVRSRVAllocHandleUnlocked(psBase, phHandle, pvData, eType, eFlag, pfnReleaseData); + UnlockHandle(psBase); + + return eError; +} + +/*! 
+******************************************************************************* + @Function PVRSRVAllocHandleUnlocked + @Description Allocate a handle without acquiring/releasing the handle lock. + The function assumes you hold the lock when called. + @Input phHandle - location for new handle + pvData - pointer to resource to be associated with the handle + eType - the type of resource + pfnReleaseData - Function to release resource at handle release + time + @Output phHandle - points to new handle + @Return Error code or PVRSRV_OK +******************************************************************************/ +PVRSRV_ERROR PVRSRVAllocHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE *phHandle, + void *pvData, + PVRSRV_HANDLE_TYPE eType, + PVRSRV_HANDLE_ALLOC_FLAG eFlag, + PFN_HANDLE_RELEASE pfnReleaseData) +{ + *phHandle = NULL; + + /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */ + PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); + PVR_ASSERT(gpsHandleFuncs); + + PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase"); + PVR_LOG_RETURN_IF_INVALID_PARAM(pfnReleaseData != NULL, "pfnReleaseData"); + + return AllocHandle(psBase, phHandle, pvData, eType, eFlag, NULL, pfnReleaseData); +} + +/*! 
+******************************************************************************* + @Function PVRSRVAllocSubHandle + @Description Allocate a subhandle + @Input pvData - pointer to resource to be associated with the subhandle + eType - the type of resource + hParent - parent handle + @Output phHandle - points to new subhandle + @Return Error code or PVRSRV_OK +******************************************************************************/ +PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE *phHandle, + void *pvData, + PVRSRV_HANDLE_TYPE eType, + PVRSRV_HANDLE_ALLOC_FLAG eFlag, + IMG_HANDLE hParent) +{ + PVRSRV_ERROR eError; + + LockHandle(psBase); + eError = PVRSRVAllocSubHandleUnlocked(psBase, phHandle, pvData, eType, eFlag, hParent); + UnlockHandle(psBase); + + return eError; +} + +/*! +******************************************************************************* + @Function PVRSRVAllocSubHandleUnlocked + @Description Allocate a subhandle without acquiring/releasing the handle + lock. The function assumes you hold the lock when called. 
 * can't also get its handle structure. Otherwise something has gone badly wrong.
+ */ + eError = GetHandleData(psBase, &psPHandleData, hParent, PVRSRV_HANDLE_TYPE_NONE); + PVR_LOG_GOTO_IF_FALSE(eError == PVRSRV_OK, "failed to get parent handle structure", + ExitFreeHandle); + + eError = AdoptChild(psBase, psPHandleData, psCHandleData); + PVR_LOG_GOTO_IF_FALSE(eError == PVRSRV_OK, "parent handle failed to adopt subhandle", + ExitFreeHandle); + + *phHandle = hHandle; + + return PVRSRV_OK; + +ExitFreeHandle: + (void) FreeHandle(psBase, hHandle, eType, NULL); +Exit: + return eError; +} + +/*! +******************************************************************************* + @Function PVRSRVFindHandle + @Description Find handle corresponding to a resource pointer + @Input pvData - pointer to resource to be associated with the handle + eType - the type of resource + @Output phHandle - points to returned handle + @Return Error code or PVRSRV_OK +******************************************************************************/ +PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE *phHandle, + void *pvData, + PVRSRV_HANDLE_TYPE eType) +{ + PVRSRV_ERROR eError; + + LockHandle(psBase); + eError = PVRSRVFindHandleUnlocked(psBase, phHandle, pvData, eType); + UnlockHandle(psBase); + + return eError; +} + +/*! +******************************************************************************* + @Function PVRSRVFindHandleUnlocked + @Description Find handle corresponding to a resource pointer without + acquiring/releasing the handle lock. The function assumes you + hold the lock when called. 
+ @Input pvData - pointer to resource to be associated with the handle + eType - the type of resource + @Output phHandle - points to the returned handle + @Return Error code or PVRSRV_OK +******************************************************************************/ +PVRSRV_ERROR PVRSRVFindHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE *phHandle, + void *pvData, + PVRSRV_HANDLE_TYPE eType) +{ + IMG_HANDLE hHandle; + + /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */ + PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); + PVR_ASSERT(gpsHandleFuncs); + + PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase"); + + /* See if there is a handle for this data pointer */ + hHandle = FindHandle(psBase, pvData, eType, NULL); + if (hHandle == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Error finding handle. Type %u", + __func__, + eType)); + + return PVRSRV_ERROR_HANDLE_NOT_FOUND; + } + + *phHandle = hHandle; + + return PVRSRV_OK; +} + +/*! +******************************************************************************* + @Function PVRSRVLookupHandle + @Description Lookup the data pointer corresponding to a handle + @Input hHandle - handle from client + eType - handle type + bRef - If TRUE, a reference will be added on the handle if the + lookup is successful. + @Output ppvData - points to the return data pointer + @Return Error code or PVRSRV_OK +******************************************************************************/ +PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, + void **ppvData, + IMG_HANDLE hHandle, + PVRSRV_HANDLE_TYPE eType, + IMG_BOOL bRef) +{ + PVRSRV_ERROR eError; + + LockHandle(psBase); + eError = PVRSRVLookupHandleUnlocked(psBase, ppvData, hHandle, eType, bRef); + UnlockHandle(psBase); + + return eError; +} + +/*! 
+******************************************************************************* + @Function PVRSRVLookupHandleUnlocked + @Description Lookup the data pointer corresponding to a handle without + acquiring/releasing the handle lock. The function assumes you + hold the lock when called. + @Input hHandle - handle from client + eType - handle type + bRef - If TRUE, a reference will be added on the handle if the + lookup is successful. + @Output ppvData - points to the returned data pointer + @Return Error code or PVRSRV_OK +******************************************************************************/ +PVRSRV_ERROR PVRSRVLookupHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, + void **ppvData, + IMG_HANDLE hHandle, + PVRSRV_HANDLE_TYPE eType, + IMG_BOOL bRef) +{ + HANDLE_DATA *psHandleData = NULL; + PVRSRV_ERROR eError; + + /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */ + PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); + PVR_ASSERT(gpsHandleFuncs); + + PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase"); + + eError = GetHandleData(psBase, &psHandleData, hHandle, eType); + if (unlikely(eError != PVRSRV_OK)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Error looking up handle (%s) for base %p of type %s. Handle %p, type %s", + __func__, + PVRSRVGetErrorString(eError), + psBase, + HandleBaseTypeToString(psBase->eType), + (void*) hHandle, + HandleTypeToString(eType))); +#if defined(DEBUG) || defined(PVRSRV_NEED_PVR_DPF) + OSDumpStack(); +#endif + return eError; + } + + if (psHandleData->ui32RefCount == 0) + { + return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE; + } + + if (bRef) + { + _HandleRef(psHandleData); + } + + *ppvData = psHandleData->pvData; + + return PVRSRV_OK; +} + +/*! 
+******************************************************************************* + @Function PVRSRVLookupSubHandle + @Description Lookup the data pointer corresponding to a subhandle + @Input hHandle - handle from client + eType - handle type + hAncestor - ancestor handle + @Output ppvData - points to the returned data pointer + @Return Error code or PVRSRV_OK +******************************************************************************/ +PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, + void **ppvData, + IMG_HANDLE hHandle, + PVRSRV_HANDLE_TYPE eType, + IMG_HANDLE hAncestor) +{ + HANDLE_DATA *psPHandleData = NULL; + HANDLE_DATA *psCHandleData = NULL; + PVRSRV_ERROR eError; + + /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */ + PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); + PVR_ASSERT(gpsHandleFuncs); + + PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase"); + + LockHandle(psBase); + + eError = GetHandleData(psBase, &psCHandleData, hHandle, eType); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Error looking up subhandle (%s). Handle %p, type %u", + __func__, + PVRSRVGetErrorString(eError), + (void*) hHandle, + eType)); + OSDumpStack(); + goto ExitUnlock; + } + + /* Look for hAncestor among the handle's ancestors */ + for (psPHandleData = psCHandleData; ParentHandle(psPHandleData) != hAncestor; ) + { + eError = GetHandleData(psBase, &psPHandleData, ParentHandle(psPHandleData), PVRSRV_HANDLE_TYPE_NONE); + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "GetHandleData"); + eError = PVRSRV_ERROR_INVALID_SUBHANDLE; + goto ExitUnlock; + } + } + + *ppvData = psCHandleData->pvData; + + eError = PVRSRV_OK; + +ExitUnlock: + UnlockHandle(psBase); + + return eError; +} + + +/*! 
+******************************************************************************* + @Function PVRSRVReleaseHandle + @Description Release a handle that is no longer needed + @Input hHandle - handle from client + eType - handle type + @Return Error code or PVRSRV_OK +******************************************************************************/ +PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE hHandle, + PVRSRV_HANDLE_TYPE eType) +{ + PVRSRV_ERROR eError; + + LockHandle(psBase); + eError = PVRSRVReleaseHandleUnlocked(psBase, hHandle, eType); + UnlockHandle(psBase); + + return eError; +} + + +/*! +******************************************************************************* + @Function PVRSRVReleaseHandleUnlocked + @Description Release a handle that is no longer needed without + acquiring/releasing the handle lock. The function assumes you + hold the lock when called. + @Input hHandle - handle from client + eType - handle type + @Return Error code or PVRSRV_OK +******************************************************************************/ +PVRSRV_ERROR PVRSRVReleaseHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE hHandle, + PVRSRV_HANDLE_TYPE eType) +{ + /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */ + PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); + PVR_ASSERT(gpsHandleFuncs); + + PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase"); + + return FreeHandle(psBase, hHandle, eType, NULL); +} + +/*! 
+******************************************************************************* + @Function PVRSRVPurgeHandles + @Description Purge handles for a given handle base + @Input psBase - pointer to handle base structure + @Return Error code or PVRSRV_OK +******************************************************************************/ +PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase) +{ + PVRSRV_ERROR eError; + + PVR_ASSERT(gpsHandleFuncs); + + PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase"); + + LockHandle(psBase); + eError = gpsHandleFuncs->pfnPurgeHandles(psBase->psImplBase); + UnlockHandle(psBase); + + return eError; +} + +static PVRSRV_ERROR HandleUnrefAndMaybeMarkForFree(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE hHandle, + PVRSRV_HANDLE_TYPE eType); + +static PVRSRV_ERROR HandleUnrefAndMaybeMarkForFreeWrapper(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE hHandle) +{ + return HandleUnrefAndMaybeMarkForFree(psBase, hHandle, PVRSRV_HANDLE_TYPE_NONE); +} + +static PVRSRV_ERROR HandleUnrefAndMaybeMarkForFree(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE hHandle, + PVRSRV_HANDLE_TYPE eType) +{ + HANDLE_DATA *psHandleData = NULL; + PVRSRV_ERROR eError; + + eError = GetHandleData(psBase, &psHandleData, hHandle, eType); + PVR_RETURN_IF_ERROR(eError); + + if (psHandleData->ui32RefCount == 0) + { + /* the handle is already in the destruction phase + * i.e. 
its refcount has already reached 0 + */ + return PVRSRV_OK; + } + + if (_HandleUnref(psHandleData) > 0) + { + /* this handle still has references so do not destroy it + * or the underlying object yet + */ + return PVRSRV_ERROR_OBJECT_STILL_REFERENCED; + } + + /* Prepare children for destruction */ + eError = IterateOverChildren(psBase, psHandleData, + HandleUnrefAndMaybeMarkForFreeWrapper); + PVR_LOG_RETURN_IF_ERROR(eError, "IterateOverChildren->HandleUnrefAndMaybeMarkForFree"); + + return PVRSRV_OK; +} + +static PVRSRV_ERROR HandleFreePrivData(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE hHandle, + PVRSRV_HANDLE_TYPE eType); + +static PVRSRV_ERROR HandleFreePrivDataWrapper(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE hHandle) +{ + return HandleFreePrivData(psBase, hHandle, PVRSRV_HANDLE_TYPE_NONE); +} + +static PVRSRV_ERROR HandleFreePrivData(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE hHandle, + PVRSRV_HANDLE_TYPE eType) +{ + HANDLE_DATA *psHandleData = NULL; + PVRSRV_ERROR eError; + + eError = GetHandleData(psBase, &psHandleData, hHandle, eType); + PVR_RETURN_IF_ERROR(eError); + + /* Call the release data callback for each reference on the handle */ + if (psHandleData->pfnReleaseData != NULL) + { + eError = psHandleData->pfnReleaseData(psHandleData->pvData); + if (eError == PVRSRV_ERROR_RETRY) + { + PVR_DPF((PVR_DBG_MESSAGE, + "FreeHandle: " + "Got retry while calling release data callback for %p (type = %d)", + hHandle, + (IMG_UINT32)psHandleData->eType)); + + return eError; + } + else if (eError != PVRSRV_OK) + { + return eError; + } + + /* we don't need this so make sure it's not called on + * the pvData for the second time + */ + psHandleData->pfnReleaseData = NULL; + } + + /* Free children's data */ + eError = IterateOverChildren(psBase, psHandleData, + HandleFreePrivDataWrapper); + PVR_LOG_RETURN_IF_ERROR(eError, "IterateOverChildren->HandleFreePrivData"); + + return PVRSRV_OK; +} + +static PVRSRV_ERROR HandleFreeDestroy(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE 
hHandle, + PVRSRV_HANDLE_TYPE eType); + +static PVRSRV_ERROR HandleFreeDestroyWrapper(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE hHandle) +{ + return HandleFreeDestroy(psBase, hHandle, PVRSRV_HANDLE_TYPE_NONE); +} + +static PVRSRV_ERROR HandleFreeDestroy(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE hHandle, + PVRSRV_HANDLE_TYPE eType) +{ + HANDLE_DATA *psHandleData = NULL; + HANDLE_DATA *psReleasedHandleData; + PVRSRV_ERROR eError; + + eError = GetHandleData(psBase, &psHandleData, hHandle, eType); + PVR_RETURN_IF_ERROR(eError); + + eError = UnlinkFromParent(psBase, psHandleData); + PVR_LOG_RETURN_IF_ERROR(eError, "UnlinkFromParent"); + + if (!TEST_ALLOC_FLAG(psHandleData, PVRSRV_HANDLE_ALLOC_FLAG_MULTI)) + { + HAND_KEY aKey; + IMG_HANDLE hRemovedHandle; + + InitKey(aKey, psBase, psHandleData->pvData, psHandleData->eType, + ParentIfPrivate(psHandleData)); + + hRemovedHandle = (IMG_HANDLE) HASH_Remove_Extended(psBase->psHashTab, + aKey); + + PVR_ASSERT(hRemovedHandle != NULL); + PVR_ASSERT(hRemovedHandle == psHandleData->hHandle); + PVR_UNREFERENCED_PARAMETER(hRemovedHandle); + } + + /* Free children */ + eError = IterateOverChildren(psBase, psHandleData, HandleFreeDestroyWrapper); + PVR_LOG_RETURN_IF_ERROR(eError, "IterateOverChildren->HandleFreeDestroy"); + + eError = gpsHandleFuncs->pfnReleaseHandle(psBase->psImplBase, + psHandleData->hHandle, + (void **)&psReleasedHandleData); + OSFreeMem(psHandleData); + PVR_LOG_RETURN_IF_ERROR(eError, "pfnReleaseHandle"); + + return PVRSRV_OK; +} + +PVRSRV_ERROR PVRSRVReleaseHandleStagedUnlock(PVRSRV_HANDLE_BASE *psBase, + IMG_HANDLE hHandle, + PVRSRV_HANDLE_TYPE eType) +{ + PVRSRV_ERROR eError; + + PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); + PVR_ASSERT(gpsHandleFuncs); + + PVR_LOG_RETURN_IF_FALSE(psBase != NULL, "psBase invalid", + PVRSRV_ERROR_INVALID_PARAMS); + + eError = HandleUnrefAndMaybeMarkForFree(psBase, hHandle, eType); + if (eError == PVRSRV_ERROR_OBJECT_STILL_REFERENCED) + { + return PVRSRV_OK; + } + else if (eError 
!= PVRSRV_OK) + { + return eError; + } + + UnlockHandle(psBase); + + eError = HandleFreePrivData(psBase, hHandle, eType); + if (eError != PVRSRV_OK) + { + LockHandle(psBase); + return eError; + } + + LockHandle(psBase); + + return HandleFreeDestroy(psBase, hHandle, eType); +} + +/*! +******************************************************************************* + @Function PVRSRVAllocHandleBase + @Description Allocate a handle base structure for a process + @Input eType - handle type + @Output ppsBase - points to handle base structure pointer + @Return Error code or PVRSRV_OK +******************************************************************************/ +PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase, + PVRSRV_HANDLE_BASE_TYPE eType) +{ + PVRSRV_HANDLE_BASE *psBase; + PVRSRV_ERROR eError; + + PVR_LOG_RETURN_IF_FALSE(gpsHandleFuncs != NULL, "handle management not initialised", + PVRSRV_ERROR_NOT_READY); + PVR_LOG_RETURN_IF_INVALID_PARAM(ppsBase != NULL, "ppsBase"); + + psBase = OSAllocZMem(sizeof(*psBase)); + PVR_LOG_RETURN_IF_NOMEM(psBase, "psBase"); + + eError = OSLockCreate(&psBase->hLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", ErrorFreeHandleBase); + + psBase->eType = eType; + + LockHandle(psBase); + + eError = gpsHandleFuncs->pfnCreateHandleBase(&psBase->psImplBase); + PVR_GOTO_IF_ERROR(eError, ErrorUnlock); + + psBase->psHashTab = HASH_Create_Extended(HANDLE_HASH_TAB_INIT_SIZE, + sizeof(HAND_KEY), + HASH_Func_Default, + HASH_Key_Comp_Default); + PVR_LOG_GOTO_IF_FALSE(psBase->psHashTab != NULL, "couldn't create data pointer" + " hash table", ErrorDestroyHandleBase); + + *ppsBase = psBase; + + UnlockHandle(psBase); + + return PVRSRV_OK; + +ErrorDestroyHandleBase: + (void)gpsHandleFuncs->pfnDestroyHandleBase(psBase->psImplBase); + +ErrorUnlock: + UnlockHandle(psBase); + OSLockDestroy(psBase->hLock); + +ErrorFreeHandleBase: + OSFreeMem(psBase); + + return eError; +} + +#if defined(DEBUG) +typedef struct _COUNT_HANDLE_DATA_ +{ + 
PVRSRV_HANDLE_BASE *psBase; + IMG_UINT32 uiHandleDataCount; +} COUNT_HANDLE_DATA; + +/* Used to count the number of handles that have data associated with them */ +static PVRSRV_ERROR CountHandleDataWrapper(IMG_HANDLE hHandle, void *pvData) +{ + COUNT_HANDLE_DATA *psData = (COUNT_HANDLE_DATA *)pvData; + HANDLE_DATA *psHandleData = NULL; + PVRSRV_ERROR eError; + + PVR_ASSERT(gpsHandleFuncs); + + PVR_LOG_RETURN_IF_INVALID_PARAM(psData != NULL, "psData"); + PVR_LOG_RETURN_IF_INVALID_PARAM(psData->psBase != NULL, "psData->psBase"); + + eError = GetHandleData(psData->psBase, + &psHandleData, + hHandle, + PVRSRV_HANDLE_TYPE_NONE); + PVR_LOG_RETURN_IF_ERROR(eError, "GetHandleData"); + + if (psHandleData != NULL) + { + psData->uiHandleDataCount++; + } + + return PVRSRV_OK; +} + +/* Print a handle in the handle base. Used with the iterator callback. */ +static PVRSRV_ERROR ListHandlesInBase(IMG_HANDLE hHandle, void *pvData) +{ + PVRSRV_HANDLE_BASE *psBase = (PVRSRV_HANDLE_BASE*) pvData; + HANDLE_DATA *psHandleData = NULL; + PVRSRV_ERROR eError; + + PVR_ASSERT(gpsHandleFuncs); + + PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase"); + + eError = GetHandleData(psBase, + &psHandleData, + hHandle, + PVRSRV_HANDLE_TYPE_NONE); + PVR_LOG_RETURN_IF_ERROR(eError, "GetHandleData"); + + if (psHandleData != NULL) + { + PVR_DPF((PVR_DBG_WARNING, " Handle: %6u, Refs: %3u, Type: %s (%u), pvData<%p>", + (IMG_UINT32) (uintptr_t) psHandleData->hHandle, + psHandleData->ui32RefCount, + HandleTypeToString(psHandleData->eType), + psHandleData->eType, + psHandleData->pvData)); + } + + return PVRSRV_OK; +} + +#endif /* defined(DEBUG) */ + +static INLINE IMG_BOOL _CheckIfMaxTimeExpired(IMG_UINT64 ui64TimeStart, IMG_UINT64 ui64MaxBridgeTime) +{ + /* unsigned arithmetic is well defined so this will wrap around correctly */ + return (OSClockns64() - ui64TimeStart) >= ui64MaxBridgeTime; +} + +static PVRSRV_ERROR FreeKernelHandlesWrapperIterKernel(IMG_HANDLE hHandle, void *pvData) +{ + 
FREE_KERNEL_HANDLE_DATA *psData = (FREE_KERNEL_HANDLE_DATA *)pvData; + HANDLE_DATA *psKernelHandleData = NULL; + PVRSRV_ERROR eError; + + PVR_ASSERT(gpsHandleFuncs); + + /* Get kernel handle data. */ + eError = GetHandleData(KERNEL_HANDLE_BASE, + &psKernelHandleData, + hHandle, + PVRSRV_HANDLE_TYPE_NONE); + PVR_LOG_RETURN_IF_ERROR(eError, "GetHandleData"); + + if (psKernelHandleData->pvData == psData->psProcessHandleData->pvData) + { + /* This kernel handle belongs to our process handle. */ + psData->hKernelHandle = hHandle; + } + + return PVRSRV_OK; +} + +static PVRSRV_ERROR FreeKernelHandlesWrapperIterProcess(IMG_HANDLE hHandle, void *pvData) +{ + FREE_KERNEL_HANDLE_DATA *psData = (FREE_KERNEL_HANDLE_DATA *)pvData; + PVRSRV_ERROR eError; + + PVR_ASSERT(gpsHandleFuncs); + + /* Get process handle data. */ + eError = GetHandleData(psData->psBase, + &psData->psProcessHandleData, + hHandle, + PVRSRV_HANDLE_TYPE_NONE); + PVR_LOG_RETURN_IF_ERROR(eError, "GetHandleData"); + + if (psData->psProcessHandleData->eFlag == PVRSRV_HANDLE_ALLOC_FLAG_MULTI +#if defined(SUPPORT_INSECURE_EXPORT) + || psData->psProcessHandleData->eType == PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT +#endif + ) + { + /* Only multi alloc process handles might be in kernel handle base. */ + psData->hKernelHandle = NULL; + /* Iterate over kernel handles. */ + eError = gpsHandleFuncs->pfnIterateOverHandles(KERNEL_HANDLE_BASE->psImplBase, + &FreeKernelHandlesWrapperIterKernel, + (void *)psData); + PVR_LOG_RETURN_IF_FALSE(eError == PVRSRV_OK, "failed to iterate over kernel handles", + eError); + + if (psData->hKernelHandle) + { + /* Release kernel handle which belongs to our process handle. 
*/ + eError = gpsHandleFuncs->pfnReleaseHandle(KERNEL_HANDLE_BASE->psImplBase, + psData->hKernelHandle, + NULL); + PVR_LOG_RETURN_IF_FALSE(eError == PVRSRV_OK, "couldn't release kernel handle", + eError); + } + } + + return PVRSRV_OK; +} + +static PVRSRV_ERROR FreeHandleDataWrapper(IMG_HANDLE hHandle, void *pvData) +{ + FREE_HANDLE_DATA *psData = (FREE_HANDLE_DATA *)pvData; + HANDLE_DATA *psHandleData = NULL; + PVRSRV_ERROR eError; + + PVR_ASSERT(gpsHandleFuncs); + + PVR_LOG_RETURN_IF_INVALID_PARAM(psData != NULL, "psData"); + PVR_LOG_RETURN_IF_INVALID_PARAM(psData->psBase != NULL, "psData->psBase"); + PVR_LOG_RETURN_IF_INVALID_PARAM(psData->eHandleFreeType != PVRSRV_HANDLE_TYPE_NONE, + "psData->eHandleFreeType"); + + eError = GetHandleData(psData->psBase, + &psHandleData, + hHandle, + PVRSRV_HANDLE_TYPE_NONE); + PVR_LOG_RETURN_IF_ERROR(eError, "GetHandleData"); + + if (psHandleData == NULL || psHandleData->eType != psData->eHandleFreeType) + { + return PVRSRV_OK; + } + + PVR_ASSERT(psHandleData->ui32RefCount > 0); + + while (psHandleData->ui32RefCount != 0) + { + if (psHandleData->pfnReleaseData != NULL) + { + eError = psHandleData->pfnReleaseData(psHandleData->pvData); + if (eError == PVRSRV_ERROR_RETRY) + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s: " + "Got retry while calling release data callback for %p (type = %d)", + __func__, + hHandle, + (IMG_UINT32)psHandleData->eType)); + + return eError; + } + else if (eError != PVRSRV_OK) + { + return eError; + } + } + + _HandleUnref(psHandleData); + } + + if (!TEST_ALLOC_FLAG(psHandleData, PVRSRV_HANDLE_ALLOC_FLAG_MULTI)) + { + HAND_KEY aKey; + IMG_HANDLE hRemovedHandle; + + InitKey(aKey, + psData->psBase, + psHandleData->pvData, + psHandleData->eType, + ParentIfPrivate(psHandleData)); + + hRemovedHandle = (IMG_HANDLE)HASH_Remove_Extended(psData->psBase->psHashTab, aKey); + + PVR_ASSERT(hRemovedHandle != NULL); + PVR_ASSERT(hRemovedHandle == psHandleData->hHandle); + PVR_UNREFERENCED_PARAMETER(hRemovedHandle); + } + + eError 
= gpsHandleFuncs->pfnSetHandleData(psData->psBase->psImplBase, hHandle, NULL); + PVR_RETURN_IF_ERROR(eError); + + OSFreeMem(psHandleData); + + /* If we reach the end of the time slice release we can release the global + * lock, invoke the scheduler and reacquire the lock */ + if ((psData->ui64MaxBridgeTime != 0) && _CheckIfMaxTimeExpired(psData->ui64TimeStart, psData->ui64MaxBridgeTime)) + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Lock timeout (timeout: %" IMG_UINT64_FMTSPEC")", + __func__, + psData->ui64MaxBridgeTime)); + /* UnlockHandle(psData->psBase); - func only run in single thread ctx */ + /* Invoke the scheduler to check if other processes are waiting for the lock */ + OSReleaseThreadQuanta(); + /* LockHandle(psData->psBase); - func only run in single thread ctx */ + /* Set again lock timeout and reset the counter */ + psData->ui64TimeStart = OSClockns64(); + PVR_DPF((PVR_DBG_MESSAGE, "%s: Lock acquired again", __func__)); + } + + return PVRSRV_OK; +} + +/* The Ordered Array of PVRSRV_HANDLE_TYPE Enum Entries. + * + * Some handles must be destroyed prior to other handles, + * such relationships are established with respect to handle types. + * Therefore elements of this array have to maintain specific order, + * e.g. the PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET must be placed + * before PVRSRV_HANDLE_TYPE_RGX_FREELIST. + * + * If ordering is incorrect driver may fail on the ground of cleanup + * routines. Unfortunately, we can mainly rely on the actual definition of + * the array, there is no explicit information about all relationships + * between handle types. These relationships do not necessarily come from + * bridge-specified handle attributes such as 'sub handle' and 'parent + * handle'. They may come from internal/private ref-counters contained by + * objects referenced by our kernel handles. 
+ * + * For example, at the bridge level, PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET + * and PVRSRV_HANDLE_TYPE_RGX_FREELIST have no explicit relationship, meaning + * none of them is a sub-handle for the other. + * However the freelist contains internal ref-count that is decremented by + * the destroy routine for KM_HW_RT_DATASET. + * + * BE CAREFUL when adding/deleting/moving handle types. + */ +static const PVRSRV_HANDLE_TYPE g_aeOrderedFreeList[] = +{ + PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT, + PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT, + PVRSRV_HANDLE_TYPE_RGX_FW_MEMDESC, + PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET, + PVRSRV_HANDLE_TYPE_RGX_FREELIST, + PVRSRV_HANDLE_TYPE_RGX_MEMORY_BLOCK, + PVRSRV_HANDLE_TYPE_RGX_POPULATION, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT, + PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT, + PVRSRV_HANDLE_TYPE_RI_HANDLE, + PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + PVRSRV_HANDLE_TYPE_PVRSRV_TIMELINE_SERVER, + PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT, + PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER, + PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, + PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_PAGELIST, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_SECURE_EXPORT, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT, + PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, + PVRSRV_HANDLE_TYPE_DC_PIN_HANDLE, + PVRSRV_HANDLE_TYPE_DC_BUFFER, + PVRSRV_HANDLE_TYPE_DC_DISPLAY_CONTEXT, + PVRSRV_HANDLE_TYPE_DC_DEVICE, + PVRSRV_HANDLE_TYPE_PVR_TL_SD, + PVRSRV_HANDLE_TYPE_DI_CONTEXT, + 
PVRSRV_HANDLE_TYPE_MM_PLAT_CLEANUP +}; + +/*! +******************************************************************************* + @Function PVRSRVFreeKernelHandles + @Description Free kernel handles which belongs to process handles + @Input psBase - pointer to handle base structure + @Return Error code or PVRSRV_OK +******************************************************************************/ +PVRSRV_ERROR PVRSRVFreeKernelHandles(PVRSRV_HANDLE_BASE *psBase) +{ + FREE_KERNEL_HANDLE_DATA sHandleData = {NULL}; + PVRSRV_ERROR eError; + + PVR_ASSERT(gpsHandleFuncs); + + LockHandle(psBase); + + sHandleData.psBase = psBase; + /* Iterate over process handles. */ + eError = gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase, + &FreeKernelHandlesWrapperIterProcess, + (void *)&sHandleData); + PVR_LOG_GOTO_IF_ERROR(eError, "pfnIterateOverHandles", ExitUnlock); + + eError = PVRSRV_OK; + +ExitUnlock: + UnlockHandle(psBase); + + return eError; +} + +/*! +******************************************************************************* + @Function PVRSRVRetrieveProcessHandleBase + @Description Returns a pointer to the process handle base for the current + process. If the current process is the cleanup thread, then the + process handle base for the process currently being cleaned up + is returned + @Return Pointer to the process handle base, or NULL if not found. +******************************************************************************/ +PVRSRV_HANDLE_BASE *PVRSRVRetrieveProcessHandleBase(void) +{ + PVRSRV_HANDLE_BASE *psHandleBase = NULL; + PROCESS_HANDLE_BASE *psProcHandleBase = NULL; + PVRSRV_DATA *psPvrData = PVRSRVGetPVRSRVData(); + IMG_PID ui32PurgePid = PVRSRVGetPurgeConnectionPid(); + + OSLockAcquire(psPvrData->hProcessHandleBase_Lock); + + /* Check to see if we're being called from the cleanup thread... 
*/ + if ((OSGetCurrentClientProcessIDKM() == psPvrData->cleanupThreadPid) && + (ui32PurgePid > 0)) + { + /* Check to see if the cleanup thread has already removed the + * process handle base from the HASH table. + */ + psHandleBase = psPvrData->psProcessHandleBaseBeingFreed; + /* psHandleBase shouldn't be null, as cleanup thread + * should be removing this from the HASH table before + * we get here, so assert if not. + */ + PVR_ASSERT(psHandleBase); + } + else + { + /* Not being called from the cleanup thread, so return the process + * handle base for the current process. + */ + psProcHandleBase = (PROCESS_HANDLE_BASE*) HASH_Retrieve(psPvrData->psProcessHandleBase_Table, + OSGetCurrentClientProcessIDKM()); + } + OSLockRelease(psPvrData->hProcessHandleBase_Lock); + + if (psHandleBase == NULL && psProcHandleBase != NULL) + { + psHandleBase = psProcHandleBase->psHandleBase; + } + return psHandleBase; +} + +/*! +******************************************************************************* + @Function PVRSRVFreeHandleBase + @Description Free a handle base structure + @Input psBase - pointer to handle base structure + @Return Error code or PVRSRV_OK +******************************************************************************/ +PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase, IMG_UINT64 ui64MaxBridgeTime) +{ +#if defined(DEBUG) + COUNT_HANDLE_DATA sCountData = {NULL}; +#endif + FREE_HANDLE_DATA sHandleData = {NULL}; + IMG_UINT32 i; + PVRSRV_ERROR eError; + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + IMG_PID uiCleanupPid = psPVRSRVData->cleanupThreadPid; + + PVR_ASSERT(gpsHandleFuncs); + + /* LockHandle(psBase); - func only run in single thread ctx */ + + /* If this is a process handle base being freed by the cleanup + * thread, store this in psPVRSRVData->psProcessHandleBaseBeingFreed + */ + if ((OSGetCurrentClientProcessIDKM() == uiCleanupPid) && + (psBase->eType == PVRSRV_HANDLE_BASE_TYPE_PROCESS)) + { + 
psPVRSRVData->psProcessHandleBaseBeingFreed = psBase; + } + + sHandleData.psBase = psBase; + sHandleData.ui64TimeStart = OSClockns64(); + sHandleData.ui64MaxBridgeTime = ui64MaxBridgeTime; + + +#if defined(DEBUG) + + sCountData.psBase = psBase; + + eError = gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase, + &CountHandleDataWrapper, + (void *)&sCountData); + PVR_LOG_GOTO_IF_ERROR(eError, "pfnIterateOverHandles", ExitUnlock); + + if (sCountData.uiHandleDataCount != 0) + { + IMG_BOOL bList = sCountData.uiHandleDataCount < HANDLE_DEBUG_LISTING_MAX_NUM; + + PVR_DPF((PVR_DBG_WARNING, + "%s: %u remaining handles in handle base 0x%p " + "(PVRSRV_HANDLE_BASE_TYPE %u).%s", + __func__, + sCountData.uiHandleDataCount, + psBase, + psBase->eType, + bList ? "": " Skipping details, too many items...")); + + if (bList) + { + PVR_DPF((PVR_DBG_WARNING, "-------- Listing Handles --------")); + (void) gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase, + &ListHandlesInBase, + psBase); + PVR_DPF((PVR_DBG_WARNING, "-------- Done Listing --------")); + } + } + +#endif /* defined(DEBUG) */ + + /* + * As we're freeing handles based on type, make sure all + * handles have actually had their data freed to avoid + * resources being leaked + */ + for (i = 0; i < ARRAY_SIZE(g_aeOrderedFreeList); i++) + { + sHandleData.eHandleFreeType = g_aeOrderedFreeList[i]; + + /* Make sure all handles have been freed before destroying the handle base */ + eError = gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase, + &FreeHandleDataWrapper, + (void *)&sHandleData); + PVR_GOTO_IF_ERROR(eError, ExitUnlock); + } + + + if (psBase->psHashTab != NULL) + { + HASH_Delete(psBase->psHashTab); + } + + eError = gpsHandleFuncs->pfnDestroyHandleBase(psBase->psImplBase); + PVR_GOTO_IF_ERROR(eError, ExitUnlock); + + /* UnlockHandle(psBase); - func only run in single thread ctx */ + OSLockDestroy(psBase->hLock); + OSFreeMem(psBase); + + return eError; + +ExitUnlock: + if (OSGetCurrentClientProcessIDKM() 
== uiCleanupPid) + { + psPVRSRVData->psProcessHandleBaseBeingFreed = NULL; + } + /* UnlockHandle(psBase); - func only run in single thread ctx */ + + return eError; +} + +/*! +******************************************************************************* + @Function PVRSRVHandleInit + @Description Initialise handle management + @Return Error code or PVRSRV_OK +******************************************************************************/ +PVRSRV_ERROR PVRSRVHandleInit(void) +{ + PVRSRV_ERROR eError; + + PVR_ASSERT(gpsKernelHandleBase == NULL); + PVR_ASSERT(gpsHandleFuncs == NULL); + PVR_ASSERT(!gbLockInitialised); + + eError = OSLockCreate(&gKernelHandleLock); + PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate"); + + gbLockInitialised = IMG_TRUE; + + eError = PVRSRVHandleGetFuncTable(&gpsHandleFuncs); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVHandleGetFuncTable", ErrorHandleDeinit); + + eError = PVRSRVAllocHandleBase(&gpsKernelHandleBase, + PVRSRV_HANDLE_BASE_TYPE_GLOBAL); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVAllocHandleBase", ErrorHandleDeinit); + + eError = gpsHandleFuncs->pfnEnableHandlePurging(gpsKernelHandleBase->psImplBase); + PVR_LOG_GOTO_IF_ERROR(eError, "pfnEnableHandlePurging", + ErrorHandleDeinit); + + return PVRSRV_OK; + +ErrorHandleDeinit: + (void) PVRSRVHandleDeInit(); + + return eError; +} + +/*! 
+******************************************************************************* + @Function PVRSRVHandleDeInit + @Description De-initialise handle management + @Return Error code or PVRSRV_OK +******************************************************************************/ +PVRSRV_ERROR PVRSRVHandleDeInit(void) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + if (gpsHandleFuncs != NULL) + { + if (gpsKernelHandleBase != NULL) + { + eError = PVRSRVFreeHandleBase(gpsKernelHandleBase, 0 /* do not release bridge lock */); + if (eError == PVRSRV_OK) + { + gpsKernelHandleBase = NULL; + } + else + { + PVR_LOG_ERROR(eError, "PVRSRVFreeHandleBase"); + } + } + + if (eError == PVRSRV_OK) + { + gpsHandleFuncs = NULL; + } + } + else + { + /* If we don't have a handle function table we shouldn't have a handle base either */ + PVR_ASSERT(gpsKernelHandleBase == NULL); + } + + if (gbLockInitialised) + { + OSLockDestroy(gKernelHandleLock); + gbLockInitialised = IMG_FALSE; + } + + return eError; +} diff --git a/drivers/gpu/drm/phytium/octopus/handle.h b/drivers/gpu/drm/phytium/octopus/handle.h new file mode 100644 index 000000000000..179efa7bb5a5 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/handle.h @@ -0,0 +1,201 @@ +/**************************************************************************/ /*! +@File +@Title Handle Manager API +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Provide handle management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /***************************************************************************/ + +#if !defined(HANDLE_API_H) +#define HANDLE_API_H + +#include "lock_types.h" + +/* + * Handle API + * ---------- + * The handle API is intended to provide handles for kernel resources, which + * can then be passed back to user space processes. + * + * The following functions comprise the API. Each function takes a pointer to + * a PVRSRV_HANDLE_BASE structure, one of which is allocated for each process, + * and stored in the per-process data area. Use KERNEL_HANDLE_BASE for handles + * not allocated for a particular process, or for handles that need to be + * allocated before the PVRSRV_HANDLE_BASE structure for the process is + * available. + * + * PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, + * IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, + * PVRSRV_HANDLE_ALLOC_FLAG eFlag); + * + * Allocate a handle phHandle, for the resource of type eType pointed to by + * pvData. + * + * For handles that have a definite lifetime, where the corresponding resource + * is explicitly created and destroyed, eFlag should be zero. + * + * If a particular resource may be referenced multiple times by a given + * process, setting eFlag to PVRSRV_HANDLE_ALLOC_FLAG_MULTI will allow multiple + * handles to be allocated for the resource. Such handles cannot be found with + * PVRSRVFindHandle. + * + * PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, + * IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, + * PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent); + * + * This function is similar to PVRSRVAllocHandle, except that the allocated + * handles are associated with a parent handle, hParent, that has been + * allocated previously. Subhandles are automatically deallocated when their + * parent handle is deallocated. + * Subhandles can be treated as ordinary handles. 
For example, they may have + * subhandles of their own, and may be explicitly deallocated using + * PVRSRVReleaseHandle (see below). + * + * PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, + * IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType); + * + * Find the handle previously allocated for the resource pointed to by pvData, + * of type eType. Handles allocated with the flag + * PVRSRV_HANDLE_ALLOC_FLAG_MULTI cannot be found using this function. + * + * PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, + * void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); + * + * Given a handle for a resource of type eType, return the pointer to the + * resource. + * + * PVRSRV_ERROR PVRSRVLookuSubHandle(PVRSRV_HANDLE_BASE *psBase, + * void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, + * IMH_HANDLE hAncestor); + * + * Similar to PVRSRVLookupHandle, but checks the handle is a descendant + * of hAncestor. + * + * PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, + * IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); + * + * Deallocate a handle of given type. + * + * Return the parent of a handle in *phParent, or NULL if the handle has + * no parent. 
+ */ + +#include "img_types.h" +#include "img_defs.h" +#include "hash.h" + +typedef enum +{ + #define HANDLETYPE(x) PVRSRV_HANDLE_TYPE_##x, + #include "handle_types.h" + #undef HANDLETYPE +} PVRSRV_HANDLE_TYPE; + +static_assert(PVRSRV_HANDLE_TYPE_NONE == 0, "PVRSRV_HANDLE_TYPE_NONE must be zero"); + +typedef enum +{ + PVRSRV_HANDLE_BASE_TYPE_CONNECTION, + PVRSRV_HANDLE_BASE_TYPE_PROCESS, + PVRSRV_HANDLE_BASE_TYPE_GLOBAL +} PVRSRV_HANDLE_BASE_TYPE; + + +typedef enum +{ + /* No flags */ + PVRSRV_HANDLE_ALLOC_FLAG_NONE = 0, + /* Multiple handles can point at the given data pointer */ + PVRSRV_HANDLE_ALLOC_FLAG_MULTI = 0x01, + /* Subhandles are allocated in a private handle space */ + PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE = 0x02 +} PVRSRV_HANDLE_ALLOC_FLAG; + +typedef struct _HANDLE_BASE_ PVRSRV_HANDLE_BASE; + +typedef struct _PROCESS_HANDLE_BASE_ +{ + PVRSRV_HANDLE_BASE *psHandleBase; + ATOMIC_T iRefCount; + +} PROCESS_HANDLE_BASE; + +extern PVRSRV_HANDLE_BASE *gpsKernelHandleBase; +#define KERNEL_HANDLE_BASE (gpsKernelHandleBase) + +#define HANDLE_DEBUG_LISTING_MAX_NUM 20 + +typedef PVRSRV_ERROR (*PFN_HANDLE_RELEASE)(void *pvData); + +PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, PFN_HANDLE_RELEASE pfnReleaseData); +PVRSRV_ERROR PVRSRVAllocHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, PFN_HANDLE_RELEASE pfnReleaseData); + +PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent); +PVRSRV_ERROR PVRSRVAllocSubHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent); + +PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, 
PVRSRV_HANDLE_TYPE eType); +PVRSRV_ERROR PVRSRVFindHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType); + +PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_BOOL bRef); +PVRSRV_ERROR PVRSRVLookupHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_BOOL bRef); + +PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hAncestor); + +PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); +PVRSRV_ERROR PVRSRVReleaseHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); +PVRSRV_ERROR PVRSRVReleaseHandleStagedUnlock(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); + +PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase); + +PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase, + PVRSRV_HANDLE_BASE_TYPE eType); + +PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase, IMG_UINT64 ui64MaxBridgeTime); + +PVRSRV_ERROR PVRSRVFreeKernelHandles(PVRSRV_HANDLE_BASE *psBase); + +PVRSRV_ERROR PVRSRVHandleInit(void); + +PVRSRV_ERROR PVRSRVHandleDeInit(void); + +PVRSRV_HANDLE_BASE *PVRSRVRetrieveProcessHandleBase(void); + +void LockHandle(PVRSRV_HANDLE_BASE *psBase); +void UnlockHandle(PVRSRV_HANDLE_BASE *psBase); + +#endif /* !defined(HANDLE_API_H) */ diff --git a/drivers/gpu/drm/phytium/octopus/handle_idr.c b/drivers/gpu/drm/phytium/octopus/handle_idr.c new file mode 100644 index 000000000000..9514527b8580 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/handle_idr.c @@ -0,0 +1,440 @@ +/*************************************************************************/ /*! +@File +@Title Resource Handle Manager - IDR Back-end +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Description Provide IDR based resource handle management back-end +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /***************************************************************************/ + +#include +#include +#include +#include +#include + +#include "handle_impl.h" +#include "allocmem.h" +#include "osfunc.h" +#include "pvr_debug.h" + +#define ID_VALUE_MIN 1 +#define ID_VALUE_MAX INT_MAX + +#define ID_TO_HANDLE(i) ((IMG_HANDLE)(uintptr_t)(i)) +#define HANDLE_TO_ID(h) ((IMG_INT)(uintptr_t)(h)) + +struct _HANDLE_IMPL_BASE_ +{ + struct idr sIdr; + + IMG_UINT32 ui32MaxHandleValue; + + IMG_UINT32 ui32TotalHandCount; +}; + +typedef struct _HANDLE_ITER_DATA_WRAPPER_ +{ + PFN_HANDLE_ITER pfnHandleIter; + void *pvHandleIterData; +} HANDLE_ITER_DATA_WRAPPER; + + +static int HandleIterFuncWrapper(int id, void *data, void *iter_data) +{ + HANDLE_ITER_DATA_WRAPPER *psIterData = (HANDLE_ITER_DATA_WRAPPER *)iter_data; + + PVR_UNREFERENCED_PARAMETER(data); + + return (int)psIterData->pfnHandleIter(ID_TO_HANDLE(id), psIterData->pvHandleIterData); +} + +/*! 
+****************************************************************************** + + @Function AcquireHandle + + @Description Acquire a new handle + + @Input psBase - Pointer to handle base structure + phHandle - Points to a handle pointer + pvData - Pointer to resource to be associated with the handle + + @Output phHandle - Points to a handle pointer + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +static PVRSRV_ERROR AcquireHandle(HANDLE_IMPL_BASE *psBase, + IMG_HANDLE *phHandle, + void *pvData) +{ + int id; + int result; + + PVR_ASSERT(psBase != NULL); + PVR_ASSERT(phHandle != NULL); + PVR_ASSERT(pvData != NULL); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)) + idr_preload(GFP_KERNEL); + id = idr_alloc(&psBase->sIdr, pvData, ID_VALUE_MIN, psBase->ui32MaxHandleValue + 1, 0); + idr_preload_end(); + + result = id; +#else + do + { + if (idr_pre_get(&psBase->sIdr, GFP_KERNEL) == 0) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + result = idr_get_new_above(&psBase->sIdr, pvData, ID_VALUE_MIN, &id); + } while (result == -EAGAIN); + + if ((IMG_UINT32)id > psBase->ui32MaxHandleValue) + { + idr_remove(&psBase->sIdr, id); + result = -ENOSPC; + } +#endif + + if (result < 0) + { + if (result == -ENOSPC) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Limit of %u handles reached", + __func__, psBase->ui32MaxHandleValue)); + + return PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE; + } + + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + psBase->ui32TotalHandCount++; + + *phHandle = ID_TO_HANDLE(id); + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function ReleaseHandle + + @Description Release a handle that is no longer needed. 
+ + @Input psBase - Pointer to handle base structure + hHandle - Handle to release + ppvData - Points to a void data pointer + + @Output ppvData - Points to a void data pointer + + @Return PVRSRV_OK or PVRSRV_ERROR + +******************************************************************************/ +static PVRSRV_ERROR ReleaseHandle(HANDLE_IMPL_BASE *psBase, + IMG_HANDLE hHandle, + void **ppvData) +{ + int id = HANDLE_TO_ID(hHandle); + void *pvData; + + PVR_ASSERT(psBase); + + /* Get the data associated with the handle. If we get back NULL then + it's an invalid handle */ + + pvData = idr_find(&psBase->sIdr, id); + if (likely(pvData)) + { + idr_remove(&psBase->sIdr, id); + psBase->ui32TotalHandCount--; + } + + if (unlikely(pvData == NULL)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Handle out of range (%u > %u)", + __func__, id, psBase->ui32TotalHandCount)); + return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE; + } + + if (ppvData) + { + *ppvData = pvData; + } + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function GetHandleData + + @Description Get the data associated with the given handle + + @Input psBase - Pointer to handle base structure + hHandle - Handle from which data should be retrieved + ppvData - Points to a void data pointer + + @Output ppvData - Points to a void data pointer + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +static PVRSRV_ERROR GetHandleData(HANDLE_IMPL_BASE *psBase, + IMG_HANDLE hHandle, + void **ppvData) +{ + int id = HANDLE_TO_ID(hHandle); + void *pvData; + + PVR_ASSERT(psBase); + PVR_ASSERT(ppvData); + + pvData = idr_find(&psBase->sIdr, id); + if (likely(pvData)) + { + *ppvData = pvData; + + return PVRSRV_OK; + } + else + { + return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE; + } +} + +/*! 
+****************************************************************************** + + @Function SetHandleData + + @Description Set the data associated with the given handle + + @Input psBase - Pointer to handle base structure + hHandle - Handle for which data should be changed + pvData - Pointer to new data to be associated with the handle + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +static PVRSRV_ERROR SetHandleData(HANDLE_IMPL_BASE *psBase, + IMG_HANDLE hHandle, + void *pvData) +{ + int id = HANDLE_TO_ID(hHandle); + void *pvOldData; + + PVR_ASSERT(psBase); + + pvOldData = idr_replace(&psBase->sIdr, pvData, id); + if (IS_ERR(pvOldData)) + { + if (PTR_ERR(pvOldData) == -ENOENT) + { + return PVRSRV_ERROR_HANDLE_NOT_ALLOCATED; + } + else + { + return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE; + } + } + + return PVRSRV_OK; +} + +static PVRSRV_ERROR IterateOverHandles(HANDLE_IMPL_BASE *psBase, PFN_HANDLE_ITER pfnHandleIter, void *pvHandleIterData) +{ + HANDLE_ITER_DATA_WRAPPER sIterData; + + PVR_ASSERT(psBase); + PVR_ASSERT(pfnHandleIter); + + sIterData.pfnHandleIter = pfnHandleIter; + sIterData.pvHandleIterData = pvHandleIterData; + + return (PVRSRV_ERROR)idr_for_each(&psBase->sIdr, HandleIterFuncWrapper, &sIterData); +} + +/*! +****************************************************************************** + + @Function EnableHandlePurging + + @Description Enable purging for a given handle base + + @Input psBase - pointer to handle base structure + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +static PVRSRV_ERROR EnableHandlePurging(HANDLE_IMPL_BASE *psBase) +{ + PVR_UNREFERENCED_PARAMETER(psBase); + PVR_ASSERT(psBase); + + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function PurgeHandles + + @Description Purge handles for a given handle base + + @Input psBase - Pointer to handle base structure + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +static PVRSRV_ERROR PurgeHandles(HANDLE_IMPL_BASE *psBase) +{ + PVR_UNREFERENCED_PARAMETER(psBase); + PVR_ASSERT(psBase); + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function CreateHandleBase + + @Description Create a handle base structure + + @Input ppsBase - pointer to handle base structure pointer + + @Output ppsBase - points to handle base structure pointer + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +static PVRSRV_ERROR CreateHandleBase(HANDLE_IMPL_BASE **ppsBase) +{ + HANDLE_IMPL_BASE *psBase; + + PVR_ASSERT(ppsBase); + + psBase = OSAllocZMem(sizeof(*psBase)); + if (psBase == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Couldn't allocate generic handle base", + __func__)); + + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + idr_init(&psBase->sIdr); + + psBase->ui32MaxHandleValue = ID_VALUE_MAX; + psBase->ui32TotalHandCount = 0; + + *ppsBase = psBase; + + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function DestroyHandleBase + + @Description Destroy a handle base structure + + @Input psBase - pointer to handle base structure + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +static PVRSRV_ERROR DestroyHandleBase(HANDLE_IMPL_BASE *psBase) +{ + PVR_ASSERT(psBase); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)) + idr_remove_all(&psBase->sIdr); +#endif + + /* Finally destroy the idr */ + idr_destroy(&psBase->sIdr); + + OSFreeMem(psBase); + + return PVRSRV_OK; +} + + +static const HANDLE_IMPL_FUNCTAB g_sHandleFuncTab = +{ + .pfnAcquireHandle = AcquireHandle, + .pfnReleaseHandle = ReleaseHandle, + .pfnGetHandleData = GetHandleData, + .pfnSetHandleData = SetHandleData, + .pfnIterateOverHandles = IterateOverHandles, + .pfnEnableHandlePurging = EnableHandlePurging, + .pfnPurgeHandles = PurgeHandles, + .pfnCreateHandleBase = CreateHandleBase, + .pfnDestroyHandleBase = DestroyHandleBase +}; + +PVRSRV_ERROR PVRSRVHandleGetFuncTable(HANDLE_IMPL_FUNCTAB const **ppsFuncs) +{ + static IMG_BOOL bAcquired = IMG_FALSE; + + if (bAcquired) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Function table already acquired", + __func__)); + return PVRSRV_ERROR_RESOURCE_UNAVAILABLE; + } + + if (ppsFuncs == NULL) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + *ppsFuncs = &g_sHandleFuncTab; + + bAcquired = IMG_TRUE; + + return PVRSRV_OK; +} diff --git a/drivers/gpu/drm/phytium/octopus/handle_impl.h b/drivers/gpu/drm/phytium/octopus/handle_impl.h new file mode 100644 index 000000000000..3287f712b728 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/handle_impl.h @@ -0,0 +1,89 @@ +/**************************************************************************/ /*! +@File +@Title Implementation Callbacks for Handle Manager API +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Description Part of the handle manager API. This file is for declarations + and definitions that are private/internal to the handle manager + API but need to be shared between the generic handle manager + code and the various handle manager backends, i.e. the code that + implements the various callbacks. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /***************************************************************************/ + +#if !defined(HANDLE_IMPL_H) +#define HANDLE_IMPL_H + +#include "img_types.h" +#include "pvrsrv_error.h" + +typedef struct _HANDLE_IMPL_BASE_ HANDLE_IMPL_BASE; + +typedef PVRSRV_ERROR (*PFN_HANDLE_ITER)(IMG_HANDLE hHandle, void *pvData); + +typedef struct _HANDLE_IMPL_FUNCTAB_ +{ + /* Acquire a new handle which is associated with the given data */ + PVRSRV_ERROR (*pfnAcquireHandle)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE *phHandle, void *pvData); + + /* Release the given handle (optionally returning the data associated with it) */ + PVRSRV_ERROR (*pfnReleaseHandle)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE hHandle, void **ppvData); + + /* Get the data associated with the given handle */ + PVRSRV_ERROR (*pfnGetHandleData)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE hHandle, void **ppvData); + + /* Set the data associated with the given handle */ + PVRSRV_ERROR (*pfnSetHandleData)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE hHandle, void *pvData); + + PVRSRV_ERROR (*pfnIterateOverHandles)(HANDLE_IMPL_BASE *psHandleBase, PFN_HANDLE_ITER pfnHandleIter, void *pvHandleIterData); + + /* Enable handle purging on the given handle base */ + PVRSRV_ERROR (*pfnEnableHandlePurging)(HANDLE_IMPL_BASE *psHandleBase); + + /* Purge handles on the given handle base */ + PVRSRV_ERROR (*pfnPurgeHandles)(HANDLE_IMPL_BASE *psHandleBase); + + /* Create handle base */ + 
PVRSRV_ERROR (*pfnCreateHandleBase)(HANDLE_IMPL_BASE **psHandleBase); + + /* Destroy handle base */ + PVRSRV_ERROR (*pfnDestroyHandleBase)(HANDLE_IMPL_BASE *psHandleBase); +} HANDLE_IMPL_FUNCTAB; + +PVRSRV_ERROR PVRSRVHandleGetFuncTable(HANDLE_IMPL_FUNCTAB const **ppsFuncs); + +#endif /* !defined(HANDLE_IMPL_H) */ diff --git a/drivers/gpu/drm/phytium/octopus/handle_types.h b/drivers/gpu/drm/phytium/octopus/handle_types.h new file mode 100644 index 000000000000..812279992725 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/handle_types.h @@ -0,0 +1,85 @@ +/**************************************************************************/ /*! +@File +@Title Handle Manager handle types +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Provide handle management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /***************************************************************************/ +/* NOTE: Do not add include guards to this file */ + +HANDLETYPE(NONE) +HANDLETYPE(SHARED_EVENT_OBJECT) +HANDLETYPE(EVENT_OBJECT_CONNECT) +HANDLETYPE(PMR_LOCAL_EXPORT_HANDLE) +HANDLETYPE(PHYSMEM_PMR) +HANDLETYPE(PHYSMEM_PMR_EXPORT) +HANDLETYPE(PHYSMEM_PMR_SECURE_EXPORT) +HANDLETYPE(DEVMEMINT_CTX) +HANDLETYPE(DEVMEMINT_CTX_EXPORT) +HANDLETYPE(DEVMEMINT_HEAP) +HANDLETYPE(DEVMEMINT_RESERVATION) +HANDLETYPE(DEVMEMINT_MAPPING) +HANDLETYPE(RGX_FW_MEMDESC) +HANDLETYPE(RGX_FREELIST) +HANDLETYPE(RGX_MEMORY_BLOCK) +HANDLETYPE(RGX_SERVER_RENDER_CONTEXT) +HANDLETYPE(RGX_SERVER_TQ_CONTEXT) +HANDLETYPE(RGX_SERVER_TQ_TDM_CONTEXT) +HANDLETYPE(RGX_SERVER_COMPUTE_CONTEXT) +HANDLETYPE(RGX_SERVER_RAY_CONTEXT) +HANDLETYPE(RGX_SERVER_KICKSYNC_CONTEXT) +HANDLETYPE(SYNC_PRIMITIVE_BLOCK) +HANDLETYPE(SYNC_RECORD_HANDLE) +HANDLETYPE(PVRSRV_TIMELINE_SERVER) +HANDLETYPE(PVRSRV_FENCE_SERVER) +HANDLETYPE(PVRSRV_FENCE_EXPORT) +HANDLETYPE(RGX_KM_HW_RT_DATASET) +HANDLETYPE(RGX_FWIF_ZSBUFFER) +HANDLETYPE(RGX_POPULATION) +HANDLETYPE(DC_DEVICE) +HANDLETYPE(DC_DISPLAY_CONTEXT) +HANDLETYPE(DC_BUFFER) +HANDLETYPE(DC_PIN_HANDLE) +HANDLETYPE(DEVMEM_MEM_IMPORT) +HANDLETYPE(PHYSMEM_PMR_PAGELIST) +HANDLETYPE(PVR_TL_SD) +HANDLETYPE(RI_HANDLE) +HANDLETYPE(DEV_PRIV_DATA) +HANDLETYPE(MM_PLAT_CLEANUP) +HANDLETYPE(WORKEST_RETURN_DATA) +HANDLETYPE(DI_CONTEXT) diff --git a/drivers/gpu/drm/phytium/octopus/hash.c b/drivers/gpu/drm/phytium/octopus/hash.c new file mode 100644 index 000000000000..e364219573b0 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/hash.c @@ -0,0 +1,734 @@ +/*************************************************************************/ /*! +@File +@Title Self scaling hash tables. +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description + Implements simple self scaling hash tables. Hash collisions are handled by + chaining entries together. 
Hash tables are increased in size when they + become more than (50%?) full and decreased in size when less than (25%?) + full. Hash tables are never decreased below their initial size. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* include/ */
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+/* services/shared/include/ */
+#include "hash.h"
+
+/* services/client/include/ or services/server/include/ */
+#include "osfunc_common.h"
+#include "allocmem.h"
+
+//#define PERF_DBG_RESIZE
+#if !defined(__KERNEL__) && defined(PERF_DBG_RESIZE)
+#include <sys/time.h>
+#endif
+
+#if defined(__KERNEL__)
+#include "pvrsrv.h"
+#endif
+
+#define KEY_TO_INDEX(pHash, key, uSize) \
+ ((pHash)->pfnHashFunc((pHash)->uKeySize, (key), (uSize)) % (uSize))
+
+#define KEY_COMPARE(pHash, pKey1, pKey2) \
+ ((pHash)->pfnKeyComp((pHash)->uKeySize, (pKey1), (pKey2)))
+
+#if defined(__linux__) && defined(__KERNEL__)
+#define _AllocMem OSAllocMemNoStats
+#define _AllocZMem OSAllocZMemNoStats
+#define _FreeMem OSFreeMemNoStats
+#else
+#define _AllocMem OSAllocMem
+#define _AllocZMem OSAllocZMem
+#define _FreeMem OSFreeMem
+#endif
+
+#define NO_SHRINK 0
+
+/* Each entry in a hash table is placed into a bucket */
+typedef struct _BUCKET_
+{
+ struct _BUCKET_ *pNext; /*!< the next bucket on the same chain */
+ uintptr_t v; /*!< entry value */
+ uintptr_t k[]; /* PRQA S 0642 */
+ /* override dynamic array declaration warning */
+} BUCKET;
+
+struct _HASH_TABLE_
+{
+ IMG_UINT32 uSize; /*!< current size of the hash table */
+ IMG_UINT32 uCount; /*!< number of entries currently in the 
hash table */ + IMG_UINT32 uMinimumSize; /*!< the minimum size that the hash table should be re-sized to */ + IMG_UINT32 uKeySize; /*!< size of key in bytes */ + IMG_UINT32 uShrinkThreshold; /*!< The threshold at which to trigger a shrink */ + IMG_UINT32 uGrowThreshold; /*!< The threshold at which to trigger a grow */ + HASH_FUNC* pfnHashFunc; /*!< hash function */ + HASH_KEY_COMP* pfnKeyComp; /*!< key comparison function */ + BUCKET** ppBucketTable; /*!< the hash table array */ +#if defined(DEBUG) + const char* pszFile; + unsigned int ui32LineNum; +#endif +}; + +/*************************************************************************/ /*! +@Function HASH_Func_Default +@Description Hash function intended for hashing keys composed of uintptr_t + arrays. +@Input uKeySize The size of the hash key, in bytes. +@Input pKey A pointer to the key to hash. +@Input uHashTabLen The length of the hash table. +@Return The hash value. +*/ /**************************************************************************/ +IMG_INTERNAL IMG_UINT32 +HASH_Func_Default(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen) +{ + uintptr_t *p = (uintptr_t *)pKey; + IMG_UINT32 uKeyLen = uKeySize / sizeof(uintptr_t); + IMG_UINT32 ui; + IMG_UINT32 uHashKey = 0; + + PVR_UNREFERENCED_PARAMETER(uHashTabLen); + + PVR_ASSERT((uKeySize % sizeof(uintptr_t)) == 0); + + for (ui = 0; ui < uKeyLen; ui++) + { + IMG_UINT32 uHashPart = (IMG_UINT32)*p++; + + uHashPart += (uHashPart << 12); + uHashPart ^= (uHashPart >> 22); + uHashPart += (uHashPart << 4); + uHashPart ^= (uHashPart >> 9); + uHashPart += (uHashPart << 10); + uHashPart ^= (uHashPart >> 2); + uHashPart += (uHashPart << 7); + uHashPart ^= (uHashPart >> 12); + + uHashKey += uHashPart; + } + + return uHashKey; +} + +/*************************************************************************/ /*! +@Function HASH_Key_Comp_Default +@Description Compares keys composed of uintptr_t arrays. +@Input uKeySize The size of the hash key, in bytes. 
+@Input pKey1 Pointer to first hash key to compare. +@Input pKey2 Pointer to second hash key to compare. +@Return IMG_TRUE - The keys match. + IMG_FALSE - The keys don't match. +*/ /**************************************************************************/ +IMG_INTERNAL IMG_BOOL +HASH_Key_Comp_Default(size_t uKeySize, void *pKey1, void *pKey2) +{ + uintptr_t *p1 = (uintptr_t *)pKey1; + uintptr_t *p2 = (uintptr_t *)pKey2; + IMG_UINT32 uKeyLen = uKeySize / sizeof(uintptr_t); + IMG_UINT32 ui; + + PVR_ASSERT((uKeySize % sizeof(uintptr_t)) == 0); + + for (ui = 0; ui < uKeyLen; ui++) + { + if (*p1++ != *p2++) + return IMG_FALSE; + } + + return IMG_TRUE; +} + +/*************************************************************************/ /*! +@Function _ChainInsert +@Description Insert a bucket into the appropriate hash table chain. +@Input pBucket The bucket +@Input ppBucketTable The hash table +@Input uSize The size of the hash table +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +static void +_ChainInsert(HASH_TABLE *pHash, BUCKET *pBucket, BUCKET **ppBucketTable, IMG_UINT32 uSize) +{ + IMG_UINT32 uIndex; + + /* We assume that all parameters passed by the caller are valid. */ + PVR_ASSERT(pBucket != NULL); + PVR_ASSERT(ppBucketTable != NULL); + PVR_ASSERT(uSize != 0); + + uIndex = KEY_TO_INDEX(pHash, pBucket->k, uSize); /* PRQA S 0432,0541 */ /* ignore dynamic array warning */ + pBucket->pNext = ppBucketTable[uIndex]; + ppBucketTable[uIndex] = pBucket; +} + +/*************************************************************************/ /*! +@Function _Rehash +@Description Iterate over every entry in an old hash table and rehash into + the new table. 
+@Input ppOldTable The old hash table +@Input uOldSize The size of the old hash table +@Input ppNewTable The new hash table +@Input uNewSize The size of the new hash table +@Return None +*/ /**************************************************************************/ +static void +_Rehash(HASH_TABLE *pHash, + BUCKET **ppOldTable, IMG_UINT32 uOldSize, + BUCKET **ppNewTable, IMG_UINT32 uNewSize) +{ + IMG_UINT32 uIndex; + for (uIndex=0; uIndex< uOldSize; uIndex++) + { + BUCKET *pBucket; + pBucket = ppOldTable[uIndex]; + while (pBucket != NULL) + { + BUCKET *pNextBucket = pBucket->pNext; + _ChainInsert(pHash, pBucket, ppNewTable, uNewSize); + pBucket = pNextBucket; + } + } +} + +/*************************************************************************/ /*! +@Function _Resize +@Description Attempt to resize a hash table, failure to allocate a new + larger hash table is not considered a hard failure. We simply + continue and allow the table to fill up, the effect is to + allow hash chains to become longer. +@Input pHash Hash table to resize. +@Input uNewSize Required table size. 
+@Return IMG_TRUE Success + IMG_FALSE Failed +*/ /**************************************************************************/ +static IMG_BOOL +_Resize(HASH_TABLE *pHash, IMG_UINT32 uNewSize) +{ + BUCKET **ppNewTable; + IMG_UINT32 uiThreshold = uNewSize >> 2; +#if !defined(__KERNEL__) && defined(PERF_DBG_RESIZE) + struct timeval start, end; +#endif + + if (uNewSize == pHash->uSize) + { + return IMG_TRUE; + } + +#if !defined(__KERNEL__) && defined(PERF_DBG_RESIZE) + gettimeofday(&start, NULL); +#endif + + ppNewTable = _AllocZMem(sizeof(BUCKET *) * uNewSize); + if (ppNewTable == NULL) + { + return IMG_FALSE; + } + + _Rehash(pHash, pHash->ppBucketTable, pHash->uSize, ppNewTable, uNewSize); + + _FreeMem(pHash->ppBucketTable); + +#if !defined(__KERNEL__) && defined(PERF_DBG_RESIZE) + gettimeofday(&end, NULL); + if (start.tv_usec > end.tv_usec) + { + end.tv_usec = 1000000 - start.tv_usec + end.tv_usec; + } + else + { + end.tv_usec -= start.tv_usec; + } + + PVR_DPF((PVR_DBG_ERROR, "%s: H:%p O:%d N:%d C:%d G:%d S:%d T:%06luus", __func__, pHash, pHash->uSize, uNewSize, pHash->uCount, pHash->uGrowThreshold, pHash->uShrinkThreshold, end.tv_usec)); +#endif + + /*not nulling pointer, being reassigned just below*/ + pHash->ppBucketTable = ppNewTable; + pHash->uSize = uNewSize; + + pHash->uGrowThreshold = uiThreshold * 3; + pHash->uShrinkThreshold = (uNewSize <= pHash->uMinimumSize) ? NO_SHRINK : uiThreshold; + + return IMG_TRUE; +} + + +/*************************************************************************/ /*! +@Function HASH_Create_Extended +@Description Create a self scaling hash table, using the supplied key size, + and the supplied hash and key comparison functions. +@Input uInitialLen Initial and minimum length of the hash table, + where the length refers to the number of entries + in the hash table, not its size in bytes. +@Input uKeySize The size of the key, in bytes. +@Input pfnHashFunc Pointer to hash function. +@Input pfnKeyComp Pointer to key comparison function. 
+@Return NULL or hash table handle. +*/ /**************************************************************************/ +IMG_INTERNAL +HASH_TABLE * HASH_Create_Extended_Int (IMG_UINT32 uInitialLen, size_t uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp) +{ + HASH_TABLE *pHash; + + if (uInitialLen == 0 || uKeySize == 0 || pfnHashFunc == NULL || pfnKeyComp == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: invalid input parameters", __func__)); + return NULL; + } + + PVR_DPF((PVR_DBG_MESSAGE, "%s: InitialSize=0x%x", __func__, uInitialLen)); + + pHash = _AllocMem(sizeof(HASH_TABLE)); + if (pHash == NULL) + { + return NULL; + } + + pHash->uCount = 0; + pHash->uSize = uInitialLen; + pHash->uMinimumSize = uInitialLen; + pHash->uKeySize = uKeySize; + pHash->uGrowThreshold = (uInitialLen >> 2) * 3; + pHash->uShrinkThreshold = NO_SHRINK; + pHash->pfnHashFunc = pfnHashFunc; + pHash->pfnKeyComp = pfnKeyComp; + + pHash->ppBucketTable = _AllocZMem(sizeof(BUCKET *) * pHash->uSize); + if (pHash->ppBucketTable == NULL) + { + _FreeMem(pHash); + /*not nulling pointer, out of scope*/ + return NULL; + } + + return pHash; +} + +#if defined(DEBUG) +IMG_INTERNAL +HASH_TABLE * HASH_Create_Extended_Debug (IMG_UINT32 uInitialLen, size_t uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp, + const char *file, const unsigned int line) +{ + HASH_TABLE *hash; + hash = HASH_Create_Extended_Int(uInitialLen, uKeySize, + pfnHashFunc, pfnKeyComp); + if (hash) + { + hash->pszFile = file; + hash->ui32LineNum = line; + } + return hash; +} +#endif + +/*************************************************************************/ /*! +@Function HASH_Create +@Description Create a self scaling hash table with a key consisting of a + single uintptr_t, and using the default hash and key + comparison functions. +@Input uInitialLen Initial and minimum length of the hash table, + where the length refers to the number of entries + in the hash table, not its size in bytes. 
+@Return NULL or hash table handle. +*/ /**************************************************************************/ +IMG_INTERNAL +HASH_TABLE * HASH_Create_Int (IMG_UINT32 uInitialLen) +{ + return HASH_Create_Extended_Int(uInitialLen, sizeof(uintptr_t), + &HASH_Func_Default, &HASH_Key_Comp_Default); +} + +#if defined(DEBUG) +IMG_INTERNAL +HASH_TABLE * HASH_Create_Debug(IMG_UINT32 uInitialLen, const char *file, const unsigned int line) +{ + HASH_TABLE *hash; + hash = HASH_Create_Extended_Int(uInitialLen, sizeof(uintptr_t), + &HASH_Func_Default, &HASH_Key_Comp_Default); + if (hash) + { + hash->pszFile = file; + hash->ui32LineNum = line; + } + return hash; +} +#endif + +/*************************************************************************/ /*! +@Function HASH_Delete_Extended +@Description Delete a hash table created by HASH_Create_Extended or + HASH_Create. All entries in the table should have been removed + before calling this function. +@Input pHash Hash table +@Input bWarn Set false to suppress warnings in the case of + deletion with active entries. 
+*/ /**************************************************************************/ +IMG_INTERNAL void +HASH_Delete_Extended(HASH_TABLE *pHash, IMG_BOOL bWarn) +{ + IMG_BOOL bDoCheck = IMG_TRUE; +#if defined(__KERNEL__) && !defined(__QNXNTO__) + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + + if (psPVRSRVData != NULL) + { + if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK) + { + bDoCheck = IMG_FALSE; + } + } +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) + else + { + bDoCheck = IMG_FALSE; + } +#endif +#endif + if (pHash != NULL) + { + PVR_DPF((PVR_DBG_MESSAGE, "HASH_Delete")); + + if (bDoCheck) + { + PVR_ASSERT(pHash->uCount==0); + } + if (pHash->uCount != 0) + { + IMG_UINT32 i; + if (bWarn) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Leak detected in hash table!", __func__)); + PVR_DPF((PVR_DBG_ERROR, "%s: Likely Cause: client drivers not freeing allocations before destroying devmem context", __func__)); + PVR_DPF((PVR_DBG_ERROR, "%s: Removing remaining %u hash entries.", __func__, pHash->uCount)); +#if defined(DEBUG) + PVR_DPF ((PVR_DBG_ERROR, "%s: Hash %p created at %s:%u.", __func__, (uintptr_t*)pHash, pHash->pszFile, pHash->ui32LineNum)); +#endif + } + + for (i = 0; i < pHash->uSize; i++) + { + BUCKET *pBucket = pHash->ppBucketTable[i]; + while (pBucket != NULL) + { + BUCKET *pNextBucket = pBucket->pNext; + _FreeMem(pBucket); + pBucket = pNextBucket; + } + } + + } + _FreeMem(pHash->ppBucketTable); + pHash->ppBucketTable = NULL; + _FreeMem(pHash); + /*not nulling pointer, copy on stack*/ + } +} + +/*************************************************************************/ /*! +@Function HASH_Delete +@Description Delete a hash table created by HASH_Create_Extended or + HASH_Create. All entries in the table must have been removed + before calling this function. 
+@Input pHash Hash table +*/ /**************************************************************************/ +IMG_INTERNAL void +HASH_Delete(HASH_TABLE *pHash) +{ + HASH_Delete_Extended(pHash, IMG_TRUE); +} + +/*************************************************************************/ /*! +@Function HASH_Insert_Extended +@Description Insert a key value pair into a hash table created with + HASH_Create_Extended. +@Input pHash The hash table. +@Input pKey Pointer to the key. +@Input v The value associated with the key. +@Return IMG_TRUE - success. + IMG_FALSE - failure. +*/ /**************************************************************************/ +IMG_INTERNAL IMG_BOOL +HASH_Insert_Extended(HASH_TABLE *pHash, void *pKey, uintptr_t v) +{ + BUCKET *pBucket; + + PVR_ASSERT(pHash != NULL); + + if (pHash == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: invalid parameter", __func__)); + return IMG_FALSE; + } + + pBucket = _AllocMem(sizeof(BUCKET) + pHash->uKeySize); + if (pBucket == NULL) + { + return IMG_FALSE; + } + + pBucket->v = v; + /* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k (linux)*/ + OSCachedMemCopy(pBucket->k, pKey, pHash->uKeySize); + + _ChainInsert(pHash, pBucket, pHash->ppBucketTable, pHash->uSize); + + pHash->uCount++; + + /* check if we need to think about re-balancing */ + if (pHash->uCount > pHash->uGrowThreshold) + { + /* Ignore the return code from _Resize because the hash table is + still in a valid state and although not ideally sized, it is still + functional */ + _Resize(pHash, pHash->uSize << 1); + } + + return IMG_TRUE; +} + +/*************************************************************************/ /*! +@Function HASH_Insert +@Description Insert a key value pair into a hash table created with + HASH_Create. +@Input pHash The hash table. +@Input k The key value. +@Input v The value associated with the key. +@Return IMG_TRUE - success. + IMG_FALSE - failure. 
+*/ /**************************************************************************/ +IMG_INTERNAL IMG_BOOL +HASH_Insert(HASH_TABLE *pHash, uintptr_t k, uintptr_t v) +{ + return HASH_Insert_Extended(pHash, &k, v); +} + +/*************************************************************************/ /*! +@Function HASH_Remove_Extended +@Description Remove a key from a hash table created with + HASH_Create_Extended. +@Input pHash The hash table. +@Input pKey Pointer to key. +@Return 0 if the key is missing, or the value associated with the key. +*/ /**************************************************************************/ +IMG_INTERNAL uintptr_t +HASH_Remove_Extended(HASH_TABLE *pHash, void *pKey) +{ + BUCKET **ppBucket; + IMG_UINT32 uIndex; + + PVR_ASSERT(pHash != NULL); + + if (pHash == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Null hash table", __func__)); + return 0; + } + + uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize); + + for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != NULL; ppBucket = &((*ppBucket)->pNext)) + { + /* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k */ + if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey)) + { + BUCKET *pBucket = *ppBucket; + uintptr_t v = pBucket->v; + (*ppBucket) = pBucket->pNext; + + _FreeMem(pBucket); + /*not nulling original pointer, already overwritten*/ + + pHash->uCount--; + + /* check if we need to think about re-balancing, when the shrink + * threshold is 0 we are at the minimum size, no further shrink */ + if (pHash->uCount < pHash->uShrinkThreshold) + { + /* Ignore the return code from _Resize because the + hash table is still in a valid state and although + not ideally sized, it is still functional */ + _Resize(pHash, MAX(pHash->uSize >> 1, pHash->uMinimumSize)); + } + + return v; + } + } + return 0; +} + +/*************************************************************************/ /*! +@Function HASH_Remove +@Description Remove a key value pair from a hash table created with + HASH_Create. 
+@Input pHash The hash table. +@Input pKey Pointer to key. +@Return 0 if the key is missing, or the value associated with the key. +*/ /**************************************************************************/ +IMG_INTERNAL uintptr_t +HASH_Remove(HASH_TABLE *pHash, uintptr_t k) +{ + return HASH_Remove_Extended(pHash, &k); +} + +/*************************************************************************/ /*! +@Function HASH_Retrieve_Extended +@Description Retrieve a value from a hash table created with + HASH_Create_Extended. +@Input pHash The hash table. +@Input pKey Pointer to key. +@Return 0 if the key is missing, or the value associated with the key. +*/ /**************************************************************************/ +IMG_INTERNAL uintptr_t +HASH_Retrieve_Extended(HASH_TABLE *pHash, void *pKey) +{ + BUCKET **ppBucket; + IMG_UINT32 uIndex; + + PVR_ASSERT(pHash != NULL); + + if (pHash == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Null hash table", __func__)); + return 0; + } + + uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize); + + for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != NULL; ppBucket = &((*ppBucket)->pNext)) + { + /* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k */ + if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey)) + { + BUCKET *pBucket = *ppBucket; + uintptr_t v = pBucket->v; + + return v; + } + } + return 0; +} + +/*************************************************************************/ /*! +@Function HASH_Retrieve +@Description Retrieve a value from a hash table created with HASH_Create. +@Input pHash The hash table. +@Input pKey Pointer to key. +@Return 0 if the key is missing, or the value associated with the key. +*/ /**************************************************************************/ +IMG_INTERNAL uintptr_t +HASH_Retrieve(HASH_TABLE *pHash, uintptr_t k) +{ + return HASH_Retrieve_Extended(pHash, &k); +} + +/*************************************************************************/ /*! 
+@Function HASH_Iterate +@Description Iterate over every entry in the hash table. +@Input pHash Hash table to iterate. +@Input pfnCallback Callback to call with the key and data for each +. entry in the hash table +@Return Callback error if any, otherwise PVRSRV_OK +*/ /**************************************************************************/ +IMG_INTERNAL PVRSRV_ERROR +HASH_Iterate(HASH_TABLE *pHash, HASH_pfnCallback pfnCallback, void* args) +{ + IMG_UINT32 uIndex; + for (uIndex=0; uIndex < pHash->uSize; uIndex++) + { + BUCKET *pBucket; + pBucket = pHash->ppBucketTable[uIndex]; + while (pBucket != NULL) + { + PVRSRV_ERROR eError; + BUCKET *pNextBucket = pBucket->pNext; + + eError = pfnCallback((uintptr_t) ((void *) *(pBucket->k)), pBucket->v, args); + + /* The callback might want us to break out early */ + if (eError != PVRSRV_OK) + return eError; + + pBucket = pNextBucket; + } + } + return PVRSRV_OK; +} + +#ifdef HASH_TRACE +/*************************************************************************/ /*! +@Function HASH_Dump +@Description Dump out some information about a hash table. +@Input pHash The hash table. 
+*/ /**************************************************************************/
+void
+HASH_Dump(HASH_TABLE *pHash)
+{
+ IMG_UINT32 uIndex;
+ IMG_UINT32 uMaxLength=0;
+ IMG_UINT32 uEmptyCount=0;
+
+ PVR_ASSERT(pHash != NULL);
+ for (uIndex=0; uIndex < pHash->uSize; uIndex++)
+ {
+ BUCKET *pBucket;
+ IMG_UINT32 uLength = 0;
+ if (pHash->ppBucketTable[uIndex] == NULL)
+ {
+ uEmptyCount++;
+ }
+ for (pBucket=pHash->ppBucketTable[uIndex];
+ pBucket != NULL;
+ pBucket = pBucket->pNext)
+ {
+ uLength++;
+ }
+ uMaxLength = MAX(uMaxLength, uLength);
+ }
+
+ PVR_TRACE(("hash table: uMinimumSize=%d size=%d count=%d",
+ pHash->uMinimumSize, pHash->uSize, pHash->uCount));
+ PVR_TRACE((" empty=%d max=%d", uEmptyCount, uMaxLength));
+}
+#endif
diff --git a/drivers/gpu/drm/phytium/octopus/hash.h b/drivers/gpu/drm/phytium/octopus/hash.h
new file mode 100644
index 000000000000..0999c5b330a2
--- /dev/null
+++ b/drivers/gpu/drm/phytium/octopus/hash.h
@@ -0,0 +1,247 @@
+/*************************************************************************/ /*!
+@File
+@Title Self scaling hash tables
+@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved
+@Description Implements simple self scaling hash tables.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef HASH_H +#define HASH_H + +#include "img_types.h" +#include "pvrsrv_error.h" + +#if defined(__cplusplus) +extern "C" { +#endif + +/* + * Keys passed to the comparison function are only guaranteed to be aligned on + * an uintptr_t boundary. 
+ */ +typedef IMG_UINT32 HASH_FUNC(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen); +typedef IMG_BOOL HASH_KEY_COMP(size_t uKeySize, void *pKey1, void *pKey2); + +typedef struct _HASH_TABLE_ HASH_TABLE; + +typedef PVRSRV_ERROR (*HASH_pfnCallback) ( + uintptr_t k, + uintptr_t v, + void* pvPriv +); + +#if defined(DEBUG) +#else +#define HASH_CREATE(LEN) HASH_Create(LEN) +#endif + +/*************************************************************************/ /*! +@Function HASH_Func_Default +@Description Hash function intended for hashing keys composed of uintptr_t + arrays. +@Input uKeySize The size of the hash key, in bytes. +@Input pKey A pointer to the key to hash. +@Input uHashTabLen The length of the hash table. +@Return The hash value. +*/ /**************************************************************************/ +IMG_UINT32 HASH_Func_Default(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen); + +/*************************************************************************/ /*! +@Function HASH_Key_Comp_Default +@Description Compares keys composed of uintptr_t arrays. +@Input uKeySize The size of the hash key, in bytes. +@Input pKey1 Pointer to first hash key to compare. +@Input pKey2 Pointer to second hash key to compare. +@Return IMG_TRUE - The keys match. + IMG_FALSE - The keys don't match. +*/ /**************************************************************************/ +IMG_BOOL HASH_Key_Comp_Default(size_t uKeySize, void *pKey1, void *pKey2); + +/*************************************************************************/ /*! +@Function HASH_Create_Extended +@Description Create a self scaling hash table, using the supplied key size, + and the supplied hash and key comparison functions. +@Input uInitialLen Initial and minimum length of the hash table, + where the length refers to the number of entries + in the hash table, not its size in bytes. +@Input uKeySize The size of the key, in bytes. +@Input pfnHashFunc Pointer to hash function. 
+@Input pfnKeyComp Pointer to key comparison function. +@Return NULL or hash table handle. +*/ /**************************************************************************/ +HASH_TABLE * HASH_Create_Extended_Int(IMG_UINT32 uInitialLen, size_t uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp); +#if defined(DEBUG) +#define HASH_Create_Extended(LEN, KS, FUN, CMP) HASH_Create_Extended_Debug(LEN, KS, FUN, CMP, __FILE__, __LINE__) +HASH_TABLE * HASH_Create_Extended_Debug (IMG_UINT32 uInitialLen, size_t uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp, + const char *file, const unsigned int line); +#else +#define HASH_Create_Extended HASH_Create_Extended_Int +#endif + +/*************************************************************************/ /*! +@Function HASH_Create +@Description Create a self scaling hash table with a key consisting of a + single uintptr_t, and using the default hash and key + comparison functions. +@Input uInitialLen Initial and minimum length of the hash table, + where the length refers to the number of entries + in the hash table, not its size in bytes. +@Return NULL or hash table handle. +*/ /**************************************************************************/ +HASH_TABLE * HASH_Create_Int(IMG_UINT32 uInitialLen); +#if defined(DEBUG) +#define HASH_Create(LEN) HASH_Create_Debug(LEN, __FILE__, __LINE__) +HASH_TABLE * HASH_Create_Debug (IMG_UINT32 uInitialLen, const char *file, const unsigned int line); +#else +#define HASH_Create HASH_Create_Int +#endif + +/*************************************************************************/ /*! +@Function HASH_Delete_Extended +@Description Delete a hash table created by HASH_Create_Extended or + HASH_Create. All entries in the table should have been removed + before calling this function. +@Input pHash Hash table +@Input bWarn Set false to suppress warnings in the case of + deletion with active entries. 
+@Return None +*/ /**************************************************************************/ +void HASH_Delete_Extended(HASH_TABLE *pHash, IMG_BOOL bWarn); + +/*************************************************************************/ /*! +@Function HASH_Delete +@Description Delete a hash table created by HASH_Create_Extended or + HASH_Create. All entries in the table must have been removed + before calling this function. +@Input pHash Hash table +@Return None +*/ /**************************************************************************/ +void HASH_Delete(HASH_TABLE *pHash); + +/*************************************************************************/ /*! +@Function HASH_Insert_Extended +@Description Insert a key value pair into a hash table created with + HASH_Create_Extended. +@Input pHash The hash table. +@Input pKey Pointer to the key. +@Input v The value associated with the key. +@Return IMG_TRUE - success. + IMG_FALSE - failure. +*/ /**************************************************************************/ +IMG_BOOL HASH_Insert_Extended(HASH_TABLE *pHash, void *pKey, uintptr_t v); + +/*************************************************************************/ /*! +@Function HASH_Insert +@Description Insert a key value pair into a hash table created with + HASH_Create. +@Input pHash The hash table. +@Input k The key value. +@Input v The value associated with the key. +@Return IMG_TRUE - success. + IMG_FALSE - failure. +*/ /**************************************************************************/ +IMG_BOOL HASH_Insert(HASH_TABLE *pHash, uintptr_t k, uintptr_t v); + +/*************************************************************************/ /*! +@Function HASH_Remove_Extended +@Description Remove a key from a hash table created with + HASH_Create_Extended. +@Input pHash The hash table. +@Input pKey Pointer to key. +@Return 0 if the key is missing, or the value associated with the key. 
+*/ /**************************************************************************/ +uintptr_t HASH_Remove_Extended(HASH_TABLE *pHash, void *pKey); + +/*************************************************************************/ /*! +@Function HASH_Remove +@Description Remove a key value pair from a hash table created with + HASH_Create. +@Input pHash The hash table. +@Input k The key value. +@Return 0 if the key is missing, or the value associated with the key. +*/ /**************************************************************************/ +uintptr_t HASH_Remove(HASH_TABLE *pHash, uintptr_t k); + +/*************************************************************************/ /*! +@Function HASH_Retrieve_Extended +@Description Retrieve a value from a hash table created with + HASH_Create_Extended. +@Input pHash The hash table. +@Input pKey Pointer to key. +@Return 0 if the key is missing, or the value associated with the key. +*/ /**************************************************************************/ +uintptr_t HASH_Retrieve_Extended(HASH_TABLE *pHash, void *pKey); + +/*************************************************************************/ /*! +@Function HASH_Retrieve +@Description Retrieve a value from a hash table created with HASH_Create. +@Input pHash The hash table. +@Input k The key value. +@Return 0 if the key is missing, or the value associated with the key. +*/ /**************************************************************************/ +uintptr_t HASH_Retrieve(HASH_TABLE *pHash, uintptr_t k); + +/*************************************************************************/ /*! +@Function HASH_Iterate +@Description Iterate over every entry in the hash table. +@Input pHash Hash table to iterate. +@Input pfnCallback Callback to call with the key and data for each +. 
entry in the hash table +@Return Callback error if any, otherwise PVRSRV_OK +*/ /**************************************************************************/ +PVRSRV_ERROR HASH_Iterate(HASH_TABLE *pHash, HASH_pfnCallback pfnCallback, void* args); + +#ifdef HASH_TRACE +/*************************************************************************/ /*! +@Function HASH_Dump +@Description Dump out some information about a hash table. +@Input pHash The hash table. +*/ /**************************************************************************/ +void HASH_Dump(HASH_TABLE *pHash); +#endif + +#if defined(__cplusplus) +} +#endif + +#endif /* HASH_H */ + +/****************************************************************************** + End of file (hash.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/htb_debug.c b/drivers/gpu/drm/phytium/octopus/htb_debug.c new file mode 100644 index 000000000000..42bc5522ab37 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/htb_debug.c @@ -0,0 +1,1190 @@ +/*************************************************************************/ /*! +@File htb_debug.c +@Title Debug Functionality +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Provides kernel side debugFS Functionality. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#include "rgxdevice.h" +#include "htbserver.h" +#include "htbuffer.h" +#include "htbuffer_types.h" +#include "tlstream.h" +#include "tlclient.h" +#include "pvrsrv_tlcommon.h" +#include "di_server.h" +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv_error.h" +#include "osfunc.h" +#include "allocmem.h" +#include "pvr_notifier.h" +#include "pvrsrv.h" +#include "htb_debug.h" + +// Global data handles for buffer manipulation and processing + +typedef struct { + IMG_PBYTE pBuf; /* Raw data buffer from TL stream */ + IMG_UINT32 uiBufLen; /* Amount of data to process from 'pBuf' */ + IMG_UINT32 uiTotal; /* Total bytes processed */ + IMG_UINT32 uiMsgLen; /* Length of HTB message to be processed */ + IMG_PBYTE pCurr; /* pointer to current message to be decoded */ + IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]; /* Output string */ +} HTB_Sentinel_t; + +typedef struct +{ + DI_ENTRY *psDumpHostDiEntry; /* debug info entry */ + HTB_Sentinel_t sSentinel; /* private control structure for HTB DI + operations */ + IMG_HANDLE hStream; /* stream handle for debugFS use */ +} HTB_DBG_INFO; + +static HTB_DBG_INFO g_sHTBData; + +// Comment out for extra debug level +// #define HTB_CHATTY_PRINT(x) PVR_DPF(x) +#define HTB_CHATTY_PRINT(x) + +typedef void (DI_PRINTF)(const OSDI_IMPL_ENTRY *, const IMG_CHAR *, ...); + +/****************************************************************************** + * debugFS display routines + *****************************************************************************/ +static int HTBDumpBuffer(DI_PRINTF, OSDI_IMPL_ENTRY *, void *); + +static int _DebugHBTraceDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + int retVal; + + PVR_ASSERT(psEntry != NULL); + + /* psEntry should never be NULL */ + if (psEntry == NULL) + { + return -1; + } + + /* Ensure that we have a valid address to use to dump info from. 
If NULL we + * return a failure code to terminate the DI read call. pvData is either + * DI_START_TOKEN (for the initial call) or an HTB buffer address for + * subsequent calls [returned from the NEXT function]. */ + if (pvData == NULL) + { + return -1; + } + + retVal = HTBDumpBuffer(DIPrintf, psEntry, pvData); + + HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: Returning %d", __func__, retVal)); + + return retVal; +} + +static IMG_UINT32 idToLogIdx(IMG_UINT32); /* Forward declaration */ + +/* + * HTB_GetNextMessage + * + * Get next non-empty message block from the buffer held in pSentinel->pBuf + * If we exhaust the data buffer we refill it (after releasing the previous + * message(s) [only one non-NULL message, but PAD messages will get released + * as we traverse them]. + * + * Input: + * pSentinel references the already acquired data buffer + * + * Output: + * pSentinel + * -> uiMsglen updated to the size of the non-NULL message + * + * Returns: + * Address of first non-NULL message in the buffer (if any) + * NULL if there is no further data available from the stream and the buffer + * contents have been drained. + */ +static IMG_PBYTE HTB_GetNextMessage(HTB_Sentinel_t *pSentinel) +{ + void *pNext, *pLast, *pStart, *pData = NULL; + void *pCurrent; /* Current processing point within buffer */ + PVRSRVTL_PPACKETHDR ppHdr; /* Current packet header */ + IMG_UINT32 uiHdrType; /* Packet header type */ + IMG_UINT32 uiMsgSize; /* Message size of current packet (bytes) */ + IMG_BOOL bUnrecognizedErrorPrinted = IMG_FALSE; + IMG_UINT32 ui32Data; + IMG_UINT32 ui32LogIdx; + PVRSRV_ERROR eError; + + PVR_ASSERT(pSentinel != NULL); + + pLast = pSentinel->pBuf + pSentinel->uiBufLen; + + pStart = pSentinel->pBuf; + + pNext = pStart; + pSentinel->uiMsgLen = 0; // Reset count for this message + uiMsgSize = 0; // nothing processed so far + ui32LogIdx = HTB_SF_LAST; // Loop terminator condition + + do + { + /* + * If we've drained the buffer we must RELEASE and ACQUIRE some more. 
+ */ + if (pNext >= pLast) + { + eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, g_sHTBData.hStream); + PVR_ASSERT(eError == PVRSRV_OK); + + eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE, + g_sHTBData.hStream, &pSentinel->pBuf, &pSentinel->uiBufLen); + + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED '%s'", __func__, + "TLClientAcquireData", PVRSRVGETERRORSTRING(eError))); + return NULL; + } + + // Reset our limits - if we've returned an empty buffer we're done. + pLast = pSentinel->pBuf + pSentinel->uiBufLen; + pStart = pSentinel->pBuf; + pNext = pStart; + + if (pStart == NULL || pLast == NULL) + { + return NULL; + } + } + + /* + * We should have a header followed by data block(s) in the stream. + */ + + pCurrent = pNext; + ppHdr = GET_PACKET_HDR(pCurrent); + + if (ppHdr == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Unexpected NULL packet in Host Trace buffer", + __func__)); + pSentinel->uiMsgLen += uiMsgSize; + return NULL; // This should never happen + } + + /* + * This should *NEVER* fire. If it does it means we have got some + * dubious packet header back from the HTB stream. In this case + * the sensible thing is to abort processing and return to + * the caller + */ + uiHdrType = GET_PACKET_TYPE(ppHdr); + + PVR_ASSERT(uiHdrType < PVRSRVTL_PACKETTYPE_LAST && + uiHdrType > PVRSRVTL_PACKETTYPE_UNDEF); + + if (uiHdrType < PVRSRVTL_PACKETTYPE_LAST && + uiHdrType > PVRSRVTL_PACKETTYPE_UNDEF) + { + /* + * We have a (potentially) valid data header. We should see if + * the associated packet header matches one of our expected + * types. + */ + pNext = GET_NEXT_PACKET_ADDR(ppHdr); + + PVR_ASSERT(pNext != NULL); + + uiMsgSize = (IMG_UINT32)((size_t)pNext - (size_t)ppHdr); + + pSentinel->uiMsgLen += uiMsgSize; + + pData = GET_PACKET_DATA_PTR(ppHdr); + + /* + * Handle non-DATA packet types. These include PAD fields which + * may have data associated and other types. 
We simply discard + * these as they have no decodable information within them. + */ + if (uiHdrType != PVRSRVTL_PACKETTYPE_DATA) + { + /* + * Now release the current non-data packet and proceed to the + * next entry (if any). + */ + eError = TLClientReleaseDataLess(DIRECT_BRIDGE_HANDLE, + g_sHTBData.hStream, uiMsgSize); + + HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: Packet Type %x " + "Length %u", __func__, uiHdrType, uiMsgSize)); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED - '%s' message" + " size %u", __func__, "TLClientReleaseDataLess", + PVRSRVGETERRORSTRING(eError), uiMsgSize)); + } + + eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE, + g_sHTBData.hStream, &pSentinel->pBuf, &pSentinel->uiBufLen); + + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED - %s Giving up", + __func__, "TLClientAcquireData", + PVRSRVGETERRORSTRING(eError))); + + return NULL; + } + pSentinel->uiMsgLen = 0; + // Reset our limits - if we've returned an empty buffer we're done. + pLast = pSentinel->pBuf + pSentinel->uiBufLen; + pStart = pSentinel->pBuf; + pNext = pStart; + + if (pStart == NULL || pLast == NULL) + { + return NULL; + } + continue; + } + if (pData == NULL || pData >= pLast) + { + continue; + } + ui32Data = *(IMG_UINT32 *)pData; + ui32LogIdx = idToLogIdx(ui32Data); + } + else + { + PVR_DPF((PVR_DBG_WARNING, "Unexpected Header @%p value %x", + ppHdr, uiHdrType)); + + return NULL; + } + + /* + * Check if the unrecognized ID is valid and therefore, tracebuf + * needs updating. 
+ */
+ if (HTB_SF_LAST == ui32LogIdx && HTB_LOG_VALIDID(ui32Data)
+ && IMG_FALSE == bUnrecognizedErrorPrinted)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: Unrecognised LOG value '%x' GID %x Params %d ID %x @ '%p'",
+ __func__, ui32Data, HTB_SF_GID(ui32Data),
+ HTB_SF_PARAMNUM(ui32Data), ui32Data & 0xfff, pData));
+ bUnrecognizedErrorPrinted = IMG_TRUE;
+ }
+
+ } while (HTB_SF_LAST == ui32LogIdx);
+
+ HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: Returning data @ %p Log value '%x'",
+ __func__, pCurrent, ui32Data));
+
+ return pCurrent;
+}
+
+/*
+ * HTB_GetFirstMessage
+ *
+ * Called from START to obtain the buffer address of the first message within
+ * pSentinel->pBuf. Will ACQUIRE data if the buffer is empty.
+ *
+ * Input:
+ * pSentinel
+ * pui64Pos Offset within the debugFS file
+ *
+ * Output:
+ * pSentinel->pCurr Set to reference the first valid non-NULL message within
+ * the buffer. If no valid message is found set to NULL.
+ * pSentinel
+ * ->pBuf if unset on entry
+ * ->uiBufLen if pBuf unset on entry
+ *
+ * Side-effects:
+ * HTB TL stream will be updated to bypass any zero-length PAD messages before
+ * the first non-NULL message (if any).
+ */
+static void HTB_GetFirstMessage(HTB_Sentinel_t *pSentinel, IMG_UINT64 *pui64Pos)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(pui64Pos);
+
+ if (pSentinel == NULL)
+ return;
+
+ if (pSentinel->pBuf == NULL)
+ {
+ /* Acquire data */
+ pSentinel->uiMsgLen = 0;
+
+ eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE,
+ g_sHTBData.hStream, &pSentinel->pBuf, &pSentinel->uiBufLen);
+
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED '%s'",
+ __func__, "TLClientAcquireData", PVRSRVGETERRORSTRING(eError)));
+
+ pSentinel->pBuf = NULL;
+ pSentinel->pCurr = NULL;
+ }
+ else
+ {
+ /*
+ * If there is no data available we set pSentinel->pCurr to NULL
+ * and return. This is expected behaviour if we've drained the
+ * data and nothing else has yet been produced.
+ */ + if (pSentinel->uiBufLen == 0 || pSentinel->pBuf == NULL) + { + HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: Empty Buffer @ %p", + __func__, pSentinel->pBuf)); + + pSentinel->pCurr = NULL; + return; + } + } + } + + /* Locate next message within buffer. NULL => no more data to process */ + pSentinel->pCurr = HTB_GetNextMessage(pSentinel); +} + +/* + * _DebugHBTraceDIStart: + * + * Returns the address to use for subsequent 'Show', 'Next', 'Stop' file ops. + * Return DI_START_TOKEN for the very first call and allocate a sentinel for + * use by the 'Show' routine and its helpers. + * This is stored in the psEntry's private hook field. + * + * We obtain access to the TLstream associated with the HTB. If this doesn't + * exist (because no pvrdebug capture trace has been set) we simply return with + * a NULL value which will stop the DI traversal. + */ +static void *_DebugHBTraceDIStart(OSDI_IMPL_ENTRY *psEntry, + IMG_UINT64 *pui64Pos) +{ + HTB_Sentinel_t *pSentinel = DIGetPrivData(psEntry); + PVRSRV_ERROR eError; + IMG_UINT32 uiTLMode; + void *retVal; + IMG_HANDLE hStream; + + /* The sentinel object should have been allocated during the creation + * of the DI entry. If it's not there it means that something went + * wrong. Return NULL in such case. */ + if (pSentinel == NULL) + { + return NULL; + } + + /* Check to see if the HTB stream has been configured yet. If not, there is + * nothing to display so we just return NULL to stop the stream access. + */ + if (!HTBIsConfigured()) + { + return NULL; + } + + /* Open the stream in non-blocking mode so that we can determine if there + * is no data to consume. Also disable the producer callback (if any) and + * the open callback so that we do not generate spurious trace data when + * accessing the stream. 
+ */ + uiTLMode = PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING| + PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK| + PVRSRV_STREAM_FLAG_IGNORE_OPEN_CALLBACK; + + /* If two or more processes try to read from this file at the same time + * the TLClientOpenStream() function will handle this by allowing only + * one of them to actually open the stream. The other process will get + * an error stating that the stream is already open. The open function + * is threads safe. */ + eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE, HTB_STREAM_NAME, uiTLMode, + &hStream); + + if (eError == PVRSRV_ERROR_ALREADY_OPEN) + { + /* Stream allows only one reader so return error if it's already + * opened. */ + HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: Stream handle %p already " + "exists for %s", __func__, g_sHTBData.hStream, + HTB_STREAM_NAME)); + return NULL; + } + else if (eError != PVRSRV_OK) + { + /* + * No stream available so nothing to report + */ + return NULL; + } + + /* There is a window where hStream can be NULL but the stream is already + * opened. This shouldn't matter since the TLClientOpenStream() will make + * sure that only one stream can be opened and only one process can reach + * this place at a time. Also the .stop function will be always called + * after this function returns so there should be no risk of stream + * not being closed. */ + PVR_ASSERT(g_sHTBData.hStream == NULL); + g_sHTBData.hStream = hStream; + + /* We're starting the read operation so ensure we properly zero the + * sentinel object. */ + memset(pSentinel, 0, sizeof(*pSentinel)); + + /* + * Find the first message location within pSentinel->pBuf + * => for DI_START_TOKEN we must issue our first ACQUIRE, also for the + * subsequent re-START calls (if any). + */ + + HTB_GetFirstMessage(pSentinel, pui64Pos); + + retVal = *pui64Pos == 0 ? 
DI_START_TOKEN : pSentinel->pCurr; + + HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: Returning %p, Stream %s @ %p", + __func__, retVal, HTB_STREAM_NAME, g_sHTBData.hStream)); + + return retVal; +} + +/* + * _DebugTBTraceDIStop: + * + * Stop processing data collection and release any previously allocated private + * data structure if we have exhausted the previously filled data buffers. + */ +static void _DebugHBTraceDIStop(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + HTB_Sentinel_t *pSentinel = DIGetPrivData(psEntry); + IMG_UINT32 uiMsgLen; + PVRSRV_ERROR eError; + + if (pSentinel == NULL) + { + return; + } + + uiMsgLen = pSentinel->uiMsgLen; + + HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: MsgLen = %d", __func__, uiMsgLen)); + + /* If we get here the handle should never be NULL because + * _DebugHBTraceDIStart() shouldn't allow that. */ + if (g_sHTBData.hStream == NULL) + { + return; + } + + if (uiMsgLen != 0) + { + eError = TLClientReleaseDataLess(DIRECT_BRIDGE_HANDLE, + g_sHTBData.hStream, uiMsgLen); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED - %s, nBytes %u", + __func__, "TLClientReleaseDataLess", + PVRSRVGETERRORSTRING(eError), uiMsgLen)); + } + } + + eError = TLClientCloseStream(DIRECT_BRIDGE_HANDLE, g_sHTBData.hStream); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", + "TLClientCloseStream", PVRSRVGETERRORSTRING(eError), + __func__)); + } + + g_sHTBData.hStream = NULL; +} + + +/* + * _DebugHBTraceDINext: + * + * This is where we release any acquired data which has been processed by the + * DIShow routine. If we have encountered a DI entry overflow we stop + * processing and return NULL. 
Otherwise we release the message that we + * previously processed and simply update our position pointer to the next + * valid HTB message (if any) + */ +static void *_DebugHBTraceDINext(OSDI_IMPL_ENTRY *psEntry, void *pvPriv, + IMG_UINT64 *pui64Pos) +{ + HTB_Sentinel_t *pSentinel = DIGetPrivData(psEntry); + IMG_UINT64 ui64CurPos; + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(pvPriv); + + if (pui64Pos) + { + ui64CurPos = *pui64Pos; + *pui64Pos = ui64CurPos + 1; + } + + /* Determine if we've had an overflow on the previous 'Show' call. If so + * we leave the previously acquired data in the queue (by releasing 0 bytes) + * and return NULL to end this DI entry iteration. + * If we have not overflowed we simply get the next HTB message and use that + * for our display purposes. */ + + if (DIHasOverflowed(psEntry)) + { + (void) TLClientReleaseDataLess(DIRECT_BRIDGE_HANDLE, g_sHTBData.hStream, + 0); + + HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: OVERFLOW - returning NULL", + __func__)); + + return NULL; + } + else + { + eError = TLClientReleaseDataLess(DIRECT_BRIDGE_HANDLE, + g_sHTBData.hStream, + pSentinel->uiMsgLen); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED '%s' @ %p Length %d", + __func__, "TLClientReleaseDataLess", + PVRSRVGETERRORSTRING(eError), pSentinel->pCurr, + pSentinel->uiMsgLen)); + PVR_DPF((PVR_DBG_WARNING, "%s: Buffer @ %p..%p", __func__, + pSentinel->pBuf, + (IMG_PBYTE) (pSentinel->pBuf + pSentinel->uiBufLen))); + + } + + eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE, + g_sHTBData.hStream, &pSentinel->pBuf, + &pSentinel->uiBufLen); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED '%s'\nPrev message len %d", + __func__, "TLClientAcquireData", + PVRSRVGETERRORSTRING(eError), pSentinel->uiMsgLen)); + pSentinel->pBuf = NULL; + } + + pSentinel->uiMsgLen = 0; /* We don't (yet) know the message size */ + } + + HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: Returning %p Msglen %d", __func__, + 
pSentinel->pBuf, pSentinel->uiMsgLen)); + + if (pSentinel->pBuf == NULL || pSentinel->uiBufLen == 0) + { + return NULL; + } + + pSentinel->pCurr = HTB_GetNextMessage(pSentinel); + + return pSentinel->pCurr; +} + +/****************************************************************************** + * HTB Dumping routines and definitions + *****************************************************************************/ +#define IS_VALID_FMT_STRING(FMT) (strchr(FMT, '%') != NULL) +#define MAX_STRING_SIZE (128) + +typedef enum +{ + TRACEBUF_ARG_TYPE_INT, + TRACEBUF_ARG_TYPE_ERR, + TRACEBUF_ARG_TYPE_NONE +} TRACEBUF_ARG_TYPE; + +/* + * Array of all Host Trace log IDs used to convert the tracebuf data + */ +typedef struct _HTB_TRACEBUF_LOG_ { + HTB_LOG_SFids eSFId; + IMG_CHAR *pszName; + IMG_CHAR *pszFmt; + IMG_UINT32 ui32ArgNum; +} HTB_TRACEBUF_LOG; + +static const HTB_TRACEBUF_LOG aLogs[] = { +#define X(a, b, c, d, e) {HTB_LOG_CREATESFID(a,b,e), #c, d, e}, + HTB_LOG_SFIDLIST +#undef X +}; + +static const IMG_CHAR *aGroups[] = { +#define X(A,B) #B, + HTB_LOG_SFGROUPLIST +#undef X +}; +static const IMG_UINT32 uiMax_aGroups = ARRAY_SIZE(aGroups) - 1; + +static TRACEBUF_ARG_TYPE ExtractOneArgFmt(IMG_CHAR **, IMG_CHAR *); +/* + * ExtractOneArgFmt + * + * Scan the input 'printf-like' string *ppszFmt and return the next + * value string to be displayed. If there is no '%' format field in the + * string we return 'TRACEBUF_ARG_TYPE_NONE' and leave the input string + * untouched. 
+ * + * Input + * ppszFmt reference to format string to be decoded + * pszOneArgFmt single field format from *ppszFmt + * + * Returns + * TRACEBUF_ARG_TYPE_ERR unrecognised argument + * TRACEBUF_ARG_TYPE_INT variable is of numeric type + * TRACEBUF_ARG_TYPE_NONE no variable reference in *ppszFmt + * + * Side-effect + * *ppszFmt is updated to reference the next part of the format string + * to be scanned + */ +static TRACEBUF_ARG_TYPE ExtractOneArgFmt( + IMG_CHAR **ppszFmt, + IMG_CHAR *pszOneArgFmt) +{ + IMG_CHAR *pszFmt; + IMG_CHAR *psT; + IMG_UINT32 ui32Count = MAX_STRING_SIZE; + IMG_UINT32 ui32OneArgSize; + TRACEBUF_ARG_TYPE eRet = TRACEBUF_ARG_TYPE_ERR; + + if (NULL == ppszFmt) + return TRACEBUF_ARG_TYPE_ERR; + + pszFmt = *ppszFmt; + if (NULL == pszFmt) + return TRACEBUF_ARG_TYPE_ERR; + + /* + * Find the first '%' + * NOTE: we can be passed a simple string to display which will have no + * parameters embedded within it. In this case we simply return + * TRACEBUF_ARG_TYPE_NONE and the string contents will be the full pszFmt + */ + psT = strchr(pszFmt, '%'); + if (psT == NULL) + { + return TRACEBUF_ARG_TYPE_NONE; + } + + /* Find next conversion identifier after the initial '%' */ + while ((*psT++) && (ui32Count-- > 0)) + { + switch (*psT) + { + case 'd': + case 'i': + case 'o': + case 'u': + case 'x': + case 'X': + { + eRet = TRACEBUF_ARG_TYPE_INT; + goto _found_arg; + } + case 's': + { + eRet = TRACEBUF_ARG_TYPE_ERR; + goto _found_arg; + } + } + } + + if ((psT == NULL) || (ui32Count == 0)) return TRACEBUF_ARG_TYPE_ERR; + +_found_arg: + ui32OneArgSize = psT - pszFmt + 1; + OSCachedMemCopy(pszOneArgFmt, pszFmt, ui32OneArgSize); + pszOneArgFmt[ui32OneArgSize] = '\0'; + + *ppszFmt = psT + 1; + + return eRet; +} + +static IMG_UINT32 idToLogIdx(IMG_UINT32 ui32CheckData) +{ + IMG_UINT32 i = 0; + for (i = 0; aLogs[i].eSFId != HTB_SF_LAST; i++) + { + if ( ui32CheckData == aLogs[i].eSFId ) + return i; + } + /* Nothing found, return max value */ + return HTB_SF_LAST; +} + 
+/* + * DecodeHTB + * + * Decode the data buffer message located at pBuf. This should be a valid + * HTB message as we are provided with the start of the buffer. If empty there + * is no message to process. We update the uiMsgLen field with the size of the + * HTB message that we have processed so that it can be returned to the system + * on successful logging of the message to the output file. + * + * Input + * pSentinel reference to newly read data and pending completion data + * from a previous invocation [handle DI entry buffer overflow] + * -> pBuf reference to raw data that we are to parse + * -> uiBufLen total number of bytes of data available + * -> pCurr start of message to decode + * + * pvDumpDebugFile output file + * pfnDumpDebugPrintf output generating routine + * + * Output + * pSentinel + * -> uiMsgLen length of the decoded message which will be freed to + * the system on successful completion of the DI entry + * update via _DebugHBTraceDINext(), + * Return Value + * 0 successful decode + * -1 unsuccessful decode + */ +static int +DecodeHTB(HTB_Sentinel_t *pSentinel, OSDI_IMPL_ENTRY *pvDumpDebugFile, + DI_PRINTF pfnDumpDebugPrintf) +{ + IMG_UINT32 ui32Data, ui32LogIdx, ui32ArgsCur; + IMG_CHAR *pszFmt = NULL; + IMG_CHAR aszOneArgFmt[MAX_STRING_SIZE]; + IMG_BOOL bUnrecognizedErrorPrinted = IMG_FALSE; + + size_t nPrinted; + + void *pNext, *pLast, *pStart, *pData = NULL; + PVRSRVTL_PPACKETHDR ppHdr; /* Current packet header */ + IMG_UINT32 uiHdrType; /* Packet header type */ + IMG_UINT32 uiMsgSize; /* Message size of current packet (bytes) */ + IMG_BOOL bPacketsDropped; + + pLast = pSentinel->pBuf + pSentinel->uiBufLen; + pStart = pSentinel->pCurr; + + pSentinel->uiMsgLen = 0; // Reset count for this message + + HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: Buf @ %p..%p, Length = %d", + __func__, pStart, pLast, pSentinel->uiBufLen)); + + /* + * We should have a DATA header with the necessary information following + */ + ppHdr = GET_PACKET_HDR(pStart); + + if 
(ppHdr == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Unexpected NULL packet in Host Trace buffer", __func__)); + return -1; + } + + uiHdrType = GET_PACKET_TYPE(ppHdr); + PVR_ASSERT(uiHdrType == PVRSRVTL_PACKETTYPE_DATA); + + pNext = GET_NEXT_PACKET_ADDR(ppHdr); + + PVR_ASSERT(pNext != NULL); + + uiMsgSize = (IMG_UINT32)((size_t)pNext - (size_t)ppHdr); + + pSentinel->uiMsgLen += uiMsgSize; + + pData = GET_PACKET_DATA_PTR(ppHdr); + + if (pData == NULL || pData >= pLast) + { + HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: pData = %p, pLast = %p " + "Returning 0", __func__, pData, pLast)); + return 0; + } + + ui32Data = *(IMG_UINT32 *)pData; + ui32LogIdx = idToLogIdx(ui32Data); + + /* + * Check if the unrecognised ID is valid and therefore, tracebuf + * needs updating. + */ + if (ui32LogIdx == HTB_SF_LAST) + { + if (HTB_LOG_VALIDID(ui32Data)) + { + if (!bUnrecognizedErrorPrinted) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Unrecognised LOG value '%x' GID %x Params %d ID %x @ '%p'", + __func__, ui32Data, HTB_SF_GID(ui32Data), + HTB_SF_PARAMNUM(ui32Data), ui32Data & 0xfff, pData)); + bUnrecognizedErrorPrinted = IMG_TRUE; + } + + return 0; + } + + PVR_DPF((PVR_DBG_ERROR, + "%s: Unrecognised and invalid LOG value detected '%x'", + __func__, ui32Data)); + + return -1; + } + + /* The string format we are going to display */ + /* + * The display will show the header (log-ID, group-ID, number of params) + * The maximum parameter list length = 15 (only 4bits used to encode) + * so we need HEADER + 15 * sizeof(UINT32) and the displayed string + * describing the event. We use a buffer in the per-process pSentinel + * structure to hold the data. 
+ */ + pszFmt = aLogs[ui32LogIdx].pszFmt; + + /* add the message payload size to the running count */ + ui32ArgsCur = HTB_SF_PARAMNUM(ui32Data); + + /* Determine if we've over-filled the buffer and had to drop packets */ + bPacketsDropped = CHECK_PACKETS_DROPPED(ppHdr); + if (bPacketsDropped || + (uiHdrType == PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED)) + { + /* Flag this as it is useful to know ... */ + + PVR_DUMPDEBUG_LOG("\n<========================== *** PACKETS DROPPED *** ======================>\n"); + } + + { + IMG_UINT32 ui32Timestampns, ui32PID, ui32TID; + IMG_UINT64 ui64Timestamp, ui64TimestampSec; + IMG_CHAR *szBuffer = pSentinel->szBuffer; // Buffer start + IMG_CHAR *pszBuffer = pSentinel->szBuffer; // Current place in buf + size_t uBufBytesAvailable = sizeof(pSentinel->szBuffer); + IMG_UINT32 *pui32Data = (IMG_UINT32 *)pData; + IMG_UINT32 ui_aGroupIdx; + + // Get PID field from data stream + pui32Data++; + ui32PID = *pui32Data; + // Get TID field from data stream + pui32Data++; + ui32TID = *pui32Data; + // Get Timestamp part 1 from data stream + pui32Data++; + ui64Timestamp = (IMG_UINT64) *pui32Data << 32; + // Get Timestamp part 2 from data stream + pui32Data++; + ui64Timestamp |= (IMG_UINT64) *pui32Data; + // Move to start of message contents data + pui32Data++; + + /* + * We need to snprintf the data to a local in-kernel buffer + * and then PVR_DUMPDEBUG_LOG() that in one shot + */ + ui_aGroupIdx = MIN(HTB_SF_GID(ui32Data), uiMax_aGroups); + + /* Divide by 1B to get seconds & mod using output var (nanosecond resolution)*/ + ui64TimestampSec = OSDivide64r64(ui64Timestamp, 1000000000, &ui32Timestampns); + + nPrinted = OSSNPrintf(szBuffer, uBufBytesAvailable, "%010"IMG_UINT64_FMTSPEC".%09u:%-5u-%-5u-%s> ", + ui64TimestampSec, ui32Timestampns, ui32PID, ui32TID, aGroups[ui_aGroupIdx]); + if (nPrinted >= uBufBytesAvailable) + { + PVR_DUMPDEBUG_LOG("Buffer overrun - "IMG_SIZE_FMTSPEC" printed," + " max space "IMG_SIZE_FMTSPEC"\n", nPrinted, + 
uBufBytesAvailable); + + nPrinted = uBufBytesAvailable; /* Ensure we don't overflow buffer */ + } + + PVR_DUMPDEBUG_LOG("%s", pszBuffer); + /* Update where our next 'output' point in the buffer is */ + pszBuffer += nPrinted; + uBufBytesAvailable -= nPrinted; + + /* + * Print one argument at a time as this simplifies handling variable + * number of arguments. Special case handling for no arguments. + * This is the case for simple format strings such as + * HTB_SF_MAIN_KICK_UNCOUNTED. + */ + if (ui32ArgsCur == 0) + { + if (pszFmt) + { + nPrinted = OSStringLCopy(pszBuffer, pszFmt, uBufBytesAvailable); + if (nPrinted >= uBufBytesAvailable) + { + PVR_DUMPDEBUG_LOG("Buffer overrun - "IMG_SIZE_FMTSPEC" printed," + " max space "IMG_SIZE_FMTSPEC"\n", nPrinted, + uBufBytesAvailable); + nPrinted = uBufBytesAvailable; /* Ensure we don't overflow buffer */ + } + PVR_DUMPDEBUG_LOG("%s", pszBuffer); + pszBuffer += nPrinted; + /* Don't update the uBufBytesAvailable as we have finished this + * message decode. pszBuffer - szBuffer is the total amount of + * data we have decoded. 
+ */ + } + } + else + { + if (HTB_SF_GID(ui32Data) == HTB_GID_CTRL && HTB_SF_ID(ui32Data) == HTB_ID_MARK_SCALE) + { + IMG_UINT32 i; + IMG_UINT32 ui32ArgArray[HTB_MARK_SCALE_ARG_ARRAY_SIZE]; + IMG_UINT64 ui64OSTS = 0; + IMG_UINT32 ui32OSTSRem = 0; + IMG_UINT64 ui64CRTS = 0; + + /* Retrieve 6 args to an array */ + for (i = 0; i < ARRAY_SIZE(ui32ArgArray); i++) + { + ui32ArgArray[i] = *pui32Data; + pui32Data++; + --ui32ArgsCur; + } + + ui64OSTS = (IMG_UINT64) ui32ArgArray[HTB_ARG_OSTS_PT1] << 32 | ui32ArgArray[HTB_ARG_OSTS_PT2]; + ui64CRTS = (IMG_UINT64) ui32ArgArray[HTB_ARG_CRTS_PT1] << 32 | ui32ArgArray[HTB_ARG_CRTS_PT2]; + + /* Divide by 1B to get seconds, remainder in nano seconds*/ + ui64OSTS = OSDivide64r64(ui64OSTS, 1000000000, &ui32OSTSRem); + + nPrinted = OSSNPrintf(pszBuffer, + uBufBytesAvailable, + "HTBFWMkSync Mark=%u OSTS=%010" IMG_UINT64_FMTSPEC ".%09u CRTS=%" IMG_UINT64_FMTSPEC " CalcClkSpd=%u\n", + ui32ArgArray[HTB_ARG_SYNCMARK], + ui64OSTS, + ui32OSTSRem, + ui64CRTS, + ui32ArgArray[HTB_ARG_CLKSPD]); + + if (nPrinted >= uBufBytesAvailable) + { + PVR_DUMPDEBUG_LOG("Buffer overrun - "IMG_SIZE_FMTSPEC" printed," + " max space "IMG_SIZE_FMTSPEC"\n", nPrinted, + uBufBytesAvailable); + nPrinted = uBufBytesAvailable; /* Ensure we don't overflow buffer */ + } + + PVR_DUMPDEBUG_LOG("%s", pszBuffer); + pszBuffer += nPrinted; + uBufBytesAvailable -= nPrinted; + } + else + { + while (IS_VALID_FMT_STRING(pszFmt) && (uBufBytesAvailable > 0)) + { + IMG_UINT32 ui32TmpArg = *pui32Data; + TRACEBUF_ARG_TYPE eArgType; + + eArgType = ExtractOneArgFmt(&pszFmt, aszOneArgFmt); + + pui32Data++; + ui32ArgsCur--; + + switch (eArgType) + { + case TRACEBUF_ARG_TYPE_INT: + nPrinted = OSSNPrintf(pszBuffer, uBufBytesAvailable, + aszOneArgFmt, ui32TmpArg); + break; + + case TRACEBUF_ARG_TYPE_NONE: + nPrinted = OSStringLCopy(pszBuffer, pszFmt, + uBufBytesAvailable); + break; + + default: + nPrinted = OSSNPrintf(pszBuffer, uBufBytesAvailable, + "Error processing arguments, type not " + 
"recognized (fmt: %s)", aszOneArgFmt); + break; + } + if (nPrinted >= uBufBytesAvailable) + { + PVR_DUMPDEBUG_LOG("Buffer overrun - "IMG_SIZE_FMTSPEC" printed," + " max space "IMG_SIZE_FMTSPEC"\n", nPrinted, + uBufBytesAvailable); + nPrinted = uBufBytesAvailable; /* Ensure we don't overflow buffer */ + } + PVR_DUMPDEBUG_LOG("%s", pszBuffer); + pszBuffer += nPrinted; + uBufBytesAvailable -= nPrinted; + } + /* Display any remaining text in pszFmt string */ + if (pszFmt) + { + nPrinted = OSStringLCopy(pszBuffer, pszFmt, uBufBytesAvailable); + if (nPrinted >= uBufBytesAvailable) + { + PVR_DUMPDEBUG_LOG("Buffer overrun - "IMG_SIZE_FMTSPEC" printed," + " max space "IMG_SIZE_FMTSPEC"\n", nPrinted, + uBufBytesAvailable); + nPrinted = uBufBytesAvailable; /* Ensure we don't overflow buffer */ + } + PVR_DUMPDEBUG_LOG("%s", pszBuffer); + pszBuffer += nPrinted; + /* Don't update the uBufBytesAvailable as we have finished this + * message decode. pszBuffer - szBuffer is the total amount of + * data we have decoded. + */ + } + } + } + + /* Update total bytes processed */ + pSentinel->uiTotal += (pszBuffer - szBuffer); + } + return 0; +} + +/* + * HTBDumpBuffer: Dump the Host Trace Buffer using the TLClient API + * + * This routine just parses *one* message from the buffer. + * The stream will be opened by the Start() routine, closed by the Stop() and + * updated for data consumed by this routine once we have DebugPrintf'd it. + * We use the new TLReleaseDataLess() routine which enables us to update the + * HTB contents with just the amount of data we have successfully processed. + * If we need to leave the data available we can call this with a 0 count. + * This will happen in the case of a buffer overflow so that we can reprocess + * any data which wasn't handled before. 
+ * + * In case of overflow or an error we return -1 otherwise 0 + * + * Input: + * pfnPrintf output routine to display data + * psEntry handle to debug frontend + * pvData data address to start dumping from + * (set by Start() / Next()) + */ +static int HTBDumpBuffer(DI_PRINTF pfnPrintf, OSDI_IMPL_ENTRY *psEntry, + void *pvData) +{ + HTB_Sentinel_t *pSentinel = DIGetPrivData(psEntry); + + PVR_ASSERT(pvData != NULL); + + if (pvData == DI_START_TOKEN) + { + if (pSentinel->pCurr == NULL) + { + HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: DI_START_TOKEN, " + "Empty buffer", __func__)); + return 0; + } + PVR_ASSERT(pSentinel->pCurr != NULL); + + /* Display a Header as we have data to process */ + pfnPrintf(psEntry, "%-20s:%-5s-%-5s-%s %s\n", "Timestamp", "PID", "TID", "Group>", + "Log Entry"); + } + else + { + if (pvData != NULL) + { + PVR_ASSERT(pSentinel->pCurr == pvData); + } + } + + return DecodeHTB(pSentinel, psEntry, pfnPrintf); +} + + +/****************************************************************************** + * External Entry Point routines ... + *****************************************************************************/ +/*************************************************************************/ /*! 
+ @Function HTB_CreateDIEntry + + @Description Create the debugFS entry-point for the host-trace-buffer + + @Returns eError internal error code, PVRSRV_OK on success + + */ /*************************************************************************/ +PVRSRV_ERROR HTB_CreateDIEntry(void) +{ + PVRSRV_ERROR eError; + + DI_ITERATOR_CB sIterator = { + .pfnStart = _DebugHBTraceDIStart, + .pfnStop = _DebugHBTraceDIStop, + .pfnNext = _DebugHBTraceDINext, + .pfnShow = _DebugHBTraceDIShow, + }; + + eError = DICreateEntry("host_trace", NULL, &sIterator, + &g_sHTBData.sSentinel, + DI_ENTRY_TYPE_GENERIC, + &g_sHTBData.psDumpHostDiEntry); + PVR_LOG_RETURN_IF_ERROR(eError, "DICreateEntry"); + + return PVRSRV_OK; +} + + +/*************************************************************************/ /*! + @Function HTB_DestroyDIEntry + + @Description Destroy the debugFS entry-point created by earlier + HTB_CreateDIEntry() call. +*/ /**************************************************************************/ +void HTB_DestroyDIEntry(void) +{ + if (g_sHTBData.psDumpHostDiEntry != NULL) + { + DIDestroyEntry(g_sHTBData.psDumpHostDiEntry); + g_sHTBData.psDumpHostDiEntry = NULL; + } +} + +/* EOF */ diff --git a/drivers/gpu/drm/phytium/octopus/htb_debug.h b/drivers/gpu/drm/phytium/octopus/htb_debug.h new file mode 100644 index 000000000000..f38e1291a307 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/htb_debug.h @@ -0,0 +1,64 @@ +/*************************************************************************/ /*! +@File htb_debug.h +@Title Linux debugFS routine setup header +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef HTB_DEBUG_H +#define HTB_DEBUG_H + +/**************************************************************************/ /*! + @Function HTB_CreateDIEntry + + @Description Create the debugFS entry-point for the host-trace-buffer + + @Returns eError internal error code, PVRSRV_OK on success + + */ /**************************************************************************/ +PVRSRV_ERROR HTB_CreateDIEntry(void); + +/**************************************************************************/ /*! + @Function HTB_DestroyDIEntry + + @Description Destroy the debugFS entry-point created by earlier + HTB_CreateDIEntry() call. +*/ /**************************************************************************/ +void HTB_DestroyDIEntry(void); + +#endif /* HTB_DEBUG_H */ diff --git a/drivers/gpu/drm/phytium/octopus/htbserver.c b/drivers/gpu/drm/phytium/octopus/htbserver.c new file mode 100644 index 000000000000..d91a83f1b7e7 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/htbserver.c @@ -0,0 +1,886 @@ +/*************************************************************************/ /*! +@File htbserver.c +@Title Host Trace Buffer server implementation. +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Host Trace Buffer provides a mechanism to log Host events to a + buffer in a similar way to the Firmware Trace mechanism. + Host Trace Buffer logs data using a Transport Layer buffer. + The Transport Layer and pvrtld tool provides the mechanism to + retrieve the trace data. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "htbserver.h" +#include "htbuffer.h" +#include "htbuffer_types.h" +#include "tlstream.h" +#include "pvrsrv_tlcommon.h" +#include "img_defs.h" +#include "img_types.h" +#include "pvrsrv_error.h" +#include "osfunc.h" +#include "allocmem.h" +#include "pvr_notifier.h" +#include "pvrsrv.h" +#include "pvrsrv_apphint.h" +#include "oskm_apphint.h" + +/* size of circular buffer controlling the maximum number of concurrent PIDs logged */ +#define HTB_MAX_NUM_PID 8 + +/* number of times to try rewriting a log entry */ +#define HTB_LOG_RETRY_COUNT 5 + +/*************************************************************************/ /*! + Host Trace Buffer control information structure +*/ /**************************************************************************/ +typedef struct +{ + IMG_UINT32 ui32BufferSize; /*!< Requested buffer size in bytes + Once set this may not be changed */ + + HTB_OPMODE_CTRL eOpMode; /*!< Control what trace data is dropped if + the buffer is full. 
+ Once set this may not be changed */ + +/* IMG_UINT32 ui32GroupEnable; */ /*!< Flags word controlling groups to be + logged */ + + IMG_UINT32 ui32LogLevel; /*!< Log level to control messages logged */ + + IMG_UINT32 aui32EnablePID[HTB_MAX_NUM_PID]; /*!< PIDs to enable logging for + a specific set of processes */ + + IMG_UINT32 ui32PIDCount; /*!< Current number of PIDs being logged */ + + IMG_UINT32 ui32PIDHead; /*!< Head of the PID circular buffer */ + + HTB_LOGMODE_CTRL eLogMode; /*!< Logging mode control */ + + IMG_BOOL bLogDropSignalled; /*!< Flag indicating if a log message has + been signalled as dropped */ + + /* synchronisation parameters */ + IMG_UINT64 ui64SyncOSTS; + IMG_UINT64 ui64SyncCRTS; + IMG_UINT32 ui32SyncCalcClkSpd; + IMG_UINT32 ui32SyncMarker; + + IMG_BOOL bInitDone; /* Set by HTBInit, reset by HTBDeInit */ + + POS_SPINLOCK hRepeatMarkerLock; /*!< Spinlock used in HTBLogKM to protect global variables + (ByteCount, OSTS, CRTS ClkSpeed) + from becoming inconsistent due to calls from + both KM and UM */ + + IMG_UINT32 ui32ByteCount; /* Byte count used for triggering repeat sync point */ + /* static variables containing details of previous sync point */ + IMG_UINT64 ui64OSTS; + IMG_UINT64 ui64CRTS; + IMG_UINT32 ui32ClkSpeed; + +} HTB_CTRL_INFO; + + +/*************************************************************************/ /*! 
+*/ /**************************************************************************/ +static const IMG_UINT32 MapFlags[] = +{ + 0, /* HTB_OPMODE_UNDEF = 0 */ + TL_OPMODE_DROP_NEWER, /* HTB_OPMODE_DROPLATEST */ + TL_OPMODE_DROP_OLDEST,/* HTB_OPMODE_DROPOLDEST */ + TL_OPMODE_BLOCK /* HTB_OPMODE_BLOCK */ +}; + +static_assert(0 == HTB_OPMODE_UNDEF, "Unexpected value for HTB_OPMODE_UNDEF"); +static_assert(1 == HTB_OPMODE_DROPLATEST, "Unexpected value for HTB_OPMODE_DROPLATEST"); +static_assert(2 == HTB_OPMODE_DROPOLDEST, "Unexpected value for HTB_OPMODE_DROPOLDEST"); +static_assert(3 == HTB_OPMODE_BLOCK, "Unexpected value for HTB_OPMODE_BLOCK"); + +static_assert(1 == TL_OPMODE_DROP_NEWER, "Unexpected value for TL_OPMODE_DROP_NEWER"); +static_assert(2 == TL_OPMODE_DROP_OLDEST, "Unexpected value for TL_OPMODE_DROP_OLDEST"); +static_assert(3 == TL_OPMODE_BLOCK, "Unexpected value for TL_OPMODE_BLOCK"); + +static const IMG_UINT32 g_ui32TLBaseFlags; //TL_FLAG_NO_SIGNAL_ON_COMMIT + +/* Minimum TL buffer size. + * Large enough for around 60 worst case messages or 200 average messages + */ +#define HTB_TL_BUFFER_SIZE_MIN (0x10000) + +/* Minimum concentration of HTB packets in a TL Stream is 60% + * If we just put the HTB header in the TL stream (12 bytes), the TL overhead + * is 8 bytes for its own header, so for the smallest possible (and most + * inefficient) packet we have 3/5 of the buffer used for actual HTB data. + * This shift is used as a guaranteed estimation on when to produce a repeat + * packet. By shifting the size of the buffer by 1 we effectively /2 this + * under the 60% boundary chance we may have overwritten the marker and thus + * guaranteed to always have a marker in the stream */ +#define HTB_MARKER_PREDICTION_THRESHOLD(val) (val >> 1) + +static HTB_CTRL_INFO g_sCtrl; +static IMG_BOOL g_bConfigured = IMG_FALSE; +static IMG_HANDLE g_hTLStream; + + +/************************************************************************/ /*! 
+ @Function _LookupFlags + @Description Convert HTBuffer Operation mode to TLStream flags + + @Input eModeHTBuffer Operation Mode + + @Return IMG_UINT32 TLStream FLags +*/ /**************************************************************************/ +static IMG_UINT32 +_LookupFlags( HTB_OPMODE_CTRL eMode ) +{ + return (eMode < ARRAY_SIZE(MapFlags)) ? MapFlags[eMode] : 0; +} + + +/************************************************************************/ /*! + @Function _HTBLogDebugInfo + @Description Debug dump handler used to dump the state of the HTB module. + Called for each verbosity level during a debug dump. Function + only prints state when called for High verbosity. + + @Input hDebugRequestHandle See PFN_DBGREQ_NOTIFY + + @Input ui32VerbLevel See PFN_DBGREQ_NOTIFY + + @Input pfnDumpDebugPrintf See PFN_DBGREQ_NOTIFY + + @Input pvDumpDebugFile See PFN_DBGREQ_NOTIFY + +*/ /**************************************************************************/ +static void _HTBLogDebugInfo( + PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, + IMG_UINT32 ui32VerbLevel, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile +) +{ + PVR_UNREFERENCED_PARAMETER(hDebugRequestHandle); + + if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH)) + { + + if (g_bConfigured) + { + IMG_INT i; + + PVR_DUMPDEBUG_LOG("------[ HTB Log state: On ]------"); + + PVR_DUMPDEBUG_LOG("HTB Log mode: %d", g_sCtrl.eLogMode); + PVR_DUMPDEBUG_LOG("HTB Log level: %d", g_sCtrl.ui32LogLevel); + PVR_DUMPDEBUG_LOG("HTB Buffer Opmode: %d", g_sCtrl.eOpMode); + + for (i=0; i < HTB_FLAG_NUM_EL; i++) + { + PVR_DUMPDEBUG_LOG("HTB Log group %d: %x", i, g_auiHTBGroupEnable[i]); + } + } + else + { + PVR_DUMPDEBUG_LOG("------[ HTB Log state: Off ]------"); + } + } +} + +/************************************************************************/ /*! + @Function HTBDeviceCreate + @Description Initialisation actions for HTB at device creation. 
+ + @Input psDeviceNode Reference to the device node in context + + @Return eError Internal services call returned eError error + number +*/ /**************************************************************************/ +PVRSRV_ERROR +HTBDeviceCreate( + PVRSRV_DEVICE_NODE *psDeviceNode +) +{ + PVRSRV_ERROR eError; + + eError = PVRSRVRegisterDbgRequestNotify(&psDeviceNode->hHtbDbgReqNotify, + psDeviceNode, &_HTBLogDebugInfo, DEBUG_REQUEST_HTB, NULL); + PVR_LOG_IF_ERROR(eError, "PVRSRVRegisterDbgRequestNotify"); + + return eError; +} + +/************************************************************************/ /*! + @Function HTBDeviceDestroy + @Description De-initialisation actions for HTB at device destruction. + + @Input psDeviceNode Reference to the device node in context + +*/ /**************************************************************************/ +void +HTBDeviceDestroy( + PVRSRV_DEVICE_NODE *psDeviceNode +) +{ + if (psDeviceNode->hHtbDbgReqNotify) + { + /* Not much we can do if it fails, driver unloading */ + (void)PVRSRVUnregisterDbgRequestNotify(psDeviceNode->hHtbDbgReqNotify); + psDeviceNode->hHtbDbgReqNotify = NULL; + } +} + +static IMG_UINT32 g_ui32HTBufferSize = HTB_TL_BUFFER_SIZE_MIN; + +/* + * AppHint access routine forward definitions + */ +static PVRSRV_ERROR _HTBSetLogGroup(const PVRSRV_DEVICE_NODE *, const void *, + IMG_UINT32); +static PVRSRV_ERROR _HTBReadLogGroup(const PVRSRV_DEVICE_NODE *, const void *, + IMG_UINT32 *); + +static PVRSRV_ERROR _HTBSetOpMode(const PVRSRV_DEVICE_NODE *, const void *, + IMG_UINT32); +static PVRSRV_ERROR _HTBReadOpMode(const PVRSRV_DEVICE_NODE *, const void *, + IMG_UINT32 *); + +static void _OnTLReaderOpenCallback(void *); + +/************************************************************************/ /*! 
+ @Function HTBInit + @Description Allocate and initialise the Host Trace Buffer + The buffer size may be changed by specifying + HTBufferSizeInKB=xxxx + + @Return eError Internal services call returned eError error + number +*/ /**************************************************************************/ +PVRSRV_ERROR +HTBInit(void) +{ + void *pvAppHintState = NULL; + IMG_UINT32 ui32AppHintDefault; + IMG_UINT32 ui32BufBytes; + PVRSRV_ERROR eError; + + if (g_sCtrl.bInitDone) + { + PVR_DPF((PVR_DBG_ERROR, "HTBInit: Driver already initialised")); + return PVRSRV_ERROR_ALREADY_EXISTS; + } + + /* + * Buffer Size can be configured by specifying a value in the AppHint + * This will only take effect at module load time so there is no query + * or setting mechanism available. + */ + PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HTBufferSizeInKB, + NULL, + NULL, + APPHINT_OF_DRIVER_NO_DEVICE, + NULL); + + PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_EnableHTBLogGroup, + _HTBReadLogGroup, + _HTBSetLogGroup, + APPHINT_OF_DRIVER_NO_DEVICE, + NULL); + PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HTBOperationMode, + _HTBReadOpMode, + _HTBSetOpMode, + APPHINT_OF_DRIVER_NO_DEVICE, + NULL); + + /* + * Now get whatever values have been configured for our AppHints + */ + OSCreateKMAppHintState(&pvAppHintState); + ui32AppHintDefault = HTB_TL_BUFFER_SIZE_MIN / 1024; + OSGetKMAppHintUINT32(pvAppHintState, HTBufferSizeInKB, + &ui32AppHintDefault, &g_ui32HTBufferSize); + OSFreeKMAppHintState(pvAppHintState); + + ui32BufBytes = g_ui32HTBufferSize * 1024; + + /* initialise rest of state */ + g_sCtrl.ui32BufferSize = + (ui32BufBytes < HTB_TL_BUFFER_SIZE_MIN) + ? 
HTB_TL_BUFFER_SIZE_MIN + : ui32BufBytes; + g_sCtrl.eOpMode = HTB_OPMODE_DROPOLDEST; + g_sCtrl.ui32LogLevel = 0; + g_sCtrl.ui32PIDCount = 0; + g_sCtrl.ui32PIDHead = 0; + g_sCtrl.eLogMode = HTB_LOGMODE_ALLPID; + g_sCtrl.bLogDropSignalled = IMG_FALSE; + + eError = OSSpinLockCreate(&g_sCtrl.hRepeatMarkerLock); + PVR_LOG_RETURN_IF_ERROR(eError, "OSSpinLockCreate"); + + g_sCtrl.bInitDone = IMG_TRUE; + + /* Log the current driver parameter setting for the HTBufferSizeInKB. + * We do this here as there is no other infrastructure for obtaining + * the value. + */ + if (g_ui32HTBufferSize != ui32AppHintDefault) + { + PVR_LOG(("Increasing HTBufferSize to %uKB", g_ui32HTBufferSize)); + } + + return PVRSRV_OK; +} + +/************************************************************************/ /*! + @Function HTBDeInit + @Description Close the Host Trace Buffer and free all resources. Must + perform a no-op if already de-initialised. + + @Return eError Internal services call returned eError error + number +*/ /**************************************************************************/ +PVRSRV_ERROR +HTBDeInit( void ) +{ + if (!g_sCtrl.bInitDone) + return PVRSRV_OK; + + if (g_hTLStream) + { + TLStreamClose( g_hTLStream ); + g_hTLStream = NULL; + } + + if (g_sCtrl.hRepeatMarkerLock != NULL) + { + OSSpinLockDestroy(g_sCtrl.hRepeatMarkerLock); + g_sCtrl.hRepeatMarkerLock = NULL; + } + + g_sCtrl.bInitDone = IMG_FALSE; + return PVRSRV_OK; +} + + +/*************************************************************************/ /*! 
+ AppHint interface functions +*/ /**************************************************************************/ +static +PVRSRV_ERROR _HTBSetLogGroup(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *psPrivate, + IMG_UINT32 ui32Value) +{ + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(psPrivate); + + return HTBControlKM(1, &ui32Value, 0, 0, + HTB_LOGMODE_UNDEF, HTB_OPMODE_UNDEF); +} + +static +PVRSRV_ERROR _HTBReadLogGroup(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *psPrivate, + IMG_UINT32 *pui32Value) +{ + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(psPrivate); + + *pui32Value = g_auiHTBGroupEnable[0]; + return PVRSRV_OK; +} + +static +PVRSRV_ERROR _HTBSetOpMode(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *psPrivate, + IMG_UINT32 ui32Value) +{ + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(psPrivate); + + return HTBControlKM(0, NULL, 0, 0, HTB_LOGMODE_UNDEF, ui32Value); +} + +static +PVRSRV_ERROR _HTBReadOpMode(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *psPrivate, + IMG_UINT32 *pui32Value) +{ + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(psPrivate); + + *pui32Value = (IMG_UINT32)g_sCtrl.eOpMode; + return PVRSRV_OK; +} + + +static void +_OnTLReaderOpenCallback( void *pvArg ) +{ + if ( g_hTLStream ) + { + IMG_UINT64 ui64Time; + OSClockMonotonicns64(&ui64Time); + (void) HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE, + g_sCtrl.ui32SyncMarker, + ((IMG_UINT32)((g_sCtrl.ui64SyncOSTS>>32)&0xffffffff)), + ((IMG_UINT32)(g_sCtrl.ui64SyncOSTS&0xffffffff)), + ((IMG_UINT32)((g_sCtrl.ui64SyncCRTS>>32)&0xffffffff)), + ((IMG_UINT32)(g_sCtrl.ui64SyncCRTS&0xffffffff)), + g_sCtrl.ui32SyncCalcClkSpd); + } + + PVR_UNREFERENCED_PARAMETER(pvArg); +} + + +/*************************************************************************/ /*! 
+ @Function HTBControlKM + @Description Update the configuration of the Host Trace Buffer + + @Input ui32NumFlagGroups Number of group enable flags words + + @Input aui32GroupEnable Flags words controlling groups to be logged + + @Input ui32LogLevel Log level to record + + @Input ui32EnablePID PID to enable logging for a specific process + + @Input eLogMode Enable logging for all or specific processes, + + @Input eOpMode Control the behaviour of the data buffer + + @Return eError Internal services call returned eError error + number +*/ /**************************************************************************/ +PVRSRV_ERROR +HTBControlKM( + const IMG_UINT32 ui32NumFlagGroups, + const IMG_UINT32 * aui32GroupEnable, + const IMG_UINT32 ui32LogLevel, + const IMG_UINT32 ui32EnablePID, + const HTB_LOGMODE_CTRL eLogMode, + const HTB_OPMODE_CTRL eOpMode +) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 ui32RetryCount = HTB_LOG_RETRY_COUNT; + IMG_UINT32 i; + IMG_UINT64 ui64Time; + OSClockMonotonicns64(&ui64Time); + + if ( !g_bConfigured && ui32NumFlagGroups ) + { + eError = TLStreamCreate( + &g_hTLStream, + HTB_STREAM_NAME, + g_sCtrl.ui32BufferSize, + _LookupFlags(HTB_OPMODE_DROPOLDEST) | g_ui32TLBaseFlags, + _OnTLReaderOpenCallback, NULL, NULL, NULL); + PVR_LOG_RETURN_IF_ERROR(eError, "TLStreamCreate"); + g_bConfigured = IMG_TRUE; + } + + if (HTB_OPMODE_UNDEF != eOpMode && g_sCtrl.eOpMode != eOpMode) + { + g_sCtrl.eOpMode = eOpMode; + eError = TLStreamReconfigure(g_hTLStream, _LookupFlags(g_sCtrl.eOpMode | g_ui32TLBaseFlags)); + while ( PVRSRV_ERROR_NOT_READY == eError && ui32RetryCount-- ) + { + OSReleaseThreadQuanta(); + eError = TLStreamReconfigure(g_hTLStream, _LookupFlags(g_sCtrl.eOpMode | g_ui32TLBaseFlags)); + } + PVR_LOG_RETURN_IF_ERROR(eError, "TLStreamReconfigure"); + } + + if ( ui32EnablePID ) + { + g_sCtrl.aui32EnablePID[g_sCtrl.ui32PIDHead] = ui32EnablePID; + g_sCtrl.ui32PIDHead++; + g_sCtrl.ui32PIDHead %= HTB_MAX_NUM_PID; + g_sCtrl.ui32PIDCount++; + if 
( g_sCtrl.ui32PIDCount > HTB_MAX_NUM_PID ) + { + g_sCtrl.ui32PIDCount = HTB_MAX_NUM_PID; + } + } + + /* HTB_LOGMODE_ALLPID overrides ui32EnablePID */ + if ( HTB_LOGMODE_ALLPID == eLogMode ) + { + OSCachedMemSet(g_sCtrl.aui32EnablePID, 0, sizeof(g_sCtrl.aui32EnablePID)); + g_sCtrl.ui32PIDCount = 0; + g_sCtrl.ui32PIDHead = 0; + } + if ( HTB_LOGMODE_UNDEF != eLogMode ) + { + g_sCtrl.eLogMode = eLogMode; + } + + if ( ui32NumFlagGroups ) + { + for (i = 0; i < HTB_FLAG_NUM_EL && i < ui32NumFlagGroups; i++) + { + g_auiHTBGroupEnable[i] = aui32GroupEnable[i]; + } + for (; i < HTB_FLAG_NUM_EL; i++) + { + g_auiHTBGroupEnable[i] = 0; + } + } + + if ( ui32LogLevel ) + { + g_sCtrl.ui32LogLevel = ui32LogLevel; + } + + /* Dump the current configuration state */ + eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_OPMODE, g_sCtrl.eOpMode); + PVR_LOG_IF_ERROR(eError, "HTBLog"); + eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_ENABLE_GROUP, g_auiHTBGroupEnable[0]); + PVR_LOG_IF_ERROR(eError, "HTBLog"); + eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_LOG_LEVEL, g_sCtrl.ui32LogLevel); + PVR_LOG_IF_ERROR(eError, "HTBLog"); + eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_LOGMODE, g_sCtrl.eLogMode); + PVR_LOG_IF_ERROR(eError, "HTBLog"); + for (i = 0; i < g_sCtrl.ui32PIDCount; i++) + { + eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_ENABLE_PID, g_sCtrl.aui32EnablePID[i]); + PVR_LOG_IF_ERROR(eError, "HTBLog"); + } + /* Else should never be hit as we set the spd when the power state is updated */ + if (0 != g_sCtrl.ui32SyncMarker && 0 != g_sCtrl.ui32SyncCalcClkSpd) + { + eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE, + g_sCtrl.ui32SyncMarker, + ((IMG_UINT32)((g_sCtrl.ui64SyncOSTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncOSTS&0xffffffff)), + ((IMG_UINT32)((g_sCtrl.ui64SyncCRTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncCRTS&0xffffffff)), + 
g_sCtrl.ui32SyncCalcClkSpd); + PVR_LOG_IF_ERROR(eError, "HTBLog"); + } + + return eError; +} + +/*************************************************************************/ /*! +*/ /**************************************************************************/ +static IMG_BOOL +_ValidPID( IMG_UINT32 PID ) +{ + IMG_UINT32 i; + + for (i = 0; i < g_sCtrl.ui32PIDCount; i++) + { + if ( g_sCtrl.aui32EnablePID[i] == PID ) + { + return IMG_TRUE; + } + } + return IMG_FALSE; +} + + +/*************************************************************************/ /*! + @Function HTBSyncPartitionMarker + @Description Write an HTB sync partition marker to the HTB log + + @Input ui32Marker Marker value + +*/ /**************************************************************************/ +void +HTBSyncPartitionMarker( + const IMG_UINT32 ui32Marker +) +{ + g_sCtrl.ui32SyncMarker = ui32Marker; + if ( g_hTLStream ) + { + PVRSRV_ERROR eError; + IMG_UINT64 ui64Time; + OSClockMonotonicns64(&ui64Time); + + /* Else should never be hit as we set the spd when the power state is updated */ + if (0 != g_sCtrl.ui32SyncCalcClkSpd) + { + eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE, + ui32Marker, + ((IMG_UINT32)((g_sCtrl.ui64SyncOSTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncOSTS&0xffffffff)), + ((IMG_UINT32)((g_sCtrl.ui64SyncCRTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncCRTS&0xffffffff)), + g_sCtrl.ui32SyncCalcClkSpd); + PVR_WARN_IF_ERROR(eError, "HTBLog"); + } + } +} + +/*************************************************************************/ /*! + @Function HTBSyncPartitionMarkerRepeat + @Description Write a HTB sync partition marker to the HTB log, given + the previous values to repeat. 
+ + @Input ui32Marker Marker value + @Input ui64SyncOSTS previous OSTS + @Input ui64SyncCRTS previous CRTS + @Input ui32ClkSpeed previous Clock speed + +*/ /**************************************************************************/ +void +HTBSyncPartitionMarkerRepeat( + const IMG_UINT32 ui32Marker, + const IMG_UINT64 ui64SyncOSTS, + const IMG_UINT64 ui64SyncCRTS, + const IMG_UINT32 ui32ClkSpeed +) +{ + if ( g_hTLStream ) + { + PVRSRV_ERROR eError; + IMG_UINT64 ui64Time; + OSClockMonotonicns64(&ui64Time); + + /* Else should never be hit as we set the spd when the power state is updated */ + if (0 != ui32ClkSpeed) + { + eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE, + ui32Marker, + ((IMG_UINT32)((ui64SyncOSTS>>32)&0xffffffffU)), ((IMG_UINT32)(ui64SyncOSTS&0xffffffffU)), + ((IMG_UINT32)((ui64SyncCRTS>>32)&0xffffffffU)), ((IMG_UINT32)(ui64SyncCRTS&0xffffffffU)), + ui32ClkSpeed); + PVR_WARN_IF_ERROR(eError, "HTBLog"); + } + } +} + +/*************************************************************************/ /*! 
+ @Function HTBSyncScale + @Description Write FW-Host synchronisation data to the HTB log when clocks + change or are re-calibrated + + @Input bLogValues IMG_TRUE if value should be immediately written + out to the log + + @Input ui64OSTS OS Timestamp + + @Input ui64CRTS Rogue timestamp + + @Input ui32CalcClkSpd Calculated clock speed + +*/ /**************************************************************************/ +void +HTBSyncScale( + const IMG_BOOL bLogValues, + const IMG_UINT64 ui64OSTS, + const IMG_UINT64 ui64CRTS, + const IMG_UINT32 ui32CalcClkSpd +) +{ + g_sCtrl.ui64SyncOSTS = ui64OSTS; + g_sCtrl.ui64SyncCRTS = ui64CRTS; + g_sCtrl.ui32SyncCalcClkSpd = ui32CalcClkSpd; + if (g_hTLStream && bLogValues) + { + PVRSRV_ERROR eError; + IMG_UINT64 ui64Time; + OSClockMonotonicns64(&ui64Time); + eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE, + g_sCtrl.ui32SyncMarker, + ((IMG_UINT32)((ui64OSTS>>32)&0xffffffff)), ((IMG_UINT32)(ui64OSTS&0xffffffff)), + ((IMG_UINT32)((ui64CRTS>>32)&0xffffffff)), ((IMG_UINT32)(ui64CRTS&0xffffffff)), + ui32CalcClkSpd); + /* + * Don't spam the log with non-failure cases + */ + PVR_WARN_IF_ERROR(eError, "HTBLog"); + } +} + + +/*************************************************************************/ /*! + @Function HTBLogKM + @Description Record a Host Trace Buffer log event + + @Input PID The PID of the process the event is associated + with. This is provided as an argument rather + than querying internally so that events associated + with a particular process, but performed by + another can be logged correctly. + + @Input ui64TimeStamp The timestamp to be associated with this log event + + @Input SF The log event ID + + @Input ... Log parameters + + @Return PVRSRV_OK Success. 
+ +*/ /**************************************************************************/ +PVRSRV_ERROR +HTBLogKM( + IMG_UINT32 PID, + IMG_UINT32 TID, + IMG_UINT64 ui64TimeStamp, + HTB_LOG_SFids SF, + IMG_UINT32 ui32NumArgs, + IMG_UINT32 * aui32Args +) +{ + OS_SPINLOCK_FLAGS uiSpinLockFlags; + IMG_UINT32 ui32ReturnFlags = 0; + + /* Local snapshot variables of global counters */ + IMG_UINT64 ui64OSTSSnap; + IMG_UINT64 ui64CRTSSnap; + IMG_UINT32 ui32ClkSpeedSnap; + + /* format of messages is: SF:PID:TID:TIMEPT1:TIMEPT2:[PARn]* + * Buffer is on the stack so we don't need a semaphore to guard it + */ + IMG_UINT32 aui32MessageBuffer[HTB_LOG_HEADER_SIZE+HTB_LOG_MAX_PARAMS]; + + /* Min HTB size is HTB_TL_BUFFER_SIZE_MIN : 10000 bytes and Max message/ + * packet size is 4*(HTB_LOG_HEADER_SIZE+HTB_LOG_MAX_PARAMS) = 80 bytes, + * hence with these constraints this design is unlikely to get + * PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED error + */ + PVRSRV_ERROR eError = PVRSRV_ERROR_NOT_ENABLED; + IMG_UINT32 ui32RetryCount = HTB_LOG_RETRY_COUNT; + IMG_UINT32 * pui32Message = aui32MessageBuffer; + IMG_UINT32 ui32MessageSize = 4 * (HTB_LOG_HEADER_SIZE+ui32NumArgs); + + PVR_LOG_GOTO_IF_INVALID_PARAM(aui32Args != NULL, eError, ReturnError); + PVR_LOG_GOTO_IF_INVALID_PARAM(ui32NumArgs == HTB_SF_PARAMNUM(SF), eError, ReturnError); + PVR_LOG_GOTO_IF_INVALID_PARAM(ui32NumArgs <= HTB_LOG_MAX_PARAMS, eError, ReturnError); + + if ( g_hTLStream + && ( 0 == PID || ~0 == PID || HTB_LOGMODE_ALLPID == g_sCtrl.eLogMode || _ValidPID(PID) ) +/* && ( g_sCtrl.ui32GroupEnable & (0x1 << HTB_SF_GID(SF)) ) */ +/* && ( g_sCtrl.ui32LogLevel >= HTB_SF_LVL(SF) ) */ + ) + { + *pui32Message++ = SF; + *pui32Message++ = PID; + *pui32Message++ = TID; + *pui32Message++ = ((IMG_UINT32)((ui64TimeStamp>>32)&0xffffffff)); + *pui32Message++ = ((IMG_UINT32)(ui64TimeStamp&0xffffffff)); + while ( ui32NumArgs ) + { + ui32NumArgs--; + pui32Message[ui32NumArgs] = aui32Args[ui32NumArgs]; + } + + eError = TLStreamWriteRetFlags( 
g_hTLStream, (IMG_UINT8*)aui32MessageBuffer, ui32MessageSize, &ui32ReturnFlags ); + while ( PVRSRV_ERROR_NOT_READY == eError && ui32RetryCount-- ) + { + OSReleaseThreadQuanta(); + eError = TLStreamWriteRetFlags( g_hTLStream, (IMG_UINT8*)aui32MessageBuffer, ui32MessageSize, &ui32ReturnFlags ); + } + + if ( PVRSRV_OK == eError ) + { + g_sCtrl.bLogDropSignalled = IMG_FALSE; + } + else if ( PVRSRV_ERROR_STREAM_FULL != eError || !g_sCtrl.bLogDropSignalled ) + { + PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", "TLStreamWrite", PVRSRVGETERRORSTRING(eError), __func__)); + } + if ( PVRSRV_ERROR_STREAM_FULL == eError ) + { + g_sCtrl.bLogDropSignalled = IMG_TRUE; + } + + } + + if (SF == HTB_SF_CTRL_FWSYNC_MARK_SCALE) + { + OSSpinLockAcquire(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags); + + /* If a marker is being placed reset byte count from last marker */ + g_sCtrl.ui32ByteCount = 0; + g_sCtrl.ui64OSTS = (IMG_UINT64)aui32Args[HTB_ARG_OSTS_PT1] << 32 | aui32Args[HTB_ARG_OSTS_PT2]; + g_sCtrl.ui64CRTS = (IMG_UINT64)aui32Args[HTB_ARG_CRTS_PT1] << 32 | aui32Args[HTB_ARG_CRTS_PT2]; + g_sCtrl.ui32ClkSpeed = aui32Args[HTB_ARG_CLKSPD]; + + OSSpinLockRelease(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags); + } + else + { + OSSpinLockAcquire(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags); + /* Increase global count */ + g_sCtrl.ui32ByteCount += ui32MessageSize; + + /* Check if packet has overwritten last marker/rpt && + If the packet count is over half the size of the buffer */ + if (ui32ReturnFlags & TL_FLAG_OVERWRITE_DETECTED && + g_sCtrl.ui32ByteCount > HTB_MARKER_PREDICTION_THRESHOLD(g_sCtrl.ui32BufferSize)) + { + /* Take snapshot of global variables */ + ui64OSTSSnap = g_sCtrl.ui64OSTS; + ui64CRTSSnap = g_sCtrl.ui64CRTS; + ui32ClkSpeedSnap = g_sCtrl.ui32ClkSpeed; + /* Reset global variable counter */ + g_sCtrl.ui32ByteCount = 0; + OSSpinLockRelease(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags); + + /* Produce a repeat marker */ + HTBSyncPartitionMarkerRepeat(g_sCtrl.ui32SyncMarker, 
ui64OSTSSnap, ui64CRTSSnap, ui32ClkSpeedSnap); + } + else + { + OSSpinLockRelease(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags); + } + } + +ReturnError: + return eError; +} + +/*************************************************************************/ /*! + @Function HTBIsConfigured + @Description Determine if HTB stream has been configured + + @Input none + + @Return IMG_FALSE Stream has not been configured + IMG_TRUE Stream has been configured + +*/ /**************************************************************************/ +IMG_BOOL +HTBIsConfigured(void) +{ + return g_bConfigured; +} +/* EOF */ diff --git a/drivers/gpu/drm/phytium/octopus/htbserver.h b/drivers/gpu/drm/phytium/octopus/htbserver.h new file mode 100644 index 000000000000..780635f95274 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/htbserver.h @@ -0,0 +1,251 @@ +/*************************************************************************/ /*! +@File htbserver.h +@Title Host Trace Buffer server implementation. +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved + +@Description Host Trace Buffer provides a mechanism to log Host events to a + buffer in a similar way to the Firmware Trace mechanism. + Host Trace Buffer logs data using a Transport Layer buffer. + The Transport Layer and pvrtld tool provides the mechanism to + retrieve the trace data. + + A Host Trace can be merged with a corresponding Firmware Trace. + This is achieved by inserting synchronisation data into both + traces and post processing to merge them. + + The FW Trace will contain a "Sync Partition Marker". This is + updated every time the RGX is brought out of reset (RGX clock + timestamps reset at this point) and is repeated when the FW + Trace buffer wraps to ensure there is always at least 1 + partition marker in the Firmware Trace buffer whenever it is + read. + + The Host Trace will contain corresponding "Sync Partition + Markers" - #HTBSyncPartitionMarker(). 
Each partition is then + subdivided into "Sync Scale" sections - #HTBSyncScale(). The + "Sync Scale" data allows the timestamps from the two traces to + be correlated. The "Sync Scale" data is updated as part of the + standard RGX time correlation code (rgxtimecorr.c) and is + updated periodically including on power and clock changes. + +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef HTBSERVER_H +#define HTBSERVER_H + +#include "img_types.h" +#include "pvrsrv_error.h" +#include "pvrsrv.h" +#include "htbuffer.h" + + +/************************************************************************/ /*! + @Function HTBDeviceCreate + @Description Initialisation actions for HTB at device creation. + + @Input psDeviceNode Reference to the device node in context + + @Return eError Internal services call returned eError error + number +*/ /**************************************************************************/ +PVRSRV_ERROR +HTBDeviceCreate(PVRSRV_DEVICE_NODE *psDeviceNode); + +/************************************************************************/ /*! + @Function HTBDeviceDestroy + @Description De-initialisation actions for HTB at device destruction. + + @Input psDeviceNode Reference to the device node in context + +*/ /**************************************************************************/ +void +HTBDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode); + +/************************************************************************/ /*! 
+ @Function HTBInit + @Description Initialise the Host Trace Buffer and allocate all resources + + @Return eError Internal services call returned eError error + number +*/ /**************************************************************************/ +PVRSRV_ERROR +HTBInit(void); + +/************************************************************************/ /*! + @Function HTBDeInit + @Description Close the Host Trace Buffer and free all resources + + @Return eError Internal services call returned eError error + number +*/ /**************************************************************************/ +PVRSRV_ERROR +HTBDeInit(void); + +/*************************************************************************/ /*! + @Function HTBConfigureKM + @Description Configure or update the configuration of the Host Trace Buffer + + @Input ui32NameSize Size of the pszName string + + @Input pszName Name to use for the underlying data buffer + + @Input ui32BufferSize Size of the underlying data buffer + + @Return eError Internal services call returned eError error + number +*/ /**************************************************************************/ +PVRSRV_ERROR +HTBConfigureKM(IMG_UINT32 ui32NameSize, const IMG_CHAR * pszName, + const IMG_UINT32 ui32BufferSize); + + +/*************************************************************************/ /*! 
+ @Function HTBControlKM + @Description Update the configuration of the Host Trace Buffer + + @Input ui32NumFlagGroups Number of group enable flags words + + @Input aui32GroupEnable Flags words controlling groups to be logged + + @Input ui32LogLevel Log level to record + + @Input ui32EnablePID PID to enable logging for a specific process + + @Input eLogMode Enable logging for all or specific processes, + + @Input eOpMode Control the behaviour of the data buffer + + @Return eError Internal services call returned eError error + number +*/ /**************************************************************************/ +PVRSRV_ERROR +HTBControlKM(const IMG_UINT32 ui32NumFlagGroups, + const IMG_UINT32 *aui32GroupEnable, + const IMG_UINT32 ui32LogLevel, + const IMG_UINT32 ui32EnablePID, + const HTB_LOGMODE_CTRL eLogMode, + const HTB_OPMODE_CTRL eOpMode); + + +/*************************************************************************/ /*! + @Function HTBSyncPartitionMarker + @Description Write an HTB sync partition marker to the HTB log + + @Input ui32Marker Marker value + +*/ /**************************************************************************/ +void +HTBSyncPartitionMarker(const IMG_UINT32 ui32Marker); + +/*************************************************************************/ /*! + @Function HTBSyncPartitionMarkerRepeat + @Description Write a HTB sync partition marker to the HTB log, given + the previous values to repeat. + + @Input ui32Marker Marker value + @Input ui64SyncOSTS previous OSTS + @Input ui64SyncCRTS previous CRTS + @Input ui32ClkSpeed previous Clockspeed + +*/ /**************************************************************************/ +void +HTBSyncPartitionMarkerRepeat(const IMG_UINT32 ui32Marker, + const IMG_UINT64 ui64SyncOSTS, + const IMG_UINT64 ui64SyncCRTS, + const IMG_UINT32 ui32ClkSpeed); + +/*************************************************************************/ /*! 
+ @Function HTBSyncScale + @Description Write FW-Host synchronisation data to the HTB log when clocks + change or are re-calibrated + + @Input bLogValues IMG_TRUE if value should be immediately written + out to the log + + @Input ui64OSTS OS Timestamp + + @Input ui64CRTS Rogue timestamp + + @Input ui32CalcClkSpd Calculated clock speed + +*/ /**************************************************************************/ +void +HTBSyncScale(const IMG_BOOL bLogValues, const IMG_UINT64 ui64OSTS, + const IMG_UINT64 ui64CRTS, const IMG_UINT32 ui32CalcClkSpd); + +/*************************************************************************/ /*! + @Function HTBLogKM + @Description Record a Host Trace Buffer log event + + @Input PID The PID of the process the event is associated + with. This is provided as an argument rather + than querying internally so that events associated + with a particular process, but performed by + another can be logged correctly. + + @Input TID The TID of the process the event is associated with. + + @Input ui64TimeStamp The timestamp to be associated with this log event + + @Input SF The log event ID + + @Input ... Log parameters + + @Return PVRSRV_OK Success. + +*/ /**************************************************************************/ +PVRSRV_ERROR +HTBLogKM(IMG_UINT32 PID, IMG_UINT32 TID, IMG_UINT64 ui64TimeStamp, HTB_LOG_SFids SF, + IMG_UINT32 ui32NumArgs, IMG_UINT32 *aui32Args); + +/*************************************************************************/ /*! 
+ @Function HTBIsConfigured + @Description Determine if HTB stream has been configured + + @Input none + + @Return IMG_FALSE Stream has not been configured + IMG_TRUE Stream has been configured + +*/ /**************************************************************************/ +IMG_BOOL +HTBIsConfigured(void); +#endif /* HTBSERVER_H */ + +/* EOF */ diff --git a/drivers/gpu/drm/phytium/octopus/htbuffer.c b/drivers/gpu/drm/phytium/octopus/htbuffer.c new file mode 100644 index 000000000000..245cf11fbaab --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/htbuffer.c @@ -0,0 +1,186 @@ +/*************************************************************************/ /*! +@File htbuffer.c +@Title Host Trace Buffer shared API. +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Host Trace Buffer provides a mechanism to log Host events to a + buffer in a similar way to the Firmware Trace mechanism. + Host Trace Buffer logs data using a Transport Layer buffer. + The Transport Layer and pvrtld tool provides the mechanism to + retrieve the trace data. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include +#include "htbuffer.h" +#include "osfunc.h" +#include "client_htbuffer_bridge.h" + +/* The group flags array of ints large enough to store all the group flags + * NB: This will only work while all logging is in the kernel + */ +IMG_INTERNAL HTB_FLAG_EL_T g_auiHTBGroupEnable[HTB_FLAG_NUM_EL] = {0}; + + +/*************************************************************************/ /*! 
+ @Function HTBControl + @Description Update the configuration of the Host Trace Buffer + @Input hSrvHandle Server Handle + @Input ui32NumFlagGroups Number of group enable flags words + @Input aui32GroupEnable Flags words controlling groups to be logged + @Input ui32LogLevel Log level to record + @Input ui32EnablePID PID to enable logging for a specific process + @Input eLogPidMode Enable logging for all or specific processes, + @Input eOpMode Control what trace data is dropped if the TL + buffer is full + @Return eError Internal services call returned eError error + number +*/ /**************************************************************************/ +IMG_INTERNAL PVRSRV_ERROR +HTBControl( + IMG_HANDLE hSrvHandle, + IMG_UINT32 ui32NumFlagGroups, + IMG_UINT32 * aui32GroupEnable, + IMG_UINT32 ui32LogLevel, + IMG_UINT32 ui32EnablePID, + HTB_LOGMODE_CTRL eLogPidMode, + HTB_OPMODE_CTRL eOpMode +) +{ + return BridgeHTBControl( + hSrvHandle, + ui32NumFlagGroups, + aui32GroupEnable, + ui32LogLevel, + ui32EnablePID, + eLogPidMode, + eOpMode + ); +} + + +/*************************************************************************/ /*! +*/ /**************************************************************************/ +static PVRSRV_ERROR +_HTBLog(IMG_HANDLE hSrvHandle, IMG_UINT32 PID, IMG_UINT32 TID, IMG_UINT64 ui64TimeStampus, + HTB_LOG_SFids SF, va_list args) +{ +#if defined(__KERNEL__) + IMG_UINT32 i; + IMG_UINT32 ui32NumArgs = HTB_SF_PARAMNUM(SF); +#if defined(__KLOCWORK__) + IMG_UINT32 aui32Args[HTB_LOG_MAX_PARAMS + 1]; // Prevent KW False-positive +#else + IMG_UINT32 aui32Args[HTB_LOG_MAX_PARAMS]; +#endif + + PVR_ASSERT(ui32NumArgs <= HTB_LOG_MAX_PARAMS); + ui32NumArgs = (ui32NumArgs>HTB_LOG_MAX_PARAMS) ? 
+ HTB_LOG_MAX_PARAMS : ui32NumArgs; + + /* unpack var args before sending over bridge */ + for (i=0; i>32)&0xffffffff)) +#define HTBLOG_PTR_BITS_LOW(p) ((IMG_UINT32)(((IMG_UINT64)((uintptr_t)p))&0xffffffff)) + +/* macros to cast 64-bit integers into 32-bit integer components for Host Trace */ +#define HTBLOG_U64_BITS_HIGH(u) ((IMG_UINT32)((u>>32)&0xffffffff)) +#define HTBLOG_U64_BITS_LOW(u) ((IMG_UINT32)(u&0xffffffff)) + +/*************************************************************************/ /*! + @Function HTBLog + @Description Record a Host Trace Buffer log event + + @Input PID The PID of the process the event is associated + with. This is provided as an argument rather + than querying internally so that events associated + with a particular process, but performed by + another can be logged correctly. + + @Input TID The TID (Thread ID) of the thread the event is + associated with. + + @Input TimeStampus The timestamp in us for this event + + @Input SF The log event ID + + @Input ... Log parameters + + @Return PVRSRV_OK Success. + +*/ /**************************************************************************/ +IMG_INTERNAL PVRSRV_ERROR +HTBLog(IMG_HANDLE hSrvHandle, IMG_UINT32 PID, IMG_UINT32 TID, IMG_UINT64 ui64TimeStampns, IMG_UINT32 SF, ...); + + +/*************************************************************************/ /*! + @Function HTBLogSimple + @Description Record a Host Trace Buffer log event with implicit PID and Timestamp + + @Input SF The log event ID + + @Input ... Log parameters + + @Return PVRSRV_OK Success. 
+ +*/ /**************************************************************************/ +IMG_INTERNAL PVRSRV_ERROR +HTBLogSimple(IMG_HANDLE hSrvHandle, IMG_UINT32 SF, ...); + + + +/* DEBUG log group enable */ +#if !defined(HTB_DEBUG_LOG_GROUP) +#undef HTB_LOG_TYPE_DBG /* No trace statements in this log group should be checked in */ +#define HTB_LOG_TYPE_DBG __BUILDERROR__ +#endif + + +#if defined(__cplusplus) +} +#endif + +#endif /* HTBUFFER_H */ +/***************************************************************************** + End of file (htbuffer.h) +*****************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/htbuffer_init.h b/drivers/gpu/drm/phytium/octopus/htbuffer_init.h new file mode 100644 index 000000000000..dde23d90eac9 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/htbuffer_init.h @@ -0,0 +1,114 @@ +/*************************************************************************/ /*! +@File htbuffer_init.h +@Title Host Trace Buffer functions needed for Services initialisation +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef HTBUFFER_INIT_H +#define HTBUFFER_INIT_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#include "img_types.h" +#include "img_defs.h" + +/*************************************************************************/ /*! + @Function HTBConfigure + @Description Configure the Host Trace Buffer. 
+ Once these parameters are set they may not be changed + + @Input hSrvHandle Server Handle + + @Input pszBufferName Name to use for the TL buffer, this will be + required to request trace data from the TL + + @Input ui32BufferSize Requested TL buffer size in bytes + + @Return eError Internal services call returned eError error + number +*/ /**************************************************************************/ +IMG_INTERNAL PVRSRV_ERROR +HTBConfigure( + IMG_HANDLE hSrvHandle, + IMG_CHAR * pszBufferName, + IMG_UINT32 ui32BufferSize +); + +/*************************************************************************/ /*! + @Function HTBControl + @Description Update the configuration of the Host Trace Buffer + + @Input hSrvHandle Server Handle + + @Input ui32NumFlagGroups Number of group enable flags words + + @Input aui32GroupEnable Flags words controlling groups to be logged + + @Input ui32LogLevel Log level to record + + @Input ui32EnablePID PID to enable logging for a specific process + + @Input eLogMode Enable logging for all or specific processes, + + @Input eOpMode Control what trace data is dropped if the TL + buffer is full + + @Return eError Internal services call returned eError error + number +*/ /**************************************************************************/ +IMG_INTERNAL PVRSRV_ERROR +HTBControl( + IMG_HANDLE hSrvHandle, + IMG_UINT32 ui32NumFlagGroups, + IMG_UINT32 * aui32GroupEnable, + IMG_UINT32 ui32LogLevel, + IMG_UINT32 ui32EnablePID, + HTB_LOGMODE_CTRL eLogMode, + HTB_OPMODE_CTRL eOpMode +); + +#if defined(__cplusplus) +} +#endif + +#endif /* HTBUFFER_INIT_H */ +/***************************************************************************** + End of file (htbuffer_init.h) +*****************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/htbuffer_sf.h b/drivers/gpu/drm/phytium/octopus/htbuffer_sf.h new file mode 100644 index 000000000000..daa9e74db27f --- /dev/null +++ 
b/drivers/gpu/drm/phytium/octopus/htbuffer_sf.h @@ -0,0 +1,241 @@ +/*************************************************************************/ /*! +@File htbuffer_sf.h +@Title Host Trace Buffer interface string format specifiers +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Header for the Host Trace Buffer logging messages. The following + list are the messages the host driver prints. Changing anything + but the first column or spelling mistakes in the strings will + break compatibility with log files created with older/newer + driver versions. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef HTBUFFER_SF_H +#define HTBUFFER_SF_H + +#if defined(__cplusplus) +extern "C" { +#endif + + +/****************************************************************************** + * *DO*NOT* rearrange or delete lines in SFIDLIST or SFGROUPLIST or you + * WILL BREAK host tracing message compatibility with previous + * driver versions. Only add new ones, if so required. + *****************************************************************************/ + + +/* String used in pvrdebug -h output */ +#define HTB_LOG_GROUPS_STRING_LIST "ctrl,mmu,sync,main,brg" + +/* Used in print statements to display log group state, one %s per group defined */ +#define HTB_LOG_ENABLED_GROUPS_LIST_PFSPEC "%s%s%s%s%s" + +/* Available log groups - Master template + * + * Group usage is as follows: + * CTRL - Internal Host Trace information and synchronisation data + * MMU - MMU page mapping information + * SYNC - Synchronisation debug + * MAIN - Data master kicks, etc. 
tying in with the MAIN group in FWTrace + * DBG - Temporary debugging group, logs not to be left in the driver + * + */ +#define HTB_LOG_SFGROUPLIST \ + X( HTB_GROUP_NONE, NONE ) \ +/* gid, group flag / apphint name */ \ + X( HTB_GROUP_CTRL, CTRL ) \ + X( HTB_GROUP_MMU, MMU ) \ + X( HTB_GROUP_SYNC, SYNC ) \ + X( HTB_GROUP_MAIN, MAIN ) \ + X( HTB_GROUP_BRG, BRG ) \ +/* Debug group HTB_GROUP_DBG must always be last */ \ + X( HTB_GROUP_DBG, DBG ) + + +/* Table of String Format specifiers, the group they belong and the number of + * arguments each expects. Xmacro styled macros are used to generate what is + * needed without requiring hand editing. + * + * id : unique id within a group + * gid : group id as defined above + * sym name : symbolic name of enumerations used to identify message strings + * string : Actual string + * #args : number of arguments the string format requires + */ +#define HTB_LOG_SFIDLIST \ +/*id, gid, sym name, string, # arguments */ \ +X( 0, HTB_GROUP_NONE, HTB_SF_FIRST, "You should not use this string", 0) \ +\ +X( 1, HTB_GROUP_CTRL, HTB_SF_CTRL_LOGMODE, "HTB log mode set to %d (1- all PID, 2 - restricted PID)\n", 1) \ +X( 2, HTB_GROUP_CTRL, HTB_SF_CTRL_ENABLE_PID, "HTB enable logging for PID %d\n", 1) \ +X( 3, HTB_GROUP_CTRL, HTB_SF_CTRL_ENABLE_GROUP, "HTB enable logging groups 0x%08x\n", 1) \ +X( 4, HTB_GROUP_CTRL, HTB_SF_CTRL_LOG_LEVEL, "HTB log level set to %d\n", 1) \ +X( 5, HTB_GROUP_CTRL, HTB_SF_CTRL_OPMODE, "HTB operating mode set to %d (1 - droplatest, 2 - drop oldest, 3 - block)\n", 1) \ +X( 6, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_SCALE, "HTBFWSync OSTS=%08x%08x CRTS=%08x%08x CalcClkSpd=%d\n", 5) \ +X( 7, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_SCALE_RPT, "FW Sync scale info OSTS=%08x%08x CRTS=%08x%08x CalcClkSpd=%d\n", 5) \ +X( 8, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_MARK, "FW Sync Partition marker: %d\n", 1) \ +X( 9, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_MARK_RPT, "FW Sync Partition repeat: %d\n", 1) \ +X( 10, HTB_GROUP_CTRL, 
HTB_SF_CTRL_FWSYNC_MARK_SCALE, "Text not used", 6)\ +\ +X( 1, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_TABLE, "MMU page op table entry page_id=%08x%08x index=%d level=%d val=%08x%08x map=%d\n", 7) \ +X( 2, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_ALLOC, "MMU allocating DevVAddr from %08x%08x to %08x%08x\n", 4) \ +X( 3, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_FREE, "MMU freeing DevVAddr from %08x%08x to %08x%08x\n", 4) \ +X( 4, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_MAP, "MMU mapping DevVAddr %08x%08x to DevPAddr %08x%08x\n", 4) \ +X( 5, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_PMRMAP, "MMU mapping PMR DevVAddr %08x%08x to DevPAddr %08x%08x\n", 4) \ +X( 6, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_UNMAP, "MMU unmapping DevVAddr %08x%08x\n", 2) \ +\ +X( 1, HTB_GROUP_SYNC, HTB_SF_SYNC_SERVER_ALLOC, "Server sync allocation [%08X]\n", 1) \ +X( 2, HTB_GROUP_SYNC, HTB_SF_SYNC_SERVER_UNREF, "Server sync unreferenced [%08X]\n", 1) \ +X( 3, HTB_GROUP_SYNC, HTB_SF_SYNC_PRIM_OP_CREATE, "Sync OP create 0x%08x, block count=%d, server syncs=%d, client syncs=%d\n", 4) \ +X( 4, HTB_GROUP_SYNC, HTB_SF_SYNC_PRIM_OP_TAKE, "Sync OP take 0x%08x server syncs=%d, client syncs=%d\n", 3) \ +X( 5, HTB_GROUP_SYNC, HTB_SF_SYNC_PRIM_OP_COMPLETE, "Sync OP complete 0x%08x\n", 1) \ +X( 6, HTB_GROUP_SYNC, HTB_SF_SYNC_PRIM_OP_DESTROY, "Sync OP destroy 0x%08x\n", 1) \ +\ +X( 1, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_TA_DEPRECATED, "Kick TA: FWCtx %08X @ %d\n", 2) \ +X( 2, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_3D_DEPRECATED, "Kick 3D: FWCtx %08X @ %d\n", 2) \ +X( 3, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_CDM_DEPRECATED,"Kick CDM: FWCtx %08X @ %d\n", 2) \ +X( 4, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_RTU, "Kick RTU: FWCtx %08X @ %d\n", 2) \ +X( 5, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_SHG, "Kick SHG: FWCtx %08X @ %d\n", 2) \ +X( 6, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_2D_DEPRECATED, "Kick 2D: FWCtx %08X @ %d\n", 2) \ +X( 7, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_UNCOUNTED, "Kick (uncounted) for all DMs\n", 0) \ +X( 8, HTB_GROUP_MAIN, HTB_SF_MAIN_FWCCB_CMD, "FW CCB Cmd: %d\n", 1) 
\ +X( 9, HTB_GROUP_MAIN, HTB_SF_MAIN_PRE_POWER, "Pre-power duration @ phase [%d] (0-shutdown,1-startup) RGX: %llu ns SYS: %llu ns\n", 3) \ +X(10, HTB_GROUP_MAIN, HTB_SF_MAIN_POST_POWER, "Post-power duration @ phase [%d] (0-shutdown,1-startup) SYS: %llu ns RGX: %llu ns\n", 3) \ +X(11, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_TA, "Kick TA: FWCtx %08x @ %d (frame:%d, ext:0x%08x, int:0x%08x)\n", 5) \ +X(12, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_3D, "Kick 3D: FWCtx %08x @ %d (frame:%d, ext:0x%08x, int:0x%08x)\n", 5) \ +X(13, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_CDM, "Kick CDM: FWCtx %08x @ %d (frame:%d, ext:0x%08x, int:0x%08x)\n", 5) \ +X(14, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_2D, "Kick 2D: FWCtx %08x @ %d (frame:%d, ext:0x%08x, int:0x%08x)\n", 5) \ +\ +X( 1, HTB_GROUP_BRG, HTB_SF_BRG_BRIDGE_CALL, "Bridge call: start: %010u: bid %03d fid %d\n", 3) \ +X( 2, HTB_GROUP_BRG, HTB_SF_BRG_BRIDGE_CALL_ERR, "Bridge call: start: %010u: bid %03d fid %d error %d\n", 4) \ +\ +X( 1, HTB_GROUP_DBG, HTB_SF_DBG_INTPAIR, "0x%8.8x 0x%8.8x\n", 2) \ +\ +X( 65535, HTB_GROUP_NONE, HTB_SF_LAST, "You should not use this string\n", 15) + + + +/* gid - Group numbers */ +typedef enum _HTB_LOG_SFGROUPS { +#define X(A,B) A, + HTB_LOG_SFGROUPLIST +#undef X +} HTB_LOG_SFGROUPS; + + +/* Group flags are stored in an array of elements. + * Each of which have a certain number of bits. + */ +#define HTB_FLAG_EL_T IMG_UINT32 +#define HTB_FLAG_NUM_BITS_IN_EL (sizeof(HTB_FLAG_EL_T) * 8) + +#define HTB_LOG_GROUP_FLAG_GROUP(gid) ((gid-1) / HTB_FLAG_NUM_BITS_IN_EL) +#define HTB_LOG_GROUP_FLAG(gid) (gid ? 
(0x1 << ((gid-1)%HTB_FLAG_NUM_BITS_IN_EL)) : 0) +#define HTB_LOG_GROUP_FLAG_NAME(gid) HTB_LOG_TYPE_ ## gid + +/* Group enable flags */ +typedef enum _HTB_LOG_TYPE { +#define X(a, b) HTB_LOG_GROUP_FLAG_NAME(b) = HTB_LOG_GROUP_FLAG(a), + HTB_LOG_SFGROUPLIST +#undef X +} HTB_LOG_TYPE; + + + +/* The symbolic names found in the table above are assigned an ui32 value of + * the following format: + * 31 30 28 27 20 19 16 15 12 11 0 bits + * - --- ---- ---- ---- ---- ---- ---- ---- + * 0-11: id number + * 12-15: group id number + * 16-19: number of parameters + * 20-27: unused + * 28-30: active: identify SF packet, otherwise regular int32 + * 31: reserved for signed/unsigned compatibility + * + * The following macro assigns those values to the enum generated SF ids list. + */ +#define HTB_LOG_IDMARKER (0x70000000) +#define HTB_LOG_CREATESFID(a,b,e) (((a) | (b << 12) | (e << 16)) | HTB_LOG_IDMARKER) + +#define HTB_LOG_IDMASK (0xFFF00000) +#define HTB_LOG_VALIDID(I) ( ((I) & HTB_LOG_IDMASK) == HTB_LOG_IDMARKER ) + +typedef enum HTB_LOG_SFids { +#define X(a, b, c, d, e) c = HTB_LOG_CREATESFID(a,b,e), + HTB_LOG_SFIDLIST +#undef X +} HTB_LOG_SFids; + +/* Return the group id that the given (enum generated) id belongs to */ +#define HTB_SF_GID(x) (((x)>>12) & 0xf) +/* Future improvement to support log levels */ +#define HTB_SF_LVL(x) (0) +/* Returns how many arguments the SF(string format) for the given + * (enum generated) id requires. 
+ */ +#define HTB_SF_PARAMNUM(x) (((x)>>16) & 0xf) +/* Returns the id of given enum */ +#define HTB_SF_ID(x) (x & 0xfff) + +/* Format of messages is: SF:PID:TID:TIMEPT1:TIMEPT2:[PARn]* + */ +#define HTB_LOG_HEADER_SIZE 5 +#define HTB_LOG_MAX_PARAMS 15 + +#if defined(__cplusplus) +} +#endif + +/* Defines for handling MARK_SCALE special case */ +#define HTB_GID_CTRL 1 +#define HTB_ID_MARK_SCALE 10 +#define HTB_MARK_SCALE_ARG_ARRAY_SIZE 6 + +/* Defines for extracting args from array for special case MARK_SCALE */ +#define HTB_ARG_SYNCMARK 0 +#define HTB_ARG_OSTS_PT1 1 +#define HTB_ARG_OSTS_PT2 2 +#define HTB_ARG_CRTS_PT1 3 +#define HTB_ARG_CRTS_PT2 4 +#define HTB_ARG_CLKSPD 5 + +#endif /* HTBUFFER_SF_H */ +/***************************************************************************** + End of file (htbuffer_sf.h) +*****************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/htbuffer_types.h b/drivers/gpu/drm/phytium/octopus/htbuffer_types.h new file mode 100644 index 000000000000..3b995b5a4f7b --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/htbuffer_types.h @@ -0,0 +1,118 @@ +/*************************************************************************/ /*! +@File htbuffer_types.h +@Title Host Trace Buffer types. +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Host Trace Buffer provides a mechanism to log Host events to a + buffer in a similar way to the Firmware Trace mechanism. + Host Trace Buffer logs data using a Transport Layer buffer. + The Transport Layer and pvrtld tool provides the mechanism to + retrieve the trace data. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifndef HTBUFFER_TYPES_H +#define HTBUFFER_TYPES_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#include "img_defs.h" +#include "htbuffer_sf.h" + +/* The group flags array of ints large enough to store all the group flags */ +#define HTB_FLAG_NUM_EL (((HTB_GROUP_DBG-1) / HTB_FLAG_NUM_BITS_IN_EL) + 1) +extern IMG_INTERNAL HTB_FLAG_EL_T g_auiHTBGroupEnable[HTB_FLAG_NUM_EL]; + +#define HTB_GROUP_ENABLED(SF) (g_auiHTBGroupEnable[HTB_LOG_GROUP_FLAG_GROUP(HTB_SF_GID(SF))] & HTB_LOG_GROUP_FLAG(HTB_SF_GID(SF))) + +/*************************************************************************/ /*! + Host Trace Buffer operation mode + Care must be taken if changing this enum to ensure the MapFlags[] array + in htbserver.c is kept in-step. +*/ /**************************************************************************/ +typedef enum +{ + /*! Undefined operation mode */ + HTB_OPMODE_UNDEF = 0, + + /*! Drop latest, intended for continuous logging to a UM daemon. + * If the daemon does not keep up, the most recent log data + * will be dropped + */ + HTB_OPMODE_DROPLATEST, + + /*! Drop oldest, intended for crash logging. + * Data will be continuously written to a circular buffer. + * After a crash the buffer will contain events leading up to the crash + */ + HTB_OPMODE_DROPOLDEST, + + /*! Block write if buffer is full */ + HTB_OPMODE_BLOCK, + + HTB_OPMODE_LAST = HTB_OPMODE_BLOCK +} HTB_OPMODE_CTRL; + + +/*************************************************************************/ /*! + Host Trace Buffer log mode control +*/ /**************************************************************************/ +typedef enum +{ + /*! Undefined log mode, used if update is not applied */ + HTB_LOGMODE_UNDEF = 0, + + /*! Log trace messages for all PIDs. */ + HTB_LOGMODE_ALLPID, + + /*! Log trace messages for specific PIDs only. 
*/ + HTB_LOGMODE_RESTRICTEDPID, + + HTB_LOGMODE_LAST = HTB_LOGMODE_RESTRICTEDPID +} HTB_LOGMODE_CTRL; + + +#if defined(__cplusplus) +} +#endif + +#endif /* HTBUFFER_TYPES_H */ + +/****************************************************************************** + End of file (htbuffer_types.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/img_3dtypes.h b/drivers/gpu/drm/phytium/octopus/img_3dtypes.h new file mode 100644 index 000000000000..0d8648ca63b5 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/img_3dtypes.h @@ -0,0 +1,248 @@ +/*************************************************************************/ /*! +@File +@Title Global 3D types for use by IMG APIs +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Defines 3D types for use by IMG APIs +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef IMG_3DTYPES_H +#define IMG_3DTYPES_H + +#include +#include "img_types.h" +#include "img_defs.h" + +/** + * Comparison functions + * This comparison function is defined as: + * A {CmpFunc} B + * A is a reference value, e.g., incoming depth etc. + * B is the sample value, e.g., value in depth buffer. 
+ */ +typedef enum _IMG_COMPFUNC_ +{ + IMG_COMPFUNC_NEVER, /**< The comparison never succeeds */ + IMG_COMPFUNC_LESS, /**< The comparison is a less-than operation */ + IMG_COMPFUNC_EQUAL, /**< The comparison is an equal-to operation */ + IMG_COMPFUNC_LESS_EQUAL, /**< The comparison is a less-than or equal-to + operation */ + IMG_COMPFUNC_GREATER, /**< The comparison is a greater-than operation + */ + IMG_COMPFUNC_NOT_EQUAL, /**< The comparison is a not-equal-to operation + */ + IMG_COMPFUNC_GREATER_EQUAL, /**< The comparison is a greater-than or + equal-to operation */ + IMG_COMPFUNC_ALWAYS, /**< The comparison always succeeds */ +} IMG_COMPFUNC; + +/** + * Stencil op functions + */ +typedef enum _IMG_STENCILOP_ +{ + IMG_STENCILOP_KEEP, /**< Keep original value */ + IMG_STENCILOP_ZERO, /**< Set stencil to 0 */ + IMG_STENCILOP_REPLACE, /**< Replace stencil entry */ + IMG_STENCILOP_INCR_SAT, /**< Increment stencil entry, clamping to max */ + IMG_STENCILOP_DECR_SAT, /**< Decrement stencil entry, clamping to zero */ + IMG_STENCILOP_INVERT, /**< Invert bits in stencil entry */ + IMG_STENCILOP_INCR, /**< Increment stencil entry, + wrapping if necessary */ + IMG_STENCILOP_DECR, /**< Decrement stencil entry, + wrapping if necessary */ +} IMG_STENCILOP; + +/** + * Alpha blending allows colours and textures on one surface + * to be blended with transparency onto another surface. + * These definitions apply to both source and destination blending + * states + */ +typedef enum _IMG_BLEND_ +{ + IMG_BLEND_ZERO = 0, /**< Blend factor is (0,0,0,0) */ + IMG_BLEND_ONE, /**< Blend factor is (1,1,1,1) */ + IMG_BLEND_SRC_COLOUR, /**< Blend factor is the source colour */ + IMG_BLEND_INV_SRC_COLOUR, /**< Blend factor is the inverted source colour + (i.e. 1-src_col) */ + IMG_BLEND_SRC_ALPHA, /**< Blend factor is the source alpha */ + IMG_BLEND_INV_SRC_ALPHA, /**< Blend factor is the inverted source alpha + (i.e. 
1-src_alpha) */ + IMG_BLEND_DEST_ALPHA, /**< Blend factor is the destination alpha */ + IMG_BLEND_INV_DEST_ALPHA, /**< Blend factor is the inverted destination + alpha */ + IMG_BLEND_DEST_COLOUR, /**< Blend factor is the destination colour */ + IMG_BLEND_INV_DEST_COLOUR, /**< Blend factor is the inverted destination + colour */ + IMG_BLEND_SRC_ALPHASAT, /**< Blend factor is the alpha saturation (the + minimum of (Src alpha, + 1 - destination alpha)) */ + IMG_BLEND_BLEND_FACTOR, /**< Blend factor is a constant */ + IMG_BLEND_INVBLEND_FACTOR, /**< Blend factor is a constant (inverted)*/ + IMG_BLEND_SRC1_COLOUR, /**< Blend factor is the colour outputted from + the pixel shader */ + IMG_BLEND_INV_SRC1_COLOUR, /**< Blend factor is the inverted colour + outputted from the pixel shader */ + IMG_BLEND_SRC1_ALPHA, /**< Blend factor is the alpha outputted from + the pixel shader */ + IMG_BLEND_INV_SRC1_ALPHA /**< Blend factor is the inverted alpha + outputted from the pixel shader */ +} IMG_BLEND; + +/** + * The arithmetic operation to perform when blending + */ +typedef enum _IMG_BLENDOP_ +{ + IMG_BLENDOP_ADD = 0, /**< Result = (Source + Destination) */ + IMG_BLENDOP_SUBTRACT, /**< Result = (Source - Destination) */ + IMG_BLENDOP_REV_SUBTRACT, /**< Result = (Destination - Source) */ + IMG_BLENDOP_MIN, /**< Result = min (Source, Destination) */ + IMG_BLENDOP_MAX /**< Result = max (Source, Destination) */ +} IMG_BLENDOP; + +/** + * Logical operation to perform when logic ops are enabled + */ +typedef enum _IMG_LOGICOP_ +{ + IMG_LOGICOP_CLEAR = 0, /**< Result = 0 */ + IMG_LOGICOP_SET, /**< Result = -1 */ + IMG_LOGICOP_COPY, /**< Result = Source */ + IMG_LOGICOP_COPY_INVERTED, /**< Result = ~Source */ + IMG_LOGICOP_NOOP, /**< Result = Destination */ + IMG_LOGICOP_INVERT, /**< Result = ~Destination */ + IMG_LOGICOP_AND, /**< Result = Source & Destination */ + IMG_LOGICOP_NAND, /**< Result = ~(Source & Destination) */ + IMG_LOGICOP_OR, /**< Result = Source | Destination */ + 
IMG_LOGICOP_NOR, /**< Result = ~(Source | Destination) */ + IMG_LOGICOP_XOR, /**< Result = Source ^ Destination */ + IMG_LOGICOP_EQUIV, /**< Result = ~(Source ^ Destination) */ + IMG_LOGICOP_AND_REVERSE, /**< Result = Source & ~Destination */ + IMG_LOGICOP_AND_INVERTED, /**< Result = ~Source & Destination */ + IMG_LOGICOP_OR_REVERSE, /**< Result = Source | ~Destination */ + IMG_LOGICOP_OR_INVERTED /**< Result = ~Source | Destination */ +} IMG_LOGICOP; + +/** + * Type of fog blending supported + */ +typedef enum _IMG_FOGMODE_ +{ + IMG_FOGMODE_NONE, /**< No fog blending - fog calculations are + * based on the value output from the vertex phase */ + IMG_FOGMODE_LINEAR, /**< Linear interpolation */ + IMG_FOGMODE_EXP, /**< Exponential */ + IMG_FOGMODE_EXP2, /**< Exponential squaring */ +} IMG_FOGMODE; + +/** + * Types of filtering + */ +typedef enum _IMG_FILTER_ +{ + IMG_FILTER_DONTCARE, /**< Any filtering mode is acceptable */ + IMG_FILTER_POINT, /**< Point filtering */ + IMG_FILTER_LINEAR, /**< Bi-linear filtering */ + IMG_FILTER_BICUBIC, /**< Bi-cubic filtering */ +} IMG_FILTER; + +/** + * Addressing modes for textures + */ +typedef enum _IMG_ADDRESSMODE_ +{ + IMG_ADDRESSMODE_REPEAT, /**< Texture repeats continuously */ + IMG_ADDRESSMODE_FLIP, /**< Texture flips on odd integer part */ + IMG_ADDRESSMODE_CLAMP, /**< Texture clamped at 0 or 1 */ + IMG_ADDRESSMODE_FLIPCLAMP, /**< Flipped once, then clamp */ + IMG_ADDRESSMODE_CLAMPBORDER, + IMG_ADDRESSMODE_OGL_CLAMP, + IMG_ADDRESSMODE_OVG_TILEFILL, + IMG_ADDRESSMODE_DONTCARE, +} IMG_ADDRESSMODE; + +/** + * Culling based on winding order of triangle. + */ +typedef enum _IMG_CULLMODE_ +{ + IMG_CULLMODE_NONE, /**< Don't cull */ + IMG_CULLMODE_FRONTFACING, /**< Front facing triangles */ + IMG_CULLMODE_BACKFACING, /**< Back facing triangles */ +} IMG_CULLMODE; + +/** + * Colour for clearing surfaces. + * The four elements of the 4 x 32 bit array will map to colour + * R,G,B,A components, in order. 
+ * For YUV colour space the order is Y,U,V. + * For Depth and Stencil formats D maps to R and S maps to G. + */ +typedef union IMG_CLEAR_COLOUR_TAG { + IMG_UINT32 aui32[4]; + IMG_INT32 ai32[4]; + IMG_FLOAT af32[4]; +} IMG_CLEAR_COLOUR; + +static_assert(sizeof(IMG_FLOAT) == sizeof(IMG_INT32), "Size of IMG_FLOAT is not 32 bits."); + +/*! ************************************************************************//** +@brief Specifies the MSAA resolve operation. +*/ /**************************************************************************/ +typedef enum _IMG_RESOLVE_OP_ +{ + IMG_RESOLVE_BLEND = 0, /*!< box filter on the samples */ + IMG_RESOLVE_MIN = 1, /*!< minimum of the samples */ + IMG_RESOLVE_MAX = 2, /*!< maximum of the samples */ + IMG_RESOLVE_SAMPLE0 = 3, /*!< choose sample 0 */ + IMG_RESOLVE_SAMPLE1 = 4, /*!< choose sample 1 */ + IMG_RESOLVE_SAMPLE2 = 5, /*!< choose sample 2 */ + IMG_RESOLVE_SAMPLE3 = 6, /*!< choose sample 3 */ + IMG_RESOLVE_SAMPLE4 = 7, /*!< choose sample 4 */ + IMG_RESOLVE_SAMPLE5 = 8, /*!< choose sample 5 */ + IMG_RESOLVE_SAMPLE6 = 9, /*!< choose sample 6 */ + IMG_RESOLVE_SAMPLE7 = 10, /*!< choose sample 7 */ +} IMG_RESOLVE_OP; + + +#endif /* IMG_3DTYPES_H */ +/****************************************************************************** + End of file (img_3dtypes.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/img_defs.h b/drivers/gpu/drm/phytium/octopus/img_defs.h new file mode 100644 index 000000000000..4d548b5fecf1 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/img_defs.h @@ -0,0 +1,561 @@ +/*************************************************************************/ /*! +@File +@Title Common header containing type definitions for portability +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Contains variable and structure definitions. Any platform + specific types should be defined in this file. 
+@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef IMG_DEFS_H +#define IMG_DEFS_H + +#if defined(__linux__) && defined(__KERNEL__) +#include <linux/types.h> +#else +#include <stddef.h> +#endif +#if !(defined(__linux__) && defined(__KERNEL__)) +#include <assert.h> +#endif + +#include "img_types.h" + +#if defined(NO_INLINE_FUNCS) + #define INLINE + #define FORCE_INLINE +#else +#if defined(__cplusplus) || defined(INTEGRITY_OS) + #if !defined(INLINE) + #define INLINE inline + #endif + #define FORCE_INLINE static inline +#else +#if !defined(INLINE) + #define INLINE __inline +#endif +#if (defined(UNDER_WDDM) || defined(WINDOWS_WDF)) && defined(_X86_) + #define FORCE_INLINE __forceinline +#else + #define FORCE_INLINE static __inline +#endif +#endif +#endif + +/* True if the GCC version is at least the given version. False for older + * versions of GCC, or other compilers. + */ +#define GCC_VERSION_AT_LEAST(major, minor) \ + (__GNUC__ > (major) || \ + (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor))) + +/* Use Clang's __has_extension and __has_builtin macros if available. 
*/ +#if defined(__has_extension) +#define has_clang_extension(e) __has_extension(e) +#else +#define has_clang_extension(e) 0 +#endif + +#if defined(__has_builtin) +#define has_clang_builtin(e) __has_builtin(e) +#else +#define has_clang_builtin(e) 0 +#endif + +/* Use this in any file, or use attributes under GCC - see below */ +#ifndef PVR_UNREFERENCED_PARAMETER +#define PVR_UNREFERENCED_PARAMETER(param) ((void)(param)) +#endif + +/* static_assert(condition, "message to print if it fails"); + * + * Assert something at compile time. If the assertion fails, try to print + * the message, otherwise do nothing. static_assert is available if: + * + * - It's already defined as a macro (e.g. by in C11) + * - We're using MSVC which exposes static_assert unconditionally + * - We're using a C++ compiler that supports C++11 + * - We're using GCC 4.6 and up in C mode (in which case it's available as + * _Static_assert) + * + * In all other cases, fall back to an equivalent that makes an invalid + * declaration. + */ +#if !defined(static_assert) && !defined(_MSC_VER) && \ + (!defined(__cplusplus) || __cplusplus < 201103L) || defined(__KLOCWORK__) + /* static_assert isn't already available */ + #if !defined(__cplusplus) && (GCC_VERSION_AT_LEAST(4, 6) || \ + (defined(__clang__) && has_clang_extension(c_static_assert))) + #define static_assert _Static_assert + #else + #define static_assert(expr, message) \ + extern int static_assert_failed[(expr) ? 1 : -1] __attribute__((unused)) + #endif +#endif + +/* + * unreachable("explanation") can be used to indicate to the compiler that + * some parts of the code can never be reached, like the default branch + * of a switch that covers all real-world possibilities, even though there + * are other ints that exist for instance. + * + * The message will be printed as an assert() when debugging. + * + * Note: there is no need to add a 'return' or any error handling after + * calling unreachable(), as this call will never return. 
+ */ +#if defined(__linux__) && defined(__KERNEL__) +/* Kernel has its own unreachable(), which is a simple infinite loop */ +#elif GCC_VERSION_AT_LEAST(4, 5) || has_clang_builtin(__builtin_unreachable) + #define unreachable(msg) \ + do { \ + assert(!(msg)); \ + __builtin_unreachable(); \ + } while (0) +#elif defined(_MSC_VER) + #define unreachable(msg) \ + do { \ + assert(!(msg)); \ + __assume(0); \ + } while (0) +#else + #define unreachable(msg) \ + do { \ + assert(!(msg)); \ + while (1); \ + } while (0) +#endif + +/* + * assume(x > 2 && x <= 7) works like an assert(), except it hints to the + * compiler what it can assume to optimise the code, like a limited range + * of parameter values. + */ +#if has_clang_builtin(__builtin_assume) + #define assume(expr) \ + do { \ + assert(expr); \ + __builtin_assume(expr); \ + } while (0) +#elif defined(_MSC_VER) + #define assume(expr) \ + do { \ + assert(expr); \ + __assume(expr); \ + } while (0) +#elif defined(__linux__) && defined(__KERNEL__) + #define assume(expr) ((void)(expr)) +#elif GCC_VERSION_AT_LEAST(4, 5) || has_clang_builtin(__builtin_unreachable) + #define assume(expr) \ + do { \ + if (unlikely(!(expr))) \ + unreachable("Assumption isn't true: " # expr); \ + } while (0) +#else + #define assume(expr) assert(expr) +#endif + +/*! Macro to calculate the n-byte aligned value from that supplied rounding up. + * n must be a power of two. + * + * Both arguments should be of a type with the same size otherwise the macro may + * cut off digits, e.g. imagine a 64 bit address in _x and a 32 bit value in _n. + */ +#define PVR_ALIGN(_x, _n) (((_x)+((_n)-1U)) & ~((_n)-1U)) + +#if defined(_WIN32) + +#if defined(WINDOWS_WDF) + + /* + * For WINDOWS_WDF drivers we don't want these defines to overwrite calling conventions propagated through the build system. + * This 'empty' choice helps to resolve all the calling conv issues. 
+ * + */ + #define IMG_CALLCONV + #define C_CALLCONV + + #define IMG_INTERNAL + #define IMG_RESTRICT __restrict + + /* + * The proper way of dll linking under MS compilers is made of two things: + * - decorate implementation with __declspec(dllexport) + * this decoration helps compiler with making the so called + * 'export library' + * - decorate forward-declaration (in a source dependent on a dll) with + * __declspec(dllimport), this decoration helps the compiler to make + * faster and smaller code in terms of calling dll-imported functions + * + * Usually these decorations are performed by having a single macro define + * making that expands to a proper __declspec() depending on the + * translation unit, dllexport inside the dll source and dllimport outside + * the dll source. Having IMG_EXPORT and IMG_IMPORT resolving to the same + * __declspec() makes no sense, but at least works. + */ + #define IMG_IMPORT __declspec(dllexport) + #define IMG_EXPORT __declspec(dllexport) + +#else + + #define IMG_CALLCONV __stdcall + #define IMG_INTERNAL + #define IMG_EXPORT __declspec(dllexport) + #define IMG_RESTRICT __restrict + #define C_CALLCONV __cdecl + + /* + * IMG_IMPORT is defined as IMG_EXPORT so that headers and implementations + * match. Some compilers require the header to be declared IMPORT, while + * the implementation is declared EXPORT. 
+ */ + #define IMG_IMPORT IMG_EXPORT + +#endif + +#if defined(UNDER_WDDM) + #ifndef _INC_STDLIB + #if defined(__mips) + /* do nothing */ + #elif defined(UNDER_MSBUILD) + /* do nothing */ + #else + _CRTIMP void __cdecl abort(void); + #endif + #endif +#endif /* UNDER_WDDM */ +#else + #if (defined(__linux__) || defined(__QNXNTO__)) && defined(__KERNEL__) + #define IMG_INTERNAL + #define IMG_EXPORT + #define IMG_CALLCONV + #elif defined(__linux__) || defined(__METAG) || defined(__mips) || defined(__QNXNTO__) || defined(__riscv) + #define IMG_CALLCONV + #define C_CALLCONV + + #if defined(__METAG) + #define IMG_INTERNAL + #else + #define IMG_INTERNAL __attribute__((visibility("hidden"))) + #endif + + #define IMG_EXPORT __attribute__((visibility("default"))) + #define IMG_RESTRICT __restrict__ + #elif defined(INTEGRITY_OS) + #define IMG_CALLCONV + #define IMG_INTERNAL + #define IMG_EXPORT + #define IMG_RESTRICT + #define C_CALLCONV + #define __cdecl + + #ifndef USE_CODE + #define IMG_ABORT() printf("IMG_ABORT was called.\n") + #endif + #else + #error("define an OS") + #endif + +#endif + +/* Use default definition if not overridden */ +#ifndef IMG_ABORT + #if defined(EXIT_ON_ABORT) + #define IMG_ABORT() exit(1) + #else + #define IMG_ABORT() abort() + #endif +#endif + +/* The best way to suppress unused parameter warnings using GCC is to use a + * variable attribute. Place the __maybe_unused between the type and name of an + * unused parameter in a function parameter list e.g. 'int __maybe_unused var'. + * This should only be used in GCC build environments, for example, in files + * that compile only on Linux. 
+ * Other files should use PVR_UNREFERENCED_PARAMETER
+ */
+
+/* Kernel macros for compiler attributes */
+/* Note: param positions start at 1 */
+#if defined(__linux__) && defined(__KERNEL__)
+	#include <linux/compiler.h>
+
+	#if !defined(__fallthrough)
+		#if defined(__GNUC__) && GCC_VERSION_AT_LEAST(7, 0)
+			#define __fallthrough __attribute__((__fallthrough__))
+		#else
+			#define __fallthrough
+		#endif
+	#endif
+#elif defined(__GNUC__) || defined(HAS_GNUC_ATTRIBUTES)
+	#define __must_check __attribute__((warn_unused_result))
+	#define __maybe_unused __attribute__((unused))
+	#define __malloc __attribute__((malloc))
+
+	/* Bionic's <sys/cdefs.h> might have defined these already */
+	/* See https://android.googlesource.com/platform/bionic.git/+/master/libc/include/sys/cdefs.h */
+	#if !defined(__packed)
+		#define __packed __attribute__((packed))
+	#endif
+	#if !defined(__aligned)
+		#define __aligned(n) __attribute__((aligned(n)))
+	#endif
+	#if !defined(__noreturn)
+		#define __noreturn __attribute__((noreturn))
+	#endif
+
+	/* That one compiler that supports attributes but doesn't support
+	 * the printf attribute... */
+	#if defined(__GNUC__)
+		#define __printf(fmt, va) __attribute__((format(printf, (fmt), (va))))
+	#else
+		#define __printf(fmt, va)
+	#endif /* defined(__GNUC__) */
+
+	#if defined(__cplusplus) && (__cplusplus >= 201703L)
+		#define __fallthrough [[fallthrough]]
+	#elif defined(__GNUC__) && GCC_VERSION_AT_LEAST(7, 0)
+		#define __fallthrough __attribute__((__fallthrough__))
+	#else
+		#define __fallthrough
+	#endif
+
+	#define __user
+	#define __force
+	#define __iomem
+#else
+	/* Silently ignore those attributes */
+	#define __printf(fmt, va)
+	#define __packed
+	#define __aligned(n)
+	#define __must_check
+	#define __maybe_unused
+	#define __malloc
+
+	#if defined(_MSC_VER) || defined(CC_ARM)
+		#define __noreturn __declspec(noreturn)
+	#else
+		#define __noreturn
+	#endif
+
+	/* This may already been defined, e.g.
by SAL (Source Annotation Language) */
+	#if !defined(__fallthrough)
+		#define __fallthrough
+	#endif
+
+	#define __user
+	#define __force
+	#define __iomem
+#endif
+
+
+/* Other attributes, following the same style */
+#if defined(__GNUC__) || defined(HAS_GNUC_ATTRIBUTES)
+	#define __const_function __attribute__((const))
+#else
+	#define __const_function
+#endif
+
+
+/* GCC builtins */
+#if defined(__linux__) && defined(__KERNEL__)
+	#include <linux/compiler.h>
+#elif defined(__GNUC__) || defined(INTEGRITY_OS)
+
+/* Klocwork does not support __builtin_expect, which makes the actual condition
+ * expressions hidden during analysis, affecting it negatively. */
+#if !defined(__KLOCWORK__) && !defined(INTEGRITY_OS) && !defined(DEBUG)
+	#define likely(x) __builtin_expect(!!(x), 1)
+	#define unlikely(x) __builtin_expect(!!(x), 0)
+#endif
+
+	/* Compiler memory barrier to prevent reordering */
+	#define barrier() __asm__ __volatile__("": : :"memory")
+#else
+	#define barrier() static_assert(0, "barrier() isn't supported by your compiler");
+#endif
+
+/* That one OS that defines one but not the other... */
+#ifndef likely
+	#define likely(x) (x)
+#endif
+#ifndef unlikely
+	#define unlikely(x) (x)
+#endif
+
+/* These two macros are also provided by the kernel */
+#ifndef BIT
+#define BIT(b) (1UL << (b))
+#endif
+
+#ifndef BIT_ULL
+#define BIT_ULL(b) (1ULL << (b))
+#endif
+
+#define BIT_SET(f, b) BITMASK_SET((f), BIT(b))
+#define BIT_UNSET(f, b) BITMASK_UNSET((f), BIT(b))
+#define BIT_TOGGLE(f, b) BITMASK_TOGGLE((f), BIT(b))
+#define BIT_ISSET(f, b) BITMASK_HAS((f), BIT(b))
+
+#define BITMASK_SET(f, m) do { ((f) |= (m)); } while (false)
+#define BITMASK_UNSET(f, m) do { ((f) &= ~(m)); } while (false)
+#define BITMASK_TOGGLE(f, m) do { ((f) ^= (m)); } while (false)
+#define BITMASK_HAS(f, m) (((f) & (m)) == (m)) /* the bits from the mask are all set */
+#define BITMASK_ANY(f, m) (((f) & (m)) != 0U) /* any bit from the mask is set */
+
+#ifndef MAX
+#define MAX(a, b) (((a) > (b)) ? (a) : (b))
+#endif
+
+#ifndef MIN
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+#endif
+
+#ifndef CLAMP
+#define CLAMP(min, max, n) ((n) < (min) ? (min) : ((n) > (max) ? (max) : (n)))
+#endif
+
+#define SWAP(X, Y) (X) ^= (Y); (Y) ^= (X); (X) ^= (Y);
+
+
+#if defined(__linux__) && defined(__KERNEL__)
+	#include <linux/kernel.h>
+	#include <linux/stddef.h>
+#endif
+
+/* Get a structure's address from the address of a member */
+#define IMG_CONTAINER_OF(ptr, type, member) \
+	(type *) ((uintptr_t) (ptr) - offsetof(type, member))
+
+/* Get a new pointer with an offset (in bytes) from a base address, useful
+ * when traversing byte buffers and accessing data in buffers through struct
+ * pointers.
+ * Note, this macro is not equivalent to or replacing offsetof() */
+#define IMG_OFFSET_ADDR(addr, offset_in_bytes) \
+	(void*)&(((IMG_UINT8*)(void*)(addr))[offset_in_bytes])
+
+/* Get a new pointer with an offset (in dwords) from a base address, useful
+ * when traversing byte buffers and accessing data in buffers through struct
+ * pointers.
+ * Note, this macro is not equivalent to or replacing offsetof() */
+#define IMG_OFFSET_ADDR_DW(addr, offset_in_dwords) \
+	(void*)(((IMG_UINT32*)(void*)(addr)) + (offset_in_dwords))
+
+/* The number of elements in a fixed-sized array */
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(ARR) (sizeof(ARR) / sizeof((ARR)[0]))
+#endif
+
+/* To guarantee that __func__ can be used, define it as a macro here if it
+   isn't already provided by the compiler. */
+#if defined(_MSC_VER) || (defined(__cplusplus) && __cplusplus < 201103L)
+#define __func__ __FUNCTION__
+#endif
+
+#if defined(__cplusplus)
+/* C++ Specific:
+ * Disallow use of copy and assignment operator within a class.
+ * Should be placed under private.
*/ +#define IMG_DISALLOW_COPY_AND_ASSIGN(C) \ + C(const C&); \ + void operator=(const C&) +#endif + +#if defined(SUPPORT_PVR_VALGRIND) && !defined(__METAG) && !defined(__mips) && !defined(__riscv) + #include "/usr/include/valgrind/memcheck.h" + + #define VG_MARK_INITIALIZED(pvData,ui32Size) VALGRIND_MAKE_MEM_DEFINED(pvData,ui32Size) + #define VG_MARK_NOACCESS(pvData,ui32Size) VALGRIND_MAKE_MEM_NOACCESS(pvData,ui32Size) + #define VG_MARK_ACCESS(pvData,ui32Size) VALGRIND_MAKE_MEM_UNDEFINED(pvData,ui32Size) +#else + #if defined(_MSC_VER) + # define PVR_MSC_SUPPRESS_4127 __pragma(warning(suppress:4127)) + #else + # define PVR_MSC_SUPPRESS_4127 + #endif + + #define VG_MARK_INITIALIZED(pvData,ui32Size) PVR_MSC_SUPPRESS_4127 do { } while (0) + #define VG_MARK_NOACCESS(pvData,ui32Size) PVR_MSC_SUPPRESS_4127 do { } while (0) + #define VG_MARK_ACCESS(pvData,ui32Size) PVR_MSC_SUPPRESS_4127 do { } while (0) +#endif + +#define IMG_STRINGIFY_IMPL(x) # x +#define IMG_STRINGIFY(x) IMG_STRINGIFY_IMPL(x) + +#if defined(INTEGRITY_OS) + /* Definitions not present in INTEGRITY. */ + #define PATH_MAX 200 +#endif + +#if defined(__clang__) || defined(__GNUC__) + /* __SIZEOF_POINTER__ is defined already by these compilers */ +#elif defined(INTEGRITY_OS) + #if defined(__Ptr_Is_64) + #define __SIZEOF_POINTER__ 8 + #else + #define __SIZEOF_POINTER__ 4 + #endif +#elif defined(_WIN32) + #define __SIZEOF_POINTER__ sizeof(char *) +#else + #warning Unknown OS - using default method to determine whether CPU arch is 64-bit. + #define __SIZEOF_POINTER__ sizeof(char *) +#endif + +/* RDI8567: gcc/clang/llvm load/store optimisations may cause issues with + * uncached device memory allocations. Some pointers are made 'volatile' + * to prevent those optimisations being applied to writes through those + * pointers. 
+ */ +#if (GCC_VERSION_AT_LEAST(7, 0) || defined(__clang__)) && (defined(__arm64__) || defined(__aarch64__)) +#define NOLDSTOPT volatile +/* after applying 'volatile' to a pointer, we may need to cast it to 'void *' + * to keep it compatible with its existing uses. + */ +#define NOLDSTOPT_VOID (void *) + +#define NOLDSTOPT_REQUIRED 1 +#else +#define NOLDSTOPT +#define NOLDSTOPT_VOID +#endif + +#endif /* IMG_DEFS_H */ +/***************************************************************************** + End of file (img_defs.h) +*****************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/img_elf.h b/drivers/gpu/drm/phytium/octopus/img_elf.h new file mode 100644 index 000000000000..e5b2852ff25e --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/img_elf.h @@ -0,0 +1,111 @@ +/*************************************************************************/ /*! +@File img_elf.h +@Title IMG ELF file definitions +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Platform RGX +@Description Definitions for ELF file structures used in the DDK. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(IMG_ELF_H) +#define IMG_ELF_H + +#include "img_types.h" + +/* ELF format defines */ +#define ELF_PT_LOAD (0x1U) /* Program header identifier as Load */ +#define ELF_SHT_SYMTAB (0x2U) /* Section identifier as Symbol Table */ +#define ELF_SHT_STRTAB (0x3U) /* Section identifier as String Table */ +#define MAX_STRTAB_NUM (0x8U) /* Maximum number of string table in the ELF file */ + +/* Redefined structs of ELF format */ +typedef struct +{ + IMG_UINT8 ui32Eident[16]; + IMG_UINT16 ui32Etype; + IMG_UINT16 ui32Emachine; + IMG_UINT32 ui32Eversion; + IMG_UINT32 ui32Eentry; + IMG_UINT32 ui32Ephoff; + IMG_UINT32 ui32Eshoff; + IMG_UINT32 ui32Eflags; + IMG_UINT16 ui32Eehsize; + IMG_UINT16 ui32Ephentsize; + IMG_UINT16 ui32Ephnum; + IMG_UINT16 ui32Eshentsize; + IMG_UINT16 ui32Eshnum; + IMG_UINT16 ui32Eshtrndx; +} IMG_ELF_HDR; + +typedef struct +{ + IMG_UINT32 ui32Stname; + IMG_UINT32 ui32Stvalue; + IMG_UINT32 ui32Stsize; + IMG_UINT8 ui32Stinfo; + IMG_UINT8 ui32Stother; + IMG_UINT16 ui32Stshndx; +} IMG_ELF_SYM; + +typedef struct +{ + IMG_UINT32 ui32Shname; + IMG_UINT32 ui32Shtype; + IMG_UINT32 ui32Shflags; + IMG_UINT32 ui32Shaddr; + IMG_UINT32 ui32Shoffset; + IMG_UINT32 ui32Shsize; + IMG_UINT32 ui32Shlink; + IMG_UINT32 ui32Shinfo; + IMG_UINT32 ui32Shaddralign; + IMG_UINT32 ui32Shentsize; +} IMG_ELF_SHDR; + +typedef struct +{ + IMG_UINT32 ui32Ptype; + IMG_UINT32 ui32Poffset; + IMG_UINT32 ui32Pvaddr; + IMG_UINT32 ui32Ppaddr; + IMG_UINT32 ui32Pfilesz; + IMG_UINT32 ui32Pmemsz; + IMG_UINT32 ui32Pflags; + IMG_UINT32 ui32Palign; +} IMG_ELF_PROGRAM_HDR; + +#endif /* IMG_ELF_H */ diff --git a/drivers/gpu/drm/phytium/octopus/img_types.h b/drivers/gpu/drm/phytium/octopus/img_types.h new file mode 100644 index 000000000000..69a6d9e289e9 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/img_types.h @@ -0,0 +1,298 @@ +/*************************************************************************/ /*! 
+@File +@Title Global types for use by IMG APIs +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Defines type aliases for use by IMG APIs. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef IMG_TYPES_H +#define IMG_TYPES_H +#if defined(__cplusplus) +extern "C" { +#endif + +/* To use C99 types and definitions, there are two special cases we need to + * cater for: + * + * - Visual Studio: in VS2010 or later, some standard headers are available, + * and MSVC has its own built-in sized types. We can define the C99 types + * in terms of these. + * + * - Linux kernel code: C99 sized types are defined in , but + * some other features (like macros for constants or printf format + * strings) are missing, so we need to fill in the gaps ourselves. + * + * For other cases (userspace code under Linux, Android or Neutrino, or + * firmware code), we can include the standard headers. 
+ */
+#if defined(_MSC_VER)
+	#include <stdbool.h> /* bool */
+	#include "msvc_types.h"
+#elif defined(__linux__) && defined(__KERNEL__)
+	#include <linux/kernel.h>
+	#include <linux/types.h>
+	#include "kernel_types.h"
+#elif defined(__linux__) || defined(__METAG) || defined(__MINGW32__) || \
+	defined(__QNXNTO__) || defined(INTEGRITY_OS) || defined(__riscv)
+	#include <stddef.h> /* NULL */
+	#include <stdint.h>
+	#include <inttypes.h> /* intX_t/uintX_t, format specifiers */
+	#include <limits.h> /* INT_MIN, etc */
+	#include <stdbool.h> /* bool */
+#elif defined(__mips)
+	#include <stddef.h> /* NULL */
+	#include <inttypes.h> /* intX_t/uintX_t, format specifiers */
+	#include <stdbool.h> /* bool */
+#else
+	#error C99 support not set up for this build
+#endif
+
+/*
+ * Due to a Klocwork bug, 'true'/'false' constants are not recognized to be of
+ * boolean type. This results in large number of false-positives being reported
+ * (MISRA.ETYPE.ASSIGN.2012: "An expression value of essential type 'signed char'
+ * is assigned to an object of essential type 'bool'"). Work around this by
+ * redefining those constants with cast to bool added.
+ */ +#if defined(__KLOCWORK__) && !defined(__cplusplus) +#undef true +#undef false +#define true ((bool) 1) +#define false ((bool) 0) +#endif + +typedef unsigned int IMG_UINT; +typedef int IMG_INT; + +typedef uint8_t IMG_UINT8, *IMG_PUINT8; +typedef uint8_t IMG_BYTE, *IMG_PBYTE; +typedef int8_t IMG_INT8; +typedef char IMG_CHAR, *IMG_PCHAR; + +typedef uint16_t IMG_UINT16, *IMG_PUINT16; +typedef int16_t IMG_INT16; +typedef uint32_t IMG_UINT32, *IMG_PUINT32; +typedef int32_t IMG_INT32, *IMG_PINT32; +#define IMG_UINT32_C(c) ((IMG_UINT32)UINT32_C(c)) + +typedef uint64_t IMG_UINT64, *IMG_PUINT64; +typedef int64_t IMG_INT64; +#define IMG_INT64_C(c) INT64_C(c) +#define IMG_UINT64_C(c) UINT64_C(c) +#define IMG_UINT16_C(c) UINT16_C(c) +#define IMG_UINT64_FMTSPEC PRIu64 +#define IMG_UINT64_FMTSPECX PRIX64 +#define IMG_UINT64_FMTSPECx PRIx64 +#define IMG_UINT64_FMTSPECo PRIo64 +#define IMG_INT64_FMTSPECd PRId64 + +#define IMG_UINT16_MAX UINT16_MAX +#define IMG_UINT32_MAX UINT32_MAX +#define IMG_UINT64_MAX UINT64_MAX + +#define IMG_INT16_MAX INT16_MAX +#define IMG_INT32_MAX INT32_MAX +#define IMG_INT64_MAX INT64_MAX + +/* Linux kernel mode does not use floating point */ +typedef float IMG_FLOAT, *IMG_PFLOAT; +typedef double IMG_DOUBLE; + +typedef union +{ + IMG_UINT32 ui32; + IMG_FLOAT f; +} IMG_UINT32_FLOAT; + +typedef int IMG_SECURE_TYPE; + +typedef enum tag_img_bool +{ + IMG_FALSE = 0, + IMG_TRUE = 1, + IMG_FORCE_ALIGN = 0x7FFFFFFF +} IMG_BOOL, *IMG_PBOOL; + +#if defined(UNDER_WDDM) || defined(WINDOWS_WDF) +typedef IMG_CHAR const* IMG_PCCHAR; +#endif + +/* Format specifiers for 'size_t' type */ +#if defined(_MSC_VER) || defined(__MINGW32__) +#define IMG_SIZE_FMTSPEC "%Iu" +#define IMG_SIZE_FMTSPECX "%Ix" +#else +#define IMG_SIZE_FMTSPEC "%zu" +#define IMG_SIZE_FMTSPECX "%zx" +#endif + +#if defined(__linux__) && defined(__KERNEL__) +/* prints the function name when used with printk */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) +#define IMG_PFN_FMTSPEC "%ps" +#else 
+#define IMG_PFN_FMTSPEC "%pf" +#endif +#else +#define IMG_PFN_FMTSPEC "%p" +#endif + +typedef void *IMG_HANDLE; + +/* Process IDs */ +typedef IMG_UINT32 IMG_PID; + +/* OS connection type */ +typedef int IMG_OS_CONNECTION; + + +/* + * Address types. + * All types used to refer to a block of memory are wrapped in structures + * to enforce some degree of type safety, i.e. a IMG_DEV_VIRTADDR cannot + * be assigned to a variable of type IMG_DEV_PHYADDR because they are not the + * same thing. + * + * There is an assumption that the system contains at most one non-cpu mmu, + * and a memory block is only mapped by the MMU once. + * + * Different devices could have offset views of the physical address space. + * + */ + + +/* + * + * +------------+ +------------+ +------------+ +------------+ + * | CPU | | DEV | | DEV | | DEV | + * +------------+ +------------+ +------------+ +------------+ + * | | | | + * | void * |IMG_DEV_VIRTADDR |IMG_DEV_VIRTADDR | + * | \-------------------/ | + * | | | + * +------------+ +------------+ | + * | MMU | | MMU | | + * +------------+ +------------+ | + * | | | + * | | | + * | | | + * +--------+ +---------+ +--------+ + * | Offset | | (Offset)| | Offset | + * +--------+ +---------+ +--------+ + * | | IMG_DEV_PHYADDR | + * | | | + * | | IMG_DEV_PHYADDR | + * +---------------------------------------------------------------------+ + * | System Address bus | + * +---------------------------------------------------------------------+ + * + */ + +#define IMG_DEV_VIRTADDR_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX +#define IMG_DEVMEM_SIZE_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX +#define IMG_DEVMEM_ALIGN_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX +#define IMG_DEVMEM_OFFSET_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX + +/* cpu physical address */ +typedef struct +{ +#if defined(UNDER_WDDM) || defined(WINDOWS_WDF) + uintptr_t uiAddr; +#define IMG_CAST_TO_CPUPHYADDR_UINT(var) (uintptr_t)(var) +#elif defined(__linux__) && defined(__KERNEL__) + phys_addr_t uiAddr; +#define 
IMG_CAST_TO_CPUPHYADDR_UINT(var) (phys_addr_t)(var) +#else + IMG_UINT64 uiAddr; +#define IMG_CAST_TO_CPUPHYADDR_UINT(var) (IMG_UINT64)(var) +#endif +} IMG_CPU_PHYADDR; + +/* device physical address */ +typedef struct +{ + IMG_UINT64 uiAddr; +} IMG_DEV_PHYADDR; + +/* dma address */ +typedef struct +{ + IMG_UINT64 uiAddr; +} IMG_DMA_ADDR; + +/* + rectangle structure +*/ +typedef struct +{ + IMG_INT32 x0; + IMG_INT32 y0; + IMG_INT32 x1; + IMG_INT32 y1; +} IMG_RECT; + +typedef struct +{ + IMG_INT16 x0; + IMG_INT16 y0; + IMG_INT16 x1; + IMG_INT16 y1; +} IMG_RECT_16; + +/* + * box structure + */ +typedef struct +{ + IMG_INT32 x0; + IMG_INT32 y0; + IMG_INT32 z0; + IMG_INT32 x1; + IMG_INT32 y1; + IMG_INT32 z1; +} IMG_BOX; + +#if defined(__cplusplus) +} +#endif + +#endif /* IMG_TYPES_H */ +/****************************************************************************** + End of file (img_types.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/img_types_check.h b/drivers/gpu/drm/phytium/octopus/img_types_check.h new file mode 100644 index 000000000000..f217e8a47859 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/img_types_check.h @@ -0,0 +1,58 @@ +/*************************************************************************/ /*! +@File +@Title Global types for use by IMG APIs +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Performs size checks on some of the IMG types. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/
+
+#ifndef IMG_TYPES_CHECK_H
+#define IMG_TYPES_CHECK_H
+
+#ifndef __KERNEL__
+#include <assert.h>
+#endif /* __KERNEL__ */
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+static_assert(sizeof(IMG_BOOL) == 4, "invalid size of IMG_BOOL");
+static_assert(sizeof(IMG_INT) == 4, "invalid size of IMG_INT");
+static_assert(sizeof(IMG_UINT) == 4, "invalid size of IMG_UINT");
+static_assert(sizeof(PVRSRV_ERROR) == 4, "invalid size of PVRSRV_ERROR");
+
+#endif /* IMG_TYPES_CHECK_H */
diff --git a/drivers/gpu/drm/phytium/octopus/info_page.h b/drivers/gpu/drm/phytium/octopus/info_page.h
new file mode 100644
index 000000000000..333a86714383
--- /dev/null
+++ b/drivers/gpu/drm/phytium/octopus/info_page.h
@@ -0,0 +1,99 @@
+/*************************************************************************/ /*!
+@File
+@Title Kernel/User mode general purpose shared memory.
+@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved
+@Description General purpose memory shared between kernel driver and user
+ mode.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef INFO_PAGE_KM_H +#define INFO_PAGE_KM_H + +#include "pvrsrv_error.h" + +#include "pmr.h" +#include "pvrsrv.h" +#include "info_page_defs.h" + +/** + * @Function InfoPageCreate + * @Description Allocates resources for global information page. + * @Input psData pointer to PVRSRV data + * @Return PVRSRV_OK on success and other PVRSRV_ERROR code on error. + */ +PVRSRV_ERROR InfoPageCreate(PVRSRV_DATA *psData); + +/** + * @Function InfoPageDestroy + * @Description Frees all of the resource of global information page. + * @Input psData pointer to PVRSRV data + * @Return PVRSRV_OK on success and other PVRSRV_ERROR code on error. 
+ */ +void InfoPageDestroy(PVRSRV_DATA *psData); + +/** + * @Function PVRSRVAcquireInfoPageKM() + * @Description This interface is used for obtaining the global information page + * which acts as a general purpose shared memory between KM and UM. + * The use of this information page outside of services is _not_ + * recommended. + * @Output ppsPMR handle to exported PMR + * @Return + */ +PVRSRV_ERROR PVRSRVAcquireInfoPageKM(PMR **ppsPMR); + +/** + * @Function PVRSRVReleaseInfoPageKM() + * @Description This function matches PVRSRVAcquireInfoPageKM(). + * @Input psPMR handle to exported PMR + * @Return PVRSRV_OK on success and other PVRSRV_ERROR code on error. + */ +PVRSRV_ERROR PVRSRVReleaseInfoPageKM(PMR *psPMR); + +/** + * @Function GetInfoPageDebugFlagsKM() + * @Description Return info page debug flags + * @Return info page debug flags + */ +static INLINE IMG_UINT32 GetInfoPageDebugFlagsKM(void) +{ + return (PVRSRVGetPVRSRVData())->pui32InfoPage[DEBUG_FEATURE_FLAGS]; +} + +#endif /* INFO_PAGE_KM_H */ diff --git a/drivers/gpu/drm/phytium/octopus/info_page_client.h b/drivers/gpu/drm/phytium/octopus/info_page_client.h new file mode 100644 index 000000000000..88d08b61bc6d --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/info_page_client.h @@ -0,0 +1,89 @@ +/*************************************************************************/ /*! +@File +@Title Kernel/User mode general purpose shared memory. +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description General purpose shared memory (i.e. information page) mapped by + kernel space driver and user space clients. All info page + entries are sizeof(IMG_UINT32) on both 32/64-bit environments. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef INFO_PAGE_CLIENT_H +#define INFO_PAGE_CLIENT_H + +#include "device_connection.h" +#include "info_page_defs.h" +#if defined(__KERNEL__) +#include "pvrsrv.h" +#endif + +/*************************************************************************/ /*! +@Function GetInfoPage + +@Description Return Info Page address + +@Input hDevConnection - Services device connection + +@Return Info Page address +*/ +/*****************************************************************************/ +static INLINE IMG_PUINT32 GetInfoPage(SHARED_DEV_CONNECTION hDevConnection) +{ +#if defined(__KERNEL__) + return (PVRSRVGetPVRSRVData())->pui32InfoPage; +#else + return hDevConnection->pui32InfoPage; +#endif +} + +/*************************************************************************/ /*! +@Function GetInfoPageDebugFlags + +@Description Return Info Page debug flags + +@Input hDevConnection - Services device connection + +@Return Info Page debug flags +*/ +/*****************************************************************************/ +static INLINE IMG_UINT32 GetInfoPageDebugFlags(SHARED_DEV_CONNECTION hDevConnection) +{ + return GetInfoPage(hDevConnection)[DEBUG_FEATURE_FLAGS]; +} + +#endif /* INFO_PAGE_CLIENT_H */ diff --git a/drivers/gpu/drm/phytium/octopus/info_page_defs.h b/drivers/gpu/drm/phytium/octopus/info_page_defs.h new file mode 100644 index 000000000000..9079d6584490 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/info_page_defs.h @@ -0,0 +1,91 @@ +/*************************************************************************/ /*! +@File +@Title Kernel/User mode general purpose shared memory. +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description General purpose shared memory (i.e. information page) mapped by + kernel space driver and user space clients. All information page + entries are sizeof(IMG_UINT32) on both 32/64-bit environments. 
+@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef INFO_PAGE_DEFS_H +#define INFO_PAGE_DEFS_H + + +/* CacheOp information page entries */ +#define CACHEOP_INFO_IDX_START 0x00 +#define CACHEOP_INFO_UMKMTHRESHLD (CACHEOP_INFO_IDX_START + 1) /*!< UM=>KM routing threshold in bytes */ +#define CACHEOP_INFO_KMDFTHRESHLD (CACHEOP_INFO_IDX_START + 2) /*!< KM/DF threshold in bytes */ +#define CACHEOP_INFO_LINESIZE (CACHEOP_INFO_IDX_START + 3) /*!< CPU data cache line size */ +#define CACHEOP_INFO_PGSIZE (CACHEOP_INFO_IDX_START + 4) /*!< CPU MMU page size */ +#define CACHEOP_INFO_IDX_END (CACHEOP_INFO_IDX_START + 5) + +/* HWPerf information page entries */ +#define HWPERF_INFO_IDX_START (CACHEOP_INFO_IDX_END) +#define HWPERF_FILTER_SERVICES_IDX (HWPERF_INFO_IDX_START + 0) +#define HWPERF_FILTER_EGL_IDX (HWPERF_INFO_IDX_START + 1) +#define HWPERF_FILTER_OPENGLES_IDX (HWPERF_INFO_IDX_START + 2) +#define HWPERF_FILTER_OPENCL_IDX (HWPERF_INFO_IDX_START + 3) +#define HWPERF_FILTER_VULKAN_IDX (HWPERF_INFO_IDX_START + 4) +#define HWPERF_FILTER_OPENGL_IDX (HWPERF_INFO_IDX_START + 5) +#define HWPERF_INFO_IDX_END (HWPERF_INFO_IDX_START + 6) + +/* timeout values */ +#define TIMEOUT_INFO_IDX_START (HWPERF_INFO_IDX_END) +#define TIMEOUT_INFO_VALUE_RETRIES (TIMEOUT_INFO_IDX_START + 0) +#define TIMEOUT_INFO_VALUE_TIMEOUT_MS (TIMEOUT_INFO_IDX_START + 1) +#define TIMEOUT_INFO_CONDITION_RETRIES (TIMEOUT_INFO_IDX_START + 2) 
+#define TIMEOUT_INFO_CONDITION_TIMEOUT_MS (TIMEOUT_INFO_IDX_START + 3) +#define TIMEOUT_INFO_TASK_QUEUE_RETRIES (TIMEOUT_INFO_IDX_START + 4) +#define TIMEOUT_INFO_TASK_QUEUE_FLUSH_TIMEOUT_MS (TIMEOUT_INFO_IDX_START + 5) +#define TIMEOUT_INFO_IDX_END (TIMEOUT_INFO_IDX_START + 6) + +/* Bridge Info */ +#define BRIDGE_INFO_IDX_START (TIMEOUT_INFO_IDX_END) +#define BRIDGE_INFO_RGX_BRIDGES (BRIDGE_INFO_IDX_START + 0) +#define BRIDGE_INFO_PVR_BRIDGES (BRIDGE_INFO_IDX_START + 1) +#define BRIDGE_INFO_IDX_END (BRIDGE_INFO_IDX_START + 2) + +/* Debug features */ +#define DEBUG_FEATURE_FLAGS (BRIDGE_INFO_IDX_END) +#define DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED 0x1 +#define DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED 0x2 +#define DEBUG_FEATURE_FLAGS_IDX_END (DEBUG_FEATURE_FLAGS + 1) + + +#endif /* INFO_PAGE_DEFS_H */ diff --git a/drivers/gpu/drm/phytium/octopus/info_page_km.c b/drivers/gpu/drm/phytium/octopus/info_page_km.c new file mode 100644 index 000000000000..f97ab25de775 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/info_page_km.c @@ -0,0 +1,138 @@ +/*************************************************************************/ /*! +@File info_page_km.c +@Title Kernel/User space shared memory +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements general purpose shared memory between kernel driver + and user mode. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "info_page_defs.h" +#include "info_page.h" +#include "pvrsrv.h" +#include "devicemem.h" +#include "pmr.h" + +PVRSRV_ERROR InfoPageCreate(PVRSRV_DATA *psData) +{ + const PVRSRV_MEMALLOCFLAGS_T uiMemFlags = PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | + PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(CPU_LOCAL); + PVRSRV_ERROR eError; + + PVR_ASSERT(psData != NULL); + + /* Allocate single page of memory for driver information page */ + eError = DevmemAllocateExportable(psData->psHostMemDeviceNode, + OSGetPageSize(), + OSGetPageSize(), + OSGetPageShift(), + uiMemFlags, + "PVRSRVInfoPage", + &psData->psInfoPageMemDesc); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAllocateExportable", e0); + + eError = DevmemAcquireCpuVirtAddr(psData->psInfoPageMemDesc, + (void **) &psData->pui32InfoPage); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAllocateExportable", e0); + + /* Look-up the memory descriptor PMR handle */ + eError = DevmemLocalGetImportHandle(psData->psInfoPageMemDesc, + (void **) &psData->psInfoPagePMR); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemLocalGetImportHandle", e0); + + eError = OSLockCreate(&psData->hInfoPageLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e0); + + return PVRSRV_OK; + +e0: + InfoPageDestroy(psData); + return eError; +} + +void InfoPageDestroy(PVRSRV_DATA *psData) +{ + if (psData->psInfoPageMemDesc) + { + if (psData->pui32InfoPage != NULL) + { + DevmemReleaseCpuVirtAddr(psData->psInfoPageMemDesc); + psData->pui32InfoPage = NULL; + } + + DevmemFree(psData->psInfoPageMemDesc); + psData->psInfoPageMemDesc = NULL; + } + + if (psData->hInfoPageLock) + { + OSLockDestroy(psData->hInfoPageLock); + psData->hInfoPageLock = NULL; + } +} + +PVRSRV_ERROR PVRSRVAcquireInfoPageKM(PMR **ppsPMR) +{ + PVRSRV_DATA *psData = 
PVRSRVGetPVRSRVData(); + + PVR_LOG_RETURN_IF_FALSE(psData->psInfoPageMemDesc != NULL, "invalid MEMDESC" + " handle", PVRSRV_ERROR_INVALID_PARAMS); + PVR_LOG_RETURN_IF_FALSE(psData->psInfoPagePMR != NULL, "invalid PMR handle", + PVRSRV_ERROR_INVALID_PARAMS); + + /* Copy the PMR import handle back */ + *ppsPMR = psData->psInfoPagePMR; + + /* Mark the PMR such that no layout changes can happen + * This is a fixed layout created during early stages of + * driver loading and shouldn't change later */ + PMR_SetLayoutFixed(psData->psInfoPagePMR, IMG_TRUE); + + return PVRSRV_OK; +} + +PVRSRV_ERROR PVRSRVReleaseInfoPageKM(PMR *ppsPMR) +{ + /* Nothing to do here as PMR is singleton */ + PVR_UNREFERENCED_PARAMETER(ppsPMR); + return PVRSRV_OK; +} diff --git a/drivers/gpu/drm/phytium/octopus/interrupt_support.c b/drivers/gpu/drm/phytium/octopus/interrupt_support.c new file mode 100644 index 000000000000..20b5f19d1ec0 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/interrupt_support.c @@ -0,0 +1,151 @@ +/*************************************************************************/ /*! +@File +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/
+
+/* Bug fix: the header name was lost from this line in the patch.
+ * <linux/interrupt.h> supplies request_irq()/free_irq(), irqreturn_t
+ * and the IRQF_* flags used below. */
+#include <linux/interrupt.h>
+
+#include "pvr_debug.h"
+#include "allocmem.h"
+#include "interrupt_support.h"
+
+/* Book-keeping for one installed system LISR. */
+typedef struct LISR_DATA_TAG
+{
+    IMG_UINT32 ui32IRQ;   /* IRQ line the handler is attached to */
+    PFN_SYS_LISR pfnLISR; /* client handler */
+    void *pvData;         /* opaque data passed to pfnLISR */
+} LISR_DATA;
+
+/* Linux IRQ entry point: forwards to the registered PFN_SYS_LISR and
+ * translates its IMG_BOOL result into IRQ_HANDLED/IRQ_NONE. */
+static irqreturn_t SystemISRWrapper(int irq, void *dev_id)
+{
+    LISR_DATA *psLISRData = (LISR_DATA *)dev_id;
+
+    PVR_UNREFERENCED_PARAMETER(irq);
+
+    if (psLISRData)
+    {
+        if (psLISRData->pfnLISR(psLISRData->pvData))
+        {
+            return IRQ_HANDLED;
+        }
+    }
+    else
+    {
+        PVR_DPF((PVR_DBG_ERROR, "%s: Missing interrupt data", __func__));
+    }
+
+    return IRQ_NONE;
+}
+
+/* Install a system LISR on ui32IRQ; on success *phLISR receives the
+ * handle to pass to OSUninstallSystemLISR().  ui32Flags selects the
+ * trigger type and optional IRQ sharing (SYS_IRQ_FLAG_*). */
+PVRSRV_ERROR OSInstallSystemLISR(IMG_HANDLE *phLISR,
+                                 IMG_UINT32 ui32IRQ,
+                                 const IMG_CHAR *pszDevName,
+                                 PFN_SYS_LISR pfnLISR,
+                                 void *pvData,
+                                 IMG_UINT32 ui32Flags)
+{
+    LISR_DATA *psLISRData;
+    unsigned long ulIRQFlags = 0;
+
+    /* Robustness fix: also reject a NULL output pointer (previously
+     * only pfnLISR/pvData were validated, so a NULL phLISR would be
+     * dereferenced after request_irq() succeeded). */
+    if (phLISR == NULL || pfnLISR == NULL || pvData == NULL)
+    {
+        return PVRSRV_ERROR_INVALID_PARAMS;
+    }
+
+    if (ui32Flags & ~SYS_IRQ_FLAG_MASK)
+    {
+        return PVRSRV_ERROR_INVALID_PARAMS;
+    }
+
+    /* Map the SYS_IRQ_FLAG_* trigger selection onto Linux IRQF_* flags. */
+    switch (ui32Flags & SYS_IRQ_FLAG_TRIGGER_MASK)
+    {
+        case SYS_IRQ_FLAG_TRIGGER_DEFAULT:
+            break;
+        case SYS_IRQ_FLAG_TRIGGER_LOW:
+            ulIRQFlags |= IRQF_TRIGGER_LOW;
+            break;
+        case SYS_IRQ_FLAG_TRIGGER_HIGH:
+            ulIRQFlags |= IRQF_TRIGGER_HIGH;
+            break;
+        default:
+            return PVRSRV_ERROR_INVALID_PARAMS;
+    }
+
+    if (ui32Flags & SYS_IRQ_FLAG_SHARED)
+    {
+        ulIRQFlags |= IRQF_SHARED;
+    }
+
+    psLISRData = OSAllocMem(sizeof(*psLISRData));
+    if (psLISRData == NULL)
+    {
+        return PVRSRV_ERROR_OUT_OF_MEMORY;
+    }
+
+    psLISRData->ui32IRQ = ui32IRQ;
+    psLISRData->pfnLISR = pfnLISR;
+    psLISRData->pvData = pvData;
+
+    if (request_irq(ui32IRQ, SystemISRWrapper, ulIRQFlags, pszDevName, psLISRData))
+    {
+        OSFreeMem(psLISRData);
+
+        return PVRSRV_ERROR_UNABLE_TO_REGISTER_ISR_HANDLER;
+    }
+
+    *phLISR = (IMG_HANDLE)psLISRData;
+
+    return PVRSRV_OK;
+}
+
+PVRSRV_ERROR OSUninstallSystemLISR(IMG_HANDLE hLISR)
+{
+    LISR_DATA *psLISRData = (LISR_DATA *)hLISR;
+ + if (psLISRData == NULL) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + free_irq(psLISRData->ui32IRQ, psLISRData); + + OSFreeMem(psLISRData); + + return PVRSRV_OK; +} diff --git a/drivers/gpu/drm/phytium/octopus/interrupt_support.h b/drivers/gpu/drm/phytium/octopus/interrupt_support.h new file mode 100644 index 000000000000..7e5f45218a3d --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/interrupt_support.h @@ -0,0 +1,103 @@ +/*************************************************************************/ /*! +@File +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(INTERRUPT_SUPPORT_H) +#define INTERRUPT_SUPPORT_H + +#include "img_types.h" +#include "pvrsrv_error.h" +#include "pvrsrv_device.h" + +/*! Default trigger type for the interrupt line. */ +#define SYS_IRQ_FLAG_TRIGGER_DEFAULT (0x0 << 0) +/*! Interrupt triggered when interrupt line is low. */ +#define SYS_IRQ_FLAG_TRIGGER_LOW (0x1 << 0) +/*! Interrupt triggered when interrupt line is high. */ +#define SYS_IRQ_FLAG_TRIGGER_HIGH (0x2 << 0) +/*! Interrupt trigger mask. */ +#define SYS_IRQ_FLAG_TRIGGER_MASK (SYS_IRQ_FLAG_TRIGGER_DEFAULT | \ + SYS_IRQ_FLAG_TRIGGER_LOW | \ + SYS_IRQ_FLAG_TRIGGER_HIGH) +/*! The irq is allowed to be shared among several devices. */ +#define SYS_IRQ_FLAG_SHARED (0x1 << 8) + +/*! Interrupt flags mask. */ +#define SYS_IRQ_FLAG_MASK (SYS_IRQ_FLAG_TRIGGER_MASK | \ + SYS_IRQ_FLAG_SHARED) + +/*************************************************************************/ /*! +@Description Pointer to a system Low-level Interrupt Service Routine (LISR). +@Input pvData Private data provided to the LISR. +@Return IMG_TRUE if interrupt handled, IMG_FALSE otherwise. 
+*/ /**************************************************************************/ +typedef IMG_BOOL (*PFN_SYS_LISR)(void *pvData); + +/*************************************************************************/ /*! +@Function OSInstallSystemLISR +@Description Installs a system low-level interrupt handler +@Output phLISR On return, contains a handle to the + installed LISR +@Input ui32IRQ The IRQ number for which the + interrupt handler should be installed +@Input pszDevName Name of the device for which the handler + is being installed +@Input pfnLISR A pointer to an interrupt handler + function +@Input pvData A pointer to data that should be passed + to pfnLISR when it is called +@Input ui32Flags Interrupt flags +@Return PVRSRV_OK on success, a failure code otherwise +*/ /**************************************************************************/ +PVRSRV_ERROR OSInstallSystemLISR(IMG_HANDLE *phLISR, + IMG_UINT32 ui32IRQ, + const IMG_CHAR *pszDevName, + PFN_SYS_LISR pfnLISR, + void *pvData, + IMG_UINT32 ui32Flags); + +/*************************************************************************/ /*! +@Function OSUninstallSystemLISR +@Description Uninstalls a system low-level interrupt handler +@Input hLISRData The handle to the LISR to uninstall +@Return PVRSRV_OK on success, a failure code otherwise +*/ /**************************************************************************/ +PVRSRV_ERROR OSUninstallSystemLISR(IMG_HANDLE hLISRData); +#endif /* !defined(INTERRUPT_SUPPORT_H) */ diff --git a/drivers/gpu/drm/phytium/octopus/kernel_compatibility.h b/drivers/gpu/drm/phytium/octopus/kernel_compatibility.h new file mode 100644 index 000000000000..0c37405dfc70 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/kernel_compatibility.h @@ -0,0 +1,497 @@ +/*************************************************************************/ /*! +@Title Kernel versions compatibility macros +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Description Per-version macros to allow code to seamlessly use older kernel +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef __KERNEL_COMPATIBILITY_H__ +#define __KERNEL_COMPATIBILITY_H__ + +#include + +/* + * Stop supporting an old kernel? Remove the top block. + * New incompatible kernel? Append a new block at the bottom. + * + * Please write you version test as `VERSION < X.Y`, and use the earliest + * possible version :) + */ + +/* Linux 3.6 introduced seq_vprintf(). Earlier versions don't have this + * so we work around the limitation by vsnprintf() + seq_puts(). 
+ */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) +#define seq_vprintf(seq_file, fmt, args) \ +do { \ + char aszBuffer[512]; /* maximum message buffer size */ \ + vsnprintf(aszBuffer, sizeof(aszBuffer), fmt, args); \ + seq_puts(seq_file, aszBuffer); \ +} while (0) +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) + +/* Linux 3.7 split VM_RESERVED into VM_DONTDUMP and VM_DONTEXPAND */ +#define VM_DONTDUMP VM_RESERVED + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) */ + +/* + * Note: this fix had to be written backwards because get_unused_fd_flags + * was already defined but not exported on kernels < 3.7 + * + * When removing support for kernels < 3.7, this block should be removed + * and all `get_unused_fd()` should be manually replaced with + * `get_unused_fd_flags(0)` + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) + +/* Linux 3.19 removed get_unused_fd() */ +/* get_unused_fd_flags was introduced in 3.7 */ +#define get_unused_fd() get_unused_fd_flags(0) + +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) + +/* + * Headers shouldn't normally be included by this file but this is a special + * case as it's not obvious from the name that devfreq_add_device needs this + * include. 
+ */ +#include + +#define devfreq_add_device(dev, profile, name, data) \ + ({ \ + struct devfreq *__devfreq; \ + if (name && !strcmp(name, "simple_ondemand")) \ + __devfreq = devfreq_add_device(dev, profile, \ + &devfreq_simple_ondemand, data); \ + else \ + __devfreq = ERR_PTR(-EINVAL); \ + __devfreq; \ + }) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) */ + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)) + +#define DRIVER_RENDER 0 +#define DRM_RENDER_ALLOW 0 + +/* Linux 3.12 introduced a new shrinker API */ +#define SHRINK_STOP (~0UL) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)) */ + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)) + +#define dev_pm_opp_get_opp_count(dev) opp_get_opp_count(dev) +#define dev_pm_opp_get_freq(opp) opp_get_freq(opp) +#define dev_pm_opp_get_voltage(opp) opp_get_voltage(opp) +#define dev_pm_opp_add(dev, freq, u_volt) opp_add(dev, freq, u_volt) +#define dev_pm_opp_find_freq_ceil(dev, freq) opp_find_freq_ceil(dev, freq) + +#if defined(CONFIG_ARM) +/* Linux 3.13 renamed ioremap_cached to ioremap_cache */ +#define ioremap_cache(cookie, size) ioremap_cached(cookie, size) +#endif /* defined(CONFIG_ARM) */ + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)) */ + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)) + +/* Linux 3.14 introduced a new set of sized min and max defines */ +#ifndef U32_MAX +#define U32_MAX ((u32)UINT_MAX) +#endif + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)) */ + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) + +/* Linux 3.17 changed the 3rd argument from a `struct page ***pages` to + * `struct page **pages` */ +#define map_vm_area(area, prot, pages) map_vm_area(area, prot, &pages) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) */ + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)) + +/* + * Linux 4.7 removed this function but its replacement was available since 3.19. 
+ */ +#define drm_crtc_send_vblank_event(crtc, e) drm_send_vblank_event((crtc)->dev, drm_crtc_index(crtc), e) + +/* seq_has_overflowed() was introduced in 3.19 but the structure elements + * have been available since 2.x + */ +#include +static inline bool seq_has_overflowed(struct seq_file *m) +{ + return m->count == m->size; +} + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)) */ + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)) + +#define debugfs_create_file_size(name, mode, parent, data, fops, file_size) \ + ({ \ + struct dentry *de; \ + de = debugfs_create_file(name, mode, parent, data, fops); \ + if (de) \ + de->d_inode->i_size = file_size; \ + de; \ + }) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)) +#define drm_fb_helper_unregister_fbi(fb_helper) \ + ({ \ + if ((fb_helper) && (fb_helper)->fbdev) \ + unregister_framebuffer((fb_helper)->fbdev); \ + }) +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)) + +/* Linux 4.4 renamed GFP_WAIT to GFP_RECLAIM */ +#define __GFP_RECLAIM __GFP_WAIT + +#if !defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) +#define dev_pm_opp_of_add_table(dev) of_init_opp_table(dev) +#define dev_pm_opp_of_remove_table(dev) of_free_opp_table(dev) +#else +#define sync_fence_create(data_name, sync_pt) sync_fence_create(data_name, &(sync_pt)->base) +#endif + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)) */ + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) && \ + (!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))) + +/* Linux 4.5 added a new printf-style parameter for debug messages */ + +#define drm_encoder_init(dev, encoder, funcs, encoder_type, name, ...) \ + drm_encoder_init(dev, encoder, funcs, encoder_type) + +#define drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, format_modifiers, type, name, ...) 
\ + ({ (void) format_modifiers; drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, type); }) + +#define drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs, name, ...) \ + drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs) + +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) + +#define drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, format_modifiers, type, name, ...) \ + ({ (void) format_modifiers; drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, type, name, ##__VA_ARGS__); }) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) */ + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) + +/* + * Linux 4.6 removed the first two parameters, the "struct task_struct" type + * pointer "current" is defined in asm/current.h, which makes it pointless + * to pass it on every function call. +*/ +#define get_user_pages(start, nr_pages, gup_flags, pages, vmas) \ + get_user_pages(current, current->mm, start, nr_pages, gup_flags & FOLL_WRITE, gup_flags & FOLL_FORCE, pages, vmas) + +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)) + +/* Linux 4.9 replaced the write/force parameters with "gup_flags" */ +#define get_user_pages(start, nr_pages, gup_flags, pages, vmas) \ + get_user_pages(start, nr_pages, gup_flags & FOLL_WRITE, gup_flags & FOLL_FORCE, pages, vmas) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) */ + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) && \ + (!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))) + +/* + * Linux 4.6 removed the start and end arguments as it now always maps + * the entire DMA-BUF. + * Additionally, dma_buf_end_cpu_access() now returns an int error. 
+ */ +#define dma_buf_begin_cpu_access(DMABUF, DIRECTION) dma_buf_begin_cpu_access(DMABUF, 0, DMABUF->size, DIRECTION) +#define dma_buf_end_cpu_access(DMABUF, DIRECTION) ({ dma_buf_end_cpu_access(DMABUF, 0, DMABUF->size, DIRECTION); 0; }) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) && \ + (!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)) + +/* Linux 4.7 removed the first arguments as it's never been used */ +#define drm_gem_object_lookup(filp, handle) drm_gem_object_lookup((filp)->minor->dev, filp, handle) + +/* Linux 4.7 replaced nla_put_u64 with nla_put_u64_64bit */ +#define nla_put_u64_64bit(skb, attrtype, value, padattr) nla_put_u64(skb, attrtype, value) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)) + +/* Linux 4.9 changed the second argument to a drm_file pointer */ +#define drm_vma_node_is_allowed(node, file_priv) drm_vma_node_is_allowed(node, (file_priv)->filp) +#define drm_vma_node_allow(node, file_priv) drm_vma_node_allow(node, (file_priv)->filp) +#define drm_vma_node_revoke(node, file_priv) drm_vma_node_revoke(node, (file_priv)->filp) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) +#define refcount_read(r) atomic_read(r) +#define drm_mm_insert_node(mm, node, size) drm_mm_insert_node(mm, node, size, 0, DRM_MM_SEARCH_DEFAULT) + +#define drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd) drm_helper_mode_fill_fb_struct(fb, mode_cmd) + +/* + * In Linux Kernels >= 4.12 for x86 another level of page tables has been + * added. The added level (p4d) sits between pgd and pud, so when it + * doesn`t exist, pud_offset function takes pgd as a parameter instead + * of p4d. 
+ */ +#define p4d_t pgd_t +#define p4d_offset(pgd, address) (pgd) +#define p4d_none(p4d) (0) +#define p4d_bad(p4d) (0) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) */ + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) + +#define drm_mode_object_get(obj) drm_mode_object_reference(obj) +#define drm_mode_object_put(obj) drm_mode_object_unreference(obj) +#define drm_connector_get(obj) drm_connector_reference(obj) +#define drm_connector_put(obj) drm_connector_unreference(obj) +#define drm_framebuffer_get(obj) drm_framebuffer_reference(obj) +#define drm_framebuffer_put(obj) drm_framebuffer_unreference(obj) +#define drm_gem_object_get(obj) drm_gem_object_reference(obj) +#define drm_gem_object_put_locked(obj) drm_gem_object_unreference(obj) +#define __drm_gem_object_put(obj) __drm_gem_object_unreference(obj) +#define drm_property_blob_get(obj) drm_property_reference_blob(obj) +#define drm_property_blob_put(obj) drm_property_unreference_blob(obj) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)) + +#define drm_dev_put(dev) drm_dev_unref(dev) + +#define drm_mode_object_find(dev, file_priv, id, type) drm_mode_object_find(dev, id, type) +#define drm_encoder_find(dev, file_priv, id) drm_encoder_find(dev, id) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) + +#define drm_atomic_helper_check_plane_state(plane_state, crtc_state, \ + min_scale, max_scale, \ + can_position, can_update_disabled) \ + ({ \ + const struct drm_rect __clip = { \ + .x2 = crtc_state->crtc->mode.hdisplay, \ + .y2 = crtc_state->crtc->mode.vdisplay, \ + }; \ + int __ret = drm_plane_helper_check_state(plane_state, \ + &__clip, \ + min_scale, max_scale, \ + can_position, \ + can_update_disabled); \ + __ret; \ + }) + +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)) + +#define drm_atomic_helper_check_plane_state(plane_state, crtc_state, \ + 
min_scale, max_scale, \ + can_position, can_update_disabled) \ + ({ \ + const struct drm_rect __clip = { \ + .x2 = crtc_state->crtc->mode.hdisplay, \ + .y2 = crtc_state->crtc->mode.vdisplay, \ + }; \ + int __ret = drm_atomic_helper_check_plane_state(plane_state, \ + crtc_state, \ + &__clip, \ + min_scale, max_scale, \ + can_position, \ + can_update_disabled); \ + __ret; \ + }) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) + +#define drm_connector_attach_encoder(connector, encoder) \ + drm_mode_connector_attach_encoder(connector, encoder) + +#define drm_connector_update_edid_property(connector, edid) \ + drm_mode_connector_update_edid_property(connector, edid) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) */ + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)) + +/* + * Work around architectures, e.g. MIPS, that define copy_from_user and + * copy_to_user as macros that call access_ok, as this gets redefined below. + * As of kernel 4.12, these functions are no longer defined per-architecture + * so this work around isn't needed. + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) +#if defined(copy_from_user) + /* + * NOTE: This function should not be called directly as it exists simply to + * work around copy_from_user being defined as a macro that calls access_ok. + */ +static inline int +__pvr_copy_from_user(void *to, const void __user *from, unsigned long n) +{ + return copy_from_user(to, from, n); +} + +#undef copy_from_user +#define copy_from_user(to, from, n) __copy_from_user(to, from, n) +#endif + +#if defined(copy_to_user) + /* + * NOTE: This function should not be called directly as it exists simply to + * work around copy_to_user being defined as a macro that calls access_ok. 
+ */ +static inline int +__pvr_copy_to_user(void __user *to, const void *from, unsigned long n) +{ + return copy_to_user(to, from, n); +} + +#undef copy_to_user +#define copy_to_user(to, from, n) __copy_to_user(to, from, n) +#endif +#endif + +/* + * Linux 5.0 dropped the type argument. + * + * This is unused in at least Linux 3.4 and above for all architectures other + * than 'um' (User Mode Linux), which stopped using it in 4.2. + */ +#if defined(access_ok) + /* + * NOTE: This function should not be called directly as it exists simply to + * work around access_ok being defined as a macro. + */ +static inline int +__pvr_access_ok_compat(int type, const void __user * addr, unsigned long size) +{ + return access_ok(type, addr, size); +} + +#undef access_ok +#define access_ok(addr, size) __pvr_access_ok_compat(0, addr, size) +#else +#define access_ok(addr, size) access_ok(0, addr, size) +#endif + +#endif + +/* + * Before v5.8, the "struct mm" has a semaphore named "mmap_sem" which is + * renamed to "mmap_lock" in v5.8. Moreover, new APIs are provided to + * access this lock starting from v5.8. 
+ */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)) + +#define mmap_write_lock(mm) down_write(&mm->mmap_sem) +#define mmap_write_unlock(mm) up_write(&mm->mmap_sem) + +#define mmap_read_lock(mm) down_read(&mm->mmap_sem) +#define mmap_read_unlock(mm) up_read(&mm->mmap_sem) + +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) +#define drm_gem_object_put(obj) drm_gem_object_unreference_unlocked(obj) +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0)) +#define drm_gem_object_put(obj) drm_gem_object_put_unlocked(obj) +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) + +#define drm_prime_pages_to_sg(dev, pages, nr_pages) \ + drm_prime_pages_to_sg(pages, nr_pages) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0)) + +struct dma_buf_map { + void *vaddr; +}; + +#define dma_buf_vmap(dmabuf, map) \ + ({ \ + (map)->vaddr = dma_buf_vmap(dmabuf); \ + (map)->vaddr ? 0 : ((dmabuf) && (dmabuf)->ops->vmap) ? -ENOMEM : -EINVAL; \ + }) + +#define dma_buf_vunmap(dmabuf, map) \ + ({ \ + dma_buf_vunmap(dmabuf, (map)->vaddr); \ + (map)->vaddr = NULL; \ + }) + +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0) */ + +#endif /* __KERNEL_COMPATIBILITY_H__ */ diff --git a/drivers/gpu/drm/phytium/octopus/kernel_config_compatibility.h b/drivers/gpu/drm/phytium/octopus/kernel_config_compatibility.h new file mode 100644 index 000000000000..db1c791824b7 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/kernel_config_compatibility.h @@ -0,0 +1,54 @@ +/*************************************************************************/ /*! +@Title Kernel config compatibility define options +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description This file is exclusively for Linux config kernel options. 
+@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef __KERNEL_CONFIG_COMPATIBILITY_H__ +#define __KERNEL_CONFIG_COMPATIBILITY_H__ + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)) +#ifdef SUPPORT_DRM_FBDEV_EMULATION +#define CONFIG_DRM_FBDEV_EMULATION +#endif +#endif + +#endif /* __KERNEL_CONFIG_COMPATIBILITY_H__ */ diff --git a/drivers/gpu/drm/phytium/octopus/kernel_nospec.h b/drivers/gpu/drm/phytium/octopus/kernel_nospec.h new file mode 100644 index 000000000000..5a33d7a89445 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/kernel_nospec.h @@ -0,0 +1,71 @@ +/*************************************************************************/ /*! +@Title Macro to limit CPU speculative execution in kernel code +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Per-version macros to allow code to seamlessly use older kernel +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef __KERNEL_NOSPEC_H__ +#define __KERNEL_NOSPEC_H__ + +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 2) || \ + (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) && \ + LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 18)) || \ + (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) && \ + LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 81)) || \ + (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) && \ + LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 118))) +#include +#include +#include +#else +#define array_index_nospec(index, size) (index) +#endif + +/* + * For Ubuntu kernels, the features available for a given Linux version code + * may not match those in upstream kernels. This is the case for the + * availability of the array_index_nospec macro. + */ +#if !defined(array_index_nospec) +#define array_index_nospec(index, size) (index) +#endif + +#endif /* __KERNEL_NOSPEC_H__ */ diff --git a/drivers/gpu/drm/phytium/octopus/kernel_types.h b/drivers/gpu/drm/phytium/octopus/kernel_types.h new file mode 100644 index 000000000000..ee5b01dcdcd8 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/kernel_types.h @@ -0,0 +1,137 @@ +/*************************************************************************/ /*! +@Title C99-compatible types and definitions for Linux kernel code +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include + +/* Limits of specified-width integer types */ + +/* S8_MIN, etc were added in kernel version 3.14. The other versions are for + * earlier kernels. They can be removed once older kernels don't need to be + * supported. + */ +#ifdef S8_MIN + #define INT8_MIN S8_MIN +#else + #define INT8_MIN (-128) +#endif + +#ifdef S8_MAX + #define INT8_MAX S8_MAX +#else + #define INT8_MAX 127 +#endif + +#ifdef U8_MAX + #define UINT8_MAX U8_MAX +#else + #define UINT8_MAX 0xFF +#endif + +#ifdef S16_MIN + #define INT16_MIN S16_MIN +#else + #define INT16_MIN (-32768) +#endif + +#ifdef S16_MAX + #define INT16_MAX S16_MAX +#else + #define INT16_MAX 32767 +#endif + +#ifdef U16_MAX + #define UINT16_MAX U16_MAX +#else + #define UINT16_MAX 0xFFFF +#endif + +#ifdef S32_MIN + #define INT32_MIN S32_MIN +#else + #define INT32_MIN (-2147483647 - 1) +#endif + +#ifdef S32_MAX + #define INT32_MAX S32_MAX +#else + #define INT32_MAX 2147483647 +#endif + +#ifdef U32_MAX + #define UINT32_MAX U32_MAX +#else + #define UINT32_MAX 0xFFFFFFFF +#endif + +#ifdef S64_MIN + #define INT64_MIN S64_MIN +#else + #define INT64_MIN (-9223372036854775807LL) +#endif + +#ifdef S64_MAX + #define INT64_MAX S64_MAX +#else + #define INT64_MAX 9223372036854775807LL +#endif + +#ifdef U64_MAX + #define UINT64_MAX U64_MAX +#else + #define UINT64_MAX 0xFFFFFFFFFFFFFFFFULL +#endif + +/* Macros for integer constants */ +#define INT8_C S8_C +#define UINT8_C U8_C +#define INT16_C S16_C +#define UINT16_C U16_C +#define INT32_C S32_C +#define UINT32_C U32_C +#define INT64_C S64_C +#define UINT64_C U64_C + +/* Format conversion of integer types */ + +#define PRIX64 "llX" +#define PRIx64 "llx" +#define PRIu64 "llu" +#define PRId64 "lld" diff --git a/drivers/gpu/drm/phytium/octopus/km/rgx_bvnc_defs_km.h b/drivers/gpu/drm/phytium/octopus/km/rgx_bvnc_defs_km.h new file mode 100644 index 000000000000..ff410b673788 --- /dev/null +++ 
b/drivers/gpu/drm/phytium/octopus/km/rgx_bvnc_defs_km.h @@ -0,0 +1,262 @@ +/*************************************************************************/ /*! +@Title Hardware definition file rgx_bvnc_defs_km.h +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/****************************************************************************** + * Auto generated file by rgxbvnc_tablegen.py * + * This file should not be edited manually * + *****************************************************************************/ + +#ifndef RGX_BVNC_DEFS_KM_H +#define RGX_BVNC_DEFS_KM_H + +#include "img_types.h" +#include "img_defs.h" + +#if defined(RGX_BVNC_DEFS_UM_H) +#error "This file should not be included in conjunction with rgx_bvnc_defs_um.h" +#endif + +#define BVNC_FIELD_WIDTH (16U) + +#define PVR_ARCH_NAME "octopus" + + +/****************************************************************************** + * Mask and bit-position macros for features without values + *****************************************************************************/ + +#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE_POS (0U) +#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0000000000000001)) + +#define RGX_FEATURE_AXI_ACE_POS (1U) +#define RGX_FEATURE_AXI_ACE_BIT_MASK (IMG_UINT64_C(0x0000000000000002)) + +#define RGX_FEATURE_BARREX_TOP_INFRASTRUCTURE_POS (2U) +#define RGX_FEATURE_BARREX_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0000000000000004)) + +#define RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE_POS (3U) +#define RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0000000000000008)) + +#define 
RGX_FEATURE_CLUSTER_GROUPING_POS (4U) +#define RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK (IMG_UINT64_C(0x0000000000000010)) + +#define RGX_FEATURE_COMPUTE_POS (5U) +#define RGX_FEATURE_COMPUTE_BIT_MASK (IMG_UINT64_C(0x0000000000000020)) + +#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE_POS (6U) +#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE_BIT_MASK (IMG_UINT64_C(0x0000000000000040)) + +#define RGX_FEATURE_COMPUTE_OVERLAP_POS (7U) +#define RGX_FEATURE_COMPUTE_OVERLAP_BIT_MASK (IMG_UINT64_C(0x0000000000000080)) + +#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS_POS (8U) +#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS_BIT_MASK (IMG_UINT64_C(0x0000000000000100)) + +#define RGX_FEATURE_COREID_PER_OS_POS (9U) +#define RGX_FEATURE_COREID_PER_OS_BIT_MASK (IMG_UINT64_C(0x0000000000000200)) + +#define RGX_FEATURE_DUST_POWER_ISLAND_S7_POS (10U) +#define RGX_FEATURE_DUST_POWER_ISLAND_S7_BIT_MASK (IMG_UINT64_C(0x0000000000000400)) + +#define RGX_FEATURE_FASTRENDER_DM_POS (11U) +#define RGX_FEATURE_FASTRENDER_DM_BIT_MASK (IMG_UINT64_C(0x0000000000000800)) + +#define RGX_FEATURE_GPU_CPU_COHERENCY_POS (12U) +#define RGX_FEATURE_GPU_CPU_COHERENCY_BIT_MASK (IMG_UINT64_C(0x0000000000001000)) + +#define RGX_FEATURE_GPU_MULTICORE_SUPPORT_POS (13U) +#define RGX_FEATURE_GPU_MULTICORE_SUPPORT_BIT_MASK (IMG_UINT64_C(0x0000000000002000)) + +#define RGX_FEATURE_GPU_VIRTUALISATION_POS (14U) +#define RGX_FEATURE_GPU_VIRTUALISATION_BIT_MASK (IMG_UINT64_C(0x0000000000004000)) + +#define RGX_FEATURE_GS_RTA_SUPPORT_POS (15U) +#define RGX_FEATURE_GS_RTA_SUPPORT_BIT_MASK (IMG_UINT64_C(0x0000000000008000)) + +#define RGX_FEATURE_HYPERVISOR_MMU_POS (16U) +#define RGX_FEATURE_HYPERVISOR_MMU_BIT_MASK (IMG_UINT64_C(0x0000000000010000)) + +#define RGX_FEATURE_META_DMA_POS (17U) +#define RGX_FEATURE_META_DMA_BIT_MASK (IMG_UINT64_C(0x0000000000020000)) + +#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES_POS (18U) +#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES_BIT_MASK (IMG_UINT64_C(0x0000000000040000)) 
+ +#define RGX_FEATURE_PBE_CHECKSUM_2D_POS (19U) +#define RGX_FEATURE_PBE_CHECKSUM_2D_BIT_MASK (IMG_UINT64_C(0x0000000000080000)) + +#define RGX_FEATURE_PBVNC_COREID_REG_POS (20U) +#define RGX_FEATURE_PBVNC_COREID_REG_BIT_MASK (IMG_UINT64_C(0x0000000000100000)) + +#define RGX_FEATURE_PDS_TEMPSIZE8_POS (21U) +#define RGX_FEATURE_PDS_TEMPSIZE8_BIT_MASK (IMG_UINT64_C(0x0000000000200000)) + +#define RGX_FEATURE_PERFBUS_POS (22U) +#define RGX_FEATURE_PERFBUS_BIT_MASK (IMG_UINT64_C(0x0000000000400000)) + +#define RGX_FEATURE_PERF_COUNTER_BATCH_POS (23U) +#define RGX_FEATURE_PERF_COUNTER_BATCH_BIT_MASK (IMG_UINT64_C(0x0000000000800000)) + +#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES_POS (24U) +#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES_BIT_MASK (IMG_UINT64_C(0x0000000001000000)) + +#define RGX_FEATURE_PM_MMUSTACK_POS (25U) +#define RGX_FEATURE_PM_MMUSTACK_BIT_MASK (IMG_UINT64_C(0x0000000002000000)) + +#define RGX_FEATURE_PM_MMU_VFP_POS (26U) +#define RGX_FEATURE_PM_MMU_VFP_BIT_MASK (IMG_UINT64_C(0x0000000004000000)) + +#define RGX_FEATURE_RISCV_FW_PROCESSOR_POS (27U) +#define RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK (IMG_UINT64_C(0x0000000008000000)) + +#define RGX_FEATURE_S7_CACHE_HIERARCHY_POS (28U) +#define RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK (IMG_UINT64_C(0x0000000010000000)) + +#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE_POS (29U) +#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0000000020000000)) + +#define RGX_FEATURE_SCALABLE_VDM_GPP_POS (30U) +#define RGX_FEATURE_SCALABLE_VDM_GPP_BIT_MASK (IMG_UINT64_C(0x0000000040000000)) + +#define RGX_FEATURE_SIGNAL_SNOOPING_POS (31U) +#define RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK (IMG_UINT64_C(0x0000000080000000)) + +#define RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_POS (32U) +#define RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_BIT_MASK (IMG_UINT64_C(0x0000000100000000)) + +#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT_POS (33U) +#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT_BIT_MASK 
(IMG_UINT64_C(0x0000000200000000)) + +#define RGX_FEATURE_SLC_VIVT_POS (34U) +#define RGX_FEATURE_SLC_VIVT_BIT_MASK (IMG_UINT64_C(0x0000000400000000)) + +#define RGX_FEATURE_SYS_BUS_SECURE_RESET_POS (35U) +#define RGX_FEATURE_SYS_BUS_SECURE_RESET_BIT_MASK (IMG_UINT64_C(0x0000000800000000)) + +#define RGX_FEATURE_TDM_PDS_CHECKSUM_POS (36U) +#define RGX_FEATURE_TDM_PDS_CHECKSUM_BIT_MASK (IMG_UINT64_C(0x0000001000000000)) + +#define RGX_FEATURE_TESSELLATION_POS (37U) +#define RGX_FEATURE_TESSELLATION_BIT_MASK (IMG_UINT64_C(0x0000002000000000)) + +#define RGX_FEATURE_TILE_REGION_PROTECTION_POS (38U) +#define RGX_FEATURE_TILE_REGION_PROTECTION_BIT_MASK (IMG_UINT64_C(0x0000004000000000)) + +#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS_POS (39U) +#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS_BIT_MASK (IMG_UINT64_C(0x0000008000000000)) + +#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS_POS (40U) +#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS_BIT_MASK (IMG_UINT64_C(0x0000010000000000)) + +#define RGX_FEATURE_VDM_DRAWINDIRECT_POS (41U) +#define RGX_FEATURE_VDM_DRAWINDIRECT_BIT_MASK (IMG_UINT64_C(0x0000020000000000)) + +#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS_POS (42U) +#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS_BIT_MASK (IMG_UINT64_C(0x0000040000000000)) + +#define RGX_FEATURE_WATCHDOG_TIMER_POS (43U) +#define RGX_FEATURE_WATCHDOG_TIMER_BIT_MASK (IMG_UINT64_C(0x0000080000000000)) + +#define RGX_FEATURE_ZLS_CHECKSUM_POS (44U) +#define RGX_FEATURE_ZLS_CHECKSUM_BIT_MASK (IMG_UINT64_C(0x0000100000000000)) + + +/****************************************************************************** + * Features with values indexes + *****************************************************************************/ + +typedef enum _RGX_FEATURE_WITH_VALUE_INDEX_ { + RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_IDX, + RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL_IDX, + RGX_FEATURE_ECC_RAMS_IDX, + RGX_FEATURE_FBCDC_IDX, + RGX_FEATURE_FBCDC_ALGORITHM_IDX, + RGX_FEATURE_FBCDC_ARCHITECTURE_IDX, + 
RGX_FEATURE_LAYOUT_MARS_IDX, + RGX_FEATURE_MAX_TPU_PER_SPU_IDX, + RGX_FEATURE_META_IDX, + RGX_FEATURE_META_COREMEM_BANKS_IDX, + RGX_FEATURE_META_COREMEM_SIZE_IDX, + RGX_FEATURE_META_DMA_CHANNEL_COUNT_IDX, + RGX_FEATURE_MMU_VERSION_IDX, + RGX_FEATURE_NUM_CLUSTERS_IDX, + RGX_FEATURE_NUM_ISP_IPP_PIPES_IDX, + RGX_FEATURE_NUM_ISP_PER_SPU_IDX, + RGX_FEATURE_NUM_MEMBUS_IDX, + RGX_FEATURE_NUM_OSIDS_IDX, + RGX_FEATURE_NUM_SPU_IDX, + RGX_FEATURE_PBE_PER_SPU_IDX, + RGX_FEATURE_PHYS_BUS_WIDTH_IDX, + RGX_FEATURE_POWER_ISLAND_VERSION_IDX, + RGX_FEATURE_RAY_TRACING_ARCH_IDX, + RGX_FEATURE_RENDER_TARGET_XY_MAX_IDX, + RGX_FEATURE_SCALABLE_TE_ARCH_IDX, + RGX_FEATURE_SCALABLE_VCE_IDX, + RGX_FEATURE_SLC_BANKS_IDX, + RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_IDX, + RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_IDX, + RGX_FEATURE_TILE_SIZE_X_IDX, + RGX_FEATURE_TILE_SIZE_Y_IDX, + RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_IDX, + RGX_FEATURE_WITH_VALUES_MAX_IDX, +} RGX_FEATURE_WITH_VALUE_INDEX; + + +/****************************************************************************** + * Mask and bit-position macros for ERNs and BRNs + *****************************************************************************/ + +#define FIX_HW_BRN_66927_POS (0U) +#define FIX_HW_BRN_66927_BIT_MASK (IMG_UINT64_C(0x0000000000000001)) + +#define FIX_HW_BRN_71157_POS (1U) +#define FIX_HW_BRN_71157_BIT_MASK (IMG_UINT64_C(0x0000000000000002)) + +#define FIX_HW_BRN_71422_POS (2U) +#define FIX_HW_BRN_71422_BIT_MASK (IMG_UINT64_C(0x0000000000000004)) + +/* Macro used for padding the unavailable values for features with values */ +#define RGX_FEATURE_VALUE_INVALID (0xFFFFFFFEU) + +/* Macro used for marking a feature with value as disabled for a specific bvnc */ +#define RGX_FEATURE_VALUE_DISABLED (0xFFFFFFFFU) + +#endif /* RGX_BVNC_DEFS_KM_H */ diff --git a/drivers/gpu/drm/phytium/octopus/km/rgx_bvnc_table_km.h b/drivers/gpu/drm/phytium/octopus/km/rgx_bvnc_table_km.h new file mode 100644 index 000000000000..2bddbccc205b --- /dev/null +++ 
b/drivers/gpu/drm/phytium/octopus/km/rgx_bvnc_table_km.h @@ -0,0 +1,429 @@ +/*************************************************************************/ /*! +@Title Hardware definition file rgx_bvnc_table_km.h +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/****************************************************************************** + * Auto generated file by rgxbvnc_tablegen.py * + * This file should not be edited manually * + *****************************************************************************/ + +#ifndef RGX_BVNC_TABLE_KM_H +#define RGX_BVNC_TABLE_KM_H + +#include "img_types.h" +#include "img_defs.h" +#include "rgxdefs_km.h" + +#ifndef RGXBVNC_C +#error "This file should only be included from rgxbvnc.c" +#endif + +#if defined(RGX_BVNC_TABLE_UM_H) +#error "This file should not be included in conjunction with rgx_bvnc_table_um.h" +#endif + + +/****************************************************************************** + * Defines and arrays for each feature with values used + * for handling the corresponding values + *****************************************************************************/ + +#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX (3) +static const IMG_UINT16 aui16_RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_values[RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 3, 4, }; + +#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL_MAX_VALUE_IDX (3) +static const IMG_UINT16 aui16_RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL_values[RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 3, 4, }; + +#define 
RGX_FEATURE_ECC_RAMS_MAX_VALUE_IDX (4) +static const IMG_UINT16 aui16_RGX_FEATURE_ECC_RAMS_values[RGX_FEATURE_ECC_RAMS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 1, 2, }; + +#define RGX_FEATURE_FBCDC_MAX_VALUE_IDX (3) +static const IMG_UINT16 aui16_RGX_FEATURE_FBCDC_values[RGX_FEATURE_FBCDC_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 3, 4, }; + +#define RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX (3) +static const IMG_UINT16 aui16_RGX_FEATURE_FBCDC_ALGORITHM_values[RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 3, 4, }; + +#define RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX (3) +static const IMG_UINT16 aui16_RGX_FEATURE_FBCDC_ARCHITECTURE_values[RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 5, 6, }; + +#define RGX_FEATURE_LAYOUT_MARS_MAX_VALUE_IDX (2) +static const IMG_UINT16 aui16_RGX_FEATURE_LAYOUT_MARS_values[RGX_FEATURE_LAYOUT_MARS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, }; + +#define RGX_FEATURE_MAX_TPU_PER_SPU_MAX_VALUE_IDX (3) +static const IMG_UINT16 aui16_RGX_FEATURE_MAX_TPU_PER_SPU_values[RGX_FEATURE_MAX_TPU_PER_SPU_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, }; + +#define RGX_FEATURE_META_MAX_VALUE_IDX (2) +static const IMG_UINT16 aui16_RGX_FEATURE_META_values[RGX_FEATURE_META_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, MTP219, }; + +#define RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX (2) +static const IMG_UINT16 aui16_RGX_FEATURE_META_COREMEM_BANKS_values[RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 8, }; + +#define RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX (2) +static const IMG_UINT16 aui16_RGX_FEATURE_META_COREMEM_SIZE_values[RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 96, }; + +#define RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX (3) +static const IMG_UINT16 
aui16_RGX_FEATURE_META_DMA_CHANNEL_COUNT_values[RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 2, 4, }; + +#define RGX_FEATURE_MMU_VERSION_MAX_VALUE_IDX (3) +static const IMG_UINT16 aui16_RGX_FEATURE_MMU_VERSION_values[RGX_FEATURE_MMU_VERSION_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 3, 4, }; + +#define RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX (6) +static const IMG_UINT16 aui16_RGX_FEATURE_NUM_CLUSTERS_values[RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 4, 6, 8, }; + +#define RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX (6) +static const IMG_UINT16 aui16_RGX_FEATURE_NUM_ISP_IPP_PIPES_values[RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 4, 8, 16, 24, 32, }; + +#define RGX_FEATURE_NUM_ISP_PER_SPU_MAX_VALUE_IDX (3) +static const IMG_UINT16 aui16_RGX_FEATURE_NUM_ISP_PER_SPU_values[RGX_FEATURE_NUM_ISP_PER_SPU_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, }; + +#define RGX_FEATURE_NUM_MEMBUS_MAX_VALUE_IDX (5) +static const IMG_UINT16 aui16_RGX_FEATURE_NUM_MEMBUS_values[RGX_FEATURE_NUM_MEMBUS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, 4, }; + +#define RGX_FEATURE_NUM_OSIDS_MAX_VALUE_IDX (2) +static const IMG_UINT16 aui16_RGX_FEATURE_NUM_OSIDS_values[RGX_FEATURE_NUM_OSIDS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 8, }; + +#define RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX (5) +static const IMG_UINT16 aui16_RGX_FEATURE_NUM_SPU_values[RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, 4, }; + +#define RGX_FEATURE_PBE_PER_SPU_MAX_VALUE_IDX (3) +static const IMG_UINT16 aui16_RGX_FEATURE_PBE_PER_SPU_values[RGX_FEATURE_PBE_PER_SPU_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, }; + +#define RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX (2) +static const IMG_UINT16 
aui16_RGX_FEATURE_PHYS_BUS_WIDTH_values[RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 40, }; + +#define RGX_FEATURE_POWER_ISLAND_VERSION_MAX_VALUE_IDX (3) +static const IMG_UINT16 aui16_RGX_FEATURE_POWER_ISLAND_VERSION_values[RGX_FEATURE_POWER_ISLAND_VERSION_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, }; + +#define RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX (3) +static const IMG_UINT16 aui16_RGX_FEATURE_RAY_TRACING_ARCH_values[RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 3, }; + +#define RGX_FEATURE_RENDER_TARGET_XY_MAX_MAX_VALUE_IDX (3) +static const IMG_UINT16 aui16_RGX_FEATURE_RENDER_TARGET_XY_MAX_values[RGX_FEATURE_RENDER_TARGET_XY_MAX_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 16384, 32768, }; + +#define RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX (4) +static const IMG_UINT16 aui16_RGX_FEATURE_SCALABLE_TE_ARCH_values[RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 4, }; + +#define RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX (5) +static const IMG_UINT16 aui16_RGX_FEATURE_SCALABLE_VCE_values[RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, 4, }; + +#define RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX (6) +static const IMG_UINT16 aui16_RGX_FEATURE_SLC_BANKS_values[RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 4, 6, 8, }; + +#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX (2) +static const IMG_UINT16 aui16_RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_values[RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1024, }; + +#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX (7) +static const IMG_UINT16 aui16_RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_values[RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 128, 256, 512, 1024, 1536, 2048, }; + +#define 
RGX_FEATURE_TILE_SIZE_X_MAX_VALUE_IDX (2) +static const IMG_UINT16 aui16_RGX_FEATURE_TILE_SIZE_X_values[RGX_FEATURE_TILE_SIZE_X_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 32, }; + +#define RGX_FEATURE_TILE_SIZE_Y_MAX_VALUE_IDX (2) +static const IMG_UINT16 aui16_RGX_FEATURE_TILE_SIZE_Y_values[RGX_FEATURE_TILE_SIZE_Y_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 32, }; + +#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX (2) +static const IMG_UINT16 aui16_RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_values[RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 40, }; + + +/****************************************************************************** + * Table contains pointers to each feature value array for features that have + * values. + * Indexed using enum RGX_FEATURE_WITH_VALUE_INDEX from rgx_bvnc_defs_km.h + *****************************************************************************/ + +static const IMG_UINT16 * const gaFeaturesValues[RGX_FEATURE_WITH_VALUES_MAX_IDX] = { + aui16_RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_values, + aui16_RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL_values, + aui16_RGX_FEATURE_ECC_RAMS_values, + aui16_RGX_FEATURE_FBCDC_values, + aui16_RGX_FEATURE_FBCDC_ALGORITHM_values, + aui16_RGX_FEATURE_FBCDC_ARCHITECTURE_values, + aui16_RGX_FEATURE_LAYOUT_MARS_values, + aui16_RGX_FEATURE_MAX_TPU_PER_SPU_values, + aui16_RGX_FEATURE_META_values, + aui16_RGX_FEATURE_META_COREMEM_BANKS_values, + aui16_RGX_FEATURE_META_COREMEM_SIZE_values, + aui16_RGX_FEATURE_META_DMA_CHANNEL_COUNT_values, + aui16_RGX_FEATURE_MMU_VERSION_values, + aui16_RGX_FEATURE_NUM_CLUSTERS_values, + aui16_RGX_FEATURE_NUM_ISP_IPP_PIPES_values, + aui16_RGX_FEATURE_NUM_ISP_PER_SPU_values, + aui16_RGX_FEATURE_NUM_MEMBUS_values, + aui16_RGX_FEATURE_NUM_OSIDS_values, + aui16_RGX_FEATURE_NUM_SPU_values, + aui16_RGX_FEATURE_PBE_PER_SPU_values, + aui16_RGX_FEATURE_PHYS_BUS_WIDTH_values, + 
aui16_RGX_FEATURE_POWER_ISLAND_VERSION_values, + aui16_RGX_FEATURE_RAY_TRACING_ARCH_values, + aui16_RGX_FEATURE_RENDER_TARGET_XY_MAX_values, + aui16_RGX_FEATURE_SCALABLE_TE_ARCH_values, + aui16_RGX_FEATURE_SCALABLE_VCE_values, + aui16_RGX_FEATURE_SLC_BANKS_values, + aui16_RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_values, + aui16_RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_values, + aui16_RGX_FEATURE_TILE_SIZE_X_values, + aui16_RGX_FEATURE_TILE_SIZE_Y_values, + aui16_RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_values, +}; + + +/****************************************************************************** + * Array containing the lengths of the arrays containing the values. + * Used for indexing the aui16__values defined upwards + *****************************************************************************/ + + +static const IMG_UINT16 gaFeaturesValuesMaxIndexes[] = { + RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX, + RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL_MAX_VALUE_IDX, + RGX_FEATURE_ECC_RAMS_MAX_VALUE_IDX, + RGX_FEATURE_FBCDC_MAX_VALUE_IDX, + RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX, + RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX, + RGX_FEATURE_LAYOUT_MARS_MAX_VALUE_IDX, + RGX_FEATURE_MAX_TPU_PER_SPU_MAX_VALUE_IDX, + RGX_FEATURE_META_MAX_VALUE_IDX, + RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX, + RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX, + RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX, + RGX_FEATURE_MMU_VERSION_MAX_VALUE_IDX, + RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX, + RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX, + RGX_FEATURE_NUM_ISP_PER_SPU_MAX_VALUE_IDX, + RGX_FEATURE_NUM_MEMBUS_MAX_VALUE_IDX, + RGX_FEATURE_NUM_OSIDS_MAX_VALUE_IDX, + RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX, + RGX_FEATURE_PBE_PER_SPU_MAX_VALUE_IDX, + RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX, + RGX_FEATURE_POWER_ISLAND_VERSION_MAX_VALUE_IDX, + RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX, + RGX_FEATURE_RENDER_TARGET_XY_MAX_MAX_VALUE_IDX, + RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX, + 
RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX, + RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX, + RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX, + RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX, + RGX_FEATURE_TILE_SIZE_X_MAX_VALUE_IDX, + RGX_FEATURE_TILE_SIZE_Y_MAX_VALUE_IDX, + RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX, +}; + + +/****************************************************************************** + * Bit-positions for features with values + *****************************************************************************/ + +static const IMG_UINT16 aui16FeaturesWithValuesBitPositions[] = { + (0U), /* RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_POS */ + (2U), /* RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL_POS */ + (4U), /* RGX_FEATURE_ECC_RAMS_POS */ + (7U), /* RGX_FEATURE_FBCDC_POS */ + (9U), /* RGX_FEATURE_FBCDC_ALGORITHM_POS */ + (11U), /* RGX_FEATURE_FBCDC_ARCHITECTURE_POS */ + (13U), /* RGX_FEATURE_LAYOUT_MARS_POS */ + (15U), /* RGX_FEATURE_MAX_TPU_PER_SPU_POS */ + (17U), /* RGX_FEATURE_META_POS */ + (19U), /* RGX_FEATURE_META_COREMEM_BANKS_POS */ + (21U), /* RGX_FEATURE_META_COREMEM_SIZE_POS */ + (23U), /* RGX_FEATURE_META_DMA_CHANNEL_COUNT_POS */ + (25U), /* RGX_FEATURE_MMU_VERSION_POS */ + (27U), /* RGX_FEATURE_NUM_CLUSTERS_POS */ + (30U), /* RGX_FEATURE_NUM_ISP_IPP_PIPES_POS */ + (33U), /* RGX_FEATURE_NUM_ISP_PER_SPU_POS */ + (35U), /* RGX_FEATURE_NUM_MEMBUS_POS */ + (38U), /* RGX_FEATURE_NUM_OSIDS_POS */ + (40U), /* RGX_FEATURE_NUM_SPU_POS */ + (43U), /* RGX_FEATURE_PBE_PER_SPU_POS */ + (45U), /* RGX_FEATURE_PHYS_BUS_WIDTH_POS */ + (47U), /* RGX_FEATURE_POWER_ISLAND_VERSION_POS */ + (49U), /* RGX_FEATURE_RAY_TRACING_ARCH_POS */ + (51U), /* RGX_FEATURE_RENDER_TARGET_XY_MAX_POS */ + (53U), /* RGX_FEATURE_SCALABLE_TE_ARCH_POS */ + (56U), /* RGX_FEATURE_SCALABLE_VCE_POS */ + (59U), /* RGX_FEATURE_SLC_BANKS_POS */ + (62U), /* RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_POS */ + (64U), /* RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_POS */ + (67U), /* RGX_FEATURE_TILE_SIZE_X_POS */ + 
(69U), /* RGX_FEATURE_TILE_SIZE_Y_POS */ + (71U), /* RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_POS */ +}; + + +/****************************************************************************** + * Bit-masks for features with values + *****************************************************************************/ + +static const IMG_UINT64 aui64FeaturesWithValuesBitMasks[] = { + (IMG_UINT64_C(0x0000000000000003)), /* RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_BIT_MASK */ + (IMG_UINT64_C(0x000000000000000C)), /* RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL_BIT_MASK */ + (IMG_UINT64_C(0x0000000000000070)), /* RGX_FEATURE_ECC_RAMS_BIT_MASK */ + (IMG_UINT64_C(0x0000000000000180)), /* RGX_FEATURE_FBCDC_BIT_MASK */ + (IMG_UINT64_C(0x0000000000000600)), /* RGX_FEATURE_FBCDC_ALGORITHM_BIT_MASK */ + (IMG_UINT64_C(0x0000000000001800)), /* RGX_FEATURE_FBCDC_ARCHITECTURE_BIT_MASK */ + (IMG_UINT64_C(0x0000000000006000)), /* RGX_FEATURE_LAYOUT_MARS_BIT_MASK */ + (IMG_UINT64_C(0x0000000000018000)), /* RGX_FEATURE_MAX_TPU_PER_SPU_BIT_MASK */ + (IMG_UINT64_C(0x0000000000060000)), /* RGX_FEATURE_META_BIT_MASK */ + (IMG_UINT64_C(0x0000000000180000)), /* RGX_FEATURE_META_COREMEM_BANKS_BIT_MASK */ + (IMG_UINT64_C(0x0000000000600000)), /* RGX_FEATURE_META_COREMEM_SIZE_BIT_MASK */ + (IMG_UINT64_C(0x0000000001800000)), /* RGX_FEATURE_META_DMA_CHANNEL_COUNT_BIT_MASK */ + (IMG_UINT64_C(0x0000000006000000)), /* RGX_FEATURE_MMU_VERSION_BIT_MASK */ + (IMG_UINT64_C(0x0000000038000000)), /* RGX_FEATURE_NUM_CLUSTERS_BIT_MASK */ + (IMG_UINT64_C(0x00000001C0000000)), /* RGX_FEATURE_NUM_ISP_IPP_PIPES_BIT_MASK */ + (IMG_UINT64_C(0x0000000600000000)), /* RGX_FEATURE_NUM_ISP_PER_SPU_BIT_MASK */ + (IMG_UINT64_C(0x0000003800000000)), /* RGX_FEATURE_NUM_MEMBUS_BIT_MASK */ + (IMG_UINT64_C(0x000000C000000000)), /* RGX_FEATURE_NUM_OSIDS_BIT_MASK */ + (IMG_UINT64_C(0x0000070000000000)), /* RGX_FEATURE_NUM_SPU_BIT_MASK */ + (IMG_UINT64_C(0x0000180000000000)), /* RGX_FEATURE_PBE_PER_SPU_BIT_MASK */ + 
(IMG_UINT64_C(0x0000600000000000)), /* RGX_FEATURE_PHYS_BUS_WIDTH_BIT_MASK */ + (IMG_UINT64_C(0x0001800000000000)), /* RGX_FEATURE_POWER_ISLAND_VERSION_BIT_MASK */ + (IMG_UINT64_C(0x0006000000000000)), /* RGX_FEATURE_RAY_TRACING_ARCH_BIT_MASK */ + (IMG_UINT64_C(0x0018000000000000)), /* RGX_FEATURE_RENDER_TARGET_XY_MAX_BIT_MASK */ + (IMG_UINT64_C(0x00E0000000000000)), /* RGX_FEATURE_SCALABLE_TE_ARCH_BIT_MASK */ + (IMG_UINT64_C(0x0700000000000000)), /* RGX_FEATURE_SCALABLE_VCE_BIT_MASK */ + (IMG_UINT64_C(0x3800000000000000)), /* RGX_FEATURE_SLC_BANKS_BIT_MASK */ + (IMG_UINT64_C(0xC000000000000000)), /* RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_BIT_MASK */ + (IMG_UINT64_C(0x0000000000000007)), /* RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_BIT_MASK */ + (IMG_UINT64_C(0x0000000000000018)), /* RGX_FEATURE_TILE_SIZE_X_BIT_MASK */ + (IMG_UINT64_C(0x0000000000000060)), /* RGX_FEATURE_TILE_SIZE_Y_BIT_MASK */ + (IMG_UINT64_C(0x0000000000000180)), /* RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_BIT_MASK */ +}; + + +/****************************************************************************** + * Table mapping bitmasks for features and features with values + *****************************************************************************/ + + +static const IMG_UINT64 gaFeatures[][4]= +{ + { IMG_UINT64_C(0x001b000000fe0002), IMG_UINT64_C(0x000017bcf1facff2), IMG_UINT64_C(0x492ab14c532ab516), IMG_UINT64_C(0x00000000000000a9) }, /* 27.0.254.2 */ + { IMG_UINT64_C(0x001e000001980065), IMG_UINT64_C(0x000017bef1fecff3), IMG_UINT64_C(0x492aa94a4d2ab51a), IMG_UINT64_C(0x00000000000000a9) }, /* 30.0.408.101 */ + { IMG_UINT64_C(0x001e000003300014), IMG_UINT64_C(0x000017bef1fecff3), IMG_UINT64_C(0x512aa94c952b351a), IMG_UINT64_C(0x00000000000000aa) }, /* 30.0.816.20 */ + { IMG_UINT64_C(0x001e000006600001), IMG_UINT64_C(0x000017bef1fecff3), IMG_UINT64_C(0x5a4aaa54dd2b351a), IMG_UINT64_C(0x00000000000000ab) }, /* 30.0.1632.1 */ + { IMG_UINT64_C(0x0023000001980065), IMG_UINT64_C(0x000017bef1fecff3), 
IMG_UINT64_C(0x492aa94a4d2ab51a), IMG_UINT64_C(0x00000000000000a9) }, /* 35.0.408.101 */ + { IMG_UINT64_C(0x0023000006600015), IMG_UINT64_C(0x000017bef1feeff7), IMG_UINT64_C(0x5a532a54dd2b351a), IMG_UINT64_C(0x00000000000000ab) }, /* 35.0.1632.21 */ + { IMG_UINT64_C(0x0023000006600017), IMG_UINT64_C(0x000017bef1feeff7), IMG_UINT64_C(0x5a532a54dd2b351a), IMG_UINT64_C(0x00000000000000ab) }, /* 35.0.1632.23 */ + { IMG_UINT64_C(0x0026000009900192), IMG_UINT64_C(0x000017bef1feefff), IMG_UINT64_C(0x6b552b65252b351a), IMG_UINT64_C(0x00000000000000ae) }, /* 38.0.2448.402 */ +}; + +/****************************************************************************** + * Table mapping bitmasks for ERNs/BRNs + *****************************************************************************/ + + +static const IMG_UINT64 gaErnsBrns[][2]= +{ + { IMG_UINT64_C(0x001b000500fe0002), IMG_UINT64_C(0x0000000000000004) }, /* 27.5.254.2 */ + { IMG_UINT64_C(0x001e000301980065), IMG_UINT64_C(0x0000000000000000) }, /* 30.3.408.101 */ + { IMG_UINT64_C(0x001e000303300014), IMG_UINT64_C(0x0000000000000000) }, /* 30.3.816.20 */ + { IMG_UINT64_C(0x001e000406600001), IMG_UINT64_C(0x0000000000000000) }, /* 30.4.1632.1 */ + { IMG_UINT64_C(0x001e000506600001), IMG_UINT64_C(0x0000000000000000) }, /* 30.5.1632.1 */ + { IMG_UINT64_C(0x0023000206600015), IMG_UINT64_C(0x0000000000000002) }, /* 35.2.1632.21 */ + { IMG_UINT64_C(0x0023000206600017), IMG_UINT64_C(0x0000000000000002) }, /* 35.2.1632.23 */ + { IMG_UINT64_C(0x0023000301980065), IMG_UINT64_C(0x0000000000000000) }, /* 35.3.408.101 */ + { IMG_UINT64_C(0x0023000306600017), IMG_UINT64_C(0x0000000000000000) }, /* 35.3.1632.23 */ + { IMG_UINT64_C(0x0023000406600017), IMG_UINT64_C(0x0000000000000000) }, /* 35.4.1632.23 */ + { IMG_UINT64_C(0x0026000209900192), IMG_UINT64_C(0x0000000000000000) }, /* 38.2.2448.402 */ +}; + +#if defined(DEBUG) + +#define FEATURE_NO_VALUES_NAMES_MAX_IDX (45) + +static const IMG_CHAR * const 
gaszFeaturesNoValuesNames[FEATURE_NO_VALUES_NAMES_MAX_IDX] = +{ + "ALBIORIX_TOP_INFRASTRUCTURE", + "AXI_ACE", + "BARREX_TOP_INFRASTRUCTURE", + "CATURIX_TOP_INFRASTRUCTURE", + "CLUSTER_GROUPING", + "COMPUTE", + "COMPUTE_MORTON_CAPABLE", + "COMPUTE_OVERLAP", + "COMPUTE_OVERLAP_WITH_BARRIERS", + "COREID_PER_OS", + "DUST_POWER_ISLAND_S7", + "FASTRENDER_DM", + "GPU_CPU_COHERENCY", + "GPU_MULTICORE_SUPPORT", + "GPU_VIRTUALISATION", + "GS_RTA_SUPPORT", + "HYPERVISOR_MMU", + "META_DMA", + "META_REGISTER_UNPACKED_ACCESSES", + "PBE_CHECKSUM_2D", + "PBVNC_COREID_REG", + "PDS_TEMPSIZE8", + "PERFBUS", + "PERF_COUNTER_BATCH", + "PM_BYTE_ALIGNED_BASE_ADDRESSES", + "PM_MMUSTACK", + "PM_MMU_VFP", + "RISCV_FW_PROCESSOR", + "S7_CACHE_HIERARCHY", + "S7_TOP_INFRASTRUCTURE", + "SCALABLE_VDM_GPP", + "SIGNAL_SNOOPING", + "SLC_FAULT_ACCESS_ADDR_PHYS", + "SLC_SIZE_ADJUSTMENT", + "SLC_VIVT", + "SYS_BUS_SECURE_RESET", + "TDM_PDS_CHECKSUM", + "TESSELLATION", + "TILE_REGION_PROTECTION", + "TPU_CEM_DATAMASTER_GLOBAL_REGISTERS", + "TPU_DM_GLOBAL_REGISTERS", + "VDM_DRAWINDIRECT", + "VDM_OBJECT_LEVEL_LLS", + "WATCHDOG_TIMER", + "ZLS_CHECKSUM", +}; + +#define ERNSBRNS_IDS_MAX_IDX (3) + +static const IMG_UINT32 gaui64ErnsBrnsIDs[ERNSBRNS_IDS_MAX_IDX] = +{ + 66927, + 71157, + 71422, +}; + +#endif /* defined(DEBUG) */ +#endif /* RGX_BVNC_TABLE_KM_H */ diff --git a/drivers/gpu/drm/phytium/octopus/km/rgx_cr_defs_km.h b/drivers/gpu/drm/phytium/octopus/km/rgx_cr_defs_km.h new file mode 100644 index 000000000000..cfe4653991ab --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/km/rgx_cr_defs_km.h @@ -0,0 +1,6710 @@ +/*************************************************************************/ /*! +@Title Hardware definition file rgx_cr_defs_km.h +@Brief The file contains auto-generated hardware definitions without + BVNC-specific compile time conditionals. +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/* **** Autogenerated C -- do not edit **** */ + +/* + */ + + +#ifndef RGX_CR_DEFS_KM_H +#define RGX_CR_DEFS_KM_H + +#if !defined(IMG_EXPLICIT_INCLUDE_HWDEFS) +#error This file may only be included if explicitly defined +#endif + +#include "img_types.h" +#include "img_defs.h" + + +#define RGX_CR_DEFS_KM_REVISION 144 + +/* + Register RGX_CR_USC_INDIRECT +*/ +#define RGX_CR_USC_INDIRECT (0x8000U) +#define RGX_CR_USC_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000003F)) +#define RGX_CR_USC_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_USC_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFC0U) + + +/* + Register RGX_CR_MERCER_INDIRECT +*/ +#define RGX_CR_MERCER_INDIRECT (0x8238U) +#define RGX_CR_MERCER_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000003F)) +#define RGX_CR_MERCER_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_MERCER_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFC0U) + + +/* + Register RGX_CR_PBE_INDIRECT +*/ +#define RGX_CR_PBE_INDIRECT (0x83E0U) +#define RGX_CR_PBE_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000003F)) +#define RGX_CR_PBE_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_PBE_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFC0U) + + +/* + Register RGX_CR_PBE_SHARED_INDIRECT +*/ +#define RGX_CR_PBE_SHARED_INDIRECT (0x8388U) +#define RGX_CR_PBE_SHARED_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define RGX_CR_PBE_SHARED_INDIRECT_ADDRESS_SHIFT (0U) +#define 
RGX_CR_PBE_SHARED_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFE0U) + + +/* + Register RGX_CR_ISP_INDIRECT +*/ +#define RGX_CR_ISP_INDIRECT (0x83F8U) +#define RGX_CR_ISP_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000003F)) +#define RGX_CR_ISP_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_ISP_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFC0U) + + +/* + Register RGX_CR_TPU_INDIRECT +*/ +#define RGX_CR_TPU_INDIRECT (0x83E8U) +#define RGX_CR_TPU_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000003F)) +#define RGX_CR_TPU_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_TPU_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFC0U) + + +/* + Register RGX_CR_SWIFT_INDIRECT +*/ +#define RGX_CR_SWIFT_INDIRECT (0x8308U) +#define RGX_CR_SWIFT_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000003F)) +#define RGX_CR_SWIFT_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_SWIFT_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFC0U) + + +/* + Register RGX_CR_TEXAS_INDIRECT +*/ +#define RGX_CR_TEXAS_INDIRECT (0x8390U) +#define RGX_CR_TEXAS_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define RGX_CR_TEXAS_INDIRECT_ADDRESS_SHIFT (0U) +#define RGX_CR_TEXAS_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFE0U) + + +/* + Register RGX_CR_CLK_CTRL0 +*/ +#define RGX_CR_CLK_CTRL0 (0x0000U) +#define RGX_CR_CLK_CTRL0_MASKFULL (IMG_UINT64_C(0xFFCF03000F3F3303)) +#define RGX_CR_CLK_CTRL0_BIF_TEXAS_SHIFT (62U) +#define RGX_CR_CLK_CTRL0_BIF_TEXAS_CLRMSK (IMG_UINT64_C(0x3FFFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL0_BIF_TEXAS_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_BIF_TEXAS_ON (IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_CLK_CTRL0_BIF_TEXAS_AUTO (IMG_UINT64_C(0x8000000000000000)) +#define RGX_CR_CLK_CTRL0_FBCACHE_SHIFT (60U) +#define RGX_CR_CLK_CTRL0_FBCACHE_CLRMSK (IMG_UINT64_C(0xCFFFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL0_FBCACHE_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_FBCACHE_ON (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_CLK_CTRL0_FBCACHE_AUTO (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_CLK_CTRL0_FBC_SHIFT (58U) 
+#define RGX_CR_CLK_CTRL0_FBC_CLRMSK (IMG_UINT64_C(0xF3FFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL0_FBC_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_FBC_ON (IMG_UINT64_C(0x0400000000000000)) +#define RGX_CR_CLK_CTRL0_FBC_AUTO (IMG_UINT64_C(0x0800000000000000)) +#define RGX_CR_CLK_CTRL0_FBDC_SHIFT (56U) +#define RGX_CR_CLK_CTRL0_FBDC_CLRMSK (IMG_UINT64_C(0xFCFFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL0_FBDC_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_FBDC_ON (IMG_UINT64_C(0x0100000000000000)) +#define RGX_CR_CLK_CTRL0_FBDC_AUTO (IMG_UINT64_C(0x0200000000000000)) +#define RGX_CR_CLK_CTRL0_FBM_SHIFT (54U) +#define RGX_CR_CLK_CTRL0_FBM_CLRMSK (IMG_UINT64_C(0xFF3FFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL0_FBM_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_FBM_ON (IMG_UINT64_C(0x0040000000000000)) +#define RGX_CR_CLK_CTRL0_FBM_AUTO (IMG_UINT64_C(0x0080000000000000)) +#define RGX_CR_CLK_CTRL0_PBE_SHIFT (50U) +#define RGX_CR_CLK_CTRL0_PBE_CLRMSK (IMG_UINT64_C(0xFFF3FFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL0_PBE_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_PBE_ON (IMG_UINT64_C(0x0004000000000000)) +#define RGX_CR_CLK_CTRL0_PBE_AUTO (IMG_UINT64_C(0x0008000000000000)) +#define RGX_CR_CLK_CTRL0_MCU_L1_SHIFT (48U) +#define RGX_CR_CLK_CTRL0_MCU_L1_CLRMSK (IMG_UINT64_C(0xFFFCFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL0_MCU_L1_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_MCU_L1_ON (IMG_UINT64_C(0x0001000000000000)) +#define RGX_CR_CLK_CTRL0_MCU_L1_AUTO (IMG_UINT64_C(0x0002000000000000)) +#define RGX_CR_CLK_CTRL0_BIF_SHIFT (40U) +#define RGX_CR_CLK_CTRL0_BIF_CLRMSK (IMG_UINT64_C(0xFFFFFCFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL0_BIF_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_BIF_ON (IMG_UINT64_C(0x0000010000000000)) +#define RGX_CR_CLK_CTRL0_BIF_AUTO (IMG_UINT64_C(0x0000020000000000)) +#define RGX_CR_CLK_CTRL0_MCU_L0_SHIFT (26U) +#define RGX_CR_CLK_CTRL0_MCU_L0_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFF3FFFFFF)) +#define RGX_CR_CLK_CTRL0_MCU_L0_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_MCU_L0_ON (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_CLK_CTRL0_MCU_L0_AUTO (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_CLK_CTRL0_TPU_SHIFT (24U) +#define RGX_CR_CLK_CTRL0_TPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFCFFFFFF)) +#define RGX_CR_CLK_CTRL0_TPU_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_TPU_ON (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_CLK_CTRL0_TPU_AUTO (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_CLK_CTRL0_USC_SHIFT (20U) +#define RGX_CR_CLK_CTRL0_USC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFCFFFFF)) +#define RGX_CR_CLK_CTRL0_USC_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_USC_ON (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_CLK_CTRL0_USC_AUTO (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_CLK_CTRL0_SLC_BANK_SHIFT (18U) +#define RGX_CR_CLK_CTRL0_SLC_BANK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF3FFFF)) +#define RGX_CR_CLK_CTRL0_SLC_BANK_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_SLC_BANK_ON (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_CLK_CTRL0_SLC_BANK_AUTO (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_CLK_CTRL0_SLC_SHIFT (16U) +#define RGX_CR_CLK_CTRL0_SLC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF)) +#define RGX_CR_CLK_CTRL0_SLC_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_SLC_ON (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_CLK_CTRL0_SLC_AUTO (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_CLK_CTRL0_PDS_SHIFT (12U) +#define RGX_CR_CLK_CTRL0_PDS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFCFFF)) +#define RGX_CR_CLK_CTRL0_PDS_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_PDS_ON (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_CLK_CTRL0_PDS_AUTO (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_CLK_CTRL0_PM_SHIFT (8U) +#define RGX_CR_CLK_CTRL0_PM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF)) 
+#define RGX_CR_CLK_CTRL0_PM_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_PM_ON (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_CLK_CTRL0_PM_AUTO (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_CLK_CTRL0_ISP_SHIFT (0U) +#define RGX_CR_CLK_CTRL0_ISP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) +#define RGX_CR_CLK_CTRL0_ISP_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL0_ISP_ON (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_CLK_CTRL0_ISP_AUTO (IMG_UINT64_C(0x0000000000000002)) + + +/* + Register RGX_CR_CLK_STATUS0 +*/ +#define RGX_CR_CLK_STATUS0 (0x0008U) +#define RGX_CR_CLK_STATUS0_MASKFULL (IMG_UINT64_C(0x00000001BF101751)) +#define RGX_CR_CLK_STATUS0_MCU_L0_SHIFT (32U) +#define RGX_CR_CLK_STATUS0_MCU_L0_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_CLK_STATUS0_MCU_L0_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS0_MCU_L0_RUNNING (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_CLK_STATUS0_BIF_TEXAS_SHIFT (31U) +#define RGX_CR_CLK_STATUS0_BIF_TEXAS_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_CLK_STATUS0_BIF_TEXAS_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS0_BIF_TEXAS_RUNNING (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_CLK_STATUS0_FBCACHE_SHIFT (29U) +#define RGX_CR_CLK_STATUS0_FBCACHE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) +#define RGX_CR_CLK_STATUS0_FBCACHE_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS0_FBCACHE_RUNNING (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_CLK_STATUS0_FBC_SHIFT (28U) +#define RGX_CR_CLK_STATUS0_FBC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) +#define RGX_CR_CLK_STATUS0_FBC_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS0_FBC_RUNNING (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_CLK_STATUS0_FBDC_SHIFT (27U) +#define RGX_CR_CLK_STATUS0_FBDC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) +#define RGX_CR_CLK_STATUS0_FBDC_GATED (IMG_UINT64_C(0x0000000000000000)) +#define 
RGX_CR_CLK_STATUS0_FBDC_RUNNING (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_CLK_STATUS0_FBM_SHIFT (26U) +#define RGX_CR_CLK_STATUS0_FBM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) +#define RGX_CR_CLK_STATUS0_FBM_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS0_FBM_RUNNING (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_CLK_STATUS0_PBE_SHIFT (25U) +#define RGX_CR_CLK_STATUS0_PBE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) +#define RGX_CR_CLK_STATUS0_PBE_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS0_PBE_RUNNING (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_CLK_STATUS0_MCU_L1_SHIFT (24U) +#define RGX_CR_CLK_STATUS0_MCU_L1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) +#define RGX_CR_CLK_STATUS0_MCU_L1_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS0_MCU_L1_RUNNING (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_CLK_STATUS0_BIF_SHIFT (20U) +#define RGX_CR_CLK_STATUS0_BIF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) +#define RGX_CR_CLK_STATUS0_BIF_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS0_BIF_RUNNING (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_CLK_STATUS0_TPU_SHIFT (12U) +#define RGX_CR_CLK_STATUS0_TPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define RGX_CR_CLK_STATUS0_TPU_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS0_TPU_RUNNING (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_CLK_STATUS0_USC_SHIFT (10U) +#define RGX_CR_CLK_STATUS0_USC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_CLK_STATUS0_USC_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS0_USC_RUNNING (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_CLK_STATUS0_SLC_BANK_SHIFT (9U) +#define RGX_CR_CLK_STATUS0_SLC_BANK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) +#define RGX_CR_CLK_STATUS0_SLC_BANK_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS0_SLC_BANK_RUNNING (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_CLK_STATUS0_SLC_SHIFT 
(8U) +#define RGX_CR_CLK_STATUS0_SLC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_CR_CLK_STATUS0_SLC_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS0_SLC_RUNNING (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_CLK_STATUS0_PDS_SHIFT (6U) +#define RGX_CR_CLK_STATUS0_PDS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) +#define RGX_CR_CLK_STATUS0_PDS_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS0_PDS_RUNNING (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_CLK_STATUS0_PM_SHIFT (4U) +#define RGX_CR_CLK_STATUS0_PM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_CR_CLK_STATUS0_PM_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS0_PM_RUNNING (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_CLK_STATUS0_ISP_SHIFT (0U) +#define RGX_CR_CLK_STATUS0_ISP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_CLK_STATUS0_ISP_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS0_ISP_RUNNING (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_CORE_ID +*/ +#define RGX_CR_CORE_ID (0x0020U) +#define RGX_CR_CORE_ID_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_CORE_ID_BRANCH_ID_SHIFT (48U) +#define RGX_CR_CORE_ID_BRANCH_ID_CLRMSK (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) +#define RGX_CR_CORE_ID_VERSION_ID_SHIFT (32U) +#define RGX_CR_CORE_ID_VERSION_ID_CLRMSK (IMG_UINT64_C(0xFFFF0000FFFFFFFF)) +#define RGX_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_SHIFT (16U) +#define RGX_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000FFFF)) +#define RGX_CR_CORE_ID_CONFIG_ID_SHIFT (0U) +#define RGX_CR_CORE_ID_CONFIG_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) + + +/* + Register RGX_CR_SPU_ENABLE +*/ +#define RGX_CR_SPU_ENABLE (0x0050U) +#define RGX_CR_SPU_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SPU_ENABLE_ENABLE_SHIFT (0U) +#define RGX_CR_SPU_ENABLE_ENABLE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_SOC_TIMER_GRAY +*/ +#define 
RGX_CR_SOC_TIMER_GRAY (0x00E0U) +#define RGX_CR_SOC_TIMER_GRAY_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SOC_TIMER_GRAY_VALUE_SHIFT (0U) +#define RGX_CR_SOC_TIMER_GRAY_VALUE_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_SOC_TIMER_BINARY +*/ +#define RGX_CR_SOC_TIMER_BINARY (0x00E8U) +#define RGX_CR_SOC_TIMER_BINARY_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SOC_TIMER_BINARY_VALUE_SHIFT (0U) +#define RGX_CR_SOC_TIMER_BINARY_VALUE_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_CLK_CTRL1 +*/ +#define RGX_CR_CLK_CTRL1 (0x0080U) +#define RGX_CR_CLK_CTRL1_MASKFULL (IMG_UINT64_C(0xFFFC3F3FFFCFFFFF)) +#define RGX_CR_CLK_CTRL1_BSC_SHIFT (62U) +#define RGX_CR_CLK_CTRL1_BSC_CLRMSK (IMG_UINT64_C(0x3FFFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_BSC_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_BSC_ON (IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_CLK_CTRL1_BSC_AUTO (IMG_UINT64_C(0x8000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_SAP_SHIFT (60U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_SAP_CLRMSK (IMG_UINT64_C(0xCFFFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_SAP_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_SAP_ON (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_SAP_AUTO (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_PAP_CMN_SHIFT (58U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_PAP_CMN_CLRMSK (IMG_UINT64_C(0xF3FFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_PAP_CMN_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_PAP_CMN_ON (IMG_UINT64_C(0x0400000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_PAP_CMN_AUTO (IMG_UINT64_C(0x0800000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_TPX_SHIFT (56U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_TPX_CLRMSK (IMG_UINT64_C(0xFCFFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_TPX_OFF (IMG_UINT64_C(0x0000000000000000)) +#define 
RGX_CR_CLK_CTRL1_USC_PIPE_TPX_ON (IMG_UINT64_C(0x0100000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_TPX_AUTO (IMG_UINT64_C(0x0200000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_EMIPSB_SHIFT (54U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_EMIPSB_CLRMSK (IMG_UINT64_C(0xFF3FFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_EMIPSB_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_EMIPSB_ON (IMG_UINT64_C(0x0040000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_EMIPSB_AUTO (IMG_UINT64_C(0x0080000000000000)) +#define RGX_CR_CLK_CTRL1_PSB_SHIFT (52U) +#define RGX_CR_CLK_CTRL1_PSB_CLRMSK (IMG_UINT64_C(0xFFCFFFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_PSB_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_PSB_ON (IMG_UINT64_C(0x0010000000000000)) +#define RGX_CR_CLK_CTRL1_PSB_AUTO (IMG_UINT64_C(0x0020000000000000)) +#define RGX_CR_CLK_CTRL1_TPU_USC_SELECT_SHIFT (50U) +#define RGX_CR_CLK_CTRL1_TPU_USC_SELECT_CLRMSK (IMG_UINT64_C(0xFFF3FFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_TPU_USC_SELECT_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_TPU_USC_SELECT_ON (IMG_UINT64_C(0x0004000000000000)) +#define RGX_CR_CLK_CTRL1_TPU_USC_SELECT_AUTO (IMG_UINT64_C(0x0008000000000000)) +#define RGX_CR_CLK_CTRL1_CDM_PIPE_SHIFT (44U) +#define RGX_CR_CLK_CTRL1_CDM_PIPE_CLRMSK (IMG_UINT64_C(0xFFFFCFFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_CDM_PIPE_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_CDM_PIPE_ON (IMG_UINT64_C(0x0000100000000000)) +#define RGX_CR_CLK_CTRL1_CDM_PIPE_AUTO (IMG_UINT64_C(0x0000200000000000)) +#define RGX_CR_CLK_CTRL1_USC_L2ICACHE_SHIFT (42U) +#define RGX_CR_CLK_CTRL1_USC_L2ICACHE_CLRMSK (IMG_UINT64_C(0xFFFFF3FFFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_USC_L2ICACHE_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_L2ICACHE_ON (IMG_UINT64_C(0x0000040000000000)) +#define RGX_CR_CLK_CTRL1_USC_L2ICACHE_AUTO (IMG_UINT64_C(0x0000080000000000)) +#define RGX_CR_CLK_CTRL1_TCU_L1_SHIFT (40U) +#define 
RGX_CR_CLK_CTRL1_TCU_L1_CLRMSK (IMG_UINT64_C(0xFFFFFCFFFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_TCU_L1_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_TCU_L1_ON (IMG_UINT64_C(0x0000010000000000)) +#define RGX_CR_CLK_CTRL1_TCU_L1_AUTO (IMG_UINT64_C(0x0000020000000000)) +#define RGX_CR_CLK_CTRL1_TDM_SHIFT (36U) +#define RGX_CR_CLK_CTRL1_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFFCFFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_TDM_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_TDM_ON (IMG_UINT64_C(0x0000001000000000)) +#define RGX_CR_CLK_CTRL1_TDM_AUTO (IMG_UINT64_C(0x0000002000000000)) +#define RGX_CR_CLK_CTRL1_ASTC_SHIFT (34U) +#define RGX_CR_CLK_CTRL1_ASTC_CLRMSK (IMG_UINT64_C(0xFFFFFFF3FFFFFFFF)) +#define RGX_CR_CLK_CTRL1_ASTC_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_ASTC_ON (IMG_UINT64_C(0x0000000400000000)) +#define RGX_CR_CLK_CTRL1_ASTC_AUTO (IMG_UINT64_C(0x0000000800000000)) +#define RGX_CR_CLK_CTRL1_IPF_SHIFT (32U) +#define RGX_CR_CLK_CTRL1_IPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFCFFFFFFFF)) +#define RGX_CR_CLK_CTRL1_IPF_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_IPF_ON (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_CLK_CTRL1_IPF_AUTO (IMG_UINT64_C(0x0000000200000000)) +#define RGX_CR_CLK_CTRL1_COMPUTE_SHIFT (30U) +#define RGX_CR_CLK_CTRL1_COMPUTE_CLRMSK (IMG_UINT64_C(0xFFFFFFFF3FFFFFFF)) +#define RGX_CR_CLK_CTRL1_COMPUTE_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_COMPUTE_ON (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_CLK_CTRL1_COMPUTE_AUTO (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_CLK_CTRL1_PIXEL_SHIFT (28U) +#define RGX_CR_CLK_CTRL1_PIXEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFCFFFFFFF)) +#define RGX_CR_CLK_CTRL1_PIXEL_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_PIXEL_ON (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_CLK_CTRL1_PIXEL_AUTO (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_CLK_CTRL1_VERTEX_SHIFT (26U) +#define 
RGX_CR_CLK_CTRL1_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF3FFFFFF)) +#define RGX_CR_CLK_CTRL1_VERTEX_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_VERTEX_ON (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_CLK_CTRL1_VERTEX_AUTO (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_CLK_CTRL1_TPF_SHIFT (24U) +#define RGX_CR_CLK_CTRL1_TPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFCFFFFFF)) +#define RGX_CR_CLK_CTRL1_TPF_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_TPF_ON (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_CLK_CTRL1_TPF_AUTO (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_CLK_CTRL1_GEO_VERTEX_SHIFT (22U) +#define RGX_CR_CLK_CTRL1_GEO_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF3FFFFF)) +#define RGX_CR_CLK_CTRL1_GEO_VERTEX_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_GEO_VERTEX_ON (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_CLK_CTRL1_GEO_VERTEX_AUTO (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_CLK_CTRL1_GEO_SHARED_SHIFT (18U) +#define RGX_CR_CLK_CTRL1_GEO_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF3FFFF)) +#define RGX_CR_CLK_CTRL1_GEO_SHARED_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_GEO_SHARED_ON (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_CLK_CTRL1_GEO_SHARED_AUTO (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_CLK_CTRL1_GEO_TESS_SHIFT (16U) +#define RGX_CR_CLK_CTRL1_GEO_TESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF)) +#define RGX_CR_CLK_CTRL1_GEO_TESS_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_GEO_TESS_ON (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_CLK_CTRL1_GEO_TESS_AUTO (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_SMP_SHIFT (14U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_SMP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF3FFF)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_SMP_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_SMP_ON (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_SMP_AUTO 
(IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_DMA_SHIFT (12U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_DMA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFCFFF)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_DMA_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_DMA_ON (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_DMA_AUTO (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_TAP_SHIFT (10U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_TAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF3FF)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_TAP_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_TAP_ON (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_TAP_AUTO (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_AP_SHIFT (8U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_AP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_AP_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_AP_ON (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_AP_AUTO (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_EMI_SHIFT (6U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_EMI_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF3F)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_EMI_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_EMI_ON (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_EMI_AUTO (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_ITR_SHIFT (4U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_ITR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_ITR_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_ITR_ON (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_ITR_AUTO (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_CPX_SHIFT (2U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_CPX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF3)) +#define 
RGX_CR_CLK_CTRL1_USC_PIPE_CPX_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_CPX_ON (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_CPX_AUTO (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_MOV_SHIFT (0U) +#define RGX_CR_CLK_CTRL1_USC_PIPE_MOV_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_MOV_OFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_MOV_ON (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_CLK_CTRL1_USC_PIPE_MOV_AUTO (IMG_UINT64_C(0x0000000000000002)) + + +/* + Register RGX_CR_CLK_STATUS1 +*/ +#define RGX_CR_CLK_STATUS1 (0x0088U) +#define RGX_CR_CLK_STATUS1_MASKFULL (IMG_UINT64_C(0x00000000FFFE77FB)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_SAP_SHIFT (31U) +#define RGX_CR_CLK_STATUS1_USC_PIPE_SAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_SAP_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_SAP_RUNNING (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_PAP_CMN_SHIFT (30U) +#define RGX_CR_CLK_STATUS1_USC_PIPE_PAP_CMN_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_PAP_CMN_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_PAP_CMN_RUNNING (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_TPX_SHIFT (29U) +#define RGX_CR_CLK_STATUS1_USC_PIPE_TPX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_TPX_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_TPX_RUNNING (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_EMIPSB_SHIFT (28U) +#define RGX_CR_CLK_STATUS1_USC_PIPE_EMIPSB_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_EMIPSB_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_EMIPSB_RUNNING (IMG_UINT64_C(0x0000000010000000)) +#define 
RGX_CR_CLK_STATUS1_BSC_SHIFT (27U) +#define RGX_CR_CLK_STATUS1_BSC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) +#define RGX_CR_CLK_STATUS1_BSC_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_BSC_RUNNING (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_CLK_STATUS1_PSB_SHIFT (26U) +#define RGX_CR_CLK_STATUS1_PSB_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) +#define RGX_CR_CLK_STATUS1_PSB_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_PSB_RUNNING (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_CLK_STATUS1_TPU_USC_SELECT_SHIFT (25U) +#define RGX_CR_CLK_STATUS1_TPU_USC_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) +#define RGX_CR_CLK_STATUS1_TPU_USC_SELECT_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_TPU_USC_SELECT_RUNNING (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_SMP_SHIFT (24U) +#define RGX_CR_CLK_STATUS1_USC_PIPE_SMP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_SMP_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_SMP_RUNNING (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_DMA_SHIFT (23U) +#define RGX_CR_CLK_STATUS1_USC_PIPE_DMA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_DMA_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_DMA_RUNNING (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_TAP_SHIFT (22U) +#define RGX_CR_CLK_STATUS1_USC_PIPE_TAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_TAP_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_TAP_RUNNING (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_AP_SHIFT (21U) +#define RGX_CR_CLK_STATUS1_USC_PIPE_AP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_AP_GATED (IMG_UINT64_C(0x0000000000000000)) +#define 
RGX_CR_CLK_STATUS1_USC_PIPE_AP_RUNNING (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_EMI_SHIFT (20U) +#define RGX_CR_CLK_STATUS1_USC_PIPE_EMI_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_EMI_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_EMI_RUNNING (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_ITR_SHIFT (19U) +#define RGX_CR_CLK_STATUS1_USC_PIPE_ITR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_ITR_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_ITR_RUNNING (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_CPX_SHIFT (18U) +#define RGX_CR_CLK_STATUS1_USC_PIPE_CPX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_CPX_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_CPX_RUNNING (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_MOV_SHIFT (17U) +#define RGX_CR_CLK_STATUS1_USC_PIPE_MOV_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_MOV_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_USC_PIPE_MOV_RUNNING (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_CLK_STATUS1_CDM_PIPE_SHIFT (14U) +#define RGX_CR_CLK_STATUS1_CDM_PIPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) +#define RGX_CR_CLK_STATUS1_CDM_PIPE_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_CDM_PIPE_RUNNING (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_CLK_STATUS1_TCU_L1_SHIFT (13U) +#define RGX_CR_CLK_STATUS1_TCU_L1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) +#define RGX_CR_CLK_STATUS1_TCU_L1_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_TCU_L1_RUNNING (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_CLK_STATUS1_USC_L2ICACHE_SHIFT (12U) +#define RGX_CR_CLK_STATUS1_USC_L2ICACHE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define 
RGX_CR_CLK_STATUS1_USC_L2ICACHE_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_USC_L2ICACHE_RUNNING (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_CLK_STATUS1_TDM_SHIFT (10U) +#define RGX_CR_CLK_STATUS1_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_CLK_STATUS1_TDM_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_TDM_RUNNING (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_CLK_STATUS1_IPF_SHIFT (9U) +#define RGX_CR_CLK_STATUS1_IPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) +#define RGX_CR_CLK_STATUS1_IPF_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_IPF_RUNNING (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_CLK_STATUS1_COMPUTE_SHIFT (8U) +#define RGX_CR_CLK_STATUS1_COMPUTE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_CR_CLK_STATUS1_COMPUTE_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_COMPUTE_RUNNING (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_CLK_STATUS1_ASTC_SHIFT (7U) +#define RGX_CR_CLK_STATUS1_ASTC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) +#define RGX_CR_CLK_STATUS1_ASTC_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_ASTC_RUNNING (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_CLK_STATUS1_PIXEL_SHIFT (6U) +#define RGX_CR_CLK_STATUS1_PIXEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) +#define RGX_CR_CLK_STATUS1_PIXEL_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_PIXEL_RUNNING (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_CLK_STATUS1_VERTEX_SHIFT (5U) +#define RGX_CR_CLK_STATUS1_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_CLK_STATUS1_VERTEX_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_VERTEX_RUNNING (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_CLK_STATUS1_TPF_SHIFT (4U) +#define RGX_CR_CLK_STATUS1_TPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_CR_CLK_STATUS1_TPF_GATED (IMG_UINT64_C(0x0000000000000000)) +#define 
RGX_CR_CLK_STATUS1_TPF_RUNNING (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_CLK_STATUS1_GEO_SHARED_SHIFT (3U) +#define RGX_CR_CLK_STATUS1_GEO_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_CLK_STATUS1_GEO_SHARED_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_GEO_SHARED_RUNNING (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_CLK_STATUS1_GEO_VERTEX_SHIFT (1U) +#define RGX_CR_CLK_STATUS1_GEO_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_CLK_STATUS1_GEO_VERTEX_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_GEO_VERTEX_RUNNING (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_CLK_STATUS1_GEO_TESS_SHIFT (0U) +#define RGX_CR_CLK_STATUS1_GEO_TESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_CLK_STATUS1_GEO_TESS_GATED (IMG_UINT64_C(0x0000000000000000)) +#define RGX_CR_CLK_STATUS1_GEO_TESS_RUNNING (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_SOFT_RESET +*/ +#define RGX_CR_SOFT_RESET (0x0100U) +#define RGX_CR_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x03FFFFE0000BDEFF)) +#define RGX_CR_SOFT_RESET_RAC_SHIFT (57U) +#define RGX_CR_SOFT_RESET_RAC_CLRMSK (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_RAC_EN (IMG_UINT64_C(0x0200000000000000)) +#define RGX_CR_SOFT_RESET_GEO_TESS_SHIFT (56U) +#define RGX_CR_SOFT_RESET_GEO_TESS_CLRMSK (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_GEO_TESS_EN (IMG_UINT64_C(0x0100000000000000)) +#define RGX_CR_SOFT_RESET_INT_SHIFT (55U) +#define RGX_CR_SOFT_RESET_INT_CLRMSK (IMG_UINT64_C(0xFF7FFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_INT_EN (IMG_UINT64_C(0x0080000000000000)) +#define RGX_CR_SOFT_RESET_FP_SHIFT (54U) +#define RGX_CR_SOFT_RESET_FP_CLRMSK (IMG_UINT64_C(0xFFBFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_FP_EN (IMG_UINT64_C(0x0040000000000000)) +#define RGX_CR_SOFT_RESET_YUV_SHIFT (53U) +#define RGX_CR_SOFT_RESET_YUV_CLRMSK (IMG_UINT64_C(0xFFDFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_YUV_EN 
(IMG_UINT64_C(0x0020000000000000)) +#define RGX_CR_SOFT_RESET_PSB_SHIFT (52U) +#define RGX_CR_SOFT_RESET_PSB_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_PSB_EN (IMG_UINT64_C(0x0010000000000000)) +#define RGX_CR_SOFT_RESET_ASC_SHIFT (51U) +#define RGX_CR_SOFT_RESET_ASC_CLRMSK (IMG_UINT64_C(0xFFF7FFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_ASC_EN (IMG_UINT64_C(0x0008000000000000)) +#define RGX_CR_SOFT_RESET_RCE_SHIFT (50U) +#define RGX_CR_SOFT_RESET_RCE_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_RCE_EN (IMG_UINT64_C(0x0004000000000000)) +#define RGX_CR_SOFT_RESET_BSC_SHIFT (49U) +#define RGX_CR_SOFT_RESET_BSC_CLRMSK (IMG_UINT64_C(0xFFFDFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_BSC_EN (IMG_UINT64_C(0x0002000000000000)) +#define RGX_CR_SOFT_RESET_TPU_USC_SELECT_SHIFT (48U) +#define RGX_CR_SOFT_RESET_TPU_USC_SELECT_CLRMSK (IMG_UINT64_C(0xFFFEFFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_TPU_USC_SELECT_EN (IMG_UINT64_C(0x0001000000000000)) +#define RGX_CR_SOFT_RESET_USC_L2ICACHE_SHIFT (47U) +#define RGX_CR_SOFT_RESET_USC_L2ICACHE_CLRMSK (IMG_UINT64_C(0xFFFF7FFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_USC_L2ICACHE_EN (IMG_UINT64_C(0x0000800000000000)) +#define RGX_CR_SOFT_RESET_TCU_L1_SHIFT (46U) +#define RGX_CR_SOFT_RESET_TCU_L1_CLRMSK (IMG_UINT64_C(0xFFFFBFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_TCU_L1_EN (IMG_UINT64_C(0x0000400000000000)) +#define RGX_CR_SOFT_RESET_BIF_TEXAS_SHIFT (45U) +#define RGX_CR_SOFT_RESET_BIF_TEXAS_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_BIF_TEXAS_EN (IMG_UINT64_C(0x0000200000000000)) +#define RGX_CR_SOFT_RESET_BIF_JONES_SHIFT (44U) +#define RGX_CR_SOFT_RESET_BIF_JONES_CLRMSK (IMG_UINT64_C(0xFFFFEFFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_BIF_JONES_EN (IMG_UINT64_C(0x0000100000000000)) +#define RGX_CR_SOFT_RESET_SLC_SHIFT (43U) +#define RGX_CR_SOFT_RESET_SLC_CLRMSK (IMG_UINT64_C(0xFFFFF7FFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_SLC_EN (IMG_UINT64_C(0x0000080000000000)) +#define 
RGX_CR_SOFT_RESET_FBCACHE_SHIFT (42U) +#define RGX_CR_SOFT_RESET_FBCACHE_CLRMSK (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_FBCACHE_EN (IMG_UINT64_C(0x0000040000000000)) +#define RGX_CR_SOFT_RESET_FBM_SHIFT (41U) +#define RGX_CR_SOFT_RESET_FBM_CLRMSK (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_FBM_EN (IMG_UINT64_C(0x0000020000000000)) +#define RGX_CR_SOFT_RESET_FBDC_SHIFT (40U) +#define RGX_CR_SOFT_RESET_FBDC_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_FBDC_EN (IMG_UINT64_C(0x0000010000000000)) +#define RGX_CR_SOFT_RESET_FBC_SHIFT (39U) +#define RGX_CR_SOFT_RESET_FBC_CLRMSK (IMG_UINT64_C(0xFFFFFF7FFFFFFFFF)) +#define RGX_CR_SOFT_RESET_FBC_EN (IMG_UINT64_C(0x0000008000000000)) +#define RGX_CR_SOFT_RESET_PM_SHIFT (38U) +#define RGX_CR_SOFT_RESET_PM_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_PM_EN (IMG_UINT64_C(0x0000004000000000)) +#define RGX_CR_SOFT_RESET_GARTEN_SHIFT (37U) +#define RGX_CR_SOFT_RESET_GARTEN_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF)) +#define RGX_CR_SOFT_RESET_GARTEN_EN (IMG_UINT64_C(0x0000002000000000)) +#define RGX_CR_SOFT_RESET_PBE_SHIFT (19U) +#define RGX_CR_SOFT_RESET_PBE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) +#define RGX_CR_SOFT_RESET_PBE_EN (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_SOFT_RESET_MCU_L1_SHIFT (17U) +#define RGX_CR_SOFT_RESET_MCU_L1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) +#define RGX_CR_SOFT_RESET_MCU_L1_EN (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_SOFT_RESET_CDM_PIPE_SHIFT (16U) +#define RGX_CR_SOFT_RESET_CDM_PIPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) +#define RGX_CR_SOFT_RESET_CDM_PIPE_EN (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_SOFT_RESET_TDM_SHIFT (15U) +#define RGX_CR_SOFT_RESET_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) +#define RGX_CR_SOFT_RESET_TDM_EN (IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_SOFT_RESET_ASTC_SHIFT (14U) +#define RGX_CR_SOFT_RESET_ASTC_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) +#define RGX_CR_SOFT_RESET_ASTC_EN (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_SOFT_RESET_PDS_SHIFT (12U) +#define RGX_CR_SOFT_RESET_PDS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define RGX_CR_SOFT_RESET_PDS_EN (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_SOFT_RESET_ISP_SHIFT (11U) +#define RGX_CR_SOFT_RESET_ISP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) +#define RGX_CR_SOFT_RESET_ISP_EN (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_SOFT_RESET_TPF_SHIFT (10U) +#define RGX_CR_SOFT_RESET_TPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_SOFT_RESET_TPF_EN (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_SOFT_RESET_IPF_SHIFT (9U) +#define RGX_CR_SOFT_RESET_IPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) +#define RGX_CR_SOFT_RESET_IPF_EN (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_SOFT_RESET_GEO_SHARED_SHIFT (7U) +#define RGX_CR_SOFT_RESET_GEO_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) +#define RGX_CR_SOFT_RESET_GEO_SHARED_EN (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_SOFT_RESET_GEO_VERTEX_SHIFT (6U) +#define RGX_CR_SOFT_RESET_GEO_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) +#define RGX_CR_SOFT_RESET_GEO_VERTEX_EN (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_SOFT_RESET_PIXEL_SHIFT (5U) +#define RGX_CR_SOFT_RESET_PIXEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_SOFT_RESET_PIXEL_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_SOFT_RESET_COMPUTE_SHIFT (4U) +#define RGX_CR_SOFT_RESET_COMPUTE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_CR_SOFT_RESET_COMPUTE_EN (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_SOFT_RESET_MCU_L0_SHIFT (3U) +#define RGX_CR_SOFT_RESET_MCU_L0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_SOFT_RESET_MCU_L0_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_SOFT_RESET_TPU_SHIFT (2U) +#define RGX_CR_SOFT_RESET_TPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_SOFT_RESET_TPU_EN 
(IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_SOFT_RESET_VERTEX_SHIFT (1U) +#define RGX_CR_SOFT_RESET_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_SOFT_RESET_VERTEX_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_SOFT_RESET_USC_SHIFT (0U) +#define RGX_CR_SOFT_RESET_USC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_SOFT_RESET_USC_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_SOFT_RESET_SPU +*/ +#define RGX_CR_SOFT_RESET_SPU (0x0108U) +#define RGX_CR_SOFT_RESET_SPU_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SOFT_RESET_SPU_SPU31_SHIFT (31U) +#define RGX_CR_SOFT_RESET_SPU_SPU31_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU31_EN (0x80000000U) +#define RGX_CR_SOFT_RESET_SPU_SPU30_SHIFT (30U) +#define RGX_CR_SOFT_RESET_SPU_SPU30_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU30_EN (0x40000000U) +#define RGX_CR_SOFT_RESET_SPU_SPU29_SHIFT (29U) +#define RGX_CR_SOFT_RESET_SPU_SPU29_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU29_EN (0x20000000U) +#define RGX_CR_SOFT_RESET_SPU_SPU28_SHIFT (28U) +#define RGX_CR_SOFT_RESET_SPU_SPU28_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU28_EN (0x10000000U) +#define RGX_CR_SOFT_RESET_SPU_SPU27_SHIFT (27U) +#define RGX_CR_SOFT_RESET_SPU_SPU27_CLRMSK (0xF7FFFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU27_EN (0x08000000U) +#define RGX_CR_SOFT_RESET_SPU_SPU26_SHIFT (26U) +#define RGX_CR_SOFT_RESET_SPU_SPU26_CLRMSK (0xFBFFFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU26_EN (0x04000000U) +#define RGX_CR_SOFT_RESET_SPU_SPU25_SHIFT (25U) +#define RGX_CR_SOFT_RESET_SPU_SPU25_CLRMSK (0xFDFFFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU25_EN (0x02000000U) +#define RGX_CR_SOFT_RESET_SPU_SPU24_SHIFT (24U) +#define RGX_CR_SOFT_RESET_SPU_SPU24_CLRMSK (0xFEFFFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU24_EN (0x01000000U) +#define RGX_CR_SOFT_RESET_SPU_SPU23_SHIFT (23U) +#define RGX_CR_SOFT_RESET_SPU_SPU23_CLRMSK (0xFF7FFFFFU) +#define 
RGX_CR_SOFT_RESET_SPU_SPU23_EN (0x00800000U) +#define RGX_CR_SOFT_RESET_SPU_SPU22_SHIFT (22U) +#define RGX_CR_SOFT_RESET_SPU_SPU22_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU22_EN (0x00400000U) +#define RGX_CR_SOFT_RESET_SPU_SPU21_SHIFT (21U) +#define RGX_CR_SOFT_RESET_SPU_SPU21_CLRMSK (0xFFDFFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU21_EN (0x00200000U) +#define RGX_CR_SOFT_RESET_SPU_SPU20_SHIFT (20U) +#define RGX_CR_SOFT_RESET_SPU_SPU20_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU20_EN (0x00100000U) +#define RGX_CR_SOFT_RESET_SPU_SPU19_SHIFT (19U) +#define RGX_CR_SOFT_RESET_SPU_SPU19_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU19_EN (0x00080000U) +#define RGX_CR_SOFT_RESET_SPU_SPU18_SHIFT (18U) +#define RGX_CR_SOFT_RESET_SPU_SPU18_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU18_EN (0x00040000U) +#define RGX_CR_SOFT_RESET_SPU_SPU17_SHIFT (17U) +#define RGX_CR_SOFT_RESET_SPU_SPU17_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU17_EN (0x00020000U) +#define RGX_CR_SOFT_RESET_SPU_SPU16_SHIFT (16U) +#define RGX_CR_SOFT_RESET_SPU_SPU16_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU16_EN (0x00010000U) +#define RGX_CR_SOFT_RESET_SPU_SPU15_SHIFT (15U) +#define RGX_CR_SOFT_RESET_SPU_SPU15_CLRMSK (0xFFFF7FFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU15_EN (0x00008000U) +#define RGX_CR_SOFT_RESET_SPU_SPU14_SHIFT (14U) +#define RGX_CR_SOFT_RESET_SPU_SPU14_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU14_EN (0x00004000U) +#define RGX_CR_SOFT_RESET_SPU_SPU13_SHIFT (13U) +#define RGX_CR_SOFT_RESET_SPU_SPU13_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU13_EN (0x00002000U) +#define RGX_CR_SOFT_RESET_SPU_SPU12_SHIFT (12U) +#define RGX_CR_SOFT_RESET_SPU_SPU12_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU12_EN (0x00001000U) +#define RGX_CR_SOFT_RESET_SPU_SPU11_SHIFT (11U) +#define RGX_CR_SOFT_RESET_SPU_SPU11_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_SOFT_RESET_SPU_SPU11_EN (0x00000800U) +#define 
RGX_CR_SOFT_RESET_SPU_SPU10_SHIFT (10U) +#define RGX_CR_SOFT_RESET_SPU_SPU10_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU10_EN (0x00000400U) +#define RGX_CR_SOFT_RESET_SPU_SPU9_SHIFT (9U) +#define RGX_CR_SOFT_RESET_SPU_SPU9_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU9_EN (0x00000200U) +#define RGX_CR_SOFT_RESET_SPU_SPU8_SHIFT (8U) +#define RGX_CR_SOFT_RESET_SPU_SPU8_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_SOFT_RESET_SPU_SPU8_EN (0x00000100U) +#define RGX_CR_SOFT_RESET_SPU_SPU7_SHIFT (7U) +#define RGX_CR_SOFT_RESET_SPU_SPU7_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_SOFT_RESET_SPU_SPU7_EN (0x00000080U) +#define RGX_CR_SOFT_RESET_SPU_SPU6_SHIFT (6U) +#define RGX_CR_SOFT_RESET_SPU_SPU6_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_SOFT_RESET_SPU_SPU6_EN (0x00000040U) +#define RGX_CR_SOFT_RESET_SPU_SPU5_SHIFT (5U) +#define RGX_CR_SOFT_RESET_SPU_SPU5_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_SOFT_RESET_SPU_SPU5_EN (0x00000020U) +#define RGX_CR_SOFT_RESET_SPU_SPU4_SHIFT (4U) +#define RGX_CR_SOFT_RESET_SPU_SPU4_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_SOFT_RESET_SPU_SPU4_EN (0x00000010U) +#define RGX_CR_SOFT_RESET_SPU_SPU3_SHIFT (3U) +#define RGX_CR_SOFT_RESET_SPU_SPU3_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_SOFT_RESET_SPU_SPU3_EN (0x00000008U) +#define RGX_CR_SOFT_RESET_SPU_SPU2_SHIFT (2U) +#define RGX_CR_SOFT_RESET_SPU_SPU2_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_SOFT_RESET_SPU_SPU2_EN (0x00000004U) +#define RGX_CR_SOFT_RESET_SPU_SPU1_SHIFT (1U) +#define RGX_CR_SOFT_RESET_SPU_SPU1_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_SOFT_RESET_SPU_SPU1_EN (0x00000002U) +#define RGX_CR_SOFT_RESET_SPU_SPU0_SHIFT (0U) +#define RGX_CR_SOFT_RESET_SPU_SPU0_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_SOFT_RESET_SPU_SPU0_EN (0x00000001U) + + +/* + Register RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON +*/ +#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON (0x24B0U) +#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_TYPE_SHIFT 
(30U) +#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK (0x3FFFFFFFU) +#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_EXECUTE_COUNT_SHIFT (8U) +#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_EXECUTE_COUNT_CLRMSK (0xC00000FFU) +#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_GPU_ENABLE_SHIFT (0U) +#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_GPU_ENABLE_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_MULTICORE_TDM_CTRL_COMMON +*/ +#define RGX_CR_MULTICORE_TDM_CTRL_COMMON (0x24B8U) +#define RGX_CR_MULTICORE_TDM_CTRL_COMMON_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MULTICORE_TDM_CTRL_COMMON_WORKLOAD_TYPE_SHIFT (30U) +#define RGX_CR_MULTICORE_TDM_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK (0x3FFFFFFFU) +#define RGX_CR_MULTICORE_TDM_CTRL_COMMON_EXECUTE_COUNT_SHIFT (8U) +#define RGX_CR_MULTICORE_TDM_CTRL_COMMON_EXECUTE_COUNT_CLRMSK (0xC00000FFU) +#define RGX_CR_MULTICORE_TDM_CTRL_COMMON_GPU_ENABLE_SHIFT (0U) +#define RGX_CR_MULTICORE_TDM_CTRL_COMMON_GPU_ENABLE_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON +*/ +#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON (0x24C0U) +#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_TYPE_SHIFT (30U) +#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK (0x3FFFFFFFU) +#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_EXECUTE_COUNT_SHIFT (8U) +#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_EXECUTE_COUNT_CLRMSK (0xC00000FFU) +#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_SHIFT (0U) +#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_MULTICORE_BROADCAST +*/ +#define RGX_CR_MULTICORE_BROADCAST (0x24E0U) +#define RGX_CR_MULTICORE_BROADCAST_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_MULTICORE_BROADCAST_PROGRAMMER_MASK_SHIFT (8U) +#define RGX_CR_MULTICORE_BROADCAST_PROGRAMMER_MASK_CLRMSK (0xFFFF00FFU) +#define 
RGX_CR_MULTICORE_BROADCAST_FIRMWARE_MASK_SHIFT (0U) +#define RGX_CR_MULTICORE_BROADCAST_FIRMWARE_MASK_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_MULTICORE +*/ +#define RGX_CR_MULTICORE (0x24E8U) +#define RGX_CR_MULTICORE_MASKFULL (IMG_UINT64_C(0x00000007FFFFFFFF)) +#define RGX_CR_MULTICORE_DOMAIN_GPU_MASK_SHIFT (27U) +#define RGX_CR_MULTICORE_DOMAIN_GPU_MASK_CLRMSK (IMG_UINT64_C(0xFFFFFFF807FFFFFF)) +#define RGX_CR_MULTICORE_PRIMARY_CORE_ID_SHIFT (3U) +#define RGX_CR_MULTICORE_PRIMARY_CORE_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF8000007)) +#define RGX_CR_MULTICORE_ID_SHIFT (0U) +#define RGX_CR_MULTICORE_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF8)) + + +/* + Register RGX_CR_MULTICORE_SYSTEM +*/ +#define RGX_CR_MULTICORE_SYSTEM (0x24F0U) +#define RGX_CR_MULTICORE_SYSTEM_MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_MULTICORE_SYSTEM_GPU_COUNT_SHIFT (0U) +#define RGX_CR_MULTICORE_SYSTEM_GPU_COUNT_CLRMSK (0xFFFFFFF0U) + + +/* + Register RGX_CR_MULTICORE_DOMAIN +*/ +#define RGX_CR_MULTICORE_DOMAIN (0x24F8U) +#define RGX_CR_MULTICORE_DOMAIN_MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_MULTICORE_DOMAIN_GPU_COUNT_SHIFT (0U) +#define RGX_CR_MULTICORE_DOMAIN_GPU_COUNT_CLRMSK (0xFFFFFFF0U) + + +/* + Register RGX_CR_EVENT_STATUS +*/ +#define RGX_CR_EVENT_STATUS (0x0130U) +#define RGX_CR_EVENT_STATUS__ALRIF_V1__MASKFULL (IMG_UINT64_C(0x00000000E007FFFF)) +#define RGX_CR_EVENT_STATUS__ALRIF_V2__MASKFULL (IMG_UINT64_C(0x00000000F80FFFFF)) +#define RGX_CR_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000E007FFFF)) +#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_SHIFT (31U) +#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_EN (0x80000000U) +#define RGX_CR_EVENT_STATUS_TDM_FINISHED_SHIFT (31U) +#define RGX_CR_EVENT_STATUS_TDM_FINISHED_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_EVENT_STATUS_TDM_FINISHED_EN (0x80000000U) +#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_SHIFT (30U) +#define 
RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_EN (0x40000000U) +#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_SHIFT (29U) +#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_EN (0x20000000U) +#define RGX_CR_EVENT_STATUS_RCE_FINISHED_SHIFT (28U) +#define RGX_CR_EVENT_STATUS_RCE_FINISHED_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_EVENT_STATUS_RCE_FINISHED_EN (0x10000000U) +#define RGX_CR_EVENT_STATUS_RCE_BARRIER_HIT_SHIFT (27U) +#define RGX_CR_EVENT_STATUS_RCE_BARRIER_HIT_CLRMSK (0xF7FFFFFFU) +#define RGX_CR_EVENT_STATUS_RCE_BARRIER_HIT_EN (0x08000000U) +#define RGX_CR_EVENT_STATUS_WDT_TIMEOUT_SHIFT (19U) +#define RGX_CR_EVENT_STATUS_WDT_TIMEOUT_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_EVENT_STATUS_WDT_TIMEOUT_EN (0x00080000U) +#define RGX_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_SHIFT (18U) +#define RGX_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_EN (0x00040000U) +#define RGX_CR_EVENT_STATUS_CDM_BARRIER_HIT_SHIFT (18U) +#define RGX_CR_EVENT_STATUS_CDM_BARRIER_HIT_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_EVENT_STATUS_CDM_BARRIER_HIT_EN (0x00040000U) +#define RGX_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_SHIFT (17U) +#define RGX_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_EN (0x00020000U) +#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_SHIFT (16U) +#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_EN (0x00010000U) +#define RGX_CR_EVENT_STATUS_USC_TRIGGER_SHIFT (15U) +#define RGX_CR_EVENT_STATUS_USC_TRIGGER_CLRMSK (0xFFFF7FFFU) +#define RGX_CR_EVENT_STATUS_USC_TRIGGER_EN (0x00008000U) +#define RGX_CR_EVENT_STATUS_FAULT_FW_SHIFT (14U) +#define RGX_CR_EVENT_STATUS_FAULT_FW_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_EVENT_STATUS_FAULT_FW_EN 
(0x00004000U) +#define RGX_CR_EVENT_STATUS_GPIO_ACK_SHIFT (13U) +#define RGX_CR_EVENT_STATUS_GPIO_ACK_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_EVENT_STATUS_GPIO_ACK_EN (0x00002000U) +#define RGX_CR_EVENT_STATUS_GPIO_REQ_SHIFT (12U) +#define RGX_CR_EVENT_STATUS_GPIO_REQ_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_EVENT_STATUS_GPIO_REQ_EN (0x00001000U) +#define RGX_CR_EVENT_STATUS_POWER_ABORT_SHIFT (11U) +#define RGX_CR_EVENT_STATUS_POWER_ABORT_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_EVENT_STATUS_POWER_ABORT_EN (0x00000800U) +#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_SHIFT (10U) +#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_EN (0x00000400U) +#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_SHIFT (9U) +#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_EN (0x00000200U) +#define RGX_CR_EVENT_STATUS_PM_FRAG_DONE_SHIFT (8U) +#define RGX_CR_EVENT_STATUS_PM_FRAG_DONE_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_EVENT_STATUS_PM_FRAG_DONE_EN (0x00000100U) +#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_SHIFT (7U) +#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_EN (0x00000080U) +#define RGX_CR_EVENT_STATUS_TA_TERMINATE_SHIFT (6U) +#define RGX_CR_EVENT_STATUS_TA_TERMINATE_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_EVENT_STATUS_TA_TERMINATE_EN (0x00000040U) +#define RGX_CR_EVENT_STATUS_TA_FINISHED_SHIFT (5U) +#define RGX_CR_EVENT_STATUS_TA_FINISHED_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_EVENT_STATUS_TA_FINISHED_EN (0x00000020U) +#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_SHIFT (4U) +#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_EN (0x00000010U) +#define RGX_CR_EVENT_STATUS_IPP_END_RENDER_SENT_SHIFT (4U) +#define RGX_CR_EVENT_STATUS_IPP_END_RENDER_SENT_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_EVENT_STATUS_IPP_END_RENDER_SENT_EN (0x00000010U) +#define 
RGX_CR_EVENT_STATUS_ISP_END_RENDER_SHIFT (3U) +#define RGX_CR_EVENT_STATUS_ISP_END_RENDER_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_EVENT_STATUS_ISP_END_RENDER_EN (0x00000008U) +#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_SHIFT (2U) +#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_EN (0x00000004U) +#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_SHIFT (1U) +#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_EN (0x00000002U) +#define RGX_CR_EVENT_STATUS_TE_END_SHIFT (1U) +#define RGX_CR_EVENT_STATUS_TE_END_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_EVENT_STATUS_TE_END_EN (0x00000002U) +#define RGX_CR_EVENT_STATUS_FAULT_GPU_SHIFT (0U) +#define RGX_CR_EVENT_STATUS_FAULT_GPU_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_EVENT_STATUS_FAULT_GPU_EN (0x00000001U) + + +/* + Register RGX_CR_EVENT_CLEAR +*/ +#define RGX_CR_EVENT_CLEAR (0x0138U) +#define RGX_CR_EVENT_CLEAR__ALRIF_V1__MASKFULL (IMG_UINT64_C(0x00000000E007FFFF)) +#define RGX_CR_EVENT_CLEAR__ALRIF_V2__MASKFULL (IMG_UINT64_C(0x00000000F80FFFFF)) +#define RGX_CR_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x00000000E007FFFF)) +#define RGX_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_SHIFT (31U) +#define RGX_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_EN (0x80000000U) +#define RGX_CR_EVENT_CLEAR_TDM_FINISHED_SHIFT (31U) +#define RGX_CR_EVENT_CLEAR_TDM_FINISHED_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_EVENT_CLEAR_TDM_FINISHED_EN (0x80000000U) +#define RGX_CR_EVENT_CLEAR_TDM_BUFFER_STALL_SHIFT (30U) +#define RGX_CR_EVENT_CLEAR_TDM_BUFFER_STALL_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_EVENT_CLEAR_TDM_BUFFER_STALL_EN (0x40000000U) +#define RGX_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_SHIFT (29U) +#define RGX_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_EN (0x20000000U) +#define RGX_CR_EVENT_CLEAR_RCE_FINISHED_SHIFT (28U) +#define 
RGX_CR_EVENT_CLEAR_RCE_FINISHED_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_EVENT_CLEAR_RCE_FINISHED_EN (0x10000000U) +#define RGX_CR_EVENT_CLEAR_RCE_BARRIER_HIT_SHIFT (27U) +#define RGX_CR_EVENT_CLEAR_RCE_BARRIER_HIT_CLRMSK (0xF7FFFFFFU) +#define RGX_CR_EVENT_CLEAR_RCE_BARRIER_HIT_EN (0x08000000U) +#define RGX_CR_EVENT_CLEAR_WDT_TIMEOUT_SHIFT (19U) +#define RGX_CR_EVENT_CLEAR_WDT_TIMEOUT_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_EVENT_CLEAR_WDT_TIMEOUT_EN (0x00080000U) +#define RGX_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_SHIFT (18U) +#define RGX_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_EN (0x00040000U) +#define RGX_CR_EVENT_CLEAR_CDM_BARRIER_HIT_SHIFT (18U) +#define RGX_CR_EVENT_CLEAR_CDM_BARRIER_HIT_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_EVENT_CLEAR_CDM_BARRIER_HIT_EN (0x00040000U) +#define RGX_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_SHIFT (17U) +#define RGX_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_EN (0x00020000U) +#define RGX_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_SHIFT (16U) +#define RGX_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_EN (0x00010000U) +#define RGX_CR_EVENT_CLEAR_USC_TRIGGER_SHIFT (15U) +#define RGX_CR_EVENT_CLEAR_USC_TRIGGER_CLRMSK (0xFFFF7FFFU) +#define RGX_CR_EVENT_CLEAR_USC_TRIGGER_EN (0x00008000U) +#define RGX_CR_EVENT_CLEAR_FAULT_FW_SHIFT (14U) +#define RGX_CR_EVENT_CLEAR_FAULT_FW_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_EVENT_CLEAR_FAULT_FW_EN (0x00004000U) +#define RGX_CR_EVENT_CLEAR_GPIO_ACK_SHIFT (13U) +#define RGX_CR_EVENT_CLEAR_GPIO_ACK_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_EVENT_CLEAR_GPIO_ACK_EN (0x00002000U) +#define RGX_CR_EVENT_CLEAR_GPIO_REQ_SHIFT (12U) +#define RGX_CR_EVENT_CLEAR_GPIO_REQ_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_EVENT_CLEAR_GPIO_REQ_EN (0x00001000U) +#define RGX_CR_EVENT_CLEAR_POWER_ABORT_SHIFT (11U) +#define 
RGX_CR_EVENT_CLEAR_POWER_ABORT_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_EVENT_CLEAR_POWER_ABORT_EN (0x00000800U) +#define RGX_CR_EVENT_CLEAR_POWER_COMPLETE_SHIFT (10U) +#define RGX_CR_EVENT_CLEAR_POWER_COMPLETE_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_EVENT_CLEAR_POWER_COMPLETE_EN (0x00000400U) +#define RGX_CR_EVENT_CLEAR_MMU_PAGE_FAULT_SHIFT (9U) +#define RGX_CR_EVENT_CLEAR_MMU_PAGE_FAULT_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_EVENT_CLEAR_MMU_PAGE_FAULT_EN (0x00000200U) +#define RGX_CR_EVENT_CLEAR_PM_FRAG_DONE_SHIFT (8U) +#define RGX_CR_EVENT_CLEAR_PM_FRAG_DONE_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_EVENT_CLEAR_PM_FRAG_DONE_EN (0x00000100U) +#define RGX_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_SHIFT (7U) +#define RGX_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_EN (0x00000080U) +#define RGX_CR_EVENT_CLEAR_TA_TERMINATE_SHIFT (6U) +#define RGX_CR_EVENT_CLEAR_TA_TERMINATE_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_EVENT_CLEAR_TA_TERMINATE_EN (0x00000040U) +#define RGX_CR_EVENT_CLEAR_TA_FINISHED_SHIFT (5U) +#define RGX_CR_EVENT_CLEAR_TA_FINISHED_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_EVENT_CLEAR_TA_FINISHED_EN (0x00000020U) +#define RGX_CR_EVENT_CLEAR_ISP_END_MACROTILE_SHIFT (4U) +#define RGX_CR_EVENT_CLEAR_ISP_END_MACROTILE_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_EVENT_CLEAR_ISP_END_MACROTILE_EN (0x00000010U) +#define RGX_CR_EVENT_CLEAR_IPP_END_RENDER_SENT_SHIFT (4U) +#define RGX_CR_EVENT_CLEAR_IPP_END_RENDER_SENT_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_EVENT_CLEAR_IPP_END_RENDER_SENT_EN (0x00000010U) +#define RGX_CR_EVENT_CLEAR_ISP_END_RENDER_SHIFT (3U) +#define RGX_CR_EVENT_CLEAR_ISP_END_RENDER_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_EVENT_CLEAR_ISP_END_RENDER_EN (0x00000008U) +#define RGX_CR_EVENT_CLEAR_COMPUTE_FINISHED_SHIFT (2U) +#define RGX_CR_EVENT_CLEAR_COMPUTE_FINISHED_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_EVENT_CLEAR_COMPUTE_FINISHED_EN (0x00000004U) +#define RGX_CR_EVENT_CLEAR_KERNEL_FINISHED_SHIFT (1U) +#define 
RGX_CR_EVENT_CLEAR_KERNEL_FINISHED_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_EVENT_CLEAR_KERNEL_FINISHED_EN (0x00000002U) +#define RGX_CR_EVENT_CLEAR_TE_END_SHIFT (1U) +#define RGX_CR_EVENT_CLEAR_TE_END_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_EVENT_CLEAR_TE_END_EN (0x00000002U) +#define RGX_CR_EVENT_CLEAR_FAULT_GPU_SHIFT (0U) +#define RGX_CR_EVENT_CLEAR_FAULT_GPU_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_EVENT_CLEAR_FAULT_GPU_EN (0x00000001U) + + +/* + Register RGX_CR_TIMER +*/ +#define RGX_CR_TIMER (0x0160U) +#define RGX_CR_TIMER_MASKFULL (IMG_UINT64_C(0x8000FFFFFFFFFFFF)) +#define RGX_CR_TIMER_BIT31_SHIFT (63U) +#define RGX_CR_TIMER_BIT31_CLRMSK (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF)) +#define RGX_CR_TIMER_BIT31_EN (IMG_UINT64_C(0x8000000000000000)) +#define RGX_CR_TIMER_VALUE_SHIFT (0U) +#define RGX_CR_TIMER_VALUE_CLRMSK (IMG_UINT64_C(0xFFFF000000000000)) + + +/* + Register RGX_CR_FAULT_FW_STATUS +*/ +#define RGX_CR_FAULT_FW_STATUS (0x0170U) +#define RGX_CR_FAULT_FW_STATUS_MASKFULL (IMG_UINT64_C(0x000000000007000F)) +#define RGX_CR_FAULT_FW_STATUS_META_CORRECT_SHIFT (18U) +#define RGX_CR_FAULT_FW_STATUS_META_CORRECT_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_FAULT_FW_STATUS_META_CORRECT_EN (0x00040000U) +#define RGX_CR_FAULT_FW_STATUS_SLC_CORRECT_SHIFT (17U) +#define RGX_CR_FAULT_FW_STATUS_SLC_CORRECT_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_FAULT_FW_STATUS_SLC_CORRECT_EN (0x00020000U) +#define RGX_CR_FAULT_FW_STATUS_MMU_CORRECT_SHIFT (16U) +#define RGX_CR_FAULT_FW_STATUS_MMU_CORRECT_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_FAULT_FW_STATUS_MMU_CORRECT_EN (0x00010000U) +#define RGX_CR_FAULT_FW_STATUS_LOCKSTEP_DETECT_SHIFT (3U) +#define RGX_CR_FAULT_FW_STATUS_LOCKSTEP_DETECT_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_FAULT_FW_STATUS_LOCKSTEP_DETECT_EN (0x00000008U) +#define RGX_CR_FAULT_FW_STATUS_META_DETECT_SHIFT (2U) +#define RGX_CR_FAULT_FW_STATUS_META_DETECT_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_FAULT_FW_STATUS_META_DETECT_EN (0x00000004U) +#define RGX_CR_FAULT_FW_STATUS_SLC_DETECT_SHIFT (1U) 
+#define RGX_CR_FAULT_FW_STATUS_SLC_DETECT_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_FAULT_FW_STATUS_SLC_DETECT_EN (0x00000002U) +#define RGX_CR_FAULT_FW_STATUS_MMU_DETECT_SHIFT (0U) +#define RGX_CR_FAULT_FW_STATUS_MMU_DETECT_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FAULT_FW_STATUS_MMU_DETECT_EN (0x00000001U) + + +/* + Register RGX_CR_FAULT_FW_CLEAR +*/ +#define RGX_CR_FAULT_FW_CLEAR (0x0178U) +#define RGX_CR_FAULT_FW_CLEAR_MASKFULL (IMG_UINT64_C(0x000000000007000F)) +#define RGX_CR_FAULT_FW_CLEAR_META_CORRECT_SHIFT (18U) +#define RGX_CR_FAULT_FW_CLEAR_META_CORRECT_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_FAULT_FW_CLEAR_META_CORRECT_EN (0x00040000U) +#define RGX_CR_FAULT_FW_CLEAR_SLC_CORRECT_SHIFT (17U) +#define RGX_CR_FAULT_FW_CLEAR_SLC_CORRECT_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_FAULT_FW_CLEAR_SLC_CORRECT_EN (0x00020000U) +#define RGX_CR_FAULT_FW_CLEAR_MMU_CORRECT_SHIFT (16U) +#define RGX_CR_FAULT_FW_CLEAR_MMU_CORRECT_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_FAULT_FW_CLEAR_MMU_CORRECT_EN (0x00010000U) +#define RGX_CR_FAULT_FW_CLEAR_LOCKSTEP_DETECT_SHIFT (3U) +#define RGX_CR_FAULT_FW_CLEAR_LOCKSTEP_DETECT_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_FAULT_FW_CLEAR_LOCKSTEP_DETECT_EN (0x00000008U) +#define RGX_CR_FAULT_FW_CLEAR_META_DETECT_SHIFT (2U) +#define RGX_CR_FAULT_FW_CLEAR_META_DETECT_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_FAULT_FW_CLEAR_META_DETECT_EN (0x00000004U) +#define RGX_CR_FAULT_FW_CLEAR_SLC_DETECT_SHIFT (1U) +#define RGX_CR_FAULT_FW_CLEAR_SLC_DETECT_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_FAULT_FW_CLEAR_SLC_DETECT_EN (0x00000002U) +#define RGX_CR_FAULT_FW_CLEAR_MMU_DETECT_SHIFT (0U) +#define RGX_CR_FAULT_FW_CLEAR_MMU_DETECT_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FAULT_FW_CLEAR_MMU_DETECT_EN (0x00000001U) + + +/* + Register RGX_CR_JONES_RAM_STATUS +*/ +#define RGX_CR_JONES_RAM_STATUS (0x1148U) +#define RGX_CR_JONES_RAM_STATUS_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_JONES_RAM_STATUS_GARTEN_SHIFT (8U) +#define RGX_CR_JONES_RAM_STATUS_GARTEN_CLRMSK (0xFFFFFEFFU) 
+#define RGX_CR_JONES_RAM_STATUS_GARTEN_EN (0x00000100U) +#define RGX_CR_JONES_RAM_STATUS_TDM_SHIFT (7U) +#define RGX_CR_JONES_RAM_STATUS_TDM_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_JONES_RAM_STATUS_TDM_EN (0x00000080U) +#define RGX_CR_JONES_RAM_STATUS_VERTEX_SHIFT (6U) +#define RGX_CR_JONES_RAM_STATUS_VERTEX_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_JONES_RAM_STATUS_VERTEX_EN (0x00000040U) +#define RGX_CR_JONES_RAM_STATUS_PIXEL_SHIFT (5U) +#define RGX_CR_JONES_RAM_STATUS_PIXEL_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_JONES_RAM_STATUS_PIXEL_EN (0x00000020U) +#define RGX_CR_JONES_RAM_STATUS_COMPUTE_SHIFT (4U) +#define RGX_CR_JONES_RAM_STATUS_COMPUTE_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_JONES_RAM_STATUS_COMPUTE_EN (0x00000010U) +#define RGX_CR_JONES_RAM_STATUS_FBCDC_SHIFT (3U) +#define RGX_CR_JONES_RAM_STATUS_FBCDC_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_JONES_RAM_STATUS_FBCDC_EN (0x00000008U) +#define RGX_CR_JONES_RAM_STATUS_PM_SHIFT (2U) +#define RGX_CR_JONES_RAM_STATUS_PM_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_JONES_RAM_STATUS_PM_EN (0x00000004U) +#define RGX_CR_JONES_RAM_STATUS_BIF_SHIFT (1U) +#define RGX_CR_JONES_RAM_STATUS_BIF_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_JONES_RAM_STATUS_BIF_EN (0x00000002U) +#define RGX_CR_JONES_RAM_STATUS_SLC_SHIFT (0U) +#define RGX_CR_JONES_RAM_STATUS_SLC_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_JONES_RAM_STATUS_SLC_EN (0x00000001U) + + +/* + Register RGX_CR_JONES_RAM_INIT_KICK +*/ +#define RGX_CR_JONES_RAM_INIT_KICK (0x1158U) +#define RGX_CR_JONES_RAM_INIT_KICK_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_JONES_RAM_INIT_KICK_GARTEN_SHIFT (8U) +#define RGX_CR_JONES_RAM_INIT_KICK_GARTEN_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_JONES_RAM_INIT_KICK_GARTEN_EN (0x00000100U) +#define RGX_CR_JONES_RAM_INIT_KICK_TDM_SHIFT (7U) +#define RGX_CR_JONES_RAM_INIT_KICK_TDM_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_JONES_RAM_INIT_KICK_TDM_EN (0x00000080U) +#define RGX_CR_JONES_RAM_INIT_KICK_VERTEX_SHIFT (6U) +#define RGX_CR_JONES_RAM_INIT_KICK_VERTEX_CLRMSK 
(0xFFFFFFBFU) +#define RGX_CR_JONES_RAM_INIT_KICK_VERTEX_EN (0x00000040U) +#define RGX_CR_JONES_RAM_INIT_KICK_PIXEL_SHIFT (5U) +#define RGX_CR_JONES_RAM_INIT_KICK_PIXEL_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_JONES_RAM_INIT_KICK_PIXEL_EN (0x00000020U) +#define RGX_CR_JONES_RAM_INIT_KICK_COMPUTE_SHIFT (4U) +#define RGX_CR_JONES_RAM_INIT_KICK_COMPUTE_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_JONES_RAM_INIT_KICK_COMPUTE_EN (0x00000010U) +#define RGX_CR_JONES_RAM_INIT_KICK_FBCDC_SHIFT (3U) +#define RGX_CR_JONES_RAM_INIT_KICK_FBCDC_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_JONES_RAM_INIT_KICK_FBCDC_EN (0x00000008U) +#define RGX_CR_JONES_RAM_INIT_KICK_PM_SHIFT (2U) +#define RGX_CR_JONES_RAM_INIT_KICK_PM_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_JONES_RAM_INIT_KICK_PM_EN (0x00000004U) +#define RGX_CR_JONES_RAM_INIT_KICK_BIF_SHIFT (1U) +#define RGX_CR_JONES_RAM_INIT_KICK_BIF_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_JONES_RAM_INIT_KICK_BIF_EN (0x00000002U) +#define RGX_CR_JONES_RAM_INIT_KICK_SLC_SHIFT (0U) +#define RGX_CR_JONES_RAM_INIT_KICK_SLC_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_JONES_RAM_INIT_KICK_SLC_EN (0x00000001U) + + +/* + Register RGX_CR_PM_PARTIAL_RENDER_ENABLE +*/ +#define RGX_CR_PM_PARTIAL_RENDER_ENABLE (0x0338U) +#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_SHIFT (0U) +#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_EN (0x00000001U) + + +/* + Register RGX_CR_CDM_CONTEXT_STORE_STATUS +*/ +#define RGX_CR_CDM_CONTEXT_STORE_STATUS (0x04A0U) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_SHIFT (1U) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_EN (0x00000002U) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_SHIFT (0U) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_CLRMSK 
(0xFFFFFFFEU) +#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_EN (0x00000001U) + + +/* + Register RGX_CR_CDM_CONTEXT_PDS0 +*/ +#define RGX_CR_CDM_CONTEXT_PDS0 (0x04A8U) +#define RGX_CR_CDM_CONTEXT_PDS0_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0)) +#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_SHIFT (36U) +#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) +#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSHIFT (4U) +#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSIZE (16U) +#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_SHIFT (4U) +#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) +#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSHIFT (4U) +#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSIZE (16U) + + +/* + Register RGX_CR_CDM_CONTEXT_PDS1 +*/ +#define RGX_CR_CDM_CONTEXT_PDS1 (0x04B0U) +#define RGX_CR_CDM_CONTEXT_PDS1_MASKFULL (IMG_UINT64_C(0x00000001FFFFFFFF)) +#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_SHIFT (32U) +#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_SHIFT (31U) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_SHIFT (30U) +#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) +#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_EN (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_SHIFT (23U) +#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC07FFFFF)) +#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_ALIGNSHIFT (1U) +#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_ALIGNSIZE (2U) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_ALLOC_TYPE_SHIFT (22U) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_ALLOC_TYPE_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_ALLOC_TYPE_EN (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_ALLOC_SIZE_SHIFT (11U) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_ALLOC_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFC007FF)) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_ALLOC_SIZE_ALIGNSHIFT (4U) +#define RGX_CR_CDM_CONTEXT_PDS1_USC_ALLOC_SIZE_ALIGNSIZE (16U) +#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_SHIFT (6U) +#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF83F)) +#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_ALIGNSHIFT (1U) +#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_ALIGNSIZE (2U) +#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_SHIFT (0U) +#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFC0)) +#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_ALIGNSHIFT (2U) +#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_ALIGNSIZE (4U) + + +/* + Register RGX_CR_CDM_CONTEXT_LOAD_PDS0 +*/ +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0 (0x04D8U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_SHIFT (36U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSHIFT (4U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSIZE (16U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_SHIFT (4U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSHIFT (4U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSIZE (16U) + + +/* + Register RGX_CR_CDM_CONTEXT_LOAD_PDS1 +*/ +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1 (0x04E0U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_MASKFULL (IMG_UINT64_C(0x00000001BFFFFFFF)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_SHIFT (32U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define 
RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_SHIFT (31U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_SHIFT (23U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC07FFFFF)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_ALIGNSHIFT (1U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_ALIGNSIZE (2U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_ALLOC_TYPE_SHIFT (22U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_ALLOC_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_ALLOC_TYPE_EN (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_ALLOC_SIZE_SHIFT (11U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_ALLOC_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFC007FF)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_ALLOC_SIZE_ALIGNSHIFT (6U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_ALLOC_SIZE_ALIGNSIZE (64U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_SHIFT (6U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF83F)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_ALIGNSHIFT (1U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_ALIGNSIZE (2U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_SHIFT (0U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFC0)) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_ALIGNSHIFT (2U) +#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_ALIGNSIZE (4U) + + +/* + Register RGX_CR_CDM_TERMINATE_PDS +*/ +#define RGX_CR_CDM_TERMINATE_PDS (0x04B8U) +#define RGX_CR_CDM_TERMINATE_PDS_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0)) +#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_SHIFT (36U) +#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_CLRMSK 
(IMG_UINT64_C(0x0000000FFFFFFFFF)) +#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSHIFT (4U) +#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSIZE (16U) +#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_SHIFT (4U) +#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) +#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSHIFT (4U) +#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSIZE (16U) + + +/* + Register RGX_CR_CDM_TERMINATE_PDS1 +*/ +#define RGX_CR_CDM_TERMINATE_PDS1 (0x04C0U) +#define RGX_CR_CDM_TERMINATE_PDS1_MASKFULL (IMG_UINT64_C(0x00000001BFFFFFFF)) +#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_SHIFT (32U) +#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_SHIFT (31U) +#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_SHIFT (23U) +#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC07FFFFF)) +#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_ALIGNSHIFT (1U) +#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_ALIGNSIZE (2U) +#define RGX_CR_CDM_TERMINATE_PDS1_USC_ALLOC_TYPE_SHIFT (22U) +#define RGX_CR_CDM_TERMINATE_PDS1_USC_ALLOC_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_CDM_TERMINATE_PDS1_USC_ALLOC_TYPE_EN (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_CDM_TERMINATE_PDS1_USC_ALLOC_SIZE_SHIFT (11U) +#define RGX_CR_CDM_TERMINATE_PDS1_USC_ALLOC_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFC007FF)) +#define RGX_CR_CDM_TERMINATE_PDS1_USC_ALLOC_SIZE_ALIGNSHIFT (6U) +#define RGX_CR_CDM_TERMINATE_PDS1_USC_ALLOC_SIZE_ALIGNSIZE (64U) +#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_SHIFT (6U) +#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFFFFF83F)) +#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_ALIGNSHIFT (1U) +#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_ALIGNSIZE (2U) +#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_SHIFT (0U) +#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFC0)) +#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_ALIGNSHIFT (2U) +#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_ALIGNSIZE (4U) + + +/* + Register group: RGX_CR_SCRATCH, with 16 repeats +*/ +#define RGX_CR_SCRATCH_REPEATCOUNT (16U) +/* + Register RGX_CR_SCRATCH0 +*/ +#define RGX_CR_SCRATCH0 (0x0800U) +#define RGX_CR_SCRATCH0_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH0_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_SCRATCH1 +*/ +#define RGX_CR_SCRATCH1 (0x0808U) +#define RGX_CR_SCRATCH1_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH1_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_SCRATCH2 +*/ +#define RGX_CR_SCRATCH2 (0x0810U) +#define RGX_CR_SCRATCH2_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH2_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_SCRATCH3 +*/ +#define RGX_CR_SCRATCH3 (0x0818U) +#define RGX_CR_SCRATCH3_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH3_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_SCRATCH4 +*/ +#define RGX_CR_SCRATCH4 (0x0820U) +#define RGX_CR_SCRATCH4_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH4_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH4_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_SCRATCH5 +*/ +#define RGX_CR_SCRATCH5 (0x0828U) +#define RGX_CR_SCRATCH5_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH5_DATA_SHIFT (0U) +#define 
RGX_CR_SCRATCH5_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_SCRATCH6 +*/ +#define RGX_CR_SCRATCH6 (0x0830U) +#define RGX_CR_SCRATCH6_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH6_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH6_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_SCRATCH7 +*/ +#define RGX_CR_SCRATCH7 (0x0838U) +#define RGX_CR_SCRATCH7_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH7_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH7_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_SCRATCH8 +*/ +#define RGX_CR_SCRATCH8 (0x0840U) +#define RGX_CR_SCRATCH8_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH8_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH8_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_SCRATCH9 +*/ +#define RGX_CR_SCRATCH9 (0x0848U) +#define RGX_CR_SCRATCH9_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH9_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH9_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_SCRATCH10 +*/ +#define RGX_CR_SCRATCH10 (0x0850U) +#define RGX_CR_SCRATCH10_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH10_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH10_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_SCRATCH11 +*/ +#define RGX_CR_SCRATCH11 (0x0858U) +#define RGX_CR_SCRATCH11_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH11_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH11_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_SCRATCH12 +*/ +#define RGX_CR_SCRATCH12 (0x0860U) +#define RGX_CR_SCRATCH12_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH12_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH12_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_SCRATCH13 +*/ +#define RGX_CR_SCRATCH13 (0x0868U) +#define RGX_CR_SCRATCH13_MASKFULL 
(IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH13_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH13_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_SCRATCH14 +*/ +#define RGX_CR_SCRATCH14 (0x0870U) +#define RGX_CR_SCRATCH14_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH14_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH14_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_SCRATCH15 +*/ +#define RGX_CR_SCRATCH15 (0x0878U) +#define RGX_CR_SCRATCH15_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SCRATCH15_DATA_SHIFT (0U) +#define RGX_CR_SCRATCH15_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register group: RGX_CR_OS0_SCRATCH, with 4 repeats +*/ +#define RGX_CR_OS0_SCRATCH_REPEATCOUNT (4U) +/* + Register RGX_CR_OS0_SCRATCH0 +*/ +#define RGX_CR_OS0_SCRATCH0 (0x0880U) +#define RGX_CR_OS0_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS0_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS0_SCRATCH0_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS0_SCRATCH1 +*/ +#define RGX_CR_OS0_SCRATCH1 (0x0888U) +#define RGX_CR_OS0_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS0_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS0_SCRATCH1_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS0_SCRATCH2 +*/ +#define RGX_CR_OS0_SCRATCH2 (0x0890U) +#define RGX_CR_OS0_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS0_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS0_SCRATCH2_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS0_SCRATCH3 +*/ +#define RGX_CR_OS0_SCRATCH3 (0x0898U) +#define RGX_CR_OS0_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS0_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS0_SCRATCH3_DATA_CLRMSK (0x00000000U) + + +/* + Register group: RGX_CR_OS1_SCRATCH, with 4 repeats +*/ +#define RGX_CR_OS1_SCRATCH_REPEATCOUNT (4U) +/* + Register RGX_CR_OS1_SCRATCH0 +*/ +#define RGX_CR_OS1_SCRATCH0 (0x10880U) +#define 
RGX_CR_OS1_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS1_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS1_SCRATCH0_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS1_SCRATCH1 +*/ +#define RGX_CR_OS1_SCRATCH1 (0x10888U) +#define RGX_CR_OS1_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS1_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS1_SCRATCH1_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS1_SCRATCH2 +*/ +#define RGX_CR_OS1_SCRATCH2 (0x10890U) +#define RGX_CR_OS1_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS1_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS1_SCRATCH2_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS1_SCRATCH3 +*/ +#define RGX_CR_OS1_SCRATCH3 (0x10898U) +#define RGX_CR_OS1_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS1_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS1_SCRATCH3_DATA_CLRMSK (0x00000000U) + + +/* + Register group: RGX_CR_OS2_SCRATCH, with 4 repeats +*/ +#define RGX_CR_OS2_SCRATCH_REPEATCOUNT (4U) +/* + Register RGX_CR_OS2_SCRATCH0 +*/ +#define RGX_CR_OS2_SCRATCH0 (0x20880U) +#define RGX_CR_OS2_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS2_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS2_SCRATCH0_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS2_SCRATCH1 +*/ +#define RGX_CR_OS2_SCRATCH1 (0x20888U) +#define RGX_CR_OS2_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS2_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS2_SCRATCH1_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS2_SCRATCH2 +*/ +#define RGX_CR_OS2_SCRATCH2 (0x20890U) +#define RGX_CR_OS2_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS2_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS2_SCRATCH2_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS2_SCRATCH3 +*/ +#define RGX_CR_OS2_SCRATCH3 (0x20898U) +#define RGX_CR_OS2_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define 
RGX_CR_OS2_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS2_SCRATCH3_DATA_CLRMSK (0x00000000U) + + +/* + Register group: RGX_CR_OS3_SCRATCH, with 4 repeats +*/ +#define RGX_CR_OS3_SCRATCH_REPEATCOUNT (4U) +/* + Register RGX_CR_OS3_SCRATCH0 +*/ +#define RGX_CR_OS3_SCRATCH0 (0x30880U) +#define RGX_CR_OS3_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS3_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS3_SCRATCH0_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS3_SCRATCH1 +*/ +#define RGX_CR_OS3_SCRATCH1 (0x30888U) +#define RGX_CR_OS3_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS3_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS3_SCRATCH1_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS3_SCRATCH2 +*/ +#define RGX_CR_OS3_SCRATCH2 (0x30890U) +#define RGX_CR_OS3_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS3_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS3_SCRATCH2_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS3_SCRATCH3 +*/ +#define RGX_CR_OS3_SCRATCH3 (0x30898U) +#define RGX_CR_OS3_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS3_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS3_SCRATCH3_DATA_CLRMSK (0x00000000U) + + +/* + Register group: RGX_CR_OS4_SCRATCH, with 4 repeats +*/ +#define RGX_CR_OS4_SCRATCH_REPEATCOUNT (4U) +/* + Register RGX_CR_OS4_SCRATCH0 +*/ +#define RGX_CR_OS4_SCRATCH0 (0x40880U) +#define RGX_CR_OS4_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS4_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS4_SCRATCH0_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS4_SCRATCH1 +*/ +#define RGX_CR_OS4_SCRATCH1 (0x40888U) +#define RGX_CR_OS4_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS4_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS4_SCRATCH1_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS4_SCRATCH2 +*/ +#define RGX_CR_OS4_SCRATCH2 (0x40890U) +#define RGX_CR_OS4_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) 
+#define RGX_CR_OS4_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS4_SCRATCH2_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS4_SCRATCH3 +*/ +#define RGX_CR_OS4_SCRATCH3 (0x40898U) +#define RGX_CR_OS4_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS4_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS4_SCRATCH3_DATA_CLRMSK (0x00000000U) + + +/* + Register group: RGX_CR_OS5_SCRATCH, with 4 repeats +*/ +#define RGX_CR_OS5_SCRATCH_REPEATCOUNT (4U) +/* + Register RGX_CR_OS5_SCRATCH0 +*/ +#define RGX_CR_OS5_SCRATCH0 (0x50880U) +#define RGX_CR_OS5_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS5_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS5_SCRATCH0_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS5_SCRATCH1 +*/ +#define RGX_CR_OS5_SCRATCH1 (0x50888U) +#define RGX_CR_OS5_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS5_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS5_SCRATCH1_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS5_SCRATCH2 +*/ +#define RGX_CR_OS5_SCRATCH2 (0x50890U) +#define RGX_CR_OS5_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS5_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS5_SCRATCH2_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS5_SCRATCH3 +*/ +#define RGX_CR_OS5_SCRATCH3 (0x50898U) +#define RGX_CR_OS5_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS5_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS5_SCRATCH3_DATA_CLRMSK (0x00000000U) + + +/* + Register group: RGX_CR_OS6_SCRATCH, with 4 repeats +*/ +#define RGX_CR_OS6_SCRATCH_REPEATCOUNT (4U) +/* + Register RGX_CR_OS6_SCRATCH0 +*/ +#define RGX_CR_OS6_SCRATCH0 (0x60880U) +#define RGX_CR_OS6_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS6_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS6_SCRATCH0_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS6_SCRATCH1 +*/ +#define RGX_CR_OS6_SCRATCH1 (0x60888U) +#define RGX_CR_OS6_SCRATCH1_MASKFULL 
(IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS6_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS6_SCRATCH1_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS6_SCRATCH2 +*/ +#define RGX_CR_OS6_SCRATCH2 (0x60890U) +#define RGX_CR_OS6_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS6_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS6_SCRATCH2_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS6_SCRATCH3 +*/ +#define RGX_CR_OS6_SCRATCH3 (0x60898U) +#define RGX_CR_OS6_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS6_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS6_SCRATCH3_DATA_CLRMSK (0x00000000U) + + +/* + Register group: RGX_CR_OS7_SCRATCH, with 4 repeats +*/ +#define RGX_CR_OS7_SCRATCH_REPEATCOUNT (4U) +/* + Register RGX_CR_OS7_SCRATCH0 +*/ +#define RGX_CR_OS7_SCRATCH0 (0x70880U) +#define RGX_CR_OS7_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS7_SCRATCH0_DATA_SHIFT (0U) +#define RGX_CR_OS7_SCRATCH0_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS7_SCRATCH1 +*/ +#define RGX_CR_OS7_SCRATCH1 (0x70888U) +#define RGX_CR_OS7_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS7_SCRATCH1_DATA_SHIFT (0U) +#define RGX_CR_OS7_SCRATCH1_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS7_SCRATCH2 +*/ +#define RGX_CR_OS7_SCRATCH2 (0x70890U) +#define RGX_CR_OS7_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS7_SCRATCH2_DATA_SHIFT (0U) +#define RGX_CR_OS7_SCRATCH2_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_OS7_SCRATCH3 +*/ +#define RGX_CR_OS7_SCRATCH3 (0x70898U) +#define RGX_CR_OS7_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_OS7_SCRATCH3_DATA_SHIFT (0U) +#define RGX_CR_OS7_SCRATCH3_DATA_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_META_SP_MSLVDATAX +*/ +#define RGX_CR_META_SP_MSLVDATAX__META_REGISTER_UNPACKED_ACCESSES (0x3000U) +#define RGX_CR_META_SP_MSLVDATAX__META_REGISTER_UNPACKED_ACCESSES__MASKFULL 
(IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_META_SP_MSLVDATAX__META_REGISTER_UNPACKED_ACCESSES__MSLVDATAX_SHIFT (0U) +#define RGX_CR_META_SP_MSLVDATAX__META_REGISTER_UNPACKED_ACCESSES__MSLVDATAX_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_META_SP_MSLVDATAX +*/ +#define RGX_CR_META_SP_MSLVDATAX (0x0A00U) +#define RGX_CR_META_SP_MSLVDATAX_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_META_SP_MSLVDATAX_MSLVDATAX_SHIFT (0U) +#define RGX_CR_META_SP_MSLVDATAX_MSLVDATAX_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_META_SP_MSLVDATAT +*/ +#define RGX_CR_META_SP_MSLVDATAT__META_REGISTER_UNPACKED_ACCESSES (0x3040U) +#define RGX_CR_META_SP_MSLVDATAT__META_REGISTER_UNPACKED_ACCESSES__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_META_SP_MSLVDATAT__META_REGISTER_UNPACKED_ACCESSES__MSLVDATAT_SHIFT (0U) +#define RGX_CR_META_SP_MSLVDATAT__META_REGISTER_UNPACKED_ACCESSES__MSLVDATAT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_META_SP_MSLVDATAT +*/ +#define RGX_CR_META_SP_MSLVDATAT (0x0A08U) +#define RGX_CR_META_SP_MSLVDATAT_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_META_SP_MSLVDATAT_MSLVDATAT_SHIFT (0U) +#define RGX_CR_META_SP_MSLVDATAT_MSLVDATAT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_META_SP_MSLVCTRL0 +*/ +#define RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES (0x3080U) +#define RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES__ADDR_SHIFT (2U) +#define RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES__ADDR_CLRMSK (0x00000003U) +#define RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES__AUTOINCR_SHIFT (1U) +#define RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES__AUTOINCR_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES__AUTOINCR_EN (0x00000002U) +#define RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES__RD_SHIFT 
(0U) +#define RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES__RD_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES__RD_EN (0x00000001U) + + +/* + Register RGX_CR_META_SP_MSLVCTRL0 +*/ +#define RGX_CR_META_SP_MSLVCTRL0 (0x0A10U) +#define RGX_CR_META_SP_MSLVCTRL0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_META_SP_MSLVCTRL0_ADDR_SHIFT (2U) +#define RGX_CR_META_SP_MSLVCTRL0_ADDR_CLRMSK (0x00000003U) +#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_SHIFT (1U) +#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_EN (0x00000002U) +#define RGX_CR_META_SP_MSLVCTRL0_RD_SHIFT (0U) +#define RGX_CR_META_SP_MSLVCTRL0_RD_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_META_SP_MSLVCTRL0_RD_EN (0x00000001U) + + +/* + Register RGX_CR_META_SP_MSLVCTRL1 +*/ +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES (0x30C0U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__MASKFULL (IMG_UINT64_C(0x00000000F7F4003F)) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__DEFERRTHREAD_SHIFT (30U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__DEFERRTHREAD_CLRMSK (0x3FFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__LOCK2_INTERLOCK_SHIFT (29U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__LOCK2_INTERLOCK_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__LOCK2_INTERLOCK_EN (0x20000000U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__ATOMIC_INTERLOCK_SHIFT (28U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__ATOMIC_INTERLOCK_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__ATOMIC_INTERLOCK_EN (0x10000000U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_SHIFT (26U) +#define 
RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_CLRMSK (0xFBFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN (0x04000000U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__COREMEM_IDLE_SHIFT (25U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__COREMEM_IDLE_CLRMSK (0xFDFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__COREMEM_IDLE_EN (0x02000000U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_SHIFT (24U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_CLRMSK (0xFEFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN (0x01000000U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__DEFERRID_SHIFT (21U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__DEFERRID_CLRMSK (0xFF1FFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__DEFERR_SHIFT (20U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__DEFERR_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__DEFERR_EN (0x00100000U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__WR_ACTIVE_SHIFT (18U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__WR_ACTIVE_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__WR_ACTIVE_EN (0x00040000U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__THREAD_SHIFT (4U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__THREAD_CLRMSK (0xFFFFFFCFU) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__TRANS_SIZE_SHIFT (2U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__TRANS_SIZE_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__BYTE_ROUND_SHIFT (0U) +#define 
RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__BYTE_ROUND_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_META_SP_MSLVCTRL1 +*/ +#define RGX_CR_META_SP_MSLVCTRL1 (0x0A18U) +#define RGX_CR_META_SP_MSLVCTRL1_MASKFULL (IMG_UINT64_C(0x00000000F7F4003F)) +#define RGX_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_SHIFT (30U) +#define RGX_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_CLRMSK (0x3FFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_SHIFT (29U) +#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_EN (0x20000000U) +#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_SHIFT (28U) +#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_EN (0x10000000U) +#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_SHIFT (26U) +#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_CLRMSK (0xFBFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN (0x04000000U) +#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_SHIFT (25U) +#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_CLRMSK (0xFDFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_EN (0x02000000U) +#define RGX_CR_META_SP_MSLVCTRL1_READY_SHIFT (24U) +#define RGX_CR_META_SP_MSLVCTRL1_READY_CLRMSK (0xFEFFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_READY_EN (0x01000000U) +#define RGX_CR_META_SP_MSLVCTRL1_DEFERRID_SHIFT (21U) +#define RGX_CR_META_SP_MSLVCTRL1_DEFERRID_CLRMSK (0xFF1FFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_SHIFT (20U) +#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_EN (0x00100000U) +#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_SHIFT (18U) +#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_EN (0x00040000U) +#define RGX_CR_META_SP_MSLVCTRL1_THREAD_SHIFT (4U) +#define RGX_CR_META_SP_MSLVCTRL1_THREAD_CLRMSK (0xFFFFFFCFU) +#define RGX_CR_META_SP_MSLVCTRL1_TRANS_SIZE_SHIFT (2U) +#define 
RGX_CR_META_SP_MSLVCTRL1_TRANS_SIZE_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_META_SP_MSLVCTRL1_BYTE_ROUND_SHIFT (0U) +#define RGX_CR_META_SP_MSLVCTRL1_BYTE_ROUND_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_META_SP_MSLVHANDSHKE +*/ +#define RGX_CR_META_SP_MSLVHANDSHKE__META_REGISTER_UNPACKED_ACCESSES (0x3280U) +#define RGX_CR_META_SP_MSLVHANDSHKE__META_REGISTER_UNPACKED_ACCESSES__MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_META_SP_MSLVHANDSHKE__META_REGISTER_UNPACKED_ACCESSES__INPUT_SHIFT (2U) +#define RGX_CR_META_SP_MSLVHANDSHKE__META_REGISTER_UNPACKED_ACCESSES__INPUT_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_META_SP_MSLVHANDSHKE__META_REGISTER_UNPACKED_ACCESSES__OUTPUT_SHIFT (0U) +#define RGX_CR_META_SP_MSLVHANDSHKE__META_REGISTER_UNPACKED_ACCESSES__OUTPUT_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_META_SP_MSLVHANDSHKE +*/ +#define RGX_CR_META_SP_MSLVHANDSHKE (0x0A50U) +#define RGX_CR_META_SP_MSLVHANDSHKE_MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_META_SP_MSLVHANDSHKE_INPUT_SHIFT (2U) +#define RGX_CR_META_SP_MSLVHANDSHKE_INPUT_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_META_SP_MSLVHANDSHKE_OUTPUT_SHIFT (0U) +#define RGX_CR_META_SP_MSLVHANDSHKE_OUTPUT_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_META_SP_MSLVT0KICK +*/ +#define RGX_CR_META_SP_MSLVT0KICK__META_REGISTER_UNPACKED_ACCESSES (0x3400U) +#define RGX_CR_META_SP_MSLVT0KICK__META_REGISTER_UNPACKED_ACCESSES__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT0KICK__META_REGISTER_UNPACKED_ACCESSES__MSLVT0KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT0KICK__META_REGISTER_UNPACKED_ACCESSES__MSLVT0KICK_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVT0KICK +*/ +#define RGX_CR_META_SP_MSLVT0KICK (0x0A80U) +#define RGX_CR_META_SP_MSLVT0KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT0KICK_MSLVT0KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT0KICK_MSLVT0KICK_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVT0KICKI +*/ 
+#define RGX_CR_META_SP_MSLVT0KICKI__META_REGISTER_UNPACKED_ACCESSES (0x3440U) +#define RGX_CR_META_SP_MSLVT0KICKI__META_REGISTER_UNPACKED_ACCESSES__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT0KICKI__META_REGISTER_UNPACKED_ACCESSES__MSLVT0KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT0KICKI__META_REGISTER_UNPACKED_ACCESSES__MSLVT0KICKI_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVT0KICKI +*/ +#define RGX_CR_META_SP_MSLVT0KICKI (0x0A88U) +#define RGX_CR_META_SP_MSLVT0KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVT1KICK +*/ +#define RGX_CR_META_SP_MSLVT1KICK__META_REGISTER_UNPACKED_ACCESSES (0x3480U) +#define RGX_CR_META_SP_MSLVT1KICK__META_REGISTER_UNPACKED_ACCESSES__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT1KICK__META_REGISTER_UNPACKED_ACCESSES__MSLVT1KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT1KICK__META_REGISTER_UNPACKED_ACCESSES__MSLVT1KICK_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVT1KICK +*/ +#define RGX_CR_META_SP_MSLVT1KICK (0x0A90U) +#define RGX_CR_META_SP_MSLVT1KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT1KICK_MSLVT1KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT1KICK_MSLVT1KICK_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVT1KICKI +*/ +#define RGX_CR_META_SP_MSLVT1KICKI__META_REGISTER_UNPACKED_ACCESSES (0x34C0U) +#define RGX_CR_META_SP_MSLVT1KICKI__META_REGISTER_UNPACKED_ACCESSES__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT1KICKI__META_REGISTER_UNPACKED_ACCESSES__MSLVT1KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT1KICKI__META_REGISTER_UNPACKED_ACCESSES__MSLVT1KICKI_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVT1KICKI +*/ +#define RGX_CR_META_SP_MSLVT1KICKI (0x0A98U) +#define 
RGX_CR_META_SP_MSLVT1KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVT2KICK +*/ +#define RGX_CR_META_SP_MSLVT2KICK__META_REGISTER_UNPACKED_ACCESSES (0x3500U) +#define RGX_CR_META_SP_MSLVT2KICK__META_REGISTER_UNPACKED_ACCESSES__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT2KICK__META_REGISTER_UNPACKED_ACCESSES__MSLVT2KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT2KICK__META_REGISTER_UNPACKED_ACCESSES__MSLVT2KICK_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVT2KICK +*/ +#define RGX_CR_META_SP_MSLVT2KICK (0x0AA0U) +#define RGX_CR_META_SP_MSLVT2KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT2KICK_MSLVT2KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT2KICK_MSLVT2KICK_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVT2KICKI +*/ +#define RGX_CR_META_SP_MSLVT2KICKI__META_REGISTER_UNPACKED_ACCESSES (0x3540U) +#define RGX_CR_META_SP_MSLVT2KICKI__META_REGISTER_UNPACKED_ACCESSES__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT2KICKI__META_REGISTER_UNPACKED_ACCESSES__MSLVT2KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT2KICKI__META_REGISTER_UNPACKED_ACCESSES__MSLVT2KICKI_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVT2KICKI +*/ +#define RGX_CR_META_SP_MSLVT2KICKI (0x0AA8U) +#define RGX_CR_META_SP_MSLVT2KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVT3KICK +*/ +#define RGX_CR_META_SP_MSLVT3KICK__META_REGISTER_UNPACKED_ACCESSES (0x3580U) +#define RGX_CR_META_SP_MSLVT3KICK__META_REGISTER_UNPACKED_ACCESSES__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT3KICK__META_REGISTER_UNPACKED_ACCESSES__MSLVT3KICK_SHIFT 
(0U) +#define RGX_CR_META_SP_MSLVT3KICK__META_REGISTER_UNPACKED_ACCESSES__MSLVT3KICK_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVT3KICK +*/ +#define RGX_CR_META_SP_MSLVT3KICK (0x0AB0U) +#define RGX_CR_META_SP_MSLVT3KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT3KICK_MSLVT3KICK_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT3KICK_MSLVT3KICK_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVT3KICKI +*/ +#define RGX_CR_META_SP_MSLVT3KICKI__META_REGISTER_UNPACKED_ACCESSES (0x35C0U) +#define RGX_CR_META_SP_MSLVT3KICKI__META_REGISTER_UNPACKED_ACCESSES__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT3KICKI__META_REGISTER_UNPACKED_ACCESSES__MSLVT3KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT3KICKI__META_REGISTER_UNPACKED_ACCESSES__MSLVT3KICKI_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVT3KICKI +*/ +#define RGX_CR_META_SP_MSLVT3KICKI (0x0AB8U) +#define RGX_CR_META_SP_MSLVT3KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_SHIFT (0U) +#define RGX_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_META_SP_MSLVRST +*/ +#define RGX_CR_META_SP_MSLVRST__META_REGISTER_UNPACKED_ACCESSES (0x3600U) +#define RGX_CR_META_SP_MSLVRST__META_REGISTER_UNPACKED_ACCESSES__MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_META_SP_MSLVRST__META_REGISTER_UNPACKED_ACCESSES__SOFTRESET_SHIFT (0U) +#define RGX_CR_META_SP_MSLVRST__META_REGISTER_UNPACKED_ACCESSES__SOFTRESET_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_META_SP_MSLVRST__META_REGISTER_UNPACKED_ACCESSES__SOFTRESET_EN (0x00000001U) + + +/* + Register RGX_CR_META_SP_MSLVRST +*/ +#define RGX_CR_META_SP_MSLVRST (0x0AC0U) +#define RGX_CR_META_SP_MSLVRST_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_META_SP_MSLVRST_SOFTRESET_SHIFT (0U) +#define RGX_CR_META_SP_MSLVRST_SOFTRESET_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_META_SP_MSLVRST_SOFTRESET_EN (0x00000001U) + + 
+/* + Register RGX_CR_META_SP_MSLVIRQSTATUS +*/ +#define RGX_CR_META_SP_MSLVIRQSTATUS__META_REGISTER_UNPACKED_ACCESSES (0x3640U) +#define RGX_CR_META_SP_MSLVIRQSTATUS__META_REGISTER_UNPACKED_ACCESSES__MASKFULL (IMG_UINT64_C(0x000000000000000C)) +#define RGX_CR_META_SP_MSLVIRQSTATUS__META_REGISTER_UNPACKED_ACCESSES__TRIGVECT3_SHIFT (3U) +#define RGX_CR_META_SP_MSLVIRQSTATUS__META_REGISTER_UNPACKED_ACCESSES__TRIGVECT3_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_META_SP_MSLVIRQSTATUS__META_REGISTER_UNPACKED_ACCESSES__TRIGVECT3_EN (0x00000008U) +#define RGX_CR_META_SP_MSLVIRQSTATUS__META_REGISTER_UNPACKED_ACCESSES__TRIGVECT2_SHIFT (2U) +#define RGX_CR_META_SP_MSLVIRQSTATUS__META_REGISTER_UNPACKED_ACCESSES__TRIGVECT2_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_META_SP_MSLVIRQSTATUS__META_REGISTER_UNPACKED_ACCESSES__TRIGVECT2_EN (0x00000004U) + + +/* + Register RGX_CR_META_SP_MSLVIRQSTATUS +*/ +#define RGX_CR_META_SP_MSLVIRQSTATUS (0x0AC8U) +#define RGX_CR_META_SP_MSLVIRQSTATUS_MASKFULL (IMG_UINT64_C(0x000000000000000C)) +#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_SHIFT (3U) +#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_EN (0x00000008U) +#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_SHIFT (2U) +#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN (0x00000004U) + + +/* + Register RGX_CR_META_SP_MSLVIRQENABLE +*/ +#define RGX_CR_META_SP_MSLVIRQENABLE__META_REGISTER_UNPACKED_ACCESSES (0x3680U) +#define RGX_CR_META_SP_MSLVIRQENABLE__META_REGISTER_UNPACKED_ACCESSES__MASKFULL (IMG_UINT64_C(0x000000000000000C)) +#define RGX_CR_META_SP_MSLVIRQENABLE__META_REGISTER_UNPACKED_ACCESSES__EVENT1_SHIFT (3U) +#define RGX_CR_META_SP_MSLVIRQENABLE__META_REGISTER_UNPACKED_ACCESSES__EVENT1_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_META_SP_MSLVIRQENABLE__META_REGISTER_UNPACKED_ACCESSES__EVENT1_EN (0x00000008U) +#define 
RGX_CR_META_SP_MSLVIRQENABLE__META_REGISTER_UNPACKED_ACCESSES__EVENT0_SHIFT (2U) +#define RGX_CR_META_SP_MSLVIRQENABLE__META_REGISTER_UNPACKED_ACCESSES__EVENT0_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_META_SP_MSLVIRQENABLE__META_REGISTER_UNPACKED_ACCESSES__EVENT0_EN (0x00000004U) + + +/* + Register RGX_CR_META_SP_MSLVIRQENABLE +*/ +#define RGX_CR_META_SP_MSLVIRQENABLE (0x0AD0U) +#define RGX_CR_META_SP_MSLVIRQENABLE_MASKFULL (IMG_UINT64_C(0x000000000000000C)) +#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_SHIFT (3U) +#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_EN (0x00000008U) +#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_SHIFT (2U) +#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_EN (0x00000004U) + + +/* + Register RGX_CR_META_SP_MSLVIRQLEVEL +*/ +#define RGX_CR_META_SP_MSLVIRQLEVEL__META_REGISTER_UNPACKED_ACCESSES (0x36C0U) +#define RGX_CR_META_SP_MSLVIRQLEVEL__META_REGISTER_UNPACKED_ACCESSES__MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_META_SP_MSLVIRQLEVEL__META_REGISTER_UNPACKED_ACCESSES__MODE_SHIFT (0U) +#define RGX_CR_META_SP_MSLVIRQLEVEL__META_REGISTER_UNPACKED_ACCESSES__MODE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_META_SP_MSLVIRQLEVEL__META_REGISTER_UNPACKED_ACCESSES__MODE_EN (0x00000001U) + + +/* + Register RGX_CR_META_SP_MSLVIRQLEVEL +*/ +#define RGX_CR_META_SP_MSLVIRQLEVEL (0x0AD8U) +#define RGX_CR_META_SP_MSLVIRQLEVEL_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_SHIFT (0U) +#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_EN (0x00000001U) + + +/* + Register group: RGX_CR_FWCORE_DMI_RESERVED0, with 32 repeats +*/ +#define RGX_CR_FWCORE_DMI_RESERVED0_REPEATCOUNT (32U) +/* + Register RGX_CR_FWCORE_DMI_RESERVED00 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED00 (0x8FC0U) +#define RGX_CR_FWCORE_DMI_RESERVED00_MASKFULL 
(IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED01 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED01 (0x8FC8U) +#define RGX_CR_FWCORE_DMI_RESERVED01_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED02 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED02 (0x8FD0U) +#define RGX_CR_FWCORE_DMI_RESERVED02_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED03 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED03 (0x8FD8U) +#define RGX_CR_FWCORE_DMI_RESERVED03_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_DATA0 +*/ +#define RGX_CR_FWCORE_DMI_DATA0 (0x90C0U) +#define RGX_CR_FWCORE_DMI_DATA0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FWCORE_DMI_DATA0_VAL_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_DATA0_VAL_CLRMSK (0x00000000U) + + +/* + Register group: RGX_CR_FWCORE_DMI_RESERVED1, with 7 repeats +*/ +#define RGX_CR_FWCORE_DMI_RESERVED1_REPEATCOUNT (7U) +/* + Register RGX_CR_FWCORE_DMI_RESERVED10 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED10 (0x90C8U) +#define RGX_CR_FWCORE_DMI_RESERVED10_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED11 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED11 (0x90D0U) +#define RGX_CR_FWCORE_DMI_RESERVED11_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED12 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED12 (0x90D8U) +#define RGX_CR_FWCORE_DMI_RESERVED12_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED13 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED13 (0x90E0U) +#define RGX_CR_FWCORE_DMI_RESERVED13_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED14 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED14 (0x90E8U) +#define RGX_CR_FWCORE_DMI_RESERVED14_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED15 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED15 (0x90F0U) +#define 
RGX_CR_FWCORE_DMI_RESERVED15_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED16 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED16 (0x90F8U) +#define RGX_CR_FWCORE_DMI_RESERVED16_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_DATA1 +*/ +#define RGX_CR_FWCORE_DMI_DATA1 (0x9100U) +#define RGX_CR_FWCORE_DMI_DATA1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FWCORE_DMI_DATA1_VAL_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_DATA1_VAL_CLRMSK (0x00000000U) + + +/* + Register group: RGX_CR_FWCORE_DMI_RESERVED2, with 7 repeats +*/ +#define RGX_CR_FWCORE_DMI_RESERVED2_REPEATCOUNT (7U) +/* + Register RGX_CR_FWCORE_DMI_RESERVED20 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED20 (0x9108U) +#define RGX_CR_FWCORE_DMI_RESERVED20_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED21 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED21 (0x9110U) +#define RGX_CR_FWCORE_DMI_RESERVED21_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED22 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED22 (0x9118U) +#define RGX_CR_FWCORE_DMI_RESERVED22_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED23 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED23 (0x9120U) +#define RGX_CR_FWCORE_DMI_RESERVED23_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register group: RGX_CR_FWCORE_DMI_RESERVED3, with 80 repeats +*/ +#define RGX_CR_FWCORE_DMI_RESERVED3_REPEATCOUNT (80U) +/* + Register RGX_CR_FWCORE_DMI_RESERVED30 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED30 (0x9140U) +#define RGX_CR_FWCORE_DMI_RESERVED30_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED31 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED31 (0x9148U) +#define RGX_CR_FWCORE_DMI_RESERVED31_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED32 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED32 (0x9150U) +#define 
RGX_CR_FWCORE_DMI_RESERVED32_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED33 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED33 (0x9158U) +#define RGX_CR_FWCORE_DMI_RESERVED33_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED34 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED34 (0x9160U) +#define RGX_CR_FWCORE_DMI_RESERVED34_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED35 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED35 (0x9168U) +#define RGX_CR_FWCORE_DMI_RESERVED35_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED36 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED36 (0x9170U) +#define RGX_CR_FWCORE_DMI_RESERVED36_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED37 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED37 (0x9178U) +#define RGX_CR_FWCORE_DMI_RESERVED37_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED38 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED38 (0x9180U) +#define RGX_CR_FWCORE_DMI_RESERVED38_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED39 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED39 (0x9188U) +#define RGX_CR_FWCORE_DMI_RESERVED39_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED310 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED310 (0x9190U) +#define RGX_CR_FWCORE_DMI_RESERVED310_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED311 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED311 (0x9198U) +#define RGX_CR_FWCORE_DMI_RESERVED311_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED312 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED312 (0x91A0U) +#define RGX_CR_FWCORE_DMI_RESERVED312_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED313 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED313 
(0x91A8U) +#define RGX_CR_FWCORE_DMI_RESERVED313_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED314 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED314 (0x91B0U) +#define RGX_CR_FWCORE_DMI_RESERVED314_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED315 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED315 (0x91B8U) +#define RGX_CR_FWCORE_DMI_RESERVED315_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED316 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED316 (0x91C0U) +#define RGX_CR_FWCORE_DMI_RESERVED316_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED317 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED317 (0x91C8U) +#define RGX_CR_FWCORE_DMI_RESERVED317_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED318 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED318 (0x91D0U) +#define RGX_CR_FWCORE_DMI_RESERVED318_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED319 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED319 (0x91D8U) +#define RGX_CR_FWCORE_DMI_RESERVED319_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED320 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED320 (0x91E0U) +#define RGX_CR_FWCORE_DMI_RESERVED320_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED321 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED321 (0x91E8U) +#define RGX_CR_FWCORE_DMI_RESERVED321_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED322 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED322 (0x91F0U) +#define RGX_CR_FWCORE_DMI_RESERVED322_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED323 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED323 (0x91F8U) +#define RGX_CR_FWCORE_DMI_RESERVED323_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED324 +*/ 
+#define RGX_CR_FWCORE_DMI_RESERVED324 (0x9200U) +#define RGX_CR_FWCORE_DMI_RESERVED324_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED325 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED325 (0x9208U) +#define RGX_CR_FWCORE_DMI_RESERVED325_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED326 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED326 (0x9210U) +#define RGX_CR_FWCORE_DMI_RESERVED326_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED327 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED327 (0x9218U) +#define RGX_CR_FWCORE_DMI_RESERVED327_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED328 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED328 (0x9220U) +#define RGX_CR_FWCORE_DMI_RESERVED328_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED329 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED329 (0x9228U) +#define RGX_CR_FWCORE_DMI_RESERVED329_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED330 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED330 (0x9230U) +#define RGX_CR_FWCORE_DMI_RESERVED330_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED331 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED331 (0x9238U) +#define RGX_CR_FWCORE_DMI_RESERVED331_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_DMCONTROL +*/ +#define RGX_CR_FWCORE_DMI_DMCONTROL (0x93C0U) +#define RGX_CR_FWCORE_DMI_DMCONTROL_MASKFULL (IMG_UINT64_C(0x00000000D0000003)) +#define RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_SHIFT (31U) +#define RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_EN (0x80000000U) +#define RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_SHIFT (30U) +#define RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_CLRMSK (0xBFFFFFFFU) +#define RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_EN (0x40000000U) +#define 
RGX_CR_FWCORE_DMI_DMCONTROL_ACKHAVERESET_SHIFT (28U) +#define RGX_CR_FWCORE_DMI_DMCONTROL_ACKHAVERESET_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_FWCORE_DMI_DMCONTROL_ACKHAVERESET_EN (0x10000000U) +#define RGX_CR_FWCORE_DMI_DMCONTROL_NDMRESET_SHIFT (1U) +#define RGX_CR_FWCORE_DMI_DMCONTROL_NDMRESET_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_FWCORE_DMI_DMCONTROL_NDMRESET_EN (0x00000002U) +#define RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN (0x00000001U) + + +/* + Register group: RGX_CR_FWCORE_DMI_RESERVED4, with 7 repeats +*/ +#define RGX_CR_FWCORE_DMI_RESERVED4_REPEATCOUNT (7U) +/* + Register RGX_CR_FWCORE_DMI_RESERVED40 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED40 (0x93C8U) +#define RGX_CR_FWCORE_DMI_RESERVED40_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_RESERVED41 +*/ +#define RGX_CR_FWCORE_DMI_RESERVED41 (0x93D0U) +#define RGX_CR_FWCORE_DMI_RESERVED41_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_DMSTATUS +*/ +#define RGX_CR_FWCORE_DMI_DMSTATUS (0x9400U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_MASKFULL (IMG_UINT64_C(0x00000000004FFFFF)) +#define RGX_CR_FWCORE_DMI_DMSTATUS_IMPEBREAK_SHIFT (22U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_IMPEBREAK_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_IMPEBREAK_EN (0x00400000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHAVERESET_SHIFT (19U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHAVERESET_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHAVERESET_EN (0x00080000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHAVERESET_SHIFT (18U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHAVERESET_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHAVERESET_EN (0x00040000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_SHIFT (17U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN 
(0x00020000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRESUMEACK_SHIFT (16U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRESUMEACK_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRESUMEACK_EN (0x00010000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLNONEXISTENT_SHIFT (15U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLNONEXISTENT_CLRMSK (0xFFFF7FFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLNONEXISTENT_EN (0x00008000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYNONEXISTENT_SHIFT (14U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYNONEXISTENT_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYNONEXISTENT_EN (0x00004000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLUNAVAIL_SHIFT (13U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLUNAVAIL_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLUNAVAIL_EN (0x00002000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYUNAVAIL_SHIFT (12U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYUNAVAIL_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYUNAVAIL_EN (0x00001000U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRUNNING_SHIFT (11U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRUNNING_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRUNNING_EN (0x00000800U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRUNNING_SHIFT (10U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRUNNING_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRUNNING_EN (0x00000400U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_SHIFT (9U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN (0x00000200U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHALTED_SHIFT (8U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHALTED_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHALTED_EN (0x00000100U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHENTICATED_SHIFT (7U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHENTICATED_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHENTICATED_EN (0x00000080U) +#define 
RGX_CR_FWCORE_DMI_DMSTATUS_AUTHBUSY_SHIFT (6U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHBUSY_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHBUSY_EN (0x00000040U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_HASRESETHALTREQ_SHIFT (5U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_HASRESETHALTREQ_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_HASRESETHALTREQ_EN (0x00000020U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_CONFSTRPTRVALID_SHIFT (4U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_CONFSTRPTRVALID_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_FWCORE_DMI_DMSTATUS_CONFSTRPTRVALID_EN (0x00000010U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_VERSION_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_DMSTATUS_VERSION_CLRMSK (0xFFFFFFF0U) + + +/* + Register RGX_CR_FWCORE_DMI_ABSTRACTCS +*/ +#define RGX_CR_FWCORE_DMI_ABSTRACTCS (0x9540U) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS_MASKFULL (IMG_UINT64_C(0x000000001F00170F)) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS_PROGBUFSIZE_SHIFT (24U) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS_PROGBUFSIZE_CLRMSK (0xE0FFFFFFU) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_SHIFT (12U) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN (0x00001000U) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_SHIFT (8U) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_CLRMSK (0xFFFFF8FFU) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS_DATACOUNT_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_ABSTRACTCS_DATACOUNT_CLRMSK (0xFFFFFFF0U) + + +/* + Register RGX_CR_FWCORE_DMI_COMMAND +*/ +#define RGX_CR_FWCORE_DMI_COMMAND (0x9580U) +#define RGX_CR_FWCORE_DMI_COMMAND_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT (24U) +#define RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_CLRMSK (0x00FFFFFFU) +#define RGX_CR_FWCORE_DMI_COMMAND_CONTROL_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_COMMAND_CONTROL_CLRMSK (0xFF000000U) + + +/* + Register RGX_CR_FWCORE_DMI_SBCS +*/ +#define RGX_CR_FWCORE_DMI_SBCS (0x9DC0U) +#define RGX_CR_FWCORE_DMI_SBCS_MASKFULL 
(IMG_UINT64_C(0x00000000E07FFFFF)) +#define RGX_CR_FWCORE_DMI_SBCS_SBVERSION_SHIFT (29U) +#define RGX_CR_FWCORE_DMI_SBCS_SBVERSION_CLRMSK (0x1FFFFFFFU) +#define RGX_CR_FWCORE_DMI_SBCS_SBBUSYERROR_SHIFT (22U) +#define RGX_CR_FWCORE_DMI_SBCS_SBBUSYERROR_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_FWCORE_DMI_SBCS_SBBUSYERROR_EN (0x00400000U) +#define RGX_CR_FWCORE_DMI_SBCS_SBBUSY_SHIFT (21U) +#define RGX_CR_FWCORE_DMI_SBCS_SBBUSY_CLRMSK (0xFFDFFFFFU) +#define RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN (0x00200000U) +#define RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_SHIFT (20U) +#define RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_EN (0x00100000U) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT (17U) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS_CLRMSK (0xFFF1FFFFU) +#define RGX_CR_FWCORE_DMI_SBCS_SBAUTOINCREMENT_SHIFT (16U) +#define RGX_CR_FWCORE_DMI_SBCS_SBAUTOINCREMENT_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_FWCORE_DMI_SBCS_SBAUTOINCREMENT_EN (0x00010000U) +#define RGX_CR_FWCORE_DMI_SBCS_SBREADONDATA_SHIFT (15U) +#define RGX_CR_FWCORE_DMI_SBCS_SBREADONDATA_CLRMSK (0xFFFF7FFFU) +#define RGX_CR_FWCORE_DMI_SBCS_SBREADONDATA_EN (0x00008000U) +#define RGX_CR_FWCORE_DMI_SBCS_SBERROR_SHIFT (12U) +#define RGX_CR_FWCORE_DMI_SBCS_SBERROR_CLRMSK (0xFFFF8FFFU) +#define RGX_CR_FWCORE_DMI_SBCS_SBASIZE_SHIFT (5U) +#define RGX_CR_FWCORE_DMI_SBCS_SBASIZE_CLRMSK (0xFFFFF01FU) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS128_SHIFT (4U) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS128_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS128_EN (0x00000010U) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS64_SHIFT (3U) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS64_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS64_EN (0x00000008U) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS32_SHIFT (2U) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS32_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS32_EN (0x00000004U) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS16_SHIFT (1U) 
+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS16_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS16_EN (0x00000002U) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS8_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS8_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS8_EN (0x00000001U) + + +/* + Register RGX_CR_FWCORE_DMI_SBADDRESS0 +*/ +#define RGX_CR_FWCORE_DMI_SBADDRESS0 (0x9E00U) +#define RGX_CR_FWCORE_DMI_SBADDRESS0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FWCORE_DMI_SBADDRESS0_ADDRESS_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_SBADDRESS0_ADDRESS_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_FWCORE_DMI_SBDATA0 +*/ +#define RGX_CR_FWCORE_DMI_SBDATA0 (0x9EC0U) +#define RGX_CR_FWCORE_DMI_SBDATA0_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_SBDATA1 +*/ +#define RGX_CR_FWCORE_DMI_SBDATA1 (0x9F00U) +#define RGX_CR_FWCORE_DMI_SBDATA1_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_SBDATA2 +*/ +#define RGX_CR_FWCORE_DMI_SBDATA2 (0x9F40U) +#define RGX_CR_FWCORE_DMI_SBDATA2_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_SBDATA3 +*/ +#define RGX_CR_FWCORE_DMI_SBDATA3 (0x9F80U) +#define RGX_CR_FWCORE_DMI_SBDATA3_MASKFULL (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_FWCORE_DMI_HALTSUM0 +*/ +#define RGX_CR_FWCORE_DMI_HALTSUM0 (0x9FC0U) +#define RGX_CR_FWCORE_DMI_HALTSUM0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FWCORE_DMI_HALTSUM0_VAL_SHIFT (0U) +#define RGX_CR_FWCORE_DMI_HALTSUM0_VAL_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_MTS_SCHEDULE +*/ +#define RGX_CR_MTS_SCHEDULE (0x0B00U) +#define RGX_CR_MTS_SCHEDULE_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE_PRIORITY_SHIFT (6U) 
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE_DM_DM_ALL (0x0000000FU) + + +/* + Register RGX_CR_MTS_SCHEDULE1 +*/ +#define RGX_CR_MTS_SCHEDULE1 (0x10B00U) +#define RGX_CR_MTS_SCHEDULE1_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE1_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE1_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE1_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE1_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE1_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE1_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT3 (0x000000C0U) +#define 
RGX_CR_MTS_SCHEDULE1_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE1_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE1_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE1_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE1_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE1_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE1_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE1_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE1_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE1_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE1_DM_DM_ALL (0x0000000FU) + + +/* + Register RGX_CR_MTS_SCHEDULE2 +*/ +#define RGX_CR_MTS_SCHEDULE2 (0x20B00U) +#define RGX_CR_MTS_SCHEDULE2_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE2_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE2_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE2_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE2_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE2_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE2_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE2_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE2_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE2_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE2_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE2_TASK_SHIFT (4U) +#define 
RGX_CR_MTS_SCHEDULE2_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE2_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE2_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE2_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE2_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE2_DM_DM_ALL (0x0000000FU) + + +/* + Register RGX_CR_MTS_SCHEDULE3 +*/ +#define RGX_CR_MTS_SCHEDULE3 (0x30B00U) +#define RGX_CR_MTS_SCHEDULE3_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE3_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE3_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE3_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE3_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE3_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE3_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE3_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE3_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE3_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE3_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE3_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE3_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE3_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE3_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE3_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE3_DM_CLRMSK (0xFFFFFFF0U) +#define 
RGX_CR_MTS_SCHEDULE3_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE3_DM_DM_ALL (0x0000000FU) + + +/* + Register RGX_CR_MTS_SCHEDULE4 +*/ +#define RGX_CR_MTS_SCHEDULE4 (0x40B00U) +#define RGX_CR_MTS_SCHEDULE4_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE4_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE4_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE4_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE4_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE4_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE4_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE4_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE4_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE4_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE4_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE4_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE4_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE4_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE4_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE4_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE4_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM4 (0x00000004U) +#define 
RGX_CR_MTS_SCHEDULE4_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE4_DM_DM_ALL (0x0000000FU) + + +/* + Register RGX_CR_MTS_SCHEDULE5 +*/ +#define RGX_CR_MTS_SCHEDULE5 (0x50B00U) +#define RGX_CR_MTS_SCHEDULE5_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE5_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE5_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE5_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE5_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE5_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE5_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE5_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE5_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE5_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE5_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE5_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE5_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE5_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE5_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE5_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE5_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE5_DM_DM_ALL (0x0000000FU) + + +/* + Register RGX_CR_MTS_SCHEDULE6 +*/ +#define RGX_CR_MTS_SCHEDULE6 
(0x60B00U) +#define RGX_CR_MTS_SCHEDULE6_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE6_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE6_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE6_HOST_BG_TIMER (0x00000000U) +#define RGX_CR_MTS_SCHEDULE6_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE6_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE6_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE6_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE6_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE6_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE6_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE6_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE6_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE6_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE6_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE6_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE6_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE6_DM_DM_ALL (0x0000000FU) + + +/* + Register RGX_CR_MTS_SCHEDULE7 +*/ +#define RGX_CR_MTS_SCHEDULE7 (0x70B00U) +#define RGX_CR_MTS_SCHEDULE7_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_MTS_SCHEDULE7_HOST_SHIFT (8U) +#define RGX_CR_MTS_SCHEDULE7_HOST_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_MTS_SCHEDULE7_HOST_BG_TIMER (0x00000000U) +#define 
RGX_CR_MTS_SCHEDULE7_HOST_HOST (0x00000100U) +#define RGX_CR_MTS_SCHEDULE7_PRIORITY_SHIFT (6U) +#define RGX_CR_MTS_SCHEDULE7_PRIORITY_CLRMSK (0xFFFFFF3FU) +#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT1 (0x00000040U) +#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT2 (0x00000080U) +#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT3 (0x000000C0U) +#define RGX_CR_MTS_SCHEDULE7_CONTEXT_SHIFT (5U) +#define RGX_CR_MTS_SCHEDULE7_CONTEXT_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_MTS_SCHEDULE7_CONTEXT_BGCTX (0x00000000U) +#define RGX_CR_MTS_SCHEDULE7_CONTEXT_INTCTX (0x00000020U) +#define RGX_CR_MTS_SCHEDULE7_TASK_SHIFT (4U) +#define RGX_CR_MTS_SCHEDULE7_TASK_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_MTS_SCHEDULE7_TASK_NON_COUNTED (0x00000000U) +#define RGX_CR_MTS_SCHEDULE7_TASK_COUNTED (0x00000010U) +#define RGX_CR_MTS_SCHEDULE7_DM_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE7_DM_CLRMSK (0xFFFFFFF0U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM0 (0x00000000U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM1 (0x00000001U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM2 (0x00000002U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM3 (0x00000003U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM4 (0x00000004U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM5 (0x00000005U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM6 (0x00000006U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM7 (0x00000007U) +#define RGX_CR_MTS_SCHEDULE7_DM_DM_ALL (0x0000000FU) + + +/* + Register RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC +*/ +#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC (0x0B30U) +#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT (0U) +#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC +*/ +#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC (0x0B38U) +#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT (0U) +#define 
RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC +*/ +#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC (0x0B40U) +#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT (0U) +#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC +*/ +#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC (0x0B48U) +#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT (0U) +#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_MTS_GARTEN_WRAPPER_CONFIG +*/ +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG (0x0B50U) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S8_CPR__MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_SHIFT (1U) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_EN (0x00000002U) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_SHIFT (0U) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META (0x00000000U) +#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_MTS (0x00000001U) + + +/* + Register RGX_CR_MTS_DM0_INTERRUPT_ENABLE +*/ +#define RGX_CR_MTS_DM0_INTERRUPT_ENABLE (0x0B58U) +#define RGX_CR_MTS_DM0_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MTS_DM0_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) +#define RGX_CR_MTS_DM0_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_MTS_DM1_INTERRUPT_ENABLE +*/ +#define RGX_CR_MTS_DM1_INTERRUPT_ENABLE (0x0B60U) +#define 
RGX_CR_MTS_DM1_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MTS_DM1_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) +#define RGX_CR_MTS_DM1_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_MTS_DM2_INTERRUPT_ENABLE +*/ +#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE (0x0B68U) +#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) +#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_MTS_DM3_INTERRUPT_ENABLE +*/ +#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE (0x0B70U) +#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) +#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_MTS_DM4_INTERRUPT_ENABLE +*/ +#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE (0x0B78U) +#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) +#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_MTS_DM5_INTERRUPT_ENABLE +*/ +#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE (0x0B80U) +#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) +#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_MTS_DM6_INTERRUPT_ENABLE +*/ +#define RGX_CR_MTS_DM6_INTERRUPT_ENABLE (0xD138U) +#define RGX_CR_MTS_DM6_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MTS_DM6_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) +#define RGX_CR_MTS_DM6_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_MTS_DM7_INTERRUPT_ENABLE +*/ +#define RGX_CR_MTS_DM7_INTERRUPT_ENABLE (0xD140U) +#define 
RGX_CR_MTS_DM7_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MTS_DM7_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) +#define RGX_CR_MTS_DM7_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_MTS_INTCTX +*/ +#define RGX_CR_MTS_INTCTX (0x0B98U) +#define RGX_CR_MTS_INTCTX_MASKFULL (IMG_UINT64_C(0x000000003FC0FFFF)) +#define RGX_CR_MTS_INTCTX_DM_HOST_SCHEDULE_SHIFT (22U) +#define RGX_CR_MTS_INTCTX_DM_HOST_SCHEDULE_CLRMSK (0xC03FFFFFU) +#define RGX_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_SHIFT (8U) +#define RGX_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_CLRMSK (0xFFFF00FFU) +#define RGX_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_SHIFT (0U) +#define RGX_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_MTS_BGCTX +*/ +#define RGX_CR_MTS_BGCTX (0x0BA0U) +#define RGX_CR_MTS_BGCTX_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_SHIFT (0U) +#define RGX_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE +*/ +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE (0x0BA8U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_SHIFT (56U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_CLRMSK (IMG_UINT64_C(0x00FFFFFFFFFFFFFF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_SHIFT (48U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_CLRMSK (IMG_UINT64_C(0xFF00FFFFFFFFFFFF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_SHIFT (40U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_CLRMSK (IMG_UINT64_C(0xFFFF00FFFFFFFFFF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_SHIFT (32U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_CLRMSK (IMG_UINT64_C(0xFFFFFF00FFFFFFFF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_SHIFT (24U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00FFFFFF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_SHIFT 
(16U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF00FFFF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_SHIFT (8U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF)) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_SHIFT (0U) +#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_MTS_GPU_INT_STATUS +*/ +#define RGX_CR_MTS_GPU_INT_STATUS (0x0BB0U) +#define RGX_CR_MTS_GPU_INT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_MTS_GPU_INT_STATUS_STATUS_SHIFT (0U) +#define RGX_CR_MTS_GPU_INT_STATUS_STATUS_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_IRQ_OS0_EVENT_STATUS +*/ +#define RGX_CR_IRQ_OS0_EVENT_STATUS (0x0BD0U) +#define RGX_CR_IRQ_OS0_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_IRQ_OS0_EVENT_CLEAR +*/ +#define RGX_CR_IRQ_OS0_EVENT_CLEAR (0x0BE0U) +#define RGX_CR_IRQ_OS0_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_IRQ_OS1_EVENT_STATUS +*/ +#define RGX_CR_IRQ_OS1_EVENT_STATUS (0x10BD0U) +#define RGX_CR_IRQ_OS1_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS1_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS1_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_IRQ_OS1_EVENT_CLEAR +*/ +#define RGX_CR_IRQ_OS1_EVENT_CLEAR (0x10BE0U) +#define RGX_CR_IRQ_OS1_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_IRQ_OS2_EVENT_STATUS +*/ +#define RGX_CR_IRQ_OS2_EVENT_STATUS (0x20BD0U) +#define 
RGX_CR_IRQ_OS2_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS2_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS2_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_IRQ_OS2_EVENT_CLEAR +*/ +#define RGX_CR_IRQ_OS2_EVENT_CLEAR (0x20BE0U) +#define RGX_CR_IRQ_OS2_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_IRQ_OS3_EVENT_STATUS +*/ +#define RGX_CR_IRQ_OS3_EVENT_STATUS (0x30BD0U) +#define RGX_CR_IRQ_OS3_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS3_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS3_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_IRQ_OS3_EVENT_CLEAR +*/ +#define RGX_CR_IRQ_OS3_EVENT_CLEAR (0x30BE0U) +#define RGX_CR_IRQ_OS3_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_IRQ_OS4_EVENT_STATUS +*/ +#define RGX_CR_IRQ_OS4_EVENT_STATUS (0x40BD0U) +#define RGX_CR_IRQ_OS4_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS4_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS4_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_IRQ_OS4_EVENT_CLEAR +*/ +#define RGX_CR_IRQ_OS4_EVENT_CLEAR (0x40BE0U) +#define RGX_CR_IRQ_OS4_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_IRQ_OS5_EVENT_STATUS +*/ +#define RGX_CR_IRQ_OS5_EVENT_STATUS (0x50BD0U) +#define RGX_CR_IRQ_OS5_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS5_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS5_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU) + + +/* + Register 
RGX_CR_IRQ_OS5_EVENT_CLEAR +*/ +#define RGX_CR_IRQ_OS5_EVENT_CLEAR (0x50BE0U) +#define RGX_CR_IRQ_OS5_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_IRQ_OS6_EVENT_STATUS +*/ +#define RGX_CR_IRQ_OS6_EVENT_STATUS (0x60BD0U) +#define RGX_CR_IRQ_OS6_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS6_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS6_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_IRQ_OS6_EVENT_CLEAR +*/ +#define RGX_CR_IRQ_OS6_EVENT_CLEAR (0x60BE0U) +#define RGX_CR_IRQ_OS6_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_IRQ_OS7_EVENT_STATUS +*/ +#define RGX_CR_IRQ_OS7_EVENT_STATUS (0x70BD0U) +#define RGX_CR_IRQ_OS7_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS7_EVENT_STATUS_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS7_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_IRQ_OS7_EVENT_CLEAR +*/ +#define RGX_CR_IRQ_OS7_EVENT_CLEAR (0x70BE0U) +#define RGX_CR_IRQ_OS7_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_SHIFT (0U) +#define RGX_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU) + + +/* + Register RGX_CR_MTS_SCHEDULE_ENABLE +*/ +#define RGX_CR_MTS_SCHEDULE_ENABLE (0x0BD8U) +#define RGX_CR_MTS_SCHEDULE_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_MTS_SCHEDULE_ENABLE_MASK_SHIFT (0U) +#define RGX_CR_MTS_SCHEDULE_ENABLE_MASK_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_FWCORE_BOOT +*/ +#define RGX_CR_FWCORE_BOOT (0x70C0U) +#define RGX_CR_FWCORE_BOOT_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_FWCORE_BOOT_MODE_SHIFT (0U) +#define RGX_CR_FWCORE_BOOT_MODE_CLRMSK 
(0xFFFFFFFEU) +#define RGX_CR_FWCORE_BOOT_MODE_EN (0x00000001U) + + +/* + Register RGX_CR_META_BOOT +*/ +#define RGX_CR_META_BOOT (0x0BF8U) +#define RGX_CR_META_BOOT_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_META_BOOT_MODE_SHIFT (0U) +#define RGX_CR_META_BOOT_MODE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_META_BOOT_MODE_EN (0x00000001U) + + +/* + Register RGX_CR_GARTEN_SLC +*/ +#define RGX_CR_GARTEN_SLC (0x0BB8U) +#define RGX_CR_GARTEN_SLC_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_SHIFT (0U) +#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_EN (0x00000001U) + + +/* + Register RGX_CR_ISP_RENDER +*/ +#define RGX_CR_ISP_RENDER (0x0F08U) +#define RGX_CR_ISP_RENDER__IPP_FAST_RENDER__MASKFULL (IMG_UINT64_C(0x000000000003FFFF)) +#define RGX_CR_ISP_RENDER__CS3DL_4__MASKFULL (IMG_UINT64_C(0x000000000007FF53)) +#define RGX_CR_ISP_RENDER_MASKFULL (IMG_UINT64_C(0x000000000003FFF0)) +#define RGX_CR_ISP_RENDER_TILES_PER_ISP_SHIFT (16U) +#define RGX_CR_ISP_RENDER_TILES_PER_ISP_CLRMSK (0xFFFCFFFFU) +#define RGX_CR_ISP_RENDER__CS3DL_4__TILES_PER_ISP_SHIFT (16U) +#define RGX_CR_ISP_RENDER__CS3DL_4__TILES_PER_ISP_CLRMSK (0xFFF8FFFFU) +#define RGX_CR_ISP_RENDER_TILE_LIMIT_HIGH_SHIFT (12U) +#define RGX_CR_ISP_RENDER_TILE_LIMIT_HIGH_CLRMSK (0xFFFF0FFFU) +#define RGX_CR_ISP_RENDER_TILE_LIMIT_LOW_SHIFT (8U) +#define RGX_CR_ISP_RENDER_TILE_LIMIT_LOW_CLRMSK (0xFFFFF0FFU) +#define RGX_CR_ISP_RENDER_TILE_STARVATION_SHIFT (7U) +#define RGX_CR_ISP_RENDER_TILE_STARVATION_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_ISP_RENDER_TILE_STARVATION_EN (0x00000080U) +#define RGX_CR_ISP_RENDER_PROCESS_EMPTY_TILES_SHIFT (6U) +#define RGX_CR_ISP_RENDER_PROCESS_EMPTY_TILES_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_ISP_RENDER_PROCESS_EMPTY_TILES_EN (0x00000040U) +#define RGX_CR_ISP_RENDER_RESUME_SHIFT (4U) +#define RGX_CR_ISP_RENDER_RESUME_CLRMSK (0xFFFFFFCFU) +#define RGX_CR_ISP_RENDER_RESUME_CONTEXT_NONE 
(0x00000000U) +#define RGX_CR_ISP_RENDER_RESUME_CONTEXT_TILE (0x00000010U) +#define RGX_CR_ISP_RENDER_RESUME_CONTEXT_PBLK (0x00000030U) +#define RGX_CR_ISP_RENDER__CS3DL_4__RESUME_SHIFT (4U) +#define RGX_CR_ISP_RENDER__CS3DL_4__RESUME_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_ISP_RENDER__CS3DL_4__RESUME_CONTEXT_NONE (0x00000000U) +#define RGX_CR_ISP_RENDER__CS3DL_4__RESUME_CONTEXT_RESUME (0x00000010U) +#define RGX_CR_ISP_RENDER_DIR_SHIFT (2U) +#define RGX_CR_ISP_RENDER_DIR_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_ISP_RENDER_DIR_TL2BR (0x00000000U) +#define RGX_CR_ISP_RENDER_DIR_TR2BL (0x00000004U) +#define RGX_CR_ISP_RENDER_DIR_BL2TR (0x00000008U) +#define RGX_CR_ISP_RENDER_DIR_BR2TL (0x0000000CU) +#define RGX_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_SHIFT (1U) +#define RGX_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_EN (0x00000002U) +#define RGX_CR_ISP_RENDER_MODE_SHIFT (0U) +#define RGX_CR_ISP_RENDER_MODE_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_ISP_RENDER_MODE_NORM (0x00000000U) +#define RGX_CR_ISP_RENDER_MODE_FAST_2D (0x00000002U) +#define RGX_CR_ISP_RENDER_MODE_FAST_SCALE (0x00000003U) +#define RGX_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_SHIFT (0U) +#define RGX_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_EN (0x00000001U) + + +/* + Register RGX_CR_ISP_CTL +*/ +#define RGX_CR_ISP_CTL (0x0FB0U) +#define RGX_CR_ISP_CTL_MASKFULL (IMG_UINT64_C(0x00000000007BF8FF)) +#define RGX_CR_ISP_CTL_DBUFFER_COUNT_SHIFT (20U) +#define RGX_CR_ISP_CTL_DBUFFER_COUNT_CLRMSK (0xFF8FFFFFU) +#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_SHIFT (19U) +#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_EN (0x00080000U) +#define RGX_CR_ISP_CTL_UPFRONT_DEPTH_DISABLE_SHIFT (17U) +#define RGX_CR_ISP_CTL_UPFRONT_DEPTH_DISABLE_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_ISP_CTL_UPFRONT_DEPTH_DISABLE_EN (0x00020000U) +#define 
RGX_CR_ISP_CTL_DEPTH_CLAMP_ONE_SHIFT (16U) +#define RGX_CR_ISP_CTL_DEPTH_CLAMP_ONE_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_ISP_CTL_DEPTH_CLAMP_ONE_EN (0x00010000U) +#define RGX_CR_ISP_CTL_DEPTH_CLAMP_ZERO_SHIFT (15U) +#define RGX_CR_ISP_CTL_DEPTH_CLAMP_ZERO_CLRMSK (0xFFFF7FFFU) +#define RGX_CR_ISP_CTL_DEPTH_CLAMP_ZERO_EN (0x00008000U) +#define RGX_CR_ISP_CTL_LINE_SAMPLE_SHIFT (14U) +#define RGX_CR_ISP_CTL_LINE_SAMPLE_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_ISP_CTL_LINE_SAMPLE_EN (0x00004000U) +#define RGX_CR_ISP_CTL_LINE_STYLE_SHIFT (13U) +#define RGX_CR_ISP_CTL_LINE_STYLE_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_ISP_CTL_LINE_STYLE_EN (0x00002000U) +#define RGX_CR_ISP_CTL_LINE_STYLE_SINGLE_PIXEL_SHIFT (12U) +#define RGX_CR_ISP_CTL_LINE_STYLE_SINGLE_PIXEL_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_ISP_CTL_LINE_STYLE_SINGLE_PIXEL_EN (0x00001000U) +#define RGX_CR_ISP_CTL_DBIAS_IS_INT_SHIFT (11U) +#define RGX_CR_ISP_CTL_DBIAS_IS_INT_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_ISP_CTL_DBIAS_IS_INT_EN (0x00000800U) +#define RGX_CR_ISP_CTL_UPASS_START_SHIFT (0U) +#define RGX_CR_ISP_CTL_UPASS_START_CLRMSK (0xFFFFFF00U) + + +/* + Register group: RGX_CR_MEM_TILING_CFG, with 8 repeats +*/ +#define RGX_CR_MEM_TILING_CFG_REPEATCOUNT (8U) +/* + Register RGX_CR_MEM_TILING_CFG0 +*/ +#define RGX_CR_MEM_TILING_CFG0 (0x12D8U) +#define RGX_CR_MEM_TILING_CFG0_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_MEM_TILING_CFG0_XSTRIDE_SHIFT (61U) +#define RGX_CR_MEM_TILING_CFG0_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG0_ENABLE_SHIFT (60U) +#define RGX_CR_MEM_TILING_CFG0_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG0_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_MEM_TILING_CFG0_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_MEM_TILING_CFG0_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG0_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG0_MAX_ADDRESS_ALIGNSIZE (4096U) +#define 
RGX_CR_MEM_TILING_CFG0_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_MEM_TILING_CFG0_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_MEM_TILING_CFG0_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG0_MIN_ADDRESS_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_MEM_TILING_CFG1 +*/ +#define RGX_CR_MEM_TILING_CFG1 (0x12E0U) +#define RGX_CR_MEM_TILING_CFG1_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_MEM_TILING_CFG1_XSTRIDE_SHIFT (61U) +#define RGX_CR_MEM_TILING_CFG1_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG1_ENABLE_SHIFT (60U) +#define RGX_CR_MEM_TILING_CFG1_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG1_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_MEM_TILING_CFG1_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_MEM_TILING_CFG1_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG1_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG1_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_MEM_TILING_CFG1_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_MEM_TILING_CFG1_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_MEM_TILING_CFG1_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG1_MIN_ADDRESS_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_MEM_TILING_CFG2 +*/ +#define RGX_CR_MEM_TILING_CFG2 (0x12E8U) +#define RGX_CR_MEM_TILING_CFG2_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_MEM_TILING_CFG2_XSTRIDE_SHIFT (61U) +#define RGX_CR_MEM_TILING_CFG2_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG2_ENABLE_SHIFT (60U) +#define RGX_CR_MEM_TILING_CFG2_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG2_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_MEM_TILING_CFG2_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_MEM_TILING_CFG2_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG2_MAX_ADDRESS_ALIGNSHIFT 
(12U) +#define RGX_CR_MEM_TILING_CFG2_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_MEM_TILING_CFG2_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_MEM_TILING_CFG2_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_MEM_TILING_CFG2_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG2_MIN_ADDRESS_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_MEM_TILING_CFG3 +*/ +#define RGX_CR_MEM_TILING_CFG3 (0x12F0U) +#define RGX_CR_MEM_TILING_CFG3_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_MEM_TILING_CFG3_XSTRIDE_SHIFT (61U) +#define RGX_CR_MEM_TILING_CFG3_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG3_ENABLE_SHIFT (60U) +#define RGX_CR_MEM_TILING_CFG3_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG3_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_MEM_TILING_CFG3_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_MEM_TILING_CFG3_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG3_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG3_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_MEM_TILING_CFG3_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_MEM_TILING_CFG3_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_MEM_TILING_CFG3_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG3_MIN_ADDRESS_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_MEM_TILING_CFG4 +*/ +#define RGX_CR_MEM_TILING_CFG4 (0x12F8U) +#define RGX_CR_MEM_TILING_CFG4_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_MEM_TILING_CFG4_XSTRIDE_SHIFT (61U) +#define RGX_CR_MEM_TILING_CFG4_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG4_ENABLE_SHIFT (60U) +#define RGX_CR_MEM_TILING_CFG4_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG4_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_MEM_TILING_CFG4_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_MEM_TILING_CFG4_MAX_ADDRESS_CLRMSK 
(IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG4_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG4_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_MEM_TILING_CFG4_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_MEM_TILING_CFG4_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_MEM_TILING_CFG4_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG4_MIN_ADDRESS_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_MEM_TILING_CFG5 +*/ +#define RGX_CR_MEM_TILING_CFG5 (0x1300U) +#define RGX_CR_MEM_TILING_CFG5_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_MEM_TILING_CFG5_XSTRIDE_SHIFT (61U) +#define RGX_CR_MEM_TILING_CFG5_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG5_ENABLE_SHIFT (60U) +#define RGX_CR_MEM_TILING_CFG5_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG5_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_MEM_TILING_CFG5_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_MEM_TILING_CFG5_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG5_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG5_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_MEM_TILING_CFG5_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_MEM_TILING_CFG5_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_MEM_TILING_CFG5_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG5_MIN_ADDRESS_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_MEM_TILING_CFG6 +*/ +#define RGX_CR_MEM_TILING_CFG6 (0x1308U) +#define RGX_CR_MEM_TILING_CFG6_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_MEM_TILING_CFG6_XSTRIDE_SHIFT (61U) +#define RGX_CR_MEM_TILING_CFG6_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG6_ENABLE_SHIFT (60U) +#define RGX_CR_MEM_TILING_CFG6_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG6_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define 
RGX_CR_MEM_TILING_CFG6_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_MEM_TILING_CFG6_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG6_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG6_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_MEM_TILING_CFG6_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_MEM_TILING_CFG6_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_MEM_TILING_CFG6_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG6_MIN_ADDRESS_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_MEM_TILING_CFG7 +*/ +#define RGX_CR_MEM_TILING_CFG7 (0x1310U) +#define RGX_CR_MEM_TILING_CFG7_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) +#define RGX_CR_MEM_TILING_CFG7_XSTRIDE_SHIFT (61U) +#define RGX_CR_MEM_TILING_CFG7_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG7_ENABLE_SHIFT (60U) +#define RGX_CR_MEM_TILING_CFG7_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG7_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_MEM_TILING_CFG7_MAX_ADDRESS_SHIFT (32U) +#define RGX_CR_MEM_TILING_CFG7_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) +#define RGX_CR_MEM_TILING_CFG7_MAX_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG7_MAX_ADDRESS_ALIGNSIZE (4096U) +#define RGX_CR_MEM_TILING_CFG7_MIN_ADDRESS_SHIFT (0U) +#define RGX_CR_MEM_TILING_CFG7_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) +#define RGX_CR_MEM_TILING_CFG7_MIN_ADDRESS_ALIGNSHIFT (12U) +#define RGX_CR_MEM_TILING_CFG7_MIN_ADDRESS_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_USC_TIMER +*/ +#define RGX_CR_USC_TIMER (0x46C8U) +#define RGX_CR_USC_TIMER_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_USC_TIMER_CNT_SHIFT (0U) +#define RGX_CR_USC_TIMER_CNT_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_CR_USC_TIMER_CNT +*/ +#define RGX_CR_USC_TIMER_CNT (0x46D0U) +#define RGX_CR_USC_TIMER_CNT_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define 
RGX_CR_USC_TIMER_CNT_RESET_SHIFT (0U) +#define RGX_CR_USC_TIMER_CNT_RESET_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_USC_TIMER_CNT_RESET_EN (0x00000001U) + + +/* + Register RGX_CR_TE_CHECKSUM +*/ +#define RGX_CR_TE_CHECKSUM (0x5110U) +#define RGX_CR_TE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TE_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_TE_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_USC_UVB_CHECKSUM +*/ +#define RGX_CR_USC_UVB_CHECKSUM (0x5118U) +#define RGX_CR_USC_UVB_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_USC_UVB_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_USC_UVB_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_TE_TMA_CHECKSUM +*/ +#define RGX_CR_TE_TMA_CHECKSUM (0x5128U) +#define RGX_CR_TE_TMA_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TE_TMA_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_TE_TMA_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_CDM_PDS_CHECKSUM +*/ +#define RGX_CR_CDM_PDS_CHECKSUM (0x5130U) +#define RGX_CR_CDM_PDS_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_CDM_PDS_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_CDM_PDS_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_VCE_CHECKSUM +*/ +#define RGX_CR_VCE_CHECKSUM (0x5030U) +#define RGX_CR_VCE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_VCE_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_VCE_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_ISP_PDS_CHECKSUM +*/ +#define RGX_CR_ISP_PDS_CHECKSUM (0x5038U) +#define RGX_CR_ISP_PDS_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_ISP_PDS_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_ISP_PDS_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_ISP_TPF_CHECKSUM +*/ +#define RGX_CR_ISP_TPF_CHECKSUM (0x5040U) +#define RGX_CR_ISP_TPF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_ISP_TPF_CHECKSUM_VALUE_SHIFT (0U) +#define 
RGX_CR_ISP_TPF_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_TFPU_CHECKSUM +*/ +#define RGX_CR_TFPU_CHECKSUM (0x5048U) +#define RGX_CR_TFPU_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TFPU_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_TFPU_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_ZLS_CHECKSUM +*/ +#define RGX_CR_ZLS_CHECKSUM (0x5050U) +#define RGX_CR_ZLS_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_ZLS_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_ZLS_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PBE_CHECKSUM_3D +*/ +#define RGX_CR_PBE_CHECKSUM_3D__PBE_CHECKSUM_2D (0x5058U) +#define RGX_CR_PBE_CHECKSUM_3D__PBE_CHECKSUM_2D__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PBE_CHECKSUM_3D__PBE_CHECKSUM_2D__VALUE_SHIFT (0U) +#define RGX_CR_PBE_CHECKSUM_3D__PBE_CHECKSUM_2D__VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PBE_CHECKSUM +*/ +#define RGX_CR_PBE_CHECKSUM (0x5058U) +#define RGX_CR_PBE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PBE_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_PBE_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PDS_DOUTM_STM_CHECKSUM +*/ +#define RGX_CR_PDS_DOUTM_STM_CHECKSUM (0x5060U) +#define RGX_CR_PDS_DOUTM_STM_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PDS_DOUTM_STM_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_PDS_DOUTM_STM_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_IFPU_ISP_CHECKSUM +*/ +#define RGX_CR_IFPU_ISP_CHECKSUM (0x5068U) +#define RGX_CR_IFPU_ISP_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_IFPU_ISP_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_IFPU_ISP_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PPP_CLIP_CHECKSUM +*/ +#define RGX_CR_PPP_CLIP_CHECKSUM (0x5120U) +#define RGX_CR_PPP_CLIP_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PPP_CLIP_CHECKSUM_VALUE_SHIFT (0U) +#define 
RGX_CR_PPP_CLIP_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_VCE_PRIM_CHECKSUM +*/ +#define RGX_CR_VCE_PRIM_CHECKSUM (0x5140U) +#define RGX_CR_VCE_PRIM_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_VCE_PRIM_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_VCE_PRIM_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_TDM_PDS_CHECKSUM +*/ +#define RGX_CR_TDM_PDS_CHECKSUM (0x5148U) +#define RGX_CR_TDM_PDS_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TDM_PDS_CHECKSUM_VALUE_SHIFT (0U) +#define RGX_CR_TDM_PDS_CHECKSUM_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PBE_CHECKSUM_2D +*/ +#define RGX_CR_PBE_CHECKSUM_2D (0x5158U) +#define RGX_CR_PBE_CHECKSUM_2D_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PBE_CHECKSUM_2D_VALUE_SHIFT (0U) +#define RGX_CR_PBE_CHECKSUM_2D_VALUE_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_PHASE_GEOM +*/ +#define RGX_CR_PERF_PHASE_GEOM (0x6008U) +#define RGX_CR_PERF_PHASE_GEOM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_PHASE_GEOM_COUNT_SHIFT (0U) +#define RGX_CR_PERF_PHASE_GEOM_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_PHASE_FRAG +*/ +#define RGX_CR_PERF_PHASE_FRAG (0x6010U) +#define RGX_CR_PERF_PHASE_FRAG_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_PHASE_FRAG_COUNT_SHIFT (0U) +#define RGX_CR_PERF_PHASE_FRAG_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_PHASE_COMP +*/ +#define RGX_CR_PERF_PHASE_COMP (0x6018U) +#define RGX_CR_PERF_PHASE_COMP_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_PHASE_COMP_COUNT_SHIFT (0U) +#define RGX_CR_PERF_PHASE_COMP_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_CYCLE_GEOM_TOTAL +*/ +#define RGX_CR_PERF_CYCLE_GEOM_TOTAL (0x6020U) +#define RGX_CR_PERF_CYCLE_GEOM_TOTAL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_CYCLE_GEOM_TOTAL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_CYCLE_GEOM_TOTAL_COUNT_CLRMSK 
(0x00000000U) + + +/* + Register RGX_CR_PERF_CYCLE_FRAG_TOTAL +*/ +#define RGX_CR_PERF_CYCLE_FRAG_TOTAL (0x6028U) +#define RGX_CR_PERF_CYCLE_FRAG_TOTAL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_CYCLE_FRAG_TOTAL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_CYCLE_FRAG_TOTAL_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_CYCLE_COMP_TOTAL +*/ +#define RGX_CR_PERF_CYCLE_COMP_TOTAL (0x6030U) +#define RGX_CR_PERF_CYCLE_COMP_TOTAL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_CYCLE_COMP_TOTAL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_CYCLE_COMP_TOTAL_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_CYCLE_GEOM_OR_FRAG_TOTAL +*/ +#define RGX_CR_PERF_CYCLE_GEOM_OR_FRAG_TOTAL (0x6038U) +#define RGX_CR_PERF_CYCLE_GEOM_OR_FRAG_TOTAL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_CYCLE_GEOM_OR_FRAG_TOTAL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_CYCLE_GEOM_OR_FRAG_TOTAL_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_PHASE_2D +*/ +#define RGX_CR_PERF_PHASE_2D (0x6050U) +#define RGX_CR_PERF_PHASE_2D_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_PHASE_2D_COUNT_SHIFT (0U) +#define RGX_CR_PERF_PHASE_2D_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_CYCLE_2D_TOTAL +*/ +#define RGX_CR_PERF_CYCLE_2D_TOTAL (0x6058U) +#define RGX_CR_PERF_CYCLE_2D_TOTAL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_CYCLE_2D_TOTAL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_CYCLE_2D_TOTAL_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_SLC0_READ_STALL +*/ +#define RGX_CR_PERF_SLC0_READ_STALL (0x60B8U) +#define RGX_CR_PERF_SLC0_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC0_READ_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC0_READ_STALL_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_SLC0_WRITE_STALL +*/ +#define RGX_CR_PERF_SLC0_WRITE_STALL (0x60C0U) +#define RGX_CR_PERF_SLC0_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) 
+#define RGX_CR_PERF_SLC0_WRITE_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC0_WRITE_STALL_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_SLC1_READ_STALL +*/ +#define RGX_CR_PERF_SLC1_READ_STALL (0x60E0U) +#define RGX_CR_PERF_SLC1_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC1_READ_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC1_READ_STALL_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_SLC1_WRITE_STALL +*/ +#define RGX_CR_PERF_SLC1_WRITE_STALL (0x60E8U) +#define RGX_CR_PERF_SLC1_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC1_WRITE_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC1_WRITE_STALL_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_SLC2_READ_STALL +*/ +#define RGX_CR_PERF_SLC2_READ_STALL (0x6158U) +#define RGX_CR_PERF_SLC2_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC2_READ_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC2_READ_STALL_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_SLC2_WRITE_STALL +*/ +#define RGX_CR_PERF_SLC2_WRITE_STALL (0x6160U) +#define RGX_CR_PERF_SLC2_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC2_WRITE_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC2_WRITE_STALL_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_SLC3_READ_STALL +*/ +#define RGX_CR_PERF_SLC3_READ_STALL (0x6180U) +#define RGX_CR_PERF_SLC3_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC3_READ_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC3_READ_STALL_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_SLC3_WRITE_STALL +*/ +#define RGX_CR_PERF_SLC3_WRITE_STALL (0x6188U) +#define RGX_CR_PERF_SLC3_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_SLC3_WRITE_STALL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_SLC3_WRITE_STALL_COUNT_CLRMSK (0x00000000U) + + +/* + Register RGX_CR_PERF_CYCLE_GEOM_AND_FRAG_TOTAL +*/ +#define 
RGX_CR_PERF_CYCLE_GEOM_AND_FRAG_TOTAL (0x6408U) +#define RGX_CR_PERF_CYCLE_GEOM_AND_FRAG_TOTAL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_PERF_CYCLE_GEOM_AND_FRAG_TOTAL_COUNT_SHIFT (0U) +#define RGX_CR_PERF_CYCLE_GEOM_AND_FRAG_TOTAL_COUNT_CLRMSK (0x00000000U) + + +/* + Register group: RGX_CR_FWCORE_ADDR_REMAP_CONFIG, with 16 repeats +*/ +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG_REPEATCOUNT (16U) +/* + Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG0 +*/ +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0 (0x7020U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG1 +*/ +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1 (0x7028U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_EN 
(IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG2 +*/ +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2 (0x7030U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG3 +*/ +#define 
RGX_CR_FWCORE_ADDR_REMAP_CONFIG3 (0x7038U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG4 +*/ +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4 (0x7040U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define 
RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG5 +*/ +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5 (0x7048U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG6 +*/ +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6 (0x7050U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_SHIFT (60U) +#define 
RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG7 +*/ +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7 (0x7058U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG8 +*/ +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8 (0x7060U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) 
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG9 +*/ +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9 (0x7068U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define 
RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG10 +*/ +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10 (0x7070U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG11 +*/ +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11 (0x7078U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define 
RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG12 +*/ +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12 (0x7080U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG13 +*/ +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13 (0x7088U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_SHIFT (61U) 
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG14 +*/ +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14 (0x7090U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_MMU_CONTEXT_SHIFT (0U) 
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG15 +*/ +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15 (0x7098U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_SHIFT (61U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_SHIFT (60U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_SIZE_SHIFT (44U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_SHIFT (12U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_MMU_CONTEXT_SHIFT (0U) +#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_FWCORE_WRAPPER_NMI_ENABLE +*/ +#define RGX_CR_FWCORE_WRAPPER_NMI_ENABLE (0x70A0U) +#define RGX_CR_FWCORE_WRAPPER_NMI_ENABLE_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_FWCORE_WRAPPER_NMI_ENABLE_EVENT_EN_SHIFT (0U) +#define RGX_CR_FWCORE_WRAPPER_NMI_ENABLE_EVENT_EN_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FWCORE_WRAPPER_NMI_ENABLE_EVENT_EN_EN (0x00000001U) + + +/* + Register RGX_CR_FWCORE_WRAPPER_NMI_EVENT +*/ +#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT (0x70A8U) +#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_SHIFT (0U) +#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_CLRMSK (0xFFFFFFFEU) 
+#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_EN (0x00000001U) + + +/* + Register RGX_CR_FWCORE_WRAPPER_RESET_VECTOR +*/ +#define RGX_CR_FWCORE_WRAPPER_RESET_VECTOR (0x70B0U) +#define RGX_CR_FWCORE_WRAPPER_RESET_VECTOR_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFE)) +#define RGX_CR_FWCORE_WRAPPER_RESET_VECTOR_ADDR_SHIFT (1U) +#define RGX_CR_FWCORE_WRAPPER_RESET_VECTOR_ADDR_CLRMSK (0x00000001U) + + +/* + Register RGX_CR_FWCORE_WRAPPER_NMI_VECTOR +*/ +#define RGX_CR_FWCORE_WRAPPER_NMI_VECTOR (0x70B8U) +#define RGX_CR_FWCORE_WRAPPER_NMI_VECTOR_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFE)) +#define RGX_CR_FWCORE_WRAPPER_NMI_VECTOR_ADDR_SHIFT (1U) +#define RGX_CR_FWCORE_WRAPPER_NMI_VECTOR_ADDR_CLRMSK (0x00000001U) + + +/* + Register RGX_CR_JONES_IDLE +*/ +#define RGX_CR_JONES_IDLE (0x8328U) +#define RGX_CR_JONES_IDLE_MASKFULL (IMG_UINT64_C(0x000000000007FEFF)) +#define RGX_CR_JONES_IDLE_ASC_SHIFT (18U) +#define RGX_CR_JONES_IDLE_ASC_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_JONES_IDLE_ASC_EN (0x00040000U) +#define RGX_CR_JONES_IDLE_RCE_SHIFT (17U) +#define RGX_CR_JONES_IDLE_RCE_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_JONES_IDLE_RCE_EN (0x00020000U) +#define RGX_CR_JONES_IDLE_AXI2IMG_SHIFT (16U) +#define RGX_CR_JONES_IDLE_AXI2IMG_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_JONES_IDLE_AXI2IMG_EN (0x00010000U) +#define RGX_CR_JONES_IDLE_SLC_SHIFT (15U) +#define RGX_CR_JONES_IDLE_SLC_CLRMSK (0xFFFF7FFFU) +#define RGX_CR_JONES_IDLE_SLC_EN (0x00008000U) +#define RGX_CR_JONES_IDLE_TDM_SHIFT (14U) +#define RGX_CR_JONES_IDLE_TDM_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_JONES_IDLE_TDM_EN (0x00004000U) +#define RGX_CR_JONES_IDLE_FB_CDC_TLA_SHIFT (13U) +#define RGX_CR_JONES_IDLE_FB_CDC_TLA_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_JONES_IDLE_FB_CDC_TLA_EN (0x00002000U) +#define RGX_CR_JONES_IDLE_FB_CDC_SHIFT (12U) +#define RGX_CR_JONES_IDLE_FB_CDC_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_JONES_IDLE_FB_CDC_EN (0x00001000U) +#define RGX_CR_JONES_IDLE_MMU_SHIFT (11U) +#define RGX_CR_JONES_IDLE_MMU_CLRMSK 
(0xFFFFF7FFU) +#define RGX_CR_JONES_IDLE_MMU_EN (0x00000800U) +#define RGX_CR_JONES_IDLE_DFU_SHIFT (10U) +#define RGX_CR_JONES_IDLE_DFU_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_JONES_IDLE_DFU_EN (0x00000400U) +#define RGX_CR_JONES_IDLE_GARTEN_SHIFT (9U) +#define RGX_CR_JONES_IDLE_GARTEN_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_JONES_IDLE_GARTEN_EN (0x00000200U) +#define RGX_CR_JONES_IDLE_SOCIF_SHIFT (7U) +#define RGX_CR_JONES_IDLE_SOCIF_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_JONES_IDLE_SOCIF_EN (0x00000080U) +#define RGX_CR_JONES_IDLE_TILING_SHIFT (6U) +#define RGX_CR_JONES_IDLE_TILING_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_JONES_IDLE_TILING_EN (0x00000040U) +#define RGX_CR_JONES_IDLE_IPP_SHIFT (5U) +#define RGX_CR_JONES_IDLE_IPP_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_JONES_IDLE_IPP_EN (0x00000020U) +#define RGX_CR_JONES_IDLE_USC_GMUTEX_SHIFT (4U) +#define RGX_CR_JONES_IDLE_USC_GMUTEX_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_JONES_IDLE_USC_GMUTEX_EN (0x00000010U) +#define RGX_CR_JONES_IDLE_PM_SHIFT (3U) +#define RGX_CR_JONES_IDLE_PM_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_JONES_IDLE_PM_EN (0x00000008U) +#define RGX_CR_JONES_IDLE_CDM_SHIFT (2U) +#define RGX_CR_JONES_IDLE_CDM_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_JONES_IDLE_CDM_EN (0x00000004U) +#define RGX_CR_JONES_IDLE_DCE_SHIFT (1U) +#define RGX_CR_JONES_IDLE_DCE_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_JONES_IDLE_DCE_EN (0x00000002U) +#define RGX_CR_JONES_IDLE_BIF_SHIFT (0U) +#define RGX_CR_JONES_IDLE_BIF_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_JONES_IDLE_BIF_EN (0x00000001U) + + +/* + Register RGX_CR_SYS_BUS_SECURE +*/ +#define RGX_CR_SYS_BUS_SECURE (0xA100U) +#define RGX_CR_SYS_BUS_SECURE__SYS_BUS_SECURE_RESET__MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_SYS_BUS_SECURE_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_SYS_BUS_SECURE_ENABLE_SHIFT (0U) +#define RGX_CR_SYS_BUS_SECURE_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_SYS_BUS_SECURE_ENABLE_EN (0x00000001U) + + +/* + Register RGX_CR_MMU_CBASE_MAPPING_CONTEXT 
+*/ +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT (0xE140U) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__MMU_GT_V3__MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_ID_SHIFT (0U) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_ID_CLRMSK (0xFFFFFF00U) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__MMU_GT_V3__ID_SHIFT (0U) +#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__MMU_GT_V3__ID_CLRMSK (0xFFFFFFE0U) + + +/* + Register RGX_CR_MMU_CBASE_MAPPING +*/ +#define RGX_CR_MMU_CBASE_MAPPING__VPU (0x1E010U) +#define RGX_CR_MMU_CBASE_MAPPING__VPU__MASKFULL (IMG_UINT64_C(0x000000001FFFFFFF)) +#define RGX_CR_MMU_CBASE_MAPPING__VPU__INVALID_SHIFT (28U) +#define RGX_CR_MMU_CBASE_MAPPING__VPU__INVALID_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_MMU_CBASE_MAPPING__VPU__INVALID_EN (0x10000000U) +#define RGX_CR_MMU_CBASE_MAPPING__VPU__BASE_ADDR_SHIFT (0U) +#define RGX_CR_MMU_CBASE_MAPPING__VPU__BASE_ADDR_CLRMSK (0xF0000000U) +#define RGX_CR_MMU_CBASE_MAPPING__VPU__BASE_ADDR_ALIGNSHIFT (12U) +#define RGX_CR_MMU_CBASE_MAPPING__VPU__BASE_ADDR_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_MMU_CBASE_MAPPING +*/ +#define RGX_CR_MMU_CBASE_MAPPING (0xE148U) +#define RGX_CR_MMU_CBASE_MAPPING_MASKFULL (IMG_UINT64_C(0x000000001FFFFFFF)) +#define RGX_CR_MMU_CBASE_MAPPING_INVALID_SHIFT (28U) +#define RGX_CR_MMU_CBASE_MAPPING_INVALID_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_MMU_CBASE_MAPPING_INVALID_EN (0x10000000U) +#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT (0U) +#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK (0xF0000000U) +#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT (12U) +#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSIZE (4096U) + + +/* + Register RGX_CR_MMU_FAULT_STATUS1 +*/ +#define RGX_CR_MMU_FAULT_STATUS1 (0xE150U) +#define RGX_CR_MMU_FAULT_STATUS1_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS1_LEVEL_SHIFT (62U) +#define 
RGX_CR_MMU_FAULT_STATUS1_LEVEL_CLRMSK (IMG_UINT64_C(0x3FFFFFFFFFFFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS1_REQ_ID_SHIFT (56U) +#define RGX_CR_MMU_FAULT_STATUS1_REQ_ID_CLRMSK (IMG_UINT64_C(0xC0FFFFFFFFFFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS1_CONTEXT_SHIFT (48U) +#define RGX_CR_MMU_FAULT_STATUS1_CONTEXT_CLRMSK (IMG_UINT64_C(0xFF00FFFFFFFFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS1_ADDRESS_SHIFT (4U) +#define RGX_CR_MMU_FAULT_STATUS1_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFF00000000000F)) +#define RGX_CR_MMU_FAULT_STATUS1_RNW_SHIFT (3U) +#define RGX_CR_MMU_FAULT_STATUS1_RNW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_MMU_FAULT_STATUS1_RNW_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_MMU_FAULT_STATUS1_TYPE_SHIFT (1U) +#define RGX_CR_MMU_FAULT_STATUS1_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF9)) +#define RGX_CR_MMU_FAULT_STATUS1_FAULT_SHIFT (0U) +#define RGX_CR_MMU_FAULT_STATUS1_FAULT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_MMU_FAULT_STATUS1_FAULT_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_MMU_FAULT_STATUS2 +*/ +#define RGX_CR_MMU_FAULT_STATUS2 (0xE158U) +#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__MASKFULL (IMG_UINT64_C(0x00000000003FFFFF)) +#define RGX_CR_MMU_FAULT_STATUS2_MASKFULL (IMG_UINT64_C(0x000000003FFF07FF)) +#define RGX_CR_MMU_FAULT_STATUS2_WRITEBACK_SHIFT (29U) +#define RGX_CR_MMU_FAULT_STATUS2_WRITEBACK_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_MMU_FAULT_STATUS2_WRITEBACK_EN (0x20000000U) +#define RGX_CR_MMU_FAULT_STATUS2_CLEANUNIQUE_SHIFT (28U) +#define RGX_CR_MMU_FAULT_STATUS2_CLEANUNIQUE_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_MMU_FAULT_STATUS2_CLEANUNIQUE_EN (0x10000000U) +#define RGX_CR_MMU_FAULT_STATUS2_BANK_SHIFT (24U) +#define RGX_CR_MMU_FAULT_STATUS2_BANK_CLRMSK (0xF0FFFFFFU) +#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__FBM_FAULT_SHIFT (21U) +#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__FBM_FAULT_CLRMSK (0xFFDFFFFFU) +#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__FBM_FAULT_EN (0x00200000U) +#define 
RGX_CR_MMU_FAULT_STATUS2__ALBTOP__WRITEBACK_SHIFT (20U) +#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__WRITEBACK_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__WRITEBACK_EN (0x00100000U) +#define RGX_CR_MMU_FAULT_STATUS2_TLB_ENTRY_SHIFT (16U) +#define RGX_CR_MMU_FAULT_STATUS2_TLB_ENTRY_CLRMSK (0xFF00FFFFU) +#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__BIF_ID_SHIFT (12U) +#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__BIF_ID_CLRMSK (0xFFF00FFFU) +#define RGX_CR_MMU_FAULT_STATUS2_FBM_FAULT_SHIFT (10U) +#define RGX_CR_MMU_FAULT_STATUS2_FBM_FAULT_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_MMU_FAULT_STATUS2_FBM_FAULT_EN (0x00000400U) +#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__BANK_SHIFT (8U) +#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__BANK_CLRMSK (0xFFFFF0FFU) +#define RGX_CR_MMU_FAULT_STATUS2_BIF_ID_SHIFT (0U) +#define RGX_CR_MMU_FAULT_STATUS2_BIF_ID_CLRMSK (0xFFFFFC00U) +#define RGX_CR_MMU_FAULT_STATUS2_ACTIVE_ID_SHIFT (0U) +#define RGX_CR_MMU_FAULT_STATUS2_ACTIVE_ID_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_MMU_FAULT_STATUS_META +*/ +#define RGX_CR_MMU_FAULT_STATUS_META (0xE160U) +#define RGX_CR_MMU_FAULT_STATUS_META_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT (62U) +#define RGX_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK (IMG_UINT64_C(0x3FFFFFFFFFFFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT (56U) +#define RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK (IMG_UINT64_C(0xC0FFFFFFFFFFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT (48U) +#define RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK (IMG_UINT64_C(0xFF00FFFFFFFFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT (4U) +#define RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFF00000000000F)) +#define RGX_CR_MMU_FAULT_STATUS_META_RNW_SHIFT (3U) +#define RGX_CR_MMU_FAULT_STATUS_META_RNW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_MMU_FAULT_STATUS_META_RNW_EN (IMG_UINT64_C(0x0000000000000008)) +#define 
RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT (1U) +#define RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF9)) +#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_SHIFT (0U) +#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_MMU_FAULT_STATUS2_META +*/ +#define RGX_CR_MMU_FAULT_STATUS2_META (0xE198U) +#define RGX_CR_MMU_FAULT_STATUS2_META__ALBTOP__MASKFULL (IMG_UINT64_C(0x0000000000001FFF)) +#define RGX_CR_MMU_FAULT_STATUS2_META_MASKFULL (IMG_UINT64_C(0x0000000000003FFF)) +#define RGX_CR_MMU_FAULT_STATUS2_META_WRITEBACK_SHIFT (13U) +#define RGX_CR_MMU_FAULT_STATUS2_META_WRITEBACK_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_MMU_FAULT_STATUS2_META_WRITEBACK_EN (0x00002000U) +#define RGX_CR_MMU_FAULT_STATUS2_META_CLEANUNIQUE_SHIFT (12U) +#define RGX_CR_MMU_FAULT_STATUS2_META_CLEANUNIQUE_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_MMU_FAULT_STATUS2_META_CLEANUNIQUE_EN (0x00001000U) +#define RGX_CR_MMU_FAULT_STATUS2_META__ALBTOP__WRITEBACK_SHIFT (12U) +#define RGX_CR_MMU_FAULT_STATUS2_META__ALBTOP__WRITEBACK_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_MMU_FAULT_STATUS2_META__ALBTOP__WRITEBACK_EN (0x00001000U) +#define RGX_CR_MMU_FAULT_STATUS2_META_BANK_SHIFT (8U) +#define RGX_CR_MMU_FAULT_STATUS2_META_BANK_CLRMSK (0xFFFFF0FFU) +#define RGX_CR_MMU_FAULT_STATUS2_META_TLB_ENTRY_SHIFT (0U) +#define RGX_CR_MMU_FAULT_STATUS2_META_TLB_ENTRY_CLRMSK (0xFFFFFF00U) +#define RGX_CR_MMU_FAULT_STATUS2_META_ACTIVE_ID_SHIFT (0U) +#define RGX_CR_MMU_FAULT_STATUS2_META_ACTIVE_ID_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_MMU_FAULT_STATUS_PM +*/ +#define RGX_CR_MMU_FAULT_STATUS_PM (0xE130U) +#define RGX_CR_MMU_FAULT_STATUS_PM__PM_RECYCLE__MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_PM_MASKFULL (IMG_UINT64_C(0x0000000007FFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_PM_DM_SHIFT (24U) +#define RGX_CR_MMU_FAULT_STATUS_PM_DM_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFF8FFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_PM__PM_RECYCLE__DM_SHIFT (24U) +#define RGX_CR_MMU_FAULT_STATUS_PM__PM_RECYCLE__DM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0FFFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_PM_RNW_SHIFT (23U) +#define RGX_CR_MMU_FAULT_STATUS_PM_RNW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) +#define RGX_CR_MMU_FAULT_STATUS_PM_RNW_EN (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_MMU_FAULT_STATUS_PM_ADDRESS_SHIFT (3U) +#define RGX_CR_MMU_FAULT_STATUS_PM_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF800007)) +#define RGX_CR_MMU_FAULT_STATUS_PM_LEVEL_SHIFT (1U) +#define RGX_CR_MMU_FAULT_STATUS_PM_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF9)) +#define RGX_CR_MMU_FAULT_STATUS_PM_FAULT_SHIFT (0U) +#define RGX_CR_MMU_FAULT_STATUS_PM_FAULT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_MMU_FAULT_STATUS_PM_FAULT_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_MMU_STATUS +*/ +#define RGX_CR_MMU_STATUS__VPU (0x10288U) +#define RGX_CR_MMU_STATUS__VPU__MASKFULL (IMG_UINT64_C(0x000001FFFFFFFFFF)) +#define RGX_CR_MMU_STATUS__VPU__MMU_STALLED_SHIFT (40U) +#define RGX_CR_MMU_STATUS__VPU__MMU_STALLED_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) +#define RGX_CR_MMU_STATUS__VPU__MMU_STALLED_EN (IMG_UINT64_C(0x0000010000000000)) +#define RGX_CR_MMU_STATUS__VPU__PM_WRITES_SHIFT (38U) +#define RGX_CR_MMU_STATUS__VPU__PM_WRITES_CLRMSK (IMG_UINT64_C(0xFFFFFF3FFFFFFFFF)) +#define RGX_CR_MMU_STATUS__VPU__PM_READS_SHIFT (36U) +#define RGX_CR_MMU_STATUS__VPU__PM_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFCFFFFFFFFF)) +#define RGX_CR_MMU_STATUS__VPU__PC_READS_SHIFT (24U) +#define RGX_CR_MMU_STATUS__VPU__PC_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFF000FFFFFF)) +#define RGX_CR_MMU_STATUS__VPU__PD_READS_SHIFT (12U) +#define RGX_CR_MMU_STATUS__VPU__PD_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF000FFF)) +#define RGX_CR_MMU_STATUS__VPU__PT_READS_SHIFT (0U) +#define RGX_CR_MMU_STATUS__VPU__PT_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF000)) + + +/* + Register 
RGX_CR_MMU_STATUS +*/ +#define RGX_CR_MMU_STATUS (0xE170U) +#define RGX_CR_MMU_STATUS__CATURIX_TOP_INFRASTRUCTURE__MASKFULL (IMG_UINT64_C(0x000003FFFFFFFFFF)) +#define RGX_CR_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000001FFFFFFFFFF)) +#define RGX_CR_MMU_STATUS__CATURIX_TOP_INFRASTRUCTURE__MMU_STALLED_SHIFT (41U) +#define RGX_CR_MMU_STATUS__CATURIX_TOP_INFRASTRUCTURE__MMU_STALLED_CLRMSK (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF)) +#define RGX_CR_MMU_STATUS__CATURIX_TOP_INFRASTRUCTURE__MMU_STALLED_EN (IMG_UINT64_C(0x0000020000000000)) +#define RGX_CR_MMU_STATUS_MMU_STALLED_SHIFT (40U) +#define RGX_CR_MMU_STATUS_MMU_STALLED_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) +#define RGX_CR_MMU_STATUS_MMU_STALLED_EN (IMG_UINT64_C(0x0000010000000000)) +#define RGX_CR_MMU_STATUS__CATURIX_TOP_INFRASTRUCTURE__PM_WRITES_SHIFT (39U) +#define RGX_CR_MMU_STATUS__CATURIX_TOP_INFRASTRUCTURE__PM_WRITES_CLRMSK (IMG_UINT64_C(0xFFFFFE7FFFFFFFFF)) +#define RGX_CR_MMU_STATUS_PM_WRITES_SHIFT (38U) +#define RGX_CR_MMU_STATUS_PM_WRITES_CLRMSK (IMG_UINT64_C(0xFFFFFF3FFFFFFFFF)) +#define RGX_CR_MMU_STATUS_PM_READS_SHIFT (36U) +#define RGX_CR_MMU_STATUS_PM_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFCFFFFFFFFF)) +#define RGX_CR_MMU_STATUS__CATURIX_TOP_INFRASTRUCTURE__PM_READS_SHIFT (36U) +#define RGX_CR_MMU_STATUS__CATURIX_TOP_INFRASTRUCTURE__PM_READS_CLRMSK (IMG_UINT64_C(0xFFFFFF8FFFFFFFFF)) +#define RGX_CR_MMU_STATUS_PC_READS_SHIFT (24U) +#define RGX_CR_MMU_STATUS_PC_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFF000FFFFFF)) +#define RGX_CR_MMU_STATUS_PD_READS_SHIFT (12U) +#define RGX_CR_MMU_STATUS_PD_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF000FFF)) +#define RGX_CR_MMU_STATUS_PT_READS_SHIFT (0U) +#define RGX_CR_MMU_STATUS_PT_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF000)) + + +/* + Register RGX_CR_MMU_ENTRY_STATUS +*/ +#define RGX_CR_MMU_ENTRY_STATUS__VPU (0x1E028U) +#define RGX_CR_MMU_ENTRY_STATUS__VPU__MASKFULL (IMG_UINT64_C(0x000000FFFFFF80FF)) +#define RGX_CR_MMU_ENTRY_STATUS__VPU__ADDRESS_SHIFT (15U) +#define 
RGX_CR_MMU_ENTRY_STATUS__VPU__ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF0000007FFF)) +#define RGX_CR_MMU_ENTRY_STATUS__VPU__CONTEXT_ID_SHIFT (0U) +#define RGX_CR_MMU_ENTRY_STATUS__VPU__CONTEXT_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_MMU_ENTRY_STATUS +*/ +#define RGX_CR_MMU_ENTRY_STATUS (0xE178U) +#define RGX_CR_MMU_ENTRY_STATUS_MASKFULL (IMG_UINT64_C(0x000000FFFFFF80FF)) +#define RGX_CR_MMU_ENTRY_STATUS_ADDRESS_SHIFT (15U) +#define RGX_CR_MMU_ENTRY_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF0000007FFF)) +#define RGX_CR_MMU_ENTRY_STATUS_CONTEXT_ID_SHIFT (0U) +#define RGX_CR_MMU_ENTRY_STATUS_CONTEXT_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_MMU_ENTRY +*/ +#define RGX_CR_MMU_ENTRY__VPU (0x1E030U) +#define RGX_CR_MMU_ENTRY__VPU__MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_MMU_ENTRY__VPU__ENABLE_SHIFT (1U) +#define RGX_CR_MMU_ENTRY__VPU__ENABLE_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_MMU_ENTRY__VPU__ENABLE_EN (0x00000002U) +#define RGX_CR_MMU_ENTRY__VPU__PENDING_SHIFT (0U) +#define RGX_CR_MMU_ENTRY__VPU__PENDING_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_MMU_ENTRY__VPU__PENDING_EN (0x00000001U) + + +/* + Register RGX_CR_MMU_ENTRY +*/ +#define RGX_CR_MMU_ENTRY (0xE180U) +#define RGX_CR_MMU_ENTRY_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_CR_MMU_ENTRY_ENABLE_SHIFT (1U) +#define RGX_CR_MMU_ENTRY_ENABLE_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_MMU_ENTRY_ENABLE_EN (0x00000002U) +#define RGX_CR_MMU_ENTRY_PENDING_SHIFT (0U) +#define RGX_CR_MMU_ENTRY_PENDING_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_MMU_ENTRY_PENDING_EN (0x00000001U) + + +/* + Register RGX_CR_MMU_PAGE_SIZE_RANGE_ONE +*/ +#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE (0xE350U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_MASKFULL (IMG_UINT64_C(0x000001FFFFFFFFFF)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_PAGE_SIZE_SHIFT (38U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_PAGE_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFE3FFFFFFFFF)) +#define 
RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_SHIFT (19U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFC00007FFFF)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT (21U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSIZE (2097152U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_SHIFT (0U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF80000)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_ALIGNSHIFT (21U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_ALIGNSIZE (2097152U) + + +/* + Register RGX_CR_MMU_PAGE_SIZE_RANGE_TWO +*/ +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO (0xE358U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_MASKFULL (IMG_UINT64_C(0x000001FFFFFFFFFF)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_PAGE_SIZE_SHIFT (38U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_PAGE_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFE3FFFFFFFFF)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_END_ADDR_SHIFT (19U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_END_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFC00007FFFF)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_END_ADDR_ALIGNSHIFT (21U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_END_ADDR_ALIGNSIZE (2097152U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_BASE_ADDR_SHIFT (0U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_BASE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF80000)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_BASE_ADDR_ALIGNSHIFT (21U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_BASE_ADDR_ALIGNSIZE (2097152U) + + +/* + Register RGX_CR_MMU_PAGE_SIZE_RANGE_THREE +*/ +#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE (0xE360U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_MASKFULL (IMG_UINT64_C(0x000001FFFFFFFFFF)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_PAGE_SIZE_SHIFT (38U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_PAGE_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFE3FFFFFFFFF)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_END_ADDR_SHIFT (19U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_END_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFC00007FFFF)) 
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_END_ADDR_ALIGNSHIFT (21U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_END_ADDR_ALIGNSIZE (2097152U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_BASE_ADDR_SHIFT (0U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_BASE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF80000)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_BASE_ADDR_ALIGNSHIFT (21U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_BASE_ADDR_ALIGNSIZE (2097152U) + + +/* + Register RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR +*/ +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR (0xE368U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_MASKFULL (IMG_UINT64_C(0x000001FFFFFFFFFF)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_PAGE_SIZE_SHIFT (38U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_PAGE_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFE3FFFFFFFFF)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_END_ADDR_SHIFT (19U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_END_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFC00007FFFF)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_END_ADDR_ALIGNSHIFT (21U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_END_ADDR_ALIGNSIZE (2097152U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_BASE_ADDR_SHIFT (0U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_BASE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF80000)) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_BASE_ADDR_ALIGNSHIFT (21U) +#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_BASE_ADDR_ALIGNSIZE (2097152U) + + +/* + Register RGX_CR_SLC_STATUS1 +*/ +#define RGX_CR_SLC_STATUS1 (0xE210U) +#define RGX_CR_SLC_STATUS1_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SLC_STATUS1_XBAR_CFI_TIMEOUTS_SHIFT (48U) +#define RGX_CR_SLC_STATUS1_XBAR_CFI_TIMEOUTS_CLRMSK (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) +#define RGX_CR_SLC_STATUS1_BUS1_OUTSTANDING_WRITES_SHIFT (36U) +#define RGX_CR_SLC_STATUS1_BUS1_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0xFFFF000FFFFFFFFF)) +#define RGX_CR_SLC_STATUS1_BUS0_OUTSTANDING_WRITES_SHIFT (24U) +#define RGX_CR_SLC_STATUS1_BUS0_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0xFFFFFFF000FFFFFF)) +#define 
RGX_CR_SLC_STATUS1_BUS1_OUTSTANDING_READS_SHIFT (12U) +#define RGX_CR_SLC_STATUS1_BUS1_OUTSTANDING_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF000FFF)) +#define RGX_CR_SLC_STATUS1_BUS0_OUTSTANDING_READS_SHIFT (0U) +#define RGX_CR_SLC_STATUS1_BUS0_OUTSTANDING_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF000)) + + +/* + Register RGX_CR_SLC_STATUS2 +*/ +#define RGX_CR_SLC_STATUS2 (0xE218U) +#define RGX_CR_SLC_STATUS2_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_CR_SLC_STATUS2_SLC_SIZE_IN_KB_SHIFT (48U) +#define RGX_CR_SLC_STATUS2_SLC_SIZE_IN_KB_CLRMSK (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) +#define RGX_CR_SLC_STATUS2_BUS3_OUTSTANDING_WRITES_SHIFT (36U) +#define RGX_CR_SLC_STATUS2_BUS3_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0xFFFF000FFFFFFFFF)) +#define RGX_CR_SLC_STATUS2_BUS2_OUTSTANDING_WRITES_SHIFT (24U) +#define RGX_CR_SLC_STATUS2_BUS2_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0xFFFFFFF000FFFFFF)) +#define RGX_CR_SLC_STATUS2_BUS3_OUTSTANDING_READS_SHIFT (12U) +#define RGX_CR_SLC_STATUS2_BUS3_OUTSTANDING_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF000FFF)) +#define RGX_CR_SLC_STATUS2_BUS2_OUTSTANDING_READS_SHIFT (0U) +#define RGX_CR_SLC_STATUS2_BUS2_OUTSTANDING_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF000)) + + +/* + Register RGX_CR_SLC_IDLE +*/ +#define RGX_CR_SLC_IDLE (0xE230U) +#define RGX_CR_SLC_IDLE__COHERENCY_AND_ALRIF_GT0__MASKFULL (IMG_UINT64_C(0x0000000001FFFFFF)) +#define RGX_CR_SLC_IDLE_MASKFULL (IMG_UINT64_C(0x00000000001FFFFF)) +#define RGX_CR_SLC_IDLE__COHERENCY_AND_ALRIF_GT0__FBCDC_ARB_SHIFT (24U) +#define RGX_CR_SLC_IDLE__COHERENCY_AND_ALRIF_GT0__FBCDC_ARB_CLRMSK (0xFEFFFFFFU) +#define RGX_CR_SLC_IDLE__COHERENCY_AND_ALRIF_GT0__FBCDC_ARB_EN (0x01000000U) +#define RGX_CR_SLC_IDLE_FBCDC_ARB_SHIFT (20U) +#define RGX_CR_SLC_IDLE_FBCDC_ARB_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_SLC_IDLE_FBCDC_ARB_EN (0x00100000U) +#define RGX_CR_SLC_IDLE__COHERENCY_AND_ALRIF_GT0__OWDB_SHIFT (20U) +#define RGX_CR_SLC_IDLE__COHERENCY_AND_ALRIF_GT0__OWDB_CLRMSK (0xFF0FFFFFU) 
+#define RGX_CR_SLC_IDLE_OWDB_SHIFT (16U) +#define RGX_CR_SLC_IDLE_OWDB_CLRMSK (0xFFF0FFFFU) +#define RGX_CR_SLC_IDLE_ACE_CLBS_SHIFT (16U) +#define RGX_CR_SLC_IDLE_ACE_CLBS_CLRMSK (0xFFF0FFFFU) +#define RGX_CR_SLC_IDLE_ACE_CONVERTERS_SHIFT (12U) +#define RGX_CR_SLC_IDLE_ACE_CONVERTERS_CLRMSK (0xFFFF0FFFU) +#define RGX_CR_SLC_IDLE_CACHE_BANKS_SHIFT (4U) +#define RGX_CR_SLC_IDLE_CACHE_BANKS_CLRMSK (0xFFFFF00FU) +#define RGX_CR_SLC_IDLE_MMU_SHIFT (3U) +#define RGX_CR_SLC_IDLE_MMU_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_SLC_IDLE_MMU_EN (0x00000008U) +#define RGX_CR_SLC_IDLE_CCM_SHIFT (2U) +#define RGX_CR_SLC_IDLE_CCM_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_SLC_IDLE_CCM_EN (0x00000004U) +#define RGX_CR_SLC_IDLE_RDI_SHIFT (1U) +#define RGX_CR_SLC_IDLE_RDI_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_SLC_IDLE_RDI_EN (0x00000002U) +#define RGX_CR_SLC_IDLE_XBAR_SHIFT (0U) +#define RGX_CR_SLC_IDLE_XBAR_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_SLC_IDLE_XBAR_EN (0x00000001U) + + +/* + Register RGX_CR_SLC_FAULT_STOP_STATUS +*/ +#define RGX_CR_SLC_FAULT_STOP_STATUS__VPU (0x1E240U) +#define RGX_CR_SLC_FAULT_STOP_STATUS__VPU__MASKFULL (IMG_UINT64_C(0x000000000001FFFF)) +#define RGX_CR_SLC_FAULT_STOP_STATUS__VPU__BIF_SHIFT (0U) +#define RGX_CR_SLC_FAULT_STOP_STATUS__VPU__BIF_CLRMSK (0xFFFE0000U) + + +/* + Register RGX_CR_SLC_FAULT_STOP_STATUS +*/ +#define RGX_CR_SLC_FAULT_STOP_STATUS (0xE240U) +#define RGX_CR_SLC_FAULT_STOP_STATUS_MASKFULL (IMG_UINT64_C(0x000000000001FFFF)) +#define RGX_CR_SLC_FAULT_STOP_STATUS_BIF_SHIFT (0U) +#define RGX_CR_SLC_FAULT_STOP_STATUS_BIF_CLRMSK (0xFFFE0000U) + + +/* + Register RGX_CR_SLC_STATUS_DEBUG +*/ +#define RGX_CR_SLC_STATUS_DEBUG__VPU (0x1E260U) +#define RGX_CR_SLC_STATUS_DEBUG__VPU__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SLC_STATUS_DEBUG__VPU__ERR_COH_REQ_SHIFT (16U) +#define RGX_CR_SLC_STATUS_DEBUG__VPU__ERR_COH_REQ_CLRMSK (0x0000FFFFU) +#define RGX_CR_SLC_STATUS_DEBUG__VPU__ERR_ADDR_ALIAS_SHIFT (0U) +#define 
RGX_CR_SLC_STATUS_DEBUG__VPU__ERR_ADDR_ALIAS_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_SLC_STATUS_DEBUG +*/ +#define RGX_CR_SLC_STATUS_DEBUG (0xE260U) +#define RGX_CR_SLC_STATUS_DEBUG_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SLC_STATUS_DEBUG_ERR_COH_REQ_SHIFT (16U) +#define RGX_CR_SLC_STATUS_DEBUG_ERR_COH_REQ_CLRMSK (0x0000FFFFU) +#define RGX_CR_SLC_STATUS_DEBUG_ERR_ADDR_ALIAS_SHIFT (0U) +#define RGX_CR_SLC_STATUS_DEBUG_ERR_ADDR_ALIAS_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_HMMU_OSID_PAGE_SIZE +*/ +#define RGX_CR_HMMU_OSID_PAGE_SIZE (0x80000U) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_MASKFULL (IMG_UINT64_C(0x0000000077777777)) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_7_SHIFT (28U) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_7_CLRMSK (0x8FFFFFFFU) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_6_SHIFT (24U) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_6_CLRMSK (0xF8FFFFFFU) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_5_SHIFT (20U) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_5_CLRMSK (0xFF8FFFFFU) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_4_SHIFT (16U) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_4_CLRMSK (0xFFF8FFFFU) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_3_SHIFT (12U) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_3_CLRMSK (0xFFFF8FFFU) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_2_SHIFT (8U) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_2_CLRMSK (0xFFFFF8FFU) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_1_SHIFT (4U) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_1_CLRMSK (0xFFFFFF8FU) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_0_SHIFT (0U) +#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_0_CLRMSK (0xFFFFFFF8U) + + +/* + Register RGX_CR_HMMU_BYPASS +*/ +#define RGX_CR_HMMU_BYPASS (0x80008U) +#define RGX_CR_HMMU_BYPASS_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) +#define RGX_CR_HMMU_BYPASS_EN_SHIFT (0U) +#define RGX_CR_HMMU_BYPASS_EN_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_HMMU_INVAL +*/ +#define RGX_CR_HMMU_INVAL (0x80010U) +#define RGX_CR_HMMU_INVAL_MASKFULL 
(IMG_UINT64_C(0x000000000000007F)) +#define RGX_CR_HMMU_INVAL_OS_ID_SHIFT (4U) +#define RGX_CR_HMMU_INVAL_OS_ID_CLRMSK (0xFFFFFF8FU) +#define RGX_CR_HMMU_INVAL_ALL_OS_IDS_SHIFT (3U) +#define RGX_CR_HMMU_INVAL_ALL_OS_IDS_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_HMMU_INVAL_ALL_OS_IDS_EN (0x00000008U) +#define RGX_CR_HMMU_INVAL_HPC_SHIFT (2U) +#define RGX_CR_HMMU_INVAL_HPC_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_HMMU_INVAL_HPC_EN (0x00000004U) +#define RGX_CR_HMMU_INVAL_HPD_SHIFT (1U) +#define RGX_CR_HMMU_INVAL_HPD_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_HMMU_INVAL_HPD_EN (0x00000002U) +#define RGX_CR_HMMU_INVAL_HPT_SHIFT (0U) +#define RGX_CR_HMMU_INVAL_HPT_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_HMMU_INVAL_HPT_EN (0x00000001U) + + +/* + Register RGX_CR_HMMU_HPC_BASE_MAPPING0 +*/ +#define RGX_CR_HMMU_HPC_BASE_MAPPING0 (0x80018U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING0_MASKFULL (IMG_UINT64_C(0xFFFFFFF1FFFFFFF1)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING0_ADDR1_SHIFT (36U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING0_ADDR1_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING0_VALID1_SHIFT (32U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING0_VALID1_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING0_VALID1_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING0_ADDR0_SHIFT (4U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING0_ADDR0_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING0_VALID0_SHIFT (0U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING0_VALID0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING0_VALID0_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_HMMU_HPC_BASE_MAPPING1 +*/ +#define RGX_CR_HMMU_HPC_BASE_MAPPING1 (0x80020U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING1_MASKFULL (IMG_UINT64_C(0xFFFFFFF1FFFFFFF1)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING1_ADDR3_SHIFT (36U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING1_ADDR3_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) +#define 
RGX_CR_HMMU_HPC_BASE_MAPPING1_VALID3_SHIFT (32U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING1_VALID3_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING1_VALID3_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING1_ADDR2_SHIFT (4U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING1_ADDR2_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING1_VALID2_SHIFT (0U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING1_VALID2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING1_VALID2_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_HMMU_HPC_BASE_MAPPING2 +*/ +#define RGX_CR_HMMU_HPC_BASE_MAPPING2 (0x80028U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING2_MASKFULL (IMG_UINT64_C(0xFFFFFFF1FFFFFFF1)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING2_ADDR5_SHIFT (36U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING2_ADDR5_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING2_VALID5_SHIFT (32U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING2_VALID5_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING2_VALID5_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING2_ADDR4_SHIFT (4U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING2_ADDR4_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING2_VALID4_SHIFT (0U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING2_VALID4_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING2_VALID4_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_HMMU_HPC_BASE_MAPPING3 +*/ +#define RGX_CR_HMMU_HPC_BASE_MAPPING3 (0x80030U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING3_MASKFULL (IMG_UINT64_C(0xFFFFFFF1FFFFFFF1)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING3_ADDR7_SHIFT (36U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING3_ADDR7_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING3_VALID7_SHIFT (32U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING3_VALID7_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING3_VALID7_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING3_ADDR6_SHIFT (4U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING3_ADDR6_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING3_VALID6_SHIFT (0U) +#define RGX_CR_HMMU_HPC_BASE_MAPPING3_VALID6_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_HMMU_HPC_BASE_MAPPING3_VALID6_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register group: RGX_CR_HMMU_PAGE_FAULT_INFO, with 8 repeats +*/ +#define RGX_CR_HMMU_PAGE_FAULT_INFO_REPEATCOUNT (8U) +/* + Register RGX_CR_HMMU_PAGE_FAULT_INFO0 +*/ +#define RGX_CR_HMMU_PAGE_FAULT_INFO0 (0x80038U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO0_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO0_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO0_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO0_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) + + +/* + Register RGX_CR_HMMU_PAGE_FAULT_INFO1 +*/ +#define RGX_CR_HMMU_PAGE_FAULT_INFO1 (0x80040U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO1_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO1_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO1_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO1_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) + + +/* + Register RGX_CR_HMMU_PAGE_FAULT_INFO2 +*/ +#define RGX_CR_HMMU_PAGE_FAULT_INFO2 (0x80048U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO2_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO2_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO2_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO2_LEVEL_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) + + +/* + Register RGX_CR_HMMU_PAGE_FAULT_INFO3 +*/ +#define RGX_CR_HMMU_PAGE_FAULT_INFO3 (0x80050U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO3_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO3_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO3_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO3_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) + + +/* + Register RGX_CR_HMMU_PAGE_FAULT_INFO4 +*/ +#define RGX_CR_HMMU_PAGE_FAULT_INFO4 (0x80058U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO4_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO4_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO4_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO4_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) + + +/* + Register RGX_CR_HMMU_PAGE_FAULT_INFO5 +*/ +#define RGX_CR_HMMU_PAGE_FAULT_INFO5 (0x80060U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO5_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO5_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO5_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO5_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) + + +/* + Register RGX_CR_HMMU_PAGE_FAULT_INFO6 +*/ +#define RGX_CR_HMMU_PAGE_FAULT_INFO6 (0x80068U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO6_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO6_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO6_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO6_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) + + +/* + Register RGX_CR_HMMU_PAGE_FAULT_INFO7 +*/ +#define RGX_CR_HMMU_PAGE_FAULT_INFO7 (0x80070U) +#define 
RGX_CR_HMMU_PAGE_FAULT_INFO7_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO7_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PAGE_FAULT_INFO7_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PAGE_FAULT_INFO7_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) + + +/* + Register group: RGX_CR_HMMU_PENDING_ENTRY_INFO, with 8 repeats +*/ +#define RGX_CR_HMMU_PENDING_ENTRY_INFO_REPEATCOUNT (8U) +/* + Register RGX_CR_HMMU_PENDING_ENTRY_INFO0 +*/ +#define RGX_CR_HMMU_PENDING_ENTRY_INFO0 (0x800C0U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO0_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO0_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO0_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO0_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) + + +/* + Register RGX_CR_HMMU_PENDING_ENTRY_INFO1 +*/ +#define RGX_CR_HMMU_PENDING_ENTRY_INFO1 (0x800C8U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO1_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO1_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO1_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO1_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) + + +/* + Register RGX_CR_HMMU_PENDING_ENTRY_INFO2 +*/ +#define RGX_CR_HMMU_PENDING_ENTRY_INFO2 (0x800D0U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO2_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO2_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO2_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO2_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) + + +/* + Register RGX_CR_HMMU_PENDING_ENTRY_INFO3 +*/ +#define 
RGX_CR_HMMU_PENDING_ENTRY_INFO3 (0x800D8U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO3_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO3_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO3_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO3_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) + + +/* + Register RGX_CR_HMMU_PENDING_ENTRY_INFO4 +*/ +#define RGX_CR_HMMU_PENDING_ENTRY_INFO4 (0x800E0U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO4_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO4_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO4_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO4_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) + + +/* + Register RGX_CR_HMMU_PENDING_ENTRY_INFO5 +*/ +#define RGX_CR_HMMU_PENDING_ENTRY_INFO5 (0x800E8U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO5_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO5_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO5_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO5_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) + + +/* + Register RGX_CR_HMMU_PENDING_ENTRY_INFO6 +*/ +#define RGX_CR_HMMU_PENDING_ENTRY_INFO6 (0x800F0U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO6_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO6_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO6_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO6_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) + + +/* + Register RGX_CR_HMMU_PENDING_ENTRY_INFO7 +*/ +#define RGX_CR_HMMU_PENDING_ENTRY_INFO7 (0x800F8U) +#define 
RGX_CR_HMMU_PENDING_ENTRY_INFO7_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO7_ADDR_SHIFT (2U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003)) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO7_LEVEL_SHIFT (0U) +#define RGX_CR_HMMU_PENDING_ENTRY_INFO7_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) + + +/* + Register RGX_CR_HMMU_HOST_IRQ_ENABLE +*/ +#define RGX_CR_HMMU_HOST_IRQ_ENABLE (0x80100U) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_READONLY_FAULT_SHIFT (3U) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_READONLY_FAULT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_READONLY_FAULT_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_READONLY_FAULT_PM_SHIFT (2U) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_READONLY_FAULT_PM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_READONLY_FAULT_PM_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_PAGE_FAULT_SHIFT (1U) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_PAGE_FAULT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_PAGE_FAULT_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_PENDING_ENTRY_SHIFT (0U) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_PENDING_ENTRY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_HMMU_HOST_IRQ_ENABLE_PENDING_ENTRY_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_HMMU_PENDING_ENTRY +*/ +#define RGX_CR_HMMU_PENDING_ENTRY (0x80108U) +#define RGX_CR_HMMU_PENDING_ENTRY_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_HMMU_PENDING_ENTRY_ENABLE_SHIFT (0U) +#define RGX_CR_HMMU_PENDING_ENTRY_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_HMMU_PENDING_ENTRY_ENABLE_EN (0x00000001U) + + +/* + Register RGX_CR_HMMU_FAULT_STATUS +*/ +#define RGX_CR_HMMU_FAULT_STATUS (0x80120U) +#define 
RGX_CR_HMMU_FAULT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID7_SHIFT (31U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID7_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID7_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID6_SHIFT (30U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID6_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID6_EN (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID5_SHIFT (29U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID5_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID5_EN (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID4_SHIFT (28U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID4_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID4_EN (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID3_SHIFT (27U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID3_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID3_EN (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID2_SHIFT (26U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID2_EN (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID1_SHIFT (25U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID1_EN (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID0_SHIFT 
(24U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID0_EN (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID7_SHIFT (23U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID7_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID7_EN (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID6_SHIFT (22U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID6_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID6_EN (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID5_SHIFT (21U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID5_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID5_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID4_SHIFT (20U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID4_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID4_EN (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID3_SHIFT (19U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID3_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID3_EN (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID2_SHIFT (18U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID2_EN (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID1_SHIFT (17U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID1_EN 
(IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID0_SHIFT (16U) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID0_EN (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID7_SHIFT (15U) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID7_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID7_EN (IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID6_SHIFT (14U) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID6_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID6_EN (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID5_SHIFT (13U) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID5_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID5_EN (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID4_SHIFT (12U) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID4_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID4_EN (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID3_SHIFT (11U) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID3_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID3_EN (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID2_SHIFT (10U) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID2_EN (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID1_SHIFT (9U) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) +#define 
RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID1_EN (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID0_SHIFT (8U) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID0_EN (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID7_SHIFT (7U) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID7_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID7_EN (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID6_SHIFT (6U) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID6_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID6_EN (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID5_SHIFT (5U) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID5_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID5_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID4_SHIFT (4U) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID4_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID4_EN (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID3_SHIFT (3U) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID3_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID3_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID2_SHIFT (2U) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID2_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID1_SHIFT (1U) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define 
RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID1_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID0_SHIFT (0U) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID0_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register group: RGX_CR_HMMU_READONLY_FAULT_INFO, with 8 repeats +*/ +#define RGX_CR_HMMU_READONLY_FAULT_INFO_REPEATCOUNT (8U) +/* + Register RGX_CR_HMMU_READONLY_FAULT_INFO0 +*/ +#define RGX_CR_HMMU_READONLY_FAULT_INFO0 (0x80190U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO0_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) +#define RGX_CR_HMMU_READONLY_FAULT_INFO0_ADDR_SHIFT (4U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) + + +/* + Register RGX_CR_HMMU_READONLY_FAULT_INFO1 +*/ +#define RGX_CR_HMMU_READONLY_FAULT_INFO1 (0x80198U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO1_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) +#define RGX_CR_HMMU_READONLY_FAULT_INFO1_ADDR_SHIFT (4U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) + + +/* + Register RGX_CR_HMMU_READONLY_FAULT_INFO2 +*/ +#define RGX_CR_HMMU_READONLY_FAULT_INFO2 (0x801A0U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) +#define RGX_CR_HMMU_READONLY_FAULT_INFO2_ADDR_SHIFT (4U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) + + +/* + Register RGX_CR_HMMU_READONLY_FAULT_INFO3 +*/ +#define RGX_CR_HMMU_READONLY_FAULT_INFO3 (0x801A8U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO3_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) +#define RGX_CR_HMMU_READONLY_FAULT_INFO3_ADDR_SHIFT (4U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) + + +/* + Register RGX_CR_HMMU_READONLY_FAULT_INFO4 +*/ +#define RGX_CR_HMMU_READONLY_FAULT_INFO4 (0x801B0U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO4_MASKFULL 
(IMG_UINT64_C(0x000000FFFFFFFFF0)) +#define RGX_CR_HMMU_READONLY_FAULT_INFO4_ADDR_SHIFT (4U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) + + +/* + Register RGX_CR_HMMU_READONLY_FAULT_INFO5 +*/ +#define RGX_CR_HMMU_READONLY_FAULT_INFO5 (0x801B8U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO5_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) +#define RGX_CR_HMMU_READONLY_FAULT_INFO5_ADDR_SHIFT (4U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) + + +/* + Register RGX_CR_HMMU_READONLY_FAULT_INFO6 +*/ +#define RGX_CR_HMMU_READONLY_FAULT_INFO6 (0x801C0U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO6_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) +#define RGX_CR_HMMU_READONLY_FAULT_INFO6_ADDR_SHIFT (4U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) + + +/* + Register RGX_CR_HMMU_READONLY_FAULT_INFO7 +*/ +#define RGX_CR_HMMU_READONLY_FAULT_INFO7 (0x801C8U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO7_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) +#define RGX_CR_HMMU_READONLY_FAULT_INFO7_ADDR_SHIFT (4U) +#define RGX_CR_HMMU_READONLY_FAULT_INFO7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) + + +/* + Register group: RGX_CR_HMMU_READONLY_FAULT_PM_INFO, with 8 repeats +*/ +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO_REPEATCOUNT (8U) +/* + Register RGX_CR_HMMU_READONLY_FAULT_PM_INFO0 +*/ +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO0 (0x801D0U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO0_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO0_ADDR_SHIFT (0U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) + + +/* + Register RGX_CR_HMMU_READONLY_FAULT_PM_INFO1 +*/ +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO1 (0x801D8U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO1_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO1_ADDR_SHIFT (0U) +#define 
RGX_CR_HMMU_READONLY_FAULT_PM_INFO1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) + + +/* + Register RGX_CR_HMMU_READONLY_FAULT_PM_INFO2 +*/ +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO2 (0x801E0U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO2_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO2_ADDR_SHIFT (0U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) + + +/* + Register RGX_CR_HMMU_READONLY_FAULT_PM_INFO3 +*/ +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO3 (0x801E8U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO3_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO3_ADDR_SHIFT (0U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) + + +/* + Register RGX_CR_HMMU_READONLY_FAULT_PM_INFO4 +*/ +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO4 (0x801F0U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO4_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO4_ADDR_SHIFT (0U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) + + +/* + Register RGX_CR_HMMU_READONLY_FAULT_PM_INFO5 +*/ +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO5 (0x801F8U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO5_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO5_ADDR_SHIFT (0U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) + + +/* + Register RGX_CR_HMMU_READONLY_FAULT_PM_INFO6 +*/ +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO6 (0x80200U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO6_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO6_ADDR_SHIFT (0U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) + + +/* + Register RGX_CR_HMMU_READONLY_FAULT_PM_INFO7 +*/ +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO7 (0x80208U) +#define 
RGX_CR_HMMU_READONLY_FAULT_PM_INFO7_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO7_ADDR_SHIFT (0U) +#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) + + +/* + Register RGX_CR_ACE_CTRL +*/ +#define RGX_CR_ACE_CTRL__VPU (0x1E320U) +#define RGX_CR_ACE_CTRL__VPU__MASKFULL (IMG_UINT64_C(0x00000000007FCFFF)) +#define RGX_CR_ACE_CTRL__VPU__CLB_AXQOS_SHIFT (19U) +#define RGX_CR_ACE_CTRL__VPU__CLB_AXQOS_CLRMSK (0xFF87FFFFU) +#define RGX_CR_ACE_CTRL__VPU__PM_MMU_AXCACHE_SHIFT (15U) +#define RGX_CR_ACE_CTRL__VPU__PM_MMU_AXCACHE_CLRMSK (0xFFF87FFFU) +#define RGX_CR_ACE_CTRL__VPU__ENABLE_NONSECURE_PROT_MATCH_SHIFT (14U) +#define RGX_CR_ACE_CTRL__VPU__ENABLE_NONSECURE_PROT_MATCH_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_ACE_CTRL__VPU__ENABLE_NONSECURE_PROT_MATCH_EN (0x00004000U) +#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_SHIFT (8U) +#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_CLRMSK (0xFFFFF0FFU) +#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_DEVICE_NON_BUFFERABLE (0x00000000U) +#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_DEVICE_BUFFERABLE (0x00000100U) +#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_NORMAL_NC_NON_BUFFERABLE (0x00000200U) +#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_NORMAL_NC_BUFFERABLE (0x00000300U) +#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_WRITE_THROUGH_NO_ALLOCATE (0x00000600U) +#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_WRITE_THROUGH_WRITE_ALLOCATE (0x00000E00U) +#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_WRITE_BACK_NO_ALLOCATE (0x00000700U) +#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_WRITE_BACK_WRITE_ALLOCATE (0x00000F00U) +#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_SHIFT (4U) +#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_CLRMSK (0xFFFFFF0FU) +#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_DEVICE_NON_BUFFERABLE (0x00000000U) +#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_DEVICE_BUFFERABLE (0x00000010U) +#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_NORMAL_NC_NON_BUFFERABLE (0x00000020U) +#define 
RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_NORMAL_NC_BUFFERABLE (0x00000030U) +#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_WRITE_THROUGH_NO_ALLOCATE (0x000000A0U) +#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_WRITE_THROUGH_READ_ALLOCATE (0x000000E0U) +#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_WRITE_BACK_NO_ALLOCATE (0x000000B0U) +#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_WRITE_BACK_READ_ALLOCATE (0x000000F0U) +#define RGX_CR_ACE_CTRL__VPU__MMU_DOMAIN_SHIFT (2U) +#define RGX_CR_ACE_CTRL__VPU__MMU_DOMAIN_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_ACE_CTRL__VPU__COH_DOMAIN_SHIFT (1U) +#define RGX_CR_ACE_CTRL__VPU__COH_DOMAIN_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_ACE_CTRL__VPU__COH_DOMAIN_INNER_SHAREABLE (0x00000000U) +#define RGX_CR_ACE_CTRL__VPU__COH_DOMAIN_OUTER_SHAREABLE (0x00000002U) +#define RGX_CR_ACE_CTRL__VPU__NON_COH_DOMAIN_SHIFT (0U) +#define RGX_CR_ACE_CTRL__VPU__NON_COH_DOMAIN_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_ACE_CTRL__VPU__NON_COH_DOMAIN_NON_SHAREABLE (0x00000000U) +#define RGX_CR_ACE_CTRL__VPU__NON_COH_DOMAIN_SYSTEM (0x00000001U) + + +/* + Register RGX_CR_ACE_CTRL +*/ +#define RGX_CR_ACE_CTRL (0xE320U) +#define RGX_CR_ACE_CTRL_MASKFULL (IMG_UINT64_C(0x0000000000FFCFFF)) +#define RGX_CR_ACE_CTRL_DISABLE_EMPTY_BURST_REMOVAL_SHIFT (23U) +#define RGX_CR_ACE_CTRL_DISABLE_EMPTY_BURST_REMOVAL_CLRMSK (0xFF7FFFFFU) +#define RGX_CR_ACE_CTRL_DISABLE_EMPTY_BURST_REMOVAL_EN (0x00800000U) +#define RGX_CR_ACE_CTRL_CLB_AXQOS_SHIFT (19U) +#define RGX_CR_ACE_CTRL_CLB_AXQOS_CLRMSK (0xFF87FFFFU) +#define RGX_CR_ACE_CTRL_PM_MMU_AXCACHE_SHIFT (15U) +#define RGX_CR_ACE_CTRL_PM_MMU_AXCACHE_CLRMSK (0xFFF87FFFU) +#define RGX_CR_ACE_CTRL_ENABLE_NONSECURE_PROT_MATCH_SHIFT (14U) +#define RGX_CR_ACE_CTRL_ENABLE_NONSECURE_PROT_MATCH_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_ACE_CTRL_ENABLE_NONSECURE_PROT_MATCH_EN (0x00004000U) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE_SHIFT (8U) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE_CLRMSK (0xFFFFF0FFU) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE_DEVICE_NON_BUFFERABLE (0x00000000U) 
+#define RGX_CR_ACE_CTRL_MMU_AWCACHE_DEVICE_BUFFERABLE (0x00000100U) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE_NORMAL_NC_NON_BUFFERABLE (0x00000200U) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE_NORMAL_NC_BUFFERABLE (0x00000300U) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE_WRITE_THROUGH_NO_ALLOCATE (0x00000600U) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE_WRITE_THROUGH_WRITE_ALLOCATE (0x00000E00U) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE_WRITE_BACK_NO_ALLOCATE (0x00000700U) +#define RGX_CR_ACE_CTRL_MMU_AWCACHE_WRITE_BACK_WRITE_ALLOCATE (0x00000F00U) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE_SHIFT (4U) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE_CLRMSK (0xFFFFFF0FU) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE_DEVICE_NON_BUFFERABLE (0x00000000U) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE_DEVICE_BUFFERABLE (0x00000010U) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE_NORMAL_NC_NON_BUFFERABLE (0x00000020U) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE_NORMAL_NC_BUFFERABLE (0x00000030U) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE_WRITE_THROUGH_NO_ALLOCATE (0x000000A0U) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE_WRITE_THROUGH_READ_ALLOCATE (0x000000E0U) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE_WRITE_BACK_NO_ALLOCATE (0x000000B0U) +#define RGX_CR_ACE_CTRL_MMU_ARCACHE_WRITE_BACK_READ_ALLOCATE (0x000000F0U) +#define RGX_CR_ACE_CTRL_MMU_DOMAIN_SHIFT (2U) +#define RGX_CR_ACE_CTRL_MMU_DOMAIN_CLRMSK (0xFFFFFFF3U) +#define RGX_CR_ACE_CTRL_COH_DOMAIN_SHIFT (1U) +#define RGX_CR_ACE_CTRL_COH_DOMAIN_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_ACE_CTRL_COH_DOMAIN_INNER_SHAREABLE (0x00000000U) +#define RGX_CR_ACE_CTRL_COH_DOMAIN_OUTER_SHAREABLE (0x00000002U) +#define RGX_CR_ACE_CTRL_NON_COH_DOMAIN_SHIFT (0U) +#define RGX_CR_ACE_CTRL_NON_COH_DOMAIN_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_ACE_CTRL_NON_COH_DOMAIN_NON_SHAREABLE (0x00000000U) +#define RGX_CR_ACE_CTRL_NON_COH_DOMAIN_SYSTEM (0x00000001U) + + +/* + Register RGX_CR_SOC_AXI +*/ +#define RGX_CR_SOC_AXI (0xE338U) +#define RGX_CR_SOC_AXI_MASKFULL (IMG_UINT64_C(0x000000000000000F)) +#define 
RGX_CR_SOC_AXI_NON_COHERENT_128_BYTE_BURST_SUPPORT_SHIFT (3U) +#define RGX_CR_SOC_AXI_NON_COHERENT_128_BYTE_BURST_SUPPORT_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_SOC_AXI_NON_COHERENT_128_BYTE_BURST_SUPPORT_EN (0x00000008U) +#define RGX_CR_SOC_AXI_COHERENT_128_BYTE_BURST_SUPPORT_SHIFT (2U) +#define RGX_CR_SOC_AXI_COHERENT_128_BYTE_BURST_SUPPORT_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_SOC_AXI_COHERENT_128_BYTE_BURST_SUPPORT_EN (0x00000004U) +#define RGX_CR_SOC_AXI_COHERENCY_SUPPORT_SHIFT (0U) +#define RGX_CR_SOC_AXI_COHERENCY_SUPPORT_CLRMSK (0xFFFFFFFCU) +#define RGX_CR_SOC_AXI_COHERENCY_SUPPORT_NO_COHERENCY (0x00000000U) +#define RGX_CR_SOC_AXI_COHERENCY_SUPPORT_ACE_LITE_COHERENCY (0x00000001U) +#define RGX_CR_SOC_AXI_COHERENCY_SUPPORT_FULL_ACE_COHERENCY (0x00000002U) + + +/* + Register RGX_CR_CONTEXT_MAPPING0 +*/ +#define RGX_CR_CONTEXT_MAPPING0 (0xF078U) +#define RGX_CR_CONTEXT_MAPPING0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_CONTEXT_MAPPING0_2D_SHIFT (24U) +#define RGX_CR_CONTEXT_MAPPING0_2D_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00FFFFFF)) +#define RGX_CR_CONTEXT_MAPPING0_CDM_SHIFT (16U) +#define RGX_CR_CONTEXT_MAPPING0_CDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF00FFFF)) +#define RGX_CR_CONTEXT_MAPPING0_3D_SHIFT (8U) +#define RGX_CR_CONTEXT_MAPPING0_3D_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF)) +#define RGX_CR_CONTEXT_MAPPING0_GEOM_SHIFT (0U) +#define RGX_CR_CONTEXT_MAPPING0_GEOM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_CONTEXT_MAPPING2 +*/ +#define RGX_CR_CONTEXT_MAPPING2 (0xF088U) +#define RGX_CR_CONTEXT_MAPPING2_MASKFULL (IMG_UINT64_C(0x0000000000FFFFFF)) +#define RGX_CR_CONTEXT_MAPPING2_ALIST0_SHIFT (16U) +#define RGX_CR_CONTEXT_MAPPING2_ALIST0_CLRMSK (0xFF00FFFFU) +#define RGX_CR_CONTEXT_MAPPING2_TE0_SHIFT (8U) +#define RGX_CR_CONTEXT_MAPPING2_TE0_CLRMSK (0xFFFF00FFU) +#define RGX_CR_CONTEXT_MAPPING2_VCE0_SHIFT (0U) +#define RGX_CR_CONTEXT_MAPPING2_VCE0_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_CONTEXT_MAPPING3 +*/ +#define 
RGX_CR_CONTEXT_MAPPING3 (0xF090U) +#define RGX_CR_CONTEXT_MAPPING3_MASKFULL (IMG_UINT64_C(0x0000000000FFFFFF)) +#define RGX_CR_CONTEXT_MAPPING3_ALIST1_SHIFT (16U) +#define RGX_CR_CONTEXT_MAPPING3_ALIST1_CLRMSK (0xFF00FFFFU) +#define RGX_CR_CONTEXT_MAPPING3_TE1_SHIFT (8U) +#define RGX_CR_CONTEXT_MAPPING3_TE1_CLRMSK (0xFFFF00FFU) +#define RGX_CR_CONTEXT_MAPPING3_VCE1_SHIFT (0U) +#define RGX_CR_CONTEXT_MAPPING3_VCE1_CLRMSK (0xFFFFFF00U) + + +/* + Register RGX_CR_BIF_OUTSTANDING_READ +*/ +#define RGX_CR_BIF_OUTSTANDING_READ (0xF098U) +#define RGX_CR_BIF_OUTSTANDING_READ_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_BIF_OUTSTANDING_READ_COUNTER_SHIFT (0U) +#define RGX_CR_BIF_OUTSTANDING_READ_COUNTER_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_BIF_TEXAS1_OUTSTANDING_READ +*/ +#define RGX_CR_BIF_TEXAS1_OUTSTANDING_READ (0xF0A0U) +#define RGX_CR_BIF_TEXAS1_OUTSTANDING_READ_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_BIF_TEXAS1_OUTSTANDING_READ_COUNTER_SHIFT (0U) +#define RGX_CR_BIF_TEXAS1_OUTSTANDING_READ_COUNTER_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_BIF_TEXAS0_OUTSTANDING_READ +*/ +#define RGX_CR_BIF_TEXAS0_OUTSTANDING_READ (0xF0A8U) +#define RGX_CR_BIF_TEXAS0_OUTSTANDING_READ_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_BIF_TEXAS0_OUTSTANDING_READ_COUNTER_SHIFT (0U) +#define RGX_CR_BIF_TEXAS0_OUTSTANDING_READ_COUNTER_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_BIF_PFS +*/ +#define RGX_CR_BIF_PFS (0xF0B0U) +#define RGX_CR_BIF_PFS_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) +#define RGX_CR_BIF_PFS_SLC_STALLING_SHIFT (8U) +#define RGX_CR_BIF_PFS_SLC_STALLING_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_BIF_PFS_SLC_STALLING_EN (0x00000100U) +#define RGX_CR_BIF_PFS_TILING_IDLE_SHIFT (7U) +#define RGX_CR_BIF_PFS_TILING_IDLE_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_BIF_PFS_TILING_IDLE_EN (0x00000080U) +#define RGX_CR_BIF_PFS_ARB_IDLE_SHIFT (6U) +#define RGX_CR_BIF_PFS_ARB_IDLE_CLRMSK (0xFFFFFFBFU) +#define 
RGX_CR_BIF_PFS_ARB_IDLE_EN (0x00000040U) +#define RGX_CR_BIF_PFS_RTN_MEM_IF_IDLE_SHIFT (5U) +#define RGX_CR_BIF_PFS_RTN_MEM_IF_IDLE_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_BIF_PFS_RTN_MEM_IF_IDLE_EN (0x00000020U) +#define RGX_CR_BIF_PFS_NONMETA_REQ_COMPLETE_SHIFT (4U) +#define RGX_CR_BIF_PFS_NONMETA_REQ_COMPLETE_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_BIF_PFS_NONMETA_REQ_COMPLETE_EN (0x00000010U) +#define RGX_CR_BIF_PFS_NONMETA_WDATA_COMPLETE_SHIFT (3U) +#define RGX_CR_BIF_PFS_NONMETA_WDATA_COMPLETE_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_BIF_PFS_NONMETA_WDATA_COMPLETE_EN (0x00000008U) +#define RGX_CR_BIF_PFS_WDATA_IP_IDLE_SHIFT (2U) +#define RGX_CR_BIF_PFS_WDATA_IP_IDLE_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_BIF_PFS_WDATA_IP_IDLE_EN (0x00000004U) +#define RGX_CR_BIF_PFS_REQUEST_IP_IDLE_SHIFT (1U) +#define RGX_CR_BIF_PFS_REQUEST_IP_IDLE_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_BIF_PFS_REQUEST_IP_IDLE_EN (0x00000002U) +#define RGX_CR_BIF_PFS_STALL_COMPLETE_SHIFT (0U) +#define RGX_CR_BIF_PFS_STALL_COMPLETE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_BIF_PFS_STALL_COMPLETE_EN (0x00000001U) + + +/* + Register RGX_CR_BIF_TEXAS0_PFS +*/ +#define RGX_CR_BIF_TEXAS0_PFS (0xF0B8U) +#define RGX_CR_BIF_TEXAS0_PFS_MASKFULL (IMG_UINT64_C(0x000000000000007F)) +#define RGX_CR_BIF_TEXAS0_PFS_JONES_CFI_INFLIGHT_SHIFT (6U) +#define RGX_CR_BIF_TEXAS0_PFS_JONES_CFI_INFLIGHT_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_BIF_TEXAS0_PFS_JONES_CFI_INFLIGHT_EN (0x00000040U) +#define RGX_CR_BIF_TEXAS0_PFS_JONES_CFI_IDLE_SHIFT (5U) +#define RGX_CR_BIF_TEXAS0_PFS_JONES_CFI_IDLE_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_BIF_TEXAS0_PFS_JONES_CFI_IDLE_EN (0x00000020U) +#define RGX_CR_BIF_TEXAS0_PFS_SLC_STALLING_SHIFT (4U) +#define RGX_CR_BIF_TEXAS0_PFS_SLC_STALLING_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_BIF_TEXAS0_PFS_SLC_STALLING_EN (0x00000010U) +#define RGX_CR_BIF_TEXAS0_PFS_TILING_IDLE_SHIFT (3U) +#define RGX_CR_BIF_TEXAS0_PFS_TILING_IDLE_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_BIF_TEXAS0_PFS_TILING_IDLE_EN (0x00000008U) +#define 
RGX_CR_BIF_TEXAS0_PFS_ARB_IDLE_SHIFT (2U) +#define RGX_CR_BIF_TEXAS0_PFS_ARB_IDLE_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_BIF_TEXAS0_PFS_ARB_IDLE_EN (0x00000004U) +#define RGX_CR_BIF_TEXAS0_PFS_RTN_MEM_IF_IDLE_SHIFT (1U) +#define RGX_CR_BIF_TEXAS0_PFS_RTN_MEM_IF_IDLE_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_BIF_TEXAS0_PFS_RTN_MEM_IF_IDLE_EN (0x00000002U) +#define RGX_CR_BIF_TEXAS0_PFS_STALL_COMPLETE_SHIFT (0U) +#define RGX_CR_BIF_TEXAS0_PFS_STALL_COMPLETE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_BIF_TEXAS0_PFS_STALL_COMPLETE_EN (0x00000001U) + + +/* + Register RGX_CR_BIF_TEXAS1_PFS +*/ +#define RGX_CR_BIF_TEXAS1_PFS (0xF0C8U) +#define RGX_CR_BIF_TEXAS1_PFS_MASKFULL (IMG_UINT64_C(0x000000000000007F)) +#define RGX_CR_BIF_TEXAS1_PFS_ARB_ACCUM_STALLING_PDSRW_SHIFT (6U) +#define RGX_CR_BIF_TEXAS1_PFS_ARB_ACCUM_STALLING_PDSRW_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_BIF_TEXAS1_PFS_ARB_ACCUM_STALLING_PDSRW_EN (0x00000040U) +#define RGX_CR_BIF_TEXAS1_PFS_ARB_ACCUM_STALLING_MCU_SHIFT (5U) +#define RGX_CR_BIF_TEXAS1_PFS_ARB_ACCUM_STALLING_MCU_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_BIF_TEXAS1_PFS_ARB_ACCUM_STALLING_MCU_EN (0x00000020U) +#define RGX_CR_BIF_TEXAS1_PFS_SLC_STALLING_SHIFT (4U) +#define RGX_CR_BIF_TEXAS1_PFS_SLC_STALLING_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_BIF_TEXAS1_PFS_SLC_STALLING_EN (0x00000010U) +#define RGX_CR_BIF_TEXAS1_PFS_TILING_IDLE_SHIFT (3U) +#define RGX_CR_BIF_TEXAS1_PFS_TILING_IDLE_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_BIF_TEXAS1_PFS_TILING_IDLE_EN (0x00000008U) +#define RGX_CR_BIF_TEXAS1_PFS_ARB_IDLE_SHIFT (2U) +#define RGX_CR_BIF_TEXAS1_PFS_ARB_IDLE_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_BIF_TEXAS1_PFS_ARB_IDLE_EN (0x00000004U) +#define RGX_CR_BIF_TEXAS1_PFS_RTN_MEM_IF_IDLE_SHIFT (1U) +#define RGX_CR_BIF_TEXAS1_PFS_RTN_MEM_IF_IDLE_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_BIF_TEXAS1_PFS_RTN_MEM_IF_IDLE_EN (0x00000002U) +#define RGX_CR_BIF_TEXAS1_PFS_STALL_COMPLETE_SHIFT (0U) +#define RGX_CR_BIF_TEXAS1_PFS_STALL_COMPLETE_CLRMSK (0xFFFFFFFEU) +#define 
RGX_CR_BIF_TEXAS1_PFS_STALL_COMPLETE_EN (0x00000001U) + + +/* + Register RGX_CR_JONES_FIX +*/ +#define RGX_CR_JONES_FIX__ROGUE3 (0xF0C0U) +#define RGX_CR_JONES_FIX__ROGUE3__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) +#define RGX_CR_JONES_FIX__ROGUE3__DISABLE_SHIFT (0U) +#define RGX_CR_JONES_FIX__ROGUE3__DISABLE_CLRMSK (0xFFFF0000U) + + +/* + Register RGX_CR_FBCDC_STATUS +*/ +#define RGX_CR_FBCDC_STATUS (0xF600U) +#define RGX_CR_FBCDC_STATUS_MASKFULL (IMG_UINT64_C(0x000000000F0F0F0F)) +#define RGX_CR_FBCDC_STATUS_STATE_CHECK_FAIL_SHIFT (24U) +#define RGX_CR_FBCDC_STATUS_STATE_CHECK_FAIL_CLRMSK (0xF0FFFFFFU) +#define RGX_CR_FBCDC_STATUS_UFOD_DECODER_ERROR_SHIFT (16U) +#define RGX_CR_FBCDC_STATUS_UFOD_DECODER_ERROR_CLRMSK (0xFFF0FFFFU) +#define RGX_CR_FBCDC_STATUS_HEADER_CHECK_FAIL_SHIFT (8U) +#define RGX_CR_FBCDC_STATUS_HEADER_CHECK_FAIL_CLRMSK (0xFFFFF0FFU) +#define RGX_CR_FBCDC_STATUS_TILE_LENGTH_CHECK_FAIL_SHIFT (0U) +#define RGX_CR_FBCDC_STATUS_TILE_LENGTH_CHECK_FAIL_CLRMSK (0xFFFFFFF0U) + + +/* + Register RGX_CR_CONTEXT_MAPPING4 +*/ +#define RGX_CR_CONTEXT_MAPPING4 (0xF210U) +#define RGX_CR_CONTEXT_MAPPING4_MASKFULL (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) +#define RGX_CR_CONTEXT_MAPPING4_3D_MMU_STACK_SHIFT (40U) +#define RGX_CR_CONTEXT_MAPPING4_3D_MMU_STACK_CLRMSK (IMG_UINT64_C(0xFFFF00FFFFFFFFFF)) +#define RGX_CR_CONTEXT_MAPPING4_3D_UFSTACK_SHIFT (32U) +#define RGX_CR_CONTEXT_MAPPING4_3D_UFSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFF00FFFFFFFF)) +#define RGX_CR_CONTEXT_MAPPING4_3D_FSTACK_SHIFT (24U) +#define RGX_CR_CONTEXT_MAPPING4_3D_FSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00FFFFFF)) +#define RGX_CR_CONTEXT_MAPPING4_TA_MMU_STACK_SHIFT (16U) +#define RGX_CR_CONTEXT_MAPPING4_TA_MMU_STACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF00FFFF)) +#define RGX_CR_CONTEXT_MAPPING4_TA_UFSTACK_SHIFT (8U) +#define RGX_CR_CONTEXT_MAPPING4_TA_UFSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF)) +#define RGX_CR_CONTEXT_MAPPING4_TA_FSTACK_SHIFT (0U) +#define RGX_CR_CONTEXT_MAPPING4_TA_FSTACK_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_CR_FBCDC_IDLE +*/ +#define RGX_CR_FBCDC_IDLE (0xF218U) +#define RGX_CR_FBCDC_IDLE_MASKFULL (IMG_UINT64_C(0x0000000000003FFF)) +#define RGX_CR_FBCDC_IDLE_FBC_MEMIF3_SHIFT (13U) +#define RGX_CR_FBCDC_IDLE_FBC_MEMIF3_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_FBCDC_IDLE_FBC_MEMIF3_EN (0x00002000U) +#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF3_SHIFT (12U) +#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF3_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF3_EN (0x00001000U) +#define RGX_CR_FBCDC_IDLE_FBM_MEMIF3_SHIFT (11U) +#define RGX_CR_FBCDC_IDLE_FBM_MEMIF3_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_FBCDC_IDLE_FBM_MEMIF3_EN (0x00000800U) +#define RGX_CR_FBCDC_IDLE_FBC_MEMIF2_SHIFT (10U) +#define RGX_CR_FBCDC_IDLE_FBC_MEMIF2_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_FBCDC_IDLE_FBC_MEMIF2_EN (0x00000400U) +#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF2_SHIFT (9U) +#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF2_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF2_EN (0x00000200U) +#define RGX_CR_FBCDC_IDLE_FBM_MEMIF2_SHIFT (8U) +#define RGX_CR_FBCDC_IDLE_FBM_MEMIF2_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_FBCDC_IDLE_FBM_MEMIF2_EN (0x00000100U) +#define RGX_CR_FBCDC_IDLE_FBC_MEMIF1_SHIFT (7U) +#define RGX_CR_FBCDC_IDLE_FBC_MEMIF1_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_FBCDC_IDLE_FBC_MEMIF1_EN (0x00000080U) +#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF1_SHIFT (6U) +#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF1_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF1_EN (0x00000040U) +#define RGX_CR_FBCDC_IDLE_FBM_MEMIF1_SHIFT (5U) +#define RGX_CR_FBCDC_IDLE_FBM_MEMIF1_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_FBCDC_IDLE_FBM_MEMIF1_EN (0x00000020U) +#define RGX_CR_FBCDC_IDLE_FBC_MEMIF0_SHIFT (4U) +#define RGX_CR_FBCDC_IDLE_FBC_MEMIF0_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_FBCDC_IDLE_FBC_MEMIF0_EN (0x00000010U) +#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF0_SHIFT (3U) +#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF0_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF0_EN (0x00000008U) 
+#define RGX_CR_FBCDC_IDLE_FBM_MEMIF0_SHIFT (2U) +#define RGX_CR_FBCDC_IDLE_FBM_MEMIF0_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_FBCDC_IDLE_FBM_MEMIF0_EN (0x00000004U) +#define RGX_CR_FBCDC_IDLE_FBHC_SHIFT (1U) +#define RGX_CR_FBCDC_IDLE_FBHC_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_FBCDC_IDLE_FBHC_EN (0x00000002U) +#define RGX_CR_FBCDC_IDLE_FBSC_SHIFT (0U) +#define RGX_CR_FBCDC_IDLE_FBSC_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FBCDC_IDLE_FBSC_EN (0x00000001U) + + +/* + Register RGX_CR_MERCER_SOFT_RESET +*/ +#define RGX_CR_MERCER_SOFT_RESET (0x0630U) +#define RGX_CR_MERCER_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER2_SHIFT (62U) +#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER2_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER2_EN (IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER1_SHIFT (61U) +#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER1_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER1_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER0_SHIFT (60U) +#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER0_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER0_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER2_SHIFT (59U) +#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER2_CLRMSK (IMG_UINT64_C(0xF7FFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER2_EN (IMG_UINT64_C(0x0800000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER1_SHIFT (58U) +#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER1_CLRMSK (IMG_UINT64_C(0xFBFFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER1_EN (IMG_UINT64_C(0x0400000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER0_SHIFT (57U) +#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER0_CLRMSK (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF)) +#define 
RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER0_EN (IMG_UINT64_C(0x0200000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER2_SHIFT (56U) +#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER2_CLRMSK (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER2_EN (IMG_UINT64_C(0x0100000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER1_SHIFT (55U) +#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER1_CLRMSK (IMG_UINT64_C(0xFF7FFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER1_EN (IMG_UINT64_C(0x0080000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER0_SHIFT (54U) +#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER0_CLRMSK (IMG_UINT64_C(0xFFBFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER0_EN (IMG_UINT64_C(0x0040000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER2_SHIFT (53U) +#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER2_CLRMSK (IMG_UINT64_C(0xFFDFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER2_EN (IMG_UINT64_C(0x0020000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER1_SHIFT (52U) +#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER1_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER1_EN (IMG_UINT64_C(0x0010000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER0_SHIFT (51U) +#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER0_CLRMSK (IMG_UINT64_C(0xFFF7FFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER0_EN (IMG_UINT64_C(0x0008000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER2_SHIFT (50U) +#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER2_EN (IMG_UINT64_C(0x0004000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER1_SHIFT (49U) +#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFDFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER1_EN (IMG_UINT64_C(0x0002000000000000)) +#define 
RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER0_SHIFT (48U) +#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFEFFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER0_EN (IMG_UINT64_C(0x0001000000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER2_SHIFT (47U) +#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFF7FFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER2_EN (IMG_UINT64_C(0x0000800000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER1_SHIFT (46U) +#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFBFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER1_EN (IMG_UINT64_C(0x0000400000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER0_SHIFT (45U) +#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER0_EN (IMG_UINT64_C(0x0000200000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER2_SHIFT (44U) +#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFEFFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER2_EN (IMG_UINT64_C(0x0000100000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER1_SHIFT (43U) +#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFF7FFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER1_EN (IMG_UINT64_C(0x0000080000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER0_SHIFT (42U) +#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER0_EN (IMG_UINT64_C(0x0000040000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER2_SHIFT (41U) +#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER2_EN (IMG_UINT64_C(0x0000020000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER1_SHIFT (40U) +#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER1_CLRMSK 
(IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER1_EN (IMG_UINT64_C(0x0000010000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER0_SHIFT (39U) +#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFF7FFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER0_EN (IMG_UINT64_C(0x0000008000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER2_SHIFT (38U) +#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER2_EN (IMG_UINT64_C(0x0000004000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER1_SHIFT (37U) +#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER1_EN (IMG_UINT64_C(0x0000002000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER0_SHIFT (36U) +#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER0_EN (IMG_UINT64_C(0x0000001000000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER2_SHIFT (35U) +#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER2_EN (IMG_UINT64_C(0x0000000800000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER1_SHIFT (34U) +#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER1_EN (IMG_UINT64_C(0x0000000400000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER0_SHIFT (33U) +#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER0_EN (IMG_UINT64_C(0x0000000200000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER2_SHIFT (32U) +#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER2_EN 
(IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER1_SHIFT (31U) +#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER1_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER0_SHIFT (30U) +#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER0_EN (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER2_SHIFT (29U) +#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER2_EN (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER1_SHIFT (28U) +#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER1_EN (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER0_SHIFT (27U) +#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER0_EN (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER2_SHIFT (26U) +#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER2_EN (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER1_SHIFT (25U) +#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER1_EN (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER0_SHIFT (24U) +#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER0_EN (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER2_SHIFT (23U) +#define 
RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER2_EN (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER1_SHIFT (22U) +#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER1_EN (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER0_SHIFT (21U) +#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER0_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER2_SHIFT (20U) +#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER2_EN (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER1_SHIFT (19U) +#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER1_EN (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER0_SHIFT (18U) +#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER0_EN (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER2_SHIFT (17U) +#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER2_EN (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER1_SHIFT (16U) +#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER1_EN (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER0_SHIFT (15U) +#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER0_EN 
(IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER2_SHIFT (14U) +#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER2_EN (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER1_SHIFT (13U) +#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER1_EN (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER0_SHIFT (12U) +#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER0_EN (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER2_SHIFT (11U) +#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER2_EN (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER1_SHIFT (10U) +#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER1_EN (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER0_SHIFT (9U) +#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER0_EN (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER2_SHIFT (8U) +#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER2_EN (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER1_SHIFT (7U) +#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) +#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER1_EN (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER0_SHIFT (6U) +#define 
RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER0_EN (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER2_SHIFT (5U) +#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER2_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER1_SHIFT (4U) +#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER1_EN (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER0_SHIFT (3U) +#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER0_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER2_SHIFT (2U) +#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER2_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER1_SHIFT (1U) +#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER1_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER0_SHIFT (0U) +#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER0_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_TEXAS_SOFT_RESET +*/ +#define RGX_CR_TEXAS_SOFT_RESET (0x0640U) +#define RGX_CR_TEXAS_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_TEXAS_SOFT_RESET_SPU31_SHIFT (31U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU31_CLRMSK (0x7FFFFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU31_EN (0x80000000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU30_SHIFT (30U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU30_CLRMSK 
(0xBFFFFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU30_EN (0x40000000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU29_SHIFT (29U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU29_CLRMSK (0xDFFFFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU29_EN (0x20000000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU28_SHIFT (28U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU28_CLRMSK (0xEFFFFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU28_EN (0x10000000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU27_SHIFT (27U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU27_CLRMSK (0xF7FFFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU27_EN (0x08000000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU26_SHIFT (26U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU26_CLRMSK (0xFBFFFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU26_EN (0x04000000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU25_SHIFT (25U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU25_CLRMSK (0xFDFFFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU25_EN (0x02000000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU24_SHIFT (24U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU24_CLRMSK (0xFEFFFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU24_EN (0x01000000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU23_SHIFT (23U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU23_CLRMSK (0xFF7FFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU23_EN (0x00800000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU22_SHIFT (22U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU22_CLRMSK (0xFFBFFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU22_EN (0x00400000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU21_SHIFT (21U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU21_CLRMSK (0xFFDFFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU21_EN (0x00200000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU20_SHIFT (20U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU20_CLRMSK (0xFFEFFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU20_EN (0x00100000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU19_SHIFT (19U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU19_CLRMSK (0xFFF7FFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU19_EN (0x00080000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU18_SHIFT (18U) +#define 
RGX_CR_TEXAS_SOFT_RESET_SPU18_CLRMSK (0xFFFBFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU18_EN (0x00040000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU17_SHIFT (17U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU17_CLRMSK (0xFFFDFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU17_EN (0x00020000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU16_SHIFT (16U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU16_CLRMSK (0xFFFEFFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU16_EN (0x00010000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU15_SHIFT (15U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU15_CLRMSK (0xFFFF7FFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU15_EN (0x00008000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU14_SHIFT (14U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU14_CLRMSK (0xFFFFBFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU14_EN (0x00004000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU13_SHIFT (13U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU13_CLRMSK (0xFFFFDFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU13_EN (0x00002000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU12_SHIFT (12U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU12_CLRMSK (0xFFFFEFFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU12_EN (0x00001000U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU11_SHIFT (11U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU11_CLRMSK (0xFFFFF7FFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU11_EN (0x00000800U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU10_SHIFT (10U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU10_CLRMSK (0xFFFFFBFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU10_EN (0x00000400U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU9_SHIFT (9U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU9_CLRMSK (0xFFFFFDFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU9_EN (0x00000200U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU8_SHIFT (8U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU8_CLRMSK (0xFFFFFEFFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU8_EN (0x00000100U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU7_SHIFT (7U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU7_CLRMSK (0xFFFFFF7FU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU7_EN (0x00000080U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU6_SHIFT (6U) +#define 
RGX_CR_TEXAS_SOFT_RESET_SPU6_CLRMSK (0xFFFFFFBFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU6_EN (0x00000040U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU5_SHIFT (5U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU5_CLRMSK (0xFFFFFFDFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU5_EN (0x00000020U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU4_SHIFT (4U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU4_CLRMSK (0xFFFFFFEFU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU4_EN (0x00000010U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU3_SHIFT (3U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU3_CLRMSK (0xFFFFFFF7U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU3_EN (0x00000008U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU2_SHIFT (2U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU2_CLRMSK (0xFFFFFFFBU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU2_EN (0x00000004U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU1_SHIFT (1U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU1_CLRMSK (0xFFFFFFFDU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU1_EN (0x00000002U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU0_SHIFT (0U) +#define RGX_CR_TEXAS_SOFT_RESET_SPU0_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_TEXAS_SOFT_RESET_SPU0_EN (0x00000001U) + + +/* + Register RGX_CR_SWIFT_SOFT_RESET +*/ +#define RGX_CR_SWIFT_SOFT_RESET (0x0650U) +#define RGX_CR_SWIFT_SOFT_RESET__ALRIF_GT0__MASKFULL (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT2_SHIFT (62U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT2_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT2_EN (IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT1_SHIFT (61U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT1_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT1_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT0_SHIFT (60U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT0_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT0_EN 
(IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT2_SHIFT (59U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT2_CLRMSK (IMG_UINT64_C(0xF7FFFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT2_EN (IMG_UINT64_C(0x0800000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT1_SHIFT (58U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT1_CLRMSK (IMG_UINT64_C(0xFBFFFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT1_EN (IMG_UINT64_C(0x0400000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT0_SHIFT (57U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT0_CLRMSK (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT0_EN (IMG_UINT64_C(0x0200000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT2_SHIFT (56U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT2_CLRMSK (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT2_EN (IMG_UINT64_C(0x0100000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT1_SHIFT (55U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT1_CLRMSK (IMG_UINT64_C(0xFF7FFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT1_EN (IMG_UINT64_C(0x0080000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT0_SHIFT (54U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFBFFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT0_EN (IMG_UINT64_C(0x0040000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT2_SHIFT (53U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFDFFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT2_EN (IMG_UINT64_C(0x0020000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT1_SHIFT (52U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT1_EN (IMG_UINT64_C(0x0010000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT0_SHIFT (51U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT0_CLRMSK 
(IMG_UINT64_C(0xFFF7FFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT0_EN (IMG_UINT64_C(0x0008000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT2_SHIFT (50U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT2_EN (IMG_UINT64_C(0x0004000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT1_SHIFT (49U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFDFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT1_EN (IMG_UINT64_C(0x0002000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT0_SHIFT (48U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFEFFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT0_EN (IMG_UINT64_C(0x0001000000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT2_SHIFT (47U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFF7FFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT2_EN (IMG_UINT64_C(0x0000800000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT1_SHIFT (46U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFBFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT1_EN (IMG_UINT64_C(0x0000400000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT0_SHIFT (45U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT0_EN (IMG_UINT64_C(0x0000200000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT2_SHIFT (44U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFEFFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT2_EN (IMG_UINT64_C(0x0000100000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT1_SHIFT (43U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFF7FFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT1_EN (IMG_UINT64_C(0x0000080000000000)) +#define 
RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT0_SHIFT (42U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT0_EN (IMG_UINT64_C(0x0000040000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT2_SHIFT (41U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT2_EN (IMG_UINT64_C(0x0000020000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT1_SHIFT (40U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT1_EN (IMG_UINT64_C(0x0000010000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT0_SHIFT (39U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFF7FFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT0_EN (IMG_UINT64_C(0x0000008000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT2_SHIFT (38U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT2_EN (IMG_UINT64_C(0x0000004000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT1_SHIFT (37U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT1_EN (IMG_UINT64_C(0x0000002000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT0_SHIFT (36U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT0_EN (IMG_UINT64_C(0x0000001000000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT2_SHIFT (35U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT2_EN (IMG_UINT64_C(0x0000000800000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT1_SHIFT (34U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF)) +#define 
RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT1_EN (IMG_UINT64_C(0x0000000400000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT0_SHIFT (33U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT0_EN (IMG_UINT64_C(0x0000000200000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT2_SHIFT (32U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT2_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU31_SHIFT (31U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU31_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU31_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT1_SHIFT (31U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT1_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU30_SHIFT (30U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU30_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU30_EN (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT0_SHIFT (30U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT0_EN (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU29_SHIFT (29U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU29_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU29_EN (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT2_SHIFT (29U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT2_EN (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU28_SHIFT (28U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU28_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) +#define 
RGX_CR_SWIFT_SOFT_RESET_SPU28_EN (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT1_SHIFT (28U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT1_EN (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU27_SHIFT (27U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU27_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU27_EN (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT0_SHIFT (27U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT0_EN (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU26_SHIFT (26U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU26_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU26_EN (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT2_SHIFT (26U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT2_EN (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU25_SHIFT (25U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU25_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU25_EN (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT1_SHIFT (25U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT1_EN (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU24_SHIFT (24U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU24_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU24_EN (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT0_SHIFT (24U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) +#define 
RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT0_EN (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU23_SHIFT (23U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU23_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU23_EN (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT2_SHIFT (23U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT2_EN (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU22_SHIFT (22U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU22_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU22_EN (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT1_SHIFT (22U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT1_EN (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU21_SHIFT (21U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU21_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU21_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT0_SHIFT (21U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT0_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SHIFT (20U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU20_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU20_EN (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT2_SHIFT (20U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT2_EN (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SHIFT (19U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU19_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU19_EN 
(IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT1_SHIFT (19U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT1_EN (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SHIFT (18U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU18_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU18_EN (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT0_SHIFT (18U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT0_EN (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SHIFT (17U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU17_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU17_EN (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT2_SHIFT (17U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT2_EN (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SHIFT (16U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU16_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU16_EN (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT1_SHIFT (16U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT1_EN (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SHIFT (15U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU15_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU15_EN (IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT0_SHIFT (15U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT0_EN 
(IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SHIFT (14U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU14_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU14_EN (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT2_SHIFT (14U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT2_EN (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SHIFT (13U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU13_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU13_EN (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT1_SHIFT (13U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT1_EN (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SHIFT (12U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU12_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU12_EN (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT0_SHIFT (12U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT0_EN (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SHIFT (11U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU11_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU11_EN (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT2_SHIFT (11U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT2_EN (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SHIFT (10U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU10_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU10_EN (IMG_UINT64_C(0x0000000000000400)) +#define 
RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT1_SHIFT (10U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT1_EN (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SHIFT (9U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU9_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU9_EN (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT0_SHIFT (9U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT0_EN (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SHIFT (8U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU8_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU8_EN (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT2_SHIFT (8U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT2_EN (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SHIFT (7U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU7_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU7_EN (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT1_SHIFT (7U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT1_EN (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SHIFT (6U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU6_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU6_EN (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT0_SHIFT (6U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT0_EN (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SHIFT (5U) +#define 
RGX_CR_SWIFT_SOFT_RESET_SPU5_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU5_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT2_SHIFT (5U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT2_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SHIFT (4U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU4_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU4_EN (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT1_SHIFT (4U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT1_EN (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SHIFT (3U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU3_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU3_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT0_SHIFT (3U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT0_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SHIFT (2U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU2_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT2_SHIFT (2U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT2_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SHIFT (1U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU1_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT1_SHIFT (1U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT1_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT1_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SHIFT (0U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU0_EN (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT0_SHIFT (0U) +#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT0_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_RAC_SOFT_RESET +*/ +#define RGX_CR_RAC_SOFT_RESET (0x0660U) +#define RGX_CR_RAC_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC2_SHIFT (62U) +#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC2_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC2_EN (IMG_UINT64_C(0x4000000000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC1_SHIFT (61U) +#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC1_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC1_EN (IMG_UINT64_C(0x2000000000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC0_SHIFT (60U) +#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC0_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC0_EN (IMG_UINT64_C(0x1000000000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC2_SHIFT (59U) +#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC2_CLRMSK (IMG_UINT64_C(0xF7FFFFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC2_EN (IMG_UINT64_C(0x0800000000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC1_SHIFT (58U) +#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC1_CLRMSK (IMG_UINT64_C(0xFBFFFFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC1_EN (IMG_UINT64_C(0x0400000000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC0_SHIFT (57U) +#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC0_CLRMSK (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC0_EN 
(IMG_UINT64_C(0x0200000000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC2_SHIFT (56U) +#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC2_CLRMSK (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC2_EN (IMG_UINT64_C(0x0100000000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC1_SHIFT (55U) +#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC1_CLRMSK (IMG_UINT64_C(0xFF7FFFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC1_EN (IMG_UINT64_C(0x0080000000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC0_SHIFT (54U) +#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC0_CLRMSK (IMG_UINT64_C(0xFFBFFFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC0_EN (IMG_UINT64_C(0x0040000000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU17_RAC2_SHIFT (53U) +#define RGX_CR_RAC_SOFT_RESET_SPU17_RAC2_CLRMSK (IMG_UINT64_C(0xFFDFFFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU17_RAC2_EN (IMG_UINT64_C(0x0020000000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU17_RAC1_SHIFT (52U) +#define RGX_CR_RAC_SOFT_RESET_SPU17_RAC1_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU17_RAC1_EN (IMG_UINT64_C(0x0010000000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU17_RAC0_SHIFT (51U) +#define RGX_CR_RAC_SOFT_RESET_SPU17_RAC0_CLRMSK (IMG_UINT64_C(0xFFF7FFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU17_RAC0_EN (IMG_UINT64_C(0x0008000000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC2_SHIFT (50U) +#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC2_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC2_EN (IMG_UINT64_C(0x0004000000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC1_SHIFT (49U) +#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC1_CLRMSK (IMG_UINT64_C(0xFFFDFFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC1_EN (IMG_UINT64_C(0x0002000000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC0_SHIFT (48U) +#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC0_CLRMSK (IMG_UINT64_C(0xFFFEFFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC0_EN (IMG_UINT64_C(0x0001000000000000)) 
+#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC2_SHIFT (47U) +#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC2_CLRMSK (IMG_UINT64_C(0xFFFF7FFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC2_EN (IMG_UINT64_C(0x0000800000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC1_SHIFT (46U) +#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFBFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC1_EN (IMG_UINT64_C(0x0000400000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC0_SHIFT (45U) +#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC0_EN (IMG_UINT64_C(0x0000200000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC2_SHIFT (44U) +#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFEFFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC2_EN (IMG_UINT64_C(0x0000100000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC1_SHIFT (43U) +#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFF7FFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC1_EN (IMG_UINT64_C(0x0000080000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC0_SHIFT (42U) +#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC0_EN (IMG_UINT64_C(0x0000040000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC2_SHIFT (41U) +#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC2_EN (IMG_UINT64_C(0x0000020000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC1_SHIFT (40U) +#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC1_EN (IMG_UINT64_C(0x0000010000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC0_SHIFT (39U) +#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFF7FFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC0_EN (IMG_UINT64_C(0x0000008000000000)) +#define 
RGX_CR_RAC_SOFT_RESET_SPU12_RAC2_SHIFT (38U) +#define RGX_CR_RAC_SOFT_RESET_SPU12_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU12_RAC2_EN (IMG_UINT64_C(0x0000004000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU12_RAC1_SHIFT (37U) +#define RGX_CR_RAC_SOFT_RESET_SPU12_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU12_RAC1_EN (IMG_UINT64_C(0x0000002000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU12_RAC0_SHIFT (36U) +#define RGX_CR_RAC_SOFT_RESET_SPU12_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU12_RAC0_EN (IMG_UINT64_C(0x0000001000000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU11_RAC2_SHIFT (35U) +#define RGX_CR_RAC_SOFT_RESET_SPU11_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU11_RAC2_EN (IMG_UINT64_C(0x0000000800000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU11_RAC1_SHIFT (34U) +#define RGX_CR_RAC_SOFT_RESET_SPU11_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU11_RAC1_EN (IMG_UINT64_C(0x0000000400000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU11_RAC0_SHIFT (33U) +#define RGX_CR_RAC_SOFT_RESET_SPU11_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU11_RAC0_EN (IMG_UINT64_C(0x0000000200000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC2_SHIFT (32U) +#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC2_EN (IMG_UINT64_C(0x0000000100000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC1_SHIFT (31U) +#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC1_EN (IMG_UINT64_C(0x0000000080000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC0_SHIFT (30U) +#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC0_EN (IMG_UINT64_C(0x0000000040000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC2_SHIFT 
(29U) +#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC2_EN (IMG_UINT64_C(0x0000000020000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC1_SHIFT (28U) +#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC1_EN (IMG_UINT64_C(0x0000000010000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC0_SHIFT (27U) +#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC0_EN (IMG_UINT64_C(0x0000000008000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC2_SHIFT (26U) +#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC2_EN (IMG_UINT64_C(0x0000000004000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC1_SHIFT (25U) +#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC1_EN (IMG_UINT64_C(0x0000000002000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC0_SHIFT (24U) +#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC0_EN (IMG_UINT64_C(0x0000000001000000)) +#define RGX_CR_RAC_SOFT_RESET_SPU7_RAC2_SHIFT (23U) +#define RGX_CR_RAC_SOFT_RESET_SPU7_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU7_RAC2_EN (IMG_UINT64_C(0x0000000000800000)) +#define RGX_CR_RAC_SOFT_RESET_SPU7_RAC1_SHIFT (22U) +#define RGX_CR_RAC_SOFT_RESET_SPU7_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU7_RAC1_EN (IMG_UINT64_C(0x0000000000400000)) +#define RGX_CR_RAC_SOFT_RESET_SPU7_RAC0_SHIFT (21U) +#define RGX_CR_RAC_SOFT_RESET_SPU7_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU7_RAC0_EN (IMG_UINT64_C(0x0000000000200000)) +#define RGX_CR_RAC_SOFT_RESET_SPU6_RAC2_SHIFT (20U) +#define RGX_CR_RAC_SOFT_RESET_SPU6_RAC2_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU6_RAC2_EN (IMG_UINT64_C(0x0000000000100000)) +#define RGX_CR_RAC_SOFT_RESET_SPU6_RAC1_SHIFT (19U) +#define RGX_CR_RAC_SOFT_RESET_SPU6_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU6_RAC1_EN (IMG_UINT64_C(0x0000000000080000)) +#define RGX_CR_RAC_SOFT_RESET_SPU6_RAC0_SHIFT (18U) +#define RGX_CR_RAC_SOFT_RESET_SPU6_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU6_RAC0_EN (IMG_UINT64_C(0x0000000000040000)) +#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC2_SHIFT (17U) +#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC2_EN (IMG_UINT64_C(0x0000000000020000)) +#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC1_SHIFT (16U) +#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC1_EN (IMG_UINT64_C(0x0000000000010000)) +#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC0_SHIFT (15U) +#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC0_EN (IMG_UINT64_C(0x0000000000008000)) +#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC2_SHIFT (14U) +#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC2_EN (IMG_UINT64_C(0x0000000000004000)) +#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC1_SHIFT (13U) +#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC1_EN (IMG_UINT64_C(0x0000000000002000)) +#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC0_SHIFT (12U) +#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC0_EN (IMG_UINT64_C(0x0000000000001000)) +#define RGX_CR_RAC_SOFT_RESET_SPU3_RAC2_SHIFT (11U) +#define RGX_CR_RAC_SOFT_RESET_SPU3_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) +#define 
RGX_CR_RAC_SOFT_RESET_SPU3_RAC2_EN (IMG_UINT64_C(0x0000000000000800)) +#define RGX_CR_RAC_SOFT_RESET_SPU3_RAC1_SHIFT (10U) +#define RGX_CR_RAC_SOFT_RESET_SPU3_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU3_RAC1_EN (IMG_UINT64_C(0x0000000000000400)) +#define RGX_CR_RAC_SOFT_RESET_SPU3_RAC0_SHIFT (9U) +#define RGX_CR_RAC_SOFT_RESET_SPU3_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU3_RAC0_EN (IMG_UINT64_C(0x0000000000000200)) +#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC2_SHIFT (8U) +#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC2_EN (IMG_UINT64_C(0x0000000000000100)) +#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC1_SHIFT (7U) +#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) +#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC1_EN (IMG_UINT64_C(0x0000000000000080)) +#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC0_SHIFT (6U) +#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) +#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC0_EN (IMG_UINT64_C(0x0000000000000040)) +#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC2_SHIFT (5U) +#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC2_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC1_SHIFT (4U) +#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC1_EN (IMG_UINT64_C(0x0000000000000010)) +#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC0_SHIFT (3U) +#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) +#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC0_EN (IMG_UINT64_C(0x0000000000000008)) +#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC2_SHIFT (2U) +#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC2_EN (IMG_UINT64_C(0x0000000000000004)) 
+#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC1_SHIFT (1U) +#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC1_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC0_SHIFT (0U) +#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC0_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_CR_FWCORE_WDT_RESET +*/ +#define RGX_CR_FWCORE_WDT_RESET (0x4500U) +#define RGX_CR_FWCORE_WDT_RESET_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_CR_FWCORE_WDT_RESET_EN_SHIFT (0U) +#define RGX_CR_FWCORE_WDT_RESET_EN_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FWCORE_WDT_RESET_EN_EN (0x00000001U) + + +/* + Register RGX_CR_FWCORE_WDT_CTRL +*/ +#define RGX_CR_FWCORE_WDT_CTRL (0x4508U) +#define RGX_CR_FWCORE_WDT_CTRL_MASKFULL (IMG_UINT64_C(0x00000000FFFF1F01)) +#define RGX_CR_FWCORE_WDT_CTRL_PROT_SHIFT (16U) +#define RGX_CR_FWCORE_WDT_CTRL_PROT_CLRMSK (0x0000FFFFU) +#define RGX_CR_FWCORE_WDT_CTRL_THRESHOLD_SHIFT (8U) +#define RGX_CR_FWCORE_WDT_CTRL_THRESHOLD_CLRMSK (0xFFFFE0FFU) +#define RGX_CR_FWCORE_WDT_CTRL_ENABLE_SHIFT (0U) +#define RGX_CR_FWCORE_WDT_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_CR_FWCORE_WDT_CTRL_ENABLE_EN (0x00000001U) + + +/* + Register RGX_CR_FWCORE_WDT_COUNT +*/ +#define RGX_CR_FWCORE_WDT_COUNT (0x4510U) +#define RGX_CR_FWCORE_WDT_COUNT_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_CR_FWCORE_WDT_COUNT_VALUE_SHIFT (0U) +#define RGX_CR_FWCORE_WDT_COUNT_VALUE_CLRMSK (0x00000000U) + + +#endif /* RGX_CR_DEFS_KM_H */ +/***************************************************************************** + End of file (rgx_cr_defs_km.h) +*****************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/km/rgxdefs_km.h b/drivers/gpu/drm/phytium/octopus/km/rgxdefs_km.h new file mode 100644 index 000000000000..ca3b33718ffb --- /dev/null +++ 
b/drivers/gpu/drm/phytium/octopus/km/rgxdefs_km.h @@ -0,0 +1,386 @@ +/*************************************************************************/ /*! +@Title Rogue hw definitions (kernel mode) +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXDEFS_KM_H +#define RGXDEFS_KM_H + +#if defined(RGX_BVNC_CORE_KM_HEADER) && defined(RGX_BNC_CONFIG_KM_HEADER) +#include RGX_BVNC_CORE_KM_HEADER +#include RGX_BNC_CONFIG_KM_HEADER +#endif + +#define IMG_EXPLICIT_INCLUDE_HWDEFS +#if defined(__KERNEL__) +#include "rgx_cr_defs_km.h" +#endif +#undef IMG_EXPLICIT_INCLUDE_HWDEFS + +#include "rgx_heap_firmware.h" + +/* The following Macros are picked up through BVNC headers for no hardware + * operations to be compatible with old build infrastructure. 
+ */ +#if defined(NO_HARDWARE) +/****************************************************************************** + * Check for valid B.X.N.C + *****************************************************************************/ +#if !defined(RGX_BVNC_KM_B) || !defined(RGX_BVNC_KM_V) || !defined(RGX_BVNC_KM_N) || !defined(RGX_BVNC_KM_C) +#error "Need to specify BVNC (RGX_BVNC_KM_B, RGX_BVNC_KM_V, RGX_BVNC_KM_N and RGX_BVNC_C)" +#endif + +/* Check core/config compatibility */ +#if (RGX_BVNC_KM_B != RGX_BNC_KM_B) || (RGX_BVNC_KM_N != RGX_BNC_KM_N) || (RGX_BVNC_KM_C != RGX_BNC_KM_C) +#error "BVNC headers are mismatching (KM core/config)" +#endif +#endif + +/****************************************************************************** + * RGX Version name + *****************************************************************************/ +#define RGX_BVNC_KM_ST2(S) #S +#define RGX_BVNC_KM_ST(S) RGX_BVNC_KM_ST2(S) +#define RGX_BVNC_KM RGX_BVNC_KM_ST(RGX_BVNC_KM_B) "." RGX_BVNC_KM_ST(RGX_BVNC_KM_V) "." RGX_BVNC_KM_ST(RGX_BVNC_KM_N) "." 
RGX_BVNC_KM_ST(RGX_BVNC_KM_C) +#define RGX_BVNC_KM_V_ST RGX_BVNC_KM_ST(RGX_BVNC_KM_V) + +/* Maximum string size is [bb.vvvp.nnnn.cccc\0], includes null char */ +#define RGX_BVNC_STR_SIZE_MAX (2+1+4+1+4+1+4+1) +#define RGX_BVNC_STR_FMTSPEC "%u.%u.%u.%u" +#define RGX_BVNC_STRP_FMTSPEC "%u.%up.%u.%u" + + +/****************************************************************************** + * RGX Defines + *****************************************************************************/ + +#define BVNC_FIELD_MASK ((1 << BVNC_FIELD_WIDTH) - 1) +#define C_POSITION (0) +#define N_POSITION ((C_POSITION) + (BVNC_FIELD_WIDTH)) +#define V_POSITION ((N_POSITION) + (BVNC_FIELD_WIDTH)) +#define B_POSITION ((V_POSITION) + (BVNC_FIELD_WIDTH)) + +#define B_POSTION_MASK (((IMG_UINT64)(BVNC_FIELD_MASK) << (B_POSITION))) +#define V_POSTION_MASK (((IMG_UINT64)(BVNC_FIELD_MASK) << (V_POSITION))) +#define N_POSTION_MASK (((IMG_UINT64)(BVNC_FIELD_MASK) << (N_POSITION))) +#define C_POSTION_MASK (((IMG_UINT64)(BVNC_FIELD_MASK) << (C_POSITION))) + +#define GET_B(x) (((x) & (B_POSTION_MASK)) >> (B_POSITION)) +#define GET_V(x) (((x) & (V_POSTION_MASK)) >> (V_POSITION)) +#define GET_N(x) (((x) & (N_POSTION_MASK)) >> (N_POSITION)) +#define GET_C(x) (((x) & (C_POSTION_MASK)) >> (C_POSITION)) + +#define BVNC_PACK(B,V,N,C) ((((IMG_UINT64)B)) << (B_POSITION) | \ + (((IMG_UINT64)V)) << (V_POSITION) | \ + (((IMG_UINT64)N)) << (N_POSITION) | \ + (((IMG_UINT64)C)) << (C_POSITION) \ + ) + +#define RGX_CR_CORE_ID_CONFIG_N_SHIFT (8U) +#define RGX_CR_CORE_ID_CONFIG_C_SHIFT (0U) + +#define RGX_CR_CORE_ID_CONFIG_N_CLRMSK (0XFFFF00FFU) +#define RGX_CR_CORE_ID_CONFIG_C_CLRMSK (0XFFFFFF00U) + +/* The default number of OSID is 1, higher number implies VZ enabled firmware */ +#if !defined(RGXFW_NATIVE) && defined(PVRSRV_VZ_NUM_OSID) && (PVRSRV_VZ_NUM_OSID + 1U > 1U) +#define RGXFW_NUM_OS PVRSRV_VZ_NUM_OSID +#else +#define RGXFW_NUM_OS 1U +#endif + +#define RGXFW_MAX_NUM_OS (8U) +#define RGXFW_HOST_OS (0U) +#define 
RGXFW_GUEST_OSID_START (1U) + +#define RGXFW_THREAD_0 (0U) +#define RGXFW_THREAD_1 (1U) + +/* META cores (required for the RGX_FEATURE_META) */ +#define MTP218 (1) +#define MTP219 (2) +#define LTP218 (3) +#define LTP217 (4) + +/* META Core memory feature depending on META variants */ +#define RGX_META_COREMEM_32K (32*1024) +#define RGX_META_COREMEM_48K (48*1024) +#define RGX_META_COREMEM_64K (64*1024) +#define RGX_META_COREMEM_96K (96*1024) +#define RGX_META_COREMEM_128K (128*1024) +#define RGX_META_COREMEM_256K (256*1024) + +#if !defined(__KERNEL__) +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(RGX_FEATURE_META_DMA) +#undef SUPPORT_META_COREMEM +#undef RGX_FEATURE_META_COREMEM_SIZE +#define RGX_FEATURE_META_COREMEM_SIZE (0) +#define RGX_META_COREMEM_SIZE (0) +#elif defined(RGX_FEATURE_META_COREMEM_SIZE) +#define RGX_META_COREMEM_SIZE (RGX_FEATURE_META_COREMEM_SIZE*1024U) +#else +#define RGX_META_COREMEM_SIZE (0) +#endif + +#if RGX_META_COREMEM_SIZE != 0 +#define RGX_META_COREMEM +#define RGX_META_COREMEM_CODE +#define RGX_META_COREMEM_DATA +#endif +#endif + +#define GET_ROGUE_CACHE_LINE_SIZE(x) ((((IMG_INT32)x) > 0) ? 
((x)/8) : (0)) + + +#define MAX_HW_TA3DCONTEXTS 2U + +#define RGX_CR_CLK_CTRL0_ALL_ON (IMG_UINT64_C(0x5555555555555555)&RGX_CR_CLK_CTRL0_MASKFULL) +#define RGX_CR_CLK_CTRL0_ALL_AUTO (IMG_UINT64_C(0xaaaaaaaaaaaaaaaa)&RGX_CR_CLK_CTRL0_MASKFULL) +#define RGX_CR_CLK_CTRL1_ALL_ON (IMG_UINT64_C(0x5555555555555555)&RGX_CR_CLK_CTRL1_MASKFULL) +#define RGX_CR_CLK_CTRL1_ALL_AUTO (IMG_UINT64_C(0xaaaaaaaaaaaaaaaa)&RGX_CR_CLK_CTRL1_MASKFULL) +#define RGX_CR_CLK_CTRL2_ALL_ON (IMG_UINT64_C(0x5555555555555555)&RGX_CR_CLK_CTRL2_MASKFULL) +#define RGX_CR_CLK_CTRL2_ALL_AUTO (IMG_UINT64_C(0xaaaaaaaaaaaaaaaa)&RGX_CR_CLK_CTRL2_MASKFULL) + +#define RGX_CR_MERCER0_SOFT_RESET_SPU_EN (RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER0_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER0_EN) + +#define RGX_CR_MERCER1_SOFT_RESET_SPU_EN (RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER1_EN | \ + 
RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER1_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER1_EN) + +#define RGX_CR_MERCER2_SOFT_RESET_SPU_EN (RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER2_EN | \ + RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER2_EN) + + +/* SOFT_RESET steps as defined in the TRM */ +#define RGX_SOFT_RESET_JONES (RGX_CR_SOFT_RESET_PM_EN | \ + RGX_CR_SOFT_RESET_ISP_EN) +#define RGX_SOFT_RESET_JONES_ALL (RGX_SOFT_RESET_JONES | \ + RGX_CR_SOFT_RESET_BIF_TEXAS_EN | \ + 
RGX_CR_SOFT_RESET_BIF_JONES_EN | \ + RGX_CR_SOFT_RESET_SLC_EN | \ + RGX_CR_SOFT_RESET_GARTEN_EN) +#define RGX_SOFT_RESET_EXTRA (RGX_CR_SOFT_RESET_PIXEL_EN | \ + RGX_CR_SOFT_RESET_VERTEX_EN | \ + RGX_CR_SOFT_RESET_GEO_VERTEX_EN | \ + RGX_CR_SOFT_RESET_GEO_SHARED_EN | \ + RGX_CR_SOFT_RESET_COMPUTE_EN | \ + RGX_CR_SOFT_RESET_TDM_EN) +#define RGX_SOFT_RESET_FROM_WITHIN_CORE (RGX_CR_SOFT_RESET_MASKFULL ^ \ + (RGX_CR_SOFT_RESET_GARTEN_EN | \ + RGX_CR_SOFT_RESET_BIF_JONES_EN | \ + RGX_CR_SOFT_RESET_SLC_EN)) + + +#define RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT (12U) +#define RGX_BIF_PM_PHYSICAL_PAGE_SIZE (1U << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT) + +#define RGX_BIF_PM_VIRTUAL_PAGE_ALIGNSHIFT (14U) +#define RGX_BIF_PM_VIRTUAL_PAGE_SIZE (1U << RGX_BIF_PM_VIRTUAL_PAGE_ALIGNSHIFT) + +#define RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE (32U) + +/* To get the number of required Bernado/Phantom(s), divide + * the number of clusters by 4 and round up + */ +#define RGX_REQ_NUM_PHANTOMS(CLUSTERS) ((CLUSTERS + 3U) / 4U) +#define RGX_REQ_NUM_BERNADOS(CLUSTERS) ((CLUSTERS + 3U) / 4U) +#define RGX_REQ_NUM_BLACKPEARLS(CLUSTERS) ((CLUSTERS + 3U) / 4U) + +#if !defined(__KERNEL__) +# define RGX_NUM_PHANTOMS (RGX_REQ_NUM_PHANTOMS(RGX_FEATURE_NUM_CLUSTERS)) +#endif + +/* for nohw multicore, true max number of cores returned to client */ +#define RGX_MULTICORE_MAX_NOHW_CORES (8U) + +/* + * META second thread feature depending on META variants and + * available CoreMem + */ +#if defined(RGX_FEATURE_META) && (RGX_FEATURE_META == MTP218 || RGX_FEATURE_META == MTP219) && (RGX_FEATURE_META_COREMEM_SIZE >= 96) +#define RGXFW_META_SUPPORT_2ND_THREAD +#endif + + +/* + * FW MMU contexts + */ +#if defined(SUPPORT_TRUSTED_DEVICE) +#define MMU_CONTEXT_MAPPING_FWPRIV (0x0U) /* FW code/private data */ +#define MMU_CONTEXT_MAPPING_FWIF (0x7U) /* Host/FW data */ +#else +#define MMU_CONTEXT_MAPPING_FWPRIV (0x0U) +#define MMU_CONTEXT_MAPPING_FWIF (0x0U) +#endif + + +/* + * FWCORE wrapper register defines + */ +#define 
FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT +#define FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_CLRMSK RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_CLRMSK +#define FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT (12U) + + +/* + * FBC clear color register defaults based on HW defaults + * non-YUV clear colour 0: 0x00000000 (encoded as ch3,2,1,0) + * non-YUV clear colour 1: 0x01000000 (encoded as ch3,2,1,0) + * YUV clear colour 0: 0x000 000 (encoded as UV Y) + * YUV clear colour 1: 0x000 3FF (encoded as UV Y) + */ +#define RGX_FBC_CC_DEFAULT (0x0100000000000000) +#define RGX_FBC_CC_YUV_DEFAULT (0x000003FF00000000) + +/* + * Virtualisation definitions + */ + +#define RGX_VIRTUALISATION_REG_SIZE_PER_OS (RGX_CR_MTS_SCHEDULE1 - RGX_CR_MTS_SCHEDULE) + + +/* GPU CR timer tick in GPU cycles */ +#define RGX_CRTIME_TICK_IN_CYCLES (256U) + +#if defined(FIX_HW_BRN_71840) +#define ROGUE_RENDERSIZE_MAXX (16384U) +#define ROGUE_RENDERSIZE_MAXY (16384U) +#else +#define ROGUE_RENDERSIZE_MAXX (RGX_FEATURE_RENDER_TARGET_XY_MAX) +#define ROGUE_RENDERSIZE_MAXY (RGX_FEATURE_RENDER_TARGET_XY_MAX) +#endif + +/* + * Register used by the FW to track the current boot stage (not used in MIPS) + */ +#define RGX_FW_BOOT_STAGE_REGISTER (RGX_CR_SCRATCH14) + +/* + * Define used to determine whether or not SLC range-based flush/invalidate + * interface is supported. 
+ */ +#define RGX_SRV_SLC_RANGEBASED_CFI_SUPPORTED 1 + +/* + * Macro used to indicate which version of HWPerf is active + */ +#define RGX_FEATURE_HWPERF_VOLCANIC + +/* + * Maximum number of cores supported by TRP + */ +#define RGX_TRP_MAX_NUM_CORES (8U) + +/* + * Maximum number of cores supported by WGP + */ +#define RGX_WGP_MAX_NUM_CORES (8U) + + +#if defined(FIX_HW_BRN_71422) +/* + * The BRN71422 software workaround requires a target physical address on + * the hardware platform with a low latency response time and which will + * not suffer from delays of DRAM hardware operations such as refresh and + * recalibration. Only with that address defined will the workaround be used. + */ +#if !defined(PDUMP) +//#define RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR (IMG_UINT64_C(0x0000000000)) +#endif +#define RGX_BRN71422_WORKAROUND_READ_SIZE (32U) +#endif + +#endif /* RGXDEFS_KM_H */ diff --git a/drivers/gpu/drm/phytium/octopus/km/rgxmmudefs_km.h b/drivers/gpu/drm/phytium/octopus/km/rgxmmudefs_km.h new file mode 100644 index 000000000000..845e8e52044a --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/km/rgxmmudefs_km.h @@ -0,0 +1,275 @@ +/*************************************************************************/ /*! +@Title Hardware definition file rgxmmudefs_km.h +@Brief The file contains auto-generated hardware definitions without + BVNC-specific compile time conditionals. +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +/* **** Autogenerated C -- do not edit **** */ + +/* + * rogue_bif.def + */ + + +#ifndef RGXMMUDEFS_KM_H +#define RGXMMUDEFS_KM_H + +#include "img_types.h" +#include "img_defs.h" + + +#define RGXMMUDEFS_KM_REVISION 0 + +#define OSID_CTXT_MAPPING_REGISTERS_CONSTANTS_OSID_CTXT_MAPPING_NUM_ENTRIES_VALUE (0x00000010U) + + +#define OSID_CTXT_MAPPING_REGISTERS_CONSTANTS_OSID_CTXT_MAPPING_PER_OS_SHIFT_VALUE (0x00000004U) + + +#define OSID_CTXT_MAPPING_REGISTERS_CONSTANTS_OSID_CTXT_MAPPING_PER_OS_MASK_VALUE (0x00000007U) + + +#define RGX_BIF_DM_ENCODING_VERTEX (0x00000000U) +#define RGX_BIF_DM_ENCODING_PIXEL (0x00000001U) +#define RGX_BIF_DM_ENCODING_COMPUTE (0x00000002U) +#define RGX_BIF_DM_ENCODING_TLA (0x00000003U) +#define RGX_BIF_DM_ENCODING_PB_VCE (0x00000004U) +#define RGX_BIF_DM_ENCODING_PB_TE (0x00000005U) +#define RGX_BIF_DM_ENCODING_META (0x00000007U) +#define RGX_BIF_DM_ENCODING_HOST (0x00000008U) +#define RGX_BIF_DM_ENCODING_PM_ALIST (0x00000009U) + + +#define RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT (30U) +#define RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK (IMG_UINT64_C(0xFFFFFF003FFFFFFF)) +#define RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT (21U) +#define RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC01FFFFF)) +#define RGX_MMUCTRL_VADDR_PT_INDEX_SHIFT (12U) +#define RGX_MMUCTRL_VADDR_PT_INDEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE00FFF)) + + +#define RGX_MMUCTRL_ENTRIES_PC_VALUE (0x00000400U) + + +#define RGX_MMUCTRL_ENTRIES_PD_VALUE (0x00000200U) + + +#define RGX_MMUCTRL_ENTRIES_PT_VALUE (0x00000200U) + + +#define RGX_MMUCTRL_ENTRY_SIZE_PC_VALUE (0x00000020U) + + +#define RGX_MMUCTRL_ENTRY_SIZE_PD_VALUE (0x00000040U) + + +#define RGX_MMUCTRL_ENTRY_SIZE_PT_VALUE (0x00000040U) + + +#define RGX_MMUCTRL_PAGE_SIZE_MASK (0x00000007U) +#define RGX_MMUCTRL_PAGE_SIZE_4KB (0x00000000U) +#define RGX_MMUCTRL_PAGE_SIZE_16KB (0x00000001U) +#define RGX_MMUCTRL_PAGE_SIZE_64KB (0x00000002U) +#define 
RGX_MMUCTRL_PAGE_SIZE_256KB (0x00000003U) +#define RGX_MMUCTRL_PAGE_SIZE_1MB (0x00000004U) +#define RGX_MMUCTRL_PAGE_SIZE_2MB (0x00000005U) + + +#define RGX_MMUCTRL_PAGE_4KB_RANGE_SHIFT (12U) +#define RGX_MMUCTRL_PAGE_4KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) + + +#define RGX_MMUCTRL_PAGE_16KB_RANGE_SHIFT (14U) +#define RGX_MMUCTRL_PAGE_16KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000003FFF)) + + +#define RGX_MMUCTRL_PAGE_64KB_RANGE_SHIFT (16U) +#define RGX_MMUCTRL_PAGE_64KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000FFFF)) + + +#define RGX_MMUCTRL_PAGE_256KB_RANGE_SHIFT (18U) +#define RGX_MMUCTRL_PAGE_256KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000003FFFF)) + + +#define RGX_MMUCTRL_PAGE_1MB_RANGE_SHIFT (20U) +#define RGX_MMUCTRL_PAGE_1MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00000FFFFF)) + + +#define RGX_MMUCTRL_PAGE_2MB_RANGE_SHIFT (21U) +#define RGX_MMUCTRL_PAGE_2MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00001FFFFF)) + + +#define RGX_MMUCTRL_PT_BASE_4KB_RANGE_SHIFT (12U) +#define RGX_MMUCTRL_PT_BASE_4KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) + + +#define RGX_MMUCTRL_PT_BASE_16KB_RANGE_SHIFT (10U) +#define RGX_MMUCTRL_PT_BASE_16KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00000003FF)) + + +#define RGX_MMUCTRL_PT_BASE_64KB_RANGE_SHIFT (8U) +#define RGX_MMUCTRL_PT_BASE_64KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00000000FF)) + + +#define RGX_MMUCTRL_PT_BASE_256KB_RANGE_SHIFT (6U) +#define RGX_MMUCTRL_PT_BASE_256KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000003F)) + + +#define RGX_MMUCTRL_PT_BASE_1MB_RANGE_SHIFT (5U) +#define RGX_MMUCTRL_PT_BASE_1MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000001F)) + + +#define RGX_MMUCTRL_PT_BASE_2MB_RANGE_SHIFT (5U) +#define RGX_MMUCTRL_PT_BASE_2MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000001F)) + + +#define RGX_MMUCTRL_AXCACHE_MASK (0x0000000FU) +/* +Device Non-bufferable */ +#define RGX_MMUCTRL_AXCACHE_DEVNONBUFF (0x00000000U) +/* +Device Bufferable */ +#define RGX_MMUCTRL_AXCACHE_DEVBUFF (0x00000001U) +/* +Normal Non-cacheable 
Non-bufferable */ +#define RGX_MMUCTRL_AXCACHE_NORMNONBUFF (0x00000002U) +/* +Normal Non-cacheable Bufferable */ +#define RGX_MMUCTRL_AXCACHE_NORMBUFF (0x00000003U) +/* +Write-through No-allocate*/ +#define RGX_MMUCTRL_AXCACHE_WTNOALLOC (0x00000006U) +/* +Write-back No-allocate*/ +#define RGX_MMUCTRL_AXCACHE_WBNOALLOC (0x00000007U) +/* +Write-through Read-Allocate */ +#define RGX_MMUCTRL_AXCACHE_WTRALLOC (0x00000008U) +/* +Write-back Read-Allocate */ +#define RGX_MMUCTRL_AXCACHE_WBRALLOC (0x00000009U) +/* +Write-through Write-Allocate */ +#define RGX_MMUCTRL_AXCACHE_WTWALLOC (0x0000000aU) +/* +Write-back Write-Allocate */ +#define RGX_MMUCTRL_AXCACHE_WBWALLOC (0x0000000bU) +/* +Write-through Read/Write-Allocate */ +#define RGX_MMUCTRL_AXCACHE_WTRWALLOC (0x0000000eU) +/* +Write-back Read/Write-Allocate */ +#define RGX_MMUCTRL_AXCACHE_WBRWALLOC (0x0000000fU) + + +#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_SHIFT (62U) +#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) +#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN (IMG_UINT64_C(0x4000000000000000)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_SHIFT (58U) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_CLRMSK (IMG_UINT64_C(0xC3FFFFFFFFFFFFFF)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_DEVNONBUFF (IMG_UINT64_C(0x0000000000000000)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_DEVBUFF (IMG_UINT64_C(0x0400000000000000)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_NORMNONBUFF (IMG_UINT64_C(0x0800000000000000)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_NORMBUFF (IMG_UINT64_C(0x0c00000000000000)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_WTNOALLOC (IMG_UINT64_C(0x1800000000000000)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_WBNOALLOC (IMG_UINT64_C(0x1c00000000000000)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_WTRALLOC (IMG_UINT64_C(0x2000000000000000)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_WBRALLOC (IMG_UINT64_C(0x2400000000000000)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_WTWALLOC (IMG_UINT64_C(0x2800000000000000)) +#define 
RGX_MMUCTRL_PT_DATA_AXCACHE_WBWALLOC (IMG_UINT64_C(0x2c00000000000000)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_WTRWALLOC (IMG_UINT64_C(0x3800000000000000)) +#define RGX_MMUCTRL_PT_DATA_AXCACHE_WBRWALLOC (IMG_UINT64_C(0x3c00000000000000)) +#define RGX_MMUCTRL_PT_DATA_VP_PAGE_HI_SHIFT (40U) +#define RGX_MMUCTRL_PT_DATA_VP_PAGE_HI_CLRMSK (IMG_UINT64_C(0xFC0000FFFFFFFFFF)) +#define RGX_MMUCTRL_PT_DATA_PAGE_SHIFT (12U) +#define RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) +#define RGX_MMUCTRL_PT_DATA_VP_PAGE_LO_SHIFT (6U) +#define RGX_MMUCTRL_PT_DATA_VP_PAGE_LO_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF03F)) +#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_SHIFT (5U) +#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_MMUCTRL_PT_DATA_PM_SRC_SHIFT (4U) +#define RGX_MMUCTRL_PT_DATA_PM_SRC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_MMUCTRL_PT_DATA_PM_SRC_EN (IMG_UINT64_C(0x0000000000000010)) +#define RGX_MMUCTRL_PT_DATA_CC_SHIFT (2U) +#define RGX_MMUCTRL_PT_DATA_CC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_MMUCTRL_PT_DATA_CC_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_MMUCTRL_PT_DATA_READ_ONLY_SHIFT (1U) +#define RGX_MMUCTRL_PT_DATA_READ_ONLY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_MMUCTRL_PT_DATA_READ_ONLY_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_MMUCTRL_PT_DATA_VALID_SHIFT (0U) +#define RGX_MMUCTRL_PT_DATA_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_MMUCTRL_PT_DATA_VALID_EN (IMG_UINT64_C(0x0000000000000001)) + + +#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_SHIFT (40U) +#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) +#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_EN (IMG_UINT64_C(0x0000010000000000)) +#define RGX_MMUCTRL_PD_DATA_PT_BASE_SHIFT (5U) +#define RGX_MMUCTRL_PD_DATA_PT_BASE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000001F)) +#define 
RGX_MMUCTRL_PD_DATA_PAGE_SIZE_SHIFT (1U) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF1)) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB (IMG_UINT64_C(0x0000000000000000)) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB (IMG_UINT64_C(0x0000000000000002)) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB (IMG_UINT64_C(0x0000000000000004)) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB (IMG_UINT64_C(0x0000000000000006)) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB (IMG_UINT64_C(0x0000000000000008)) +#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB (IMG_UINT64_C(0x000000000000000a)) +#define RGX_MMUCTRL_PD_DATA_VALID_SHIFT (0U) +#define RGX_MMUCTRL_PD_DATA_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_MMUCTRL_PD_DATA_VALID_EN (IMG_UINT64_C(0x0000000000000001)) + + +#define RGX_MMUCTRL_PC_DATA_PD_BASE_SHIFT (4U) +#define RGX_MMUCTRL_PC_DATA_PD_BASE_CLRMSK (0x0000000FU) +#define RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT (12U) +#define RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSIZE (4096U) +#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_SHIFT (1U) +#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_CLRMSK (0xFFFFFFFDU) +#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_EN (0x00000002U) +#define RGX_MMUCTRL_PC_DATA_VALID_SHIFT (0U) +#define RGX_MMUCTRL_PC_DATA_VALID_CLRMSK (0xFFFFFFFEU) +#define RGX_MMUCTRL_PC_DATA_VALID_EN (0x00000001U) + + +#endif /* RGXMMUDEFS_KM_H */ +/***************************************************************************** + End of file (rgxmmudefs_km.h) +*****************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/km/rgxtbdefs_km.h b/drivers/gpu/drm/phytium/octopus/km/rgxtbdefs_km.h new file mode 100644 index 000000000000..811c7791cf14 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/km/rgxtbdefs_km.h @@ -0,0 +1,520 @@ +/*************************************************************************/ /*! 
+@Title Hardware definition file rgxtbdefs_km.h +@Brief The file contains auto-generated hardware definitions without + BVNC-specific compile time conditionals. +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/* **** Autogenerated C -- do not edit **** */ + +/* + */ + + +#ifndef RGXTBDEFS_KM_H +#define RGXTBDEFS_KM_H + +#include "img_types.h" +#include "img_defs.h" + + +#define RGXTBDEFS_KM_REVISION 1 + +/* + Register RGX_TB_SOFT_RESET +*/ +#define RGX_TB_SOFT_RESET (0x0000U) +#define RGX_TB_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x00000000FFFF0107)) +#define RGX_TB_SOFT_RESET_SPU_SHIFT (16U) +#define RGX_TB_SOFT_RESET_SPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000FFFF)) +#define RGX_TB_SOFT_RESET_JONES_SHIFT (8U) +#define RGX_TB_SOFT_RESET_JONES_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define RGX_TB_SOFT_RESET_JONES_EN (IMG_UINT64_C(0x0000000000000100)) +#define RGX_TB_SOFT_RESET_SYS_SHIFT (2U) +#define RGX_TB_SOFT_RESET_SYS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_TB_SOFT_RESET_SYS_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_TB_SOFT_RESET_MEM_SHIFT (1U) +#define RGX_TB_SOFT_RESET_MEM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_TB_SOFT_RESET_MEM_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_TB_SOFT_RESET_CORE_SHIFT (0U) +#define RGX_TB_SOFT_RESET_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_TB_SOFT_RESET_CORE_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_TB_PCI_MASTER +*/ +#define RGX_TB_PCI_MASTER (0x0008U) +#define RGX_TB_PCI_MASTER_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define 
RGX_TB_PCI_MASTER_MODE_SHIFT (0U) +#define RGX_TB_PCI_MASTER_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_TB_PCI_MASTER_MODE_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_TB_MEM_ARBITER +*/ +#define RGX_TB_MEM_ARBITER (0x0088U) +#define RGX_TB_MEM_ARBITER_MASKFULL (IMG_UINT64_C(0x0000000000010F11)) +#define RGX_TB_MEM_ARBITER_LIMIT_BW_SHIFT (16U) +#define RGX_TB_MEM_ARBITER_LIMIT_BW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) +#define RGX_TB_MEM_ARBITER_LIMIT_BW_EN (IMG_UINT64_C(0x0000000000010000)) +#define RGX_TB_MEM_ARBITER_PRI_SKEW_SHIFT (8U) +#define RGX_TB_MEM_ARBITER_PRI_SKEW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF0FF)) +#define RGX_TB_MEM_ARBITER_PRI_RNW_SHIFT (4U) +#define RGX_TB_MEM_ARBITER_PRI_RNW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_TB_MEM_ARBITER_PRI_RNW_EN (IMG_UINT64_C(0x0000000000000010)) +#define RGX_TB_MEM_ARBITER_ENABLE_SHIFT (0U) +#define RGX_TB_MEM_ARBITER_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_TB_MEM_ARBITER_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_TB_QOS_RD_LATENCY +*/ +#define RGX_TB_QOS_RD_LATENCY (0x0090U) +#define RGX_TB_QOS_RD_LATENCY_MASKFULL (IMG_UINT64_C(0xFFFF3FFF3FFF3FFF)) +#define RGX_TB_QOS_RD_LATENCY_DIST_SHIFT (62U) +#define RGX_TB_QOS_RD_LATENCY_DIST_CLRMSK (IMG_UINT64_C(0x3FFFFFFFFFFFFFFF)) +#define RGX_TB_QOS_RD_LATENCY_MAX_15_SHIFT (48U) +#define RGX_TB_QOS_RD_LATENCY_MAX_15_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define RGX_TB_QOS_RD_LATENCY_MIN_15_SHIFT (32U) +#define RGX_TB_QOS_RD_LATENCY_MIN_15_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define RGX_TB_QOS_RD_LATENCY_MAX_0_SHIFT (16U) +#define RGX_TB_QOS_RD_LATENCY_MAX_0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC000FFFF)) +#define RGX_TB_QOS_RD_LATENCY_MIN_0_SHIFT (0U) +#define RGX_TB_QOS_RD_LATENCY_MIN_0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFC000)) + + +/* + Register RGX_TB_QOS_WR_LATENCY +*/ +#define RGX_TB_QOS_WR_LATENCY (0x0098U) +#define RGX_TB_QOS_WR_LATENCY_MASKFULL 
(IMG_UINT64_C(0xFFFF3FFF3FFF3FFF)) +#define RGX_TB_QOS_WR_LATENCY_DIST_SHIFT (62U) +#define RGX_TB_QOS_WR_LATENCY_DIST_CLRMSK (IMG_UINT64_C(0x3FFFFFFFFFFFFFFF)) +#define RGX_TB_QOS_WR_LATENCY_MAX_15_SHIFT (48U) +#define RGX_TB_QOS_WR_LATENCY_MAX_15_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) +#define RGX_TB_QOS_WR_LATENCY_MIN_15_SHIFT (32U) +#define RGX_TB_QOS_WR_LATENCY_MIN_15_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) +#define RGX_TB_QOS_WR_LATENCY_MAX_0_SHIFT (16U) +#define RGX_TB_QOS_WR_LATENCY_MAX_0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC000FFFF)) +#define RGX_TB_QOS_WR_LATENCY_MIN_0_SHIFT (0U) +#define RGX_TB_QOS_WR_LATENCY_MIN_0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFC000)) + + +/* + Register RGX_TB_MAX_ID_OUTSTANDING +*/ +#define RGX_TB_MAX_ID_OUTSTANDING (0x00B0U) +#define RGX_TB_MAX_ID_OUTSTANDING_MASKFULL (IMG_UINT64_C(0x000003FF03FF03FF)) +#define RGX_TB_MAX_ID_OUTSTANDING_RD_WR_SHIFT (32U) +#define RGX_TB_MAX_ID_OUTSTANDING_RD_WR_CLRMSK (IMG_UINT64_C(0xFFFFFC00FFFFFFFF)) +#define RGX_TB_MAX_ID_OUTSTANDING_WRITE_SHIFT (16U) +#define RGX_TB_MAX_ID_OUTSTANDING_WRITE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFC00FFFF)) +#define RGX_TB_MAX_ID_OUTSTANDING_READ_SHIFT (0U) +#define RGX_TB_MAX_ID_OUTSTANDING_READ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFC00)) + + +/* + Register RGX_TB_COHERENT_MEM_REGION +*/ +#define RGX_TB_COHERENT_MEM_REGION (0x00C0U) +#define RGX_TB_COHERENT_MEM_REGION_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) +#define RGX_TB_COHERENT_MEM_REGION_START_ADDR_SHIFT (12U) +#define RGX_TB_COHERENT_MEM_REGION_START_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) + + +/* + Register RGX_TB_LMA_MEM_REGION +*/ +#define RGX_TB_LMA_MEM_REGION (0x00C8U) +#define RGX_TB_LMA_MEM_REGION_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_TB_LMA_MEM_REGION_SIZE_SHIFT (0U) +#define RGX_TB_LMA_MEM_REGION_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) + + +/* + Register RGX_TB_UMA_MEM_REGION +*/ +#define RGX_TB_UMA_MEM_REGION (0x00D0U) +#define RGX_TB_UMA_MEM_REGION_MASKFULL 
(IMG_UINT64_C(0x000000FFFFFFF000)) +#define RGX_TB_UMA_MEM_REGION_START_ADDR_SHIFT (12U) +#define RGX_TB_UMA_MEM_REGION_START_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) + + +/* + Register RGX_TB_SYSTEM_STATUS +*/ +#define RGX_TB_SYSTEM_STATUS (0x00E0U) +#define RGX_TB_SYSTEM_STATUS_MASKFULL (IMG_UINT64_C(0xFFFFFFFF33F700FF)) +#define RGX_TB_SYSTEM_STATUS_SPU_ISON_SHIFT (48U) +#define RGX_TB_SYSTEM_STATUS_SPU_ISON_CLRMSK (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) +#define RGX_TB_SYSTEM_STATUS_SPU_POWER_SHIFT (32U) +#define RGX_TB_SYSTEM_STATUS_SPU_POWER_CLRMSK (IMG_UINT64_C(0xFFFF0000FFFFFFFF)) +#define RGX_TB_SYSTEM_STATUS_JONES_ISON_SHIFT (29U) +#define RGX_TB_SYSTEM_STATUS_JONES_ISON_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) +#define RGX_TB_SYSTEM_STATUS_JONES_ISON_EN (IMG_UINT64_C(0x0000000020000000)) +#define RGX_TB_SYSTEM_STATUS_JONES_POWER_SHIFT (28U) +#define RGX_TB_SYSTEM_STATUS_JONES_POWER_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) +#define RGX_TB_SYSTEM_STATUS_JONES_POWER_EN (IMG_UINT64_C(0x0000000010000000)) +#define RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_ABORT_SHIFT (25U) +#define RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_ABORT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) +#define RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_ABORT_EN (IMG_UINT64_C(0x0000000002000000)) +#define RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_COMPLETE_SHIFT (24U) +#define RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_COMPLETE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) +#define RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_COMPLETE_EN (IMG_UINT64_C(0x0000000001000000)) +#define RGX_TB_SYSTEM_STATUS_GPU_STATE_SHIFT (20U) +#define RGX_TB_SYSTEM_STATUS_GPU_STATE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF0FFFFF)) +#define RGX_TB_SYSTEM_STATUS_SYSTEM_IRQ_SHIFT (18U) +#define RGX_TB_SYSTEM_STATUS_SYSTEM_IRQ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) +#define RGX_TB_SYSTEM_STATUS_SYSTEM_IRQ_EN (IMG_UINT64_C(0x0000000000040000)) +#define RGX_TB_SYSTEM_STATUS_TRIGGER_SHIFT (17U) +#define RGX_TB_SYSTEM_STATUS_TRIGGER_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) +#define RGX_TB_SYSTEM_STATUS_TRIGGER_EN (IMG_UINT64_C(0x0000000000020000)) +#define RGX_TB_SYSTEM_STATUS_HMMU_IRQ_SHIFT (16U) +#define RGX_TB_SYSTEM_STATUS_HMMU_IRQ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) +#define RGX_TB_SYSTEM_STATUS_HMMU_IRQ_EN (IMG_UINT64_C(0x0000000000010000)) +#define RGX_TB_SYSTEM_STATUS_IRQ_SHIFT (0U) +#define RGX_TB_SYSTEM_STATUS_IRQ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) + + +/* + Register RGX_TB_SYSTEM_CONFIG +*/ +#define RGX_TB_SYSTEM_CONFIG (0x00F0U) +#define RGX_TB_SYSTEM_CONFIG_MASKFULL (IMG_UINT64_C(0x0000000007007737)) +#define RGX_TB_SYSTEM_CONFIG_SOC_AXI_FEATURE_SHIFT (24U) +#define RGX_TB_SYSTEM_CONFIG_SOC_AXI_FEATURE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF8FFFFFF)) +#define RGX_TB_SYSTEM_CONFIG_ICE_CABLEIF256_SHIFT (14U) +#define RGX_TB_SYSTEM_CONFIG_ICE_CABLEIF256_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) +#define RGX_TB_SYSTEM_CONFIG_ICE_CABLEIF256_EN (IMG_UINT64_C(0x0000000000004000)) +#define RGX_TB_SYSTEM_CONFIG_VELOCE_TBX_SHIFT (13U) +#define RGX_TB_SYSTEM_CONFIG_VELOCE_TBX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) +#define RGX_TB_SYSTEM_CONFIG_VELOCE_TBX_EN (IMG_UINT64_C(0x0000000000002000)) +#define RGX_TB_SYSTEM_CONFIG_VELOCE_ICE_SHIFT (12U) +#define RGX_TB_SYSTEM_CONFIG_VELOCE_ICE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) +#define RGX_TB_SYSTEM_CONFIG_VELOCE_ICE_EN (IMG_UINT64_C(0x0000000000001000)) +#define RGX_TB_SYSTEM_CONFIG_CADENCE_AVIP_SHIFT (10U) +#define RGX_TB_SYSTEM_CONFIG_CADENCE_AVIP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) +#define RGX_TB_SYSTEM_CONFIG_CADENCE_AVIP_EN (IMG_UINT64_C(0x0000000000000400)) +#define RGX_TB_SYSTEM_CONFIG_CADENCE_TBA_SHIFT (9U) +#define RGX_TB_SYSTEM_CONFIG_CADENCE_TBA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) +#define RGX_TB_SYSTEM_CONFIG_CADENCE_TBA_EN (IMG_UINT64_C(0x0000000000000200)) +#define RGX_TB_SYSTEM_CONFIG_CADENCE_ICE_SHIFT (8U) +#define RGX_TB_SYSTEM_CONFIG_CADENCE_ICE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) +#define 
RGX_TB_SYSTEM_CONFIG_CADENCE_ICE_EN (IMG_UINT64_C(0x0000000000000100)) +#define RGX_TB_SYSTEM_CONFIG_EMU_UMA_SHIFT (5U) +#define RGX_TB_SYSTEM_CONFIG_EMU_UMA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) +#define RGX_TB_SYSTEM_CONFIG_EMU_UMA_EN (IMG_UINT64_C(0x0000000000000020)) +#define RGX_TB_SYSTEM_CONFIG_EMU_BUILD_SHIFT (4U) +#define RGX_TB_SYSTEM_CONFIG_EMU_BUILD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) +#define RGX_TB_SYSTEM_CONFIG_EMU_BUILD_EN (IMG_UINT64_C(0x0000000000000010)) +#define RGX_TB_SYSTEM_CONFIG_NS_NOC_SHIFT (2U) +#define RGX_TB_SYSTEM_CONFIG_NS_NOC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) +#define RGX_TB_SYSTEM_CONFIG_NS_NOC_EN (IMG_UINT64_C(0x0000000000000004)) +#define RGX_TB_SYSTEM_CONFIG_IMG_NOC_SHIFT (1U) +#define RGX_TB_SYSTEM_CONFIG_IMG_NOC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) +#define RGX_TB_SYSTEM_CONFIG_IMG_NOC_EN (IMG_UINT64_C(0x0000000000000002)) +#define RGX_TB_SYSTEM_CONFIG_TB_NONCOHERENT_SHIFT (0U) +#define RGX_TB_SYSTEM_CONFIG_TB_NONCOHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_TB_SYSTEM_CONFIG_TB_NONCOHERENT_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_TB_RDATA_CORRUPT_ENABLE +*/ +#define RGX_TB_RDATA_CORRUPT_ENABLE (0x1560U) +#define RGX_TB_RDATA_CORRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_TB_RDATA_CORRUPT_ENABLE_ENABLE_SHIFT (0U) +#define RGX_TB_RDATA_CORRUPT_ENABLE_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_RDATA_CORRUPT_ENABLE_ENABLE_EN (0x00000001U) + + +/* + Register RGX_TB_RDATA_CORRUPT_MASK +*/ +#define RGX_TB_RDATA_CORRUPT_MASK (0x1568U) +#define RGX_TB_RDATA_CORRUPT_MASK_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_TB_RDATA_CORRUPT_MASK_MMU_SHIFT (31U) +#define RGX_TB_RDATA_CORRUPT_MASK_MMU_CLRMSK (0x7FFFFFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_MMU_EN (0x80000000U) +#define RGX_TB_RDATA_CORRUPT_MASK_UPS_SHIFT (30U) +#define RGX_TB_RDATA_CORRUPT_MASK_UPS_CLRMSK (0xBFFFFFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_UPS_EN (0x40000000U) +#define 
RGX_TB_RDATA_CORRUPT_MASK_FBM_SHIFT (29U) +#define RGX_TB_RDATA_CORRUPT_MASK_FBM_CLRMSK (0xDFFFFFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_FBM_EN (0x20000000U) +#define RGX_TB_RDATA_CORRUPT_MASK_TUL_SHIFT (28U) +#define RGX_TB_RDATA_CORRUPT_MASK_TUL_CLRMSK (0xEFFFFFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_TUL_EN (0x10000000U) +#define RGX_TB_RDATA_CORRUPT_MASK_SHR_SHIFT (27U) +#define RGX_TB_RDATA_CORRUPT_MASK_SHR_CLRMSK (0xF7FFFFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_SHR_EN (0x08000000U) +#define RGX_TB_RDATA_CORRUPT_MASK_FBA_SHIFT (26U) +#define RGX_TB_RDATA_CORRUPT_MASK_FBA_CLRMSK (0xFBFFFFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_FBA_EN (0x04000000U) +#define RGX_TB_RDATA_CORRUPT_MASK_VDM_SHIFT (25U) +#define RGX_TB_RDATA_CORRUPT_MASK_VDM_CLRMSK (0xFDFFFFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_VDM_EN (0x02000000U) +#define RGX_TB_RDATA_CORRUPT_MASK_USC_L2_SHIFT (24U) +#define RGX_TB_RDATA_CORRUPT_MASK_USC_L2_CLRMSK (0xFEFFFFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_USC_L2_EN (0x01000000U) +#define RGX_TB_RDATA_CORRUPT_MASK_PDS_SHIFT (23U) +#define RGX_TB_RDATA_CORRUPT_MASK_PDS_CLRMSK (0xFF7FFFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_PDS_EN (0x00800000U) +#define RGX_TB_RDATA_CORRUPT_MASK_PDSRW_SHIFT (22U) +#define RGX_TB_RDATA_CORRUPT_MASK_PDSRW_CLRMSK (0xFFBFFFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_PDSRW_EN (0x00400000U) +#define RGX_TB_RDATA_CORRUPT_MASK_TPF_SHIFT (21U) +#define RGX_TB_RDATA_CORRUPT_MASK_TPF_CLRMSK (0xFFDFFFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_TPF_EN (0x00200000U) +#define RGX_TB_RDATA_CORRUPT_MASK_SHF_SHIFT (20U) +#define RGX_TB_RDATA_CORRUPT_MASK_SHF_CLRMSK (0xFFEFFFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_SHF_EN (0x00100000U) +#define RGX_TB_RDATA_CORRUPT_MASK_AMC_SHIFT (19U) +#define RGX_TB_RDATA_CORRUPT_MASK_AMC_CLRMSK (0xFFF7FFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_AMC_EN (0x00080000U) +#define RGX_TB_RDATA_CORRUPT_MASK_RAC_SHIFT (18U) +#define RGX_TB_RDATA_CORRUPT_MASK_RAC_CLRMSK (0xFFFBFFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_RAC_EN 
(0x00040000U) +#define RGX_TB_RDATA_CORRUPT_MASK_VCE_RTC_SHIFT (17U) +#define RGX_TB_RDATA_CORRUPT_MASK_VCE_RTC_CLRMSK (0xFFFDFFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_VCE_RTC_EN (0x00020000U) +#define RGX_TB_RDATA_CORRUPT_MASK_ISP_SHIFT (16U) +#define RGX_TB_RDATA_CORRUPT_MASK_ISP_CLRMSK (0xFFFEFFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_ISP_EN (0x00010000U) +#define RGX_TB_RDATA_CORRUPT_MASK_PPP_SHIFT (15U) +#define RGX_TB_RDATA_CORRUPT_MASK_PPP_CLRMSK (0xFFFF7FFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_PPP_EN (0x00008000U) +#define RGX_TB_RDATA_CORRUPT_MASK_IPF_SHIFT (14U) +#define RGX_TB_RDATA_CORRUPT_MASK_IPF_CLRMSK (0xFFFFBFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_IPF_EN (0x00004000U) +#define RGX_TB_RDATA_CORRUPT_MASK_VCE_SHIFT (13U) +#define RGX_TB_RDATA_CORRUPT_MASK_VCE_CLRMSK (0xFFFFDFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_VCE_EN (0x00002000U) +#define RGX_TB_RDATA_CORRUPT_MASK_PBE_SHIFT (12U) +#define RGX_TB_RDATA_CORRUPT_MASK_PBE_CLRMSK (0xFFFFEFFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_PBE_EN (0x00001000U) +#define RGX_TB_RDATA_CORRUPT_MASK_TCU_SHIFT (11U) +#define RGX_TB_RDATA_CORRUPT_MASK_TCU_CLRMSK (0xFFFFF7FFU) +#define RGX_TB_RDATA_CORRUPT_MASK_TCU_EN (0x00000800U) +#define RGX_TB_RDATA_CORRUPT_MASK_MCU_SHIFT (10U) +#define RGX_TB_RDATA_CORRUPT_MASK_MCU_CLRMSK (0xFFFFFBFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_MCU_EN (0x00000400U) +#define RGX_TB_RDATA_CORRUPT_MASK_RPM_SHIFT (9U) +#define RGX_TB_RDATA_CORRUPT_MASK_RPM_CLRMSK (0xFFFFFDFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_RPM_EN (0x00000200U) +#define RGX_TB_RDATA_CORRUPT_MASK_RTU_SHIFT (8U) +#define RGX_TB_RDATA_CORRUPT_MASK_RTU_CLRMSK (0xFFFFFEFFU) +#define RGX_TB_RDATA_CORRUPT_MASK_RTU_EN (0x00000100U) +#define RGX_TB_RDATA_CORRUPT_MASK_TILING_SHIFT (7U) +#define RGX_TB_RDATA_CORRUPT_MASK_TILING_CLRMSK (0xFFFFFF7FU) +#define RGX_TB_RDATA_CORRUPT_MASK_TILING_EN (0x00000080U) +#define RGX_TB_RDATA_CORRUPT_MASK_META_DMA_SHIFT (6U) +#define RGX_TB_RDATA_CORRUPT_MASK_META_DMA_CLRMSK (0xFFFFFFBFU) +#define 
RGX_TB_RDATA_CORRUPT_MASK_META_DMA_EN (0x00000040U) +#define RGX_TB_RDATA_CORRUPT_MASK_META_SHIFT (5U) +#define RGX_TB_RDATA_CORRUPT_MASK_META_CLRMSK (0xFFFFFFDFU) +#define RGX_TB_RDATA_CORRUPT_MASK_META_EN (0x00000020U) +#define RGX_TB_RDATA_CORRUPT_MASK_CDM_SHIFT (4U) +#define RGX_TB_RDATA_CORRUPT_MASK_CDM_CLRMSK (0xFFFFFFEFU) +#define RGX_TB_RDATA_CORRUPT_MASK_CDM_EN (0x00000010U) +#define RGX_TB_RDATA_CORRUPT_MASK_PM_SHIFT (3U) +#define RGX_TB_RDATA_CORRUPT_MASK_PM_CLRMSK (0xFFFFFFF7U) +#define RGX_TB_RDATA_CORRUPT_MASK_PM_EN (0x00000008U) +#define RGX_TB_RDATA_CORRUPT_MASK_TDM_SHIFT (2U) +#define RGX_TB_RDATA_CORRUPT_MASK_TDM_CLRMSK (0xFFFFFFFBU) +#define RGX_TB_RDATA_CORRUPT_MASK_TDM_EN (0x00000004U) +#define RGX_TB_RDATA_CORRUPT_MASK_DCE_SHIFT (1U) +#define RGX_TB_RDATA_CORRUPT_MASK_DCE_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_RDATA_CORRUPT_MASK_DCE_EN (0x00000002U) +#define RGX_TB_RDATA_CORRUPT_MASK_IPP_SHIFT (0U) +#define RGX_TB_RDATA_CORRUPT_MASK_IPP_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_RDATA_CORRUPT_MASK_IPP_EN (0x00000001U) + + +/* + Register RGX_TB_RDATA_CORRUPT_FREQ +*/ +#define RGX_TB_RDATA_CORRUPT_FREQ (0x1570U) +#define RGX_TB_RDATA_CORRUPT_FREQ_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) +#define RGX_TB_RDATA_CORRUPT_FREQ_FREQ_SHIFT (0U) +#define RGX_TB_RDATA_CORRUPT_FREQ_FREQ_CLRMSK (0x00000000U) + + +/* + Register RGX_TB_TRUSTED_DEVICE +*/ +#define RGX_TB_TRUSTED_DEVICE (0x2000U) +#define RGX_TB_TRUSTED_DEVICE_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define RGX_TB_TRUSTED_DEVICE_ALLOW_SECURE_READS_SHIFT (4U) +#define RGX_TB_TRUSTED_DEVICE_ALLOW_SECURE_READS_CLRMSK (0xFFFFFFEFU) +#define RGX_TB_TRUSTED_DEVICE_ALLOW_SECURE_READS_EN (0x00000010U) +#define RGX_TB_TRUSTED_DEVICE_HWCONFIG_SHIFT (2U) +#define RGX_TB_TRUSTED_DEVICE_HWCONFIG_CLRMSK (0xFFFFFFF3U) +#define RGX_TB_TRUSTED_DEVICE_OSID_DISABLE_SHIFT (1U) +#define RGX_TB_TRUSTED_DEVICE_OSID_DISABLE_CLRMSK (0xFFFFFFFDU) +#define RGX_TB_TRUSTED_DEVICE_OSID_DISABLE_EN (0x00000002U) +#define 
RGX_TB_TRUSTED_DEVICE_ENABLE_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_TRUSTED_DEVICE_ENABLE_EN (0x00000001U) + + +/* + Register RGX_TB_TRUSTED_DEVICE_FAULT_CLEAR +*/ +#define RGX_TB_TRUSTED_DEVICE_FAULT_CLEAR (0x2010U) +#define RGX_TB_TRUSTED_DEVICE_FAULT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) +#define RGX_TB_TRUSTED_DEVICE_FAULT_CLEAR_PULSE_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_FAULT_CLEAR_PULSE_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_TRUSTED_DEVICE_FAULT_CLEAR_PULSE_EN (0x00000001U) + + +/* + Register group: RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS, with 8 repeats +*/ +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS_REPEATCOUNT (8U) +/* + Register RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS0 +*/ +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS0 (0x2058U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS0_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS0_ADDR_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) + + +/* + Register group: RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS, with 8 repeats +*/ +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS_REPEATCOUNT (8U) +/* + Register RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS0 +*/ +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS0 (0x20D8U) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS0_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF)) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS0_ADDR_SHIFT (0U) +#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000)) + + +/* + Register RGX_TB_BW_LIMITER +*/ +#define RGX_TB_BW_LIMITER (0x2118U) +#define RGX_TB_BW_LIMITER_MASKFULL (IMG_UINT64_C(0x00000000007707FF)) +#define RGX_TB_BW_LIMITER_DROPN_EXT_SHIFT (20U) +#define RGX_TB_BW_LIMITER_DROPN_EXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF8FFFFF)) +#define RGX_TB_BW_LIMITER_PERIOD_EXT_SHIFT (16U) +#define RGX_TB_BW_LIMITER_PERIOD_EXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF8FFFF)) +#define 
RGX_TB_BW_LIMITER_DROPN_SHIFT (6U) +#define RGX_TB_BW_LIMITER_DROPN_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF83F)) +#define RGX_TB_BW_LIMITER_PERIOD_SHIFT (1U) +#define RGX_TB_BW_LIMITER_PERIOD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFC1)) +#define RGX_TB_BW_LIMITER_ENABLE_SHIFT (0U) +#define RGX_TB_BW_LIMITER_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_TB_BW_LIMITER_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_TB_DRAM_CROSSBAR +*/ +#define RGX_TB_DRAM_CROSSBAR (0x2128U) +#define RGX_TB_DRAM_CROSSBAR_MASKFULL (IMG_UINT64_C(0x0000000000003301)) +#define RGX_TB_DRAM_CROSSBAR_CHANNELS_SHIFT (12U) +#define RGX_TB_DRAM_CROSSBAR_CHANNELS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFCFFF)) +#define RGX_TB_DRAM_CROSSBAR_SEL_MODE_SHIFT (8U) +#define RGX_TB_DRAM_CROSSBAR_SEL_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF)) +#define RGX_TB_DRAM_CROSSBAR_ENABLE_SHIFT (0U) +#define RGX_TB_DRAM_CROSSBAR_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) +#define RGX_TB_DRAM_CROSSBAR_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) + + +/* + Register RGX_TB_SOC_TIMER +*/ +#define RGX_TB_SOC_TIMER (0x2140U) +#define RGX_TB_SOC_TIMER_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) +#define RGX_TB_SOC_TIMER_COUNT_SHIFT (0U) +#define RGX_TB_SOC_TIMER_COUNT_CLRMSK (IMG_UINT64_C(0x0000000000000000)) + + +/* + Register RGX_TB_PROGRAMABLE_CLK_DIV +*/ +#define RGX_TB_PROGRAMABLE_CLK_DIV (0x2150U) +#define RGX_TB_PROGRAMABLE_CLK_DIV_MASKFULL (IMG_UINT64_C(0x0000000000000FFF)) +#define RGX_TB_PROGRAMABLE_CLK_DIV_MODE_SHIFT (11U) +#define RGX_TB_PROGRAMABLE_CLK_DIV_MODE_CLRMSK (0xFFFFF7FFU) +#define RGX_TB_PROGRAMABLE_CLK_DIV_MODE_EN (0x00000800U) +#define RGX_TB_PROGRAMABLE_CLK_DIV_PROFILE_SEL_SHIFT (9U) +#define RGX_TB_PROGRAMABLE_CLK_DIV_PROFILE_SEL_CLRMSK (0xFFFFF9FFU) +#define RGX_TB_PROGRAMABLE_CLK_DIV_DIV_SHIFT (5U) +#define RGX_TB_PROGRAMABLE_CLK_DIV_DIV_CLRMSK (0xFFFFFE1FU) +#define RGX_TB_PROGRAMABLE_CLK_DIV_DELAY_SHIFT (1U) +#define 
RGX_TB_PROGRAMABLE_CLK_DIV_DELAY_CLRMSK (0xFFFFFFE1U) +#define RGX_TB_PROGRAMABLE_CLK_DIV_EVENT_SHIFT (0U) +#define RGX_TB_PROGRAMABLE_CLK_DIV_EVENT_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_PROGRAMABLE_CLK_DIV_EVENT_EN (0x00000001U) + + +/* + Register RGX_TB_GPIO_FREQ_CTRL +*/ +#define RGX_TB_GPIO_FREQ_CTRL (0x2160U) +#define RGX_TB_GPIO_FREQ_CTRL_MASKFULL (IMG_UINT64_C(0x000000000000001F)) +#define RGX_TB_GPIO_FREQ_CTRL_COUNT_SHIFT (1U) +#define RGX_TB_GPIO_FREQ_CTRL_COUNT_CLRMSK (0xFFFFFFE1U) +#define RGX_TB_GPIO_FREQ_CTRL_ENABLE_SHIFT (0U) +#define RGX_TB_GPIO_FREQ_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) +#define RGX_TB_GPIO_FREQ_CTRL_ENABLE_EN (0x00000001U) + + +/* + Register RGX_TB_GPIO_MODE +*/ +#define RGX_TB_GPIO_MODE (0x2170U) +#define RGX_TB_GPIO_MODE_MASKFULL (IMG_UINT64_C(0x0000000000000003)) +#define RGX_TB_GPIO_MODE_PROTOCOL_SHIFT (0U) +#define RGX_TB_GPIO_MODE_PROTOCOL_CLRMSK (0xFFFFFFFCU) + + +#endif /* RGXTBDEFS_KM_H */ +/***************************************************************************** + End of file (rgxtbdefs_km.h) +*****************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/km_apphint.c b/drivers/gpu/drm/phytium/octopus/km_apphint.c new file mode 100644 index 000000000000..d9fb349e1cf7 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/km_apphint.c @@ -0,0 +1,1616 @@ +/*************************************************************************/ /*! +@File km_apphint.c +@Title Apphint routines +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Device specific functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/
+
+#include "di_server.h"
+#include "pvr_uaccess.h"
+#include <linux/moduleparam.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+/* Common and SO layer */
+#include "img_defs.h"
+#include "sofunc_pvr.h"
+
+/* for action device access */
+#include "pvrsrv.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgxfwutils.h"
+#include "rgxhwperf.h"
+#include "htbserver.h"
+#include "rgxutils.h"
+#include "rgxapi_km.h"
+
+
+/* defines for default values */
+#include "rgx_fwif_km.h"
+#include "htbuffer_types.h"
+
+#include "pvr_notifier.h"
+
+#include "km_apphint_defs.h"
+#include "km_apphint.h"
+
+#if defined(PDUMP)
+#include <stdarg.h>
+#include "pdump_km.h"
+#endif
+
+/* Size of temporary buffers used to read and write AppHint data.
+ * Must be large enough to contain any strings read or written but no larger
+ * than 4096: which is the buffer size for the kernel_param_ops .get
+ * function. And less than 1024 to keep the stack frame size within bounds.
+ */ +#define APPHINT_BUFFER_SIZE 512 + +#define APPHINT_DEVICES_MAX 16 + +/* +******************************************************************************* + * AppHint mnemonic data type helper tables +******************************************************************************/ +struct apphint_lookup { + const char *name; + int value; +}; + +static const struct apphint_lookup fwt_logtype_tbl[] = { + { "trace", 0}, + { "none", 0} +#if defined(SUPPORT_TBI_INTERFACE) + , { "tbi", 1} +#endif +}; + +static const struct apphint_lookup fwt_loggroup_tbl[] = { + RGXFWIF_LOG_GROUP_NAME_VALUE_MAP +}; + +static const struct apphint_lookup htb_loggroup_tbl[] = { +#define X(a, b) { #b, HTB_LOG_GROUP_FLAG(a) }, + HTB_LOG_SFGROUPLIST +#undef X +}; + +static const struct apphint_lookup htb_opmode_tbl[] = { + { "droplatest", HTB_OPMODE_DROPLATEST}, + { "dropoldest", HTB_OPMODE_DROPOLDEST}, + { "block", HTB_OPMODE_BLOCK} +}; + +__maybe_unused +static const struct apphint_lookup htb_logmode_tbl[] = { + { "all", HTB_LOGMODE_ALLPID}, + { "restricted", HTB_LOGMODE_RESTRICTEDPID} +}; + +__maybe_unused +static const struct apphint_lookup timecorr_clk_tbl[] = { + { "mono", 0 }, + { "mono_raw", 1 }, + { "sched", 2 } +}; + +/* +******************************************************************************* + Data types +******************************************************************************/ +union apphint_value { + IMG_UINT64 UINT64; + IMG_UINT32 UINT32; + IMG_BOOL BOOL; + IMG_CHAR *STRING; +}; + +union apphint_query_action { + PVRSRV_ERROR (*UINT64)(const PVRSRV_DEVICE_NODE *device, + const void *private_data, IMG_UINT64 *value); + PVRSRV_ERROR (*UINT32)(const PVRSRV_DEVICE_NODE *device, + const void *private_data, IMG_UINT32 *value); + PVRSRV_ERROR (*BOOL)(const PVRSRV_DEVICE_NODE *device, + const void *private_data, IMG_BOOL *value); + PVRSRV_ERROR (*STRING)(const PVRSRV_DEVICE_NODE *device, + const void *private_data, IMG_CHAR **value); +}; + +union apphint_set_action { + 
PVRSRV_ERROR (*UINT64)(const PVRSRV_DEVICE_NODE *device, + const void *private_data, IMG_UINT64 value); + PVRSRV_ERROR (*UINT32)(const PVRSRV_DEVICE_NODE *device, + const void *private_data, IMG_UINT32 value); + PVRSRV_ERROR (*BOOL)(const PVRSRV_DEVICE_NODE *device, + const void *private_data, IMG_BOOL value); + PVRSRV_ERROR (*STRING)(const PVRSRV_DEVICE_NODE *device, + const void *private_data, IMG_CHAR *value); +}; + +struct apphint_action { + union apphint_query_action query; /*!< Query callbacks. */ + union apphint_set_action set; /*!< Set callbacks. */ + const PVRSRV_DEVICE_NODE *device; /*!< Pointer to the device node.*/ + const void *private_data; /*!< Opaque data passed to `query` and + `set` callbacks. */ + union apphint_value stored; /*!< Value of the AppHint. */ + bool free; /*!< Flag indicating that memory has been + allocated for this AppHint and it + needs to be freed on deinit. */ + bool initialised; /*!< Flag indicating if the AppHint has + been already initialised. */ +}; + +struct apphint_param { + IMG_UINT32 id; + APPHINT_DATA_TYPE data_type; + const void *data_type_helper; + IMG_UINT32 helper_size; +}; + +struct apphint_init_data { + IMG_UINT32 id; /* index into AppHint Table */ + APPHINT_CLASS class; + const IMG_CHAR *name; + union apphint_value default_value; +}; + +struct apphint_init_data_mapping { + IMG_UINT32 device_apphint_id; + IMG_UINT32 modparam_apphint_id; +}; + +struct apphint_class_state { + APPHINT_CLASS class; + IMG_BOOL enabled; +}; + +struct apphint_work { + struct work_struct work; + union apphint_value new_value; + struct apphint_action *action; +}; + +/* +******************************************************************************* + Initialization / configuration table data +******************************************************************************/ +#define UINT32Bitfield UINT32 +#define UINT32List UINT32 + +static const struct apphint_init_data init_data_buildvar[] = { +#define X(a, b, c, d, e) \ + {APPHINT_ID_ ## a, 
APPHINT_CLASS_ ## c, #a, {.b=d} }, + APPHINT_LIST_BUILDVAR_COMMON + APPHINT_LIST_BUILDVAR +#undef X +}; + +static const struct apphint_init_data init_data_modparam[] = { +#define X(a, b, c, d, e) \ + {APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d} }, + APPHINT_LIST_MODPARAM_COMMON + APPHINT_LIST_MODPARAM +#undef X +}; + +static const struct apphint_init_data init_data_debuginfo[] = { +#define X(a, b, c, d, e) \ + {APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d} }, + APPHINT_LIST_DEBUGINFO_COMMON + APPHINT_LIST_DEBUGINFO +#undef X +}; + +static const struct apphint_init_data init_data_debuginfo_device[] = { +#define X(a, b, c, d, e) \ + {APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d} }, + APPHINT_LIST_DEBUGINFO_DEVICE_COMMON + APPHINT_LIST_DEBUGINFO_DEVICE +#undef X +}; + +static const struct apphint_init_data_mapping init_data_debuginfo_device_to_modparams[] = { +#define X(a, b) \ + {APPHINT_ID_ ## a, APPHINT_ID_ ## b}, + APPHINT_LIST_DEBUIGINFO_DEVICE_X_MODPARAM_INIT_COMMON + APPHINT_LIST_DEBUIGINFO_DEVICE_X_MODPARAM_INIT +#undef X +}; + +#undef UINT32Bitfield +#undef UINT32List + +__maybe_unused static const char NO_PARAM_TABLE[] = {}; + +static const struct apphint_param param_lookup[] = { +#define X(a, b, c, d, e) \ + {APPHINT_ID_ ## a, APPHINT_DATA_TYPE_ ## b, e, ARRAY_SIZE(e) }, + APPHINT_LIST_ALL +#undef X +}; + +static const struct apphint_class_state class_state[] = { +#define X(a) {APPHINT_CLASS_ ## a, APPHINT_ENABLED_CLASS_ ## a}, + APPHINT_CLASS_LIST +#undef X +}; + +/* +******************************************************************************* + Global state +******************************************************************************/ +/* If the union apphint_value becomes such that it is not possible to read + * and write atomically, a mutex may be desirable to prevent a read returning + * a partially written state. 
+ * This would require a statically initialized mutex outside of the + * struct apphint_state to prevent use of an uninitialized mutex when + * module_params are provided on the command line. + * static DEFINE_MUTEX(apphint_mutex); + */ +static struct apphint_state +{ + struct workqueue_struct *workqueue; + DI_GROUP *debuginfo_device_rootdir[APPHINT_DEVICES_MAX]; + DI_ENTRY *debuginfo_device_entry[APPHINT_DEVICES_MAX][APPHINT_DEBUGINFO_DEVICE_ID_MAX]; + DI_GROUP *debuginfo_rootdir; + DI_ENTRY *debuginfo_entry[APPHINT_DEBUGINFO_ID_MAX]; + DI_GROUP *buildvar_rootdir; + DI_ENTRY *buildvar_entry[APPHINT_BUILDVAR_ID_MAX]; + + unsigned num_devices; + PVRSRV_DEVICE_NODE *devices[APPHINT_DEVICES_MAX]; + unsigned initialized; + + /* Array contains value space for 1 copy of all apphint values defined + * (for device 1) and N copies of device specific apphint values for + * multi-device platforms. + */ + struct apphint_action val[APPHINT_ID_MAX + ((APPHINT_DEVICES_MAX-1)*APPHINT_DEBUGINFO_DEVICE_ID_MAX)]; + +} apphint = { +/* statically initialise default values to ensure that any module_params + * provided on the command line are not overwritten by defaults. 
+ */ + .val = { +#define UINT32Bitfield UINT32 +#define UINT32List UINT32 +#define X(a, b, c, d, e) \ + { {NULL}, {NULL}, NULL, NULL, {.b=d}, false }, + APPHINT_LIST_ALL +#undef X +#undef UINT32Bitfield +#undef UINT32List + }, + .initialized = 0, + .num_devices = 0 +}; + +#define APPHINT_DEBUGINFO_DEVICE_ID_OFFSET (APPHINT_ID_MAX-APPHINT_DEBUGINFO_DEVICE_ID_MAX) + +static inline void +get_apphint_id_from_action_addr(const struct apphint_action * const addr, + APPHINT_ID * const id) +{ + *id = (APPHINT_ID)(addr - apphint.val); + if (*id >= APPHINT_ID_MAX) { + *id -= APPHINT_DEBUGINFO_DEVICE_ID_OFFSET; + *id %= APPHINT_DEBUGINFO_DEVICE_ID_MAX; + *id += APPHINT_DEBUGINFO_DEVICE_ID_OFFSET; + } +} + +static inline void +get_value_offset_from_device(const PVRSRV_DEVICE_NODE * const device, + int * const offset) +{ + int i; + + /* No device offset if not a device specific apphint */ + if (APPHINT_OF_DRIVER_NO_DEVICE == device) { + *offset = 0; + return; + } + + for (i = 0; device && i < APPHINT_DEVICES_MAX; i++) { + if (apphint.devices[i] == device) + break; + } + if (APPHINT_DEVICES_MAX == i) { + PVR_DPF((PVR_DBG_WARNING, "%s: Unregistered device", __func__)); + i = 0; + } + *offset = i * APPHINT_DEBUGINFO_DEVICE_ID_MAX; +} + +/** + * apphint_action_worker - perform an action after an AppHint update has been + * requested by a UM process + * And update the record of the current active value + */ +static void apphint_action_worker(struct work_struct *work) +{ + struct apphint_work *work_pkt = container_of(work, + struct apphint_work, + work); + struct apphint_action *a = work_pkt->action; + union apphint_value value = work_pkt->new_value; + APPHINT_ID id; + PVRSRV_ERROR result = PVRSRV_OK; + + get_apphint_id_from_action_addr(a, &id); + + if (a->set.UINT64) { + switch (param_lookup[id].data_type) { + case APPHINT_DATA_TYPE_UINT64: + result = a->set.UINT64(a->device, + a->private_data, + value.UINT64); + break; + + case APPHINT_DATA_TYPE_UINT32: + case 
APPHINT_DATA_TYPE_UINT32Bitfield: + case APPHINT_DATA_TYPE_UINT32List: + result = a->set.UINT32(a->device, + a->private_data, + value.UINT32); + break; + + case APPHINT_DATA_TYPE_BOOL: + result = a->set.BOOL(a->device, + a->private_data, + value.BOOL); + break; + + case APPHINT_DATA_TYPE_STRING: + result = a->set.STRING(a->device, + a->private_data, + value.STRING); + kfree(value.STRING); + break; + + default: + PVR_DPF((PVR_DBG_ERROR, + "%s: unrecognised data type (%d), index (%d)", + __func__, param_lookup[id].data_type, id)); + } + + if (PVRSRV_OK != result) { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed (%s)", + __func__, PVRSRVGetErrorString(result))); + } + } else { + if (a->free) { + kfree(a->stored.STRING); + } + a->stored = value; + if (param_lookup[id].data_type == APPHINT_DATA_TYPE_STRING) { + a->free = true; + } + PVR_DPF((PVR_DBG_MESSAGE, + "%s: AppHint value updated before handler is registered, ID(%d)", + __func__, id)); + } + kfree((void *)work_pkt); +} + +static void apphint_action(union apphint_value new_value, + struct apphint_action *action) +{ + struct apphint_work *work_pkt = kmalloc(sizeof(*work_pkt), GFP_KERNEL); + + /* queue apphint update on a serialized workqueue to avoid races */ + if (work_pkt) { + work_pkt->new_value = new_value; + work_pkt->action = action; + INIT_WORK(&work_pkt->work, apphint_action_worker); + if (0 == queue_work(apphint.workqueue, &work_pkt->work)) { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed to queue apphint change request", + __func__)); + goto err_exit; + } + } else { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed to alloc memory for apphint change request", + __func__)); + goto err_exit; + } + return; +err_exit: + kfree(new_value.STRING); +} + +/** + * apphint_read - read the different AppHint data types + * return -errno or the buffer size + */ +static int apphint_read(char *buffer, size_t count, APPHINT_ID ue, + union apphint_value *value) +{ + APPHINT_DATA_TYPE data_type = param_lookup[ue].data_type; + int result = 0; + + 
switch (data_type) { + case APPHINT_DATA_TYPE_UINT64: + if (kstrtou64(buffer, 0, &value->UINT64) < 0) { + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid UINT64 input data for id %d: %s", + __func__, ue, buffer)); + result = -EINVAL; + goto err_exit; + } + break; + case APPHINT_DATA_TYPE_UINT32: + if (kstrtou32(buffer, 0, &value->UINT32) < 0) { + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid UINT32 input data for id %d: %s", + __func__, ue, buffer)); + result = -EINVAL; + goto err_exit; + } + break; + case APPHINT_DATA_TYPE_BOOL: + switch (buffer[0]) { + case '0': + case 'n': + case 'N': + case 'f': + case 'F': + value->BOOL = IMG_FALSE; + break; + case '1': + case 'y': + case 'Y': + case 't': + case 'T': + value->BOOL = IMG_TRUE; + break; + default: + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid BOOL input data for id %d: %s", + __func__, ue, buffer)); + result = -EINVAL; + goto err_exit; + } + break; + case APPHINT_DATA_TYPE_UINT32List: + { + int i; + struct apphint_lookup *lookup = + (struct apphint_lookup *) + param_lookup[ue].data_type_helper; + int size = param_lookup[ue].helper_size; + /* buffer may include '\n', remove it */ + char *arg = strsep(&buffer, "\n"); + + if (lookup == (struct apphint_lookup *)NO_PARAM_TABLE) { + result = -EINVAL; + goto err_exit; + } + + for (i = 0; i < size; i++) { + if (strcasecmp(lookup[i].name, arg) == 0) { + value->UINT32 = lookup[i].value; + break; + } + } + if (i == size) { + if (OSStringLength(arg) == 0) { + PVR_DPF((PVR_DBG_ERROR, + "%s: No value set for AppHint", + __func__)); + } else { + PVR_DPF((PVR_DBG_ERROR, + "%s: Unrecognised AppHint value (%s)", + __func__, arg)); + } + result = -EINVAL; + } + break; + } + case APPHINT_DATA_TYPE_UINT32Bitfield: + { + int i; + struct apphint_lookup *lookup = + (struct apphint_lookup *) + param_lookup[ue].data_type_helper; + int size = param_lookup[ue].helper_size; + /* buffer may include '\n', remove it */ + char *string = strsep(&buffer, "\n"); + char *token = strsep(&string, ","); + + if (lookup == 
(struct apphint_lookup *)NO_PARAM_TABLE) { + result = -EINVAL; + goto err_exit; + } + + value->UINT32 = 0; + /* empty string is valid to clear the bitfield */ + while (token && *token) { + for (i = 0; i < size; i++) { + if (strcasecmp(lookup[i].name, token) == 0) { + value->UINT32 |= lookup[i].value; + break; + } + } + if (i == size) { + PVR_DPF((PVR_DBG_ERROR, + "%s: Unrecognised AppHint value (%s)", + __func__, token)); + result = -EINVAL; + goto err_exit; + } + token = strsep(&string, ","); + } + break; + } + case APPHINT_DATA_TYPE_STRING: + { + /* buffer may include '\n', remove it */ + char *string = strsep(&buffer, "\n"); + size_t len = OSStringLength(string); + + if (!len) { + result = -EINVAL; + goto err_exit; + } + + ++len; + + value->STRING = kmalloc(len , GFP_KERNEL); + if (!value->STRING) { + result = -ENOMEM; + goto err_exit; + } + + OSStringLCopy(value->STRING, string, len); + break; + } + default: + result = -EINVAL; + goto err_exit; + } + +err_exit: + return (result < 0) ? 
result : count; +} + +static PVRSRV_ERROR get_apphint_value_from_action(const struct apphint_action * const action, + union apphint_value * const value) +{ + APPHINT_ID id; + APPHINT_DATA_TYPE data_type; + PVRSRV_ERROR result = PVRSRV_OK; + + get_apphint_id_from_action_addr(action, &id); + data_type = param_lookup[id].data_type; + + if (action->query.UINT64) { + switch (data_type) { + case APPHINT_DATA_TYPE_UINT64: + result = action->query.UINT64(action->device, + action->private_data, + &value->UINT64); + break; + + case APPHINT_DATA_TYPE_UINT32: + case APPHINT_DATA_TYPE_UINT32Bitfield: + case APPHINT_DATA_TYPE_UINT32List: + result = action->query.UINT32(action->device, + action->private_data, + &value->UINT32); + break; + + case APPHINT_DATA_TYPE_BOOL: + result = action->query.BOOL(action->device, + action->private_data, + &value->BOOL); + break; + + case APPHINT_DATA_TYPE_STRING: + result = action->query.STRING(action->device, + action->private_data, + &value->STRING); + break; + default: + PVR_DPF((PVR_DBG_ERROR, + "%s: unrecognised data type (%d), index (%d)", + __func__, data_type, id)); + } + } else { + *value = action->stored; + } + + if (PVRSRV_OK != result) { + PVR_DPF((PVR_DBG_ERROR, "%s: failed (%d), index (%d)", __func__, result, id)); + } + + return result; +} + +/** + * apphint_write - write the current AppHint data to a buffer + * + * Returns length written or -errno + */ +static int apphint_write(char *buffer, const size_t size, + const struct apphint_action *a) +{ + const struct apphint_param *hint; + int result = 0; + APPHINT_ID id; + union apphint_value value; + + get_apphint_id_from_action_addr(a, &id); + hint = ¶m_lookup[id]; + + result = get_apphint_value_from_action(a, &value); + + switch (hint->data_type) { + case APPHINT_DATA_TYPE_UINT64: + result += snprintf(buffer + result, size - result, + "0x%016llx", + value.UINT64); + break; + case APPHINT_DATA_TYPE_UINT32: + result += snprintf(buffer + result, size - result, + "0x%08x", + 
value.UINT32); + break; + case APPHINT_DATA_TYPE_BOOL: + result += snprintf(buffer + result, size - result, + "%s", + value.BOOL ? "Y" : "N"); + break; + case APPHINT_DATA_TYPE_STRING: + if (value.STRING) { + result += snprintf(buffer + result, size - result, + "%s", + *value.STRING ? value.STRING : "(none)"); + } else { + result += snprintf(buffer + result, size - result, + "(none)"); + } + break; + case APPHINT_DATA_TYPE_UINT32List: + { + struct apphint_lookup *lookup = + (struct apphint_lookup *) hint->data_type_helper; + IMG_UINT32 i; + + if (lookup == (struct apphint_lookup *)NO_PARAM_TABLE) { + result = -EINVAL; + goto err_exit; + } + + for (i = 0; i < hint->helper_size; i++) { + if (lookup[i].value == value.UINT32) { + result += snprintf(buffer + result, + size - result, + "%s", + lookup[i].name); + break; + } + } + break; + } + case APPHINT_DATA_TYPE_UINT32Bitfield: + { + struct apphint_lookup *lookup = + (struct apphint_lookup *) hint->data_type_helper; + IMG_UINT32 i; + + if (lookup == (struct apphint_lookup *)NO_PARAM_TABLE) { + result = -EINVAL; + goto err_exit; + } + + for (i = 0; i < hint->helper_size; i++) { + if (lookup[i].value & value.UINT32) { + result += snprintf(buffer + result, + size - result, + "%s,", + lookup[i].name); + } + } + if (result) { + /* remove any trailing ',' */ + --result; + *(buffer + result) = '\0'; + } else { + result += snprintf(buffer + result, + size - result, "none"); + } + break; + } + default: + PVR_DPF((PVR_DBG_ERROR, + "%s: unrecognised data type (%d), index (%d)", + __func__, hint->data_type, id)); + result = -EINVAL; + } + +err_exit: + return result; +} + +/* +******************************************************************************* + Module parameters initialization - different from debuginfo +******************************************************************************/ +/** + * apphint_kparam_set - Handle an update of a module parameter + * + * Returns 0, or -errno. arg is in kp->arg. 
+ */ +static int apphint_kparam_set(const char *val, const struct kernel_param *kp) +{ + char val_copy[APPHINT_BUFFER_SIZE]; + APPHINT_ID id; + union apphint_value value; + int result; + + /* need to discard const in case of string comparison */ + result = strlcpy(val_copy, val, APPHINT_BUFFER_SIZE); + + get_apphint_id_from_action_addr(kp->arg, &id); + if (result < APPHINT_BUFFER_SIZE) { + result = apphint_read(val_copy, result, id, &value); + if (result >= 0) { + ((struct apphint_action *)kp->arg)->stored = value; + ((struct apphint_action *)kp->arg)->initialised = true; + if (param_lookup[id].data_type == APPHINT_DATA_TYPE_STRING) { + ((struct apphint_action *)kp->arg)->free = true; + } + } + } else { + PVR_DPF((PVR_DBG_ERROR, "%s: String too long", __func__)); + } + return (result > 0) ? 0 : result; +} + +/** + * apphint_kparam_get - handle a read of a module parameter + * + * Returns length written or -errno. Buffer is 4k (ie. be short!) + */ +static int apphint_kparam_get(char *buffer, const struct kernel_param *kp) +{ + return apphint_write(buffer, PAGE_SIZE, kp->arg); +} + +__maybe_unused +static const struct kernel_param_ops apphint_kparam_fops = { + .set = apphint_kparam_set, + .get = apphint_kparam_get, +}; + +/* + * call module_param_cb() for all AppHints listed in APPHINT_LIST_MODPARAM_COMMON + APPHINT_LIST_MODPARAM + * apphint_modparam_class_ ## resolves to apphint_modparam_enable() except for + * AppHint classes that have been disabled. 
+ */ + +#define apphint_modparam_enable(name, number, perm) \ + module_param_cb(name, &apphint_kparam_fops, &apphint.val[number], perm); + +#define X(a, b, c, d, e) \ + apphint_modparam_class_ ##c(a, APPHINT_ID_ ## a, 0444) + APPHINT_LIST_MODPARAM_COMMON + APPHINT_LIST_MODPARAM +#undef X + +/* +******************************************************************************* + Debug Info get (seq file) operations - supporting functions +******************************************************************************/ +static void *apphint_di_start(OSDI_IMPL_ENTRY *s, IMG_UINT64 *pos) +{ + if (*pos == 0) { + /* We want only one entry in the sequence, one call to show() */ + return (void *) 1; + } + + PVR_UNREFERENCED_PARAMETER(s); + + return NULL; +} + +static void apphint_di_stop(OSDI_IMPL_ENTRY *s, void *v) +{ + PVR_UNREFERENCED_PARAMETER(s); + PVR_UNREFERENCED_PARAMETER(v); +} + +static void *apphint_di_next(OSDI_IMPL_ENTRY *s, void *v, IMG_UINT64 *pos) +{ + PVR_UNREFERENCED_PARAMETER(s); + PVR_UNREFERENCED_PARAMETER(v); + PVR_UNREFERENCED_PARAMETER(pos); + return NULL; +} + +static int apphint_di_show(OSDI_IMPL_ENTRY *s, void *v) +{ + IMG_CHAR km_buffer[APPHINT_BUFFER_SIZE]; + int result; + void *private = DIGetPrivData(s); + + PVR_UNREFERENCED_PARAMETER(v); + + result = apphint_write(km_buffer, APPHINT_BUFFER_SIZE, private); + if (result < 0) { + PVR_DPF((PVR_DBG_ERROR, "%s: failure", __func__)); + } else { + /* debuginfo requires a trailing \n, module_params don't */ + result += snprintf(km_buffer + result, + APPHINT_BUFFER_SIZE - result, + "\n"); + DIPuts(s, km_buffer); + } + + /* have to return 0 to see output */ + return (result < 0) ? 
result : 0; +} + +/* +******************************************************************************* + Debug Info supporting functions +******************************************************************************/ + +/** + * apphint_set - Handle a DI value update + */ +static IMG_INT64 apphint_set(const IMG_CHAR *buffer, IMG_UINT64 count, + IMG_UINT64 *ppos, void *data) +{ + APPHINT_ID id; + union apphint_value value; + struct apphint_action *action = data; + char km_buffer[APPHINT_BUFFER_SIZE]; + int result = 0; + + if (ppos == NULL) + return -EIO; + + if (count >= APPHINT_BUFFER_SIZE) { + PVR_DPF((PVR_DBG_ERROR, "%s: String too long (%" IMG_INT64_FMTSPECd ")", + __func__, count)); + result = -EINVAL; + goto err_exit; + } + + /* apphint_read() modifies the buffer so we need to copy it */ + memcpy(km_buffer, buffer, count); + /* count is larger than real buffer by 1 because DI framework appends + * a '\0' character at the end, but here we're ignoring this */ + count -= 1; + km_buffer[count] = '\0'; + + get_apphint_id_from_action_addr(action, &id); + result = apphint_read(km_buffer, count, id, &value); + if (result >= 0) + apphint_action(value, action); + + *ppos += count; +err_exit: + return result; +} + +/** + * apphint_debuginfo_init - Create the specified debuginfo entries + */ +static int apphint_debuginfo_init(const char *sub_dir, + unsigned device_num, + unsigned init_data_size, + const struct apphint_init_data *init_data, + DI_GROUP *parentdir, + DI_GROUP **rootdir, + DI_ENTRY *entry[]) +{ + PVRSRV_ERROR result; + unsigned i; + unsigned device_value_offset = device_num * APPHINT_DEBUGINFO_DEVICE_ID_MAX; + const DI_ITERATOR_CB iterator = { + .pfnStart = apphint_di_start, .pfnStop = apphint_di_stop, + .pfnNext = apphint_di_next, .pfnShow = apphint_di_show, + .pfnWrite = apphint_set + }; + + if (*rootdir) { + PVR_DPF((PVR_DBG_WARNING, + "AppHint DebugFS already created, skipping")); + result = -EEXIST; + goto err_exit; + } + + result = DICreateGroup(sub_dir, 
parentdir, rootdir); + if (result != PVRSRV_OK) { + PVR_DPF((PVR_DBG_WARNING, + "Failed to create \"%s\" DebugFS directory.", sub_dir)); + goto err_exit; + } + + for (i = 0; i < init_data_size; i++) { + if (!class_state[init_data[i].class].enabled) + continue; + + result = DICreateEntry(init_data[i].name, + *rootdir, + &iterator, + (void *) &apphint.val[init_data[i].id + device_value_offset], + DI_ENTRY_TYPE_GENERIC, + &entry[i]); + if (result != PVRSRV_OK) { + PVR_DPF((PVR_DBG_WARNING, + "Failed to create \"%s/%s\" DebugFS entry.", + sub_dir, init_data[i].name)); + } + } + + return 0; + +err_exit: + return result; +} + +/** + * apphint_debuginfo_deinit- destroy the debuginfo entries + */ +static void apphint_debuginfo_deinit(unsigned num_entries, + DI_GROUP **rootdir, + DI_ENTRY *entry[]) +{ + unsigned i; + + for (i = 0; i < num_entries; i++) { + if (entry[i]) { + DIDestroyEntry(entry[i]); + } + } + + if (*rootdir) { + DIDestroyGroup(*rootdir); + *rootdir = NULL; + } +} + +/* +******************************************************************************* + AppHint status dump implementation +******************************************************************************/ +#if defined(PDUMP) +static void apphint_pdump_values(void *flags, const IMG_CHAR *format, ...) 
+{ + char km_buffer[APPHINT_BUFFER_SIZE]; + IMG_UINT32 ui32Flags = *(IMG_UINT32 *)flags; + va_list ap; + + va_start(ap, format); + (void)vsnprintf(km_buffer, APPHINT_BUFFER_SIZE, format, ap); + va_end(ap); + + PDumpCommentKM(km_buffer, ui32Flags); +} +#endif + +static IMG_BOOL is_apphint_value_equal(const APPHINT_DATA_TYPE data_type, + const union apphint_value * const left, + const union apphint_value * const right) +{ + switch (data_type) { + case APPHINT_DATA_TYPE_UINT64: + return left->UINT64 == right->UINT64; + case APPHINT_DATA_TYPE_UINT32: + case APPHINT_DATA_TYPE_UINT32List: + case APPHINT_DATA_TYPE_UINT32Bitfield: + return left->UINT32 == right->UINT32; + case APPHINT_DATA_TYPE_BOOL: + return left->BOOL == right->BOOL; + case APPHINT_DATA_TYPE_STRING: + return (OSStringNCompare(left->STRING, right->STRING, OSStringLength(right->STRING) + 1) == 0 ? IMG_TRUE : IMG_FALSE); + default: + PVR_DPF((PVR_DBG_WARNING, "%s: unhandled data type (%d)", __func__, data_type)); + return IMG_FALSE; + } +} + +static void apphint_dump_values(const char *group_name, + int device_num, + const struct apphint_init_data *group_data, + int group_size, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + bool list_all) +{ + int i, result; + int device_value_offset = device_num * APPHINT_DEBUGINFO_DEVICE_ID_MAX; + char km_buffer[APPHINT_BUFFER_SIZE]; + char count = 0; + + PVR_DUMPDEBUG_LOG(" %s", group_name); + for (i = 0; i < group_size; i++) + { + IMG_UINT32 id = group_data[i].id; + APPHINT_DATA_TYPE data_type = param_lookup[id].data_type; + const struct apphint_action *action = &apphint.val[id + device_value_offset]; + union apphint_value value; + + result = get_apphint_value_from_action(action, &value); + + if (PVRSRV_OK != result) { + continue; + } + + /* List only apphints with non-default values */ + if (!list_all && + is_apphint_value_equal(data_type, &value, &group_data[i].default_value)) { + continue; + } + + result = apphint_write(km_buffer, 
APPHINT_BUFFER_SIZE, action); + count++; + + if (result <= 0) { + PVR_DUMPDEBUG_LOG(" %s: ", + group_data[i].name); + } else { + PVR_DUMPDEBUG_LOG(" %s: %s", + group_data[i].name, km_buffer); + } + } + + if (count == 0) { + PVR_DUMPDEBUG_LOG(" none"); + } +} + +/** + * Callback for debug dump + */ +static void apphint_dump_state(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, + IMG_UINT32 ui32VerbLevel, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + int i, result; + char km_buffer[APPHINT_BUFFER_SIZE]; + PVRSRV_DEVICE_NODE *device = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle; + + if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH)) { + PVR_DUMPDEBUG_LOG("------[ AppHint Settings ]------"); + + apphint_dump_values("Build Vars", 0, + init_data_buildvar, ARRAY_SIZE(init_data_buildvar), + pfnDumpDebugPrintf, pvDumpDebugFile, true); + + apphint_dump_values("Module Params", 0, + init_data_modparam, ARRAY_SIZE(init_data_modparam), + pfnDumpDebugPrintf, pvDumpDebugFile, false); + + apphint_dump_values("Debug Info Params", 0, + init_data_debuginfo, ARRAY_SIZE(init_data_debuginfo), + pfnDumpDebugPrintf, pvDumpDebugFile, false); + + for (i = 0; i < APPHINT_DEVICES_MAX; i++) { + if (!apphint.devices[i] + || (device && device != apphint.devices[i])) + continue; + + result = snprintf(km_buffer, + APPHINT_BUFFER_SIZE, + "Debug Info Params Device ID: %d", + i); + if (0 > result) + continue; + + apphint_dump_values(km_buffer, i, + init_data_debuginfo_device, + ARRAY_SIZE(init_data_debuginfo_device), + pfnDumpDebugPrintf, + pvDumpDebugFile, + false); + } + } +} + +/* +******************************************************************************* + Public interface +******************************************************************************/ +int pvr_apphint_init(void) +{ + int result, i; + + if (apphint.initialized) { + result = -EEXIST; + goto err_out; + } + + for (i = 0; i < APPHINT_DEVICES_MAX; i++) + apphint.devices[i] = NULL; + + /* 
create workqueue with strict execution ordering to ensure no + * race conditions when setting/updating apphints from different + * contexts + */ + apphint.workqueue = alloc_workqueue("apphint_workqueue", + WQ_UNBOUND | WQ_FREEZABLE, 1); + if (!apphint.workqueue) { + result = -ENOMEM; + goto err_out; + } + + result = apphint_debuginfo_init("apphint", 0, + ARRAY_SIZE(init_data_debuginfo), init_data_debuginfo, + NULL, + &apphint.debuginfo_rootdir, apphint.debuginfo_entry); + if (0 != result) + goto err_out; + + result = apphint_debuginfo_init("buildvar", 0, + ARRAY_SIZE(init_data_buildvar), init_data_buildvar, + NULL, + &apphint.buildvar_rootdir, apphint.buildvar_entry); + + apphint.initialized = 1; + +err_out: + return result; +} + +int pvr_apphint_device_register(PVRSRV_DEVICE_NODE *device) +{ + int result, i; + char device_num[APPHINT_BUFFER_SIZE]; + unsigned device_value_offset; + + if (!apphint.initialized) { + result = -EAGAIN; + goto err_out; + } + + if (apphint.num_devices+1 >= APPHINT_DEVICES_MAX) { + result = -EMFILE; + goto err_out; + } + + result = snprintf(device_num, APPHINT_BUFFER_SIZE, "%u", apphint.num_devices); + if (result < 0) { + PVR_DPF((PVR_DBG_WARNING, + "snprintf failed (%d)", result)); + result = -EINVAL; + goto err_out; + } + + /* Set the default values for the new device */ + device_value_offset = apphint.num_devices * APPHINT_DEBUGINFO_DEVICE_ID_MAX; + for (i = 0; i < APPHINT_DEBUGINFO_DEVICE_ID_MAX; i++) { + apphint.val[init_data_debuginfo_device[i].id + device_value_offset].stored + = init_data_debuginfo_device[i].default_value; + } + + /* Set value of an apphint if mapping to module param exists for it + * and this module parameter has been initialised */ + for (i = 0; i < ARRAY_SIZE(init_data_debuginfo_device_to_modparams); i++) { + const struct apphint_init_data_mapping *mapping = + &init_data_debuginfo_device_to_modparams[i]; + const struct apphint_action *modparam_action = + &apphint.val[mapping->modparam_apphint_id]; + struct 
apphint_action *device_action = + &apphint.val[mapping->device_apphint_id + device_value_offset]; + + /* Set only if the module parameter was explicitly set during the module + * load. */ + if (modparam_action->initialised) { + device_action->stored = modparam_action->stored; + } + } + + result = apphint_debuginfo_init(device_num, apphint.num_devices, + ARRAY_SIZE(init_data_debuginfo_device), + init_data_debuginfo_device, + apphint.debuginfo_rootdir, + &apphint.debuginfo_device_rootdir[apphint.num_devices], + apphint.debuginfo_device_entry[apphint.num_devices]); + if (0 != result) + goto err_out; + + apphint.devices[apphint.num_devices] = device; + apphint.num_devices++; + + (void)SOPvrDbgRequestNotifyRegister( + &device->hAppHintDbgReqNotify, + device, + apphint_dump_state, + DEBUG_REQUEST_APPHINT, + device); + +err_out: + return result; +} + +void pvr_apphint_device_unregister(PVRSRV_DEVICE_NODE *device) +{ + int i; + + if (!apphint.initialized) + return; + + /* find the device */ + for (i = 0; i < APPHINT_DEVICES_MAX; i++) { + if (apphint.devices[i] == device) + break; + } + + if (APPHINT_DEVICES_MAX == i) + return; + + if (device->hAppHintDbgReqNotify) { + (void)SOPvrDbgRequestNotifyUnregister( + device->hAppHintDbgReqNotify); + device->hAppHintDbgReqNotify = NULL; + } + + apphint_debuginfo_deinit(APPHINT_DEBUGINFO_DEVICE_ID_MAX, + &apphint.debuginfo_device_rootdir[i], + apphint.debuginfo_device_entry[i]); + + apphint.devices[i] = NULL; + + WARN_ON(apphint.num_devices==0); + apphint.num_devices--; +} + +void pvr_apphint_deinit(void) +{ + int i; + + if (!apphint.initialized) + return; + + /* remove any remaining device data */ + for (i = 0; apphint.num_devices && i < APPHINT_DEVICES_MAX; i++) { + if (apphint.devices[i]) + pvr_apphint_device_unregister(apphint.devices[i]); + } + + /* free all alloc'd string apphints and set to NULL */ + for (i = 0; i < ARRAY_SIZE(apphint.val); i++) { + if (apphint.val[i].free && apphint.val[i].stored.STRING) { + 
kfree(apphint.val[i].stored.STRING); + apphint.val[i].stored.STRING = NULL; + apphint.val[i].free = false; + } + } + + apphint_debuginfo_deinit(APPHINT_DEBUGINFO_ID_MAX, + &apphint.debuginfo_rootdir, apphint.debuginfo_entry); + apphint_debuginfo_deinit(APPHINT_BUILDVAR_ID_MAX, + &apphint.buildvar_rootdir, apphint.buildvar_entry); + + destroy_workqueue(apphint.workqueue); + + apphint.initialized = 0; +} + +void pvr_apphint_dump_state(void) +{ +#if defined(PDUMP) + IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS; + + apphint_dump_state(NULL, DEBUG_REQUEST_VERBOSITY_HIGH, + apphint_pdump_values, (void *)&ui32Flags); +#endif + apphint_dump_state(NULL, DEBUG_REQUEST_VERBOSITY_HIGH, + NULL, NULL); +} + +int pvr_apphint_get_uint64(APPHINT_ID ue, IMG_UINT64 *pVal) +{ + int error = -ERANGE; + + if (ue < APPHINT_ID_MAX) { + *pVal = apphint.val[ue].stored.UINT64; + error = 0; + } + return error; +} + +int pvr_apphint_get_uint32(APPHINT_ID ue, IMG_UINT32 *pVal) +{ + int error = -ERANGE; + + if (ue < APPHINT_ID_MAX) { + *pVal = apphint.val[ue].stored.UINT32; + error = 0; + } + return error; +} + +int pvr_apphint_get_bool(APPHINT_ID ue, IMG_BOOL *pVal) +{ + int error = -ERANGE; + + if (ue < APPHINT_ID_MAX) { + error = 0; + *pVal = apphint.val[ue].stored.BOOL; + } + return error; +} + +int pvr_apphint_get_string(APPHINT_ID ue, IMG_CHAR *pBuffer, size_t size) +{ + int error = -ERANGE; + if (ue < APPHINT_ID_MAX && apphint.val[ue].stored.STRING) { + if (OSStringLCopy(pBuffer, apphint.val[ue].stored.STRING, size) < size) { + error = 0; + } + } + return error; +} + +int pvr_apphint_set_uint64(APPHINT_ID ue, IMG_UINT64 Val) +{ + int error = -ERANGE; + + if ((ue < APPHINT_ID_MAX) && + (param_lookup[ue].data_type == APPHINT_DATA_TYPE_UINT64)) { + + if (apphint.val[ue].set.UINT64) { + apphint.val[ue].set.UINT64(apphint.val[ue].device, apphint.val[ue].private_data, Val); + } else { + apphint.val[ue].stored.UINT64 = Val; + } + error = 0; + } + + return error; +} + +int 
pvr_apphint_set_uint32(APPHINT_ID ue, IMG_UINT32 Val) +{ + int error = -ERANGE; + + if ((ue < APPHINT_ID_MAX) && + (param_lookup[ue].data_type == APPHINT_DATA_TYPE_UINT32)) { + + if (apphint.val[ue].set.UINT32) { + apphint.val[ue].set.UINT32(apphint.val[ue].device, apphint.val[ue].private_data, Val); + } else { + apphint.val[ue].stored.UINT32 = Val; + } + error = 0; + } + + return error; +} + +int pvr_apphint_set_bool(APPHINT_ID ue, IMG_BOOL Val) +{ + int error = -ERANGE; + + if ((ue < APPHINT_ID_MAX) && + (param_lookup[ue].data_type == APPHINT_DATA_TYPE_BOOL)) { + + error = 0; + if (apphint.val[ue].set.BOOL) { + apphint.val[ue].set.BOOL(apphint.val[ue].device, apphint.val[ue].private_data, Val); + } else { + apphint.val[ue].stored.BOOL = Val; + } + } + + return error; +} + +int pvr_apphint_set_string(APPHINT_ID ue, IMG_CHAR *pBuffer, size_t size) +{ + int error = -ERANGE; + + if ((ue < APPHINT_ID_MAX) && + ((param_lookup[ue].data_type == APPHINT_DATA_TYPE_STRING) && + apphint.val[ue].stored.STRING)) { + + if (apphint.val[ue].set.STRING) { + error = apphint.val[ue].set.STRING(apphint.val[ue].device, apphint.val[ue].private_data, pBuffer); + } else { + if (strlcpy(apphint.val[ue].stored.STRING, pBuffer, size) < size) { + error = 0; + } + } + } + + return error; +} + +void pvr_apphint_register_handlers_uint64(APPHINT_ID id, + PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 *value), + PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 value), + const PVRSRV_DEVICE_NODE *device, + const void *private_data) +{ + int device_value_offset; + + if (id >= APPHINT_ID_MAX) { + PVR_DPF((PVR_DBG_ERROR, + "%s: AppHint ID (%d) is out of range, max (%d)", + __func__, id, APPHINT_ID_MAX-1)); + return; + } + + get_value_offset_from_device(device, &device_value_offset); + + switch (param_lookup[id].data_type) { + case APPHINT_DATA_TYPE_UINT64: + break; + default: + PVR_DPF((PVR_DBG_ERROR, + "%s: Does 
not match AppHint data type for ID (%d)", + __func__, id)); + return; + } + + apphint.val[id + device_value_offset] = (struct apphint_action){ + .query.UINT64 = query, + .set.UINT64 = set, + .device = device, + .private_data = private_data, + .stored = apphint.val[id + device_value_offset].stored + }; +} + +void pvr_apphint_register_handlers_uint32(APPHINT_ID id, + PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 *value), + PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 value), + const PVRSRV_DEVICE_NODE *device, + const void *private_data) +{ + int device_value_offset; + + if (id >= APPHINT_ID_MAX) { + PVR_DPF((PVR_DBG_ERROR, + "%s: AppHint ID (%d) is out of range, max (%d)", + __func__, id, APPHINT_ID_MAX-1)); + return; + } + + get_value_offset_from_device(device, &device_value_offset); + + switch (param_lookup[id].data_type) { + case APPHINT_DATA_TYPE_UINT32: + case APPHINT_DATA_TYPE_UINT32Bitfield: + case APPHINT_DATA_TYPE_UINT32List: + break; + + default: + PVR_DPF((PVR_DBG_ERROR, + "%s: Does not match AppHint data type for ID (%d)", + __func__, id)); + return; + } + + apphint.val[id + device_value_offset] = (struct apphint_action){ + .query.UINT32 = query, + .set.UINT32 = set, + .device = device, + .private_data = private_data, + .stored = apphint.val[id + device_value_offset].stored + }; +} + +void pvr_apphint_register_handlers_bool(APPHINT_ID id, + PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL *value), + PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL value), + const PVRSRV_DEVICE_NODE *device, + const void *private_data) +{ + int device_value_offset; + + if (id >= APPHINT_ID_MAX) { + PVR_DPF((PVR_DBG_ERROR, + "%s: AppHint ID (%d) is out of range, max (%d)", + __func__, id, APPHINT_ID_MAX-1)); + return; + } + + get_value_offset_from_device(device, &device_value_offset); + + switch 
(param_lookup[id].data_type) { + case APPHINT_DATA_TYPE_BOOL: + break; + + default: + PVR_DPF((PVR_DBG_ERROR, + "%s: Does not match AppHint data type for ID (%d)", + __func__, id)); + return; + } + + apphint.val[id + device_value_offset] = (struct apphint_action){ + .query.BOOL = query, + .set.BOOL = set, + .device = device, + .private_data = private_data, + .stored = apphint.val[id + device_value_offset].stored + }; +} + +void pvr_apphint_register_handlers_string(APPHINT_ID id, + PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR **value), + PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR *value), + const PVRSRV_DEVICE_NODE *device, + const void *private_data) +{ + int device_value_offset; + + if (id >= APPHINT_ID_MAX) { + PVR_DPF((PVR_DBG_ERROR, + "%s: AppHint ID (%d) is out of range, max (%d)", + __func__, id, APPHINT_ID_MAX-1)); + return; + } + + get_value_offset_from_device(device, &device_value_offset); + + switch (param_lookup[id].data_type) { + case APPHINT_DATA_TYPE_STRING: + break; + + default: + PVR_DPF((PVR_DBG_ERROR, + "%s: Does not match AppHint data type for ID (%d)", + __func__, id)); + return; + } + + apphint.val[id + device_value_offset] = (struct apphint_action){ + .query.STRING = query, + .set.STRING = set, + .device = device, + .private_data = private_data, + .stored = apphint.val[id + device_value_offset].stored + }; +} + +/* EOF */ diff --git a/drivers/gpu/drm/phytium/octopus/km_apphint.h b/drivers/gpu/drm/phytium/octopus/km_apphint.h new file mode 100644 index 000000000000..c065eed2d999 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/km_apphint.h @@ -0,0 +1,99 @@ +/*************************************************************************/ /*! +@File km_apphint.h +@Title Apphint internal header +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Description Linux kernel AppHint control +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef KM_APPHINT_H +#define KM_APPHINT_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#include "pvrsrv_apphint.h" +#include "km_apphint_defs.h" +#include "device.h" + +int pvr_apphint_init(void); +void pvr_apphint_deinit(void); +int pvr_apphint_device_register(PVRSRV_DEVICE_NODE *device); +void pvr_apphint_device_unregister(PVRSRV_DEVICE_NODE *device); +void pvr_apphint_dump_state(void); + +int pvr_apphint_get_uint64(APPHINT_ID ue, IMG_UINT64 *pVal); +int pvr_apphint_get_uint32(APPHINT_ID ue, IMG_UINT32 *pVal); +int pvr_apphint_get_bool(APPHINT_ID ue, IMG_BOOL *pVal); +int pvr_apphint_get_string(APPHINT_ID ue, IMG_CHAR *pBuffer, size_t size); + +int pvr_apphint_set_uint64(APPHINT_ID ue, IMG_UINT64 Val); +int pvr_apphint_set_uint32(APPHINT_ID ue, IMG_UINT32 Val); +int pvr_apphint_set_bool(APPHINT_ID ue, IMG_BOOL Val); +int pvr_apphint_set_string(APPHINT_ID ue, IMG_CHAR *pBuffer, size_t size); + +void pvr_apphint_register_handlers_uint64(APPHINT_ID id, + PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 *value), + PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 value), + const PVRSRV_DEVICE_NODE *device, + const void * private_data); +void pvr_apphint_register_handlers_uint32(APPHINT_ID id, + PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void 
*private_data, IMG_UINT32 *value), + PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 value), + const PVRSRV_DEVICE_NODE *device, + const void *private_data); +void pvr_apphint_register_handlers_bool(APPHINT_ID id, + PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL *value), + PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL value), + const PVRSRV_DEVICE_NODE *device, + const void *private_data); +void pvr_apphint_register_handlers_string(APPHINT_ID id, + PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR **value), + PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR *value), + const PVRSRV_DEVICE_NODE *device, + const void *private_data); + +#if defined(__cplusplus) +} +#endif +#endif /* KM_APPHINT_H */ + +/****************************************************************************** + End of file (km_apphint.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/km_apphint_defs.h b/drivers/gpu/drm/phytium/octopus/km_apphint_defs.h new file mode 100644 index 000000000000..c0fb5ee23182 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/km_apphint_defs.h @@ -0,0 +1,178 @@ +/*************************************************************************/ /*! +@File +@Title Services AppHint definitions +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Device specific functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#include "km_apphint_defs_common.h" + +#ifndef KM_APPHINT_DEFS_H +#define KM_APPHINT_DEFS_H + +/* NB: The 'DEVICE' AppHints must be last in this list as they will be + * duplicated in the case of a driver supporting multiple devices + */ +#define APPHINT_LIST_ALL \ + APPHINT_LIST_BUILDVAR_COMMON \ + APPHINT_LIST_BUILDVAR \ + APPHINT_LIST_MODPARAM_COMMON \ + APPHINT_LIST_MODPARAM \ + APPHINT_LIST_DEBUGINFO_COMMON \ + APPHINT_LIST_DEBUGINFO \ + APPHINT_LIST_DEBUGINFO_DEVICE_COMMON \ + APPHINT_LIST_DEBUGINFO_DEVICE + + +/* +******************************************************************************* + Build variables (octopus-specific) + All of these should be configurable only through the 'default' value +******************************************************************************/ +#define APPHINT_LIST_BUILDVAR + +/* +******************************************************************************* + Module parameters (octopus-specific) +******************************************************************************/ +#define APPHINT_LIST_MODPARAM \ +/* name, type, class, default, helper, */ \ +X(FabricCoherencyOverride, UINT32, ALWAYS, PVRSRV_APPHINT_FABRICCOHERENCYOVERRIDE, NO_PARAM_TABLE ) \ +\ +X(EnableGenericDMKillingRandMode, BOOL, VALIDATION, PVRSRV_APPHINT_ENABLEDMKILLINGRANDMODE, NO_PARAM_TABLE ) \ +X(KillingCtl, UINT32, VALIDATION, PVRSRV_APPHINT_KILLINGCTL, NO_PARAM_TABLE ) \ +X(CDMTDMKillingCtl, UINT32, VALIDATION, PVRSRV_APPHINT_CDMTDM_KILLINGCTL, NO_PARAM_TABLE ) \ +X(HWValEnableSPUPowerMaskChange, BOOL, VALIDATION, PVRSRV_APPHINT_HWVALENABLESPUPOWERMASKCHANGE, NO_PARAM_TABLE ) \ +X(HWValAvailableSPUMask, UINT32, VALIDATION, PVRSRV_APPHINT_HWVALAVAILABLESPUMASK, NO_PARAM_TABLE ) \ +\ +X(HWPerfDisableCounterFilter, BOOL, VALIDATION, PVRSRV_APPHINT_HWPERFDISABLECOUNTERFILTER, NO_PARAM_TABLE ) \ +\ +X(ISPSchedulingLatencyMode, UINT32, ALWAYS, 
PVRSRV_APPHINT_ISPSCHEDULINGLATENCYMODE, NO_PARAM_TABLE ) \ +X(ValidateSOCUSCTimer, BOOL, VALIDATION, PVRSRV_APPHINT_VALIDATESOCUSCTIMERS, NO_PARAM_TABLE ) \ +\ +X(USRMNumRegionsVDM, UINT32, VALIDATION, 0, NO_PARAM_TABLE ) \ +X(USRMNumRegionsCDM, UINT32, VALIDATION, 0, NO_PARAM_TABLE ) \ +X(USRMNumRegionsDDM, UINT32, VALIDATION, 0, NO_PARAM_TABLE ) \ +X(USRMNumRegionsPDM, UINT32, VALIDATION, 0, NO_PARAM_TABLE ) \ +X(USRMNumRegionsTDM, UINT32, VALIDATION, 0, NO_PARAM_TABLE ) \ +\ +X(UVBRMNumRegionsVDM, UINT64, VALIDATION, 0, NO_PARAM_TABLE ) \ +X(UVBRMNumRegionsDDM, UINT32, VALIDATION, 0, NO_PARAM_TABLE ) \ +\ +X(CDMArbitrationOverride, UINT32, ALWAYS, PVRSRV_APPHINT_CDMARBITRATIONOVERRIDE, NO_PARAM_TABLE ) \ +\ +X(DualLockstepFWProcessor, BOOL, VALIDATION, 1, NO_PARAM_TABLE ) \ +X(GPUStatePin, BOOL, VALIDATION, 0, NO_PARAM_TABLE ) \ +X(PowerDomainKickInterval, UINT32, VALIDATION, 0, NO_PARAM_TABLE ) \ +\ +X(RCEDisableMask, UINT64, VALIDATION, PVRSRV_APPHINT_RCEDISABLEMASK, NO_PARAM_TABLE ) \ + +/* +******************************************************************************* + Debugfs parameters (octopus-specific) - driver configuration +******************************************************************************/ +#define APPHINT_LIST_DEBUGINFO \ +/* name, type, class, default, helper, */ \ + +/* +******************************************************************************* + Debugfs parameters (octopus-specific) - device configuration +******************************************************************************/ +#define APPHINT_LIST_DEBUGINFO_DEVICE \ +/* name, type, class, default, helper, */ \ + +/* +******************************************************************************* + Mapping between debugfs parameters and module parameters. + This mapping is used to initialise device specific apphints from module + parameters. 
+******************************************************************************/ +#define APPHINT_LIST_DEBUIGINFO_DEVICE_X_MODPARAM_INIT \ +/* debuginfo device apphint name modparam name */ + +/* +******************************************************************************* + + Table generated enums + +******************************************************************************/ +/* Unique ID for all AppHints */ +typedef enum { +#define X(a, b, c, d, e) APPHINT_ID_ ## a, + APPHINT_LIST_ALL +#undef X + APPHINT_ID_MAX +} APPHINT_ID; + +/* ID for build variable Apphints - used for build variable only structures */ +typedef enum { +#define X(a, b, c, d, e) APPHINT_BUILDVAR_ID_ ## a, + APPHINT_LIST_BUILDVAR_COMMON + APPHINT_LIST_BUILDVAR +#undef X + APPHINT_BUILDVAR_ID_MAX +} APPHINT_BUILDVAR_ID; + +/* ID for Modparam Apphints - used for modparam only structures */ +typedef enum { +#define X(a, b, c, d, e) APPHINT_MODPARAM_ID_ ## a, + APPHINT_LIST_MODPARAM_COMMON + APPHINT_LIST_MODPARAM +#undef X + APPHINT_MODPARAM_ID_MAX +} APPHINT_MODPARAM_ID; + +/* ID for Debugfs Apphints - used for debugfs only structures */ +typedef enum { +#define X(a, b, c, d, e) APPHINT_DEBUGINFO_ID_ ## a, + APPHINT_LIST_DEBUGINFO_COMMON + APPHINT_LIST_DEBUGINFO +#undef X + APPHINT_DEBUGINFO_ID_MAX +} APPHINT_DEBUGINFO_ID; + +/* ID for Debugfs Device Apphints - used for debugfs device only structures */ +typedef enum { +#define X(a, b, c, d, e) APPHINT_DEBUGINFO_DEVICE_ID_ ## a, + APPHINT_LIST_DEBUGINFO_DEVICE_COMMON + APPHINT_LIST_DEBUGINFO_DEVICE +#undef X + APPHINT_DEBUGINFO_DEVICE_ID_MAX +} APPHINT_DEBUGINFO_DEVICE_ID; + +#endif /* KM_APPHINT_DEFS_H */ diff --git a/drivers/gpu/drm/phytium/octopus/km_apphint_defs_common.h b/drivers/gpu/drm/phytium/octopus/km_apphint_defs_common.h new file mode 100644 index 000000000000..522e5d288b6f --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/km_apphint_defs_common.h @@ -0,0 +1,281 @@ 
+/*************************************************************************/ /*! +@File +@Title Services AppHint definitions +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Device specific functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + + +#ifndef KM_APPHINT_DEFS_COMMON_H +#define KM_APPHINT_DEFS_COMMON_H + +/* +******************************************************************************* + Build variables + All of these should be configurable only through the 'default' value +******************************************************************************/ +#define APPHINT_LIST_BUILDVAR_COMMON \ +/* name, type, class, default, helper, */ \ +X(EnableTrustedDeviceAceConfig, BOOL, GPUVIRT_VAL, PVRSRV_APPHINT_ENABLETRUSTEDDEVICEACECONFIG, NO_PARAM_TABLE ) \ +X(CleanupThreadPriority, UINT32, NEVER, PVRSRV_APPHINT_CLEANUPTHREADPRIORITY, NO_PARAM_TABLE ) \ +X(CacheOpThreadPriority, UINT32, NEVER, PVRSRV_APPHINT_CACHEOPTHREADPRIORITY, NO_PARAM_TABLE ) \ +X(WatchdogThreadPriority, UINT32, NEVER, PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY, NO_PARAM_TABLE ) \ +X(HWPerfClientBufferSize, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTBUFFERSIZE, NO_PARAM_TABLE ) \ + +/* +******************************************************************************* + Module parameters +******************************************************************************/ +#define APPHINT_LIST_MODPARAM_COMMON \ +/* name, type, class, default, helper, */ \ +X(GeneralNon4KHeapPageSize, UINT32, ALWAYS, PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE, NO_PARAM_TABLE ) \ +\ +X(EnableSignatureChecks, BOOL, PDUMP, 
PVRSRV_APPHINT_ENABLESIGNATURECHECKS, NO_PARAM_TABLE ) \ +X(SignatureChecksBufSize, UINT32, PDUMP, PVRSRV_APPHINT_SIGNATURECHECKSBUFSIZE, NO_PARAM_TABLE ) \ +\ +X(DisableClockGating, BOOL, ALWAYS, PVRSRV_APPHINT_DISABLECLOCKGATING, NO_PARAM_TABLE ) \ +X(DisableDMOverlap, BOOL, ALWAYS, PVRSRV_APPHINT_DISABLEDMOVERLAP, NO_PARAM_TABLE ) \ +\ +X(EnableRandomContextSwitch, BOOL, VALIDATION, PVRSRV_APPHINT_ENABLERANDOMCONTEXTSWITCH, NO_PARAM_TABLE ) \ +X(EnableSoftResetContextSwitch, BOOL, ALWAYS, PVRSRV_APPHINT_ENABLESOFTRESETCNTEXTSWITCH, NO_PARAM_TABLE ) \ +X(EnableFWContextSwitch, UINT32, ALWAYS, PVRSRV_APPHINT_ENABLEFWCONTEXTSWITCH, NO_PARAM_TABLE ) \ +X(FWContextSwitchProfile, UINT32, VALIDATION, PVRSRV_APPHINT_FWCONTEXTSWITCHPROFILE, NO_PARAM_TABLE ) \ +\ +X(EnableRDPowerIsland, UINT32, ALWAYS, PVRSRV_APPHINT_ENABLERDPOWERISLAND, NO_PARAM_TABLE ) \ +\ +X(DriverMode, UINT32, ALWAYS, PVRSRV_APPHINT_DRIVERMODE, NO_PARAM_TABLE ) \ +\ +X(FirmwarePerf, UINT32, VALIDATION, PVRSRV_APPHINT_FIRMWAREPERF, NO_PARAM_TABLE ) \ +\ +X(HWPerfFWBufSizeInKB, UINT32, VALIDATION, PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB, NO_PARAM_TABLE ) \ +X(HWPerfHostBufSizeInKB, UINT32, VALIDATION, PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB, NO_PARAM_TABLE ) \ +X(HWPerfHostThreadTimeoutInMS, UINT32, VALIDATION, PVRSRV_APPHINT_HWPERFHOSTTHREADTIMEOUTINMS, NO_PARAM_TABLE ) \ +\ +X(JonesDisableMask, UINT32, VALIDATION, PVRSRV_APPHINT_JONESDISABLEMASK, NO_PARAM_TABLE ) \ +X(NewFilteringMode, BOOL, VALIDATION, PVRSRV_APPHINT_NEWFILTERINGMODE, NO_PARAM_TABLE ) \ +X(TruncateMode, UINT32, VALIDATION, PVRSRV_APPHINT_TRUNCATEMODE, NO_PARAM_TABLE ) \ +X(EmuMaxFreq, UINT32, ALWAYS, PVRSRV_APPHINT_EMUMAXFREQ, NO_PARAM_TABLE ) \ +X(GPIOValidationMode, UINT32, VALIDATION, PVRSRV_APPHINT_GPIOVALIDATIONMODE, NO_PARAM_TABLE ) \ +X(RGXBVNC, STRING, ALWAYS, PVRSRV_APPHINT_RGXBVNC, NO_PARAM_TABLE ) \ +\ +X(FWContextSwitchCrossDM, UINT32, ALWAYS, 0, NO_PARAM_TABLE ) \ +X(ValidateIrq, BOOL, VALIDATION, PVRSRV_APPHINT_VALIDATEIRQ, 
NO_PARAM_TABLE ) \ +\ +X(TPUTrilinearFracMaskPDM, UINT32, VALIDATION, 0xF, NO_PARAM_TABLE ) \ +X(TPUTrilinearFracMaskVDM, UINT32, VALIDATION, 0xF, NO_PARAM_TABLE ) \ +X(TPUTrilinearFracMaskCDM, UINT32, VALIDATION, 0xF, NO_PARAM_TABLE ) \ +X(TPUTrilinearFracMaskTDM, UINT32, VALIDATION, 0xF, NO_PARAM_TABLE ) \ +X(HTBufferSizeInKB, UINT32, ALWAYS, PVRSRV_APPHINT_HTBUFFERSIZE, NO_PARAM_TABLE ) \ +X(FWTraceBufSizeInDWords, UINT32, ALWAYS, PVRSRV_APPHINT_FWTRACEBUFSIZEINDWORDS, NO_PARAM_TABLE ) \ +\ +X(EnablePageFaultDebug, BOOL, ALWAYS, PVRSRV_APPHINT_ENABLEPAGEFAULTDEBUG, NO_PARAM_TABLE ) \ +X(EnableFullSyncTracking, BOOL, ALWAYS, PVRSRV_APPHINT_ENABLEFULLSYNCTRACKING, NO_PARAM_TABLE ) \ +X(IgnoreHWReportedBVNC, BOOL, ALWAYS, PVRSRV_APPHINT_IGNOREHWREPORTEDBVNC, NO_PARAM_TABLE ) \ +\ +X(PhysMemTestPasses, UINT32, ALWAYS, PVRSRV_APPHINT_PHYSMEMTESTPASSES, NO_PARAM_TABLE ) \ +\ +X(FBCDCVersionOverride, UINT32, VALIDATION, PVRSRV_APPHINT_FBCDCVERSIONOVERRIDE, NO_PARAM_TABLE ) \ +X(TestSLRInterval, UINT32, VALIDATION, PVRSRV_APPHINT_TESTSLRINTERVAL, NO_PARAM_TABLE ) \ +X(EnablePollOnChecksumErrorStatus, UINT32, VALIDATION, 0, NO_PARAM_TABLE ) \ +X(RiscvDmiTest, BOOL, VALIDATION, PVRSRV_APPHINT_RISCVDMITEST, NO_PARAM_TABLE ) \ +X(DevMemFWHeapPolicy, UINT32, ALWAYS, PVRSRV_APPHINT_FIRMWARE_HEAP_POLICY, NO_PARAM_TABLE ) \ +\ +X(EnableAPMAll, UINT32, VALIDATION, PVRSRV_APPHINT_ENABLEAPM, NO_PARAM_TABLE) + +/* +******************************************************************************* + Debugfs parameters - driver configuration +******************************************************************************/ +#define APPHINT_LIST_DEBUGINFO_COMMON \ +/* name, type, class, default, helper, */ \ +X(EnableHTBLogGroup, UINT32Bitfield, ALWAYS, PVRSRV_APPHINT_ENABLEHTBLOGGROUP, htb_loggroup_tbl ) \ +X(HTBOperationMode, UINT32List, ALWAYS, PVRSRV_APPHINT_HTBOPERATIONMODE, htb_opmode_tbl ) \ +X(EnableFTraceGPU, BOOL, ALWAYS, PVRSRV_APPHINT_ENABLEFTRACEGPU, NO_PARAM_TABLE ) \ 
+X(HWPerfFWFilter, UINT64, ALWAYS, PVRSRV_APPHINT_HWPERFFWFILTER, NO_PARAM_TABLE ) \ +X(HWPerfHostFilter, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFHOSTFILTER, NO_PARAM_TABLE ) \ +X(HWPerfClientFilter_Services, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_SERVICES, NO_PARAM_TABLE ) \ +X(HWPerfClientFilter_EGL, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_EGL, NO_PARAM_TABLE ) \ +X(HWPerfClientFilter_OpenGLES, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENGLES, NO_PARAM_TABLE ) \ +X(HWPerfClientFilter_OpenCL, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENCL, NO_PARAM_TABLE ) \ +X(HWPerfClientFilter_Vulkan, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_VULKAN, NO_PARAM_TABLE ) \ +X(HWPerfClientFilter_OpenGL, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENGL, NO_PARAM_TABLE ) \ +X(CacheOpConfig, UINT32, ALWAYS, PVRSRV_APPHINT_CACHEOPCONFIG, NO_PARAM_TABLE ) \ +X(CacheOpUMKMThresholdSize, UINT32, ALWAYS, PVRSRV_APPHINT_CACHEOPUMKMHRESHOLDSIZE, NO_PARAM_TABLE ) \ +X(TimeCorrClock, UINT32List, ALWAYS, PVRSRV_APPHINT_TIMECORRCLOCK, timecorr_clk_tbl ) + +/* +******************************************************************************* + Debugfs parameters - device configuration +******************************************************************************/ +#define APPHINT_LIST_DEBUGINFO_DEVICE_COMMON \ +/* name, type, class, default, helper, */ \ +/* Device Firmware config */\ +X(AssertOnHWRTrigger, BOOL, ALWAYS, APPHNT_BLDVAR_ASSERTONHWRTRIGGER, NO_PARAM_TABLE ) \ +X(AssertOutOfMemory, BOOL, ALWAYS, PVRSRV_APPHINT_ASSERTOUTOFMEMORY, NO_PARAM_TABLE ) \ +X(CheckMList, BOOL, ALWAYS, PVRSRV_APPHINT_CHECKMLIST, NO_PARAM_TABLE ) \ +X(EnableLogGroup, UINT32Bitfield, ALWAYS, PVRSRV_APPHINT_ENABLELOGGROUP, fwt_loggroup_tbl ) \ +X(FirmwareLogType, UINT32List, ALWAYS, PVRSRV_APPHINT_FIRMWARELOGTYPE, fwt_logtype_tbl ) \ +X(HWRDebugDumpLimit, UINT32, ALWAYS, PVRSRV_APPHINT_HWRDEBUGDUMPLIMIT, NO_PARAM_TABLE ) \ +X(KernelCCBSizeLog2, UINT32, 
VALIDATION, PVRSRV_APPHINT_KCCB_SIZE_LOG2, NO_PARAM_TABLE ) \ +/* Device host config */ \ +X(EnableAPM, UINT32, ALWAYS, PVRSRV_APPHINT_ENABLEAPM, NO_PARAM_TABLE ) \ +X(DisableFEDLogging, BOOL, ALWAYS, PVRSRV_APPHINT_DISABLEFEDLOGGING, NO_PARAM_TABLE ) \ +X(ZeroFreelist, BOOL, ALWAYS, PVRSRV_APPHINT_ZEROFREELIST, NO_PARAM_TABLE ) \ +X(DisablePDumpPanic, BOOL, PDUMP, PVRSRV_APPHINT_DISABLEPDUMPPANIC, NO_PARAM_TABLE ) \ +X(EnableFWPoisonOnFree, BOOL, ALWAYS, PVRSRV_APPHINT_ENABLEFWPOISONONFREE, NO_PARAM_TABLE ) \ +X(GPUUnitsPowerChange, BOOL, VALIDATION, PVRSRV_APPHINT_GPUUNITSPOWERCHANGE, NO_PARAM_TABLE ) \ + +/* +******************************************************************************* + Mapping between debugfs parameters and module parameters. + This mapping is used to initialise device specific apphints from module + parameters. +******************************************************************************/ +#define APPHINT_LIST_DEBUIGINFO_DEVICE_X_MODPARAM_INIT_COMMON \ +/* debuginfo device apphint name modparam name */ \ +X(EnableAPM, EnableAPMAll) + +/* +******************************************************************************* + * Types used in the APPHINT_LIST_ lists must be defined here. + * New types require specific handling code to be added +******************************************************************************/ +#define APPHINT_DATA_TYPE_LIST \ +X(BOOL) \ +X(UINT64) \ +X(UINT32) \ +X(UINT32Bitfield) \ +X(UINT32List) \ +X(STRING) + +#define APPHINT_CLASS_LIST \ +X(ALWAYS) \ +X(NEVER) \ +X(DEBUG) \ +X(PDUMP) \ +X(VALIDATION) \ +X(GPUVIRT_VAL) + +/* +******************************************************************************* + Visibility control for module parameters + These bind build variables to AppHint Visibility Groups. 
+******************************************************************************/ +#define APPHINT_ENABLED_CLASS_ALWAYS IMG_TRUE +#define APPHINT_ENABLED_CLASS_NEVER IMG_FALSE +#define apphint_modparam_class_ALWAYS(a, b, c) apphint_modparam_enable(a, b, c) +#if defined(DEBUG) + #define APPHINT_ENABLED_CLASS_DEBUG IMG_TRUE + #define apphint_modparam_class_DEBUG(a, b, c) apphint_modparam_enable(a, b, c) +#else + #define APPHINT_ENABLED_CLASS_DEBUG IMG_FALSE + #define apphint_modparam_class_DEBUG(a, b, c) +#endif +#if defined(PDUMP) + #define APPHINT_ENABLED_CLASS_PDUMP IMG_TRUE + #define apphint_modparam_class_PDUMP(a, b, c) apphint_modparam_enable(a, b, c) +#else + #define APPHINT_ENABLED_CLASS_PDUMP IMG_FALSE + #define apphint_modparam_class_PDUMP(a, b, c) +#endif +#if defined(SUPPORT_VALIDATION) + #define APPHINT_ENABLED_CLASS_VALIDATION IMG_TRUE + #define apphint_modparam_class_VALIDATION(a, b, c) apphint_modparam_enable(a, b, c) +#else + #define APPHINT_ENABLED_CLASS_VALIDATION IMG_FALSE + #define apphint_modparam_class_VALIDATION(a, b, c) +#endif +#if defined(SUPPORT_GPUVIRT_VALIDATION) + #define APPHINT_ENABLED_CLASS_GPUVIRT_VAL IMG_TRUE + #define apphint_modparam_class_GPUVIRT_VAL(a, b, c) apphint_modparam_enable(a, b, c) +#else + #define APPHINT_ENABLED_CLASS_GPUVIRT_VAL IMG_FALSE + #define apphint_modparam_class_GPUVIRT_VAL(a, b, c) +#endif + +/* +******************************************************************************* + AppHint defaults based on other build parameters +******************************************************************************/ +#if defined(ASSERTONHWRTRIGGER_DEFAULT_ENABLED) + #define APPHNT_BLDVAR_ASSERTONHWRTRIGGER 1 +#else + #define APPHNT_BLDVAR_ASSERTONHWRTRIGGER 0 +#endif +#if defined(DEBUG) + #define APPHNT_BLDVAR_DEBUG 1 + #define APPHNT_BLDVAR_DBGDUMPLIMIT RGXFWIF_HWR_DEBUG_DUMP_ALL +#else + #define APPHNT_BLDVAR_DEBUG 0 + #define APPHNT_BLDVAR_DBGDUMPLIMIT 1 +#endif +#if defined(PDUMP) +#define 
APPHNT_BLDVAR_ENABLESIGNATURECHECKS IMG_TRUE +#else +#define APPHNT_BLDVAR_ENABLESIGNATURECHECKS IMG_FALSE +#endif +#if defined(DEBUG) || defined(SUPPORT_VALIDATION) +#define APPHNT_BLDVAR_ENABLEPAGEFAULTDEBUG IMG_TRUE +#else +#define APPHNT_BLDVAR_ENABLEPAGEFAULTDEBUG IMG_FALSE +#endif + +#if defined(DEBUG) + #define APPHNT_PHYSMEMTEST_ENABLE 1 +#else + #define APPHNT_PHYSMEMTEST_ENABLE 0 +#endif + +/* Data types and actions */ +typedef enum { + APPHINT_DATA_TYPE_INVALID = 0, +#define X(a) APPHINT_DATA_TYPE_ ## a, + APPHINT_DATA_TYPE_LIST +#undef X + APPHINT_DATA_TYPE_MAX +} APPHINT_DATA_TYPE; + +typedef enum { +#define X(a) APPHINT_CLASS_ ## a, + APPHINT_CLASS_LIST +#undef X + APPHINT_CLASS_MAX +} APPHINT_CLASS; + +#endif /* KM_APPHINT_DEFS_COMMON_H */ diff --git a/drivers/gpu/drm/phytium/octopus/linkage.h b/drivers/gpu/drm/phytium/octopus/linkage.h new file mode 100644 index 000000000000..b0ac7cba1eac --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/linkage.h @@ -0,0 +1,52 @@ +/*************************************************************************/ /*! +@File +@Title Linux specific Services code internal interfaces +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Interfaces between various parts of the Linux specific + Services code, that don't have any other obvious + header file to go into. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(LINKAGE_H) +#define LINKAGE_H + +PVRSRV_ERROR PVROSFuncInit(void); +void PVROSFuncDeInit(void); + +#endif /* !defined(LINKAGE_H) */ diff --git a/drivers/gpu/drm/phytium/octopus/linux_sw_sync.h b/drivers/gpu/drm/phytium/octopus/linux_sw_sync.h new file mode 100644 index 000000000000..d0afb76e3c17 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/linux_sw_sync.h @@ -0,0 +1,66 @@ +/*************************************************************************/ /*! +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef _UAPI_LINUX_PVR_SW_SYNC_H +#define _UAPI_LINUX_PVR_SW_SYNC_H + +#if defined(SUPPORT_NATIVE_FENCE_SYNC) + +#include + +#include "pvrsrv_sync_km.h" + +struct pvr_sw_sync_create_fence_data { + char name[PVRSRV_SYNC_NAME_LENGTH]; + __s32 fence; + __u32 pad; + __u64 sync_pt_idx; +}; + +struct pvr_sw_timeline_advance_data { + __u64 sync_pt_idx; +}; + +#define PVR_SW_SYNC_IOC_MAGIC 'W' +#define PVR_SW_SYNC_IOC_CREATE_FENCE _IOWR(PVR_SW_SYNC_IOC_MAGIC, 0, struct pvr_sw_sync_create_fence_data) +#define PVR_SW_SYNC_IOC_INC _IOR(PVR_SW_SYNC_IOC_MAGIC, 1, struct pvr_sw_timeline_advance_data) + +#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) */ +#endif diff --git a/drivers/gpu/drm/phytium/octopus/lists.c b/drivers/gpu/drm/phytium/octopus/lists.c new file mode 100644 index 000000000000..156c4f11700f --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/lists.c @@ -0,0 +1,60 @@ +/*************************************************************************/ /*! +@File +@Title Linked list shared functions implementation. +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Description Implementation of the list iterators for types shared among + more than one file in the services code. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "lists.h" + +/*=================================================================== + LIST ITERATOR FUNCTIONS USED IN MORE THAN ONE FILE (those used just + once are implemented locally). + ===================================================================*/ + +IMPLEMENT_LIST_ANY(PVRSRV_DEVICE_NODE) +IMPLEMENT_LIST_ANY_2(PVRSRV_DEVICE_NODE, IMG_BOOL, IMG_FALSE) +IMPLEMENT_LIST_ANY_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK) +IMPLEMENT_LIST_ANY_VA(PVRSRV_DEVICE_NODE) +IMPLEMENT_LIST_ANY_VA_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK) +IMPLEMENT_LIST_FOR_EACH(PVRSRV_DEVICE_NODE) +IMPLEMENT_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE) +IMPLEMENT_LIST_INSERT_TAIL(PVRSRV_DEVICE_NODE) +IMPLEMENT_LIST_REMOVE(PVRSRV_DEVICE_NODE) diff --git a/drivers/gpu/drm/phytium/octopus/lists.h b/drivers/gpu/drm/phytium/octopus/lists.h new file mode 100644 index 000000000000..f1e089a9018a --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/lists.h @@ -0,0 +1,355 @@ +/*************************************************************************/ /*! +@File +@Title Linked list shared functions templates. +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Definition of the linked list function templates. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef LISTS_UTILS_H +#define LISTS_UTILS_H + +/* instruct QAC to ignore warnings about the following custom formatted macros */ +/* PRQA S 0881,3410 ++ */ +#include +#include "img_types.h" +#include "device.h" +#include "power.h" + +/* + - USAGE - + + The list functions work with any structure that provides the fields psNext and + ppsThis. In order to make a function available for a given type, it is required + to use the function template macro that creates the actual code. + + There are 5 main types of functions: + - INSERT : given a pointer to the head pointer of the list and a pointer + to the node, inserts it as the new head. + - INSERT TAIL : given a pointer to the head pointer of the list and a pointer + to the node, inserts the node at the tail of the list. + - REMOVE : given a pointer to a node, removes it from its list. + - FOR EACH : apply a function over all the elements of a list. + - ANY : apply a function over the elements of a list, until one of them + return a non null value, and then returns it. + + The two last functions can have a variable argument form, with allows to pass + additional parameters to the callback function. In order to do this, the + callback function must take two arguments, the first is the current node and + the second is a list of variable arguments (va_list). + + The ANY functions have also another for which specifies the return type of the + callback function and the default value returned by the callback function. + +*/ + +/*************************************************************************/ /*! +@Function List_##TYPE##_ForEach +@Description Apply a callback function to all the elements of a list. +@Input psHead The head of the list to be processed. +@Input pfnCallBack The function to be applied to each element of the list. 
+*/ /**************************************************************************/ +#define DECLARE_LIST_FOR_EACH(TYPE) \ +void List_##TYPE##_ForEach(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode)) + +#define IMPLEMENT_LIST_FOR_EACH(TYPE) \ +void List_##TYPE##_ForEach(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode))\ +{\ + while (psHead)\ + {\ + pfnCallBack(psHead);\ + psHead = psHead->psNext;\ + }\ +} + +/*************************************************************************/ /*! +@Function List_##TYPE##_ForEachSafe +@Description Apply a callback function to all the elements of a list. Do it + in a safe way that handles the fact that a node might remove + itself from the list during the iteration. +@Input psHead The head of the list to be processed. +@Input pfnCallBack The function to be applied to each element of the list. +*/ /**************************************************************************/ +#define DECLARE_LIST_FOR_EACH_SAFE(TYPE) \ +void List_##TYPE##_ForEachSafe(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode)) + +#define IMPLEMENT_LIST_FOR_EACH_SAFE(TYPE) \ +void List_##TYPE##_ForEachSafe(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode))\ +{\ + TYPE *psNext;\ +\ + while (psHead)\ + {\ + psNext = psHead->psNext; \ + pfnCallBack(psHead);\ + psHead = psNext;\ + }\ +} + + +#define DECLARE_LIST_FOR_EACH_VA(TYPE) \ +void List_##TYPE##_ForEach_va(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode, va_list va), ...) + +#define IMPLEMENT_LIST_FOR_EACH_VA(TYPE) \ +void List_##TYPE##_ForEach_va(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode, va_list va), ...) \ +{\ + va_list ap;\ + while (psHead)\ + {\ + va_start(ap, pfnCallBack);\ + pfnCallBack(psHead, ap);\ + psHead = psHead->psNext;\ + va_end(ap);\ + }\ +} + + +/*************************************************************************/ /*! +@Function List_##TYPE##_Any +@Description Applies a callback function to the elements of a list until + the function returns a non null value, then returns it. 
+@Input psHead The head of the list to be processed. +@Input pfnCallBack The function to be applied to each element of the list. +@Return The first non null value returned by the callback function. +*/ /**************************************************************************/ +#define DECLARE_LIST_ANY(TYPE) \ +void* List_##TYPE##_Any(TYPE *psHead, void* (*pfnCallBack)(TYPE* psNode)) + +#define IMPLEMENT_LIST_ANY(TYPE) \ +void* List_##TYPE##_Any(TYPE *psHead, void* (*pfnCallBack)(TYPE* psNode))\ +{ \ + void *pResult;\ + TYPE *psNextNode;\ + pResult = NULL;\ + psNextNode = psHead;\ + while (psHead && !pResult)\ + {\ + psNextNode = psNextNode->psNext;\ + pResult = pfnCallBack(psHead);\ + psHead = psNextNode;\ + }\ + return pResult;\ +} + + +/*with variable arguments, that will be passed as a va_list to the callback function*/ + +#define DECLARE_LIST_ANY_VA(TYPE) \ +void* List_##TYPE##_Any_va(TYPE *psHead, void*(*pfnCallBack)(TYPE* psNode, va_list va), ...) + +#define IMPLEMENT_LIST_ANY_VA(TYPE) \ +void* List_##TYPE##_Any_va(TYPE *psHead, void*(*pfnCallBack)(TYPE* psNode, va_list va), ...)\ +{\ + va_list ap;\ + TYPE *psNextNode;\ + void* pResult = NULL;\ + while (psHead && !pResult)\ + {\ + psNextNode = psHead->psNext;\ + va_start(ap, pfnCallBack);\ + pResult = pfnCallBack(psHead, ap);\ + va_end(ap);\ + psHead = psNextNode;\ + }\ + return pResult;\ +} + +/*those ones are for extra type safety, so there's no need to use castings for the results*/ + +#define DECLARE_LIST_ANY_2(TYPE, RTYPE, CONTINUE) \ +RTYPE List_##TYPE##_##RTYPE##_Any(TYPE *psHead, RTYPE (*pfnCallBack)(TYPE* psNode)) + +#define IMPLEMENT_LIST_ANY_2(TYPE, RTYPE, CONTINUE) \ +RTYPE List_##TYPE##_##RTYPE##_Any(TYPE *psHead, RTYPE (*pfnCallBack)(TYPE* psNode))\ +{ \ + RTYPE result;\ + TYPE *psNextNode;\ + result = CONTINUE;\ + psNextNode = psHead;\ + while (psHead && result == CONTINUE)\ + {\ + psNextNode = psNextNode->psNext;\ + result = pfnCallBack(psHead);\ + psHead = psNextNode;\ + }\ + return 
result;\ +} + + +#define DECLARE_LIST_ANY_VA_2(TYPE, RTYPE, CONTINUE) \ +RTYPE List_##TYPE##_##RTYPE##_Any_va(TYPE *psHead, RTYPE(*pfnCallBack)(TYPE* psNode, va_list va), ...) + +#define IMPLEMENT_LIST_ANY_VA_2(TYPE, RTYPE, CONTINUE) \ +RTYPE List_##TYPE##_##RTYPE##_Any_va(TYPE *psHead, RTYPE(*pfnCallBack)(TYPE* psNode, va_list va), ...)\ +{\ + va_list ap;\ + TYPE *psNextNode;\ + RTYPE result = CONTINUE;\ + while (psHead && result == CONTINUE)\ + {\ + psNextNode = psHead->psNext;\ + va_start(ap, pfnCallBack);\ + result = pfnCallBack(psHead, ap);\ + va_end(ap);\ + psHead = psNextNode;\ + }\ + return result;\ +} + + +/*************************************************************************/ /*! +@Function List_##TYPE##_Remove +@Description Removes a given node from the list. +@Input psNode The pointer to the node to be removed. +*/ /**************************************************************************/ +#define DECLARE_LIST_REMOVE(TYPE) \ +void List_##TYPE##_Remove(TYPE *psNode) + +#define IMPLEMENT_LIST_REMOVE(TYPE) \ +void List_##TYPE##_Remove(TYPE *psNode)\ +{\ + (*psNode->ppsThis)=psNode->psNext;\ + if (psNode->psNext)\ + {\ + psNode->psNext->ppsThis = psNode->ppsThis;\ + }\ +} + +/*************************************************************************/ /*! +@Function List_##TYPE##_Insert +@Description Inserts a given node at the beginning of the list. +@Input psHead The pointer to the pointer to the head node. +@Input psNode The pointer to the node to be inserted. 
+*/ /**************************************************************************/ +#define DECLARE_LIST_INSERT(TYPE) \ +void List_##TYPE##_Insert(TYPE **ppsHead, TYPE *psNewNode) + +#define IMPLEMENT_LIST_INSERT(TYPE) \ +void List_##TYPE##_Insert(TYPE **ppsHead, TYPE *psNewNode)\ +{\ + psNewNode->ppsThis = ppsHead;\ + psNewNode->psNext = *ppsHead;\ + *ppsHead = psNewNode;\ + if (psNewNode->psNext)\ + {\ + psNewNode->psNext->ppsThis = &(psNewNode->psNext);\ + }\ +} + +/*************************************************************************/ /*! +@Function List_##TYPE##_InsertTail +@Description Inserts a given node at the end of the list. +@Input psHead The pointer to the pointer to the head node. +@Input psNode The pointer to the node to be inserted. +*/ /**************************************************************************/ +#define DECLARE_LIST_INSERT_TAIL(TYPE) \ +void List_##TYPE##_InsertTail(TYPE **ppsHead, TYPE *psNewNode) + +#define IMPLEMENT_LIST_INSERT_TAIL(TYPE) \ +void List_##TYPE##_InsertTail(TYPE **ppsHead, TYPE *psNewNode)\ +{\ + TYPE *psTempNode = *ppsHead;\ + if (psTempNode != NULL)\ + {\ + while (psTempNode->psNext)\ + psTempNode = psTempNode->psNext;\ + ppsHead = &psTempNode->psNext;\ + }\ + psNewNode->ppsThis = ppsHead;\ + psNewNode->psNext = NULL;\ + *ppsHead = psNewNode;\ +} + +/*************************************************************************/ /*! +@Function List_##TYPE##_Reverse +@Description Reverse a list in place +@Input ppsHead The pointer to the pointer to the head node. 
+*/ /**************************************************************************/ +#define DECLARE_LIST_REVERSE(TYPE) \ +void List_##TYPE##_Reverse(TYPE **ppsHead) + +#define IMPLEMENT_LIST_REVERSE(TYPE) \ +void List_##TYPE##_Reverse(TYPE **ppsHead)\ +{\ + TYPE *psTmpNode1; \ + TYPE *psTmpNode2; \ + TYPE *psCurNode; \ + psTmpNode1 = NULL; \ + psCurNode = *ppsHead; \ + while (psCurNode) { \ + psTmpNode2 = psCurNode->psNext; \ + psCurNode->psNext = psTmpNode1; \ + psTmpNode1 = psCurNode; \ + psCurNode = psTmpNode2; \ + if (psCurNode) \ + { \ + psTmpNode1->ppsThis = &(psCurNode->psNext); \ + } \ + else \ + { \ + psTmpNode1->ppsThis = ppsHead; \ + } \ + } \ + *ppsHead = psTmpNode1; \ +} + +#define IS_LAST_ELEMENT(x) ((x)->psNext == NULL) + + +DECLARE_LIST_ANY(PVRSRV_DEVICE_NODE); +DECLARE_LIST_ANY_2(PVRSRV_DEVICE_NODE, IMG_BOOL, IMG_FALSE); +DECLARE_LIST_ANY_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK); +DECLARE_LIST_ANY_VA(PVRSRV_DEVICE_NODE); +DECLARE_LIST_ANY_VA_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK); +DECLARE_LIST_FOR_EACH(PVRSRV_DEVICE_NODE); +DECLARE_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE); +DECLARE_LIST_INSERT_TAIL(PVRSRV_DEVICE_NODE); +DECLARE_LIST_REMOVE(PVRSRV_DEVICE_NODE); + +#undef DECLARE_LIST_ANY_2 +#undef DECLARE_LIST_ANY_VA +#undef DECLARE_LIST_ANY_VA_2 +#undef DECLARE_LIST_FOR_EACH +#undef DECLARE_LIST_FOR_EACH_VA +#undef DECLARE_LIST_INSERT +#undef DECLARE_LIST_REMOVE + +#endif + +/* re-enable warnings */ +/* PRQA S 0881,3410 -- */ diff --git a/drivers/gpu/drm/phytium/octopus/lock.h b/drivers/gpu/drm/phytium/octopus/lock.h new file mode 100644 index 000000000000..3e38dfa60e15 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/lock.h @@ -0,0 +1,425 @@ +/*************************************************************************/ /*! +@File lock.h +@Title Locking interface +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Description Services internal locking interface +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef LOCK_H
+#define LOCK_H
+
+/* In Linux kernel mode we are using the kernel mutex implementation directly
+ * with macros. This allows us to use the kernel lockdep feature for lock
+ * debugging. */
+#include "lock_types.h"
+
+#if defined(__linux__) && defined(__KERNEL__)
+
+#include "allocmem.h"
+#include <linux/atomic.h> /* NOTE(review): include target was stripped in transit; atomic_* wrappers below need it -- verify against upstream */
+
+#define OSLockCreateNoStats(phLock) ({ \
+	PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \
+	*(phLock) = OSAllocMemNoStats(sizeof(struct mutex)); \
+	if (*(phLock)) { mutex_init(*(phLock)); e = PVRSRV_OK; }; \
+	e;})
+#define OSLockCreate(phLock) ({ \
+	PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \
+	*(phLock) = OSAllocMem(sizeof(struct mutex)); \
+	if (*(phLock)) { mutex_init(*(phLock)); e = PVRSRV_OK; }; \
+	e;})
+#define OSLockDestroy(hLock) ({mutex_destroy((hLock)); OSFreeMem((hLock)); PVRSRV_OK;})
+#define OSLockDestroyNoStats(hLock) ({mutex_destroy((hLock)); OSFreeMemNoStats((hLock)); PVRSRV_OK;})
+
+#define OSLockAcquire(hLock) ({mutex_lock((hLock)); PVRSRV_OK;})
+#define OSLockAcquireNested(hLock, subclass) ({mutex_lock_nested((hLock), (subclass)); PVRSRV_OK;})
+#define OSLockRelease(hLock) ({mutex_unlock((hLock)); PVRSRV_OK;})
+
+#define OSLockIsLocked(hLock) ((mutex_is_locked((hLock)) == 1) ? IMG_TRUE : IMG_FALSE)
+#define OSTryLockAcquire(hLock) ((mutex_trylock(hLock) == 1) ? IMG_TRUE : IMG_FALSE)
+
+#define OSSpinLockCreate(_ppsLock) ({ \
+	PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \
+	*(_ppsLock) = OSAllocMem(sizeof(spinlock_t)); \
+	if (*(_ppsLock)) {spin_lock_init(*(_ppsLock)); e = PVRSRV_OK;} \
+	e;})
+#define OSSpinLockDestroy(_psLock) ({OSFreeMem(_psLock);})
+
+typedef unsigned long OS_SPINLOCK_FLAGS;
+#define OSSpinLockAcquire(_pLock, _flags) spin_lock_irqsave(_pLock, _flags)
+#define OSSpinLockRelease(_pLock, _flags) spin_unlock_irqrestore(_pLock, _flags)
+
+/* These _may_ be reordered or optimized away entirely by the compiler/hw */
+#define OSAtomicRead(pCounter) atomic_read(pCounter)
+#define OSAtomicWrite(pCounter, i) atomic_set(pCounter, i)
+
+/* The following atomic operations, in addition to being SMP-safe, also
+   imply a memory barrier around the operation */
+#define OSAtomicIncrement(pCounter) atomic_inc_return(pCounter)
+#define OSAtomicDecrement(pCounter) atomic_dec_return(pCounter)
+#define OSAtomicCompareExchange(pCounter, oldv, newv) atomic_cmpxchg(pCounter,oldv,newv)
+#define OSAtomicExchange(pCounter, iNewVal) atomic_xchg(pCounter, iNewVal)
+
+static inline IMG_INT OSAtomicOr(ATOMIC_T *pCounter, IMG_INT iVal)
+{
+	IMG_INT iOldVal, iLastVal, iNewVal;
+
+	iLastVal = OSAtomicRead(pCounter);
+	do
+	{
+		iOldVal = iLastVal;
+		iNewVal = iOldVal | iVal;
+
+		iLastVal = OSAtomicCompareExchange(pCounter, iOldVal, iNewVal);
+	}
+	while (iOldVal != iLastVal);
+
+	return iNewVal;
+}
+
+#define OSAtomicAdd(pCounter, incr) atomic_add_return(incr,pCounter)
+#define OSAtomicAddUnless(pCounter, incr, test) atomic_fetch_add_unless(pCounter,incr,test) /* __atomic_add_unless() was renamed in Linux 4.18; old name fails on the 4.19 target */
+
+#define OSAtomicSubtract(pCounter, incr) atomic_add_return(-(incr),pCounter)
+#define OSAtomicSubtractUnless(pCounter, incr, test) OSAtomicAddUnless(pCounter, -(incr), test)
+
+#else /* defined(__linux__) && defined(__KERNEL__) */
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+/**************************************************************************/ /*! +@Function OSLockCreate +@Description Creates an operating system lock object. +@Output phLock The created lock. +@Return PVRSRV_OK on success. PVRSRV_ERROR_OUT_OF_MEMORY if the driver + cannot allocate CPU memory needed for the lock. + PVRSRV_ERROR_INIT_FAILURE if the Operating System fails to + allocate the lock. + */ /**************************************************************************/ +IMG_INTERNAL +PVRSRV_ERROR OSLockCreate(POS_LOCK *phLock); +#if defined(INTEGRITY_OS) +#define OSLockCreateNoStats OSLockCreate +#endif + +/**************************************************************************/ /*! +@Function OSLockDestroy +@Description Destroys an operating system lock object. +@Input hLock The lock to be destroyed. +@Return None. + */ /**************************************************************************/ +IMG_INTERNAL +PVRSRV_ERROR OSLockDestroy(POS_LOCK hLock); + +#if defined(INTEGRITY_OS) +#define OSLockDestroyNoStats OSLockDestroy +#endif +/**************************************************************************/ /*! +@Function OSLockAcquire +@Description Acquires an operating system lock. + NB. This function must not return until the lock is acquired + (meaning the implementation should not timeout or return with + an error, as the caller will assume they have the lock). +@Input hLock The lock to be acquired. +@Return None. + */ /**************************************************************************/ +IMG_INTERNAL +void OSLockAcquire(POS_LOCK hLock); + +/**************************************************************************/ /*! +@Function OSTryLockAcquire +@Description Try to acquire an operating system lock. + NB. If lock is acquired successfully in the first attempt, + then the function returns true and else it will return false. +@Input hLock The lock to be acquired. +@Return IMG_TRUE if lock acquired successfully, + IMG_FALSE otherwise. 
+ */ /**************************************************************************/ +IMG_INTERNAL +IMG_BOOL OSTryLockAcquire(POS_LOCK hLock); + +/* Nested notation isn't used in UM or other OS's */ +/**************************************************************************/ /*! +@Function OSLockAcquireNested +@Description For operating systems other than Linux, this equates to an + OSLockAcquire() call. On Linux, this function wraps a call + to mutex_lock_nested(). This recognises the scenario where + there may be multiple subclasses within a particular class + of lock. In such cases, the order in which the locks belonging + these various subclasses are acquired is important and must be + validated. +@Input hLock The lock to be acquired. +@Input subclass The subclass of the lock. +@Return None. + */ /**************************************************************************/ +#define OSLockAcquireNested(hLock, subclass) OSLockAcquire((hLock)) + +/**************************************************************************/ /*! +@Function OSLockRelease +@Description Releases an operating system lock. +@Input hLock The lock to be released. +@Return None. + */ /**************************************************************************/ +IMG_INTERNAL +void OSLockRelease(POS_LOCK hLock); + +/**************************************************************************/ /*! +@Function OSLockIsLocked +@Description Tests whether or not an operating system lock is currently + locked. +@Input hLock The lock to be tested. +@Return IMG_TRUE if locked, IMG_FALSE if not locked. 
+ */ /**************************************************************************/ +IMG_INTERNAL +IMG_BOOL OSLockIsLocked(POS_LOCK hLock); + +#if defined(__linux__) + +/* Use GCC intrinsics (read/write semantics consistent with kernel-side implementation) */ +#define OSAtomicRead(pCounter) (*(volatile IMG_INT32 *)&(pCounter)->counter) +#define OSAtomicWrite(pCounter, i) ((pCounter)->counter = (IMG_INT32) i) +#define OSAtomicIncrement(pCounter) __sync_add_and_fetch((&(pCounter)->counter), 1) +#define OSAtomicDecrement(pCounter) __sync_sub_and_fetch((&(pCounter)->counter), 1) +#define OSAtomicCompareExchange(pCounter, oldv, newv) \ + __sync_val_compare_and_swap((&(pCounter)->counter), oldv, newv) +#define OSAtomicOr(pCounter, iVal) __sync_or_and_fetch((&(pCounter)->counter), iVal) + +static inline IMG_UINT32 OSAtomicExchange(ATOMIC_T *pCounter, IMG_UINT32 iNewVal) +{ + IMG_UINT32 iOldVal; + IMG_UINT32 iLastVal; + + iLastVal = OSAtomicRead(pCounter); + do + { + iOldVal = iLastVal; + iLastVal = OSAtomicCompareExchange(pCounter, iOldVal, iNewVal); + } + while (iOldVal != iLastVal); + + return iOldVal; +} + +#define OSAtomicAdd(pCounter, incr) __sync_add_and_fetch((&(pCounter)->counter), incr) +#define OSAtomicAddUnless(pCounter, incr, test) ({ \ + IMG_INT32 c; IMG_INT32 old; \ + c = OSAtomicRead(pCounter); \ + while (1) { \ + if (c == (test)) break; \ + old = OSAtomicCompareExchange(pCounter, c, c+(incr)); \ + if (old == c) break; \ + c = old; \ + } c; }) + +#define OSAtomicSubtract(pCounter, incr) OSAtomicAdd(pCounter, -(incr)) +#define OSAtomicSubtractUnless(pCounter, incr, test) OSAtomicAddUnless(pCounter, -(incr), test) + +#else + +/*************************************************************************/ /*! +@Function OSAtomicRead +@Description Read the value of a variable atomically. + Atomic functions must be implemented in a manner that + is both symmetric multiprocessor (SMP) safe and has a memory + barrier around each operation. 
+@Input pCounter The atomic variable to read +@Return The value of the atomic variable +*/ /**************************************************************************/ +IMG_INTERNAL +IMG_INT32 OSAtomicRead(const ATOMIC_T *pCounter); + +/*************************************************************************/ /*! +@Function OSAtomicWrite +@Description Write the value of a variable atomically. + Atomic functions must be implemented in a manner that + is both symmetric multiprocessor (SMP) safe and has a memory + barrier around each operation. +@Input pCounter The atomic variable to be written to +@Input v The value to write +@Return None +*/ /**************************************************************************/ +IMG_INTERNAL +void OSAtomicWrite(ATOMIC_T *pCounter, IMG_INT32 v); + +/* For the following atomic operations, in addition to being SMP-safe, + should also have a memory barrier around each operation */ +/*************************************************************************/ /*! +@Function OSAtomicIncrement +@Description Increment the value of a variable atomically. + Atomic functions must be implemented in a manner that + is both symmetric multiprocessor (SMP) safe and has a memory + barrier around each operation. +@Input pCounter The atomic variable to be incremented +@Return The new value of *pCounter. +*/ /**************************************************************************/ +IMG_INTERNAL +IMG_INT32 OSAtomicIncrement(ATOMIC_T *pCounter); + +/*************************************************************************/ /*! +@Function OSAtomicDecrement +@Description Decrement the value of a variable atomically. + Atomic functions must be implemented in a manner that + is both symmetric multiprocessor (SMP) safe and has a memory + barrier around each operation. +@Input pCounter The atomic variable to be decremented +@Return The new value of *pCounter. 
+*/ /**************************************************************************/ +IMG_INTERNAL +IMG_INT32 OSAtomicDecrement(ATOMIC_T *pCounter); + +/*************************************************************************/ /*! +@Function OSAtomicAdd +@Description Add a specified value to a variable atomically. + Atomic functions must be implemented in a manner that + is both symmetric multiprocessor (SMP) safe and has a memory + barrier around each operation. +@Input pCounter The atomic variable to add the value to +@Input v The value to be added +@Return The new value of *pCounter. +*/ /**************************************************************************/ +IMG_INTERNAL +IMG_INT32 OSAtomicAdd(ATOMIC_T *pCounter, IMG_INT32 v); + +/*************************************************************************/ /*! +@Function OSAtomicAddUnless +@Description Add a specified value to a variable atomically unless it + already equals a particular value. + Atomic functions must be implemented in a manner that + is both symmetric multiprocessor (SMP) safe and has a memory + barrier around each operation. +@Input pCounter The atomic variable to add the value to +@Input v The value to be added to 'pCounter' +@Input t The test value. If 'pCounter' equals this, + its value will not be adjusted +@Return The old value of *pCounter. +*/ /**************************************************************************/ +IMG_INTERNAL +IMG_INT32 OSAtomicAddUnless(ATOMIC_T *pCounter, IMG_INT32 v, IMG_INT32 t); + +/*************************************************************************/ /*! +@Function OSAtomicSubtract +@Description Subtract a specified value to a variable atomically. + Atomic functions must be implemented in a manner that + is both symmetric multiprocessor (SMP) safe and has a memory + barrier around each operation. +@Input pCounter The atomic variable to subtract the value from +@Input v The value to be subtracted +@Return The new value of *pCounter. 
+*/ /**************************************************************************/ +IMG_INTERNAL +IMG_INT32 OSAtomicSubtract(ATOMIC_T *pCounter, IMG_INT32 v); + +/*************************************************************************/ /*! +@Function OSAtomicSubtractUnless +@Description Subtract a specified value from a variable atomically unless + it already equals a particular value. + Atomic functions must be implemented in a manner that + is both symmetric multiprocessor (SMP) safe and has a memory + barrier around each operation. +@Input pCounter The atomic variable to subtract the value from +@Input v The value to be subtracted from 'pCounter' +@Input t The test value. If 'pCounter' equals this, + its value will not be adjusted +@Return The old value of *pCounter. +*/ /**************************************************************************/ +IMG_INTERNAL +IMG_INT32 OSAtomicSubtractUnless(ATOMIC_T *pCounter, IMG_INT32 v, IMG_INT32 t); + +/*************************************************************************/ /*! +@Function OSAtomicCompareExchange +@Description Set a variable to a given value only if it is currently + equal to a specified value. The whole operation must be atomic. + Atomic functions must be implemented in a manner that + is both symmetric multiprocessor (SMP) safe and has a memory + barrier around each operation. +@Input pCounter The atomic variable to be checked and + possibly updated +@Input oldv The value the atomic variable must have in + order to be modified +@Input newv The value to write to the atomic variable if + it equals 'oldv' +@Return The old value of *pCounter +*/ /**************************************************************************/ +IMG_INTERNAL +IMG_INT32 OSAtomicCompareExchange(ATOMIC_T *pCounter, IMG_INT32 oldv, IMG_INT32 newv); + +/*************************************************************************/ /*! +@Function OSAtomicExchange +@Description Set a variable to a given value and retrieve previous value. 
+ The whole operation must be atomic. + Atomic functions must be implemented in a manner that + is both symmetric multiprocessor (SMP) safe and has a memory + barrier around each operation. +@Input pCounter The atomic variable to be updated +@Input iNewVal The value to write to the atomic variable +@Return The previous value of *pCounter. +*/ /**************************************************************************/ +IMG_INTERNAL +IMG_INT32 OSAtomicExchange(ATOMIC_T *pCounter, IMG_INT32 iNewVal); + +/*************************************************************************/ /*! +@Function OSAtomicOr +@Description Set a variable to the bitwise or of its current value and the + specified value. Equivalent to *pCounter |= iVal. + The whole operation must be atomic. + Atomic functions must be implemented in a manner that + is both symmetric multiprocessor (SMP) safe and has a memory + barrier around each operation. +@Input pCounter The atomic variable to be updated +@Input iVal The value to bitwise or against +@Return The new value of *pCounter. 
+*/ /**************************************************************************/ +IMG_INTERNAL +IMG_INT32 OSAtomicOr(ATOMIC_T *pCounter, IMG_INT32 iVal); + +/* For now, spin-locks are required on Linux only, so other platforms fake + * spinlocks with normal mutex locks */ +typedef unsigned long OS_SPINLOCK_FLAGS; +#define POS_SPINLOCK POS_LOCK +#define OSSpinLockCreate(ppLock) OSLockCreate(ppLock) +#define OSSpinLockDestroy(pLock) OSLockDestroy(pLock) +#define OSSpinLockAcquire(pLock, flags) {flags = 0; OSLockAcquire(pLock);} +#define OSSpinLockRelease(pLock, flags) {flags = 0; OSLockRelease(pLock);} + +#endif /* defined(__linux__) */ +#endif /* defined(__linux__) && defined(__KERNEL__) */ + +#endif /* LOCK_H */ diff --git a/drivers/gpu/drm/phytium/octopus/lock_types.h b/drivers/gpu/drm/phytium/octopus/lock_types.h new file mode 100644 index 000000000000..6dbf1e7b8937 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/lock_types.h @@ -0,0 +1,92 @@ +/*************************************************************************/ /*! +@File lock_types.h +@Title Locking types +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Locking specific enums, defines and structures +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef LOCK_TYPES_H
+#define LOCK_TYPES_H
+
+/* In Linux kernel mode we are using the kernel mutex implementation directly
+ * with macros. This allows us to use the kernel lockdep feature for lock
+ * debugging. */
+#if defined(__linux__) && defined(__KERNEL__)
+
+#include <linux/mutex.h> /* NOTE(review): include target was stripped in transit; struct mutex below -- verify against upstream */
+#include <linux/spinlock.h> /* NOTE(review): restored; spinlock_t (and atomic_t via linux/types.h) below -- verify against upstream */
+/* The mutex is defined as a pointer to be compatible with the other code. This
+ * isn't ideal and usually you wouldn't do that in kernel code. */
+typedef struct mutex *POS_LOCK;
+typedef struct rw_semaphore *POSWR_LOCK;
+typedef spinlock_t *POS_SPINLOCK;
+typedef atomic_t ATOMIC_T;
+
+#else /* defined(__linux__) && defined(__KERNEL__) */
+#include "img_types.h" /* needed for IMG_INT */
+typedef struct _OS_LOCK_ *POS_LOCK;
+
+#if defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS)
+typedef struct _OSWR_LOCK_ *POSWR_LOCK;
+#else /* defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */
+typedef struct _OSWR_LOCK_ {
+	IMG_UINT32 ui32Dummy;
+} *POSWR_LOCK;
+#endif /* defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */
+
+#if defined(__linux__)
+	typedef struct _OS_ATOMIC {IMG_INT32 counter;} ATOMIC_T;
+#elif defined(__QNXNTO__)
+	typedef struct _OS_ATOMIC {IMG_INT32 counter;} ATOMIC_T;
+#elif defined(_WIN32)
+	/*
+	 * Dummy definition. WDDM doesn't use Services, but some headers
+	 * still have to be shared. This is one such case.
+	 */
+	typedef struct _OS_ATOMIC {IMG_INT32 counter;} ATOMIC_T;
+#elif defined(INTEGRITY_OS)
+	/* Only lower 32bits are used in OS ATOMIC APIs to have consistent behaviour across all OS */
+	typedef struct _OS_ATOMIC {IMG_INT64 counter;} ATOMIC_T;
+#else
+	#error "Please type-define an atomic lock for this environment"
+#endif
+
+#endif /* defined(__linux__) && defined(__KERNEL__) */
+
+#endif /* LOCK_TYPES_H */
diff --git a/drivers/gpu/drm/phytium/octopus/log2.h b/drivers/gpu/drm/phytium/octopus/log2.h
new file mode 100644
index 000000000000..ce41262d61f8
--- /dev/null
+++ b/drivers/gpu/drm/phytium/octopus/log2.h
@@ -0,0 +1,417 @@
+/*************************************************************************/ /*!
+@Title          Integer log2 and related functions
+@Copyright      Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef LOG2_H +#define LOG2_H + +#include "img_defs.h" + +/*************************************************************************/ /*! +@Description Determine if a number is a power of two. +@Input n +@Return True if n is a power of 2, false otherwise. True if n == 0. +*/ /**************************************************************************/ +static INLINE IMG_BOOL __const_function IsPower2(uint32_t n) +{ + /* C++ needs this cast. */ + return (IMG_BOOL)((n & (n - 1U)) == 0U); +} + +/*************************************************************************/ /*! +@Description Determine if a number is a power of two. +@Input n +@Return True if n is a power of 2, false otherwise. True if n == 0. +*/ /**************************************************************************/ +static INLINE IMG_BOOL __const_function IsPower2_64(uint64_t n) +{ + /* C++ needs this cast. */ + return (IMG_BOOL)((n & (n - 1U)) == 0U); +} + +/* Code using GNU GCC intrinsics */ +#if (defined(__GNUC__) || defined(__GNUG__)) && !(defined(__clang__) || defined(__INTEL_COMPILER)) + +/* CHAR_BIT is typically found in . For all the platforms where + * CHAR_BIT is not available, defined it here with the assumption that there + * are 8 bits in a byte */ +#ifndef CHAR_BIT +#define CHAR_BIT 8U +#endif + +/*************************************************************************/ /*! +@Description Compute floor(log2(n)) +@Input n +@Return log2(n) rounded down to the nearest integer. Returns 0 if n == 0 +*/ /**************************************************************************/ +static INLINE uint32_t __const_function FloorLog2(uint32_t n) +{ + if (unlikely(n == 0U)) + { + return 0; + } + else + { + uint32_t uNumBits = (uint32_t)CHAR_BIT * (uint32_t)sizeof(n); + return uNumBits - (uint32_t)__builtin_clz(n) - 1U; + } +} + +/*************************************************************************/ /*! 
+@Description Compute floor(log2(n)) +@Input n +@Return log2(n) rounded down to the nearest integer. Returns 0 if n == 0 +*/ /**************************************************************************/ +static INLINE uint32_t __const_function FloorLog2_64(uint64_t n) +{ + if (unlikely(n == 0U)) + { + return 0; + } + else + { + uint32_t uNumBits = (uint32_t)CHAR_BIT * (uint32_t)sizeof(n); + return uNumBits - (uint32_t)__builtin_clzll(n) - 1U; + } +} + +/*************************************************************************/ /*! +@Description Compute ceil(log2(n)) +@Input n +@Return log2(n) rounded up to the nearest integer. Returns 0 if n == 0 +*/ /**************************************************************************/ +static INLINE uint32_t __const_function CeilLog2(uint32_t n) +{ + if (unlikely(n == 0U || n == 1U)) + { + return 0; + } + else + { + uint32_t uNumBits = (uint32_t)CHAR_BIT * (uint32_t)sizeof(n); + + n--; /* Handle powers of 2 */ + return uNumBits - (uint32_t)__builtin_clz(n); + } +} + +/*************************************************************************/ /*! +@Description Compute ceil(log2(n)) +@Input n +@Return log2(n) rounded up to the nearest integer. Returns 0 if n == 0 +*/ /**************************************************************************/ +static INLINE uint32_t __const_function CeilLog2_64(uint64_t n) +{ + if (unlikely(n == 0U || n == 1U)) + { + return 0; + } + else + { + uint32_t uNumBits = (uint32_t)CHAR_BIT * (uint32_t)sizeof(n); + + n--; /* Handle powers of 2 */ + return uNumBits - (uint32_t)__builtin_clzll(n); + } +} + +/*************************************************************************/ /*! 
+@Description Compute log2(n) for exact powers of two only +@Input n Must be a power of two +@Return log2(n) +*/ /**************************************************************************/ +static INLINE uint32_t __const_function ExactLog2(uint32_t n) +{ + return (uint32_t)CHAR_BIT * (uint32_t)sizeof(n) - (uint32_t)__builtin_clz(n) - 1U; +} + +/*************************************************************************/ /*! +@Description Compute log2(n) for exact powers of two only +@Input n Must be a power of two +@Return log2(n) +*/ /**************************************************************************/ +static INLINE uint32_t __const_function ExactLog2_64(uint64_t n) +{ + return (uint32_t)CHAR_BIT * (uint32_t)sizeof(n) - (uint32_t)__builtin_clzll(n) - 1U; +} + +/*************************************************************************/ /*! +@Description Round a non-power-of-two number up to the next power of two. +@Input n +@Return n rounded up to the next power of two. If n is zero or + already a power of two, return n unmodified. +*/ /**************************************************************************/ +static INLINE uint32_t __const_function RoundUpToNextPowerOfTwo(uint32_t n) +{ + /* Cases with n greater than 2^31 needs separate handling + * as result of (1<<32) is undefined. */ + if (unlikely( n == 0U || n > (uint32_t)1 << ((uint32_t)CHAR_BIT * sizeof(n) - 1U))) + { + return 0; + } + + /* Return n if it is already a power of 2 */ + if ((IMG_BOOL)((n & (n - 1U)) == 0U)) + { + return n; + } + + return (uint32_t)1 << ((uint32_t)CHAR_BIT * sizeof(n) - (uint32_t)__builtin_clz(n)); +} + +/*************************************************************************/ /*! +@Description Round a non-power-of-two number up to the next power of two. +@Input n +@Return n rounded up to the next power of two. If n is zero or + already a power of two, return n unmodified. 
+*/ /**************************************************************************/ +static INLINE uint64_t __const_function RoundUpToNextPowerOfTwo_64(uint64_t n) +{ + /* Cases with n greater than 2^63 needs separate handling + * as result of (1<<64) is undefined. */ + if (unlikely( n == 0U || n > (uint64_t)1 << ((uint32_t)CHAR_BIT * sizeof(n) - 1U))) + { + return 0; + } + + /* Return n if it is already a power of 2 */ + if ((IMG_BOOL)((n & (n - 1U)) == 0U)) + { + return n; + } + + return (uint64_t)1 << ((uint64_t)CHAR_BIT * sizeof(n) - (uint64_t)__builtin_clzll(n)); +} + +#else /* #if (defined(__GNUC__) || defined(__GNUG__)) && !(defined(__clang__) || defined(__INTEL_COMPILER)) */ + +/*************************************************************************/ /*! +@Description Round a non-power-of-two number up to the next power of two. +@Input n +@Return n rounded up to the next power of two. If n is zero or + already a power of two, return n unmodified. +*/ /**************************************************************************/ +static INLINE uint32_t __const_function RoundUpToNextPowerOfTwo(uint32_t n) +{ + n--; + n |= n >> 1; /* handle 2 bit numbers */ + n |= n >> 2; /* handle 4 bit numbers */ + n |= n >> 4; /* handle 8 bit numbers */ + n |= n >> 8; /* handle 16 bit numbers */ + n |= n >> 16; /* handle 32 bit numbers */ + n++; + + return n; +} + +/*************************************************************************/ /*! +@Description Round a non-power-of-two number up to the next power of two. +@Input n +@Return n rounded up to the next power of two. If n is zero or + already a power of two, return n unmodified. 
+*/ /**************************************************************************/ +static INLINE uint64_t __const_function RoundUpToNextPowerOfTwo_64(uint64_t n) +{ + n--; + n |= n >> 1; /* handle 2 bit numbers */ + n |= n >> 2; /* handle 4 bit numbers */ + n |= n >> 4; /* handle 8 bit numbers */ + n |= n >> 8; /* handle 16 bit numbers */ + n |= n >> 16; /* handle 32 bit numbers */ + n |= n >> 32; /* handle 64 bit numbers */ + n++; + + return n; +} + +/*************************************************************************/ /*! +@Description Compute floor(log2(n)) +@Input n +@Return log2(n) rounded down to the nearest integer. Returns 0 if n == 0 +*/ /**************************************************************************/ +static INLINE uint32_t __const_function FloorLog2(uint32_t n) +{ + uint32_t log2 = 0; + + while (n >>= 1) + { + log2++; + } + + return log2; +} + +/*************************************************************************/ /*! +@Description Compute floor(log2(n)) +@Input n +@Return log2(n) rounded down to the nearest integer. Returns 0 if n == 0 +*/ /**************************************************************************/ +static INLINE uint32_t __const_function FloorLog2_64(uint64_t n) +{ + uint32_t log2 = 0; + + while (n >>= 1) + { + log2++; + } + + return log2; +} + +/*************************************************************************/ /*! +@Description Compute ceil(log2(n)) +@Input n +@Return log2(n) rounded up to the nearest integer. Returns 0 if n == 0 +*/ /**************************************************************************/ +static INLINE uint32_t __const_function CeilLog2(uint32_t n) +{ + uint32_t log2 = 0; + + if (n == 0) + { + return 0; + } + + n--; /* Handle powers of 2 */ + + while (n) + { + log2++; + n >>= 1; + } + + return log2; +} + +/*************************************************************************/ /*! +@Description Compute ceil(log2(n)) +@Input n +@Return log2(n) rounded up to the nearest integer. 
Returns 0 if n == 0 +*/ /**************************************************************************/ +static INLINE uint32_t __const_function CeilLog2_64(uint64_t n) +{ + uint32_t log2 = 0; + + if (n == 0) + { + return 0; + } + + n--; /* Handle powers of 2 */ + + while (n) + { + log2++; + n >>= 1; + } + + return log2; +} + +/*************************************************************************/ /*! +@Description Compute log2(n) for exact powers of two only +@Input n Must be a power of two +@Return log2(n) +*/ /**************************************************************************/ +static INLINE uint32_t __const_function ExactLog2(uint32_t n) +{ + static const uint32_t b[] = + {0xAAAAAAAA, 0xCCCCCCCC, 0xF0F0F0F0, 0xFF00FF00, 0xFFFF0000}; + uint32_t r = (n & b[0]) != 0; + + r |= (uint32_t) ((n & b[4]) != 0) << 4; + r |= (uint32_t) ((n & b[3]) != 0) << 3; + r |= (uint32_t) ((n & b[2]) != 0) << 2; + r |= (uint32_t) ((n & b[1]) != 0) << 1; + + return r; +} + +/*************************************************************************/ /*! +@Description Compute log2(n) for exact powers of two only +@Input n Must be a power of two +@Return log2(n) +*/ /**************************************************************************/ +static INLINE uint32_t __const_function ExactLog2_64(uint64_t n) +{ + static const uint64_t b[] = + {0xAAAAAAAAAAAAAAAAULL, 0xCCCCCCCCCCCCCCCCULL, + 0xF0F0F0F0F0F0F0F0ULL, 0xFF00FF00FF00FF00ULL, + 0xFFFF0000FFFF0000ULL, 0xFFFFFFFF00000000ULL}; + uint32_t r = (n & b[0]) != 0; + + r |= (uint32_t) ((n & b[5]) != 0) << 5; + r |= (uint32_t) ((n & b[4]) != 0) << 4; + r |= (uint32_t) ((n & b[3]) != 0) << 3; + r |= (uint32_t) ((n & b[2]) != 0) << 2; + r |= (uint32_t) ((n & b[1]) != 0) << 1; + + return r; +} + +#endif /* #if (defined(__GNUC__) || defined(__GNUG__)) && !(defined(__clang__) || defined(__INTEL_COMPILER)) */ + +/*************************************************************************/ /*! 
+@Description Compute floor(log2(size)) , where size is the max of 3 sizes + This is almost always the ONLY EVER valid use of FloorLog2. + Usually CeilLog2() should be used instead. + For a 5x5x1 texture, the 3 miplevels are: + 0: 5x5x1 + 1: 2x2x1 + 2: 1x1x1 + + For an 8x8x1 texture, the 4 miplevels are: + 0: 8x8x1 + 1: 4x4x1 + 2: 2x2x1 + 3: 1x1x1 + + +@Input sizeX, sizeY, sizeZ +@Return Count of mipmap levels for given dimensions +*/ /**************************************************************************/ +static INLINE uint32_t __const_function NumMipLevels(uint32_t sizeX, uint32_t sizeY, uint32_t sizeZ) +{ + + uint32_t maxSize = MAX(MAX(sizeX, sizeY), sizeZ); + return FloorLog2(maxSize) + 1U; +} + +#endif /* LOG2_H */ diff --git a/drivers/gpu/drm/phytium/octopus/mem_utils.c b/drivers/gpu/drm/phytium/octopus/mem_utils.c new file mode 100644 index 000000000000..d00d435579c3 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/mem_utils.c @@ -0,0 +1,449 @@ +/*************************************************************************/ /*! +@File +@Title Memory manipulation functions +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Memory related functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "osfunc_common.h" +#include "img_defs.h" + +/* This workaround is only *required* on ARM64. Avoid building or including + * it by default on other architectures, unless the 'safe memcpy' test flag + * is enabled. (The code should work on other architectures.) + */ + + + +/* NOTE: This C file is compiled with -ffreestanding to avoid pattern matching + * by the compiler to stdlib functions, and it must only use the below + * headers. Do not include any IMG or services headers in this file. 
+ */
+#if defined(__KERNEL__) && defined(__linux__)
+#include <linux/string.h>
+#else
+#include <string.h>
+#endif
+
+/* The attribute "vector_size" will generate floating point instructions
+ * and use FPU registers. In kernel OS, the FPU registers might be corrupted
+ * when CPU is doing context switch because FPU registers are not expected to
+ * be stored.
+ * GCC enables compiler option, -mgeneral-regs-only, by default.
+ * This option restricts the generated code to use general registers only
+ * so that we don't have issues on that.
+ */
+#if defined(__KERNEL__) && defined(__clang__)
+
+#define DEVICE_MEMSETCPY_NON_VECTOR_KM
+#if !defined(BITS_PER_BYTE)
+#define BITS_PER_BYTE (8)
+#endif /* BITS_PER_BYTE */
+
+/* Loading or storing 16 or 32 bytes is only supported on 64-bit machines. */
+#if DEVICE_MEMSETCPY_ALIGN_IN_BYTES > 8
+typedef __uint128_t uint128_t;
+
+typedef struct
+{
+	uint128_t ui128DataFields[2];
+}
+uint256_t;
+#endif
+
+#endif
+
+/* This file is only intended to be used on platforms which use GCC or Clang,
+ * due to its requirement on __attribute__((vector_size(n))), typeof() and
+ * __SIZEOF__ macros.
+ */
+
+#if defined(__GNUC__)
+
+#ifndef MIN
+#define MIN(a, b) \
+	({__typeof(a) _a = (a); __typeof(b) _b = (b); _a > _b ?
_b : _a;}) +#endif + +#if !defined(DEVICE_MEMSETCPY_ALIGN_IN_BYTES) +#define DEVICE_MEMSETCPY_ALIGN_IN_BYTES __SIZEOF_LONG__ +#endif +#if (DEVICE_MEMSETCPY_ALIGN_IN_BYTES & (DEVICE_MEMSETCPY_ALIGN_IN_BYTES - 1)) != 0 +#error "DEVICE_MEMSETCPY_ALIGN_IN_BYTES must be a power of 2" +#endif +#if DEVICE_MEMSETCPY_ALIGN_IN_BYTES < 4 +#error "DEVICE_MEMSETCPY_ALIGN_IN_BYTES must be equal or greater than 4" +#endif + +#if __SIZEOF_POINTER__ != __SIZEOF_LONG__ +#error No support for architectures where void* and long are sized differently +#endif + +#if __SIZEOF_LONG__ > DEVICE_MEMSETCPY_ALIGN_IN_BYTES +/* Meaningless, and harder to do correctly */ +# error Cannot handle DEVICE_MEMSETCPY_ALIGN_IN_BYTES < sizeof(long) +typedef unsigned long block_t; +#elif __SIZEOF_LONG__ <= DEVICE_MEMSETCPY_ALIGN_IN_BYTES +# if defined(DEVICE_MEMSETCPY_NON_VECTOR_KM) +# if DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 8 + typedef uint64_t block_t; +# elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 16 + typedef uint128_t block_t; +# elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 32 + typedef uint256_t block_t; +# endif +# else +typedef unsigned int block_t + __attribute__((vector_size(DEVICE_MEMSETCPY_ALIGN_IN_BYTES))); +# endif +# if defined(__arm64__) || defined(__aarch64__) +# if DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 8 +# define DEVICE_MEMSETCPY_ARM64 +# define REGSZ "w" +# define REGCL "w" +# define BVCLB "r" +# elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 16 +# define DEVICE_MEMSETCPY_ARM64 +# define REGSZ "x" +# define REGCL "x" +# define BVCLB "r" +# elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 32 +# if defined(__ARM_NEON_FP) +# define DEVICE_MEMSETCPY_ARM64 +# define REGSZ "q" +# define REGCL "v" +# define BVCLB "w" +# endif +# endif +# if defined(DEVICE_MEMSETCPY_ARM64) +# if defined(DEVICE_MEMSETCPY_ARM64_NON_TEMPORAL) +# define NSHLD() __asm__ ("dmb nshld") +# define NSHST() __asm__ ("dmb nshst") +# define LDP "ldnp" +# define STP "stnp" +# else +# define NSHLD() +# define NSHST() +# define LDP "ldp" +# define STP 
"stp" +# endif +# if defined(DEVICE_MEMSETCPY_NON_VECTOR_KM) +# if DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 8 +typedef uint32_t block_half_t; +# elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 16 +typedef uint64_t block_half_t; +# elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 32 +typedef uint128_t block_half_t; +# endif +# else + typedef unsigned int block_half_t + __attribute__((vector_size(DEVICE_MEMSETCPY_ALIGN_IN_BYTES / 2))); +# endif +# endif +# endif +#endif + +__attribute__((visibility("hidden"))) +void DeviceMemCopy(void *pvDst, const void *pvSrc, size_t uSize) +{ + volatile const char *pcSrc = pvSrc; + volatile char *pcDst = pvDst; + size_t uPreambleBytes; + int bBlockCopy = 0; + + size_t uSrcUnaligned = (size_t)pcSrc % sizeof(block_t); + size_t uDstUnaligned = (size_t)pcDst % sizeof(block_t); + + if (!uSrcUnaligned && !uDstUnaligned) + { + /* Neither pointer is unaligned. Optimal case. */ + bBlockCopy = 1; + } + else + { + if (uSrcUnaligned == uDstUnaligned) + { + /* Neither pointer is usefully aligned, but they are misaligned in + * the same way, so we can copy a preamble in a slow way, then + * optimize the rest. + */ + uPreambleBytes = MIN(sizeof(block_t) - uDstUnaligned, uSize); + uSize -= uPreambleBytes; + while (uPreambleBytes) + { + *pcDst++ = *pcSrc++; + uPreambleBytes--; + } + + bBlockCopy = 1; + } + else if ((uSrcUnaligned | uDstUnaligned) % sizeof(int) == 0) + { + /* Both pointers are at least 32-bit aligned, and we assume that + * the processor must handle all kinds of 32-bit load-stores. + * NOTE: Could we optimize this with a non-temporal version? 
+ */ + if (uSize >= sizeof(int)) + { + volatile int *piSrc = (int *)((void *)pcSrc); + volatile int *piDst = (int *)((void *)pcDst); + + while (uSize >= sizeof(int)) + { + *piDst++ = *piSrc++; + uSize -= sizeof(int); + } + + pcSrc = (char *)((void *)piSrc); + pcDst = (char *)((void *)piDst); + } + } + } + + if (bBlockCopy && uSize >= sizeof(block_t)) + { + volatile block_t *pSrc = (block_t *)((void *)pcSrc); + volatile block_t *pDst = (block_t *)((void *)pcDst); + +#if defined(DEVICE_MEMSETCPY_ARM64) + NSHLD(); +#endif + + while (uSize >= sizeof(block_t)) + { +#if defined(DEVICE_MEMSETCPY_ARM64) + __asm__ (LDP " " REGSZ "0, " REGSZ "1, [%[pSrc]]\n\t" + STP " " REGSZ "0, " REGSZ "1, [%[pDst]]" + : + : [pSrc] "r" (pSrc), [pDst] "r" (pDst) + : "memory", REGCL "0", REGCL "1"); +#else + *pDst = *pSrc; +#endif + pDst++; + pSrc++; + uSize -= sizeof(block_t); + } + +#if defined(DEVICE_MEMSETCPY_ARM64) + NSHST(); +#endif + + pcSrc = (char *)((void *)pSrc); + pcDst = (char *)((void *)pDst); + } + + while (uSize) + { + *pcDst++ = *pcSrc++; + uSize--; + } +} + +__attribute__((visibility("hidden"))) +void DeviceMemSet(void *pvDst, unsigned char ui8Value, size_t uSize) +{ + volatile char *pcDst = pvDst; + size_t uPreambleBytes; + + size_t uDstUnaligned = (size_t)pcDst % sizeof(block_t); + + if (uDstUnaligned) + { + uPreambleBytes = MIN(sizeof(block_t) - uDstUnaligned, uSize); + uSize -= uPreambleBytes; + while (uPreambleBytes) + { + *pcDst++ = ui8Value; + uPreambleBytes--; + } + } + + if (uSize >= sizeof(block_t)) + { + volatile block_t *pDst = (block_t *)((void *)pcDst); + size_t i, uBlockSize; +#if defined(DEVICE_MEMSETCPY_ARM64) + typedef block_half_t BLK_t; +#else + typedef block_t BLK_t; +#endif /* defined(DEVICE_MEMSETCPY_ARM64) */ + +#if defined(DEVICE_MEMSETCPY_NON_VECTOR_KM) + BLK_t bValue = 0; + + uBlockSize = sizeof(BLK_t) / sizeof(ui8Value); + + for (i = 0; i < uBlockSize; i++) + { + bValue |= (BLK_t)ui8Value << ((uBlockSize - i - 1) * BITS_PER_BYTE); + } +#else + 
BLK_t bValue = {0}; + + uBlockSize = sizeof(bValue) / sizeof(unsigned int); + for (i = 0; i < uBlockSize; i++) + bValue[i] = ui8Value << 24U | + ui8Value << 16U | + ui8Value << 8U | + ui8Value; +#endif /* defined(DEVICE_MEMSETCPY_NON_VECTOR_KM) */ + +#if defined(DEVICE_MEMSETCPY_ARM64) + NSHLD(); +#endif + + while (uSize >= sizeof(block_t)) + { +#if defined(DEVICE_MEMSETCPY_ARM64) + __asm__ (STP " %" REGSZ "[bValue], %" REGSZ "[bValue], [%[pDst]]" + : + : [bValue] BVCLB (bValue), [pDst] "r" (pDst) + : "memory"); +#else + *pDst = bValue; +#endif + pDst++; + uSize -= sizeof(block_t); + } + +#if defined(DEVICE_MEMSETCPY_ARM64) + NSHST(); +#endif + + pcDst = (char *)((void *)pDst); + } + + while (uSize) + { + *pcDst++ = ui8Value; + uSize--; + } +} + +#endif /* defined(__GNUC__) */ + +/* Potentially very slow (but safe) fallbacks for non-GNU C compilers */ +IMG_INTERNAL +void DeviceMemCopyBytes(void *pvDst, const void *pvSrc, size_t uSize) +{ + volatile const char *pcSrc = pvSrc; + volatile char *pcDst = pvDst; + + while (uSize) + { + *pcDst++ = *pcSrc++; + uSize--; + } +} + +IMG_INTERNAL +void DeviceMemSetBytes(void *pvDst, unsigned char ui8Value, size_t uSize) +{ + volatile char *pcDst = pvDst; + + while (uSize) + { + *pcDst++ = ui8Value; + uSize--; + } +} + +#if !defined(__QNXNTO__) /* Ignore Neutrino as it uses strlcpy */ + +#if defined(__KERNEL__) && defined(__linux__) +/* + * In case of Linux kernel-mode in a debug build, choose the variant + * of StringLCopy that uses strlcpy and logs truncation via a stack dump. + * For Linux kernel-mode in a release build, strlcpy alone is used. + */ +#if defined(DEBUG) +IMG_INTERNAL +size_t StringLCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDataSize) +{ + /* + * Let strlcpy handle any truncation cases correctly. 
+ * We will definitely get a NUL-terminated string set in pszDest + */ + size_t uSrcSize = strlcpy(pszDest, pszSrc, uDataSize); + +#if defined(PVR_DEBUG_STRLCPY) + /* Handle truncation by dumping calling stack if debug allows */ + if (uSrcSize >= uDataSize) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: String truncated Src = '<%s>' %ld bytes, Dest = '%s'", + __func__, pszSrc, (long)uDataSize, pszDest)); + OSDumpStack(); + } +#endif /* defined(PVR_DEBUG_STRLCPY) && defined(DEBUG) */ + + return uSrcSize; +} +#endif /* defined(DEBUG) */ + +#else /* defined(__KERNEL__) && defined(__linux__) */ +/* + * For every other platform, make use of the strnlen and strncpy + * implementation of StringLCopy. + * NOTE: It is crucial to avoid memcpy as this has a hidden side-effect of + * dragging in whatever the build-environment flavour of GLIBC is which can + * cause unexpected failures for host-side command execution. + */ +IMG_INTERNAL +size_t StringLCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDataSize) +{ + size_t uSrcSize = strnlen(pszSrc, uDataSize); + + (void)strncpy(pszDest, pszSrc, uSrcSize); + if (uSrcSize == uDataSize) + { + pszDest[uSrcSize-1] = '\0'; + } + else + { + pszDest[uSrcSize] = '\0'; + } + + return uSrcSize; +} + +#endif /* defined(__KERNEL__) && defined(__linux__) */ + +#endif /* !defined(__QNXNTO__) */ diff --git a/drivers/gpu/drm/phytium/octopus/mmu_common.c b/drivers/gpu/drm/phytium/octopus/mmu_common.c new file mode 100644 index 000000000000..2e900e1b650d --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/mmu_common.c @@ -0,0 +1,4446 @@ +/*************************************************************************/ /*! +@File +@Title Common MMU Management +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements basic low level control of MMU. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /***************************************************************************/ + + +#include "img_defs.h" +#include "pvrsrv_error.h" +#include "pvr_debug.h" +#include "dllist.h" +#include "osfunc.h" +#include "allocmem.h" + +#if defined(SUPPORT_RGX) +# include "rgx_memallocflags.h" +# include "rgxmmudefs_km.h" +#endif + +#include "pvr_notifier.h" +#include "pvrsrv.h" +#include "htbuffer.h" +#include "pvr_ricommon.h" + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +# include "process_stats.h" +# include "proc_stats.h" +#endif + +#if defined(PDUMP) +#include "pdump_km.h" +#include "pdump_physmem.h" +#endif + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +#include "physmem_lma.h" +#endif + +/* +Major Interfaces to other modules: + +Let's keep this graph up-to-date: + + +-----------+ + | devicemem | + +-----------+ + | + +============+ + | mmu_common | + +============+ + | + +-----------------+ + | | + +---------+ +----------+ + | pmr | | device | + +---------+ +----------+ + */ + +#include "mmu_common.h" +#include "pmr.h" +#include "devicemem_server_utils.h" + +/* #define MMU_OBJECT_REFCOUNT_DEBUGING 1 */ +#if defined(MMU_OBJECT_REFCOUNT_DEBUGING) +#define MMU_OBJ_DBG(x) PVR_DPF(x) +#else +#define MMU_OBJ_DBG(x) +#endif + +/*! + * Refcounted structure that is shared between the context and + * the cleanup thread items. + * It is used to keep track of all cleanup items and whether the creating + * MMU context has been destroyed and therefore is not allowed to be + * accessed any more. + * + * The cleanup thread is used to defer the freeing of the page tables + * because we have to make sure that the MMU cache has been invalidated. + * If we don't take care of this the MMU might partially access cached + * and uncached tables which might lead to inconsistencies and in the + * worst case to MMU pending faults on random memory. + */ +typedef struct _MMU_CTX_CLEANUP_DATA_ +{ + /*! Refcount to know when this structure can be destroyed */ + ATOMIC_T iRef; + /*! 
Protect items in this structure, especially the refcount */ + POS_LOCK hCleanupLock; + /*! List of all cleanup items currently in flight */ + DLLIST_NODE sMMUCtxCleanupItemsHead; + /*! Was the MMU context destroyed and should not be accessed any more? */ + IMG_BOOL bMMUContextExists; +#if defined(SUPPORT_GPUVIRT_VALIDATION) + /*! Associated OSid for this context */ + IMG_UINT32 ui32OSid; +#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */ +} MMU_CTX_CLEANUP_DATA; + + +/*! + * Structure holding one or more page tables that need to be + * freed after the MMU cache has been flushed which is signalled when + * the stored sync has a value that is <= the required value. + */ +typedef struct _MMU_CLEANUP_ITEM_ +{ + /*! Cleanup thread data */ + PVRSRV_CLEANUP_THREAD_WORK sCleanupThreadFn; + /*! List to hold all the MMU_MEMORY_MAPPINGs, i.e. page tables */ + DLLIST_NODE sMMUMappingHead; + /*! Node of the cleanup item list for the context */ + DLLIST_NODE sMMUCtxCleanupItem; + /* Pointer to the cleanup meta data */ + MMU_CTX_CLEANUP_DATA *psMMUCtxCleanupData; + /* Sync to query if the MMU cache was flushed */ + PVRSRV_CLIENT_SYNC_PRIM *psSync; + /*! The update value of the sync to signal that the cache was flushed */ + IMG_UINT32 uiRequiredSyncVal; + /*! The device node needed to free the page tables */ + PVRSRV_DEVICE_NODE *psDevNode; +} MMU_CLEANUP_ITEM; + +/*! + All physical allocations and frees are relative to this context, so + we would get all the allocations of PCs, PDs, and PTs from the same + RA. + + We have one per MMU context in case we have mixed UMA/LMA devices + within the same system. + */ +typedef struct _MMU_PHYSMEM_CONTEXT_ +{ + /*! Associated MMU_CONTEXT */ + struct _MMU_CONTEXT_ *psMMUContext; + + /*! Parent device node */ + PVRSRV_DEVICE_NODE *psDevNode; + + /*! Refcount so we know when to free up the arena */ + IMG_UINT32 uiNumAllocations; + + /*! Arena from which physical memory is derived */ + RA_ARENA *psPhysMemRA; + /*! 
Arena name */ + IMG_CHAR *pszPhysMemRAName; + /*! Size of arena name string */ + size_t uiPhysMemRANameAllocSize; + + /*! Meta data for deferred cleanup */ + MMU_CTX_CLEANUP_DATA *psCleanupData; + /*! Temporary list of all deferred MMU_MEMORY_MAPPINGs. */ + DLLIST_NODE sTmpMMUMappingHead; + +#if defined(SUPPORT_GPUVIRT_VALIDATION) + IMG_UINT32 ui32OSid; + IMG_UINT32 ui32OSidReg; + IMG_BOOL bOSidAxiProt; +#endif + +} MMU_PHYSMEM_CONTEXT; + +/*! + Mapping structure for MMU memory allocation + */ +typedef struct _MMU_MEMORY_MAPPING_ +{ + /*! Physmem context to allocate from */ + MMU_PHYSMEM_CONTEXT *psContext; + /*! OS/system Handle for this allocation */ + PG_HANDLE sMemHandle; + /*! CPU virtual address of this allocation */ + void *pvCpuVAddr; + /*! Device physical address of this allocation */ + IMG_DEV_PHYADDR sDevPAddr; + /*! Size of this allocation */ + size_t uiSize; + /*! Number of current mappings of this allocation */ + IMG_UINT32 uiCpuVAddrRefCount; + /*! Node for the defer free list */ + DLLIST_NODE sMMUMappingItem; +} MMU_MEMORY_MAPPING; + +/*! + Memory descriptor for MMU objects. There can be more than one memory + descriptor per MMU memory allocation. + */ +typedef struct _MMU_MEMORY_DESC_ +{ + /* NB: bValid is set if this descriptor describes physical + memory. This allows "empty" descriptors to exist, such that we + can allocate them in batches. */ + /*! Does this MMU object have physical backing */ + IMG_BOOL bValid; + /*! Device Physical address of physical backing */ + IMG_DEV_PHYADDR sDevPAddr; + /*! CPU virtual address of physical backing */ + void *pvCpuVAddr; + /*! Mapping data for this MMU object */ + MMU_MEMORY_MAPPING *psMapping; + /*! Memdesc offset into the psMapping */ + IMG_UINT32 uiOffset; + /*! Size of the Memdesc */ + IMG_UINT32 uiSize; +} MMU_MEMORY_DESC; + +/*! + MMU levelx structure. This is generic and is used + for all levels (PC, PD, PT). + */ +typedef struct _MMU_Levelx_INFO_ +{ + /*! 
The Number of entries in this level */ + IMG_UINT32 ui32NumOfEntries; + + /*! Number of times this level has been reference. Note: For Level1 (PTE) + we still take/drop the reference when setting up the page tables rather + then at map/unmap time as this simplifies things */ + IMG_UINT32 ui32RefCount; + + /*! MemDesc for this level */ + MMU_MEMORY_DESC sMemDesc; + + /*! Array of infos for the next level. Must be last member in structure */ + struct _MMU_Levelx_INFO_ *apsNextLevel[1]; +} MMU_Levelx_INFO; + +/*! + MMU context structure + */ +struct _MMU_CONTEXT_ +{ + /*! Originating Connection */ + CONNECTION_DATA *psConnection; + + MMU_DEVICEATTRIBS *psDevAttrs; + + /*! For allocation and deallocation of the physical memory where + the pagetables live */ + struct _MMU_PHYSMEM_CONTEXT_ *psPhysMemCtx; + +#if defined(PDUMP) + /*! PDump context ID (required for PDump commands with virtual addresses) */ + IMG_UINT32 uiPDumpContextID; + + /*! The refcount of the PDump context ID */ + IMG_UINT32 ui32PDumpContextIDRefCount; +#endif + + /*! MMU cache invalidation flags (only used on Volcanic driver) */ + ATOMIC_T sCacheFlags; + + /*! Lock to ensure exclusive access when manipulating the MMU context or + * reading and using its content + */ + POS_LOCK hLock; + + /*! Base level info structure. Must be last member in structure */ + MMU_Levelx_INFO sBaseLevelInfo; + /* NO OTHER MEMBERS AFTER THIS STRUCTURE ! */ +}; + +static const IMG_DEV_PHYADDR gsBadDevPhyAddr = {MMU_BAD_PHYS_ADDR}; + +#if defined(DEBUG) +#include "log2.h" +#endif + +#if defined(DEBUG) && defined(SUPPORT_VALIDATION) && defined(__linux__) +static IMG_UINT32 g_ui32MMULeakCounter = 0; +static DEFINE_MUTEX(g_sMMULeakMutex); +#endif + +/***************************************************************************** + * Utility functions * + *****************************************************************************/ + +/*************************************************************************/ /*! 
+@Function _FreeMMUMapping + +@Description Free a given dllist of MMU_MEMORY_MAPPINGs and the page tables + they represent. + +@Input psDevNode Device node + +@Input psTmpMMUMappingHead List of MMU_MEMORY_MAPPINGs to free + */ +/*****************************************************************************/ +static void +_FreeMMUMapping(PVRSRV_DEVICE_NODE *psDevNode, + PDLLIST_NODE psTmpMMUMappingHead) +{ + PDLLIST_NODE psNode, psNextNode; + + /* Free the current list unconditionally */ + dllist_foreach_node(psTmpMMUMappingHead, + psNode, + psNextNode) + { + MMU_MEMORY_MAPPING *psMapping = IMG_CONTAINER_OF(psNode, + MMU_MEMORY_MAPPING, + sMMUMappingItem); + + psDevNode->sDevMMUPxSetup.pfnDevPxFree(psDevNode, &psMapping->sMemHandle); + dllist_remove_node(psNode); + OSFreeMem(psMapping); + } +} + +/*************************************************************************/ /*! +@Function _CleanupThread_FreeMMUMapping + +@Description Function to be executed by the cleanup thread to free + MMU_MEMORY_MAPPINGs after the MMU cache has been invalidated. + + This function will request a MMU cache invalidate once and + retry to free the MMU_MEMORY_MAPPINGs until the invalidate + has been executed. + + If the memory context that created this cleanup item has been + destroyed in the meantime this function will directly free the + MMU_MEMORY_MAPPINGs without waiting for any MMU cache + invalidation. 
+ +@Input pvData Cleanup data in form of a MMU_CLEANUP_ITEM + +@Return PVRSRV_OK if successful otherwise PVRSRV_ERROR_RETRY + */ +/*****************************************************************************/ +static PVRSRV_ERROR +_CleanupThread_FreeMMUMapping(void* pvData) +{ + PVRSRV_ERROR eError; + MMU_CLEANUP_ITEM *psCleanup = (MMU_CLEANUP_ITEM *)pvData; + MMU_CTX_CLEANUP_DATA *psMMUCtxCleanupData = psCleanup->psMMUCtxCleanupData; + PVRSRV_DEVICE_NODE *psDevNode = psCleanup->psDevNode; + IMG_BOOL bFreeNow; + IMG_UINT32 uiSyncCurrent; + IMG_UINT32 uiSyncReq; + + OSLockAcquire(psMMUCtxCleanupData->hCleanupLock); + + /* Don't attempt to free anything when the context has been destroyed. + * Especially don't access any device specific structures any more!*/ + if (!psMMUCtxCleanupData->bMMUContextExists) + { + OSFreeMem(psCleanup); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_OK, e0); + } + + if (psCleanup->psSync == NULL) + { + /* Kick to invalidate the MMU caches and get sync info */ + eError = psDevNode->pfnMMUCacheInvalidateKick(psDevNode, + &psCleanup->uiRequiredSyncVal); + if (eError != PVRSRV_OK) + { + OSLockRelease(psMMUCtxCleanupData->hCleanupLock); + return PVRSRV_ERROR_RETRY; + } + psCleanup->psSync = psDevNode->psMMUCacheSyncPrim; + } + + uiSyncCurrent = OSReadDeviceMem32(psCleanup->psSync->pui32LinAddr); + uiSyncReq = psCleanup->uiRequiredSyncVal; + + /* Has the invalidate executed */ + bFreeNow = (uiSyncCurrent >= uiSyncReq) ? + /* ... with the counter wrapped around ... + * There can't be 3*1024*1024 transactions completed, so consider wrapped */ + (((uiSyncCurrent - uiSyncReq) > 0xF0000000UL)? IMG_FALSE : IMG_TRUE): + /* There can't be 3*1024*1024 transactions pending, so consider wrapped */ + (((uiSyncReq - uiSyncCurrent) > 0xF0000000UL)? 
IMG_TRUE : IMG_FALSE); + +#if defined(NO_HARDWARE) + /* In NOHW the syncs will never be updated so just free the tables */ + bFreeNow = IMG_TRUE; +#endif + /* If the Invalidate operation is not completed, check if the operation timed out */ + if (!bFreeNow) + { + /* If the time left for the completion of invalidate operation is + * within 500ms of time-out, consider the operation as timed out */ + if ((psCleanup->sCleanupThreadFn.ui32TimeEnd - psCleanup->sCleanupThreadFn.ui32TimeStart - 500) <= + (OSClockms() - psCleanup->sCleanupThreadFn.ui32TimeStart)) + { + /* Consider the operation is timed out */ + bFreeNow = IMG_TRUE; + } + } + + /* Free if the invalidate operation completed or the operation itself timed out */ + if (bFreeNow) + { + _FreeMMUMapping(psDevNode, &psCleanup->sMMUMappingHead); + + dllist_remove_node(&psCleanup->sMMUCtxCleanupItem); + OSFreeMem(psCleanup); + + eError = PVRSRV_OK; + } + else + { + eError = PVRSRV_ERROR_RETRY; + } + +e0: + + /* If this cleanup task has been successfully executed we can + * decrease the context cleanup data refcount. Successfully + * means here that the MMU_MEMORY_MAPPINGs have been freed by + * either this cleanup task of when the MMU context has been + * destroyed. */ + if (eError == PVRSRV_OK) + { + OSLockRelease(psMMUCtxCleanupData->hCleanupLock); + + if (OSAtomicDecrement(&psMMUCtxCleanupData->iRef) == 0) + { + OSLockDestroy(psMMUCtxCleanupData->hCleanupLock); + OSFreeMem(psMMUCtxCleanupData); + } + } + else + { + OSLockRelease(psMMUCtxCleanupData->hCleanupLock); + } + + + return eError; +} + +/*************************************************************************/ /*! +@Function _SetupCleanup_FreeMMUMapping + +@Description Setup a cleanup item for the cleanup thread that will + kick off a MMU invalidate request and free the associated + MMU_MEMORY_MAPPINGs when the invalidate was successful. 
+
+@Input          psPhysMemCtx    The current MMU physmem context
+ */
+/*****************************************************************************/
+static void
+_SetupCleanup_FreeMMUMapping(MMU_PHYSMEM_CONTEXT *psPhysMemCtx)
+{
+
+	MMU_CLEANUP_ITEM *psCleanupItem;
+	MMU_CTX_CLEANUP_DATA *psCleanupData = psPhysMemCtx->psCleanupData;
+	PVRSRV_DEVICE_NODE *psDevNode = psPhysMemCtx->psDevNode;
+
+	/* Nothing queued on the temporary list: nothing to defer or free. */
+	if (dllist_is_empty(&psPhysMemCtx->sTmpMMUMappingHead))
+	{
+		goto e0;
+	}
+
+#if defined(PDUMP)
+	/* Free the page tables immediately in case of pdump, which avoids
+	 * changing script files due to the additional invalidation kick */
+	/* NOTE(review): this unconditional goto makes the rest of the function
+	 * dead code in PDUMP builds -- intentional, per the comment above. */
+	goto e1;
+#endif
+
+	/* Don't defer the freeing if we are currently unloading the driver
+	 * or if the sync has been destroyed */
+	if (PVRSRVGetPVRSRVData()->bUnload ||
+	    psDevNode->psMMUCacheSyncPrim == NULL)
+	{
+		goto e1;
+	}
+
+	/* Allocate a cleanup item; on failure fall back to freeing inline. */
+	psCleanupItem = OSAllocMem(sizeof(*psCleanupItem));
+	if (!psCleanupItem)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: Failed to get memory for deferred page table cleanup. "
+		         "Freeing tables immediately",
+		         __func__));
+		goto e1;
+	}
+
+	/* Set sync to NULL to indicate we did not interact with
+	 * the FW yet. Kicking off an MMU cache invalidate should
+	 * be done in the cleanup thread to not waste time here.
+	 */
+	psCleanupItem->psSync = NULL;
+	psCleanupItem->uiRequiredSyncVal = 0;
+	psCleanupItem->psDevNode = psDevNode;
+	psCleanupItem->psMMUCtxCleanupData = psCleanupData;
+
+	/* Reference held by the cleanup item; dropped by
+	 * _CleanupThread_FreeMMUMapping when the item completes. */
+	OSAtomicIncrement(&psCleanupData->iRef);
+
+	/* Move the page tables to free to the cleanup item */
+	dllist_replace_head(&psPhysMemCtx->sTmpMMUMappingHead,
+	                    &psCleanupItem->sMMUMappingHead);
+
+	/* Add the cleanup item itself to the context list */
+	dllist_add_to_tail(&psCleanupData->sMMUCtxCleanupItemsHead,
+	                   &psCleanupItem->sMMUCtxCleanupItem);
+
+	/* Setup the cleanup thread data and add the work item.
+	 * Ownership of psCleanupItem passes to the cleanup thread here. */
+	psCleanupItem->sCleanupThreadFn.pfnFree = _CleanupThread_FreeMMUMapping;
+	psCleanupItem->sCleanupThreadFn.pvData = psCleanupItem;
+	psCleanupItem->sCleanupThreadFn.bDependsOnHW = IMG_TRUE;
+	CLEANUP_THREAD_SET_RETRY_TIMEOUT(&psCleanupItem->sCleanupThreadFn,
+	                                 CLEANUP_THREAD_RETRY_TIMEOUT_MS_DEFAULT);
+
+	PVRSRVCleanupThreadAddWork(&psCleanupItem->sCleanupThreadFn);
+
+	return;
+
+e1:
+	/* Free the page tables now */
+	_FreeMMUMapping(psDevNode, &psPhysMemCtx->sTmpMMUMappingHead);
+e0:
+	return;
+}
+
+/*************************************************************************/ /*!
+@Function _CalcPCEIdx + +@Description Calculate the page catalogue index + +@Input sDevVAddr Device virtual address + +@Input psDevVAddrConfig Configuration of the virtual address + +@Input bRoundUp Round up the index + +@Return The page catalogue index + */ +/*****************************************************************************/ +static IMG_UINT32 _CalcPCEIdx(IMG_DEV_VIRTADDR sDevVAddr, + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig, + IMG_BOOL bRoundUp) +{ + IMG_DEV_VIRTADDR sTmpDevVAddr; + IMG_UINT32 ui32RetVal; + + sTmpDevVAddr = sDevVAddr; + + if (bRoundUp) + { + sTmpDevVAddr.uiAddr--; + } + ui32RetVal = (IMG_UINT32) ((sTmpDevVAddr.uiAddr & psDevVAddrConfig->uiPCIndexMask) + >> psDevVAddrConfig->uiPCIndexShift); + + if (bRoundUp) + { + ui32RetVal++; + } + + return ui32RetVal; +} + + +/*************************************************************************/ /*! +@Function _CalcPDEIdx + +@Description Calculate the page directory index + +@Input sDevVAddr Device virtual address + +@Input psDevVAddrConfig Configuration of the virtual address + +@Input bRoundUp Round up the index + +@Return The page directory index + */ +/*****************************************************************************/ +static IMG_UINT32 _CalcPDEIdx(IMG_DEV_VIRTADDR sDevVAddr, + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig, + IMG_BOOL bRoundUp) +{ + IMG_DEV_VIRTADDR sTmpDevVAddr; + IMG_UINT32 ui32RetVal; + + sTmpDevVAddr = sDevVAddr; + + if (bRoundUp) + { + sTmpDevVAddr.uiAddr--; + } + ui32RetVal = (IMG_UINT32) ((sTmpDevVAddr.uiAddr & psDevVAddrConfig->uiPDIndexMask) + >> psDevVAddrConfig->uiPDIndexShift); + + if (bRoundUp) + { + ui32RetVal++; + } + + return ui32RetVal; +} + + +/*************************************************************************/ /*! 
+@Function       _CalcPTEIdx
+
+@Description    Calculate the page entry index
+
+@Input          sDevVAddr               Device virtual address
+
+@Input          psDevVAddrConfig        Configuration of the virtual address
+
+@Input          bRoundUp                Round up the index
+
+@Return         The page entry index
+ */
+/*****************************************************************************/
+static IMG_UINT32 _CalcPTEIdx(IMG_DEV_VIRTADDR sDevVAddr,
+                              const MMU_DEVVADDR_CONFIG *psDevVAddrConfig,
+                              IMG_BOOL bRoundUp)
+{
+	IMG_DEV_VIRTADDR sTmpDevVAddr;
+	IMG_UINT32 ui32RetVal;
+
+	sTmpDevVAddr = sDevVAddr;
+	/* Unlike the PC/PD calculators, PT indices are taken relative to the
+	 * configured byte offset before masking. */
+	sTmpDevVAddr.uiAddr -= psDevVAddrConfig->uiOffsetInBytes;
+	if (bRoundUp)
+	{
+		sTmpDevVAddr.uiAddr--;
+	}
+	ui32RetVal = (IMG_UINT32) ((sTmpDevVAddr.uiAddr & psDevVAddrConfig->uiPTIndexMask)
+	                           >> psDevVAddrConfig->uiPTIndexShift);
+
+	if (bRoundUp)
+	{
+		ui32RetVal++;
+	}
+
+	return ui32RetVal;
+}
+
+#if defined(RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR)
+/*
+ * RGXMapBRN71422TargetPhysicalAddress
+ *
+ * Set-up a special MMU tree mapping with a single page that eventually points to
+ * RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR.
+ *
+ * PC entries are 32b, with the last 4 bits being 0 except for the LSB bit that should be 1 (Valid). Addr is 4KB aligned.
+ * PD entries are 64b, with addr in bits 39:5 and everything else 0 except for LSB bit that is Valid. Addr is byte aligned?
+ * PT entries are 64b, with phy addr in bits 39:12 and everything else 0 except for LSB bit that is Valid. Addr is 4KB aligned.
+ * So, we can construct the page tables in a single page like this:
+ *   0x00 : PCE (PCE index 0)
+ *   0x04 : 0x0
+ *   0x08 : PDEa (PDE index 1)
+ *   0x0C : PDEb
+ *   0x10 : PTEa (PTE index 2)
+ *   0x14 : PTEb
+ *
+ * With the PCE and the PDE pointing to this same page.
+ * The VA address that we are mapping is therefore:
+ *   VA = PCE_idx*PCE_size + PDE_idx*PDE_size + PTE_idx*PTE_size =
+ *      = 0 * 1GB + 1 * 2MB + 2 * 4KB =
+ *      = 0 + 0x20_0000 + 0x2000 =
+ *      = 0x00_0020_2000
+ */
+void RGXMapBRN71422TargetPhysicalAddress(MMU_CONTEXT *psMMUContext)
+{
+	MMU_MEMORY_DESC *psMemDesc = &psMMUContext->sBaseLevelInfo.sMemDesc;
+	IMG_DEV_PHYADDR sPhysAddrPC = psMemDesc->sDevPAddr;
+	/* Same page is viewed both as 32-bit PC entries and 64-bit PD/PT entries */
+	IMG_UINT32 *pui32Px = psMemDesc->pvCpuVAddr;
+	IMG_UINT64 *pui64Px = psMemDesc->pvCpuVAddr;
+	IMG_UINT64 ui64Entry;
+
+	/* PCE points to PC */
+	ui64Entry = sPhysAddrPC.uiAddr;
+	ui64Entry = ui64Entry >> RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT;
+	ui64Entry = ui64Entry << RGX_MMUCTRL_PC_DATA_PD_BASE_SHIFT;
+	ui64Entry = ui64Entry & ~RGX_MMUCTRL_PC_DATA_PD_BASE_CLRMSK;
+	ui64Entry = ui64Entry | RGX_MMUCTRL_PC_DATA_VALID_EN;
+	pui32Px[0] = (IMG_UINT32) ui64Entry;
+
+	/* PDE points to PC */
+	ui64Entry = sPhysAddrPC.uiAddr;
+	ui64Entry = ui64Entry & ~RGX_MMUCTRL_PD_DATA_PT_BASE_CLRMSK;
+	ui64Entry = ui64Entry | RGX_MMUCTRL_PD_DATA_VALID_EN;
+	pui64Px[1] = ui64Entry;
+
+	/* PTE points to PAddr */
+	ui64Entry = RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR;
+	ui64Entry = ui64Entry & ~RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK;
+	ui64Entry = ui64Entry | RGX_MMUCTRL_PT_DATA_VALID_EN;
+	pui64Px[2] = ui64Entry;
+
+	{
+		PVRSRV_ERROR eError;
+		PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)psMMUContext->psPhysMemCtx->psDevNode;
+		/* Flush the CPU-written entries so the device sees them */
+		eError = psDevNode->sDevMMUPxSetup.pfnDevPxClean(psDevNode,
+		                                                 &psMemDesc->psMapping->sMemHandle,
+		                                                 psMemDesc->uiOffset,
+		                                                 psMemDesc->uiSize);
+		PVR_LOG_IF_ERROR(eError, "pfnDevPxClean");
+	}
+
+	PVR_DPF((PVR_DBG_MESSAGE, "%s: Mapping the BRN71422 workaround to target physical address 0x%" IMG_UINT64_FMTSPECx ".",
+	         __func__, RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR));
+}
+#endif
+
+/*****************************************************************************
+ * MMU memory allocation/management functions (mem desc)                     *
+
 *****************************************************************************/
+
+/*************************************************************************/ /*!
+@Function       _MMU_PhysMem_RAImportAlloc
+
+@Description    Imports MMU Px memory into the RA. This is where the
+                actual allocation of physical memory happens.
+
+@Input          hArenaHandle    Handle that was passed in during the
+                                creation of the RA
+
+@Input          uiSize          Size of the memory to import
+
+@Input          uiFlags         Flags that where passed in the allocation.
+
+@Output         puiBase         The address of where to insert this import
+
+@Output         puiActualSize   The actual size of the import
+
+@Output         phPriv          Handle which will be passed back when
+                                this import is freed
+
+@Return         PVRSRV_OK if import alloc was successful
+ */
+/*****************************************************************************/
+static PVRSRV_ERROR _MMU_PhysMem_RAImportAlloc(RA_PERARENA_HANDLE hArenaHandle,
+                                               RA_LENGTH_T uiSize,
+                                               RA_FLAGS_T uiFlags,
+                                               const IMG_CHAR *pszAnnotation,
+                                               RA_BASE_T *puiBase,
+                                               RA_LENGTH_T *puiActualSize,
+                                               RA_PERISPAN_HANDLE *phPriv)
+{
+	MMU_PHYSMEM_CONTEXT *psPhysMemCtx = (MMU_PHYSMEM_CONTEXT *)hArenaHandle;
+	PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)psPhysMemCtx->psDevNode;
+	MMU_MEMORY_MAPPING *psMapping;
+	PVRSRV_ERROR eError;
+	IMG_UINT32 uiPid = 0;
+
+	PVR_UNREFERENCED_PARAMETER(pszAnnotation);
+	PVR_UNREFERENCED_PARAMETER(uiFlags);
+
+	/* Debug-build assert plus release-build guard on the same condition */
+	PVR_ASSERT(psDevNode != NULL);
+	PVR_GOTO_IF_INVALID_PARAM(psDevNode, eError, e0);
+
+	psMapping = OSAllocMem(sizeof(MMU_MEMORY_MAPPING));
+	PVR_GOTO_IF_NOMEM(psMapping, eError, e0);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+	/* Attribute boot-time allocations to the system rather than the caller */
+	uiPid = psDevNode->eDevState == PVRSRV_DEVICE_STATE_INIT ?
+	        PVR_SYS_ALLOC_PID : OSGetCurrentClientProcessIDKM();
+#endif
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+	/*
+	 * Store the OSid in the PG_HANDLE.uiOSid field for use by the
+	 * pfnDevPxFree() routine.
+	 */
+	psMapping->sMemHandle.uiOSid = psPhysMemCtx->ui32OSid;
+	eError = psDevNode->sDevMMUPxSetup.pfnDevPxAllocGPV(psDevNode,
+	                                                    TRUNCATE_64BITS_TO_SIZE_T(uiSize),
+	                                                    &psMapping->sMemHandle,
+	                                                    &psMapping->sDevPAddr,
+	                                                    psPhysMemCtx->ui32OSid,
+	                                                    uiPid);
+#else
+	eError = psDevNode->sDevMMUPxSetup.pfnDevPxAlloc(psDevNode,
+	                                                 TRUNCATE_64BITS_TO_SIZE_T(uiSize),
+	                                                 &psMapping->sMemHandle,
+	                                                 &psMapping->sDevPAddr,
+	                                                 uiPid);
+#endif
+	if (eError != PVRSRV_OK)
+	{
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+		PVRSRVStatsUpdateOOMStats(PVRSRV_PROCESS_STAT_TYPE_OOM_PHYSMEM_COUNT,
+		                          OSGetCurrentClientProcessIDKM());
+#endif
+		goto e1;
+	}
+
+	psMapping->psContext = psPhysMemCtx;
+	psMapping->uiSize = TRUNCATE_64BITS_TO_SIZE_T(uiSize);
+
+	psMapping->uiCpuVAddrRefCount = 0;
+
+	*phPriv = (RA_PERISPAN_HANDLE) psMapping;
+
+	/* Note: This assumes this memory never gets paged out */
+	*puiBase = (RA_BASE_T)psMapping->sDevPAddr.uiAddr;
+	*puiActualSize = uiSize;
+
+	return PVRSRV_OK;
+
+e1:
+	OSFreeMem(psMapping);
+e0:
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       _MMU_PhysMem_RAImportFree
+
+@Description    Frees MMU Px memory that was imported into the RA. This is
+                where the actual free of physical memory happens.
+ +@Input hArenaHandle Handle that was passed in during the + creation of the RA + +@Input puiBase The address of where to insert this import + +@Output phPriv Private data that the import alloc provided + +@Return None + */ +/*****************************************************************************/ +static void _MMU_PhysMem_RAImportFree(RA_PERARENA_HANDLE hArenaHandle, + RA_BASE_T uiBase, + RA_PERISPAN_HANDLE hPriv) +{ + MMU_MEMORY_MAPPING *psMapping = (MMU_MEMORY_MAPPING *)hPriv; + MMU_PHYSMEM_CONTEXT *psPhysMemCtx = (MMU_PHYSMEM_CONTEXT *)hArenaHandle; + + PVR_UNREFERENCED_PARAMETER(uiBase); + + /* Check we have dropped all CPU mappings */ + PVR_ASSERT(psMapping->uiCpuVAddrRefCount == 0); + + /* Add mapping to defer free list */ + psMapping->psContext = NULL; + dllist_add_to_tail(&psPhysMemCtx->sTmpMMUMappingHead, &psMapping->sMMUMappingItem); +} + +/*************************************************************************/ /*! +@Function _MMU_PhysMemAlloc + +@Description Allocates physical memory for MMU objects + +@Input psPhysMemCtx Physmem context to do the allocation from + +@Output psMemDesc Allocation description + +@Input uiBytes Size of the allocation in bytes + +@Input uiAlignment Alignment requirement of this allocation + +@Return PVRSRV_OK if allocation was successful + */ +/*****************************************************************************/ + +static PVRSRV_ERROR _MMU_PhysMemAlloc(MMU_PHYSMEM_CONTEXT *psPhysMemCtx, + MMU_MEMORY_DESC *psMemDesc, + size_t uiBytes, + size_t uiAlignment) +{ + PVRSRV_ERROR eError; + RA_BASE_T uiPhysAddr; + + PVR_RETURN_IF_INVALID_PARAM(psMemDesc); + PVR_RETURN_IF_INVALID_PARAM(!psMemDesc->bValid); + + eError = RA_Alloc(psPhysMemCtx->psPhysMemRA, + uiBytes, + RA_NO_IMPORT_MULTIPLIER, + 0, /* flags */ + uiAlignment, + "", + &uiPhysAddr, + NULL, + (RA_PERISPAN_HANDLE *)&psMemDesc->psMapping); + + PVR_LOG_RETURN_IF_ERROR(eError, "RA_Alloc"); + + psMemDesc->bValid = IMG_TRUE; + psMemDesc->pvCpuVAddr = NULL; + 
psMemDesc->sDevPAddr.uiAddr = (IMG_UINT64) uiPhysAddr; + + if (psMemDesc->psMapping->uiCpuVAddrRefCount == 0) + { + eError = psPhysMemCtx->psDevNode->sDevMMUPxSetup.pfnDevPxMap(psPhysMemCtx->psDevNode, + &psMemDesc->psMapping->sMemHandle, + psMemDesc->psMapping->uiSize, + &psMemDesc->psMapping->sDevPAddr, + &psMemDesc->psMapping->pvCpuVAddr); + if (eError != PVRSRV_OK) + { + RA_Free(psPhysMemCtx->psPhysMemRA, psMemDesc->sDevPAddr.uiAddr); + return eError; + } + } + + psMemDesc->psMapping->uiCpuVAddrRefCount++; + psMemDesc->uiOffset = (psMemDesc->sDevPAddr.uiAddr - psMemDesc->psMapping->sDevPAddr.uiAddr); + psMemDesc->pvCpuVAddr = (IMG_UINT8 *)psMemDesc->psMapping->pvCpuVAddr + psMemDesc->uiOffset; + psMemDesc->uiSize = uiBytes; + PVR_ASSERT(psMemDesc->pvCpuVAddr != NULL); + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! +@Function _MMU_PhysMemFree + +@Description Allocates physical memory for MMU objects + +@Input psPhysMemCtx Physmem context to do the free on + +@Input psMemDesc Allocation description + +@Return None + */ +/*****************************************************************************/ +static void _MMU_PhysMemFree(MMU_PHYSMEM_CONTEXT *psPhysMemCtx, + MMU_MEMORY_DESC *psMemDesc) +{ + RA_BASE_T uiPhysAddr; + + PVR_ASSERT(psMemDesc->bValid); + + if (--psMemDesc->psMapping->uiCpuVAddrRefCount == 0) + { + psPhysMemCtx->psDevNode->sDevMMUPxSetup.pfnDevPxUnMap(psPhysMemCtx->psDevNode, + &psMemDesc->psMapping->sMemHandle, + psMemDesc->psMapping->pvCpuVAddr); + } + + psMemDesc->pvCpuVAddr = NULL; + + uiPhysAddr = psMemDesc->sDevPAddr.uiAddr; + RA_Free(psPhysMemCtx->psPhysMemRA, uiPhysAddr); + + psMemDesc->bValid = IMG_FALSE; +} + + +/***************************************************************************** + * MMU object allocation/management functions * + *****************************************************************************/ + +static INLINE PVRSRV_ERROR 
_MMU_ConvertDevMemFlags(IMG_BOOL bInvalidate, + PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, + MMU_PROTFLAGS_T *uiMMUProtFlags, + MMU_CONTEXT *psMMUContext) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 uiGPUCacheMode; + PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; + + /* Do flag conversion between devmem flags and MMU generic flags */ + if (bInvalidate == IMG_FALSE) + { + *uiMMUProtFlags |= ((uiMappingFlags & PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK) + >> PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET) + << MMU_PROTFLAGS_DEVICE_OFFSET; + + if (PVRSRV_CHECK_GPU_READABLE(uiMappingFlags)) + { + *uiMMUProtFlags |= MMU_PROTFLAGS_READABLE; + } + if (PVRSRV_CHECK_GPU_WRITEABLE(uiMappingFlags)) + { + *uiMMUProtFlags |= MMU_PROTFLAGS_WRITEABLE; + } + + eError = DevmemDeviceCacheMode(psDevNode, uiMappingFlags, &uiGPUCacheMode); + PVR_RETURN_IF_ERROR(eError); + + switch (uiGPUCacheMode) + { + case PVRSRV_MEMALLOCFLAG_GPU_UNCACHED: + case PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC: + break; + case PVRSRV_MEMALLOCFLAG_GPU_CACHED: + *uiMMUProtFlags |= MMU_PROTFLAGS_CACHED; + break; + default: + PVR_DPF((PVR_DBG_ERROR, + "%s: Wrong parameters", + __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (DevmemDeviceCacheCoherency(psDevNode, uiMappingFlags)) + { + *uiMMUProtFlags |= MMU_PROTFLAGS_CACHE_COHERENT; + } + /* Only compile if RGX_FEATURE_MIPS_BIT_MASK is defined to avoid compilation + * errors on octopus cores. + */ + #if defined(SUPPORT_RGX) && defined(RGX_FEATURE_MIPS_BIT_MASK) + if ((psDevNode->pfnCheckDeviceFeature) && + PVRSRV_IS_FEATURE_SUPPORTED(psDevNode, MIPS)) + { + /* If we are allocating on the MMU of the firmware processor, the + * cached/uncached attributes must depend on the FIRMWARE_CACHED + * allocation flag. 
+ */ + if (psMMUContext->psDevAttrs == psDevNode->psFirmwareMMUDevAttrs) + { + if (uiMappingFlags & PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)) + { + *uiMMUProtFlags |= MMU_PROTFLAGS_CACHED; + } + else + { + *uiMMUProtFlags &= ~MMU_PROTFLAGS_CACHED; + + } + *uiMMUProtFlags &= ~MMU_PROTFLAGS_CACHE_COHERENT; + } + } +#endif + } + else + { + *uiMMUProtFlags |= MMU_PROTFLAGS_INVALID; + } + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! +@Function _PxMemAlloc + +@Description Allocates physical memory for MMU objects, initialises + and PDumps it. + +@Input psMMUContext MMU context + +@Input uiNumEntries Number of entries to allocate + +@Input psConfig MMU Px config + +@Input eMMULevel MMU level that that allocation is for + +@Output psMemDesc Description of allocation + +@Return PVRSRV_OK if allocation was successful + */ +/*****************************************************************************/ +static PVRSRV_ERROR _PxMemAlloc(MMU_CONTEXT *psMMUContext, + IMG_UINT32 uiNumEntries, + const MMU_PxE_CONFIG *psConfig, + MMU_LEVEL eMMULevel, + MMU_MEMORY_DESC *psMemDesc, + IMG_UINT32 uiLog2Align) +{ + PVRSRV_ERROR eError; + size_t uiBytes; + size_t uiAlign; + PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; + + PVR_ASSERT(psConfig->uiBytesPerEntry != 0); + + uiBytes = uiNumEntries * psConfig->uiBytesPerEntry; + /* We need here the alignment of the previous level because that is the entry for we generate here */ + uiAlign = 1 << uiLog2Align; + + /* + * If the hardware specifies an alignment requirement for a page table then + * it also requires that all memory up to the next aligned address is + * zeroed. + * + * Failing to do this can result in uninitialised data outside of the actual + * page table range being read by the MMU and treated as valid, e.g. the + * pending flag. 
+ * + * Typically this will affect 1MiB, 2MiB PT pages which have a size of 16 + * and 8 bytes respectively but an alignment requirement of 64 bytes each. + */ + uiBytes = PVR_ALIGN(uiBytes, uiAlign); + + /* allocate the object */ + eError = _MMU_PhysMemAlloc(psMMUContext->psPhysMemCtx, + psMemDesc, uiBytes, uiAlign); + if (eError != PVRSRV_OK) + { + PVR_LOG_GOTO_WITH_ERROR("_MMU_PhysMemAlloc", eError, PVRSRV_ERROR_OUT_OF_MEMORY, e0); + } + + /* + Clear the object + Note: if any MMUs are cleared with non-zero values then will need a + custom clear function + Note: 'Cached' is wrong for the LMA + ARM64 combination, but this is + unlikely + */ + OSCachedMemSet(psMemDesc->pvCpuVAddr, 0, uiBytes); + + eError = psDevNode->sDevMMUPxSetup.pfnDevPxClean(psDevNode, + &psMemDesc->psMapping->sMemHandle, + psMemDesc->uiOffset, + psMemDesc->uiSize); + PVR_GOTO_IF_ERROR(eError, e1); + +#if defined(PDUMP) + PDUMPCOMMENT("Alloc MMU object"); + + PDumpMMUMalloc(psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName, + eMMULevel, + &psMemDesc->sDevPAddr, + uiBytes, + uiAlign, + psMMUContext->psDevAttrs->eMMUType); + + PDumpMMUDumpPxEntries(eMMULevel, + psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName, + psMemDesc->pvCpuVAddr, + psMemDesc->sDevPAddr, + 0, + uiNumEntries, + NULL, NULL, 0, /* pdump symbolic info is irrelevant here */ + psConfig->uiBytesPerEntry, + uiLog2Align, + psConfig->uiAddrShift, + psConfig->uiAddrMask, + psConfig->uiProtMask, + psConfig->uiValidEnMask, + 0, + psMMUContext->psDevAttrs->eMMUType); +#endif + + return PVRSRV_OK; +e1: + _MMU_PhysMemFree(psMMUContext->psPhysMemCtx, + psMemDesc); +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +/*************************************************************************/ /*! +@Function _PxMemFree + +@Description Frees physical memory for MMU objects, de-initialises + and PDumps it. 
+ +@Input psMemDesc Description of allocation + +@Return PVRSRV_OK if allocation was successful + */ +/*****************************************************************************/ + +static void _PxMemFree(MMU_CONTEXT *psMMUContext, + MMU_MEMORY_DESC *psMemDesc, MMU_LEVEL eMMULevel) +{ +#if defined(MMU_CLEARMEM_ON_FREE) + /* + Clear the MMU object + Note: if any MMUs are cleared with non-zero values then will need a + custom clear function + Note: 'Cached' is wrong for the LMA + ARM64 combination, but this is + unlikely + */ + OSCachedMemSet(psMemDesc->pvCpuVAddr, 0, psMemDesc->ui32Bytes); + +#if defined(PDUMP) + PDUMPCOMMENT("Clear MMU object before freeing it"); +#endif +#endif/* MMU_CLEARMEM_ON_FREE */ + +#if defined(PDUMP) + PDUMPCOMMENT("Free MMU object"); + PDumpMMUFree(psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName, + eMMULevel, + &psMemDesc->sDevPAddr, + psMMUContext->psDevAttrs->eMMUType); +#else + PVR_UNREFERENCED_PARAMETER(eMMULevel); +#endif + /* free the PC */ + _MMU_PhysMemFree(psMMUContext->psPhysMemCtx, psMemDesc); +} + +static INLINE PVRSRV_ERROR _SetupPTE(MMU_CONTEXT *psMMUContext, + MMU_Levelx_INFO *psLevel, + IMG_UINT32 uiIndex, + const MMU_PxE_CONFIG *psConfig, + const IMG_DEV_PHYADDR *psDevPAddr, + IMG_BOOL bUnmap, +#if defined(PDUMP) + const IMG_CHAR *pszMemspaceName, + const IMG_CHAR *pszSymbolicAddr, + IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset, +#endif + IMG_UINT64 uiProtFlags) +{ + MMU_MEMORY_DESC *psMemDesc = &psLevel->sMemDesc; + IMG_UINT64 ui64PxE64; + IMG_UINT64 uiAddr = psDevPAddr->uiAddr; + PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; + + if (psDevNode->pfnValidateOrTweakPhysAddrs) + { + PVRSRV_ERROR eErr = psDevNode->pfnValidateOrTweakPhysAddrs(psDevNode, + psMMUContext->psDevAttrs, + &uiAddr); + /* return if error */ + PVR_LOG_RETURN_IF_ERROR(eErr, "_SetupPTE"); + } + + /* Calculate Entry */ + ui64PxE64 = uiAddr /* Calculate the offset to that base */ + >> psConfig->uiAddrLog2Align /* Shift away the 
useless bits, because the alignment is very coarse and we address by alignment */ + << psConfig->uiAddrShift /* Shift back to fit address in the Px entry */ + & psConfig->uiAddrMask; /* Delete unused bits */ + ui64PxE64 |= uiProtFlags; + + /* Set the entry */ + if (psConfig->uiBytesPerEntry == 8) + { + IMG_UINT64 *pui64Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */ + + pui64Px[uiIndex] = ui64PxE64; + } + else if (psConfig->uiBytesPerEntry == 4) + { + IMG_UINT32 *pui32Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */ + + /* assert that the result fits into 32 bits before writing + it into the 32-bit array with a cast */ + PVR_ASSERT(ui64PxE64 == (ui64PxE64 & 0xffffffffU)); + + pui32Px[uiIndex] = (IMG_UINT32) ui64PxE64; + } + else + { + return PVRSRV_ERROR_MMU_CONFIG_IS_WRONG; + } + + + /* Log modification */ + HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE, + HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel), + uiIndex, MMU_LEVEL_1, + HTBLOG_U64_BITS_HIGH(ui64PxE64), HTBLOG_U64_BITS_LOW(ui64PxE64), + !bUnmap); + +#if defined(PDUMP) + PDumpMMUDumpPxEntries(MMU_LEVEL_1, + psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName, + psMemDesc->pvCpuVAddr, + psMemDesc->sDevPAddr, + uiIndex, + 1, + pszMemspaceName, + pszSymbolicAddr, + uiSymbolicAddrOffset, + psConfig->uiBytesPerEntry, + psConfig->uiAddrLog2Align, + psConfig->uiAddrShift, + psConfig->uiAddrMask, + psConfig->uiProtMask, + psConfig->uiValidEnMask, + 0, + psMMUContext->psDevAttrs->eMMUType); +#endif /*PDUMP*/ + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! 
+@Function _SetupPxE + +@Description Sets up an entry of an MMU object to point to the + provided address + +@Input psMMUContext MMU context to operate on + +@Input psLevel Level info for MMU object + +@Input uiIndex Index into the MMU object to setup + +@Input psConfig MMU Px config + +@Input eMMULevel Level of MMU object + +@Input psDevPAddr Address to setup the MMU object to point to + +@Input pszMemspaceName Name of the PDump memory space that the entry + will point to + +@Input pszSymbolicAddr PDump symbolic address that the entry will + point to + +@Input uiProtFlags MMU protection flags + +@Return PVRSRV_OK if the setup was successful + */ +/*****************************************************************************/ +static PVRSRV_ERROR _SetupPxE(MMU_CONTEXT *psMMUContext, + MMU_Levelx_INFO *psLevel, + IMG_UINT32 uiIndex, + const MMU_PxE_CONFIG *psConfig, + MMU_LEVEL eMMULevel, + const IMG_DEV_PHYADDR *psDevPAddr, +#if defined(PDUMP) + const IMG_CHAR *pszMemspaceName, + const IMG_CHAR *pszSymbolicAddr, + IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset, +#endif + MMU_PROTFLAGS_T uiProtFlags, + IMG_UINT32 uiLog2DataPageSize) +{ + PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; + MMU_MEMORY_DESC *psMemDesc = &psLevel->sMemDesc; + + IMG_UINT32 (*pfnDerivePxEProt4)(IMG_UINT32); + IMG_UINT64 (*pfnDerivePxEProt8)(IMG_UINT32, IMG_UINT32); + + if (!psDevPAddr) + { + /* Invalidate entry */ + if (~uiProtFlags & MMU_PROTFLAGS_INVALID) + { + PVR_DPF((PVR_DBG_ERROR, "Error, no physical address specified, but not invalidating entry")); + uiProtFlags |= MMU_PROTFLAGS_INVALID; + } + psDevPAddr = &gsBadDevPhyAddr; + } + else + { + if (uiProtFlags & MMU_PROTFLAGS_INVALID) + { + PVR_DPF((PVR_DBG_ERROR, "A physical address was specified when requesting invalidation of entry")); + uiProtFlags |= MMU_PROTFLAGS_INVALID; + } + } + + switch (eMMULevel) + { + case MMU_LEVEL_3: + pfnDerivePxEProt4 = psMMUContext->psDevAttrs->pfnDerivePCEProt4; + pfnDerivePxEProt8 = 
psMMUContext->psDevAttrs->pfnDerivePCEProt8; + break; + + case MMU_LEVEL_2: + pfnDerivePxEProt4 = psMMUContext->psDevAttrs->pfnDerivePDEProt4; + pfnDerivePxEProt8 = psMMUContext->psDevAttrs->pfnDerivePDEProt8; + break; + + case MMU_LEVEL_1: + pfnDerivePxEProt4 = psMMUContext->psDevAttrs->pfnDerivePTEProt4; + pfnDerivePxEProt8 = psMMUContext->psDevAttrs->pfnDerivePTEProt8; + break; + + default: + PVR_DPF((PVR_DBG_ERROR, "%s: invalid MMU level", __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* How big is a PxE in bytes? */ + /* Filling the actual Px entry with an address */ + switch (psConfig->uiBytesPerEntry) + { + case 4: + { + IMG_UINT32 *pui32Px; + IMG_UINT64 ui64PxE64; + + pui32Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */ + + ui64PxE64 = psDevPAddr->uiAddr /* Calculate the offset to that base */ + >> psConfig->uiAddrLog2Align /* Shift away the unnecessary bits of the address */ + << psConfig->uiAddrShift /* Shift back to fit address in the Px entry */ + & psConfig->uiAddrMask; /* Delete unused higher bits */ + + ui64PxE64 |= (IMG_UINT64)pfnDerivePxEProt4(uiProtFlags); + /* assert that the result fits into 32 bits before writing + it into the 32-bit array with a cast */ + PVR_ASSERT(ui64PxE64 == (ui64PxE64 & 0xffffffffU)); + + /* We should never invalidate an invalid page */ + if (uiProtFlags & MMU_PROTFLAGS_INVALID) + { + PVR_ASSERT(pui32Px[uiIndex] != ui64PxE64); + } + pui32Px[uiIndex] = (IMG_UINT32) ui64PxE64; + HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE, + HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel), + uiIndex, eMMULevel, + HTBLOG_U64_BITS_HIGH(ui64PxE64), HTBLOG_U64_BITS_LOW(ui64PxE64), + (uiProtFlags & MMU_PROTFLAGS_INVALID)? 
0: 1); + break; + } + case 8: + { + IMG_UINT64 *pui64Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */ + + pui64Px[uiIndex] = psDevPAddr->uiAddr /* Calculate the offset to that base */ + >> psConfig->uiAddrLog2Align /* Shift away the unnecessary bits of the address */ + << psConfig->uiAddrShift /* Shift back to fit address in the Px entry */ + & psConfig->uiAddrMask; /* Delete unused higher bits */ + pui64Px[uiIndex] |= pfnDerivePxEProt8(uiProtFlags, uiLog2DataPageSize); + + HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE, + HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel), + uiIndex, eMMULevel, + HTBLOG_U64_BITS_HIGH(pui64Px[uiIndex]), HTBLOG_U64_BITS_LOW(pui64Px[uiIndex]), + (uiProtFlags & MMU_PROTFLAGS_INVALID)? 0: 1); + break; + } + default: + PVR_DPF((PVR_DBG_ERROR, "%s: PxE size not supported (%d) for level %d", + __func__, psConfig->uiBytesPerEntry, eMMULevel)); + + return PVRSRV_ERROR_MMU_CONFIG_IS_WRONG; + } + +#if defined(PDUMP) + PDumpMMUDumpPxEntries(eMMULevel, + psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName, + psMemDesc->pvCpuVAddr, + psMemDesc->sDevPAddr, + uiIndex, + 1, + pszMemspaceName, + pszSymbolicAddr, + uiSymbolicAddrOffset, + psConfig->uiBytesPerEntry, + psConfig->uiAddrLog2Align, + psConfig->uiAddrShift, + psConfig->uiAddrMask, + psConfig->uiProtMask, + psConfig->uiValidEnMask, + 0, + psMMUContext->psDevAttrs->eMMUType); +#endif + + psDevNode->pfnMMUCacheInvalidate(psDevNode, psMMUContext, + eMMULevel, + uiProtFlags & MMU_PROTFLAGS_INVALID); + + return PVRSRV_OK; +} + +/***************************************************************************** + * MMU host control functions (Level Info) * + *****************************************************************************/ + + +/*************************************************************************/ /*! +@Function _MMU_FreeLevel + +@Description Recursively frees the specified range of Px entries. 
If any + level has its last reference dropped then the MMU object + memory and the MMU_Levelx_Info will be freed. + + At each level we might be crossing a boundary from one Px to + another. The values for auiStartArray should be by used for + the first call into each level and the values in auiEndArray + should only be used in the last call for each level. + In order to determine if this is the first/last call we pass + in bFirst and bLast. + When one level calls down to the next only if bFirst/bLast is set + and it's the first/last iteration of the loop at its level will + bFirst/bLast set for the next recursion. + This means that each iteration has the knowledge of the previous + level which is required. + +@Input psMMUContext MMU context to operate on + +@Input psLevel Level info on which to free the + specified range + +@Input auiStartArray Array of start indexes (one for each level) + +@Input auiEndArray Array of end indexes (one for each level) + +@Input auiEntriesPerPxArray Array of number of entries for the Px + (one for each level) + +@Input apsConfig Array of PxE configs (one for each level) + +@Input aeMMULevel Array of MMU levels (one for each level) + +@Input pui32CurrentLevel Pointer to a variable which is set to our + current level + +@Input uiStartIndex Start index of the range to free + +@Input uiEndIndex End index of the range to free + +@Input bFirst This is the first call for this level + +@Input bLast This is the last call for this level + +@Return IMG_TRUE if the last reference to psLevel was dropped + */ +/*****************************************************************************/ +static IMG_BOOL _MMU_FreeLevel(MMU_CONTEXT *psMMUContext, + MMU_Levelx_INFO *psLevel, + IMG_UINT32 auiStartArray[], + IMG_UINT32 auiEndArray[], + IMG_UINT32 auiEntriesPerPxArray[], + const MMU_PxE_CONFIG *apsConfig[], + MMU_LEVEL aeMMULevel[], + IMG_UINT32 *pui32CurrentLevel, + IMG_UINT32 uiStartIndex, + IMG_UINT32 uiEndIndex, + IMG_BOOL bFirst, + IMG_BOOL 
bLast, + IMG_UINT32 uiLog2DataPageSize) +{ + IMG_UINT32 uiThisLevel = *pui32CurrentLevel; + const MMU_PxE_CONFIG *psConfig = apsConfig[uiThisLevel]; + IMG_UINT32 i; + IMG_BOOL bFreed = IMG_FALSE; + PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; + + /* Parameter checks */ + PVR_ASSERT(*pui32CurrentLevel < MMU_MAX_LEVEL); + PVR_ASSERT(psLevel != NULL); + + MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_FreeLevel: level = %d, range %d - %d, refcount = %d", + aeMMULevel[uiThisLevel], uiStartIndex, + uiEndIndex, psLevel->ui32RefCount)); + + for (i = uiStartIndex;(i < uiEndIndex) && (psLevel != NULL);i++) + { + if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1) + { + MMU_Levelx_INFO *psNextLevel = psLevel->apsNextLevel[i]; + IMG_UINT32 uiNextStartIndex; + IMG_UINT32 uiNextEndIndex; + IMG_BOOL bNextFirst; + IMG_BOOL bNextLast; + + /* If we're crossing a Px then the start index changes */ + if (bFirst && (i == uiStartIndex)) + { + uiNextStartIndex = auiStartArray[uiThisLevel + 1]; + bNextFirst = IMG_TRUE; + } + else + { + uiNextStartIndex = 0; + bNextFirst = IMG_FALSE; + } + + /* If we're crossing a Px then the end index changes */ + if (bLast && (i == (uiEndIndex - 1))) + { + uiNextEndIndex = auiEndArray[uiThisLevel + 1]; + bNextLast = IMG_TRUE; + } + else + { + uiNextEndIndex = auiEntriesPerPxArray[uiThisLevel + 1]; + bNextLast = IMG_FALSE; + } + + /* Recurse into the next level */ + (*pui32CurrentLevel)++; + if (_MMU_FreeLevel(psMMUContext, psNextLevel, auiStartArray, + auiEndArray, auiEntriesPerPxArray, + apsConfig, aeMMULevel, pui32CurrentLevel, + uiNextStartIndex, uiNextEndIndex, + bNextFirst, bNextLast, uiLog2DataPageSize)) + { + PVRSRV_ERROR eError; + + /* Un-wire the entry */ + eError = _SetupPxE(psMMUContext, + psLevel, + i, + psConfig, + aeMMULevel[uiThisLevel], + NULL, +#if defined(PDUMP) + NULL, /* Only required for data page */ + NULL, /* Only required for data page */ + 0, /* Only required for data page */ +#endif + MMU_PROTFLAGS_INVALID, + 
uiLog2DataPageSize); + + PVR_ASSERT(eError == PVRSRV_OK); + + /* Free table of the level below, pointed to by this table entry. + * We don't destroy the table inside the above _MMU_FreeLevel call because we + * first have to set the table entry of the level above to invalid. */ + _PxMemFree(psMMUContext, &psNextLevel->sMemDesc, aeMMULevel[*pui32CurrentLevel]); + OSFreeMem(psNextLevel); + + /* The level below us is empty, drop the refcount and clear the pointer */ + psLevel->ui32RefCount--; + psLevel->apsNextLevel[i] = NULL; + + /* Check we haven't wrapped around */ + PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries); + } + (*pui32CurrentLevel)--; + } + else + { + psLevel->ui32RefCount--; + } + + /* + Free this level if it is no longer referenced, unless it's the base + level in which case it's part of the MMU context and should be freed + when the MMU context is freed + */ + if ((psLevel->ui32RefCount == 0) && (psLevel != &psMMUContext->sBaseLevelInfo)) + { + bFreed = IMG_TRUE; + } + } + + /* Level one flushing is done when we actually write the table entries */ + if ((aeMMULevel[uiThisLevel] != MMU_LEVEL_1) && (psLevel != NULL)) + { + psDevNode->sDevMMUPxSetup.pfnDevPxClean(psDevNode, + &psLevel->sMemDesc.psMapping->sMemHandle, + uiStartIndex * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset, + (uiEndIndex - uiStartIndex) * psConfig->uiBytesPerEntry); + } + + MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_FreeLevel end: level = %d, refcount = %d", + aeMMULevel[uiThisLevel], bFreed?0: (psLevel)?psLevel->ui32RefCount:-1)); + + return bFreed; +} + +/*************************************************************************/ /*! +@Function _MMU_AllocLevel + +@Description Recursively allocates the specified range of Px entries. If any + level has its last reference dropped then the MMU object + memory and the MMU_Levelx_Info will be freed. + + At each level we might be crossing a boundary from one Px to + another. 
The values for auiStartArray should be by used for + the first call into each level and the values in auiEndArray + should only be used in the last call for each level. + In order to determine if this is the first/last call we pass + in bFirst and bLast. + When one level calls down to the next only if bFirst/bLast is set + and it's the first/last iteration of the loop at its level will + bFirst/bLast set for the next recursion. + This means that each iteration has the knowledge of the previous + level which is required. + +@Input psMMUContext MMU context to operate on + +@Input psLevel Level info on which to free the + specified range + +@Input auiStartArray Array of start indexes (one for each level) + +@Input auiEndArray Array of end indexes (one for each level) + +@Input auiEntriesPerPxArray Array of number of entries for the Px + (one for each level) + +@Input apsConfig Array of PxE configs (one for each level) + +@Input aeMMULevel Array of MMU levels (one for each level) + +@Input pui32CurrentLevel Pointer to a variable which is set to our + current level + +@Input uiStartIndex Start index of the range to free + +@Input uiEndIndex End index of the range to free + +@Input bFirst This is the first call for this level + +@Input bLast This is the last call for this level + +@Return IMG_TRUE if the last reference to psLevel was dropped + */ +/*****************************************************************************/ +static PVRSRV_ERROR _MMU_AllocLevel(MMU_CONTEXT *psMMUContext, + MMU_Levelx_INFO *psLevel, + IMG_UINT32 auiStartArray[], + IMG_UINT32 auiEndArray[], + IMG_UINT32 auiEntriesPerPxArray[], + const MMU_PxE_CONFIG *apsConfig[], + MMU_LEVEL aeMMULevel[], + IMG_UINT32 *pui32CurrentLevel, + IMG_UINT32 uiStartIndex, + IMG_UINT32 uiEndIndex, + IMG_BOOL bFirst, + IMG_BOOL bLast, + IMG_UINT32 uiLog2DataPageSize) +{ + IMG_UINT32 uiThisLevel = *pui32CurrentLevel; /* Starting with 0 */ + const MMU_PxE_CONFIG *psConfig = apsConfig[uiThisLevel]; /* The table config 
for the current level */ + PVRSRV_ERROR eError = PVRSRV_ERROR_OUT_OF_MEMORY; + IMG_UINT32 uiAllocState = 99; /* Debug info to check what progress was made in the function. Updated during this function. */ + IMG_UINT32 i; + PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; + + /* Parameter check */ + PVR_ASSERT(*pui32CurrentLevel < MMU_MAX_LEVEL); + + MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_AllocLevel: level = %d, range %d - %d, refcount = %d", + aeMMULevel[uiThisLevel], uiStartIndex, + uiEndIndex, psLevel->ui32RefCount)); + + /* Go from uiStartIndex to uiEndIndex through the Px */ + for (i = uiStartIndex;i < uiEndIndex;i++) + { + /* Only try an allocation if this is not the last level */ + /*Because a PT allocation is already done while setting the entry in PD */ + if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1) + { + IMG_UINT32 uiNextStartIndex; + IMG_UINT32 uiNextEndIndex; + IMG_BOOL bNextFirst; + IMG_BOOL bNextLast; + + /* If there is already a next Px level existing, do not allocate it */ + if (!psLevel->apsNextLevel[i]) + { + MMU_Levelx_INFO *psNextLevel; + IMG_UINT32 ui32AllocSize; + IMG_UINT32 uiNextEntries; + + /* Allocate and setup the next level */ + uiNextEntries = auiEntriesPerPxArray[uiThisLevel + 1]; + ui32AllocSize = sizeof(MMU_Levelx_INFO); + if (aeMMULevel[uiThisLevel + 1] != MMU_LEVEL_1) + { + ui32AllocSize += sizeof(MMU_Levelx_INFO *) * (uiNextEntries - 1); + } + psNextLevel = OSAllocZMem(ui32AllocSize); + if (psNextLevel == NULL) + { + uiAllocState = 0; + goto e0; + } + + /* Hook in this level for next time */ + psLevel->apsNextLevel[i] = psNextLevel; + + psNextLevel->ui32NumOfEntries = uiNextEntries; + psNextLevel->ui32RefCount = 0; + /* Allocate Px memory for a sub level*/ + eError = _PxMemAlloc(psMMUContext, uiNextEntries, apsConfig[uiThisLevel + 1], + aeMMULevel[uiThisLevel + 1], + &psNextLevel->sMemDesc, + psConfig->uiAddrLog2Align); + if (eError != PVRSRV_OK) + { + uiAllocState = 1; + goto e0; + } + + /* Wire up the entry */ + 
eError = _SetupPxE(psMMUContext, + psLevel, + i, + psConfig, + aeMMULevel[uiThisLevel], + &psNextLevel->sMemDesc.sDevPAddr, +#if defined(PDUMP) + NULL, /* Only required for data page */ + NULL, /* Only required for data page */ + 0, /* Only required for data page */ +#endif + 0, + uiLog2DataPageSize); + + if (eError != PVRSRV_OK) + { + uiAllocState = 2; + goto e0; + } + + psLevel->ui32RefCount++; + } + + /* If we're crossing a Px then the start index changes */ + if (bFirst && (i == uiStartIndex)) + { + uiNextStartIndex = auiStartArray[uiThisLevel + 1]; + bNextFirst = IMG_TRUE; + } + else + { + uiNextStartIndex = 0; + bNextFirst = IMG_FALSE; + } + + /* If we're crossing a Px then the end index changes */ + if (bLast && (i == (uiEndIndex - 1))) + { + uiNextEndIndex = auiEndArray[uiThisLevel + 1]; + bNextLast = IMG_TRUE; + } + else + { + uiNextEndIndex = auiEntriesPerPxArray[uiThisLevel + 1]; + bNextLast = IMG_FALSE; + } + + /* Recurse into the next level */ + (*pui32CurrentLevel)++; + eError = _MMU_AllocLevel(psMMUContext, psLevel->apsNextLevel[i], + auiStartArray, + auiEndArray, + auiEntriesPerPxArray, + apsConfig, + aeMMULevel, + pui32CurrentLevel, + uiNextStartIndex, + uiNextEndIndex, + bNextFirst, + bNextLast, + uiLog2DataPageSize); + (*pui32CurrentLevel)--; + if (eError != PVRSRV_OK) + { + uiAllocState = 2; + goto e0; + } + } + else + { + /* All we need to do for level 1 is bump the refcount */ + psLevel->ui32RefCount++; + } + PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries); + } + + /* Level one flushing is done when we actually write the table entries */ + if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1) + { + eError = psDevNode->sDevMMUPxSetup.pfnDevPxClean(psDevNode, + &psLevel->sMemDesc.psMapping->sMemHandle, + uiStartIndex * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset, + (uiEndIndex - uiStartIndex) * psConfig->uiBytesPerEntry); + PVR_GOTO_IF_ERROR(eError, e0); + } + + MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_AllocLevel end: level = %d, 
refcount = %d", + aeMMULevel[uiThisLevel], psLevel->ui32RefCount)); + return PVRSRV_OK; + +e0: + /* Confirm that we've not come down this route unexpectedly */ + PVR_ASSERT(uiAllocState!=99); + PVR_DPF((PVR_DBG_ERROR, "_MMU_AllocLevel: Error %d allocating Px for level %d in stage %d" + ,eError, aeMMULevel[uiThisLevel], uiAllocState)); + + /* The start value of index variable i is not initialised on purpose. + * This clean-up loop deinitialises what was already initialised in + * reverse order, so the i index already has the correct value. + */ + for (/* i already set */; i>= uiStartIndex && i< uiEndIndex; i--) + { + switch (uiAllocState) + { + IMG_UINT32 uiNextStartIndex; + IMG_UINT32 uiNextEndIndex; + IMG_BOOL bNextFirst; + IMG_BOOL bNextLast; + + case 3: + /* If we're crossing a Px then the start index changes */ + if (bFirst && (i == uiStartIndex)) + { + uiNextStartIndex = auiStartArray[uiThisLevel + 1]; + bNextFirst = IMG_TRUE; + } + else + { + uiNextStartIndex = 0; + bNextFirst = IMG_FALSE; + } + + /* If we're crossing a Px then the end index changes */ + if (bLast && (i == (uiEndIndex - 1))) + { + uiNextEndIndex = auiEndArray[uiThisLevel + 1]; + bNextLast = IMG_TRUE; + } + else + { + uiNextEndIndex = auiEntriesPerPxArray[uiThisLevel + 1]; + bNextLast = IMG_FALSE; + } + + if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1) + { + (*pui32CurrentLevel)++; + if (_MMU_FreeLevel(psMMUContext, psLevel->apsNextLevel[i], + auiStartArray, auiEndArray, + auiEntriesPerPxArray, apsConfig, + aeMMULevel, pui32CurrentLevel, + uiNextStartIndex, uiNextEndIndex, + bNextFirst, bNextLast, uiLog2DataPageSize)) + { + psLevel->ui32RefCount--; + psLevel->apsNextLevel[i] = NULL; + + /* Check we haven't wrapped around */ + PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries); + } + (*pui32CurrentLevel)--; + } + else + { + /* We should never come down this path, but it's here + for completeness */ + psLevel->ui32RefCount--; + + /* Check we haven't wrapped around */ + 
PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries); + } + + __fallthrough; + case 2: + if (psLevel->apsNextLevel[i] != NULL && + psLevel->apsNextLevel[i]->ui32RefCount == 0) + { + _PxMemFree(psMMUContext, &psLevel->sMemDesc, + aeMMULevel[uiThisLevel]); + } + + __fallthrough; + case 1: + if (psLevel->apsNextLevel[i] != NULL && + psLevel->apsNextLevel[i]->ui32RefCount == 0) + { + OSFreeMem(psLevel->apsNextLevel[i]); + psLevel->apsNextLevel[i] = NULL; + } + + __fallthrough; + case 0: + uiAllocState = 3; + break; + } + } + return eError; +} + +/***************************************************************************** + * MMU page table functions * + *****************************************************************************/ + +/*************************************************************************/ /*! +@Function _MMU_GetLevelData + +@Description Get the all the level data and calculates the indexes for the + specified address range + +@Input psMMUContext MMU context to operate on + +@Input sDevVAddrStart Start device virtual address + +@Input sDevVAddrEnd End device virtual address + +@Input uiLog2DataPageSize Log2 of the page size to use + +@Input auiStartArray Array of start indexes (one for each level) + +@Input auiEndArray Array of end indexes (one for each level) + +@Input uiEntriesPerPxArray Array of number of entries for the Px + (one for each level) + +@Input apsConfig Array of PxE configs (one for each level) + +@Input aeMMULevel Array of MMU levels (one for each level) + +@Input ppsMMUDevVAddrConfig Device virtual address config + +@Input phPriv Private data of page size config + +@Return IMG_TRUE if the last reference to psLevel was dropped + */ +/*****************************************************************************/ +static void _MMU_GetLevelData(MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR sDevVAddrStart, + IMG_DEV_VIRTADDR sDevVAddrEnd, + IMG_UINT32 uiLog2DataPageSize, + IMG_UINT32 auiStartArray[], + IMG_UINT32 
auiEndArray[], + IMG_UINT32 auiEntriesPerPx[], + const MMU_PxE_CONFIG *apsConfig[], + MMU_LEVEL aeMMULevel[], + const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig, + IMG_HANDLE *phPriv) +{ + const MMU_PxE_CONFIG *psMMUPDEConfig; + const MMU_PxE_CONFIG *psMMUPTEConfig; + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; + MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs; + PVRSRV_ERROR eError; + IMG_UINT32 i = 0; + + eError = psDevAttrs->pfnGetPageSizeConfiguration(uiLog2DataPageSize, + &psMMUPDEConfig, + &psMMUPTEConfig, + ppsMMUDevVAddrConfig, + phPriv); + PVR_ASSERT(eError == PVRSRV_OK); + + psDevVAddrConfig = *ppsMMUDevVAddrConfig; + + if (psDevVAddrConfig->uiPCIndexMask != 0) + { + auiStartArray[i] = _CalcPCEIdx(sDevVAddrStart, psDevVAddrConfig, IMG_FALSE); + auiEndArray[i] = _CalcPCEIdx(sDevVAddrEnd, psDevVAddrConfig, IMG_TRUE); + auiEntriesPerPx[i] = psDevVAddrConfig->uiNumEntriesPC; + apsConfig[i] = psDevAttrs->psBaseConfig; + aeMMULevel[i] = MMU_LEVEL_3; + i++; + } + + if (psDevVAddrConfig->uiPDIndexMask != 0) + { + auiStartArray[i] = _CalcPDEIdx(sDevVAddrStart, psDevVAddrConfig, IMG_FALSE); + auiEndArray[i] = _CalcPDEIdx(sDevVAddrEnd, psDevVAddrConfig, IMG_TRUE); + auiEntriesPerPx[i] = psDevVAddrConfig->uiNumEntriesPD; + if (i == 0) + { + apsConfig[i] = psDevAttrs->psBaseConfig; + } + else + { + apsConfig[i] = psMMUPDEConfig; + } + aeMMULevel[i] = MMU_LEVEL_2; + i++; + } + + /* + There is always a PTE entry so we have a slightly different behaviour than above. + E.g. for 2 MB RGX pages the uiPTIndexMask is 0x0000000000 but still there + is a PT with one entry. + + */ + auiStartArray[i] = _CalcPTEIdx(sDevVAddrStart, psDevVAddrConfig, IMG_FALSE); + if (psDevVAddrConfig->uiPTIndexMask !=0) + { + auiEndArray[i] = _CalcPTEIdx(sDevVAddrEnd, psDevVAddrConfig, IMG_TRUE); + } + else + { + /* + If the PTE mask is zero it means there is only 1 PTE and thus, as an + an exclusive bound, the end array index is equal to the start index + 1. 
+ */ + + auiEndArray[i] = auiStartArray[i] + 1; + } + + auiEntriesPerPx[i] = psDevVAddrConfig->uiNumEntriesPT; + + if (i == 0) + { + apsConfig[i] = psDevAttrs->psBaseConfig; + } + else + { + apsConfig[i] = psMMUPTEConfig; + } + aeMMULevel[i] = MMU_LEVEL_1; +} + +static void _MMU_PutLevelData(MMU_CONTEXT *psMMUContext, IMG_HANDLE hPriv) +{ + MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs; + + psDevAttrs->pfnPutPageSizeConfiguration(hPriv); +} + +/*************************************************************************/ /*! +@Function _AllocPageTables + +@Description Allocate page tables and any higher level MMU objects required + for the specified virtual range + +@Input psMMUContext MMU context to operate on + +@Input sDevVAddrStart Start device virtual address + +@Input sDevVAddrEnd End device virtual address + +@Input uiLog2DataPageSize Page size of the data pages + +@Return PVRSRV_OK if the allocation was successful + */ +/*****************************************************************************/ +static PVRSRV_ERROR +_AllocPageTables(MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR sDevVAddrStart, + IMG_DEV_VIRTADDR sDevVAddrEnd, + IMG_UINT32 uiLog2DataPageSize) +{ + PVRSRV_ERROR eError; + IMG_UINT32 auiStartArray[MMU_MAX_LEVEL]; + IMG_UINT32 auiEndArray[MMU_MAX_LEVEL]; + IMG_UINT32 auiEntriesPerPx[MMU_MAX_LEVEL]; + MMU_LEVEL aeMMULevel[MMU_MAX_LEVEL]; + const MMU_PxE_CONFIG *apsConfig[MMU_MAX_LEVEL]; + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; + IMG_HANDLE hPriv; + IMG_UINT32 ui32CurrentLevel = 0; + + PVR_DPF((PVR_DBG_ALLOC, + "_AllocPageTables: vaddr range: "IMG_DEV_VIRTADDR_FMTSPEC":"IMG_DEV_VIRTADDR_FMTSPEC, + sDevVAddrStart.uiAddr, + sDevVAddrEnd.uiAddr + )); + +#if defined(PDUMP) + PDUMPCOMMENT("Allocating page tables for %"IMG_UINT64_FMTSPEC" bytes virtual range: " + IMG_DEV_VIRTADDR_FMTSPEC":"IMG_DEV_VIRTADDR_FMTSPEC, + (IMG_UINT64)sDevVAddrEnd.uiAddr - (IMG_UINT64)sDevVAddrStart.uiAddr, + (IMG_UINT64)sDevVAddrStart.uiAddr, + 
(IMG_UINT64)sDevVAddrEnd.uiAddr); +#endif + + _MMU_GetLevelData(psMMUContext, sDevVAddrStart, sDevVAddrEnd, + (IMG_UINT32) uiLog2DataPageSize, auiStartArray, auiEndArray, + auiEntriesPerPx, apsConfig, aeMMULevel, + &psDevVAddrConfig, &hPriv); + + HTBLOGK(HTB_SF_MMU_PAGE_OP_ALLOC, + HTBLOG_U64_BITS_HIGH(sDevVAddrStart.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrStart.uiAddr), + HTBLOG_U64_BITS_HIGH(sDevVAddrEnd.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrEnd.uiAddr)); + + eError = _MMU_AllocLevel(psMMUContext, &psMMUContext->sBaseLevelInfo, + auiStartArray, auiEndArray, auiEntriesPerPx, + apsConfig, aeMMULevel, &ui32CurrentLevel, + auiStartArray[0], auiEndArray[0], + IMG_TRUE, IMG_TRUE, uiLog2DataPageSize); + + _MMU_PutLevelData(psMMUContext, hPriv); + + return eError; +} + +/*************************************************************************/ /*! +@Function _FreePageTables + +@Description Free page tables and any higher level MMU objects at are no + longer referenced for the specified virtual range. + This will fill the temporary free list of the MMU context which + needs cleanup after the call. 
+ +@Input psMMUContext MMU context to operate on + +@Input sDevVAddrStart Start device virtual address + +@Input sDevVAddrEnd End device virtual address + +@Input uiLog2DataPageSize Page size of the data pages + +@Return None + */ +/*****************************************************************************/ +static void _FreePageTables(MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR sDevVAddrStart, + IMG_DEV_VIRTADDR sDevVAddrEnd, + IMG_UINT32 uiLog2DataPageSize) +{ + IMG_UINT32 auiStartArray[MMU_MAX_LEVEL]; + IMG_UINT32 auiEndArray[MMU_MAX_LEVEL]; + IMG_UINT32 auiEntriesPerPx[MMU_MAX_LEVEL]; + MMU_LEVEL aeMMULevel[MMU_MAX_LEVEL]; + const MMU_PxE_CONFIG *apsConfig[MMU_MAX_LEVEL]; + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; + IMG_UINT32 ui32CurrentLevel = 0; + IMG_HANDLE hPriv; + + PVR_DPF((PVR_DBG_ALLOC, + "_FreePageTables: vaddr range: "IMG_DEV_VIRTADDR_FMTSPEC":"IMG_DEV_VIRTADDR_FMTSPEC, + sDevVAddrStart.uiAddr, + sDevVAddrEnd.uiAddr + )); + + _MMU_GetLevelData(psMMUContext, sDevVAddrStart, sDevVAddrEnd, + uiLog2DataPageSize, auiStartArray, auiEndArray, + auiEntriesPerPx, apsConfig, aeMMULevel, + &psDevVAddrConfig, &hPriv); + + HTBLOGK(HTB_SF_MMU_PAGE_OP_FREE, + HTBLOG_U64_BITS_HIGH(sDevVAddrStart.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrStart.uiAddr), + HTBLOG_U64_BITS_HIGH(sDevVAddrEnd.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrEnd.uiAddr)); + + /* ignoring return code, in this case there should be no references + * to the level anymore, and at this stage there is nothing to do with + * the return status */ + (void) _MMU_FreeLevel(psMMUContext, &psMMUContext->sBaseLevelInfo, + auiStartArray, auiEndArray, auiEntriesPerPx, + apsConfig, aeMMULevel, &ui32CurrentLevel, + auiStartArray[0], auiEndArray[0], + IMG_TRUE, IMG_TRUE, uiLog2DataPageSize); + + _MMU_PutLevelData(psMMUContext, hPriv); +} + + +/*************************************************************************/ /*! 
+@Function _MMU_GetPTInfo + +@Description Get the PT level information and PT entry index for the specified + virtual address + +@Input psMMUContext MMU context to operate on + +@Input psDevVAddr Device virtual address to get the PTE info + from. + +@Input psDevVAddrConfig The current virtual address config obtained + by another function call before. + +@Output psLevel Level info of the PT + +@Output pui32PTEIndex Index into the PT the address corresponds to + +@Return None + */ +/*****************************************************************************/ +static INLINE void _MMU_GetPTInfo(MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR sDevVAddr, + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig, + MMU_Levelx_INFO **psLevel, + IMG_UINT32 *pui32PTEIndex) +{ + MMU_Levelx_INFO *psLocalLevel = NULL; + MMU_LEVEL eMMULevel = psMMUContext->psDevAttrs->eTopLevel; + IMG_UINT32 uiPCEIndex; + IMG_UINT32 uiPDEIndex; + + if ((eMMULevel <= MMU_LEVEL_0) || (eMMULevel >= MMU_LEVEL_LAST)) + { + PVR_DPF((PVR_DBG_ERROR, "_MMU_GetPTEInfo: Invalid MMU level")); + psLevel = NULL; + return; + } + + for (; eMMULevel > MMU_LEVEL_0; eMMULevel--) + { + if (eMMULevel == MMU_LEVEL_3) + { + /* find the page directory containing the PCE */ + uiPCEIndex = _CalcPCEIdx (sDevVAddr, psDevVAddrConfig, + IMG_FALSE); + psLocalLevel = psMMUContext->sBaseLevelInfo.apsNextLevel[uiPCEIndex]; + } + + if (eMMULevel == MMU_LEVEL_2) + { + /* find the page table containing the PDE */ + uiPDEIndex = _CalcPDEIdx (sDevVAddr, psDevVAddrConfig, + IMG_FALSE); + if (psLocalLevel != NULL) + { + psLocalLevel = psLocalLevel->apsNextLevel[uiPDEIndex]; + } + else + { + psLocalLevel = + psMMUContext->sBaseLevelInfo.apsNextLevel[uiPDEIndex]; + } + } + + if (eMMULevel == MMU_LEVEL_1) + { + /* find PTE index into page table */ + *pui32PTEIndex = _CalcPTEIdx (sDevVAddr, psDevVAddrConfig, + IMG_FALSE); + if (psLocalLevel == NULL) + { + psLocalLevel = &psMMUContext->sBaseLevelInfo; + } + } + } + *psLevel = psLocalLevel; +} + 
+/*************************************************************************/ /*! +@Function _MMU_GetPTConfig + +@Description Get the level config. Call _MMU_PutPTConfig after use! + +@Input psMMUContext MMU context to operate on + +@Input uiLog2DataPageSize Log 2 of the page size + +@Output ppsConfig Config of the PTE + +@Output phPriv Private data handle to be passed back + when the info is put + +@Output ppsDevVAddrConfig Config of the device virtual addresses + +@Return None + */ +/*****************************************************************************/ +static INLINE void _MMU_GetPTConfig(MMU_CONTEXT *psMMUContext, + IMG_UINT32 uiLog2DataPageSize, + const MMU_PxE_CONFIG **ppsConfig, + IMG_HANDLE *phPriv, + const MMU_DEVVADDR_CONFIG **ppsDevVAddrConfig) +{ + MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs; + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; + const MMU_PxE_CONFIG *psPDEConfig; + const MMU_PxE_CONFIG *psPTEConfig; + + if (psDevAttrs->pfnGetPageSizeConfiguration(uiLog2DataPageSize, + &psPDEConfig, + &psPTEConfig, + &psDevVAddrConfig, + phPriv) != PVRSRV_OK) + { + /* + There should be no way we got here unless uiLog2DataPageSize + has changed after the MMU_Alloc call (in which case it's a bug in + the MM code) + */ + PVR_DPF((PVR_DBG_ERROR, "_MMU_GetPTConfig: Could not get valid page size config")); + PVR_ASSERT(0); + } + + *ppsConfig = psPTEConfig; + *ppsDevVAddrConfig = psDevVAddrConfig; +} + +/*************************************************************************/ /*! +@Function _MMU_PutPTConfig + +@Description Put the level info. Has to be called after _MMU_GetPTConfig to + ensure correct refcounting. + +@Input psMMUContext MMU context to operate on + +@Input phPriv Private data handle created by + _MMU_GetPTConfig. 
+ +@Return None + */ +/*****************************************************************************/ +static INLINE void _MMU_PutPTConfig(MMU_CONTEXT *psMMUContext, + IMG_HANDLE hPriv) +{ + MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs; + + if (psDevAttrs->pfnPutPageSizeConfiguration(hPriv) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Could not put page size config", + __func__)); + PVR_ASSERT(0); + } +} + + +/***************************************************************************** + * Public interface functions * + *****************************************************************************/ + +/* + MMU_ContextCreate + */ +PVRSRV_ERROR +MMU_ContextCreate(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + MMU_CONTEXT **ppsMMUContext, + MMU_DEVICEATTRIBS *psDevAttrs) +{ + MMU_CONTEXT *psMMUContext; + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; + const MMU_PxE_CONFIG *psConfig; + MMU_PHYSMEM_CONTEXT *psPhysMemCtx; + IMG_UINT32 ui32BaseObjects; + IMG_UINT32 ui32Size; + IMG_CHAR sBuf[40]; + PVRSRV_ERROR eError = PVRSRV_OK; + +#if defined(PDUMP) + PDUMPCOMMENT("MMU context create"); +#endif + + psConfig = psDevAttrs->psBaseConfig; + psDevVAddrConfig = psDevAttrs->psTopLevelDevVAddrConfig; + + switch (psDevAttrs->eTopLevel) + { + case MMU_LEVEL_3: + ui32BaseObjects = psDevVAddrConfig->uiNumEntriesPC; + break; + + case MMU_LEVEL_2: + ui32BaseObjects = psDevVAddrConfig->uiNumEntriesPD; + break; + + case MMU_LEVEL_1: + ui32BaseObjects = psDevVAddrConfig->uiNumEntriesPT; + break; + + default: + PVR_LOG_GOTO_WITH_ERROR("psDevAttrs->eTopLevel", eError, PVRSRV_ERROR_INVALID_PARAMS, e0); + } + + /* Allocate the MMU context with the Level 1 Px info's */ + ui32Size = sizeof(MMU_CONTEXT) + + ((ui32BaseObjects - 1) * sizeof(MMU_Levelx_INFO *)); + + psMMUContext = OSAllocZMem(ui32Size); + PVR_LOG_GOTO_IF_NOMEM(psMMUContext, eError, e0); + +#if defined(PDUMP) + /* Clear the refcount */ + psMMUContext->ui32PDumpContextIDRefCount = 0; +#endif + /* 
Record Device specific attributes in the context for subsequent use */ + psMMUContext->psDevAttrs = psDevAttrs; + + /* + Allocate physmem context and set it up + */ + psPhysMemCtx = OSAllocZMem(sizeof(MMU_PHYSMEM_CONTEXT)); + PVR_LOG_GOTO_IF_NOMEM(psPhysMemCtx, eError, e1); + + psMMUContext->psPhysMemCtx = psPhysMemCtx; + psMMUContext->psConnection = psConnection; + + psPhysMemCtx->psDevNode = psDevNode; /* Needed for Direct Bridge case */ + psPhysMemCtx->psMMUContext = psMMUContext; /* Back-link to self */ + +#if defined(SUPPORT_GPUVIRT_VALIDATION) + /* Save the app-specific values for external reference via MMU_GetOSids. */ + if (psConnection != NULL) + { + psPhysMemCtx->ui32OSid = psConnection->ui32OSid; + psPhysMemCtx->ui32OSidReg = psConnection->ui32OSidReg; + psPhysMemCtx->bOSidAxiProt = psConnection->bOSidAxiProtReg; + } + else + { + /* Direct Bridge calling sequence e.g. Firmware */ + psPhysMemCtx->ui32OSid = 0; + psPhysMemCtx->ui32OSidReg = 0; + psPhysMemCtx->bOSidAxiProt = IMG_FALSE; + } +#endif + + OSSNPrintf(sBuf, sizeof(sBuf), "pgtables %p", psPhysMemCtx); + psPhysMemCtx->uiPhysMemRANameAllocSize = OSStringLength(sBuf)+1; + psPhysMemCtx->pszPhysMemRAName = OSAllocMem(psPhysMemCtx->uiPhysMemRANameAllocSize); + PVR_LOG_GOTO_IF_NOMEM(psPhysMemCtx->pszPhysMemRAName, eError, e2); + + OSStringLCopy(psPhysMemCtx->pszPhysMemRAName, sBuf, psPhysMemCtx->uiPhysMemRANameAllocSize); + + psPhysMemCtx->psPhysMemRA = RA_Create(psPhysMemCtx->pszPhysMemRAName, + /* subsequent import */ + psDevNode->sDevMMUPxSetup.uiMMUPxLog2AllocGran, + RA_LOCKCLASS_1, + _MMU_PhysMem_RAImportAlloc, + _MMU_PhysMem_RAImportFree, + psPhysMemCtx, /* priv */ + RA_POLICY_DEFAULT); + if (psPhysMemCtx->psPhysMemRA == NULL) + { + OSFreeMem(psPhysMemCtx->pszPhysMemRAName); + psPhysMemCtx->pszPhysMemRAName = NULL; + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, e3); + } + + /* Setup cleanup meta data to check if a MMU context + * has been destroyed and should not be accessed anymore */ + 
psPhysMemCtx->psCleanupData = OSAllocMem(sizeof(*(psPhysMemCtx->psCleanupData))); + PVR_LOG_GOTO_IF_NOMEM(psPhysMemCtx->psCleanupData, eError, e4); + +#if defined(SUPPORT_GPUVIRT_VALIDATION) + /* Record the originating OSid for all allocation / free for this context */ + psPhysMemCtx->psCleanupData->ui32OSid = psPhysMemCtx->ui32OSid; +#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */ + OSLockCreate(&psPhysMemCtx->psCleanupData->hCleanupLock); + psPhysMemCtx->psCleanupData->bMMUContextExists = IMG_TRUE; + dllist_init(&psPhysMemCtx->psCleanupData->sMMUCtxCleanupItemsHead); + OSAtomicWrite(&psPhysMemCtx->psCleanupData->iRef, 1); + + /* allocate the base level object */ + /* + Note: Although this is not required by the this file until + the 1st allocation is made, a device specific callback + might request the base object address so we allocate + it up front. + */ + if (_PxMemAlloc(psMMUContext, + ui32BaseObjects, + psConfig, + psDevAttrs->eTopLevel, + &psMMUContext->sBaseLevelInfo.sMemDesc, + psDevAttrs->ui32BaseAlign)) + { + PVR_LOG_GOTO_WITH_ERROR("_PxMemAlloc", eError, PVRSRV_ERROR_OUT_OF_MEMORY, e5); + } + + dllist_init(&psMMUContext->psPhysMemCtx->sTmpMMUMappingHead); + + psMMUContext->sBaseLevelInfo.ui32NumOfEntries = ui32BaseObjects; + psMMUContext->sBaseLevelInfo.ui32RefCount = 0; + + eError = OSLockCreate(&psMMUContext->hLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e6); + + /* return context */ + *ppsMMUContext = psMMUContext; + + return PVRSRV_OK; + +e6: + _PxMemFree(psMMUContext, &psMMUContext->sBaseLevelInfo.sMemDesc, psDevAttrs->eTopLevel); +e5: + OSFreeMem(psPhysMemCtx->psCleanupData); +e4: + RA_Delete(psPhysMemCtx->psPhysMemRA); +e3: + OSFreeMem(psPhysMemCtx->pszPhysMemRAName); +e2: + OSFreeMem(psPhysMemCtx); +e1: + OSFreeMem(psMMUContext); +e0: + return eError; +} + +/* + MMU_ContextDestroy + */ +void +MMU_ContextDestroy (MMU_CONTEXT *psMMUContext) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PDLLIST_NODE psNode, psNextNode; + + 
PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)psMMUContext->psPhysMemCtx->psDevNode; + MMU_CTX_CLEANUP_DATA *psCleanupData = psMMUContext->psPhysMemCtx->psCleanupData; + + PVR_DPF((PVR_DBG_MESSAGE, "%s: Enter", __func__)); + + if (psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) + { + /* There should be no way to get here with live pages unless + there is a bug in this module or the MM code */ + PVR_ASSERT(psMMUContext->sBaseLevelInfo.ui32RefCount == 0); + } + + /* Cleanup lock must be acquired before MMUContext lock. Reverse order + * may lead to a deadlock and is reported by lockdep. */ + OSLockAcquire(psCleanupData->hCleanupLock); + OSLockAcquire(psMMUContext->hLock); + + /* Free the top level MMU object - will be put on defer free list. + * This has to be done before the step below that will empty the + * defer-free list. */ + _PxMemFree(psMMUContext, + &psMMUContext->sBaseLevelInfo.sMemDesc, + psMMUContext->psDevAttrs->eTopLevel); + + /* Empty the temporary defer-free list of Px */ + _FreeMMUMapping(psDevNode, &psMMUContext->psPhysMemCtx->sTmpMMUMappingHead); + PVR_ASSERT(dllist_is_empty(&psMMUContext->psPhysMemCtx->sTmpMMUMappingHead)); + + /* Empty the defer free list so the cleanup thread will + * not have to access any MMU context related structures anymore */ + dllist_foreach_node(&psCleanupData->sMMUCtxCleanupItemsHead, + psNode, + psNextNode) + { + MMU_CLEANUP_ITEM *psCleanup = IMG_CONTAINER_OF(psNode, + MMU_CLEANUP_ITEM, + sMMUCtxCleanupItem); + + _FreeMMUMapping(psDevNode, &psCleanup->sMMUMappingHead); + + dllist_remove_node(psNode); + } + PVR_ASSERT(dllist_is_empty(&psCleanupData->sMMUCtxCleanupItemsHead)); + + psCleanupData->bMMUContextExists = IMG_FALSE; + + /* Free physmem context */ + RA_Delete(psMMUContext->psPhysMemCtx->psPhysMemRA); + psMMUContext->psPhysMemCtx->psPhysMemRA = NULL; + OSFreeMem(psMMUContext->psPhysMemCtx->pszPhysMemRAName); + psMMUContext->psPhysMemCtx->pszPhysMemRAName = NULL; + + 
OSFreeMem(psMMUContext->psPhysMemCtx); + + OSLockRelease(psMMUContext->hLock); + + OSLockRelease(psCleanupData->hCleanupLock); + + if (OSAtomicDecrement(&psCleanupData->iRef) == 0) + { + OSLockDestroy(psCleanupData->hCleanupLock); + OSFreeMem(psCleanupData); + } + + OSLockDestroy(psMMUContext->hLock); + + /* free the context itself. */ + OSFreeMem(psMMUContext); + /*not nulling pointer, copy on stack*/ + + PVR_DPF((PVR_DBG_MESSAGE, "%s: Exit", __func__)); +} + +/* + MMU_Alloc + */ +PVRSRV_ERROR +MMU_Alloc (MMU_CONTEXT *psMMUContext, + IMG_DEVMEM_SIZE_T uSize, + IMG_DEVMEM_SIZE_T *puActualSize, + IMG_UINT32 uiProtFlags, + IMG_DEVMEM_SIZE_T uDevVAddrAlignment, + IMG_DEV_VIRTADDR *psDevVAddr, + IMG_UINT32 uiLog2PageSize) +{ + PVRSRV_ERROR eError; + IMG_DEV_VIRTADDR sDevVAddrEnd; + + const MMU_PxE_CONFIG *psPDEConfig; + const MMU_PxE_CONFIG *psPTEConfig; + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; + + MMU_DEVICEATTRIBS *psDevAttrs; + IMG_HANDLE hPriv; + +#if !defined(DEBUG) + PVR_UNREFERENCED_PARAMETER(uDevVAddrAlignment); +#endif + + PVR_DPF((PVR_DBG_MESSAGE, + "%s: uSize=" IMG_DEVMEM_SIZE_FMTSPEC + ", uiProtFlags=0x%x, align="IMG_DEVMEM_ALIGN_FMTSPEC, + __func__, uSize, uiProtFlags, uDevVAddrAlignment)); + + /* check params */ + PVR_LOG_RETURN_IF_INVALID_PARAM(psMMUContext, "psMMUContext"); + PVR_LOG_RETURN_IF_INVALID_PARAM(psDevVAddr, "psDevVAddr"); + PVR_LOG_RETURN_IF_INVALID_PARAM(puActualSize, "puActualSize"); + + psDevAttrs = psMMUContext->psDevAttrs; + + eError = psDevAttrs->pfnGetPageSizeConfiguration(uiLog2PageSize, + &psPDEConfig, + &psPTEConfig, + &psDevVAddrConfig, + &hPriv); + PVR_LOG_RETURN_IF_ERROR(eError, "pfnGetPageSizeConfiguration"); + + /* size and alignment must be datapage granular */ + if (((psDevVAddr->uiAddr & psDevVAddrConfig->uiPageOffsetMask) != 0) + || ((uSize & psDevVAddrConfig->uiPageOffsetMask) != 0)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: invalid address or size granularity", + __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + 
sDevVAddrEnd = *psDevVAddr; + sDevVAddrEnd.uiAddr += uSize; + + OSLockAcquire(psMMUContext->hLock); + eError = _AllocPageTables(psMMUContext, *psDevVAddr, sDevVAddrEnd, uiLog2PageSize); + OSLockRelease(psMMUContext->hLock); + + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "_AllocPageTables"); + return PVRSRV_ERROR_MMU_FAILED_TO_ALLOCATE_PAGETABLES; + } + + psDevAttrs->pfnPutPageSizeConfiguration(hPriv); + + return PVRSRV_OK; +} + +/* + MMU_Free + */ +void +MMU_Free (MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT32 uiLog2DataPageSize) +{ + IMG_DEV_VIRTADDR sDevVAddrEnd; + +#if defined(DEBUG) && defined(SUPPORT_VALIDATION) && defined(__linux__) + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + IMG_UINT32 ui32MMULeakMax = psPVRSRVData->sMemLeakIntervals.ui32MMU; + + mutex_lock(&g_sMMULeakMutex); + + g_ui32MMULeakCounter++; + if (ui32MMULeakMax && g_ui32MMULeakCounter >= ui32MMULeakMax) + { + g_ui32MMULeakCounter = 0; + mutex_unlock(&g_sMMULeakMutex); + + PVR_DPF((PVR_DBG_WARNING, + "%s: Skipped MMU free for address 0x%016" IMG_UINT64_FMTSPECx " to trigger memory leak.", + __func__, + sDevVAddr.uiAddr)); + return; + } + + mutex_unlock(&g_sMMULeakMutex); +#endif + + PVR_ASSERT(psMMUContext != NULL); + PVR_LOG_RETURN_VOID_IF_FALSE(psMMUContext != NULL, "psMMUContext"); + + PVR_DPF((PVR_DBG_MESSAGE, "%s: Freeing DevVAddr " IMG_DEV_VIRTADDR_FMTSPEC, + __func__, sDevVAddr.uiAddr)); + + /* ensure the address range to free is inside the heap */ + sDevVAddrEnd = sDevVAddr; + sDevVAddrEnd.uiAddr += uiSize; + + /* The Cleanup lock has to be taken before the MMUContext hLock to + * prevent deadlock scenarios. 
It is necessary only for parts of + * _SetupCleanup_FreeMMUMapping though.*/ + OSLockAcquire(psMMUContext->psPhysMemCtx->psCleanupData->hCleanupLock); + + OSLockAcquire(psMMUContext->hLock); + + _FreePageTables(psMMUContext, + sDevVAddr, + sDevVAddrEnd, + uiLog2DataPageSize); + + _SetupCleanup_FreeMMUMapping(psMMUContext->psPhysMemCtx); + + OSLockRelease(psMMUContext->hLock); + + OSLockRelease(psMMUContext->psPhysMemCtx->psCleanupData->hCleanupLock); + + return; +} + +PVRSRV_ERROR +MMU_MapPages(MMU_CONTEXT *psMMUContext, + PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, + IMG_DEV_VIRTADDR sDevVAddrBase, + PMR *psPMR, + IMG_UINT32 ui32PhysPgOffset, + IMG_UINT32 ui32MapPageCount, + IMG_UINT32 *paui32MapIndices, + IMG_UINT32 uiLog2HeapPageSize) +{ + PVRSRV_ERROR eError; + IMG_HANDLE hPriv; + + MMU_Levelx_INFO *psLevel = NULL; + + MMU_Levelx_INFO *psPrevLevel = NULL; + + IMG_UINT32 uiPTEIndex = 0; + IMG_UINT32 uiPageSize = (1 << uiLog2HeapPageSize); + IMG_UINT32 uiLoop = 0; + IMG_UINT32 ui32MappedCount = 0; + IMG_DEVMEM_OFFSET_T uiPgOffset = 0; + IMG_UINT32 uiFlushEnd = 0, uiFlushStart = 0; + + IMG_UINT64 uiProtFlags = 0, uiProtFlagsReadOnly = 0, uiDefProtFlags=0; + IMG_UINT64 uiDummyProtFlags = 0; + MMU_PROTFLAGS_T uiMMUProtFlags = 0; + + const MMU_PxE_CONFIG *psConfig; + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; + + IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase; + + IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC]; + IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC]; + IMG_DEV_PHYADDR *psDevPAddr; + IMG_DEV_PHYADDR sDevPAddr; + IMG_BOOL *pbValid; + IMG_BOOL bValid; + IMG_BOOL bDummyBacking = IMG_FALSE, bZeroBacking = IMG_FALSE; + IMG_BOOL bNeedBacking = IMG_FALSE; + PVRSRV_DEVICE_NODE *psDevNode; + +#if defined(PDUMP) + IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicAddress[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset; + + PDUMPCOMMENT("Wire up Page Table entries to point to the Data Pages 
(%"IMG_INT64_FMTSPECd" bytes)", + (IMG_UINT64)(ui32MapPageCount * uiPageSize)); +#endif /*PDUMP*/ + +#if defined(TC_MEMORY_CONFIG) || defined(PLATO_MEMORY_CONFIG) + /* We're aware that on TC based platforms, accesses from GPU to CPU_LOCAL + * allocated DevMem fail, so we forbid mapping such a PMR into device mmu */ + if (PVRSRV_CHECK_PHYS_HEAP(CPU_LOCAL, PMR_Flags(psPMR))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Mapping a CPU_LOCAL PMR to device is forbidden on this platform", __func__)); + return PVRSRV_ERROR_PMR_NOT_PERMITTED; + } +#endif + + /* Validate the most essential parameters */ + PVR_LOG_GOTO_IF_INVALID_PARAM(psMMUContext, eError, e0); + PVR_LOG_GOTO_IF_INVALID_PARAM(psPMR, eError, e0); + + psDevNode = psMMUContext->psPhysMemCtx->psDevNode; + + /* Allocate memory for page-frame-numbers and validity states, + N.B. assert could be triggered by an illegal uiSizeBytes */ + if (ui32MapPageCount > PMR_MAX_TRANSLATION_STACK_ALLOC) + { + psDevPAddr = OSAllocMem(ui32MapPageCount * sizeof(IMG_DEV_PHYADDR)); + PVR_LOG_GOTO_IF_NOMEM(psDevPAddr, eError, e0); + + pbValid = OSAllocMem(ui32MapPageCount * sizeof(IMG_BOOL)); + if (pbValid == NULL) + { + /* Should allocation fail, clean-up here before exit */ + OSFreeMem(psDevPAddr); + PVR_LOG_GOTO_WITH_ERROR("pbValid", eError, PVRSRV_ERROR_OUT_OF_MEMORY, e0); + } + } + else + { + psDevPAddr = asDevPAddr; + pbValid = abValid; + } + + /* Get the Device physical addresses of the pages we are trying to map + * In the case of non indexed mapping we can get all addresses at once */ + if (NULL == paui32MapIndices) + { + eError = PMR_DevPhysAddr(psPMR, + uiLog2HeapPageSize, + ui32MapPageCount, + ((IMG_DEVMEM_OFFSET_T) ui32PhysPgOffset << uiLog2HeapPageSize), + psDevPAddr, + pbValid); + PVR_GOTO_IF_ERROR(eError, e1); + } + + /*Get the Page table level configuration */ + _MMU_GetPTConfig(psMMUContext, + (IMG_UINT32) uiLog2HeapPageSize, + &psConfig, + &hPriv, + &psDevVAddrConfig); + + eError = _MMU_ConvertDevMemFlags(IMG_FALSE, + 
uiMappingFlags, + &uiMMUProtFlags, + psMMUContext); + PVR_GOTO_IF_ERROR(eError, e2); + + /* Callback to get device specific protection flags */ + if (psConfig->uiBytesPerEntry == 8) + { + uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2HeapPageSize); + uiMMUProtFlags |= MMU_PROTFLAGS_READABLE; + uiProtFlagsReadOnly = psMMUContext->psDevAttrs->pfnDerivePTEProt8((uiMMUProtFlags & ~MMU_PROTFLAGS_WRITEABLE), + uiLog2HeapPageSize); + } + else if (psConfig->uiBytesPerEntry == 4) + { + uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags); + uiMMUProtFlags |= MMU_PROTFLAGS_READABLE; + uiProtFlagsReadOnly = psMMUContext->psDevAttrs->pfnDerivePTEProt4((uiMMUProtFlags & ~MMU_PROTFLAGS_WRITEABLE)); + } + else + { + PVR_LOG_GOTO_WITH_ERROR("psConfig->uiBytesPerEntry", eError, PVRSRV_ERROR_INVALID_PARAMS, e2); + } + uiDummyProtFlags = uiProtFlags; + + if (PMR_IsSparse(psPMR)) + { + /* We know there will not be 4G number of PMR's */ + bDummyBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(PMR_Flags(psPMR)); + if (bDummyBacking) + { + bZeroBacking = PVRSRV_IS_SPARSE_ZERO_BACKING_REQUIRED(PMR_Flags(psPMR)); + } + + if (PVRSRV_CHECK_GPU_CACHE_COHERENT(uiMappingFlags)) + { + /* Obtain non-coherent protection flags as we cannot have multiple coherent + virtual pages pointing to the same physical page so all dummy page + mappings have to be non-coherent even in a coherent allocation */ + eError = _MMU_ConvertDevMemFlags(IMG_FALSE, + uiMappingFlags & ~PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT, + &uiMMUProtFlags, + psMMUContext); + PVR_GOTO_IF_ERROR(eError, e2); + + /* Callback to get device specific protection flags */ + if (psConfig->uiBytesPerEntry == 8) + { + uiDummyProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2HeapPageSize); + } + else if (psConfig->uiBytesPerEntry == 4) + { + uiDummyProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags); + } + else + { + PVR_DPF((PVR_DBG_ERROR, + 
"%s: The page table entry byte length is not supported", + __func__)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto e2; + } + } + } + + OSLockAcquire(psMMUContext->hLock); + + for (uiLoop = 0; uiLoop < ui32MapPageCount; uiLoop++) + { + +#if defined(PDUMP) + IMG_DEVMEM_OFFSET_T uiNextSymName; +#endif /*PDUMP*/ + + if (NULL != paui32MapIndices) + { + uiPgOffset = paui32MapIndices[uiLoop]; + + /*Calculate the Device Virtual Address of the page */ + sDevVAddr.uiAddr = sDevVAddrBase.uiAddr + (uiPgOffset * uiPageSize); + + /* Get the physical address to map */ + eError = PMR_DevPhysAddr(psPMR, + uiLog2HeapPageSize, + 1, + uiPgOffset * uiPageSize, + &sDevPAddr, + &bValid); + PVR_GOTO_IF_ERROR(eError, e3); + } + else + { + uiPgOffset = uiLoop + ui32PhysPgOffset; + sDevPAddr = psDevPAddr[uiLoop]; + bValid = pbValid[uiLoop]; + } + + uiDefProtFlags = uiProtFlags; + /* + The default value of the entry is invalid so we don't need to mark + it as such if the page wasn't valid, we just advance pass that address + */ + if (bValid || bDummyBacking) + { + if (!bValid) + { + if (bZeroBacking) + { + sDevPAddr.uiAddr = psDevNode->sDevZeroPage.ui64PgPhysAddr; + /* Ensure the zero back page PTE is read only */ + uiDefProtFlags = uiProtFlagsReadOnly; + } + else + { + sDevPAddr.uiAddr = psDevNode->sDummyPage.ui64PgPhysAddr; + } + } + else + { + /* check the physical alignment of the memory to map */ + PVR_ASSERT((sDevPAddr.uiAddr & (uiPageSize-1)) == 0); + } + +#if defined(DEBUG) + { + IMG_INT32 i32FeatureVal = 0; + IMG_UINT32 ui32BitLength = FloorLog2(sDevPAddr.uiAddr); + + i32FeatureVal = PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevNode, PHYS_BUS_WIDTH); + do { + /* i32FeatureVal can be negative for cases where this feature is undefined + * In that situation we need to bail out than go ahead with debug comparison */ + if (0 > i32FeatureVal) + break; + + if (ui32BitLength > i32FeatureVal) + { + PVR_DPF((PVR_DBG_ERROR, + "%s Failed. 
The physical address bitlength (%d)" + " is greater than the chip can handle (%d).", + __func__, ui32BitLength, i32FeatureVal)); + + PVR_ASSERT(ui32BitLength <= i32FeatureVal); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto e3; + } + } while (0); + } +#endif /*DEBUG*/ + +#if defined(PDUMP) + if (bValid) + { + eError = PMR_PDumpSymbolicAddr(psPMR, uiPgOffset * uiPageSize, + sizeof(aszMemspaceName), &aszMemspaceName[0], + sizeof(aszSymbolicAddress), &aszSymbolicAddress[0], + &uiSymbolicAddrOffset, + &uiNextSymName); + PVR_ASSERT(eError == PVRSRV_OK); + } +#endif /*PDUMP*/ + + psPrevLevel = psLevel; + /* Calculate PT index and get new table descriptor */ + _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig, + &psLevel, &uiPTEIndex); + + if (psPrevLevel == psLevel) + { + /* + * Sparse allocations may have page offsets which + * decrement as well as increment, so make sure we + * update the range we will flush correctly. + */ + if (uiPTEIndex > uiFlushEnd) + uiFlushEnd = uiPTEIndex; + else if (uiPTEIndex < uiFlushStart) + uiFlushStart = uiPTEIndex; + } + else + { + /* Flush if we moved to another psLevel, i.e. 
page table */ + if (psPrevLevel != NULL) + { + eError = psDevNode->sDevMMUPxSetup.pfnDevPxClean(psDevNode, + &psPrevLevel->sMemDesc.psMapping->sMemHandle, + uiFlushStart * psConfig->uiBytesPerEntry + psPrevLevel->sMemDesc.uiOffset, + (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry); + PVR_GOTO_IF_ERROR(eError, e3); + } + + uiFlushStart = uiPTEIndex; + uiFlushEnd = uiFlushStart; + } + + HTBLOGK(HTB_SF_MMU_PAGE_OP_MAP, + HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr), + HTBLOG_U64_BITS_HIGH(sDevPAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevPAddr.uiAddr)); + + /* Set the PT entry with the specified address and protection flags */ + eError = _SetupPTE(psMMUContext, + psLevel, + uiPTEIndex, + psConfig, + &sDevPAddr, + IMG_FALSE, +#if defined(PDUMP) + (bValid)?aszMemspaceName:(psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName), + ((bValid)?aszSymbolicAddress:((bZeroBacking)?DEV_ZERO_PAGE:DUMMY_PAGE)), + (bValid)?uiSymbolicAddrOffset:0, +#endif /*PDUMP*/ + uiDefProtFlags); + PVR_LOG_GOTO_IF_ERROR(eError, "_SetupPTE", e3); + + if (bValid) + { + PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries); + PVR_DPF ((PVR_DBG_MESSAGE, + "%s: devVAddr=" IMG_DEV_VIRTADDR_FMTSPEC ", " + "size=" IMG_DEVMEM_OFFSET_FMTSPEC, + __func__, + sDevVAddr.uiAddr, + uiPgOffset * uiPageSize)); + + ui32MappedCount++; + } + } + + sDevVAddr.uiAddr += uiPageSize; + } + + /* Flush the last level we touched */ + if (psLevel != NULL) + { + eError = psDevNode->sDevMMUPxSetup.pfnDevPxClean(psDevNode, + &psLevel->sMemDesc.psMapping->sMemHandle, + uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset, + (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry); + PVR_GOTO_IF_ERROR(eError, e3); + } + + OSLockRelease(psMMUContext->hLock); + + _MMU_PutPTConfig(psMMUContext, hPriv); + + if (psDevPAddr != asDevPAddr) + { + OSFreeMem(pbValid); + OSFreeMem(psDevPAddr); + } + + /* Flush TLB for PTs*/ + psDevNode->pfnMMUCacheInvalidate(psDevNode, + 
psMMUContext, + MMU_LEVEL_1, + IMG_FALSE); + +#if defined(PDUMP) + PDUMPCOMMENT("Wired up %d Page Table entries (out of %d)", ui32MappedCount, ui32MapPageCount); +#endif /*PDUMP*/ + + return PVRSRV_OK; + +e3: + OSLockRelease(psMMUContext->hLock); + + if (PMR_IsSparse(psPMR) && PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiMappingFlags)) + { + bNeedBacking = IMG_TRUE; + } + + MMU_UnmapPages(psMMUContext, + (bNeedBacking) ? uiMappingFlags : 0, + sDevVAddrBase, + uiLoop, + paui32MapIndices, + uiLog2HeapPageSize, + PMR_IsSparse(psPMR)); +e2: + _MMU_PutPTConfig(psMMUContext, hPriv); +e1: + if (psDevPAddr != asDevPAddr) + { + OSFreeMem(pbValid); + OSFreeMem(psDevPAddr); + } +e0: + return eError; +} + +/* + MMU_UnmapPages + */ +void +MMU_UnmapPages(MMU_CONTEXT *psMMUContext, + PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, + IMG_DEV_VIRTADDR sDevVAddrBase, + IMG_UINT32 ui32PageCount, + IMG_UINT32 *pai32FreeIndices, + IMG_UINT32 uiLog2PageSize, + PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags) +{ + IMG_UINT32 uiPTEIndex = 0, ui32Loop=0; + IMG_UINT32 uiPageSize = 1 << uiLog2PageSize; + IMG_UINT32 uiFlushEnd = 0, uiFlushStart = 0; + MMU_Levelx_INFO *psLevel = NULL; + MMU_Levelx_INFO *psPrevLevel = NULL; + IMG_HANDLE hPriv; + const MMU_PxE_CONFIG *psConfig; + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; + IMG_UINT64 uiProtFlags = 0, uiProtFlagsReadOnly = 0; + MMU_PROTFLAGS_T uiMMUProtFlags = 0, uiMMUReadOnlyProtFlags = 0; + IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase; + IMG_DEV_PHYADDR sBackingPgDevPhysAddr; + IMG_BOOL bUnmap = IMG_TRUE, bDummyBacking = IMG_FALSE, bZeroBacking = IMG_FALSE; + IMG_CHAR *pcBackingPageName = NULL; + PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; + +#if defined(PDUMP) + PDUMPCOMMENT("Invalidate %d entries in page tables for virtual range: 0x%010"IMG_UINT64_FMTSPECX" to 0x%010"IMG_UINT64_FMTSPECX, + ui32PageCount, + (IMG_UINT64)sDevVAddr.uiAddr, + ((IMG_UINT64)sDevVAddr.uiAddr) + (uiPageSize*ui32PageCount)-1); +#endif + bDummyBacking = 
PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiMemAllocFlags); + bZeroBacking = PVRSRV_IS_SPARSE_ZERO_BACKING_REQUIRED(uiMemAllocFlags); + + if (bZeroBacking) + { + sBackingPgDevPhysAddr.uiAddr = psDevNode->sDevZeroPage.ui64PgPhysAddr; + pcBackingPageName = DEV_ZERO_PAGE; + } + else + { + sBackingPgDevPhysAddr.uiAddr = psDevNode->sDummyPage.ui64PgPhysAddr; + pcBackingPageName = DUMMY_PAGE; + } + + bUnmap = (uiMappingFlags)? !bDummyBacking : IMG_TRUE; + /* Get PT and address configs */ + _MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2PageSize, + &psConfig, &hPriv, &psDevVAddrConfig); + + if (_MMU_ConvertDevMemFlags(bUnmap, + uiMappingFlags, + &uiMMUProtFlags, + psMMUContext) != PVRSRV_OK) + { + return; + } + + uiMMUReadOnlyProtFlags = (uiMMUProtFlags & ~MMU_PROTFLAGS_WRITEABLE) | MMU_PROTFLAGS_READABLE; + + /* Callback to get device specific protection flags */ + if (psConfig->uiBytesPerEntry == 4) + { + uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags); + uiProtFlagsReadOnly = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUReadOnlyProtFlags); + } + else if (psConfig->uiBytesPerEntry == 8) + { + uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2PageSize); + uiProtFlagsReadOnly = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUReadOnlyProtFlags, uiLog2PageSize); + } + + + OSLockAcquire(psMMUContext->hLock); + + /* Unmap page by page */ + while (ui32Loop < ui32PageCount) + { + if (NULL != pai32FreeIndices) + { + /*Calculate the Device Virtual Address of the page */ + sDevVAddr.uiAddr = sDevVAddrBase.uiAddr + + pai32FreeIndices[ui32Loop] * (IMG_UINT64) uiPageSize; + } + + psPrevLevel = psLevel; + /* Calculate PT index and get new table descriptor */ + _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig, + &psLevel, &uiPTEIndex); + + if (psPrevLevel == psLevel) + { + /* + * Sparse allocations may have page offsets which + * decrement as well as increment, so make sure we + * update the range we will flush 
correctly. + */ + if (uiPTEIndex > uiFlushEnd) + uiFlushEnd = uiPTEIndex; + else if (uiPTEIndex < uiFlushStart) + uiFlushStart = uiPTEIndex; + } + else + { + /* Flush if we moved to another psLevel, i.e. page table */ + if (psPrevLevel != NULL) + { + psDevNode->sDevMMUPxSetup.pfnDevPxClean(psDevNode, + &psPrevLevel->sMemDesc.psMapping->sMemHandle, + uiFlushStart * psConfig->uiBytesPerEntry + psPrevLevel->sMemDesc.uiOffset, + (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry); + } + + uiFlushStart = uiPTEIndex; + uiFlushEnd = uiFlushStart; + } + + HTBLOGK(HTB_SF_MMU_PAGE_OP_UNMAP, + HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr)); + + /* Set the PT entry to invalid and poison it with a bad address */ + if (_SetupPTE(psMMUContext, + psLevel, + uiPTEIndex, + psConfig, + (bDummyBacking)? &sBackingPgDevPhysAddr : &gsBadDevPhyAddr, + bUnmap, +#if defined(PDUMP) + (bDummyBacking)? (psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName): NULL, + (bDummyBacking)? pcBackingPageName: NULL, + 0U, +#endif + (bZeroBacking)? 
uiProtFlagsReadOnly: uiProtFlags) != PVRSRV_OK) + { + goto e0; + } + + /* Check we haven't wrapped around */ + PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries); + ui32Loop++; + sDevVAddr.uiAddr += uiPageSize; + } + + /* Flush the last level we touched */ + if (psLevel != NULL) + { + psDevNode->sDevMMUPxSetup.pfnDevPxClean(psDevNode, + &psLevel->sMemDesc.psMapping->sMemHandle, + uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset, + (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry); + } + + OSLockRelease(psMMUContext->hLock); + + _MMU_PutPTConfig(psMMUContext, hPriv); + + /* Flush TLB for PTs*/ + psDevNode->pfnMMUCacheInvalidate(psDevNode, + psMMUContext, + MMU_LEVEL_1, + IMG_TRUE); + + return; + +e0: + _MMU_PutPTConfig(psMMUContext, hPriv); + PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: Failed to map/unmap page table")); + PVR_ASSERT(0); + OSLockRelease(psMMUContext->hLock); + return; +} + +PVRSRV_ERROR +MMU_MapPMRFast (MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR sDevVAddrBase, + const PMR *psPMR, + IMG_DEVMEM_SIZE_T uiSizeBytes, + PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, + IMG_UINT32 uiLog2HeapPageSize) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 uiCount, i; + IMG_UINT32 uiPageSize = 1 << uiLog2HeapPageSize; + IMG_UINT32 uiPTEIndex = 0; + IMG_UINT64 uiProtFlags; + MMU_PROTFLAGS_T uiMMUProtFlags = 0; + MMU_Levelx_INFO *psLevel = NULL; + IMG_HANDLE hPriv; + const MMU_PxE_CONFIG *psConfig; + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; + IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase; + IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC]; + IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC]; + IMG_DEV_PHYADDR *psDevPAddr; + IMG_BOOL *pbValid; + IMG_UINT32 uiFlushStart = 0; + PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; + +#if defined(PDUMP) + IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicAddress[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T 
uiSymbolicAddrOffset; + IMG_UINT32 ui32MappedCount = 0; + PDUMPCOMMENT("Wire up Page Table entries to point to the Data Pages (%"IMG_INT64_FMTSPECd" bytes)", uiSizeBytes); +#endif /*PDUMP*/ + + /* We should verify the size and contiguity when supporting variable page size */ + + PVR_ASSERT (psMMUContext != NULL); + PVR_ASSERT (psPMR != NULL); + +#if defined(TC_MEMORY_CONFIG) || defined(PLATO_MEMORY_CONFIG) + /* We're aware that on TC based platforms, accesses from GPU to CPU_LOCAL + * allocated DevMem fail, so we forbid mapping such a PMR into device mmu */ + if (PVRSRV_CHECK_PHYS_HEAP(CPU_LOCAL, PMR_Flags(psPMR))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Mapping a CPU_LOCAL PMR to device is forbidden on this platform", __func__)); + return PVRSRV_ERROR_PMR_NOT_PERMITTED; + } +#endif + + /* Allocate memory for page-frame-numbers and validity states, + N.B. assert could be triggered by an illegal uiSizeBytes */ + uiCount = uiSizeBytes >> uiLog2HeapPageSize; + PVR_ASSERT((IMG_DEVMEM_OFFSET_T)uiCount << uiLog2HeapPageSize == uiSizeBytes); + if (uiCount > PMR_MAX_TRANSLATION_STACK_ALLOC) + { + psDevPAddr = OSAllocMem(uiCount * sizeof(IMG_DEV_PHYADDR)); + PVR_LOG_GOTO_IF_NOMEM(psDevPAddr, eError, return_error); + + pbValid = OSAllocMem(uiCount * sizeof(IMG_BOOL)); + if (pbValid == NULL) + { + /* Should allocation fail, clean-up here before exit */ + OSFreeMem(psDevPAddr); + PVR_LOG_GOTO_WITH_ERROR("pbValid", eError, PVRSRV_ERROR_OUT_OF_MEMORY, free_paddr_array); + } + } + else + { + psDevPAddr = asDevPAddr; + pbValid = abValid; + } + + /* Get general PT and address configs */ + _MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2HeapPageSize, + &psConfig, &hPriv, &psDevVAddrConfig); + + eError = _MMU_ConvertDevMemFlags(IMG_FALSE, + uiMappingFlags, + &uiMMUProtFlags, + psMMUContext); + PVR_GOTO_IF_ERROR(eError, put_mmu_context); + + /* Callback to get device specific protection flags */ + + if (psConfig->uiBytesPerEntry == 8) + { + uiProtFlags = 
psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2HeapPageSize); + } + else if (psConfig->uiBytesPerEntry == 4) + { + uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags); + } + else + { + PVR_LOG_GOTO_WITH_ERROR("psConfig->uiBytesPerEntry", eError, PVRSRV_ERROR_MMU_CONFIG_IS_WRONG, put_mmu_context); + } + + + /* "uiSize" is the amount of contiguity in the underlying + page. Normally this would be constant for the system, but, + that constant needs to be communicated, in case it's ever + different; caller guarantees that PMRLockSysPhysAddr() has + already been called */ + eError = PMR_DevPhysAddr(psPMR, + uiLog2HeapPageSize, + uiCount, + 0, + psDevPAddr, + pbValid); + PVR_GOTO_IF_ERROR(eError, put_mmu_context); + + OSLockAcquire(psMMUContext->hLock); + + _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig, + &psLevel, &uiPTEIndex); + uiFlushStart = uiPTEIndex; + + /* Map in all pages of that PMR page by page*/ + for (i=0, uiCount=0; uiCount < uiSizeBytes; i++) + { +#if defined(DEBUG) + { + IMG_INT32 i32FeatureVal = 0; + IMG_UINT32 ui32BitLength = FloorLog2(psDevPAddr[i].uiAddr); + i32FeatureVal = PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevNode, PHYS_BUS_WIDTH); + do { + if (0 > i32FeatureVal) + break; + + if (ui32BitLength > i32FeatureVal) + { + PVR_DPF((PVR_DBG_ERROR, + "%s Failed. 
The physical address bitlength (%d)" + " is greater than the chip can handle (%d).", + __func__, ui32BitLength, i32FeatureVal)); + + PVR_ASSERT(ui32BitLength <= i32FeatureVal); + OSLockRelease(psMMUContext->hLock); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, put_mmu_context); + } + } while (0); + } +#endif /*DEBUG*/ +#if defined(PDUMP) + { + IMG_DEVMEM_OFFSET_T uiNextSymName; + + eError = PMR_PDumpSymbolicAddr(psPMR, uiCount, + sizeof(aszMemspaceName), &aszMemspaceName[0], + sizeof(aszSymbolicAddress), &aszSymbolicAddress[0], + &uiSymbolicAddrOffset, + &uiNextSymName); + PVR_ASSERT(eError == PVRSRV_OK); + ui32MappedCount++; + } +#endif /*PDUMP*/ + + HTBLOGK(HTB_SF_MMU_PAGE_OP_PMRMAP, + HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr), + HTBLOG_U64_BITS_HIGH(psDevPAddr[i].uiAddr), HTBLOG_U64_BITS_LOW(psDevPAddr[i].uiAddr)); + + /* Set the PT entry with the specified address and protection flags */ + eError = _SetupPTE(psMMUContext, psLevel, uiPTEIndex, + psConfig, &psDevPAddr[i], IMG_FALSE, +#if defined(PDUMP) + aszMemspaceName, + aszSymbolicAddress, + uiSymbolicAddrOffset, +#endif /*PDUMP*/ + uiProtFlags); + PVR_GOTO_IF_ERROR(eError, unlock_mmu_context); + + sDevVAddr.uiAddr += uiPageSize; + uiCount += uiPageSize; + + /* Calculate PT index and get new table descriptor */ + if (uiPTEIndex < (psDevVAddrConfig->uiNumEntriesPT - 1) && (uiCount != uiSizeBytes)) + { + uiPTEIndex++; + } + else + { + eError = psDevNode->sDevMMUPxSetup.pfnDevPxClean(psDevNode, + &psLevel->sMemDesc.psMapping->sMemHandle, + uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset, + (uiPTEIndex+1 - uiFlushStart) * psConfig->uiBytesPerEntry); + PVR_GOTO_IF_ERROR(eError, unlock_mmu_context); + + + _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig, + &psLevel, &uiPTEIndex); + uiFlushStart = uiPTEIndex; + } + } + + OSLockRelease(psMMUContext->hLock); + + + _MMU_PutPTConfig(psMMUContext, hPriv); + + if (psDevPAddr != asDevPAddr) + { + 
OSFreeMem(pbValid); + OSFreeMem(psDevPAddr); + } + + /* Flush TLB for PTs*/ + psDevNode->pfnMMUCacheInvalidate(psDevNode, + psMMUContext, + MMU_LEVEL_1, + IMG_FALSE); + +#if defined(PDUMP) + PDUMPCOMMENT("Wired up %d Page Table entries (out of %d)", ui32MappedCount, i); +#endif /*PDUMP*/ + + return PVRSRV_OK; + +unlock_mmu_context: + OSLockRelease(psMMUContext->hLock); + MMU_UnmapPMRFast(psMMUContext, + sDevVAddrBase, + uiSizeBytes >> uiLog2HeapPageSize, + uiLog2HeapPageSize); + +put_mmu_context: + _MMU_PutPTConfig(psMMUContext, hPriv); + + if (pbValid != abValid) + { + OSFreeMem(pbValid); + } + +free_paddr_array: + if (psDevPAddr != asDevPAddr) + { + OSFreeMem(psDevPAddr); + } + +return_error: + PVR_ASSERT(eError == PVRSRV_OK); + return eError; +} + +/* + MMU_UnmapPages + */ +void +MMU_UnmapPMRFast(MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR sDevVAddrBase, + IMG_UINT32 ui32PageCount, + IMG_UINT32 uiLog2PageSize) +{ + IMG_UINT32 uiPTEIndex = 0, ui32Loop=0; + IMG_UINT32 uiPageSize = 1 << uiLog2PageSize; + MMU_Levelx_INFO *psLevel = NULL; + IMG_HANDLE hPriv; + const MMU_PxE_CONFIG *psConfig; + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; + IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase; + IMG_UINT64 uiProtFlags = 0; + MMU_PROTFLAGS_T uiMMUProtFlags = 0; + IMG_UINT64 uiEntry = 0; + IMG_UINT32 uiFlushStart = 0; + PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; + +#if defined(PDUMP) + PDUMPCOMMENT("Invalidate %d entries in page tables for virtual range: 0x%010"IMG_UINT64_FMTSPECX" to 0x%010"IMG_UINT64_FMTSPECX, + ui32PageCount, + (IMG_UINT64)sDevVAddr.uiAddr, + ((IMG_UINT64)sDevVAddr.uiAddr) + (uiPageSize*ui32PageCount)-1); +#endif + + /* Get PT and address configs */ + _MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2PageSize, + &psConfig, &hPriv, &psDevVAddrConfig); + + if (_MMU_ConvertDevMemFlags(IMG_TRUE, + 0, + &uiMMUProtFlags, + psMMUContext) != PVRSRV_OK) + { + return; + } + + /* Callback to get device specific protection flags */ + + if 
(psConfig->uiBytesPerEntry == 8) + { + uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2PageSize); + + /* Fill the entry with a bad address but leave space for protection flags */ + uiEntry = (gsBadDevPhyAddr.uiAddr & ~psConfig->uiProtMask) | uiProtFlags; + } + else if (psConfig->uiBytesPerEntry == 4) + { + uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags); + + /* Fill the entry with a bad address but leave space for protection flags */ + uiEntry = (((IMG_UINT32) gsBadDevPhyAddr.uiAddr) & ~psConfig->uiProtMask) | (IMG_UINT32) uiProtFlags; + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "%s: The page table entry byte length is not supported", + __func__)); + goto e0; + } + + OSLockAcquire(psMMUContext->hLock); + + _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig, + &psLevel, &uiPTEIndex); + uiFlushStart = uiPTEIndex; + + /* Unmap page by page and keep the loop as quick as possible. + * Only use parts of _SetupPTE that need to be executed. 
*/ + while (ui32Loop < ui32PageCount) + { + + /* Set the PT entry to invalid and poison it with a bad address */ + if (psConfig->uiBytesPerEntry == 8) + { + ((IMG_UINT64*)psLevel->sMemDesc.pvCpuVAddr)[uiPTEIndex] = uiEntry; + } + else if (psConfig->uiBytesPerEntry == 4) + { + ((IMG_UINT32*)psLevel->sMemDesc.pvCpuVAddr)[uiPTEIndex] = (IMG_UINT32) uiEntry; + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "%s: The page table entry byte length is not supported", + __func__)); + goto e1; + } + + /* Log modifications */ + HTBLOGK(HTB_SF_MMU_PAGE_OP_UNMAP, + HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr)); + + HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE, + HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel), + uiPTEIndex, MMU_LEVEL_1, + HTBLOG_U64_BITS_HIGH(uiEntry), HTBLOG_U64_BITS_LOW(uiEntry), + IMG_FALSE); + +#if defined(PDUMP) + PDumpMMUDumpPxEntries(MMU_LEVEL_1, + psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName, + psLevel->sMemDesc.pvCpuVAddr, + psLevel->sMemDesc.sDevPAddr, + uiPTEIndex, + 1, + NULL, + NULL, + 0, + psConfig->uiBytesPerEntry, + psConfig->uiAddrLog2Align, + psConfig->uiAddrShift, + psConfig->uiAddrMask, + psConfig->uiProtMask, + psConfig->uiValidEnMask, + 0, + psMMUContext->psDevAttrs->eMMUType); +#endif /*PDUMP*/ + + sDevVAddr.uiAddr += uiPageSize; + ui32Loop++; + + /* Calculate PT index and get new table descriptor */ + if (uiPTEIndex < (psDevVAddrConfig->uiNumEntriesPT - 1) && (ui32Loop != ui32PageCount)) + { + uiPTEIndex++; + } + else + { + psDevNode->sDevMMUPxSetup.pfnDevPxClean(psDevNode, + &psLevel->sMemDesc.psMapping->sMemHandle, + uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset, + (uiPTEIndex+1 - uiFlushStart) * psConfig->uiBytesPerEntry); + + _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig, + &psLevel, &uiPTEIndex); + uiFlushStart = uiPTEIndex; + } + } + + OSLockRelease(psMMUContext->hLock); + + _MMU_PutPTConfig(psMMUContext, hPriv); + + /* Flush TLB for PTs*/ + 
psDevNode->pfnMMUCacheInvalidate(psDevNode, + psMMUContext, + MMU_LEVEL_1, + IMG_TRUE); + + return; + +e1: + OSLockRelease(psMMUContext->hLock); + _MMU_PutPTConfig(psMMUContext, hPriv); +e0: + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to map/unmap page table", __func__)); + PVR_ASSERT(0); + return; +} + +/* + MMU_ChangeValidity + */ +PVRSRV_ERROR +MMU_ChangeValidity(MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiNumPages, + IMG_UINT32 uiLog2PageSize, + IMG_BOOL bMakeValid, + PMR *psPMR) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + IMG_HANDLE hPriv; + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; + const MMU_PxE_CONFIG *psConfig; + MMU_Levelx_INFO *psLevel = NULL; + IMG_UINT32 uiFlushStart = 0; + IMG_UINT32 uiPTIndex = 0; + IMG_UINT32 i; + IMG_UINT32 uiPageSize = 1 << uiLog2PageSize; + IMG_BOOL bValid; + + PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; + +#if defined(PDUMP) + IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicAddress[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset; + IMG_DEVMEM_OFFSET_T uiNextSymName; + + PDUMPCOMMENT("Change valid bit of the data pages to %d (0x%"IMG_UINT64_FMTSPECX" - 0x%"IMG_UINT64_FMTSPECX")", + bMakeValid, + sDevVAddr.uiAddr, + sDevVAddr.uiAddr + (uiNumPages<uiBytesPerEntry == 8) + { + ((IMG_UINT64 *)psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] |= (psConfig->uiValidEnMask); + } + else if (psConfig->uiBytesPerEntry == 4) + { + ((IMG_UINT32 *)psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] |= (psConfig->uiValidEnMask); + } + else + { + PVR_LOG_GOTO_WITH_ERROR("psConfig->uiBytesPerEntry", eError, PVRSRV_ERROR_MMU_CONFIG_IS_WRONG, e_exit); + } + } + } + else + { + if (psConfig->uiBytesPerEntry == 8) + { + ((IMG_UINT64 *)psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] &= ~(psConfig->uiValidEnMask); + } + else if (psConfig->uiBytesPerEntry == 4) + { + ((IMG_UINT32 *)psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] &= ~(psConfig->uiValidEnMask); + } + 
else + { + PVR_LOG_GOTO_WITH_ERROR("psConfig->uiBytesPerEntry", eError, PVRSRV_ERROR_MMU_CONFIG_IS_WRONG, e_exit); + } + } + +#if defined(PDUMP) + + PMR_PDumpSymbolicAddr(psPMR, i<psDevAttrs->pszMMUPxPDumpMemSpaceName, + psLevel->sMemDesc.pvCpuVAddr, + psLevel->sMemDesc.sDevPAddr, + uiPTIndex, + 1, + aszMemspaceName, + aszSymbolicAddress, + uiSymbolicAddrOffset, + psConfig->uiBytesPerEntry, + psConfig->uiAddrLog2Align, + psConfig->uiAddrShift, + psConfig->uiAddrMask, + psConfig->uiProtMask, + psConfig->uiValidEnMask, + 0, + psMMUContext->psDevAttrs->eMMUType); +#endif /*PDUMP*/ + + sDevVAddr.uiAddr += uiPageSize; + i++; + + /* Calculate PT index and get new table descriptor */ + if (uiPTIndex < (psDevVAddrConfig->uiNumEntriesPT - 1) && (i != uiNumPages)) + { + uiPTIndex++; + } + else + { + + eError = psDevNode->sDevMMUPxSetup.pfnDevPxClean(psDevNode, + &psLevel->sMemDesc.psMapping->sMemHandle, + uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset, + (uiPTIndex+1 - uiFlushStart) * psConfig->uiBytesPerEntry); + PVR_GOTO_IF_ERROR(eError, e_exit); + + _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig, + &psLevel, &uiPTIndex); + uiFlushStart = uiPTIndex; + } + } + +e_exit: + + _MMU_PutPTConfig(psMMUContext, hPriv); + + /* Flush TLB for PTs*/ + psDevNode->pfnMMUCacheInvalidate(psDevNode, + psMMUContext, + MMU_LEVEL_1, + !bMakeValid); + + PVR_ASSERT(eError == PVRSRV_OK); + return eError; +} + + +/* + MMU_AcquireBaseAddr + */ +PVRSRV_ERROR +MMU_AcquireBaseAddr(MMU_CONTEXT *psMMUContext, IMG_DEV_PHYADDR *psPhysAddr) +{ + if (!psMMUContext) + { + psPhysAddr->uiAddr = 0; + return PVRSRV_ERROR_INVALID_PARAMS; + } + + *psPhysAddr = psMMUContext->sBaseLevelInfo.sMemDesc.sDevPAddr; + + return PVRSRV_OK; +} + +/* + MMU_ReleaseBaseAddr + */ +void +MMU_ReleaseBaseAddr(MMU_CONTEXT *psMMUContext) +{ + PVR_UNREFERENCED_PARAMETER(psMMUContext); +} + +/* + MMU_AppendCacheFlags, MMU_ExchangeCacheFlags +*/ + +void MMU_AppendCacheFlags(MMU_CONTEXT *psMMUContext, 
IMG_UINT32 ui32AppendFlags) +{ + PVR_ASSERT(psMMUContext != NULL); + + if (psMMUContext == NULL) + { + return; + } + + OSAtomicOr(&psMMUContext->sCacheFlags, (IMG_INT)ui32AppendFlags); +} + +IMG_UINT32 MMU_ExchangeCacheFlags(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32NewCacheFlags) +{ + PVR_ASSERT(psMMUContext != NULL); + + if (psMMUContext == NULL) + { + return 0; + } + + return (IMG_UINT32)OSAtomicExchange(&psMMUContext->sCacheFlags, (IMG_INT)ui32NewCacheFlags); +} + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +/* + MMU_GetOSids + */ + +void MMU_GetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 *pui32OSid, IMG_UINT32 *pui32OSidReg, IMG_BOOL *pbOSidAxiProt) +{ + *pui32OSid = psMMUContext->psPhysMemCtx->ui32OSid; + *pui32OSidReg = psMMUContext->psPhysMemCtx->ui32OSidReg; + *pbOSidAxiProt = psMMUContext->psPhysMemCtx->bOSidAxiProt; + + return; +} + +#endif + +/* + MMU_CheckFaultAddress + */ +void MMU_CheckFaultAddress(MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR *psDevVAddr, + MMU_FAULT_DATA *psOutFaultData) +{ + /* Ideally the RGX defs should be via callbacks, but the function is only called from RGX. 
*/ +#if defined(SUPPORT_RGX) +# define MMU_VALID_STR(entry,level) \ + (apszMMUValidStr[((((entry)&(RGX_MMUCTRL_##level##_DATA_ENTRY_PENDING_EN))!=0) << 1)| \ + ((((entry)&(RGX_MMUCTRL_##level##_DATA_VALID_EN))!=0) << 0)]) + static const IMG_PCHAR apszMMUValidStr[1<<2] = {/*--*/ "not valid", + /*-V*/ "valid", + /*P-*/ "pending", + /*PV*/ "inconsistent (pending and valid)"}; +#else +# define MMU_VALID_STR(entry,level) ("??") +#endif + MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs; + MMU_LEVEL eMMULevel = psDevAttrs->eTopLevel; + const MMU_PxE_CONFIG *psConfig; + const MMU_PxE_CONFIG *psMMUPDEConfig; + const MMU_PxE_CONFIG *psMMUPTEConfig; + const MMU_DEVVADDR_CONFIG *psMMUDevVAddrConfig; + IMG_HANDLE hPriv; + MMU_Levelx_INFO *psLevel = NULL; + PVRSRV_ERROR eError; + IMG_UINT64 uiIndex; + IMG_UINT32 ui32PCIndex = 0xFFFFFFFF; + IMG_UINT32 ui32PDIndex = 0xFFFFFFFF; + IMG_UINT32 ui32PTIndex = 0xFFFFFFFF; + IMG_UINT32 ui32Log2PageSize; + MMU_FAULT_DATA sMMUFaultData = {0}; + MMU_LEVEL_DATA *psMMULevelData; + + OSLockAcquire(psMMUContext->hLock); + + /* + At this point we don't know the page size so assume it's 4K. + When we get the PD level (MMU_LEVEL_2) we can check to see + if this assumption is correct. 
+ */ + eError = psDevAttrs->pfnGetPageSizeConfiguration(12, + &psMMUPDEConfig, + &psMMUPTEConfig, + &psMMUDevVAddrConfig, + &hPriv); + if (eError != PVRSRV_OK) + { + PVR_LOG(("Failed to get the page size info for log2 page sizeof 12")); + } + + psLevel = &psMMUContext->sBaseLevelInfo; + psConfig = psDevAttrs->psBaseConfig; + + sMMUFaultData.eTopLevel = psDevAttrs->eTopLevel; + sMMUFaultData.eType = MMU_FAULT_TYPE_NON_PM; + + + for (; eMMULevel > MMU_LEVEL_0; eMMULevel--) + { + if (eMMULevel == MMU_LEVEL_3) + { + /* Determine the PC index */ + uiIndex = psDevVAddr->uiAddr & psDevAttrs->psTopLevelDevVAddrConfig->uiPCIndexMask; + uiIndex = uiIndex >> psDevAttrs->psTopLevelDevVAddrConfig->uiPCIndexShift; + ui32PCIndex = (IMG_UINT32) uiIndex; + PVR_ASSERT(uiIndex == ((IMG_UINT64) ui32PCIndex)); + + psMMULevelData = &sMMUFaultData.sLevelData[MMU_LEVEL_3]; + psMMULevelData->uiBytesPerEntry = psConfig->uiBytesPerEntry; + psMMULevelData->ui32Index = ui32PCIndex; + + if (ui32PCIndex >= psLevel->ui32NumOfEntries) + { + psMMULevelData->ui32NumOfEntries = psLevel->ui32NumOfEntries; + break; + } + + if (psConfig->uiBytesPerEntry == 4) + { + IMG_UINT32 *pui32Ptr = psLevel->sMemDesc.pvCpuVAddr; + + psMMULevelData->ui64Address = pui32Ptr[ui32PCIndex]; + psMMULevelData->psDebugStr = MMU_VALID_STR(pui32Ptr[ui32PCIndex] & psConfig->uiProtMask, PC); + + } + else + { + IMG_UINT64 *pui64Ptr = psLevel->sMemDesc.pvCpuVAddr; + + psMMULevelData->ui64Address = pui64Ptr[ui32PCIndex]; + psMMULevelData->psDebugStr = MMU_VALID_STR(pui64Ptr[ui32PCIndex] & psConfig->uiProtMask, PC); + + } + + psLevel = psLevel->apsNextLevel[ui32PCIndex]; + if (!psLevel) + { + break; + } + psConfig = psMMUPDEConfig; + continue; /* continue to the next level */ + } + + + if (eMMULevel == MMU_LEVEL_2) + { + /* Determine the PD index */ + uiIndex = psDevVAddr->uiAddr & psDevAttrs->psTopLevelDevVAddrConfig->uiPDIndexMask; + uiIndex = uiIndex >> psDevAttrs->psTopLevelDevVAddrConfig->uiPDIndexShift; + ui32PDIndex = 
(IMG_UINT32) uiIndex; + PVR_ASSERT(uiIndex == ((IMG_UINT64) ui32PDIndex)); + + psMMULevelData = &sMMUFaultData.sLevelData[MMU_LEVEL_2]; + psMMULevelData->uiBytesPerEntry = psConfig->uiBytesPerEntry; + psMMULevelData->ui32Index = ui32PDIndex; + + if (ui32PDIndex >= psLevel->ui32NumOfEntries) + { + psMMULevelData->ui32NumOfEntries = psLevel->ui32NumOfEntries; + break; + } + + if (psConfig->uiBytesPerEntry == 4) + { + IMG_UINT32 *pui32Ptr = psLevel->sMemDesc.pvCpuVAddr; + + psMMULevelData->ui64Address = pui32Ptr[ui32PDIndex]; + psMMULevelData->psDebugStr = MMU_VALID_STR(pui32Ptr[ui32PDIndex] & psMMUPDEConfig->uiProtMask, PD); + + + if (psDevAttrs->pfnGetPageSizeFromPDE4(pui32Ptr[ui32PDIndex], &ui32Log2PageSize) != PVRSRV_OK) + { + PVR_LOG(("Failed to get the page size from the PDE")); + } + } + else + { + IMG_UINT64 *pui64Ptr = psLevel->sMemDesc.pvCpuVAddr; + + psMMULevelData->ui64Address = pui64Ptr[ui32PDIndex]; + psMMULevelData->psDebugStr = MMU_VALID_STR(pui64Ptr[ui32PDIndex] & psMMUPDEConfig->uiProtMask, PD); + + if (psDevAttrs->pfnGetPageSizeFromVirtAddr != NULL) + { + /* MMU_VERSION >= 4 */ + if (psDevAttrs->pfnGetPageSizeFromVirtAddr(psMMUContext->psPhysMemCtx->psDevNode, *psDevVAddr, &ui32Log2PageSize) != PVRSRV_OK) + { + PVR_LOG(("Failed to get the page size from the virtual address")); + } + } + else if (psDevAttrs->pfnGetPageSizeFromPDE8(pui64Ptr[ui32PDIndex], &ui32Log2PageSize) != PVRSRV_OK) + { + PVR_LOG(("Failed to get the page size from the PDE")); + } + } + + /* + We assumed the page size was 4K, now we have the actual size + from the PDE we can confirm if our assumption was correct. 
+ Until now it hasn't mattered as the PC and PD are the same + regardless of the page size + */ + if (ui32Log2PageSize != 12) + { + /* Put the 4K page size data */ + psDevAttrs->pfnPutPageSizeConfiguration(hPriv); + + /* Get the correct size data */ + eError = psDevAttrs->pfnGetPageSizeConfiguration(ui32Log2PageSize, + &psMMUPDEConfig, + &psMMUPTEConfig, + &psMMUDevVAddrConfig, + &hPriv); + if (eError != PVRSRV_OK) + { + PVR_LOG(("Failed to get the page size info for log2 page sizeof %d", ui32Log2PageSize)); + break; + } + } + psLevel = psLevel->apsNextLevel[ui32PDIndex]; + if (!psLevel) + { + break; + } + psConfig = psMMUPTEConfig; + continue; /* continue to the next level */ + } + + + if (eMMULevel == MMU_LEVEL_1) + { + /* Determine the PT index */ + uiIndex = psDevVAddr->uiAddr & psMMUDevVAddrConfig->uiPTIndexMask; + uiIndex = uiIndex >> psMMUDevVAddrConfig->uiPTIndexShift; + ui32PTIndex = (IMG_UINT32) uiIndex; + PVR_ASSERT(uiIndex == ((IMG_UINT64) ui32PTIndex)); + + psMMULevelData = &sMMUFaultData.sLevelData[MMU_LEVEL_1]; + psMMULevelData->uiBytesPerEntry = psConfig->uiBytesPerEntry; + psMMULevelData->ui32Index = ui32PTIndex; + + if (ui32PTIndex >= psLevel->ui32NumOfEntries) + { + psMMULevelData->ui32NumOfEntries = psLevel->ui32NumOfEntries; + break; + } + + if (psConfig->uiBytesPerEntry == 4) + { + IMG_UINT32 *pui32Ptr = psLevel->sMemDesc.pvCpuVAddr; + + psMMULevelData->ui64Address = pui32Ptr[ui32PTIndex]; + psMMULevelData->psDebugStr = MMU_VALID_STR(pui32Ptr[ui32PTIndex] & psMMUPTEConfig->uiProtMask, PT); + + } + else + { + IMG_UINT64 *pui64Ptr = psLevel->sMemDesc.pvCpuVAddr; + + psMMULevelData->ui64Address = pui64Ptr[ui32PTIndex]; + psMMULevelData->psDebugStr = MMU_VALID_STR(pui64Ptr[ui32PTIndex] & psMMUPTEConfig->uiProtMask, PT); + + } + goto e1; + } + + PVR_LOG(("Unsupported MMU setup: %d", eMMULevel)); + break; + } + +e1: + /* Put the page size data back */ + psDevAttrs->pfnPutPageSizeConfiguration(hPriv); + OSLockRelease(psMMUContext->hLock); + + 
*psOutFaultData = sMMUFaultData; +} + +static IMG_UINT64 MMU_GetVDevAddrPTE(MMU_CONTEXT *psMMUContext, + const MMU_PxE_CONFIG *psConfig, + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig, + IMG_UINT32 uiLog2PageSize, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_BOOL *pbStatusOut) +{ + MMU_Levelx_INFO *psLevel = NULL; + IMG_UINT32 uiIndex = 0; + IMG_BOOL bStatus = IMG_FALSE; + IMG_UINT64 ui64Entry = 0; + + OSLockAcquire(psMMUContext->hLock); + + switch (psMMUContext->psDevAttrs->eTopLevel) + { + case MMU_LEVEL_3: + uiIndex = _CalcPCEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE); + psLevel = psMMUContext->sBaseLevelInfo.apsNextLevel[uiIndex]; + if (psLevel == NULL) + break; + + __fallthrough; + case MMU_LEVEL_2: + uiIndex = _CalcPDEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE); + + if (psLevel != NULL) + psLevel = psLevel->apsNextLevel[uiIndex]; + else + psLevel = psMMUContext->sBaseLevelInfo.apsNextLevel[uiIndex]; + + if (psLevel == NULL) + break; + + __fallthrough; + case MMU_LEVEL_1: + uiIndex = _CalcPTEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE); + + if (psLevel == NULL) + psLevel = &psMMUContext->sBaseLevelInfo; + + ui64Entry = ((IMG_UINT64 *)psLevel->sMemDesc.pvCpuVAddr)[uiIndex]; + bStatus = ui64Entry & psConfig->uiValidEnMask; + + break; + default: + PVR_LOG(("MMU_IsVDevAddrValid: Unsupported MMU setup")); + break; + } + + OSLockRelease(psMMUContext->hLock); + + *pbStatusOut = bStatus; + + return ui64Entry; +} + +IMG_BOOL MMU_IsVDevAddrValid(MMU_CONTEXT *psMMUContext, + IMG_UINT32 uiLog2PageSize, + IMG_DEV_VIRTADDR sDevVAddr) +{ + IMG_BOOL bStatus; + const MMU_PxE_CONFIG *psConfig; + IMG_HANDLE hPriv; + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; + + _MMU_GetPTConfig(psMMUContext, uiLog2PageSize, &psConfig, &hPriv, &psDevVAddrConfig); + + MMU_GetVDevAddrPTE(psMMUContext, + psConfig, + psDevVAddrConfig, + uiLog2PageSize, + sDevVAddr, + &bStatus); + + _MMU_PutPTConfig(psMMUContext, hPriv); + + return bStatus; +} + +#if defined(PDUMP) +/* + MMU_ContextDerivePCPDumpSymAddr + */ 
+PVRSRV_ERROR MMU_ContextDerivePCPDumpSymAddr(MMU_CONTEXT *psMMUContext, + IMG_CHAR *pszPDumpSymbolicNameBuffer, + size_t uiPDumpSymbolicNameBufferSize) +{ + size_t uiCount; + IMG_UINT64 ui64PhysAddr; + PVRSRV_DEVICE_IDENTIFIER *psDevId = &psMMUContext->psPhysMemCtx->psDevNode->sDevId; + + if (!psMMUContext->sBaseLevelInfo.sMemDesc.bValid) + { + /* We don't have any allocations. You're not allowed to ask + * for the page catalogue base address until you've made at + * least one allocation. + */ + return PVRSRV_ERROR_MMU_API_PROTOCOL_ERROR; + } + + ui64PhysAddr = (IMG_UINT64)psMMUContext->sBaseLevelInfo.sMemDesc.sDevPAddr.uiAddr; + + PVR_ASSERT(uiPDumpSymbolicNameBufferSize >= (IMG_UINT32)(21 + OSStringLength(psDevId->pszPDumpDevName))); + + /* Page table Symbolic Name is formed from page table phys addr + prefixed with MMUPT_. */ + uiCount = OSSNPrintf(pszPDumpSymbolicNameBuffer, + uiPDumpSymbolicNameBufferSize, + ":%s:%s%016"IMG_UINT64_FMTSPECX, + psDevId->pszPDumpDevName, + psMMUContext->sBaseLevelInfo.sMemDesc.bValid?"MMUPC_":"XXX", + ui64PhysAddr); + + if (uiCount + 1 > uiPDumpSymbolicNameBufferSize) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + return PVRSRV_OK; +} + +/* + MMU_PDumpWritePageCatBase + */ +PVRSRV_ERROR +MMU_PDumpWritePageCatBase(MMU_CONTEXT *psMMUContext, + const IMG_CHAR *pszSpaceName, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT32 ui32WordSize, + IMG_UINT32 ui32AlignShift, + IMG_UINT32 ui32Shift, + PDUMP_FLAGS_T uiPdumpFlags) +{ + PVRSRV_ERROR eError; + IMG_CHAR aszPageCatBaseSymbolicAddr[100]; + const IMG_CHAR *pszPDumpDevName = psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName; + + eError = MMU_ContextDerivePCPDumpSymAddr(psMMUContext, + &aszPageCatBaseSymbolicAddr[0], + sizeof(aszPageCatBaseSymbolicAddr)); + if (eError == PVRSRV_OK) + { + eError = PDumpWriteSymbAddress(pszSpaceName, + uiOffset, + aszPageCatBaseSymbolicAddr, + 0, /* offset -- Could be non-zero for var. 
pgsz */ + pszPDumpDevName, + ui32WordSize, + ui32AlignShift, + ui32Shift, + uiPdumpFlags | PDUMP_FLAGS_CONTINUOUS); + } + + return eError; +} + +/* + MMU_AcquirePDumpMMUContext + */ +PVRSRV_ERROR MMU_AcquirePDumpMMUContext(MMU_CONTEXT *psMMUContext, + IMG_UINT32 *pui32PDumpMMUContextID, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_DEVICE_IDENTIFIER *psDevId = &psMMUContext->psPhysMemCtx->psDevNode->sDevId; + + if (!psMMUContext->ui32PDumpContextIDRefCount) + { + PDUMP_MMU_ALLOC_MMUCONTEXT(psDevId->pszPDumpDevName, + psMMUContext->sBaseLevelInfo.sMemDesc.sDevPAddr, + psMMUContext->psDevAttrs->eMMUType, + &psMMUContext->uiPDumpContextID, + ui32PDumpFlags); + } + + psMMUContext->ui32PDumpContextIDRefCount++; + *pui32PDumpMMUContextID = psMMUContext->uiPDumpContextID; + + return PVRSRV_OK; +} + +/* + MMU_ReleasePDumpMMUContext + */ +PVRSRV_ERROR MMU_ReleasePDumpMMUContext(MMU_CONTEXT *psMMUContext, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_DEVICE_IDENTIFIER *psDevId = &psMMUContext->psPhysMemCtx->psDevNode->sDevId; + + PVR_ASSERT(psMMUContext->ui32PDumpContextIDRefCount != 0); + psMMUContext->ui32PDumpContextIDRefCount--; + + if (psMMUContext->ui32PDumpContextIDRefCount == 0) + { + PDUMP_MMU_FREE_MMUCONTEXT(psDevId->pszPDumpDevName, + psMMUContext->uiPDumpContextID, + ui32PDumpFlags); + } + + return PVRSRV_OK; +} +#endif + +/****************************************************************************** + End of file (mmu_common.c) + ******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/mmu_common.h b/drivers/gpu/drm/phytium/octopus/mmu_common.h new file mode 100644 index 000000000000..9b1987db86ce --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/mmu_common.h @@ -0,0 +1,776 @@ +/*************************************************************************/ /*! +@File +@Title Common MMU Management +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Description Implements basic low level control of MMU. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef MMU_COMMON_H +#define MMU_COMMON_H + +/* + The Memory Management Unit (MMU) performs device virtual to physical + translation. + + Terminology: + - page catalogue, PC (optional, 3 tier MMU) + - page directory, PD + - page table, PT (can be variable sized) + - data page, DP (can be variable sized) + Note: PD and PC are fixed size and can't be larger than the native + physical (CPU) page size + Shifts and AlignShift variables: + - 'xxxShift' represent the number of bits a bitfield is shifted left from bit0 + - 'xxxAlignShift' is used to convert a bitfield (based at bit0) into byte units + by applying a bit shift left by 'xxxAlignShift' bits +*/ + +/* + Device Virtual Address Config: + + Incoming Device Virtual Address is deconstructed into up to 4 + fields, where the virtual address is up to 64bits: + MSB-----------------------------------------------LSB + | PC Index: | PD Index: | PT Index: | DP offset: | + | d bits | c bits | b-v bits | a+v bits | + ----------------------------------------------------- + where v is the variable page table modifier, e.g. 
+ v == 0 -> 4KB DP + v == 2 -> 16KB DP + v == 4 -> 64KB DP + v == 6 -> 256KB DP + v == 8 -> 1MB DP + v == 10 -> 4MB DP +*/ + +/* services/server/include/ */ +#include "pmr.h" + +/* include/ */ +#include "img_types.h" +#include "img_defs.h" +#include "pvr_notifier.h" +#include "pvrsrv_error.h" +#include "servicesext.h" + + +/*! + The level of the MMU +*/ +typedef enum +{ + MMU_LEVEL_0 = 0, /* Level 0 = Page */ + + MMU_LEVEL_1, + MMU_LEVEL_2, + MMU_LEVEL_3, + MMU_LEVEL_LAST +} MMU_LEVEL; + +/* moved after declaration of MMU_LEVEL, as pdump_mmu.h references it */ +#include "pdump_mmu.h" + +#define MMU_MAX_LEVEL 3 + +typedef struct _MMU_LEVEL_DATA_ +{ + IMG_UINT32 ui32Index; + IMG_UINT32 ui32NumOfEntries; + IMG_CHAR const *psDebugStr; + IMG_UINT8 uiBytesPerEntry; + IMG_UINT64 ui64Address; +} MMU_LEVEL_DATA; + +typedef enum _MMU_FAULT_TYPE_ +{ + MMU_FAULT_TYPE_UNKNOWN = 0, /* If fault is not analysed by Host */ + MMU_FAULT_TYPE_PM, + MMU_FAULT_TYPE_NON_PM, +} MMU_FAULT_TYPE; + +typedef struct _MMU_FAULT_DATA_ +{ + MMU_LEVEL eTopLevel; + MMU_FAULT_TYPE eType; + MMU_LEVEL_DATA sLevelData[MMU_LEVEL_LAST]; +} MMU_FAULT_DATA; + +struct _MMU_DEVVADDR_CONFIG_; + +/*! + MMU device attributes. This structure is the interface between the generic + MMU code and the device specific MMU code. +*/ +typedef struct _MMU_DEVICEATTRIBS_ +{ + PDUMP_MMU_TYPE eMMUType; + + IMG_CHAR *pszMMUPxPDumpMemSpaceName; + + /*! The type of the top level object */ + MMU_LEVEL eTopLevel; + + /*! Alignment requirement of the base object */ + IMG_UINT32 ui32BaseAlign; + + /*! HW config of the base object */ + struct _MMU_PxE_CONFIG_ *psBaseConfig; + + /*! Address split for the base object */ + const struct _MMU_DEVVADDR_CONFIG_ *psTopLevelDevVAddrConfig; + + /*! Callback for creating protection bits for the page catalogue entry with 8 byte entry */ + IMG_UINT64 (*pfnDerivePCEProt8)(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); + /*! 
Callback for creating protection bits for the page catalogue entry with 4 byte entry */ + IMG_UINT32 (*pfnDerivePCEProt4)(IMG_UINT32 uiProtFlags); + /*! Callback for creating protection bits for the page directory entry with 8 byte entry */ + IMG_UINT64 (*pfnDerivePDEProt8)(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); + /*! Callback for creating protection bits for the page directory entry with 4 byte entry */ + IMG_UINT32 (*pfnDerivePDEProt4)(IMG_UINT32 uiProtFlags); + /*! Callback for creating protection bits for the page table entry with 8 byte entry */ + IMG_UINT64 (*pfnDerivePTEProt8)(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); + /*! Callback for creating protection bits for the page table entry with 4 byte entry */ + IMG_UINT32 (*pfnDerivePTEProt4)(IMG_UINT32 uiProtFlags); + + /*! Callback for getting the MMU configuration based on the specified page size */ + PVRSRV_ERROR (*pfnGetPageSizeConfiguration)(IMG_UINT32 ui32DataPageSize, + const struct _MMU_PxE_CONFIG_ **ppsMMUPDEConfig, + const struct _MMU_PxE_CONFIG_ **ppsMMUPTEConfig, + const struct _MMU_DEVVADDR_CONFIG_ **ppsMMUDevVAddrConfig, + IMG_HANDLE *phPriv2); + /*! Callback for putting the MMU configuration obtained from pfnGetPageSizeConfiguration */ + PVRSRV_ERROR (*pfnPutPageSizeConfiguration)(IMG_HANDLE hPriv); + + /*! Callback for getting the page size from the PDE for the page table entry with 4 byte entry */ + PVRSRV_ERROR (*pfnGetPageSizeFromPDE4)(IMG_UINT32, IMG_UINT32 *); + /*! Callback for getting the page size from the PDE for the page table entry with 8 byte entry */ + PVRSRV_ERROR (*pfnGetPageSizeFromPDE8)(IMG_UINT64, IMG_UINT32 *); + /*! Callback for getting the page size directly from the address. Supported on MMU4 */ + PVRSRV_ERROR (*pfnGetPageSizeFromVirtAddr)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_DEV_VIRTADDR, IMG_UINT32 *); + + /*! Private data handle */ + IMG_HANDLE hGetPageSizeFnPriv; +} MMU_DEVICEATTRIBS; + +/*! 
+ MMU virtual address split +*/ +typedef struct _MMU_DEVVADDR_CONFIG_ +{ + /*! Page catalogue index mask */ + IMG_UINT64 uiPCIndexMask; + /*! Page catalogue index shift */ + IMG_UINT8 uiPCIndexShift; + /*! Total number of PC entries */ + IMG_UINT32 uiNumEntriesPC; + /*! Page directory mask */ + IMG_UINT64 uiPDIndexMask; + /*! Page directory shift */ + IMG_UINT8 uiPDIndexShift; + /*! Total number of PD entries */ + IMG_UINT32 uiNumEntriesPD; + /*! Page table mask */ + IMG_UINT64 uiPTIndexMask; + /*! Page index shift */ + IMG_UINT8 uiPTIndexShift; + /*! Total number of PT entries */ + IMG_UINT32 uiNumEntriesPT; + /*! Page offset mask */ + IMG_UINT64 uiPageOffsetMask; + /*! Page offset shift */ + IMG_UINT8 uiPageOffsetShift; + /*! First virtual address mappable for this config */ + IMG_UINT64 uiOffsetInBytes; + +} MMU_DEVVADDR_CONFIG; + +/* + P(C/D/T) Entry Config: + + MSB-----------------------------------------------LSB + | PT Addr: | variable PT ctrl | protection flags: | + | bits c+v | b bits | a bits | + ----------------------------------------------------- + where v is the variable page table modifier and is optional +*/ +/*! + Generic MMU entry description. This is used to describe PC, PD and PT entries. +*/ +typedef struct _MMU_PxE_CONFIG_ +{ + IMG_UINT8 uiBytesPerEntry; /*! Size of an entry in bytes */ + + IMG_UINT64 uiAddrMask; /*! Physical address mask */ + IMG_UINT8 uiAddrShift; /*! Physical address shift */ + IMG_UINT8 uiAddrLog2Align; /*! Physical address Log 2 alignment */ + + IMG_UINT64 uiVarCtrlMask; /*! Variable control mask */ + IMG_UINT8 uiVarCtrlShift; /*! Variable control shift */ + + IMG_UINT64 uiProtMask; /*! Protection flags mask */ + IMG_UINT8 uiProtShift; /*! Protection flags shift */ + + IMG_UINT64 uiValidEnMask; /*! Entry valid bit mask */ + IMG_UINT8 uiValidEnShift; /*! 
Entry valid bit shift */ +} MMU_PxE_CONFIG; + +/* MMU Protection flags */ + + +/* These are specified generically and in a h/w independent way, and + are interpreted at each level (PC/PD/PT) separately. */ + +/* The following flags are for internal use only, and should not + traverse the API */ +#define MMU_PROTFLAGS_INVALID 0x80000000U + +typedef IMG_UINT32 MMU_PROTFLAGS_T; + +/* The following flags should be supplied by the caller: */ +#define MMU_PROTFLAGS_READABLE (1U<<0) +#define MMU_PROTFLAGS_WRITEABLE (1U<<1) +#define MMU_PROTFLAGS_CACHE_COHERENT (1U<<2) +#define MMU_PROTFLAGS_CACHED (1U<<3) + +/* Device specific flags*/ +#define MMU_PROTFLAGS_DEVICE_OFFSET 16 +#define MMU_PROTFLAGS_DEVICE_MASK 0x000f0000UL +#define MMU_PROTFLAGS_DEVICE(n) \ + (((n) << MMU_PROTFLAGS_DEVICE_OFFSET) & \ + MMU_PROTFLAGS_DEVICE_MASK) + + +typedef struct _MMU_CONTEXT_ MMU_CONTEXT; + +struct _PVRSRV_DEVICE_NODE_; + +struct _CONNECTION_DATA_; + +typedef struct _MMU_PAGESIZECONFIG_ +{ + const MMU_PxE_CONFIG *psPDEConfig; + const MMU_PxE_CONFIG *psPTEConfig; + const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; + IMG_UINT32 uiRefCount; + IMG_UINT32 uiMaxRefCount; +} MMU_PAGESIZECONFIG; + +/*************************************************************************/ /*! +@Function MMU_ContextCreate + +@Description Create a new MMU context + +@Input psConnection Connection requesting the MMU context + creation. Can be NULL for kernel/FW + memory context. +@Input psDevNode Device node of the device to create the + MMU context for +@Output ppsMMUContext The created MMU context + +@Return PVRSRV_OK if the MMU context was successfully created +*/ +/*****************************************************************************/ +PVRSRV_ERROR +MMU_ContextCreate(struct _CONNECTION_DATA_ *psConnection, + struct _PVRSRV_DEVICE_NODE_ *psDevNode, + MMU_CONTEXT **ppsMMUContext, + MMU_DEVICEATTRIBS *psDevAttrs); + + +/*************************************************************************/ /*! 
+@Function MMU_ContextDestroy + +@Description Destroy a MMU context + +@Input psMMUContext MMU context to destroy + +@Return None +*/ +/*****************************************************************************/ +void +MMU_ContextDestroy(MMU_CONTEXT *psMMUContext); + +/*************************************************************************/ /*! +@Function MMU_Alloc + +@Description Allocate the page tables required for the specified virtual range + +@Input psMMUContext MMU context to operate on + +@Input uSize The size of the allocation + +@Output puActualSize Actual size of allocation + +@Input uiProtFlags Generic MMU protection flags + +@Input uDevVAddrAlignment Alignment requirement of the virtual + allocation + +@Input psDevVAddr Virtual address to start the allocation + from + +@Return PVRSRV_OK if the allocation of the page tables was successful +*/ +/*****************************************************************************/ +PVRSRV_ERROR +MMU_Alloc(MMU_CONTEXT *psMMUContext, + IMG_DEVMEM_SIZE_T uSize, + IMG_DEVMEM_SIZE_T *puActualSize, + IMG_UINT32 uiProtFlags, + IMG_DEVMEM_SIZE_T uDevVAddrAlignment, + IMG_DEV_VIRTADDR *psDevVAddr, + IMG_UINT32 uiLog2PageSize); + + +/*************************************************************************/ /*! +@Function MMU_Free + +@Description Free the page tables of the specified virtual range + +@Input psMMUContext MMU context to operate on + +@Input sDevVAddr Virtual address to start the free + from + +@Input uiSize The size of the allocation + +@Return None +*/ +/*****************************************************************************/ +void +MMU_Free(MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT32 uiLog2DataPageSize); + + +/*************************************************************************/ /*! +@Function MMU_MapPages + +@Description Map pages to the MMU. 
+ Two modes of operation: One requires a list of physical page + indices that are going to be mapped, the other just takes + the PMR and a possible offset to map parts of it. + +@Input psMMUContext MMU context to operate on + +@Input uiMappingFlags Memalloc flags for the mapping + +@Input sDevVAddrBase Device virtual address of the 1st page + +@Input psPMR PMR to map + +@Input ui32PhysPgOffset Physical offset into the PMR + +@Input ui32MapPageCount Number of pages to map + +@Input paui32MapIndices List of page indices to map, + can be NULL + +@Input uiLog2PageSize Log2 page size of the pages to map + +@Return PVRSRV_OK if the mapping was successful +*/ +/*****************************************************************************/ +PVRSRV_ERROR +MMU_MapPages(MMU_CONTEXT *psMMUContext, + PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, + IMG_DEV_VIRTADDR sDevVAddrBase, + PMR *psPMR, + IMG_UINT32 ui32PhysPgOffset, + IMG_UINT32 ui32MapPageCount, + IMG_UINT32 *paui32MapIndices, + IMG_UINT32 uiLog2PageSize); + +/*************************************************************************/ /*! +@Function MMU_UnmapPages + +@Description Unmap pages from the MMU. 
+ +@Input psMMUContext MMU context to operate on + +@Input uiMappingFlags Memalloc flags for the mapping + +@Input sDevVAddr Device virtual address of the 1st page + +@Input ui32PageCount Number of pages to unmap + +@Input pai32UnmapIndicies Array of page indices to be unmapped + +@Input uiLog2PageSize log2 size of the page + + +@Input uiMemAllocFlags Indicates if the unmapped regions need + to be backed by dummy or zero page + +@Return None +*/ +/*****************************************************************************/ +void +MMU_UnmapPages(MMU_CONTEXT *psMMUContext, + PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_UINT32 ui32PageCount, + IMG_UINT32 *pai32UnmapIndicies, + IMG_UINT32 uiLog2PageSize, + PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags); + +/*************************************************************************/ /*! +@Function MMU_MapPMRFast + +@Description Map a PMR into the MMU. Must be not sparse. + This is supposed to cover most mappings and, as the name suggests, + should be as fast as possible. + +@Input psMMUContext MMU context to operate on + +@Input sDevVAddr Device virtual address to map the PMR + into + +@Input psPMR PMR to map + +@Input uiSizeBytes Size in bytes to map + +@Input uiMappingFlags Memalloc flags for the mapping + +@Return PVRSRV_OK if the PMR was successfully mapped +*/ +/*****************************************************************************/ +PVRSRV_ERROR +MMU_MapPMRFast(MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR sDevVAddr, + const PMR *psPMR, + IMG_DEVMEM_SIZE_T uiSizeBytes, + PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, + IMG_UINT32 uiLog2PageSize); + +/*************************************************************************/ /*! +@Function MMU_UnmapPMRFast + +@Description Unmap pages from the MMU as fast as possible. + PMR must be non-sparse! 
+ +@Input psMMUContext MMU context to operate on + +@Input sDevVAddrBase Device virtual address of the 1st page + +@Input ui32PageCount Number of pages to unmap + +@Input uiLog2PageSize log2 size of the page + +@Return None +*/ +/*****************************************************************************/ +void +MMU_UnmapPMRFast(MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR sDevVAddrBase, + IMG_UINT32 ui32PageCount, + IMG_UINT32 uiLog2PageSize); + +/*************************************************************************/ /*! +@Function MMU_ChangeValidity + +@Description Sets or unsets the valid bit of page table entries for a given + address range. + +@Input psMMUContext MMU context to operate on + +@Input sDevVAddr The device virtual base address of + the range we want to modify + +@Input uiSizeBytes The size of the range in bytes + +@Input uiLog2PageSize Log2 of the used page size + +@Input bMakeValid Choose to set or unset the valid bit. + (bMakeValid == IMG_TRUE ) -> SET + (bMakeValid == IMG_FALSE) -> UNSET + +@Input psPMR The PMR backing the allocation. + Needed in case we have sparse memory + where we have to check whether a physical + address actually backs the virtual. + +@Return PVRSRV_OK if successful +*/ +/*****************************************************************************/ +PVRSRV_ERROR +MMU_ChangeValidity(MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSizeBytes, + IMG_UINT32 uiLog2PageSize, + IMG_BOOL bMakeValid, + PMR *psPMR); + +/*************************************************************************/ /*! 
+@Function MMU_AcquireBaseAddr + +@Description Acquire the device physical address of the base level MMU object + +@Input psMMUContext MMU context to operate on + +@Output psPhysAddr Device physical address of the base level + MMU object + +@Return PVRSRV_OK if successful +*/ +/*****************************************************************************/ +PVRSRV_ERROR +MMU_AcquireBaseAddr(MMU_CONTEXT *psMMUContext, IMG_DEV_PHYADDR *psPhysAddr); + +/*************************************************************************/ /*! +@Function MMU_ReleaseBaseAddr + +@Description Release the device physical address of the base level MMU object + +@Input psMMUContext MMU context to operate on + +@Return PVRSRV_OK if successful +*/ +/*****************************************************************************/ +void +MMU_ReleaseBaseAddr(MMU_CONTEXT *psMMUContext); + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +/***********************************************************************************/ /*! +@Function MMU_SetOSid + +@Description Set the OSid associated with the application (and the MMU Context) + +@Input psMMUContext MMU context to store the OSid on + +@Input ui32OSid the OSid in question + +@Input ui32OSidReg The value that the firmware will assign to the + registers. + +@Input bOSidAxiProt Toggles whether the AXI prot bit will be set or + not. +@Return None +*/ +/***********************************************************************************/ + +void MMU_SetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32OSid, + IMG_UINT32 ui32OSidReg, IMG_BOOL bOSidAxiProt); + +/***********************************************************************************/ /*! +@Function MMU_GetOSid + +@Description Retrieve the OSid associated with the MMU context. + +@Input psMMUContext MMU context in which the OSid is stored + +@Output pui32OSid The OSid in question + +@Output pui32OSidReg The OSid that the firmware will assign to the + registers. 
+ +@Output pbOSidAxiProt Toggles whether the AXI prot bit will be set or + not. +@Return None +*/ +/***********************************************************************************/ + +void MMU_GetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 * pui32OSid, + IMG_UINT32 * pui32OSidReg, IMG_BOOL *pbOSidAxiProt); +#endif + +/*************************************************************************/ /*! +@Function MMU_AppendCacheFlags + +@Description Set the cache flags to the bitwise or of themselves and the + specified input flags, i.e. ui32CacheFlags |= ui32NewCacheFlags, + atomically. + +@Input psMMUContext MMU context + +@Input ui32NewCacheFlags Cache flags to append. + +@Return None +*/ +/*****************************************************************************/ +void MMU_AppendCacheFlags(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32NewCacheFlags); + +/*************************************************************************/ /*! +@Function MMU_ExchangeCacheFlags + +@Description Exchange MMU context flags with specified value, atomically. + +@Input psMMUContext MMU context + +@Input ui32CacheFlags Cache flags to set. + +@Return Previous MMU context cache flags. +*/ +/*****************************************************************************/ +IMG_UINT32 MMU_ExchangeCacheFlags(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32NewCacheFlags); + +/*************************************************************************/ /*! 
+@Function MMU_CheckFaultAddress + +@Description Check the specified MMU context to see if the provided address + should be valid + +@Input psMMUContext MMU context to store the data on + +@Input psDevVAddr Address to check + +@Output psOutFaultData To store fault details after checking + +@Return None +*/ +/*****************************************************************************/ +void MMU_CheckFaultAddress(MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR *psDevVAddr, + MMU_FAULT_DATA *psOutFaultData); + +/*************************************************************************/ /*! +@Function MMU_IsVDevAddrValid +@Description Checks if given address is valid. +@Input psMMUContext MMU context to store the data on +@Input uiLog2PageSize page size +@Input sDevVAddr Address to check +@Return IMG_TRUE of address is valid +*/ /**************************************************************************/ +IMG_BOOL MMU_IsVDevAddrValid(MMU_CONTEXT *psMMUContext, + IMG_UINT32 uiLog2PageSize, + IMG_DEV_VIRTADDR sDevVAddr); + +#if defined(PDUMP) + +/*************************************************************************/ /*! +@Function MMU_ContextDerivePCPDumpSymAddr + +@Description Derives a PDump Symbolic address for the top level MMU object + +@Input psMMUContext MMU context to operate on + +@Input pszPDumpSymbolicNameBuffer Buffer to write the PDump symbolic + address to + +@Input uiPDumpSymbolicNameBufferSize Size of the buffer + +@Return PVRSRV_OK if successful +*/ +/*****************************************************************************/ +PVRSRV_ERROR +MMU_ContextDerivePCPDumpSymAddr(MMU_CONTEXT *psMMUContext, + IMG_CHAR *pszPDumpSymbolicNameBuffer, + size_t uiPDumpSymbolicNameBufferSize); + +/*************************************************************************/ /*! 
+@Function MMU_PDumpWritePageCatBase + +@Description PDump write of the top level MMU object to a device register + +@Input psMMUContext MMU context to operate on + +@Input pszSpaceName PDump name of the mem/reg space + +@Input uiOffset Offset to write the address to + +@Return PVRSRV_OK if successful +*/ +/*****************************************************************************/ +PVRSRV_ERROR MMU_PDumpWritePageCatBase(MMU_CONTEXT *psMMUContext, + const IMG_CHAR *pszSpaceName, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT32 ui32WordSize, + IMG_UINT32 ui32AlignShift, + IMG_UINT32 ui32Shift, + PDUMP_FLAGS_T uiPdumpFlags); + +/*************************************************************************/ /*! +@Function MMU_AcquirePDumpMMUContext + +@Description Acquire a reference to the PDump MMU context for this MMU + context + +@Input psMMUContext MMU context to operate on + +@Output pui32PDumpMMUContextID PDump MMU context ID + +@Return PVRSRV_OK if successful +*/ +/*****************************************************************************/ +PVRSRV_ERROR +MMU_AcquirePDumpMMUContext(MMU_CONTEXT *psMMUContext, + IMG_UINT32 *pui32PDumpMMUContextID, + IMG_UINT32 ui32PDumpFlags); + +/*************************************************************************/ /*! 
+@Function MMU_ReleasePDumpMMUContext + +@Description Release a reference to the PDump MMU context for this MMU context + +@Input psMMUContext MMU context to operate on + +@Return PVRSRV_OK if successful +*/ +/*****************************************************************************/ +PVRSRV_ERROR +MMU_ReleasePDumpMMUContext(MMU_CONTEXT *psMMUContext, + IMG_UINT32 ui32PDumpFlags); +#else /* PDUMP */ + +#ifdef INLINE_IS_PRAGMA +#pragma inline(MMU_PDumpWritePageCatBase) +#endif +static INLINE void +MMU_PDumpWritePageCatBase(MMU_CONTEXT *psMMUContext, + const IMG_CHAR *pszSpaceName, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT32 ui32WordSize, + IMG_UINT32 ui32AlignShift, + IMG_UINT32 ui32Shift, + PDUMP_FLAGS_T uiPdumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psMMUContext); + PVR_UNREFERENCED_PARAMETER(pszSpaceName); + PVR_UNREFERENCED_PARAMETER(uiOffset); + PVR_UNREFERENCED_PARAMETER(ui32WordSize); + PVR_UNREFERENCED_PARAMETER(ui32AlignShift); + PVR_UNREFERENCED_PARAMETER(ui32Shift); + PVR_UNREFERENCED_PARAMETER(uiPdumpFlags); +} +#endif /* PDUMP */ + +void RGXMapBRN71422TargetPhysicalAddress(MMU_CONTEXT *psMMUContext); + +#endif /* #ifdef MMU_COMMON_H */ diff --git a/drivers/gpu/drm/phytium/octopus/module_common.c b/drivers/gpu/drm/phytium/octopus/module_common.c new file mode 100644 index 000000000000..48044da58e7d --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/module_common.c @@ -0,0 +1,534 @@ +/*************************************************************************/ /*! +@File +@Title Common Linux module setup +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include + +#if defined(CONFIG_DEBUG_FS) +#include "pvr_debugfs.h" +#endif /* defined(CONFIG_DEBUG_FS) */ +#if defined(CONFIG_PROC_FS) +#include "pvr_procfs.h" +#endif /* defined(CONFIG_PROC_FS) */ +#include "di_server.h" +#include "private_data.h" +#include "linkage.h" +#include "power.h" +#include "env_connection.h" +#include "process_stats.h" +#include "module_common.h" +#include "pvrsrv.h" +#include "srvcore.h" +#if defined(SUPPORT_RGX) +#include "rgxdevice.h" +#endif +#include "pvrsrv_error.h" +#include "pvr_drv.h" +#include "pvr_bridge_k.h" + +#include "pvr_fence.h" + +#if defined(SUPPORT_NATIVE_FENCE_SYNC) +#include "pvr_sync.h" +#endif + +#include "ospvr_gputrace.h" + +#include "km_apphint.h" +#include "srvinit.h" + +#include "pvr_ion_stats.h" + +#if defined(SUPPORT_DISPLAY_CLASS) +/* Display class interface */ +#include "kerneldisplay.h" +EXPORT_SYMBOL(DCRegisterDevice); +EXPORT_SYMBOL(DCUnregisterDevice); +EXPORT_SYMBOL(DCDisplayConfigurationRetired); +EXPORT_SYMBOL(DCDisplayHasPendingCommand); +EXPORT_SYMBOL(DCImportBufferAcquire); +EXPORT_SYMBOL(DCImportBufferRelease); + +/* Physmem interface (required by LMA DC drivers) */ +#include "physheap.h" +EXPORT_SYMBOL(PhysHeapAcquireByUsage); +EXPORT_SYMBOL(PhysHeapRelease); +EXPORT_SYMBOL(PhysHeapGetType); +EXPORT_SYMBOL(PhysHeapGetCpuPAddr); +EXPORT_SYMBOL(PhysHeapGetSize); +EXPORT_SYMBOL(PhysHeapCpuPAddrToDevPAddr); + +EXPORT_SYMBOL(PVRSRVGetDriverStatus); +EXPORT_SYMBOL(PVRSRVSystemInstallDeviceLISR); +EXPORT_SYMBOL(PVRSRVSystemUninstallDeviceLISR); + +#include "pvr_notifier.h" +EXPORT_SYMBOL(PVRSRVCheckStatus); + +#include "pvr_debug.h" +EXPORT_SYMBOL(PVRSRVGetErrorString); +#endif /* defined(SUPPORT_DISPLAY_CLASS) */ + +#if defined(SUPPORT_RGX) +#include "rgxapi_km.h" +#if defined(SUPPORT_SHARED_SLC) +EXPORT_SYMBOL(RGXInitSLC); +#endif +EXPORT_SYMBOL(RGXHWPerfConnect); +EXPORT_SYMBOL(RGXHWPerfDisconnect); 
+EXPORT_SYMBOL(RGXHWPerfControl); +#if defined(HWPERF_PACKET_V2C_SIG) +EXPORT_SYMBOL(RGXHWPerfConfigureCounters); +#else +EXPORT_SYMBOL(RGXHWPerfConfigureAndEnableCounters); +EXPORT_SYMBOL(RGXHWPerfConfigureAndEnableCustomCounters); +#endif +EXPORT_SYMBOL(RGXHWPerfDisableCounters); +EXPORT_SYMBOL(RGXHWPerfAcquireEvents); +EXPORT_SYMBOL(RGXHWPerfReleaseEvents); +EXPORT_SYMBOL(RGXHWPerfConvertCRTimeStamp); +#if defined(SUPPORT_KERNEL_HWPERF_TEST) +EXPORT_SYMBOL(OSAddTimer); +EXPORT_SYMBOL(OSEnableTimer); +EXPORT_SYMBOL(OSDisableTimer); +EXPORT_SYMBOL(OSRemoveTimer); +#endif +#endif + +CONNECTION_DATA *LinuxConnectionFromFile(struct file *pFile) +{ + if (pFile) + { + struct drm_file *psDRMFile = pFile->private_data; + + return psDRMFile->driver_priv; + } + + return NULL; +} + +struct file *LinuxFileFromConnection(CONNECTION_DATA *psConnection) +{ + ENV_CONNECTION_DATA *psEnvConnection; + + psEnvConnection = PVRSRVConnectionPrivateData(psConnection); + PVR_ASSERT(psEnvConnection != NULL); + + return psEnvConnection->psFile; +} + +/**************************************************************************/ /*! 
+@Function PVRSRVDriverInit +@Description Common one time driver initialisation +@Return int 0 on success and a Linux error code otherwise +*/ /***************************************************************************/ +int PVRSRVDriverInit(void) +{ + PVRSRV_ERROR error; + int os_err; + + error = PVROSFuncInit(); + if (error != PVRSRV_OK) + { + return -ENOMEM; + } + +#if defined(SUPPORT_NATIVE_FENCE_SYNC) + error = pvr_sync_register_functions(); + if (error != PVRSRV_OK) + { + return -EPERM; + } + + os_err = pvr_sync_init(); + if (os_err != 0) + { + return os_err; + } +#endif + + error = PVRSRVCommonDriverInit(); + if (error != PVRSRV_OK) + { + return -ENODEV; + } + + os_err = pvr_apphint_init(); + if (os_err != 0) + { + PVR_DPF((PVR_DBG_WARNING, "%s: failed AppHint setup(%d)", __func__, + os_err)); + } + +#if defined(SUPPORT_RGX) + error = PVRGpuTraceSupportInit(); + if (error != PVRSRV_OK) + { + return -ENOMEM; + } +#endif + +#if defined(CONFIG_DEBUG_FS) + error = PVRDebugFsRegister(); + if (error != PVRSRV_OK) + { + return -ENOMEM; + } +#elif defined(CONFIG_PROC_FS) + error = PVRProcFsRegister(); + if (error != PVRSRV_OK) + { + return -ENOMEM; + } +#endif /* defined(CONFIG_DEBUG_FS) || defined(CONFIG_PROC_FS) */ + + error = PVRSRVIonStatsInitialise(); + if (error != PVRSRV_OK) + { + return -ENODEV; + } + +#if defined(SUPPORT_RGX) + /* calling here because we need to handle input from the file even + * before the devices are initialised + * note: we're not passing a device node because apphint callbacks don't + * need it */ + PVRGpuTraceInitAppHintCallbacks(NULL); +#endif + + return 0; +} + +/**************************************************************************/ /*! 
+@Function PVRSRVDriverDeinit +@Description Common one time driver de-initialisation +@Return void +*/ /***************************************************************************/ +void PVRSRVDriverDeinit(void) +{ + pvr_apphint_deinit(); + + PVRSRVIonStatsDestroy(); + + PVRSRVCommonDriverDeInit(); + +#if defined(SUPPORT_RGX) + PVRGpuTraceSupportDeInit(); +#endif + +#if defined(SUPPORT_NATIVE_FENCE_SYNC) + pvr_sync_deinit(); +#endif + + PVROSFuncDeInit(); +} + +/**************************************************************************/ /*! +@Function PVRSRVDeviceInit +@Description Common device related initialisation. +@Input psDeviceNode The device node for which initialisation should be + performed +@Return int 0 on success and a Linux error code otherwise +*/ /***************************************************************************/ +int PVRSRVDeviceInit(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + int error = 0; + +#if defined(SUPPORT_NATIVE_FENCE_SYNC) + { + PVRSRV_ERROR eError = pvr_sync_device_init(psDeviceNode->psDevConfig->pvOSDevice); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: unable to create sync (%d)", + __func__, eError)); + return -EBUSY; + } + } +#endif + +#if defined(SUPPORT_RGX) + error = PVRGpuTraceInitDevice(psDeviceNode); + if (error != 0) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: failed to initialise PVR GPU Tracing on device%d (%d)", + __func__, psDeviceNode->sDevId.i32OsDeviceID, error)); + } +#endif + + /* register the AppHint device control before device initialisation + * so individual AppHints can be configured during the init phase + */ + error = pvr_apphint_device_register(psDeviceNode); + if (error != 0) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: failed to initialise device AppHints (%d)", + __func__, error)); + } + + return 0; +} + +/**************************************************************************/ /*! +@Function PVRSRVDeviceDeinit +@Description Common device related de-initialisation. 
+@Input psDeviceNode The device node for which de-initialisation should + be performed +@Return void +*/ /***************************************************************************/ +void PVRSRVDeviceDeinit(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + pvr_apphint_device_unregister(psDeviceNode); + +#if defined(SUPPORT_RGX) + PVRGpuTraceDeInitDevice(psDeviceNode); +#endif + +#if defined(SUPPORT_NATIVE_FENCE_SYNC) + pvr_sync_device_deinit(psDeviceNode->psDevConfig->pvOSDevice); +#endif + +#if defined(SUPPORT_DMA_TRANSFER) + PVRSRVDeInitialiseDMA(psDeviceNode); +#endif + + pvr_fence_cleanup(); +} + +/**************************************************************************/ /*! +@Function PVRSRVDeviceShutdown +@Description Common device shutdown. +@Input psDeviceNode The device node representing the device that should + be shutdown +@Return void +*/ /***************************************************************************/ + +void PVRSRVDeviceShutdown(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + + /* + * Disable the bridge to stop processes trying to use the driver + * after it has been shut down. + */ + eError = LinuxBridgeBlockClientsAccess(IMG_TRUE); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to suspend driver (%d)", + __func__, eError)); + return; + } + + (void) PVRSRVSetDeviceSystemPowerState(psDeviceNode, + PVRSRV_SYS_POWER_STATE_OFF, + IMG_FALSE); +} + +/**************************************************************************/ /*! +@Function PVRSRVDeviceSuspend +@Description Common device suspend. 
+@Input psDeviceNode The device node representing the device that should + be suspended +@Return int 0 on success and a Linux error code otherwise +*/ /***************************************************************************/ +int PVRSRVDeviceSuspend(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + /* + * LinuxBridgeBlockClientsAccess prevents processes from using the driver + * while it's suspended (this is needed for Android). Acquire the bridge + * lock first to ensure the driver isn't currently in use. + */ + +// LinuxBridgeBlockClientsAccess(IMG_FALSE); +// + +#if defined(SUPPORT_AUTOVZ) + /* To allow the driver to power down the GPU under AutoVz, the firmware must + * declared as offline, otherwise all power requests will be ignored. */ + psDeviceNode->bAutoVzFwIsUp = IMG_FALSE; +#endif + + if (PVRSRVSetDeviceSystemPowerState(psDeviceNode, + PVRSRV_SYS_POWER_STATE_OFF, + IMG_TRUE) != PVRSRV_OK) + { +// LinuxBridgeUnblockClientsAccess(); +// + return -EINVAL; + } + + return 0; +} + +/**************************************************************************/ /*! +@Function PVRSRVDeviceResume +@Description Common device resume. +@Input psDeviceNode The device node representing the device that should + be resumed +@Return int 0 on success and a Linux error code otherwise +*/ /***************************************************************************/ +int PVRSRVDeviceResume(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + if (PVRSRVSetDeviceSystemPowerState(psDeviceNode, + PVRSRV_SYS_POWER_STATE_ON, + IMG_TRUE) != PVRSRV_OK) + { + return -EINVAL; + } + +// LinuxBridgeUnblockClientsAccess(); +// + + /* + * Reprocess the device queues in case commands were blocked during + * suspend. + */ + if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_ACTIVE) + { + PVRSRVCheckStatus(NULL); + } + + return 0; +} + +/**************************************************************************/ /*! +@Function PVRSRVDeviceOpen +@Description Common device open. 
+@Input psDeviceNode The device node representing the device being + opened by a user mode process +@Input psDRMFile The DRM file data that backs the file handle + returned to the user mode process +@Return int 0 on success and a Linux error code otherwise +*/ /***************************************************************************/ +int PVRSRVDeviceOpen(PVRSRV_DEVICE_NODE *psDeviceNode, + struct drm_file *psDRMFile) +{ + static DEFINE_MUTEX(sDeviceInitMutex); + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + ENV_CONNECTION_PRIVATE_DATA sPrivData; + void *pvConnectionData; + PVRSRV_ERROR eError; + int iErr = 0; + + if (!psPVRSRVData) + { + PVR_DPF((PVR_DBG_ERROR, "%s: No device data", __func__)); + iErr = -ENODEV; + goto out; + } + + mutex_lock(&sDeviceInitMutex); + /* + * If the first attempt already set the state to bad, + * there is no point in going the second time, so get out + */ + if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_BAD) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Driver already in bad state. 
Device open failed.", + __func__)); + iErr = -ENODEV; + mutex_unlock(&sDeviceInitMutex); + goto out; + } + + if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_INIT) + { + eError = PVRSRVCommonDeviceInitialise(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to initialise device (%s)", + __func__, PVRSRVGetErrorString(eError))); + iErr = -ENODEV; + mutex_unlock(&sDeviceInitMutex); + goto out; + } + +#if defined(SUPPORT_RGX) + PVRGpuTraceInitIfEnabled(psDeviceNode); +#endif + } + mutex_unlock(&sDeviceInitMutex); + + sPrivData.psDevNode = psDeviceNode; + sPrivData.psFile = psDRMFile->filp; + + /* + * Here we pass the file pointer which will passed through to our + * OSConnectionPrivateDataInit function where we can save it so + * we can back reference the file structure from its connection + */ + eError = PVRSRVCommonConnectionConnect(&pvConnectionData, (void *) &sPrivData); + if (eError != PVRSRV_OK) + { + iErr = -ENOMEM; + goto out; + } + + psDRMFile->driver_priv = pvConnectionData; + +out: + return iErr; +} + +/**************************************************************************/ /*! +@Function PVRSRVDeviceRelease +@Description Common device release. 
+@Input psDeviceNode The device node for the device that the given file + represents +@Input psDRMFile The DRM file data that's being released +@Return void +*/ /***************************************************************************/ +void PVRSRVDeviceRelease(PVRSRV_DEVICE_NODE *psDeviceNode, + struct drm_file *psDRMFile) +{ + void *pvConnectionData = psDRMFile->driver_priv; + + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + + psDRMFile->driver_priv = NULL; + if (pvConnectionData) + { + PVRSRVCommonConnectionDisconnect(pvConnectionData); + } +} diff --git a/drivers/gpu/drm/phytium/octopus/module_common.h b/drivers/gpu/drm/phytium/octopus/module_common.h new file mode 100644 index 000000000000..89cb64d09491 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/module_common.h @@ -0,0 +1,67 @@ +/*************************************************************************/ /*! +@File module_common.h +@Title Common linux module setup header +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef MODULE_COMMON_H +#define MODULE_COMMON_H + +/* DRVNAME is the name we use to register our driver. 
*/ +#define DRVNAME PVR_LDM_DRIVER_REGISTRATION_NAME + +struct _PVRSRV_DEVICE_NODE_; +struct drm_file; + +int PVRSRVDriverInit(void); +void PVRSRVDriverDeinit(void); + +int PVRSRVDeviceInit(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); +void PVRSRVDeviceDeinit(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); + +void PVRSRVDeviceShutdown(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); +int PVRSRVDeviceSuspend(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); +int PVRSRVDeviceResume(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); + +int PVRSRVDeviceOpen(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, + struct drm_file *psDRMFile); +void PVRSRVDeviceRelease(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, + struct drm_file *psDRMFile); + +#endif /* MODULE_COMMON_H */ diff --git a/drivers/gpu/drm/phytium/octopus/multicore_defs.h b/drivers/gpu/drm/phytium/octopus/multicore_defs.h new file mode 100644 index 000000000000..31cc9f2b3f34 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/multicore_defs.h @@ -0,0 +1,53 @@ +/**************************************************************************/ /*! +@File +@Title RGX Multicore Information flags +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RGX_MULTICORE_DEFS_H +#define RGX_MULTICORE_DEFS_H + +/* Capability bits returned to client in RGXGetMultiCoreInfo */ +#define RGX_MULTICORE_CAPABILITY_FRAGMENT_EN (0x00000040U) +#define RGX_MULTICORE_CAPABILITY_GEOMETRY_EN (0x00000020U) +#define RGX_MULTICORE_CAPABILITY_COMPUTE_EN (0x00000010U) +#define RGX_MULTICORE_CAPABILITY_PRIMARY_EN (0x00000008U) +#define RGX_MULTICORE_ID_CLRMSK (0xFFFFFFF8U) + +#endif /* RGX_MULTICORE_DEFS_H */ diff --git a/drivers/gpu/drm/phytium/octopus/opaque_types.h b/drivers/gpu/drm/phytium/octopus/opaque_types.h new file mode 100644 index 000000000000..f917492ff469 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/opaque_types.h @@ -0,0 +1,56 @@ +/*************************************************************************/ /*! +@File +@Title Opaque Types +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Defines opaque types for various services types +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifndef SERVICES_OPAQUE_TYPES_H +#define SERVICES_OPAQUE_TYPES_H + +#include "img_defs.h" +#include "img_types.h" + +typedef struct _PVRSRV_DEVICE_NODE_ *PPVRSRV_DEVICE_NODE; +typedef const struct _PVRSRV_DEVICE_NODE_ *PCPVRSRV_DEVICE_NODE; + +#endif /* SERVICES_OPAQUE_TYPES_H */ + +/****************************************************************************** + End of file (opaque_types.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/os_cpu_cache.h b/drivers/gpu/drm/phytium/octopus/os_cpu_cache.h new file mode 100644 index 000000000000..5587142a01f3 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/os_cpu_cache.h @@ -0,0 +1,69 @@ +/*************************************************************************/ /*! +@File +@Title OS and CPU d-cache maintenance mechanisms +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Defines for cache management which are visible internally only +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef OS_CPU_CACHE_H +#define OS_CPU_CACHE_H + +#include "info_page_defs.h" + +#define PVRSRV_CACHE_OP_TIMELINE 0x8 /*!< Request SW_SYNC timeline notification when executed */ +#define PVRSRV_CACHE_OP_FORCE_SYNCHRONOUS 0x10 /*!< Force all batch members to be executed synchronously */ + +#define CACHEFLUSH_ISA_X86 0x1 /*!< x86/x64 specific UM range-based cache flush */ +#define CACHEFLUSH_ISA_ARM64 0x2 /*!< Aarch64 specific UM range-based cache flush */ +#define CACHEFLUSH_ISA_GENERIC 0x3 /*!< Other ISA's without UM range-based cache flush */ +#ifndef CACHEFLUSH_ISA_TYPE + #if defined(__i386__) || defined(__x86_64__) + #define CACHEFLUSH_ISA_TYPE CACHEFLUSH_ISA_X86 + #elif defined(__arm64__) || defined(__aarch64__) + #define CACHEFLUSH_ISA_TYPE CACHEFLUSH_ISA_ARM64 + #else + #define CACHEFLUSH_ISA_TYPE CACHEFLUSH_ISA_GENERIC + #endif +#endif + +#if (CACHEFLUSH_ISA_TYPE == CACHEFLUSH_ISA_X86) || (CACHEFLUSH_ISA_TYPE == CACHEFLUSH_ISA_ARM64) +#define CACHEFLUSH_ISA_SUPPORTS_UM_FLUSH /*!< x86/x86_64/ARM64 supports user-mode d-cache flush */ +#endif + +#endif /* OS_CPU_CACHE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/os_srvinit_param.h b/drivers/gpu/drm/phytium/octopus/os_srvinit_param.h new file mode 100644 index 000000000000..07bffe9ca524 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/os_srvinit_param.h @@ -0,0 +1,322 @@ +/*************************************************************************/ /*! +@File +@Title Services initialisation parameters header +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Services initialisation parameter support for the Linux kernel. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef OS_SRVINIT_PARAM_H +#define OS_SRVINIT_PARAM_H + +#if defined(__linux__) && defined(__KERNEL__) +#include "km_apphint.h" +#include "km_apphint_defs.h" + +#define SrvInitParamOpen() NULL +#define SrvInitParamClose(pvState) ((void)(pvState)) + +#define SrvInitParamGetBOOL(state, name, value) \ + ((void) pvr_apphint_get_bool(APPHINT_ID_ ## name, &value)) + +#define SrvInitParamGetUINT32(state, name, value) \ + ((void) pvr_apphint_get_uint32(APPHINT_ID_ ## name, &value)) + +#define SrvInitParamGetUINT64(state, name, value) \ + ((void) pvr_apphint_get_uint64(APPHINT_ID_ ## name, &value)) + +#define SrvInitParamGetSTRING(state, name, buffer, size) \ + ((void) pvr_apphint_get_string(APPHINT_ID_ ## name, buffer, size)) + +#define SrvInitParamGetUINT32BitField(state, name, value) \ + ((void) pvr_apphint_get_uint32(APPHINT_ID_ ## name, &value)) + +#define SrvInitParamGetUINT32List(state, name, value) \ + ((void) pvr_apphint_get_uint32(APPHINT_ID_ ## name, &value)) + +#else /* defined(__linux__) && defined(__KERNEL__) */ + +#if defined(__cplusplus) +extern "C" { +#endif + +#include "img_defs.h" +#include "img_types.h" + +/*! Lookup item. */ +typedef struct +{ + const IMG_CHAR *pszValue; /*!< looked up name */ + IMG_UINT32 ui32Value; /*!< looked up value */ +} SRV_INIT_PARAM_UINT32_LOOKUP; + +/*************************************************************************/ /*! +@Brief SrvInitParamOpen + +@Description Establish a connection to the Parameter resource store which is + used to hold configuration information associated with the + server instance. + +@Return (void *) Handle to Parameter resource store to be used for + subsequent parameter value queries + +*/ /**************************************************************************/ +void *SrvInitParamOpen(void); + +/*************************************************************************/ /*! 
+@Brief SrvInitParamClose + +@Description Remove a pre-existing connection to the Parameter resource store + given by 'pvState' and release any temporary storage associated + with the 'pvState' mapping handle + +@Input pvState Handle to Parameter resource store + +*/ /**************************************************************************/ +void SrvInitParamClose(void *pvState); + +/*************************************************************************/ /*! +@Brief _SrvInitParamGetBOOL + +@Description Get the current BOOL value for parameter 'pszName' from the + Parameter resource store attached to 'pvState' + +@Input pvState Handle to Parameter resource store + +@Input pszName Name of parameter to look-up + +@Input pbDefault Value to return if parameter not found + +@Output pbValue Value of parameter 'pszName' or 'pbDefault' + if not found + +*/ /**************************************************************************/ +void _SrvInitParamGetBOOL( + void *pvState, + const IMG_CHAR *pszName, + const IMG_BOOL *pbDefault, + IMG_BOOL *pbValue +); + +/*! Get the BOOL value for parameter 'name' from the parameter resource store + * attached to 'state'. */ +#define SrvInitParamGetBOOL(state, name, value) \ + _SrvInitParamGetBOOL(state, # name, & __SrvInitParam_ ## name, &(value)) + +/*! Initialise FLAG type parameter identified by 'name'. */ +#define SrvInitParamInitFLAG(name, defval, dummy) \ + static const IMG_BOOL __SrvInitParam_ ## name = defval; + +/*! Initialise BOOL type parameter identified by 'name'. */ +#define SrvInitParamInitBOOL(name, defval, dummy) \ + static const IMG_BOOL __SrvInitParam_ ## name = defval; + +/*************************************************************************/ /*! 
+@Brief _SrvInitParamGetUINT32 + +@Description Get the current IMG_UINT32 value for parameter 'pszName' + from the Parameter resource store attached to 'pvState' + +@Input pvState Handle to Parameter resource store + +@Input pszName Name of parameter to look-up + +@Input pui32Default Value to return if parameter not found + +@Output pui32Value Value of parameter 'pszName' or + 'pui32Default' if not found + +*/ /**************************************************************************/ +void _SrvInitParamGetUINT32( + void *pvState, + const IMG_CHAR *pszName, + const IMG_UINT32 *pui32Default, + IMG_UINT32 *pui32Value +); + +/*! Get the UINT32 value for parameter 'name' from the parameter resource store + * attached to 'state'. */ +#define SrvInitParamGetUINT32(state, name, value) \ + _SrvInitParamGetUINT32(state, # name, & __SrvInitParam_ ## name, &(value)) + +/*! Initialise UINT32 type parameter identified by 'name'. */ +#define SrvInitParamInitUINT32(name, defval, dummy) \ + static const IMG_UINT32 __SrvInitParam_ ## name = defval; + +/*! Initialise UINT64 type parameter identified by 'name'. */ +#define SrvInitParamInitUINT64(name, defval, dummy) \ + static const IMG_UINT64 __SrvInitParam_ ## name = defval; + +/*! @cond Doxygen_Suppress */ +#define SrvInitParamUnreferenced(name) \ + PVR_UNREFERENCED_PARAMETER( __SrvInitParam_ ## name ) +/*! @endcond */ + +/*************************************************************************/ /*! 
+@Brief _SrvInitParamGetUINT32BitField + +@Description Get the current IMG_UINT32 bitfield value for parameter + 'pszBasename' from the Parameter resource store + attached to 'pvState' + +@Input pvState Handle to Parameter resource store + +@Input pszBaseName Bitfield parameter name to search for + +@Input uiDefault Default return value if parameter not found + +@Input psLookup Bitfield array to traverse + +@Input uiSize number of elements in 'psLookup' + +@Output puiValue Value of bitfield or 'uiDefault' if + parameter not found +*/ /**************************************************************************/ +void _SrvInitParamGetUINT32BitField( + void *pvState, + const IMG_CHAR *pszBaseName, + IMG_UINT32 uiDefault, + const SRV_INIT_PARAM_UINT32_LOOKUP *psLookup, + IMG_UINT32 uiSize, + IMG_UINT32 *puiValue +); + +/*! Initialise UINT32 bitfield type parameter identified by 'name' with + * 'inival' value and 'lookup' look up array. */ +#define SrvInitParamInitUINT32Bitfield(name, inival, lookup) \ + static IMG_UINT32 __SrvInitParam_ ## name = inival; \ + static SRV_INIT_PARAM_UINT32_LOOKUP * \ + __SrvInitParamLookup_ ## name = &lookup[0]; \ + static const IMG_UINT32 __SrvInitParamSize_ ## name = \ + ARRAY_SIZE(lookup); + +/*! Get the UINT32 bitfield value for parameter 'name' from the parameter + * resource store attached to 'state'. */ +#define SrvInitParamGetUINT32BitField(state, name, value) \ + _SrvInitParamGetUINT32BitField(state, # name, __SrvInitParam_ ## name, __SrvInitParamLookup_ ## name, __SrvInitParamSize_ ## name, &(value)) + +/*************************************************************************/ /*! 
+@Brief _SrvInitParamGetUINT32List + +@Description Get the current IMG_UINT32 list value for the specified + parameter 'pszName' from the Parameter resource store + attached to 'pvState' + +@Input pvState Handle to Parameter resource store + +@Input pszName Parameter list name to search for + +@Input uiDefault Default value to return if 'pszName' is + not set within 'pvState' + +@Input psLookup parameter list to traverse + +@Input uiSize number of elements in 'psLookup' list + +@Output puiValue value of located list element or + 'uiDefault' if parameter not found + +*/ /**************************************************************************/ +void _SrvInitParamGetUINT32List( + void *pvState, + const IMG_CHAR *pszName, + IMG_UINT32 uiDefault, + const SRV_INIT_PARAM_UINT32_LOOKUP *psLookup, + IMG_UINT32 uiSize, + IMG_UINT32 *puiValue +); + +/*! Get the UINT32 list value for parameter 'name' from the parameter + * resource store attached to 'state'. */ +#define SrvInitParamGetUINT32List(state, name, value) \ + _SrvInitParamGetUINT32List(state, # name, __SrvInitParam_ ## name, __SrvInitParamLookup_ ## name, __SrvInitParamSize_ ## name, &(value)) + +/*! Initialise UINT32 list type parameter identified by 'name' with + * 'defval' default value and 'lookup' look up list. */ +#define SrvInitParamInitUINT32List(name, defval, lookup) \ + static IMG_UINT32 __SrvInitParam_ ## name = defval; \ + static SRV_INIT_PARAM_UINT32_LOOKUP * \ + __SrvInitParamLookup_ ## name = &lookup[0]; \ + static const IMG_UINT32 __SrvInitParamSize_ ## name = \ + ARRAY_SIZE(lookup); + +/*************************************************************************/ /*! 
+@Brief _SrvInitParamGetSTRING + +@Description Get the contents of the specified parameter string 'pszName' + from the Parameter resource store attached to 'pvState' + +@Input pvState Handle to Parameter resource store + +@Input pszName Parameter string name to search for + +@Input psDefault Default string to return if 'pszName' is + not set within 'pvState' + +@Input size Size of output 'pBuffer' + +@Output pBuffer Output copy of 'pszName' contents or + copy of 'psDefault' if 'pszName' is not + set within 'pvState' + +*/ /**************************************************************************/ +void _SrvInitParamGetSTRING( + void *pvState, + const IMG_CHAR *pszName, + const IMG_CHAR *psDefault, + IMG_CHAR *pBuffer, + size_t size +); + +/*! Initialise STRING type parameter identified by 'name' with 'defval' default + * value. */ +#define SrvInitParamInitSTRING(name, defval, dummy) \ + static const IMG_CHAR *__SrvInitParam_ ## name = defval; + +/*! Get the STRING value for parameter 'name' from the parameter resource store + * attached to 'state'. */ +#define SrvInitParamGetSTRING(state, name, buffer, size) \ + _SrvInitParamGetSTRING(state, # name, __SrvInitParam_ ## name, buffer, size) + +#if defined(__cplusplus) +} +#endif + +#endif /* defined(__linux__) && defined(__KERNEL__) */ + +#endif /* OS_SRVINIT_PARAM_H */ diff --git a/drivers/gpu/drm/phytium/octopus/osconnection_server.c b/drivers/gpu/drm/phytium/octopus/osconnection_server.c new file mode 100644 index 000000000000..d12f6968fc42 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/osconnection_server.c @@ -0,0 +1,155 @@ +/*************************************************************************/ /*! +@File +@Title Linux specific per process data functions +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include + +#include "connection_server.h" +#include "osconnection_server.h" + +#include "env_connection.h" +#include "allocmem.h" +#include "pvr_debug.h" + +#include + +#if defined(SUPPORT_ION) +#include +#include PVR_ANDROID_ION_HEADER + +/* + The ion device (the base object for all requests) + gets created by the system and we acquire it via + Linux specific functions provided by the system layer +*/ +#include "ion_sys.h" +#endif + +PVRSRV_ERROR OSConnectionPrivateDataInit(IMG_HANDLE *phOsPrivateData, void *pvOSData) +{ + ENV_CONNECTION_PRIVATE_DATA *psPrivData = pvOSData; + ENV_CONNECTION_DATA *psEnvConnection; +#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) + ENV_ION_CONNECTION_DATA *psIonConnection; +#endif + + *phOsPrivateData = OSAllocZMem(sizeof(ENV_CONNECTION_DATA)); + + if (*phOsPrivateData == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocMem failed", __func__)); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + psEnvConnection = (ENV_CONNECTION_DATA *)*phOsPrivateData; + + psEnvConnection->owner = current->tgid; + + /* Save the pointer to our struct file */ + psEnvConnection->psFile = psPrivData->psFile; + psEnvConnection->psDevNode = psPrivData->psDevNode; + +#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) + psIonConnection = (ENV_ION_CONNECTION_DATA *)OSAllocZMem(sizeof(ENV_ION_CONNECTION_DATA)); + if (psIonConnection == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocMem failed", __func__)); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + psEnvConnection->psIonData = psIonConnection; + /* + We can have more than one connection per process, so we need + more than the PID to have a unique name. 
+ */ + psEnvConnection->psIonData->psIonDev = IonDevAcquire(); + OSSNPrintf(psEnvConnection->psIonData->azIonClientName, ION_CLIENT_NAME_SIZE, "pvr_ion_client-%p-%d", *phOsPrivateData, OSGetCurrentClientProcessIDKM()); + psEnvConnection->psIonData->psIonClient = + ion_client_create(psEnvConnection->psIonData->psIonDev, + psEnvConnection->psIonData->azIonClientName); + + if (IS_ERR_OR_NULL(psEnvConnection->psIonData->psIonClient)) + { + PVR_DPF((PVR_DBG_ERROR, "OSConnectionPrivateDataInit: Couldn't create " + "ion client for per connection data")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } +#endif /* SUPPORT_ION && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) */ + return PVRSRV_OK; +} + +PVRSRV_ERROR OSConnectionPrivateDataDeInit(IMG_HANDLE hOsPrivateData) +{ + ENV_CONNECTION_DATA *psEnvConnection; + + if (hOsPrivateData == NULL) + { + return PVRSRV_OK; + } + + psEnvConnection = hOsPrivateData; + +#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) + PVR_ASSERT(psEnvConnection->psIonData != NULL); + + PVR_ASSERT(psEnvConnection->psIonData->psIonClient != NULL); + ion_client_destroy(psEnvConnection->psIonData->psIonClient); + + IonDevRelease(psEnvConnection->psIonData->psIonDev); + OSFreeMem(psEnvConnection->psIonData); +#endif + + OSFreeMem(hOsPrivateData); + /*not nulling pointer, copy on stack*/ + + return PVRSRV_OK; +} + + +PVRSRV_DEVICE_NODE *OSGetDevNode(CONNECTION_DATA *psConnection) +{ + ENV_CONNECTION_DATA *psEnvConnection; + + psEnvConnection = PVRSRVConnectionPrivateData(psConnection); + PVR_ASSERT(psEnvConnection); + + return psEnvConnection->psDevNode; +} diff --git a/drivers/gpu/drm/phytium/octopus/osconnection_server.h b/drivers/gpu/drm/phytium/octopus/osconnection_server.h new file mode 100644 index 000000000000..cda8406783f1 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/osconnection_server.h @@ -0,0 +1,121 @@ +/**************************************************************************/ /*! 
+@File +@Title Server side connection management +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description API for OS specific callbacks from server side connection + management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /***************************************************************************/ +#ifndef OSCONNECTION_SERVER_H +#define OSCONNECTION_SERVER_H + +#include "handle.h" +#include "osfunc.h" + + +#if defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) +PVRSRV_ERROR OSConnectionPrivateDataInit(IMG_HANDLE *phOsPrivateData, void *pvOSData); +PVRSRV_ERROR OSConnectionPrivateDataDeInit(IMG_HANDLE hOsPrivateData); + +PVRSRV_ERROR OSConnectionSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase); + +PVRSRV_DEVICE_NODE* OSGetDevNode(CONNECTION_DATA *psConnection); + +#else /* defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */ +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSConnectionPrivateDataInit) +#endif +/*************************************************************************/ /*! +@Function OSConnectionPrivateDataInit +@Description Allocates and initialises any OS-specific private data + relating to a connection. + Called from PVRSRVCommonConnectionConnect(). +@Input pvOSData pointer to any OS private data +@Output phOsPrivateData handle to the created connection + private data +@Return PVRSRV_OK on success, a failure code otherwise. 
+*/ /**************************************************************************/ +static INLINE PVRSRV_ERROR OSConnectionPrivateDataInit(IMG_HANDLE *phOsPrivateData, void *pvOSData) +{ + PVR_UNREFERENCED_PARAMETER(phOsPrivateData); + PVR_UNREFERENCED_PARAMETER(pvOSData); + + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSConnectionPrivateDataDeInit) +#endif +/*************************************************************************/ /*! +@Function OSConnectionPrivateDataDeInit +@Description Frees previously allocated OS-specific private data + relating to a connection. +@Input hOsPrivateData handle to the connection private data + to be freed +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +static INLINE PVRSRV_ERROR OSConnectionPrivateDataDeInit(IMG_HANDLE hOsPrivateData) +{ + PVR_UNREFERENCED_PARAMETER(hOsPrivateData); + + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSConnectionSetHandleOptions) +#endif +static INLINE PVRSRV_ERROR OSConnectionSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase) +{ + PVR_UNREFERENCED_PARAMETER(psHandleBase); + + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSGetDevNode) +#endif +static INLINE PVRSRV_DEVICE_NODE* OSGetDevNode(CONNECTION_DATA *psConnection) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + + return NULL; +} +#endif /* defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */ + + +#endif /* OSCONNECTION_SERVER_H */ diff --git a/drivers/gpu/drm/phytium/octopus/osdi_impl.h b/drivers/gpu/drm/phytium/octopus/osdi_impl.h new file mode 100644 index 000000000000..4ee905727624 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/osdi_impl.h @@ -0,0 +1,188 @@ +/*************************************************************************/ /*! +@File +@Title Functions and types for creating Debug Info implementations. 
+@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef OSDI_IMPL_H +#define OSDI_IMPL_H + +#include + +#include "di_common.h" +#include "pvrsrv_error.h" + +/*! Implementation callbacks. Those operations are performed on native + * implementation handles. */ +typedef struct OSDI_IMPL_ENTRY_CB +{ + /*! @Function pfnVPrintf + * + * @Description + * Implementation of the 'vprintf' operation. + * + * @Input pvNativeHandle native implementation handle + * @Input pszFmt NUL-terminated format string + * @Input va_list variable length argument list + */ + void (*pfnVPrintf)(void *pvNativeHandle, const IMG_CHAR *pszFmt, va_list pArgs); + + /*! @Function pfnPuts + * + * @Description + * Implementation of the 'puts' operation. + * + * @Input pvNativeHandle native implementation handle + * @Input pszStr NUL-terminated string + */ + void (*pfnPuts)(void *pvNativeHandle, const IMG_CHAR *pszStr); + + /*! @Function pfnHasOverflowed + * + * @Description + * Checks if the native implementation's buffer has overflowed. + * + * @Input pvNativeHandle native implementation handle + */ + IMG_BOOL (*pfnHasOverflowed)(void *pvNativeHandle); +} OSDI_IMPL_ENTRY_CB; + +/*! Debug Info entry specialisation. */ +typedef struct OSDI_IMPL_ENTRY +{ + /*! Pointer to the private data. The data originates from DICreateEntry() + * function. */ + void *pvPrivData; + /*! Pointer to the implementation native handle. */ + void *pvNative; + /*! 
Implementation entry callbacks. */ + OSDI_IMPL_ENTRY_CB *psCb; +} OSDI_IMPL_ENTRY; + +/*! Debug Info implementation callbacks. */ +typedef struct OSDI_IMPL_CB +{ + /*! Initialise implementation callback. + */ + PVRSRV_ERROR (*pfnInit)(void); + + /*! De-initialise implementation callback. + */ + void (*pfnDeInit)(void); + + /*! @Function pfnCreateEntry + * + * @Description + * Creates entry of eType type with pszName in the pvNativeGroup parent + * group. The entry is an abstract term which depends on the implementation, + * e.g.: a file in DebugFS. + * + * @Input pszName: name of the entry + * @Input eType: type of the entry + * @Input psIterCb: iterator implementation for the entry + * @Input pvPrivData: data that will be passed to the iterator callbacks + * in OSDI_IMPL_ENTRY - it can be retrieved by calling + * DIGetPrivData() function + * @Input pvNativeGroup: implementation specific handle to the parent group + * + * @Output pvNativeEntry: implementation specific handle to the entry + * + * return PVRSRV_ERROR error code + */ + PVRSRV_ERROR (*pfnCreateEntry)(const IMG_CHAR *pszName, + DI_ENTRY_TYPE eType, + const DI_ITERATOR_CB *psIterCb, + void *pvPrivData, + void *pvNativeGroup, + void **pvNativeEntry); + + /*! @Function pfnDestroyEntry + * + * @Description + * Destroys native entry. + * + * @Input psNativeEntry: handle to the entry + */ + void (*pfnDestroyEntry)(void *psNativeEntry); + + /*! @Function pfnCreateGroup + * + * @Description + * Creates group with pszName in the psNativeParentGroup parent group. + * The group is an abstract term which depends on the implementation, + * e.g.: a directory in DebugFS. 
+ * + * @Input pszName: name of the entry + * @Input psNativeParentGroup: implementation specific handle to the parent + * group + * + * @Output psNativeGroup: implementation specific handle to the group + * + * return PVRSRV_ERROR error code + */ + PVRSRV_ERROR (*pfnCreateGroup)(const IMG_CHAR *pszName, + void *psNativeParentGroup, + void **psNativeGroup); + + /*! @Function pfnDestroyGroup + * + * @Description + * Destroys native group. + * + * @Input psNativeGroup: handle to the group + */ + void (*pfnDestroyGroup)(void *psNativeGroup); +} OSDI_IMPL_CB; + +/*! @Function DIRegisterImplementation + * + * @Description + * Registers Debug Info implementations with the framework. The framework takes + * the ownership of the implementation and will clean up the resources when + * it's de-initialised. + * + * @Input pszName: name of the implementation + * @Input psImplCb: implementation callbacks + * + * @Return PVRSRV_ERROR error code + */ +PVRSRV_ERROR DIRegisterImplementation(const IMG_CHAR *pszName, + const OSDI_IMPL_CB *psImplCb); + +#endif /* OSDI_IMPL_H */ diff --git a/drivers/gpu/drm/phytium/octopus/osfunc.c b/drivers/gpu/drm/phytium/octopus/osfunc.c new file mode 100644 index 000000000000..febbe1e80cd7 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/osfunc.c @@ -0,0 +1,2492 @@ +/*************************************************************************/ /*! +@File +@Title Environment related functions +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) +#include +#include +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) +#include +#include +#else +#include +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) */ + +#include "log2.h" +#include "osfunc.h" +#include "cache_km.h" +#include "img_defs.h" +#include "img_types.h" +#include "allocmem.h" +#include "devicemem_server_utils.h" +#include "event.h" +#include "linkage.h" +#include "pvr_uaccess.h" +#include "pvr_debug.h" +#include "pvr_bridge_k.h" +#include "pvrsrv_memallocflags.h" +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#include "process_stats.h" +#endif +#include "physmem_osmem_linux.h" +#include "dma_support.h" +#include "kernel_compatibility.h" + +#include "dma_km.h" +#include "pvrsrv_sync_server.h" + + +#if defined(VIRTUAL_PLATFORM) +#define EVENT_OBJECT_TIMEOUT_US (120000000ULL) +#else +#if defined(EMULATOR) || defined(TC_APOLLO_TCF5) +#define EVENT_OBJECT_TIMEOUT_US (2000000ULL) +#else +#define EVENT_OBJECT_TIMEOUT_US (100000ULL) +#endif /* EMULATOR */ +#endif + + +typedef struct { + struct task_struct *kthread; + PFN_THREAD pfnThread; + void *hData; + IMG_CHAR *pszThreadName; + IMG_BOOL bIsThreadRunning; + IMG_BOOL bIsSupportingThread; + PFN_THREAD_DEBUG_DUMP pfnDebugDumpCB; + DLLIST_NODE sNode; +} OSThreadData; + +void OSSuspendTaskInterruptible(void) +{ + set_current_state(TASK_INTERRUPTIBLE); + schedule(); +} + +static DLLIST_NODE gsThreadListHead; + +static void _ThreadListAddEntry(OSThreadData *psThreadListNode) +{ + dllist_add_to_tail(&gsThreadListHead, 
&(psThreadListNode->sNode)); +} + +static void _ThreadListRemoveEntry(OSThreadData *psThreadListNode) +{ + dllist_remove_node(&(psThreadListNode->sNode)); +} + +static void _ThreadSetStopped(OSThreadData *psOSThreadData) +{ + psOSThreadData->bIsThreadRunning = IMG_FALSE; +} + +static void _OSInitThreadList(void) +{ + dllist_init(&gsThreadListHead); +} + +void OSThreadDumpInfo(DUMPDEBUG_PRINTF_FUNC* pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + PDLLIST_NODE psNodeCurr, psNodeNext; + + dllist_foreach_node(&gsThreadListHead, psNodeCurr, psNodeNext) + { + OSThreadData *psThreadListNode; + psThreadListNode = IMG_CONTAINER_OF(psNodeCurr, OSThreadData, sNode); + + PVR_DUMPDEBUG_LOG(" %s : %s", + psThreadListNode->pszThreadName, + (psThreadListNode->bIsThreadRunning) ? "Running" : "Stopped"); + + if (psThreadListNode->pfnDebugDumpCB) + { + psThreadListNode->pfnDebugDumpCB(pfnDumpDebugPrintf, pvDumpDebugFile); + } + } +} + +PVRSRV_ERROR OSPhyContigPagesAlloc(PVRSRV_DEVICE_NODE *psDevNode, size_t uiSize, + PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr, + IMG_PID uiPid) +{ + struct device *psDev = psDevNode->psDevConfig->pvOSDevice; + IMG_CPU_PHYADDR sCpuPAddr; + struct page *psPage; + IMG_UINT32 ui32Order=0; + gfp_t gfp_flags; + + PVR_ASSERT(uiSize != 0); + /*Align the size to the page granularity */ + uiSize = PAGE_ALIGN(uiSize); + + /*Get the order to be used with the allocation */ + ui32Order = get_order(uiSize); + + gfp_flags = GFP_KERNEL; + +#if !defined(PVR_LINUX_PHYSMEM_USE_HIGHMEM_ONLY) + if (psDev) + { + if (*psDev->dma_mask == DMA_BIT_MASK(32)) + { + /* Limit to 32 bit. + * Achieved by setting __GFP_DMA32 for 64 bit systems */ + gfp_flags |= __GFP_DMA32; + } + else if (*psDev->dma_mask < DMA_BIT_MASK(32)) + { + /* Limit to whatever the size of DMA zone is. 
*/ + gfp_flags |= __GFP_DMA; + } + } +#else + PVR_UNREFERENCED_PARAMETER(psDev); +#endif + + /*allocate the pages */ + psPage = alloc_pages(gfp_flags, ui32Order); + if (psPage == NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + uiSize = (1 << ui32Order) * PAGE_SIZE; + + psMemHandle->u.pvHandle = psPage; + psMemHandle->uiOrder = ui32Order; + sCpuPAddr.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(page_to_phys(psPage)); + + /* + * Even when more pages are allocated as base MMU object we still need one single physical address because + * they are physically contiguous. + */ + PhysHeapCpuPAddrToDevPAddr(psDevNode->apsPhysHeap[PVRSRV_PHYS_HEAP_CPU_LOCAL], 1, psDevPAddr, &sCpuPAddr); + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if !defined(PVRSRV_ENABLE_MEMORY_STATS) + PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA, + uiSize, + (IMG_UINT64)(uintptr_t) psPage, + uiPid); +#else + PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA, + psPage, + sCpuPAddr, + uiSize, + NULL, + uiPid + DEBUG_MEMSTATS_VALUES); +#endif +#else + PVR_UNREFERENCED_PARAMETER(uiPid); +#endif + + return PVRSRV_OK; +} + +void OSPhyContigPagesFree(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle) +{ + struct page *psPage = (struct page*) psMemHandle->u.pvHandle; + IMG_UINT32 uiSize, uiPageCount=0, ui32Order; + + ui32Order = psMemHandle->uiOrder; + uiPageCount = (1 << ui32Order); + uiSize = (uiPageCount * PAGE_SIZE); + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if !defined(PVRSRV_ENABLE_MEMORY_STATS) + PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA, + (IMG_UINT64)(uintptr_t) psPage); +#else + PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA, + (IMG_UINT64)(uintptr_t) psPage, + OSGetCurrentClientProcessIDKM()); +#endif +#endif + + __free_pages(psPage, ui32Order); + psMemHandle->uiOrder = 0; +} + +PVRSRV_ERROR OSPhyContigPagesMap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle, + size_t uiSize, 
IMG_DEV_PHYADDR *psDevPAddr, + void **pvPtr) +{ + size_t actualSize = 1 << (PAGE_SHIFT + psMemHandle->uiOrder); + *pvPtr = kmap((struct page*)psMemHandle->u.pvHandle); + + PVR_UNREFERENCED_PARAMETER(psDevPAddr); + + PVR_UNREFERENCED_PARAMETER(actualSize); /* If we don't take an #ifdef path */ + PVR_UNREFERENCED_PARAMETER(uiSize); + PVR_UNREFERENCED_PARAMETER(psDevNode); + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if !defined(PVRSRV_ENABLE_MEMORY_STATS) + PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA, actualSize, OSGetCurrentClientProcessIDKM()); +#else + { + IMG_CPU_PHYADDR sCpuPAddr; + sCpuPAddr.uiAddr = 0; + + PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA, + *pvPtr, + sCpuPAddr, + actualSize, + NULL, + OSGetCurrentClientProcessIDKM() + DEBUG_MEMSTATS_VALUES); + } +#endif +#endif + + return PVRSRV_OK; +} + +void OSPhyContigPagesUnmap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle, void *pvPtr) +{ +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if !defined(PVRSRV_ENABLE_MEMORY_STATS) + /* Mapping is done a page at a time */ + PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA, + (1 << (PAGE_SHIFT + psMemHandle->uiOrder)), + OSGetCurrentClientProcessIDKM()); +#else + PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA, + (IMG_UINT64)(uintptr_t)pvPtr, + OSGetCurrentClientProcessIDKM()); +#endif +#endif + + PVR_UNREFERENCED_PARAMETER(psDevNode); + PVR_UNREFERENCED_PARAMETER(pvPtr); + + kunmap((struct page*) psMemHandle->u.pvHandle); +} + +PVRSRV_ERROR OSPhyContigPagesClean(PVRSRV_DEVICE_NODE *psDevNode, + PG_HANDLE *psMemHandle, + IMG_UINT32 uiOffset, + IMG_UINT32 uiLength) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + struct page* psPage = (struct page*) psMemHandle->u.pvHandle; + + void* pvVirtAddrStart = kmap(psPage) + uiOffset; + IMG_CPU_PHYADDR sPhysStart, sPhysEnd; + + IMG_UINT32 ui32Order; + + if (uiLength == 0) + { + goto e0; + } + + ui32Order = psMemHandle->uiOrder; + if ((uiOffset + uiLength) > ((1 << 
ui32Order) * PAGE_SIZE)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid size params, uiOffset %u, uiLength %u", + __func__, + uiOffset, + uiLength)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto e0; + } + + sPhysStart.uiAddr = page_to_phys(psPage) + uiOffset; + sPhysEnd.uiAddr = sPhysStart.uiAddr + uiLength; + + CacheOpExec(psDevNode, + pvVirtAddrStart, + pvVirtAddrStart + uiLength, + sPhysStart, + sPhysEnd, + PVRSRV_CACHE_OP_CLEAN); + +e0: + kunmap(psPage); + + return eError; +} + +#if defined(__GNUC__) +#define PVRSRV_MEM_ALIGN __attribute__ ((aligned (0x8))) +#define PVRSRV_MEM_ALIGN_MASK (0x7) +#else +#error "PVRSRV Alignment macros need to be defined for this compiler" +#endif + +IMG_UINT32 OSCPUCacheAttributeSize(OS_CPU_CACHE_ATTRIBUTE eCacheAttribute) +{ + IMG_UINT32 uiSize = 0; + + switch (eCacheAttribute) + { + case OS_CPU_CACHE_ATTRIBUTE_LINE_SIZE: + uiSize = cache_line_size(); + break; + + default: + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid cache attribute type %d", + __func__, (IMG_UINT32)eCacheAttribute)); + PVR_ASSERT(0); + break; + } + + return uiSize; +} + +IMG_UINT32 OSVSScanf(const IMG_CHAR *pStr, const IMG_CHAR *pszFormat, ...) +{ + va_list argList; + IMG_INT32 iCount = 0; + + va_start(argList, pszFormat); + iCount = vsscanf(pStr, pszFormat, argList); + va_end(argList); + + return iCount; +} + +IMG_INT OSMemCmp(void *pvBufA, void *pvBufB, size_t uiLen) +{ + return (IMG_INT)memcmp(pvBufA, pvBufB, uiLen); +} + +size_t OSStringLCat(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDstSize) +{ + /* + * Let strlcat handle any truncation cases correctly. 
+ * We will definitely get a NUL-terminated string set in pszDest + */ + size_t uSrcSize = strlcat(pszDest, pszSrc, uDstSize); + +#if defined(PVR_DEBUG_STRLCPY) && defined(DEBUG) + /* Handle truncation by dumping calling stack if debug allows */ + if (uSrcSize >= uDstSize) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: String truncated Src = '<%s>' %ld bytes, Dest = '%s'", + __func__, pszSrc, (long)uDstSize, pszDest)); + OSDumpStack(); + } +#endif /* defined(PVR_DEBUG_STRLCPY) && defined(DEBUG) */ + + return uSrcSize; +} + +IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, size_t ui32Size, const IMG_CHAR *pszFormat, ...) +{ + va_list argList; + IMG_INT32 iCount; + + va_start(argList, pszFormat); + iCount = vsnprintf(pStr, (size_t)ui32Size, pszFormat, argList); + va_end(argList); + + return iCount; +} + +IMG_INT32 OSVSNPrintf(IMG_CHAR *pStr, size_t ui32Size, const IMG_CHAR* pszFormat, va_list vaArgs) +{ + return vsnprintf(pStr, ui32Size, pszFormat, vaArgs); +} + +size_t OSStringLength(const IMG_CHAR *pStr) +{ + return strlen(pStr); +} + +size_t OSStringNLength(const IMG_CHAR *pStr, size_t uiCount) +{ + return strnlen(pStr, uiCount); +} + +IMG_INT32 OSStringNCompare(const IMG_CHAR *pStr1, const IMG_CHAR *pStr2, + size_t uiSize) +{ + return strncmp(pStr1, pStr2, uiSize); +} + +PVRSRV_ERROR OSStringToUINT32(const IMG_CHAR *pStr, IMG_UINT32 ui32Base, + IMG_UINT32 *ui32Result) +{ + if (kstrtou32(pStr, ui32Base, ui32Result) != 0) + return PVRSRV_ERROR_CONVERSION_FAILED; + + return PVRSRV_OK; +} + +IMG_UINT32 OSStringUINT32ToStr(IMG_CHAR *pszBuf, size_t uSize, + IMG_UINT32 ui32Num) +{ + IMG_UINT32 ui32i, ui32Len = 0, ui32NumCopy = ui32Num; + + /* calculate string length required to hold the number string */ + do + { + ui32Len++; + ui32NumCopy /= 10; + } while (ui32NumCopy != 0); + + if (unlikely(ui32Len >= uSize)) + { + /* insufficient buffer */ + return 0; + } + + for (ui32i = 0; ui32i < ui32Len; ui32i++) + { + pszBuf[ui32Len - (ui32i + 1)] = '0' + ui32Num % 10; + ui32Num = ui32Num / 10; + } 
+ + pszBuf[ui32Len] = '\0'; + return ui32Len; +} + +PVRSRV_ERROR OSInitEnvData(void) +{ + + LinuxInitPhysmem(); + + _OSInitThreadList(); + + return PVRSRV_OK; +} + + +void OSDeInitEnvData(void) +{ + + LinuxDeinitPhysmem(); +} + + +void OSReleaseThreadQuanta(void) +{ + schedule(); +} + +/* Not matching/aligning this API to the Clockus() API above to avoid necessary + * multiplication/division operations in calling code. + */ +static inline IMG_UINT64 Clockns64(void) +{ + IMG_UINT64 timenow; + + /* Kernel thread preempt protection. Some architecture implementations + * (ARM) of sched_clock are not preempt safe when the kernel is configured + * as such e.g. CONFIG_PREEMPT and others. + */ + preempt_disable(); + + /* Using sched_clock instead of ktime_get since we need a time stamp that + * correlates with that shown in kernel logs and trace data not one that + * is a bit behind. */ + timenow = sched_clock(); + + preempt_enable(); + + return timenow; +} + +IMG_UINT64 OSClockns64(void) +{ + return Clockns64(); +} + +IMG_UINT64 OSClockus64(void) +{ + IMG_UINT64 timenow = Clockns64(); + IMG_UINT32 remainder; + + return OSDivide64r64(timenow, 1000, &remainder); +} + +IMG_UINT32 OSClockus(void) +{ + return (IMG_UINT32) OSClockus64(); +} + +IMG_UINT32 OSClockms(void) +{ + IMG_UINT64 timenow = Clockns64(); + IMG_UINT32 remainder; + + return OSDivide64(timenow, 1000000, &remainder); +} + +static inline IMG_UINT64 KClockns64(void) +{ + ktime_t sTime = ktime_get(); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) + return sTime; +#else + return sTime.tv64; +#endif +} + +PVRSRV_ERROR OSClockMonotonicns64(IMG_UINT64 *pui64Time) +{ + *pui64Time = KClockns64(); + return PVRSRV_OK; +} + +PVRSRV_ERROR OSClockMonotonicus64(IMG_UINT64 *pui64Time) +{ + IMG_UINT64 timenow = KClockns64(); + IMG_UINT32 remainder; + + *pui64Time = OSDivide64r64(timenow, 1000, &remainder); + return PVRSRV_OK; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)) +IMG_UINT64 
OSClockMonotonicRawns64(void) +{ + struct timespec64 ts; + + ktime_get_raw_ts64(&ts); + return ts.tv_sec * 1000000000 + ts.tv_nsec; +} +#else +IMG_UINT64 OSClockMonotonicRawns64(void) +{ + struct timespec ts; + + getrawmonotonic(&ts); + return (IMG_UINT64) ts.tv_sec * 1000000000 + ts.tv_nsec; +} +#endif + +IMG_UINT64 OSClockMonotonicRawus64(void) +{ + IMG_UINT32 rem; + return OSDivide64r64(OSClockMonotonicRawns64(), 1000, &rem); +} + +/* + OSWaitus +*/ +void OSWaitus(IMG_UINT32 ui32Timeus) +{ + udelay(ui32Timeus); +} + + +/* + OSSleepms +*/ +void OSSleepms(IMG_UINT32 ui32Timems) +{ + msleep(ui32Timems); +} + + +INLINE IMG_UINT64 OSGetCurrentProcessVASpaceSize(void) +{ + return (IMG_UINT64)TASK_SIZE; +} + +INLINE IMG_PID OSGetCurrentProcessID(void) +{ + if (in_interrupt()) + { + return KERNEL_ID; + } + + return (IMG_PID)task_tgid_nr(current); +} + +INLINE IMG_PID OSGetCurrentVirtualProcessID(void) +{ + if (in_interrupt()) + { + return KERNEL_ID; + } + + return (IMG_PID)task_tgid_vnr(current); +} + +INLINE IMG_CHAR *OSGetCurrentProcessName(void) +{ + return current->comm; +} + +INLINE uintptr_t OSGetCurrentThreadID(void) +{ + if (in_interrupt()) + { + return KERNEL_ID; + } + + return current->pid; +} + +IMG_PID OSGetCurrentClientProcessIDKM(void) +{ + return OSGetCurrentProcessID(); +} + +IMG_CHAR *OSGetCurrentClientProcessNameKM(void) +{ + return OSGetCurrentProcessName(); +} + +uintptr_t OSGetCurrentClientThreadIDKM(void) +{ + return OSGetCurrentThreadID(); +} + +size_t OSGetPageSize(void) +{ + return PAGE_SIZE; +} + +size_t OSGetPageShift(void) +{ + return PAGE_SHIFT; +} + +size_t OSGetPageMask(void) +{ + return (OSGetPageSize()-1); +} + +size_t OSGetOrder(size_t uSize) +{ + return get_order(PAGE_ALIGN(uSize)); +} + +IMG_UINT64 OSGetRAMSize(void) +{ + struct sysinfo SI; + si_meminfo(&SI); + + return (PAGE_SIZE * SI.totalram); +} + +typedef struct +{ + int os_error; + PVRSRV_ERROR pvr_error; +} error_map_t; + +/* return -ve versions of POSIX errors as they are used 
in this form */ +static const error_map_t asErrorMap[] = +{ + {-EFAULT, PVRSRV_ERROR_BRIDGE_EFAULT}, + {-EINVAL, PVRSRV_ERROR_BRIDGE_EINVAL}, + {-ENOMEM, PVRSRV_ERROR_BRIDGE_ENOMEM}, + {-ERANGE, PVRSRV_ERROR_BRIDGE_ERANGE}, + {-EPERM, PVRSRV_ERROR_BRIDGE_EPERM}, + {-ENOTTY, PVRSRV_ERROR_BRIDGE_ENOTTY}, + {-ENOTTY, PVRSRV_ERROR_BRIDGE_CALL_FAILED}, + {-ERANGE, PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL}, + {-ENOMEM, PVRSRV_ERROR_OUT_OF_MEMORY}, + {-EINVAL, PVRSRV_ERROR_INVALID_PARAMS}, + + {0, PVRSRV_OK} +}; + +int PVRSRVToNativeError(PVRSRV_ERROR e) +{ + int os_error = -EFAULT; + int i; + + for (i = 0; i < ARRAY_SIZE(asErrorMap); i++) + { + if (e == asErrorMap[i].pvr_error) + { + os_error = asErrorMap[i].os_error; + break; + } + } + return os_error; +} + +typedef struct _MISR_DATA_ { + struct workqueue_struct *psWorkQueue; + struct work_struct sMISRWork; + const IMG_CHAR* pszName; + PFN_MISR pfnMISR; + void *hData; +} MISR_DATA; + +/* + MISRWrapper +*/ +static void MISRWrapper(struct work_struct *data) +{ + MISR_DATA *psMISRData = container_of(data, MISR_DATA, sMISRWork); + + PVR_DPF((PVR_DBG_MESSAGE, "Waking up '%s' MISR %p", psMISRData->pszName, psMISRData)); + + psMISRData->pfnMISR(psMISRData->hData); +} + +/* + OSInstallMISR +*/ +PVRSRV_ERROR OSInstallMISR(IMG_HANDLE *hMISRData, PFN_MISR pfnMISR, + void *hData, const IMG_CHAR *pszMisrName) +{ + MISR_DATA *psMISRData; + + psMISRData = OSAllocMem(sizeof(*psMISRData)); + PVR_LOG_RETURN_IF_NOMEM(psMISRData, "psMISRData"); + + psMISRData->hData = hData; + psMISRData->pfnMISR = pfnMISR; + psMISRData->pszName = pszMisrName; + + PVR_DPF((PVR_DBG_MESSAGE, "Installing MISR with cookie %p", psMISRData)); + + psMISRData->psWorkQueue = create_singlethread_workqueue("pvr_misr"); + + if (psMISRData->psWorkQueue == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: create_singlethreaded_workqueue failed")); + OSFreeMem(psMISRData); + return PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD; + } + + INIT_WORK(&psMISRData->sMISRWork, 
MISRWrapper); + + *hMISRData = (IMG_HANDLE) psMISRData; + + return PVRSRV_OK; +} + +/* + OSUninstallMISR +*/ +PVRSRV_ERROR OSUninstallMISR(IMG_HANDLE hMISRData) +{ + MISR_DATA *psMISRData = (MISR_DATA *) hMISRData; + + PVR_DPF((PVR_DBG_MESSAGE, "Uninstalling MISR with cookie %p", psMISRData)); + + destroy_workqueue(psMISRData->psWorkQueue); + OSFreeMem(psMISRData); + + return PVRSRV_OK; +} + +/* + OSScheduleMISR +*/ +PVRSRV_ERROR OSScheduleMISR(IMG_HANDLE hMISRData) +{ + MISR_DATA *psMISRData = (MISR_DATA *) hMISRData; + + /* + Note: + + In the case of NO_HARDWARE we want the driver to be synchronous so + that we don't have to worry about waiting for previous operations + to complete + */ +#if defined(NO_HARDWARE) + psMISRData->pfnMISR(psMISRData->hData); + return PVRSRV_OK; +#else + { + bool rc = queue_work(psMISRData->psWorkQueue, &psMISRData->sMISRWork); + return (rc ? PVRSRV_OK : PVRSRV_ERROR_ALREADY_EXISTS); + } +#endif +} + +/* OS specific values for thread priority */ +static const IMG_INT32 ai32OSPriorityValues[OS_THREAD_LAST_PRIORITY] = +{ + 0, /* OS_THREAD_NOSET_PRIORITY */ + -20, /* OS_THREAD_HIGHEST_PRIORITY */ + -10, /* OS_THREAD_HIGH_PRIORITY */ + 0, /* OS_THREAD_NORMAL_PRIORITY */ + 9, /* OS_THREAD_LOW_PRIORITY */ + 19, /* OS_THREAD_LOWEST_PRIORITY */ +}; + +static int OSThreadRun(void *data) +{ + OSThreadData *psOSThreadData = data; + + /* count freezable threads */ + LinuxBridgeNumActiveKernelThreadsIncrement(); + + /* Returns true if the thread was frozen, should we do anything with this + * information? What do we return? Which one is the error case? 
*/ + set_freezable(); + + PVR_DPF((PVR_DBG_MESSAGE, "Starting Thread '%s'...", psOSThreadData->pszThreadName)); + + /* Call the client's kernel thread with the client's data pointer */ + psOSThreadData->pfnThread(psOSThreadData->hData); + + if (psOSThreadData->bIsSupportingThread) + { + _ThreadSetStopped(psOSThreadData); + } + + /* Wait for OSThreadDestroy() to call kthread_stop() */ + while (!kthread_freezable_should_stop(NULL)) + { + schedule(); + } + + LinuxBridgeNumActiveKernelThreadsDecrement(); + + return 0; +} + +PVRSRV_ERROR OSThreadCreate(IMG_HANDLE *phThread, + IMG_CHAR *pszThreadName, + PFN_THREAD pfnThread, + PFN_THREAD_DEBUG_DUMP pfnDebugDumpCB, + IMG_BOOL bIsSupportingThread, + void *hData) +{ + return OSThreadCreatePriority(phThread, pszThreadName, pfnThread, + pfnDebugDumpCB, bIsSupportingThread, hData, + OS_THREAD_NOSET_PRIORITY); +} + +PVRSRV_ERROR OSThreadCreatePriority(IMG_HANDLE *phThread, + IMG_CHAR *pszThreadName, + PFN_THREAD pfnThread, + PFN_THREAD_DEBUG_DUMP pfnDebugDumpCB, + IMG_BOOL bIsSupportingThread, + void *hData, + OS_THREAD_LEVEL eThreadPriority) +{ + OSThreadData *psOSThreadData; + PVRSRV_ERROR eError; + + psOSThreadData = OSAllocZMem(sizeof(*psOSThreadData)); + PVR_LOG_GOTO_IF_NOMEM(psOSThreadData, eError, fail_alloc); + + psOSThreadData->pfnThread = pfnThread; + psOSThreadData->hData = hData; + psOSThreadData->kthread = kthread_run(OSThreadRun, psOSThreadData, "%s", pszThreadName); + + if (IS_ERR(psOSThreadData->kthread)) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_kthread; + } + + if (bIsSupportingThread) + { + psOSThreadData->pszThreadName = pszThreadName; + psOSThreadData->pfnDebugDumpCB = pfnDebugDumpCB; + psOSThreadData->bIsThreadRunning = IMG_TRUE; + psOSThreadData->bIsSupportingThread = IMG_TRUE; + + _ThreadListAddEntry(psOSThreadData); + } + + if (eThreadPriority != OS_THREAD_NOSET_PRIORITY && + eThreadPriority < OS_THREAD_LAST_PRIORITY) + { + set_user_nice(psOSThreadData->kthread, + 
ai32OSPriorityValues[eThreadPriority]); + } + + *phThread = psOSThreadData; + + return PVRSRV_OK; + +fail_kthread: + OSFreeMem(psOSThreadData); +fail_alloc: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +PVRSRV_ERROR OSThreadDestroy(IMG_HANDLE hThread) +{ + OSThreadData *psOSThreadData = hThread; + int ret; + + /* Let the thread know we are ready for it to end and wait for it. */ + ret = kthread_stop(psOSThreadData->kthread); + if (0 != ret) + { + PVR_DPF((PVR_DBG_WARNING, "kthread_stop failed(%d)", ret)); + return PVRSRV_ERROR_RETRY; + } + + if (psOSThreadData->bIsSupportingThread) + { + _ThreadListRemoveEntry(psOSThreadData); + } + + OSFreeMem(psOSThreadData); + + return PVRSRV_OK; +} + +void OSPanic(void) +{ + BUG(); + +#if defined(__KLOCWORK__) + /* Klocwork does not understand that BUG is terminal... */ + abort(); +#endif +} + +void * +OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr, + size_t ui32Bytes, + PVRSRV_MEMALLOCFLAGS_T uiMappingFlags) +{ + void __iomem *pvLinAddr; + + if (uiMappingFlags & ~(PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK)) + { + PVR_ASSERT(!"Found non-cpu cache mode flag when mapping to the cpu"); + return NULL; + } + + if (! PVRSRV_VZ_MODE_IS(NATIVE)) + { + /* + This is required to support DMA physheaps for GPU virtualization. + Unfortunately, if a region of kernel managed memory is turned into + a DMA buffer, conflicting mappings can come about easily on Linux + as the original memory is mapped by the kernel as normal cached + memory whilst DMA buffers are mapped mostly as uncached device or + cache-coherent device memory. In both cases the system will have + two conflicting mappings for the same memory region and will have + "undefined behaviour" for most processors notably ARMv6 onwards + and some x86 micro-architectures. As a result, perform ioremapping + manually for DMA physheap allocations by translating from CPU/VA + to BUS/PA thereby preventing the creation of conflicting mappings. 
+ */ + pvLinAddr = (void __iomem *) SysDmaDevPAddrToCpuVAddr(BasePAddr.uiAddr, ui32Bytes); + if (pvLinAddr != NULL) + { + return (void __force *) pvLinAddr; + } + } + + switch (uiMappingFlags) + { + case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED: + pvLinAddr = (void __iomem *)ioremap(BasePAddr.uiAddr, ui32Bytes); + break; + case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC: +#if defined(CONFIG_X86) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) + pvLinAddr = (void __iomem *)ioremap_wc(BasePAddr.uiAddr, ui32Bytes); +#else + pvLinAddr = (void __iomem *)ioremap(BasePAddr.uiAddr, ui32Bytes); +#endif + break; + case PVRSRV_MEMALLOCFLAG_CPU_CACHED: +#if defined(CONFIG_X86) || defined(CONFIG_ARM) + pvLinAddr = (void __iomem *)ioremap_cache(BasePAddr.uiAddr, ui32Bytes); +#else + pvLinAddr = (void __iomem *)ioremap(BasePAddr.uiAddr, ui32Bytes); +#endif + break; + case PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT: + case PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT: + PVR_ASSERT(!"Unexpected cpu cache mode"); + pvLinAddr = NULL; + break; + default: + PVR_ASSERT(!"Unsupported cpu cache mode"); + pvLinAddr = NULL; + break; + } + + return (void __force *) pvLinAddr; +} + + +IMG_BOOL +OSUnMapPhysToLin(void *pvLinAddr, size_t ui32Bytes) +{ + PVR_UNREFERENCED_PARAMETER(ui32Bytes); + + if (!PVRSRV_VZ_MODE_IS(NATIVE)) + { + if (SysDmaCpuVAddrToDevPAddr(pvLinAddr)) + { + return IMG_TRUE; + } + } + + iounmap((void __iomem *) pvLinAddr); + + return IMG_TRUE; +} + +#define OS_MAX_TIMERS 8 + +/* Timer callback structure used by OSAddTimer */ +typedef struct TIMER_CALLBACK_DATA_TAG +{ + IMG_BOOL bInUse; + PFN_TIMER_FUNC pfnTimerFunc; + void *pvData; + struct timer_list sTimer; + IMG_UINT32 ui32Delay; + IMG_BOOL bActive; + struct work_struct sWork; +}TIMER_CALLBACK_DATA; + +static struct workqueue_struct *psTimerWorkQueue; + +static TIMER_CALLBACK_DATA sTimers[OS_MAX_TIMERS]; + +static DEFINE_MUTEX(sTimerStructLock); + +static void OSTimerCallbackBody(TIMER_CALLBACK_DATA *psTimerCBData) +{ + if 
(!psTimerCBData->bActive) + return; + + /* call timer callback */ + psTimerCBData->pfnTimerFunc(psTimerCBData->pvData); + + /* reset timer */ + mod_timer(&psTimerCBData->sTimer, psTimerCBData->sTimer.expires + psTimerCBData->ui32Delay); +} + + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) +/*************************************************************************/ /*! +@Function OSTimerCallbackWrapper +@Description OS specific timer callback wrapper function +@Input psTimer Timer list structure +*/ /**************************************************************************/ +static void OSTimerCallbackWrapper(struct timer_list *psTimer) +{ + TIMER_CALLBACK_DATA *psTimerCBData = from_timer(psTimerCBData, psTimer, sTimer); +#else +/*************************************************************************/ /*! +@Function OSTimerCallbackWrapper +@Description OS specific timer callback wrapper function +@Input uData Timer callback data +*/ /**************************************************************************/ +static void OSTimerCallbackWrapper(uintptr_t uData) +{ + TIMER_CALLBACK_DATA *psTimerCBData = (TIMER_CALLBACK_DATA*)uData; +#endif + int res; + + res = queue_work(psTimerWorkQueue, &psTimerCBData->sWork); + if (res == 0) + { + PVR_DPF((PVR_DBG_WARNING, "OSTimerCallbackWrapper: work already queued")); + } +} + + +static void OSTimerWorkQueueCallBack(struct work_struct *psWork) +{ + TIMER_CALLBACK_DATA *psTimerCBData = container_of(psWork, TIMER_CALLBACK_DATA, sWork); + + OSTimerCallbackBody(psTimerCBData); +} + +IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, void *pvData, IMG_UINT32 ui32MsTimeout) +{ + TIMER_CALLBACK_DATA *psTimerCBData; + IMG_UINT32 ui32i; + + /* check callback */ + if (!pfnTimerFunc) + { + PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: passed invalid callback")); + return NULL; + } + + /* Allocate timer callback data structure */ + mutex_lock(&sTimerStructLock); + for (ui32i = 0; ui32i < OS_MAX_TIMERS; ui32i++) + { + psTimerCBData = 
&sTimers[ui32i]; + if (!psTimerCBData->bInUse) + { + psTimerCBData->bInUse = IMG_TRUE; + break; + } + } + mutex_unlock(&sTimerStructLock); + if (ui32i >= OS_MAX_TIMERS) + { + PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: all timers are in use")); + return NULL; + } + + psTimerCBData->pfnTimerFunc = pfnTimerFunc; + psTimerCBData->pvData = pvData; + psTimerCBData->bActive = IMG_FALSE; + + /* + HZ = ticks per second + ui32MsTimeout = required ms delay + ticks = (Hz * ui32MsTimeout) / 1000 + */ + psTimerCBData->ui32Delay = ((HZ * ui32MsTimeout) < 1000) + ? 1 + : ((HZ * ui32MsTimeout) / 1000); + + /* initialise object */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) + timer_setup(&psTimerCBData->sTimer, OSTimerCallbackWrapper, 0); +#else + init_timer(&psTimerCBData->sTimer); + + /* setup timer object */ + psTimerCBData->sTimer.function = (void *)OSTimerCallbackWrapper; + psTimerCBData->sTimer.data = (uintptr_t)psTimerCBData; +#endif + + return (IMG_HANDLE)(uintptr_t)(ui32i + 1); +} + + +static inline TIMER_CALLBACK_DATA *GetTimerStructure(IMG_HANDLE hTimer) +{ + IMG_UINT32 ui32i = (IMG_UINT32)((uintptr_t)hTimer) - 1; + + PVR_ASSERT(ui32i < OS_MAX_TIMERS); + + return &sTimers[ui32i]; +} + +PVRSRV_ERROR OSRemoveTimer (IMG_HANDLE hTimer) +{ + TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer); + + PVR_ASSERT(psTimerCBData->bInUse); + PVR_ASSERT(!psTimerCBData->bActive); + + /* free timer callback data struct */ + psTimerCBData->bInUse = IMG_FALSE; + + return PVRSRV_OK; +} + +PVRSRV_ERROR OSEnableTimer (IMG_HANDLE hTimer) +{ + TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer); + + PVR_ASSERT(psTimerCBData->bInUse); + PVR_ASSERT(!psTimerCBData->bActive); + + /* Start timer arming */ + psTimerCBData->bActive = IMG_TRUE; + + /* set the expire time */ + psTimerCBData->sTimer.expires = psTimerCBData->ui32Delay + jiffies; + + /* Add the timer to the list */ + add_timer(&psTimerCBData->sTimer); + + return PVRSRV_OK; +} + + +PVRSRV_ERROR OSDisableTimer 
(IMG_HANDLE hTimer) +{ + TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer); + + PVR_ASSERT(psTimerCBData->bInUse); + PVR_ASSERT(psTimerCBData->bActive); + + /* Stop timer from arming */ + psTimerCBData->bActive = IMG_FALSE; + smp_mb(); + + flush_workqueue(psTimerWorkQueue); + + /* remove timer */ + del_timer_sync(&psTimerCBData->sTimer); + + /* + * This second flush is to catch the case where the timer ran + * before we managed to delete it, in which case, it will have + * queued more work for the workqueue. Since the bActive flag + * has been cleared, this second flush won't result in the + * timer being rearmed. + */ + flush_workqueue(psTimerWorkQueue); + + return PVRSRV_OK; +} + + +PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR *pszName, IMG_HANDLE *hEventObject) +{ + PVR_UNREFERENCED_PARAMETER(pszName); + + PVR_LOG_RETURN_IF_INVALID_PARAM(hEventObject, "hEventObject"); + + return LinuxEventObjectListCreate(hEventObject); +} + + +PVRSRV_ERROR OSEventObjectDestroy(IMG_HANDLE hEventObject) +{ + PVR_LOG_RETURN_IF_INVALID_PARAM(hEventObject, "hEventObject"); + + return LinuxEventObjectListDestroy(hEventObject); +} + +#define _FREEZABLE IMG_TRUE +#define _NON_FREEZABLE IMG_FALSE + +/* + * EventObjectWaitTimeout() + */ +static PVRSRV_ERROR EventObjectWaitTimeout(IMG_HANDLE hOSEventKM, + IMG_UINT64 uiTimeoutus) +{ + PVRSRV_ERROR eError; + + if (hOSEventKM && uiTimeoutus > 0) + { + eError = LinuxEventObjectWait(hOSEventKM, uiTimeoutus, _NON_FREEZABLE); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "OSEventObjectWait: invalid arguments %p, %lld", hOSEventKM, uiTimeoutus)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + + return eError; +} + +PVRSRV_ERROR OSEventObjectWaitTimeout(IMG_HANDLE hOSEventKM, IMG_UINT64 uiTimeoutus) +{ + return EventObjectWaitTimeout(hOSEventKM, uiTimeoutus); +} + +PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM) +{ + return OSEventObjectWaitTimeout(hOSEventKM, EVENT_OBJECT_TIMEOUT_US); +} + +PVRSRV_ERROR 
OSEventObjectWaitKernel(IMG_HANDLE hOSEventKM, + IMG_UINT64 uiTimeoutus) +{ + PVRSRV_ERROR eError; + +#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) + if (hOSEventKM) + { + if (uiTimeoutus > 0) + eError = LinuxEventObjectWait(hOSEventKM, uiTimeoutus, + _FREEZABLE); + else + eError = LinuxEventObjectWaitUntilSignalled(hOSEventKM); + } +#else /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */ + if (hOSEventKM && uiTimeoutus > 0) + { + eError = LinuxEventObjectWait(hOSEventKM, uiTimeoutus, + _FREEZABLE); + } +#endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */ + else + { + PVR_DPF((PVR_DBG_ERROR, "OSEventObjectWaitKernel: invalid arguments %p", + hOSEventKM)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + + return eError; +} + +void OSEventObjectDumpDebugInfo(IMG_HANDLE hOSEventKM) +{ + LinuxEventObjectDumpDebugInfo(hOSEventKM); +} + +PVRSRV_ERROR OSEventObjectOpen(IMG_HANDLE hEventObject, IMG_HANDLE *phOSEvent) +{ + PVRSRV_ERROR eError; + + PVR_LOG_RETURN_IF_INVALID_PARAM(phOSEvent, "phOSEvent"); + PVR_LOG_GOTO_IF_INVALID_PARAM(hEventObject, eError, error); + + eError = LinuxEventObjectAdd(hEventObject, phOSEvent); + PVR_LOG_GOTO_IF_ERROR(eError, "LinuxEventObjectAdd", error); + + return PVRSRV_OK; + +error: + *phOSEvent = NULL; + return eError; +} + +PVRSRV_ERROR OSEventObjectClose(IMG_HANDLE hOSEventKM) +{ + PVR_LOG_RETURN_IF_INVALID_PARAM(hOSEventKM, "hOSEventKM"); + + return LinuxEventObjectDelete(hOSEventKM); +} + +PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hEventObject) +{ + PVR_LOG_RETURN_IF_INVALID_PARAM(hEventObject, "hEventObject"); + + return LinuxEventObjectSignal(hEventObject); +} + +PVRSRV_ERROR OSCopyToUser(void *pvProcess, + void __user *pvDest, + const void *pvSrc, + size_t ui32Bytes) +{ + PVR_UNREFERENCED_PARAMETER(pvProcess); + + if (pvr_copy_to_user(pvDest, pvSrc, ui32Bytes)==0) + return PVRSRV_OK; + else + return PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY; +} + +PVRSRV_ERROR OSCopyFromUser(void *pvProcess, + void *pvDest, + const 
void __user *pvSrc, + size_t ui32Bytes) +{ + PVR_UNREFERENCED_PARAMETER(pvProcess); + + if (likely(pvr_copy_from_user(pvDest, pvSrc, ui32Bytes)==0)) + return PVRSRV_OK; + else + return PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY; +} + +IMG_UINT64 OSDivide64r64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder) +{ + *pui32Remainder = do_div(ui64Divident, ui32Divisor); + + return ui64Divident; +} + +IMG_UINT32 OSDivide64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder) +{ + *pui32Remainder = do_div(ui64Divident, ui32Divisor); + + return (IMG_UINT32) ui64Divident; +} + +/* One time osfunc initialisation */ +PVRSRV_ERROR PVROSFuncInit(void) +{ + { + PVR_ASSERT(!psTimerWorkQueue); + + psTimerWorkQueue = create_freezable_workqueue("pvr_timer"); + if (psTimerWorkQueue == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: couldn't create timer workqueue", + __func__)); + return PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD; + } + } + + { + IMG_UINT32 ui32i; + + for (ui32i = 0; ui32i < OS_MAX_TIMERS; ui32i++) + { + TIMER_CALLBACK_DATA *psTimerCBData = &sTimers[ui32i]; + + INIT_WORK(&psTimerCBData->sWork, OSTimerWorkQueueCallBack); + } + } + return PVRSRV_OK; +} + +/* + * Osfunc deinitialisation. 
+ * Note that PVROSFuncInit may not have been called + */ +void PVROSFuncDeInit(void) +{ + if (psTimerWorkQueue != NULL) + { + destroy_workqueue(psTimerWorkQueue); + psTimerWorkQueue = NULL; + } +} + +void OSDumpStack(void) +{ + dump_stack(); +} + +PVRSRV_ERROR OSChangeSparseMemCPUAddrMap(void **psPageArray, + IMG_UINT64 sCpuVAddrBase, + IMG_CPU_PHYADDR sCpuPAHeapBase, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices, + IMG_BOOL bIsLMA) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) + pfn_t sPFN; +#else + IMG_UINT64 uiPFN; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */ + + PVRSRV_ERROR eError; + + struct mm_struct *psMM = current->mm; + struct vm_area_struct *psVMA = NULL; + struct address_space *psMapping = NULL; + struct page *psPage = NULL; + + IMG_UINT64 uiCPUVirtAddr = 0; + IMG_UINT32 ui32Loop = 0; + IMG_UINT32 ui32PageSize = OSGetPageSize(); + IMG_BOOL bMixedMap = IMG_FALSE; + + /* + * Acquire the lock before manipulating the VMA + * In this case only mmap_sem lock would suffice as the pages associated with this VMA + * are never meant to be swapped out. + * + * In the future, in case the pages are marked as swapped, page_table_lock needs + * to be acquired in conjunction with this to disable page swapping. 
+ */ + + /* Find the Virtual Memory Area associated with the user base address */ + psVMA = find_vma(psMM, (uintptr_t)sCpuVAddrBase); + if (NULL == psVMA) + { + eError = PVRSRV_ERROR_PMR_NO_CPU_MAP_FOUND; + return eError; + } + + /* Acquire the memory sem */ + mmap_write_lock(psMM); + + psMapping = psVMA->vm_file->f_mapping; + + /* Set the page offset to the correct value as this is disturbed in MMAP_PMR func */ + psVMA->vm_pgoff = (psVMA->vm_start >> PAGE_SHIFT); + + /* Delete the entries for the pages that got freed */ + if (ui32FreePageCount && (pai32FreeIndices != NULL)) + { + for (ui32Loop = 0; ui32Loop < ui32FreePageCount; ui32Loop++) + { + uiCPUVirtAddr = (uintptr_t)(sCpuVAddrBase + (pai32FreeIndices[ui32Loop] * ui32PageSize)); + + unmap_mapping_range(psMapping, uiCPUVirtAddr, ui32PageSize, 1); + +#ifndef PVRSRV_UNMAP_ON_SPARSE_CHANGE + /* + * Still need to map pages in case remap flag is set. + * That is not done until the remap case succeeds + */ +#endif + } + eError = PVRSRV_OK; + } + + if ((psVMA->vm_flags & VM_MIXEDMAP) || bIsLMA) + { + psVMA->vm_flags |= VM_MIXEDMAP; + bMixedMap = IMG_TRUE; + } + else + { + if (ui32AllocPageCount && (NULL != pai32AllocIndices)) + { + for (ui32Loop = 0; ui32Loop < ui32AllocPageCount; ui32Loop++) + { + + psPage = (struct page *)psPageArray[pai32AllocIndices[ui32Loop]]; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) + sPFN = page_to_pfn_t(psPage); + + if (!pfn_t_valid(sPFN) || page_count(pfn_t_to_page(sPFN)) == 0) +#else + uiPFN = page_to_pfn(psPage); + + if (!pfn_valid(uiPFN) || (page_count(pfn_to_page(uiPFN)) == 0)) +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */ + { + bMixedMap = IMG_TRUE; + psVMA->vm_flags |= VM_MIXEDMAP; + break; + } + } + } + } + + /* Map the pages that got allocated */ + if (ui32AllocPageCount && (NULL != pai32AllocIndices)) + { + for (ui32Loop = 0; ui32Loop < ui32AllocPageCount; ui32Loop++) + { + int err; + + uiCPUVirtAddr = (uintptr_t)(sCpuVAddrBase + 
(pai32AllocIndices[ui32Loop] * ui32PageSize)); + unmap_mapping_range(psMapping, uiCPUVirtAddr, ui32PageSize, 1); + + if (bIsLMA) + { + phys_addr_t uiAddr = sCpuPAHeapBase.uiAddr + + ((IMG_DEV_PHYADDR *)psPageArray)[pai32AllocIndices[ui32Loop]].uiAddr; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) + sPFN = phys_to_pfn_t(uiAddr, 0); + psPage = pfn_t_to_page(sPFN); +#else + uiPFN = uiAddr >> PAGE_SHIFT; + psPage = pfn_to_page(uiPFN); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */ + } + else + { + psPage = (struct page *)psPageArray[pai32AllocIndices[ui32Loop]]; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) + sPFN = page_to_pfn_t(psPage); +#else + uiPFN = page_to_pfn(psPage); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */ + } + + if (bMixedMap) + { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)) + vm_fault_t vmf; + + vmf = vmf_insert_mixed(psVMA, uiCPUVirtAddr, sPFN); + if (vmf & VM_FAULT_ERROR) + { + err = vm_fault_to_errno(vmf, 0); + } + else + { + err = 0; + } +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) + err = vm_insert_mixed(psVMA, uiCPUVirtAddr, sPFN); +#else + err = vm_insert_mixed(psVMA, uiCPUVirtAddr, uiPFN); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)) */ + } + else + { + err = vm_insert_page(psVMA, uiCPUVirtAddr, psPage); + } + + if (err) + { + PVR_DPF((PVR_DBG_MESSAGE, "Remap failure error code: %d", err)); + eError = PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED; + goto eFailed; + } + } + } + + eError = PVRSRV_OK; +eFailed: + mmap_write_unlock(psMM); + + return eError; +} + +/*************************************************************************/ /*! +@Function OSDebugSignalPID +@Description Sends a SIGTRAP signal to a specific PID in user mode for + debugging purposes. The user mode process can register a handler + against this signal. + This is necessary to support the Rogue debugger. If the Rogue + debugger is not used then this function may be implemented as + a stub. 
+@Input ui32PID The PID for the signal. +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR OSDebugSignalPID(IMG_UINT32 ui32PID) +{ + int err; + struct pid *psPID; + + psPID = find_vpid(ui32PID); + if (psPID == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get PID struct.", __func__)); + return PVRSRV_ERROR_NOT_FOUND; + } + + err = kill_pid(psPID, SIGTRAP, 0); + if (err != 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Signal Failure %d", __func__, err)); + return PVRSRV_ERROR_SIGNAL_FAILED; + } + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! +@Function OSIsKernelThread +@Description This API determines if the current running thread is a kernel + thread (i.e. one not associated with any userland process, + typically an MISR handler.) +@Return IMG_TRUE if it is a kernel thread, otherwise IMG_FALSE. +*/ /**************************************************************************/ +IMG_BOOL OSIsKernelThread(void) +{ + /* + * Kernel threads have a NULL memory descriptor. 
+ * + * See https://www.kernel.org/doc/Documentation/vm/active_mm.txt + */ + return current->mm == NULL; +} + +void OSDumpVersionInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + PVR_DUMPDEBUG_LOG("OS kernel info: %s %s %s %s", + utsname()->sysname, + utsname()->release, + utsname()->version, + utsname()->machine); +} +#if defined(SUPPORT_DMA_TRANSFER) + +typedef struct _OS_CLEANUP_DATA_ +{ + IMG_BOOL bSucceed; + IMG_BOOL bAdvanceTimeline; + IMG_UINT uiRefCount; + IMG_UINT uiNumDMA; + IMG_UINT uiCount; + + struct dma_async_tx_descriptor** ppsDescriptors; + + + PVRSRV_DEVICE_NODE *psDevNode; + PFN_SERVER_CLEANUP pfnServerCleanup; + void* pvServerCleanupData; + + enum dma_transfer_direction eDirection; + struct sg_table **ppsSg; + struct page ***pages; + IMG_UINT32* puiNumPages; + spinlock_t spinlock; + + struct completion start_cleanup; + struct completion *sync_completion; + + /* Sparse PMR transfer information */ + IMG_BOOL *pbIsSparse; + IMG_UINT *uiNumValidPages; + struct sg_table ***ppsSgSparse; + struct dma_async_tx_descriptor*** ppsDescriptorsSparse; + +} OS_CLEANUP_DATA; + +static int cleanup_thread(void *pvData) +{ + IMG_UINT32 i, j; + struct completion *sync_completion = NULL; + OS_CLEANUP_DATA *psOSCleanup = (OS_CLEANUP_DATA*)pvData; + IMG_BOOL bSucceed = psOSCleanup->bSucceed; + + sync_completion = psOSCleanup->sync_completion; + +#if defined(DMA_VERBOSE) + PVR_DPF((PVR_DBG_ERROR, "Cleanup thread waiting (%p) on completion", pvData)); +#endif + + wait_for_completion(&psOSCleanup->start_cleanup); + +#if defined(DMA_VERBOSE) + PVR_DPF((PVR_DBG_ERROR, "Cleanup thread notified (%p)", pvData)); +#endif + /* Free resources */ + for (i=0; iuiCount; i++) + { + if (!psOSCleanup->pbIsSparse[i]) + { + dma_sync_sg_for_cpu(psOSCleanup->psDevNode->psDevConfig->pvOSDevice, + psOSCleanup->ppsSg[i]->sgl, + psOSCleanup->ppsSg[i]->nents, + psOSCleanup->eDirection); + + dma_unmap_sg(psOSCleanup->psDevNode->psDevConfig->pvOSDevice, + 
psOSCleanup->ppsSg[i]->sgl, + psOSCleanup->ppsSg[i]->nents, + psOSCleanup->eDirection); + + sg_free_table(psOSCleanup->ppsSg[i]); + + OSFreeMem(psOSCleanup->ppsSg[i]); + + /* Unpin pages */ + for (j=0; jpuiNumPages[i]; j++) + { + if (psOSCleanup->eDirection == DMA_DEV_TO_MEM) + { + set_page_dirty_lock(psOSCleanup->pages[i][j]); + } + put_page(psOSCleanup->pages[i][j]); + } + } + else + { + for (j = 0; j < psOSCleanup->puiNumPages[i]; j++) + { + if (psOSCleanup->ppsSgSparse[i][j]) { + dma_sync_sg_for_cpu(psOSCleanup->psDevNode->psDevConfig->pvOSDevice, + psOSCleanup->ppsSgSparse[i][j]->sgl, + psOSCleanup->ppsSgSparse[i][j]->nents, + psOSCleanup->eDirection); + + + dma_unmap_sg(psOSCleanup->psDevNode->psDevConfig->pvOSDevice, + psOSCleanup->ppsSgSparse[i][j]->sgl, + psOSCleanup->ppsSgSparse[i][j]->nents, + psOSCleanup->eDirection); + + sg_free_table(psOSCleanup->ppsSgSparse[i][j]); + + OSFreeMem(psOSCleanup->ppsSgSparse[i][j]); + + } + } + + OSFreeMem(psOSCleanup->ppsSgSparse[i]); + OSFreeMem(psOSCleanup->ppsDescriptorsSparse[i]); + + /* Unpin pages */ + for (j=0; jpuiNumPages[i]*2; j++) + { + /* + * Some pages might've been pinned twice + * Others may have not been pinned at all + */ + if (psOSCleanup->pages[i][j]) + { + if (psOSCleanup->eDirection == DMA_DEV_TO_MEM) + { + set_page_dirty_lock(psOSCleanup->pages[i][j]); + } + put_page(psOSCleanup->pages[i][j]); + } + } + } + + OSFreeMem(psOSCleanup->pages[i]); + } + + psOSCleanup->pfnServerCleanup(psOSCleanup->pvServerCleanupData, + psOSCleanup->bAdvanceTimeline); + + OSFreeMem(psOSCleanup->ppsSg); + OSFreeMem(psOSCleanup->pages); + OSFreeMem(psOSCleanup->puiNumPages); + OSFreeMem(psOSCleanup->ppsSgSparse); + OSFreeMem(psOSCleanup->ppsDescriptorsSparse); + OSFreeMem(psOSCleanup->ppsDescriptors); + OSFreeMem(psOSCleanup->pbIsSparse); + OSFreeMem(psOSCleanup->uiNumValidPages); + OSFreeMem(psOSCleanup); + + if (sync_completion && bSucceed) + { + complete(sync_completion); + } + + do_exit(0); + return 0; +} + +static 
void dma_callback(void *pvOSCleanup) +{ + OS_CLEANUP_DATA *psOSCleanup = (OS_CLEANUP_DATA*)pvOSCleanup; + unsigned long flags; + +#if defined(DMA_VERBOSE) + PVR_DPF((PVR_DBG_ERROR, "dma_callback (%p) refcount decreased to %d", psOSCleanup, psOSCleanup->uiRefCount - 1)); +#endif + spin_lock_irqsave(&psOSCleanup->spinlock, flags); + + psOSCleanup->uiRefCount--; + + if (psOSCleanup->uiRefCount==0) + { + /* Notify the cleanup thread */ + spin_unlock_irqrestore(&psOSCleanup->spinlock, flags); + complete(&psOSCleanup->start_cleanup); + return; + } + + spin_unlock_irqrestore(&psOSCleanup->spinlock, flags); + + return; +} + +#if defined(SUPPORT_VALIDATION) && defined(PVRSRV_DEBUG_DMA) +static void +DMADumpPhysicalAddresses(struct page **ppsHostMemPages, + IMG_UINT32 uiNumPages, + IMG_DMA_ADDR *sDmaAddr, + IMG_UINT64 ui64Offset) +{ + IMG_CPU_PHYADDR sPagePhysAddr; + IMG_UINT32 uiIdx; + + PVR_DPF((PVR_DBG_MESSAGE, "DMA Transfer Address Dump")); + PVR_DPF((PVR_DBG_MESSAGE, "Hostmem phys addresses:")); + + for (uiIdx = 0; uiIdx < uiNumPages; uiIdx++) + { + sPagePhysAddr.uiAddr = page_to_phys(ppsHostMemPages[uiIdx]); + if (uiIdx == 0) + { + sPagePhysAddr.uiAddr += ui64Offset; + PVR_DPF((PVR_DBG_MESSAGE, "\tHost mem start at 0x%llX", sPagePhysAddr.uiAddr)); + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "\tHost Mem Page %d at 0x%llX", uiIdx, + sPagePhysAddr.uiAddr)); + } + } + PVR_DPF((PVR_DBG_MESSAGE, "Devmem CPU phys address: 0x%llX", + sDmaAddr->uiAddr)); +} +#endif + +PVRSRV_ERROR OSDmaSubmitTransfer(PVRSRV_DEVICE_NODE *psDevNode, void *pvOSData, + void *pvChan, IMG_BOOL bSynchronous) +{ + OS_CLEANUP_DATA *psOSCleanup = (OS_CLEANUP_DATA*)pvOSData; + struct completion* sync_completion = NULL; + + psOSCleanup->bSucceed = IMG_TRUE; + psOSCleanup->bAdvanceTimeline = IMG_TRUE; + + if (bSynchronous) + { + sync_completion = OSAllocZMem(sizeof(struct completion)); + init_completion(sync_completion); + } + + PVR_UNREFERENCED_PARAMETER(psDevNode); + /* Wait only on number of ops 
scheduled. This might be different to NumDMAs + in certain error conditions */ + psOSCleanup->uiRefCount = psOSCleanup->uiCount; + psOSCleanup->sync_completion = sync_completion; + + { + IMG_UINT32 i,j; + for (i=0; iuiCount; i++) + { + if (psOSCleanup->pbIsSparse[i]) + { + for (j=0; jpuiNumPages[i]; j++) + { + if (psOSCleanup->ppsDescriptorsSparse[i][j]) + dmaengine_submit(psOSCleanup->ppsDescriptorsSparse[i][j]); + } + } + else + { + dmaengine_submit(psOSCleanup->ppsDescriptors[i]); + } + } + } + + dma_async_issue_pending(pvChan); + + if (bSynchronous) + { + wait_for_completion(sync_completion); + OSFreeMem(sync_completion); + } + + return PVRSRV_OK; +} + +void OSDmaForceCleanup(PVRSRV_DEVICE_NODE *psDevNode, void *pvChan, + void *pvOSData, void *pvServerCleanupParam, + PFN_SERVER_CLEANUP pfnServerCleanup) +{ + OS_CLEANUP_DATA *psOSCleanup = (OS_CLEANUP_DATA *)pvOSData; + IMG_UINT ui32Retries; + + PVR_UNREFERENCED_PARAMETER(psDevNode); + + psOSCleanup->bSucceed = IMG_FALSE; + psOSCleanup->bAdvanceTimeline = IMG_TRUE; + + /* Need to wait for outstanding DMA Engine ops before advancing the + user-supplied timeline in case of error. dmaengine_terminate_sync + cannot be called from within atomic context, so cannot invoke it + from inside the cleanup kernel thread. 
*/ + for (ui32Retries = 0; ui32Retries < DMA_ERROR_SYNC_RETRIES; ui32Retries++) + { + if (dmaengine_terminate_sync(pvChan) == 0) + { + break; + } + } + if (ui32Retries == DMA_ERROR_SYNC_RETRIES) + { + /* We cannot guarantee all outstanding DMAs were terminated + * so we let the UM fence time out as a fallback mechanism */ + psOSCleanup->bAdvanceTimeline = IMG_FALSE; + } + + if (psOSCleanup->uiCount > 0) + { + complete(&psOSCleanup->start_cleanup); + } + else + { + /* Cleanup kthread never run, need to manually wind down */ + pfnServerCleanup(pvServerCleanupParam, psOSCleanup->bAdvanceTimeline); + + OSFreeMem(psOSCleanup->ppsSg); + OSFreeMem(psOSCleanup->pages); + OSFreeMem(psOSCleanup->puiNumPages); + OSFreeMem(psOSCleanup->ppsSgSparse); + OSFreeMem(psOSCleanup->pbIsSparse); + OSFreeMem(psOSCleanup->uiNumValidPages); + OSFreeMem(psOSCleanup->ppsDescriptors); + OSFreeMem(psOSCleanup->ppsDescriptorsSparse); + + OSFreeMem(psOSCleanup); + } +} + +PVRSRV_ERROR OSDmaAllocData(PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT32 uiNumDMA, void **pvOutData) +{ + PVRSRV_ERROR eError; + OS_CLEANUP_DATA *psOSCleanup = OSAllocZMem(sizeof(OS_CLEANUP_DATA)); + PVR_LOG_GOTO_IF_NOMEM(psOSCleanup, eError, e0); + + psOSCleanup->uiNumDMA = uiNumDMA; + psOSCleanup->psDevNode = psDevNode; + + spin_lock_init(&psOSCleanup->spinlock); + + init_completion(&psOSCleanup->start_cleanup); + + psOSCleanup->ppsDescriptors = OSAllocZMem(uiNumDMA * sizeof(struct dma_async_tx_descriptor*)); + PVR_LOG_GOTO_IF_NOMEM(psOSCleanup->ppsDescriptors, eError, e0); + + psOSCleanup->ppsDescriptorsSparse = OSAllocZMem(uiNumDMA * sizeof(struct dma_async_tx_descriptor*)); + PVR_LOG_GOTO_IF_NOMEM(psOSCleanup->ppsDescriptorsSparse, eError, e11); + + psOSCleanup->ppsSg = OSAllocZMem(uiNumDMA * sizeof(struct sg_table*)); + PVR_LOG_GOTO_IF_NOMEM(psOSCleanup->ppsSg, eError, e1); + + psOSCleanup->ppsSgSparse = OSAllocZMem(uiNumDMA * sizeof(struct sg_table*)); + PVR_LOG_GOTO_IF_NOMEM(psOSCleanup->ppsSgSparse, eError, e12); + + 
psOSCleanup->pbIsSparse = OSAllocZMem(uiNumDMA * sizeof(IMG_BOOL)); + PVR_LOG_GOTO_IF_NOMEM(psOSCleanup->pbIsSparse, eError, e13); + + psOSCleanup->uiNumValidPages = OSAllocZMem(uiNumDMA * sizeof(IMG_UINT)); + PVR_LOG_GOTO_IF_NOMEM(psOSCleanup->uiNumValidPages, eError, e14); + + psOSCleanup->pages = OSAllocZMem(uiNumDMA * sizeof(struct page **)); + PVR_LOG_GOTO_IF_NOMEM(psOSCleanup->pages, eError, e2); + + psOSCleanup->puiNumPages = OSAllocZMem(uiNumDMA * sizeof(IMG_UINT32)); + PVR_LOG_GOTO_IF_NOMEM(psOSCleanup->puiNumPages, eError, e3); + + *pvOutData = psOSCleanup; + + return PVRSRV_OK; + +e3: + OSFreeMem(psOSCleanup->pages); +e2: + OSFreeMem(psOSCleanup->uiNumValidPages); +e14: + OSFreeMem(psOSCleanup->pbIsSparse); +e13: + OSFreeMem(psOSCleanup->ppsSgSparse); +e12: + OSFreeMem(psOSCleanup->ppsSg); +e1: + OSFreeMem(psOSCleanup->ppsDescriptorsSparse); +e11: + OSFreeMem(psOSCleanup->ppsDescriptors); +e0: + OSFreeMem(psOSCleanup); + return eError; +} + +/*************************************************************************/ /*! 
+@Function OSDmaTransfer +@Description This API is used to ask OS to perform a DMA transfer operation +@Return +*/ /**************************************************************************/ +PVRSRV_ERROR OSDmaPrepareTransfer(PVRSRV_DEVICE_NODE *psDevNode, + void* pvChan, + IMG_DMA_ADDR* psDmaAddr, IMG_UINT64* puiAddress, + IMG_UINT64 uiSize, IMG_BOOL bMemToDev, + void* pvOSData, + void* pvServerCleanupParam, PFN_SERVER_CLEANUP pfnServerCleanup, IMG_BOOL bFirst) +{ + + IMG_INT iRet; + PVRSRV_ERROR eError; + PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; + OS_CLEANUP_DATA* psOSCleanupData = pvOSData; + + struct dma_slave_config sConfig = {0}; + struct dma_async_tx_descriptor *psDesc; + + unsigned long offset = (unsigned long)puiAddress & ((1 << PAGE_SHIFT) - 1); + unsigned num_pages = (uiSize + offset + PAGE_SIZE - 1) >> PAGE_SHIFT; + int num_pinned_pages = 0; + unsigned int gup_flags = 0; + + struct sg_table *psSg = OSAllocZMem(sizeof(struct sg_table)); + PVR_LOG_GOTO_IF_NOMEM(psSg, eError, e0); + + psOSCleanupData->pages[psOSCleanupData->uiCount] = OSAllocZMem(num_pages * sizeof(struct page *)); + PVR_LOG_GOTO_IF_NOMEM(psOSCleanupData->pages[psOSCleanupData->uiCount], eError, e1); + + gup_flags |= bMemToDev ? 
0 : (FOLL_WRITE | FOLL_POPULATE); + + num_pinned_pages = get_user_pages_fast( + (unsigned long)puiAddress, + (int)num_pages, + gup_flags, + psOSCleanupData->pages[psOSCleanupData->uiCount]); + if (num_pinned_pages != num_pages) + { + PVR_DPF((PVR_DBG_ERROR, "get_user_pages_fast failed: (%d - %u)", num_pinned_pages, num_pages)); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto e2; + } + +#if defined(SUPPORT_VALIDATION) && defined(PVRSRV_DEBUG_DMA) + DMADumpPhysicalAddresses(psOSCleanupData->pages[psOSCleanupData->uiCount], + num_pages, psDmaAddr, offset); +#endif + + psOSCleanupData->puiNumPages[psOSCleanupData->uiCount] = num_pinned_pages; + + if (sg_alloc_table_from_pages(psSg, psOSCleanupData->pages[psOSCleanupData->uiCount], num_pages, offset, uiSize, GFP_KERNEL) != 0) + { + eError = PVRSRV_ERROR_BAD_MAPPING; + PVR_DPF((PVR_DBG_ERROR, "sg_alloc_table_from_pages failed")); + goto e3; + } + + if (bMemToDev) + { + sConfig.direction = DMA_MEM_TO_DEV; + sConfig.src_addr = 0; + sConfig.dst_addr = psDmaAddr->uiAddr; + } + else + { + sConfig.direction = DMA_DEV_TO_MEM; + sConfig.src_addr = psDmaAddr->uiAddr; + sConfig.dst_addr = 0; + } + dmaengine_slave_config(pvChan, &sConfig); + + iRet = dma_map_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, sConfig.direction); + if (!iRet) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Error mapping SG list", __func__)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto e4; + } + + dma_sync_sg_for_device(psDevConfig->pvOSDevice, psSg->sgl,(unsigned)iRet, sConfig.direction); + + psDesc = dmaengine_prep_slave_sg(pvChan, psSg->sgl, (unsigned)iRet, sConfig.direction, 0); + if (!psDesc) + { + PVR_DPF((PVR_DBG_ERROR, "%s: dmaengine_prep_slave_sg failed", __func__)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto e5; + } + + psOSCleanupData->eDirection = sConfig.direction; + psOSCleanupData->ppsSg[psOSCleanupData->uiCount] = psSg; + psOSCleanupData->pfnServerCleanup = pfnServerCleanup; + psOSCleanupData->pvServerCleanupData = pvServerCleanupParam; + + 
psDesc->callback_param = psOSCleanupData; + psDesc->callback = dma_callback; + + if (bFirst) + { + struct task_struct* t1; + t1 = kthread_run(cleanup_thread, psOSCleanupData, "dma-cleanup-thread"); + } + psOSCleanupData->ppsDescriptors[psOSCleanupData->uiCount] = psDesc; + + psOSCleanupData->uiCount++; + + return PVRSRV_OK; + +e5: + dma_unmap_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, sConfig.direction); +e4: + sg_free_table(psSg); +e3: + { + IMG_UINT32 i; + /* Unpin pages */ + for (i=0; ipuiNumPages[psOSCleanupData->uiCount]; i++) + { + put_page(psOSCleanupData->pages[psOSCleanupData->uiCount][i]); + } + } +e2: + OSFreeMem(psOSCleanupData->pages[psOSCleanupData->uiCount]); +e1: + OSFreeMem(psSg); +e0: + return eError; +} + +static IMG_UINT32 +CalculateValidPages(IMG_BOOL *pbValid, + IMG_UINT32 ui32SizeInPages) +{ + IMG_UINT32 ui32nValid; + IMG_UINT32 ui32Idx; + + for (ui32Idx = 0, ui32nValid = 0; ui32Idx < ui32SizeInPages; ui32Idx++) + { + ui32nValid += pbValid[ui32Idx] ? 1 : 0; + } + + return ui32nValid; +} + +PVRSRV_ERROR OSDmaPrepareTransferSparse(PVRSRV_DEVICE_NODE *psDevNode, + void* pvChan, + IMG_DMA_ADDR* psDmaAddr, + IMG_BOOL *pbValid, + IMG_UINT64* puiAddress, + IMG_UINT64 uiSize, + IMG_UINT32 uiOffsetInFirstPMRPage, + IMG_UINT32 ui32SizeInPages, + IMG_BOOL bMemToDev, + void* pvOSData, + void* pvServerCleanupParam, + PFN_SERVER_CLEANUP pfnServerCleanup, + IMG_BOOL bFirst) +{ + + IMG_INT iRet; + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; + OS_CLEANUP_DATA* psOSCleanupData = pvOSData; + IMG_UINT32 ui32PageSize = OSGetPageSize(); + void *pvNextAddress = puiAddress; + IMG_UINT32 ui32Idx; + IMG_INT32 i32Rwd; + + struct dma_slave_config sConfig = {0}; + struct dma_async_tx_descriptor *psDesc; + + unsigned long offset = (unsigned long)puiAddress & ((1 << PAGE_SHIFT) - 1); + unsigned num_pages = (uiSize + offset + PAGE_SIZE - 1) >> PAGE_SHIFT; + unsigned int num_valid_pages = 
CalculateValidPages(pbValid, ui32SizeInPages); + unsigned num_pinned_pages = 0; + unsigned int gup_flags = 0; + unsigned int valid_idx; + size_t transfer_size; + struct page ** next_pages; + struct sg_table *psSg; + + psOSCleanupData->uiNumValidPages[psOSCleanupData->uiCount] = num_valid_pages; + psOSCleanupData->pbIsSparse[psOSCleanupData->uiCount] = IMG_TRUE; + + /* + * If an SG transfer from virtual memory to card memory goes over a page boundary in + * main memory, it'll span two different pages - therefore, total number of pages to + * keep track of should be twice as many as for a simple transfer. This twice-as-big + * allocation is also necessary because the same virtual memory page might be present + * in more than one SG DMA transfer, because of differences in first-page offset between + * the sparse device PMR and the virtual memory buffer. + */ + psOSCleanupData->pages[psOSCleanupData->uiCount] = OSAllocZMem(2*num_valid_pages * sizeof(struct page *)); + PVR_LOG_GOTO_IF_NOMEM(psOSCleanupData->pages[psOSCleanupData->uiCount], eError, e0); + + psOSCleanupData->ppsSgSparse[psOSCleanupData->uiCount] = OSAllocZMem(num_valid_pages * sizeof(struct sg_table *)); + PVR_LOG_GOTO_IF_NOMEM(psOSCleanupData->ppsSgSparse[psOSCleanupData->uiCount], eError, e1); + + psOSCleanupData->ppsDescriptorsSparse[psOSCleanupData->uiCount] = OSAllocZMem(num_valid_pages * sizeof(struct dma_async_tx_descriptor *)); + PVR_LOG_GOTO_IF_NOMEM(psOSCleanupData->ppsDescriptorsSparse[psOSCleanupData->uiCount], eError, e11); + + gup_flags |= bMemToDev ? 0 : (FOLL_WRITE | FOLL_POPULATE); + + for (ui32Idx = 0, valid_idx = 0; ui32Idx < ui32SizeInPages; ui32Idx++) + { + if (valid_idx == num_valid_pages) + { + break; + } + if (!pbValid[ui32Idx]) + { + pvNextAddress += (ui32Idx == 0) ? 
ui32PageSize - uiOffsetInFirstPMRPage : ui32PageSize; + continue; + } + + /* Pick transfer size */ + if (ui32Idx == 0) + { + if (uiOffsetInFirstPMRPage + uiSize <= ui32PageSize) + { + PVR_ASSERT(num_valid_pages == 1); + transfer_size = uiSize; + } + else + { + transfer_size = ui32PageSize - uiOffsetInFirstPMRPage; + } + } + else + { + /* Last valid LMA page */ + if (valid_idx == num_valid_pages - 1) + { + transfer_size = ((uiOffsetInFirstPMRPage + uiSize - 1) % ui32PageSize) + 1; + } + else + { + transfer_size = ui32PageSize; + } + } + + if ((unsigned long long)pvNextAddress + transfer_size > PAGE_ALIGN((unsigned long long)pvNextAddress)) + { + num_pages = 2; + } + else + { + num_pages = 1; + } + + next_pages = psOSCleanupData->pages[psOSCleanupData->uiCount] + (valid_idx * 2); + + num_pinned_pages = get_user_pages_fast( + (unsigned long)pvNextAddress, + (int)num_pages, + gup_flags, + next_pages); + if (num_pinned_pages != num_pages) + { + PVR_DPF((PVR_DBG_ERROR, "get_user_pages_fast for sparse failed: (%d - %u)", num_pinned_pages, num_pages)); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto e2; + } + +#if defined(SUPPORT_VALIDATION) && defined(PVRSRV_DEBUG_DMA) + DMADumpPhysicalAddresses(next_pages, num_pages, + &psDmaAddr[ui32Idx], + (unsigned long)pvNextAddress & (ui32PageSize - 1)); +#endif + + psSg = OSAllocZMem(sizeof(struct sg_table)); + PVR_LOG_GOTO_IF_NOMEM(psSg, eError, e3); + + if (sg_alloc_table_from_pages(psSg, next_pages, num_pages, + (unsigned long)pvNextAddress & (ui32PageSize - 1), + transfer_size, + GFP_KERNEL) != 0) + { + eError = PVRSRV_ERROR_BAD_MAPPING; + PVR_DPF((PVR_DBG_ERROR, "sg_alloc_table_from_pages failed")); + goto e4; + } + + pvNextAddress += transfer_size; + + if (bMemToDev) + { + sConfig.direction = DMA_MEM_TO_DEV; + sConfig.src_addr = 0; + sConfig.dst_addr = psDmaAddr[ui32Idx].uiAddr; + } + else + { + sConfig.direction = DMA_DEV_TO_MEM; + sConfig.src_addr = psDmaAddr[ui32Idx].uiAddr; + sConfig.dst_addr = 0; + } + 
dmaengine_slave_config(pvChan, &sConfig); + + iRet = dma_map_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, sConfig.direction); + if (!iRet) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Error mapping SG list", __func__)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto e5; + } + dma_sync_sg_for_device(psDevConfig->pvOSDevice, psSg->sgl,(unsigned)iRet, sConfig.direction); + + psDesc = dmaengine_prep_slave_sg(pvChan, psSg->sgl, (unsigned)iRet, sConfig.direction, 0); + if (!psDesc) + { + PVR_DPF((PVR_DBG_ERROR, "%s: dmaengine_prep_slave_sg failed", __func__)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto e6; + } + + psOSCleanupData->ppsSgSparse[psOSCleanupData->uiCount][valid_idx] = psSg; + psOSCleanupData->ppsDescriptorsSparse[psOSCleanupData->uiCount][valid_idx] = psDesc; + psOSCleanupData->puiNumPages[psOSCleanupData->uiCount] = ++valid_idx; + + if (valid_idx == num_valid_pages) + { + psDesc->callback_param = psOSCleanupData; + psDesc->callback = dma_callback; + + if (bFirst) + { + struct task_struct* t1; + + psOSCleanupData->eDirection = sConfig.direction; + psOSCleanupData->pfnServerCleanup = pfnServerCleanup; + psOSCleanupData->pvServerCleanupData = pvServerCleanupParam; + + t1 = kthread_run(cleanup_thread, psOSCleanupData, "dma-cleanup-thread"); + } + + psOSCleanupData->uiCount++; + } + + } + + return PVRSRV_OK; + +e6: + dma_unmap_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, sConfig.direction); +e5: + sg_free_table(psSg); +e4: + OSFreeMem(psSg); +e3: + /* Unpin last */ + put_page(psOSCleanupData->pages[psOSCleanupData->uiCount][valid_idx]); + if (psOSCleanupData->pages[psOSCleanupData->uiCount][valid_idx+1]) + { + put_page(psOSCleanupData->pages[psOSCleanupData->uiCount][valid_idx+1]); + } +e2: + /* rewind */ + for (i32Rwd=valid_idx-1; i32Rwd >= 0; i32Rwd--) + { + IMG_UINT32 i; + + psSg = psOSCleanupData->ppsSgSparse[psOSCleanupData->uiCount][i32Rwd]; + dma_unmap_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, sConfig.direction); + sg_free_table(psSg); 
+ + /* Unpin pages */ + for (i=0; i < psOSCleanupData->puiNumPages[psOSCleanupData->uiCount]*2; i++) + { + if (psOSCleanupData->pages[psOSCleanupData->uiCount][i]) + { + put_page(psOSCleanupData->pages[psOSCleanupData->uiCount][i]); + } + } + } + OSFreeMem(psOSCleanupData->ppsDescriptorsSparse[psOSCleanupData->uiCount]); +e11: + OSFreeMem(psOSCleanupData->ppsSgSparse[psOSCleanupData->uiCount]); +e1: + OSFreeMem(psOSCleanupData->pages[psOSCleanupData->uiCount]); +e0: + return eError; +} + +#endif /* SUPPORT_DMA_TRANSFER */ diff --git a/drivers/gpu/drm/phytium/octopus/osfunc.h b/drivers/gpu/drm/phytium/octopus/osfunc.h new file mode 100644 index 000000000000..b69fcab9f163 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/osfunc.h @@ -0,0 +1,1655 @@ +/*************************************************************************/ /*! +@File +@Title OS functions header +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description OS specific API definitions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifdef DEBUG_RELEASE_BUILD +#pragma optimize( "", off ) +#define DEBUG 1 +#endif + +#ifndef OSFUNC_H +/*! @cond Doxygen_Suppress */ +#define OSFUNC_H +/*! 
@endcond */ + +#if defined(__linux__) && defined(__KERNEL__) +#include "kernel_nospec.h" +#if !defined(NO_HARDWARE) +#include + +#endif +#endif + +#include + +#if defined(__QNXNTO__) +#include +#include +#endif + +#if defined(INTEGRITY_OS) +#include +#include +#endif + +#include "img_types.h" +#include "img_defs.h" +#include "device.h" +#include "pvrsrv_device.h" +#include "cache_ops.h" +#include "osfunc_common.h" +#if defined(SUPPORT_DMA_TRANSFER) +#include "dma_km.h" +#include "pmr.h" +#endif + +/****************************************************************************** + * Static defines + *****************************************************************************/ +/*! + * Returned by OSGetCurrentProcessID() and OSGetCurrentThreadID() if the OS + * is currently operating in the interrupt context. + */ +#define KERNEL_ID 0xffffffffL + +#if defined(__linux__) && defined(__KERNEL__) +#define OSConfineArrayIndexNoSpeculation(index, size) array_index_nospec((index), (size)) +#elif defined(__QNXNTO__) +#define OSConfineArrayIndexNoSpeculation(index, size) (index) +#define PVRSRV_MISSING_NO_SPEC_IMPL +#elif defined(INTEGRITY_OS) +#define OSConfineArrayIndexNoSpeculation(index, size) (index) +#define PVRSRV_MISSING_NO_SPEC_IMPL +#else +/*************************************************************************/ /*! +@Function OSConfineArrayIndexNoSpeculation +@Description This macro aims to avoid code exposure to Cache Timing + Side-Channel Mechanisms which rely on speculative code + execution (Variant 1). It does so by ensuring a value to be + used as an array index will be set to zero if outside of the + bounds of the array, meaning any speculative execution of code + which uses this suitably adjusted index value will not then + attempt to load data from memory outside of the array bounds. 
+ Code calling this macro must still first verify that the + original unmodified index value is within the bounds of the + array, and should then only use the modified value returned + by this function when accessing the array itself. + NB. If no OS-specific implementation of this macro is + defined, the original index is returned unmodified and no + protection against the potential exploit is provided. +@Input index The original array index value that would be used to + access the array. +@Input size The number of elements in the array being accessed. +@Return The value to use for the array index, modified so that it + remains within array bounds. +*/ /**************************************************************************/ +#define OSConfineArrayIndexNoSpeculation(index, size) (index) +#if !defined(DOXYGEN) +#define PVRSRV_MISSING_NO_SPEC_IMPL +#endif +#endif + +/*************************************************************************/ /*! +@Function OSClockns64 +@Description This function returns the number of ticks since system boot + expressed in nanoseconds. Unlike OSClockns, OSClockns64 has + a near 64-bit range. +@Return The 64-bit clock value, in nanoseconds. +*/ /**************************************************************************/ +IMG_UINT64 OSClockns64(void); + +/*************************************************************************/ /*! +@Function OSClockus64 +@Description This function returns the number of ticks since system boot + expressed in microseconds. Unlike OSClockus, OSClockus64 has + a near 64-bit range. +@Return The 64-bit clock value, in microseconds. +*/ /**************************************************************************/ +IMG_UINT64 OSClockus64(void); + +/*************************************************************************/ /*! +@Function OSClockus +@Description This function returns the number of ticks since system boot + in microseconds. +@Return The 32-bit clock value, in microseconds. 
+*/ /**************************************************************************/ +IMG_UINT32 OSClockus(void); + +/*************************************************************************/ /*! +@Function OSClockms +@Description This function returns the number of ticks since system boot + in milliseconds. +@Return The 32-bit clock value, in milliseconds. +*/ /**************************************************************************/ +IMG_UINT32 OSClockms(void); + +/*************************************************************************/ /*! +@Function OSClockMonotonicns64 +@Description This function returns a clock value based on the system + monotonic clock. +@Output pui64Time The 64-bit clock value, in nanoseconds. +@Return Error Code. +*/ /**************************************************************************/ +PVRSRV_ERROR OSClockMonotonicns64(IMG_UINT64 *pui64Time); + +/*************************************************************************/ /*! +@Function OSClockMonotonicus64 +@Description This function returns a clock value based on the system + monotonic clock. +@Output pui64Time The 64-bit clock value, in microseconds. +@Return Error Code. +*/ /**************************************************************************/ +PVRSRV_ERROR OSClockMonotonicus64(IMG_UINT64 *pui64Time); + +/*************************************************************************/ /*! +@Function OSClockMonotonicRawns64 +@Description This function returns a clock value based on the system + monotonic raw clock. +@Return 64bit ns timestamp +*/ /**************************************************************************/ +IMG_UINT64 OSClockMonotonicRawns64(void); + +/*************************************************************************/ /*! +@Function OSClockMonotonicRawus64 +@Description This function returns a clock value based on the system + monotonic raw clock. 
+@Return 64bit us timestamp +*/ /**************************************************************************/ +IMG_UINT64 OSClockMonotonicRawus64(void); + +/*************************************************************************/ /*! +@Function OSGetPageSize +@Description This function returns the page size. + If the OS is not using memory mappings it should return a + default value of 4096. +@Return The size of a page, in bytes. +*/ /**************************************************************************/ +size_t OSGetPageSize(void); + +/*************************************************************************/ /*! +@Function OSGetPageShift +@Description This function returns the page size expressed as a power of + two. A number of pages, left-shifted by this value, gives the + equivalent size in bytes. + If the OS is not using memory mappings it should return a + default value of 12. +@Return The page size expressed as a power of two. +*/ /**************************************************************************/ +size_t OSGetPageShift(void); + +/*************************************************************************/ /*! +@Function OSGetPageMask +@Description This function returns a bitmask that may be applied to an + address to mask off the least-significant bits so as to + leave the start address of the page containing that address. +@Return The page mask. +*/ /**************************************************************************/ +size_t OSGetPageMask(void); + +/*************************************************************************/ /*! +@Function OSGetOrder +@Description This function returns the order of power of two for a given + size. Eg. for a uSize of 4096 bytes the function would + return 12 (4096 = 2^12). +@Input uSize The size in bytes. +@Return The order of power of two. 
+*/ /**************************************************************************/ +size_t OSGetOrder(size_t uSize); + +/*************************************************************************/ /*! +@Function OSGetRAMSize +@Description This function returns the total amount of GPU-addressable + memory provided by the system. In other words, after loading + the driver this would be the largest allocation an + application would reasonably expect to be able to make. + Note that this function is not expected to return the + current available memory but the amount which would be + available on startup. +@Return Total GPU-addressable memory size, in bytes. +*/ /**************************************************************************/ +IMG_UINT64 OSGetRAMSize(void); + +/*************************************************************************/ /*! +@Description Pointer to a Mid-level Interrupt Service Routine (MISR). +@Input pvData Pointer to MISR specific data. +*/ /**************************************************************************/ +typedef void (*PFN_MISR)(void *pvData); + +/*************************************************************************/ /*! +@Description Pointer to a thread entry point function. +@Input pvData Pointer to thread specific data. +*/ /**************************************************************************/ +typedef void (*PFN_THREAD)(void *pvData); + +/*************************************************************************/ /*! +@Function OSChangeSparseMemCPUAddrMap +@Description This function changes the CPU mapping of the underlying + sparse allocation. It is used by a PMR 'factory' + implementation if that factory supports sparse + allocations. 
+@Input psPageArray array representing the pages in the + sparse allocation +@Input sCpuVAddrBase the virtual base address of the sparse + allocation ('first' page) +@Input sCpuPAHeapBase the physical address of the virtual + base address 'sCpuVAddrBase' +@Input ui32AllocPageCount the number of pages referenced in + 'pai32AllocIndices' +@Input pai32AllocIndices list of indices of pages within + 'psPageArray' that we now want to + allocate and map +@Input ui32FreePageCount the number of pages referenced in + 'pai32FreeIndices' +@Input pai32FreeIndices list of indices of pages within + 'psPageArray' we now want to + unmap and free +@Input bIsLMA flag indicating if the sparse allocation + is from LMA or UMA memory +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR OSChangeSparseMemCPUAddrMap(void **psPageArray, + IMG_UINT64 sCpuVAddrBase, + IMG_CPU_PHYADDR sCpuPAHeapBase, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices, + IMG_BOOL bIsLMA); + +/*************************************************************************/ /*! +@Function OSInstallMISR +@Description Installs a Mid-level Interrupt Service Routine (MISR) + which handles higher-level processing of interrupts from + the device (GPU). + An MISR runs outside of interrupt context, and so may be + descheduled. This means it can contain code that would + not be permitted in the LISR. + An MISR is invoked when OSScheduleMISR() is called. This + call should be made by installed LISR once it has completed + its interrupt processing. + Multiple MISRs may be installed by the driver to handle + different causes of interrupt. +@Input pfnMISR pointer to the function to be installed + as the MISR +@Input hData private data provided to the MISR +@Input pszMisrName Name describing purpose of MISR worker thread + (Must be a string literal). 
+@Output hMISRData handle to the installed MISR (to be used + for a subsequent uninstall) +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR OSInstallMISR(IMG_HANDLE *hMISRData, + PFN_MISR pfnMISR, + void *hData, + const IMG_CHAR *pszMisrName); + +/*************************************************************************/ /*! +@Function OSUninstallMISR +@Description Uninstalls a Mid-level Interrupt Service Routine (MISR). +@Input hMISRData handle to the installed MISR +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR OSUninstallMISR(IMG_HANDLE hMISRData); + +/*************************************************************************/ /*! +@Function OSScheduleMISR +@Description Schedules a Mid-level Interrupt Service Routine (MISR) to be + executed. An MISR should be executed outside of interrupt + context, for example in a work queue. +@Input hMISRData handle to the installed MISR +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR OSScheduleMISR(IMG_HANDLE hMISRData); + +/*************************************************************************/ /*! +@Description Pointer to a function implementing debug dump of thread-specific + data. +@Input pfnDumpDebugPrintf Used to specify the print function used + to dump any debug information. If this + argument is NULL then a default print + function will be used. +@Input pvDumpDebugFile File identifier to be passed to the + print function if specified. +*/ /**************************************************************************/ + +typedef void (*PFN_THREAD_DEBUG_DUMP)(DUMPDEBUG_PRINTF_FUNC* pfnDumpDebugPrintf, + void *pvDumpDebugFile); + +/*************************************************************************/ /*! 
+@Function OSThreadCreate +@Description Creates a kernel thread and starts it running. The caller + is responsible for informing the thread that it must finish + and return from the pfnThread function. It is not possible + to kill or terminate it. The new thread runs with the default + priority provided by the Operating System. + Note: Kernel threads are freezable which means that they + can be frozen by the kernel on for example driver suspend. + Because of that only OSEventObjectWaitKernel() function should + be used to put kernel threads in waiting state. +@Output phThread Returned handle to the thread. +@Input pszThreadName Name to assign to the thread. +@Input pfnThread Thread entry point function. +@Input pfnDebugDumpCB Used to dump info of the created thread +@Input bIsSupportingThread Set, if summary of this thread needs to + be dumped in debug_dump +@Input hData Thread specific data pointer for pfnThread(). +@Return Standard PVRSRV_ERROR error code. +*/ /**************************************************************************/ + +PVRSRV_ERROR OSThreadCreate(IMG_HANDLE *phThread, + IMG_CHAR *pszThreadName, + PFN_THREAD pfnThread, + PFN_THREAD_DEBUG_DUMP pfnDebugDumpCB, + IMG_BOOL bIsSupportingThread, + void *hData); + +/*! Available priority levels for the creation of a new Kernel Thread. */ +typedef enum priority_levels +{ + OS_THREAD_NOSET_PRIORITY = 0, /* With this option the priority level is the default for the given OS */ + OS_THREAD_HIGHEST_PRIORITY, + OS_THREAD_HIGH_PRIORITY, + OS_THREAD_NORMAL_PRIORITY, + OS_THREAD_LOW_PRIORITY, + OS_THREAD_LOWEST_PRIORITY, + OS_THREAD_LAST_PRIORITY /* This must be always the last entry */ +} OS_THREAD_LEVEL; + +/*************************************************************************/ /*! +@Function OSThreadCreatePriority +@Description As OSThreadCreate, this function creates a kernel thread and + starts it running. 
The difference is that with this function + is possible to specify the priority used to schedule the new + thread. + +@Output phThread Returned handle to the thread. +@Input pszThreadName Name to assign to the thread. +@Input pfnThread Thread entry point function. +@Input pfnDebugDumpCB Used to dump info of the created thread +@Input bIsSupportingThread Set, if summary of this thread needs to + be dumped in debug_dump +@Input hData Thread specific data pointer for pfnThread(). +@Input eThreadPriority Priority level to assign to the new thread. +@Return Standard PVRSRV_ERROR error code. +*/ /**************************************************************************/ +PVRSRV_ERROR OSThreadCreatePriority(IMG_HANDLE *phThread, + IMG_CHAR *pszThreadName, + PFN_THREAD pfnThread, + PFN_THREAD_DEBUG_DUMP pfnDebugDumpCB, + IMG_BOOL bIsSupportingThread, + void *hData, + OS_THREAD_LEVEL eThreadPriority); + +/*************************************************************************/ /*! +@Function OSThreadDestroy +@Description Waits for the thread to end and then destroys the thread + handle memory. This function will block and wait for the + thread to finish successfully, thereby providing a sync point + for the thread completing its work. No attempt is made to kill + or otherwise terminate the thread. +@Input hThread The thread handle returned by OSThreadCreate(). +@Return Standard PVRSRV_ERROR error code. +*/ /**************************************************************************/ +PVRSRV_ERROR OSThreadDestroy(IMG_HANDLE hThread); + +/*************************************************************************/ /*! +@Function OSMapPhysToLin +@Description Maps physical memory into a linear address range. +@Input BasePAddr physical CPU address +@Input ui32Bytes number of bytes to be mapped +@Input uiFlags flags denoting the caching mode to be employed + for the mapping (uncached/write-combined, + cached coherent or cached incoherent). 
+ See pvrsrv_memallocflags.h for full flag bit + definitions. +@Return Pointer to the new mapping if successful, NULL otherwise. +*/ /**************************************************************************/ +void *OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr, size_t ui32Bytes, PVRSRV_MEMALLOCFLAGS_T uiFlags); + +/*************************************************************************/ /*! +@Function OSUnMapPhysToLin +@Description Unmaps physical memory previously mapped by OSMapPhysToLin(). +@Input pvLinAddr the linear mapping to be unmapped +@Input ui32Bytes number of bytes to be unmapped +@Return IMG_TRUE if unmapping was successful, IMG_FALSE otherwise. +*/ /**************************************************************************/ +IMG_BOOL OSUnMapPhysToLin(void *pvLinAddr, size_t ui32Bytes); + +/*************************************************************************/ /*! +@Function OSCPUCacheFlushRangeKM +@Description Clean and invalidate the CPU cache for the specified + address range. +@Input psDevNode device on which the allocation was made +@Input pvVirtStart virtual start address of the range to be + flushed +@Input pvVirtEnd virtual end address of the range to be + flushed +@Input sCPUPhysStart physical start address of the range to be + flushed +@Input sCPUPhysEnd physical end address of the range to be + flushed +@Return None +*/ /**************************************************************************/ +void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd); + +/*************************************************************************/ /*! +@Function OSCPUCacheCleanRangeKM +@Description Clean the CPU cache for the specified address range. + This writes out the contents of the cache and clears the + 'dirty' bit (which indicates the physical memory is + consistent with the cache contents). 
+@Input psDevNode device on which the allocation was made +@Input pvVirtStart virtual start address of the range to be + cleaned +@Input pvVirtEnd virtual end address of the range to be + cleaned +@Input sCPUPhysStart physical start address of the range to be + cleaned +@Input sCPUPhysEnd physical end address of the range to be + cleaned +@Return None +*/ /**************************************************************************/ +void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd); + +/*************************************************************************/ /*! +@Function OSCPUCacheInvalidateRangeKM +@Description Invalidate the CPU cache for the specified address range. + The cache must reload data from those addresses if they + are accessed. +@Input psDevNode device on which the allocation was made +@Input pvVirtStart virtual start address of the range to be + invalidated +@Input pvVirtEnd virtual end address of the range to be + invalidated +@Input sCPUPhysStart physical start address of the range to be + invalidated +@Input sCPUPhysEnd physical end address of the range to be + invalidated +@Return None +*/ /**************************************************************************/ +void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd); + +/*! CPU Cache operations address domain type */ +typedef enum +{ + OS_CACHE_OP_ADDR_TYPE_VIRTUAL, /*!< Operation requires CPU virtual address only */ + OS_CACHE_OP_ADDR_TYPE_PHYSICAL, /*!< Operation requires CPU physical address only */ + OS_CACHE_OP_ADDR_TYPE_BOTH /*!< Operation requires both CPU virtual & physical addresses */ +} OS_CACHE_OP_ADDR_TYPE; + +/*************************************************************************/ /*! 
+@Function OSCPUCacheOpAddressType +@Description Returns the address type (i.e. virtual/physical/both) the CPU + architecture performs cache maintenance operations under. + This is used to infer whether the virtual or physical address + supplied to the OSCPUCacheXXXRangeKM functions can be omitted + when called. +@Return OS_CACHE_OP_ADDR_TYPE +*/ /**************************************************************************/ +OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void); + +/*! CPU Cache attributes available for retrieval, DCache unless specified */ +typedef enum _OS_CPU_CACHE_ATTRIBUTE_ +{ + OS_CPU_CACHE_ATTRIBUTE_LINE_SIZE, /*!< The cache line size */ + OS_CPU_CACHE_ATTRIBUTE_COUNT /*!< The number of attributes (must be last) */ +} OS_CPU_CACHE_ATTRIBUTE; + +/*************************************************************************/ /*! +@Function OSCPUCacheAttributeSize +@Description Returns the size of a given cache attribute. + Typically this function is used to return the cache line + size, but may be extended to return the size of other + cache attributes. +@Input eCacheAttribute the cache attribute whose size should + be returned. +@Return The size of the specified cache attribute, in bytes. +*/ /**************************************************************************/ +IMG_UINT32 OSCPUCacheAttributeSize(OS_CPU_CACHE_ATTRIBUTE eCacheAttribute); + +/*************************************************************************/ /*! +@Function OSGetCurrentProcessID +@Description Returns ID of current process (thread group) +@Return ID of current process +*****************************************************************************/ +IMG_PID OSGetCurrentProcessID(void); + +/*************************************************************************/ /*! 
+@Function OSGetCurrentVirtualProcessID +@Description Returns ID of current process (thread group of current + PID namespace) +@Return ID of current process in PID namespace +*****************************************************************************/ +IMG_PID OSGetCurrentVirtualProcessID(void); + +/*************************************************************************/ /*! +@Function OSGetCurrentProcessName +@Description Gets the name of current process +@Return Process name +*****************************************************************************/ +IMG_CHAR *OSGetCurrentProcessName(void); + +/*************************************************************************/ /*! +@Function OSGetCurrentProcessVASpaceSize +@Description Returns the CPU virtual address space size of current process +@Return Process VA space size +*/ /**************************************************************************/ +IMG_UINT64 OSGetCurrentProcessVASpaceSize(void); + +/*************************************************************************/ /*! +@Function OSGetCurrentThreadID +@Description Returns ID for current thread +@Return ID of current thread +*****************************************************************************/ +uintptr_t OSGetCurrentThreadID(void); + +/*************************************************************************/ /*! +@Function OSGetCurrentClientProcessIDKM +@Description Returns ID of current client process (thread group) which + has made a bridge call into the server. + For some operating systems, this may simply be the current + process id. For others, it may be that a dedicated thread + is used to handle the processing of bridge calls and that + some additional processing is required to obtain the ID of + the client process making the bridge call. 
+@Return ID of current client process +*****************************************************************************/ +IMG_PID OSGetCurrentClientProcessIDKM(void); + +/*************************************************************************/ /*! +@Function OSGetCurrentClientProcessNameKM +@Description Gets the name of current client process +@Return Client process name +*****************************************************************************/ +IMG_CHAR *OSGetCurrentClientProcessNameKM(void); + +/*************************************************************************/ /*! +@Function OSGetCurrentClientThreadIDKM +@Description Returns ID for current client thread + For some operating systems, this may simply be the current + thread id. For others, it may be that a dedicated thread + is used to handle the processing of bridge calls and that + some additional processing is required to obtain the ID of + the client thread making the bridge call. +@Return ID of current client thread +*****************************************************************************/ +uintptr_t OSGetCurrentClientThreadIDKM(void); + +/*************************************************************************/ /*! +@Function OSMemCmp +@Description Compares two blocks of memory for equality. +@Input pvBufA Pointer to the first block of memory +@Input pvBufB Pointer to the second block of memory +@Input uiLen The number of bytes to be compared +@Return Value < 0 if pvBufA is less than pvBufB. + Value > 0 if pvBufB is less than pvBufA. + Value = 0 if pvBufA is equal to pvBufB. +*****************************************************************************/ +IMG_INT OSMemCmp(void *pvBufA, void *pvBufB, size_t uiLen); + +/*************************************************************************/ /*! +@Function OSPhyContigPagesAlloc +@Description Allocates a number of contiguous physical pages. 
+ If allocations made by this function are CPU cached then + OSPhyContigPagesClean has to be implemented to write the + cached data to memory. +@Input psDevNode the device for which the allocation is + required +@Input uiSize the size of the required allocation (in bytes) +@Output psMemHandle a returned handle to be used to refer to this + allocation +@Output psDevPAddr the physical address of the allocation +@Input uiPid the process ID that this allocation should + be associated with +@Return PVRSRV_OK on success, a failure code otherwise. +*****************************************************************************/ +PVRSRV_ERROR OSPhyContigPagesAlloc(PVRSRV_DEVICE_NODE *psDevNode, size_t uiSize, + PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr, + IMG_PID uiPid); + +/*************************************************************************/ /*! +@Function OSPhyContigPagesFree +@Description Frees a previous allocation of contiguous physical pages +@Input psDevNode the device on which the allocation was made +@Input psMemHandle the handle of the allocation to be freed +@Return None. +*****************************************************************************/ +void OSPhyContigPagesFree(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle); + +/*************************************************************************/ /*! +@Function OSPhyContigPagesMap +@Description Maps the specified allocation of contiguous physical pages + to a kernel virtual address +@Input psDevNode the device on which the allocation was made +@Input psMemHandle the handle of the allocation to be mapped +@Input uiSize the size of the allocation (in bytes) +@Input psDevPAddr the physical address of the allocation +@Output pvPtr the virtual kernel address to which the + allocation is now mapped +@Return PVRSRV_OK on success, a failure code otherwise. 
+*****************************************************************************/ +PVRSRV_ERROR OSPhyContigPagesMap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle, + size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr, + void **pvPtr); + +/*************************************************************************/ /*! +@Function OSPhyContigPagesUnmap +@Description Unmaps the kernel mapping for the specified allocation of + contiguous physical pages +@Input psDevNode the device on which the allocation was made +@Input psMemHandle the handle of the allocation to be unmapped +@Input pvPtr the virtual kernel address to which the + allocation is currently mapped +@Return None. +*****************************************************************************/ +void OSPhyContigPagesUnmap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle, void *pvPtr); + +/*************************************************************************/ /*! +@Function OSPhyContigPagesClean +@Description Write the content of the specified allocation from CPU cache to + memory from (start + uiOffset) to (start + uiOffset + uiLength) + It is expected to be implemented as a cache clean operation but + it is allowed to fall back to a cache clean + invalidate + (i.e. flush). + If allocations returned by OSPhyContigPagesAlloc are always + uncached this can be implemented as nop. +@Input psDevNode device on which the allocation was made +@Input psMemHandle the handle of the allocation to be flushed +@Input uiOffset the offset in bytes from the start of the + allocation from where to start flushing +@Input uiLength the amount to flush from the offset in bytes +@Return PVRSRV_OK on success, a failure code otherwise. 
+*****************************************************************************/ +PVRSRV_ERROR OSPhyContigPagesClean(PVRSRV_DEVICE_NODE *psDevNode, + PG_HANDLE *psMemHandle, + IMG_UINT32 uiOffset, + IMG_UINT32 uiLength); + + +/*************************************************************************/ /*! +@Function OSInitEnvData +@Description Called to initialise any environment-specific data. This + could include initialising the bridge calling infrastructure + or device memory management infrastructure. +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR OSInitEnvData(void); + +/*************************************************************************/ /*! +@Function OSDeInitEnvData +@Description The counterpart to OSInitEnvData(). Called to free any + resources which may have been allocated by OSInitEnvData(). +@Return None. +*/ /**************************************************************************/ +void OSDeInitEnvData(void); + +/*************************************************************************/ /*! +@Function OSVSScanf +@Description OS function to support the standard C vsscanf() function. +*/ /**************************************************************************/ +IMG_UINT32 OSVSScanf(const IMG_CHAR *pStr, const IMG_CHAR *pszFormat, ...); + +/*************************************************************************/ /*! +@Function OSStringLCat +@Description OS function to support the BSD C strlcat() function. +*/ /**************************************************************************/ +size_t OSStringLCat(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDstSize); + +/*************************************************************************/ /*! +@Function OSSNPrintf +@Description OS function to support the standard C snprintf() function. 
+@Output pStr char array to print into
+@Input ui32Size maximum size of data to write (chars)
+@Input pszFormat format string
+*/ /**************************************************************************/
+IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, size_t ui32Size, const IMG_CHAR *pszFormat, ...) __printf(3, 4);
+
+/*************************************************************************/ /*!
+@Function OSVSNPrintf
+@Description Printf to IMG string using variable args (see stdarg.h).
+ This is necessary because the '...' notation does not
+ support nested function calls.
+@Input ui32Size maximum size of data to write (chars)
+@Input pszFormat format string
+@Input vaArgs variable args structure (from stdarg.h)
+@Output pStr char array to print into
+@Return Number of characters written in buffer if successful, otherwise -1 on error
+*/ /**************************************************************************/
+IMG_INT32 OSVSNPrintf(IMG_CHAR *pStr, size_t ui32Size, const IMG_CHAR* pszFormat, va_list vaArgs) __printf(3, 0);
+
+/*************************************************************************/ /*!
+@Function OSStringLength
+@Description OS function to support the standard C strlen() function.
+*/ /**************************************************************************/
+size_t OSStringLength(const IMG_CHAR *pStr);
+
+/*************************************************************************/ /*!
+@Function OSStringNLength
+@Description Return the length of a string, excluding the terminating null
+ byte ('\0'), but return at most 'uiCount' bytes. Only the first
+ 'uiCount' bytes of 'pStr' are interrogated.
+@Input pStr pointer to the string
+@Input uiCount the maximum length to return
+@Return Length of the string if less than 'uiCount' bytes, otherwise
+ 'uiCount'.
+*/ /**************************************************************************/ +size_t OSStringNLength(const IMG_CHAR *pStr, size_t uiCount); + +/*************************************************************************/ /*! +@Function OSStringNCompare +@Description OS function to support the standard C strncmp() function. +*/ /**************************************************************************/ +IMG_INT32 OSStringNCompare(const IMG_CHAR *pStr1, const IMG_CHAR *pStr2, + size_t uiSize); + +/*************************************************************************/ /*! +@Function OSStringToUINT32 +@Description Changes string to IMG_UINT32. +*/ /**************************************************************************/ +PVRSRV_ERROR OSStringToUINT32(const IMG_CHAR *pStr, IMG_UINT32 ui32Base, + IMG_UINT32 *ui32Result); + +/*************************************************************************/ /*! +@Function OSStringUINT32ToStr +@Description Changes IMG_UINT32 to string +@Input pszBuf Buffer to write output number string +@Input uSize Size of buffer provided, i.e. size of pszBuf +@Input ui32Num Number to convert to string +@Return Returns 0 if buffer is not sufficient to hold the number string, + else returns length of number string +*/ /**************************************************************************/ +IMG_UINT32 OSStringUINT32ToStr(IMG_CHAR *pszBuf, size_t uSize, IMG_UINT32 ui32Num); + +/*************************************************************************/ /*! +@Function OSEventObjectCreate +@Description Create an event object. +@Input pszName name to assign to the new event object. +@Output EventObject the created event object. +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR *pszName, + IMG_HANDLE *EventObject); + +/*************************************************************************/ /*! 
+@Function OSEventObjectDestroy +@Description Destroy an event object. +@Input hEventObject the event object to destroy. +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR OSEventObjectDestroy(IMG_HANDLE hEventObject); + +/*************************************************************************/ /*! +@Function OSEventObjectSignal +@Description Signal an event object. Any thread waiting on that event + object will be woken. +@Input hEventObject the event object to signal. +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hEventObject); + +/*************************************************************************/ /*! +@Function OSEventObjectWait +@Description Wait for an event object to signal. The function is passed + an OS event object handle (which allows the OS to have the + calling thread wait on the associated event object). + The calling thread will be rescheduled when the associated + event object signals. + If the event object has not signalled after a default timeout + period (defined in EVENT_OBJECT_TIMEOUT_MS), the function + will return with the result code PVRSRV_ERROR_TIMEOUT. + + +@Input hOSEventKM the OS event object handle associated with + the event object. +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM); + +/*************************************************************************/ /*! +@Function OSEventObjectWaitKernel +@Description Wait for an event object to signal. The function is passed + an OS event object handle (which allows the OS to have the + calling thread wait on the associated event object). 
+ The calling thread will be rescheduled when the associated + event object signals. + If the event object has not signalled after a default timeout + period (defined in EVENT_OBJECT_TIMEOUT_MS), the function + will return with the result code PVRSRV_ERROR_TIMEOUT. + + Note: This function should be used only by kernel thread. + This is because all kernel threads are freezable and + this function allows the kernel to freeze the threads + when waiting. + + See OSEventObjectWait() for more details. + +@Input hOSEventKM the OS event object handle associated with + the event object. +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +#if defined(__linux__) && defined(__KERNEL__) +PVRSRV_ERROR OSEventObjectWaitKernel(IMG_HANDLE hOSEventKM, IMG_UINT64 uiTimeoutus); +#else +#define OSEventObjectWaitKernel OSEventObjectWaitTimeout +#endif + +/*************************************************************************/ /*! +@Function OSSuspendTaskInterruptible +@Description Suspend the current task into interruptible state. +@Return none. +*/ /**************************************************************************/ +#if defined(__linux__) && defined(__KERNEL__) +void OSSuspendTaskInterruptible(void); +#endif + +/*************************************************************************/ /*! +@Function OSEventObjectWaitTimeout +@Description Wait for an event object to signal or timeout. The function + is passed an OS event object handle (which allows the OS to + have the calling thread wait on the associated event object). + The calling thread will be rescheduled when the associated + event object signals. + If the event object has not signalled after the specified + timeout period (passed in 'uiTimeoutus'), the function + will return with the result code PVRSRV_ERROR_TIMEOUT. +@Input hOSEventKM the OS event object handle associated with + the event object. 
+@Input uiTimeoutus the timeout period (in usecs) +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR OSEventObjectWaitTimeout(IMG_HANDLE hOSEventKM, IMG_UINT64 uiTimeoutus); + +/*************************************************************************/ /*! +@Function OSEventObjectDumpDebugInfo +@Description Emits debug counters/stats related to the event object passed +@Input hOSEventKM the OS event object handle associated with + the event object. +@Return None. +*/ /**************************************************************************/ +void OSEventObjectDumpDebugInfo(IMG_HANDLE hOSEventKM); + +/*************************************************************************/ /*! +@Function OSEventObjectOpen +@Description Open an OS handle on the specified event object. + This OS handle may then be used to make a thread wait for + that event object to signal. +@Input hEventObject Event object handle. +@Output phOSEvent OS handle to the returned event object. +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR OSEventObjectOpen(IMG_HANDLE hEventObject, + IMG_HANDLE *phOSEvent); + +/*************************************************************************/ /*! +@Function OSEventObjectClose +@Description Close an OS handle previously opened for an event object. +@Input hOSEventKM OS event object handle to close. +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR OSEventObjectClose(IMG_HANDLE hOSEventKM); + +/*************************************************************************/ /*! +@Function OSWaitus +@Description Implements a busy wait of the specified number of microseconds. + This function does NOT release thread quanta. 
+@Input ui32Timeus The duration of the wait period (in us)
+@Return None.
+*/ /**************************************************************************/
+void OSWaitus(IMG_UINT32 ui32Timeus);
+
+/*************************************************************************/ /*!
+@Function OSSleepms
+@Description Implements a sleep of the specified number of milliseconds.
+ This function may allow pre-emption, meaning the thread
+ may potentially not be rescheduled for a longer period.
+@Input ui32Timems The duration of the sleep (in ms)
+@Return None.
+*/ /**************************************************************************/
+void OSSleepms(IMG_UINT32 ui32Timems);
+
+/*************************************************************************/ /*!
+@Function OSReleaseThreadQuanta
+@Description Relinquishes the current thread's execution time-slice,
+ permitting the OS scheduler to schedule another thread.
+@Return None.
+*/ /**************************************************************************/
+void OSReleaseThreadQuanta(void);
+
+#if defined(__linux__) && defined(__KERNEL__)
+#define OSWriteMemoryBarrier() wmb()
+#define OSReadMemoryBarrier() rmb()
+#define OSMemoryBarrier() mb()
+#else
+/*************************************************************************/ /*!
+@Function OSWriteMemoryBarrier
+@Description Insert a write memory barrier.
+ The write memory barrier guarantees that all store operations
+ (writes) specified before the barrier will appear to happen
+ before all of the store operations specified after the barrier.
+@Return None.
+*/ /**************************************************************************/
+void OSWriteMemoryBarrier(void);
+/*************************************************************************/ /*!
+@Function OSReadMemoryBarrier
+@Description Insert a read memory barrier.
+ The read memory barrier guarantees that all load (read) + operations specified before the barrier will appear to happen + before all of the load operations specified after the barrier. +*/ /**************************************************************************/ +void OSReadMemoryBarrier(void); +/*************************************************************************/ /*! +@Function OSMemoryBarrier +@Description Insert a read/write memory barrier. + The read and write memory barrier guarantees that all load + (read) and all store (write) operations specified before the + barrier will appear to happen before all of the load/store + operations specified after the barrier. +@Return None. +*/ /**************************************************************************/ +void OSMemoryBarrier(void); +#endif + +/*************************************************************************/ /*! +*/ /**************************************************************************/ + +/* The access method is dependent on the location of the physical memory that + * makes up the PhyHeaps defined for the system and the CPU architecture. These + * macros may change in future to accommodate different access requirements. + */ +/*! Performs a 32 bit word read from the device memory. */ +#define OSReadDeviceMem32(addr) (*((volatile IMG_UINT32 __force *)(addr))) +/*! Performs a 32 bit word write to the device memory. */ +#define OSWriteDeviceMem32(addr, val) (*((volatile IMG_UINT32 __force *)(addr)) = (IMG_UINT32)(val)) +/*! 
Performs a 32 bit word write to the device memory and issues a write memory barrier */ +#define OSWriteDeviceMem32WithWMB(addr, val) \ + do { \ + *((volatile IMG_UINT32 __force *)(addr)) = (IMG_UINT32)(val); \ + OSWriteMemoryBarrier(); \ + } while (0) + +#if defined(__linux__) && defined(__KERNEL__) && !defined(NO_HARDWARE) + #define OSReadHWReg8(addr, off) ((IMG_UINT8)readb((IMG_BYTE __iomem *)(addr) + (off))) + #define OSReadHWReg16(addr, off) ((IMG_UINT16)readw((IMG_BYTE __iomem *)(addr) + (off))) + #define OSReadHWReg32(addr, off) ((IMG_UINT32)readl((IMG_BYTE __iomem *)(addr) + (off))) + + /* Little endian support only */ + #define OSReadHWReg64(addr, off) \ + ({ \ + __typeof__(addr) _addr = addr; \ + __typeof__(off) _off = off; \ + (IMG_UINT64) \ + ( \ + ( (IMG_UINT64)(readl((IMG_BYTE __iomem *)(_addr) + (_off) + 4)) << 32) \ + | readl((IMG_BYTE __iomem *)(_addr) + (_off)) \ + ); \ + }) + + #define OSWriteHWReg8(addr, off, val) writeb((IMG_UINT8)(val), (IMG_BYTE __iomem *)(addr) + (off)) + #define OSWriteHWReg16(addr, off, val) writew((IMG_UINT16)(val), (IMG_BYTE __iomem *)(addr) + (off)) + #define OSWriteHWReg32(addr, off, val) writel((IMG_UINT32)(val), (IMG_BYTE __iomem *)(addr) + (off)) + /* Little endian support only */ + #define OSWriteHWReg64(addr, off, val) do \ + { \ + __typeof__(addr) _addr = addr; \ + __typeof__(off) _off = off; \ + __typeof__(val) _val = val; \ + writel((IMG_UINT32)((_val) & 0xffffffff), (IMG_BYTE __iomem *)(_addr) + (_off)); \ + writel((IMG_UINT32)(((IMG_UINT64)(_val) >> 32) & 0xffffffff), (IMG_BYTE __iomem *)(_addr) + (_off) + 4); \ + } while (0) + + +#elif defined(NO_HARDWARE) + /* OSReadHWReg operations skipped in no hardware builds */ + #define OSReadHWReg8(addr, off) (0x4eU) + #define OSReadHWReg16(addr, off) (0x3a4eU) + #define OSReadHWReg32(addr, off) (0x30f73a4eU) +#if defined(__QNXNTO__) && __SIZEOF_LONG__ == 8 + /* This is needed for 64-bit QNX builds where the size of a long is 64 bits */ + #define OSReadHWReg64(addr, 
off) (0x5b376c9d30f73a4eUL) +#else + #define OSReadHWReg64(addr, off) (0x5b376c9d30f73a4eULL) +#endif + + #define OSWriteHWReg8(addr, off, val) + #define OSWriteHWReg16(addr, off, val) + #define OSWriteHWReg32(addr, off, val) + #define OSWriteHWReg64(addr, off, val) + +#else +/*************************************************************************/ /*! +@Function OSReadHWReg8 +@Description Read from an 8-bit memory-mapped device register. + The implementation should not permit the compiler to + reorder the I/O sequence. + The implementation should ensure that for a NO_HARDWARE + build the code does not attempt to read from a location + but instead returns a constant value. +@Input pvLinRegBaseAddr The virtual base address of the register + block. +@Input ui32Offset The byte offset from the base address of + the register to be read. +@Return The byte read. +*/ /**************************************************************************/ + IMG_UINT8 OSReadHWReg8(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset); + +/*************************************************************************/ /*! +@Function OSReadHWReg16 +@Description Read from a 16-bit memory-mapped device register. + The implementation should not permit the compiler to + reorder the I/O sequence. + The implementation should ensure that for a NO_HARDWARE + build the code does not attempt to read from a location + but instead returns a constant value. +@Input pvLinRegBaseAddr The virtual base address of the register + block. +@Input ui32Offset The byte offset from the base address of + the register to be read. +@Return The word read. +*/ /**************************************************************************/ + IMG_UINT16 OSReadHWReg16(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset); + +/*************************************************************************/ /*! +@Function OSReadHWReg32 +@Description Read from a 32-bit memory-mapped device register. 
+ The implementation should not permit the compiler to + reorder the I/O sequence. + The implementation should ensure that for a NO_HARDWARE + build the code does not attempt to read from a location + but instead returns a constant value. +@Input pvLinRegBaseAddr The virtual base address of the register + block. +@Input ui32Offset The byte offset from the base address of + the register to be read. +@Return The long word read. +*/ /**************************************************************************/ + IMG_UINT32 OSReadHWReg32(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset); + +/*************************************************************************/ /*! +@Function OSReadHWReg64 +@Description Read from a 64-bit memory-mapped device register. + The implementation should not permit the compiler to + reorder the I/O sequence. + The implementation should ensure that for a NO_HARDWARE + build the code does not attempt to read from a location + but instead returns a constant value. +@Input pvLinRegBaseAddr The virtual base address of the register + block. +@Input ui32Offset The byte offset from the base address of + the register to be read. +@Return The long long word read. +*/ /**************************************************************************/ + IMG_UINT64 OSReadHWReg64(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset); + +/*************************************************************************/ /*! +@Function OSWriteHWReg8 +@Description Write to an 8-bit memory-mapped device register. + The implementation should not permit the compiler to + reorder the I/O sequence. + The implementation should ensure that for a NO_HARDWARE + build the code does not attempt to write to a location. +@Input pvLinRegBaseAddr The virtual base address of the register + block. +@Input ui32Offset The byte offset from the base address of + the register to be written to. +@Input ui8Value The byte to be written to the register. +@Return None. 
+*/ /**************************************************************************/ + void OSWriteHWReg8(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT8 ui8Value); + +/*************************************************************************/ /*! +@Function OSWriteHWReg16 +@Description Write to a 16-bit memory-mapped device register. + The implementation should not permit the compiler to + reorder the I/O sequence. + The implementation should ensure that for a NO_HARDWARE + build the code does not attempt to write to a location. +@Input pvLinRegBaseAddr The virtual base address of the register + block. +@Input ui32Offset The byte offset from the base address of + the register to be written to. +@Input ui16Value The word to be written to the register. +@Return None. +*/ /**************************************************************************/ + void OSWriteHWReg16(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT16 ui16Value); + +/*************************************************************************/ /*! +@Function OSWriteHWReg32 +@Description Write to a 32-bit memory-mapped device register. + The implementation should not permit the compiler to + reorder the I/O sequence. + The implementation should ensure that for a NO_HARDWARE + build the code does not attempt to write to a location. +@Input pvLinRegBaseAddr The virtual base address of the register + block. +@Input ui32Offset The byte offset from the base address of + the register to be written to. +@Input ui32Value The long word to be written to the register. +@Return None. +*/ /**************************************************************************/ + void OSWriteHWReg32(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value); + +/*************************************************************************/ /*! +@Function OSWriteHWReg64 +@Description Write to a 64-bit memory-mapped device register. 
+ The implementation should not permit the compiler to + reorder the I/O sequence. + The implementation should ensure that for a NO_HARDWARE + build the code does not attempt to write to a location. +@Input pvLinRegBaseAddr The virtual base address of the register + block. +@Input ui32Offset The byte offset from the base address of + the register to be written to. +@Input ui64Value The long long word to be written to the + register. +@Return None. +*/ /**************************************************************************/ + void OSWriteHWReg64(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT64 ui64Value); +#endif + +/*************************************************************************/ /*! +@Description Pointer to a timer callback function. +@Input pvData Pointer to timer specific data. +*/ /**************************************************************************/ +typedef void (*PFN_TIMER_FUNC)(void* pvData); + +/*************************************************************************/ /*! +@Function OSAddTimer +@Description OS specific function to install a timer callback. The + timer will then need to be enabled, as it is disabled by + default. + When enabled, the callback will be invoked once the specified + timeout has elapsed. +@Input pfnTimerFunc Timer callback +@Input *pvData Callback data +@Input ui32MsTimeout Callback period +@Return Valid handle on success, NULL if a failure +*/ /**************************************************************************/ +IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, void *pvData, IMG_UINT32 ui32MsTimeout); + +/*************************************************************************/ /*! +@Function OSRemoveTimer +@Description Removes the specified timer. The handle becomes invalid and + should no longer be used. +@Input hTimer handle of the timer to be removed +@Return PVRSRV_OK on success, a failure code otherwise. 
+*/ /**************************************************************************/
+PVRSRV_ERROR OSRemoveTimer(IMG_HANDLE hTimer);
+
+/*************************************************************************/ /*!
+@Function OSEnableTimer
+@Description Enable the specified timer. After enabling, the timer will
+ invoke the associated callback at an interval determined by
+ the configured timeout period until disabled.
+@Input hTimer handle of the timer to be enabled
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEnableTimer(IMG_HANDLE hTimer);
+
+/*************************************************************************/ /*!
+@Function OSDisableTimer
+@Description Disable the specified timer
+@Input hTimer handle of the timer to be disabled
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSDisableTimer(IMG_HANDLE hTimer);
+
+
+/*************************************************************************/ /*!
+ @Function OSPanic
+ @Description Take action in response to an unrecoverable driver error
+ @Return None
+*/ /**************************************************************************/
+void OSPanic(void);
+
+/*************************************************************************/ /*!
+@Function OSCopyToUser
+@Description Copy data to user-addressable memory from kernel-addressable
+ memory.
+ Note that pvDest may be an invalid address or NULL and the
+ function should return an error in this case.
+ For operating systems that do not have a user/kernel space
+ distinction, this function should be implemented as a stub
+ which simply returns PVRSRV_ERROR_NOT_SUPPORTED.
+@Input pvProcess handle of the connection +@Input pvDest pointer to the destination User memory +@Input pvSrc pointer to the source Kernel memory +@Input ui32Bytes size of the data to be copied +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR OSCopyToUser(void *pvProcess, void __user *pvDest, const void *pvSrc, size_t ui32Bytes); + +/*************************************************************************/ /*! +@Function OSCopyFromUser +@Description Copy data from user-addressable memory to kernel-addressable + memory. + Note that pvSrc may be an invalid address or NULL and the + function should return an error in this case. + For operating systems that do not have a user/kernel space + distinction, this function should be implemented as a stub + which simply returns PVRSRV_ERROR_NOT_SUPPORTED. +@Input pvProcess handle of the connection +@Input pvDest pointer to the destination Kernel memory +@Input pvSrc pointer to the source User memory +@Input ui32Bytes size of the data to be copied +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR OSCopyFromUser(void *pvProcess, void *pvDest, const void __user *pvSrc, size_t ui32Bytes); + +#if defined(__linux__) || defined(INTEGRITY_OS) +#define OSBridgeCopyFromUser OSCopyFromUser +#define OSBridgeCopyToUser OSCopyToUser +#else +/*************************************************************************/ /*! +@Function OSBridgeCopyFromUser +@Description Copy data from user-addressable memory into kernel-addressable + memory as part of a bridge call operation. + For operating systems that do not have a user/kernel space + distinction, this function will require whatever implementation + is needed to pass data for making the bridge function call. 
+ For operating systems which do have a user/kernel space + distinction (such as Linux) this function may be defined so + as to equate to a call to OSCopyFromUser(). +@Input pvProcess handle of the connection +@Input pvDest pointer to the destination Kernel memory +@Input pvSrc pointer to the source User memory +@Input ui32Bytes size of the data to be copied +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR OSBridgeCopyFromUser (void *pvProcess, + void *pvDest, + const void *pvSrc, + size_t ui32Bytes); + +/*************************************************************************/ /*! +@Function OSBridgeCopyToUser +@Description Copy data to user-addressable memory from kernel-addressable + memory as part of a bridge call operation. + For operating systems that do not have a user/kernel space + distinction, this function will require whatever implementation + is needed to pass data for making the bridge function call. + For operating systems which do have a user/kernel space + distinction (such as Linux) this function may be defined so + as to equate to a call to OSCopyToUser(). +@Input pvProcess handle of the connection +@Input pvDest pointer to the destination User memory +@Input pvSrc pointer to the source Kernel memory +@Input ui32Bytes size of the data to be copied +@Return PVRSRV_OK on success, a failure code otherwise. 
+*/ /**************************************************************************/ +PVRSRV_ERROR OSBridgeCopyToUser (void *pvProcess, + void *pvDest, + const void *pvSrc, + size_t ui32Bytes); +#endif + +/* To be increased if required in future */ +#define PVRSRV_MAX_BRIDGE_IN_SIZE 0x2000 /*!< Size of the memory block used to hold data passed in to a bridge call */ +#define PVRSRV_MAX_BRIDGE_OUT_SIZE 0x1000 /*!< Size of the memory block used to hold data returned from a bridge call */ + +/*************************************************************************/ /*! +@Function OSPlatformBridgeInit +@Description Called during device creation to allow the OS port to register + other bridge modules and related resources that it requires. +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR OSPlatformBridgeInit(void); + +/*************************************************************************/ /*! +@Function OSPlatformBridgeDeInit +@Description Called during device destruction to allow the OS port to + deregister its OS specific bridges and clean up other + related resources. +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR OSPlatformBridgeDeInit(void); + +/*************************************************************************/ /*! +@Function PVRSRVToNativeError +@Description Returns the OS-specific equivalent error number/code for + the specified PVRSRV_ERROR value. + If there is no equivalent, or the PVRSRV_ERROR value is + PVRSRV_OK (no error), 0 is returned. +@Return The OS equivalent error code. +*/ /**************************************************************************/ +int PVRSRVToNativeError(PVRSRV_ERROR e); +/** See PVRSRVToNativeError(). */ +#define OSPVRSRVToNativeError(e) ( (PVRSRV_OK == e)? 
0: PVRSRVToNativeError(e) )
+
+
+#if defined(__linux__) && defined(__KERNEL__)
+
+/* Provide LockDep friendly definitions for Services RW locks */
+#include <linux/rwsem.h>
+#include <linux/slab.h> /* NOTE(review): both header names were lost in extraction; restored from the upstream IMG DDK osfunc.h — rwsem.h is required by init_rwsem/down_read below; verify the second header against the original patch */
+#include "allocmem.h"
+
+#define OSWRLockCreate(ppsLock) ({ \
+ PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \
+ *(ppsLock) = OSAllocMem(sizeof(struct rw_semaphore)); \
+ if (*(ppsLock)) { init_rwsem(*(ppsLock)); e = PVRSRV_OK; }; \
+ e;})
+#define OSWRLockDestroy(psLock) ({OSFreeMem(psLock); PVRSRV_OK;})
+
+#define OSWRLockAcquireRead(psLock) ({down_read(psLock); PVRSRV_OK;})
+#define OSWRLockReleaseRead(psLock) ({up_read(psLock); PVRSRV_OK;})
+#define OSWRLockAcquireWrite(psLock) ({down_write(psLock); PVRSRV_OK;})
+#define OSWRLockReleaseWrite(psLock) ({up_write(psLock); PVRSRV_OK;})
+
+#elif defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS)
+/* User-mode unit tests use these definitions on Linux */
+
+PVRSRV_ERROR OSWRLockCreate(POSWR_LOCK *ppsLock);
+void OSWRLockDestroy(POSWR_LOCK psLock);
+void OSWRLockAcquireRead(POSWR_LOCK psLock);
+void OSWRLockReleaseRead(POSWR_LOCK psLock);
+void OSWRLockAcquireWrite(POSWR_LOCK psLock);
+void OSWRLockReleaseWrite(POSWR_LOCK psLock);
+
+#else
+
+/*************************************************************************/ /*!
+@Function OSWRLockCreate
+@Description Create a writer/reader lock.
+ This type of lock allows multiple concurrent readers but
+ only a single writer, allowing for optimized performance.
+@Output ppsLock A handle to the created WR lock.
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+static INLINE PVRSRV_ERROR OSWRLockCreate(POSWR_LOCK *ppsLock)
+{
+ PVR_UNREFERENCED_PARAMETER(ppsLock);
+ return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function OSWRLockDestroy
+@Description Destroys a writer/reader lock.
+@Input psLock The handle of the WR lock to be destroyed.
+@Return None. +*/ /**************************************************************************/ +static INLINE void OSWRLockDestroy(POSWR_LOCK psLock) +{ + PVR_UNREFERENCED_PARAMETER(psLock); +} + +/*************************************************************************/ /*! +@Function OSWRLockAcquireRead +@Description Acquire a writer/reader read lock. + If the write lock is already acquired, the caller will + block until it is released. +@Input psLock The handle of the WR lock to be acquired for + reading. +@Return None. +*/ /**************************************************************************/ +static INLINE void OSWRLockAcquireRead(POSWR_LOCK psLock) +{ + PVR_UNREFERENCED_PARAMETER(psLock); +} + +/*************************************************************************/ /*! +@Function OSWRLockReleaseRead +@Description Release a writer/reader read lock. +@Input psLock The handle of the WR lock whose read lock is to + be released. +@Return None. +*/ /**************************************************************************/ +static INLINE void OSWRLockReleaseRead(POSWR_LOCK psLock) +{ + PVR_UNREFERENCED_PARAMETER(psLock); +} + +/*************************************************************************/ /*! +@Function OSWRLockAcquireWrite +@Description Acquire a writer/reader write lock. + If the write lock or any read lock are already acquired, + the caller will block until all are released. +@Input psLock The handle of the WR lock to be acquired for + writing. +@Return None. +*/ /**************************************************************************/ +static INLINE void OSWRLockAcquireWrite(POSWR_LOCK psLock) +{ + PVR_UNREFERENCED_PARAMETER(psLock); +} + +/*************************************************************************/ /*! +@Function OSWRLockReleaseWrite +@Description Release a writer/reader write lock. +@Input psLock The handle of the WR lock whose write lock is to + be released. 
+@Return None
+*/ /**************************************************************************/
+static INLINE void OSWRLockReleaseWrite(POSWR_LOCK psLock)
+{
+	PVR_UNREFERENCED_PARAMETER(psLock);
+}
+#endif
+
+/*************************************************************************/ /*!
+@Function       OSDivide64r64
+@Description    Divide a 64-bit value by a 32-bit value. Return the 64-bit
+                quotient.
+                The remainder is also returned in 'pui32Remainder'.
+@Input          ui64Divident        The number to be divided.
+@Input          ui32Divisor         The 32-bit value 'ui64Divident' is to
+                                    be divided by.
+@Output         pui32Remainder      The remainder of the division.
+@Return         The 64-bit quotient (result of the division).
+*/ /**************************************************************************/
+IMG_UINT64 OSDivide64r64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder);
+
+/*************************************************************************/ /*!
+@Function       OSDivide64
+@Description    Divide a 64-bit value by a 32-bit value. Return a 32-bit
+                quotient.
+                The remainder is also returned in 'pui32Remainder'.
+                This function allows for a more optimal implementation
+                of a 64-bit division when the result is known to be
+                representable in 32-bits.
+@Input          ui64Divident        The number to be divided.
+@Input          ui32Divisor         The 32-bit value 'ui64Divident' is to
+                                    be divided by.
+@Output         pui32Remainder      The remainder of the division.
+@Return         The 32-bit quotient (result of the division).
+*/ /**************************************************************************/
+IMG_UINT32 OSDivide64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder);
+
+/*************************************************************************/ /*!
+@Function       OSDumpStack
+@Description    Dump the current task information and its stack trace.
+@Return None +*/ /**************************************************************************/ +void OSDumpStack(void); + +/*************************************************************************/ /*! +@Function OSUserModeAccessToPerfCountersEn +@Description Permit User-mode access to CPU performance counter + registers. + This function is called during device initialisation. + Certain CPU architectures may need to explicitly permit + User mode access to performance counters - if this is + required, the necessary code should be implemented inside + this function. +@Return None. +*/ /**************************************************************************/ +void OSUserModeAccessToPerfCountersEn(void); + +/*************************************************************************/ /*! +@Function OSDebugSignalPID +@Description Sends a SIGTRAP signal to a specific PID in user mode for + debugging purposes. The user mode process can register a handler + against this signal. + This is necessary to support the Rogue debugger. If the Rogue + debugger is not used then this function may be implemented as + a stub. +@Input ui32PID The PID for the signal. +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR OSDebugSignalPID(IMG_UINT32 ui32PID); + +#if defined(__linux__) && defined(__KERNEL__) && !defined(DOXYGEN) +#define OSWarnOn(a) WARN_ON(a) +#else +/*************************************************************************/ /*! +@Function OSWarnOn +@Description This API allows the driver to emit a special token and stack + dump to the server log when an issue is detected that needs the + OS to be notified. The token or call may be used to trigger + log collection by the OS environment. + PVR_DPF log messages will have been emitted prior to this call. 
+@Input a Expression to evaluate, if true trigger Warn signal +@Return None +*/ /**************************************************************************/ +#define OSWarnOn(a) do { if ((a)) { OSDumpStack(); } } while (0) +#endif + +/*************************************************************************/ /*! +@Function OSIsKernelThread +@Description This API determines if the current running thread is a kernel + thread (i.e. one not associated with any userland process, + typically an MISR handler.) +@Return IMG_TRUE if it is a kernel thread, otherwise IMG_FALSE. +*/ /**************************************************************************/ +IMG_BOOL OSIsKernelThread(void); + +/*************************************************************************/ /*! +@Function OSThreadDumpInfo +@Description Traverse the thread list and call each of the stored + callbacks to dump the info in debug_dump. +@Input pfnDumpDebugPrintf The 'printf' function to be called to + display the debug info +@Input pvDumpDebugFile Optional file identifier to be passed to + the 'printf' function if required +*/ /**************************************************************************/ +void OSThreadDumpInfo(DUMPDEBUG_PRINTF_FUNC* pfnDumpDebugPrintf, + void *pvDumpDebugFile); + +/*************************************************************************/ /*! +@Function OSDumpVersionInfo +@Description Store OS version information in debug dump. +@Input pfnDumpDebugPrintf The 'printf' function to be called to + display the debug info +@Input pvDumpDebugFile Optional file identifier to be passed to + the 'printf' function if required +*/ /**************************************************************************/ +void OSDumpVersionInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile); + +/*************************************************************************/ /*! 
+@Function       OSIsWriteCombineUnalignedSafe
+@Description    Determine if unaligned accesses to write-combine memory are
+                safe to perform, i.e. whether we are safe from a CPU fault
+                occurring. This test is specifically aimed at ARM64 platforms
+                which cannot provide this guarantee if the memory is 'device'
+                memory rather than 'normal' under the ARM memory architecture.
+@Return         IMG_TRUE if safe, IMG_FALSE otherwise.
+*/ /**************************************************************************/
+IMG_BOOL OSIsWriteCombineUnalignedSafe(void);
+
+/*************************************************************************/ /*!
+@Function       OSDebugLevel
+@Description    Returns current value of the debug level.
+@Return         Debug level.
+*/ /**************************************************************************/
+IMG_UINT32 OSDebugLevel(void);
+
+/*************************************************************************/ /*!
+@Function       OSSetDebugLevel
+@Description    Sets the current value of the debug level to ui32DebugLevel.
+@Input          ui32DebugLevel    New debug level value.
+*/ /**************************************************************************/
+void OSSetDebugLevel(IMG_UINT32 ui32DebugLevel);
+
+/*************************************************************************/ /*!
+@Function       OSIsDebugLevel
+@Description    Tests if a given debug level is enabled.
+@Input          ui32DebugLevel    Debug level to test.
+@Return         IMG_TRUE if the given debug level is enabled,
+                IMG_FALSE otherwise.
+*/ /**************************************************************************/ +IMG_BOOL OSIsDebugLevel(IMG_UINT32 ui32DebugLevel); + +#if defined(SUPPORT_DMA_TRANSFER) + +typedef void (*PFN_SERVER_CLEANUP)(void *pvData, IMG_BOOL bAdvanceTimeline); + +#define DMA_COMPLETION_TIMEOUT_MS 60000 +#define DMA_ERROR_SYNC_RETRIES 100 + +PVRSRV_ERROR OSDmaPrepareTransfer(PVRSRV_DEVICE_NODE *psDevNode, void *psChan, + IMG_DMA_ADDR* psDmaAddr, IMG_UINT64* puiAddress, + IMG_UINT64 uiSize, IMG_BOOL bMemToDev, + IMG_HANDLE pvOSData, + IMG_HANDLE pvServerCleanupParam,PFN_SERVER_CLEANUP pfnServerCleanup, IMG_BOOL bFirst); + +PVRSRV_ERROR OSDmaPrepareTransferSparse(PVRSRV_DEVICE_NODE *psDevNode, IMG_HANDLE pvChan, + IMG_DMA_ADDR* psDmaAddr, IMG_BOOL *pbValid, + IMG_UINT64* puiAddress, IMG_UINT64 uiSize, + IMG_UINT32 uiOffsetInPage, + IMG_UINT32 ui32SizeInPages, + IMG_BOOL bMemToDev, + IMG_HANDLE pvOSData, + IMG_HANDLE pvServerCleanupParam, PFN_SERVER_CLEANUP pfnServerCleanup, + IMG_BOOL bFirst); + +PVRSRV_ERROR OSDmaAllocData(PVRSRV_DEVICE_NODE *psDevNode,IMG_UINT32 uiNumDMA, void **pvAllocedData); +PVRSRV_ERROR OSDmaSubmitTransfer(PVRSRV_DEVICE_NODE *psDevNode, void *pvOSData, void *psChan, IMG_BOOL bSynchronous); +void OSDmaForceCleanup(PVRSRV_DEVICE_NODE *psDevNode, void *pvChan, + void *pvOSData, IMG_HANDLE pvServerCleanupParam, + PFN_SERVER_CLEANUP pfnServerCleanup); +#endif +#endif /* OSFUNC_H */ + +/****************************************************************************** + End of file (osfunc.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/osfunc_arm.c b/drivers/gpu/drm/phytium/octopus/osfunc_arm.c new file mode 100644 index 000000000000..851b2d2b9390 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/osfunc_arm.c @@ -0,0 +1,151 @@ +/*************************************************************************/ /*! 
+@File +@Title arm specific OS functions +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Processor specific OS functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#include +#include +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0)) + #include +#endif +#include + +#include "pvrsrv_error.h" +#include "img_types.h" +#include "img_defs.h" +#include "osfunc.h" +#include "pvr_debug.h" + + +static inline size_t pvr_dmac_range_len(const void *pvStart, const void *pvEnd) +{ + return (size_t)((char *)pvEnd - (char *)pvStart); +} + +void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd) +{ + PVR_UNREFERENCED_PARAMETER(psDevNode); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) + arm_dma_ops.sync_single_for_device(psDevNode->psDevConfig->pvOSDevice, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_TO_DEVICE); + arm_dma_ops.sync_single_for_cpu(psDevNode->psDevConfig->pvOSDevice, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_FROM_DEVICE); +#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */ + /* Inner cache */ + dmac_flush_range(pvVirtStart, pvVirtEnd); + + /* Outer cache */ + outer_flush_range(sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */ +} + +void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd) +{ + 
PVR_UNREFERENCED_PARAMETER(psDevNode); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) + arm_dma_ops.sync_single_for_device(psDevNode->psDevConfig->pvOSDevice, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_TO_DEVICE); +#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */ + /* Inner cache */ + dmac_map_area(pvVirtStart, pvr_dmac_range_len(pvVirtStart, pvVirtEnd), DMA_TO_DEVICE); + + /* Outer cache */ + outer_clean_range(sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */ +} + +void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd) +{ + PVR_UNREFERENCED_PARAMETER(psDevNode); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) + arm_dma_ops.sync_single_for_cpu(psDevNode->psDevConfig->pvOSDevice, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_FROM_DEVICE); +#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */ + /* Inner cache */ + dmac_map_area(pvVirtStart, pvr_dmac_range_len(pvVirtStart, pvVirtEnd), DMA_FROM_DEVICE); + + /* Outer cache */ + outer_inv_range(sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */ +} + +OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) + return OS_CACHE_OP_ADDR_TYPE_PHYSICAL; +#else + return OS_CACHE_OP_ADDR_TYPE_BOTH; +#endif +} + +/* User Enable Register */ +#define PMUSERENR_EN 0x00000001 /* enable user access to the counters */ + +static void per_cpu_perf_counter_user_access_en(void *data) +{ + PVR_UNREFERENCED_PARAMETER(data); + /* Enable user-mode access to counters. 
*/ + asm volatile("mcr p15, 0, %0, c9, c14, 0" :: "r"(PMUSERENR_EN)); +} + +void OSUserModeAccessToPerfCountersEn(void) +{ + on_each_cpu(per_cpu_perf_counter_user_access_en, NULL, 1); +} + +IMG_BOOL OSIsWriteCombineUnalignedSafe(void) +{ + /* + * The kernel looks to have always used normal memory under ARM32. + * See osfunc_arm64.c implementation for more details. + */ + return IMG_TRUE; +} diff --git a/drivers/gpu/drm/phytium/octopus/osfunc_arm64.c b/drivers/gpu/drm/phytium/octopus/osfunc_arm64.c new file mode 100644 index 000000000000..c4a95ceaf0e4 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/osfunc_arm64.c @@ -0,0 +1,288 @@ +/*************************************************************************/ /*! +@File +@Title arm64 specific OS functions +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Processor specific OS functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <linux/version.h>
+#include <linux/cpumask.h>
+#include <linux/dma-mapping.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+
+#if defined(CONFIG_OUTER_CACHE)
+  /* If you encounter a 64-bit ARM system with an outer cache, you'll need
+   * to add the necessary code to manage that cache. See osfunc_arm.c
+   * for an example of how to do so.
+   */
+	#error "CONFIG_OUTER_CACHE not supported on arm64."
+#endif + +static inline void begin_user_mode_access(void) +{ +#if defined(CONFIG_ARM64) && defined(CONFIG_ARM64_SW_TTBR0_PAN) + uaccess_enable(); +#endif +} + +static inline void end_user_mode_access(void) +{ +#if defined(CONFIG_ARM64) && defined(CONFIG_ARM64_SW_TTBR0_PAN) + uaccess_disable(); +#endif +} + +static inline void FlushRange(void *pvRangeAddrStart, + void *pvRangeAddrEnd, + PVRSRV_CACHE_OP eCacheOp) +{ + IMG_UINT32 ui32CacheLineSize = OSCPUCacheAttributeSize(OS_CPU_CACHE_ATTRIBUTE_LINE_SIZE); + IMG_BYTE *pbStart = pvRangeAddrStart; + IMG_BYTE *pbEnd = pvRangeAddrEnd; + IMG_BYTE *pbBase; + + /* + On arm64, the TRM states in D5.8.1 (data and unified caches) that if cache + maintenance is performed on a memory location using a VA, the effect of + that cache maintenance is visible to all VA aliases of the physical memory + location. So here it's quicker to issue the machine cache maintenance + instruction directly without going via the Linux kernel DMA framework as + this is sufficient to maintain the CPU d-caches on arm64. 
+ */ + + begin_user_mode_access(); + + pbEnd = (IMG_BYTE *) PVR_ALIGN((uintptr_t)pbEnd, (uintptr_t)ui32CacheLineSize); + for (pbBase = pbStart; pbBase < pbEnd; pbBase += ui32CacheLineSize) + { + switch (eCacheOp) + { + case PVRSRV_CACHE_OP_CLEAN: + asm volatile ("dc cvac, %0" :: "r" (pbBase)); + break; + + case PVRSRV_CACHE_OP_INVALIDATE: + asm volatile ("dc ivac, %0" :: "r" (pbBase)); + break; + + case PVRSRV_CACHE_OP_FLUSH: + asm volatile ("dc civac, %0" :: "r" (pbBase)); + break; + + default: + PVR_DPF((PVR_DBG_ERROR, + "%s: Cache maintenance operation type %d is invalid", + __func__, eCacheOp)); + break; + } + } + + end_user_mode_access(); +} + +void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd) +{ + struct device *dev; + + if (pvVirtStart) + { + FlushRange(pvVirtStart, pvVirtEnd, PVRSRV_CACHE_OP_FLUSH); + return; + } + + dev = psDevNode->psDevConfig->pvOSDevice; + + if (dev) + { + dma_sync_single_for_device(dev, sCPUPhysStart.uiAddr, + sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, + DMA_TO_DEVICE); + dma_sync_single_for_cpu(dev, sCPUPhysStart.uiAddr, + sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, + DMA_FROM_DEVICE); + } + else + { + /* + * Allocations done prior to obtaining device pointer may + * affect in cache operations being scheduled. + * + * Ignore operations with null device pointer. + * This prevents crashes on newer kernels that don't return dummy ops + * when null pointer is passed to get_dma_ops. 
+ * + */ + + /* Don't spam on nohw */ +#if !defined(NO_HARDWARE) + PVR_DPF((PVR_DBG_WARNING, "Cache operation cannot be completed!")); +#endif + } + +} + +void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd) +{ + struct device *dev; + + if (pvVirtStart) + { + FlushRange(pvVirtStart, pvVirtEnd, PVRSRV_CACHE_OP_CLEAN); + return; + } + + dev = psDevNode->psDevConfig->pvOSDevice; + + if (dev) + { + dma_sync_single_for_device(dev, sCPUPhysStart.uiAddr, + sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, + DMA_TO_DEVICE); + } + else + { + /* + * Allocations done prior to obtaining device pointer may + * affect in cache operations being scheduled. + * + * Ignore operations with null device pointer. + * This prevents crashes on newer kernels that don't return dummy ops + * when null pointer is passed to get_dma_ops. + * + */ + + + /* Don't spam on nohw */ +#if !defined(NO_HARDWARE) + PVR_DPF((PVR_DBG_WARNING, "Cache operation cannot be completed!")); +#endif + } + +} + +void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd) +{ + struct device *dev; + + if (pvVirtStart) + { + FlushRange(pvVirtStart, pvVirtEnd, PVRSRV_CACHE_OP_INVALIDATE); + return; + } + + dev = psDevNode->psDevConfig->pvOSDevice; + + if (dev) + { + dma_sync_single_for_cpu(dev, sCPUPhysStart.uiAddr, + sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, + DMA_FROM_DEVICE); + } + else + { + /* + * Allocations done prior to obtaining device pointer may + * affect in cache operations being scheduled. + * + * Ignore operations with null device pointer. + * This prevents crashes on newer kernels that don't return dummy ops + * when null pointer is passed to get_dma_ops. 
+ * + */ + + /* Don't spam on nohw */ +#if !defined(NO_HARDWARE) + PVR_DPF((PVR_DBG_WARNING, "Cache operation cannot be completed!")); +#endif + } +} + + +OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void) +{ + return OS_CACHE_OP_ADDR_TYPE_PHYSICAL; +} + +void OSUserModeAccessToPerfCountersEn(void) +{ +} + +IMG_BOOL OSIsWriteCombineUnalignedSafe(void) +{ + /* + * Under ARM64 there is the concept of 'device' [0] and 'normal' [1] memory. + * Unaligned access on device memory is explicitly disallowed [2]: + * + * 'Further, unaligned accesses are only allowed to regions marked as Normal + * memory type. + * ... + * Attempts to perform unaligned accesses when not allowed will cause an + * alignment fault (data abort).' + * + * Write-combine on ARM64 can be implemented as either normal non-cached + * memory (NORMAL_NC) or as device memory with gathering enabled + * (DEVICE_GRE.) Kernel 3.13 changed this from the latter to the former. + * + * [0]:http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.den0024a/CHDBDIDF.html + * [1]:http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.den0024a/ch13s01s01.html + * [2]:http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html + */ + + pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL); + + return (pgprot_val(pgprot) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_NC); +} diff --git a/drivers/gpu/drm/phytium/octopus/osfunc_common.h b/drivers/gpu/drm/phytium/octopus/osfunc_common.h new file mode 100644 index 000000000000..538c21c003c0 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/osfunc_common.h @@ -0,0 +1,266 @@ +/*************************************************************************/ /*! +@File +@Title OS functions header +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description OS specific API definitions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/
+
+#ifndef OSFUNC_COMMON_H
+/*! @cond Doxygen_Suppress */
+#define OSFUNC_COMMON_H
+/*! @endcond */
+
+#if defined(__KERNEL__) && defined(__linux__)
+#include <linux/string.h>
+#else
+#include <string.h>
+#endif
+
+#include "img_types.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**************************************************************************/ /*!
+@Function       DeviceMemSet
+@Description    Set memory, whose mapping may be uncached, to a given value.
+                Safe implementation for all architectures for uncached mapping,
+                optimised for speed where supported by tool chains.
+                In such cases, OSDeviceMemSet() is defined as a call to this
+                function.
+@Input          pvDest     void pointer to the memory to be set
+@Input          ui8Value   byte containing the value to be set
+@Input          ui32Size   the number of bytes to be set to the given value
+@Return         None
+ */ /**************************************************************************/
+void DeviceMemSet(void *pvDest, IMG_UINT8 ui8Value, size_t ui32Size);
+
+/**************************************************************************/ /*!
+@Function       DeviceMemCopy
+@Description    Copy values from one area of memory to another. Safe
+                implementation for all architectures for uncached mapping,
+                of either the source or destination, optimised for speed
+                where supported by tool chains. In such cases,
+                OSDeviceMemCopy() is defined as a call to this function.
+@Input          pvDst      void pointer to the destination memory
+@Input          pvSrc      void pointer to the source memory
+@Input          ui32Size   the number of bytes to be copied
+@Return         None
+ */ /**************************************************************************/
+void DeviceMemCopy(void *pvDst, const void *pvSrc, size_t ui32Size);
+
+/**************************************************************************/ /*!
+@Function DeviceMemSetBytes +@Description Potentially very slow (but safe) memset fallback for non-GNU C + compilers for arm64/aarch64 +@Input pvDest void pointer to the memory to be set +@Input ui8Value byte containing the value to be set +@Input ui32Size the number of bytes to be set to the given value +@Return None + */ /**************************************************************************/ +void DeviceMemSetBytes(void *pvDest, IMG_UINT8 ui8Value, size_t ui32Size); + +/**************************************************************************/ /*! +@Function DeviceMemCopyBytes +@Description Potentially very slow (but safe) memcpy fallback for non-GNU C + compilers for arm64/aarch64 +@Input pvDst void pointer to the destination memory +@Input pvSrc void pointer to the source memory +@Input ui32Size the number of bytes to be copied +@Return None + */ /**************************************************************************/ +void DeviceMemCopyBytes(void *pvDst, const void *pvSrc, size_t ui32Size); + +/**************************************************************************/ /*! +@Function StringLCopy +@Description Copy at most uDataSize-1 bytes from pszSrc to pszDest. + If no null byte ('\0') is contained within the first uDataSize-1 + characters of the source string, the destination string will be + truncated. If the length of the source string is less than uDataSize + an additional NUL byte will be copied to the destination string + to ensure that the string is NUL-terminated. 
+@Input pszDest char pointer to the destination string +@Input pszSrc const char pointer to the source string +@Input uDataSize the maximum number of bytes to be copied +@Return Size of the source string + */ /**************************************************************************/ +size_t StringLCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDataSize); + +#if defined(__arm64__) || defined(__aarch64__) || defined(PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY) +#if defined(__GNUC__) +/* Workarounds for assumptions made that memory will not be mapped uncached + * in kernel or user address spaces on arm64 platforms (or other testing). + */ + +#define OSDeviceMemSet(a,b,c) DeviceMemSet((a), (b), (c)) +#define OSDeviceMemCopy(a,b,c) DeviceMemCopy((a), (b), (c)) + +#else /* defined __GNUC__ */ + +#define OSDeviceMemSet(a,b,c) DeviceMemSetBytes((a), (b), (c)) +#define OSDeviceMemCopy(a,b,c) DeviceMemCopyBytes((a), (b), (c)) + +#endif /* defined __GNUC__ */ + +#else /* (defined(__arm64__) || defined(__aarch64__) || defined(PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY)) */ + +/* Everything else */ + +/**************************************************************************/ /*! +@Function OSDeviceMemSet +@Description Set memory, whose mapping may be uncached, to a given value. + On some architectures, additional processing may be needed + if the mapping is uncached. +@Input a void pointer to the memory to be set +@Input b byte containing the value to be set +@Input c the number of bytes to be set to the given value +@Return Pointer to the destination memory. + */ /**************************************************************************/ +#define OSDeviceMemSet(a,b,c) memset((a), (b), (c)) + +/**************************************************************************/ /*! +@Function OSDeviceMemCopy +@Description Copy values from one area of memory, to another, when one + or both mappings may be uncached. 
+ On some architectures, additional processing may be needed + if mappings are uncached. +@Input a void pointer to the destination memory +@Input b void pointer to the source memory +@Input c the number of bytes to be copied +@Return Pointer to the destination memory. + */ /**************************************************************************/ +#define OSDeviceMemCopy(a,b,c) memcpy((a), (b), (c)) + +#endif /* (defined(__arm64__) || defined(__aarch64__) || defined(PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY)) */ + +/**************************************************************************/ /*! +@Function OSCachedMemSet +@Description Set memory, where the mapping is known to be cached, to a + given value. This function exists to allow an optimal memset + to be performed when memory is known to be cached. +@Input a void pointer to the memory to be set +@Input b byte containing the value to be set +@Input c the number of bytes to be set to the given value +@Return Pointer to the destination memory. + */ /**************************************************************************/ +#define OSCachedMemSet(a,b,c) memset((a), (b), (c)) + +/**************************************************************************/ /*! +@Function OSCachedMemCopy +@Description Copy values from one area of memory, to another, when both + mappings are known to be cached. + This function exists to allow an optimal memcpy to be + performed when memory is known to be cached. +@Input a void pointer to the destination memory +@Input b void pointer to the source memory +@Input c the number of bytes to be copied +@Return Pointer to the destination memory. + */ /**************************************************************************/ +#define OSCachedMemCopy(a,b,c) memcpy((a), (b), (c)) + +#if defined(__KERNEL__) + +/**************************************************************************/ /*! 
+@Function OSCachedMemSetWMB +@Description Set memory, where the mapping is known to be cached or + write-combine, to a given value and issue a write memory barrier + after. This + function exists to allow an optimal memset to be performed when + memory is known to be cached or write-combine. +@Input a void pointer to the memory to be set +@Input b byte containing the value to be set +@Input c the number of bytes to be set to the given value +@Return Pointer to the destination memory. + */ /**************************************************************************/ +#define OSCachedMemSetWMB(a,b,c) \ + do { \ + memset((a), (b), (c)); \ + OSWriteMemoryBarrier(); \ + } while (0) + +/**************************************************************************/ /*! +@Function OSCachedMemCopyWMB +@Description Copy values from one area of memory, to another, when both + mappings are known to be cached or write-combine and issue + a write memory barrier after. + This function exists to allow an optimal memcpy to be + performed when memory is known to be cached or write-combine. +@Input a void pointer to the destination memory +@Input b void pointer to the source memory +@Input c the number of bytes to be copied +@Return Pointer to the destination memory. + */ /**************************************************************************/ +#define OSCachedMemCopyWMB(a,b,c) \ + do { \ + memcpy((a), (b), (c)); \ + OSWriteMemoryBarrier(); \ + } while (0) + +#endif /* defined(__KERNEL__) */ + +/**************************************************************************/ /*! +@Function OSStringLCopy +@Description Copy at most uDataSize-1 bytes from pszSrc to pszDest. + If no null byte ('\0') is contained within the first uDataSize-1 + characters of the source string, the destination string will be + truncated. If the length of the source string is less than uDataSize + an additional NUL byte will be copied to the destination string + to ensure that the string is NUL-terminated. 
+@Input a char pointer to the destination string +@Input b const char pointer to the source string +@Input c the maximum number of bytes to be copied +@Return Size of the source string + */ /**************************************************************************/ +#if defined(__QNXNTO__) || (defined(__linux__) && defined(__KERNEL__) && !defined(DEBUG)) +#define OSStringLCopy(a,b,c) strlcpy((a), (b), (c)) +#else /* defined(__QNXNTO__) ... */ +#define OSStringLCopy(a,b,c) StringLCopy((a), (b), (c)) +#endif /* defined(__QNXNTO__) ... */ + +#ifdef __cplusplus +} +#endif + +#endif /* OSFUNC_COMMON_H */ + +/****************************************************************************** + End of file (osfunc_common.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/osfunc_riscv.c b/drivers/gpu/drm/phytium/octopus/osfunc_riscv.c new file mode 100644 index 000000000000..8b47089961e8 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/osfunc_riscv.c @@ -0,0 +1,179 @@ +/*************************************************************************/ /*! +@File +@Title RISC-V specific OS functions +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Processor specific OS functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "img_defs.h" +#include "osfunc.h" +#include "pvr_debug.h" +#include "cache_ops.h" + +void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd) +{ + /* + * RISC-V cache maintenance mechanism is not part of the core spec. + * This leaves the actual mechanism of action to an implementer. + * Here we let the system layer decide how maintenance is done. 
+ */ + if (psDevNode->psDevConfig->pfnHostCacheMaintenance) + { + psDevNode->psDevConfig->pfnHostCacheMaintenance( + psDevNode->psDevConfig->hSysData, + PVRSRV_CACHE_OP_FLUSH, + pvVirtStart, + pvVirtEnd, + sCPUPhysStart, + sCPUPhysEnd); + + } +#if !defined(NO_HARDWARE) + else + { + PVR_DPF((PVR_DBG_WARNING, + "%s: System doesn't implement cache maintenance. Skipping!", + __func__)); + } +#endif +} + +void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd) +{ + /* + * RISC-V cache maintenance mechanism is not part of the core spec. + * This leaves the actual mechanism of action to an implementer. + * Here we let the system layer decide how maintenance is done. + */ + if (psDevNode->psDevConfig->pfnHostCacheMaintenance) + { + psDevNode->psDevConfig->pfnHostCacheMaintenance( + psDevNode->psDevConfig->hSysData, + PVRSRV_CACHE_OP_CLEAN, + pvVirtStart, + pvVirtEnd, + sCPUPhysStart, + sCPUPhysEnd); + + } +#if !defined(NO_HARDWARE) + else + { + PVR_DPF((PVR_DBG_WARNING, + "%s: System doesn't implement cache maintenance. Skipping!", + __func__)); + } +#endif +} + +void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd) +{ + /* + * RISC-V cache maintenance mechanism is not part of the core spec. + * This leaves the actual mechanism of action to an implementer. + * Here we let the system layer decide how maintenance is done. + */ + if (psDevNode->psDevConfig->pfnHostCacheMaintenance) + { + psDevNode->psDevConfig->pfnHostCacheMaintenance( + psDevNode->psDevConfig->hSysData, + PVRSRV_CACHE_OP_INVALIDATE, + pvVirtStart, + pvVirtEnd, + sCPUPhysStart, + sCPUPhysEnd); + + } +#if !defined(NO_HARDWARE) + else + { + PVR_DPF((PVR_DBG_WARNING, + "%s: System doesn't implement cache maintenance. 
Skipping!", + __func__)); + } +#endif +} + +OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void) +{ + /* + * Need to obtain psDevNode here and do the following: + * + * OS_CACHE_OP_ADDR_TYPE eOpAddrType = + * psDevNode->psDevConfig->bHasPhysicalCacheMaintenance ? + * OS_CACHE_OP_ADDR_TYPE_PHYSICAL : OS_CACHE_OP_ADDR_TYPE_VIRTUAL; + * + * Return BOTH for now on. + * + */ + return OS_CACHE_OP_ADDR_TYPE_BOTH; +} + +void OSUserModeAccessToPerfCountersEn(void) +{ +#if !defined(NO_HARDWARE) + PVR_DPF((PVR_DBG_WARNING, "%s: Not implemented!", __func__)); + PVR_ASSERT(0); +#endif +} + +IMG_BOOL OSIsWriteCombineUnalignedSafe(void) +{ +#if !defined(NO_HARDWARE) + PVR_DPF((PVR_DBG_WARNING, + "%s: Not implemented (assuming false)!", + __func__)); + PVR_ASSERT(0); + return IMG_FALSE; +#else + return IMG_TRUE; +#endif +} diff --git a/drivers/gpu/drm/phytium/octopus/osfunc_x86.c b/drivers/gpu/drm/phytium/octopus/osfunc_x86.c new file mode 100644 index 000000000000..77d6d5324a6b --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/osfunc_x86.c @@ -0,0 +1,134 @@ +/*************************************************************************/ /*! +@File +@Title x86 specific OS functions +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Processor specific OS functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include <linux/version.h> +#include <linux/uaccess.h> + +#include "pvrsrv_error.h" +#include "img_types.h" +#include "img_defs.h" +#include "osfunc.h" +#include "pvr_debug.h" + +static void x86_flush_cache_range(const void *pvStart, const void *pvEnd) +{ + IMG_BYTE *pbStart = (IMG_BYTE *)pvStart; + IMG_BYTE *pbEnd = (IMG_BYTE *)pvEnd; + IMG_BYTE *pbBase; + + pbEnd = (IMG_BYTE *)PVR_ALIGN((uintptr_t)pbEnd, + (uintptr_t)boot_cpu_data.x86_clflush_size); + + mb(); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,168)) + __uaccess_begin(); +#endif + + for (pbBase = pbStart; pbBase < pbEnd; pbBase += boot_cpu_data.x86_clflush_size) + { + clflush(pbBase); + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,168)) + __uaccess_end(); +#endif + + mb(); +} + +void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd) +{ + PVR_UNREFERENCED_PARAMETER(psDevNode); + PVR_UNREFERENCED_PARAMETER(sCPUPhysStart); + PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd); + + x86_flush_cache_range(pvVirtStart, pvVirtEnd); +} + +void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd) +{ + PVR_UNREFERENCED_PARAMETER(psDevNode); + PVR_UNREFERENCED_PARAMETER(sCPUPhysStart); + PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd); + + /* No clean feature on x86 */ + x86_flush_cache_range(pvVirtStart, pvVirtEnd); +} + +void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd) +{ + PVR_UNREFERENCED_PARAMETER(psDevNode); + PVR_UNREFERENCED_PARAMETER(sCPUPhysStart); + PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd); + + /* No invalidate-only support */ + x86_flush_cache_range(pvVirtStart, pvVirtEnd); +} + +OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void) +{ + return 
OS_CACHE_OP_ADDR_TYPE_VIRTUAL; +} + +void OSUserModeAccessToPerfCountersEn(void) +{ + /* Not applicable to x86 architecture. */ +} + +IMG_BOOL OSIsWriteCombineUnalignedSafe(void) +{ + return IMG_TRUE; +} diff --git a/drivers/gpu/drm/phytium/octopus/oskm_apphint.h b/drivers/gpu/drm/phytium/octopus/oskm_apphint.h new file mode 100644 index 000000000000..f4cf0f6db14e --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/oskm_apphint.h @@ -0,0 +1,176 @@ +/*************************************************************************/ /*! +@File oskm_apphint.h +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description OS-independent interface for retrieving KM apphints +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#include "img_defs.h" +#if defined(__linux__) +#include "km_apphint.h" +#else +#include "services_client_porting.h" +#endif +#if !defined(OSKM_APPHINT_H) +#define OSKM_APPHINT_H + + +#if defined(__linux__) && !defined(DOXYGEN) +static INLINE IMG_UINT os_get_km_apphint_UINT32(void *state, APPHINT_ID id, const IMG_UINT32 *pAppHintDefault, IMG_UINT32 *pVal) { + return !pvr_apphint_get_uint32(id, pVal); +} +static INLINE IMG_UINT os_get_km_apphint_UINT64(void *state, APPHINT_ID id, const IMG_UINT64 *pAppHintDefault, IMG_UINT64 *pVal) { + return !pvr_apphint_get_uint64(id, pVal); +} +static INLINE IMG_UINT os_get_km_apphint_BOOL(void *state, APPHINT_ID id, const IMG_BOOL *pAppHintDefault, IMG_BOOL *pVal) { + return !pvr_apphint_get_bool(id, pVal); +} +static INLINE IMG_UINT os_get_km_apphint_STRING(void *state, APPHINT_ID id, const IMG_CHAR *pAppHintDefault, IMG_CHAR *buffer, size_t size) { + return !pvr_apphint_get_string(id, buffer, size); +} + +#define OSGetKMAppHintUINT32(state, name, appHintDefault, value) \ + os_get_km_apphint_UINT32(state, APPHINT_ID_ ## name, appHintDefault, value) + +#define OSGetKMAppHintUINT64(state, name, 
appHintDefault, value) \ + os_get_km_apphint_UINT64(state, APPHINT_ID_ ## name, appHintDefault, value) + +#define OSGetKMAppHintBOOL(state, name, appHintDefault, value) \ + os_get_km_apphint_BOOL(state, APPHINT_ID_ ## name, appHintDefault, value) + +#define OSGetKMAppHintSTRING(state, name, appHintDefault, buffer, size) \ + os_get_km_apphint_STRING(state, APPHINT_ID_ ## name, appHintDefault, buffer, size) + + +#define OSCreateKMAppHintState(state) \ + PVR_UNREFERENCED_PARAMETER(state) + +#define OSFreeKMAppHintState(state) \ + PVR_UNREFERENCED_PARAMETER(state) + +#else /* defined(__linux__) && !defined(DOXYGEN) */ + +/**************************************************************************/ /*! +@def OSGetKMAppHintUINT32(state, name, appHintDefault, value) +@Description Interface for retrieval of uint32 km app hint. + For non-linux operating systems, this macro implements a call + from server code to PVRSRVGetAppHint() declared in + services_client_porting.h, effectively making it 'shared' code. +@Input state App hint state +@Input name Name used to identify app hint +@Input appHintDefault Default value to be returned if no + app hint is found. +@Output value Pointer to returned app hint value. + */ /**************************************************************************/ +#define OSGetKMAppHintUINT32(state, name, appHintDefault, value) \ + PVRSRVGetAppHint(state, # name, IMG_UINT_TYPE, appHintDefault, value) + +/**************************************************************************/ /*! +@def OSGetKMAppHintUINT64(state, name, appHintDefault, value) +@Description Interface for retrieval of uint64 km app hint. + For non-linux operating systems, this macro implements a call + from server code to PVRSRVGetAppHint() declared in + services_client_porting.h, effectively making it 'shared' code. +@Input state App hint state +@Input name Name used to identify app hint +@Input appHintDefault Default value to be returned if no + app hint is found. 
+@Output value Pointer to returned app hint value. + */ /**************************************************************************/ +#define OSGetKMAppHintUINT64(state, name, appHintDefault, value) \ + PVRSRVGetAppHint(state, # name, IMG_UINT_TYPE, appHintDefault, value) + +/**************************************************************************/ /*! +@def OSGetKMAppHintBOOL(state, name, appHintDefault, value) +@Description Interface for retrieval of IMG_BOOL km app hint. + For non-linux operating systems, this macro implements a call + from server code to PVRSRVGetAppHint() declared in + services_client_porting.h, effectively making it 'shared' code. +@Input state App hint state +@Input name Name used to identify app hint +@Input appHintDefault Default value to be returned if no + app hint is found. +@Output value Pointer to returned app hint value. + */ /**************************************************************************/ +#define OSGetKMAppHintBOOL(state, name, appHintDefault, value) \ + PVRSRVGetAppHint(state, # name, IMG_UINT_TYPE, appHintDefault, value) + +/**************************************************************************/ /*! +@def OSGetKMAppHintSTRING(state, name, appHintDefault, buffer, size) +@Description Interface for retrieval of string km app hint. + For non-linux operating systems, this macro implements a call + from server code to PVRSRVGetAppHint() declared in + services_client_porting.h, effectively making it 'shared' code. +@Input state App hint state +@Input name Name used to identify app hint +@Input appHintDefault Default value to be returned if no + app hint is found. +@Output buffer Buffer used to return app hint string. +@Input size Size of the buffer. 
+ */ /**************************************************************************/ +#define OSGetKMAppHintSTRING(state, name, appHintDefault, buffer, size) \ + (PVR_UNREFERENCED_PARAMETER(size), PVRSRVGetAppHint(state, # name, IMG_STRING_TYPE, appHintDefault, buffer)) + +/**************************************************************************/ /*! +@def OSCreateKMAppHintState(state) +@Description Creates the app hint state. + For non-linux operating systems, this macro implements a call + from server code to PVRSRVCreateAppHintState() declared in + services_client_porting.h, effectively making it 'shared' code. +@Output state App hint state + */ /**************************************************************************/ +#define OSCreateKMAppHintState(state) \ + PVRSRVCreateAppHintState(IMG_SRV_UM, 0, state) + +/**************************************************************************/ /*! +@def OSFreeKMAppHintState +@Description Free the app hint state. + For non-linux operating systems, this macro implements a call + from server code to PVRSRVCreateAppHintState() declared in + services_client_porting.h, effectively making it 'shared' code. +@Output state App hint state + */ /**************************************************************************/ +#define OSFreeKMAppHintState(state) \ + PVRSRVFreeAppHintState(IMG_SRV_UM, state) + +#endif /* defined(__linux__) */ + +#endif /* OSKM_APPHINT_H */ + +/****************************************************************************** + End of file (oskm_apphint.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/osmmap.h b/drivers/gpu/drm/phytium/octopus/osmmap.h new file mode 100644 index 000000000000..7de632be9e25 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/osmmap.h @@ -0,0 +1,116 @@ +/*************************************************************************/ /*! +@File +@Title OS Interface for mapping PMRs into CPU space. 
+@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description OS abstraction for the mmap2 interface for mapping PMRs into + User Mode memory +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef OSMMAP_H +#define OSMMAP_H + +#include <stddef.h> + +#include "img_types.h" +#include "pvrsrv_error.h" +#include "pvrsrv_memallocflags.h" + +/*************************************************************************/ /*! +@Function OSMMapPMR +@Description Maps the specified PMR into CPU memory so that it may be + accessed by the user process. + Whether the memory is mapped read only, read/write, or not at + all, is dependent on the PMR itself. + The PMR handle is opaque to the user, and lower levels of this + stack ensure that the handle is private to this process, such + that this API cannot be abused to gain access to other people's + PMRs. The OS implementation of this function should return the + virtual address and length for the User to use. The "PrivData" + is to be stored opaquely by the caller (N.B. he should make no + assumptions, in particular, NULL is a valid handle) and given + back to the call to OSMUnmapPMR. + The OS implementation is free to use the PrivData handle for + any purpose it sees fit. +@Input hBridge The bridge handle. +@Input hPMR The handle of the PMR to be mapped. +@Input uiPMRLength The size of the PMR. +@Input uiFlags Flags indicating how the mapping should + be done (read-only, etc). These may not + be honoured if the PMR does not permit + them. +@Input uiPMRLength The size of the PMR. 
+@Output phOSMMapPrivDataOut Returned private data. +@Output ppvMappingAddressOut The returned mapping. +@Output puiMappingLengthOut The size of the returned mapping. +@Return PVRSRV_OK on success, failure code otherwise. + */ /*************************************************************************/ +PVRSRV_ERROR +OSMMapPMR(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_DEVMEM_SIZE_T uiPMRLength, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_HANDLE *phOSMMapPrivDataOut, + void **ppvMappingAddressOut, + size_t *puiMappingLengthOut); + +/*************************************************************************/ /*! +@Function OSMUnmapPMR +@Description Unmaps the specified PMR from CPU memory. + This function is the counterpart to OSMMapPMR. + The caller is required to pass the PMR handle back in along + with the same 3-tuple of information that was returned by the + call to OSMMapPMR in phOSMMapPrivDataOut. + It is possible to unmap only part of the original mapping + with this call, by specifying only the address range to be + unmapped in pvMappingAddress and uiMappingLength. +@Input hBridge The bridge handle. +@Input hPMR The handle of the PMR to be unmapped. +@Input hOSMMapPrivData The OS private data of the mapping. +@Input pvMappingAddress The address to be unmapped. +@Input uiMappingLength The size to be unmapped. +@Return PVRSRV_OK on success, failure code otherwise. + */ /*************************************************************************/ +void +OSMUnmapPMR(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_HANDLE hOSMMapPrivData, + void *pvMappingAddress, + size_t uiMappingLength); + +#endif /* OSMMAP_H */ diff --git a/drivers/gpu/drm/phytium/octopus/osmmap_stub.c b/drivers/gpu/drm/phytium/octopus/osmmap_stub.c new file mode 100644 index 000000000000..80bfdb2e4667 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/osmmap_stub.c @@ -0,0 +1,146 @@ +/*************************************************************************/ /*! 
+@File +@Title Device Memory Management +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description OS abstraction for the mmap2 interface for mapping PMRs into + User Mode memory +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/* our exported API */ +#include "osmmap.h" + +/* include/ */ +#include "img_types.h" +#include "img_defs.h" +#include "pvr_debug.h" +#include "pvrsrv_error.h" + +/* services/include/ */ + +/* services/include/srvhelper/ */ +#include "ra.h" + +#include "pmr.h" + +PVRSRV_ERROR +OSMMapPMR(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_DEVMEM_SIZE_T uiPMRSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_HANDLE *phOSMMapPrivDataOut, + void **ppvMappingAddressOut, + size_t *puiMappingLengthOut) +{ + PVRSRV_ERROR eError; + PMR *psPMR; + void *pvKernelAddress; + size_t uiLength; + IMG_HANDLE hPriv; + + PVR_UNREFERENCED_PARAMETER(hBridge); + PVR_UNREFERENCED_PARAMETER(uiFlags); + + /* + Normally this function would mmap a PMR into the memory space of + user process, but in this case we're taking a PMR and mapping it + into kernel virtual space. 
We keep the same function name for + symmetry as this allows the higher layers of the software stack + to not care whether they are user mode or kernel + */ + + psPMR = hPMR; + + if (PMR_IsSparse(psPMR)) + { + eError = PMRAcquireSparseKernelMappingData(psPMR, + 0, + 0, + &pvKernelAddress, + &uiLength, + &hPriv); + } + else + { + eError = PMRAcquireKernelMappingData(psPMR, + 0, + 0, + &pvKernelAddress, + &uiLength, + &hPriv); + } + if (eError != PVRSRV_OK) + { + goto e0; + } + + *phOSMMapPrivDataOut = hPriv; + *ppvMappingAddressOut = pvKernelAddress; + *puiMappingLengthOut = uiLength; + + /* MappingLength might be rounded up to page size */ + PVR_ASSERT(*puiMappingLengthOut >= uiPMRSize); + + return PVRSRV_OK; + + /* + error exit paths follow + */ + +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +void +OSMUnmapPMR(IMG_HANDLE hBridge, + IMG_HANDLE hPMR, + IMG_HANDLE hOSMMapPrivData, + void *pvMappingAddress, + size_t uiMappingLength) +{ + PMR *psPMR; + + PVR_UNREFERENCED_PARAMETER(hBridge); + PVR_UNREFERENCED_PARAMETER(pvMappingAddress); + PVR_UNREFERENCED_PARAMETER(uiMappingLength); + + psPMR = hPMR; + PMRReleaseKernelMappingData(psPMR, + hOSMMapPrivData); +} diff --git a/drivers/gpu/drm/phytium/octopus/ospvr_gputrace.h b/drivers/gpu/drm/phytium/octopus/ospvr_gputrace.h new file mode 100644 index 000000000000..a0db7eb2d67e --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/ospvr_gputrace.h @@ -0,0 +1,167 @@ +/*************************************************************************/ /*! +@File ospvr_gputrace.h +@Title PVR GPU Trace module common environment interface +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef PVR_GPUTRACE_H_ +#define PVR_GPUTRACE_H_ + +#include "img_types.h" +#include "img_defs.h" +#include "rgx_hwperf.h" +#include "device.h" + +#if defined(__linux__) + +void PVRGpuTraceEnqueueEvent( + PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 ui32FirmwareCtx, + IMG_UINT32 ui32ExternalJobRef, + IMG_UINT32 ui32InternalJobRef, + RGX_HWPERF_KICK_TYPE eKickType); + +/* Early initialisation of GPU Trace events logic. + * This function is called on *driver* initialisation. */ +PVRSRV_ERROR PVRGpuTraceSupportInit(void); + +/* GPU Trace resources final cleanup. + * This function is called on driver de-initialisation. */ +void PVRGpuTraceSupportDeInit(void); + +/* Initialisation for AppHints callbacks. + * This function is called during the late stage of driver initialisation but + * before the device initialisation but after the debugfs sub-system has been + * initialised. */ +void PVRGpuTraceInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode); + +/* Per-device initialisation of the GPU Trace resources */ +PVRSRV_ERROR PVRGpuTraceInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode); + +/* Per-device cleanup for the GPU Trace resources. */ +void PVRGpuTraceDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode); + +/* Enables the gpu trace sub-system for a given device. */ +PVRSRV_ERROR PVRGpuTraceSetEnabled( + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_BOOL bNewValue); + +/* Returns IMG_TRUE if the gpu trace sub-system has been enabled (but not + * necessarily initialised). */ +IMG_BOOL PVRGpuTraceIsEnabled(void); + +/* Performs some initialisation steps if the feature was enabled + * on driver startup. 
*/ +void PVRGpuTraceInitIfEnabled(PVRSRV_DEVICE_NODE *psDeviceNode); + +/* FTrace events callbacks interface */ + +void PVRGpuTraceEnableUfoCallback(void); +void PVRGpuTraceDisableUfoCallback(void); + +void PVRGpuTraceEnableFirmwareActivityCallback(void); +void PVRGpuTraceDisableFirmwareActivityCallback(void); + +#else /* defined(__linux__) */ + +static inline void PVRGpuTraceEnqueueEvent( + PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 ui32FirmwareCtx, + IMG_UINT32 ui32ExternalJobRef, + IMG_UINT32 ui32InternalJobRef, + RGX_HWPERF_KICK_TYPE eKickType) +{ + PVR_UNREFERENCED_PARAMETER(psDevNode); + PVR_UNREFERENCED_PARAMETER(ui32ExternalJobRef); + PVR_UNREFERENCED_PARAMETER(ui32InternalJobRef); + PVR_UNREFERENCED_PARAMETER(eKickType); +} + +static inline PVRSRV_ERROR PVRGpuTraceSupportInit(void) { + return PVRSRV_OK; +} + +static inline void PVRGpuTraceSupportDeInit(void) {} + +static inline void PVRGpuTraceInitAppHintCallbacks( + const PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVR_UNREFERENCED_PARAMETER(psDeviceNode); +} + +static inline PVRSRV_ERROR PVRGpuTraceInitDevice( + PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + return PVRSRV_OK; +} + +static inline void PVRGpuTraceDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVR_UNREFERENCED_PARAMETER(psDeviceNode); +} + +static inline PVRSRV_ERROR PVRGpuTraceSetEnabled( + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_BOOL bNewValue) +{ + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(bNewValue); + return PVRSRV_OK; +} + +static inline IMG_BOOL PVRGpuTraceIsEnabled(void) +{ + return IMG_FALSE; +} + +static inline void PVRGpuTraceInitIfEnabled(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVR_UNREFERENCED_PARAMETER(psDeviceNode); +} + +static inline void PVRGpuTraceEnableUfoCallback(void) {} +static inline void PVRGpuTraceDisableUfoCallback(void) {} + +static inline void PVRGpuTraceEnableFirmwareActivityCallback(void) {} +static inline void 
PVRGpuTraceDisableFirmwareActivityCallback(void) {} + +#endif /* defined(__linux__) */ + +#endif /* PVR_GPUTRACE_H_ */ diff --git a/drivers/gpu/drm/phytium/octopus/pci_support.c b/drivers/gpu/drm/phytium/octopus/pci_support.c new file mode 100644 index 000000000000..197deff1e1a1 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pci_support.c @@ -0,0 +1,726 @@ +/*************************************************************************/ /*! +@File +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include +#include + +#if defined(CONFIG_MTRR) +#include +#endif + +#include "pci_support.h" +#include "allocmem.h" + +typedef struct _PVR_PCI_DEV_TAG +{ + struct pci_dev *psPCIDev; + HOST_PCI_INIT_FLAGS ePCIFlags; + IMG_BOOL abPCIResourceInUse[DEVICE_COUNT_RESOURCE]; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) + int iMTRR[DEVICE_COUNT_RESOURCE]; +#endif +} PVR_PCI_DEV; + +/*************************************************************************/ /*! +@Function OSPCISetDev +@Description Set a PCI device for subsequent use. 
+@Input pvPCICookie Pointer to OS specific PCI structure +@Input eFlags Flags +@Return PVRSRV_PCI_DEV_HANDLE Pointer to PCI device handle +*/ /**************************************************************************/ +PVRSRV_PCI_DEV_HANDLE OSPCISetDev(void *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags) +{ + int err; + IMG_UINT32 i; + PVR_PCI_DEV *psPVRPCI; + + psPVRPCI = OSAllocMem(sizeof(*psPVRPCI)); + if (psPVRPCI == NULL) + { + printk(KERN_ERR "OSPCISetDev: Couldn't allocate PVR PCI structure\n"); + return NULL; + } + + psPVRPCI->psPCIDev = (struct pci_dev *)pvPCICookie; + psPVRPCI->ePCIFlags = eFlags; + + err = pci_enable_device(psPVRPCI->psPCIDev); + if (err != 0) + { + printk(KERN_ERR "OSPCISetDev: Couldn't enable device (%d)\n", err); + OSFreeMem(psPVRPCI); + return NULL; + } + + if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER) /* PRQA S 3358 */ /* misuse of enums */ + { + pci_set_master(psPVRPCI->psPCIDev); + } + + if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI) /* PRQA S 3358 */ /* misuse of enums */ + { +#if defined(CONFIG_PCI_MSI) + err = pci_enable_msi(psPVRPCI->psPCIDev); + if (err != 0) + { + printk(KERN_ERR "OSPCISetDev: Couldn't enable MSI (%d)", err); + psPVRPCI->ePCIFlags &= ~HOST_PCI_INIT_FLAG_MSI; /* PRQA S 1474,3358,4130 */ /* misuse of enums */ + } +#else + printk(KERN_ERR "OSPCISetDev: MSI support not enabled in the kernel"); +#endif + } + + /* Initialise the PCI resource and MTRR tracking array */ + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) + { + psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) + psPVRPCI->iMTRR[i] = -1; +#endif + } + + return (PVRSRV_PCI_DEV_HANDLE)psPVRPCI; +} + +/*************************************************************************/ /*! +@Function OSPCIAcquireDev +@Description Acquire a PCI device for subsequent use. 
+@Input ui16VendorID Vendor PCI ID +@Input ui16DeviceID Device PCI ID +@Input eFlags Flags +@Return PVRSRV_PCI_DEV_HANDLE Pointer to PCI device handle +*/ /**************************************************************************/ +PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID, + IMG_UINT16 ui16DeviceID, + HOST_PCI_INIT_FLAGS eFlags) +{ + struct pci_dev *psPCIDev; + + psPCIDev = pci_get_device(ui16VendorID, ui16DeviceID, NULL); + if (psPCIDev == NULL) + { + return NULL; + } + + return OSPCISetDev((void *)psPCIDev, eFlags); +} + +/*************************************************************************/ /*! +@Function OSPCIIRQ +@Description Get the interrupt number for the device. +@Input hPVRPCI PCI device handle +@Output pui16DeviceID Pointer to where the interrupt number + should be returned +@Return PVRSRV_ERROR Services error code +*/ /**************************************************************************/ +PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ) +{ + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + + if (pui32IRQ == NULL) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + *pui32IRQ = psPVRPCI->psPCIDev->irq; + + return PVRSRV_OK; +} + +/* Functions supported by OSPCIAddrRangeFunc */ +enum HOST_PCI_ADDR_RANGE_FUNC +{ + HOST_PCI_ADDR_RANGE_FUNC_LEN, + HOST_PCI_ADDR_RANGE_FUNC_START, + HOST_PCI_ADDR_RANGE_FUNC_END, + HOST_PCI_ADDR_RANGE_FUNC_REQUEST, + HOST_PCI_ADDR_RANGE_FUNC_RELEASE +}; + +/*************************************************************************/ /*! 
+@Function OSPCIAddrRangeFunc +@Description Internal support function for various address range related + functions +@Input eFunc Function to perform +@Input hPVRPCI PCI device handle +@Input ui32Index Address range index +@Return IMG_UINT32 Function dependent value +*/ /**************************************************************************/ +static IMG_UINT64 OSPCIAddrRangeFunc(enum HOST_PCI_ADDR_RANGE_FUNC eFunc, + PVRSRV_PCI_DEV_HANDLE hPVRPCI, + IMG_UINT32 ui32Index) +{ + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + + if (ui32Index >= DEVICE_COUNT_RESOURCE) + { + printk(KERN_ERR "OSPCIAddrRangeFunc: Index out of range"); + return 0; + } + + switch (eFunc) + { + case HOST_PCI_ADDR_RANGE_FUNC_LEN: + { + return pci_resource_len(psPVRPCI->psPCIDev, ui32Index); + } + case HOST_PCI_ADDR_RANGE_FUNC_START: + { + return pci_resource_start(psPVRPCI->psPCIDev, ui32Index); + } + case HOST_PCI_ADDR_RANGE_FUNC_END: + { + return pci_resource_end(psPVRPCI->psPCIDev, ui32Index); + } + case HOST_PCI_ADDR_RANGE_FUNC_REQUEST: + { + int err = pci_request_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index, PVRSRV_MODNAME); + if (err != 0) + { + printk(KERN_ERR "OSPCIAddrRangeFunc: pci_request_region_failed (%d)", err); + return 0; + } + psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_TRUE; + return 1; + } + case HOST_PCI_ADDR_RANGE_FUNC_RELEASE: + { + if (psPVRPCI->abPCIResourceInUse[ui32Index]) + { + pci_release_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index); + psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_FALSE; + } + return 1; + } + default: + { + printk(KERN_ERR "OSPCIAddrRangeFunc: Unknown function"); + break; + } + } + + return 0; +} + +/*************************************************************************/ /*! 
+@Function OSPCIAddrRangeLen +@Description Returns length of a given address range +@Input hPVRPCI PCI device handle +@Input ui32Index Address range index +@Return IMG_UINT32 Length of address range or 0 if no + such range +*/ /**************************************************************************/ +IMG_UINT64 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) +{ + return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_LEN, hPVRPCI, ui32Index); +} + +/*************************************************************************/ /*! +@Function OSPCIAddrRangeStart +@Description Returns the start of a given address range +@Input hPVRPCI PCI device handle +@Input ui32Index Address range index +@Return IMG_UINT32 Start of address range or 0 if no + such range +*/ /**************************************************************************/ +IMG_UINT64 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) +{ + return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_START, hPVRPCI, ui32Index); +} + +/*************************************************************************/ /*! +@Function OSPCIAddrRangeEnd +@Description Returns the end of a given address range +@Input hPVRPCI PCI device handle +@Input ui32Index Address range index +@Return IMG_UINT32 End of address range or 0 if no such + range +*/ /**************************************************************************/ +IMG_UINT64 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) +{ + return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_END, hPVRPCI, ui32Index); +} + +/*************************************************************************/ /*! 
+@Function OSPCIRequestAddrRange +@Description Request a given address range index for subsequent use +@Input hPVRPCI PCI device handle +@Input ui32Index Address range index +@Return PVRSRV_ERROR Services error code +*/ /**************************************************************************/ +PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, + IMG_UINT32 ui32Index) +{ + if (OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_REQUEST, hPVRPCI, ui32Index) == 0) + { + return PVRSRV_ERROR_PCI_CALL_FAILED; + } + else + { + return PVRSRV_OK; + } +} + +/*************************************************************************/ /*! +@Function OSPCIReleaseAddrRange +@Description Release a given address range that is no longer being used +@Input hPVRPCI PCI device handle +@Input ui32Index Address range index +@Return PVRSRV_ERROR Services error code +*/ /**************************************************************************/ +PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) +{ + if (OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_RELEASE, hPVRPCI, ui32Index) == 0) + { + return PVRSRV_ERROR_PCI_CALL_FAILED; + } + else + { + return PVRSRV_OK; + } +} + +/*************************************************************************/ /*! 
+@Function OSPCIRequestAddrRegion +@Description Request a given region from an address range for subsequent use +@Input hPVRPCI PCI device handle +@Input ui32Index Address range index +@Input uiOffset Offset into the address range that forms + the start of the region +@Input uiLength Length of the region +@Return PVRSRV_ERROR Services error code +*/ /**************************************************************************/ +PVRSRV_ERROR OSPCIRequestAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI, + IMG_UINT32 ui32Index, + IMG_UINT64 uiOffset, + IMG_UINT64 uiLength) +{ + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + resource_size_t start; + resource_size_t end; + + start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index); + end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index); + + /* Check that the requested region is valid */ + if ((start + uiOffset + uiLength - 1) > end) + { + return PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH; + } + + if (pci_resource_flags(psPVRPCI->psPCIDev, ui32Index) & IORESOURCE_IO) + { + if (request_region(start + uiOffset, uiLength, PVRSRV_MODNAME) == NULL) + { + return PVRSRV_ERROR_PCI_REGION_UNAVAILABLE; + } + } + else + { + if (request_mem_region(start + uiOffset, uiLength, PVRSRV_MODNAME) == NULL) + { + return PVRSRV_ERROR_PCI_REGION_UNAVAILABLE; + } + } + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! 
+@Function OSPCIReleaseAddrRegion +@Description Release a given region, from an address range, that is no + longer in use +@Input hPVRPCI PCI device handle +@Input ui32Index Address range index +@Input ui32Offset Offset into the address range that forms + the start of the region +@Input ui32Length Length of the region +@Return PVRSRV_ERROR Services error code +*/ /**************************************************************************/ +PVRSRV_ERROR OSPCIReleaseAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI, + IMG_UINT32 ui32Index, + IMG_UINT64 uiOffset, + IMG_UINT64 uiLength) +{ + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + resource_size_t start; + resource_size_t end; + + start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index); + end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index); + + /* Check that the region is valid */ + if ((start + uiOffset + uiLength - 1) > end) + { + return PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH; + } + + if (pci_resource_flags(psPVRPCI->psPCIDev, ui32Index) & IORESOURCE_IO) + { + release_region(start + uiOffset, uiLength); + } + else + { + release_mem_region(start + uiOffset, uiLength); + } + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! 
+@Function OSPCIReleaseDev +@Description Release a PCI device that is no longer being used +@Input hPVRPCI PCI device handle +@Return PVRSRV_ERROR Services error code +*/ /**************************************************************************/ +PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI) +{ + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + int i; + + /* Release all PCI regions that are currently in use */ + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) + { + if (psPVRPCI->abPCIResourceInUse[i]) + { + pci_release_region(psPVRPCI->psPCIDev, i); + psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE; + } + } + +#if defined(CONFIG_PCI_MSI) + if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI) /* PRQA S 3358 */ /* misuse of enums */ + { + pci_disable_msi(psPVRPCI->psPCIDev); + } +#endif + + if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER) /* PRQA S 3358 */ /* misuse of enums */ + { + pci_clear_master(psPVRPCI->psPCIDev); + } + + pci_disable_device(psPVRPCI->psPCIDev); + + OSFreeMem(psPVRPCI); + /*not nulling pointer, copy on stack*/ + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! 
+@Function OSPCISuspendDev +@Description Prepare PCI device to be turned off by power management +@Input hPVRPCI PCI device handle +@Return PVRSRV_ERROR Services error code +*/ /**************************************************************************/ +PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI) +{ + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + int i; + int err; + + /* Release all PCI regions that are currently in use */ + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) + { + if (psPVRPCI->abPCIResourceInUse[i]) + { + pci_release_region(psPVRPCI->psPCIDev, i); + } + } + + err = pci_save_state(psPVRPCI->psPCIDev); + if (err != 0) + { + printk(KERN_ERR "OSPCISuspendDev: pci_save_state_failed (%d)", err); + return PVRSRV_ERROR_PCI_CALL_FAILED; + } + + pci_disable_device(psPVRPCI->psPCIDev); + + err = pci_set_power_state(psPVRPCI->psPCIDev, pci_choose_state(psPVRPCI->psPCIDev, PMSG_SUSPEND)); + switch (err) + { + case 0: + break; + case -EIO: + printk(KERN_ERR "OSPCISuspendDev: device doesn't support PCI PM"); + break; + case -EINVAL: + printk(KERN_ERR "OSPCISuspendDev: can't enter requested power state"); + break; + default: + printk(KERN_ERR "OSPCISuspendDev: pci_set_power_state failed (%d)", err); + break; + } + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! 
+@Function OSPCIResumeDev +@Description Prepare a PCI device to be resumed by power management +@Input hPVRPCI PCI device handle +@Return PVRSRV_ERROR Services error code +*/ /**************************************************************************/ +PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI) +{ + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + int err; + int i; + + err = pci_set_power_state(psPVRPCI->psPCIDev, pci_choose_state(psPVRPCI->psPCIDev, PMSG_ON)); + switch (err) + { + case 0: + break; + case -EIO: + printk(KERN_ERR "OSPCIResumeDev: device doesn't support PCI PM"); + break; + case -EINVAL: + printk(KERN_ERR "OSPCIResumeDev: can't enter requested power state"); + return PVRSRV_ERROR_UNKNOWN_POWER_STATE; + default: + printk(KERN_ERR "OSPCIResumeDev: pci_set_power_state failed (%d)", err); + return PVRSRV_ERROR_UNKNOWN_POWER_STATE; + } + + pci_restore_state(psPVRPCI->psPCIDev); + + err = pci_enable_device(psPVRPCI->psPCIDev); + if (err != 0) + { + printk(KERN_ERR "OSPCIResumeDev: Couldn't enable device (%d)", err); + return PVRSRV_ERROR_PCI_CALL_FAILED; + } + + if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER) /* PRQA S 3358 */ /* misuse of enums */ + pci_set_master(psPVRPCI->psPCIDev); + + /* Restore the PCI resource tracking array */ + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) + { + if (psPVRPCI->abPCIResourceInUse[i]) + { + err = pci_request_region(psPVRPCI->psPCIDev, i, PVRSRV_MODNAME); + if (err != 0) + { + printk(KERN_ERR "OSPCIResumeDev: pci_request_region_failed (region %d, error %d)", i, err); + } + } + } + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! +@Function OSPCIGetVendorDeviceIDs +@Description Retrieve PCI vendor ID and device ID. 
+@Input hPVRPCI PCI device handle +@Output pui16VendorID Vendor ID +@Output pui16DeviceID Device ID +@Return PVRSRV_ERROR Services error code +*/ /**************************************************************************/ +PVRSRV_ERROR OSPCIGetVendorDeviceIDs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, + IMG_UINT16 *pui16VendorID, + IMG_UINT16 *pui16DeviceID) +{ + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + struct pci_dev *psPCIDev; + + if (psPVRPCI == NULL) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psPCIDev = psPVRPCI->psPCIDev; + if (psPCIDev == NULL) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + *pui16VendorID = psPCIDev->vendor; + *pui16DeviceID = psPCIDev->device; + + return PVRSRV_OK; +} + +#if defined(CONFIG_MTRR) + +/*************************************************************************/ /*! +@Function OSPCIClearResourceMTRRs +@Description Clear any BIOS-configured MTRRs for a PCI memory region +@Input hPVRPCI PCI device handle +@Input ui32Index Address range index +@Return PVRSRV_ERROR Services error code +*/ /**************************************************************************/ +PVRSRV_ERROR OSPCIClearResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) +{ + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + resource_size_t start, end; + int res; + + start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index); + end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index) + 1; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) + res = arch_io_reserve_memtype_wc(start, end - start); + if (res) + { + return PVRSRV_ERROR_PCI_CALL_FAILED; + } +#endif + res = arch_phys_wc_add(start, end - start); + if (res < 0) + { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) + arch_io_free_memtype_wc(start, end - start); +#endif + + return PVRSRV_ERROR_PCI_CALL_FAILED; + } + psPVRPCI->iMTRR[ui32Index] = res; +#else + + res = mtrr_add(start, end - start, MTRR_TYPE_UNCACHABLE, 0); + if (res < 0) 
+ { + printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)", res); + return PVRSRV_ERROR_PCI_CALL_FAILED; + } + + res = mtrr_del(res, start, end - start); + if (res < 0) + { + printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_del failed (%d)", res); + return PVRSRV_ERROR_PCI_CALL_FAILED; + } + + /* Workaround for overlapping MTRRs. */ + { + IMG_BOOL bGotMTRR0 = IMG_FALSE; + + /* Current mobo BIOSes will normally set up a WRBACK MTRR spanning + * 0->4GB, and then another 4GB->6GB. If the PCI card's automatic & + * overlapping UNCACHABLE MTRR is deleted, we see WRBACK behaviour. + * + * WRBACK is incompatible with some PCI devices, so try to split + * the UNCACHABLE regions up and insert a WRCOMB region instead. + */ + res = mtrr_add(start, end - start, MTRR_TYPE_WRBACK, 0); + if (res < 0) + { + /* If this fails, services has probably run before and created + * a write-combined MTRR for the test chip. Assume it has, and + * don't return an error here. + */ + return PVRSRV_OK; + } + + if (res == 0) + bGotMTRR0 = IMG_TRUE; + + res = mtrr_del(res, start, end - start); + if (res < 0) + { + printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_del failed (%d)", res); + return PVRSRV_ERROR_PCI_CALL_FAILED; + } + + if (bGotMTRR0) + { + /* Replace 0 with a non-overlapping WRBACK MTRR */ + res = mtrr_add(0, start, MTRR_TYPE_WRBACK, 0); + if (res < 0) + { + printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)", res); + return PVRSRV_ERROR_PCI_CALL_FAILED; + } + + /* Add a WRCOMB MTRR for the PCI device memory bar */ + res = mtrr_add(start, end - start, MTRR_TYPE_WRCOMB, 0); + if (res < 0) + { + printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)", res); + return PVRSRV_ERROR_PCI_CALL_FAILED; + } + } + } +#endif + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! 
+@Function OSPCIReleaseResourceMTRRs +@Description Release resources allocated by OSPCIClearResourceMTRRs +@Input hPVRPCI PCI device handle +@Input ui32Index Address range index +*/ /**************************************************************************/ +void OSPCIReleaseResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + + if (psPVRPCI->iMTRR[ui32Index] >= 0) + { + arch_phys_wc_del(psPVRPCI->iMTRR[ui32Index]); + psPVRPCI->iMTRR[ui32Index] = -1; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) + { + resource_size_t start, end; + + start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index); + end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index) + 1; + + arch_io_free_memtype_wc(start, end - start); + } +#endif + } +#else + PVR_UNREFERENCED_PARAMETER(hPVRPCI); + PVR_UNREFERENCED_PARAMETER(ui32Index); +#endif +} +#endif /* defined(CONFIG_MTRR) */ diff --git a/drivers/gpu/drm/phytium/octopus/pci_support.h b/drivers/gpu/drm/phytium/octopus/pci_support.h new file mode 100644 index 000000000000..b5cd7ef21e56 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pci_support.h @@ -0,0 +1,99 @@ +/*************************************************************************/ /*! +@File +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef PCI_SUPPORT_H +#define PCI_SUPPORT_H + +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv_error.h" + +#if defined(__linux__) +#include +#define TO_PCI_COOKIE(dev) to_pci_dev((struct device *)(dev)) +#else +#define TO_PCI_COOKIE(dev) (dev) +#endif + +typedef enum _HOST_PCI_INIT_FLAGS_ +{ + HOST_PCI_INIT_FLAG_BUS_MASTER = 0x00000001, + HOST_PCI_INIT_FLAG_MSI = 0x00000002, + HOST_PCI_INIT_FLAG_FORCE_I32 = 0x7fffffff +} HOST_PCI_INIT_FLAGS; + +struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_; +typedef struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_ *PVRSRV_PCI_DEV_HANDLE; + +PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID, IMG_UINT16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags); +PVRSRV_PCI_DEV_HANDLE OSPCISetDev(void *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags); +PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI); +PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ); +IMG_UINT64 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); +IMG_UINT64 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); +IMG_UINT64 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); +PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); +PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); +PVRSRV_ERROR OSPCIRequestAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index, IMG_UINT64 uiOffset, IMG_UINT64 uiLength); +PVRSRV_ERROR OSPCIReleaseAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index, IMG_UINT64 uiOffset, IMG_UINT64 uiLength); +PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI); +PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI); +PVRSRV_ERROR OSPCIGetVendorDeviceIDs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT16 *pui16VendorID, IMG_UINT16 *pui16DeviceID); + +#if defined(CONFIG_MTRR) +PVRSRV_ERROR 
OSPCIClearResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); +void OSPCIReleaseResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); +#else +static inline PVRSRV_ERROR OSPCIClearResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) +{ + PVR_UNREFERENCED_PARAMETER(hPVRPCI); + PVR_UNREFERENCED_PARAMETER(ui32Index); + return PVRSRV_OK; +} + +static inline void OSPCIReleaseResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) +{ + PVR_UNREFERENCED_PARAMETER(hPVRPCI); + PVR_UNREFERENCED_PARAMETER(ui32Index); +} +#endif + +#endif /* PCI_SUPPORT_H */ diff --git a/drivers/gpu/drm/phytium/octopus/pdump.h b/drivers/gpu/drm/phytium/octopus/pdump.h new file mode 100644 index 000000000000..ae1c6c2dab1b --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pdump.h @@ -0,0 +1,232 @@ +/*************************************************************************/ /*! +@File +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef SERVICES_PDUMP_H +#define SERVICES_PDUMP_H + +#include "img_types.h" +#include "services_km.h" + + +/* A PDump out2.txt script is made up of 3 sections from three buffers: + * * + * - Init phase buffer - holds PDump data written during driver + * initialisation, non-volatile. + * - Main phase buffer - holds PDump data written after driver init, + * volatile. + * - Deinit phase buffer - holds PDump data needed to shutdown HW/play back, + * written only during driver initialisation using + * the DEINIT flag. + * + * Volatile in this sense means that the buffer is drained and cleared when + * the pdump capture application connects and transfers the data to file. 
+ * + * The PDump sub-system uses the driver state (init/post-init), whether + * the pdump capture application is connected or not (capture range set/unset) + * and, if pdump connected whether the frame is in the range set, to decide + * which of the 3 buffers to write the PDump data. Hence there are several + * key time periods in the lifetime of the kernel driver that is enabled + * with PDUMP=1 (flag XX labels below time line): + * + * Events:load init pdump enter exit pdump + * driver done connects range range disconnects + * |__________________|____________|__________|______________|____________|______ . . . + * State: | init phase | no capture | <- capture client connected -> | no capture + * | | | | + * |__________________|____________|______________________________________|_____ . . . + * Flag: | CT,DI | NONE,CT,PR | NONE,CT,PR | See no + * | Never NONE or PR | Never DI | Never DI | capture + * |__________________|____________|______________________________________|_____ . . . + * Write | NONE -undef | -No write | -No write | -Main buf | -No write | See no + * buffer | CT -Init buf | -Main buf | -Main buf | -Main buf | -Main buf | capture + * | PR -undef | -Init buf | -undef | -Init & Main | -undef | + * | DI -Deinit buf | -undef | -undef | -undef | -undef | + * |__________________|____________|___________|______________|___________|_____ . . . + * + * Note: The time line could repeat if the pdump capture application is + * disconnected and reconnected without unloading the driver module. + * + * The DEINIT (DI) | CONTINUOUS (CT) | PERSISTENT (PR) flags must never + * be OR'd together and given to a PDump call since undefined behaviour may + * result and produce an invalid PDump which does not play back cleanly. + * + * The decision on which flag to use comes down to which time period the + * client or server driver makes the PDump write call AND the nature/purpose + * of the data. 
+ * + * Note: This is a simplified time line, not all conditions represented. + * + */ + +typedef IMG_UINT32 PDUMP_FLAGS_T; + +#define PDUMP_FLAGS_NONE PDUMP_NONE /* +#endif + +/* services/srvkm/include/ */ +#include "device.h" + +/* include/ */ +#include "pvrsrv_error.h" + + +#if defined(__KERNEL__) && defined(__linux__) && !defined(__GENKSYMS__) +#define __pvrsrv_defined_struct_enum__ +#include +#endif + +#include "connection_server.h" +/* Pull in pdump flags from services include */ +#include "pdump.h" +#include "pdumpdefs.h" + +/* Define this to enable the PDUMP_HERE trace in the server */ +#undef PDUMP_TRACE + +#if defined(PDUMP_TRACE) +#define PDUMP_HERE_VAR IMG_UINT32 here = 0; +#define PDUMP_HERE(a) { here = (a); if (ui32Flags & PDUMP_FLAGS_DEBUG) PVR_DPF((PVR_DBG_WARNING, "HERE %d", (a))); } +#define PDUMP_HEREA(a) { here = (a); PVR_DPF((PVR_DBG_WARNING, "HERE ALWAYS %d", (a))); } +#else +#define PDUMP_HERE_VAR IMG_UINT32 here = 0; +#define PDUMP_HERE(a) here = (a); +#define PDUMP_HEREA(a) here = (a); +#endif + +#define PDUMP_PD_UNIQUETAG (IMG_HANDLE)0 +#define PDUMP_PT_UNIQUETAG (IMG_HANDLE)0 + +/* Invalid value for PDump block number */ +#define PDUMP_BLOCKNUM_INVALID IMG_UINT32_MAX + +typedef struct _PDUMP_CONNECTION_DATA_ PDUMP_CONNECTION_DATA; + +/* PDump transition events */ +typedef enum _PDUMP_TRANSITION_EVENT_ +{ + PDUMP_TRANSITION_EVENT_NONE, /* No event */ + PDUMP_TRANSITION_EVENT_BLOCK_FINISHED, /* Block mode event, current PDump-block has finished */ + PDUMP_TRANSITION_EVENT_BLOCK_STARTED, /* Block mode event, new PDump-block has started */ + PDUMP_TRANSITION_EVENT_RANGE_ENTERED, /* Transition into capture range */ + PDUMP_TRANSITION_EVENT_RANGE_EXITED, /* Transition out of capture range */ +} PDUMP_TRANSITION_EVENT; + +typedef PVRSRV_ERROR (*PFN_PDUMP_TRANSITION)(void *pvData, void *pvDevice, PDUMP_TRANSITION_EVENT eEvent, IMG_UINT32 ui32PDumpFlags); +typedef void (*PFN_PDUMP_SYNCBLOCKS)(void *pvData, PDUMP_TRANSITION_EVENT eEvent); + 
+typedef PVRSRV_ERROR (*PFN_PDUMP_TRANSITION_FENCE_SYNC)(void *pvData, PDUMP_TRANSITION_EVENT eEvent); + +#ifdef PDUMP + +/*! Macro used to record a panic in the PDump script stream */ +#define PDUMP_PANIC(_id, _msg) do \ + { PVRSRV_ERROR _eE;\ + _eE = PDumpPanic(((RGX_PDUMP_PANIC_ ## _id) & 0xFFFF), _msg, __func__, __LINE__); \ + PVR_LOG_IF_ERROR(_eE, "PDumpPanic");\ + MSC_SUPPRESS_4127\ + } while (0) + +/*! Macro used to record a driver error in the PDump script stream to invalidate the capture */ +#define PDUMP_ERROR(_err, _msg) \ + (void)PDumpCaptureError(_err, _msg, __func__, __LINE__) + +#define SZ_MSG_SIZE_MAX PVRSRV_PDUMP_MAX_COMMENT_SIZE +#define SZ_SCRIPT_SIZE_MAX PVRSRV_PDUMP_MAX_COMMENT_SIZE +#define SZ_FILENAME_SIZE_MAX (PVRSRV_PDUMP_MAX_FILENAME_SIZE+sizeof(PDUMP_PARAM_N_FILE_NAME)) + +#define PDUMP_GET_SCRIPT_STRING() \ + IMG_HANDLE hScript; \ + void *pvScriptAlloc; \ + IMG_UINT32 ui32MaxLen = SZ_SCRIPT_SIZE_MAX-1; \ + pvScriptAlloc = OSAllocMem( SZ_SCRIPT_SIZE_MAX ); \ + if (!pvScriptAlloc) \ + { \ + PVR_DPF((PVR_DBG_ERROR, "PDUMP_GET_SCRIPT_STRING() failed to allocate memory for script buffer")); \ + return PVRSRV_ERROR_OUT_OF_MEMORY; \ + } \ + \ + hScript = (IMG_HANDLE) pvScriptAlloc; + +#define PDUMP_GET_MSG_STRING() \ + IMG_CHAR *pszMsg; \ + void *pvMsgAlloc; \ + IMG_UINT32 ui32MaxLen = SZ_MSG_SIZE_MAX-1; \ + pvMsgAlloc = OSAllocMem( SZ_MSG_SIZE_MAX ); \ + if (!pvMsgAlloc) \ + { \ + PVR_DPF((PVR_DBG_ERROR, "PDUMP_GET_MSG_STRING() failed to allocate memory for message buffer")); \ + return PVRSRV_ERROR_OUT_OF_MEMORY; \ + } \ + pszMsg = (IMG_CHAR *)pvMsgAlloc; + +#define PDUMP_GET_SCRIPT_AND_FILE_STRING() \ + IMG_HANDLE hScript; \ + IMG_CHAR *pszFileName; \ + IMG_UINT32 ui32MaxLenScript = SZ_SCRIPT_SIZE_MAX-1; \ + void *pvScriptAlloc; \ + void *pvFileAlloc; \ + pvScriptAlloc = OSAllocMem( SZ_SCRIPT_SIZE_MAX ); \ + if (!pvScriptAlloc) \ + { \ + PVR_DPF((PVR_DBG_ERROR, "PDUMP_GET_SCRIPT_AND_FILE_STRING() failed to allocate memory for script 
buffer")); \ + return PVRSRV_ERROR_OUT_OF_MEMORY; \ + } \ + \ + hScript = (IMG_HANDLE) pvScriptAlloc; \ + pvFileAlloc = OSAllocMem( SZ_FILENAME_SIZE_MAX ); \ + if (!pvFileAlloc) \ + { \ + PVR_DPF((PVR_DBG_ERROR, "PDUMP_GET_SCRIPT_AND_FILE_STRING() failed to allocate memory for filename buffer")); \ + OSFreeMem(pvScriptAlloc); \ + return PVRSRV_ERROR_OUT_OF_MEMORY; \ + } \ + pszFileName = (IMG_CHAR *)pvFileAlloc; + +#define PDUMP_RELEASE_SCRIPT_STRING() \ + if (pvScriptAlloc) \ + { \ + OSFreeMem(pvScriptAlloc); \ + pvScriptAlloc = NULL; \ + } + +#define PDUMP_RELEASE_MSG_STRING() \ + if (pvMsgAlloc) \ + { \ + OSFreeMem(pvMsgAlloc); \ + pvMsgAlloc = NULL; \ + } + +#define PDUMP_RELEASE_FILE_STRING() \ + if (pvFileAlloc) \ + { \ + OSFreeMem(pvFileAlloc); \ + pvFileAlloc = NULL; \ + } + +#define PDUMP_RELEASE_SCRIPT_AND_FILE_STRING() \ + if (pvScriptAlloc) \ + { \ + OSFreeMem(pvScriptAlloc); \ + pvScriptAlloc = NULL; \ + } \ + if (pvFileAlloc) \ + { \ + OSFreeMem(pvFileAlloc); \ + pvFileAlloc = NULL; \ + } + + +/* Shared across pdump_x files */ +PVRSRV_ERROR PDumpInitCommon(void); +void PDumpDeInitCommon(void); +PVRSRV_ERROR PDumpReady(void); +void PDumpGetParameterZeroPageInfo(PDUMP_FILEOFFSET_T *puiZeroPageOffset, + size_t *puiZeroPageSize, + const IMG_CHAR **ppszZeroPageFilename); + +void PDumpConnectionNotify(void); +void PDumpDisconnectionNotify(void); + +void PDumpStopInitPhase(void); +PVRSRV_ERROR PDumpSetFrameKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 ui32Frame); +PVRSRV_ERROR PDumpGetFrameKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32* pui32Frame); +PVRSRV_ERROR PDumpCommentKM(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpSetDefaultCaptureParamsKM(IMG_UINT32 ui32Mode, + IMG_UINT32 ui32Start, + IMG_UINT32 ui32End, + IMG_UINT32 ui32Interval, + IMG_UINT32 ui32MaxParamFileSize); + + +PVRSRV_ERROR PDumpReg32(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32RegAddr, + 
IMG_UINT32 ui32RegValue, + IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpReg64(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32RegAddr, + IMG_UINT64 ui64RegValue, + IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpRegLabelToReg64(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32RegDst, + IMG_UINT32 ui32RegSrc, + IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpPhysHandleToInternalVar64(IMG_CHAR *pszInternalVar, + IMG_HANDLE hPdumpPages, + IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpMemLabelToInternalVar64(IMG_CHAR *pszInternalVar, + PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpInternalVarToMemLabel(PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_CHAR *pszInternalVar, + IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpWriteVarORValueOp(const IMG_CHAR *pszInternalVariable, + const IMG_UINT64 ui64Value, + const IMG_UINT32 ui32PDumpFlags); + +PVRSRV_ERROR PDumpWriteVarANDValueOp(const IMG_CHAR *pszInternalVariable, + const IMG_UINT64 ui64Value, + const IMG_UINT32 ui32PDumpFlags); + +PVRSRV_ERROR PDumpWriteVarSHRValueOp(const IMG_CHAR *pszInternalVariable, + const IMG_UINT64 ui64Value, + const IMG_UINT32 ui32PDumpFlags); + +PVRSRV_ERROR PDumpWriteVarORVarOp(const IMG_CHAR *pszInternalVar, + const IMG_CHAR *pszInternalVar2, + const IMG_UINT32 ui32PDumpFlags); + +PVRSRV_ERROR PDumpWriteVarANDVarOp(const IMG_CHAR *pszInternalVar, + const IMG_CHAR *pszInternalVar2, + const IMG_UINT32 ui32PDumpFlags); + +PVRSRV_ERROR PDumpInternalVarToReg32(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32Reg, + IMG_CHAR *pszInternalVar, + IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpInternalVarToReg64(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32Reg, + IMG_CHAR *pszInternalVar, + IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpMemLabelToMem32(PMR *psPMRSource, + PMR *psPMRDest, + IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource, + IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest, + IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpMemLabelToMem64(PMR *psPMRSource, + PMR *psPMRDest, + 
IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource, + IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest, + IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpRegLabelToMem32(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32Reg, + PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpRegLabelToMem64(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32Reg, + PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpRegLabelToInternalVar(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32Reg, + IMG_CHAR *pszInternalVar, + IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpSAW(IMG_CHAR *pszDevSpaceName, + IMG_UINT32 ui32HPOffsetBytes, + IMG_UINT32 ui32NumSaveBytes, + IMG_CHAR *pszOutfileName, + IMG_UINT32 ui32OutfileOffsetByte, + PDUMP_FLAGS_T uiPDumpFlags); + +PVRSRV_ERROR PDumpRegPolKM(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32RegValue, + IMG_UINT32 ui32Mask, + IMG_UINT32 ui32Flags, + PDUMP_POLL_OPERATOR eOperator); + +PVRSRV_ERROR PDumpBitmapKM(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32Width, + IMG_UINT32 ui32Height, + IMG_UINT32 ui32StrideInBytes, + IMG_DEV_VIRTADDR sDevBaseAddr, + IMG_UINT32 ui32MMUContextID, + IMG_UINT32 ui32Size, + PDUMP_PIXEL_FORMAT ePixelFormat, + IMG_UINT32 ui32AddrMode, + IMG_UINT32 ui32PDumpFlags); + + +/**************************************************************************/ /*! +@Function PDumpImageDescriptor +@Description PDumps image data out as an IMGBv2 data section +@Input psDeviceNode Pointer to device node. +@Input ui32MMUContextID PDUMP MMU context ID. +@Input pszSABFileName Pointer to string containing file name of + Image being SABed +@Input sData GPU virtual address of this surface. 
+@Input ui32DataSize Image data size +@Input ui32LogicalWidth Image logical width +@Input ui32LogicalHeight Image logical height +@Input ui32PhysicalWidth Image physical width +@Input ui32PhysicalHeight Image physical height +@Input ePixFmt Image pixel format +@Input eFBCompression FB compression mode +@Input paui32FBCClearColour FB clear colour (Only applicable to FBC surfaces) +@Input eFBCSwizzle FBC channel swizzle (Only applicable to FBC surfaces) +@Input sHeader GPU virtual address of the headers of this + surface (Only applicable to FBC surfaces) +@Input ui32HeaderSize Header size (Only applicable to FBC surfaces) +@Input ui32PDumpFlags PDUMP flags +@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_ + error code +*/ /***************************************************************************/ +PVRSRV_ERROR PDumpImageDescriptor(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32MMUContextID, + IMG_CHAR *pszSABFileName, + IMG_DEV_VIRTADDR sData, + IMG_UINT32 ui32DataSize, + IMG_UINT32 ui32LogicalWidth, + IMG_UINT32 ui32LogicalHeight, + IMG_UINT32 ui32PhysicalWidth, + IMG_UINT32 ui32PhysicalHeight, + PDUMP_PIXEL_FORMAT ePixFmt, + IMG_MEMLAYOUT eMemLayout, + IMG_FB_COMPRESSION eFBCompression, + const IMG_UINT32 *paui32FBCClearColour, + PDUMP_FBC_SWIZZLE eFBCSwizzle, + IMG_DEV_VIRTADDR sHeader, + IMG_UINT32 ui32HeaderSize, + IMG_UINT32 ui32PDumpFlags); + +/**************************************************************************/ /*! +@Function PDumpDataDescriptor +@Description PDumps non-image data out as an IMGCv1 data section +@Input psDeviceNode Pointer to device node. +@Input ui32MMUContextID PDUMP MMU context ID. +@Input pszSABFileName Pointer to string containing file name of + Data being SABed +@Input sData GPU virtual address of this data. 
+@Input ui32DataSize Data size +@Input ui32HeaderType Header type +@Input ui32ElementType Data element type +@Input ui32ElementCount Number of data elements +@Input ui32PDumpFlags PDUMP flags +@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_ + error code +*/ /***************************************************************************/ +PVRSRV_ERROR PDumpDataDescriptor(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32MMUContextID, + IMG_CHAR *pszSABFileName, + IMG_DEV_VIRTADDR sData, + IMG_UINT32 ui32DataSize, + IMG_UINT32 ui32HeaderType, + IMG_UINT32 ui32ElementType, + IMG_UINT32 ui32ElementCount, + IMG_UINT32 ui32PDumpFlags); + + +PVRSRV_ERROR PDumpReadRegKM(IMG_CHAR *pszPDumpRegName, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32Address, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32PDumpFlags); + +__printf(2, 3) +PVRSRV_ERROR PDumpCommentWithFlagsNoLock(IMG_UINT32 ui32Flags, + IMG_CHAR* pszFormat, + ...); + +PVRSRV_ERROR PDumpCommentWithFlagsNoLockVA(IMG_UINT32 ui32Flags, + const IMG_CHAR * pszFormat, + va_list args); + +__printf(2, 3) +PVRSRV_ERROR PDumpCommentWithFlags(IMG_UINT32 ui32Flags, + IMG_CHAR* pszFormat, + ...); + +PVRSRV_ERROR PDumpCommentWithFlagsVA(IMG_UINT32 ui32Flags, + const IMG_CHAR * pszFormat, + va_list args); + +PVRSRV_ERROR PDumpPanic(IMG_UINT32 ui32PanicNo, + IMG_CHAR* pszPanicMsg, + const IMG_CHAR* pszPPFunc, + IMG_UINT32 ui32PPline); + +PVRSRV_ERROR PDumpCaptureError(PVRSRV_ERROR ui32ErrorNo, + IMG_CHAR* pszErrorMsg, + const IMG_CHAR* pszPPFunc, + IMG_UINT32 ui32PPline); + +PVRSRV_ERROR PDumpPDReg(PDUMP_MMU_ATTRIB *psMMUAttrib, + IMG_UINT32 ui32Reg, + IMG_UINT32 ui32dwData, + IMG_HANDLE hUniqueTag); + +PVRSRV_ERROR PDumpPDRegWithFlags(PDUMP_MMU_ATTRIB *psMMUAttrib, + IMG_UINT32 ui32Reg, + IMG_UINT32 ui32Data, + IMG_UINT32 ui32Flags, + IMG_HANDLE hUniqueTag); + +PVRSRV_ERROR PDumpIsLastCaptureFrameKM(IMG_BOOL *pbIsLastCaptureFrame); + +PVRSRV_ERROR PDumpGetStateKM(IMG_UINT64 *ui64State); + 
+PVRSRV_ERROR PDumpGetCurrentBlockKM(IMG_UINT32 *pui32CurrentBlock); + +PVRSRV_ERROR PDumpForceCaptureStopKM(void); + +PVRSRV_ERROR PDumpIsCaptureFrameKM(IMG_BOOL *bIsCaptureRange); + +PVRSRV_ERROR PDumpRegRead32ToInternalVar(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32RegOffset, + IMG_CHAR *pszInternalVar, + IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpRegRead32(IMG_CHAR *pszPDumpRegName, + const IMG_UINT32 dwRegOffset, + IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpRegRead64(IMG_CHAR *pszPDumpRegName, + const IMG_UINT32 dwRegOffset, + IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpRegRead64ToInternalVar(IMG_CHAR *pszPDumpRegName, + IMG_CHAR *pszInternalVar, + const IMG_UINT32 dwRegOffset, + IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpIDLWithFlags(IMG_UINT32 ui32Clocks, IMG_UINT32 ui32Flags); +PVRSRV_ERROR PDumpIDL(IMG_UINT32 ui32Clocks); + +PVRSRV_ERROR PDumpRegBasedCBP(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32RegOffset, + IMG_UINT32 ui32WPosVal, + IMG_UINT32 ui32PacketSize, + IMG_UINT32 ui32BufferSize, + IMG_UINT32 ui32Flags); + +PVRSRV_ERROR PDumpTRG(IMG_CHAR *pszMemSpace, + IMG_UINT32 ui32MMUCtxID, + IMG_UINT32 ui32RegionID, + IMG_BOOL bEnable, + IMG_UINT64 ui64VAddr, + IMG_UINT64 ui64LenBytes, + IMG_UINT32 ui32XStride, + IMG_UINT32 ui32Flags); + +void PDumpLock(void); +void PDumpUnlock(void); + +PVRSRV_ERROR PDumpRegCondStr(IMG_CHAR **ppszPDumpCond, + IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32RegValue, + IMG_UINT32 ui32Mask, + IMG_UINT32 ui32Flags, + PDUMP_POLL_OPERATOR eOperator); + +PVRSRV_ERROR PDumpInternalValCondStr(IMG_CHAR **ppszPDumpCond, + IMG_CHAR *pszInternalVar, + IMG_UINT32 ui32RegValue, + IMG_UINT32 ui32Mask, + IMG_UINT32 ui32Flags, + PDUMP_POLL_OPERATOR eOperator); + +PVRSRV_ERROR PDumpIfKM(IMG_CHAR *pszPDumpCond, IMG_UINT32 ui32PDumpFlags); +PVRSRV_ERROR PDumpElseKM(IMG_CHAR *pszPDumpCond, IMG_UINT32 ui32PDumpFlags); +PVRSRV_ERROR PDumpFiKM(IMG_CHAR *pszPDumpCond, IMG_UINT32 ui32PDumpFlags); +PVRSRV_ERROR 
PDumpStartDoLoopKM(IMG_UINT32 ui32PDumpFlags); +PVRSRV_ERROR PDumpEndDoWhileLoopKM(IMG_CHAR *pszPDumpWhileCond, IMG_UINT32 ui32PDumpFlags); +PVRSRV_ERROR PDumpCOMCommand(IMG_UINT32 ui32PDumpFlags, const IMG_CHAR *pszPDump); + +void PDumpPowerTransitionStart(void); +void PDumpPowerTransitionEnd(void); +IMG_BOOL PDumpInPowerTransition(void); +IMG_BOOL PDumpIsContCaptureOn(void); + +/*! + * @name PDumpWriteParameter + * @brief General function for writing to PDump stream. Used + * mainly for memory dumps to parameter stream. + * Usually more convenient to use PDumpWriteScript below + * for the script stream. + * @param psui8Data - data to write + * @param ui32Size - size of write + * @param ui32Flags - PDump flags + * @param pui32FileOffset - on return contains the file offset to + * the start of the parameter data + * @param aszFilenameStr - pointer to at least a 20 char buffer to + * return the parameter filename + * @return error + */ +PVRSRV_ERROR PDumpWriteParameter(IMG_UINT8 *psui8Data, IMG_UINT32 ui32Size, + IMG_UINT32 ui32Flags, IMG_UINT32* pui32FileOffset, + IMG_CHAR* aszFilenameStr); + +/*! + * @name PDumpWriteScript + * @brief Write an PDumpOS created string to the "script" output stream + * @param hString - PDump OS layer handle of string buffer to write + * @param ui32Flags - PDump flags + * @return IMG_TRUE on success. + */ +IMG_BOOL PDumpWriteScript(IMG_HANDLE hString, IMG_UINT32 ui32Flags); + +/**************************************************************************/ /*! +@Function PDumpSNPrintf +@Description Printf to OS-specific PDump state buffer. This function is + only called if PDUMP is defined. 
+@Input hBuf handle of buffer to write into +@Input ui32ScriptSizeMax maximum size of data to write (chars) +@Input pszFormat format string +@Return None +*/ /**************************************************************************/ +__printf(3, 4) +PVRSRV_ERROR PDumpSNPrintf(IMG_HANDLE hBuf, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, ...); + + +/* + PDumpWriteShiftedMaskedValue(): + + loads the "reference" address into an internal PDump register, + optionally shifts it right, + optionally shifts it left, + optionally masks it + then finally writes the computed value to the given destination address + + i.e. it emits pdump language equivalent to this expression: + + dest = ((&ref) >> SHRamount << SHLamount) & MASK +*/ +PVRSRV_ERROR +PDumpWriteShiftedMaskedValue(const IMG_CHAR *pszDestRegspaceName, + const IMG_CHAR *pszDestSymbolicName, + IMG_DEVMEM_OFFSET_T uiDestOffset, + const IMG_CHAR *pszRefRegspaceName, + const IMG_CHAR *pszRefSymbolicName, + IMG_DEVMEM_OFFSET_T uiRefOffset, + IMG_UINT32 uiSHRAmount, + IMG_UINT32 uiSHLAmount, + IMG_UINT32 uiMask, + IMG_DEVMEM_SIZE_T uiWordSize, + IMG_UINT32 uiPDumpFlags); + +/* + PDumpWriteSymbAddress(): + writes the address of the "reference" to the offset given +*/ +PVRSRV_ERROR +PDumpWriteSymbAddress(const IMG_CHAR *pszDestSpaceName, + IMG_DEVMEM_OFFSET_T uiDestOffset, + const IMG_CHAR *pszRefSymbolicName, + IMG_DEVMEM_OFFSET_T uiRefOffset, + const IMG_CHAR *pszPDumpDevName, + IMG_UINT32 ui32WordSize, + IMG_UINT32 ui32AlignShift, + IMG_UINT32 ui32Shift, + IMG_UINT32 uiPDumpFlags); + +/* Register the connection with the PDump subsystem */ +PVRSRV_ERROR +PDumpRegisterConnection(void *hSyncPrivData, + PFN_PDUMP_SYNCBLOCKS pfnPDumpSyncBlocks, + PDUMP_CONNECTION_DATA **ppsPDumpConnectionData); + +/* Unregister the connection with the PDump subsystem */ +void +PDumpUnregisterConnection(PDUMP_CONNECTION_DATA *psPDumpConnectionData); + +/* Register for notification of PDump Transition into/out of capture range */ 
+PVRSRV_ERROR +PDumpRegisterTransitionCallback(PDUMP_CONNECTION_DATA *psPDumpConnectionData, + PFN_PDUMP_TRANSITION pfnCallback, + void *hPrivData, + void *pvDevice, + void **ppvHandle); + +/* Unregister notification of PDump Transition */ +void +PDumpUnregisterTransitionCallback(void *pvHandle); + +PVRSRV_ERROR +PDumpRegisterTransitionCallbackFenceSync(void *hPrivData, + PFN_PDUMP_TRANSITION_FENCE_SYNC pfnCallback, + void **ppvHandle); + +void +PDumpUnregisterTransitionCallbackFenceSync(void *pvHandle); + +/* Notify PDump of a Transition into/out of capture range */ +PVRSRV_ERROR +PDumpTransition(PDUMP_CONNECTION_DATA *psPDumpConnectionData, + PDUMP_TRANSITION_EVENT eEvent, + IMG_UINT32 ui32PDumpFlags); + +/* _ui32PDumpFlags must be a variable in the local scope */ +#define PDUMP_LOCK(_ui32PDumpFlags) do \ + { if ((_ui32PDumpFlags & PDUMP_FLAGS_PDUMP_LOCK_HELD) == 0)\ + {\ + PDumpLock();\ + }\ + MSC_SUPPRESS_4127\ + } while (0) + +/* _ui32PDumpFlags must be a variable in the local scope */ +#define PDUMP_UNLOCK(_ui32PDumpFlags) do \ + { if ((_ui32PDumpFlags & PDUMP_FLAGS_PDUMP_LOCK_HELD) == 0)\ + {\ + PDumpUnlock();\ + }\ + MSC_SUPPRESS_4127\ + } while (0) + +#define PDUMPINIT PDumpInitCommon +#define PDUMPDEINIT PDumpDeInitCommon +#define PDUMPREG32 PDumpReg32 +#define PDUMPREG64 PDumpReg64 +#define PDUMPREGREAD32 PDumpRegRead32 +#define PDUMPREGREAD64 PDumpRegRead64 +#define PDUMPCOMMENT(...) 
PDumpCommentWithFlags(PDUMP_FLAGS_CONTINUOUS, __VA_ARGS__) +#define PDUMPCOMMENTWITHFLAGS PDumpCommentWithFlags +#define PDUMPREGPOL PDumpRegPolKM +#define PDUMPPDREG PDumpPDReg +#define PDUMPPDREGWITHFLAGS PDumpPDRegWithFlags +#define PDUMPREGBASEDCBP PDumpRegBasedCBP +#define PDUMPENDINITPHASE PDumpStopInitPhase +#define PDUMPIDLWITHFLAGS PDumpIDLWithFlags +#define PDUMPIDL PDumpIDL +#define PDUMPPOWCMDSTART PDumpPowerTransitionStart +#define PDUMPPOWCMDEND PDumpPowerTransitionEnd +#define PDUMPPOWCMDINTRANS PDumpInPowerTransition +#define PDUMPCOM PDumpCOMCommand + +/* _ui32PDumpFlags must be a variable in the local scope */ +#define PDUMP_BLKSTART(_ui32PDumpFlags) do \ + { PDUMP_LOCK(_ui32PDumpFlags);\ + _ui32PDumpFlags |= PDUMP_FLAGS_PDUMP_LOCK_HELD;\ + MSC_SUPPRESS_4127\ + } while (0) + +/* _ui32PDumpFlags must be a variable in the local scope */ +#define PDUMP_BLKEND(_ui32PDumpFlags) do \ + { _ui32PDumpFlags &= ~PDUMP_FLAGS_PDUMP_LOCK_HELD;\ + PDUMP_UNLOCK(_ui32PDumpFlags);\ + MSC_SUPPRESS_4127\ + } while (0) + +/* _ui32PDumpFlags must be a variable in the local scope */ +#define PDUMPIF(_msg,_ui32PDumpFlags) do \ + {PDUMP_BLKSTART(_ui32PDumpFlags);\ + PDumpIfKM(_msg,_ui32PDumpFlags);\ + MSC_SUPPRESS_4127\ + } while (0) + +#define PDUMPELSE PDumpElseKM + +/* _ui32PDumpFlags must be a variable in the local scope */ +#define PDUMPFI(_msg,_ui32PDumpFlags) do \ + { PDumpFiKM(_msg,_ui32PDumpFlags);\ + PDUMP_BLKEND(_ui32PDumpFlags);\ + MSC_SUPPRESS_4127\ + } while (0) + +#else +/* + We should be clearer about which functions can be called + across the bridge as this looks rather unbalanced +*/ + +/*! Macro used to record a panic in the PDump script stream */ +#define PDUMP_PANIC(_id, _msg) ((void)0) + +/*! 
Macro used to record a driver error in the PDump script stream to invalidate the capture */
+#define PDUMP_ERROR(_err, _msg) ((void)0)
+
+/* With PDUMP disabled, every entry point below collapses to a no-op stub
+ * that succeeds (PVRSRV_OK) and marks all parameters unreferenced so the
+ * callers compile cleanly with -Wunused-parameter. */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpInitCommon)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpInitCommon(void)
+{
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpConnectionNotify)
+#endif
+static INLINE void
+PDumpConnectionNotify(void)
+{
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpDisconnectionNotify)
+#endif
+static INLINE void
+PDumpDisconnectionNotify(void)
+{
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpLock)
+#endif
+static INLINE void
+PDumpLock(void)
+{
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpUnlock)
+#endif
+static INLINE void
+PDumpUnlock(void)
+{
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpStopInitPhase)
+#endif
+static INLINE void
+PDumpStopInitPhase(void)
+{
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpSetFrameKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpSetFrameKM(CONNECTION_DATA *psConnection,
+                PVRSRV_DEVICE_NODE *psDevNode,
+                IMG_UINT32 ui32Frame)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	/* FIX: psDevNode was previously left unreferenced -> unused-parameter warning */
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+	PVR_UNREFERENCED_PARAMETER(ui32Frame);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpGetFrameKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpGetFrameKM(CONNECTION_DATA *psConnection,
+                PVRSRV_DEVICE_NODE *psDeviceNode,
+                IMG_UINT32* pui32Frame)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	/* FIX: psDeviceNode was previously left unreferenced -> unused-parameter warning */
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(pui32Frame);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpCommentKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpCommentKM(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags)
+{
+	PVR_UNREFERENCED_PARAMETER(pszComment);
+	PVR_UNREFERENCED_PARAMETER(ui32Flags);
+	return PVRSRV_OK;
+}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpSetDefaultCaptureParamsKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpSetDefaultCaptureParamsKM(IMG_UINT32 ui32Mode,
+                               IMG_UINT32 ui32Start,
+                               IMG_UINT32 ui32End,
+                               IMG_UINT32 ui32Interval,
+                               IMG_UINT32 ui32MaxParamFileSize)
+{
+	PVR_UNREFERENCED_PARAMETER(ui32Mode);
+	PVR_UNREFERENCED_PARAMETER(ui32Start);
+	PVR_UNREFERENCED_PARAMETER(ui32End);
+	PVR_UNREFERENCED_PARAMETER(ui32Interval);
+	PVR_UNREFERENCED_PARAMETER(ui32MaxParamFileSize);
+
+	return PVRSRV_OK;
+}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpPanic)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpPanic(IMG_UINT32 ui32PanicNo,
+           IMG_CHAR* pszPanicMsg,
+           const IMG_CHAR* pszPPFunc,
+           IMG_UINT32 ui32PPline)
+{
+	PVR_UNREFERENCED_PARAMETER(ui32PanicNo);
+	PVR_UNREFERENCED_PARAMETER(pszPanicMsg);
+	PVR_UNREFERENCED_PARAMETER(pszPPFunc);
+	PVR_UNREFERENCED_PARAMETER(ui32PPline);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpCaptureError)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpCaptureError(PVRSRV_ERROR ui32ErrorNo,
+                  IMG_CHAR* pszErrorMsg,
+                  const IMG_CHAR* pszPPFunc,
+                  IMG_UINT32 ui32PPline)
+{
+	PVR_UNREFERENCED_PARAMETER(ui32ErrorNo);
+	PVR_UNREFERENCED_PARAMETER(pszErrorMsg);
+	PVR_UNREFERENCED_PARAMETER(pszPPFunc);
+	PVR_UNREFERENCED_PARAMETER(ui32PPline);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpIsLastCaptureFrameKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpIsLastCaptureFrameKM(IMG_BOOL *pbIsLastCaptureFrame)
+{
+	*pbIsLastCaptureFrame = IMG_FALSE;
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpGetStateKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpGetStateKM(IMG_UINT64 *ui64State)
+{
+	*ui64State = 0;
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpIsCaptureFrameKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpIsCaptureFrameKM(IMG_BOOL *bIsCapturing)
+{
+	*bIsCapturing = IMG_FALSE;
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpGetCurrentBlockKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpGetCurrentBlockKM(IMG_UINT32 *pui32BlockNum)
+{
+	*pui32BlockNum = PDUMP_BLOCKNUM_INVALID;
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpForceCaptureStopKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpForceCaptureStopKM(void)
+{
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpBitmapKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpBitmapKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+              IMG_CHAR *pszFileName,
+              IMG_UINT32 ui32FileOffset,
+              IMG_UINT32 ui32Width,
+              IMG_UINT32 ui32Height,
+              IMG_UINT32 ui32StrideInBytes,
+              IMG_DEV_VIRTADDR sDevBaseAddr,
+              IMG_UINT32 ui32MMUContextID,
+              IMG_UINT32 ui32Size,
+              PDUMP_PIXEL_FORMAT ePixelFormat,
+              IMG_UINT32 ui32AddrMode,
+              IMG_UINT32 ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(pszFileName);
+	PVR_UNREFERENCED_PARAMETER(ui32FileOffset);
+	PVR_UNREFERENCED_PARAMETER(ui32Width);
+	PVR_UNREFERENCED_PARAMETER(ui32Height);
+	PVR_UNREFERENCED_PARAMETER(ui32StrideInBytes);
+	PVR_UNREFERENCED_PARAMETER(sDevBaseAddr);
+	PVR_UNREFERENCED_PARAMETER(ui32MMUContextID);
+	PVR_UNREFERENCED_PARAMETER(ui32Size);
+	PVR_UNREFERENCED_PARAMETER(ePixelFormat);
+	PVR_UNREFERENCED_PARAMETER(ui32AddrMode);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpImageDescriptor)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpImageDescriptor(PVRSRV_DEVICE_NODE *psDeviceNode,
+                     IMG_UINT32 ui32MMUContextID,
+                     IMG_CHAR *pszSABFileName,
+                     IMG_DEV_VIRTADDR sData,
+                     IMG_UINT32 ui32DataSize,
+                     IMG_UINT32 ui32LogicalWidth,
+                     IMG_UINT32 ui32LogicalHeight,
+                     IMG_UINT32 ui32PhysicalWidth,
+                     IMG_UINT32 ui32PhysicalHeight,
+                     PDUMP_PIXEL_FORMAT ePixFmt,
+                     IMG_MEMLAYOUT eMemLayout,
+                     IMG_FB_COMPRESSION eFBCompression,
+                     const IMG_UINT32 *paui32FBCClearColour,
+                     PDUMP_FBC_SWIZZLE eFBCSwizzle,
+                     IMG_DEV_VIRTADDR sHeader,
+                     IMG_UINT32 ui32HeaderSize,
+                     IMG_UINT32 ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(ui32MMUContextID);
+	PVR_UNREFERENCED_PARAMETER(pszSABFileName);
+	PVR_UNREFERENCED_PARAMETER(sData);
+	PVR_UNREFERENCED_PARAMETER(ui32DataSize);
+	PVR_UNREFERENCED_PARAMETER(ui32LogicalWidth);
+	PVR_UNREFERENCED_PARAMETER(ui32LogicalHeight);
+	PVR_UNREFERENCED_PARAMETER(ui32PhysicalWidth);
+	PVR_UNREFERENCED_PARAMETER(ui32PhysicalHeight);
+	PVR_UNREFERENCED_PARAMETER(ePixFmt);
+	PVR_UNREFERENCED_PARAMETER(eMemLayout);
+	PVR_UNREFERENCED_PARAMETER(eFBCompression);
+	PVR_UNREFERENCED_PARAMETER(paui32FBCClearColour);
+	PVR_UNREFERENCED_PARAMETER(eFBCSwizzle);
+	PVR_UNREFERENCED_PARAMETER(sHeader);
+	PVR_UNREFERENCED_PARAMETER(ui32HeaderSize);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpDataDescriptor)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpDataDescriptor(PVRSRV_DEVICE_NODE *psDeviceNode,
+                    IMG_UINT32 ui32MMUContextID,
+                    IMG_CHAR *pszSABFileName,
+                    IMG_DEV_VIRTADDR sData,
+                    IMG_UINT32 ui32DataSize,
+                    IMG_UINT32 ui32ElementType,
+                    IMG_UINT32 ui32ElementCount,
+                    IMG_UINT32 ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(ui32MMUContextID);
+	PVR_UNREFERENCED_PARAMETER(pszSABFileName);
+	PVR_UNREFERENCED_PARAMETER(sData);
+	PVR_UNREFERENCED_PARAMETER(ui32DataSize);
+	PVR_UNREFERENCED_PARAMETER(ui32ElementType);
+	PVR_UNREFERENCED_PARAMETER(ui32ElementCount);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpRegisterConnection)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpRegisterConnection(void *hSyncPrivData,
+                        PFN_PDUMP_SYNCBLOCKS pfnPDumpSyncBlocks,
+                        PDUMP_CONNECTION_DATA **ppsPDumpConnectionData)
+{
+	PVR_UNREFERENCED_PARAMETER(hSyncPrivData);
+	PVR_UNREFERENCED_PARAMETER(pfnPDumpSyncBlocks);
+	PVR_UNREFERENCED_PARAMETER(ppsPDumpConnectionData);
+
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpUnregisterConnection)
+#endif
+static INLINE void
+PDumpUnregisterConnection(PDUMP_CONNECTION_DATA *psPDumpConnectionData)
+{
+	PVR_UNREFERENCED_PARAMETER(psPDumpConnectionData);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpRegisterTransitionCallback)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpRegisterTransitionCallback(PDUMP_CONNECTION_DATA *psPDumpConnectionData,
+                                PFN_PDUMP_TRANSITION pfnCallback,
+                                void *hPrivData,
+                                void *pvDevice,
+                                void **ppvHandle)
+{
+	PVR_UNREFERENCED_PARAMETER(psPDumpConnectionData);
+	PVR_UNREFERENCED_PARAMETER(pfnCallback);
+	PVR_UNREFERENCED_PARAMETER(hPrivData);
+	PVR_UNREFERENCED_PARAMETER(pvDevice);
+	PVR_UNREFERENCED_PARAMETER(ppvHandle);
+
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpUnregisterTransitionCallback)
+#endif
+static INLINE void
+PDumpUnregisterTransitionCallback(void *pvHandle)
+{
+	PVR_UNREFERENCED_PARAMETER(pvHandle);
+}
+
+#ifdef INLINE_IS_PRAGMA
+/* FIX: pragma previously named PDumpRegisterTransitionCallback (copy-paste);
+ * it must name the function defined directly below. */
+#pragma inline(PDumpRegisterTransitionCallbackFenceSync)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpRegisterTransitionCallbackFenceSync(void *hPrivData,
+                                         PFN_PDUMP_TRANSITION_FENCE_SYNC pfnCallback,
+                                         void **ppvHandle)
+{
+	PVR_UNREFERENCED_PARAMETER(pfnCallback);
+	PVR_UNREFERENCED_PARAMETER(hPrivData);
+	PVR_UNREFERENCED_PARAMETER(ppvHandle);
+
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpUnregisterTransitionCallbackFenceSync)
+#endif
+static INLINE void
+PDumpUnregisterTransitionCallbackFenceSync(void *pvHandle)
+{
+	PVR_UNREFERENCED_PARAMETER(pvHandle);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpTransition)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpTransition(PDUMP_CONNECTION_DATA *psPDumpConnectionData,
+                PDUMP_TRANSITION_EVENT eEvent,
+                IMG_UINT32 ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psPDumpConnectionData);
+	PVR_UNREFERENCED_PARAMETER(eEvent);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+	return PVRSRV_OK;
+}
+
+#if defined(__linux__) || defined(GCC_IA32) || defined(GCC_ARM) || defined(__QNXNTO__) || defined(INTEGRITY_OS)
+	#define PDUMPINIT PDumpInitCommon
+	#define PDUMPDEINIT(args...)
+	#define PDUMPREG32(args...)
+ #define PDUMPREG64(args...) + #define PDUMPREGREAD32(args...) + #define PDUMPREGREAD64(args...) + #define PDUMPCOMMENT(args...) + #define PDUMPREGPOL(args...) + #define PDUMPPDREG(args...) + #define PDUMPPDREGWITHFLAGS(args...) + #define PDUMPSYNC(args...) + #define PDUMPCOPYTOMEM(args...) + #define PDUMPWRITE(args...) + #define PDUMPREGBASEDCBP(args...) + #define PDUMPCOMMENTWITHFLAGS(args...) + #define PDUMPENDINITPHASE(args...) + #define PDUMPIDLWITHFLAGS(args...) + #define PDUMPIDL(args...) + #define PDUMPPOWCMDSTART(args...) + #define PDUMPPOWCMDEND(args...) + #define PDUMP_LOCK(args...) + #define PDUMP_UNLOCK(args...) + #define PDUMPIF(args...) + #define PDUMPFI(args...) + #define PDUMPCOM(args...) +#else + #error Compiler not specified +#endif + +#endif /* PDUMP */ + +#endif /* PDUMP_KM_H */ + +/****************************************************************************** + End of file (pdump_km.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/pdump_mmu.h b/drivers/gpu/drm/phytium/octopus/pdump_mmu.h new file mode 100644 index 000000000000..5e29208f8857 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pdump_mmu.h @@ -0,0 +1,171 @@ +/**************************************************************************/ /*! +@File +@Title Common MMU Management +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements basic low level control of MMU. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /***************************************************************************/ + +#ifndef SRVKM_PDUMP_MMU_H +#define SRVKM_PDUMP_MMU_H + +/* services/server/include/ */ +#include "pdump_symbolicaddr.h" +/* include/ */ +#include "img_types.h" +#include "pvrsrv_error.h" + +#include "mmu_common.h" + +/* + * PDUMP MMU attributes + */ +typedef struct _PDUMP_MMU_ATTRIB_DEVICE_ +{ + /* Per-Device Pdump attribs */ + + /*!< Pdump memory bank name */ + IMG_CHAR *pszPDumpMemDevName; + + /*!< Pdump register bank name */ + IMG_CHAR *pszPDumpRegDevName; + +} PDUMP_MMU_ATTRIB_DEVICE; + +typedef struct _PDUMP_MMU_ATTRIB_CONTEXT_ +{ + IMG_UINT32 ui32Dummy; +} PDUMP_MMU_ATTRIB_CONTEXT; + +typedef struct _PDUMP_MMU_ATTRIB_HEAP_ +{ + /* data page info */ + IMG_UINT32 ui32DataPageMask; +} PDUMP_MMU_ATTRIB_HEAP; + +typedef struct _PDUMP_MMU_ATTRIB_ +{ + struct _PDUMP_MMU_ATTRIB_DEVICE_ sDevice; + struct _PDUMP_MMU_ATTRIB_CONTEXT_ sContext; + struct _PDUMP_MMU_ATTRIB_HEAP_ sHeap; +} PDUMP_MMU_ATTRIB; + +#if defined(PDUMP) +PVRSRV_ERROR +PDumpMMUMalloc(const IMG_CHAR *pszPDumpDevName, + MMU_LEVEL eMMULevel, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32Align, + PDUMP_MMU_TYPE eMMUType); + +PVRSRV_ERROR +PDumpMMUFree(const IMG_CHAR *pszPDumpDevName, + MMU_LEVEL eMMULevel, + IMG_DEV_PHYADDR *psDevPAddr, + PDUMP_MMU_TYPE eMMUType); + +PVRSRV_ERROR +PDumpPTBaseObjectToMem64(const IMG_CHAR *pszPDumpDevName, + PMR *psPMRDest, + IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource, + IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest, + IMG_UINT32 ui32Flags, + MMU_LEVEL eMMULevel, + IMG_UINT64 ui64PxSymAddr, + IMG_UINT64 ui64PxOffset); + +PVRSRV_ERROR +PDumpMMUDumpPxEntries(MMU_LEVEL eMMULevel, + const IMG_CHAR *pszPDumpDevName, + void *pvPxMem, + IMG_DEV_PHYADDR sPxDevPAddr, + IMG_UINT32 uiFirstEntry, + IMG_UINT32 uiNumEntries, + const IMG_CHAR *pszMemspaceName, + const IMG_CHAR *pszSymbolicAddr, + IMG_UINT64 uiSymbolicAddrOffset, + IMG_UINT32 uiBytesPerEntry, + IMG_UINT32 uiLog2Align, + 
IMG_UINT32 uiAddrShift, + IMG_UINT64 uiAddrMask, + IMG_UINT64 uiPxEProtMask, + IMG_UINT64 uiDataValidEnable, + IMG_UINT32 ui32Flags, + PDUMP_MMU_TYPE eMMUType); + +PVRSRV_ERROR +PDumpMMUAllocMMUContext(const IMG_CHAR *pszPDumpMemSpaceName, + IMG_DEV_PHYADDR sPCDevPAddr, + PDUMP_MMU_TYPE eMMUType, + IMG_UINT32 *pui32MMUContextID, + IMG_UINT32 ui32PDumpFlags); + +PVRSRV_ERROR +PDumpMMUFreeMMUContext(const IMG_CHAR *pszPDumpMemSpaceName, + IMG_UINT32 ui32MMUContextID, + IMG_UINT32 ui32PDumpFlags); + +PVRSRV_ERROR +PDumpMMUSAB(const IMG_CHAR *pszPDumpMemNamespace, + IMG_UINT32 uiPDumpMMUCtx, + IMG_DEV_VIRTADDR sDevAddrStart, + IMG_DEVMEM_SIZE_T uiSize, + const IMG_CHAR *pszFilename, + IMG_UINT32 uiFileOffset, + IMG_UINT32 ui32PDumpFlags); + +#define PDUMP_MMU_ALLOC_MMUCONTEXT(pszPDumpMemDevName, sPCDevPAddr, eMMUType, puiPDumpCtxID, ui32PDumpFlags) \ + PDumpMMUAllocMMUContext(pszPDumpMemDevName, \ + sPCDevPAddr, \ + eMMUType, \ + puiPDumpCtxID, \ + ui32PDumpFlags) + +#define PDUMP_MMU_FREE_MMUCONTEXT(pszPDumpMemDevName, uiPDumpCtxID, ui32PDumpFlags) \ + PDumpMMUFreeMMUContext(pszPDumpMemDevName, uiPDumpCtxID, ui32PDumpFlags) +#else /* PDUMP */ + +#define PDUMP_MMU_ALLOC_MMUCONTEXT(pszPDumpMemDevName, sPCDevPAddr, eMMUType, puiPDumpCtxID, ui32PDumpFlags) \ + ((void)0) +#define PDUMP_MMU_FREE_MMUCONTEXT(pszPDumpMemDevName, uiPDumpCtxID, ui32PDumpFlags) \ + ((void)0) + +#endif /* PDUMP */ + +#endif diff --git a/drivers/gpu/drm/phytium/octopus/pdump_physmem.h b/drivers/gpu/drm/phytium/octopus/pdump_physmem.h new file mode 100644 index 000000000000..72b0c895603b --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pdump_physmem.h @@ -0,0 +1,243 @@ +/**************************************************************************/ /*! +@File +@Title pdump functions to assist with physmem allocations +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements basic low level control of MMU. 
+@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /***************************************************************************/ + +#ifndef SRVSRV_PDUMP_PHYSMEM_H +#define SRVSRV_PDUMP_PHYSMEM_H + +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv_error.h" +#include "pmr.h" + +#define PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH 40 +#define PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH 60 +#define PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH (PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH + PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH) + +typedef struct _PDUMP_PHYSMEM_INFO_T_ PDUMP_PHYSMEM_INFO_T; + +#if defined(PDUMP) +PVRSRV_ERROR +PDumpGetSymbolicAddr(const IMG_HANDLE hPhysmemPDumpHandle, + IMG_CHAR **ppszSymbolicAddress); + +PVRSRV_ERROR +PDumpMalloc(const IMG_CHAR *pszDevSpace, + const IMG_CHAR *pszSymbolicAddress, + IMG_UINT64 ui64Size, + /* alignment is alignment of start of buffer _and_ + minimum contiguity - i.e. smallest allowable + page-size. 
*/
+            IMG_DEVMEM_ALIGN_T uiAlign,
+            IMG_BOOL bInitialise,
+            IMG_UINT32 ui32InitValue,
+            IMG_HANDLE *phHandlePtr,
+            IMG_UINT32 ui32PDumpFlags);
+
+PVRSRV_ERROR
+PDumpFree(IMG_HANDLE hPDumpAllocationInfoHandle);
+
+void
+PDumpMakeStringValid(IMG_CHAR *pszString,
+                     IMG_UINT32 ui32StrLen);
+#else /* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpGetSymbolicAddr)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpGetSymbolicAddr(const IMG_HANDLE hPhysmemPDumpHandle,
+                     IMG_CHAR **ppszSymbolicAddress)
+{
+	PVR_UNREFERENCED_PARAMETER(hPhysmemPDumpHandle);
+	PVR_UNREFERENCED_PARAMETER(ppszSymbolicAddress);
+	return PVRSRV_OK;
+}
+
+static INLINE PVRSRV_ERROR
+PDumpMalloc(const IMG_CHAR *pszDevSpace,
+            const IMG_CHAR *pszSymbolicAddress,
+            IMG_UINT64 ui64Size,
+            IMG_DEVMEM_ALIGN_T uiAlign,
+            IMG_BOOL bInitialise,
+            IMG_UINT32 ui32InitValue,
+            IMG_HANDLE *phHandlePtr,
+            IMG_UINT32 ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(pszDevSpace);
+	PVR_UNREFERENCED_PARAMETER(pszSymbolicAddress);
+	PVR_UNREFERENCED_PARAMETER(ui64Size);
+	PVR_UNREFERENCED_PARAMETER(uiAlign);
+	PVR_UNREFERENCED_PARAMETER(bInitialise);
+	PVR_UNREFERENCED_PARAMETER(ui32InitValue);
+	PVR_UNREFERENCED_PARAMETER(phHandlePtr);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+	return PVRSRV_OK;
+}
+
+static INLINE PVRSRV_ERROR
+PDumpFree(IMG_HANDLE hPDumpAllocationInfoHandle)
+{
+	PVR_UNREFERENCED_PARAMETER(hPDumpAllocationInfoHandle);
+	return PVRSRV_OK;
+}
+#endif /* PDUMP */
+
+#define PMR_DEFAULT_PREFIX "PMR"
+#define PMR_SYMBOLICADDR_FMTSPEC "%s%"IMG_UINT64_FMTSPEC"_%"IMG_UINT64_FMTSPEC"_%s"
+#define PMR_MEMSPACE_FMTSPEC "%s"
+#define PMR_MEMSPACE_CACHE_COHERENT_FMTSPEC "CC_%s"
+
+#if defined(PDUMP)
+/* FIX: the original expansion passed ui32SerialNum as a 9th argument to
+ * PDumpMalloc, whose prototype above takes exactly 8 parameters; any
+ * PDUMP-enabled use of this macro would fail to compile. ui32SerialNum is
+ * still accepted (macro interface unchanged) but intentionally unused.
+ * NOTE(review): PMR_OSALLOCPAGES_PREFIX is assumed to be defined by the PMR
+ * headers elsewhere in the driver -- confirm. */
+#define PDUMP_PHYSMEM_MALLOC_OSPAGES(pszPDumpMemDevName, ui32SerialNum, ui32Size, ui32Align, bInitialise, ui32InitValue, phHandlePtr) \
+	PDumpMalloc(pszPDumpMemDevName, PMR_OSALLOCPAGES_PREFIX, ui32Size, ui32Align, bInitialise, ui32InitValue, phHandlePtr, PDUMP_NONE)
+#define PDUMP_PHYSMEM_FREE_OSPAGES(hHandle) \
+	PDumpFree(hHandle)
+#else
+#define PDUMP_PHYSMEM_MALLOC_OSPAGES(pszPDumpMemDevName, ui32SerialNum, ui32Size, ui32Align, bInitialise, ui32InitValue, phHandlePtr) \
+	((void)(*phHandlePtr=NULL))
+#define PDUMP_PHYSMEM_FREE_OSPAGES(hHandle) \
+	((void)(0))
+#endif /* PDUMP */
+
+PVRSRV_ERROR
+PDumpPMRWRW32(const IMG_CHAR *pszDevSpace,
+              const IMG_CHAR *pszSymbolicName,
+              IMG_DEVMEM_OFFSET_T uiOffset,
+              IMG_UINT32 ui32Value,
+              PDUMP_FLAGS_T uiPDumpFlags);
+
+PVRSRV_ERROR
+PDumpPMRWRW32InternalVarToMem(const IMG_CHAR *pszDevSpace,
+                              const IMG_CHAR *pszSymbolicName,
+                              IMG_DEVMEM_OFFSET_T uiOffset,
+                              const IMG_CHAR *pszInternalVar,
+                              PDUMP_FLAGS_T uiPDumpFlags);
+
+PVRSRV_ERROR
+PDumpPMRRDW32MemToInternalVar(const IMG_CHAR *pszInternalVar,
+                              const IMG_CHAR *pszDevSpace,
+                              const IMG_CHAR *pszSymbolicName,
+                              IMG_DEVMEM_OFFSET_T uiOffset,
+                              PDUMP_FLAGS_T uiPDumpFlags);
+
+PVRSRV_ERROR
+PDumpPMRWRW64(const IMG_CHAR *pszDevSpace,
+              const IMG_CHAR *pszSymbolicName,
+              IMG_DEVMEM_OFFSET_T uiOffset,
+              IMG_UINT64 ui64Value,
+              PDUMP_FLAGS_T uiPDumpFlags);
+
+PVRSRV_ERROR
+PDumpPMRWRW64InternalVarToMem(const IMG_CHAR *pszDevSpace,
+                              const IMG_CHAR *pszSymbolicName,
+                              IMG_DEVMEM_OFFSET_T uiOffset,
+                              const IMG_CHAR *pszInternalVar,
+                              PDUMP_FLAGS_T uiPDumpFlags);
+
+PVRSRV_ERROR
+PDumpPMRRDW64MemToInternalVar(const IMG_CHAR *pszInternalVar,
+                              const IMG_CHAR *pszDevSpace,
+                              const IMG_CHAR *pszSymbolicName,
+                              IMG_DEVMEM_OFFSET_T uiOffset,
+                              PDUMP_FLAGS_T uiPDumpFlags);
+
+PVRSRV_ERROR
+PDumpPMRLDB(const IMG_CHAR *pszDevSpace,
+            const IMG_CHAR *pszSymbolicName,
+            IMG_DEVMEM_OFFSET_T uiOffset,
+            IMG_DEVMEM_SIZE_T uiSize,
+            const IMG_CHAR *pszFilename,
+            IMG_UINT32 uiFileOffset,
+            PDUMP_FLAGS_T uiPDumpFlags);
+
+PVRSRV_ERROR
+PDumpPMRSAB(const IMG_CHAR *pszDevSpace,
+            const IMG_CHAR *pszSymbolicName,
+            IMG_DEVMEM_OFFSET_T uiOffset,
+            IMG_DEVMEM_SIZE_T uiSize,
+            const IMG_CHAR *pszFileName,
+            IMG_UINT32 uiFileOffset);
+
+/*
+	PDumpPMRPOL()
+
+	Emits a
POL to the PDUMP. +*/ +PVRSRV_ERROR +PDumpPMRPOL(const IMG_CHAR *pszMempaceName, + const IMG_CHAR *pszSymbolicName, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + IMG_UINT32 uiCount, + IMG_UINT32 uiDelay, + PDUMP_FLAGS_T uiPDumpFlags); + +PVRSRV_ERROR +PDumpPMRCBP(const IMG_CHAR *pszMemspaceName, + const IMG_CHAR *pszSymbolicName, + IMG_DEVMEM_OFFSET_T uiReadOffset, + IMG_DEVMEM_OFFSET_T uiWriteOffset, + IMG_DEVMEM_SIZE_T uiPacketSize, + IMG_DEVMEM_SIZE_T uiBufferSize); + +/* + * PDumpWriteParameterBlob() + * + * Writes a binary blob to the pdump param stream containing the current + * contents of the memory, and returns the filename and offset of where + * that blob is located (for use in a subsequent LDB, for example). + * + * Caller to provide buffer to receive filename, and declare the size of + * that buffer. + */ +PVRSRV_ERROR +PDumpWriteParameterBlob(IMG_UINT8 *pcBuffer, + size_t uiNumBytes, + PDUMP_FLAGS_T uiPDumpFlags, + IMG_CHAR *pszFilenameOut, + size_t uiFilenameBufSz, + PDUMP_FILEOFFSET_T *puiOffsetOut); + +#endif /* #ifndef SRVSRV_PDUMP_PHYSMEM_H */ diff --git a/drivers/gpu/drm/phytium/octopus/pdump_symbolicaddr.h b/drivers/gpu/drm/phytium/octopus/pdump_symbolicaddr.h new file mode 100644 index 000000000000..62a3fee7a710 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pdump_symbolicaddr.h @@ -0,0 +1,55 @@ +/**************************************************************************/ /*! +@File +@Title Abstraction of PDUMP symbolic address derivation +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Allows pdump functions to derive symbolic addresses on-the-fly +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /***************************************************************************/ + +#ifndef SRVKM_PDUMP_SYMBOLICADDR_H +#define SRVKM_PDUMP_SYMBOLICADDR_H + +#include "img_types.h" + +#include "pvrsrv_error.h" + +/* pdump symbolic addresses are generated on-the-fly with a callback */ + +typedef PVRSRV_ERROR (*PVRSRV_SYMADDRFUNCPTR)(IMG_HANDLE hPriv, IMG_UINT32 uiOffset, IMG_CHAR *pszSymbolicAddr, IMG_UINT32 ui32SymbolicAddrLen, IMG_UINT32 *pui32NewOffset); + +#endif /* #ifndef SRVKM_PDUMP_SYMBOLICADDR_H */ diff --git a/drivers/gpu/drm/phytium/octopus/pdumpdefs.h b/drivers/gpu/drm/phytium/octopus/pdumpdefs.h new file mode 100644 index 000000000000..1014b5fd913d --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pdumpdefs.h @@ -0,0 +1,246 @@ +/*************************************************************************/ /*! +@File +@Title PDUMP definitions header +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description PDUMP definitions header +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef PDUMPDEFS_H +#define PDUMPDEFS_H + +/*! 
PDump Pixel Format Enumeration */ +typedef enum _PDUMP_PIXEL_FORMAT_ +{ + PVRSRV_PDUMP_PIXEL_FORMAT_UNSUPPORTED = 0, + PVRSRV_PDUMP_PIXEL_FORMAT_RGB8 = 1, + PVRSRV_PDUMP_PIXEL_FORMAT_RGB332 = 2, + PVRSRV_PDUMP_PIXEL_FORMAT_KRGB555 = 3, + PVRSRV_PDUMP_PIXEL_FORMAT_RGB565 = 4, + PVRSRV_PDUMP_PIXEL_FORMAT_ARGB4444 = 5, + PVRSRV_PDUMP_PIXEL_FORMAT_ARGB1555 = 6, + PVRSRV_PDUMP_PIXEL_FORMAT_RGB888 = 7, + PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8888 = 8, + PVRSRV_PDUMP_PIXEL_FORMAT_YUV8 = 9, +/* PVRSRV_PDUMP_PIXEL_FORMAT_AYUV4444 = 10, */ + PVRSRV_PDUMP_PIXEL_FORMAT_VY0UY1_8888 = 11, + PVRSRV_PDUMP_PIXEL_FORMAT_UY0VY1_8888 = 12, + PVRSRV_PDUMP_PIXEL_FORMAT_Y0UY1V_8888 = 13, + PVRSRV_PDUMP_PIXEL_FORMAT_Y0VY1U_8888 = 14, + PVRSRV_PDUMP_PIXEL_FORMAT_YUV888 = 15, + PVRSRV_PDUMP_PIXEL_FORMAT_UYVY10101010 = 16, + PVRSRV_PDUMP_PIXEL_FORMAT_VYAUYA8888 = 17, + PVRSRV_PDUMP_PIXEL_FORMAT_AYUV8888 = 18, + PVRSRV_PDUMP_PIXEL_FORMAT_AYUV2101010 = 19, + PVRSRV_PDUMP_PIXEL_FORMAT_YUV101010 = 20, + PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y8 = 21, + PVRSRV_PDUMP_PIXEL_FORMAT_YUV_IMC2 = 22, + PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV12 = 23, + PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL8 = 24, + PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL12 = 25, + PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV8 = 26, + PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV8 = 27, + PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y10 = 28, + PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV10 = 29, + PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV10 = 30, + PVRSRV_PDUMP_PIXEL_FORMAT_ABGR8888 = 31, + PVRSRV_PDUMP_PIXEL_FORMAT_BGRA8888 = 32, + PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8332 = 33, + PVRSRV_PDUMP_PIXEL_FORMAT_RGB555 = 34, + PVRSRV_PDUMP_PIXEL_FORMAT_F16 = 35, + PVRSRV_PDUMP_PIXEL_FORMAT_F32 = 36, + PVRSRV_PDUMP_PIXEL_FORMAT_L16 = 37, + PVRSRV_PDUMP_PIXEL_FORMAT_L32 = 38, + PVRSRV_PDUMP_PIXEL_FORMAT_RGBA8888 = 39, + PVRSRV_PDUMP_PIXEL_FORMAT_ABGR4444 = 40, + PVRSRV_PDUMP_PIXEL_FORMAT_RGBA4444 = 41, + PVRSRV_PDUMP_PIXEL_FORMAT_BGRA4444 = 42, + PVRSRV_PDUMP_PIXEL_FORMAT_ABGR1555 = 43, + PVRSRV_PDUMP_PIXEL_FORMAT_RGBA5551 = 44, + 
PVRSRV_PDUMP_PIXEL_FORMAT_BGRA5551 = 45, + PVRSRV_PDUMP_PIXEL_FORMAT_BGR565 = 46, + PVRSRV_PDUMP_PIXEL_FORMAT_A8 = 47, + PVRSRV_PDUMP_PIXEL_FORMAT_F16F16F16F16 = 49, + PVRSRV_PDUMP_PIXEL_FORMAT_A4 = 50, + PVRSRV_PDUMP_PIXEL_FORMAT_ARGB2101010 = 51, + PVRSRV_PDUMP_PIXEL_FORMAT_RSGSBS888 = 52, + PVRSRV_PDUMP_PIXEL_FORMAT_F32F32F32F32 = 53, + PVRSRV_PDUMP_PIXEL_FORMAT_F16F16 = 54, + PVRSRV_PDUMP_PIXEL_FORMAT_F32F32 = 55, + PVRSRV_PDUMP_PIXEL_FORMAT_F16F16F16 = 56, + PVRSRV_PDUMP_PIXEL_FORMAT_F32F32F32 = 57, + PVRSRV_PDUMP_PIXEL_FORMAT_U8 = 58, + PVRSRV_PDUMP_PIXEL_FORMAT_U8U8 = 59, + PVRSRV_PDUMP_PIXEL_FORMAT_U16 = 60, + PVRSRV_PDUMP_PIXEL_FORMAT_U16U16 = 61, + PVRSRV_PDUMP_PIXEL_FORMAT_U16U16U16U16 = 62, + PVRSRV_PDUMP_PIXEL_FORMAT_U32 = 63, + PVRSRV_PDUMP_PIXEL_FORMAT_U32U32 = 64, + PVRSRV_PDUMP_PIXEL_FORMAT_U32U32U32U32 = 65, + PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV32 = 66, + + PVRSRV_PDUMP_PIXEL_FORMAT_FORCE_I32 = 0x7fffffff + +} PDUMP_PIXEL_FORMAT; + +typedef enum _PDUMP_FBC_SWIZZLE_ +{ + PVRSRV_PDUMP_FBC_SWIZZLE_ARGB = 0x0, + PVRSRV_PDUMP_FBC_SWIZZLE_ARBG = 0x1, + PVRSRV_PDUMP_FBC_SWIZZLE_AGRB = 0x2, + PVRSRV_PDUMP_FBC_SWIZZLE_AGBR = 0x3, + PVRSRV_PDUMP_FBC_SWIZZLE_ABGR = 0x4, + PVRSRV_PDUMP_FBC_SWIZZLE_ABRG = 0x5, + PVRSRV_PDUMP_FBC_SWIZZLE_RGBA = 0x8, + PVRSRV_PDUMP_FBC_SWIZZLE_RBGA = 0x9, + PVRSRV_PDUMP_FBC_SWIZZLE_GRBA = 0xA, + PVRSRV_PDUMP_FBC_SWIZZLE_GBRA = 0xB, + PVRSRV_PDUMP_FBC_SWIZZLE_BGRA = 0xC, + PVRSRV_PDUMP_FBC_SWIZZLE_BRGA = 0xD, +} PDUMP_FBC_SWIZZLE; + +/*! 
PDump addrmode */ +#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT 0 +#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_MASK 0x000000FF + +#define PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_SHIFT 8 +#define PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_NEGATIVE (1U << PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_SHIFT) + +#define PVRSRV_PDUMP_ADDRMODE_BIFTILE_MODE_SHIFT 12 +#define PVRSRV_PDUMP_ADDRMODE_BIFTILE_MODE_MASK 0x000FF000 + +#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT 20 +#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_MASK 0x00F00000 + +#define PVRSRV_PDUMP_ADDRMODE_FBCDECOR_SHIFT 24 +#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_SHIFT 25 + +#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT 28 +#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_MASK 0xF0000000 + +#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_STRIDE (0U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT) +#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE1 (1U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT) +#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE2 (2U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT) +#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE3 (3U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT) +#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE4 (4U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT) +#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE5 (5U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT) +#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE6 (6U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT) +#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE7 (7U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT) +#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_TWIDDLED (9U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT) +#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_PAGETILED (11U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT) +#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_ZTWIDDLED (12U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT) + +#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_NONE (0U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT) +#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_DIRECT (1U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT) 
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_DIRECT (2U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT) +#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_32X2_DIRECT (3U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT) +#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_INDIRECT (4U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT) +#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_INDIRECT (5U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT) +#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_INDIRECT_4TILE (6U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT) +#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_INDIRECT_4TILE (7U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT) + +#define PVRSRV_PDUMP_ADDRMODE_FBC_DECOR (1U << PVRSRV_PDUMP_ADDRMODE_FBCDECOR_SHIFT) + +#define PVRSRV_PDUMP_ADDRMODE_FBC_LOSSY (1U << PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_SHIFT) + +#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_BASE (1U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT) +#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_ENHANCED (2U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT) +#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V2 (3U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT) +#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V3_SURFACE (4U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT) +#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V3_RESOURCE (5U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT) +#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V3_1_SURFACE (6U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT) +#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V3_1_RESOURCE (7U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT) +#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V4 (8U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT) + +/*! 
PDump Poll Operator */ +typedef enum _PDUMP_POLL_OPERATOR +{ + PDUMP_POLL_OPERATOR_EQUAL = 0, + PDUMP_POLL_OPERATOR_LESS = 1, + PDUMP_POLL_OPERATOR_LESSEQUAL = 2, + PDUMP_POLL_OPERATOR_GREATER = 3, + PDUMP_POLL_OPERATOR_GREATEREQUAL = 4, + PDUMP_POLL_OPERATOR_NOTEQUAL = 5, +} PDUMP_POLL_OPERATOR; + + +#define PVRSRV_PDUMP_MAX_FILENAME_SIZE 75 /*!< Max length of a pdump log file name */ +#define PVRSRV_PDUMP_MAX_COMMENT_SIZE 350 /*!< Max length of a pdump comment */ + +/*! + PDump MMU type + (Maps to values listed in "PhytiumVR Tools.Pdump2 Script Functions.doc" Sec 2.13) +*/ +typedef enum +{ + PDUMP_MMU_TYPE_4KPAGE_32BIT_STDTILE = 1, + PDUMP_MMU_TYPE_VARPAGE_32BIT_STDTILE = 2, + PDUMP_MMU_TYPE_4KPAGE_36BIT_EXTTILE = 3, + PDUMP_MMU_TYPE_4KPAGE_32BIT_EXTTILE = 4, + PDUMP_MMU_TYPE_4KPAGE_36BIT_STDTILE = 5, + PDUMP_MMU_TYPE_VARPAGE_40BIT = 6, + PDUMP_MMU_TYPE_VIDEO_40BIT_STDTILE = 7, + PDUMP_MMU_TYPE_VIDEO_40BIT_EXTTILE = 8, + PDUMP_MMU_TYPE_MIPS_MICROAPTIV = 9, + PDUMP_MMU_TYPE_LAST +} PDUMP_MMU_TYPE; + +/*! + PDump states + These values are used by the bridge call PVRSRVPDumpGetState +*/ +#define PDUMP_STATE_CAPTURE_FRAME (1) /*!< Flag represents the PDump being in capture range or not*/ +#define PDUMP_STATE_CONNECTED (2) /*!< Flag represents the PDump Client App being connected or not */ +#define PDUMP_STATE_SUSPENDED (4) /*!< Flag represents the PDump being suspended or not */ + +/*! 
+ PDump Capture modes + Values used with calls to PVRSRVPDumpSetDefaultCaptureParams +*/ +#define PDUMP_CAPMODE_UNSET 0x00000000UL +#define PDUMP_CAPMODE_FRAMED 0x00000001UL +#define PDUMP_CAPMODE_CONTINUOUS 0x00000002UL +#define PDUMP_CAPMODE_BLOCKED 0x00000003UL + +#define PDUMP_CAPMODE_MAX PDUMP_CAPMODE_BLOCKED + +#endif /* PDUMPDEFS_H */ + +/***************************************************************************** + End of file (pdumpdefs.h) +*****************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/pdumpdesc.h b/drivers/gpu/drm/phytium/octopus/pdumpdesc.h new file mode 100644 index 000000000000..0eb636862ae5 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pdumpdesc.h @@ -0,0 +1,211 @@ +/*************************************************************************/ /*! +@File pdumpdesc.h +@Title PDump Descriptor format +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Describes PDump descriptors that may be passed to the + extraction routines (SAB). +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(PDUMPDESC_H) +#define PDUMPDESC_H + +#include "pdumpdefs.h" + +/* + * Common fields + */ +#define HEADER_WORD0_TYPE_SHIFT (0) +#define HEADER_WORD0_TYPE_CLRMSK (0xFFFFFFFFU) + +#define HEADER_WORD1_SIZE_SHIFT (0) +#define HEADER_WORD1_SIZE_CLRMSK (0x0000FFFFU) +#define HEADER_WORD1_VERSION_SHIFT (16) +#define HEADER_WORD1_VERSION_CLRMSK (0xFFFF0000U) + +#define HEADER_WORD2_DATA_SIZE_SHIFT (0) +#define HEADER_WORD2_DATA_SIZE_CLRMSK (0xFFFFFFFFU) + + +/* + * The image type descriptor + */ + +/* + * Header type (IMGBv2) - 'IMGB' in hex + VERSION 2 + * Header size - 64 bytes + */ +#define IMAGE_HEADER_TYPE (0x42474D49) +#define IMAGE_HEADER_SIZE (64) +#define IMAGE_HEADER_VERSION (2) + +/* + * Image type-specific fields + */ +#define IMAGE_HEADER_WORD3_LOGICAL_WIDTH_SHIFT (0) +#define IMAGE_HEADER_WORD3_LOGICAL_WIDTH_CLRMSK (0xFFFFFFFFU) + +#define IMAGE_HEADER_WORD4_LOGICAL_HEIGHT_SHIFT (0) +#define IMAGE_HEADER_WORD4_LOGICAL_HEIGHT_CLRMSK (0xFFFFFFFFU) + +#define IMAGE_HEADER_WORD5_FORMAT_SHIFT (0) +#define IMAGE_HEADER_WORD5_FORMAT_CLRMSK (0xFFFFFFFFU) + +#define IMAGE_HEADER_WORD6_PHYSICAL_WIDTH_SHIFT (0) +#define IMAGE_HEADER_WORD6_PHYSICAL_WIDTH_CLRMSK (0xFFFFFFFFU) + +#define IMAGE_HEADER_WORD7_PHYSICAL_HEIGHT_SHIFT (0) +#define IMAGE_HEADER_WORD7_PHYSICAL_HEIGHT_CLRMSK (0xFFFFFFFFU) + +#define IMAGE_HEADER_WORD8_TWIDDLING_SHIFT (0) +#define IMAGE_HEADER_WORD8_TWIDDLING_CLRMSK (0x000000FFU) +#define IMAGE_HEADER_WORD8_TWIDDLING_STRIDED (0 << IMAGE_HEADER_WORD8_TWIDDLING_SHIFT) +#define IMAGE_HEADER_WORD8_TWIDDLING_NTWIDDLE (9 << IMAGE_HEADER_WORD8_TWIDDLING_SHIFT) +#define IMAGE_HEADER_WORD8_TWIDDLING_ZTWIDDLE (12 << IMAGE_HEADER_WORD8_TWIDDLING_SHIFT) + + +#define IMAGE_HEADER_WORD8_STRIDE_SHIFT (8) +#define IMAGE_HEADER_WORD8_STRIDE_CLRMSK (0x0000FF00U) +#define IMAGE_HEADER_WORD8_STRIDE_POSITIVE (0 << IMAGE_HEADER_WORD8_STRIDE_SHIFT) +#define 
IMAGE_HEADER_WORD8_STRIDE_NEGATIVE (1 << IMAGE_HEADER_WORD8_STRIDE_SHIFT) + +#define IMAGE_HEADER_WORD8_BIFTYPE_SHIFT (16) +#define IMAGE_HEADER_WORD8_BIFTYPE_CLRMSK (0x00FF0000U) +#define IMAGE_HEADER_WORD8_BIFTYPE_NONE (0 << IMAGE_HEADER_WORD8_BIFTYPE_SHIFT) + +#define IMAGE_HEADER_WORD8_FBCTYPE_SHIFT (24) +#define IMAGE_HEADER_WORD8_FBCTYPE_CLRMSK (0xFF000000U) +#define IMAGE_HEADER_WORD8_FBCTYPE_8X8 (1 << IMAGE_HEADER_WORD8_FBCTYPE_SHIFT) +#define IMAGE_HEADER_WORD8_FBCTYPE_16x4 (2 << IMAGE_HEADER_WORD8_FBCTYPE_SHIFT) +#define IMAGE_HEADER_WORD8_FBCTYPE_32x2 (3 << IMAGE_HEADER_WORD8_FBCTYPE_SHIFT) + +#define IMAGE_HEADER_WORD9_FBCDECOR_SHIFT (0) +#define IMAGE_HEADER_WORD9_FBCDECOR_CLRMSK (0x000000FFU) +#define IMAGE_HEADER_WORD9_FBCDECOR_ENABLE (1 << IMAGE_HEADER_WORD9_FBCDECOR_SHIFT) + +/* Align with fbcomp_export_c.h in pdump_tools branch */ +#define IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT (8) +#define IMAGE_HEADER_WORD9_FBCCOMPAT_CLRMSK (0x0000FF00U) +#define IMAGE_HEADER_WORD9_FBCCOMPAT_SAME_AS_GPU (0 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) +#define IMAGE_HEADER_WORD9_FBCCOMPAT_BASE (1 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) +#define IMAGE_HEADER_WORD9_FBCCOMPAT_TWIDDLED_EN (2 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) /* TWIDDLED_ENHANCED */ +#define IMAGE_HEADER_WORD9_FBCCOMPAT_V2 (3 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) +#define IMAGE_HEADER_WORD9_FBCCOMPAT_V3_0_LAYOUT1 (4 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) +#define IMAGE_HEADER_WORD9_FBCCOMPAT_V3_0_LAYOUT2 (5 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) /* V30_WITH_HEADER_REMAP */ +#define IMAGE_HEADER_WORD9_FBCCOMPAT_V3_1_LAYOUT1 (6 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) +#define IMAGE_HEADER_WORD9_FBCCOMPAT_V3_1_LAYOUT2 (7 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) /* V31_WITH_HEADER_REMAP */ +#define IMAGE_HEADER_WORD9_FBCCOMPAT_V4 (8 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) +#define IMAGE_HEADER_WORD9_FBCCOMPAT_V4_PLUS (9 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) +#define IMAGE_HEADER_WORD9_FBCCOMPAT_TFBC (10 << 
IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) + +#define IMAGE_HEADER_WORD9_LOSSY_SHIFT (16) +#define IMAGE_HEADER_WORD9_LOSSY_CLRMSK (0x00FF0000U) +/* Non-TFBC */ +#define IMAGE_HEADER_WORD9_LOSSY_ON (1 << IMAGE_HEADER_WORD9_LOSSY_SHIFT) + +/* TFBC */ +#define IMAGE_HEADER_WORD9_LOSSY_75 (1 << IMAGE_HEADER_WORD9_LOSSY_SHIFT) +#define IMAGE_HEADER_WORD9_LOSSY_50 (2 << IMAGE_HEADER_WORD9_LOSSY_SHIFT) +#define IMAGE_HEADER_WORD9_LOSSY_25 (3 << IMAGE_HEADER_WORD9_LOSSY_SHIFT) +#define IMAGE_HEADER_WORD9_LOSSY_OFF (0 << IMAGE_HEADER_WORD9_LOSSY_SHIFT) + +#define IMAGE_HEADER_WORD9_SWIZZLE_SHIFT (24) +#define IMAGE_HEADER_WORD9_SWIZZLE_CLRMSK (0xFF000000U) +#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_ARGB (0x0 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) +#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_ARBG (0x1 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) +#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_AGRB (0x2 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) +#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_AGBR (0x3 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) +#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_ABGR (0x4 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) +#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_ABRG (0x5 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) +#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_RGBA (0x8 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) +#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_RBGA (0x9 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) +#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_GRBA (0xA << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) +#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_GBRA (0xB << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) +#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_BGRA (0xC << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) +#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_BRGA (0xD << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) + +#define IMAGE_HEADER_WORD10_FBCCLEAR_CH0_SHIFT (0) +#define IMAGE_HEADER_WORD10_FBCCLEAR_CH0_CLRMSK (0xFFFFFFFFU) + +#define IMAGE_HEADER_WORD11_FBCCLEAR_CH1_SHIFT (0) +#define IMAGE_HEADER_WORD11_FBCCLEAR_CH1_CLRMSK (0xFFFFFFFFU) + +#define IMAGE_HEADER_WORD12_FBCCLEAR_CH2_SHIFT (0) 
+#define IMAGE_HEADER_WORD12_FBCCLEAR_CH2_CLRMSK (0xFFFFFFFFU) + +#define IMAGE_HEADER_WORD13_FBCCLEAR_CH3_SHIFT (0) +#define IMAGE_HEADER_WORD13_FBCCLEAR_CH3_CLRMSK (0xFFFFFFFFU) + +/* IMAGE_HEADER_WORD14_RESERVED1 */ + +/* IMAGE_HEADER_WORD15_RESERVED2 */ + +/* + * The data type descriptor + */ + +/* + * Header type (IMGCv1) - 'IMGC' in hex + VERSION 0 + * Header size - 20 bytes (5 x 32 bit WORDS) + */ +#define DATA_HEADER_TYPE (0x43474D49) +#define DATA_HEADER_SIZE (20) +#define DATA_HEADER_VERSION (0) + +/* + * The IBIN type descriptor + */ + +/* + * Header type (IBIN) - 'IBIN' in hex + VERSION 0 + * Header size - 12 bytes (3 x 32 bit WORDS) + */ +#define IBIN_HEADER_TYPE (0x4e494249) +#define IBIN_HEADER_SIZE (12) +#define IBIN_HEADER_VERSION (0) + +/* + * Data type-specific fields + */ +#define DATA_HEADER_WORD3_ELEMENT_TYPE_SHIFT (0) +#define DATA_HEADER_WORD3_ELEMENT_TYPE_CLRMSK (0xFFFFFFFFU) + +#define DATA_HEADER_WORD4_ELEMENT_COUNT_SHIFT (0) +#define DATA_HEADER_WORD4_ELEMENT_COUNT_CLRMSK (0xFFFFFFFFU) + +#endif /* PDUMPDESC_H */ diff --git a/drivers/gpu/drm/phytium/octopus/physheap.c b/drivers/gpu/drm/phytium/octopus/physheap.c new file mode 100644 index 000000000000..77db3dcd191d --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/physheap.c @@ -0,0 +1,562 @@ +/*************************************************************************/ /*! +@File physheap.c +@Title Physical heap management +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Management functions for the physical heap(s). A heap contains + all the information required by services when using memory from + that heap (such as CPU <> Device physical address translation). + A system must register one heap but can have more than one which + is why a heap must register with a (system) unique ID. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /***************************************************************************/ +#include "img_types.h" +#include "img_defs.h" +#include "physheap.h" +#include "allocmem.h" +#include "pvr_debug.h" +#include "osfunc.h" +#include "pvrsrv.h" +#include "physmem.h" +#include "physmem_hostmem.h" +#include "physmem_lma.h" +#include "physmem_osmem.h" + +struct _PHYS_HEAP_ +{ + /*! The type of this heap */ + PHYS_HEAP_TYPE eType; + /* Config flags */ + PHYS_HEAP_USAGE_FLAGS ui32UsageFlags; + + /*! Pointer to device node struct */ + PPVRSRV_DEVICE_NODE psDevNode; + /*! PDump name of this physical memory heap */ + IMG_CHAR *pszPDumpMemspaceName; + /*! Private data for the translate routines */ + IMG_HANDLE hPrivData; + /*! Function callbacks */ + PHYS_HEAP_FUNCTIONS *psMemFuncs; + + /*! Refcount */ + IMG_UINT32 ui32RefCount; + + /*! Implementation specific */ + PHEAP_IMPL_DATA pvImplData; + PHEAP_IMPL_FUNCS *psImplFuncs; + + /*! Pointer to next physical heap */ + struct _PHYS_HEAP_ *psNext; +}; + +static PHYS_HEAP *g_psPhysHeapList; +static POS_LOCK g_hPhysHeapLock; + +#if defined(REFCOUNT_DEBUG) +#define PHYSHEAP_REFCOUNT_PRINT(fmt, ...) \ + PVRSRVDebugPrintf(PVR_DBG_WARNING, \ + __FILE__, \ + __LINE__, \ + fmt, \ + __VA_ARGS__) +#else +#define PHYSHEAP_REFCOUNT_PRINT(fmt, ...) 
+#endif + + + +typedef struct PHYS_HEAP_PROPERTIES_TAG +{ + PVRSRV_PHYS_HEAP eFallbackHeap; + IMG_BOOL bPVRLayerAcquire; + IMG_BOOL bUserModeAlloc; +} PHYS_HEAP_PROPERTIES; + +static PHYS_HEAP_PROPERTIES gasHeapProperties[PVRSRV_PHYS_HEAP_LAST] = +{ + /* eFallbackHeap, bPVRLayerAcquire, bUserModeAlloc */ + { PVRSRV_PHYS_HEAP_GPU_LOCAL, IMG_TRUE, IMG_TRUE }, /* GPU_LOCAL */ + { PVRSRV_PHYS_HEAP_GPU_LOCAL, IMG_TRUE, IMG_TRUE }, /* CPU_LOCAL */ + { PVRSRV_PHYS_HEAP_GPU_LOCAL, IMG_FALSE, IMG_FALSE }, /* FW_MAIN */ + { PVRSRV_PHYS_HEAP_GPU_LOCAL, IMG_TRUE, IMG_FALSE }, /* EXTERNAL */ + { PVRSRV_PHYS_HEAP_GPU_LOCAL, IMG_TRUE, IMG_FALSE }, /* GPU_PRIVATE */ + { PVRSRV_PHYS_HEAP_GPU_LOCAL, IMG_TRUE, IMG_FALSE }, /* GPU_COHERENT */ + { PVRSRV_PHYS_HEAP_GPU_LOCAL, IMG_TRUE, IMG_TRUE }, /* GPU_SECURE */ + { PVRSRV_PHYS_HEAP_FW_MAIN, IMG_FALSE, IMG_FALSE }, /* FW_CONFIG */ + { PVRSRV_PHYS_HEAP_FW_MAIN, IMG_FALSE, IMG_FALSE }, /* FW_CODE */ + { PVRSRV_PHYS_HEAP_FW_MAIN, IMG_FALSE, IMG_FALSE }, /* FW_DATA */ + { PVRSRV_PHYS_HEAP_FW_PREMAP0, IMG_FALSE, IMG_FALSE }, /* FW_PREMAP0 */ + { PVRSRV_PHYS_HEAP_FW_PREMAP1, IMG_FALSE, IMG_FALSE }, /* FW_PREMAP1 */ + { PVRSRV_PHYS_HEAP_FW_PREMAP2, IMG_FALSE, IMG_FALSE }, /* FW_PREMAP2 */ + { PVRSRV_PHYS_HEAP_FW_PREMAP3, IMG_FALSE, IMG_FALSE }, /* FW_PREMAP3 */ + { PVRSRV_PHYS_HEAP_FW_PREMAP4, IMG_FALSE, IMG_FALSE }, /* FW_PREMAP4 */ + { PVRSRV_PHYS_HEAP_FW_PREMAP5, IMG_FALSE, IMG_FALSE }, /* FW_PREMAP5 */ + { PVRSRV_PHYS_HEAP_FW_PREMAP6, IMG_FALSE, IMG_FALSE }, /* FW_PREMAP6 */ + { PVRSRV_PHYS_HEAP_FW_PREMAP7, IMG_FALSE, IMG_FALSE }, /* FW_PREMAP7 */ +}; + +static PHEAP_IMPL_FUNCS _sPHEAPImplFuncs = +{ + .pfnDestroyData = NULL, + .pfnCreatePMR = PhysmemNewOSRamBackedPMR, +}; + +PVRSRV_ERROR +PhysHeapCreateHeapFromConfig(PVRSRV_DEVICE_NODE *psDevNode, + PHYS_HEAP_CONFIG *psConfig, + PHYS_HEAP **ppsPhysHeap) +{ + PVRSRV_ERROR eResult; + + if (psConfig->eType == PHYS_HEAP_TYPE_UMA +#if defined(SUPPORT_WRAP_EXTMEMOBJECT) + || psConfig->eType == 
PHYS_HEAP_TYPE_WRAP +#endif + ) + { + eResult = PhysHeapCreate(psDevNode, psConfig, NULL, + &_sPHEAPImplFuncs, ppsPhysHeap); + } + else if (psConfig->eType == PHYS_HEAP_TYPE_LMA || + psConfig->eType == PHYS_HEAP_TYPE_DMA) + { + eResult = PhysmemCreateHeapLMA(psDevNode, psConfig, "GPU LMA (Sys)", ppsPhysHeap); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s Invalid phys heap type: %d", + __func__, psConfig->eType)); + eResult = PVRSRV_ERROR_INVALID_PARAMS; + } + + return eResult; +} + +PVRSRV_ERROR +PhysHeapCreateHeapsFromConfigs(PPVRSRV_DEVICE_NODE psDevNode, + PHYS_HEAP_CONFIG *pasConfigs, + IMG_UINT32 ui32NumConfigs, + PHYS_HEAP **papsPhysHeaps, + IMG_UINT32 *pui32NumHeaps) +{ + IMG_UINT32 i; + PVRSRV_ERROR eError; + + *pui32NumHeaps = 0; + + for (i = 0; i < ui32NumConfigs; i++) + { + eError = PhysHeapCreateHeapFromConfig(psDevNode, + pasConfigs + i, + papsPhysHeaps + i); + PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemCreateHeap"); + + (*pui32NumHeaps)++; + } + + return PVRSRV_OK; +} + +PVRSRV_ERROR PhysHeapCreate(PPVRSRV_DEVICE_NODE psDevNode, + PHYS_HEAP_CONFIG *psConfig, + PHEAP_IMPL_DATA pvImplData, + PHEAP_IMPL_FUNCS *psImplFuncs, + PHYS_HEAP **ppsPhysHeap) +{ + PHYS_HEAP *psNew; + + PVR_DPF_ENTERED; + + PVR_LOG_RETURN_IF_INVALID_PARAM(psDevNode != NULL, "psDevNode"); + + if (psConfig->eType == PHYS_HEAP_TYPE_UNKNOWN) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + PVR_LOG_RETURN_IF_INVALID_PARAM(psImplFuncs != NULL, "psImplFuncs"); + PVR_LOG_RETURN_IF_INVALID_PARAM(psImplFuncs->pfnCreatePMR != NULL, "psImplFuncs->pfnCreatePMR"); + + psNew = OSAllocMem(sizeof(PHYS_HEAP)); + PVR_RETURN_IF_NOMEM(psNew); + psNew->psDevNode = psDevNode; + psNew->eType = psConfig->eType; + psNew->psMemFuncs = psConfig->psMemFuncs; + psNew->hPrivData = psConfig->hPrivData; + psNew->ui32RefCount = 0; + psNew->pszPDumpMemspaceName = psConfig->pszPDumpMemspaceName; + psNew->ui32UsageFlags = psConfig->ui32UsageFlags; + + psNew->pvImplData = pvImplData; + psNew->psImplFuncs = psImplFuncs; + 
+ psNew->psNext = g_psPhysHeapList; + g_psPhysHeapList = psNew; + + *ppsPhysHeap = psNew; + + PVR_DPF_RETURN_RC1(PVRSRV_OK, *ppsPhysHeap); +} + +void PhysHeapDestroy(PHYS_HEAP *psPhysHeap) +{ + PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs; + + PVR_DPF_ENTERED1(psPhysHeap); + +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) + if (PVRSRVGetPVRSRVData()->eServicesState == PVRSRV_SERVICES_STATE_OK) +#endif + { + PVR_ASSERT(psPhysHeap->ui32RefCount == 0); + } + + if (g_psPhysHeapList == psPhysHeap) + { + g_psPhysHeapList = psPhysHeap->psNext; + } + else + { + PHYS_HEAP *psTmp = g_psPhysHeapList; + + while (psTmp->psNext != psPhysHeap) + { + psTmp = psTmp->psNext; + } + psTmp->psNext = psPhysHeap->psNext; + } + + if (psImplFuncs->pfnDestroyData != NULL) + { + psImplFuncs->pfnDestroyData(psPhysHeap->pvImplData); + } + + OSFreeMem(psPhysHeap); + + PVR_DPF_RETURN; +} + +PVRSRV_ERROR PhysHeapAcquire(PHYS_HEAP *psPhysHeap) +{ + PVR_LOG_RETURN_IF_INVALID_PARAM(psPhysHeap != NULL, "psPhysHeap"); + + psPhysHeap->ui32RefCount++; + + return PVRSRV_OK; +} + +PVRSRV_ERROR PhysHeapAcquireByUsage(PHYS_HEAP_USAGE_FLAGS ui32UsageFlag, + PHYS_HEAP **ppsPhysHeap) +{ + PHYS_HEAP *psNode = g_psPhysHeapList; + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_DPF_ENTERED1(ui32UsageFlag); + + OSLockAcquire(g_hPhysHeapLock); + + while (psNode) + { + if (BITMASK_ANY(psNode->ui32UsageFlags, ui32UsageFlag)) + { + break; + } + psNode = psNode->psNext; + } + + if (psNode == NULL) + { + eError = PVRSRV_ERROR_PHYSHEAP_ID_INVALID; + } + else + { + psNode->ui32RefCount++; + PHYSHEAP_REFCOUNT_PRINT("%s: Heap %p, refcount = %d", + __func__, psNode, psNode->ui32RefCount); + } + + OSLockRelease(g_hPhysHeapLock); + + *ppsPhysHeap = psNode; + PVR_DPF_RETURN_RC1(eError, *ppsPhysHeap); +} + +static PHYS_HEAP * _PhysHeapFindHeap(PVRSRV_PHYS_HEAP ePhysHeap, + PPVRSRV_DEVICE_NODE psDevNode) +{ + PHYS_HEAP *psPhysHeapNode = g_psPhysHeapList; + PVRSRV_PHYS_HEAP eFallback; + + while (psPhysHeapNode) + { + if 
((psPhysHeapNode->psDevNode == psDevNode) && + BIT_ISSET(psPhysHeapNode->ui32UsageFlags, ePhysHeap)) + { + return psPhysHeapNode; + } + + psPhysHeapNode = psPhysHeapNode->psNext; + } + + eFallback = gasHeapProperties[ePhysHeap].eFallbackHeap; + + if (ePhysHeap == eFallback) + { + return NULL; + } + else + { + return _PhysHeapFindHeap(eFallback, psDevNode); + } +} + +PVRSRV_ERROR PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP eDevPhysHeap, + PPVRSRV_DEVICE_NODE psDevNode, + PHYS_HEAP **ppsPhysHeap) +{ + PHYS_HEAP *psPhysHeap; + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_LOG_RETURN_IF_INVALID_PARAM(eDevPhysHeap < PVRSRV_PHYS_HEAP_LAST, "eDevPhysHeap"); + PVR_LOG_RETURN_IF_INVALID_PARAM(psDevNode != NULL, "psDevNode"); + + PVR_DPF_ENTERED1(ui32Flags); + + OSLockAcquire(g_hPhysHeapLock); + + psPhysHeap = _PhysHeapFindHeap(eDevPhysHeap, psDevNode); + + if (psPhysHeap != NULL) + { + psPhysHeap->ui32RefCount++; + PHYSHEAP_REFCOUNT_PRINT("%s: Heap %p, refcount = %d", + __func__, psPhysHeap, psPhysHeap->ui32RefCount); + } + else + { + eError = PVRSRV_ERROR_PHYSHEAP_ID_INVALID; + } + + OSLockRelease(g_hPhysHeapLock); + + *ppsPhysHeap = psPhysHeap; + PVR_DPF_RETURN_RC1(eError, *ppsPhysHeap); +} + +void PhysHeapRelease(PHYS_HEAP *psPhysHeap) +{ + PVR_DPF_ENTERED1(psPhysHeap); + + OSLockAcquire(g_hPhysHeapLock); + psPhysHeap->ui32RefCount--; + PHYSHEAP_REFCOUNT_PRINT("%s: Heap %p, refcount = %d", + __func__, psPhysHeap, psPhysHeap->ui32RefCount); + OSLockRelease(g_hPhysHeapLock); + + PVR_DPF_RETURN; +} + +PHEAP_IMPL_DATA PhysHeapGetImplData(PHYS_HEAP *psPhysHeap) +{ + return psPhysHeap->pvImplData; +} + +PHYS_HEAP_TYPE PhysHeapGetType(PHYS_HEAP *psPhysHeap) +{ + PVR_ASSERT(psPhysHeap->eType != PHYS_HEAP_TYPE_UNKNOWN); + return psPhysHeap->eType; +} + +PHYS_HEAP_USAGE_FLAGS PhysHeapGetFlags(PHYS_HEAP *psPhysHeap) +{ + return psPhysHeap->ui32UsageFlags; +} + +/* + * This function will set the psDevPAddr to whatever the system layer + * has set it for the referenced region. 
+ * It will not fail if the psDevPAddr is invalid. + */ +PVRSRV_ERROR PhysHeapGetDevPAddr(PHYS_HEAP *psPhysHeap, + IMG_DEV_PHYADDR *psDevPAddr) +{ + PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs; + PVRSRV_ERROR eResult = PVRSRV_ERROR_NOT_IMPLEMENTED; + + if (psImplFuncs->pfnGetDevPAddr != NULL) + { + eResult = psImplFuncs->pfnGetDevPAddr(psPhysHeap->pvImplData, + psDevPAddr); + } + + return eResult; +} + +/* + * This function will set the psCpuPAddr to whatever the system layer + * has set it for the referenced region. + * It will not fail if the psCpuPAddr is invalid. + */ +PVRSRV_ERROR PhysHeapGetCpuPAddr(PHYS_HEAP *psPhysHeap, + IMG_CPU_PHYADDR *psCpuPAddr) +{ + PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs; + PVRSRV_ERROR eResult = PVRSRV_ERROR_NOT_IMPLEMENTED; + + if (psImplFuncs->pfnGetCPUPAddr != NULL) + { + eResult = psImplFuncs->pfnGetCPUPAddr(psPhysHeap->pvImplData, + psCpuPAddr); + } + + return eResult; +} + +PVRSRV_ERROR PhysHeapGetSize(PHYS_HEAP *psPhysHeap, + IMG_UINT64 *puiSize) +{ + PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs; + PVRSRV_ERROR eResult = PVRSRV_ERROR_NOT_IMPLEMENTED; + + if (psImplFuncs->pfnGetSize != NULL) + { + eResult = psImplFuncs->pfnGetSize(psPhysHeap->pvImplData, + puiSize); + } + + return eResult; +} + +void PhysHeapCpuPAddrToDevPAddr(PHYS_HEAP *psPhysHeap, + IMG_UINT32 ui32NumOfAddr, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_CPU_PHYADDR *psCpuPAddr) +{ + psPhysHeap->psMemFuncs->pfnCpuPAddrToDevPAddr(psPhysHeap->hPrivData, + ui32NumOfAddr, + psDevPAddr, + psCpuPAddr); +} + +void PhysHeapDevPAddrToCpuPAddr(PHYS_HEAP *psPhysHeap, + IMG_UINT32 ui32NumOfAddr, + IMG_CPU_PHYADDR *psCpuPAddr, + IMG_DEV_PHYADDR *psDevPAddr) +{ + psPhysHeap->psMemFuncs->pfnDevPAddrToCpuPAddr(psPhysHeap->hPrivData, + ui32NumOfAddr, + psCpuPAddr, + psDevPAddr); +} + +IMG_CHAR *PhysHeapPDumpMemspaceName(PHYS_HEAP *psPhysHeap) +{ + return psPhysHeap->pszPDumpMemspaceName; +} + +PVRSRV_ERROR PhysHeapCreatePMR(PHYS_HEAP *psPhysHeap, 
+ struct _CONNECTION_DATA_ *psConnection, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_UINT32 uiLog2PageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + const IMG_CHAR *pszAnnotation, + IMG_PID uiPid, + PMR **ppsPMRPtr, + IMG_UINT32 ui32PDumpFlags) +{ + PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs; + + return psImplFuncs->pfnCreatePMR(psPhysHeap, + psConnection, + uiSize, + uiChunkSize, + ui32NumPhysChunks, + ui32NumVirtChunks, + pui32MappingTable, + uiLog2PageSize, + uiFlags, + pszAnnotation, + uiPid, + ppsPMRPtr, + ui32PDumpFlags); +} + +PVRSRV_ERROR PhysHeapInit(void) +{ + PVRSRV_ERROR eError; + + g_psPhysHeapList = NULL; + + eError = OSLockCreate(&g_hPhysHeapLock); + PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate"); + + return PVRSRV_OK; +} + +PVRSRV_ERROR PhysHeapDeinit(void) +{ + PVR_ASSERT(g_psPhysHeapList == NULL); + + OSLockDestroy(g_hPhysHeapLock); + + return PVRSRV_OK; +} + +PPVRSRV_DEVICE_NODE PhysHeapDeviceNode(PHYS_HEAP *psPhysHeap) +{ + PVR_ASSERT(psPhysHeap != NULL); + + return psPhysHeap->psDevNode; +} + +IMG_BOOL PhysHeapPVRLayerAcquire(PVRSRV_PHYS_HEAP ePhysHeap) +{ + PVR_ASSERT(ePhysHeap < PVRSRV_PHYS_HEAP_LAST); + + return gasHeapProperties[ePhysHeap].bPVRLayerAcquire; +} + +IMG_BOOL PhysHeapUserModeAlloc(PVRSRV_PHYS_HEAP ePhysHeap) +{ + PVR_ASSERT(ePhysHeap < PVRSRV_PHYS_HEAP_LAST); + + return gasHeapProperties[ePhysHeap].bUserModeAlloc; +} diff --git a/drivers/gpu/drm/phytium/octopus/physheap.h b/drivers/gpu/drm/phytium/octopus/physheap.h new file mode 100644 index 000000000000..2b47ebec6ebd --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/physheap.h @@ -0,0 +1,307 @@ +/*************************************************************************/ /*! +@File +@Title Physical heap management header +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Description Defines the interface for the physical heap management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "img_types.h" +#include "pvrsrv_error.h" +#include "pvrsrv_memallocflags.h" +#include "devicemem_typedefs.h" +#include "opaque_types.h" +#include "pmr_impl.h" +#include "physheap_config.h" + +#ifndef PHYSHEAP_H +#define PHYSHEAP_H + +typedef struct _PHYS_HEAP_ PHYS_HEAP; + +struct _CONNECTION_DATA_; + +/*! Pointer to private implementation specific data */ +typedef void *PHEAP_IMPL_DATA; + +/*************************************************************************/ /*! +@Function Callback function PFN_DESTROY_DATA +@Description Destroy private implementation specific data. +@Input PHEAP_IMPL_DATA Pointer to implementation data. +*/ /**************************************************************************/ +typedef void (*PFN_DESTROY_DATA)(PHEAP_IMPL_DATA); +/*************************************************************************/ /*! +@Function Callback function PFN_GET_DEV_PADDR +@Description Get heap device physical address. +@Input PHEAP_IMPL_DATA Pointer to implementation data. +@Output IMG_DEV_PHYADDR Device physical address. +@Return PVRSRV_ERROR PVRSRV_OK or error code +*/ /**************************************************************************/ +typedef PVRSRV_ERROR (*PFN_GET_DEV_PADDR)(PHEAP_IMPL_DATA, IMG_DEV_PHYADDR*); +/*************************************************************************/ /*! 
+@Function Callback function PFN_GET_CPU_PADDR +@Description Get heap CPU physical address. +@Input PHEAP_IMPL_DATA Pointer to implementation data. +@Output IMG_CPU_PHYADDR CPU physical address. +@Return PVRSRV_ERROR PVRSRV_OK or error code +*/ /**************************************************************************/ +typedef PVRSRV_ERROR (*PFN_GET_CPU_PADDR)(PHEAP_IMPL_DATA, IMG_CPU_PHYADDR*); +/*************************************************************************/ /*! +@Function Callback function PFN_GET_SIZE +@Description Get size of heap. +@Input PHEAP_IMPL_DATA Pointer to implementation data. +@Output IMG_UINT64 Size of heap. +@Return PVRSRV_ERROR PVRSRV_OK or error code +*/ /**************************************************************************/ +typedef PVRSRV_ERROR (*PFN_GET_SIZE)(PHEAP_IMPL_DATA, IMG_UINT64*); + +/*************************************************************************/ /*! +@Function Callback function PFN_CREATE_PMR +@Description Create a PMR physical allocation and back with RAM on creation, + if required. The RAM page comes either directly from + the Phys Heap's associated pool of memory or from an OS API. +@Input psPhysHeap Pointer to Phys Heap. +@Input psConnection Pointer to device connection. +@Input uiSize Allocation size. +@Input uiChunkSize Chunk size. +@Input ui32NumPhysChunks Physical chunk count. +@Input ui32NumVirtChunks Virtual chunk count. +@Input pui32MappingTable Mapping Table. +@Input uiLog2PageSize Page size. +@Input uiFlags Memalloc flags. +@Input pszAnnotation Annotation. +@Input uiPid Process ID. +@Output ppsPMRPtr Pointer to PMR. +@Input ui32PDumpFlag PDump flags. 
+@Return PVRSRV_ERROR PVRSRV_OK or error code +*/ /**************************************************************************/ +typedef PVRSRV_ERROR (*PFN_CREATE_PMR)(PHYS_HEAP *psPhysHeap, + struct _CONNECTION_DATA_ *psConnection, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_UINT32 uiLog2PageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + const IMG_CHAR *pszAnnotation, + IMG_PID uiPid, + PMR **ppsPMRPtr, + IMG_UINT32 ui32PDumpFlags); + +/*! Implementation specific function table */ +typedef struct PHEAP_IMPL_FUNCS_TAG +{ + PFN_DESTROY_DATA pfnDestroyData; + PFN_GET_DEV_PADDR pfnGetDevPAddr; + PFN_GET_CPU_PADDR pfnGetCPUPAddr; + PFN_GET_SIZE pfnGetSize; + PFN_CREATE_PMR pfnCreatePMR; +} PHEAP_IMPL_FUNCS; + +/*************************************************************************/ /*! +@Function PhysHeapCreateHeapsFromConfigs +@Description Create new heaps from configs. +@Input psDevNode Pointer to device node struct +@Input pasConfigs Pointer to array of Heap configurations. +@Input ui32NumConfigs Number of configurations in array. +@Output papsPhysHeaps Pointer to array of phys heap pointers. +@Output pui32NumHeaps Number of heaps created. Can be less than + ui32NumConfigs if error. +@Return PVRSRV_ERROR PVRSRV_OK or error code +*/ /**************************************************************************/ +PVRSRV_ERROR +PhysHeapCreateHeapsFromConfigs(PPVRSRV_DEVICE_NODE psDevNode, + PHYS_HEAP_CONFIG *pasConfigs, + IMG_UINT32 ui32NumConfigs, + PHYS_HEAP **papsPhysHeaps, + IMG_UINT32 *pui32NumHeaps); + +/*************************************************************************/ /*! +@Function PhysHeapCreateHeapFromConfig +@Description Create a new heap. Calls specific heap API depending + on heap type. +@Input psDevNode Pointer to device node struct. +@Input psConfig Heap configuration. +@Output ppsPhysHeap Pointer to the created heap. 
+@Return PVRSRV_ERROR PVRSRV_OK or error code +*/ /**************************************************************************/ +PVRSRV_ERROR +PhysHeapCreateHeapFromConfig(PPVRSRV_DEVICE_NODE psDevNode, + PHYS_HEAP_CONFIG *psConfig, + PHYS_HEAP **ppsPhysHeap); + +/*************************************************************************/ /*! +@Function PhysHeapCreate +@Description Create a new heap. Allocated and stored internally. + Destroy with PhysHeapDestroy when no longer required. +@Input psDevNode Pointer to device node struct +@Input psConfig Heap configuration. +@Input pvImplData Implementation specific data. Can be NULL. +@Input psImplFuncs Implementation specific function table. Must be + a valid pointer. +@Output ppsPhysHeap Pointer to the created heap. Must be a valid + pointer. +@Return PVRSRV_ERROR PVRSRV_OK or error code +*/ /**************************************************************************/ +PVRSRV_ERROR PhysHeapCreate(PPVRSRV_DEVICE_NODE psDevNode, + PHYS_HEAP_CONFIG *psConfig, + PHEAP_IMPL_DATA pvImplData, + PHEAP_IMPL_FUNCS *psImplFuncs, + PHYS_HEAP **ppsPhysHeap); + +void PhysHeapDestroy(PHYS_HEAP *psPhysHeap); + +PVRSRV_ERROR PhysHeapAcquire(PHYS_HEAP *psPhysHeap); + +/*************************************************************************/ /*! +@Function PhysHeapAcquireByUsage +@Description Acquire PhysHeap by usage flag. +@Input ui32UsageFlag PhysHeap usage flag +@Output ppsPhysHeap PhysHeap if found. +@Return PVRSRV_ERROR PVRSRV_OK or error code +*/ /**************************************************************************/ +PVRSRV_ERROR PhysHeapAcquireByUsage(PHYS_HEAP_USAGE_FLAGS ui32UsageFlag, + PHYS_HEAP **ppsPhysHeap); + +/*************************************************************************/ /*! +@Function PhysHeapAcquireByDevPhysHeap +@Description Acquire PhysHeap by DevPhysHeap. +@Input eDevPhysHeap Device Phys Heap. +@Input psDevNode Pointer to device node struct +@Output ppsPhysHeap PhysHeap if found. 
+@Return PVRSRV_ERROR PVRSRV_OK or error code +*/ /**************************************************************************/ +PVRSRV_ERROR PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP eDevPhysHeap, + PPVRSRV_DEVICE_NODE psDevNode, + PHYS_HEAP **ppsPhysHeap); + +void PhysHeapRelease(PHYS_HEAP *psPhysHeap); + +/*************************************************************************/ /*! +@Function PhysHeapGetImplData +@Description Get physical heap implementation specific data. +@Input psPhysHeap Pointer to physical heap. +@Input psConfig Heap configuration. +@Return pvImplData Implementation specific data. Can be NULL. +*/ /**************************************************************************/ +PHEAP_IMPL_DATA PhysHeapGetImplData(PHYS_HEAP *psPhysHeap); + +PHYS_HEAP_TYPE PhysHeapGetType(PHYS_HEAP *psPhysHeap); + +/*************************************************************************/ /*! +@Function PhysHeapGetFlags +@Description Get phys heap usage flags. +@Input psPhysHeap Pointer to physical heap. +@Return PHYS_HEAP_USAGE_FLAGS Phys heap usage flags. 
+*/ /**************************************************************************/ +PHYS_HEAP_USAGE_FLAGS PhysHeapGetFlags(PHYS_HEAP *psPhysHeap); + +PVRSRV_ERROR PhysHeapGetCpuPAddr(PHYS_HEAP *psPhysHeap, + IMG_CPU_PHYADDR *psCpuPAddr); + + +PVRSRV_ERROR PhysHeapGetSize(PHYS_HEAP *psPhysHeap, + IMG_UINT64 *puiSize); + +PVRSRV_ERROR PhysHeapGetDevPAddr(PHYS_HEAP *psPhysHeap, + IMG_DEV_PHYADDR *psDevPAddr); + +PVRSRV_ERROR PhysHeapGetSize(PHYS_HEAP *psPhysHeap, + IMG_UINT64 *puiSize); + +void PhysHeapCpuPAddrToDevPAddr(PHYS_HEAP *psPhysHeap, + IMG_UINT32 ui32NumOfAddr, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_CPU_PHYADDR *psCpuPAddr); + +void PhysHeapDevPAddrToCpuPAddr(PHYS_HEAP *psPhysHeap, + IMG_UINT32 ui32NumOfAddr, + IMG_CPU_PHYADDR *psCpuPAddr, + IMG_DEV_PHYADDR *psDevPAddr); + +IMG_CHAR *PhysHeapPDumpMemspaceName(PHYS_HEAP *psPhysHeap); + +/*************************************************************************/ /*! +@Function PhysHeapCreatePMR +@Description Function calls an implementation-specific function pointer. + See function pointer for details. +@Return PVRSRV_ERROR PVRSRV_OK or error code +*/ /**************************************************************************/ +PVRSRV_ERROR PhysHeapCreatePMR(PHYS_HEAP *psPhysHeap, + struct _CONNECTION_DATA_ *psConnection, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_UINT32 uiLog2PageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + const IMG_CHAR *pszAnnotation, + IMG_PID uiPid, + PMR **ppsPMRPtr, + IMG_UINT32 ui32PDumpFlags); + +PVRSRV_ERROR PhysHeapInit(void); +PVRSRV_ERROR PhysHeapDeinit(void); + +/*************************************************************************/ /*! +@Function PhysHeapDeviceNode +@Description Get pointer to the device node this heap belongs to. +@Input psPhysHeap Pointer to physical heap. +@Return PPVRSRV_DEVICE_NODE Pointer to device node. 
+*/ /**************************************************************************/ +PPVRSRV_DEVICE_NODE PhysHeapDeviceNode(PHYS_HEAP *psPhysHeap); + +/*************************************************************************/ /*! +@Function PhysHeapPVRLayerAcquire +@Description Is phys heap to be acquired in PVR layer? +@Input ePhysHeap phys heap +@Return IMG_BOOL return IMG_TRUE if yes +*/ /**************************************************************************/ +IMG_BOOL PhysHeapPVRLayerAcquire(PVRSRV_PHYS_HEAP ePhysHeap); + +/*************************************************************************/ /*! +@Function PhysHeapUserModeAlloc +@Description Is allocation from UM allowed? +@Input ePhysHeap phys heap +@Return IMG_BOOL return IMG_TRUE if yes +*/ /**************************************************************************/ +IMG_BOOL PhysHeapUserModeAlloc(PVRSRV_PHYS_HEAP ePhysHeap); + +#endif /* PHYSHEAP_H */ diff --git a/drivers/gpu/drm/phytium/octopus/physheap_config.h b/drivers/gpu/drm/phytium/octopus/physheap_config.h new file mode 100644 index 000000000000..920d1f9d8a66 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/physheap_config.h @@ -0,0 +1,142 @@ +/*************************************************************************/ /*! +@File physheap_config.h +@Title Physical heap Config API +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Physical heap configs are created in the system layer and + stored against each device node for use in the Services Server + common layer. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef PHYSHEAP_CONFIG_H +#define PHYSHEAP_CONFIG_H + +#include "img_types.h" +#include "pvrsrv_memallocflags.h" +#include "pvrsrv_memalloc_physheap.h" + +typedef IMG_UINT32 PHYS_HEAP_USAGE_FLAGS; + +#define PHYS_HEAP_USAGE_GPU_LOCAL (1< + +module_param(gPMRAllocFail, uint, 0644); +MODULE_PARM_DESC(gPMRAllocFail, "When number of PMR allocs reaches " + "this value, it will fail (default value is 0 which " + "means that alloc function will behave normally)."); +#endif /* defined(__linux__) */ +#endif /* defined(DEBUG) */ + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#include "process_stats.h" +#include "proc_stats.h" +#endif + +PVRSRV_ERROR DevPhysMemAlloc(PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 ui32MemSize, + IMG_UINT32 ui32Log2Align, + const IMG_UINT8 u8Value, + IMG_BOOL bInitPage, +#if defined(PDUMP) + const IMG_CHAR *pszDevSpace, + const IMG_CHAR *pszSymbolicAddress, + IMG_HANDLE *phHandlePtr, +#endif + IMG_HANDLE hMemHandle, + IMG_DEV_PHYADDR *psDevPhysAddr) +{ + void *pvCpuVAddr; + PVRSRV_ERROR eError; +#if defined(PDUMP) + IMG_CHAR szFilenameOut[PDUMP_PARAM_MAX_FILE_NAME]; + PDUMP_FILEOFFSET_T uiOffsetOut; + IMG_UINT32 ui32PageSize; + IMG_UINT32 ui32PDumpMemSize = ui32MemSize; +#endif + PG_HANDLE *psMemHandle; + IMG_UINT64 uiMask; + IMG_DEV_PHYADDR sDevPhysAddr_int; + IMG_PID uiPid = 0; + + psMemHandle = hMemHandle; + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + uiPid = psDevNode->eDevState == PVRSRV_DEVICE_STATE_INIT ? 
+ PVR_SYS_ALLOC_PID : OSGetCurrentClientProcessIDKM(); +#endif + + /* Allocate the pages */ + eError = psDevNode->sDevMMUPxSetup.pfnDevPxAlloc(psDevNode, + TRUNCATE_64BITS_TO_SIZE_T(ui32MemSize), + psMemHandle, + &sDevPhysAddr_int, + uiPid); + PVR_LOG_RETURN_IF_ERROR(eError, "pfnDevPxAlloc:1"); + + /* Check to see if the page allocator returned pages with our desired + * alignment, which is not unlikely + */ + uiMask = (1 << ui32Log2Align) - 1; + if (ui32Log2Align && (sDevPhysAddr_int.uiAddr & uiMask)) + { + /* use over allocation instead */ + psDevNode->sDevMMUPxSetup.pfnDevPxFree(psDevNode, psMemHandle); + + ui32MemSize += (IMG_UINT32) uiMask; + eError = psDevNode->sDevMMUPxSetup.pfnDevPxAlloc(psDevNode, + TRUNCATE_64BITS_TO_SIZE_T(ui32MemSize), + psMemHandle, + &sDevPhysAddr_int, + uiPid); + PVR_LOG_RETURN_IF_ERROR(eError, "pfnDevPxAlloc:2"); + + sDevPhysAddr_int.uiAddr += uiMask; + sDevPhysAddr_int.uiAddr &= ~uiMask; + } + *psDevPhysAddr = sDevPhysAddr_int; + +#if defined(PDUMP) + ui32PageSize = ui32Log2Align? 
(1 << ui32Log2Align) : OSGetPageSize(); + eError = PDumpMalloc(pszDevSpace, + pszSymbolicAddress, + ui32PDumpMemSize, + ui32PageSize, + IMG_FALSE, + 0, + phHandlePtr, + PDUMP_NONE); + if (PVRSRV_OK != eError) + { + PDUMPCOMMENT("Allocating pages failed"); + *phHandlePtr = NULL; + } +#endif + + if (bInitPage) + { + /*Map the page to the CPU VA space */ + eError = psDevNode->sDevMMUPxSetup.pfnDevPxMap(psDevNode, + psMemHandle, + ui32MemSize, + &sDevPhysAddr_int, + &pvCpuVAddr); + if (PVRSRV_OK != eError) + { + PVR_LOG_ERROR(eError, "DevPxMap"); + psDevNode->sDevMMUPxSetup.pfnDevPxFree(psDevNode, psMemHandle); + return eError; + } + + /*Fill the memory with given content */ + OSDeviceMemSet(pvCpuVAddr, u8Value, ui32MemSize); + + /*Map the page to the CPU VA space */ + eError = psDevNode->sDevMMUPxSetup.pfnDevPxClean(psDevNode, + psMemHandle, + 0, + ui32MemSize); + if (PVRSRV_OK != eError) + { + PVR_LOG_ERROR(eError, "DevPxClean"); + psDevNode->sDevMMUPxSetup.pfnDevPxUnMap(psDevNode, psMemHandle, pvCpuVAddr); + psDevNode->sDevMMUPxSetup.pfnDevPxFree(psDevNode, psMemHandle); + return eError; + } + +#if defined(PDUMP) + /* PDumping of the page contents can be done in two ways + * 1. Store the single byte init value to the .prm file + * and load the same value to the entire dummy page buffer + * This method requires lot of LDB's inserted into the out2.txt + * + * 2. Store the entire contents of the buffer to the .prm file + * and load them back. 
+ * This only needs a single LDB instruction in the .prm file + * and chosen this method + * size of .prm file might go up but that's not huge at least + * for this allocation + */ + /* Write the buffer contents to the prm file */ + eError = PDumpWriteParameterBlob(pvCpuVAddr, + ui32PDumpMemSize, + PDUMP_FLAGS_CONTINUOUS, + szFilenameOut, + sizeof(szFilenameOut), + &uiOffsetOut); + if (PVRSRV_OK == eError) + { + /* Load the buffer back to the allocated memory when playing the pdump */ + eError = PDumpPMRLDB(pszDevSpace, + pszSymbolicAddress, + 0, + ui32PDumpMemSize, + szFilenameOut, + uiOffsetOut, + PDUMP_FLAGS_CONTINUOUS); + if (PVRSRV_OK != eError) + { + PDUMP_ERROR(eError, "Failed to write LDB statement to script file"); + PVR_LOG_ERROR(eError, "PDumpPMRLDB"); + } + } + else if (eError != PVRSRV_ERROR_PDUMP_NOT_ALLOWED) + { + PDUMP_ERROR(eError, "Failed to write device allocation to parameter file"); + PVR_LOG_ERROR(eError, "PDumpWriteParameterBlob"); + } + else + { + /* Else write to parameter file prevented under the flags and + * current state of the driver so skip write to script and error IF. + * This is expected e.g., if not in the capture range. 
+ */ + eError = PVRSRV_OK; + } +#endif + + /* Unmap the page */ + psDevNode->sDevMMUPxSetup.pfnDevPxUnMap(psDevNode, + psMemHandle, + pvCpuVAddr); + } + + return PVRSRV_OK; +} + +void DevPhysMemFree(PVRSRV_DEVICE_NODE *psDevNode, +#if defined(PDUMP) + IMG_HANDLE hPDUMPMemHandle, +#endif + IMG_HANDLE hMemHandle) +{ + PG_HANDLE *psMemHandle; + + psMemHandle = hMemHandle; + psDevNode->sDevMMUPxSetup.pfnDevPxFree(psDevNode, psMemHandle); +#if defined(PDUMP) + if (NULL != hPDUMPMemHandle) + { + PDumpFree(hPDUMPMemHandle); + } +#endif + +} + + +/* Checks the input parameters and adjusts them if possible and necessary */ +static inline PVRSRV_ERROR _ValidateParams(IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 *puiLog2AllocPageSize, + IMG_DEVMEM_SIZE_T *puiSize, + PMR_SIZE_T *puiChunkSize) +{ + IMG_UINT32 uiLog2AllocPageSize = *puiLog2AllocPageSize; + IMG_DEVMEM_SIZE_T uiSize = *puiSize; + PMR_SIZE_T uiChunkSize = *puiChunkSize; + /* Sparse if we have different number of virtual and physical chunks plus + * in general all allocations with more than one virtual chunk */ + IMG_BOOL bIsSparse = (ui32NumVirtChunks != ui32NumPhysChunks || + ui32NumVirtChunks > 1) ? IMG_TRUE : IMG_FALSE; + + if (ui32NumPhysChunks == 0 && ui32NumVirtChunks == 0) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Number of physical chunks and number of virtual chunks " + "cannot be both 0", + __func__)); + + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Protect against ridiculous page sizes */ + if (uiLog2AllocPageSize > RGX_HEAP_2MB_PAGE_SHIFT) + { + PVR_DPF((PVR_DBG_ERROR, "Page size is too big: 2^%u.", uiLog2AllocPageSize)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Range check of the alloc size */ + if (uiSize >= 0x1000000000ULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Cancelling allocation request of over 64 GB. " + "This is likely a bug." 
+ , __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Fail if requesting coherency on one side but uncached on the other */ + if (PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags) && + (PVRSRV_CHECK_GPU_UNCACHED(uiFlags) || PVRSRV_CHECK_GPU_WRITE_COMBINE(uiFlags))) + { + PVR_DPF((PVR_DBG_ERROR, "Request for CPU coherency but specifying GPU uncached " + "Please use GPU cached flags for coherency.")); + return PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE; + } + + if (PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags) && + (PVRSRV_CHECK_CPU_UNCACHED(uiFlags) || PVRSRV_CHECK_CPU_WRITE_COMBINE(uiFlags))) + { + PVR_DPF((PVR_DBG_ERROR, "Request for GPU coherency but specifying CPU uncached " + "Please use CPU cached flags for coherency.")); + return PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE; + } + + if (PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags) && PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Zero on Alloc and Poison on Alloc are mutually exclusive.", + __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (bIsSparse) + { + /* For sparse we need correct parameters like a suitable page size.... */ + if (OSGetPageShift() > uiLog2AllocPageSize) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid log2-contiguity for sparse allocation. " + "Requested %u, required minimum %zd", + __func__, + uiLog2AllocPageSize, + OSGetPageShift() )); + + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* ... chunk size must be a equal to page size ... */ + if (uiChunkSize != (1 << uiLog2AllocPageSize)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid chunk size for sparse allocation. 
Requested " + "%#" IMG_UINT64_FMTSPECx ", must be same as page size %#x.", + __func__, uiChunkSize, 1 << uiLog2AllocPageSize)); + + return PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE; + } + + if (ui32NumVirtChunks * uiChunkSize != uiSize) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Total alloc size (%#" IMG_UINT64_FMTSPECx ") " + "is not equal to virtual chunks * chunk size " + "(%#" IMG_UINT64_FMTSPECx ")", + __func__, uiSize, ui32NumVirtChunks * uiChunkSize)); + + return PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE; + } + + if (ui32NumPhysChunks > ui32NumVirtChunks) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Number of physical chunks (%u) must not be greater " + "than number of virtual chunks (%u)", + __func__, + ui32NumPhysChunks, + ui32NumVirtChunks)); + + return PVRSRV_ERROR_INVALID_PARAMS; + } + } + else + { + /* + * Silently round up alignment/pagesize if request was less that PAGE_SHIFT + * because it would never be harmful for memory to be _more_ contiguous that + * was desired. + */ + uiLog2AllocPageSize = OSGetPageShift() > uiLog2AllocPageSize ? 
+ OSGetPageShift() : uiLog2AllocPageSize; + + /* Same for total size */ + uiSize = PVR_ALIGN(uiSize, (IMG_DEVMEM_SIZE_T)OSGetPageSize()); + *puiChunkSize = uiSize; + } + + if ((uiSize & ((1ULL << uiLog2AllocPageSize) - 1)) != 0) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Total size (%#" IMG_UINT64_FMTSPECx ") " + "must be a multiple of the requested contiguity (%" + IMG_UINT64_FMTSPEC ")", __func__, uiSize, + (IMG_UINT64) (1ULL << uiLog2AllocPageSize))); + return PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE; + } + + *puiLog2AllocPageSize = uiLog2AllocPageSize; + *puiSize = uiSize; + + return PVRSRV_OK; +} + +static PVRSRV_ERROR _DevPhysHeapFromFlags(PVRSRV_MEMALLOCFLAGS_T uiFlags, + PVRSRV_PHYS_HEAP *peDevPhysHeap) +{ + PVRSRV_PHYS_HEAP eHeap = PVRSRV_GET_PHYS_HEAP_HINT(uiFlags); + + switch (eHeap) + { + case PVRSRV_PHYS_HEAP_FW_PREMAP0: + case PVRSRV_PHYS_HEAP_FW_PREMAP1: + case PVRSRV_PHYS_HEAP_FW_PREMAP2: + case PVRSRV_PHYS_HEAP_FW_PREMAP3: + case PVRSRV_PHYS_HEAP_FW_PREMAP4: + case PVRSRV_PHYS_HEAP_FW_PREMAP5: + case PVRSRV_PHYS_HEAP_FW_PREMAP6: + case PVRSRV_PHYS_HEAP_FW_PREMAP7: + { + /* keep heap (with check) */ + PVR_RETURN_IF_INVALID_PARAM(PVRSRV_VZ_MODE_IS(HOST)); + break; + } + case PVRSRV_PHYS_HEAP_LAST: + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + default: + { + break; + } + } + + *peDevPhysHeap = eHeap; + + return PVRSRV_OK; +} + +PVRSRV_ERROR +PhysmemNewRamBackedPMR_direct(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_DEVMEM_SIZE_T uiSize, + PMR_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_UINT32 uiLog2AllocPageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 uiAnnotationLength, + const IMG_CHAR *pszAnnotation, + IMG_PID uiPid, + PMR **ppsPMRPtr, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + PVRSRV_PHYS_HEAP ePhysHeapIdx; + PFN_SYS_DEV_CHECK_MEM_ALLOC_SIZE pfnCheckMemAllocSize = + psDevNode->psDevConfig->pfnCheckMemAllocSize; + + 
PVR_UNREFERENCED_PARAMETER(uiAnnotationLength); + + eError = _ValidateParams(ui32NumPhysChunks, + ui32NumVirtChunks, + uiFlags, + &uiLog2AllocPageSize, + &uiSize, + &uiChunkSize); + PVR_RETURN_IF_ERROR(eError); + + eError = _DevPhysHeapFromFlags(uiFlags, &ePhysHeapIdx); + PVR_RETURN_IF_ERROR(eError); + + if (ePhysHeapIdx == PVRSRV_PHYS_HEAP_GPU_LOCAL) + { + if ((uiFlags & PVRSRV_MEMALLOCFLAGS_CPU_MAPPABLE_MASK) == 0) + { + ePhysHeapIdx = PVRSRV_PHYS_HEAP_GPU_PRIVATE; + } + else if (PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags) && + PVRSRVSystemSnoopingOfCPUCache(psDevNode->psDevConfig)) + { + ePhysHeapIdx = PVRSRV_PHYS_HEAP_GPU_COHERENT; + } + } + + if (NULL == psDevNode->apsPhysHeap[ePhysHeapIdx]) + { + /* In case a heap hasn't been acquired for this type, return invalid heap error */ + PVR_DPF((PVR_DBG_ERROR, "%s: Requested allocation on device node (%p) from " + "an invalid heap (HeapIndex=%d)", + __func__, psDevNode, ePhysHeapIdx)); + return PVRSRV_ERROR_INVALID_HEAP; + } + + /* Apply memory budgeting policy */ + if (pfnCheckMemAllocSize) + { + IMG_UINT64 uiMemSize = (IMG_UINT64)uiChunkSize * ui32NumPhysChunks; + + eError = pfnCheckMemAllocSize(psDevNode->psDevConfig->hSysData, uiMemSize); + PVR_RETURN_IF_ERROR(eError); + } + +#if defined(DEBUG) + if (gPMRAllocFail > 0) + { + static IMG_UINT32 ui32AllocCount = 1; + + if (ui32AllocCount < gPMRAllocFail) + { + ui32AllocCount++; + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s failed on %d allocation.", + __func__, ui32AllocCount)); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + } +#endif /* defined(DEBUG) */ + + /* If the driver is in an 'init' state all of the allocated memory + * should be attributed to the driver (PID 1) rather than to the + * process those allocations are made under. Same applies to the memory + * allocated for the Firmware. 
*/ + if (psDevNode->eDevState == PVRSRV_DEVICE_STATE_INIT || + PVRSRV_CHECK_FW_MAIN(uiFlags)) + { + uiPid = PVR_SYS_ALLOC_PID; + } + + eError = PhysHeapCreatePMR(psDevNode->apsPhysHeap[ePhysHeapIdx], + psConnection, + uiSize, + uiChunkSize, + ui32NumPhysChunks, + ui32NumVirtChunks, + pui32MappingTable, + uiLog2AllocPageSize, + uiFlags, + pszAnnotation, + uiPid, + ppsPMRPtr, + ui32PDumpFlags); + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + if (eError != PVRSRV_OK) + { + PVRSRVStatsUpdateOOMStats(PVRSRV_PROCESS_STAT_TYPE_OOM_PHYSMEM_COUNT, + OSGetCurrentClientProcessIDKM()); + } +#endif + + return eError; +} + +PVRSRV_ERROR +PhysmemNewRamBackedPMR(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_DEVMEM_SIZE_T uiSize, + PMR_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_UINT32 uiLog2AllocPageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 uiAnnotationLength, + const IMG_CHAR *pszAnnotation, + IMG_PID uiPid, + PMR **ppsPMRPtr, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_PHYS_HEAP ePhysHeap = PVRSRV_GET_PHYS_HEAP_HINT(uiFlags); + + PVR_LOG_RETURN_IF_INVALID_PARAM(uiAnnotationLength != 0, "uiAnnotationLength"); + PVR_LOG_RETURN_IF_INVALID_PARAM(pszAnnotation != NULL, "pszAnnotation"); + + if (!PhysHeapUserModeAlloc(ePhysHeap)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid phys heap hint: %d.", __func__, ePhysHeap)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + return PhysmemNewRamBackedPMR_direct(psConnection, + psDevNode, + uiSize, + uiChunkSize, + ui32NumPhysChunks, + ui32NumVirtChunks, + pui32MappingTable, + uiLog2AllocPageSize, + uiFlags, + uiAnnotationLength, + pszAnnotation, + uiPid, + ppsPMRPtr, + ui32PDumpFlags); +} + +PVRSRV_ERROR +PhysmemNewRamBackedLockedPMR(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_DEVMEM_SIZE_T uiSize, + PMR_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 
*pui32MappingTable, + IMG_UINT32 uiLog2PageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 uiAnnotationLength, + const IMG_CHAR *pszAnnotation, + IMG_PID uiPid, + PMR **ppsPMRPtr, + IMG_UINT32 ui32PDumpFlags) +{ + + PVRSRV_ERROR eError; + eError = PhysmemNewRamBackedPMR(psConnection, + psDevNode, + uiSize, + uiChunkSize, + ui32NumPhysChunks, + ui32NumVirtChunks, + pui32MappingTable, + uiLog2PageSize, + uiFlags, + uiAnnotationLength, + pszAnnotation, + uiPid, + ppsPMRPtr, + ui32PDumpFlags); + + if (eError == PVRSRV_OK) + { + eError = PMRLockSysPhysAddresses(*ppsPMRPtr); + } + + return eError; +} + +PVRSRV_ERROR +PVRSRVGetMaxDevMemSizeKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_DEVMEM_SIZE_T *puiLMASize, + IMG_DEVMEM_SIZE_T *puiUMASize ) +{ + IMG_DEVMEM_SIZE_T uiLMASize = 0; + IMG_DEVMEM_SIZE_T uiUMASize = 0; + PHYS_HEAP *psPhysHeap; + IMG_UINT uiHeapIndex; + IMG_UINT64 uiRegionSize; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + /* + * psDevNode->apsPhysHeap may contain duplicates so loop over all registered + * heaps instead. + */ + for (uiHeapIndex = 0; uiHeapIndex < psDevNode->ui32RegisteredPhysHeaps; uiHeapIndex++) + { + psPhysHeap = psDevNode->papsRegisteredPhysHeaps[uiHeapIndex]; + + if (psPhysHeap != psDevNode->apsPhysHeap[PVRSRV_PHYS_HEAP_GPU_LOCAL] && + psPhysHeap != psDevNode->apsPhysHeap[PVRSRV_PHYS_HEAP_CPU_LOCAL]) + { + continue; + } + + if (PhysHeapGetType(psPhysHeap) == PHYS_HEAP_TYPE_LMA) + { + + uiRegionSize = 0; + PhysHeapGetSize(psPhysHeap, &uiRegionSize); + uiLMASize += uiRegionSize; + } + else if (PhysHeapGetType(psPhysHeap) == PHYS_HEAP_TYPE_UMA) + { + if (uiUMASize == 0) + { + uiUMASize = OSGetRAMSize(); + } + } + } + + *puiLMASize = uiLMASize; + *puiUMASize = uiUMASize; + + return PVRSRV_OK; +} + +/* 'Wrapper' function to call PMRImportPMR(), which first checks the PMR is + * for the current device. 
This avoids the need to do this in pmr.c, which + * would then need PVRSRV_DEVICE_NODE (defining this type in pmr.h causes a + * typedef redefinition issue). + */ +PVRSRV_ERROR +PhysmemImportPMR(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + PMR_EXPORT *psPMRExport, + PMR_PASSWORD_T uiPassword, + PMR_SIZE_T uiSize, + PMR_LOG2ALIGN_T uiLog2Contig, + PMR **ppsPMR) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + + if (PMRGetExportDeviceNode(psPMRExport) != psDevNode) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PMR invalid for this device", __func__)); + return PVRSRV_ERROR_PMR_NOT_PERMITTED; + } + + return PMRImportPMR(psPMRExport, + uiPassword, + uiSize, + uiLog2Contig, + ppsPMR); +} diff --git a/drivers/gpu/drm/phytium/octopus/physmem.h b/drivers/gpu/drm/phytium/octopus/physmem.h new file mode 100644 index 000000000000..d8616982ebcc --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/physmem.h @@ -0,0 +1,249 @@ +/*************************************************************************/ /*! +@File +@Title Physmem header +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Header for common entry point for creation of RAM backed PMR's +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef SRVSRV_PHYSMEM_H +#define SRVSRV_PHYSMEM_H + +/* include/ */ +#include "img_types.h" +#include "pvrsrv_error.h" +#include "pvrsrv_memallocflags.h" +#include "connection_server.h" + +/* services/server/include/ */ +#include "pmr.h" +#include "pmr_impl.h" + +/* Valid values for TC_MEMORY_CONFIG configuration option */ +#define TC_MEMORY_LOCAL (1) +#define TC_MEMORY_HOST (2) +#define TC_MEMORY_HYBRID (3) + +/* Valid values for the PLATO_MEMORY_CONFIG configuration option */ +#define PLATO_MEMORY_LOCAL (1) +#define PLATO_MEMORY_HOST (2) +#define PLATO_MEMORY_HYBRID (3) + +/*************************************************************************/ /*! +@Function DevPhysMemAlloc +@Description Allocate memory from device specific heaps directly. +@Input psDevNode device node to operate on +@Input ui32MemSize Size of the memory to be allocated +@Input u8Value Value to be initialised to. +@Input bInitPage Flag to control initialisation +@Input pszDevSpace PDUMP memory space in which the + allocation is to be done +@Input pszSymbolicAddress Symbolic name of the allocation +@Input phHandlePtr PDUMP handle to the allocation +@Output hMemHandle Handle to the allocated memory +@Output psDevPhysAddr Device Physical address of allocated + page +@Return PVRSRV_OK if the allocation is successful +*/ /**************************************************************************/ +PVRSRV_ERROR +DevPhysMemAlloc(PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 ui32MemSize, + IMG_UINT32 ui32Log2Align, + const IMG_UINT8 u8Value, + IMG_BOOL bInitPage, +#if defined(PDUMP) + const IMG_CHAR *pszDevSpace, + const IMG_CHAR *pszSymbolicAddress, + IMG_HANDLE *phHandlePtr, +#endif + IMG_HANDLE hMemHandle, + IMG_DEV_PHYADDR *psDevPhysAddr); + +/*************************************************************************/ /*! +@Function DevPhysMemFree +@Description Free memory to device specific heaps directly. 
+@Input psDevNode device node to operate on +@Input hPDUMPMemHandle Pdump handle to allocated memory +@Input hMemHandle Devmem handle to allocated memory +@Return None +*/ /**************************************************************************/ +void +DevPhysMemFree(PVRSRV_DEVICE_NODE *psDevNode, +#if defined(PDUMP) + IMG_HANDLE hPDUMPMemHandle, +#endif + IMG_HANDLE hMemHandle); + +/* + * PhysmemNewRamBackedPMR + * + * This function will create a RAM backed PMR using the device specific + * callback, this allows control at a per-devicenode level to select the + * memory source thus supporting mixed UMA/LMA systems. + * + * The size must be a multiple of page size. The page size is specified in + * log2. It should be regarded as a minimum contiguity of which the + * resulting memory must be a multiple. It may be that this should be a fixed + * number. It may be that the allocation size needs to be a multiple of some + * coarser "page size" than that specified in the page size argument. + * For example, take an OS whose page granularity is a fixed 16kB, but the + * caller requests memory in page sizes of 4kB. The request can be satisfied + * if and only if the SIZE requested is a multiple of 16kB. If the arguments + * supplied are such that this OS cannot grant the request, + * PVRSRV_ERROR_INVALID_PARAMS will be returned. + * + * The caller should supply storage of a pointer. Upon successful return a + * PMR object will have been created and a pointer to it returned in the + * PMROut argument. + * + * A PMR successfully created should be destroyed with PhysmemUnrefPMR. + * + * Note that this function may cause memory allocations and on some operating + * systems this may cause scheduling events, so it is important that this + * function be called with interrupts enabled and in a context where + * scheduling events and memory allocations are permitted. + * + * The flags may be used by the implementation to change its behaviour if + * required. 
The flags will also be stored in the PMR as immutable metadata + * and returned to mmu_common when it asks for it. + * + * The PID specified is used to tie this allocation to the process context + * that the allocation is made on behalf of. + */ +PVRSRV_ERROR +PhysmemNewRamBackedPMR(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_UINT32 uiLog2PageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 uiAnnotationLength, + const IMG_CHAR *pszAnnotation, + IMG_PID uiPid, + PMR **ppsPMROut, + IMG_UINT32 ui32PDumpFlags); + +PVRSRV_ERROR +PhysmemNewRamBackedPMR_direct(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_UINT32 uiLog2PageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 uiAnnotationLength, + const IMG_CHAR *pszAnnotation, + IMG_PID uiPid, + PMR **ppsPMROut, + IMG_UINT32 ui32PDumpFlags); + +/* + * PhysmemNewRamBackedLockedPMR + * + * Same as function above but is additionally locking down the PMR. + * + * Get the physical memory and lock down the PMR directly, we do not want to + * defer the actual allocation to mapping time. + * + * In general the concept of on-demand allocations is not useful for + * allocations where we give the users the freedom to map and unmap memory at + * will. The user is not expecting their memory contents to suddenly vanish + * just because they unmapped the buffer. + * Even if they would know and be ok with it, we do not want to check for + * every page we unmap whether we have to unlock the underlying PMR. 
+*/ +PVRSRV_ERROR +PhysmemNewRamBackedLockedPMR(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_DEVMEM_SIZE_T uiSize, + PMR_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_UINT32 uiLog2PageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 uiAnnotationLength, + const IMG_CHAR *pszAnnotation, + IMG_PID uiPid, + PMR **ppsPMRPtr, + IMG_UINT32 ui32PDumpFlags); + +/*************************************************************************/ /*! +@Function PhysmemImportPMR +@Description Import PMR a previously exported PMR +@Input psPMRExport The exported PMR token +@Input uiPassword Authorisation password + for the PMR being imported +@Input uiSize Size of the PMR being imported + (for verification) +@Input uiLog2Contig Log2 continuity of the PMR being + imported (for verification) +@Output ppsPMR The imported PMR +@Return PVRSRV_ERROR_PMR_NOT_PERMITTED if not for the same device + PVRSRV_ERROR_PMR_WRONG_PASSWORD_OR_STALE_PMR if password incorrect + PVRSRV_ERROR_PMR_MISMATCHED_ATTRIBUTES if size or contiguity incorrect + PVRSRV_OK if successful +*/ /**************************************************************************/ +PVRSRV_ERROR +PhysmemImportPMR(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + PMR_EXPORT *psPMRExport, + PMR_PASSWORD_T uiPassword, + PMR_SIZE_T uiSize, + PMR_LOG2ALIGN_T uiLog2Contig, + PMR **ppsPMR); + +/*************************************************************************/ /*! 
+@Function PVRSRVGetMaxDevMemSizeKM +@Description Get the amount of device memory on current platform +@Output puiLMASize LMA memory size +@Output puiUMASize UMA memory size +@Return None +*/ /**************************************************************************/ +PVRSRV_ERROR +PVRSRVGetMaxDevMemSizeKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_DEVMEM_SIZE_T *puiLMASize, + IMG_DEVMEM_SIZE_T *puiUMASize); + +#endif /* SRVSRV_PHYSMEM_H */ diff --git a/drivers/gpu/drm/phytium/octopus/physmem_dmabuf.c b/drivers/gpu/drm/phytium/octopus/physmem_dmabuf.c new file mode 100644 index 000000000000..4ae0b13c4287 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/physmem_dmabuf.c @@ -0,0 +1,1279 @@ +/*************************************************************************/ /*! +@File physmem_dmabuf.c +@Title dmabuf memory allocator +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Part of the memory management. This module is responsible for + implementing the function callbacks for dmabuf memory. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+
+#include "physmem_dmabuf.h"
+#include "pvrsrv.h"
+#include "pmr.h"
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)) || defined(SUPPORT_ION) || defined(KERNEL_HAS_DMABUF_VMAP_MMAP)
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/dma-buf.h>
+#include <linux/scatterlist.h>
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+#include "allocmem.h"
+#include "osfunc.h"
+#include "pmr_impl.h"
+#include "hash.h"
+#include "private_data.h"
+#include "module_common.h"
+#include "pvr_ion_stats.h"
+
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+#include "ri_server.h"
+#endif
+
+#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS)
+#include "mmap_stats.h"
+#endif
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+
+#include "kernel_compatibility.h"
+
+/*
+ * dma_buf_ops
+ *
+ * These are all returning errors if used.
+ * The point is to prevent anyone outside of our driver from importing
+ * and using our dmabuf.
+ */ + +static int PVRDmaBufOpsAttach(struct dma_buf *psDmaBuf, +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) && \ + !((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) && (defined(CHROMIUMOS_KERNEL)))) + struct device *psDev, +#endif + struct dma_buf_attachment *psAttachment) +{ + return -ENOSYS; +} + +static struct sg_table *PVRDmaBufOpsMap(struct dma_buf_attachment *psAttachment, + enum dma_data_direction eDirection) +{ + /* Attach hasn't been called yet */ + return ERR_PTR(-EINVAL); +} + +static void PVRDmaBufOpsUnmap(struct dma_buf_attachment *psAttachment, + struct sg_table *psTable, + enum dma_data_direction eDirection) +{ +} + +static void PVRDmaBufOpsRelease(struct dma_buf *psDmaBuf) +{ + PMR *psPMR = (PMR *) psDmaBuf->priv; + + PMRUnrefPMR(psPMR); +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)) +static void *PVRDmaBufOpsKMap(struct dma_buf *psDmaBuf, unsigned long uiPageNum) +{ + return ERR_PTR(-ENOSYS); +} +#endif + +static int PVRDmaBufOpsMMap(struct dma_buf *psDmaBuf, struct vm_area_struct *psVMA) +{ + return -ENOSYS; +} + +static const struct dma_buf_ops sPVRDmaBufOps = +{ + .attach = PVRDmaBufOpsAttach, + .map_dma_buf = PVRDmaBufOpsMap, + .unmap_dma_buf = PVRDmaBufOpsUnmap, + .release = PVRDmaBufOpsRelease, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) && \ + !((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) && (defined(CHROMIUMOS_KERNEL)))) + .map_atomic = PVRDmaBufOpsKMap, +#endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)) + .map = PVRDmaBufOpsKMap, +#endif +#else + .kmap_atomic = PVRDmaBufOpsKMap, + .kmap = PVRDmaBufOpsKMap, +#endif + .mmap = PVRDmaBufOpsMMap, +}; + +/* end of dma_buf_ops */ + + +typedef struct _PMR_DMA_BUF_DATA_ +{ + /* Filled in at PMR create time */ + PHYS_HEAP *psPhysHeap; + struct dma_buf_attachment *psAttachment; + PFN_DESTROY_DMABUF_PMR pfnDestroy; + IMG_BOOL bPoisonOnFree; + + /* Mapping information. 
*/ + struct dma_buf_map sMap; + + /* Modified by PMR lock/unlock */ + struct sg_table *psSgTable; + IMG_DEV_PHYADDR *pasDevPhysAddr; + IMG_UINT32 ui32PhysPageCount; + IMG_UINT32 ui32VirtPageCount; +} PMR_DMA_BUF_DATA; + +/* Start size of the g_psDmaBufHash hash table */ +#define DMA_BUF_HASH_SIZE 20 + +static DEFINE_MUTEX(g_HashLock); + +static HASH_TABLE *g_psDmaBufHash; +static IMG_UINT32 g_ui32HashRefCount; + +#if defined(PVR_ANDROID_ION_USE_SG_LENGTH) +#define pvr_sg_length(sg) ((sg)->length) +#else +#define pvr_sg_length(sg) sg_dma_len(sg) +#endif + +/***************************************************************************** + * PMR callback functions * + *****************************************************************************/ + +static PVRSRV_ERROR PMRFinalizeDmaBuf(PMR_IMPL_PRIVDATA pvPriv) +{ + PMR_DMA_BUF_DATA *psPrivData = pvPriv; + struct dma_buf_attachment *psAttachment = psPrivData->psAttachment; + struct dma_buf *psDmaBuf = psAttachment->dmabuf; + struct sg_table *psSgTable = psPrivData->psSgTable; + PMR *psPMR; + PVRSRV_ERROR eError = PVRSRV_OK; + + if (psDmaBuf->ops != &sPVRDmaBufOps) + { + if (g_psDmaBufHash) + { + /* We have a hash table so check if we've seen this dmabuf before */ + psPMR = (PMR *) HASH_Retrieve(g_psDmaBufHash, (uintptr_t) psDmaBuf); + + if (psPMR) + { + if (!PMRIsPMRLive(psPMR)) + { + HASH_Remove(g_psDmaBufHash, (uintptr_t) psDmaBuf); + g_ui32HashRefCount--; + + if (g_ui32HashRefCount == 0) + { + HASH_Delete(g_psDmaBufHash); + g_psDmaBufHash = NULL; + } + } + else{ + eError = PVRSRV_ERROR_PMR_STILL_REFERENCED; + } + } + PVRSRVIonRemoveMemAllocRecord(psDmaBuf); + } + }else + { + psPMR = (PMR *) psDmaBuf->priv; + if (PMRIsPMRLive(psPMR)) + { + eError = PVRSRV_ERROR_PMR_STILL_REFERENCED; + } + + } + + if (PVRSRV_OK != eError) + { + return eError; + } + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT, + psPrivData->ui32PhysPageCount << PAGE_SHIFT, + 
OSGetCurrentClientProcessIDKM()); +#endif + + psPrivData->ui32PhysPageCount = 0; + + dma_buf_unmap_attachment(psAttachment, psSgTable, DMA_BIDIRECTIONAL); + + + if (psPrivData->bPoisonOnFree) + { + int err; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)) + int i; + void *pvKernAddr; +#else + struct dma_buf_map sMap; +#endif + + err = dma_buf_begin_cpu_access(psDmaBuf, DMA_FROM_DEVICE); + if (err) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to begin cpu access for free poisoning (err=%d)", + __func__, err)); + PVR_ASSERT(IMG_FALSE); + goto exit; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)) + err = dma_buf_vmap(psDmaBuf, &sMap); + if (err != 0 || sMap.vaddr == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to poison allocation before free", + __func__)); + PVR_ASSERT(IMG_FALSE); + goto exit_end_access; + } + + memset(sMap.vaddr, PVRSRV_POISON_ON_FREE_VALUE, psDmaBuf->size); + + dma_buf_vunmap(psDmaBuf, &sMap); +#else + for (i = 0; i < psDmaBuf->size / PAGE_SIZE; i++) + { + pvKernAddr = dma_buf_kmap(psDmaBuf, i); + if (IS_ERR_OR_NULL(pvKernAddr)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to poison allocation before free (err=%ld)", + __func__, pvKernAddr ? 
PTR_ERR(pvKernAddr) : -ENOMEM)); + PVR_ASSERT(IMG_FALSE); + goto exit_end_access; + } + + memset(pvKernAddr, PVRSRV_POISON_ON_FREE_VALUE, PAGE_SIZE); + + dma_buf_kunmap(psDmaBuf, i, pvKernAddr); + } +#endif + +exit_end_access: + do { + err = dma_buf_end_cpu_access(psDmaBuf, DMA_TO_DEVICE); + } while (err == -EAGAIN || err == -EINTR); + } + +exit: + if (psPrivData->pfnDestroy) + { + eError = psPrivData->pfnDestroy(psPrivData->psPhysHeap, psPrivData->psAttachment); + if (eError != PVRSRV_OK) + { + return eError; + } + } + + OSFreeMem(psPrivData->pasDevPhysAddr); + OSFreeMem(psPrivData); + + return PVRSRV_OK; +} + +static PVRSRV_ERROR PMRLockPhysAddressesDmaBuf(PMR_IMPL_PRIVDATA pvPriv) +{ + PVR_UNREFERENCED_PARAMETER(pvPriv); + return PVRSRV_OK; +} + +static PVRSRV_ERROR PMRUnlockPhysAddressesDmaBuf(PMR_IMPL_PRIVDATA pvPriv) +{ + PVR_UNREFERENCED_PARAMETER(pvPriv); + return PVRSRV_OK; +} + +static void PMRGetFactoryLock(void) +{ + mutex_lock(&g_HashLock); +} + +static void PMRReleaseFactoryLock(void) +{ + mutex_unlock(&g_HashLock); +} + +static PVRSRV_ERROR PMRDevPhysAddrDmaBuf(PMR_IMPL_PRIVDATA pvPriv, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32NumOfPages, + IMG_DEVMEM_OFFSET_T *puiOffset, + IMG_BOOL *pbValid, + IMG_DEV_PHYADDR *psDevPAddr) +{ + PMR_DMA_BUF_DATA *psPrivData = pvPriv; + IMG_UINT32 ui32PageIndex; + IMG_UINT32 idx; + + if (ui32Log2PageSize != PAGE_SHIFT) + { + return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY; + } + + for (idx=0; idx < ui32NumOfPages; idx++) + { + if (pbValid[idx]) + { + IMG_UINT32 ui32InPageOffset; + + ui32PageIndex = puiOffset[idx] >> PAGE_SHIFT; + ui32InPageOffset = puiOffset[idx] - ((IMG_DEVMEM_OFFSET_T)ui32PageIndex << PAGE_SHIFT); + + + PVR_ASSERT(ui32PageIndex < psPrivData->ui32VirtPageCount); + PVR_ASSERT(ui32InPageOffset < PAGE_SIZE); + psDevPAddr[idx].uiAddr = psPrivData->pasDevPhysAddr[ui32PageIndex].uiAddr + ui32InPageOffset; + } + } + return PVRSRV_OK; +} + +static PVRSRV_ERROR 
+PMRAcquireKernelMappingDataDmaBuf(PMR_IMPL_PRIVDATA pvPriv, + size_t uiOffset, + size_t uiSize, + void **ppvKernelAddressOut, + IMG_HANDLE *phHandleOut, + PMR_FLAGS_T ulFlags) +{ + PMR_DMA_BUF_DATA *psPrivData = pvPriv; + struct dma_buf *psDmaBuf = psPrivData->psAttachment->dmabuf; + PVRSRV_ERROR eError; + int err; + + if (psPrivData->ui32PhysPageCount != psPrivData->ui32VirtPageCount) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Kernel mappings for sparse DMABufs " + "are not allowed!", __func__)); + eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING; + goto fail; + } + + err = dma_buf_begin_cpu_access(psDmaBuf, DMA_BIDIRECTIONAL); + if (err) + { + eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING; + goto fail; + } + + err = dma_buf_vmap(psDmaBuf, &psPrivData->sMap); + if (err != 0 || psPrivData->sMap.vaddr == NULL) + { + eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING; + goto fail_kmap; + } + + *ppvKernelAddressOut = psPrivData->sMap.vaddr + uiOffset; + *phHandleOut = psPrivData->sMap.vaddr; + + return PVRSRV_OK; + +fail_kmap: + do { + err = dma_buf_end_cpu_access(psDmaBuf, DMA_BIDIRECTIONAL); + } while (err == -EAGAIN || err == -EINTR); + +fail: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +static void PMRReleaseKernelMappingDataDmaBuf(PMR_IMPL_PRIVDATA pvPriv, + IMG_HANDLE hHandle) +{ + PMR_DMA_BUF_DATA *psPrivData = pvPriv; + struct dma_buf *psDmaBuf = psPrivData->psAttachment->dmabuf; + int err; + + dma_buf_vunmap(psDmaBuf, &psPrivData->sMap); + + do { + err = dma_buf_end_cpu_access(psDmaBuf, DMA_BIDIRECTIONAL); + } while (err == -EAGAIN || err == -EINTR); +} + +static PVRSRV_ERROR PMRMMapDmaBuf(PMR_IMPL_PRIVDATA pvPriv, + PMR *psPMR, + PMR_MMAP_DATA pOSMMapData) +{ + PMR_DMA_BUF_DATA *psPrivData = pvPriv; + struct dma_buf *psDmaBuf = psPrivData->psAttachment->dmabuf; + struct vm_area_struct *psVma = pOSMMapData; + int err; + + if (psPrivData->ui32PhysPageCount != psPrivData->ui32VirtPageCount) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Not possible to MMAP sparse DMABufs", + 
__func__)); + return PVRSRV_ERROR_NOT_IMPLEMENTED; + } + + err = dma_buf_mmap(psDmaBuf, psVma, 0); + if (err) + { + return (err == -EINVAL) ? PVRSRV_ERROR_NOT_SUPPORTED : PVRSRV_ERROR_BAD_MAPPING; + } + +#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) + MMapStatsAddOrUpdatePMR(psPMR, psVma->vm_end - psVma->vm_start); +#endif + + return PVRSRV_OK; +} + +static PMR_IMPL_FUNCTAB _sPMRDmaBufFuncTab = +{ + .pfnLockPhysAddresses = PMRLockPhysAddressesDmaBuf, + .pfnUnlockPhysAddresses = PMRUnlockPhysAddressesDmaBuf, + .pfnDevPhysAddr = PMRDevPhysAddrDmaBuf, + .pfnAcquireKernelMappingData = PMRAcquireKernelMappingDataDmaBuf, + .pfnReleaseKernelMappingData = PMRReleaseKernelMappingDataDmaBuf, + .pfnMMap = PMRMMapDmaBuf, + .pfnFinalize = PMRFinalizeDmaBuf, + .pfnGetPMRFactoryLock = PMRGetFactoryLock, + .pfnReleasePMRFactoryLock = PMRReleaseFactoryLock, +}; + +/***************************************************************************** + * Public facing interface * + *****************************************************************************/ + +PVRSRV_ERROR +PhysmemCreateNewDmaBufBackedPMR(PHYS_HEAP *psHeap, + struct dma_buf_attachment *psAttachment, + PFN_DESTROY_DMABUF_PMR pfnDestroy, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_UINT32 ui32NameSize, + const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], + PMR **ppsPMRPtr) +{ + struct dma_buf *psDmaBuf = psAttachment->dmabuf; + PMR_DMA_BUF_DATA *psPrivData; + PMR_FLAGS_T uiPMRFlags; + IMG_BOOL bZeroOnAlloc; + IMG_BOOL bPoisonOnAlloc; + IMG_BOOL bPoisonOnFree; + PVRSRV_ERROR eError; + IMG_UINT32 i, j; + IMG_UINT32 uiPagesPerChunk = uiChunkSize >> PAGE_SHIFT; + IMG_UINT32 ui32PageCount = 0; + struct scatterlist *sg; + struct sg_table *table; + IMG_UINT32 uiSglOffset; + IMG_CHAR pszAnnotation[DEVMEM_ANNOTATION_MAX_LEN]; + + bZeroOnAlloc = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags); + bPoisonOnAlloc = 
PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags); + bPoisonOnFree = PVRSRV_CHECK_POISON_ON_FREE(uiFlags); + + if (bZeroOnAlloc && bPoisonOnFree) + { + /* Zero on Alloc and Poison on Alloc are mutually exclusive */ + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto errReturn; + } + + psPrivData = OSAllocZMem(sizeof(*psPrivData)); + if (psPrivData == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto errReturn; + } + + psPrivData->psPhysHeap = psHeap; + psPrivData->psAttachment = psAttachment; + psPrivData->pfnDestroy = pfnDestroy; + psPrivData->bPoisonOnFree = bPoisonOnFree; + psPrivData->ui32VirtPageCount = + (ui32NumVirtChunks * uiChunkSize) >> PAGE_SHIFT; + + psPrivData->pasDevPhysAddr = + OSAllocZMem(sizeof(*(psPrivData->pasDevPhysAddr)) * + psPrivData->ui32VirtPageCount); + if (!psPrivData->pasDevPhysAddr) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate buffer for physical addresses (oom)", + __func__)); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto errFreePrivData; + } + + if (bZeroOnAlloc || bPoisonOnAlloc) + { + int err; +#if (LINUX_VERSION_CODE = KERNEL_VERSION(5, 6, 0)) + err = dma_buf_vmap(psDmaBuf, &sMap); + if (err != 0 || sMap.vaddr == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map buffer for %s)", + __func__, bZeroOnAlloc ? "zeroing" : "poisoning")); + eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING; + + do { + err = dma_buf_end_cpu_access(psDmaBuf, DMA_TO_DEVICE); + } while (err == -EAGAIN || err == -EINTR); + + goto errFreePhysAddr; + } + + if (bZeroOnAlloc) + { + memset(sMap.vaddr, 0, psDmaBuf->size); + } + else + { + memset(sMap.vaddr, PVRSRV_POISON_ON_ALLOC_VALUE, psDmaBuf->size); + } + + dma_buf_vunmap(psDmaBuf, &sMap); +#else + for (i = 0; i < psDmaBuf->size / PAGE_SIZE; i++) + { + pvKernAddr = dma_buf_kmap(psDmaBuf, i); + if (IS_ERR_OR_NULL(pvKernAddr)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map page for %s (err=%ld)", + __func__, bZeroOnAlloc ? "zeroing" : "poisoning", + pvKernAddr ? 
PTR_ERR(pvKernAddr) : -ENOMEM)); + eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING; + + do { + err = dma_buf_end_cpu_access(psDmaBuf, DMA_TO_DEVICE); + } while (err == -EAGAIN || err == -EINTR); + + goto errFreePhysAddr; + } + + if (bZeroOnAlloc) + { + memset(pvKernAddr, 0, PAGE_SIZE); + } + else + { + memset(pvKernAddr, PVRSRV_POISON_ON_ALLOC_VALUE, PAGE_SIZE); + } + + dma_buf_kunmap(psDmaBuf, i, pvKernAddr); + } +#endif + + do { + err = dma_buf_end_cpu_access(psDmaBuf, DMA_TO_DEVICE); + } while (err == -EAGAIN || err == -EINTR); + } + + table = dma_buf_map_attachment(psAttachment, DMA_BIDIRECTIONAL); + if (IS_ERR_OR_NULL(table)) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto errFreePhysAddr; + } + + /* + * We do a two pass process: first work out how many pages there + * are and second, fill in the data. + */ + for_each_sg(table->sgl, sg, table->nents, i) + { + ui32PageCount += PAGE_ALIGN(pvr_sg_length(sg)) / PAGE_SIZE; + } + + if (WARN_ON(!ui32PageCount)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Number of phys. 
pages must not be zero", + __func__)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto errUnmap; + } + + if (WARN_ON(ui32PageCount != ui32NumPhysChunks * uiPagesPerChunk)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Requested physical chunks and actual " + "number of physical dma buf pages don't match", + __func__)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto errUnmap; + } + + psPrivData->ui32PhysPageCount = ui32PageCount; + psPrivData->psSgTable = table; + ui32PageCount = 0; + sg = table->sgl; + uiSglOffset = 0; + + + /* Fill physical address array */ + for (i = 0; i < ui32NumPhysChunks; i++) + { + for (j = 0; j < uiPagesPerChunk; j++) + { + IMG_UINT32 uiIdx = pui32MappingTable[i] * uiPagesPerChunk + j; + + psPrivData->pasDevPhysAddr[uiIdx].uiAddr = + sg_dma_address(sg) + uiSglOffset; + + /* Get the next offset for the current sgl or the next sgl */ + uiSglOffset += PAGE_SIZE; + if (uiSglOffset >= pvr_sg_length(sg)) + { + sg = sg_next(sg); + uiSglOffset = 0; + + /* Check that we haven't looped */ + if (WARN_ON(sg == table->sgl)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to fill phys. 
address " + "array", + __func__)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto errUnmap; + } + } + } + } + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT, + psPrivData->ui32PhysPageCount << PAGE_SHIFT, + OSGetCurrentClientProcessIDKM()); +#endif + + uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK); + + /* + * Check no significant bits were lost in cast due to different + * bit widths for flags + */ + PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK)); + + if (OSSNPrintf((IMG_CHAR *)pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN, "ImpDmaBuf:%s", (IMG_CHAR *)pszName) < 0) + { + pszAnnotation[0] = '\0'; + } + else + { + pszAnnotation[DEVMEM_ANNOTATION_MAX_LEN-1] = '\0'; + } + + eError = PMRCreatePMR(psHeap, + ui32NumVirtChunks * uiChunkSize, + uiChunkSize, + ui32NumPhysChunks, + ui32NumVirtChunks, + pui32MappingTable, + PAGE_SHIFT, + uiPMRFlags, + pszAnnotation, + &_sPMRDmaBufFuncTab, + psPrivData, + PMR_TYPE_DMABUF, + ppsPMRPtr, + PDUMP_NONE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create PMR (%s)", + __func__, PVRSRVGetErrorString(eError))); + goto errFreePhysAddr; + } + + return PVRSRV_OK; + +errUnmap: + dma_buf_unmap_attachment(psAttachment, table, DMA_BIDIRECTIONAL); +errFreePhysAddr: + OSFreeMem(psPrivData->pasDevPhysAddr); +errFreePrivData: + OSFreeMem(psPrivData); +errReturn: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +static PVRSRV_ERROR PhysmemDestroyDmaBuf(PHYS_HEAP *psHeap, + struct dma_buf_attachment *psAttachment) +{ + struct dma_buf *psDmaBuf = psAttachment->dmabuf; + + PVR_UNREFERENCED_PARAMETER(psHeap); + + dma_buf_detach(psDmaBuf, psAttachment); + dma_buf_put(psDmaBuf); + + return PVRSRV_OK; +} + +struct dma_buf * +PhysmemGetDmaBuf(PMR *psPMR) +{ + PMR_DMA_BUF_DATA *psPrivData; + + psPrivData = PMRGetPrivateData(psPMR, &_sPMRDmaBufFuncTab); + if (psPrivData) + { + return 
psPrivData->psAttachment->dmabuf; + } + + return NULL; +} + +PVRSRV_ERROR +PhysmemExportDmaBuf(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + PMR *psPMR, + IMG_INT *piFd) +{ + struct dma_buf *psDmaBuf; + IMG_DEVMEM_SIZE_T uiPMRSize; + PVRSRV_ERROR eError; + IMG_INT iFd; + + mutex_lock(&g_HashLock); + + PMRRefPMR(psPMR); + + eError = PMR_LogicalSize(psPMR, &uiPMRSize); + if (eError != PVRSRV_OK) + { + goto fail_pmr_ref; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) + { + DEFINE_DMA_BUF_EXPORT_INFO(sDmaBufExportInfo); + + sDmaBufExportInfo.priv = psPMR; + sDmaBufExportInfo.ops = &sPVRDmaBufOps; + sDmaBufExportInfo.size = uiPMRSize; + sDmaBufExportInfo.flags = O_RDWR; + + psDmaBuf = dma_buf_export(&sDmaBufExportInfo); + } +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)) + psDmaBuf = dma_buf_export(psPMR, &sPVRDmaBufOps, + uiPMRSize, O_RDWR, NULL); +#else + psDmaBuf = dma_buf_export(psPMR, &sPVRDmaBufOps, + uiPMRSize, O_RDWR); +#endif + + if (IS_ERR_OR_NULL(psDmaBuf)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to export buffer (err=%ld)", + __func__, psDmaBuf ? 
PTR_ERR(psDmaBuf) : -ENOMEM)); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_pmr_ref; + } + + iFd = dma_buf_fd(psDmaBuf, O_RDWR); + if (iFd < 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get dma-buf fd (err=%d)", + __func__, iFd)); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_dma_buf; + } + + mutex_unlock(&g_HashLock); + *piFd = iFd; + + /* A PMR memory lay out can't change once exported + * This makes sure the exported and imported parties see + * the same layout of the memory */ + PMR_SetLayoutFixed(psPMR, IMG_TRUE); + + return PVRSRV_OK; + +fail_dma_buf: + dma_buf_put(psDmaBuf); + +fail_pmr_ref: + mutex_unlock(&g_HashLock); + PMRUnrefPMR(psPMR); + + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +PVRSRV_ERROR +PhysmemImportDmaBuf(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_INT fd, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 ui32NameSize, + const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], + PMR **ppsPMRPtr, + IMG_DEVMEM_SIZE_T *puiSize, + IMG_DEVMEM_ALIGN_T *puiAlign) +{ + IMG_DEVMEM_SIZE_T uiSize; + IMG_UINT32 ui32MappingTable = 0; + struct dma_buf *psDmaBuf; + PVRSRV_ERROR eError; + + /* Get the buffer handle */ + psDmaBuf = dma_buf_get(fd); + if (IS_ERR_OR_NULL(psDmaBuf)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get dma-buf from fd (err=%ld)", + __func__, psDmaBuf ? 
PTR_ERR(psDmaBuf) : -ENOMEM)); + return PVRSRV_ERROR_BAD_MAPPING; + + } + + uiSize = psDmaBuf->size; + + eError = PhysmemImportSparseDmaBuf(psConnection, + psDevNode, + fd, + uiFlags, + uiSize, + 1, + 1, + &ui32MappingTable, + ui32NameSize, + pszName, + ppsPMRPtr, + puiSize, + puiAlign); + + dma_buf_put(psDmaBuf); + + return eError; +} + +PVRSRV_ERROR +PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_INT fd, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_UINT32 ui32NameSize, + const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], + PMR **ppsPMRPtr, + IMG_DEVMEM_SIZE_T *puiSize, + IMG_DEVMEM_ALIGN_T *puiAlign) +{ + PMR *psPMR = NULL; + struct dma_buf_attachment *psAttachment; + struct dma_buf *psDmaBuf; + PVRSRV_ERROR eError; + IMG_BOOL bHashTableCreated = IMG_FALSE; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + if (!psDevNode) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto errReturn; + } + + /* Terminate string from bridge to prevent corrupt annotations in RI */ + if (pszName != NULL) + { + IMG_CHAR* pszName0 = (IMG_CHAR*) pszName; + pszName0[ui32NameSize-1] = '\0'; + } + + mutex_lock(&g_HashLock); + + /* Get the buffer handle */ + psDmaBuf = dma_buf_get(fd); + if (IS_ERR_OR_NULL(psDmaBuf)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get dma-buf from fd (err=%ld)", + __func__, psDmaBuf ? 
PTR_ERR(psDmaBuf) : -ENOMEM)); + eError = PVRSRV_ERROR_BAD_MAPPING; + goto errUnlockReturn; + } + + if (psDmaBuf->ops == &sPVRDmaBufOps) + { + PVRSRV_DEVICE_NODE *psPMRDevNode; + + /* We exported this dma_buf, so we can just get its PMR */ + psPMR = (PMR *) psDmaBuf->priv; + + /* However, we can't import it if it belongs to a different device */ + psPMRDevNode = PMR_DeviceNode(psPMR); + if (psPMRDevNode != psDevNode) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PMR invalid for this device", + __func__)); + eError = PVRSRV_ERROR_PMR_NOT_PERMITTED; + goto err; + } + } + else + { + if (g_psDmaBufHash) + { + /* We have a hash table so check if we've seen this dmabuf before */ + psPMR = (PMR *) HASH_Retrieve(g_psDmaBufHash, (uintptr_t) psDmaBuf); + } + else + { + /* + * As different processes may import the same dmabuf we need to + * create a hash table so we don't generate a duplicate PMR but + * rather just take a reference on an existing one. + */ + g_psDmaBufHash = HASH_Create(DMA_BUF_HASH_SIZE); + if (!g_psDmaBufHash) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err; + } + bHashTableCreated = IMG_TRUE; + } + } + + if (psPMR) + { + /* Reuse the PMR we already created */ + PMRRefPMR(psPMR); + + *ppsPMRPtr = psPMR; + PMR_LogicalSize(psPMR, puiSize); + *puiAlign = PAGE_SIZE; + } + /* No errors so far */ + eError = PVRSRV_OK; + +err: + if (psPMR || (PVRSRV_OK != eError)) + { + mutex_unlock(&g_HashLock); + dma_buf_put(psDmaBuf); + + if (PVRSRV_OK == eError) + { + /* + * We expect a PMR to be immutable at this point + * But its explicitly set here to cover a corner case + * where a PMR created through non-DMA interface could be + * imported back again through DMA interface */ + PMR_SetLayoutFixed(psPMR, IMG_TRUE); + } + return eError; + } + + /* Do we want this to be a sparse PMR? 
*/ + if (ui32NumVirtChunks > 1) + { + IMG_UINT32 i; + + /* Parameter validation */ + if (psDmaBuf->size != (uiChunkSize * ui32NumPhysChunks) || + uiChunkSize != PAGE_SIZE || + ui32NumPhysChunks > ui32NumVirtChunks) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Requesting sparse buffer: " + "uiChunkSize ("IMG_DEVMEM_SIZE_FMTSPEC") must be equal to " + "OS page size (%lu). uiChunkSize * ui32NumPhysChunks " + "("IMG_DEVMEM_SIZE_FMTSPEC") must" + " be equal to the buffer size ("IMG_SIZE_FMTSPEC"). " + "ui32NumPhysChunks (%u) must be lesser or equal to " + "ui32NumVirtChunks (%u)", + __func__, + uiChunkSize, + PAGE_SIZE, + uiChunkSize * ui32NumPhysChunks, + psDmaBuf->size, + ui32NumPhysChunks, + ui32NumVirtChunks)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto errUnlockAndDMAPut; + } + + /* Parameter validation - Mapping table entries*/ + for (i = 0; i < ui32NumPhysChunks; i++) + { + if (pui32MappingTable[i] > ui32NumVirtChunks) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Requesting sparse buffer: " + "Entry in mapping table (%u) is out of allocation " + "bounds (%u)", + __func__, + (IMG_UINT32) pui32MappingTable[i], + (IMG_UINT32) ui32NumVirtChunks)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto errUnlockAndDMAPut; + } + } + } + else + { + /* if ui32NumPhysChunks == 0 pui32MappingTable is NULL and because + * is ui32NumPhysChunks is set to 1 below we don't allow NULL array */ + if (pui32MappingTable == NULL) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto errUnlockAndDMAPut; + } + + /* Make sure parameters are valid for non-sparse allocations as well */ + uiChunkSize = psDmaBuf->size; + ui32NumPhysChunks = 1; + ui32NumVirtChunks = 1; + } + + + psAttachment = dma_buf_attach(psDmaBuf, psDevNode->psDevConfig->pvOSDevice); + if (IS_ERR_OR_NULL(psAttachment)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to attach to dma-buf (err=%ld)", + __func__, psAttachment? 
PTR_ERR(psAttachment) : -ENOMEM)); + eError = PVRSRV_ERROR_BAD_MAPPING; + goto errUnlockAndDMAPut; + } + + /* + * Note: + * While we have no way to determine the type of the buffer we just + * assume that all dmabufs are from the same physical heap. + */ + eError = PhysmemCreateNewDmaBufBackedPMR(psDevNode->apsPhysHeap[PVRSRV_PHYS_HEAP_EXTERNAL], + psAttachment, + PhysmemDestroyDmaBuf, + uiFlags, + uiChunkSize, + ui32NumPhysChunks, + ui32NumVirtChunks, + pui32MappingTable, + ui32NameSize, + pszName, + &psPMR); + if (eError != PVRSRV_OK) + { + goto errDMADetach; + } + + /* First time we've seen this dmabuf so store it in the hash table */ + HASH_Insert(g_psDmaBufHash, (uintptr_t) psDmaBuf, (uintptr_t) psPMR); + g_ui32HashRefCount++; + + mutex_unlock(&g_HashLock); + + PVRSRVIonAddMemAllocRecord(psDmaBuf); + + *ppsPMRPtr = psPMR; + *puiSize = ui32NumVirtChunks * uiChunkSize; + *puiAlign = PAGE_SIZE; + + /* The memory that's just imported is owned by some other entity. + * Hence the memory layout cannot be changed through our API */ + PMR_SetLayoutFixed(psPMR, IMG_TRUE); + + return PVRSRV_OK; + +errDMADetach: + dma_buf_detach(psDmaBuf, psAttachment); + +errUnlockAndDMAPut: + if (IMG_TRUE == bHashTableCreated) + { + HASH_Delete(g_psDmaBufHash); + g_psDmaBufHash = NULL; + } + dma_buf_put(psDmaBuf); + +errUnlockReturn: + mutex_unlock(&g_HashLock); + +errReturn: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) || defined(SUPPORT_ION) */ + +PVRSRV_ERROR +PhysmemCreateNewDmaBufBackedPMR(PHYS_HEAP *psHeap, + struct dma_buf_attachment *psAttachment, + PFN_DESTROY_DMABUF_PMR pfnDestroy, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_UINT32 ui32NameSize, + const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], + PMR **ppsPMRPtr) +{ + PVR_UNREFERENCED_PARAMETER(psHeap); + 
PVR_UNREFERENCED_PARAMETER(psAttachment); + PVR_UNREFERENCED_PARAMETER(pfnDestroy); + PVR_UNREFERENCED_PARAMETER(uiFlags); + PVR_UNREFERENCED_PARAMETER(uiChunkSize); + PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks); + PVR_UNREFERENCED_PARAMETER(ui32NumVirtChunks); + PVR_UNREFERENCED_PARAMETER(pui32MappingTable); + PVR_UNREFERENCED_PARAMETER(ui32NameSize); + PVR_UNREFERENCED_PARAMETER(pszName); + PVR_UNREFERENCED_PARAMETER(ppsPMRPtr); + + return PVRSRV_ERROR_NOT_SUPPORTED; +} + +struct dma_buf * +PhysmemGetDmaBuf(PMR *psPMR) +{ + PVR_UNREFERENCED_PARAMETER(psPMR); + + return NULL; +} + +PVRSRV_ERROR +PhysmemExportDmaBuf(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + PMR *psPMR, + IMG_INT *piFd) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDevNode); + PVR_UNREFERENCED_PARAMETER(psPMR); + PVR_UNREFERENCED_PARAMETER(piFd); + + return PVRSRV_ERROR_NOT_SUPPORTED; +} + +PVRSRV_ERROR +PhysmemImportDmaBuf(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_INT fd, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 ui32NameSize, + const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], + PMR **ppsPMRPtr, + IMG_DEVMEM_SIZE_T *puiSize, + IMG_DEVMEM_ALIGN_T *puiAlign) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDevNode); + PVR_UNREFERENCED_PARAMETER(fd); + PVR_UNREFERENCED_PARAMETER(uiFlags); + PVR_UNREFERENCED_PARAMETER(ui32NameSize); + PVR_UNREFERENCED_PARAMETER(pszName); + PVR_UNREFERENCED_PARAMETER(ppsPMRPtr); + PVR_UNREFERENCED_PARAMETER(puiSize); + PVR_UNREFERENCED_PARAMETER(puiAlign); + + return PVRSRV_ERROR_NOT_SUPPORTED; +} + +PVRSRV_ERROR +PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_INT fd, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_UINT32 ui32NameSize, + const IMG_CHAR 
pszName[DEVMEM_ANNOTATION_MAX_LEN], + PMR **ppsPMRPtr, + IMG_DEVMEM_SIZE_T *puiSize, + IMG_DEVMEM_ALIGN_T *puiAlign) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDevNode); + PVR_UNREFERENCED_PARAMETER(fd); + PVR_UNREFERENCED_PARAMETER(uiFlags); + PVR_UNREFERENCED_PARAMETER(ppsPMRPtr); + PVR_UNREFERENCED_PARAMETER(puiSize); + PVR_UNREFERENCED_PARAMETER(puiAlign); + PVR_UNREFERENCED_PARAMETER(uiChunkSize); + PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks); + PVR_UNREFERENCED_PARAMETER(ui32NumVirtChunks); + PVR_UNREFERENCED_PARAMETER(pui32MappingTable); + PVR_UNREFERENCED_PARAMETER(ui32NameSize); + PVR_UNREFERENCED_PARAMETER(pszName); + + return PVRSRV_ERROR_NOT_SUPPORTED; +} +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) || defined(SUPPORT_ION) || defined(KERNEL_HAS_DMABUF_VMAP_MMAP) */ diff --git a/drivers/gpu/drm/phytium/octopus/physmem_dmabuf.h b/drivers/gpu/drm/phytium/octopus/physmem_dmabuf.h new file mode 100644 index 000000000000..332c66057ad0 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/physmem_dmabuf.h @@ -0,0 +1,113 @@ +/**************************************************************************/ /*! +@File physmem_dmabuf.h +@Title Header for dmabuf PMR factory +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Part of the memory management. This module is responsible for + implementing the function callbacks importing Ion allocations +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /***************************************************************************/ + +#if !defined(PHYSMEM_DMABUF_H) +#define PHYSMEM_DMABUF_H + +#include + +#if defined(__KERNEL__) && defined(__linux__) && !defined(__GENKSYMS__) +#define __pvrsrv_defined_struct_enum__ +#include +#endif + +#include "img_types.h" +#include "pvrsrv_error.h" +#include "pvrsrv_memallocflags.h" +#include "connection_server.h" + +#include "pmr.h" + +typedef PVRSRV_ERROR (*PFN_DESTROY_DMABUF_PMR)(PHYS_HEAP *psHeap, + struct dma_buf_attachment *psAttachment); + +PVRSRV_ERROR +PhysmemCreateNewDmaBufBackedPMR(PHYS_HEAP *psHeap, + struct dma_buf_attachment *psAttachment, + PFN_DESTROY_DMABUF_PMR pfnDestroy, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_UINT32 ui32NameSize, + const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], + PMR **ppsPMRPtr); + +struct dma_buf * +PhysmemGetDmaBuf(PMR *psPMR); + +PVRSRV_ERROR +PhysmemExportDmaBuf(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + PMR *psPMR, + IMG_INT *piFd); + +PVRSRV_ERROR +PhysmemImportDmaBuf(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_INT fd, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32 ui32NameSize, + const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], + PMR **ppsPMRPtr, + IMG_DEVMEM_SIZE_T *puiSize, + IMG_DEVMEM_ALIGN_T *puiAlign); + +PVRSRV_ERROR +PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_INT fd, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_UINT32 ui32NameSize, + const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], + PMR **ppsPMRPtr, + IMG_DEVMEM_SIZE_T *puiSize, + IMG_DEVMEM_ALIGN_T *puiAlign); + +#endif /* !defined(PHYSMEM_DMABUF_H) */ diff --git a/drivers/gpu/drm/phytium/octopus/physmem_hostmem.c 
b/drivers/gpu/drm/phytium/octopus/physmem_hostmem.c new file mode 100644 index 000000000000..85bab51ed1b3 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/physmem_hostmem.c @@ -0,0 +1,206 @@ +/*************************************************************************/ /*! +@File physmem_hostmem.c +@Title Host memory device node functions +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Functions relevant to device memory allocations made from host + mem device node. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "physmem_hostmem.h" + +#include "img_defs.h" +#include "img_types.h" +#include "allocmem.h" +#include "physheap.h" +#include "pvrsrv_device.h" +#include "physheap.h" +#include "physmem_osmem.h" + +static void HostMemCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_CPU_PHYADDR *psCpuPAddr); + +static void HostMemDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_CPU_PHYADDR *psCpuPAddr, + IMG_DEV_PHYADDR *psDevPAddr); + +/* heap callbacks for host driver's device's heap */ +static PHYS_HEAP_FUNCTIONS gsHostMemDevPhysHeapFuncs = +{ + /* pfnCpuPAddrToDevPAddr */ + HostMemCpuPAddrToDevPAddr, + /* pfnDevPAddrToCpuPAddr */ + HostMemDevPAddrToCpuPAddr, +}; + +static PVRSRV_DEVICE_CONFIG gsHostMemDevConfig[]; + +/* heap configuration for host driver's device */ +static PHYS_HEAP_CONFIG gsPhysHeapConfigHostMemDevice[] = +{ + { + PHYS_HEAP_TYPE_UMA, + "SYSMEM", + &gsHostMemDevPhysHeapFuncs, + {0}, + {0}, + 0, + (IMG_HANDLE)&gsHostMemDevConfig[0], + PHYS_HEAP_USAGE_CPU_LOCAL, + } +}; + +/* device configuration for host driver's device */ +static PVRSRV_DEVICE_CONFIG gsHostMemDevConfig[] = +{ + { + .pszName = "HostMemDevice", + .eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE, + .pasPhysHeaps = 
&gsPhysHeapConfigHostMemDevice[0], + .ui32PhysHeapCount = ARRAY_SIZE(gsPhysHeapConfigHostMemDevice), + } +}; + +static void HostMemCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_CPU_PHYADDR *psCpuPAddr) +{ + PVR_UNREFERENCED_PARAMETER(hPrivData); + /* Optimise common case */ + psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr; + if (ui32NumOfAddr > 1) + { + IMG_UINT32 ui32Idx; + for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx) + { + psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr; + } + } +} + +static void HostMemDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_CPU_PHYADDR *psCpuPAddr, + IMG_DEV_PHYADDR *psDevPAddr) +{ + PVR_UNREFERENCED_PARAMETER(hPrivData); + /* Optimise common case */ + psCpuPAddr[0].uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(psDevPAddr[0].uiAddr); + if (ui32NumOfAddr > 1) + { + IMG_UINT32 ui32Idx; + for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx) + { + psCpuPAddr[ui32Idx].uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(psDevPAddr[ui32Idx].uiAddr); + } + } +} + +PVRSRV_ERROR HostMemDeviceCreate(PVRSRV_DEVICE_NODE **ppsDeviceNode) +{ + PVRSRV_ERROR eError; + PVRSRV_DEVICE_NODE *psDeviceNode; + PVRSRV_DEVICE_CONFIG *psDevConfig = &gsHostMemDevConfig[0]; + + /* Assert ensures HostMemory device isn't already created and + * that data is initialised */ + PVR_ASSERT(*ppsDeviceNode == NULL); + + /* for now, we only know a single heap (UMA) config for host device */ + PVR_ASSERT(psDevConfig->ui32PhysHeapCount == 1 && + psDevConfig->pasPhysHeaps[0].eType == PHYS_HEAP_TYPE_UMA); + + /* N.B.- In case of any failures in this function, we just return error to + the caller, as clean-up is taken care by _HostMemDeviceDestroy function */ + + psDeviceNode = OSAllocZMem(sizeof(*psDeviceNode)); + PVR_LOG_RETURN_IF_NOMEM(psDeviceNode, "OSAllocZMem"); + + /* early save return pointer to aid clean-up */ + *ppsDeviceNode = psDeviceNode; + + psDeviceNode->psDevConfig = psDevConfig; + 
psDeviceNode->papsRegisteredPhysHeaps = + OSAllocZMem(sizeof(*psDeviceNode->papsRegisteredPhysHeaps) * + psDevConfig->ui32PhysHeapCount); + PVR_LOG_RETURN_IF_NOMEM(psDeviceNode->papsRegisteredPhysHeaps, "OSAllocZMem"); + + eError = PhysHeapCreateHeapFromConfig(psDeviceNode, + &psDevConfig->pasPhysHeaps[0], + &psDeviceNode->papsRegisteredPhysHeaps[0]); + PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapCreateHeapFromConfig"); + psDeviceNode->ui32RegisteredPhysHeaps = 1; + + /* Only CPU local heap is valid on host-mem DevNode, so enable minimal callbacks */ + eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_CPU_LOCAL, + psDeviceNode, + &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_CPU_LOCAL]); + PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapAcquire"); + + return PVRSRV_OK; +} + +void HostMemDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + if (!psDeviceNode) + { + return; + } + + if (psDeviceNode->papsRegisteredPhysHeaps) + { + if (psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_CPU_LOCAL]) + { + PhysHeapRelease(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_CPU_LOCAL]); + } + + if (psDeviceNode->papsRegisteredPhysHeaps[0]) + { + /* clean-up function as well is aware of only one heap */ + PVR_ASSERT(psDeviceNode->ui32RegisteredPhysHeaps == 1); + PhysHeapDestroy(psDeviceNode->papsRegisteredPhysHeaps[0]); + } + + OSFreeMem(psDeviceNode->papsRegisteredPhysHeaps); + } + OSFreeMem(psDeviceNode); +} diff --git a/drivers/gpu/drm/phytium/octopus/physmem_hostmem.h b/drivers/gpu/drm/phytium/octopus/physmem_hostmem.h new file mode 100644 index 000000000000..8ffcd4ed64a7 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/physmem_hostmem.h @@ -0,0 +1,65 @@ +/*************************************************************************/ /*! +@File physmem_hostmem.h +@Title Host memory device node header +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Description +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(PHYSMEM_HOSTMEM_H) +#define PHYSMEM_HOSTMEM_H + +#include "pvrsrv_device.h" +#include "device.h" + +/*************************************************************************/ /*! +@Function HostMemDeviceCreate +@Description Allocate memory for and create host memory device node. +@Output ppsDeviceNode Pointer to device node pointer. +@Return PVRSRV_ERROR PVRSRV_OK or error code +*/ /**************************************************************************/ +PVRSRV_ERROR HostMemDeviceCreate(PVRSRV_DEVICE_NODE **ppsDeviceNode); + +/*************************************************************************/ /*! +@Function HostMemDeviceDestroy +@Description Destroy host memory device node. +@Input psDeviceNode Pointer to device node. +*/ /**************************************************************************/ +void HostMemDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode); + +#endif /* !defined(PHYSMEM_HOSTMEM_H) */ diff --git a/drivers/gpu/drm/phytium/octopus/physmem_lma.c b/drivers/gpu/drm/phytium/octopus/physmem_lma.c new file mode 100644 index 000000000000..c34739747456 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/physmem_lma.c @@ -0,0 +1,1753 @@ +/*************************************************************************/ /*! 
+@File physmem_lma.c +@Title Local card memory allocator +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Part of the memory management. This module is responsible for + implementing the function callbacks for local card memory. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "img_types.h" +#include "img_defs.h" +#include "pvr_debug.h" +#include "pvrsrv_error.h" +#include "pvrsrv_memallocflags.h" +#include "rgx_pdump_panics.h" +#include "allocmem.h" +#include "osfunc.h" +#include "pvrsrv.h" +#include "devicemem_server_utils.h" +#include "physmem_lma.h" +#include "pdump_km.h" +#include "pmr.h" +#include "pmr_impl.h" +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#include "process_stats.h" +#endif + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +#include "rgxutils.h" +#endif + +#if defined(INTEGRITY_OS) +#include "mm.h" +#include "integrity_memobject.h" +#endif + +/* Since 0x0 is a valid DevPAddr, we rely on max 64-bit value to be an invalid + * page address */ +#define INVALID_PAGE_ADDR ~((IMG_UINT64)0x0) + +typedef struct _PMR_LMALLOCARRAY_DATA_ { + IMG_PID uiPid; + IMG_INT32 iNumPagesAllocated; + /* + * uiTotalNumPages: + * Total number of pages supported by this PMR. + * (Fixed as of now due the fixed Page table array size) + */ + IMG_UINT32 uiTotalNumPages; + IMG_UINT32 uiPagesToAlloc; + + IMG_UINT32 uiLog2AllocSize; + IMG_UINT32 uiContigAllocSize; + IMG_DEV_PHYADDR *pasDevPAddr; + + IMG_BOOL bZeroOnAlloc; + IMG_BOOL bPoisonOnAlloc; + + IMG_BOOL bOnDemand; + + /* + Record at alloc time whether poisoning will be required when the + PMR is freed. 
+ */ + IMG_BOOL bPoisonOnFree; + + /* Physical heap and arena pointers for this allocation */ + PHYS_HEAP* psPhysHeap; + RA_ARENA* psArena; + PVRSRV_MEMALLOCFLAGS_T uiAllocFlags; + + /* + Connection data for this requests' originating process. NULL for + direct-bridge originating calls + */ + CONNECTION_DATA *psConnection; +} PMR_LMALLOCARRAY_DATA; + +#if defined(DEBUG) && defined(SUPPORT_VALIDATION) && defined(__linux__) +/* Global structure to manage GPU memory leak */ +static DEFINE_MUTEX(g_sLMALeakMutex); +static IMG_UINT32 g_ui32LMALeakCounter = 0; +#endif + +typedef struct PHYSMEM_LMA_DATA_TAG { + RA_ARENA *psRA; + + IMG_CPU_PHYADDR sStartAddr; + IMG_DEV_PHYADDR sCardBase; + IMG_UINT64 uiSize; +} PHYSMEM_LMA_DATA; + +/* + * This function will set the psDevPAddr to whatever the system layer + * has set it for the referenced heap. + * It will not fail if the psDevPAddr is invalid. + */ +static PVRSRV_ERROR +_GetDevPAddr(PHEAP_IMPL_DATA pvImplData, + IMG_DEV_PHYADDR *psDevPAddr) +{ + PHYSMEM_LMA_DATA *psLMAData = (PHYSMEM_LMA_DATA*)pvImplData; + + *psDevPAddr = psLMAData->sCardBase; + + return PVRSRV_OK; +} + +/* + * This function will set the psCpuPAddr to whatever the system layer + * has set it for the referenced heap. + * It will not fail if the psCpuPAddr is invalid. 
+ */ +static PVRSRV_ERROR +_GetCPUPAddr(PHEAP_IMPL_DATA pvImplData, + IMG_CPU_PHYADDR *psCpuPAddr) +{ + PHYSMEM_LMA_DATA *psLMAData = (PHYSMEM_LMA_DATA*)pvImplData; + + *psCpuPAddr = psLMAData->sStartAddr; + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +_GetSize(PHEAP_IMPL_DATA pvImplData, + IMG_UINT64 *puiSize) +{ + PHYSMEM_LMA_DATA *psLMAData = (PHYSMEM_LMA_DATA*)pvImplData; + + *puiSize = psLMAData->uiSize; + + return PVRSRV_OK; +} + +PVRSRV_ERROR +PhysmemGetArenaLMA(PHYS_HEAP *psPhysHeap, + RA_ARENA **ppsArena) +{ + PHYSMEM_LMA_DATA *psLMAData = (PHYSMEM_LMA_DATA*)PhysHeapGetImplData(psPhysHeap); + + PVR_LOG_RETURN_IF_FALSE(psLMAData != NULL, "psLMAData", PVRSRV_ERROR_NOT_IMPLEMENTED); + + *ppsArena = psLMAData->psRA; + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +_CreateArenas(PHEAP_IMPL_DATA pvImplData, IMG_CHAR *pszLabel) +{ + PHYSMEM_LMA_DATA *psLMAData = (PHYSMEM_LMA_DATA*)pvImplData; + + psLMAData->psRA = RA_Create_With_Span(pszLabel, + OSGetPageShift(), + psLMAData->sStartAddr.uiAddr, + psLMAData->sCardBase.uiAddr, + psLMAData->uiSize); + PVR_LOG_RETURN_IF_NOMEM(psLMAData->psRA, "RA_Create_With_Span"); + + return PVRSRV_OK; +} + +static void +_DestroyArenas(PHEAP_IMPL_DATA pvImplData) +{ + PHYSMEM_LMA_DATA *psLMAData = (PHYSMEM_LMA_DATA*)pvImplData; + + /* Remove RAs and RA names for local card memory */ + if (psLMAData->psRA) + { + OSFreeMem(psLMAData->psRA); + psLMAData->psRA = NULL; + } +} + +static void +_DestroyImplData(PHEAP_IMPL_DATA pvImplData) +{ + PHYSMEM_LMA_DATA *psLMAData = (PHYSMEM_LMA_DATA*)pvImplData; + + _DestroyArenas(pvImplData); + + OSFreeMem(psLMAData); +} + +struct _PHYS_HEAP_ITERATOR_ { + PHYS_HEAP *psPhysHeap; + RA_ARENA_ITERATOR *psRAIter; + + IMG_UINT64 uiTotalSize; + IMG_UINT64 uiInUseSize; +}; + +PVRSRV_ERROR LMA_HeapIteratorCreate(PVRSRV_DEVICE_NODE *psDevNode, + PHYS_HEAP_USAGE_FLAGS ui32Flags, + PHYS_HEAP_ITERATOR **ppsIter) +{ + PVRSRV_ERROR eError; + PHYSMEM_LMA_DATA *psLMAData; + PHYS_HEAP_ITERATOR *psHeapIter; + 
PHYS_HEAP *psPhysHeap; + RA_USAGE_STATS sStats; + + PVR_LOG_RETURN_IF_INVALID_PARAM(ppsIter != NULL, "ppsIter"); + PVR_LOG_RETURN_IF_INVALID_PARAM(psDevNode != NULL, "psDevNode"); + PVR_LOG_RETURN_IF_INVALID_PARAM(ui32Flags != 0, "ui32Flags"); + + eError = PhysHeapAcquireByUsage(ui32Flags, &psPhysHeap); + PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapAcquireByUsage"); + + PVR_LOG_GOTO_IF_FALSE(PhysHeapGetType(psPhysHeap) == PHYS_HEAP_TYPE_LMA, + "PhysHeap must be of LMA type", release_heap); + + psLMAData = (PHYSMEM_LMA_DATA *) PhysHeapGetImplData(psPhysHeap); + + psHeapIter = OSAllocMem(sizeof(*psHeapIter)); + PVR_LOG_GOTO_IF_NOMEM(psHeapIter, eError, release_heap); + + psHeapIter->psPhysHeap = psPhysHeap; + psHeapIter->psRAIter = RA_IteratorAcquire(psLMAData->psRA, IMG_FALSE); + PVR_LOG_GOTO_IF_NOMEM(psHeapIter->psRAIter, eError, free_heap_iter); + + /* get heap usage */ + RA_Get_Usage_Stats(psLMAData->psRA, &sStats); + + psHeapIter->uiTotalSize = sStats.ui64TotalArenaSize; + psHeapIter->uiInUseSize = sStats.ui64TotalArenaSize - sStats.ui64FreeArenaSize; + + *ppsIter = psHeapIter; + + return PVRSRV_OK; + +release_heap: + PhysHeapRelease(psPhysHeap); + +free_heap_iter: + OSFreeMem(psHeapIter); + + return eError; +} + +void LMA_HeapIteratorDestroy(PHYS_HEAP_ITERATOR *psIter) +{ + PHYS_HEAP_ITERATOR *psHeapIter = psIter; + + PVR_LOG_RETURN_VOID_IF_FALSE(psHeapIter != NULL, "psHeapIter is NULL"); + + PhysHeapRelease(psHeapIter->psPhysHeap); + RA_IteratorRelease(psHeapIter->psRAIter); + OSFreeMem(psHeapIter); +} + +PVRSRV_ERROR LMA_HeapIteratorReset(PHYS_HEAP_ITERATOR *psIter) +{ + PHYS_HEAP_ITERATOR *psHeapIter = psIter; + + PVR_LOG_RETURN_IF_INVALID_PARAM(psHeapIter != NULL, "ppsIter"); + + RA_IteratorReset(psHeapIter->psRAIter); + + return PVRSRV_OK; +} + +IMG_BOOL LMA_HeapIteratorNext(PHYS_HEAP_ITERATOR *psIter, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_UINT64 *puiSize) +{ + PHYS_HEAP_ITERATOR *psHeapIter = psIter; + RA_ITERATOR_DATA sData = {0}; + + if (psHeapIter == NULL) 
	{
		PVR_DPF((PVR_DBG_ERROR, "psHeapIter in %s() is NULL", __func__));
		return IMG_FALSE;
	}

	if (!RA_IteratorNext(psHeapIter->psRAIter, &sData))
	{
		return IMG_FALSE;
	}

	/* The RA never reports zero-length allocated spans. */
	PVR_ASSERT(sData.uiSize != 0);

	psDevPAddr->uiAddr = sData.uiAddr;
	*puiSize = sData.uiSize;

	return IMG_TRUE;
}

/* Return the total/in-use sizes snapshotted when the iterator was
 * created (not live values). */
PVRSRV_ERROR LMA_HeapIteratorGetHeapStats(PHYS_HEAP_ITERATOR *psIter,
                                          IMG_UINT64 *puiTotalSize,
                                          IMG_UINT64 *puiInUseSize)
{
	PHYS_HEAP_ITERATOR *psHeapIter = psIter;

	PVR_LOG_RETURN_IF_INVALID_PARAM(psHeapIter != NULL, "psHeapIter");

	*puiTotalSize = psHeapIter->uiTotalSize;
	*puiInUseSize = psHeapIter->uiInUseSize;

	return PVRSRV_OK;
}

/* Callback table registered with PhysHeapCreate() for LMA heaps. */
static PHEAP_IMPL_FUNCS _sPHEAPImplFuncs =
{
	.pfnDestroyData = &_DestroyImplData,
	.pfnGetDevPAddr = &_GetDevPAddr,
	.pfnGetCPUPAddr = &_GetCPUPAddr,
	.pfnGetSize = &_GetSize,
	.pfnCreatePMR = &PhysmemNewLocalRamBackedPMR,
};

/* Create an LMA phys heap from the given config: allocate the impl
 * data, register the heap, then build its RA arena.
 * NOTE(review): if _CreateArenas() fails the already-created phys heap
 * is not torn down here — presumably it is reclaimed via the device's
 * heap destruction path; confirm against PhysHeapCreate()'s contract. */
PVRSRV_ERROR
PhysmemCreateHeapLMA(PVRSRV_DEVICE_NODE *psDevNode,
                     PHYS_HEAP_CONFIG *psConfig,
                     IMG_CHAR *pszLabel,
                     PHYS_HEAP **ppsPhysHeap)
{
	PHYSMEM_LMA_DATA *psLMAData;
	PVRSRV_ERROR eError;

	PVR_LOG_RETURN_IF_INVALID_PARAM(pszLabel != NULL, "pszLabel");

	psLMAData = OSAllocMem(sizeof(*psLMAData));
	PVR_LOG_RETURN_IF_NOMEM(psLMAData, "OSAllocMem");

	/* Snapshot the address range the system layer configured. */
	psLMAData->sStartAddr = psConfig->sStartAddr;
	psLMAData->sCardBase = psConfig->sCardBase;
	psLMAData->uiSize = psConfig->uiSize;


	eError = PhysHeapCreate(psDevNode,
	                        psConfig,
	                        (PHEAP_IMPL_DATA)psLMAData,
	                        &_sPHEAPImplFuncs,
	                        ppsPhysHeap);
	if (eError != PVRSRV_OK)
	{
		OSFreeMem(psLMAData);
		return eError;
	}

	eError = _CreateArenas(psLMAData, pszLabel);
	PVR_LOG_RETURN_IF_ERROR(eError, "_CreateArenas");


	return eError;
}

/* Map a device-physical allocation into the kernel linear address space
 * with the CPU cache mode implied by ulFlags. */
static PVRSRV_ERROR _MapAlloc(PHYS_HEAP *psPhysHeap,
                              IMG_DEV_PHYADDR *psDevPAddr,
                              size_t uiSize,
                              PMR_FLAGS_T ulFlags,
                              void **pvPtr)
{
	IMG_UINT32 ui32CPUCacheFlags;
	IMG_CPU_PHYADDR sCpuPAddr;
	PVRSRV_ERROR eError;

	eError =
DevmemCPUCacheMode(PhysHeapDeviceNode(psPhysHeap), ulFlags, &ui32CPUCacheFlags);
	PVR_RETURN_IF_ERROR(eError);

	/* Translate the device address to its CPU-physical equivalent for
	 * this heap before mapping. */
	PhysHeapDevPAddrToCpuPAddr(psPhysHeap, 1, &sCpuPAddr, psDevPAddr);

	*pvPtr = OSMapPhysToLin(sCpuPAddr, uiSize, ui32CPUCacheFlags);
	PVR_RETURN_IF_NOMEM(*pvPtr);

	return PVRSRV_OK;
}

/* Undo _MapAlloc(): unmap a kernel linear mapping of the given size. */
static void _UnMapAlloc(size_t uiSize,
                        void *pvPtr)
{
	OSUnMapPhysToLin(pvPtr, uiSize);
}

/* Fill an allocation with a poison byte via a temporary uncached
 * kernel mapping (used on alloc and on free when poisoning is
 * enabled). */
static PVRSRV_ERROR
_PoisonAlloc(PHYS_HEAP *psPhysHeap,
             IMG_DEV_PHYADDR *psDevPAddr,
             IMG_UINT32 uiContigAllocSize,
             IMG_BYTE ui8PoisonValue)
{
	PVRSRV_ERROR eError;
	void *pvKernLin = NULL;

	eError = _MapAlloc(psPhysHeap,
	                   psDevPAddr,
	                   uiContigAllocSize,
	                   PVRSRV_MEMALLOCFLAG_CPU_UNCACHED,
	                   &pvKernLin);
	PVR_GOTO_IF_ERROR(eError, map_failed);

	OSDeviceMemSet(pvKernLin, ui8PoisonValue, uiContigAllocSize);

	_UnMapAlloc(uiContigAllocSize, pvKernLin);

	return PVRSRV_OK;

map_failed:
	PVR_DPF((PVR_DBG_ERROR, "Failed to poison allocation"));
	return eError;
}

/* Zero an allocation via a temporary uncached kernel mapping. */
static PVRSRV_ERROR
_ZeroAlloc(PHYS_HEAP *psPhysHeap,
           IMG_DEV_PHYADDR *psDevPAddr,
           IMG_UINT32 uiContigAllocSize)
{
	void *pvKernLin = NULL;
	PVRSRV_ERROR eError;

	eError = _MapAlloc(psPhysHeap,
	                   psDevPAddr,
	                   uiContigAllocSize,
	                   PVRSRV_MEMALLOCFLAG_CPU_UNCACHED,
	                   &pvKernLin);
	PVR_GOTO_IF_ERROR(eError, map_failed);

	OSDeviceMemSet(pvKernLin, 0, uiContigAllocSize);

	_UnMapAlloc(uiContigAllocSize, pvKernLin);

	return PVRSRV_OK;

map_failed:
	PVR_DPF((PVR_DBG_ERROR, "Failed to zero allocation"));
	return eError;
}

/* Build the page-array bookkeeping structure for an LMA PMR. No pages
 * are allocated here; all device addresses start as INVALID_PAGE_ADDR. */
static PVRSRV_ERROR
_AllocLMPageArray(PMR_SIZE_T uiSize,
                  IMG_UINT32 ui32NumPhysChunks,
                  IMG_UINT32 ui32NumVirtChunks,
                  IMG_UINT32 *pabMappingTable,
                  IMG_UINT32 uiLog2AllocPageSize,
                  IMG_BOOL bZero,
                  IMG_BOOL bPoisonOnAlloc,
                  IMG_BOOL bPoisonOnFree,
                  IMG_BOOL bContig,
                  IMG_BOOL bOnDemand,
                  PHYS_HEAP* psPhysHeap,
                  PVRSRV_MEMALLOCFLAGS_T uiAllocFlags,
                  IMG_PID uiPid,
                  PMR_LMALLOCARRAY_DATA **ppsPageArrayDataPtr,
                  CONNECTION_DATA
*psConnection + ) +{ + PMR_LMALLOCARRAY_DATA *psPageArrayData = NULL; + IMG_UINT32 ui32Index; + PVRSRV_ERROR eError; + + PVR_ASSERT(!bZero || !bPoisonOnAlloc); + PVR_ASSERT(OSGetPageShift() <= uiLog2AllocPageSize); + + psPageArrayData = OSAllocZMem(sizeof(PMR_LMALLOCARRAY_DATA)); + PVR_GOTO_IF_NOMEM(psPageArrayData, eError, errorOnAllocArray); + + if (bContig) + { + /* + Some allocations require kernel mappings in which case in order + to be virtually contiguous we also have to be physically contiguous. + */ + psPageArrayData->uiTotalNumPages = 1; + psPageArrayData->uiPagesToAlloc = psPageArrayData->uiTotalNumPages; + psPageArrayData->uiContigAllocSize = TRUNCATE_64BITS_TO_32BITS(uiSize); + psPageArrayData->uiLog2AllocSize = uiLog2AllocPageSize; + } + else + { + IMG_UINT32 uiNumPages; + + /* Use of cast below is justified by the assertion that follows to + prove that no significant bits have been truncated */ + uiNumPages = (IMG_UINT32)(((uiSize - 1) >> uiLog2AllocPageSize) + 1); + PVR_ASSERT(((PMR_SIZE_T)uiNumPages << uiLog2AllocPageSize) == uiSize); + + psPageArrayData->uiTotalNumPages = uiNumPages; + + if ((ui32NumVirtChunks != ui32NumPhysChunks) || (1 < ui32NumVirtChunks)) + { + psPageArrayData->uiPagesToAlloc = ui32NumPhysChunks; + } + else + { + psPageArrayData->uiPagesToAlloc = uiNumPages; + } + psPageArrayData->uiContigAllocSize = 1 << uiLog2AllocPageSize; + psPageArrayData->uiLog2AllocSize = uiLog2AllocPageSize; + } + psPageArrayData->psConnection = psConnection; + psPageArrayData->uiPid = uiPid; + psPageArrayData->pasDevPAddr = OSAllocMem(sizeof(IMG_DEV_PHYADDR) * + psPageArrayData->uiTotalNumPages); + PVR_GOTO_IF_NOMEM(psPageArrayData->pasDevPAddr, eError, errorOnAllocAddr); + + /* Since no pages are allocated yet, initialise page addresses to INVALID_PAGE_ADDR */ + for (ui32Index = 0; ui32Index < psPageArrayData->uiTotalNumPages; ui32Index++) + { + psPageArrayData->pasDevPAddr[ui32Index].uiAddr = INVALID_PAGE_ADDR; + } + + 
psPageArrayData->iNumPagesAllocated = 0; + psPageArrayData->bZeroOnAlloc = bZero; + psPageArrayData->bPoisonOnAlloc = bPoisonOnAlloc; + psPageArrayData->bPoisonOnFree = bPoisonOnFree; + psPageArrayData->bOnDemand = bOnDemand; + psPageArrayData->psPhysHeap = psPhysHeap; + psPageArrayData->uiAllocFlags = uiAllocFlags; + + *ppsPageArrayDataPtr = psPageArrayData; + + return PVRSRV_OK; + + /* + error exit paths follow: + */ +errorOnAllocAddr: + OSFreeMem(psPageArrayData); + +errorOnAllocArray: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + + +static PVRSRV_ERROR +_AllocLMPages(PMR_LMALLOCARRAY_DATA *psPageArrayData, IMG_UINT32 *pui32MapTable) +{ + PVRSRV_ERROR eError; + RA_BASE_T uiCardAddr; + RA_LENGTH_T uiActualSize; + IMG_UINT32 i, ui32Index = 0; + IMG_UINT32 uiContigAllocSize; + IMG_UINT32 uiLog2AllocSize; + PVRSRV_DEVICE_NODE *psDevNode; + IMG_BOOL bPoisonOnAlloc; + IMG_BOOL bZeroOnAlloc; + RA_ARENA *pArena; + + PVR_ASSERT(NULL != psPageArrayData); + PVR_ASSERT(0 <= psPageArrayData->iNumPagesAllocated); + + psDevNode = PhysHeapDeviceNode(psPageArrayData->psPhysHeap); + uiContigAllocSize = psPageArrayData->uiContigAllocSize; + uiLog2AllocSize = psPageArrayData->uiLog2AllocSize; + bPoisonOnAlloc = psPageArrayData->bPoisonOnAlloc; + bZeroOnAlloc = psPageArrayData->bZeroOnAlloc; + + /* Get suitable local memory region for this GPU physheap allocation */ + eError = PhysmemGetArenaLMA(psPageArrayData->psPhysHeap, &pArena); + PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemGetArenaLMA"); + + if (psPageArrayData->uiTotalNumPages < + (psPageArrayData->iNumPagesAllocated + psPageArrayData->uiPagesToAlloc)) + { + PVR_DPF((PVR_DBG_ERROR, "Pages requested to allocate don't fit PMR alloc Size. 
" + "Allocated: %u + Requested: %u > Total Allowed: %u", + psPageArrayData->iNumPagesAllocated, + psPageArrayData->uiPagesToAlloc, + psPageArrayData->uiTotalNumPages)); + return PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE; + } + + +#if defined(SUPPORT_GPUVIRT_VALIDATION) + { + IMG_UINT32 ui32OSid=0; + + /* Obtain the OSid specific data from our connection handle */ + if (psPageArrayData->psConnection != NULL) + { + ui32OSid = psPageArrayData->psConnection->ui32OSid; + } + + if (PVRSRV_CHECK_SHARED_BUFFER(psPageArrayData->uiAllocFlags)) + { + pArena=psDevNode->psOSSharedArena; + PVR_DPF((PVR_DBG_MESSAGE, + "(GPU Virtualization Validation): Giving from shared mem")); + } + else + { + pArena=psDevNode->psOSidSubArena[ui32OSid]; + PVR_DPF((PVR_DBG_MESSAGE, + "(GPU Virtualization Validation): Giving from OS slot %d", + ui32OSid)); + } + } +#endif + + psPageArrayData->psArena = pArena; + + for (i = 0; i < psPageArrayData->uiPagesToAlloc; i++) + { + /* This part of index finding should happen before allocating the page. 
+ * Just avoiding intricate paths */ + if (psPageArrayData->uiTotalNumPages == psPageArrayData->uiPagesToAlloc) + { + ui32Index = i; + } + else + { + if (NULL == pui32MapTable) + { + PVR_LOG_GOTO_WITH_ERROR("pui32MapTable", eError, PVRSRV_ERROR_PMR_INVALID_MAP_INDEX_ARRAY, errorOnRAAlloc); + } + + ui32Index = pui32MapTable[i]; + if (ui32Index >= psPageArrayData->uiTotalNumPages) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Page alloc request Index out of bounds for PMR @0x%p", + __func__, + psPageArrayData)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE, errorOnRAAlloc); + } + + if (INVALID_PAGE_ADDR != psPageArrayData->pasDevPAddr[ui32Index].uiAddr) + { + PVR_LOG_GOTO_WITH_ERROR("Mapping already exists", eError, PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS, errorOnRAAlloc); + } + } + + eError = RA_Alloc(pArena, + uiContigAllocSize, + RA_NO_IMPORT_MULTIPLIER, + 0, /* No flags */ + 1ULL << uiLog2AllocSize, + "LMA_Page_Alloc", + &uiCardAddr, + &uiActualSize, + NULL); /* No private handle */ + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_ERROR, + "Failed to Allocate the page @index:%d, size = 0x%llx", + ui32Index, 1ULL << uiLog2AllocSize)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES, errorOnRAAlloc); + } + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +{ + PVR_DPF((PVR_DBG_MESSAGE, + "(GPU Virtualization Validation): Address: 0x%"IMG_UINT64_FMTSPECX, + uiCardAddr)); +} +#endif + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if !defined(PVRSRV_ENABLE_MEMORY_STATS) + /* Allocation is done a page at a time */ + PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, uiActualSize, psPageArrayData->uiPid); +#else + { + IMG_CPU_PHYADDR sLocalCpuPAddr; + + sLocalCpuPAddr.uiAddr = (IMG_UINT64)uiCardAddr; + PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, + NULL, + sLocalCpuPAddr, + uiActualSize, + NULL, + psPageArrayData->uiPid + DEBUG_MEMSTATS_VALUES); + } +#endif +#endif + + 
psPageArrayData->pasDevPAddr[ui32Index].uiAddr = uiCardAddr; + if (bPoisonOnAlloc) + { + eError = _PoisonAlloc(psPageArrayData->psPhysHeap, + &psPageArrayData->pasDevPAddr[ui32Index], + uiContigAllocSize, + PVRSRV_POISON_ON_ALLOC_VALUE); + PVR_LOG_GOTO_IF_ERROR(eError, "_PoisonAlloc", errorOnPoison); + } + + if (bZeroOnAlloc) + { + eError = _ZeroAlloc(psPageArrayData->psPhysHeap, + &psPageArrayData->pasDevPAddr[ui32Index], + uiContigAllocSize); + PVR_LOG_GOTO_IF_ERROR(eError, "_ZeroAlloc", errorOnZero); + } + } + psPageArrayData->iNumPagesAllocated += psPageArrayData->uiPagesToAlloc; + + return PVRSRV_OK; + + /* + error exit paths follow: + */ +errorOnZero: +errorOnPoison: + eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES; +errorOnRAAlloc: + PVR_DPF((PVR_DBG_ERROR, + "%s: alloc_pages failed to honour request %d @index: %d of %d pages: (%s)", + __func__, + ui32Index, + i, + psPageArrayData->uiPagesToAlloc, + PVRSRVGetErrorString(eError))); + while (--i < psPageArrayData->uiPagesToAlloc) + { + if (psPageArrayData->uiTotalNumPages == psPageArrayData->uiPagesToAlloc) + { + ui32Index = i; + } + else + { + if (NULL == pui32MapTable) + { + break; + } + + ui32Index = pui32MapTable[i]; + } + + if (ui32Index < psPageArrayData->uiTotalNumPages) + { +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if !defined(PVRSRV_ENABLE_MEMORY_STATS) + /* Allocation is done a page at a time */ + PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, + uiContigAllocSize, + psPageArrayData->uiPid); +#else + { + PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, + psPageArrayData->pasDevPAddr[ui32Index].uiAddr, + psPageArrayData->uiPid); + } +#endif +#endif + RA_Free(pArena, psPageArrayData->pasDevPAddr[ui32Index].uiAddr); + psPageArrayData->pasDevPAddr[ui32Index].uiAddr = INVALID_PAGE_ADDR; + } + } + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +static PVRSRV_ERROR +_FreeLMPageArray(PMR_LMALLOCARRAY_DATA *psPageArrayData) +{ + 
OSFreeMem(psPageArrayData->pasDevPAddr); + + PVR_DPF((PVR_DBG_MESSAGE, + "physmem_lma.c: freed local memory array structure for PMR @0x%p", + psPageArrayData)); + + OSFreeMem(psPageArrayData); + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +_FreeLMPages(PMR_LMALLOCARRAY_DATA *psPageArrayData, + IMG_UINT32 *pui32FreeIndices, + IMG_UINT32 ui32FreePageCount) +{ + IMG_UINT32 uiContigAllocSize; + IMG_UINT32 i, ui32PagesToFree=0, ui32PagesFreed=0, ui32Index=0; + RA_ARENA *pArena = psPageArrayData->psArena; + + PVR_ASSERT(psPageArrayData->iNumPagesAllocated != 0); + + uiContigAllocSize = psPageArrayData->uiContigAllocSize; + + ui32PagesToFree = (NULL == pui32FreeIndices) ? + psPageArrayData->uiTotalNumPages : ui32FreePageCount; + + for (i = 0; i < ui32PagesToFree; i++) + { + if (NULL == pui32FreeIndices) + { + ui32Index = i; + } + else + { + ui32Index = pui32FreeIndices[i]; + } + + if (INVALID_PAGE_ADDR != psPageArrayData->pasDevPAddr[ui32Index].uiAddr) + { + ui32PagesFreed++; + if (psPageArrayData->bPoisonOnFree) + { + _PoisonAlloc(psPageArrayData->psPhysHeap, + &psPageArrayData->pasDevPAddr[ui32Index], + uiContigAllocSize, + PVRSRV_POISON_ON_FREE_VALUE); + } + + RA_Free(pArena, psPageArrayData->pasDevPAddr[ui32Index].uiAddr); + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if !defined(PVRSRV_ENABLE_MEMORY_STATS) + /* Allocation is done a page at a time */ + PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, + uiContigAllocSize, + psPageArrayData->uiPid); +#else + { + PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, + psPageArrayData->pasDevPAddr[ui32Index].uiAddr, + psPageArrayData->uiPid); + } +#endif +#endif + psPageArrayData->pasDevPAddr[ui32Index].uiAddr = INVALID_PAGE_ADDR; + } + } + psPageArrayData->iNumPagesAllocated -= ui32PagesFreed; + + PVR_ASSERT(0 <= psPageArrayData->iNumPagesAllocated); + + PVR_DPF((PVR_DBG_MESSAGE, + "%s: freed %d local memory for PMR @0x%p", + __func__, + (ui32PagesFreed * uiContigAllocSize), + 
psPageArrayData)); + + return PVRSRV_OK; +} + +/* + * + * Implementation of callback functions + * + */ + +/* destructor func is called after last reference disappears, but + before PMR itself is freed. */ +static PVRSRV_ERROR +PMRFinalizeLocalMem(PMR_IMPL_PRIVDATA pvPriv) +{ + PVRSRV_ERROR eError; + PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL; + + psLMAllocArrayData = pvPriv; + + /* We can't free pages until now. */ + if (psLMAllocArrayData->iNumPagesAllocated != 0) + { +#if defined(DEBUG) && defined(SUPPORT_VALIDATION) && defined(__linux__) + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + IMG_UINT32 ui32LMALeakMax = psPVRSRVData->sMemLeakIntervals.ui32GPU; + + mutex_lock(&g_sLMALeakMutex); + + g_ui32LMALeakCounter++; + if (ui32LMALeakMax && g_ui32LMALeakCounter >= ui32LMALeakMax) + { + g_ui32LMALeakCounter = 0; + mutex_unlock(&g_sLMALeakMutex); + + PVR_DPF((PVR_DBG_WARNING, "%s: Skipped freeing of PMR 0x%p to trigger memory leak.", __func__, pvPriv)); + return PVRSRV_OK; + } + + mutex_unlock(&g_sLMALeakMutex); +#endif + eError = _FreeLMPages(psLMAllocArrayData, NULL, 0); + PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */ + } + + eError = _FreeLMPageArray(psLMAllocArrayData); + PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */ + + return PVRSRV_OK; +} + +/* callback function for locking the system physical page addresses. + As we are LMA there is nothing to do as we control physical memory. 
*/ +static PVRSRV_ERROR +PMRLockSysPhysAddressesLocalMem(PMR_IMPL_PRIVDATA pvPriv) +{ + + PVRSRV_ERROR eError; + PMR_LMALLOCARRAY_DATA *psLMAllocArrayData; + + psLMAllocArrayData = pvPriv; + + if (psLMAllocArrayData->bOnDemand) + { + /* Allocate Memory for deferred allocation */ + eError = _AllocLMPages(psLMAllocArrayData, NULL); + PVR_RETURN_IF_ERROR(eError); + } + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +PMRUnlockSysPhysAddressesLocalMem(PMR_IMPL_PRIVDATA pvPriv) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PMR_LMALLOCARRAY_DATA *psLMAllocArrayData; + + psLMAllocArrayData = pvPriv; + + if (psLMAllocArrayData->bOnDemand) + { + /* Free Memory for deferred allocation */ + eError = _FreeLMPages(psLMAllocArrayData, NULL, 0); + PVR_RETURN_IF_ERROR(eError); + } + + PVR_ASSERT(eError == PVRSRV_OK); + return eError; +} + +/* N.B. It is assumed that PMRLockSysPhysAddressesLocalMem() is called _before_ this function! */ +static PVRSRV_ERROR +PMRSysPhysAddrLocalMem(PMR_IMPL_PRIVDATA pvPriv, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32NumOfPages, + IMG_DEVMEM_OFFSET_T *puiOffset, + IMG_BOOL *pbValid, + IMG_DEV_PHYADDR *psDevPAddr) +{ + IMG_UINT32 idx; + IMG_UINT32 uiLog2AllocSize; + IMG_UINT32 uiNumAllocs; + IMG_UINT64 uiAllocIndex; + IMG_DEVMEM_OFFSET_T uiInAllocOffset; + PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = pvPriv; + + if (psLMAllocArrayData->uiLog2AllocSize < ui32Log2PageSize) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Requested physical addresses from PMR " + "for incompatible contiguity %u!", + __func__, + ui32Log2PageSize)); + return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY; + } + + uiNumAllocs = psLMAllocArrayData->uiTotalNumPages; + if (uiNumAllocs > 1) + { + PVR_ASSERT(psLMAllocArrayData->uiLog2AllocSize != 0); + uiLog2AllocSize = psLMAllocArrayData->uiLog2AllocSize; + + for (idx=0; idx < ui32NumOfPages; idx++) + { + if (pbValid[idx]) + { + uiAllocIndex = puiOffset[idx] >> uiLog2AllocSize; + uiInAllocOffset = puiOffset[idx] - (uiAllocIndex << uiLog2AllocSize); 
+ + PVR_ASSERT(uiAllocIndex < uiNumAllocs); + PVR_ASSERT(uiInAllocOffset < (1ULL << uiLog2AllocSize)); + + psDevPAddr[idx].uiAddr = psLMAllocArrayData->pasDevPAddr[uiAllocIndex].uiAddr + uiInAllocOffset; + } + } + } + else + { + for (idx=0; idx < ui32NumOfPages; idx++) + { + if (pbValid[idx]) + { + psDevPAddr[idx].uiAddr = psLMAllocArrayData->pasDevPAddr[0].uiAddr + puiOffset[idx]; + } + } + } + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +PMRAcquireKernelMappingDataLocalMem(PMR_IMPL_PRIVDATA pvPriv, + size_t uiOffset, + size_t uiSize, + void **ppvKernelAddressOut, + IMG_HANDLE *phHandleOut, + PMR_FLAGS_T ulFlags) +{ + PVRSRV_ERROR eError; + PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL; + void *pvKernLinAddr = NULL; + IMG_UINT32 ui32PageIndex = 0; + size_t uiOffsetMask = uiOffset; + + psLMAllocArrayData = pvPriv; + + /* Check that we can map this in contiguously */ + if (psLMAllocArrayData->uiTotalNumPages != 1) + { + size_t uiStart = uiOffset; + size_t uiEnd = uiOffset + uiSize - 1; + size_t uiPageMask = ~((1 << psLMAllocArrayData->uiLog2AllocSize) - 1); + + /* We can still map if only one page is required */ + if ((uiStart & uiPageMask) != (uiEnd & uiPageMask)) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY, e0); + } + + /* Locate the desired physical page to map in */ + ui32PageIndex = uiOffset >> psLMAllocArrayData->uiLog2AllocSize; + uiOffsetMask = (1U << psLMAllocArrayData->uiLog2AllocSize) - 1; + } + + PVR_ASSERT(ui32PageIndex < psLMAllocArrayData->uiTotalNumPages); + + eError = _MapAlloc(psLMAllocArrayData->psPhysHeap, + &psLMAllocArrayData->pasDevPAddr[ui32PageIndex], + psLMAllocArrayData->uiContigAllocSize, + ulFlags, + &pvKernLinAddr); + + *ppvKernelAddressOut = ((IMG_CHAR *) pvKernLinAddr) + (uiOffset & uiOffsetMask); + *phHandleOut = pvKernLinAddr; + + return eError; + + /* + error exit paths follow: + */ +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +#if defined(INTEGRITY_OS) +static PVRSRV_ERROR 
PMRMapMemoryObjectLocalMem(PMR_IMPL_PRIVDATA pvPriv, IMG_HANDLE *phMemObj, void **ppvClientAddr, IMG_HANDLE *phHandleOut) +{ + PVRSRV_ERROR ePVRError = PVRSRV_OK; + Error eOSError = Success; + PMR_MAP_DATA *psMapData = (PMR_MAP_DATA *)pvPriv; + PMR_MAP_DATA_ARRAY *psMapDataArray = psMapData->paMemObj; + + /* Create a private copy of the mapping data to keep the mapping information separate for every new client */ + struct _PMR_MAP_DATA_ARRAY_ *psMapDataArrayCopy = OSAllocMem(sizeof(struct _PMR_MAP_DATA_ARRAY_)); + PVR_LOG_RETURN_IF_NOMEM(psMapDataArrayCopy, OSAllocMem); + + memcpy(psMapDataArrayCopy, psMapDataArray, sizeof(struct _PMR_MAP_DATA_ARRAY_)); + + ePVRError = IntegrityMapMemoryObject(psMapDataArrayCopy->hFrameMem, *phMemObj); + + if (ePVRError == PVRSRV_OK) + { + ePVRError = IntegrityGetMemoryObjectCPUVAddrRange(*phMemObj, ppvClientAddr, NULL); + + if (ePVRError == PVRSRV_OK) + { + psMapDataArrayCopy->hVirtMemObj=*phMemObj; + *phHandleOut = (IMG_HANDLE)psMapDataArrayCopy; + } + else + { + ePVRError = PVRSRV_ERROR_FAILED_TO_MAP_KERNELVIRTUAL; + PVR_DPF((PVR_DBG_ERROR, "%s: IntegrityGetMemoryObjectCPUVAddrRange failed.", __func__)); + } + } + else + { + OSFreeMem(psMapDataArrayCopy); + ePVRError = PVRSRV_ERROR_FAILED_TO_MAP_KERNELVIRTUAL; + PVR_DPF((PVR_DBG_ERROR, "%s: failed.", __func__)); + } + + return ePVRError; +} + +static PVRSRV_ERROR PMRUnmapMemoryObjectLocalMem(PMR_IMPL_PRIVDATA pvPriv) +{ + PVRSRV_ERROR ePVRError = PVRSRV_OK; + + struct _PMR_MAP_DATA_ARRAY_ *psMapDataArray = (struct _PMR_MAP_DATA_ARRAY_ *)pvPriv; + PVR_LOG_GOTO_IF_INVALID_PARAM(psMapDataArray != NULL, ePVRError, e0); + + ePVRError = IntegrityUnmapMemoryObject(psMapDataArray->hVirtMemObj); + if (ePVRError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: failed to unmap", __func__)); + } + + OSFreeMem(psMapDataArray); + +e0: + return ePVRError; +} +#endif + +static void PMRReleaseKernelMappingDataLocalMem(PMR_IMPL_PRIVDATA pvPriv, + IMG_HANDLE hHandle) +{ + PMR_LMALLOCARRAY_DATA 
*psLMAllocArrayData = NULL; + void *pvKernLinAddr = NULL; + + psLMAllocArrayData = (PMR_LMALLOCARRAY_DATA *) pvPriv; + pvKernLinAddr = (void *) hHandle; + + _UnMapAlloc(psLMAllocArrayData->uiContigAllocSize, + pvKernLinAddr); +} + + +static PVRSRV_ERROR +CopyBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT8 *pcBuffer, + size_t uiBufSz, + size_t *puiNumBytes, + void (*pfnCopyBytes)(IMG_UINT8 *pcBuffer, + IMG_UINT8 *pcPMR, + size_t uiSize)) +{ + PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL; + size_t uiBytesCopied; + size_t uiBytesToCopy; + size_t uiBytesCopyableFromAlloc; + void *pvMapping = NULL; + IMG_UINT8 *pcKernelPointer = NULL; + size_t uiBufferOffset; + IMG_UINT64 uiAllocIndex; + IMG_DEVMEM_OFFSET_T uiInAllocOffset; + PVRSRV_ERROR eError; + + psLMAllocArrayData = pvPriv; + + uiBytesCopied = 0; + uiBytesToCopy = uiBufSz; + uiBufferOffset = 0; + + if (psLMAllocArrayData->uiTotalNumPages > 1) + { + while (uiBytesToCopy > 0) + { + /* we have to map one alloc in at a time */ + PVR_ASSERT(psLMAllocArrayData->uiLog2AllocSize != 0); + uiAllocIndex = uiOffset >> psLMAllocArrayData->uiLog2AllocSize; + uiInAllocOffset = uiOffset - (uiAllocIndex << psLMAllocArrayData->uiLog2AllocSize); + uiBytesCopyableFromAlloc = uiBytesToCopy; + if (uiBytesCopyableFromAlloc + uiInAllocOffset > (1ULL << psLMAllocArrayData->uiLog2AllocSize)) + { + uiBytesCopyableFromAlloc = TRUNCATE_64BITS_TO_SIZE_T((1ULL << psLMAllocArrayData->uiLog2AllocSize)-uiInAllocOffset); + } + + PVR_ASSERT(uiBytesCopyableFromAlloc != 0); + PVR_ASSERT(uiAllocIndex < psLMAllocArrayData->uiTotalNumPages); + PVR_ASSERT(uiInAllocOffset < (1ULL << psLMAllocArrayData->uiLog2AllocSize)); + + eError = _MapAlloc(psLMAllocArrayData->psPhysHeap, + &psLMAllocArrayData->pasDevPAddr[uiAllocIndex], + psLMAllocArrayData->uiContigAllocSize, + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED, + &pvMapping); + PVR_GOTO_IF_ERROR(eError, e0); + pcKernelPointer = pvMapping; + pfnCopyBytes(&pcBuffer[uiBufferOffset], 
&pcKernelPointer[uiInAllocOffset], uiBytesCopyableFromAlloc); + + _UnMapAlloc(psLMAllocArrayData->uiContigAllocSize, + pvMapping); + + uiBufferOffset += uiBytesCopyableFromAlloc; + uiBytesToCopy -= uiBytesCopyableFromAlloc; + uiOffset += uiBytesCopyableFromAlloc; + uiBytesCopied += uiBytesCopyableFromAlloc; + } + } + else + { + PVR_ASSERT((uiOffset + uiBufSz) <= psLMAllocArrayData->uiContigAllocSize); + PVR_ASSERT(psLMAllocArrayData->uiContigAllocSize != 0); + eError = _MapAlloc(psLMAllocArrayData->psPhysHeap, + &psLMAllocArrayData->pasDevPAddr[0], + psLMAllocArrayData->uiContigAllocSize, + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED, + &pvMapping); + PVR_GOTO_IF_ERROR(eError, e0); + pcKernelPointer = pvMapping; + pfnCopyBytes(pcBuffer, &pcKernelPointer[uiOffset], uiBufSz); + + _UnMapAlloc(psLMAllocArrayData->uiContigAllocSize, + pvMapping); + + uiBytesCopied = uiBufSz; + } + *puiNumBytes = uiBytesCopied; + return PVRSRV_OK; +e0: + *puiNumBytes = uiBytesCopied; + return eError; +} + +static void ReadLocalMem(IMG_UINT8 *pcBuffer, + IMG_UINT8 *pcPMR, + size_t uiSize) +{ + /* NOTE: 'CachedMemCopy' means the operating system default memcpy, which + * we *assume* in the LMA code will be faster, and doesn't need to + * worry about ARM64. + */ + OSCachedMemCopy(pcBuffer, pcPMR, uiSize); +} + +static PVRSRV_ERROR +PMRReadBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT8 *pcBuffer, + size_t uiBufSz, + size_t *puiNumBytes) +{ + return CopyBytesLocalMem(pvPriv, + uiOffset, + pcBuffer, + uiBufSz, + puiNumBytes, + ReadLocalMem); +} + +static void WriteLocalMem(IMG_UINT8 *pcBuffer, + IMG_UINT8 *pcPMR, + size_t uiSize) +{ + /* NOTE: 'CachedMemCopy' means the operating system default memcpy, which + * we *assume* in the LMA code will be faster, and doesn't need to + * worry about ARM64. 
+ */ + OSCachedMemCopy(pcPMR, pcBuffer, uiSize); +} + +static PVRSRV_ERROR +PMRWriteBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT8 *pcBuffer, + size_t uiBufSz, + size_t *puiNumBytes) +{ + return CopyBytesLocalMem(pvPriv, + uiOffset, + pcBuffer, + uiBufSz, + puiNumBytes, + WriteLocalMem); +} + +/*************************************************************************/ /*! +@Function PMRChangeSparseMemLocalMem +@Description This function Changes the sparse mapping by allocating and + freeing of pages. It also changes the GPU maps accordingly. +@Return PVRSRV_ERROR failure code +*/ /**************************************************************************/ +static PVRSRV_ERROR +PMRChangeSparseMemLocalMem(PMR_IMPL_PRIVDATA pPriv, + const PMR *psPMR, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices, + IMG_UINT32 uiFlags) +{ + PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS; + + IMG_UINT32 ui32AdtnlAllocPages = 0; + IMG_UINT32 ui32AdtnlFreePages = 0; + IMG_UINT32 ui32CommonRequstCount = 0; + IMG_UINT32 ui32Loop = 0; + IMG_UINT32 ui32Index = 0; + IMG_UINT32 uiAllocpgidx; + IMG_UINT32 uiFreepgidx; + + PMR_LMALLOCARRAY_DATA *psPMRPageArrayData = (PMR_LMALLOCARRAY_DATA *)pPriv; + IMG_DEV_PHYADDR sPhyAddr; + +#if defined(DEBUG) + IMG_BOOL bPoisonFail = IMG_FALSE; + IMG_BOOL bZeroFail = IMG_FALSE; +#endif + + /* Fetch the Page table array represented by the PMR */ + IMG_DEV_PHYADDR *psPageArray = psPMRPageArrayData->pasDevPAddr; + PMR_MAPPING_TABLE *psPMRMapTable = PMR_GetMappingTable(psPMR); + + /* The incoming request is classified into two operations independent of + * each other: alloc & free pages. + * These operations can be combined with two mapping operations as well + * which are GPU & CPU space mappings. + * + * From the alloc and free page requests, the net amount of pages to be + * allocated or freed is computed. 
Pages that were requested to be freed + * will be reused to fulfil alloc requests. + * + * The order of operations is: + * 1. Allocate new pages from the OS + * 2. Move the free pages from free request to alloc positions. + * 3. Free the rest of the pages not used for alloc + * + * Alloc parameters are validated at the time of allocation + * and any error will be handled then. */ + + if (SPARSE_RESIZE_BOTH == (uiFlags & SPARSE_RESIZE_BOTH)) + { + ui32CommonRequstCount = (ui32AllocPageCount > ui32FreePageCount) ? + ui32FreePageCount : ui32AllocPageCount; + + PDUMP_PANIC(SPARSEMEM_SWAP, "Request to swap alloc & free pages not supported"); + } + + if (SPARSE_RESIZE_ALLOC == (uiFlags & SPARSE_RESIZE_ALLOC)) + { + ui32AdtnlAllocPages = ui32AllocPageCount - ui32CommonRequstCount; + } + else + { + ui32AllocPageCount = 0; + } + + if (SPARSE_RESIZE_FREE == (uiFlags & SPARSE_RESIZE_FREE)) + { + ui32AdtnlFreePages = ui32FreePageCount - ui32CommonRequstCount; + } + else + { + ui32FreePageCount = 0; + } + + PVR_LOG_RETURN_IF_FALSE( + (ui32CommonRequstCount | ui32AdtnlAllocPages | ui32AdtnlFreePages) != 0, + "Invalid combination of parameters: ui32CommonRequstCount," + " ui32AdtnlAllocPages and ui32AdtnlFreePages.", + PVRSRV_ERROR_INVALID_PARAMS + ); + + { + /* Validate the free page indices */ + if (ui32FreePageCount) + { + if (NULL != pai32FreeIndices) + { + for (ui32Loop = 0; ui32Loop < ui32FreePageCount; ui32Loop++) + { + uiFreepgidx = pai32FreeIndices[ui32Loop]; + + if (uiFreepgidx > psPMRPageArrayData->uiTotalNumPages) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE, e0); + } + + if (INVALID_PAGE_ADDR == psPageArray[uiFreepgidx].uiAddr) + { + PVR_LOG_GOTO_WITH_ERROR("psPageArray[uiFreepgidx].uiAddr", eError, PVRSRV_ERROR_INVALID_PARAMS, e0); + } + } + }else + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Given non-zero free count but missing indices array", + __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + } + + /*The following block of code verifies any 
issues with common alloc page indices */ + for (ui32Loop = ui32AdtnlAllocPages; ui32Loop < ui32AllocPageCount; ui32Loop++) + { + uiAllocpgidx = pai32AllocIndices[ui32Loop]; + if (uiAllocpgidx > psPMRPageArrayData->uiTotalNumPages) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE, e0); + } + + if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM)) + { + if ((INVALID_PAGE_ADDR != psPageArray[uiAllocpgidx].uiAddr) || + (TRANSLATION_INVALID != psPMRMapTable->aui32Translation[uiAllocpgidx])) + { + PVR_LOG_GOTO_WITH_ERROR("Trying to allocate already allocated page again", eError, PVRSRV_ERROR_INVALID_PARAMS, e0); + } + } + else + { + if ((INVALID_PAGE_ADDR == psPageArray[uiAllocpgidx].uiAddr) || + (TRANSLATION_INVALID == psPMRMapTable->aui32Translation[uiAllocpgidx])) + { + PVR_LOG_GOTO_WITH_ERROR("Unable to remap memory due to missing page", eError, PVRSRV_ERROR_INVALID_PARAMS, e0); + } + } + } + + + ui32Loop = 0; + + /* Allocate new pages */ + if (0 != ui32AdtnlAllocPages) + { + /* Say how many pages to allocate */ + psPMRPageArrayData->uiPagesToAlloc = ui32AdtnlAllocPages; + + eError = _AllocLMPages(psPMRPageArrayData, pai32AllocIndices); + PVR_LOG_GOTO_IF_ERROR(eError, "_AllocLMPages", e0); + + /* Mark the corresponding pages of translation table as valid */ + for (ui32Loop = 0; ui32Loop < ui32AdtnlAllocPages; ui32Loop++) + { + psPMRMapTable->aui32Translation[pai32AllocIndices[ui32Loop]] = pai32AllocIndices[ui32Loop]; + } + + psPMRMapTable->ui32NumPhysChunks += ui32AdtnlAllocPages; + } + + ui32Index = ui32Loop; + + /* Move the corresponding free pages to alloc request */ + for (ui32Loop = 0; ui32Loop < ui32CommonRequstCount; ui32Loop++, ui32Index++) + { + + uiAllocpgidx = pai32AllocIndices[ui32Index]; + uiFreepgidx = pai32FreeIndices[ui32Loop]; + sPhyAddr = psPageArray[uiAllocpgidx]; + psPageArray[uiAllocpgidx] = psPageArray[uiFreepgidx]; + + /* Is remap mem used in real world scenario? Should it be turned to a + * debug feature? 
The condition check needs to be out of loop, will be + * done at later point though after some analysis */ + if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM)) + { + psPMRMapTable->aui32Translation[uiFreepgidx] = TRANSLATION_INVALID; + psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx; + psPageArray[uiFreepgidx].uiAddr = INVALID_PAGE_ADDR; + } + else + { + psPageArray[uiFreepgidx] = sPhyAddr; + psPMRMapTable->aui32Translation[uiFreepgidx] = uiFreepgidx; + psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx; + } + + /* Be sure to honour the attributes associated with the allocation + * such as zeroing, poisoning etc. */ + if (psPMRPageArrayData->bPoisonOnAlloc) + { + eError = _PoisonAlloc(psPMRPageArrayData->psPhysHeap, + &psPMRPageArrayData->pasDevPAddr[uiAllocpgidx], + psPMRPageArrayData->uiContigAllocSize, + PVRSRV_POISON_ON_ALLOC_VALUE); + + /* Consider this as a soft failure and go ahead but log error to kernel log */ + if (eError != PVRSRV_OK) + { +#if defined(DEBUG) + bPoisonFail = IMG_TRUE; +#endif + } + } + else + { + if (psPMRPageArrayData->bZeroOnAlloc) + { + eError = _ZeroAlloc(psPMRPageArrayData->psPhysHeap, + &psPMRPageArrayData->pasDevPAddr[uiAllocpgidx], + psPMRPageArrayData->uiContigAllocSize); + /* Consider this as a soft failure and go ahead but log error to kernel log */ + if (eError != PVRSRV_OK) + { +#if defined(DEBUG) + /*Don't think we need to zero any pages further*/ + bZeroFail = IMG_TRUE; +#endif + } + } + } + } + + /*Free the additional free pages */ + if (0 != ui32AdtnlFreePages) + { + ui32Index = ui32Loop; + _FreeLMPages(psPMRPageArrayData, &pai32FreeIndices[ui32Loop], ui32AdtnlFreePages); + ui32Loop = 0; + + while (ui32Loop++ < ui32AdtnlFreePages) + { + /*Set the corresponding mapping table entry to invalid address */ + psPMRMapTable->aui32Translation[pai32FreeIndices[ui32Index++]] = TRANSLATION_INVALID; + } + + psPMRMapTable->ui32NumPhysChunks -= ui32AdtnlFreePages; + } + + } + +#if defined(DEBUG) + if (IMG_TRUE 
== bPoisonFail) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Error in poisoning the page", __func__)); + } + + if (IMG_TRUE == bZeroFail) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Error in zeroing the page", __func__)); + } +#endif + + /* Update the PMR memory holding information */ + eError = PVRSRV_OK; + +e0: + return eError; +} + +/*************************************************************************/ /*! +@Function PMRChangeSparseMemCPUMapLocalMem +@Description This function Changes CPU maps accordingly +@Return PVRSRV_ERROR failure code +*/ /**************************************************************************/ +static +PVRSRV_ERROR PMRChangeSparseMemCPUMapLocalMem(PMR_IMPL_PRIVDATA pPriv, + const PMR *psPMR, + IMG_UINT64 sCpuVAddrBase, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices) +{ + PVRSRV_ERROR eError; + IMG_DEV_PHYADDR *psPageArray; + PMR_LMALLOCARRAY_DATA *psPMRPageArrayData = (PMR_LMALLOCARRAY_DATA *)pPriv; + uintptr_t sCpuVABase = sCpuVAddrBase; + IMG_CPU_PHYADDR sCpuAddrPtr; + IMG_BOOL bValid = IMG_FALSE; + + /*Get the base address of the heap */ + eError = PMR_CpuPhysAddr(psPMR, + psPMRPageArrayData->uiLog2AllocSize, + 1, + 0, /* offset zero here mean first page in the PMR */ + &sCpuAddrPtr, + &bValid); + PVR_LOG_RETURN_IF_ERROR(eError, "PMR_CpuPhysAddr"); + + /* Phys address of heap is computed here by subtracting the offset of this page + * basically phys address of any page = Base address of heap + offset of the page */ + sCpuAddrPtr.uiAddr -= psPMRPageArrayData->pasDevPAddr[0].uiAddr; + psPageArray = psPMRPageArrayData->pasDevPAddr; + + return OSChangeSparseMemCPUAddrMap((void **)psPageArray, + sCpuVABase, + sCpuAddrPtr, + ui32AllocPageCount, + pai32AllocIndices, + ui32FreePageCount, + pai32FreeIndices, + IMG_TRUE); +} + +static PMR_IMPL_FUNCTAB _sPMRLMAFuncTab = { + /* pfnLockPhysAddresses */ + &PMRLockSysPhysAddressesLocalMem, + /* pfnUnlockPhysAddresses */ + 
&PMRUnlockSysPhysAddressesLocalMem, + /* pfnDevPhysAddr */ + &PMRSysPhysAddrLocalMem, + /* pfnAcquireKernelMappingData */ + &PMRAcquireKernelMappingDataLocalMem, + /* pfnReleaseKernelMappingData */ + &PMRReleaseKernelMappingDataLocalMem, +#if defined(INTEGRITY_OS) + /* pfnMapMemoryObject */ + PMRMapMemoryObjectLocalMem, + /* pfnUnmapMemoryObject */ + PMRUnmapMemoryObjectLocalMem, +#endif + /* pfnReadBytes */ + &PMRReadBytesLocalMem, + /* pfnWriteBytes */ + &PMRWriteBytesLocalMem, + /* pfnUnpinMem */ + NULL, + /* pfnPinMem */ + NULL, + /* pfnChangeSparseMem*/ + &PMRChangeSparseMemLocalMem, + /* pfnChangeSparseMemCPUMap */ + &PMRChangeSparseMemCPUMapLocalMem, + /* pfnMMap */ + NULL, + /* pfnFinalize */ + &PMRFinalizeLocalMem +}; + +PVRSRV_ERROR +PhysmemNewLocalRamBackedPMR(PHYS_HEAP *psPhysHeap, + CONNECTION_DATA *psConnection, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_UINT32 uiLog2AllocPageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + const IMG_CHAR *pszAnnotation, + IMG_PID uiPid, + PMR **ppsPMRPtr, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + PVRSRV_ERROR eError2; + PMR *psPMR = NULL; + PMR_LMALLOCARRAY_DATA *psPrivData = NULL; + PMR_FLAGS_T uiPMRFlags; + IMG_BOOL bZero; + IMG_BOOL bPoisonOnAlloc; + IMG_BOOL bPoisonOnFree; + IMG_BOOL bOnDemand; + IMG_BOOL bContig; + + /* For sparse requests we have to do the allocation + * in chunks rather than requesting one contiguous block */ + if (ui32NumPhysChunks != ui32NumVirtChunks || ui32NumVirtChunks > 1) + { + if (PVRSRV_CHECK_KERNEL_CPU_MAPPABLE(uiFlags)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: LMA kernel mapping functions currently " + "don't work with discontiguous memory.", + __func__)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, errorOnParam); + } + bContig = IMG_FALSE; + } + else + { + bContig = IMG_TRUE; + } + + bOnDemand = PVRSRV_CHECK_ON_DEMAND(uiFlags) ? 
IMG_TRUE : IMG_FALSE; + bZero = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags) ? IMG_TRUE : IMG_FALSE; + bPoisonOnAlloc = PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags) ? IMG_TRUE : IMG_FALSE; + bPoisonOnFree = PVRSRV_CHECK_POISON_ON_FREE(uiFlags) ? IMG_TRUE : IMG_FALSE; + + /* Create Array structure that holds the physical pages */ + eError = _AllocLMPageArray(uiChunkSize * ui32NumVirtChunks, + ui32NumPhysChunks, + ui32NumVirtChunks, + pui32MappingTable, + uiLog2AllocPageSize, + bZero, + bPoisonOnAlloc, + bPoisonOnFree, + bContig, + bOnDemand, + psPhysHeap, + uiFlags, + uiPid, + &psPrivData, + psConnection); + PVR_GOTO_IF_ERROR(eError, errorOnAllocPageArray); + + if (!bOnDemand) + { + /* Allocate the physical pages */ + eError = _AllocLMPages(psPrivData, pui32MappingTable); + PVR_GOTO_IF_ERROR(eError, errorOnAllocPages); + } + + /* In this instance, we simply pass flags straight through. + + Generically, uiFlags can include things that control the PMR + factory, but we don't need any such thing (at the time of + writing!), and our caller specifies all PMR flags so we don't + need to meddle with what was given to us. 
+ */ + uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK); + /* check no significant bits were lost in cast due to different + bit widths for flags */ + PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK)); + + if (bOnDemand) + { + PDUMPCOMMENT("Deferred Allocation PMR (LMA)"); + } + + eError = PMRCreatePMR(psPhysHeap, + uiSize, + uiChunkSize, + ui32NumPhysChunks, + ui32NumVirtChunks, + pui32MappingTable, + uiLog2AllocPageSize, + uiPMRFlags, + pszAnnotation, + &_sPMRLMAFuncTab, + psPrivData, + PMR_TYPE_LMA, + &psPMR, + ui32PDumpFlags); + PVR_LOG_GOTO_IF_ERROR(eError, "PMRCreatePMR", errorOnCreate); + + *ppsPMRPtr = psPMR; + return PVRSRV_OK; + +errorOnCreate: + if (!bOnDemand && psPrivData->iNumPagesAllocated) + { + eError2 = _FreeLMPages(psPrivData, NULL, 0); + PVR_ASSERT(eError2 == PVRSRV_OK); + } + +errorOnAllocPages: + eError2 = _FreeLMPageArray(psPrivData); + PVR_ASSERT(eError2 == PVRSRV_OK); + +errorOnAllocPageArray: +errorOnParam: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} diff --git a/drivers/gpu/drm/phytium/octopus/physmem_lma.h b/drivers/gpu/drm/phytium/octopus/physmem_lma.h new file mode 100644 index 000000000000..e0c104425335 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/physmem_lma.h @@ -0,0 +1,97 @@ +/**************************************************************************/ /*! +@File +@Title Header for local card memory allocator +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Part of the memory management. This module is responsible for + implementing the function callbacks for local card memory. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /***************************************************************************/ + +#ifndef SRVSRV_PHYSMEM_LMA_H +#define SRVSRV_PHYSMEM_LMA_H + +/* include/ */ +#include "img_types.h" +#include "pvrsrv_error.h" +#include "pvrsrv_memallocflags.h" + +/* services/server/include/ */ +#include "pmr.h" +#include "pmr_impl.h" + +/*************************************************************************/ /*! +@Function PhysmemCreateHeapLMA +@Description Create and register new LMA heap with LMA specific details. +@Input psDevNode Pointer to device node struct. +@Input psConfig Heap configuration. +@Input pszLabel Debug identifier label +@Output ppsPhysHeap Pointer to the created heap. +@Return PVRSRV_ERROR PVRSRV_OK or error code +*/ /**************************************************************************/ +PVRSRV_ERROR +PhysmemCreateHeapLMA(PVRSRV_DEVICE_NODE *psDevNode, + PHYS_HEAP_CONFIG *psConfig, + IMG_CHAR *pszLabel, + PHYS_HEAP **ppsPhysHeap); + +PVRSRV_ERROR +PhysmemGetArenaLMA(PHYS_HEAP *psPhysHeap, + RA_ARENA **ppsArena); + +/* + * PhysmemNewLocalRamBackedPMR + * + * This function will create a PMR using the local card memory and is OS + * agnostic. + */ +PVRSRV_ERROR +PhysmemNewLocalRamBackedPMR(PHYS_HEAP *psPhysHeap, + CONNECTION_DATA *psConnection, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_UINT32 uiLog2PageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + const IMG_CHAR *pszAnnotation, + IMG_PID uiPid, + PMR **ppsPMRPtr, + IMG_UINT32 ui32PDumpFlags); + +#endif /* #ifndef SRVSRV_PHYSMEM_LMA_H */ diff --git a/drivers/gpu/drm/phytium/octopus/physmem_osmem.h b/drivers/gpu/drm/phytium/octopus/physmem_osmem.h new file mode 100644 index 000000000000..d48cb92fb691 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/physmem_osmem.h @@ -0,0 +1,129 @@ +/*************************************************************************/ /*! 
+@File physmem_osmem.h +@Title OS memory PMR factory API +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Part of Services memory management. This file defines the + OS memory PMR factory API that must be defined so that the + common & device layer code in the Services Server can allocate + new PMRs back with pages from the OS page allocator. Applicable + for UMA based platforms, such platforms must implement this API + in the OS Porting layer, in the "env" directory for that + system. + +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef PHYSMEM_OSMEM_H +#define PHYSMEM_OSMEM_H + +/* include/ */ +#include "img_types.h" +#include "pvrsrv_error.h" +#include "pvrsrv_memallocflags.h" + +/* services/server/include/ */ +#include "pmr.h" +#include "pmr_impl.h" +#include "connection_server.h" + +/*************************************************************************/ /*! +@Function PhysmemNewOSRamBackedPMR +@Description Rogue Services will call this function to allocate GPU device + memory from the PMR factory supported by the OS DDK port. This + factory typically obtains physical memory from the kernel/OS + API that allocates memory from the default heap of shared + system memory available on the platform. The allocated memory + must be page-aligned and be a whole number of pages. + After allocating the required memory, the implementation must + then call PMRCreatePMR() to obtain the PMR structure that + describes this allocation to the upper layers of the Services. + memory management sub-system. + NB. Implementation of this function is mandatory. If shared + system memory is not to be used in the OS port then the + implementation must return PVRSRV_ERROR_NOT_SUPPORTED. 
+ +@Input psPhysHeap the phys heap +@Input psConnection the connection to the originator process +@Input uiSize the size of the allocation + (must be a multiple of page size) +@Input uiChunkSize when sparse allocations are requested, + this is the allocated chunk size. + For regular allocations, this will be + the same as uiSize. + (must be a multiple of page size) +@Input ui32NumPhysChunks when sparse allocations are requested, + this is the number of physical chunks + to be allocated. + For regular allocations, this will be 1. +@Input ui32NumVirtChunks when sparse allocations are requested, + this is the number of virtual chunks + covering the sparse allocation. + For regular allocations, this will be 1. +@Input pui32MappingTable when sparse allocations are requested, + this is the list of the indices of + each physically-backed virtual chunk + For regular allocations, this will + be NULL. +@Input uiLog2PageSize the physical pagesize in log2(bytes). +@Input uiFlags the allocation flags. +@Input pszAnnotation string describing the PMR (for debug). + This should be passed into the function + PMRCreatePMR(). +@Input uiPid The process ID that this allocation should + be associated with. +@Output ppsPMROut pointer to the PMR created for the + new allocation +@Input ui32PDumpFlags the pdump flags. +@Return PVRSRV_OK on success, a failure code otherwise. 
+*/ /**************************************************************************/ +PVRSRV_ERROR +PhysmemNewOSRamBackedPMR(PHYS_HEAP *psPhysHeap, + CONNECTION_DATA *psConnection, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + IMG_UINT32 uiLog2PageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + const IMG_CHAR *pszAnnotation, + IMG_PID uiPid, + PMR **ppsPMROut, + IMG_UINT32 ui32PDumpFlags); + +#endif /* PHYSMEM_OSMEM_H */ diff --git a/drivers/gpu/drm/phytium/octopus/physmem_osmem_linux.c b/drivers/gpu/drm/phytium/octopus/physmem_osmem_linux.c new file mode 100644 index 000000000000..3ea8be8759b2 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/physmem_osmem_linux.c @@ -0,0 +1,3912 @@ +/*************************************************************************/ /*! +@File +@Title Implementation of PMR functions for OS managed memory +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Part of the memory management. This module is responsible for + implementing the function callbacks for physical memory borrowed + from that normally managed by the operating system. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined(CONFIG_X86) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)) +#include +#else +#include +#endif +#endif + +/* include/ */ +#include "rgx_heaps.h" +#include "img_types.h" +#include "img_defs.h" +#include "pvr_debug.h" +#include "pvrsrv_error.h" +#include "pvrsrv_memallocflags.h" +#include "rgx_pdump_panics.h" +/* services/server/include/ */ +#include "allocmem.h" +#include "osfunc.h" +#include "pdump_km.h" +#include "pmr.h" +#include "pmr_impl.h" +#include "cache_km.h" +#include "devicemem_server_utils.h" +#include "pvr_vmap.h" + +/* ourselves */ +#include "physmem_osmem.h" +#include "physmem_osmem_linux.h" + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#include "process_stats.h" +#if !defined(PVRSRV_ENABLE_MEMORY_STATS) +#include "hash.h" +#endif +#endif + +#include "kernel_compatibility.h" + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)) +static IMG_UINT32 g_uiMaxOrder = PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM; +#else +/* split_page not available on older kernels */ +#undef PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM +#define PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM 0 +static IMG_UINT32 g_uiMaxOrder; +#endif + +/* + These corresponds to the MMU min/max page sizes and associated PTE + alignment that can be used on the device for an allocation. It is + 4KB (min) and 2MB (max) respectively. +*/ +#define PVR_MIN_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ RGX_HEAP_4KB_PAGE_SHIFT +#define PVR_MAX_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ RGX_HEAP_2MB_PAGE_SHIFT + +/* Defines how many pages should be mapped at once to the kernel */ +#define PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES 1024 /* 4 MB */ + +/* + These are used to get/set/mask lower-order bits in a dma_addr_t + to provide side-band information associated with that address. 
+ These includes whether the address was obtained via alloc_page + or dma_alloc and if address came allocated pre-aligned or an + adjustment was made manually to aligned it. +*/ +#define DMA_SET_ADJUSTED_ADDR(x) ((x) | ((dma_addr_t)0x02)) +#define DMA_IS_ADDR_ADJUSTED(x) ((x) & ((dma_addr_t)0x02)) +#define DMA_SET_ALLOCPG_ADDR(x) ((x) | ((dma_addr_t)0x01)) +#define DMA_IS_ALLOCPG_ADDR(x) ((x) & ((dma_addr_t)0x01)) +#define DMA_GET_ALIGN_ADJUSTMENT(x) ((x>>2) & ((dma_addr_t)0x3ff)) +#define DMA_SET_ALIGN_ADJUSTMENT(x,y) ((x) | (((dma_addr_t)y)<<0x02)) +#define DMA_GET_ADDR(x) (((dma_addr_t)x) & ((dma_addr_t)~0xfff)) +#define DMA_VADDR_NOT_IN_USE 0xCAFEF00DDEADBEEFULL + +typedef struct _PMR_OSPAGEARRAY_DATA_ { + /* Device for which this allocation has been made */ + PVRSRV_DEVICE_NODE *psDevNode; + /* The pid that made this allocation */ + IMG_PID uiPid; + + /* + * iNumOSPagesAllocated: + * Number of pages allocated in this PMR so far. + * This allows for up to (2^31 - 1) pages. With 4KB pages, that's 8TB of memory for each PMR. + */ + IMG_INT32 iNumOSPagesAllocated; + + /* + * uiTotalNumOSPages: + * Total number of pages supported by this PMR. (Fixed as of now due the fixed Page table array size) + * number of "pages" (a.k.a. macro pages, compound pages, higher order pages, etc...) + */ + IMG_UINT32 uiTotalNumOSPages; + + /* + uiLog2AllocPageSize; + + size of each "page" -- this would normally be the same as + PAGE_SHIFT, but we support the idea that we may allocate pages + in larger chunks for better contiguity, using order>0 in the + call to alloc_pages() + */ + IMG_UINT32 uiLog2AllocPageSize; + + /* + ui64DmaMask; + */ + IMG_UINT64 ui64DmaMask; + + /* + For non DMA/CMA allocation, pagearray references the pages + thus allocated; one entry per compound page when compound + pages are used. In addition, for DMA/CMA allocations, we + track the returned cpu virtual and device bus address. 
+ */ + struct page **pagearray; + dma_addr_t *dmaphysarray; + void **dmavirtarray; + + +#define FLAG_ZERO (0U) +#define FLAG_POISON_ON_FREE (1U) +#define FLAG_POISON_ON_ALLOC (2U) +#define FLAG_ONDEMAND (3U) +#define FLAG_UNPINNED (4U) +#define FLAG_IS_CMA (5U) +#define FLAG_UNSET_MEMORY_TYPE (6U) + + /* + * Allocation flags related to the pages: + * Zero - Should we Zero memory on alloc + * Poison on free - Should we Poison the memory on free. + * Poison on alloc - Should we Poison the memory on alloc. + * On demand - Is the allocation on Demand i.e Do we defer allocation to time of use. + * Unpinned - Should be protected by page pool lock + * CMA - Is CMA memory allocated via DMA framework + * Unset Memory Type - Upon free do we need to revert the cache type before return to OS + * */ + IMG_UINT32 ui32AllocFlags; + + /* + The cache mode of the PMR. Additionally carrying the CPU-Cache-Clean + flag, advising us to do cache maintenance on behalf of the caller. + Boolean used to track if we need to revert the cache attributes + of the pages used in this allocation. Depends on OS/architecture. + */ + IMG_UINT32 ui32CPUCacheFlags; + /* + * In CMA allocation path, algorithm can allocate double the size of + * requested allocation size to satisfy the alignment. 
In this case + * the additional pages allocated are tracked through this additional + * variable and are accounted for in the memory statistics */ + IMG_UINT32 ui32CMAAdjustedPageCount; +} PMR_OSPAGEARRAY_DATA; + +/*********************************** + * Page pooling for uncached pages * + ***********************************/ + +static INLINE void +_FreeOSPage_CMA(struct device *dev, + size_t alloc_size, + IMG_UINT32 uiOrder, + void *virt_addr, + dma_addr_t dev_addr, + struct page *psPage); + +static void +_FreeOSPage(IMG_UINT32 uiOrder, + IMG_BOOL bUnsetMemoryType, + struct page *psPage); + +static PVRSRV_ERROR +_FreeOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData, + IMG_UINT32 *pai32FreeIndices, + IMG_UINT32 ui32FreePageCount); + +static PVRSRV_ERROR +_FreePagesFromPoolUnlocked(IMG_UINT32 uiMaxPagesToFree, + IMG_UINT32 *puiPagesFreed); + +/* A struct for our page pool holding an array of zeroed (!) pages. + * We always put units of page arrays to the pool but are + * able to take individual pages */ +typedef struct +{ + /* Linkage for page pool LRU list */ + struct list_head sPagePoolItem; + + /* How many items are still in the page array */ + IMG_UINT32 uiItemsRemaining; + /* Array of the actual pages */ + struct page **ppsPageArray; + +} LinuxPagePoolEntry; + +/* CleanupThread structure to put allocation in page pool */ +typedef struct +{ + PVRSRV_CLEANUP_THREAD_WORK sCleanupWork; + IMG_UINT32 ui32CPUCacheMode; + LinuxPagePoolEntry *psPoolEntry; +} LinuxCleanupData; + +/* A struct for the unpinned items */ +typedef struct +{ + struct list_head sUnpinPoolItem; + PMR_OSPAGEARRAY_DATA *psPageArrayDataPtr; +} LinuxUnpinEntry; + + +/* Caches to hold page pool and page array structures */ +static struct kmem_cache *g_psLinuxPagePoolCache; +static struct kmem_cache *g_psLinuxPageArray; + +/* Track what is live, all protected by pool lock. 
+ * x86 needs two page pools because we have to change the memory attributes + * of the pages which is expensive due to an implicit flush. + * See set_pages_array_uc/wc/wb. */ +static IMG_UINT32 g_ui32UnpinPageCount; +static IMG_UINT32 g_ui32PagePoolUCCount; +#if defined(CONFIG_X86) +static IMG_UINT32 g_ui32PagePoolWCCount; +#endif +/* Tracks asynchronous tasks currently accessing the page pool. + * It is incremented if a defer free task + * is created. Both will decrement the value when they finished the work. + * The atomic prevents piling up of deferred work in case the deferred thread + * cannot keep up with the application.*/ +static ATOMIC_T g_iPoolCleanTasks; +/* We don't want too many asynchronous threads trying to access the page pool + * at the same time */ +#define PVR_LINUX_PHYSMEM_MAX_ASYNC_CLEAN_TASKS 128 + +/* Defines how many pages the page cache should hold. */ +#if defined(PVR_LINUX_PHYSMEM_MAX_POOL_PAGES) +static const IMG_UINT32 g_ui32PagePoolMaxEntries = PVR_LINUX_PHYSMEM_MAX_POOL_PAGES; +#else +static const IMG_UINT32 g_ui32PagePoolMaxEntries; +#endif + +/* We double check if we would exceed this limit if we are below MAX_POOL_PAGES + and want to add an allocation to the pool. + This prevents big allocations being given back to the OS just because they + exceed the MAX_POOL_PAGES limit even though the pool is currently empty. 
*/ +#if defined(PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES) +static const IMG_UINT32 g_ui32PagePoolMaxExcessEntries = PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES; +#else +static const IMG_UINT32 g_ui32PagePoolMaxExcessEntries; +#endif + +#if defined(CONFIG_X86) +#define PHYSMEM_OSMEM_NUM_OF_POOLS 2 +static const IMG_UINT32 g_aui32CPUCacheFlags[PHYSMEM_OSMEM_NUM_OF_POOLS] = { + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED, + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC +}; +#else +#define PHYSMEM_OSMEM_NUM_OF_POOLS 1 +static const IMG_UINT32 g_aui32CPUCacheFlags[PHYSMEM_OSMEM_NUM_OF_POOLS] = { + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED +}; +#endif + +/* Global structures we use to manage the page pool */ +static DEFINE_MUTEX(g_sPagePoolMutex); + +/* List holding the page array pointers: */ +static LIST_HEAD(g_sPagePoolList_WC); +static LIST_HEAD(g_sPagePoolList_UC); +static LIST_HEAD(g_sUnpinList); + +#if defined(DEBUG) && defined(SUPPORT_VALIDATION) +/* Global structure to manage GPU memory leak */ +static DEFINE_MUTEX(g_sUMALeakMutex); +static IMG_UINT32 g_ui32UMALeakCounter = 0; +#endif + +static inline IMG_UINT32 +_PagesInPoolUnlocked(void) +{ + IMG_UINT32 uiCnt = g_ui32PagePoolUCCount; +#if defined(CONFIG_X86) + uiCnt += g_ui32PagePoolWCCount; +#endif + return uiCnt; +} + +static inline void +_PagePoolLock(void) +{ + mutex_lock(&g_sPagePoolMutex); +} + +static inline int +_PagePoolTrylock(void) +{ + return mutex_trylock(&g_sPagePoolMutex); +} + +static inline void +_PagePoolUnlock(void) +{ + mutex_unlock(&g_sPagePoolMutex); +} + +static PVRSRV_ERROR +_AddUnpinListEntryUnlocked(PMR_OSPAGEARRAY_DATA *psOSPageArrayData) +{ + LinuxUnpinEntry *psUnpinEntry; + + psUnpinEntry = OSAllocMem(sizeof(*psUnpinEntry)); + if (!psUnpinEntry) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: OSAllocMem failed. 
Cannot add entry to unpin list.", + __func__)); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + psUnpinEntry->psPageArrayDataPtr = psOSPageArrayData; + + /* Add into pool that the shrinker can access easily*/ + list_add_tail(&psUnpinEntry->sUnpinPoolItem, &g_sUnpinList); + + g_ui32UnpinPageCount += psOSPageArrayData->iNumOSPagesAllocated; + + return PVRSRV_OK; +} + +static void +_RemoveUnpinListEntryUnlocked(PMR_OSPAGEARRAY_DATA *psOSPageArrayData) +{ + LinuxUnpinEntry *psUnpinEntry, *psTempUnpinEntry; + + /* Remove from pool */ + list_for_each_entry_safe(psUnpinEntry, + psTempUnpinEntry, + &g_sUnpinList, + sUnpinPoolItem) + { + if (psUnpinEntry->psPageArrayDataPtr == psOSPageArrayData) + { + list_del(&psUnpinEntry->sUnpinPoolItem); + break; + } + } + + OSFreeMem(psUnpinEntry); + + g_ui32UnpinPageCount -= psOSPageArrayData->iNumOSPagesAllocated; +} + +static inline IMG_BOOL +_GetPoolListHead(IMG_UINT32 ui32CPUCacheFlags, + struct list_head **ppsPoolHead, + IMG_UINT32 **ppuiCounter) +{ + switch (PVRSRV_CPU_CACHE_MODE(ui32CPUCacheFlags)) + { + case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC: +#if defined(CONFIG_X86) + /* + For x86 we need to keep different lists for uncached + and write-combined as we must always honour the PAT + setting which cares about this difference. + */ + + *ppsPoolHead = &g_sPagePoolList_WC; + *ppuiCounter = &g_ui32PagePoolWCCount; + break; +#endif + + case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED: + *ppsPoolHead = &g_sPagePoolList_UC; + *ppuiCounter = &g_ui32PagePoolUCCount; + break; + + default: + PVR_DPF((PVR_DBG_ERROR, + "%s: Unknown CPU caching mode. " + "Using default UC pool.", + __func__)); + *ppsPoolHead = &g_sPagePoolList_UC; + *ppuiCounter = &g_ui32PagePoolUCCount; + PVR_ASSERT(0); + return IMG_FALSE; + } + return IMG_TRUE; +} + +static struct shrinker g_sShrinker; + +/* Returning the number of pages that still reside in the page pool. 
*/ +static unsigned long +_GetNumberOfPagesInPoolUnlocked(void) +{ + return _PagesInPoolUnlocked() + g_ui32UnpinPageCount; +} + +/* Linux shrinker function that informs the OS about how many pages we are caching and + * it is able to reclaim. */ +static unsigned long +_CountObjectsInPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl) +{ + int remain; + + PVR_ASSERT(psShrinker == &g_sShrinker); + (void)psShrinker; + (void)psShrinkControl; + + /* In order to avoid possible deadlock use mutex_trylock in place of mutex_lock */ + if (_PagePoolTrylock() == 0) + return 0; + remain = _GetNumberOfPagesInPoolUnlocked(); + _PagePoolUnlock(); + + return remain; +} + +/* Linux shrinker function to reclaim the pages from our page pool */ +static unsigned long +_ScanObjectsInPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl) +{ + unsigned long uNumToScan = psShrinkControl->nr_to_scan; + unsigned long uSurplus = 0; + LinuxUnpinEntry *psUnpinEntry, *psTempUnpinEntry; + IMG_UINT32 uiPagesFreed; + + PVR_ASSERT(psShrinker == &g_sShrinker); + (void)psShrinker; + + /* In order to avoid possible deadlock use mutex_trylock in place of mutex_lock */ + if (_PagePoolTrylock() == 0) + return SHRINK_STOP; + + _FreePagesFromPoolUnlocked(uNumToScan, + &uiPagesFreed); + uNumToScan -= uiPagesFreed; + + if (uNumToScan == 0) + { + goto e_exit; + } + + /* Free unpinned memory, starting with LRU entries */ + list_for_each_entry_safe(psUnpinEntry, + psTempUnpinEntry, + &g_sUnpinList, + sUnpinPoolItem) + { + PMR_OSPAGEARRAY_DATA *psPageArrayDataPtr = psUnpinEntry->psPageArrayDataPtr; + IMG_UINT32 uiNumPages = (psPageArrayDataPtr->uiTotalNumOSPages > psPageArrayDataPtr->iNumOSPagesAllocated)? 
+ psPageArrayDataPtr->iNumOSPagesAllocated:psPageArrayDataPtr->uiTotalNumOSPages; + PVRSRV_ERROR eError; + + /* Free associated pages */ + eError = _FreeOSPages(psPageArrayDataPtr, + NULL, + 0); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Shrinker is unable to free unpinned pages. Error: %s (%d)", + __func__, + PVRSRVGetErrorString(eError), + eError)); + goto e_exit; + } + + /* Remove item from pool */ + list_del(&psUnpinEntry->sUnpinPoolItem); + + g_ui32UnpinPageCount -= uiNumPages; + + /* Check if there is more to free or if we already surpassed the limit */ + if (uiNumPages < uNumToScan) + { + uNumToScan -= uiNumPages; + + } + else if (uiNumPages > uNumToScan) + { + uSurplus += uiNumPages - uNumToScan; + uNumToScan = 0; + goto e_exit; + } + else + { + uNumToScan -= uiNumPages; + goto e_exit; + } + } + +e_exit: + if (list_empty(&g_sUnpinList)) + { + PVR_ASSERT(g_ui32UnpinPageCount == 0); + } + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0)) + { + int remain; + remain = _GetNumberOfPagesInPoolUnlocked(); + _PagePoolUnlock(); + return remain; + } +#else + /* Returning the number of pages freed during the scan */ + _PagePoolUnlock(); + return psShrinkControl->nr_to_scan - uNumToScan + uSurplus; +#endif +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0)) +static int +_ShrinkPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl) +{ + if (psShrinkControl->nr_to_scan != 0) + { + return _ScanObjectsInPagePool(psShrinker, psShrinkControl); + } + else + { + /* No pages are being reclaimed so just return the page count */ + return _CountObjectsInPagePool(psShrinker, psShrinkControl); + } +} + +static struct shrinker g_sShrinker = +{ + .shrink = _ShrinkPagePool, + .seeks = DEFAULT_SEEKS +}; +#else +static struct shrinker g_sShrinker = +{ + .count_objects = _CountObjectsInPagePool, + .scan_objects = _ScanObjectsInPagePool, + .seeks = DEFAULT_SEEKS +}; +#endif + +/* Register the shrinker so Linux can reclaim cached pages */ 
+void LinuxInitPhysmem(void) +{ + g_psLinuxPageArray = kmem_cache_create("pvr-pa", sizeof(PMR_OSPAGEARRAY_DATA), 0, 0, NULL); + + g_psLinuxPagePoolCache = kmem_cache_create("pvr-pp", sizeof(LinuxPagePoolEntry), 0, 0, NULL); + if (g_psLinuxPagePoolCache) + { + /* Only create the shrinker if we created the cache OK */ + register_shrinker(&g_sShrinker); + } + + OSAtomicWrite(&g_iPoolCleanTasks, 0); +} + +/* Unregister the shrinker and remove all pages from the pool that are still left */ +void LinuxDeinitPhysmem(void) +{ + IMG_UINT32 uiPagesFreed; + + if (OSAtomicRead(&g_iPoolCleanTasks) > 0) + { + PVR_DPF((PVR_DBG_WARNING, "Still deferred cleanup tasks running " + "while deinitialising memory subsystem.")); + } + + _PagePoolLock(); + if (_FreePagesFromPoolUnlocked(IMG_UINT32_MAX, &uiPagesFreed) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Unable to free all pages from page pool when " + "deinitialising memory subsystem.")); + PVR_ASSERT(0); + } + + PVR_ASSERT(_PagesInPoolUnlocked() == 0); + + /* Free the page cache */ + kmem_cache_destroy(g_psLinuxPagePoolCache); + + unregister_shrinker(&g_sShrinker); + _PagePoolUnlock(); + + kmem_cache_destroy(g_psLinuxPageArray); +} + +static void EnableOOMKiller(void) +{ + current->flags &= ~PF_DUMPCORE; +} + +static void DisableOOMKiller(void) +{ + /* PF_DUMPCORE is treated by the VM as if the OOM killer was disabled. + * + * As oom_killer_disable() is an inline, non-exported function, we + * can't use it from a modular driver. Furthermore, the OOM killer + * API doesn't look thread safe, which 'current' is. 
+ */ + WARN_ON(current->flags & PF_DUMPCORE); + current->flags |= PF_DUMPCORE; +} + +/* Prints out the addresses in a page array for debugging purposes + * Define PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_ARRAY locally to activate: */ +/* #define PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_ARRAY 1 */ +static inline void +_DumpPageArray(struct page **pagearray, IMG_UINT32 uiPagesToPrint) +{ +#if defined(PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_ARRAY) + IMG_UINT32 i; + if (pagearray) + { + printk("Array %p:\n", pagearray); + for (i = 0; i < uiPagesToPrint; i++) + { + printk("%p | ", (pagearray)[i]); + } + printk("\n"); + } + else + { + printk("Array is NULL:\n"); + } +#else + PVR_UNREFERENCED_PARAMETER(pagearray); + PVR_UNREFERENCED_PARAMETER(uiPagesToPrint); +#endif +} + +/* Debugging function that dumps out the number of pages for every + * page array that is currently in the page pool. + * Not defined by default. Define locally to activate feature: */ +/* #define PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_POOL 1 */ +static void +_DumpPoolStructure(void) +{ +#if defined(PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_POOL) + LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry; + struct list_head *psPoolHead = NULL; + IMG_UINT32 j; + IMG_UINT32 *puiCounter; + + printk("\n"); + /* Empty all pools */ + for (j = 0; j < PHYSMEM_OSMEM_NUM_OF_POOLS; j++) + { + + printk("pool = %u\n", j); + + /* Get the correct list for this caching mode */ + if (!_GetPoolListHead(g_aui32CPUCacheFlags[j], &psPoolHead, &puiCounter)) + { + break; + } + + list_for_each_entry_safe(psPagePoolEntry, + psTempPoolEntry, + psPoolHead, + sPagePoolItem) + { + printk("%u | ", psPagePoolEntry->uiItemsRemaining); + } + printk("\n"); + } +#endif +} + +/* Free a certain number of pages from the page pool. + * Mainly used in error paths or at deinitialisation to + * empty the whole pool. 
*/ +static PVRSRV_ERROR +_FreePagesFromPoolUnlocked(IMG_UINT32 uiMaxPagesToFree, + IMG_UINT32 *puiPagesFreed) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry; + struct list_head *psPoolHead = NULL; + IMG_UINT32 i, j; + IMG_UINT32 *puiCounter; + + *puiPagesFreed = uiMaxPagesToFree; + + /* Empty all pools */ + for (j = 0; j < PHYSMEM_OSMEM_NUM_OF_POOLS; j++) + { + + /* Get the correct list for this caching mode */ + if (!_GetPoolListHead(g_aui32CPUCacheFlags[j], &psPoolHead, &puiCounter)) + { + break; + } + + /* Free the pages and remove page arrays from the pool if they are exhausted */ + list_for_each_entry_safe(psPagePoolEntry, + psTempPoolEntry, + psPoolHead, + sPagePoolItem) + { + IMG_UINT32 uiItemsToFree; + struct page **ppsPageArray; + + /* Check if we are going to free the whole page array or just parts */ + if (psPagePoolEntry->uiItemsRemaining <= uiMaxPagesToFree) + { + uiItemsToFree = psPagePoolEntry->uiItemsRemaining; + ppsPageArray = psPagePoolEntry->ppsPageArray; + } + else + { + uiItemsToFree = uiMaxPagesToFree; + ppsPageArray = &(psPagePoolEntry->ppsPageArray[psPagePoolEntry->uiItemsRemaining - uiItemsToFree]); + } + +#if defined(CONFIG_X86) + /* Set the correct page caching attributes on x86 */ + if (!PVRSRV_CHECK_CPU_CACHED(g_aui32CPUCacheFlags[j])) + { + int ret; + ret = set_pages_array_wb(ppsPageArray, uiItemsToFree); + if (ret) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to reset page attributes", + __func__)); + eError = PVRSRV_ERROR_FAILED_TO_FREE_PAGES; + goto e_exit; + } + } +#endif + + /* Free the actual pages */ + for (i = 0; i < uiItemsToFree; i++) + { + __free_pages(ppsPageArray[i], 0); + ppsPageArray[i] = NULL; + } + + /* Reduce counters */ + uiMaxPagesToFree -= uiItemsToFree; + *puiCounter -= uiItemsToFree; + psPagePoolEntry->uiItemsRemaining -= uiItemsToFree; + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + /* + * MemStats usually relies on having the bridge lock held, however + * the page 
pool code may call PVRSRVStatsIncrMemAllocPoolStat and + * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so + * the page pool lock is used to ensure these calls are mutually + * exclusive + */ + PVRSRVStatsDecrMemAllocPoolStat(PAGE_SIZE * uiItemsToFree); +#endif + + /* Is this pool entry exhausted, delete it */ + if (psPagePoolEntry->uiItemsRemaining == 0) + { + OSFreeMemNoStats(psPagePoolEntry->ppsPageArray); + list_del(&psPagePoolEntry->sPagePoolItem); + kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry); + } + + /* Return if we have all our pages */ + if (uiMaxPagesToFree == 0) + { + goto e_exit; + } + } + } + +e_exit: + *puiPagesFreed -= uiMaxPagesToFree; + _DumpPoolStructure(); + return eError; +} + +/* Get a certain number of pages from the page pool and + * copy them directly into a given page array. */ +static void +_GetPagesFromPoolUnlocked(IMG_UINT32 ui32CPUCacheFlags, + IMG_UINT32 uiMaxNumPages, + struct page **ppsPageArray, + IMG_UINT32 *puiNumReceivedPages) +{ + LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry; + struct list_head *psPoolHead = NULL; + IMG_UINT32 i; + IMG_UINT32 *puiCounter; + + *puiNumReceivedPages = 0; + + /* Get the correct list for this caching mode */ + if (!_GetPoolListHead(ui32CPUCacheFlags, &psPoolHead, &puiCounter)) + { + return; + } + + /* Check if there are actually items in the list */ + if (list_empty(psPoolHead)) + { + return; + } + + PVR_ASSERT(*puiCounter > 0); + + /* Receive pages from the pool */ + list_for_each_entry_safe(psPagePoolEntry, + psTempPoolEntry, + psPoolHead, + sPagePoolItem) + { + /* Get the pages from this pool entry */ + for (i = psPagePoolEntry->uiItemsRemaining; i != 0 && *puiNumReceivedPages < uiMaxNumPages; i--) + { + ppsPageArray[*puiNumReceivedPages] = psPagePoolEntry->ppsPageArray[i-1]; + (*puiNumReceivedPages)++; + psPagePoolEntry->uiItemsRemaining--; + } + + /* Is this pool entry exhausted, delete it */ + if (psPagePoolEntry->uiItemsRemaining == 0) + { + 
OSFreeMemNoStats(psPagePoolEntry->ppsPageArray); + list_del(&psPagePoolEntry->sPagePoolItem); + kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry); + } + + /* Return if we have all our pages */ + if (*puiNumReceivedPages == uiMaxNumPages) + { + goto exit_ok; + } + } + +exit_ok: + + /* Update counters */ + *puiCounter -= *puiNumReceivedPages; + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + /* MemStats usually relies on having the bridge lock held, however + * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and + * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so + * the page pool lock is used to ensure these calls are mutually + * exclusive + */ + PVRSRVStatsDecrMemAllocPoolStat(PAGE_SIZE * (*puiNumReceivedPages)); +#endif + + _DumpPoolStructure(); + return; +} + +/* Same as _GetPagesFromPoolUnlocked but handles locking and + * checks first whether pages from the pool are a valid option. */ +static inline void +_GetPagesFromPoolLocked(PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 ui32CPUCacheFlags, + IMG_UINT32 uiPagesToAlloc, + IMG_UINT32 uiOrder, + IMG_BOOL bZero, + struct page **ppsPageArray, + IMG_UINT32 *puiPagesFromPool) +{ +#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) + PVR_UNREFERENCED_PARAMETER(bZero); +#else + /* Don't get pages from pool if it doesn't provide zeroed pages */ + if (bZero) + { + return; + } +#endif + + /* The page pool stores only order 0 pages. If we need zeroed memory we + * directly allocate from the OS because it is faster than + * doing it within the driver. 
*/ + if (uiOrder == 0 && + !PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags)) + { + + _PagePoolLock(); + _GetPagesFromPoolUnlocked(ui32CPUCacheFlags, + uiPagesToAlloc, + ppsPageArray, + puiPagesFromPool); + _PagePoolUnlock(); + } + + return; +} + +/* Takes a page array and maps it into the kernel to write zeros */ +static PVRSRV_ERROR +_ZeroPageArray(IMG_UINT32 uiNumToClean, + struct page **ppsCleanArray, + pgprot_t pgprot) +{ + IMG_CPU_VIRTADDR pvAddr; + IMG_UINT32 uiMaxPagesToMap = MIN(PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES, + uiNumToClean); + + /* Map and fill the pages with zeros. + * For large page arrays do it PVR_LINUX_PHYSMEM_MAX_KMAP_SIZE + * at a time. */ + while (uiNumToClean != 0) + { + IMG_UINT32 uiToClean = MIN(uiNumToClean, uiMaxPagesToMap); + + pvAddr = pvr_vmap(ppsCleanArray, uiToClean, VM_WRITE, pgprot); + if (!pvAddr) + { + if (uiMaxPagesToMap <= 1) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Out of vmalloc memory, " + "unable to map pages for zeroing.", __func__)); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + else + { + /* Halve the pages to map at once and try again. 
*/ + uiMaxPagesToMap = uiMaxPagesToMap >> 1; + continue; + } + } + + if (pgprot_val(pgprot) == pgprot_val(pgprot_noncached(PAGE_KERNEL))) + { + /* this is most likely unnecessary as all pages must be 8-bytes + * aligned so there unaligned access is impossible */ + OSDeviceMemSet(pvAddr, 0, PAGE_SIZE * uiToClean); + } + else if (pgprot_val(pgprot) == pgprot_val(pgprot_writecombine(PAGE_KERNEL))) + { + OSCachedMemSet(pvAddr, 0, PAGE_SIZE * uiToClean); + OSWriteMemoryBarrier(); + } + else + { + OSCachedMemSet(pvAddr, 0, PAGE_SIZE * uiToClean); + } + pvr_vunmap(pvAddr, uiToClean, pgprot); + ppsCleanArray = &(ppsCleanArray[uiToClean]); + uiNumToClean -= uiToClean; + } + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +_CleanupThread_CleanPages(void *pvData) +{ + LinuxCleanupData *psCleanupData = (LinuxCleanupData*) pvData; + LinuxPagePoolEntry *psPagePoolEntry = psCleanupData->psPoolEntry; + struct list_head *psPoolHead = NULL; + IMG_UINT32 *puiCounter = NULL; +#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) + PVRSRV_ERROR eError; + pgprot_t pgprot; + IMG_UINT32 i; +#endif /* defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) */ + + /* Get the correct pool for this caching mode. */ + _GetPoolListHead(psCleanupData->ui32CPUCacheMode , &psPoolHead, &puiCounter); + +#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) + switch (PVRSRV_CPU_CACHE_MODE(psCleanupData->ui32CPUCacheMode)) + { + case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED: +#if defined(CONFIG_X86) + /* For x86 we can only map with the same attributes + * as in the PAT settings*/ + pgprot = pgprot_noncached(PAGE_KERNEL); + break; +#endif + + case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC: + pgprot = pgprot_writecombine(PAGE_KERNEL); + break; + + default: + PVR_DPF((PVR_DBG_ERROR, + "%s: Unknown caching mode to set page protection flags.", + __func__)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto eExit; + } + + /* Map and fill the pages with zeros. + * For large page arrays do it PVR_LINUX_PHYSMEM_MAX_KMAP_SIZE + * at a time. 
*/ + eError = _ZeroPageArray(psPagePoolEntry->uiItemsRemaining, + psPagePoolEntry->ppsPageArray, + pgprot); + if (eError != PVRSRV_OK) + { + goto eExit; + } +#endif /* defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) */ + + /* Lock down pool and add item */ + _PagePoolLock(); + + /* Pool counters were already updated so don't do it here again*/ + + /* The pages are all zeroed so return them to the pool. */ + list_add_tail(&psPagePoolEntry->sPagePoolItem, psPoolHead); + + _DumpPoolStructure(); + _PagePoolUnlock(); + + OSFreeMem(pvData); + OSAtomicDecrement(&g_iPoolCleanTasks); + + return PVRSRV_OK; + +#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) +eExit: + /* we failed to zero the pages so return the error so we can + * retry during the next spin */ + if ((psCleanupData->sCleanupWork.ui32RetryCount - 1) > 0) + { + return eError; + } + + /* this was the last retry, give up and free pages to OS */ + PVR_DPF((PVR_DBG_ERROR, + "%s: Deferred task error, freeing pages to OS.", + __func__)); + _PagePoolLock(); + + *puiCounter -= psPagePoolEntry->uiItemsRemaining; + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + /* MemStats usually relies on having the bridge lock held, however + * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and + * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so + * the page pool lock is used to ensure these calls are mutually + * exclusive + */ + PVRSRVStatsDecrMemAllocPoolStat(PAGE_SIZE * psCleanupData->psPoolEntry->uiItemsRemaining); +#endif + + _PagePoolUnlock(); + + for (i = 0; i < psCleanupData->psPoolEntry->uiItemsRemaining; i++) + { + _FreeOSPage(0, IMG_TRUE, psPagePoolEntry->ppsPageArray[i]); + } + OSFreeMemNoStats(psPagePoolEntry->ppsPageArray); + kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry); + OSFreeMem(psCleanupData); + + OSAtomicDecrement(&g_iPoolCleanTasks); + + return PVRSRV_OK; +#endif /* defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) */ +} + + +/* Put page array to the page pool. 
+ * Handles locking and checks whether the pages are + * suitable to be stored in the pool. */ +static inline IMG_BOOL +_PutPagesToPoolLocked(IMG_UINT32 ui32CPUCacheFlags, + struct page **ppsPageArray, + IMG_BOOL bUnpinned, + IMG_UINT32 uiOrder, + IMG_UINT32 uiNumPages) +{ + LinuxCleanupData *psCleanupData; + PVRSRV_CLEANUP_THREAD_WORK *psCleanupThreadFn; +#if defined(SUPPORT_PHYSMEM_TEST) + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); +#endif + + if (uiOrder == 0 && + !bUnpinned && + !PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags)) + { + IMG_UINT32 uiEntries; + IMG_UINT32 *puiCounter; + struct list_head *psPoolHead; + + + _PagePoolLock(); + + uiEntries = _PagesInPoolUnlocked(); + + /* Check for number of current page pool entries and whether + * we have other asynchronous tasks in-flight */ + if ( (uiEntries < g_ui32PagePoolMaxEntries) && + ((uiEntries + uiNumPages) < + (g_ui32PagePoolMaxEntries + g_ui32PagePoolMaxExcessEntries) )) + { + if (OSAtomicIncrement(&g_iPoolCleanTasks) <= + PVR_LINUX_PHYSMEM_MAX_ASYNC_CLEAN_TASKS) + { +#if defined(SUPPORT_PHYSMEM_TEST) + if (!psPVRSRVData->hCleanupThread) + { + goto eDecrement; + } +#endif + + psCleanupData = OSAllocMem(sizeof(*psCleanupData)); + + if (!psCleanupData) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to get memory for deferred page pool cleanup. " + "Trying to free pages immediately", + __func__)); + goto eDecrement; + } + + psCleanupThreadFn = &psCleanupData->sCleanupWork; + psCleanupData->ui32CPUCacheMode = ui32CPUCacheFlags; + psCleanupData->psPoolEntry = kmem_cache_alloc(g_psLinuxPagePoolCache, GFP_KERNEL); + + if (!psCleanupData->psPoolEntry) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to get memory for deferred page pool cleanup. 
" + "Trying to free pages immediately", + __func__)); + goto eFreeCleanupData; + } + + if (!_GetPoolListHead(ui32CPUCacheFlags, &psPoolHead, &puiCounter)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to get correct page pool", + __func__)); + goto eFreePoolEntry; + } + + /* Increase counter here to avoid deferred cleanup tasks piling up */ + *puiCounter = *puiCounter + uiNumPages; + + psCleanupData->psPoolEntry->ppsPageArray = ppsPageArray; + psCleanupData->psPoolEntry->uiItemsRemaining = uiNumPages; + + psCleanupThreadFn->pfnFree = _CleanupThread_CleanPages; + psCleanupThreadFn->pvData = psCleanupData; + psCleanupThreadFn->bDependsOnHW = IMG_FALSE; + CLEANUP_THREAD_SET_RETRY_COUNT(psCleanupThreadFn, + CLEANUP_THREAD_RETRY_COUNT_DEFAULT); + + #if defined(PVRSRV_ENABLE_PROCESS_STATS) + /* MemStats usually relies on having the bridge lock held, however + * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and + * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so + * the page pool lock is used to ensure these calls are mutually + * exclusive + */ + PVRSRVStatsIncrMemAllocPoolStat(PAGE_SIZE * uiNumPages); + #endif + + /* We must not hold the pool lock when calling AddWork because it might call us back to + * free pooled pages directly when unloading the driver */ + _PagePoolUnlock(); + + PVRSRVCleanupThreadAddWork(psCleanupThreadFn); + + + } + else + { + goto eDecrement; + } + + } + else + { + goto eUnlock; + } + } + else + { + goto eExitFalse; + } + + return IMG_TRUE; + +eFreePoolEntry: + OSFreeMem(psCleanupData->psPoolEntry); +eFreeCleanupData: + OSFreeMem(psCleanupData); +eDecrement: + OSAtomicDecrement(&g_iPoolCleanTasks); +eUnlock: + _PagePoolUnlock(); +eExitFalse: + return IMG_FALSE; +} + +/* Get the GFP flags that we pass to the page allocator */ +static inline gfp_t +_GetGFPFlags(IMG_BOOL bZero, + PVRSRV_DEVICE_NODE *psDevNode) +{ + struct device *psDev = psDevNode->psDevConfig->pvOSDevice; + gfp_t gfp_flags = GFP_USER | __GFP_NOWARN | 
__GFP_NOMEMALLOC; + +#if defined(PVR_LINUX_PHYSMEM_USE_HIGHMEM_ONLY) + /* Force use of HIGHMEM */ + gfp_flags |= __GFP_HIGHMEM; + + PVR_UNREFERENCED_PARAMETER(psDev); +#else + if (psDev) + { +#if defined(CONFIG_64BIT) || defined(CONFIG_ARM_LPAE) || defined(CONFIG_X86_PAE) + if (*psDev->dma_mask > DMA_BIT_MASK(32)) + { + /* If our system is able to handle large addresses use highmem */ + gfp_flags |= __GFP_HIGHMEM; + } + else if (*psDev->dma_mask == DMA_BIT_MASK(32)) + { + /* Limit to 32 bit. + * Achieved by setting __GFP_DMA32 for 64 bit systems */ + gfp_flags |= __GFP_DMA32; + } + else + { + /* Limit to size of DMA zone. */ + gfp_flags |= __GFP_DMA; + } +#else + if (*psDev->dma_mask < DMA_BIT_MASK(32)) + { + gfp_flags |= __GFP_DMA; + } + else + { + gfp_flags |= __GFP_HIGHMEM; + } +#endif /* if defined(CONFIG_64BIT) || defined(CONFIG_ARM_LPAE) || defined(CONFIG_X86_PAE) */ + } + +#endif /* if defined(PVR_LINUX_PHYSMEM_USE_HIGHMEM_ONLY) */ + + if (bZero) + { + gfp_flags |= __GFP_ZERO; + } + + return gfp_flags; +} + +/* + * @Function _PoisonDevicePage + * + * @Description Poisons a device page. In normal case the device page has the + * same size as the OS page and so the ui32DevPageOrder will be + * equal to 0 and page argument will point to one OS page + * structure. In case of Non4K pages the order will be greater + * than 0 and page argument will point to an array of OS + * allocated pages. 
+ * + * @Input psDevNode pointer to the device object + * @Input page array of the pages allocated by from the OS + * @Input ui32DevPageOrder order of the page (same as the one used to allocate + * the page array by alloc_pages()) + * @Input ui32CPUCacheFlags CPU cache flags applied to the page + * @Input ui8PoisonValue value used to poison the page + */ +static void +_PoisonDevicePage(PVRSRV_DEVICE_NODE *psDevNode, + struct page *page, + IMG_UINT32 ui32DevPageOrder, + IMG_UINT32 ui32CPUCacheFlags, + IMG_BYTE ui8PoisonValue) +{ + IMG_UINT32 ui32OsPageIdx; + + for (ui32OsPageIdx = 0; + ui32OsPageIdx < (1U << ui32DevPageOrder); + ui32OsPageIdx++) + { + struct page *current_page = page + ui32OsPageIdx; + IMG_CPU_PHYADDR sCPUPhysAddrStart = {page_to_phys(current_page)}; + IMG_CPU_PHYADDR sCPUPhysAddrEnd = {sCPUPhysAddrStart.uiAddr + PAGE_SIZE}; + + void *kvaddr = kmap_atomic(current_page); + + /* kmap_atomic maps pages as cached so it's safe to use OSCachedMemSet + * here (also pages are always 8 bytes aligned anyway) */ + OSCachedMemSet(kvaddr, ui8PoisonValue, PAGE_SIZE); + + OSCPUCacheFlushRangeKM(psDevNode, kvaddr, kvaddr + PAGE_SIZE, + sCPUPhysAddrStart, sCPUPhysAddrEnd); + + kunmap_atomic(kvaddr); + } +} + +/* Allocate and initialise the structure to hold the metadata of the allocation */ +static PVRSRV_ERROR +_AllocOSPageArray(PVRSRV_DEVICE_NODE *psDevNode, + PMR_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 uiLog2AllocPageSize, + IMG_UINT32 ui32AllocFlags, + IMG_UINT32 ui32CPUCacheFlags, + IMG_PID uiPid, + PMR_OSPAGEARRAY_DATA **ppsPageArrayDataPtr) +{ + PVRSRV_ERROR eError; + PMR_SIZE_T uiSize = uiChunkSize * ui32NumVirtChunks; + IMG_UINT32 uiNumOSPageSizeVirtPages; + IMG_UINT32 uiNumDevPageSizeVirtPages; + PMR_OSPAGEARRAY_DATA *psPageArrayData; + IMG_UINT64 ui64DmaMask = 0; + PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks); + + /* Use of cast below is justified by the assertion that follows to + * prove that no 
significant bits have been truncated */ + uiNumOSPageSizeVirtPages = (IMG_UINT32) (((uiSize - 1) >> PAGE_SHIFT) + 1); + PVR_ASSERT(((PMR_SIZE_T) uiNumOSPageSizeVirtPages << PAGE_SHIFT) == uiSize); + + uiNumDevPageSizeVirtPages = uiNumOSPageSizeVirtPages >> (uiLog2AllocPageSize - PAGE_SHIFT); + + /* Allocate the struct to hold the metadata */ + psPageArrayData = kmem_cache_alloc(g_psLinuxPageArray, GFP_KERNEL); + if (psPageArrayData == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: OS refused the memory allocation for the private data.", + __func__)); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto e_freed_none; + } + + /* + * Allocate the page array + * + * We avoid tracking this memory because this structure might go into the page pool. + * The OS can drain the pool asynchronously and when doing that we have to avoid + * any potential deadlocks. + * + * In one scenario the process stats vmalloc hash table lock is held and then + * the oom-killer softirq is trying to call _ScanObjectsInPagePool(), it must not + * try to acquire the vmalloc hash table lock again. 
+ */ + psPageArrayData->pagearray = OSAllocZMemNoStats(sizeof(struct page *) * uiNumDevPageSizeVirtPages); + if (psPageArrayData->pagearray == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto e_free_kmem_cache; + } + else + { + if (BIT_ISSET(ui32AllocFlags, FLAG_IS_CMA)) + { + /* Allocate additional DMA/CMA cpu kernel virtual address & device bus address array state */ + psPageArrayData->dmavirtarray = OSAllocZMemNoStats(sizeof(void*) * uiNumDevPageSizeVirtPages); + if (psPageArrayData->dmavirtarray == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto e_free_pagearray; + } + + psPageArrayData->dmaphysarray = OSAllocZMemNoStats(sizeof(dma_addr_t) * uiNumDevPageSizeVirtPages); + if (psPageArrayData->dmaphysarray == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto e_free_cpuvirtaddrarray; + } + } + } + + if (psDevNode->psDevConfig && psDevNode->psDevConfig->pvOSDevice) + { + struct device *psDev = psDevNode->psDevConfig->pvOSDevice; + ui64DmaMask = *psDev->dma_mask; + } + + /* Init metadata */ + psPageArrayData->psDevNode = psDevNode; + psPageArrayData->uiPid = uiPid; + psPageArrayData->iNumOSPagesAllocated = 0; + psPageArrayData->uiTotalNumOSPages = uiNumOSPageSizeVirtPages; + psPageArrayData->uiLog2AllocPageSize = uiLog2AllocPageSize; + psPageArrayData->ui64DmaMask = ui64DmaMask; + psPageArrayData->ui32AllocFlags = ui32AllocFlags; + psPageArrayData->ui32CPUCacheFlags = ui32CPUCacheFlags; + psPageArrayData->ui32CMAAdjustedPageCount = 0; + + *ppsPageArrayDataPtr = psPageArrayData; + return PVRSRV_OK; + +/* Error path */ +e_free_cpuvirtaddrarray: + OSFreeMemNoStats(psPageArrayData->dmavirtarray); + +e_free_pagearray: + OSFreeMemNoStats(psPageArrayData->pagearray); + +e_free_kmem_cache: + kmem_cache_free(g_psLinuxPageArray, psPageArrayData); + PVR_DPF((PVR_DBG_ERROR, + "%s: OS refused the memory allocation for the page pointer table. 
" + "Did you ask for too much?", + __func__)); + +e_freed_none: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +static inline void +_ApplyCacheMaintenance(PVRSRV_DEVICE_NODE *psDevNode, + struct page **ppsPage, + IMG_UINT32 uiNumPages) +{ + void * pvAddr; + + if (OSCPUCacheOpAddressType() == OS_CACHE_OP_ADDR_TYPE_VIRTUAL) + { + pgprot_t pgprot = PAGE_KERNEL; + + IMG_UINT32 uiNumToClean = uiNumPages; + struct page **ppsCleanArray = ppsPage; + + /* Map and flush page. + * For large page arrays do it PVR_LINUX_PHYSMEM_MAX_KMAP_SIZE + * at a time. */ + while (uiNumToClean != 0) + { + IMG_UINT32 uiToClean = MIN(PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES, + uiNumToClean); + IMG_CPU_PHYADDR sUnused = + { IMG_CAST_TO_CPUPHYADDR_UINT(0xCAFEF00DDEADBEEFULL) }; + + pvAddr = pvr_vmap(ppsCleanArray, uiToClean, -1, pgprot); + if (!pvAddr) + { + PVR_DPF((PVR_DBG_ERROR, + "Unable to flush page cache for new allocation, skipping flush.")); + return; + } + + CacheOpExec(psDevNode, + pvAddr, + pvAddr + PAGE_SIZE, + sUnused, + sUnused, + PVRSRV_CACHE_OP_FLUSH); + + pvr_vunmap(pvAddr, uiToClean, pgprot); + ppsCleanArray = &(ppsCleanArray[uiToClean]); + uiNumToClean -= uiToClean; + } + } + else + { + IMG_UINT32 ui32Idx; + + for (ui32Idx = 0; ui32Idx < uiNumPages; ++ui32Idx) + { + IMG_CPU_PHYADDR sCPUPhysAddrStart, sCPUPhysAddrEnd; + + pvAddr = kmap(ppsPage[ui32Idx]); + sCPUPhysAddrStart.uiAddr = page_to_phys(ppsPage[ui32Idx]); + sCPUPhysAddrEnd.uiAddr = sCPUPhysAddrStart.uiAddr + PAGE_SIZE; + + /* If we're zeroing, we need to make sure the cleared memory is pushed out + * of the cache before the cache lines are invalidated */ + CacheOpExec(psDevNode, + pvAddr, + pvAddr + PAGE_SIZE, + sCPUPhysAddrStart, + sCPUPhysAddrEnd, + PVRSRV_CACHE_OP_FLUSH); + + kunmap(ppsPage[ui32Idx]); + } + } +} + +/* Change the caching attribute of pages on x86 systems and takes care of + * cache maintenance. This function is supposed to be called once for pages that + * came from alloc_pages(). 
It expects an array of OS page sized pages! + * + * Flush/Invalidate pages in case the allocation is not cached. Necessary to + * remove pages from the cache that might be flushed later and corrupt memory. */ +static inline PVRSRV_ERROR +_ApplyOSPagesAttribute(PVRSRV_DEVICE_NODE *psDevNode, + struct page **ppsPage, + IMG_UINT32 uiNumPages, + IMG_BOOL bFlush, + IMG_UINT32 ui32CPUCacheFlags) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_BOOL bCPUCached = PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags); + IMG_BOOL bCPUUncached = PVRSRV_CHECK_CPU_UNCACHED(ui32CPUCacheFlags); + IMG_BOOL bCPUWriteCombine = PVRSRV_CHECK_CPU_WRITE_COMBINE(ui32CPUCacheFlags); + + if (ppsPage != NULL && uiNumPages != 0) + { +#if defined(CONFIG_X86) + /* On x86 we have to set page cache attributes for non-cached pages. + * The call is implicitly taking care of all flushing/invalidating + * and therefore we can skip the usual cache maintenance after this. */ + if (bCPUUncached || bCPUWriteCombine) + { + /* On x86 if we already have a mapping (e.g. 
low memory) we need to change the mode of + current mapping before we map it ourselves */ + int ret = IMG_FALSE; + PVR_UNREFERENCED_PARAMETER(bFlush); + + switch (PVRSRV_CPU_CACHE_MODE(ui32CPUCacheFlags)) + { + case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED: + ret = set_pages_array_uc(ppsPage, uiNumPages); + if (ret) + { + eError = PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE; + PVR_DPF((PVR_DBG_ERROR, "Setting Linux page caching mode to UC failed, returned %d", ret)); + } + break; + + case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC: + ret = set_pages_array_wc(ppsPage, uiNumPages); + if (ret) + { + eError = PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE; + PVR_DPF((PVR_DBG_ERROR, "Setting Linux page caching mode to WC failed, returned %d", ret)); + } + break; + + case PVRSRV_MEMALLOCFLAG_CPU_CACHED: + break; + + default: + break; + } + } + else +#endif + { + if ( bFlush || + bCPUUncached || bCPUWriteCombine || + (bCPUCached && PVRSRV_CHECK_CPU_CACHE_CLEAN(ui32CPUCacheFlags)) ) + { + /* We can be given pages which still remain in the cache. + In order to make sure that the data we write through our mappings + doesn't get overwritten by later cache evictions we invalidate the + pages that are given to us. + + Note: + This still seems to be true if we request cold pages, it's just less + likely to be in the cache. */ + _ApplyCacheMaintenance(psDevNode, + ppsPage, + uiNumPages); + } + } + } + + return eError; +} + +/* Same as _AllocOSPage except it uses DMA framework to perform allocation. + * uiPageIndex is expected to be the pagearray index where to store the higher order page. 
*/ +static PVRSRV_ERROR +_AllocOSPage_CMA(PMR_OSPAGEARRAY_DATA *psPageArrayData, + gfp_t gfp_flags, + IMG_UINT32 ui32AllocOrder, + IMG_UINT32 ui32MinOrder, + IMG_UINT32 uiPageIndex) +{ + void *virt_addr; + struct page *page; + dma_addr_t bus_addr; + IMG_UINT32 uiAllocIsMisaligned; + size_t alloc_size = PAGE_SIZE << ui32AllocOrder; + struct device *dev = psPageArrayData->psDevNode->psDevConfig->pvOSDevice; + PVR_ASSERT(ui32AllocOrder == ui32MinOrder); + + do + { + DisableOOMKiller(); +#if defined(PVR_LINUX_PHYSMEM_SUPPRESS_DMA_AC) + virt_addr = NULL; +#else + virt_addr = dma_alloc_coherent(dev, alloc_size, &bus_addr, gfp_flags); +#endif + if (virt_addr == NULL) + { + /* The idea here is primarily to support some older kernels with + broken or non-functioning DMA/CMA implementations (< Linux-3.4) + and to also handle DMA/CMA allocation failures by attempting a + normal page allocation though we expect dma_alloc_coherent() + already attempts this internally also before failing but + nonetheless it does no harm to retry the allocation ourselves */ + page = alloc_pages(gfp_flags, ui32AllocOrder); + if (page) + { + /* Taint bus_addr as alloc_page, needed when freeing; + also acquire the low memory page address only, this + prevents mapping possible high memory pages into + kernel virtual address space which might exhaust + the VMALLOC address space */ + bus_addr = DMA_SET_ALLOCPG_ADDR(page_to_phys(page)); + virt_addr = (void*)(uintptr_t) DMA_VADDR_NOT_IN_USE; + } + else + { + EnableOOMKiller(); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + } + else + { +#if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64) + page = pfn_to_page(bus_addr >> PAGE_SHIFT); +#else + /* Assumes bus address space is identical to physical address space */ + page = phys_to_page(bus_addr); +#endif + } + EnableOOMKiller(); + + /* Physical allocation alignment works/hidden behind the scene transparently, + we do this here if the allocated buffer address does not meet its alignment + requirement by 
over-allocating using the next power-2 order and reporting + aligned-adjusted values back to meet the requested alignment constraint. + Evidently we waste memory by doing this so should only do so if we do not + initially meet the alignment constraint. */ + uiAllocIsMisaligned = DMA_GET_ADDR(bus_addr) & ((PAGE_SIZE< ui32MinOrder) + { + IMG_BOOL bUsedAllocPages = DMA_IS_ALLOCPG_ADDR(bus_addr); + if (ui32AllocOrder == ui32MinOrder) + { + if (bUsedAllocPages) + { + __free_pages(page, ui32AllocOrder); + } + else + { + dma_free_coherent(dev, alloc_size, virt_addr, bus_addr); + } + + ui32AllocOrder = ui32AllocOrder + 1; + alloc_size = PAGE_SIZE << ui32AllocOrder; + + PVR_ASSERT(uiAllocIsMisaligned != 0); + } + else + { + size_t align_adjust = PAGE_SIZE << ui32MinOrder; + + /* Adjust virtual/bus addresses to meet alignment */ + bus_addr = bUsedAllocPages ? page_to_phys(page) : bus_addr; + align_adjust = PVR_ALIGN((size_t)bus_addr, align_adjust); + align_adjust -= (size_t)bus_addr; + + if (align_adjust) + { + if (bUsedAllocPages) + { + page += align_adjust >> PAGE_SHIFT; + bus_addr = DMA_SET_ALLOCPG_ADDR(page_to_phys(page)); + virt_addr = (void*)(uintptr_t) DMA_VADDR_NOT_IN_USE; + } + else + { + bus_addr += align_adjust; + virt_addr += align_adjust; +#if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64) + page = pfn_to_page(bus_addr >> PAGE_SHIFT); +#else + /* Assumes bus address space is identical to physical address space */ + page = phys_to_page(bus_addr); +#endif + } + + /* Store adjustments in PAGE_SIZE counts */ + align_adjust = align_adjust >> PAGE_SHIFT; + bus_addr = DMA_SET_ALIGN_ADJUSTMENT(bus_addr, align_adjust); + } + + /* Taint bus_addr due to over-allocation, allows us to free + * memory correctly */ + bus_addr = DMA_SET_ADJUSTED_ADDR(bus_addr); + uiAllocIsMisaligned = 0; + } + } + } while (uiAllocIsMisaligned); + + /* Convert OSPageSize-based index into DevicePageSize-based index */ + psPageArrayData->ui32CMAAdjustedPageCount += (alloc_size - (PAGE_SIZE << 
ui32AllocOrder )); + + psPageArrayData->dmavirtarray[uiPageIndex] = virt_addr; + psPageArrayData->dmaphysarray[uiPageIndex] = bus_addr; + psPageArrayData->pagearray[uiPageIndex] = page; + + return PVRSRV_OK; +} + +/* Allocate a page of order uiAllocOrder and stores it in the page array ppsPage at + * position uiPageIndex. + * + * If the order is higher than 0, it splits the page into multiples and + * stores them at position uiPageIndex to uiPageIndex+(1<= KERNEL_VERSION(3,10,0)) + /* In case we need to, split the higher order page; + this should only be used for order-0 allocations + as higher order allocations should use DMA/CMA */ + if (uiAllocOrder != 0) + { + split_page(psPage, uiAllocOrder); + } +#endif + + /* Store the page (or multiple split pages) in the page array */ + for (ui32Count = 0; ui32Count < (1 << uiAllocOrder); ui32Count++) + { + psPageArrayData->pagearray[uiPageIndex + ui32Count] = &(psPage[ui32Count]); + } + + return PVRSRV_OK; +} + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if defined(PVRSRV_ENABLE_MEMORY_STATS) + +static inline void _AddMemAllocRecord_UmaPages(PMR_OSPAGEARRAY_DATA *psPageArrayData, + struct page *psPage) +{ + IMG_CPU_PHYADDR sCPUPhysAddr = { page_to_phys(psPage) }; + PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES, + NULL, sCPUPhysAddr, + 1 << psPageArrayData->uiLog2AllocPageSize, + NULL, psPageArrayData->uiPid + DEBUG_MEMSTATS_VALUES); +} + +static inline void _RemoveMemAllocRecord_UmaPages(PMR_OSPAGEARRAY_DATA *psPageArrayData, + struct page *psPage) +{ + PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES, + (IMG_UINT64) page_to_phys(psPage), + psPageArrayData->uiPid); +} + +#else /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ + +static inline void _IncrMemAllocStat_UmaPages(size_t uiSize, IMG_PID uiPid) +{ + PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES, + uiSize, uiPid); +} + +static inline void _DecrMemAllocStat_UmaPages(size_t uiSize, IMG_PID uiPid) +{ + 
PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES, + uiSize, uiPid); +} + +#endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ +#endif /* defined(PVRSRV_ENABLE_PROCESS_STATS) */ + +/* Allocation of OS pages: We may allocate 2^N order pages at a time for two reasons. + * + * Firstly to support device pages which are larger than OS. By asking the OS for 2^N + * order OS pages at a time we guarantee the device page is contiguous. + * + * Secondly for performance where we may ask for 2^N order pages to reduce the number + * of calls to alloc_pages, and thus reduce time for huge allocations. + * + * Regardless of page order requested, we need to break them down to track _OS pages. + * The maximum order requested is increased if all max order allocations were successful. + * If any request fails we reduce the max order. + */ +static PVRSRV_ERROR +_AllocOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData) +{ + PVRSRV_ERROR eError; + IMG_UINT32 uiArrayIndex = 0; + IMG_UINT32 ui32Order; + IMG_UINT32 ui32MinOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT; + IMG_BOOL bIncreaseMaxOrder = IMG_TRUE; + + IMG_UINT32 ui32NumPageReq; + IMG_UINT32 uiPagesToAlloc; + IMG_UINT32 uiPagesFromPool = 0; + + gfp_t gfp_flags = _GetGFPFlags(ui32MinOrder ? 
BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_ZERO) : IMG_FALSE, /* Zero all pages later as batch */ + psPageArrayData->psDevNode); + gfp_t ui32GfpFlags; + gfp_t ui32HighOrderGfpFlags = ((gfp_flags & ~__GFP_RECLAIM) | __GFP_NORETRY); + + struct page **ppsPageArray = psPageArrayData->pagearray; + struct page **ppsPageAttributeArray = NULL; + + uiPagesToAlloc = psPageArrayData->uiTotalNumOSPages; + + /* Try to get pages from the pool since it is faster; + the page pool currently only supports zero-order pages + thus currently excludes all DMA/CMA allocated memory */ + _GetPagesFromPoolLocked(psPageArrayData->psDevNode, + psPageArrayData->ui32CPUCacheFlags, + uiPagesToAlloc, + ui32MinOrder, + BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_ZERO), + ppsPageArray, + &uiPagesFromPool); + + uiArrayIndex = uiPagesFromPool; + + if ((uiPagesToAlloc - uiPagesFromPool) < PVR_LINUX_HIGHORDER_ALLOCATION_THRESHOLD) + { /* Small allocations: ask for one device page at a time */ + ui32Order = ui32MinOrder; + bIncreaseMaxOrder = IMG_FALSE; + } + else + { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)) + /* Large zero-order or none zero-order allocations, ask for + MAX(max-order, min-order) order pages at a time; alloc + failures throttles this down to ZeroOrder allocations */ + ui32Order = MAX(g_uiMaxOrder, ui32MinOrder); +#else + /* Because split_page() is not available on older kernels + we cannot mix-and-match any-order pages in the PMR; + only same-order pages must be present in page array. + So we unconditionally force it to use ui32MinOrder on + these older kernels */ + ui32Order = ui32MinOrder; +#if defined(DEBUG) + if (! BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) + { + /* Check that this is zero */ + PVR_ASSERT(! ui32Order); + } +#endif +#endif + } + + /* Only if asking for more contiguity than we actually need, let it fail */ + ui32GfpFlags = (ui32Order > ui32MinOrder) ? 
ui32HighOrderGfpFlags : gfp_flags; + ui32NumPageReq = (1 << ui32Order); + + while (uiArrayIndex < uiPagesToAlloc) + { + IMG_UINT32 ui32PageRemain = uiPagesToAlloc - uiArrayIndex; + + while (ui32NumPageReq > ui32PageRemain) + { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)) + /* Pages to request is larger than that remaining + so ask for less so never over allocate */ + ui32Order = MAX(ui32Order >> 1, ui32MinOrder); +#else + /* Pages to request is larger than that remaining so + do nothing thus over allocate as we do not support + mix/match of any-order pages in PMR page-array in + older kernels (simplifies page free logic) */ + PVR_ASSERT(ui32Order == ui32MinOrder); +#endif + ui32NumPageReq = (1 << ui32Order); + ui32GfpFlags = (ui32Order > ui32MinOrder) ? ui32HighOrderGfpFlags : gfp_flags; + } + + if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) + { + /* As the DMA/CMA framework rounds-up request to the + next power-of-two, we request multiple uiMinOrder + pages to satisfy allocation request in order to + minimise wasting memory */ + eError = _AllocOSPage_CMA(psPageArrayData, + ui32GfpFlags, + ui32Order, + ui32MinOrder, + uiArrayIndex >> ui32MinOrder); + } + else + { + /* Allocate uiOrder pages at uiArrayIndex */ + eError = _AllocOSPage(psPageArrayData, + ui32GfpFlags, + ui32Order, + ui32MinOrder, + uiArrayIndex); + } + + if (eError == PVRSRV_OK) + { + /* Successful request. Move onto next. */ + uiArrayIndex += ui32NumPageReq; + } + else + { + if (ui32Order > ui32MinOrder) + { + /* Last request failed. Let's ask for less next time */ + ui32Order = MAX(ui32Order >> 1, ui32MinOrder); + bIncreaseMaxOrder = IMG_FALSE; + ui32NumPageReq = (1 << ui32Order); + ui32GfpFlags = (ui32Order > ui32MinOrder) ? 
ui32HighOrderGfpFlags : gfp_flags; + g_uiMaxOrder = ui32Order; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)) + /* We should not trigger this code path in older kernels, + this is enforced by ensuring ui32Order == ui32MinOrder */ + PVR_ASSERT(ui32Order == ui32MinOrder); +#endif + } + else + { + /* Failed to alloc pages at required contiguity. Failed allocation */ + PVR_DPF((PVR_DBG_ERROR, "%s: %s failed to honour request at %u of %u, flags = %x, order = %u (%s)", + __func__, + BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA) ? "dma_alloc_coherent" : "alloc_pages", + uiArrayIndex, + uiPagesToAlloc, + ui32GfpFlags, + ui32Order, + PVRSRVGetErrorString(eError))); + eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES; + goto e_free_pages; + } + } + } + + if (bIncreaseMaxOrder && (g_uiMaxOrder < PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM)) + { /* All successful allocations on max order. Let's ask for more next time */ + g_uiMaxOrder++; + } + + /* Construct table of page pointers to apply attributes */ + ppsPageAttributeArray = &ppsPageArray[uiPagesFromPool]; + if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) + { + IMG_UINT32 uiIdx, uiIdy, uiIdz; + + ppsPageAttributeArray = OSAllocMem(sizeof(struct page *) * uiPagesToAlloc); + if (ppsPageAttributeArray == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "Failed OSAllocMem() for page attributes table")); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto e_free_pages; + } + + for (uiIdx = 0; uiIdx < uiPagesToAlloc; uiIdx += ui32NumPageReq) + { + uiIdy = uiIdx >> ui32Order; + for (uiIdz = 0; uiIdz < ui32NumPageReq; uiIdz++) + { + ppsPageAttributeArray[uiIdx+uiIdz] = psPageArrayData->pagearray[uiIdy]; + ppsPageAttributeArray[uiIdx+uiIdz] += uiIdz; + } + } + } + + if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_ZERO) && ui32MinOrder == 0) + { + eError = _ZeroPageArray(uiPagesToAlloc - uiPagesFromPool, + ppsPageAttributeArray, + PAGE_KERNEL); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to zero pages 
(fast)")); + goto e_free_pages; + } + } + + + /* Do the cache management as required */ + eError = _ApplyOSPagesAttribute(psPageArrayData->psDevNode, + ppsPageAttributeArray, + uiPagesToAlloc - uiPagesFromPool, + BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_ZERO), + psPageArrayData->ui32CPUCacheFlags); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to set page attributes")); + goto e_free_pages; + } + else + { + if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) + { + OSFreeMem(ppsPageAttributeArray); + } + } + + /* Update metadata */ + psPageArrayData->iNumOSPagesAllocated = psPageArrayData->uiTotalNumOSPages; + + { + IMG_UINT32 ui32NumPages = + psPageArrayData->iNumOSPagesAllocated >> ui32MinOrder; + IMG_UINT32 i; + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if defined(PVRSRV_ENABLE_MEMORY_STATS) + for (i = 0; i < ui32NumPages; i++) + { + if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) + { + _AddMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[i]); + } + else + { + _AddMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[i << ui32MinOrder]); + } + } +#else /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ + _IncrMemAllocStat_UmaPages(((uiPagesToAlloc * PAGE_SIZE)+(psPageArrayData->ui32CMAAdjustedPageCount)), + psPageArrayData->uiPid); +#endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ +#endif /* defined(PVRSRV_ENABLE_PROCESS_STATS) */ + + if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_POISON_ON_ALLOC)) + { + for (i = 0; i < ui32NumPages; i++) + { + _PoisonDevicePage(psPageArrayData->psDevNode, + ppsPageArray[i], + ui32MinOrder, + psPageArrayData->ui32CPUCacheFlags, + PVRSRV_POISON_ON_ALLOC_VALUE); + } + } + } + + return PVRSRV_OK; + +/* Error path */ +e_free_pages: + { + IMG_UINT32 ui32PageToFree; + + if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) + { + IMG_UINT32 uiDevArrayIndex = uiArrayIndex >> ui32Order; + IMG_UINT32 uiDevPageSize = PAGE_SIZE << ui32Order; + PVR_ASSERT(ui32Order == ui32MinOrder); + 
+ if (ppsPageAttributeArray) + { + OSFreeMem(ppsPageAttributeArray); + } + + for (ui32PageToFree = 0; ui32PageToFree < uiDevArrayIndex; ui32PageToFree++) + { + _FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice, + uiDevPageSize, + ui32MinOrder, + psPageArrayData->dmavirtarray[ui32PageToFree], + psPageArrayData->dmaphysarray[ui32PageToFree], + ppsPageArray[ui32PageToFree]); + psPageArrayData->dmaphysarray[ui32PageToFree]= (dma_addr_t)0; + psPageArrayData->dmavirtarray[ui32PageToFree] = NULL; + ppsPageArray[ui32PageToFree] = NULL; + } + } + else + { + /* Free the pages we got from the pool */ + for (ui32PageToFree = 0; ui32PageToFree < uiPagesFromPool; ui32PageToFree++) + { + _FreeOSPage(ui32MinOrder, + BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_UNSET_MEMORY_TYPE), + ppsPageArray[ui32PageToFree]); + ppsPageArray[ui32PageToFree] = NULL; + } + + for (ui32PageToFree = uiPagesFromPool; ui32PageToFree < uiArrayIndex; ui32PageToFree++) + { + _FreeOSPage(ui32MinOrder, IMG_FALSE, ppsPageArray[ui32PageToFree]); + ppsPageArray[ui32PageToFree] = NULL; + } + } + + return eError; + } +} + +/* Allocation of OS pages: This function is used for sparse allocations. + * + * Sparse allocations provide only a proportion of sparse physical backing within the total + * virtual range. */ +static PVRSRV_ERROR +_AllocOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData, + IMG_UINT32 *puiAllocIndices, + IMG_UINT32 uiPagesToAlloc) +{ + PVRSRV_ERROR eError; + IMG_UINT32 i; + struct page **ppsPageArray = psPageArrayData->pagearray; + IMG_UINT32 uiOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT; + IMG_UINT32 uiPagesFromPool = 0; + IMG_UINT32 uiNumOSPagesToAlloc = uiPagesToAlloc * (1 << uiOrder); + IMG_UINT32 uiTotalNumAllocPages = psPageArrayData->uiTotalNumOSPages >> uiOrder; + gfp_t ui32GfpFlags = _GetGFPFlags(uiOrder ? 
BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_ZERO): + IMG_FALSE, /* Zero pages later as batch */ + psPageArrayData->psDevNode); + + /* We use this page array to receive pages from the pool and then reuse it afterwards to + * store pages that need their cache attribute changed on x86*/ + struct page **ppsTempPageArray; + IMG_UINT32 uiTempPageArrayIndex = 0; + + /* Allocate the temporary page array that we need here to receive pages + * from the pool and to store pages that need their caching attributes changed. + * Allocate number of OS pages to be able to use the attribute function later. */ + ppsTempPageArray = OSAllocMem(sizeof(struct page*) * uiNumOSPagesToAlloc); + if (ppsTempPageArray == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed metadata allocation", __func__)); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto e_exit; + } + + /* Check the requested number of pages if they fit in the page array */ + if (uiTotalNumAllocPages < + ((psPageArrayData->iNumOSPagesAllocated >> uiOrder) + uiPagesToAlloc) ) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Trying to allocate more pages (Order %u) than this buffer can handle, " + "Request + Allocated < Max! 
Request %u, Allocated %u, Max %u.", + __func__, + uiOrder, + uiPagesToAlloc, + psPageArrayData->iNumOSPagesAllocated >> uiOrder, + uiTotalNumAllocPages)); + eError = PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE; + goto e_free_temp_array; + } + + /* Try to get pages from the pool since it is faster */ + _GetPagesFromPoolLocked(psPageArrayData->psDevNode, + psPageArrayData->ui32CPUCacheFlags, + uiPagesToAlloc, + uiOrder, + BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_ZERO), + ppsTempPageArray, + &uiPagesFromPool); + + /* Allocate pages from the OS or move the pages that we got from the pool + * to the page array */ + for (i = 0; i < uiPagesToAlloc; i++) + { + /* Check if the indices we are allocating are in range */ + if (puiAllocIndices[i] >= uiTotalNumAllocPages) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Given alloc index %u at %u is larger than page array %u.", + __func__, + i, + puiAllocIndices[i], + uiTotalNumAllocPages)); + eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE; + goto e_free_pages; + } + + /* Check if there is not already a page allocated at this position */ + if (NULL != ppsPageArray[puiAllocIndices[i]]) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Mapping number %u at page array index %u already exists. " + "Page struct %p", + __func__, + i, + puiAllocIndices[i], + ppsPageArray[puiAllocIndices[i]])); + eError = PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS; + goto e_free_pages; + } + + /* Finally assign a page to the array. + * Either from the pool or allocate a new one. 
*/ + if (uiPagesFromPool != 0) + { + uiPagesFromPool--; + ppsPageArray[puiAllocIndices[i]] = ppsTempPageArray[uiPagesFromPool]; + } + else + { + if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) + { + + /* As the DMA/CMA framework rounds-up request to the + next power-of-two, we request multiple uiMinOrder + pages to satisfy allocation request in order to + minimise wasting memory */ + eError = _AllocOSPage_CMA(psPageArrayData, + ui32GfpFlags, + uiOrder, + uiOrder, + puiAllocIndices[i]); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to alloc CMA pages")); + goto e_free_pages; + } + } + else + { + DisableOOMKiller(); + ppsPageArray[puiAllocIndices[i]] = alloc_pages(ui32GfpFlags, uiOrder); + EnableOOMKiller(); + } + + if (ppsPageArray[puiAllocIndices[i]] != NULL) + { + /* Reusing the temp page array if it has no pool pages anymore */ + + if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) + { + IMG_UINT32 idx; + struct page* psPageAddr; + + psPageAddr = ppsPageArray[puiAllocIndices[i]]; + + for (idx = 0; idx < (1 << uiOrder); idx++) + { + ppsTempPageArray[uiTempPageArrayIndex + idx] = psPageAddr; + psPageAddr++; + } + uiTempPageArrayIndex += (1 << uiOrder); + } + else + { + ppsTempPageArray[uiTempPageArrayIndex] = ppsPageArray[puiAllocIndices[i]]; + uiTempPageArrayIndex++; + } + } + else + { + /* Failed to alloc pages at required contiguity. 
Failed allocation */ + PVR_DPF((PVR_DBG_ERROR, + "%s: alloc_pages failed to honour request at %u of %u, flags = %x, order = %u", + __func__, + i, + uiPagesToAlloc, + ui32GfpFlags, + uiOrder)); + eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES; + goto e_free_pages; + } + } + } + + if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_ZERO) && uiOrder == 0) + { + eError = _ZeroPageArray(uiTempPageArrayIndex, + ppsTempPageArray, + PAGE_KERNEL); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to zero pages (sparse)")); + goto e_free_pages; + } + } + + /* Do the cache management as required */ + eError = _ApplyOSPagesAttribute(psPageArrayData->psDevNode, + ppsTempPageArray, + uiTempPageArrayIndex, + BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_ZERO), + psPageArrayData->ui32CPUCacheFlags); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to set page attributes")); + goto e_free_pages; + } + + /* Update metadata */ + psPageArrayData->iNumOSPagesAllocated += uiNumOSPagesToAlloc; + + /* Free temporary page array */ + OSFreeMem(ppsTempPageArray); + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if defined(PVRSRV_ENABLE_MEMORY_STATS) + for (i = 0; i < uiPagesToAlloc; i++) + { + _AddMemAllocRecord_UmaPages(psPageArrayData, + ppsPageArray[puiAllocIndices[i]]); + } +#else + _IncrMemAllocStat_UmaPages(((uiNumOSPagesToAlloc * PAGE_SIZE)+(psPageArrayData->ui32CMAAdjustedPageCount)), + psPageArrayData->uiPid); +#endif +#endif + + if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_POISON_ON_ALLOC)) + { + for (i = 0; i < uiPagesToAlloc; i++) + { + _PoisonDevicePage(psPageArrayData->psDevNode, + ppsPageArray[puiAllocIndices[i]], + uiOrder, + psPageArrayData->ui32CPUCacheFlags, + PVRSRV_POISON_ON_ALLOC_VALUE); + } + } + + return PVRSRV_OK; + +/* Error path */ +e_free_pages: + { + IMG_UINT32 ui32PageToFree; + + if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) + { + IMG_UINT32 uiDevPageSize = PAGE_SIZE << uiOrder; + + for (ui32PageToFree = 0; 
ui32PageToFree < i; ui32PageToFree++) + { + _FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice, + uiDevPageSize, + uiOrder, + psPageArrayData->dmavirtarray[puiAllocIndices[ui32PageToFree]], + psPageArrayData->dmaphysarray[puiAllocIndices[ui32PageToFree]], + ppsPageArray[puiAllocIndices[ui32PageToFree]]); + psPageArrayData->dmaphysarray[puiAllocIndices[ui32PageToFree]]= (dma_addr_t)0; + psPageArrayData->dmavirtarray[puiAllocIndices[ui32PageToFree]] = NULL; + ppsPageArray[puiAllocIndices[ui32PageToFree]] = NULL; + } + } + else + { + /* Free the pages we got from the pool */ + for (ui32PageToFree = 0; ui32PageToFree < uiPagesFromPool; ui32PageToFree++) + { + _FreeOSPage(0, + BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_UNSET_MEMORY_TYPE), + ppsTempPageArray[ui32PageToFree]); + } + + /* Free the pages we just allocated from the OS */ + for (ui32PageToFree = uiPagesFromPool; ui32PageToFree < i; ui32PageToFree++) + { + _FreeOSPage(0, + IMG_FALSE, + ppsPageArray[puiAllocIndices[ui32PageToFree]]); + } + + /* Reset all page array entries that have been set so far*/ + for (ui32PageToFree = 0; ui32PageToFree < i; ui32PageToFree++) + { + ppsPageArray[puiAllocIndices[ui32PageToFree]] = NULL; + } + } + } + +e_free_temp_array: + OSFreeMem(ppsTempPageArray); + +e_exit: + return eError; +} + +/* Allocate pages for a given page array. 
+ * + * The executed allocation path depends whether an array with allocation + * indices has been passed or not */ +static PVRSRV_ERROR +_AllocOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData, + IMG_UINT32 *puiAllocIndices, + IMG_UINT32 uiPagesToAlloc) +{ + PVRSRV_ERROR eError; + struct page **ppsPageArray; + + /* Parameter checks */ + PVR_ASSERT(NULL != psPageArrayData); + if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) + { + PVR_ASSERT(psPageArrayData->dmaphysarray != NULL); + PVR_ASSERT(psPageArrayData->dmavirtarray != NULL); + } + PVR_ASSERT(psPageArrayData->pagearray != NULL); + PVR_ASSERT(0 <= psPageArrayData->iNumOSPagesAllocated); + + ppsPageArray = psPageArrayData->pagearray; + + /* Go the sparse alloc path if we have an array with alloc indices.*/ + if (puiAllocIndices != NULL) + { + eError = _AllocOSPages_Sparse(psPageArrayData, + puiAllocIndices, + uiPagesToAlloc); + } + else + { + eError = _AllocOSPages_Fast(psPageArrayData); + } + + if (eError != PVRSRV_OK) + { + goto e_exit; + } + + _DumpPageArray(ppsPageArray, + psPageArrayData->uiTotalNumOSPages >> + (psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT) ); + + PVR_DPF((PVR_DBG_MESSAGE, "physmem_osmem_linux.c: allocated OS memory for PMR @0x%p", psPageArrayData)); + return PVRSRV_OK; + +e_exit: + return eError; +} + +/* Same as _FreeOSPage except free memory using DMA framework */ +static INLINE void +_FreeOSPage_CMA(struct device *dev, + size_t alloc_size, + IMG_UINT32 uiOrder, + void *virt_addr, + dma_addr_t dev_addr, + struct page *psPage) +{ + if (DMA_IS_ALLOCPG_ADDR(dev_addr)) + { +#if defined(CONFIG_X86) + void *pvPageVAddr = page_address(psPage); + if (pvPageVAddr) + { + int ret = set_memory_wb((unsigned long)pvPageVAddr, 1); + if (ret) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to reset page attribute", + __func__)); + } + } +#endif + + if (DMA_IS_ADDR_ADJUSTED(dev_addr)) + { + psPage -= DMA_GET_ALIGN_ADJUSTMENT(dev_addr); + uiOrder += 1; + } + + __free_pages(psPage, uiOrder); + } 
+ else + { + if (DMA_IS_ADDR_ADJUSTED(dev_addr)) + { + size_t align_adjust; + + align_adjust = DMA_GET_ALIGN_ADJUSTMENT(dev_addr); + alloc_size = alloc_size << 1; + + dev_addr = DMA_GET_ADDR(dev_addr); + dev_addr -= align_adjust << PAGE_SHIFT; + virt_addr -= align_adjust << PAGE_SHIFT; + } + + dma_free_coherent(dev, alloc_size, virt_addr, DMA_GET_ADDR(dev_addr)); + } +} + +/* Free a single page back to the OS. + * Make sure the cache type is set back to the default value. + * + * Note: + * We must _only_ check bUnsetMemoryType in the case where we need to free + * the page back to the OS since we may have to revert the cache properties + * of the page to the default as given by the OS when it was allocated. */ +static void +_FreeOSPage(IMG_UINT32 uiOrder, + IMG_BOOL bUnsetMemoryType, + struct page *psPage) +{ + +#if defined(CONFIG_X86) + void *pvPageVAddr; + pvPageVAddr = page_address(psPage); + + if (pvPageVAddr && bUnsetMemoryType) + { + int ret; + + ret = set_memory_wb((unsigned long)pvPageVAddr, 1); + if (ret) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attribute", + __func__)); + } + } +#else + PVR_UNREFERENCED_PARAMETER(bUnsetMemoryType); +#endif + __free_pages(psPage, uiOrder); +} + +/* Free the struct holding the metadata */ +static PVRSRV_ERROR +_FreeOSPagesArray(PMR_OSPAGEARRAY_DATA *psPageArrayData) +{ + PVR_DPF((PVR_DBG_MESSAGE, "physmem_osmem_linux.c: freed OS memory for PMR @0x%p", psPageArrayData)); + + /* Check if the page array actually still exists. 
+ * It might be the case that has been moved to the page pool */ + if (psPageArrayData->pagearray != NULL) + { + OSFreeMemNoStats(psPageArrayData->pagearray); + } + + kmem_cache_free(g_psLinuxPageArray, psPageArrayData); + + return PVRSRV_OK; +} + +/* Free all or some pages from a sparse page array */ +static PVRSRV_ERROR +_FreeOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData, + IMG_UINT32 *pai32FreeIndices, + IMG_UINT32 ui32FreePageCount) +{ + IMG_BOOL bSuccess; + IMG_UINT32 uiOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT; + IMG_UINT32 uiPageIndex, i, j, uiTempIdx = 0; + struct page **ppsPageArray = psPageArrayData->pagearray; + IMG_UINT32 uiNumPages; + + struct page **ppsTempPageArray; + IMG_UINT32 uiTempArraySize; + + /* We really should have something to free before we call this */ + PVR_ASSERT(psPageArrayData->iNumOSPagesAllocated != 0); + + if (pai32FreeIndices == NULL) + { + uiNumPages = psPageArrayData->uiTotalNumOSPages >> uiOrder; + uiTempArraySize = psPageArrayData->iNumOSPagesAllocated; + } + else + { + uiNumPages = ui32FreePageCount; + uiTempArraySize = ui32FreePageCount << uiOrder; + } + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) && defined(PVRSRV_ENABLE_MEMORY_STATS) + for (i = 0; i < uiNumPages; i++) + { + IMG_UINT32 idx = pai32FreeIndices ? pai32FreeIndices[i] : i; + + if (NULL != ppsPageArray[idx]) + { + _RemoveMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[idx]); + } + } +#endif + + if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_POISON_ON_FREE)) + { + for (i = 0; i < uiNumPages; i++) + { + IMG_UINT32 idx = pai32FreeIndices ? 
pai32FreeIndices[i] : i; + + if (NULL != ppsPageArray[idx]) + { + _PoisonDevicePage(psPageArrayData->psDevNode, + ppsPageArray[idx], + uiOrder, + psPageArrayData->ui32CPUCacheFlags, + PVRSRV_POISON_ON_FREE_VALUE); + } + } + } + + if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) + { + IMG_UINT32 uiDevNumPages = uiNumPages; + IMG_UINT32 uiDevPageSize = 1<uiLog2AllocPageSize; + + for (i = 0; i < uiDevNumPages; i++) + { + IMG_UINT32 idx = pai32FreeIndices ? pai32FreeIndices[i] : i; + if (NULL != ppsPageArray[idx]) + { + _FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice, + uiDevPageSize, + uiOrder, + psPageArrayData->dmavirtarray[idx], + psPageArrayData->dmaphysarray[idx], + ppsPageArray[idx]); + psPageArrayData->dmaphysarray[idx] = (dma_addr_t)0; + psPageArrayData->dmavirtarray[idx] = NULL; + ppsPageArray[idx] = NULL; + uiTempIdx++; + } + } + uiTempIdx <<= uiOrder; + } + else + { + + /* OSAllocMemNoStats required because this code may be run without the bridge lock held */ + ppsTempPageArray = OSAllocMemNoStats(sizeof(struct page*) * uiTempArraySize); + if (ppsTempPageArray == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed free_pages metadata allocation", __func__)); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* Put pages in a contiguous array so further processing is easier */ + for (i = 0; i < uiNumPages; i++) + { + uiPageIndex = pai32FreeIndices ? 
pai32FreeIndices[i] : i; + if (NULL != ppsPageArray[uiPageIndex]) + { + struct page *psPage = ppsPageArray[uiPageIndex]; + + for (j = 0; j < (1<ui32CPUCacheFlags, + ppsTempPageArray, + BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_UNPINNED), + 0, + uiTempIdx); + if (bSuccess) + { + goto exit_ok; + } + + /* Free pages and reset page caching attributes on x86 */ +#if defined(CONFIG_X86) + if (uiTempIdx != 0 && BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_UNSET_MEMORY_TYPE)) + { + int iError; + iError = set_pages_array_wb(ppsTempPageArray, uiTempIdx); + + if (iError) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attributes", __func__)); + } + } +#endif + + /* Free the pages */ + for (i = 0; i < uiTempIdx; i++) + { + __free_pages(ppsTempPageArray[i], 0); + } + + /* Free the temp page array here if it did not move to the pool */ + OSFreeMemNoStats(ppsTempPageArray); + } + +exit_ok: + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_ENABLE_MEMORY_STATS) + _DecrMemAllocStat_UmaPages(((uiTempIdx * PAGE_SIZE)-(psPageArrayData->ui32CMAAdjustedPageCount)), + psPageArrayData->uiPid); +#endif + + if (pai32FreeIndices && ((uiTempIdx >> uiOrder) != ui32FreePageCount)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Probable sparse duplicate indices: ReqFreeCount: %d " + "ActualFreedCount: %d", __func__, ui32FreePageCount, (uiTempIdx >> uiOrder))); + } + /* Update metadata */ + psPageArrayData->iNumOSPagesAllocated -= uiTempIdx; + PVR_ASSERT(0 <= psPageArrayData->iNumOSPagesAllocated); + return PVRSRV_OK; +} + +/* Free all the pages in a page array */ +static PVRSRV_ERROR +_FreeOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData) +{ + IMG_BOOL bSuccess; + IMG_UINT32 i; + IMG_UINT32 uiNumPages = psPageArrayData->uiTotalNumOSPages; + IMG_UINT32 uiOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT; + IMG_UINT32 uiDevNumPages = uiNumPages >> uiOrder; + IMG_UINT32 uiDevPageSize = PAGE_SIZE << uiOrder; + struct page **ppsPageArray = psPageArrayData->pagearray; + + /* 
We really should have something to free before we call this */ + PVR_ASSERT(psPageArrayData->iNumOSPagesAllocated != 0); + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if defined(PVRSRV_ENABLE_MEMORY_STATS) + for (i = 0; i < uiDevNumPages; i++) + { + if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) + { + _RemoveMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[i]); + }else + { + _RemoveMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[i << uiOrder]); + } + } +#else + _DecrMemAllocStat_UmaPages(((uiNumPages * PAGE_SIZE)-(psPageArrayData->ui32CMAAdjustedPageCount)), + psPageArrayData->uiPid); +#endif +#endif + + if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_POISON_ON_FREE)) + { + for (i = 0; i < uiDevNumPages; i++) + { + _PoisonDevicePage(psPageArrayData->psDevNode, + ppsPageArray[i], + uiOrder, + psPageArrayData->ui32CPUCacheFlags, + PVRSRV_POISON_ON_FREE_VALUE); + } + } + + /* Try to move the page array to the pool */ + bSuccess = _PutPagesToPoolLocked(psPageArrayData->ui32CPUCacheFlags, + ppsPageArray, + BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_UNPINNED), + uiOrder, + uiNumPages); + if (bSuccess) + { + psPageArrayData->pagearray = NULL; + goto exit_ok; + } + + if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) + { + for (i = 0; i < uiDevNumPages; i++) + { + _FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice, + uiDevPageSize, + uiOrder, + psPageArrayData->dmavirtarray[i], + psPageArrayData->dmaphysarray[i], + ppsPageArray[i]); + psPageArrayData->dmaphysarray[i] = (dma_addr_t)0; + psPageArrayData->dmavirtarray[i] = NULL; + ppsPageArray[i] = NULL; + } + } + else + { +#if defined(CONFIG_X86) + if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_UNSET_MEMORY_TYPE)) + { + int ret; + + ret = set_pages_array_wb(ppsPageArray, uiNumPages); + if (ret) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attributes", + __func__)); + } + } +#endif + + for (i = 0; i < uiNumPages; i++) + { + _FreeOSPage(uiOrder, 
IMG_FALSE, ppsPageArray[i]); + ppsPageArray[i] = NULL; + } + } + +exit_ok: + /* Update metadata */ + psPageArrayData->iNumOSPagesAllocated = 0; + return PVRSRV_OK; +} + +/* Free pages from a page array. + * Takes care of mem stats and chooses correct free path depending on parameters. */ +static PVRSRV_ERROR +_FreeOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData, + IMG_UINT32 *pai32FreeIndices, + IMG_UINT32 ui32FreePageCount) +{ + PVRSRV_ERROR eError; + + /* Go the sparse or non-sparse path */ + if (psPageArrayData->iNumOSPagesAllocated != psPageArrayData->uiTotalNumOSPages + || pai32FreeIndices != NULL) + { + eError = _FreeOSPages_Sparse(psPageArrayData, + pai32FreeIndices, + ui32FreePageCount); + } + else + { + eError = _FreeOSPages_Fast(psPageArrayData); + } + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "_FreeOSPages_FreePages failed")); + } + + _DumpPageArray(psPageArrayData->pagearray, + psPageArrayData->uiTotalNumOSPages >> + (psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT) ); + + return eError; +} + +/* + * + * Implementation of callback functions + * + */ + +/* Destruction function is called after last reference disappears, + * but before PMR itself is freed. + */ +static PVRSRV_ERROR +PMRFinalizeOSMem(PMR_IMPL_PRIVDATA pvPriv) +{ + PVRSRV_ERROR eError; + PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv; + + /* We can't free pages until now. 
*/ + if (psOSPageArrayData->iNumOSPagesAllocated != 0) + { +#if defined(DEBUG) && defined(SUPPORT_VALIDATION) + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + IMG_UINT32 ui32UMALeakMax = psPVRSRVData->sMemLeakIntervals.ui32GPU; + + mutex_lock(&g_sUMALeakMutex); + + g_ui32UMALeakCounter++; + if (ui32UMALeakMax && g_ui32UMALeakCounter >= ui32UMALeakMax) + { + g_ui32UMALeakCounter = 0; + mutex_unlock(&g_sUMALeakMutex); + + PVR_DPF((PVR_DBG_WARNING, "%s: Skipped freeing of PMR 0x%p to trigger memory leak.", __func__, pvPriv)); + return PVRSRV_OK; + } + + mutex_unlock(&g_sUMALeakMutex); +#endif + _PagePoolLock(); + if (BIT_ISSET(psOSPageArrayData->ui32AllocFlags, FLAG_UNPINNED)) + { + _RemoveUnpinListEntryUnlocked(psOSPageArrayData); + } + _PagePoolUnlock(); + + eError = _FreeOSPages(psOSPageArrayData, + NULL, + 0); + PVR_ASSERT(eError == PVRSRV_OK); /* can we do better? */ + } + + eError = _FreeOSPagesArray(psOSPageArrayData); + PVR_ASSERT(eError == PVRSRV_OK); /* can we do better? */ + return PVRSRV_OK; +} + +/* Callback function for locking the system physical page addresses. + * This function must be called before the lookup address func. */ +static PVRSRV_ERROR +PMRLockSysPhysAddressesOSMem(PMR_IMPL_PRIVDATA pvPriv) +{ + PVRSRV_ERROR eError; + PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv; + + if (BIT_ISSET(psOSPageArrayData->ui32AllocFlags, FLAG_ONDEMAND)) + { + /* Allocate Memory for deferred allocation */ + eError = _AllocOSPages(psOSPageArrayData, NULL, psOSPageArrayData->uiTotalNumOSPages); + if (eError != PVRSRV_OK) + { + return eError; + } + } + + eError = PVRSRV_OK; + return eError; +} + +static PVRSRV_ERROR +PMRUnlockSysPhysAddressesOSMem(PMR_IMPL_PRIVDATA pvPriv) +{ + /* Just drops the refcount. 
*/ + PVRSRV_ERROR eError = PVRSRV_OK; + PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv; + + if (BIT_ISSET(psOSPageArrayData->ui32AllocFlags, FLAG_ONDEMAND)) + { + /* Free Memory for deferred allocation */ + eError = _FreeOSPages(psOSPageArrayData, + NULL, + 0); + if (eError != PVRSRV_OK) + { + return eError; + } + } + + PVR_ASSERT(eError == PVRSRV_OK); + return eError; +} + +/* Determine PA for specified offset into page array. */ +static IMG_DEV_PHYADDR GetOffsetPA(const PMR_OSPAGEARRAY_DATA *psOSPageArrayData, + IMG_UINT32 ui32Offset) +{ + IMG_UINT32 ui32Log2AllocPageSize = psOSPageArrayData->uiLog2AllocPageSize; + IMG_UINT32 ui32PageIndex = ui32Offset >> ui32Log2AllocPageSize; + IMG_UINT32 ui32InPageOffset = ui32Offset - (ui32PageIndex << ui32Log2AllocPageSize); + IMG_DEV_PHYADDR sPA; + + PVR_ASSERT(ui32PageIndex < psOSPageArrayData->uiTotalNumOSPages); + PVR_ASSERT(ui32InPageOffset < (1U << ui32Log2AllocPageSize)); + + sPA.uiAddr = page_to_phys(psOSPageArrayData->pagearray[ui32PageIndex]); + sPA.uiAddr += ui32InPageOffset; + + return sPA; +} + +/* N.B. It is assumed that PMRLockSysPhysAddressesOSMem() is called _before_ this function! 
*/ +static PVRSRV_ERROR +PMRSysPhysAddrOSMem(PMR_IMPL_PRIVDATA pvPriv, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32NumOfPages, + IMG_DEVMEM_OFFSET_T *puiOffset, + IMG_BOOL *pbValid, + IMG_DEV_PHYADDR *psDevPAddr) +{ + const PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv; + IMG_UINT32 uiIdx; + + if (psOSPageArrayData->uiLog2AllocPageSize < ui32Log2PageSize) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Requested physical addresses from PMR " + "for incompatible contiguity %u!", + __func__, + ui32Log2PageSize)); + return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY; + } + + for (uiIdx=0; uiIdx < ui32NumOfPages; uiIdx++) + { + if (pbValid[uiIdx]) + { + psDevPAddr[uiIdx] = GetOffsetPA(psOSPageArrayData, puiOffset[uiIdx]); + +#if !defined(PVR_LINUX_PHYSMEM_USE_HIGHMEM_ONLY) + /* this is just a precaution, normally this should be always + * available */ + if (psOSPageArrayData->ui64DmaMask) + { + if (psDevPAddr[uiIdx].uiAddr > psOSPageArrayData->ui64DmaMask) + { + PVR_DPF((PVR_DBG_ERROR, "%s: physical address" + " (%" IMG_UINT64_FMTSPECX ") out of allowable range" + " [0; %" IMG_UINT64_FMTSPECX "]", __func__, + psDevPAddr[uiIdx].uiAddr, + psOSPageArrayData->ui64DmaMask)); + BUG(); + } + } +#endif + } + } + + return PVRSRV_OK; +} + +typedef struct _PMR_OSPAGEARRAY_KERNMAP_DATA_ { + void *pvBase; + IMG_UINT32 ui32PageCount; + pgprot_t PageProps; +} PMR_OSPAGEARRAY_KERNMAP_DATA; + +static PVRSRV_ERROR +PMRAcquireKernelMappingDataOSMem(PMR_IMPL_PRIVDATA pvPriv, + size_t uiOffset, + size_t uiSize, + void **ppvKernelAddressOut, + IMG_HANDLE *phHandleOut, + PMR_FLAGS_T ulFlags) +{ + PVRSRV_ERROR eError; + PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv; + void *pvAddress; + pgprot_t prot = PAGE_KERNEL; + IMG_UINT32 ui32PageOffset=0; + size_t uiMapOffset=0; + IMG_UINT32 ui32PageCount = 0; + IMG_UINT32 uiLog2AllocPageSize = psOSPageArrayData->uiLog2AllocPageSize; + IMG_UINT32 uiOSPageShift = OSGetPageShift(); + IMG_UINT32 uiPageSizeDiff = 0; + struct page **pagearray; + 
PMR_OSPAGEARRAY_KERNMAP_DATA *psData; + + /* For cases device page size greater than the OS page size, + * multiple physically contiguous OS pages constitute one device page. + * However only the first page address of such an ensemble is stored + * as part of the mapping table in the driver. Hence when mapping the PMR + * in part/full, all OS pages that constitute the device page + * must also be mapped to kernel. + * + * For the case where device page size less than OS page size, + * treat it the same way as the page sizes are equal */ + if (uiLog2AllocPageSize > uiOSPageShift) + { + uiPageSizeDiff = uiLog2AllocPageSize - uiOSPageShift; + } + + /* + Zero offset and size as a special meaning which means map in the + whole of the PMR, this is due to fact that the places that call + this callback might not have access to be able to determine the + physical size + */ + if ((uiOffset == 0) && (uiSize == 0)) + { + ui32PageOffset = 0; + uiMapOffset = 0; + /* Page count = amount of OS pages */ + ui32PageCount = psOSPageArrayData->iNumOSPagesAllocated; + } + else + { + size_t uiEndoffset; + + ui32PageOffset = uiOffset >> uiLog2AllocPageSize; + uiMapOffset = uiOffset - (ui32PageOffset << uiLog2AllocPageSize); + uiEndoffset = uiOffset + uiSize - 1; + /* Add one as we want the count, not the offset */ + /* Page count = amount of device pages (note uiLog2AllocPageSize being used) */ + ui32PageCount = (uiEndoffset >> uiLog2AllocPageSize) + 1; + ui32PageCount -= ui32PageOffset; + + /* The OS page count to be mapped might be different if the + * OS page size is lesser than the device page size */ + ui32PageCount <<= uiPageSizeDiff; + } + + switch (PVRSRV_CPU_CACHE_MODE(psOSPageArrayData->ui32CPUCacheFlags)) + { + case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED: + prot = pgprot_noncached(prot); + break; + + case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC: + prot = pgprot_writecombine(prot); + break; + + case PVRSRV_MEMALLOCFLAG_CPU_CACHED: + break; + + default: + eError = 
PVRSRV_ERROR_INVALID_PARAMS; + goto e0; + } + + if (uiPageSizeDiff) + { + /* Each device page can be broken down into ui32SubPageCount OS pages */ + IMG_UINT32 ui32SubPageCount = 1 << uiPageSizeDiff; + IMG_UINT32 i; + struct page **psPage = &psOSPageArrayData->pagearray[ui32PageOffset]; + + /* Allocate enough memory for the OS page pointers for this mapping */ + pagearray = OSAllocMem(ui32PageCount * sizeof(pagearray[0])); + + if (pagearray == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto e0; + } + + /* construct array that holds the page pointers that constitute the requested + * mapping */ + for (i = 0; i < ui32PageCount; i++) + { + IMG_UINT32 ui32OSPageArrayIndex = i / ui32SubPageCount; + IMG_UINT32 ui32OSPageArrayOffset = i % ui32SubPageCount; + + /* + * The driver only stores OS page pointers for the first OS page + * within each device page (psPage[ui32OSPageArrayIndex]). + * Get the next OS page structure at device page granularity, + * then calculate OS page pointers for all the other pages. 
+ */ + pagearray[i] = psPage[ui32OSPageArrayIndex] + ui32OSPageArrayOffset; + } + } + else + { + pagearray = &psOSPageArrayData->pagearray[ui32PageOffset]; + } + + psData = OSAllocMem(sizeof(*psData)); + if (psData == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto e1; + } + + pvAddress = pvr_vmap(pagearray, ui32PageCount, VM_READ | VM_WRITE, prot); + if (pvAddress == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto e2; + } + + *ppvKernelAddressOut = pvAddress + uiMapOffset; + psData->pvBase = pvAddress; + psData->ui32PageCount = ui32PageCount; + psData->PageProps = prot; + *phHandleOut = psData; + + if (uiPageSizeDiff) + { + OSFreeMem(pagearray); + } + + return PVRSRV_OK; + + /* + error exit paths follow + */ +e2: + OSFreeMem(psData); +e1: + if (uiPageSizeDiff) + { + OSFreeMem(pagearray); + } +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +static void PMRReleaseKernelMappingDataOSMem(PMR_IMPL_PRIVDATA pvPriv, + IMG_HANDLE hHandle) +{ + PMR_OSPAGEARRAY_KERNMAP_DATA *psData = hHandle; + PVR_UNREFERENCED_PARAMETER(pvPriv); + + pvr_vunmap(psData->pvBase, psData->ui32PageCount, psData->PageProps); + OSFreeMem(psData); +} + +static +PVRSRV_ERROR PMRUnpinOSMem(PMR_IMPL_PRIVDATA pPriv) +{ + PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pPriv; + PVRSRV_ERROR eError = PVRSRV_OK; + + /* Lock down the pool and add the array to the unpin list */ + _PagePoolLock(); + + /* Check current state */ + PVR_ASSERT(BIT_ISSET(psOSPageArrayData->ui32AllocFlags, FLAG_UNPINNED) == IMG_FALSE); + PVR_ASSERT(BIT_ISSET(psOSPageArrayData->ui32AllocFlags, FLAG_ONDEMAND) == IMG_FALSE); + + eError = _AddUnpinListEntryUnlocked(psOSPageArrayData); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Unable to add allocation to unpinned list (%d).", + __func__, + eError)); + + goto e_exit; + } + + /* Set the Unpinned bit */ + BIT_SET(psOSPageArrayData->ui32AllocFlags, FLAG_UNPINNED); + +e_exit: + _PagePoolUnlock(); + return eError; +} + +static +PVRSRV_ERROR 
PMRPinOSMem(PMR_IMPL_PRIVDATA pPriv, + PMR_MAPPING_TABLE *psMappingTable) +{ + PVRSRV_ERROR eError; + PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pPriv; + IMG_UINT32 *pui32MapTable = NULL; + IMG_UINT32 i, j = 0, ui32Temp = 0; + + _PagePoolLock(); + + /* Check current state */ + PVR_ASSERT(BIT_ISSET(psOSPageArrayData->ui32AllocFlags, FLAG_UNPINNED)); + + /* Clear unpinned bit */ + BIT_UNSET(psOSPageArrayData->ui32AllocFlags, FLAG_UNPINNED); + + /* If there are still pages in the array remove entries from the pool */ + if (psOSPageArrayData->iNumOSPagesAllocated != 0) + { + _RemoveUnpinListEntryUnlocked(psOSPageArrayData); + _PagePoolUnlock(); + + eError = PVRSRV_OK; + goto e_exit_mapalloc_failure; + } + _PagePoolUnlock(); + + /* If pages were reclaimed we allocate new ones and + * return PVRSRV_ERROR_PMR_NEW_MEMORY */ + if (psMappingTable->ui32NumVirtChunks == 1) + { + eError = _AllocOSPages(psOSPageArrayData, NULL, psOSPageArrayData->uiTotalNumOSPages); + } + else + { + pui32MapTable = (IMG_UINT32 *)OSAllocMem(sizeof(*pui32MapTable) * psMappingTable->ui32NumPhysChunks); + if (NULL == pui32MapTable) + { + eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES; + PVR_DPF((PVR_DBG_ERROR, + "%s: Unable to Alloc Map Table.", + __func__)); + goto e_exit_mapalloc_failure; + } + + for (i = 0, j = 0; i < psMappingTable->ui32NumVirtChunks; i++) + { + ui32Temp = psMappingTable->aui32Translation[i]; + if (TRANSLATION_INVALID != ui32Temp) + { + pui32MapTable[j++] = ui32Temp; + } + } + eError = _AllocOSPages(psOSPageArrayData, pui32MapTable, psMappingTable->ui32NumPhysChunks); + } + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Unable to get new pages for unpinned allocation.", + __func__)); + + eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES; + goto e_exit; + } + + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Allocating new pages for unpinned allocation. 
" + "Old content is lost!", + __func__)); + + eError = PVRSRV_ERROR_PMR_NEW_MEMORY; + +e_exit: + OSFreeMem(pui32MapTable); +e_exit_mapalloc_failure: + return eError; +} + +/*************************************************************************/ /*! +@Function PMRChangeSparseMemOSMem +@Description This function Changes the sparse mapping by allocating and + freeing of pages. It changes the GPU and CPU maps accordingly. +@Return PVRSRV_ERROR failure code +*/ /**************************************************************************/ +static PVRSRV_ERROR +PMRChangeSparseMemOSMem(PMR_IMPL_PRIVDATA pPriv, + const PMR *psPMR, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices, + IMG_UINT32 uiFlags) +{ + PVRSRV_ERROR eError; + + PMR_MAPPING_TABLE *psPMRMapTable = PMR_GetMappingTable(psPMR); + PMR_OSPAGEARRAY_DATA *psPMRPageArrayData = (PMR_OSPAGEARRAY_DATA *)pPriv; + struct page **psPageArray = psPMRPageArrayData->pagearray; + void **psDMAVirtArray = psPMRPageArrayData->dmavirtarray; + dma_addr_t *psDMAPhysArray = psPMRPageArrayData->dmaphysarray; + + struct page *psPage; + dma_addr_t psDMAPAddr; + void *pvDMAVAddr; + + IMG_UINT32 ui32AdtnlAllocPages = 0; /*uiLog2AllocPageSize - PAGE_SHIFT; + IMG_BOOL bCMA = BIT_ISSET(psPMRPageArrayData->ui32AllocFlags, FLAG_IS_CMA); + + + /* Check SPARSE flags and calculate pages to allocate and free */ + if (SPARSE_RESIZE_BOTH == (uiFlags & SPARSE_RESIZE_BOTH)) + { + ui32CommonRequestCount = (ui32AllocPageCount > ui32FreePageCount) ? 
+ ui32FreePageCount : ui32AllocPageCount; + + PDUMP_PANIC(SPARSEMEM_SWAP, "Request to swap alloc & free pages not supported"); + } + + if (SPARSE_RESIZE_ALLOC == (uiFlags & SPARSE_RESIZE_ALLOC)) + { + ui32AdtnlAllocPages = ui32AllocPageCount - ui32CommonRequestCount; + } + else + { + ui32AllocPageCount = 0; + } + + if (SPARSE_RESIZE_FREE == (uiFlags & SPARSE_RESIZE_FREE)) + { + ui32AdtnlFreePages = ui32FreePageCount - ui32CommonRequestCount; + } + else + { + ui32FreePageCount = 0; + } + + if (0 == (ui32CommonRequestCount || ui32AdtnlAllocPages || ui32AdtnlFreePages)) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + PVR_DPF((PVR_DBG_ERROR, + "%s: Missing parameters for number of pages to alloc/free", + __func__)); + return eError; + } + + /* The incoming request is classified into two operations independent of + * each other: alloc & free pages. + * These operations can be combined with two mapping operations as well + * which are GPU & CPU space mappings. + * + * From the alloc and free page requests, the net amount of pages to be + * allocated or freed is computed. Pages that were requested to be freed + * will be reused to fulfil alloc requests. + * + * The order of operations is: + * 1. Allocate new pages from the OS + * 2. Move the free pages from free request to alloc positions. + * 3. Free the rest of the pages not used for alloc + * + * Alloc parameters are validated at the time of allocation + * and any error will be handled then. 
*/ + + /* Validate the free indices */ + if (ui32FreePageCount) + { + if (NULL != pai32FreeIndices){ + + for (ui32Loop = 0; ui32Loop < ui32FreePageCount; ui32Loop++) + { + uiFreepgidx = pai32FreeIndices[ui32Loop]; + + if (uiFreepgidx > (psPMRPageArrayData->uiTotalNumOSPages >> uiOrder)) + { + eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE; + goto e0; + } + + if (NULL == psPageArray[uiFreepgidx]) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + PVR_DPF((PVR_DBG_ERROR, + "%s: Trying to free non-allocated page", + __func__)); + goto e0; + } + } + } + else + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + PVR_DPF((PVR_DBG_ERROR, + "%s: Given non-zero free count but missing indices array", + __func__)); + return eError; + } + } + + /* Validate the alloc indices */ + for (ui32Loop = ui32AdtnlAllocPages; ui32Loop < ui32AllocPageCount; ui32Loop++) + { + uiAllocpgidx = pai32AllocIndices[ui32Loop]; + + if (uiAllocpgidx > (psPMRPageArrayData->uiTotalNumOSPages >> uiOrder)) + { + eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE; + goto e0; + } + + if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM)) + { + if ((NULL != psPageArray[uiAllocpgidx]) || + (TRANSLATION_INVALID != psPMRMapTable->aui32Translation[uiAllocpgidx])) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + PVR_DPF((PVR_DBG_ERROR, + "%s: Trying to allocate already allocated page again", + __func__)); + goto e0; + } + } + else + { + if ((NULL == psPageArray[uiAllocpgidx]) || + (TRANSLATION_INVALID == psPMRMapTable->aui32Translation[uiAllocpgidx]) ) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + PVR_DPF((PVR_DBG_ERROR, + "%s: Unable to remap memory due to missing page", + __func__)); + goto e0; + } + } + } + + ui32Loop = 0; + + /* Allocate new pages from the OS */ + if (0 != ui32AdtnlAllocPages) + { + eError = _AllocOSPages(psPMRPageArrayData, pai32AllocIndices, ui32AdtnlAllocPages); + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s: New Addtl Allocation of pages failed", + __func__)); + goto e0; + } + + 
psPMRMapTable->ui32NumPhysChunks += ui32AdtnlAllocPages; + /*Mark the corresponding pages of translation table as valid */ + for (ui32Loop = 0; ui32Loop < ui32AdtnlAllocPages; ui32Loop++) + { + psPMRMapTable->aui32Translation[pai32AllocIndices[ui32Loop]] = pai32AllocIndices[ui32Loop]; + } + } + + + ui32Index = ui32Loop; + + /* Move the corresponding free pages to alloc request */ + for (ui32Loop = 0; ui32Loop < ui32CommonRequestCount; ui32Loop++, ui32Index++) + { + uiAllocpgidx = pai32AllocIndices[ui32Index]; + uiFreepgidx = pai32FreeIndices[ui32Loop]; + + psPage = psPageArray[uiAllocpgidx]; + psPageArray[uiAllocpgidx] = psPageArray[uiFreepgidx]; + + if (bCMA) + { + pvDMAVAddr = psDMAVirtArray[uiAllocpgidx]; + psDMAPAddr = psDMAPhysArray[uiAllocpgidx]; + psDMAVirtArray[uiAllocpgidx] = psDMAVirtArray[uiFreepgidx]; + psDMAPhysArray[uiAllocpgidx] = psDMAPhysArray[uiFreepgidx]; + } + + /* Is remap mem used in real world scenario? Should it be turned to a + * debug feature? The condition check needs to be out of loop, will be + * done at later point though after some analysis */ + if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM)) + { + psPMRMapTable->aui32Translation[uiFreepgidx] = TRANSLATION_INVALID; + psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx; + psPageArray[uiFreepgidx] = NULL; + if (bCMA) + { + psDMAVirtArray[uiFreepgidx] = NULL; + psDMAPhysArray[uiFreepgidx] = (dma_addr_t)0; + } + } + else + { + psPMRMapTable->aui32Translation[uiFreepgidx] = uiFreepgidx; + psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx; + psPageArray[uiFreepgidx] = psPage; + if (bCMA) + { + psDMAVirtArray[uiFreepgidx] = pvDMAVAddr; + psDMAPhysArray[uiFreepgidx] = psDMAPAddr; + } + } + } + + /* Free the additional free pages */ + if (0 != ui32AdtnlFreePages) + { + eError = _FreeOSPages(psPMRPageArrayData, + &pai32FreeIndices[ui32Loop], + ui32AdtnlFreePages); + if (eError != PVRSRV_OK) + { + goto e0; + } + psPMRMapTable->ui32NumPhysChunks -= ui32AdtnlFreePages; + 
while (ui32Loop < ui32FreePageCount) + { + psPMRMapTable->aui32Translation[pai32FreeIndices[ui32Loop]] = TRANSLATION_INVALID; + ui32Loop++; + } + } + + eError = PVRSRV_OK; + +e0: + return eError; +} + +/*************************************************************************/ /*! +@Function PMRChangeSparseMemCPUMapOSMem +@Description This function Changes CPU maps accordingly +@Return PVRSRV_ERROR failure code +*/ /**************************************************************************/ +static +PVRSRV_ERROR PMRChangeSparseMemCPUMapOSMem(PMR_IMPL_PRIVDATA pPriv, + const PMR *psPMR, + IMG_UINT64 sCpuVAddrBase, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices) +{ + struct page **psPageArray; + PMR_OSPAGEARRAY_DATA *psPMRPageArrayData = (PMR_OSPAGEARRAY_DATA *)pPriv; + IMG_CPU_PHYADDR sCPUPAddr; + + sCPUPAddr.uiAddr = 0; + psPageArray = psPMRPageArrayData->pagearray; + + return OSChangeSparseMemCPUAddrMap((void **)psPageArray, + sCpuVAddrBase, + sCPUPAddr, + ui32AllocPageCount, + pai32AllocIndices, + ui32FreePageCount, + pai32FreeIndices, + IMG_FALSE); +} + +static PMR_IMPL_FUNCTAB _sPMROSPFuncTab = { + .pfnLockPhysAddresses = &PMRLockSysPhysAddressesOSMem, + .pfnUnlockPhysAddresses = &PMRUnlockSysPhysAddressesOSMem, + .pfnDevPhysAddr = &PMRSysPhysAddrOSMem, + .pfnAcquireKernelMappingData = &PMRAcquireKernelMappingDataOSMem, + .pfnReleaseKernelMappingData = &PMRReleaseKernelMappingDataOSMem, + .pfnReadBytes = NULL, + .pfnWriteBytes = NULL, + .pfnUnpinMem = &PMRUnpinOSMem, + .pfnPinMem = &PMRPinOSMem, + .pfnChangeSparseMem = &PMRChangeSparseMemOSMem, + .pfnChangeSparseMemCPUMap = &PMRChangeSparseMemCPUMapOSMem, + .pfnFinalize = &PMRFinalizeOSMem, +}; + +/* Wrapper around OS page allocation. 
*/ +static PVRSRV_ERROR +DoPageAlloc(PMR_OSPAGEARRAY_DATA *psPrivData, + IMG_UINT32 *puiAllocIndices, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32Log2AllocPageSize) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + /* Do we fill the whole page array or just parts (sparse)? */ + if (ui32NumPhysChunks == ui32NumVirtChunks) + { + /* Allocate the physical pages */ + eError = _AllocOSPages(psPrivData, + NULL, + psPrivData->uiTotalNumOSPages >> + (ui32Log2AllocPageSize - PAGE_SHIFT)); + } + else if (ui32NumPhysChunks != 0) + { + /* Calculate the number of pages we want to allocate */ + IMG_UINT32 ui32PagesToAlloc = + (IMG_UINT32)((((ui32NumPhysChunks * uiChunkSize) - 1) >> ui32Log2AllocPageSize) + 1); + + /* Make sure calculation is correct */ + PVR_ASSERT(((PMR_SIZE_T) ui32PagesToAlloc << ui32Log2AllocPageSize) == + (ui32NumPhysChunks * uiChunkSize)); + + /* Allocate the physical pages */ + eError = _AllocOSPages(psPrivData, puiAllocIndices, + ui32PagesToAlloc); + } + + return eError; +} + +static void _EncodeAllocationFlags(IMG_UINT32 uiLog2AllocPageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + IMG_UINT32* ui32AllocFlags) +{ + + /* + * Use CMA framework if order is greater than OS page size; please note + * that OSMMapPMRGeneric() has the same expectation as well. + */ + /* IsCMA? */ + if (uiLog2AllocPageSize > PAGE_SHIFT) + { + BIT_SET(*ui32AllocFlags, FLAG_IS_CMA); + } + + /* OnDemand? */ + if (PVRSRV_CHECK_ON_DEMAND(uiFlags)) + { + BIT_SET(*ui32AllocFlags, FLAG_ONDEMAND); + } + + /* Zero? */ + if (PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags)) + { + BIT_SET(*ui32AllocFlags, FLAG_ZERO); + } + + /* Poison on alloc? */ + if (PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags)) + { + BIT_SET(*ui32AllocFlags, FLAG_POISON_ON_ALLOC); + } + + /* Poison on free? 
*/ + if (PVRSRV_CHECK_POISON_ON_FREE(uiFlags)) + { + BIT_SET(*ui32AllocFlags, FLAG_POISON_ON_FREE); + } + + /* Indicate whether this is an allocation with default caching attribute (i.e cached) or not */ + if (PVRSRV_CHECK_CPU_UNCACHED(uiFlags) || + PVRSRV_CHECK_CPU_WRITE_COMBINE(uiFlags)) + { + BIT_SET(*ui32AllocFlags, FLAG_UNSET_MEMORY_TYPE); + } + +} + +PVRSRV_ERROR +PhysmemNewOSRamBackedPMR(PHYS_HEAP *psPhysHeap, + CONNECTION_DATA *psConnection, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *puiAllocIndices, + IMG_UINT32 uiLog2AllocPageSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + const IMG_CHAR *pszAnnotation, + IMG_PID uiPid, + PMR **ppsPMRPtr, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + PVRSRV_ERROR eError2; + PMR *psPMR; + struct _PMR_OSPAGEARRAY_DATA_ *psPrivData; + PMR_FLAGS_T uiPMRFlags; + IMG_UINT32 ui32CPUCacheFlags; + IMG_UINT32 ui32AllocFlags = 0; + PVRSRV_DEVICE_NODE *psDevNode = PhysHeapDeviceNode(psPhysHeap); + + PVR_UNREFERENCED_PARAMETER(psConnection); + + /* + * The host driver (but not guest) can still use this factory for firmware + * allocations + */ + if (PVRSRV_VZ_MODE_IS(GUEST) && PVRSRV_CHECK_FW_MAIN(uiFlags)) + { + PVR_ASSERT(0); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto errorOnParam; + } + + /* Select correct caching mode */ + eError = DevmemCPUCacheMode(psDevNode, uiFlags, &ui32CPUCacheFlags); + if (eError != PVRSRV_OK) + { + goto errorOnParam; + } + + if (PVRSRV_CHECK_CPU_CACHE_CLEAN(uiFlags)) + { + ui32CPUCacheFlags |= PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN; + } + + _EncodeAllocationFlags(uiLog2AllocPageSize, uiFlags, &ui32AllocFlags); + + +#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) + /* Overwrite flags and always zero pages that could go back to UM */ + BIT_SET(ui32AllocFlags, FLAG_ZERO); + BIT_UNSET(ui32AllocFlags, FLAG_POISON_ON_ALLOC); +#endif + + /* Physical allocation alignment is generally not supported except under + 
very restrictive conditions, also there is a maximum alignment value + which must not exceed the largest device page-size. If these are not + met then fail the aligned-requested allocation */ + if (BIT_ISSET(ui32AllocFlags, FLAG_IS_CMA)) + { + IMG_UINT32 uiAlign = 1 << uiLog2AllocPageSize; + if (uiAlign > uiSize || uiAlign > (1 << PVR_MAX_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid PA alignment: size 0x%llx, align 0x%x", + __func__, uiSize, uiAlign)); + eError = PVRSRV_ERROR_INVALID_ALIGNMENT; + goto errorOnParam; + } + PVR_ASSERT(uiLog2AllocPageSize > PVR_MIN_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ); + } + + /* Create Array structure that hold the physical pages */ + eError = _AllocOSPageArray(psDevNode, + uiChunkSize, + ui32NumPhysChunks, + ui32NumVirtChunks, + uiLog2AllocPageSize, + ui32AllocFlags, + ui32CPUCacheFlags, + uiPid, + &psPrivData); + if (eError != PVRSRV_OK) + { + goto errorOnAllocPageArray; + } + + if (!BIT_ISSET(ui32AllocFlags, FLAG_ONDEMAND)) + { + eError = DoPageAlloc(psPrivData, puiAllocIndices, ui32NumPhysChunks, + ui32NumVirtChunks, uiChunkSize, uiLog2AllocPageSize); + if (eError != PVRSRV_OK) + { + goto errorOnAllocPages; + } + } + + /* + * In this instance, we simply pass flags straight through. + * + * Generically, uiFlags can include things that control the PMR factory, but + * we don't need any such thing (at the time of writing!), and our caller + * specifies all PMR flags so we don't need to meddle with what was given to + * us. 
+ */ + uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK); + + /* + * Check no significant bits were lost in cast due to different bit widths + * for flags + */ + PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK)); + + if (BIT_ISSET(ui32AllocFlags, FLAG_ONDEMAND)) + { + PDUMPCOMMENT("Deferred Allocation PMR (UMA)"); + } + + eError = PMRCreatePMR(psPhysHeap, + uiSize, + uiChunkSize, + ui32NumPhysChunks, + ui32NumVirtChunks, + puiAllocIndices, + uiLog2AllocPageSize, + uiPMRFlags, + pszAnnotation, + &_sPMROSPFuncTab, + psPrivData, + PMR_TYPE_OSMEM, + &psPMR, + ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + goto errorOnCreate; + } + + *ppsPMRPtr = psPMR; + + return PVRSRV_OK; + +errorOnCreate: + if (!BIT_ISSET(ui32AllocFlags, FLAG_ONDEMAND)) + { + eError2 = _FreeOSPages(psPrivData, NULL, 0); + PVR_ASSERT(eError2 == PVRSRV_OK); + } + +errorOnAllocPages: + eError2 = _FreeOSPagesArray(psPrivData); + PVR_ASSERT(eError2 == PVRSRV_OK); + +errorOnAllocPageArray: +errorOnParam: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} diff --git a/drivers/gpu/drm/phytium/octopus/physmem_osmem_linux.h b/drivers/gpu/drm/phytium/octopus/physmem_osmem_linux.h new file mode 100644 index 000000000000..4ec1ff45619b --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/physmem_osmem_linux.h @@ -0,0 +1,49 @@ +/*************************************************************************/ /*! +@File +@Title Linux OS physmem implementation +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef PHYSMEM_OSMEM_LINUX_H +#define PHYSMEM_OSMEM_LINUX_H + +void LinuxInitPhysmem(void); +void LinuxDeinitPhysmem(void); + +#endif /* PHYSMEM_OSMEM_LINUX_H */ diff --git a/drivers/gpu/drm/phytium/octopus/physmem_test.c b/drivers/gpu/drm/phytium/octopus/physmem_test.c new file mode 100644 index 000000000000..2c85524da9bc --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/physmem_test.c @@ -0,0 +1,710 @@ +/*************************************************************************/ /*! +@Title Physmem_test +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Single entry point for testing of page factories +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /***************************************************************************/ + +#include "img_defs.h" +#include "img_types.h" +#include "pvrsrv_error.h" +#include "physmem_test.h" +#include "device.h" +#include "syscommon.h" +#include "pmr.h" +#include "osfunc.h" +#include "physmem.h" +#include "physmem_osmem.h" +#include "physmem_lma.h" +#include "pvrsrv.h" + +#define PHYSMEM_TEST_PAGES 2 /* Mem test pages */ +#define PHYSMEM_TEST_PASSES_MAX 1000 /* Limit number of passes to some reasonable value */ + + +/* Test patterns for mem test */ + +static const IMG_UINT64 gui64Patterns[] = { + 0, + 0xffffffffffffffffULL, + 0x5555555555555555ULL, + 0xaaaaaaaaaaaaaaaaULL, + 0x1111111111111111ULL, + 0x2222222222222222ULL, + 0x4444444444444444ULL, + 0x8888888888888888ULL, + 0x3333333333333333ULL, + 0x6666666666666666ULL, + 0x9999999999999999ULL, + 0xccccccccccccccccULL, + 0x7777777777777777ULL, + 0xbbbbbbbbbbbbbbbbULL, + 0xddddddddddddddddULL, + 0xeeeeeeeeeeeeeeeeULL, + 0x7a6c7258554e494cULL, +}; + +static const IMG_UINT32 gui32Patterns[] = { + 0, + 0xffffffffU, + 0x55555555U, + 0xaaaaaaaaU, + 0x11111111U, + 0x22222222U, + 0x44444444U, + 0x88888888U, + 0x33333333U, + 0x66666666U, + 0x99999999U, + 0xccccccccU, + 0x77777777U, + 0xbbbbbbbbU, + 0xddddddddU, + 0xeeeeeeeeU, + 0x7a6c725cU, +}; + +static const IMG_UINT16 gui16Patterns[] = { + 0, + 0xffffU, + 0x5555U, + 0xaaaaU, + 0x1111U, + 0x2222U, + 0x4444U, + 0x8888U, + 0x3333U, + 0x6666U, + 0x9999U, + 0xccccU, + 0x7777U, + 0xbbbbU, + 0xddddU, + 0xeeeeU, + 0x7a6cU, +}; + +static const IMG_UINT8 gui8Patterns[] = { + 0, + 0xffU, + 0x55U, + 0xaaU, + 0x11U, + 0x22U, + 0x44U, + 0x88U, + 0x33U, + 0x66U, + 0x99U, + 0xccU, + 0x77U, + 0xbbU, + 0xddU, + 0xeeU, + 0x6cU, +}; + + +/* Following function does minimal required initialisation for mem test using dummy device node */ +static PVRSRV_ERROR +PhysMemTestInit(PVRSRV_DEVICE_NODE **ppsDeviceNode, PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + PVRSRV_ERROR 
eError; + + /* Dummy device node */ + psDeviceNode = OSAllocZMem(sizeof(*psDeviceNode)); + PVR_LOG_RETURN_IF_NOMEM(psDeviceNode, "OSAllocZMem"); + + psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_INIT; + psDeviceNode->psDevConfig = psDevConfig; + psDeviceNode->eCurrentSysPowerState = PVRSRV_SYS_POWER_STATE_ON; + + /* Initialise Phys mem heaps */ + eError = PVRSRVPhysMemHeapsInit(psDeviceNode, psDevConfig); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVPhysMemHeapsInit", ErrorSysDevDeInit); + + psDeviceNode->sDevMMUPxSetup.uiMMUPxLog2AllocGran = OSGetPageShift(); + + *ppsDeviceNode = psDeviceNode; + + return PVRSRV_OK; + +ErrorSysDevDeInit: + psDevConfig->psDevNode = NULL; + OSFreeMem(psDeviceNode); + return eError; +} + +/* Undo initialisation done for mem test */ +static void +PhysMemTestDeInit(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + /* Deinitialise Phys mem heaps */ + PVRSRVPhysMemHeapsDeinit(psDeviceNode); + + OSFreeMem(psDeviceNode); +} + +/* Test for PMR factory validation */ +static PVRSRV_ERROR +PMRValidationTest(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_MEMALLOCFLAGS_T uiFlags) +{ + PVRSRV_ERROR eError, eError1; + IMG_UINT32 i = 0, j = 0, ui32Index = 0; + IMG_UINT32 *pui32MappingTable = NULL; + PMR *psPMR = NULL; + IMG_BOOL *pbValid; + IMG_DEV_PHYADDR *apsDevPAddr; + IMG_UINT32 ui32NumOfPages = 10, ui32NumOfPhysPages = 5; + size_t uiMappedSize, uiPageSize; + IMG_UINT8 *pcWriteBuffer, *pcReadBuffer; + IMG_HANDLE hPrivData = NULL; + void *pvKernAddr = NULL; + + uiPageSize = OSGetPageSize(); + + /* Allocate OS memory for PMR page list */ + apsDevPAddr = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEV_PHYADDR)); + PVR_LOG_RETURN_IF_NOMEM(apsDevPAddr, "OSAllocMem"); + + /* Allocate OS memory for PMR page state */ + pbValid = OSAllocMem(ui32NumOfPages * sizeof(IMG_BOOL)); + PVR_LOG_GOTO_IF_NOMEM(pbValid, eError, ErrorFreePMRPageListMem); + OSCachedMemSet(pbValid, 0, ui32NumOfPages * sizeof(IMG_BOOL)); + + /* Allocate OS memory for write buffer */ + pcWriteBuffer = 
OSAllocMem(uiPageSize); + PVR_LOG_GOTO_IF_NOMEM(pcWriteBuffer, eError, ErrorFreePMRPageStateMem); + OSCachedMemSet(pcWriteBuffer, 0xF, uiPageSize); + + /* Allocate OS memory for read buffer */ + pcReadBuffer = OSAllocMem(uiPageSize); + PVR_LOG_GOTO_IF_NOMEM(pcReadBuffer, eError, ErrorFreeWriteBuffer); + + /* Allocate OS memory for mapping table */ + pui32MappingTable = (IMG_UINT32 *)OSAllocMem(ui32NumOfPhysPages * sizeof(*pui32MappingTable)); + PVR_LOG_GOTO_IF_NOMEM(pui32MappingTable, eError, ErrorFreeReadBuffer); + + /* Pages having even index will have physical backing in PMR */ + for (ui32Index=0; ui32Index < ui32NumOfPages; ui32Index+=2) + { + pui32MappingTable[i++] = ui32Index; + } + + /* Allocate Sparse PMR with SPARSE | READ | WRITE | UNCACHED attributes */ + uiFlags |= PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING | \ + PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED; + + /* Allocate a sparse PMR from given physical heap - CPU/GPU/FW */ + eError = PhysmemNewRamBackedPMR(NULL, + psDeviceNode, + ui32NumOfPages * uiPageSize, + uiPageSize, + ui32NumOfPhysPages, + ui32NumOfPages, + pui32MappingTable, + OSGetPageShift(), + uiFlags, + sizeof("PMR ValidationTest"), + "PMR ValidationTest", + OSGetCurrentClientProcessIDKM(), + &psPMR, + PDUMP_NONE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to allocate a PMR")); + goto ErrorFreeMappingTable; + } + + /* Check whether allocated PMR can be locked and obtain physical addresses + * of underlying memory pages. 
+ */ + eError = PMRLockSysPhysAddresses(psPMR); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to lock PMR")); + goto ErrorUnrefPMR; + } + + /* Get the Device physical addresses of the pages */ + eError = PMR_DevPhysAddr(psPMR, OSGetPageShift(), ui32NumOfPages, 0, apsDevPAddr, pbValid); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to map PMR pages into device physical addresses")); + goto ErrorUnlockPhysAddresses; + } + + /* Check whether device address of each physical page is OS PAGE_SIZE aligned */ + for (i = 0; i < ui32NumOfPages; i++) + { + if (pbValid[i]) + { + if ((apsDevPAddr[i].uiAddr & OSGetPageMask()) != 0) + { + PVR_DPF((PVR_DBG_ERROR, "Physical memory of PMR is not page aligned")); + eError = PVRSRV_ERROR_MEMORY_TEST_FAILED; + goto ErrorUnlockPhysAddresses; + } + } + } + + /* Acquire kernel virtual address of each physical page and write to it + * and then release it. + */ + for (i = 0; i < ui32NumOfPages; i++) + { + if (pbValid[i]) + { + eError = PMRAcquireSparseKernelMappingData(psPMR, (i * uiPageSize), uiPageSize, &pvKernAddr, &uiMappedSize, &hPrivData); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to Acquire Kernel Mapping of PMR")); + goto ErrorUnlockPhysAddresses; + } + OSDeviceMemCopy(pvKernAddr, pcWriteBuffer, OSGetPageSize()); + + eError = PMRReleaseKernelMappingData(psPMR, hPrivData); + PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData"); + } + } + + /* Acquire kernel virtual address of each physical page and read + * from it and check where contents are intact. 
+ */ + for (i = 0; i < ui32NumOfPages; i++) + { + if (pbValid[i]) + { + eError = PMRAcquireSparseKernelMappingData(psPMR, (i * uiPageSize), uiPageSize, &pvKernAddr, &uiMappedSize, &hPrivData); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to Acquire Kernel Mapping of PMR")); + goto ErrorUnlockPhysAddresses; + } + OSCachedMemSet(pcReadBuffer, 0x0, uiPageSize); + OSDeviceMemCopy(pcReadBuffer, pvKernAddr, uiMappedSize); + + eError = PMRReleaseKernelMappingData(psPMR, hPrivData); + PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData"); + + for (j = 0; j < uiPageSize; j++) + { + if (pcReadBuffer[j] != pcWriteBuffer[j]) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Test failed. Got (0x%hhx), expected (0x%hhx)!", + __func__, pcReadBuffer[j], pcWriteBuffer[j])); + eError = PVRSRV_ERROR_MEMORY_TEST_FAILED; + goto ErrorUnlockPhysAddresses; + } + } + } + } + +ErrorUnlockPhysAddresses: + /* Unlock and Unref the PMR to destroy it */ + eError1 = PMRUnlockSysPhysAddresses(psPMR); + if (eError1 != PVRSRV_OK) + { + eError = (eError == PVRSRV_OK)? eError1 : eError; + PVR_DPF((PVR_DBG_ERROR, "Failed to unlock PMR")); + } + +ErrorUnrefPMR: + eError1 = PMRUnrefPMR(psPMR); + if (eError1 != PVRSRV_OK) + { + eError = (eError == PVRSRV_OK)? 
eError1 : eError; + PVR_DPF((PVR_DBG_ERROR, "Failed to free PMR")); + } +ErrorFreeMappingTable: + OSFreeMem(pui32MappingTable); +ErrorFreeReadBuffer: + OSFreeMem(pcReadBuffer); +ErrorFreeWriteBuffer: + OSFreeMem(pcWriteBuffer); +ErrorFreePMRPageStateMem: + OSFreeMem(pbValid); +ErrorFreePMRPageListMem: + OSFreeMem(apsDevPAddr); + + return eError; +} + +#define DO_MEMTEST_FOR_PATTERNS(StartAddr, EndAddr, Patterns, NumOfPatterns, Error, ptr, i) \ + for (i = 0; i < NumOfPatterns; i++) \ + { \ + /* Write pattern */ \ + for (ptr = StartAddr; ptr < EndAddr; ptr++) \ + { \ + *ptr = Patterns[i]; \ + } \ + \ + /* Read back and validate pattern */ \ + for (ptr = StartAddr; ptr < EndAddr ; ptr++) \ + { \ + if (*ptr != Patterns[i]) \ + { \ + Error = PVRSRV_ERROR_MEMORY_TEST_FAILED; \ + break; \ + } \ + } \ + \ + if (Error != PVRSRV_OK) \ + { \ + break; \ + } \ + } + +static PVRSRV_ERROR +TestPatternU8(void *pvKernAddr, size_t uiMappedSize) +{ + IMG_UINT8 *StartAddr = (IMG_UINT8 *) pvKernAddr; + IMG_UINT8 *EndAddr = ((IMG_UINT8 *) pvKernAddr) + (uiMappedSize / sizeof(IMG_UINT8)); + IMG_UINT8 *p; + IMG_UINT32 i; + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_ASSERT((uiMappedSize % sizeof(IMG_UINT8)) == 0); + + DO_MEMTEST_FOR_PATTERNS(StartAddr, EndAddr, gui8Patterns, sizeof(gui8Patterns)/sizeof(IMG_UINT8), eError, p, i); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Test failed. 
Got (0x%hhx), expected (0x%hhx)!", + __func__, *p, gui8Patterns[i])); + } + + return eError; +} + + +static PVRSRV_ERROR +TestPatternU16(void *pvKernAddr, size_t uiMappedSize) +{ + IMG_UINT16 *StartAddr = (IMG_UINT16 *) pvKernAddr; + IMG_UINT16 *EndAddr = ((IMG_UINT16 *) pvKernAddr) + (uiMappedSize / sizeof(IMG_UINT16)); + IMG_UINT16 *p; + IMG_UINT32 i; + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_ASSERT((uiMappedSize % sizeof(IMG_UINT16)) == 0); + + DO_MEMTEST_FOR_PATTERNS(StartAddr, EndAddr, gui16Patterns, sizeof(gui16Patterns)/sizeof(IMG_UINT16), eError, p, i); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Test failed. Got (0x%hx), expected (0x%hx)!", + __func__, *p, gui16Patterns[i])); + } + + return eError; +} + +static PVRSRV_ERROR +TestPatternU32(void *pvKernAddr, size_t uiMappedSize) +{ + IMG_UINT32 *StartAddr = (IMG_UINT32 *) pvKernAddr; + IMG_UINT32 *EndAddr = ((IMG_UINT32 *) pvKernAddr) + (uiMappedSize / sizeof(IMG_UINT32)); + IMG_UINT32 *p; + IMG_UINT32 i; + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_ASSERT((uiMappedSize % sizeof(IMG_UINT32)) == 0); + + DO_MEMTEST_FOR_PATTERNS(StartAddr, EndAddr, gui32Patterns, sizeof(gui32Patterns)/sizeof(IMG_UINT32), eError, p, i); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Test failed. Got (0x%x), expected (0x%x)!", + __func__, *p, gui32Patterns[i])); + } + + return eError; +} + +static PVRSRV_ERROR +TestPatternU64(void *pvKernAddr, size_t uiMappedSize) +{ + IMG_UINT64 *StartAddr = (IMG_UINT64 *) pvKernAddr; + IMG_UINT64 *EndAddr = ((IMG_UINT64 *) pvKernAddr) + (uiMappedSize / sizeof(IMG_UINT64)); + IMG_UINT64 *p; + IMG_UINT32 i; + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_ASSERT((uiMappedSize % sizeof(IMG_UINT64)) == 0); + + DO_MEMTEST_FOR_PATTERNS(StartAddr, EndAddr, gui64Patterns, sizeof(gui64Patterns)/sizeof(IMG_UINT64), eError, p, i); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Test failed. 
Got (0x%llx), expected (0x%llx)!", + __func__, *p, gui64Patterns[i])); + } + + return eError; +} + +static PVRSRV_ERROR +TestSplitCacheline(void *pvKernAddr, size_t uiMappedSize) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + size_t uiCacheLineSize; + size_t uiBlockSize; + size_t j; + IMG_UINT8 *pcWriteBuffer, *pcReadBuffer; + IMG_UINT8 *StartAddr = (IMG_UINT8 *) pvKernAddr; + IMG_UINT8 *EndAddr, *p; + + uiCacheLineSize = OSCPUCacheAttributeSize(OS_CPU_CACHE_ATTRIBUTE_LINE_SIZE); + + if (uiCacheLineSize > 0) + { + uiBlockSize = (uiCacheLineSize * 2)/3; /* split cacheline */ + + pcWriteBuffer = OSAllocMem(uiBlockSize); + PVR_LOG_RETURN_IF_NOMEM(pcWriteBuffer, "OSAllocMem"); + + /* Fill the write buffer with test data, 0xAB*/ + OSCachedMemSet(pcWriteBuffer, 0xAB, uiBlockSize); + + pcReadBuffer = OSAllocMem(uiBlockSize); + PVR_LOG_GOTO_IF_NOMEM(pcReadBuffer, eError, ErrorFreeWriteBuffer); + + /* Fit only complete blocks in uiMappedSize, ignore leftover bytes */ + EndAddr = StartAddr + (uiBlockSize * (uiMappedSize / uiBlockSize)); + + /* Write blocks into the memory */ + for (p = StartAddr; p < EndAddr; p += uiBlockSize) + { + OSCachedMemCopy(p, pcWriteBuffer, uiBlockSize); + } + + /* Read back blocks and check */ + for (p = StartAddr; p < EndAddr; p += uiBlockSize) + { + OSCachedMemCopy(pcReadBuffer, p, uiBlockSize); + + for (j = 0; j < uiBlockSize; j++) + { + if (pcReadBuffer[j] != pcWriteBuffer[j]) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Test failed. 
Got (0x%hhx), expected (0x%hhx)!", __func__, pcReadBuffer[j], pcWriteBuffer[j])); + eError = PVRSRV_ERROR_MEMORY_TEST_FAILED; + goto ErrorMemTestFailed; + } + } + } + +ErrorMemTestFailed: + OSFreeMem(pcReadBuffer); +ErrorFreeWriteBuffer: + OSFreeMem(pcWriteBuffer); + } + + return eError; +} + +/* Memory test - writes and reads back different patterns to memory and validate the same */ +static PVRSRV_ERROR +MemTestPatterns(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_MEMALLOCFLAGS_T uiFlags) +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32MappingTable = 0; + PMR *psPMR = NULL; + size_t uiMappedSize, uiPageSize; + IMG_HANDLE hPrivData = NULL; + void *pvKernAddr = NULL; + + uiPageSize = OSGetPageSize(); + + /* Allocate PMR with READ | WRITE | WRITE_COMBINE attributes */ + uiFlags |= PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC; + + /*Allocate a PMR from given physical heap */ + eError = PhysmemNewRamBackedPMR(NULL, + psDeviceNode, + uiPageSize * PHYSMEM_TEST_PAGES, + uiPageSize * PHYSMEM_TEST_PAGES, + 1, + 1, + &ui32MappingTable, + OSGetPageShift(), + uiFlags, + sizeof("PMR PhysMemTest"), + "PMR PhysMemTest", + OSGetCurrentClientProcessIDKM(), + &psPMR, + PDUMP_NONE); + PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemNewRamBackedPMR"); + + /* Check whether allocated PMR can be locked and obtain physical + * addresses of underlying memory pages. + */ + eError = PMRLockSysPhysAddresses(psPMR); + PVR_LOG_GOTO_IF_ERROR(eError, "PMRLockSysPhysAddresses", ErrorUnrefPMR); + + /* Map the physical page(s) into kernel space, acquire kernel mapping + * for PMR. 
+ */ + eError = PMRAcquireKernelMappingData(psPMR, 0, uiPageSize * PHYSMEM_TEST_PAGES, &pvKernAddr, &uiMappedSize, &hPrivData); + PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireKernelMappingData", ErrorUnlockPhysAddresses); + + PVR_ASSERT((uiPageSize * PHYSMEM_TEST_PAGES) == uiMappedSize); + + /* Test various patterns */ + eError = TestPatternU64(pvKernAddr, uiMappedSize); + if (eError != PVRSRV_OK) + { + goto ErrorReleaseKernelMappingData; + } + + eError = TestPatternU32(pvKernAddr, uiMappedSize); + if (eError != PVRSRV_OK) + { + goto ErrorReleaseKernelMappingData; + } + + eError = TestPatternU16(pvKernAddr, uiMappedSize); + if (eError != PVRSRV_OK) + { + goto ErrorReleaseKernelMappingData; + } + + eError = TestPatternU8(pvKernAddr, uiMappedSize); + if (eError != PVRSRV_OK) + { + goto ErrorReleaseKernelMappingData; + } + + /* Test split cachelines */ + eError = TestSplitCacheline(pvKernAddr, uiMappedSize); + +ErrorReleaseKernelMappingData: + (void) PMRReleaseKernelMappingData(psPMR, hPrivData); + +ErrorUnlockPhysAddresses: + /* Unlock and Unref the PMR to destroy it, ignore returned value */ + (void) PMRUnlockSysPhysAddresses(psPMR); +ErrorUnrefPMR: + (void) PMRUnrefPMR(psPMR); + + return eError; +} + +static PVRSRV_ERROR +PhysMemTestRun(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_MEMALLOCFLAGS_T uiFlags, IMG_UINT32 ui32Passes) +{ + PVRSRV_ERROR eError; + IMG_UINT32 i; + + /* PMR validation test */ + eError = PMRValidationTest(psDeviceNode, uiFlags); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: PMR validation test failed!", + __func__)); + return eError; + } + + for (i = 0; i < ui32Passes; i++) + { + /* Mem test */ + eError = MemTestPatterns(psDeviceNode, uiFlags); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: [Pass#%u] MemTestPatterns failed!", + __func__, i)); + break; + } + } + + return eError; +} + +PVRSRV_ERROR +PhysMemTest(void *pvDevConfig, IMG_UINT32 ui32MemTestPasses) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + 
PVRSRV_DEVICE_CONFIG *psDevConfig = pvDevConfig; + PVRSRV_ERROR eError; + + /* validate memtest passes requested */ + ui32MemTestPasses = (ui32MemTestPasses > PHYSMEM_TEST_PASSES_MAX)? PHYSMEM_TEST_PASSES_MAX : ui32MemTestPasses; + + /* Do minimal initialisation before test */ + eError = PhysMemTestInit(&psDeviceNode, psDevConfig); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Test failed to initialize", __func__)); + return eError; + } + + /* GPU local mem */ + eError = PhysMemTestRun(psDeviceNode, 0, ui32MemTestPasses); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "GPU local memory test failed!")); + goto ErrorPhysMemTestDeinit; + } + + /* CPU local mem */ + eError = PhysMemTestRun(psDeviceNode, PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(CPU_LOCAL), ui32MemTestPasses); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "CPU local memory test failed!")); + goto ErrorPhysMemTestDeinit; + } + + PVR_LOG(("PhysMemTest: Passed.")); + goto PhysMemTestPassed; + +ErrorPhysMemTestDeinit: + PVR_DPF((PVR_DBG_ERROR, "PhysMemTest: Failed.")); +PhysMemTestPassed: + PhysMemTestDeInit(psDeviceNode); + + return eError; +} diff --git a/drivers/gpu/drm/phytium/octopus/physmem_test.h b/drivers/gpu/drm/phytium/octopus/physmem_test.h new file mode 100644 index 000000000000..4fea810030e1 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/physmem_test.h @@ -0,0 +1,51 @@ +/*************************************************************************/ /*! +@Title Physmem test header +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Header for single entry point for testing of page factories +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef SRVSRV_PHYSMEM_TEST_H +#define SRVSRV_PHYSMEM_TEST_H +/* + * PhysMemTest + */ +PVRSRV_ERROR +PhysMemTest(void *pvDevConfig, IMG_UINT32 ui32MemTestPasses); + +#endif /* SRVSRV_PHYSMEM_TEST_H */ diff --git a/drivers/gpu/drm/phytium/octopus/phytiumvr/buffer_attribs.h b/drivers/gpu/drm/phytium/octopus/phytiumvr/buffer_attribs.h new file mode 100644 index 000000000000..5d9ef9955fea --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/phytiumvr/buffer_attribs.h @@ -0,0 +1,147 @@ +/*************************************************************************/ /*! +@File +@Title 3D types for use by IMG APIs +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License MIT + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifndef POWERVR_BUFFER_ATTRIBS_H +#define POWERVR_BUFFER_ATTRIBS_H + +/** + * Memory layouts + * Defines how pixels are laid out within a surface. + */ +typedef enum +{ + IMG_MEMLAYOUT_STRIDED, /**< Resource is strided, one row at a time */ + IMG_MEMLAYOUT_TWIDDLED, /**< Resource is 2D twiddled to match HW */ + IMG_MEMLAYOUT_3DTWIDDLED, /**< Resource is 3D twiddled, classic style */ + IMG_MEMLAYOUT_TILED, /**< Resource is tiled, tiling config specified elsewhere. */ + IMG_MEMLAYOUT_PAGETILED, /**< Resource is pagetiled */ + IMG_MEMLAYOUT_INVNTWIDDLED, /**< Resource is 2D twiddled !N style */ +} IMG_MEMLAYOUT; + +/** + * Rotation types + */ +typedef enum +{ + IMG_ROTATION_0DEG = 0, + IMG_ROTATION_90DEG = 1, + IMG_ROTATION_180DEG = 2, + IMG_ROTATION_270DEG = 3, + IMG_ROTATION_FLIP_Y = 4, + + IMG_ROTATION_BAD = 255, +} IMG_ROTATION; + +/** + * Alpha types. + */ +typedef enum +{ + IMG_COLOURSPACE_FORMAT_UNKNOWN = 0x0 << 16, + IMG_COLOURSPACE_FORMAT_LINEAR = 0x1 << 16, + IMG_COLOURSPACE_FORMAT_SRGB = 0x2 << 16, + IMG_COLOURSPACE_FORMAT_SCRGB = 0x3 << 16, + IMG_COLOURSPACE_FORMAT_SCRGB_LINEAR = 0x4 << 16, + IMG_COLOURSPACE_FORMAT_DISPLAY_P3_LINEAR = 0x5 << 16, + IMG_COLOURSPACE_FORMAT_DISPLAY_P3 = 0x6 << 16, + IMG_COLOURSPACE_FORMAT_BT2020_PQ = 0x7 << 16, + IMG_COLOURSPACE_FORMAT_BT2020_LINEAR = 0x8 << 16, + IMG_COLOURSPACE_FORMAT_DISPLAY_P3_PASSTHROUGH = 0x9 << 16, + IMG_COLOURSPACE_FORMAT_MASK = 0xF << 16, +} IMG_COLOURSPACE_FORMAT; + +#define IS_FBCDC_LOSSY(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8) ? IMG_TRUE : \ + (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4) ? IMG_TRUE : \ + (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2) ? IMG_TRUE : \ + (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_8x8) ? IMG_TRUE : \ + (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_16x4) ? IMG_TRUE : \ + (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_32x2) ? 
IMG_TRUE : \ + (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_8x8) ? IMG_TRUE : \ + (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_16x4) ? IMG_TRUE : \ + (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_32x2) ? IMG_TRUE : IMG_FALSE) + +#define IS_FBCDC_PACKED(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_PACKED_8x8) ? IMG_TRUE : IMG_FALSE) + +#define GET_FBCDC_BLOCK_TYPE(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : \ + (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \ + (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : \ + (mode == IMG_FB_COMPRESSION_DIRECT_PACKED_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : \ + (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : \ + (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \ + (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : \ + (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : \ + (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \ + (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : mode) + +#define FBCDC_MODE_ADD_PACKING(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_8x8) ? IMG_FB_COMPRESSION_DIRECT_PACKED_8x8 : mode) + +#define FBCDC_MODE_REMOVE_PACKING(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_PACKED_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : mode) + +#define FBCDC_MODE_ADD_LOSSY25(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_8x8) ? IMG_FB_COMPRESSION_DIRECT_LOSSY25_8x8 : \ + (mode == IMG_FB_COMPRESSION_DIRECT_16x4) ? IMG_FB_COMPRESSION_DIRECT_LOSSY25_16x4 : \ + (mode == IMG_FB_COMPRESSION_DIRECT_32x2) ? IMG_FB_COMPRESSION_DIRECT_LOSSY25_32x2 : mode) + +#define FBCDC_MODE_ADD_LOSSY50(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_8x8) ? IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8 : \ + (mode == IMG_FB_COMPRESSION_DIRECT_16x4) ? 
IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4 : \ + (mode == IMG_FB_COMPRESSION_DIRECT_32x2) ? IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2 : mode) + +#define FBCDC_MODE_ADD_LOSSY75(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_8x8) ? IMG_FB_COMPRESSION_DIRECT_LOSSY75_8x8 : \ + (mode == IMG_FB_COMPRESSION_DIRECT_16x4) ? IMG_FB_COMPRESSION_DIRECT_LOSSY75_16x4 : \ + (mode == IMG_FB_COMPRESSION_DIRECT_32x2) ? IMG_FB_COMPRESSION_DIRECT_LOSSY75_32x2 : mode) + +#define FBCDC_MODE_REMOVE_LOSSY(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : \ + (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \ + (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : \ + (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : \ + (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \ + (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : \ + (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : \ + (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \ + (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_32x2) ? 
IMG_FB_COMPRESSION_DIRECT_32x2 : mode) + +/** + * Types of framebuffer compression + */ +typedef enum +{ + IMG_FB_COMPRESSION_NONE, + IMG_FB_COMPRESSION_DIRECT_8x8, + IMG_FB_COMPRESSION_DIRECT_16x4, + IMG_FB_COMPRESSION_DIRECT_32x2, + IMG_FB_COMPRESSION_DIRECT_LOSSY25_8x8, + IMG_FB_COMPRESSION_DIRECT_LOSSY25_16x4, + IMG_FB_COMPRESSION_DIRECT_LOSSY25_32x2, + IMG_FB_COMPRESSION_DIRECT_LOSSY75_8x8, + IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8, + IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4, + IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2, + IMG_FB_COMPRESSION_DIRECT_PACKED_8x8, + IMG_FB_COMPRESSION_DIRECT_LOSSY75_16x4, + IMG_FB_COMPRESSION_DIRECT_LOSSY75_32x2, +} IMG_FB_COMPRESSION; + + +#endif /* POWERVR_BUFFER_ATTRIBS_H */ diff --git a/drivers/gpu/drm/phytium/octopus/phytiumvr/img_drm_fourcc.h b/drivers/gpu/drm/phytium/octopus/phytiumvr/img_drm_fourcc.h new file mode 100644 index 000000000000..fe7350102836 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/phytiumvr/img_drm_fourcc.h @@ -0,0 +1,137 @@ +/*************************************************************************/ /*! +@File +@Title Wrapper around drm_fourcc.h +@Description FourCCs and DRM framebuffer modifiers that are not in the + Kernel's and libdrm's drm_fourcc.h can be added here. +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License MIT + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef IMG_DRM_FOURCC_H
+#define IMG_DRM_FOURCC_H
+
+#if defined(__KERNEL__)
+#include <drm/drm_fourcc.h>
+#else
+/*
+ * Include types.h to workaround versions of libdrm older than 2.4.68
+ * not including the correct headers.
+ */
+#include <linux/types.h>
+
+#include <drm_fourcc.h>
+#endif
+
+/*
+ * Don't get too inspired by this example :)
+ * ADF doesn't support DRM modifiers, so the memory layout had to be
+ * included in the fourcc name, but the proper way to specify information
+ * additional to pixel formats is to use DRM modifiers.
+ *
+ * See upstream drm_fourcc.h for the proper naming convention.
+ */
+#ifndef DRM_FORMAT_BGRA8888_DIRECT_16x4
+#define DRM_FORMAT_BGRA8888_DIRECT_16x4 fourcc_code('I', 'M', 'G', '0')
+#endif
+
+#if !defined(__KERNEL__)
+/*
+ * A definition for the same format was added in Linux kernel 5.2 in commit
+ * 88ab9c76d191ad8645b483f31e2b394b0f3e280e. As such, this definition has been
+ * deprecated and the DRM_FORMAT_ABGR16161616F kernel define should be used
+ * instead of this one.
+ */
+#define DRM_FORMAT_ABGR16_IMG_DEPRECATED fourcc_code('I', 'M', 'G', '1')
+#endif
+
+/*
+ * Upstream does not have a packed 10 Bits Per Channel YVU format yet,
+ * so let's make one up.
+ * Note: at the moment this format is not intended to be used with
+ * a framebuffer, so the kernel's core DRM doesn't need to know
+ * about this format. This means that the kernel doesn't need
+ * to be patched.
+ */ +#if !defined(__KERNEL__) +#define DRM_FORMAT_YVU444_PACK10_IMG fourcc_code('I', 'M', 'G', '2') +#define DRM_FORMAT_YUV422_2PLANE_PACK10_IMG fourcc_code('I', 'M', 'G', '3') +#define DRM_FORMAT_YUV420_2PLANE_PACK10_IMG fourcc_code('I', 'M', 'G', '4') +#endif + +/* + * Value chosen in the middle of 255 pool to minimise the chance of hitting + * the same value potentially defined by other vendors in the drm_fourcc.h + */ +#define DRM_FORMAT_MOD_VENDOR_PVR 0x92 + +#ifndef DRM_FORMAT_MOD_VENDOR_NONE +#define DRM_FORMAT_MOD_VENDOR_NONE 0 +#endif + +#ifndef DRM_FORMAT_RESERVED +#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1) +#endif + +#define img_fourcc_mod_combine(uiModHi, uiModLo) \ + ((__u64) ((__u32) (uiModHi)) << 32 | (__u64) ((__u32) (uiModLo))) + +#define img_fourcc_mod_hi(ui64Mod) \ + ((__u32) ((__u64) (ui64Mod) >> 32)) + +#define img_fourcc_mod_lo(ui64Mod) \ + ((__u32) ((__u64) (ui64Mod)) & 0xffffffff) + +#ifndef fourcc_mod_code +#define fourcc_mod_code(vendor, val) \ + ((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | (val & 0x00ffffffffffffffULL)) +#endif + +#ifndef DRM_FORMAT_MOD_INVALID +#define DRM_FORMAT_MOD_INVALID fourcc_mod_code(NONE, DRM_FORMAT_RESERVED) +#endif + +#ifndef DRM_FORMAT_MOD_LINEAR +#define DRM_FORMAT_MOD_LINEAR fourcc_mod_code(NONE, 0) +#endif + +#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V7 fourcc_mod_code(PVR, 6) +#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V7 fourcc_mod_code(PVR, 12) + +#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V10 fourcc_mod_code(PVR, 21) +#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V10 fourcc_mod_code(PVR, 22) +#define DRM_FORMAT_MOD_PVR_FBCDC_32x2_V10 fourcc_mod_code(PVR, 23) + +#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V12 fourcc_mod_code(PVR, 15) +#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V12 fourcc_mod_code(PVR, 16) + +#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V13 fourcc_mod_code(PVR, 24) +#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY25_8x8_V13 fourcc_mod_code(PVR, 25) +#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY50_8x8_V13 fourcc_mod_code(PVR, 26) 
+#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY75_8x8_V13 fourcc_mod_code(PVR, 27) + +#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V13 fourcc_mod_code(PVR, 28) +#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY25_16x4_V13 fourcc_mod_code(PVR, 29) +#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY50_16x4_V13 fourcc_mod_code(PVR, 30) +#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY75_16x4_V13 fourcc_mod_code(PVR, 31) + +#endif /* IMG_DRM_FOURCC_H */ diff --git a/drivers/gpu/drm/phytium/octopus/phytiumvr/mem_types.h b/drivers/gpu/drm/phytium/octopus/phytiumvr/mem_types.h new file mode 100644 index 000000000000..0793db9b5ecf --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/phytiumvr/mem_types.h @@ -0,0 +1,64 @@ +/*************************************************************************/ /*! +@File +@Title Public types +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License MIT + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef POWERVR_TYPES_H
+#define POWERVR_TYPES_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#if defined(_MSC_VER)
+	#include "msvc_types.h"
+#elif defined(__linux__) && defined(__KERNEL__)
+	#include <linux/types.h>
+	#include <linux/compiler.h>
+#else
+	#include <stdint.h>
+	#define __iomem
+#endif
+
+typedef void *IMG_CPU_VIRTADDR;
+
+/* device virtual address */
+typedef struct
+{
+	uint64_t uiAddr;
+#define IMG_CAST_TO_DEVVADDR_UINT(var) (uint64_t)(var)
+
+} IMG_DEV_VIRTADDR;
+
+typedef uint64_t IMG_DEVMEM_SIZE_T;
+typedef uint64_t IMG_DEVMEM_ALIGN_T;
+typedef uint64_t IMG_DEVMEM_OFFSET_T;
+typedef uint32_t IMG_DEVMEM_LOG2ALIGN_T;
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/phytium/octopus/phytiumvr/pvrsrv_sync_ext.h b/drivers/gpu/drm/phytium/octopus/phytiumvr/pvrsrv_sync_ext.h
new file mode 100644
index 000000000000..c081487ba6db
--- /dev/null
+++ b/drivers/gpu/drm/phytium/octopus/phytiumvr/pvrsrv_sync_ext.h
@@ -0,0 +1,72 @@
+/*************************************************************************/ /*!
+@File
+@Title Services external synchronisation interface header
+@Description Defines synchronisation structures that are visible internally
+ and externally
+@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved
+@License MIT
+
+The contents of this file are subject to the MIT license as set out below.
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef POWERVR_SYNC_EXT_H +#define POWERVR_SYNC_EXT_H + +#if defined(__cplusplus) +extern "C" { +#endif + +/*! + * Number of sync prims still used internally in operations + */ +#define PVRSRV_MAX_SYNC_PRIMS 4 + +/*! + * Maximum number of dev var updates passed in a kick call + */ +#define PVRSRV_MAX_DEV_VARS 13 + +/*! + * Number of UFOs in operations + */ +#define PVRSRV_MAX_SYNCS (PVRSRV_MAX_SYNC_PRIMS + PVRSRV_MAX_DEV_VARS) + +/*! Implementation independent types for passing fence/timeline to Services. + */ +typedef int32_t PVRSRV_FENCE; +typedef int32_t PVRSRV_TIMELINE; + +/*! Maximum length for an annotation name string for fence sync model objects. 
+ */ +#define PVRSRV_SYNC_NAME_LENGTH 32 + +/* Macros for API callers using the fence sync model + */ +#define PVRSRV_NO_TIMELINE ((PVRSRV_TIMELINE) -1) +#define PVRSRV_NO_FENCE ((PVRSRV_FENCE) -1) +#define PVRSRV_NO_FENCE_PTR NULL +#define PVRSRV_NO_TIMELINE_PTR NULL + +#if defined(__cplusplus) +} +#endif + +#endif diff --git a/drivers/gpu/drm/phytium/octopus/pmr.c b/drivers/gpu/drm/phytium/octopus/pmr.c new file mode 100644 index 000000000000..870ef3d65201 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pmr.c @@ -0,0 +1,3600 @@ +/*************************************************************************/ /*! +@File +@Title Physmem (PMR) abstraction +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Part of the memory management. This module is responsible for + the "PMR" abstraction. A PMR (Physical Memory Resource) + represents some unit of physical memory which is + allocated/freed/mapped/unmapped as an indivisible unit + (higher software levels provide an abstraction above that + to deal with dividing this down into smaller manageable units). + Importantly, this module knows nothing of virtual memory, or + of MMUs etc., with one excusable exception. We have the + concept of a "page size", which really means nothing in + physical memory, but represents a "contiguity quantum" such + that the higher level modules which map this memory are able + to verify that it matches the needs of the page size for the + virtual realm into which it is being mapped. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ /**************************************************************************/ + +#include "img_types.h" +#include "img_defs.h" +#include "pvr_debug.h" +#include "pvrsrv_error.h" + +#include "pdump.h" +#include "devicemem_server_utils.h" + +#include "osfunc.h" +#include "pdump_km.h" +#include "pdump_physmem.h" +#include "pmr_impl.h" +#include "pmr_os.h" +#include "pvrsrv.h" + +#include "allocmem.h" +#include "lock.h" +#include "uniq_key_splay_tree.h" + +#if defined(SUPPORT_SECURE_EXPORT) +#include "secure_export.h" +#include "ossecure_export.h" +#endif + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) +#include "ri_server.h" +#endif + +/* ourselves */ +#include "pmr.h" + +#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) +#include "mmap_stats.h" +#endif + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#include "process_stats.h" +#include "proc_stats.h" +#endif + +/* Memalloc flags can be converted into pmr, ra or psplay flags. + * Ensure flags types are same size. + */ +static_assert(sizeof(PVRSRV_MEMALLOCFLAGS_T) == sizeof(PMR_FLAGS_T), + "Mismatch memalloc and pmr flags type size."); +static_assert(sizeof(PVRSRV_MEMALLOCFLAGS_T) == sizeof(RA_FLAGS_T), + "Mismatch memalloc and ra flags type size."); +static_assert(sizeof(PVRSRV_MEMALLOCFLAGS_T) == sizeof(IMG_PSPLAY_FLAGS_T), + "Mismatch memalloc and psplay flags type size."); + +/* A "context" for the physical memory block resource allocator. + * + * Context is probably the wrong word. + * + * There is almost certainly only one of these, ever, in the system. + * But, let's keep the notion of a context anyway, "just-in-case". + */ +static struct _PMR_CTX_ +{ + /* For debugging, and PDump, etc., let's issue a forever incrementing + * serial number to each allocation. + */ + IMG_UINT64 uiNextSerialNum; + + /* For security, we only allow a PMR to be mapped if the caller knows + * its key. 
We can pseudo-randomly generate keys + */ + IMG_UINT64 uiNextKey; + + /* For debugging only, I guess: Number of live PMRs */ + IMG_UINT32 uiNumLivePMRs; + + /* Lock for this structure */ + POS_LOCK hLock; + + /* In order to seed the uiNextKey, we enforce initialisation at driver + * load time. Also, we can debug check at driver unload that the PMR + * count is zero. + */ + IMG_BOOL bModuleInitialised; +} _gsSingletonPMRContext = { 1, 0, 0, NULL, IMG_FALSE }; + + +/* A PMR. One per physical allocation. May be "shared". + * + * "shared" is ambiguous. We need to be careful with terminology. + * There are two ways in which a PMR may be "shared" and we need to be sure + * that we are clear which we mean. + * + * i) multiple small allocations living together inside one PMR. + * + * ii) one single allocation filling a PMR but mapped into multiple memory + * contexts. + * + * This is more important further up the stack - at this level, all we care is + * that the PMR is being referenced multiple times. + */ +struct _PMR_ +{ + /* This object is strictly refcounted. References include: + * - mapping + * - live handles (to this object) + * - live export handles + * (thus it is normal for allocated and exported memory to have a refcount of 3) + * The object is destroyed when and only when the refcount reaches 0 + */ + + /* Physical address translation (device <> cpu) is done on a per device + * basis which means we need the physical heap info + */ + PHYS_HEAP *psPhysHeap; + + ATOMIC_T iRefCount; + + /* Lock count - this is the number of times PMRLockSysPhysAddresses() + * has been called, less the number of PMRUnlockSysPhysAddresses() + * calls. This is arguably here for debug reasons only, as the refcount + * is already incremented as a matter of course. + * Really, this just allows us to trap protocol errors: i.e. calling + * PMRSysPhysAddr(), without a lock, or calling + * PMRUnlockSysPhysAddresses() too many or too few times. 
+ */ + ATOMIC_T iLockCount; + + /* Lock for this structure */ + POS_LOCK hLock; + + /* Incrementing serial number to each allocation. */ + IMG_UINT64 uiSerialNum; + + /* For security, we only allow a PMR to be mapped if the caller knows + * its key. We can pseudo-randomly generate keys + */ + PMR_PASSWORD_T uiKey; + + /* Callbacks for per-flavour functions */ + const PMR_IMPL_FUNCTAB *psFuncTab; + + /* Data associated with the "subtype" */ + PMR_IMPL_PRIVDATA pvFlavourData; + + /* What kind of PMR do we have? */ + PMR_IMPL_TYPE eFlavour; + + /* And for pdump */ + const IMG_CHAR *pszPDumpDefaultMemspaceName; + + /* Allocation annotation */ + IMG_CHAR szAnnotation[DEVMEM_ANNOTATION_MAX_LEN]; + +#if defined(PDUMP) + + IMG_HANDLE hPDumpAllocHandle; + + IMG_UINT32 uiNumPDumpBlocks; +#endif + + /* Logical size of allocation. "logical", because a PMR can represent + * memory that will never physically exist. This is the amount of + * virtual space that the PMR would consume when it's mapped into a + * virtual allocation. + */ + PMR_SIZE_T uiLogicalSize; + + /* Mapping table for the allocation. + * PMR's can be sparse in which case not all the "logic" addresses in + * it are valid. We need to know which addresses are and aren't valid + * when mapping or reading the PMR. + * The mapping table translates "logical" offsets into physical offsets + * which is what we always pass to the PMR factory (so it doesn't have + * to be concerned about sparseness issues) + */ + PMR_MAPPING_TABLE *psMappingTable; + + /* Indicates whether this PMR has been allocated as sparse. + * The condition for this variable to be set at allocation time is: + * (numVirtChunks != numPhysChunks) || (numVirtChunks > 1) + */ + IMG_BOOL bSparseAlloc; + + /* Indicates whether this PMR has been unpinned. + * By default, all PMRs are pinned at creation. 
+ */ + IMG_BOOL bIsUnpinned; + + /* + * Flag that conveys mutability of the PMR: + * - TRUE indicates the PMR is immutable (no more memory changes) + * - FALSE means the memory layout associated with the PMR is mutable + * + * A PMR is always mutable by default but is marked immutable on the + * first export for the rest of its life. + * + * Also, any PMRs that track the same memory through imports are + * marked immutable as well. + */ + IMG_BOOL bNoLayoutChange; + + /* Minimum Physical Contiguity Guarantee. Might be called "page size", + * but that would be incorrect, as page size is something meaningful + * only in virtual realm. This contiguity guarantee provides an + * inequality that can be verified/asserted/whatever to ensure that + * this PMR conforms to the page size requirement of the place the PMR + * gets mapped. (May be used to select an appropriate heap in variable + * page size systems) + * + * The absolutely necessary condition is this: + * + * device MMU page size <= actual physical contiguity. + * + * We go one step further in order to be able to provide an early + * warning / early compatibility check and say this: + * + * device MMU page size <= + * 2**(uiLog2ContiguityGuarantee) <= + * actual physical contiguity. + * + * In this way, it is possible to make the page table reservation + * in the device MMU without even knowing the granularity of the + * physical memory (i.e. useful for being able to allocate virtual + * before physical) + */ + PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee; + + /* Flags. We store a copy of the "PMR flags" (usually a subset of the + * flags given at allocation time) and return them to any caller of + * PMR_Flags(). The intention of these flags is that the ones stored + * here are used to represent permissions, such that no one is able + * to map a PMR in a mode in which they are not allowed, e.g., + * writeable for a read-only PMR, etc. + */ + PMR_FLAGS_T uiFlags; + + /* Do we really need this? 
+ * For now we'll keep it, until we know we don't. + * NB: this is not the "memory context" in client terms - this is + * _purely_ the "PMR" context, of which there is almost certainly only + * ever one per system as a whole, but we'll keep the concept anyway, + * just-in-case. + */ + struct _PMR_CTX_ *psContext; + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + /* Stored handle to PMR RI entry */ + void *hRIHandle; +#endif +}; + +/* Do we need a struct for the export handle? + * I'll use one for now, but if nothing goes in it, we'll lose it + */ +struct _PMR_EXPORT_ +{ + struct _PMR_ *psPMR; +}; + +struct _PMR_PAGELIST_ +{ + struct _PMR_ *psReferencePMR; +}; + +PPVRSRV_DEVICE_NODE PMRGetExportDeviceNode(PMR_EXPORT *psExportPMR) +{ + PPVRSRV_DEVICE_NODE psReturnedDeviceNode = NULL; + + PVR_ASSERT(psExportPMR != NULL); + if (psExportPMR) + { + PVR_ASSERT(psExportPMR->psPMR != NULL); + if (psExportPMR->psPMR) + { + PVR_ASSERT(OSAtomicRead(&psExportPMR->psPMR->iRefCount) > 0); + if (OSAtomicRead(&psExportPMR->psPMR->iRefCount) > 0) + { + psReturnedDeviceNode = PMR_DeviceNode(psExportPMR->psPMR); + } + } + } + + return psReturnedDeviceNode; +} + +static PVRSRV_ERROR +_PMRCreate(PMR_SIZE_T uiLogicalSize, + PMR_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee, + PMR_FLAGS_T uiFlags, + PMR **ppsPMR) +{ + void *pvPMRLinAddr; + PMR *psPMR; + PMR_MAPPING_TABLE *psMappingTable; + struct _PMR_CTX_ *psContext; + IMG_UINT32 i, ui32Temp = 0; + IMG_UINT32 ui32Remainder; + PVRSRV_ERROR eError; + IMG_BOOL bSparse = IMG_FALSE; + + psContext = &_gsSingletonPMRContext; + + /* Do we have a sparse allocation? 
*/
+	if ( (ui32NumVirtChunks != ui32NumPhysChunks) ||
+	     (ui32NumVirtChunks > 1) )
+	{
+		bSparse = IMG_TRUE;
+	}
+
+	/* Extra checks required for sparse PMRs */
+	if (uiLogicalSize != uiChunkSize)
+	{
+		/* Check the logical size and chunk information agree with each other */
+		if (uiLogicalSize != (uiChunkSize * ui32NumVirtChunks))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Bad mapping size (uiLogicalSize = 0x%llx, uiChunkSize = 0x%llx, ui32NumVirtChunks = %d)",
+					__func__, (unsigned long long)uiLogicalSize, (unsigned long long)uiChunkSize, ui32NumVirtChunks));
+			return PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE;
+		}
+
+		/* Check that the chunk size is a multiple of the contiguity */
+		OSDivide64(uiChunkSize, (1<< uiLog2ContiguityGuarantee), &ui32Remainder);
+		if (ui32Remainder)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Bad chunk size, must be a multiple of the contiguity "
+					"(uiChunkSize = 0x%llx, uiLog2ContiguityGuarantee = %u)",
+					__func__,
+					(unsigned long long) uiChunkSize,
+					uiLog2ContiguityGuarantee));
+			return PVRSRV_ERROR_PMR_BAD_CHUNK_SIZE;
+		}
+	}
+
+	pvPMRLinAddr = OSAllocMem(sizeof(*psPMR) + sizeof(*psMappingTable) + sizeof(IMG_UINT32) * ui32NumVirtChunks);
+	PVR_RETURN_IF_NOMEM(pvPMRLinAddr);
+
+	psPMR = (PMR *) pvPMRLinAddr;
+	psMappingTable = IMG_OFFSET_ADDR(pvPMRLinAddr, sizeof(*psPMR));
+
+	/* Setup the mapping table */
+	psMappingTable->uiChunkSize = uiChunkSize;
+	psMappingTable->ui32NumVirtChunks = ui32NumVirtChunks;
+	psMappingTable->ui32NumPhysChunks = ui32NumPhysChunks;
+	OSCachedMemSet(&psMappingTable->aui32Translation[0], 0xFF, sizeof(psMappingTable->aui32Translation[0])*
+	               ui32NumVirtChunks);
+	for (i=0; i<ui32NumPhysChunks; i++)
+	{
+		ui32Temp = pui32MappingTable[i];
+		if (ui32Temp < ui32NumVirtChunks)
+		{
+			psMappingTable->aui32Translation[ui32Temp] = ui32Temp;
+		}
+		else
+		{
+			OSFreeMem(psPMR);
+			return PVRSRV_ERROR_PMR_INVALID_MAP_INDEX_ARRAY;
+		}
+	}
+
+	eError = OSLockCreate(&psPMR->hLock);
+	if (eError != PVRSRV_OK)
+	{
+		OSFreeMem(psPMR);
+		return eError;
+	}
+
+	/* Setup the PMR */
+	OSAtomicWrite(&psPMR->iRefCount, 0);
+
+	/* If allocation is not made on demand, it
will be backed now and + * backing will not be removed until the PMR is destroyed, therefore + * we can initialise the iLockCount to 1 rather than 0. + */ + OSAtomicWrite(&psPMR->iLockCount, (PVRSRV_CHECK_ON_DEMAND(uiFlags) ? 0 : 1)); + + psPMR->psContext = psContext; + psPMR->uiLogicalSize = uiLogicalSize; + psPMR->uiLog2ContiguityGuarantee = uiLog2ContiguityGuarantee; + psPMR->uiFlags = uiFlags; + psPMR->psMappingTable = psMappingTable; + psPMR->bSparseAlloc = bSparse; + psPMR->bIsUnpinned = IMG_FALSE; + psPMR->bNoLayoutChange = IMG_FALSE; + psPMR->szAnnotation[0] = '\0'; + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + psPMR->hRIHandle = NULL; +#endif + + OSLockAcquire(psContext->hLock); + psPMR->uiKey = psContext->uiNextKey; + psPMR->uiSerialNum = psContext->uiNextSerialNum; + psContext->uiNextKey = (0x80200003 * psContext->uiNextKey) + ^ (0xf00f0081 * (uintptr_t)pvPMRLinAddr); + psContext->uiNextSerialNum++; + *ppsPMR = psPMR; + PVR_DPF((PVR_DBG_MESSAGE, "pmr.c: created PMR @0x%p", psPMR)); + /* Increment live PMR count */ + psContext->uiNumLivePMRs++; + OSLockRelease(psContext->hLock); + + return PVRSRV_OK; +} + +/* This function returns true if the PMR is in use and false otherwise. 
+ * This function is not thread safe and hence the caller + * needs to ensure the thread safety by explicitly taking + * the lock on the PMR or through other means */ +IMG_BOOL PMRIsPMRLive(PMR *psPMR) +{ + return (OSAtomicRead(&psPMR->iRefCount) > 0); +} + +static IMG_UINT32 +_Ref(PMR *psPMR) +{ + PVR_ASSERT(OSAtomicRead(&psPMR->iRefCount) >= 0); + return OSAtomicIncrement(&psPMR->iRefCount); +} + +static IMG_UINT32 +_Unref(PMR *psPMR) +{ + PVR_ASSERT(OSAtomicRead(&psPMR->iRefCount) > 0); + return OSAtomicDecrement(&psPMR->iRefCount); +} + +static void +_UnrefAndMaybeDestroy(PMR *psPMR) +{ + PVRSRV_ERROR eError2; + struct _PMR_CTX_ *psCtx; + IMG_INT iRefCount; + + PVR_ASSERT(psPMR != NULL); + + /* Acquire PMR factory lock if provided */ + if (psPMR->psFuncTab->pfnGetPMRFactoryLock) + { + psPMR->psFuncTab->pfnGetPMRFactoryLock(); + } + + iRefCount = _Unref(psPMR); + + if (iRefCount == 0) + { + if (psPMR->psFuncTab->pfnFinalize != NULL) + { + eError2 = psPMR->psFuncTab->pfnFinalize(psPMR->pvFlavourData); + + /* PMR unref can be called asynchronously by the kernel or other + * third party modules (eg. display) which doesn't go through the + * usual services bridge. The same PMR can be referenced simultaneously + * in a different path that results in a race condition. + * Hence depending on the race condition, a factory may refuse to destroy + * the resource associated with this PMR if a reference on it was taken + * prior to unref. In that case the PMR factory function returns the error. + * + * When such an error is encountered, the factory needs to ensure the state + * associated with PMR is undisturbed. At this point we just bail out from + * freeing the PMR itself. The PMR handle will then be freed at a later point + * when the same PMR is unreferenced. 
+ * */ + if (PVRSRV_ERROR_PMR_STILL_REFERENCED == eError2) + { + if (psPMR->psFuncTab->pfnReleasePMRFactoryLock) + { + psPMR->psFuncTab->pfnReleasePMRFactoryLock(); + } + return; + } + PVR_ASSERT (eError2 == PVRSRV_OK); /* can we do better? */ + } +#if defined(PDUMP) + PDumpPMRFreePMR(psPMR, + psPMR->uiLogicalSize, + (1 << psPMR->uiLog2ContiguityGuarantee), + psPMR->uiLog2ContiguityGuarantee, + psPMR->hPDumpAllocHandle); +#endif + +#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) + /* This PMR is about to be destroyed, update its mmap stats record (if present) + * to avoid dangling pointer. Additionally, this is required because mmap stats + * are identified by PMRs and a new PMR down the line "might" get the same address + * as the one we're about to free and we'd like 2 different entries in mmaps + * stats for such cases */ + MMapStatsRemovePMR(psPMR); +#endif + +#ifdef PVRSRV_NEED_PVR_ASSERT + /* If not backed on demand, iLockCount should be 1 otherwise it should be 0 */ + PVR_ASSERT(OSAtomicRead(&psPMR->iLockCount) == (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 0 : 1)); +#endif + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + { + PVRSRV_ERROR eError; + + /* Delete RI entry */ + if (psPMR->hRIHandle) + { + eError = RIDeletePMREntryKM (psPMR->hRIHandle); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: RIDeletePMREntryKM failed: %s", + __func__, + PVRSRVGetErrorString(eError))); + /* continue destroying the PMR */ + } + } + } +#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ + psCtx = psPMR->psContext; + + OSLockDestroy(psPMR->hLock); + + /* Release PMR factory lock acquired if any */ + if (psPMR->psFuncTab->pfnReleasePMRFactoryLock) + { + psPMR->psFuncTab->pfnReleasePMRFactoryLock(); + } + + OSFreeMem(psPMR); + + /* Decrement live PMR count. 
Probably only of interest for debugging */ + PVR_ASSERT(psCtx->uiNumLivePMRs > 0); + + OSLockAcquire(psCtx->hLock); + psCtx->uiNumLivePMRs--; + OSLockRelease(psCtx->hLock); + } + else + { + /* Release PMR factory lock acquired if any */ + if (psPMR->psFuncTab->pfnReleasePMRFactoryLock) + { + psPMR->psFuncTab->pfnReleasePMRFactoryLock(); + } + } +} + +static IMG_BOOL _PMRIsSparse(const PMR *psPMR) +{ + return psPMR->bSparseAlloc; +} + +PVRSRV_ERROR +PMRCreatePMR(PHYS_HEAP *psPhysHeap, + PMR_SIZE_T uiLogicalSize, + PMR_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee, + PMR_FLAGS_T uiFlags, + const IMG_CHAR *pszAnnotation, + const PMR_IMPL_FUNCTAB *psFuncTab, + PMR_IMPL_PRIVDATA pvPrivData, + PMR_IMPL_TYPE eType, + PMR **ppsPMRPtr, + IMG_UINT32 ui32PDumpFlags) +{ + PMR *psPMR = NULL; + PVRSRV_ERROR eError; + + PVR_LOG_RETURN_IF_INVALID_PARAM(pszAnnotation != NULL, "pszAnnotation"); + + eError = _PMRCreate(uiLogicalSize, + uiChunkSize, + ui32NumPhysChunks, + ui32NumVirtChunks, + pui32MappingTable, + uiLog2ContiguityGuarantee, + uiFlags, + &psPMR); + PVR_GOTO_IF_ERROR(eError, e0); + + psPMR->psPhysHeap = psPhysHeap; + psPMR->psFuncTab = psFuncTab; + psPMR->pszPDumpDefaultMemspaceName = PhysHeapPDumpMemspaceName(psPhysHeap); + psPMR->pvFlavourData = pvPrivData; + psPMR->eFlavour = eType; + OSAtomicWrite(&psPMR->iRefCount, 1); + + OSStringLCopy(psPMR->szAnnotation, pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN); + +#if defined(PDUMP) + { + PMR_FLAGS_T uiFlags = psPMR->uiFlags; + IMG_BOOL bInitialise = IMG_FALSE; + IMG_UINT32 ui32InitValue = 0; + + if (PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags)) + { + bInitialise = IMG_TRUE; + } + else if (PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags)) + { + ui32InitValue = 0xDEADBEEF; + bInitialise = IMG_TRUE; + } + + PDumpPMRMallocPMR(psPMR, + (uiChunkSize * ui32NumVirtChunks), + 1ULL<hPDumpAllocHandle, + ui32PDumpFlags); + } +#endif + + 
*ppsPMRPtr = psPMR; + + return PVRSRV_OK; + + /* Error exit paths follow */ +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +PVRSRV_ERROR PMRLockSysPhysAddressesNested(PMR *psPMR, + IMG_UINT32 ui32NestingLevel) +{ + PVRSRV_ERROR eError; + + PVR_ASSERT(psPMR != NULL); + + /* Note: taking this lock is not required to protect the PMR reference + * count, because the PMR reference count is atomic. Rather, taking + * the lock here guarantees that no caller will exit this function + * without the underlying physical addresses being locked. + */ + OSLockAcquireNested(psPMR->hLock, ui32NestingLevel); + /* We also count the locks as references, so that the PMR is not freed + * while someone is using a physical address. + * "lock" here simply means incrementing the refcount. It means the + * refcount is multipurpose, but that's okay. We only have to promise + * that physical addresses are valid after this point, and remain valid + * until the corresponding PMRUnlockSysPhysAddressesOSMem() + */ + _Ref(psPMR); + + /* Also count locks separately from other types of references, to + * allow for debug assertions + */ + + /* Only call callback if lockcount transitions from 0 to 1 (or 1 to 2 if not backed on demand) */ + if (OSAtomicIncrement(&psPMR->iLockCount) == (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 1 : 2)) + { + if (psPMR->psFuncTab->pfnLockPhysAddresses != NULL) + { + /* must always have lock and unlock in pairs! 
*/ + PVR_ASSERT(psPMR->psFuncTab->pfnUnlockPhysAddresses != NULL); + + eError = psPMR->psFuncTab->pfnLockPhysAddresses(psPMR->pvFlavourData); + + PVR_GOTO_IF_ERROR(eError, e1); + } + } + OSLockRelease(psPMR->hLock); + + return PVRSRV_OK; + +e1: + OSAtomicDecrement(&psPMR->iLockCount); + _Unref(psPMR); + PVR_ASSERT(OSAtomicRead(&psPMR->iRefCount) != 0); + OSLockRelease(psPMR->hLock); + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +PVRSRV_ERROR +PMRLockSysPhysAddresses(PMR *psPMR) +{ + return PMRLockSysPhysAddressesNested(psPMR, 0); +} + +PVRSRV_ERROR +PMRUnlockSysPhysAddresses(PMR *psPMR) +{ + return PMRUnlockSysPhysAddressesNested(psPMR, 2); +} + +PVRSRV_ERROR +PMRUnlockSysPhysAddressesNested(PMR *psPMR, IMG_UINT32 ui32NestingLevel) +{ + PVRSRV_ERROR eError; + + PVR_ASSERT(psPMR != NULL); + + /* Acquiring the lock here, as well as during the Lock operation ensures + * the lock count hitting zero and the unlocking of the phys addresses is + * an atomic operation + */ + OSLockAcquireNested(psPMR->hLock, ui32NestingLevel); + PVR_ASSERT(OSAtomicRead(&psPMR->iLockCount) > (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 0 : 1)); + + if (OSAtomicDecrement(&psPMR->iLockCount) == (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 0 : 1)) + { + if (psPMR->psFuncTab->pfnUnlockPhysAddresses != NULL) + { + PVR_ASSERT(psPMR->psFuncTab->pfnLockPhysAddresses != NULL); + + eError = psPMR->psFuncTab->pfnUnlockPhysAddresses(psPMR->pvFlavourData); + /* must never fail */ + PVR_ASSERT(eError == PVRSRV_OK); + } + } + + OSLockRelease(psPMR->hLock); + + /* We also count the locks as references, so that the PMR is not + * freed while someone is using a physical address. 
+ */ + _UnrefAndMaybeDestroy(psPMR); + + return PVRSRV_OK; +} + +PVRSRV_ERROR +PMRUnpinPMR(PMR *psPMR, IMG_BOOL bDevMapped) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_ASSERT(psPMR != NULL); + + OSLockAcquire(psPMR->hLock); + /* Stop if we still have references on the PMR */ + if ( ( bDevMapped && (OSAtomicRead(&psPMR->iRefCount) > 2)) + || (!bDevMapped && (OSAtomicRead(&psPMR->iRefCount) > 1)) ) + { + OSLockRelease(psPMR->hLock); + PVR_DPF((PVR_DBG_ERROR, + "%s: PMR is still referenced %u times. " + "That means this PMR is probably exported or used somewhere else. " + "Allowed are 2 references if it is mapped to device, otherwise 1.", + __func__, + OSAtomicRead(&psPMR->iRefCount))); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_STILL_REFERENCED, e_exit); + } + OSLockRelease(psPMR->hLock); + + if (psPMR->psFuncTab->pfnUnpinMem != NULL) + { + eError = psPMR->psFuncTab->pfnUnpinMem(psPMR->pvFlavourData); + if (eError == PVRSRV_OK) + { + psPMR->bIsUnpinned = IMG_TRUE; + } + } + +e_exit: + return eError; +} + +PVRSRV_ERROR +PMRPinPMR(PMR *psPMR) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_ASSERT(psPMR != NULL); + + if (psPMR->psFuncTab->pfnPinMem != NULL) + { + eError = psPMR->psFuncTab->pfnPinMem(psPMR->pvFlavourData, + psPMR->psMappingTable); + if (eError == PVRSRV_OK) + { + psPMR->bIsUnpinned = IMG_FALSE; + } + } + + return eError; +} + +PVRSRV_ERROR +PMRMakeLocalImportHandle(PMR *psPMR, + PMR **ppsPMR) +{ + PMRRefPMR(psPMR); + *ppsPMR = psPMR; + return PVRSRV_OK; +} + +PVRSRV_ERROR +PMRUnmakeLocalImportHandle(PMR *psPMR) +{ + PMRUnrefPMR(psPMR); + return PVRSRV_OK; +} + +/* + Note: + We pass back the PMR as it was passed in as a different handle type + (DEVMEM_MEM_IMPORT) and it allows us to change the import structure + type if we should need to embed any meta data in it. 
+ */ +PVRSRV_ERROR +PMRLocalImportPMR(PMR *psPMR, + PMR **ppsPMR, + IMG_DEVMEM_SIZE_T *puiSize, + IMG_DEVMEM_ALIGN_T *puiAlign) +{ + _Ref(psPMR); + + /* Return the PMR */ + *ppsPMR = psPMR; + *puiSize = psPMR->uiLogicalSize; + *puiAlign = 1ULL << psPMR->uiLog2ContiguityGuarantee; + return PVRSRV_OK; +} + +PVRSRV_ERROR +PMRGetUID(PMR *psPMR, + IMG_UINT64 *pui64UID) +{ + PVR_ASSERT(psPMR != NULL); + + *pui64UID = psPMR->uiSerialNum; + + return PVRSRV_OK; +} + +#if defined(SUPPORT_INSECURE_EXPORT) +PVRSRV_ERROR +PMRExportPMR(PMR *psPMR, + PMR_EXPORT **ppsPMRExportPtr, + PMR_SIZE_T *puiSize, + PMR_LOG2ALIGN_T *puiLog2Contig, + PMR_PASSWORD_T *puiPassword) +{ + IMG_UINT64 uiPassword; + PMR_EXPORT *psPMRExport; + + uiPassword = psPMR->uiKey; + + psPMRExport = OSAllocMem(sizeof(*psPMRExport)); + PVR_RETURN_IF_NOMEM(psPMRExport); + + psPMRExport->psPMR = psPMR; + _Ref(psPMR); + /* The layout of a PMR can't change once exported + * to make sure the importers view of the memory is + * the same as exporter. 
*/ + psPMR->bNoLayoutChange = IMG_TRUE; + + *ppsPMRExportPtr = psPMRExport; + *puiSize = psPMR->uiLogicalSize; + *puiLog2Contig = psPMR->uiLog2ContiguityGuarantee; + *puiPassword = uiPassword; + + return PVRSRV_OK; +} + + +PVRSRV_ERROR +PMRUnexportPMR(PMR_EXPORT *psPMRExport) +{ + PVR_ASSERT(psPMRExport != NULL); + PVR_ASSERT(psPMRExport->psPMR != NULL); + PVR_ASSERT(OSAtomicRead(&psPMRExport->psPMR->iRefCount) > 0); + + _UnrefAndMaybeDestroy(psPMRExport->psPMR); + + OSFreeMem(psPMRExport); + + return PVRSRV_OK; +} + + +PVRSRV_ERROR +PMRImportPMR(PMR_EXPORT *psPMRExport, + PMR_PASSWORD_T uiPassword, + PMR_SIZE_T uiSize, + PMR_LOG2ALIGN_T uiLog2Contig, + PMR **ppsPMR) +{ + PMR *psPMR; + + PVR_ASSERT(OSAtomicRead(&psPMRExport->psPMR->iRefCount) > 0); + + psPMR = psPMRExport->psPMR; + + PVR_ASSERT((psPMR->bNoLayoutChange == IMG_TRUE)); + + if (psPMR->uiKey != uiPassword) + { + PVR_DPF((PVR_DBG_ERROR, + "PMRImport: Import failed, password specified does not match the export")); + return PVRSRV_ERROR_PMR_WRONG_PASSWORD_OR_STALE_PMR; + } + + if (psPMR->uiLogicalSize != uiSize || psPMR->uiLog2ContiguityGuarantee != uiLog2Contig) + { + return PVRSRV_ERROR_PMR_MISMATCHED_ATTRIBUTES; + } + + _Ref(psPMR); + + *ppsPMR = psPMR; + + return PVRSRV_OK; +} + +PVRSRV_ERROR +PMRUnimportPMR(PMR *psPMR) +{ + _UnrefAndMaybeDestroy(psPMR); + + return PVRSRV_OK; +} + +#else /* if defined(SUPPORT_INSECURE_EXPORT) */ + +PVRSRV_ERROR +PMRExportPMR(PMR *psPMR, + PMR_EXPORT **ppsPMRExportPtr, + PMR_SIZE_T *puiSize, + PMR_LOG2ALIGN_T *puiLog2Contig, + PMR_PASSWORD_T *puiPassword) +{ + PVR_UNREFERENCED_PARAMETER(psPMR); + PVR_UNREFERENCED_PARAMETER(ppsPMRExportPtr); + PVR_UNREFERENCED_PARAMETER(puiSize); + PVR_UNREFERENCED_PARAMETER(puiLog2Contig); + PVR_UNREFERENCED_PARAMETER(puiPassword); + + return PVRSRV_OK; +} + + +PVRSRV_ERROR +PMRUnexportPMR(PMR_EXPORT *psPMRExport) +{ + PVR_UNREFERENCED_PARAMETER(psPMRExport); + return PVRSRV_OK; +} + + +PVRSRV_ERROR +PMRImportPMR(PMR_EXPORT 
*psPMRExport, + PMR_PASSWORD_T uiPassword, + PMR_SIZE_T uiSize, + PMR_LOG2ALIGN_T uiLog2Contig, + PMR **ppsPMR) +{ + PVR_UNREFERENCED_PARAMETER(psPMRExport); + PVR_UNREFERENCED_PARAMETER(uiPassword); + PVR_UNREFERENCED_PARAMETER(uiSize); + PVR_UNREFERENCED_PARAMETER(uiLog2Contig); + PVR_UNREFERENCED_PARAMETER(ppsPMR); + + return PVRSRV_OK; +} + +PVRSRV_ERROR +PMRUnimportPMR(PMR *psPMR) +{ + PVR_UNREFERENCED_PARAMETER(psPMR); + return PVRSRV_OK; +} +#endif /* if defined(SUPPORT_INSECURE_EXPORT) */ + +#if defined(SUPPORT_SECURE_EXPORT) +PVRSRV_ERROR PMRSecureUnexportPMR(PMR *psPMR) +{ + _UnrefAndMaybeDestroy(psPMR); + return PVRSRV_OK; +} + +static PVRSRV_ERROR _ReleaseSecurePMR(void *psExport) +{ + return PMRSecureUnexportPMR(psExport); +} + +PVRSRV_ERROR PMRSecureExportPMR(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDevNode, + PMR *psPMR, + IMG_SECURE_TYPE *phSecure, + PMR **ppsPMR, + CONNECTION_DATA **ppsSecureConnection) +{ + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(psDevNode); + PVR_UNREFERENCED_PARAMETER(ppsSecureConnection); + + /* We are acquiring reference to PMR here because OSSecureExport + * releases bridge lock and PMR lock for a moment and we don't want PMR + * to be removed by other thread in the meantime. 
*/ + _Ref(psPMR); + + eError = OSSecureExport("secure_pmr", + _ReleaseSecurePMR, + (void *) psPMR, + phSecure); + PVR_GOTO_IF_ERROR(eError, e0); + + *ppsPMR = psPMR; + + /* Mark the PMR immutable once exported + * This allows the importers and exporter to have + * the same view of the memory */ + psPMR->bNoLayoutChange = IMG_TRUE; + + return PVRSRV_OK; +e0: + PVR_ASSERT(eError != PVRSRV_OK); + _UnrefAndMaybeDestroy(psPMR); + return eError; +} + +PVRSRV_ERROR PMRSecureImportPMR(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_SECURE_TYPE hSecure, + PMR **ppsPMR, + IMG_DEVMEM_SIZE_T *puiSize, + IMG_DEVMEM_ALIGN_T *puiAlign) +{ + PVRSRV_ERROR eError; + PMR *psPMR; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + eError = OSSecureImport(hSecure, (void **) &psPMR); + PVR_GOTO_IF_ERROR(eError, e0); + + PVR_LOG_RETURN_IF_FALSE(PhysHeapDeviceNode(psPMR->psPhysHeap) == psDevNode, + "PMR invalid for this device", + PVRSRV_ERROR_PMR_NOT_PERMITTED); + + _Ref(psPMR); + /* The PMR should be immutable once exported + * This allows the importers and exporter to have + * the same view of the memory */ + PVR_ASSERT(psPMR->bNoLayoutChange == IMG_TRUE); + + /* Return the PMR */ + *ppsPMR = psPMR; + *puiSize = psPMR->uiLogicalSize; + *puiAlign = 1ull << psPMR->uiLog2ContiguityGuarantee; + return PVRSRV_OK; +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +PVRSRV_ERROR PMRSecureUnimportPMR(PMR *psPMR) +{ + _UnrefAndMaybeDestroy(psPMR); + return PVRSRV_OK; +} +#endif + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) +PVRSRV_ERROR +PMRStoreRIHandle(PMR *psPMR, + void *hRIHandle) +{ + PVR_ASSERT(psPMR != NULL); + + psPMR->hRIHandle = hRIHandle; + return PVRSRV_OK; +} +#endif + +static PVRSRV_ERROR +_PMRAcquireKernelMappingData(PMR *psPMR, + size_t uiLogicalOffset, + size_t uiSize, + void **ppvKernelAddressOut, + size_t *puiLengthOut, + IMG_HANDLE *phPrivOut, + IMG_BOOL bMapSparse) +{ + PVRSRV_ERROR eError; + void *pvKernelAddress; + IMG_HANDLE hPriv; + + 
PVR_ASSERT(psPMR != NULL); + + if (_PMRIsSparse(psPMR) && !bMapSparse) + { + /* Mapping of sparse allocations must be signalled. */ + return PVRSRV_ERROR_PMR_NOT_PERMITTED; + } + + /* Acquire/Release functions must be overridden in pairs */ + if (psPMR->psFuncTab->pfnAcquireKernelMappingData == NULL) + { + PVR_ASSERT (psPMR->psFuncTab->pfnReleaseKernelMappingData == NULL); + + /* If PMR implementation does not supply this pair of + * functions, it means they do not permit the PMR to be mapped + * into kernel memory at all + */ + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_NOT_PERMITTED, e0); + } + PVR_ASSERT (psPMR->psFuncTab->pfnReleaseKernelMappingData != NULL); + + eError = psPMR->psFuncTab->pfnAcquireKernelMappingData(psPMR->pvFlavourData, + uiLogicalOffset, + uiSize, + &pvKernelAddress, + &hPriv, + psPMR->uiFlags); + PVR_GOTO_IF_ERROR(eError, e0); + + *ppvKernelAddressOut = pvKernelAddress; + if (uiSize == 0) + { + /* Zero size means map in the whole PMR ... */ + *puiLengthOut = (size_t)psPMR->uiLogicalSize; + } + else if (uiSize > (1 << psPMR->uiLog2ContiguityGuarantee)) + { + /* ... map in the requested pages ... */ + *puiLengthOut = uiSize; + } + else + { + /* ... 
otherwise we just map in one page */ + *puiLengthOut = 1 << psPMR->uiLog2ContiguityGuarantee; + } + *phPrivOut = hPriv; + + return PVRSRV_OK; + +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +PVRSRV_ERROR +PMRAcquireKernelMappingData(PMR *psPMR, + size_t uiLogicalOffset, + size_t uiSize, + void **ppvKernelAddressOut, + size_t *puiLengthOut, + IMG_HANDLE *phPrivOut) +{ + return _PMRAcquireKernelMappingData(psPMR, + uiLogicalOffset, + uiSize, + ppvKernelAddressOut, + puiLengthOut, + phPrivOut, + IMG_FALSE); +} + +PVRSRV_ERROR +PMRAcquireSparseKernelMappingData(PMR *psPMR, + size_t uiLogicalOffset, + size_t uiSize, + void **ppvKernelAddressOut, + size_t *puiLengthOut, + IMG_HANDLE *phPrivOut) +{ + return _PMRAcquireKernelMappingData(psPMR, + uiLogicalOffset, + uiSize, + ppvKernelAddressOut, + puiLengthOut, + phPrivOut, + IMG_TRUE); +} + +PVRSRV_ERROR +PMRReleaseKernelMappingData(PMR *psPMR, + IMG_HANDLE hPriv) +{ + PVR_ASSERT (psPMR->psFuncTab->pfnAcquireKernelMappingData != NULL); + PVR_ASSERT (psPMR->psFuncTab->pfnReleaseKernelMappingData != NULL); + + psPMR->psFuncTab->pfnReleaseKernelMappingData(psPMR->pvFlavourData, + hPriv); + + return PVRSRV_OK; +} + +#if defined(INTEGRITY_OS) + +PVRSRV_ERROR +PMRMapMemoryObject(PMR *psPMR, + IMG_HANDLE *phMemObj, + void **pvClientAddr, + IMG_HANDLE *phPrivOut) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_HANDLE hPriv = *phPrivOut; + + PVR_ASSERT (psPMR->psFuncTab->pfnMapMemoryObject != NULL); + + eError = psPMR->psFuncTab->pfnMapMemoryObject(hPriv, phMemObj, pvClientAddr, phPrivOut); + + return eError; +} + +PVRSRV_ERROR +PMRUnmapMemoryObject(PMR *psPMR, + IMG_HANDLE hPriv) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_ASSERT (psPMR->psFuncTab->pfnUnmapMemoryObject != NULL); + + eError = psPMR->psFuncTab->pfnUnmapMemoryObject(hPriv); + + return eError; +} + +#endif /* INTEGRITY_OS */ + +/* + _PMRLogicalOffsetToPhysicalOffset + + Translate between the "logical" offset which the upper levels + provide and the 
physical offset which is what the PMR + factories works on. + + As well as returning the physical offset we return the number of + bytes remaining till the next chunk and if this chunk is valid. + + For multi-page operations, upper layers communicate their + Log2PageSize else argument is redundant (set to zero). + */ + +static void +_PMRLogicalOffsetToPhysicalOffset(const PMR *psPMR, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32NumOfPages, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_DEVMEM_OFFSET_T *puiPhysicalOffset, + IMG_UINT32 *pui32BytesRemain, + IMG_BOOL *bValid) +{ + PMR_MAPPING_TABLE *psMappingTable = psPMR->psMappingTable; + IMG_DEVMEM_OFFSET_T uiPageSize = 1ULL << ui32Log2PageSize; + IMG_DEVMEM_OFFSET_T uiOffset = uiLogicalOffset; + IMG_UINT64 ui64ChunkIndex; + IMG_UINT32 ui32Remain; + IMG_UINT32 idx; + + /* Must be translating at least a page */ + PVR_ASSERT(ui32NumOfPages); + + if (psMappingTable->ui32NumPhysChunks == psMappingTable->ui32NumVirtChunks) + { + /* Fast path the common case, as logical and physical offsets are + equal we assume the ui32NumOfPages span is also valid */ + *pui32BytesRemain = TRUNCATE_64BITS_TO_32BITS(psPMR->uiLogicalSize - uiOffset); + puiPhysicalOffset[0] = uiOffset; + bValid[0] = IMG_TRUE; + + if (ui32NumOfPages > 1) + { + /* initial offset may not be page aligned, round down */ + uiOffset &= ~(uiPageSize-1); + for (idx=1; idx < ui32NumOfPages; idx++) + { + uiOffset += uiPageSize; + puiPhysicalOffset[idx] = uiOffset; + bValid[idx] = IMG_TRUE; + } + } + } + else + { + for (idx=0; idx < ui32NumOfPages; idx++) + { + ui64ChunkIndex = OSDivide64r64( + uiOffset, + TRUNCATE_64BITS_TO_32BITS(psMappingTable->uiChunkSize), + &ui32Remain); + + if (psMappingTable->aui32Translation[ui64ChunkIndex] == TRANSLATION_INVALID) + { + bValid[idx] = IMG_FALSE; + } + else + { + bValid[idx] = IMG_TRUE; + } + + if (idx == 0) + { + if (ui32Remain == 0) + { + /* Start of chunk so return the chunk size */ + *pui32BytesRemain = 
TRUNCATE_64BITS_TO_32BITS(psMappingTable->uiChunkSize); + } + else + { + *pui32BytesRemain = TRUNCATE_64BITS_TO_32BITS(psMappingTable->uiChunkSize - ui32Remain); + } + + puiPhysicalOffset[idx] = (psMappingTable->aui32Translation[ui64ChunkIndex] * psMappingTable->uiChunkSize) + ui32Remain; + + /* initial offset may not be page aligned, round down */ + uiOffset &= ~(uiPageSize-1); + } + else + { + puiPhysicalOffset[idx] = psMappingTable->aui32Translation[ui64ChunkIndex] * psMappingTable->uiChunkSize + ui32Remain; + } + uiOffset += uiPageSize; + } + } +} + +static PVRSRV_ERROR +_PMR_ReadBytesPhysical(PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiPhysicalOffset, + IMG_UINT8 *pcBuffer, + size_t uiBufSz, + size_t *puiNumBytes) +{ + PVRSRV_ERROR eError; + + if (psPMR->psFuncTab->pfnReadBytes != NULL) + { + /* defer to callback if present */ + + eError = PMRLockSysPhysAddresses(psPMR); + PVR_GOTO_IF_ERROR(eError, e0); + + eError = psPMR->psFuncTab->pfnReadBytes(psPMR->pvFlavourData, + uiPhysicalOffset, + pcBuffer, + uiBufSz, + puiNumBytes); + PMRUnlockSysPhysAddresses(psPMR); + PVR_GOTO_IF_ERROR(eError, e0); + } + else if (psPMR->psFuncTab->pfnAcquireKernelMappingData) + { + /* "default" handler for reading bytes */ + + IMG_HANDLE hKernelMappingHandle; + IMG_UINT8 *pcKernelAddress; + + eError = psPMR->psFuncTab->pfnAcquireKernelMappingData(psPMR->pvFlavourData, + (size_t) uiPhysicalOffset, + uiBufSz, + (void **)&pcKernelAddress, + &hKernelMappingHandle, + psPMR->uiFlags); + PVR_GOTO_IF_ERROR(eError, e0); + + /* Use the conservative 'DeviceMemCopy' here because we can't + * know if this PMR will be mapped cached. 
+ */ + + OSDeviceMemCopy(&pcBuffer[0], pcKernelAddress, uiBufSz); + *puiNumBytes = uiBufSz; + + psPMR->psFuncTab->pfnReleaseKernelMappingData(psPMR->pvFlavourData, + hKernelMappingHandle); + } + else + { + OSPanic(); + PVR_LOG_GOTO_WITH_ERROR("psPMR->psFuncTab", eError, PVRSRV_ERROR_INVALID_PARAMS, e0); + } + + return PVRSRV_OK; + + /* Error exit paths follow */ +e0: + PVR_ASSERT(eError != PVRSRV_OK); + *puiNumBytes = 0; + return eError; +} + +PVRSRV_ERROR +PMR_ReadBytes(PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT8 *pcBuffer, + size_t uiBufSz, + size_t *puiNumBytes) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_DEVMEM_OFFSET_T uiPhysicalOffset; + size_t uiBytesCopied = 0; + + if (uiLogicalOffset + uiBufSz > psPMR->uiLogicalSize) + { + uiBufSz = TRUNCATE_64BITS_TO_32BITS(psPMR->uiLogicalSize - uiLogicalOffset); + } + PVR_ASSERT(uiBufSz > 0); + PVR_ASSERT(uiBufSz <= psPMR->uiLogicalSize); + + /* PMR implementations can override this. If they don't, a "default" + * handler uses kernel virtual mappings. If the kernel can't + * provide a kernel virtual mapping, this function fails. 
+ */ + PVR_ASSERT(psPMR->psFuncTab->pfnAcquireKernelMappingData != NULL || + psPMR->psFuncTab->pfnReadBytes != NULL); + + while (uiBytesCopied != uiBufSz) + { + IMG_UINT32 ui32Remain; + size_t uiBytesToCopy; + size_t uiRead; + IMG_BOOL bValid; + + _PMRLogicalOffsetToPhysicalOffset(psPMR, + 0, + 1, + uiLogicalOffset, + &uiPhysicalOffset, + &ui32Remain, + &bValid); + /* Copy till either then end of the chunk or end + * of the buffer + */ + uiBytesToCopy = MIN(uiBufSz - uiBytesCopied, ui32Remain); + + if (bValid) + { + /* Read the data from the PMR */ + eError = _PMR_ReadBytesPhysical(psPMR, + uiPhysicalOffset, + &pcBuffer[uiBytesCopied], + uiBytesToCopy, + &uiRead); + if ((eError != PVRSRV_OK) || (uiRead != uiBytesToCopy)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to read chunk (eError = %s, uiRead = " IMG_SIZE_FMTSPEC " uiBytesToCopy = " IMG_SIZE_FMTSPEC ")", + __func__, + PVRSRVGetErrorString(eError), + uiRead, + uiBytesToCopy)); + /* Bail out as soon as we hit an error */ + break; + } + } + else + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Invalid phys offset at logical offset (" IMG_DEVMEM_OFFSET_FMTSPEC ") logical size (" IMG_DEVMEM_OFFSET_FMTSPEC ")", + __func__, + uiLogicalOffset, + psPMR->uiLogicalSize)); + /* Fill invalid chunks with 0 */ + OSCachedMemSet(&pcBuffer[uiBytesCopied], 0, uiBytesToCopy); + uiRead = uiBytesToCopy; + eError = PVRSRV_ERROR_FAILED_TO_GET_PHYS_ADDR; + } + uiLogicalOffset += uiRead; + uiBytesCopied += uiRead; + } + + *puiNumBytes = uiBytesCopied; + return eError; +} + +static PVRSRV_ERROR +_PMR_WriteBytesPhysical(PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiPhysicalOffset, + IMG_UINT8 *pcBuffer, + size_t uiBufSz, + size_t *puiNumBytes) +{ + PVRSRV_ERROR eError; + + if (psPMR->psFuncTab->pfnWriteBytes != NULL) + { + /* defer to callback if present */ + + eError = PMRLockSysPhysAddresses(psPMR); + PVR_GOTO_IF_ERROR(eError, e0); + + eError = psPMR->psFuncTab->pfnWriteBytes(psPMR->pvFlavourData, + uiPhysicalOffset, + pcBuffer, + uiBufSz, + 
puiNumBytes); + PMRUnlockSysPhysAddresses(psPMR); + PVR_GOTO_IF_ERROR(eError, e0); + } + else if (psPMR->psFuncTab->pfnAcquireKernelMappingData) + { + /* "default" handler for reading bytes */ + + IMG_HANDLE hKernelMappingHandle; + IMG_UINT8 *pcKernelAddress; + + eError = psPMR->psFuncTab->pfnAcquireKernelMappingData(psPMR->pvFlavourData, + (size_t) uiPhysicalOffset, + uiBufSz, + (void **)&pcKernelAddress, + &hKernelMappingHandle, + psPMR->uiFlags); + PVR_GOTO_IF_ERROR(eError, e0); + + /* Use the conservative 'DeviceMemCopy' here because we can't know + * if this PMR will be mapped cached. + */ + + OSDeviceMemCopy(pcKernelAddress, &pcBuffer[0], uiBufSz); + *puiNumBytes = uiBufSz; + + psPMR->psFuncTab->pfnReleaseKernelMappingData(psPMR->pvFlavourData, + hKernelMappingHandle); + } + else + { + /* The write callback is optional as it's only required by the + * debug tools + */ + OSPanic(); + PVR_LOG_GOTO_WITH_ERROR("psPMR->psFuncTab", eError, PVRSRV_ERROR_PMR_NOT_PERMITTED, e0); + } + + return PVRSRV_OK; + + /* Error exit paths follow */ +e0: + PVR_ASSERT(eError != PVRSRV_OK); + *puiNumBytes = 0; + return eError; +} + +PVRSRV_ERROR +PMR_WriteBytes(PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT8 *pcBuffer, + size_t uiBufSz, + size_t *puiNumBytes) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_DEVMEM_OFFSET_T uiPhysicalOffset; + size_t uiBytesCopied = 0; + + if (uiLogicalOffset + uiBufSz > psPMR->uiLogicalSize) + { + uiBufSz = TRUNCATE_64BITS_TO_32BITS(psPMR->uiLogicalSize - uiLogicalOffset); + } + PVR_ASSERT(uiBufSz > 0); + PVR_ASSERT(uiBufSz <= psPMR->uiLogicalSize); + + /* PMR implementations can override this. If they don't, a "default" + * handler uses kernel virtual mappings. If the kernel can't provide + * a kernel virtual mapping, this function fails. 
+ */ + PVR_ASSERT(psPMR->psFuncTab->pfnAcquireKernelMappingData != NULL || + psPMR->psFuncTab->pfnWriteBytes != NULL); + + while (uiBytesCopied != uiBufSz) + { + IMG_UINT32 ui32Remain; + size_t uiBytesToCopy; + size_t uiWrite; + IMG_BOOL bValid; + + _PMRLogicalOffsetToPhysicalOffset(psPMR, + 0, + 1, + uiLogicalOffset, + &uiPhysicalOffset, + &ui32Remain, + &bValid); + + /* Copy till either then end of the chunk or end of the buffer + */ + uiBytesToCopy = MIN(uiBufSz - uiBytesCopied, ui32Remain); + + if (bValid) + { + /* Write the data to the PMR */ + eError = _PMR_WriteBytesPhysical(psPMR, + uiPhysicalOffset, + &pcBuffer[uiBytesCopied], + uiBytesToCopy, + &uiWrite); + if ((eError != PVRSRV_OK) || (uiWrite != uiBytesToCopy)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to read chunk (eError = %s, uiWrite = " IMG_SIZE_FMTSPEC " uiBytesToCopy = " IMG_SIZE_FMTSPEC ")", + __func__, + PVRSRVGetErrorString(eError), + uiWrite, + uiBytesToCopy)); + /* Bail out as soon as we hit an error */ + break; + } + } + else + { + /* Ignore writes to invalid pages */ + uiWrite = uiBytesToCopy; + } + uiLogicalOffset += uiWrite; + uiBytesCopied += uiWrite; + } + + *puiNumBytes = uiBytesCopied; + return eError; +} + +PVRSRV_ERROR +PMRMMapPMR(PMR *psPMR, PMR_MMAP_DATA pOSMMapData) +{ + if (psPMR->psFuncTab->pfnMMap) + { + return psPMR->psFuncTab->pfnMMap(psPMR->pvFlavourData, psPMR, pOSMMapData); + } + + return OSMMapPMRGeneric(psPMR, pOSMMapData); +} + +void +PMRRefPMR(PMR *psPMR) +{ + PVR_ASSERT(psPMR != NULL); + _Ref(psPMR); +} + +PVRSRV_ERROR +PMRUnrefPMR(PMR *psPMR) +{ + _UnrefAndMaybeDestroy(psPMR); + return PVRSRV_OK; +} + +PVRSRV_ERROR +PMRUnrefUnlockPMR(PMR *psPMR) +{ + PMRUnlockSysPhysAddresses(psPMR); + + PMRUnrefPMR(psPMR); + + return PVRSRV_OK; +} + +PVRSRV_DEVICE_NODE * +PMR_DeviceNode(const PMR *psPMR) +{ + PVR_ASSERT(psPMR != NULL); + + return PhysHeapDeviceNode(psPMR->psPhysHeap); +} + +PMR_FLAGS_T +PMR_Flags(const PMR *psPMR) +{ + PVR_ASSERT(psPMR != NULL); + + return 
psPMR->uiFlags; +} + +IMG_BOOL +PMR_IsSparse(const PMR *psPMR) +{ + PVR_ASSERT(psPMR != NULL); + + return _PMRIsSparse(psPMR); +} + +IMG_BOOL +PMR_IsUnpinned(const PMR *psPMR) +{ + PVR_ASSERT(psPMR != NULL); + + return psPMR->bIsUnpinned; +} + +/* Function that alters the mutability property + * of the PMR + * Setting it to TRUE makes sure the PMR memory layout + * can't be changed through future calls */ +void +PMR_SetLayoutFixed(PMR *psPMR, IMG_BOOL bFlag) +{ + PVR_ASSERT(psPMR != NULL); + + psPMR->bNoLayoutChange = bFlag; +} + +IMG_BOOL PMR_IsMemLayoutFixed(PMR *psPMR) +{ + PVR_ASSERT(psPMR != NULL); + + return psPMR->bNoLayoutChange; +} + +PVRSRV_ERROR +PMR_LogicalSize(const PMR *psPMR, + IMG_DEVMEM_SIZE_T *puiLogicalSize) +{ + PVR_ASSERT(psPMR != NULL); + + *puiLogicalSize = psPMR->uiLogicalSize; + return PVRSRV_OK; +} + +PVRSRV_ERROR +PMR_PhysicalSize(const PMR *psPMR, + IMG_DEVMEM_SIZE_T *puiPhysicalSize) +{ + PVR_ASSERT(psPMR != NULL); + + /* iLockCount will be > 0 for any backed PMR (backed on demand or not) */ + if ((OSAtomicRead(&psPMR->iLockCount) > 0) && !psPMR->bIsUnpinned) + { + if (psPMR->bSparseAlloc) + { + *puiPhysicalSize = psPMR->psMappingTable->uiChunkSize * psPMR->psMappingTable->ui32NumPhysChunks; + } + else + { + *puiPhysicalSize = psPMR->uiLogicalSize; + } + } + else + { + *puiPhysicalSize = 0; + } + return PVRSRV_OK; +} + +PHYS_HEAP * +PMR_PhysHeap(const PMR *psPMR) +{ + return psPMR->psPhysHeap; +} + +PVRSRV_ERROR +PMR_IsOffsetValid(const PMR *psPMR, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32NumOfPages, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_BOOL *pbValid) +{ + IMG_DEVMEM_OFFSET_T auiPhysicalOffset[PMR_MAX_TRANSLATION_STACK_ALLOC]; + IMG_UINT32 aui32BytesRemain[PMR_MAX_TRANSLATION_STACK_ALLOC]; + IMG_DEVMEM_OFFSET_T *puiPhysicalOffset = auiPhysicalOffset; + IMG_UINT32 *pui32BytesRemain = aui32BytesRemain; + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_ASSERT(psPMR != NULL); + PVR_ASSERT(psPMR->uiLogicalSize >= uiLogicalOffset); + 
+ if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC) + { + puiPhysicalOffset = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEVMEM_OFFSET_T)); + PVR_GOTO_IF_NOMEM(puiPhysicalOffset, eError, e0); + + pui32BytesRemain = OSAllocMem(ui32NumOfPages * sizeof(IMG_UINT32)); + PVR_GOTO_IF_NOMEM(pui32BytesRemain, eError, e0); + } + + _PMRLogicalOffsetToPhysicalOffset(psPMR, + ui32Log2PageSize, + ui32NumOfPages, + uiLogicalOffset, + puiPhysicalOffset, + pui32BytesRemain, + pbValid); + +e0: + if (puiPhysicalOffset != auiPhysicalOffset && puiPhysicalOffset != NULL) + { + OSFreeMem(puiPhysicalOffset); + } + + if (pui32BytesRemain != aui32BytesRemain && pui32BytesRemain != NULL) + { + OSFreeMem(pui32BytesRemain); + } + + return eError; +} + +PMR_MAPPING_TABLE * +PMR_GetMappingTable(const PMR *psPMR) +{ + PVR_ASSERT(psPMR != NULL); + return psPMR->psMappingTable; + +} + +IMG_UINT32 +PMR_GetLog2Contiguity(const PMR *psPMR) +{ + PVR_ASSERT(psPMR != NULL); + return psPMR->uiLog2ContiguityGuarantee; +} + +const IMG_CHAR * +PMR_GetAnnotation(const PMR *psPMR) +{ + PVR_ASSERT(psPMR != NULL); + return psPMR->szAnnotation; +} + +PMR_IMPL_TYPE +PMR_GetType(const PMR *psPMR) +{ + PVR_ASSERT(psPMR != NULL); + return psPMR->eFlavour; +} + +IMG_INT32 +PMR_GetRefCount(const PMR *psPMR) +{ + PVR_ASSERT(psPMR != NULL); + return OSAtomicRead(&psPMR->iRefCount); +} + +/* must have called PMRLockSysPhysAddresses() before calling this! 
*/ +PVRSRV_ERROR +PMR_DevPhysAddr(const PMR *psPMR, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32NumOfPages, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_DEV_PHYADDR *psDevAddrPtr, + IMG_BOOL *pbValid) +{ + IMG_UINT32 ui32Remain; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_DEVMEM_OFFSET_T auiPhysicalOffset[PMR_MAX_TRANSLATION_STACK_ALLOC]; + IMG_DEVMEM_OFFSET_T *puiPhysicalOffset = auiPhysicalOffset; + + PVR_ASSERT(psPMR != NULL); + PVR_ASSERT(ui32NumOfPages > 0); + PVR_ASSERT(psPMR->psFuncTab->pfnDevPhysAddr != NULL); + +#ifdef PVRSRV_NEED_PVR_ASSERT + PVR_ASSERT(OSAtomicRead(&psPMR->iLockCount) > (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 0 : 1)); +#endif + + if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC) + { + puiPhysicalOffset = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEVMEM_OFFSET_T)); + PVR_GOTO_IF_NOMEM(puiPhysicalOffset, eError, e0); + } + + _PMRLogicalOffsetToPhysicalOffset(psPMR, + ui32Log2PageSize, + ui32NumOfPages, + uiLogicalOffset, + puiPhysicalOffset, + &ui32Remain, + pbValid); + if (*pbValid || _PMRIsSparse(psPMR)) + { + /* Sparse PMR may not always have the first page valid */ + eError = psPMR->psFuncTab->pfnDevPhysAddr(psPMR->pvFlavourData, + ui32Log2PageSize, + ui32NumOfPages, + puiPhysicalOffset, + pbValid, + psDevAddrPtr); +#if defined(PVR_PMR_TRANSLATE_UMA_ADDRESSES) + /* Currently excluded from the default build because of performance concerns. + * We do not need this part in all systems because the GPU has the same address view of system RAM as the CPU. 
+ * Alternatively this could be implemented as part of the PMR-factories directly */ + + if (PhysHeapGetType(psPMR->psPhysHeap) == PHYS_HEAP_TYPE_UMA || + PhysHeapGetType(psPMR->psPhysHeap) == PHYS_HEAP_TYPE_DMA) + { + IMG_UINT32 i; + IMG_DEV_PHYADDR sDevPAddrCorrected; + + /* Copy the translated addresses to the correct array */ + for (i = 0; i < ui32NumOfPages; i++) + { + PhysHeapCpuPAddrToDevPAddr(psPMR->psPhysHeap, + 1, + &sDevPAddrCorrected, + (IMG_CPU_PHYADDR *) &psDevAddrPtr[i]); + psDevAddrPtr[i].uiAddr = sDevPAddrCorrected.uiAddr; + } + } +#endif + } + + if (puiPhysicalOffset != auiPhysicalOffset) + { + OSFreeMem(puiPhysicalOffset); + } + + PVR_GOTO_IF_ERROR(eError, e0); + + return PVRSRV_OK; + +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +/* must have called PMRLockSysPhysAddresses() before calling this! */ +PVRSRV_ERROR +PMR_CpuPhysAddr(const PMR *psPMR, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32NumOfPages, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_CPU_PHYADDR *psCpuAddrPtr, + IMG_BOOL *pbValid) +{ + IMG_UINT32 idx; + PVRSRV_ERROR eError; + IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC]; + IMG_DEV_PHYADDR *psDevPAddr = asDevPAddr; + + if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC) + { + psDevPAddr = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEV_PHYADDR)); + PVR_GOTO_IF_NOMEM(psDevPAddr, eError, e0); + } + + eError = PMR_DevPhysAddr(psPMR, ui32Log2PageSize, ui32NumOfPages, + uiLogicalOffset, psDevPAddr, pbValid); + PVR_GOTO_IF_ERROR(eError, e1); + + if (_PMRIsSparse(psPMR)) + { + /* Loop over each page. 
+ * If Dev addr valid, populate the CPU addr from the Dev addr + */ + for (idx = 0; idx < ui32NumOfPages; idx++) + { + if (pbValid[idx]) + { + PhysHeapDevPAddrToCpuPAddr(psPMR->psPhysHeap, 1, &psCpuAddrPtr[idx], &psDevPAddr[idx]); + } + } + } + else + { + /* In this case all addrs will be valid, so we can block translate */ + PhysHeapDevPAddrToCpuPAddr(psPMR->psPhysHeap, ui32NumOfPages, psCpuAddrPtr, psDevPAddr); + } + + if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC) + { + OSFreeMem(psDevPAddr); + } + + return PVRSRV_OK; +e1: + if (psDevPAddr != asDevPAddr) + { + OSFreeMem(psDevPAddr); + } +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +PVRSRV_ERROR PMR_ChangeSparseMem(PMR *psPMR, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices, + IMG_UINT32 uiSparseFlags) +{ + PVRSRV_ERROR eError; + + if (IMG_TRUE == psPMR->bNoLayoutChange) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: This PMR layout cannot be changed", + __func__)); + return PVRSRV_ERROR_PMR_NOT_PERMITTED; + } + + if (NULL == psPMR->psFuncTab->pfnChangeSparseMem) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: This type of sparse PMR cannot be changed.", + __func__)); + return PVRSRV_ERROR_NOT_IMPLEMENTED; + } + + eError = psPMR->psFuncTab->pfnChangeSparseMem(psPMR->pvFlavourData, + psPMR, + ui32AllocPageCount, + pai32AllocIndices, + ui32FreePageCount, + pai32FreeIndices, + uiSparseFlags); + if (eError != PVRSRV_OK) + { +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + if (eError == PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES) + { + PVRSRVStatsUpdateOOMStats(PVRSRV_PROCESS_STAT_TYPE_OOM_PHYSMEM_COUNT, + OSGetCurrentClientProcessIDKM()); + } +#endif + goto e0; + } + +#if defined(PDUMP) + { + IMG_BOOL bInitialise = IMG_FALSE; + IMG_UINT32 ui32InitValue = 0; + + if (PVRSRV_CHECK_ZERO_ON_ALLOC(PMR_Flags(psPMR))) + { + bInitialise = IMG_TRUE; + } + else if (PVRSRV_CHECK_POISON_ON_ALLOC(PMR_Flags(psPMR))) + { + ui32InitValue = 0xDEADBEEF; + 
bInitialise = IMG_TRUE; + } + + PDumpPMRChangeSparsePMR(psPMR, + 1 << psPMR->uiLog2ContiguityGuarantee, + ui32AllocPageCount, + pai32AllocIndices, + ui32FreePageCount, + pai32FreeIndices, + bInitialise, + ui32InitValue, + &psPMR->hPDumpAllocHandle); + } + +#endif + +e0: + return eError; +} + + +PVRSRV_ERROR PMR_ChangeSparseMemCPUMap(PMR *psPMR, + IMG_UINT64 sCpuVAddrBase, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices) +{ + PVRSRV_ERROR eError; + + if ((NULL == psPMR->psFuncTab) || + (NULL == psPMR->psFuncTab->pfnChangeSparseMemCPUMap)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: This type of sparse PMR cannot be changed.", + __func__)); + return PVRSRV_ERROR_NOT_IMPLEMENTED; + } + + if (IMG_TRUE == psPMR->bNoLayoutChange) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: This PMR layout cannot be changed", + __func__)); + return PVRSRV_ERROR_PMR_NOT_PERMITTED; + } + + eError = psPMR->psFuncTab->pfnChangeSparseMemCPUMap(psPMR->pvFlavourData, + psPMR, + sCpuVAddrBase, + ui32AllocPageCount, + pai32AllocIndices, + ui32FreePageCount, + pai32FreeIndices); + + return eError; +} + + +#if defined(PDUMP) + +static PVRSRV_ERROR +_PMR_PDumpSymbolicAddrPhysical(const PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiPhysicalOffset, + IMG_UINT32 ui32MemspaceNameLen, + IMG_CHAR *pszMemspaceName, + IMG_UINT32 ui32SymbolicAddrLen, + IMG_CHAR *pszSymbolicAddr, + IMG_DEVMEM_OFFSET_T *puiNewOffset, + IMG_DEVMEM_OFFSET_T *puiNextSymName) +{ + PVRSRV_DEVICE_NODE *psDevNode = PhysHeapDeviceNode(psPMR->psPhysHeap); + PVRSRV_ERROR eError = PVRSRV_OK; + +#if defined(SUPPORT_SECURITY_VALIDATION) + if (PVRSRV_CHECK_PHYS_HEAP(FW_CODE, psPMR->uiFlags) || + PVRSRV_CHECK_PHYS_HEAP(FW_PRIV_DATA, psPMR->uiFlags) || + PVRSRV_CHECK_PHYS_HEAP(GPU_SECURE, psPMR->uiFlags)) + { + OSSNPrintf(pszMemspaceName, ui32MemspaceNameLen, PMR_MEMSPACE_FMTSPEC, + psPMR->pszPDumpDefaultMemspaceName); + } + else +#endif + if (DevmemCPUCacheCoherency(psDevNode, 
psPMR->uiFlags) || + DevmemDeviceCacheCoherency(psDevNode, psPMR->uiFlags)) + { + OSSNPrintf(pszMemspaceName, + ui32MemspaceNameLen, + PMR_MEMSPACE_CACHE_COHERENT_FMTSPEC, + psPMR->pszPDumpDefaultMemspaceName); + } + else + { + OSSNPrintf(pszMemspaceName, ui32MemspaceNameLen, PMR_MEMSPACE_FMTSPEC, + psPMR->pszPDumpDefaultMemspaceName); + } + + OSSNPrintf(pszSymbolicAddr, + ui32SymbolicAddrLen, + PMR_SYMBOLICADDR_FMTSPEC, + PMR_DEFAULT_PREFIX, + psPMR->uiSerialNum, + uiPhysicalOffset >> PMR_GetLog2Contiguity(psPMR), + psPMR->szAnnotation); + + if (pszSymbolicAddr) + { + PDumpMakeStringValid(pszSymbolicAddr, OSStringLength(pszSymbolicAddr)); + } + + + *puiNewOffset = uiPhysicalOffset & ((1 << PMR_GetLog2Contiguity(psPMR))-1); + *puiNextSymName = (IMG_DEVMEM_OFFSET_T) (((uiPhysicalOffset >> PMR_GetLog2Contiguity(psPMR))+1) + << PMR_GetLog2Contiguity(psPMR)); + + return eError; +} + + +PVRSRV_ERROR +PMR_PDumpSymbolicAddr(const PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT32 ui32MemspaceNameLen, + IMG_CHAR *pszMemspaceName, + IMG_UINT32 ui32SymbolicAddrLen, + IMG_CHAR *pszSymbolicAddr, + IMG_DEVMEM_OFFSET_T *puiNewOffset, + IMG_DEVMEM_OFFSET_T *puiNextSymName +) +{ + IMG_DEVMEM_OFFSET_T uiPhysicalOffset; + IMG_UINT32 ui32Remain; + IMG_BOOL bValid; + + PVR_ASSERT(uiLogicalOffset < psPMR->uiLogicalSize); + + _PMRLogicalOffsetToPhysicalOffset(psPMR, + 0, + 1, + uiLogicalOffset, + &uiPhysicalOffset, + &ui32Remain, + &bValid); + + if (!bValid) + { + /* For sparse allocations, for a given logical address, there + * may not be a physical memory backing, the virtual range can + * still be valid. + */ + uiPhysicalOffset = uiLogicalOffset; + } + + return _PMR_PDumpSymbolicAddrPhysical(psPMR, + uiPhysicalOffset, + ui32MemspaceNameLen, + pszMemspaceName, + ui32SymbolicAddrLen, + pszSymbolicAddr, + puiNewOffset, + puiNextSymName); +} + +/*! + * @brief Writes a WRW command to the script2 buffer, representing a + * dword write to a physical allocation. 
Size is always + * sizeof(IMG_UINT32). + * @param psPMR - PMR object representing allocation + * @param uiLogicalOffset - offset + * @param ui32Value - value to write + * @param uiPDumpFlags - pdump flags + * @return PVRSRV_ERROR + */ +PVRSRV_ERROR +PMRPDumpLoadMemValue32(PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT32 ui32Value, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVRSRV_ERROR eError; + IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset; + IMG_DEVMEM_OFFSET_T uiNextSymName; + IMG_UINT32 uiPMRPageSize = 1 << psPMR->uiLog2ContiguityGuarantee; + + PVR_ASSERT(uiLogicalOffset + sizeof(ui32Value) <= psPMR->uiLogicalSize); + /* Especially make sure to not cross a block boundary */ + PVR_ASSERT(( ((uiLogicalOffset & (uiPMRPageSize-1)) + sizeof(ui32Value)) + <= uiPMRPageSize)); + + eError = PMRLockSysPhysAddresses(psPMR); + PVR_ASSERT(eError == PVRSRV_OK); + + /* Get the symbolic address of the PMR */ + eError = PMR_PDumpSymbolicAddr(psPMR, + uiLogicalOffset, + sizeof(aszMemspaceName), + &aszMemspaceName[0], + sizeof(aszSymbolicName), + &aszSymbolicName[0], + &uiPDumpSymbolicOffset, + &uiNextSymName); + PVR_ASSERT(eError == PVRSRV_OK); + + /* Write the WRW script command */ + eError = PDumpPMRWRW32(aszMemspaceName, + aszSymbolicName, + uiPDumpSymbolicOffset, + ui32Value, + uiPDumpFlags); + PVR_ASSERT(eError == PVRSRV_OK); + + eError = PMRUnlockSysPhysAddresses(psPMR); + PVR_ASSERT(eError == PVRSRV_OK); + + return PVRSRV_OK; +} + +/*! + * @brief Writes a RDW followed by a WRW command to the pdump script to perform + * an effective copy from memory to memory. 
Memory copied is of size + * sizeof(IMG_UINT32) + * + * @param psDstPMR - PMR object representing allocation of destination + * @param uiDstLogicalOffset - destination offset + * @param psSrcPMR - PMR object representing allocation of source + * @param uiSrcLogicalOffset - source offset + * @param pszTmpVar - pdump temporary variable used during the copy + * @param uiPDumpFlags - pdump flags + * @return PVRSRV_ERROR + */ +PVRSRV_ERROR +PMRPDumpCopyMem32(PMR *psDstPMR, + IMG_DEVMEM_OFFSET_T uiDstLogicalOffset, + PMR *psSrcPMR, + IMG_DEVMEM_OFFSET_T uiSrcLogicalOffset, + const IMG_CHAR *pszTmpVar, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVRSRV_ERROR eError; + IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset; + IMG_DEVMEM_OFFSET_T uiNextSymName; + const IMG_UINT32 uiDstPMRPageSize = 1 << psDstPMR->uiLog2ContiguityGuarantee; + const IMG_UINT32 uiSrcPMRPageSize = 1 << psSrcPMR->uiLog2ContiguityGuarantee; + + PVR_ASSERT(uiSrcLogicalOffset + sizeof(IMG_UINT32) <= psSrcPMR->uiLogicalSize); + /* Especially make sure to not cross a block boundary */ + PVR_ASSERT(( ((uiSrcLogicalOffset & (uiSrcPMRPageSize-1)) + sizeof(IMG_UINT32)) + <= uiSrcPMRPageSize)); + + PVR_ASSERT(uiDstLogicalOffset + sizeof(IMG_UINT32) <= psDstPMR->uiLogicalSize); + /* Especially make sure to not cross a block boundary */ + PVR_ASSERT(( ((uiDstLogicalOffset & (uiDstPMRPageSize-1)) + sizeof(IMG_UINT32)) + <= uiDstPMRPageSize)); + + + eError = PMRLockSysPhysAddresses(psSrcPMR); + PVR_ASSERT(eError == PVRSRV_OK); + + /* Get the symbolic address of the source PMR */ + eError = PMR_PDumpSymbolicAddr(psSrcPMR, + uiSrcLogicalOffset, + sizeof(aszMemspaceName), + &aszMemspaceName[0], + sizeof(aszSymbolicName), + &aszSymbolicName[0], + &uiPDumpSymbolicOffset, + &uiNextSymName); + PVR_ASSERT(eError == PVRSRV_OK); + + /* Issue PDump read command */ + eError = PDumpPMRRDW32MemToInternalVar(pszTmpVar, + 
aszMemspaceName, + aszSymbolicName, + uiPDumpSymbolicOffset, + uiPDumpFlags); + PVR_ASSERT(eError == PVRSRV_OK); + + eError = PMRUnlockSysPhysAddresses(psSrcPMR); + PVR_ASSERT(eError == PVRSRV_OK); + + + + eError = PMRLockSysPhysAddresses(psDstPMR); + PVR_ASSERT(eError == PVRSRV_OK); + + + /* Get the symbolic address of the destination PMR */ + eError = PMR_PDumpSymbolicAddr(psDstPMR, + uiDstLogicalOffset, + sizeof(aszMemspaceName), + &aszMemspaceName[0], + sizeof(aszSymbolicName), + &aszSymbolicName[0], + &uiPDumpSymbolicOffset, + &uiNextSymName); + PVR_ASSERT(eError == PVRSRV_OK); + + + /* Write the WRW script command */ + eError = PDumpPMRWRW32InternalVarToMem(aszMemspaceName, + aszSymbolicName, + uiPDumpSymbolicOffset, + pszTmpVar, + uiPDumpFlags); + PVR_ASSERT(eError == PVRSRV_OK); + + + eError = PMRUnlockSysPhysAddresses(psDstPMR); + PVR_ASSERT(eError == PVRSRV_OK); + + return PVRSRV_OK; +} + +/*! + * @brief Writes a WRW64 command to the script2 buffer, representing a + * dword write to a physical allocation. Size is always + * sizeof(IMG_UINT64). 
+ * @param psPMR - PMR object representing allocation + * @param uiLogicalOffset - offset + * @param ui64Value - value to write + * @param uiPDumpFlags - pdump flags + * @return PVRSRV_ERROR + */ +PVRSRV_ERROR +PMRPDumpLoadMemValue64(PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT64 ui64Value, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVRSRV_ERROR eError; + IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset; + IMG_DEVMEM_OFFSET_T uiNextSymName; + IMG_UINT32 uiPMRPageSize = 1 << psPMR->uiLog2ContiguityGuarantee; + + + PVR_ASSERT(uiLogicalOffset + sizeof(ui64Value) <= psPMR->uiLogicalSize); + /* Especially make sure to not cross a block boundary */ + PVR_ASSERT(( ((uiLogicalOffset & (uiPMRPageSize-1)) + sizeof(ui64Value)) + <= uiPMRPageSize)); + + eError = PMRLockSysPhysAddresses(psPMR); + PVR_ASSERT(eError == PVRSRV_OK); + + /* Get the symbolic address of the PMR */ + eError = PMR_PDumpSymbolicAddr(psPMR, + uiLogicalOffset, + sizeof(aszMemspaceName), + &aszMemspaceName[0], + sizeof(aszSymbolicName), + &aszSymbolicName[0], + &uiPDumpSymbolicOffset, + &uiNextSymName); + PVR_ASSERT(eError == PVRSRV_OK); + + /* Write the WRW script command */ + eError = PDumpPMRWRW64(aszMemspaceName, + aszSymbolicName, + uiPDumpSymbolicOffset, + ui64Value, + uiPDumpFlags); + PVR_ASSERT(eError == PVRSRV_OK); + + eError = PMRUnlockSysPhysAddresses(psPMR); + PVR_ASSERT(eError == PVRSRV_OK); + + return PVRSRV_OK; +} + +/*! + * @brief Writes a RDW64 followed by a WRW64 command to the pdump script to + * perform an effective copy from memory to memory. 
Memory copied is of + * size sizeof(IMG_UINT32) + * + * @param psDstPMR - PMR object representing allocation of destination + * @param uiDstLogicalOffset - destination offset + * @param psSrcPMR - PMR object representing allocation of source + * @param uiSrcLogicalOffset - source offset + * @param pszTmpVar - pdump temporary variable used during the copy + * @param uiPDumpFlags - pdump flags + * @return PVRSRV_ERROR + */ +PVRSRV_ERROR +PMRPDumpCopyMem64(PMR *psDstPMR, + IMG_DEVMEM_OFFSET_T uiDstLogicalOffset, + PMR *psSrcPMR, + IMG_DEVMEM_OFFSET_T uiSrcLogicalOffset, + const IMG_CHAR *pszTmpVar, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVRSRV_ERROR eError; + IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset; + IMG_DEVMEM_OFFSET_T uiNextSymName; + const IMG_UINT32 uiDstPMRPageSize = 1 << psDstPMR->uiLog2ContiguityGuarantee; + const IMG_UINT32 uiSrcPMRPageSize = 1 << psSrcPMR->uiLog2ContiguityGuarantee; + + PVR_ASSERT(uiSrcLogicalOffset + sizeof(IMG_UINT32) <= psSrcPMR->uiLogicalSize); + /* Especially make sure to not cross a block boundary */ + PVR_ASSERT(( ((uiSrcLogicalOffset & (uiSrcPMRPageSize-1)) + sizeof(IMG_UINT32)) + <= uiSrcPMRPageSize)); + + PVR_ASSERT(uiDstLogicalOffset + sizeof(IMG_UINT32) <= psDstPMR->uiLogicalSize); + /* Especially make sure to not cross a block boundary */ + PVR_ASSERT(( ((uiDstLogicalOffset & (uiDstPMRPageSize-1)) + sizeof(IMG_UINT32)) + <= uiDstPMRPageSize)); + + + eError = PMRLockSysPhysAddresses(psSrcPMR); + PVR_ASSERT(eError == PVRSRV_OK); + + /* Get the symbolic address of the source PMR */ + eError = PMR_PDumpSymbolicAddr(psSrcPMR, + uiSrcLogicalOffset, + sizeof(aszMemspaceName), + &aszMemspaceName[0], + sizeof(aszSymbolicName), + &aszSymbolicName[0], + &uiPDumpSymbolicOffset, + &uiNextSymName); + PVR_ASSERT(eError == PVRSRV_OK); + + /* Issue PDump read command */ + eError = PDumpPMRRDW64MemToInternalVar(pszTmpVar, + 
aszMemspaceName, + aszSymbolicName, + uiPDumpSymbolicOffset, + uiPDumpFlags); + PVR_ASSERT(eError == PVRSRV_OK); + + eError = PMRUnlockSysPhysAddresses(psSrcPMR); + PVR_ASSERT(eError == PVRSRV_OK); + + + + eError = PMRLockSysPhysAddresses(psDstPMR); + PVR_ASSERT(eError == PVRSRV_OK); + + + /* Get the symbolic address of the destination PMR */ + eError = PMR_PDumpSymbolicAddr(psDstPMR, + uiDstLogicalOffset, + sizeof(aszMemspaceName), + &aszMemspaceName[0], + sizeof(aszSymbolicName), + &aszSymbolicName[0], + &uiPDumpSymbolicOffset, + &uiNextSymName); + PVR_ASSERT(eError == PVRSRV_OK); + + + /* Write the WRW script command */ + eError = PDumpPMRWRW64InternalVarToMem(aszMemspaceName, + aszSymbolicName, + uiPDumpSymbolicOffset, + pszTmpVar, + uiPDumpFlags); + PVR_ASSERT(eError == PVRSRV_OK); + + + eError = PMRUnlockSysPhysAddresses(psDstPMR); + PVR_ASSERT(eError == PVRSRV_OK); + + return PVRSRV_OK; +} + +/*! + * @brief PDumps the contents of the given allocation. + * If bZero is IMG_TRUE then the zero page in the parameter stream is used + * as the source of data, rather than the allocation's actual backing. 
+ * @param psPMR - PMR object representing allocation + * @param uiLogicalOffset - Offset to write at + * @param uiSize - Number of bytes to write + * @param uiPDumpFlags - PDump flags + * @param bZero - Use the PDump zero page as the source + * @return PVRSRV_ERROR + */ +PVRSRV_ERROR +PMRPDumpLoadMem(PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_DEVMEM_SIZE_T uiSize, + PDUMP_FLAGS_T uiPDumpFlags, + IMG_BOOL bZero) +{ + PVRSRV_ERROR eError; + IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiOutOffset; + IMG_DEVMEM_OFFSET_T uiCurrentOffset = uiLogicalOffset; + IMG_DEVMEM_OFFSET_T uiNextSymName = 0; + const IMG_CHAR *pszParamStreamFileName; + PDUMP_FILEOFFSET_T uiParamStreamFileOffset; + + /* required when !bZero */ +#define PMR_MAX_PDUMP_BUFSZ (1<<21) + IMG_CHAR aszParamStreamFilename[PDUMP_PARAM_MAX_FILE_NAME]; + IMG_UINT8 *pcBuffer = NULL; + size_t uiBufSz; + IMG_BOOL bValid; + IMG_DEVMEM_SIZE_T uiSizeRemain = uiSize; + + PVR_ASSERT(uiLogicalOffset + uiSize <= psPMR->uiLogicalSize); + + /* Check if pdump client is connected */ + if (!PDumpIsContCaptureOn()) + { + /* Dumping of memory in Pdump buffer will be rejected for no client connected case. + * So return early and save reading of data from PMR. */ + return PVRSRV_OK; + } + + /* Get the correct PDump stream file name */ + if (bZero) + { + PDumpCommentWithFlags(uiPDumpFlags, + "Zeroing allocation (" IMG_DEVMEM_SIZE_FMTSPEC " bytes)", + uiSize); + + /* get the zero page information. 
it is constant for this function */ + PDumpGetParameterZeroPageInfo(&uiParamStreamFileOffset, + &uiBufSz, + &pszParamStreamFileName); + } + else + { + + uiBufSz = 1 << PMR_GetLog2Contiguity(psPMR); + PVR_ASSERT((1 << PMR_GetLog2Contiguity(psPMR)) <= PMR_MAX_PDUMP_BUFSZ); + + pcBuffer = OSAllocMem(uiBufSz); + + PVR_LOG_RETURN_IF_NOMEM(pcBuffer, "OSAllocMem"); + + eError = PMRLockSysPhysAddresses(psPMR); + PVR_ASSERT(eError == PVRSRV_OK); + + pszParamStreamFileName = aszParamStreamFilename; + } + + /* Loop over all touched symbolic addresses of the PMR and + * emit LDBs to load the contents. */ + while (uiCurrentOffset < (uiLogicalOffset + uiSize)) + { + /* Get the correct symbolic name for the current offset */ + eError = PMR_PDumpSymbolicAddr(psPMR, + uiCurrentOffset, + sizeof(aszMemspaceName), + &aszMemspaceName[0], + sizeof(aszSymbolicName), + &aszSymbolicName[0], + &uiOutOffset, + &uiNextSymName); + PVR_ASSERT(eError == PVRSRV_OK); + PVR_ASSERT((uiNextSymName - uiCurrentOffset) <= uiBufSz); + + PMR_IsOffsetValid(psPMR, + 0, + 1, + uiCurrentOffset, + &bValid); + + /* Either just LDB the zeros or read from the PMR and store that + * in the pdump stream */ + if (bValid) + { + size_t uiNumBytes; + + if (bZero) + { + uiNumBytes = MIN(uiSizeRemain, uiNextSymName - uiCurrentOffset); + } + else + { + IMG_DEVMEM_OFFSET_T uiReadOffset; + uiReadOffset = ((uiNextSymName > (uiLogicalOffset + uiSize)) ? + uiLogicalOffset + uiSize - uiCurrentOffset : + uiNextSymName - uiCurrentOffset); + + eError = PMR_ReadBytes(psPMR, + uiCurrentOffset, + pcBuffer, + uiReadOffset, + &uiNumBytes); + PVR_ASSERT(eError == PVRSRV_OK); + + eError = PDumpWriteParameterBlob(pcBuffer, + uiNumBytes, + uiPDumpFlags, + &aszParamStreamFilename[0], + sizeof(aszParamStreamFilename), + &uiParamStreamFileOffset); + if (eError == PVRSRV_ERROR_PDUMP_NOT_ALLOWED) + { + /* Write to parameter file prevented under the flags and + * current state of the driver so skip further writes. 
+ */ + eError = PVRSRV_OK; + } + else if (eError != PVRSRV_OK) + { + PDUMP_ERROR(eError, "Failed to write PMR memory to parameter file"); + } + } + + /* Emit the LDB command to the current symbolic address */ + eError = PDumpPMRLDB(aszMemspaceName, + aszSymbolicName, + uiOutOffset, + uiNumBytes, + pszParamStreamFileName, + uiParamStreamFileOffset, + uiPDumpFlags); + uiSizeRemain = uiSizeRemain - uiNumBytes; + } + uiCurrentOffset = uiNextSymName; + } + + if (!bZero) + { + eError = PMRUnlockSysPhysAddresses(psPMR); + PVR_ASSERT(eError == PVRSRV_OK); + + OSFreeMem(pcBuffer); + } + + return PVRSRV_OK; +} + + + +PVRSRV_ERROR +PMRPDumpSaveToFile(const PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT32 uiArraySize, + const IMG_CHAR *pszFilename, + IMG_UINT32 uiFileOffset) +{ + PVRSRV_ERROR eError; + IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiOutOffset; + IMG_DEVMEM_OFFSET_T uiCurrentOffset = uiLogicalOffset; + IMG_DEVMEM_OFFSET_T uiNextSymName = 0; + IMG_UINT32 uiCurrentFileOffset = uiFileOffset; + + PVR_UNREFERENCED_PARAMETER(uiArraySize); + + PVR_ASSERT(uiLogicalOffset + uiSize <= psPMR->uiLogicalSize); + + while (uiCurrentOffset < (uiLogicalOffset + uiSize)) + { + IMG_DEVMEM_OFFSET_T uiReadOffset; + + eError = PMR_PDumpSymbolicAddr(psPMR, + uiCurrentOffset, + sizeof(aszMemspaceName), + &aszMemspaceName[0], + sizeof(aszSymbolicName), + &aszSymbolicName[0], + &uiOutOffset, + &uiNextSymName); + PVR_ASSERT(eError == PVRSRV_OK); + PVR_ASSERT(uiNextSymName <= psPMR->uiLogicalSize); + + uiReadOffset = ((uiNextSymName > (uiLogicalOffset + uiSize)) ? 
+ uiLogicalOffset + uiSize - uiCurrentOffset : + uiNextSymName - uiCurrentOffset); + + eError = PDumpPMRSAB(aszMemspaceName, + aszSymbolicName, + uiOutOffset, + uiReadOffset, + pszFilename, + uiCurrentFileOffset); + PVR_ASSERT(eError == PVRSRV_OK); + + uiCurrentFileOffset += uiNextSymName - uiCurrentOffset; + uiCurrentOffset = uiNextSymName; + } + + return PVRSRV_OK; +} + +PVRSRV_ERROR +PMRPDumpPol32(const PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVRSRV_ERROR eError; + IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiPDumpOffset; + IMG_DEVMEM_OFFSET_T uiNextSymName; + IMG_UINT32 uiPMRPageSize = 1 << psPMR->uiLog2ContiguityGuarantee; + + /* Make sure to not cross a block boundary */ + PVR_ASSERT(( ((uiLogicalOffset & (uiPMRPageSize-1)) + sizeof(ui32Value)) + <= uiPMRPageSize)); + + eError = PMR_PDumpSymbolicAddr(psPMR, + uiLogicalOffset, + sizeof(aszMemspaceName), + &aszMemspaceName[0], + sizeof(aszSymbolicName), + &aszSymbolicName[0], + &uiPDumpOffset, + &uiNextSymName); + PVR_GOTO_IF_ERROR(eError, e0); + +#define _MEMPOLL_DELAY (1000) +#define _MEMPOLL_COUNT (2000000000 / _MEMPOLL_DELAY) + + eError = PDumpPMRPOL(aszMemspaceName, + aszSymbolicName, + uiPDumpOffset, + ui32Value, + ui32Mask, + eOperator, + _MEMPOLL_COUNT, + _MEMPOLL_DELAY, + uiPDumpFlags); + PVR_GOTO_IF_ERROR(eError, e0); + + return PVRSRV_OK; + + /* Error exit paths follow */ +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +PVRSRV_ERROR +PMRPDumpCheck32(const PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVRSRV_ERROR eError; + IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + 
IMG_DEVMEM_OFFSET_T uiPDumpOffset; + IMG_DEVMEM_OFFSET_T uiNextSymName; + IMG_UINT32 uiPMRPageSize = 1 << psPMR->uiLog2ContiguityGuarantee; + + /* Make sure to not cross a block boundary */ + PVR_ASSERT(( ((uiLogicalOffset & (uiPMRPageSize-1)) + sizeof(ui32Value)) + < uiPMRPageSize)); + + eError = PMR_PDumpSymbolicAddr(psPMR, + uiLogicalOffset, + sizeof(aszMemspaceName), + &aszMemspaceName[0], + sizeof(aszSymbolicName), + &aszSymbolicName[0], + &uiPDumpOffset, + &uiNextSymName); + if (eError != PVRSRV_OK) + { + goto e0; + } + + eError = PDumpPMRPOL(aszMemspaceName, + aszSymbolicName, + uiPDumpOffset, + ui32Value, + ui32Mask, + eOperator, + 1, + 1, + uiPDumpFlags); + if (eError != PVRSRV_OK) + { + goto e0; + } + + return PVRSRV_OK; + + /* Error exit paths follow */ +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +PVRSRV_ERROR +PMRPDumpCBP(const PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiReadOffset, + IMG_DEVMEM_OFFSET_T uiWriteOffset, + IMG_DEVMEM_SIZE_T uiPacketSize, + IMG_DEVMEM_SIZE_T uiBufferSize) +{ + PVRSRV_ERROR eError; + IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiPDumpOffset; + IMG_DEVMEM_OFFSET_T uiNextSymName; + + eError = PMR_PDumpSymbolicAddr(psPMR, + uiReadOffset, + sizeof(aszMemspaceName), + &aszMemspaceName[0], + sizeof(aszSymbolicName), + &aszSymbolicName[0], + &uiPDumpOffset, + &uiNextSymName); + PVR_GOTO_IF_ERROR(eError, e0); + + eError = PDumpPMRCBP(aszMemspaceName, + aszSymbolicName, + uiPDumpOffset, + uiWriteOffset, + uiPacketSize, + uiBufferSize); + PVR_GOTO_IF_ERROR(eError, e0); + + return PVRSRV_OK; + + /* Error exit paths follow */ +e0: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +void +PDumpPMRChangeSparsePMR(PMR *psPMR, + IMG_UINT32 uiBlockSize, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices, + IMG_BOOL bInitialise, + IMG_UINT32 
ui32InitValue, + IMG_HANDLE *phPDumpAllocInfoOut) +{ + PVRSRV_ERROR eError; + IMG_HANDLE *phPDumpAllocInfo = (IMG_HANDLE*) psPMR->hPDumpAllocHandle; + + IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiOffset; + IMG_DEVMEM_OFFSET_T uiNextSymName; + IMG_UINT32 i, uiIndex; + + /* Remove pages from the PMR */ + for (i = 0; i < ui32FreePageCount; i++) + { + uiIndex = pai32FreeIndices[i]; + + eError = PDumpFree(phPDumpAllocInfo[uiIndex]); + PVR_ASSERT(eError == PVRSRV_OK); + phPDumpAllocInfo[uiIndex] = NULL; + } + + /* Add new pages to the PMR */ + for (i = 0; i < ui32AllocPageCount; i++) + { + uiIndex = pai32AllocIndices[i]; + + PVR_ASSERT(phPDumpAllocInfo[uiIndex] == NULL); + + eError = PMR_PDumpSymbolicAddr(psPMR, + uiIndex * uiBlockSize, + sizeof(aszMemspaceName), + &aszMemspaceName[0], + sizeof(aszSymbolicName), + &aszSymbolicName[0], + &uiOffset, + &uiNextSymName); + PVR_ASSERT(eError == PVRSRV_OK); + + eError = PDumpMalloc(aszMemspaceName, + aszSymbolicName, + uiBlockSize, + uiBlockSize, + bInitialise, + ui32InitValue, + &phPDumpAllocInfo[uiIndex], + PDUMP_NONE); + PVR_ASSERT(eError == PVRSRV_OK); + } + + /* (IMG_HANDLE) <- (IMG_HANDLE*) */ + *phPDumpAllocInfoOut = (IMG_HANDLE) phPDumpAllocInfo; +} + +void +PDumpPMRFreePMR(PMR *psPMR, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_ALIGN_T uiBlockSize, + IMG_UINT32 uiLog2Contiguity, + IMG_HANDLE hPDumpAllocationInfoHandle) +{ + PVRSRV_ERROR eError; + IMG_UINT32 i; + + /* (IMG_HANDLE*) <- (IMG_HANDLE) */ + IMG_HANDLE *ahPDumpAllocHandleArray = (IMG_HANDLE*) hPDumpAllocationInfoHandle; + + for (i = 0; i < psPMR->uiNumPDumpBlocks; i++) + { + if (ahPDumpAllocHandleArray[i] != NULL) + { + eError = PDumpFree(ahPDumpAllocHandleArray[i]); + PVR_ASSERT(eError == PVRSRV_OK); + ahPDumpAllocHandleArray[i] = NULL; + } + } + + OSFreeMem(ahPDumpAllocHandleArray); +} + + +void +PDumpPMRMallocPMR(PMR *psPMR, + IMG_DEVMEM_SIZE_T uiSize, + 
IMG_DEVMEM_ALIGN_T uiBlockSize, + IMG_UINT32 ui32ChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *puiMappingTable, + IMG_UINT32 uiLog2Contiguity, + IMG_BOOL bInitialise, + IMG_UINT32 ui32InitValue, + IMG_HANDLE *phPDumpAllocInfoOut, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + IMG_HANDLE *phPDumpAllocInfo; + + IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiOffset; + IMG_DEVMEM_OFFSET_T uiNextSymName; + IMG_UINT32 uiNumPhysBlocks; + IMG_UINT32 uiNumVirtBlocks; + IMG_UINT32 i, uiIndex; + + if (PMR_IsSparse(psPMR)) + { + uiNumPhysBlocks = (ui32ChunkSize * ui32NumPhysChunks) >> uiLog2Contiguity; + /* Make sure we did not cut off anything */ + PVR_ASSERT(uiNumPhysBlocks << uiLog2Contiguity == (ui32ChunkSize * ui32NumPhysChunks)); + } + else + { + uiNumPhysBlocks = uiSize >> uiLog2Contiguity; + /* Make sure we did not cut off anything */ + PVR_ASSERT(uiNumPhysBlocks << uiLog2Contiguity == uiSize); + } + + uiNumVirtBlocks = uiSize >> uiLog2Contiguity; + PVR_ASSERT(uiNumVirtBlocks << uiLog2Contiguity == uiSize); + + psPMR->uiNumPDumpBlocks = uiNumVirtBlocks; + + phPDumpAllocInfo = (IMG_HANDLE*) OSAllocZMem(uiNumVirtBlocks * sizeof(IMG_HANDLE)); + + + for (i = 0; i < uiNumPhysBlocks; i++) + { + uiIndex = PMR_IsSparse(psPMR) ? 
puiMappingTable[i] : i; + + eError = PMR_PDumpSymbolicAddr(psPMR, + uiIndex * uiBlockSize, + sizeof(aszMemspaceName), + &aszMemspaceName[0], + sizeof(aszSymbolicName), + &aszSymbolicName[0], + &uiOffset, + &uiNextSymName); + PVR_ASSERT(eError == PVRSRV_OK); + + eError = PDumpMalloc(aszMemspaceName, + aszSymbolicName, + uiBlockSize, + uiBlockSize, + bInitialise, + ui32InitValue, + &phPDumpAllocInfo[uiIndex], + ui32PDumpFlags); + PVR_ASSERT(eError == PVRSRV_OK); + } + + /* (IMG_HANDLE) <- (IMG_HANDLE*) */ + *phPDumpAllocInfoOut = (IMG_HANDLE) phPDumpAllocInfo; + +} +#endif /* PDUMP */ + + +void *PMRGetPrivateData(const PMR *psPMR, + const PMR_IMPL_FUNCTAB *psFuncTab) +{ + return (psFuncTab == psPMR->psFuncTab) ? psPMR->pvFlavourData : NULL; +} + +#define PMR_PM_WORD_SIZE 4 + +PVRSRV_ERROR +PMRWritePMPageList(/* Target PMR, offset, and length */ + PMR *psPageListPMR, + IMG_DEVMEM_OFFSET_T uiTableOffset, + IMG_DEVMEM_SIZE_T uiTableLength, + /* Referenced PMR, and "page" granularity */ + PMR *psReferencePMR, + IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize, + PMR_PAGELIST **ppsPageList) +{ + PVRSRV_ERROR eError; + IMG_DEVMEM_SIZE_T uiWordSize; + IMG_UINT32 uiNumPages; + IMG_UINT32 uiPageIndex; + PMR_FLAGS_T uiFlags = psPageListPMR->uiFlags; + PMR_PAGELIST *psPageList; +#if defined(PDUMP) + IMG_CHAR aszTableEntryMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszTableEntrySymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiTableEntryPDumpOffset; + IMG_CHAR aszPageMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; + IMG_CHAR aszPageSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; + IMG_DEVMEM_OFFSET_T uiPagePDumpOffset; + IMG_DEVMEM_OFFSET_T uiNextSymName; +#endif +#if !defined(NO_HARDWARE) + IMG_UINT32 uiPageListPageSize = 1 << psPageListPMR->uiLog2ContiguityGuarantee; + IMG_UINT64 uiPageListPMRPage = 0; + IMG_UINT64 uiPrevPageListPMRPage = 0; + IMG_HANDLE hPrivData = NULL; + void *pvKernAddr = NULL; + IMG_UINT32 *pui32DataPtr = NULL; + 
IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC]; + IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC]; + IMG_DEV_PHYADDR *pasDevAddrPtr; + IMG_BOOL *pbPageIsValid; +#endif + + uiWordSize = PMR_PM_WORD_SIZE; + + /* check we're being asked to write the same number of 4-byte units as there are pages */ + uiNumPages = (IMG_UINT32)(psReferencePMR->uiLogicalSize >> uiLog2PageSize); + + if ((PMR_SIZE_T)uiNumPages << uiLog2PageSize != psReferencePMR->uiLogicalSize) + { + /* Strictly speaking, it's possible to provoke this error in two ways: + (i) if it's not a whole multiple of the page size; or + (ii) if there are more than 4 billion pages. + The latter is unlikely. :) but the check is required in order to justify the cast. + */ + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE, return_error); + } + uiWordSize = (IMG_UINT32)uiTableLength / uiNumPages; + if (uiNumPages * uiWordSize != uiTableLength) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE, return_error); + } + + /* Check we're not being asked to write off the end of the PMR */ + PVR_GOTO_IF_INVALID_PARAM(uiTableOffset + uiTableLength <= psPageListPMR->uiLogicalSize, eError, return_error); + + /* the PMR into which we are writing must not be user CPU mappable: */ + if (PVRSRV_CHECK_CPU_READABLE(uiFlags) || PVRSRV_CHECK_CPU_WRITEABLE(uiFlags)) + { + PVR_DPF((PVR_DBG_ERROR, + "Masked flags = 0x%" PVRSRV_MEMALLOCFLAGS_FMTSPEC, + (PMR_FLAGS_T)(uiFlags & (PVRSRV_MEMALLOCFLAG_CPU_READABLE | PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE)))); + PVR_DPF((PVR_DBG_ERROR, + "Page list PMR allows CPU mapping (0x%" PVRSRV_MEMALLOCFLAGS_FMTSPEC ")", + uiFlags)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_INVALID_PMR_FLAGS, return_error); + } + + if (_PMRIsSparse(psPageListPMR)) + { + PVR_LOG_GOTO_WITH_ERROR("psPageListPMR", eError, PVRSRV_ERROR_INVALID_PARAMS, return_error); + } + + if (_PMRIsSparse(psReferencePMR)) + { + PVR_LOG_GOTO_WITH_ERROR("psReferencePMR", eError, 
PVRSRV_ERROR_INVALID_PARAMS, return_error); + } + + psPageList = OSAllocMem(sizeof(PMR_PAGELIST)); + PVR_LOG_GOTO_IF_NOMEM(psPageList, eError, return_error); + + psPageList->psReferencePMR = psReferencePMR; + + /* Need to lock down the physical addresses of the reference PMR */ + /* N.B. This also checks that the requested "contiguity" is achievable */ + eError = PMRLockSysPhysAddresses(psReferencePMR); + PVR_GOTO_IF_ERROR(eError, free_page_list); + +#if !defined(NO_HARDWARE) + if (uiNumPages > PMR_MAX_TRANSLATION_STACK_ALLOC) + { + pasDevAddrPtr = OSAllocMem(uiNumPages * sizeof(IMG_DEV_PHYADDR)); + PVR_LOG_GOTO_IF_NOMEM(pasDevAddrPtr, eError, unlock_phys_addrs); + + pbPageIsValid = OSAllocMem(uiNumPages * sizeof(IMG_BOOL)); + if (pbPageIsValid == NULL) + { + /* Clean-up before exit */ + OSFreeMem(pasDevAddrPtr); + + PVR_LOG_GOTO_WITH_ERROR("pbPageIsValid", eError, PVRSRV_ERROR_OUT_OF_MEMORY, free_devaddr_array); + } + } + else + { + pasDevAddrPtr = asDevPAddr; + pbPageIsValid = abValid; + } + + eError = PMR_DevPhysAddr(psReferencePMR, uiLog2PageSize, uiNumPages, 0, + pasDevAddrPtr, pbPageIsValid); + PVR_LOG_GOTO_IF_ERROR(eError, "PMR_DevPhysAddr", free_valid_array); +#endif + + for (uiPageIndex = 0; uiPageIndex < uiNumPages; uiPageIndex++) + { + IMG_DEVMEM_OFFSET_T uiPMROffset = uiTableOffset + (uiWordSize * uiPageIndex); + +#if defined(PDUMP) + eError = PMR_PDumpSymbolicAddr(psPageListPMR, + uiPMROffset, + sizeof(aszTableEntryMemspaceName), + &aszTableEntryMemspaceName[0], + sizeof(aszTableEntrySymbolicName), + &aszTableEntrySymbolicName[0], + &uiTableEntryPDumpOffset, + &uiNextSymName); + PVR_ASSERT(eError == PVRSRV_OK); + + eError = PMR_PDumpSymbolicAddr(psReferencePMR, + (IMG_DEVMEM_OFFSET_T)uiPageIndex << uiLog2PageSize, + sizeof(aszPageMemspaceName), + &aszPageMemspaceName[0], + sizeof(aszPageSymbolicName), + &aszPageSymbolicName[0], + &uiPagePDumpOffset, + &uiNextSymName); + PVR_ASSERT(eError == PVRSRV_OK); + + eError = PDumpWriteShiftedMaskedValue(/* 
destination */ + aszTableEntryMemspaceName, + aszTableEntrySymbolicName, + uiTableEntryPDumpOffset, + /* source */ + aszPageMemspaceName, + aszPageSymbolicName, + uiPagePDumpOffset, + /* shift right */ + uiLog2PageSize, + /* shift left */ + 0, + /* mask */ + 0xffffffff, + /* word size */ + uiWordSize, + /* flags */ + PDUMP_FLAGS_CONTINUOUS); + PVR_ASSERT(eError == PVRSRV_OK); +#else + PVR_UNREFERENCED_PARAMETER(uiPMROffset); +#endif + +#if !defined(NO_HARDWARE) + + /* + We check for sparse PMR's at function entry, but as we can, + check that every page is valid + */ + PVR_ASSERT(pbPageIsValid[uiPageIndex]); + PVR_ASSERT(pasDevAddrPtr[uiPageIndex].uiAddr != 0); + PVR_ASSERT(((pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize) & 0xFFFFFFFF00000000ll) == 0); + + uiPageListPMRPage = uiPMROffset >> psReferencePMR->uiLog2ContiguityGuarantee; + + if ((pui32DataPtr == NULL) || (uiPageListPMRPage != uiPrevPageListPMRPage)) + { + size_t uiMappingOffset = uiPMROffset & (~(uiPageListPageSize - 1)); + size_t uiMappedSize; + + /* If we already had a page list mapped, we need to unmap it... 
*/ + if (pui32DataPtr != NULL) + { + PMRReleaseKernelMappingData(psPageListPMR, hPrivData); + } + + eError = PMRAcquireKernelMappingData(psPageListPMR, + uiMappingOffset, + uiPageListPageSize, + &pvKernAddr, + &uiMappedSize, + &hPrivData); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Error mapping page list PMR page (%" IMG_UINT64_FMTSPEC ") into kernel (%d)", + uiPageListPMRPage, eError)); + goto free_valid_array; + } + + uiPrevPageListPMRPage = uiPageListPMRPage; + PVR_ASSERT(uiMappedSize >= uiPageListPageSize); + PVR_ASSERT(pvKernAddr != NULL); + + pui32DataPtr = IMG_OFFSET_ADDR(pvKernAddr, (uiPMROffset & (uiPageListPageSize - 1))); + } + + PVR_ASSERT(((pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize) & 0xFFFFFFFF00000000ll) == 0); + + /* Write the physical page index into the page list PMR */ + *pui32DataPtr++ = TRUNCATE_64BITS_TO_32BITS(pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize); + + /* Last page so unmap */ + if (uiPageIndex == (uiNumPages - 1)) + { + PMRReleaseKernelMappingData(psPageListPMR, hPrivData); + } +#endif + } + + /* if this memory is allocated as write-combine we must flush write + * buffers */ + if (PVRSRV_CHECK_CPU_WRITE_COMBINE(psPageListPMR->uiFlags)) + { + OSWriteMemoryBarrier(); + } + +#if !defined(NO_HARDWARE) + if (pasDevAddrPtr != asDevPAddr) + { + OSFreeMem(pbPageIsValid); + OSFreeMem(pasDevAddrPtr); + } +#endif + *ppsPageList = psPageList; + return PVRSRV_OK; + + /* Error exit paths follow */ +#if !defined(NO_HARDWARE) + +free_valid_array: + if (pbPageIsValid != abValid) + { + OSFreeMem(pbPageIsValid); + } + +free_devaddr_array: + if (pasDevAddrPtr != asDevPAddr) + { + OSFreeMem(pasDevAddrPtr); + } + +unlock_phys_addrs: + PMRUnlockSysPhysAddresses(psReferencePMR); +#endif + +free_page_list: + OSFreeMem(psPageList); + +return_error: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + + +PVRSRV_ERROR +PMRUnwritePMPageList(PMR_PAGELIST *psPageList) +{ + PVRSRV_ERROR eError; + + eError = 
PMRUnlockSysPhysAddresses(psPageList->psReferencePMR); + PVR_ASSERT(eError == PVRSRV_OK); + OSFreeMem(psPageList); + + return PVRSRV_OK; +} + +PVRSRV_ERROR +PMRZeroingPMR(PMR *psPMR, + IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize) +{ + IMG_UINT32 uiNumPages; + IMG_UINT32 uiPageIndex; + IMG_UINT32 ui32PageSize = 1 << uiLog2PageSize; + IMG_HANDLE hPrivData = NULL; + void *pvKernAddr = NULL; + PVRSRV_ERROR eError = PVRSRV_OK; + size_t uiMappedSize; + + PVR_ASSERT(psPMR); + + /* Calculate number of pages in this PMR */ + uiNumPages = (IMG_UINT32)(psPMR->uiLogicalSize >> uiLog2PageSize); + + /* Verify the logical Size is a multiple or the physical page size */ + if ((PMR_SIZE_T)uiNumPages << uiLog2PageSize != psPMR->uiLogicalSize) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: PMR is not a multiple of %u", + __func__, + ui32PageSize)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE, MultiPage_Error); + } + + if (_PMRIsSparse(psPMR)) + { + PVR_LOG_GOTO_WITH_ERROR("psPMR", eError, PVRSRV_ERROR_INVALID_PARAMS, Sparse_Error); + } + + /* Scan through all pages of the PMR */ + for (uiPageIndex = 0; uiPageIndex < uiNumPages; uiPageIndex++) + { + /* map the physical page (for a given PMR offset) into kernel space */ + eError = PMRAcquireKernelMappingData(psPMR, + (size_t)uiPageIndex << uiLog2PageSize, + ui32PageSize, + &pvKernAddr, + &uiMappedSize, + &hPrivData); + PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireKernelMappingData", AcquireKernelMapping_Error); + + /* ensure the mapped page size is the same as the physical page size */ + if (uiMappedSize != ui32PageSize) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Physical Page size = 0x%08x, Size of Mapping = 0x%016" IMG_UINT64_FMTSPECx, + __func__, + ui32PageSize, + (IMG_UINT64)uiMappedSize)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, MappingSize_Error); + } + + /* Use the conservative 'DeviceMemSet' here because we can't know + * if this PMR will be mapped cached. 
+ */ + OSDeviceMemSet(pvKernAddr, 0, ui32PageSize); + + /* release mapping */ + PMRReleaseKernelMappingData(psPMR, hPrivData); + + } + + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Zeroing PMR %p done (num pages %u, page size %u)", + __func__, + psPMR, + uiNumPages, + ui32PageSize)); + + return PVRSRV_OK; + + + /* Error handling */ + +MappingSize_Error: + PMRReleaseKernelMappingData(psPMR, hPrivData); + +AcquireKernelMapping_Error: +Sparse_Error: +MultiPage_Error: + + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +PVRSRV_ERROR +PMRDumpPageList(PMR *psPMR, + IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize) +{ + IMG_DEV_PHYADDR sDevAddrPtr; + IMG_UINT32 uiNumPages; + IMG_UINT32 uiPageIndex; + IMG_BOOL bPageIsValid; + IMG_UINT32 ui32Col = 16; + IMG_UINT32 ui32SizePerCol = 11; + IMG_UINT32 ui32ByteCount = 0; + IMG_CHAR pszBuffer[16 /* ui32Col */ * 11 /* ui32SizePerCol */ + 1]; + PVRSRV_ERROR eError = PVRSRV_OK; + + /* Get number of pages */ + uiNumPages = (IMG_UINT32)(psPMR->uiLogicalSize >> uiLog2PageSize); + + /* Verify the logical Size is a multiple or the physical page size */ + if ((PMR_SIZE_T)uiNumPages << uiLog2PageSize != psPMR->uiLogicalSize) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PMR is not a multiple of %" IMG_UINT64_FMTSPEC, + __func__, (IMG_UINT64) (1ULL << uiLog2PageSize))); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE, MultiPage_Error); + } + + if (_PMRIsSparse(psPMR)) + { + PVR_LOG_GOTO_WITH_ERROR("psPMR", eError, PVRSRV_ERROR_INVALID_PARAMS, Sparse_Error); + } + + PVR_LOG((" PMR %p, Number of pages %u, Log2PageSize %d", psPMR, uiNumPages, uiLog2PageSize)); + + /* Print the address of the physical pages */ + for (uiPageIndex = 0; uiPageIndex < uiNumPages; uiPageIndex++) + { + /* Get Device physical Address */ + eError = PMR_DevPhysAddr(psPMR, + uiLog2PageSize, + 1, + (IMG_DEVMEM_OFFSET_T)uiPageIndex << uiLog2PageSize, + &sDevAddrPtr, + &bPageIsValid); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PMR %p failed to get DevPhysAddr with 
error %u", + __func__, + psPMR, + eError)); + goto DevPhysAddr_Error; + } + + ui32ByteCount += OSSNPrintf(pszBuffer + ui32ByteCount, ui32SizePerCol + 1, "%08x ", (IMG_UINT32)(sDevAddrPtr.uiAddr >> uiLog2PageSize)); + PVR_ASSERT(ui32ByteCount < ui32Col * ui32SizePerCol); + + if (uiPageIndex % ui32Col == ui32Col-1) + { + PVR_LOG((" Phys Page: %s", pszBuffer)); + ui32ByteCount = 0; + } + } + if (ui32ByteCount > 0) + { + PVR_LOG((" Phys Page: %s", pszBuffer)); + } + + return PVRSRV_OK; + + /* Error handling */ +DevPhysAddr_Error: +Sparse_Error: +MultiPage_Error: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +PVRSRV_ERROR +PMRInit(void) +{ + PVRSRV_ERROR eError; + + /* Singleton PMR context already initialised */ + if (_gsSingletonPMRContext.bModuleInitialised) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR, out); + } + + eError = OSLockCreate(&_gsSingletonPMRContext.hLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", out); + + _gsSingletonPMRContext.uiNextSerialNum = 1; + + _gsSingletonPMRContext.uiNextKey = 0x8300f001 * (uintptr_t)&_gsSingletonPMRContext; + + _gsSingletonPMRContext.bModuleInitialised = IMG_TRUE; + + _gsSingletonPMRContext.uiNumLivePMRs = 0; + +#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) + eError = MMapStatsInit(); + PVR_LOG_GOTO_IF_ERROR(eError, "MMapStatsInit", out); +#endif + +out: + PVR_ASSERT(eError == PVRSRV_OK); + return eError; +} + +PVRSRV_ERROR +PMRDeInit(void) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_ERROR eError = PVRSRV_OK; + + if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK) + { + goto out; + } + + /* Singleton PMR context is not initialised */ + if (!_gsSingletonPMRContext.bModuleInitialised) + { + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR, out); + } + +#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) + MMapStatsDeInit(); +#endif + + if (_gsSingletonPMRContext.uiNumLivePMRs != 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Error: %d live PMRs remain", + 
__func__, + _gsSingletonPMRContext.uiNumLivePMRs)); + PVR_DPF((PVR_DBG_ERROR, "%s: This is an unrecoverable error; a subsequent crash is inevitable", + __func__)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR, out); + } + + OSLockDestroy(_gsSingletonPMRContext.hLock); + + _gsSingletonPMRContext.bModuleInitialised = IMG_FALSE; + +out: + PVR_ASSERT(eError == PVRSRV_OK); + return eError; +} diff --git a/drivers/gpu/drm/phytium/octopus/pmr.h b/drivers/gpu/drm/phytium/octopus/pmr.h new file mode 100644 index 000000000000..3fcfa2687de8 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pmr.h @@ -0,0 +1,1137 @@ +/*************************************************************************/ /*! +@File +@Title Physmem (PMR) abstraction +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Part of the memory management. This module is responsible for + the "PMR" abstraction. A PMR (Physical Memory Resource) + represents some unit of physical memory which is + allocated/freed/mapped/unmapped as an indivisible unit + (higher software levels provide an abstraction above that + to deal with dividing this down into smaller manageable units). + Importantly, this module knows nothing of virtual memory, or + of MMUs etc., with one excusable exception. We have the + concept of a "page size", which really means nothing in + physical memory, but represents a "contiguity quantum" such + that the higher level modules which map this memory are able + to verify that it matches the needs of the page size for the + virtual realm into which it is being mapped. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef SRVSRV_PMR_H +#define SRVSRV_PMR_H + +/* include/ */ +#include "img_types.h" +#include "img_defs.h" +#include "pdumpdefs.h" +#include "pvrsrv_error.h" +#include "pvrsrv_memallocflags.h" +#include "devicemem_typedefs.h" /* Required for export DEVMEM_EXPORTCOOKIE */ + +/* services/include */ +#include "pdump.h" +#include "physheap.h" + +/* services/server/include/ */ +#include "pmr_impl.h" +#include "opaque_types.h" + +#define PMR_MAX_TRANSLATION_STACK_ALLOC (32) + +/* Maximum number of pages a PMR can have is 1G of memory */ +#define PMR_MAX_SUPPORTED_PAGE_COUNT (262144) + +typedef IMG_UINT64 PMR_BASE_T; +typedef IMG_UINT64 PMR_SIZE_T; +#define PMR_SIZE_FMTSPEC "0x%010"IMG_UINT64_FMTSPECX +#define PMR_VALUE32_FMTSPEC "0x%08X" +#define PMR_VALUE64_FMTSPEC "0x%016"IMG_UINT64_FMTSPECX +typedef IMG_UINT32 PMR_LOG2ALIGN_T; +typedef IMG_UINT64 PMR_PASSWORD_T; + +struct _PMR_MAPPING_TABLE_ +{ + PMR_SIZE_T uiChunkSize; /*!< Size of a "chunk" */ + IMG_UINT32 ui32NumPhysChunks; /*!< Number of physical chunks that are valid */ + IMG_UINT32 ui32NumVirtChunks; /*!< Number of virtual chunks in the mapping */ + /* Must be last */ + IMG_UINT32 aui32Translation[1]; /*!< Translation mapping for "logical" to physical */ +}; + +#define TRANSLATION_INVALID 0xFFFFFFFFUL + +typedef struct _PMR_EXPORT_ PMR_EXPORT; + +typedef struct _PMR_PAGELIST_ PMR_PAGELIST; + +/* + * PMRCreatePMR + * + * Not to be called directly, only via implementations of PMR + * factories, e.g. in physmem_osmem.c, deviceclass.c, etc. + * + * Creates a PMR object, with callbacks and private data as per the + * FuncTab/PrivData args. + * + * Note that at creation time the PMR must set in stone the "logical + * size" and the "contiguity guarantee" + * + * Flags are also set at this time. (T.B.D. flags also immutable for + * the life of the PMR?) 
+ * + * Logical size is the amount of Virtual space this allocation would + * take up when mapped. Note that this does not have to be the same + * as the actual physical size of the memory. For example, consider + * the sparsely allocated non-power-of-2 texture case. In this + * instance, the "logical size" would be the virtual size of the + * rounded-up power-of-2 texture. That some pages of physical memory + * may not exist does not affect the logical size calculation. + * + * The PMR must also supply the "contiguity guarantee" which is the + * finest granularity of alignment and size of physical pages that the + * PMR will provide after LockSysPhysAddresses is called. Note that + * the calling code may choose to call PMRSysPhysAddr with a finer + * granularity than this, for example if it were to map into a device + * MMU with a smaller page size, and it's also OK for the PMR to + * supply physical memory in larger chunks than this. But + * importantly, never the other way around. + * + * More precisely, the following inequality must be maintained + * whenever mappings and/or physical addresses exist: + * + * (device MMU page size) <= 2**(uiLog2ContiguityGuarantee) <= (actual contiguity of physical memory) + * + * The function table will contain the following callbacks which may + * be overridden by the PMR implementation: + * + * pfnLockPhysAddresses + * + * Called when someone locks requests that Physical pages are to + * be locked down via the PMRLockSysPhysAddresses() API. Note + * that if physical pages are prefaulted at PMR creation time and + * therefore static, it would not be necessary to override this + * function, in which case NULL may be supplied. + * + * pfnUnlockPhysAddresses + * + * The reverse of pfnLockPhysAddresses. Note that this should be + * NULL if and only if pfnLockPhysAddresses is NULL + * + * pfnSysPhysAddr + * + * This function is mandatory. 
This is the one which returns the + * system physical address for a given offset into this PMR. The + * "lock" function will have been called, if overridden, before + * this function, thus the implementation should not increase any + * refcount when answering this call. Refcounting, if necessary, + * should be done in the lock/unlock calls. Refcounting would + * not be necessary in the prefaulted/static scenario, as the + * pmr.c abstraction will handle the refcounting for the whole + * PMR. + * + * pfnFinalize + * + * Called when the PMR's refcount reaches zero and it gets + * destroyed. This allows the implementation to free up any + * resource acquired during creation time. + * + */ +PVRSRV_ERROR +PMRCreatePMR(PHYS_HEAP *psPhysHeap, + PMR_SIZE_T uiLogicalSize, + PMR_SIZE_T uiChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee, + PMR_FLAGS_T uiFlags, + const IMG_CHAR *pszAnnotation, + const PMR_IMPL_FUNCTAB *psFuncTab, + PMR_IMPL_PRIVDATA pvPrivData, + PMR_IMPL_TYPE eType, + PMR **ppsPMRPtr, + IMG_UINT32 ui32PDumpFlags); + +/* + * PMRLockSysPhysAddresses() + * + * Calls the relevant callback to lock down the system physical addresses of + * the memory that makes up the whole PMR. + * + * Before this call, it is not valid to use any of the information + * getting APIs: PMR_Flags(), PMR_SysPhysAddr(), + * [ see note below about lock/unlock semantics ] + * + * The caller of this function does not have to care about how the PMR + * is implemented. He only has to know that he is allowed access to + * the physical addresses _after_ calling this function and _until_ + * calling PMRUnlockSysPhysAddresses(). 
+ * + * + * Notes to callback implementers (authors of PMR Factories): + * + * Some PMR implementations will be such that the physical memory exists for + * the lifetime of the PMR, with a static address, (and normally flags and + * symbolic address are static too) and so it is legal for a PMR + * implementation to not provide an implementation for the lock callback. + * + * Some PMR implementation may wish to page memory in from secondary storage + * on demand. The lock/unlock callbacks _may_ be the place to do this. + * (More likely, there would be a separate API for doing this, but this API + * provides a useful place to assert that it has been done) + */ + +PVRSRV_ERROR +PMRLockSysPhysAddresses(PMR *psPMR); + +PVRSRV_ERROR +PMRLockSysPhysAddressesNested(PMR *psPMR, + IMG_UINT32 ui32NestingLevel); + +/* + * PMRUnlockSysPhysAddresses() + * + * the reverse of PMRLockSysPhysAddresses() + */ +PVRSRV_ERROR +PMRUnlockSysPhysAddresses(PMR *psPMR); + +PVRSRV_ERROR +PMRUnlockSysPhysAddressesNested(PMR *psPMR, IMG_UINT32 ui32NestingLevel); + + +/*************************************************************************/ /*! +@Function PMRUnpinPMR +@Description This is the counterpart to PMRPinPMR(). It is meant to be + called before repinning an allocation. + + For a detailed description see client API documentation. + +@Input psPMR The physical memory to unpin. + +@Input bDevMapped A flag that indicates if this PMR has been + mapped to device virtual space. + Needed to check if this PMR is allowed to be + unpinned or not. + +@Return PVRSRV_ERROR: PVRSRV_OK on success and the memory is + registered to be reclaimed. Error otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR PMRUnpinPMR(PMR *psPMR, IMG_BOOL bDevMapped); + +/*************************************************************************/ /*! +@Function PMRPinPMR +@Description This is the counterpart to PMRUnpinPMR(). 
It is meant to be + called after unpinning an allocation. + + For a detailed description see client API documentation. + +@Input psPMR The physical memory to pin. + +@Return PVRSRV_ERROR: PVRSRV_OK on success and the allocation content + was successfully restored. + + PVRSRV_ERROR_PMR_NEW_MEMORY when the content + could not be restored and new physical memory + was allocated. + + A different error otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR PMRPinPMR(PMR *psPMR); + +/* + * PhysmemPMRExport() + * + * Given a PMR, creates a PMR "Export", which is a handle that + * provides sufficient data to be able to "import" this PMR elsewhere. + * The PMR Export is an object in its own right, whose existence + * implies a reference on the PMR, thus the PMR cannot be destroyed + * while the PMR Export exists. The intention is that the PMR Export + * will be wrapped in the devicemem layer by a cross process handle, + * and some IPC by which to communicate the handle value and password + * to other processes. The receiving process is able to unwrap this + * to gain access to the same PMR Export in this layer, and, via + * PhysmemPMRImport(), obtain a reference to the original PMR. + * + * The caller receives, along with the PMR Export object, information + * about the size and contiguity guarantee for the PMR, and also the + * PMRs secret password, in order to authenticate the subsequent + * import. + * + * N.B. If you call PMRExportPMR() (and it succeeds), you are + * promising to later call PMRUnexportPMR() + */ +PVRSRV_ERROR +PMRExportPMR(PMR *psPMR, + PMR_EXPORT **ppsPMRExport, + PMR_SIZE_T *puiSize, + PMR_LOG2ALIGN_T *puiLog2Contig, + PMR_PASSWORD_T *puiPassword); + +/*! +******************************************************************************* + + @Function PMRMakeLocalImportHandle + + @Description + + Transform a general handle type into one that we are able to import. + Takes a PMR reference. 
+ + @Input psPMR The input PMR. + @Output ppsPMR The output PMR that is going to be transformed to the + correct handle type. + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR +PMRMakeLocalImportHandle(PMR *psPMR, + PMR **ppsPMR); + +/*! +******************************************************************************* + + @Function PMRUnmakeLocalImportHandle + + @Description + + Take a PMR, destroy the handle and release a reference. + Counterpart to PMRMakeServerExportClientExport(). + + @Input psPMR PMR to destroy. + Created by PMRMakeLocalImportHandle(). + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR +PMRUnmakeLocalImportHandle(PMR *psPMR); + +/* + * PMRUnexporPMRt() + * + * The reverse of PMRExportPMR(). This causes the PMR to no longer be + * exported. If the PMR has already been imported, the imported PMR + * reference will still be valid, but no further imports will be possible. + */ +PVRSRV_ERROR +PMRUnexportPMR(PMR_EXPORT *psPMRExport); + +/* + * PMRImportPMR() + * + * Takes a PMR Export object, as obtained by PMRExportPMR(), and + * obtains a reference to the original PMR. + * + * The password must match, and is assumed to have been (by whatever + * means, IPC etc.) preserved intact from the former call to + * PMRExportPMR() + * + * The size and contiguity arguments are entirely irrelevant for the + * import, however they are verified in order to trap bugs. + * + * N.B. 
If you call PhysmemPMRImport() (and it succeeds), you are + * promising to later call PhysmemPMRUnimport() + */ +PVRSRV_ERROR +PMRImportPMR(PMR_EXPORT *psPMRExport, + PMR_PASSWORD_T uiPassword, + PMR_SIZE_T uiSize, + PMR_LOG2ALIGN_T uiLog2Contig, + PMR **ppsPMR); + +/* Function that alters the mutability property + * of the PMR + * Setting it to TRUE makes sure the PMR memory layout + * can't be changed through future calls */ +void +PMR_SetLayoutFixed(PMR *psPMR, IMG_BOOL bFlag); + +IMG_BOOL PMR_IsMemLayoutFixed(PMR *psPMR); + +/* + * PMRUnimportPMR() + * + * releases the reference on the PMR as obtained by PMRImportPMR() + */ +PVRSRV_ERROR +PMRUnimportPMR(PMR *psPMR); + +PVRSRV_ERROR +PMRLocalImportPMR(PMR *psPMR, + PMR **ppsPMR, + IMG_DEVMEM_SIZE_T *puiSize, + IMG_DEVMEM_ALIGN_T *puiAlign); + +/* + * Equivalent mapping functions when in kernel mode. + */ +PVRSRV_ERROR +PMRAcquireKernelMappingData(PMR *psPMR, + size_t uiLogicalOffset, + size_t uiSize, + void **ppvKernelAddressOut, + size_t *puiLengthOut, + IMG_HANDLE *phPrivOut); + +PVRSRV_ERROR +PMRAcquireSparseKernelMappingData(PMR *psPMR, + size_t uiLogicalOffset, + size_t uiSize, + void **ppvKernelAddressOut, + size_t *puiLengthOut, + IMG_HANDLE *phPrivOut); + +PVRSRV_ERROR +PMRReleaseKernelMappingData(PMR *psPMR, + IMG_HANDLE hPriv); + +#if defined(INTEGRITY_OS) +PVRSRV_ERROR +PMRMapMemoryObject(PMR *psPMR, + IMG_HANDLE *phMemObj, + void **pvClientAddr, + IMG_HANDLE *phPrivOut); +PVRSRV_ERROR +PMRUnmapMemoryObject(PMR *psPMR, + IMG_HANDLE hPriv); +#endif + +/* + * PMR_ReadBytes() + * + * calls into the PMR implementation to read up to uiBufSz bytes, + * returning the actual number read in *puiNumBytes + * + * this will read up to the end of the PMR, or the next symbolic name + * boundary, or until the requested number of bytes is read, whichever + * comes first + * + * In the case of sparse PMR's the caller doesn't know what offsets are + * valid and which ones aren't so we will just write 0 to invalid 
offsets + */ +PVRSRV_ERROR +PMR_ReadBytes(PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT8 *pcBuffer, + size_t uiBufSz, + size_t *puiNumBytes); + +/* + * PMR_WriteBytes() + * + * calls into the PMR implementation to write up to uiBufSz bytes, + * returning the actual number read in *puiNumBytes + * + * this will write up to the end of the PMR, or the next symbolic name + * boundary, or until the requested number of bytes is written, whichever + * comes first + * + * In the case of sparse PMR's the caller doesn't know what offsets are + * valid and which ones aren't so we will just ignore data at invalid offsets + */ +PVRSRV_ERROR +PMR_WriteBytes(PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT8 *pcBuffer, + size_t uiBufSz, + size_t *puiNumBytes); + +/*************************************************************************/ /*! +@Function PMRMMapPMR +@Description Performs the necessary steps to map the PMR into a user process + address space. The caller does not need to call + PMRLockSysPhysAddresses before calling this function. + +@Input psPMR PMR to map. + +@Input pOSMMapData OS specific data needed to create a mapping. + +@Return PVRSRV_ERROR: PVRSRV_OK on success or an error otherwise. +*/ /**************************************************************************/ +PVRSRV_ERROR +PMRMMapPMR(PMR *psPMR, PMR_MMAP_DATA pOSMMapData); + +/* + * PMRRefPMR() + * + * Take a reference on the passed in PMR + */ +void +PMRRefPMR(PMR *psPMR); + +/* + * PMRUnrefPMR() + * + * This undoes a call to any of the PhysmemNew* family of APIs + * (i.e. any PMR factory "constructor") + * + * This relinquishes a reference to the PMR, and, where the refcount + * reaches 0, causes the PMR to be destroyed (calling the finalizer + * callback on the PMR, if there is one) + */ +PVRSRV_ERROR +PMRUnrefPMR(PMR *psPMR); + +/* + * PMRUnrefUnlockPMR() + * + * Same as above but also unlocks the PMR. 
+ */ +PVRSRV_ERROR +PMRUnrefUnlockPMR(PMR *psPMR); + +PPVRSRV_DEVICE_NODE +PMR_DeviceNode(const PMR *psPMR); + +/* + * PMRIsPMRLive() + * + * This function returns true if the PMR is in use and false otherwise. + * This function is not thread safe and hence the caller needs to ensure the + * thread safety by explicitly taking PMR or through other means. + */ +IMG_BOOL PMRIsPMRLive(PMR *psPMR); + +/* + * PMR_Flags() + * + * Flags are static and guaranteed for the life of the PMR. Thus this + * function is idempotent and acquire/release semantics is not required. + * + * Returns the flags as specified on the PMR. The flags are to be + * interpreted as mapping permissions + */ +PMR_FLAGS_T +PMR_Flags(const PMR *psPMR); + +IMG_BOOL +PMR_IsSparse(const PMR *psPMR); + +IMG_BOOL +PMR_IsUnpinned(const PMR *psPMR); + +PVRSRV_ERROR +PMR_LogicalSize(const PMR *psPMR, + IMG_DEVMEM_SIZE_T *puiLogicalSize); + +PVRSRV_ERROR +PMR_PhysicalSize(const PMR *psPMR, + IMG_DEVMEM_SIZE_T *puiPhysicalSize); + +PHYS_HEAP * +PMR_PhysHeap(const PMR *psPMR); + +PMR_MAPPING_TABLE * +PMR_GetMappingTable(const PMR *psPMR); + +IMG_UINT32 +PMR_GetLog2Contiguity(const PMR *psPMR); + +const IMG_CHAR * +PMR_GetAnnotation(const PMR *psPMR); + +/* + * PMR_IsOffsetValid() + * + * Returns if an address offset inside a PMR has a valid + * physical backing. + */ +PVRSRV_ERROR +PMR_IsOffsetValid(const PMR *psPMR, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32NumOfPages, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_BOOL *pbValid); + +PMR_IMPL_TYPE +PMR_GetType(const PMR *psPMR); + +IMG_INT32 +PMR_GetRefCount(const PMR *psPMR); + +/* + * PMR_DevPhysAddr() + * + * A note regarding Lock/Unlock semantics + * ====================================== + * + * PMR_DevPhysAddr may only be called after PMRLockSysPhysAddresses() + * has been called. 
The data returned may be used only until + * PMRUnlockSysPhysAddresses() is called after which time the licence + * to use the data is revoked and the information may be invalid. + * + * Given an offset, this function returns the device physical address of the + * corresponding page in the PMR. It may be called multiple times + * until the address of all relevant pages has been determined. + * + * If caller only wants one physical address it is sufficient to pass in: + * ui32Log2PageSize==0 and ui32NumOfPages==1 + */ +PVRSRV_ERROR +PMR_DevPhysAddr(const PMR *psPMR, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32NumOfPages, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_DEV_PHYADDR *psDevAddr, + IMG_BOOL *pbValid); + +/* + * PMR_CpuPhysAddr() + * + * See note above about Lock/Unlock semantics. + * + * Given an offset, this function returns the CPU physical address of the + * corresponding page in the PMR. It may be called multiple times + * until the address of all relevant pages has been determined. + * + */ +PVRSRV_ERROR +PMR_CpuPhysAddr(const PMR *psPMR, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32NumOfPages, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_CPU_PHYADDR *psCpuAddrPtr, + IMG_BOOL *pbValid); + +PVRSRV_ERROR +PMRGetUID(PMR *psPMR, + IMG_UINT64 *pui64UID); +/* + * PMR_ChangeSparseMem() + * + * See note above about Lock/Unlock semantics. + * + * This function alters the memory map of the given PMR in device space by + * adding/deleting the pages as requested. + * + */ +PVRSRV_ERROR PMR_ChangeSparseMem(PMR *psPMR, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices, + IMG_UINT32 uiSparseFlags); + +/* + * PMR_ChangeSparseMemCPUMap() + * + * See note above about Lock/Unlock semantics. + * + * This function alters the memory map of the given PMR in CPU space by + * adding/deleting the pages as requested. 
+ */ +PVRSRV_ERROR PMR_ChangeSparseMemCPUMap(PMR *psPMR, + IMG_UINT64 sCpuVAddrBase, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices); + +#if defined(PDUMP) + +void +PDumpPMRMallocPMR(PMR *psPMR, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_ALIGN_T uiBlockSize, + IMG_UINT32 ui32ChunkSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *puiMappingTable, + IMG_UINT32 uiLog2Contiguity, + IMG_BOOL bInitialise, + IMG_UINT32 ui32InitValue, + IMG_HANDLE *phPDumpAllocInfoPtr, + IMG_UINT32 ui32PDumpFlags); + +void +PDumpPMRFreePMR(PMR *psPMR, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_ALIGN_T uiBlockSize, + IMG_UINT32 uiLog2Contiguity, + IMG_HANDLE hPDumpAllocationInfoHandle); + +void +PDumpPMRChangeSparsePMR(PMR *psPMR, + IMG_UINT32 uiBlockSize, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices, + IMG_BOOL bInitialise, + IMG_UINT32 ui32InitValue, + IMG_HANDLE *phPDumpAllocInfoOut); + +/* + * PMR_PDumpSymbolicAddr() + * + * Given an offset, returns the pdump memspace name and symbolic + * address of the corresponding page in the PMR. + * + * Note that PDump memspace names and symbolic addresses are static + * and valid for the lifetime of the PMR, therefore we don't require + * acquire/release semantics here. 
+ * + * Note that it is expected that the pdump "mapping" code will call + * this function multiple times as each page is mapped in turn + * + * Note that NextSymName is the offset from the base of the PMR to the + * next pdump symbolic address (or the end of the PMR if the PMR only + * had one PDUMPMALLOC + */ +PVRSRV_ERROR +PMR_PDumpSymbolicAddr(const PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT32 ui32NamespaceNameLen, + IMG_CHAR *pszNamespaceName, + IMG_UINT32 ui32SymbolicAddrLen, + IMG_CHAR *pszSymbolicAddr, + IMG_DEVMEM_OFFSET_T *puiNewOffset, + IMG_DEVMEM_OFFSET_T *puiNextSymName + ); + +/* + * PMRPDumpLoadMemValue32() + * + * writes the current contents of a dword in PMR memory to the pdump + * script stream. Useful for patching a buffer by simply editing the + * script output file in ASCII plain text. + * + */ +PVRSRV_ERROR +PMRPDumpLoadMemValue32(PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT32 ui32Value, + PDUMP_FLAGS_T uiPDumpFlags); + +/* + * PMRPDumpCopyMem32 + * + * Adds in the pdump script stream a copy of a dword in one PMR memory + * location to another PMR memory location. + * + */ +PVRSRV_ERROR +PMRPDumpCopyMem32(PMR *psDstPMR, + IMG_DEVMEM_OFFSET_T uiDstLogicalOffset, + PMR *psSrcPMR, + IMG_DEVMEM_OFFSET_T uiSrcLogicalOffset, + const IMG_CHAR *pszTmpVar, + PDUMP_FLAGS_T uiPDumpFlags); + +/* + * PMRPDumpLoadMemValue64() + * + * writes the current contents of a dword in PMR memory to the pdump + * script stream. Useful for patching a buffer by simply editing the + * script output file in ASCII plain text. + * + */ +PVRSRV_ERROR +PMRPDumpLoadMemValue64(PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT64 ui64Value, + PDUMP_FLAGS_T uiPDumpFlags); + +/* + * PMRPDumpCopyMem64 + * + * Adds in the pdump script stream a copy of a quadword in one PMR memory + * location to another PMR memory location. 
+ */ +PVRSRV_ERROR +PMRPDumpCopyMem64(PMR *psDstPMR, + IMG_DEVMEM_OFFSET_T uiDstLogicalOffset, + PMR *psSrcPMR, + IMG_DEVMEM_OFFSET_T uiSrcLogicalOffset, + const IMG_CHAR *pszTmpVar, + PDUMP_FLAGS_T uiPDumpFlags); + +/* + * PMRPDumpLoadMem() + * + * Writes the current contents of the PMR memory to the pdump PRM stream, + * and emits some PDump code to the script stream to LDB said bytes from + * said file. If bZero is IMG_TRUE then the PDump zero page is used as the + * source for the LDB. + */ +PVRSRV_ERROR +PMRPDumpLoadMem(PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_DEVMEM_SIZE_T uiSize, + PDUMP_FLAGS_T uiPDumpFlags, + IMG_BOOL bZero); + +/* + * PMRPDumpSaveToFile() + * + * Emits some PDump that does an SAB (save bytes) using the PDump symbolic + * address of the PMR. Note that this is generally not the preferred way to + * dump the buffer contents. There is an equivalent function in + * devicemem_server.h which also emits SAB but using the virtual address, + * which is the "right" way to dump the buffer contents to a file. + * This function exists just to aid testing by providing a means to dump + * the PMR directly by symbolic address also. 
+ */ +PVRSRV_ERROR +PMRPDumpSaveToFile(const PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT32 uiArraySize, + const IMG_CHAR *pszFilename, + IMG_UINT32 uiFileOffset); +#else /* PDUMP */ + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PDumpPMRMallocPMR) +#endif +static INLINE void +PDumpPMRMallocPMR(PMR *psPMR, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_ALIGN_T uiBlockSize, + IMG_UINT32 ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *puiMappingTable, + IMG_UINT32 uiLog2Contiguity, + IMG_BOOL bInitialise, + IMG_UINT32 ui32InitValue, + IMG_HANDLE *phPDumpAllocInfoPtr, + IMG_UINT32 ui32PDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psPMR); + PVR_UNREFERENCED_PARAMETER(uiSize); + PVR_UNREFERENCED_PARAMETER(uiBlockSize); + PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks); + PVR_UNREFERENCED_PARAMETER(ui32NumVirtChunks); + PVR_UNREFERENCED_PARAMETER(puiMappingTable); + PVR_UNREFERENCED_PARAMETER(uiLog2Contiguity); + PVR_UNREFERENCED_PARAMETER(bInitialise); + PVR_UNREFERENCED_PARAMETER(ui32InitValue); + PVR_UNREFERENCED_PARAMETER(phPDumpAllocInfoPtr); + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PDumpPMRFreePMR) +#endif +static INLINE void +PDumpPMRFreePMR(PMR *psPMR, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_ALIGN_T uiBlockSize, + IMG_UINT32 uiLog2Contiguity, + IMG_HANDLE hPDumpAllocationInfoHandle) +{ + PVR_UNREFERENCED_PARAMETER(psPMR); + PVR_UNREFERENCED_PARAMETER(uiSize); + PVR_UNREFERENCED_PARAMETER(uiBlockSize); + PVR_UNREFERENCED_PARAMETER(uiLog2Contiguity); + PVR_UNREFERENCED_PARAMETER(hPDumpAllocationInfoHandle); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PDumpPMRChangeSparsePMR) +#endif +static INLINE void +PDumpPMRChangeSparsePMR(PMR *psPMR, + IMG_UINT32 uiBlockSize, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices, + IMG_BOOL bInitialise, + IMG_UINT32 ui32InitValue, + IMG_HANDLE 
*phPDumpAllocInfoOut) +{ + PVR_UNREFERENCED_PARAMETER(psPMR); + PVR_UNREFERENCED_PARAMETER(uiBlockSize); + PVR_UNREFERENCED_PARAMETER(ui32AllocPageCount); + PVR_UNREFERENCED_PARAMETER(pai32AllocIndices); + PVR_UNREFERENCED_PARAMETER(ui32FreePageCount); + PVR_UNREFERENCED_PARAMETER(pai32FreeIndices); + PVR_UNREFERENCED_PARAMETER(bInitialise); + PVR_UNREFERENCED_PARAMETER(ui32InitValue); + PVR_UNREFERENCED_PARAMETER(phPDumpAllocInfoOut); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PMR_PDumpSymbolicAddr) +#endif +static INLINE PVRSRV_ERROR +PMR_PDumpSymbolicAddr(const PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT32 ui32NamespaceNameLen, + IMG_CHAR *pszNamespaceName, + IMG_UINT32 ui32SymbolicAddrLen, + IMG_CHAR *pszSymbolicAddr, + IMG_DEVMEM_OFFSET_T *puiNewOffset, + IMG_DEVMEM_OFFSET_T *puiNextSymName) +{ + PVR_UNREFERENCED_PARAMETER(psPMR); + PVR_UNREFERENCED_PARAMETER(uiLogicalOffset); + PVR_UNREFERENCED_PARAMETER(ui32NamespaceNameLen); + PVR_UNREFERENCED_PARAMETER(pszNamespaceName); + PVR_UNREFERENCED_PARAMETER(ui32SymbolicAddrLen); + PVR_UNREFERENCED_PARAMETER(pszSymbolicAddr); + PVR_UNREFERENCED_PARAMETER(puiNewOffset); + PVR_UNREFERENCED_PARAMETER(puiNextSymName); + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PMRPDumpLoadMemValue32) +#endif +static INLINE PVRSRV_ERROR +PMRPDumpLoadMemValue32(PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT32 ui32Value, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psPMR); + PVR_UNREFERENCED_PARAMETER(uiLogicalOffset); + PVR_UNREFERENCED_PARAMETER(ui32Value); + PVR_UNREFERENCED_PARAMETER(uiPDumpFlags); + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PMRPDumpLoadMemValue64) +#endif +static INLINE PVRSRV_ERROR +PMRPDumpLoadMemValue64(PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT64 ui64Value, + PDUMP_FLAGS_T uiPDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psPMR); + PVR_UNREFERENCED_PARAMETER(uiLogicalOffset); + 
PVR_UNREFERENCED_PARAMETER(ui64Value); + PVR_UNREFERENCED_PARAMETER(uiPDumpFlags); + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PMRPDumpLoadMem) +#endif +static INLINE PVRSRV_ERROR +PMRPDumpLoadMem(PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_DEVMEM_SIZE_T uiSize, + PDUMP_FLAGS_T uiPDumpFlags, + IMG_BOOL bZero) +{ + PVR_UNREFERENCED_PARAMETER(psPMR); + PVR_UNREFERENCED_PARAMETER(uiLogicalOffset); + PVR_UNREFERENCED_PARAMETER(uiSize); + PVR_UNREFERENCED_PARAMETER(uiPDumpFlags); + PVR_UNREFERENCED_PARAMETER(bZero); + return PVRSRV_OK; +} + + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PMRPDumpSaveToFile) +#endif +static INLINE PVRSRV_ERROR +PMRPDumpSaveToFile(const PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT32 uiArraySize, + const IMG_CHAR *pszFilename, + IMG_UINT32 uiFileOffset) +{ + PVR_UNREFERENCED_PARAMETER(psPMR); + PVR_UNREFERENCED_PARAMETER(uiLogicalOffset); + PVR_UNREFERENCED_PARAMETER(uiSize); + PVR_UNREFERENCED_PARAMETER(uiArraySize); + PVR_UNREFERENCED_PARAMETER(pszFilename); + PVR_UNREFERENCED_PARAMETER(uiFileOffset); + return PVRSRV_OK; +} + +#endif /* PDUMP */ + +/* This function returns the private data that a pmr subtype embedded in + * here. We use the function table pointer as "authorisation" that this + * function is being called by the pmr subtype implementation. We can + * assume (assert) that. It would be a bug in the implementation of the + * pmr subtype if this assertion ever fails. 
+ */ +void * +PMRGetPrivateData(const PMR *psPMR, + const PMR_IMPL_FUNCTAB *psFuncTab); + +PVRSRV_ERROR +PMRZeroingPMR(PMR *psPMR, + IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize); + +PVRSRV_ERROR +PMRDumpPageList(PMR *psReferencePMR, + IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize); + +PVRSRV_ERROR +PMRWritePMPageList(/* Target PMR, offset, and length */ + PMR *psPageListPMR, + IMG_DEVMEM_OFFSET_T uiTableOffset, + IMG_DEVMEM_SIZE_T uiTableLength, + /* Referenced PMR, and "page" granularity */ + PMR *psReferencePMR, + IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize, + PMR_PAGELIST **ppsPageList); + +/* Doesn't actually erase the page list - just releases + * the appropriate refcounts + */ +PVRSRV_ERROR // should be void, surely +PMRUnwritePMPageList(PMR_PAGELIST *psPageList); + +#if defined(PDUMP) +PVRSRV_ERROR +PMRPDumpPol32(const PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + PDUMP_FLAGS_T uiFlags); + +PVRSRV_ERROR +PMRPDumpCheck32(const PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + PDUMP_FLAGS_T uiPDumpFlags); + +PVRSRV_ERROR +PMRPDumpCBP(const PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiReadOffset, + IMG_DEVMEM_OFFSET_T uiWriteOffset, + IMG_DEVMEM_SIZE_T uiPacketSize, + IMG_DEVMEM_SIZE_T uiBufferSize); +#else + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PMRPDumpPol32) +#endif +static INLINE PVRSRV_ERROR +PMRPDumpPol32(const PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + PDUMP_FLAGS_T uiFlags) +{ + PVR_UNREFERENCED_PARAMETER(psPMR); + PVR_UNREFERENCED_PARAMETER(uiLogicalOffset); + PVR_UNREFERENCED_PARAMETER(ui32Value); + PVR_UNREFERENCED_PARAMETER(ui32Mask); + PVR_UNREFERENCED_PARAMETER(eOperator); + PVR_UNREFERENCED_PARAMETER(uiFlags); + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PMRPDumpCheck32) +#endif +static INLINE 
PVRSRV_ERROR +PMRPDumpCheck32(const PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiLogicalOffset, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + PDUMP_FLAGS_T uiFlags) +{ + PVR_UNREFERENCED_PARAMETER(psPMR); + PVR_UNREFERENCED_PARAMETER(uiLogicalOffset); + PVR_UNREFERENCED_PARAMETER(ui32Value); + PVR_UNREFERENCED_PARAMETER(ui32Mask); + PVR_UNREFERENCED_PARAMETER(eOperator); + PVR_UNREFERENCED_PARAMETER(uiFlags); + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PMRPDumpCBP) +#endif +static INLINE PVRSRV_ERROR +PMRPDumpCBP(const PMR *psPMR, + IMG_DEVMEM_OFFSET_T uiReadOffset, + IMG_DEVMEM_OFFSET_T uiWriteOffset, + IMG_DEVMEM_SIZE_T uiPacketSize, + IMG_DEVMEM_SIZE_T uiBufferSize) +{ + PVR_UNREFERENCED_PARAMETER(psPMR); + PVR_UNREFERENCED_PARAMETER(uiReadOffset); + PVR_UNREFERENCED_PARAMETER(uiWriteOffset); + PVR_UNREFERENCED_PARAMETER(uiPacketSize); + PVR_UNREFERENCED_PARAMETER(uiBufferSize); + return PVRSRV_OK; +} +#endif + +PPVRSRV_DEVICE_NODE PMRGetExportDeviceNode(PMR_EXPORT *psExportPMR); + +/* + * PMRInit() + * + * To be called once and only once to initialise the internal data in + * the PMR module (mutexes and such) + * + * Not for general use. Only PVRSRVInit(); should be calling this. + */ +PVRSRV_ERROR +PMRInit(void); + +/* + * PMRDeInit() + * + * To be called once and only once to deinitialise the internal data in + * the PMR module (mutexes and such) and for debug checks + * + * Not for general use. Only PVRSRVDeInit(); should be calling this. 
+ */ +PVRSRV_ERROR +PMRDeInit(void); + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) +PVRSRV_ERROR +PMRStoreRIHandle(PMR *psPMR, void *hRIHandle); +#endif + +#endif /* #ifdef SRVSRV_PMR_H */ diff --git a/drivers/gpu/drm/phytium/octopus/pmr_impl.h b/drivers/gpu/drm/phytium/octopus/pmr_impl.h new file mode 100644 index 000000000000..6adb9a234a59 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pmr_impl.h @@ -0,0 +1,553 @@ +/**************************************************************************/ /*! +@File +@Title Implementation Callbacks for Physmem (PMR) abstraction +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Part of the memory management. This file is for definitions + that are private to the world of PMRs, but that need to be + shared between pmr.c itself and the modules that implement the + callbacks for the PMR. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /***************************************************************************/ + +#ifndef SRVSRV_PMR_IMPL_H +#define SRVSRV_PMR_IMPL_H + +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv_error.h" + +/*! Physical Memory Resource type. + */ +typedef struct _PMR_ PMR; + +/*! Per-flavour callbacks need to be shared with generic implementation + * (pmr.c). + */ +typedef void *PMR_IMPL_PRIVDATA; + +/*! Type for holding flags passed to the PMR factory. + */ +typedef PVRSRV_MEMALLOCFLAGS_T PMR_FLAGS_T; + +/*! Mapping table for the allocation. + * + * PMR's can be sparse in which case not all the logical addresses in it are + * valid. The mapping table translates logical offsets into physical offsets. + * + * This table is always passed to the PMR factory regardless if the memory is + * sparse or not. 
In case of non-sparse memory all virtual offsets are mapped + * to physical offsets. + */ +typedef struct _PMR_MAPPING_TABLE_ PMR_MAPPING_TABLE; + +/*! Private data passed to the ::PFN_MMAP_FN function. + */ +typedef void *PMR_MMAP_DATA; + +/*! PMR factory type. + */ +typedef enum _PMR_IMPL_TYPE_ +{ + PMR_TYPE_NONE = 0, + PMR_TYPE_OSMEM, + PMR_TYPE_LMA, + PMR_TYPE_DMABUF, + PMR_TYPE_EXTMEM, + PMR_TYPE_DC, + PMR_TYPE_TDFWMEM, + PMR_TYPE_TDSECBUF +} PMR_IMPL_TYPE; + +/*************************************************************************/ /*! +@Brief Callback function type PFN_LOCK_PHYS_ADDRESSES_FN + +@Description Called to lock down the physical addresses for all pages + allocated for a PMR. + The default implementation is to simply increment a + lock-count for debugging purposes. + If overridden, the PFN_LOCK_PHYS_ADDRESSES_FN function will + be called when someone first requires a physical address, + and the PFN_UNLOCK_PHYS_ADDRESSES_FN counterpart will be + called when the last such reference is released. + The PMR implementation may assume that physical addresses + will have been "locked" in this manner before any call is + made to the pfnDevPhysAddr() callback + +@Input pvPriv Private data (which was generated by the + PMR factory when PMR was created) + +@Return PVRSRV_OK if the operation was successful, an error code + otherwise. +*/ /**************************************************************************/ +typedef PVRSRV_ERROR (*PFN_LOCK_PHYS_ADDRESSES_FN)(PMR_IMPL_PRIVDATA pvPriv); + +/*************************************************************************/ /*! +@Brief Callback function type PFN_UNLOCK_PHYS_ADDRESSES_FN + +@Description Called to release the lock taken on the physical addresses + for all pages allocated for a PMR. + The default implementation is to simply decrement a + lock-count for debugging purposes. + If overridden, the PFN_UNLOCK_PHYS_ADDRESSES_FN will be + called when the last reference taken on the PMR is + released. 
+ +@Input pvPriv Private data (which was generated by the + PMR factory when PMR was created) + +@Return PVRSRV_OK if the operation was successful, an error code + otherwise. +*/ /**************************************************************************/ +typedef PVRSRV_ERROR (*PFN_UNLOCK_PHYS_ADDRESSES_FN)(PMR_IMPL_PRIVDATA pvPriv); + +/*************************************************************************/ /*! +@Brief Callback function type PFN_DEV_PHYS_ADDR_FN + +@Description Called to obtain one or more physical addresses for given + offsets within a PMR. + + The PFN_LOCK_PHYS_ADDRESSES_FN callback (if overridden) is + guaranteed to have been called prior to calling the + PFN_DEV_PHYS_ADDR_FN callback and the caller promises not to + rely on the physical address thus obtained after the + PFN_UNLOCK_PHYS_ADDRESSES_FN callback is called. + + Implementation of this callback is mandatory. + +@Input pvPriv Private data (which was generated by the + PMR factory when PMR was created) +@Input ui32Log2PageSize The log2 page size. +@Input ui32NumOfAddr The number of addresses to be returned +@Input puiOffset The offset from the start of the PMR + (in bytes) for which the physical + address is required. Where multiple + addresses are requested, this will + contain a list of offsets. +@Output pbValid List of boolean flags indicating which + addresses in the returned list + (psDevAddrPtr) are valid (for sparse + allocations, not all pages may have a + physical backing) +@Output psDevAddrPtr Returned list of physical addresses + +@Return PVRSRV_OK if the operation was successful, an error code + otherwise. 
+*/ /**************************************************************************/ +typedef PVRSRV_ERROR (*PFN_DEV_PHYS_ADDR_FN)(PMR_IMPL_PRIVDATA pvPriv, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32NumOfAddr, + IMG_DEVMEM_OFFSET_T *puiOffset, + IMG_BOOL *pbValid, + IMG_DEV_PHYADDR *psDevAddrPtr); + +/*************************************************************************/ /*! +@Brief Callback function type PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN + +@Description Called to obtain a kernel-accessible address (mapped to a + virtual address if required) for the PMR for use internally + in Services. + + Implementation of this function for the (default) PMR factory providing + OS-allocations is mandatory (the driver will expect to be able to call + this function for OS-provided allocations). + For other PMR factories, implementation of this function is only necessary + where an MMU mapping is required for the Kernel to be able to access the + allocated memory. + If no mapping is needed, this function can remain unimplemented and the + pfn may be set to NULL. +@Input pvPriv Private data (which was generated by + the PMR factory when PMR was created) +@Input uiOffset Offset from the beginning of the PMR + at which mapping is to start +@Input uiSize Size of mapping (in bytes) +@Output ppvKernelAddressOut Mapped kernel address +@Output phHandleOut Returned handle of the new mapping +@Input ulFlags Mapping flags + +@Return PVRSRV_OK if the mapping was successful, an error code + otherwise. +*/ /**************************************************************************/ +typedef PVRSRV_ERROR (*PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN)(PMR_IMPL_PRIVDATA pvPriv, + size_t uiOffset, + size_t uiSize, + void **ppvKernelAddressOut, + IMG_HANDLE *phHandleOut, + PMR_FLAGS_T ulFlags); + +/*************************************************************************/ /*! 
+@Brief Callback function type PFN_RELEASE_KERNEL_MAPPING_DATA_FN + +@Description Called to release a mapped kernel virtual address + + Implementation of this callback is mandatory if + PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN is provided for the PMR factory, + otherwise this function can remain unimplemented and the pfn may be set + to NULL. + +@Input pvPriv Private data (which was generated by the + PMR factory when PMR was created) +@Input hHandle Handle of the mapping to be released + +@Return None +*/ /**************************************************************************/ +typedef void (*PFN_RELEASE_KERNEL_MAPPING_DATA_FN)(PMR_IMPL_PRIVDATA pvPriv, + IMG_HANDLE hHandle); + +/*************************************************************************/ /*! +@Brief Callback function type PFN_READ_BYTES_FN + +@Description Called to read bytes from an unmapped allocation + + Implementation of this callback is optional - where it is not provided, + the driver will use PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN to map the entire + PMR (if an MMU mapping is required for the Kernel to be able to access the + allocated memory). + +@Input pvPriv Private data (which was generated by the + PMR factory when PMR was created) +@Input uiOffset Offset from the beginning of the PMR at + which to begin reading +@Output pcBuffer Buffer in which to return the read data +@Input uiBufSz Number of bytes to be read +@Output puiNumBytes Number of bytes actually read (may be + less than uiBufSz) + +@Return PVRSRV_OK if the read was successful, an error code + otherwise. +*/ /**************************************************************************/ +typedef PVRSRV_ERROR (*PFN_READ_BYTES_FN)(PMR_IMPL_PRIVDATA pvPriv, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT8 *pcBuffer, + size_t uiBufSz, + size_t *puiNumBytes); + +/*************************************************************************/ /*! 
+@Brief Callback function type PFN_WRITE_BYTES_FN + +@Description Called to write bytes into an unmapped allocation + + Implementation of this callback is optional - where it is not provided, + the driver will use PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN to map the entire + PMR (if an MMU mapping is required for the Kernel to be able to access the + allocated memory). + +@Input pvPriv Private data (which was generated by the + PMR factory when PMR was created) +@Input uiOffset Offset from the beginning of the PMR at + which to begin writing +@Input pcBuffer Buffer containing the data to be written +@Input uiBufSz Number of bytes to be written +@Output puiNumBytes Number of bytes actually written (may be + less than uiBufSz) + +@Return PVRSRV_OK if the write was successful, an error code + otherwise. +*/ /**************************************************************************/ +typedef PVRSRV_ERROR (*PFN_WRITE_BYTES_FN)(PMR_IMPL_PRIVDATA pvPriv, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_UINT8 *pcBuffer, + size_t uiBufSz, + size_t *puiNumBytes); + +/*************************************************************************/ /*! +@Brief Callback function type PFN_UNPIN_MEM_FN + +@Description Called to unpin an allocation. + Once unpinned, the pages backing the allocation may be + re-used by the Operating System for another purpose. + When the pages are required again, they may be re-pinned + (by calling PFN_PIN_MEM_FN). The driver will try to return + same pages as before. The caller will be told if the + content of these returned pages has been modified or if + the pages returned are not the original pages. + + Implementation of this callback is optional. + +@Input pvPriv Private data (which was generated by the + PMR factory when PMR was created) + +@Return PVRSRV_OK if the unpin was successful, an error code + otherwise. 
+*/ /**************************************************************************/ +typedef PVRSRV_ERROR (*PFN_UNPIN_MEM_FN)(PMR_IMPL_PRIVDATA pPriv); + +/*************************************************************************/ /*! +@Brief Callback function type PFN_PIN_MEM_FN + +@Description Called to pin a previously unpinned allocation. + The driver will try to return same pages as were previously + assigned to the allocation. The caller will be told if the + content of these returned pages has been modified or if + the pages returned are not the original pages. + + Implementation of this callback is optional. + +@Input pvPriv Private data (which was generated by the + PMR factory when PMR was created) + +@Input psMappingTable Mapping table, which describes how + virtual 'chunks' are to be mapped to + physical 'chunks' for the allocation. + +@Return PVRSRV_OK if the original pages were returned unmodified. + PVRSRV_ERROR_PMR_NEW_MEMORY if the memory returned was modified + or different pages were returned. + Another PVRSRV_ERROR code on failure. +*/ /**************************************************************************/ +typedef PVRSRV_ERROR (*PFN_PIN_MEM_FN)(PMR_IMPL_PRIVDATA pPriv, + PMR_MAPPING_TABLE *psMappingTable); + +/*************************************************************************/ /*! +@Brief Callback function type PFN_CHANGE_SPARSE_MEM_FN + +@Description Called to modify the physical backing for a given sparse + allocation. + The caller provides a list of the pages within the sparse + allocation which should be backed with a physical allocation + and a list of the pages which do not require backing. + + Implementation of this callback is mandatory. 
+ +@Input pvPriv Private data (which was generated by the + PMR factory when PMR was created) +@Input psPMR The PMR of the sparse allocation to be + modified +@Input ui32AllocPageCount The number of pages specified in + pai32AllocIndices +@Input pai32AllocIndices The list of pages in the sparse + allocation that should be backed with a + physical allocation. Pages are + referenced by their index within the + sparse allocation (e.g. in a 10 page + allocation, pages are denoted by + indices 0 to 9) +@Input ui32FreePageCount The number of pages specified in + pai32FreeIndices +@Input pai32FreeIndices The list of pages in the sparse + allocation that do not require + a physical allocation. +@Input ui32Flags Allocation flags + +@Return PVRSRV_OK if the sparse allocation physical backing was updated + successfully, an error code otherwise. +*/ /**************************************************************************/ +typedef PVRSRV_ERROR (*PFN_CHANGE_SPARSE_MEM_FN)(PMR_IMPL_PRIVDATA pPriv, + const PMR *psPMR, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices, + IMG_UINT32 uiFlags); + +/*************************************************************************/ /*! +@Brief Callback function type PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN + +@Description Called to modify which pages are mapped for a given sparse + allocation. + The caller provides a list of the pages within the sparse + allocation which should be given a CPU mapping and a list + of the pages which do not require a CPU mapping. + + Implementation of this callback is mandatory. 
+ +@Input pvPriv Private data (which was generated by the + PMR factory when PMR was created) +@Input psPMR The PMR of the sparse allocation to be + modified +@Input sCpuVAddrBase The virtual base address of the sparse + allocation +@Input ui32AllocPageCount The number of pages specified in + pai32AllocIndices +@Input pai32AllocIndices The list of pages in the sparse + allocation that should be given a CPU + mapping. Pages are referenced by their + index within the sparse allocation (e.g. + in a 10 page allocation, pages are + denoted by indices 0 to 9) +@Input ui32FreePageCount The number of pages specified in + pai32FreeIndices +@Input pai32FreeIndices The list of pages in the sparse + allocation that do not require a CPU + mapping. + +@Return PVRSRV_OK if the page mappings were updated successfully, an + error code otherwise. +*/ /**************************************************************************/ +typedef PVRSRV_ERROR (*PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN)(PMR_IMPL_PRIVDATA pPriv, + const PMR *psPMR, + IMG_UINT64 sCpuVAddrBase, + IMG_UINT32 ui32AllocPageCount, + IMG_UINT32 *pai32AllocIndices, + IMG_UINT32 ui32FreePageCount, + IMG_UINT32 *pai32FreeIndices); + +/*************************************************************************/ /*! +@Brief Callback function type PFN_MMAP_FN + +@Description Called to map pages in the specified PMR. + + Implementation of this callback is optional. + Where it is provided, it will be used in place of OSMMapPMRGeneric(). + +@Input pvPriv Private data (which was generated by the + PMR factory when PMR was created) +@Input psPMR The PMR of the allocation to be mapped +@Input pMMapData OS-specific data to describe how mapping + should be performed + +@Return PVRSRV_OK if the mapping was successful, an error code + otherwise. 
+*/ /**************************************************************************/ +typedef PVRSRV_ERROR (*PFN_MMAP_FN)(PMR_IMPL_PRIVDATA pPriv, + PMR *psPMR, + PMR_MMAP_DATA pMMapData); + +/*************************************************************************/ /*! +@Brief Callback function type PFN_FINALIZE_FN + +@Description Called to destroy the PMR. + This callback will be called only when all references to + the PMR have been dropped. + The PMR was created via a call to PhysmemNewRamBackedPMR() + and is destroyed via this callback. + + Implementation of this callback is mandatory. + +@Input pvPriv Private data (which was generated by the + PMR factory when PMR was created) + +@Return PVRSRV_OK if the PMR destruction was successful, an error + code otherwise. + Currently PVRSRV_ERROR_PMR_STILL_REFERENCED is the only + error returned from physmem_dmabuf.c layer and on this + error, destroying of the PMR is aborted without disturbing + the PMR state. +*/ /**************************************************************************/ +typedef PVRSRV_ERROR (*PFN_FINALIZE_FN)(PMR_IMPL_PRIVDATA pvPriv); + +/*************************************************************************/ /*! +@Brief Callback function type PFN_ACQUIRE_PMR_FACTORY_LOCK_FN + +@Description Called to acquire the PMR factory's global lock, if it has one, + hence callback optional. Factories which support entry points + in addition to the normal bridge calls, for example, from the + native OS that manipulate the PMR reference count should + create a factory lock and implementations for these call backs. + + Implementation of this callback is optional. + +@Return None +*/ +/*****************************************************************************/ +typedef void (*PFN_ACQUIRE_PMR_FACTORY_LOCK_FN)(void); + +/*************************************************************************/ /*! 
+@Brief Callback function type PFN_RELEASE_PMR_FACTORY_LOCK_FN + +@Description Called to release the PMR factory's global lock acquired by calling + pfn_acquire_pmr_factory_lock callback. + + Implementation of this callback is optional. + +@Return None +*/ /**************************************************************************/ +typedef void (*PFN_RELEASE_PMR_FACTORY_LOCK_FN)(void); + +/*! PMR factory callback table. + */ +struct _PMR_IMPL_FUNCTAB_ { + /*! Callback function pointer, see ::PFN_LOCK_PHYS_ADDRESSES_FN */ + PFN_LOCK_PHYS_ADDRESSES_FN pfnLockPhysAddresses; + /*! Callback function pointer, see ::PFN_UNLOCK_PHYS_ADDRESSES_FN */ + PFN_UNLOCK_PHYS_ADDRESSES_FN pfnUnlockPhysAddresses; + + /*! Callback function pointer, see ::PFN_DEV_PHYS_ADDR_FN */ + PFN_DEV_PHYS_ADDR_FN pfnDevPhysAddr; + + /*! Callback function pointer, see ::PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN */ + PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN pfnAcquireKernelMappingData; + /*! Callback function pointer, see ::PFN_RELEASE_KERNEL_MAPPING_DATA_FN */ + PFN_RELEASE_KERNEL_MAPPING_DATA_FN pfnReleaseKernelMappingData; + +#if defined(INTEGRITY_OS) + /* + * MapMemoryObject()/UnmapMemoryObject() + * + * called to map/unmap memory objects in Integrity OS + */ + + PVRSRV_ERROR (*pfnMapMemoryObject)(PMR_IMPL_PRIVDATA pvPriv, + IMG_HANDLE *phMemObj, + void **pvClientAddr, + IMG_HANDLE *phHandleOut); + PVRSRV_ERROR (*pfnUnmapMemoryObject)(PMR_IMPL_PRIVDATA pvPriv); +#endif + + /*! Callback function pointer, see ::PFN_READ_BYTES_FN */ + PFN_READ_BYTES_FN pfnReadBytes; + /*! Callback function pointer, see ::PFN_WRITE_BYTES_FN */ + PFN_WRITE_BYTES_FN pfnWriteBytes; + + /*! Callback function pointer, see ::PFN_UNPIN_MEM_FN */ + PFN_UNPIN_MEM_FN pfnUnpinMem; + /*! Callback function pointer, see ::PFN_PIN_MEM_FN */ + PFN_PIN_MEM_FN pfnPinMem; + + /*! Callback function pointer, see ::PFN_CHANGE_SPARSE_MEM_FN */ + PFN_CHANGE_SPARSE_MEM_FN pfnChangeSparseMem; + /*! 
Callback function pointer, see ::PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN */ + PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN pfnChangeSparseMemCPUMap; + + /*! Callback function pointer, see ::PFN_MMAP_FN */ + PFN_MMAP_FN pfnMMap; + + /*! Callback function pointer, see ::PFN_FINALIZE_FN */ + PFN_FINALIZE_FN pfnFinalize; + + /*! Callback function pointer, see ::PFN_ACQUIRE_PMR_FACTORY_LOCK_FN */ + PFN_ACQUIRE_PMR_FACTORY_LOCK_FN pfnGetPMRFactoryLock; + + /*! Callback function pointer, see ::PFN_RELEASE_PMR_FACTORY_LOCK_FN */ + PFN_RELEASE_PMR_FACTORY_LOCK_FN pfnReleasePMRFactoryLock; +}; + +/*! PMR factory callback table. + */ +typedef struct _PMR_IMPL_FUNCTAB_ PMR_IMPL_FUNCTAB; + +#endif /* SRVSRV_PMR_IMPL_H */ diff --git a/drivers/gpu/drm/phytium/octopus/pmr_os.c b/drivers/gpu/drm/phytium/octopus/pmr_os.c new file mode 100644 index 000000000000..506671f8d19f --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pmr_os.c @@ -0,0 +1,596 @@ +/*************************************************************************/ /*! +@File +@Title Linux OS PMR functions +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include +#include +#include +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) +#include +#include +#endif + +#include "img_defs.h" +#include "pvr_debug.h" +#include "allocmem.h" +#include "devicemem_server_utils.h" +#include "pmr.h" +#include "pmr_os.h" + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#include "process_stats.h" +#endif + +#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) +#include "mmap_stats.h" +#endif + +#include "kernel_compatibility.h" + +/* + * x86_32: + * Use vm_insert_page because remap_pfn_range has issues when mapping HIGHMEM + * pages with default memory attributes; these HIGHMEM pages are skipped in + * set_pages_array_[uc,wc] during allocation; see reserve_pfn_range(). + * Also vm_insert_page is faster. + * + * x86_64: + * Use vm_insert_page because it is faster. + * + * Other platforms: + * Use remap_pfn_range by default because it does not issue a cache flush. + * It is known that ARM32 benefits from this. When other platforms become + * available it has to be investigated if this assumption holds for them as well. + * + * Since vm_insert_page does more precise memory accounting we have the build + * flag PVR_MMAP_USE_VM_INSERT that forces its use. This is useful as a debug + * feature. 
+ * + */ +#if defined(CONFIG_X86) || defined(PVR_MMAP_USE_VM_INSERT) +#define PMR_OS_USE_VM_INSERT_PAGE 1 +#endif + +static void MMapPMROpen(struct vm_area_struct *ps_vma) +{ + PMR *psPMR = ps_vma->vm_private_data; + + /* Our VM flags should ensure this function never gets called */ + PVR_DPF((PVR_DBG_WARNING, + "%s: Unexpected mmap open call, this is probably an application bug.", + __func__)); + PVR_DPF((PVR_DBG_WARNING, + "%s: vma struct: 0x%p, vAddr: %#lX, length: %#lX, PMR pointer: 0x%p", + __func__, + ps_vma, + ps_vma->vm_start, + ps_vma->vm_end - ps_vma->vm_start, + psPMR)); + + /* In case we get called anyway let's do things right by increasing the refcount and + * locking down the physical addresses. */ + PMRRefPMR(psPMR); + + if (PMRLockSysPhysAddresses(psPMR) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Could not lock down physical addresses, aborting.", __func__)); + PMRUnrefPMR(psPMR); + } +} + +static void MMapPMRClose(struct vm_area_struct *ps_vma) +{ + PMR *psPMR = ps_vma->vm_private_data; + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if defined(PVRSRV_ENABLE_MEMORY_STATS) + { + uintptr_t vAddr = ps_vma->vm_start; + + while (vAddr < ps_vma->vm_end) + { + /* USER MAPPING */ + PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, + (IMG_UINT64)vAddr, + OSGetCurrentClientProcessIDKM()); + vAddr += PAGE_SIZE; + } + } +#else + PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, + ps_vma->vm_end - ps_vma->vm_start, + OSGetCurrentClientProcessIDKM()); +#endif +#endif + + PMRUnlockSysPhysAddresses(psPMR); + PMRUnrefPMR(psPMR); +} + +/* + * This vma operation is used to read data from mmap regions. It is called + * by access_process_vm, which is called to handle PTRACE_PEEKDATA ptrace + * requests and reads from /proc//mem. 
+ */ +static int MMapVAccess(struct vm_area_struct *ps_vma, unsigned long addr, + void *buf, int len, int write) +{ + PMR *psPMR = ps_vma->vm_private_data; + unsigned long ulOffset = addr - ps_vma->vm_start; + size_t uiBytesCopied; + PVRSRV_ERROR eError; + int iRetVal = -EINVAL; + + if (write) + { + eError = PMR_WriteBytes(psPMR, + (IMG_DEVMEM_OFFSET_T) ulOffset, + buf, + len, + &uiBytesCopied); + } + else + { + eError = PMR_ReadBytes(psPMR, + (IMG_DEVMEM_OFFSET_T) ulOffset, + buf, + len, + &uiBytesCopied); + } + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Error from %s (%d)", + __func__, + write ? "PMR_WriteBytes" : "PMR_ReadBytes", + eError)); + } + else + { + iRetVal = uiBytesCopied; + } + + return iRetVal; +} + +static const struct vm_operations_struct gsMMapOps = +{ + .open = &MMapPMROpen, + .close = &MMapPMRClose, + .access = MMapVAccess, +}; + +static INLINE int _OSMMapPMR(PVRSRV_DEVICE_NODE *psDevNode, + struct vm_area_struct *ps_vma, + IMG_DEVMEM_OFFSET_T uiOffset, + IMG_CPU_PHYADDR *psCpuPAddr, + IMG_UINT32 uiLog2PageSize, + IMG_BOOL bUseVMInsertPage, + IMG_BOOL bUseMixedMap) +{ + IMG_INT32 iStatus; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) + pfn_t sPFN; +#else + unsigned long uiPFN; +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) + sPFN = phys_to_pfn_t(psCpuPAddr->uiAddr, 0); +#else + uiPFN = psCpuPAddr->uiAddr >> PAGE_SHIFT; + PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == psCpuPAddr->uiAddr); +#endif + + /* + * vm_insert_page() allows insertion of individual pages into user + * VMA space _only_ if page is a order-zero allocated page + */ + if (bUseVMInsertPage) + { + if (bUseMixedMap) + { + /* + * This path is just for debugging. It should be + * equivalent to the remap_pfn_range() path. 
+ */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)) + vm_fault_t vmf; + + vmf = vmf_insert_mixed(ps_vma, + ps_vma->vm_start + uiOffset, + sPFN); + if (vmf & VM_FAULT_ERROR) + { + iStatus = vm_fault_to_errno(vmf, 0); + } + else + { + iStatus = 0; + } +#else + iStatus = vm_insert_mixed(ps_vma, + ps_vma->vm_start + uiOffset, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) + sPFN); +#else + uiPFN); +#endif +#endif + } + else + { + /* Since kernel 3.7 this sets VM_MIXEDMAP internally */ + iStatus = vm_insert_page(ps_vma, + ps_vma->vm_start + uiOffset, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) + pfn_t_to_page(sPFN)); +#else + pfn_to_page(uiPFN)); +#endif + } + } + else + { + /* + NOTE: Regarding absence of dma_mmap_coherent() in _OSMMapPMR() + + The current services mmap model maps in a PMR's full-length size + into the user VMA & applies any user specified offset to the kernel + returned zero-offset based VA in services client; this essentially + means services server ignores ps_vma->vm_pgoff (this houses hPMR) + during a mmap call. + + Furthermore, during a DMA/CMA memory allocation, multiple order-n + pages are used to satisfy an allocation request due to DMA/CMA + framework rounding-up allocation size to next power-of-two which + can lead to wasted memory (so we don't allocate using single call). + + The combination of the above two issues mean that we cannot use the + dma_mmap_coherent() for a number of reasons outlined below: + + - Services mmap semantics does not fit with dma_mmap_coherent() + which requires proper ps_vma->vm_pgoff; seeing this houses a + hPMR handle value, calls into dma_mmap_coherent() fails. This + could be avoided by forcing ps_vma->vm_pgoff to zero but the + ps_vma->vm_pgoff is applied to DMA bus address PFN and not + user VMA which is always mapped at ps_vma->vm_start. 
+ + - As multiple order-n pages are used for DMA/CMA allocations, a + single dma_mmap_coherent() call with a vma->vm_pgoff set to + zero cannot (maybe) be used because there is no guarantee that + all of the multiple order-n pages in the PMR are physically + contiguous from the first entry to the last. Whilst this is + highly likely to be the case, there is no guarantee that it + will be so we cannot depend on this being the case. + + The solution is to manually mmap DMA/CMA pages into user VMA + using remap_pfn_range() directly. Furthermore, accounting is + always compromised for DMA/CMA allocations. + */ + size_t uiNumContiguousBytes = 1ULL << uiLog2PageSize; + + iStatus = remap_pfn_range(ps_vma, + ps_vma->vm_start + uiOffset, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) + pfn_t_to_pfn(sPFN), +#else + uiPFN, +#endif + uiNumContiguousBytes, + ps_vma->vm_page_prot); + } + + return iStatus; +} + +PVRSRV_ERROR +OSMMapPMRGeneric(PMR *psPMR, PMR_MMAP_DATA pOSMMapData) +{ + struct vm_area_struct *ps_vma = pOSMMapData; + PVRSRV_DEVICE_NODE *psDevNode = PMR_DeviceNode(psPMR); + PVRSRV_ERROR eError; + size_t uiLength; + IMG_INT32 iStatus; + IMG_DEVMEM_OFFSET_T uiOffset; + IMG_UINT32 ui32CPUCacheFlags; + pgprot_t sPageProt; + IMG_CPU_PHYADDR asCpuPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC]; + IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC]; + IMG_UINT32 uiOffsetIdx; + IMG_UINT32 uiNumOfPFNs; + IMG_UINT32 uiLog2PageSize; + IMG_CPU_PHYADDR *psCpuPAddr; + IMG_BOOL *pbValid; + IMG_BOOL bUseMixedMap = IMG_FALSE; + IMG_BOOL bUseVMInsertPage = IMG_FALSE; + + eError = PMRLockSysPhysAddresses(psPMR); + if (eError != PVRSRV_OK) + { + goto e0; + } + + if (((ps_vma->vm_flags & VM_WRITE) != 0) && + ((ps_vma->vm_flags & VM_SHARED) == 0)) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto e1; + } + + sPageProt = vm_get_page_prot(ps_vma->vm_flags); + + eError = DevmemCPUCacheMode(psDevNode, + PMR_Flags(psPMR), + &ui32CPUCacheFlags); + if (eError != PVRSRV_OK) + { + goto e0; + } + + 
switch (ui32CPUCacheFlags) + { + case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED: + sPageProt = pgprot_noncached(sPageProt); + break; + + case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC: + sPageProt = pgprot_writecombine(sPageProt); + break; + + case PVRSRV_MEMALLOCFLAG_CPU_CACHED: + { +/* Do not set to write-combine for plato */ +#if !defined(PLATO_MEMORY_CONFIG) + PHYS_HEAP *psPhysHeap = PMR_PhysHeap(psPMR); + + if (PhysHeapGetType(psPhysHeap) == PHYS_HEAP_TYPE_LMA) + sPageProt = pgprot_writecombine(sPageProt); +#endif + break; + } + + default: + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto e1; + } + ps_vma->vm_page_prot = sPageProt; + + ps_vma->vm_flags |= VM_IO; + + /* Don't include the mapping in core dumps */ + ps_vma->vm_flags |= VM_DONTDUMP; + + /* + * Disable mremap because our nopage handler assumes all + * page requests have already been validated. + */ + ps_vma->vm_flags |= VM_DONTEXPAND; + + /* Don't allow mapping to be inherited across a process fork */ + ps_vma->vm_flags |= VM_DONTCOPY; + + uiLength = ps_vma->vm_end - ps_vma->vm_start; + + /* Is this mmap targeting non order-zero pages or does it use pfn mappings? 
+ * If yes, don't use vm_insert_page */ + uiLog2PageSize = PMR_GetLog2Contiguity(psPMR); + +#if defined(PMR_OS_USE_VM_INSERT_PAGE) + bUseVMInsertPage = (uiLog2PageSize == PAGE_SHIFT) && (PMR_GetType(psPMR) != PMR_TYPE_EXTMEM); +#endif + + /* Can we use stack allocations */ + uiNumOfPFNs = uiLength >> uiLog2PageSize; + if (uiNumOfPFNs > PMR_MAX_TRANSLATION_STACK_ALLOC) + { + psCpuPAddr = OSAllocMem(uiNumOfPFNs * sizeof(*psCpuPAddr)); + if (psCpuPAddr == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto e1; + } + + /* Should allocation fail, clean-up here before exiting */ + pbValid = OSAllocMem(uiNumOfPFNs * sizeof(*pbValid)); + if (pbValid == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + OSFreeMem(psCpuPAddr); + goto e2; + } + } + else + { + psCpuPAddr = asCpuPAddr; + pbValid = abValid; + } + + /* Obtain map range pfns */ + eError = PMR_CpuPhysAddr(psPMR, + uiLog2PageSize, + uiNumOfPFNs, + 0, + psCpuPAddr, + pbValid); + if (eError != PVRSRV_OK) + { + goto e3; + } + + /* + * Scan the map range for pfns without struct page* handling. If + * we find one, this is a mixed map, and we can't use vm_insert_page() + * NOTE: vm_insert_page() allows insertion of individual pages into user + * VMA space _only_ if said page is an order-zero allocated page. 
+ */ + if (bUseVMInsertPage) + { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) + pfn_t sPFN; +#else + unsigned long uiPFN; +#endif + + for (uiOffsetIdx = 0; uiOffsetIdx < uiNumOfPFNs; ++uiOffsetIdx) + { + if (pbValid[uiOffsetIdx]) + { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) + sPFN = phys_to_pfn_t(psCpuPAddr[uiOffsetIdx].uiAddr, 0); + + if (!pfn_t_valid(sPFN) || page_count(pfn_t_to_page(sPFN)) == 0) +#else + uiPFN = psCpuPAddr[uiOffsetIdx].uiAddr >> PAGE_SHIFT; + PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == psCpuPAddr[uiOffsetIdx].uiAddr); + + if (!pfn_valid(uiPFN) || page_count(pfn_to_page(uiPFN)) == 0) +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */ + { + bUseMixedMap = IMG_TRUE; + break; + } + } + } + + if (bUseMixedMap) + { + ps_vma->vm_flags |= VM_MIXEDMAP; + } + } + else + { + ps_vma->vm_flags |= VM_PFNMAP; + } + + /* For each PMR page-size contiguous bytes, map page(s) into user VMA */ + for (uiOffset = 0; uiOffset < uiLength; uiOffset += 1ULL<> uiLog2PageSize; + /* + * Only map in pages that are valid, any that aren't will be + * picked up by the nopage handler which will return a zeroed + * page for us. + */ + if (pbValid[uiOffsetIdx]) + { + iStatus = _OSMMapPMR(psDevNode, + ps_vma, + uiOffset, + &psCpuPAddr[uiOffsetIdx], + uiLog2PageSize, + bUseVMInsertPage, + bUseMixedMap); + if (iStatus) + { + /* Failure error code doesn't get propagated */ + eError = PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED; + PVR_ASSERT(0); + goto e3; + } + } +#if defined(PVRSRV_ENABLE_PROCESS_STATS) && defined(PVRSRV_ENABLE_MEMORY_STATS) +#define PMR_OS_BAD_CPUADDR 0x0BAD0BAD + { + IMG_CPU_PHYADDR sPAddr; + sPAddr.uiAddr = pbValid[uiOffsetIdx] ? 
+ psCpuPAddr[uiOffsetIdx].uiAddr : + IMG_CAST_TO_CPUPHYADDR_UINT(PMR_OS_BAD_CPUADDR); + + PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, + (void*)(uintptr_t)(ps_vma->vm_start + uiOffset), + sPAddr, + 1<vm_private_data = psPMR; + + /* Install open and close handlers for ref-counting */ + ps_vma->vm_ops = &gsMMapOps; + + /* + * Take a reference on the PMR so that it can't be freed while mapped + * into the user process. + */ + PMRRefPMR(psPMR); + +#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) + /* record the stats */ + MMapStatsAddOrUpdatePMR(psPMR, uiLength); +#endif + + return PVRSRV_OK; + + /* Error exit paths follow */ +e3: + if (pbValid != abValid) + { + OSFreeMem(pbValid); + } +e2: + if (psCpuPAddr != asCpuPAddr) + { + OSFreeMem(psCpuPAddr); + } +e1: + PMRUnlockSysPhysAddresses(psPMR); +e0: + return eError; +} diff --git a/drivers/gpu/drm/phytium/octopus/pmr_os.h b/drivers/gpu/drm/phytium/octopus/pmr_os.h new file mode 100644 index 000000000000..a850bfe7c29a --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pmr_os.h @@ -0,0 +1,62 @@ +/*************************************************************************/ /*! +@File +@Title OS PMR functions +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description OS specific PMR functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(PMR_OS_H) +#define PMR_OS_H + +#include "pmr_impl.h" + +/*************************************************************************/ /*! +@Function OSMMapPMRGeneric +@Description Implements a generic PMR mapping function, which is used + to CPU map a PMR where the PMR does not have a mapping + function defined by the creating PMR factory. +@Input psPMR the PMR to be mapped +@Output pOSMMapData pointer to any private data + needed by the generic mapping function +@Return PVRSRV_OK on success, a failure code otherwise. 
+*/ /**************************************************************************/ +PVRSRV_ERROR +OSMMapPMRGeneric(PMR *psPMR, PMR_MMAP_DATA pOSMMapData); + +#endif /* !defined(PMR_OS_H) */ diff --git a/drivers/gpu/drm/phytium/octopus/power.c b/drivers/gpu/drm/phytium/octopus/power.c new file mode 100644 index 000000000000..e92b4a8c6a78 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/power.c @@ -0,0 +1,953 @@ +/*************************************************************************/ /*! +@File power.c +@Title Power management functions +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Main APIs for power management functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "pdump_km.h" +#include "allocmem.h" +#include "osfunc.h" + +#include "lock.h" +#include "pvrsrv.h" +#include "pvr_debug.h" +#include "process_stats.h" + + +struct _PVRSRV_POWER_DEV_TAG_ +{ + PFN_PRE_POWER pfnDevicePrePower; + PFN_POST_POWER pfnDevicePostPower; + PFN_SYS_PRE_POWER pfnSystemPrePower; + PFN_SYS_POST_POWER pfnSystemPostPower; + PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange; + PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange; + PFN_FORCED_IDLE_REQUEST pfnForcedIdleRequest; + PFN_FORCED_IDLE_CANCEL_REQUEST pfnForcedIdleCancelRequest; + PFN_GPU_UNITS_POWER_CHANGE pfnGPUUnitsPowerChange; + IMG_HANDLE hSysData; + IMG_HANDLE hDevCookie; + PVRSRV_DEV_POWER_STATE eDefaultPowerState; + ATOMIC_T eCurrentPowerState; +}; + +/*! 
+ Typedef for a pointer to a function that will be called for re-acquiring + device powerlock after releasing it temporarily for some timeout period + in function PVRSRVDeviceIdleRequestKM + */ +typedef PVRSRV_ERROR (*PFN_POWER_LOCK_ACQUIRE) (PPVRSRV_DEVICE_NODE psDevNode); + +static inline IMG_UINT64 PVRSRVProcessStatsGetTimeNs(void) +{ +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + return OSClockns64(); +#else + return 0; +#endif +} + +static inline IMG_UINT64 PVRSRVProcessStatsGetTimeUs(void) +{ +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + return OSClockus(); +#else + return 0; +#endif +} + +/*! +****************************************************************************** + + @Function _IsSystemStatePowered + + @Description Tests whether a given system state represents powered-up. + + @Input eSystemPowerState : a system power state + + @Return IMG_BOOL + +******************************************************************************/ +static IMG_BOOL _IsSystemStatePowered(PVRSRV_SYS_POWER_STATE eSystemPowerState) +{ + return (eSystemPowerState == PVRSRV_SYS_POWER_STATE_ON); +} + +/* We don't expect PID=0 to acquire device power-lock */ +#define PWR_LOCK_OWNER_PID_CLR_VAL 0 + +PVRSRV_ERROR PVRSRVPowerLockInit(PPVRSRV_DEVICE_NODE psDeviceNode) +{ + PVRSRV_ERROR eError; + + eError = OSLockCreate(&psDeviceNode->hPowerLock); + PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate"); + + psDeviceNode->uiPwrLockOwnerPID = PWR_LOCK_OWNER_PID_CLR_VAL; + return PVRSRV_OK; +} + +void PVRSRVPowerLockDeInit(PPVRSRV_DEVICE_NODE psDeviceNode) +{ + psDeviceNode->uiPwrLockOwnerPID = PWR_LOCK_OWNER_PID_CLR_VAL; + OSLockDestroy(psDeviceNode->hPowerLock); +} + +IMG_BOOL PVRSRVPwrLockIsLockedByMe(PCPVRSRV_DEVICE_NODE psDeviceNode) +{ + return OSLockIsLocked(psDeviceNode->hPowerLock) && + OSGetCurrentClientProcessIDKM() == psDeviceNode->uiPwrLockOwnerPID; +} + +PVRSRV_ERROR PVRSRVPowerLock(PPVRSRV_DEVICE_NODE psDeviceNode) +{ + OSLockAcquire(psDeviceNode->hPowerLock); + + /* Only allow to take 
powerlock when the system power is on */ + if (_IsSystemStatePowered(psDeviceNode->eCurrentSysPowerState)) + { + psDeviceNode->uiPwrLockOwnerPID = OSGetCurrentClientProcessIDKM(); + return PVRSRV_OK; + } + + OSLockRelease(psDeviceNode->hPowerLock); + + return PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF; +} + +PVRSRV_ERROR PVRSRVPowerTryLock(PPVRSRV_DEVICE_NODE psDeviceNode) +{ + if (!(OSTryLockAcquire(psDeviceNode->hPowerLock))) + { + return PVRSRV_ERROR_RETRY; + } + + /* Only allow to take powerlock when the system power is on */ + if (_IsSystemStatePowered(psDeviceNode->eCurrentSysPowerState)) + { + psDeviceNode->uiPwrLockOwnerPID = OSGetCurrentClientProcessIDKM(); + + /* System is powered ON, return OK */ + return PVRSRV_OK; + } + else + { + /* System is powered OFF, release the lock and return error */ + OSLockRelease(psDeviceNode->hPowerLock); + return PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF; + } +} + +/*! +****************************************************************************** + + @Function _PVRSRVForcedPowerLock + + @Description Obtain the mutex for power transitions regardless of system + power state + + @Return Always returns PVRSRV_OK. 
Function prototype required same as + PFN_POWER_LOCK_ACQUIRE + +******************************************************************************/ +static PVRSRV_ERROR _PVRSRVForcedPowerLock(PPVRSRV_DEVICE_NODE psDeviceNode) +{ + OSLockAcquire(psDeviceNode->hPowerLock); + psDeviceNode->uiPwrLockOwnerPID = OSGetCurrentClientProcessIDKM(); + + return PVRSRV_OK; +} + +void PVRSRVPowerUnlock(PPVRSRV_DEVICE_NODE psDeviceNode) +{ + PVR_ASSERT(PVRSRVPwrLockIsLockedByMe(psDeviceNode)); + + /* Reset uiPwrLockOwnerPID before releasing lock */ + psDeviceNode->uiPwrLockOwnerPID = PWR_LOCK_OWNER_PID_CLR_VAL; + OSLockRelease(psDeviceNode->hPowerLock); +} + +IMG_BOOL PVRSRVDeviceIsDefaultStateOFF(PVRSRV_POWER_DEV *psPowerDevice) +{ + return (psPowerDevice->eDefaultPowerState == PVRSRV_DEV_POWER_STATE_OFF); +} + +PVRSRV_ERROR PVRSRVSetDeviceDefaultPowerState(PCPVRSRV_DEVICE_NODE psDeviceNode, + PVRSRV_DEV_POWER_STATE eNewPowerState) +{ + PVRSRV_POWER_DEV *psPowerDevice; + + psPowerDevice = psDeviceNode->psPowerDev; + if (psPowerDevice == NULL) + { + return PVRSRV_ERROR_INVALID_DEVICE; + } + + psPowerDevice->eDefaultPowerState = eNewPowerState; + + return PVRSRV_OK; +} + +/* + @Input pfnPowerLockAcquire : Function to re-acquire power-lock in-case + it was necessary to release it. 
+*/ +static PVRSRV_ERROR _PVRSRVDeviceIdleRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode, + PFN_SYS_DEV_IS_DEFAULT_STATE_OFF pfnIsDefaultStateOff, + IMG_BOOL bDeviceOffPermitted, + PFN_POWER_LOCK_ACQUIRE pfnPowerLockAcquire) +{ + PVRSRV_POWER_DEV *psPowerDev = psDeviceNode->psPowerDev; + PVRSRV_ERROR eError; + + if ((psPowerDev && psPowerDev->pfnForcedIdleRequest) && + (!pfnIsDefaultStateOff || pfnIsDefaultStateOff(psPowerDev))) + { + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = psPowerDev->pfnForcedIdleRequest(psPowerDev->hDevCookie, + bDeviceOffPermitted); + if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED) + { + PVRSRV_ERROR eErrPwrLockAcq; + /* FW denied idle request */ + PVRSRVPowerUnlock(psDeviceNode); + + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + + eErrPwrLockAcq = pfnPowerLockAcquire(psDeviceNode); + if (eErrPwrLockAcq != PVRSRV_OK) + { + /* We only understand PVRSRV_ERROR_RETRY, so assert on others. + * Moreover, we've ended-up releasing the power-lock which was + * originally "held" by caller before calling this function - + * since this needs vigilant handling at call-site, we pass + * back an explicit error, for caller(s) to "avoid" calling + * PVRSRVPowerUnlock */ + PVR_ASSERT(eErrPwrLockAcq == PVRSRV_ERROR_RETRY); + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to re-acquire power-lock " + "(%s) after releasing it for a time-out", + __func__, PVRSRVGetErrorString(eErrPwrLockAcq))); + return PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED; + } + } + else + { + /* idle request successful or some other error occurred, return */ + break; + } + } END_LOOP_UNTIL_TIMEOUT(); + } + else + { + return PVRSRV_OK; + } + + return eError; +} + +/* + * Wrapper function helps limiting calling complexity of supplying additional + * PFN_POWER_LOCK_ACQUIRE argument (required by _PVRSRVDeviceIdleRequestKM) + */ +inline PVRSRV_ERROR PVRSRVDeviceIdleRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode, + PFN_SYS_DEV_IS_DEFAULT_STATE_OFF pfnIsDefaultStateOff, + IMG_BOOL bDeviceOffPermitted) +{ 
+ return _PVRSRVDeviceIdleRequestKM(psDeviceNode, + pfnIsDefaultStateOff, + bDeviceOffPermitted, + PVRSRVPowerLock); +} + +PVRSRV_ERROR PVRSRVDeviceIdleCancelRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode) +{ + PVRSRV_POWER_DEV *psPowerDev = psDeviceNode->psPowerDev; + + if (psPowerDev && psPowerDev->pfnForcedIdleCancelRequest) + { + return psPowerDev->pfnForcedIdleCancelRequest(psPowerDev->hDevCookie); + } + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function PVRSRVDevicePrePowerStateKM + + @Description + + Perform device-specific processing required before a power transition + + @Input psPowerDevice : Power device + @Input eNewPowerState : New power state + @Input bForced : TRUE if the transition should not fail (e.g. OS request) + + @Return PVRSRV_ERROR + +******************************************************************************/ +static +PVRSRV_ERROR PVRSRVDevicePrePowerStateKM(PVRSRV_POWER_DEV *psPowerDevice, + PVRSRV_DEV_POWER_STATE eNewPowerState, + IMG_BOOL bForced, + IMG_BOOL bPreserveRam) +{ + PVRSRV_DEV_POWER_STATE eCurrentPowerState; + IMG_UINT64 ui64SysTimer1 = 0; + IMG_UINT64 ui64SysTimer2 = 0; + IMG_UINT64 ui64DevTimer1 = 0; + IMG_UINT64 ui64DevTimer2 = 0; + PVRSRV_ERROR eError; + + PVR_ASSERT(eNewPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT); + + eCurrentPowerState = OSAtomicRead(&psPowerDevice->eCurrentPowerState); + + if (psPowerDevice->pfnDevicePrePower != NULL) + { + ui64DevTimer1 = PVRSRVProcessStatsGetTimeNs(); + + /* Call the device's power callback. */ + eError = psPowerDevice->pfnDevicePrePower(psPowerDevice->hDevCookie, + eNewPowerState, + eCurrentPowerState, + bForced); + + ui64DevTimer2 = PVRSRVProcessStatsGetTimeNs(); + + PVR_RETURN_IF_ERROR(eError); + } + + /* Do any required system-layer processing. 
*/ + if (psPowerDevice->pfnSystemPrePower != NULL) + { + ui64SysTimer1 = PVRSRVProcessStatsGetTimeNs(); + + eError = psPowerDevice->pfnSystemPrePower(psPowerDevice->hSysData, + (eNewPowerState == PVRSRV_DEV_POWER_STATE_ON) ? + PVRSRV_SYS_POWER_STATE_ON : + PVRSRV_SYS_POWER_STATE_OFF, + (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) ? + PVRSRV_SYS_POWER_STATE_ON : + PVRSRV_SYS_POWER_STATE_OFF, + bForced +#if defined(SUPPORT_LMA_SUSPEND_TO_RAM) + , bPreserveRam +#endif + ); + + ui64SysTimer2 = PVRSRVProcessStatsGetTimeNs(); + + PVR_RETURN_IF_ERROR(eError); + } + + InsertPowerTimeStatistic(ui64SysTimer1, ui64SysTimer2, + ui64DevTimer1, ui64DevTimer2, + bForced, + eNewPowerState == PVRSRV_DEV_POWER_STATE_ON, + IMG_TRUE); + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function PVRSRVDevicePostPowerStateKM + + @Description + + Perform device-specific processing required after a power transition + + @Input psPowerDevice : Power device + @Input eNewPowerState : New power state + @Input bForced : TRUE if the transition should not fail (e.g. OS request) + + @Return PVRSRV_ERROR + +******************************************************************************/ +static +PVRSRV_ERROR PVRSRVDevicePostPowerStateKM(PVRSRV_POWER_DEV *psPowerDevice, + PVRSRV_DEV_POWER_STATE eNewPowerState, + IMG_BOOL bForced, + IMG_BOOL bPreserveRam) +{ + PVRSRV_DEV_POWER_STATE eCurrentPowerState; + IMG_UINT64 ui64SysTimer1 = 0; + IMG_UINT64 ui64SysTimer2 = 0; + IMG_UINT64 ui64DevTimer1 = 0; + IMG_UINT64 ui64DevTimer2 = 0; + PVRSRV_ERROR eError; + +#if !defined(SUPPORT_LMA_SUSPEND_TO_RAM) + PVR_UNREFERENCED_PARAMETER(bPreserveRam); +#endif + + PVR_ASSERT(eNewPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT); + + eCurrentPowerState = OSAtomicRead(&psPowerDevice->eCurrentPowerState); + + /* Do any required system-layer processing. 
*/ + if (psPowerDevice->pfnSystemPostPower != NULL) + { + ui64SysTimer1 = PVRSRVProcessStatsGetTimeNs(); + + eError = psPowerDevice->pfnSystemPostPower(psPowerDevice->hSysData, + (eNewPowerState == PVRSRV_DEV_POWER_STATE_ON) ? + PVRSRV_SYS_POWER_STATE_ON : + PVRSRV_SYS_POWER_STATE_OFF, + (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) ? + PVRSRV_SYS_POWER_STATE_ON : + PVRSRV_SYS_POWER_STATE_OFF, + bForced +#if defined(SUPPORT_LMA_SUSPEND_TO_RAM) + , bPreserveRam +#endif + ); + + ui64SysTimer2 = PVRSRVProcessStatsGetTimeNs(); + + PVR_RETURN_IF_ERROR(eError); + } + + if (psPowerDevice->pfnDevicePostPower != NULL) + { + ui64DevTimer1 = PVRSRVProcessStatsGetTimeNs(); + + /* Call the device's power callback. */ + eError = psPowerDevice->pfnDevicePostPower(psPowerDevice->hDevCookie, + eNewPowerState, + eCurrentPowerState, + bForced); + + ui64DevTimer2 = PVRSRVProcessStatsGetTimeNs(); + + PVR_RETURN_IF_ERROR(eError); + } + + InsertPowerTimeStatistic(ui64SysTimer1, ui64SysTimer2, + ui64DevTimer1, ui64DevTimer2, + bForced, + eNewPowerState == PVRSRV_DEV_POWER_STATE_ON, + IMG_FALSE); + + OSAtomicWrite(&psPowerDevice->eCurrentPowerState, eNewPowerState); + + return PVRSRV_OK; +} + +PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(PPVRSRV_DEVICE_NODE psDeviceNode, + PVRSRV_DEV_POWER_STATE eNewPowerState, + IMG_BOOL bForced, + IMG_BOOL bPreserveRam) +{ + PVRSRV_ERROR eError; + PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_POWER_DEV *psPowerDevice; + +#if !defined(SUPPORT_LMA_SUSPEND_TO_RAM) + PVR_UNREFERENCED_PARAMETER(bPreserveRam); +#endif + + psPowerDevice = psDeviceNode->psPowerDev; + if (!psPowerDevice) + { + return PVRSRV_OK; + } + + if (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) + { + eNewPowerState = psPowerDevice->eDefaultPowerState; + } + + if (OSAtomicRead(&psPowerDevice->eCurrentPowerState) != eNewPowerState) + { + eError = PVRSRVDevicePrePowerStateKM(psPowerDevice, + eNewPowerState, + bForced, bPreserveRam); + PVR_GOTO_IF_ERROR(eError, ErrorExit); + 
+ eError = PVRSRVDevicePostPowerStateKM(psPowerDevice, + eNewPowerState, + bForced, bPreserveRam); + PVR_GOTO_IF_ERROR(eError, ErrorExit); + + /* Signal Device Watchdog Thread about power mode change. */ + if (eNewPowerState == PVRSRV_DEV_POWER_STATE_ON) + { + psPVRSRVData->ui32DevicesWatchdogPwrTrans++; +#if !defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) + if (psPVRSRVData->ui32DevicesWatchdogTimeout == DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT) +#endif + { + eError = OSEventObjectSignal(psPVRSRVData->hDevicesWatchdogEvObj); + PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); + } + } +#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) + else if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF) + { + /* signal watchdog thread and give it a chance to switch to + * longer / infinite wait time */ + eError = OSEventObjectSignal(psPVRSRVData->hDevicesWatchdogEvObj); + PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); + } +#endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */ + } + + return PVRSRV_OK; + +ErrorExit: + + if (eError == PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED) + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Transition to %d was denied, Forced=%d", + __func__, eNewPowerState, bForced)); + } + else if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Transition to %d FAILED (%s)", + __func__, eNewPowerState, PVRSRVGetErrorString(eError))); + } + + return eError; +} + +PVRSRV_ERROR PVRSRVSetDeviceSystemPowerState(PPVRSRV_DEVICE_NODE psDeviceNode, + PVRSRV_SYS_POWER_STATE eNewSysPowerState, + IMG_BOOL bPreserveRam) +{ + PVRSRV_ERROR eError; + IMG_UINT uiStage = 0; + + PVRSRV_DEV_POWER_STATE eNewDevicePowerState = + _IsSystemStatePowered(eNewSysPowerState)? PVRSRV_DEV_POWER_STATE_DEFAULT : PVRSRV_DEV_POWER_STATE_OFF; + + /* If setting devices to default state, force idle all devices whose default state is off */ + PFN_SYS_DEV_IS_DEFAULT_STATE_OFF pfnIsDefaultStateOff = + (eNewDevicePowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) ? 
PVRSRVDeviceIsDefaultStateOFF : NULL; + + /* require a proper power state */ + if (eNewSysPowerState == PVRSRV_SYS_POWER_STATE_Unspecified) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Prevent simultaneous SetPowerStateKM calls */ + _PVRSRVForcedPowerLock(psDeviceNode); + + /* no power transition requested, so do nothing */ + if (eNewSysPowerState == psDeviceNode->eCurrentSysPowerState) + { + PVRSRVPowerUnlock(psDeviceNode); + return PVRSRV_OK; + } + + eError = _PVRSRVDeviceIdleRequestKM(psDeviceNode, pfnIsDefaultStateOff, + IMG_TRUE, _PVRSRVForcedPowerLock); + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "_PVRSRVDeviceIdleRequestKM"); + uiStage++; + goto ErrorExit; + } + + eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, eNewDevicePowerState, + IMG_TRUE, bPreserveRam); + if (eError != PVRSRV_OK) + { + uiStage++; + goto ErrorExit; + } + + psDeviceNode->eCurrentSysPowerState = eNewSysPowerState; + + PVRSRVPowerUnlock(psDeviceNode); + + return PVRSRV_OK; + +ErrorExit: + PVRSRVPowerUnlock(psDeviceNode); + + PVR_DPF((PVR_DBG_ERROR, + "%s: Transition from %d to %d FAILED (%s) at stage %u. 
Dumping debug info.", + __func__, psDeviceNode->eCurrentSysPowerState, eNewSysPowerState, + PVRSRVGetErrorString(eError), uiStage)); + + PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); + + return eError; +} + +PVRSRV_ERROR PVRSRVSetSystemPowerState(PVRSRV_DEVICE_CONFIG *psDevConfig, + PVRSRV_SYS_POWER_STATE eNewSysPowerState) +{ + PVRSRV_ERROR eError; + PVRSRV_DEVICE_NODE *psDevNode = psDevConfig->psDevNode; + PVRSRV_SYS_POWER_STATE eCurrentSysPowerState; + + if (psDevNode != NULL) + { + eCurrentSysPowerState = psDevNode->eCurrentSysPowerState; + } + else + { + /* assume power is off if no device node */ + eCurrentSysPowerState = PVRSRV_SYS_POWER_STATE_OFF; + } + + /* no power transition requested, so do nothing */ + if (eNewSysPowerState == eCurrentSysPowerState) + { + return PVRSRV_OK; + } + + if (psDevConfig->pfnPrePowerState != NULL) + { + eError = psDevConfig->pfnPrePowerState(psDevConfig->hSysData, + eNewSysPowerState, + eCurrentSysPowerState, + IMG_TRUE +#if defined(SUPPORT_LMA_SUSPEND_TO_RAM) + , IMG_FALSE +#endif + ); + + PVR_RETURN_IF_ERROR(eError); + } + + if (psDevConfig->pfnPostPowerState != NULL) + { + eError = psDevConfig->pfnPostPowerState(psDevConfig->hSysData, + eNewSysPowerState, + eCurrentSysPowerState, + IMG_TRUE +#if defined(SUPPORT_LMA_SUSPEND_TO_RAM) + , IMG_FALSE +#endif + ); + + PVR_RETURN_IF_ERROR(eError); + } + + if (psDevNode != NULL) + { + psDevNode->eCurrentSysPowerState = eNewSysPowerState; + } + + return PVRSRV_OK; +} + +void PVRSRVSetPowerCallbacks(PPVRSRV_DEVICE_NODE psDeviceNode, + PVRSRV_POWER_DEV *psPowerDevice, + PFN_PRE_POWER pfnDevicePrePower, + PFN_POST_POWER pfnDevicePostPower, + PFN_SYS_PRE_POWER pfnSystemPrePower, + PFN_SYS_POST_POWER pfnSystemPostPower, + PFN_FORCED_IDLE_REQUEST pfnForcedIdleRequest, + PFN_FORCED_IDLE_CANCEL_REQUEST pfnForcedIdleCancelRequest) +{ + if (psPowerDevice != NULL) + { + if (PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp)) + { + 
psPowerDevice->pfnSystemPrePower = NULL; + psPowerDevice->pfnSystemPostPower = NULL; + } + else + { + psPowerDevice->pfnSystemPrePower = pfnSystemPrePower; + psPowerDevice->pfnSystemPostPower = pfnSystemPostPower; + } + + psPowerDevice->pfnDevicePrePower = pfnDevicePrePower; + psPowerDevice->pfnDevicePostPower = pfnDevicePostPower; + psPowerDevice->pfnForcedIdleRequest = pfnForcedIdleRequest; + psPowerDevice->pfnForcedIdleCancelRequest = pfnForcedIdleCancelRequest; + } +} + +PVRSRV_ERROR PVRSRVRegisterPowerDevice(PPVRSRV_DEVICE_NODE psDeviceNode, + PFN_PRE_POWER pfnDevicePrePower, + PFN_POST_POWER pfnDevicePostPower, + PFN_SYS_PRE_POWER pfnSystemPrePower, + PFN_SYS_POST_POWER pfnSystemPostPower, + PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange, + PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange, + PFN_FORCED_IDLE_REQUEST pfnForcedIdleRequest, + PFN_FORCED_IDLE_CANCEL_REQUEST pfnForcedIdleCancelRequest, + PFN_GPU_UNITS_POWER_CHANGE pfnGPUUnitsPowerChange, + IMG_HANDLE hDevCookie, + PVRSRV_DEV_POWER_STATE eCurrentPowerState, + PVRSRV_DEV_POWER_STATE eDefaultPowerState) +{ + PVRSRV_POWER_DEV *psPowerDevice; + + PVR_ASSERT(!psDeviceNode->psPowerDev); + + PVR_ASSERT(eCurrentPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT); + PVR_ASSERT(eDefaultPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT); + + psPowerDevice = OSAllocMem(sizeof(PVRSRV_POWER_DEV)); + PVR_LOG_RETURN_IF_NOMEM(psPowerDevice, "psPowerDevice"); + + /* setup device for power manager */ + PVRSRVSetPowerCallbacks(psDeviceNode, + psPowerDevice, + pfnDevicePrePower, + pfnDevicePostPower, + pfnSystemPrePower, + pfnSystemPostPower, + pfnForcedIdleRequest, + pfnForcedIdleCancelRequest); + + psPowerDevice->pfnPreClockSpeedChange = pfnPreClockSpeedChange; + psPowerDevice->pfnPostClockSpeedChange = pfnPostClockSpeedChange; + psPowerDevice->pfnGPUUnitsPowerChange = pfnGPUUnitsPowerChange; + psPowerDevice->hSysData = psDeviceNode->psDevConfig->hSysData; + psPowerDevice->hDevCookie = hDevCookie; + 
OSAtomicWrite(&psPowerDevice->eCurrentPowerState, eCurrentPowerState); + psPowerDevice->eDefaultPowerState = eDefaultPowerState; + + psDeviceNode->psPowerDev = psPowerDevice; + + return PVRSRV_OK; +} + +PVRSRV_ERROR PVRSRVRemovePowerDevice(PPVRSRV_DEVICE_NODE psDeviceNode) +{ + if (psDeviceNode->psPowerDev) + { + OSFreeMem(psDeviceNode->psPowerDev); + psDeviceNode->psPowerDev = NULL; + } + + return PVRSRV_OK; +} + +PVRSRV_ERROR PVRSRVGetDevicePowerState(PCPVRSRV_DEVICE_NODE psDeviceNode, + PPVRSRV_DEV_POWER_STATE pePowerState) +{ + PVRSRV_POWER_DEV *psPowerDevice; + + psPowerDevice = psDeviceNode->psPowerDev; + if (psPowerDevice == NULL) + { + return PVRSRV_ERROR_UNKNOWN_POWER_STATE; + } + + *pePowerState = OSAtomicRead(&psPowerDevice->eCurrentPowerState); + + return PVRSRV_OK; +} + +IMG_BOOL PVRSRVIsDevicePowered(PPVRSRV_DEVICE_NODE psDeviceNode) +{ + PVRSRV_DEV_POWER_STATE ePowerState; + + if (PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState) != PVRSRV_OK) + { + return IMG_FALSE; + } + + return (ePowerState == PVRSRV_DEV_POWER_STATE_ON); +} + +PVRSRV_ERROR +PVRSRVDevicePreClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode, + IMG_BOOL bIdleDevice, + void* pvInfo) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_POWER_DEV *psPowerDevice; + IMG_UINT64 ui64StartTimer, ui64StopTimer; + + PVR_UNREFERENCED_PARAMETER(pvInfo); + + ui64StartTimer = PVRSRVProcessStatsGetTimeUs(); + + /* This mutex is released in PVRSRVDevicePostClockSpeedChange. 
*/ + eError = PVRSRVPowerLock(psDeviceNode); + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVPowerLock"); + + psPowerDevice = psDeviceNode->psPowerDev; + if (psPowerDevice) + { + PVRSRV_DEV_POWER_STATE eCurrentPowerState = + OSAtomicRead(&psPowerDevice->eCurrentPowerState); + + if ((eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) && bIdleDevice) + { + /* We can change the clock speed if the device is either IDLE or OFF */ + eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, NULL, IMG_TRUE); + + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "PVRSRVDeviceIdleRequestKM"); + if (eError != PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED) + { + PVRSRVPowerUnlock(psDeviceNode); + } + return eError; + } + } + + eError = psPowerDevice->pfnPreClockSpeedChange(psPowerDevice->hDevCookie, + eCurrentPowerState); + } + + ui64StopTimer = PVRSRVProcessStatsGetTimeUs(); + + InsertPowerTimeStatisticExtraPre(ui64StartTimer, ui64StopTimer); + + return eError; +} + +void +PVRSRVDevicePostClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode, + IMG_BOOL bIdleDevice, + void* pvInfo) +{ + PVRSRV_ERROR eError; + PVRSRV_POWER_DEV *psPowerDevice; + IMG_UINT64 ui64StartTimer, ui64StopTimer; + + PVR_UNREFERENCED_PARAMETER(pvInfo); + + ui64StartTimer = PVRSRVProcessStatsGetTimeUs(); + + psPowerDevice = psDeviceNode->psPowerDev; + if (psPowerDevice) + { + PVRSRV_DEV_POWER_STATE eCurrentPowerState = + OSAtomicRead(&psPowerDevice->eCurrentPowerState); + + eError = psPowerDevice->pfnPostClockSpeedChange(psPowerDevice->hDevCookie, + eCurrentPowerState); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Device %p failed (%s)", + __func__, psDeviceNode, PVRSRVGetErrorString(eError))); + } + + if ((eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) && bIdleDevice) + { + eError = PVRSRVDeviceIdleCancelRequestKM(psDeviceNode); + PVR_LOG_IF_ERROR(eError, "PVRSRVDeviceIdleCancelRequestKM"); + } + } + + /* This mutex was acquired in PVRSRVDevicePreClockSpeedChange. 
*/ + PVRSRVPowerUnlock(psDeviceNode); + + OSAtomicIncrement(&psDeviceNode->iNumClockSpeedChanges); + + ui64StopTimer = PVRSRVProcessStatsGetTimeUs(); + + InsertPowerTimeStatisticExtraPost(ui64StartTimer, ui64StopTimer); +} + +PVRSRV_ERROR PVRSRVDeviceGPUUnitsPowerChange(PPVRSRV_DEVICE_NODE psDeviceNode, + IMG_UINT32 ui32NewValue) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_POWER_DEV *psPowerDevice; + + psPowerDevice = psDeviceNode->psPowerDev; + if (psPowerDevice) + { + PVRSRV_DEV_POWER_STATE eDevicePowerState; + + eError = PVRSRVPowerLock(psDeviceNode); + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVPowerLock"); + + eDevicePowerState = OSAtomicRead(&psPowerDevice->eCurrentPowerState); + if (eDevicePowerState == PVRSRV_DEV_POWER_STATE_ON) + { + /* Device must be idle to change GPU unit(s) power state */ + eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, NULL, IMG_FALSE); + + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "PVRSRVDeviceIdleRequestKM"); + if (eError == PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED) + { + goto ErrorExit; + } + goto ErrorUnlockAndExit; + } + } + + if (psPowerDevice->pfnGPUUnitsPowerChange != NULL) + { + PVRSRV_ERROR eError2 = psPowerDevice->pfnGPUUnitsPowerChange(psPowerDevice->hDevCookie, ui32NewValue); + + if (eError2 != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Device %p failed (%s)", + __func__, psDeviceNode, + PVRSRVGetErrorString(eError2))); + } + } + + if (eDevicePowerState == PVRSRV_DEV_POWER_STATE_ON) + { + eError = PVRSRVDeviceIdleCancelRequestKM(psDeviceNode); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVDeviceIdleCancelRequestKM", ErrorUnlockAndExit); + } + + PVRSRVPowerUnlock(psDeviceNode); + } + + return eError; + +ErrorUnlockAndExit: + PVRSRVPowerUnlock(psDeviceNode); +ErrorExit: + return eError; +} + +/****************************************************************************** + End of file (power.c) +******************************************************************************/ diff --git 
a/drivers/gpu/drm/phytium/octopus/power.h b/drivers/gpu/drm/phytium/octopus/power.h new file mode 100644 index 000000000000..266c34c1a168 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/power.h @@ -0,0 +1,414 @@ +/*************************************************************************/ /*! +@File +@Title Power Management Functions +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Main APIs for power management functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef POWER_H +#define POWER_H + +#include "img_defs.h" +#include "img_types.h" +#include "pvrsrv_device.h" +#include "pvrsrv_error.h" +#include "servicesext.h" +#include "opaque_types.h" + +/*! + ***************************************************************************** + * Power management + *****************************************************************************/ + +typedef struct _PVRSRV_POWER_DEV_TAG_ PVRSRV_POWER_DEV; + +typedef IMG_BOOL (*PFN_SYS_DEV_IS_DEFAULT_STATE_OFF)(PVRSRV_POWER_DEV *psPowerDevice); + + +PVRSRV_ERROR PVRSRVPowerLockInit(PPVRSRV_DEVICE_NODE psDeviceNode); +void PVRSRVPowerLockDeInit(PPVRSRV_DEVICE_NODE psDeviceNode); + +/*! +****************************************************************************** + + @Function PVRSRVPowerLock + + @Description Obtain the mutex for power transitions. Only allowed when + system power is on. + + @Return PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF or PVRSRV_OK + +******************************************************************************/ +PVRSRV_ERROR PVRSRVPowerLock(PPVRSRV_DEVICE_NODE psDeviceNode); + +/*! 
+******************************************************************************
+
+ @Function	PVRSRVPowerUnlock
+
+ @Description	Release the mutex for power transitions
+
+ @Return	None
+
+******************************************************************************/
+void PVRSRVPowerUnlock(PPVRSRV_DEVICE_NODE psDeviceNode);
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVPowerTryLock
+
+ @Description	Try to obtain the mutex for power transitions. Only allowed when
+		system power is on.
+
+ @Return	PVRSRV_ERROR_RETRY or PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF or
+		PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVPowerTryLock(PPVRSRV_DEVICE_NODE psDeviceNode);
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVPwrLockIsLockedByMe
+
+ @Description	Determine if the calling context is holding the device power-lock
+
+ @Return	IMG_BOOL
+
+******************************************************************************/
+IMG_BOOL PVRSRVPwrLockIsLockedByMe(PCPVRSRV_DEVICE_NODE psDeviceNode);
+IMG_BOOL PVRSRVDeviceIsDefaultStateOFF(PVRSRV_POWER_DEV *psPowerDevice);
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVSetDevicePowerStateKM
+
+ @Description	Set the Device into a new state
+
+ @Input		psDeviceNode : Device node
+ @Input		eNewPowerState : New power state
+ @Input		bForced : TRUE if the transition should not fail (e.g. OS request)
+ @Input		bPreserveRam : TRUE if device RAM contents should be preserved
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(PPVRSRV_DEVICE_NODE psDeviceNode,
+					 PVRSRV_DEV_POWER_STATE eNewPowerState,
+					 IMG_BOOL bForced,
+					 IMG_BOOL bPreserveRam);
+
+/*************************************************************************/ /*!
+@Function PVRSRVSetDeviceSystemPowerState +@Description Set the device into a new power state based on the systems power + state +@Input psDeviceNode Device node +@Input eNewSysPowerState New system power state +@Return PVRSRV_ERROR PVRSRV_OK on success or an error otherwise +*/ /**************************************************************************/ +PVRSRV_ERROR PVRSRVSetDeviceSystemPowerState(PPVRSRV_DEVICE_NODE psDeviceNode, + PVRSRV_SYS_POWER_STATE eNewSysPowerState, + IMG_BOOL bPreserveRam); + +/*! +****************************************************************************** + + @Function PVRSRVSetDeviceDefaultPowerState + + @Description Set the default device power state to eNewPowerState + + @Input psDeviceNode : Device node + @Input eNewPowerState : New power state + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVSetDeviceDefaultPowerState(PCPVRSRV_DEVICE_NODE psDeviceNode, + PVRSRV_DEV_POWER_STATE eNewPowerState); + +/*! +****************************************************************************** + + @Function PVRSRVSetSystemPowerState + + @Description Set the system power state to eNewPowerState + + @Input psDeviceConfig : Device config + @Input eNewPowerState : New power state + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVSetSystemPowerState(PVRSRV_DEVICE_CONFIG * psDeviceConfig, + PVRSRV_SYS_POWER_STATE eNewSysPowerState); + +/*! +****************************************************************************** + + @Function PVRSRVSetPowerCallbacks + + @Description Initialise the Power Device's function pointers + to the appropriate callbacks depending on driver mode and + system setup. 
+
+ @Input	psDeviceNode : Device node
+ @Input	psPowerDevice : Power device
+ @Input	pfnDevicePrePower : regular device pre power callback
+ @Input	pfnDevicePostPower : regular device post power callback
+ @Input	pfnSystemPrePower : regular system pre power callback
+ @Input	pfnSystemPostPower : regular system post power callback
+ @Input	pfnForcedIdleRequest : forced idle request callback
+ @Input	pfnForcedIdleCancelRequest : forced idle request cancel callback
+
+******************************************************************************/
+void PVRSRVSetPowerCallbacks(PPVRSRV_DEVICE_NODE psDeviceNode,
+			     PVRSRV_POWER_DEV *psPowerDevice,
+			     PFN_PRE_POWER pfnDevicePrePower,
+			     PFN_POST_POWER pfnDevicePostPower,
+			     PFN_SYS_PRE_POWER pfnSystemPrePower,
+			     PFN_SYS_POST_POWER pfnSystemPostPower,
+			     PFN_FORCED_IDLE_REQUEST pfnForcedIdleRequest,
+			     PFN_FORCED_IDLE_CANCEL_REQUEST pfnForcedIdleCancelRequest);
+
+/* Type PFN_DC_REGISTER_POWER */
+PVRSRV_ERROR PVRSRVRegisterPowerDevice(PPVRSRV_DEVICE_NODE psDeviceNode,
+				       PFN_PRE_POWER pfnDevicePrePower,
+				       PFN_POST_POWER pfnDevicePostPower,
+				       PFN_SYS_PRE_POWER pfnSystemPrePower,
+				       PFN_SYS_POST_POWER pfnSystemPostPower,
+				       PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange,
+				       PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange,
+				       PFN_FORCED_IDLE_REQUEST pfnForcedIdleRequest,
+				       PFN_FORCED_IDLE_CANCEL_REQUEST pfnForcedIdleCancelRequest,
+				       PFN_GPU_UNITS_POWER_CHANGE pfnGPUUnitsPowerChange,
+				       IMG_HANDLE hDevCookie,
+				       PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+				       PVRSRV_DEV_POWER_STATE eDefaultPowerState);
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVRemovePowerDevice
+
+ @Description
+
+ Removes device from power management register. 
Device is located by Device Index + + @Input psDeviceNode : Device node + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVRemovePowerDevice(PPVRSRV_DEVICE_NODE psDeviceNode); + +/*! +****************************************************************************** + + @Function PVRSRVGetDevicePowerState + + @Description + + Return the device power state + + @Input psDeviceNode : Device node + @Output pePowerState : Current power state + + @Return PVRSRV_ERROR_UNKNOWN_POWER_STATE if device could not be found. + PVRSRV_OK otherwise. + +******************************************************************************/ +PVRSRV_ERROR PVRSRVGetDevicePowerState(PCPVRSRV_DEVICE_NODE psDeviceNode, + PPVRSRV_DEV_POWER_STATE pePowerState); + +/*! +****************************************************************************** + + @Function PVRSRVIsDevicePowered + + @Description + + Whether the device is powered, for the purposes of lockup detection. + + @Input psDeviceNode : Device node + + @Return IMG_BOOL + +******************************************************************************/ +IMG_BOOL PVRSRVIsDevicePowered(PPVRSRV_DEVICE_NODE psDeviceNode); + +/**************************************************************************/ /*! +@Function PVRSRVDevicePreClockSpeedChange + +@Description This function is called before a voltage/frequency change is + made to the GPU HW. It informs the host driver of the intention + to make a DVFS change. If allows the host driver to idle + the GPU and begin a hold off period from starting new work + on the GPU. + When this call succeeds the caller *must* call + PVRSRVDevicePostClockSpeedChange() to end the hold off period + to allow new work to be submitted to the GPU. + + Called from system layer or OS layer implementation that + is responsible for triggering a GPU DVFS transition. + +@Input psDeviceNode pointer to the device affected by DVFS transition. 
+@Input bIdleDevice when True, the driver will wait for the GPU to + reach an idle state before the call returns. +@Input pvInfo unused + +@Return PVRSRV_OK on success, power lock acquired and held on exit, + GPU idle. + PVRSRV_ERROR on failure, power lock not held on exit, do not + call PVRSRVDevicePostClockSpeedChange(). +*/ /**************************************************************************/ +PVRSRV_ERROR PVRSRVDevicePreClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode, + IMG_BOOL bIdleDevice, + void *pvInfo); + +/**************************************************************************/ /*! +@Function PVRSRVDevicePostClockSpeedChange + +@Description This function is called after a voltage/frequency change has + been made to the GPU HW following a call to + PVRSRVDevicePreClockSpeedChange(). + Before calling this function the caller must ensure the system + data RGX_DATA->RGX_TIMING_INFORMATION->ui32CoreClockSpeed has + been updated with the new frequency set, measured in Hz. + The function informs the host driver that the DVFS change has + completed. The driver will end the work hold off period, cancel + the device idle period and update its time data records. + When this call returns work submissions are unblocked and + are submitted to the GPU as normal. + This function *must* not be called if the preceding call to + PVRSRVDevicePreClockSpeedChange() failed. + + Called from system layer or OS layer implementation that + is responsible for triggering a GPU DVFS transition. + +@Input psDeviceNode pointer to the device affected by DVFS transition. +@Input bIdleDevice when True, the driver will cancel the GPU + device idle state before the call returns. Value + given must match that used in the call to + PVRSRVDevicePreClockSpeedChange() otherwise + undefined behaviour will result. +@Input pvInfo unused + +@Return void power lock released, no longer held on exit. 
+*/ /**************************************************************************/ +void PVRSRVDevicePostClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode, + IMG_BOOL bIdleDevice, + void *pvInfo); + +/*! +****************************************************************************** + + @Function PVRSRVDeviceIdleRequestKM + + @Description Perform device-specific processing required to force the device + idle. The device power-lock might be temporarily released (and + again re-acquired) during the course of this call, hence to + maintain lock-ordering power-lock should be the last acquired + lock before calling this function + + @Input psDeviceNode : Device node + + @Input pfnIsDefaultStateOff : When specified, the idle request is only + processed if this function passes. + + @Input bDeviceOffPermitted : IMG_TRUE if the transition should not fail + if device off + IMG_FALSE if the transition should fail if + device off + + @Return PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED + When re-acquisition of power-lock failed. + This error NEEDS EXPLICIT HANDLING at call + site as it signifies the caller needs to + AVOID calling PVRSRVPowerUnlock, since + power-lock is no longer "possessed" by + this context. + + PVRSRV_OK When idle request succeeded. + PVRSRV_ERROR Other system errors. + +******************************************************************************/ +PVRSRV_ERROR PVRSRVDeviceIdleRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode, + PFN_SYS_DEV_IS_DEFAULT_STATE_OFF pfnIsDefaultStateOff, + IMG_BOOL bDeviceOffPermitted); + +/*! +****************************************************************************** + + @Function PVRSRVDeviceIdleCancelRequestKM + + @Description Perform device-specific processing required to cancel the forced idle state + on the device, returning to normal operation. 
+ + @Input psDeviceNode : Device node + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVDeviceIdleCancelRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode); + +/*! +****************************************************************************** + +@Function PVRSRVDeviceGPUUnitsPowerChange +@Description Request from system layer for changing power state of GPU + units +@Input psDeviceNode RGX Device Node. +@Input ui32NewValue Value indicating the new power state + of GPU units. how this is interpreted + depends upon the device-specific + function subsequently called by the + server via a pfn. +@Return PVRSRV_ERROR. +*/ /**************************************************************************/ +PVRSRV_ERROR PVRSRVDeviceGPUUnitsPowerChange(PPVRSRV_DEVICE_NODE psDeviceNode, + IMG_UINT32 ui32NewValue); + + +#endif /* POWER_H */ + +/****************************************************************************** + End of file (power.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/private_data.h b/drivers/gpu/drm/phytium/octopus/private_data.h new file mode 100644 index 000000000000..51cb470cfed3 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/private_data.h @@ -0,0 +1,53 @@ +/*************************************************************************/ /*! +@File +@Title Linux private data structure +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(INCLUDED_PRIVATE_DATA_H) +#define INCLUDED_PRIVATE_DATA_H + +#include + +#include "connection_server.h" + +CONNECTION_DATA *LinuxConnectionFromFile(struct file *pFile); +struct file *LinuxFileFromConnection(CONNECTION_DATA *psConnection); + +#endif /* !defined(INCLUDED_PRIVATE_DATA_H) */ diff --git a/drivers/gpu/drm/phytium/octopus/proc_stats.h b/drivers/gpu/drm/phytium/octopus/proc_stats.h new file mode 100644 index 000000000000..528728992e85 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/proc_stats.h @@ -0,0 +1,135 @@ +/*************************************************************************/ /*! +@File +@Title Process and driver statistic definitions +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef PROC_STATS_H +#define PROC_STATS_H + +/* X-Macro for Process stat keys */ +#define PVRSRV_PROCESS_STAT_KEY \ + X(PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS, "Connections") \ + X(PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS, "ConnectionsMax") \ + X(PVRSRV_PROCESS_STAT_TYPE_RC_OOMS, "RenderContextOutOfMemoryEvents") \ + X(PVRSRV_PROCESS_STAT_TYPE_RC_PRS, "RenderContextPartialRenders") \ + X(PVRSRV_PROCESS_STAT_TYPE_RC_GROWS, "RenderContextGrows") \ + X(PVRSRV_PROCESS_STAT_TYPE_RC_PUSH_GROWS, "RenderContextPushGrows") \ + X(PVRSRV_PROCESS_STAT_TYPE_RC_TA_STORES, "RenderContextTAStores") \ + X(PVRSRV_PROCESS_STAT_TYPE_RC_3D_STORES, "RenderContext3DStores") \ + X(PVRSRV_PROCESS_STAT_TYPE_RC_CDM_STORES, "RenderContextCDMStores") \ + X(PVRSRV_PROCESS_STAT_TYPE_RC_TDM_STORES, "RenderContextTDMStores") \ + X(PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_APP, "ZSBufferRequestsByApp") \ + X(PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_FW, "ZSBufferRequestsByFirmware") \ + X(PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_APP, "FreeListGrowRequestsByApp") \ + X(PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_FW, "FreeListGrowRequestsByFirmware") \ + X(PVRSRV_PROCESS_STAT_TYPE_FREELIST_PAGES_INIT, "FreeListInitialPages") \ + X(PVRSRV_PROCESS_STAT_TYPE_FREELIST_MAX_PAGES, "FreeListMaxPages") \ + X(PVRSRV_PROCESS_STAT_TYPE_KMALLOC, "MemoryUsageKMalloc") \ + X(PVRSRV_PROCESS_STAT_TYPE_KMALLOC_MAX, "MemoryUsageKMallocMax") \ + X(PVRSRV_PROCESS_STAT_TYPE_VMALLOC, "MemoryUsageVMalloc") \ + X(PVRSRV_PROCESS_STAT_TYPE_VMALLOC_MAX, "MemoryUsageVMallocMax") \ + X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, "MemoryUsageAllocPTMemoryUMA") \ + X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA_MAX, "MemoryUsageAllocPTMemoryUMAMax") \ + X(PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, "MemoryUsageVMapPTUMA") \ + X(PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA_MAX, "MemoryUsageVMapPTUMAMax") \ + X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, 
"MemoryUsageAllocPTMemoryLMA") \ + X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA_MAX, "MemoryUsageAllocPTMemoryLMAMax") \ + X(PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, "MemoryUsageIORemapPTLMA") \ + X(PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA_MAX, "MemoryUsageIORemapPTLMAMax") \ + X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, "MemoryUsageAllocGPUMemLMA") \ + X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES_MAX, "MemoryUsageAllocGPUMemLMAMax") \ + X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, "MemoryUsageAllocGPUMemUMA") \ + X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES_MAX, "MemoryUsageAllocGPUMemUMAMax") \ + X(PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, "MemoryUsageMappedGPUMemUMA/LMA") \ + X(PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES_MAX, "MemoryUsageMappedGPUMemUMA/LMAMax") \ + X(PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT, "MemoryUsageDmaBufImport") \ + X(PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT_MAX, "MemoryUsageDmaBufImportMax") \ + X(PVRSRV_PROCESS_STAT_TYPE_TOTAL, "MemoryUsageTotal") \ + X(PVRSRV_PROCESS_STAT_TYPE_TOTAL_MAX, "MemoryUsageTotalMax") \ + X(PVRSRV_PROCESS_STAT_TYPE_OOM_VIRTMEM_COUNT, "MemoryOOMCountDeviceVirtual") \ + X(PVRSRV_PROCESS_STAT_TYPE_OOM_PHYSMEM_COUNT, "MemoryOOMCountPhysicalHeap") \ + X(PVRSRV_PROCESS_STAT_TYPE_INVALID_VIRTMEM, "MemoryOOMCountDeviceVirtualAtAddress") + + +/* X-Macro for Driver stat keys */ +#define PVRSRV_DRIVER_STAT_KEY \ + X(PVRSRV_DRIVER_STAT_TYPE_KMALLOC, "MemoryUsageKMalloc") \ + X(PVRSRV_DRIVER_STAT_TYPE_KMALLOC_MAX, "MemoryUsageKMallocMax") \ + X(PVRSRV_DRIVER_STAT_TYPE_VMALLOC, "MemoryUsageVMalloc") \ + X(PVRSRV_DRIVER_STAT_TYPE_VMALLOC_MAX, "MemoryUsageVMallocMax") \ + X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA, "MemoryUsageAllocPTMemoryUMA") \ + X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA_MAX, "MemoryUsageAllocPTMemoryUMAMax") \ + X(PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA, "MemoryUsageVMapPTUMA") \ + X(PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA_MAX, "MemoryUsageVMapPTUMAMax") \ + X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA, 
"MemoryUsageAllocPTMemoryLMA") \ + X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA_MAX, "MemoryUsageAllocPTMemoryLMAMax") \ + X(PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA, "MemoryUsageIORemapPTLMA") \ + X(PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA_MAX, "MemoryUsageIORemapPTLMAMax") \ + X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA, "MemoryUsageAllocGPUMemLMA") \ + X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA_MAX, "MemoryUsageAllocGPUMemLMAMax") \ + X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA, "MemoryUsageAllocGPUMemUMA") \ + X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_MAX, "MemoryUsageAllocGPUMemUMAMax") \ + X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL, "MemoryUsageAllocGPUMemUMAPool") \ + X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL_MAX, "MemoryUsageAllocGPUMemUMAPoolMax") \ + X(PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA, "MemoryUsageMappedGPUMemUMA/LMA") \ + X(PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA_MAX, "MemoryUsageMappedGPUMemUMA/LMAMax") \ + X(PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_IMPORT, "MemoryUsageDmaBufImport") \ + X(PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_IMPORT_MAX, "MemoryUsageDmaBufImportMax") + + +typedef enum { +#define X(stat_type, stat_str) stat_type, + PVRSRV_PROCESS_STAT_KEY +#undef X + PVRSRV_PROCESS_STAT_TYPE_COUNT +}PVRSRV_PROCESS_STAT_TYPE; + +typedef enum { +#define X(stat_type, stat_str) stat_type, + PVRSRV_DRIVER_STAT_KEY +#undef X + PVRSRV_DRIVER_STAT_TYPE_COUNT +}PVRSRV_DRIVER_STAT_TYPE; + +extern const IMG_CHAR *const pszProcessStatType[]; + +extern const IMG_CHAR *const pszDriverStatType[]; + +#endif // PROC_STATS_H diff --git a/drivers/gpu/drm/phytium/octopus/process_stats.c b/drivers/gpu/drm/phytium/octopus/process_stats.c new file mode 100644 index 000000000000..ffc68589e1eb --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/process_stats.c @@ -0,0 +1,3434 @@ +/*************************************************************************/ /*! +@File +@Title Process based statistics +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Description Manages a collection of statistics based around a process + and referenced via OS agnostic methods. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "img_defs.h" +#include "img_types.h" +#include "pvr_debug.h" +#include "lock.h" +#include "allocmem.h" +#include "osfunc.h" +#include "lists.h" +#include "process_stats.h" +#include "ri_server.h" +#include "hash.h" +#include "connection_server.h" +#include "pvrsrv.h" +#include "proc_stats.h" +#include "htbuffer.h" +#include "pvr_ricommon.h" +#include "di_server.h" +#if defined(__linux__) +#include "trace_events.h" +#endif + +/* Enabled OS Statistics entries: DEBUGFS on Linux, undefined for other OSs */ +#if defined(__linux__) && ( \ + defined(PVRSRV_ENABLE_PERPID_STATS) || \ + defined(PVRSRV_ENABLE_CACHEOP_STATS) || \ + defined(PVRSRV_ENABLE_MEMORY_STATS) || \ + defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ) +#define ENABLE_DEBUGFS_PIDS +#endif + +/* Enable GPU memory accounting tracepoint */ +#if defined(__linux__) && ( \ + defined(CONFIG_TRACE_GPU_MEM) || defined(PVRSRV_ENABLE_GPU_MEM_TRACEPOINT) ) +#define ENABLE_GPU_MEM_TRACEPOINT +#endif + +/* + * Maximum history of process statistics that will be kept. + */ +#define MAX_DEAD_LIST_PROCESSES (10) + +/* + * Definition of all the strings used to format process based statistics. 
+ */ + +#if defined(PVRSRV_ENABLE_PERPID_STATS) +/* Array of Process stat type defined using the X-Macro */ +#define X(stat_type, stat_str) stat_str, +const IMG_CHAR *const pszProcessStatType[PVRSRV_PROCESS_STAT_TYPE_COUNT] = { PVRSRV_PROCESS_STAT_KEY }; +#undef X +#endif + +/* Array of Driver stat type defined using the X-Macro */ +#define X(stat_type, stat_str) stat_str, +const IMG_CHAR *const pszDriverStatType[PVRSRV_DRIVER_STAT_TYPE_COUNT] = { PVRSRV_DRIVER_STAT_KEY }; +#undef X + +/* structure used in hash table to track statistic entries */ +typedef struct { + size_t uiSizeInBytes; + IMG_PID uiPid; +} _PVR_STATS_TRACKING_HASH_ENTRY; + +/* Function used internally to decrement tracked per-process statistic entries */ +static void _StatsDecrMemTrackedStat(_PVR_STATS_TRACKING_HASH_ENTRY *psTrackingHashEntry, + PVRSRV_MEM_ALLOC_TYPE eAllocType); + +#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE) +int RawProcessStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData); +#endif +int PowerStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData); +int GlobalStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData); + +/* Note: all of the accesses to the global stats should be protected + * by the gsGlobalStats.hGlobalStatsLock lock. This means all of the + * invocations of macros *_GLOBAL_STAT_VALUE. 
*/ + +/* Macros for fetching stat values */ +#define GET_STAT_VALUE(ptr,var) (ptr)->i32StatValue[(var)] +#define GET_GLOBAL_STAT_VALUE(idx) gsGlobalStats.ui32StatValue[idx] + +#define GET_GPUMEM_GLOBAL_STAT_VALUE() \ + GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA) + \ + GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA) + \ + GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA) + \ + GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA) + \ + GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_IMPORT) + +#define GET_GPUMEM_PERPID_STAT_VALUE(ptr) \ + GET_STAT_VALUE((ptr), PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA) + \ + GET_STAT_VALUE((ptr), PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA) + \ + GET_STAT_VALUE((ptr), PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES) + \ + GET_STAT_VALUE((ptr), PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES) + \ + GET_STAT_VALUE((ptr), PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT) +/* + * Macros for updating stat values. 
+ */ +#define UPDATE_MAX_VALUE(a,b) do { if ((b) > (a)) {(a) = (b);} } while (0) +#define INCREASE_STAT_VALUE(ptr,var,val) do { (ptr)->i32StatValue[(var)] += (val); if ((ptr)->i32StatValue[(var)] > (ptr)->i32StatValue[(var##_MAX)]) {(ptr)->i32StatValue[(var##_MAX)] = (ptr)->i32StatValue[(var)];} } while (0) +#define INCREASE_GLOBAL_STAT_VALUE(var,idx,val) do { (var).ui32StatValue[(idx)] += (val); if ((var).ui32StatValue[(idx)] > (var).ui32StatValue[(idx##_MAX)]) {(var).ui32StatValue[(idx##_MAX)] = (var).ui32StatValue[(idx)];} } while (0) +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) +/* Allow stats to go negative */ +#define DECREASE_STAT_VALUE(ptr,var,val) do { (ptr)->i32StatValue[(var)] -= (val); } while (0) +#define DECREASE_GLOBAL_STAT_VALUE(var,idx,val) do { (var).ui32StatValue[(idx)] -= (val); } while (0) +#else +#define DECREASE_STAT_VALUE(ptr,var,val) do { if ((ptr)->i32StatValue[(var)] >= (val)) { (ptr)->i32StatValue[(var)] -= (val); } else { (ptr)->i32StatValue[(var)] = 0; } } while (0) +#define DECREASE_GLOBAL_STAT_VALUE(var,idx,val) do { if ((var).ui32StatValue[(idx)] >= (val)) { (var).ui32StatValue[(idx)] -= (val); } else { (var).ui32StatValue[(idx)] = 0; } } while (0) +#endif +#define MAX_CACHEOP_STAT 16 +#define INCREMENT_CACHEOP_STAT_IDX_WRAP(x) ((x+1) >= MAX_CACHEOP_STAT ? 0 : (x+1)) +#define DECREMENT_CACHEOP_STAT_IDX_WRAP(x) ((x-1) < 0 ? (MAX_CACHEOP_STAT-1) : (x-1)) + +/* + * Structures for holding statistics... 
+ */ +#if defined(PVRSRV_ENABLE_MEMORY_STATS) +typedef struct _PVRSRV_MEM_ALLOC_REC_ +{ + PVRSRV_MEM_ALLOC_TYPE eAllocType; + IMG_UINT64 ui64Key; + void* pvCpuVAddr; + IMG_CPU_PHYADDR sCpuPAddr; + size_t uiBytes; + void* pvPrivateData; +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON) + void* pvAllocdFromFile; + IMG_UINT32 ui32AllocdFromLine; +#endif + IMG_PID pid; + struct _PVRSRV_MEM_ALLOC_REC_* psNext; + struct _PVRSRV_MEM_ALLOC_REC_** ppsThis; +} PVRSRV_MEM_ALLOC_REC; +#endif + +typedef struct _PVRSRV_PROCESS_STATS_ { + + /* Linked list pointers */ + struct _PVRSRV_PROCESS_STATS_* psNext; + struct _PVRSRV_PROCESS_STATS_* psPrev; + + /* Create per process lock that need to be held + * to edit of its members */ + POS_LOCK hLock; + + /* OS level process ID */ + IMG_PID pid; + IMG_UINT32 ui32RefCount; + + /* Stats... */ + IMG_INT32 i32StatValue[PVRSRV_PROCESS_STAT_TYPE_COUNT]; + IMG_UINT32 ui32StatAllocFlags; + +#if defined(PVRSRV_ENABLE_CACHEOP_STATS) + struct _CACHEOP_STRUCT_ { + PVRSRV_CACHE_OP uiCacheOp; +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + IMG_DEV_VIRTADDR sDevVAddr; + IMG_DEV_PHYADDR sDevPAddr; + RGXFWIF_DM eFenceOpType; +#endif + IMG_DEVMEM_SIZE_T uiOffset; + IMG_DEVMEM_SIZE_T uiSize; + IMG_UINT64 ui64ExecuteTime; + IMG_BOOL bUserModeFlush; + IMG_UINT32 ui32OpSeqNum; + IMG_BOOL bIsFence; + IMG_PID ownerPid; + } asCacheOp[MAX_CACHEOP_STAT]; + IMG_INT32 uiCacheOpWriteIndex; +#endif + + /* Other statistics structures */ +#if defined(PVRSRV_ENABLE_MEMORY_STATS) + PVRSRV_MEM_ALLOC_REC* psMemoryRecords; +#endif +} PVRSRV_PROCESS_STATS; + +#if defined(ENABLE_DEBUGFS_PIDS) + +typedef struct _PVRSRV_OS_STAT_ENTRY_ +{ + DI_GROUP *psStatsDIGroup; + DI_ENTRY *psProcessStatsDIEntry; + DI_ENTRY *psMemStatsDIEntry; + DI_ENTRY *psRIMemStatsDIEntry; + DI_ENTRY *psCacheOpStatsDIEntry; +} PVRSRV_OS_STAT_ENTRY; + +static PVRSRV_OS_STAT_ENTRY gsLiveStatEntries; +static PVRSRV_OS_STAT_ENTRY gsRetiredStatEntries; + +int 
GenericStatsPrintElementsLive(OSDI_IMPL_ENTRY *psEntry, void *pvData); +int GenericStatsPrintElementsRetired(OSDI_IMPL_ENTRY *psEntry, void *pvData); + +/* + * Functions for printing the information stored... + */ +#if defined(PVRSRV_ENABLE_PERPID_STATS) +void ProcessStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, + PVRSRV_PROCESS_STATS *psProcessStats); +#endif + +#if defined(PVRSRV_ENABLE_MEMORY_STATS) +void MemStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, + PVRSRV_PROCESS_STATS *psProcessStats); +#endif + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) +void RIMemStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, + PVRSRV_PROCESS_STATS *psProcessStats); +#endif + +#if defined(PVRSRV_ENABLE_CACHEOP_STATS) +void CacheOpStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, + PVRSRV_PROCESS_STATS *psProcessStats); +#endif + +typedef void (PVRSRV_STATS_PRINT_ELEMENTS)(OSDI_IMPL_ENTRY *psEntry, + PVRSRV_PROCESS_STATS *psProcessStats); + +typedef enum +{ + PVRSRV_STAT_TYPE_PROCESS, + PVRSRV_STAT_TYPE_MEMORY, + PVRSRV_STAT_TYPE_RIMEMORY, + PVRSRV_STAT_TYPE_CACHEOP, + PVRSRV_STAT_TYPE_LAST +} PVRSRV_STAT_TYPE; + +#define SEPARATOR_STR_LEN 166 + +typedef struct _PVRSRV_STAT_PV_DATA_ { + + PVRSRV_STAT_TYPE eStatType; + PVRSRV_STATS_PRINT_ELEMENTS* pfnStatsPrintElements; + IMG_CHAR szLiveStatsHeaderStr[SEPARATOR_STR_LEN + 1]; + IMG_CHAR szRetiredStatsHeaderStr[SEPARATOR_STR_LEN + 1]; + +} PVRSRV_STAT_PV_DATA; + +static PVRSRV_STAT_PV_DATA g_StatPvDataArr[] = { + { PVRSRV_STAT_TYPE_PROCESS, NULL, " Process" , " Process" }, + { PVRSRV_STAT_TYPE_MEMORY, NULL, " Memory Allocation" , " Memory Allocation" }, + { PVRSRV_STAT_TYPE_RIMEMORY, NULL, " Resource Allocation" , " Resource Allocation" }, + { PVRSRV_STAT_TYPE_CACHEOP, NULL, " Cache Maintenance Ops" , " Cache Maintenance Ops" } + }; + +#define GET_STAT_ENTRY_ID(STAT_TYPE) &g_StatPvDataArr[(STAT_TYPE)] + +/* Generic header strings */ +static const IMG_CHAR g_szLiveHeaderStr[] = " Statistics for LIVE Processes "; +static const IMG_CHAR 
g_szRetiredHeaderStr[] = " Statistics for RETIRED Processes "; + +/* Separator string used for separating stats for different PIDs */ +static IMG_CHAR g_szSeparatorStr[SEPARATOR_STR_LEN + 1] = ""; + +static inline void +_prepareStatsHeaderString(IMG_CHAR *pszStatsSpecificStr, const IMG_CHAR* pszGenericHeaderStr) +{ + IMG_UINT32 ui32NumSeparators; + IMG_CHAR szStatsHeaderFooterStr[75]; + + /* Prepare text content of the header in a local string */ + OSStringLCopy(szStatsHeaderFooterStr, pszStatsSpecificStr, ARRAY_SIZE(szStatsHeaderFooterStr)); + OSStringLCat(szStatsHeaderFooterStr, pszGenericHeaderStr, ARRAY_SIZE(szStatsHeaderFooterStr)); + + /* Write all '-' characters to the header string */ + memset(pszStatsSpecificStr, '-', SEPARATOR_STR_LEN); + pszStatsSpecificStr[SEPARATOR_STR_LEN] = '\0'; + + /* Find the spot for text content in the header string */ + ui32NumSeparators = (SEPARATOR_STR_LEN - OSStringLength(szStatsHeaderFooterStr)) >> 1; + + /* Finally write the text content */ + OSSNPrintf(pszStatsSpecificStr + ui32NumSeparators, + OSStringLength(szStatsHeaderFooterStr), + "%s", szStatsHeaderFooterStr); + + /* Overwrite the '\0' character added by OSSNPrintf() */ + if (OSStringLength(szStatsHeaderFooterStr) > 0) + { + pszStatsSpecificStr[ui32NumSeparators + OSStringLength(szStatsHeaderFooterStr) - 1] = ' '; + } +} + +static inline void +_prepareSeparatorStrings(void) +{ + IMG_UINT32 i; + + /* Prepare header strings for each stat type */ + for (i = 0; i < PVRSRV_STAT_TYPE_LAST; ++i) + { + _prepareStatsHeaderString(g_StatPvDataArr[i].szLiveStatsHeaderStr, g_szLiveHeaderStr); + _prepareStatsHeaderString(g_StatPvDataArr[i].szRetiredStatsHeaderStr, g_szRetiredHeaderStr); + } + + /* Prepare separator string to separate stats for different PIDs */ + memset(g_szSeparatorStr, '-', SEPARATOR_STR_LEN); + g_szSeparatorStr[SEPARATOR_STR_LEN] = '\0'; +} + +static inline void +_prepareStatsPrivateData(void) +{ +#if defined(PVRSRV_ENABLE_PERPID_STATS) + 
g_StatPvDataArr[PVRSRV_STAT_TYPE_PROCESS].pfnStatsPrintElements = ProcessStatsPrintElements; +#endif + +#if defined(PVRSRV_ENABLE_MEMORY_STATS) + g_StatPvDataArr[PVRSRV_STAT_TYPE_MEMORY].pfnStatsPrintElements = MemStatsPrintElements; +#endif + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + g_StatPvDataArr[PVRSRV_STAT_TYPE_RIMEMORY].pfnStatsPrintElements = RIMemStatsPrintElements; +#endif + +#if defined(PVRSRV_ENABLE_CACHEOP_STATS) + g_StatPvDataArr[PVRSRV_STAT_TYPE_CACHEOP].pfnStatsPrintElements = CacheOpStatsPrintElements; +#endif + + _prepareSeparatorStrings(); +} + +#endif + +#if defined(PVRSRV_ENABLE_MEMORY_STATS) +static IMPLEMENT_LIST_INSERT(PVRSRV_MEM_ALLOC_REC) +static IMPLEMENT_LIST_REMOVE(PVRSRV_MEM_ALLOC_REC) +#endif + +/* + * Global Boolean to flag when the statistics are ready to monitor + * memory allocations. + */ +static IMG_BOOL bProcessStatsInitialised = IMG_FALSE; + +/* + * Linked lists for process stats. Live stats are for processes which are still running + * and the dead list holds those that have exited. + */ +static PVRSRV_PROCESS_STATS *g_psLiveList; +static PVRSRV_PROCESS_STATS *g_psDeadList; + +static POS_LOCK g_psLinkedListLock; +/* Lockdep feature in the kernel cannot differentiate between different instances of same lock type. + * This allows it to group all such instances of the same lock type under one class + * The consequence of this is that, if lock acquisition is nested on different instances, it generates + * a false warning message about the possible occurrence of deadlock due to recursive lock acquisition. + * Hence we create the following sub classes to explicitly appraise Lockdep of such safe lock nesting */ +#define PROCESS_LOCK_SUBCLASS_CURRENT 1 +#define PROCESS_LOCK_SUBCLASS_PREV 2 +#define PROCESS_LOCK_SUBCLASS_NEXT 3 +#if defined(ENABLE_DEBUGFS_PIDS) +/* + * Pointer to OS folder to hold PID folders. 
+ */ +static DI_GROUP *psProcStatsDIGroup; +#endif +#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE) +static DI_ENTRY *psProcStatsDIEntry; +#endif + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) +/* Global driver PID stats registration handle */ +static IMG_HANDLE g_hDriverProcessStats; +#endif + +/* Global driver-data folders */ +typedef struct _GLOBAL_STATS_ +{ + IMG_UINT32 ui32StatValue[PVRSRV_DRIVER_STAT_TYPE_COUNT]; + POS_LOCK hGlobalStatsLock; +} GLOBAL_STATS; + +static DI_ENTRY *psGlobalMemDIEntry; +static GLOBAL_STATS gsGlobalStats; + +#define HASH_INITIAL_SIZE 5 +/* A hash table used to store the size of any vmalloc'd allocation + * against its address (not needed for kmallocs as we can use ksize()) */ +static HASH_TABLE* gpsSizeTrackingHashTable; +static POS_LOCK gpsSizeTrackingHashTableLock; + +static PVRSRV_ERROR _RegisterProcess(IMG_HANDLE *phProcessStats, IMG_PID ownerPid); + +static void _AddProcessStatsToFrontOfDeadList(PVRSRV_PROCESS_STATS* psProcessStats); +static void _AddProcessStatsToFrontOfLiveList(PVRSRV_PROCESS_STATS* psProcessStats); +static void _RemoveProcessStatsFromList(PVRSRV_PROCESS_STATS* psProcessStats); + +static void _DestroyProcessStat(PVRSRV_PROCESS_STATS* psProcessStats); + +static void _DecreaseProcStatValue(PVRSRV_MEM_ALLOC_TYPE eAllocType, + PVRSRV_PROCESS_STATS* psProcessStats, + IMG_UINT32 uiBytes); +/* + * Power statistics related definitions + */ + +/* For the mean time, use an exponentially weighted moving average with a + * 1/4 weighting for the new measurement. + */ +#define MEAN_TIME(A, B) ( ((3*(A))/4) + ((1 * (B))/4) ) + +#define UPDATE_TIME(time, newtime) \ + ((time) > 0 ? 
MEAN_TIME((time), (newtime)) : (newtime)) + +/* Enum to be used as input to GET_POWER_STAT_INDEX */ +typedef enum +{ + DEVICE = 0, + SYSTEM = 1, + POST_POWER = 0, + PRE_POWER = 2, + POWER_OFF = 0, + POWER_ON = 4, + NOT_FORCED = 0, + FORCED = 8, +} PVRSRV_POWER_STAT_TYPE; + +/* Macro used to access one of the power timing statistics inside an array */ +#define GET_POWER_STAT_INDEX(forced,powon,prepow,system) \ + ((forced) + (powon) + (prepow) + (system)) + +/* For the power timing stats we need 16 variables to store all the + * combinations of forced/not forced, power-on/power-off, pre-power/post-power + * and device/system statistics + */ +#define NUM_POWER_STATS (16) +static IMG_UINT32 aui32PowerTimingStats[NUM_POWER_STATS]; + +static DI_ENTRY *psPowerStatsDIEntry; + +typedef struct _EXTRA_POWER_STATS_ +{ + IMG_UINT64 ui64PreClockSpeedChangeDuration; + IMG_UINT64 ui64BetweenPreEndingAndPostStartingDuration; + IMG_UINT64 ui64PostClockSpeedChangeDuration; +} EXTRA_POWER_STATS; + +#define NUM_EXTRA_POWER_STATS 10 + +static EXTRA_POWER_STATS asClockSpeedChanges[NUM_EXTRA_POWER_STATS]; +static IMG_UINT32 ui32ClockSpeedIndexStart, ui32ClockSpeedIndexEnd; + + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +void InsertPowerTimeStatistic(IMG_UINT64 ui64SysStartTime, IMG_UINT64 ui64SysEndTime, + IMG_UINT64 ui64DevStartTime, IMG_UINT64 ui64DevEndTime, + IMG_BOOL bForced, IMG_BOOL bPowerOn, IMG_BOOL bPrePower) +{ + IMG_UINT32 *pui32Stat; + IMG_UINT64 ui64DeviceDiff = ui64DevEndTime - ui64DevStartTime; + IMG_UINT64 ui64SystemDiff = ui64SysEndTime - ui64SysStartTime; + IMG_UINT32 ui32Index; + + if (bPrePower) + { + HTBLOGK(HTB_SF_MAIN_PRE_POWER, bPowerOn, ui64DeviceDiff, ui64SystemDiff); + } + else + { + HTBLOGK(HTB_SF_MAIN_POST_POWER, bPowerOn, ui64SystemDiff, ui64DeviceDiff); + } + + ui32Index = GET_POWER_STAT_INDEX(bForced ? FORCED : NOT_FORCED, + bPowerOn ? POWER_ON : POWER_OFF, + bPrePower ? 
PRE_POWER : POST_POWER, + DEVICE); + pui32Stat = &aui32PowerTimingStats[ui32Index]; + *pui32Stat = UPDATE_TIME(*pui32Stat, ui64DeviceDiff); + + ui32Index = GET_POWER_STAT_INDEX(bForced ? FORCED : NOT_FORCED, + bPowerOn ? POWER_ON : POWER_OFF, + bPrePower ? PRE_POWER : POST_POWER, + SYSTEM); + pui32Stat = &aui32PowerTimingStats[ui32Index]; + *pui32Stat = UPDATE_TIME(*pui32Stat, ui64SystemDiff); +} + +static IMG_UINT64 ui64PreClockSpeedChangeMark; + +void InsertPowerTimeStatisticExtraPre(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64Stoptimer) +{ + asClockSpeedChanges[ui32ClockSpeedIndexEnd].ui64PreClockSpeedChangeDuration = ui64Stoptimer - ui64StartTimer; + + ui64PreClockSpeedChangeMark = OSClockus(); +} + +void InsertPowerTimeStatisticExtraPost(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer) +{ + IMG_UINT64 ui64Duration = ui64StartTimer - ui64PreClockSpeedChangeMark; + + PVR_ASSERT(ui64PreClockSpeedChangeMark > 0); + + asClockSpeedChanges[ui32ClockSpeedIndexEnd].ui64BetweenPreEndingAndPostStartingDuration = ui64Duration; + asClockSpeedChanges[ui32ClockSpeedIndexEnd].ui64PostClockSpeedChangeDuration = ui64StopTimer - ui64StartTimer; + + ui32ClockSpeedIndexEnd = (ui32ClockSpeedIndexEnd + 1) % NUM_EXTRA_POWER_STATS; + + if (ui32ClockSpeedIndexEnd == ui32ClockSpeedIndexStart) + { + ui32ClockSpeedIndexStart = (ui32ClockSpeedIndexStart + 1) % NUM_EXTRA_POWER_STATS; + } + + ui64PreClockSpeedChangeMark = 0; +} +#endif + +/*************************************************************************/ /*! +@Function _FindProcessStatsInLiveList +@Description Searches the Live Process List for a statistics structure that + matches the PID given. +@Input pid Process to search for. +@Return Pointer to stats structure for the process. 
+*/ /**************************************************************************/ +static PVRSRV_PROCESS_STATS* +_FindProcessStatsInLiveList(IMG_PID pid) +{ + PVRSRV_PROCESS_STATS* psProcessStats = g_psLiveList; + + while (psProcessStats != NULL) + { + if (psProcessStats->pid == pid) + { + return psProcessStats; + } + + psProcessStats = psProcessStats->psNext; + } + + return NULL; +} /* _FindProcessStatsInLiveList */ + +/*************************************************************************/ /*! +@Function _FindProcessStatsInDeadList +@Description Searches the Dead Process List for a statistics structure that + matches the PID given. +@Input pid Process to search for. +@Return Pointer to stats structure for the process. +*/ /**************************************************************************/ +static PVRSRV_PROCESS_STATS* +_FindProcessStatsInDeadList(IMG_PID pid) +{ + PVRSRV_PROCESS_STATS* psProcessStats = g_psDeadList; + + while (psProcessStats != NULL) + { + if (psProcessStats->pid == pid) + { + return psProcessStats; + } + + psProcessStats = psProcessStats->psNext; + } + + return NULL; +} /* _FindProcessStatsInDeadList */ + +/*************************************************************************/ /*! +@Function _FindProcessStats +@Description Searches the Live and Dead Process Lists for a statistics + structure that matches the PID given. +@Input pid Process to search for. +@Return Pointer to stats structure for the process. +*/ /**************************************************************************/ +static PVRSRV_PROCESS_STATS* +_FindProcessStats(IMG_PID pid) +{ + PVRSRV_PROCESS_STATS* psProcessStats = _FindProcessStatsInLiveList(pid); + + if (psProcessStats == NULL) + { + psProcessStats = _FindProcessStatsInDeadList(pid); + } + + return psProcessStats; +} /* _FindProcessStats */ + +/*************************************************************************/ /*! 
+@Function _CompressMemoryUsage +@Description Reduces memory usage by deleting old statistics data. + This function requires that the list lock is not held! +*/ /**************************************************************************/ +static void +_CompressMemoryUsage(void) +{ + PVRSRV_PROCESS_STATS* psProcessStats; + PVRSRV_PROCESS_STATS* psProcessStatsToBeFreed; + IMG_UINT32 ui32ItemsRemaining; + + /* + * We hold the lock whilst checking the list, but we'll release it + * before freeing memory (as that will require the lock too)! + */ + OSLockAcquire(g_psLinkedListLock); + + /* Check that the dead list is not bigger than the max size... */ + psProcessStats = g_psDeadList; + psProcessStatsToBeFreed = NULL; + ui32ItemsRemaining = MAX_DEAD_LIST_PROCESSES; + + while (psProcessStats != NULL && ui32ItemsRemaining > 0) + { + ui32ItemsRemaining--; + if (ui32ItemsRemaining == 0) + { + /* This is the last allowed process, cut the linked list here! */ + psProcessStatsToBeFreed = psProcessStats->psNext; + psProcessStats->psNext = NULL; + } + else + { + psProcessStats = psProcessStats->psNext; + } + } + + OSLockRelease(g_psLinkedListLock); + + /* Any processes stats remaining will need to be destroyed... */ + while (psProcessStatsToBeFreed != NULL) + { + PVRSRV_PROCESS_STATS* psNextProcessStats = psProcessStatsToBeFreed->psNext; + + psProcessStatsToBeFreed->psNext = NULL; + _DestroyProcessStat(psProcessStatsToBeFreed); + psProcessStatsToBeFreed = psNextProcessStats; + } +} /* _CompressMemoryUsage */ + +/* These functions move the process stats from the live to the dead list. + * _MoveProcessToDeadList moves the entry in the global lists and + * it needs to be protected by g_psLinkedListLock. + * The accompanying OS calls (e.g. _DestroyProcessStat) however + * shouldn't be made under g_psLinkedListLock because this could generate a + * lockdep warning. 
*/ +static void +_MoveProcessToDeadList(PVRSRV_PROCESS_STATS* psProcessStats) +{ + /* Take the element out of the live list and append to the dead list... */ + _RemoveProcessStatsFromList(psProcessStats); + _AddProcessStatsToFrontOfDeadList(psProcessStats); +} /* _MoveProcessToDeadList */ + +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) +/* These functions move the process stats from the dead to the live list. + * _MoveProcessToLiveList moves the entry in the global lists and + * it needs to be protected by g_psLinkedListLock. + * Any accompanying OS calls however + * shouldn't be made under g_psLinkedListLock because this could generate a + * lockdep warning. */ +static void +_MoveProcessToLiveList(PVRSRV_PROCESS_STATS* psProcessStats) +{ + /* Take the element out of the dead list and append to the live list... */ + _RemoveProcessStatsFromList(psProcessStats); + _AddProcessStatsToFrontOfLiveList(psProcessStats); +} /* _MoveProcessToLiveList */ +#endif + +/*************************************************************************/ /*! +@Function _AddProcessStatsToFrontOfLiveList +@Description Add a statistic to the live list head. +@Input psProcessStats Process stats to add. +*/ /**************************************************************************/ +static void +_AddProcessStatsToFrontOfLiveList(PVRSRV_PROCESS_STATS* psProcessStats) +{ + /* This function should always be called under global list lock g_psLinkedListLock. 
+ */ + PVR_ASSERT(psProcessStats != NULL); + + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + + if (g_psLiveList != NULL) + { + PVR_ASSERT(psProcessStats != g_psLiveList); + OSLockAcquireNested(g_psLiveList->hLock, PROCESS_LOCK_SUBCLASS_PREV); + g_psLiveList->psPrev = psProcessStats; + OSLockRelease(g_psLiveList->hLock); + psProcessStats->psNext = g_psLiveList; + } + + g_psLiveList = psProcessStats; + + OSLockRelease(psProcessStats->hLock); +} /* _AddProcessStatsToFrontOfLiveList */ + +/*************************************************************************/ /*! +@Function _AddProcessStatsToFrontOfDeadList +@Description Add a statistic to the dead list head. +@Input psProcessStats Process stats to add. +*/ /**************************************************************************/ +static void +_AddProcessStatsToFrontOfDeadList(PVRSRV_PROCESS_STATS* psProcessStats) +{ + PVR_ASSERT(psProcessStats != NULL); + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + + if (g_psDeadList != NULL) + { + PVR_ASSERT(psProcessStats != g_psDeadList); + OSLockAcquireNested(g_psDeadList->hLock, PROCESS_LOCK_SUBCLASS_PREV); + g_psDeadList->psPrev = psProcessStats; + OSLockRelease(g_psDeadList->hLock); + psProcessStats->psNext = g_psDeadList; + } + + g_psDeadList = psProcessStats; + + OSLockRelease(psProcessStats->hLock); +} /* _AddProcessStatsToFrontOfDeadList */ + +/*************************************************************************/ /*! +@Function _RemoveProcessStatsFromList +@Description Detaches a process from either the live or dead list. +@Input psProcessStats Process stats to remove. +*/ /**************************************************************************/ +static void +_RemoveProcessStatsFromList(PVRSRV_PROCESS_STATS* psProcessStats) +{ + PVR_ASSERT(psProcessStats != NULL); + + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + + /* Remove the item from the linked lists... 
*/ + if (g_psLiveList == psProcessStats) + { + g_psLiveList = psProcessStats->psNext; + + if (g_psLiveList != NULL) + { + PVR_ASSERT(psProcessStats != g_psLiveList); + OSLockAcquireNested(g_psLiveList->hLock, PROCESS_LOCK_SUBCLASS_PREV); + g_psLiveList->psPrev = NULL; + OSLockRelease(g_psLiveList->hLock); + + } + } + else if (g_psDeadList == psProcessStats) + { + g_psDeadList = psProcessStats->psNext; + + if (g_psDeadList != NULL) + { + PVR_ASSERT(psProcessStats != g_psDeadList); + OSLockAcquireNested(g_psDeadList->hLock, PROCESS_LOCK_SUBCLASS_PREV); + g_psDeadList->psPrev = NULL; + OSLockRelease(g_psDeadList->hLock); + } + } + else + { + PVRSRV_PROCESS_STATS* psNext = psProcessStats->psNext; + PVRSRV_PROCESS_STATS* psPrev = psProcessStats->psPrev; + + if (psProcessStats->psNext != NULL) + { + PVR_ASSERT(psProcessStats != psNext); + OSLockAcquireNested(psNext->hLock, PROCESS_LOCK_SUBCLASS_NEXT); + psProcessStats->psNext->psPrev = psPrev; + OSLockRelease(psNext->hLock); + } + if (psProcessStats->psPrev != NULL) + { + PVR_ASSERT(psProcessStats != psPrev); + OSLockAcquireNested(psPrev->hLock, PROCESS_LOCK_SUBCLASS_PREV); + psProcessStats->psPrev->psNext = psNext; + OSLockRelease(psPrev->hLock); + } + } + + + /* Reset the pointers in this cell, as it is not attached to anything */ + psProcessStats->psNext = NULL; + psProcessStats->psPrev = NULL; + + OSLockRelease(psProcessStats->hLock); + +} /* _RemoveProcessStatsFromList */ + +static PVRSRV_ERROR +_AllocateProcessStats(PVRSRV_PROCESS_STATS **ppsProcessStats, IMG_PID ownerPid) +{ + PVRSRV_ERROR eError; + PVRSRV_PROCESS_STATS *psProcessStats; + + psProcessStats = OSAllocZMemNoStats(sizeof(PVRSRV_PROCESS_STATS)); + PVR_RETURN_IF_NOMEM(psProcessStats); + + psProcessStats->pid = ownerPid; + psProcessStats->ui32RefCount = 1; + + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS] = 1; + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS] = 1; + + eError = 
OSLockCreateNoStats(&psProcessStats->hLock); + PVR_GOTO_IF_ERROR(eError, e0); + + *ppsProcessStats = psProcessStats; + return PVRSRV_OK; + +e0: + OSFreeMemNoStats(psProcessStats); + return PVRSRV_ERROR_OUT_OF_MEMORY; +} + +/*************************************************************************/ /*! +@Function _DestroyProcessStat +@Description Frees memory and resources held by a process statistic. +@Input psProcessStats Process stats to destroy. +*/ /**************************************************************************/ +static void +_DestroyProcessStat(PVRSRV_PROCESS_STATS* psProcessStats) +{ + PVR_ASSERT(psProcessStats != NULL); + + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + + /* Free the memory statistics... */ +#if defined(PVRSRV_ENABLE_MEMORY_STATS) + while (psProcessStats->psMemoryRecords) + { + List_PVRSRV_MEM_ALLOC_REC_Remove(psProcessStats->psMemoryRecords); + } +#endif + OSLockRelease(psProcessStats->hLock); + + /*Destroy the lock */ + OSLockDestroyNoStats(psProcessStats->hLock); + + /* Free the memory... 
*/ + OSFreeMemNoStats(psProcessStats); +} /* _DestroyProcessStat */ + +#if defined(ENABLE_DEBUGFS_PIDS) +static inline void +_createStatsFiles(PVRSRV_OS_STAT_ENTRY* psStatsEntries, + DI_PFN_SHOW pfnStatsShow) +{ + PVRSRV_ERROR eError; + DI_ITERATOR_CB sIterator = {.pfnShow = pfnStatsShow}; + +#if defined(PVRSRV_ENABLE_PERPID_STATS) + eError = DICreateEntry("process_stats", psStatsEntries->psStatsDIGroup, + &sIterator, + GET_STAT_ENTRY_ID(PVRSRV_STAT_TYPE_PROCESS), + DI_ENTRY_TYPE_GENERIC, + &psStatsEntries->psProcessStatsDIEntry); + PVR_LOG_IF_ERROR(eError, "DICreateEntry (1)"); +#endif + +#if defined(PVRSRV_ENABLE_CACHEOP_STATS) + eError = DICreateEntry("cache_ops_exec", psStatsEntries->psStatsDIGroup, + &sIterator, + GET_STAT_ENTRY_ID(PVRSRV_STAT_TYPE_CACHEOP), + DI_ENTRY_TYPE_GENERIC, + &psStatsEntries->psCacheOpStatsDIEntry); + PVR_LOG_IF_ERROR(eError, "DICreateEntry (2)"); +#endif + +#if defined(PVRSRV_ENABLE_MEMORY_STATS) + eError = DICreateEntry("mem_area", psStatsEntries->psStatsDIGroup, + &sIterator, + GET_STAT_ENTRY_ID(PVRSRV_STAT_TYPE_MEMORY), + DI_ENTRY_TYPE_GENERIC, + &psStatsEntries->psMemStatsDIEntry); + PVR_LOG_IF_ERROR(eError, "DICreateEntry (3)"); +#endif + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + eError = DICreateEntry("gpu_mem_area", psStatsEntries->psStatsDIGroup, + &sIterator, + GET_STAT_ENTRY_ID(PVRSRV_STAT_TYPE_RIMEMORY), + DI_ENTRY_TYPE_GENERIC, + &psStatsEntries->psRIMemStatsDIEntry); + PVR_LOG_IF_ERROR(eError, "DICreateEntry (4)"); +#endif +} + +static inline void +_createStatisticsEntries(void) +{ + PVRSRV_ERROR eError; + + eError = DICreateGroup("proc_stats", NULL, &psProcStatsDIGroup); + PVR_LOG_IF_ERROR(eError, "DICreateGroup (1)"); + eError = DICreateGroup("live_pids_stats", psProcStatsDIGroup, + &gsLiveStatEntries.psStatsDIGroup); + PVR_LOG_IF_ERROR(eError, "DICreateGroup (2)"); + eError = DICreateGroup("retired_pids_stats", psProcStatsDIGroup, + &gsRetiredStatEntries.psStatsDIGroup); + PVR_LOG_IF_ERROR(eError, "DICreateGroup 
(3)"); + + _createStatsFiles(&gsLiveStatEntries, GenericStatsPrintElementsLive); + _createStatsFiles(&gsRetiredStatEntries, GenericStatsPrintElementsRetired); + + _prepareStatsPrivateData(); +} + +static inline void +_removeStatsFiles(PVRSRV_OS_STAT_ENTRY* psStatsEntries) +{ +#if defined(PVRSRV_ENABLE_PERPID_STATS) + DIDestroyEntry(psStatsEntries->psProcessStatsDIEntry); + psStatsEntries->psProcessStatsDIEntry = NULL; +#endif + +#if defined(PVRSRV_ENABLE_CACHEOP_STATS) + DIDestroyEntry(psStatsEntries->psCacheOpStatsDIEntry); + psStatsEntries->psCacheOpStatsDIEntry = NULL; +#endif + +#if defined(PVRSRV_ENABLE_MEMORY_STATS) + DIDestroyEntry(psStatsEntries->psMemStatsDIEntry); + psStatsEntries->psMemStatsDIEntry = NULL; +#endif + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + DIDestroyEntry(psStatsEntries->psRIMemStatsDIEntry); + psStatsEntries->psRIMemStatsDIEntry = NULL; +#endif +} + +static inline void +_removeStatisticsEntries(void) +{ + _removeStatsFiles(&gsLiveStatEntries); + _removeStatsFiles(&gsRetiredStatEntries); + + DIDestroyGroup(gsLiveStatEntries.psStatsDIGroup); + gsLiveStatEntries.psStatsDIGroup = NULL; + DIDestroyGroup(gsRetiredStatEntries.psStatsDIGroup); + gsRetiredStatEntries.psStatsDIGroup = NULL; + DIDestroyGroup(psProcStatsDIGroup); + psProcStatsDIGroup = NULL; +} +#endif + +/*************************************************************************/ /*! +@Function PVRSRVStatsInitialise +@Description Entry point for initialising the statistics module. +@Return Standard PVRSRV_ERROR error code. +*/ /**************************************************************************/ +PVRSRV_ERROR +PVRSRVStatsInitialise(void) +{ + PVRSRV_ERROR error; + + PVR_ASSERT(g_psLiveList == NULL); + PVR_ASSERT(g_psDeadList == NULL); + PVR_ASSERT(g_psLinkedListLock == NULL); + PVR_ASSERT(gpsSizeTrackingHashTable == NULL); + PVR_ASSERT(bProcessStatsInitialised == IMG_FALSE); + + /* We need a lock to protect the linked lists... 
*/ + error = OSLockCreate(&g_psLinkedListLock); + PVR_GOTO_IF_ERROR(error, return_); + + /* We also need a lock to protect the hash table used for size tracking. */ + error = OSLockCreate(&gpsSizeTrackingHashTableLock); + PVR_GOTO_IF_ERROR(error, detroy_linked_list_lock_); + + /* We also need a lock to protect the GlobalStat counters */ + error = OSLockCreate(&gsGlobalStats.hGlobalStatsLock); + PVR_GOTO_IF_ERROR(error, destroy_hashtable_lock_); + + /* Flag that we are ready to start monitoring memory allocations. */ + + gpsSizeTrackingHashTable = HASH_Create(HASH_INITIAL_SIZE); + PVR_GOTO_IF_NOMEM(gpsSizeTrackingHashTable, error, destroy_stats_lock_); + + OSCachedMemSet(asClockSpeedChanges, 0, sizeof(asClockSpeedChanges)); + + bProcessStatsInitialised = IMG_TRUE; +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + /* Register our 'system' PID to hold driver-wide alloc stats */ + _RegisterProcess(&g_hDriverProcessStats, PVR_SYS_ALLOC_PID); +#endif + +#if defined(ENABLE_DEBUGFS_PIDS) + _createStatisticsEntries(); +#endif + +#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE) + { + DI_ITERATOR_CB sIterator = {.pfnShow = RawProcessStatsPrintElements}; + error = DICreateEntry("memtrack_stats", NULL, &sIterator, NULL, + DI_ENTRY_TYPE_GENERIC, &psProcStatsDIEntry); + PVR_LOG_IF_ERROR(error, "DICreateEntry (1)"); + } +#endif + + { + DI_ITERATOR_CB sIterator = {.pfnShow = PowerStatsPrintElements}; + /* Create power stats entry... 
*/ + error = DICreateEntry("power_timing_stats", NULL, &sIterator, NULL, + DI_ENTRY_TYPE_GENERIC, &psPowerStatsDIEntry); + PVR_LOG_IF_ERROR(error, "DICreateEntry (2)"); + } + + { + DI_ITERATOR_CB sIterator = {.pfnShow = GlobalStatsPrintElements}; + error = DICreateEntry("driver_stats", NULL, &sIterator, NULL, + DI_ENTRY_TYPE_GENERIC, &psGlobalMemDIEntry); + PVR_LOG_IF_ERROR(error, "DICreateEntry (3)"); + } + + return PVRSRV_OK; + +destroy_stats_lock_: + OSLockDestroy(gsGlobalStats.hGlobalStatsLock); + gsGlobalStats.hGlobalStatsLock = NULL; +destroy_hashtable_lock_: + OSLockDestroy(gpsSizeTrackingHashTableLock); + gpsSizeTrackingHashTableLock = NULL; +detroy_linked_list_lock_: + OSLockDestroy(g_psLinkedListLock); + g_psLinkedListLock = NULL; +return_: + return error; + +} + +static PVRSRV_ERROR _DumpAllVMallocEntries (uintptr_t k, uintptr_t v, void* pvPriv) +{ +#if defined(PVRSRV_NEED_PVR_DPF) || defined(DOXYGEN) + _PVR_STATS_TRACKING_HASH_ENTRY *psNewTrackingHashEntry = (_PVR_STATS_TRACKING_HASH_ENTRY *)(uintptr_t)v; + IMG_UINT64 uiCpuVAddr = (IMG_UINT64)k; + + PVR_DPF((PVR_DBG_ERROR, "%s: " IMG_SIZE_FMTSPEC " bytes @ 0x%" IMG_UINT64_FMTSPECx " (PID %u)", __func__, + psNewTrackingHashEntry->uiSizeInBytes, + uiCpuVAddr, + psNewTrackingHashEntry->uiPid)); + + PVR_UNREFERENCED_PARAMETER(pvPriv); +#endif + return PVRSRV_OK; +} + +/*************************************************************************/ /*! +@Function PVRSRVStatsDestroy +@Description Method for destroying the statistics module data. +*/ /**************************************************************************/ +void +PVRSRVStatsDestroy(void) +{ + PVR_ASSERT(bProcessStatsInitialised); + +#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE) + if (psProcStatsDIEntry != NULL) + { + DIDestroyEntry(psProcStatsDIEntry); + psProcStatsDIEntry = NULL; + } +#endif + + /* Destroy the power stats entry... 
*/ + if (psPowerStatsDIEntry!=NULL) + { + DIDestroyEntry(psPowerStatsDIEntry); + psPowerStatsDIEntry = NULL; + } + + /* Destroy the global data entry */ + if (psGlobalMemDIEntry!=NULL) + { + DIDestroyEntry(psGlobalMemDIEntry); + psGlobalMemDIEntry = NULL; + } + +#if defined(ENABLE_DEBUGFS_PIDS) + _removeStatisticsEntries(); +#endif + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + /* Deregister our 'system' PID which holds driver-wide alloc stats */ + PVRSRVStatsDeregisterProcess(g_hDriverProcessStats); +#endif + + /* Stop monitoring memory allocations... */ + bProcessStatsInitialised = IMG_FALSE; + + /* Destroy the locks... */ + if (g_psLinkedListLock != NULL) + { + OSLockDestroy(g_psLinkedListLock); + g_psLinkedListLock = NULL; + } + + /* Free the live and dead lists... */ + while (g_psLiveList != NULL) + { + PVRSRV_PROCESS_STATS* psProcessStats = g_psLiveList; + _RemoveProcessStatsFromList(psProcessStats); + _DestroyProcessStat(psProcessStats); + } + + while (g_psDeadList != NULL) + { + PVRSRV_PROCESS_STATS* psProcessStats = g_psDeadList; + _RemoveProcessStatsFromList(psProcessStats); + _DestroyProcessStat(psProcessStats); + } + + if (gpsSizeTrackingHashTable != NULL) + { + /* Dump all remaining entries in HASH table (list any remaining vmallocs) */ + HASH_Iterate(gpsSizeTrackingHashTable, (HASH_pfnCallback)_DumpAllVMallocEntries, NULL); + HASH_Delete(gpsSizeTrackingHashTable); + } + if (gpsSizeTrackingHashTableLock != NULL) + { + OSLockDestroy(gpsSizeTrackingHashTableLock); + gpsSizeTrackingHashTableLock = NULL; + } + + if (NULL != gsGlobalStats.hGlobalStatsLock) + { + OSLockDestroy(gsGlobalStats.hGlobalStatsLock); + gsGlobalStats.hGlobalStatsLock = NULL; + } + +} + +static void _decrease_global_stat(PVRSRV_MEM_ALLOC_TYPE eAllocType, + size_t uiBytes) +{ +#if defined(ENABLE_GPU_MEM_TRACEPOINT) + IMG_UINT64 ui64InitialSize; +#endif + + OSLockAcquire(gsGlobalStats.hGlobalStatsLock); + +#if defined(ENABLE_GPU_MEM_TRACEPOINT) + ui64InitialSize = 
GET_GPUMEM_GLOBAL_STAT_VALUE(); +#endif + + switch (eAllocType) + { + case PVRSRV_MEM_ALLOC_TYPE_KMALLOC: + DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_KMALLOC, uiBytes); + break; + + case PVRSRV_MEM_ALLOC_TYPE_VMALLOC: + DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_VMALLOC, uiBytes); + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA: + DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA, uiBytes); + break; + + case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA: + DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA, uiBytes); + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA: + DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA, uiBytes); + break; + + case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA: + DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA, uiBytes); + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: + DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA, uiBytes); + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: + DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA, uiBytes); + break; + + case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: + DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA, uiBytes); + break; + + case PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES: + DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL, uiBytes); + break; + + case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT: + DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_IMPORT, uiBytes); + break; + + default: + PVR_ASSERT(0); + break; + } + +#if defined(ENABLE_GPU_MEM_TRACEPOINT) + { + IMG_UINT64 ui64Size = GET_GPUMEM_GLOBAL_STAT_VALUE(); + if (ui64Size != ui64InitialSize) + { + TracepointUpdateGPUMemGlobal(0, ui64Size); + } + } +#endif + + 
OSLockRelease(gsGlobalStats.hGlobalStatsLock); +} + +static void _increase_global_stat(PVRSRV_MEM_ALLOC_TYPE eAllocType, + size_t uiBytes) +{ +#if defined(ENABLE_GPU_MEM_TRACEPOINT) + IMG_UINT64 ui64InitialSize; +#endif + + OSLockAcquire(gsGlobalStats.hGlobalStatsLock); + +#if defined(ENABLE_GPU_MEM_TRACEPOINT) + ui64InitialSize = GET_GPUMEM_GLOBAL_STAT_VALUE(); +#endif + + switch (eAllocType) + { + case PVRSRV_MEM_ALLOC_TYPE_KMALLOC: + INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_KMALLOC, uiBytes); + break; + + case PVRSRV_MEM_ALLOC_TYPE_VMALLOC: + INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_VMALLOC, uiBytes); + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA: + INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA, uiBytes); + break; + + case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA: + INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA, uiBytes); + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA: + INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA, uiBytes); + break; + + case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA: + INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA, uiBytes); + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: + INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA, uiBytes); + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: + INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA, uiBytes); + break; + + case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: + INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA, uiBytes); + break; + + case PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES: + INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL, uiBytes); + break; + + case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT: + INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, 
PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_IMPORT, uiBytes); + break; + + default: + PVR_ASSERT(0); + break; + } + +#if defined(ENABLE_GPU_MEM_TRACEPOINT) + { + IMG_UINT64 ui64Size = GET_GPUMEM_GLOBAL_STAT_VALUE(); + if (ui64Size != ui64InitialSize) + { + TracepointUpdateGPUMemGlobal(0, ui64Size); + } + } +#endif + + OSLockRelease(gsGlobalStats.hGlobalStatsLock); +} + +static PVRSRV_ERROR +_RegisterProcess(IMG_HANDLE *phProcessStats, IMG_PID ownerPid) +{ + PVRSRV_PROCESS_STATS* psProcessStats=NULL; + PVRSRV_ERROR eError; + + PVR_ASSERT(phProcessStats != NULL); + + PVR_DPF((PVR_DBG_MESSAGE, "%s: Register process PID %d [%s]", + __func__, ownerPid, (ownerPid == PVR_SYS_ALLOC_PID) + ? "system" : OSGetCurrentClientProcessNameKM())); + + /* Check the PID has not already moved to the dead list... */ + OSLockAcquire(g_psLinkedListLock); + psProcessStats = _FindProcessStatsInDeadList(ownerPid); + if (psProcessStats != NULL) + { + /* Move it back onto the live list! */ + _RemoveProcessStatsFromList(psProcessStats); + _AddProcessStatsToFrontOfLiveList(psProcessStats); + } + else + { + /* Check the PID is not already registered in the live list... */ + psProcessStats = _FindProcessStatsInLiveList(ownerPid); + } + + /* If the PID is on the live list then just increment the ref count and return... */ + if (psProcessStats != NULL) + { + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + + psProcessStats->ui32RefCount++; + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS] = psProcessStats->ui32RefCount; + UPDATE_MAX_VALUE(psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS], + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS]); + OSLockRelease(psProcessStats->hLock); + OSLockRelease(g_psLinkedListLock); + + *phProcessStats = psProcessStats; + + return PVRSRV_OK; + } + OSLockRelease(g_psLinkedListLock); + + /* Allocate a new node structure and initialise it... 
*/ + eError = _AllocateProcessStats(&psProcessStats, ownerPid); + PVR_GOTO_IF_ERROR(eError, e0); + + /* Add it to the live list... */ + OSLockAcquire(g_psLinkedListLock); + _AddProcessStatsToFrontOfLiveList(psProcessStats); + OSLockRelease(g_psLinkedListLock); + + /* Done */ + *phProcessStats = (IMG_HANDLE) psProcessStats; + + return PVRSRV_OK; + +e0: + *phProcessStats = (IMG_HANDLE) NULL; + return PVRSRV_ERROR_OUT_OF_MEMORY; +} /* _RegisterProcess */ + +/*************************************************************************/ /*! +@Function PVRSRVStatsRegisterProcess +@Description Register a process into the list statistics list. +@Output phProcessStats Handle to the process to be used to deregister. +@Return Standard PVRSRV_ERROR error code. +*/ /**************************************************************************/ +PVRSRV_ERROR +PVRSRVStatsRegisterProcess(IMG_HANDLE* phProcessStats) +{ + return _RegisterProcess(phProcessStats, OSGetCurrentClientProcessIDKM()); +} + +/*************************************************************************/ /*! +@Function PVRSRVStatsDeregisterProcess +@Input hProcessStats Handle to the process returned when registered. +@Description Method for destroying the statistics module data. 
+*/ /**************************************************************************/ +void +PVRSRVStatsDeregisterProcess(IMG_HANDLE hProcessStats) +{ + PVR_DPF((PVR_DBG_MESSAGE, "%s: Deregister process entered PID %d [%s]", + __func__, OSGetCurrentClientProcessIDKM(), + OSGetCurrentProcessName())); + + if (hProcessStats != (IMG_HANDLE) NULL) + { + PVRSRV_PROCESS_STATS* psProcessStats = (PVRSRV_PROCESS_STATS*) hProcessStats; + + /* Lower the reference count, if zero then move it to the dead list */ + OSLockAcquire(g_psLinkedListLock); + if (psProcessStats->ui32RefCount > 0) + { + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + psProcessStats->ui32RefCount--; + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS] = psProcessStats->ui32RefCount; + +#if !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + if (psProcessStats->ui32RefCount == 0) + { + OSLockRelease(psProcessStats->hLock); + _MoveProcessToDeadList(psProcessStats); + }else +#endif + { + OSLockRelease(psProcessStats->hLock); + } + } + OSLockRelease(g_psLinkedListLock); + + /* Check if the dead list needs to be reduced */ + _CompressMemoryUsage(); + } +} /* PVRSRVStatsDeregisterProcess */ + +void +PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType, + void *pvCpuVAddr, + IMG_CPU_PHYADDR sCpuPAddr, + size_t uiBytes, + void *pvPrivateData, + IMG_PID currentPid + DEBUG_MEMSTATS_PARAMS) +{ +#if defined(PVRSRV_ENABLE_MEMORY_STATS) + IMG_PID currentCleanupPid = PVRSRVGetPurgeConnectionPid(); + PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_MEM_ALLOC_REC* psRecord = NULL; + PVRSRV_PROCESS_STATS* psProcessStats; + enum { PVRSRV_PROC_NOTFOUND, + PVRSRV_PROC_FOUND, + PVRSRV_PROC_RESURRECTED + } eProcSearch = PVRSRV_PROC_FOUND; + +#if defined(ENABLE_GPU_MEM_TRACEPOINT) + IMG_UINT64 ui64InitialSize; +#endif + + /* Don't do anything if we are not initialised or we are shutting down! 
*/ + if (!bProcessStatsInitialised) + { +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + PVR_DPF((PVR_DBG_WARNING, + "%s: Called when process statistics module is not initialised", + __func__)); +#endif + return; + } + + /* + * To prevent a recursive loop, we make the memory allocations for our + * memstat records via OSAllocMemNoStats(), which does not try to + * create a memstat record entry. + */ + + /* Allocate the memory record... */ + psRecord = OSAllocZMemNoStats(sizeof(PVRSRV_MEM_ALLOC_REC)); + if (psRecord == NULL) + { + return; + } + + psRecord->eAllocType = eAllocType; + psRecord->pvCpuVAddr = pvCpuVAddr; + psRecord->sCpuPAddr.uiAddr = sCpuPAddr.uiAddr; + psRecord->uiBytes = uiBytes; + psRecord->pvPrivateData = pvPrivateData; + + psRecord->pid = currentPid; + +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON) + psRecord->pvAllocdFromFile = pvAllocFromFile; + psRecord->ui32AllocdFromLine = ui32AllocFromLine; +#endif + + _increase_global_stat(eAllocType, uiBytes); + /* Lock while we find the correct process... 
*/ + OSLockAcquire(g_psLinkedListLock); + + if (psPVRSRVData) + { + if ((currentPid == psPVRSRVData->cleanupThreadPid) && + (currentCleanupPid != 0)) + { + psProcessStats = _FindProcessStats(currentCleanupPid); + } + else + { + psProcessStats = _FindProcessStatsInLiveList(currentPid); + if (!psProcessStats) + { + psProcessStats = _FindProcessStatsInDeadList(currentPid); + eProcSearch = PVRSRV_PROC_RESURRECTED; + } + } + } + else + { + psProcessStats = _FindProcessStatsInLiveList(currentPid); + if (!psProcessStats) + { + psProcessStats = _FindProcessStatsInDeadList(currentPid); + eProcSearch = PVRSRV_PROC_RESURRECTED; + } + } + + if (psProcessStats == NULL) + { + eProcSearch = PVRSRV_PROC_NOTFOUND; + +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + PVR_DPF((PVR_DBG_WARNING, + "%s: Process stat increment called for 'unknown' process PID(%d)", + __func__, currentPid)); + + if (_AllocateProcessStats(&psProcessStats, currentPid) != PVRSRV_OK) + { + OSLockRelease(g_psLinkedListLock); + PVR_DPF((PVR_DBG_ERROR, + "%s UNABLE TO CREATE process_stats entry for pid %d [%s] (" IMG_SIZE_FMTSPEC " bytes)", + __func__, currentPid, OSGetCurrentProcessName(), uiBytes)); + goto free_record; + } + + /* Add it to the live list... */ + _AddProcessStatsToFrontOfLiveList(psProcessStats); + + OSLockRelease(g_psLinkedListLock); + +#else /* defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) */ + OSLockRelease(g_psLinkedListLock); + goto free_record; +#endif /* defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) */ + } + else + { +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + if (eProcSearch == PVRSRV_PROC_RESURRECTED) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Process stat incremented on 'dead' process PID(%d)", + __func__, currentPid)); + /* Move process from dead list to live list */ + _MoveProcessToLiveList(psProcessStats); + } +#endif + OSLockRelease(g_psLinkedListLock); + } + + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + + /* Insert the memory record... 
*/ + if (psRecord != NULL) + { + List_PVRSRV_MEM_ALLOC_REC_Insert(&psProcessStats->psMemoryRecords, psRecord); + } + +#if defined(ENABLE_GPU_MEM_TRACEPOINT) + ui64InitialSize = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats); +#endif + + /* Update the memory watermarks... */ + switch (eAllocType) + { + case PVRSRV_MEM_ALLOC_TYPE_KMALLOC: + { + if (psRecord != NULL) + { + if (pvCpuVAddr == NULL) + { + break; + } + psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr; + } + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, (IMG_UINT32)uiBytes); + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_KMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_VMALLOC: + { + if (psRecord != NULL) + { + if (pvCpuVAddr == NULL) + { + break; + } + psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr; + } + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, (IMG_UINT32)uiBytes); + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA: + { + if (psRecord != NULL) + { + if (pvCpuVAddr == NULL) + { + break; + } + psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr; + } + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, (IMG_UINT32)uiBytes); + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA: + { + if (psRecord != NULL) + { + if (pvCpuVAddr == NULL) + { + break; + } + psRecord->ui64Key = 
(IMG_UINT64)(uintptr_t)pvCpuVAddr; + } + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, (IMG_UINT32)uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA: + { + if (psRecord != NULL) + { + psRecord->ui64Key = sCpuPAddr.uiAddr; + } + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, (IMG_UINT32)uiBytes); + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA: + { + if (psRecord != NULL) + { + if (pvCpuVAddr == NULL) + { + break; + } + psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr; + } + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, (IMG_UINT32)uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: + { + if (psRecord != NULL) + { + psRecord->ui64Key = sCpuPAddr.uiAddr; + } + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, (IMG_UINT32)uiBytes); + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: + { + if (psRecord != NULL) + { + psRecord->ui64Key = sCpuPAddr.uiAddr; + } + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, (IMG_UINT32)uiBytes); + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + 
psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: + { + if (psRecord != NULL) + { + if (pvCpuVAddr == NULL) + { + break; + } + psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr; + } + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, (IMG_UINT32)uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + default: + { + PVR_ASSERT(0); + } + break; + } + +#if defined(ENABLE_GPU_MEM_TRACEPOINT) + if (psProcessStats->pid != PVR_SYS_ALLOC_PID) + { + IMG_UINT64 ui64Size = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats); + if (ui64Size != ui64InitialSize) + { + TracepointUpdateGPUMemPerProcess(0, psProcessStats->pid, ui64Size); + } + } +#endif + + OSLockRelease(psProcessStats->hLock); + + return; + +free_record: + if (psRecord != NULL) + { + OSFreeMemNoStats(psRecord); + } +#endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ +} /* PVRSRVStatsAddMemAllocRecord */ + +void +PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType, + IMG_UINT64 ui64Key, + IMG_PID currentPid) +{ +#if defined(PVRSRV_ENABLE_MEMORY_STATS) + IMG_PID currentCleanupPid = PVRSRVGetPurgeConnectionPid(); + PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_PROCESS_STATS* psProcessStats = NULL; + PVRSRV_MEM_ALLOC_REC* psRecord = NULL; + IMG_BOOL bFound = IMG_FALSE; + + /* Don't do anything if we are not initialised or we are shutting down! */ + if (!bProcessStatsInitialised) + { +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + PVR_DPF((PVR_DBG_WARNING, + "%s: Called when process statistics module is not initialised", + __func__)); +#endif + return; + } + + /* Lock while we find the correct process and remove this record... 
*/ + OSLockAcquire(g_psLinkedListLock); + + if (psPVRSRVData) + { + if ((currentPid == psPVRSRVData->cleanupThreadPid) && + (currentCleanupPid != 0)) + { + psProcessStats = _FindProcessStats(currentCleanupPid); + } + else + { + psProcessStats = _FindProcessStats(currentPid); + } + } + else + { + psProcessStats = _FindProcessStats(currentPid); + } + if (psProcessStats != NULL) + { + psRecord = psProcessStats->psMemoryRecords; + while (psRecord != NULL) + { + if (psRecord->ui64Key == ui64Key && psRecord->eAllocType == eAllocType) + { + bFound = IMG_TRUE; + break; + } + + psRecord = psRecord->psNext; + } + } + + /* If not found, we need to do a full search in case it was allocated to a different PID... */ + if (!bFound) + { + PVRSRV_PROCESS_STATS* psProcessStatsAlreadyChecked = psProcessStats; + + /* Search all live lists first... */ + psProcessStats = g_psLiveList; + while (psProcessStats != NULL) + { + if (psProcessStats != psProcessStatsAlreadyChecked) + { + psRecord = psProcessStats->psMemoryRecords; + while (psRecord != NULL) + { + if (psRecord->ui64Key == ui64Key && psRecord->eAllocType == eAllocType) + { + bFound = IMG_TRUE; + break; + } + + psRecord = psRecord->psNext; + } + } + + if (bFound) + { + break; + } + + psProcessStats = psProcessStats->psNext; + } + + /* If not found, then search all dead lists next... 
*/ + if (!bFound) + { + psProcessStats = g_psDeadList; + while (psProcessStats != NULL) + { + if (psProcessStats != psProcessStatsAlreadyChecked) + { + psRecord = psProcessStats->psMemoryRecords; + while (psRecord != NULL) + { + if (psRecord->ui64Key == ui64Key && psRecord->eAllocType == eAllocType) + { + bFound = IMG_TRUE; + break; + } + + psRecord = psRecord->psNext; + } + } + + if (bFound) + { + break; + } + + psProcessStats = psProcessStats->psNext; + } + } + } + + /* Update the watermark and remove this record...*/ + if (bFound) + { + _decrease_global_stat(eAllocType, psRecord->uiBytes); + + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + + _DecreaseProcStatValue(eAllocType, + psProcessStats, + psRecord->uiBytes); + + List_PVRSRV_MEM_ALLOC_REC_Remove(psRecord); + OSLockRelease(psProcessStats->hLock); + OSLockRelease(g_psLinkedListLock); + +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + /* If all stats are now zero, remove the entry for this thread */ + if (psProcessStats->ui32StatAllocFlags == 0) + { + OSLockAcquire(g_psLinkedListLock); + _MoveProcessToDeadList(psProcessStats); + OSLockRelease(g_psLinkedListLock); + + /* Check if the dead list needs to be reduced */ + _CompressMemoryUsage(); + } +#endif + /* + * Free the record outside the lock so we don't deadlock and so we + * reduce the time the lock is held. 
+ */ + OSFreeMemNoStats(psRecord); + } + else + { + OSLockRelease(g_psLinkedListLock); + } + +#else +PVR_UNREFERENCED_PARAMETER(eAllocType); +PVR_UNREFERENCED_PARAMETER(ui64Key); +#endif +} /* PVRSRVStatsRemoveMemAllocRecord */ + +void +PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE eAllocType, + size_t uiBytes, + IMG_UINT64 uiCpuVAddr, + IMG_PID uiPid) +{ + IMG_BOOL bRes = IMG_FALSE; + _PVR_STATS_TRACKING_HASH_ENTRY *psNewTrackingHashEntry = NULL; + + if (!bProcessStatsInitialised || (gpsSizeTrackingHashTable == NULL)) + { +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + PVR_DPF((PVR_DBG_WARNING, + "%s: Called when process statistics module is not initialised", + __func__)); +#endif + return; + } + + /* Alloc untracked memory for the new hash table entry */ + psNewTrackingHashEntry = (_PVR_STATS_TRACKING_HASH_ENTRY *)OSAllocMemNoStats(sizeof(*psNewTrackingHashEntry)); + if (psNewTrackingHashEntry) + { + /* Fill-in the size of the allocation and PID of the allocating process */ + psNewTrackingHashEntry->uiSizeInBytes = uiBytes; + psNewTrackingHashEntry->uiPid = uiPid; + OSLockAcquire(gpsSizeTrackingHashTableLock); + /* Insert address of the new struct into the hash table */ + bRes = HASH_Insert(gpsSizeTrackingHashTable, uiCpuVAddr, (uintptr_t)psNewTrackingHashEntry); + OSLockRelease(gpsSizeTrackingHashTableLock); + } + + if (psNewTrackingHashEntry) + { + if (bRes) + { + PVRSRVStatsIncrMemAllocStat(eAllocType, uiBytes, uiPid); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "*** %s : @ line %d HASH_Insert() failed!", + __func__, __LINE__)); + } + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "*** %s : @ line %d Failed to alloc memory for psNewTrackingHashEntry!", + __func__, __LINE__)); + } +} + +void +PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType, + size_t uiBytes, + IMG_PID currentPid) + +{ + IMG_PID currentCleanupPid = PVRSRVGetPurgeConnectionPid(); + PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_PROCESS_STATS* psProcessStats = NULL; + 
enum { PVRSRV_PROC_NOTFOUND, + PVRSRV_PROC_FOUND, + PVRSRV_PROC_RESURRECTED + } eProcSearch = PVRSRV_PROC_FOUND; + +#if defined(ENABLE_GPU_MEM_TRACEPOINT) + IMG_UINT64 ui64InitialSize; +#endif + + /* Don't do anything if we are not initialised or we are shutting down! */ + if (!bProcessStatsInitialised) + { +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + PVR_DPF((PVR_DBG_WARNING, + "%s: Called when process statistics module is not initialised", + __func__)); +#endif + return; + } + + _increase_global_stat(eAllocType, uiBytes); + OSLockAcquire(g_psLinkedListLock); + if (psPVRSRVData) + { + if ((currentPid == psPVRSRVData->cleanupThreadPid) && + (currentCleanupPid != 0)) + { + psProcessStats = _FindProcessStats(currentCleanupPid); + } + else + { + psProcessStats = _FindProcessStatsInLiveList(currentPid); + if (!psProcessStats) + { + psProcessStats = _FindProcessStatsInDeadList(currentPid); + eProcSearch = PVRSRV_PROC_RESURRECTED; + } + } + } + else + { + psProcessStats = _FindProcessStatsInLiveList(currentPid); + if (!psProcessStats) + { + psProcessStats = _FindProcessStatsInDeadList(currentPid); + eProcSearch = PVRSRV_PROC_RESURRECTED; + } + } + + if (psProcessStats == NULL) + { + eProcSearch = PVRSRV_PROC_NOTFOUND; + +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + PVR_DPF((PVR_DBG_WARNING, + "%s: Process stat increment called for 'unknown' process PID(%d)", + __func__, currentPid)); + + if (bProcessStatsInitialised) + { + if (_AllocateProcessStats(&psProcessStats, currentPid) != PVRSRV_OK) + { + OSLockRelease(g_psLinkedListLock); + return; + } + /* Add it to the live list... 
*/ + _AddProcessStatsToFrontOfLiveList(psProcessStats); + } +#else + OSLockRelease(g_psLinkedListLock); +#endif /* defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) */ + + } + + if (psProcessStats != NULL) + { +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + if (eProcSearch == PVRSRV_PROC_RESURRECTED) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Process stat incremented on 'dead' process PID(%d)", + __func__, currentPid)); + + /* Move process from dead list to live list */ + _MoveProcessToLiveList(psProcessStats); + } +#endif + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + /* Release the list lock as soon as we acquire the process lock, + * this ensures if the process is in deadlist the entry cannot be + * deleted or modified + */ + OSLockRelease(g_psLinkedListLock); + +#if defined(ENABLE_GPU_MEM_TRACEPOINT) + ui64InitialSize = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats); +#endif + + /* Update the memory watermarks... */ + switch (eAllocType) + { + case PVRSRV_MEM_ALLOC_TYPE_KMALLOC: + { + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, (IMG_UINT32)uiBytes); + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_KMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_VMALLOC: + { + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, (IMG_UINT32)uiBytes); + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA: + { + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, (IMG_UINT32)uiBytes); + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + 
psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA: + { + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, (IMG_UINT32)uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA: + { + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, (IMG_UINT32)uiBytes); + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA: + { + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, (IMG_UINT32)uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: + { + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, (IMG_UINT32)uiBytes); + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: + { + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, (IMG_UINT32)uiBytes); + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: + { + 
INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, (IMG_UINT32)uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT: + { + INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT, (IMG_UINT32)uiBytes); + psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + break; + + default: + { + PVR_ASSERT(0); + } + break; + } + +#if defined(ENABLE_GPU_MEM_TRACEPOINT) + if (psProcessStats->pid != PVR_SYS_ALLOC_PID) + { + IMG_UINT64 ui64Size = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats); + if (ui64Size != ui64InitialSize) + { + TracepointUpdateGPUMemPerProcess(0, psProcessStats->pid, + ui64Size); + } + } +#endif + + OSLockRelease(psProcessStats->hLock); + } + +} + +static void +_DecreaseProcStatValue(PVRSRV_MEM_ALLOC_TYPE eAllocType, + PVRSRV_PROCESS_STATS* psProcessStats, + IMG_UINT32 uiBytes) +{ +#if defined(ENABLE_GPU_MEM_TRACEPOINT) + IMG_UINT64 ui64InitialSize = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats); +#endif + + switch (eAllocType) + { + case PVRSRV_MEM_ALLOC_TYPE_KMALLOC: + { + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, (IMG_UINT32)uiBytes); + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_KMALLOC] == 0) + { + psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_KMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_VMALLOC: + { + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, (IMG_UINT32)uiBytes); + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_VMALLOC] == 0) + { 
+ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA: + { + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, (IMG_UINT32)uiBytes); + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA] == 0) + { + psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA: + { + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, (IMG_UINT32)uiBytes); + if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA] == 0) + { + psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA: + { + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, (IMG_UINT32)uiBytes); + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA] == 0) + { + psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA: + { + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, (IMG_UINT32)uiBytes); + if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA] == 0) + { + psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: + { + 
DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, (IMG_UINT32)uiBytes); + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES] == 0) + { + psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: + { + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, (IMG_UINT32)uiBytes); + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); + if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES] == 0) + { + psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: + { + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, (IMG_UINT32)uiBytes); + if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES] == 0) + { + psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + } + break; + + case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT: + { + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT, (IMG_UINT32)uiBytes); + if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT] == 0) + { + psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); + } + } + break; + + default: + { + PVR_ASSERT(0); + } + break; + } + +#if defined(ENABLE_GPU_MEM_TRACEPOINT) + if (psProcessStats->pid != PVR_SYS_ALLOC_PID) + { + IMG_UINT64 ui64Size = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats); + if (ui64Size != ui64InitialSize) + { + 
TracepointUpdateGPUMemPerProcess(0, psProcessStats->pid, ui64Size); + } + } +#endif +} + +#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE) +int RawProcessStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + PVRSRV_PROCESS_STATS *psProcessStats; + + DIPrintf(psEntry, + "%s,%s,%s,%s,%s,%s\n", + "PID", + "MemoryUsageKMalloc", // PVRSRV_PROCESS_STAT_TYPE_KMALLOC + "MemoryUsageAllocPTMemoryUMA", // PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA + "MemoryUsageAllocPTMemoryLMA", // PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA + "MemoryUsageAllocGPUMemLMA", // PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES + "MemoryUsageAllocGPUMemUMA"); // PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES + + OSLockAcquire(g_psLinkedListLock); + + psProcessStats = g_psLiveList; + + while (psProcessStats != NULL) + { + if (psProcessStats->pid != PVR_SYS_ALLOC_PID) + { + DIPrintf(psEntry, + "%d,%d,%d,%d,%d,%d\n", + psProcessStats->pid, + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_KMALLOC], + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA], + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA], + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES], + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES]); + } + + psProcessStats = psProcessStats->psNext; + } + + OSLockRelease(g_psLinkedListLock); + + return 0; +} /* RawProcessStatsPrintElements */ +#endif + +void +PVRSRVStatsDecrMemKAllocStat(size_t uiBytes, + IMG_PID decrPID) +{ + PVRSRV_PROCESS_STATS* psProcessStats; + + /* Don't do anything if we are not initialised or we are shutting down! */ + if (!bProcessStatsInitialised) + { + return; + } + + _decrease_global_stat(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, uiBytes); + + OSLockAcquire(g_psLinkedListLock); + + psProcessStats = _FindProcessStats(decrPID); + + if (psProcessStats != NULL) + { + /* Decrement the kmalloc memory stat... 
*/ + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, uiBytes); + DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); + } + + OSLockRelease(g_psLinkedListLock); +} + +static void +_StatsDecrMemTrackedStat(_PVR_STATS_TRACKING_HASH_ENTRY *psTrackingHashEntry, + PVRSRV_MEM_ALLOC_TYPE eAllocType) +{ + PVRSRV_PROCESS_STATS* psProcessStats; + + /* Don't do anything if we are not initialised or we are shutting down! */ + if (!bProcessStatsInitialised) + { + return; + } + + _decrease_global_stat(eAllocType, psTrackingHashEntry->uiSizeInBytes); + + OSLockAcquire(g_psLinkedListLock); + + psProcessStats = _FindProcessStats(psTrackingHashEntry->uiPid); + + if (psProcessStats != NULL) + { + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + /* Decrement the memory stat... */ + _DecreaseProcStatValue(eAllocType, + psProcessStats, + psTrackingHashEntry->uiSizeInBytes); + OSLockRelease(psProcessStats->hLock); + } + + OSLockRelease(g_psLinkedListLock); +} + +void +PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE eAllocType, + IMG_UINT64 uiCpuVAddr) +{ + _PVR_STATS_TRACKING_HASH_ENTRY *psTrackingHashEntry = NULL; + + if (!bProcessStatsInitialised || (gpsSizeTrackingHashTable == NULL)) + { + return; + } + + OSLockAcquire(gpsSizeTrackingHashTableLock); + psTrackingHashEntry = (_PVR_STATS_TRACKING_HASH_ENTRY *)HASH_Remove(gpsSizeTrackingHashTable, uiCpuVAddr); + OSLockRelease(gpsSizeTrackingHashTableLock); + if (psTrackingHashEntry) + { + _StatsDecrMemTrackedStat(psTrackingHashEntry, eAllocType); + OSFreeMemNoStats(psTrackingHashEntry); + } +} + +void +PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType, + size_t uiBytes, + IMG_PID currentPid) +{ + IMG_PID currentCleanupPid = PVRSRVGetPurgeConnectionPid(); + PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_PROCESS_STATS* psProcessStats = NULL; + + /* Don't do anything if we are not initialised or we are shutting down! 
*/ + if (!bProcessStatsInitialised) + { + return; + } + + _decrease_global_stat(eAllocType, uiBytes); + + OSLockAcquire(g_psLinkedListLock); + if (psPVRSRVData) + { + if ((currentPid == psPVRSRVData->cleanupThreadPid) && + (currentCleanupPid != 0)) + { + psProcessStats = _FindProcessStats(currentCleanupPid); + } + else + { + psProcessStats = _FindProcessStats(currentPid); + } + } + else + { + psProcessStats = _FindProcessStats(currentPid); + } + + + if (psProcessStats != NULL) + { + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + /* Release the list lock as soon as we acquire the process lock, + * this ensures if the process is in deadlist the entry cannot be + * deleted or modified + */ + OSLockRelease(g_psLinkedListLock); + /* Update the memory watermarks... */ + _DecreaseProcStatValue(eAllocType, + psProcessStats, + uiBytes); + OSLockRelease(psProcessStats->hLock); + +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + /* If all stats are now zero, remove the entry for this thread */ + if (psProcessStats->ui32StatAllocFlags == 0) + { + OSLockAcquire(g_psLinkedListLock); + _MoveProcessToDeadList(psProcessStats); + OSLockRelease(g_psLinkedListLock); + + /* Check if the dead list needs to be reduced */ + _CompressMemoryUsage(); + } +#endif + }else{ + OSLockRelease(g_psLinkedListLock); + } +} + +/* For now we do not want to expose the global stats API + * so we wrap it into this specific function for pooled pages. + * As soon as we need to modify the global stats directly somewhere else + * we want to replace these functions with more general ones. 
+ */ +void +PVRSRVStatsIncrMemAllocPoolStat(size_t uiBytes) +{ + _increase_global_stat(PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES, uiBytes); +} + +void +PVRSRVStatsDecrMemAllocPoolStat(size_t uiBytes) +{ + _decrease_global_stat(PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES, uiBytes); +} + +void +PVRSRVStatsUpdateOOMStats(IMG_UINT32 ui32OOMStatType, + IMG_PID pidOwner) +{ + PVRSRV_PROCESS_STAT_TYPE eOOMStatType = (PVRSRV_PROCESS_STAT_TYPE) ui32OOMStatType; + IMG_PID pidCurrent = pidOwner; + PVRSRV_PROCESS_STATS* psProcessStats; + + /* Don't do anything if we are not initialised or we are shutting down! */ + if (!bProcessStatsInitialised) + { + return; + } + + /* Lock while we find the correct process and update the record... */ + OSLockAcquire(g_psLinkedListLock); + + psProcessStats = _FindProcessStats(pidCurrent); + if (psProcessStats != NULL) + { + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + psProcessStats->i32StatValue[eOOMStatType]++; + OSLockRelease(psProcessStats->hLock); + } + else + { + PVR_DPF((PVR_DBG_WARNING, "PVRSRVStatsUpdateOOMStats: Process not found for Pid=%d", pidCurrent)); + } + + OSLockRelease(g_psLinkedListLock); +} /* PVRSRVStatsUpdateOOMStats */ + +PVRSRV_ERROR +PVRSRVServerUpdateOOMStats(IMG_UINT32 ui32OOMStatType, + IMG_PID pidOwner) +{ + if (ui32OOMStatType >= PVRSRV_PROCESS_STAT_TYPE_COUNT) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + PVRSRVStatsUpdateOOMStats(ui32OOMStatType, pidOwner); + + return PVRSRV_OK; +} + +void +PVRSRVStatsUpdateRenderContextStats(IMG_UINT32 ui32TotalNumPartialRenders, + IMG_UINT32 ui32TotalNumOutOfMemory, + IMG_UINT32 ui32NumTAStores, + IMG_UINT32 ui32Num3DStores, + IMG_UINT32 ui32NumCDMStores, + IMG_UINT32 ui32NumTDMStores, + IMG_PID pidOwner) +{ + IMG_PID pidCurrent = pidOwner; + + PVRSRV_PROCESS_STATS* psProcessStats; + + /* Don't do anything if we are not initialised or we are shutting down! 
*/ + if (!bProcessStatsInitialised) + { + return; + } + + /* Lock while we find the correct process and update the record... */ + OSLockAcquire(g_psLinkedListLock); + + psProcessStats = _FindProcessStats(pidCurrent); + if (psProcessStats != NULL) + { + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_PRS] += ui32TotalNumPartialRenders; + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_OOMS] += ui32TotalNumOutOfMemory; + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_TA_STORES] += ui32NumTAStores; + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_3D_STORES] += ui32Num3DStores; + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_CDM_STORES]+= ui32NumCDMStores; + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_TDM_STORES]+= ui32NumTDMStores; + OSLockRelease(psProcessStats->hLock); + } + else + { + PVR_DPF((PVR_DBG_WARNING, "PVRSRVStatsUpdateRenderContextStats: Process not found for Pid=%d", pidCurrent)); + } + + OSLockRelease(g_psLinkedListLock); +} /* PVRSRVStatsUpdateRenderContextStats */ + +void +PVRSRVStatsUpdateZSBufferStats(IMG_UINT32 ui32NumReqByApp, + IMG_UINT32 ui32NumReqByFW, + IMG_PID owner) +{ + IMG_PID currentPid = (owner==0)?OSGetCurrentClientProcessIDKM():owner; + PVRSRV_PROCESS_STATS* psProcessStats; + + + /* Don't do anything if we are not initialised or we are shutting down! */ + if (!bProcessStatsInitialised) + { + return; + } + + /* Lock while we find the correct process and update the record... 
*/ + OSLockAcquire(g_psLinkedListLock); + + psProcessStats = _FindProcessStats(currentPid); + if (psProcessStats != NULL) + { + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_APP] += ui32NumReqByApp; + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_FW] += ui32NumReqByFW; + OSLockRelease(psProcessStats->hLock); + } + + OSLockRelease(g_psLinkedListLock); +} /* PVRSRVStatsUpdateZSBufferStats */ + +void +PVRSRVStatsUpdateFreelistStats(IMG_UINT32 ui32NumGrowReqByApp, + IMG_UINT32 ui32NumGrowReqByFW, + IMG_UINT32 ui32InitFLPages, + IMG_UINT32 ui32NumHighPages, + IMG_PID ownerPid) +{ + IMG_PID currentPid = (ownerPid!=0)?ownerPid:OSGetCurrentClientProcessIDKM(); + PVRSRV_PROCESS_STATS* psProcessStats; + + /* Don't do anything if we are not initialised or we are shutting down! */ + if (!bProcessStatsInitialised) + { + return; + } + + /* Lock while we find the correct process and update the record... 
*/ + OSLockAcquire(g_psLinkedListLock); + + psProcessStats = _FindProcessStats(currentPid); + + if (psProcessStats != NULL) + { + + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_APP] += ui32NumGrowReqByApp; + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_FW] += ui32NumGrowReqByFW; + + UPDATE_MAX_VALUE(psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_PAGES_INIT], + (IMG_INT32) ui32InitFLPages); + + UPDATE_MAX_VALUE(psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_MAX_PAGES], + (IMG_INT32) ui32NumHighPages); + + OSLockRelease(psProcessStats->hLock); + + } + + OSLockRelease(g_psLinkedListLock); +} /* PVRSRVStatsUpdateFreelistStats */ + + +#if defined(ENABLE_DEBUGFS_PIDS) + +int +GenericStatsPrintElementsLive(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + PVRSRV_STAT_PV_DATA *psStatType = DIGetPrivData(psEntry); + PVRSRV_PROCESS_STATS* psProcessStats; + + PVR_UNREFERENCED_PARAMETER(pvData); + + PVR_ASSERT(psStatType->pfnStatsPrintElements != NULL); + + DIPrintf(psEntry, "%s\n", psStatType->szLiveStatsHeaderStr); + + OSLockAcquire(g_psLinkedListLock); + + psProcessStats = g_psLiveList; + + if (psProcessStats == NULL) + { + DIPrintf(psEntry, "No Stats to display\n%s\n", g_szSeparatorStr); + } + else + { + while (psProcessStats != NULL) + { + psStatType->pfnStatsPrintElements(psEntry, psProcessStats); + psProcessStats = psProcessStats->psNext; + DIPrintf(psEntry, "%s\n", g_szSeparatorStr); + } + } + OSLockRelease(g_psLinkedListLock); + + return 0; +} + +int +GenericStatsPrintElementsRetired(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + PVRSRV_STAT_PV_DATA *psStatType = DIGetPrivData(psEntry); + PVRSRV_PROCESS_STATS* psProcessStats; + + PVR_UNREFERENCED_PARAMETER(pvData); + + PVR_ASSERT(psStatType->pfnStatsPrintElements != NULL); + + DIPrintf(psEntry, "%s\n", psStatType->szRetiredStatsHeaderStr); + + 
OSLockAcquire(g_psLinkedListLock); + + psProcessStats = g_psDeadList; + + if (psProcessStats == NULL) + { + DIPrintf(psEntry, "No Stats to display\n%s\n", g_szSeparatorStr); + } + else + { + while (psProcessStats != NULL) + { + psStatType->pfnStatsPrintElements(psEntry, psProcessStats); + psProcessStats = psProcessStats->psNext; + DIPrintf(psEntry, "%s\n", g_szSeparatorStr); + } + } + OSLockRelease(g_psLinkedListLock); + + return 0; +} + +#if defined(PVRSRV_ENABLE_PERPID_STATS) +/*************************************************************************/ /*! +@Function ProcessStatsPrintElements +@Description Prints all elements for this process statistic record. +@Input pvStatPtr Pointer to statistics structure. +@Input pfnOSStatsPrintf Printf function to use for output. +*/ /**************************************************************************/ +void +ProcessStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, + PVRSRV_PROCESS_STATS *psProcessStats) +{ + IMG_UINT32 ui32StatNumber; + + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + + DIPrintf(psEntry, "PID %u\n", psProcessStats->pid); + + /* Loop through all the values and print them... */ + for (ui32StatNumber = 0; + ui32StatNumber < ARRAY_SIZE(pszProcessStatType); + ui32StatNumber++) + { + if (OSStringNCompare(pszProcessStatType[ui32StatNumber], "", 1) != 0) + { +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + if ((ui32StatNumber == PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES) || + (ui32StatNumber == PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES)) + { + /* get the stat from RI */ + IMG_INT32 ui32Total = RITotalAllocProcessKM(psProcessStats->pid, + (ui32StatNumber == PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES) + ? 
PHYS_HEAP_TYPE_LMA : PHYS_HEAP_TYPE_UMA); + + DIPrintf(psEntry, "%-34s%10d %8dK\n", + pszProcessStatType[ui32StatNumber], ui32Total, ui32Total>>10); + } + else +#endif + { + if (ui32StatNumber >= PVRSRV_PROCESS_STAT_TYPE_KMALLOC && + ui32StatNumber <= PVRSRV_PROCESS_STAT_TYPE_TOTAL_MAX) + { + DIPrintf(psEntry, "%-34s%10d %8dK\n", + pszProcessStatType[ui32StatNumber], + psProcessStats->i32StatValue[ui32StatNumber], + psProcessStats->i32StatValue[ui32StatNumber] >> 10); + } + else + { + DIPrintf(psEntry, "%-34s%10d\n", + pszProcessStatType[ui32StatNumber], + psProcessStats->i32StatValue[ui32StatNumber]); + } + } + } + } + + OSLockRelease(psProcessStats->hLock); +} /* ProcessStatsPrintElements */ +#endif + +#if defined(PVRSRV_ENABLE_CACHEOP_STATS) +void +PVRSRVStatsUpdateCacheOpStats(PVRSRV_CACHE_OP uiCacheOp, + IMG_UINT32 ui32OpSeqNum, +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEV_PHYADDR sDevPAddr, + IMG_UINT32 eFenceOpType, +#endif + IMG_DEVMEM_SIZE_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT64 ui64ExecuteTime, + IMG_BOOL bUserModeFlush, + IMG_BOOL bIsFence, + IMG_PID ownerPid) +{ + IMG_PID currentPid = (ownerPid!=0)?ownerPid:OSGetCurrentClientProcessIDKM(); + PVRSRV_PROCESS_STATS* psProcessStats; + + /* Don't do anything if we are not initialised or we are shutting down! */ + if (!bProcessStatsInitialised) + { + return; + } + + /* Lock while we find the correct process and update the record... 
*/ + OSLockAcquire(g_psLinkedListLock); + + psProcessStats = _FindProcessStats(currentPid); + + if (psProcessStats != NULL) + { + IMG_INT32 Idx; + + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + + /* Look-up next buffer write index */ + Idx = psProcessStats->uiCacheOpWriteIndex; + psProcessStats->uiCacheOpWriteIndex = INCREMENT_CACHEOP_STAT_IDX_WRAP(Idx); + + /* Store all CacheOp meta-data */ + psProcessStats->asCacheOp[Idx].uiCacheOp = uiCacheOp; +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + psProcessStats->asCacheOp[Idx].sDevVAddr = sDevVAddr; + psProcessStats->asCacheOp[Idx].sDevPAddr = sDevPAddr; + psProcessStats->asCacheOp[Idx].eFenceOpType = eFenceOpType; +#endif + psProcessStats->asCacheOp[Idx].uiOffset = uiOffset; + psProcessStats->asCacheOp[Idx].uiSize = uiSize; + psProcessStats->asCacheOp[Idx].bUserModeFlush = bUserModeFlush; + psProcessStats->asCacheOp[Idx].ui64ExecuteTime = ui64ExecuteTime; + psProcessStats->asCacheOp[Idx].ui32OpSeqNum = ui32OpSeqNum; + psProcessStats->asCacheOp[Idx].bIsFence = bIsFence; + + OSLockRelease(psProcessStats->hLock); + } + + OSLockRelease(g_psLinkedListLock); +} /* PVRSRVStatsUpdateCacheOpStats */ + +/*************************************************************************/ /*! +@Function CacheOpStatsPrintElements +@Description Prints all elements for this process statistic CacheOp record. +@Input pvStatPtr Pointer to statistics structure. +@Input pfnOSStatsPrintf Printf function to use for output. 
+*/ /**************************************************************************/ +void +CacheOpStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, + PVRSRV_PROCESS_STATS *psProcessStats) +{ + IMG_CHAR *pszCacheOpType, *pszFlushType, *pszFlushMode; + IMG_INT32 i32WriteIdx, i32ReadIdx; + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + #define CACHEOP_RI_PRINTF_HEADER \ + "%-10s %-10s %-5s %-16s %-16s %-10s %-10s %-12s %-12s\n" + #define CACHEOP_RI_PRINTF_FENCE \ + "%-10s %-10s %-5s %-16s %-16s %-10s %-10s %-12llu 0x%-10x\n" + #define CACHEOP_RI_PRINTF \ + "%-10s %-10s %-5s 0x%-14llx 0x%-14llx 0x%-8llx 0x%-8llx %-12llu 0x%-10x\n" +#else + #define CACHEOP_PRINTF_HEADER \ + "%-10s %-10s %-5s %-10s %-10s %-12s %-12s\n" + #define CACHEOP_PRINTF_FENCE \ + "%-10s %-10s %-5s %-10s %-10s %-12llu 0x%-10x\n" + #define CACHEOP_PRINTF \ + "%-10s %-10s %-5s 0x%-8llx 0x%-8llx %-12llu 0x%-10x\n" +#endif + + DIPrintf(psEntry, "PID %u\n", psProcessStats->pid); + + /* File header info */ + DIPrintf(psEntry, +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + CACHEOP_RI_PRINTF_HEADER, +#else + CACHEOP_PRINTF_HEADER, +#endif + "CacheOp", + "Type", + "Mode", +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + "DevVAddr", + "DevPAddr", +#endif + "Offset", + "Size", + "Time (us)", + "SeqNo"); + + /* Take a snapshot of write index, read backwards in buffer + and wrap round at boundary */ + i32WriteIdx = psProcessStats->uiCacheOpWriteIndex; + for (i32ReadIdx = DECREMENT_CACHEOP_STAT_IDX_WRAP(i32WriteIdx); + i32ReadIdx != i32WriteIdx; + i32ReadIdx = DECREMENT_CACHEOP_STAT_IDX_WRAP(i32ReadIdx)) + { + IMG_UINT64 ui64ExecuteTime; + + if (! 
psProcessStats->asCacheOp[i32ReadIdx].ui32OpSeqNum) + { + break; + } + + ui64ExecuteTime = psProcessStats->asCacheOp[i32ReadIdx].ui64ExecuteTime; + + if (psProcessStats->asCacheOp[i32ReadIdx].bIsFence) + { + IMG_CHAR *pszFenceType = ""; + pszCacheOpType = "Fence"; + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + switch (psProcessStats->asCacheOp[i32ReadIdx].eFenceOpType) + { + case RGXFWIF_DM_GP: + pszFenceType = "GP"; + break; + + case RGXFWIF_DM_TDM: + /* Also case RGXFWIF_DM_2D: */ + pszFenceType = "TDM/2D"; + break; + + case RGXFWIF_DM_GEOM: + pszFenceType = "GEOM"; + break; + + case RGXFWIF_DM_3D: + pszFenceType = "3D"; + break; + + case RGXFWIF_DM_CDM: + pszFenceType = "CDM"; + break; + + default: + PVR_ASSERT(0); + break; + } +#endif + + DIPrintf(psEntry, +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + CACHEOP_RI_PRINTF_FENCE, +#else + CACHEOP_PRINTF_FENCE, +#endif + pszCacheOpType, + pszFenceType, + "", +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + "", + "", +#endif + "", + "", + ui64ExecuteTime, + psProcessStats->asCacheOp[i32ReadIdx].ui32OpSeqNum); + } + else + { + IMG_DEVMEM_SIZE_T ui64NumOfPages; + + ui64NumOfPages = psProcessStats->asCacheOp[i32ReadIdx].uiSize >> OSGetPageShift(); + if (ui64NumOfPages <= PMR_MAX_TRANSLATION_STACK_ALLOC) + { + pszFlushType = "RBF.Fast"; + } + else + { + pszFlushType = "RBF.Slow"; + } + + if (psProcessStats->asCacheOp[i32ReadIdx].bUserModeFlush) + { + pszFlushMode = "UM"; + } + else + { + pszFlushMode = "KM"; + } + + switch (psProcessStats->asCacheOp[i32ReadIdx].uiCacheOp) + { + case PVRSRV_CACHE_OP_NONE: + pszCacheOpType = "None"; + break; + case PVRSRV_CACHE_OP_CLEAN: + pszCacheOpType = "Clean"; + break; + case PVRSRV_CACHE_OP_INVALIDATE: + pszCacheOpType = "Invalidate"; + break; + case PVRSRV_CACHE_OP_FLUSH: + pszCacheOpType = "Flush"; + break; + default: + pszCacheOpType = "Unknown"; + break; + } + + DIPrintf(psEntry, +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && 
defined(DEBUG) + CACHEOP_RI_PRINTF, +#else + CACHEOP_PRINTF, +#endif + pszCacheOpType, + pszFlushType, + pszFlushMode, +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + psProcessStats->asCacheOp[i32ReadIdx].sDevVAddr.uiAddr, + psProcessStats->asCacheOp[i32ReadIdx].sDevPAddr.uiAddr, +#endif + psProcessStats->asCacheOp[i32ReadIdx].uiOffset, + psProcessStats->asCacheOp[i32ReadIdx].uiSize, + ui64ExecuteTime, + psProcessStats->asCacheOp[i32ReadIdx].ui32OpSeqNum); + } + } +} /* CacheOpStatsPrintElements */ +#endif + +#if defined(PVRSRV_ENABLE_MEMORY_STATS) +/*************************************************************************/ /*! +@Function MemStatsPrintElements +@Description Prints all elements for the memory statistic record. +@Input pvStatPtr Pointer to statistics structure. +@Input pfnOSStatsPrintf Printf function to use for output. +*/ /**************************************************************************/ +void +MemStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, + PVRSRV_PROCESS_STATS *psProcessStats) +{ + IMG_UINT32 ui32VAddrFields = sizeof(void*)/sizeof(IMG_UINT32); + IMG_UINT32 ui32PAddrFields = sizeof(IMG_CPU_PHYADDR)/sizeof(IMG_UINT32); + PVRSRV_MEM_ALLOC_REC *psRecord; + IMG_UINT32 ui32ItemNumber; + + /* Write the header... 
*/ + DIPrintf(psEntry, "PID "); + + DIPrintf(psEntry, "Type VAddress"); + for (ui32ItemNumber = 1; ui32ItemNumber < ui32VAddrFields; ui32ItemNumber++) + { + DIPrintf(psEntry, " "); + } + + DIPrintf(psEntry, " PAddress"); + for (ui32ItemNumber = 1; ui32ItemNumber < ui32PAddrFields; ui32ItemNumber++) + { + DIPrintf(psEntry, " "); + } + + DIPrintf(psEntry, " Size(bytes)\n"); + + psRecord = psProcessStats->psMemoryRecords; + if (psRecord == NULL) + { + DIPrintf(psEntry, "%-5d\n", psProcessStats->pid); + } + + while (psRecord != NULL) + { + IMG_BOOL bPrintStat = IMG_TRUE; + + DIPrintf(psEntry, "%-5d ", psProcessStats->pid); + + switch (psRecord->eAllocType) + { + case PVRSRV_MEM_ALLOC_TYPE_KMALLOC: DIPrintf(psEntry, "KMALLOC "); break; + case PVRSRV_MEM_ALLOC_TYPE_VMALLOC: DIPrintf(psEntry, "VMALLOC "); break; + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA: DIPrintf(psEntry, "ALLOC_PAGES_PT_LMA "); break; + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA: DIPrintf(psEntry, "ALLOC_PAGES_PT_UMA "); break; + case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA: DIPrintf(psEntry, "IOREMAP_PT_LMA "); break; + case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA: DIPrintf(psEntry, "VMAP_PT_UMA "); break; + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: DIPrintf(psEntry, "ALLOC_LMA_PAGES "); break; + case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: DIPrintf(psEntry, "ALLOC_UMA_PAGES "); break; + case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: DIPrintf(psEntry, "MAP_UMA_LMA_PAGES "); break; + case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT: DIPrintf(psEntry, "DMA_BUF_IMPORT "); break; + default: DIPrintf(psEntry, "INVALID "); break; + } + + if (bPrintStat) + { + for (ui32ItemNumber = 0; ui32ItemNumber < ui32VAddrFields; ui32ItemNumber++) + { + DIPrintf(psEntry, "%08x", *(((IMG_UINT32*) &psRecord->pvCpuVAddr) + ui32VAddrFields - ui32ItemNumber - 1)); + } + DIPrintf(psEntry, " "); + + for (ui32ItemNumber = 0; ui32ItemNumber < ui32PAddrFields; ui32ItemNumber++) + { + DIPrintf(psEntry, "%08x", *(((IMG_UINT32*) 
&psRecord->sCpuPAddr.uiAddr) + ui32PAddrFields - ui32ItemNumber - 1)); + } + +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON) + DIPrintf(psEntry, " " IMG_SIZE_FMTSPEC, psRecord->uiBytes); + + DIPrintf(psEntry, " %s", (IMG_CHAR*) psRecord->pvAllocdFromFile); + + DIPrintf(psEntry, " %d\n", psRecord->ui32AllocdFromLine); +#else + DIPrintf(psEntry, " " IMG_SIZE_FMTSPEC "\n", psRecord->uiBytes); +#endif + } + /* Move to next record... */ + psRecord = psRecord->psNext; + } +} /* MemStatsPrintElements */ +#endif + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) +/*************************************************************************/ /*! +@Function RIMemStatsPrintElements +@Description Prints all elements for the RI Memory record. +@Input pvStatPtr Pointer to statistics structure. +@Input pfnOSStatsPrintf Printf function to use for output. +*/ /**************************************************************************/ +void RIMemStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, + PVRSRV_PROCESS_STATS *psProcessStats) +{ + IMG_CHAR *pszStatFmtText = NULL; + IMG_HANDLE *pRIHandle = NULL; + + /* Acquire RI lock */ + RILockAcquireKM(); + + /* + * Loop through the RI system to get each line of text. 
+ */ + while (RIGetListEntryKM(psProcessStats->pid, + &pRIHandle, + &pszStatFmtText)) + { + DIPrintf(psEntry, "%s", pszStatFmtText); + } + + /* Release RI lock */ + RILockReleaseKM(); + +} /* RIMemStatsPrintElements */ +#endif + +#endif + +static IMG_UINT32 ui32FirmwareStartTimestamp; +static IMG_UINT64 ui64FirmwareIdleDuration; + +void SetFirmwareStartTime(IMG_UINT32 ui32Time) +{ + ui32FirmwareStartTimestamp = UPDATE_TIME(ui32FirmwareStartTimestamp, ui32Time); +} + +void SetFirmwareHandshakeIdleTime(IMG_UINT64 ui64Duration) +{ + ui64FirmwareIdleDuration = UPDATE_TIME(ui64FirmwareIdleDuration, ui64Duration); +} + +static INLINE void PowerStatsPrintGroup(IMG_UINT32 *pui32Stats, + OSDI_IMPL_ENTRY *psEntry, + PVRSRV_POWER_STAT_TYPE eForced, + PVRSRV_POWER_STAT_TYPE ePowerOn) +{ + IMG_UINT32 ui32Index; + + ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, PRE_POWER, DEVICE); + DIPrintf(psEntry, " Pre-Device: %9u\n", pui32Stats[ui32Index]); + + ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, PRE_POWER, SYSTEM); + DIPrintf(psEntry, " Pre-System: %9u\n", pui32Stats[ui32Index]); + + ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, POST_POWER, SYSTEM); + DIPrintf(psEntry, " Post-System: %9u\n", pui32Stats[ui32Index]); + + ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, POST_POWER, DEVICE); + DIPrintf(psEntry, " Post-Device: %9u\n", pui32Stats[ui32Index]); +} + +int PowerStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + IMG_UINT32 *pui32Stats = &aui32PowerTimingStats[0]; + IMG_UINT32 ui32Idx; + + PVR_UNREFERENCED_PARAMETER(pvData); + + DIPrintf(psEntry, "Forced Power-on Transition (nanoseconds):\n"); + PowerStatsPrintGroup(pui32Stats, psEntry, FORCED, POWER_ON); + DIPrintf(psEntry, "\n"); + + DIPrintf(psEntry, "Forced Power-off Transition (nanoseconds):\n"); + PowerStatsPrintGroup(pui32Stats, psEntry, FORCED, POWER_OFF); + DIPrintf(psEntry, "\n"); + + DIPrintf(psEntry, "Not Forced Power-on Transition (nanoseconds):\n"); + 
PowerStatsPrintGroup(pui32Stats, psEntry, NOT_FORCED, POWER_ON); + DIPrintf(psEntry, "\n"); + + DIPrintf(psEntry, "Not Forced Power-off Transition (nanoseconds):\n"); + PowerStatsPrintGroup(pui32Stats, psEntry, NOT_FORCED, POWER_OFF); + DIPrintf(psEntry, "\n"); + + + DIPrintf(psEntry, "FW bootup time (timer ticks): %u\n", ui32FirmwareStartTimestamp); + DIPrintf(psEntry, "Host Acknowledge Time for FW Idle Signal (timer ticks): %u\n", (IMG_UINT32)(ui64FirmwareIdleDuration)); + DIPrintf(psEntry, "\n"); + + DIPrintf(psEntry, "Last %d Clock Speed Change Timers (nanoseconds):\n", NUM_EXTRA_POWER_STATS); + DIPrintf(psEntry, "Prepare DVFS\tDVFS Change\tPost DVFS\n"); + + for (ui32Idx = ui32ClockSpeedIndexStart; ui32Idx !=ui32ClockSpeedIndexEnd; ui32Idx = (ui32Idx + 1) % NUM_EXTRA_POWER_STATS) + { + DIPrintf(psEntry, "%12llu\t%11llu\t%9llu\n",asClockSpeedChanges[ui32Idx].ui64PreClockSpeedChangeDuration, + asClockSpeedChanges[ui32Idx].ui64BetweenPreEndingAndPostStartingDuration, + asClockSpeedChanges[ui32Idx].ui64PostClockSpeedChangeDuration); + } + + return 0; +} /* PowerStatsPrintElements */ + +int GlobalStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + IMG_UINT32 ui32StatNumber; + PVR_UNREFERENCED_PARAMETER(pvData); + + OSLockAcquire(gsGlobalStats.hGlobalStatsLock); + + for (ui32StatNumber = 0; + ui32StatNumber < ARRAY_SIZE(pszDriverStatType); + ui32StatNumber++) + { + if (OSStringNCompare(pszDriverStatType[ui32StatNumber], "", 1) != 0) + { + DIPrintf(psEntry, "%-34s%10d\n", + pszDriverStatType[ui32StatNumber], + GET_GLOBAL_STAT_VALUE(ui32StatNumber)); + } + } + + OSLockRelease(gsGlobalStats.hGlobalStatsLock); + + return 0; +} + +/*************************************************************************/ /*! +@Function PVRSRVFindProcessMemStats +@Description Using the provided PID find memory stats for that process. + Memstats will be provided for live/connected processes only. 
+ Memstat values provided by this API relate only to the physical + memory allocated by the process and does not relate to any of + the mapped or imported memory. +@Input pid Process to search for. +@Input ArraySize Size of the array where memstat + records will be stored +@Input bAllProcessStats Flag to denote if stats for + individual process are requested + stats for all processes are + requested +@Input MemoryStats Handle to the memory where memstats + are stored. +@Output Memory statistics records for the requested pid. +*/ /**************************************************************************/ +PVRSRV_ERROR PVRSRVFindProcessMemStats(IMG_PID pid, IMG_UINT32 ui32ArrSize, IMG_BOOL bAllProcessStats, IMG_UINT32 *pui32MemoryStats) +{ + IMG_INT i; + PVRSRV_PROCESS_STATS* psProcessStats; + + PVR_LOG_RETURN_IF_INVALID_PARAM(pui32MemoryStats, "pui32MemoryStats"); + + if (bAllProcessStats) + { + PVR_LOG_RETURN_IF_FALSE(ui32ArrSize == PVRSRV_DRIVER_STAT_TYPE_COUNT, + "MemStats array size is incorrect", + PVRSRV_ERROR_INVALID_PARAMS); + + OSLockAcquire(gsGlobalStats.hGlobalStatsLock); + + for (i = 0; i < ui32ArrSize; i++) + { + pui32MemoryStats[i] = GET_GLOBAL_STAT_VALUE(i); + } + + OSLockRelease(gsGlobalStats.hGlobalStatsLock); + + return PVRSRV_OK; + } + + PVR_LOG_RETURN_IF_FALSE(ui32ArrSize == PVRSRV_PROCESS_STAT_TYPE_COUNT, + "MemStats array size is incorrect", + PVRSRV_ERROR_INVALID_PARAMS); + + OSLockAcquire(g_psLinkedListLock); + + /* Search for the given PID in the Live List */ + psProcessStats = _FindProcessStatsInLiveList(pid); + + if (psProcessStats == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "Process %d not found. 
This process may not be live anymore.", (IMG_INT)pid)); + OSLockRelease(g_psLinkedListLock); + + return PVRSRV_ERROR_PROCESS_NOT_FOUND; + } + + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + for (i = 0; i < ui32ArrSize; i++) + { + pui32MemoryStats[i] = psProcessStats->i32StatValue[i]; + } + OSLockRelease(psProcessStats->hLock); + + OSLockRelease(g_psLinkedListLock); + + return PVRSRV_OK; + +} /* PVRSRVFindProcessMemStats */ + +/*************************************************************************/ /*! +@Function PVRSRVGetProcessMemUsage +@Description Calculate allocated kernel and graphics memory for all live or + connected processes. Memstat values provided by this API relate + only to the physical memory allocated by the process and does + not relate to any of the mapped or imported memory. +@Output pui32TotalMem Total memory usage for all live + PIDs connected to the driver. +@Output pui32NumberOfLivePids Number of live pids currently + connected to the server. +@Output ppsPerProcessMemUsageData Handle to an array of + PVRSRV_PER_PROCESS_MEM_USAGE, + number of elements defined by + pui32NumberOfLivePids. +@Return PVRSRV_OK Success + PVRSRV_ERROR_PROCESS_NOT_FOUND No live processes. + PVRSRV_ERROR_OUT_OF_MEMORY Failed to allocate memory for + ppsPerProcessMemUsageData. 
+*/ /**************************************************************************/ +PVRSRV_ERROR PVRSRVGetProcessMemUsage(IMG_UINT32 *pui32TotalMem, + IMG_UINT32 *pui32NumberOfLivePids, + PVRSRV_PER_PROCESS_MEM_USAGE **ppsPerProcessMemUsageData) +{ + IMG_UINT32 ui32Counter = 0; + IMG_UINT32 ui32NumberOfLivePids = 0; + PVRSRV_ERROR eError = PVRSRV_ERROR_PROCESS_NOT_FOUND; + PVRSRV_PROCESS_STATS* psProcessStats = NULL; + PVRSRV_PER_PROCESS_MEM_USAGE* psPerProcessMemUsageData = NULL; + + OSLockAcquire(gsGlobalStats.hGlobalStatsLock); + + *pui32TotalMem = GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_KMALLOC) + + GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_VMALLOC) + + GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA) + + GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA) + + GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA) + + GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA); + + OSLockRelease(gsGlobalStats.hGlobalStatsLock); + + OSLockAcquire(g_psLinkedListLock); + psProcessStats = g_psLiveList; + + while (psProcessStats != NULL) + { + psProcessStats = psProcessStats->psNext; + ui32NumberOfLivePids++; + } + + if (ui32NumberOfLivePids > 0) + { + /* Use OSAllocZMemNoStats to prevent deadlock. 
*/ + psPerProcessMemUsageData = OSAllocZMemNoStats(ui32NumberOfLivePids * sizeof(*psPerProcessMemUsageData)); + + if (psPerProcessMemUsageData) + { + psProcessStats = g_psLiveList; + + while (psProcessStats != NULL) + { + OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); + + psPerProcessMemUsageData[ui32Counter].ui32Pid = (IMG_UINT32)psProcessStats->pid; + + psPerProcessMemUsageData[ui32Counter].ui32KernelMemUsage = psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_KMALLOC] + + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_VMALLOC]; + + psPerProcessMemUsageData[ui32Counter].ui32GraphicsMemUsage = psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA] + + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA] + + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES] + + psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES]; + + OSLockRelease(psProcessStats->hLock); + psProcessStats = psProcessStats->psNext; + ui32Counter++; + } + eError = PVRSRV_OK; + } + else + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + } + } + + OSLockRelease(g_psLinkedListLock); + *pui32NumberOfLivePids = ui32NumberOfLivePids; + *ppsPerProcessMemUsageData = psPerProcessMemUsageData; + + return eError; + +} /* PVRSRVGetProcessMemUsage */ diff --git a/drivers/gpu/drm/phytium/octopus/process_stats.h b/drivers/gpu/drm/phytium/octopus/process_stats.h new file mode 100644 index 000000000000..1d937c22666a --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/process_stats.h @@ -0,0 +1,254 @@ +/*************************************************************************/ /*! +@File +@Title Functions for creating and reading proc filesystem entries. +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef PROCESS_STATS_H +#define PROCESS_STATS_H + +#include + +#include "pvrsrv_error.h" +#include "cache_ops.h" + +/* + * The publishing of Process Stats is controlled by the + * PVRSRV_ENABLE_PROCESS_STATS build option. The recording of all Memory + * allocations is controlled by the PVRSRV_ENABLE_MEMORY_STATS build option. + * + * Note: There will be a performance degradation with memory allocation + * recording enabled! + */ + + +/* + * Memory types which can be tracked... + */ +typedef enum { + PVRSRV_MEM_ALLOC_TYPE_KMALLOC, /* memory allocated by kmalloc() */ + PVRSRV_MEM_ALLOC_TYPE_VMALLOC, /* memory allocated by vmalloc() */ + PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA, /* pages allocated from UMA to hold page table information */ + PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA, /* ALLOC_PAGES_PT_UMA mapped to kernel address space */ + PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA, /* pages allocated from LMA to hold page table information */ + PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, /* ALLOC_PAGES_PT_LMA mapped to kernel address space */ + PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, /* pages allocated from LMA */ + PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES, /* pages allocated from UMA */ + PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, /* mapped UMA/LMA pages */ + PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES, /* pages in the page pool */ + PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT, /* dma-buf imports */ + + /* Must be the last enum...*/ + PVRSRV_MEM_ALLOC_TYPE_COUNT +} PVRSRV_MEM_ALLOC_TYPE; + + +/* + * Functions for managing the processes recorded... + */ +PVRSRV_ERROR PVRSRVStatsInitialise(void); +void PVRSRVStatsDestroy(void); + +PVRSRV_ERROR PVRSRVStatsRegisterProcess(IMG_HANDLE* phProcessStats); + +void PVRSRVStatsDeregisterProcess(IMG_HANDLE hProcessStats); + +#define MAX_POWER_STAT_ENTRIES 51 + +/* + * Functions for recording the statistics... 
+ */ + +/* + * PVRSRV_ENABLE_PROCESS_STATS enables process statistics regarding events, + * resources and memory across all processes + * PVRSRV_ENABLE_MEMORY_STATS enables recording of Linux kernel memory + * allocations, provided that PVRSRV_ENABLE_PROCESS_STATS is enabled + * - Output can be found in: + * /sys/kernel/debug/pvr/proc_stats/[live|retired]_pids_stats/mem_area + * PVRSRV_DEBUG_LINUX_MEMORY_STATS provides more details about memory + * statistics in conjunction with PVRSRV_ENABLE_MEMORY_STATS + * PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON is defined to encompass both memory + * allocation statistics functionalities described above in a single macro + */ +#if defined(PVRSRV_ENABLE_PROCESS_STATS) && defined(PVRSRV_ENABLE_MEMORY_STATS) && defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) && defined(DEBUG) +#define PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON +#endif + +/* + * When using detailed memory allocation statistics, the line number and + * file name where the allocation happened are also provided. + * When this feature is not used, these parameters are not needed. + */ +#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON) +#define DEBUG_MEMSTATS_PARAMS ,void *pvAllocFromFile, IMG_UINT32 ui32AllocFromLine +#define DEBUG_MEMSTATS_VALUES ,__FILE__, __LINE__ +#else +#define DEBUG_MEMSTATS_PARAMS +#define DEBUG_MEMSTATS_VALUES +#endif + +void PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType, + void *pvCpuVAddr, + IMG_CPU_PHYADDR sCpuPAddr, + size_t uiBytes, + void *pvPrivateData, + IMG_PID uiPid + DEBUG_MEMSTATS_PARAMS); + +void PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType, + IMG_UINT64 ui64Key, + IMG_PID uiPid); + +void PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType, + size_t uiBytes, + IMG_PID uiPid); + +/* + * Increases the memory stat for eAllocType. Tracks the allocation size value + * by inserting a value into a hash table with uiCpuVAddr as key. + * Pair with PVRSRVStatsDecrMemAllocStatAndUntrack(). 
+ */ +void PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE eAllocType, + size_t uiBytes, + IMG_UINT64 uiCpuVAddr, + IMG_PID uiPid); + +void PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType, + size_t uiBytes, + IMG_PID uiPid); + +void PVRSRVStatsDecrMemKAllocStat(size_t uiBytes, + IMG_PID decrPID); + +/* + * Decrease the memory stat for eAllocType. Takes the allocation size value + * from the hash table with uiCpuVAddr as key. + * Pair with PVRSRVStatsIncrMemAllocStatAndTrack(). + */ +void PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE eAllocType, + IMG_UINT64 uiCpuVAddr); + +void +PVRSRVStatsIncrMemAllocPoolStat(size_t uiBytes); + +void +PVRSRVStatsDecrMemAllocPoolStat(size_t uiBytes); + +void +PVRSRVStatsUpdateOOMStats(IMG_UINT32 ui32OOMStatType, + IMG_PID pidOwner); + +PVRSRV_ERROR +PVRSRVServerUpdateOOMStats(IMG_UINT32 ui32OOMStatType, + IMG_PID pidOwner); + +void PVRSRVStatsUpdateRenderContextStats(IMG_UINT32 ui32TotalNumPartialRenders, + IMG_UINT32 ui32TotalNumOutOfMemory, + IMG_UINT32 ui32TotalTAStores, + IMG_UINT32 ui32Total3DStores, + IMG_UINT32 ui32TotalCDMStores, + IMG_UINT32 ui32TotalTDMStores, + IMG_PID owner); + +void PVRSRVStatsUpdateZSBufferStats(IMG_UINT32 ui32NumReqByApp, + IMG_UINT32 ui32NumReqByFW, + IMG_PID owner); + +void PVRSRVStatsUpdateFreelistStats(IMG_UINT32 ui32NumGrowReqByApp, + IMG_UINT32 ui32NumGrowReqByFW, + IMG_UINT32 ui32InitFLPages, + IMG_UINT32 ui32NumHighPages, + IMG_PID ownerPid); +#if defined(PVRSRV_ENABLE_CACHEOP_STATS) +void PVRSRVStatsUpdateCacheOpStats(PVRSRV_CACHE_OP uiCacheOp, + IMG_UINT32 ui32OpSeqNum, +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEV_PHYADDR sDevPAddr, + IMG_UINT32 eFenceOpType, +#endif + IMG_DEVMEM_SIZE_T uiOffset, + IMG_DEVMEM_SIZE_T uiSize, + IMG_UINT64 ui64ExecuteTimeMs, + IMG_BOOL bUserModeFlush, + IMG_BOOL bIsFence, + IMG_PID ownerPid); +#endif + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +/* Update pre/post 
power transition timing statistics */ +void InsertPowerTimeStatistic(IMG_UINT64 ui64SysStartTime, IMG_UINT64 ui64SysEndTime, + IMG_UINT64 ui64DevStartTime, IMG_UINT64 ui64DevEndTime, + IMG_BOOL bForced, IMG_BOOL bPowerOn, IMG_BOOL bPrePower); + +void InsertPowerTimeStatisticExtraPre(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64Stoptimer); +void InsertPowerTimeStatisticExtraPost(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer); +#else +/* Update pre/post power transition timing statistics */ +static inline +void InsertPowerTimeStatistic(IMG_UINT64 ui64SysStartTime, IMG_UINT64 ui64SysEndTime, + IMG_UINT64 ui64DevStartTime, IMG_UINT64 ui64DevEndTime, + IMG_BOOL bForced, IMG_BOOL bPowerOn, IMG_BOOL bPrePower) {} +static inline +void InsertPowerTimeStatisticExtraPre(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64Stoptimer) {} + +static inline +void InsertPowerTimeStatisticExtraPost(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer) {} +#endif + +void SetFirmwareStartTime(IMG_UINT32 ui32TimeStamp); + +void SetFirmwareHandshakeIdleTime(IMG_UINT64 ui64Duration); + +/* Functions used for calculating the memory usage statistics of a process */ +PVRSRV_ERROR PVRSRVFindProcessMemStats(IMG_PID pid, IMG_UINT32 ui32ArrSize, + IMG_BOOL bAllProcessStats, IMG_UINT32 *pui32MemoryStats); + +typedef struct { + IMG_UINT32 ui32Pid; + IMG_UINT32 ui32KernelMemUsage; + IMG_UINT32 ui32GraphicsMemUsage; +} PVRSRV_PER_PROCESS_MEM_USAGE; + +PVRSRV_ERROR PVRSRVGetProcessMemUsage(IMG_UINT32 *pui32TotalMem, + IMG_UINT32 *pui32NumberOfLivePids, + PVRSRV_PER_PROCESS_MEM_USAGE **ppsPerProcessMemUsageData); + +#endif /* PROCESS_STATS_H */ diff --git a/drivers/gpu/drm/phytium/octopus/pvr_bridge.h b/drivers/gpu/drm/phytium/octopus/pvr_bridge.h new file mode 100644 index 000000000000..1d6e49471c81 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_bridge.h @@ -0,0 +1,457 @@ +/*************************************************************************/ /*! 
+@File +@Title PVR Bridge Functionality +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Header for the PVR Bridge code +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef PVR_BRIDGE_H +#define PVR_BRIDGE_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#include "pvrsrv_error.h" +#if defined(SUPPORT_DISPLAY_CLASS) +#include "common_dc_bridge.h" +#if defined(SUPPORT_DCPLAT_BRIDGE) +#include "common_dcplat_bridge.h" +#endif +#endif +#include "common_mm_bridge.h" +#if defined(SUPPORT_MMPLAT_BRIDGE) +#include "common_mmplat_bridge.h" +#endif +#if defined(SUPPORT_WRAP_EXTMEM) +#include "common_mmextmem_bridge.h" +#endif +#if !defined(EXCLUDE_CMM_BRIDGE) +#include "common_cmm_bridge.h" +#endif +#if defined(__linux__) +#include "common_dmabuf_bridge.h" +#endif +#if defined(PDUMP) +#include "common_pdump_bridge.h" +#include "common_pdumpctrl_bridge.h" +#include "common_pdumpmm_bridge.h" +#endif +#include "common_cache_bridge.h" +#if defined(SUPPORT_DMA_TRANSFER) +#include "common_dma_bridge.h" +#endif +#include "common_srvcore_bridge.h" +#include "common_sync_bridge.h" +#if defined(SUPPORT_SECURE_EXPORT) +#include "common_smm_bridge.h" +#endif +#if !defined(EXCLUDE_HTBUFFER_BRIDGE) +#include "common_htbuffer_bridge.h" +#endif +#include "common_pvrtl_bridge.h" +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) +#include "common_ri_bridge.h" +#endif + +#if defined(SUPPORT_VALIDATION_BRIDGE) +#include "common_validation_bridge.h" +#endif + +#if defined(PVR_TESTING_UTILS) +#include "common_tutils_bridge.h" +#endif + +#include 
"common_devicememhistory_bridge.h" +#include "common_synctracking_bridge.h" + +#if defined(SUPPORT_FALLBACK_FENCE_SYNC) +#include "common_syncfallback_bridge.h" +#endif + +#if defined(SUPPORT_DI_BRG_IMPL) +#include "common_di_bridge.h" +#endif + +/* + * Bridge Cmd Ids + */ + + +/* Note: The pattern + * #define PVRSRV_BRIDGE_FEATURE (PVRSRV_BRIDGE_PREVFEATURE + 1) + * #if defined(SUPPORT_FEATURE) + * #define PVRSRV_BRIDGE_FEATURE_DISPATCH_FIRST (PVRSRV_BRIDGE_PREVFEATURE_DISPATCH_LAST + 1) + * #define PVRSRV_BRIDGE_FEATURE_DISPATCH_LAST (PVRSRV_BRIDGE_FEATURE_DISPATCH_FIRST + PVRSRV_BRIDGE_FEATURE_CMD_LAST) + * #else + * #define PVRSRV_BRIDGE_FEATURE_DISPATCH_FIRST 0 + * #define PVRSRV_BRIDGE_FEATURE_DISPATCH_LAST (PVRSRV_BRIDGE_PREVFEATURE_DISPATCH_LAST) + * #endif + * is used in the macro definitions below to make PVRSRV_BRIDGE_FEATURE_* + * take up no space in the dispatch table if SUPPORT_FEATURE is disabled. + * + * Note however that a bridge always defines PVRSRV_BRIDGE_FEATURE, even where + * the feature is not enabled (each bridge group retains its own ioctl number). 
+ */ + +#define PVRSRV_BRIDGE_FIRST 0UL + +/* 0: Default handler */ +#define PVRSRV_BRIDGE_DEFAULT 0UL +#define PVRSRV_BRIDGE_DEFAULT_DISPATCH_FIRST 0UL +#define PVRSRV_BRIDGE_DEFAULT_DISPATCH_LAST (PVRSRV_BRIDGE_DEFAULT_DISPATCH_FIRST) +/* 1: CORE functions */ +#define PVRSRV_BRIDGE_SRVCORE 1UL +#define PVRSRV_BRIDGE_SRVCORE_DISPATCH_FIRST (PVRSRV_BRIDGE_DEFAULT_DISPATCH_LAST+1) +#define PVRSRV_BRIDGE_SRVCORE_DISPATCH_LAST (PVRSRV_BRIDGE_SRVCORE_DISPATCH_FIRST + PVRSRV_BRIDGE_SRVCORE_CMD_LAST) + +/* 2: SYNC functions */ +#define PVRSRV_BRIDGE_SYNC 2UL +#define PVRSRV_BRIDGE_SYNC_DISPATCH_FIRST (PVRSRV_BRIDGE_SRVCORE_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_SYNC_DISPATCH_LAST (PVRSRV_BRIDGE_SYNC_DISPATCH_FIRST + PVRSRV_BRIDGE_SYNC_CMD_LAST) + +/* 3,4: Reserved */ +#define PVRSRV_BRIDGE_RESERVED1 3UL +#define PVRSRV_BRIDGE_RESERVED1_DISPATCH_FIRST 0 +#define PVRSRV_BRIDGE_RESERVED1_DISPATCH_LAST (PVRSRV_BRIDGE_SYNC_DISPATCH_LAST) + +#define PVRSRV_BRIDGE_RESERVED2 4UL +#define PVRSRV_BRIDGE_RESERVED2_DISPATCH_FIRST 0 +#define PVRSRV_BRIDGE_RESERVED2_DISPATCH_LAST (PVRSRV_BRIDGE_RESERVED1_DISPATCH_LAST) + +/* 5: PDUMP CTRL layer functions */ +#define PVRSRV_BRIDGE_PDUMPCTRL 5UL +#if defined(PDUMP) +#define PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_FIRST (PVRSRV_BRIDGE_SYNC_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_LAST (PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_FIRST + PVRSRV_BRIDGE_PDUMPCTRL_CMD_LAST) +#else +#define PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_FIRST 0 +#define PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_LAST (PVRSRV_BRIDGE_SYNC_DISPATCH_LAST) +#endif + +/* 6: Memory Management functions */ +#define PVRSRV_BRIDGE_MM 6UL +#define PVRSRV_BRIDGE_MM_DISPATCH_FIRST (PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_MM_DISPATCH_LAST (PVRSRV_BRIDGE_MM_DISPATCH_FIRST + PVRSRV_BRIDGE_MM_CMD_LAST) + +/* 7: Non-Linux Memory Management functions */ +#define PVRSRV_BRIDGE_MMPLAT 7UL +#if defined(SUPPORT_MMPLAT_BRIDGE) +#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_FIRST 
(PVRSRV_BRIDGE_MM_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST (PVRSRV_BRIDGE_MMPLAT_DISPATCH_FIRST + PVRSRV_BRIDGE_MMPLAT_CMD_LAST) +#else +#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_FIRST 0 +#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST (PVRSRV_BRIDGE_MM_DISPATCH_LAST) +#endif + +/* 8: Context Memory Management functions */ +#define PVRSRV_BRIDGE_CMM 8UL +#if !defined(EXCLUDE_CMM_BRIDGE) +#define PVRSRV_BRIDGE_CMM_DISPATCH_FIRST (PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_CMM_DISPATCH_LAST (PVRSRV_BRIDGE_CMM_DISPATCH_FIRST + PVRSRV_BRIDGE_CMM_CMD_LAST) +#else +#define PVRSRV_BRIDGE_CMM_DISPATCH_FIRST 0 +#define PVRSRV_BRIDGE_CMM_DISPATCH_LAST (PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST) +#endif + +/* 9: PDUMP Memory Management functions */ +#define PVRSRV_BRIDGE_PDUMPMM 9UL +#if defined(PDUMP) +#define PVRSRV_BRIDGE_PDUMPMM_DISPATCH_FIRST (PVRSRV_BRIDGE_CMM_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST (PVRSRV_BRIDGE_PDUMPMM_DISPATCH_FIRST + PVRSRV_BRIDGE_PDUMPMM_CMD_LAST) +#else +#define PVRSRV_BRIDGE_PDUMPMM_DISPATCH_FIRST 0 +#define PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST (PVRSRV_BRIDGE_CMM_DISPATCH_LAST) +#endif + +/* 10: PDUMP functions */ +#define PVRSRV_BRIDGE_PDUMP 10UL +#if defined(PDUMP) +#define PVRSRV_BRIDGE_PDUMP_DISPATCH_FIRST (PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST (PVRSRV_BRIDGE_PDUMP_DISPATCH_FIRST + PVRSRV_BRIDGE_PDUMP_CMD_LAST) +#else +#define PVRSRV_BRIDGE_PDUMP_DISPATCH_FIRST 0 +#define PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST (PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST) +#endif + +/* 11: DMABUF functions */ +#define PVRSRV_BRIDGE_DMABUF 11UL +#if defined(__linux__) +#define PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST (PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST (PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST + PVRSRV_BRIDGE_DMABUF_CMD_LAST) +#else +#define PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST 0 +#define PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST 
(PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST) +#endif + +/* 12: Display Class functions */ +#define PVRSRV_BRIDGE_DC 12UL +#if defined(SUPPORT_DISPLAY_CLASS) +#define PVRSRV_BRIDGE_DC_DISPATCH_FIRST (PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_DC_DISPATCH_LAST (PVRSRV_BRIDGE_DC_DISPATCH_FIRST + PVRSRV_BRIDGE_DC_CMD_LAST) +#else +#define PVRSRV_BRIDGE_DC_DISPATCH_FIRST 0 +#define PVRSRV_BRIDGE_DC_DISPATCH_LAST (PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST) +#endif + +/* 13: Cache interface functions */ +#define PVRSRV_BRIDGE_CACHE 13UL +#define PVRSRV_BRIDGE_CACHE_DISPATCH_FIRST (PVRSRV_BRIDGE_DC_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_CACHE_DISPATCH_LAST (PVRSRV_BRIDGE_CACHE_DISPATCH_FIRST + PVRSRV_BRIDGE_CACHE_CMD_LAST) + +/* 14: Secure Memory Management functions */ +#define PVRSRV_BRIDGE_SMM 14UL +#if defined(SUPPORT_SECURE_EXPORT) +#define PVRSRV_BRIDGE_SMM_DISPATCH_FIRST (PVRSRV_BRIDGE_CACHE_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_SMM_DISPATCH_LAST (PVRSRV_BRIDGE_SMM_DISPATCH_FIRST + PVRSRV_BRIDGE_SMM_CMD_LAST) +#else +#define PVRSRV_BRIDGE_SMM_DISPATCH_FIRST 0 +#define PVRSRV_BRIDGE_SMM_DISPATCH_LAST (PVRSRV_BRIDGE_CACHE_DISPATCH_LAST) +#endif + +/* 15: Transport Layer interface functions */ +#define PVRSRV_BRIDGE_PVRTL 15UL +#define PVRSRV_BRIDGE_PVRTL_DISPATCH_FIRST (PVRSRV_BRIDGE_SMM_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_PVRTL_DISPATCH_LAST (PVRSRV_BRIDGE_PVRTL_DISPATCH_FIRST + PVRSRV_BRIDGE_PVRTL_CMD_LAST) + +/* 16: Resource Information (RI) interface functions */ +#define PVRSRV_BRIDGE_RI 16UL +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) +#define PVRSRV_BRIDGE_RI_DISPATCH_FIRST (PVRSRV_BRIDGE_PVRTL_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_RI_DISPATCH_LAST (PVRSRV_BRIDGE_RI_DISPATCH_FIRST + PVRSRV_BRIDGE_RI_CMD_LAST) +#else +#define PVRSRV_BRIDGE_RI_DISPATCH_FIRST 0 +#define PVRSRV_BRIDGE_RI_DISPATCH_LAST (PVRSRV_BRIDGE_PVRTL_DISPATCH_LAST) +#endif + +/* 17: Validation interface functions */ +#define PVRSRV_BRIDGE_VALIDATION 17UL +#if 
defined(SUPPORT_VALIDATION_BRIDGE) +#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_FIRST (PVRSRV_BRIDGE_RI_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST (PVRSRV_BRIDGE_VALIDATION_DISPATCH_FIRST + PVRSRV_BRIDGE_VALIDATION_CMD_LAST) +#else +#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_FIRST 0 +#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST (PVRSRV_BRIDGE_RI_DISPATCH_LAST) +#endif + +/* 18: TUTILS interface functions */ +#define PVRSRV_BRIDGE_TUTILS 18UL +#if defined(PVR_TESTING_UTILS) +#define PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST (PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST (PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST + PVRSRV_BRIDGE_TUTILS_CMD_LAST) +#else +#define PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST 0 +#define PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST (PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST) +#endif + +/* 19: DevMem history interface functions */ +#define PVRSRV_BRIDGE_DEVICEMEMHISTORY 19UL +#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_FIRST (PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST (PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_FIRST + PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_LAST) + +/* 20: Host Trace Buffer interface functions */ +#define PVRSRV_BRIDGE_HTBUFFER 20UL +#if !defined(EXCLUDE_HTBUFFER_BRIDGE) +#define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_FIRST (PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST (PVRSRV_BRIDGE_HTBUFFER_DISPATCH_FIRST + PVRSRV_BRIDGE_HTBUFFER_CMD_LAST) +#else +#define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_FIRST 0 +#define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST (PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST) +#endif + +/* 21: Non-Linux Display functions */ +#define PVRSRV_BRIDGE_DCPLAT 21UL +#if defined(SUPPORT_DISPLAY_CLASS) && defined(SUPPORT_DCPLAT_BRIDGE) +#define PVRSRV_BRIDGE_DCPLAT_DISPATCH_FIRST (PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST 
(PVRSRV_BRIDGE_DCPLAT_DISPATCH_FIRST + PVRSRV_BRIDGE_DCPLAT_CMD_LAST) +#else +#define PVRSRV_BRIDGE_DCPLAT_DISPATCH_FIRST 0 +#define PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST (PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST) +#endif + +/* 22: Extmem functions */ +#define PVRSRV_BRIDGE_MMEXTMEM 22UL +#if defined(SUPPORT_WRAP_EXTMEM) +#define PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_FIRST (PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_LAST (PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_FIRST + PVRSRV_BRIDGE_MMEXTMEM_CMD_LAST) +#else +#define PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_FIRST 0 +#define PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_LAST (PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST) +#endif + +/* 23: Sync tracking functions */ +#define PVRSRV_BRIDGE_SYNCTRACKING 23UL +#define PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_FIRST (PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_LAST (PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_FIRST + PVRSRV_BRIDGE_SYNCTRACKING_CMD_LAST) + +/* 24: Sync fallback functions */ +#define PVRSRV_BRIDGE_SYNCFALLBACK 24UL +#if defined(SUPPORT_FALLBACK_FENCE_SYNC) +#define PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_FIRST (PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_LAST (PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_FIRST + PVRSRV_BRIDGE_SYNCFALLBACK_CMD_LAST) +#else +#define PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_FIRST 0 +#define PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_LAST (PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_LAST) +#endif + +/* 25: Debug Information (DI) interface functions */ +#define PVRSRV_BRIDGE_DI 25UL +#if defined(SUPPORT_DI_BRG_IMPL) +#define PVRSRV_BRIDGE_DI_DISPATCH_FIRST (PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_DI_DISPATCH_LAST (PVRSRV_BRIDGE_DI_DISPATCH_FIRST + PVRSRV_BRIDGE_DI_CMD_LAST) +#else +#define PVRSRV_BRIDGE_DI_DISPATCH_FIRST 0 +#define PVRSRV_BRIDGE_DI_DISPATCH_LAST (PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_LAST) +#endif + +/* 26: DMA transfer functions */ + +#define 
PVRSRV_BRIDGE_DMA 26UL +#if defined(SUPPORT_DMA_TRANSFER) +#define PVRSRV_BRIDGE_DMA_DISPATCH_FIRST (PVRSRV_BRIDGE_DI_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_DMA_DISPATCH_LAST (PVRSRV_BRIDGE_DMA_DISPATCH_FIRST + PVRSRV_BRIDGE_DMA_CMD_LAST) +#else +#define PVRSRV_BRIDGE_DMA_DISPATCH_FIRST 0 +#define PVRSRV_BRIDGE_DMA_DISPATCH_LAST (PVRSRV_BRIDGE_DI_DISPATCH_LAST) +#endif + +/* NB PVRSRV_BRIDGE_LAST below must be the last bridge group defined above (PVRSRV_BRIDGE_FEATURE) */ +#define PVRSRV_BRIDGE_LAST (PVRSRV_BRIDGE_DMA) +/* NB PVRSRV_BRIDGE_DISPATCH LAST below must be the last dispatch entry defined above (PVRSRV_BRIDGE_FEATURE_DISPATCH_LAST) */ +#define PVRSRV_BRIDGE_DISPATCH_LAST (PVRSRV_BRIDGE_DMA_DISPATCH_LAST) + +/* bit mask representing the enabled PVR bridges */ + +static const IMG_UINT32 gui32PVRBridges = + (1U << (PVRSRV_BRIDGE_DEFAULT - PVRSRV_BRIDGE_FIRST)) + | (1U << (PVRSRV_BRIDGE_SRVCORE - PVRSRV_BRIDGE_FIRST)) + | (1U << (PVRSRV_BRIDGE_SYNC - PVRSRV_BRIDGE_FIRST)) + +#if defined(PDUMP) + | (1U << (PVRSRV_BRIDGE_PDUMPCTRL - PVRSRV_BRIDGE_FIRST)) +#endif + | (1U << (PVRSRV_BRIDGE_MM - PVRSRV_BRIDGE_FIRST)) +#if defined(SUPPORT_MMPLAT_BRIDGE) + | (1U << (PVRSRV_BRIDGE_MMPLAT - PVRSRV_BRIDGE_FIRST)) +#endif +#if defined(SUPPORT_CMM) + | (1U << (PVRSRV_BRIDGE_CMM - PVRSRV_BRIDGE_FIRST)) +#endif +#if defined(PDUMP) + | (1U << (PVRSRV_BRIDGE_PDUMPMM - PVRSRV_BRIDGE_FIRST)) + | (1U << (PVRSRV_BRIDGE_PDUMP - PVRSRV_BRIDGE_FIRST)) +#endif +#if defined(__linux__) + | (1U << (PVRSRV_BRIDGE_DMABUF - PVRSRV_BRIDGE_FIRST)) +#endif +#if defined(SUPPORT_DISPLAY_CLASS) + | (1U << (PVRSRV_BRIDGE_DC - PVRSRV_BRIDGE_FIRST)) +#endif + | (1U << (PVRSRV_BRIDGE_CACHE - PVRSRV_BRIDGE_FIRST)) +#if defined(SUPPORT_SECURE_EXPORT) + | (1U << (PVRSRV_BRIDGE_SMM - PVRSRV_BRIDGE_FIRST)) +#endif + | (1U << (PVRSRV_BRIDGE_PVRTL - PVRSRV_BRIDGE_FIRST)) +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + | (1U << (PVRSRV_BRIDGE_RI - PVRSRV_BRIDGE_FIRST)) +#endif +#if 
defined(SUPPORT_VALIDATION) + | (1U << (PVRSRV_BRIDGE_VALIDATION - PVRSRV_BRIDGE_FIRST)) +#endif +#if defined(PVR_TESTING_UTILS) + | (1U << (PVRSRV_BRIDGE_TUTILS - PVRSRV_BRIDGE_FIRST)) +#endif + | (1U << (PVRSRV_BRIDGE_DEVICEMEMHISTORY - PVRSRV_BRIDGE_FIRST)) +#if defined(SUPPORT_HTBUFFER) + | (1U << (PVRSRV_BRIDGE_HTBUFFER - PVRSRV_BRIDGE_FIRST)) +#endif +#if defined(SUPPORT_DISPLAY_CLASS) && defined(SUPPORT_DCPLAT_BRIDGE) + | (1U << (PVRSRV_BRIDGE_DCPLAT - PVRSRV_BRIDGE_FIRST)) +#endif +#if defined(SUPPORT_WRAP_EXTMEM) + | (1U << (PVRSRV_BRIDGE_MMEXTMEM - PVRSRV_BRIDGE_FIRST)) +#endif + | (1U << (PVRSRV_BRIDGE_SYNCTRACKING - PVRSRV_BRIDGE_FIRST)) +#if defined(SUPPORT_FALLBACK_FENCE_SYNC) + | (1U << (PVRSRV_BRIDGE_SYNCFALLBACK - PVRSRV_BRIDGE_FIRST)) +#endif +#if defined(SUPPORT_DI_BRG_IMPL) + | (1U << (PVRSRV_BRIDGE_DI - PVRSRV_BRIDGE_FIRST)) +#endif +#if defined(SUPPORT_DMA_TRANSFER) + | (1U << (PVRSRV_BRIDGE_DMA - PVRSRV_BRIDGE_FIRST)) +#endif + ; + +/* bit field representing which PVR bridge groups may optionally not + * be present in the server + */ +#define PVR_BRIDGES_OPTIONAL \ + ( \ + (1U << (PVRSRV_BRIDGE_RI - PVRSRV_BRIDGE_FIRST)) \ + ) + +/****************************************************************************** + * Generic bridge structures + *****************************************************************************/ + + +/****************************************************************************** + * bridge packaging structure + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_PACKAGE_TAG +{ + IMG_UINT32 ui32BridgeID; /*!< ioctl bridge group */ + IMG_UINT32 ui32FunctionID; /*!< ioctl function index */ + IMG_UINT32 ui32Size; /*!< size of structure */ + void __user *pvParamIn; /*!< input data buffer */ + IMG_UINT32 ui32InBufferSize; /*!< size of input data buffer */ + void __user *pvParamOut; /*!< output data buffer */ + IMG_UINT32 ui32OutBufferSize; /*!< size of output data buffer 
*/ +}PVRSRV_BRIDGE_PACKAGE; + +#if defined(__cplusplus) +} +#endif + +#endif /* PVR_BRIDGE_H */ + +/****************************************************************************** + End of file (pvr_bridge.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/pvr_bridge_k.c b/drivers/gpu/drm/phytium/octopus/pvr_bridge_k.c new file mode 100644 index 000000000000..05cc01bca363 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_bridge_k.c @@ -0,0 +1,590 @@ +/*************************************************************************/ /*! +@File +@Title PVR Bridge Module (kernel side) +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Receives calls from the user portion of services and + despatches them to functions in the kernel portion. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include + +#include + +#include "img_defs.h" +#include "pvr_bridge.h" +#include "pvr_bridge_k.h" +#include "connection_server.h" +#include "syscommon.h" +#include "pvr_debug.h" +#include "di_server.h" +#include "private_data.h" +#include "linkage.h" +#include "pmr.h" +#include "rgx_bvnc_defs_km.h" +#include "pvrsrv_bridge_init.h" + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) +#include +#else +#include +#endif + +#include "pvr_drm.h" +#include "pvr_drv.h" + +#include "env_connection.h" +#include +#include + +/* RGX: */ +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif + +#include "srvcore.h" +#include "common_srvcore_bridge.h" + +PVRSRV_ERROR InitDMABUFBridge(void); +PVRSRV_ERROR DeinitDMABUFBridge(void); + +#if defined(MODULE_TEST) +/************************************************************************/ +// additional includes for services testing +/************************************************************************/ +#include "pvr_test_bridge.h" +#include "kern_test.h" +/************************************************************************/ +// end of additional includes +/************************************************************************/ +#endif + +/* The mmap code has its own mutex, to prevent possible re-entrant issues + * when the same PMR is mapped from two different connections/processes. 
+ */ +static DEFINE_MUTEX(g_sMMapMutex); + +#define _DRIVER_SUSPENDED 1 +#define _DRIVER_NOT_SUSPENDED 0 +static ATOMIC_T g_iDriverSuspended; +static ATOMIC_T g_iNumActiveDriverThreads; +static ATOMIC_T g_iNumActiveKernelThreads; +static IMG_HANDLE g_hDriverThreadEventObject; + +#if defined(DEBUG_BRIDGE_KM) +static DI_ENTRY *gpsDIBridgeStatsEntry; + +static void *BridgeStatsDIStart(OSDI_IMPL_ENTRY *psEntry, IMG_UINT64 *pui64Pos) +{ + PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psDispatchTable = DIGetPrivData(psEntry); + + BridgeGlobalStatsLock(); + + if (psDispatchTable == NULL || *pui64Pos > BRIDGE_DISPATCH_TABLE_ENTRY_COUNT) + { + return NULL; + } + + if (*pui64Pos == 0) + { + return DI_START_TOKEN; + } + + return &(psDispatchTable[*pui64Pos - 1]); +} + +static void BridgeStatsDIStop(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + PVR_UNREFERENCED_PARAMETER(psEntry); + PVR_UNREFERENCED_PARAMETER(pvData); + + BridgeGlobalStatsUnlock(); +} + +static void *BridgeStatsDINext(OSDI_IMPL_ENTRY *psEntry, void *pvData, + IMG_UINT64 *pui64Pos) +{ + PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psDispatchTable = DIGetPrivData(psEntry); + IMG_UINT64 uiItemAskedFor = *pui64Pos; /* pui64Pos on entry is the index to return */ + + PVR_UNREFERENCED_PARAMETER(pvData); + + /* Is the item asked for (starts at 0) a valid table index? 
*/ + if (uiItemAskedFor < BRIDGE_DISPATCH_TABLE_ENTRY_COUNT) + { + (*pui64Pos)++; /* on exit it is the next DI index to ask for */ + return &(psDispatchTable[uiItemAskedFor]); + } + + /* Now passed the end of the table to indicate stop */ + return NULL; +} + +static int BridgeStatsDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + if (pvData == DI_START_TOKEN) + { + DIPrintf(psEntry, + "Total ioctl call count = %u\n" + "Total number of bytes copied via copy_from_user = %u\n" + "Total number of bytes copied via copy_to_user = %u\n" + "Total number of bytes copied via copy_*_user = %u\n\n" + "%3s: %-60s | %-48s | %10s | %20s | %20s | %20s | %20s\n", + g_BridgeGlobalStats.ui32IOCTLCount, + g_BridgeGlobalStats.ui32TotalCopyFromUserBytes, + g_BridgeGlobalStats.ui32TotalCopyToUserBytes, + g_BridgeGlobalStats.ui32TotalCopyFromUserBytes + + g_BridgeGlobalStats.ui32TotalCopyToUserBytes, + "#", + "Bridge Name", + "Wrapper Function", + "Call Count", + "copy_from_user (B)", + "copy_to_user (B)", + "Total Time (us)", + "Max Time (us)"); + } + else if (pvData != NULL) + { + PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psTableEntry = pvData; + IMG_UINT32 ui32Remainder; + + DIPrintf(psEntry, + "%3d: %-60s %-48s %-10u %-20u %-20u %-20" IMG_UINT64_FMTSPEC " %-20" IMG_UINT64_FMTSPEC "\n", + (IMG_UINT32)(((size_t)psTableEntry-(size_t)g_BridgeDispatchTable)/sizeof(*g_BridgeDispatchTable)), + psTableEntry->pszIOCName, + (psTableEntry->pfFunction != NULL) ? 
psTableEntry->pszFunctionName : "(null)", + psTableEntry->ui32CallCount, + psTableEntry->ui32CopyFromUserTotalBytes, + psTableEntry->ui32CopyToUserTotalBytes, + OSDivide64r64(psTableEntry->ui64TotalTimeNS, 1000, &ui32Remainder), + OSDivide64r64(psTableEntry->ui64MaxTimeNS, 1000, &ui32Remainder)); + } + + return 0; +} + +static IMG_INT64 BridgeStatsWrite(const IMG_CHAR *pcBuffer, + IMG_UINT64 ui64Count, IMG_UINT64 *pui64Pos, + void *pvData) +{ + IMG_UINT32 i; + + PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO); + PVR_RETURN_IF_FALSE(pui64Pos != NULL && *pui64Pos == 0, -EIO); + PVR_RETURN_IF_FALSE(ui64Count >= 1, -EINVAL); + PVR_RETURN_IF_FALSE(pcBuffer[0] == '0', -EINVAL); + PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL); + + /* Reset stats. */ + + BridgeGlobalStatsLock(); + + g_BridgeGlobalStats.ui32IOCTLCount = 0; + g_BridgeGlobalStats.ui32TotalCopyFromUserBytes = 0; + g_BridgeGlobalStats.ui32TotalCopyToUserBytes = 0; + + for (i = 0; i < ARRAY_SIZE(g_BridgeDispatchTable); i++) + { + g_BridgeDispatchTable[i].ui32CallCount = 0; + g_BridgeDispatchTable[i].ui32CopyFromUserTotalBytes = 0; + g_BridgeDispatchTable[i].ui32CopyToUserTotalBytes = 0; + g_BridgeDispatchTable[i].ui64TotalTimeNS = 0; + g_BridgeDispatchTable[i].ui64MaxTimeNS = 0; + } + + BridgeGlobalStatsUnlock(); + + return ui64Count; +} + +#endif /* defined(DEBUG_BRIDGE_KM) */ + +PVRSRV_ERROR OSPlatformBridgeInit(void) +{ + PVRSRV_ERROR eError; + + eError = InitDMABUFBridge(); + PVR_LOG_IF_ERROR(eError, "InitDMABUFBridge"); + + OSAtomicWrite(&g_iDriverSuspended, _DRIVER_NOT_SUSPENDED); + OSAtomicWrite(&g_iNumActiveDriverThreads, 0); + OSAtomicWrite(&g_iNumActiveKernelThreads, 0); + + eError = OSEventObjectCreate("Global driver thread event object", + &g_hDriverThreadEventObject); + PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", error_); + +#if defined(DEBUG_BRIDGE_KM) + { + DI_ITERATOR_CB sIter = { + .pfnStart = BridgeStatsDIStart, + .pfnStop = BridgeStatsDIStop, + .pfnNext = 
BridgeStatsDINext, + .pfnShow = BridgeStatsDIShow, + .pfnWrite = BridgeStatsWrite + }; + + eError = DICreateEntry("bridge_stats", NULL, &sIter, + &g_BridgeDispatchTable[0], + DI_ENTRY_TYPE_GENERIC, + &gpsDIBridgeStatsEntry); + PVR_LOG_GOTO_IF_ERROR(eError, "DICreateEntry", error_); + } +#endif + + return PVRSRV_OK; + +error_: + if (g_hDriverThreadEventObject) { + OSEventObjectDestroy(g_hDriverThreadEventObject); + g_hDriverThreadEventObject = NULL; + } + + return eError; +} + +PVRSRV_ERROR OSPlatformBridgeDeInit(void) +{ + PVRSRV_ERROR eError; + +#if defined(DEBUG_BRIDGE_KM) + if (gpsDIBridgeStatsEntry != NULL) + { + DIDestroyEntry(gpsDIBridgeStatsEntry); + } +#endif + + eError = DeinitDMABUFBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitDMABUFBridge"); + + if (g_hDriverThreadEventObject != NULL) { + OSEventObjectDestroy(g_hDriverThreadEventObject); + g_hDriverThreadEventObject = NULL; + } + + return eError; +} + +PVRSRV_ERROR LinuxBridgeBlockClientsAccess(IMG_BOOL bShutdown) +{ + PVRSRV_ERROR eError; + IMG_HANDLE hEvent; + + eError = OSEventObjectOpen(g_hDriverThreadEventObject, &hEvent); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to open event object", __func__)); + return eError; + } + + if (OSAtomicCompareExchange(&g_iDriverSuspended, _DRIVER_NOT_SUSPENDED, + _DRIVER_SUSPENDED) == _DRIVER_SUSPENDED) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Driver is already suspended", __func__)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto out_put; + } + + /* now wait for any threads currently in the server to exit */ + while (OSAtomicRead(&g_iNumActiveDriverThreads) != 0 || + (OSAtomicRead(&g_iNumActiveKernelThreads) != 0 && !bShutdown)) + { + if (OSAtomicRead(&g_iNumActiveDriverThreads) != 0) + { + PVR_LOG(("%s: waiting for user threads (%d)", __func__, + OSAtomicRead(&g_iNumActiveDriverThreads))); + } +#ifdef ANDROID + if (OSAtomicRead(&g_iNumActiveKernelThreads) != 0) + { + PVR_LOG(("%s: waiting for kernel threads (%d)", __func__, + 
OSAtomicRead(&g_iNumActiveKernelThreads))); + } +#endif /* ANDROID */ + /* Regular wait is called here (and not OSEventObjectWaitKernel) because + * this code is executed by the caller of .suspend/.shutdown callbacks + * which is most likely PM (or other actor responsible for suspend + * process). Because of that this thread shouldn't and most likely + * event cannot be frozen. */ + OSEventObjectWait(hEvent); + } + +out_put: + OSEventObjectClose(hEvent); + + return eError; +} + +PVRSRV_ERROR LinuxBridgeUnblockClientsAccess(void) +{ + PVRSRV_ERROR eError; + + /* resume the driver and then signal so any waiting threads wake up */ + if (OSAtomicCompareExchange(&g_iDriverSuspended, _DRIVER_SUSPENDED, + _DRIVER_NOT_SUSPENDED) == _DRIVER_NOT_SUSPENDED) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Driver is not suspended", __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + eError = OSEventObjectSignal(g_hDriverThreadEventObject); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: OSEventObjectSignal failed: %s", + __func__, PVRSRVGetErrorString(eError))); + } + + return eError; +} + +static PVRSRV_ERROR LinuxBridgeSignalIfSuspended(void) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + if (OSAtomicRead(&g_iDriverSuspended) == _DRIVER_SUSPENDED) + { + PVRSRV_ERROR eError = OSEventObjectSignal(g_hDriverThreadEventObject); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to signal driver thread event" + " object: %s", __func__, PVRSRVGetErrorString(eError))); + } + } + + return eError; +} + +void LinuxBridgeNumActiveKernelThreadsIncrement(void) +{ + OSAtomicIncrement(&g_iNumActiveKernelThreads); +} + +void LinuxBridgeNumActiveKernelThreadsDecrement(void) +{ + OSAtomicDecrement(&g_iNumActiveKernelThreads); + PVR_ASSERT(OSAtomicRead(&g_iNumActiveKernelThreads) >= 0); + + /* Signal on every decrement in case LinuxBridgeBlockClientsAccess() is + * waiting for the threads to freeze. 
+ * (error is logged in called function so ignore, we can't do much with + * it anyway) */ + (void) LinuxBridgeSignalIfSuspended(); +} + +static PVRSRV_ERROR _WaitForDriverUnsuspend(void) +{ + PVRSRV_ERROR eError; + IMG_HANDLE hEvent; + + eError = OSEventObjectOpen(g_hDriverThreadEventObject, &hEvent); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to open event object", __func__)); + return eError; + } + + while (OSAtomicRead(&g_iDriverSuspended) == _DRIVER_SUSPENDED) + { + /* we should be able to use normal (not kernel) wait here since + * we were just unfrozen and most likely we're not going to + * be frozen again (?) */ + OSEventObjectWait(hEvent); + } + + OSEventObjectClose(hEvent); + + return PVRSRV_OK; +} + +PVRSRV_ERROR PVRSRVDriverThreadEnter(void) +{ + PVRSRV_ERROR eError; + + /* increment first so there is no race between this value and + * g_iDriverSuspended in LinuxBridgeBlockClientsAccess() */ + OSAtomicIncrement(&g_iNumActiveDriverThreads); + + if (OSAtomicRead(&g_iDriverSuspended) == _DRIVER_SUSPENDED) + { + /* decrement here because the driver is going to be suspended and + * this thread is going to be frozen so we don't want to wait for + * it in LinuxBridgeBlockClientsAccess() */ + OSAtomicDecrement(&g_iNumActiveDriverThreads); + + /* during suspend procedure this will put the current thread to + * the freezer but during shutdown this will just return */ + try_to_freeze(); + + /* if the thread was unfrozen but the flag is not yet set to + * _DRIVER_NOT_SUSPENDED wait for it + * in case this is a shutdown the thread was not frozen so we'll + * wait here indefinitely but this is ok (and this is in fact what + * we want) because no thread should be entering the driver in such + * case */ + eError = _WaitForDriverUnsuspend(); + + /* increment here because that means that the thread entered the + * driver */ + OSAtomicIncrement(&g_iNumActiveDriverThreads); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to 
wait for driver" + " unsuspend: %s", __func__, + PVRSRVGetErrorString(eError))); + return eError; + } + } + + return PVRSRV_OK; +} + +void PVRSRVDriverThreadExit(void) +{ + OSAtomicDecrement(&g_iNumActiveDriverThreads); + /* if the driver is being suspended then we need to signal the + * event object as the thread suspending the driver is waiting + * for active threads to exit + * error is logged in called function so ignore returned error + */ + (void) LinuxBridgeSignalIfSuspended(); +} + +int +PVRSRV_BridgeDispatchKM(struct drm_device __maybe_unused *dev, void *arg, struct drm_file *pDRMFile) +{ + struct drm_pvr_srvkm_cmd *psSrvkmCmd = (struct drm_pvr_srvkm_cmd *) arg; + PVRSRV_BRIDGE_PACKAGE sBridgePackageKM = { 0 }; + CONNECTION_DATA *psConnection = LinuxConnectionFromFile(pDRMFile->filp); + PVRSRV_ERROR error; + + if (psConnection == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Connection is closed", __func__)); + return -EFAULT; + } + + PVR_ASSERT(psSrvkmCmd != NULL); + + DRM_DEBUG("tgid=%d, tgid_connection=%d, bridge_id=%d, func_id=%d", + task_tgid_nr(current), + ((ENV_CONNECTION_DATA *)PVRSRVConnectionPrivateData(psConnection))->owner, + psSrvkmCmd->bridge_id, + psSrvkmCmd->bridge_func_id); + + error = PVRSRVDriverThreadEnter(); + PVR_LOG_GOTO_IF_ERROR(error, "PVRSRVDriverThreadEnter", e0); + + sBridgePackageKM.ui32BridgeID = psSrvkmCmd->bridge_id; + sBridgePackageKM.ui32FunctionID = psSrvkmCmd->bridge_func_id; + sBridgePackageKM.ui32Size = sizeof(sBridgePackageKM); + sBridgePackageKM.pvParamIn = (void __user *)(uintptr_t)psSrvkmCmd->in_data_ptr; + sBridgePackageKM.ui32InBufferSize = psSrvkmCmd->in_data_size; + sBridgePackageKM.pvParamOut = (void __user *)(uintptr_t)psSrvkmCmd->out_data_ptr; + sBridgePackageKM.ui32OutBufferSize = psSrvkmCmd->out_data_size; + + error = BridgedDispatchKM(psConnection, &sBridgePackageKM); + + PVRSRVDriverThreadExit(); + +e0: + return OSPVRSRVToNativeError(error); +} + +int +PVRSRV_MMap(struct file *pFile, struct vm_area_struct 
*ps_vma) +{ + CONNECTION_DATA *psConnection = LinuxConnectionFromFile(pFile); + IMG_HANDLE hSecurePMRHandle = (IMG_HANDLE)((uintptr_t)ps_vma->vm_pgoff); + PMR *psPMR; + PVRSRV_ERROR eError; + + if (psConnection == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "Invalid connection data")); + return -ENOENT; + } + + eError = PVRSRVDriverThreadEnter(); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVDriverThreadEnter", e0); + + /* + * The bridge lock used here to protect PVRSRVLookupHandle is replaced + * by a specific lock considering that the handle functions have now + * their own lock. This change was necessary to solve the lockdep issues + * related with the PVRSRV_MMap. + */ + + eError = PVRSRVLookupHandle(psConnection->psHandleBase, + (void **)&psPMR, + hSecurePMRHandle, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + IMG_TRUE); + if (eError != PVRSRV_OK) + { + goto e0; + } + + mutex_lock(&g_sMMapMutex); + /* Note: PMRMMapPMR will take a reference on the PMR. + * Unref the handle immediately, because we have now done + * the required operation on the PMR (whether it succeeded or not) + */ + eError = PMRMMapPMR(psPMR, ps_vma); + mutex_unlock(&g_sMMapMutex); + PVRSRVReleaseHandle(psConnection->psHandleBase, hSecurePMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PMRMMapPMR failed (%s)", + __func__, PVRSRVGetErrorString(eError))); + goto e0; + } + + PVRSRVDriverThreadExit(); + + return 0; + +e0: + PVRSRVDriverThreadExit(); + + PVR_DPF((PVR_DBG_ERROR, "Unable to translate error %d", eError)); + PVR_ASSERT(eError != PVRSRV_OK); + + return -ENOENT; // -EAGAIN // or what? +} diff --git a/drivers/gpu/drm/phytium/octopus/pvr_bridge_k.h b/drivers/gpu/drm/phytium/octopus/pvr_bridge_k.h new file mode 100644 index 000000000000..b334fd5fdc3d --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_bridge_k.h @@ -0,0 +1,103 @@ +/*************************************************************************/ /*! 
+@File +@Title PVR Bridge Module (kernel side) +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Receives calls from the user portion of services and + despatches them to functions in the kernel portion. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef PVR_BRIDGE_K_H +#define PVR_BRIDGE_K_H + +#include "pvrsrv_error.h" + +/*! +****************************************************************************** + @Function LinuxBridgeBlockClientsAccess + @Description This function will wait for any existing threads in the Server + to exit and then disable access to the driver. New threads will + not be allowed to enter the Server until the driver is + unsuspended (see LinuxBridgeUnblockClientsAccess). + @Input bShutdown this flag indicates that the function was called + from a shutdown callback and therefore it will + not wait for the kernel threads to get frozen + (because this doesn't happen during shutdown + procedure) + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR LinuxBridgeBlockClientsAccess(IMG_BOOL bShutdown); + +/*! +****************************************************************************** + @Function LinuxBridgeUnblockClientsAccess + @Description This function will re-enable the bridge and allow any threads + waiting to enter the Server to continue. 
+ @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR LinuxBridgeUnblockClientsAccess(void); + +void LinuxBridgeNumActiveKernelThreadsIncrement(void); +void LinuxBridgeNumActiveKernelThreadsDecrement(void); + +/*! +****************************************************************************** + @Function PVRSRVDriverThreadEnter + @Description Increments number of client threads currently operating + in the driver's context. + If the driver is currently being suspended this function + will call try_to_freeze() on behalf of the client thread. + When the driver is resumed the function will exit and allow + the thread into the driver. + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR PVRSRVDriverThreadEnter(void); + +/*! +****************************************************************************** + @Function PVRSRVDriverThreadExit + @Description Decrements the number of client threads currently operating + in the driver's context to match the call to + PVRSRVDriverThreadEnter(). + The function also signals the driver that a thread left the + driver context so if it's waiting to suspend it knows that + the number of threads decreased. +******************************************************************************/ +void PVRSRVDriverThreadExit(void); + +#endif /* PVR_BRIDGE_K_H */ diff --git a/drivers/gpu/drm/phytium/octopus/pvr_buffer_sync.c b/drivers/gpu/drm/phytium/octopus/pvr_buffer_sync.c new file mode 100644 index 000000000000..e2fa0e84a7c3 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_buffer_sync.c @@ -0,0 +1,585 @@ +/* + * @File + * @Title Linux buffer sync interface + * @Codingstyle LinuxKernel + * @Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved + * @License Dual MIT/GPLv2 + * + * The contents of this file are subject to the MIT license as set out below. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * Alternatively, the contents of this file may be used under the terms of + * the GNU General Public License Version 2 ("GPL") in which case the provisions + * of GPL are applicable instead of those above. + * + * If you wish to allow use of your version of this file only under the terms of + * GPL, and not to allow others to use your version of this file under the terms + * of the MIT license, indicate your decision by deleting the provisions above + * and replace them with the notice and other provisions required by GPL as set + * out in the file called "GPL-COPYING" included in this distribution. If you do + * not delete the provisions above, a recipient may use your version of this file + * under the terms of either the MIT license or GPL. + * + * This License is also included in this distribution in the file called + * "MIT-COPYING". 
+ * + * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS + * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING + * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#include + +#include "services_kernel_client.h" +#include "pvr_dma_resv.h" +#include "pvr_buffer_sync.h" +#include "pvr_buffer_sync_shared.h" +#include "pvr_drv.h" +#include "pvr_fence.h" + + +struct pvr_buffer_sync_context { + struct mutex ctx_lock; + struct pvr_fence_context *fence_ctx; + struct ww_acquire_ctx acquire_ctx; +}; + +struct pvr_buffer_sync_check_data { + struct dma_fence_cb base; + + u32 nr_fences; + struct pvr_fence **fences; +}; + +struct pvr_buffer_sync_append_data { + struct pvr_buffer_sync_context *ctx; + + u32 nr_pmrs; + struct _PMR_ **pmrs; + u32 *pmr_flags; + + struct pvr_fence *update_fence; + struct pvr_buffer_sync_check_data *check_data; +}; + + +static struct dma_resv * +pmr_reservation_object_get(struct _PMR_ *pmr) +{ + struct dma_buf *dmabuf; + + dmabuf = PhysmemGetDmaBuf(pmr); + if (dmabuf) + return dmabuf->resv; + + return NULL; +} + +static int +pvr_buffer_sync_pmrs_lock(struct pvr_buffer_sync_context *ctx, + u32 nr_pmrs, + struct _PMR_ **pmrs) +{ + struct dma_resv *resv, *cresv = NULL, *lresv = NULL; + int i, err; + struct ww_acquire_ctx *acquire_ctx = &ctx->acquire_ctx; + + mutex_lock(&ctx->ctx_lock); + + ww_acquire_init(acquire_ctx, &reservation_ww_class); +retry: + for (i = 0; i < nr_pmrs; i++) { + resv = pmr_reservation_object_get(pmrs[i]); + if (!resv) { + pr_err("%s: Failed to get reservation object from pmr %p\n", + __func__, pmrs[i]); + err = -EINVAL; + goto fail; + } 
+ + if (resv != lresv) { + err = ww_mutex_lock_interruptible(&resv->lock, + acquire_ctx); + if (err) { + cresv = (err == -EDEADLK) ? resv : NULL; + goto fail; + } + } else { + lresv = NULL; + } + } + + ww_acquire_done(acquire_ctx); + + return 0; + +fail: + while (i--) { + resv = pmr_reservation_object_get(pmrs[i]); + if (WARN_ON_ONCE(!resv)) + continue; + ww_mutex_unlock(&resv->lock); + } + + if (lresv) + ww_mutex_unlock(&lresv->lock); + + if (cresv) { + err = ww_mutex_lock_slow_interruptible(&cresv->lock, + acquire_ctx); + if (!err) { + lresv = cresv; + cresv = NULL; + goto retry; + } + } + + ww_acquire_fini(acquire_ctx); + + mutex_unlock(&ctx->ctx_lock); + return err; +} + +static void +pvr_buffer_sync_pmrs_unlock(struct pvr_buffer_sync_context *ctx, + u32 nr_pmrs, + struct _PMR_ **pmrs) +{ + struct dma_resv *resv; + int i; + struct ww_acquire_ctx *acquire_ctx = &ctx->acquire_ctx; + + for (i = 0; i < nr_pmrs; i++) { + resv = pmr_reservation_object_get(pmrs[i]); + if (WARN_ON_ONCE(!resv)) + continue; + ww_mutex_unlock(&resv->lock); + } + + ww_acquire_fini(acquire_ctx); + + mutex_unlock(&ctx->ctx_lock); +} + +static u32 +pvr_buffer_sync_pmrs_fence_count(u32 nr_pmrs, struct _PMR_ **pmrs, + u32 *pmr_flags) +{ + struct dma_resv *resv; + struct dma_resv_list *resv_list; + struct dma_fence *fence; + u32 fence_count = 0; + bool exclusive; + int i; + + for (i = 0; i < nr_pmrs; i++) { + exclusive = !!(pmr_flags[i] & PVR_BUFFER_FLAG_WRITE); + + resv = pmr_reservation_object_get(pmrs[i]); + if (WARN_ON_ONCE(!resv)) + continue; + + resv_list = dma_resv_get_list(resv); + fence = dma_resv_get_excl(resv); + + if (fence && + (!exclusive || !resv_list || !resv_list->shared_count)) + fence_count++; + + if (exclusive && resv_list) + fence_count += resv_list->shared_count; + } + + return fence_count; +} + +static struct pvr_buffer_sync_check_data * +pvr_buffer_sync_check_fences_create(struct pvr_fence_context *fence_ctx, + PSYNC_CHECKPOINT_CONTEXT sync_checkpoint_ctx, + u32 nr_pmrs, 
+ struct _PMR_ **pmrs, + u32 *pmr_flags) +{ + struct pvr_buffer_sync_check_data *data; + struct dma_resv *resv; + struct dma_resv_list *resv_list; + struct dma_fence *fence; + u32 fence_count; + bool exclusive; + int i, j; + int err; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return NULL; + + fence_count = pvr_buffer_sync_pmrs_fence_count(nr_pmrs, pmrs, + pmr_flags); + if (fence_count) { + data->fences = kcalloc(fence_count, sizeof(*data->fences), + GFP_KERNEL); + if (!data->fences) + goto err_check_data_free; + } + + for (i = 0; i < nr_pmrs; i++) { + resv = pmr_reservation_object_get(pmrs[i]); + if (WARN_ON_ONCE(!resv)) + continue; + + exclusive = !!(pmr_flags[i] & PVR_BUFFER_FLAG_WRITE); + if (!exclusive) { + err = dma_resv_reserve_shared(resv +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)) + , 1 +#endif + ); + if (err) + goto err_destroy_fences; + } + + resv_list = dma_resv_get_list(resv); + fence = dma_resv_get_excl(resv); + + if (fence && + (!exclusive || !resv_list || !resv_list->shared_count)) { + data->fences[data->nr_fences++] = + pvr_fence_create_from_fence(fence_ctx, + sync_checkpoint_ctx, + fence, + PVRSRV_NO_FENCE, + "exclusive check fence"); + if (!data->fences[data->nr_fences - 1]) { + data->nr_fences--; + PVR_FENCE_TRACE(fence, + "waiting on exclusive fence\n"); + WARN_ON(dma_fence_wait(fence, true) <= 0); + } + } + + if (exclusive && resv_list) { + for (j = 0; j < resv_list->shared_count; j++) { + fence = rcu_dereference_protected(resv_list->shared[j], + dma_resv_held(resv)); + data->fences[data->nr_fences++] = + pvr_fence_create_from_fence(fence_ctx, + sync_checkpoint_ctx, + fence, + PVRSRV_NO_FENCE, + "check fence"); + if (!data->fences[data->nr_fences - 1]) { + data->nr_fences--; + PVR_FENCE_TRACE(fence, + "waiting on non-exclusive fence\n"); + WARN_ON(dma_fence_wait(fence, true) <= 0); + } + } + } + } + + WARN_ON((i != nr_pmrs) || (data->nr_fences != fence_count)); + + return data; + +err_destroy_fences: + for (i = 0; i 
< data->nr_fences; i++) + pvr_fence_destroy(data->fences[i]); + kfree(data->fences); +err_check_data_free: + kfree(data); + return NULL; +} + +static void +pvr_buffer_sync_check_fences_destroy(struct pvr_buffer_sync_check_data *data) +{ + int i; + + for (i = 0; i < data->nr_fences; i++) + pvr_fence_destroy(data->fences[i]); + + kfree(data->fences); + kfree(data); +} + +struct pvr_buffer_sync_context * +pvr_buffer_sync_context_create(struct device *dev, const char *name) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct pvr_drm_private *priv = ddev->dev_private; + struct pvr_buffer_sync_context *ctx; + int err; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) { + err = -ENOMEM; + goto err_exit; + } + + ctx->fence_ctx = pvr_fence_context_create(priv->dev_node, + priv->fence_status_wq, + name); + if (!ctx->fence_ctx) { + err = -ENOMEM; + goto err_free_ctx; + } + + mutex_init(&ctx->ctx_lock); + + return ctx; + +err_free_ctx: + kfree(ctx); +err_exit: + return ERR_PTR(err); +} + +void +pvr_buffer_sync_context_destroy(struct pvr_buffer_sync_context *ctx) +{ + pvr_fence_context_destroy(ctx->fence_ctx); + kfree(ctx); +} + +int +pvr_buffer_sync_resolve_and_create_fences(struct pvr_buffer_sync_context *ctx, + PSYNC_CHECKPOINT_CONTEXT sync_checkpoint_ctx, + u32 nr_pmrs, + struct _PMR_ **pmrs, + u32 *pmr_flags, + u32 *nr_fence_checkpoints_out, + PSYNC_CHECKPOINT **fence_checkpoints_out, + PSYNC_CHECKPOINT *update_checkpoints_out, + struct pvr_buffer_sync_append_data **data_out) +{ + struct pvr_buffer_sync_append_data *data; + PSYNC_CHECKPOINT *fence_checkpoints; + const size_t data_size = sizeof(*data); + const size_t pmrs_size = sizeof(*pmrs) * nr_pmrs; + const size_t pmr_flags_size = sizeof(*pmr_flags) * nr_pmrs; + int i; + int j; + int err; + + if (unlikely((nr_pmrs && !(pmrs && pmr_flags)) || + !nr_fence_checkpoints_out || !fence_checkpoints_out || + !update_checkpoints_out)) + return -EINVAL; + + for (i = 0; i < nr_pmrs; i++) { + if 
(unlikely(!(pmr_flags[i] & PVR_BUFFER_FLAG_MASK))) { + pr_err("%s: Invalid flags %#08x for pmr %p\n", + __func__, pmr_flags[i], pmrs[i]); + return -EINVAL; + } + } + +#if defined(NO_HARDWARE) + /* + * For NO_HARDWARE there's no checking or updating of sync checkpoints + * which means SW waits on our fences will cause a deadlock (since they + * will never be signalled). Avoid this by not creating any fences. + */ + nr_pmrs = 0; +#endif + + if (!nr_pmrs) { + *nr_fence_checkpoints_out = 0; + *fence_checkpoints_out = NULL; + *update_checkpoints_out = NULL; + *data_out = NULL; + + return 0; + } + + data = kzalloc(data_size + pmrs_size + pmr_flags_size, GFP_KERNEL); + if (unlikely(!data)) + return -ENOMEM; + + data->ctx = ctx; + data->pmrs = (struct _PMR_ **)(void *)(data + 1); + data->pmr_flags = (u32 *)(void *)(data->pmrs + nr_pmrs); + + /* + * It's expected that user space will provide a set of unique PMRs + * but, as a PMR can have multiple handles, it's still possible to + * end up here with duplicates. Take this opportunity to filter out + * any remaining duplicates (updating flags when necessary) before + * trying to process them further. + */ + for (i = 0; i < nr_pmrs; i++) { + for (j = 0; j < data->nr_pmrs; j++) { + if (data->pmrs[j] == pmrs[i]) { + data->pmr_flags[j] |= pmr_flags[i]; + break; + } + } + + if (j == data->nr_pmrs) { + data->pmrs[j] = pmrs[i]; + data->pmr_flags[j] = pmr_flags[i]; + data->nr_pmrs++; + } + } + + err = pvr_buffer_sync_pmrs_lock(ctx, data->nr_pmrs, data->pmrs); + if (unlikely(err)) { + /* + * -EINTR is returned if a signal arrives while trying to acquire a PMR + * lock. In this case the operation should be retried after the signal + * has been serviced. As this is expected behaviour, don't print an + * error in this case. 
+ */ + if (err != -EINTR) { + pr_err("%s: failed to lock pmrs (errno=%d)\n", + __func__, err); + } + goto err_free_data; + } + + /* create the check data */ + data->check_data = pvr_buffer_sync_check_fences_create(ctx->fence_ctx, + sync_checkpoint_ctx, + data->nr_pmrs, + data->pmrs, + data->pmr_flags); + if (unlikely(!data->check_data)) { + err = -ENOMEM; + goto err_pmrs_unlock; + } + + fence_checkpoints = kcalloc(data->check_data->nr_fences, + sizeof(*fence_checkpoints), + GFP_KERNEL); + if (fence_checkpoints) { + pvr_fence_get_checkpoints(data->check_data->fences, + data->check_data->nr_fences, + fence_checkpoints); + } else { + if (unlikely(data->check_data->nr_fences)) { + err = -ENOMEM; + goto err_free_check_data; + } + } + + /* create the update fence */ + data->update_fence = pvr_fence_create(ctx->fence_ctx, + sync_checkpoint_ctx, + SYNC_CHECKPOINT_FOREIGN_CHECKPOINT, "update fence"); + if (unlikely(!data->update_fence)) { + err = -ENOMEM; + goto err_free_fence_checkpoints; + } + + /* + * We need to clean up the fences once the HW has finished with them. + * We can do this using fence callbacks. However, instead of adding a + * callback to every fence, which would result in more work, we can + * simply add one to the update fence since this will be the last fence + * to be signalled. This callback can do all the necessary clean up. + * + * Note: we take an additional reference on the update fence in case + * it signals before we can add it to a reservation object. 
+ */ + PVR_FENCE_TRACE(&data->update_fence->base, + "create fence calling dma_fence_get\n"); + dma_fence_get(&data->update_fence->base); + + *nr_fence_checkpoints_out = data->check_data->nr_fences; + *fence_checkpoints_out = fence_checkpoints; + *update_checkpoints_out = pvr_fence_get_checkpoint(data->update_fence); + *data_out = data; + + return 0; + +err_free_fence_checkpoints: + kfree(fence_checkpoints); +err_free_check_data: + pvr_buffer_sync_check_fences_destroy(data->check_data); +err_pmrs_unlock: + pvr_buffer_sync_pmrs_unlock(ctx, data->nr_pmrs, data->pmrs); +err_free_data: + kfree(data); + return err; +} + +void +pvr_buffer_sync_kick_succeeded(struct pvr_buffer_sync_append_data *data) +{ + struct dma_resv *resv; + int i; + + dma_fence_enable_sw_signaling(&data->update_fence->base); + + for (i = 0; i < data->nr_pmrs; i++) { + resv = pmr_reservation_object_get(data->pmrs[i]); + if (WARN_ON_ONCE(!resv)) + continue; + + if (data->pmr_flags[i] & PVR_BUFFER_FLAG_WRITE) { + PVR_FENCE_TRACE(&data->update_fence->base, + "added exclusive fence (%s) to resv %p\n", + data->update_fence->name, resv); + dma_resv_add_excl_fence(resv, + &data->update_fence->base); + } else if (data->pmr_flags[i] & PVR_BUFFER_FLAG_READ) { + PVR_FENCE_TRACE(&data->update_fence->base, + "added non-exclusive fence (%s) to resv %p\n", + data->update_fence->name, resv); + dma_resv_add_shared_fence(resv, + &data->update_fence->base); + } + } + + /* + * Now that the fence has been added to the necessary + * reservation objects we can safely drop the extra reference + * we took in pvr_buffer_sync_resolve_and_create_fences(). 
+ */ + dma_fence_put(&data->update_fence->base); + pvr_buffer_sync_pmrs_unlock(data->ctx, data->nr_pmrs, + data->pmrs); + + /* destroy the check fences */ + pvr_buffer_sync_check_fences_destroy(data->check_data); + /* destroy the update fence */ + pvr_fence_destroy(data->update_fence); + + /* free the append data */ + kfree(data); +} + +void +pvr_buffer_sync_kick_failed(struct pvr_buffer_sync_append_data *data) +{ + + /* drop the extra reference we took on the update fence in + * pvr_buffer_sync_resolve_and_create_fences(). + */ + dma_fence_put(&data->update_fence->base); + + if (data->nr_pmrs > 0) + pvr_buffer_sync_pmrs_unlock(data->ctx, data->nr_pmrs, + data->pmrs); + + /* destroy the check fences */ + pvr_buffer_sync_check_fences_destroy(data->check_data); + /* destroy the update fence */ + pvr_fence_destroy(data->update_fence); + + /* free the append data */ + kfree(data); +} diff --git a/drivers/gpu/drm/phytium/octopus/pvr_buffer_sync.h b/drivers/gpu/drm/phytium/octopus/pvr_buffer_sync.h new file mode 100644 index 000000000000..ef53cce062d7 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_buffer_sync.h @@ -0,0 +1,125 @@ +/* + * @File pvr_buffer_sync.h + * @Title PhytiumVR Linux buffer sync interface + * @Codingstyle LinuxKernel + * @Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved + * @License Dual MIT/GPLv2 + * + * The contents of this file are subject to the MIT license as set out below. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * Alternatively, the contents of this file may be used under the terms of + * the GNU General Public License Version 2 ("GPL") in which case the provisions + * of GPL are applicable instead of those above. + * + * If you wish to allow use of your version of this file only under the terms of + * GPL, and not to allow others to use your version of this file under the terms + * of the MIT license, indicate your decision by deleting the provisions above + * and replace them with the notice and other provisions required by GPL as set + * out in the file called "GPL-COPYING" included in this distribution. If you do + * not delete the provisions above, a recipient may use your version of this file + * under the terms of either the MIT license or GPL. + * + * This License is also included in this distribution in the file called + * "MIT-COPYING". 
+ * + * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS + * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING + * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef PVR_BUFFER_SYNC_H +#define PVR_BUFFER_SYNC_H + +#include +#include +#include + +struct _PMR_; +struct pvr_buffer_sync_context; +struct pvr_buffer_sync_append_data; + +/** + * pvr_buffer_sync_context_create - creates a buffer sync context + * @dev: Linux device + * @name: context name (used for debugging) + * + * pvr_buffer_sync_context_destroy() should be used to clean up the buffer + * sync context. + * + * Return: A buffer sync context or NULL if it fails for any reason. 
+ */ +struct pvr_buffer_sync_context * +pvr_buffer_sync_context_create(struct device *dev, const char *name); + +/** + * pvr_buffer_sync_context_destroy() - frees a buffer sync context + * @ctx: buffer sync context + */ +void +pvr_buffer_sync_context_destroy(struct pvr_buffer_sync_context *ctx); + +/** + * pvr_buffer_sync_resolve_and_create_fences() - create checkpoints from + * buffers + * @ctx: buffer sync context + * @sync_checkpoint_ctx: context in which to create sync checkpoints + * @nr_pmrs: number of buffer objects (PMRs) + * @pmrs: buffer array + * @pmr_flags: internal flags + * @nr_fence_checkpoints_out: returned number of fence sync checkpoints + * @fence_checkpoints_out: returned array of fence sync checkpoints + * @update_checkpoint_out: returned update sync checkpoint + * @data_out: returned buffer sync data + * + * After this call, either pvr_buffer_sync_kick_succeeded() or + * pvr_buffer_sync_kick_failed() must be called. + * + * Return: 0 on success or an error code otherwise. + */ +int +pvr_buffer_sync_resolve_and_create_fences(struct pvr_buffer_sync_context *ctx, + PSYNC_CHECKPOINT_CONTEXT sync_checkpoint_ctx, + u32 nr_pmrs, + struct _PMR_ **pmrs, + u32 *pmr_flags, + u32 *nr_fence_checkpoints_out, + PSYNC_CHECKPOINT **fence_checkpoints_out, + PSYNC_CHECKPOINT *update_checkpoint_out, + struct pvr_buffer_sync_append_data **data_out); + +/** + * pvr_buffer_sync_kick_succeeded() - cleans up after a successful kick + * operation + * @data: buffer sync data returned by + * pvr_buffer_sync_resolve_and_create_fences() + * + * Should only be called following pvr_buffer_sync_resolve_and_create_fences(). + */ +void +pvr_buffer_sync_kick_succeeded(struct pvr_buffer_sync_append_data *data); + +/** + * pvr_buffer_sync_kick_failed() - cleans up after a failed kick operation + * @data: buffer sync data returned by + * pvr_buffer_sync_resolve_and_create_fences() + * + * Should only be called following pvr_buffer_sync_resolve_and_create_fences(). 
+ */ +void +pvr_buffer_sync_kick_failed(struct pvr_buffer_sync_append_data *data); + +#endif /* PVR_BUFFER_SYNC_H */ diff --git a/drivers/gpu/drm/phytium/octopus/pvr_buffer_sync_shared.h b/drivers/gpu/drm/phytium/octopus/pvr_buffer_sync_shared.h new file mode 100644 index 000000000000..4005154c3d63 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_buffer_sync_shared.h @@ -0,0 +1,57 @@ +/*************************************************************************/ /*! +@File +@Title PVR buffer sync shared +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Shared definitions between client and server +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef PVR_BUFFER_SYNC_SHARED_H +#define PVR_BUFFER_SYNC_SHARED_H + +#define PVR_BUFFER_FLAG_READ (1 << 0) +#define PVR_BUFFER_FLAG_WRITE (1 << 1) +#define PVR_BUFFER_FLAG_MASK (PVR_BUFFER_FLAG_READ | \ + PVR_BUFFER_FLAG_WRITE) + +/* Maximum number of PMRs passed + * in a kick when using buffer sync + */ +#define PVRSRV_MAX_BUFFERSYNC_PMRS 32 + +#endif /* PVR_BUFFER_SYNC_SHARED_H */ diff --git a/drivers/gpu/drm/phytium/octopus/pvr_counting_timeline.c b/drivers/gpu/drm/phytium/octopus/pvr_counting_timeline.c new file mode 100644 index 000000000000..1884b8b65bea --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_counting_timeline.c @@ -0,0 +1,307 @@ +/* + * @File + * @Title PhytiumVR Linux software "counting" timeline fence implementation + * @Codingstyle LinuxKernel + * @Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved + * @License Dual MIT/GPLv2 + * + * The contents of this file are subject to the MIT license as set out below. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * Alternatively, the contents of this file may be used under the terms of + * the GNU General Public License Version 2 ("GPL") in which case the provisions + * of GPL are applicable instead of those above. + * + * If you wish to allow use of your version of this file only under the terms of + * GPL, and not to allow others to use your version of this file under the terms + * of the MIT license, indicate your decision by deleting the provisions above + * and replace them with the notice and other provisions required by GPL as set + * out in the file called "GPL-COPYING" included in this distribution. If you do + * not delete the provisions above, a recipient may use your version of this file + * under the terms of either the MIT license or GPL. + * + * This License is also included in this distribution in the file called + * "MIT-COPYING". 
+ * + * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS + * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING + * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include + +#include "services_kernel_client.h" +#include "pvr_counting_timeline.h" +#include "pvr_sw_fence.h" + +#define PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, fmt, ...) \ + do { \ + if (pfnDumpDebugPrintf) \ + pfnDumpDebugPrintf(pvDumpDebugFile, fmt, \ + ## __VA_ARGS__); \ + else \ + pr_err(fmt "\n", ## __VA_ARGS__); \ + } while (0) + +struct pvr_counting_fence_timeline { + struct pvr_sw_fence_context *context; + + void *dbg_request_handle; + + spinlock_t active_fences_lock; + u64 current_value; /* guarded by active_fences_lock */ + u64 next_value; /* guarded by active_fences_lock */ + struct list_head active_fences; + + struct kref kref; +}; + +struct pvr_counting_fence { + u64 value; + struct dma_fence *fence; + struct list_head active_list_entry; +}; + +void pvr_counting_fence_timeline_dump_timeline( + void *data, + DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, + void *dump_debug_file) +{ + + struct pvr_counting_fence_timeline *timeline = + (struct pvr_counting_fence_timeline *) data; + unsigned long flags; + + spin_lock_irqsave(&timeline->active_fences_lock, flags); + + PVR_DUMPDEBUG_LOG(dump_debug_printf, + dump_debug_file, + "TL:%s SeqNum: %llu/%llu", + pvr_sw_fence_context_name( + timeline->context), + timeline->current_value, + timeline->next_value); + + spin_unlock_irqrestore(&timeline->active_fences_lock, flags); +} + +static void 
+pvr_counting_fence_timeline_debug_request(void *data, u32 verbosity, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + struct pvr_counting_fence_timeline *timeline = + (struct pvr_counting_fence_timeline *)data; + struct pvr_counting_fence *obj; + unsigned long flags; + char value[128]; + + if (DD_VERB_LVL_ENABLED(verbosity, DEBUG_REQUEST_VERBOSITY_MEDIUM)) { + spin_lock_irqsave(&timeline->active_fences_lock, flags); + pvr_sw_fence_context_value_str(timeline->context, value, + sizeof(value)); + PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, + "sw: %s @%s cur=%llu", + pvr_sw_fence_context_name(timeline->context), + value, timeline->current_value); + list_for_each_entry(obj, &timeline->active_fences, + active_list_entry) { + obj->fence->ops->fence_value_str(obj->fence, + value, sizeof(value)); + PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, + " @%s: val=%llu", value, obj->value); + } + spin_unlock_irqrestore(&timeline->active_fences_lock, flags); + } +} + +struct pvr_counting_fence_timeline *pvr_counting_fence_timeline_create( + void *dev_cookie, + const char *name) +{ + PVRSRV_ERROR srv_err; + struct pvr_counting_fence_timeline *timeline = + kmalloc(sizeof(*timeline), GFP_KERNEL); + + if (!timeline) + goto err_out; + + timeline->context = pvr_sw_fence_context_create(name, + "pvr_sw_sync"); + if (!timeline->context) + goto err_free_timeline; + + srv_err = PVRSRVRegisterDbgRequestNotify(&timeline->dbg_request_handle, + dev_cookie, + pvr_counting_fence_timeline_debug_request, + DEBUG_REQUEST_LINUXFENCE, + timeline); + if (srv_err != PVRSRV_OK) { + pr_err("%s: failed to register debug request callback (%s)\n", + __func__, PVRSRVGetErrorString(srv_err)); + goto err_free_timeline_ctx; + } + + timeline->current_value = 0; + timeline->next_value = 1; + kref_init(&timeline->kref); + spin_lock_init(&timeline->active_fences_lock); + INIT_LIST_HEAD(&timeline->active_fences); + +err_out: + return timeline; + +err_free_timeline_ctx: + 
 pvr_sw_fence_context_destroy(timeline->context);
+
/* Error-unwind tail of pvr_counting_fence_timeline_create(): destroy the sw
 * fence context (above), free the partially-built timeline and return NULL
 * via the shared err_out return path.
 */
+err_free_timeline:
+ kfree(timeline);
+ timeline = NULL;
+ goto err_out;
+}
+
/* Force-complete a timeline: signal, drop and free every sw fence still on
 * timeline->active_fences, under active_fences_lock. The WARN_ON fires first
 * because in normal operation the list is expected to be empty by the time
 * this is called; the loop is the recovery path for when it is not.
 */
+void pvr_counting_fence_timeline_force_complete(
+ struct pvr_counting_fence_timeline *timeline)
+{
+ struct list_head *entry, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&timeline->active_fences_lock, flags);
+
+ /* This is just a safety measure. Normally we should never see any
+ * unsignaled sw fences when we come here. Warn if we still do!
+ */
+ WARN_ON(!list_empty(&timeline->active_fences));
+
+ list_for_each_safe(entry, tmp, &timeline->active_fences) {
+ struct pvr_counting_fence *fence =
+ list_entry(entry, struct pvr_counting_fence,
+ active_list_entry);
/* Drop the list's reference taken via dma_fence_get() at creation. */
+ dma_fence_signal(fence->fence);
+ dma_fence_put(fence->fence);
+ fence->fence = NULL;
+ list_del(&fence->active_list_entry);
+ kfree(fence);
+ }
+ spin_unlock_irqrestore(&timeline->active_fences_lock, flags);
+}
+
/* kref release callback (invoked by kref_put when the refcount hits zero):
 * unregisters the debug-request notifier, destroys the sw fence context and
 * frees the timeline. Warns if fences are still pending at teardown.
 */
+static void pvr_counting_fence_timeline_destroy(
+ struct kref *kref)
+{
+ struct pvr_counting_fence_timeline *timeline =
+ container_of(kref, struct pvr_counting_fence_timeline, kref);
+
+ WARN_ON(!list_empty(&timeline->active_fences));
+
+ PVRSRVUnregisterDbgRequestNotify(timeline->dbg_request_handle);
+
+ pvr_sw_fence_context_destroy(timeline->context);
+ kfree(timeline);
+}
+
/* Drop one reference on @timeline; frees it via the destroy callback above
 * when the last reference goes away.
 */
+void pvr_counting_fence_timeline_put(
+ struct pvr_counting_fence_timeline *timeline)
+{
+ kref_put(&timeline->kref, pvr_counting_fence_timeline_destroy);
+}
+
/* Take an extra reference on @timeline. NULL-tolerant: returns NULL for a
 * NULL input so callers can chain it unconditionally.
 */
+struct pvr_counting_fence_timeline *pvr_counting_fence_timeline_get(
+ struct pvr_counting_fence_timeline *timeline)
+{
+ if (!timeline)
+ return NULL;
+ kref_get(&timeline->kref);
+ return timeline;
+}
+
/* Create a new sw fence on @timeline at sequence value next_value++, link a
 * tracking struct onto active_fences (holding its own dma_fence reference),
 * and optionally report the assigned sync point index. Body continues on the
 * next patch chunk.
 */
+struct dma_fence *pvr_counting_fence_create(
+ struct pvr_counting_fence_timeline *timeline, u64 *sync_pt_idx)
+{
+ unsigned long flags;
+ struct dma_fence *sw_fence;
+ struct pvr_counting_fence *fence = kmalloc(sizeof(*fence), GFP_KERNEL);
+
+ if (!fence)
+ return NULL;
+
+ sw_fence =
pvr_sw_fence_create(timeline->context); + if (!sw_fence) + goto err_free_fence; + + fence->fence = dma_fence_get(sw_fence); + + spin_lock_irqsave(&timeline->active_fences_lock, flags); + + fence->value = timeline->next_value++; + if (sync_pt_idx) + *sync_pt_idx = fence->value; + + list_add_tail(&fence->active_list_entry, &timeline->active_fences); + + spin_unlock_irqrestore(&timeline->active_fences_lock, flags); + + /* Counting fences can be signalled any time after creation */ + dma_fence_enable_sw_signaling(sw_fence); + + return sw_fence; + +err_free_fence: + kfree(fence); + return NULL; +} + +bool pvr_counting_fence_timeline_inc( + struct pvr_counting_fence_timeline *timeline, u64 *sync_pt_idx) +{ + struct list_head *entry, *tmp; + unsigned long flags; + bool res; + + spin_lock_irqsave(&timeline->active_fences_lock, flags); + + if (timeline->current_value == timeline->next_value-1) { + res = false; + goto exit_unlock; + } + + timeline->current_value++; + + if (sync_pt_idx) + *sync_pt_idx = timeline->current_value; + + list_for_each_safe(entry, tmp, &timeline->active_fences) { + struct pvr_counting_fence *fence = + list_entry(entry, struct pvr_counting_fence, + active_list_entry); + if (fence->value <= timeline->current_value) { + dma_fence_signal(fence->fence); + dma_fence_put(fence->fence); + fence->fence = NULL; + list_del(&fence->active_list_entry); + kfree(fence); + } + } + + res = true; + +exit_unlock: + spin_unlock_irqrestore(&timeline->active_fences_lock, flags); + + return res; +} diff --git a/drivers/gpu/drm/phytium/octopus/pvr_counting_timeline.h b/drivers/gpu/drm/phytium/octopus/pvr_counting_timeline.h new file mode 100644 index 000000000000..ea8bafd1be3c --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_counting_timeline.h @@ -0,0 +1,69 @@ +/* + * @File + * @Codingstyle LinuxKernel + * @Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved + * @License Dual MIT/GPLv2 + * + * The contents of this file are subject to the MIT license as set out below. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * Alternatively, the contents of this file may be used under the terms of + * the GNU General Public License Version 2 ("GPL") in which case the provisions + * of GPL are applicable instead of those above. + * + * If you wish to allow use of your version of this file only under the terms of + * GPL, and not to allow others to use your version of this file under the terms + * of the MIT license, indicate your decision by deleting the provisions above + * and replace them with the notice and other provisions required by GPL as set + * out in the file called "GPL-COPYING" included in this distribution. If you do + * not delete the provisions above, a recipient may use your version of this file + * under the terms of either the MIT license or GPL. + * + * This License is also included in this distribution in the file called + * "MIT-COPYING". 
+ * + * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS + * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING + * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#if !defined(__PVR_COUNTING_TIMELINE_H__) +#define __PVR_COUNTING_TIMELINE_H__ + +#include "pvr_linux_fence.h" + +struct pvr_counting_fence_timeline; + +void pvr_counting_fence_timeline_dump_timeline( + void *data, + DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, + void *dump_debug_file); + +struct pvr_counting_fence_timeline *pvr_counting_fence_timeline_create( + void *dev_cookie, + const char *name); +void pvr_counting_fence_timeline_put( + struct pvr_counting_fence_timeline *fence_timeline); +struct pvr_counting_fence_timeline *pvr_counting_fence_timeline_get( + struct pvr_counting_fence_timeline *fence_timeline); +struct dma_fence *pvr_counting_fence_create( + struct pvr_counting_fence_timeline *fence_timeline, u64 *sync_pt_idx); +bool pvr_counting_fence_timeline_inc( + struct pvr_counting_fence_timeline *fence_timeline, u64 *sync_pt_idx); +void pvr_counting_fence_timeline_force_complete( + struct pvr_counting_fence_timeline *fence_timeline); + +#endif /* !defined(__PVR_COUNTING_TIMELINE_H__) */ diff --git a/drivers/gpu/drm/phytium/octopus/pvr_debug.c b/drivers/gpu/drm/phytium/octopus/pvr_debug.c new file mode 100644 index 000000000000..f39ca14bc725 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_debug.c @@ -0,0 +1,481 @@ +/*************************************************************************/ /*! +@File +@Title Debug Functionality +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Description Provides kernel side Debug Functionality. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include +#include + +#include "img_types.h" +#include "img_defs.h" +#include "pvr_debug.h" +#include "linkage.h" +#include "pvrsrv.h" +#include "osfunc.h" +#include "di_server.h" + +#if defined(PVRSRV_NEED_PVR_DPF) + +/******** BUFFERED LOG MESSAGES ********/ + +/* Because we don't want to have to handle CCB wrapping, each buffered + * message is rounded up to PVRSRV_DEBUG_CCB_MESG_MAX bytes. This means + * there is the same fixed number of messages that can be stored, + * regardless of message length. 
+ */ + +#if defined(PVRSRV_DEBUG_CCB_MAX) + +#define PVRSRV_DEBUG_CCB_MESG_MAX PVR_MAX_DEBUG_MESSAGE_LEN + +typedef struct +{ + const IMG_CHAR *pszFile; + IMG_INT iLine; + IMG_UINT32 ui32TID; + IMG_UINT32 ui32PID; + IMG_CHAR pcMesg[PVRSRV_DEBUG_CCB_MESG_MAX]; + struct timeval sTimeVal; +} +PVRSRV_DEBUG_CCB; + +static PVRSRV_DEBUG_CCB gsDebugCCB[PVRSRV_DEBUG_CCB_MAX]; + +static IMG_UINT giOffset; + +/* protects access to gsDebugCCB */ +static DEFINE_SPINLOCK(gsDebugCCBLock); + +static void +AddToBufferCCB(const IMG_CHAR *pszFileName, IMG_UINT32 ui32Line, + const IMG_CHAR *szBuffer) +{ + unsigned long uiFlags; + + spin_lock_irqsave(&gsDebugCCBLock, uiFlags); + + gsDebugCCB[giOffset].pszFile = pszFileName; + gsDebugCCB[giOffset].iLine = ui32Line; + gsDebugCCB[giOffset].ui32TID = current->pid; + gsDebugCCB[giOffset].ui32PID = current->tgid; + + do_gettimeofday(&gsDebugCCB[giOffset].sTimeVal); + + OSStringLCopy(gsDebugCCB[giOffset].pcMesg, szBuffer, + PVRSRV_DEBUG_CCB_MESG_MAX); + + giOffset = (giOffset + 1) % PVRSRV_DEBUG_CCB_MAX; + + spin_unlock_irqrestore(&gsDebugCCBLock, uiFlags); +} + +void PVRSRVDebugPrintfDumpCCB(void) +{ + int i; + unsigned long uiFlags; + + spin_lock_irqsave(&gsDebugCCBLock, uiFlags); + + for (i = 0; i < PVRSRV_DEBUG_CCB_MAX; i++) + { + PVRSRV_DEBUG_CCB *psDebugCCBEntry = + &gsDebugCCB[(giOffset + i) % PVRSRV_DEBUG_CCB_MAX]; + + /* Early on, we won't have PVRSRV_DEBUG_CCB_MAX messages */ + if (!psDebugCCBEntry->pszFile) + { + continue; + } + + printk(KERN_ERR "%s:%d: (%ld.%ld, tid=%u, pid=%u) %s\n", + psDebugCCBEntry->pszFile, + psDebugCCBEntry->iLine, + (long)psDebugCCBEntry->sTimeVal.tv_sec, + (long)psDebugCCBEntry->sTimeVal.tv_usec, + psDebugCCBEntry->ui32TID, + psDebugCCBEntry->ui32PID, + psDebugCCBEntry->pcMesg); + + /* Clear this entry so it doesn't get printed the next time again. 
*/ + psDebugCCBEntry->pszFile = NULL; + } + + spin_unlock_irqrestore(&gsDebugCCBLock, uiFlags); +} + +#else /* defined(PVRSRV_DEBUG_CCB_MAX) */ + +static INLINE void +AddToBufferCCB(const IMG_CHAR *pszFileName, IMG_UINT32 ui32Line, + const IMG_CHAR *szBuffer) +{ + (void)pszFileName; + (void)szBuffer; + (void)ui32Line; +} + +void PVRSRVDebugPrintfDumpCCB(void) +{ + /* Not available */ +} + +#endif /* defined(PVRSRV_DEBUG_CCB_MAX) */ + +static IMG_UINT32 gPVRDebugLevel = + ( + DBGPRIV_FATAL | DBGPRIV_ERROR | DBGPRIV_WARNING +#if defined(PVRSRV_DEBUG_CCB_MAX) + | DBGPRIV_BUFFERED +#endif /* defined(PVRSRV_DEBUG_CCB_MAX) */ +#if defined(PVR_DPF_ADHOC_DEBUG_ON) + | DBGPRIV_DEBUG +#endif /* defined(PVR_DPF_ADHOC_DEBUG_ON) */ + ); + +module_param(gPVRDebugLevel, uint, 0644); +MODULE_PARM_DESC(gPVRDebugLevel, + "Sets the level of debug output (default 0x7)"); + +IMG_UINT32 OSDebugLevel(void) +{ + return gPVRDebugLevel; +} + +void OSSetDebugLevel(IMG_UINT32 ui32DebugLevel) +{ + gPVRDebugLevel = ui32DebugLevel; +} + +IMG_BOOL OSIsDebugLevel(IMG_UINT32 ui32DebugLevel) +{ + return (gPVRDebugLevel & ui32DebugLevel) != 0; +} + +#else /* defined(PVRSRV_NEED_PVR_DPF) */ + +IMG_UINT32 OSDebugLevel(void) +{ + return 0; +} + +void OSSetDebugLevel(IMG_UINT32 ui32DebugLevel) +{ + PVR_UNREFERENCED_PARAMETER(ui32DebugLevel); +} + +IMG_BOOL OSIsDebugLevel(IMG_UINT32 ui32DebugLevel) +{ + PVR_UNREFERENCED_PARAMETER(ui32DebugLevel); + return IMG_FALSE; +} + +#endif /* defined(PVRSRV_NEED_PVR_DPF) */ + +#define PVR_MAX_MSG_LEN PVR_MAX_DEBUG_MESSAGE_LEN + +/* Message buffer for messages */ +static IMG_CHAR gszBuffer[PVR_MAX_MSG_LEN + 1]; + +/* The lock is used to control access to gszBuffer */ +static DEFINE_SPINLOCK(gsDebugLock); + +/* + * Append a string to a buffer using formatted conversion. + * The function takes a variable number of arguments, pointed + * to by the var args list. 
+ */
+__printf(3, 0)
/* Append a vsnprintf-formatted string at the current end (strlen) of pszBuf,
 * bounded by ui32BufSiz and always NUL-terminated. BUG_ON fires if the buffer
 * is already full on entry. Returns IMG_TRUE if output was truncated.
 */
+static IMG_BOOL VBAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, const IMG_CHAR *pszFormat, va_list VArgs)
+{
+ IMG_UINT32 ui32Used;
+ IMG_UINT32 ui32Space;
+ IMG_INT32 i32Len;
+
+ ui32Used = OSStringLength(pszBuf);
+ BUG_ON(ui32Used >= ui32BufSiz);
+ ui32Space = ui32BufSiz - ui32Used;
+
/* vsnprintf returns the would-be length; >= ui32Space means truncation. */
+ i32Len = vsnprintf(&pszBuf[ui32Used], ui32Space, pszFormat, VArgs);
+ pszBuf[ui32BufSiz - 1] = 0;
+
+ /* Return true if string was truncated */
+ return i32Len < 0 || i32Len >= (IMG_INT32)ui32Space;
+}
+
+/*************************************************************************/ /*!
+@Function PVRSRVReleasePrintf
+@Description To output an important message to the user in release builds
+@Input pszFormat The message format string
+@Input ... Zero or more arguments for use by the format string
+*/ /**************************************************************************/
+void PVRSRVReleasePrintf(const IMG_CHAR *pszFormat, ...)
+{
+ va_list vaArgs;
+ unsigned long ulLockFlags = 0;
/* gszBuffer is a shared static message buffer, serialized by gsDebugLock. */
+ IMG_CHAR *pszBuf = gszBuffer;
+ IMG_UINT32 ui32BufSiz = sizeof(gszBuffer);
+ IMG_INT32 result;
+
+ va_start(vaArgs, pszFormat);
+
+ spin_lock_irqsave(&gsDebugLock, ulLockFlags);
+
+ result = snprintf(pszBuf, (ui32BufSiz - 2), "PVR_K: %u: ", current->pid);
+ PVR_ASSERT(result>0);
/* NOTE(review): ui32BufSiz is reduced by the prefix length here, but
 * VBAppend() re-derives the used length via OSStringLength(pszBuf), so the
 * remaining space is counted down twice (undercounted by 'result' bytes).
 * Harmless for this short prefix, but verify the truncation accounting.
 */
+ ui32BufSiz -= result;
+
+ if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs))
+ {
+ printk(KERN_INFO "%s (truncated)\n", pszBuf);
+ }
+ else
+ {
+ printk(KERN_INFO "%s\n", pszBuf);
+ }
+
+ spin_unlock_irqrestore(&gsDebugLock, ulLockFlags);
+ va_end(vaArgs);
+}
+
+#if defined(PVRSRV_NEED_PVR_TRACE)
+
+/*************************************************************************/ /*!
+@Function PVRTrace
+@Description To output a debug message to the user
+@Input pszFormat The message format string
+@Input ... Zero or more arguments for use by the format string
+*/ /**************************************************************************/
+void PVRSRVTrace(const IMG_CHAR *pszFormat, ...)
+{ + va_list VArgs; + unsigned long ulLockFlags = 0; + IMG_CHAR *pszBuf = gszBuffer; + IMG_UINT32 ui32BufSiz = sizeof(gszBuffer); + IMG_INT32 result; + + va_start(VArgs, pszFormat); + + spin_lock_irqsave(&gsDebugLock, ulLockFlags); + + result = snprintf(pszBuf, (ui32BufSiz - 2), "PVR: %u: ", current->pid); + PVR_ASSERT(result>0); + ui32BufSiz -= result; + + if (VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs)) + { + printk(KERN_ERR "PVR_K:(Message Truncated): %s\n", pszBuf); + } + else + { + printk(KERN_ERR "%s\n", pszBuf); + } + + spin_unlock_irqrestore(&gsDebugLock, ulLockFlags); + + va_end(VArgs); +} + +#endif /* defined(PVRSRV_NEED_PVR_TRACE) */ + +#if defined(PVRSRV_NEED_PVR_DPF) + +/* + * Append a string to a buffer using formatted conversion. + * The function takes a variable number of arguments, calling + * VBAppend to do the actual work. + */ +__printf(3, 4) +static IMG_BOOL BAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, const IMG_CHAR *pszFormat, ...) +{ + va_list VArgs; + IMG_BOOL bTrunc; + + va_start (VArgs, pszFormat); + + bTrunc = VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs); + + va_end (VArgs); + + return bTrunc; +} + +/*************************************************************************/ /*! +@Function PVRSRVDebugPrintf +@Description To output a debug message to the user +@Input uDebugLevel The current debug level +@Input pszFile The source file generating the message +@Input uLine The line of the source file +@Input pszFormat The message format string +@Input ... Zero or more arguments for use by the format string +*/ /**************************************************************************/ +void PVRSRVDebugPrintf(IMG_UINT32 ui32DebugLevel, + const IMG_CHAR *pszFullFileName, + IMG_UINT32 ui32Line, + const IMG_CHAR *pszFormat, + ...) 
+{ + const IMG_CHAR *pszFileName = pszFullFileName; + IMG_CHAR *pszLeafName; + va_list vaArgs; + unsigned long ulLockFlags = 0; + IMG_CHAR *pszBuf = gszBuffer; + IMG_UINT32 ui32BufSiz = sizeof(gszBuffer); + + if (!(gPVRDebugLevel & ui32DebugLevel)) + { + return; + } + + va_start(vaArgs, pszFormat); + + spin_lock_irqsave(&gsDebugLock, ulLockFlags); + + switch (ui32DebugLevel) + { + case DBGPRIV_FATAL: + { + OSStringLCopy(pszBuf, "PVR_K:(Fatal): ", ui32BufSiz); + PVRSRV_REPORT_ERROR(); + break; + } + case DBGPRIV_ERROR: + { + OSStringLCopy(pszBuf, "PVR_K:(Error): ", ui32BufSiz); + PVRSRV_REPORT_ERROR(); + break; + } + case DBGPRIV_WARNING: + { + OSStringLCopy(pszBuf, "PVR_K:(Warn): ", ui32BufSiz); + break; + } + case DBGPRIV_MESSAGE: + { + OSStringLCopy(pszBuf, "PVR_K:(Mesg): ", ui32BufSiz); + break; + } + case DBGPRIV_VERBOSE: + { + OSStringLCopy(pszBuf, "PVR_K:(Verb): ", ui32BufSiz); + break; + } + case DBGPRIV_DEBUG: + { + OSStringLCopy(pszBuf, "PVR_K:(Debug): ", ui32BufSiz); + break; + } + case DBGPRIV_CALLTRACE: + case DBGPRIV_ALLOC: + case DBGPRIV_BUFFERED: + default: + { + OSStringLCopy(pszBuf, "PVR_K: ", ui32BufSiz); + break; + } + } + + if (current->pid == task_tgid_nr(current)) + { + (void) BAppend(pszBuf, ui32BufSiz, "%5u: ", current->pid); + } + else + { + (void) BAppend(pszBuf, ui32BufSiz, "%5u-%5u: ", task_tgid_nr(current) /* pid id of group*/, current->pid /* task id */); + } + + if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs)) + { + printk(KERN_ERR "%s (truncated)\n", pszBuf); + } + else + { + IMG_BOOL bTruncated = IMG_FALSE; + +#if !defined(__sh__) + pszLeafName = (IMG_CHAR *)strrchr (pszFileName, '/'); + + if (pszLeafName) + { + pszFileName = pszLeafName+1; + } +#endif /* __sh__ */ + +#if defined(DEBUG) + { + static const IMG_CHAR *lastFile; + + if (lastFile == pszFileName) + { + bTruncated = BAppend(pszBuf, ui32BufSiz, " [%u]", ui32Line); + } + else + { + bTruncated = BAppend(pszBuf, ui32BufSiz, " [%s:%u]", pszFileName, ui32Line); + lastFile = 
pszFileName; + } + } +#else + bTruncated = BAppend(pszBuf, ui32BufSiz, " [%u]", ui32Line); +#endif + + if (bTruncated) + { + printk(KERN_ERR "%s (truncated)\n", pszBuf); + } + else + { + if (ui32DebugLevel & DBGPRIV_BUFFERED) + { + AddToBufferCCB(pszFileName, ui32Line, pszBuf); + } + else + { + printk(KERN_ERR "%s\n", pszBuf); + } + } + } + + spin_unlock_irqrestore(&gsDebugLock, ulLockFlags); + + va_end (vaArgs); +} + +#endif /* PVRSRV_NEED_PVR_DPF */ diff --git a/drivers/gpu/drm/phytium/octopus/pvr_debug.h b/drivers/gpu/drm/phytium/octopus/pvr_debug.h new file mode 100644 index 000000000000..0e3065299f73 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_debug.h @@ -0,0 +1,873 @@ +/*************************************************************************/ /*! +@File +@Title PVR Debug Declarations +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Provides debug functionality +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef PVR_DEBUG_H +#define PVR_DEBUG_H + +#include "img_defs.h" +#include "img_types.h" +#include "pvrsrv_error.h" + +/*! @cond Doxygen_Suppress */ +#if defined(_MSC_VER) +# define MSC_SUPPRESS_4127 __pragma(warning(suppress:4127)) +#else +# define MSC_SUPPRESS_4127 +#endif +/*! @endcond */ + +#if defined(__cplusplus) +extern "C" { +#endif + +#define PVR_MAX_DEBUG_MESSAGE_LEN (512) /*!< Max length of a Debug Message */ + +/* These are privately used by pvr_debug, use the PVR_DBG_ defines instead */ +#define DBGPRIV_FATAL 0x001UL /*!< Debug-Fatal. Privately used by pvr_debug. */ +#define DBGPRIV_ERROR 0x002UL /*!< Debug-Error. Privately used by pvr_debug. */ +#define DBGPRIV_WARNING 0x004UL /*!< Debug-Warning. Privately used by pvr_debug. 
*/ +#define DBGPRIV_MESSAGE 0x008UL /*!< Debug-Message. Privately used by pvr_debug. */ +#define DBGPRIV_VERBOSE 0x010UL /*!< Debug-Verbose. Privately used by pvr_debug. */ +#define DBGPRIV_CALLTRACE 0x020UL /*!< Debug-CallTrace. Privately used by pvr_debug. */ +#define DBGPRIV_ALLOC 0x040UL /*!< Debug-Alloc. Privately used by pvr_debug. */ +#define DBGPRIV_BUFFERED 0x080UL /*!< Debug-Buffered. Privately used by pvr_debug. */ +#define DBGPRIV_DEBUG 0x100UL /*!< Debug-AdHoc-Debug. Never submitted. Privately used by pvr_debug. */ +#define DBGPRIV_LAST 0x100UL /*!< Always set to highest mask value. Privately used by pvr_debug. */ + +/* Enable DPF logging for locally from some make targets */ +#if defined(PVRSRV_NEED_PVR_DPF_LOCAL) +#undef PVRSRV_NEED_PVR_DPF +#define PVRSRV_NEED_PVR_DPF +#endif + +#if !defined(PVRSRV_NEED_PVR_ASSERT) && defined(DEBUG) +#define PVRSRV_NEED_PVR_ASSERT +#endif + +#if defined(PVRSRV_NEED_PVR_ASSERT) && !defined(PVRSRV_NEED_PVR_DPF) +#define PVRSRV_NEED_PVR_DPF +#endif + +#if !defined(PVRSRV_NEED_PVR_TRACE) && (defined(DEBUG) || defined(TIMING)) +#define PVRSRV_NEED_PVR_TRACE +#endif + +#if !defined(DOXYGEN) +/*************************************************************************/ /* +PVRSRVGetErrorString +Returns a string describing the provided PVRSRV_ERROR code +NB No doxygen comments provided as this function does not require porting + for other operating systems +*/ /**************************************************************************/ +const IMG_CHAR *PVRSRVGetErrorString(PVRSRV_ERROR eError); +#define PVRSRVGETERRORSTRING PVRSRVGetErrorString +#endif + +/* PVR_ASSERT() and PVR_DBG_BREAK handling */ + +#if defined(__KLOCWORK__) +/* A dummy no-return function to be used under Klocwork to mark unreachable + paths instead of abort() in order to avoid MISRA.STDLIB.ABORT issues. 
*/ +__noreturn void klocwork_abort(void); +#endif + +#if defined(PVRSRV_NEED_PVR_ASSERT) || defined(DOXYGEN) + +/* Unfortunately the Klocwork static analysis checker doesn't understand our + * ASSERT macros. Thus it reports lots of false positive. Defining our Assert + * macros in a special way when the code is analysed by Klocwork avoids + * them. + */ +#if defined(__KLOCWORK__) +#define PVR_ASSERT(x) do { if (!(x)) {klocwork_abort();} } while (0) +#else /* ! __KLOCWORKS__ */ + +#if defined(_WIN32) +#define PVR_ASSERT(expr) do \ + { \ + MSC_SUPPRESS_4127 \ + if (unlikely(!(expr))) \ + { \ + PVRSRVDebugPrintf(DBGPRIV_FATAL, __FILE__, __LINE__,\ + "*** Debug assertion failed!"); \ + __debugbreak(); \ + } \ + MSC_SUPPRESS_4127 \ + } while (0) + +#else + +#if defined(__linux__) && defined(__KERNEL__) +#include +#include + +/* In Linux kernel mode, use WARN_ON() directly. This produces the + * correct filename and line number in the warning message. + */ +#define PVR_ASSERT(EXPR) do \ + { \ + if (unlikely(!(EXPR))) \ + { \ + PVRSRVDebugPrintf(DBGPRIV_FATAL, __FILE__, __LINE__, \ + "Debug assertion failed!"); \ + WARN_ON(1); \ + } \ + } while (0) + +#else /* defined(__linux__) && defined(__KERNEL__) */ + +/*************************************************************************/ /*! +@Function PVRSRVDebugAssertFail +@Description Indicate to the user that a debug assertion has failed and + prevent the program from continuing. + Invoked from the macro PVR_ASSERT(). +@Input pszFile The name of the source file where the assertion failed +@Input ui32Line The line number of the failed assertion +@Input pszAssertion String describing the assertion +@Return NEVER! 
+*/ /**************************************************************************/ +IMG_EXPORT void IMG_CALLCONV __noreturn +PVRSRVDebugAssertFail(const IMG_CHAR *pszFile, + IMG_UINT32 ui32Line, + const IMG_CHAR *pszAssertion); + +#define PVR_ASSERT(EXPR) do \ + { \ + if (unlikely(!(EXPR))) \ + { \ + PVRSRVDebugAssertFail(__FILE__, __LINE__, #EXPR); \ + } \ + } while (0) + +#endif /* defined(__linux__) && defined(__KERNEL__) */ +#endif /* defined(_WIN32) */ +#endif /* defined(__KLOCWORK__) */ + +#if defined(__KLOCWORK__) + #define PVR_DBG_BREAK do { klocwork_abort(); } while (0) +#else + #if defined(WIN32) + #define PVR_DBG_BREAK __debugbreak() /*!< Implementation of PVR_DBG_BREAK for (non-WinCE) Win32 */ + #else + #if defined(PVR_DBG_BREAK_ASSERT_FAIL) + /*!< Implementation of PVR_DBG_BREAK that maps onto PVRSRVDebugAssertFail */ + #if defined(_WIN32) + #define PVR_DBG_BREAK DBG_BREAK + #else + #if defined(__linux__) && defined(__KERNEL__) + #define PVR_DBG_BREAK BUG() + #else + #define PVR_DBG_BREAK PVRSRVDebugAssertFail(__FILE__, __LINE__, "PVR_DBG_BREAK") + #endif + #endif + #else + /*!< Null Implementation of PVR_DBG_BREAK (does nothing) */ + #define PVR_DBG_BREAK + #endif + #endif +#endif + + +#else /* defined(PVRSRV_NEED_PVR_ASSERT) */ + /* Unfortunately the Klocwork static analysis checker doesn't understand our + * ASSERT macros. Thus it reports lots of false positive. Defining our Assert + * macros in a special way when the code is analysed by Klocwork avoids + * them. 
+ */ + #if defined(__KLOCWORK__) + #define PVR_ASSERT(EXPR) do { if (!(EXPR)) {klocwork_abort();} } while (0) + #else + #define PVR_ASSERT(EXPR) (void)(EXPR) /*!< Null Implementation of PVR_ASSERT (does nothing) */ + #endif + + #define PVR_DBG_BREAK /*!< Null Implementation of PVR_DBG_BREAK (does nothing) */ + +#endif /* defined(PVRSRV_NEED_PVR_ASSERT) */ + + +/* PVR_DPF() handling */ + +#if defined(PVRSRV_NEED_PVR_DPF) || defined(DOXYGEN) + + /* New logging mechanism */ + #define PVR_DBG_FATAL DBGPRIV_FATAL /*!< Debug level passed to PVRSRVDebugPrintf() for fatal errors. */ + #define PVR_DBG_ERROR DBGPRIV_ERROR /*!< Debug level passed to PVRSRVDebugPrintf() for non-fatal errors. */ + #define PVR_DBG_WARNING DBGPRIV_WARNING /*!< Debug level passed to PVRSRVDebugPrintf() for warnings. */ + #define PVR_DBG_MESSAGE DBGPRIV_MESSAGE /*!< Debug level passed to PVRSRVDebugPrintf() for information only. */ + #define PVR_DBG_VERBOSE DBGPRIV_VERBOSE /*!< Debug level passed to PVRSRVDebugPrintf() for very low-priority debug. */ + #define PVR_DBG_CALLTRACE DBGPRIV_CALLTRACE /*!< Debug level passed to PVRSRVDebugPrintf() for function tracing purposes. */ + #define PVR_DBG_ALLOC DBGPRIV_ALLOC /*!< Debug level passed to PVRSRVDebugPrintf() for tracking some of drivers memory operations. */ + #define PVR_DBG_BUFFERED DBGPRIV_BUFFERED /*!< Debug level passed to PVRSRVDebugPrintf() when debug should be written to the debug circular buffer. */ + #define PVR_DBG_DEBUG DBGPRIV_DEBUG /*!< Debug level passed to PVRSRVDebugPrintf() for debug messages. */ + + /* These levels are always on with PVRSRV_NEED_PVR_DPF */ + /*! @cond Doxygen_Suppress */ + #define PVR_DPF_0x001UL(...) PVRSRVDebugPrintf(DBGPRIV_FATAL, __VA_ARGS__) + #define PVR_DPF_0x002UL(...) PVRSRVDebugPrintf(DBGPRIV_ERROR, __VA_ARGS__) + #define PVR_DPF_0x080UL(...) 
PVRSRVDebugPrintf(DBGPRIV_BUFFERED, __VA_ARGS__) + + /* + * The AdHoc-Debug level is only supported when enabled in the local + * build environment and may need to be used in both debug and release + * builds. An error is generated in the formal build if it is checked in. + */ +#if defined(PVR_DPF_ADHOC_DEBUG_ON) + #define PVR_DPF_0x100UL(...) PVRSRVDebugPrintf(DBGPRIV_DEBUG, __VA_ARGS__) +#else + /* Use an undefined token here to stop compilation dead in the offending module */ + #define PVR_DPF_0x100UL(...) __ERROR__PVR_DBG_DEBUG_is_in_use_but_has_not_been_enabled__Note_Debug_DPF_must_not_be_checked_in__Define_PVR_DPF_ADHOC_DEBUG_ON_for_testing +#endif + + /* Some are compiled out completely in release builds */ +#if defined(DEBUG) || defined(DOXYGEN) + #define PVR_DPF_0x004UL(...) PVRSRVDebugPrintf(DBGPRIV_WARNING, __VA_ARGS__) + #define PVR_DPF_0x008UL(...) PVRSRVDebugPrintf(DBGPRIV_MESSAGE, __VA_ARGS__) + #define PVR_DPF_0x010UL(...) PVRSRVDebugPrintf(DBGPRIV_VERBOSE, __VA_ARGS__) + #define PVR_DPF_0x020UL(...) PVRSRVDebugPrintf(DBGPRIV_CALLTRACE, __VA_ARGS__) + #define PVR_DPF_0x040UL(...) PVRSRVDebugPrintf(DBGPRIV_ALLOC, __VA_ARGS__) +#else + #define PVR_DPF_0x004UL(...) + #define PVR_DPF_0x008UL(...) + #define PVR_DPF_0x010UL(...) + #define PVR_DPF_0x020UL(...) + #define PVR_DPF_0x040UL(...) +#endif + + /* Translate the different log levels to separate macros + * so they can each be compiled out. + */ +#if defined(DEBUG) + #define PVR_DPF_EX(lvl, ...) PVR_DPF_ ## lvl (__FILE__, __LINE__, __VA_ARGS__) +#else + #define PVR_DPF_EX(lvl, ...) PVR_DPF_ ## lvl ("", __LINE__, __VA_ARGS__) +#endif + /*! 
@endcond */ + + /* Get rid of the double bracketing */ + #define PVR_DPF(x) PVR_DPF_EX x + + #define PVR_LOG_ERROR(_rc, _call) \ + PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)) + + #define PVR_LOG_IF_ERROR(_rc, _call) do \ + { if (unlikely(_rc != PVRSRV_OK)) { \ + PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ + } \ + MSC_SUPPRESS_4127\ + } while (0) + + #define PVR_WARN_IF_ERROR(_rc, _call) do \ + { if (unlikely(_rc != PVRSRV_OK)) { \ + PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ + } \ + MSC_SUPPRESS_4127\ + } while (0) + + #define PVR_LOG_RETURN_IF_NOMEM(_expr, _call) do \ + { if (unlikely(_expr == NULL)) { \ + PVR_DPF((PVR_DBG_ERROR, "%s failed (PVRSRV_ERROR_OUT_OF_MEMORY) in %s()", _call, __func__)); \ + return PVRSRV_ERROR_OUT_OF_MEMORY; } \ + MSC_SUPPRESS_4127\ + } while (0) + + #define PVR_LOG_GOTO_IF_NOMEM(_expr, _err, _go) do \ + { if (unlikely(_expr == NULL)) { \ + PVR_DPF((PVR_DBG_ERROR, "%s failed (PVRSRV_ERROR_OUT_OF_MEMORY) in %s()", #_expr, __func__)); \ + _err = PVRSRV_ERROR_OUT_OF_MEMORY; \ + goto _go; } \ + MSC_SUPPRESS_4127\ + } while (0) + + #define PVR_LOG_RETURN_IF_ERROR(_rc, _call) do \ + { if (unlikely(_rc != PVRSRV_OK)) { \ + PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ + return _rc; } \ + MSC_SUPPRESS_4127\ + } while (0) + + #define PVR_LOG_RETURN_VOID_IF_ERROR(_rc, _call) do \ + { if (unlikely(_rc != PVRSRV_OK)) { \ + PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ + return; } \ + MSC_SUPPRESS_4127\ + } while (0) + + #define PVR_LOG_GOTO_IF_ERROR(_rc, _call, _go) do \ + { if (unlikely(_rc != PVRSRV_OK)) { \ + PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ + goto _go; } \ + MSC_SUPPRESS_4127\ + } while (0) + + #define 
PVR_LOG_GOTO_WITH_ERROR(_call, _err, _rc, _go) do \ + { PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ + _err = _rc; \ + goto _go; \ + MSC_SUPPRESS_4127\ + } while (0) + + #define PVR_LOG_IF_FALSE(_expr, _msg) do \ + { if (unlikely(!(_expr))) { \ + PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \ + } \ + MSC_SUPPRESS_4127\ + } while (0) + + #define PVR_LOG_RETURN_IF_FALSE(_expr, _msg, _rc) do \ + { if (unlikely(!(_expr))) { \ + PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \ + return _rc; } \ + MSC_SUPPRESS_4127\ + } while (0) + + #define PVR_LOG_RETURN_VOID_IF_FALSE(_expr, _msg) do \ + { if (unlikely(!(_expr))) { \ + PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \ + return; } \ + MSC_SUPPRESS_4127\ + } while (0) + + #define PVR_LOG_GOTO_IF_FALSE(_expr, _msg, _go) do \ + { if (unlikely(!(_expr))) { \ + PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \ + goto _go; } \ + MSC_SUPPRESS_4127\ + } while (0) + + #define PVR_LOG_RETURN_IF_INVALID_PARAM(_expr, _param) do \ + { if (unlikely(!(_expr))) { \ + PVR_DPF((PVR_DBG_ERROR, "%s invalid in %s()", _param, __func__)); \ + return PVRSRV_ERROR_INVALID_PARAMS; } \ + MSC_SUPPRESS_4127\ + } while (0) + + #define PVR_LOG_GOTO_IF_INVALID_PARAM(_expr, _err, _go) do \ + { if (unlikely(!(_expr))) { \ + PVR_DPF((PVR_DBG_ERROR, "%s invalid in %s()", #_expr, __func__)); \ + _err = PVRSRV_ERROR_INVALID_PARAMS; \ + goto _go; } \ + MSC_SUPPRESS_4127\ + } while (0) + + #define PVR_LOG_MSG(_lvl, _msg) \ + PVR_DPF((_lvl, ("In %s() "_msg), __func__)) + + #define PVR_LOG_VA(_lvl, _msg, ...) \ + PVR_DPF((_lvl, ("In %s() "_msg), __func__, __VA_ARGS__)) + + #define PVR_LOG_IF_ERROR_VA(_lvl, _rc, _msg, ...) do \ + { if (unlikely(_rc != PVRSRV_OK)) { \ + PVR_DPF((_lvl, ("In %s() "_msg), __func__, __VA_ARGS__)); \ + } \ + MSC_SUPPRESS_4127\ + } while (0) + + #define PVR_LOG_IF_FALSE_VA(_lvl, _expr, _msg, ...) 
do \ + { if (unlikely(!(_expr))) { \ + PVR_DPF((_lvl, ("In %s() "_msg), __func__, __VA_ARGS__)); \ + } \ + MSC_SUPPRESS_4127\ + } while (0) + + #define PVR_LOG_RETURN_IF_ERROR_VA(_rc, _msg, ...) do \ + { if (unlikely(_rc != PVRSRV_OK)) { \ + PVR_DPF((PVR_DBG_ERROR, ("In %s() "_msg), __func__, __VA_ARGS__)); \ + return _rc; \ + } MSC_SUPPRESS_4127\ + } while (0) + + #define PVR_LOG_GOTO_IF_ERROR_VA(_rc, _go, _msg, ...) do \ + { if (unlikely(_rc != PVRSRV_OK)) { \ + PVR_DPF((PVR_DBG_ERROR, ("In %s() "_msg), __func__, __VA_ARGS__)); \ + goto _go; \ + } MSC_SUPPRESS_4127\ + } while (0) + + #define PVR_LOG_RETURN_IF_FALSE_VA(_expr, _rc, _msg, ...) do \ + { if (unlikely(!(_expr))) { \ + PVR_DPF((PVR_DBG_ERROR, ("At %s: "_msg), __func__, __VA_ARGS__)); \ + return _rc; \ + } MSC_SUPPRESS_4127\ + } while (0) + + #define PVR_LOG_GOTO_IF_FALSE_VA(_expr, _go, _msg, ...) do \ + { if (unlikely(!(_expr))) { \ + PVR_DPF((PVR_DBG_ERROR, ("In %s() "_msg), __func__, __VA_ARGS__)); \ + goto _go; \ + } MSC_SUPPRESS_4127\ + } while (0) + +#else /* defined(PVRSRV_NEED_PVR_DPF) */ + + #define PVR_DPF(X) /*!< Null Implementation of PhytiumVR Debug Printf (does nothing) */ + + #define PVR_LOG_MSG(_lvl, _msg) + #define PVR_LOG_VA(_lvl, _msg, ...) + #define PVR_LOG_ERROR(_rc, _call) (void)(_rc) + #define PVR_LOG_IF_ERROR(_rc, _call) (void)(_rc) + #define PVR_WARN_IF_ERROR(_rc, _call) (void)(_rc) + + #define PVR_LOG_IF_ERROR_VA(_lvl, _rc, _msg, ...) (void)(_rc) + #define PVR_LOG_IF_FALSE_VA(_lvl, _expr, _msg, ...) 
(void)(_expr) + + #define PVR_LOG_RETURN_IF_NOMEM(_expr, _call) do { if (unlikely(_expr == NULL)) { return PVRSRV_ERROR_OUT_OF_MEMORY; } MSC_SUPPRESS_4127 } while (0) + #define PVR_LOG_GOTO_IF_NOMEM(_expr, _err, _go) do { if (unlikely(_expr == NULL)) { _err = PVRSRV_ERROR_OUT_OF_MEMORY; goto _go; } MSC_SUPPRESS_4127 } while (0) + + #define PVR_LOG_RETURN_IF_ERROR(_rc, _call) do { if (unlikely(_rc != PVRSRV_OK)) { return (_rc); } MSC_SUPPRESS_4127 } while (0) + #define PVR_LOG_RETURN_IF_ERROR_VA(_rc, _msg, ...) do { if (unlikely(_rc != PVRSRV_OK)) { return (_rc); } MSC_SUPPRESS_4127 } while (0) + #define PVR_LOG_RETURN_VOID_IF_ERROR(_rc, _call) do { if (unlikely(_rc != PVRSRV_OK)) { return; } MSC_SUPPRESS_4127 } while (0) + + #define PVR_LOG_GOTO_IF_ERROR(_rc, _call, _go) do { if (unlikely(_rc != PVRSRV_OK)) { goto _go; } MSC_SUPPRESS_4127 } while (0) + #define PVR_LOG_GOTO_IF_ERROR_VA(_rc, _go, _msg, ...) do { if (unlikely(_rc != PVRSRV_OK)) { goto _go; } MSC_SUPPRESS_4127 } while (0) + #define PVR_LOG_GOTO_WITH_ERROR(_call, _err, _rc, _go) do { _err = _rc; goto _go; MSC_SUPPRESS_4127 } while (0) + + #define PVR_LOG_IF_FALSE(_expr, _msg) (void)(_expr) + #define PVR_LOG_RETURN_IF_FALSE(_expr, _msg, _rc) do { if (unlikely(!(_expr))) { return (_rc); } MSC_SUPPRESS_4127 } while (0) + #define PVR_LOG_RETURN_IF_FALSE_VA(_expr, _rc, _msg, ...) do { if (unlikely(!(_expr))) { return (_rc); } MSC_SUPPRESS_4127 } while (0) + + #define PVR_LOG_RETURN_VOID_IF_FALSE(_expr, _msg) do { if (unlikely(!(_expr))) { return; } MSC_SUPPRESS_4127 } while (0) + #define PVR_LOG_GOTO_IF_FALSE(_expr, _msg, _go) do { if (unlikely(!(_expr))) { goto _go; } MSC_SUPPRESS_4127 } while (0) + #define PVR_LOG_GOTO_IF_FALSE_VA(_expr, _go, _msg, ...) 
do { if (unlikely(!(_expr))) { goto _go; } MSC_SUPPRESS_4127 } while (0) + + #define PVR_LOG_RETURN_IF_INVALID_PARAM(_expr, _param) do { if (unlikely(!(_expr))) { return PVRSRV_ERROR_INVALID_PARAMS; } MSC_SUPPRESS_4127 } while (0) + #define PVR_LOG_GOTO_IF_INVALID_PARAM(_expr, _err, _go) do { if (unlikely(!(_expr))) { _err = PVRSRV_ERROR_INVALID_PARAMS; goto _go; } MSC_SUPPRESS_4127 } while (0) + + #undef PVR_DPF_FUNCTION_TRACE_ON + +#endif /* defined(PVRSRV_NEED_PVR_DPF) */ + +/*************************************************************************/ /*! +@Function PVRSRVDebugPrintf +@Description Output a debug message to the user, using an OS-specific + method, to a log or console which can be read by developers + Invoked from the macro PVR_DPF(). +@Input ui32DebugLevel The debug level of the message. This can + be used to restrict the output of debug + messages based on their severity. + If this is PVR_DBG_BUFFERED, the message + should be written into a debug circular + buffer instead of being output immediately + (useful when performance would otherwise + be adversely affected). + The debug circular buffer shall only be + output when PVRSRVDebugPrintfDumpCCB() is + called. +@Input pszFileName The source file containing the code that is + generating the message +@Input ui32Line The line number in the source file +@Input pszFormat The formatted message string +@Input ... Zero or more arguments for use by the + formatted string +@Return None +*/ /**************************************************************************/ +IMG_EXPORT void IMG_CALLCONV PVRSRVDebugPrintf(IMG_UINT32 ui32DebugLevel, + const IMG_CHAR *pszFileName, + IMG_UINT32 ui32Line, + const IMG_CHAR *pszFormat, + ...) __printf(4, 5); + +/*************************************************************************/ /*! 
+@Function PVRSRVDebugPrintfDumpCCB +@Description When PVRSRVDebugPrintf() is called with the ui32DebugLevel + specified as DBGPRIV_BUFFERED, the debug shall be written to + the debug circular buffer instead of being output immediately. + (This could be used to obtain debug without incurring a + performance hit by printing it at that moment). + This function shall dump the contents of that debug circular + buffer to be output in an OS-specific method to a log or + console which can be read by developers. +@Return None +*/ /**************************************************************************/ +IMG_EXPORT void IMG_CALLCONV PVRSRVDebugPrintfDumpCCB(void); + +#define PVR_DPF_FUNC__(lvl, message, ...) PVR_DPF((lvl, "%s: " message, __func__, ##__VA_ARGS__)) +#define PVR_DPF_FUNC(x) PVR_DPF_FUNC__ x + +/* Note: Use only when a log message due to the error absolutely should not + * be printed. Otherwise use PVR_LOG_RETURN_IF_ERROR macro. + */ +#define PVR_RETURN_IF_ERROR(_rc) do \ + { if (unlikely(_rc != PVRSRV_OK)) { \ + return _rc; } \ + MSC_SUPPRESS_4127 \ + } while (0) + +/* Note: Use only when a log message due to the error absolutely should not + * be printed. Otherwise use PVR_LOG_RETURN_IF_FALSE macro. + */ +#define PVR_RETURN_IF_FALSE(_expr, _rc) do \ + { if (unlikely(!(_expr))) { \ + return _rc; } \ + MSC_SUPPRESS_4127 \ + } while (0) + +/* Note: Use only when a log message due to the error absolutely should not + * be printed. Otherwise use PVR_LOG_RETURN_IF_INVALID_PARAM macro. + */ +#define PVR_RETURN_IF_INVALID_PARAM(_expr) do \ + { if (unlikely(!(_expr))) { \ + return PVRSRV_ERROR_INVALID_PARAMS; } \ + MSC_SUPPRESS_4127 \ + } while (0) + +/* Note: Use only when a log message due to the error absolutely should not + * be printed. Otherwise use PVR_LOG_RETURN_IF_NOMEM macro. 
+ */ +#define PVR_RETURN_IF_NOMEM(_expr) do \ + { if (unlikely(!(_expr))) { \ + return PVRSRV_ERROR_OUT_OF_MEMORY; } \ + MSC_SUPPRESS_4127 \ + } while (0) + +/* Note: Use only when a log message due to the error absolutely should not + * be printed. Otherwise use PVR_LOG_GOTO_IF_NOMEM macro. + */ +#define PVR_GOTO_IF_NOMEM(_expr, _err, _go) do \ + { if (unlikely(_expr == NULL)) { \ + _err = PVRSRV_ERROR_OUT_OF_MEMORY; \ + goto _go; } \ + MSC_SUPPRESS_4127 \ + } while (0) + +/* Note: Use only when a log message due to the error absolutely should not + * be printed. Otherwise use PVR_LOG_GOTO_IF_INVALID_PARAM macro. + */ +#define PVR_GOTO_IF_INVALID_PARAM(_expr, _err, _go) do \ + { if (unlikely(!(_expr))) { \ + _err = PVRSRV_ERROR_INVALID_PARAMS; \ + goto _go; } \ + MSC_SUPPRESS_4127 \ + } while (0) + +/* Note: Use only when a log message due to the error absolutely should not + * be printed. Otherwise use PVR_LOG_GOTO_IF_FALSE macro. + */ +#define PVR_GOTO_IF_FALSE(_expr, _go) do \ + { if (unlikely(!(_expr))) { \ + goto _go; } \ + MSC_SUPPRESS_4127 \ + } while (0) + +/* Note: Use only when a log message due to the error absolutely should not + * be printed. Otherwise use PVR_LOG_GOTO_IF_ERROR macro. + */ +#define PVR_GOTO_IF_ERROR(_rc, _go) do \ + { if (unlikely(_rc != PVRSRV_OK)) { \ + goto _go; } \ + MSC_SUPPRESS_4127\ + } while (0) + +/* Note: Use only when a log message due to the error absolutely should not + * be printed. Otherwise use PVR_LOG_GOTO_WITH_ERROR macro. + */ +#define PVR_GOTO_WITH_ERROR(_err, _rc, _go) do \ + { _err = _rc; goto _go; \ + MSC_SUPPRESS_4127 \ + } while (0) + +/*! 
@cond Doxygen_Suppress */ +#if defined(PVR_DPF_FUNCTION_TRACE_ON) + + #define PVR_DPF_ENTERED \ + PVR_DPF((PVR_DBG_CALLTRACE, "|-> %s:%d entered", __func__, __LINE__)) + + #define PVR_DPF_ENTERED1(p1) \ + PVR_DPF((PVR_DBG_CALLTRACE, "|-> %s:%d entered (0x%lx)", __func__, __LINE__, ((unsigned long)p1))) + + #define PVR_DPF_RETURN_RC(a) \ + do { int _r = (a); PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned %d", __func__, __LINE__, (_r))); return (_r); MSC_SUPPRESS_4127 } while (0) + + #define PVR_DPF_RETURN_RC1(a,p1) \ + do { int _r = (a); PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned %d (0x%lx)", __func__, __LINE__, (_r), ((unsigned long)p1))); return (_r); MSC_SUPPRESS_4127 } while (0) + + #define PVR_DPF_RETURN_VAL(a) \ + do { PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned with value", __func__, __LINE__)); return (a); MSC_SUPPRESS_4127 } while (0) + + #define PVR_DPF_RETURN_OK \ + do { PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned ok", __func__, __LINE__)); return PVRSRV_OK; MSC_SUPPRESS_4127 } while (0) + + #define PVR_DPF_RETURN \ + do { PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned", __func__, __LINE__)); return; MSC_SUPPRESS_4127 } while (0) + + #if !defined(DEBUG) + #error PVR DPF Function trace enabled in release build, rectify + #endif + +#else /* defined(PVR_DPF_FUNCTION_TRACE_ON) */ + + #define PVR_DPF_ENTERED + #define PVR_DPF_ENTERED1(p1) + #define PVR_DPF_RETURN_RC(a) return (a) + #define PVR_DPF_RETURN_RC1(a,p1) return (a) + #define PVR_DPF_RETURN_VAL(a) return (a) + #define PVR_DPF_RETURN_OK return PVRSRV_OK + #define PVR_DPF_RETURN return + +#endif /* defined(PVR_DPF_FUNCTION_TRACE_ON) */ +/*! @endcond */ + +#if defined(__KERNEL__) || defined(DOXYGEN) || defined(__QNXNTO__) +/*Use PVR_DPF() unless message is necessary in release build */ +#define PVR_LOG(X) PVRSRVReleasePrintf X + +/*************************************************************************/ /*! 
+@Function PVRSRVReleasePrintf +@Description Output an important message, using an OS-specific method, + to the Server log or console which will always be output in + both release and debug builds. + Invoked from the macro PVR_LOG(). Used in Services Server only. +@Input pszFormat The message format string +@Input ... Zero or more arguments for use by the format string +@Return None +*/ /**************************************************************************/ +void IMG_CALLCONV PVRSRVReleasePrintf(const IMG_CHAR *pszFormat, ...) __printf(1, 2); +#endif + +/* PVR_TRACE() handling */ + +#if defined(PVRSRV_NEED_PVR_TRACE) || defined(DOXYGEN) + + #define PVR_TRACE(X) PVRSRVTrace X /*!< PhytiumVR Debug Trace Macro */ + /* Empty string implementation that is -O0 build friendly */ + #define PVR_TRACE_EMPTY_LINE() PVR_TRACE(("%s", "")) + +/*************************************************************************/ /*! +@Function PVRSRVTrace +@Description Output a debug message to the user + Invoked from the macro PVR_TRACE(). +@Input pszFormat The message format string +@Input ... Zero or more arguments for use by the format string +*/ /**************************************************************************/ +IMG_EXPORT void IMG_CALLCONV PVRSRVTrace(const IMG_CHAR* pszFormat, ... ) + __printf(1, 2); + +#else /* defined(PVRSRV_NEED_PVR_TRACE) */ + /*! 
Null Implementation of PhytiumVR Debug Trace Macro (does nothing) */ + #define PVR_TRACE(X) + +#endif /* defined(PVRSRV_NEED_PVR_TRACE) */ + + +#if defined(PVRSRV_NEED_PVR_ASSERT) +#ifdef INLINE_IS_PRAGMA +#pragma inline(TRUNCATE_64BITS_TO_32BITS) +#endif + INLINE static IMG_UINT32 TRUNCATE_64BITS_TO_32BITS(IMG_UINT64 uiInput) + { + IMG_UINT32 uiTruncated; + + uiTruncated = (IMG_UINT32)uiInput; + PVR_ASSERT(uiInput == uiTruncated); + return uiTruncated; + } + + +#ifdef INLINE_IS_PRAGMA +#pragma inline(TRUNCATE_64BITS_TO_SIZE_T) +#endif + INLINE static size_t TRUNCATE_64BITS_TO_SIZE_T(IMG_UINT64 uiInput) + { + size_t uiTruncated; + + uiTruncated = (size_t)uiInput; + PVR_ASSERT(uiInput == uiTruncated); + return uiTruncated; + } + + +#ifdef INLINE_IS_PRAGMA +#pragma inline(TRUNCATE_SIZE_T_TO_32BITS) +#endif + INLINE static IMG_UINT32 TRUNCATE_SIZE_T_TO_32BITS(size_t uiInput) + { + IMG_UINT32 uiTruncated; + + uiTruncated = (IMG_UINT32)uiInput; + PVR_ASSERT(uiInput == uiTruncated); + return uiTruncated; + } + + +#else /* defined(PVRSRV_NEED_PVR_ASSERT) */ + #define TRUNCATE_64BITS_TO_32BITS(expr) ((IMG_UINT32)(expr)) + #define TRUNCATE_64BITS_TO_SIZE_T(expr) ((size_t)(expr)) + #define TRUNCATE_SIZE_T_TO_32BITS(expr) ((IMG_UINT32)(expr)) +#endif /* defined(PVRSRV_NEED_PVR_ASSERT) */ + +/*! @cond Doxygen_Suppress */ +/* Macros used to trace calls */ +#if defined(DEBUG) + #define PVR_DBG_FILELINE , (__FILE__), (__LINE__) + #define PVR_DBG_FILELINE_PARAM , const IMG_CHAR *pszaFile, IMG_UINT32 ui32Line + #define PVR_DBG_FILELINE_ARG , pszaFile, ui32Line + #define PVR_DBG_FILELINE_FMT " %s:%u" + #define PVR_DBG_FILELINE_UNREF() do { PVR_UNREFERENCED_PARAMETER(pszaFile); \ + PVR_UNREFERENCED_PARAMETER(ui32Line); } while (0) +#else + #define PVR_DBG_FILELINE + #define PVR_DBG_FILELINE_PARAM + #define PVR_DBG_FILELINE_ARG + #define PVR_DBG_FILELINE_FMT + #define PVR_DBG_FILELINE_UNREF() +#endif +/*! @endcond */ + +#if defined(__cplusplus) +} +#endif + +/*! 
+ @def PVR_ASSERT + @brief Aborts the program if assertion fails. + + The macro will be defined only when PVRSRV_NEED_PVR_ASSERT macro is + enabled. It's ignored otherwise. + + @def PVR_DPF + @brief PhytiumVR Debug Printf logging macro used throughout the driver. + + The macro allows to print logging messages to appropriate log. The + destination log is based on the component (user space / kernel space) and + operating system (Linux, Android, etc.). + + The macro also supports severity levels that allow to turn on/off messages + based on their importance. + + This macro will print messages with severity level higher that error only + if PVRSRV_NEED_PVR_DPF macro is defined. + + @def PVR_LOG_ERROR + @brief Logs error. + + @def PVR_LOG_IF_ERROR + @brief Logs error if not PVRSRV_OK. + + @def PVR_WARN_IF_ERROR + @brief Logs warning if not PVRSRV_OK. + + @def PVR_LOG_RETURN_IF_NOMEM + @brief Logs error if expression is NULL and returns PVRSRV_ERROR_OUT_OF_MEMORY. + + @def PVR_LOG_GOTO_IF_NOMEM + @brief Logs error if expression is NULL and jumps to given label. + + @def PVR_LOG_RETURN_IF_ERROR + @brief Logs error if not PVRSRV_OK and returns the error. + + @def PVR_LOG_RETURN_VOID_IF_ERROR + @brief Logs error if not PVRSRV_OK and returns (used in function that return void). + + @def PVR_LOG_GOTO_IF_ERROR + @brief Logs error if not PVRSRV_OK and jumps to label. + + @def PVR_LOG_GOTO_WITH_ERROR + @brief Logs error, goes to a label and sets the error code. + + @def PVR_LOG_IF_FALSE + @brief Prints error message if expression is false. + + @def PVR_LOG_RETURN_IF_FALSE + @brief Prints error message if expression is false and returns given error. + + @def PVR_LOG_RETURN_VOID_IF_FALSE + @brief Prints error message if expression is false and returns (used in function that return void). + + @def PVR_LOG_GOTO_IF_FALSE + @brief Prints error message if expression is false and jumps to label. 
+ + @def PVR_LOG_RETURN_IF_INVALID_PARAM + @brief Prints error message if expression is false and returns PVRSRV_ERROR_INVALID_PARAMS. + + @def PVR_LOG_GOTO_IF_INVALID_PARAM + @brief Prints error message if expression is false and jumps to label. + + @def PVR_RETURN_IF_ERROR + @brief Returns passed error code if it's different than PVRSRV_OK; + + @def PVR_RETURN_IF_FALSE + @brief Returns passed error code if expression is false. + + @def PVR_RETURN_IF_INVALID_PARAM + @brief Returns PVRSRV_ERROR_INVALID_PARAMS if expression is false. + + @def PVR_RETURN_IF_NOMEM + @brief Returns PVRSRV_ERROR_OUT_OF_MEMORY if expression is NULL. + + @def PVR_GOTO_IF_NOMEM + @brief Goes to a label if expression is NULL. + + @def PVR_GOTO_IF_INVALID_PARAM + @brief Goes to a label if expression is false. + + @def PVR_GOTO_IF_FALSE + @brief Goes to a label if expression is false. + + @def PVR_GOTO_IF_ERROR + @brief Goes to a label if the error code is different than PVRSRV_OK; + + @def PVR_GOTO_WITH_ERROR + @brief Goes to a label and sets the error code. + + @def PVR_LOG + @brief Prints message to a log unconditionally. + + This macro will print messages only if PVRSRV_NEED_PVR_LOG macro is defined. + + @def PVR_TRACE_EMPTY_LINE + @brief Prints empty line to a log (PVRSRV_NEED_PVR_TRACE must be defined). + + @def TRUNCATE_64BITS_TO_32BITS + @brief Truncates 64 bit value to 32 bit value (with possible precision loss). + + @def TRUNCATE_64BITS_TO_SIZE_T + @brief Truncates 64 bit value to size_t value (with possible precision loss). + + @def TRUNCATE_SIZE_T_TO_32BITS + @brief Truncates size_t value to 32 bit value (with possible precision loss). 
+ */ + +#endif /* PVR_DEBUG_H */ + +/****************************************************************************** + End of file (pvr_debug.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/pvr_debugfs.c b/drivers/gpu/drm/phytium/octopus/pvr_debugfs.c new file mode 100644 index 000000000000..0a4151729be0 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_debugfs.c @@ -0,0 +1,609 @@ +/*************************************************************************/ /*! +@File +@Title DebugFS implementation of Debug Info interface. +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements osdi_impl.h API to provide access to driver's + debug data via DebugFS. + + Note about locking in DebugFS module. + + Access to DebugFS is protected against the race where any + file could be removed while being accessed or accessed while + being removed. Any calls to debugfs_remove() will block + until all operations are finished. + + See implementation of proxy file operations (FULL_PROXY_FUNC) + and implementation of debugfs_file_[get|put]() in + fs/debugfs/file.c in Linux kernel sources for more details. + + Note about locking for sequential files. + + The seq_file objects have a mutex that protects access + to all of the file operations hence all of the sequential + *read* operations are protected. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include +#include +#include + +#include "img_types.h" +#include "img_defs.h" +#include "pvr_debug.h" +#include "pvr_debugfs.h" +#include "osfunc.h" +#include "allocmem.h" +#include "pvr_bridge_k.h" +#include "pvr_uaccess.h" +#include "osdi_impl.h" + +#define _DRIVER_THREAD_ENTER() \ + do { \ + PVRSRV_ERROR eLocalError = PVRSRVDriverThreadEnter(); \ + if (eLocalError != PVRSRV_OK) \ + { \ + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVDriverThreadEnter failed: %s", \ + __func__, PVRSRVGetErrorString(eLocalError))); \ + return OSPVRSRVToNativeError(eLocalError); \ + } \ + } while (0) + +#define _DRIVER_THREAD_EXIT() \ + PVRSRVDriverThreadExit() + +#define PVR_DEBUGFS_PVR_DPF_LEVEL PVR_DBG_ERROR + +typedef struct DFS_DIR +{ + struct dentry *psDirEntry; + struct DFS_DIR *psParentDir; +} DFS_DIR; + +typedef struct DFS_ENTRY +{ + OSDI_IMPL_ENTRY sImplEntry; + DI_ITERATOR_CB sIterCb; +} DFS_ENTRY; + +typedef struct DFS_FILE +{ + struct dentry *psFileEntry; + struct DFS_DIR *psParentDir; + const struct seq_operations *psSeqOps; + struct DFS_ENTRY sEntry; + DI_ENTRY_TYPE eType; +} DFS_FILE; + +/* ----- native callbacks interface ----------------------------------------- */ + +static void _VPrintf(void *pvNativeHandle, const IMG_CHAR *pszFmt, + va_list pArgs) +{ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) + seq_vprintf(pvNativeHandle, pszFmt, pArgs); +#else + IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]; + + vsnprintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, pszFmt, pArgs); + seq_printf(pvNativeHandle, "%s", szBuffer); +#endif +} + +static void _Puts(void *pvNativeHandle, const IMG_CHAR *pszStr) +{ + seq_puts(pvNativeHandle, pszStr); +} + +static IMG_BOOL _HasOverflowed(void *pvNativeHandle) +{ + struct seq_file *psSeqFile = pvNativeHandle; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0) + return seq_has_overflowed(psSeqFile); +#else + return psSeqFile->count == psSeqFile->size; +#endif +} + 
+static OSDI_IMPL_ENTRY_CB _g_sEntryCallbacks = { + .pfnVPrintf = _VPrintf, + .pfnPuts = _Puts, + .pfnHasOverflowed = _HasOverflowed, +}; + +/* ----- sequential file operations ----------------------------------------- */ + +static void *_Start(struct seq_file *psSeqFile, loff_t *puiPos) +{ + DFS_ENTRY *psEntry = psSeqFile->private; + + void *pvRet = psEntry->sIterCb.pfnStart(&psEntry->sImplEntry, puiPos); + + if (pvRet == DI_START_TOKEN) + { + return SEQ_START_TOKEN; + } + + return pvRet; +} + +static void _Stop(struct seq_file *psSeqFile, void *pvPriv) +{ + DFS_ENTRY *psEntry = psSeqFile->private; + + psEntry->sIterCb.pfnStop(&psEntry->sImplEntry, pvPriv); +} + +static void *_Next(struct seq_file *psSeqFile, void *pvPriv, loff_t *puiPos) +{ + DFS_ENTRY *psEntry = psSeqFile->private; + + return psEntry->sIterCb.pfnNext(&psEntry->sImplEntry, pvPriv, puiPos); +} + +static int _Show(struct seq_file *psSeqFile, void *pvPriv) +{ + DFS_ENTRY *psEntry = psSeqFile->private; + + if (pvPriv == SEQ_START_TOKEN) + { + pvPriv = DI_START_TOKEN; + } + + return psEntry->sIterCb.pfnShow(&psEntry->sImplEntry, pvPriv); +} + +static struct seq_operations _g_sSeqOps = { + .start = _Start, + .stop = _Stop, + .next = _Next, + .show = _Show +}; + +/* ----- file operations ---------------------------------------------------- */ + +static int _Open(struct inode *psINode, struct file *psFile) +{ + DFS_FILE *psDFSFile; + int iRes; + + PVR_LOG_RETURN_IF_FALSE(psINode != NULL && psINode->i_private != NULL, + "psDFSFile is NULL", -EIO); + + _DRIVER_THREAD_ENTER(); + + psDFSFile = psINode->i_private; + + if (psDFSFile->sEntry.sIterCb.pfnStart != NULL) + { + iRes = seq_open(psFile, psDFSFile->psSeqOps); + } + else + { + /* private data is NULL as it's going to be set below */ + iRes = single_open(psFile, _Show, NULL); + } + + if (iRes == 0) + { + struct seq_file *psSeqFile = psFile->private_data; + + DFS_ENTRY *psEntry = OSAllocMem(sizeof(*psEntry)); + if (psEntry == NULL) + { + 
PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocMem() failed", __func__)); + iRes = -ENOMEM; + goto return_; + } + + *psEntry = psDFSFile->sEntry; + psSeqFile->private = psEntry; + psEntry->sImplEntry.pvNative = psSeqFile; + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to seq_open psFile, returning %d", + __func__, iRes)); + } + +return_: + _DRIVER_THREAD_EXIT(); + + return iRes; +} + +static int _Close(struct inode *psINode, struct file *psFile) +{ + DFS_FILE *psDFSFile = psINode->i_private; + DFS_ENTRY *psEntry; + int iRes; + + PVR_LOG_RETURN_IF_FALSE(psDFSFile != NULL, "psDFSFile is NULL", + -EIO); + + _DRIVER_THREAD_ENTER(); + + /* save pointer to DFS_ENTRY */ + psEntry = ((struct seq_file *) psFile->private_data)->private; + + if (psDFSFile->sEntry.sIterCb.pfnStart != NULL) + { + iRes = seq_release(psINode, psFile); + } + else + { + iRes = single_release(psINode, psFile); + } + + /* free DFS_ENTRY allocated in _Open */ + OSFreeMem(psEntry); + + /* Validation check as seq_release (and single_release which calls it) + * never fail */ + if (iRes != 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to release psFile, returning %d", + __func__, iRes)); + } + + _DRIVER_THREAD_EXIT(); + + return iRes; +} + +static ssize_t _Read(struct file *psFile, char __user *pcBuffer, + size_t uiCount, loff_t *puiPos) +{ + DFS_FILE *psDFSFile = psFile->f_path.dentry->d_inode->i_private; + ssize_t iRes = -1; + + _DRIVER_THREAD_ENTER(); + + if (psDFSFile->eType == DI_ENTRY_TYPE_GENERIC) + { + iRes = seq_read(psFile, pcBuffer, uiCount, puiPos); + if (iRes < 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: failed to read from file, pfnRead() " + "returned %zd", __func__, iRes)); + goto return_; + } + } + else if (psDFSFile->eType == DI_ENTRY_TYPE_RANDOM_ACCESS) + { + DFS_ENTRY *psEntry = &psDFSFile->sEntry; + IMG_UINT64 ui64Count = uiCount; + + IMG_CHAR *pcLocalBuffer = OSAllocMem(uiCount); + PVR_GOTO_IF_FALSE(pcLocalBuffer != NULL, return_); + + iRes = psEntry->sIterCb.pfnRead(pcLocalBuffer, ui64Count, 
puiPos, + psEntry->sImplEntry.pvPrivData); + if (iRes < 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: failed to read from file, pfnRead() " + "returned %zd", __func__, iRes)); + OSFreeMem(pcLocalBuffer); + goto return_; + } + + if (pvr_copy_to_user(pcBuffer, pcLocalBuffer, iRes) != 0) + { + iRes = -1; + } + + OSFreeMem(pcLocalBuffer); + } + +return_: + _DRIVER_THREAD_EXIT(); + + return iRes; +} + +static loff_t _LSeek(struct file *psFile, loff_t iOffset, int iOrigin) +{ + DFS_FILE *psDFSFile = psFile->f_path.dentry->d_inode->i_private; + loff_t iRes = -1; + + _DRIVER_THREAD_ENTER(); + + if (psDFSFile->eType == DI_ENTRY_TYPE_GENERIC) + { + iRes = seq_lseek(psFile, iOffset, iOrigin); + if (iRes < 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: failed to set file position to offset " + "%lld, pfnSeek() returned %lld", __func__, + iOffset, iRes)); + goto return_; + } + } + else if (psDFSFile->eType == DI_ENTRY_TYPE_RANDOM_ACCESS) + { + DFS_ENTRY *psEntry = &psDFSFile->sEntry; + IMG_UINT64 ui64Pos; + + switch (iOrigin) + { + case SEEK_SET: + ui64Pos = psFile->f_pos + iOffset; + break; + case SEEK_CUR: + ui64Pos = iOffset; + break; + case SEEK_END: + /* not supported as we don't know the file size here */ + /* fall through */ + default: + return -1; + } + + /* only pass the absolute position to the callback, it's up to the + * implementer to determine if the position is valid */ + + iRes = psEntry->sIterCb.pfnSeek(ui64Pos, + psEntry->sImplEntry.pvPrivData); + if (iRes < 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: failed to set file position to offset " + "%lld, pfnSeek() returned %lld", __func__, + iOffset, iRes)); + goto return_; + } + + psFile->f_pos = ui64Pos; + } + +return_: + _DRIVER_THREAD_EXIT(); + + return iRes; +} + +static ssize_t _Write(struct file *psFile, const char __user *pszBuffer, + size_t uiCount, loff_t *puiPos) +{ + struct inode *psINode = psFile->f_path.dentry->d_inode; + DFS_FILE *psDFSFile = psINode->i_private; + DI_ITERATOR_CB *psIter = &psDFSFile->sEntry.sIterCb; + 
IMG_CHAR *pcLocalBuffer; + IMG_UINT64 ui64Count = uiCount + 1, ui64Pos = *puiPos; + IMG_INT64 i64Res = -EIO; + + PVR_LOG_RETURN_IF_FALSE(psDFSFile != NULL, "psDFSFile is NULL", + -EIO); + PVR_LOG_RETURN_IF_FALSE(psIter->pfnWrite != NULL, "pfnWrite is NULL", + -EIO); + + _DRIVER_THREAD_ENTER(); + + /* allocate buffer with one additional byte for the NUL character */ + pcLocalBuffer = OSAllocMem(ui64Count); + PVR_LOG_GOTO_IF_FALSE(pcLocalBuffer != NULL, "OSAllocMem() failed", + return_); + + i64Res = pvr_copy_from_user(pcLocalBuffer, pszBuffer, uiCount); + PVR_LOG_GOTO_IF_FALSE(i64Res == 0, "pvr_copy_from_user() failed", + free_local_buffer_); + + /* ensure that the framework user gets a NUL terminated buffer */ + pcLocalBuffer[ui64Count - 1] = '\0'; + + i64Res = psIter->pfnWrite(pcLocalBuffer, ui64Count, &ui64Pos, + psDFSFile->sEntry.sImplEntry.pvPrivData); + PVR_LOG_GOTO_IF_FALSE(i64Res >= 0, "pfnWrite failed", free_local_buffer_); + + *puiPos = ui64Pos; + +free_local_buffer_: + OSFreeMem(pcLocalBuffer); + +return_: + _DRIVER_THREAD_EXIT(); + + return i64Res; +} + +static const struct file_operations _g_psFileOpsGen = { + .owner = THIS_MODULE, + .open = _Open, + .release = _Close, + .read = _Read, + .llseek = _LSeek, + .write = _Write, +}; + +static const struct file_operations _g_psFileOpsRndAcc = { + .owner = THIS_MODULE, + .read = _Read, + .llseek = _LSeek, + .write = _Write, +}; + +/* ----- DI implementation interface ---------------------------------------- */ + +static PVRSRV_ERROR _Init(void) +{ + return PVRSRV_OK; +} + +static void _DeInit(void) +{ +} + +static PVRSRV_ERROR _CreateFile(const IMG_CHAR *pszName, + DI_ENTRY_TYPE eType, + const DI_ITERATOR_CB *psIterCb, + void *pvPrivData, + void *pvParentDir, + void **pvFile) +{ + DFS_DIR *psParentDir = pvParentDir; + DFS_FILE *psFile; + umode_t uiMode = S_IFREG; + struct dentry *psEntry; + const struct file_operations *psFileOps = NULL; + PVRSRV_ERROR eError; + + PVR_LOG_RETURN_IF_INVALID_PARAM(pvFile != NULL, 
"pvFile"); + PVR_LOG_RETURN_IF_INVALID_PARAM(pvParentDir != NULL, "pvParentDir"); + + switch (eType) + { + case DI_ENTRY_TYPE_GENERIC: + psFileOps = &_g_psFileOpsGen; + break; + case DI_ENTRY_TYPE_RANDOM_ACCESS: + psFileOps = &_g_psFileOpsRndAcc; + break; + default: + PVR_DPF((PVR_DBG_ERROR, "eType invalid in %s()", __func__)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto return_; + } + + psFile = OSAllocMem(sizeof(*psFile)); + PVR_LOG_GOTO_IF_NOMEM(psFile, eError, return_); + + uiMode |= psIterCb->pfnShow != NULL || psIterCb->pfnRead != NULL ? + S_IRUGO : 0; + uiMode |= psIterCb->pfnWrite != NULL ? S_IWUSR : 0; + + psEntry = debugfs_create_file(pszName, uiMode, psParentDir->psDirEntry, + psFile, psFileOps); + if (IS_ERR_OR_NULL(psEntry)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Cannot create debugfs '%s' file", + __func__, pszName)); + + eError = psEntry == NULL ? + PVRSRV_ERROR_OUT_OF_MEMORY : PVRSRV_ERROR_INVALID_DEVICE; + goto free_file_; + } + + psFile->eType = eType; + psFile->psSeqOps = &_g_sSeqOps; + psFile->sEntry.sIterCb = *psIterCb; + psFile->sEntry.sImplEntry.pvPrivData = pvPrivData; + psFile->sEntry.sImplEntry.pvNative = NULL; + psFile->sEntry.sImplEntry.psCb = &_g_sEntryCallbacks; + psFile->psParentDir = psParentDir; + psFile->psFileEntry = psEntry; + + *pvFile = psFile; + + return PVRSRV_OK; + +free_file_: + OSFreeMem(psFile); + +return_: + return eError; +} + +static void _DestroyFile(void *pvFile) +{ + DFS_FILE *psFile = pvFile; + + PVR_ASSERT(psFile != NULL); + + psFile->psFileEntry->d_inode->i_private = NULL; + + debugfs_remove(psFile->psFileEntry); + OSFreeMem(psFile); +} + +static PVRSRV_ERROR _CreateDir(const IMG_CHAR *pszName, + void *pvParentDir, + void **ppvDir) +{ + DFS_DIR *psNewDir; + struct dentry *psDirEntry, *psParentDir = NULL; + + PVR_LOG_RETURN_IF_INVALID_PARAM(pszName != NULL, "pszName"); + PVR_LOG_RETURN_IF_INVALID_PARAM(ppvDir != NULL, "ppvDir"); + + psNewDir = OSAllocMem(sizeof(*psNewDir)); + PVR_LOG_RETURN_IF_NOMEM(psNewDir, 
"OSAllocMem"); + + psNewDir->psParentDir = pvParentDir; + + if (pvParentDir != NULL) + { + psParentDir = psNewDir->psParentDir->psDirEntry; + } + + psDirEntry = debugfs_create_dir(pszName, psParentDir); + if (IS_ERR_OR_NULL(psDirEntry)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Cannot create '%s' debugfs directory", + __func__, pszName)); + OSFreeMem(psNewDir); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + psNewDir->psDirEntry = psDirEntry; + *ppvDir = psNewDir; + + return PVRSRV_OK; +} + +static void _DestroyDir(void *pvDir) +{ + DFS_DIR *psDir = pvDir; + + PVR_ASSERT(psDir != NULL); + + debugfs_remove(psDir->psDirEntry); + OSFreeMem(psDir); +} + +PVRSRV_ERROR PVRDebugFsRegister(void) +{ + OSDI_IMPL_CB sImplCb = { + .pfnInit = _Init, + .pfnDeInit = _DeInit, + .pfnCreateEntry = _CreateFile, + .pfnDestroyEntry = _DestroyFile, + .pfnCreateGroup = _CreateDir, + .pfnDestroyGroup = _DestroyDir + }; + + return DIRegisterImplementation("debugfs", &sImplCb); +} diff --git a/drivers/gpu/drm/phytium/octopus/pvr_debugfs.h b/drivers/gpu/drm/phytium/octopus/pvr_debugfs.h new file mode 100644 index 000000000000..2443170decfb --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_debugfs.h @@ -0,0 +1,50 @@ +/*************************************************************************/ /*! +@File +@Title DebugFS implementation of Debug Info interface. +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef PVR_DEBUGFS_H +#define PVR_DEBUGFS_H + +#include "pvrsrv_error.h" + +PVRSRV_ERROR PVRDebugFsRegister(void); + +#endif /* PVR_DEBUGFS_H */ diff --git a/drivers/gpu/drm/phytium/octopus/pvr_dicommon.h b/drivers/gpu/drm/phytium/octopus/pvr_dicommon.h new file mode 100644 index 000000000000..0157298cd2ed --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_dicommon.h @@ -0,0 +1,59 @@ +/*************************************************************************/ /*! +@File +@Title Services Debug Information (DI) common types and definitions +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Debug Information (DI) common types and definitions included + in both user mode and kernel mode source. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef PVR_DICOMMON_H +#define PVR_DICOMMON_H + +#if defined(__cplusplus) +extern "C" { +#endif + +/*! Maximum DI entry path length including the null byte. */ +#define DI_IMPL_BRG_PATH_LEN 64 + +#if defined(__cplusplus) +} +#endif + +#endif /* PVR_DICOMMON_H */ diff --git a/drivers/gpu/drm/phytium/octopus/pvr_dma_resv.h b/drivers/gpu/drm/phytium/octopus/pvr_dma_resv.h new file mode 100644 index 000000000000..dcc51d7f4a63 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_dma_resv.h @@ -0,0 +1,70 @@ +/*************************************************************************/ /*! +@Title Kernel reservation object compatibility header +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Description Per-version macros to allow code to seamlessly use older kernel +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef __PVR_DMA_RESV_H__ +#define __PVR_DMA_RESV_H__ + +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) +#include +#else +#include + +/* Reservation object types */ +#define dma_resv reservation_object +#define dma_resv_list reservation_object_list + +/* Reservation object functions */ +#define dma_resv_add_excl_fence reservation_object_add_excl_fence +#define dma_resv_add_shared_fence reservation_object_add_shared_fence +#define dma_resv_fini reservation_object_fini +#define dma_resv_get_excl reservation_object_get_excl +#define dma_resv_get_list reservation_object_get_list +#define dma_resv_held reservation_object_held +#define dma_resv_init reservation_object_init +#define dma_resv_reserve_shared reservation_object_reserve_shared +#define dma_resv_test_signaled_rcu reservation_object_test_signaled_rcu +#define dma_resv_wait_timeout_rcu reservation_object_wait_timeout_rcu +#endif + +#endif /* __PVR_DMA_RESV_H__ */ diff --git a/drivers/gpu/drm/phytium/octopus/pvr_drm.c b/drivers/gpu/drm/phytium/octopus/pvr_drm.c new file mode 100644 index 000000000000..e6c29d325de5 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_drm.c @@ -0,0 +1,319 @@ +/* + * @File + * @Title PhytiumVR DRM driver + * @Codingstyle LinuxKernel + * @Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved + * @License Dual MIT/GPLv2 + * + * The contents of this file are subject to the MIT license as set out below. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * Alternatively, the contents of this file may be used under the terms of + * the GNU General Public License Version 2 ("GPL") in which case the provisions + * of GPL are applicable instead of those above. + * + * If you wish to allow use of your version of this file only under the terms of + * GPL, and not to allow others to use your version of this file under the terms + * of the MIT license, indicate your decision by deleting the provisions above + * and replace them with the notice and other provisions required by GPL as set + * out in the file called "GPL-COPYING" included in this distribution. If you do + * not delete the provisions above, a recipient may use your version of this file + * under the terms of either the MIT license or GPL. + * + * This License is also included in this distribution in the file called + * "MIT-COPYING". 
+ * + * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS + * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING + * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#include + +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) +#include +#include +#include +#include +#else +#include /* include before drm_crtc.h for kernels older than 3.9 */ +#endif + +#include +#include +#include +#include +#include +#include + +#include "module_common.h" +#include "pvr_drm.h" +#include "pvr_drv.h" +#include "pvrversion.h" +#include "services_kernel_client.h" + +#include "kernel_compatibility.h" + +#define PVR_DRM_DRIVER_NAME PVR_DRM_NAME +#define PVR_DRM_DRIVER_DESC "Phytium Information Technologies PVR DRM" +#define PVR_DRM_DRIVER_DATE "20170530" + + +static int pvr_pm_suspend(struct device *dev) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct pvr_drm_private *priv = ddev->dev_private; + + DRM_DEBUG_DRIVER("device %p\n", dev); + + return PVRSRVDeviceSuspend(priv->dev_node); +} + +static int pvr_pm_resume(struct device *dev) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct pvr_drm_private *priv = ddev->dev_private; + + DRM_DEBUG_DRIVER("device %p\n", dev); + + return PVRSRVDeviceResume(priv->dev_node); +} + +const struct dev_pm_ops pvr_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pvr_pm_suspend,pvr_pm_resume) +// .suspend = pvr_pm_suspend, +// .resume = pvr_pm_resume, +}; + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) +static +#endif +int pvr_drm_load(struct drm_device *ddev, unsigned long flags) +{ + struct pvr_drm_private *priv; + enum 
PVRSRV_ERROR_TAG srv_err; + int err, deviceId; + + DRM_DEBUG_DRIVER("device %p\n", ddev->dev); + + dev_set_drvdata(ddev->dev, ddev); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)) + /* + * Older kernels do not have render drm_minor member in drm_device, + * so we fallback to primary node for device identification + */ + deviceId = ddev->primary->index; +#else + if (ddev->render) + deviceId = ddev->render->index; + else /* when render node is NULL, fallback to primary node */ + deviceId = ddev->primary->index; +#endif + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + err = -ENOMEM; + goto err_exit; + } + ddev->dev_private = priv; + + if (!ddev->dev->dma_parms) + ddev->dev->dma_parms = &priv->dma_parms; + dma_set_max_seg_size(ddev->dev, DMA_BIT_MASK(32)); + +#if defined(SUPPORT_BUFFER_SYNC) || defined(SUPPORT_NATIVE_FENCE_SYNC) + priv->fence_status_wq = create_freezable_workqueue("pvr_fce_status"); + if (!priv->fence_status_wq) { + DRM_ERROR("failed to create fence status workqueue\n"); + err = -ENOMEM; + goto err_unset_dma_parms; + } +#endif + + srv_err = PVRSRVCommonDeviceCreate(ddev->dev, deviceId, &priv->dev_node); + if (srv_err != PVRSRV_OK) { + DRM_ERROR("failed to create device node for device %p (%s)\n", + ddev->dev, PVRSRVGetErrorString(srv_err)); + if (srv_err == PVRSRV_ERROR_PROBE_DEFER) + err = -EPROBE_DEFER; + else + err = -ENODEV; + goto err_workqueue_destroy; + } + + err = PVRSRVDeviceInit(priv->dev_node); + if (err) { + DRM_ERROR("device %p initialisation failed (err=%d)\n", + ddev->dev, err); + goto err_device_destroy; + } + + drm_mode_config_init(ddev); + +#if defined(SUPPORT_FWLOAD_ON_PROBE) + srv_err = PVRSRVCommonDeviceInitialise(priv->dev_node); + if (srv_err != PVRSRV_OK) { + err = -ENODEV; + DRM_ERROR("device %p initialisation failed (err=%d)\n", + ddev->dev, err); + drm_mode_config_cleanup(ddev); + PVRSRVDeviceDeinit(priv->dev_node); + goto err_device_destroy; + } +#endif + + return 0; + +err_device_destroy: + 
PVRSRVCommonDeviceDestroy(priv->dev_node); +err_workqueue_destroy: +#if defined(SUPPORT_BUFFER_SYNC) || defined(SUPPORT_NATIVE_FENCE_SYNC) + destroy_workqueue(priv->fence_status_wq); +err_unset_dma_parms: +#endif + if (ddev->dev->dma_parms == &priv->dma_parms) + ddev->dev->dma_parms = NULL; + kfree(priv); +err_exit: + return err; +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) +static +#endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) +int pvr_drm_unload(struct drm_device *ddev) +#else +void pvr_drm_unload(struct drm_device *ddev) +#endif +{ + struct pvr_drm_private *priv = ddev->dev_private; + + DRM_DEBUG_DRIVER("device %p\n", ddev->dev); + + drm_mode_config_cleanup(ddev); + + PVRSRVDeviceDeinit(priv->dev_node); + + PVRSRVCommonDeviceDestroy(priv->dev_node); + +#if defined(SUPPORT_BUFFER_SYNC) || defined(SUPPORT_NATIVE_FENCE_SYNC) + destroy_workqueue(priv->fence_status_wq); +#endif + + if (ddev->dev->dma_parms == &priv->dma_parms) + ddev->dev->dma_parms = NULL; + + kfree(priv); + ddev->dev_private = NULL; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) + return 0; +#endif +} + +static int pvr_drm_open(struct drm_device *ddev, struct drm_file *dfile) +{ + struct pvr_drm_private *priv = ddev->dev_private; + int err; + + if (!try_module_get(THIS_MODULE)) { + DRM_ERROR("failed to get module reference\n"); + return -ENOENT; + } + + err = PVRSRVDeviceOpen(priv->dev_node, dfile); + if (err) + module_put(THIS_MODULE); + + return err; +} + +static void pvr_drm_release(struct drm_device *ddev, struct drm_file *dfile) +{ + struct pvr_drm_private *priv = ddev->dev_private; + + PVRSRVDeviceRelease(priv->dev_node, dfile); + + module_put(THIS_MODULE); +} + +/* + * The DRM global lock is taken for ioctls unless the DRM_UNLOCKED flag is set. 
+ */ +static struct drm_ioctl_desc pvr_drm_ioctls[] = { + DRM_IOCTL_DEF_DRV(PVR_SRVKM_CMD, PVRSRV_BridgeDispatchKM, DRM_RENDER_ALLOW | DRM_UNLOCKED) +}; + +#if defined(CONFIG_COMPAT) +static long pvr_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + unsigned int nr = DRM_IOCTL_NR(cmd); + + if (nr < DRM_COMMAND_BASE) + return drm_compat_ioctl(file, cmd, arg); + + return drm_ioctl(file, cmd, arg); +} +#endif /* defined(CONFIG_COMPAT) */ + +static const struct file_operations pvr_drm_fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, +#if defined(CONFIG_COMPAT) + .compat_ioctl = pvr_compat_ioctl, +#endif + .mmap = PVRSRV_MMap, + .poll = drm_poll, + .read = drm_read, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)) + .fasync = drm_fasync, +#endif +}; + +const struct drm_driver pvr_drm_generic_driver = { + .driver_features = DRIVER_MODESET | DRIVER_RENDER, + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) + .load = NULL, + .unload = NULL, +#else + .load = pvr_drm_load, + .unload = pvr_drm_unload, +#endif + .open = pvr_drm_open, + .postclose = pvr_drm_release, + + .ioctls = pvr_drm_ioctls, + .num_ioctls = ARRAY_SIZE(pvr_drm_ioctls), + .fops = &pvr_drm_fops, + + .name = PVR_DRM_DRIVER_NAME, + .desc = PVR_DRM_DRIVER_DESC, + .date = PVR_DRM_DRIVER_DATE, + .major = PVRVERSION_MAJ, + .minor = PVRVERSION_MIN, + .patchlevel = PVRVERSION_BUILD, +}; +MODULE_FIRMWARE("rgx.fw.30.3.816.20"); +MODULE_FIRMWARE("rgx.sh.30.3.816.20"); diff --git a/drivers/gpu/drm/phytium/octopus/pvr_drm.h b/drivers/gpu/drm/phytium/octopus/pvr_drm.h new file mode 100644 index 000000000000..a3000d1773eb --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_drm.h @@ -0,0 +1,83 @@ +/* + * @File + * @Title PVR DRM definitions shared between kernel and user space. + * @Codingstyle LinuxKernel + * @Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved + * @License Dual MIT/GPLv2 + * + * The contents of this file are subject to the MIT license as set out below. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * Alternatively, the contents of this file may be used under the terms of + * the GNU General Public License Version 2 ("GPL") in which case the provisions + * of GPL are applicable instead of those above. + * + * If you wish to allow use of your version of this file only under the terms of + * GPL, and not to allow others to use your version of this file under the terms + * of the MIT license, indicate your decision by deleting the provisions above + * and replace them with the notice and other provisions required by GPL as set + * out in the file called "GPL-COPYING" included in this distribution. If you do + * not delete the provisions above, a recipient may use your version of this file + * under the terms of either the MIT license or GPL. + * + * This License is also included in this distribution in the file called + * "MIT-COPYING". 
+ * + * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS + * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING + * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#if !defined(__PVR_DRM_H__) +#define __PVR_DRM_H__ + +#include "pvr_drm_core.h" + +/* + * IMPORTANT: + * All structures below are designed to be the same size when compiled for 32 + * and/or 64 bit architectures, i.e. there should be no compiler inserted + * padding. This is achieved by sticking to the following rules: + * 1) only use fixed width types + * 2) always naturally align fields by arranging them appropriately and by using + * padding fields when necessary + * + * These rules should _always_ be followed when modifying or adding new + * structures to this file. + */ + +struct drm_pvr_srvkm_cmd { + __u32 bridge_id; + __u32 bridge_func_id; + __u64 in_data_ptr; + __u64 out_data_ptr; + __u32 in_data_size; + __u32 out_data_size; +}; + +/* + * DRM command numbers, relative to DRM_COMMAND_BASE. + * These defines must be prefixed with "DRM_". + */ +#define DRM_PVR_SRVKM_CMD 0 /* Used for PVR Services ioctls */ + + +/* These defines must be prefixed with "DRM_IOCTL_". 
*/ +#define DRM_IOCTL_PVR_SRVKM_CMD \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_PVR_SRVKM_CMD, \ + struct drm_pvr_srvkm_cmd) + +#endif /* defined(__PVR_DRM_H__) */ diff --git a/drivers/gpu/drm/phytium/octopus/pvr_drm_core.h b/drivers/gpu/drm/phytium/octopus/pvr_drm_core.h new file mode 100644 index 000000000000..4ffca191ee9a --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_drm_core.h @@ -0,0 +1,76 @@ +/* + * @File + * @Title Linux DRM definitions shared between kernel and user space. + * @Codingstyle LinuxKernel + * @Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All rights reserved. + * @Description This header contains a subset of the Linux kernel DRM uapi + * and is designed to be used in kernel and user mode. When + * included from kernel mode, it pulls in the full version of + * drm.h. Whereas, when included from user mode, it defines a + * minimal version of drm.h (as found in libdrm). As such, the + * structures and ioctl commands must exactly match those found + * in the Linux kernel/libdrm. + * @License MIT + * + * The contents of this file are subject to the MIT license as set out below. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#if !defined(__PVR_DRM_CORE_H__) +#define __PVR_DRM_CORE_H__ + +#if defined(__KERNEL__) +#include +#else +#include +#include + +#define DRM_IOCTL_BASE 'd' +#define DRM_COMMAND_BASE 0x40 + +#define DRM_IOWR(nr, type) _IOWR(DRM_IOCTL_BASE, nr, type) + +struct drm_version { + int version_major; + int version_minor; + int version_patchlevel; + __kernel_size_t name_len; + char *name; + __kernel_size_t date_len; + char *date; + __kernel_size_t desc_len; + char *desc; +}; + +struct drm_set_version { + int drm_di_major; + int drm_di_minor; + int drm_dd_major; + int drm_dd_minor; +}; + +#define DRM_IOCTL_VERSION DRM_IOWR(0x00, struct drm_version) +#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version) +#endif + +#endif diff --git a/drivers/gpu/drm/phytium/octopus/pvr_drv.h b/drivers/gpu/drm/phytium/octopus/pvr_drv.h new file mode 100644 index 000000000000..ecf303a2feb5 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_drv.h @@ -0,0 +1,101 @@ +/* + * @File + * @Title PhytiumVR DRM driver + * @Codingstyle LinuxKernel + * @Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved + * @License Dual MIT/GPLv2 + * + * The contents of this file are subject to the MIT license as set out below. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * Alternatively, the contents of this file may be used under the terms of + * the GNU General Public License Version 2 ("GPL") in which case the provisions + * of GPL are applicable instead of those above. + * + * If you wish to allow use of your version of this file only under the terms of + * GPL, and not to allow others to use your version of this file under the terms + * of the MIT license, indicate your decision by deleting the provisions above + * and replace them with the notice and other provisions required by GPL as set + * out in the file called "GPL-COPYING" included in this distribution. If you do + * not delete the provisions above, a recipient may use your version of this file + * under the terms of either the MIT license or GPL. + * + * This License is also included in this distribution in the file called + * "MIT-COPYING". 
+ * + * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS + * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING + * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#if !defined(__PVR_DRV_H__) +#define __PVR_DRV_H__ + +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) +#include +#include +#include +#else +#include +#endif + +#include + +struct file; +struct _PVRSRV_DEVICE_NODE_; +struct workqueue_struct; +struct vm_area_struct; + +/* This structure is used to store Linux specific per-device information. */ +struct pvr_drm_private { + struct _PVRSRV_DEVICE_NODE_ *dev_node; + + /* + * This is needed for devices that don't already have their own dma + * parameters structure, e.g. platform devices, and, if necessary, will + * be assigned to the 'struct device' during device initialisation. It + * should therefore never be accessed directly via this structure as + * this may not be the version of dma parameters in use. 
+ */ + struct device_dma_parameters dma_parms; + +#if defined(SUPPORT_BUFFER_SYNC) || defined(SUPPORT_NATIVE_FENCE_SYNC) + struct workqueue_struct *fence_status_wq; +#endif + + /* PVR Sync debug notify handle */ + void *sync_debug_notify_handle; +}; + +extern const struct dev_pm_ops pvr_pm_ops; +extern const struct drm_driver pvr_drm_generic_driver; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) +int pvr_drm_load(struct drm_device *ddev, unsigned long flags); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) +int pvr_drm_unload(struct drm_device *ddev); +#else +void pvr_drm_unload(struct drm_device *ddev); +#endif +#endif + +int PVRSRV_BridgeDispatchKM(struct drm_device *dev, void *arg, + struct drm_file *file); +int PVRSRV_MMap(struct file *file, struct vm_area_struct *ps_vma); + +#endif /* !defined(__PVR_DRV_H__) */ diff --git a/drivers/gpu/drm/phytium/octopus/pvr_dvfs.h b/drivers/gpu/drm/phytium/octopus/pvr_dvfs.h new file mode 100644 index 000000000000..845d9fbc01dd --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_dvfs.h @@ -0,0 +1,136 @@ +/*************************************************************************/ /*! +@File pvr_dvfs.h +@Title System level interface for DVFS +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef PVR_DVFS_H +#define PVR_DVFS_H + +#include + +#if defined(SUPPORT_LINUX_DVFS) + #include + #include + + #if defined(CONFIG_DEVFREQ_THERMAL) + #include + #endif + + #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)) + #include + #else + #include + #endif +#endif + +#include "img_types.h" + +typedef void (*PFN_SYS_DEV_DVFS_SET_FREQUENCY)(IMG_UINT32 ui32Freq); +typedef void (*PFN_SYS_DEV_DVFS_SET_VOLTAGE)(IMG_UINT32 ui32Volt); + +typedef struct _IMG_OPP_ +{ + IMG_UINT32 ui32Volt; + /* + * Unit of frequency in Hz. 
+ */ + IMG_UINT32 ui32Freq; +} IMG_OPP; + +typedef struct _IMG_DVFS_DEVICE_CFG_ +{ + const IMG_OPP *pasOPPTable; + IMG_UINT32 ui32OPPTableSize; +#if defined(SUPPORT_LINUX_DVFS) + IMG_UINT32 ui32PollMs; +#endif + IMG_BOOL bIdleReq; + PFN_SYS_DEV_DVFS_SET_FREQUENCY pfnSetFrequency; + PFN_SYS_DEV_DVFS_SET_VOLTAGE pfnSetVoltage; + +#if defined(CONFIG_DEVFREQ_THERMAL) && defined(SUPPORT_LINUX_DVFS) + struct devfreq_cooling_power *psPowerOps; +#endif +} IMG_DVFS_DEVICE_CFG; + +#if defined(SUPPORT_LINUX_DVFS) +typedef struct _IMG_DVFS_GOVERNOR_ +{ + IMG_BOOL bEnabled; +} IMG_DVFS_GOVERNOR; + +typedef struct _IMG_DVFS_GOVERNOR_CFG_ +{ + IMG_UINT32 ui32UpThreshold; + IMG_UINT32 ui32DownDifferential; +} IMG_DVFS_GOVERNOR_CFG; +#endif + +#if defined(__linux__) +#if defined(SUPPORT_LINUX_DVFS) +typedef struct _IMG_DVFS_DEVICE_ +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)) + struct opp *psOPP; +#else + struct dev_pm_opp *psOPP; +#endif + struct devfreq *psDevFreq; + IMG_BOOL bEnabled; + IMG_HANDLE hGpuUtilUserDVFS; + struct devfreq_simple_ondemand_data data; +#if defined(CONFIG_DEVFREQ_THERMAL) + struct thermal_cooling_device *psDevfreqCoolingDevice; +#endif +} IMG_DVFS_DEVICE; +#endif + +typedef struct _IMG_DVFS_ +{ +#if defined(SUPPORT_LINUX_DVFS) + IMG_DVFS_DEVICE sDVFSDevice; + IMG_DVFS_GOVERNOR sDVFSGovernor; + IMG_DVFS_GOVERNOR_CFG sDVFSGovernorCfg; +#endif + IMG_DVFS_DEVICE_CFG sDVFSDeviceCfg; +} PVRSRV_DVFS; +#endif/* (__linux__) */ + +#endif /* PVR_DVFS_H */ diff --git a/drivers/gpu/drm/phytium/octopus/pvr_dvfs_device.c b/drivers/gpu/drm/phytium/octopus/pvr_dvfs_device.c new file mode 100644 index 000000000000..81b829249e7b --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_dvfs_device.c @@ -0,0 +1,670 @@ +/*************************************************************************/ /*! +@File +@Title PhytiumVR devfreq device implementation +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Description Linux module setup +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(NO_HARDWARE) + +#include +#if defined(CONFIG_DEVFREQ_THERMAL) +#include +#endif +#include +#include + +#include "power.h" +#include "pvrsrv.h" +#include "pvrsrv_device.h" + +#include "rgxdevice.h" +#include "rgxinit.h" +#include "sofunc_rgx.h" + +#include "syscommon.h" + +#include "pvr_dvfs_device.h" + +#include "kernel_compatibility.h" + +static PVRSRV_DEVICE_NODE *gpsDeviceNode; + +static IMG_INT32 devfreq_target(struct device *dev, unsigned long *requested_freq, IMG_UINT32 flags) +{ + RGX_DATA *psRGXData = (RGX_DATA*) gpsDeviceNode->psDevConfig->hDevData; + IMG_DVFS_DEVICE *psDVFSDevice = &gpsDeviceNode->psDevConfig->sDVFS.sDVFSDevice; + IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg = &gpsDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg; + RGX_TIMING_INFORMATION *psRGXTimingInfo = NULL; + IMG_UINT32 ui32Freq, ui32CurFreq, ui32Volt; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)) + struct opp *opp; +#else + struct dev_pm_opp *opp; +#endif + + /* Check the RGX device is initialised */ + if (!psRGXData) + { + return -ENODATA; + } + + psRGXTimingInfo = psRGXData->psRGXTimingInfo; + if (!psDVFSDevice->bEnabled) + { + *requested_freq = psRGXTimingInfo->ui32CoreClockSpeed; + return 0; + } + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) + rcu_read_lock(); +#endif + + opp = devfreq_recommended_opp(dev, requested_freq, flags); + if (IS_ERR(opp)) { 
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) + rcu_read_unlock(); +#endif + PVR_DPF((PVR_DBG_ERROR, "Invalid OPP")); + return PTR_ERR(opp); + } + + ui32Freq = dev_pm_opp_get_freq(opp); + ui32Volt = dev_pm_opp_get_voltage(opp); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) + rcu_read_unlock(); +#else + dev_pm_opp_put(opp); +#endif + + ui32CurFreq = psRGXTimingInfo->ui32CoreClockSpeed; + + if (ui32CurFreq == ui32Freq) + { + return 0; + } + + if (PVRSRV_OK != PVRSRVDevicePreClockSpeedChange(gpsDeviceNode, + psDVFSDeviceCfg->bIdleReq, + NULL)) + { + dev_err(dev, "PVRSRVDevicePreClockSpeedChange failed\n"); + return -EPERM; + } + + /* Increasing frequency, change voltage first */ + if (ui32Freq > ui32CurFreq) + { + psDVFSDeviceCfg->pfnSetVoltage(ui32Volt); + } + + psDVFSDeviceCfg->pfnSetFrequency(ui32Freq); + + /* Decreasing frequency, change frequency first */ + if (ui32Freq < ui32CurFreq) + { + psDVFSDeviceCfg->pfnSetVoltage(ui32Volt); + } + + psRGXTimingInfo->ui32CoreClockSpeed = ui32Freq; + + PVRSRVDevicePostClockSpeedChange(gpsDeviceNode, psDVFSDeviceCfg->bIdleReq, + NULL); + + return 0; +} + +static int devfreq_get_dev_status(struct device *dev, struct devfreq_dev_status *stat) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = gpsDeviceNode->pvDevice; + IMG_DVFS_DEVICE *psDVFSDevice = &gpsDeviceNode->psDevConfig->sDVFS.sDVFSDevice; + RGX_DATA *psRGXData = (RGX_DATA*) gpsDeviceNode->psDevConfig->hDevData; + RGX_TIMING_INFORMATION *psRGXTimingInfo = NULL; + RGXFWIF_GPU_UTIL_STATS sGpuUtilStats; + PVRSRV_ERROR eError; + + /* Check the RGX device is initialised */ + if (!psDevInfo || !psRGXData) + { + return -ENODATA; + } + + psRGXTimingInfo = psRGXData->psRGXTimingInfo; + stat->current_frequency = psRGXTimingInfo->ui32CoreClockSpeed; + + if (psDevInfo->pfnGetGpuUtilStats == NULL) + { + /* Not yet ready. So set times to something sensible. 
*/ + stat->busy_time = 0; + stat->total_time = 0; + return 0; + } + + eError = psDevInfo->pfnGetGpuUtilStats(psDevInfo->psDeviceNode, + psDVFSDevice->hGpuUtilUserDVFS, + &sGpuUtilStats); + + if (eError != PVRSRV_OK) + { + return -EAGAIN; + } + + stat->busy_time = sGpuUtilStats.ui64GpuStatActive; + stat->total_time = sGpuUtilStats.ui64GpuStatCumulative; + + return 0; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) +static IMG_INT32 devfreq_cur_freq(struct device *dev, unsigned long *freq) +{ + RGX_DATA *psRGXData = (RGX_DATA*) gpsDeviceNode->psDevConfig->hDevData; + + /* Check the RGX device is initialised */ + if (!psRGXData) + { + return -ENODATA; + } + + *freq = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed; + + return 0; +} +#endif + +static struct devfreq_dev_profile img_devfreq_dev_profile = +{ + .target = devfreq_target, + .get_dev_status = devfreq_get_dev_status, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) + .get_cur_freq = devfreq_cur_freq, +#endif +}; + +static int FillOPPTable(struct device *dev) +{ + const IMG_OPP *iopp; + int i, err = 0; + IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg = &gpsDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg; + + for (i = 0, iopp = psDVFSDeviceCfg->pasOPPTable; + i < psDVFSDeviceCfg->ui32OPPTableSize; + i++, iopp++) + { + err = dev_pm_opp_add(dev, iopp->ui32Freq, iopp->ui32Volt); + if (err) { + dev_err(dev, "Could not add OPP entry, %d\n", err); + return err; + } + } + + return 0; +} + +static void ClearOPPTable(struct device *dev) +{ +#if (defined(CHROMIUMOS_KERNEL) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) + const IMG_OPP *iopp; + int i; + IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg = &gpsDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg; + + for (i = 0, iopp = psDVFSDeviceCfg->pasOPPTable; + i < psDVFSDeviceCfg->ui32OPPTableSize; + i++, iopp++) + { + dev_pm_opp_remove(dev, iopp->ui32Freq); + } +#endif +} + +static int GetOPPValues(struct device *dev, + 
unsigned long *min_freq, + unsigned long *min_volt, + unsigned long *max_freq) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)) + struct opp *opp; +#else + struct dev_pm_opp *opp; +#endif + int count, i, err = 0; + unsigned long freq; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) && \ + (!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))) + unsigned int *freq_table; +#else + unsigned long *freq_table; +#endif + + count = dev_pm_opp_get_opp_count(dev); + if (count < 0) + { + dev_err(dev, "Could not fetch OPP count, %d\n", count); + return count; + } + + dev_info(dev, "Found %d OPP points.\n", count); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) + freq_table = devm_kcalloc(dev, count, sizeof(*freq_table), GFP_ATOMIC); +#else + freq_table = kcalloc(count, sizeof(*freq_table), GFP_ATOMIC); +#endif + if (! freq_table) + { + return -ENOMEM; + } + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) + /* Start RCU read-side critical section to map frequency to OPP */ + rcu_read_lock(); +#endif + + /* Iterate over OPP table; Iteration 0 finds "opp w/ freq >= 0 Hz". */ + freq = 0; + opp = dev_pm_opp_find_freq_ceil(dev, &freq); + if (IS_ERR(opp)) + { + err = PTR_ERR(opp); + dev_err(dev, "Couldn't find lowest frequency, %d\n", err); + goto exit; + } + + *min_volt = dev_pm_opp_get_voltage(opp); + *max_freq = *min_freq = freq_table[0] = freq; + dev_info(dev, "opp[%d/%d]: (%lu Hz, %lu uV)\n", 1, count, freq, *min_volt); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) + dev_pm_opp_put(opp); +#endif + + /* Iteration i > 0 finds "opp w/ freq >= (opp[i-1].freq + 1)". 
*/ + for (i = 1; i < count; i++) + { + freq++; + opp = dev_pm_opp_find_freq_ceil(dev, &freq); + if (IS_ERR(opp)) + { + err = PTR_ERR(opp); + dev_err(dev, "Couldn't find %dth frequency, %d\n", i, err); + goto exit; + } + + freq_table[i] = freq; + *max_freq = freq; + dev_info(dev, + "opp[%d/%d]: (%lu Hz, %lu uV)\n", + i + 1, + count, + freq, + dev_pm_opp_get_voltage(opp)); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) + dev_pm_opp_put(opp); +#endif + } + +exit: +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) + rcu_read_unlock(); +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) + if (!err) + { + img_devfreq_dev_profile.freq_table = freq_table; + img_devfreq_dev_profile.max_state = count; + } + else +#endif + { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) + devm_kfree(dev, freq_table); +#else + kfree(freq_table); +#endif + } + + return err; +} + +#if defined(CONFIG_DEVFREQ_THERMAL) +static int RegisterCoolingDevice(struct device *dev, + IMG_DVFS_DEVICE *psDVFSDevice, + struct devfreq_cooling_power *powerOps) +{ + struct device_node *of_node; + int err = 0; + PVRSRV_VZ_RET_IF_MODE(GUEST, err); + + if (!powerOps) + { + dev_info(dev, "Cooling: power ops not registered, not enabling cooling"); + return 0; + } + + of_node = of_node_get(dev->of_node); + + psDVFSDevice->psDevfreqCoolingDevice = of_devfreq_cooling_register_power( + of_node, psDVFSDevice->psDevFreq, powerOps); + + if (IS_ERR(psDVFSDevice->psDevfreqCoolingDevice)) + { + err = PTR_ERR(psDVFSDevice->psDevfreqCoolingDevice); + dev_err(dev, "Failed to register as devfreq cooling device %d", err); + } + + of_node_put(of_node); + + return err; +} +#endif + +#define TO_IMG_ERR(err) ((err == -EPROBE_DEFER) ? 
PVRSRV_ERROR_PROBE_DEFER : PVRSRV_ERROR_INIT_FAILURE) + +extern int pvr_dvfsmode; +static char* GetDVFSMode(void) +{ + char* pMode = NULL; + + switch(pvr_dvfsmode) + { + case 1: + pMode = "performance"; + break; + case 2: + pMode = "powersave"; + break; + case 3: + pMode = "userspace"; + break; + default: + pMode = "simple_ondemand"; + break; + } + + return pMode; +} + +PVRSRV_ERROR InitDVFS(PPVRSRV_DEVICE_NODE psDeviceNode) +{ + IMG_DVFS_DEVICE *psDVFSDevice = NULL; + IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg = NULL; + IMG_DVFS_GOVERNOR_CFG *psDVFSGovernorCfg = NULL; + RGX_TIMING_INFORMATION *psRGXTimingInfo = NULL; + struct device *psDev = psDeviceNode->psDevConfig->pvOSDevice; + unsigned long min_freq = 0, max_freq = 0, min_volt = 0; + PVRSRV_ERROR eError; + int err; + char* pMode = NULL; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + +#if !defined(CONFIG_PM_OPP) + return PVRSRV_ERROR_NOT_SUPPORTED; +#endif + + if (gpsDeviceNode) + { + PVR_DPF((PVR_DBG_ERROR, + "DVFS already initialised for device node %p", + gpsDeviceNode)); + return PVRSRV_ERROR_INIT_FAILURE; + } + + gpsDeviceNode = psDeviceNode; + psDVFSDevice = &psDeviceNode->psDevConfig->sDVFS.sDVFSDevice; + psDVFSDeviceCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg; + psDVFSGovernorCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSGovernorCfg; + psRGXTimingInfo = ((RGX_DATA *)psDeviceNode->psDevConfig->hDevData)->psRGXTimingInfo; + +#if defined(SUPPORT_SOC_TIMER) + if (! 
psDeviceNode->psDevConfig->pfnSoCTimerRead) + { + PVR_DPF((PVR_DBG_ERROR, "System layer SoC timer callback not implemented")); + return PVRSRV_ERROR_NOT_IMPLEMENTED; + } +#endif + + eError = SORgxGpuUtilStatsRegister(&psDVFSDevice->hGpuUtilUserDVFS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to register to the GPU utilisation stats, %d", eError)); + return eError; + } + +#if defined(CONFIG_OF) + err = dev_pm_opp_of_add_table(psDev); + if (err) + { + /* + * If there are no device tree or system layer provided operating points + * then return an error + */ + if (err != -ENODEV || !psDVFSDeviceCfg->pasOPPTable) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to init opp table from devicetree, %d", err)); + eError = TO_IMG_ERR(err); + goto err_exit; + } + } +#endif + + if (psDVFSDeviceCfg->pasOPPTable) + { + err = FillOPPTable(psDev); + if (err) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to fill OPP table with data, %d", err)); + eError = TO_IMG_ERR(err); + goto err_exit; + } + } + + err = GetOPPValues(psDev, &min_freq, &min_volt, &max_freq); + if (err) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to read OPP points, %d", err)); + eError = TO_IMG_ERR(err); + goto err_exit; + } + + img_devfreq_dev_profile.initial_freq = (pvr_dvfsmode == 1) ? max_freq : min_freq; + img_devfreq_dev_profile.polling_ms = psDVFSDeviceCfg->ui32PollMs; + + psRGXTimingInfo->ui32CoreClockSpeed = (pvr_dvfsmode == 1) ? max_freq : min_freq; + + psDVFSDeviceCfg->pfnSetFrequency((pvr_dvfsmode == 1) ? 
max_freq : min_freq); + psDVFSDeviceCfg->pfnSetVoltage(min_volt); + + psDVFSDevice->data.upthreshold = psDVFSGovernorCfg->ui32UpThreshold; + psDVFSDevice->data.downdifferential = psDVFSGovernorCfg->ui32DownDifferential; + + pMode = GetDVFSMode(); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) + psDVFSDevice->psDevFreq = devm_devfreq_add_device(psDev, + &img_devfreq_dev_profile, + pMode, + &psDVFSDevice->data); +#else + psDVFSDevice->psDevFreq = devfreq_add_device(psDev, + &img_devfreq_dev_profile, + pMode, + &psDVFSDevice->data); +#endif + + if (IS_ERR(psDVFSDevice->psDevFreq)) + { + PVR_DPF((PVR_DBG_ERROR, + "Failed to add as devfreq device %p, %ld", + psDVFSDevice->psDevFreq, + PTR_ERR(psDVFSDevice->psDevFreq))); + eError = TO_IMG_ERR(PTR_ERR(psDVFSDevice->psDevFreq)); + goto err_exit; + } + + eError = SuspendDVFS(); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVInit: Failed to suspend DVFS")); + goto err_exit; + } + +#if defined(CHROMIUMOS_KERNEL) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) + psDVFSDevice->psDevFreq->policy.user.min_freq = min_freq; + psDVFSDevice->psDevFreq->policy.user.max_freq = max_freq; +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) + psDVFSDevice->psDevFreq->scaling_min_freq = min_freq; + psDVFSDevice->psDevFreq->scaling_max_freq = max_freq; +#else + psDVFSDevice->psDevFreq->min_freq = min_freq; + psDVFSDevice->psDevFreq->max_freq = max_freq; +#endif + + err = devfreq_register_opp_notifier(psDev, psDVFSDevice->psDevFreq); + if (err) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to register opp notifier, %d", err)); + eError = TO_IMG_ERR(err); + goto err_exit; + } + +#if defined(CONFIG_DEVFREQ_THERMAL) + err = RegisterCoolingDevice(psDev, psDVFSDevice, psDVFSDeviceCfg->psPowerOps); + if (err) + { + eError = TO_IMG_ERR(err); + goto err_exit; + } +#endif + + PVR_TRACE(("PVR DVFS activated: %lu-%lu Hz, Period: %ums", + min_freq, + max_freq, + 
psDVFSDeviceCfg->ui32PollMs)); + + return PVRSRV_OK; + +err_exit: + DeinitDVFS(psDeviceNode); + return eError; +} + +void DeinitDVFS(PPVRSRV_DEVICE_NODE psDeviceNode) +{ + IMG_DVFS_DEVICE *psDVFSDevice = &psDeviceNode->psDevConfig->sDVFS.sDVFSDevice; + struct device *psDev = psDeviceNode->psDevConfig->pvOSDevice; + IMG_INT32 i32Error; + + PVRSRV_VZ_RETN_IF_MODE(GUEST); + + PVR_ASSERT(psDeviceNode == gpsDeviceNode); + + if (! psDVFSDevice) + { + return; + } + +#if defined(CONFIG_DEVFREQ_THERMAL) + if (!IS_ERR_OR_NULL(psDVFSDevice->psDevfreqCoolingDevice)) + { + devfreq_cooling_unregister(psDVFSDevice->psDevfreqCoolingDevice); + psDVFSDevice->psDevfreqCoolingDevice = NULL; + } +#endif + + if (psDVFSDevice->psDevFreq) + { + i32Error = devfreq_unregister_opp_notifier(psDev, psDVFSDevice->psDevFreq); + if (i32Error < 0) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to unregister OPP notifier")); + } + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)) + devfreq_remove_device(psDVFSDevice->psDevFreq); +#else + devm_devfreq_remove_device(psDev, psDVFSDevice->psDevFreq); +#endif + + psDVFSDevice->psDevFreq = NULL; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)) + kfree(img_devfreq_dev_profile.freq_table); +#endif + + /* Remove OPP entries for this device */ + ClearOPPTable(psDev); + +#if defined(CONFIG_OF) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) || \ + (defined(CHROMIUMOS_KERNEL) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))) + dev_pm_opp_of_remove_table(psDev); +#endif +#endif + + SORgxGpuUtilStatsUnregister(psDVFSDevice->hGpuUtilUserDVFS); + psDVFSDevice->hGpuUtilUserDVFS = NULL; + + gpsDeviceNode = NULL; +} + +PVRSRV_ERROR SuspendDVFS(void) +{ + IMG_DVFS_DEVICE *psDVFSDevice = &gpsDeviceNode->psDevConfig->sDVFS.sDVFSDevice; + + psDVFSDevice->bEnabled = IMG_FALSE; + + return PVRSRV_OK; +} + +PVRSRV_ERROR ResumeDVFS(void) +{ + IMG_DVFS_DEVICE *psDVFSDevice = 
&gpsDeviceNode->psDevConfig->sDVFS.sDVFSDevice; + + /* Not supported in GuestOS drivers */ + psDVFSDevice->bEnabled = !PVRSRV_VZ_MODE_IS(GUEST); + + return PVRSRV_OK; +} + +#endif /* !NO_HARDWARE */ diff --git a/drivers/gpu/drm/phytium/octopus/pvr_dvfs_device.h b/drivers/gpu/drm/phytium/octopus/pvr_dvfs_device.h new file mode 100644 index 000000000000..bfd6be81d124 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_dvfs_device.h @@ -0,0 +1,58 @@ +/*************************************************************************/ /*! +@File pvr_dvfs.c +@Title System level interface for DVFS +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef PVR_DVFS_DEVICE_H +#define PVR_DVFS_DEVICE_H + +#include "opaque_types.h" +#include "pvrsrv_error.h" + + +PVRSRV_ERROR InitDVFS(PPVRSRV_DEVICE_NODE psDeviceNode); + +void DeinitDVFS(PPVRSRV_DEVICE_NODE psDeviceNode); + +PVRSRV_ERROR SuspendDVFS(void); + +PVRSRV_ERROR ResumeDVFS(void); + +#endif /* PVR_DVFS_DEVICE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/pvr_fd_sync_kernel.h b/drivers/gpu/drm/phytium/octopus/pvr_fd_sync_kernel.h new file mode 100644 index 000000000000..7104027b88c1 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_fd_sync_kernel.h @@ -0,0 +1,76 @@ +/*************************************************************************/ /*! +@File pvr_fd_sync_kernel.h +@Title Kernel/userspace interface definitions to use the kernel sync + driver +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + + +#ifndef _PVR_FD_SYNC_KERNEL_H_ +#define _PVR_FD_SYNC_KERNEL_H_ + +#include +#include + +#define PVR_SYNC_MAX_QUERY_FENCE_POINTS 14 + +#define PVR_SYNC_IOC_MAGIC 'W' + +#define PVR_SYNC_IOC_RENAME \ + _IOW(PVR_SYNC_IOC_MAGIC, 4, struct pvr_sync_rename_ioctl_data) + +#define PVR_SYNC_IOC_FORCE_SW_ONLY \ + _IO(PVR_SYNC_IOC_MAGIC, 5) + +struct pvr_sync_pt_info { + /* Output */ + __u32 id; + __u32 ui32FWAddr; + __u32 ui32CurrOp; + __u32 ui32NextOp; + __u32 ui32TlTaken; +} __attribute__((packed, aligned(8))); + +struct pvr_sync_rename_ioctl_data +{ + /* Input */ + char szName[32]; +} __attribute__((packed, aligned(8))); + +#endif /* _PVR_FD_SYNC_KERNEL_H_ */ diff --git a/drivers/gpu/drm/phytium/octopus/pvr_fence.c b/drivers/gpu/drm/phytium/octopus/pvr_fence.c new file mode 100644 index 000000000000..60eba25a8e2f --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_fence.c @@ -0,0 +1,1046 @@ +/* + * @File + * @Title PhytiumVR Linux fence interface + * @Codingstyle LinuxKernel + * @Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved + * @License Dual MIT/GPLv2 + * + * The contents of this file are subject to the MIT license as set out below. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * Alternatively, the contents of this file may be used under the terms of + * the GNU General Public License Version 2 ("GPL") in which case the provisions + * of GPL are applicable instead of those above. + * + * If you wish to allow use of your version of this file only under the terms of + * GPL, and not to allow others to use your version of this file under the terms + * of the MIT license, indicate your decision by deleting the provisions above + * and replace them with the notice and other provisions required by GPL as set + * out in the file called "GPL-COPYING" included in this distribution. If you do + * not delete the provisions above, a recipient may use your version of this file + * under the terms of either the MIT license or GPL. + * + * This License is also included in this distribution in the file called + * "MIT-COPYING". + * + * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS + * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING + * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include +#include + +#include "pvr_fence.h" +#include "services_kernel_client.h" +#include "sync_checkpoint_external.h" + +#define CREATE_TRACE_POINTS +#include "pvr_fence_trace.h" + +/* This header must always be included last */ +#include "kernel_compatibility.h" + +/* Global kmem_cache for pvr_fence object allocations */ +static struct kmem_cache *pvr_fence_cache; +static DEFINE_MUTEX(pvr_fence_cache_mutex); +static u32 pvr_fence_cache_refcount; + +#define PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, fmt, ...) 
\ + do { \ + if (pfnDumpDebugPrintf) \ + pfnDumpDebugPrintf(pvDumpDebugFile, fmt, \ + ## __VA_ARGS__); \ + else \ + pr_err(fmt "\n", ## __VA_ARGS__); \ + } while (0) + +static inline void +pvr_fence_sync_signal(struct pvr_fence *pvr_fence, u32 fence_sync_flags) +{ + SyncCheckpointSignal(pvr_fence->sync_checkpoint, fence_sync_flags); +} + +static inline bool +pvr_fence_sync_is_signaled(struct pvr_fence *pvr_fence, u32 fence_sync_flags) +{ + return SyncCheckpointIsSignalled(pvr_fence->sync_checkpoint, + fence_sync_flags); +} + +static inline u32 +pvr_fence_sync_value(struct pvr_fence *pvr_fence) +{ + if (SyncCheckpointIsErrored(pvr_fence->sync_checkpoint, + PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) + return PVRSRV_SYNC_CHECKPOINT_ERRORED; + else if (SyncCheckpointIsSignalled(pvr_fence->sync_checkpoint, + PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) + return PVRSRV_SYNC_CHECKPOINT_SIGNALLED; + else + return PVRSRV_SYNC_CHECKPOINT_ACTIVE; +} + +static void +pvr_fence_context_check_status(struct work_struct *data) +{ + PVRSRVCheckStatus(NULL); +} + +void +pvr_context_value_str(struct pvr_fence_context *fctx, char *str, int size) +{ + snprintf(str, size, + "%u ctx=%llu refs=%u", + atomic_read(&fctx->fence_seqno), + fctx->fence_context, + refcount_read(&fctx->kref.refcount)); +} + +static void +pvr_fence_context_fences_dump(struct pvr_fence_context *fctx, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + struct pvr_fence *pvr_fence; + unsigned long flags; + char value[128]; + + spin_lock_irqsave(&fctx->list_lock, flags); + pvr_context_value_str(fctx, value, sizeof(value)); + PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, + "%s: @%s", fctx->name, value); + list_for_each_entry(pvr_fence, &fctx->fence_list, fence_head) { + struct dma_fence *fence = pvr_fence->fence; + const char *timeline_value_str = "unknown timeline value"; + const char *fence_value_str = "unknown fence value"; + + pvr_fence->base.ops->fence_value_str(&pvr_fence->base, value, + 
sizeof(value)); + PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, + " @%s", value); + + if (is_pvr_fence(fence)) + continue; + + if (fence->ops->timeline_value_str) { + fence->ops->timeline_value_str(fence, value, + sizeof(value)); + timeline_value_str = value; + } + + PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, + " | %s: %s (driver: %s)", + fence->ops->get_timeline_name(fence), + timeline_value_str, + fence->ops->get_driver_name(fence)); + + if (fence->ops->fence_value_str) { + fence->ops->fence_value_str(fence, value, + sizeof(value)); + fence_value_str = value; + } + + PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, + " | @%s (foreign)", value); + } + spin_unlock_irqrestore(&fctx->list_lock, flags); +} + +static inline unsigned int +pvr_fence_context_seqno_next(struct pvr_fence_context *fctx) +{ + return atomic_inc_return(&fctx->fence_seqno) - 1; +} + +/* This function prepends seqno to fence name */ +static inline void +pvr_fence_prepare_name(char *fence_name, size_t fence_name_size, + const char *name, unsigned int seqno) +{ + unsigned int len; + + len = OSStringUINT32ToStr(fence_name, fence_name_size, seqno); + if (likely((len > 0) && (fence_name_size >= (len + 1)))) { + fence_name[len] = '-'; + fence_name[len + 1] = '\0'; + } + strlcat(fence_name, name, fence_name_size); +} + +static void +pvr_fence_sched_free(struct rcu_head *rcu) +{ + struct pvr_fence *pvr_fence = container_of(rcu, struct pvr_fence, rcu); + + kmem_cache_free(pvr_fence_cache, pvr_fence); +} + +static inline void +pvr_fence_context_free_deferred(struct pvr_fence_context *fctx) +{ + struct pvr_fence *pvr_fence, *tmp; + LIST_HEAD(deferred_free_list); + unsigned long flags; + + spin_lock_irqsave(&fctx->list_lock, flags); + list_for_each_entry_safe(pvr_fence, tmp, + &fctx->deferred_free_list, + fence_head) + list_move(&pvr_fence->fence_head, &deferred_free_list); + spin_unlock_irqrestore(&fctx->list_lock, flags); + + list_for_each_entry_safe(pvr_fence, tmp, + 
&deferred_free_list, + fence_head) { + list_del(&pvr_fence->fence_head); + SyncCheckpointFree(pvr_fence->sync_checkpoint); + call_rcu(&pvr_fence->rcu, pvr_fence_sched_free); + module_put(THIS_MODULE); + } +} + +void +pvr_fence_context_free_deferred_callback(void *data) +{ + struct pvr_fence_context *fctx = (struct pvr_fence_context *)data; + + /* + * Free up any fence objects we have deferred freeing. + */ + pvr_fence_context_free_deferred(fctx); +} + +static void +pvr_fence_context_signal_fences(void *data) +{ + struct pvr_fence_context *fctx = (struct pvr_fence_context *)data; + struct pvr_fence *pvr_fence, *tmp; + unsigned long flags1; + + LIST_HEAD(signal_list); + + /* + * We can't call fence_signal while holding the lock as we can end up + * in a situation whereby pvr_fence_foreign_signal_sync, which also + * takes the list lock, ends up being called as a result of the + * fence_signal below, i.e. fence_signal(fence) -> fence->callback() + * -> fence_signal(foreign_fence) -> foreign_fence->callback() where + * the foreign_fence callback is pvr_fence_foreign_signal_sync. + * + * So extract the items we intend to signal and add them to their own + * queue. 
+ */ + spin_lock_irqsave(&fctx->list_lock, flags1); + list_for_each_entry_safe(pvr_fence, tmp, &fctx->signal_list, signal_head) { + if (pvr_fence_sync_is_signaled(pvr_fence, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) + list_move_tail(&pvr_fence->signal_head, &signal_list); + } + spin_unlock_irqrestore(&fctx->list_lock, flags1); + + list_for_each_entry_safe(pvr_fence, tmp, &signal_list, signal_head) { + + PVR_FENCE_TRACE(&pvr_fence->base, "signalled fence (%s)\n", + pvr_fence->name); + trace_pvr_fence_signal_fence(pvr_fence); + spin_lock_irqsave(&pvr_fence->fctx->list_lock, flags1); + list_del(&pvr_fence->signal_head); + spin_unlock_irqrestore(&pvr_fence->fctx->list_lock, flags1); + dma_fence_signal(pvr_fence->fence); + dma_fence_put(pvr_fence->fence); + } + + /* + * Take this opportunity to free up any fence objects we + * have deferred freeing. + */ + pvr_fence_context_free_deferred(fctx); +} + +void +pvr_fence_context_signal_fences_nohw(void *data) +{ + pvr_fence_context_signal_fences(data); +} + +static void +pvr_fence_context_destroy_work(struct work_struct *data) +{ + struct pvr_fence_context *fctx = + container_of(data, struct pvr_fence_context, destroy_work); + + pvr_fence_context_free_deferred(fctx); + + if (WARN_ON(!list_empty_careful(&fctx->fence_list))) + pvr_fence_context_fences_dump(fctx, NULL, NULL); + + PVRSRVUnregisterDbgRequestNotify(fctx->dbg_request_handle); + PVRSRVUnregisterCmdCompleteNotify(fctx->cmd_complete_handle); + + // wait for all fences to be freed before kmem_cache_destroy() is called + rcu_barrier(); + + /* Destroy pvr_fence object cache, if no one is using it */ + WARN_ON(pvr_fence_cache == NULL); + mutex_lock(&pvr_fence_cache_mutex); + if (--pvr_fence_cache_refcount == 0) + kmem_cache_destroy(pvr_fence_cache); + mutex_unlock(&pvr_fence_cache_mutex); + + kfree(fctx); +} + +static void +pvr_fence_context_debug_request(void *data, u32 verbosity, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + struct 
pvr_fence_context *fctx = (struct pvr_fence_context *)data; + + if (DD_VERB_LVL_ENABLED(verbosity, DEBUG_REQUEST_VERBOSITY_MEDIUM)) + pvr_fence_context_fences_dump(fctx, pfnDumpDebugPrintf, + pvDumpDebugFile); +} + +/** + * pvr_fence_context_create - creates a PVR fence context + * @dev_cookie: services device cookie + * @name: context name (used for debugging) + * + * Creates a PVR fence context that can be used to create PVR fences or to + * create PVR fences from an existing fence. + * + * pvr_fence_context_destroy should be called to clean up the fence context. + * + * Returns NULL if a context cannot be created. + */ +struct pvr_fence_context * +pvr_fence_context_create(void *dev_cookie, + struct workqueue_struct *fence_status_wq, + const char *name) +{ + struct pvr_fence_context *fctx; + PVRSRV_ERROR srv_err; + + fctx = kzalloc(sizeof(*fctx), GFP_KERNEL); + if (!fctx) + return NULL; + + spin_lock_init(&fctx->lock); + atomic_set(&fctx->fence_seqno, 0); + INIT_WORK(&fctx->check_status_work, pvr_fence_context_check_status); + INIT_WORK(&fctx->destroy_work, pvr_fence_context_destroy_work); + spin_lock_init(&fctx->list_lock); + INIT_LIST_HEAD(&fctx->signal_list); + INIT_LIST_HEAD(&fctx->fence_list); + INIT_LIST_HEAD(&fctx->deferred_free_list); + + fctx->fence_wq = fence_status_wq; + + fctx->fence_context = dma_fence_context_alloc(1); + strlcpy(fctx->name, name, sizeof(fctx->name)); + + srv_err = PVRSRVRegisterCmdCompleteNotify(&fctx->cmd_complete_handle, + pvr_fence_context_signal_fences, + fctx); + if (srv_err != PVRSRV_OK) { + pr_err("%s: failed to register command complete callback (%s)\n", + __func__, PVRSRVGetErrorString(srv_err)); + goto err_free_fctx; + } + + /* Create pvr_fence object cache, if not already created */ + mutex_lock(&pvr_fence_cache_mutex); + if (pvr_fence_cache_refcount == 0) { + pvr_fence_cache = KMEM_CACHE(pvr_fence, 0); + if (!pvr_fence_cache) { + pr_err("%s: failed to allocate pvr_fence cache\n", + __func__); + 
mutex_unlock(&pvr_fence_cache_mutex); + goto err_unregister_cmd_complete_notify; + } + } + pvr_fence_cache_refcount++; + mutex_unlock(&pvr_fence_cache_mutex); + + srv_err = PVRSRVRegisterDbgRequestNotify(&fctx->dbg_request_handle, + dev_cookie, + pvr_fence_context_debug_request, + DEBUG_REQUEST_LINUXFENCE, + fctx); + if (srv_err != PVRSRV_OK) { + pr_err("%s: failed to register debug request callback (%s)\n", + __func__, PVRSRVGetErrorString(srv_err)); + goto err_free_pvr_fence_cache; + } + + kref_init(&fctx->kref); + + PVR_FENCE_CTX_TRACE(fctx, "created fence context (%s)\n", name); + trace_pvr_fence_context_create(fctx); + + return fctx; + +err_free_pvr_fence_cache: + mutex_lock(&pvr_fence_cache_mutex); + if (--pvr_fence_cache_refcount == 0) + kmem_cache_destroy(pvr_fence_cache); + mutex_unlock(&pvr_fence_cache_mutex); +err_unregister_cmd_complete_notify: + PVRSRVUnregisterCmdCompleteNotify(fctx->cmd_complete_handle); +err_free_fctx: + kfree(fctx); + return NULL; +} + +static void pvr_fence_context_destroy_kref(struct kref *kref) +{ + struct pvr_fence_context *fctx = + container_of(kref, struct pvr_fence_context, kref); + + PVR_FENCE_CTX_TRACE(fctx, "destroyed fence context (%s)\n", fctx->name); + + trace_pvr_fence_context_destroy_kref(fctx); + + schedule_work(&fctx->destroy_work); +} + +/** + * pvr_fence_context_destroy - destroys a context + * @fctx: PVR fence context to destroy + * + * Destroys a PVR fence context with the expectation that all fences have been + * destroyed. 
+ */ +void +pvr_fence_context_destroy(struct pvr_fence_context *fctx) +{ + trace_pvr_fence_context_destroy(fctx); + + kref_put(&fctx->kref, pvr_fence_context_destroy_kref); +} + +static const char * +pvr_fence_get_driver_name(struct dma_fence *fence) +{ + return PVR_LDM_DRIVER_REGISTRATION_NAME; +} + +static const char * +pvr_fence_get_timeline_name(struct dma_fence *fence) +{ + struct pvr_fence *pvr_fence = to_pvr_fence(fence); + + if (pvr_fence) + return pvr_fence->fctx->name; + return NULL; +} + +static +void pvr_fence_fence_value_str(struct dma_fence *fence, char *str, int size) +{ + struct pvr_fence *pvr_fence = to_pvr_fence(fence); + + if (!pvr_fence) + return; + + snprintf(str, size, + "%llu: (%s%s) refs=%u fwaddr=%#08x enqueue=%u status=%-9s %s%s", + (u64) pvr_fence->fence->seqno, + test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, + &pvr_fence->fence->flags) ? "+" : "-", + test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &pvr_fence->fence->flags) ? "+" : "-", + refcount_read(&pvr_fence->fence->refcount.refcount), + SyncCheckpointGetFirmwareAddr( + pvr_fence->sync_checkpoint), + SyncCheckpointGetEnqueuedCount(pvr_fence->sync_checkpoint), + SyncCheckpointGetStateString(pvr_fence->sync_checkpoint), + pvr_fence->name, + (&pvr_fence->base != pvr_fence->fence) ? 
+ "(foreign)" : ""); +} + +static +void pvr_fence_timeline_value_str(struct dma_fence *fence, char *str, int size) +{ + struct pvr_fence *pvr_fence = to_pvr_fence(fence); + + if (pvr_fence) + pvr_context_value_str(pvr_fence->fctx, str, size); +} + +static bool +pvr_fence_enable_signaling(struct dma_fence *fence) +{ + struct pvr_fence *pvr_fence = to_pvr_fence(fence); + unsigned long flags; + + if (!pvr_fence) + return false; + + WARN_ON_SMP(!spin_is_locked(&pvr_fence->fctx->lock)); + + if (pvr_fence_sync_is_signaled(pvr_fence, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) + return false; + + dma_fence_get(&pvr_fence->base); + + spin_lock_irqsave(&pvr_fence->fctx->list_lock, flags); + list_add_tail(&pvr_fence->signal_head, &pvr_fence->fctx->signal_list); + spin_unlock_irqrestore(&pvr_fence->fctx->list_lock, flags); + + PVR_FENCE_TRACE(&pvr_fence->base, "signalling enabled (%s)\n", + pvr_fence->name); + trace_pvr_fence_enable_signaling(pvr_fence); + + return true; +} + +static bool +pvr_fence_is_signaled(struct dma_fence *fence) +{ + struct pvr_fence *pvr_fence = to_pvr_fence(fence); + + if (pvr_fence) + return pvr_fence_sync_is_signaled(pvr_fence, + PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT); + return false; +} + +static void +pvr_fence_release(struct dma_fence *fence) +{ + struct pvr_fence *pvr_fence = to_pvr_fence(fence); + unsigned long flags; + + if (pvr_fence) { + struct pvr_fence_context *fctx = pvr_fence->fctx; + + PVR_FENCE_TRACE(&pvr_fence->base, "released fence (%s)\n", + pvr_fence->name); + trace_pvr_fence_release(pvr_fence); + + spin_lock_irqsave(&fctx->list_lock, flags); + list_move(&pvr_fence->fence_head, + &fctx->deferred_free_list); + spin_unlock_irqrestore(&fctx->list_lock, flags); + + kref_put(&fctx->kref, pvr_fence_context_destroy_kref); + } +} + +const struct dma_fence_ops pvr_fence_ops = { + .get_driver_name = pvr_fence_get_driver_name, + .get_timeline_name = pvr_fence_get_timeline_name, + .fence_value_str = pvr_fence_fence_value_str, + .timeline_value_str = 
pvr_fence_timeline_value_str, + .enable_signaling = pvr_fence_enable_signaling, + .signaled = pvr_fence_is_signaled, + .wait = dma_fence_default_wait, + .release = pvr_fence_release, +}; + +/** + * pvr_fence_create - creates a PVR fence + * @fctx: PVR fence context on which the PVR fence should be created + * @sync_checkpoint_ctx: context in which to create sync checkpoints + * @timeline_fd: timeline on which the PVR fence should be created + * @name: PVR fence name (used for debugging) + * + * Creates a PVR fence. + * + * Once the fence is finished with, pvr_fence_destroy should be called. + * + * Returns NULL if a PVR fence cannot be created. + */ +struct pvr_fence * +pvr_fence_create(struct pvr_fence_context *fctx, + struct _SYNC_CHECKPOINT_CONTEXT *sync_checkpoint_ctx, + int timeline_fd, const char *name) +{ + struct pvr_fence *pvr_fence; + unsigned int seqno; + unsigned long flags; + PVRSRV_ERROR srv_err; + + if (!try_module_get(THIS_MODULE)) + goto err_exit; + + /* Note: As kmem_cache is used to allocate pvr_fence objects, + * make sure that all members of pvr_fence struct are initialized + * here + */ + pvr_fence = kmem_cache_alloc(pvr_fence_cache, GFP_KERNEL); + if (unlikely(!pvr_fence)) + goto err_module_put; + + srv_err = SyncCheckpointAlloc(sync_checkpoint_ctx, + (PVRSRV_TIMELINE) timeline_fd, PVRSRV_NO_FENCE, + name, &pvr_fence->sync_checkpoint); + if (unlikely(srv_err != PVRSRV_OK)) + goto err_free_fence; + + INIT_LIST_HEAD(&pvr_fence->fence_head); + INIT_LIST_HEAD(&pvr_fence->signal_head); + pvr_fence->fctx = fctx; + seqno = pvr_fence_context_seqno_next(fctx); + /* Add the seqno to the fence name for easier debugging */ + pvr_fence_prepare_name(pvr_fence->name, sizeof(pvr_fence->name), + name, seqno); + + /* Reset cb to zero */ + memset(&pvr_fence->cb, 0, sizeof(pvr_fence->cb)); + pvr_fence->fence = &pvr_fence->base; + + dma_fence_init(&pvr_fence->base, &pvr_fence_ops, &fctx->lock, + fctx->fence_context, seqno); + + spin_lock_irqsave(&fctx->list_lock, 
flags); + list_add_tail(&pvr_fence->fence_head, &fctx->fence_list); + spin_unlock_irqrestore(&fctx->list_lock, flags); + + kref_get(&fctx->kref); + + PVR_FENCE_TRACE(&pvr_fence->base, "created fence (%s)\n", name); + trace_pvr_fence_create(pvr_fence); + + return pvr_fence; + +err_free_fence: + kmem_cache_free(pvr_fence_cache, pvr_fence); +err_module_put: + module_put(THIS_MODULE); +err_exit: + return NULL; +} + +static const char * +pvr_fence_foreign_get_driver_name(struct dma_fence *fence) +{ + return PVR_LDM_DRIVER_REGISTRATION_NAME; +} + +static const char * +pvr_fence_foreign_get_timeline_name(struct dma_fence *fence) +{ + return "foreign"; +} + +static +void pvr_fence_foreign_fence_value_str(struct dma_fence *fence, char *str, + int size) +{ + struct pvr_fence *pvr_fence = to_pvr_fence(fence); + u32 sync_addr = 0; + u32 sync_value_next; + + if (WARN_ON(!pvr_fence)) + return; + + sync_addr = SyncCheckpointGetFirmwareAddr(pvr_fence->sync_checkpoint); + sync_value_next = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; + + /* + * Include the fence flag bits from the foreign fence instead of our + * shadow copy. This is done as the shadow fence flag bits aren't used. + */ + snprintf(str, size, + "%llu: (%s%s) refs=%u fwaddr=%#08x cur=%#08x nxt=%#08x %s", + (u64) fence->seqno, + test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, + &pvr_fence->fence->flags) ? "+" : "-", + test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &pvr_fence->fence->flags) ? 
"+" : "-", + refcount_read(&fence->refcount.refcount), + sync_addr, + pvr_fence_sync_value(pvr_fence), + sync_value_next, + pvr_fence->name); +} + +static +void pvr_fence_foreign_timeline_value_str(struct dma_fence *fence, char *str, + int size) +{ + struct pvr_fence *pvr_fence = to_pvr_fence(fence); + + if (pvr_fence) + pvr_context_value_str(pvr_fence->fctx, str, size); +} + +static bool +pvr_fence_foreign_enable_signaling(struct dma_fence *fence) +{ + WARN_ON("cannot enable signalling on foreign fence"); + return false; +} + +static signed long +pvr_fence_foreign_wait(struct dma_fence *fence, bool intr, signed long timeout) +{ + WARN_ON("cannot wait on foreign fence"); + return 0; +} + +static void +pvr_fence_foreign_release(struct dma_fence *fence) +{ + struct pvr_fence *pvr_fence = to_pvr_fence(fence); + unsigned long flags; + + if (pvr_fence) { + struct pvr_fence_context *fctx = pvr_fence->fctx; + struct dma_fence *foreign_fence = pvr_fence->fence; + + PVR_FENCE_TRACE(&pvr_fence->base, + "released fence for foreign fence %llu#%d (%s)\n", + (u64) pvr_fence->fence->context, + pvr_fence->fence->seqno, pvr_fence->name); + trace_pvr_fence_foreign_release(pvr_fence); + + spin_lock_irqsave(&fctx->list_lock, flags); + list_move(&pvr_fence->fence_head, + &fctx->deferred_free_list); + spin_unlock_irqrestore(&fctx->list_lock, flags); + + dma_fence_put(foreign_fence); + + kref_put(&fctx->kref, + pvr_fence_context_destroy_kref); + } +} + +const struct dma_fence_ops pvr_fence_foreign_ops = { + .get_driver_name = pvr_fence_foreign_get_driver_name, + .get_timeline_name = pvr_fence_foreign_get_timeline_name, + .fence_value_str = pvr_fence_foreign_fence_value_str, + .timeline_value_str = pvr_fence_foreign_timeline_value_str, + .enable_signaling = pvr_fence_foreign_enable_signaling, + .wait = pvr_fence_foreign_wait, + .release = pvr_fence_foreign_release, +}; + +static void +pvr_fence_foreign_signal_sync(struct dma_fence *fence, struct dma_fence_cb *cb) +{ + struct pvr_fence 
*pvr_fence = container_of(cb, struct pvr_fence, cb); + struct pvr_fence_context *fctx = pvr_fence->fctx; + + WARN_ON_ONCE(is_pvr_fence(fence)); + + /* Callback registered by dma_fence_add_callback can be called from an atomic ctx */ + pvr_fence_sync_signal(pvr_fence, PVRSRV_FENCE_FLAG_CTX_ATOMIC); + + trace_pvr_fence_foreign_signal(pvr_fence); + + queue_work(fctx->fence_wq, &fctx->check_status_work); + + PVR_FENCE_TRACE(&pvr_fence->base, + "foreign fence %llu#%d signalled (%s)\n", + (u64) pvr_fence->fence->context, + pvr_fence->fence->seqno, pvr_fence->name); + + /* Drop the reference on the base fence */ + dma_fence_put(&pvr_fence->base); +} + +/** + * pvr_fence_create_from_fence - creates a PVR fence from a fence + * @fctx: PVR fence context on which the PVR fence should be created + * @sync_checkpoint_ctx: context in which to create sync checkpoints + * @fence: fence from which the PVR fence should be created + * @fence_fd: fd for the sync file to which the fence belongs. If it doesn't + * belong to a sync file then PVRSRV_NO_FENCE should be given + * instead. + * @name: PVR fence name (used for debugging) + * + * Creates a PVR fence from an existing fence. If the fence is a foreign fence, + * i.e. one that doesn't originate from a PVR fence context, then a new PVR + * fence will be created using the specified sync_checkpoint_context. + * Otherwise, a reference will be taken on the underlying fence and the PVR + * fence will be returned. + * + * Once the fence is finished with, pvr_fence_destroy should be called. + * + * Returns NULL if a PVR fence cannot be created. 
+ */ + +struct pvr_fence * +pvr_fence_create_from_fence(struct pvr_fence_context *fctx, + struct _SYNC_CHECKPOINT_CONTEXT *sync_checkpoint_ctx, + struct dma_fence *fence, + PVRSRV_FENCE fence_fd, + const char *name) +{ + struct pvr_fence *pvr_fence = to_pvr_fence(fence); + unsigned int seqno; + unsigned long flags; + PVRSRV_ERROR srv_err; + int err; + + if (pvr_fence) { + if (WARN_ON(fence->ops == &pvr_fence_foreign_ops)) + return NULL; + dma_fence_get(fence); + + PVR_FENCE_TRACE(fence, "created fence from PVR fence (%s)\n", + name); + return pvr_fence; + } + + if (!try_module_get(THIS_MODULE)) + goto err_exit; + + /* Note: As kmem_cache is used to allocate pvr_fence objects, + * make sure that all members of pvr_fence struct are initialized + * here + */ + pvr_fence = kmem_cache_alloc(pvr_fence_cache, GFP_KERNEL); + if (!pvr_fence) + goto err_module_put; + + srv_err = SyncCheckpointAlloc(sync_checkpoint_ctx, + SYNC_CHECKPOINT_FOREIGN_CHECKPOINT, + fence_fd, + name, &pvr_fence->sync_checkpoint); + if (srv_err != PVRSRV_OK) + goto err_free_pvr_fence; + + INIT_LIST_HEAD(&pvr_fence->fence_head); + INIT_LIST_HEAD(&pvr_fence->signal_head); + pvr_fence->fctx = fctx; + pvr_fence->fence = dma_fence_get(fence); + seqno = pvr_fence_context_seqno_next(fctx); + /* Add the seqno to the fence name for easier debugging */ + pvr_fence_prepare_name(pvr_fence->name, sizeof(pvr_fence->name), + name, seqno); + + /* + * We use the base fence to refcount the PVR fence and to do the + * necessary clean up once the refcount drops to 0. + */ + dma_fence_init(&pvr_fence->base, &pvr_fence_foreign_ops, &fctx->lock, + fctx->fence_context, seqno); + + /* + * Take an extra reference on the base fence that gets dropped when the + * foreign fence is signalled. 
+ */ + dma_fence_get(&pvr_fence->base); + + spin_lock_irqsave(&fctx->list_lock, flags); + list_add_tail(&pvr_fence->fence_head, &fctx->fence_list); + spin_unlock_irqrestore(&fctx->list_lock, flags); + kref_get(&fctx->kref); + + PVR_FENCE_TRACE(&pvr_fence->base, + "created fence from foreign fence %llu#%d (%s)\n", + (u64) pvr_fence->fence->context, + pvr_fence->fence->seqno, name); + + err = dma_fence_add_callback(fence, &pvr_fence->cb, + pvr_fence_foreign_signal_sync); + if (err) { + if (err != -ENOENT) { + pr_err("%s: failed to add fence callback (err=%d)", + __func__, err); + goto err_put_ref; + } + + /* + * The fence has already signalled so set the sync as signalled. + * The "signalled" hwperf packet should be emitted because the + * callback won't be called for already signalled fence hence, + * PVRSRV_FENCE_FLAG_NONE flag. + */ + pvr_fence_sync_signal(pvr_fence, PVRSRV_FENCE_FLAG_NONE); + PVR_FENCE_TRACE(&pvr_fence->base, + "foreign fence %llu#%d already signaled (%s)\n", + (u64) pvr_fence->fence->context, + pvr_fence->fence->seqno, + name); + dma_fence_put(&pvr_fence->base); + } + + trace_pvr_fence_foreign_create(pvr_fence); + + return pvr_fence; + +err_put_ref: + kref_put(&fctx->kref, pvr_fence_context_destroy_kref); + spin_lock_irqsave(&fctx->list_lock, flags); + list_del(&pvr_fence->fence_head); + spin_unlock_irqrestore(&fctx->list_lock, flags); + SyncCheckpointFree(pvr_fence->sync_checkpoint); +err_free_pvr_fence: + kmem_cache_free(pvr_fence_cache, pvr_fence); +err_module_put: + module_put(THIS_MODULE); +err_exit: + return NULL; +} + +/** + * pvr_fence_destroy - destroys a PVR fence + * @pvr_fence: PVR fence to destroy + * + * Destroys a PVR fence. Upon return, the PVR fence may still exist if something + * else still references the underlying fence, e.g. a reservation object, or if + * software signalling has been enabled and the fence hasn't yet been signalled. 
+ */ +void +pvr_fence_destroy(struct pvr_fence *pvr_fence) +{ + PVR_FENCE_TRACE(&pvr_fence->base, "destroyed fence (%s)\n", + pvr_fence->name); + + dma_fence_put(&pvr_fence->base); +} + +/** + * pvr_fence_sw_signal - signals a PVR fence sync + * @pvr_fence: PVR fence to signal + * + * Sets the PVR fence sync value to signalled. + * + * Returns -EINVAL if the PVR fence represents a foreign fence. + */ +int +pvr_fence_sw_signal(struct pvr_fence *pvr_fence) +{ + if (!is_our_fence(pvr_fence->fctx, &pvr_fence->base)) + return -EINVAL; + + pvr_fence_sync_signal(pvr_fence, PVRSRV_FENCE_FLAG_NONE); + + queue_work(pvr_fence->fctx->fence_wq, + &pvr_fence->fctx->check_status_work); + + PVR_FENCE_TRACE(&pvr_fence->base, "sw set fence sync signalled (%s)\n", + pvr_fence->name); + + return 0; +} + +/** + * pvr_fence_sw_error - errors the sync checkpoint backing a PVR fence + * @pvr_fence: PVR fence to error + * + * Sets the PVR fence sync checkpoint value to errored. + * + * Returns -EINVAL if the PVR fence represents a foreign fence. 
+ */ +int +pvr_fence_sw_error(struct pvr_fence *pvr_fence) +{ + if (!is_our_fence(pvr_fence->fctx, &pvr_fence->base)) + return -EINVAL; + + SyncCheckpointError(pvr_fence->sync_checkpoint, PVRSRV_FENCE_FLAG_NONE); + PVR_FENCE_TRACE(&pvr_fence->base, "sw set fence sync errored (%s)\n", + pvr_fence->name); + + return 0; +} + +int +pvr_fence_get_checkpoints(struct pvr_fence **pvr_fences, u32 nr_fences, + struct _SYNC_CHECKPOINT **fence_checkpoints) +{ + struct _SYNC_CHECKPOINT **next_fence_checkpoint = fence_checkpoints; + struct pvr_fence **next_pvr_fence = pvr_fences; + int fence_checkpoint_idx; + + if (nr_fences > 0) { + + for (fence_checkpoint_idx = 0; fence_checkpoint_idx < nr_fences; + fence_checkpoint_idx++) { + struct pvr_fence *next_fence = *next_pvr_fence++; + *next_fence_checkpoint++ = next_fence->sync_checkpoint; + /* Take reference on sync checkpoint (will be dropped + * later by kick code) + */ + SyncCheckpointTakeRef(next_fence->sync_checkpoint); + } + } + + return 0; +} + +struct _SYNC_CHECKPOINT * +pvr_fence_get_checkpoint(struct pvr_fence *update_fence) +{ + return update_fence->sync_checkpoint; +} + +/** + * pvr_fence_dump_info_on_stalled_ufos - displays debug + * information on a native fence associated with any of + * the ufos provided. This function will be called from + * pvr_sync_file.c if the driver determines any GPU work + * is stuck waiting for a sync checkpoint representing a + * foreign sync to be signalled. + * @nr_ufos: number of ufos in vaddrs + * @vaddrs: array of FW addresses of UFOs which the + * driver is waiting on. + * + * Output debug information to kernel log on linux fences + * which would be responsible for signalling the sync + * checkpoints indicated by the ufo vaddresses. + * + * Returns the number of ufos in the array which were found + * to be associated with foreign syncs. 
+ */ +u32 pvr_fence_dump_info_on_stalled_ufos(struct pvr_fence_context *fctx, + u32 nr_ufos, u32 *vaddrs) +{ + int our_ufo_ct = 0; + struct pvr_fence *pvr_fence; + unsigned long flags; + + spin_lock_irqsave(&fctx->list_lock, flags); + /* dump info on any ufos in our active list */ + list_for_each_entry(pvr_fence, &fctx->fence_list, fence_head) { + u32 *this_ufo_vaddr = vaddrs; + int ufo_num; + DUMPDEBUG_PRINTF_FUNC *pfnDummy = NULL; + + for (ufo_num = 0; ufo_num < nr_ufos; ufo_num++) { + struct _SYNC_CHECKPOINT *checkpoint = + pvr_fence->sync_checkpoint; + const u32 fence_ufo_addr = + SyncCheckpointGetFirmwareAddr(checkpoint); + + if (fence_ufo_addr != this_ufo_vaddr[ufo_num]) + continue; + + /* Dump sync info */ + PVR_DUMPDEBUG_LOG(pfnDummy, NULL, + "\tSyncID = %d, FWAddr = 0x%08x: TLID = %d (Foreign Fence - [%p] %s)", + SyncCheckpointGetId(checkpoint), + fence_ufo_addr, + SyncCheckpointGetTimeline(checkpoint), + pvr_fence->fence, + pvr_fence->name); + our_ufo_ct++; + } + } + spin_unlock_irqrestore(&fctx->list_lock, flags); + return our_ufo_ct; +} diff --git a/drivers/gpu/drm/phytium/octopus/pvr_fence.h b/drivers/gpu/drm/phytium/octopus/pvr_fence.h new file mode 100644 index 000000000000..1efceed11bc9 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_fence.h @@ -0,0 +1,233 @@ +/* + * @File + * @Title PhytiumVR Linux fence interface + * @Codingstyle LinuxKernel + * @Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved + * @License Dual MIT/GPLv2 + * + * The contents of this file are subject to the MIT license as set out below. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * Alternatively, the contents of this file may be used under the terms of + * the GNU General Public License Version 2 ("GPL") in which case the provisions + * of GPL are applicable instead of those above. + * + * If you wish to allow use of your version of this file only under the terms of + * GPL, and not to allow others to use your version of this file under the terms + * of the MIT license, indicate your decision by deleting the provisions above + * and replace them with the notice and other provisions required by GPL as set + * out in the file called "GPL-COPYING" included in this distribution. If you do + * not delete the provisions above, a recipient may use your version of this file + * under the terms of either the MIT license or GPL. + * + * This License is also included in this distribution in the file called + * "MIT-COPYING". 
+ *
+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#if !defined(__PVR_FENCE_H__)
+#define __PVR_FENCE_H__
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0))
+static inline void pvr_fence_cleanup(void)
+{
+}
+#else
+#include "services_kernel_client.h"
+#include "pvr_linux_fence.h"
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+struct _SYNC_CHECKPOINT_CONTEXT;
+struct _SYNC_CHECKPOINT;
+
+/**
+ * pvr_fence_context - PVR fence context used to create and manage PVR fences
+ * @lock: protects the context and fences created on the context
+ * @name: fence context name (used for debugging)
+ * @dbg_request_handle: handle for callback used to dump debug data
+ * @fence_context: fence context with which to associate fences
+ * @fence_seqno: sequence number to use for the next fence
+ * @fence_wq: work queue for signalled fence work
+ * @check_status_work: work item used to inform services when a foreign fence
+ * has signalled
+ * @cmd_complete_handle: handle for callback used to signal fences when fence
+ * syncs are met
+ * @list_lock: protects the active and active foreign lists
+ * @signal_list: list of fences waiting to be signalled
+ * @fence_list: list of fences (used for debugging)
+ * @deferred_free_list: list of fences that we will free when we are no longer
+ * holding spinlocks. The frees get implemented when an update fence is
+ * signalled or the context is freed.
+ */ +struct pvr_fence_context { + spinlock_t lock; + char name[32]; + void *dbg_request_handle; + u64 fence_context; + atomic_t fence_seqno; + + struct workqueue_struct *fence_wq; + struct work_struct check_status_work; + + void *cmd_complete_handle; + + spinlock_t list_lock; + struct list_head signal_list; + struct list_head fence_list; + struct list_head deferred_free_list; + + struct kref kref; + struct work_struct destroy_work; +}; + +/** + * pvr_fence - PVR fence that represents both native and foreign fences + * @base: fence structure + * @fctx: fence context on which this fence was created + * @name: fence name (used for debugging) + * @fence: pointer to base fence structure or foreign fence + * @sync_checkpoint: services sync checkpoint used by hardware + * @fence_head: entry on the context fence and deferred free list + * @signal_head: entry on the context signal list + * @cb: foreign fence callback to set the sync to signalled + */ +struct pvr_fence { + struct dma_fence base; + struct pvr_fence_context *fctx; + char name[32]; + + struct dma_fence *fence; + struct _SYNC_CHECKPOINT *sync_checkpoint; + + struct list_head fence_head; + struct list_head signal_head; + struct dma_fence_cb cb; + struct rcu_head rcu; +}; + +extern const struct dma_fence_ops pvr_fence_ops; +extern const struct dma_fence_ops pvr_fence_foreign_ops; + +static inline bool is_our_fence(struct pvr_fence_context *fctx, + struct dma_fence *fence) +{ + return (fence->context == fctx->fence_context); +} + +static inline bool is_pvr_fence(struct dma_fence *fence) +{ + return ((fence->ops == &pvr_fence_ops) || + (fence->ops == &pvr_fence_foreign_ops)); +} + +static inline struct pvr_fence *to_pvr_fence(struct dma_fence *fence) +{ + if (is_pvr_fence(fence)) + return container_of(fence, struct pvr_fence, base); + + return NULL; +} + +struct pvr_fence_context * +pvr_fence_context_create(void *dev_cookie, + struct workqueue_struct *fence_status_wq, + const char *name); +void 
pvr_fence_context_destroy(struct pvr_fence_context *fctx); +void pvr_context_value_str(struct pvr_fence_context *fctx, char *str, int size); + +struct pvr_fence * +pvr_fence_create(struct pvr_fence_context *fctx, + struct _SYNC_CHECKPOINT_CONTEXT *sync_checkpoint_ctx, + int timeline_fd, const char *name); +struct pvr_fence * +pvr_fence_create_from_fence(struct pvr_fence_context *fctx, + struct _SYNC_CHECKPOINT_CONTEXT *sync_checkpoint_ctx, + struct dma_fence *fence, + PVRSRV_FENCE fence_fd, + const char *name); +void pvr_fence_destroy(struct pvr_fence *pvr_fence); +int pvr_fence_sw_signal(struct pvr_fence *pvr_fence); +int pvr_fence_sw_error(struct pvr_fence *pvr_fence); + +int pvr_fence_get_checkpoints(struct pvr_fence **pvr_fences, u32 nr_fences, + struct _SYNC_CHECKPOINT **fence_checkpoints); +struct _SYNC_CHECKPOINT * +pvr_fence_get_checkpoint(struct pvr_fence *update_fence); + +void pvr_fence_context_signal_fences_nohw(void *data); + +void pvr_fence_context_free_deferred_callback(void *data); + +u32 pvr_fence_dump_info_on_stalled_ufos(struct pvr_fence_context *fctx, + u32 nr_ufos, + u32 *vaddrs); + +static inline void pvr_fence_cleanup(void) +{ + /* + * Ensure all PVR fence contexts have been destroyed, by flushing + * the global workqueue. + */ + flush_scheduled_work(); +} + +#if defined(PVR_FENCE_DEBUG) +#define PVR_FENCE_CTX_TRACE(c, fmt, ...) \ + do { \ + struct pvr_fence_context *__fctx = (c); \ + pr_err("c %llu: (PVR) " fmt, (u64) __fctx->fence_context, \ + ## __VA_ARGS__); \ + } while (0) +#else +#define PVR_FENCE_CTX_TRACE(c, fmt, ...) +#endif + +#define PVR_FENCE_CTX_WARN(c, fmt, ...) \ + do { \ + struct pvr_fence_context *__fctx = (c); \ + pr_warn("c %llu: (PVR) " fmt, (u64) __fctx->fence_context, \ + ## __VA_ARGS__); \ + } while (0) + +#define PVR_FENCE_CTX_ERR(c, fmt, ...) 
\ + do { \ + struct pvr_fence_context *__fctx = (c); \ + pr_err("c %llu: (PVR) " fmt, (u64) __fctx->fence_context, \ + ## __VA_ARGS__); \ + } while (0) + +#if defined(PVR_FENCE_DEBUG) +#define PVR_FENCE_TRACE(f, fmt, ...) \ + DMA_FENCE_ERR(f, "(PVR) " fmt, ## __VA_ARGS__) +#else +#define PVR_FENCE_TRACE(f, fmt, ...) +#endif + +#define PVR_FENCE_WARN(f, fmt, ...) \ + DMA_FENCE_WARN(f, "(PVR) " fmt, ## __VA_ARGS__) + +#define PVR_FENCE_ERR(f, fmt, ...) \ + DMA_FENCE_ERR(f, "(PVR) " fmt, ## __VA_ARGS__) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) */ +#endif /* !defined(__PVR_FENCE_H__) */ diff --git a/drivers/gpu/drm/phytium/octopus/pvr_fence_trace.h b/drivers/gpu/drm/phytium/octopus/pvr_fence_trace.h new file mode 100644 index 000000000000..54916825a9f5 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_fence_trace.h @@ -0,0 +1,225 @@ +/* + * @Codingstyle LinuxKernel + * @Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved + * @License Dual MIT/GPLv2 + * + * The contents of this file are subject to the MIT license as set out below. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * Alternatively, the contents of this file may be used under the terms of + * the GNU General Public License Version 2 ("GPL") in which case the provisions + * of GPL are applicable instead of those above. 
+ * + * If you wish to allow use of your version of this file only under the terms of + * GPL, and not to allow others to use your version of this file under the terms + * of the MIT license, indicate your decision by deleting the provisions above + * and replace them with the notice and other provisions required by GPL as set + * out in the file called "GPL-COPYING" included in this distribution. If you do + * not delete the provisions above, a recipient may use your version of this file + * under the terms of either the MIT license or GPL. + * + * This License is also included in this distribution in the file called + * "MIT-COPYING". + * + * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS + * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING + * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM pvr_fence
+
+#if !defined(_TRACE_PVR_FENCE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PVR_FENCE_H
+
+#include <linux/tracepoint.h>
+
+struct pvr_fence;
+struct pvr_fence_context;
+
+DECLARE_EVENT_CLASS(pvr_fence_context,
+
+	TP_PROTO(struct pvr_fence_context *fctx),
+	TP_ARGS(fctx),
+
+	TP_STRUCT__entry(
+		__string(name, fctx->name)
+		__array(char, val, 128)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, fctx->name)
+		pvr_context_value_str(fctx, __entry->val,
+			sizeof(__entry->val));
+	),
+
+	TP_printk("name=%s val=%s",
+		__get_str(name),
+		__entry->val
+	)
+);
+
+DEFINE_EVENT(pvr_fence_context, pvr_fence_context_create,
+	TP_PROTO(struct pvr_fence_context *fctx),
+	TP_ARGS(fctx)
+);
+
+DEFINE_EVENT(pvr_fence_context, pvr_fence_context_destroy,
+	TP_PROTO(struct pvr_fence_context *fctx),
+	TP_ARGS(fctx)
+);
+
+DEFINE_EVENT(pvr_fence_context, pvr_fence_context_destroy_kref,
+	TP_PROTO(struct pvr_fence_context *fctx),
+	TP_ARGS(fctx)
+);
+
+DEFINE_EVENT(pvr_fence_context, pvr_fence_context_signal_fences,
+	TP_PROTO(struct pvr_fence_context *fctx),
+	TP_ARGS(fctx)
+);
+
+DECLARE_EVENT_CLASS(pvr_fence,
+	TP_PROTO(struct pvr_fence *fence),
+	TP_ARGS(fence),
+
+	TP_STRUCT__entry(
+		__string(driver,
+			fence->base.ops->get_driver_name(&fence->base))
+		__string(timeline,
+			fence->base.ops->get_timeline_name(&fence->base))
+		__array(char, val, 128)
+		__field(u64, context)
+	),
+
+	TP_fast_assign(
+		__assign_str(driver,
+			fence->base.ops->get_driver_name(&fence->base))
+		__assign_str(timeline,
+			fence->base.ops->get_timeline_name(&fence->base))
+		fence->base.ops->fence_value_str(&fence->base,
+			__entry->val, sizeof(__entry->val));
+		__entry->context = fence->base.context;
+	),
+
+	TP_printk("driver=%s timeline=%s ctx=%llu val=%s",
+		__get_str(driver), __get_str(timeline),
+		__entry->context, __entry->val
+	)
+);
+
+DEFINE_EVENT(pvr_fence, pvr_fence_create,
+	TP_PROTO(struct pvr_fence *fence),
+	TP_ARGS(fence)
+);
+
+DEFINE_EVENT(pvr_fence, pvr_fence_release, + TP_PROTO(struct pvr_fence *fence), + TP_ARGS(fence) +); + +DEFINE_EVENT(pvr_fence, pvr_fence_enable_signaling, + TP_PROTO(struct pvr_fence *fence), + TP_ARGS(fence) +); + +DEFINE_EVENT(pvr_fence, pvr_fence_signal_fence, + TP_PROTO(struct pvr_fence *fence), + TP_ARGS(fence) +); + +DECLARE_EVENT_CLASS(pvr_fence_foreign, + TP_PROTO(struct pvr_fence *fence), + TP_ARGS(fence), + + TP_STRUCT__entry( + __string(driver, + fence->base.ops->get_driver_name(&fence->base)) + __string(timeline, + fence->base.ops->get_timeline_name(&fence->base)) + __array(char, val, 128) + __field(u64, context) + __string(foreign_driver, + fence->fence->ops->get_driver_name ? + fence->fence->ops->get_driver_name(fence->fence) : + "unknown") + __string(foreign_timeline, + fence->fence->ops->get_timeline_name ? + fence->fence->ops->get_timeline_name(fence->fence) : + "unknown") + __array(char, foreign_val, 128) + __field(u64, foreign_context) + ), + + TP_fast_assign( + __assign_str(driver, + fence->base.ops->get_driver_name(&fence->base)) + __assign_str(timeline, + fence->base.ops->get_timeline_name(&fence->base)) + fence->base.ops->fence_value_str(&fence->base, __entry->val, + sizeof(__entry->val)); + __entry->context = fence->base.context; + __assign_str(foreign_driver, + fence->fence->ops->get_driver_name ? + fence->fence->ops->get_driver_name(fence->fence) : + "unknown") + __assign_str(foreign_timeline, + fence->fence->ops->get_timeline_name ? + fence->fence->ops->get_timeline_name(fence->fence) : + "unknown") + fence->fence->ops->fence_value_str ? 
+		fence->fence->ops->fence_value_str(
+			fence->fence, __entry->foreign_val,
+			sizeof(__entry->foreign_val)) :
+		(void) strlcpy(__entry->foreign_val,
+			"unknown", sizeof(__entry->foreign_val));
+		__entry->foreign_context = fence->fence->context;
+	),
+
+	TP_printk("driver=%s timeline=%s ctx=%llu val=%s foreign: driver=%s timeline=%s ctx=%llu val=%s",
+		__get_str(driver), __get_str(timeline), __entry->context,
+		__entry->val, __get_str(foreign_driver),
+		__get_str(foreign_timeline), __entry->foreign_context,
+		__entry->foreign_val
+	)
+);
+
+DEFINE_EVENT(pvr_fence_foreign, pvr_fence_foreign_create,
+	TP_PROTO(struct pvr_fence *fence),
+	TP_ARGS(fence)
+);
+
+DEFINE_EVENT(pvr_fence_foreign, pvr_fence_foreign_release,
+	TP_PROTO(struct pvr_fence *fence),
+	TP_ARGS(fence)
+);
+
+DEFINE_EVENT(pvr_fence_foreign, pvr_fence_foreign_signal,
+	TP_PROTO(struct pvr_fence *fence),
+	TP_ARGS(fence)
+);
+
+#endif /* _TRACE_PVR_FENCE_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+
+/* This is needed because the name of this file doesn't match TRACE_SYSTEM. */
+#define TRACE_INCLUDE_FILE pvr_fence_trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/phytium/octopus/pvr_gputrace.c b/drivers/gpu/drm/phytium/octopus/pvr_gputrace.c
new file mode 100644
index 000000000000..c3da939341ac
--- /dev/null
+++ b/drivers/gpu/drm/phytium/octopus/pvr_gputrace.c
@@ -0,0 +1,1244 @@
+/*************************************************************************/ /*!
+@File pvr_gputrace.c
+@Title PVR GPU Trace module Linux implementation
+@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0))
+#include <linux/trace_events.h>
+#else
+#include <linux/ftrace_event.h>
+#endif
+
+#include "pvrsrv_error.h"
+#include "pvrsrv_apphint.h"
+#include "pvr_debug.h"
+#include "ospvr_gputrace.h"
+#include "rgxhwperf.h"
+#include "rgxtimecorr.h"
+#include "device.h"
+#include "trace_events.h"
+#include "pvrsrv.h"
+#include "pvrsrv_tlstreams.h"
+#include "tlclient.h"
+#include "pvr_debug.h"
+#define CREATE_TRACE_POINTS
+#include "rogue_trace_events.h"
+
+/******************************************************************************
+ Module internal implementation
+******************************************************************************/
+
+typedef enum {
+	PVR_GPUTRACE_SWITCH_TYPE_UNDEF = 0,
+
+	PVR_GPUTRACE_SWITCH_TYPE_BEGIN = 1,
+	PVR_GPUTRACE_SWITCH_TYPE_END = 2,
+	PVR_GPUTRACE_SWITCH_TYPE_SINGLE = 3
+} PVR_GPUTRACE_SWITCH_TYPE;
+
+typedef struct RGX_HWPERF_FTRACE_DATA {
+	/* This lock ensures the HWPerf TL stream reading resources are not destroyed
+	 * by one thread disabling it while another is reading from it. Keeps the
+	 * state and resource create/destroy atomic and consistent. */
+	POS_LOCK hFTraceResourceLock;
+
+	IMG_HANDLE hGPUTraceCmdCompleteHandle;
+	IMG_HANDLE hGPUTraceTLStream;
+	IMG_UINT64 ui64LastSampledTimeCorrOSTimeStamp;
+	IMG_UINT32 ui32FTraceLastOrdinal;
+} RGX_HWPERF_FTRACE_DATA;
+
+/* This lock ensures state change of GPU_TRACING on/off is done atomically */
+static POS_LOCK ghGPUTraceStateLock;
+static IMG_BOOL gbFTraceGPUEventsEnabled = PVRSRV_APPHINT_ENABLEFTRACEGPU;
+
+/* Saved value of the clock source before the trace was enabled. We're keeping
+ * it here so that we know which clock should be selected after we disable the
+ * gpu ftrace.
*/ +#if defined(SUPPORT_RGX) +static RGXTIMECORR_CLOCK_TYPE geLastTimeCorrClock = PVRSRV_APPHINT_TIMECORRCLOCK; +#endif + +/* This lock ensures that the reference counting operation on the FTrace UFO + * events and enable/disable operation on firmware event are performed as + * one atomic operation. This should ensure that there are no race conditions + * between reference counting and firmware event state change. + * See below comment for guiUfoEventRef. + */ +static POS_LOCK ghLockFTraceEventLock; + +/* Multiple FTrace UFO events are reflected in the firmware as only one event. When + * we enable FTrace UFO event we want to also at the same time enable it in + * the firmware. Since there is a multiple-to-one relation between those events + * we count how many FTrace UFO events is enabled. If at least one event is + * enabled we enabled the firmware event. When all FTrace UFO events are disabled + * we disable firmware event. */ +static IMG_UINT guiUfoEventRef; + +/****************************************************************************** + Module In-bound API +******************************************************************************/ + +static PVRSRV_ERROR _GpuTraceDisable( + PVRSRV_RGXDEV_INFO *psRgxDevInfo, + IMG_BOOL bDeInit); + +static void _GpuTraceCmdCompleteNotify(PVRSRV_CMDCOMP_HANDLE); + +PVRSRV_ERROR PVRGpuTraceSupportInit(void) +{ + PVRSRV_ERROR eError; + + if (ghLockFTraceEventLock != NULL) + { + PVR_DPF((PVR_DBG_ERROR, "FTrace Support is already initialized")); + return PVRSRV_OK; + } + + /* common module params initialization */ + eError = OSLockCreate(&ghLockFTraceEventLock); + PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate"); + + eError = OSLockCreate(&ghGPUTraceStateLock); + PVR_LOG_RETURN_IF_ERROR (eError, "OSLockCreate"); + + return PVRSRV_OK; +} + +void PVRGpuTraceSupportDeInit(void) +{ + if (ghGPUTraceStateLock) + { + OSLockDestroy(ghGPUTraceStateLock); + } + + if (ghLockFTraceEventLock) + { + OSLockDestroy(ghLockFTraceEventLock); + 
ghLockFTraceEventLock = NULL; + } +} + +PVRSRV_ERROR PVRGpuTraceInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + RGX_HWPERF_FTRACE_DATA *psData; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + psData = OSAllocZMem(sizeof(RGX_HWPERF_FTRACE_DATA)); + psDevInfo->pvGpuFtraceData = psData; + PVR_LOG_GOTO_IF_NOMEM(psData, eError, e0); + + /* We initialise it only once because we want to track if any + * packets were dropped. */ + psData->ui32FTraceLastOrdinal = IMG_UINT32_MAX - 1; + + eError = OSLockCreate(&psData->hFTraceResourceLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e0); + + return PVRSRV_OK; + +e0: + PVRGpuTraceDeInitDevice(psDeviceNode); + return eError; +} + +void PVRGpuTraceDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_HWPERF_FTRACE_DATA *psData = psDevInfo->pvGpuFtraceData; + + PVRSRV_VZ_RETN_IF_MODE(GUEST); + if (psData) + { + /* first disable the tracing, to free up TL resources */ + if (psData->hFTraceResourceLock) + { + OSLockAcquire(psData->hFTraceResourceLock); + _GpuTraceDisable(psDeviceNode->pvDevice, IMG_TRUE); + OSLockRelease(psData->hFTraceResourceLock); + + /* now free all the FTrace resources */ + OSLockDestroy(psData->hFTraceResourceLock); + } + OSFreeMem(psData); + psDevInfo->pvGpuFtraceData = NULL; + } +} + +IMG_BOOL PVRGpuTraceIsEnabled(void) +{ + return gbFTraceGPUEventsEnabled; +} + +void PVRGpuTraceInitIfEnabled(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + if (PVRGpuTraceIsEnabled()) + { + PVRSRV_ERROR eError = PVRGpuTraceSetEnabled(psDeviceNode, IMG_TRUE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to initialise GPU event tracing" + " (%s)", PVRSRVGetErrorString(eError))); + } + + /* below functions will enable FTrace events which in turn will + * execute HWPerf callbacks that set appropriate filter values + * note: unfortunately the functions don't allow to pass 
private + * data so they enable events for all of the devices + * at once, which means that this can happen more than once + * if there is more than one device */ + + /* single events can be enabled by calling trace_set_clr_event() + * with the event name, e.g.: + * trace_set_clr_event("rogue", "rogue_ufo_update", 1) */ +#if defined(CONFIG_EVENT_TRACING) /* this is a kernel config option */ +#if defined(ANDROID) || defined(CHROMIUMOS_KERNEL) + if (trace_set_clr_event("gpu", NULL, 1)) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to enable \"gpu\" event" + " group")); + } + else + { + PVR_LOG(("FTrace events from \"gpu\" group enabled")); + } +#endif /* defined(ANDROID) || defined(CHROMIUMOS_KERNEL) */ + if (trace_set_clr_event("rogue", NULL, 1)) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to enable \"rogue\" event" + " group")); + } + else + { + PVR_LOG(("FTrace events from \"rogue\" group enabled")); + } +#endif /* defined(CONFIG_EVENT_TRACING) */ + } +} + +/* Caller must now hold hFTraceResourceLock before calling this method. 
+ */ +static PVRSRV_ERROR _GpuTraceEnable(PVRSRV_RGXDEV_INFO *psRgxDevInfo) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + RGX_HWPERF_FTRACE_DATA *psFtraceData; + PVRSRV_DEVICE_NODE *psRgxDevNode = psRgxDevInfo->psDeviceNode; + IMG_CHAR pszHWPerfStreamName[sizeof(PVRSRV_TL_HWPERF_RGX_FW_STREAM) + 5]; + + PVR_DPF_ENTERED; + + PVR_ASSERT(psRgxDevInfo); + + psFtraceData = psRgxDevInfo->pvGpuFtraceData; + + PVR_ASSERT(OSLockIsLocked(psFtraceData->hFTraceResourceLock)); + + /* return if already enabled */ + if (psFtraceData->hGPUTraceTLStream) + { + return PVRSRV_OK; + } + +#if defined(SUPPORT_RGX) + /* Signal FW to enable event generation */ + if (psRgxDevInfo->bFirmwareInitialised) + { + IMG_UINT64 ui64UFOFilter = psRgxDevInfo->ui64HWPerfFilter & + (RGX_HWPERF_EVENT_MASK_FW_SED | RGX_HWPERF_EVENT_MASK_FW_UFO); + + eError = PVRSRVRGXCtrlHWPerfKM(NULL, psRgxDevNode, + RGX_HWPERF_STREAM_ID0_FW, IMG_FALSE, + RGX_HWPERF_EVENT_MASK_HW_KICKFINISH | + ui64UFOFilter); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM", err_out); + } + else +#endif + { + /* only set filter and exit */ + psRgxDevInfo->ui64HWPerfFilter = RGX_HWPERF_EVENT_MASK_HW_KICKFINISH | + ((RGX_HWPERF_EVENT_MASK_FW_SED | RGX_HWPERF_EVENT_MASK_FW_UFO) & + psRgxDevInfo->ui64HWPerfFilter); + + PVR_DPF((PVR_DBG_WARNING, + "HWPerfFW mask has been SET to (%" IMG_UINT64_FMTSPECx ")", + psRgxDevInfo->ui64HWPerfFilter)); + + return PVRSRV_OK; + } + + /* form the HWPerf stream name, corresponding to this DevNode; which can make sense in the UM */ + if (OSSNPrintf(pszHWPerfStreamName, sizeof(pszHWPerfStreamName), "%s%d", + PVRSRV_TL_HWPERF_RGX_FW_STREAM, psRgxDevNode->sDevId.i32OsDeviceID) < 0) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to form HWPerf stream name for device %d", + __func__, + psRgxDevNode->sDevId.i32OsDeviceID)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Open the TL Stream for HWPerf data consumption */ + eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE, + pszHWPerfStreamName, + 
PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING, + &psFtraceData->hGPUTraceTLStream); + PVR_LOG_GOTO_IF_ERROR(eError, "TLClientOpenStream", err_out); + +#if defined(SUPPORT_RGX) + if (RGXTimeCorrGetClockSource() != RGXTIMECORR_CLOCK_SCHED) + { + /* Set clock source for timer correlation data to sched_clock */ + geLastTimeCorrClock = RGXTimeCorrGetClockSource(); + RGXTimeCorrSetClockSource(psRgxDevNode, RGXTIMECORR_CLOCK_SCHED); + } +#endif + + /* Reset the OS timestamp coming from the timer correlation data + * associated with the latest HWPerf event we processed. + */ + psFtraceData->ui64LastSampledTimeCorrOSTimeStamp = 0; + + /* Register a notifier to collect HWPerf data whenever the HW completes + * an operation. + */ + eError = PVRSRVRegisterCmdCompleteNotify( + &psFtraceData->hGPUTraceCmdCompleteHandle, + &_GpuTraceCmdCompleteNotify, + psRgxDevInfo); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVRegisterCmdCompleteNotify", err_close_stream); + +err_out: + PVR_DPF_RETURN_RC(eError); + +err_close_stream: + TLClientCloseStream(DIRECT_BRIDGE_HANDLE, + psFtraceData->hGPUTraceTLStream); + psFtraceData->hGPUTraceTLStream = NULL; + goto err_out; +} + +/* Caller must now hold hFTraceResourceLock before calling this method. 
+ */ +static PVRSRV_ERROR _GpuTraceDisable(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_BOOL bDeInit) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + RGX_HWPERF_FTRACE_DATA *psFtraceData; +#if defined(SUPPORT_RGX) + PVRSRV_DEVICE_NODE *psRgxDevNode = psRgxDevInfo->psDeviceNode; +#endif + + PVR_DPF_ENTERED; + + PVR_ASSERT(psRgxDevInfo); + + psFtraceData = psRgxDevInfo->pvGpuFtraceData; + + PVR_ASSERT(OSLockIsLocked(psFtraceData->hFTraceResourceLock)); + + /* if FW is not yet initialised, just set filter and exit */ + if (!psRgxDevInfo->bFirmwareInitialised) + { + psRgxDevInfo->ui64HWPerfFilter = RGX_HWPERF_EVENT_MASK_NONE; + PVR_DPF((PVR_DBG_WARNING, + "HWPerfFW mask has been SET to (%" IMG_UINT64_FMTSPECx ")", + psRgxDevInfo->ui64HWPerfFilter)); + + return PVRSRV_OK; + } + + if (NULL == psFtraceData->hGPUTraceTLStream) + { + /* Tracing already disabled, just return */ + return PVRSRV_OK; + } + +#if defined(SUPPORT_RGX) + if (!bDeInit) + { + eError = PVRSRVRGXCtrlHWPerfKM(NULL, psRgxDevNode, + RGX_HWPERF_STREAM_ID0_FW, IMG_FALSE, + (RGX_HWPERF_EVENT_MASK_NONE)); + PVR_LOG_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM"); + } +#endif + + if (psFtraceData->hGPUTraceCmdCompleteHandle) + { + /* Tracing is being turned off. Unregister the notifier. */ + eError = PVRSRVUnregisterCmdCompleteNotify( + psFtraceData->hGPUTraceCmdCompleteHandle); + PVR_LOG_IF_ERROR(eError, "PVRSRVUnregisterCmdCompleteNotify"); + psFtraceData->hGPUTraceCmdCompleteHandle = NULL; + } + + if (psFtraceData->hGPUTraceTLStream) + { + IMG_PBYTE pbTmp = NULL; + IMG_UINT32 ui32Tmp = 0; + + /* We have to flush both the L1 (FW) and L2 (Host) buffers in case there + * are some events left unprocessed in this FTrace/systrace "session" + * (note that even if we have just disabled HWPerf on the FW some packets + * could have been generated and already copied to L2 by the MISR handler). 
+ * + * With the following calls we will both copy new data to the Host buffer + * (done by the producer callback in TLClientAcquireData) and advance + * the read offset in the buffer to catch up with the latest events. + */ + eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE, + psFtraceData->hGPUTraceTLStream, + &pbTmp, &ui32Tmp); + PVR_LOG_IF_ERROR(eError, "TLClientCloseStream"); + + /* Let close stream perform the release data on the outstanding acquired data */ + eError = TLClientCloseStream(DIRECT_BRIDGE_HANDLE, + psFtraceData->hGPUTraceTLStream); + PVR_LOG_IF_ERROR(eError, "TLClientCloseStream"); + + psFtraceData->hGPUTraceTLStream = NULL; + } + +#if defined(SUPPORT_RGX) + if (geLastTimeCorrClock != RGXTIMECORR_CLOCK_SCHED) + { + RGXTimeCorrSetClockSource(psRgxDevNode, geLastTimeCorrClock); + } +#endif + + PVR_DPF_RETURN_RC(eError); +} + +static PVRSRV_ERROR _GpuTraceSetEnabled(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + IMG_BOOL bNewValue) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + RGX_HWPERF_FTRACE_DATA *psFtraceData; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + PVR_DPF_ENTERED; + + PVR_ASSERT(psRgxDevInfo); + psFtraceData = psRgxDevInfo->pvGpuFtraceData; + + /* About to create/destroy FTrace resources, lock critical section + * to avoid HWPerf MISR thread contention. + */ + OSLockAcquire(psFtraceData->hFTraceResourceLock); + + eError = (bNewValue ? 
_GpuTraceEnable(psRgxDevInfo) + : _GpuTraceDisable(psRgxDevInfo, IMG_FALSE)); + + OSLockRelease(psFtraceData->hFTraceResourceLock); + + PVR_DPF_RETURN_RC(eError); +} + +static PVRSRV_ERROR _GpuTraceSetEnabledForAllDevices(IMG_BOOL bNewValue) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DEVICE_NODE *psDeviceNode; + + psDeviceNode = psPVRSRVData->psDeviceNodeList; + /* enable/disable GPU trace on all devices */ + while (psDeviceNode) + { + eError = _GpuTraceSetEnabled(psDeviceNode->pvDevice, bNewValue); + if (eError != PVRSRV_OK) + { + break; + } + psDeviceNode = psDeviceNode->psNext; + } + + PVR_DPF_RETURN_RC(eError); +} + +PVRSRV_ERROR PVRGpuTraceSetEnabled(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_BOOL bNewValue) +{ + return _GpuTraceSetEnabled(psDeviceNode->pvDevice, bNewValue); +} + +/* ----- HWPerf to FTrace packet processing and events injection ------------ */ + +static const IMG_CHAR *_HWPerfKickTypeToStr(RGX_HWPERF_KICK_TYPE eKickType) +{ + static const IMG_CHAR *aszKickType[RGX_HWPERF_KICK_TYPE_LAST+1] = { +#if defined(HWPERF_PACKET_V2C_SIG) + "TA3D", "CDM", "RS", "SHG", "TQTDM", "SYNC", "LAST" +#else + "TA3D", "TQ2D", "TQ3D", "CDM", "RS", "VRDM", "TQTDM", "SYNC", "LAST" +#endif + }; + + /* cast in case of negative value */ + if (((IMG_UINT32) eKickType) >= RGX_HWPERF_KICK_TYPE_LAST) + { + return ""; + } + + return aszKickType[eKickType]; +} + +void PVRGpuTraceEnqueueEvent( + PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 ui32FirmwareCtx, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + RGX_HWPERF_KICK_TYPE eKickType) +{ + const IMG_CHAR *pszKickType = _HWPerfKickTypeToStr(eKickType); + + PVR_DPF((PVR_DBG_MESSAGE, "PVRGpuTraceEnqueueEvent(%s): contextId %u, " + "jobId %u", pszKickType, ui32FirmwareCtx, ui32IntJobRef)); + + if (PVRGpuTraceIsEnabled()) + { + trace_rogue_job_enqueue(ui32FirmwareCtx, ui32IntJobRef, ui32ExtJobRef, + pszKickType); + } +} + +static void _GpuTraceWorkSwitch( + IMG_UINT64 
ui64HWTimestampInOSTime, + IMG_UINT32 ui32CtxId, + IMG_UINT32 ui32CtxPriority, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + const IMG_CHAR* pszWorkType, + PVR_GPUTRACE_SWITCH_TYPE eSwType) +{ + PVR_ASSERT(pszWorkType); + trace_rogue_sched_switch(pszWorkType, eSwType, ui64HWTimestampInOSTime, + ui32CtxId, 2-ui32CtxPriority, ui32IntJobRef, ui32ExtJobRef); +} + +static void _GpuTraceUfo( + IMG_UINT64 ui64OSTimestamp, + const RGX_HWPERF_UFO_EV eEvType, + const IMG_UINT32 ui32CtxId, + const IMG_UINT32 ui32ExtJobRef, + const IMG_UINT32 ui32IntJobRef, + const IMG_UINT32 ui32UFOCount, + const RGX_HWPERF_UFO_DATA_ELEMENT *puData) +{ + switch (eEvType) { + case RGX_HWPERF_UFO_EV_UPDATE: + trace_rogue_ufo_updates(ui64OSTimestamp, ui32CtxId, + ui32ExtJobRef, ui32IntJobRef, ui32UFOCount, puData); + break; + case RGX_HWPERF_UFO_EV_CHECK_SUCCESS: + trace_rogue_ufo_checks_success(ui64OSTimestamp, ui32CtxId, + ui32ExtJobRef, ui32IntJobRef, IMG_FALSE, ui32UFOCount, + puData); + break; + case RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS: + trace_rogue_ufo_checks_success(ui64OSTimestamp, ui32CtxId, + ui32ExtJobRef, ui32IntJobRef, IMG_TRUE, ui32UFOCount, + puData); + break; + case RGX_HWPERF_UFO_EV_CHECK_FAIL: + trace_rogue_ufo_checks_fail(ui64OSTimestamp, ui32CtxId, + ui32ExtJobRef, ui32IntJobRef, IMG_FALSE, ui32UFOCount, + puData); + break; + case RGX_HWPERF_UFO_EV_PRCHECK_FAIL: + trace_rogue_ufo_checks_fail(ui64OSTimestamp, ui32CtxId, + ui32ExtJobRef, ui32IntJobRef, IMG_TRUE, ui32UFOCount, + puData); + break; + default: + break; + } +} + +static void _GpuTraceFirmware( + IMG_UINT64 ui64HWTimestampInOSTime, + const IMG_CHAR* pszWorkType, + PVR_GPUTRACE_SWITCH_TYPE eSwType) +{ + trace_rogue_firmware_activity(ui64HWTimestampInOSTime, pszWorkType, eSwType); +} + +static void _GpuTraceEventsLost( + const RGX_HWPERF_STREAM_ID eStreamId, + const IMG_UINT32 ui32LastOrdinal, + const IMG_UINT32 ui32CurrOrdinal) +{ + trace_rogue_events_lost(eStreamId, ui32LastOrdinal, ui32CurrOrdinal); +} + 
+/* Calculate the OS timestamp given an RGX timestamp in the HWPerf event. */ +static uint64_t CalculateEventTimestamp( + PVRSRV_RGXDEV_INFO *psDevInfo, + uint32_t ui32TimeCorrIndex, + uint64_t ui64EventTimestamp) +{ + RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb; + RGX_HWPERF_FTRACE_DATA *psFtraceData = psDevInfo->pvGpuFtraceData; + RGXFWIF_TIME_CORR *psTimeCorr = &psGpuUtilFWCB->sTimeCorr[ui32TimeCorrIndex]; + uint64_t ui64CRTimeStamp = psTimeCorr->ui64CRTimeStamp; + uint64_t ui64OSTimeStamp = psTimeCorr->ui64OSTimeStamp; + uint64_t ui64CRDeltaToOSDeltaKNs = psTimeCorr->ui64CRDeltaToOSDeltaKNs; + uint64_t ui64EventOSTimestamp, deltaRgxTimer, delta_ns; + + if (psFtraceData->ui64LastSampledTimeCorrOSTimeStamp > ui64OSTimeStamp) + { + /* The previous packet had a time reference (time correlation data) more + * recent than the one in the current packet, it means the timer + * correlation array wrapped too quickly (buffer too small) and in the + * previous call to _GpuTraceUfoEvent we read one of the + * newest timer correlations rather than one of the oldest ones. + */ + PVR_DPF((PVR_DBG_ERROR, "%s: The timestamps computed so far could be " + "wrong! 
The time correlation array size should be increased " + "to avoid this.", __func__)); + } + + psFtraceData->ui64LastSampledTimeCorrOSTimeStamp = ui64OSTimeStamp; + + /* RGX CR timer ticks delta */ + deltaRgxTimer = ui64EventTimestamp - ui64CRTimeStamp; + /* RGX time delta in nanoseconds */ + delta_ns = RGXFWIF_GET_DELTA_OSTIME_NS(deltaRgxTimer, ui64CRDeltaToOSDeltaKNs); + /* Calculate OS time of HWPerf event */ + ui64EventOSTimestamp = ui64OSTimeStamp + delta_ns; + + PVR_DPF((PVR_DBG_VERBOSE, "%s: psCurrentDvfs RGX %llu, OS %llu, DVFSCLK %u", + __func__, ui64CRTimeStamp, ui64OSTimeStamp, + psTimeCorr->ui32CoreClockSpeed)); + + return ui64EventOSTimestamp; +} + +static void _GpuTraceSwitchEvent(PVRSRV_RGXDEV_INFO *psDevInfo, + RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt, const IMG_CHAR* pszWorkName, + PVR_GPUTRACE_SWITCH_TYPE eSwType) +{ + IMG_UINT64 ui64Timestamp; + RGX_HWPERF_HW_DATA* psHWPerfPktData; + + PVR_DPF_ENTERED; + + PVR_ASSERT(psHWPerfPkt); + PVR_ASSERT(pszWorkName); + + psHWPerfPktData = RGX_HWPERF_GET_PACKET_DATA_BYTES(psHWPerfPkt); + + ui64Timestamp = CalculateEventTimestamp(psDevInfo, psHWPerfPktData->ui32TimeCorrIndex, + psHWPerfPkt->ui64Timestamp); + + PVR_DPF((PVR_DBG_VERBOSE, "_GpuTraceSwitchEvent: %s ui32ExtJobRef=%d, ui32IntJobRef=%d, eSwType=%d", + pszWorkName, psHWPerfPktData->ui32DMContext, psHWPerfPktData->ui32IntJobRef, eSwType)); + + _GpuTraceWorkSwitch(ui64Timestamp, + psHWPerfPktData->ui32DMContext, + psHWPerfPktData->ui32CtxPriority, + psHWPerfPktData->ui32ExtJobRef, + psHWPerfPktData->ui32IntJobRef, + pszWorkName, + eSwType); + + PVR_DPF_RETURN; +} + +static void _GpuTraceUfoEvent(PVRSRV_RGXDEV_INFO *psDevInfo, + RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt) +{ + IMG_UINT64 ui64Timestamp; + RGX_HWPERF_UFO_DATA *psHWPerfPktData; + IMG_UINT32 ui32UFOCount; + RGX_HWPERF_UFO_DATA_ELEMENT *puData; + + psHWPerfPktData = RGX_HWPERF_GET_PACKET_DATA_BYTES(psHWPerfPkt); + + ui32UFOCount = RGX_HWPERF_GET_UFO_STREAMSIZE(psHWPerfPktData->ui32StreamInfo); + 
puData = (RGX_HWPERF_UFO_DATA_ELEMENT *) IMG_OFFSET_ADDR(psHWPerfPktData, RGX_HWPERF_GET_UFO_STREAMOFFSET(psHWPerfPktData->ui32StreamInfo)); + + ui64Timestamp = CalculateEventTimestamp(psDevInfo, psHWPerfPktData->ui32TimeCorrIndex, + psHWPerfPkt->ui64Timestamp); + + PVR_DPF((PVR_DBG_VERBOSE, "_GpuTraceUfoEvent: ui32ExtJobRef=%d, " + "ui32IntJobRef=%d", psHWPerfPktData->ui32ExtJobRef, + psHWPerfPktData->ui32IntJobRef)); + + _GpuTraceUfo(ui64Timestamp, psHWPerfPktData->eEvType, + psHWPerfPktData->ui32DMContext, psHWPerfPktData->ui32ExtJobRef, + psHWPerfPktData->ui32IntJobRef, ui32UFOCount, puData); +} + +static void _GpuTraceFirmwareEvent(PVRSRV_RGXDEV_INFO *psDevInfo, + RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt, const IMG_CHAR* pszWorkName, + PVR_GPUTRACE_SWITCH_TYPE eSwType) + +{ + uint64_t ui64Timestamp; + RGX_HWPERF_FW_DATA *psHWPerfPktData = RGX_HWPERF_GET_PACKET_DATA_BYTES(psHWPerfPkt); + + ui64Timestamp = CalculateEventTimestamp(psDevInfo, psHWPerfPktData->ui32TimeCorrIndex, + psHWPerfPkt->ui64Timestamp); + + _GpuTraceFirmware(ui64Timestamp, pszWorkName, eSwType); +} + +static IMG_BOOL ValidAndEmitFTraceEvent(PVRSRV_RGXDEV_INFO *psDevInfo, + RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt) +{ + RGX_HWPERF_EVENT_TYPE eType; + RGX_HWPERF_FTRACE_DATA *psFtraceData = psDevInfo->pvGpuFtraceData; + IMG_UINT32 ui32HwEventTypeIndex; + static const struct { + IMG_CHAR* pszName; + PVR_GPUTRACE_SWITCH_TYPE eSwType; + } aszHwEventTypeMap[] = { +#define _T(T) PVR_GPUTRACE_SWITCH_TYPE_##T + { "BG", _T(BEGIN) }, /* RGX_HWPERF_FW_BGSTART */ + { "BG", _T(END) }, /* RGX_HWPERF_FW_BGEND */ + { "IRQ", _T(BEGIN) }, /* RGX_HWPERF_FW_IRQSTART */ + { "IRQ", _T(END) }, /* RGX_HWPERF_FW_IRQEND */ + { "DBG", _T(BEGIN) }, /* RGX_HWPERF_FW_DBGSTART */ + { "DBG", _T(END) }, /* RGX_HWPERF_FW_DBGEND */ + { "PMOOM_TAPAUSE", _T(END) }, /* RGX_HWPERF_HW_PMOOM_TAPAUSE */ + { "TA", _T(BEGIN) }, /* RGX_HWPERF_HW_TAKICK */ + { "TA", _T(END) }, /* RGX_HWPERF_HW_TAFINISHED */ + { "TQ3D", _T(BEGIN) }, /* 
RGX_HWPERF_HW_3DTQKICK */ + { "3D", _T(BEGIN) }, /* RGX_HWPERF_HW_3DKICK */ + { "3D", _T(END) }, /* RGX_HWPERF_HW_3DFINISHED */ + { "CDM", _T(BEGIN) }, /* RGX_HWPERF_HW_CDMKICK */ + { "CDM", _T(END) }, /* RGX_HWPERF_HW_CDMFINISHED */ + { "TQ2D", _T(BEGIN) }, /* RGX_HWPERF_HW_TLAKICK */ + { "TQ2D", _T(END) }, /* RGX_HWPERF_HW_TLAFINISHED */ + { "3DSPM", _T(BEGIN) }, /* RGX_HWPERF_HW_3DSPMKICK */ + { NULL, 0 }, /* RGX_HWPERF_HW_PERIODIC (unsupported) */ + { "RTU", _T(BEGIN) }, /* RGX_HWPERF_HW_RTUKICK */ + { "RTU", _T(END) }, /* RGX_HWPERF_HW_RTUFINISHED */ + { "SHG", _T(BEGIN) }, /* RGX_HWPERF_HW_SHGKICK */ + { "SHG", _T(END) }, /* RGX_HWPERF_HW_SHGFINISHED */ + { "TQ3D", _T(END) }, /* RGX_HWPERF_HW_3DTQFINISHED */ + { "3DSPM", _T(END) }, /* RGX_HWPERF_HW_3DSPMFINISHED */ + { "PMOOM_TARESUME", _T(BEGIN) }, /* RGX_HWPERF_HW_PMOOM_TARESUME */ + { "TDM", _T(BEGIN) }, /* RGX_HWPERF_HW_TDMKICK */ + { "TDM", _T(END) }, /* RGX_HWPERF_HW_TDMFINISHED */ + { "NULL", _T(SINGLE) }, /* RGX_HWPERF_HW_NULLKICK */ +#undef _T + }; + static_assert(RGX_HWPERF_HW_EVENT_RANGE0_FIRST_TYPE == RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE + 1, + "FW and HW events are not contiguous in RGX_HWPERF_EVENT_TYPE"); + + PVR_ASSERT(psHWPerfPkt); + eType = RGX_HWPERF_GET_TYPE(psHWPerfPkt); + + if (psFtraceData->ui32FTraceLastOrdinal != psHWPerfPkt->ui32Ordinal - 1) + { + RGX_HWPERF_STREAM_ID eStreamId = RGX_HWPERF_GET_STREAM_ID(psHWPerfPkt); + _GpuTraceEventsLost(eStreamId, + psFtraceData->ui32FTraceLastOrdinal, + psHWPerfPkt->ui32Ordinal); + PVR_DPF((PVR_DBG_ERROR, "FTrace events lost (stream_id = %u, ordinal: last = %u, current = %u)", + eStreamId, psFtraceData->ui32FTraceLastOrdinal, psHWPerfPkt->ui32Ordinal)); + } + + psFtraceData->ui32FTraceLastOrdinal = psHWPerfPkt->ui32Ordinal; + + /* Process UFO packets */ + if (eType == RGX_HWPERF_UFO) + { + _GpuTraceUfoEvent(psDevInfo, psHWPerfPkt); + return IMG_TRUE; + } + + if (eType <= RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE) + { + /* this ID belongs to range 0, so 
index directly in range 0 */ + ui32HwEventTypeIndex = eType - RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE; + } + else + { + /* this ID belongs to range 1, so first index in range 1 and skip number of slots used up for range 0 */ + ui32HwEventTypeIndex = (eType - RGX_HWPERF_HW_EVENT_RANGE1_FIRST_TYPE) + + (RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE - RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE + 1); + } + + if (ui32HwEventTypeIndex >= ARRAY_SIZE(aszHwEventTypeMap)) + goto err_unsupported; + + if (aszHwEventTypeMap[ui32HwEventTypeIndex].pszName == NULL) + { + /* Not supported map entry, ignore event */ + goto err_unsupported; + } + + if (HWPERF_PACKET_IS_HW_TYPE(eType)) + { + if (aszHwEventTypeMap[ui32HwEventTypeIndex].eSwType == PVR_GPUTRACE_SWITCH_TYPE_SINGLE) + { + _GpuTraceSwitchEvent(psDevInfo, psHWPerfPkt, + aszHwEventTypeMap[ui32HwEventTypeIndex].pszName, + PVR_GPUTRACE_SWITCH_TYPE_BEGIN); + _GpuTraceSwitchEvent(psDevInfo, psHWPerfPkt, + aszHwEventTypeMap[ui32HwEventTypeIndex].pszName, + PVR_GPUTRACE_SWITCH_TYPE_END); + } + else + { + _GpuTraceSwitchEvent(psDevInfo, psHWPerfPkt, + aszHwEventTypeMap[ui32HwEventTypeIndex].pszName, + aszHwEventTypeMap[ui32HwEventTypeIndex].eSwType); + } + } + else if (HWPERF_PACKET_IS_FW_TYPE(eType)) + { + _GpuTraceFirmwareEvent(psDevInfo, psHWPerfPkt, + aszHwEventTypeMap[ui32HwEventTypeIndex].pszName, + aszHwEventTypeMap[ui32HwEventTypeIndex].eSwType); + } + else + { + goto err_unsupported; + } + + return IMG_TRUE; + +err_unsupported: + PVR_DPF((PVR_DBG_VERBOSE, "%s: Unsupported event type %d", __func__, eType)); + return IMG_FALSE; +} + + +static void _GpuTraceProcessPackets(PVRSRV_RGXDEV_INFO *psDevInfo, + void *pBuffer, IMG_UINT32 ui32ReadLen) +{ + IMG_UINT32 ui32TlPackets = 0; + IMG_UINT32 ui32HWPerfPackets = 0; + IMG_UINT32 ui32HWPerfPacketsSent = 0; + void *pBufferEnd; + PVRSRVTL_PPACKETHDR psHDRptr; + PVRSRVTL_PACKETTYPE ui16TlType; + + PVR_DPF_ENTERED; + + PVR_ASSERT(psDevInfo); + PVR_ASSERT(pBuffer); + PVR_ASSERT(ui32ReadLen); + + /* 
Process the TL Packets + */ + pBufferEnd = IMG_OFFSET_ADDR(pBuffer, ui32ReadLen); + psHDRptr = GET_PACKET_HDR(pBuffer); + while ( psHDRptr < (PVRSRVTL_PPACKETHDR)pBufferEnd ) + { + ui16TlType = GET_PACKET_TYPE(psHDRptr); + if (ui16TlType == PVRSRVTL_PACKETTYPE_DATA) + { + IMG_UINT16 ui16DataLen = GET_PACKET_DATA_LEN(psHDRptr); + if (0 == ui16DataLen) + { + PVR_DPF((PVR_DBG_ERROR, "_GpuTraceProcessPackets: ZERO Data in TL data packet: %p", psHDRptr)); + } + else + { + RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt; + RGX_HWPERF_V2_PACKET_HDR* psHWPerfEnd; + + /* Check for lost hwperf data packets */ + psHWPerfEnd = RGX_HWPERF_GET_PACKET(GET_PACKET_DATA_PTR(psHDRptr)+ui16DataLen); + psHWPerfPkt = RGX_HWPERF_GET_PACKET(GET_PACKET_DATA_PTR(psHDRptr)); + do + { + if (ValidAndEmitFTraceEvent(psDevInfo, psHWPerfPkt)) + { + ui32HWPerfPacketsSent++; + } + ui32HWPerfPackets++; + psHWPerfPkt = RGX_HWPERF_GET_NEXT_PACKET(psHWPerfPkt); + } + while (psHWPerfPkt < psHWPerfEnd); + } + } + else if (ui16TlType == PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED) + { + PVR_DPF((PVR_DBG_MESSAGE, "_GpuTraceProcessPackets: Indication that the transport buffer was full")); + } + else + { + /* else Ignore padding packet type and others */ + PVR_DPF((PVR_DBG_MESSAGE, "_GpuTraceProcessPackets: Ignoring TL packet, type %d", ui16TlType )); + } + + psHDRptr = GET_NEXT_PACKET_ADDR(psHDRptr); + ui32TlPackets++; + } + + PVR_DPF((PVR_DBG_VERBOSE, "_GpuTraceProcessPackets: TL " + "Packets processed %03d, HWPerf packets %03d, sent %03d", + ui32TlPackets, ui32HWPerfPackets, ui32HWPerfPacketsSent)); + + PVR_DPF_RETURN; +} + + +static void _GpuTraceCmdCompleteNotify(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle) +{ + PVRSRV_RGXDEV_INFO* psDeviceInfo = hCmdCompHandle; + RGX_HWPERF_FTRACE_DATA* psFtraceData; + PVRSRV_ERROR eError; + IMG_PBYTE pBuffer; + IMG_UINT32 ui32ReadLen; + IMG_BOOL bFTraceLockAcquired = IMG_FALSE; + + PVR_DPF_ENTERED; + + PVR_ASSERT(psDeviceInfo != NULL); + + psFtraceData = 
psDeviceInfo->pvGpuFtraceData; + + /* Command-complete notifiers can run concurrently. If this is + * happening, just bail out and let the previous call finish. + * This is ok because we can process the queued packets on the next call. + */ + bFTraceLockAcquired = OSTryLockAcquire(psFtraceData->hFTraceResourceLock); + if (IMG_FALSE == bFTraceLockAcquired) + { + PVR_DPF_RETURN; + } + + /* If this notifier is called, it means the TL resources will be valid at-least + * until the end of this call, since the DeInit function will wait on the hFTraceResourceLock + * to clean-up the TL resources and un-register the notifier, so just assert here. + */ + PVR_ASSERT(psFtraceData->hGPUTraceTLStream); + + /* If we have a valid stream attempt to acquire some data */ + eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE, psFtraceData->hGPUTraceTLStream, &pBuffer, &ui32ReadLen); + if (eError == PVRSRV_OK) + { + /* Process the HWPerf packets and release the data */ + if (ui32ReadLen > 0) + { + PVR_DPF((PVR_DBG_VERBOSE, "_GpuTraceCmdCompleteNotify: DATA AVAILABLE offset=%p, length=%d", pBuffer, ui32ReadLen)); + + /* Process the transport layer data for HWPerf packets... 
*/ + _GpuTraceProcessPackets(psDeviceInfo, pBuffer, ui32ReadLen); + + eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, psFtraceData->hGPUTraceTLStream); + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "TLClientReleaseData"); + + /* Serious error, disable FTrace GPU events */ + + /* Release TraceLock so we always have the locking + * order BridgeLock->TraceLock to prevent AB-BA deadlocks*/ + OSLockRelease(psFtraceData->hFTraceResourceLock); + OSLockAcquire(psFtraceData->hFTraceResourceLock); + _GpuTraceDisable(psDeviceInfo, IMG_FALSE); + OSLockRelease(psFtraceData->hFTraceResourceLock); + goto out; + + } + } /* else no data, ignore */ + } + else if (eError != PVRSRV_ERROR_TIMEOUT) + { + PVR_LOG_ERROR(eError, "TLClientAcquireData"); + } + if (bFTraceLockAcquired) + { + OSLockRelease(psFtraceData->hFTraceResourceLock); + } +out: + PVR_DPF_RETURN; +} + +/* ----- AppHint interface -------------------------------------------------- */ + +static PVRSRV_ERROR _GpuTraceIsEnabledCallback( + const PVRSRV_DEVICE_NODE *device, + const void *private_data, + IMG_BOOL *value) +{ + PVR_UNREFERENCED_PARAMETER(device); + PVR_UNREFERENCED_PARAMETER(private_data); + + *value = gbFTraceGPUEventsEnabled; + + return PVRSRV_OK; +} + +static PVRSRV_ERROR _GpuTraceSetEnabledCallback( + const PVRSRV_DEVICE_NODE *device, + const void *private_data, + IMG_BOOL value) +{ + PVR_UNREFERENCED_PARAMETER(device); + + /* Lock down the state to avoid concurrent writes */ + OSLockAcquire(ghGPUTraceStateLock); + + if (value != gbFTraceGPUEventsEnabled) + { + PVRSRV_ERROR eError; + if ((eError = _GpuTraceSetEnabledForAllDevices(value)) == PVRSRV_OK) + { + PVR_TRACE(("%s GPU FTrace", value ? "ENABLED" : "DISABLED")); + gbFTraceGPUEventsEnabled = value; + } + else + { + PVR_TRACE(("FAILED to %s GPU FTrace", value ? "enable" : "disable")); + /* On failure, partial enable/disable might have resulted. + * Try best to restore to previous state. 
Ignore error */ + _GpuTraceSetEnabledForAllDevices(gbFTraceGPUEventsEnabled); + + OSLockRelease(ghGPUTraceStateLock); + return eError; + } + } + else + { + PVR_TRACE(("GPU FTrace already %s!", value ? "enabled" : "disabled")); + } + + OSLockRelease(ghGPUTraceStateLock); + + return PVRSRV_OK; +} + +void PVRGpuTraceInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_EnableFTraceGPU, + _GpuTraceIsEnabledCallback, + _GpuTraceSetEnabledCallback, + psDeviceNode, NULL); +} + +/* ----- FTrace event callbacks -------------------------------------------- */ + +void PVRGpuTraceEnableUfoCallback(void) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = PVRSRVGetPVRSRVData()->psDeviceNodeList; +#if defined(SUPPORT_RGX) + PVRSRV_RGXDEV_INFO *psRgxDevInfo; + PVRSRV_ERROR eError; +#endif + + /* Lock down events state, for consistent value of guiUfoEventRef */ + OSLockAcquire(ghLockFTraceEventLock); + if (guiUfoEventRef++ == 0) + { + /* make sure UFO events are enabled on all rogue devices */ + while (psDeviceNode) + { +#if defined(SUPPORT_RGX) + IMG_UINT64 ui64Filter; + + psRgxDevInfo = psDeviceNode->pvDevice; + ui64Filter = RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO) | + psRgxDevInfo->ui64HWPerfFilter; + /* Small chance exists that ui64HWPerfFilter can be changed here and + * the newest filter value will be changed to the old one + UFO event. + * This is not a critical problem. */ + eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDeviceNode, RGX_HWPERF_STREAM_ID0_FW, + IMG_FALSE, ui64Filter); + if (eError == PVRSRV_ERROR_NOT_INITIALISED) + { + /* If we land here that means that the FW is not initialised yet. + * We stored the filter and it will be passed to the firmware + * during its initialisation phase. So ignore. 
*/ + } + else if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Could not enable UFO HWPerf events on device %d", psDeviceNode->sDevId.i32OsDeviceID)); + } +#endif + psDeviceNode = psDeviceNode->psNext; + } + } + OSLockRelease(ghLockFTraceEventLock); +} + +void PVRGpuTraceDisableUfoCallback(void) +{ +#if defined(SUPPORT_RGX) + PVRSRV_ERROR eError; +#endif + PVRSRV_DEVICE_NODE *psDeviceNode; + + /* We have to check if lock is valid because on driver unload + * PVRGpuTraceSupportDeInit is called before kernel disables the ftrace + * events. This means that the lock will be destroyed before this callback + * is called. + * We can safely return if that situation happens because driver will be + * unloaded so we don't care about HWPerf state anymore. */ + if (ghLockFTraceEventLock == NULL) + return; + + psDeviceNode = PVRSRVGetPVRSRVData()->psDeviceNodeList; + + /* Lock down events state, for consistent value of guiUfoEventRef */ + OSLockAcquire(ghLockFTraceEventLock); + if (--guiUfoEventRef == 0) + { + /* make sure UFO events are disabled on all rogue devices */ + while (psDeviceNode) + { +#if defined(SUPPORT_RGX) + IMG_UINT64 ui64Filter; + PVRSRV_RGXDEV_INFO *psRgxDevInfo = psDeviceNode->pvDevice; + + ui64Filter = ~(RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO)) & + psRgxDevInfo->ui64HWPerfFilter; + /* Small chance exists that ui64HWPerfFilter can be changed here and + * the newest filter value will be changed to the old one + UFO event. + * This is not a critical problem. */ + eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDeviceNode, RGX_HWPERF_STREAM_ID0_FW, + IMG_FALSE, ui64Filter); + if (eError == PVRSRV_ERROR_NOT_INITIALISED) + { + /* If we land here that means that the FW is not initialised yet. + * We stored the filter and it will be passed to the firmware + * during its initialisation phase. So ignore. 
*/ + } + else if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Could not disable UFO HWPerf events on device %d", + psDeviceNode->sDevId.i32OsDeviceID)); + } +#endif + psDeviceNode = psDeviceNode->psNext; + } + } + OSLockRelease(ghLockFTraceEventLock); +} + +void PVRGpuTraceEnableFirmwareActivityCallback(void) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = PVRSRVGetPVRSRVData()->psDeviceNodeList; +#if defined(SUPPORT_RGX) + PVRSRV_RGXDEV_INFO *psRgxDevInfo; + uint64_t ui64Filter, ui64FWEventsFilter = 0; + int i; + + for (i = RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE; + i <= RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE; i++) + { + ui64FWEventsFilter |= RGX_HWPERF_EVENT_MASK_VALUE(i); + } +#endif + OSLockAcquire(ghLockFTraceEventLock); + /* Enable all FW events on all the devices */ + while (psDeviceNode) + { +#if defined(SUPPORT_RGX) + PVRSRV_ERROR eError; + psRgxDevInfo = psDeviceNode->pvDevice; + ui64Filter = psRgxDevInfo->ui64HWPerfFilter | ui64FWEventsFilter; + + eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDeviceNode, RGX_HWPERF_STREAM_ID0_FW, + IMG_FALSE, ui64Filter); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Could not enable HWPerf event for firmware" + " task timings (%s).", PVRSRVGetErrorString(eError))); + } +#endif + psDeviceNode = psDeviceNode->psNext; + } + OSLockRelease(ghLockFTraceEventLock); +} + +void PVRGpuTraceDisableFirmwareActivityCallback(void) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; +#if defined(SUPPORT_RGX) + IMG_UINT64 ui64FWEventsFilter = ~0; + int i; +#endif + + /* We have to check if lock is valid because on driver unload + * PVRGpuTraceSupportDeInit is called before kernel disables the ftrace + * events. This means that the lock will be destroyed before this callback + * is called. + * We can safely return if that situation happens because driver will be + * unloaded so we don't care about HWPerf state anymore. 
*/ + if (ghLockFTraceEventLock == NULL) + return; + + psDeviceNode = PVRSRVGetPVRSRVData()->psDeviceNodeList; + +#if defined(SUPPORT_RGX) + for (i = RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE; + i <= RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE; i++) + { + ui64FWEventsFilter &= ~RGX_HWPERF_EVENT_MASK_VALUE(i); + } +#endif + + OSLockAcquire(ghLockFTraceEventLock); + + /* Disable all FW events on all the devices */ + while (psDeviceNode) + { +#if defined(SUPPORT_RGX) + PVRSRV_RGXDEV_INFO *psRgxDevInfo = psDeviceNode->pvDevice; + IMG_UINT64 ui64Filter = psRgxDevInfo->ui64HWPerfFilter & ui64FWEventsFilter; + + if (PVRSRVRGXCtrlHWPerfKM(NULL, psDeviceNode, RGX_HWPERF_STREAM_ID0_FW, + IMG_FALSE, ui64Filter) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Could not disable HWPerf event for firmware task timings.")); + } +#endif + psDeviceNode = psDeviceNode->psNext; + } + + OSLockRelease(ghLockFTraceEventLock); +} + +/****************************************************************************** + End of file (pvr_gputrace.c) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/pvr_intrinsics.h b/drivers/gpu/drm/phytium/octopus/pvr_intrinsics.h new file mode 100644 index 000000000000..ef93d147b13b --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_intrinsics.h @@ -0,0 +1,70 @@ +/*************************************************************************/ /*! +@File +@Title Intrinsics definitions +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef PVR_INTRINSICS_H +#define PVR_INTRINSICS_H + +/* PVR_CTZLL: + * Count the number of trailing zeroes in a long long integer + */ + +#if defined(__GNUC__) +#if defined(__x86_64__) + + #define PVR_CTZLL __builtin_ctzll +#endif +#endif + +/* PVR_CLZLL: + * Count the number of leading zeroes in a long long integer + */ + +#if defined(__GNUC__) +#if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__) || \ + defined(__arm__) || defined(__mips) + +#define PVR_CLZLL __builtin_clzll + +#endif +#endif + +#endif /* PVR_INTRINSICS_H */ diff --git a/drivers/gpu/drm/phytium/octopus/pvr_ion_stats.h b/drivers/gpu/drm/phytium/octopus/pvr_ion_stats.h new file mode 100644 index 000000000000..506b5fd18835 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_ion_stats.h @@ -0,0 +1,80 @@ +/*************************************************************************/ /*! +@File +@Title Functions for recording ION memory stats. +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef PVR_ION_STATS_H +#define PVR_ION_STATS_H + +#include "pvrsrv_error.h" +#include "img_defs.h" + +struct dma_buf; + +#if defined(PVRSRV_ENABLE_PVR_ION_STATS) +PVRSRV_ERROR PVRSRVIonStatsInitialise(void); + +void PVRSRVIonStatsDestroy(void); + +void PVRSRVIonAddMemAllocRecord(struct dma_buf *psDmaBuf); + +void PVRSRVIonRemoveMemAllocRecord(struct dma_buf *psDmaBuf); +#else +static INLINE PVRSRV_ERROR PVRSRVIonStatsInitialise(void) +{ + return PVRSRV_OK; +} + +static INLINE void PVRSRVIonStatsDestroy(void) +{ +} + +static INLINE void PVRSRVIonAddMemAllocRecord(struct dma_buf *psDmaBuf) +{ + PVR_UNREFERENCED_PARAMETER(psDmaBuf); +} + +static INLINE void PVRSRVIonRemoveMemAllocRecord(struct dma_buf *psDmaBuf) +{ + PVR_UNREFERENCED_PARAMETER(psDmaBuf); +} +#endif /* defined(PVRSRV_ENABLE_PVR_ION_STATS) */ + +#endif /* PVR_ION_STATS_H */ diff --git a/drivers/gpu/drm/phytium/octopus/pvr_linux_fence.h b/drivers/gpu/drm/phytium/octopus/pvr_linux_fence.h new file mode 100644 index 000000000000..f55d5ce048e0 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_linux_fence.h @@ -0,0 +1,103 @@ +/* + * @File + * @Title PhytiumVR Linux fence compatibility header + * @Codingstyle LinuxKernel + * @Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved + * @License Dual MIT/GPLv2 + * + * The contents of this file are subject to the MIT license as set out below. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * Alternatively, the contents of this file may be used under the terms of + * the GNU General Public License Version 2 ("GPL") in which case the provisions + * of GPL are applicable instead of those above. + * + * If you wish to allow use of your version of this file only under the terms of + * GPL, and not to allow others to use your version of this file under the terms + * of the MIT license, indicate your decision by deleting the provisions above + * and replace them with the notice and other provisions required by GPL as set + * out in the file called "GPL-COPYING" included in this distribution. If you do + * not delete the provisions above, a recipient may use your version of this file + * under the terms of either the MIT license or GPL. + * + * This License is also included in this distribution in the file called + * "MIT-COPYING". 
+ * + * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS + * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING + * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#if !defined(__PVR_LINUX_FENCE_H__) +#define __PVR_LINUX_FENCE_H__ + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) && \ + !defined(CHROMIUMOS_KERNEL_HAS_DMA_FENCE) +#include +#else +#include +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) && \ + !defined(CHROMIUMOS_KERNEL_HAS_DMA_FENCE) +/* Structures */ +#define dma_fence fence +#define dma_fence_array fence_array +#define dma_fence_cb fence_cb +#define dma_fence_ops fence_ops + +/* Defines and Enums */ +#define DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT FENCE_FLAG_ENABLE_SIGNAL_BIT +#define DMA_FENCE_FLAG_SIGNALED_BIT FENCE_FLAG_SIGNALED_BIT +#define DMA_FENCE_FLAG_USER_BITS FENCE_FLAG_USER_BITS + +#define DMA_FENCE_ERR FENCE_ERR +#define DMA_FENCE_TRACE FENCE_TRACE +#define DMA_FENCE_WARN FENCE_WARN + +/* Functions */ +#define dma_fence_add_callback fence_add_callback +#define dma_fence_context_alloc fence_context_alloc +#define dma_fence_default_wait fence_default_wait +#define dma_fence_is_signaled fence_is_signaled +#define dma_fence_enable_sw_signaling fence_enable_sw_signaling +#define dma_fence_free fence_free +#define dma_fence_get fence_get +#define dma_fence_get_rcu fence_get_rcu +#define dma_fence_init fence_init +#define dma_fence_is_array fence_is_array +#define dma_fence_put fence_put +#define dma_fence_signal fence_signal +#define dma_fence_wait fence_wait +#define to_dma_fence_array to_fence_array + +static inline 
signed long +dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout) +{ + signed long lret; + + lret = fence_wait_timeout(fence, intr, timeout); + if (lret || timeout) + return lret; + + return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) ? 1 : 0; +} + +#endif + +#endif /* !defined(__PVR_LINUX_FENCE_H__) */ diff --git a/drivers/gpu/drm/phytium/octopus/pvr_notifier.c b/drivers/gpu/drm/phytium/octopus/pvr_notifier.c new file mode 100644 index 000000000000..58d7d387154a --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_notifier.c @@ -0,0 +1,531 @@ +/*************************************************************************/ /*! +@File +@Title PhytiumVR notifier interface +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "img_defs.h" +#include "allocmem.h" +#include "dllist.h" + +#include "device.h" +#include "pvr_notifier.h" +#include "pvrsrv.h" +#include "pvrversion.h" +#include "connection_server.h" + +#include "osfunc.h" +#include "sofunc_pvr.h" + +#define PVR_DUMP_DRIVER_INFO(x, y) \ + PVR_DUMPDEBUG_LOG("%s info: %d.%d @ %8d (%s) build options: 0x%08x", \ + (x), \ + PVRVERSION_UNPACK_MAJ((y).ui32BuildVersion), \ + PVRVERSION_UNPACK_MIN((y).ui32BuildVersion), \ + (y).ui32BuildRevision, \ + (BUILD_TYPE_DEBUG == (y).ui32BuildType) ? 
"debug":"release", \ + (y).ui32BuildOptions); + +#if !defined(WINDOW_SYSTEM) +#define WINDOW_SYSTEM "Unknown" +#endif + +#define IS_DECLARED(x) (x[0] != '\0') + +/*************************************************************************/ /*! +Command Complete Notifier Interface +*/ /**************************************************************************/ + +typedef struct PVRSRV_CMDCOMP_NOTIFY_TAG +{ + PVRSRV_CMDCOMP_HANDLE hCmdCompHandle; + PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify; + DLLIST_NODE sListNode; +} PVRSRV_CMDCOMP_NOTIFY; + +/* Head of the list of callbacks called when command complete happens */ +static DLLIST_NODE g_sCmdCompNotifyHead; +static POSWR_LOCK g_hCmdCompNotifyLock; + +PVRSRV_ERROR +PVRSRVCmdCompleteInit(void) +{ + PVRSRV_ERROR eError; + + eError = OSWRLockCreate(&g_hCmdCompNotifyLock); + PVR_RETURN_IF_ERROR(eError); + + dllist_init(&g_sCmdCompNotifyHead); + + return PVRSRV_OK; +} + +void +PVRSRVCmdCompleteDeinit(void) +{ + /* Check that all notify function have been unregistered */ + if (!dllist_is_empty(&g_sCmdCompNotifyHead)) + { + PDLLIST_NODE psNode; + + PVR_DPF((PVR_DBG_ERROR, + "%s: Command complete notify list is not empty!", __func__)); + + /* Clean up any stragglers */ + psNode = dllist_get_next_node(&g_sCmdCompNotifyHead); + while (psNode) + { + PVRSRV_CMDCOMP_NOTIFY *psNotify; + + dllist_remove_node(psNode); + + psNotify = IMG_CONTAINER_OF(psNode, PVRSRV_CMDCOMP_NOTIFY, sListNode); + OSFreeMem(psNotify); + + psNode = dllist_get_next_node(&g_sCmdCompNotifyHead); + } + } + + if (g_hCmdCompNotifyLock) + { + OSWRLockDestroy(g_hCmdCompNotifyLock); + } +} + +PVRSRV_ERROR +PVRSRVRegisterCmdCompleteNotify(IMG_HANDLE *phNotify, + PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify, + PVRSRV_CMDCOMP_HANDLE hCmdCompHandle) +{ + PVRSRV_CMDCOMP_NOTIFY *psNotify; + + PVR_LOG_RETURN_IF_INVALID_PARAM(phNotify, "phNotify"); + PVR_LOG_RETURN_IF_INVALID_PARAM(pfnCmdCompleteNotify, "pfnCmdCompleteNotify"); + PVR_LOG_RETURN_IF_INVALID_PARAM(hCmdCompHandle, 
"hCmdCompHandle"); + + psNotify = OSAllocMem(sizeof(*psNotify)); + PVR_LOG_RETURN_IF_NOMEM(psNotify, "psNotify"); + + /* Set-up the notify data */ + psNotify->hCmdCompHandle = hCmdCompHandle; + psNotify->pfnCmdCompleteNotify = pfnCmdCompleteNotify; + + /* Add it to the list of Notify functions */ + OSWRLockAcquireWrite(g_hCmdCompNotifyLock); + dllist_add_to_tail(&g_sCmdCompNotifyHead, &psNotify->sListNode); + OSWRLockReleaseWrite(g_hCmdCompNotifyLock); + + *phNotify = psNotify; + + return PVRSRV_OK; +} + +PVRSRV_ERROR +PVRSRVUnregisterCmdCompleteNotify(IMG_HANDLE hNotify) +{ + PVRSRV_CMDCOMP_NOTIFY *psNotify; + + psNotify = (PVRSRV_CMDCOMP_NOTIFY *) hNotify; + PVR_LOG_RETURN_IF_INVALID_PARAM(psNotify, "hNotify"); + + OSWRLockAcquireWrite(g_hCmdCompNotifyLock); + dllist_remove_node(&psNotify->sListNode); + OSWRLockReleaseWrite(g_hCmdCompNotifyLock); + + OSFreeMem(psNotify); + + return PVRSRV_OK; +} + +void +PVRSRVNotifyCommandCompletion(PVRSRV_CMDCOMP_HANDLE hCmdCompCallerHandle) +{ +#if !defined(NO_HARDWARE) + DLLIST_NODE *psNode, *psNext; + + /* Call notify callbacks to check if blocked work items can now proceed */ + OSWRLockAcquireRead(g_hCmdCompNotifyLock); + dllist_foreach_node(&g_sCmdCompNotifyHead, psNode, psNext) + { + PVRSRV_CMDCOMP_NOTIFY *psNotify = + IMG_CONTAINER_OF(psNode, PVRSRV_CMDCOMP_NOTIFY, sListNode); + + if (hCmdCompCallerHandle != psNotify->hCmdCompHandle) + { + psNotify->pfnCmdCompleteNotify(psNotify->hCmdCompHandle); + } + } + OSWRLockReleaseRead(g_hCmdCompNotifyLock); +#endif +} + +inline void +PVRSRVSignalGlobalEO(void) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + + if (psPVRSRVData->hGlobalEventObject) + { + OSEventObjectSignal(psPVRSRVData->hGlobalEventObject); + } +} + +inline void +PVRSRVCheckStatus(PVRSRV_CMDCOMP_HANDLE hCmdCompCallerHandle) +{ + PVRSRVNotifyCommandCompletion(hCmdCompCallerHandle); + PVRSRVSignalGlobalEO(); +} + +/*************************************************************************/ /*! 
+Debug Notifier Interface +*/ /**************************************************************************/ + +typedef struct DEBUG_REQUEST_ENTRY_TAG +{ + IMG_UINT32 ui32RequesterID; + DLLIST_NODE sListHead; +} DEBUG_REQUEST_ENTRY; + +typedef struct DEBUG_REQUEST_TABLE_TAG +{ + POSWR_LOCK hLock; + IMG_UINT32 ui32RequestCount; + DEBUG_REQUEST_ENTRY asEntry[1]; +} DEBUG_REQUEST_TABLE; + +typedef struct DEBUG_REQUEST_NOTIFY_TAG +{ + PVRSRV_DEVICE_NODE *psDevNode; + PVRSRV_DBGREQ_HANDLE hDbgRequestHandle; + PFN_DBGREQ_NOTIFY pfnDbgRequestNotify; + IMG_UINT32 ui32RequesterID; + DLLIST_NODE sListNode; +} DEBUG_REQUEST_NOTIFY; + + +PVRSRV_ERROR +PVRSRVRegisterDbgTable(PVRSRV_DEVICE_NODE *psDevNode, + const IMG_UINT32 *paui32Table, IMG_UINT32 ui32Length) +{ + DEBUG_REQUEST_TABLE *psDebugTable; + IMG_UINT32 i; + PVRSRV_ERROR eError; + + if (psDevNode->hDebugTable) + { + return PVRSRV_ERROR_DBGTABLE_ALREADY_REGISTERED; + } + + psDebugTable = OSAllocMem(sizeof(DEBUG_REQUEST_TABLE) + + (sizeof(DEBUG_REQUEST_ENTRY) * (ui32Length-1))); + PVR_RETURN_IF_NOMEM(psDebugTable); + + eError = OSWRLockCreate(&psDebugTable->hLock); + PVR_GOTO_IF_ERROR(eError, ErrorFreeDebugTable); + + psDebugTable->ui32RequestCount = ui32Length; + + /* Init the list heads */ + for (i = 0; i < ui32Length; i++) + { + psDebugTable->asEntry[i].ui32RequesterID = paui32Table[i]; + dllist_init(&psDebugTable->asEntry[i].sListHead); + } + + psDevNode->hDebugTable = (IMG_HANDLE *) psDebugTable; + + return PVRSRV_OK; + +ErrorFreeDebugTable: + OSFreeMem(psDebugTable); + + return eError; +} + +void +PVRSRVUnregisterDbgTable(PVRSRV_DEVICE_NODE *psDevNode) +{ + DEBUG_REQUEST_TABLE *psDebugTable; + IMG_UINT32 i; + + PVR_ASSERT(psDevNode->hDebugTable); + psDebugTable = (DEBUG_REQUEST_TABLE *) psDevNode->hDebugTable; + psDevNode->hDebugTable = NULL; + + for (i = 0; i < psDebugTable->ui32RequestCount; i++) + { + if (!dllist_is_empty(&psDebugTable->asEntry[i].sListHead)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Found registered 
callback(s) on %d", + __func__, i)); + } + } + + OSWRLockDestroy(psDebugTable->hLock); + psDebugTable->hLock = NULL; + + OSFreeMem(psDebugTable); +} + +PVRSRV_ERROR +PVRSRVRegisterDbgRequestNotify(IMG_HANDLE *phNotify, + PVRSRV_DEVICE_NODE *psDevNode, + PFN_DBGREQ_NOTIFY pfnDbgRequestNotify, + IMG_UINT32 ui32RequesterID, + PVRSRV_DBGREQ_HANDLE hDbgRequestHandle) +{ + DEBUG_REQUEST_TABLE *psDebugTable; + DEBUG_REQUEST_NOTIFY *psNotify; + PDLLIST_NODE psHead = NULL; + IMG_UINT32 i; + PVRSRV_ERROR eError; + + PVR_LOG_RETURN_IF_INVALID_PARAM(phNotify, "phNotify"); + PVR_LOG_RETURN_IF_INVALID_PARAM(psDevNode, "psDevNode"); + PVR_LOG_RETURN_IF_INVALID_PARAM(pfnDbgRequestNotify, "pfnDbRequestNotify"); + + psDebugTable = (DEBUG_REQUEST_TABLE *) psDevNode->hDebugTable; + + PVR_ASSERT(psDebugTable); + + /* NoStats used since this may be called outside of the register/de-register + * process calls which track memory use. */ + psNotify = OSAllocMemNoStats(sizeof(*psNotify)); + PVR_LOG_RETURN_IF_NOMEM(psNotify, "psNotify"); + + /* Set-up the notify data */ + psNotify->psDevNode = psDevNode; + psNotify->hDbgRequestHandle = hDbgRequestHandle; + psNotify->pfnDbgRequestNotify = pfnDbgRequestNotify; + psNotify->ui32RequesterID = ui32RequesterID; + + /* Lock down all the lists */ + OSWRLockAcquireWrite(psDebugTable->hLock); + + /* Find which list to add it to */ + for (i = 0; i < psDebugTable->ui32RequestCount; i++) + { + if (psDebugTable->asEntry[i].ui32RequesterID == ui32RequesterID) + { + psHead = &psDebugTable->asEntry[i].sListHead; + } + } + + /* Failed to find debug requester */ + PVR_LOG_GOTO_IF_INVALID_PARAM(psHead, eError, ErrorReleaseLock); + + /* Add it to the list of Notify functions */ + dllist_add_to_tail(psHead, &psNotify->sListNode); + + /* Unlock the lists */ + OSWRLockReleaseWrite(psDebugTable->hLock); + + *phNotify = psNotify; + + return PVRSRV_OK; + +ErrorReleaseLock: + OSWRLockReleaseWrite(psDebugTable->hLock); + OSFreeMem(psNotify); + + return eError; +} + 
+PVRSRV_ERROR +SOPvrDbgRequestNotifyRegister(IMG_HANDLE *phNotify, + PVRSRV_DEVICE_NODE *psDevNode, + PFN_DBGREQ_NOTIFY pfnDbgRequestNotify, + IMG_UINT32 ui32RequesterID, + PVRSRV_DBGREQ_HANDLE hDbgRequestHandle) +{ + return PVRSRVRegisterDbgRequestNotify(phNotify, + psDevNode, + pfnDbgRequestNotify, + ui32RequesterID, + hDbgRequestHandle); +} + +PVRSRV_ERROR +PVRSRVUnregisterDbgRequestNotify(IMG_HANDLE hNotify) +{ + DEBUG_REQUEST_NOTIFY *psNotify = (DEBUG_REQUEST_NOTIFY *) hNotify; + DEBUG_REQUEST_TABLE *psDebugTable; + + PVR_LOG_RETURN_IF_INVALID_PARAM(psNotify, "psNotify"); + + psDebugTable = (DEBUG_REQUEST_TABLE *) psNotify->psDevNode->hDebugTable; + + OSWRLockAcquireWrite(psDebugTable->hLock); + dllist_remove_node(&psNotify->sListNode); + OSWRLockReleaseWrite(psDebugTable->hLock); + + OSFreeMemNoStats(psNotify); + + return PVRSRV_OK; +} + +PVRSRV_ERROR +SOPvrDbgRequestNotifyUnregister(IMG_HANDLE hNotify) +{ + return PVRSRVUnregisterDbgRequestNotify(hNotify); +} + +void +PVRSRVDebugRequest(PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 ui32VerbLevel, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + DEBUG_REQUEST_TABLE *psDebugTable = + (DEBUG_REQUEST_TABLE *) psDevNode->hDebugTable; + static const IMG_CHAR *apszVerbosityTable[] = { "Low", "Medium", "High" }; + const IMG_CHAR *szVerbosityLevel; + const IMG_CHAR *Bit32 = "32 Bit", *Bit64 = "64 Bit"; + IMG_UINT32 i; + + static_assert(ARRAY_SIZE(apszVerbosityTable) == DEBUG_REQUEST_VERBOSITY_MAX+1, + "Incorrect number of verbosity levels"); + + PVR_ASSERT(psDebugTable); + + OSWRLockAcquireRead(psDebugTable->hLock); + + if (ui32VerbLevel < ARRAY_SIZE(apszVerbosityTable)) + { + szVerbosityLevel = apszVerbosityTable[ui32VerbLevel]; + } + else + { + szVerbosityLevel = "unknown"; + PVR_ASSERT(!"Invalid verbosity level received"); + } + + PVR_DUMPDEBUG_LOG("------------[ PVR DBG: START (%s) ]------------", + szVerbosityLevel); + + 
OSDumpVersionInfo(pfnDumpDebugPrintf, pvDumpDebugFile); + + PVR_DUMPDEBUG_LOG("DDK info: %s (%s) %s", + PVRVERSION_STRING, PVR_BUILD_TYPE, PVR_BUILD_DIR); + + PVR_DUMPDEBUG_LOG("Time now: %" IMG_UINT64_FMTSPEC "us", + OSClockus64()); + + switch (psPVRSRVData->eServicesState) + { + case PVRSRV_SERVICES_STATE_OK: + PVR_DUMPDEBUG_LOG("Services State: OK"); + break; + case PVRSRV_SERVICES_STATE_BAD: + PVR_DUMPDEBUG_LOG("Services State: BAD"); + break; + case PVRSRV_SERVICES_STATE_UNDEFINED: + PVR_DUMPDEBUG_LOG("Services State: UNDEFINED"); + break; + default: + PVR_DUMPDEBUG_LOG("Services State: UNKNOWN (%d)", + psPVRSRVData->eServicesState); + break; + } + + PVR_DUMPDEBUG_LOG("Server Errors: %d", + PVRSRV_KM_ERRORS); + + PVRSRVConnectionDebugNotify(pfnDumpDebugPrintf, pvDumpDebugFile); + + PVR_DUMPDEBUG_LOG("------[ Driver Info ]------"); + + PVR_DUMPDEBUG_LOG("Comparison of UM/KM components: %s", + (psPVRSRVData->sDriverInfo.bIsNoMatch) ? "MISMATCH" : "MATCHING"); + + PVR_DUMPDEBUG_LOG("KM Arch: %s", + (psPVRSRVData->sDriverInfo.ui8KMBitArch & BUILD_ARCH_64BIT) ? Bit64 : Bit32); + + if (!PVRSRV_VZ_MODE_IS(NATIVE)) + { + PVR_DUMPDEBUG_LOG("Driver Mode: %s", + (PVRSRV_VZ_MODE_IS(HOST)) ? "Host":"Guest"); + } + + if (psPVRSRVData->sDriverInfo.ui8UMSupportedArch) + { + if ((psPVRSRVData->sDriverInfo.ui8UMSupportedArch & BUILD_ARCH_BOTH) == + BUILD_ARCH_BOTH) + { + PVR_DUMPDEBUG_LOG("UM Connected Clients Arch: %s and %s", Bit64, Bit32); + + }else + { + PVR_DUMPDEBUG_LOG("UM Connected Clients: %s", + (psPVRSRVData->sDriverInfo.ui8UMSupportedArch & BUILD_ARCH_64BIT) ? Bit64 : Bit32); + } + } + + PVR_DUMP_DRIVER_INFO("UM", psPVRSRVData->sDriverInfo.sUMBuildInfo); + PVR_DUMP_DRIVER_INFO("KM", psPVRSRVData->sDriverInfo.sKMBuildInfo); + + PVR_DUMPDEBUG_LOG("Window system: %s", (IS_DECLARED(WINDOW_SYSTEM)) ? 
(WINDOW_SYSTEM) : "Not declared"); + + /* For each requester */ + for (i = 0; i < psDebugTable->ui32RequestCount; i++) + { + DLLIST_NODE *psNode; + DLLIST_NODE *psNext; + + /* For each notifier on this requestor */ + dllist_foreach_node(&psDebugTable->asEntry[i].sListHead, psNode, psNext) + { + DEBUG_REQUEST_NOTIFY *psNotify = + IMG_CONTAINER_OF(psNode, DEBUG_REQUEST_NOTIFY, sListNode); + psNotify->pfnDbgRequestNotify(psNotify->hDbgRequestHandle, ui32VerbLevel, + pfnDumpDebugPrintf, pvDumpDebugFile); + } + } + + PVR_DUMPDEBUG_LOG("------------[ PVR DBG: END ]------------"); + OSWRLockReleaseRead(psDebugTable->hLock); + + if (!pfnDumpDebugPrintf) + { + /* Only notify OS of an issue if the debug dump has gone there */ + OSWarnOn(IMG_TRUE); + } +} diff --git a/drivers/gpu/drm/phytium/octopus/pvr_notifier.h b/drivers/gpu/drm/phytium/octopus/pvr_notifier.h new file mode 100644 index 000000000000..169deff50ecf --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_notifier.h @@ -0,0 +1,269 @@ +/*************************************************************************/ /*! +@File +@Title PhytiumVR notifier interface +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(PVR_NOTIFIER_H) +#define PVR_NOTIFIER_H + +#include "img_types.h" +#include "pvr_debug.h" + + +/*************************************************************************/ /*! 
+Command Complete Notifier Interface
+*/ /**************************************************************************/
+
+typedef IMG_HANDLE PVRSRV_CMDCOMP_HANDLE;
+#ifndef CMDCOMPNOTIFY_PFN
+typedef void (*PFN_CMDCOMP_NOTIFY)(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle);
+#define CMDCOMPNOTIFY_PFN
+#endif
+
+/*************************************************************************/ /*!
+@Function PVRSRVCmdCompleteInit
+@Description Performs initialisation of the command complete notifier
+ interface.
+@Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVCmdCompleteInit(void);
+
+/*************************************************************************/ /*!
+@Function PVRSRVCmdCompleteDeinit
+@Description Performs cleanup for the command complete notifier interface.
+@Return void
+*/ /**************************************************************************/
+void
+PVRSRVCmdCompleteDeinit(void);
+
+/*************************************************************************/ /*!
+@Function PVRSRVRegisterCmdCompleteNotify
+@Description Register a callback function that is called when some device
+ finishes some work, which is signalled via a call to
+ PVRSRVCheckStatus.
+@Output phNotify On success, points to command complete
+ notifier handle
+@Input pfnCmdCompleteNotify Function callback
+@Input hPrivData Data to be passed back to the caller via
+ the callback function
+@Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVRegisterCmdCompleteNotify(IMG_HANDLE *phNotify,
+ PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify,
+ PVRSRV_CMDCOMP_HANDLE hPrivData);
+
+/*************************************************************************/ /*!
+@Function PVRSRVUnregisterCmdCompleteNotify +@Description Unregister a previously registered callback function. +@Input hNotify Command complete notifier handle +@Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error +*/ /**************************************************************************/ +PVRSRV_ERROR +PVRSRVUnregisterCmdCompleteNotify(IMG_HANDLE hNotify); + +/*************************************************************************/ /*! +@Function PVRSRVCheckStatus +@Description Calls PVRSRVNotifyCommandCompletion() to notify registered + command complete handlers of work completion and then calls + PVRSRVSignalGlobalEO() to signal the global event object. +@Input hCmdCompCallerHandle Used to prevent a handler from being + notified. A NULL value results in all + handlers being notified. +*/ /**************************************************************************/ +void +PVRSRVCheckStatus(PVRSRV_CMDCOMP_HANDLE hCmdCompCallerHandle); + +/*************************************************************************/ /*! +@Function PVRSRVNotifyCommandCompletion +@Description Notify any registered command complete handlers that some work + has been finished (unless hCmdCompCallerHandle matches a + handler's hPrivData). +@Input hCmdCompCallerHandle Used to prevent a handler from being + notified. A NULL value results in all + handlers being notified. +*/ /**************************************************************************/ +void +PVRSRVNotifyCommandCompletion(PVRSRV_CMDCOMP_HANDLE hCmdCompCallerHandle); + +/*************************************************************************/ /*! +@Function PVRSRVSignalGlobalEO +@Description Signals the global event object. +*/ /**************************************************************************/ +void +PVRSRVSignalGlobalEO(void); + + +/*************************************************************************/ /*! 
+Debug Notifier Interface +*/ /**************************************************************************/ + +#define DEBUG_REQUEST_DC 0 +#define DEBUG_REQUEST_SYNCTRACKING 1 +#define DEBUG_REQUEST_SYS 2 +#define DEBUG_REQUEST_ANDROIDSYNC 3 +#define DEBUG_REQUEST_LINUXFENCE 4 +#define DEBUG_REQUEST_SYNCCHECKPOINT 5 +#define DEBUG_REQUEST_HTB 6 +#define DEBUG_REQUEST_APPHINT 7 +#define DEBUG_REQUEST_FALLBACKSYNC 8 + +#define DEBUG_REQUEST_VERBOSITY_LOW 0 +#define DEBUG_REQUEST_VERBOSITY_MEDIUM 1 +#define DEBUG_REQUEST_VERBOSITY_HIGH 2 +#define DEBUG_REQUEST_VERBOSITY_MAX DEBUG_REQUEST_VERBOSITY_HIGH + +#define DD_VERB_LVL_ENABLED(_verbLvl, _verbLvlChk) ((_verbLvl) >= (_verbLvlChk)) + +/* + * Macro used within debug dump functions to send output either to PVR_LOG or + * a custom function. The custom function should be stored as a function + * pointer in a local variable called 'pfnDumpDebugPrintf'. 'pvDumpDebugFile' + * is also required as a local variable to serve as a file identifier for the + * printf function if required. + */ +#define PVR_DUMPDEBUG_LOG(...) \ + do \ + { \ + if (pfnDumpDebugPrintf) \ + pfnDumpDebugPrintf(pvDumpDebugFile, __VA_ARGS__); \ + else \ + PVR_LOG((__VA_ARGS__)); \ + } while (0) + +struct _PVRSRV_DEVICE_NODE_; + +typedef IMG_HANDLE PVRSRV_DBGREQ_HANDLE; +#ifndef DBGNOTIFY_PFNS +typedef void (DUMPDEBUG_PRINTF_FUNC)(void *pvDumpDebugFile, + const IMG_CHAR *pszFormat, ...); +typedef void (*PFN_DBGREQ_NOTIFY)(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, + IMG_UINT32 ui32VerbLevel, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile); +#define DBGNOTIFY_PFNS +#endif + +/*************************************************************************/ /*! +@Function PVRSRVRegisterDbgTable +@Description Registers a debug requester table for the given device. The + order in which the debug requester IDs appear in the given + table determine the order in which a set of notifier callbacks + will be called. 
In other words, the requester ID that appears + first will have all of its associated debug notifier callbacks + called first. This will then be followed by all the callbacks + associated with the next requester ID in the table and so on. +@Input psDevNode Device node to register requester table with +@Input paui32Table Array of requester IDs +@Input ui32Length Number of elements in paui32Table +@Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error +*/ /**************************************************************************/ +PVRSRV_ERROR +PVRSRVRegisterDbgTable(struct _PVRSRV_DEVICE_NODE_ *psDevNode, + const IMG_UINT32 *paui32Table, IMG_UINT32 ui32Length); + +/*************************************************************************/ /*! +@Function PVRSRVUnregisterDbgTable +@Description Unregisters a debug requester table. +@Input psDevNode Device node for which the requester table should + be unregistered +@Return void +*/ /**************************************************************************/ +void +PVRSRVUnregisterDbgTable(struct _PVRSRV_DEVICE_NODE_ *psDevNode); + +/*************************************************************************/ /*! +@Function PVRSRVRegisterDbgRequestNotify +@Description Register a callback function that is called when a debug request + is made via a call PVRSRVDebugRequest. There are a number of + verbosity levels ranging from DEBUG_REQUEST_VERBOSITY_LOW up to + DEBUG_REQUEST_VERBOSITY_MAX. The callback will be called once + for each level up to the highest level specified to + PVRSRVDebugRequest. +@Output phNotify Points to debug notifier handle on success +@Input psDevNode Device node for which the debug callback + should be registered +@Input pfnDbgRequestNotify Function callback +@Input ui32RequesterID Requester ID. 
This is used to determine
+ the order in which callbacks are called
+@Input hDbgRequestHandle Data to be passed back to the caller via
+ the callback function
+@Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVRegisterDbgRequestNotify(IMG_HANDLE *phNotify,
+ struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+ PFN_DBGREQ_NOTIFY pfnDbgRequestNotify,
+ IMG_UINT32 ui32RequesterID,
+ PVRSRV_DBGREQ_HANDLE hDbgRequestHandle);
+
+/*************************************************************************/ /*!
+@Function PVRSRVUnregisterDbgRequestNotify
+@Description Unregister a previously registered callback function.
+@Input hNotify Debug notifier handle.
+@Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVUnregisterDbgRequestNotify(IMG_HANDLE hNotify);
+
+/*************************************************************************/ /*!
+@Function PVRSRVDebugRequest
+@Description Notify any registered debug request handlers that a debug
+ request has been made and at what level.
+@Input psDevNode Device node for which the debug request
+ has been made
+@Input ui32VerbLevel The maximum verbosity level to dump
+@Input pfnDumpDebugPrintf Used to specify the print function that
+ should be used to dump any debug
+ information. If this argument is NULL then
+ PVR_LOG() will be used as the default
+ print function.
+@Input pvDumpDebugFile Optional file identifier to be passed to
+ the print function if required. 
+@Return void +*/ /**************************************************************************/ +void +PVRSRVDebugRequest(struct _PVRSRV_DEVICE_NODE_ *psDevNode, + IMG_UINT32 ui32VerbLevel, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile); + +#endif /* !defined(PVR_NOTIFIER_H) */ diff --git a/drivers/gpu/drm/phytium/octopus/pvr_pci_drv.c b/drivers/gpu/drm/phytium/octopus/pvr_pci_drv.c new file mode 100644 index 000000000000..7a7f114a20d8 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_pci_drv.c @@ -0,0 +1,243 @@ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@File +@Title PhytiumVR DRM PCI driver +@Codingstyle LinuxKernel +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) +#include +#include +#else +#include +#endif + +#include +#include + +#include "module_common.h" +#include "pvr_drv.h" +#include "pvrmodule.h" +#include "sysinfo.h" + +/* This header must always be included last */ +#include "kernel_compatibility.h" + +int pvr_dvfsmode = 1; +MODULE_PARM_DESC(pvr_dvfsmode, "dvfsmode: 0--simple_ondemand, 1--performance, 2--powersave, 3--userspace"); +module_param_named(dvfsmode, pvr_dvfsmode, int, 0600); + +static struct drm_driver pvr_drm_pci_driver; + +static int pvr_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) + struct drm_device *ddev; + int ret; + + DRM_DEBUG_DRIVER("device %p\n", &pdev->dev); + + ddev = drm_dev_alloc(&pvr_drm_pci_driver, &pdev->dev); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) + if (IS_ERR(ddev)) + return PTR_ERR(ddev); +#else + if (!ddev) + return -ENOMEM; +#endif + + ret = pci_enable_device(pdev); + if (ret) + goto err_drm_dev_put; + + ddev->pdev = pdev; + + /* + * drm_get_pci_dev calls sets the drvdata at this point, to ddev. + * We set the drvdata in the load callback, so there is no need + * to do it again here. The platform driver equivalent of + * drm_get_pci_dev, drm_platform_init, doesn't set the drvdata, + * which is why it is done in the load callback. + * + * The load callback, called from drm_dev_register, is deprecated, + * because of potential race conditions. Calling the function here, + * before calling drm_dev_register, avoids those potential races. 
+ */ + BUG_ON(pvr_drm_pci_driver.load != NULL); + ret = pvr_drm_load(ddev, 0); + if (ret) + goto err_pci_dev_disable; + + ret = drm_dev_register(ddev, 0); + if (ret) + goto err_drm_dev_unload; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) + DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n", + pvr_drm_pci_driver.name, + pvr_drm_pci_driver.major, + pvr_drm_pci_driver.minor, + pvr_drm_pci_driver.patchlevel, + pvr_drm_pci_driver.date, + pci_name(pdev), + ddev->primary->index); +#endif + return 0; + +err_drm_dev_unload: + pvr_drm_unload(ddev); +err_pci_dev_disable: + pci_disable_device(pdev); +err_drm_dev_put: + drm_dev_put(ddev); + return ret; +#else + DRM_DEBUG_DRIVER("device %p\n", &pdev->dev); + + return drm_get_pci_dev(pdev, ent, &pvr_drm_pci_driver); +#endif +} + +static void pvr_remove(struct pci_dev *pdev) +{ + struct drm_device *ddev = pci_get_drvdata(pdev); + + DRM_DEBUG_DRIVER("device %p\n", &pdev->dev); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) + drm_dev_unregister(ddev); + + /* The unload callback, called from drm_dev_unregister, is + * deprecated. Call the unload function directly. 
+ */ + BUG_ON(pvr_drm_pci_driver.unload != NULL); + pvr_drm_unload(ddev); + + pci_disable_device(pdev); + + drm_dev_put(ddev); +#else + drm_put_dev(ddev); +#endif +} + +static void pvr_shutdown(struct pci_dev *pdev) +{ + struct drm_device *ddev = pci_get_drvdata(pdev); + struct pvr_drm_private *priv = ddev->dev_private; + + DRM_DEBUG_DRIVER("device %p\n", &pdev->dev); + + PVRSRVDeviceShutdown(priv->dev_node); +} + + +static const struct pci_device_id pvr_pci_ids[] = { +// { PCI_DEVICE(SYS_RGX_DEV_VENDOR_ID, SYS_RGX_DEV_DEVICE_ID) }, + { + .vendor = 0x1db7, + .device = 0xdc20, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = 0x0b0400, + .class_mask = 0xffffff, + }, +#if defined(SYS_RGX_DEV1_DEVICE_ID) + { PCI_DEVICE(SYS_RGX_DEV_VENDOR_ID, SYS_RGX_DEV1_DEVICE_ID) }, +#endif +#if defined(SYS_RGX_DEV_FROST_VENDOR_ID) + { PCI_DEVICE(SYS_RGX_DEV_FROST_VENDOR_ID, SYS_RGX_DEV_FROST_DEVICE_ID) }, +#endif + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, pvr_pci_ids); + +static struct pci_driver pvr_pci_driver = { + .name = DRVNAME, + .driver.pm = &pvr_pm_ops, + .id_table = pvr_pci_ids, + .probe = pvr_probe, + .remove = pvr_remove, + .shutdown = pvr_shutdown, +}; + +static int __init pvr_init(void) +{ + int err; + + DRM_DEBUG_DRIVER("\n"); + + pr_err("----gpu vendor id:%x, device id:%x\n",SYS_RGX_DEV_VENDOR_ID,SYS_RGX_DEV_DEVICE_ID); + pvr_drm_pci_driver = pvr_drm_generic_driver; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) + pvr_drm_pci_driver.set_busid = drm_pci_set_busid; +#endif + + err = PVRSRVDriverInit(); + if (err) + return err; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) + return pci_register_driver(&pvr_pci_driver); +#else + return drm_pci_init(&pvr_drm_pci_driver, &pvr_pci_driver); +#endif +} + +static void __exit pvr_exit(void) +{ + DRM_DEBUG_DRIVER("\n"); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) + pci_unregister_driver(&pvr_pci_driver); +#else + 
drm_pci_exit(&pvr_drm_pci_driver, &pvr_pci_driver); +#endif + PVRSRVDriverDeinit(); + + DRM_DEBUG_DRIVER("done\n"); +} + +module_init(pvr_init); +module_exit(pvr_exit); diff --git a/drivers/gpu/drm/phytium/octopus/pvr_procfs.h b/drivers/gpu/drm/phytium/octopus/pvr_procfs.h new file mode 100644 index 000000000000..a489c78f90cb --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_procfs.h @@ -0,0 +1,50 @@ +/*************************************************************************/ /*! +@File +@Title ProcFS implementation of Debug Info interface. +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef PVR_PROCFS_H +#define PVR_PROCFS_H + +#include "pvrsrv_error.h" + +PVRSRV_ERROR PVRProcFsRegister(void); + +#endif /* PVR_PROCFS_H */ diff --git a/drivers/gpu/drm/phytium/octopus/pvr_ricommon.h b/drivers/gpu/drm/phytium/octopus/pvr_ricommon.h new file mode 100644 index 000000000000..5abd70c6ce64 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_ricommon.h @@ -0,0 +1,68 @@ +/*************************************************************************/ /*! +@File +@Title Services Resource Information (RI) common types and definitions +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Resource Information (RI) common types and definitions included + in both user mode and kernel mode source. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifndef PVR_RICOMMON_H +#define PVR_RICOMMON_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#include "img_defs.h" + +/*! Maximum text string length including the null byte */ +#define PRVSRVRI_MAX_TEXT_LENGTH 20U + +/* PID used to hold PMR allocations which are driver-wide (i.e. have a lifetime + * longer than an application process) + */ +#define PVR_SYS_ALLOC_PID 1 + +#if defined(__cplusplus) +} +#endif + +#endif /* PVR_RICOMMON_H */ +/****************************************************************************** + End of file (pvr_ricommon.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/pvr_sw_fence.c b/drivers/gpu/drm/phytium/octopus/pvr_sw_fence.c new file mode 100644 index 000000000000..a0b5027afa8a --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_sw_fence.c @@ -0,0 +1,199 @@ +/* + * @File + * @Codingstyle LinuxKernel + * @Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved + * @License Dual MIT/GPLv2 + * + * The contents of this file are subject to the MIT license as set out below. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * Alternatively, the contents of this file may be used under the terms of + * the GNU General Public License Version 2 ("GPL") in which case the provisions + * of GPL are applicable instead of those above. + * + * If you wish to allow use of your version of this file only under the terms of + * GPL, and not to allow others to use your version of this file under the terms + * of the MIT license, indicate your decision by deleting the provisions above + * and replace them with the notice and other provisions required by GPL as set + * out in the file called "GPL-COPYING" included in this distribution. If you do + * not delete the provisions above, a recipient may use your version of this file + * under the terms of either the MIT license or GPL. + * + * This License is also included in this distribution in the file called + * "MIT-COPYING". + * + * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS + * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING + * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include "pvr_sw_fence.h" + +struct pvr_sw_fence_context { + struct kref kref; + unsigned int context; + char context_name[32]; + char driver_name[32]; + atomic_t seqno; + atomic_t fence_count; +}; + +struct pvr_sw_fence { + struct dma_fence base; + struct pvr_sw_fence_context *fence_context; + spinlock_t lock; +}; + +#define to_pvr_sw_fence(fence) container_of(fence, struct pvr_sw_fence, base) + +const char *pvr_sw_fence_context_name(struct pvr_sw_fence_context *fctx) +{ + return fctx->context_name; +} + +void pvr_sw_fence_context_value_str(struct pvr_sw_fence_context *fctx, + char *str, int size) +{ + snprintf(str, size, "%d", atomic_read(&fctx->seqno)); +} + +static inline unsigned +pvr_sw_fence_context_seqno_next(struct pvr_sw_fence_context *fence_context) +{ + return atomic_inc_return(&fence_context->seqno) - 1; +} + +static const char *pvr_sw_fence_get_driver_name(struct dma_fence *fence) +{ + struct pvr_sw_fence *pvr_sw_fence = to_pvr_sw_fence(fence); + + return pvr_sw_fence->fence_context->driver_name; +} + +static const char *pvr_sw_fence_get_timeline_name(struct dma_fence *fence) +{ + struct pvr_sw_fence *pvr_sw_fence = to_pvr_sw_fence(fence); + + return pvr_sw_fence_context_name(pvr_sw_fence->fence_context); +} + +static void pvr_sw_fence_value_str(struct dma_fence *fence, char *str, int size) +{ + snprintf(str, size, "%llu", (u64) fence->seqno); +} + +static void pvr_sw_fence_timeline_value_str(struct dma_fence *fence, + char *str, int size) +{ + struct pvr_sw_fence *pvr_sw_fence = to_pvr_sw_fence(fence); + + pvr_sw_fence_context_value_str(pvr_sw_fence->fence_context, str, size); +} + +static bool pvr_sw_fence_enable_signaling(struct dma_fence *fence) +{ + return true; +} + +static void pvr_sw_fence_context_destroy_kref(struct kref *kref) +{ + struct pvr_sw_fence_context *fence_context = + container_of(kref, struct pvr_sw_fence_context, kref); + unsigned int fence_count; + + 
fence_count = atomic_read(&fence_context->fence_count); + if (WARN_ON(fence_count)) + pr_debug("%s context has %u fence(s) remaining\n", + fence_context->context_name, fence_count); + + kfree(fence_context); +} + +static void pvr_sw_fence_release(struct dma_fence *fence) +{ + struct pvr_sw_fence *pvr_sw_fence = to_pvr_sw_fence(fence); + + atomic_dec(&pvr_sw_fence->fence_context->fence_count); + kref_put(&pvr_sw_fence->fence_context->kref, + pvr_sw_fence_context_destroy_kref); + kfree(pvr_sw_fence); +} + +static const struct dma_fence_ops pvr_sw_fence_ops = { + .get_driver_name = pvr_sw_fence_get_driver_name, + .get_timeline_name = pvr_sw_fence_get_timeline_name, + .fence_value_str = pvr_sw_fence_value_str, + .timeline_value_str = pvr_sw_fence_timeline_value_str, + .enable_signaling = pvr_sw_fence_enable_signaling, + .wait = dma_fence_default_wait, + .release = pvr_sw_fence_release, +}; + +struct pvr_sw_fence_context * +pvr_sw_fence_context_create(const char *context_name, const char *driver_name) +{ + struct pvr_sw_fence_context *fence_context; + + fence_context = kmalloc(sizeof(*fence_context), GFP_KERNEL); + if (!fence_context) + return NULL; + + fence_context->context = dma_fence_context_alloc(1); + strlcpy(fence_context->context_name, context_name, + sizeof(fence_context->context_name)); + strlcpy(fence_context->driver_name, driver_name, + sizeof(fence_context->driver_name)); + atomic_set(&fence_context->seqno, 0); + atomic_set(&fence_context->fence_count, 0); + kref_init(&fence_context->kref); + + return fence_context; +} + +void pvr_sw_fence_context_destroy(struct pvr_sw_fence_context *fence_context) +{ + kref_put(&fence_context->kref, pvr_sw_fence_context_destroy_kref); +} + +struct dma_fence * +pvr_sw_fence_create(struct pvr_sw_fence_context *fence_context) +{ + struct pvr_sw_fence *pvr_sw_fence; + unsigned int seqno; + + pvr_sw_fence = kmalloc(sizeof(*pvr_sw_fence), GFP_KERNEL); + if (!pvr_sw_fence) + return NULL; + + spin_lock_init(&pvr_sw_fence->lock); + 
pvr_sw_fence->fence_context = fence_context; + + seqno = pvr_sw_fence_context_seqno_next(fence_context); + dma_fence_init(&pvr_sw_fence->base, &pvr_sw_fence_ops, + &pvr_sw_fence->lock, fence_context->context, seqno); + + atomic_inc(&fence_context->fence_count); + kref_get(&fence_context->kref); + + return &pvr_sw_fence->base; +} diff --git a/drivers/gpu/drm/phytium/octopus/pvr_sw_fence.h b/drivers/gpu/drm/phytium/octopus/pvr_sw_fence.h new file mode 100644 index 000000000000..efafb76922f9 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_sw_fence.h @@ -0,0 +1,60 @@ +/* + * @File + * @Codingstyle LinuxKernel + * @Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved + * @License Dual MIT/GPLv2 + * + * The contents of this file are subject to the MIT license as set out below. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * Alternatively, the contents of this file may be used under the terms of + * the GNU General Public License Version 2 ("GPL") in which case the provisions + * of GPL are applicable instead of those above. 
+ * + * If you wish to allow use of your version of this file only under the terms of + * GPL, and not to allow others to use your version of this file under the terms + * of the MIT license, indicate your decision by deleting the provisions above + * and replace them with the notice and other provisions required by GPL as set + * out in the file called "GPL-COPYING" included in this distribution. If you do + * not delete the provisions above, a recipient may use your version of this file + * under the terms of either the MIT license or GPL. + * + * This License is also included in this distribution in the file called + * "MIT-COPYING". + * + * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS + * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING + * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#if !defined(__PVR_SW_FENCES_H__) +#define __PVR_SW_FENCES_H__ + +#include "pvr_linux_fence.h" + +struct pvr_sw_fence_context; + +struct pvr_sw_fence_context *pvr_sw_fence_context_create(const char *name, + const char *driver_name); +void pvr_sw_fence_context_destroy(struct pvr_sw_fence_context *fence_context); +struct dma_fence *pvr_sw_fence_create(struct pvr_sw_fence_context * + fence_context); + +const char *pvr_sw_fence_context_name(struct pvr_sw_fence_context *fctx); +void pvr_sw_fence_context_value_str(struct pvr_sw_fence_context *fctx, + char *str, int size); + +#endif /* !defined(__PVR_SW_FENCES_H__) */ diff --git a/drivers/gpu/drm/phytium/octopus/pvr_sync.h b/drivers/gpu/drm/phytium/octopus/pvr_sync.h new file mode 100644 index 000000000000..f70ec6b567f8 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_sync.h @@ -0,0 +1,118 @@ +/* + * @File pvr_sync.h + * @Title Kernel driver for Android's sync mechanism + * @Codingstyle LinuxKernel + * @Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved + * @License Dual MIT/GPLv2 + * + * The contents of this file are subject to the MIT license as set out below. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * Alternatively, the contents of this file may be used under the terms of + * the GNU General Public License Version 2 ("GPL") in which case the provisions + * of GPL are applicable instead of those above. 
+ * + * If you wish to allow use of your version of this file only under the terms of + * GPL, and not to allow others to use your version of this file under the terms + * of the MIT license, indicate your decision by deleting the provisions above + * and replace them with the notice and other provisions required by GPL as set + * out in the file called "GPL-COPYING" included in this distribution. If you do + * not delete the provisions above, a recipient may use your version of this file + * under the terms of either the MIT license or GPL. + * + * This License is also included in this distribution in the file called + * "MIT-COPYING". + * + * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS + * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING + * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _PVR_SYNC_H +#define _PVR_SYNC_H + +#include + +#include "pvr_fd_sync_kernel.h" +#include "services_kernel_client.h" + + +/* Services internal interface */ + +/** + * pvr_sync_register_functions() + * + * Return: PVRSRV_OK on success. + */ +enum PVRSRV_ERROR_TAG pvr_sync_register_functions(void); + +/** + * pvr_sync_init - register the pvr_sync misc device + * + * Return: error code, 0 on success. + */ +int pvr_sync_init(void); + +/** + * pvr_sync_deinit - unregister the pvr_sync misc device + */ +void pvr_sync_deinit(void); + +/** + * pvr_sync_device_init() - create an internal sync context + * @dev: Linux device + * + * Return: PVRSRV_OK on success. 
+ */ +enum PVRSRV_ERROR_TAG pvr_sync_device_init(struct device *dev); + +/** + * pvr_sync_device_deinit() - destroy an internal sync context + * + * Drains any work items with outstanding sync fence updates/dependencies. + */ +void pvr_sync_device_deinit(struct device *dev); + +enum PVRSRV_ERROR_TAG pvr_sync_fence_wait(void *fence, u32 timeout_in_ms); + +enum PVRSRV_ERROR_TAG pvr_sync_fence_release(void *fence); + +enum PVRSRV_ERROR_TAG pvr_sync_fence_get(int fence_fd, void **fence_out); + +enum PVRSRV_ERROR_TAG pvr_sync_sw_timeline_fence_create(int timeline_fd, + const char *fence_name, + int *fence_fd_out, + u64 *sync_pt_idx); + +enum PVRSRV_ERROR_TAG pvr_sync_sw_timeline_advance(void *timeline, + u64 *sync_pt_idx); + +enum PVRSRV_ERROR_TAG pvr_sync_sw_timeline_release(void *timeline); + +enum PVRSRV_ERROR_TAG pvr_sync_sw_timeline_get(int timeline_fd, + void **timeline_out); + +enum PVRSRV_ERROR_TAG +sync_dump_fence(void *sw_fence_obj, + DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, + void *dump_debug_file); + +enum PVRSRV_ERROR_TAG +sync_sw_dump_timeline(void *sw_timeline_obj, + DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, + void *dump_debug_file); + +#endif /* _PVR_SYNC_H */ diff --git a/drivers/gpu/drm/phytium/octopus/pvr_sync_file.c b/drivers/gpu/drm/phytium/octopus/pvr_sync_file.c new file mode 100644 index 000000000000..78b59d140678 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_sync_file.c @@ -0,0 +1,1123 @@ +/* + * @File pvr_sync_file.c + * @Title Kernel driver for Android's sync mechanism + * @Codingstyle LinuxKernel + * @Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved + * @License Dual MIT/GPLv2 + * + * The contents of this file are subject to the MIT license as set out below. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * Alternatively, the contents of this file may be used under the terms of + * the GNU General Public License Version 2 ("GPL") in which case the provisions + * of GPL are applicable instead of those above. + * + * If you wish to allow use of your version of this file only under the terms of + * GPL, and not to allow others to use your version of this file under the terms + * of the MIT license, indicate your decision by deleting the provisions above + * and replace them with the notice and other provisions required by GPL as set + * out in the file called "GPL-COPYING" included in this distribution. If you do + * not delete the provisions above, a recipient may use your version of this file + * under the terms of either the MIT license or GPL. + * + * This License is also included in this distribution in the file called + * "MIT-COPYING". 
+ * + * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS + * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING + * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#include "services_kernel_client.h" +#include "pvr_drv.h" +#include "pvr_sync.h" +#include "pvr_fence.h" +#include "pvr_counting_timeline.h" + +#include "linux_sw_sync.h" + +#include +#include +#include +#include +#include +#include +#include + +/* This header must always be included last */ +#include "kernel_compatibility.h" + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)) && !defined(CHROMIUMOS_KERNEL) +#define sync_file_user_name(s) ((s)->name) +#else +#define sync_file_user_name(s) ((s)->user_name) +#endif + +#define PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, fmt, ...) 
\ + do { \ + if (pfnDumpDebugPrintf) \ + pfnDumpDebugPrintf(pvDumpDebugFile, fmt, \ + ## __VA_ARGS__); \ + else \ + pr_err(fmt "\n", ## __VA_ARGS__); \ + } while (0) + +#define FILE_NAME "pvr_sync_file" + +struct sw_sync_create_fence_data { + __u32 value; + char name[32]; + __s32 fence; +}; +#define SW_SYNC_IOC_MAGIC 'W' +#define SW_SYNC_IOC_CREATE_FENCE \ + (_IOWR(SW_SYNC_IOC_MAGIC, 0, struct sw_sync_create_fence_data)) +#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32) + +/* Global data for the sync driver */ +static struct { + void *dev_cookie; + struct workqueue_struct *fence_status_wq; + struct pvr_fence_context *foreign_fence_context; + PFN_SYNC_CHECKPOINT_STRUCT sync_checkpoint_ops; +} pvr_sync_data; + +#if defined(NO_HARDWARE) +static DEFINE_MUTEX(pvr_timeline_active_list_lock); +static struct list_head pvr_timeline_active_list; +#endif + +static const struct file_operations pvr_sync_fops; + +/* This is the actual timeline metadata. We might keep this around after the + * base sync driver has destroyed the pvr_sync_timeline_wrapper object. 
+ */ +struct pvr_sync_timeline { + char name[32]; + struct file *file; + bool is_sw; + /* Fence context used for hw fences */ + struct pvr_fence_context *hw_fence_context; + /* Timeline and context for sw fences */ + struct pvr_counting_fence_timeline *sw_fence_timeline; +#if defined(NO_HARDWARE) + /* List of all timelines (used to advance all timelines in nohw builds) */ + struct list_head list; +#endif +}; + +static +void pvr_sync_free_checkpoint_list_mem(void *mem_ptr) +{ + kfree(mem_ptr); +} + +#if defined(NO_HARDWARE) +/* function used to signal pvr fence in nohw builds */ +static +void pvr_sync_nohw_signal_fence(void *fence_data_to_signal) +{ + struct pvr_sync_timeline *this_timeline; + + mutex_lock(&pvr_timeline_active_list_lock); + list_for_each_entry(this_timeline, &pvr_timeline_active_list, list) { + pvr_fence_context_signal_fences_nohw(this_timeline->hw_fence_context); + } + mutex_unlock(&pvr_timeline_active_list_lock); +} +#endif + +static bool is_pvr_timeline(struct file *file) +{ + return file->f_op == &pvr_sync_fops; +} + +static struct pvr_sync_timeline *pvr_sync_timeline_fget(int fd) +{ + struct file *file = fget(fd); + + if (!file) + return NULL; + + if (!is_pvr_timeline(file)) { + fput(file); + return NULL; + } + + return file->private_data; +} + +static void pvr_sync_timeline_fput(struct pvr_sync_timeline *timeline) +{ + fput(timeline->file); +} + +/* ioctl and fops handling */ + +static int pvr_sync_open(struct inode *inode, struct file *file) +{ + struct pvr_sync_timeline *timeline; + char task_comm[TASK_COMM_LEN]; + int err = -ENOMEM; + + get_task_comm(task_comm, current); + + timeline = kzalloc(sizeof(*timeline), GFP_KERNEL); + if (!timeline) + goto err_out; + + strlcpy(timeline->name, task_comm, sizeof(timeline->name)); + timeline->file = file; + timeline->is_sw = false; + + file->private_data = timeline; + err = 0; +err_out: + return err; +} + +static int pvr_sync_close(struct inode *inode, struct file *file) +{ + struct pvr_sync_timeline 
*timeline = file->private_data; + + if (timeline->sw_fence_timeline) { + /* This makes sure any outstanding SW syncs are marked as + * complete at timeline close time. Otherwise it'll leak the + * timeline (as outstanding fences hold a ref) and possibly + * wedge the system if something is waiting on one of those + * fences + */ + pvr_counting_fence_timeline_force_complete( + timeline->sw_fence_timeline); + pvr_counting_fence_timeline_put(timeline->sw_fence_timeline); + } + + if (timeline->hw_fence_context) { +#if defined(NO_HARDWARE) + mutex_lock(&pvr_timeline_active_list_lock); + list_del(&timeline->list); + mutex_unlock(&pvr_timeline_active_list_lock); +#endif + pvr_fence_context_destroy(timeline->hw_fence_context); + } + + kfree(timeline); + + return 0; +} + +/* + * This is the function that kick code will call in order to 'finalise' a + * created output fence just prior to returning from the kick function. + * The OS native sync code needs to implement a function meeting this + * specification - the implementation may be a nop if the OS does not need + * to perform any actions at this point. + * + * Input: fence_fd The PVRSRV_FENCE to be 'finalised'. This value + * will have been returned by an earlier call to + * pvr_sync_create_fence(). + * Input: finalise_data The finalise data returned by an earlier call + * to pvr_sync_create_fence(). 
+ */ +static enum PVRSRV_ERROR_TAG +pvr_sync_finalise_fence(PVRSRV_FENCE fence_fd, void *finalise_data) +{ + struct sync_file *sync_file = finalise_data; + struct pvr_fence *pvr_fence; + + if (!sync_file || (fence_fd < 0)) { + pr_err(FILE_NAME ": %s: Invalid input fence\n", __func__); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + pvr_fence = to_pvr_fence(sync_file->fence); + + /* pvr fences can be signalled any time after creation */ + dma_fence_enable_sw_signaling(&pvr_fence->base); + + fd_install(fence_fd, sync_file->file); + + return PVRSRV_OK; +} + +/* + * This is the function that kick code will call in order to obtain a new + * PVRSRV_FENCE from the OS native sync code and the PSYNC_CHECKPOINT used + * in that fence. The OS native sync code needs to implement a function + * meeting this specification. + * + * Input: fence_name A string to annotate the fence with (for + * debug). + * Input: timeline The timeline on which the new fence is to be + * created. + * Output: new_fence The new PVRSRV_FENCE to be returned by the + * kick call. + * Output: fence_uid Unique ID of the update fence. + * Output: fence_finalise_data Pointer to data needed to finalise the fence. + * Output: new_checkpoint_handle The PSYNC_CHECKPOINT used by the new fence. 
+ */ +static enum PVRSRV_ERROR_TAG +pvr_sync_create_fence(const char *fence_name, + PVRSRV_TIMELINE new_fence_timeline, + PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, + PVRSRV_FENCE *new_fence, u64 *fence_uid, + void **fence_finalise_data, + PSYNC_CHECKPOINT *new_checkpoint_handle, + void **timeline_update_sync, + __u32 *timeline_update_value) +{ + PVRSRV_ERROR err = PVRSRV_OK; + PVRSRV_FENCE new_fence_fd = -1; + struct pvr_sync_timeline *timeline; + struct pvr_fence *pvr_fence; + PSYNC_CHECKPOINT checkpoint; + struct sync_file *sync_file; + + if (new_fence_timeline < 0 || !new_fence || !new_checkpoint_handle + || !fence_finalise_data) { + pr_err(FILE_NAME ": %s: Invalid input params\n", __func__); + err = PVRSRV_ERROR_INVALID_PARAMS; + goto err_out; + } + + /* We reserve the new fence FD before taking any operations + * as we do not want to fail (e.g. run out of FDs) + */ + new_fence_fd = get_unused_fd_flags(O_CLOEXEC); + if (new_fence_fd < 0) { + pr_err(FILE_NAME ": %s: Failed to get fd\n", __func__); + err = PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE; + goto err_out; + } + + timeline = pvr_sync_timeline_fget(new_fence_timeline); + if (!timeline) { + pr_err(FILE_NAME ": %s: Failed to open supplied timeline fd (%d)\n", + __func__, new_fence_timeline); + err = PVRSRV_ERROR_INVALID_PARAMS; + goto err_put_fd; + } + + if (timeline->is_sw) { + /* This should never happen! */ + pr_err(FILE_NAME ": %s: Request to create a pvr fence on sw timeline (%d)\n", + __func__, new_fence_timeline); + err = PVRSRV_ERROR_INVALID_PARAMS; + goto err_put_timeline; + } + + if (!timeline->hw_fence_context) { + /* First time we use this timeline, so create a context. 
*/ + timeline->hw_fence_context = + pvr_fence_context_create(pvr_sync_data.dev_cookie, + pvr_sync_data.fence_status_wq, + timeline->name); + if (!timeline->hw_fence_context) { + pr_err(FILE_NAME ": %s: Failed to create fence context (%d)\n", + __func__, new_fence_timeline); + err = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_put_timeline; + } +#if defined(NO_HARDWARE) + /* Add timeline to active list */ + INIT_LIST_HEAD(&timeline->list); + mutex_lock(&pvr_timeline_active_list_lock); + list_add_tail(&timeline->list, &pvr_timeline_active_list); + mutex_unlock(&pvr_timeline_active_list_lock); +#endif + } + + pvr_fence = pvr_fence_create(timeline->hw_fence_context, + psSyncCheckpointContext, + new_fence_timeline, + fence_name); + if (!pvr_fence) { + pr_err(FILE_NAME ": %s: Failed to create new pvr_fence\n", + __func__); + err = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_put_timeline; + } + + checkpoint = pvr_fence_get_checkpoint(pvr_fence); + if (!checkpoint) { + pr_err(FILE_NAME ": %s: Failed to get fence checkpoint\n", + __func__); + err = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_destroy_fence; + } + + sync_file = sync_file_create(&pvr_fence->base); + if (!sync_file) { + pr_err(FILE_NAME ": %s: Failed to create sync_file\n", + __func__); + err = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_destroy_fence; + } + strlcpy(sync_file_user_name(sync_file), + pvr_fence->name, + sizeof(sync_file_user_name(sync_file))); + dma_fence_put(&pvr_fence->base); + + *new_fence = new_fence_fd; + *fence_finalise_data = sync_file; + *new_checkpoint_handle = checkpoint; + *fence_uid = OSGetCurrentClientProcessIDKM(); + *fence_uid = (*fence_uid << 32) | (new_fence_fd & U32_MAX); + /* not used but don't want to return dangling pointers */ + *timeline_update_sync = NULL; + *timeline_update_value = 0; + + pvr_sync_timeline_fput(timeline); +err_out: + return err; + +err_destroy_fence: + pvr_fence_destroy(pvr_fence); +err_put_timeline: + pvr_sync_timeline_fput(timeline); +err_put_fd: + 
put_unused_fd(new_fence_fd); + *fence_uid = PVRSRV_NO_FENCE; + goto err_out; +} + +/* + * This is the function that kick code will call in order to 'rollback' a + * created output fence should an error occur when submitting the kick. + * The OS native sync code needs to implement a function meeting this + * specification. + * + * Input: fence_to_rollback The PVRSRV_FENCE to be 'rolled back'. The fence + * should be destroyed and any actions taken due to + * its creation that need to be undone should be + * reverted. + * Input: finalise_data The finalise data for the fence to be 'rolled back'. + */ +static enum PVRSRV_ERROR_TAG +pvr_sync_rollback_fence_data(PVRSRV_FENCE fence_to_rollback, + void *fence_data_to_rollback) +{ + struct sync_file *sync_file = fence_data_to_rollback; + struct pvr_fence *pvr_fence; + + if (!sync_file || fence_to_rollback < 0) { + pr_err(FILE_NAME ": %s: Invalid fence (%d)\n", __func__, + fence_to_rollback); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + pvr_fence = to_pvr_fence(sync_file->fence); + if (!pvr_fence) { + pr_err(FILE_NAME + ": %s: Non-PVR fence (%p)\n", + __func__, sync_file->fence); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + fput(sync_file->file); + + put_unused_fd(fence_to_rollback); + + return PVRSRV_OK; +} + +/* + * This is the function that kick code will call in order to obtain a list of + * the PSYNC_CHECKPOINTs for a given PVRSRV_FENCE passed to a kick function. + * The OS native sync code will allocate the memory to hold the returned list + * of PSYNC_CHECKPOINT ptrs. The caller will free this memory once it has + * finished referencing it. + * + * Input: fence The input (check) fence + * Output: nr_checkpoints The number of PVRSRV_SYNC_CHECKPOINT ptrs + * returned in the checkpoint_handles + * parameter. + * Output: fence_uid Unique ID of the check fence + * Input/Output: checkpoint_handles The returned list of PVRSRV_SYNC_CHECKPOINTs. 
+ */ +static enum PVRSRV_ERROR_TAG +pvr_sync_resolve_fence(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, + PVRSRV_FENCE fence_to_resolve, u32 *nr_checkpoints, + PSYNC_CHECKPOINT **checkpoint_handles, u64 *fence_uid) +{ + PSYNC_CHECKPOINT *checkpoints = NULL; + unsigned int i, num_fences, num_used_fences = 0; + struct dma_fence **fences = NULL; + struct dma_fence *fence; + PVRSRV_ERROR err = PVRSRV_OK; + + if (!nr_checkpoints || !checkpoint_handles || !fence_uid) { + pr_err(FILE_NAME ": %s: Invalid input checkpoint pointer\n", + __func__); + err = PVRSRV_ERROR_INVALID_PARAMS; + goto err_out; + } + + *nr_checkpoints = 0; + *checkpoint_handles = NULL; + *fence_uid = 0; + + if (fence_to_resolve < 0) + goto err_out; + + fence = sync_file_get_fence(fence_to_resolve); + if (!fence) { + pr_err(FILE_NAME ": %s: Failed to read sync private data for fd %d\n", + __func__, fence_to_resolve); + err = PVRSRV_ERROR_HANDLE_NOT_FOUND; + goto err_out; + } + + if (dma_fence_is_array(fence)) { + struct dma_fence_array *array = to_dma_fence_array(fence); + + fences = array->fences; + num_fences = array->num_fences; + } else { + fences = &fence; + num_fences = 1; + } + + checkpoints = kmalloc_array(num_fences, sizeof(PSYNC_CHECKPOINT), + GFP_KERNEL); + if (!checkpoints) { + err = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_put_fence; + } + for (i = 0; i < num_fences; i++) { + /* Only return the checkpoint if the fence is still active. 
*/ + if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &fences[i]->flags)) { + struct pvr_fence *pvr_fence = + pvr_fence_create_from_fence( + pvr_sync_data.foreign_fence_context, + psSyncCheckpointContext, + fences[i], + fence_to_resolve, + "foreign"); + if (!pvr_fence) { + pr_err(FILE_NAME ": %s: Failed to create fence\n", + __func__); + err = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_free_checkpoints; + } + checkpoints[num_used_fences] = + pvr_fence_get_checkpoint(pvr_fence); + SyncCheckpointTakeRef(checkpoints[num_used_fences]); + ++num_used_fences; + dma_fence_put(&pvr_fence->base); + } + } + /* If we don't return any checkpoints, delete the array because + * the caller will not. + */ + if (num_used_fences == 0) { + kfree(checkpoints); + checkpoints = NULL; + } + + *checkpoint_handles = checkpoints; + *nr_checkpoints = num_used_fences; + *fence_uid = OSGetCurrentClientProcessIDKM(); + *fence_uid = (*fence_uid << 32) | (fence_to_resolve & U32_MAX); + +err_put_fence: + dma_fence_put(fence); +err_out: + return err; + +err_free_checkpoints: + for (i = 0; i < num_used_fences; i++) { + if (checkpoints[i]) + SyncCheckpointDropRef(checkpoints[i]); + } + kfree(checkpoints); + goto err_put_fence; +} + +/* + * This is the function that driver code will call in order to request the + * sync implementation to output debug information relating to any sync + * checkpoints it may have created which appear in the provided array of + * FW addresses of Unified Fence Objects (UFOs). + * + * Input: nr_ufos The number of FW addresses provided in the + * vaddrs parameter. + * Input: vaddrs The array of FW addresses of UFOs. The sync + * implementation should check each of these to + * see if any relate to sync checkpoints it has + * created and where they do output debug information + * pertaining to the native/fallback sync with + * which it is associated. 
+ */ +static u32 +pvr_sync_dump_info_on_stalled_ufos(u32 nr_ufos, u32 *vaddrs) +{ + return pvr_fence_dump_info_on_stalled_ufos(pvr_sync_data.foreign_fence_context, + nr_ufos, + vaddrs); +} + +#if defined(PDUMP) +static enum PVRSRV_ERROR_TAG +pvr_sync_fence_get_checkpoints(PVRSRV_FENCE fence_to_pdump, u32 *nr_checkpoints, + struct _SYNC_CHECKPOINT ***checkpoint_handles) +{ + struct dma_fence **fences = NULL; + struct dma_fence *fence; + struct pvr_fence *pvr_fence; + struct _SYNC_CHECKPOINT **checkpoints = NULL; + unsigned int i, num_fences, num_used_fences = 0; + enum PVRSRV_ERROR_TAG err; + + if (fence_to_pdump < 0) { + err = PVRSRV_ERROR_INVALID_PARAMS; + goto err_out; + } + + if (!nr_checkpoints || !checkpoint_handles) { + pr_err(FILE_NAME ": %s: Invalid input checkpoint pointer\n", + __func__); + err = PVRSRV_ERROR_INVALID_PARAMS; + goto err_out; + } + + fence = sync_file_get_fence(fence_to_pdump); + if (!fence) { + pr_err(FILE_NAME ": %s: Failed to read sync private data for fd %d\n", + __func__, fence_to_pdump); + err = PVRSRV_ERROR_HANDLE_NOT_FOUND; + goto err_out; + } + + if (dma_fence_is_array(fence)) { + struct dma_fence_array *array = to_dma_fence_array(fence); + + fences = array->fences; + num_fences = array->num_fences; + } else { + fences = &fence; + num_fences = 1; + } + + checkpoints = kmalloc_array(num_fences, sizeof(*checkpoints), + GFP_KERNEL); + if (!checkpoints) { + pr_err("pvr_sync_file: %s: Failed to alloc memory for returned list of sync checkpoints\n", + __func__); + err = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_put_fence; + } + + for (i = 0; i < num_fences; i++) { + pvr_fence = to_pvr_fence(fences[i]); + if (!pvr_fence) + continue; + checkpoints[num_used_fences] = pvr_fence_get_checkpoint(pvr_fence); + ++num_used_fences; + } + + *checkpoint_handles = checkpoints; + *nr_checkpoints = num_used_fences; + err = PVRSRV_OK; + +err_put_fence: + dma_fence_put(fence); +err_out: + return err; +} +#endif + +static long pvr_sync_ioctl_rename(struct 
pvr_sync_timeline *timeline, + void __user *user_data) +{ + int err = 0; + struct pvr_sync_rename_ioctl_data data; + + if (!access_ok(user_data, sizeof(data))) { + err = -EFAULT; + goto err; + } + + if (copy_from_user(&data, user_data, sizeof(data))) { + err = -EFAULT; + goto err; + } + + data.szName[sizeof(data.szName) - 1] = '\0'; + strlcpy(timeline->name, data.szName, sizeof(timeline->name)); + if (timeline->hw_fence_context) + strlcpy(timeline->hw_fence_context->name, data.szName, + sizeof(timeline->hw_fence_context->name)); + +err: + return err; +} + +static long pvr_sync_ioctl_force_sw_only(struct pvr_sync_timeline *timeline, + void **private_data) +{ + /* Already in SW mode? */ + if (timeline->sw_fence_timeline) + return 0; + + /* Create a sw_sync timeline with the old GPU timeline's name */ + timeline->sw_fence_timeline = pvr_counting_fence_timeline_create( + pvr_sync_data.dev_cookie, + timeline->name); + if (!timeline->sw_fence_timeline) + return -ENOMEM; + + timeline->is_sw = true; + + return 0; +} + +static long pvr_sync_ioctl_sw_create_fence(struct pvr_sync_timeline *timeline, + void __user *user_data) +{ + struct pvr_sw_sync_create_fence_data data; + struct sync_file *sync_file; + int fd = get_unused_fd_flags(O_CLOEXEC); + struct dma_fence *fence; + int err = -EFAULT; + + if (fd < 0) { + pr_err(FILE_NAME ": %s: Failed to find unused fd (%d)\n", + __func__, fd); + err = -EMFILE; + goto err_out; + } + + if (copy_from_user(&data, user_data, sizeof(data))) { + pr_err(FILE_NAME ": %s: Failed copy from user\n", __func__); + goto err_put_fd; + } + + fence = pvr_counting_fence_create(timeline->sw_fence_timeline, &data.sync_pt_idx); + if (!fence) { + pr_err(FILE_NAME ": %s: Failed to create a sync point (%d)\n", + __func__, fd); + err = -ENOMEM; + goto err_put_fd; + } + + sync_file = sync_file_create(fence); + if (!sync_file) { + pr_err(FILE_NAME ": %s: Failed to create a sync point (%d)\n", + __func__, fd); + err = -ENOMEM; + goto err_put_fence; + } + + 
data.fence = fd;
+
+	if (copy_to_user(user_data, &data, sizeof(data))) {
+		pr_err(FILE_NAME ": %s: Failed copy to user\n", __func__);
+		/* sync_file_create() took its own reference on the fence and
+		 * owns sync_file->file. Releasing the file here tears down the
+		 * sync_file and drops that reference; without this fput() the
+		 * sync_file (and the fence ref it holds) leaked on this path.
+		 */
+		fput(sync_file->file);
+		goto err_put_fence;
+	}
+
+	fd_install(fd, sync_file->file);
+	err = 0;
+
+	dma_fence_put(fence);
+err_out:
+	return err;
+
+err_put_fence:
+	dma_fence_put(fence);
+err_put_fd:
+	put_unused_fd(fd);
+	goto err_out;
+}
+
+static long pvr_sync_ioctl_sw_inc(struct pvr_sync_timeline *timeline,
+				  void __user *user_data)
+{
+	bool res;
+	struct pvr_sw_timeline_advance_data data;
+
+	res = pvr_counting_fence_timeline_inc(timeline->sw_fence_timeline, &data.sync_pt_idx);
+
+	/* pvr_counting_fence_timeline_inc won't allow sw timeline to be
+	 * advanced beyond the last defined point
+	 */
+	if (!res) {
+		pr_err("pvr_sync_file: attempt to advance SW timeline beyond last defined point\n");
+		return -EPERM;
+	}
+
+	if (copy_to_user(user_data, &data, sizeof(data))) {
+		pr_err(FILE_NAME ": %s: Failed copy to user\n", __func__);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static long
+pvr_sync_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	void __user *user_data = (void __user *)arg;
+	long err = -ENOTTY;
+	struct pvr_sync_timeline *timeline = file->private_data;
+
+	if (!timeline->is_sw) {
+
+		switch (cmd) {
+		case PVR_SYNC_IOC_RENAME:
+			err = pvr_sync_ioctl_rename(timeline, user_data);
+			break;
+		case PVR_SYNC_IOC_FORCE_SW_ONLY:
+			err = pvr_sync_ioctl_force_sw_only(timeline,
+				&file->private_data);
+			break;
+		default:
+			break;
+		}
+	} else {
+
+		switch (cmd) {
+		case PVR_SW_SYNC_IOC_CREATE_FENCE:
+			err = pvr_sync_ioctl_sw_create_fence(timeline,
+							     user_data);
+			break;
+		case PVR_SW_SYNC_IOC_INC:
+			err = pvr_sync_ioctl_sw_inc(timeline, user_data);
+			break;
+		default:
+			break;
+		}
+	}
+
+	return err;
+}
+
+static const struct file_operations pvr_sync_fops = {
+	.owner = THIS_MODULE,
+	.open = pvr_sync_open,
+	.release = pvr_sync_close,
+	.unlocked_ioctl = pvr_sync_ioctl,
+	.compat_ioctl = pvr_sync_ioctl,
+};
+
+static struct 
miscdevice pvr_sync_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = PVRSYNC_MODNAME, + .fops = &pvr_sync_fops, +}; + +static void +pvr_sync_debug_request_heading(void *data, u32 verbosity, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + if (DD_VERB_LVL_ENABLED(verbosity, DEBUG_REQUEST_VERBOSITY_MEDIUM)) + PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, + "------[ Native Fence Sync: timelines ]------"); +} + +enum PVRSRV_ERROR_TAG pvr_sync_register_functions(void) +{ + /* Register the resolve fence and create fence functions with + * sync_checkpoint.c + * The pvr_fence context registers its own EventObject callback to + * update sync status + */ + /* Initialise struct and register with sync_checkpoint.c */ + pvr_sync_data.sync_checkpoint_ops.pfnFenceResolve = pvr_sync_resolve_fence; + pvr_sync_data.sync_checkpoint_ops.pfnFenceCreate = pvr_sync_create_fence; + pvr_sync_data.sync_checkpoint_ops.pfnFenceDataRollback = pvr_sync_rollback_fence_data; + pvr_sync_data.sync_checkpoint_ops.pfnFenceFinalise = pvr_sync_finalise_fence; +#if defined(NO_HARDWARE) + pvr_sync_data.sync_checkpoint_ops.pfnNoHWUpdateTimelines = pvr_sync_nohw_signal_fence; +#else + pvr_sync_data.sync_checkpoint_ops.pfnNoHWUpdateTimelines = NULL; +#endif + pvr_sync_data.sync_checkpoint_ops.pfnFreeCheckpointListMem = + pvr_sync_free_checkpoint_list_mem; + pvr_sync_data.sync_checkpoint_ops.pfnDumpInfoOnStalledUFOs = + pvr_sync_dump_info_on_stalled_ufos; + strlcpy(pvr_sync_data.sync_checkpoint_ops.pszImplName, "pvr_sync_file", + SYNC_CHECKPOINT_IMPL_MAX_STRLEN); +#if defined(PDUMP) + pvr_sync_data.sync_checkpoint_ops.pfnSyncFenceGetCheckpoints = + pvr_sync_fence_get_checkpoints; +#endif + + return SyncCheckpointRegisterFunctions(&pvr_sync_data.sync_checkpoint_ops); +} + +int pvr_sync_init(void) +{ + int err; + + err = misc_register(&pvr_sync_device); + if (err) { + pr_err(FILE_NAME ": %s: Failed to register pvr_sync device (%d)\n", + __func__, err); + } + return err; +} 
+
+void pvr_sync_deinit(void)
+{
+	misc_deregister(&pvr_sync_device);
+}
+
+enum PVRSRV_ERROR_TAG pvr_sync_device_init(struct device *dev)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct pvr_drm_private *priv = ddev->dev_private;
+	enum PVRSRV_ERROR_TAG error;
+
+	error = PVRSRVRegisterDbgRequestNotify(
+				&priv->sync_debug_notify_handle,
+				priv->dev_node,
+				pvr_sync_debug_request_heading,
+				DEBUG_REQUEST_LINUXFENCE,
+				NULL);
+	if (error != PVRSRV_OK) {
+		pr_err("%s: failed to register debug request callback (%s)\n",
+		       __func__, PVRSRVGetErrorString(error));
+		goto err_out;
+	}
+
+	pvr_sync_data.dev_cookie = priv->dev_node;
+	pvr_sync_data.fence_status_wq = priv->fence_status_wq;
+
+	pvr_sync_data.foreign_fence_context =
+		pvr_fence_context_create(pvr_sync_data.dev_cookie,
+					 pvr_sync_data.fence_status_wq,
+					 "foreign_sync");
+	if (!pvr_sync_data.foreign_fence_context) {
+		pr_err(FILE_NAME ": %s: Failed to create foreign sync context\n",
+		       __func__);
+		error = PVRSRV_ERROR_OUT_OF_MEMORY;
+		/* Undo the debug-request registration performed above;
+		 * previously this path left the notify handle registered,
+		 * leaking it (and leaving a dangling callback) on failure.
+		 */
+		goto err_unregister_dbg;
+	}
+
+#if defined(NO_HARDWARE)
+	INIT_LIST_HEAD(&pvr_timeline_active_list);
+#endif
+
+	return PVRSRV_OK;
+
+err_unregister_dbg:
+	PVRSRVUnregisterDbgRequestNotify(priv->sync_debug_notify_handle);
+err_out:
+	return error;
+}
+
+void pvr_sync_device_deinit(struct device *dev)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct pvr_drm_private *priv = ddev->dev_private;
+
+	pvr_fence_context_destroy(pvr_sync_data.foreign_fence_context);
+	PVRSRVUnregisterDbgRequestNotify(priv->sync_debug_notify_handle);
+}
+
+enum PVRSRV_ERROR_TAG pvr_sync_fence_wait(void *fence, u32 timeout_in_ms)
+{
+	long timeout = msecs_to_jiffies(timeout_in_ms);
+	/* dma_fence_wait_timeout() returns long; storing it in an int could
+	 * truncate a large remaining-jiffies value. Use long.
+	 */
+	long err;
+
+	err = dma_fence_wait_timeout(fence, true, timeout);
+	/*
+	 * dma_fence_wait_timeout returns:
+	 * - the remaining timeout on success
+	 * - 0 on timeout
+	 * - -ERESTARTSYS if interrupted
+	 */
+	if (err > 0)
+		return PVRSRV_OK;
+	else if (err == 0)
+		return PVRSRV_ERROR_TIMEOUT;
+
+	return PVRSRV_ERROR_FAILED_DEPENDENCIES;
+}
+
+enum PVRSRV_ERROR_TAG pvr_sync_fence_release(void *fence)
+{
+	dma_fence_put(fence);
+ + return PVRSRV_OK; +} + +enum PVRSRV_ERROR_TAG pvr_sync_fence_get(int fence_fd, void **fence_out) +{ + struct dma_fence *fence; + + fence = sync_file_get_fence(fence_fd); + if (fence == NULL) + return PVRSRV_ERROR_INVALID_PARAMS; + + *fence_out = fence; + + return PVRSRV_OK; +} + +enum PVRSRV_ERROR_TAG pvr_sync_sw_timeline_fence_create(int timeline_fd, + const char *fence_name, + int *fence_fd_out, + u64 *sync_pt_idx) +{ + enum PVRSRV_ERROR_TAG srv_err; + struct pvr_sync_timeline *timeline; + struct dma_fence *fence = NULL; + struct sync_file *sync_file = NULL; + int fd; + + fd = get_unused_fd_flags(O_CLOEXEC); + if (fd < 0) + return PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE; + + timeline = pvr_sync_timeline_fget(timeline_fd); + if (!timeline) { + /* unrecognised timeline */ + srv_err = PVRSRV_ERROR_RESOURCE_UNAVAILABLE; + goto err_put_fd; + } + + fence = pvr_counting_fence_create(timeline->sw_fence_timeline, sync_pt_idx); + pvr_sync_timeline_fput(timeline); + if (!fence) { + srv_err = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_put_fd; + } + + sync_file = sync_file_create(fence); + if (!sync_file) { + srv_err = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_put_fence; + } + + fd_install(fd, sync_file->file); + + *fence_fd_out = fd; + + return PVRSRV_OK; + +err_put_fence: + dma_fence_put(fence); +err_put_fd: + put_unused_fd(fd); + return srv_err; +} + +enum PVRSRV_ERROR_TAG pvr_sync_sw_timeline_advance(void *timeline, u64 *sync_pt_idx) +{ + if (timeline == NULL) + return PVRSRV_ERROR_INVALID_PARAMS; + + pvr_counting_fence_timeline_inc(timeline, sync_pt_idx); + + return PVRSRV_OK; +} + +enum PVRSRV_ERROR_TAG pvr_sync_sw_timeline_release(void *timeline) +{ + if (timeline == NULL) + return PVRSRV_ERROR_INVALID_PARAMS; + + pvr_counting_fence_timeline_put(timeline); + + return PVRSRV_OK; +} + +enum PVRSRV_ERROR_TAG pvr_sync_sw_timeline_get(int timeline_fd, + void **timeline_out) +{ + struct pvr_counting_fence_timeline *sw_timeline; + struct pvr_sync_timeline *timeline; + + timeline = 
pvr_sync_timeline_fget(timeline_fd); + if (!timeline) + return PVRSRV_ERROR_INVALID_PARAMS; + + sw_timeline = + pvr_counting_fence_timeline_get(timeline->sw_fence_timeline); + pvr_sync_timeline_fput(timeline); + if (!sw_timeline) + return PVRSRV_ERROR_INVALID_PARAMS; + + *timeline_out = sw_timeline; + + return PVRSRV_OK; +} +static void _dump_sync_point(struct dma_fence *fence, + DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, + void *dump_debug_file) +{ + const struct dma_fence_ops *fence_ops = fence->ops; + bool signaled = dma_fence_is_signaled(fence); + char time[16] = { '\0' }; + + fence_ops->timeline_value_str(fence, time, sizeof(time)); + + PVR_DUMPDEBUG_LOG(dump_debug_printf, + dump_debug_file, + "<%p> Seq#=%llu TS=%s State=%s TLN=%s", + fence, + (u64) fence->seqno, + time, + (signaled) ? "Signalled" : "Active", + fence_ops->get_timeline_name(fence)); +} + +static void _dump_fence(struct dma_fence *fence, + DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, + void *dump_debug_file) +{ + if (dma_fence_is_array(fence)) { + struct dma_fence_array *fence_array = to_dma_fence_array(fence); + int i; + + PVR_DUMPDEBUG_LOG(dump_debug_printf, + dump_debug_file, + "Fence: [%p] Sync Points:\n", + fence_array); + + for (i = 0; i < fence_array->num_fences; i++) + _dump_sync_point(fence_array->fences[i], + dump_debug_printf, + dump_debug_file); + + } else { + _dump_sync_point(fence, dump_debug_printf, dump_debug_file); + } +} + +enum PVRSRV_ERROR_TAG +sync_dump_fence(void *sw_fence_obj, + DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, + void *dump_debug_file) +{ + struct dma_fence *fence = (struct dma_fence *) sw_fence_obj; + + _dump_fence(fence, dump_debug_printf, dump_debug_file); + + return PVRSRV_OK; +} + +enum PVRSRV_ERROR_TAG +sync_sw_dump_timeline(void *sw_timeline_obj, + DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, + void *dump_debug_file) +{ + pvr_counting_fence_timeline_dump_timeline(sw_timeline_obj, + dump_debug_printf, + dump_debug_file); + + return PVRSRV_OK; +} diff --git 
a/drivers/gpu/drm/phytium/octopus/pvr_uaccess.h b/drivers/gpu/drm/phytium/octopus/pvr_uaccess.h new file mode 100644 index 000000000000..e6abf78f2798 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_uaccess.h @@ -0,0 +1,99 @@ +/*************************************************************************/ /*! +@File +@Title Utility functions for user space access +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef PVR_UACCESS_H
+#define PVR_UACCESS_H
+
+#include <linux/version.h>
+#include <linux/uaccess.h>
+
+static inline unsigned long pvr_copy_to_user(void __user *pvTo, const void *pvFrom, unsigned long ulBytes)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0))
+	if (access_ok(VERIFY_WRITE, pvTo, ulBytes))
+#else
+	if (access_ok(pvTo, ulBytes))
+#endif
+	{
+		return __copy_to_user(pvTo, pvFrom, ulBytes);
+	}
+
+	return ulBytes;
+}
+
+
+#if defined(__KLOCWORK__)
+	/* this part is only to tell Klocwork not to report false positive because
+	   it doesn't understand that pvr_copy_from_user will initialise the memory
+	   pointed to by pvTo */
+#include <linux/string.h> /* get the memset prototype */
+static inline unsigned long pvr_copy_from_user(void *pvTo, const void __user *pvFrom, unsigned long ulBytes)
+{
+	if (pvTo != NULL)
+	{
+		memset(pvTo, 0xAA, ulBytes);
+		return 0;
+	}
+	return 1;
+}
+
+#else /* real implementation */
+
+static inline unsigned long pvr_copy_from_user(void *pvTo, const void __user *pvFrom, unsigned long ulBytes)
+{
+	/*
+	 * The compile time correctness checking introduced for copy_from_user in
+	 * Linux 2.6.33 isn't fully compatible with our usage of the function.
+ */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)) + if (access_ok(VERIFY_READ, pvFrom, ulBytes)) +#else + if (access_ok(pvFrom, ulBytes)) +#endif + { + return __copy_from_user(pvTo, pvFrom, ulBytes); + } + + return ulBytes; +} +#endif /* klocworks */ + +#endif /* PVR_UACCESS_H */ diff --git a/drivers/gpu/drm/phytium/octopus/pvr_vmap.h b/drivers/gpu/drm/phytium/octopus/pvr_vmap.h new file mode 100644 index 000000000000..260b5835f4c7 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvr_vmap.h @@ -0,0 +1,83 @@ +/* + * @File pvr_vmap.h + * @Title Utility functions for virtual memory mapping + * @Codingstyle LinuxKernel + * @Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved + * @License Dual MIT/GPLv2 + * + * The contents of this file are subject to the MIT license as set out below. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * Alternatively, the contents of this file may be used under the terms of + * the GNU General Public License Version 2 ("GPL") in which case the provisions + * of GPL are applicable instead of those above. 
+ * + * If you wish to allow use of your version of this file only under the terms of + * GPL, and not to allow others to use your version of this file under the terms + * of the MIT license, indicate your decision by deleting the provisions above + * and replace them with the notice and other provisions required by GPL as set + * out in the file called "GPL-COPYING" included in this distribution. If you do + * not delete the provisions above, a recipient may use your version of this file + * under the terms of either the MIT license or GPL. + * + * This License is also included in this distribution in the file called + * "MIT-COPYING". + * + * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS + * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING + * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */
+
+#ifndef PVR_VMAP_H
+#define PVR_VMAP_H
+
+#include <linux/version.h>
+#include <linux/vmalloc.h>
+
+static inline void *pvr_vmap(struct page **pages,
+			     unsigned int count,
+			     __maybe_unused unsigned long flags,
+			     pgprot_t prot)
+{
+#if !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS)
+	return vmap(pages, count, flags, prot);
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0))
+	return vm_map_ram(pages, count, -1, prot);
+#else
+	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
+		return vm_map_ram(pages, count, -1);
+	else
+		return vmap(pages, count, flags, prot);
+#endif /* !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS) */
+}
+
+static inline void pvr_vunmap(void *pages,
+			      __maybe_unused unsigned int count,
+			      __maybe_unused pgprot_t prot)
+{
+#if !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS)
+	vunmap(pages);
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0))
+	vm_unmap_ram(pages, count);
+#else
+	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
+		vm_unmap_ram(pages, count);
+	else
+		vunmap(pages);
+#endif /* !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS) */
+}
+
+#endif /* PVR_VMAP_H */
diff --git a/drivers/gpu/drm/phytium/octopus/pvrmodule.h b/drivers/gpu/drm/phytium/octopus/pvrmodule.h
new file mode 100644
index 000000000000..fe4ce7bf3a71
--- /dev/null
+++ b/drivers/gpu/drm/phytium/octopus/pvrmodule.h
@@ -0,0 +1,48 @@
+/*************************************************************************/ /*!
+@Title Module Author and License.
+@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef _PVRMODULE_H_ +#define _PVRMODULE_H_ + +MODULE_AUTHOR("Phytium Information Technologies Ltd. "); +MODULE_LICENSE("Dual MIT/GPL"); + +#endif /* _PVRMODULE_H_ */ diff --git a/drivers/gpu/drm/phytium/octopus/pvrsrv.c b/drivers/gpu/drm/phytium/octopus/pvrsrv.c new file mode 100644 index 000000000000..de704e552f89 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvrsrv.c @@ -0,0 +1,3286 @@ +/*************************************************************************/ /*! +@File +@Title core services functions +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Main APIs for core services functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "img_defs.h" +#include "rgxdebug.h" +#include "handle.h" +#include "connection_server.h" +#include "osconnection_server.h" +#include "pdump_km.h" +#include "ra.h" +#include "allocmem.h" +#include "pmr.h" +#include "pvrsrv.h" +#include "srvcore.h" +#include "services_km.h" +#include "pvrsrv_device.h" +#include "pvr_debug.h" +#include "debug_common.h" +#include "pvr_notifier.h" +#include "sync.h" +#include "sync_server.h" +#include "sync_checkpoint.h" +#include "sync_fallback_server.h" +#include "sync_checkpoint_init.h" +#include "devicemem.h" +#include "cache_km.h" +#include "info_page.h" +#include "info_page_defs.h" +#include "pvrsrv_bridge_init.h" +#include "devicemem_server.h" +#include "km_apphint_defs.h" +#include "di_server.h" +#include "di_impl_brg.h" +#include "htb_debug.h" +#include "dma_km.h" + +#include "log2.h" + +#include "lists.h" +#include "dllist.h" +#include "syscommon.h" +#include "sysvalidation.h" + +#include "physmem_lma.h" +#include "physmem_osmem.h" +#include "physmem_hostmem.h" + +#include "tlintern.h" +#include "htbserver.h" + +//#define MULTI_DEVICE_BRINGUP + +#if defined(MULTI_DEVICE_BRINGUP) 
+#define MULTI_DEVICE_BRINGUP_DPF(msg, ...) PVR_DPF((PVR_DBG_MESSAGE, msg, __VA_ARGS__)) +#else +#define MULTI_DEVICE_BRINGUP_DPF(msg, ...) +#endif + +#if defined(SUPPORT_RGX) +#include "rgxinit.h" +#include "rgxhwperf.h" +#include "rgxfwutils.h" +#endif + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) +#include "ri_server.h" +#endif + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#include "process_stats.h" +#endif + +#include "vz_vmm_pvz.h" + +#include "devicemem_history_server.h" + +#if defined(SUPPORT_LINUX_DVFS) +#include "pvr_dvfs_device.h" +#endif + +#if defined(SUPPORT_DISPLAY_CLASS) +#include "dc_server.h" +#endif + +#include "rgx_options.h" +#include "srvinit.h" +#include "rgxutils.h" + +#include "oskm_apphint.h" +#include "pvrsrv_apphint.h" + +#include "pvrsrv_tlstreams.h" +#include "tlstream.h" + +#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__) +#include "physmem_test.h" +#endif + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +#include "virt_validation_defs.h" +#endif + +#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) +#define INFINITE_SLEEP_TIMEOUT 0ULL +#endif + +/*! Wait 100ms before retrying deferred clean-up again */ +#define CLEANUP_THREAD_WAIT_RETRY_TIMEOUT 100000ULL + +/*! Wait 8hrs when no deferred clean-up required. Allows a poll several times + * a day to check for any missed clean-up. */ +#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) +#define CLEANUP_THREAD_WAIT_SLEEP_TIMEOUT INFINITE_SLEEP_TIMEOUT +#else +#define CLEANUP_THREAD_WAIT_SLEEP_TIMEOUT 28800000000ULL +#endif + +/*! 
When unloading try a few times to free everything remaining on the list */ +#define CLEANUP_THREAD_UNLOAD_RETRY 4 + +#define PVRSRV_PROC_HANDLE_BASE_INIT 10 + +#define PVRSRV_TL_CTLR_STREAM_SIZE 4096 + +static PVRSRV_DATA *gpsPVRSRVData; +static IMG_UINT32 g_ui32InitFlags; + +/* mark which parts of Services were initialised */ +#define INIT_DATA_ENABLE_PDUMPINIT 0x1U + +static IMG_UINT32 g_aui32DebugOrderTable[] = { + DEBUG_REQUEST_SYS, + DEBUG_REQUEST_APPHINT, + DEBUG_REQUEST_HTB, + DEBUG_REQUEST_DC, + DEBUG_REQUEST_SYNCCHECKPOINT, + DEBUG_REQUEST_SYNCTRACKING, + DEBUG_REQUEST_ANDROIDSYNC, + DEBUG_REQUEST_FALLBACKSYNC, + DEBUG_REQUEST_LINUXFENCE +}; + +/* Callback to dump info of cleanup thread in debug_dump */ +static void CleanupThreadDumpInfo(DUMPDEBUG_PRINTF_FUNC* pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + PVRSRV_DATA *psPVRSRVData; + psPVRSRVData = PVRSRVGetPVRSRVData(); + + PVR_DUMPDEBUG_LOG(" Number of deferred cleanup items : %u", + OSAtomicRead(&psPVRSRVData->i32NumCleanupItems)); +} + +/* Add work to the cleanup thread work list. + * The work item will be executed by the cleanup thread + */ +void PVRSRVCleanupThreadAddWork(PVRSRV_CLEANUP_THREAD_WORK *psData) +{ + PVRSRV_DATA *psPVRSRVData; + PVRSRV_ERROR eError; + + psPVRSRVData = PVRSRVGetPVRSRVData(); + + PVR_ASSERT(psData != NULL); +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) + if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK || psPVRSRVData->bUnload) +#else + if (psPVRSRVData->bUnload) +#endif + { + CLEANUP_THREAD_FN pfnFree = psData->pfnFree; + + PVR_DPF((PVR_DBG_MESSAGE, "Cleanup thread has already quit: doing work immediately")); + + eError = pfnFree(psData->pvData); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to free resource " + "(callback " IMG_PFN_FMTSPEC "). 
" + "Immediate free will not be retried.", + pfnFree)); + } + } + else + { + OS_SPINLOCK_FLAGS uiFlags; + + /* add this work item to the list */ + OSSpinLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); + dllist_add_to_tail(&psPVRSRVData->sCleanupThreadWorkList, &psData->sNode); + OSSpinLockRelease(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); + + OSAtomicIncrement(&psPVRSRVData->i32NumCleanupItems); + + /* signal the cleanup thread to ensure this item gets processed */ + eError = OSEventObjectSignal(psPVRSRVData->hCleanupEventObject); + PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); + } +} + +/* Pop an item from the head of the cleanup thread work list */ +static INLINE DLLIST_NODE *_CleanupThreadWorkListPop(PVRSRV_DATA *psPVRSRVData) +{ + DLLIST_NODE *psNode; + OS_SPINLOCK_FLAGS uiFlags; + + OSSpinLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); + psNode = dllist_get_next_node(&psPVRSRVData->sCleanupThreadWorkList); + if (psNode != NULL) + { + dllist_remove_node(psNode); + } + OSSpinLockRelease(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); + + return psNode; +} + +/* Process the cleanup thread work list */ +static IMG_BOOL _CleanupThreadProcessWorkList(PVRSRV_DATA *psPVRSRVData, + IMG_BOOL *pbUseGlobalEO) +{ + DLLIST_NODE *psNodeIter, *psNodeLast; + PVRSRV_ERROR eError; + IMG_BOOL bNeedRetry = IMG_FALSE; + OS_SPINLOCK_FLAGS uiFlags; + + /* any callback functions which return error will be + * moved to the back of the list, and additional items can be added + * to the list at any time so we ensure we only iterate from the + * head of the list to the current tail (since the tail may always + * be changing) + */ + + OSSpinLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); + psNodeLast = dllist_get_prev_node(&psPVRSRVData->sCleanupThreadWorkList); + OSSpinLockRelease(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); + + if (psNodeLast == NULL) + { + /* no elements to clean up */ + return IMG_FALSE; + } + + do 
+ { + psNodeIter = _CleanupThreadWorkListPop(psPVRSRVData); + + if (psNodeIter != NULL) + { + PVRSRV_CLEANUP_THREAD_WORK *psData = IMG_CONTAINER_OF(psNodeIter, PVRSRV_CLEANUP_THREAD_WORK, sNode); + CLEANUP_THREAD_FN pfnFree; + + /* get the function pointer address here so we have access to it + * in order to report the error in case of failure, without having + * to depend on psData not having been freed + */ + pfnFree = psData->pfnFree; + + *pbUseGlobalEO = psData->bDependsOnHW; + eError = pfnFree(psData->pvData); + + if (eError != PVRSRV_OK) + { + /* move to back of the list, if this item's + * retry count hasn't hit zero. + */ + if (CLEANUP_THREAD_IS_RETRY_TIMEOUT(psData)) + { + if (CLEANUP_THREAD_RETRY_TIMEOUT_REACHED(psData)) + { + bNeedRetry = IMG_TRUE; + } + } + else + { + if (psData->ui32RetryCount-- > 0) + { + bNeedRetry = IMG_TRUE; + } + } + + if (bNeedRetry) + { + OSSpinLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); + dllist_add_to_tail(&psPVRSRVData->sCleanupThreadWorkList, psNodeIter); + OSSpinLockRelease(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "Failed to free resource " + "(callback " IMG_PFN_FMTSPEC "). 
" + "Retry limit reached", + pfnFree)); + } + } + else + { + OSAtomicDecrement(&psPVRSRVData->i32NumCleanupItems); + } + } + } while ((psNodeIter != NULL) && (psNodeIter != psNodeLast)); + + return bNeedRetry; +} + +// #define CLEANUP_DPFL PVR_DBG_WARNING +#define CLEANUP_DPFL PVR_DBG_MESSAGE + +/* Create/initialise data required by the cleanup thread, + * before the cleanup thread is started + */ +static PVRSRV_ERROR _CleanupThreadPrepare(PVRSRV_DATA *psPVRSRVData) +{ + PVRSRV_ERROR eError; + + /* Create the clean up event object */ + + eError = OSEventObjectCreate("PVRSRV_CLEANUP_EVENTOBJECT", &gpsPVRSRVData->hCleanupEventObject); + PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", Exit); + + /* initialise the mutex and linked list required for the cleanup thread work list */ + + eError = OSSpinLockCreate(&psPVRSRVData->hCleanupThreadWorkListLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", Exit); + + dllist_init(&psPVRSRVData->sCleanupThreadWorkList); + +Exit: + return eError; +} + +static void CleanupThread(void *pvData) +{ + PVRSRV_DATA *psPVRSRVData = pvData; + IMG_BOOL bRetryWorkList = IMG_FALSE; + IMG_HANDLE hGlobalEvent; + IMG_HANDLE hOSEvent; + PVRSRV_ERROR eRc; + IMG_BOOL bUseGlobalEO = IMG_FALSE; + IMG_UINT32 uiUnloadRetry = 0; + + /* Store the process id (pid) of the clean-up thread */ + psPVRSRVData->cleanupThreadPid = OSGetCurrentProcessID(); + OSAtomicWrite(&psPVRSRVData->i32NumCleanupItems, 0); + + PVR_DPF((CLEANUP_DPFL, "CleanupThread: thread starting... ")); + + /* Open an event on the clean up event object so we can listen on it, + * abort the clean up thread and driver if this fails. 
+ */ + eRc = OSEventObjectOpen(psPVRSRVData->hCleanupEventObject, &hOSEvent); + PVR_ASSERT(eRc == PVRSRV_OK); + + eRc = OSEventObjectOpen(psPVRSRVData->hGlobalEventObject, &hGlobalEvent); + PVR_ASSERT(eRc == PVRSRV_OK); + + /* While the driver is in a good state and is not being unloaded + * try to free any deferred items when signalled + */ + while (psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) + { + IMG_HANDLE hEvent; + + if (psPVRSRVData->bUnload) + { + if (dllist_is_empty(&psPVRSRVData->sCleanupThreadWorkList) || + uiUnloadRetry > CLEANUP_THREAD_UNLOAD_RETRY) + { + break; + } + uiUnloadRetry++; + } + + /* Wait until signalled for deferred clean up OR wait for a + * short period if the previous deferred clean up was not able + * to release all the resources before trying again. + * Bridge lock re-acquired on our behalf before the wait call returns. + */ + + if (bRetryWorkList && bUseGlobalEO) + { + hEvent = hGlobalEvent; + } + else + { + hEvent = hOSEvent; + } + + eRc = OSEventObjectWaitKernel(hEvent, + bRetryWorkList ? + CLEANUP_THREAD_WAIT_RETRY_TIMEOUT : + CLEANUP_THREAD_WAIT_SLEEP_TIMEOUT); + if (eRc == PVRSRV_ERROR_TIMEOUT) + { + PVR_DPF((CLEANUP_DPFL, "CleanupThread: wait timeout")); + } + else if (eRc == PVRSRV_OK) + { + PVR_DPF((CLEANUP_DPFL, "CleanupThread: wait OK, signal received")); + } + else + { + PVR_LOG_ERROR(eRc, "OSEventObjectWaitKernel"); + } + + bRetryWorkList = _CleanupThreadProcessWorkList(psPVRSRVData, &bUseGlobalEO); + } + + OSSpinLockDestroy(psPVRSRVData->hCleanupThreadWorkListLock); + + eRc = OSEventObjectClose(hOSEvent); + PVR_LOG_IF_ERROR(eRc, "OSEventObjectClose"); + + eRc = OSEventObjectClose(hGlobalEvent); + PVR_LOG_IF_ERROR(eRc, "OSEventObjectClose"); + + PVR_DPF((CLEANUP_DPFL, "CleanupThread: thread ending... 
")); +} + +static void DevicesWatchdogThread_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, + va_list va) +{ +#if defined(SUPPORT_RGX) + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice; +#endif + PVRSRV_DEVICE_HEALTH_STATUS *pePreviousHealthStatus, eHealthStatus; + PVRSRV_ERROR eError; + PVRSRV_DEVICE_DEBUG_DUMP_STATUS eDebugDumpState; + IMG_BOOL bCheckAfterTimePassed; + + pePreviousHealthStatus = va_arg(va, PVRSRV_DEVICE_HEALTH_STATUS *); + bCheckAfterTimePassed = va_arg(va, IMG_BOOL); + + if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE) + { + return; + } + + if (psDeviceNode->pfnUpdateHealthStatus != NULL) + { + eError = psDeviceNode->pfnUpdateHealthStatus(psDeviceNode, bCheckAfterTimePassed); + PVR_WARN_IF_ERROR(eError, "pfnUpdateHealthStatus"); + } + eHealthStatus = OSAtomicRead(&psDeviceNode->eHealthStatus); + + if (eHealthStatus != PVRSRV_DEVICE_HEALTH_STATUS_OK) + { + if (eHealthStatus != *pePreviousHealthStatus) + { +#if defined(SUPPORT_RGX) + if (!(psDevInfo->ui32DeviceFlags & + RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN)) +#else + /* In this case we don't have an RGX device */ + if (eHealthStatus != PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED) +#endif + { + PVR_DPF((PVR_DBG_ERROR, "DevicesWatchdogThread: " + "Device status not OK!!!")); + PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, + NULL, NULL); + } + } + } + + *pePreviousHealthStatus = eHealthStatus; + + /* Have we received request from FW to capture debug dump(could be due to HWR) */ + eDebugDumpState = (PVRSRV_DEVICE_DEBUG_DUMP_STATUS)OSAtomicCompareExchange( + &psDeviceNode->eDebugDumpRequested, + PVRSRV_DEVICE_DEBUG_DUMP_CAPTURE, + PVRSRV_DEVICE_DEBUG_DUMP_NONE); + if (PVRSRV_DEVICE_DEBUG_DUMP_CAPTURE == eDebugDumpState) + { + PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); + } + +} + +#if defined(SUPPORT_RGX) +static void HWPerfPeriodicHostEventsThread(void *pvData) +{ + PVRSRV_DATA *psPVRSRVData = pvData; + IMG_HANDLE 
hOSEvent; + PVRSRV_ERROR eError; + IMG_BOOL bHostStreamIsOpenForReading; + PVRSRV_RGXDEV_INFO *psDevInfo; + + eError = OSEventObjectOpen(psPVRSRVData->hHWPerfHostPeriodicEvObj, &hOSEvent); + PVR_LOG_RETURN_VOID_IF_ERROR(eError, "OSEventObjectOpen"); + +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) + while ((psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) && + !psPVRSRVData->bUnload && !psPVRSRVData->bHWPerfHostThreadStop) +#else + while (!psPVRSRVData->bUnload && !psPVRSRVData->bHWPerfHostThreadStop) +#endif + { + eError = OSEventObjectWaitKernel(hOSEvent, (IMG_UINT64)psPVRSRVData->ui32HWPerfHostThreadTimeout * 1000); + if (eError == PVRSRV_OK && (psPVRSRVData->bUnload || psPVRSRVData->bHWPerfHostThreadStop)) + { + PVR_DPF((PVR_DBG_MESSAGE, "HWPerfPeriodicHostEventsThread: Shutdown event received.")); + break; + } + + psDevInfo = (PVRSRV_RGXDEV_INFO*)psPVRSRVData->psDeviceNodeList->pvDevice; + + /* Check if the HWPerf host stream is open for reading before writing a packet, + this covers cases where the event filter is not zeroed before a reader disconnects. */ + bHostStreamIsOpenForReading = TLStreamIsOpenForReading(psDevInfo->hHWPerfHostStream); + + if (bHostStreamIsOpenForReading) + { +#if defined(SUPPORT_RGX) + RGXSRV_HWPERF_HOST_INFO(psPVRSRVData->psDeviceNodeList->pvDevice, RGX_HWPERF_INFO_EV_MEM_USAGE); +#endif + } + else + { +#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) + psPVRSRVData->ui32HWPerfHostThreadTimeout = INFINITE_SLEEP_TIMEOUT; +#else + /* This 'long' timeout is temporary until functionality is added to services to put a thread to sleep indefinitely. 
*/ + psPVRSRVData->ui32HWPerfHostThreadTimeout = 60 * 60 * 8 * 1000; // 8 hours +#endif + } + } + + eError = OSEventObjectClose(hOSEvent); + PVR_LOG_IF_ERROR(eError, "OSEventObjectClose"); +} +#endif + +#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) + +typedef enum +{ + DWT_ST_INIT, + DWT_ST_SLEEP_POWERON, + DWT_ST_SLEEP_POWEROFF, + DWT_ST_SLEEP_DEFERRED, + DWT_ST_FINAL +} DWT_STATE; + +typedef enum +{ + DWT_SIG_POWERON, + DWT_SIG_POWEROFF, + DWT_SIG_TIMEOUT, + DWT_SIG_UNLOAD, + DWT_SIG_ERROR +} DWT_SIGNAL; + +static inline IMG_BOOL _DwtIsPowerOn(PVRSRV_DATA *psPVRSRVData) +{ + return List_PVRSRV_DEVICE_NODE_IMG_BOOL_Any(psPVRSRVData->psDeviceNodeList, + PVRSRVIsDevicePowered); +} + +static inline void _DwtCheckHealthStatus(PVRSRV_DATA *psPVRSRVData, + PVRSRV_DEVICE_HEALTH_STATUS *peStatus, + IMG_BOOL bTimeOut) +{ + List_PVRSRV_DEVICE_NODE_ForEach_va(psPVRSRVData->psDeviceNodeList, + DevicesWatchdogThread_ForEachVaCb, + peStatus, + bTimeOut); +} + +static DWT_SIGNAL _DwtWait(PVRSRV_DATA *psPVRSRVData, IMG_HANDLE hOSEvent, + IMG_UINT32 ui32Timeout) +{ + PVRSRV_ERROR eError; + + eError = OSEventObjectWaitKernel(hOSEvent, (IMG_UINT64) ui32Timeout * 1000); + +#ifdef PVR_TESTING_UTILS + psPVRSRVData->ui32DevicesWdWakeupCounter++; +#endif + + if (eError == PVRSRV_OK) + { + if (psPVRSRVData->bUnload) + { + PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Shutdown event" + " received.")); + return DWT_SIG_UNLOAD; + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Power state " + "change event received.")); + + if (_DwtIsPowerOn(psPVRSRVData)) + { + return DWT_SIG_POWERON; + } + else + { + return DWT_SIG_POWEROFF; + } + } + } + else if (eError == PVRSRV_ERROR_TIMEOUT) + { + return DWT_SIG_TIMEOUT; + } + + PVR_DPF((PVR_DBG_ERROR, "DevicesWatchdogThread: Error (%d) when" + " waiting for event!", eError)); + return DWT_SIG_ERROR; +} + +#endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */ + +static void DevicesWatchdogThread(void *pvData) +{ + 
PVRSRV_DATA *psPVRSRVData = pvData; + PVRSRV_DEVICE_HEALTH_STATUS ePreviousHealthStatus = PVRSRV_DEVICE_HEALTH_STATUS_OK; + IMG_HANDLE hOSEvent; + PVRSRV_ERROR eError; +#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) + DWT_STATE eState = DWT_ST_INIT; + const IMG_UINT32 ui32OnTimeout = DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT; + const IMG_UINT32 ui32OffTimeout = INFINITE_SLEEP_TIMEOUT; +#else + IMG_UINT32 ui32Timeout = DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT; + /* Flag used to defer the sleep timeout change by 1 loop iteration. + * This helps to ensure at least two health checks are performed before a long sleep. + */ + IMG_BOOL bDoDeferredTimeoutChange = IMG_FALSE; +#endif + + PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Power off sleep time: %d.", + DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT)); + + /* Open an event on the devices watchdog event object so we can listen on it + and abort the devices watchdog thread. */ + eError = OSEventObjectOpen(psPVRSRVData->hDevicesWatchdogEvObj, &hOSEvent); + PVR_LOG_RETURN_VOID_IF_ERROR(eError, "OSEventObjectOpen"); + + /* Loop continuously checking the device status every few seconds. 
*/ +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) + while ((psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) && + !psPVRSRVData->bUnload) +#else + while (!psPVRSRVData->bUnload) +#endif + { +#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) + + switch (eState) + { + case DWT_ST_INIT: + { + if (_DwtIsPowerOn(psPVRSRVData)) + { + eState = DWT_ST_SLEEP_POWERON; + } + else + { + eState = DWT_ST_SLEEP_POWEROFF; + } + + break; + } + case DWT_ST_SLEEP_POWERON: + { + DWT_SIGNAL eSignal = _DwtWait(psPVRSRVData, hOSEvent, + ui32OnTimeout); + + switch (eSignal) { + case DWT_SIG_POWERON: + /* self-transition, nothing to do */ + break; + case DWT_SIG_POWEROFF: + eState = DWT_ST_SLEEP_DEFERRED; + break; + case DWT_SIG_TIMEOUT: + _DwtCheckHealthStatus(psPVRSRVData, + &ePreviousHealthStatus, + IMG_TRUE); + /* self-transition */ + break; + case DWT_SIG_UNLOAD: + eState = DWT_ST_FINAL; + break; + case DWT_SIG_ERROR: + /* deliberately ignored */ + break; + } + + break; + } + case DWT_ST_SLEEP_POWEROFF: + { + DWT_SIGNAL eSignal = _DwtWait(psPVRSRVData, hOSEvent, + ui32OffTimeout); + + switch (eSignal) { + case DWT_SIG_POWERON: + eState = DWT_ST_SLEEP_POWERON; + _DwtCheckHealthStatus(psPVRSRVData, + &ePreviousHealthStatus, + IMG_FALSE); + break; + case DWT_SIG_POWEROFF: + /* self-transition, nothing to do */ + break; + case DWT_SIG_TIMEOUT: + /* self-transition */ + _DwtCheckHealthStatus(psPVRSRVData, + &ePreviousHealthStatus, + IMG_TRUE); + break; + case DWT_SIG_UNLOAD: + eState = DWT_ST_FINAL; + break; + case DWT_SIG_ERROR: + /* deliberately ignored */ + break; + } + + break; + } + case DWT_ST_SLEEP_DEFERRED: + { + DWT_SIGNAL eSignal =_DwtWait(psPVRSRVData, hOSEvent, + ui32OnTimeout); + + switch (eSignal) { + case DWT_SIG_POWERON: + eState = DWT_ST_SLEEP_POWERON; + _DwtCheckHealthStatus(psPVRSRVData, + &ePreviousHealthStatus, + IMG_FALSE); + break; + case DWT_SIG_POWEROFF: + /* self-transition, nothing to do */ + break; + case DWT_SIG_TIMEOUT: + eState = 
DWT_ST_SLEEP_POWEROFF; + _DwtCheckHealthStatus(psPVRSRVData, + &ePreviousHealthStatus, + IMG_FALSE); + break; + case DWT_SIG_UNLOAD: + eState = DWT_ST_FINAL; + break; + case DWT_SIG_ERROR: + /* deliberately ignored */ + break; + } + + break; + } + case DWT_ST_FINAL: + /* the loop should terminate on next spin if this state is + * reached so nothing to do here. */ + break; + } + +#else /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */ + IMG_BOOL bPwrIsOn = IMG_FALSE; + IMG_BOOL bTimeOut = IMG_FALSE; + + /* Wait time between polls (done at the start of the loop to allow devices + to initialise) or for the event signal (shutdown or power on). */ + eError = OSEventObjectWaitKernel(hOSEvent, (IMG_UINT64)ui32Timeout * 1000); + +#ifdef PVR_TESTING_UTILS + psPVRSRVData->ui32DevicesWdWakeupCounter++; +#endif + if (eError == PVRSRV_OK) + { + if (psPVRSRVData->bUnload) + { + PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Shutdown event received.")); + break; + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Power state change event received.")); + } + } + else if (eError != PVRSRV_ERROR_TIMEOUT) + { + /* If timeout do nothing otherwise print warning message. 
*/ + PVR_DPF((PVR_DBG_ERROR, "DevicesWatchdogThread: " + "Error (%d) when waiting for event!", eError)); + } + else + { + bTimeOut = IMG_TRUE; + } + + bPwrIsOn = List_PVRSRV_DEVICE_NODE_IMG_BOOL_Any(psPVRSRVData->psDeviceNodeList, + PVRSRVIsDevicePowered); + + if (bPwrIsOn || psPVRSRVData->ui32DevicesWatchdogPwrTrans) + { + psPVRSRVData->ui32DevicesWatchdogPwrTrans = 0; + ui32Timeout = psPVRSRVData->ui32DevicesWatchdogTimeout = DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT; + bDoDeferredTimeoutChange = IMG_FALSE; + } + else + { + /* First, check if the previous loop iteration signalled a need to change the timeout period */ + if (bDoDeferredTimeoutChange) + { + ui32Timeout = psPVRSRVData->ui32DevicesWatchdogTimeout = DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT; + bDoDeferredTimeoutChange = IMG_FALSE; + } + else + { + /* Signal that we need to change the sleep timeout in the next loop iteration + * to allow the device health check code a further iteration at the current + * sleep timeout in order to determine bad health (e.g. 
stalled cCCB) by + * comparing past and current state snapshots */ + bDoDeferredTimeoutChange = IMG_TRUE; + } + } + + List_PVRSRV_DEVICE_NODE_ForEach_va(psPVRSRVData->psDeviceNodeList, + DevicesWatchdogThread_ForEachVaCb, + &ePreviousHealthStatus, + bTimeOut); + +#endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */ + } + + eError = OSEventObjectClose(hOSEvent); + PVR_LOG_IF_ERROR(eError, "OSEventObjectClose"); +} + +#if defined(SUPPORT_AUTOVZ) +static void AutoVzWatchdogThread_ForEachCb(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE) + { + return; + } + else if (psDeviceNode->pfnUpdateAutoVzWatchdog != NULL) + { + psDeviceNode->pfnUpdateAutoVzWatchdog(psDeviceNode); + } +} + +static void AutoVzWatchdogThread(void *pvData) +{ + PVRSRV_DATA *psPVRSRVData = pvData; + IMG_HANDLE hOSEvent; + PVRSRV_ERROR eError; + IMG_UINT32 ui32Timeout = PVR_AUTOVZ_WDG_PERIOD_MS / 3; + + /* Open an event on the devices watchdog event object so we can listen on it + and abort the devices watchdog thread. */ + eError = OSEventObjectOpen(psPVRSRVData->hAutoVzWatchdogEvObj, &hOSEvent); + PVR_LOG_RETURN_VOID_IF_ERROR(eError, "OSEventObjectOpen"); + +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) + while ((psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) && + !psPVRSRVData->bUnload) +#else + while (!psPVRSRVData->bUnload) +#endif + { + /* Wait time between polls (done at the start of the loop to allow devices + to initialise) or for the event signal (shutdown or power on). 
*/ + eError = OSEventObjectWaitKernel(hOSEvent, (IMG_UINT64)ui32Timeout * 1000); + + List_PVRSRV_DEVICE_NODE_ForEach(psPVRSRVData->psDeviceNodeList, + AutoVzWatchdogThread_ForEachCb); + } + + eError = OSEventObjectClose(hOSEvent); + PVR_LOG_IF_ERROR(eError, "OSEventObjectClose"); +} +#endif /* SUPPORT_AUTOVZ */ + +PVRSRV_DATA *PVRSRVGetPVRSRVData(void) +{ + return gpsPVRSRVData; +} + +static PVRSRV_ERROR InitialiseInfoPageTimeouts(PVRSRV_DATA *psPVRSRVData) +{ + if (NULL == psPVRSRVData) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_VALUE_RETRIES] = WAIT_TRY_COUNT; + psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_VALUE_TIMEOUT_MS] = + ((MAX_HW_TIME_US / 10000) + 1000); + /* TIMEOUT_INFO_VALUE_TIMEOUT_MS resolves to... + vp : 2000 + 1000 + emu : 2000 + 1000 + rgx_nohw : 50 + 1000 + plato : 30000 + 1000 (VIRTUAL_PLATFORM or EMULATOR) + 50 + 1000 (otherwise) + */ + + psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_CONDITION_RETRIES] = 5; + psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_CONDITION_TIMEOUT_MS] = + ((MAX_HW_TIME_US / 10000) + 100); + /* TIMEOUT_INFO_CONDITION_TIMEOUT_MS resolves to... 
+ vp : 2000 + 100 + emu : 2000 + 100 + rgx_nohw : 50 + 100 + plato : 30000 + 100 (VIRTUAL_PLATFORM or EMULATOR) + 50 + 100 (otherwise) + */ + + psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_TASK_QUEUE_RETRIES] = 10; +#if defined(VIRTUAL_PLATFORM) + psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_TASK_QUEUE_FLUSH_TIMEOUT_MS] = 1200000U; +#else +#if defined(EMULATOR) + psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_TASK_QUEUE_FLUSH_TIMEOUT_MS] = 20000U; +#else + psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_TASK_QUEUE_FLUSH_TIMEOUT_MS] = 1000U; +#endif /* EMULATOR */ +#endif + + return PVRSRV_OK; +} + +static PVRSRV_ERROR PopulateInfoPageBridges(PVRSRV_DATA *psPVRSRVData) +{ + PVR_RETURN_IF_INVALID_PARAM(psPVRSRVData); + + psPVRSRVData->pui32InfoPage[BRIDGE_INFO_PVR_BRIDGES] = gui32PVRBridges; + +#if defined(SUPPORT_RGX) + psPVRSRVData->pui32InfoPage[BRIDGE_INFO_RGX_BRIDGES] = gui32RGXBridges; +#else + psPVRSRVData->pui32InfoPage[BRIDGE_INFO_RGX_BRIDGES] = 0; +#endif + + return PVRSRV_OK; +} + +PVRSRV_ERROR +PVRSRVCommonDriverInit(void) +{ + PVRSRV_ERROR eError; + + PVRSRV_DATA *psPVRSRVData = NULL; + + IMG_UINT32 ui32AppHintCleanupThreadPriority; + IMG_UINT32 ui32AppHintWatchdogThreadPriority; + IMG_BOOL bEnablePageFaultDebug; + IMG_BOOL bEnableFullSyncTracking; + + void *pvAppHintState = NULL; + IMG_UINT32 ui32AppHintDefault; + + /* + * As this function performs one time driver initialisation, use the + * Services global device-independent data to determine whether or not + * this function has already been called. 
+ */ + if (gpsPVRSRVData) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Driver already initialised", __func__)); + return PVRSRV_ERROR_ALREADY_EXISTS; + } + + eError = DIInit(); + PVR_GOTO_IF_ERROR(eError, Error); + +#if defined(SUPPORT_DI_BRG_IMPL) + eError = PVRDIImplBrgRegister(); + PVR_GOTO_IF_ERROR(eError, Error); +#endif + +#ifdef PVRSRV_ENABLE_PROCESS_STATS + eError = PVRSRVStatsInitialise(); + PVR_GOTO_IF_ERROR(eError, Error); +#endif /* PVRSRV_ENABLE_PROCESS_STATS */ + + eError = HTB_CreateDIEntry(); + PVR_GOTO_IF_ERROR(eError, Error); + + /* + * Initialise the server bridges + */ + eError = ServerBridgeInit(); + PVR_GOTO_IF_ERROR(eError, Error); + + eError = PhysHeapInit(); + PVR_GOTO_IF_ERROR(eError, Error); + + eError = DevmemIntInit(); + PVR_GOTO_IF_ERROR(eError, Error); + + /* + * Allocate the device-independent data + */ + psPVRSRVData = OSAllocZMem(sizeof(*gpsPVRSRVData)); + PVR_GOTO_IF_NOMEM(psPVRSRVData, eError, Error); + + /* Now it is set up, point gpsPVRSRVData to the actual data */ + gpsPVRSRVData = psPVRSRVData; + + eError = DebugCommonInit(); + PVR_GOTO_IF_ERROR(eError, Error); + + eError = BridgeDispatcherInit(); + PVR_GOTO_IF_ERROR(eError, Error); + + /* Init any OS specific's */ + eError = OSInitEnvData(); + PVR_GOTO_IF_ERROR(eError, Error); + + /* Early init. 
server cache maintenance */ + eError = CacheOpInit(); + PVR_GOTO_IF_ERROR(eError, Error); + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + RIInitKM(); +#endif + + ui32AppHintDefault = PVRSRV_APPHINT_ENABLEPAGEFAULTDEBUG; + + OSCreateKMAppHintState(&pvAppHintState); + OSGetKMAppHintBOOL(pvAppHintState, EnablePageFaultDebug, + &ui32AppHintDefault, &bEnablePageFaultDebug); + OSFreeKMAppHintState(pvAppHintState); + + if (bEnablePageFaultDebug) + { + eError = DevicememHistoryInitKM(); + PVR_LOG_GOTO_IF_ERROR(eError, "DevicememHistoryInitKM", Error); + } + + eError = PMRInit(); + PVR_GOTO_IF_ERROR(eError, Error); + +#if defined(SUPPORT_DISPLAY_CLASS) + eError = DCInit(); + PVR_GOTO_IF_ERROR(eError, Error); +#endif + + /* Initialise overall system state */ + gpsPVRSRVData->eServicesState = PVRSRV_SERVICES_STATE_OK; + + /* Create an event object */ + eError = OSEventObjectCreate("PVRSRV_GLOBAL_EVENTOBJECT", &gpsPVRSRVData->hGlobalEventObject); + PVR_GOTO_IF_ERROR(eError, Error); + gpsPVRSRVData->ui32GEOConsecutiveTimeouts = 0; + + eError = PVRSRVCmdCompleteInit(); + PVR_GOTO_IF_ERROR(eError, Error); + + eError = PVRSRVHandleInit(); + PVR_GOTO_IF_ERROR(eError, Error); + + OSCreateKMAppHintState(&pvAppHintState); + ui32AppHintDefault = PVRSRV_APPHINT_CLEANUPTHREADPRIORITY; + OSGetKMAppHintUINT32(pvAppHintState, CleanupThreadPriority, + &ui32AppHintDefault, &ui32AppHintCleanupThreadPriority); + + ui32AppHintDefault = PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY; + OSGetKMAppHintUINT32(pvAppHintState, WatchdogThreadPriority, + &ui32AppHintDefault, &ui32AppHintWatchdogThreadPriority); + + ui32AppHintDefault = PVRSRV_APPHINT_ENABLEFULLSYNCTRACKING; + OSGetKMAppHintBOOL(pvAppHintState, EnableFullSyncTracking, + &ui32AppHintDefault, &bEnableFullSyncTracking); + OSFreeKMAppHintState(pvAppHintState); + pvAppHintState = NULL; + + eError = _CleanupThreadPrepare(gpsPVRSRVData); + PVR_LOG_GOTO_IF_ERROR(eError, "_CleanupThreadPrepare", Error); + + /* Create a thread which is used to do the 
deferred cleanup */ + eError = OSThreadCreatePriority(&gpsPVRSRVData->hCleanupThread, + "pvr_defer_free", + CleanupThread, + CleanupThreadDumpInfo, + IMG_TRUE, + gpsPVRSRVData, + ui32AppHintCleanupThreadPriority); + PVR_LOG_GOTO_IF_ERROR(eError, "OSThreadCreatePriority:1", Error); + + /* Create the devices watchdog event object */ + eError = OSEventObjectCreate("PVRSRV_DEVICESWATCHDOG_EVENTOBJECT", &gpsPVRSRVData->hDevicesWatchdogEvObj); + PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", Error); + + /* Create a thread which is used to detect fatal errors */ + eError = OSThreadCreatePriority(&gpsPVRSRVData->hDevicesWatchdogThread, + "pvr_device_wdg", + DevicesWatchdogThread, + NULL, + IMG_TRUE, + gpsPVRSRVData, + ui32AppHintWatchdogThreadPriority); + PVR_LOG_GOTO_IF_ERROR(eError, "OSThreadCreatePriority:2", Error); + +#if defined(SUPPORT_AUTOVZ) + /* Create the devices watchdog event object */ + eError = OSEventObjectCreate("PVRSRV_AUTOVZ_WATCHDOG_EVENTOBJECT", &gpsPVRSRVData->hAutoVzWatchdogEvObj); + PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", Error); + + /* Create a thread that maintains the FW-KM connection by regularly updating the virtualization watchdog */ + eError = OSThreadCreatePriority(&gpsPVRSRVData->hAutoVzWatchdogThread, + "pvr_autovz_wdg", + AutoVzWatchdogThread, + NULL, + IMG_TRUE, + gpsPVRSRVData, + OS_THREAD_HIGHEST_PRIORITY); + PVR_LOG_GOTO_IF_ERROR(eError, "OSThreadCreatePriority:3", Error); +#endif /* SUPPORT_AUTOVZ */ + + gpsPVRSRVData->psProcessHandleBase_Table = HASH_Create(PVRSRV_PROC_HANDLE_BASE_INIT); + + if (gpsPVRSRVData->psProcessHandleBase_Table == NULL) + { + PVR_LOG_GOTO_WITH_ERROR("psProcessHandleBase_Table", eError, PVRSRV_ERROR_UNABLE_TO_CREATE_HASH_TABLE, Error); + } + + eError = OSLockCreate(&gpsPVRSRVData->hProcessHandleBase_Lock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate:1", Error); + +#if defined(SUPPORT_RGX) + eError = OSLockCreate(&gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock); + 
PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate:2", Error); +#endif + + eError = HostMemDeviceCreate(&gpsPVRSRVData->psHostMemDeviceNode); + PVR_GOTO_IF_ERROR(eError, Error); + + /* Initialise the Transport Layer */ + eError = TLInit(); + PVR_GOTO_IF_ERROR(eError, Error); + + /* Initialise pdump */ + eError = PDUMPINIT(); + PVR_GOTO_IF_ERROR(eError, Error); + + g_ui32InitFlags |= INIT_DATA_ENABLE_PDUMPINIT; + + /* Initialise TL control stream */ + eError = TLStreamCreate(&psPVRSRVData->hTLCtrlStream, + PVRSRV_TL_CTLR_STREAM, PVRSRV_TL_CTLR_STREAM_SIZE, + TL_OPMODE_DROP_OLDEST, NULL, NULL, NULL, + NULL); + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "TLStreamCreate"); + psPVRSRVData->hTLCtrlStream = NULL; + } + + eError = InfoPageCreate(psPVRSRVData); + PVR_LOG_GOTO_IF_ERROR(eError, "InfoPageCreate", Error); + + + /* Initialise the Timeout Info */ + eError = InitialiseInfoPageTimeouts(psPVRSRVData); + PVR_GOTO_IF_ERROR(eError, Error); + + eError = PopulateInfoPageBridges(psPVRSRVData); + + PVR_GOTO_IF_ERROR(eError, Error); + + if (bEnableFullSyncTracking) + { + psPVRSRVData->pui32InfoPage[DEBUG_FEATURE_FLAGS] |= DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED; + } + if (bEnablePageFaultDebug) + { + psPVRSRVData->pui32InfoPage[DEBUG_FEATURE_FLAGS] |= DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED; + } + + /* Initialise the Host Trace Buffer */ + eError = HTBInit(); + PVR_GOTO_IF_ERROR(eError, Error); + +#if defined(SUPPORT_RGX) + RGXHWPerfClientInitAppHintCallbacks(); +#endif + + /* Late init. client cache maintenance via info. 
page */ + eError = CacheOpInit2(); + PVR_LOG_GOTO_IF_ERROR(eError, "CacheOpInit2", Error); + +#if defined(SUPPORT_FALLBACK_FENCE_SYNC) + eError = SyncFbRegisterSyncFunctions(); + PVR_LOG_GOTO_IF_ERROR(eError, "SyncFbRegisterSyncFunctions", Error); +#endif + + return 0; + +Error: + PVRSRVCommonDriverDeInit(); + return eError; +} + +void +PVRSRVCommonDriverDeInit(void) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_BOOL bEnablePageFaultDebug = IMG_FALSE; + + if (gpsPVRSRVData == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: missing device-independent data", + __func__)); + return; + } + + if (gpsPVRSRVData->pui32InfoPage != NULL) + { + bEnablePageFaultDebug = GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED; + } + + gpsPVRSRVData->bUnload = IMG_TRUE; + + if (gpsPVRSRVData->hProcessHandleBase_Lock) + { + OSLockDestroy(gpsPVRSRVData->hProcessHandleBase_Lock); + gpsPVRSRVData->hProcessHandleBase_Lock = NULL; + } + +#if defined(SUPPORT_RGX) + PVRSRVDestroyHWPerfHostThread(); + if (gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock) + { + OSLockDestroy(gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock); + gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock = NULL; + } +#endif + + if (gpsPVRSRVData->psProcessHandleBase_Table) + { + HASH_Delete(gpsPVRSRVData->psProcessHandleBase_Table); + gpsPVRSRVData->psProcessHandleBase_Table = NULL; + } + + if (gpsPVRSRVData->hGlobalEventObject) + { + OSEventObjectSignal(gpsPVRSRVData->hGlobalEventObject); + } + +#if defined(SUPPORT_AUTOVZ) + /* Stop and cleanup the devices watchdog thread */ + if (gpsPVRSRVData->hAutoVzWatchdogThread) + { + LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US) + { + if (gpsPVRSRVData->hAutoVzWatchdogEvObj) + { + eError = OSEventObjectSignal(gpsPVRSRVData->hAutoVzWatchdogEvObj); + PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); + } + + eError = OSThreadDestroy(gpsPVRSRVData->hAutoVzWatchdogThread); + if (PVRSRV_OK == eError) + { + gpsPVRSRVData->hAutoVzWatchdogThread = NULL; + break; + } + 
OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + PVR_LOG_IF_ERROR(eError, "OSThreadDestroy"); + } + + if (gpsPVRSRVData->hAutoVzWatchdogEvObj) + { + eError = OSEventObjectDestroy(gpsPVRSRVData->hAutoVzWatchdogEvObj); + gpsPVRSRVData->hAutoVzWatchdogEvObj = NULL; + PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy"); + } +#endif /* SUPPORT_AUTOVZ */ + + /* Stop and cleanup the devices watchdog thread */ + if (gpsPVRSRVData->hDevicesWatchdogThread) + { + LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US) + { + if (gpsPVRSRVData->hDevicesWatchdogEvObj) + { + eError = OSEventObjectSignal(gpsPVRSRVData->hDevicesWatchdogEvObj); + PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); + } + + eError = OSThreadDestroy(gpsPVRSRVData->hDevicesWatchdogThread); + if (PVRSRV_OK == eError) + { + gpsPVRSRVData->hDevicesWatchdogThread = NULL; + break; + } + OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + PVR_LOG_IF_ERROR(eError, "OSThreadDestroy"); + } + + if (gpsPVRSRVData->hDevicesWatchdogEvObj) + { + eError = OSEventObjectDestroy(gpsPVRSRVData->hDevicesWatchdogEvObj); + gpsPVRSRVData->hDevicesWatchdogEvObj = NULL; + PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy"); + } + + /* Stop and cleanup the deferred clean up thread, event object and + * deferred context list. 
+ */ + if (gpsPVRSRVData->hCleanupThread) + { + LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US) + { + if (gpsPVRSRVData->hCleanupEventObject) + { + eError = OSEventObjectSignal(gpsPVRSRVData->hCleanupEventObject); + PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); + } + + eError = OSThreadDestroy(gpsPVRSRVData->hCleanupThread); + if (PVRSRV_OK == eError) + { + gpsPVRSRVData->hCleanupThread = NULL; + break; + } + OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + PVR_LOG_IF_ERROR(eError, "OSThreadDestroy"); + } + + if (gpsPVRSRVData->hCleanupEventObject) + { + eError = OSEventObjectDestroy(gpsPVRSRVData->hCleanupEventObject); + gpsPVRSRVData->hCleanupEventObject = NULL; + PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy"); + } + + /* Tear down the HTB before PVRSRVHandleDeInit() removes its TL handle */ + /* HTB De-init happens in device de-registration currently */ + eError = HTBDeInit(); + PVR_LOG_IF_ERROR(eError, "HTBDeInit"); + + /* Tear down CacheOp framework information page first */ + CacheOpDeInit2(); + + /* Clean up information page */ + InfoPageDestroy(gpsPVRSRVData); + + /* Close the TL control plane stream. 
*/ + if (gpsPVRSRVData->hTLCtrlStream != NULL) + { + TLStreamClose(gpsPVRSRVData->hTLCtrlStream); + } + + /* deinitialise pdump */ + if ((g_ui32InitFlags & INIT_DATA_ENABLE_PDUMPINIT) > 0) + { + PDUMPDEINIT(); + } + + /* Clean up Transport Layer resources that remain */ + TLDeInit(); + + HostMemDeviceDestroy(gpsPVRSRVData->psHostMemDeviceNode); + gpsPVRSRVData->psHostMemDeviceNode = NULL; + + eError = PVRSRVHandleDeInit(); + PVR_LOG_IF_ERROR(eError, "PVRSRVHandleDeInit"); + + /* destroy event object */ + if (gpsPVRSRVData->hGlobalEventObject) + { + OSEventObjectDestroy(gpsPVRSRVData->hGlobalEventObject); + gpsPVRSRVData->hGlobalEventObject = NULL; + } + + PVRSRVCmdCompleteDeinit(); + +#if defined(SUPPORT_DISPLAY_CLASS) + eError = DCDeInit(); + PVR_LOG_IF_ERROR(eError, "DCDeInit"); +#endif + + eError = PMRDeInit(); + PVR_LOG_IF_ERROR(eError, "PMRDeInit"); + + BridgeDispatcherDeinit(); + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + RIDeInitKM(); +#endif + + if (bEnablePageFaultDebug) + { + DevicememHistoryDeInitKM(); + } + + CacheOpDeInit(); + + OSDeInitEnvData(); + + (void) DevmemIntDeInit(); + + eError = ServerBridgeDeInit(); + PVR_LOG_IF_ERROR(eError, "ServerBridgeDeinit"); + + eError = PhysHeapDeinit(); + PVR_LOG_IF_ERROR(eError, "PhysHeapDeinit"); + + HTB_DestroyDIEntry(); + +#ifdef PVRSRV_ENABLE_PROCESS_STATS + PVRSRVStatsDestroy(); +#endif /* PVRSRV_ENABLE_PROCESS_STATS */ + + DebugCommonDeInit(); + + DIDeInit(); + + OSFreeMem(gpsPVRSRVData); + gpsPVRSRVData = NULL; +} + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +/*************************************************************************/ /*! 
+@Function CreateGpuVirtValArenas +@Description Create virtualization validation arenas +@Input psDeviceNode The device node +@Return PVRSRV_ERROR PVRSRV_OK on success +*/ /**************************************************************************/ +static PVRSRV_ERROR CreateGpuVirtValArenas(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + /* aui64OSidMin and aui64OSidMax are what we program into HW registers. + The values are different from base/size of arenas. */ + IMG_UINT64 aui64OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]; + IMG_UINT64 aui64OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]; + PHYS_HEAP_CONFIG *psGPULocalHeap = FindPhysHeapConfig(psDeviceNode->psDevConfig, PHYS_HEAP_USAGE_GPU_LOCAL); + PHYS_HEAP_CONFIG *psDisplayHeap = FindPhysHeapConfig(psDeviceNode->psDevConfig, PHYS_HEAP_USAGE_DISPLAY); + IMG_UINT64 uBase; + IMG_UINT64 uSize; + IMG_UINT64 uBaseShared; + IMG_UINT64 uSizeShared; + IMG_UINT64 uSizeSharedReg; + IMG_UINT32 i; + + /* Shared region is fixed size, the remaining space is divided amongst OSes */ + uSizeShared = PVR_ALIGN(GPUVIRT_SIZEOF_SHARED, (IMG_DEVMEM_SIZE_T)OSGetPageSize()); + uSize = psGPULocalHeap->uiSize - uSizeShared; + uSize /= GPUVIRT_VALIDATION_NUM_OS; + uSize = uSize & ~((IMG_UINT64)OSGetPageSize() - 1ULL); /* Align, round down */ + + uBase = psGPULocalHeap->sCardBase.uiAddr; + uBaseShared = uBase + uSize * GPUVIRT_VALIDATION_NUM_OS; + uSizeShared = psGPULocalHeap->uiSize - (uBaseShared - uBase); + + PVR_LOG(("GPUVIRT_VALIDATION split GPU_LOCAL base: 0x%" IMG_UINT64_FMTSPECX ", size: 0x%" IMG_UINT64_FMTSPECX ".", + psGPULocalHeap->sCardBase.uiAddr, + psGPULocalHeap->uiSize)); + + /* If a display heap config exists, include the display heap in the non-secure regions */ + if (psDisplayHeap) + { + /* Only works when DISPLAY heap follows GPU_LOCAL heap. 
*/ + PVR_LOG(("GPUVIRT_VALIDATION include DISPLAY in shared, base: 0x%" IMG_UINT64_FMTSPECX ", size: 0x%" IMG_UINT64_FMTSPECX ".", + psDisplayHeap->sCardBase.uiAddr, + psDisplayHeap->uiSize)); + + uSizeSharedReg = uSizeShared + psDisplayHeap->uiSize; + } + else + { + uSizeSharedReg = uSizeShared; + } + + PVR_ASSERT(uSize >= GPUVIRT_MIN_SIZE); + PVR_ASSERT(uSizeSharedReg >= GPUVIRT_SIZEOF_SHARED); + + for (i = 0; i < GPUVIRT_VALIDATION_NUM_OS; i++) + { + IMG_CHAR aszOSRAName[RA_MAX_NAME_LENGTH]; + + PVR_LOG(("GPUVIRT_VALIDATION create arena OS: %d, base: 0x%" IMG_UINT64_FMTSPECX ", size: 0x%" IMG_UINT64_FMTSPECX ".", i, uBase, uSize)); + + OSSNPrintf(aszOSRAName, RA_MAX_NAME_LENGTH, "GPUVIRT_OS%d", i); + + psDeviceNode->psOSidSubArena[i] = RA_Create_With_Span(aszOSRAName, + OSGetPageShift(), + 0, + uBase, + uSize); + PVR_LOG_RETURN_IF_NOMEM(psDeviceNode->psOSidSubArena[i], "RA_Create_With_Span"); + + aui64OSidMin[GPUVIRT_VAL_REGION_SECURE][i] = uBase; + + if (i == 0) + { + /* OSid0 has access to all regions */ + aui64OSidMax[GPUVIRT_VAL_REGION_SECURE][i] = psGPULocalHeap->uiSize - 1ULL; + } + else + { + aui64OSidMax[GPUVIRT_VAL_REGION_SECURE][i] = uBase + uSize - 1ULL; + } + + /* uSizeSharedReg includes display heap */ + aui64OSidMin[GPUVIRT_VAL_REGION_SHARED][i] = uBaseShared; + aui64OSidMax[GPUVIRT_VAL_REGION_SHARED][i] = uBaseShared + uSizeSharedReg - 1ULL; + + PVR_LOG(("GPUVIRT_VALIDATION HW reg regions %d: min[0]: 0x%" IMG_UINT64_FMTSPECX ", max[0]: 0x%" IMG_UINT64_FMTSPECX ", min[1]: 0x%" IMG_UINT64_FMTSPECX ", max[1]: 0x%" IMG_UINT64_FMTSPECX ",", + i, + aui64OSidMin[GPUVIRT_VAL_REGION_SECURE][i], + aui64OSidMax[GPUVIRT_VAL_REGION_SECURE][i], + aui64OSidMin[GPUVIRT_VAL_REGION_SHARED][i], + aui64OSidMax[GPUVIRT_VAL_REGION_SHARED][i])); + uBase += uSize; + } + + PVR_LOG(("GPUVIRT_VALIDATION create arena Shared, base: 0x%" IMG_UINT64_FMTSPECX ", size: 0x%" IMG_UINT64_FMTSPECX ".", uBaseShared, uSizeShared)); + + PVR_ASSERT(uSizeShared >= GPUVIRT_SIZEOF_SHARED); 
+ + /* uSizeShared does not include display heap */ + psDeviceNode->psOSSharedArena = RA_Create_With_Span("GPUVIRT_SHARED", + OSGetPageShift(), + 0, + uBaseShared, + uSizeShared); + PVR_LOG_RETURN_IF_NOMEM(psDeviceNode->psOSSharedArena, "RA_Create_With_Span"); + + if (psDeviceNode->psDevConfig->pfnSysDevVirtInit != NULL) + { + psDeviceNode->psDevConfig->pfnSysDevVirtInit(aui64OSidMin, aui64OSidMax); + } + + return PVRSRV_OK; +} + +/* + * Counter-part to CreateGpuVirtValArenas. + */ +static void DestroyGpuVirtValArenas(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + IMG_UINT32 uiCounter = 0; + + /* + * NOTE: We overload psOSidSubArena[0] into the psLocalMemArena so we must + * not free it here as it gets cleared later. + */ + for (uiCounter = 1; uiCounter < GPUVIRT_VALIDATION_NUM_OS; uiCounter++) + { + if (psDeviceNode->psOSidSubArena[uiCounter] == NULL) + { + continue; + } + RA_Delete(psDeviceNode->psOSidSubArena[uiCounter]); + } + + if (psDeviceNode->psOSSharedArena != NULL) + { + RA_Delete(psDeviceNode->psOSSharedArena); + } +} + +#endif + +static void _SysDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, + IMG_UINT32 ui32VerbLevel, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + /* Only dump info once */ + PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE*) hDebugRequestHandle; + + PVR_DUMPDEBUG_LOG("------[ System Summary Device ID:%d ]------", psDeviceNode->sDevId.ui32InternalID); + + switch (psDeviceNode->eCurrentSysPowerState) + { + case PVRSRV_SYS_POWER_STATE_OFF: + PVR_DUMPDEBUG_LOG("Device System Power State: OFF"); + break; + case PVRSRV_SYS_POWER_STATE_ON: + PVR_DUMPDEBUG_LOG("Device System Power State: ON"); + break; + default: + PVR_DUMPDEBUG_LOG("Device System Power State: UNKNOWN (%d)", + psDeviceNode->eCurrentSysPowerState); + break; + } + + PVR_DUMPDEBUG_LOG("MaxHWTOut: %dus, WtTryCt: %d, WDGTOut(on,off): (%dms,%dms)", + MAX_HW_TIME_US, WAIT_TRY_COUNT, DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT, 
DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT); + + SysDebugInfo(psDeviceNode->psDevConfig, pfnDumpDebugPrintf, pvDumpDebugFile); +} + +static void _ThreadsDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDbgRequestHandle, + IMG_UINT32 ui32VerbLevel, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + PVR_UNREFERENCED_PARAMETER(hDbgRequestHandle); + + if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH)) + { + PVR_DUMPDEBUG_LOG("------[ Server Thread Summary ]------"); + OSThreadDumpInfo(pfnDumpDebugPrintf, pvDumpDebugFile); + } +} + +static PVRSRV_ERROR PVRSRVValidatePhysHeapConfig(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + IMG_UINT32 ui32FlagsAccumulate = 0; + IMG_UINT32 i; + + PVR_LOG_RETURN_IF_FALSE(psDevConfig->ui32PhysHeapCount > 0, + "Device config must specify at least one phys heap config.", + PVRSRV_ERROR_PHYSHEAP_CONFIG); + + for (i = 0; i < psDevConfig->ui32PhysHeapCount; i++) + { + PHYS_HEAP_CONFIG *psHeapConf = &psDevConfig->pasPhysHeaps[i]; + + PVR_LOG_RETURN_IF_FALSE_VA(psHeapConf->ui32UsageFlags != 0, + PVRSRV_ERROR_PHYSHEAP_CONFIG, + "Phys heap config %d: must specify usage flags.", i); + + PVR_LOG_RETURN_IF_FALSE_VA((ui32FlagsAccumulate & psHeapConf->ui32UsageFlags) == 0, + PVRSRV_ERROR_PHYSHEAP_CONFIG, + "Phys heap config %d: duplicate usage flags.", i); + + ui32FlagsAccumulate |= psHeapConf->ui32UsageFlags; + } + + PVR_LOG_RETURN_IF_FALSE((ui32FlagsAccumulate & PHYS_HEAP_USAGE_GPU_LOCAL) != 0 , + "Device config must specify GPU local phys heap config.", + PVRSRV_ERROR_PHYSHEAP_CONFIG); + + return PVRSRV_OK; +} + +PVRSRV_ERROR PVRSRVPhysMemHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + PVRSRV_ERROR eError; + PHYS_HEAP *psPhysHeap; + PHYS_HEAP_TYPE eHeapType; + PVRSRV_PHYS_HEAP ePhysHeap; + + eError = PVRSRVValidatePhysHeapConfig(psDevConfig); + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVValidatePhysHeapConfig"); + + /* Register the physical memory heaps */ + 
psDeviceNode->papsRegisteredPhysHeaps = + OSAllocZMem(sizeof(*psDeviceNode->papsRegisteredPhysHeaps) * + psDevConfig->ui32PhysHeapCount); + PVR_RETURN_IF_NOMEM(psDeviceNode->papsRegisteredPhysHeaps); + + eError = PhysHeapCreateHeapsFromConfigs(psDeviceNode, + psDevConfig->pasPhysHeaps, + psDevConfig->ui32PhysHeapCount, + psDeviceNode->papsRegisteredPhysHeaps, + &psDeviceNode->ui32RegisteredPhysHeaps); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapCreateHeapsFromConfigs", ErrorDeinit); + + for (ePhysHeap = 0; ePhysHeap < PVRSRV_PHYS_HEAP_LAST; ePhysHeap++) + { + if (PhysHeapPVRLayerAcquire(ePhysHeap)) + { + eError = PhysHeapAcquireByDevPhysHeap(ePhysHeap, psDeviceNode, &psDeviceNode->apsPhysHeap[ePhysHeap]); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquireByDevPhysHeap", ErrorDeinit); + } + } + + eHeapType = PhysHeapGetType(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_GPU_LOCAL]); + + if (eHeapType == PHYS_HEAP_TYPE_UMA) + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: GPU physical heap uses OS System memory (UMA)", __func__)); + + psDeviceNode->sDevMMUPxSetup.pfnDevPxAlloc = OSPhyContigPagesAlloc; + psDeviceNode->sDevMMUPxSetup.pfnDevPxFree = OSPhyContigPagesFree; + psDeviceNode->sDevMMUPxSetup.pfnDevPxMap = OSPhyContigPagesMap; + psDeviceNode->sDevMMUPxSetup.pfnDevPxUnMap = OSPhyContigPagesUnmap; + psDeviceNode->sDevMMUPxSetup.pfnDevPxClean = OSPhyContigPagesClean; + psDeviceNode->sDevMMUPxSetup.psPxRA = NULL; + +#if defined(SUPPORT_GPUVIRT_VALIDATION) + PVR_DPF((PVR_DBG_ERROR, "%s: Virtualisation Validation builds are currently only" + " supported on systems with local memory (LMA).", __func__)); + eError = PVRSRV_ERROR_NOT_SUPPORTED; + goto ErrorDeinit; +#endif + } + else + { + psPhysHeap = psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_GPU_LOCAL]; + + PVR_DPF((PVR_DBG_MESSAGE, "%s: GPU physical heap uses local memory managed by the driver (LMA)", __func__)); + + psDeviceNode->sDevMMUPxSetup.pfnDevPxAlloc = LMA_PhyContigPagesAlloc; + psDeviceNode->sDevMMUPxSetup.pfnDevPxFree = 
LMA_PhyContigPagesFree; + psDeviceNode->sDevMMUPxSetup.pfnDevPxMap = LMA_PhyContigPagesMap; + psDeviceNode->sDevMMUPxSetup.pfnDevPxUnMap = LMA_PhyContigPagesUnmap; + psDeviceNode->sDevMMUPxSetup.pfnDevPxClean = LMA_PhyContigPagesClean; + +#if defined(SUPPORT_GPUVIRT_VALIDATION) + eError = CreateGpuVirtValArenas(psDeviceNode); + PVR_LOG_GOTO_IF_ERROR(eError, "CreateGpuVirtValArenas", ErrorDeinit); + + psDeviceNode->sDevMMUPxSetup.psPxRA = psDeviceNode->psOSidSubArena[0]; + psDeviceNode->sDevMMUPxSetup.pfnDevPxAllocGPV = LMA_PhyContigPagesAllocGPV; +#else + eError = PhysmemGetArenaLMA(psPhysHeap, &psDeviceNode->sDevMMUPxSetup.psPxRA); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemGetArenaLMA", ErrorDeinit); +#endif + } + + return PVRSRV_OK; + +ErrorDeinit: + PVR_ASSERT(IMG_FALSE); + PVRSRVPhysMemHeapsDeinit(psDeviceNode); + + return eError; +} + +void PVRSRVPhysMemHeapsDeinit(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_PHYS_HEAP ePhysHeapIdx; + IMG_UINT32 i; + + if (psDeviceNode->psFwMMUReservedMemArena) + { + RA_Delete(psDeviceNode->psFwMMUReservedMemArena); + psDeviceNode->psFwMMUReservedMemArena = NULL; + } + +#if defined(SUPPORT_GPUVIRT_VALIDATION) + /* Remove local LMA subarenas */ + DestroyGpuVirtValArenas(psDeviceNode); +#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */ + + psDeviceNode->sDevMMUPxSetup.psPxRA = NULL; + + /* Release heaps */ + for (ePhysHeapIdx = 0; + ePhysHeapIdx < ARRAY_SIZE(psDeviceNode->apsPhysHeap); + ePhysHeapIdx++) + { + if (psDeviceNode->apsPhysHeap[ePhysHeapIdx]) + { + PhysHeapRelease(psDeviceNode->apsPhysHeap[ePhysHeapIdx]); + } + } + + if (psDeviceNode->psFWMainPhysHeap) + { + PhysHeapDestroy(psDeviceNode->psFWMainPhysHeap); + psDeviceNode->psFWMainPhysHeap = NULL; + } + + if (psDeviceNode->psFWCfgPhysHeap) + { + PhysHeapDestroy(psDeviceNode->psFWCfgPhysHeap); + psDeviceNode->psFWCfgPhysHeap = NULL; + } + + for (i = 0; i < RGX_NUM_OS_SUPPORTED; i++) + { + if (psDeviceNode->apsFWPremapPhysHeap[i]) + { + 
PhysHeapDestroy(psDeviceNode->apsFWPremapPhysHeap[i]); + psDeviceNode->apsFWPremapPhysHeap[i] = NULL; + } + } + + /* Unregister heaps */ + for (i = 0; i < psDeviceNode->ui32RegisteredPhysHeaps; i++) + { + PhysHeapDestroy(psDeviceNode->papsRegisteredPhysHeaps[i]); + } + + OSFreeMem(psDeviceNode->papsRegisteredPhysHeaps); +} + +PHYS_HEAP_CONFIG* FindPhysHeapConfig(PVRSRV_DEVICE_CONFIG *psDevConfig, + PHYS_HEAP_USAGE_FLAGS ui32Flags) +{ + IMG_UINT32 i; + + for (i = 0; i < psDevConfig->ui32PhysHeapCount; i++) + { + if (psDevConfig->pasPhysHeaps[i].ui32UsageFlags == ui32Flags) + { + return &psDevConfig->pasPhysHeaps[i]; + } + } + + return NULL; +} + +PVRSRV_ERROR PVRSRVCommonDeviceCreate(void *pvOSDevice, + IMG_INT32 i32OsDeviceID, + PVRSRV_DEVICE_NODE **ppsDeviceNode) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_ERROR eError; + PVRSRV_DEVICE_CONFIG *psDevConfig; + PVRSRV_DEVICE_NODE *psDeviceNode; + IMG_UINT32 ui32AppHintDefault; + IMG_UINT32 ui32AppHintDriverMode; +#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__) + IMG_UINT32 ui32AppHintPhysMemTestPasses; +#endif + void *pvAppHintState = NULL; +#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + IMG_HANDLE hProcessStats; +#endif + + MULTI_DEVICE_BRINGUP_DPF("PVRSRVCommonDeviceCreate: DevId %d", i32OsDeviceID); + + /* Read driver mode (i.e. 
native, host or guest) AppHint early as it is + required by SysDevInit */ + ui32AppHintDefault = PVRSRV_APPHINT_DRIVERMODE; + OSCreateKMAppHintState(&pvAppHintState); + OSGetKMAppHintUINT32(pvAppHintState, DriverMode, + &ui32AppHintDefault, &ui32AppHintDriverMode); + psPVRSRVData->eDriverMode = PVRSRV_VZ_APPHINT_MODE(ui32AppHintDriverMode); + psPVRSRVData->bForceApphintDriverMode = PVRSRV_VZ_APPHINT_MODE_IS_OVERRIDE(ui32AppHintDriverMode); + OSFreeKMAppHintState(pvAppHintState); + pvAppHintState = NULL; + + psDeviceNode = OSAllocZMemNoStats(sizeof(*psDeviceNode)); + PVR_LOG_RETURN_IF_NOMEM(psDeviceNode, "psDeviceNode"); + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + /* Allocate process statistics */ + eError = PVRSRVStatsRegisterProcess(&hProcessStats); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVStatsRegisterProcess", ErrorFreeDeviceNode); +#endif + + psDeviceNode->sDevId.i32OsDeviceID = i32OsDeviceID; + psDeviceNode->sDevId.ui32InternalID = psPVRSRVData->ui32RegisteredDevices; + + eError = SysDevInit(pvOSDevice, &psDevConfig); + PVR_LOG_GOTO_IF_ERROR(eError, "SysDevInit", ErrorDeregisterStats); + + PVR_ASSERT(psDevConfig); + PVR_ASSERT(psDevConfig->pvOSDevice == pvOSDevice); + PVR_ASSERT(!psDevConfig->psDevNode); + + psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_INIT; + +#if defined(SUPPORT_AUTOVZ) + /* AutoVz platforms should have the GPU domain powered on before startup */ + psDeviceNode->eCurrentSysPowerState = PVRSRV_SYS_POWER_STATE_ON; +#else + /* Assume system power is off at start of day and turned on by the time we hit RGXInitDevPart2 */ + psDeviceNode->eCurrentSysPowerState = PVRSRV_SYS_POWER_STATE_OFF; +#endif + + psDeviceNode->psDevConfig = psDevConfig; + psDevConfig->psDevNode = psDeviceNode; + +#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__) + if (PVRSRV_VZ_MODE_IS(NATIVE)) + { + /* Read AppHint - Configurable memory test pass count */ + ui32AppHintDefault = 0; + 
OSCreateKMAppHintState(&pvAppHintState); + OSGetKMAppHintUINT32(pvAppHintState, PhysMemTestPasses, + &ui32AppHintDefault, &ui32AppHintPhysMemTestPasses); + OSFreeKMAppHintState(pvAppHintState); + pvAppHintState = NULL; + + if (ui32AppHintPhysMemTestPasses > 0) + { + eError = PhysMemTest(psDevConfig, ui32AppHintPhysMemTestPasses); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysMemTest", ErrorSysDevDeInit); + } + } +#endif + + /* Initialise the paravirtualised connection */ + if (!PVRSRV_VZ_MODE_IS(NATIVE)) + { + PvzConnectionInit(psDevConfig); + PVR_GOTO_IF_ERROR(eError, ErrorSysDevDeInit); + } + + eError = PVRSRVRegisterDbgTable(psDeviceNode, + g_aui32DebugOrderTable, + ARRAY_SIZE(g_aui32DebugOrderTable)); + PVR_GOTO_IF_ERROR(eError, ErrorPvzConnectionDeInit); + + eError = PVRSRVPowerLockInit(psDeviceNode); + PVR_GOTO_IF_ERROR(eError, ErrorUnregisterDbgTable); + + eError = PVRSRVPhysMemHeapsInit(psDeviceNode, psDevConfig); + PVR_GOTO_IF_ERROR(eError, ErrorPowerLockDeInit); + +#if defined(SUPPORT_RGX) + /* Requirements: + * registered GPU and FW local heaps */ + /* debug table */ + eError = RGXRegisterDevice(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "RGXRegisterDevice"); + eError = PVRSRV_ERROR_DEVICE_REGISTER_FAILED; + goto ErrorPhysMemHeapsDeinit; + } +#endif + + if (psDeviceNode->pfnPhysMemDeviceHeapsInit != NULL) + { + eError = psDeviceNode->pfnPhysMemDeviceHeapsInit(psDeviceNode); + PVR_GOTO_IF_ERROR(eError, ErrorPhysMemHeapsDeinit); + } + + if (psDeviceNode->pfnFwMMUInit != NULL) + { + eError = psDeviceNode->pfnFwMMUInit(psDeviceNode); + PVR_GOTO_IF_ERROR(eError, ErrorFwMMUDeinit); + } + + psDeviceNode->sDevMMUPxSetup.uiMMUPxLog2AllocGran = OSGetPageShift(); + + eError = SyncServerInit(psDeviceNode); + PVR_GOTO_IF_ERROR(eError, ErrorDeInitRgx); + + eError = SyncCheckpointInit(psDeviceNode); + PVR_LOG_GOTO_IF_ERROR(eError, "SyncCheckpointInit", ErrorSyncCheckpointInit); + + /* + * This is registered before doing device specific initialisation 
to ensure + * generic device information is dumped first during a debug request. + */ + eError = PVRSRVRegisterDbgRequestNotify(&psDeviceNode->hDbgReqNotify, + psDeviceNode, + _SysDebugRequestNotify, + DEBUG_REQUEST_SYS, + psDeviceNode); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVRegisterDbgRequestNotify", ErrorRegDbgReqNotify); + + eError = PVRSRVRegisterDbgRequestNotify(&psDeviceNode->hThreadsDbgReqNotify, + psDeviceNode, + _ThreadsDebugRequestNotify, + DEBUG_REQUEST_SYS, + NULL); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVRegisterDbgRequestNotify(threads)", ErrorRegThreadsDbgReqNotify); + + eError = HTBDeviceCreate(psDeviceNode); + PVR_LOG_GOTO_IF_ERROR(eError, "HTBDeviceCreate", ErrorHTBDeviceCreate); + + psPVRSRVData->ui32RegisteredDevices++; + +#if defined(SUPPORT_LINUX_DVFS) && !defined(NO_HARDWARE) + eError = InitDVFS(psDeviceNode); + PVR_LOG_GOTO_IF_ERROR(eError, "InitDVFS", ErrorDecrementDeviceCount); +#endif + + OSAtomicWrite(&psDeviceNode->iNumClockSpeedChanges, 0); + +#if defined(PVR_TESTING_UTILS) + TUtilsInit(psDeviceNode); +#endif + + OSWRLockCreate(&psDeviceNode->hMemoryContextPageFaultNotifyListLock); + if (psDeviceNode->hMemoryContextPageFaultNotifyListLock == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock for PF notify list", + __func__)); + goto ErrorDecrementDeviceCount; + } + + dllist_init(&psDeviceNode->sMemoryContextPageFaultNotifyListHead); + + PVR_DPF((PVR_DBG_MESSAGE, "Registered device %p", psDeviceNode)); + PVR_DPF((PVR_DBG_MESSAGE, "Register bank address = 0x%08lx", + (unsigned long)psDevConfig->sRegsCpuPBase.uiAddr)); + PVR_DPF((PVR_DBG_MESSAGE, "IRQ = %d", psDevConfig->ui32IRQ)); + +/* SUPPORT_ALT_REGBASE is defined for rogue cores only */ +#if defined(SUPPORT_RGX) && defined(SUPPORT_ALT_REGBASE) + { + IMG_DEV_PHYADDR sRegsGpuPBase; + + PhysHeapCpuPAddrToDevPAddr(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_GPU_LOCAL], + 1, + &sRegsGpuPBase, + &(psDeviceNode->psDevConfig->sRegsCpuPBase)); + + PVR_LOG(("%s: Using alternate 
Register bank GPU address: 0x%08lx (orig: 0x%08lx)", __func__, + (unsigned long)psDevConfig->sAltRegsGpuPBase.uiAddr, + (unsigned long)sRegsGpuPBase.uiAddr)); + } +#endif + + /* Finally insert the device into the dev-list and set it as active */ + List_PVRSRV_DEVICE_NODE_InsertTail(&psPVRSRVData->psDeviceNodeList, + psDeviceNode); + + *ppsDeviceNode = psDeviceNode; + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + /* Close the process statistics */ + PVRSRVStatsDeregisterProcess(hProcessStats); +#endif + +#if defined(SUPPORT_VALIDATION) + OSLockCreateNoStats(&psDeviceNode->hValidationLock); +#endif + + return PVRSRV_OK; + +ErrorDecrementDeviceCount: + psPVRSRVData->ui32RegisteredDevices--; +#if defined(PVR_TESTING_UTILS) + TUtilsDeinit(psDeviceNode); +#endif + HTBDeviceDestroy(psDeviceNode); + +ErrorHTBDeviceCreate: + if (psDeviceNode->hThreadsDbgReqNotify) + { + PVRSRVUnregisterDbgRequestNotify(psDeviceNode->hThreadsDbgReqNotify); + } + +ErrorRegThreadsDbgReqNotify: + if (psDeviceNode->hDbgReqNotify) + { + PVRSRVUnregisterDbgRequestNotify(psDeviceNode->hDbgReqNotify); + } + +ErrorRegDbgReqNotify: + SyncCheckpointDeinit(psDeviceNode); + +ErrorSyncCheckpointInit: + SyncServerDeinit(psDeviceNode); + +ErrorDeInitRgx: +#if defined(SUPPORT_RGX) + DevDeInitRGX(psDeviceNode); +#endif +ErrorFwMMUDeinit: +ErrorPhysMemHeapsDeinit: + PVRSRVPhysMemHeapsDeinit(psDeviceNode); +ErrorPowerLockDeInit: + PVRSRVPowerLockDeInit(psDeviceNode); +ErrorUnregisterDbgTable: + PVRSRVUnregisterDbgTable(psDeviceNode); +ErrorPvzConnectionDeInit: + psDevConfig->psDevNode = NULL; + if (!PVRSRV_VZ_MODE_IS(NATIVE)) + { + PvzConnectionDeInit(); + } +ErrorSysDevDeInit: + SysDevDeInit(psDevConfig); +ErrorDeregisterStats: +#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + /* Close the process statistics */ + PVRSRVStatsDeregisterProcess(hProcessStats); +ErrorFreeDeviceNode: +#endif + OSFreeMemNoStats(psDeviceNode); + + 
return eError; +} + +#if defined(SUPPORT_RGX) +static PVRSRV_ERROR _SetDeviceFlag(const PVRSRV_DEVICE_NODE *psDevice, + const void *psPrivate, IMG_BOOL bValue) +{ + PVRSRV_ERROR eResult = PVRSRV_OK; + IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate); + + PVR_RETURN_IF_INVALID_PARAM(ui32Flag); + + eResult = RGXSetDeviceFlags((PVRSRV_RGXDEV_INFO *)psDevice->pvDevice, + ui32Flag, bValue); + + return eResult; +} + +static PVRSRV_ERROR _ReadDeviceFlag(const PVRSRV_DEVICE_NODE *psDevice, + const void *psPrivate, IMG_BOOL *pbValue) +{ + PVRSRV_ERROR eResult = PVRSRV_OK; + IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate); + IMG_UINT32 ui32State; + + PVR_RETURN_IF_INVALID_PARAM(ui32Flag); + + eResult = RGXGetDeviceFlags((PVRSRV_RGXDEV_INFO *)psDevice->pvDevice, + &ui32State); + + if (PVRSRV_OK == eResult) + { + *pbValue = (ui32State & ui32Flag)? IMG_TRUE: IMG_FALSE; + } + + return eResult; +} +static PVRSRV_ERROR _SetStateFlag(const PVRSRV_DEVICE_NODE *psDevice, + const void *psPrivate, IMG_BOOL bValue) +{ + PVRSRV_ERROR eResult = PVRSRV_OK; + IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate); + + PVR_RETURN_IF_INVALID_PARAM(ui32Flag); + + eResult = RGXStateFlagCtrl((PVRSRV_RGXDEV_INFO *)psDevice->pvDevice, + ui32Flag, NULL, bValue); + + return eResult; +} + +static PVRSRV_ERROR _ReadStateFlag(const PVRSRV_DEVICE_NODE *psDevice, + const void *psPrivate, IMG_BOOL *pbValue) +{ + IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate); + IMG_UINT32 ui32State; + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDevice->pvDevice; + + PVR_RETURN_IF_INVALID_PARAM(ui32Flag); + + ui32State = psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags; + + if (pbValue) + { + *pbValue = (ui32State & ui32Flag)? 
IMG_TRUE: IMG_FALSE; + } + + return PVRSRV_OK; +} +#endif + +PVRSRV_ERROR PVRSRVCommonDeviceInitialise(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + IMG_BOOL bInitSuccesful = IMG_FALSE; +#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + IMG_HANDLE hProcessStats; +#endif + PVRSRV_ERROR eError; + + MULTI_DEVICE_BRINGUP_DPF("PVRSRVCommonDeviceInitialise: DevId %d", psDeviceNode->sDevId.i32OsDeviceID); + + if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_INIT) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Device already initialised", __func__)); + return PVRSRV_ERROR_INIT_FAILURE; + } + + /* Initialise Connection_Data access mechanism */ + dllist_init(&psDeviceNode->sConnections); + eError = OSLockCreate(&psDeviceNode->hConnectionsLock); + PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate"); + + /* Allocate process statistics */ +#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + eError = PVRSRVStatsRegisterProcess(&hProcessStats); + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVStatsRegisterProcess"); +#endif + +#if defined(SUPPORT_RGX) + eError = RGXInit(psDeviceNode); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXInit", Exit); +#endif + +#if defined(SUPPORT_DMA_TRANSFER) + PVRSRVInitialiseDMA(psDeviceNode); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVInitialiseDMA", Exit); +#endif + + bInitSuccesful = IMG_TRUE; + +#if defined(SUPPORT_RGX) +Exit: +#endif + eError = PVRSRVDeviceFinalise(psDeviceNode, bInitSuccesful); + PVR_LOG_IF_ERROR(eError, "PVRSRVDeviceFinalise"); + +#if defined(SUPPORT_RGX) + if (!PVRSRV_VZ_MODE_IS(GUEST)) + { + PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisableClockGating, + _ReadStateFlag, _SetStateFlag, + psDeviceNode, + (void*)((uintptr_t)RGXFWIF_INICFG_DISABLE_CLKGATING_EN)); + PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisableDMOverlap, + _ReadStateFlag, _SetStateFlag, + psDeviceNode, + (void*)((uintptr_t)RGXFWIF_INICFG_DISABLE_DM_OVERLAP)); + 
PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_AssertOnHWRTrigger, + _ReadStateFlag, _SetStateFlag, + psDeviceNode, + (void*)((uintptr_t)RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER)); + PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_AssertOutOfMemory, + _ReadStateFlag, _SetStateFlag, + psDeviceNode, + (void*)((uintptr_t)RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY)); + PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_CheckMList, + _ReadStateFlag, _SetStateFlag, + psDeviceNode, + (void*)((uintptr_t)RGXFWIF_INICFG_CHECK_MLIST_EN)); + } + + PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisableFEDLogging, + _ReadDeviceFlag, _SetDeviceFlag, + psDeviceNode, + (void*)((uintptr_t)RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN)); + PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_ZeroFreelist, + _ReadDeviceFlag, _SetDeviceFlag, + psDeviceNode, + (void*)((uintptr_t)RGXKM_DEVICE_STATE_ZERO_FREELIST)); +#if defined(SUPPORT_VALIDATION) + PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_GPUUnitsPowerChange, + _ReadDeviceFlag, _SetDeviceFlag, + psDeviceNode, + (void*)((uintptr_t)RGXKM_DEVICE_STATE_GPU_UNITS_POWER_CHANGE_EN)); +#endif + PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisablePDumpPanic, + RGXQueryPdumpPanicDisable, RGXSetPdumpPanicDisable, + psDeviceNode, + NULL); +#endif + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) + /* Close the process statistics */ + PVRSRVStatsDeregisterProcess(hProcessStats); +#endif + + return eError; +} + +PVRSRV_ERROR PVRSRVCommonDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_ERROR eError; +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) + IMG_BOOL bForceUnload = IMG_FALSE; + + if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK) + { + bForceUnload = IMG_TRUE; + } +#endif + + MULTI_DEVICE_BRINGUP_DPF("PVRSRVCommonDeviceDestroy: DevId %d", psDeviceNode->sDevId.i32OsDeviceID); + + psPVRSRVData->ui32RegisteredDevices--; + + psDeviceNode->eDevState = 
PVRSRV_DEVICE_STATE_DEINIT; + + if (psDeviceNode->hMemoryContextPageFaultNotifyListLock != NULL) + { + OSWRLockDestroy(psDeviceNode->hMemoryContextPageFaultNotifyListLock); + } + +#if defined(SUPPORT_VALIDATION) + OSLockDestroyNoStats(psDeviceNode->hValidationLock); + psDeviceNode->hValidationLock = NULL; +#endif + +#if defined(SUPPORT_FALLBACK_FENCE_SYNC) + SyncFbDeregisterDevice(psDeviceNode); +#endif + /* Counter part to what gets done in PVRSRVDeviceFinalise */ + if (psDeviceNode->hSyncCheckpointContext) + { + SyncCheckpointContextDestroy(psDeviceNode->hSyncCheckpointContext); + psDeviceNode->hSyncCheckpointContext = NULL; + } + if (psDeviceNode->hSyncPrimContext) + { + if (psDeviceNode->psMMUCacheSyncPrim) + { + PVRSRV_CLIENT_SYNC_PRIM *psSync = psDeviceNode->psMMUCacheSyncPrim; + + /* Ensure there are no pending MMU Cache Ops in progress before freeing this sync. */ + eError = PVRSRVPollForValueKM(psDeviceNode, + psSync->pui32LinAddr, + psDeviceNode->ui32NextMMUInvalidateUpdate-1, + 0xFFFFFFFF, + POLL_FLAG_LOG_ERROR); + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVPollForValueKM"); + + /* Important to set the device node pointer to NULL + * before we free the sync-prim to make sure we don't + * defer the freeing of the sync-prim's page tables itself. + * The sync is used to defer the MMU page table + * freeing. */ + psDeviceNode->psMMUCacheSyncPrim = NULL; + + /* Free general purpose sync primitive */ + SyncPrimFree(psSync); + } + + SyncPrimContextDestroy(psDeviceNode->hSyncPrimContext); + psDeviceNode->hSyncPrimContext = NULL; + } + + eError = PVRSRVPowerLock(psDeviceNode); + if (eError == PVRSRV_OK) + { +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) + /* + * Firmware probably not responding if bForceUnload is set, but we still want to unload the + * driver. 
+ */ + if (!bForceUnload) +#endif + { + /* Force idle device */ + eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, NULL, IMG_TRUE); + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "PVRSRVDeviceIdleRequestKM"); + if (eError != PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED) + { + PVRSRVPowerUnlock(psDeviceNode); + } + return eError; + } + } + + /* Power down the device if necessary */ + eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, + PVRSRV_DEV_POWER_STATE_OFF, + IMG_TRUE, IMG_FALSE); + PVRSRVPowerUnlock(psDeviceNode); + + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "PVRSRVSetDevicePowerStateKM"); + + PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); + + /* + * If the driver is okay then return the error, otherwise we can ignore + * this error. + */ + if (PVRSRVGetPVRSRVData()->eServicesState == PVRSRV_SERVICES_STATE_OK) + { + return eError; + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Will continue to unregister as driver status is not OK", + __func__)); + } + } + } + +#if defined(PVR_TESTING_UTILS) + TUtilsDeinit(psDeviceNode); +#endif + +#if defined(SUPPORT_LINUX_DVFS) && !defined(NO_HARDWARE) + DeinitDVFS(psDeviceNode); +#endif + + HTBDeviceDestroy(psDeviceNode); + + if (psDeviceNode->hThreadsDbgReqNotify) + { + PVRSRVUnregisterDbgRequestNotify(psDeviceNode->hThreadsDbgReqNotify); + } + + if (psDeviceNode->hDbgReqNotify) + { + PVRSRVUnregisterDbgRequestNotify(psDeviceNode->hDbgReqNotify); + } + + SyncCheckpointDeinit(psDeviceNode); + + SyncServerDeinit(psDeviceNode); + +#if defined(SUPPORT_RGX) + DevDeInitRGX(psDeviceNode); +#endif + + List_PVRSRV_DEVICE_NODE_Remove(psDeviceNode); + + PVRSRVPhysMemHeapsDeinit(psDeviceNode); + PVRSRVPowerLockDeInit(psDeviceNode); + + PVRSRVUnregisterDbgTable(psDeviceNode); + + /* Release the Connection-Data lock as late as possible. 
*/ + if (psDeviceNode->hConnectionsLock) + { + eError = OSLockDestroy(psDeviceNode->hConnectionsLock); + PVR_LOG_IF_ERROR(eError, "ConnectionLock destruction failed"); + } + + psDeviceNode->psDevConfig->psDevNode = NULL; + + if (!PVRSRV_VZ_MODE_IS(NATIVE)) + { + PvzConnectionDeInit(); + } + SysDevDeInit(psDeviceNode->psDevConfig); + + OSFreeMemNoStats(psDeviceNode); + + return PVRSRV_OK; +} + +static PVRSRV_ERROR _LMA_DoPhyContigPagesAlloc(RA_ARENA *pArena, + size_t uiSize, + PG_HANDLE *psMemHandle, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_PID uiPid) +{ + RA_BASE_T uiCardAddr = 0; + RA_LENGTH_T uiActualSize; + PVRSRV_ERROR eError; +#if defined(DEBUG) + static IMG_UINT32 ui32MaxLog2NumPages = 4; /* 16 pages => 64KB */ +#endif /* defined(DEBUG) */ + + IMG_UINT32 ui32Log2NumPages = 0; + + PVR_ASSERT(uiSize != 0); + ui32Log2NumPages = OSGetOrder(uiSize); + uiSize = (1 << ui32Log2NumPages) * OSGetPageSize(); + + eError = RA_Alloc(pArena, + uiSize, + RA_NO_IMPORT_MULTIPLIER, + 0, /* No flags */ + uiSize, + "LMA_PhyContigPagesAlloc", + &uiCardAddr, + &uiActualSize, + NULL); /* No private handle */ + + PVR_ASSERT(uiSize == uiActualSize); + + psMemHandle->u.ui64Handle = uiCardAddr; + psDevPAddr->uiAddr = (IMG_UINT64) uiCardAddr; + + if (PVRSRV_OK == eError) + { +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if !defined(PVRSRV_ENABLE_MEMORY_STATS) + PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA, + uiSize, + uiCardAddr, + uiPid); +#else + IMG_CPU_PHYADDR sCpuPAddr; + sCpuPAddr.uiAddr = psDevPAddr->uiAddr; + + PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA, + NULL, + sCpuPAddr, + uiSize, + NULL, + uiPid + DEBUG_MEMSTATS_VALUES); +#endif +#endif +#if defined(SUPPORT_GPUVIRT_VALIDATION) + PVR_DPF((PVR_DBG_MESSAGE, + "%s: (GPU Virtualisation) Allocated 0x" IMG_SIZE_FMTSPECX " at 0x%" IMG_UINT64_FMTSPECX ", Arena ID %u", + __func__, uiSize, psDevPAddr->uiAddr, psMemHandle->uiOSid)); +#endif + +#if defined(DEBUG) + 
PVR_ASSERT((ui32Log2NumPages <= ui32MaxLog2NumPages)); + if (ui32Log2NumPages > ui32MaxLog2NumPages) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: ui32MaxLog2NumPages = %u, increasing to %u", __func__, + ui32MaxLog2NumPages, ui32Log2NumPages )); + ui32MaxLog2NumPages = ui32Log2NumPages; + } +#endif /* defined(DEBUG) */ + psMemHandle->uiOrder = ui32Log2NumPages; + } + + return eError; +} + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +PVRSRV_ERROR LMA_PhyContigPagesAllocGPV(PVRSRV_DEVICE_NODE *psDevNode, size_t uiSize, + PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr, + IMG_UINT32 ui32OSid, IMG_PID uiPid) +{ + RA_ARENA *pArena; + IMG_UINT32 ui32Log2NumPages = 0; + PVRSRV_ERROR eError; + + PVR_ASSERT(uiSize != 0); + ui32Log2NumPages = OSGetOrder(uiSize); + uiSize = (1 << ui32Log2NumPages) * OSGetPageSize(); + + PVR_ASSERT(ui32OSid < GPUVIRT_VALIDATION_NUM_OS); + if (ui32OSid >= GPUVIRT_VALIDATION_NUM_OS) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Arena index %u defaulting to 0", + __func__, ui32OSid)); + ui32OSid = 0; + } + pArena = psDevNode->psOSidSubArena[ui32OSid]; + + if (psMemHandle->uiOSid != ui32OSid) + { + PVR_LOG(("%s: Unexpected OSid value %u - expecting %u", __func__, + psMemHandle->uiOSid, ui32OSid)); + } + + psMemHandle->uiOSid = ui32OSid; /* For Free() use */ + + eError = _LMA_DoPhyContigPagesAlloc(pArena, uiSize, psMemHandle, + psDevPAddr, uiPid); + PVR_LOG_IF_ERROR(eError, "_LMA_DoPhyContigPagesAlloc"); + + return eError; +} +#endif + +PVRSRV_ERROR LMA_PhyContigPagesAlloc(PVRSRV_DEVICE_NODE *psDevNode, size_t uiSize, + PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr, + IMG_PID uiPid) +{ + PVRSRV_ERROR eError; + + RA_ARENA *pArena = psDevNode->sDevMMUPxSetup.psPxRA; + IMG_UINT32 ui32Log2NumPages = 0; + + PVR_ASSERT(uiSize != 0); + ui32Log2NumPages = OSGetOrder(uiSize); + uiSize = (1 << ui32Log2NumPages) * OSGetPageSize(); + + eError = _LMA_DoPhyContigPagesAlloc(pArena, uiSize, psMemHandle, + psDevPAddr, uiPid); + PVR_LOG_IF_ERROR(eError, 
"_LMA_DoPhyContigPagesAlloc"); + + return eError; +} + +void LMA_PhyContigPagesFree(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle) +{ + RA_BASE_T uiCardAddr = (RA_BASE_T) psMemHandle->u.ui64Handle; + RA_ARENA *pArena; +#if defined(SUPPORT_GPUVIRT_VALIDATION) + IMG_UINT32 ui32OSid = psMemHandle->uiOSid; + + /* + * The Arena ID is set by the originating allocation, and maintained via + * the call stacks into this function. We have a limited range of IDs + * and if the passed value falls outside this we simply treat it as a + * 'global' arena ID of 0. This is where all default OS-specific allocations + * are created. + */ + PVR_ASSERT(ui32OSid < GPUVIRT_VALIDATION_NUM_OS); + if (ui32OSid >= GPUVIRT_VALIDATION_NUM_OS) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Arena index %u PhysAddr 0x%" + IMG_UINT64_FMTSPECx " Reverting to Arena 0", __func__, + ui32OSid, uiCardAddr)); + /* + * No way of determining what we're trying to free so default to the + * global default arena index 0. + */ + ui32OSid = 0; + } + + pArena = psDevNode->psOSidSubArena[ui32OSid]; + + PVR_DPF((PVR_DBG_MESSAGE, "%s: (GPU Virtualisation) Freeing 0x%" + IMG_UINT64_FMTSPECx ", Arena %u", __func__, + uiCardAddr, ui32OSid)); + +#else + pArena = psDevNode->sDevMMUPxSetup.psPxRA; +#endif + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if !defined(PVRSRV_ENABLE_MEMORY_STATS) + PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA, + (IMG_UINT64)uiCardAddr); +#else + PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA, + (IMG_UINT64)uiCardAddr, + OSGetCurrentClientProcessIDKM()); +#endif +#endif + + RA_Free(pArena, uiCardAddr); + psMemHandle->uiOrder = 0; +} + +PVRSRV_ERROR LMA_PhyContigPagesMap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle, + size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr, + void **pvPtr) +{ + IMG_CPU_PHYADDR sCpuPAddr; + IMG_UINT32 ui32NumPages = (1 << psMemHandle->uiOrder); + PVR_UNREFERENCED_PARAMETER(psMemHandle); + 
PVR_UNREFERENCED_PARAMETER(uiSize); + + PhysHeapDevPAddrToCpuPAddr(psDevNode->apsPhysHeap[PVRSRV_PHYS_HEAP_GPU_LOCAL], 1, &sCpuPAddr, psDevPAddr); + *pvPtr = OSMapPhysToLin(sCpuPAddr, + ui32NumPages * OSGetPageSize(), + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC); + PVR_RETURN_IF_NOMEM(*pvPtr); + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if !defined(PVRSRV_ENABLE_MEMORY_STATS) + PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, + ui32NumPages * OSGetPageSize(), + OSGetCurrentClientProcessIDKM()); +#else + { + PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, + *pvPtr, + sCpuPAddr, + ui32NumPages * OSGetPageSize(), + NULL, + OSGetCurrentClientProcessIDKM() + DEBUG_MEMSTATS_VALUES); + } +#endif +#endif + return PVRSRV_OK; +} + +void LMA_PhyContigPagesUnmap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle, + void *pvPtr) +{ + IMG_UINT32 ui32NumPages = (1 << psMemHandle->uiOrder); + PVR_UNREFERENCED_PARAMETER(psMemHandle); + PVR_UNREFERENCED_PARAMETER(psDevNode); + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#if !defined(PVRSRV_ENABLE_MEMORY_STATS) + PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, + ui32NumPages * OSGetPageSize(), + OSGetCurrentClientProcessIDKM()); +#else + PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, + (IMG_UINT64)(uintptr_t)pvPtr, + OSGetCurrentClientProcessIDKM()); +#endif +#endif + + OSUnMapPhysToLin(pvPtr, ui32NumPages * OSGetPageSize()); +} + +PVRSRV_ERROR LMA_PhyContigPagesClean(PVRSRV_DEVICE_NODE *psDevNode, + PG_HANDLE *psMemHandle, + IMG_UINT32 uiOffset, + IMG_UINT32 uiLength) +{ + /* No need to flush because we map as uncached */ + PVR_UNREFERENCED_PARAMETER(psDevNode); + PVR_UNREFERENCED_PARAMETER(psMemHandle); + PVR_UNREFERENCED_PARAMETER(uiOffset); + PVR_UNREFERENCED_PARAMETER(uiLength); + + return PVRSRV_OK; +} + +/**************************************************************************/ /*! 
+@Function PVRSRVDeviceFinalise +@Description Performs the final parts of device initialisation. +@Input psDeviceNode Device node of the device to finish + initialising +@Input bInitSuccessful Whether or not device specific + initialisation was successful +@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise +*/ /***************************************************************************/ +PVRSRV_ERROR PVRSRVDeviceFinalise(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_BOOL bInitSuccessful) +{ + PVRSRV_ERROR eError; + __maybe_unused PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)(psDeviceNode->pvDevice); + + if (bInitSuccessful) + { + eError = SyncCheckpointContextCreate(psDeviceNode, + &psDeviceNode->hSyncCheckpointContext); + PVR_LOG_GOTO_IF_ERROR(eError, "SyncCheckpointContextCreate", ErrorExit); +#if defined(SUPPORT_FALLBACK_FENCE_SYNC) + eError = SyncFbRegisterDevice(psDeviceNode); + PVR_GOTO_IF_ERROR(eError, ErrorExit); +#endif + eError = SyncPrimContextCreate(psDeviceNode, + &psDeviceNode->hSyncPrimContext); + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "SyncPrimContextCreate"); + SyncCheckpointContextDestroy(psDeviceNode->hSyncCheckpointContext); + goto ErrorExit; + } + + /* Allocate MMU cache invalidate sync */ + eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext, + &psDeviceNode->psMMUCacheSyncPrim, + "pvrsrv dev MMU cache"); + PVR_LOG_GOTO_IF_ERROR(eError, "SyncPrimAlloc", ErrorExit); + + /* Set the sync prim value to a much higher value near the + * wrapping range. This is so any wrapping bugs would be + * seen early in the driver start-up. + */ + SyncPrimSet(psDeviceNode->psMMUCacheSyncPrim, 0xFFFFFFF6UL); + + /* Next update value will be 0xFFFFFFF7 since sync prim starts with 0xFFFFFFF6 */ + psDeviceNode->ui32NextMMUInvalidateUpdate = 0xFFFFFFF7UL; + + eError = PVRSRVPowerLock(psDeviceNode); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVPowerLock", ErrorExit); + + /* + * Always ensure a single power on command appears in the pdump. 
This + * should be the only power related call outside of PDUMPPOWCMDSTART + * and PDUMPPOWCMDEND. + */ + eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, + PVRSRV_DEV_POWER_STATE_ON, IMG_TRUE, + IMG_FALSE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to set device %p power state to 'on' (%s)", + __func__, psDeviceNode, PVRSRVGetErrorString(eError))); + PVRSRVPowerUnlock(psDeviceNode); + goto ErrorExit; + } + +#if defined(SUPPORT_EXTRA_METASP_DEBUG) + eError = ValidateFWOnLoad(psDeviceNode->pvDevice); + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "ValidateFWOnLoad"); + PVRSRVPowerUnlock(psDeviceNode); + return eError; + } +#endif + + eError = PVRSRVDevInitCompatCheck(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed compatibility check for device %p (%s)", + __func__, psDeviceNode, PVRSRVGetErrorString(eError))); + PVRSRVPowerUnlock(psDeviceNode); + PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); + goto ErrorExit; + } + + PDUMPPOWCMDSTART(); + + /* Force the device to idle if its default power state is off */ + eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, + &PVRSRVDeviceIsDefaultStateOFF, + IMG_TRUE); + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "PVRSRVDeviceIdleRequestKM"); + if (eError != PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED) + { + PVRSRVPowerUnlock(psDeviceNode); + } + goto ErrorExit; + } + + /* Place device into its default power state. 
*/ + eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, + PVRSRV_DEV_POWER_STATE_DEFAULT, + IMG_TRUE, IMG_FALSE); + PDUMPPOWCMDEND(); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to set device %p into its default power state (%s)", + __func__, psDeviceNode, PVRSRVGetErrorString(eError))); + + PVRSRVPowerUnlock(psDeviceNode); + goto ErrorExit; + } + + PVRSRVPowerUnlock(psDeviceNode); + + /* + * If PDUMP is enabled and RGX device is supported, then initialise the + * performance counters that can be further modified in PDUMP. Then, + * before ending the init phase of the pdump, drain the commands put in + * the kCCB during the init phase. + */ +#if defined(SUPPORT_RGX) +#if defined(PDUMP) + { + eError = RGXInitHWPerfCounters(psDeviceNode); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXInitHWPerfCounters", ErrorExit); + + eError = RGXPdumpDrainKCCB(psDevInfo, + psDevInfo->psKernelCCBCtl->ui32WriteOffset); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXPdumpDrainKCCB", ErrorExit); + } +#endif +#endif /* defined(SUPPORT_RGX) */ + /* Now that the device(s) are fully initialised set them as active */ + psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_ACTIVE; + eError = PVRSRV_OK; + } + else + { + /* Initialisation failed so set the device(s) into a bad state */ + psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_BAD; + eError = PVRSRV_ERROR_NOT_INITIALISED; + } + + /* Give PDump control a chance to end the init phase, depends on OS */ + PDumpStopInitPhase(); + + return eError; + +ErrorExit: + /* Initialisation failed so set the device(s) into a bad state */ + psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_BAD; + + return eError; +} + +PVRSRV_ERROR PVRSRVDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + /* Only check devices which specify a compatibility check callback */ + if (psDeviceNode->pfnInitDeviceCompatCheck) + return psDeviceNode->pfnInitDeviceCompatCheck(psDeviceNode); + else + return PVRSRV_OK; +} + +/* + PollForValueKM +*/ +static +PVRSRV_ERROR 
PollForValueKM (volatile IMG_UINT32 __iomem * pui32LinMemAddr, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + IMG_UINT32 ui32Timeoutus, + IMG_UINT32 ui32PollPeriodus, + POLL_FLAGS ePollFlags) +{ +#if defined(NO_HARDWARE) + PVR_UNREFERENCED_PARAMETER(pui32LinMemAddr); + PVR_UNREFERENCED_PARAMETER(ui32Value); + PVR_UNREFERENCED_PARAMETER(ui32Mask); + PVR_UNREFERENCED_PARAMETER(ui32Timeoutus); + PVR_UNREFERENCED_PARAMETER(ui32PollPeriodus); + PVR_UNREFERENCED_PARAMETER(ePollFlags); + return PVRSRV_OK; +#else + IMG_UINT32 ui32ActualValue = 0xFFFFFFFFU; /* Initialiser only required to prevent incorrect warning */ + + LOOP_UNTIL_TIMEOUT(ui32Timeoutus) + { + ui32ActualValue = OSReadHWReg32((void __iomem *)pui32LinMemAddr, 0) & ui32Mask; + + if (ui32ActualValue == ui32Value) + { + return PVRSRV_OK; + } + + if (gpsPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK) + { + return PVRSRV_ERROR_TIMEOUT; + } + + OSWaitus(ui32PollPeriodus); + } END_LOOP_UNTIL_TIMEOUT(); + + if (BITMASK_HAS(ePollFlags, POLL_FLAG_LOG_ERROR)) + { + PVR_DPF((PVR_DBG_ERROR, + "PollForValueKM: Timeout. Expected 0x%x but found 0x%x (mask 0x%x).", + ui32Value, ui32ActualValue, ui32Mask)); + } + + return PVRSRV_ERROR_TIMEOUT; +#endif /* NO_HARDWARE */ +} + + +/* + PVRSRVPollForValueKM +*/ +PVRSRV_ERROR PVRSRVPollForValueKM (PVRSRV_DEVICE_NODE *psDevNode, + volatile IMG_UINT32 __iomem *pui32LinMemAddr, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + POLL_FLAGS ePollFlags) +{ + PVRSRV_ERROR eError; + + eError = PollForValueKM(pui32LinMemAddr, ui32Value, ui32Mask, + MAX_HW_TIME_US, + MAX_HW_TIME_US/WAIT_TRY_COUNT, + ePollFlags); + if (eError != PVRSRV_OK && BITMASK_HAS(ePollFlags, POLL_FLAG_DEBUG_DUMP)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed! 
Error(%s) CPU linear address(%p) Expected value(%u)", + __func__, PVRSRVGetErrorString(eError), + pui32LinMemAddr, ui32Value)); + PVRSRVDebugRequest(psDevNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); + } + + return eError; +} + +PVRSRV_ERROR +PVRSRVWaitForValueKM(volatile IMG_UINT32 __iomem *pui32LinMemAddr, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask) +{ +#if defined(NO_HARDWARE) + PVR_UNREFERENCED_PARAMETER(pui32LinMemAddr); + PVR_UNREFERENCED_PARAMETER(ui32Value); + PVR_UNREFERENCED_PARAMETER(ui32Mask); + return PVRSRV_OK; +#else + + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + IMG_HANDLE hOSEvent; + PVRSRV_ERROR eError; + PVRSRV_ERROR eErrorWait; + IMG_UINT32 ui32ActualValue; + + eError = OSEventObjectOpen(psPVRSRVData->hGlobalEventObject, &hOSEvent); + PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectOpen", EventObjectOpenError); + + eError = PVRSRV_ERROR_TIMEOUT; /* Initialiser for following loop */ + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + ui32ActualValue = (OSReadDeviceMem32(pui32LinMemAddr) & ui32Mask); + + if (ui32ActualValue == ui32Value) + { + /* Expected value has been found */ + eError = PVRSRV_OK; + break; + } + else if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK) + { + /* Services in bad state, don't wait any more */ + eError = PVRSRV_ERROR_NOT_READY; + break; + } + else + { + /* wait for event and retry */ + eErrorWait = OSEventObjectWait(hOSEvent); + if (eErrorWait != PVRSRV_OK && eErrorWait != PVRSRV_ERROR_TIMEOUT) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Failed with error %d. Found value 0x%x but was expected " + "to be 0x%x (Mask 0x%08x). Retrying", + __func__, + eErrorWait, + ui32ActualValue, + ui32Value, + ui32Mask)); + } + } + } END_LOOP_UNTIL_TIMEOUT(); + + OSEventObjectClose(hOSEvent); + + /* One last check in case the object wait ended after the loop timeout... 
*/ + if (eError != PVRSRV_OK && + (OSReadDeviceMem32(pui32LinMemAddr) & ui32Mask) == ui32Value) + { + eError = PVRSRV_OK; + } + + /* Provide event timeout information to aid the Device Watchdog Thread... */ + if (eError == PVRSRV_OK) + { + psPVRSRVData->ui32GEOConsecutiveTimeouts = 0; + } + else if (eError == PVRSRV_ERROR_TIMEOUT) + { + psPVRSRVData->ui32GEOConsecutiveTimeouts++; + } + +EventObjectOpenError: + + return eError; + +#endif /* NO_HARDWARE */ +} + +int PVRSRVGetDriverStatus(void) +{ + return PVRSRVGetPVRSRVData()->eServicesState; +} + +/* + PVRSRVSystemHasCacheSnooping +*/ +IMG_BOOL PVRSRVSystemHasCacheSnooping(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + if ((psDevConfig->eCacheSnoopingMode != PVRSRV_DEVICE_SNOOP_NONE) && + (psDevConfig->eCacheSnoopingMode != PVRSRV_DEVICE_SNOOP_EMULATED)) + { + return IMG_TRUE; + } + return IMG_FALSE; +} + +IMG_BOOL PVRSRVSystemSnoopingIsEmulated(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + if (psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_EMULATED) + { + return IMG_TRUE; + } + return IMG_FALSE; +} + +IMG_BOOL PVRSRVSystemSnoopingOfCPUCache(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + if ((psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_CPU_ONLY) || + (psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_CROSS)) + { + return IMG_TRUE; + } + return IMG_FALSE; +} + +IMG_BOOL PVRSRVSystemSnoopingOfDeviceCache(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + if ((psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_DEVICE_ONLY) || + (psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_CROSS)) + { + return IMG_TRUE; + } + return IMG_FALSE; +} + +IMG_BOOL PVRSRVSystemHasNonMappableLocalMemory(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + return psDevConfig->bHasNonMappableLocalMemory; +} + +/* + PVRSRVSystemWaitCycles +*/ +void PVRSRVSystemWaitCycles(PVRSRV_DEVICE_CONFIG *psDevConfig, IMG_UINT32 ui32Cycles) +{ + /* Delay in us */ + IMG_UINT32 ui32Delayus = 1; + + /* obtain the device freq */ + if (psDevConfig->pfnClockFreqGet != 
NULL) + { + IMG_UINT32 ui32DeviceFreq; + + ui32DeviceFreq = psDevConfig->pfnClockFreqGet(psDevConfig->hSysData); + + ui32Delayus = (ui32Cycles*1000000)/ui32DeviceFreq; + + if (ui32Delayus == 0) + { + ui32Delayus = 1; + } + } + + OSWaitus(ui32Delayus); +} + +static void * +PVRSRVSystemInstallDeviceLISR_Match_AnyVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, + va_list va) +{ + void *pvOSDevice = va_arg(va, void *); + + if (psDeviceNode->psDevConfig->pvOSDevice == pvOSDevice) + { + return psDeviceNode; + } + + return NULL; +} + +PVRSRV_ERROR PVRSRVSystemInstallDeviceLISR(void *pvOSDevice, + IMG_UINT32 ui32IRQ, + const IMG_CHAR *pszName, + PFN_LISR pfnLISR, + void *pvData, + IMG_HANDLE *phLISRData) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DEVICE_NODE *psDeviceNode; + + psDeviceNode = + List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList, + &PVRSRVSystemInstallDeviceLISR_Match_AnyVaCb, + pvOSDevice); + if (!psDeviceNode) + { + /* Device can't be found in the list so it isn't in the system */ + PVR_DPF((PVR_DBG_ERROR, "%s: device %p with irq %d is not present", + __func__, pvOSDevice, ui32IRQ)); + return PVRSRV_ERROR_INVALID_DEVICE; + } + + return SysInstallDeviceLISR(psDeviceNode->psDevConfig->hSysData, ui32IRQ, + pszName, pfnLISR, pvData, phLISRData); +} + +PVRSRV_ERROR PVRSRVSystemUninstallDeviceLISR(IMG_HANDLE hLISRData) +{ + return SysUninstallDeviceLISR(hLISRData); +} + +#if defined(SUPPORT_GPUVIRT_VALIDATION) && defined(EMULATOR) +/* functions only used on rogue, but header defining them is common */ +void SetAxiProtOSid(IMG_UINT32 ui32OSid, IMG_BOOL bState) +{ + SysSetAxiProtOSid(ui32OSid, bState); +} + +void SetTrustedDeviceAceEnabled(void) +{ + SysSetTrustedDeviceAceEnabled(); +} +#endif + +#if defined(SUPPORT_RGX) +PVRSRV_ERROR PVRSRVCreateHWPerfHostThread(IMG_UINT32 ui32Timeout) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + if (!ui32Timeout) + return PVRSRV_ERROR_INVALID_PARAMS; + + 
OSLockAcquire(gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock); + + /* Create only once */ + if (gpsPVRSRVData->hHWPerfHostPeriodicThread == NULL) + { + /* Create the HWPerf event object */ + eError = OSEventObjectCreate("PVRSRV_HWPERFHOSTPERIODIC_EVENTOBJECT", &gpsPVRSRVData->hHWPerfHostPeriodicEvObj); + PVR_LOG_IF_ERROR(eError, "OSEventObjectCreate"); + + if (eError == PVRSRV_OK) + { + gpsPVRSRVData->bHWPerfHostThreadStop = IMG_FALSE; + gpsPVRSRVData->ui32HWPerfHostThreadTimeout = ui32Timeout; + /* Create a thread which is used to periodically emit host stream packets */ + eError = OSThreadCreate(&gpsPVRSRVData->hHWPerfHostPeriodicThread, + "pvr_hwperf_host", + HWPerfPeriodicHostEventsThread, + NULL, IMG_TRUE, gpsPVRSRVData); + PVR_LOG_IF_ERROR(eError, "OSThreadCreate"); + } + } + /* If the thread has already been created then just update the timeout and wake up thread */ + else + { + gpsPVRSRVData->ui32HWPerfHostThreadTimeout = ui32Timeout; + eError = OSEventObjectSignal(gpsPVRSRVData->hHWPerfHostPeriodicEvObj); + PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); + } + + OSLockRelease(gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock); + return eError; +} + +PVRSRV_ERROR PVRSRVDestroyHWPerfHostThread(void) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + OSLockAcquire(gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock); + + /* Stop and cleanup the HWPerf periodic thread */ + if (gpsPVRSRVData->hHWPerfHostPeriodicThread) + { + if (gpsPVRSRVData->hHWPerfHostPeriodicEvObj) + { + gpsPVRSRVData->bHWPerfHostThreadStop = IMG_TRUE; + eError = OSEventObjectSignal(gpsPVRSRVData->hHWPerfHostPeriodicEvObj); + PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); + } + LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US) + { + eError = OSThreadDestroy(gpsPVRSRVData->hHWPerfHostPeriodicThread); + if (PVRSRV_OK == eError) + { + gpsPVRSRVData->hHWPerfHostPeriodicThread = NULL; + break; + } + OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + 
PVR_LOG_IF_ERROR(eError, "OSThreadDestroy"); + + if (gpsPVRSRVData->hHWPerfHostPeriodicEvObj) + { + eError = OSEventObjectDestroy(gpsPVRSRVData->hHWPerfHostPeriodicEvObj); + gpsPVRSRVData->hHWPerfHostPeriodicEvObj = NULL; + PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy"); + } + } + + OSLockRelease(gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock); + return eError; +} +#endif + +/* + * Scan the list of known devices until we find the specific instance or + * exhaust the list + */ +PVRSRV_DEVICE_NODE *PVRSRVGetDeviceInstance(IMG_UINT32 uiInstance) +{ + PVRSRV_DEVICE_NODE *psDevNode; + + if (uiInstance >= gpsPVRSRVData->ui32RegisteredDevices) + { + return NULL; + } + for (psDevNode = gpsPVRSRVData->psDeviceNodeList; + psDevNode != NULL; psDevNode = psDevNode->psNext) + { + if (uiInstance == psDevNode->sDevId.ui32InternalID) + { + return psDevNode; + } + } + + return NULL; +} + +/***************************************************************************** + End of file (pvrsrv.c) +*****************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/pvrsrv.h b/drivers/gpu/drm/phytium/octopus/pvrsrv.h new file mode 100644 index 000000000000..25730ee4580f --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvrsrv.h @@ -0,0 +1,551 @@ +/*************************************************************************/ /*! +@File +@Title PhytiumVR services server header file +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef PVRSRV_H +#define PVRSRV_H + +#include "connection_server.h" +#include "pvrsrv_pool.h" +#include "device.h" +#include "power.h" +#include "syscommon.h" +#include "sysinfo.h" +#include "physheap.h" +#include "cache_ops.h" +#include "pvr_notifier.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#if defined(__KERNEL__) && defined(__linux__) && !defined(__GENKSYMS__) +#define __pvrsrv_defined_struct_enum__ +#include <services_kernel_client.h> +#endif + + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +#include "virt_validation_defs.h" +#endif + +#include "dma_support.h" +#include "vz_vmm_pvz.h" + +/*! + * For OSThreadDestroy(), which may require a retry + * Try for 100 ms to destroy an OS thread before failing + */ +#define OS_THREAD_DESTROY_TIMEOUT_US 100000ULL +#define OS_THREAD_DESTROY_RETRY_COUNT 10 + +typedef enum _POLL_FLAGS_ +{ + POLL_FLAG_NONE = 0, /* No message or dump is printed on poll timeout */ + POLL_FLAG_LOG_ERROR = 1, /* Log error on poll timeout */ + POLL_FLAG_DEBUG_DUMP = 2 /* Print debug dump on poll timeout */ +} POLL_FLAGS; + +typedef struct _BUILD_INFO_ +{ + IMG_UINT32 ui32BuildOptions; + IMG_UINT32 ui32BuildVersion; + IMG_UINT32 ui32BuildRevision; + IMG_UINT32 ui32BuildType; +#define BUILD_TYPE_DEBUG 0 +#define BUILD_TYPE_RELEASE 1 + /* The above fields are self explanatory */ + /* B.V.N.C can be added later if required */ +} BUILD_INFO; + +typedef struct _DRIVER_INFO_ +{ + BUILD_INFO sUMBuildInfo; + BUILD_INFO sKMBuildInfo; + IMG_UINT8 ui8UMSupportedArch; + IMG_UINT8 ui8KMBitArch; + +#define BUILD_ARCH_64BIT (1 << 0) +#define BUILD_ARCH_32BIT (1 << 1) +#define BUILD_ARCH_BOTH (BUILD_ARCH_32BIT | BUILD_ARCH_64BIT) + IMG_BOOL bIsNoMatch; +}DRIVER_INFO; + +#if defined(SUPPORT_VALIDATION) && defined(__linux__) +typedef struct MEM_LEAK_INTERVALS_TAG +{ + IMG_UINT32 ui32OSAlloc; + IMG_UINT32 ui32GPU; + IMG_UINT32 ui32MMU; +} MEM_LEAK_INTERVALS; +#endif + 
+typedef struct PVRSRV_DATA_TAG +{ + PVRSRV_DRIVER_MODE eDriverMode; /*!< Driver mode (i.e. native, host or guest) */ + IMG_BOOL bForceApphintDriverMode; /*!< Indicate if driver mode is forced via apphint */ + DRIVER_INFO sDriverInfo; + IMG_UINT32 ui32RegisteredDevices; + IMG_UINT32 ui32DPFErrorCount; /*!< Number of Fatal/Error DPFs */ + + PVRSRV_DEVICE_NODE *psDeviceNodeList; /*!< List head of device nodes */ + PVRSRV_DEVICE_NODE *psHostMemDeviceNode; /*!< DeviceNode to be used for device independent + host based memory allocations where the DevMem + framework is to be used e.g. TL */ + PVRSRV_SERVICES_STATE eServicesState; /*!< global driver state */ + + HASH_TABLE *psProcessHandleBase_Table; /*!< Hash table with process handle bases */ + POS_LOCK hProcessHandleBase_Lock; /*!< Lock for the process handle base table */ + PVRSRV_HANDLE_BASE *psProcessHandleBaseBeingFreed; /*!< Pointer to process handle base currently being freed */ + + IMG_HANDLE hGlobalEventObject; /*!< OS Global Event Object */ + IMG_UINT32 ui32GEOConsecutiveTimeouts; /*!< OS Global Event Object Timeouts */ + + IMG_HANDLE hCleanupThread; /*!< Cleanup thread */ + IMG_HANDLE hCleanupEventObject; /*!< Event object to drive cleanup thread */ + POS_SPINLOCK hCleanupThreadWorkListLock; /*!< Lock protecting the cleanup thread work list */ + DLLIST_NODE sCleanupThreadWorkList; /*!< List of work for the cleanup thread */ + IMG_PID cleanupThreadPid; /*!< Cleanup thread process id */ + ATOMIC_T i32NumCleanupItems; /*!< Number of items in cleanup thread work list */ + + IMG_HANDLE hDevicesWatchdogThread; /*!< Devices watchdog thread */ + IMG_HANDLE hDevicesWatchdogEvObj; /*! Event object to drive devices watchdog thread */ + volatile IMG_UINT32 ui32DevicesWatchdogPwrTrans; /*! Number of off -> on power state transitions */ +#if !defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) + volatile IMG_UINT32 ui32DevicesWatchdogTimeout; /*! 
Timeout for the Devices watchdog Thread */ +#endif +#ifdef PVR_TESTING_UTILS + volatile IMG_UINT32 ui32DevicesWdWakeupCounter; /* Need this for the unit tests. */ +#endif + +#if defined(SUPPORT_AUTOVZ) + IMG_HANDLE hAutoVzWatchdogThread; /*!< Devices watchdog thread */ + IMG_HANDLE hAutoVzWatchdogEvObj; /*! Event object to drive devices watchdog thread */ +#endif + + POS_LOCK hHWPerfHostPeriodicThread_Lock; /*!< Lock for the HWPerf Host periodic thread */ + IMG_HANDLE hHWPerfHostPeriodicThread; /*!< HWPerf Host periodic thread */ + IMG_HANDLE hHWPerfHostPeriodicEvObj; /*! Event object to drive HWPerf thread */ + volatile IMG_BOOL bHWPerfHostThreadStop; + IMG_UINT32 ui32HWPerfHostThreadTimeout; + + IMG_HANDLE hPvzConnection; /*!< PVZ connection used for cross-VM hyper-calls */ + POS_LOCK hPvzConnectionLock; /*!< Lock protecting PVZ connection */ + IMG_BOOL abVmOnline[RGX_NUM_OS_SUPPORTED]; + + IMG_BOOL bUnload; /*!< Driver unload is in progress */ + + IMG_HANDLE hTLCtrlStream; /*! Control plane for TL streams */ + + IMG_HANDLE hDriverThreadEventObject; /*! Event object relating to multi-threading in the Server */ + IMG_BOOL bDriverSuspended; /*! if TRUE, the driver is suspended and new threads should not enter */ + ATOMIC_T iNumActiveDriverThreads; /*! Number of threads active in the Server */ + + PMR *psInfoPagePMR; /*! Handle to exportable PMR of the information page. */ + IMG_UINT32 *pui32InfoPage; /*! CPU memory mapping for information page. */ + DEVMEM_MEMDESC *psInfoPageMemDesc; /*! Memory descriptor of the information page. */ + POS_LOCK hInfoPageLock; /*! Lock guarding access to information page. */ + +#if defined(SUPPORT_VALIDATION) && defined(__linux__) + MEM_LEAK_INTERVALS sMemLeakIntervals; /*!< How often certain memory leak types will trigger */ +#endif +} PVRSRV_DATA; + + +/*! 
+****************************************************************************** + @Function PVRSRVGetPVRSRVData + + @Description Get a pointer to the global data + + @Return PVRSRV_DATA * +******************************************************************************/ +PVRSRV_DATA *PVRSRVGetPVRSRVData(void); + +#define PVRSRV_KM_ERRORS (PVRSRVGetPVRSRVData()->ui32DPFErrorCount) +#define PVRSRV_ERROR_LIMIT_REACHED (PVRSRV_KM_ERRORS == IMG_UINT32_MAX) +#define PVRSRV_REPORT_ERROR() do { if (!(PVRSRV_ERROR_LIMIT_REACHED)) { PVRSRVGetPVRSRVData()->ui32DPFErrorCount++; } } while (0) + +#define PVRSRV_VZ_MODE_IS(_expr) (DRIVER_MODE_##_expr == PVRSRVGetPVRSRVData()->eDriverMode) +#define PVRSRV_VZ_RETN_IF_MODE(_expr) do { if ( PVRSRV_VZ_MODE_IS(_expr)) { return; } } while (0) +#define PVRSRV_VZ_RETN_IF_NOT_MODE(_expr) do { if (! PVRSRV_VZ_MODE_IS(_expr)) { return; } } while (0) +#define PVRSRV_VZ_RET_IF_MODE(_expr, _rc) do { if ( PVRSRV_VZ_MODE_IS(_expr)) { return (_rc); } } while (0) +#define PVRSRV_VZ_RET_IF_NOT_MODE(_expr, _rc) do { if (! PVRSRV_VZ_MODE_IS(_expr)) { return (_rc); } } while (0) + +/*! +****************************************************************************** +@Note The driver execution mode AppHint (i.e. PVRSRV_APPHINT_DRIVERMODE) + can be an override or non-override 32-bit value. An override value + has the MSB bit set & a non-override value has this MSB bit cleared. + Excluding this MSB bit & interpreting the remaining 31-bit as a + signed 31-bit integer, the mode values are: + [-1 native : 0 host : +1 guest ]. +******************************************************************************/ +#define PVRSRV_VZ_APPHINT_MODE_IS_OVERRIDE(_expr) ((IMG_UINT32)(_expr)&(IMG_UINT32)(1<<31)) +#define PVRSRV_VZ_APPHINT_MODE(_expr) \ + ((((IMG_UINT32)(_expr)&(IMG_UINT32)0x7FFFFFFF) == (IMG_UINT32)0x7FFFFFFF) ? DRIVER_MODE_NATIVE : \ + !((IMG_UINT32)(_expr)&(IMG_UINT32)0x7FFFFFFF) ? 
DRIVER_MODE_HOST : \ + ((IMG_UINT32)((IMG_UINT32)(_expr)&(IMG_UINT32)0x7FFFFFFF)==(IMG_UINT32)0x1) ? DRIVER_MODE_GUEST : \ + ((IMG_UINT32)(_expr)&(IMG_UINT32)0x7FFFFFFF)) + +/*! +****************************************************************************** + + @Function LMA memory management API + +******************************************************************************/ +#if defined(SUPPORT_GPUVIRT_VALIDATION) +PVRSRV_ERROR LMA_PhyContigPagesAllocGPV(PVRSRV_DEVICE_NODE *psDevNode, size_t uiSize, + PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr, + IMG_UINT32 ui32OSid, IMG_PID uiPid); +#endif +PVRSRV_ERROR LMA_PhyContigPagesAlloc(PVRSRV_DEVICE_NODE *psDevNode, size_t uiSize, + PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr, + IMG_PID uiPid); + +void LMA_PhyContigPagesFree(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle); + +PVRSRV_ERROR LMA_PhyContigPagesMap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle, + size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr, + void **pvPtr); + +void LMA_PhyContigPagesUnmap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle, + void *pvPtr); + +PVRSRV_ERROR LMA_PhyContigPagesClean(PVRSRV_DEVICE_NODE *psDevNode, + PG_HANDLE *psMemHandle, + IMG_UINT32 uiOffset, + IMG_UINT32 uiLength); + +typedef struct _PHYS_HEAP_ITERATOR_ PHYS_HEAP_ITERATOR; + +/*! +****************************************************************************** + @Function LMA_HeapIteratorCreate + + @Description + Creates iterator for traversing physical heap requested by ui32Flags. The + iterator will go through all of the segments (a segment is physically + contiguous) of the physical heap and return their CPU physical address and + size. + + @Input psDevNode: Pointer to device node struct. + @Input ui32Flags: Find heap that matches flags. + @Output ppsIter: Pointer to the iterator object. + + @Return PVRSRV_OK upon success and PVRSRV_ERROR otherwise. 
+******************************************************************************/ +PVRSRV_ERROR LMA_HeapIteratorCreate(PVRSRV_DEVICE_NODE *psDevNode, + PHYS_HEAP_USAGE_FLAGS ui32Flags, + PHYS_HEAP_ITERATOR **ppsIter); + +/*! +****************************************************************************** + @Function LMA_HeapIteratorDestroy + + @Description + Frees the iterator object created with LMA_HeapIteratorCreate. + + @Input psIter: Pointer to the iterator object. +******************************************************************************/ +void LMA_HeapIteratorDestroy(PHYS_HEAP_ITERATOR *psIter); + +/*! +****************************************************************************** + @Function LMA_HeapIteratorReset + + @Description + Resets the iterator to the first segment of the physical heap. + + @Input psIter: Pointer to the iterator object. +******************************************************************************/ +PVRSRV_ERROR LMA_HeapIteratorReset(PHYS_HEAP_ITERATOR *psIter); + +/*! +****************************************************************************** + @Function LMA_HeapIteratorNext + + @Description + Retrieves current segment's physical device address and size and moves the + iterator to the next element (if exists). If the iterator reached an end of + the heap and no segment was retrieved, this function returns IMG_FALSE. + + @Input psIter: Pointer to the iterator object. + @Output psDevPAddr: Device physical address of the current segment. + @Output puiSize: Size of the current segment. + + @Return IMG_TRUE if a segment was found and retrieved, IMG_FALSE otherwise. +******************************************************************************/ +IMG_BOOL LMA_HeapIteratorNext(PHYS_HEAP_ITERATOR *psIter, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_UINT64 *puiSize); + +/*! 
+****************************************************************************** + @Function LMA_HeapIteratorGetHeapStats + + @Description + Retrieves phys heap's usage statistics. + + @Input psIter: Pointer to the iterator object. + @Output puiTotalSize: Total size of the physical heap. + @Output puiInUseSize: Used space in the physical heap. + + @Return PVRSRV_OK upon success and PVRSRV_ERROR otherwise. +******************************************************************************/ +PVRSRV_ERROR LMA_HeapIteratorGetHeapStats(PHYS_HEAP_ITERATOR *psIter, + IMG_UINT64 *puiTotalSize, + IMG_UINT64 *puiInUseSize); + +/*! +****************************************************************************** + @Function PVRSRVPollForValueKM + + @Description + Polls for a value to match a masked read + + @Input psDevNode : Pointer to device node struct + @Input pui32LinMemAddr : CPU linear address to poll + @Input ui32Value : required value + @Input ui32Mask : Mask + @Input bDebugDumpOnFailure : Whether poll failure should result into a debug + dump. CAUTION: When calling this function from code paths which are + also used by debug-dumping code, this argument MUST be IMG_FALSE + otherwise, we might end up requesting debug-dump in recursion and + eventually blow-up call stack. + + @Return PVRSRV_ERROR : +******************************************************************************/ +PVRSRV_ERROR PVRSRVPollForValueKM(PVRSRV_DEVICE_NODE *psDevNode, + volatile IMG_UINT32 __iomem *pui32LinMemAddr, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + POLL_FLAGS ePollFlags); + +/*! 
+****************************************************************************** + @Function PVRSRVWaitForValueKM + + @Description + Waits (using EventObjects) for a value to match a masked read + + @Input pui32LinMemAddr : CPU linear address to poll + @Input ui32Value : Required value + @Input ui32Mask : Mask to be applied before checking against + ui32Value + @Return PVRSRV_ERROR : +******************************************************************************/ +PVRSRV_ERROR +PVRSRVWaitForValueKM(volatile IMG_UINT32 __iomem *pui32LinMemAddr, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask); + +/*! +****************************************************************************** + @Function : PVRSRVSystemHasCacheSnooping + + @Description : Returns whether the system has cache snooping + + @Return : IMG_TRUE if the system has cache snooping +******************************************************************************/ +IMG_BOOL PVRSRVSystemHasCacheSnooping(PVRSRV_DEVICE_CONFIG *psDevConfig); + +/*! +****************************************************************************** + @Function : PVRSRVSystemSnoopingIsEmulated + + @Description : Returns whether system cache snooping support is emulated + + @Return : IMG_TRUE if the system cache snooping is emulated in software +******************************************************************************/ +IMG_BOOL PVRSRVSystemSnoopingIsEmulated(PVRSRV_DEVICE_CONFIG *psDevConfig); + +/*! +****************************************************************************** + @Function : PVRSRVSystemSnoopingOfCPUCache + + @Description : Returns whether the system supports snooping of the CPU cache + + @Return : IMG_TRUE if the system has CPU cache snooping +******************************************************************************/ +IMG_BOOL PVRSRVSystemSnoopingOfCPUCache(PVRSRV_DEVICE_CONFIG *psDevConfig); + +/*! 
+****************************************************************************** + @Function : PVRSRVSystemSnoopingOfDeviceCache + + @Description : Returns whether the system supports snooping of the device cache + + @Return : IMG_TRUE if the system has device cache snooping +******************************************************************************/ +IMG_BOOL PVRSRVSystemSnoopingOfDeviceCache(PVRSRV_DEVICE_CONFIG *psDevConfig); + +/*! +****************************************************************************** + @Function : PVRSRVSystemHasNonMappableLocalMemory + + @Description : Returns whether the device has non-mappable part of local memory + + @Return : IMG_TRUE if the device has non-mappable part of local memory +******************************************************************************/ +IMG_BOOL PVRSRVSystemHasNonMappableLocalMemory(PVRSRV_DEVICE_CONFIG *psDevConfig); + +/*! +****************************************************************************** + @Function : PVRSRVSystemWaitCycles + + @Description : Waits for at least ui32Cycles of the Device clk. +******************************************************************************/ +void PVRSRVSystemWaitCycles(PVRSRV_DEVICE_CONFIG *psDevConfig, IMG_UINT32 ui32Cycles); + +PVRSRV_ERROR PVRSRVSystemInstallDeviceLISR(void *pvOSDevice, + IMG_UINT32 ui32IRQ, + const IMG_CHAR *pszName, + PFN_LISR pfnLISR, + void *pvData, + IMG_HANDLE *phLISRData); + +PVRSRV_ERROR PVRSRVSystemUninstallDeviceLISR(IMG_HANDLE hLISRData); + +int PVRSRVGetDriverStatus(void); + +/*! 
+****************************************************************************** + @Function : PVRSRVIsBridgeEnabled + + @Description : Returns whether the given bridge group is enabled + + @Return : IMG_TRUE if the given bridge group is enabled +******************************************************************************/ +static inline IMG_BOOL PVRSRVIsBridgeEnabled(IMG_HANDLE hServices, IMG_UINT32 ui32BridgeGroup) +{ + IMG_UINT32 ui32Bridges; + IMG_UINT32 ui32Offset; + + PVR_UNREFERENCED_PARAMETER(hServices); + +#if defined(SUPPORT_RGX) + if (ui32BridgeGroup >= PVRSRV_BRIDGE_RGX_FIRST) + { + ui32Bridges = gui32RGXBridges; + ui32Offset = PVRSRV_BRIDGE_RGX_FIRST; + } + else +#endif /* SUPPORT_RGX */ + { + ui32Bridges = gui32PVRBridges; + ui32Offset = PVRSRV_BRIDGE_FIRST; + } + + return ((1U << (ui32BridgeGroup - ui32Offset)) & ui32Bridges) != 0; +} + + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +#if defined(EMULATOR) + void SetAxiProtOSid(IMG_UINT32 ui32OSid, IMG_BOOL bState); + void SetTrustedDeviceAceEnabled(void); +#endif +#endif + +/*! +****************************************************************************** + @Function : PVRSRVCreateHWPerfHostThread + + @Description : Creates HWPerf event object and thread unless already created + + @Input ui32Timeout : Initial timeout (ms) between updates on the HWPerf thread + + @Return : PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_ + error code +******************************************************************************/ +PVRSRV_ERROR PVRSRVCreateHWPerfHostThread(IMG_UINT32 ui32Timeout); + +/*! +****************************************************************************** + @Function : PVRSRVDestroyHWPerfHostThread + + @Description : Destroys HWPerf event object and thread if created + + @Return : PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_ + error code +******************************************************************************/ +PVRSRV_ERROR PVRSRVDestroyHWPerfHostThread(void); + +/*! 
+****************************************************************************** + @Function : PVRSRVPhysMemHeapsInit + + @Description : Registers and acquires physical memory heaps + + @Return : PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_ + error code +******************************************************************************/ +PVRSRV_ERROR PVRSRVPhysMemHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_DEVICE_CONFIG *psDevConfig); + +/*! +****************************************************************************** + @Function : PVRSRVPhysMemHeapsDeinit + + @Description : Releases and unregisters physical memory heaps + + @Return : PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_ + error code +******************************************************************************/ +void PVRSRVPhysMemHeapsDeinit(PVRSRV_DEVICE_NODE *psDeviceNode); + +/*************************************************************************/ /*! +@Function FindPhysHeapConfig +@Description Find Phys Heap Config from Device Config. +@Input psDevConfig Pointer to device config. +@Input ui32Flags Find heap that matches flags. +@Return PHYS_HEAP_CONFIG* Return a config, or NULL if not found. +*/ /**************************************************************************/ +PHYS_HEAP_CONFIG* FindPhysHeapConfig(PVRSRV_DEVICE_CONFIG *psDevConfig, + PHYS_HEAP_USAGE_FLAGS ui32Flags); + +/*************************************************************************/ /*! +@Function PVRSRVGetDeviceInstance +@Description Return the specified device instance from Device node list. +@Input ui32Instance Device instance to find +@Return PVRSRV_DEVICE_NODE* Return a device node, or NULL if not found. 
+*/ /**************************************************************************/ +PVRSRV_DEVICE_NODE* PVRSRVGetDeviceInstance(IMG_UINT32 ui32Instance); +#endif /* PVRSRV_H */ diff --git a/drivers/gpu/drm/phytium/octopus/pvrsrv_apphint.h b/drivers/gpu/drm/phytium/octopus/pvrsrv_apphint.h new file mode 100644 index 000000000000..b385cb930a6a --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvrsrv_apphint.h @@ -0,0 +1,71 @@ +/**************************************************************************/ /*! +@File +@Title PhytiumVR AppHint generic interface +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /***************************************************************************/ + +#if !defined(PVRSRV_APPHINT_H) +#define PVRSRV_APPHINT_H + +/* Supplied to PVRSRVAppHintRegisterHandlers*() functions when the apphint + * is a global driver apphint, i.e. apphints not present in + * APPHINT_DEBUGFS_DEVICE_ID, i.e. not per device. 
+ */ +#define APPHINT_OF_DRIVER_NO_DEVICE ((void*)-1U) + +#if defined(__linux__) + +#include "km_apphint.h" +#define PVRSRVAppHintDumpState() pvr_apphint_dump_state() +#define PVRSRVAppHintRegisterHandlersUINT64(i,q,s,d,p) pvr_apphint_register_handlers_uint64(i,q,s,d,p) +#define PVRSRVAppHintRegisterHandlersUINT32(i,q,s,d,p) pvr_apphint_register_handlers_uint32(i,q,s,d,p) +#define PVRSRVAppHintRegisterHandlersBOOL(i,q,s,d,p) pvr_apphint_register_handlers_bool(i,q,s,d,p) +#define PVRSRVAppHintRegisterHandlersSTRING(i,q,s,d,p) pvr_apphint_register_handlers_string(i,q,s,d,p) + +#else + +#define PVRSRVAppHintDumpState() +#define PVRSRVAppHintRegisterHandlersUINT64(i,q,s,d,p) +#define PVRSRVAppHintRegisterHandlersUINT32(i,q,s,d,p) +#define PVRSRVAppHintRegisterHandlersBOOL(i,q,s,d,p) +#define PVRSRVAppHintRegisterHandlersSTRING(i,q,s,d,p) + +#endif + +#endif /* PVRSRV_APPHINT_H */ diff --git a/drivers/gpu/drm/phytium/octopus/pvrsrv_bridge_init.c b/drivers/gpu/drm/phytium/octopus/pvrsrv_bridge_init.c new file mode 100644 index 000000000000..fb2f534a4329 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvrsrv_bridge_init.c @@ -0,0 +1,511 @@ +/*************************************************************************/ /*! +@File +@Title PVR Common Bridge Init/Deinit Module (kernel side) +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements common PVR Bridge init/deinit code +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "pvrsrv_bridge_init.h" +#include "srvcore.h" + +/* These will go when full bridge gen comes in */ +#if defined(PDUMP) +PVRSRV_ERROR InitPDUMPCTRLBridge(void); +PVRSRV_ERROR DeinitPDUMPCTRLBridge(void); +PVRSRV_ERROR InitPDUMPBridge(void); +PVRSRV_ERROR DeinitPDUMPBridge(void); +PVRSRV_ERROR InitRGXPDUMPBridge(void); +PVRSRV_ERROR DeinitRGXPDUMPBridge(void); +#endif +#if defined(SUPPORT_DISPLAY_CLASS) +PVRSRV_ERROR InitDCBridge(void); +PVRSRV_ERROR DeinitDCBridge(void); +#endif +PVRSRV_ERROR InitMMBridge(void); +PVRSRV_ERROR DeinitMMBridge(void); +#if !defined(EXCLUDE_CMM_BRIDGE) +PVRSRV_ERROR InitCMMBridge(void); +PVRSRV_ERROR DeinitCMMBridge(void); +#endif +PVRSRV_ERROR InitPDUMPMMBridge(void); +PVRSRV_ERROR DeinitPDUMPMMBridge(void); +PVRSRV_ERROR InitSRVCOREBridge(void); +PVRSRV_ERROR DeinitSRVCOREBridge(void); +PVRSRV_ERROR InitSYNCBridge(void); +PVRSRV_ERROR DeinitSYNCBridge(void); +#if defined(SUPPORT_DMA_TRANSFER) +PVRSRV_ERROR InitDMABridge(void); +PVRSRV_ERROR DeinitDMABridge(void); +#endif + +#if defined(SUPPORT_RGX) +PVRSRV_ERROR InitRGXTA3DBridge(void); +PVRSRV_ERROR DeinitRGXTA3DBridge(void); +#if defined(SUPPORT_RGXTQ_BRIDGE) +PVRSRV_ERROR InitRGXTQBridge(void); +PVRSRV_ERROR DeinitRGXTQBridge(void); +#endif /* defined(SUPPORT_RGXTQ_BRIDGE) */ +PVRSRV_ERROR InitRGXTQ2Bridge(void); +PVRSRV_ERROR DeinitRGXTQ2Bridge(void); +PVRSRV_ERROR InitRGXCMPBridge(void); +PVRSRV_ERROR DeinitRGXCMPBridge(void); +#if defined(SUPPORT_USC_BREAKPOINT) +PVRSRV_ERROR InitRGXBREAKPOINTBridge(void); +PVRSRV_ERROR DeinitRGXBREAKPOINTBridge(void); +#endif +PVRSRV_ERROR InitRGXFWDBGBridge(void); +PVRSRV_ERROR DeinitRGXFWDBGBridge(void); +PVRSRV_ERROR InitRGXHWPERFBridge(void); +PVRSRV_ERROR DeinitRGXHWPERFBridge(void); +#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE) +PVRSRV_ERROR InitRGXREGCONFIGBridge(void); +PVRSRV_ERROR DeinitRGXREGCONFIGBridge(void); +#endif +PVRSRV_ERROR 
InitRGXKICKSYNCBridge(void); +PVRSRV_ERROR DeinitRGXKICKSYNCBridge(void); +PVRSRV_ERROR InitRGXSIGNALSBridge(void); +PVRSRV_ERROR DeinitRGXSIGNALSBridge(void); +#endif /* SUPPORT_RGX */ +PVRSRV_ERROR InitCACHEBridge(void); +PVRSRV_ERROR DeinitCACHEBridge(void); +#if defined(SUPPORT_SECURE_EXPORT) +PVRSRV_ERROR InitSMMBridge(void); +PVRSRV_ERROR DeinitSMMBridge(void); +#endif +#if !defined(EXCLUDE_HTBUFFER_BRIDGE) +PVRSRV_ERROR InitHTBUFFERBridge(void); +PVRSRV_ERROR DeinitHTBUFFERBridge(void); +#endif +PVRSRV_ERROR InitPVRTLBridge(void); +PVRSRV_ERROR DeinitPVRTLBridge(void); +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) +PVRSRV_ERROR InitRIBridge(void); +PVRSRV_ERROR DeinitRIBridge(void); +#endif +PVRSRV_ERROR InitDEVICEMEMHISTORYBridge(void); +PVRSRV_ERROR DeinitDEVICEMEMHISTORYBridge(void); +#if defined(SUPPORT_VALIDATION_BRIDGE) +PVRSRV_ERROR InitVALIDATIONBridge(void); +PVRSRV_ERROR DeinitVALIDATIONBridge(void); +#endif +#if defined(PVR_TESTING_UTILS) +PVRSRV_ERROR InitTUTILSBridge(void); +PVRSRV_ERROR DeinitTUTILSBridge(void); +#endif +PVRSRV_ERROR InitSYNCTRACKINGBridge(void); +PVRSRV_ERROR DeinitSYNCTRACKINGBridge(void); +#if defined(SUPPORT_WRAP_EXTMEM) +PVRSRV_ERROR InitMMEXTMEMBridge(void); +PVRSRV_ERROR DeinitMMEXTMEMBridge(void); +#endif +#if defined(SUPPORT_FALLBACK_FENCE_SYNC) +PVRSRV_ERROR InitSYNCFALLBACKBridge(void); +PVRSRV_ERROR DeinitSYNCFALLBACKBridge(void); +#endif +PVRSRV_ERROR InitRGXTIMERQUERYBridge(void); +PVRSRV_ERROR DeinitRGXTIMERQUERYBridge(void); +#if defined(SUPPORT_DI_BRG_IMPL) +PVRSRV_ERROR InitDIBridge(void); +PVRSRV_ERROR DeinitDIBridge(void); +#endif + +PVRSRV_ERROR InitRGXRAYBridge(void); +PVRSRV_ERROR DeinitRGXRAYBridge(void); + +PVRSRV_ERROR +ServerBridgeInit(void) +{ + PVRSRV_ERROR eError; + + BridgeDispatchTableStartOffsetsInit(); + + eError = InitSRVCOREBridge(); + PVR_LOG_IF_ERROR(eError, "InitSRVCOREBridge"); + + eError = InitSYNCBridge(); + PVR_LOG_IF_ERROR(eError, "InitSYNCBridge"); + +#if defined(PDUMP) + eError = 
InitPDUMPCTRLBridge(); + PVR_LOG_IF_ERROR(eError, "InitPDUMPCTRLBridge"); +#endif + + eError = InitMMBridge(); + PVR_LOG_IF_ERROR(eError, "InitMMBridge"); + +#if !defined(EXCLUDE_CMM_BRIDGE) + eError = InitCMMBridge(); + PVR_LOG_IF_ERROR(eError, "InitCMMBridge"); +#endif + +#if defined(PDUMP) + eError = InitPDUMPMMBridge(); + PVR_LOG_IF_ERROR(eError, "InitPDUMPMMBridge"); + + eError = InitPDUMPBridge(); + PVR_LOG_IF_ERROR(eError, "InitPDUMPBridge"); +#endif + +#if defined(SUPPORT_DISPLAY_CLASS) + eError = InitDCBridge(); + PVR_LOG_IF_ERROR(eError, "InitDCBridge"); +#endif + + eError = InitCACHEBridge(); + PVR_LOG_IF_ERROR(eError, "InitCACHEBridge"); + +#if defined(SUPPORT_SECURE_EXPORT) + eError = InitSMMBridge(); + PVR_LOG_IF_ERROR(eError, "InitSMMBridge"); +#endif + +#if !defined(EXCLUDE_HTBUFFER_BRIDGE) + eError = InitHTBUFFERBridge(); + PVR_LOG_IF_ERROR(eError, "InitHTBUFFERBridge"); +#endif + + eError = InitPVRTLBridge(); + PVR_LOG_IF_ERROR(eError, "InitPVRTLBridge"); + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + eError = InitRIBridge(); + PVR_LOG_IF_ERROR(eError, "InitRIBridge"); +#endif + +#if defined(SUPPORT_VALIDATION_BRIDGE) + eError = InitVALIDATIONBridge(); + PVR_LOG_IF_ERROR(eError, "InitVALIDATIONBridge"); +#endif + +#if defined(PVR_TESTING_UTILS) + eError = InitTUTILSBridge(); + PVR_LOG_IF_ERROR(eError, "InitTUTILSBridge"); +#endif + + eError = InitDEVICEMEMHISTORYBridge(); + PVR_LOG_IF_ERROR(eError, "InitDEVICEMEMHISTORYBridge"); + + eError = InitSYNCTRACKINGBridge(); + PVR_LOG_IF_ERROR(eError, "InitSYNCTRACKINGBridge"); + +#if defined(SUPPORT_DMA_TRANSFER) + eError = InitDMABridge(); + PVR_LOG_IF_ERROR(eError, "InitDMABridge"); +#endif + +#if defined(SUPPORT_RGX) + +#if defined(SUPPORT_RGXTQ_BRIDGE) + eError = InitRGXTQBridge(); + PVR_LOG_IF_ERROR(eError, "InitRGXTQBridge"); +#endif /* defined(SUPPORT_RGXTQ_BRIDGE) */ + + eError = InitRGXTA3DBridge(); + PVR_LOG_IF_ERROR(eError, "InitRGXTA3DBridge"); + + #if defined(SUPPORT_USC_BREAKPOINT) + 
eError = InitRGXBREAKPOINTBridge(); + PVR_LOG_IF_ERROR(eError, "InitRGXBREAKPOINTBridge"); +#endif + + eError = InitRGXFWDBGBridge(); + PVR_LOG_IF_ERROR(eError, "InitRGXFWDBGBridge"); + +#if defined(PDUMP) + eError = InitRGXPDUMPBridge(); + PVR_LOG_IF_ERROR(eError, "InitRGXPDUMPBridge"); +#endif + + eError = InitRGXHWPERFBridge(); + PVR_LOG_IF_ERROR(eError, "InitRGXHWPERFBridge"); + +#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE) + eError = InitRGXREGCONFIGBridge(); + PVR_LOG_IF_ERROR(eError, "InitRGXREGCONFIGBridge"); +#endif + + eError = InitRGXKICKSYNCBridge(); + PVR_LOG_IF_ERROR(eError, "InitRGXKICKSYNCBridge"); + + eError = InitRGXTIMERQUERYBridge(); + PVR_LOG_IF_ERROR(eError, "InitRGXTIMERQUERYBridge"); + +#endif /* SUPPORT_RGX */ + +#if defined(SUPPORT_WRAP_EXTMEM) + eError = InitMMEXTMEMBridge(); + PVR_LOG_IF_ERROR(eError, "InitMMEXTMEMBridge"); +#endif + +#if defined(SUPPORT_FALLBACK_FENCE_SYNC) + eError = InitSYNCFALLBACKBridge(); + PVR_LOG_IF_ERROR(eError, "InitSYNCFALLBACKBridge"); +#endif + +#if defined(SUPPORT_DI_BRG_IMPL) + eError = InitDIBridge(); + PVR_LOG_IF_ERROR(eError, "InitDIBridge"); +#endif + + eError = OSPlatformBridgeInit(); + PVR_LOG_IF_ERROR(eError, "OSPlatformBridgeInit"); + + return eError; +} + +PVRSRV_ERROR +ServerBridgeDeInit(void) +{ + PVRSRV_ERROR eError; + + eError = OSPlatformBridgeDeInit(); + PVR_LOG_IF_ERROR(eError, "OSPlatformBridgeDeInit"); + +#if defined(SUPPORT_DI_BRG_IMPL) + eError = DeinitDIBridge(); + PVR_LOG_IF_ERROR(eError, "DeinitDIBridge"); +#endif + +#if defined(SUPPORT_FALLBACK_FENCE_SYNC) + eError = DeinitSYNCFALLBACKBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitSYNCFALLBACKBridge"); +#endif + +#if defined(SUPPORT_WRAP_EXTMEM) + eError = DeinitMMEXTMEMBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitMMEXTMEMBridge"); +#endif + + eError = DeinitSRVCOREBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitSRVCOREBridge"); + + eError = DeinitSYNCBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitSYNCBridge"); + +#if 
defined(PDUMP) + eError = DeinitPDUMPCTRLBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitPDUMPCTRLBridge"); +#endif + + eError = DeinitMMBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitMMBridge"); + +#if !defined(EXCLUDE_CMM_BRIDGE) + eError = DeinitCMMBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitCMMBridge"); +#endif + +#if defined(PDUMP) + eError = DeinitPDUMPMMBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitPDUMPMMBridge"); + + eError = DeinitPDUMPBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitPDUMPBridge"); +#endif + +#if defined(PVR_TESTING_UTILS) + eError = DeinitTUTILSBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitTUTILSBridge"); +#endif + +#if defined(SUPPORT_DISPLAY_CLASS) + eError = DeinitDCBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitDCBridge"); +#endif + + eError = DeinitCACHEBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitCACHEBridge"); + +#if defined(SUPPORT_SECURE_EXPORT) + eError = DeinitSMMBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitSMMBridge"); +#endif + +#if !defined(EXCLUDE_HTBUFFER_BRIDGE) + eError = DeinitHTBUFFERBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitHTBUFFERBridge"); +#endif + + eError = DeinitPVRTLBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitPVRTLBridge"); + +#if defined(SUPPORT_VALIDATION_BRIDGE) + eError = DeinitVALIDATIONBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitVALIDATIONBridge"); +#endif + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + eError = DeinitRIBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitRIBridge"); +#endif + + eError = DeinitDEVICEMEMHISTORYBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitDEVICEMEMHISTORYBridge"); + + eError = DeinitSYNCTRACKINGBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitSYNCTRACKINGBridge"); + +#if defined(SUPPORT_DMA_TRANSFER) + eError = DeinitDMABridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitDMABridge"); +#endif + +#if defined(SUPPORT_RGX) + +#if defined(SUPPORT_RGXTQ_BRIDGE) + eError = DeinitRGXTQBridge(); + 
PVR_LOG_RETURN_IF_ERROR(eError, "DeinitRGXTQBridge"); +#endif /* defined(SUPPORT_RGXTQ_BRIDGE) */ + + eError = DeinitRGXTA3DBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitRGXTA3DBridge"); + +#if defined(SUPPORT_USC_BREAKPOINT) + eError = DeinitRGXBREAKPOINTBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitRGXBREAKPOINTBridge"); +#endif + + eError = DeinitRGXFWDBGBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitRGXFWDBGBridge"); + +#if defined(PDUMP) + eError = DeinitRGXPDUMPBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitRGXPDUMPBridge"); +#endif + + eError = DeinitRGXHWPERFBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitRGXHWPERFBridge"); + +#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE) + eError = DeinitRGXREGCONFIGBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitRGXREGCONFIGBridge"); +#endif + + eError = DeinitRGXKICKSYNCBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitRGXKICKSYNCBridge"); + + eError = DeinitRGXTIMERQUERYBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitRGXTIMERQUERYBridge"); + +#endif /* SUPPORT_RGX */ + + return eError; +} + +#if defined(SUPPORT_RGX) +PVRSRV_ERROR +DeviceDepBridgeInit(IMG_UINT64 ui64Features) +{ + PVRSRV_ERROR eError; + +#if defined(RGX_FEATURE_COMPUTE_BIT_MASK) + if (ui64Features & RGX_FEATURE_COMPUTE_BIT_MASK) +#endif + { + eError = InitRGXCMPBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "InitRGXCMPBridge"); + } + + if (ui64Features & RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK) + { + eError = InitRGXSIGNALSBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "InitRGXSIGNALSBridge"); + } + +#if defined(RGX_FEATURE_FASTRENDER_DM_BIT_MASK) + if (ui64Features & RGX_FEATURE_FASTRENDER_DM_BIT_MASK) +#endif + { + eError = InitRGXTQ2Bridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "InitRGXTQ2Bridge"); + } + +#if defined(RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX) + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, RAY_TRACING_ARCH)) +#endif + { +#if defined(SUPPORT_RGXRAY_BRIDGE) + eError = InitRGXRAYBridge(); + PVR_LOG_IF_ERROR(eError, 
"InitRGXRAYBridge"); +#endif + } + + return PVRSRV_OK; +} + +PVRSRV_ERROR +DeviceDepBridgeDeInit(IMG_UINT64 ui64Features) +{ + PVRSRV_ERROR eError; + +#if defined(RGX_FEATURE_COMPUTE_BIT_MASK) + if (ui64Features & RGX_FEATURE_COMPUTE_BIT_MASK) +#endif + { + eError = DeinitRGXCMPBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitRGXCMPBridge"); + } + + if (ui64Features & RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK) + { + eError = DeinitRGXSIGNALSBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitRGXSIGNALSBridge"); + } + +#if defined(RGX_FEATURE_COMPUTE_BIT_MASK) + if (ui64Features & RGX_FEATURE_FASTRENDER_DM_BIT_MASK) +#endif + { + eError = DeinitRGXTQ2Bridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitRGXTQ2Bridge"); + } + +#if defined(RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX) + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, RAY_TRACING_ARCH)) +#endif + { +#if defined(SUPPORT_RGXRAY_BRIDGE) + eError = DeinitRGXRAYBridge(); + PVR_LOG_RETURN_IF_ERROR(eError, "DeinitRGXRAYBridge"); +#endif + } + + return PVRSRV_OK; +} +#endif /* SUPPORT_RGX */ diff --git a/drivers/gpu/drm/phytium/octopus/pvrsrv_bridge_init.h b/drivers/gpu/drm/phytium/octopus/pvrsrv_bridge_init.h new file mode 100644 index 000000000000..29069228eaed --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvrsrv_bridge_init.h @@ -0,0 +1,57 @@ +/**************************************************************************/ /*! +@File +@Title PVR Common Bridge Init/Deinit Module (kernel side) +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Header for the common PVR Bridge init/deinit code +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /***************************************************************************/ + +#ifndef PVRSRV_BRIDGE_INIT_H +#define PVRSRV_BRIDGE_INIT_H + +#include "img_types.h" +#include "pvrsrv_error.h" + +PVRSRV_ERROR ServerBridgeInit(void); +PVRSRV_ERROR DeviceDepBridgeInit(IMG_UINT64 ui64Features); + +PVRSRV_ERROR ServerBridgeDeInit(void); +PVRSRV_ERROR DeviceDepBridgeDeInit(IMG_UINT64 ui64Features); + + +#endif /* PVRSRV_BRIDGE_INIT_H */ diff --git a/drivers/gpu/drm/phytium/octopus/pvrsrv_cleanup.h b/drivers/gpu/drm/phytium/octopus/pvrsrv_cleanup.h new file mode 100644 index 000000000000..1390c701938d --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvrsrv_cleanup.h @@ -0,0 +1,159 @@ +/**************************************************************************/ /*! +@File +@Title PhytiumVR SrvKM cleanup thread deferred work interface +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /***************************************************************************/ + +#ifndef PVRSRV_CLEANUP_H +#define PVRSRV_CLEANUP_H + +#include "dllist.h" + +/**************************************************************************/ /*! +@Brief CLEANUP_THREAD_FN + +@Description This is the function prototype for the pfnFree member found in + the structure PVRSRV_CLEANUP_THREAD_WORK. The function is + responsible for carrying out the clean up work and if successful + freeing the memory originally supplied to the call + PVRSRVCleanupThreadAddWork(). + +@Input pvParam This is private data originally supplied by the caller + to PVRSRVCleanupThreadAddWork() when registering the + clean up work item, psData->pvData. It can be cast + to a relevant type within the using module. 
+ +@Return PVRSRV_OK if the cleanup operation was successful and the + callback has freed the PVRSRV_CLEANUP_THREAD_WORK* work item + memory original supplied to PVRSRVCleanupThreadAddWork() + Any other error code will lead to the work item + being re-queued and hence the original + PVRSRV_CLEANUP_THREAD_WORK* must not be freed. +*/ /***************************************************************************/ + +typedef PVRSRV_ERROR (*CLEANUP_THREAD_FN)(void *pvParam); + + +/* Typical number of times a caller should want the work to be retried in case + * of the callback function (pfnFree) returning an error. + * Callers to PVRSRVCleanupThreadAddWork should provide this value as the retry + * count (ui32RetryCount) unless there are special requirements. + * A value of 200 corresponds to around ~20s (200 * 100ms). If it is not + * successful by then give up as an unrecoverable problem has occurred. + */ +#define CLEANUP_THREAD_RETRY_COUNT_DEFAULT 200u +/* Like for CLEANUP_THREAD_RETRY_COUNT_DEFAULT but call will wait for + * a specified amount of time rather than number of retries. + */ +#define CLEANUP_THREAD_RETRY_TIMEOUT_MS_DEFAULT 20000u /* 20s */ + +/* Use to set retry count on a cleanup item. + * _item - pointer to the PVRSRV_CLEANUP_THREAD_WORK + * _count - retry count + */ +#define CLEANUP_THREAD_SET_RETRY_COUNT(_item,_count) \ + do { \ + (_item)->ui32RetryCount = (_count); \ + (_item)->ui32TimeStart = 0; \ + (_item)->ui32TimeEnd = 0; \ + } while (0) + +/* Use to set timeout deadline on a cleanup item. + * _item - pointer to the PVRSRV_CLEANUP_THREAD_WORK + * _timeout - timeout in milliseconds, if 0 + * CLEANUP_THREAD_RETRY_TIMEOUT_MS_DEFAULT is used + */ +#define CLEANUP_THREAD_SET_RETRY_TIMEOUT(_item,_timeout) \ + do { \ + (_item)->ui32RetryCount = 0; \ + (_item)->ui32TimeStart = OSClockms(); \ + (_item)->ui32TimeEnd = (_item)->ui32TimeStart + ((_timeout) > 0 ? 
\ + (_timeout) : CLEANUP_THREAD_RETRY_TIMEOUT_MS_DEFAULT); \ + } while (0) + +/* Indicates if the timeout on a given item has been reached. + * _item - pointer to the PVRSRV_CLEANUP_THREAD_WORK + */ +#define CLEANUP_THREAD_RETRY_TIMEOUT_REACHED(_item) \ + ((_item)->ui32TimeEnd - (_item)->ui32TimeStart <= \ + OSClockms() - (_item)->ui32TimeStart) + +/* Indicates if the current item is waiting on timeout or retry count. + * _item - pointer to the PVRSRV_CLEANUP_THREAD_WORK + * */ +#define CLEANUP_THREAD_IS_RETRY_TIMEOUT(_item) \ + ((_item)->ui32TimeStart != ((_item)->ui32TimeEnd)) + +/* Clean up work item specifics so that the task can be managed by the + * pvr_defer_free cleanup thread in the Server. + */ +typedef struct _PVRSRV_CLEANUP_THREAD_WORK_ +{ + DLLIST_NODE sNode; /*!< List node used internally by the cleanup + thread */ + CLEANUP_THREAD_FN pfnFree; /*!< Pointer to the function to be called to + carry out the deferred cleanup */ + void *pvData; /*!< private data for pfnFree, usually a way back + to the original PVRSRV_CLEANUP_THREAD_WORK* + pointer supplied in the call to + PVRSRVCleanupThreadAddWork(). */ + IMG_UINT32 ui32TimeStart; /*!< Timestamp in ms of the moment when + cleanup item has been created. */ + IMG_UINT32 ui32TimeEnd; /*!< Time in ms after which no further retry + attempts will be made, item discard and + error logged when this is reached. */ + IMG_UINT32 ui32RetryCount; /*!< Number of times the callback should be + re-tried when it returns error. */ + IMG_BOOL bDependsOnHW; /*!< Retry again after the RGX interrupt signals + the global event object */ +} PVRSRV_CLEANUP_THREAD_WORK; + + +/**************************************************************************/ /*! 
+@Function PVRSRVCleanupThreadAddWork + +@Description Add a work item to be called from the cleanup thread + +@Input psData : The function pointer and private data for the callback + +@Return None +*/ /***************************************************************************/ +void PVRSRVCleanupThreadAddWork(PVRSRV_CLEANUP_THREAD_WORK *psData); + +#endif /* PVRSRV_CLEANUP_H */ diff --git a/drivers/gpu/drm/phytium/octopus/pvrsrv_device.h b/drivers/gpu/drm/phytium/octopus/pvrsrv_device.h new file mode 100644 index 000000000000..428b177b5418 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvrsrv_device.h @@ -0,0 +1,404 @@ +/**************************************************************************/ /*! +@File +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /***************************************************************************/ + +#ifndef PVRSRV_DEVICE_H +#define PVRSRV_DEVICE_H + +#include "img_types.h" +#include "physheap.h" +#include "pvrsrv_error.h" +#include "pvrsrv_memalloc_physheap.h" +#include "rgx_fwif_km.h" +#include "servicesext.h" +#include "cache_ops.h" + +#if defined(SUPPORT_LINUX_DVFS) || defined(SUPPORT_PDVFS) +#include "pvr_dvfs.h" +#endif + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +#include "virt_validation_defs.h" +#endif + +typedef struct _PVRSRV_DEVICE_CONFIG_ PVRSRV_DEVICE_CONFIG; +typedef enum _DRIVER_MODE_ +{ +/* Do not use these enumerations directly, to query the + current driver mode, use the PVRSRV_VZ_MODE_IS() + macro */ + DRIVER_MODE_NATIVE = -1, + DRIVER_MODE_HOST = 0, + DRIVER_MODE_GUEST +} PVRSRV_DRIVER_MODE; + +typedef enum +{ + PVRSRV_DEVICE_LOCAL_MEMORY_ARENA_MAPPABLE = 0, + PVRSRV_DEVICE_LOCAL_MEMORY_ARENA_NON_MAPPABLE = 1, + PVRSRV_DEVICE_LOCAL_MEMORY_ARENA_LAST +} PVRSRV_DEVICE_LOCAL_MEMORY_ARENA; + +typedef enum _PVRSRV_DEVICE_SNOOP_MODE_ +{ + PVRSRV_DEVICE_SNOOP_NONE = 0, + PVRSRV_DEVICE_SNOOP_CPU_ONLY, + PVRSRV_DEVICE_SNOOP_DEVICE_ONLY, + PVRSRV_DEVICE_SNOOP_CROSS, + PVRSRV_DEVICE_SNOOP_EMULATED, +} PVRSRV_DEVICE_SNOOP_MODE; + +#if defined(SUPPORT_SOC_TIMER) +typedef IMG_UINT64 +(*PFN_SYS_DEV_SOC_TIMER_READ)(IMG_HANDLE hSysData); +#endif + +typedef enum _PVRSRV_DEVICE_FABRIC_TYPE_ +{ + PVRSRV_DEVICE_FABRIC_NONE = 0, + PVRSRV_DEVICE_FABRIC_ACELITE, + PVRSRV_DEVICE_FABRIC_FULLACE, +} PVRSRV_DEVICE_FABRIC_TYPE; + +typedef IMG_UINT32 +(*PFN_SYS_DEV_CLK_FREQ_GET)(IMG_HANDLE hSysData); + +typedef PVRSRV_ERROR +(*PFN_SYS_PRE_POWER)(IMG_HANDLE hSysData, + PVRSRV_SYS_POWER_STATE eNewPowerState, + PVRSRV_SYS_POWER_STATE eCurrentPowerState, + IMG_BOOL bForced +#if defined(SUPPORT_LMA_SUSPEND_TO_RAM) + , IMG_BOOL bPreserveRam +#endif + ); + +typedef PVRSRV_ERROR +(*PFN_SYS_POST_POWER)(IMG_HANDLE hSysData, + PVRSRV_SYS_POWER_STATE eNewPowerState, + PVRSRV_SYS_POWER_STATE 
eCurrentPowerState, + IMG_BOOL bForced +#if defined(SUPPORT_LMA_SUSPEND_TO_RAM) + , IMG_BOOL bPreserveRam +#endif + ); + +typedef void +(*PFN_SYS_DEV_INTERRUPT_HANDLED)(PVRSRV_DEVICE_CONFIG *psDevConfig); + +typedef PVRSRV_ERROR +(*PFN_SYS_DEV_CHECK_MEM_ALLOC_SIZE)(IMG_HANDLE hSysData, + IMG_UINT64 ui64MemSize); + +typedef void (*PFN_SYS_DEV_FEAT_DEP_INIT)(PVRSRV_DEVICE_CONFIG *, IMG_UINT64); + +typedef void +(*PFN_SYS_DEV_HOST_CACHE_MAINTENANCE)(IMG_HANDLE hSysData, + PVRSRV_CACHE_OP eRequestType, + void *pvVirtStart, + void *pvVirtEnd, + IMG_CPU_PHYADDR sCPUPhysStart, + IMG_CPU_PHYADDR sCPUPhysEnd); + +typedef void* +(*PFN_SLAVE_DMA_CHAN)(PVRSRV_DEVICE_CONFIG*, char*); + +typedef void +(*PFN_SLAVE_DMA_FREE)(PVRSRV_DEVICE_CONFIG*, + void*); + +typedef void +(*PFN_DEV_PHY_ADDR_2_DMA_ADDR)(PVRSRV_DEVICE_CONFIG *, + IMG_DMA_ADDR *, + IMG_DEV_PHYADDR *, + IMG_BOOL *, + IMG_UINT32, + IMG_BOOL); + +#if defined(SUPPORT_TRUSTED_DEVICE) + +#define TD_MAX_NUM_MIPS_PAGETABLE_PAGES (4U) + +typedef struct _PVRSRV_TD_FW_PARAMS_ +{ + const void *pvFirmware; + IMG_UINT32 ui32FirmwareSize; + + union + { + struct + { + /* META-only parameters */ + IMG_DEV_VIRTADDR sFWCodeDevVAddr; + IMG_DEV_VIRTADDR sFWDataDevVAddr; + IMG_DEV_VIRTADDR sFWCorememCodeDevVAddr; + RGXFWIF_DEV_VIRTADDR sFWCorememCodeFWAddr; + IMG_DEVMEM_SIZE_T uiFWCorememCodeSize; + IMG_DEV_VIRTADDR sFWCorememDataDevVAddr; + RGXFWIF_DEV_VIRTADDR sFWCorememDataFWAddr; + IMG_UINT32 ui32NumThreads; + } sMeta; + + struct + { + /* MIPS-only parameters */ + IMG_DEV_PHYADDR sGPURegAddr; + IMG_DEV_PHYADDR asFWPageTableAddr[TD_MAX_NUM_MIPS_PAGETABLE_PAGES]; + IMG_DEV_PHYADDR sFWStackAddr; + IMG_UINT32 ui32FWPageTableLog2PageSize; + IMG_UINT32 ui32FWPageTableNumPages; + } sMips; + } uFWP; +} PVRSRV_TD_FW_PARAMS; + +typedef PVRSRV_ERROR +(*PFN_TD_SEND_FW_IMAGE)(IMG_HANDLE hSysData, + PVRSRV_TD_FW_PARAMS *psTDFWParams); + +typedef struct _PVRSRV_TD_POWER_PARAMS_ +{ + IMG_DEV_PHYADDR sPCAddr; + + /* MIPS-only fields */ + 
IMG_DEV_PHYADDR sGPURegAddr; + IMG_DEV_PHYADDR sBootRemapAddr; + IMG_DEV_PHYADDR sCodeRemapAddr; + IMG_DEV_PHYADDR sDataRemapAddr; +} PVRSRV_TD_POWER_PARAMS; + +typedef PVRSRV_ERROR +(*PFN_TD_SET_POWER_PARAMS)(IMG_HANDLE hSysData, + PVRSRV_TD_POWER_PARAMS *psTDPowerParams); + +typedef PVRSRV_ERROR +(*PFN_TD_RGXSTART)(IMG_HANDLE hSysData); + +typedef PVRSRV_ERROR +(*PFN_TD_RGXSTOP)(IMG_HANDLE hSysData); + +#endif /* defined(SUPPORT_TRUSTED_DEVICE) */ + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +typedef void (*PFN_SYS_DEV_VIRT_INIT)(IMG_UINT64[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS], IMG_UINT64[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]); +#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */ + +typedef struct _PVRSRV_ROBUSTNESS_ERR_DATA_HOST_WDG_ +{ + IMG_UINT32 ui32Status; /*!< FW status */ + IMG_UINT32 ui32Reason; /*!< Reason for FW status */ +} PVRSRV_ROBUSTNESS_ERR_DATA_HOST_WDG; + +typedef struct _PVRSRV_ROBUSTNESS_ERR_DATA_FW_PF_ +{ + IMG_DEV_VIRTADDR sFWFaultAddr; /*!< FW page fault address */ +} PVRSRV_ROBUSTNESS_ERR_DATA_FW_PF; + +typedef struct _PVRSRV_ROBUSTNESS_ERR_DATA_CHECKSUM_ +{ + IMG_UINT32 ui32ExtJobRef; /*!< External Job Reference of any affected GPU work */ + RGXFWIF_DM eDM; /*!< Data Master which was running any affected GPU work */ +} PVRSRV_ROBUSTNESS_ERR_DATA_CHECKSUM; + +typedef struct _PVRSRV_ROBUSTNESS_NOTIFY_DATA_ +{ + RGX_CONTEXT_RESET_REASON eResetReason; /*!< Reason for error/reset */ + IMG_PID pid; /*!< Pid of process which created the errored context */ + union + { + PVRSRV_ROBUSTNESS_ERR_DATA_CHECKSUM sChecksumErrData; /*!< Data returned for checksum errors */ + PVRSRV_ROBUSTNESS_ERR_DATA_FW_PF sFwPFErrData; /*!< Data returned for FW page faults */ + PVRSRV_ROBUSTNESS_ERR_DATA_HOST_WDG sHostWdgData; /*!< Data returned for Host Wdg FW faults */ + } uErrData; +} PVRSRV_ROBUSTNESS_NOTIFY_DATA; + +typedef void +(*PFN_SYS_DEV_ERROR_NOTIFY)(IMG_HANDLE hSysData, + PVRSRV_ROBUSTNESS_NOTIFY_DATA 
*psRobustnessErrorData); + +struct _PVRSRV_DEVICE_CONFIG_ +{ + /*! OS device passed to SysDevInit (linux: 'struct device') */ + void *pvOSDevice; + + /*! + *! Service representation of pvOSDevice. Should be set to NULL when the + *! config is created in SysDevInit. Set by Services once a device node has + *! been created for this config and unset before SysDevDeInit is called. + */ + struct _PVRSRV_DEVICE_NODE_ *psDevNode; + + /*! Name of the device */ + IMG_CHAR *pszName; + + /*! Version of the device (optional) */ + IMG_CHAR *pszVersion; + + /*! Register bank address */ + IMG_CPU_PHYADDR sRegsCpuPBase; + /*! Register bank size */ + IMG_UINT32 ui32RegsSize; + /*! Device interrupt number */ + IMG_UINT32 ui32IRQ; + + PVRSRV_DEVICE_SNOOP_MODE eCacheSnoopingMode; + + /*! Device specific data handle */ + IMG_HANDLE hDevData; + + /*! System specific data that gets passed into system callback functions. */ + IMG_HANDLE hSysData; + + IMG_BOOL bHasNonMappableLocalMemory; + + /*! Indicates if system supports FBCDC v3.1 */ + IMG_BOOL bHasFBCDCVersion31; + + PHYS_HEAP_CONFIG *pasPhysHeaps; + IMG_UINT32 ui32PhysHeapCount; + + /*! + *! Callbacks to change system device power state at the beginning and end + *! of a power state change (optional). + */ + PFN_SYS_PRE_POWER pfnPrePowerState; + PFN_SYS_POST_POWER pfnPostPowerState; + + /*! Callback to obtain the clock frequency from the device (optional). */ + PFN_SYS_DEV_CLK_FREQ_GET pfnClockFreqGet; + +#if defined(SUPPORT_SOC_TIMER) + /*! Callback to read SoC timer register value (mandatory). */ + PFN_SYS_DEV_SOC_TIMER_READ pfnSoCTimerRead; +#endif + + /*! + *! Callback to handle memory budgeting. Can be used to reject allocations + *! over a certain size (optional). + */ + PFN_SYS_DEV_CHECK_MEM_ALLOC_SIZE pfnCheckMemAllocSize; + + /*! + *! Callback to perform host CPU cache maintenance. Might be needed for + *! architectures which allow extensions such as RISC-V (optional). 
+ */ + PFN_SYS_DEV_HOST_CACHE_MAINTENANCE pfnHostCacheMaintenance; + IMG_BOOL bHasPhysicalCacheMaintenance; + +#if defined(SUPPORT_TRUSTED_DEVICE) + /*! + *! Callback to send FW image and FW boot time parameters to the trusted + *! device. + */ + PFN_TD_SEND_FW_IMAGE pfnTDSendFWImage; + + /*! + *! Callback to send parameters needed in a power transition to the trusted + *! device. + */ + PFN_TD_SET_POWER_PARAMS pfnTDSetPowerParams; + + /*! Callbacks to ping the trusted device to securely run RGXStart/Stop() */ + PFN_TD_RGXSTART pfnTDRGXStart; + PFN_TD_RGXSTOP pfnTDRGXStop; +#endif /* defined(SUPPORT_TRUSTED_DEVICE) */ + + /*! Function that does device feature specific system layer initialisation */ + PFN_SYS_DEV_FEAT_DEP_INIT pfnSysDevFeatureDepInit; + +#if defined(SUPPORT_LINUX_DVFS) || defined(SUPPORT_PDVFS) + PVRSRV_DVFS sDVFS; +#endif + +#if defined(SUPPORT_ALT_REGBASE) + IMG_DEV_PHYADDR sAltRegsGpuPBase; +#endif + + /*! + *! Indicates if device physical address 0x0 might be used as GPU memory + *! (e.g. LMA system or UMA system with CPU PA 0x0 reserved by the OS, + *! but CPU PA != device PA and device PA 0x0 available for the GPU) + */ + IMG_BOOL bDevicePA0IsValid; + + /*! + *! Function to initialize System-specific virtualization. If not supported + *! this should be a NULL reference. Only present if + *! SUPPORT_GPUVIRT_VALIDATION is defined. + */ +#if defined(SUPPORT_GPUVIRT_VALIDATION) + PFN_SYS_DEV_VIRT_INIT pfnSysDevVirtInit; +#endif + + /*! + *! Callback to notify system layer of device errors. + *! NB. implementers should ensure that the minimal amount of work is + *! done in the callback function, as it will be executed in the main + *! RGX MISR. (e.g. any blocking or lengthy work should be performed by + *! a worker queue/thread instead.) + */ + PFN_SYS_DEV_ERROR_NOTIFY pfnSysDevErrorNotify; + + /*! + *! Slave DMA channel request callbacks + */ + PFN_SLAVE_DMA_CHAN pfnSlaveDMAGetChan; + PFN_SLAVE_DMA_FREE pfnSlaveDMAFreeChan; + /*! + *! 
Conversion of device memory to DMA addresses + */ + PFN_DEV_PHY_ADDR_2_DMA_ADDR pfnDevPhysAddr2DmaAddr; + /*! + *! DMA channel names + */ + IMG_CHAR *pszDmaTxChanName; + IMG_CHAR *pszDmaRxChanName; + /*! + *! DMA device transfer restrictions + */ + IMG_UINT32 ui32DmaAlignment; + IMG_UINT32 ui32DmaTransferUnit; + /*! + *! System-wide presence of DMA capabilities + */ + IMG_BOOL bHasDma; + +}; + +#endif /* PVRSRV_DEVICE_H*/ diff --git a/drivers/gpu/drm/phytium/octopus/pvrsrv_device_types.h b/drivers/gpu/drm/phytium/octopus/pvrsrv_device_types.h new file mode 100644 index 000000000000..07ff70b78164 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvrsrv_device_types.h @@ -0,0 +1,55 @@ +/*************************************************************************/ /*! +@File +@Title PhytiumVR device type definitions +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(PVRSRV_DEVICE_TYPES_H)
+#define PVRSRV_DEVICE_TYPES_H
+
+#include "img_types.h"
+
+#define PVRSRV_MAX_DEVICES 16 /*!< Largest supported number of devices on the system */
+
+#if defined(__KERNEL__) && defined(__linux__) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+#endif /* PVRSRV_DEVICE_TYPES_H */
diff --git a/drivers/gpu/drm/phytium/octopus/pvrsrv_devvar.h b/drivers/gpu/drm/phytium/octopus/pvrsrv_devvar.h
new file mode 100644
index 000000000000..1201d3aed6d1
--- /dev/null
+++ b/drivers/gpu/drm/phytium/octopus/pvrsrv_devvar.h
@@ -0,0 +1,291 @@
+/*************************************************************************/ /*!
+@File +@Title Services Device Variable interface header +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Defines the client side interface for device variables +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef PVRSRV_DEVVAR_H +#define PVRSRV_DEVVAR_H + +#include "img_types.h" +#include "img_defs.h" +#include "pvr_debug.h" +#include + +#if defined(__cplusplus) +extern "C" { +#endif + +#define DEVVAR_MAX_NAME_LEN 32 + +typedef struct _PVRSRV_DEVVARCTX_ *PDEVVARCTX; +typedef struct _PVRSRV_DEVVAR_ *PDEVVAR; + +typedef struct PVRSRV_DEV_VAR_UPDATE_TAG +{ + PDEVVAR psDevVar; /*!< Pointer to the dev var */ + IMG_UINT32 ui32UpdateValue; /*!< the update value */ +} PVRSRV_DEV_VAR_UPDATE; + +/*************************************************************************/ /*! +@Function PVRSRVDevVarContextCreate + +@Description Create a new device variable context + +@Input psDevConnection Device to create the device + variable context on + +@Output phDevVarContext Handle to the created device + variable context + +@Return PVRSRV_OK if the device variable context was successfully + created +*/ +/*****************************************************************************/ +IMG_EXPORT PVRSRV_ERROR +PVRSRVDevVarContextCreate(const PVRSRV_DEV_CONNECTION *psDevConnection, + PDEVVARCTX *phDevVarContext); + +/*************************************************************************/ /*! 
+@Function PVRSRVDevVarContextDestroy + +@Description Destroy a device variable context + +@Input hDevVarContext Handle to the device variable + context to destroy + +@Return None +*/ +/*****************************************************************************/ +IMG_EXPORT void +PVRSRVDevVarContextDestroy(PDEVVARCTX hDevVarContext); + +/*************************************************************************/ /*! +@Function PVRSRVDevVarAlloc + +@Description Allocate a new device variable on the specified device + variable context. The device variable's value is initialised + with the value passed in ui32InitialValue. + +@Input hDevVarContext Handle to the device variable + context +@Input ui32InitialValue Value to initially assign to the + new variable +@Input pszDevVarName Name assigned to the device variable + (for debug purposes) + +@Output ppsDevVar Created device variable + +@Return PVRSRV_OK if the device variable was successfully created +*/ +/*****************************************************************************/ +IMG_EXPORT PVRSRV_ERROR +PVRSRVDevVarAllocI(PDEVVARCTX hDevVarContext, + PDEVVAR *ppsDevVar, + IMG_UINT32 ui32InitialValue, + const IMG_CHAR *pszDevVarName + PVR_DBG_FILELINE_PARAM); +#define PVRSRVDevVarAlloc(hDevVarContext, ppsDevVar, ui32InitialValue, pszDevVarName) \ + PVRSRVDevVarAllocI( (hDevVarContext), (ppsDevVar), (ui32InitialValue), (pszDevVarName) \ + PVR_DBG_FILELINE ) + +/*************************************************************************/ /*! +@Function PVRSRVDevVarFree + +@Description Free a device variable + +@Input psDevVar The device variable to free + +@Return None +*/ +/*****************************************************************************/ +IMG_EXPORT void +PVRSRVDevVarFree(PDEVVAR psDevVar); + +/*************************************************************************/ /*! 
+@Function PVRSRVDevVarSet + +@Description Set the device variable to a value + +@Input psDevVar The device variable to set + +@Input ui32Value Value to set it to + +@Return None +*/ +/*****************************************************************************/ +IMG_EXPORT void +PVRSRVDevVarSet(PDEVVAR psDevVar, + IMG_UINT32 ui32Value); + +/*************************************************************************/ /*! +@Function PVRSRVDevVarGet + +@Description Get the current value of the device variable + +@Input psDevVar The device variable to get the + value of + +@Return Value of the variable +*/ +/*****************************************************************************/ +IMG_EXPORT IMG_UINT32 +PVRSRVDevVarGet(PDEVVAR psDevVar); + +/*************************************************************************/ /*! +@Function PVRSRVDevVarGetFirmwareAddr + +@Description Returns the address of the associated firmware value for a + specified device integer (not exposed to client) + +@Input psDevVar The device variable to resolve + +@Return The firmware address of the device variable +*/ +/*****************************************************************************/ +IMG_EXPORT IMG_UINT32 +PVRSRVDevVarGetFirmwareAddr(PDEVVAR psDevVar); + +#if defined(PDUMP) +/*************************************************************************/ /*! +@Function PVRSRVDevVarPDump + +@Description PDump the current value of the device variable + +@Input psDevVar The device variable to PDump + +@Return None +*/ +/*****************************************************************************/ +IMG_EXPORT void +PVRSRVDevVarPDump(PDEVVAR psDevVar); + +/*************************************************************************/ /*! 
+@Function PVRSRVDevVarPDumpPol + +@Description Do a PDump poll of the device variable + +@Input psDevVar The device variable to PDump + +@Input ui32Value Value to Poll for + +@Input ui32Mask PDump mask operator + +@Input ui32PDumpFlags PDump flags + +@Return None +*/ +/*****************************************************************************/ +IMG_EXPORT void +PVRSRVDevVarPDumpPol(PDEVVAR psDevVar, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + IMG_UINT32 ui32PDumpFlags); + +/*************************************************************************/ /*! +@Function PVRSRVDevVarPDumpCBP + +@Description Do a PDump CB poll using the device variable + +@Input psDevVar The device variable to PDump + +@Input uiWriteOffset Current write offset of buffer + +@Input uiPacketSize Size of the packet to write into CB + +@Input uiBufferSize Size of the CB + +@Return None +*/ +/*****************************************************************************/ +IMG_EXPORT void +PVRSRVDevVarPDumpCBP(PDEVVAR psDevVar, + IMG_UINT64 uiWriteOffset, + IMG_UINT64 uiPacketSize, + IMG_UINT64 uiBufferSize); +#else /* PDUMP */ + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVDevVarPDump) +#endif +static INLINE void +PVRSRVDevVarPDump(PDEVVAR psDevVar) +{ + PVR_UNREFERENCED_PARAMETER(psDevVar); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVDevVarPDumpPol) +#endif +static INLINE void +PVRSRVDevVarPDumpPol(PDEVVAR psDevVar, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + IMG_UINT32 ui32PDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psDevVar); + PVR_UNREFERENCED_PARAMETER(ui32Value); + PVR_UNREFERENCED_PARAMETER(ui32Mask); + PVR_UNREFERENCED_PARAMETER(eOperator); + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVDevVarPDumpCBP) +#endif +static INLINE void +PVRSRVDevVarPDumpCBP(PDEVVAR psDevVar, + IMG_UINT64 uiWriteOffset, + IMG_UINT64 uiPacketSize, + IMG_UINT64 uiBufferSize) +{ 
+ PVR_UNREFERENCED_PARAMETER(psDevVar); + PVR_UNREFERENCED_PARAMETER(uiWriteOffset); + PVR_UNREFERENCED_PARAMETER(uiPacketSize); + PVR_UNREFERENCED_PARAMETER(uiBufferSize); +} +#endif /* PDUMP */ + +#if defined(__cplusplus) +} +#endif +#endif /* PVRSRV_DEVVAR_H */ diff --git a/drivers/gpu/drm/phytium/octopus/pvrsrv_error.c b/drivers/gpu/drm/phytium/octopus/pvrsrv_error.c new file mode 100644 index 000000000000..bb6537123587 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvrsrv_error.c @@ -0,0 +1,61 @@ +/*************************************************************************/ /*! +@File +@Title Services error support +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "img_defs.h" +#include "pvr_debug.h" + +IMG_EXPORT +const IMG_CHAR *PVRSRVGetErrorString(PVRSRV_ERROR eError) +{ + switch (eError) + { + case PVRSRV_OK: + return "PVRSRV_OK"; +#define PVRE(x) \ + case x: \ + return #x; +#include "pvrsrv_errors.h" +#undef PVRE + default: + return "Unknown PVRSRV error number"; + } +} diff --git a/drivers/gpu/drm/phytium/octopus/pvrsrv_error.h b/drivers/gpu/drm/phytium/octopus/pvrsrv_error.h new file mode 100644 index 000000000000..14c77427a78b --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvrsrv_error.h @@ -0,0 +1,61 @@ +/*************************************************************************/ /*! +@File pvrsrv_error.h +@Title services error enumerant +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Defines error codes used by any/all services modules +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(PVRSRV_ERROR_H) +#define PVRSRV_ERROR_H + +/*! + ***************************************************************************** + * Error values + *****************************************************************************/ +typedef enum PVRSRV_ERROR_TAG +{ + PVRSRV_OK, +#define PVRE(x) x, +#include "pvrsrv_errors.h" +#undef PVRE + PVRSRV_ERROR_FORCE_I32 = 0x7fffffff + +} PVRSRV_ERROR; + +#endif /* !defined(PVRSRV_ERROR_H) */ diff --git a/drivers/gpu/drm/phytium/octopus/pvrsrv_errors.h b/drivers/gpu/drm/phytium/octopus/pvrsrv_errors.h new file mode 100644 index 000000000000..c256215c989b --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvrsrv_errors.h @@ -0,0 +1,407 @@ +/*************************************************************************/ /*! +@File pvrsrv_errors.h +@Title services error codes +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Defines error codes used by any/all services modules +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/* Don't add include guards to this file! 
*/ + +PVRE(PVRSRV_ERROR_OUT_OF_MEMORY) +PVRE(PVRSRV_ERROR_TOO_FEW_BUFFERS) +PVRE(PVRSRV_ERROR_INVALID_PARAMS) +PVRE(PVRSRV_ERROR_INIT_FAILURE) +PVRE(PVRSRV_ERROR_CANT_REGISTER_CALLBACK) +PVRE(PVRSRV_ERROR_INVALID_DEVICE) +PVRE(PVRSRV_ERROR_NOT_OWNER) +PVRE(PVRSRV_ERROR_BAD_MAPPING) +PVRE(PVRSRV_ERROR_TIMEOUT) +PVRE(PVRSRV_ERROR_NOT_IMPLEMENTED) +PVRE(PVRSRV_ERROR_FLIP_CHAIN_EXISTS) +PVRE(PVRSRV_ERROR_INVALID_SWAPINTERVAL) +PVRE(PVRSRV_ERROR_SCENE_INVALID) +PVRE(PVRSRV_ERROR_STREAM_ERROR) +PVRE(PVRSRV_ERROR_FAILED_DEPENDENCIES) +PVRE(PVRSRV_ERROR_CMD_NOT_PROCESSED) +PVRE(PVRSRV_ERROR_CMD_TOO_BIG) +PVRE(PVRSRV_ERROR_DEVICE_REGISTER_FAILED) +PVRE(PVRSRV_ERROR_TOOMANYBUFFERS) +PVRE(PVRSRV_ERROR_NOT_SUPPORTED) +PVRE(PVRSRV_ERROR_PROCESSING_BLOCKED) +PVRE(PVRSRV_ERROR_CANNOT_FLUSH_QUEUE) +PVRE(PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE) +PVRE(PVRSRV_ERROR_CANNOT_GET_RENDERDETAILS) +PVRE(PVRSRV_ERROR_RETRY) +PVRE(PVRSRV_ERROR_DDK_VERSION_MISMATCH) +PVRE(PVRSRV_ERROR_DDK_BUILD_MISMATCH) +PVRE(PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH) +PVRE(PVRSRV_ERROR_BVNC_MISMATCH) +PVRE(PVRSRV_ERROR_FWPROCESSOR_MISMATCH) +PVRE(PVRSRV_ERROR_UPLOAD_TOO_BIG) +PVRE(PVRSRV_ERROR_INVALID_FLAGS) +PVRE(PVRSRV_ERROR_FAILED_TO_REGISTER_PROCESS) +PVRE(PVRSRV_ERROR_UNABLE_TO_LOAD_LIBRARY) +PVRE(PVRSRV_ERROR_UNABLE_GET_FUNC_ADDR) +PVRE(PVRSRV_ERROR_UNLOAD_LIBRARY_FAILED) +PVRE(PVRSRV_ERROR_BRIDGE_CALL_FAILED) +PVRE(PVRSRV_ERROR_IOCTL_CALL_FAILED) +PVRE(PVRSRV_ERROR_MMU_API_PROTOCOL_ERROR) +PVRE(PVRSRV_ERROR_MMU_CONFIG_IS_WRONG) +PVRE(PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND) +PVRE(PVRSRV_ERROR_MMU_FAILED_TO_ALLOCATE_PAGETABLES) +PVRE(PVRSRV_ERROR_MMU_FAILED_TO_CREATE_HEAP) +PVRE(PVRSRV_ERROR_MMU_FAILED_TO_MAP_PAGE_TABLE) +PVRE(PVRSRV_ERROR_MMU_FAILED_TO_UNMAP_PAGE_TABLE) +PVRE(PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE) +PVRE(PVRSRV_ERROR_MMU_LIVE_ALLOCATIONS_IN_HEAP) +PVRE(PVRSRV_ERROR_MMU_RESERVATION_NOT_INSIDE_HEAP) +PVRE(PVRSRV_ERROR_PMR_NEW_MEMORY) +PVRE(PVRSRV_ERROR_PMR_STILL_REFERENCED) 
+PVRE(PVRSRV_ERROR_PMR_CLIENT_NOT_TRUSTED) +PVRE(PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES) +PVRE(PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY) +PVRE(PVRSRV_ERROR_PMR_MISMATCHED_ATTRIBUTES) +PVRE(PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE) +PVRE(PVRSRV_ERROR_PMR_NOT_PERMITTED) +PVRE(PVRSRV_ERROR_PMR_ALREADY_OCCUPIED) +PVRE(PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR) +PVRE(PVRSRV_ERROR_PMR_WRONG_PASSWORD_OR_STALE_PMR) +PVRE(PVRSRV_ERROR_PMR_WRONG_PMR_TYPE) +PVRE(PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS) +PVRE(PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE) +PVRE(PVRSRV_ERROR_PMR_BAD_CHUNK_SIZE) +PVRE(PVRSRV_ERROR_PMR_MAPPINGTABLE_MISMATCH) +PVRE(PVRSRV_ERROR_PMR_INVALID_CHUNK) +PVRE(PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING) +PVRE(PVRSRV_ERROR_PMR_EMPTY) +PVRE(PVRSRV_ERROR_PMR_NO_CPU_MAP_FOUND) +PVRE(PVRSRV_ERROR_PMR_CPU_PAGE_UNMAP_FAILED) +PVRE(PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED) +PVRE(PVRSRV_ERROR_PMR_PAGE_POISONING_FAILED) +PVRE(PVRSRV_ERROR_PMR_INVALID_MAP_INDEX_ARRAY) +PVRE(PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP) +PVRE(PVRSRV_ERROR_DEVICEMEM_BAD_IMPORT_SIZE) +PVRE(PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION) +PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX) +PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_INDEX) +PVRE(PVRSRV_ERROR_DEVICEMEM_MAP_FAILED) +PVRE(PVRSRV_ERROR_DEVICEMEM_NON_ZERO_USAGE_COUNT) +PVRE(PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE) +PVRE(PVRSRV_ERROR_DEVICEMEM_VA_ALLOC_FAILED) +PVRE(PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA) +PVRE(PVRSRV_ERROR_DEVICEMEM_OUT_OF_DEVICE_VM) +PVRE(PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED) +PVRE(PVRSRV_ERROR_DEVICEMEM_NO_MAPPING) +PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_PMR_FLAGS) +PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_LMA_HEAP) +PVRE(PVRSRV_ERROR_INVALID_MMU_TYPE) +PVRE(PVRSRV_ERROR_BUFFER_DEVICE_NOT_FOUND) +PVRE(PVRSRV_ERROR_BUFFER_DEVICE_ALREADY_PRESENT) +PVRE(PVRSRV_ERROR_PCI_DEVICE_NOT_FOUND) +PVRE(PVRSRV_ERROR_PCI_CALL_FAILED) +PVRE(PVRSRV_ERROR_PCI_REGION_TOO_SMALL) +PVRE(PVRSRV_ERROR_PCI_REGION_UNAVAILABLE) 
+PVRE(PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH) +PVRE(PVRSRV_ERROR_REGISTER_BASE_NOT_SET) +PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_USER_MEM) +PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_VP_MEMORY) +PVRE(PVRSRV_ERROR_FAILED_TO_MAP_SHARED_PBDESC) +PVRE(PVRSRV_ERROR_FAILED_TO_MAP_KERNELVIRTUAL) +PVRE(PVRSRV_ERROR_FAILED_TO_GET_PHYS_ADDR) +PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY) +PVRE(PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY) +PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES) +PVRE(PVRSRV_ERROR_FAILED_TO_FREE_PAGES) +PVRE(PVRSRV_ERROR_FAILED_TO_COPY_PAGES) +PVRE(PVRSRV_ERROR_UNABLE_TO_LOCK_PAGES) +PVRE(PVRSRV_ERROR_UNABLE_TO_UNLOCK_PAGES) +PVRE(PVRSRV_ERROR_STILL_MAPPED) +PVRE(PVRSRV_ERROR_MAPPING_NOT_FOUND) +PVRE(PVRSRV_ERROR_PHYS_ADDRESS_EXCEEDS_32BIT) +PVRE(PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE) +PVRE(PVRSRV_ERROR_INVALID_SEGMENT_BLOCK) +PVRE(PVRSRV_ERROR_INVALID_GFXDEVDEVDATA) +PVRE(PVRSRV_ERROR_INVALID_DEVINFO) +PVRE(PVRSRV_ERROR_INVALID_MEMINFO) +PVRE(PVRSRV_ERROR_INVALID_MISCINFO) +PVRE(PVRSRV_ERROR_UNKNOWN_IOCTL) +PVRE(PVRSRV_ERROR_INVALID_CONTEXT) +PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT) +PVRE(PVRSRV_ERROR_INVALID_HEAP) +PVRE(PVRSRV_ERROR_INVALID_KERNELINFO) +PVRE(PVRSRV_ERROR_UNKNOWN_POWER_STATE) +PVRE(PVRSRV_ERROR_INVALID_HANDLE_TYPE) +PVRE(PVRSRV_ERROR_INVALID_WRAP_TYPE) +PVRE(PVRSRV_ERROR_INVALID_PHYS_ADDR) +PVRE(PVRSRV_ERROR_INVALID_CPU_ADDR) +PVRE(PVRSRV_ERROR_INVALID_HEAPINFO) +PVRE(PVRSRV_ERROR_INVALID_PERPROC) +PVRE(PVRSRV_ERROR_FAILED_TO_RETRIEVE_HEAPINFO) +PVRE(PVRSRV_ERROR_INVALID_MAP_REQUEST) +PVRE(PVRSRV_ERROR_INVALID_UNMAP_REQUEST) +PVRE(PVRSRV_ERROR_UNABLE_TO_FIND_MAPPING_HEAP) +PVRE(PVRSRV_ERROR_MAPPING_STILL_IN_USE) +PVRE(PVRSRV_ERROR_EXCEEDED_HW_LIMITS) +PVRE(PVRSRV_ERROR_NO_STAGING_BUFFER_ALLOCATED) +PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_PERPROC_AREA) +PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT) +PVRE(PVRSRV_ERROR_UNABLE_TO_ENABLE_EVENT) +PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_EVENT) +PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT) 
+PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD) +PVRE(PVRSRV_ERROR_UNABLE_TO_CLOSE_THREAD) +PVRE(PVRSRV_ERROR_THREAD_READ_ERROR) +PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_ISR_HANDLER) +PVRE(PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR) +PVRE(PVRSRV_ERROR_UNABLE_TO_UNINSTALL_ISR) +PVRE(PVRSRV_ERROR_ISR_ALREADY_INSTALLED) +PVRE(PVRSRV_ERROR_ISR_NOT_INSTALLED) +PVRE(PVRSRV_ERROR_UNABLE_TO_INITIALISE_INTERRUPT) +PVRE(PVRSRV_ERROR_UNABLE_TO_RETRIEVE_INFO) +PVRE(PVRSRV_ERROR_UNABLE_TO_DO_BACKWARDS_BLIT) +PVRE(PVRSRV_ERROR_UNABLE_TO_CLOSE_SERVICES) +PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_CONTEXT) +PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_RESOURCE) +PVRE(PVRSRV_ERROR_INVALID_CCB_COMMAND) +PVRE(PVRSRV_ERROR_KERNEL_CCB_FULL) +PVRE(PVRSRV_ERROR_FLIP_FAILED) +PVRE(PVRSRV_ERROR_UNBLANK_DISPLAY_FAILED) +PVRE(PVRSRV_ERROR_TIMEOUT_POLLING_FOR_VALUE) +PVRE(PVRSRV_ERROR_TIMEOUT_WAITING_FOR_CLIENT_CCB) +PVRE(PVRSRV_ERROR_CREATE_RENDER_CONTEXT_FAILED) +PVRE(PVRSRV_ERROR_UNKNOWN_PRIMARY_FRAG) +PVRE(PVRSRV_ERROR_UNEXPECTED_SECONDARY_FRAG) +PVRE(PVRSRV_ERROR_UNEXPECTED_PRIMARY_FRAG) +PVRE(PVRSRV_ERROR_UNABLE_TO_INSERT_FENCE_ID) +PVRE(PVRSRV_ERROR_BLIT_SETUP_FAILED) +PVRE(PVRSRV_ERROR_SUBMIT_NEEDED) +PVRE(PVRSRV_ERROR_PDUMP_NOT_AVAILABLE) +PVRE(PVRSRV_ERROR_PDUMP_BUFFER_FULL) +PVRE(PVRSRV_ERROR_PDUMP_BUF_OVERFLOW) +PVRE(PVRSRV_ERROR_PDUMP_NOT_ACTIVE) +PVRE(PVRSRV_ERROR_INCOMPLETE_LINE_OVERLAPS_PAGES) +PVRE(PVRSRV_ERROR_MUTEX_DESTROY_FAILED) +PVRE(PVRSRV_ERROR_MUTEX_INTERRUPTIBLE_ERROR) +PVRE(PVRSRV_ERROR_INSUFFICIENT_SPACE_FOR_COMMAND) +PVRE(PVRSRV_ERROR_PROCESS_NOT_INITIALISED) +PVRE(PVRSRV_ERROR_PROCESS_NOT_FOUND) +PVRE(PVRSRV_ERROR_SRV_CONNECT_FAILED) +PVRE(PVRSRV_ERROR_SRV_DISCONNECT_FAILED) +PVRE(PVRSRV_ERROR_DEINT_PHASE_FAILED) +PVRE(PVRSRV_ERROR_INIT2_PHASE_FAILED) +PVRE(PVRSRV_ERROR_UNABLE_TO_FIND_RESOURCE) +PVRE(PVRSRV_ERROR_NO_DC_DEVICES_FOUND) +PVRE(PVRSRV_ERROR_DC_DEVICE_INACCESSIBLE) +PVRE(PVRSRV_ERROR_DC_INVALID_MAXDEPTH) +PVRE(PVRSRV_ERROR_UNABLE_TO_OPEN_DC_DEVICE) 
+PVRE(PVRSRV_ERROR_UNABLE_TO_UNREGISTER_DEVICE) +PVRE(PVRSRV_ERROR_NO_DEVICEDATA_FOUND) +PVRE(PVRSRV_ERROR_NO_DEVICENODE_FOUND) +PVRE(PVRSRV_ERROR_NO_CLIENTNODE_FOUND) +PVRE(PVRSRV_ERROR_FAILED_TO_PROCESS_QUEUE) +PVRE(PVRSRV_ERROR_UNABLE_TO_INIT_TASK) +PVRE(PVRSRV_ERROR_UNABLE_TO_SCHEDULE_TASK) +PVRE(PVRSRV_ERROR_UNABLE_TO_KILL_TASK) +PVRE(PVRSRV_ERROR_UNABLE_TO_ENABLE_TIMER) +PVRE(PVRSRV_ERROR_UNABLE_TO_DISABLE_TIMER) +PVRE(PVRSRV_ERROR_UNABLE_TO_REMOVE_TIMER) +PVRE(PVRSRV_ERROR_UNKNOWN_PIXEL_FORMAT) +PVRE(PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE) +PVRE(PVRSRV_ERROR_HANDLE_NOT_ALLOCATED) +PVRE(PVRSRV_ERROR_HANDLE_TYPE_MISMATCH) +PVRE(PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE) +PVRE(PVRSRV_ERROR_HANDLE_NOT_SHAREABLE) +PVRE(PVRSRV_ERROR_HANDLE_NOT_FOUND) +PVRE(PVRSRV_ERROR_INVALID_SUBHANDLE) +PVRE(PVRSRV_ERROR_HANDLE_BATCH_IN_USE) +PVRE(PVRSRV_ERROR_HANDLE_BATCH_COMMIT_FAILURE) +PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_HASH_TABLE) +PVRE(PVRSRV_ERROR_UNABLE_TO_RETRIEVE_HASH_VALUE) +PVRE(PVRSRV_ERROR_UNABLE_TO_REMOVE_HASH_VALUE) +PVRE(PVRSRV_ERROR_UNABLE_TO_INSERT_HASH_VALUE) +PVRE(PVRSRV_ERROR_INSERT_HASH_TABLE_DATA_FAILED) +PVRE(PVRSRV_ERROR_UNSUPPORTED_BACKING_STORE) +PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_BM_HEAP) +PVRE(PVRSRV_ERROR_UNKNOWN_INIT_SERVER_STATE) +PVRE(PVRSRV_ERROR_NO_FREE_DEVICEIDS_AVAILABLE) +PVRE(PVRSRV_ERROR_INVALID_DEVICEID) +PVRE(PVRSRV_ERROR_DEVICEID_NOT_FOUND) +PVRE(PVRSRV_ERROR_MEMORY_TEST_FAILED) +PVRE(PVRSRV_ERROR_CPUPADDR_TEST_FAILED) +PVRE(PVRSRV_ERROR_COPY_TEST_FAILED) +PVRE(PVRSRV_ERROR_SEMAPHORE_NOT_INITIALISED) +PVRE(PVRSRV_ERROR_UNABLE_TO_RELEASE_CLOCK) +PVRE(PVRSRV_ERROR_CLOCK_REQUEST_FAILED) +PVRE(PVRSRV_ERROR_DISABLE_CLOCK_FAILURE) +PVRE(PVRSRV_ERROR_UNABLE_TO_SET_CLOCK_RATE) +PVRE(PVRSRV_ERROR_UNABLE_TO_ROUND_CLOCK_RATE) +PVRE(PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK) +PVRE(PVRSRV_ERROR_UNABLE_TO_GET_CLOCK) +PVRE(PVRSRV_ERROR_UNABLE_TO_GET_PARENT_CLOCK) +PVRE(PVRSRV_ERROR_UNABLE_TO_GET_SYSTEM_CLOCK) +PVRE(PVRSRV_ERROR_UNKNOWN_SGL_ERROR) 
+PVRE(PVRSRV_ERROR_SYSTEM_POWER_CHANGE_FAILURE) +PVRE(PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE) +PVRE(PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED) +PVRE(PVRSRV_ERROR_BAD_SYNC_STATE) +PVRE(PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE) +PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_MMUCONTEXT_ID) +PVRE(PVRSRV_ERROR_PARAMETER_BUFFER_INVALID_ALIGNMENT) +PVRE(PVRSRV_ERROR_UNABLE_TO_ACQUIRE_CONNECTION) +PVRE(PVRSRV_ERROR_UNABLE_TO_RELEASE_CONNECTION) +PVRE(PVRSRV_ERROR_PHYSHEAP_ID_IN_USE) +PVRE(PVRSRV_ERROR_PHYSHEAP_ID_INVALID) +PVRE(PVRSRV_ERROR_PHYSHEAP_CONFIG) +PVRE(PVRSRV_ERROR_HP_REQUEST_TOO_LONG) +PVRE(PVRSRV_ERROR_INVALID_SYNC_PRIM) +PVRE(PVRSRV_ERROR_INVALID_SYNC_PRIM_OP) +PVRE(PVRSRV_ERROR_INVALID_SYNC_CONTEXT) +PVRE(PVRSRV_ERROR_BP_NOT_SET) +PVRE(PVRSRV_ERROR_BP_ALREADY_SET) +PVRE(PVRSRV_ERROR_FEATURE_DISABLED) +PVRE(PVRSRV_ERROR_REG_CONFIG_ENABLED) +PVRE(PVRSRV_ERROR_REG_CONFIG_FULL) +PVRE(PVRSRV_ERROR_REG_CONFIG_INVALID_TYPE) +PVRE(PVRSRV_ERROR_MEMORY_ACCESS) +PVRE(PVRSRV_ERROR_NO_SYSTEM_BUFFER) +PVRE(PVRSRV_ERROR_DC_INVALID_CONFIG) +PVRE(PVRSRV_ERROR_DC_INVALID_CROP_RECT) +PVRE(PVRSRV_ERROR_DC_INVALID_DISPLAY_RECT) +PVRE(PVRSRV_ERROR_DC_INVALID_BUFFER_DIMS) +PVRE(PVRSRV_ERROR_DC_INVALID_TRANSFORM) +PVRE(PVRSRV_ERROR_DC_INVALID_SCALE) +PVRE(PVRSRV_ERROR_DC_INVALID_CUSTOM) +PVRE(PVRSRV_ERROR_DC_TOO_MANY_PIPES) +PVRE(PVRSRV_ERROR_DC_INVALID_PLANE_ALPHA) +PVRE(PVRSRV_ERROR_NOT_READY) +PVRE(PVRSRV_ERROR_RESOURCE_UNAVAILABLE) +PVRE(PVRSRV_ERROR_UNSUPPORTED_PIXEL_FORMAT) +PVRE(PVRSRV_ERROR_UNSUPPORTED_MEMORY_LAYOUT) +PVRE(PVRSRV_ERROR_UNSUPPORTED_FB_COMPRESSION_MODE) +PVRE(PVRSRV_ERROR_UNSUPPORTED_DIMS) +PVRE(PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE) +PVRE(PVRSRV_ERROR_UNABLE_TO_ADD_TIMER) +PVRE(PVRSRV_ERROR_NOT_FOUND) +PVRE(PVRSRV_ERROR_ALREADY_OPEN) +PVRE(PVRSRV_ERROR_STREAM_MISUSE) +PVRE(PVRSRV_ERROR_STREAM_FULL) +PVRE(PVRSRV_ERROR_STREAM_READLIMIT_REACHED) +PVRE(PVRSRV_ERROR_STREAM_NOT_ENOUGH_SPACE) +PVRE(PVRSRV_ERROR_PHYSMEM_NOT_ALLOCATED) +PVRE(PVRSRV_ERROR_PBSIZE_ALREADY_MAX) 
+PVRE(PVRSRV_ERROR_PBSIZE_ALREADY_MIN) +PVRE(PVRSRV_ERROR_INVALID_PB_CONFIG) +PVRE(PVRSRV_ERROR_META_THREAD0_NOT_ENABLED) +PVRE(PVRSRV_ERROR_NOT_AUTHENTICATED) +PVRE(PVRSRV_ERROR_REQUEST_TDFWMEM_PAGES_FAIL) +PVRE(PVRSRV_ERROR_INIT_TDFWMEM_PAGES_FAIL) +PVRE(PVRSRV_ERROR_REQUEST_TDSECUREBUF_PAGES_FAIL) +PVRE(PVRSRV_ERROR_INIT_TDSECUREBUF_PAGES_FAIL) +PVRE(PVRSRV_ERROR_MUTEX_ALREADY_CREATED) +PVRE(PVRSRV_ERROR_DBGTABLE_ALREADY_REGISTERED) +PVRE(PVRSRV_ERROR_ALREADY_EXISTS) +PVRE(PVRSRV_ERROR_UNABLE_TO_SEND_PULSE) +PVRE(PVRSRV_ERROR_TASK_FAILED) +PVRE(PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED) +PVRE(PVRSRV_ERROR_INVALID_GPU_ADDR) +PVRE(PVRSRV_ERROR_INVALID_OFFSET) +PVRE(PVRSRV_ERROR_CCCB_STALLED) +PVRE(PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE) +PVRE(PVRSRV_ERROR_NOT_ENABLED) +PVRE(PVRSRV_ERROR_SYSTEM_LOCAL_MEMORY_INIT_FAIL) +PVRE(PVRSRV_ERROR_FW_IMAGE_MISMATCH) +PVRE(PVRSRV_ERROR_PDUMP_NOT_ALLOWED) +PVRE(PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL) +PVRE(PVRSRV_ERROR_RPM_PBSIZE_ALREADY_MAX) +PVRE(PVRSRV_ERROR_NONZERO_REFCOUNT) +PVRE(PVRSRV_ERROR_SETAFFINITY_FAILED) +PVRE(PVRSRV_ERROR_UNABLE_TO_COMPILE_PDS) +PVRE(PVRSRV_ERROR_INTERNAL_ERROR) +PVRE(PVRSRV_ERROR_BRIDGE_EFAULT) +PVRE(PVRSRV_ERROR_BRIDGE_EINVAL) +PVRE(PVRSRV_ERROR_BRIDGE_ENOMEM) +PVRE(PVRSRV_ERROR_BRIDGE_ERANGE) +PVRE(PVRSRV_ERROR_BRIDGE_EPERM) +PVRE(PVRSRV_ERROR_BRIDGE_ENOTTY) +PVRE(PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED) +PVRE(PVRSRV_ERROR_PROBE_DEFER) +PVRE(PVRSRV_ERROR_INVALID_ALIGNMENT) +PVRE(PVRSRV_ERROR_CLOSE_FAILED) +PVRE(PVRSRV_ERROR_NOT_INITIALISED) +PVRE(PVRSRV_ERROR_CONVERSION_FAILED) +PVRE(PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL) +PVRE(PVRSRV_ERROR_RA_REQUEST_VIRT_ADDR_FAIL) +PVRE(PVRSRV_ERROR_RA_INSERT_RESOURCE_SPAN_FAILED) +PVRE(PVRSRV_ERROR_RA_ATTEMPT_ALLOC_ALIGNED_FAILED) +PVRE(PVRSRV_ERROR_OBJECT_STILL_REFERENCED) +PVRE(PVRSRV_ERROR_BVNC_UNSUPPORTED) +PVRE(PVRSRV_ERROR_INVALID_BVNC_PARAMS) +PVRE(PVRSRV_ERROR_ALIGNMENT_ARRAY_NOT_AVAILABLE) +PVRE(PVRSRV_ERROR_DEVICEMEM_ADDITIONAL_HEAPS_IN_CONTEXT) 
+PVRE(PVRSRV_ERROR_PID_ALREADY_REGISTERED) +PVRE(PVRSRV_ERROR_PID_NOT_REGISTERED) +PVRE(PVRSRV_ERROR_SIGNAL_FAILED) +PVRE(PVRSRV_ERROR_INVALID_NOTIF_STREAM) +PVRE(PVRSRV_ERROR_INVALID_SPU_MASK) +PVRE(PVRSRV_ERROR_FREELIST_RECONSTRUCTION_FAILED) +PVRE(PVRSRV_ERROR_INVALID_PVZ_CONFIG) +PVRE(PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED) +PVRE(PVRSRV_ERROR_NOT_SW_TIMELINE) +PVRE(PVRSRV_ERROR_SW_TIMELINE_AT_LATEST_POINT) +PVRE(PVRSRV_ERROR_INVALID_PVZ_OSID) +PVRE(PVRSRV_ERROR_PVZ_OSID_IS_OFFLINE) +PVRE(PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG) +PVRE(PVRSRV_ERROR_INTERRUPTED) +PVRE(PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED) +PVRE(PVRSRV_ERROR_PDUMP_INVALID_BLOCKLEN) +PVRE(PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF) +PVRE(PVRSRV_ERROR_MULTIPLE_SECURITY_PDUMPS) +PVRE(PVRSRV_ERROR_BAD_PARAM_SIZE) +PVRE(PVRSRV_ERROR_INVALID_REQUEST) +PVRE(PVRSRV_ERROR_FAILED_TO_ACQUIRE_PAGES) +PVRE(PVRSRV_ERROR_TEST_FAILED) +PVRE(PVRSRV_ERROR_SYNC_PRIM_OP_NOT_SUPPORTED) +PVRE(PVRSRV_ERROR_FAILED_TO_GET_VIRT_ADDR) +PVRE(PVRSRV_ERROR_UNABLE_TO_FREE_RESOURCE) +PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_SEMAPHORE) +PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_SEMAPHORE) +PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_SEMAPHORE) +PVRE(PVRSRV_ERROR_TOO_MANY_SYNCS) +PVRE(PVRSRV_ERROR_ION_NO_CLIENT) +PVRE(PVRSRV_ERROR_ION_FAILED_TO_ALLOC) diff --git a/drivers/gpu/drm/phytium/octopus/pvrsrv_memalloc_physheap.h b/drivers/gpu/drm/phytium/octopus/pvrsrv_memalloc_physheap.h new file mode 100644 index 000000000000..3346cbdbe65e --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvrsrv_memalloc_physheap.h @@ -0,0 +1,85 @@ +/*************************************************************************/ /*! +@File pvrsrv_memalloc_physheap.h +@Title Services Phys Heap types +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Used in creating and allocating from Physical Heaps. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifndef PVRSRV_MEMALLOC_PHYSHEAP_H +#define PVRSRV_MEMALLOC_PHYSHEAP_H + +#include "img_defs.h" + +/* + * These IDs are replicated in the Device Memory allocation flags to allow + * allocations to be made in terms of their locality/use to ensure the correct + * physical heap is accessed for the given system/platform configuration. + * A system Phys Heap Config is linked to one or more Phys Heaps. When a heap + * is not present in the system configuration the allocation will fallback to + * the default GPU_LOCAL physical heap which all systems must define. + */ +typedef enum +{ + /* Services external heaps */ + PVRSRV_PHYS_HEAP_GPU_LOCAL = 0, /* default phys heap for device memory allocations */ + PVRSRV_PHYS_HEAP_CPU_LOCAL = 1, /* used for buffers with more CPU access than GPU */ + + /* Services internal heaps */ + PVRSRV_PHYS_HEAP_FW_MAIN = 2, /* runtime data, e.g. CCBs, sync objects */ + PVRSRV_PHYS_HEAP_EXTERNAL = 3, /* used by some PMR import/export factories where the physical memory heap is not managed by the pvrsrv driver */ + PVRSRV_PHYS_HEAP_GPU_PRIVATE = 4, /* Non CPU-mappable memory region. See PVRSRV_MEMALLOCFLAGS_CPU_MAPPABLE_MASK. */ + PVRSRV_PHYS_HEAP_GPU_COHERENT = 5, /* used for a cache coherent region */ + PVRSRV_PHYS_HEAP_GPU_SECURE = 6, /* used by security validation */ + PVRSRV_PHYS_HEAP_FW_CONFIG = 7, /* subheap of FW_MAIN, configuration data for FW init */ + PVRSRV_PHYS_HEAP_FW_CODE = 8, /* used by security validation or dedicated fw */ + PVRSRV_PHYS_HEAP_FW_PRIV_DATA = 9, /* internal FW data (like the stack, FW control data structures, etc.) 
*/ + PVRSRV_PHYS_HEAP_FW_PREMAP0 = 10, /* Host OS premap fw heap */ + PVRSRV_PHYS_HEAP_FW_PREMAP1 = 11, /* Guest OS 1 premap fw heap */ + PVRSRV_PHYS_HEAP_FW_PREMAP2 = 12, /* Guest OS 2 premap fw heap */ + PVRSRV_PHYS_HEAP_FW_PREMAP3 = 13, /* Guest OS 3 premap fw heap */ + PVRSRV_PHYS_HEAP_FW_PREMAP4 = 14, /* Guest OS 4 premap fw heap */ + PVRSRV_PHYS_HEAP_FW_PREMAP5 = 15, /* Guest OS 5 premap fw heap */ + PVRSRV_PHYS_HEAP_FW_PREMAP6 = 16, /* Guest OS 6 premap fw heap */ + PVRSRV_PHYS_HEAP_FW_PREMAP7 = 17, /* Guest OS 7 premap fw heap */ + PVRSRV_PHYS_HEAP_LAST +} PVRSRV_PHYS_HEAP; + + +static_assert(PVRSRV_PHYS_HEAP_LAST <= (0x1F + 1), "Ensure enum fits in memalloc flags bitfield."); + +#endif /* PVRSRV_MEMALLOC_PHYSHEAP_H */ diff --git a/drivers/gpu/drm/phytium/octopus/pvrsrv_memallocflags.h b/drivers/gpu/drm/phytium/octopus/pvrsrv_memallocflags.h new file mode 100644 index 000000000000..c6c22fface3b --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvrsrv_memallocflags.h @@ -0,0 +1,912 @@ +/*************************************************************************/ /*! +@File +@Title Device Memory Management +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description This file defines flags used on memory allocations and mappings + These flags are relevant throughout the memory management + software stack and are specified by users of services and + understood by all levels of the memory management in both + client and server. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef PVRSRV_MEMALLOCFLAGS_H +#define PVRSRV_MEMALLOCFLAGS_H + +#include "img_types.h" +#include "pvrsrv_memalloc_physheap.h" + +/*! + Type for specifying memory allocation flags. + */ + +typedef IMG_UINT64 PVRSRV_MEMALLOCFLAGS_T; +#define PVRSRV_MEMALLOCFLAGS_FMTSPEC IMG_UINT64_FMTSPECx + +#if defined(__KERNEL__) || defined(SERVICES_SC) +#include "pvrsrv_memallocflags_internal.h" +#endif /* __KERNEL__ */ + +/* + * --- MAPPING FLAGS 0..14 (15-bits) --- + * | 0-3 | 4-7 | 8-10 | 11-13 | 14 | + * | GPU-RW | CPU-RW | GPU-Caching | CPU-Caching | KM-Mappable | + * + * --- MISC FLAGS 15..23 (9-bits) --- + * | 15 | 17 | 18 | 19 | 20 | + * | Defer | SVM | Sparse-Dummy-Page | CPU-Cache-Clean | Sparse-Zero-Page | + * + * --- DEV CONTROL FLAGS 26..27 (2-bits) --- + * | 26-27 | + * | Device-Flags | + * + * --- MISC FLAGS 28..31 (4-bits) --- + * | 28 | 29 | 30 | 31 | + * | No-Cache-Align | Poison-On-Free | P.-On-Alloc | Zero-On-Alloc | + * + * --- VALIDATION FLAGS --- + * | 35 | + * | Shared-buffer | + * + * --- PHYS HEAP HINTS --- + * | 59-63 | + * | PhysHeap Hints | + * + */ + +/* + * ********************************************************** + * * * + * * MAPPING FLAGS * + * * * + * ********************************************************** + */ + +/*! + * This flag affects the device MMU protection flags, and specifies + * that the memory may be read by the GPU. + * + * Typically all device memory allocations would specify this flag. + * + * At the moment, memory allocations without this flag are not supported + * + * This flag will live with the PMR, thus subsequent mappings would + * honour this flag. + * + * This is a dual purpose flag. 
It specifies that memory is permitted + * to be read by the GPU, and also requests that the allocation is + * mapped into the GPU as a readable mapping + * + * To be clear: + * - When used as an argument on PMR creation; it specifies + * that GPU readable mappings will be _permitted_ + * - When used as an argument to a "map" function: it specifies + * that a GPU readable mapping is _desired_ + * - When used as an argument to "AllocDeviceMem": it specifies + * that the PMR will be created with permission to be mapped + * with a GPU readable mapping, _and_ that this PMR will be + * mapped with a GPU readable mapping. + * This distinction becomes important when (a) we export allocations; + * and (b) when we separate the creation of the PMR from the mapping. + */ +#define PVRSRV_MEMALLOCFLAG_GPU_READABLE (1ULL<<0) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_READABLE flag is set. + @Input uiFlags Allocation flags. + @Return True if the flag is set, false otherwise + */ +#define PVRSRV_CHECK_GPU_READABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_READABLE) != 0) + +/*! + * This flag affects the device MMU protection flags, and specifies + * that the memory may be written by the GPU + * + * Using this flag on an allocation signifies that the allocation is + * intended to be written by the GPU. + * + * Omitting this flag causes a read-only mapping. + * + * This flag will live with the PMR, thus subsequent mappings would + * honour this flag. + * + * This is a dual purpose flag. It specifies that memory is permitted + * to be written by the GPU, and also requests that the allocation is + * mapped into the GPU as a writable mapping (see note above about + * permission vs. mapping mode, and why this flag causes permissions + * to be inferred from mapping mode on first allocation) + * + * N.B. This flag has no relevance to the CPU's MMU mapping, if any, + * and would therefore not enforce read-only mapping on CPU. 
+ */ +#define PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE (1ULL<<1) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE flag is set. + @Input uiFlags Allocation flags. + @Return True if the flag is set, false otherwise + */ +#define PVRSRV_CHECK_GPU_WRITEABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE) != 0) + +/*! + The flag indicates whether an allocation can be mapped as GPU readable in another GPU memory context. + */ +#define PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED (1ULL<<2) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED flag is set. + @Input uiFlags Allocation flags. + @Return True if the flag is set, false otherwise + */ +#define PVRSRV_CHECK_GPU_READ_PERMITTED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED) != 0) + +/*! + The flag indicates whether an allocation can be mapped as GPU writable in another GPU memory context. + */ +#define PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED (1ULL<<3) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED flag is set. + @Input uiFlags Allocation flags. + @Return True if the flag is set, false otherwise + */ +#define PVRSRV_CHECK_GPU_WRITE_PERMITTED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED) != 0) + +/*! + The flag indicates that an allocation is mapped as readable to the CPU. + */ +#define PVRSRV_MEMALLOCFLAG_CPU_READABLE (1ULL<<4) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_READABLE flag is set. + @Input uiFlags Allocation flags. + @Return True if the flag is set, false otherwise + */ +#define PVRSRV_CHECK_CPU_READABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_READABLE) != 0) + +/*! + The flag indicates that an allocation is mapped as writable to the CPU. + */ +#define PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE (1ULL<<5) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE flag is set. + @Input uiFlags Allocation flags. 
+ @Return True if the flag is set, false otherwise + */ +#define PVRSRV_CHECK_CPU_WRITEABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE) != 0) + +/*! + The flag indicates whether an allocation can be mapped as CPU readable in another CPU memory context. + */ +#define PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED (1ULL<<6) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED flag is set. + @Input uiFlags Allocation flags. + @Return True if the flag is set, false otherwise + */ +#define PVRSRV_CHECK_CPU_READ_PERMITTED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED) != 0) + +/*! + The flag indicates whether an allocation can be mapped as CPU writable in another CPU memory context. + */ +#define PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED (1ULL<<7) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED flag is set. + @Input uiFlags Allocation flags. + @Return True if the flag is set, false otherwise + */ +#define PVRSRV_CHECK_CPU_WRITE_PERMITTED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED) != 0) + + +/* + * ********************************************************** + * * * + * * CACHE CONTROL FLAGS * + * * * + * ********************************************************** + */ + +/* + GPU domain + ========== + + The following defines are used to control the GPU cache bit field. + The defines are mutually exclusive. + + A helper macro, PVRSRV_GPU_CACHE_MODE, is provided to obtain just the GPU + cache bit field from the flags. This should be used whenever the GPU cache + mode needs to be determined. +*/ + +/*! + GPU domain. Flag indicating uncached memory. This means that any writes to memory + allocated with this flag are written straight to memory and thus are + coherent for any device in the system. +*/ +#define PVRSRV_MEMALLOCFLAG_GPU_UNCACHED (1ULL<<8) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_UNCACHED mode is set. 
+ @Input uiFlags Allocation flags. + @Return True if the mode is set, false otherwise + */ +#define PVRSRV_CHECK_GPU_UNCACHED(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_UNCACHED) + +/*! + GPU domain. Use write combiner (if supported) to combine sequential writes + together to reduce memory access by doing burst writes. +*/ +#define PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC (0ULL<<8) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC mode is set. + @Input uiFlags Allocation flags. + @Return True if the mode is set, false otherwise + */ +#define PVRSRV_CHECK_GPU_WRITE_COMBINE(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC) + +/*! + GPU domain. This flag affects the GPU MMU protection flags. + The allocation will be cached. + Services will try to set the coherent bit in the GPU MMU tables so the + GPU cache is snooping the CPU cache. If coherency is not supported the + caller is responsible to ensure the caches are up to date. +*/ +#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT (2ULL<<8) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT mode is set. + @Input uiFlags Allocation flags. + @Return True if the mode is set, false otherwise + */ +#define PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT) + +/*! + GPU domain. Request cached memory, but not coherent (i.e. no cache + snooping). Services will flush the GPU internal caches after every GPU + task so no cache maintenance requests from the users are necessary. + + Note: We reserve 3 bits in the CPU/GPU cache mode to allow for future + expansion. +*/ +#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT (3ULL<<8) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT mode is set. + @Input uiFlags Allocation flags. 
+ @Return True if the mode is set, false otherwise + */ +#define PVRSRV_CHECK_GPU_CACHE_INCOHERENT(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT) + +/*! + GPU domain. This flag is for internal use only and is used to indicate + that the underlying allocation should be cached on the GPU after all + the snooping and coherent checks have been done +*/ +#define PVRSRV_MEMALLOCFLAG_GPU_CACHED (7ULL<<8) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_CACHED mode is set. + @Input uiFlags Allocation flags. + @Return True if the mode is set, false otherwise + */ +#define PVRSRV_CHECK_GPU_CACHED(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHED) + +/*! + GPU domain. GPU cache mode mask. +*/ +#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK (7ULL<<8) + +/*! + @Description A helper macro to obtain just the GPU cache bit field from the flags. + This should be used whenever the GPU cache mode needs to be determined. + @Input uiFlags Allocation flags. + @Return Value of the GPU cache bit field. + */ +#define PVRSRV_GPU_CACHE_MODE(uiFlags) ((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK) + + +/* + CPU domain + ========== + + The following defines are used to control the CPU cache bit field. + The defines are mutually exclusive. + + A helper macro, PVRSRV_CPU_CACHE_MODE, is provided to obtain just the CPU + cache bit field from the flags. This should be used whenever the CPU cache + mode needs to be determined. +*/ + +/*! + CPU domain. Use write combiner (if supported) to combine sequential writes + together to reduce memory access by doing burst writes. +*/ +#define PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC (0ULL<<11) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC mode is set. + @Input uiFlags Allocation flags. 
+ @Return True if the mode is set, false otherwise + */ +#define PVRSRV_CHECK_CPU_WRITE_COMBINE(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC) + +/*! + CPU domain. This flag affects the CPU MMU protection flags. + The allocation will be cached. + Services will try to set the coherent bit in the CPU MMU tables so the + CPU cache is snooping the GPU cache. If coherency is not supported the + caller is responsible to ensure the caches are up to date. +*/ +#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT (2ULL<<11) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT mode is set. + @Input uiFlags Allocation flags. + @Return True if the mode is set, false otherwise + */ +#define PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT) + +/*! + CPU domain. Request cached memory, but not coherent (i.e. no cache + snooping). This means that if the allocation needs to transition from + one device to another services has to be informed so it can + flush/invalidate the appropriate caches. + + Note: We reserve 3 bits in the CPU/GPU cache mode to allow for future + expansion. +*/ +#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT (3ULL<<11) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT mode is set. + @Input uiFlags Allocation flags. + @Return True if the mode is set, false otherwise + */ +#define PVRSRV_CHECK_CPU_CACHE_INCOHERENT(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT) + +/*! + CPU domain. This flag is for internal use only and is used to indicate + that the underlying allocation should be cached on the CPU + after all the snooping and coherent checks have been done +*/ +#define PVRSRV_MEMALLOCFLAG_CPU_CACHED (7ULL<<11) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_CACHED mode is set. + @Input uiFlags Allocation flags. 
+ @Return True if the mode is set, false otherwise + */ +#define PVRSRV_CHECK_CPU_CACHED(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHED) + +/*! + CPU domain. CPU cache mode mask +*/ +#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK (7ULL<<11) + +/*! + @Description A helper macro to obtain just the CPU cache bit field from the flags. + This should be used whenever the CPU cache mode needs to be determined. + @Input uiFlags Allocation flags. + @Return Value of the CPU cache bit field. + */ +#define PVRSRV_CPU_CACHE_MODE(uiFlags) ((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK) + +/* Helper flags for usual cases */ + +/*! + * Memory will be write-combined on CPU and GPU + */ +#define PVRSRV_MEMALLOCFLAG_UNCACHED_WC (PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC | PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_UNCACHED_WC mode is set. + @Input uiFlags Allocation flags. + @Return True if the mode is set, false otherwise + */ +#define PVRSRV_CHECK_WRITE_COMBINE(uiFlags) (PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_UNCACHED_WC) + +/*! + * Memory will be cached. + * Services will try to set the correct flags in the MMU tables. + * In case there is no coherency support the caller has to ensure caches are up to date */ +#define PVRSRV_MEMALLOCFLAG_CACHE_COHERENT (PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT | PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CACHE_COHERENT mode is set. + @Input uiFlags Allocation flags. + @Return True if the mode is set, false otherwise + */ +#define PVRSRV_CHECK_CACHE_COHERENT(uiFlags) (PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CACHE_COHERENT) + +/*! + * Memory will be cache-incoherent on CPU and GPU + */ +#define PVRSRV_MEMALLOCFLAG_CACHE_INCOHERENT (PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT) + +/*! 
+ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CACHE_INCOHERENT mode is set. + @Input uiFlags Allocation flags. + @Return True if the mode is set, false otherwise + */ +#define PVRSRV_CHECK_CACHE_INCOHERENT(uiFlags) (PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CACHE_INCOHERENT) + +/*! + Cache mode mask +*/ +#define PVRSRV_CACHE_MODE(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) | PVRSRV_CPU_CACHE_MODE(uiFlags)) + + +/*! + CPU MMU Flags mask -- intended for use internal to services only + */ +#define PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK (PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK) + +/*! + MMU Flags mask -- intended for use internal to services only - used for + partitioning the flags bits and determining which flags to pass down to + mmu_common.c + */ +#define PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK (PVRSRV_MEMALLOCFLAG_GPU_READABLE | \ + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \ + PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK) + +/*! + Indicates that the PMR created due to this allocation will support + in-kernel CPU mappings. Only privileged processes may use this flag as + it may cause wastage of precious kernel virtual memory on some platforms. + */ +#define PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE (1ULL<<14) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE flag is set. + @Input uiFlags Allocation flags. + @Return True if the flag is set, false otherwise + */ +#define PVRSRV_CHECK_KERNEL_CPU_MAPPABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE) != 0) + + + +/* + * + * ********************************************************** + * * * + * * ALLOC MEMORY FLAGS * + * * * + * ********************************************************** + * + * (Bits 15) + * + */ +#define PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC (1ULL<<15) +#define PVRSRV_CHECK_ON_DEMAND(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC) != 0) + +/*! 
+ Indicates that the allocation will be accessed by the CPU and GPU using + the same virtual address, i.e. for all SVM allocs, + IMG_CPU_VIRTADDR == IMG_DEV_VIRTADDR + */ +#define PVRSRV_MEMALLOCFLAG_SVM_ALLOC (1ULL<<17) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_SVM_ALLOC flag is set. + @Input uiFlags Allocation flags. + @Return True if the flag is set, false otherwise + */ +#define PVRSRV_CHECK_SVM_ALLOC(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_SVM_ALLOC) != 0) + +/*! + Indicates the particular memory that's being allocated is sparse and the + sparse regions should not be backed by dummy page +*/ +#define PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING (1ULL << 18) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING flag is set. + @Input uiFlags Allocation flags. + @Return True if the flag is set, false otherwise + */ +#define PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING) == 0) + +/*! + Services is going to clean the cache for the allocated memory. + For performance reasons avoid usage if allocation is written to by the + CPU anyway before the next GPU kick. + */ +#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN (1ULL<<19) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN flag is set. + @Input uiFlags Allocation flags. + @Return True if the flag is set, false otherwise + */ +#define PVRSRV_CHECK_CPU_CACHE_CLEAN(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN) != 0) + +/*! PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING + + Indicates the particular memory that's being allocated is sparse and the + sparse regions should be backed by zero page. This is different with + zero on alloc flag such that only physically unbacked pages are backed + by zero page at the time of mapping. + The zero backed page is always with read only attribute irrespective of its + original attributes. 
+*/ +#define PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING (1ULL << 20) +#define PVRSRV_IS_SPARSE_ZERO_BACKING_REQUIRED(uiFlags) (((uiFlags) & \ + PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING) == PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING) + +/*! + @Description Macro extracting the OS id from a variable containing memalloc flags + @Input uiFlags Allocation flags + @Return returns the value of the FW_ALLOC_OSID bitfield + */ +#define PVRSRV_FW_RAW_ALLOC_OSID(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_FW_ALLOC_OSID_MASK) \ + >> PVRSRV_MEMALLOCFLAG_FW_ALLOC_OSID_SHIFT) + +/*! + @Description Macro converting an OS id value into a memalloc bitfield + @Input uiFlags OS id + @Return returns a shifted bitfield with the OS id value + */ +#define PVRSRV_MEMALLOCFLAG_FW_RAW_ALLOC_OSID(osid) (((osid) << PVRSRV_MEMALLOCFLAG_FW_ALLOC_OSID_SHIFT) \ + & PVRSRV_MEMALLOCFLAG_FW_ALLOC_OSID_MASK) \ + +/* + * + * ********************************************************** + * * * + * * MEMORY ZEROING AND POISONING FLAGS * + * * * + * ********************************************************** + * + * Zero / Poison, on alloc/free + * + * We think the following usecases are required: + * + * don't poison or zero on alloc or free + * (normal operation, also most efficient) + * poison on alloc + * (for helping to highlight bugs) + * poison on alloc and free + * (for helping to highlight bugs) + * zero on alloc + * (avoid highlighting security issues in other uses of memory) + * zero on alloc and poison on free + * (avoid highlighting security issues in other uses of memory, while + * helping to highlight a subset of bugs e.g. memory freed prematurely) + * + * Since there are more than 4, we can't encode this in just two bits, + * so we might as well have a separate flag for each of the three + * actions. + */ + +/*! + Ensures that the memory allocated is initialised with zeroes. + */ +#define PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC (1ULL<<31) + +/*! 
+ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC flag is set. + @Input uiFlags Allocation flags. + @Return True if the flag is set, false otherwise + */ +#define PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) != 0) + +/*! + Scribbles over the allocated memory with a poison value + + Not compatible with ZERO_ON_ALLOC + + Poisoning is very deliberately _not_ reflected in PDump as we want + a simulation to cry loudly if the initialised data propagates to a + result. + */ +#define PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC (1ULL<<30) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC flag is set. + @Input uiFlags Allocation flags. + @Return True if the flag is set, false otherwise + */ +#define PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC) != 0) + +/*! + Causes memory to be trashed when freed, as a lazy man's security measure. + */ +#define PVRSRV_MEMALLOCFLAG_POISON_ON_FREE (1ULL<<29) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_POISON_ON_FREE flag is set. + @Input uiFlags Allocation flags. + @Return True if the flag is set, false otherwise + */ +#define PVRSRV_CHECK_POISON_ON_FREE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_POISON_ON_FREE) != 0) + +/*! + Avoid address alignment to a CPU or GPU cache line size. + */ +#define PVRSRV_MEMALLOCFLAG_NO_CACHE_LINE_ALIGN (1ULL<<28) + +/*! + @Description Macro checking whether the PVRSRV_CHECK_NO_CACHE_LINE_ALIGN flag is set. + @Input uiFlags Allocation flags. 
+ @Return True if the flag is set, false otherwise + */ +#define PVRSRV_CHECK_NO_CACHE_LINE_ALIGN(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_NO_CACHE_LINE_ALIGN) != 0) + + +/* + * + * ********************************************************** + * * * + * * Device specific MMU flags * + * * * + * ********************************************************** + * + * (Bits 26 to 27) + * + * Some services controlled devices have device specific control bits in + * their page table entries, we need to allow these flags to be passed down + * the memory management layers so the user can control these bits. + * For example, RGX device has the file rgx_memallocflags.h + */ + +/*! + * Offset of device specific MMU flags. + */ +#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET 26 + +/*! + * Mask for retrieving device specific MMU flags. + */ +#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK (0x3ULL << PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET) + +/*! + @Description Helper macro for setting device specific MMU flags. + @Input n Flag index. + @Return Flag vector with the specified bit set. + */ +#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(n) \ + (((n) << PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET) & \ + PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK) + +/* + * + * ********************************************************** + * * * + * * Secure validation flags * + * * * + * ********************************************************** + * + * (Bit 35) + * + */ + +/*! + PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER + */ + +#define PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER (1ULL<<35) +#define PVRSRV_CHECK_SHARED_BUFFER(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER) != 0) + +/* + * + * ********************************************************** + * * * + * * Phys Heap Hints * + * * * + * ********************************************************** + * + * (Bits 59 to 63) + * + */ + +/*! + * Value of enum PVRSRV_PHYS_HEAP stored in memalloc flags. If not set + * defaults to GPU_LOCAL (value 0). 
+ */
+#define PVRSRV_PHYS_HEAP_HINT_SHIFT (59)
+#define PVRSRV_PHYS_HEAP_HINT_MASK (0x1FULL << PVRSRV_PHYS_HEAP_HINT_SHIFT)
+
+
+/*!
+ @Description Macro extracting the Phys Heap hint from memalloc flag value.
+ @Input uiFlags Allocation flags
+ @Return returns the value of the PHYS_HEAP_HINT bitfield
+ */
+#define PVRSRV_GET_PHYS_HEAP_HINT(uiFlags) (((uiFlags) & PVRSRV_PHYS_HEAP_HINT_MASK) \
+ >> PVRSRV_PHYS_HEAP_HINT_SHIFT)
+
+/*!
+ @Description Macro converting a Phys Heap value into a memalloc bitfield
+ @Input PhysHeap Device Phys Heap
+ @Return returns a shifted bitfield with the Device Phys Heap value
+ */
+#define PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(PhysHeap) ((((PVRSRV_MEMALLOCFLAGS_T)PVRSRV_PHYS_HEAP_ ## PhysHeap) << \
+ PVRSRV_PHYS_HEAP_HINT_SHIFT) \
+ & PVRSRV_PHYS_HEAP_HINT_MASK)
+/*!
+ @Description Macro to replace an existing phys heap hint value in flags.
+ @Input PhysHeap Phys Heap
+ @Input uiFlags Allocation flags
+ @Return N/A
+ */
+#define PVRSRV_SET_PHYS_HEAP_HINT(PhysHeap, uiFlags) (uiFlags) = ((uiFlags) & ~PVRSRV_PHYS_HEAP_HINT_MASK) | \
+ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(PhysHeap)
+
+/*!
+ @Description Macros checking if a Phys Heap hint is set.
+ @Input uiFlags Allocation flags.
+ @Return True if the hint is set, false otherwise + */ +#define PVRSRV_CHECK_PHYS_HEAP(PhysHeap, uiFlags) (PVRSRV_PHYS_HEAP_ ## PhysHeap == PVRSRV_GET_PHYS_HEAP_HINT(uiFlags)) + +#define PVRSRV_CHECK_FW_MAIN(uiFlags) (PVRSRV_CHECK_PHYS_HEAP(FW_MAIN, uiFlags) || \ + PVRSRV_CHECK_PHYS_HEAP(FW_CONFIG, uiFlags) || \ + PVRSRV_CHECK_PHYS_HEAP(FW_CODE, uiFlags) || \ + PVRSRV_CHECK_PHYS_HEAP(FW_PRIV_DATA, uiFlags) || \ + PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP0, uiFlags) || \ + PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP1, uiFlags) || \ + PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP2, uiFlags) || \ + PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP3, uiFlags) || \ + PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP4, uiFlags) || \ + PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP5, uiFlags) || \ + PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP6, uiFlags) || \ + PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP7, uiFlags)) + +/*! + * Secure buffer mask -- Flags in the mask are allowed for secure buffers + * because they are not related to CPU mappings. + */ +#define PVRSRV_MEMALLOCFLAGS_SECBUFMASK ~(PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK | \ + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \ + PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \ + PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED | \ + PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED) + +/*! + * Trusted device mask -- Flags in the mask are allowed for trusted device + * because the driver cannot access the memory + */ +#define PVRSRV_MEMALLOCFLAGS_TDFWMASK ~(PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ + PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \ + PVRSRV_MEMALLOCFLAG_POISON_ON_FREE | \ + PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \ + PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING) + + +/*! + PMR flags mask -- for internal services use only. 
This is the set of flags + that will be passed down and stored with the PMR, this also includes the + MMU flags which the PMR has to pass down to mm_common.c at PMRMap time. +*/ + +#define PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK (PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK | \ + PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \ + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ + PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \ + PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \ + PVRSRV_MEMALLOCFLAG_POISON_ON_FREE | \ + PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \ + PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \ + PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC | \ + PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING | \ + PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING | \ + PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER | \ + PVRSRV_PHYS_HEAP_HINT_MASK) + +/*! + * CPU mappable mask -- Any flag set in the mask requires memory to be CPU mappable + */ +#define PVRSRV_MEMALLOCFLAGS_CPU_MAPPABLE_MASK (PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ + PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \ + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE) +/*! + RA differentiation mask + + for use internal to services + + this is the set of flags bits that are able to determine whether a pair of + allocations are permitted to live in the same page table. Allocations + whose flags differ in any of these places would be allocated from separate + RA Imports and therefore would never coexist in the same page. + Special cases are zeroing and poisoning of memory. The caller is responsible + to set the sub-allocations to the value he wants it to be. To differentiate + between zeroed and poisoned RA Imports does not make sense because the + memory might be reused. + +*/ +#define PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK (PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK \ + & \ + ~(PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ + PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC)) + +/*! 
+ Flags that affect _allocation_ +*/ +#define PVRSRV_MEMALLOCFLAGS_PERALLOCFLAGSMASK (0xFFFFFFFFU) + +/*! + Flags that affect _mapping_ +*/ +#define PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK (PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK | \ + PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \ + PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \ + PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC | \ + PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \ + PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING | \ + PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING) + +#if ((~(PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK) & PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK) != 0) +#error PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK is not a subset of PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK +#endif + + +/*! + Flags that affect _physical allocations_ in the DevMemX API + */ +#define PVRSRV_MEMALLOCFLAGS_DEVMEMX_PHYSICAL_MASK (PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \ + PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK | \ + PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED | \ + PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED | \ + PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \ + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ + PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \ + PVRSRV_MEMALLOCFLAG_POISON_ON_FREE | \ + PVRSRV_PHYS_HEAP_HINT_MASK) + +/*! + Flags that affect _virtual allocations_ in the DevMemX API + */ +#define PVRSRV_MEMALLOCFLAGS_DEVMEMX_VIRTUAL_MASK (PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \ + PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED | \ + PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED) + +#endif /* PVRSRV_MEMALLOCFLAGS_H */ diff --git a/drivers/gpu/drm/phytium/octopus/pvrsrv_memallocflags_internal.h b/drivers/gpu/drm/phytium/octopus/pvrsrv_memallocflags_internal.h new file mode 100644 index 000000000000..32644e541628 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvrsrv_memallocflags_internal.h @@ -0,0 +1,78 @@ +/*************************************************************************/ /*! 
+@File +@Title Device Memory Management allocation flags for internal Services + use only +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description This file defines flags used on memory allocations and mappings + These flags are relevant throughout the memory management + software stack and are specified by users of services and + understood by all levels of the memory management in the server + and in special cases in the client. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef PVRSRV_MEMALLOCFLAGS_INTERNAL_H +#define PVRSRV_MEMALLOCFLAGS_INTERNAL_H + +/*! + CPU domain. Request uncached memory. This means that any writes to memory + allocated with this flag are written straight to memory and thus are + coherent for any device in the system. +*/ +#define PVRSRV_MEMALLOCFLAG_CPU_UNCACHED (1ULL<<11) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_UNCACHED mode is set. + @Input uiFlags Allocation flags. + @Return True if the mode is set, false otherwise + */ +#define PVRSRV_CHECK_CPU_UNCACHED(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_UNCACHED) + +/*! + * Memory will be uncached on CPU and GPU + */ +#define PVRSRV_MEMALLOCFLAG_UNCACHED (PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | PVRSRV_MEMALLOCFLAG_CPU_UNCACHED) + +/*! + @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_UNCACHED mode is set. + @Input uiFlags Allocation flags. 
+ @Return True if the mode is set, false otherwise + */ +#define PVRSRV_CHECK_UNCACHED(uiFlags) (PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_UNCACHED) + +#endif /* PVRSRV_MEMALLOCFLAGS_INTERNAL_H */ diff --git a/drivers/gpu/drm/phytium/octopus/pvrsrv_pool.c b/drivers/gpu/drm/phytium/octopus/pvrsrv_pool.c new file mode 100644 index 000000000000..99e323823685 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvrsrv_pool.c @@ -0,0 +1,260 @@ +/**************************************************************************/ /*! +@File +@Title Services pool implementation +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Provides a generic pool implementation +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /***************************************************************************/ + +#include "img_defs.h" +#include "pvr_debug.h" +#include "pvrsrv.h" +#include "lock.h" +#include "dllist.h" +#include "allocmem.h" + +struct _PVRSRV_POOL_ +{ + POS_LOCK hLock; + /* total max number of permitted entries in the pool */ + IMG_UINT uiMaxEntries; + /* currently number of pool entries created. 
these may be in the pool + * or in-use + */ + IMG_UINT uiNumBusy; + /* number of not-in-use entries currently free in the pool */ + IMG_UINT uiNumFree; + + DLLIST_NODE sFreeList; + + const IMG_CHAR *pszName; + + PVRSRV_POOL_ALLOC_FUNC *pfnAlloc; + PVRSRV_POOL_FREE_FUNC *pfnFree; + void *pvPrivData; +}; + +typedef struct _PVRSRV_POOL_ENTRY_ +{ + DLLIST_NODE sNode; + void *pvData; +} PVRSRV_POOL_ENTRY; + +PVRSRV_ERROR PVRSRVPoolCreate(PVRSRV_POOL_ALLOC_FUNC *pfnAlloc, + PVRSRV_POOL_FREE_FUNC *pfnFree, + IMG_UINT32 ui32MaxEntries, + const IMG_CHAR *pszName, + void *pvPrivData, + PVRSRV_POOL **ppsPool) +{ + PVRSRV_POOL *psPool; + PVRSRV_ERROR eError; + + psPool = OSAllocMem(sizeof(PVRSRV_POOL)); + PVR_GOTO_IF_NOMEM(psPool, eError, err_alloc); + + eError = OSLockCreate(&psPool->hLock); + + PVR_GOTO_IF_ERROR(eError, err_lock_create); + + psPool->uiMaxEntries = ui32MaxEntries; + psPool->uiNumBusy = 0; + psPool->uiNumFree = 0; + psPool->pfnAlloc = pfnAlloc; + psPool->pfnFree = pfnFree; + psPool->pvPrivData = pvPrivData; + psPool->pszName = pszName; + + dllist_init(&psPool->sFreeList); + + *ppsPool = psPool; + + return PVRSRV_OK; + +err_lock_create: + OSFreeMem(psPool); +err_alloc: + return eError; +} + +static PVRSRV_ERROR _DestroyPoolEntry(PVRSRV_POOL *psPool, + PVRSRV_POOL_ENTRY *psEntry) +{ + psPool->pfnFree(psPool->pvPrivData, psEntry->pvData); + OSFreeMem(psEntry); + + return PVRSRV_OK; +} + +void PVRSRVPoolDestroy(PVRSRV_POOL *psPool) +{ + if (psPool->uiNumBusy != 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Attempt to destroy pool %s " + "with %u entries still in use", + __func__, + psPool->pszName, + psPool->uiNumBusy)); + return; + } + + OSLockDestroy(psPool->hLock); + + if (psPool->uiNumFree) + { + PVRSRV_POOL_ENTRY *psEntry; + DLLIST_NODE *psChosenNode; + + psChosenNode = dllist_get_next_node(&psPool->sFreeList); + + while (psChosenNode) + { + dllist_remove_node(psChosenNode); + + psEntry = IMG_CONTAINER_OF(psChosenNode, PVRSRV_POOL_ENTRY, sNode); + 
_DestroyPoolEntry(psPool, psEntry); + + psPool->uiNumFree--; + + psChosenNode = dllist_get_next_node(&psPool->sFreeList); + } + + PVR_ASSERT(psPool->uiNumFree == 0); + } + + OSFreeMem(psPool); +} + +static PVRSRV_ERROR _CreateNewPoolEntry(PVRSRV_POOL *psPool, + PVRSRV_POOL_ENTRY **ppsEntry) +{ + PVRSRV_POOL_ENTRY *psNewEntry; + PVRSRV_ERROR eError; + + psNewEntry = OSAllocMem(sizeof(PVRSRV_POOL_ENTRY)); + PVR_GOTO_IF_NOMEM(psNewEntry, eError, err_allocmem); + + dllist_init(&psNewEntry->sNode); + + eError = psPool->pfnAlloc(psPool->pvPrivData, &psNewEntry->pvData); + + PVR_GOTO_IF_ERROR(eError, err_pfn_alloc); + + *ppsEntry = psNewEntry; + + return PVRSRV_OK; + +err_pfn_alloc: + OSFreeMem(psNewEntry); +err_allocmem: + return eError; +} + +PVRSRV_ERROR PVRSRVPoolGet(PVRSRV_POOL *psPool, + PVRSRV_POOL_TOKEN *hToken, + void **ppvDataOut) +{ + PVRSRV_POOL_ENTRY *psEntry; + PVRSRV_ERROR eError = PVRSRV_OK; + DLLIST_NODE *psChosenNode; + + OSLockAcquire(psPool->hLock); + + psChosenNode = dllist_get_next_node(&psPool->sFreeList); + if (unlikely(psChosenNode == NULL)) + { + /* no available elements in the pool. try to create one */ + + eError = _CreateNewPoolEntry(psPool, &psEntry); + + PVR_GOTO_IF_ERROR(eError, out_unlock); + } + else + { + dllist_remove_node(psChosenNode); + + psEntry = IMG_CONTAINER_OF(psChosenNode, PVRSRV_POOL_ENTRY, sNode); + + psPool->uiNumFree--; + } + +#if defined(DEBUG) || defined(SUPPORT_VALIDATION) + /* Don't poison the IN buffer as that is copied from client and would be + * waste of cycles. 
+ */ + OSCachedMemSet(((IMG_PBYTE)psEntry->pvData)+PVRSRV_MAX_BRIDGE_IN_SIZE, + PVRSRV_POISON_ON_ALLOC_VALUE, PVRSRV_MAX_BRIDGE_OUT_SIZE); +#endif + + psPool->uiNumBusy++; + *hToken = psEntry; + *ppvDataOut = psEntry->pvData; + +out_unlock: + OSLockRelease(psPool->hLock); + return eError; +} + +PVRSRV_ERROR PVRSRVPoolPut(PVRSRV_POOL *psPool, PVRSRV_POOL_TOKEN hToken) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_POOL_ENTRY *psEntry = hToken; + + PVR_ASSERT(psPool->uiNumBusy > 0); + + OSLockAcquire(psPool->hLock); + + /* put this entry in the pool if the pool has space, + * otherwise free it + */ + if (psPool->uiNumFree < psPool->uiMaxEntries) + { + dllist_add_to_tail(&psPool->sFreeList, &psEntry->sNode); + psPool->uiNumFree++; + } + else + { + eError = _DestroyPoolEntry(psPool, psEntry); + } + + psPool->uiNumBusy--; + + OSLockRelease(psPool->hLock); + + return eError; +} diff --git a/drivers/gpu/drm/phytium/octopus/pvrsrv_pool.h b/drivers/gpu/drm/phytium/octopus/pvrsrv_pool.h new file mode 100644 index 000000000000..68eaf63564d0 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvrsrv_pool.h @@ -0,0 +1,135 @@ +/**************************************************************************/ /*! +@File +@Title Services pool implementation +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Provides a generic pool implementation. + The pool allows to dynamically retrieve and return entries from + it using functions pair PVRSRVPoolGet/PVRSRVPoolPut. The entries + are created in lazy manner which means not until first usage. + The pool API allows to pass and allocation/free functions + pair that will allocate entry's private data and return it + to the caller on every entry 'Get'. + The pool will keep up to ui32MaxEntries entries allocated. + Every entry that exceeds this number and is 'Put' back to the + pool will be freed on the spot instead being returned to the + pool. 
+@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /***************************************************************************/ + +#if !defined(PVRSRVPOOL_H) +#define PVRSRVPOOL_H + +/**************************************************************************/ /*! + @Description Callback function called during creation of the new element. This + function allocates an object that will be stored in the pool. + The object can be retrieved from the pool by calling + PVRSRVPoolGet. + @Input pvPrivData Private data passed to the alloc function. + @Output pvOut Allocated object. + @Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise +*/ /***************************************************************************/ +typedef PVRSRV_ERROR (PVRSRV_POOL_ALLOC_FUNC)(void *pvPrivData, void **pvOut); + +/**************************************************************************/ /*! + @Description Callback function called to free the object allocated by + the counterpart alloc function. + @Input pvPrivData Private data passed to the free function. + @Output pvFreeData Object allocated by PVRSRV_POOL_ALLOC_FUNC. +*/ /***************************************************************************/ +typedef void (PVRSRV_POOL_FREE_FUNC)(void *pvPrivData, void *pvFreeData); + +typedef IMG_HANDLE PVRSRV_POOL_TOKEN; + +typedef struct _PVRSRV_POOL_ PVRSRV_POOL; + +/**************************************************************************/ /*! 
+ @Function PVRSRVPoolCreate + @Description Creates new buffer pool. + @Input pfnAlloc Allocation function pointer. Function is used + to allocate new pool entries' data. + @Input pfnFree Free function pointer. Function is used to + free memory allocated by pfnAlloc function. + @Input ui32MaxEntries Total maximum number of entries in the pool. + @Input pszName Name of the pool. String has to be NULL + terminated. + @Input pvPrivData Private data that will be passed to pfnAlloc and + pfnFree functions. + @Output ppsPool New buffer pool object. + @Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise +*/ /***************************************************************************/ +PVRSRV_ERROR PVRSRVPoolCreate(PVRSRV_POOL_ALLOC_FUNC *pfnAlloc, + PVRSRV_POOL_FREE_FUNC *pfnFree, + IMG_UINT32 ui32MaxEntries, + const IMG_CHAR *pszName, + void *pvPrivData, + PVRSRV_POOL **ppsPool); + +/**************************************************************************/ /*! + @Function PVRSRVPoolDestroy + @Description Destroys pool created by PVRSRVPoolCreate. + @Input psPool Buffer pool object meant to be destroyed. +*/ /***************************************************************************/ +void PVRSRVPoolDestroy(PVRSRV_POOL *psPool); + +/**************************************************************************/ /*! + @Function PVRSRVPoolGet + @Description Retrieves an entry from a pool. If no free elements are + available new entry will be allocated. + @Input psPool Pointer to the pool. + @Output hToken Pointer to the entry handle. + @Output ppvDataOut Pointer to data stored in the entry (the data + allocated by the pfnAlloc function). 
+ @Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise +*/ /***************************************************************************/ +PVRSRV_ERROR PVRSRVPoolGet(PVRSRV_POOL *psPool, + PVRSRV_POOL_TOKEN *hToken, + void **ppvDataOut); + +/**************************************************************************/ /*! + @Function PVRSRVPoolPut + @Description Returns entry to the pool. If number of entries is greater + than ui32MaxEntries set during pool creation the entry will + be freed instead. + @Input psPool Pointer to the pool. + @Input hToken Entry handle. + @Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise +*/ /***************************************************************************/ +PVRSRV_ERROR PVRSRVPoolPut(PVRSRV_POOL *psPool, + PVRSRV_POOL_TOKEN hToken); + +#endif /* PVRSRVPOOL_H */ diff --git a/drivers/gpu/drm/phytium/octopus/pvrsrv_sync_km.h b/drivers/gpu/drm/phytium/octopus/pvrsrv_sync_km.h new file mode 100644 index 000000000000..fefe07d746cd --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvrsrv_sync_km.h @@ -0,0 +1,65 @@ +/*************************************************************************/ /*! +@File +@Title PVR synchronisation interface +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Types for server side code +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifndef PVRSRV_SYNC_KM_H +#define PVRSRV_SYNC_KM_H + +#include + +#if defined(__cplusplus) +extern "C" { +#endif + +#define SYNC_FB_FILE_STRING_MAX 256 +#define SYNC_FB_MODULE_STRING_LEN_MAX (32) +#define SYNC_FB_DESC_STRING_LEN_MAX (32) + +/* By default, fence-sync module emits into HWPerf (of course, if enabled) and + * considers a process (sleepable) context */ +#define PVRSRV_FENCE_FLAG_NONE (0U) +#define PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT (1U << 0) +#define PVRSRV_FENCE_FLAG_CTX_ATOMIC (1U << 1) + +#if defined(__cplusplus) +} +#endif +#endif /* PVRSRV_SYNC_KM_H */ diff --git a/drivers/gpu/drm/phytium/octopus/pvrsrv_sync_server.h b/drivers/gpu/drm/phytium/octopus/pvrsrv_sync_server.h new file mode 100644 index 000000000000..7aa71f5939fc --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvrsrv_sync_server.h @@ -0,0 +1,277 @@ +/**************************************************************************/ /*! +@File +@Title Fence sync server interface +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /***************************************************************************/ + +#ifndef PVRSRV_SYNC_SERVER_H +#define PVRSRV_SYNC_SERVER_H + +#if defined(SUPPORT_FALLBACK_FENCE_SYNC) +#include "sync_fallback_server.h" +#include "pvr_notifier.h" +#include "img_types.h" +#include "pvrsrv_sync_km.h" +#elif defined(SUPPORT_NATIVE_FENCE_SYNC) +#include "pvr_sync.h" +#endif + +#include "rgxhwperf.h" + +#define SYNC_SW_TIMELINE_MAX_LENGTH PVRSRV_SYNC_NAME_LENGTH +#define SYNC_SW_FENCE_MAX_LENGTH PVRSRV_SYNC_NAME_LENGTH + +typedef struct _SYNC_TIMELINE_OBJ_ +{ + void *pvTlObj; /* Implementation specific timeline object */ + + PVRSRV_TIMELINE hTimeline; /* Reference to implementation-independent timeline object */ +} SYNC_TIMELINE_OBJ; + +typedef struct _SYNC_FENCE_OBJ_ +{ + void *pvFenceObj; /* Implementation specific fence object */ + + PVRSRV_FENCE hFence; /* Reference to implementation-independent fence object */ +} SYNC_FENCE_OBJ; + +static inline void SyncClearTimelineObj(SYNC_TIMELINE_OBJ *psSTO) +{ + psSTO->pvTlObj = NULL; + psSTO->hTimeline = PVRSRV_NO_TIMELINE; +} + +static inline IMG_BOOL SyncIsTimelineObjValid(const SYNC_TIMELINE_OBJ *psSTO) +{ + return psSTO->pvTlObj != NULL; +} + +static inline void SyncClearFenceObj(SYNC_FENCE_OBJ *psSFO) +{ + psSFO->pvFenceObj = NULL; + psSFO->hFence = PVRSRV_NO_FENCE; +} + +static inline IMG_BOOL SyncIsFenceObjValid(const SYNC_FENCE_OBJ *psSFO) +{ + return psSFO->pvFenceObj != NULL; +} + + +/* Mapping of each required function to its appropriate sync-implementation function */ +#if defined(SUPPORT_FALLBACK_FENCE_SYNC) + #define SyncFenceWaitKM_ SyncFbFenceWait + #define SyncGetFenceObj_ SyncFbGetFenceObj + #define SyncFenceReleaseKM_ SyncFbFenceReleaseKM + #define SyncSWTimelineFenceCreateKM_ SyncFbSWTimelineFenceCreateKM + #define SyncSWTimelineAdvanceKM_ SyncFbSWTimelineAdvanceKM + #define SyncSWGetTimelineObj_ SyncFbSWGetTimelineObj + #define SyncSWTimelineReleaseKM_ SyncFbTimelineRelease + #define SyncDumpFence_ 
SyncFbDumpFenceKM + #define SyncSWDumpTimeline_ SyncFbSWDumpTimelineKM +#elif defined(SUPPORT_NATIVE_FENCE_SYNC) + #define SyncFenceWaitKM_ pvr_sync_fence_wait + #define SyncGetFenceObj_ pvr_sync_fence_get + #define SyncFenceReleaseKM_ pvr_sync_fence_release + #define SyncSWTimelineFenceCreateKM_ pvr_sync_sw_timeline_fence_create + #define SyncSWTimelineAdvanceKM_ pvr_sync_sw_timeline_advance + #define SyncSWGetTimelineObj_ pvr_sync_sw_timeline_get + #define SyncSWTimelineReleaseKM_ pvr_sync_sw_timeline_release + #define SyncDumpFence_ sync_dump_fence + #define SyncSWDumpTimeline_ sync_sw_dump_timeline +#endif + +/*************************************************************************/ /*! +@Function SyncFenceWaitKM + +@Description Wait for all the sync points in the fence to be signalled. + +@Input psFenceObj Fence to wait on + +@Input ui32TimeoutInMs Maximum time to wait (in milliseconds) + +@Return PVRSRV_OK once the fence has been passed (all + containing check points have either + signalled or errored) + PVRSRV_ERROR_TIMEOUT if the poll has exceeded the timeout + PVRSRV_ERROR_FAILED_DEPENDENCIES Other sync-impl specific error +*/ /**************************************************************************/ +static inline PVRSRV_ERROR +SyncFenceWaitKM(PVRSRV_DEVICE_NODE *psDevNode, + const SYNC_FENCE_OBJ *psFenceObj, + IMG_UINT32 ui32TimeoutInMs) +{ + PVRSRV_ERROR eError; + + RGXSRV_HWPERF_SYNC_FENCE_WAIT(psDevNode->pvDevice, + BEGIN, + OSGetCurrentProcessID(), + psFenceObj->hFence, + ui32TimeoutInMs); + + eError = SyncFenceWaitKM_(psFenceObj->pvFenceObj, ui32TimeoutInMs); + + RGXSRV_HWPERF_SYNC_FENCE_WAIT(psDevNode->pvDevice, + END, + OSGetCurrentProcessID(), + psFenceObj->hFence, + ((eError == PVRSRV_OK) ? + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_PASSED : + ((eError == PVRSRV_ERROR_TIMEOUT) ? 
+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_TIMEOUT : + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_ERROR))); + return eError; +} + +/*************************************************************************/ /*! +@Function SyncGetFenceObj + +@Description Get the implementation specific server fence object from + opaque implementation independent PVRSRV_FENCE type. + When successful, this function gets a reference on the base + fence, which needs to be dropped using SyncFenceReleaseKM, + when fence object is no longer in use. + +@Input iFence Input opaque fence object + +@Output psFenceObj Pointer to implementation specific fence object + +@Return PVRSRV_ERROR PVRSRV_OK, on success +*/ /**************************************************************************/ +static inline PVRSRV_ERROR +SyncGetFenceObj(PVRSRV_FENCE iFence, + SYNC_FENCE_OBJ *psFenceObj) +{ + psFenceObj->hFence = iFence; + return SyncGetFenceObj_(iFence, &psFenceObj->pvFenceObj); +} + +/*************************************************************************/ /*! +@Function SyncFenceReleaseKM + +@Description Release reference on this fence. 
+ +@Input psFenceObj Fence to be released + +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +static inline +PVRSRV_ERROR SyncFenceReleaseKM(const SYNC_FENCE_OBJ *psFenceObj) +{ + return SyncFenceReleaseKM_(psFenceObj->pvFenceObj); +} + +/*****************************************************************************/ +/* */ +/* SW TIMELINE SPECIFIC FUNCTIONS */ +/* */ +/*****************************************************************************/ + +static inline PVRSRV_ERROR +SyncSWTimelineFenceCreateKM(PVRSRV_DEVICE_NODE *psDevNode, + PVRSRV_TIMELINE hSWTimeline, + const IMG_CHAR *pszFenceName, + PVRSRV_FENCE *phOutFence) +{ + IMG_UINT64 ui64SyncPtIdx; + PVRSRV_ERROR eError; + eError = SyncSWTimelineFenceCreateKM_(hSWTimeline, + pszFenceName, + phOutFence, + &ui64SyncPtIdx); + if (eError == PVRSRV_OK) + { + RGXSRV_HWPERF_ALLOC_SW_FENCE(psDevNode, OSGetCurrentProcessID(), + *phOutFence, hSWTimeline, ui64SyncPtIdx, + pszFenceName, OSStringLength(pszFenceName)); + } + return eError; +} + +static inline PVRSRV_ERROR +SyncSWTimelineAdvanceKM(PVRSRV_DEVICE_NODE *psDevNode, + const SYNC_TIMELINE_OBJ *psSWTimelineObj) +{ + IMG_UINT64 ui64SyncPtIdx; + PVRSRV_ERROR eError; + eError = SyncSWTimelineAdvanceKM_(psSWTimelineObj->pvTlObj, + &ui64SyncPtIdx); + + if (eError == PVRSRV_OK) + { + RGXSRV_HWPERF_SYNC_SW_TL_ADV(psDevNode->pvDevice, + OSGetCurrentProcessID(), + psSWTimelineObj->hTimeline, + ui64SyncPtIdx); + } + return eError; +} + +static inline PVRSRV_ERROR +SyncSWGetTimelineObj(PVRSRV_TIMELINE hSWTimeline, + SYNC_TIMELINE_OBJ *psSWTimelineObj) +{ + psSWTimelineObj->hTimeline = hSWTimeline; + return SyncSWGetTimelineObj_(hSWTimeline, &psSWTimelineObj->pvTlObj); +} + +static inline PVRSRV_ERROR +SyncSWTimelineReleaseKM(const SYNC_TIMELINE_OBJ *psSWTimelineObj) +{ + return SyncSWTimelineReleaseKM_(psSWTimelineObj->pvTlObj); +} + +static inline PVRSRV_ERROR +SyncDumpFence(const SYNC_FENCE_OBJ *psFenceObj, + 
DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + return SyncDumpFence_(psFenceObj->pvFenceObj, pfnDumpDebugPrintf, pvDumpDebugFile); +} + +static inline PVRSRV_ERROR +SyncSWDumpTimeline(const SYNC_TIMELINE_OBJ *psSWTimelineObj, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + return SyncSWDumpTimeline_(psSWTimelineObj->pvTlObj, pfnDumpDebugPrintf, pvDumpDebugFile); +} + + +#endif /* PVRSRV_SYNC_SERVER_H */ diff --git a/drivers/gpu/drm/phytium/octopus/pvrsrv_tlcommon.h b/drivers/gpu/drm/phytium/octopus/pvrsrv_tlcommon.h new file mode 100644 index 000000000000..4eee27a71dd0 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvrsrv_tlcommon.h @@ -0,0 +1,261 @@ +/*************************************************************************/ /*! +@File +@Title Services Transport Layer common types and definitions +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Transport layer common types and definitions included into + both user mode and kernel mode source. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef PVR_TLCOMMON_H +#define PVR_TLCOMMON_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#include "img_defs.h" + + +/*! Handle type for stream descriptor objects as created by this API */ +typedef IMG_HANDLE PVRSRVTL_SD; + +/*! Maximum stream name length including the null byte */ +#define PRVSRVTL_MAX_STREAM_NAME_SIZE 40U + +/*! Maximum number of streams expected to exist */ +#define PVRSRVTL_MAX_DISCOVERABLE_STREAMS_BUFFER (32*PRVSRVTL_MAX_STREAM_NAME_SIZE) + +/*! Packet lengths are always rounded up to a multiple of 8 bytes */ +#define PVRSRVTL_PACKET_ALIGNMENT 8U +#define PVRSRVTL_ALIGN(x) (((x)+PVRSRVTL_PACKET_ALIGNMENT-1) & ~(PVRSRVTL_PACKET_ALIGNMENT-1)) + + +/*! A packet is made up of a header structure followed by the data bytes. 
+ * There are 3 types of packet: normal (has data), data lost and padding, + * see packet flags. Header kept small to reduce data overhead. + * + * if the ORDER of the structure members is changed, please UPDATE the + * PVRSRVTL_PACKET_FLAG_OFFSET macro. + * + * Layout of uiTypeSize member is : + * + * |<---------------------------32-bits------------------------------>| + * |<----8---->|<-----1----->|<----7--->|<------------16------------->| + * | Type | Drop-Oldest | UNUSED | Size | + * + */ +typedef struct +{ + IMG_UINT32 uiTypeSize; /*!< Type, Drop-Oldest flag & number of bytes following header */ + IMG_UINT32 uiReserved; /*!< Reserve, packets and data must be 8 byte aligned */ + + /* First bytes of TL packet data follow header ... */ +} PVRSRVTL_PACKETHDR, *PVRSRVTL_PPACKETHDR; + +/* Structure must always be a size multiple of 8 as stream buffer + * still an array of IMG_UINT32s. + */ +static_assert((sizeof(PVRSRVTL_PACKETHDR) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(PVRSRVTL_PACKETHDR) must be a multiple of 8"); + +/*! Packet header reserved word fingerprint "TLP1" */ +#define PVRSRVTL_PACKETHDR_RESERVED 0x31504C54U + +/*! Packet header mask used to extract the size from the uiTypeSize member. + * Do not use directly, see GET macros. + */ +#define PVRSRVTL_PACKETHDR_SIZE_MASK 0x0000FFFFU +#define PVRSRVTL_MAX_PACKET_SIZE (PVRSRVTL_PACKETHDR_SIZE_MASK & ~0xFU) + + +/*! Packet header mask used to extract the type from the uiTypeSize member. + * Do not use directly, see GET macros. + */ +#define PVRSRVTL_PACKETHDR_TYPE_MASK 0xFF000000U +#define PVRSRVTL_PACKETHDR_TYPE_OFFSET 24U + +/*! Packet header mask used to check if packets before this one were dropped + * or not. Do not use directly, see GET macros. + */ +#define PVRSRVTL_PACKETHDR_OLDEST_DROPPED_MASK 0x00800000U +#define PVRSRVTL_PACKETHDR_OLDEST_DROPPED_OFFSET 23U + +/*! Packet type enumeration. + */ +typedef enum +{ + /*! Undefined packet */ + PVRSRVTL_PACKETTYPE_UNDEF = 0, + + /*! 
Normal packet type. Indicates data follows the header. + */ + PVRSRVTL_PACKETTYPE_DATA = 1, + + /*! When seen this packet type indicates that at this moment in the stream + * packet(s) were not able to be accepted due to space constraints and + * that recent data may be lost - depends on how the producer handles the + * error. Such packets have no data, data length is 0. + */ + PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED = 2, + + /*! Packets with this type set are padding packets that contain undefined + * data and must be ignored/skipped by the client. They are used when the + * circular stream buffer wraps around and there is not enough space for + * the data at the end of the buffer. Such packets have a length of 0 or + * more. + */ + PVRSRVTL_PACKETTYPE_PADDING = 3, + + /*! This packet type conveys to the stream consumer that the stream + * producer has reached the end of data for that data sequence. The + * TLDaemon has several options for processing these packets that can + * be selected on a per stream basis. + */ + PVRSRVTL_PACKETTYPE_MARKER_EOS = 4, + + /*! This is same as PVRSRVTL_PACKETTYPE_MARKER_EOS but additionally removes + * old data record output file before opening new/next one + */ + PVRSRVTL_PACKETTYPE_MARKER_EOS_REMOVEOLD = 5, + + /*! Packet emitted on first stream opened by writer. Packet carries a name + * of the opened stream in a form of null-terminated string. + */ + PVRSRVTL_PACKETTYPE_STREAM_OPEN_FOR_WRITE = 6, + + /*! Packet emitted on last stream closed by writer. Packet carries a name + * of the closed stream in a form of null-terminated string. + */ + PVRSRVTL_PACKETTYPE_STREAM_CLOSE_FOR_WRITE = 7, + + PVRSRVTL_PACKETTYPE_LAST +} PVRSRVTL_PACKETTYPE; + +/* The SET_PACKET_* macros rely on the order the PVRSRVTL_PACKETHDR members are declared: + * uiFlags is the upper half of a structure consisting of 2 uint16 quantities. 
+ */
+#define PVRSRVTL_SET_PACKET_DATA(len) (len) | (PVRSRVTL_PACKETTYPE_DATA << PVRSRVTL_PACKETHDR_TYPE_OFFSET)
+#define PVRSRVTL_SET_PACKET_PADDING(len) (len) | (PVRSRVTL_PACKETTYPE_PADDING << PVRSRVTL_PACKETHDR_TYPE_OFFSET)
+#define PVRSRVTL_SET_PACKET_WRITE_FAILED (0) | (PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED << PVRSRVTL_PACKETHDR_TYPE_OFFSET)
+#define PVRSRVTL_SET_PACKET_HDR(len, type) (len) | ((type) << PVRSRVTL_PACKETHDR_TYPE_OFFSET)
+
+/*! Returns the number of bytes of data in the packet.
+ * p may be any address type.
+ */
+#define GET_PACKET_DATA_LEN(p) \
+ ((IMG_UINT32) ((PVRSRVTL_PPACKETHDR) (void *) (p))->uiTypeSize & PVRSRVTL_PACKETHDR_SIZE_MASK)
+
+
+/*! Returns a IMG_BYTE* pointer to the first byte of data in the packet */
+#define GET_PACKET_DATA_PTR(p) \
+ (((IMG_UINT8 *) (void *) (p)) + sizeof(PVRSRVTL_PACKETHDR))
+
+/*! Turns the packet address p into a PVRSRVTL_PPACKETHDR pointer type.
+ */
+#define GET_PACKET_HDR(p) ((PVRSRVTL_PPACKETHDR) ((void *) (p)))
+
+/*! Given a PVRSRVTL_PPACKETHDR address, return the address of the next pack
+ * It is up to the caller to determine if the new address is within the
+ * packet buffer.
+ */
+#define GET_NEXT_PACKET_ADDR(p) \
+ GET_PACKET_HDR( \
+ GET_PACKET_DATA_PTR(p) + \
+ ( \
+ (GET_PACKET_DATA_LEN(p) + (PVRSRVTL_PACKET_ALIGNMENT-1)) & \
+ (~(PVRSRVTL_PACKET_ALIGNMENT-1)) \
+ ) \
+ )
+
+/*! Get the type of the packet. p is of type PVRSRVTL_PPACKETHDR.
+ */
+#define GET_PACKET_TYPE(p) (((p)->uiTypeSize & PVRSRVTL_PACKETHDR_TYPE_MASK)>>PVRSRVTL_PACKETHDR_TYPE_OFFSET)
+
+/*! Set PACKETS_DROPPED flag in packet header as a part of uiTypeSize.
+ * p is of type PVRSRVTL_PPACKETHDR.
+ */
+#define SET_PACKETS_DROPPED(p) (((p)->uiTypeSize) | (1<<PVRSRVTL_PACKETHDR_OLDEST_DROPPED_OFFSET))
+
+/*! Check if packets preceding this one were dropped.
+ * p is of type PVRSRVTL_PPACKETHDR.
+ */
+#define CHECK_PACKETS_DROPPED(p) (((p)->uiTypeSize & PVRSRVTL_PACKETHDR_OLDEST_DROPPED_MASK)>>PVRSRVTL_PACKETHDR_OLDEST_DROPPED_OFFSET)
+
+/*!
Flags for use with PVRSRVTLOpenStream + * 0x01 - Do not block in PVRSRVTLAcquireData() when no bytes are available + * 0x02 - When the stream does not exist wait for a bit (2s) in + * PVRSRVTLOpenStream() and then exit with a timeout error if it still + * does not exist. + * 0x04 - Open stream for write only operations. + * If flag is not used stream is opened as read-only. This flag is + * required if one wants to call reserve/commit/write function on the + * stream descriptor. Read from on the stream descriptor opened + * with this flag will fail. + * 0x08 - Disable Producer Callback. + * If this flag is set and the stream becomes empty, do not call any + * associated producer callback to generate more data from the reader + * context. + * 0x10 - Reset stream on open. + * When this flag is used the stream will drop all of the stored data. + * 0x20 - Limit read position to the write position at time the stream + * was opened. Hence this flag will freeze the content read to that + * produced before the stream was opened for reading. + * 0x40 - Ignore Open Callback. + * When this flag is set ignore any OnReaderOpenCallback setting for + * the stream. This allows access to the stream to be made without + * generating any extra packets into the stream. 
+ */ + +#define PVRSRV_STREAM_FLAG_NONE (0U) +#define PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING (1U<<0) +#define PVRSRV_STREAM_FLAG_OPEN_WAIT (1U<<1) +#define PVRSRV_STREAM_FLAG_OPEN_WO (1U<<2) +#define PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK (1U<<3) +#define PVRSRV_STREAM_FLAG_RESET_ON_OPEN (1U<<4) +#define PVRSRV_STREAM_FLAG_READ_LIMIT (1U<<5) +#define PVRSRV_STREAM_FLAG_IGNORE_OPEN_CALLBACK (1U<<6) + + +#if defined(__cplusplus) +} +#endif + +#endif /* PVR_TLCOMMON_H */ +/****************************************************************************** + End of file (pvrsrv_tlcommon.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/pvrsrv_tlstreams.h b/drivers/gpu/drm/phytium/octopus/pvrsrv_tlstreams.h new file mode 100644 index 000000000000..5706fb134480 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvrsrv_tlstreams.h @@ -0,0 +1,61 @@ +/*************************************************************************/ /*! +@File +@Title Services Transport Layer stream names +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Transport layer common types and definitions included into + both user mode and kernel mode source. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef PVRSRV_TLSTREAMS_H +#define PVRSRV_TLSTREAMS_H + +#define PVRSRV_TL_CTLR_STREAM "tlctrl" + +#define PVRSRV_TL_HWPERF_RGX_FW_STREAM "hwperf_fw_" +#define PVRSRV_TL_HWPERF_HOST_SERVER_STREAM "hwperf_host_" + +/* Host HWPerf client stream names are of the form 'hwperf_client_' */ +#define PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM "hwperf_client_" +#define PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM_FMTSPEC "hwperf_client_%u_%u" + +#endif /* PVRSRV_TLSTREAMS_H */ + +/****************************************************************************** + End of file (pvrsrv_tlstreams.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/pvrsrvkm.mk b/drivers/gpu/drm/phytium/octopus/pvrsrvkm.mk new file mode 100644 index 000000000000..61db17eb2830 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvrsrvkm.mk @@ -0,0 +1,149 @@ +pvrsrvkm-y += \ +pvr_drm.o \ +event.o \ +fwload.o \ +km_apphint.o \ +module_common.o \ +osmmap_stub.o \ +osfunc.o \ +allocmem.o \ +osconnection_server.o \ +physmem_osmem_linux.o \ +pmr_os.o \ +pvr_bridge_k.o \ +pvr_debug.o \ +physmem_dmabuf.o \ +devicemem_heapcfg.o \ +devicemem.o \ +devicemem_utils.o \ +hash.o \ +ra.o \ +sync.o \ +mem_utils.o \ +devicemem_server.o \ +handle.o \ +lists.o \ +mmu_common.o \ +connection_server.o \ +physheap.o \ +physmem.o \ +physmem_lma.o \ +physmem_hostmem.o \ +pmr.o \ +power.o \ +process_stats.o \ +pvr_notifier.o \ +pvrsrv.o \ +srvcore.o \ +sync_checkpoint.o \ +sync_server.o \ +htbuffer.o \ +htbserver.o \ +htb_debug.o \ +tlintern.o \ +tlclient.o \ +tlserver.o \ +tlstream.o \ +cache_km.o \ +uniq_key_splay_tree.o \ +pvrsrv_pool.o \ +pvrsrv_bridge_init.o \ +info_page_km.o \ +pvrsrv_error.o \ +debug_common.o \ +di_server.o \ +physmem_test.o \ +pvr_pci_drv.o \ +pvr_gputrace.o \ +rgxfwdbg.o \ +rgxtimerquery.o \ +rgxccb.o \ +rgxdebug.o \ +rgxfwtrace_strings.o \ +rgxfwutils.o \ 
+rgxinit.o \
+rgxbvnc.o \
+rgxkicksync.o \
+rgxlayer_impl.o \
+rgxmem.o \
+rgxmmuinit.o \
+rgxregconfig.o \
+rgxta3d.o \
+rgxsyncutils.o \
+rgxtdmtransfer.o \
+rgxutils.o \
+rgxhwperf_common.o \
+rgxhwperf.o \
+rgxpower.o \
+rgxstartstop.o \
+rgxtimecorr.o \
+rgxcompute.o \
+rgxsignals.o \
+rgxmulticore.o \
+rgxshader.o \
+rgxray.o \
+devicemem_history_server.o \
+handle_idr.o \
+pvr_buffer_sync.o \
+pvr_fence.o \
+pvr_sync_file.o \
+pvr_counting_timeline.o \
+pvr_sw_fence.o \
+pvr_debugfs.o \
+di_impl_brg.o \
+rgxsrvinit.o \
+rgxfwimageutils.o \
+server_mm_bridge.o \
+server_cmm_bridge.o \
+server_srvcore_bridge.o \
+server_sync_bridge.o \
+server_htbuffer_bridge.o \
+server_pvrtl_bridge.o \
+server_cache_bridge.o \
+server_dmabuf_bridge.o \
+server_rgxtq2_bridge.o \
+server_rgxta3d_bridge.o \
+server_rgxhwperf_bridge.o \
+server_rgxkicksync_bridge.o \
+server_rgxcmp_bridge.o \
+server_rgxregconfig_bridge.o \
+server_rgxtimerquery_bridge.o \
+server_rgxfwdbg_bridge.o \
+server_rgxsignals_bridge.o \
+server_rgxray_bridge.o \
+server_devicememhistory_bridge.o \
+server_synctracking_bridge.o \
+server_di_bridge.o \
+client_mm_direct_bridge.o \
+client_sync_direct_bridge.o \
+client_htbuffer_direct_bridge.o \
+client_cache_direct_bridge.o \
+client_pvrtl_direct_bridge.o \
+client_devicememhistory_direct_bridge.o \
+client_synctracking_direct_bridge.o \
+sysconfig.o \
+interrupt_support.o \
+dma_support.o \
+pci_support.o \
+vmm_pvz_client.o \
+vmm_pvz_server.o \
+vz_vmm_pvz.o \
+vz_vmm_vm.o \
+vmm_type_stub.o
+
+pvrsrvkm-$(CONFIG_DRM_PHYTIUMVR_DVFS) += pvr_dvfs_device.o
+
+pvrsrvkm-$(CONFIG_DRM_PHYTIUMVR_OCTOPUS_DMA) += \
+dma_km.o \
+server_dma_bridge.o
+
+pvrsrvkm-$(CONFIG_DRM_PHYTIUMVR_OCTOPUS_DEBUG) += \
+ client_ri_direct_bridge.o \
+ server_ri_bridge.o \
+ ri_server.o
+pvrsrvkm-$(CONFIG_ARM) += osfunc_arm.o
+pvrsrvkm-$(CONFIG_ARM64) += osfunc_arm64.o
+pvrsrvkm-$(CONFIG_EVENT_TRACING) += trace_events.o
+pvrsrvkm-$(CONFIG_RISCV) += osfunc_riscv.o
+pvrsrvkm-$(CONFIG_X86) += osfunc_x86.o diff --git a/drivers/gpu/drm/phytium/octopus/pvrversion.h b/drivers/gpu/drm/phytium/octopus/pvrversion.h new file mode 100644 index 000000000000..deb2cc4d18f4 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/pvrversion.h @@ -0,0 +1,68 @@ +/*************************************************************************/ /*! +@File pvrversion.h +@Title PhytiumVR version numbers and strings. +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Version numbers and strings for PhytiumVR components. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef PVRVERSION_H +#define PVRVERSION_H + +#define PVRVERSION_MAJ 1U +#define PVRVERSION_MIN 15U + +#define PVRVERSION_FAMILY "rogueddk" +#define PVRVERSION_BRANCHNAME "1.15" +#define PVRVERSION_BUILD 6052913 +#define PVRVERSION_BSCONTROL "Rogue_DDK_Linux_WS" + +#define PVRVERSION_STRING "Rogue_DDK_Linux_WS rogueddk 1.15@6052913" +#define PVRVERSION_STRING_SHORT "1.15@6052913" + +#define COPYRIGHT_TXT "Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved." + +#define PVRVERSION_BUILD_HI 605 +#define PVRVERSION_BUILD_LO 2913 +#define PVRVERSION_STRING_NUMERIC "1.15.605.2913" + +#define PVRVERSION_PACK(MAJOR,MINOR) (((IMG_UINT32)((IMG_UINT32)(MAJOR) & 0xFFFFU) << 16U) | (((MINOR) & 0xFFFFU) << 0U)) +#define PVRVERSION_UNPACK_MAJ(VERSION) (((VERSION) >> 16U) & 0xFFFFU) +#define PVRVERSION_UNPACK_MIN(VERSION) (((VERSION) >> 0U) & 0xFFFFU) + +#endif /* PVRVERSION_H */ diff --git a/drivers/gpu/drm/phytium/octopus/ra.c b/drivers/gpu/drm/phytium/octopus/ra.c new file mode 100644 index 000000000000..a507e493a405 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/ra.c @@ -0,0 +1,1898 @@ +/*************************************************************************/ /*! 
+@File +@Title Resource Allocator +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +@Description + Implements generic resource allocation. The resource allocator was originally + intended to manage address spaces. In practice the resource allocator is + generic and can manage arbitrary sets of integers. + + Resources are allocated from arenas. Arenas can be created with an initial + span of resources. Further resources spans can be added to arenas. A + callback mechanism allows an arena to request further resource spans on + demand. + + Each arena maintains an ordered list of resource segments each described by a + boundary tag. Each boundary tag describes a segment of resources which are + either 'free', available for allocation, or 'busy' currently allocated. + Adjacent 'free' segments are always coalesced to avoid fragmentation. + + For allocation, all 'free' segments are kept on lists of 'free' segments in + a table index by pvr_log2(segment size) i.e., each table index n holds 'free' + segments in the size range 2^n -> 2^(n+1) - 1. + + Allocation policy is based on an *almost* good fit strategy. + + Allocated segments are inserted into a self-scaling hash table which maps + the base resource of the span to the relevant boundary tag. This allows the + code to get back to the boundary tag without exporting explicit boundary tag + references through the API. + + Each arena has an associated quantum size, all allocations from the arena are + made in multiples of the basic quantum. 
+ + On resource exhaustion in an arena, a callback if provided will be used to + request further resources. Resource spans allocated by the callback mechanism + will be returned when freed (through one of the two callbacks). +*/ /**************************************************************************/ + +/* Issues: + * - flags, flags are passed into the resource allocator but are not currently used. + * - determination, of import size, is currently braindead. + * - debug code should be moved out to own module and #ifdef'd + */ + +#include "img_types.h" +#include "img_defs.h" +#include "pvr_debug.h" +#include "pvrsrv_error.h" +#include "uniq_key_splay_tree.h" + +#include "hash.h" +#include "ra.h" +#include "pvrsrv_memallocflags.h" + +#include "osfunc.h" +#include "allocmem.h" +#include "lock.h" +#include "pvr_intrinsics.h" + +/* The initial, and minimum size of the live address -> boundary tag structure + * hash table. The value 64 is a fairly arbitrary choice. The hash table + * resizes on demand so the value chosen is not critical. + */ +#define MINIMUM_HASH_SIZE (64) + + +/* #define RA_VALIDATE */ + +#if defined(__KLOCWORK__) + /* Make sure Klocwork analyses all the code (including the debug one) */ + #if !defined(RA_VALIDATE) + #define RA_VALIDATE + #endif +#endif + +#if !defined(PVRSRV_NEED_PVR_ASSERT) || !defined(RA_VALIDATE) +/* Disable the asserts unless explicitly told otherwise. + * They slow the driver too much for other people + */ + +#undef PVR_ASSERT +/* Use a macro that really do not do anything when compiling in release + * mode! 
+ */ +#define PVR_ASSERT(x) +#endif + +/* boundary tags, used to describe a resource segment */ +struct _BT_ +{ + enum bt_type + { + btt_free, /* free resource segment */ + btt_live /* allocated resource segment */ + } type; + + unsigned int is_leftmost; + unsigned int is_rightmost; + unsigned int free_import; + + /* The base resource and extent of this segment */ + RA_BASE_T base; + RA_LENGTH_T uSize; + + /* doubly linked ordered list of all segments within the arena */ + struct _BT_ *pNextSegment; + struct _BT_ *pPrevSegment; + + /* doubly linked un-ordered list of free segments with the same flags. */ + struct _BT_ *next_free; + struct _BT_ *prev_free; + + /* A user reference associated with this span, user references are + * currently only provided in the callback mechanism + */ + IMG_HANDLE hPriv; + + /* Flags to match on this span */ + RA_FLAGS_T uFlags; + +}; +typedef struct _BT_ BT; + + +/* resource allocation arena */ +struct _RA_ARENA_ +{ + /* arena name for diagnostics output */ + IMG_CHAR name[RA_MAX_NAME_LENGTH]; + + /* allocations within this arena are quantum sized */ + RA_LENGTH_T uQuantum; + + /* import interface, if provided */ + PFN_RA_ALLOC pImportAlloc; + + PFN_RA_FREE pImportFree; + + /* Arbitrary handle provided by arena owner to be passed into the + * import alloc and free hooks + */ + void *pImportHandle; + + IMG_PSPLAY_TREE per_flags_buckets; + + /* resource segment list */ + BT *pHeadSegment; + + /* segment address to boundary tag hash table */ + HASH_TABLE *pSegmentHash; + + /* Lock for this arena */ + POS_LOCK hLock; + + /* Policies that govern the resource area */ + IMG_UINT32 ui32PolicyFlags; + + /* LockClass of this arena. This is used within lockdep to decide if a + * recursive call sequence with the same lock class is allowed or not. 
+ */ + IMG_UINT32 ui32LockClass; + + /* Total Size of the Arena */ + IMG_UINT64 ui64TotalArenaSize; + + /* Size available for allocation in the arena */ + IMG_UINT64 ui64FreeArenaSize; + +}; + +#if defined(__KERNEL__) +struct _RA_ARENA_ITERATOR_ +{ + RA_ARENA *pArena; + BT *pCurrent; + IMG_BOOL bIncludeFreeSegments; +}; +#endif + +/*************************************************************************/ /*! +@Function _RequestAllocFail +@Description Default callback allocator used if no callback is specified, + always fails to allocate further resources to the arena. +@Input _h - callback handle +@Input _uSize - requested allocation size +@Input _uflags - allocation flags +@Input _pBase - receives allocated base +@Output _pActualSize - actual allocation size +@Input _pRef - user reference +@Return PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL, this function always fails + to allocate. +*/ /**************************************************************************/ +static PVRSRV_ERROR +_RequestAllocFail(RA_PERARENA_HANDLE _h, + RA_LENGTH_T _uSize, + RA_FLAGS_T _uFlags, + const IMG_CHAR *_pszAnnotation, + RA_BASE_T *_pBase, + RA_LENGTH_T *_pActualSize, + RA_PERISPAN_HANDLE *_phPriv) +{ + PVR_UNREFERENCED_PARAMETER(_h); + PVR_UNREFERENCED_PARAMETER(_uSize); + PVR_UNREFERENCED_PARAMETER(_pActualSize); + PVR_UNREFERENCED_PARAMETER(_phPriv); + PVR_UNREFERENCED_PARAMETER(_uFlags); + PVR_UNREFERENCED_PARAMETER(_pBase); + PVR_UNREFERENCED_PARAMETER(_pszAnnotation); + + return PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL; +} + + +#if defined(PVR_CTZLL) + /* Make sure to trigger an error if someone change the buckets or the bHasEltsMapping size + the bHasEltsMapping is used to quickly determine the smallest bucket containing elements. + therefore it must have at least as many bits has the buckets array have buckets. The RA + implementation actually uses one more bit. 
*/ + static_assert(ARRAY_SIZE(((IMG_PSPLAY_TREE)0)->buckets) + < 8 * sizeof(((IMG_PSPLAY_TREE) 0)->bHasEltsMapping), + "Too many buckets for bHasEltsMapping bitmap"); +#endif + + +/*************************************************************************/ /*! +@Function pvr_log2 +@Description Computes the floor of the log base 2 of a unsigned integer +@Input n Unsigned integer +@Return Floor(Log2(n)) +*/ /**************************************************************************/ +#if defined(PVR_CLZLL) +/* make sure to trigger a problem if someone changes the RA_LENGTH_T type + indeed the __builtin_clzll is for unsigned long long variables. + + if someone changes RA_LENGTH to unsigned long, then use __builtin_clzl + if it changes to unsigned int, use __builtin_clz + + if it changes for something bigger than unsigned long long, + then revert the pvr_log2 to the classic implementation */ +static_assert(sizeof(RA_LENGTH_T) == sizeof(unsigned long long), + "RA log routines not tuned for sizeof(RA_LENGTH_T)"); + +static inline IMG_UINT32 pvr_log2(RA_LENGTH_T n) +{ + PVR_ASSERT(n != 0); /* Log2 is not defined on 0 */ + + return (8 * sizeof(RA_LENGTH_T)) - 1 - PVR_CLZLL(n); +} +#else +static IMG_UINT32 +pvr_log2(RA_LENGTH_T n) +{ + IMG_UINT32 l = 0; + + PVR_ASSERT(n != 0); /* Log2 is not defined on 0 */ + + n>>=1; + while (n>0) + { + n>>=1; + l++; + } + return l; +} +#endif + + +#if defined(RA_VALIDATE) +/*************************************************************************/ /*! +@Function _IsInSegmentList +@Description Tests if a BT is in the segment list. +@Input pArena The arena. +@Input pBT The boundary tag to look for. +@Return IMG_FALSE BT was not in the arena's segment list. + IMG_TRUE BT was in the arena's segment list. 
+*/ /**************************************************************************/
+static IMG_BOOL
+_IsInSegmentList(RA_ARENA *pArena, BT *pBT)
+{
+	BT* pBTScan;
+
+	PVR_ASSERT(pArena != NULL);
+	PVR_ASSERT(pBT != NULL);
+
+	/* Walk the segment list until we see the BT pointer... */
+	pBTScan = pArena->pHeadSegment;
+	while (pBTScan != NULL && pBTScan != pBT)
+	{
+		pBTScan = pBTScan->pNextSegment;
+	}
+
+	/* Test if we found it and then return */
+	return (pBTScan == pBT);
+}
+
+/*************************************************************************/ /*!
+@Function       _IsInFreeList
+@Description    Tests if a BT is in the free list.
+@Input          pArena  The arena.
+@Input          pBT     The boundary tag to look for.
+@Return         IMG_FALSE  BT was not in the arena's free list.
+                IMG_TRUE   BT was in the arena's free list.
+*/ /**************************************************************************/
+static IMG_BOOL
+_IsInFreeList(RA_ARENA *pArena, BT *pBT)
+{
+	BT* pBTScan;
+	IMG_UINT32 uIndex;
+
+	PVR_ASSERT(pArena != NULL);
+	PVR_ASSERT(pBT != NULL);
+
+	/* Look for the free list that holds BTs of this size... */
+	uIndex = pvr_log2(pBT->uSize);
+	PVR_ASSERT(uIndex < FREE_TABLE_LIMIT);
+
+	pArena->per_flags_buckets = PVRSRVSplay(pBT->uFlags, pArena->per_flags_buckets);
+	if ((pArena->per_flags_buckets == NULL) || (pArena->per_flags_buckets->uiFlags != pBT->uFlags))
+	{
+		return 0;
+	}
+	else
+	{
+		pBTScan = pArena->per_flags_buckets->buckets[uIndex];
+		while (pBTScan != NULL && pBTScan != pBT)
+		{
+			pBTScan = pBTScan->next_free;
+		}
+
+		/* Test if we found it and then return */
+		return (pBTScan == pBT);
+	}
+}
+
+/* is_arena_valid should only be used in debug mode.
+ * It checks that some properties an arena must have are verified + */ +static int is_arena_valid(struct _RA_ARENA_ *arena) +{ + struct _BT_ *chunk; +#if defined(PVR_CTZLL) + unsigned int i; +#endif + + for (chunk = arena->pHeadSegment; chunk != NULL; chunk = chunk->pNextSegment) + { + /* if next segment is NULL, then it must be a rightmost */ + PVR_ASSERT((chunk->pNextSegment != NULL) || (chunk->is_rightmost)); + /* if prev segment is NULL, then it must be a leftmost */ + PVR_ASSERT((chunk->pPrevSegment != NULL) || (chunk->is_leftmost)); + + if (chunk->type == btt_free) + { + /* checks the correctness of the type field */ + PVR_ASSERT(_IsInFreeList(arena, chunk)); + + /* check that there can't be two consecutive free chunks. + Indeed, instead of having two consecutive free chunks, + there should be only one that span the size of the two. */ + PVR_ASSERT((chunk->is_leftmost) || (chunk->pPrevSegment->type != btt_free)); + PVR_ASSERT((chunk->is_rightmost) || (chunk->pNextSegment->type != btt_free)); + } + else + { + /* checks the correctness of the type field */ + PVR_ASSERT(!_IsInFreeList(arena, chunk)); + } + + PVR_ASSERT((chunk->is_leftmost) || (chunk->pPrevSegment->base + chunk->pPrevSegment->uSize == chunk->base)); + PVR_ASSERT((chunk->is_rightmost) || (chunk->base + chunk->uSize == chunk->pNextSegment->base)); + + /* all segments of the same imports must have the same flags ... */ + PVR_ASSERT((chunk->is_rightmost) || (chunk->uFlags == chunk->pNextSegment->uFlags)); + /* ... and the same import handle */ + PVR_ASSERT((chunk->is_rightmost) || (chunk->hPriv == chunk->pNextSegment->hPriv)); + + + /* if a free chunk spans a whole import, then it must be an 'not to free import'. + Otherwise it should have been freed. 
*/ + PVR_ASSERT((!chunk->is_leftmost) || (!chunk->is_rightmost) || (chunk->type == btt_live) || (!chunk->free_import)); + } + +#if defined(PVR_CTZLL) + if (arena->per_flags_buckets != NULL) + { + for (i = 0; i < FREE_TABLE_LIMIT; ++i) + { + /* verify that the bHasEltsMapping is correct for this flags bucket */ + PVR_ASSERT( + ((arena->per_flags_buckets->buckets[i] == NULL) && + (((arena->per_flags_buckets->bHasEltsMapping & ((IMG_ELTS_MAPPINGS) 1 << i)) == 0))) + || + ((arena->per_flags_buckets->buckets[i] != NULL) && + (((arena->per_flags_buckets->bHasEltsMapping & ((IMG_ELTS_MAPPINGS) 1 << i)) != 0))) + ); + } + } +#endif + + /* if arena was not valid, an earlier assert should have triggered */ + return 1; +} +#endif + +/*************************************************************************/ /*! +@Function _SegmentListInsertAfter +@Description Insert a boundary tag into an arena segment list after a + specified boundary tag. +@Input pInsertionPoint The insertion point. +@Input pBT The boundary tag to insert. +*/ /**************************************************************************/ +static INLINE void +_SegmentListInsertAfter(BT *pInsertionPoint, + BT *pBT) +{ + PVR_ASSERT(pBT != NULL); + PVR_ASSERT(pInsertionPoint != NULL); + + pBT->pNextSegment = pInsertionPoint->pNextSegment; + pBT->pPrevSegment = pInsertionPoint; + if (pInsertionPoint->pNextSegment != NULL) + { + pInsertionPoint->pNextSegment->pPrevSegment = pBT; + } + pInsertionPoint->pNextSegment = pBT; +} + +/*************************************************************************/ /*! +@Function _SegmentListInsert +@Description Insert a boundary tag into an arena segment list +@Input pArena The arena. +@Input pBT The boundary tag to insert. 
+*/ /**************************************************************************/ +static INLINE void +_SegmentListInsert(RA_ARENA *pArena, BT *pBT) +{ + PVR_ASSERT(!_IsInSegmentList(pArena, pBT)); + + /* insert into the segment chain */ + pBT->pNextSegment = pArena->pHeadSegment; + pArena->pHeadSegment = pBT; + if (pBT->pNextSegment != NULL) + { + pBT->pNextSegment->pPrevSegment = pBT; + } + + pBT->pPrevSegment = NULL; +} + +/*************************************************************************/ /*! +@Function _SegmentListRemove +@Description Remove a boundary tag from an arena segment list. +@Input pArena The arena. +@Input pBT The boundary tag to remove. +*/ /**************************************************************************/ +static void +_SegmentListRemove(RA_ARENA *pArena, BT *pBT) +{ + PVR_ASSERT(_IsInSegmentList(pArena, pBT)); + + if (pBT->pPrevSegment == NULL) + pArena->pHeadSegment = pBT->pNextSegment; + else + pBT->pPrevSegment->pNextSegment = pBT->pNextSegment; + + if (pBT->pNextSegment != NULL) + pBT->pNextSegment->pPrevSegment = pBT->pPrevSegment; +} + + +/*************************************************************************/ /*! +@Function _BuildBT +@Description Construct a boundary tag for a free segment. +@Input base The base of the resource segment. +@Input uSize The extent of the resource segment. +@Input uFlags The flags to give to the boundary tag +@Return Boundary tag or NULL +*/ /**************************************************************************/ +static BT * +_BuildBT(RA_BASE_T base, RA_LENGTH_T uSize, RA_FLAGS_T uFlags) +{ + BT *pBT; + + pBT = OSAllocZMem(sizeof(BT)); + if (pBT == NULL) + { + return NULL; + } + + pBT->is_leftmost = 1; + pBT->is_rightmost = 1; + /* pBT->free_import = 0; */ + pBT->type = btt_live; + pBT->base = base; + pBT->uSize = uSize; + pBT->uFlags = uFlags; + + return pBT; +} + + +/*************************************************************************/ /*! 
+@Function _SegmentSplit +@Description Split a segment into two, maintain the arena segment list. The + boundary tag should not be in the free table. Neither the + original or the new neighbour boundary tag will be in the free + table. +@Input pBT The boundary tag to split. +@Input uSize The required segment size of boundary tag after + splitting. +@Return New neighbour boundary tag or NULL. +*/ /**************************************************************************/ +static BT * +_SegmentSplit(BT *pBT, RA_LENGTH_T uSize) +{ + BT *pNeighbour; + + pNeighbour = _BuildBT(pBT->base + uSize, pBT->uSize - uSize, pBT->uFlags); + if (pNeighbour == NULL) + { + return NULL; + } + + _SegmentListInsertAfter(pBT, pNeighbour); + + pNeighbour->is_leftmost = 0; + pNeighbour->is_rightmost = pBT->is_rightmost; + pNeighbour->free_import = pBT->free_import; + pBT->is_rightmost = 0; + pNeighbour->hPriv = pBT->hPriv; + pBT->uSize = uSize; + pNeighbour->uFlags = pBT->uFlags; + + return pNeighbour; +} + +/*************************************************************************/ /*! +@Function _FreeListInsert +@Description Insert a boundary tag into an arena free table. +@Input pArena The arena. +@Input pBT The boundary tag. 
+*/ /**************************************************************************/ +static void +_FreeListInsert(RA_ARENA *pArena, BT *pBT) +{ + IMG_UINT32 uIndex; + BT *pBTTemp = NULL; + uIndex = pvr_log2(pBT->uSize); + + PVR_ASSERT(uIndex < FREE_TABLE_LIMIT); + PVR_ASSERT(!_IsInFreeList(pArena, pBT)); + + pBT->type = btt_free; + + pArena->per_flags_buckets = PVRSRVSplay(pBT->uFlags, pArena->per_flags_buckets); + /* the flags item in the splay tree must have been created before-hand by + _InsertResource */ + PVR_ASSERT(pArena->per_flags_buckets != NULL); + PVR_ASSERT(pArena->per_flags_buckets->buckets != NULL); + + /* Handle NULL values for RELEASE builds and/or disabled ASSERT DEBUG builds */ + if (unlikely((pArena->per_flags_buckets == NULL) || (pArena->per_flags_buckets->buckets == NULL))) + { + return; + } + + /* Get the first node in the bucket */ + pBTTemp = pArena->per_flags_buckets->buckets[uIndex]; + + if (unlikely((pArena->ui32PolicyFlags & RA_POLICY_ALLOC_OPTIMAL_MASK) == RA_POLICY_ALLOC_OPTIMAL)) + { + /* Add the node to the start if the bucket is empty */ + if (NULL == pBTTemp) + { + pArena->per_flags_buckets->buckets[uIndex] = pBT; + pBT->next_free = NULL; + pBT->prev_free = NULL; + + } + else + { + BT *pBTPrev = NULL; + /* Traverse the list and identify the appropriate + * place based on the size of the Boundary being inserted */ + while (pBTTemp && (pBTTemp->uSize < pBT->uSize)) + { + pBTPrev = pBTTemp; + pBTTemp = pBTTemp->next_free; + } + /* point the new node to the first higher size element */ + pBT->next_free = pBTTemp; + pBT->prev_free = pBTPrev; + + if (pBTPrev) + { + /* Set the lower size element in the + * chain to point new node */ + pBTPrev->next_free = pBT; + } + else + { + /* Assign the new node to the start of the bucket + * if the bucket is empty */ + pArena->per_flags_buckets->buckets[uIndex] = pBT; + } + /* Make sure the higher size element in the chain points back + * to the new node to be introduced */ + if (pBTTemp) + { + 
pBTTemp->prev_free = pBT; + } + } + } + else + { + pBT->next_free = pBTTemp; + if (pBT->next_free != NULL) + { + pBT->next_free->prev_free = pBT; + } + pBT->prev_free = NULL; + pArena->per_flags_buckets->buckets[uIndex] = pBT; + } + +#if defined(PVR_CTZLL) + /* tells that bucket[index] now contains elements */ + pArena->per_flags_buckets->bHasEltsMapping |= ((IMG_ELTS_MAPPINGS) 1 << uIndex); +#endif + +} + +/*************************************************************************/ /*! +@Function _FreeListRemove +@Description Remove a boundary tag from an arena free table. +@Input pArena The arena. +@Input pBT The boundary tag. +*/ /**************************************************************************/ +static void +_FreeListRemove(RA_ARENA *pArena, BT *pBT) +{ + IMG_UINT32 uIndex; + uIndex = pvr_log2(pBT->uSize); + + PVR_ASSERT(uIndex < FREE_TABLE_LIMIT); + PVR_ASSERT(_IsInFreeList(pArena, pBT)); + + if (pBT->next_free != NULL) + { + pBT->next_free->prev_free = pBT->prev_free; + } + + if (pBT->prev_free != NULL) + { + pBT->prev_free->next_free = pBT->next_free; + } + else + { + pArena->per_flags_buckets = PVRSRVSplay(pBT->uFlags, pArena->per_flags_buckets); + /* the flags item in the splay tree must have already been created + (otherwise how could there be a segment with these flags */ + PVR_ASSERT(pArena->per_flags_buckets != NULL); + PVR_ASSERT(pArena->per_flags_buckets->buckets != NULL); + + /* Handle unlikely NULL values for RELEASE or ASSERT-disabled builds */ + if (unlikely((pArena->per_flags_buckets == NULL) || (pArena->per_flags_buckets->buckets == NULL))) + { + pBT->type = btt_live; + return; + } + + pArena->per_flags_buckets->buckets[uIndex] = pBT->next_free; +#if defined(PVR_CTZLL) + if (pArena->per_flags_buckets->buckets[uIndex] == NULL) + { + /* there is no more elements in this bucket. Update the mapping. 
*/ + pArena->per_flags_buckets->bHasEltsMapping &= ~((IMG_ELTS_MAPPINGS) 1 << uIndex); + } +#endif + } + + PVR_ASSERT(!_IsInFreeList(pArena, pBT)); + pBT->type = btt_live; +} + + +/*************************************************************************/ /*! +@Function _InsertResource +@Description Add a free resource segment to an arena. +@Input pArena The arena. +@Input base The base of the resource segment. +@Input uSize The extent of the resource segment. +@Input uFlags The flags of the new resources. +@Return New bucket pointer + NULL on failure +*/ /**************************************************************************/ +static BT * +_InsertResource(RA_ARENA *pArena, RA_BASE_T base, RA_LENGTH_T uSize, + RA_FLAGS_T uFlags) +{ + BT *pBT; + PVR_ASSERT(pArena!=NULL); + + pBT = _BuildBT(base, uSize, uFlags); + + if (pBT != NULL) + { + IMG_PSPLAY_TREE tmp = PVRSRVInsert(pBT->uFlags, pArena->per_flags_buckets); + if (tmp == NULL) + { + OSFreeMem(pBT); + return NULL; + } + + pArena->per_flags_buckets = tmp; + _SegmentListInsert(pArena, pBT); + _FreeListInsert(pArena, pBT); + } + return pBT; +} + +/*************************************************************************/ /*! +@Function _InsertResourceSpan +@Description Add a free resource span to an arena, marked for free_import. +@Input pArena The arena. +@Input base The base of the resource segment. +@Input uSize The extent of the resource segment. +@Return The boundary tag representing the free resource segment, + or NULL on failure. +*/ /**************************************************************************/ +static INLINE BT * +_InsertResourceSpan(RA_ARENA *pArena, + RA_BASE_T base, + RA_LENGTH_T uSize, + RA_FLAGS_T uFlags) +{ + BT *pBT = _InsertResource(pArena, base, uSize, uFlags); + if (pBT != NULL) + { + pBT->free_import = 1; + } + return pBT; +} + + +/*************************************************************************/ /*! 
+@Function _RemoveResourceSpan +@Description Frees a resource span from an arena, returning the imported + span via the callback. +@Input pArena The arena. +@Input pBT The boundary tag to free. +@Return IMG_FALSE failure - span was still in use + IMG_TRUE success - span was removed and returned +*/ /**************************************************************************/ +static INLINE IMG_BOOL +_RemoveResourceSpan(RA_ARENA *pArena, BT *pBT) +{ + PVR_ASSERT(pArena!=NULL); + PVR_ASSERT(pBT!=NULL); + + if (pBT->free_import && + pBT->is_leftmost && + pBT->is_rightmost) + { + _SegmentListRemove(pArena, pBT); + pArena->pImportFree(pArena->pImportHandle, pBT->base, pBT->hPriv); + OSFreeMem(pBT); + + return IMG_TRUE; + } + + return IMG_FALSE; +} + + +/*************************************************************************/ /*! +@Function _FreeBT +@Description Free a boundary tag taking care of the segment list and the + boundary tag free table. +@Input pArena The arena. +@Input pBT The boundary tag to free. 
+*/ /**************************************************************************/ +static void +_FreeBT(RA_ARENA *pArena, BT *pBT) +{ + BT *pNeighbour; + + PVR_ASSERT(pArena!=NULL); + PVR_ASSERT(pBT!=NULL); + PVR_ASSERT(!_IsInFreeList(pArena, pBT)); + + /* try and coalesce with left neighbour */ + pNeighbour = pBT->pPrevSegment; + if ((!pBT->is_leftmost) && (pNeighbour->type == btt_free)) + { + /* Verify list correctness */ + PVR_ASSERT(pNeighbour->base + pNeighbour->uSize == pBT->base); + + _FreeListRemove(pArena, pNeighbour); + _SegmentListRemove(pArena, pNeighbour); + pBT->base = pNeighbour->base; + + pBT->uSize += pNeighbour->uSize; + pBT->is_leftmost = pNeighbour->is_leftmost; + OSFreeMem(pNeighbour); + } + + /* try to coalesce with right neighbour */ + pNeighbour = pBT->pNextSegment; + if ((!pBT->is_rightmost) && (pNeighbour->type == btt_free)) + { + /* Verify list correctness */ + PVR_ASSERT(pBT->base + pBT->uSize == pNeighbour->base); + + _FreeListRemove(pArena, pNeighbour); + _SegmentListRemove(pArena, pNeighbour); + pBT->uSize += pNeighbour->uSize; + pBT->is_rightmost = pNeighbour->is_rightmost; + OSFreeMem(pNeighbour); + } + + if (_RemoveResourceSpan(pArena, pBT) == IMG_FALSE) + { + _FreeListInsert(pArena, pBT); + PVR_ASSERT((!pBT->is_rightmost) || (!pBT->is_leftmost) || (!pBT->free_import)); + } + + PVR_ASSERT(is_arena_valid(pArena)); +} + + +/* + This function returns the first element in a bucket that can be split + in a way that one of the sub-segments can meet the size and alignment + criteria. + + The first_elt is the bucket to look into. Remember that a bucket is + implemented as a pointer to the first element of the linked list. + + nb_max_try is used to limit the number of elements considered. + This is used to only consider the first nb_max_try elements in the + free-list. The special value ~0 is used to say unlimited i.e. 
consider + all elements in the free list + */ +static INLINE +struct _BT_ *find_chunk_in_bucket(struct _BT_ * first_elt, + RA_LENGTH_T uSize, + RA_LENGTH_T uAlignment, + unsigned int nb_max_try) +{ + struct _BT_ *walker; + + for (walker = first_elt; (walker != NULL) && (nb_max_try != 0); walker = walker->next_free) + { + const RA_BASE_T aligned_base = (uAlignment > 1) ? + (walker->base + uAlignment - 1) & ~(uAlignment - 1) + : walker->base; + + if (walker->base + walker->uSize >= aligned_base + uSize) + { + return walker; + } + + /* 0xFFFF...FFFF is used has nb_max_try = infinity. */ + if (nb_max_try != (unsigned int) ~0) + { + nb_max_try--; + } + } + + return NULL; +} + + +/*************************************************************************/ /*! +@Function _AttemptAllocAligned +@Description Attempt an allocation from an arena. +@Input pArena The arena. +@Input uSize The requested allocation size. +@Input uFlags Allocation flags +@Output phPriv The user references associated with + the imported segment. (optional) +@Input uAlignment Required uAlignment, or 0. + Must be a power of 2 if not 0 +@Output base Allocated resource base (non-optional, must not + be NULL) +@Return IMG_FALSE failure + IMG_TRUE success +*/ /**************************************************************************/ +static IMG_BOOL +_AttemptAllocAligned(RA_ARENA *pArena, + RA_LENGTH_T uSize, + RA_FLAGS_T uFlags, + RA_LENGTH_T uAlignment, + RA_BASE_T *base, + RA_PERISPAN_HANDLE *phPriv) /* this is the "per-import" private data */ +{ + + IMG_UINT32 index_low; + IMG_UINT32 index_high; + IMG_UINT32 i; + struct _BT_ *pBT = NULL; + RA_BASE_T aligned_base; + + PVR_ASSERT(pArena!=NULL); + PVR_ASSERT(base != NULL); + + pArena->per_flags_buckets = PVRSRVSplay(uFlags, pArena->per_flags_buckets); + if ((pArena->per_flags_buckets == NULL) || (pArena->per_flags_buckets->uiFlags != uFlags)) + { + /* no chunks with these flags. 
*/ + return IMG_FALSE; + } + + index_low = pvr_log2(uSize); + if (uAlignment) + { + index_high = pvr_log2(uSize + uAlignment - 1); + } + else + { + index_high = index_low; + } + + PVR_ASSERT(index_low < FREE_TABLE_LIMIT); + PVR_ASSERT(index_high < FREE_TABLE_LIMIT); + PVR_ASSERT(index_low <= index_high); + + if (unlikely((pArena->ui32PolicyFlags & RA_POLICY_BUCKET_MASK) == RA_POLICY_BUCKET_BEST_FIT)) + { + /* This policy ensures the selection of the first lowest size bucket that + * satisfies the request size is selected */ +#if defined(PVR_CTZLL) + i = PVR_CTZLL((~(((IMG_ELTS_MAPPINGS)1 << (index_low )) - 1)) & pArena->per_flags_buckets->bHasEltsMapping); +#else + i = index_low; +#endif + for ( ; (i < FREE_TABLE_LIMIT) && (pBT == NULL); ++i) + { + if (pArena->per_flags_buckets->buckets[i]) + { + pBT = find_chunk_in_bucket(pArena->per_flags_buckets->buckets[i], uSize, uAlignment, (unsigned int) ~0); + } + } + } + else + { +#if defined(PVR_CTZLL) + i = PVR_CTZLL((~(((IMG_ELTS_MAPPINGS)1 << (index_high + 1)) - 1)) & pArena->per_flags_buckets->bHasEltsMapping); +#else + for (i = index_high + 1; (i < FREE_TABLE_LIMIT) && (pArena->per_flags_buckets->buckets[i] == NULL); ++i) + { + } +#endif + PVR_ASSERT(i <= FREE_TABLE_LIMIT); + + if (i != FREE_TABLE_LIMIT) + { + /* since we start at index_high + 1, we are guaranteed to exit */ + pBT = find_chunk_in_bucket(pArena->per_flags_buckets->buckets[i], uSize, uAlignment, 1); + } + else + { + for (i = index_high; (i != index_low - 1) && (pBT == NULL); --i) + { + pBT = find_chunk_in_bucket(pArena->per_flags_buckets->buckets[i], uSize, uAlignment, (unsigned int) ~0); + } + } + } + + if (pBT == NULL) + { + return IMG_FALSE; + } + + aligned_base = (uAlignment > 1) ? 
(pBT->base + uAlignment - 1) & ~(uAlignment - 1) : pBT->base; + + _FreeListRemove(pArena, pBT); + + if ((pArena->ui32PolicyFlags & RA_POLICY_NO_SPLIT_MASK) == RA_POLICY_NO_SPLIT) + { + goto nosplit; + } + + /* with uAlignment we might need to discard the front of this segment */ + if (aligned_base > pBT->base) + { + BT *pNeighbour; + pNeighbour = _SegmentSplit(pBT, (RA_LENGTH_T)(aligned_base - pBT->base)); + /* partition the buffer, create a new boundary tag */ + if (pNeighbour == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Front split failed", __func__)); + /* Put pBT back in the list */ + _FreeListInsert(pArena, pBT); + return IMG_FALSE; + } + + _FreeListInsert(pArena, pBT); + pBT = pNeighbour; + } + + /* the segment might be too big, if so, discard the back of the segment */ + if (pBT->uSize > uSize) + { + BT *pNeighbour; + pNeighbour = _SegmentSplit(pBT, uSize); + /* partition the buffer, create a new boundary tag */ + if (pNeighbour == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Back split failed", __func__)); + /* Put pBT back in the list */ + _FreeListInsert(pArena, pBT); + return IMG_FALSE; + } + + _FreeListInsert(pArena, pNeighbour); + } +nosplit: + pBT->type = btt_live; + + if (!HASH_Insert_Extended(pArena->pSegmentHash, &aligned_base, (uintptr_t)pBT)) + { + _FreeBT(pArena, pBT); + return IMG_FALSE; + } + + if (phPriv != NULL) + *phPriv = pBT->hPriv; + + *base = aligned_base; + + return IMG_TRUE; +} + + + +/*************************************************************************/ /*! +@Function RA_Create +@Description To create a resource arena. +@Input name The name of the arena for diagnostic purposes. +@Input ulog2Quantum The arena allocation quantum. +@Input ui32LockClass the lock class level this arena uses +@Input imp_alloc A resource allocation callback or 0. +@Input imp_free A resource de-allocation callback or 0. +@Input arena_handle Handle passed to alloc and free or 0. +@Input ui32PolicyFlags Policies that govern the arena. 
+@Return arena handle, or NULL. +*/ /**************************************************************************/ +IMG_INTERNAL RA_ARENA * +RA_Create(IMG_CHAR *name, + RA_LOG2QUANTUM_T uLog2Quantum, + IMG_UINT32 ui32LockClass, + PFN_RA_ALLOC imp_alloc, + PFN_RA_FREE imp_free, + RA_PERARENA_HANDLE arena_handle, + IMG_UINT32 ui32PolicyFlags) +{ + RA_ARENA *pArena; + PVRSRV_ERROR eError; + + if (name == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: invalid parameter 'name' (NULL not accepted)", __func__)); + return NULL; + } + + PVR_DPF((PVR_DBG_MESSAGE, "%s: name='%s'", __func__, name)); + + pArena = OSAllocMem(sizeof(*pArena)); + if (pArena == NULL) + { + goto arena_fail; + } + + eError = OSLockCreate(&pArena->hLock); + if (eError != PVRSRV_OK) + { + goto lock_fail; + } + + pArena->pSegmentHash = HASH_Create_Extended(MINIMUM_HASH_SIZE, sizeof(RA_BASE_T), HASH_Func_Default, HASH_Key_Comp_Default); + + if (pArena->pSegmentHash==NULL) + { + goto hash_fail; + } + + OSStringLCopy(pArena->name, name, RA_MAX_NAME_LENGTH); + pArena->pImportAlloc = (imp_alloc!=NULL) ? 
imp_alloc : &_RequestAllocFail; + pArena->pImportFree = imp_free; + pArena->pImportHandle = arena_handle; + pArena->pHeadSegment = NULL; + pArena->uQuantum = 1ULL << uLog2Quantum; + pArena->per_flags_buckets = NULL; + pArena->ui32LockClass = ui32LockClass; + pArena->ui32PolicyFlags = ui32PolicyFlags; + pArena->ui64TotalArenaSize = 0; + pArena->ui64FreeArenaSize = 0; + + PVR_ASSERT(is_arena_valid(pArena)); + return pArena; + +hash_fail: + OSLockDestroy(pArena->hLock); +lock_fail: + OSFreeMem(pArena); + /* not nulling pointer, out of scope */ +arena_fail: + return NULL; +} + +static void _LogRegionCreation(const char *pszMemType, + IMG_UINT64 ui64CpuPA, + IMG_UINT64 ui64DevPA, + IMG_UINT64 ui64Size) +{ +#if !defined(DEBUG) + PVR_UNREFERENCED_PARAMETER(pszMemType); + PVR_UNREFERENCED_PARAMETER(ui64CpuPA); + PVR_UNREFERENCED_PARAMETER(ui64DevPA); + PVR_UNREFERENCED_PARAMETER(ui64Size); +#else + if ((ui64CpuPA != 0) && (ui64DevPA != 0) && (ui64CpuPA != ui64DevPA)) + { + PVR_DPF((PVR_DBG_MESSAGE, + "Creating RA for \"%s\" memory" + " - Cpu PA 0x%016" IMG_UINT64_FMTSPECx "-0x%016" IMG_UINT64_FMTSPECx + " - Dev PA 0x%016" IMG_UINT64_FMTSPECx "-0x%016" IMG_UINT64_FMTSPECx, + pszMemType, + ui64CpuPA, ui64CpuPA + ui64Size, + ui64DevPA, ui64DevPA + ui64Size)); + } + else + { + __maybe_unused IMG_UINT64 ui64PA = + ui64CpuPA != 0 ? ui64CpuPA : ui64DevPA; + __maybe_unused const IMG_CHAR *pszAddrType = + ui64CpuPA == ui64DevPA ? "Cpu/Dev" : (ui64CpuPA != 0 ? 
"Cpu" : "Dev"); + + PVR_DPF((PVR_DBG_MESSAGE, + "Creating RA for \"%s\" memory - %s PA 0x%016" + IMG_UINT64_FMTSPECx "-0x%016" IMG_UINT64_FMTSPECx, + pszMemType, pszAddrType, + ui64PA, ui64PA + ui64Size)); + } +#endif +} + +IMG_INTERNAL RA_ARENA * +RA_Create_With_Span(IMG_CHAR *name, + RA_LOG2QUANTUM_T uLog2Quantum, + IMG_UINT64 ui64CpuBase, + IMG_UINT64 ui64SpanDevBase, + IMG_UINT64 ui64SpanSize) +{ + RA_ARENA *psRA; + IMG_BOOL bSuccess; + + psRA = RA_Create(name, + uLog2Quantum, /* Use OS page size, keeps things simple */ + RA_LOCKCLASS_0, /* This arena doesn't use any other arenas. */ + NULL, /* No Import */ + NULL, /* No free import */ + NULL, /* No import handle */ + RA_POLICY_DEFAULT); /* No restriction on import splitting */ + PVR_LOG_GOTO_IF_FALSE(psRA != NULL, "RA_Create() failed", return_); + + bSuccess = RA_Add(psRA, (RA_BASE_T) ui64SpanDevBase, (RA_LENGTH_T) ui64SpanSize, 0, NULL); + PVR_LOG_GOTO_IF_FALSE(bSuccess, "RA_Add() failed", cleanup_); + + _LogRegionCreation(name, ui64CpuBase, ui64SpanDevBase, ui64SpanSize); + + return psRA; + +cleanup_: + RA_Delete(psRA); +return_: + return NULL; +} + +/*************************************************************************/ /*! +@Function RA_Delete +@Description To delete a resource arena. All resources allocated from + the arena must be freed before deleting the arena. +@Input pArena The arena to delete. 
+*/ /**************************************************************************/ +IMG_INTERNAL void +RA_Delete(RA_ARENA *pArena) +{ + IMG_UINT32 uIndex; + IMG_BOOL bWarn = IMG_TRUE; + + PVR_ASSERT(pArena != NULL); + + if (pArena == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: invalid parameter - pArena", __func__)); + return; + } + + PVR_ASSERT(is_arena_valid(pArena)); + + PVR_DPF((PVR_DBG_MESSAGE, + "%s: name='%s'", __func__, pArena->name)); + + while (pArena->pHeadSegment != NULL) + { + BT *pBT = pArena->pHeadSegment; + + if (pBT->type != btt_free) + { + if (bWarn) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Allocations still exist in the arena that is being destroyed", __func__)); + PVR_DPF((PVR_DBG_ERROR, "%s: Likely Cause: client drivers not freeing allocations before destroying devmem context", __func__)); + PVR_DPF((PVR_DBG_ERROR, "%s: base = 0x%llx size=0x%llx", __func__, + (unsigned long long)pBT->base, (unsigned long long)pBT->uSize)); + PVR_DPF((PVR_DBG_ERROR, "%s: This warning will be issued only once for the first allocation found!", __func__)); + bWarn = IMG_FALSE; + } + } + else + { + _FreeListRemove(pArena, pBT); + } + + _SegmentListRemove(pArena, pBT); + OSFreeMem(pBT); + /* not nulling original pointer, it has changed */ + } + + while (pArena->per_flags_buckets != NULL) + { + for (uIndex=0; uIndexper_flags_buckets->buckets[uIndex] == NULL); + } + + pArena->per_flags_buckets = PVRSRVDelete(pArena->per_flags_buckets->uiFlags, pArena->per_flags_buckets); + } + + HASH_Delete(pArena->pSegmentHash); + OSLockDestroy(pArena->hLock); + OSFreeMem(pArena); + /* not nulling pointer, copy on stack */ +} + +/*************************************************************************/ /*! +@Function RA_Add +@Description To add a resource span to an arena. The span must not + overlap with any span previously added to the arena. +@Input pArena The arena to add a span into. +@Input base The base of the span. +@Input uSize The extent of the span. 
+@Input uFlags the flags of the new import +@Input hPriv a private handle associate to the span. + (reserved for user) +@Return IMG_TRUE - Success + IMG_FALSE - failure +*/ /**************************************************************************/ +IMG_INTERNAL IMG_BOOL +RA_Add(RA_ARENA *pArena, + RA_BASE_T base, + RA_LENGTH_T uSize, + RA_FLAGS_T uFlags, + RA_PERISPAN_HANDLE hPriv) +{ + struct _BT_* bt; + PVR_ASSERT(pArena != NULL); + PVR_ASSERT(uSize != 0); + + if (pArena == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: invalid parameter - pArena", __func__)); + return IMG_FALSE; + } + + if (uSize == 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: invalid size 0 added to arena %s", __func__, pArena->name)); + return IMG_FALSE; + } + + OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass); + PVR_ASSERT(is_arena_valid(pArena)); + PVR_DPF((PVR_DBG_MESSAGE, "%s: name='%s', " + "base=0x%llx, size=0x%llx", __func__, pArena->name, + (unsigned long long)base, (unsigned long long)uSize)); + + uSize = (uSize + pArena->uQuantum - 1) & ~(pArena->uQuantum - 1); + bt = _InsertResource(pArena, base, uSize, uFlags); + if (bt != NULL) + { + bt->hPriv = hPriv; + } + + PVR_ASSERT(is_arena_valid(pArena)); + + pArena->ui64TotalArenaSize += uSize; + pArena->ui64FreeArenaSize += uSize; + OSLockRelease(pArena->hLock); + + return bt != NULL; +} + +/*************************************************************************/ /*! +@Function RA_Alloc +@Description To allocate resource from an arena. +@Input pArena The arena +@Input uRequestSize The size of resource segment requested. +@Input uImportMultiplier Import x-times more for future requests if + we have to import new memory. +@Input uImportFlags Flags influencing allocation policy. +@Input uAlignment The uAlignment constraint required for the + allocated segment, use 0 if uAlignment not + required, otherwise must be a power of 2. 
@Input          pszAnnotation      String to describe the allocation
@Output         base               Allocated base resource
@Output         pActualSize        The actual size of resource segment
                                   allocated, typically rounded up by quantum.
@Output         phPriv             The user reference associated with allocated
                                   resource span.
@Return         PVRSRV_OK - success
*/ /**************************************************************************/
IMG_INTERNAL PVRSRV_ERROR
RA_Alloc(RA_ARENA *pArena,
         RA_LENGTH_T uRequestSize,
         IMG_UINT8 uImportMultiplier,
         RA_FLAGS_T uImportFlags,
         RA_LENGTH_T uAlignment,
         const IMG_CHAR *pszAnnotation,
         RA_BASE_T *base,
         RA_LENGTH_T *pActualSize,
         RA_PERISPAN_HANDLE *phPriv)
{
	PVRSRV_ERROR eError;
	IMG_BOOL bResult;
	RA_LENGTH_T uSize = uRequestSize;
	/* Only the flag bits that differentiate free lists take part in the
	 * arena search; the full uImportFlags are still passed to the importer. */
	RA_FLAGS_T uFlags = (uImportFlags & PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK);

	if (pArena == NULL || uImportMultiplier == 0 || uSize == 0)
	{
		PVR_DPF((PVR_DBG_ERROR,
		         "%s: One of the necessary parameters is 0", __func__));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass);
	PVR_ASSERT(is_arena_valid(pArena));

	if (pActualSize != NULL)
	{
		*pActualSize = uSize;
	}

	/* Must be a power of 2 or 0 */
	PVR_ASSERT((uAlignment == 0) || (uAlignment & (uAlignment - 1)) == 0);

	PVR_DPF((PVR_DBG_MESSAGE,
	         "%s: arena='%s', size=0x%llx(0x%llx), "
	         "alignment=0x%llx", __func__, pArena->name,
	         (unsigned long long)uSize, (unsigned long long)uRequestSize,
	         (unsigned long long)uAlignment));

	/* if allocation failed then we might have an import source which
	   can provide more resource, else we will have to fail the
	   allocation to the caller. */
	bResult = _AttemptAllocAligned(pArena, uSize, uFlags, uAlignment, base, phPriv);
	if (!bResult)
	{
		IMG_HANDLE hPriv;
		RA_BASE_T import_base;
		RA_LENGTH_T uImportSize = uSize;

		/*
			Ensure that we allocate sufficient space to meet the uAlignment
			constraint
		*/
		if (uAlignment > pArena->uQuantum)
		{
			uImportSize += (uAlignment - pArena->uQuantum);
		}

		/* apply over-allocation multiplier after all alignment adjustments */
		uImportSize *= uImportMultiplier;

		/* ensure that we import according to the quanta of this arena */
		uImportSize = (uImportSize + pArena->uQuantum - 1) & ~(pArena->uQuantum - 1);

		/* Ask the import callback for more resource; it may return a span
		 * larger than requested (uImportSize is in/out). */
		eError = pArena->pImportAlloc(pArena->pImportHandle,
		                              uImportSize, uImportFlags,
		                              pszAnnotation,
		                              &import_base, &uImportSize,
		                              &hPriv);
		if (PVRSRV_OK != eError)
		{
			OSLockRelease(pArena->hLock);
			return eError;
		}
		else
		{
			BT *pBT;
			pBT = _InsertResourceSpan(pArena, import_base, uImportSize, uFlags);
			/* successfully import more resource, create a span to
			   represent it and retry the allocation attempt */
			if (pBT == NULL)
			{
				/* insufficient resources to insert the newly acquired span,
				   so free it back again */
				pArena->pImportFree(pArena->pImportHandle, import_base, hPriv);

				PVR_DPF((PVR_DBG_MESSAGE, "%s: name='%s', "
				         "size=0x%llx failed!", __func__, pArena->name,
				         (unsigned long long)uSize));
				/* RA_Dump (arena); */

				OSLockRelease(pArena->hLock);
				return PVRSRV_ERROR_RA_INSERT_RESOURCE_SPAN_FAILED;
			}

			pBT->hPriv = hPriv;

			/* Retry the allocation now that the new span is in place. */
			bResult = _AttemptAllocAligned(pArena, uSize, uFlags, uAlignment, base, phPriv);
			if (!bResult)
			{
				PVR_DPF((PVR_DBG_ERROR,
				         "%s: name='%s' second alloc failed!",
				         __func__, pArena->name));

				/*
				  On failure of _AttemptAllocAligned() depending on the exact point
				  of failure, the imported segment may have been used and freed, or
				  left untouched. If the latter, we need to return it.
				*/
				_FreeBT(pArena, pBT);

				OSLockRelease(pArena->hLock);
				return PVRSRV_ERROR_RA_ATTEMPT_ALLOC_ALIGNED_FAILED;
			}
			else
			{
				/* Check if the new allocation was in the span we just added...
				 * NOTE(review): the upper-bound test uses '>' against
				 * import_base + uImportSize (one past the span end); confirm
				 * an allocation starting exactly at the span end is intended
				 * to pass this check. */
				if (*base < import_base || *base > (import_base + uImportSize))
				{
					PVR_DPF((PVR_DBG_ERROR,
					         "%s: name='%s' alloc did not occur in the imported span!",
					         __func__, pArena->name));

					/*
					  Remove the imported span which should not be in use (if it is then
					  that is okay, but essentially no span should exist that is not used).
					*/
					_FreeBT(pArena, pBT);
				}
				else
				{
					/* Newly imported resource counts towards both totals;
					   the allocated part is subtracted below. */
					pArena->ui64FreeArenaSize += uImportSize;
					pArena->ui64TotalArenaSize += uImportSize;
				}
			}
		}
	}

	PVR_DPF((PVR_DBG_MESSAGE, "%s: name='%s', size=0x%llx, "
	         "*base=0x%llx = %d", __func__, pArena->name, (unsigned long long)uSize,
	         (unsigned long long)*base, bResult));

	PVR_ASSERT(is_arena_valid(pArena));

	pArena->ui64FreeArenaSize -= uSize;

	OSLockRelease(pArena->hLock);
	return PVRSRV_OK;
}

/*************************************************************************/ /*!
@Function       RA_Find_BT_VARange
@Description    To find the boundary tag associated with the given device
                virtual address.
@Input          pArena             The arena
@input          base               Allocated base resource
@Input          uRequestSize       The size of resource segment requested.
@Input          uImportFlags       Flags influencing allocation policy.
@Return         Boundary Tag - success, NULL on failure
*/ /**************************************************************************/
static BT *RA_Find_BT_VARange(RA_ARENA *pArena,
                              RA_BASE_T base,
                              RA_LENGTH_T uRequestSize,
                              RA_FLAGS_T uImportFlags)
{
	IMG_PSPLAY_TREE psSplaynode;
	/* Initial value is never read; pBT is reassigned per bucket below. */
	BT *pBT = pArena->pHeadSegment;
	IMG_UINT32 uIndex;

	/* Start at the smallest bucket that could hold the request. */
	uIndex = pvr_log2 (uRequestSize);

	/* Find the splay node associated with these import flags */
	psSplaynode = PVRSRVFindNode(uImportFlags, pArena->per_flags_buckets);

	if (psSplaynode == NULL)
	{
		return NULL;
	}

	/* Find the free Boundary Tag from the bucket that holds the requested range */
	while (uIndex < FREE_TABLE_LIMIT)
	{
		pBT = psSplaynode->buckets[uIndex];

		while (pBT)
		{
			/* A candidate must fully enclose [base, base + uRequestSize). */
			if ((pBT->base <= base) && ((pBT->base + pBT->uSize) >= (base + uRequestSize)))
			{
				if (pBT->type == btt_free)
				{
					return pBT;
				}
				else
				{
					/* NOTE(review): a non-free BT on a free list is a
					 * bookkeeping error. When PVR_ASSERT compiles out
					 * (release builds) pBT is NOT advanced here, which
					 * would spin forever — confirm free lists can only
					 * ever hold btt_free entries. */
					PVR_ASSERT(pBT->type == btt_free);
				}
			}
			else{
				pBT = pBT->next_free;
			}
		}

#if defined(PVR_CTZLL)
		/* This could further be optimised to get the next valid bucket */
		/* NOTE(review): assumes bHasEltsMapping always has a set bit above
		 * uIndex to terminate this scan — confirm the sentinel bit. */
		while (!(psSplaynode->bHasEltsMapping & (1ULL << ++uIndex)));
#else
		uIndex++;
#endif
	}

	return NULL;
}


/*************************************************************************/ /*!
@Function       RA_Alloc_Range
@Description    To allocate requested device virtual address resource from an arena.
@Input          pArena             The arena
@Input          uRequestSize       The size of resource segment requested.
@Input          uImportFlags       Flags influencing allocation policy.
@Input          uAlignment         The uAlignment constraint required for the
                                   allocated segment, use 0 if uAlignment not required, otherwise
                                   must be a power of 2.
@input          base               Allocated base resource
@Output         pActualSize        The actual size of resource segment
                                   allocated, typically rounded up by quantum.
@Return         PVRSRV_OK - success
*/ /**************************************************************************/
IMG_INTERNAL PVRSRV_ERROR
RA_Alloc_Range(RA_ARENA *pArena,
               RA_LENGTH_T uRequestSize,
               RA_FLAGS_T uImportFlags,
               RA_LENGTH_T uAlignment,
               RA_BASE_T base,
               RA_LENGTH_T *pActualSize)
{
	RA_LENGTH_T uSize = uRequestSize;
	BT *pBT = NULL;
	PVRSRV_ERROR eError = PVRSRV_OK;

	if (pArena == NULL || uSize == 0)
	{
		PVR_DPF ((PVR_DBG_ERROR,
		          "%s: One of the necessary parameters is 0", __func__));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass);
	PVR_ASSERT(is_arena_valid(pArena));

	/* Align the requested size to the Arena Quantum */
	uSize = ((uSize + pArena->uQuantum - 1) & ~(pArena->uQuantum - 1));

	/* Must be a power of 2 or 0 */
	PVR_ASSERT((uAlignment == 0) || (uAlignment & (uAlignment - 1)) == 0);

	/* The caller-supplied base itself must honour the alignment; this
	 * function cannot move it. */
	if (uAlignment > 1)
	{
		if (base != ((base + uAlignment - 1) & ~(uAlignment - 1)))
		{
			PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, unlock_);
		}
	}

	/* Find if the segment in the range exists and is free
	 * Check if the segment can be split
	 * Find the bucket that points to this segment
	 * Find the free segment is in the free list
	 * remove the free segment
	 * split the segment into three segments one prior free, alloc range,
	 * free segment after the range.
	 * remove the allocated range segment from the free list
	 * hook up the prior and after segments back to free list
	 * For each free, find the bucket the segment should go to
	 */

	pBT = RA_Find_BT_VARange(pArena, base, uSize, uImportFlags);

	if (pBT == NULL)
	{
		PVR_GOTO_WITH_ERROR(eError,
		                    PVRSRV_ERROR_RA_REQUEST_VIRT_ADDR_FAIL,
		                    unlock_);
	}

	/* Remove the boundary tag from the free list */
	_FreeListRemove (pArena, pBT);

	/* if requested VA start in the middle of the BT, split the BT accordingly */
	if (base > pBT->base)
	{
		BT *pNeighbour;
		pNeighbour = _SegmentSplit (pBT, (RA_LENGTH_T)(base - pBT->base));
		/* partition the buffer, create a new boundary tag */
		if (pNeighbour == NULL)
		{
			/* Put pBT back in the list */
			_FreeListInsert (pArena, pBT);
			PVR_LOG_GOTO_WITH_ERROR("_SegmentSplit (1)", eError,
			                        PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL,
			                        unlock_);
		}

		/* Insert back the free BT to the free list; the allocation
		 * continues with the right-hand (neighbour) segment. */
		_FreeListInsert(pArena, pBT);
		pBT = pNeighbour;
	}

	/* the segment might be too big, if so, discard the back of the segment */
	if (pBT->uSize > uSize)
	{
		BT *pNeighbour;
		pNeighbour = _SegmentSplit(pBT, uSize);
		/* partition the buffer, create a new boundary tag */
		if (pNeighbour == NULL)
		{
			/* Put pBT back in the list */
			_FreeListInsert (pArena, pBT);
			PVR_LOG_GOTO_WITH_ERROR("_SegmentSplit (2)", eError,
			                        PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL,
			                        unlock_);
		}

		/* Insert back the free BT to the free list */
		_FreeListInsert (pArena, pNeighbour);
	}

	pBT->type = btt_live;

	/* Register the allocation so RA_Free() can find it by base address. */
	if (!HASH_Insert_Extended (pArena->pSegmentHash, &base, (uintptr_t)pBT))
	{
		_FreeBT (pArena, pBT);
		PVR_GOTO_WITH_ERROR(eError,
		                    PVRSRV_ERROR_INSERT_HASH_TABLE_DATA_FAILED,
		                    unlock_);
	}

	if (pActualSize != NULL)
	{
		*pActualSize = uSize;
	}

	pArena->ui64FreeArenaSize -= uSize;

unlock_:
	OSLockRelease(pArena->hLock);

	return eError;
}

+/*************************************************************************/ /*! +@Function RA_Free +@Description To free a resource segment. +@Input pArena The arena the segment was originally allocated from. +@Input base The base of the resource span to free. +*/ /**************************************************************************/ +IMG_INTERNAL void +RA_Free(RA_ARENA *pArena, RA_BASE_T base) +{ + BT *pBT; + + PVR_ASSERT(pArena != NULL); + + if (pArena == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: invalid parameter - pArena", __func__)); + return; + } + + OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass); + PVR_ASSERT(is_arena_valid(pArena)); + + PVR_DPF((PVR_DBG_MESSAGE, "%s: name='%s', base=0x%llx", __func__, pArena->name, + (unsigned long long)base)); + + pBT = (BT *) HASH_Remove_Extended(pArena->pSegmentHash, &base); + PVR_ASSERT(pBT != NULL); + + pArena->ui64FreeArenaSize += pBT->uSize; + + if (pBT) + { + PVR_ASSERT(pBT->base == base); + _FreeBT(pArena, pBT); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: no resource span found for given base (0x%llX) in arena %s", + __func__, (unsigned long long) base, + pArena->name)); + } + + PVR_ASSERT(is_arena_valid(pArena)); + OSLockRelease(pArena->hLock); +} + +IMG_INTERNAL void +RA_Get_Usage_Stats(RA_ARENA *pArena, PRA_USAGE_STATS psRAStats) +{ + psRAStats->ui64TotalArenaSize = pArena->ui64TotalArenaSize; + psRAStats->ui64FreeArenaSize = pArena->ui64FreeArenaSize; +} + +#if defined(__KERNEL__) +/* #define _DBG(...) PVR_LOG((__VA_ARGS__)) */ +#define _DBG(...) 

/* Acquire an iterator over the arena's segment list. The arena lock is
 * taken here and held for the iterator's whole lifetime; it is only
 * released again by RA_IteratorRelease(). Returns NULL on allocation
 * failure (in which case the lock is NOT held). */
RA_ARENA_ITERATOR *
RA_IteratorAcquire(RA_ARENA *pArena, IMG_BOOL bIncludeFreeSegments)
{
	RA_ARENA_ITERATOR *pIter = OSAllocMem(sizeof(*pIter));
	PVR_LOG_RETURN_IF_FALSE(pIter != NULL, "OSAllocMem", NULL);

	OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass);

	pIter->pArena = pArena;
	pIter->bIncludeFreeSegments = bIncludeFreeSegments;

	RA_IteratorReset(pIter);

	return pIter;
}

/* Release an iterator previously acquired with RA_IteratorAcquire():
 * drops the arena lock and frees the iterator. */
void
RA_IteratorRelease(RA_ARENA_ITERATOR *pIter)
{
	PVR_ASSERT(pIter != NULL);

	if (pIter == NULL)
	{
		return;
	}

	OSLockRelease(pIter->pArena->hLock);

	OSFreeMem(pIter);
}

/* Rewind the iterator to the first (relevant) segment of the arena. */
void
RA_IteratorReset(RA_ARENA_ITERATOR *pIter)
{
	BT *pNext;

	PVR_ASSERT(pIter != NULL);

	pNext = pIter->pArena->pHeadSegment;

	/* find next element if we're not including the free ones */
	if (!pIter->bIncludeFreeSegments)
	{
		while (pNext != NULL && pNext->type != btt_live)
		{
			_DBG("(%s()) skipping segment=%px, size=0x%" IMG_UINT64_FMTSPECx ", "
			     "type=%u", __func__, (void *) pNext->base, pNext->uSize,
			     pNext->type);
			pNext = pNext->pNextSegment;
		}
	}

	_DBG("(%s()) current segment=%px, size=0x%" IMG_UINT64_FMTSPECx ", "
	     "type=%u", __func__,
	     pNext != NULL ? (void *) pNext->base : NULL,
	     pNext != NULL ? pNext->uSize : 0,
	     pNext != NULL ? pNext->type : 0);

	/* if bIncludeFreeSegments then pNext here is either a valid pointer to
	 * "live" segment or NULL and if !bIncludeFreeSegments then it's either
	 * a valid pointer to any next segment or NULL */
	pIter->pCurrent = pNext;
}

/* Produce the next segment's data in *pData and advance the iterator.
 * Returns IMG_FALSE when the iteration is exhausted. Contiguous live
 * segments are merged into a single reported entry. */
IMG_BOOL
RA_IteratorNext(RA_ARENA_ITERATOR *pIter, RA_ITERATOR_DATA *pData)
{
	BT *pNext;

	PVR_ASSERT(pIter != NULL);

	if (pIter == NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "pIter in %s() is NULL", __func__));
		return IMG_FALSE;
	}

	if (pIter->pCurrent == NULL)
	{
		return IMG_FALSE;
	}

	pNext = pIter->pCurrent;

	_DBG("(%s()) current segment=%px, size=0x%" IMG_UINT64_FMTSPECx ", "
	     "type=%u", __func__, (void *) pNext->base, pNext->uSize,
	     pNext->type);

	pData->uiAddr = pIter->pCurrent->base;
	pData->uiSize = pIter->pCurrent->uSize;
	pData->bFree = pIter->pCurrent->type == btt_free;

	/* squash contiguous segments
	 * NOTE(review): only btt_live neighbours are merged, even when the
	 * current entry is free — confirm free runs are intentionally reported
	 * segment-by-segment. */
	while ((pNext = pNext->pNextSegment) != NULL &&
	       pNext->type == btt_live &&
	       pNext->base == pData->uiAddr + pData->uiSize)
	{
		_DBG("(%s()) squashing segment=%px, size=0x%" IMG_UINT64_FMTSPECx ", "
		     "type=%u", __func__, (void *) pNext->base, pNext->uSize,
		     pNext->type);
		pData->uiSize += pNext->uSize;
	}

	/* advance to next */
	if (!pIter->bIncludeFreeSegments)
	{
		while (pNext != NULL && pNext->type != btt_live)
		{
			_DBG("(%s()) skipping segment=%px, size=0x%" IMG_UINT64_FMTSPECx ", "
			     "type=%u", __func__, (void *) pNext->base, pNext->uSize,
			     pNext->type);
			pNext = pNext->pNextSegment;
		}
	}

	_DBG("(%s()) next segment=%px, size=0x%" IMG_UINT64_FMTSPECx ", "
	     "type=%u", __func__,
	     pNext != NULL ? (void *) pNext->base : NULL,
	     pNext != NULL ? pNext->uSize : 0,
	     pNext != NULL ?
pNext->type : 0); + + /* if bIncludeFreeSegments then pNext here is either a valid pointer to + * "live" segment or NULL and if !bIncludeFreeSegments then it's either + * a valid pointer to any next segment or NULL */ + pIter->pCurrent = pNext; + + return IMG_TRUE; +} +#endif /* defined(__KERNEL__) */ diff --git a/drivers/gpu/drm/phytium/octopus/ra.h b/drivers/gpu/drm/phytium/octopus/ra.h new file mode 100644 index 000000000000..f49ef8f1094d --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/ra.h @@ -0,0 +1,375 @@ +/*************************************************************************/ /*! +@File +@Title Resource Allocator API +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RA_H +#define RA_H + +#include "img_types.h" +#include "pvrsrv_error.h" + +#define RA_MAX_NAME_LENGTH 20 + +/** Resource arena. + * struct _RA_ARENA_ deliberately opaque + */ +typedef struct _RA_ARENA_ RA_ARENA; //PRQA S 3313 + +#if defined(__KERNEL__) +/** Resource arena's iterator. + * struct _RA_ARENA_ITERATOR_ deliberately opaque + */ +typedef struct _RA_ARENA_ITERATOR_ RA_ARENA_ITERATOR; + +typedef struct _RA_ITERATOR_DATA_ { + IMG_UINT64 uiAddr; + IMG_UINT64 uiSize; + IMG_BOOL bFree; +} RA_ITERATOR_DATA; +#endif /* defined(__KERNEL__) */ + +/** Resource arena usage statistics. + * struct _RA_USAGE_STATS + */ +typedef struct _RA_USAGE_STATS { + IMG_UINT64 ui64TotalArenaSize; + IMG_UINT64 ui64FreeArenaSize; +}RA_USAGE_STATS, *PRA_USAGE_STATS; + +/* + * Per-Arena handle - this is private data for the caller of the RA. + * The RA knows nothing about this data. It is given it in RA_Create, and + * promises to pass it to calls to the ImportAlloc and ImportFree callbacks + */ +typedef IMG_HANDLE RA_PERARENA_HANDLE; +/* + * Per-Import handle - this is private data for the caller of the RA. 
+ * The RA knows nothing about this data. It is given it on a per-import basis, + * basis, either the "initial" import at RA_Create time, or further imports + * via the ImportAlloc callback. It sends it back via the ImportFree callback, + * and also provides it in answer to any RA_Alloc request to signify from + * which "import" the allocation came. + */ +typedef IMG_HANDLE RA_PERISPAN_HANDLE; + +typedef IMG_UINT64 RA_BASE_T; +typedef IMG_UINT32 RA_LOG2QUANTUM_T; +typedef IMG_UINT64 RA_LENGTH_T; + +/* Lock classes: describes the level of nesting between different arenas. */ +#define RA_LOCKCLASS_0 0 +#define RA_LOCKCLASS_1 1 +#define RA_LOCKCLASS_2 2 + +#define RA_NO_IMPORT_MULTIPLIER 1 + +/* + * Allocation Policies that govern the resource areas. + * */ + +/* --- Resource allocation policy definitions --- +* | 31.........4|......3....|........2.............|1..................0| +* | Reserved | No split | Area bucket selection| Free node selection| +*/ + +/* + * Fast allocation policy allows to pick the first node + * that satisfies the request. + * It is the default policy for all arenas. + * */ +#define RA_POLICY_ALLOC_FAST (0U) +/* + * Optimal allocation policy allows to pick the lowest size node + * that satisfies the request. This picking policy helps in reducing the fragmentation. + * This minimises the necessity to split the nodes more often as the optimal + * ones are picked. + * As a result any future higher size allocation requests are likely to succeed + */ +#define RA_POLICY_ALLOC_OPTIMAL (1U) +#define RA_POLICY_ALLOC_OPTIMAL_MASK (3U) + +/* + * Bucket selection policies + * */ +/* Assured bucket policy makes sure the selected bucket is guaranteed + * to satisfy the given request. Generally Nodes picked up from such a + * bucket need to be further split. 
However picking node that belongs to this + * bucket is likely to succeed and thus promises better response times */ +#define RA_POLICY_BUCKET_ASSURED_FIT (0U) +/* + * Best fit bucket policy selects a bucket with free nodes that are likely + * to satisfy the request and nodes that are close to the requested size. + * Nodes picked up from this bucket may likely to satisfy the request but not + * guaranteed. Failing to satisfy the request from this bucket mean further + * higher size buckets are selected in the later iterations till the request + * is satisfied. + * + * Hence response times may vary depending on availability of free nodes + * that satisfy the request. + * */ +#define RA_POLICY_BUCKET_BEST_FIT (4U) +#define RA_POLICY_BUCKET_MASK (4U) + +/* This flag ensures the imports will not be split up and Allocations will always get + * their own import + */ +#define RA_POLICY_NO_SPLIT (8U) +#define RA_POLICY_NO_SPLIT_MASK (8U) + +/* + * Default Arena Policy + * */ +#define RA_POLICY_DEFAULT (RA_POLICY_ALLOC_FAST | RA_POLICY_BUCKET_ASSURED_FIT) + +/* + * Flags in an "import" must match the flags for an allocation + */ +typedef IMG_UINT64 RA_FLAGS_T; + +/*************************************************************************/ /*! +@Function Callback function PFN_RA_ALLOC +@Description RA import allocate function +@Input RA_PERARENA_HANDLE RA handle +@Input RA_LENGTH_T Request size +@Input RA_FLAGS_T RA flags +@Input IMG_CHAR Annotation +@Input RA_BASE_T Allocation base +@Input RA_LENGTH_T Actual size +@Input RA_PERISPAN_HANDLE Per import private data +@Return PVRSRV_ERROR PVRSRV_OK or error code +*/ /**************************************************************************/ +typedef PVRSRV_ERROR (*PFN_RA_ALLOC)(RA_PERARENA_HANDLE, + RA_LENGTH_T, + RA_FLAGS_T, + const IMG_CHAR*, + RA_BASE_T*, + RA_LENGTH_T*, + RA_PERISPAN_HANDLE*); + +/*************************************************************************/ /*! 
+@Function Callback function PFN_RA_FREE +@Description RA free imported allocation +@Input RA_PERARENA_HANDLE RA handle +@Input RA_BASE_T Allocation base +@Output RA_PERISPAN_HANDLE Per import private data +*/ /**************************************************************************/ +typedef void (*PFN_RA_FREE)(RA_PERARENA_HANDLE, + RA_BASE_T, + RA_PERISPAN_HANDLE); + +/** + * @Function RA_Create + * + * @Description To create a resource arena. + * + * @Input name - the name of the arena for diagnostic purposes. + * @Input uLog2Quantum - the arena allocation quantum. + * @Input ui32LockClass - the lock class level this arena uses. + * @Input imp_alloc - a resource allocation callback or 0. + * @Input imp_free - a resource de-allocation callback or 0. + * @Input per_arena_handle - private handle passed to alloc and free or 0. + * @Input ui32PlicyFlags - Policies that govern the arena. + * @Return pointer to arena, or NULL. + */ +RA_ARENA * +RA_Create(IMG_CHAR *name, + /* subsequent imports: */ + RA_LOG2QUANTUM_T uLog2Quantum, + IMG_UINT32 ui32LockClass, + PFN_RA_ALLOC imp_alloc, + PFN_RA_FREE imp_free, + RA_PERARENA_HANDLE per_arena_handle, + IMG_UINT32 ui32PolicyFlags); + +/** + * @Function RA_Create_With_Span + * + * @Description + * + * Create a resource arena and initialises it, with a given resource span. + * + * @Input name - String briefly describing the RA's purpose. + * @Input uLog2Quantum - the arena allocation quantum. + * @Input ui64CpuBase - CPU Physical Base Address of the RA. + * @Input ui64SpanDevBase - Device Physical Base Address of the RA. + * @Input ui64SpanSize - Size of the span to add to the created RA. + * @Return pointer to arena, or NULL. +*/ +RA_ARENA * +RA_Create_With_Span(IMG_CHAR *name, + RA_LOG2QUANTUM_T uLog2Quantum, + IMG_UINT64 ui64CpuBase, + IMG_UINT64 ui64SpanDevBase, + IMG_UINT64 ui64SpanSize); + +/** + * @Function RA_Delete + * + * @Description + * + * To delete a resource arena. 
All resources allocated from the arena + * must be freed before deleting the arena. + * + * @Input pArena - the arena to delete. + * @Return None + */ +void +RA_Delete(RA_ARENA *pArena); + +/** + * @Function RA_Add + * + * @Description + * + * To add a resource span to an arena. The span must not overlap with + * any span previously added to the arena. + * + * @Input pArena - the arena to add a span into. + * @Input base - the base of the span. + * @Input uSize - the extent of the span. + * @Input hPriv - handle associated to the span (reserved for user uses) + * @Return IMG_TRUE - success, IMG_FALSE - failure + */ +IMG_BOOL +RA_Add(RA_ARENA *pArena, + RA_BASE_T base, + RA_LENGTH_T uSize, + RA_FLAGS_T uFlags, + RA_PERISPAN_HANDLE hPriv); + +/** + * @Function RA_Alloc + * + * @Description To allocate resource from an arena. + * + * @Input pArena - the arena + * @Input uRequestSize - the size of resource segment requested. + * @Input uImportMultiplier - Import x-times of the uRequestSize + * for future RA_Alloc calls. + * Use RA_NO_IMPORT_MULTIPLIER to import the exact size. + * @Input uImportFlags - flags influencing allocation policy. + * @Input uAlignment - the alignment constraint required for the + * allocated segment, use 0 if alignment not required. + * @Input pszAnnotation - a string to describe the allocation + * @Output base - allocated base resource + * @Output pActualSize - the actual_size of resource segment allocated, + * typically rounded up by quantum. + * @Output phPriv - the user reference associated with allocated + * resource span. 
+ * @Return PVRSRV_OK - success + */ +PVRSRV_ERROR +RA_Alloc(RA_ARENA *pArena, + RA_LENGTH_T uRequestSize, + IMG_UINT8 uImportMultiplier, + RA_FLAGS_T uImportFlags, + RA_LENGTH_T uAlignment, + const IMG_CHAR *pszAnnotation, + RA_BASE_T *base, + RA_LENGTH_T *pActualSize, + RA_PERISPAN_HANDLE *phPriv); + +/** + * @Function RA_Alloc_Range + * + * @Description + * + * To allocate a resource at a specified base from an arena. + * + * @Input pArena - the arena + * @Input uRequestSize - the size of resource segment requested. + * @Input uImportFlags - flags influencing allocation policy. + * @Input uAlignment - the alignment constraint required for the + * allocated segment, use 0 if alignment not required. + * @Input base - allocated base resource + * @Output pActualSize - the actual_size of resource segment allocated, + * typically rounded up by quantum. + * @Return PVRSRV_OK - success + */ +PVRSRV_ERROR +RA_Alloc_Range(RA_ARENA *pArena, + RA_LENGTH_T uRequestSize, + RA_FLAGS_T uImportFlags, + RA_LENGTH_T uAlignment, + RA_BASE_T base, + RA_LENGTH_T *pActualSize); + +/** + * @Function RA_Free + * + * @Description To free a resource segment. + * + * @Input pArena - the arena the segment was originally allocated from. + * @Input base - the base of the resource span to free. + * + * @Return None + */ +void +RA_Free(RA_ARENA *pArena, RA_BASE_T base); + +/** + * @Function RA_Get_Usage_Stats + * + * @Description To collect the arena usage statistics. + * + * @Input pArena - the arena to acquire usage statistics from. + * @Input psRAStats - the buffer to hold the usage statistics of the arena. 
+ * + * @Return None + */ +IMG_INTERNAL void +RA_Get_Usage_Stats(RA_ARENA *pArena, PRA_USAGE_STATS psRAStats); + +#if defined(__KERNEL__) +RA_ARENA_ITERATOR * +RA_IteratorAcquire(RA_ARENA *pArena, IMG_BOOL bIncludeFreeSegments); + +void +RA_IteratorReset(RA_ARENA_ITERATOR *pIter); + +void +RA_IteratorRelease(RA_ARENA_ITERATOR *pIter); + +IMG_BOOL +RA_IteratorNext(RA_ARENA_ITERATOR *pIter, RA_ITERATOR_DATA *pData); +#endif /* defined(__KERNEL__) */ + +#endif diff --git a/drivers/gpu/drm/phytium/octopus/rgx_bridge.h b/drivers/gpu/drm/phytium/octopus/rgx_bridge.h new file mode 100644 index 000000000000..d0fa6dd06da2 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgx_bridge.h @@ -0,0 +1,242 @@ +/*************************************************************************/ /*! +@File +@Title RGX Bridge Functionality +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Header for the Rogue Bridge code +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/
+
+#ifndef RGX_BRIDGE_H
+#define RGX_BRIDGE_H
+
+#include "pvr_bridge.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include "rgx_fwif_km.h"
+
+#define RGXFWINITPARAMS_VERSION 1
+#define RGXFWINITPARAMS_EXTENSION 128
+
+#include "common_rgxta3d_bridge.h"
+#include "common_rgxcmp_bridge.h"
+#include "common_rgxtq2_bridge.h"
+#if defined(SUPPORT_RGXTQ_BRIDGE)
+#include "common_rgxtq_bridge.h"
+#endif
+#if defined(SUPPORT_USC_BREAKPOINT)
+#include "common_rgxbreakpoint_bridge.h"
+#endif
+#include "common_rgxfwdbg_bridge.h"
+#if defined(PDUMP)
+#include "common_rgxpdump_bridge.h"
+#endif
+#include "common_rgxhwperf_bridge.h"
+#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE)
+#include "common_rgxregconfig_bridge.h"
+#endif
+#include "common_rgxkicksync_bridge.h"
+#include "common_rgxsignals_bridge.h"
+#include "common_rgxtimerquery_bridge.h"
+#if defined(SUPPORT_RGXRAY_BRIDGE)
+#include "common_rgxray_bridge.h"
+#endif
+/*
+ * Bridge Cmd Ids
+ */
+
+/* *REMEMBER* to update PVRSRV_BRIDGE_RGX_LAST if you add/remove a bridge
+ * group!
+ * Also you need to ensure all PVRSRV_BRIDGE_RGX_xxx_DISPATCH_FIRST offsets
+ * follow on from the previous bridge group's commands!
+ *
+ * If a bridge group is optional, ensure you *ALWAYS* define its index
+ * (e.g. PVRSRV_BRIDGE_RGXCMP is always 151, even if the feature is not
+ * defined). If an optional bridge group is not defined you must still
+ * define PVRSRV_BRIDGE_RGX_xxx_DISPATCH_FIRST for it with an assigned
+ * value of 0.
+ */
+
+/* The RGX bridge groups start at 128 (PVRSRV_BRIDGE_RGX_FIRST) rather than
+ * follow-on from the other non-device bridge groups (meaning that they then
+ * won't be displaced if other non-device bridge groups are added).
+ */ + +#define PVRSRV_BRIDGE_RGX_FIRST 128UL + +/* 128: RGX TQ interface functions */ +#define PVRSRV_BRIDGE_RGXTQ 128UL +/* The RGXTQ bridge is conditional since the definitions in this header file + * support both the rogue and octopus servers, but the RGXTQ bridge is not + * required at all on the Volcanic architecture. + */ +#if defined(SUPPORT_RGXTQ_BRIDGE) +#define PVRSRV_BRIDGE_RGXTQ_DISPATCH_FIRST (PVRSRV_BRIDGE_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST (PVRSRV_BRIDGE_RGXTQ_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXTQ_CMD_LAST) +#else +#define PVRSRV_BRIDGE_RGXTQ_DISPATCH_FIRST 0 +#define PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST (PVRSRV_BRIDGE_DISPATCH_LAST) +#endif + +/* 129: RGX Compute interface functions */ +#define PVRSRV_BRIDGE_RGXCMP 129UL +#define PVRSRV_BRIDGE_RGXCMP_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST (PVRSRV_BRIDGE_RGXCMP_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXCMP_CMD_LAST) + +/* 130: RGX TA/3D interface functions */ +#define PVRSRV_BRIDGE_RGXTA3D 130UL +#define PVRSRV_BRIDGE_RGXTA3D_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST (PVRSRV_BRIDGE_RGXTA3D_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXTA3D_CMD_LAST) + +/* 131: RGX Breakpoint interface functions */ +#define PVRSRV_BRIDGE_RGXBREAKPOINT 131UL +#if defined(SUPPORT_USC_BREAKPOINT) +#define PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_LAST (PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_LAST) +#else +#define PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_FIRST 0 +#define PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_LAST (PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST) +#endif + +/* 132: RGX Debug/Misc interface functions */ +#define PVRSRV_BRIDGE_RGXFWDBG 132UL +#define PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_LAST 
(PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXFWDBG_CMD_LAST) + +/* 133: RGX PDump interface functions */ +#define PVRSRV_BRIDGE_RGXPDUMP 133UL +#if defined(PDUMP) +#define PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST (PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXPDUMP_CMD_LAST) +#else +#define PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_FIRST 0 +#define PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST (PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_LAST) +#endif + +/* 134: RGX HWPerf interface functions */ +#define PVRSRV_BRIDGE_RGXHWPERF 134UL +#define PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST (PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXHWPERF_CMD_LAST) + +/* 135: RGX Register Configuration interface functions */ +#define PVRSRV_BRIDGE_RGXREGCONFIG 135UL +#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE) +#define PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST (PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXREGCONFIG_CMD_LAST) +#else +#define PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_FIRST 0 +#define PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST (PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST) +#endif + +/* 136: RGX kicksync interface */ +#define PVRSRV_BRIDGE_RGXKICKSYNC 136UL +#define PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_LAST (PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXKICKSYNC_CMD_LAST) + +/* 137: RGX signals interface */ +#define PVRSRV_BRIDGE_RGXSIGNALS 137UL +#define PVRSRV_BRIDGE_RGXSIGNALS_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_RGXSIGNALS_DISPATCH_LAST (PVRSRV_BRIDGE_RGXSIGNALS_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXSIGNALS_CMD_LAST) + +#define PVRSRV_BRIDGE_RGXTQ2 
138UL +#define PVRSRV_BRIDGE_RGXTQ2_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXSIGNALS_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_RGXTQ2_DISPATCH_LAST (PVRSRV_BRIDGE_RGXTQ2_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXTQ2_CMD_LAST) + +#define PVRSRV_BRIDGE_RGXTIMERQUERY 139UL +#define PVRSRV_BRIDGE_RGXTIMERQUERY_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXTQ2_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_RGXTIMERQUERY_DISPATCH_LAST (PVRSRV_BRIDGE_RGXTIMERQUERY_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXTIMERQUERY_CMD_LAST) + +#define PVRSRV_BRIDGE_RGXRAY 140UL +#if defined(SUPPORT_RGXRAY_BRIDGE) +#define PVRSRV_BRIDGE_RGXRAY_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXTIMERQUERY_DISPATCH_LAST + 1) +#define PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST (PVRSRV_BRIDGE_RGXRAY_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXRAY_CMD_LAST) +#else +#define PVRSRV_BRIDGE_RGXRAY_DISPATCH_FIRST 0 +#define PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST (PVRSRV_BRIDGE_RGXTIMERQUERY_DISPATCH_LAST) +#endif + +#define PVRSRV_BRIDGE_RGX_LAST (PVRSRV_BRIDGE_RGXRAY) +#define PVRSRV_BRIDGE_RGX_DISPATCH_LAST (PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST) + +/* bit mask representing the enabled RGX bridges */ + +static const IMG_UINT32 gui32RGXBridges = + (1U << (PVRSRV_BRIDGE_RGXTQ - PVRSRV_BRIDGE_RGX_FIRST)) +#if defined(RGX_FEATURE_COMPUTE) || defined(__KERNEL__) + | (1U << (PVRSRV_BRIDGE_RGXCMP - PVRSRV_BRIDGE_RGX_FIRST)) +#endif + | (1U << (PVRSRV_BRIDGE_RGXTA3D - PVRSRV_BRIDGE_RGX_FIRST)) +#if defined(SUPPORT_BREAKPOINT) + | (1U << (PVRSRV_BRIDGE_BREAKPOINT - PVRSRV_BRIDGE_RGX_FIRST)) +#endif + | (1U << (PVRSRV_BRIDGE_RGXFWDBG - PVRSRV_BRIDGE_RGX_FIRST)) +#if defined(PDUMP) + | (1U << (PVRSRV_BRIDGE_RGXPDUMP - PVRSRV_BRIDGE_RGX_FIRST)) +#endif + | (1U << (PVRSRV_BRIDGE_RGXHWPERF - PVRSRV_BRIDGE_RGX_FIRST)) +#if defined(SUPPORT_REGCONFIG) + | (1U << (PVRSRV_BRIDGE_RGXREGCONFIG - PVRSRV_BRIDGE_RGX_FIRST)) +#endif + | (1U << (PVRSRV_BRIDGE_RGXKICKSYNC - PVRSRV_BRIDGE_RGX_FIRST)) +#if defined(RGX_FEATURE_SIGNAL_SNOOPING) || defined(__KERNEL__) + | (1U << (PVRSRV_BRIDGE_RGXSIGNALS - 
PVRSRV_BRIDGE_RGX_FIRST)) +#endif +#if defined(SUPPORT_FASTRENDER_DM) || defined(__KERNEL__) + | (1U << (PVRSRV_BRIDGE_RGXTQ2 - PVRSRV_BRIDGE_RGX_FIRST)) +#endif +#if defined(SUPPORT_TIMERQUERY) + | (1U << (PVRSRV_BRIDGE_RGXTIMERQUERY - PVRSRV_BRIDGE_RGX_FIRST)) +#endif + | (1U << (PVRSRV_BRIDGE_RGXRAY - PVRSRV_BRIDGE_RGX_FIRST)) + ; +/* bit field representing which RGX bridge groups may optionally not + * be present in the server + */ + +#define RGX_BRIDGES_OPTIONAL \ + ( \ + 0 /* no RGX bridges are currently optional */ \ + ) + +#if defined(__cplusplus) +} +#endif + +#endif /* RGX_BRIDGE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/rgx_common.h b/drivers/gpu/drm/phytium/octopus/rgx_common.h new file mode 100644 index 000000000000..746d52877595 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgx_common.h @@ -0,0 +1,254 @@ +/*************************************************************************/ /*! +@File +@Title RGX Common Types and Defines Header +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Common types and definitions for RGX software +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef RGX_COMMON_H +#define RGX_COMMON_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#include "img_defs.h" + +/* Included to get the BVNC_KM_N defined and other feature defs */ +#include "km/rgxdefs_km.h" + +/*! This macro represents a mask of LSBs that must be zero on data structure + * sizes and offsets to ensure they are 8-byte granular on types shared between + * the FW and host driver */ +#define RGX_FW_ALIGNMENT_LSB (7U) + +/*! Macro to test structure size alignment */ +#define RGX_FW_STRUCT_SIZE_ASSERT(_a) \ + static_assert((sizeof(_a) & RGX_FW_ALIGNMENT_LSB) == 0U, \ + "Size of " #_a " is not properly aligned") + +/*! 
Macro to test structure member alignment */ +#define RGX_FW_STRUCT_OFFSET_ASSERT(_a, _b) \ + static_assert((offsetof(_a, _b) & RGX_FW_ALIGNMENT_LSB) == 0U, \ + "Offset of " #_a "." #_b " is not properly aligned") + + +/* Virtualisation validation builds are meant to test the VZ-related hardware without a fully virtualised platform. + * As such a driver can support either the vz-validation code or real virtualisation. + * Note: PVRSRV_VZ_NUM_OSID is the external build option, while RGX_NUM_OS_SUPPORTED is the internal symbol used in the DDK */ +#if defined(SUPPORT_GPUVIRT_VALIDATION) && (defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1)) +#error "Invalid build configuration: Virtualisation support (PVRSRV_VZ_NUM_OSID > 1) and virtualisation validation code (SUPPORT_GPUVIRT_VALIDATION) are mutually exclusive." +#endif + +/* The RGXFWIF_DM defines assume only one of RGX_FEATURE_TLA or + * RGX_FEATURE_FASTRENDER_DM is present. Ensure this with a compile-time check. + */ +#if defined(RGX_FEATURE_TLA) && defined(RGX_FEATURE_FASTRENDER_DM) +#error "Both RGX_FEATURE_TLA and RGX_FEATURE_FASTRENDER_DM defined. Fix code to handle this!" +#endif + +/*! The master definition for data masters known to the firmware of RGX. + * When a new DM is added to this list, relevant entry should be added to + * RGX_HWPERF_DM enum list. + * The DM in a V1 HWPerf packet uses this definition. */ + +typedef IMG_UINT32 RGXFWIF_DM; + +#define RGXFWIF_DM_GP IMG_UINT32_C(0) +/* Either TDM or 2D DM is present. 
The above build time error is present to verify this */ +#define RGXFWIF_DM_2D IMG_UINT32_C(1) /* when RGX_FEATURE_TLA defined */ +#define RGXFWIF_DM_TDM IMG_UINT32_C(1) /* when RGX_FEATURE_FASTRENDER_DM defined */ + +#define RGXFWIF_DM_GEOM IMG_UINT32_C(2) +#define RGXFWIF_DM_3D IMG_UINT32_C(3) +#define RGXFWIF_DM_CDM IMG_UINT32_C(4) +#define RGXFWIF_DM_RAY IMG_UINT32_C(5) + +#define RGXFWIF_DM_LAST RGXFWIF_DM_RAY + +typedef enum _RGX_KICK_TYPE_DM_ +{ + RGX_KICK_TYPE_DM_GP = 0x001, + RGX_KICK_TYPE_DM_TDM_2D = 0x002, + RGX_KICK_TYPE_DM_TA = 0x004, + RGX_KICK_TYPE_DM_3D = 0x008, + RGX_KICK_TYPE_DM_CDM = 0x010, + RGX_KICK_TYPE_DM_RTU = 0x020, + RGX_KICK_TYPE_DM_SHG = 0x040, + RGX_KICK_TYPE_DM_TQ2D = 0x080, + RGX_KICK_TYPE_DM_TQ3D = 0x100, + RGX_KICK_TYPE_DM_RAY = 0x200, + RGX_KICK_TYPE_DM_LAST = 0x400 +} RGX_KICK_TYPE_DM; + +/* Maximum number of DM in use: GP, 2D/TDM, TA, 3D, CDM, SHG, RTU, RDM */ +#define RGXFWIF_DM_DEFAULT_MAX (RGXFWIF_DM_LAST + 1U) + +/* Maximum number of DM in use: GP, 2D/TDM, TA, 3D, CDM, RDM*/ +#define RGXFWIF_DM_MAX (6U) +#define RGXFWIF_HWDM_MAX (RGXFWIF_DM_MAX) + +/* Min/Max number of HW DMs (all but GP) */ +#if defined(RGX_FEATURE_TLA) +#define RGXFWIF_HWDM_MIN (1U) +#else +#if defined(RGX_FEATURE_FASTRENDER_DM) +#define RGXFWIF_HWDM_MIN (1U) +#else +#define RGXFWIF_HWDM_MIN (2U) +#endif +#endif + +#define RGXFWIF_DM_MIN_MTS_CNT (6) +#define RGXFWIF_DM_MIN_CNT (5) + +/* + * Data Master Tags to be appended to resources created on behalf of each RGX + * Context. + */ +#define RGX_RI_DM_TAG_KS 'K' +#define RGX_RI_DM_TAG_CDM 'C' +#define RGX_RI_DM_TAG_RC 'R' /* To be removed once TA/3D Timelines are split */ +#define RGX_RI_DM_TAG_TA 'V' +#define RGX_RI_DM_TAG_GEOM 'V' +#define RGX_RI_DM_TAG_3D 'P' +#define RGX_RI_DM_TAG_TDM 'T' +#define RGX_RI_DM_TAG_TQ2D '2' +#define RGX_RI_DM_TAG_TQ3D 'Q' +#define RGX_RI_DM_TAG_RAY 'r' + +/* + * Client API Tags to be appended to resources created on behalf of each + * Client API. 
+ */ +#define RGX_RI_CLIENT_API_GLES1 '1' +#define RGX_RI_CLIENT_API_GLES3 '3' +#define RGX_RI_CLIENT_API_VULKAN 'V' +#define RGX_RI_CLIENT_API_EGL 'E' +#define RGX_RI_CLIENT_API_OPENCL 'C' +#define RGX_RI_CLIENT_API_OPENGL 'G' +#define RGX_RI_CLIENT_API_SERVICES 'S' +#define RGX_RI_CLIENT_API_WSEGL 'W' +#define RGX_RI_CLIENT_API_ANDROID 'A' +#define RGX_RI_CLIENT_API_LWS 'L' + +/* + * Format a RI annotation for a given RGX Data Master context + */ +#define RGX_RI_FORMAT_DM_ANNOTATION(annotation, dmTag, clientAPI) do \ + { \ + (annotation)[0] = (dmTag); \ + (annotation)[1] = (clientAPI); \ + (annotation)[2] = '\0'; \ + } while (false) + +/*! + ****************************************************************************** + * RGXFW Compiler alignment definitions + *****************************************************************************/ +#if defined(__GNUC__) || defined(HAS_GNUC_ATTRIBUTES) || defined(INTEGRITY_OS) +#define RGXFW_ALIGN __attribute__ ((aligned (8))) +#define RGXFW_ALIGN_DCACHEL __attribute__((aligned (64))) +#elif defined(_MSC_VER) +#define RGXFW_ALIGN __declspec(align(8)) +#define RGXFW_ALIGN_DCACHEL __declspec(align(64)) +#pragma warning (disable : 4324) +#else +#error "Align MACROS need to be defined for this compiler" +#endif + +/*! + ****************************************************************************** + * Force 8-byte alignment for structures allocated uncached. + *****************************************************************************/ +#define UNCACHED_ALIGN RGXFW_ALIGN + + +/*! 
+ ****************************************************************************** + * GPU Utilisation states + *****************************************************************************/ +#define RGXFWIF_GPU_UTIL_STATE_IDLE (0U) +#define RGXFWIF_GPU_UTIL_STATE_ACTIVE (1U) +#define RGXFWIF_GPU_UTIL_STATE_BLOCKED (2U) +#define RGXFWIF_GPU_UTIL_STATE_NUM (3U) +#define RGXFWIF_GPU_UTIL_STATE_MASK IMG_UINT64_C(0x0000000000000003) + + +/* + * Maximum amount of register writes that can be done by the register + * programmer (FW or META DMA). This is not a HW limitation, it is only + * a protection against malformed inputs to the register programmer. + */ +#define RGX_MAX_NUM_REGISTER_PROGRAMMER_WRITES (128U) + +/* FW common context priority. */ +#define RGX_CTX_PRIORITY_REALTIME (UINT32_MAX) +#define RGX_CTX_PRIORITY_HIGH (2U) +#define RGX_CTX_PRIORITY_MEDIUM (1U) +#define RGX_CTX_PRIORITY_LOW (0) + +/* + * Use of the 32-bit context property flags mask + * ( X = taken/in use, - = available/unused ) + * + * 0 + * | + * -------------------------------x + */ +/* + * Context creation flags + * (specify a context's properties at creation time) + */ +#define RGX_CONTEXT_FLAG_DISABLESLR (1UL << 0) /*!< Disable SLR */ + +/* List of attributes that may be set for a context */ +typedef enum _RGX_CONTEXT_PROPERTY_ +{ + RGX_CONTEXT_PROPERTY_FLAGS = 0, /*!< Context flags */ +} RGX_CONTEXT_PROPERTY; + +#if defined(__cplusplus) +} +#endif + +#endif /* RGX_COMMON_H */ + +/****************************************************************************** + End of file +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgx_compat_bvnc.h b/drivers/gpu/drm/phytium/octopus/rgx_compat_bvnc.h new file mode 100644 index 000000000000..814dd796623c --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgx_compat_bvnc.h @@ -0,0 +1,140 @@ +/*************************************************************************/ /*! 
+@File rgx_compat_bvnc.h +@Title BVNC compatibility check utilities +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Utility functions used for packing BNC and V. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGX_COMPAT_BVNC_H)
+#define RGX_COMPAT_BVNC_H
+
+#include "img_types.h"
+
+#if defined(RGX_FIRMWARE) /* Services firmware */
+# include "rgxfw_utils.h"
+# define PVR_COMPAT_ASSERT RGXFW_ASSERT
+#elif !defined(RGX_BUILD_BINARY) /* Services host driver code */
+# include "pvr_debug.h"
+# define PVR_COMPAT_ASSERT PVR_ASSERT
+#else /* FW user-mode tools */
+# include <assert.h>
+# define PVR_COMPAT_ASSERT assert
+#endif
+
+/* 64bit endian conversion macros */
+#if defined(__BIG_ENDIAN__)
+#define RGX_INT64_TO_BE(N) (N)
+#define RGX_INT64_FROM_BE(N) (N)
+#define RGX_INT32_TO_BE(N) (N)
+#define RGX_INT32_FROM_BE(N) (N)
+#else
+#define RGX_INT64_TO_BE(N) \
+ ((((N) >> 56) & 0xff) \
+ | (((N) >> 40) & 0xff00) \
+ | (((N) >> 24) & 0xff0000) \
+ | (((N) >> 8) & 0xff000000U) \
+ | ((N) << 56) \
+ | (((N) & 0xff00) << 40) \
+ | (((N) & 0xff0000) << 24) \
+ | (((N) & 0xff000000U) << 8))
+#define RGX_INT64_FROM_BE(N) RGX_INT64_TO_BE(N)
+
+#define RGX_INT32_TO_BE(N) \
+ ((((N) >> 24) & 0xff) \
+ | (((N) >> 8) & 0xff00) \
+ | ((N) << 24) \
+ | ((((N) & 0xff00) << 8)))
+#define RGX_INT32_FROM_BE(N) RGX_INT32_TO_BE(N)
+#endif
+
+/******************************************************************************
+ * RGX Version packed into 64-bit (BVNC) to be used by Compatibility Check
+ *****************************************************************************/
+
+#define
RGX_BVNC_PACK_SHIFT_B 48 +#define RGX_BVNC_PACK_SHIFT_V 32 +#define RGX_BVNC_PACK_SHIFT_N 16 +#define RGX_BVNC_PACK_SHIFT_C 0 + +#define RGX_BVNC_PACK_MASK_B (IMG_UINT64_C(0xFFFF000000000000)) +#define RGX_BVNC_PACK_MASK_V (IMG_UINT64_C(0x0000FFFF00000000)) +#define RGX_BVNC_PACK_MASK_N (IMG_UINT64_C(0x00000000FFFF0000)) +#define RGX_BVNC_PACK_MASK_C (IMG_UINT64_C(0x000000000000FFFF)) + +#define RGX_BVNC_PACKED_EXTR_B(BVNC) ((IMG_UINT32)(((BVNC) & RGX_BVNC_PACK_MASK_B) >> RGX_BVNC_PACK_SHIFT_B)) +#define RGX_BVNC_PACKED_EXTR_V(BVNC) ((IMG_UINT32)(((BVNC) & RGX_BVNC_PACK_MASK_V) >> RGX_BVNC_PACK_SHIFT_V)) +#define RGX_BVNC_PACKED_EXTR_N(BVNC) ((IMG_UINT32)(((BVNC) & RGX_BVNC_PACK_MASK_N) >> RGX_BVNC_PACK_SHIFT_N)) +#define RGX_BVNC_PACKED_EXTR_C(BVNC) ((IMG_UINT32)(((BVNC) & RGX_BVNC_PACK_MASK_C) >> RGX_BVNC_PACK_SHIFT_C)) + +#define RGX_BVNC_EQUAL(L,R,all,version,bvnc) do { \ + (bvnc) = IMG_FALSE; \ + (version) = ((L).ui32LayoutVersion == (R).ui32LayoutVersion); \ + if (version) \ + { \ + (bvnc) = ((L).ui64BVNC == (R).ui64BVNC); \ + } \ + (all) = (version) && (bvnc); \ + } while (0) + + +/**************************************************************************//** + * Utility function for packing BVNC + *****************************************************************************/ +static inline IMG_UINT64 rgx_bvnc_pack(IMG_UINT32 ui32B, IMG_UINT32 ui32V, IMG_UINT32 ui32N, IMG_UINT32 ui32C) +{ + /* + * Test for input B, V, N and C exceeding max bit width. 
+ */ + PVR_COMPAT_ASSERT((ui32B & (~(RGX_BVNC_PACK_MASK_B >> RGX_BVNC_PACK_SHIFT_B))) == 0); + PVR_COMPAT_ASSERT((ui32V & (~(RGX_BVNC_PACK_MASK_V >> RGX_BVNC_PACK_SHIFT_V))) == 0); + PVR_COMPAT_ASSERT((ui32N & (~(RGX_BVNC_PACK_MASK_N >> RGX_BVNC_PACK_SHIFT_N))) == 0); + PVR_COMPAT_ASSERT((ui32C & (~(RGX_BVNC_PACK_MASK_C >> RGX_BVNC_PACK_SHIFT_C))) == 0); + + return (((IMG_UINT64)ui32B << RGX_BVNC_PACK_SHIFT_B) | + ((IMG_UINT64)ui32V << RGX_BVNC_PACK_SHIFT_V) | + ((IMG_UINT64)ui32N << RGX_BVNC_PACK_SHIFT_N) | + ((IMG_UINT64)ui32C << RGX_BVNC_PACK_SHIFT_C)); +} + + +#endif /* RGX_COMPAT_BVNC_H */ + +/****************************************************************************** + End of file (rgx_compat_bvnc.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgx_fw_info.h b/drivers/gpu/drm/phytium/octopus/rgx_fw_info.h new file mode 100644 index 000000000000..ff9a4c40653b --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgx_fw_info.h @@ -0,0 +1,135 @@ +/*************************************************************************/ /*! +@File +@Title FW image information + +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Utility functions used internally for HWPerf data retrieval +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(RGX_FW_INFO_H) +#define RGX_FW_INFO_H + +#include "img_types.h" +#include "rgx_common.h" + +/* + * Firmware binary block unit in bytes. + * Raw data stored in FW binary will be aligned to this size. 
+ */ +#define FW_BLOCK_SIZE 4096L + +typedef enum +{ + META_CODE = 0, + META_PRIVATE_DATA, + META_COREMEM_CODE, + META_COREMEM_DATA, + MIPS_CODE, + MIPS_EXCEPTIONS_CODE, + MIPS_BOOT_CODE, + MIPS_PRIVATE_DATA, + MIPS_BOOT_DATA, + MIPS_STACK, + RISCV_UNCACHED_CODE, + RISCV_CACHED_CODE, + RISCV_PRIVATE_DATA, + RISCV_COREMEM_CODE, + RISCV_COREMEM_DATA, +} RGX_FW_SECTION_ID; + +typedef enum +{ + NONE = 0, + FW_CODE, + FW_DATA, + FW_COREMEM_CODE, + FW_COREMEM_DATA +} RGX_FW_SECTION_TYPE; + + +/* + * FW binary format with FW info attached: + * + * Contents Offset + * +-----------------+ + * | | 0 + * | | + * | Original binary | + * | file | + * | (.ldr/.elf) | + * | | + * | | + * +-----------------+ + * | FW info header | FILE_SIZE - 4K + * +-----------------+ + * | | + * | FW layout table | + * | | + * +-----------------+ + * FILE_SIZE + */ + +#define FW_INFO_VERSION (1) + +typedef struct +{ + IMG_UINT32 ui32InfoVersion; /* FW info version */ + IMG_UINT32 ui32HeaderLen; /* Header length */ + IMG_UINT32 ui32LayoutEntryNum; /* Number of entries in the layout table */ + IMG_UINT32 ui32LayoutEntrySize; /* Size of an entry in the layout table */ + IMG_UINT64 RGXFW_ALIGN ui64BVNC; /* BVNC */ + IMG_UINT32 ui32FwPageSize; /* Page size of processor on which firmware executes */ + IMG_UINT32 ui32Flags; /* Compatibility flags */ +} RGX_FW_INFO_HEADER; + +typedef struct +{ + RGX_FW_SECTION_ID eId; + RGX_FW_SECTION_TYPE eType; + IMG_UINT32 ui32BaseAddr; + IMG_UINT32 ui32MaxSize; + IMG_UINT32 ui32AllocSize; + IMG_UINT32 ui32AllocOffset; +} RGX_FW_LAYOUT_ENTRY; + +#endif /* RGX_FW_INFO_H */ + +/****************************************************************************** + End of file (rgx_fw_info.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgx_fwif_alignchecks.h b/drivers/gpu/drm/phytium/octopus/rgx_fwif_alignchecks.h new file mode 100644 index 000000000000..bb0f20d22015 --- /dev/null +++ 
b/drivers/gpu/drm/phytium/octopus/rgx_fwif_alignchecks.h @@ -0,0 +1,184 @@ +/*************************************************************************/ /*! +@File +@Title RGX fw interface alignment checks +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Checks to avoid disalignment in RGX fw data structures + shared with the host +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(RGX_FWIF_ALIGNCHECKS_H) +#define RGX_FWIF_ALIGNCHECKS_H + +/* for the offsetof macro */ +#if defined(__KERNEL__) && defined(__linux__) +#include +#else +#include +#endif + +/*! + ****************************************************************************** + * Alignment UM/FW checks array + *****************************************************************************/ + +#define RGXFW_ALIGN_CHECKS_UM_MAX 128U + +#if defined(PM_INTERACTIVE_MODE) +#define HWRTDATA_PM_OFFSET offsetof(RGXFWIF_HWRTDATA, sPMMListDevVAddr), +#define HWRTDATA_HEAPTABLE_OFFSET offsetof(RGXFWIF_HWRTDATA, psVHeapTableDevVAddr), +#else +#define HWRTDATA_PM_OFFSET offsetof(RGXFWIF_HWRTDATA, sPMRenderStateDevVAddr), +#define HWRTDATA_HEAPTABLE_OFFSET +#endif + +#define RGXFW_ALIGN_CHECKS_INIT0 \ + sizeof(RGXFWIF_TRACEBUF), \ + offsetof(RGXFWIF_TRACEBUF, ui32LogType), \ + offsetof(RGXFWIF_TRACEBUF, sTraceBuf), \ + offsetof(RGXFWIF_TRACEBUF, ui32TraceBufSizeInDWords), \ + offsetof(RGXFWIF_TRACEBUF, ui32TracebufFlags), \ + \ + sizeof(RGXFWIF_SYSDATA), \ + offsetof(RGXFWIF_SYSDATA, ePowState), \ + offsetof(RGXFWIF_SYSDATA, ui32HWPerfDropCount), \ + offsetof(RGXFWIF_SYSDATA, ui32LastDropOrdinal), \ + offsetof(RGXFWIF_SYSDATA, ui32FWFaults), \ + offsetof(RGXFWIF_SYSDATA, ui32HWRStateFlags), \ + \ + sizeof(RGXFWIF_OSDATA), \ + offsetof(RGXFWIF_OSDATA, ui32HostSyncCheckMark), \ + 
offsetof(RGXFWIF_OSDATA, ui32KCCBCmdsExecuted), \ + \ + sizeof(RGXFWIF_HWRINFOBUF), \ + offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmLockedUpCount), \ + offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmOverranCount), \ + offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmRecoveredCount), \ + offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmFalseDetectCount), \ + \ + /* RGXFWIF_CMDTA checks */ \ + sizeof(RGXFWIF_CMDTA), \ + offsetof(RGXFWIF_CMDTA, sGeomRegs), \ + \ + /* RGXFWIF_CMD3D checks */ \ + sizeof(RGXFWIF_CMD3D), \ + offsetof(RGXFWIF_CMD3D, s3DRegs), \ + \ + /* RGXFWIF_CMD_COMPUTE checks */ \ + sizeof(RGXFWIF_CMD_COMPUTE), \ + offsetof(RGXFWIF_CMD_COMPUTE, sCDMRegs), \ + \ + /* RGXFWIF_FREELIST checks */ \ + sizeof(RGXFWIF_FREELIST), \ + offsetof(RGXFWIF_FREELIST, sFreeListBaseDevVAddr),\ + offsetof(RGXFWIF_FREELIST, sFreeListStateDevVAddr),\ + offsetof(RGXFWIF_FREELIST, sFreeListLastGrowDevVAddr),\ + offsetof(RGXFWIF_FREELIST, ui32MaxPages),\ + offsetof(RGXFWIF_FREELIST, ui32CurrentPages),\ + \ + /* RGXFWIF_HWRTDATA checks */ \ + sizeof(RGXFWIF_HWRTDATA), \ + HWRTDATA_PM_OFFSET \ + HWRTDATA_HEAPTABLE_OFFSET \ + offsetof(RGXFWIF_HWRTDATA, apsFreeLists),\ + /*offsetof(RGXFWIF_HWRTDATA, ui64VCECatBase),*/ \ + offsetof(RGXFWIF_HWRTDATA, eState), \ + \ +\ + sizeof(RGXFWIF_HWPERF_CTL), \ + offsetof(RGXFWIF_HWPERF_CTL, sBlkCfg), \ + sizeof(RGXFWIF_CMDTDM), \ + offsetof(RGXFWIF_CMDTDM, sTDMRegs) + +#define RGXFW_ALIGN_CHECKS_INIT RGXFW_ALIGN_CHECKS_INIT0 + + + +/*! 
+ ****************************************************************************** + * Alignment KM checks array + *****************************************************************************/ + +#define RGXFW_ALIGN_CHECKS_INIT_KM0 \ + sizeof(RGXFWIF_SYSINIT), \ + offsetof(RGXFWIF_SYSINIT, sFaultPhysAddr), \ + offsetof(RGXFWIF_SYSINIT, sPDSExecBase), \ + offsetof(RGXFWIF_SYSINIT, sUSCExecBase), \ + offsetof(RGXFWIF_SYSINIT, asSigBufCtl), \ + offsetof(RGXFWIF_SYSINIT, sTraceBufCtl), \ + offsetof(RGXFWIF_SYSINIT, sFwSysData), \ + \ + sizeof(RGXFWIF_OSINIT), \ + offsetof(RGXFWIF_OSINIT, psKernelCCBCtl), \ + offsetof(RGXFWIF_OSINIT, psKernelCCB), \ + offsetof(RGXFWIF_OSINIT, psFirmwareCCBCtl), \ + offsetof(RGXFWIF_OSINIT, psFirmwareCCB), \ + offsetof(RGXFWIF_OSINIT, sFwOsData), \ + offsetof(RGXFWIF_OSINIT, sRGXCompChecks), \ + \ + /* RGXFWIF_FWRENDERCONTEXT checks */ \ + sizeof(RGXFWIF_FWRENDERCONTEXT), \ + offsetof(RGXFWIF_FWRENDERCONTEXT, sTAContext), \ + offsetof(RGXFWIF_FWRENDERCONTEXT, s3DContext), \ + \ + sizeof(RGXFWIF_FWCOMPUTECONTEXT), \ + offsetof(RGXFWIF_FWCOMPUTECONTEXT, sCDMContext), \ + offsetof(RGXFWIF_FWCOMPUTECONTEXT, sStaticComputeContextState),\ + offsetof(RGXFWIF_FWCOMPUTECONTEXT, ui32WorkEstCCBSubmitted),\ + \ + sizeof(RGXFWIF_FWTDMCONTEXT), \ + offsetof(RGXFWIF_FWTDMCONTEXT, sTDMContext), \ + offsetof(RGXFWIF_FWTDMCONTEXT, ui32WorkEstCCBSubmitted),\ + \ + sizeof(RGXFWIF_FWCOMMONCONTEXT), \ + offsetof(RGXFWIF_FWCOMMONCONTEXT, psFWMemContext), \ + offsetof(RGXFWIF_FWCOMMONCONTEXT, sRunNode), \ + offsetof(RGXFWIF_FWCOMMONCONTEXT, psCCB), \ + \ + sizeof(RGXFWIF_MMUCACHEDATA), \ + offsetof(RGXFWIF_MMUCACHEDATA, ui32CacheFlags), \ + offsetof(RGXFWIF_MMUCACHEDATA, sMMUCacheSync), \ + offsetof(RGXFWIF_MMUCACHEDATA, ui32MMUCacheSyncUpdateValue) + +#define RGXFW_ALIGN_CHECKS_INIT_KM RGXFW_ALIGN_CHECKS_INIT_KM0 + +#endif /* RGX_FWIF_ALIGNCHECKS_H */ + +/****************************************************************************** + End of file 
(rgx_fwif_alignchecks.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgx_fwif_hwperf.h b/drivers/gpu/drm/phytium/octopus/rgx_fwif_hwperf.h new file mode 100644 index 000000000000..9695985865c8 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgx_fwif_hwperf.h @@ -0,0 +1,119 @@ +/*************************************************************************/ /*! +@File rgx_fwif_hwperf.h +@Title RGX HWPerf support +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Shared header between RGX firmware and Init process +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef RGX_FWIF_HWPERF_H +#define RGX_FWIF_HWPERF_H + +#include "rgx_fwif_shared.h" +#include "rgx_hwperf.h" +#include "rgxdefs_km.h" + +/* Server and Firmware definitions only */ + +/*! The number of HWPerf blocks in the GPU */ + +#if defined(RGX_FIRMWARE) +#define RGX_HWPERF_NUM_SPU ((RGX_FEATURE_NUM_SPU)) +#define RGX_HWPERF_NUM_USC ((RGX_FEATURE_NUM_CLUSTERS)) +#define RGX_HWPERF_NUM_ISP_PER_SPU ((RGX_FEATURE_NUM_ISP_PER_SPU)) +#define RGX_HWPERF_NUM_PBE ((RGX_FEATURE_PBE_PER_SPU) * (RGX_FEATURE_NUM_SPU)) +#define RGX_HWPERF_NUM_MERCER ((RGX_FEATURE_NUM_CLUSTERS)) +#define RGX_HWPERF_NUM_PBE_SHARED ((RGX_FEATURE_NUM_SPU)) +#define RGX_HWPERF_NUM_SWIFT ((RGX_FEATURE_NUM_SPU * RGX_FEATURE_MAX_TPU_PER_SPU)) +#define RGX_HWPERF_NUM_TEXAS ((RGX_FEATURE_NUM_SPU)) +#define RGX_HWPERF_NUM_TPU ((RGX_FEATURE_NUM_SPU * RGX_FEATURE_MAX_TPU_PER_SPU)) +#define RGX_HWPERF_NUM_ISP ((RGX_FEATURE_NUM_CLUSTERS)) + +#define RGX_CNTBLK_INDIRECT_COUNT(_class) ((RGX_HWPERF_NUM_ ## _class)) + +/*! The number of layout blocks defined with configurable + * performance counters. Compile time constants. + * This is for the Series 8XT+ layout. 
+ */ +#define RGX_HWPERF_MAX_DEFINED_BLKS (\ + (IMG_UINT32)RGX_CNTBLK_ID_DIRECT_LAST +\ + RGX_CNTBLK_INDIRECT_COUNT(ISP) +\ + RGX_CNTBLK_INDIRECT_COUNT(MERCER) +\ + RGX_CNTBLK_INDIRECT_COUNT(PBE) +\ + RGX_CNTBLK_INDIRECT_COUNT(PBE_SHARED) +\ + RGX_CNTBLK_INDIRECT_COUNT(USC) +\ + RGX_CNTBLK_INDIRECT_COUNT(TPU) +\ + RGX_CNTBLK_INDIRECT_COUNT(SWIFT) +\ + RGX_CNTBLK_INDIRECT_COUNT(TEXAS)) + +#endif /* RGX_FIRMWARE */ + +/*****************************************************************************/ + +/* Structure used in the FW's global control data to hold the performance + * counters provisioned for a given block. */ +typedef struct +{ + IMG_UINT32 uiBlockID; + IMG_UINT32 uiNumCounters; // Number of counters held + // in aui32CounterCfg + // [0..RGX_CNTBLK_COUNTERS_MAX) + IMG_UINT32 uiEnabled; // 1 => enabled, 0=> disabled + RGXFWIF_DEV_VIRTADDR psModel; // link to model table for uiBlockID + IMG_UINT32 aui32CounterCfg[RGX_CNTBLK_COUNTERS_MAX]; +} RGXFWIF_HWPERF_CTL_BLK; + + +/*! + ***************************************************************************** + * Structure used in the FW's global RGXFW_CTL store, holding HWPerf counter + * block configuration. It is written to by the Server on FW initialisation + * (PDUMP=1) and by the FW BG kCCB command processing code. It is read by + * the FW IRQ register programming and HWPerf event generation routines. + * Size of the sBlkCfg[] array must be consistent between KM/UM and FW. 
+ * FW will ASSERT if the sizes are different + * (ui32NumBlocks != RGX_HWPERF_MAX_DEFINED_BLKS) + ****************************************************************************/ +typedef struct +{ + IMG_UINT32 ui32Reserved; + IMG_UINT32 ui32CtrlWord; + IMG_UINT32 ui32EnabledBlksCount; + IMG_UINT32 ui32NumBlocks; + RGXFWIF_HWPERF_CTL_BLK RGXFW_ALIGN sBlkCfg[1]; // First array entry +} UNCACHED_ALIGN RGXFWIF_HWPERF_CTL; +#endif diff --git a/drivers/gpu/drm/phytium/octopus/rgx_fwif_km.h b/drivers/gpu/drm/phytium/octopus/rgx_fwif_km.h new file mode 100644 index 000000000000..48d2ab6900f2 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgx_fwif_km.h @@ -0,0 +1,2292 @@ +/*************************************************************************/ /*! +@File +@Title RGX firmware interface structures used by pvrsrvkm +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description RGX firmware interface structures used by pvrsrvkm +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(RGX_FWIF_KM_H) +#define RGX_FWIF_KM_H + +#include "img_types.h" +#include "rgx_fwif_shared.h" +#include "rgxdefs_km.h" +#include "dllist.h" +#include "rgx_hwperf.h" + + +/*************************************************************************/ /*! 
+ Logging type +*/ /**************************************************************************/ +#define RGXFWIF_LOG_TYPE_NONE 0x00000000U +#define RGXFWIF_LOG_TYPE_TRACE 0x00000001U +#define RGXFWIF_LOG_TYPE_GROUP_MAIN 0x00000002U +#define RGXFWIF_LOG_TYPE_GROUP_MTS 0x00000004U +#define RGXFWIF_LOG_TYPE_GROUP_CLEANUP 0x00000008U +#define RGXFWIF_LOG_TYPE_GROUP_CSW 0x00000010U +#define RGXFWIF_LOG_TYPE_GROUP_BIF 0x00000020U +#define RGXFWIF_LOG_TYPE_GROUP_PM 0x00000040U +#define RGXFWIF_LOG_TYPE_GROUP_RTD 0x00000080U +#define RGXFWIF_LOG_TYPE_GROUP_SPM 0x00000100U +#define RGXFWIF_LOG_TYPE_GROUP_POW 0x00000200U +#define RGXFWIF_LOG_TYPE_GROUP_HWR 0x00000400U +#define RGXFWIF_LOG_TYPE_GROUP_HWP 0x00000800U +#define RGXFWIF_LOG_TYPE_GROUP_RPM 0x00001000U +#define RGXFWIF_LOG_TYPE_GROUP_DMA 0x00002000U +#define RGXFWIF_LOG_TYPE_GROUP_MISC 0x00004000U +#define RGXFWIF_LOG_TYPE_GROUP_DEBUG 0x80000000U +#define RGXFWIF_LOG_TYPE_GROUP_MASK 0x80007FFEU +#define RGXFWIF_LOG_TYPE_MASK 0x80007FFFU + +/* String used in pvrdebug -h output */ +#define RGXFWIF_LOG_GROUPS_STRING_LIST "main,mts,cleanup,csw,bif,pm,rtd,spm,pow,hwr,hwp,rpm,dma,misc,debug" + +/* Table entry to map log group strings to log type value */ +typedef struct { + const IMG_CHAR* pszLogGroupName; + IMG_UINT32 ui32LogGroupType; +} RGXFWIF_LOG_GROUP_MAP_ENTRY; + +/* + Macro for use with the RGXFWIF_LOG_GROUP_MAP_ENTRY type to create a lookup + table where needed. Keep log group names short, no more than 20 chars. 
+*/ +#define RGXFWIF_LOG_GROUP_NAME_VALUE_MAP { "none", RGXFWIF_LOG_TYPE_NONE }, \ + { "main", RGXFWIF_LOG_TYPE_GROUP_MAIN }, \ + { "mts", RGXFWIF_LOG_TYPE_GROUP_MTS }, \ + { "cleanup", RGXFWIF_LOG_TYPE_GROUP_CLEANUP }, \ + { "csw", RGXFWIF_LOG_TYPE_GROUP_CSW }, \ + { "bif", RGXFWIF_LOG_TYPE_GROUP_BIF }, \ + { "pm", RGXFWIF_LOG_TYPE_GROUP_PM }, \ + { "rtd", RGXFWIF_LOG_TYPE_GROUP_RTD }, \ + { "spm", RGXFWIF_LOG_TYPE_GROUP_SPM }, \ + { "pow", RGXFWIF_LOG_TYPE_GROUP_POW }, \ + { "hwr", RGXFWIF_LOG_TYPE_GROUP_HWR }, \ + { "hwp", RGXFWIF_LOG_TYPE_GROUP_HWP }, \ + { "rpm", RGXFWIF_LOG_TYPE_GROUP_RPM }, \ + { "dma", RGXFWIF_LOG_TYPE_GROUP_DMA }, \ + { "misc", RGXFWIF_LOG_TYPE_GROUP_MISC }, \ + { "debug", RGXFWIF_LOG_TYPE_GROUP_DEBUG } + + +/* Used in print statements to display log group state, one %s per group defined */ +#define RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s" + +/* Used in a print statement to display log group state, one per group */ +#define RGXFWIF_LOG_ENABLED_GROUPS_LIST(types) (((types) & RGXFWIF_LOG_TYPE_GROUP_MAIN) ?("main ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_MTS) ?("mts ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_CLEANUP) ?("cleanup ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_CSW) ?("csw ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_BIF) ?("bif ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_PM) ?("pm ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_RTD) ?("rtd ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_SPM) ?("spm ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_POW) ?("pow ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_HWR) ?("hwr ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_HWP) ?("hwp ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_RPM) ?("rpm ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_DMA) ?("dma ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_MISC) ?("misc ") :("")), \ + (((types) & RGXFWIF_LOG_TYPE_GROUP_DEBUG) ?("debug ") :("")) + + 
+/************************************************************************ +* RGX FW signature checks +************************************************************************/ +#define RGXFW_SIG_BUFFER_SIZE_MIN (8192) + +#define RGXFWIF_TIMEDIFF_ID ((0x1UL << 28) | RGX_CR_TIMER) + +/*! + ****************************************************************************** + * Trace Buffer + *****************************************************************************/ + +/*! Default size of RGXFWIF_TRACEBUF_SPACE in DWords */ +#define RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS 12000U +#define RGXFW_TRACE_BUFFER_ASSERT_SIZE 200U +#if defined(RGXFW_META_SUPPORT_2ND_THREAD) +#define RGXFW_THREAD_NUM 2U +#else +#define RGXFW_THREAD_NUM 1U +#endif + +#define RGXFW_POLL_TYPE_SET 0x80000000U + +typedef struct +{ + IMG_CHAR szPath[RGXFW_TRACE_BUFFER_ASSERT_SIZE]; + IMG_CHAR szInfo[RGXFW_TRACE_BUFFER_ASSERT_SIZE]; + IMG_UINT32 ui32LineNum; +} UNCACHED_ALIGN RGXFWIF_FILE_INFO_BUF; + +typedef struct +{ + IMG_UINT32 ui32TracePointer; + +#if defined(RGX_FIRMWARE) + IMG_UINT32 *pui32RGXFWIfTraceBuffer; /* To be used by firmware for writing into trace buffer */ +#else + RGXFWIF_DEV_VIRTADDR pui32RGXFWIfTraceBuffer; +#endif + IMG_PUINT32 pui32TraceBuffer; /* To be used by host when reading from trace buffer */ + + RGXFWIF_FILE_INFO_BUF sAssertBuf; +} UNCACHED_ALIGN RGXFWIF_TRACEBUF_SPACE; + +#define RGXFWIF_FWFAULTINFO_MAX (8U) /* Total number of FW fault logs stored */ + +typedef struct +{ + IMG_UINT64 RGXFW_ALIGN ui64CRTimer; + IMG_UINT64 RGXFW_ALIGN ui64OSTimer; + IMG_UINT32 RGXFW_ALIGN ui32Data; + IMG_UINT32 ui32Reserved; + RGXFWIF_FILE_INFO_BUF sFaultBuf; +} UNCACHED_ALIGN RGX_FWFAULTINFO; + + +#define RGXFWIF_POW_STATES \ + X(RGXFWIF_POW_OFF) /* idle and handshaked with the host (ready to full power down) */ \ + X(RGXFWIF_POW_ON) /* running HW commands */ \ + X(RGXFWIF_POW_FORCED_IDLE) /* forced idle */ \ + X(RGXFWIF_POW_IDLE) /* idle waiting for host handshake */ + +typedef enum +{ 
+#define X(NAME) NAME, + RGXFWIF_POW_STATES +#undef X +} RGXFWIF_POW_STATE; + +/* Firmware HWR states */ +#define RGXFWIF_HWR_HARDWARE_OK (IMG_UINT32_C(0x1) << 0U) /*!< The HW state is ok or locked up */ +#define RGXFWIF_HWR_RESET_IN_PROGRESS (IMG_UINT32_C(0x1) << 1U) /*!< Tells if a HWR reset is in progress */ +#define RGXFWIF_HWR_GENERAL_LOCKUP (IMG_UINT32_C(0x1) << 3U) /*!< A DM unrelated lockup has been detected */ +#define RGXFWIF_HWR_DM_RUNNING_OK (IMG_UINT32_C(0x1) << 4U) /*!< At least one DM is running without being close to a lockup */ +#define RGXFWIF_HWR_DM_STALLING (IMG_UINT32_C(0x1) << 5U) /*!< At least one DM is close to lockup */ +#define RGXFWIF_HWR_FW_FAULT (IMG_UINT32_C(0x1) << 6U) /*!< The FW has faulted and needs to restart */ +#define RGXFWIF_HWR_RESTART_REQUESTED (0x1U << 7U) /*!< The FW has requested the host to restart it */ + +#define RGXFWIF_PHR_STATE_SHIFT (8U) +#define RGXFWIF_PHR_RESTART_REQUESTED (IMG_UINT32_C(1) << RGXFWIF_PHR_STATE_SHIFT) /*!< The FW has requested the host to restart it, per PHR configuration */ +#define RGXFWIF_PHR_RESTART_FINISHED (IMG_UINT32_C(2) << RGXFWIF_PHR_STATE_SHIFT) /*!< A PHR triggered GPU reset has just finished */ +#define RGXFWIF_PHR_RESTART_MASK (RGXFWIF_PHR_RESTART_REQUESTED | RGXFWIF_PHR_RESTART_FINISHED) + +#define RGXFWIF_PHR_MODE_OFF (0UL) +#define RGXFWIF_PHR_MODE_RD_RESET (1UL) +#define RGXFWIF_PHR_MODE_FULL_RESET (2UL) + +typedef IMG_UINT32 RGXFWIF_HWR_STATEFLAGS; + +/* Firmware per-DM HWR states */ +#define RGXFWIF_DM_STATE_WORKING (0x00U) /*!< DM is working if all flags are cleared */ +#define RGXFWIF_DM_STATE_READY_FOR_HWR (IMG_UINT32_C(0x1) << 0) /*!< DM is idle and ready for HWR */ +#define RGXFWIF_DM_STATE_NEEDS_SKIP (IMG_UINT32_C(0x1) << 2) /*!< DM need to skip to next cmd before resuming processing */ +#define RGXFWIF_DM_STATE_NEEDS_PR_CLEANUP (IMG_UINT32_C(0x1) << 3) /*!< DM need partial render cleanup before resuming processing */ +#define RGXFWIF_DM_STATE_NEEDS_TRACE_CLEAR 
(IMG_UINT32_C(0x1) << 4) /*!< DM need to increment Recovery Count once fully recovered */ +#define RGXFWIF_DM_STATE_GUILTY_LOCKUP (IMG_UINT32_C(0x1) << 5) /*!< DM was identified as locking up and causing HWR */ +#define RGXFWIF_DM_STATE_INNOCENT_LOCKUP (IMG_UINT32_C(0x1) << 6) /*!< DM was innocently affected by another lockup which caused HWR */ +#define RGXFWIF_DM_STATE_GUILTY_OVERRUNING (IMG_UINT32_C(0x1) << 7) /*!< DM was identified as over-running and causing HWR */ +#define RGXFWIF_DM_STATE_INNOCENT_OVERRUNING (IMG_UINT32_C(0x1) << 8) /*!< DM was innocently affected by another DM over-running which caused HWR */ +#define RGXFWIF_DM_STATE_HARD_CONTEXT_SWITCH (IMG_UINT32_C(0x1) << 9) /*!< DM was forced into HWR as it delayed more important workloads */ +#define RGXFWIF_DM_STATE_GPU_ECC_HWR (IMG_UINT32_C(0x1) << 10) /*!< DM was forced into HWR due to an uncorrected GPU ECC error */ + +/* Firmware's connection state */ +typedef enum +{ + RGXFW_CONNECTION_FW_OFFLINE = 0, /*!< Firmware is offline */ + RGXFW_CONNECTION_FW_READY, /*!< Firmware is initialised */ + RGXFW_CONNECTION_FW_ACTIVE, /*!< Firmware connection is fully established */ + RGXFW_CONNECTION_FW_OFFLOADING, /*!< Firmware is clearing up connection data */ + RGXFW_CONNECTION_FW_STATE_COUNT +} RGXFWIF_CONNECTION_FW_STATE; + +/* OS' connection state */ +typedef enum +{ + RGXFW_CONNECTION_OS_OFFLINE = 0, /*!< OS is offline */ + RGXFW_CONNECTION_OS_READY, /*!< OS's KM driver is setup and waiting */ + RGXFW_CONNECTION_OS_ACTIVE, /*!< OS connection is fully established */ + RGXFW_CONNECTION_OS_STATE_COUNT +} RGXFWIF_CONNECTION_OS_STATE; + +typedef struct +{ + IMG_UINT bfOsState : 3; + IMG_UINT bfFLOk : 1; + IMG_UINT bfFLGrowPending : 1; + IMG_UINT bfIsolatedOS : 1; + IMG_UINT bfReserved : 26; +} RGXFWIF_OS_RUNTIME_FLAGS; + +typedef IMG_UINT32 RGXFWIF_HWR_RECOVERYFLAGS; + +#if defined(PVRSRV_STALLED_CCB_ACTION) +#define PVR_SLR_LOG_ENTRIES 10 +#define PVR_SLR_LOG_STRLEN 30 /*!< MAX_CLIENT_CCB_NAME not visible to 
this header */ + +typedef struct +{ + IMG_UINT64 RGXFW_ALIGN ui64Timestamp; + IMG_UINT32 ui32FWCtxAddr; + IMG_UINT32 ui32NumUFOs; + IMG_CHAR aszCCBName[PVR_SLR_LOG_STRLEN]; +} UNCACHED_ALIGN RGXFWIF_SLR_ENTRY; +#endif + +/* firmware trace control data */ +typedef struct +{ + IMG_UINT32 ui32LogType; + RGXFWIF_TRACEBUF_SPACE sTraceBuf[RGXFW_THREAD_NUM]; + IMG_UINT32 ui32TraceBufSizeInDWords; /*!< Member initialised only when sTraceBuf is actually allocated + * (in RGXTraceBufferInitOnDemandResources) */ + IMG_UINT32 ui32TracebufFlags; /*!< Compatibility and other flags */ +} UNCACHED_ALIGN RGXFWIF_TRACEBUF; + +/*! @Brief Firmware system data shared with the Host driver */ +typedef struct +{ + IMG_UINT32 ui32ConfigFlags; /*!< Configuration flags from host */ + IMG_UINT32 ui32ConfigFlagsExt; /*!< Extended configuration flags from host */ + volatile RGXFWIF_POW_STATE ePowState; + volatile IMG_UINT32 ui32HWPerfRIdx; + volatile IMG_UINT32 ui32HWPerfWIdx; + volatile IMG_UINT32 ui32HWPerfWrapCount; + IMG_UINT32 ui32HWPerfSize; /*!< Constant after setup, needed in FW */ + IMG_UINT32 ui32HWPerfDropCount; /*!< The number of times the FW drops a packet due to buffer full */ + + /* ui32HWPerfUt, ui32FirstDropOrdinal, ui32LastDropOrdinal only valid when FW is built with + * RGX_HWPERF_UTILIZATION & RGX_HWPERF_DROP_TRACKING defined in rgxfw_hwperf.c */ + IMG_UINT32 ui32HWPerfUt; /*!< Buffer utilisation, high watermark of bytes in use */ + IMG_UINT32 ui32FirstDropOrdinal; /*!< The ordinal of the first packet the FW dropped */ + IMG_UINT32 ui32LastDropOrdinal; /*!< The ordinal of the last packet the FW dropped */ + RGXFWIF_OS_RUNTIME_FLAGS asOsRuntimeFlagsMirror[RGXFW_MAX_NUM_OS];/*!< State flags for each Operating System mirrored from Fw coremem */ + RGX_FWFAULTINFO sFaultInfo[RGXFWIF_FWFAULTINFO_MAX]; /*!< Firmware fault info */ + IMG_UINT32 ui32FWFaults; /*!< Firmware faults count */ + IMG_UINT32 aui32CrPollAddr[RGXFW_THREAD_NUM]; /*!< Failed poll address */ + IMG_UINT32 
aui32CrPollMask[RGXFW_THREAD_NUM]; /*!< Failed poll mask */ + IMG_UINT32 aui32CrPollCount[RGXFW_THREAD_NUM]; /*!< Failed poll count */ + IMG_UINT64 RGXFW_ALIGN ui64StartIdleTime; +#if defined(SUPPORT_POWMON_COMPONENT) +#if defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS) + RGXFWIF_TRACEBUF_SPACE sPowerMonBuf; + IMG_UINT32 ui32PowerMonBufSizeInDWords; +#endif +#endif + +#if defined(SUPPORT_VALIDATION) + IMG_UINT32 ui32RenderKillingCtl; /*!< Rasterisation DM Killing Configuration from host */ + IMG_UINT32 ui32CDMTDMKillingCtl; /*!< CDM/TDM Killing Configuration from host */ +#endif +#if defined(SUPPORT_RGXFW_STATS_FRAMEWORK) +#define RGXFWIF_STATS_FRAMEWORK_LINESIZE (8) +#define RGXFWIF_STATS_FRAMEWORK_MAX (2048*RGXFWIF_STATS_FRAMEWORK_LINESIZE) + IMG_UINT32 RGXFW_ALIGN aui32FWStatsBuf[RGXFWIF_STATS_FRAMEWORK_MAX]; +#endif + RGXFWIF_HWR_STATEFLAGS ui32HWRStateFlags; /*!< Firmware's Current HWR state */ + RGXFWIF_HWR_RECOVERYFLAGS aui32HWRRecoveryFlags[RGXFWIF_DM_DEFAULT_MAX]; /*!< Each DM's HWR state */ + IMG_UINT32 ui32FwSysDataFlags; /*!< Compatibility and other flags */ +} UNCACHED_ALIGN RGXFWIF_SYSDATA; + +/*! 
@Brief Firmware per-os data shared with the Host driver */ +typedef struct +{ + IMG_UINT32 ui32FwOsConfigFlags; /*!< Configuration flags from an OS */ + IMG_UINT32 ui32FWSyncCheckMark; /*!< Markers to signal that the host should perform a full sync check */ + IMG_UINT32 ui32HostSyncCheckMark; /*!< Markers to signal that the Firmware should perform a full sync check */ +#if defined(PVRSRV_STALLED_CCB_ACTION) + IMG_UINT32 ui32ForcedUpdatesRequested; + IMG_UINT8 ui8SLRLogWp; + RGXFWIF_SLR_ENTRY sSLRLogFirst; + RGXFWIF_SLR_ENTRY sSLRLog[PVR_SLR_LOG_ENTRIES]; + IMG_UINT64 RGXFW_ALIGN ui64LastForcedUpdateTime; +#endif + volatile IMG_UINT32 aui32InterruptCount[RGXFW_THREAD_NUM]; /*!< Interrupt count from Threads > */ + IMG_UINT32 ui32KCCBCmdsExecuted; /*!< Executed Kernel CCB command count */ + RGXFWIF_DEV_VIRTADDR sPowerSync; /*!< Sync prim used to signal the host the power off state */ + IMG_UINT32 ui32FwOsDataFlags; /*!< Compatibility and other flags */ +} UNCACHED_ALIGN RGXFWIF_OSDATA; + +/* Firmware trace time-stamp field breakup */ + +/* RGX_CR_TIMER register read (48 bits) value*/ +#define RGXFWT_TIMESTAMP_TIME_SHIFT (0U) +#define RGXFWT_TIMESTAMP_TIME_CLRMSK (IMG_UINT64_C(0xFFFF000000000000)) + +/* Extra debug-info (16 bits) */ +#define RGXFWT_TIMESTAMP_DEBUG_INFO_SHIFT (48U) +#define RGXFWT_TIMESTAMP_DEBUG_INFO_CLRMSK ~RGXFWT_TIMESTAMP_TIME_CLRMSK + + +/* Debug-info sub-fields */ +/* Bit 0: RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT bit from RGX_CR_EVENT_STATUS register */ +#define RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SHIFT (0U) +#define RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SET (1U << RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SHIFT) + +/* Bit 1: RGX_CR_BIF_MMU_ENTRY_PENDING bit from RGX_CR_BIF_MMU_ENTRY register */ +#define RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT (1U) +#define RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SET (1U << RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT) + +/* Bit 2: RGX_CR_SLAVE_EVENT register is non-zero */ +#define RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SHIFT (2U) +#define 
RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SET (1U << RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SHIFT) + +/* Bit 3-15: Unused bits */ + +#define RGXFWT_DEBUG_INFO_STR_MAXLEN 64 +#define RGXFWT_DEBUG_INFO_STR_PREPEND " (debug info: " +#define RGXFWT_DEBUG_INFO_STR_APPEND ")" + +/* Table of debug info sub-field's masks and corresponding message strings + * to be appended to firmware trace + * + * Mask : 16 bit mask to be applied to debug-info field + * String : debug info message string + */ + +#define RGXFWT_DEBUG_INFO_MSKSTRLIST \ +/*Mask, String*/ \ +X(RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SET, "mmu pf") \ +X(RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SET, "mmu pending") \ +X(RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SET, "slave events") + +/*! + ****************************************************************************** + * HWR Data + *****************************************************************************/ +/*! + * @Defgroup HWRInfo FW HWR shared data interface + * @Brief Types grouping data structures and defines used in realising the HWR record. + * @{ + */ +/*! @Brief HWR Lockup types */ +typedef enum +{ + RGX_HWRTYPE_UNKNOWNFAILURE = 0, /*!< Unknown failure */ + RGX_HWRTYPE_OVERRUN = 1, /*!< DM overrun */ + RGX_HWRTYPE_POLLFAILURE = 2, /*!< Poll failure */ + RGX_HWRTYPE_BIF0FAULT = 3, /*!< BIF0 fault */ + RGX_HWRTYPE_BIF1FAULT = 4, /*!< BIF1 fault */ + RGX_HWRTYPE_TEXASBIF0FAULT = 5, /*!< TEXASBIF0 fault */ + RGX_HWRTYPE_MMUFAULT = 6, /*!< MMU fault */ + RGX_HWRTYPE_MMUMETAFAULT = 7, /*!< MMU META fault */ + RGX_HWRTYPE_MIPSTLBFAULT = 8, /*!< MIPS TLB fault */ + RGX_HWRTYPE_ECCFAULT = 9, /*!< ECC fault */ + RGX_HWRTYPE_MMURISCVFAULT = 10, /*!< MMU RISCV fault */ +} RGX_HWRTYPE; + +#define RGXFWIF_HWRTYPE_BIF_BANK_GET(eHWRType) (((eHWRType) == RGX_HWRTYPE_BIF0FAULT) ? 
0 : 1) + +#define RGXFWIF_HWRTYPE_PAGE_FAULT_GET(eHWRType) ((((eHWRType) == RGX_HWRTYPE_BIF0FAULT) || \ + ((eHWRType) == RGX_HWRTYPE_BIF1FAULT) || \ + ((eHWRType) == RGX_HWRTYPE_TEXASBIF0FAULT) || \ + ((eHWRType) == RGX_HWRTYPE_MMUFAULT) || \ + ((eHWRType) == RGX_HWRTYPE_MMUMETAFAULT) || \ + ((eHWRType) == RGX_HWRTYPE_MIPSTLBFAULT) || \ + ((eHWRType) == RGX_HWRTYPE_MMURISCVFAULT)) ? true : false) + +/************************ + * GPU HW error codes * + ************************/ +typedef enum +{ + RGX_HW_ERR_NA = 0x0, + RGX_HW_ERR_PRIMID_FAILURE_DURING_DMKILL = 0x101, +} RGX_HW_ERR; + +typedef struct +{ + IMG_UINT64 RGXFW_ALIGN ui64BIFReqStatus; /*!< BIF request status */ + IMG_UINT64 RGXFW_ALIGN ui64BIFMMUStatus; /*!< MMU status */ + IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< phys address of the page catalogue */ + IMG_UINT64 RGXFW_ALIGN ui64Reserved; +} RGX_BIFINFO; + +typedef struct +{ + IMG_UINT32 ui32FaultGPU; /*!< ECC fault in GPU */ +} RGX_ECCINFO; + +typedef struct +{ + IMG_UINT64 RGXFW_ALIGN aui64MMUStatus[2]; /*!< MMU status */ + IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< phys address of the page catalogue */ + IMG_UINT64 RGXFW_ALIGN ui64Reserved; +} RGX_MMUINFO; + +typedef struct +{ + IMG_UINT32 ui32ThreadNum; /*!< Thread ID performing poll operation */ + IMG_UINT32 ui32CrPollAddr; /*!< CR Poll Address */ + IMG_UINT32 ui32CrPollMask; /*!< CR Poll mask */ + IMG_UINT32 ui32CrPollLastValue; /*!< CR Poll last value */ + IMG_UINT64 RGXFW_ALIGN ui64Reserved; +} UNCACHED_ALIGN RGX_POLLINFO; + +typedef struct +{ + IMG_UINT32 ui32BadVAddr; /*!< VA address */ + IMG_UINT32 ui32EntryLo; +} RGX_TLBINFO; + +typedef struct +{ + union + { + RGX_BIFINFO sBIFInfo; /*!< BIF failure details */ + RGX_MMUINFO sMMUInfo; /*!< MMU failure details */ + RGX_POLLINFO sPollInfo; /*!< Poll failure details */ + RGX_TLBINFO sTLBInfo; /*!< TLB failure details */ + RGX_ECCINFO sECCInfo; /*!< ECC failure details */ + } uHWRData; + + IMG_UINT64 RGXFW_ALIGN ui64CRTimer; /*!< Timer value at 
the time of lockup */ + IMG_UINT64 RGXFW_ALIGN ui64OSTimer; /*!< OS timer value at the time of lockup */ + IMG_UINT32 ui32FrameNum; /*!< Frame number of the workload */ + IMG_UINT32 ui32PID; /*!< PID belonging to the workload */ + IMG_UINT32 ui32ActiveHWRTData; /*!< HWRT data of the workload */ + IMG_UINT32 ui32HWRNumber; /*!< HWR number */ + IMG_UINT32 ui32EventStatus; /*!< Core specific event status register at the time of lockup */ + IMG_UINT32 ui32HWRRecoveryFlags; /*!< DM state flags */ + RGX_HWRTYPE eHWRType; /*!< Type of lockup */ + RGXFWIF_DM eDM; /*!< Recovery triggered for the DM */ + IMG_UINT32 ui32CoreID; /*!< Core ID of the GPU */ + RGX_HW_ERR eHWErrorCode; /*!< Error code used to determine HW fault */ + IMG_UINT64 RGXFW_ALIGN ui64CRTimeOfKick; /*!< Workload kick time */ + IMG_UINT64 RGXFW_ALIGN ui64CRTimeHWResetStart; /*!< HW reset start time */ + IMG_UINT64 RGXFW_ALIGN ui64CRTimeHWResetFinish; /*!< HW reset stop time */ + IMG_UINT64 RGXFW_ALIGN ui64CRTimeFreelistReady; /*!< freelist ready time on the last HWR */ + IMG_UINT64 RGXFW_ALIGN ui64Reserved; /*!< Pad to 16 64-bit words */ +} UNCACHED_ALIGN RGX_HWRINFO; + +#define RGXFWIF_HWINFO_MAX_FIRST 8U /* Number of first HWR logs recorded (never overwritten by newer logs) */ +#define RGXFWIF_HWINFO_MAX_LAST 8U /* Number of latest HWR logs (older logs are overwritten by newer logs) */ +#define RGXFWIF_HWINFO_MAX (RGXFWIF_HWINFO_MAX_FIRST + RGXFWIF_HWINFO_MAX_LAST) /* Total number of HWR logs stored in a buffer */ +#define RGXFWIF_HWINFO_LAST_INDEX (RGXFWIF_HWINFO_MAX - 1U) /* Index of the last log in the HWR log buffer */ + +typedef struct +{ + RGX_HWRINFO sHWRInfo[RGXFWIF_HWINFO_MAX]; /*!< Max number of recovery record */ + IMG_UINT32 ui32HwrCounter; /*!< HWR counter used in FL reconstruction */ + IMG_UINT32 ui32WriteIndex; /*!< Index for updating recovery information in sHWRInfo */ + IMG_UINT32 ui32DDReqCount; /*!< Count of DebugDump requested to the host after recovery */ + IMG_UINT32 
ui32HWRInfoBufFlags; /* Compatibility and other flags */ + IMG_UINT32 aui32HwrDmLockedUpCount[RGXFWIF_DM_DEFAULT_MAX]; /*!< Lockup count for each DM */ + IMG_UINT32 aui32HwrDmOverranCount[RGXFWIF_DM_DEFAULT_MAX]; /*!< Overrun count for each DM */ + IMG_UINT32 aui32HwrDmRecoveredCount[RGXFWIF_DM_DEFAULT_MAX]; /*!< Lockup + Overrun count for each DM */ + IMG_UINT32 aui32HwrDmFalseDetectCount[RGXFWIF_DM_DEFAULT_MAX]; /*!< False lockup detection count for each DM */ +} UNCACHED_ALIGN RGXFWIF_HWRINFOBUF; + +/*! @} End of HWRInfo */ + +#define RGXFWIF_CTXSWITCH_PROFILE_FAST_EN (IMG_UINT32_C(0x1)) +#define RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN (IMG_UINT32_C(0x2)) +#define RGXFWIF_CTXSWITCH_PROFILE_SLOW_EN (IMG_UINT32_C(0x3)) +#define RGXFWIF_CTXSWITCH_PROFILE_NODELAY_EN (IMG_UINT32_C(0x4)) + +#define RGXFWIF_CDM_ARBITRATION_TASK_DEMAND_EN (IMG_UINT32_C(0x1)) +#define RGXFWIF_CDM_ARBITRATION_ROUND_ROBIN_EN (IMG_UINT32_C(0x2)) + +#define RGXFWIF_ISP_SCHEDMODE_VER1_IPP (IMG_UINT32_C(0x1)) +#define RGXFWIF_ISP_SCHEDMODE_VER2_ISP (IMG_UINT32_C(0x2)) +/*! + ****************************************************************************** + * RGX firmware Init Config Data + * NOTE: Please be careful to keep backwards compatibility with DDKv1 for the + * CTXSWITCH controls. 
+ *****************************************************************************/ + +/* Flag definitions affecting the firmware globally */ +#define RGXFWIF_INICFG_CTXSWITCH_MODE_RAND (IMG_UINT32_C(0x1) << 0) /*!< Randomise context switch requests */ +#define RGXFWIF_INICFG_CTXSWITCH_SRESET_EN (IMG_UINT32_C(0x1) << 1) +#define RGXFWIF_INICFG_HWPERF_EN (IMG_UINT32_C(0x1) << 2) +#define RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN (IMG_UINT32_C(0x1) << 3) /*!< Randomise DM-killing requests */ +#define RGXFWIF_INICFG_POW_RASCALDUST (IMG_UINT32_C(0x1) << 4) +/* 5 unused */ +#define RGXFWIF_INICFG_FBCDC_V3_1_EN (IMG_UINT32_C(0x1) << 6) +#define RGXFWIF_INICFG_CHECK_MLIST_EN (IMG_UINT32_C(0x1) << 7) +#define RGXFWIF_INICFG_DISABLE_CLKGATING_EN (IMG_UINT32_C(0x1) << 8) +#define RGXFWIF_INICFG_POLL_COUNTERS_EN (IMG_UINT32_C(0x1) << 9) +/* 10 unused */ +/* 11 unused */ +#define RGXFWIF_INICFG_REGCONFIG_EN (IMG_UINT32_C(0x1) << 12) +#define RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY (IMG_UINT32_C(0x1) << 13) +#define RGXFWIF_INICFG_HWP_DISABLE_FILTER (IMG_UINT32_C(0x1) << 14) +#define RGXFWIF_INICFG_CUSTOM_PERF_TIMER_EN (IMG_UINT32_C(0x1) << 15) +#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT (16) +#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_FAST (RGXFWIF_CTXSWITCH_PROFILE_FAST_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) +#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM (RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) +#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_SLOW (RGXFWIF_CTXSWITCH_PROFILE_SLOW_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) +#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_NODELAY (RGXFWIF_CTXSWITCH_PROFILE_NODELAY_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) +#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK (IMG_UINT32_C(0x7) << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) +#define RGXFWIF_INICFG_DISABLE_DM_OVERLAP (IMG_UINT32_C(0x1) << 19) +#define RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER (IMG_UINT32_C(0x1) << 20) +#define RGXFWIF_INICFG_FABRIC_COHERENCY_ENABLED 
(IMG_UINT32_C(0x1) << 21) +#define RGXFWIF_INICFG_VALIDATE_IRQ (IMG_UINT32_C(0x1) << 22) +#define RGXFWIF_INICFG_DISABLE_PDP_EN (IMG_UINT32_C(0x1) << 23) +#define RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN (IMG_UINT32_C(0x1) << 24) +#define RGXFWIF_INICFG_WORKEST (IMG_UINT32_C(0x1) << 25) +#define RGXFWIF_INICFG_PDVFS (IMG_UINT32_C(0x1) << 26) +#define RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT (27) +#define RGXFWIF_INICFG_CDM_ARBITRATION_TASK_DEMAND (RGXFWIF_CDM_ARBITRATION_TASK_DEMAND_EN << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT) +#define RGXFWIF_INICFG_CDM_ARBITRATION_ROUND_ROBIN (RGXFWIF_CDM_ARBITRATION_ROUND_ROBIN_EN << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT) +#define RGXFWIF_INICFG_CDM_ARBITRATION_MASK (IMG_UINT32_C(0x3) << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT) +#define RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT (29) +#define RGXFWIF_INICFG_ISPSCHEDMODE_NONE (0) +#define RGXFWIF_INICFG_ISPSCHEDMODE_VER1_IPP (RGXFWIF_ISP_SCHEDMODE_VER1_IPP << RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT) +#define RGXFWIF_INICFG_ISPSCHEDMODE_VER2_ISP (RGXFWIF_ISP_SCHEDMODE_VER2_ISP << RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT) +#define RGXFWIF_INICFG_ISPSCHEDMODE_MASK (RGXFWIF_INICFG_ISPSCHEDMODE_VER1_IPP |\ + RGXFWIF_INICFG_ISPSCHEDMODE_VER2_ISP) +#define RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER (IMG_UINT32_C(0x1) << 31) +#define RGXFWIF_INICFG_ALL (0xFFFFF3FFU) + +/* Extended Flag definitions affecting the firmware globally */ +#define RGXFWIF_INICFG_EXT_ALL (0x0U) + +#define RGXFWIF_INICFG_SYS_CTXSWITCH_CLRMSK ~(RGXFWIF_INICFG_CTXSWITCH_MODE_RAND | \ + RGXFWIF_INICFG_CTXSWITCH_SRESET_EN) + +/* Flag definitions affecting only workloads submitted by a particular OS */ +#define RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN (IMG_UINT32_C(0x1) << 0) +#define RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN (IMG_UINT32_C(0x1) << 1) /*!< Enables GEOM-TA and GEOM-SHG context switch */ +#define RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN (IMG_UINT32_C(0x1) << 2) +#define RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN (IMG_UINT32_C(0x1) << 3) +/* #define 
RGXFWIF_INICFG_OS_CTXSWITCH_RTU_EN_DEPRECATED (IMG_UINT32_C(0x1) << 4) !< Used for RTU DM-kill only. The RTU does not context switch */ + +#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_TDM (IMG_UINT32_C(0x1) << 4) +#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_GEOM (IMG_UINT32_C(0x1) << 5) +#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_3D (IMG_UINT32_C(0x1) << 6) +#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_CDM (IMG_UINT32_C(0x1) << 7) + +#define RGXFWIF_INICFG_OS_ALL (0xFFU) + +#define RGXFWIF_INICFG_OS_CTXSWITCH_DM_ALL (RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN | \ + RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN | \ + RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN | \ + RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN) + +#define RGXFWIF_INICFG_OS_CTXSWITCH_CLRMSK ~(RGXFWIF_INICFG_OS_CTXSWITCH_DM_ALL) + +#define RGXFWIF_FILTCFG_TRUNCATE_HALF (IMG_UINT32_C(0x1) << 3) +#define RGXFWIF_FILTCFG_TRUNCATE_INT (IMG_UINT32_C(0x1) << 2) +#define RGXFWIF_FILTCFG_NEW_FILTER_MODE (IMG_UINT32_C(0x1) << 1) + +typedef enum +{ + RGX_ACTIVEPM_FORCE_OFF = 0, + RGX_ACTIVEPM_FORCE_ON = 1, + RGX_ACTIVEPM_DEFAULT = 2 +} RGX_ACTIVEPM_CONF; + +typedef enum +{ + RGX_RD_POWER_ISLAND_FORCE_OFF = 0, + RGX_RD_POWER_ISLAND_FORCE_ON = 1, + RGX_RD_POWER_ISLAND_DEFAULT = 2 +} RGX_RD_POWER_ISLAND_CONF; + +typedef struct +{ + IMG_UINT16 ui16RegNum; /*!< Register number */ + IMG_UINT16 ui16IndirectRegNum; /*!< Indirect register number (or 0 if not used) */ + IMG_UINT16 ui16IndirectStartVal; /*!< Start value for indirect register */ + IMG_UINT16 ui16IndirectEndVal; /*!< End value for indirect register */ +} RGXFW_REGISTER_LIST; + +#if defined(RGX_FIRMWARE) +typedef DLLIST_NODE RGXFWIF_DLLIST_NODE; +#else +typedef struct {RGXFWIF_DEV_VIRTADDR p; + RGXFWIF_DEV_VIRTADDR n;} RGXFWIF_DLLIST_NODE; +#endif + +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_SIGBUFFER; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_TRACEBUF; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_SYSDATA; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_OSDATA; +#if defined(SUPPORT_TBI_INTERFACE) +typedef RGXFWIF_DEV_VIRTADDR 
PRGXFWIF_TBIBUF; +#endif +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWPERFBUF; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWRINFOBUF; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RUNTIME_CFG; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_GPU_UTIL_FWCB; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_REG_CFG; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWPERF_CTL; +typedef RGXFWIF_DEV_VIRTADDR PRGX_HWPERF_CONFIG_CNTBLK; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB_CTL; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB_RTN_SLOTS; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FWMEMCONTEXT; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FWCOMMONCONTEXT; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_ZSBUFFER; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_COMMONCTX_STATE; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CORE_CLK_RATE; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FIRMWAREGCOVBUFFER; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCCB; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCCB_CTL; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FREELIST; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWRTDATA; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_TIMESTAMP_ADDR; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RF_CMD; + +/*! + * This number is used to represent an invalid page catalogue physical address + */ +#define RGXFWIF_INVALID_PC_PHYADDR 0xFFFFFFFFFFFFFFFFLLU + +/*! + * This number is used to represent unallocated page catalog base register + */ +#define RGXFW_BIF_INVALID_PCREG 0xFFFFFFFFU + +/*! + Firmware memory context. 
+*/ +typedef struct +{ + IMG_DEV_PHYADDR RGXFW_ALIGN sPCDevPAddr; /*!< device physical address of context's page catalogue */ + IMG_UINT32 uiPageCatBaseRegID; /*!< associated page catalog base register (RGXFW_BIF_INVALID_PCREG == unallocated) */ + IMG_UINT32 uiBreakpointAddr; /*!< breakpoint address */ + IMG_UINT32 uiBPHandlerAddr; /*!< breakpoint handler address */ + IMG_UINT32 uiBreakpointCtl; /*!< DM and enable control for BP */ + IMG_UINT64 RGXFW_ALIGN ui64FBCStateIDMask; /*!< FBCDC state descriptor IDs (non-zero means defer on mem context activation) */ + IMG_UINT32 ui32FwMemCtxFlags; /*!< Compatibility and other flags */ + +#if defined(SUPPORT_GPUVIRT_VALIDATION) + IMG_UINT32 ui32OSid; + IMG_BOOL bOSidAxiProt; +#endif + +} UNCACHED_ALIGN RGXFWIF_FWMEMCONTEXT; + +/*! + * FW context state flags + */ +#define RGXFWIF_CONTEXT_FLAGS_NEED_RESUME (0x00000001U) +#define RGXFWIF_CONTEXT_FLAGS_TDM_HEADER_STALE (0x00000002U) + +typedef struct +{ + /* FW-accessible TA state which must be written out to memory on context store */ + IMG_UINT64 RGXFW_ALIGN uTAReg_DCE_CMD0; + IMG_UINT32 uTAReg_DCE_CMD1; + IMG_UINT32 uTAReg_DCE_WRITE; + IMG_UINT64 RGXFW_ALIGN uTAReg_DCE_DRAW0; + IMG_UINT64 RGXFW_ALIGN uTAReg_DCE_DRAW1; + IMG_UINT32 uTAReg_GTA_SO_PRIM[4]; + IMG_UINT16 ui16TACurrentIdx; +} UNCACHED_ALIGN RGXFWIF_TACTX_STATE; + +/* The following defines need to be auto generated using the HW defines + * rather than hard coding it */ +#define RGXFWIF_ISP_PIPE_COUNT_MAX (20) +#define RGXFWIF_PIPE_COUNT_PER_ISP (2) +#define RGXFWIF_IPP_RESUME_REG_COUNT (1) + +#if !defined(__KERNEL__) +#define RGXFWIF_ISP_COUNT (RGX_FEATURE_NUM_SPU * RGX_FEATURE_NUM_ISP_PER_SPU) +#define RGXFWIF_ISP_PIPE_COUNT (RGXFWIF_ISP_COUNT * RGXFWIF_PIPE_COUNT_PER_ISP) +#if RGXFWIF_ISP_PIPE_COUNT > RGXFWIF_ISP_PIPE_COUNT_MAX +#error RGXFWIF_ISP_PIPE_COUNT should not be greater than RGXFWIF_ISP_PIPE_COUNT_MAX +#endif +#endif /* !defined(__KERNEL__) */ + +typedef struct +{ +#if defined(PM_INTERACTIVE_MODE) + 
IMG_UINT32 RGXFW_ALIGN u3DReg_PM_DEALLOCATED_MASK_STATUS; /*!< Managed by PM HW in the non-interactive mode */ +#endif + IMG_UINT32 ui32CtxStateFlags; /*!< Compatibility and other flags */ + + /* FW-accessible ISP state which must be written out to memory on context store */ + /* au3DReg_ISP_STORE should be the last element of the structure + * as this is an array whose size is determined at runtime + * after detecting the RGX core */ + IMG_UINT64 RGXFW_ALIGN au3DReg_ISP_STORE[]; +} UNCACHED_ALIGN RGXFWIF_3DCTX_STATE; + +#define RGXFWIF_CTX_USING_BUFFER_A (0) +#define RGXFWIF_CTX_USING_BUFFER_B (1U) + +typedef struct +{ + IMG_UINT32 ui32CtxStateFlags; /*!< Target buffer and other flags */ +} RGXFWIF_COMPUTECTX_STATE; + + +typedef struct RGXFWIF_FWCOMMONCONTEXT_ +{ + /* CCB details for this firmware context */ + PRGXFWIF_CCCB_CTL psCCBCtl; /*!< CCB control */ + PRGXFWIF_CCCB psCCB; /*!< CCB base */ + RGXFWIF_DMA_ADDR sCCBMetaDMAAddr; + + RGXFWIF_DLLIST_NODE RGXFW_ALIGN sWaitingNode; /*!< List entry for the waiting list */ + RGXFWIF_DLLIST_NODE RGXFW_ALIGN sRunNode; /*!< List entry for the run list */ + RGXFWIF_UFO sLastFailedUFO; /*!< UFO that last failed (or NULL) */ + + PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< Memory context */ + + /* Context suspend state */ + PRGXFWIF_COMMONCTX_STATE RGXFW_ALIGN psContextState; /*!< TA/3D context suspend state, read/written by FW */ + + /* Framework state */ + PRGXFWIF_RF_CMD RGXFW_ALIGN psRFCmd; /*!< Register updates for Framework */ + + /* + * Flags e.g. for context switching + */ + IMG_UINT32 ui32FWComCtxFlags; + IMG_UINT32 ui32Priority; + IMG_UINT32 ui32PrioritySeqNum; + + /* References to the host side originators */ + IMG_UINT32 ui32ServerCommonContextID; /*!< the Server Common Context */ + IMG_UINT32 ui32PID; /*!< associated process ID */ + + IMG_BOOL bGeomOOMDisabled; /*!< True when Geom DM OOM is not allowed */ + + /* Statistic updates waiting to be passed back to the host... 
*/ + IMG_BOOL bStatsPending; /*!< True when some stats are pending */ + IMG_INT32 i32StatsNumStores; /*!< Number of stores on this context since last update */ + IMG_INT32 i32StatsNumOutOfMemory; /*!< Number of OOMs on this context since last update */ + IMG_INT32 i32StatsNumPartialRenders; /*!< Number of PRs on this context since last update */ + RGXFWIF_DM eDM; /*!< Data Master type */ + IMG_UINT64 RGXFW_ALIGN ui64WaitSignalAddress; /*!< Device Virtual Address of the signal the context is waiting on */ + RGXFWIF_DLLIST_NODE RGXFW_ALIGN sWaitSignalNode; /*!< List entry for the wait-signal list */ + RGXFWIF_DLLIST_NODE RGXFW_ALIGN sBufStalledNode; /*!< List entry for the buffer stalled list */ + IMG_UINT64 RGXFW_ALIGN ui64CBufQueueCtrlAddr; /*!< Address of the circular buffer queue pointers */ + + IMG_UINT64 RGXFW_ALIGN ui64RobustnessAddress; + IMG_UINT32 ui32MaxDeadlineMS; /*!< Max HWR deadline limit in ms */ + bool bReadOffsetNeedsReset; /*!< Following HWR circular buffer read-offset needs resetting */ +} UNCACHED_ALIGN RGXFWIF_FWCOMMONCONTEXT; + +typedef IMG_UINT64 RGXFWIF_TRP_CHECKSUM_2D[RGX_TRP_MAX_NUM_CORES][2]; +typedef IMG_UINT64 RGXFWIF_TRP_CHECKSUM_3D[RGX_TRP_MAX_NUM_CORES][4]; +typedef IMG_UINT64 RGXFWIF_TRP_CHECKSUM_GEOM[RGX_TRP_MAX_NUM_CORES][2]; + +/*! + Firmware render context. +*/ +typedef struct +{ + RGXFWIF_FWCOMMONCONTEXT sTAContext; /*!< Firmware context for the TA */ + RGXFWIF_FWCOMMONCONTEXT s3DContext; /*!< Firmware context for the 3D */ + + RGXFWIF_STATIC_RENDERCONTEXT_STATE sStaticRenderContextState; + + IMG_UINT32 ui32TotalNumPartialRenders; /*!< Total number of partial renders */ + IMG_UINT32 ui32TotalNumOutOfMemory; /*!< Total number of OOMs */ + IMG_UINT32 ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */ + IMG_UINT32 ui32FwRenderCtxFlags; /*!< Compatibility and other flags */ + +} UNCACHED_ALIGN RGXFWIF_FWRENDERCONTEXT; + +/*! + Firmware compute context. 
+*/ +typedef struct +{ + RGXFWIF_FWCOMMONCONTEXT sCDMContext; /*!< Firmware context for the CDM */ + + RGXFWIF_STATIC_COMPUTECONTEXT_STATE sStaticComputeContextState; + + IMG_UINT32 ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */ + + IMG_UINT32 ui32ComputeCtxFlags; /*!< Compatibility and other flags */ + + IMG_UINT32 ui32WGPState; + IMG_UINT32 aui32WGPChecksum[RGX_WGP_MAX_NUM_CORES]; +} UNCACHED_ALIGN RGXFWIF_FWCOMPUTECONTEXT; + +/*! + Firmware ray context. +*/ +typedef struct +{ + RGXFWIF_FWCOMMONCONTEXT sRDMContext; /*!< Firmware context for the RDM */ + RGXFWIF_STATIC_RAYCONTEXT_STATE sStaticRayContextState; + IMG_UINT32 ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */ + +} UNCACHED_ALIGN RGXFWIF_FWRAYCONTEXT; +/*! + Firmware TDM context. +*/ +typedef struct +{ + RGXFWIF_FWCOMMONCONTEXT sTDMContext; /*!< Firmware context for the TDM */ + + IMG_UINT32 ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */ + +} UNCACHED_ALIGN RGXFWIF_FWTDMCONTEXT; + +/*! + ****************************************************************************** + * Defines for CMD_TYPE corruption detection and forward compatibility check + *****************************************************************************/ + +/* CMD_TYPE 32bit contains: + * 31:16 Reserved for magic value to detect corruption (16 bits) + * 15 Reserved for RGX_CCB_TYPE_TASK (1 bit) + * 14:0 Bits available for CMD_TYPEs (15 bits) */ + + +/* Magic value to detect corruption */ +#define RGX_CMD_MAGIC_DWORD IMG_UINT32_C(0x2ABC) +#define RGX_CMD_MAGIC_DWORD_MASK (0xFFFF0000U) +#define RGX_CMD_MAGIC_DWORD_SHIFT (16U) +#define RGX_CMD_MAGIC_DWORD_SHIFTED (RGX_CMD_MAGIC_DWORD << RGX_CMD_MAGIC_DWORD_SHIFT) + +/*! + * @Defgroup KCCBTypes Kernel CCB data interface + * @Brief Types grouping data structures and defines used in realising the KCCB functionality + * @{ + */ + +/*! 
+ * @Brief Kernel CCB control for RGX + */ +typedef struct +{ + volatile IMG_UINT32 ui32WriteOffset; /*!< write offset into array of commands (MUST be aligned to 16 bytes!) */ + volatile IMG_UINT32 ui32ReadOffset; /*!< read offset into array of commands */ + IMG_UINT32 ui32WrapMask; /*!< Offset wrapping mask (Total capacity of the CCB - 1) */ + IMG_UINT32 ui32CmdSize; /*!< size of each command in bytes */ +} UNCACHED_ALIGN RGXFWIF_CCB_CTL; + +#define RGXFWIF_MMUCACHEDATA_FLAGS_PT (0x1U) /* MMU_CTRL_INVAL_PT_EN */ +#define RGXFWIF_MMUCACHEDATA_FLAGS_PD (0x2U) /* MMU_CTRL_INVAL_PD_EN */ +#define RGXFWIF_MMUCACHEDATA_FLAGS_PC (0x4U) /* MMU_CTRL_INVAL_PC_EN */ +#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800U) /* MMU_CTRL_INVAL_ALL_CONTEXTS_EN */ +#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB (0x0) /* not used */ + +#define RGXFWIF_MMUCACHEDATA_FLAGS_INTERRUPT (0x4000000U) /* indicates FW should interrupt the host */ + +/*! + * @Brief Command data for \ref RGXFWIF_KCCB_CMD_MMUCACHE type command + */ +typedef struct +{ + IMG_UINT32 ui32CacheFlags; + RGXFWIF_DEV_VIRTADDR sMMUCacheSync; + IMG_UINT32 ui32MMUCacheSyncUpdateValue; +} RGXFWIF_MMUCACHEDATA; + +#define RGXFWIF_BPDATA_FLAGS_ENABLE (1U << 0) +#define RGXFWIF_BPDATA_FLAGS_WRITE (1U << 1) +#define RGXFWIF_BPDATA_FLAGS_CTL (1U << 2) +#define RGXFWIF_BPDATA_FLAGS_REGS (1U << 3) + +typedef struct +{ + PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< Memory context */ + IMG_UINT32 ui32BPAddr; /*!< Breakpoint address */ + IMG_UINT32 ui32HandlerAddr; /*!< Breakpoint handler */ + IMG_UINT32 ui32BPDM; /*!< Breakpoint control */ + IMG_UINT32 ui32BPDataFlags; + IMG_UINT32 ui32TempRegs; /*!< Number of temporary registers to overallocate */ + IMG_UINT32 ui32SharedRegs; /*!< Number of shared registers to overallocate */ + RGXFWIF_DM eDM; /*!< DM associated with the breakpoint */ +} RGXFWIF_BPDATA; + +#define RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS (RGXFWIF_PRBUFFER_MAXSUPPORTED + 1U) /* +1 is RTDATASET cleanup */ + +/*! 
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_KICK type command + */ +typedef struct +{ + PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< address of the firmware context */ + IMG_UINT32 ui32CWoffUpdate; /*!< Client CCB woff update */ + IMG_UINT32 ui32CWrapMaskUpdate; /*!< Client CCB wrap mask update after CCCB growth */ + IMG_UINT32 ui32NumCleanupCtl; /*!< number of CleanupCtl pointers attached */ + PRGXFWIF_CLEANUP_CTL apsCleanupCtl[RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS]; /*!< CleanupCtl structures associated with command */ + IMG_UINT32 ui32WorkEstCmdHeaderOffset; /*!< offset to the CmdHeader which houses the workload estimation kick data. */ +} RGXFWIF_KCCB_CMD_KICK_DATA; + +/*! + * @Brief Command data for \ref RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK type command + */ +typedef struct +{ + RGXFWIF_KCCB_CMD_KICK_DATA sTACmdKickData; + RGXFWIF_KCCB_CMD_KICK_DATA s3DCmdKickData; +} RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK_DATA; + +/*! + * @Brief Command data for \ref RGXFWIF_KCCB_CMD_FORCE_UPDATE type command + */ +typedef struct +{ + PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< address of the firmware context */ + IMG_UINT32 ui32CCBFenceOffset; /*!< Client CCB fence offset */ +} RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA; + +/*! + * @Brief Resource types supported by \ref RGXFWIF_KCCB_CMD_CLEANUP type command + */ +typedef enum +{ + RGXFWIF_CLEANUP_FWCOMMONCONTEXT, /*!< FW common context cleanup */ + RGXFWIF_CLEANUP_HWRTDATA, /*!< FW HW RT data cleanup */ + RGXFWIF_CLEANUP_FREELIST, /*!< FW freelist cleanup */ + RGXFWIF_CLEANUP_ZSBUFFER, /*!< FW ZS Buffer cleanup */ +} RGXFWIF_CLEANUP_TYPE; + +/*! 
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_CLEANUP type command + */ +typedef struct +{ + RGXFWIF_CLEANUP_TYPE eCleanupType; /*!< Cleanup type */ + union { + PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< FW common context to cleanup */ + PRGXFWIF_HWRTDATA psHWRTData; /*!< HW RT to cleanup */ + PRGXFWIF_FREELIST psFreelist; /*!< Freelist to cleanup */ + PRGXFWIF_ZSBUFFER psZSBuffer; /*!< ZS Buffer to cleanup */ + } uCleanupData; +} RGXFWIF_CLEANUP_REQUEST; + +/*! + * @Brief Type of power requests supported in \ref RGXFWIF_KCCB_CMD_POW type command + */ +typedef enum +{ + RGXFWIF_POW_OFF_REQ = 1, /*!< GPU power-off request */ + RGXFWIF_POW_FORCED_IDLE_REQ, /*!< Force-idle related request */ + RGXFWIF_POW_NUM_UNITS_CHANGE, /*!< Request to change default powered scalable units */ + RGXFWIF_POW_APM_LATENCY_CHANGE /*!< Request to change the APM latency period */ +} RGXFWIF_POWER_TYPE; + +/*! + * @Brief Supported force-idle related requests with \ref RGXFWIF_POW_FORCED_IDLE_REQ type request + */ +typedef enum +{ + RGXFWIF_POWER_FORCE_IDLE = 1, /*!< Request to force-idle GPU */ + RGXFWIF_POWER_CANCEL_FORCED_IDLE, /*!< Request to cancel a previously successful force-idle transition */ + RGXFWIF_POWER_HOST_TIMEOUT, /*!< Notification that host timed-out waiting for force-idle state */ +} RGXFWIF_POWER_FORCE_IDLE_TYPE; + +/*! + * @Brief Command data for \ref RGXFWIF_KCCB_CMD_POW type command + */ +typedef struct +{ + RGXFWIF_POWER_TYPE ePowType; /*!< Type of power request */ + union + { + IMG_UINT32 ui32PowUnitsStateMask; /*!< New power units state mask */ + IMG_BOOL bForced; /*!< If the operation is mandatory */ + RGXFWIF_POWER_FORCE_IDLE_TYPE ePowRequestType; /*!< Type of Request. Consolidating Force Idle, Cancel Forced Idle, Host Timeout */ + } uPowerReqData; +} RGXFWIF_POWER_REQUEST; + +/*! 
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_SLCFLUSHINVAL type command + */ +typedef struct +{ + PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< Context to fence on (only useful when bDMContext == TRUE) */ + IMG_BOOL bInval; /*!< Invalidate the cache as well as flushing */ + IMG_BOOL bDMContext; /*!< The data to flush/invalidate belongs to a specific DM context */ + IMG_UINT64 RGXFW_ALIGN ui64Address; /*!< Optional address of range (only useful when bDMContext == FALSE) */ + IMG_UINT64 RGXFW_ALIGN ui64Size; /*!< Optional size of range (only useful when bDMContext == FALSE) */ +} RGXFWIF_SLCFLUSHINVALDATA; + +typedef enum +{ + RGXFWIF_HWPERF_CTRL_TOGGLE = 0, + RGXFWIF_HWPERF_CTRL_SET = 1, + RGXFWIF_HWPERF_CTRL_EMIT_FEATURES_EV = 2 +} RGXFWIF_HWPERF_UPDATE_CONFIG; + +/*! + * @Brief Command data for \ref RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG type command + */ +typedef struct +{ + RGXFWIF_HWPERF_UPDATE_CONFIG eOpCode; /*!< Control operation code */ + IMG_UINT64 RGXFW_ALIGN ui64Mask; /*!< Mask of events to toggle */ +} RGXFWIF_HWPERF_CTRL; + +typedef enum +{ + RGXFWIF_HWPERF_CNTR_NOOP = 0, /* No-Op */ + RGXFWIF_HWPERF_CNTR_ENABLE = 1, /* Enable Counters */ + RGXFWIF_HWPERF_CNTR_DISABLE = 2 /* Disable Counters */ +} RGXFWIF_HWPERF_CNTR_CONFIG; + +/*! + * @Brief Command data for \ref RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS type command + */ +typedef struct +{ + IMG_UINT32 ui32CtrlWord; + IMG_UINT32 ui32NumBlocks; /*!< Number of RGX_HWPERF_CONFIG_CNTBLK in the array */ + PRGX_HWPERF_CONFIG_CNTBLK sBlockConfigs; /*!< Address of the RGX_HWPERF_CONFIG_CNTBLK array */ +} RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS; + +/*! + * @Brief Command data for \ref RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE type command + */ +typedef struct +{ + IMG_UINT32 ui32NewClockSpeed; /*!< New clock speed */ +} RGXFWIF_CORECLKSPEEDCHANGE_DATA; + +#define RGXFWIF_HWPERF_CTRL_BLKS_MAX 16 + +/*! 
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS type command + */ +typedef struct +{ + bool bEnable; + IMG_UINT32 ui32NumBlocks; /*!< Number of block IDs in the array */ + IMG_UINT16 aeBlockIDs[RGXFWIF_HWPERF_CTRL_BLKS_MAX]; /*!< Array of RGX_HWPERF_CNTBLK_ID values */ +} RGXFWIF_HWPERF_CTRL_BLKS; + +/*! + * @Brief Command data for \ref RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE & \ref RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE type commands + */ +typedef struct +{ + RGXFWIF_DEV_VIRTADDR sZSBufferFWDevVAddr; /*!< ZS-Buffer FW address */ + IMG_BOOL bDone; /*!< action backing/unbacking succeeded */ +} RGXFWIF_ZSBUFFER_BACKING_DATA; + +#if defined(SUPPORT_VALIDATION) +typedef struct +{ + IMG_UINT32 ui32RegWidth; + IMG_BOOL bWriteOp; + IMG_UINT32 ui32RegAddr; + IMG_UINT64 RGXFW_ALIGN ui64RegVal; +} RGXFWIF_RGXREG_DATA; +#endif + +/*! + * @Brief Command data for \ref RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE type command + */ +typedef struct +{ + RGXFWIF_DEV_VIRTADDR sFreeListFWDevVAddr; /*!< Freelist FW address */ + IMG_UINT32 ui32DeltaPages; /*!< Amount of the Freelist change */ + IMG_UINT32 ui32NewPages; /*!< New amount of pages on the freelist (including ready pages) */ + IMG_UINT32 ui32ReadyPages; /*!< Number of ready pages to be held in reserve until OOM */ +} RGXFWIF_FREELIST_GS_DATA; + +#define RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT (MAX_HW_TA3DCONTEXTS * RGXFW_MAX_FREELISTS * 2U) +#define RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG 0x80000000U + +/*! + * @Brief Command data for \ref RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE type command + */ +typedef struct +{ + IMG_UINT32 ui32FreelistsCount; + IMG_UINT32 aui32FreelistIDs[RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT]; +} RGXFWIF_FREELISTS_RECONSTRUCTION_DATA; + +/*! 
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_NOTIFY_SIGNAL_UPDATE type command
+ */
+typedef struct
+{
+	IMG_DEV_VIRTADDR RGXFW_ALIGN sDevSignalAddress; /*!< device virtual address of the updated signal */
+	PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< Memory context */
+} UNCACHED_ALIGN RGXFWIF_SIGNAL_UPDATE_DATA;
+
+/*!
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE type command
+ */
+typedef struct
+{
+	PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< Context that may need to be resumed following the write offset update */
+} UNCACHED_ALIGN RGXFWIF_WRITE_OFFSET_UPDATE_DATA;
+
+/*!
+ ******************************************************************************
+ * Proactive DVFS Structures
+ *****************************************************************************/
+/* Number of operating performance point (OPP) entries in the PDVFS table */
+#define NUM_OPP_VALUES 16
+
+/*! A single operating performance point: voltage/frequency pair */
+typedef struct
+{
+	IMG_UINT32 ui32Volt; /* Voltage (V) */
+	IMG_UINT32 ui32Freq; /* Frequency (Hz) */
+} UNCACHED_ALIGN PDVFS_OPP;
+
+/*! OPP table passed to the firmware for Proactive DVFS */
+typedef struct
+{
+	PDVFS_OPP asOPPValues[NUM_OPP_VALUES]; /*!< Table of OPP entries */
+#if defined(DEBUG)
+	IMG_UINT32 ui32MinOPPPoint; /*!< Lowest usable index into asOPPValues -- inferred from name; confirm */
+#endif
+	IMG_UINT32 ui32MaxOPPPoint; /*!< Highest usable index into asOPPValues -- inferred from name; confirm */
+} UNCACHED_ALIGN RGXFWIF_PDVFS_OPP;
+
+/*! Command data for \ref RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ type command */
+typedef struct
+{
+	IMG_UINT32 ui32MaxOPPPoint; /*!< OPP index capping the maximum frequency */
+} UNCACHED_ALIGN RGXFWIF_PDVFS_MAX_FREQ_DATA;
+
+/*! Command data for \ref RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MIN_FREQ type command */
+typedef struct
+{
+	IMG_UINT32 ui32MinOPPPoint; /*!< OPP index flooring the minimum frequency */
+} UNCACHED_ALIGN RGXFWIF_PDVFS_MIN_FREQ_DATA;
+
+/*!
+ ******************************************************************************
+ * Register configuration structures
+ *****************************************************************************/
+
+/* Maximum number of register records that can be held in RGXFWIF_REG_CFG */
+#define RGXFWIF_REG_CFG_MAX_SIZE 512
+
+/*! Operations accepted by the \ref RGXFWIF_KCCB_CMD_REGCONFIG command */
+typedef enum
+{
+	RGXFWIF_REGCFG_CMD_ADD = 101,
+	RGXFWIF_REGCFG_CMD_CLEAR = 102,
+	RGXFWIF_REGCFG_CMD_ENABLE = 103,
+	RGXFWIF_REGCFG_CMD_DISABLE = 104
+} RGXFWIF_REGDATA_CMD_TYPE;
+
+/*! Event/kick type a register configuration record applies to */
+typedef IMG_UINT32 RGXFWIF_REG_CFG_TYPE;
+#define RGXFWIF_REG_CFG_TYPE_PWR_ON 0U /* Sidekick power event */
+#define RGXFWIF_REG_CFG_TYPE_DUST_CHANGE 1U /* Rascal / dust power event */
+#define RGXFWIF_REG_CFG_TYPE_TA 2U /* TA kick */
+#define RGXFWIF_REG_CFG_TYPE_3D 3U /* 3D kick */
+#define RGXFWIF_REG_CFG_TYPE_CDM 4U /* Compute kick */
+#define RGXFWIF_REG_CFG_TYPE_TDM 5U /* TDM kick */
+#define RGXFWIF_REG_CFG_TYPE_ALL 6U /* Applies to all types. Keep as last element */
+
+/*! A single register configuration record */
+typedef struct
+{
+	IMG_UINT64 ui64Addr; /*!< Register address */
+	IMG_UINT64 ui64Mask; /*!< Mask selecting which bits to modify -- inferred from name; confirm RMW semantics with FW */
+	IMG_UINT64 ui64Value; /*!< Value applied under the mask */
+} RGXFWIF_REG_CFG_REC;
+
+/*!
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_REGCONFIG type command
+ */
+typedef struct
+{
+	RGXFWIF_REGDATA_CMD_TYPE eCmdType; /*!< Add/clear/enable/disable operation */
+	RGXFWIF_REG_CFG_TYPE eRegConfigType; /*!< Event type the record applies to (RGXFWIF_REG_CFG_TYPE_*) */
+	RGXFWIF_REG_CFG_REC RGXFW_ALIGN sRegConfig; /*!< Register record -- presumably only meaningful for the ADD operation; confirm */
+
+} RGXFWIF_REGCONFIG_DATA;
+
+/*! Firmware-side store of all configured register records */
+typedef struct
+{
+	/**
+	 * PDump WRW command write granularity is 32 bits.
+	 * Add padding to ensure array size is 32 bit granular.
+	 */
+	IMG_UINT8 RGXFW_ALIGN aui8NumRegsType[PVR_ALIGN(RGXFWIF_REG_CFG_TYPE_ALL,sizeof(IMG_UINT32))]; /*!< Per-type record counts, padded to a 32-bit multiple */
+	RGXFWIF_REG_CFG_REC RGXFW_ALIGN asRegConfigs[RGXFWIF_REG_CFG_MAX_SIZE]; /*!< Flat array of all register records */
+} UNCACHED_ALIGN RGXFWIF_REG_CFG;
+
+/*! Online/offline states reported via \ref RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE */
+typedef enum
+{
+	RGXFWIF_OS_ONLINE = 1,
+	RGXFWIF_OS_OFFLINE
+} RGXFWIF_OS_STATE_CHANGE;
+
+/*!
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE type command
+ */
+typedef struct
+{
+	IMG_UINT32 ui32OSid; /*!< OS identifier whose state changed */
+	RGXFWIF_OS_STATE_CHANGE eNewOSState; /*!< New online/offline state */
+} UNCACHED_ALIGN RGXFWIF_OS_STATE_CHANGE_DATA;
+
+/*!
+ * @Brief List of command types supported by the Kernel CCB + */ +typedef enum +{ + /* Common commands */ + RGXFWIF_KCCB_CMD_KICK = 101U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< DM workload kick command */ + RGXFWIF_KCCB_CMD_MMUCACHE = 102U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< MMU cache invalidation request */ + RGXFWIF_KCCB_CMD_BP = 103U | RGX_CMD_MAGIC_DWORD_SHIFTED, + RGXFWIF_KCCB_CMD_SLCFLUSHINVAL = 105U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< SLC flush and invalidation request */ + RGXFWIF_KCCB_CMD_CLEANUP = 106U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests cleanup of a FW resource (type specified in the command data) */ + RGXFWIF_KCCB_CMD_POW = 107U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Power request (type specified in the command data) */ + RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE = 108U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Backing for on-demand ZS-Buffer done */ + RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE = 109U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Unbacking for on-demand ZS-Buffer done */ + RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE = 110U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Freelist Grow done */ + RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE = 112U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Freelists Reconstruction done */ + RGXFWIF_KCCB_CMD_NOTIFY_SIGNAL_UPDATE = 113U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the firmware that the host has performed a signal update */ + RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE = 114U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the firmware that the host has added more data to a CDM2 Circular Buffer */ + RGXFWIF_KCCB_CMD_HEALTH_CHECK = 115U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Health check request */ + RGXFWIF_KCCB_CMD_FORCE_UPDATE = 116U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Forcing signalling of all unmet UFOs for a given CCB offset */ + + RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK = 117U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< There is a TA and a 3D command in this single kick */ + + /* Commands only permitted to the native or host OS */ + 
RGXFWIF_KCCB_CMD_REGCONFIG = 200U | RGX_CMD_MAGIC_DWORD_SHIFTED, + RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG = 201U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure HWPerf events (to be generated) and HWPerf buffer address (if required) */ + RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS = 202U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure, clear and enable multiple HWPerf blocks */ + RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS = 203U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Enable or disable multiple HWPerf blocks (reusing existing configuration) */ + RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE = 204U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Core clock speed change event */ + RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS_DIRECT = 205U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure, clear and enable multiple HWPerf blocks during the init process */ + RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE = 206U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Ask the firmware to update its cached ui32LogType value from the (shared) tracebuf control structure */ + RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ = 207U | RGX_CMD_MAGIC_DWORD_SHIFTED, + RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE = 208U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Changes the relative scheduling priority for a particular OSID. It can only be serviced for the Host DDK */ + RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL = 209U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set or clear firmware state flags */ + /* RGXFWIF_KCCB_CMD_HCS_SET_DEADLINE */ + RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE = 211U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the FW that a Guest OS has come online / offline. 
It can only be serviced for the Host DDK */ + RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MIN_FREQ = 212U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set a minimum frequency/OPP point */ + RGXFWIF_KCCB_CMD_PHR_CFG = 213U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure Periodic Hardware Reset behaviour */ +#if defined(SUPPORT_VALIDATION) + RGXFWIF_KCCB_CMD_RGXREG = 214U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Read RGX Register from FW */ +#endif + RGXFWIF_KCCB_CMD_WDG_CFG = 215U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure Safety Firmware Watchdog */ +} RGXFWIF_KCCB_CMD_TYPE; + +#define RGXFWIF_LAST_ALLOWED_GUEST_KCCB_CMD (RGXFWIF_KCCB_CMD_REGCONFIG - 1) + +/*! @Brief Kernel CCB command packet */ +typedef struct +{ + RGXFWIF_KCCB_CMD_TYPE eCmdType; /*!< Command type */ + IMG_UINT32 ui32KCCBFlags; /*!< Compatibility and other flags */ + + /* NOTE: Make sure that uCmdData is the last member of this struct + * This is to calculate actual command size for device mem copy. + * (Refer RGXGetCmdMemCopySize()) + * */ + union + { + RGXFWIF_KCCB_CMD_KICK_DATA sCmdKickData; /*!< Data for Kick command */ + RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK_DATA sCombinedTA3DCmdKickData; /*!< Data for combined TA/3D Kick command */ + RGXFWIF_MMUCACHEDATA sMMUCacheData; /*!< Data for MMU cache command */ + RGXFWIF_BPDATA sBPData; /*!< Data for Breakpoint Commands */ + RGXFWIF_SLCFLUSHINVALDATA sSLCFlushInvalData; /*!< Data for SLC Flush/Inval commands */ + RGXFWIF_CLEANUP_REQUEST sCleanupData; /*!< Data for cleanup commands */ + RGXFWIF_POWER_REQUEST sPowData; /*!< Data for power request commands */ + RGXFWIF_HWPERF_CTRL sHWPerfCtrl; /*!< Data for HWPerf control command */ + RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS sHWPerfCfgEnableBlks; /*!< Data for HWPerf configure, clear and enable performance counter block command */ + RGXFWIF_HWPERF_CTRL_BLKS sHWPerfCtrlBlks; /*!< Data for HWPerf enable or disable performance counter block commands */ + RGXFWIF_CORECLKSPEEDCHANGE_DATA sCoreClkSpeedChangeData;/*!< Data for core clock speed 
change */ + RGXFWIF_ZSBUFFER_BACKING_DATA sZSBufferBackingData; /*!< Feedback for Z/S Buffer backing/unbacking */ + RGXFWIF_FREELIST_GS_DATA sFreeListGSData; /*!< Feedback for Freelist grow/shrink */ + RGXFWIF_FREELISTS_RECONSTRUCTION_DATA sFreeListsReconstructionData; /*!< Feedback for Freelists reconstruction */ + RGXFWIF_REGCONFIG_DATA sRegConfigData; /*!< Data for custom register configuration */ + RGXFWIF_SIGNAL_UPDATE_DATA sSignalUpdateData; /*!< Data for informing the FW about the signal update */ + RGXFWIF_WRITE_OFFSET_UPDATE_DATA sWriteOffsetUpdateData; /*!< Data for informing the FW about the write offset update */ +#if defined(SUPPORT_PDVFS) + RGXFWIF_PDVFS_MAX_FREQ_DATA sPDVFSMaxFreqData; + RGXFWIF_PDVFS_MIN_FREQ_DATA sPDVFSMinFreqData; /*!< Data for setting the min frequency/OPP */ +#endif + RGXFWIF_OS_STATE_CHANGE_DATA sCmdOSOnlineStateData; /*!< Data for updating the Guest Online states */ + RGXFWIF_DEV_VIRTADDR sTBIBuffer; /*!< Dev address for TBI buffer allocated on demand */ + RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA sForceUpdateData; /*!< Data for signalling all unmet fences for a given CCB */ +#if defined(SUPPORT_VALIDATION) + RGXFWIF_RGXREG_DATA sFwRgxData; /*!< Data for reading off an RGX register */ +#endif + } UNCACHED_ALIGN uCmdData; +} UNCACHED_ALIGN RGXFWIF_KCCB_CMD; + +RGX_FW_STRUCT_SIZE_ASSERT(RGXFWIF_KCCB_CMD); + +/*! @} End of KCCBTypes */ + +/*! + * @Defgroup FWCCBTypes Firmware CCB data interface + * @Brief Types grouping data structures and defines used in realising the Firmware CCB functionality + * @{ + */ + +/*! + ****************************************************************************** + * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING and the + * \ref RGXFWIF_FWCCB_CMD_ZSBUFFER_UNBACKING Firmware CCB commands + *****************************************************************************/ +typedef struct +{ + IMG_UINT32 ui32ZSBufferID; /*!< ZS buffer ID */ +} RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA; + +/*! 
+ ****************************************************************************** + * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_FREELIST_GROW Firmware CCB + * command + *****************************************************************************/ +typedef struct +{ + IMG_UINT32 ui32FreelistID; /*!< Freelist ID */ +} RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA; + +/*! + ****************************************************************************** + * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION + * Firmware CCB command + *****************************************************************************/ +typedef struct +{ + IMG_UINT32 ui32FreelistsCount; /*!< Freelists count */ + IMG_UINT32 ui32HwrCounter; /*!< HWR counter */ + IMG_UINT32 aui32FreelistIDs[RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT]; /*!< Array of freelist IDs to reconstruct */ +} RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA; + +#define RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_PF (1U<<0) /*!< 1 if a page fault happened */ +#define RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_ALL_CTXS (1U<<1) /*!< 1 if applicable to all contexts */ + +/*! + ****************************************************************************** + * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION + * Firmware CCB command + *****************************************************************************/ +typedef struct +{ + IMG_UINT32 ui32ServerCommonContextID; /*!< Context affected by the reset */ + RGX_CONTEXT_RESET_REASON eResetReason; /*!< Reason for reset */ + RGXFWIF_DM eDM; /*!< Data Master affected by the reset */ + IMG_UINT32 ui32ResetJobRef; /*!< Job ref running at the time of reset */ + IMG_UINT32 ui32Flags; /*!< RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG bitfield */ + IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< At what page catalog address */ + IMG_DEV_VIRTADDR RGXFW_ALIGN sFaultAddress; /*!< Page fault address (only when applicable) */ +} RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA; + +/*! 
+ ****************************************************************************** + * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_CONTEXT_FW_PF_NOTIFICATION + * Firmware CCB command + *****************************************************************************/ +typedef struct +{ + IMG_DEV_VIRTADDR sFWFaultAddr; /*!< Page fault address */ +} RGXFWIF_FWCCB_CMD_FW_PAGEFAULT_DATA; + +/*! + ****************************************************************************** + * List of command types supported by the Firmware CCB + *****************************************************************************/ +typedef enum +{ + RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING = 101U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests ZSBuffer to be backed with physical pages + \n Command data: RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA */ + RGXFWIF_FWCCB_CMD_ZSBUFFER_UNBACKING = 102U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests ZSBuffer to be unbacked + \n Command data: RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA */ + RGXFWIF_FWCCB_CMD_FREELIST_GROW = 103U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests an on-demand freelist grow + \n Command data: RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA */ + RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION = 104U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests freelists reconstruction + \n Command data: RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA */ + RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION = 105U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Notifies host of a HWR event on a context + \n Command data: RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA */ + RGXFWIF_FWCCB_CMD_DEBUG_DUMP = 106U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests an on-demand debug dump + \n Command data: None */ + RGXFWIF_FWCCB_CMD_UPDATE_STATS = 107U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests an on-demand update on process stats + \n Command data: RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA */ +#if defined(SUPPORT_PDVFS) + RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE = 108U | RGX_CMD_MAGIC_DWORD_SHIFTED, +#endif + 
RGXFWIF_FWCCB_CMD_REQUEST_GPU_RESTART = 109U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests GPU restart + \n Command data: None */ +#if defined(SUPPORT_VALIDATION) + RGXFWIF_FWCCB_CMD_REG_READ = 110U | RGX_CMD_MAGIC_DWORD_SHIFTED, +#if defined(SUPPORT_SOC_TIMER) + RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS = 111U | RGX_CMD_MAGIC_DWORD_SHIFTED, +#endif +#endif + RGXFWIF_FWCCB_CMD_CONTEXT_FW_PF_NOTIFICATION = 112U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Notifies host of a FW pagefault + \n Command data: RGXFWIF_FWCCB_CMD_FW_PAGEFAULT_DATA */ +} RGXFWIF_FWCCB_CMD_TYPE; + +/*! + ****************************************************************************** + * List of the various stats of the process to update/increment + *****************************************************************************/ +typedef enum +{ + RGXFWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS=1, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32TotalNumPartialRenders stat */ + RGXFWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32TotalNumOutOfMemory stat */ + RGXFWIF_FWCCB_CMD_UPDATE_NUM_TA_STORES, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumTAStores stat */ + RGXFWIF_FWCCB_CMD_UPDATE_NUM_3D_STORES, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32Num3DStores stat */ + RGXFWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumCDMStores stat */ + RGXFWIF_FWCCB_CMD_UPDATE_NUM_TDM_STORES /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumTDMStores stat */ +} RGXFWIF_FWCCB_CMD_UPDATE_STATS_TYPE; + +/*! 
+ ******************************************************************************
+ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_UPDATE_STATS Firmware CCB
+ * command
+ *****************************************************************************/
+typedef struct
+{
+	RGXFWIF_FWCCB_CMD_UPDATE_STATS_TYPE eElementToUpdate; /*!< Element to update */
+	IMG_PID pidOwner; /*!< The pid of the process whose stats are being updated */
+	IMG_INT32 i32AdjustmentValue; /*!< Adjustment to be made to the statistic */
+} RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA;
+
+/*! Command data of the \ref RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE Firmware CCB command */
+typedef struct
+{
+	IMG_UINT32 ui32CoreClkRate; /*!< New core clock rate -- presumably Hz as elsewhere in this interface; confirm */
+} UNCACHED_ALIGN RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE_DATA;
+
+#if defined(SUPPORT_VALIDATION)
+/*! Command data of the \ref RGXFWIF_FWCCB_CMD_REG_READ Firmware CCB command (validation builds only) */
+typedef struct
+{
+	IMG_UINT64 ui64RegValue; /*!< Value read back from the register */
+} RGXFWIF_FWCCB_CMD_RGXREG_READ_DATA;
+
+#if defined(SUPPORT_SOC_TIMER)
+/*! Command data of the \ref RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS Firmware CCB command */
+typedef struct
+{
+	IMG_UINT64 ui64timerGray; /*!< SoC timer sample, Gray-coded -- inferred from name; confirm */
+	IMG_UINT64 ui64timerBinary; /*!< SoC timer sample, binary -- inferred from name; confirm */
+	IMG_UINT64 aui64uscTimers[RGX_FEATURE_NUM_CLUSTERS]; /*!< Per-cluster USC timer samples */
+} RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS_DATA;
+#endif
+#endif
+
+/*!
+ ******************************************************************************
+ * @Brief Firmware CCB command structure
+ *****************************************************************************/
+typedef struct
+{
+	RGXFWIF_FWCCB_CMD_TYPE eCmdType; /*!< Command type */
+	IMG_UINT32 ui32FWCCBFlags; /*!< Compatibility and other flags */
+
+	union
+	{
+		RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA sCmdZSBufferBacking; /*!< Data for Z/S-Buffer on-demand (un)backing*/
+		RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA sCmdFreeListGS; /*!< Data for on-demand freelist grow/shrink */
+		RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA sCmdFreeListsReconstruction; /*!< Data for freelists reconstruction */
+		RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA sCmdContextResetNotification; /*!< Data for context reset notification */
+		RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA sCmdUpdateStatsData; /*!< Data for updating process stats */
+		RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE_DATA sCmdCoreClkRateChange; /*!< Data for core clock rate change */
+		RGXFWIF_FWCCB_CMD_FW_PAGEFAULT_DATA sCmdFWPagefault; /*!< Data for FW page fault notification */
+#if defined(SUPPORT_VALIDATION)
+		RGXFWIF_FWCCB_CMD_RGXREG_READ_DATA sCmdRgxRegReadData; /*!< Data for FW register read (validation builds) */
+#if defined(SUPPORT_SOC_TIMER)
+		RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS_DATA sCmdTimers; /*!< Data for sampled SoC/USC timers */
+#endif
+#endif
+	} RGXFW_ALIGN uCmdData; /*!< Command-type-specific payload */
+} RGXFW_ALIGN RGXFWIF_FWCCB_CMD;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGXFWIF_FWCCB_CMD);
+
+/*! @} End of FWCCBTypes */
+
+/*!
+ ******************************************************************************
+ * Workload estimation Firmware CCB command structure for RGX
+ *****************************************************************************/
+typedef struct
+{
+	IMG_UINT64 RGXFW_ALIGN ui64ReturnDataIndex; /*!< Index for return data array */
+	IMG_UINT64 RGXFW_ALIGN ui64CyclesTaken; /*!< The cycles the workload took on the hardware */
+} RGXFWIF_WORKEST_FWCCB_CMD;
+
+/*!
+ * @Defgroup ClientCCBTypes Client CCB data interface + * @Brief Types grouping data structures and defines used in realising Client CCB commands/functionality + * @{ + */ + +/* Required memory alignment for 64-bit variables accessible by Meta + (The gcc meta aligns 64-bit variables to 64-bit; therefore, memory shared + between the host and meta that contains 64-bit variables has to maintain + this alignment) */ +#define RGXFWIF_FWALLOC_ALIGN sizeof(IMG_UINT64) + +#define RGX_CCB_TYPE_TASK (IMG_UINT32_C(1) << 15) +#define RGX_CCB_FWALLOC_ALIGN(size) (((size) + (RGXFWIF_FWALLOC_ALIGN-1)) & ~(RGXFWIF_FWALLOC_ALIGN - 1)) + +typedef IMG_UINT32 RGXFWIF_CCB_CMD_TYPE; + +#define RGXFWIF_CCB_CMD_TYPE_GEOM (201U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< TA DM command */ +#define RGXFWIF_CCB_CMD_TYPE_TQ_3D (202U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< 3D DM command for TQ operation */ +#define RGXFWIF_CCB_CMD_TYPE_3D (203U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< 3D DM command */ +#define RGXFWIF_CCB_CMD_TYPE_3D_PR (204U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< 3D DM command for Partial render */ +#define RGXFWIF_CCB_CMD_TYPE_CDM (205U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< Compute DM command */ +#define RGXFWIF_CCB_CMD_TYPE_TQ_TDM (206U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< TDM command */ +#define RGXFWIF_CCB_CMD_TYPE_FBSC_INVALIDATE (207U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) +#define RGXFWIF_CCB_CMD_TYPE_TQ_2D (208U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< 2D DM command for TQ operation */ +#define RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP (209U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) +#define RGXFWIF_CCB_CMD_TYPE_NULL (210U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) +#define RGXFWIF_CCB_CMD_TYPE_ABORT (211U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) + +/* Leave a gap between CCB specific commands and generic commands */ +#define 
RGXFWIF_CCB_CMD_TYPE_FENCE (212U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Fence dependencies of a command */ +#define RGXFWIF_CCB_CMD_TYPE_UPDATE (213U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Fence updates of a command */ +#define RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE (214U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Fence updates related to workload resources */ +#define RGXFWIF_CCB_CMD_TYPE_FENCE_PR (215U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Fence dependencies of a PR command */ +#define RGXFWIF_CCB_CMD_TYPE_PRIORITY (216U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Context priority update command */ +/* Pre and Post timestamp commands are supposed to sandwich the DM cmd. The + padding code with the CCB wrap upsets the FW if we don't have the task type + bit cleared for POST_TIMESTAMPs. That's why we have 2 different cmd types. +*/ +#define RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP (217U | RGX_CMD_MAGIC_DWORD_SHIFTED) +#define RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE (218U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Unfenced fence updates of a command */ +#define RGXFWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE (219U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Unfenced fence updates related to workload resources */ + +#if defined(SUPPORT_VALIDATION) +#define RGXFWIF_CCB_CMD_TYPE_REG_READ (220U | RGX_CMD_MAGIC_DWORD_SHIFTED) +#endif + +#define RGXFWIF_CCB_CMD_TYPE_PADDING (221U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Skip without action type command */ +#define RGXFWIF_CCB_CMD_TYPE_RAY (222U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) + +typedef struct +{ + /* Index for the KM Workload estimation return data array */ + IMG_UINT64 RGXFW_ALIGN ui64ReturnDataIndex; + /* Deadline for the workload */ + IMG_UINT64 RGXFW_ALIGN ui64Deadline; + /* Predicted time taken to do the work in cycles */ + IMG_UINT64 RGXFW_ALIGN ui64CyclesPrediction; +} RGXFWIF_WORKEST_KICK_DATA; + +/*! @Brief Command header of a command in the client CCB buffer. 
+ * + * Followed by this header is the command-data specific to the + * command-type as specified in the header. + */ +typedef struct +{ + RGXFWIF_CCB_CMD_TYPE eCmdType; /*!< Command data type following this command header */ + IMG_UINT32 ui32CmdSize; /*!< Size of the command following this header */ + IMG_UINT32 ui32ExtJobRef; /*!< external job reference - provided by client and used in debug for tracking submitted work */ + IMG_UINT32 ui32IntJobRef; /*!< internal job reference - generated by services and used in debug for tracking submitted work */ + RGXFWIF_WORKEST_KICK_DATA RGXFW_ALIGN sWorkEstKickData; /*!< Workload Estimation - Workload Estimation Data */ +} RGXFWIF_CCB_CMD_HEADER; + +/* + ****************************************************************************** + * Client CCB commands which are only required by the kernel + *****************************************************************************/ + +/*! @Brief Command data for \ref RGXFWIF_CCB_CMD_TYPE_PRIORITY type client CCB command */ +typedef struct +{ + IMG_UINT32 ui32Priority; +} RGXFWIF_CMD_PRIORITY; + +/*! @} End of ClientCCBTypes */ + +/*! + ****************************************************************************** + * Signature and Checksums Buffer + *****************************************************************************/ +typedef struct +{ + PRGXFWIF_SIGBUFFER sBuffer; /*!< Ptr to Signature Buffer memory */ + IMG_UINT32 ui32LeftSizeInRegs; /*!< Amount of space left for storing regs in the buffer */ +} UNCACHED_ALIGN RGXFWIF_SIGBUF_CTL; + +typedef struct +{ + PRGXFWIF_FIRMWAREGCOVBUFFER sBuffer; /*!< Ptr to firmware gcov buffer */ + IMG_UINT32 ui32Size; /*!< Amount of space for storing in the buffer */ +} UNCACHED_ALIGN RGXFWIF_FIRMWARE_GCOV_CTL; + +/*! 
+ ***************************************************************************** + * RGX Compatibility checks + *****************************************************************************/ + +/* WARNING: Whenever the layout of RGXFWIF_COMPCHECKS_BVNC is a subject of change, + following define should be increased by 1 to indicate to compatibility logic, + that layout has changed */ +#define RGXFWIF_COMPCHECKS_LAYOUT_VERSION 3 + +typedef struct +{ + IMG_UINT32 ui32LayoutVersion; /* WARNING: This field must be defined as first one in this structure */ + IMG_UINT64 RGXFW_ALIGN ui64BVNC; +} UNCACHED_ALIGN RGXFWIF_COMPCHECKS_BVNC; + +typedef struct +{ + IMG_UINT8 ui8OsCountSupport; +} UNCACHED_ALIGN RGXFWIF_INIT_OPTIONS; + +#define RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(name) \ + RGXFWIF_COMPCHECKS_BVNC (name) = { \ + RGXFWIF_COMPCHECKS_LAYOUT_VERSION, \ + 0, \ + } +#define RGXFWIF_COMPCHECKS_BVNC_INIT(name) \ + do { \ + (name).ui32LayoutVersion = RGXFWIF_COMPCHECKS_LAYOUT_VERSION; \ + (name).ui64BVNC = 0; \ + } while (0) + +typedef struct +{ + RGXFWIF_COMPCHECKS_BVNC sHWBVNC; /*!< hardware BVNC (from the RGX registers) */ + RGXFWIF_COMPCHECKS_BVNC sFWBVNC; /*!< firmware BVNC */ + IMG_UINT32 ui32FWProcessorVersion; /*!< identifier of the FW processor version */ + IMG_UINT32 ui32DDKVersion; /*!< software DDK version */ + IMG_UINT32 ui32DDKBuild; /*!< software DDK build no. */ + IMG_UINT32 ui32BuildOptions; /*!< build options bit-field */ + RGXFWIF_INIT_OPTIONS sInitOptions; /*!< initialisation options bit-field */ + IMG_BOOL bUpdated; /*!< Information is valid */ +} UNCACHED_ALIGN RGXFWIF_COMPCHECKS; + +/*! + ****************************************************************************** + * Updated configuration post FW data init. 
+ *****************************************************************************/ +typedef struct +{ + IMG_UINT32 ui32ActivePMLatencyms; /* APM latency in ms before signalling IDLE to the host */ + IMG_UINT32 ui32RuntimeCfgFlags; /* Compatibility and other flags */ + IMG_BOOL bActivePMLatencyPersistant; /* If set, APM latency does not reset to system default each GPU power transition */ + IMG_UINT32 ui32CoreClockSpeed; /* Core clock speed, currently only used to calculate timer ticks */ + IMG_UINT32 ui32PowUnitsStateMask; /* Power Unit state mask set by the host */ + IMG_UINT32 ui32PHRMode; /* Periodic Hardware Reset configuration values */ + IMG_UINT32 ui32HCSDeadlineMS; /* New number of milliseconds C/S is allowed to last */ + IMG_UINT32 ui32WdgPeriodUs; /* The watchdog period in microseconds */ + IMG_UINT32 aui32OSidPriority[RGXFW_MAX_NUM_OS]; /*!< Array of priorities per OS */ + PRGXFWIF_HWPERFBUF sHWPerfBuf; /* On-demand allocated HWPerf buffer address, to be passed to the FW */ + RGXFWIF_DMA_ADDR sHWPerfDMABuf; +} RGXFWIF_RUNTIME_CFG; + +/*! + ***************************************************************************** + * Control data for RGX + *****************************************************************************/ + +#define RGXFWIF_HWR_DEBUG_DUMP_ALL (99999U) + +#if defined(PDUMP) + +#define RGXFWIF_PID_FILTER_MAX_NUM_PIDS 32U + +typedef enum +{ + RGXFW_PID_FILTER_INCLUDE_ALL_EXCEPT, + RGXFW_PID_FILTER_EXCLUDE_ALL_EXCEPT +} RGXFWIF_PID_FILTER_MODE; + +typedef struct +{ + IMG_PID uiPID; + IMG_UINT32 ui32OSID; +} RGXFW_ALIGN RGXFWIF_PID_FILTER_ITEM; + +typedef struct +{ + RGXFWIF_PID_FILTER_MODE eMode; + /* each process in the filter list is specified by a PID and OS ID pair. + * each PID and OS pair is an item in the items array (asItems). + * if the array contains less than RGXFWIF_PID_FILTER_MAX_NUM_PIDS entries + * then it must be terminated by an item with pid of zero. 
+ */ + RGXFWIF_PID_FILTER_ITEM asItems[RGXFWIF_PID_FILTER_MAX_NUM_PIDS]; +} RGXFW_ALIGN RGXFWIF_PID_FILTER; +#endif + +#if defined(SUPPORT_SECURITY_VALIDATION) +#define RGXFWIF_SECURE_ACCESS_TEST_READ_WRITE_FW_DATA (0x1U << 0) +#define RGXFWIF_SECURE_ACCESS_TEST_READ_WRITE_FW_CODE (0x1U << 1) +#define RGXFWIF_SECURE_ACCESS_TEST_RUN_FROM_NONSECURE (0x1U << 2) +#define RGXFWIF_SECURE_ACCESS_TEST_RUN_FROM_SECURE (0x1U << 3) +#endif + +typedef enum +{ + RGXFWIF_USRM_DM_VDM = 0, + RGXFWIF_USRM_DM_DDM = 1, + RGXFWIF_USRM_DM_CDM = 2, + RGXFWIF_USRM_DM_PDM = 3, + RGXFWIF_USRM_DM_TDM = 4, + RGXFWIF_USRM_DM_LAST +} RGXFWIF_USRM_DM; + +typedef enum +{ + RGXFWIF_UVBRM_DM_VDM = 0, + RGXFWIF_UVBRM_DM_DDM = 1, + RGXFWIF_UVBRM_DM_LAST +} RGXFWIF_UVBRM_DM; + +typedef enum +{ + RGXFWIF_TPU_DM_PDM = 0, + RGXFWIF_TPU_DM_VDM = 1, + RGXFWIF_TPU_DM_CDM = 2, + RGXFWIF_TPU_DM_TDM = 3, + RGXFWIF_TPU_DM_LAST +} RGXFWIF_TPU_DM; + +typedef enum +{ + RGXFWIF_GPIO_VAL_OFF = 0, /*!< No GPIO validation */ + RGXFWIF_GPIO_VAL_GENERAL = 1, /*!< Simple test case that + initiates by sending data via the + GPIO and then sends back any data + received over the GPIO */ + RGXFWIF_GPIO_VAL_AP = 2, /*!< More complex test case that writes + and reads data across the entire + GPIO AP address range.*/ +#if defined(SUPPORT_STRIP_RENDERING) + RGXFWIF_GPIO_VAL_SR_BASIC = 3, /*!< Strip Rendering AP based basic test.*/ + RGXFWIF_GPIO_VAL_SR_COMPLEX = 4, /*!< Strip Rendering AP based complex test.*/ +#endif + RGXFWIF_GPIO_VAL_TESTBENCH = 5, /*!< Validates the GPIO Testbench. 
*/ + RGXFWIF_GPIO_VAL_LAST +} RGXFWIF_GPIO_VAL_MODE; + +typedef enum +{ + FW_PERF_CONF_NONE = 0, + FW_PERF_CONF_ICACHE = 1, + FW_PERF_CONF_DCACHE = 2, + FW_PERF_CONF_POLLS = 3, + FW_PERF_CONF_CUSTOM_TIMER = 4, + FW_PERF_CONF_JTLB_INSTR = 5, + FW_PERF_CONF_INSTRUCTIONS = 6 +} FW_PERF_CONF; + +typedef enum +{ + FW_BOOT_STAGE_TLB_INIT_FAILURE = -2, + FW_BOOT_STAGE_NOT_AVAILABLE = -1, + FW_BOOT_NOT_STARTED = 0, + FW_BOOT_BLDR_STARTED = 1, + FW_BOOT_CACHE_DONE, + FW_BOOT_TLB_DONE, + FW_BOOT_MAIN_STARTED, + FW_BOOT_ALIGNCHECKS_DONE, + FW_BOOT_INIT_DONE, +} FW_BOOT_STAGE; + +/* + * Kernel CCB return slot responses. Usage of bit-fields instead of bare integers + * allows FW to possibly pack-in several responses for each single kCCB command. + */ +#define RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED (1U << 0) /* Command executed (return status from FW) */ +#define RGXFWIF_KCCB_RTN_SLOT_CLEANUP_BUSY (1U << 1) /* A cleanup was requested but resource busy */ +#define RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE (1U << 2) /* Poll failed in FW for a HW operation to complete */ + +#define RGXFWIF_KCCB_RTN_SLOT_NO_RESPONSE 0x0U /* Reset value of a kCCB return slot (set by host) */ + +typedef struct +{ + /* Fw-Os connection states */ + volatile RGXFWIF_CONNECTION_FW_STATE eConnectionFwState; + volatile RGXFWIF_CONNECTION_OS_STATE eConnectionOsState; + volatile IMG_UINT32 ui32AliveFwToken; + volatile IMG_UINT32 ui32AliveOsToken; +} UNCACHED_ALIGN RGXFWIF_CONNECTION_CTL; + +/*! 
@Brief Firmware OS Initialization data \ref RGXFWIF_OSINIT + * allocated by services and used by the Firmware on boot + **/ +typedef struct +{ + /* Kernel CCB */ + PRGXFWIF_CCB_CTL psKernelCCBCtl; /*!< Kernel CCB Control */ + PRGXFWIF_CCB psKernelCCB; /*!< Kernel CCB */ + PRGXFWIF_CCB_RTN_SLOTS psKernelCCBRtnSlots; /*!< Kernel CCB return slots */ + + /* Firmware CCB */ + PRGXFWIF_CCB_CTL psFirmwareCCBCtl; /*!< Firmware CCB control */ + PRGXFWIF_CCB psFirmwareCCB; /*!< Firmware CCB */ + + /* Workload Estimation Firmware CCB */ + PRGXFWIF_CCB_CTL psWorkEstFirmwareCCBCtl; /*!< Workload estimation control */ + PRGXFWIF_CCB psWorkEstFirmwareCCB; /*!< Workload estimation buffer */ + + PRGXFWIF_HWRINFOBUF sRGXFWIfHWRInfoBufCtl; /*!< HWRecoveryInfo control */ + + IMG_UINT32 ui32HWRDebugDumpLimit; /*!< Firmware debug dump maximum limit */ + + PRGXFWIF_OSDATA sFwOsData; /*!< Firmware per-os shared data */ + + RGXFWIF_COMPCHECKS sRGXCompChecks; /*!< Compatibility checks to be populated by the Firmware */ + +} UNCACHED_ALIGN RGXFWIF_OSINIT; + +/*! 
@Brief Firmware System Initialization data \ref RGXFWIF_SYSINIT + * allocated by services and used by the Firmware on boot + **/ +typedef struct +{ + IMG_DEV_PHYADDR RGXFW_ALIGN sFaultPhysAddr; /*!< Fault read address */ + + IMG_DEV_VIRTADDR RGXFW_ALIGN sPDSExecBase; /*!< PDS execution base */ + IMG_DEV_VIRTADDR RGXFW_ALIGN sUSCExecBase; /*!< USC execution base */ + IMG_DEV_VIRTADDR RGXFW_ALIGN sFBCDCStateTableBase; /*!< FBCDC bindless texture state table base */ + IMG_DEV_VIRTADDR RGXFW_ALIGN sFBCDCLargeStateTableBase; + IMG_DEV_VIRTADDR RGXFW_ALIGN sTextureHeapBase; /*!< Texture state base */ + IMG_DEV_VIRTADDR RGXFW_ALIGN sPDSIndirectHeapBase; /* Pixel Indirect State base */ + + IMG_UINT32 ui32FilterFlags; + + RGXFWIF_SIGBUF_CTL asSigBufCtl[RGXFWIF_DM_DEFAULT_MAX]; /*!< Signature and Checksum Buffers for DMs */ +#if defined(SUPPORT_VALIDATION) + RGXFWIF_SIGBUF_CTL asValidationSigBufCtl[RGXFWIF_DM_DEFAULT_MAX]; + IMG_UINT64 RGXFW_ALIGN ui64RCEDisableMask; +#endif + + PRGXFWIF_RUNTIME_CFG sRuntimeCfg; /*!< Firmware Runtime configuration */ + + PRGXFWIF_TRACEBUF sTraceBufCtl; /*!< Firmware Trace buffer control */ + PRGXFWIF_SYSDATA sFwSysData; /*!< Firmware System shared data */ +#if defined(SUPPORT_TBI_INTERFACE) + PRGXFWIF_TBIBUF sTBIBuf; /*!< Tbi log buffer */ +#endif + IMG_UINT64 RGXFW_ALIGN ui64HWPerfFilter; /*! 
Event filter for Firmware events */ + + PRGXFWIF_GPU_UTIL_FWCB sGpuUtilFWCbCtl; /*!< GPU utilization buffer */ + PRGXFWIF_REG_CFG sRegCfg; /*!< Firmware register user configuration */ + PRGXFWIF_HWPERF_CTL sHWPerfCtl; /*!< HWPerf counter block configuration.*/ + +#if defined(SUPPORT_FIRMWARE_GCOV) + RGXFWIF_FIRMWARE_GCOV_CTL sFirmwareGcovCtl; /*!< Firmware gcov buffer control */ +#endif + + RGXFWIF_DEV_VIRTADDR sAlignChecks; /*!< Array holding Server structures alignment data */ + + IMG_UINT32 ui32InitialCoreClockSpeed; /*!< Core clock speed at FW boot time */ + + IMG_UINT32 ui32InitialActivePMLatencyms; /*!< APM latency in ms before signalling IDLE to the host */ + + IMG_BOOL bFirmwareStarted; /*!< Flag to be set by the Firmware after successful start */ + + IMG_UINT32 ui32MarkerVal; /*!< Host/FW Trace synchronisation Partition Marker */ + + IMG_UINT32 ui32FirmwareStartedTimeStamp; /*!< Firmware initialization complete time */ + + IMG_UINT32 ui32JonesDisableMask; + + RGXFWIF_DMA_ADDR sCorememDataStore; /*!< Firmware coremem data */ + + FW_PERF_CONF eFirmwarePerf; /*!< Firmware performance counter config */ + + IMG_DEV_VIRTADDR RGXFW_ALIGN sSLC3FenceDevVAddr; /*!< Address to use as a fence when issuing SLC3_CFI */ + +#if defined(SUPPORT_PDVFS) + RGXFWIF_PDVFS_OPP RGXFW_ALIGN sPDVFSOPPInfo; + + /** + * FW Pointer to memory containing core clock rate in Hz. + * Firmware (PDVFS) updates the memory when running on non primary FW thread + * to communicate to host driver. 
+ */ + PRGXFWIF_CORE_CLK_RATE RGXFW_ALIGN sCoreClockRate; +#endif + +#if defined(PDUMP) + RGXFWIF_PID_FILTER sPIDFilter; +#endif + + RGXFWIF_GPIO_VAL_MODE eGPIOValidationMode; + IMG_UINT32 RGXFW_ALIGN aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_LAST]; + IMG_UINT32 RGXFW_ALIGN aui32USRMNumRegions[RGXFWIF_USRM_DM_LAST]; + IMG_UINT64 RGXFW_ALIGN aui64UVBRMNumRegions[RGXFWIF_UVBRM_DM_LAST]; + + RGX_HWPERF_BVNC sBvncKmFeatureFlags; /*!< Used in HWPerf for decoding BVNC Features*/ + +#if defined(SUPPORT_SECURITY_VALIDATION) + IMG_UINT32 ui32SecurityTestFlags; + RGXFWIF_DEV_VIRTADDR pbSecureBuffer; + RGXFWIF_DEV_VIRTADDR pbNonSecureBuffer; +#endif + +#if defined(SUPPORT_GPUVIRT_VALIDATION) + /* + * Used when validation is enabled to allow the host to check + * that MTS sent the correct sideband in response to a kick + * from a given OSes schedule register. + * Testing is enabled if RGXFWIF_KICK_TEST_ENABLED_BIT is set + * + * Set by the host to: + * (osid << RGXFWIF_KICK_TEST_OSID_SHIFT) | RGXFWIF_KICK_TEST_ENABLED_BIT + * reset to 0 by FW when kicked by the given OSid + */ + IMG_UINT32 ui32OSKickTest; +#endif + +} UNCACHED_ALIGN RGXFWIF_SYSINIT; + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +#define RGXFWIF_KICK_TEST_ENABLED_BIT 0x1 +#define RGXFWIF_KICK_TEST_OSID_SHIFT 0x1 +#endif + +/*! 
+ ***************************************************************************** + * Timer correlation shared data and defines + *****************************************************************************/ + +typedef struct +{ + IMG_UINT64 RGXFW_ALIGN ui64OSTimeStamp; + IMG_UINT64 RGXFW_ALIGN ui64OSMonoTimeStamp; + IMG_UINT64 RGXFW_ALIGN ui64CRTimeStamp; + + /* Utility variable used to convert CR timer deltas to OS timer deltas (nS), + * where the deltas are relative to the timestamps above: + * deltaOS = (deltaCR * K) >> decimal_shift, see full explanation below */ + IMG_UINT64 RGXFW_ALIGN ui64CRDeltaToOSDeltaKNs; + + IMG_UINT32 ui32CoreClockSpeed; + IMG_UINT32 ui32Reserved; +} UNCACHED_ALIGN RGXFWIF_TIME_CORR; + + +/* The following macros are used to help converting FW timestamps to the Host + * time domain. On the FW the RGX_CR_TIMER counter is used to keep track of + * time; it increments by 1 every 256 GPU clock ticks, so the general + * formula to perform the conversion is: + * + * [ GPU clock speed in Hz, if (scale == 10^9) then deltaOS is in nS, + * otherwise if (scale == 10^6) then deltaOS is in uS ] + * + * deltaCR * 256 256 * scale + * deltaOS = --------------- * scale = deltaCR * K [ K = --------------- ] + * GPUclockspeed GPUclockspeed + * + * The actual K is multiplied by 2^20 (and deltaCR * K is divided by 2^20) + * to get some better accuracy and to avoid returning 0 in the integer + * division 256000000/GPUfreq if GPUfreq is greater than 256MHz. + * This is the same as keeping K as a decimal number. + * + * The maximum deltaOS is slightly more than 5hrs for all GPU frequencies + * (deltaCR * K is more or less a constant), and it's relative to the base + * OS timestamp sampled as a part of the timer correlation data. 
+ * This base is refreshed on GPU power-on, DVFS transition and periodic + * frequency calibration (executed every few seconds if the FW is doing + * some work), so as long as the GPU is doing something and one of these + * events is triggered then deltaCR * K will not overflow and deltaOS will be + * correct. + */ + +#define RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT (20) + +#define RGXFWIF_GET_DELTA_OSTIME_NS(deltaCR, K) \ + (((deltaCR) * (K)) >> RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT) + + +/*! + ****************************************************************************** + * GPU Utilisation + *****************************************************************************/ + +/* See rgx_common.h for a list of GPU states */ +#define RGXFWIF_GPU_UTIL_TIME_MASK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF) & ~RGXFWIF_GPU_UTIL_STATE_MASK) + +#define RGXFWIF_GPU_UTIL_GET_TIME(word) ((word) & RGXFWIF_GPU_UTIL_TIME_MASK) +#define RGXFWIF_GPU_UTIL_GET_STATE(word) ((word) & RGXFWIF_GPU_UTIL_STATE_MASK) + +/* The OS timestamps computed by the FW are approximations of the real time, + * which means they could be slightly behind or ahead the real timer on the Host. + * In some cases we can perform subtractions between FW approximated + * timestamps and real OS timestamps, so we need a form of protection against + * negative results if for instance the FW one is a bit ahead of time. + */ +#define RGXFWIF_GPU_UTIL_GET_PERIOD(newtime,oldtime) \ + (((newtime) > (oldtime)) ? ((newtime) - (oldtime)) : 0U) + +#define RGXFWIF_GPU_UTIL_MAKE_WORD(time,state) \ + (RGXFWIF_GPU_UTIL_GET_TIME(time) | RGXFWIF_GPU_UTIL_GET_STATE(state)) + + +/* The timer correlation array must be big enough to ensure old entries won't be + * overwritten before all the HWPerf events linked to those entries are processed + * by the MISR. The update frequency of this array depends on how fast the system + * can change state (basically how small the APM latency is) and perform DVFS transitions. 
+ * + * The minimum size is 2 (not 1) to avoid race conditions between the FW reading + * an entry while the Host is updating it. With 2 entries in the worst case the FW + * will read old data, which is still quite ok if the Host is updating the timer + * correlation at that time. + */ +#define RGXFWIF_TIME_CORR_ARRAY_SIZE 256U +#define RGXFWIF_TIME_CORR_CURR_INDEX(seqcount) ((seqcount) % RGXFWIF_TIME_CORR_ARRAY_SIZE) + +/* Make sure the timer correlation array size is a power of 2 */ +static_assert((RGXFWIF_TIME_CORR_ARRAY_SIZE & (RGXFWIF_TIME_CORR_ARRAY_SIZE - 1U)) == 0U, + "RGXFWIF_TIME_CORR_ARRAY_SIZE must be a power of two"); + +typedef struct +{ + RGXFWIF_TIME_CORR sTimeCorr[RGXFWIF_TIME_CORR_ARRAY_SIZE]; + IMG_UINT32 ui32TimeCorrSeqCount; + + /* Last GPU state + OS time of the last state update */ + IMG_UINT64 RGXFW_ALIGN ui64LastWord; + + /* Counters for the amount of time the GPU was active/idle/blocked */ + IMG_UINT64 RGXFW_ALIGN aui64StatsCounters[RGXFWIF_GPU_UTIL_STATE_NUM]; + + IMG_UINT32 ui32GpuUtilFlags; /* Compatibility and other flags */ +} UNCACHED_ALIGN RGXFWIF_GPU_UTIL_FWCB; + + +typedef struct +{ + IMG_UINT32 ui32RenderTargetIndex; //Render number + IMG_UINT32 ui32CurrentRenderTarget; //index in RTA + IMG_UINT32 ui32ActiveRenderTargets; //total active RTs + RGXFWIF_DEV_VIRTADDR sValidRenderTargets; //Array of valid RT indices + RGXFWIF_DEV_VIRTADDR sRTANumPartialRenders; //Array of number of occurred partial renders per render target + IMG_UINT32 ui32MaxRTs; //Number of render targets in the array + IMG_UINT32 ui32RTACtlFlags; /* Compatibility and other flags */ +} UNCACHED_ALIGN RGXFWIF_RTA_CTL; + +typedef struct +{ + IMG_DEV_VIRTADDR RGXFW_ALIGN sFreeListBaseDevVAddr; /*!< Free list device base address */ + IMG_DEV_VIRTADDR RGXFW_ALIGN sFreeListStateDevVAddr; /*!< Free list state buffer */ + IMG_DEV_VIRTADDR RGXFW_ALIGN sFreeListLastGrowDevVAddr; /*!< Free list base address at last grow */ + +#if defined(PM_INTERACTIVE_MODE) + IMG_UINT64 
RGXFW_ALIGN ui64CurrentDevVAddr; + IMG_UINT32 ui32CurrentStackTop; +#endif + + IMG_UINT32 ui32MaxPages; + IMG_UINT32 ui32GrowPages; + IMG_UINT32 ui32CurrentPages; /* HW pages */ +#if defined(PM_INTERACTIVE_MODE) + IMG_UINT32 ui32AllocatedPageCount; + IMG_UINT32 ui32AllocatedMMUPageCount; +#endif +#if defined(SUPPORT_SHADOW_FREELISTS) + IMG_UINT32 ui32HWRCounter; + PRGXFWIF_FWMEMCONTEXT psFWMemContext; +#endif + IMG_UINT32 ui32FreeListID; + IMG_BOOL bGrowPending; + IMG_UINT32 ui32ReadyPages; /* Pages that should be used only when OOM is reached */ + IMG_UINT32 ui32FreelistFlags; /* Compatibility and other flags */ + + IMG_BOOL bUpdatePending; + IMG_UINT32 ui32UpdateNewPages; + IMG_UINT32 ui32UpdateNewReadyPages; +} UNCACHED_ALIGN RGXFWIF_FREELIST; + + + +#if defined(SUPPORT_SW_TRP) +#define SW_TRP_SIGNATURE_FIRST_KICK 0U +#define SW_TRP_SIGNATURE_SECOND_KICK 1U +#define SW_TRP_SIGNATURE_COUNT 2U +#define SW_TRP_GEOMETRY_SIGNATURE_SIZE 8U +#define SW_TRP_FRAGMENT_SIGNATURE_SIZE 8U +/* Space for tile usage bitmap, one bit per tile on screen */ +#define RGX_FEATURE_TILE_SIZE_X (32U) +#define RGX_FEATURE_TILE_SIZE_Y (32U) +#define SW_TRP_TILE_USED_SIZE ((ROGUE_RENDERSIZE_MAXX / RGX_FEATURE_TILE_SIZE_X + ROGUE_RENDERSIZE_MAXY / RGX_FEATURE_TILE_SIZE_Y) / (8U * sizeof(IMG_UINT32))) +#endif + +/*! + ****************************************************************************** + * Parameter Management (PM) control data for RGX + *****************************************************************************/ +typedef enum +{ + RGXFW_SPM_STATE_NONE = 0, + RGXFW_SPM_STATE_PR_BLOCKED, + RGXFW_SPM_STATE_WAIT_FOR_GROW, + RGXFW_SPM_STATE_WAIT_FOR_HW, + RGXFW_SPM_STATE_PR_RUNNING, + RGXFW_SPM_STATE_PR_AVOIDED, + RGXFW_SPM_STATE_PR_EXECUTED, + RGXFW_SPM_STATE_PR_FORCEFREE, +} RGXFW_SPM_STATE; + +/*! 
+ ****************************************************************************** + * @Brief RGX firmware SPM Control Data: + * This structure holds all the internal SPM control Data of the firmware. + *****************************************************************************/ +typedef struct +{ + IMG_CHAR RGXFW_ALIGN_DCACHEL align[1]; /*!< Make sure the structure is aligned to the dcache line */ + + RGXFW_SPM_STATE eSPMState; /*!< Current state of TA OOM event */ + RGXFWIF_UFO sPartialRenderTA3DFence; /*!< TA/3D fence object holding the value to let through the 3D partial command */ +#if defined(RGX_FIRMWARE) + RGXFWIF_FWCOMMONCONTEXT *ps3dContext; /*!< Pointer to the 3D Context holding the partial render */ + RGXFWIF_CCB_CMD_HEADER *psCmdHeader; /*!< Pointer to the header of the command holding the partial render */ + struct RGXFWIF_CMD3D_STRUCT *ps3DCmd; /*!< Pointer to the 3D command holding the partial render register info */ + RGXFWIF_PRBUFFER *apsPRBuffer[RGXFWIF_PRBUFFER_MAXSUPPORTED]; /*!< Array of pointers to PR Buffers which may be used if partial render is needed */ +#else + RGXFWIF_DEV_VIRTADDR ps3dContext; /*!< Pointer to the 3D Context holding the partial render */ + RGXFWIF_DEV_VIRTADDR psCmdHeader; /*!< Pointer to the header of the command holding the partial render */ + RGXFWIF_DEV_VIRTADDR ps3DCmd; /*!< Pointer to the 3D command holding the partial render register info*/ + RGXFWIF_DEV_VIRTADDR apsPRBuffer[RGXFWIF_PRBUFFER_MAXSUPPORTED]; /*!< Array of pointers to PR Buffers which may be used if partial render is needed */ +#endif + RGXFW_FREELIST_TYPE eOOMFreeListType; /*!< Indicates the freelist type that went out of memory */ + bool b3DMemFreeDetected; /*!< Indicates if a 3D Memory Free has been detected, which resolves OOM */ +} RGXFW_SPMCTL; + +/*!
+ ****************************************************************************** + * HWRTData + *****************************************************************************/ + +/* HWRTData flags */ +/* Deprecated flags 1:0 */ +#define HWRTDATA_HAS_LAST_TA (1U << 2) +#define HWRTDATA_PARTIAL_RENDERED (1U << 3) +#define HWRTDATA_KILLED (1U << 4) +#define HWRTDATA_KILL_AFTER_TARESTART (1U << 5) + +typedef enum +{ + RGXFWIF_RTDATA_STATE_NONE = 0, + RGXFWIF_RTDATA_STATE_KICKTA, + RGXFWIF_RTDATA_STATE_KICKTAFIRST, + RGXFWIF_RTDATA_STATE_TAFINISHED, + RGXFWIF_RTDATA_STATE_KICK3D, + RGXFWIF_RTDATA_STATE_3DFINISHED, + RGXFWIF_RTDATA_STATE_3DCONTEXTSTORED, + RGXFWIF_RTDATA_STATE_TAOUTOFMEM, + RGXFWIF_RTDATA_STATE_PARTIALRENDERFINISHED, + /* In case of HWR, we can't set the RTDATA state to NONE, + * as this will cause any TA to become a first TA. + * To ensure all related TA's are skipped, we use the HWR state */ + RGXFWIF_RTDATA_STATE_HWR, + RGXFWIF_RTDATA_STATE_UNKNOWN = 0x7FFFFFFFU +} RGXFWIF_RTDATA_STATE; + +typedef struct +{ + IMG_BOOL bTACachesNeedZeroing; + +} UNCACHED_ALIGN RGXFWIF_HWRTDATA_COMMON; + +typedef struct +{ + RGXFWIF_DEV_VIRTADDR sHWRTDataCommonFwAddr; + + IMG_UINT32 ui32HWRTDataFlags; + RGXFWIF_RTDATA_STATE eState; + + + IMG_UINT64 RGXFW_ALIGN ui64VCECatBase[4]; + IMG_UINT64 RGXFW_ALIGN ui64VCELastCatBase[4]; + IMG_UINT64 RGXFW_ALIGN ui64TECatBase[4]; + IMG_UINT64 RGXFW_ALIGN ui64TELastCatBase[4]; + IMG_UINT64 RGXFW_ALIGN ui64AlistCatBase; + IMG_UINT64 RGXFW_ALIGN ui64AlistLastCatBase; + +#if defined(PM_INTERACTIVE_MODE) + IMG_DEV_VIRTADDR RGXFW_ALIGN psVHeapTableDevVAddr; + IMG_DEV_VIRTADDR RGXFW_ALIGN sPMMListDevVAddr; +#else + /* Series8 PM State buffers */ + IMG_DEV_VIRTADDR RGXFW_ALIGN sPMRenderStateDevVAddr; + IMG_DEV_VIRTADDR RGXFW_ALIGN sPMSecureRenderStateDevVAddr; +#endif + + PRGXFWIF_FREELIST RGXFW_ALIGN apsFreeLists[RGXFW_MAX_FREELISTS]; + IMG_UINT32 aui32FreeListHWRSnapshot[RGXFW_MAX_FREELISTS]; + IMG_BOOL bRenderStateNeedsReset; + + 
RGXFWIF_CLEANUP_CTL sCleanupState; + + RGXFWIF_RTA_CTL sRTACtl; + + IMG_UINT32 ui32ScreenPixelMax; + IMG_UINT64 RGXFW_ALIGN ui64PPPMultiSampleCtl; + IMG_UINT32 ui32TEStride; + IMG_DEV_VIRTADDR RGXFW_ALIGN sTailPtrsDevVAddr; + IMG_UINT32 ui32TPCSize; + IMG_UINT32 ui32TEScreen; + IMG_UINT32 ui32TEAA; + IMG_UINT32 ui32TEMTILE1; + IMG_UINT32 ui32TEMTILE2; + IMG_UINT32 ui32RgnStride; + IMG_UINT32 ui32ISPMergeLowerX; + IMG_UINT32 ui32ISPMergeLowerY; + IMG_UINT32 ui32ISPMergeUpperX; + IMG_UINT32 ui32ISPMergeUpperY; + IMG_UINT32 ui32ISPMergeScaleX; + IMG_UINT32 ui32ISPMergeScaleY; +#if defined(RGX_FIRMWARE) + struct RGXFWIF_FWCOMMONCONTEXT_* psOwnerGeom; +#else + RGXFWIF_DEV_VIRTADDR pui32OwnerGeomNotUsedByHost; +#endif + +#if defined(PM_INTERACTIVE_MODE) + IMG_UINT64 RGXFW_ALIGN ui64PMAListStackPointer; + IMG_UINT32 ui32PMMListStackPointer; +#endif +#if defined(SUPPORT_SW_TRP) + /* SW-TRP state and signature data + * + * Stored state is used to kick the same geometry or 3D twice, + * State is stored before first kick and restored before second to rerun the same data. + * Signatures from both kicks are stored and compared */ + IMG_UINT32 aaui32GeometrySignature[SW_TRP_SIGNATURE_COUNT][SW_TRP_GEOMETRY_SIGNATURE_SIZE]; + IMG_UINT32 aaui32FragmentSignature[SW_TRP_SIGNATURE_COUNT][SW_TRP_FRAGMENT_SIGNATURE_SIZE]; + IMG_UINT32 ui32KickFlagsCopy; + IMG_UINT32 ui32SW_TRPState; + IMG_UINT32 aui32TileUsed[SW_TRP_TILE_USED_SIZE]; + RGXFW_SPMCTL sSPMCtlCopy; +#endif +} UNCACHED_ALIGN RGXFWIF_HWRTDATA; + +/* Sync_checkpoint firmware object. + * This is the FW-addressable structure use to hold the sync checkpoint's + * state and other information which needs to be accessed by the firmware. 
+ */ +typedef struct +{ + IMG_UINT32 ui32State; /*!< Holds the current state of the sync checkpoint */ + IMG_UINT32 ui32FwRefCount; /*!< Holds the FW reference count (num of fences/updates processed) */ +} SYNC_CHECKPOINT_FW_OBJ; + +/* Bit mask Firmware can use to test if a checkpoint has signalled or errored */ +#define SYNC_CHECKPOINT_SIGNALLED_MASK (0x1 << 0) + +#endif /* RGX_FWIF_KM_H */ + +/****************************************************************************** + End of file (rgx_fwif_km.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgx_fwif_resetframework.h b/drivers/gpu/drm/phytium/octopus/rgx_fwif_resetframework.h new file mode 100644 index 000000000000..601d3888242e --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgx_fwif_resetframework.h @@ -0,0 +1,74 @@ +/*************************************************************************/ /*! +@File rgx_fwif_resetframework.h +@Title Post-reset work-around framework FW interface +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(RGX_FWIF_RESETFRAMEWORK_H) +#define RGX_FWIF_RESETFRAMEWORK_H + +#include "img_types.h" +#include "rgx_fwif_shared.h" + +typedef struct +{ +#if defined(RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT) && (RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT == 2) + IMG_UINT64 uCDMReg_CDM_CB_QUEUE; + IMG_UINT64 uCDMReg_CDM_CB_BASE; + IMG_UINT64 uCDMReg_CDM_CB; +#else + IMG_UINT64 uCDMReg_CDM_CTRL_STREAM_BASE; +#endif +} RGXFWIF_RF_REGISTERS; + +#define RGXFWIF_RF_FLAG_ENABLE 0x00000001U /*!< enables the reset framework in the firmware */ + +typedef struct +{ + IMG_UINT32 ui32Flags; + + /* THIS MUST BE THE LAST MEMBER OF THE CONTAINING STRUCTURE */ + RGXFWIF_RF_REGISTERS RGXFW_ALIGN sFWRegisters; + +} RGXFWIF_RF_CMD; + +/* to opaquely allocate and copy in the kernel */ +#define RGXFWIF_RF_CMD_SIZE sizeof(RGXFWIF_RF_CMD) + +#endif /* RGX_FWIF_RESETFRAMEWORK_H */ diff --git a/drivers/gpu/drm/phytium/octopus/rgx_fwif_sf.h b/drivers/gpu/drm/phytium/octopus/rgx_fwif_sf.h new file mode 100644 index 000000000000..7aadfe62d23a --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgx_fwif_sf.h @@ -0,0 +1,898 @@ +/*************************************************************************/ /*! +@File rgx_fwif_sf.h +@Title RGX firmware interface string format specifiers +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Header for the rgx firmware logging messages. The following + list are the messages the firmware prints. Changing anything + but the first column or spelling mistakes in the strings will + break compatibility with log files created with older/newer + firmware versions. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifndef RGX_FWIF_SF_H +#define RGX_FWIF_SF_H + +/****************************************************************************** + * *DO*NOT* rearrange or delete lines in SFIDLIST or SFGROUPLIST or you + * WILL BREAK fw tracing message compatibility with previous + * fw versions. Only add new ones, if so required. + *****************************************************************************/ +/* Available log groups */ +#define RGXFW_LOG_SFGROUPLIST \ + X(RGXFW_GROUP_NULL,NULL) \ + X(RGXFW_GROUP_MAIN,MAIN) \ + X(RGXFW_GROUP_CLEANUP,CLEANUP) \ + X(RGXFW_GROUP_CSW,CSW) \ + X(RGXFW_GROUP_PM, PM) \ + X(RGXFW_GROUP_RTD,RTD) \ + X(RGXFW_GROUP_SPM,SPM) \ + X(RGXFW_GROUP_MTS,MTS) \ + X(RGXFW_GROUP_BIF,BIF) \ + X(RGXFW_GROUP_MISC,MISC) \ + X(RGXFW_GROUP_POW,POW) \ + X(RGXFW_GROUP_HWR,HWR) \ + X(RGXFW_GROUP_HWP,HWP) \ + X(RGXFW_GROUP_RPM,RPM) \ + X(RGXFW_GROUP_DMA,DMA) \ + X(RGXFW_GROUP_DBG,DBG) + +enum RGXFW_LOG_SFGROUPS { +#define X(A,B) A, + RGXFW_LOG_SFGROUPLIST +#undef X +}; + +#define IMG_SF_STRING_MAX_SIZE 256U + +typedef struct { + IMG_UINT32 ui32Id; + IMG_CHAR sName[IMG_SF_STRING_MAX_SIZE]; +} RGXFW_STID_FMT; /* pair of string format id and string formats */ + +typedef struct { + IMG_UINT32 ui32Id; + const IMG_CHAR *psName; +} RGXKM_STID_FMT; /* pair of string format id and string formats */ + +/* Table of String Format specifiers, the group they belong and the number of + * arguments each expects. Xmacro styled macros are used to generate what is + * needed without requiring hand editing. 
+ * + * id : id within a group + * gid : group id + * Sym name : name of enumerations used to identify message strings + * String : Actual string + * #args : number of arguments the string format requires + */ +#define RGXFW_LOG_SFIDLIST \ +/*id, gid, id name, string, # arguments */ \ +X( 0, RGXFW_GROUP_NULL, RGXFW_SF_FIRST, "You should not use this string", 0) \ +\ +X( 1, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D_DEPRECATED, "Kick 3D: FWCtx 0x%08.8x @ %d, RTD 0x%08x. Partial render:%d, CSW resume:%d, prio:%d", 6) \ +X( 2, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_FINISHED, "3D finished, HWRTData0State=%x, HWRTData1State=%x", 2) \ +X( 3, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK3D_TQ_DEPRECATED, "Kick 3D TQ: FWCtx 0x%08.8x @ %d, CSW resume:%d, prio: %d", 4) \ +X( 4, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_TQ_FINISHED, "3D Transfer finished", 0) \ +X( 5, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE_DEPRECATED, "Kick Compute: FWCtx 0x%08.8x @ %d, prio: %d", 3) \ +X( 6, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_FINISHED, "Compute finished", 0) \ +X( 7, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA_DEPRECATED, "Kick TA: FWCtx 0x%08.8x @ %d, RTD 0x%08x. First kick:%d, Last kick:%d, CSW resume:%d, prio:%d", 7) \ +X( 8, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_FINISHED, "TA finished", 0) \ +X( 9, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_RESTART_AFTER_PRENDER, "Restart TA after partial render", 0) \ +X( 10, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_RESUME_WOUT_PRENDER, "Resume TA without partial render", 0) \ +X( 11, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OOM, "Out of memory! 
Context 0x%08x, HWRTData 0x%x", 2) \ +X( 12, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TLA_DEPRECATED, "Kick TLA: FWCtx 0x%08.8x @ %d, prio:%d", 3) \ +X( 13, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TLA_FINISHED, "TLA finished", 0) \ +X( 14, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CCCB_WOFF_UPDATE, "cCCB Woff update = %d, DM = %d, FWCtx = 0x%08.8x", 3) \ +X( 16, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_START, "UFO Checks for FWCtx 0x%08.8x @ %d", 2) \ +X( 17, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK, "UFO Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 3) \ +X( 18, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_SUCCEEDED, "UFO Checks succeeded", 0) \ +X( 19, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_PR_CHECK, "UFO PR-Check: [0x%08.8x] is 0x%08.8x requires >= 0x%08.8x", 3) \ +X( 20, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK_START, "UFO SPM PR-Checks for FWCtx 0x%08.8x", 1) \ +X( 21, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK_DEPRECATED, "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires >= ????????, [0x%08.8x] is ???????? 
requires 0x%08.8x", 4) \ +X( 22, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_UPDATE_START, "UFO Updates for FWCtx 0x%08.8x @ %d", 2) \ +X( 23, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_UPDATE, "UFO Update: [0x%08.8x] = 0x%08.8x", 2) \ +X( 24, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ASSERT_FAILED, "ASSERT Failed: line %d of:", 1) \ +X( 25, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_LOCKUP_DEPRECATED, "HWR: Lockup detected on DM%d, FWCtx: 0x%08.8x", 2) \ +X( 26, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_RESET_FW_DEPRECATED, "HWR: Reset fw state for DM%d, FWCtx: 0x%08.8x, MemCtx: 0x%08.8x", 3) \ +X( 27, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_RESET_HW_DEPRECATED, "HWR: Reset HW", 0) \ +X( 28, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_TERMINATED_DEPRECATED, "HWR: Lockup recovered.", 0) \ +X( 29, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_FALSE_LOCKUP_DEPRECATED, "HWR: False lockup detected for DM%u", 1) \ +X( 30, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ALIGN_FAILED, "Alignment check %d failed: host = 0x%x, fw = 0x%x", 3) \ +X( 31, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GP_USC_TRIGGERED, "GP USC triggered", 0) \ +X( 32, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_OVERALLOC_REGS, "Overallocating %u temporary registers and %u shared registers for breakpoint handler", 2) \ +X( 33, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_SET_DEPRECATED, "Setting breakpoint: Addr 0x%08.8x", 1) \ +X( 34, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_STORE, "Store breakpoint state", 0) \ +X( 35, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_UNSET, "Unsetting BP Registers", 0) \ +X( 36, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NONZERO_RT, "Active RTs expected to be zero, actually %u", 1) \ +X( 37, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTC_PRESENT, "RTC present, %u active render targets", 1) \ +X( 38, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_EST_POWER_DEPRECATED, "Estimated Power 0x%x", 1) \ +X( 39, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTA_TARGET, "RTA render target %u", 1) \ +X( 40, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTA_KICK_RENDER, "Kick RTA render %u of %u", 2) \ +X( 41, RGXFW_GROUP_MAIN, 
RGXFW_SF_MAIN_HWR_SIZES_CHECK_DEPRECATED, "HWR sizes check %d failed: addresses = %d, sizes = %d", 3) \ +X( 42, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_ENABLE_DEPRECATED, "Pow: DUSTS_ENABLE = 0x%x", 1) \ +X( 43, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_HWREQ_DEPRECATED, "Pow: On(1)/Off(0): %d, Units: 0x%08.8x", 2) \ +X( 44, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_CHANGE_DEPRECATED, "Pow: Changing number of dusts from %d to %d", 2) \ +X( 45, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_SIDEKICK_IDLE_DEPRECATED, "Pow: Sidekick ready to be powered down", 0) \ +X( 46, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_CHANGE_REQ_DEPRECATED, "Pow: Request to change num of dusts to %d (bPowRascalDust=%d)", 2) \ +X( 47, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PARTIALRENDER_WITHOUT_ZSBUFFER_STORE, "No ZS Buffer used for partial render (store)", 0) \ +X( 48, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PARTIALRENDER_WITHOUT_ZSBUFFER_LOAD, "No Depth/Stencil Buffer used for partial render (load)", 0) \ +X( 49, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_SET_LOCKUP_DEPRECATED, "HWR: Lock-up DM%d FWCtx: 0x%08.8x", 2) \ +X( 50, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_REG_VALUE_DEPRECATED, "MLIST%d checker: CatBase TE=0x%08x (%d Pages), VCE=0x%08x (%d Pages), ALIST=0x%08x, IsTA=%d", 7) \ +X( 51, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_MLIST_VALUE, "MLIST%d checker: MList[%d] = 0x%08x", 3) \ +X( 52, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_OK, "MLIST%d OK", 1) \ +X( 53, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_EMPTY, "MLIST%d is empty", 1) \ +X( 54, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_REG_VALUE, "MLIST%d checker: CatBase TE=0x%08x%08x, VCE=0x%08x%08x, ALIST=0x%08x%08x, IsTA=%d", 8) \ +X( 55, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_40480KICK, "3D OQ flush kick", 0) \ +X( 56, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWP_UNSUPPORTED_BLOCK, "HWPerf block ID (0x%x) unsupported by device", 1) \ +X( 57, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_SET_DEPRECATED2, "Setting breakpoint: Addr 0x%08.8x DM%u", 2) \ +X( 58, 
RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED, "Kick RTU: FWCtx 0x%08.8x @ %d, prio: %d", 3) \ +X( 59, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTU_FINISHED_DEPRECATED, "RDM finished on context %u", 1) \ +X( 60, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_SHG_DEPRECATED, "Kick SHG: FWCtx 0x%08.8x @ %d, prio: %d", 3) \ +X( 61, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SHG_FINISHED_DEPRECATED, "SHG finished", 0) \ +X( 62, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FBA_FINISHED_DEPRECATED, "FBA finished on context %u", 1) \ +X( 63, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_FAILED, "UFO Checks failed", 0) \ +X( 64, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_START, "Kill DM%d start", 1) \ +X( 65, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_COMPLETE, "Kill DM%d complete", 1) \ +X( 66, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FC_CCB_UPDATE_DEPRECATED, "FC%u cCCB Woff update = %u", 2) \ +X( 67, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED2, "Kick RTU: FWCtx 0x%08.8x @ %d, prio: %d, Frame Context: %d", 4) \ +X( 68, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GPU_INIT, "GPU init", 0) \ +X( 69, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNITS_INIT, "GPU Units init (# mask: 0x%x)", 1) \ +X( 70, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGTIMES, "Register access cycles: read: %d cycles, write: %d cycles, iterations: %d", 3) \ +X( 71, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGCONFIG_ADD, "Register configuration added. Address: 0x%x Value: 0x%x%x", 3) \ +X( 72, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGCONFIG_SET, "Register configuration applied to type %d. 
(0:pow on, 1:Rascal/dust init, 2-5: TA,3D,CDM,TLA, 6:All)", 1) \ +X( 73, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TPC_FLUSH, "Perform TPC flush.", 0) \ +X( 74, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_LOCKUP_DEPRECATED, "GPU has locked up (see HWR logs for more info)", 0) \ +X( 75, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_OUTOFTIME, "HWR has been triggered - GPU has overrun its deadline (see HWR logs)", 0) \ +X( 76, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_POLLFAILURE, "HWR has been triggered - GPU has failed a poll (see HWR logs)", 0) \ +X( 77, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DOPPLER_OOM_DEPRECATED, "Doppler out of memory event for FC %u", 1) \ +X( 78, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK1, "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires >= 0x%08.8x", 3) \ +X( 79, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK2, "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 3) \ +X( 80, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TIMESTAMP, "TIMESTAMP -> [0x%08.8x]", 1) \ +X( 81, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_RMW_UPDATE_START, "UFO RMW Updates for FWCtx 0x%08.8x @ %d", 2) \ +X( 82, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_RMW_UPDATE, "UFO Update: [0x%08.8x] = 0x%08.8x", 2) \ +X( 83, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NULLCMD, "Kick Null cmd: FWCtx 0x%08.8x @ %d", 2) \ +X( 84, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RPM_OOM_DEPRECATED, "RPM Out of memory! 
Context 0x%08x, SH requestor %d", 2) \ +X( 85, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTU_ABORT_DISCARD_DEPRECATED, "Discard RTU due to RPM abort: FWCtx 0x%08.8x @ %d, prio: %d, Frame Context: %d", 4) \ +X( 86, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED, "Deferring DM%u from running context 0x%08x @ %d (deferred DMs = 0x%08x)", 4) \ +X( 87, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED_WAITING_TURN_DEPRECATED, "Deferring DM%u from running context 0x%08x @ %d to let other deferred DMs run (deferred DMs = 0x%08x)", 4) \ +X( 88, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED_NO_LONGER, "No longer deferring DM%u from running context = 0x%08x @ %d (deferred DMs = 0x%08x)", 4) \ +X( 89, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_FWCCB_DEPRECATED, "FWCCB for DM%u is full, we will have to wait for space! (Roff = %u, Woff = %u)", 3) \ +X( 90, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_FWCCB, "FWCCB for OSid %u is full, we will have to wait for space! (Roff = %u, Woff = %u)", 3) \ +X( 91, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SYNC_PART, "Host Sync Partition marker: %d", 1) \ +X( 92, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SYNC_PART_RPT, "Host Sync Partition repeat: %d", 1) \ +X( 93, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CLOCK_SPEED_CHANGE, "Core clock set to %d Hz", 1) \ +X( 94, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_OFFSETS, "Compute Queue: FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)", 7) \ +X( 95, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE_DEPRECATED, "Signal check failed, Required Data: 0x%x, Address: 0x%08x%08x", 3) \ +X( 96, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_UPDATE_DEPRECATED, "Signal update, Snoop Filter: %u, MMU Ctx: %u, Signal Id: %u, Signals Base: 0x%08x%08x", 5) \ +X( 97, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWCONTEXT_SIGNALED, "Signalled the previously waiting FWCtx: 0x%08.8x, OSId: %u, Signal Address: 0x%08x%08x", 4) \ +X( 98, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_STALLED_DEPRECATED, "Compute stalled", 0) \ +X( 99, RGXFW_GROUP_MAIN, 
RGXFW_SF_MAIN_COMPUTE_STALLED, "Compute stalled (Roff = %u, Woff = %u, Size = %u)", 3) \ +X(100, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_RESUMED_FROM_STALL, "Compute resumed (Roff = %u, Woff = %u, Size = %u)", 3) \ +X(101, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NOTIFY_SIGNAL_UPDATE, "Signal update notification from the host, PC Physical Address: 0x%08x%08x, Signal Virtual Address: 0x%08x%08x", 4) \ +X(102, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_UPDATE_OSID_DM_DEPRECATED, "Signal update from DM: %u, OSId: %u, PC Physical Address: 0x%08x%08x", 4) \ +X(103, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE_DM_DEPRECATED, "DM: %u signal check failed", 1) \ +X(104, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM_DEPRECATED, "Kick TDM: FWCtx 0x%08.8x @ %d, prio:%d", 3) \ +X(105, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_FINISHED, "TDM finished", 0) \ +X(106, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TE_PIPE_STATUS_DEPRECATED, "MMU_PM_CAT_BASE_TE[%d]_PIPE[%d]: 0x%08x 0x%08x)", 4) \ +X(107, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_HIT_DEPRECATED, "BRN 54141 HIT", 0) \ +X(108, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_APPLYING_DUMMY_TA_DEPRECATED, "BRN 54141 Dummy TA kicked", 0) \ +X(109, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_RESUME_TA_DEPRECATED, "BRN 54141 resume TA", 0) \ +X(110, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_DOUBLE_HIT_DEPRECATED, "BRN 54141 double hit after applying WA", 0) \ +X(111, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_DUMMY_TA_VDM_BASE_DEPRECATED, "BRN 54141 Dummy TA VDM base address: 0x%08x%08x", 2) \ +X(112, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE_WITH_CURRENT, "Signal check failed, Required Data: 0x%x, Current Data: 0x%x, Address: 0x%08x%08x", 4) \ +X(113, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BUFFER_STALL_DEPRECATED, "TDM stalled (Roff = %u, Woff = %u)", 2) \ +X(114, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NOTIFY_WRITE_OFFSET_UPDATE, "Write Offset update notification for stalled FWCtx 0x%08.8x", 1) \ +X(115, RGXFW_GROUP_MAIN, 
RGXFW_SF_MAIN_OS_PRIORITY_CHANGE_DEPRECATED, "Changing OSid %d's priority from %u to %u", 3) \ +X(116, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_RESUMED, "Compute resumed", 0) \ +X(117, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TLA, "Kick TLA: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 7) \ +X(118, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM, "Kick TDM: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 7) \ +X(119, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA, "Kick TA: FWCtx 0x%08.8x @ %d, RTD 0x%08x, First kick:%d, Last kick:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 11) \ +X(120, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D, "Kick 3D: FWCtx 0x%08.8x @ %d, RTD 0x%08x, Partial render:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 10) \ +X(121, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3DTQ, "Kick 3D TQ: FWCtx 0x%08.8x @ %d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 8) \ +X(122, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE, "Kick Compute: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, ext:0x%08x, int:0x%08x)", 6) \ +X(123, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED3, "Kick RTU: FWCtx 0x%08.8x @ %d, Frame Context:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 8) \ +X(124, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_SHG_DEPRECATED2, "Kick SHG: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 7) \ +X(125, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CSRM_RECONFIG, "Reconfigure CSRM: special coeff support enable %d.", 1) \ +X(127, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_REQ_MAX_COEFFS, "TA requires max coeff mode, deferring: %d.", 1) \ +X(128, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_REQ_MAX_COEFFS, "3D requires max coeff mode, deferring: %d.", 1) \ +X(129, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_FAILED, "Kill DM%d failed", 1) \ +X(130, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_QUEUE, "Thread Queue is full, we will have to wait for space! 
(Roff = %u, Woff = %u)", 2) \ +X(131, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_QUEUE_FENCE, "Thread Queue is fencing, we are waiting for Roff = %d (Roff = %u, Woff = %u)", 3) \ +X(132, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SET_HCS_TRIGGERED, "DM %d failed to Context Switch on time. Triggered HCS (see HWR logs).", 1) \ +X(133, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HCS_SET_DEPRECATED, "HCS changed to %d ms", 1) \ +X(134, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UPDATE_TILES_IN_FLIGHT_DEPRECATED, "Updating Tiles In Flight (Dusts=%d, PartitionMask=0x%08x, ISPCtl=0x%08x%08x)", 4) \ +X(135, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SET_TILES_IN_FLIGHT, " Phantom %d: USCTiles=%d", 2) \ +X(136, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ISOLATION_CONF_OFF_DEPRECATED, "Isolation grouping is disabled", 0) \ +X(137, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ISOLATION_CONF_DEPRECATED, "Isolation group configured with a priority threshold of %d", 1) \ +X(138, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_ONLINE_DEPRECATED, "OS %d has come online", 1) \ +X(139, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_OFFLINE_DEPRECATED, "OS %d has gone offline", 1) \ +X(140, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWCONTEXT_SIGNAL_REKICK, "Signalled the previously stalled FWCtx: 0x%08.8x, OSId: %u, Signal Address: 0x%08x%08x", 4) \ +X(141, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_OFFSETS_DEPRECATED, "TDM Queue: FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)", 7) \ +X(142, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_OFFSET_READ_RESET, "Reset TDM Queue Read Offset: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u becomes 0, Woff = %u, Size = %u)", 6) \ +X(143, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UMQ_MISMATCHED_READ_OFFSET, "User Mode Queue mismatched stream start: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u, StreamStartOffset = %u)", 5) \ +X(144, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GPU_DEINIT, "GPU deinit", 0) \ +X(145, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNITS_DEINIT, "GPU units deinit", 0) \ +X(146, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CONFIG, "Initialised OS 
%d with config flags 0x%08x", 2) \ +X(147, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_LIMIT, "UFO limit exceeded %d/%d", 2) \ +X(148, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_62850KICK, "3D Dummy stencil store", 0) \ +X(149, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CONFIG_DEPRECATED, "Initialised OS %d with config flags 0x%08x and extended config flags 0x%08x", 3) \ +X(150, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_COMMAND_DEPRECATED, "Unknown Command (eCmdType=0x%08x)", 1) \ +X(151, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_FORCED_UPDATE, "UFO forced update: FWCtx 0x%08.8x @ %d [0x%08.8x] = 0x%08.8x", 4) \ +X(152, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_FORCED_UPDATE_NOP, "UFO forced update NOP: FWCtx 0x%08.8x @ %d [0x%08.8x] = 0x%08.8x, reason %d", 5) \ +X(153, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BRN66075_CHECK, "TDM context switch check: Roff %u points to 0x%08x, Match=%u", 3) \ +X(154, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CCBS, "OSid %d CCB init status: %d (1-ok 0-fail): kCCBCtl@0x%x kCCB@0x%x fwCCBCtl@0x%x fwCCB@0x%x", 6) \ +X(155, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWIRQ, "FW IRQ # %u @ %u", 2) \ +X(156, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_SET, "Setting breakpoint: Addr 0x%08.8x DM%u usc_breakpoint_ctrl_dm = %u", 3) \ +X(157, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KERNEL_CCB_DEPRECATED, "Invalid KCCB setup for OSid %u: KCCB 0x%08x, KCCB Ctrl 0x%08x", 3) \ +X(158, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KERNEL_CCB_CMD, "Invalid KCCB cmd (%u) for OSid %u @ KCCB 0x%08x", 3) \ +X(159, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FW_FAULT, "FW FAULT: At line %d in file 0x%08x%08x, additional data=0x%08x", 4) \ +X(160, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_INVALID, "Invalid breakpoint: MemCtx 0x%08x Addr 0x%08.8x DM%u usc_breakpoint_ctrl_dm = %u", 4) \ +X(161, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FLUSHINVAL_CMD_INVALID_DEPRECATED, "Discarding invalid SLC flushinval command for OSid %u: DM %u, FWCtx 0x%08x", 3) \ +X(162, RGXFW_GROUP_MAIN, 
RGXFW_SF_MAIN_INVALID_NOTIFY_WRITE_OFFSET_UPDATE_DEPRECATED, "Invalid Write Offset update notification from OSid %u to DM %u: FWCtx 0x%08x, MemCtx 0x%08x", 4) \ +X(163, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KCCB_KICK_CMD_DEPRECATED, "Null FWCtx in KCCB kick cmd for OSid %u: KCCB 0x%08x, ROff %u, WOff %u", 4) \ +X(164, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FULL_CHPTCCB, "Checkpoint CCB for OSid %u is full, signalling host for full check state (Roff = %u, Woff = %u)", 3) \ +X(165, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CCBS_DEPRECATED, "OSid %d CCB init status: %d (1-ok 0-fail): kCCBCtl@0x%x kCCB@0x%x fwCCBCtl@0x%x fwCCB@0x%x chptCCBCtl@0x%x chptCCB@0x%x", 8) \ +X(166, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_STATE_CHANGE, "OSid %d fw state transition request: from %d to %d (0-offline 1-ready 2-active 3-offloading). Status %d (1-ok 0-fail)", 4) \ +X(167, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_STALE_KCCB_CMDS, "OSid %u has %u stale commands in its KCCB", 2) \ +X(168, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_VCE_PAUSE, "Applying VCE pause", 0) \ +X(169, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KCCB_UPDATE_RTN_SLOT_DEPRECATED, "OSid %u KCCB slot %u value updated to %u", 3) \ +X(170, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_KCCB_COMMAND, "Unknown KCCB Command: KCCBCtl=0x%08x, KCCB=0x%08x, Roff=%u, Woff=%u, Wrap=%u, Cmd=0x%08x, CmdType=0x%08x", 7) \ +X(171, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_CCB_COMMAND1, "Unknown Client CCB Command processing fences: FWCtx=0x%08x, CCBCtl=0x%08x, CCB=0x%08x, Roff=%u, Doff=%u, Woff=%u, Wrap=%u, CmdHdr=0x%08x, CmdType=0x%08x, CmdSize=%u", 10) \ +X(172, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_CCB_COMMAND2, "Unknown Client CCB Command executing kick: FWCtx=0x%08x, CCBCtl=0x%08x, CCB=0x%08x, Roff=%u, Doff=%u, Woff=%u, Wrap=%u, CmdHdr=0x%08x, CmdType=0x%08x, CmdSize=%u", 10) \ +X(173, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KCCB_KICK_CMD, "Null FWCtx in KCCB kick cmd for OSid %u with WOff %u", 2) \ +X(174, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FLUSHINVAL_CMD_INVALID, 
"Discarding invalid SLC flushinval command for OSid %u, FWCtx 0x%08x", 2) \ +X(175, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_NOTIFY_WRITE_OFFSET_UPDATE, "Invalid Write Offset update notification from OSid %u: FWCtx 0x%08x, MemCtx 0x%08x", 3) \ +X(176, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FW_INIT_CONFIG, "Initialised Firmware with config flags 0x%08x and extended config flags 0x%08x", 2) \ +X(177, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PHR_CONFIG, "Set Periodic Hardware Reset Mode: %d", 1) \ +X(179, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PHR_TRIG, "PHR mode %d, FW state: 0x%08x, HWR flags: 0x%08x", 3) \ +X(180, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PHR_RESET_DEPRECATED, "PHR mode %d triggered a reset", 1) \ +X(181, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_UPDATE, "Signal update, Snoop Filter: %u, Signal Id: %u", 2) \ +X(182, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FIXME_SERIES8, "WARNING: Skipping FW KCCB Cmd type %d which is not yet supported on Series8.", 1) \ +X(183, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INCONSISTENT_MMU_FLAGS, "MMU context cache data NULL, but cache flags=0x%x (sync counter=%u, update value=%u) OSId=%u", 4) \ +X(184, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SLC_FLUSH, "SLC range based flush: Context=%u VAddr=0x%02x%08x, Size=0x%08x, Invalidate=%d", 5) \ +X(185, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FBSC_INVAL, "FBSC invalidate for Context [0x%08x]: Entry mask 0x%08x%08x.", 3) \ +X(186, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BRN66284_UPDATE, "TDM context switch check: Roff %u was not valid for kick starting at %u, moving back to %u", 3) \ +X(187, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SPFILTER_UPDATES, "Signal updates: FIFO: %u, Signals: 0x%08x", 2) \ +X(188, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_FBSC_CMD, "Invalid FBSC cmd: FWCtx 0x%08x, MemCtx 0x%08x", 2) \ +X(189, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BRN68497_BLIT, "Insert BRN68497 WA blit after TDM Context store.", 0) \ +X(190, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PENDING_UFO_UPDATE_START, "UFO Updates for previously finished FWCtx 0x%08.8x", 1) \ +X(191, 
RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTC_RTA_PRESENT, "RTC with RTA present, %u active render targets", 1) \ +X(192, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NULL_RTAS, "Invalid RTA Set-up. The ValidRenderTargets array in RTACtl is Null!", 0) \ +X(193, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_COUNTER, "Block 0x%x / Counter 0x%x INVALID and ignored", 2) \ +X(194, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ECC_FAULT_DEPRECATED, "ECC fault GPU=0x%08x FW=0x%08x", 2) \ +X(195, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PROCESS_XPU_EVENT, "Processing XPU event on DM = %d", 1) \ +X(196, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_VZ_WDG_TRIGGER, "OSid %u failed to respond to the virtualisation watchdog in time. Timestamp of its last input = %u", 2) \ +X(197, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_LOCKUP, "GPU-%u has locked up (see HWR logs for more info)", 1) \ +X(198, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UPDATE_TILES_IN_FLIGHT, "Updating Tiles In Flight (Dusts=%d, PartitionMask=0x%08x, ISPCtl=0x%08x)", 3) \ +X(199, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_LOCKUP_DM, "GPU has locked up (see HWR logs for more info)", 0) \ +X(200, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REPROCESS_XPU_EVENTS, "Reprocessing outstanding XPU events from cores 0x%02x", 1) \ +X(201, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SECONDARY_XPU_EVENT, "Secondary XPU event on DM=%d, CoreMask=0x%02x, Raised=0x%02x", 3) \ +X(202, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_OFFSETS, "TDM Queue: Core %u, FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)", 8) \ +X(203, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BUFFER_STALL, "TDM stalled Core %u (Roff = %u, Woff = %u)", 3) \ +X(204, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_CORE_OFFSETS, "Compute Queue: Core %u, FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)", 8) \ +X(205, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_CORE_STALLED, "Compute stalled core %u (Roff = %u, Woff = %u, Size = %u)", 4) \ +X(206, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UMQ_MISMATCHED_CORE_READ_OFFSET, "User Mode Queue 
mismatched stream start: Core %u, FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u, StreamStartOffset = %u)", 6) \ +X(207, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_RESUMED_FROM_STALL, "TDM resumed core %u (Roff = %u, Woff = %u)", 3) \ +X(208, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_CORE_RESUMED_FROM_STALL, "Compute resumed core %u (Roff = %u, Woff = %u, Size = %u)", 4) \ +X(209, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_MTS_PERMISSION_CHANGED, " Updated permission for OSid %u to perform MTS kicks: %u (1 = allowed, 0 = not allowed)", 2) \ +X(210, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TEST1, "Mask = 0x%X, mask2 = 0x%X", 2) \ +X(211, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TEST2, " core %u, reg = %u, mask = 0x%X)", 3) \ +X(212, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ECC_FAULT_SAFETY_BUS, "ECC fault received from safety bus: 0x%08x", 1) \ +X(213, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SAFETY_WDG_CONFIG, "Safety Watchdog threshold period set to 0x%x clock cycles", 1) \ +X(214, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SAFETY_WDG_TRIGGER, "MTS Safety Event trigged by the safety watchdog.", 0) \ +X(215, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_USC_TASKS_RANGE, "DM%d USC tasks range limit 0 - %d, stride %d", 3) \ +X(216, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GPU_ECC_FAULT, "ECC fault GPU=0x%08x", 1) \ +X(217, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GPU_SAFETY_RESET, "GPU Hardware units reset to prevent transient faults.", 0) \ +X(218, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ABORTCMD, "Kick Abort cmd: FWCtx 0x%08.8x @ %d", 2) \ +X(219, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RAY, "Kick Ray: FWCtx 0x%08.8x @ %d. 
(PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 7)\ +X(220, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RAY_FINISHED, "Ray finished", 0) \ +X(221, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWDATA_INIT_STATUS, "State of firmware's private data at boot time: %d (0 = uninitialised, 1 = initialised); Fw State Flags = 0x%08X", 2) \ +X(222, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CFI_TIMEOUT, "CFI Timeout detected (%d increasing to %d)", 2) \ +X(223, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CFI_TIMEOUT_FBM, "CFI Timeout detected for FBM (%d increasing to %d)", 2) \ +X(224, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GEOM_OOM_DISALLOWED, "Geom OOM event not allowed", 0) \ +X(225, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_PRIORITY_CHANGE, "Changing OSid %d's priority from %u to %u; Isolation = %u (0 = off; 1 = on)", 4) \ +\ +X( 1, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK_DEPRECATED, "Bg Task DM = %u, counted = %d", 2) \ +X( 2, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_COMPLETE_DEPRECATED, "Bg Task complete DM = %u", 1) \ +X( 3, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_KICK, "Irq Task DM = %u, Breq = %d, SBIrq = 0x%x", 3) \ +X( 4, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_COMPLETE_DEPRECATED, "Irq Task complete DM = %u", 1) \ +X( 5, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KICK_MTS_BG_ALL_DEPRECATED, "Kick MTS Bg task DM=All", 0) \ +X( 6, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KICK_MTS_IRQ, "Kick MTS Irq task DM=%d", 1) \ +X( 7, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYCELLTYPE_DEPRECATED, "Ready queue debug DM = %u, celltype = %d", 2) \ +X( 8, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYTORUN_DEPRECATED, "Ready-to-run debug DM = %u, item = 0x%x", 2) \ +X( 9, RGXFW_GROUP_MTS, RGXFW_SF_MTS_CMDHEADER, "Client command header DM = %u, client CCB = 0x%x, cmd = 0x%x", 3) \ +X( 10, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYTORUN, "Ready-to-run debug OSid = %u, DM = %u, item = 0x%x", 3) \ +X( 11, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYCELLTYPE_DEPRECATED2, "Ready queue debug DM = %u, celltype = %d, OSid = %u", 3) \ +X( 12, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK_DEPRECATED2, "Bg Task DM = %u, counted = %d, 
OSid = %u", 3) \ +X( 13, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_COMPLETE, "Bg Task complete DM Bitfield: %u", 1) \ +X( 14, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_COMPLETE, "Irq Task complete.", 0) \ +X( 15, RGXFW_GROUP_MTS, RGXFW_SF_MTS_CMD_DISCARD, "Discarded Command Type: %d OS ID = %d PID = %d context = 0x%08x cccb ROff = 0x%x, due to USC breakpoint hit by OS ID = %d PID = %d.", 7) \ +X( 16, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KCCBCMD_EXEC_DEPRECATED, "KCCB Slot %u: DM=%u, Cmd=0x%08x, OSid=%u", 4) \ +X( 17, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KCCBCMD_RTN_VALUE, "KCCB Slot %u: Return value %u", 2) \ +X( 18, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK, "Bg Task OSid = %u", 1) \ +X( 19, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KCCBCMD_EXEC, "KCCB Slot %u: Cmd=0x%08x, OSid=%u", 3) \ +\ +X( 1, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FWCTX_CLEANUP, "FwCommonContext [0x%08x] cleaned", 1) \ +X( 2, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FWCTX_BUSY, "FwCommonContext [0x%08x] is busy: ReadOffset = %d, WriteOffset = %d", 3) \ +X( 3, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANUP_DEPRECATED, "HWRTData [0x%08x] for DM=%d, received cleanup request", 2) \ +X( 4, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED_FOR_DM_DEPRECATED, "HWRTData [0x%08x] HW Context cleaned for DM%u, executed commands = %d", 3) \ +X( 5, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY_DEPRECATED, "HWRTData [0x%08x] HW Context for DM%u is busy", 2) \ +X( 6, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED_DEPRECATED, "HWRTData [0x%08x] HW Context %u cleaned", 2) \ +X( 7, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FL_CLEANED, "Freelist [0x%08x] cleaned", 1) \ +X( 8, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_ZSBUFFER_CLEANED, "ZSBuffer [0x%08x] cleaned", 1) \ +X( 9, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_ZSBUFFER_BUSY, "ZSBuffer [0x%08x] is busy: submitted = %d, executed = %d", 3) \ +X( 10, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY_DEPRECATED2, "HWRTData [0x%08x] HW Context for DM%u is busy: submitted = %d, executed = %d", 4) \ +X( 11, 
RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANUP_DEPRECATED, "HW Ray Frame data [0x%08x] for DM=%d, received cleanup request", 2) \ +X( 12, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANED_FOR_DM_DEPRECATED, "HW Ray Frame Data [0x%08x] cleaned for DM%u, executed commands = %d", 3) \ +X( 13, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_BUSY_DEPRECATED, "HW Ray Frame Data [0x%08x] for DM%u is busy: submitted = %d, executed = %d", 4) \ +X( 14, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANED_DEPRECATED, "HW Ray Frame Data [0x%08x] HW Context %u cleaned", 2) \ +X( 15, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_INVALID_REQUEST, "Discarding invalid cleanup request of type 0x%x", 1) \ +X( 16, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANUP, "Received cleanup request for HWRTData [0x%08x]", 1) \ +X( 17, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY, "HWRTData [0x%08x] HW Context is busy: submitted = %d, executed = %d", 3) \ +X( 18, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED, "HWRTData [0x%08x] HW Context %u cleaned, executed commands = %d", 3) \ +\ +X( 1, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_NEEDS_RESUME, "CDM FWCtx 0x%08.8x needs resume", 1) \ +X( 2, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_RESUME_DEPRECATED, "*** CDM FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x", 3) \ +X( 3, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_SHARED, "CDM FWCtx shared alloc size load 0x%x", 1) \ +X( 4, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_STORE_COMPLETE, "*** CDM FWCtx store complete", 0) \ +X( 5, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_STORE_START, "*** CDM FWCtx store start", 0) \ +X( 6, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_SOFT_RESET, "CDM Soft Reset", 0) \ +X( 7, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_NEEDS_RESUME, "3D FWCtx 0x%08.8x needs resume", 1) \ +X( 8, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME, "*** 3D FWCtx 0x%08.8x resume", 1) \ +X( 9, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_COMPLETE, "*** 3D context store complete", 0) \ +X( 10, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE_DEPRECATED, "3D 
context store pipe state: 0x%08.8x 0x%08.8x 0x%08.8x", 3) \ +X( 11, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_START, "*** 3D context store start", 0) \ +X( 12, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_TQ_RESUME, "*** 3D TQ FWCtx 0x%08.8x resume", 1) \ +X( 13, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_NEEDS_RESUME, "TA FWCtx 0x%08.8x needs resume", 1) \ +X( 14, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_RESUME, "*** TA FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x", 3) \ +X( 15, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_SHARED, "TA context shared alloc size store 0x%x, load 0x%x", 2) \ +X( 16, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_COMPLETE, "*** TA context store complete", 0) \ +X( 17, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_START, "*** TA context store start", 0) \ +X( 18, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_SCHEDULED_DEPRECATED, "Higher priority context scheduled for DM %u, old prio:%d, new prio:%d", 3) \ +X( 19, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SET_CONTEXT_PRIORITY, "Set FWCtx 0x%x priority to %u", 2) \ +X( 20, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE_DEPRECATED2, "3D context store pipe%d state: 0x%08.8x", 2) \ +X( 21, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_PIPE_STATE_DEPRECATED, "3D context resume pipe%d state: 0x%08.8x", 2) \ +X( 22, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_NEEDS_RESUME_DEPRECATED, "SHG FWCtx 0x%08.8x needs resume", 1) \ +X( 23, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_RESUME_DEPRECATED, "*** SHG FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x", 3) \ +X( 24, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_SHARED_DEPRECATED, "SHG context shared alloc size store 0x%x, load 0x%x", 2) \ +X( 25, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_STORE_COMPLETE_DEPRECATED, "*** SHG context store complete", 0) \ +X( 26, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_STORE_START_DEPRECATED, "*** SHG context store start", 0) \ +X( 27, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_PIPE_INDIRECT, "Performing TA indirection, last used pipe %d", 1) \ +X( 28, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_STORE_CTRL_STREAM_TERMINATE, "CDM context 
store hit ctrl stream terminate. Skip resume.", 0) \ +X( 29, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_RESUME_AB_BUFFER, "*** CDM FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x, shader state %u", 4) \ +X( 30, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STATE_BUFFER_FLIP, "TA PDS/USC state buffer flip (%d->%d)", 2) \ +X( 31, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_52563_HIT_DEPRECATED, "TA context store hit BRN 52563: vertex store tasks outstanding", 0) \ +X( 32, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_USC_POLL_FAILED, "TA USC poll failed (USC vertex task count: %d)", 1) \ +X( 33, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_DEFERRED_DEPRECATED, "TA context store deferred due to BRN 54141.", 0) \ +X( 34, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_SCHEDULED_DEPRECATED2, "Higher priority context scheduled for DM %u. Prios (OSid, OSid Prio, Context Prio): Current: %u, %u, %u New: %u, %u, %u", 7) \ +X( 35, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_START, "*** TDM context store start", 0) \ +X( 36, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_COMPLETE, "*** TDM context store complete", 0) \ +X( 37, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_NEEDS_RESUME_DEPRECATED, "TDM context needs resume, header [0x%08.8x, 0x%08.8x]", 2) \ +X( 38, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_SCHEDULED, "Higher priority context scheduled for DM %u. Prios (OSid, OSid Prio, Context Prio): Current: %u, %u, %u New: %u, %u, %u. 
Hard Context Switching: %u", 8) \ +X( 39, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE, "3D context store pipe %2d (%2d) state: 0x%08.8x", 3) \ +X( 40, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_PIPE_STATE, "3D context resume pipe %2d (%2d) state: 0x%08.8x", 3) \ +X( 41, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_START_VOLCANIC, "*** 3D context store start version %d (1=IPP_TILE, 2=ISP_TILE)", 1) \ +X( 42, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE_VOLCANIC, "3D context store pipe%d state: 0x%08.8x%08x", 3) \ +X( 43, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_PIPE_STATE_VOLCANIC, "3D context resume pipe%d state: 0x%08.8x%08x", 3) \ +X( 44, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_IPP_STATE, "3D context resume IPP state: 0x%08.8x%08x", 2) \ +X( 45, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_PIPES_EMPTY, "All 3D pipes empty after ISP tile mode store! IPP_status: 0x%08x", 1) \ +X( 46, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_RESUME_PIPE_STATE_DEPRECATED, "TDM context resume pipe%d state: 0x%08.8x%08x", 3) \ +X( 47, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_LEVEL4_STORE_START, "*** 3D context store start version 4", 0) \ +X( 48, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RESUME_MULTICORE, "Multicore context resume on DM%d active core mask 0x%04.4x", 2) \ +X( 49, RGXFW_GROUP_CSW, RGXFW_SF_CSW_STORE_MULTICORE, "Multicore context store on DM%d active core mask 0x%04.4x", 2) \ +X( 50, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_RESUME_PIPE_STATE, "TDM context resume Core %d, pipe%d state: 0x%08.8x%08x%08x", 5) \ +X( 51, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RDM_STORE_COMPLETE, "*** RDM FWCtx store complete", 0) \ +X( 52, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RDM_STORE_START, "*** RDM FWCtx store start", 0) \ +X( 53, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RDM_NEEDS_RESUME, "RDM FWCtx 0x%08.8x needs resume", 1) \ +X( 54, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RDM_RESUME, "RDM FWCtx 0x%08.8x resume", 1) \ +\ +X( 1, RGXFW_GROUP_BIF, RGXFW_SF_BIF_ACTIVATE_BIFREQ_DEPRECATED, "Activate MemCtx=0x%08x BIFreq=%d secure=%d", 3) \ +X( 2, RGXFW_GROUP_BIF, 
RGXFW_SF_BIF_DEACTIVATE, "Deactivate MemCtx=0x%08x", 1) \ +X( 3, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCREG_ALLOC, "Alloc PC reg %d", 1) \ +X( 4, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCREG_GRAB, "Grab reg %d refcount now %d", 2) \ +X( 5, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCREG_UNGRAB, "Ungrab reg %d refcount now %d", 2) \ +X( 6, RGXFW_GROUP_BIF, RGXFW_SF_BIF_SETUP_REG_BIFREQ_DEPRECATED, "Setup reg=%d BIFreq=%d, expect=0x%08x%08x, actual=0x%08x%08x", 6) \ +X( 7, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TRUST_DEPRECATED, "Trust enabled:%d, for BIFreq=%d", 2) \ +X( 8, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TILECFG_DEPRECATED, "BIF Tiling Cfg %d base 0x%08x%08x len 0x%08x%08x enable %d stride %d --> 0x%08x%08x", 9) \ +X( 9, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSID0, "Wrote the Value %d to OSID0, Cat Base %d, Register's contents are now 0x%08x 0x%08x", 4) \ +X( 10, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSID1, "Wrote the Value %d to OSID1, Context %d, Register's contents are now 0x%04x", 3) \ +X( 11, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSIDx, "ui32OSid = %u, Catbase = %u, Reg Address = 0x%x, Reg index = %u, Bitshift index = %u, Val = 0x%08x%08x", 7) \ +X( 12, RGXFW_GROUP_BIF, RGXFW_SF_BIF_MAP_GPU_MEMORY_BIFREQ_DEPRECATED, "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u, BIFREQ %u", 5) \ +X( 13, RGXFW_GROUP_BIF, RGXFW_SF_BIF_UNMAP_GPU_MEMORY, "Unmap GPU memory (event status 0x%x)", 1) \ +X( 14, RGXFW_GROUP_BIF, RGXFW_SF_BIF_ACTIVATE_DM, "Activate MemCtx=0x%08x DM=%d secure=%d", 3) \ +X( 15, RGXFW_GROUP_BIF, RGXFW_SF_BIF_SETUP_REG_DM, "Setup reg=%d DM=%d, expect=0x%08x%08x, actual=0x%08x%08x", 6) \ +X( 16, RGXFW_GROUP_BIF, RGXFW_SF_BIF_MAP_GPU_MEMORY, "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u", 4) \ +X( 17, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TRUST_DM, "Trust enabled:%d, for DM=%d", 2) \ +X( 18, RGXFW_GROUP_BIF, RGXFW_SF_BIF_MAP_GPU_MEMORY_DM, "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u, DM %u", 5) \ +\ +X( 1, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_WRITE, "GPIO write 0x%02x", 1) \ +X( 2, 
RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_READ, "GPIO read 0x%02x", 1) \ +X( 3, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_ENABLED, "GPIO enabled", 0) \ +X( 4, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_DISABLED, "GPIO disabled", 0) \ +X( 5, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_STATUS, "GPIO status=%d (0=OK, 1=Disabled)", 1) \ +X( 6, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_READ, "GPIO_AP: Read address=0x%02x (%d byte(s))", 2) \ +X( 7, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_WRITE, "GPIO_AP: Write address=0x%02x (%d byte(s))", 2) \ +X( 8, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_TIMEOUT, "GPIO_AP timeout!", 0) \ +X( 9, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_ERROR, "GPIO_AP error. GPIO status=%d (0=OK, 1=Disabled)", 1) \ +X( 10, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_ALREADY_READ, "GPIO already read 0x%02x", 1) \ +X( 11, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_CHECK_BUFFER_AVAILABLE, "SR: Check buffer %d available returned %d", 2) \ +X( 12, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_WAITING_BUFFER_AVAILABLE, "SR: Waiting for buffer %d", 1) \ +X( 13, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_WAIT_BUFFER_TIMEOUT, "SR: Timeout waiting for buffer %d (after %d ticks)", 2) \ +X( 14, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_FRAME_CHECK, "SR: Skip frame check for strip %d returned %d (0=No skip, 1=Skip frame)", 2) \ +X( 15, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_REMAINING_STRIPS, "SR: Skip remaining strip %d in frame", 1) \ +X( 16, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_FRAME_SKIP_NEW_FRAME, "SR: Inform HW that strip %d is a new frame", 1) \ +X( 17, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_FRAME_TIMEOUT, "SR: Timeout waiting for INTERRUPT_FRAME_SKIP (after %d ticks)", 1) \ +X( 18, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_STRIP_MODE, "SR: Strip mode is %d", 1) \ +X( 19, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_STRIP_INDEX, "SR: Strip Render start (strip %d)", 1) \ +X( 20, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_BUFFER_RENDERED, "SR: Strip Render complete (buffer %d)", 1) \ +X( 21, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_BUFFER_FAULT, "SR: 
Strip Render fault (buffer %d)", 1) \ +X( 22, RGXFW_GROUP_MISC, RGXFW_SF_MISC_TRP_STATE, "TRP state: %d", 1) \ +X( 23, RGXFW_GROUP_MISC, RGXFW_SF_MISC_TRP_FAILURE, "TRP failure: %d", 1) \ +X( 24, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SW_TRP_STATE, "SW TRP State: %d", 1) \ +X( 25, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SW_TRP_FAILURE, "SW TRP failure: %d", 1) \ +X( 26, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HW_KICK, "HW kick event (%u)", 1) \ +X( 27, RGXFW_GROUP_MISC, RGXFW_SF_MISC_WGP_CHECKSUMS, "GPU core (%u/%u): checksum 0x%08x vs. 0x%08x", 4) \ +X( 28, RGXFW_GROUP_MISC, RGXFW_SF_MISC_WGP_UNIT_CHECKSUMS, "GPU core (%u/%u), unit (%u,%u): checksum 0x%08x vs. 0x%08x", 6) \ +X( 29, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HWR_CHECK_REG, "HWR: Core%u, Register=0x%08x, OldValue=0x%08x%08x, CurrValue=0x%08x%08x", 6) \ +X( 30, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HWR_USC_SLOTS_CHECK, "HWR: USC Core%u, ui32TotalSlotsUsedByDM=0x%08x, psDMHWCtl->ui32USCSlotsUsedByDM=0x%08x, bHWRNeeded=%u", 4) \ +X( 31, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HWR_USC_REG_CHECK, "HWR: USC Core%u, Register=0x%08x, OldValue=0x%08x%08x, CurrValue=0x%08x%08x", 6) \ +\ +X( 1, RGXFW_GROUP_PM, RGXFW_SF_PM_AMLIST, "ALIST%d SP = %u, MLIST%d SP = %u (VCE 0x%08x%08x, TE 0x%08x%08x, ALIST 0x%08x%08x)", 10) \ +X( 2, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED_DEPRECATED, "Is TA: %d, finished: %d on HW %u (HWRTData = 0x%08x, MemCtx = 0x%08x). 
FL different between TA/3D: global:%d, local:%d, mmu:%d", 8) \ +X( 3, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_3DBASE_DEPRECATED, "UFL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), MFL-3D-Base: 0x%08x%08x (SP = %u, 4PT = %u)", 14) \ +X( 4, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_TABASE_DEPRECATED, "UFL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), MFL-TA-Base: 0x%08x%08x (SP = %u, 4PT = %u)", 14) \ +X( 5, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_GROW_COMPLETE_DEPRECATED, "Freelist grow completed [0x%08x]: added pages 0x%08x, total pages 0x%08x, new DevVirtAddr 0x%08x%08x", 5) \ +X( 6, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_GROW_DENIED_DEPRECATED, "Grow for freelist ID=0x%08x denied by host", 1) \ +X( 7, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_UPDATE_COMPLETE, "Freelist update completed [0x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x", 5) \ +X( 8, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_RECONSTRUCTION_FAILED_DEPRECATED, "Reconstruction of freelist ID=0x%08x failed", 1) \ +X( 9, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_WARNING, "Ignored attempt to pause or unpause the DM while there is no relevant operation in progress (0-TA,1-3D): %d, operation(0-unpause, 1-pause): %d", 2) \ +X( 10, RGXFW_GROUP_PM, RGXFW_SF_PM_3D_TIMEOUT_STATUS, "Force free 3D Context memory, FWCtx: 0x%08x, status(1:success, 0:fail): %d", 2)\ +X( 11, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_ALLOC, "PM pause TA ALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \ +X( 12, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_UNPAUSE_ALLOC, "PM unpause TA ALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \ +X( 13, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_DALLOC, "PM pause 3D DALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \ +X( 14, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_UNPAUSE_DALLOC, "PM unpause 3D DALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \ +X( 15, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_FAILED, "PM ALLOC/DALLOC change was not actioned: PM_PAGE_MANAGEOP_STATUS=0x%x", 
1) \ +X( 16, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED, "Is TA: %d, finished: %d on HW %u (HWRTData = 0x%08x, MemCtx = 0x%08x). FL different between TA/3D: global:%d, local:%d", 7) \ +X( 17, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_3DBASE, "UFL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)", 10) \ +X( 18, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_TABASE, "UFL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)", 10) \ +X( 19, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_UPDATE_COMPLETE_VOLCANIC, "Freelist update completed [0x%08x / FL State 0x%08x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x", 7) \ +X( 20, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_UPDATE_FAILED, "Freelist update failed [0x%08x / FL State 0x%08x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x", 7) \ +X( 21, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_3DBASE_VOLCANIC, "UFL-3D-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)", 10) \ +X( 22, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_TABASE_VOLCANIC, "UFL-TA-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)", 10) \ +X( 23, RGXFW_GROUP_PM, RGXFW_SF_PM_CHECK_FL_BASEADDR, "Freelist 0x%08x base address from HW: 0x%02x%08x (expected value: 0x%02x%08x)", 5) \ +X( 24, RGXFW_GROUP_PM, RGXFW_SF_PM_ANALYSE_FL_GROW, "Analysis of FL grow: Pause=(%u,%u) Paused+Valid(%u,%u) PMStateBuffer=0x%x", 5) \ +X( 25, RGXFW_GROUP_PM, RGXFW_SF_PM_ATTEMPT_FL_GROW, "Attempt FL grow for FL: 0x%08x, new dev address: 0x%02x%08x, new page count: %u, new ready count: %u", 5) \ +X( 26, RGXFW_GROUP_PM, RGXFW_SF_PM_DEFER_FL_GROW, "Deferring FL grow for non-loaded FL: 0x%08x, new dev address: 0x%02x%08x, new page count: %u, new ready count: %u", 5) \ +X( 27, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED_ALBIORIX, "Is GEOM: %d, finished: %d (HWRTData = 0x%08x, 
MemCtx = 0x%08x)", 4) \ +X( 28, RGXFW_GROUP_PM, RGXFW_SF_PM_3D_TIMEOUT, "3D Timeout Now for FWCtx 0x%08.8x", 1) \ +X( 29, RGXFW_GROUP_PM, RGXFW_SF_PM_RECYCLE, "GEOM PM Recycle for FWCtx 0x%08.8x", 1) \ +\ +X( 1, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GLL_DYNAMIC_STATUS_DEPRECATED, "Global link list dynamic page count: vertex 0x%x, varying 0x%x, node 0x%x", 3) \ +X( 2, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GLL_STATIC_STATUS_DEPRECATED, "Global link list static page count: vertex 0x%x, varying 0x%x, node 0x%x", 3) \ +X( 3, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_WAIT_FOR_GROW_DEPRECATED, "RPM request failed. Waiting for freelist grow.", 0) \ +X( 4, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_ABORT_DEPRECATED, "RPM request failed. Aborting the current frame.", 0) \ +X( 5, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_WAIT_FOR_PENDING_GROW_DEPRECATED, "RPM waiting for pending grow on freelist 0x%08x", 1) \ +X( 6, RGXFW_GROUP_RPM, RGXFW_SF_RPM_REQUEST_HOST_GROW_DEPRECATED, "Request freelist grow [0x%08x] current pages %d, grow size %d", 3) \ +X( 7, RGXFW_GROUP_RPM, RGXFW_SF_RPM_FREELIST_LOAD_DEPRECATED, "Freelist load: SHF = 0x%08x, SHG = 0x%08x", 2) \ +X( 8, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHF_FPL_DEPRECATED, "SHF FPL register: 0x%08x.0x%08x", 2) \ +X( 9, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHG_FPL_DEPRECATED, "SHG FPL register: 0x%08x.0x%08x", 2) \ +X( 10, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_FREELIST_DEPRECATED, "Kernel requested RPM grow on freelist (type %d) at 0x%08x from current size %d to new size %d, RPM restart: %d (1=Yes)", 5) \ +X( 11, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_RESTART_DEPRECATED, "Restarting SHG", 0) \ +X( 12, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_ABORTED_DEPRECATED, "Grow failed, aborting the current frame.", 0) \ +X( 13, RGXFW_GROUP_RPM, RGXFW_SF_RPM_ABORT_COMPLETE_DEPRECATED, "RPM abort complete on HWFrameData [0x%08x].", 1) \ +X( 14, RGXFW_GROUP_RPM, RGXFW_SF_RPM_CLEANUP_NEEDS_ABORT_DEPRECATED, "RPM freelist cleanup [0x%08x] requires abort to proceed.", 1) \ +X( 15, RGXFW_GROUP_RPM, 
RGXFW_SF_RPM_RPM_PT_DEPRECATED, "RPM page table base register: 0x%08x.0x%08x", 2) \ +X( 16, RGXFW_GROUP_RPM, RGXFW_SF_RPM_OOM_ABORT_DEPRECATED, "Issuing RPM abort.", 0) \ +X( 17, RGXFW_GROUP_RPM, RGXFW_SF_RPM_OOM_TOGGLE_CHECK_FULL_DEPRECATED, "RPM OOM received but toggle bits indicate free pages available", 0) \ +X( 18, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_HW_TIMEOUT_DEPRECATED, "RPM hardware timeout. Unable to process OOM event.", 0) \ +X( 19, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHF_FPL_LOAD_DEPRECATED_DEPRECATED, "SHF FL (0x%08x) load, FPL: 0x%08x.0x%08x, roff: 0x%08x, woff: 0x%08x", 5) \ +X( 20, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHG_FPL_LOAD_DEPRECATED, "SHG FL (0x%08x) load, FPL: 0x%08x.0x%08x, roff: 0x%08x, woff: 0x%08x", 5) \ +X( 21, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHF_FPL_STORE_DEPRECATED, "SHF FL (0x%08x) store, roff: 0x%08x, woff: 0x%08x", 3) \ +X( 22, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHG_FPL_STORE_DEPRECATED, "SHG FL (0x%08x) store, roff: 0x%08x, woff: 0x%08x", 3) \ +\ +X( 1, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_FINISHED, "3D RTData 0x%08x finished on HW context %u", 2) \ +X( 2, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_READY, "3D RTData 0x%08x ready on HW context %u", 2) \ +X( 3, RGXFW_GROUP_RTD, RGXFW_SF_RTD_PB_SET_TO_DEPRECATED, "CONTEXT_PB_BASE set to 0x%x, FL different between TA/3D: local: %d, global: %d, mmu: %d", 4) \ +X( 4, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOADVFP_3D_DEPRECATED, "Loading VFP table 0x%08x%08x for 3D", 2) \ +X( 5, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOADVFP_TA_DEPRECATED, "Loading VFP table 0x%08x%08x for TA", 2) \ +X( 6, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_DEPRECATED, "Load Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: TotalPMPages = %d, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 10) \ +X( 7, RGXFW_GROUP_RTD, RGXFW_SF_RTD_VHEAP_STORE, "Perform VHEAP table store", 0) \ +X( 8, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_MATCH_FOUND, "RTData 0x%08x: found match in Context=%d: Load=No, Store=No", 2) 
\ +X( 9, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_NULL_FOUND, "RTData 0x%08x: found NULL in Context=%d: Load=Yes, Store=No", 2) \ +X( 10, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_3D_FINISHED, "RTData 0x%08x: found state 3D finished (0x%08x) in Context=%d: Load=Yes, Store=Yes", 3) \ +X( 11, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_TA_FINISHED, "RTData 0x%08x: found state TA finished (0x%08x) in Context=%d: Load=Yes, Store=Yes", 3) \ +X( 12, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_STACK_POINTERS, "Loading stack-pointers for %d (0:MidTA,1:3D) on context %d, MLIST = 0x%08x, ALIST = 0x%08x%08x", 5) \ +X( 13, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_PB_DEPRECATED, "Store Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: TotalPMPages = %d, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 10) \ +X( 14, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_FINISHED, "TA RTData 0x%08x finished on HW context %u", 2) \ +X( 15, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_LOADED, "TA RTData 0x%08x loaded on HW context %u", 2) \ +X( 16, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_PB_DEPRECATED2, "Store Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \ +X( 17, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_DEPRECATED2, "Load Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \ +X( 18, RGXFW_GROUP_RTD, RGXFW_SF_RTD_DEBUG_DEPRECATED, "Freelist 0x%x RESET!!!!!!!!", 1) \ +X( 19, RGXFW_GROUP_RTD, RGXFW_SF_RTD_DEBUG2_DEPRECATED, "Freelist 0x%x stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 5) \ +X( 20, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_DEPRECATED, "Request reconstruction of Freelist 0x%x type: %d (0:local,1:global,2:mmu) on HW context %u", 3) \ +X( 21, 
RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_ACK_DEPRECATED, "Freelist reconstruction ACK from host (HWR state :%u)", 1) \ +X( 22, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_ACK_DEPRECATED2, "Freelist reconstruction completed", 0) \ +X( 23, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_LOADED_DEPRECATED, "TA RTData 0x%08x loaded on HW context %u HWRTDataNeedsLoading=%d", 3) \ +X( 24, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TE_RGNHDR_INFO, "TE Region headers base 0x%08x%08x (RGNHDR Init: %d)", 3) \ +X( 25, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_BUFFER_ADDRS_DEPRECATED, "TA Buffers: FWCtx 0x%08x, RT 0x%08x, RTData 0x%08x, VHeap 0x%08x%08x, TPC 0x%08x%08x (MemCtx 0x%08x)", 8) \ +X( 26, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_LOADED_DEPRECATED, "3D RTData 0x%08x loaded on HW context %u", 2) \ +X( 27, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_BUFFER_ADDRS_DEPRECATED, "3D Buffers: FWCtx 0x%08x, RT 0x%08x, RTData 0x%08x (MemCtx 0x%08x)", 4) \ +X( 28, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RESTART_AFTER_PR_EXECUTED, "Restarting TA after partial render, HWRTData0State=0x%x, HWRTData1State=0x%x", 2) \ +X( 29, RGXFW_GROUP_RTD, RGXFW_SF_RTD_PB_SET_TO, "CONTEXT_PB_BASE set to 0x%x, FL different between TA/3D: local: %d, global: %d", 3) \ +X( 30, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_FL, "Store Freelist 0x%x type: %d (0:local,1:global) for PMDM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \ +X( 31, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL, "Load Freelist 0x%x type: %d (0:local,1:global) for PMDM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \ +X( 32, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_BUFFER_ADDRS_DEPRECATED2, "3D Buffers: FWCtx 0x%08x, parent RT 0x%08x, RTData 0x%08x on ctx %d, (MemCtx 0x%08x)", 5) \ +X( 33, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_BUFFER_ADDRS, "TA Buffers: FWCtx 0x%08x, RTData 0x%08x, VHeap 
0x%08x%08x, TPC 0x%08x%08x (MemCtx 0x%08x)", 7) \ +X( 34, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_BUFFER_ADDRS, "3D Buffers: FWCtx 0x%08x, RTData 0x%08x on ctx %d, (MemCtx 0x%08x)", 4) \ +X( 35, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_V2, "Load Freelist 0x%x type: %d (0:local,1:global) for PMDM%d: FL Total Pages %u (max=%u,grow size=%u)", 6) \ +X( 36, RGXFW_GROUP_RTD, RGXFW_SF_RTD_KILLED_TA, "TA RTData 0x%08x marked as killed.", 1) \ +X( 37, RGXFW_GROUP_RTD, RGXFW_SF_RTD_KILLED_3D, "3D RTData 0x%08x marked as killed.", 1) \ +X( 38, RGXFW_GROUP_RTD, RGXFW_SF_RTD_KILL_TA_AFTER_RESTART, "RTData 0x%08x will be killed after TA restart.", 1) \ +X( 39, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RENDERSTATE_RESET, "RTData 0x%08x Render State Buffer 0x%08x%08x will be reset.", 3) \ +\ +X( 1, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZLOAD_DEPRECATED, "Force Z-Load for partial render", 0) \ +X( 2, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSTORE_DEPRECATED, "Force Z-Store for partial render", 0) \ +X( 3, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_LOCAL_DEPRECATED, "3D MemFree: Local FL 0x%08x", 1) \ +X( 4, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_MMU_DEPRECATED, "3D MemFree: MMU FL 0x%08x", 1) \ +X( 5, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_GLOBAL_DEPRECATED, "3D MemFree: Global FL 0x%08x", 1) \ +X( 6, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD_DEPRECATED, "OOM TA/3D PR Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x, HardwareSync Fence [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 6) \ +X( 7, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD_UN_FL, "OOM TA_cmd=0x%08x, U-FL 0x%08x, N-FL 0x%08x", 3) \ +X( 8, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD_UN_MMU_FL_DEPRECATED, "OOM TA_cmd=0x%08x, OOM MMU:%d, U-FL 0x%08x, N-FL 0x%08x, MMU-FL 0x%08x", 5) \ +X( 9, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_AVOIDED_DEPRECATED, "Partial render avoided", 0) \ +X( 10, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_DISCARDED_DEPRECATED, "Partial render discarded", 0) \ +X( 11, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_FINISHED, "Partial Render finished", 0) \ +X( 
12, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_3DBG_DEPRECATED, "SPM Owner = 3D-BG", 0) \ +X( 13, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_3DIRQ_DEPRECATED, "SPM Owner = 3D-IRQ", 0) \ +X( 14, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_NONE_DEPRECATED, "SPM Owner = NONE", 0) \ +X( 15, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_TABG_DEPRECATED, "SPM Owner = TA-BG", 0) \ +X( 16, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_TAIRQ_DEPRECATED, "SPM Owner = TA-IRQ", 0) \ +X( 17, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSTORE_ADDRESS, "ZStore address 0x%08x%08x", 2) \ +X( 18, RGXFW_GROUP_SPM, RGXFW_SF_SPM_SSTORE_ADDRESS, "SStore address 0x%08x%08x", 2) \ +X( 19, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZLOAD_ADDRESS, "ZLoad address 0x%08x%08x", 2) \ +X( 20, RGXFW_GROUP_SPM, RGXFW_SF_SPM_SLOAD_ADDRESS, "SLoad address 0x%08x%08x", 2) \ +X( 21, RGXFW_GROUP_SPM, RGXFW_SF_SPM_NO_DEFERRED_ZSBUFFER_DEPRECATED, "No deferred ZS Buffer provided", 0) \ +X( 22, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_POPULATED, "ZS Buffer successfully populated (ID=0x%08x)", 1) \ +X( 23, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_POP_UNNEEDED_DEPRECATED, "No need to populate ZS Buffer (ID=0x%08x)", 1) \ +X( 24, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNPOPULATED, "ZS Buffer successfully unpopulated (ID=0x%08x)", 1) \ +X( 25, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNPOP_UNNEEDED_DEPRECATED, "No need to unpopulate ZS Buffer (ID=0x%08x)", 1) \ +X( 26, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_BACKING_REQUEST_DEPRECATED, "Send ZS-Buffer backing request to host (ID=0x%08x)", 1) \ +X( 27, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNBACKING_REQUEST_DEPRECATED, "Send ZS-Buffer unbacking request to host (ID=0x%08x)", 1) \ +X( 28, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_BACKING_REQUEST_PENDING_DEPRECATED, "Don't send ZS-Buffer backing request. Previous request still pending (ID=0x%08x)", 1) \ +X( 29, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNBACKING_REQUEST_PENDING_DEPRECATED, "Don't send ZS-Buffer unbacking request. 
Previous request still pending (ID=0x%08x)", 1) \ +X( 30, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZBUFFER_NOT_READY_DEPRECATED, "Partial Render waiting for ZBuffer to be backed (ID=0x%08x)", 1) \ +X( 31, RGXFW_GROUP_SPM, RGXFW_SF_SPM_SBUFFER_NOT_READY_DEPRECATED, "Partial Render waiting for SBuffer to be backed (ID=0x%08x)", 1) \ +X( 32, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_NONE, "SPM State = none", 0) \ +X( 33, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_BLOCKED, "SPM State = PR blocked", 0) \ +X( 34, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_WAIT_FOR_GROW, "SPM State = wait for grow", 0) \ +X( 35, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_WAIT_FOR_HW, "SPM State = wait for HW", 0) \ +X( 36, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_RUNNING, "SPM State = PR running", 0) \ +X( 37, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_AVOIDED, "SPM State = PR avoided", 0) \ +X( 38, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_EXECUTED, "SPM State = PR executed", 0) \ +X( 39, RGXFW_GROUP_SPM, RGXFW_SF_SPM_FREELIST_MATCH, "3DMemFree matches freelist 0x%08x (FL type = %u)", 2) \ +X( 40, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_FLAG_SET, "Raise the 3DMemFreeDedected flag", 0) \ +X( 41, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_WAIT_FOR_PENDING_GROW, "Wait for pending grow on Freelist 0x%08x", 1) \ +X( 42, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_BACKING_REQUEST_FAILED, "ZS Buffer failed to be populated (ID=0x%08x)", 1) \ +X( 43, RGXFW_GROUP_SPM, RGXFW_SF_SPM_FL_GROW_DEBUG, "Grow update inconsistency: FL addr: 0x%02x%08x, curr pages: %u, ready: %u, new: %u", 5) \ +X( 44, RGXFW_GROUP_SPM, RGXFW_SF_SPM_RESUMED_TA_WITH_SP, "OOM: Resumed TA with ready pages, FL addr: 0x%02x%08x, current pages: %u, SP : %u", 4) \ +X( 45, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ACK_GROW_UPDATE_DEPRECATED, "Received grow update, FL addr: 0x%02x%08x, current pages: %u, ready pages: %u, threshold: %u", 5) \ +X( 46, RGXFW_GROUP_SPM, RGXFW_SF_SPM_NO_DEFERRED_PRBUFFER, "No deferred partial render FW (Type=%d) Buffer provided", 1) \ +X( 47, RGXFW_GROUP_SPM, 
RGXFW_SF_SPM_BUFFER_POP_UNNEEDED, "No need to populate PR Buffer (ID=0x%08x)", 1) \ +X( 48, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_UNPOP_UNNEEDED, "No need to unpopulate PR Buffer (ID=0x%08x)", 1) \ +X( 49, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_BACKING_REQUEST, "Send PR Buffer backing request to host (ID=0x%08x)", 1) \ +X( 50, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_UNBACKING_REQUEST, "Send PR Buffer unbacking request to host (ID=0x%08x)", 1) \ +X( 51, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_BACKING_REQUEST_PENDING, "Don't send PR Buffer backing request. Previous request still pending (ID=0x%08x)", 1) \ +X( 52, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_UNBACKING_REQUEST_PENDING, "Don't send PR Buffer unbacking request. Previous request still pending (ID=0x%08x)", 1) \ +X( 53, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_NOT_READY, "Partial Render waiting for Buffer %d type to be backed (ID=0x%08x)", 2) \ +X( 54, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ACK_GROW_UPDATE, "Received grow update, FL addr: 0x%02x%08x, new pages: %u, ready pages: %u", 4) \ +X( 66, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD, "OOM TA/3D PR Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 3) \ +X( 67, RGXFW_GROUP_SPM, RGXFW_SF_SPM_RESUMED_TA, "OOM: Resumed TA with ready pages, FL addr: 0x%02x%08x, current pages: %u", 3) \ +X( 68, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PR_DEADLOCK_UNBLOCKED, "OOM TA/3D PR deadlock unblocked reordering DM%d runlist head from Context 0x%08x to 0x%08x", 3) \ +\ +X( 1, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK_DEPRECATED, "Check Pow state DM%d int: 0x%x, ext: 0x%x, pow flags: 0x%x", 4) \ +X( 2, RGXFW_GROUP_POW, RGXFW_SF_POW_GPU_IDLE, "GPU idle (might be powered down). Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \ +X( 3, RGXFW_GROUP_POW, RGXFW_SF_POW_OSREQ_DEPRECATED, "OS requested pow off (forced = %d), DM%d, pow flags: 0x%x", 3) \ +X( 4, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_DEPRECATED, "Initiate powoff query. 
Inactive DMs: %d %d %d %d", 4) \ +X( 5, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECKOFF_DEPRECATED, "Any RD-DM pending? %d, Any RD-DM Active? %d", 2) \ +X( 6, RGXFW_GROUP_POW, RGXFW_SF_POW_GPU_OFF, "GPU ready to be powered down. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \ +X( 7, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ, "HW Request On(1)/Off(0): %d, Units: 0x%08.8x", 2) \ +X( 8, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE_REQ, "Request to change num of dusts to %d (Power flags=%d)", 2) \ +X( 9, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE, "Changing number of dusts from %d to %d", 2) \ +X( 11, RGXFW_GROUP_POW, RGXFW_SF_POW_SIDEKICK_INIT_DEPRECATED, "Sidekick init", 0) \ +X( 12, RGXFW_GROUP_POW, RGXFW_SF_POW_RD_INIT_DEPRECATED, "Rascal+Dusts init (# dusts mask: 0x%x)", 1) \ +X( 13, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_RD, "Initiate powoff query for RD-DMs.", 0) \ +X( 14, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_TLA, "Initiate powoff query for TLA-DM.", 0) \ +X( 15, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_RD, "Any RD-DM pending? %d, Any RD-DM Active? %d", 2) \ +X( 16, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_TLA, "TLA-DM pending? %d, TLA-DM Active? %d", 2) \ +X( 17, RGXFW_GROUP_POW, RGXFW_SF_POW_BRN37270_DEPRECATED, "Request power up due to BRN37270. Pow stat int: 0x%x", 1) \ +X( 18, RGXFW_GROUP_POW, RGXFW_SF_POW_REQ_CANCEL, "Cancel power off request int: 0x%x, ext: 0x%x, pow flags: 0x%x", 3) \ +X( 19, RGXFW_GROUP_POW, RGXFW_SF_POW_FORCED_IDLE, "OS requested forced IDLE, pow flags: 0x%x", 1) \ +X( 20, RGXFW_GROUP_POW, RGXFW_SF_POW_CANCEL_FORCED_IDLE, "OS cancelled forced IDLE, pow flags: 0x%x", 1) \ +X( 21, RGXFW_GROUP_POW, RGXFW_SF_POW_IDLE_TIMER, "Idle timer start. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \ +X( 22, RGXFW_GROUP_POW, RGXFW_SF_POW_CANCEL_IDLE_TIMER, "Cancel idle timer. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \ +X( 23, RGXFW_GROUP_POW, RGXFW_SF_POW_APM_LATENCY_CHANGE, "Active PM latency set to %dms. 
Core clock: %d Hz", 2) \ +X( 24, RGXFW_GROUP_POW, RGXFW_SF_POW_CDM_CLUSTERS, "Compute cluster mask change to 0x%x, %d dusts powered.", 2) \ +X( 25, RGXFW_GROUP_POW, RGXFW_SF_POW_NULL_CMD_INIOFF_RD, "Null command executed, repeating initiate powoff query for RD-DMs.", 0) \ +X( 26, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_ENERGY, "Power monitor: Estimate of dynamic energy %u", 1) \ +X( 27, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK_DEPRECATED2, "Check Pow state: Int: 0x%x, Ext: 0x%x, Pow flags: 0x%x", 3) \ +X( 28, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_NEW_DEADLINE, "Proactive DVFS: New deadline, time = 0x%08x%08x", 2) \ +X( 29, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_NEW_WORKLOAD, "Proactive DVFS: New workload, cycles = 0x%08x%08x", 2) \ +X( 30, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_CALCULATE, "Proactive DVFS: Proactive frequency calculated = %u", 1) \ +X( 31, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_UTILISATION, "Proactive DVFS: Reactive utilisation = %u percent", 1) \ +X( 32, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_REACT, "Proactive DVFS: Reactive frequency calculated = %u.%u", 2) \ +X( 33, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GPIO_SEND_DEPRECATED, "Proactive DVFS: OPP Point Sent = 0x%x", 1) \ +X( 34, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_DEADLINE_REMOVED, "Proactive DVFS: Deadline removed = 0x%08x%08x", 2) \ +X( 35, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_WORKLOAD_REMOVED, "Proactive DVFS: Workload removed = 0x%08x%08x", 2) \ +X( 36, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_THROTTLE, "Proactive DVFS: Throttle to a maximum = 0x%x", 1) \ +X( 37, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GPIO_FAILURE, "Proactive DVFS: Failed to pass OPP point via GPIO.", 0) \ +X( 38, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_INVALID_NODE_DEPRECATED, "Proactive DVFS: Invalid node passed to function.", 0) \ +X( 39, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GUEST_BAD_ACCESS_DEPRECATED, "Proactive DVFS: Guest OS attempted to do a privileged action. 
OSid = %u", 1) \ +X( 40, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_UNPROFILED_STARTED, "Proactive DVFS: Unprofiled work started. Total unprofiled work present: %u", 1) \ +X( 41, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_UNPROFILED_FINISHED, "Proactive DVFS: Unprofiled work finished. Total unprofiled work present: %u", 1) \ +X( 42, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_DISABLED, "Proactive DVFS: Disabled: Not enabled by host.", 0) \ +X( 43, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ_RESULT, "HW Request Completed(1)/Aborted(0): %d, Ticks: %d", 2) \ +X( 44, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE_FIX_59042_DEPRECATED, "Allowed number of dusts is %d due to BRN59042.", 1) \ +X( 45, RGXFW_GROUP_POW, RGXFW_SF_POW_HOST_TIMEOUT_NOTIFICATION, "Host timed out while waiting for a forced idle state. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \ +X( 46, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK, "Check Pow state: Int: 0x%x, Ext: 0x%x, Pow flags: 0x%x, Fence Counters: Check: %u - Update: %u", 5) \ +X( 47, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GPIO_SEND, "Proactive DVFS: OPP Point Sent = 0x%x, Success = 0x%x", 2) \ +X( 48, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_TO_IDLE, "Proactive DVFS: GPU transitioned to idle", 0) \ +X( 49, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_TO_ACTIVE, "Proactive DVFS: GPU transitioned to active", 0) \ +X( 50, RGXFW_GROUP_POW, RGXFW_SF_POW_POWDUMP_BUFFER_SIZE, "Power counter dumping: Data truncated writing register %u. 
Buffer too small.", 1) \ +X( 51, RGXFW_GROUP_POW, RGXFW_SF_POW_POWCTRL_ABORT, "Power controller returned ABORT for last request so retrying.", 0) \ +X( 52, RGXFW_GROUP_POW, RGXFW_SF_POW_INVALID_POWER_REQUEST_DEPRECATED, "Discarding invalid power request: type 0x%x, DM %u", 2) \ +X( 53, RGXFW_GROUP_POW, RGXFW_SF_POW_CANCEL_FORCED_IDLE_NOT_IDLE, "Detected attempt to cancel forced idle while not forced idle (pow state 0x%x, pow flags 0x%x)", 2) \ +X( 54, RGXFW_GROUP_POW, RGXFW_SF_POW_FORCED_POW_OFF_NOT_IDLE, "Detected attempt to force power off while not forced idle (pow state 0x%x, pow flags 0x%x)", 2) \ +X( 55, RGXFW_GROUP_POW, RGXFW_SF_POW_NUMDUST_CHANGE_NOT_IDLE, "Detected attempt to change dust count while not forced idle (pow state 0x%x)", 1) \ +X( 56, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_RESULT, "Power monitor: Type = %d (0 = power, 1 = energy), Estimate result = 0x%08x%08x", 3) \ +X( 57, RGXFW_GROUP_POW, RGXFW_SF_POW_MINMAX_CONFLICT, "Conflicting clock frequency range: OPP min = %u, max = %u", 2) \ +X( 58, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_FLOOR, "Proactive DVFS: Set floor to a minimum = 0x%x", 1) \ +X( 59, RGXFW_GROUP_POW, RGXFW_SF_POW_OSREQ, "OS requested pow off (forced = %d), pow flags: 0x%x", 2) \ +X( 60, RGXFW_GROUP_POW, RGXFW_SF_POW_INVALID_POWER_REQUEST, "Discarding invalid power request: type 0x%x", 1) \ +X( 61, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_POW_STATE_CHANGE_REQ, "Request to change SPU power state mask from 0x%x to 0x%x. Pow flags: 0x%x", 3) \ +X( 62, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_POW_STATE_CHANGE, "Changing SPU power state mask from 0x%x to 0x%x", 2) \ +X( 63, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_POW_CHANGE_NOT_IDLE, "Detected attempt to change SPU power state mask while not forced idle (pow state 0x%x)", 1) \ +X( 64, RGXFW_GROUP_POW, RGXFW_SF_POW_INVALID_SPU_POWER_MASK, "Invalid SPU power mask 0x%x! 
Changing to 1", 1) \ +X( 65, RGXFW_GROUP_POW, RGXFW_SF_POW_CLKDIV_UPDATE, "Proactive DVFS: Send OPP %u with clock divider value %u", 2) \ +X( 66, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_PERF_MODE, "PPA block started in perf validation mode.", 0) \ +X( 67, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_RESET, "Reset PPA block state %u (1=reset, 0=recalculate).", 1) \ +X( 68, RGXFW_GROUP_POW, RGXFW_SF_POW_POWCTRL_ABORT_WITH_CORE, "Power controller returned ABORT for Core-%d last request so retrying.", 1) \ +X( 69, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ64BIT, "HW Request On(1)/Off(0): %d, Units: 0x%08x%08x", 3) \ +\ +X( 1, RGXFW_GROUP_HWR, RGXFW_SF_HWR_LOCKUP_DEPRECATED, "Lockup detected on DM%d, FWCtx: 0x%08.8x", 2) \ +X( 2, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_FW_DEPRECATED, "Reset fw state for DM%d, FWCtx: 0x%08.8x, MemCtx: 0x%08.8x", 3) \ +X( 3, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW_DEPRECATED, "Reset HW", 0) \ +X( 4, RGXFW_GROUP_HWR, RGXFW_SF_HWR_TERMINATED_DEPRECATED, "Lockup recovered.", 0) \ +X( 5, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP_DEPRECATED, "Lock-up DM%d FWCtx: 0x%08.8x", 2) \ +X( 6, RGXFW_GROUP_HWR, RGXFW_SF_HWR_LOCKUP_DETECTED_DEPRECATED, "Lockup detected: GLB(%d->%d), PER-DM(0x%08x->0x%08x)", 4) \ +X( 7, RGXFW_GROUP_HWR, RGXFW_SF_HWR_EARLY_FAULT_DETECTION_DEPRECATED, "Early fault detection: GLB(%d->%d), PER-DM(0x%08x)", 3) \ +X( 8, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_LOCKUP_DEPRECATED, "Hold scheduling due lockup: GLB(%d), PER-DM(0x%08x->0x%08x)", 3) \ +X( 9, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FALSE_LOCKUP_DEPRECATED, "False lockup detected: GLB(%d->%d), PER-DM(0x%08x->0x%08x)", 4) \ +X( 10, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BRN37729_DEPRECATED, "BRN37729: GLB(%d->%d), PER-DM(0x%08x->0x%08x)", 4) \ +X( 11, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FREELISTS_RECONSTRUCTED_DEPRECATED, "Freelists reconstructed: GLB(%d->%d), PER-DM(0x%08x)", 3) \ +X( 12, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RECONSTRUCTING_FREELISTS_DEPRECATED, "Reconstructing freelists: %u (0-No, 1-Yes): 
GLB(%d->%d), PER-DM(0x%08x)", 4) \ +X( 13, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FAILED_HW_POLL, "HW poll %u (0-Unset 1-Set) failed (reg:0x%08x val:0x%08x)", 3) \ +X( 14, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_DISCARDED_DEPRECATED, "Discarded cmd on DM%u FWCtx=0x%08x", 2) \ +X( 15, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_DISCARDED, "Discarded cmd on DM%u (reason=%u) HWRTData=0x%08x (st: %d), FWCtx 0x%08x @ %d", 6) \ +X( 16, RGXFW_GROUP_HWR, RGXFW_SF_HWR_PM_FENCE_DEPRECATED, "PM fence WA could not be applied, Valid TA Setup: %d, RD powered off: %d", 2) \ +X( 17, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_SNAPSHOT, "FL snapshot RTD 0x%08.8x - local (0x%08.8x): %d, global (0x%08.8x): %d", 5) \ +X( 18, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_CHECK, "FL check RTD 0x%08.8x, discard: %d - local (0x%08.8x): s%d?=c%d, global (0x%08.8x): s%d?=c%d", 8) \ +X( 19, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_DEPRECATED, "FL reconstruction 0x%08.8x c%d", 2) \ +X( 20, RGXFW_GROUP_HWR, RGXFW_SF_HWR_3D_CHECK, "3D check: missing TA FWCtx 0x%08.8x @ %d, RTD 0x%08x.", 3) \ +X( 21, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW_DEPRECATED2, "Reset HW (mmu:%d, extmem: %d)", 2) \ +X( 22, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ZERO_TA_CACHES, "Zero TA caches for FWCtx: 0x%08.8x (TPC addr: 0x%08x%08x, size: %d bytes)", 4) \ +X( 23, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FREELISTS_RECONSTRUCTED_DEPRECATED2, "Recovery DM%u: Freelists reconstructed. New R-Flags=0x%08x", 2) \ +X( 24, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SKIPPED_CMD, "Recovery DM%u: FWCtx 0x%08x skipped to command @ %u. PR=%u. New R-Flags=0x%08x", 5) \ +X( 25, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_RECOVERED, "Recovery DM%u: DM fully recovered", 1) \ +X( 26, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_LOCKUP, "DM%u: Hold scheduling due to R-Flag = 0x%08x", 2) \ +X( 27, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_RECONSTRUCTION, "Analysis: Need freelist reconstruction", 0) \ +X( 28, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_SKIP, "Analysis DM%u: Lockup FWCtx: 0x%08.8x. 
Need to skip to next command", 2) \ +X( 29, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_SKIP_OOM_TA, "Analysis DM%u: Lockup while TA is OOM FWCtx: 0x%08.8x. Need to skip to next command", 2) \ +X( 30, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_PR_CLEANUP, "Analysis DM%u: Lockup while partial render FWCtx: 0x%08.8x. Need PR cleanup", 2) \ +X( 31, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP_DEPRECATED2, "GPU has locked up", 0) \ +X( 32, RGXFW_GROUP_HWR, RGXFW_SF_HWR_READY, "DM%u ready for HWR", 1) \ +X( 33, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_UPDATE_RECOVERY, "Recovery DM%u: Updated Recovery counter. New R-Flags=0x%08x", 2) \ +X( 34, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BRN37729_DEPRECATED2, "Analysis: BRN37729 detected, reset TA and re-kicked 0x%08x)", 1) \ +X( 35, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_TIMED_OUT, "DM%u timed out", 1) \ +X( 36, RGXFW_GROUP_HWR, RGXFW_SF_HWR_EVENT_STATUS_REG, "RGX_CR_EVENT_STATUS=0x%08x", 1) \ +X( 37, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_FALSE_LOCKUP, "DM%u lockup falsely detected, R-Flags=0x%08x", 2) \ +X( 38, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_OUTOFTIME, "GPU has overrun its deadline", 0) \ +X( 39, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_POLLFAILURE, "GPU has failed a poll", 0) \ +X( 40, RGXFW_GROUP_HWR, RGXFW_SF_HWR_PERF_PHASE_REG, "RGX DM%u phase count=0x%08x", 2) \ +X( 41, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW_DEPRECATED3, "Reset HW (loop:%d, poll failures: 0x%08x)", 2) \ +X( 42, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MMU_FAULT_EVENT, "MMU fault event: 0x%08x", 1) \ +X( 43, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BIF1_FAULT, "BIF1 page fault detected (Bank1 MMU Status: 0x%08x)", 1) \ +X( 44, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CRC_CHECK_TRUE_DEPRECATED, "Fast CRC Failed. 
Proceeding to full register checking (DM: %u).", 1) \ +X( 45, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MMU_META_FAULT, "Meta MMU page fault detected (Meta MMU Status: 0x%08x%08x)", 2) \ +X( 46, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CRC_CHECK_DEPRECATED, "Fast CRC Check result for DM%u is HWRNeeded=%u", 2) \ +X( 47, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FULL_CHECK_DEPRECATED, "Full Signature Check result for DM%u is HWRNeeded=%u", 2) \ +X( 48, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FINAL_RESULT, "Final result for DM%u is HWRNeeded=%u with HWRChecksToGo=%u", 3) \ +X( 49, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_CHECK_DEPRECATED, "USC Slots result for DM%u is HWRNeeded=%u USCSlotsUsedByDM=%d", 3) \ +X( 50, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DEADLINE_CHECK_DEPRECATED, "Deadline counter for DM%u is HWRDeadline=%u", 2) \ +X( 51, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_FREELIST_DEPRECATED, "Holding Scheduling on OSid %u due to pending freelist reconstruction", 1) \ +X( 52, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_REQUEST, "Requesting reconstruction for freelist 0x%x (ID=%d)", 2) \ +X( 53, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_PASSED, "Reconstruction of freelist ID=%d complete", 1) \ +X( 54, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED_DEPRECATED, "Reconstruction needed for freelist 0x%x (ID=%d) type: %d (0:local,1:global,2:mmu) on HW context %u", 4) \ +X( 55, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_FAILED, "Reconstruction of freelist ID=%d failed", 1) \ +X( 56, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESTRICTING_PDS_TASKS, "Restricting PDS Tasks to help other stalling DMs (RunningMask=0x%02x, StallingMask=0x%02x, PDS_CTRL=0x%08x%08x)", 4) \ +X( 57, RGXFW_GROUP_HWR, RGXFW_SF_HWR_UNRESTRICTING_PDS_TASKS, "Unrestricting PDS Tasks again (RunningMask=0x%02x, StallingMask=0x%02x, PDS_CTRL=0x%08x%08x)", 4) \ +X( 58, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_USED, "USC slots: %u used by DM%u", 2) \ +X( 59, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_EMPTY, "USC slots: %u empty", 1) \ +X( 60, RGXFW_GROUP_HWR, 
RGXFW_SF_HWR_HCS_FIRE, "HCS DM%d's Context Switch failed to meet deadline. Current time: 0x%08x%08x, deadline: 0x%08x%08x", 5) \ +X( 61, RGXFW_GROUP_HWR, RGXFW_SF_HWR_START_HW_RESET, "Begin hardware reset (HWR Counter=%d)", 1) \ +X( 62, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FINISH_HW_RESET, "Finished hardware reset (HWR Counter=%d)", 1) \ +X( 63, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_FREELIST, "Holding Scheduling on DM %u for OSid %u due to pending freelist reconstruction", 2) \ +X( 64, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_UMQ_READ_OFFSET, "User Mode Queue ROff reset: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u becomes StreamStartOffset = %u)", 5) \ +X( 65, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED_DEPRECATED2, "Reconstruction needed for freelist 0x%x (ID=%d) type: %d (0:local,1:global) on HW context %u", 4) \ +X( 66, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MIPS_FAULT, "Mips page fault detected (BadVAddr: 0x%08x, EntryLo0: 0x%08x, EntryLo1: 0x%08x)", 3) \ +X( 67, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ANOTHER_CHANCE, "At least one other DM is running okay so DM%u will get another chance", 1) \ +X( 68, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_FW, "Reconstructing in FW, FL: 0x%x (ID=%d)", 2) \ +X( 69, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ZERO_RTC, "Zero RTC for FWCtx: 0x%08.8x (RTC addr: 0x%08x%08x, size: %d bytes)", 4) \ +X( 70, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED_DEPRECATED3, "Reconstruction needed for freelist 0x%x (ID=%d) type: %d (0:local,1:global) phase: %d (0:TA, 1:3D) on HW context %u", 5) \ +X( 71, RGXFW_GROUP_HWR, RGXFW_SF_HWR_START_LONG_HW_POLL, "Start long HW poll %u (0-Unset 1-Set) for (reg:0x%08x val:0x%08x)", 3) \ +X( 72, RGXFW_GROUP_HWR, RGXFW_SF_HWR_END_LONG_HW_POLL, "End long HW poll (result=%d)", 1) \ +X( 73, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DEADLINE_CHECK, "DM%u has taken %d ticks and deadline is %d ticks", 3) \ +X( 74, RGXFW_GROUP_HWR, RGXFW_SF_HWR_WATCHDOG_CHECK_DEPRECATED, "USC Watchdog result for DM%u is HWRNeeded=%u Status=%u USCs={0x%x} with 
HWRChecksToGo=%u", 5) \ +X( 75, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED, "Reconstruction needed for freelist 0x%x (ID=%d) OSid: %d type: %d (0:local,1:global) phase: %d (0:TA, 1:3D) on HW context %u", 6) \ +X( 76, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP, "GPU-%u has locked up", 1) \ +X( 77, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP_DM, "DM%u has locked up", 1) \ +X( 78, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_EVENT_STATUS_REG, "Core %d RGX_CR_EVENT_STATUS=0x%08x", 2) \ +X( 79, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MULTICORE_EVENT_STATUS_REG, "RGX_CR_MULTICORE_EVENT_STATUS%u=0x%08x", 2) \ +X( 80, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_BIF0_FAULT, "BIF0 page fault detected (Core %d MMU Status: 0x%08x%08x Req Status: 0x%08x%08x)", 5) \ +X( 81, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_MMU_FAULT_S7, "MMU page fault detected (Core %d MMU Status: 0x%08x%08x)", 3) \ +X( 82, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_MMU_FAULT, "MMU page fault detected (Core %d MMU Status: 0x%08x%08x 0x%08x)", 4) \ +X( 83, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW, "Reset HW (core:%d of %d, loop:%d, poll failures: 0x%08x)", 4) \ +X( 84, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CRC_CHECK, "Fast CRC Check result for Core%u, DM%u is HWRNeeded=%u", 3) \ +X( 85, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FULL_CHECK, "Full Signature Check result for Core%u, DM%u is HWRNeeded=%u", 3) \ +X( 86, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_CHECK, "USC Slots result for Core%u, DM%u is HWRNeeded=%u USCSlotsUsedByDM=%d", 4) \ +X( 87, RGXFW_GROUP_HWR, RGXFW_SF_HWR_WATCHDOG_CHECK, "USC Watchdog result for Core%u DM%u is HWRNeeded=%u Status=%u USCs={0x%x} with HWRChecksToGo=%u", 6) \ +X( 88, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MMU_RISCV_FAULT, "RISC-V MMU page fault detected (FWCORE MMU Status 0x%08x Req Status 0x%08x%08x)", 3) \ +X( 89, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_BIF_TEXAS1_PFS_DEPRECATED, "After FW fault was raised, TEXAS1_PFS poll failed on core %d with value 0x%08x", 2) \ +X( 90, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_BIF_PFS, "After FW 
fault was raised, BIF_PFS poll failed on core %d with value 0x%08x", 2) \ +X( 91, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_SET_ABORT_PM_STATUS, "After FW fault was raised, MMU_ABORT_PM_STATUS set poll failed on core %d with value 0x%08x", 2) \ +X( 92, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_UNSET_ABORT_PM_STATUS, "After FW fault was raised, MMU_ABORT_PM_STATUS unset poll failed on core %d with value 0x%08x", 2) \ +X( 93, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_SLC_INVAL, "After FW fault was raised, MMU_CTRL_INVAL poll (all but fw) failed on core %d with value 0x%08x", 2) \ +X( 94, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_SLCMMU_INVAL, "After FW fault was raised, MMU_CTRL_INVAL poll (all) failed on core %d with value 0x%08x", 2) \ +X( 95, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_BIF_TEXAS_PFS, "After FW fault was raised, TEXAS%d_PFS poll failed on core %d with value 0x%08x", 3) \ +X( 96, RGXFW_GROUP_HWR, RGXFW_SF_HWR_EXTRA_CHECK, "Extra Registers Check result for Core%u, DM%u is HWRNeeded=%u", 3) \ +\ +X( 1, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CFGBLK, "Block 0x%x mapped to Config Idx %u", 2) \ +X( 2, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_OMTBLK, "Block 0x%x omitted from event - not enabled in HW", 1) \ +X( 3, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INCBLK, "Block 0x%x included in event - enabled in HW", 1) \ +X( 4, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_SELREG, "Select register state hi_0x%x lo_0x%x", 2) \ +X( 5, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CSBHDR, "Counter stream block header word 0x%x", 1) \ +X( 6, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CTROFF, "Counter register offset 0x%x", 1) \ +X( 7, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CFGSKP, "Block 0x%x config unset, skipping", 1) \ +X( 8, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INDBLK, "Accessing Indirect block 0x%x", 1) \ +X( 9, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DIRBLK, "Accessing Direct block 0x%x", 1) \ +X( 10, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CNTPRG, "Programmed counter select register at offset 0x%x", 1) \ +X( 11, RGXFW_GROUP_HWP, 
RGXFW_SF_HWP_I_BLKPRG, "Block register offset 0x%x and value 0x%x", 2) \ +X( 12, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_UBLKCG, "Reading config block from driver 0x%x", 1) \ +X( 13, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_UBLKRG, "Reading block range 0x%x to 0x%x", 2) \ +X( 14, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_BLKREC, "Recording block 0x%x config from driver", 1) \ +X( 15, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_UBLKED, "Finished reading config block from driver", 0) \ +X( 16, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CUSTOM_COUNTER, "Custom Counter offset: 0x%x value: 0x%x", 2) \ +X( 17, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_SELECT_CNTR, "Select counter n:%u ID:0x%x", 2) \ +X( 18, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_SELECT_PACK, "The counter ID 0x%x is not allowed. The package [b:%u, n:%u] will be discarded", 3) \ +X( 19, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHANGE_FILTER_STATUS_CUSTOM, "Custom Counters filter status %d", 1) \ +X( 20, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_WRONG_BLOCK, "The Custom block %d is not allowed. Use only blocks lower than %d. The package will be discarded", 2) \ +X( 21, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_TOO_MANY_ID, "The package will be discarded because it contains %d counters IDs while the upper limit is %d", 2) \ +X( 22, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHECK_FILTER, "Check Filter 0x%x is 0x%x ?", 2) \ +X( 23, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_RESET_CUSTOM_BLOCK, "The custom block %u is reset", 1) \ +X( 24, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INVALID_CMD_DEPRECATED, "Encountered an invalid command (%d)", 1) \ +X( 25, RGXFW_GROUP_HWP, RGXFW_SF_HWP_WAITING_FOR_QUEUE_DEPRECATED, "HWPerf Queue is full, we will have to wait for space! 
(Roff = %u, Woff = %u)", 2) \ +X( 26, RGXFW_GROUP_HWP, RGXFW_SF_HWP_WAITING_FOR_QUEUE_FENCE_DEPRECATED, "HWPerf Queue is fencing, we are waiting for Roff = %d (Roff = %u, Woff = %u)", 3) \ +X( 27, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CUSTOM_BLOCK, "Custom Counter block: %d", 1) \ +X( 28, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_BLKENA, "Block 0x%x ENABLED", 1) \ +X( 29, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_BLKDIS, "Block 0x%x DISABLED", 1) \ +X( 30, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INDBLK_INSTANCE, "Accessing Indirect block 0x%x, instance %u", 2) \ +X( 31, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CTRVAL, "Counter register 0x%x, Value 0x%x", 2) \ +X( 32, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHANGE_FILTER_STATUS, "Counters filter status %d", 1) \ +X( 33, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CTLBLK, "Block 0x%x mapped to Ctl Idx %u", 2) \ +X( 34, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_WORKEST_EN, "Block(s) in use for workload estimation.", 0) \ +X( 35, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CYCCTR, "GPU %u Cycle counter 0x%x, Value 0x%x", 3) \ +X( 36, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CYCMAX, "GPU Mask 0x%x Cycle counter 0x%x, Value 0x%x", 3) \ +X( 37, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_IGNORE_BLOCKS, "Blocks IGNORED for GPU %u", 1) \ +\ +X( 1, RGXFW_GROUP_DMA, RGXFW_SF_DMA_TRANSFER_REQUEST_DEPRECATED, "Transfer 0x%02x request: 0x%02x%08x -> 0x%08x, size %u", 5) \ +X( 2, RGXFW_GROUP_DMA, RGXFW_SF_DMA_TRANSFER_COMPLETE, "Transfer of type 0x%02x expected on channel %u, 0x%02x found, status %u", 4) \ +X( 3, RGXFW_GROUP_DMA, RGXFW_SF_DMA_INT_REG, "DMA Interrupt register 0x%08x", 1) \ +X( 4, RGXFW_GROUP_DMA, RGXFW_SF_DMA_WAIT, "Waiting for transfer of type 0x%02x completion...", 1) \ +X( 5, RGXFW_GROUP_DMA, RGXFW_SF_DMA_CCB_LOADING_FAILED, "Loading of cCCB data from FW common context 0x%08x (offset: %u, size: %u) failed", 3) \ +X( 6, RGXFW_GROUP_DMA, RGXFW_SF_DMA_CCB_LOAD_INVALID, "Invalid load of cCCB data from FW common context 0x%08x (offset: %u, size: %u)", 3) \ +X( 7, RGXFW_GROUP_DMA, RGXFW_SF_DMA_POLL_FAILED, "Transfer 
0x%02x request poll failure", 1) \ +X( 8, RGXFW_GROUP_DMA, RGXFW_SF_DMA_BOOT_TRANSFER_FAILED, "Boot transfer(s) failed (code? %u, data? %u), used slower memcpy instead", 2) \ +X( 9, RGXFW_GROUP_DMA, RGXFW_SF_DMA_TRANSFER_REQUEST, "Transfer 0x%02x request on ch. %u: system 0x%02x%08x, coremem 0x%08x, flags 0x%x, size %u", 7) \ +\ +X( 1, RGXFW_GROUP_DBG, RGXFW_SF_DBG_INTPAIR, "0x%08x 0x%08x", 2) \ +X( 2, RGXFW_GROUP_DBG, RGXFW_SF_DBG_1HEX, "0x%08x", 1) \ +X( 3, RGXFW_GROUP_DBG, RGXFW_SF_DBG_2HEX, "0x%08x 0x%08x", 2) \ +X( 4, RGXFW_GROUP_DBG, RGXFW_SF_DBG_3HEX, "0x%08x 0x%08x 0x%08x", 3) \ +X( 5, RGXFW_GROUP_DBG, RGXFW_SF_DBG_4HEX, "0x%08x 0x%08x 0x%08x 0x%08x", 4) \ +X( 6, RGXFW_GROUP_DBG, RGXFW_SF_DBG_5HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x", 5) \ +X( 7, RGXFW_GROUP_DBG, RGXFW_SF_DBG_6HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x", 6) \ +X( 8, RGXFW_GROUP_DBG, RGXFW_SF_DBG_7HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x", 7) \ +X( 9, RGXFW_GROUP_DBG, RGXFW_SF_DBG_8HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x", 8) \ +X( 10, RGXFW_GROUP_DBG, RGXFW_SF_DBG_1SIGNED, "%d", 1) \ +X( 11, RGXFW_GROUP_DBG, RGXFW_SF_DBG_2SIGNED, "%d %d", 2) \ +X( 12, RGXFW_GROUP_DBG, RGXFW_SF_DBG_3SIGNED, "%d %d %d", 3) \ +X( 13, RGXFW_GROUP_DBG, RGXFW_SF_DBG_4SIGNED, "%d %d %d %d", 4) \ +X( 14, RGXFW_GROUP_DBG, RGXFW_SF_DBG_5SIGNED, "%d %d %d %d %d", 5) \ +X( 15, RGXFW_GROUP_DBG, RGXFW_SF_DBG_6SIGNED, "%d %d %d %d %d %d", 6) \ +X( 16, RGXFW_GROUP_DBG, RGXFW_SF_DBG_7SIGNED, "%d %d %d %d %d %d %d", 7) \ +X( 17, RGXFW_GROUP_DBG, RGXFW_SF_DBG_8SIGNED, "%d %d %d %d %d %d %d %d", 8) \ +X( 18, RGXFW_GROUP_DBG, RGXFW_SF_DBG_1UNSIGNED, "%u", 1) \ +X( 19, RGXFW_GROUP_DBG, RGXFW_SF_DBG_2UNSIGNED, "%u %u", 2) \ +X( 20, RGXFW_GROUP_DBG, RGXFW_SF_DBG_3UNSIGNED, "%u %u %u", 3) \ +X( 21, RGXFW_GROUP_DBG, RGXFW_SF_DBG_4UNSIGNED, "%u %u %u %u", 4) \ +X( 22, RGXFW_GROUP_DBG, RGXFW_SF_DBG_5UNSIGNED, "%u %u %u %u %u", 5) \ +X( 23, RGXFW_GROUP_DBG, RGXFW_SF_DBG_6UNSIGNED, "%u %u %u %u 
%u %u", 6) \ +X( 24, RGXFW_GROUP_DBG, RGXFW_SF_DBG_7UNSIGNED, "%u %u %u %u %u %u %u", 7) \ +X( 25, RGXFW_GROUP_DBG, RGXFW_SF_DBG_8UNSIGNED, "%u %u %u %u %u %u %u %u", 8) \ +\ +X(65535, RGXFW_GROUP_NULL, RGXFW_SF_LAST, "You should not use this string", 15) + + +/* The symbolic names found in the table above are assigned an ui32 value of + * the following format: + * 31 30 28 27 20 19 16 15 12 11 0 bits + * - --- ---- ---- ---- ---- ---- ---- ---- + * 0-11: id number + * 12-15: group id number + * 16-19: number of parameters + * 20-27: unused + * 28-30: active: identify SF packet, otherwise regular int32 + * 31: reserved for signed/unsigned compatibility + * + * The following macro assigns those values to the enum generated SF ids list. + */ +#define RGXFW_LOG_IDMARKER (0x70000000U) +#define RGXFW_LOG_CREATESFID(a,b,e) ((IMG_UINT32)(a) | ((IMG_UINT32)(b)<<12U) | ((IMG_UINT32)(e)<<16U)) | RGXFW_LOG_IDMARKER + +#define RGXFW_LOG_IDMASK (0xFFF00000) +#define RGXFW_LOG_VALIDID(I) (((I) & RGXFW_LOG_IDMASK) == RGXFW_LOG_IDMARKER) + +typedef enum { +#define X(a, b, c, d, e) c = RGXFW_LOG_CREATESFID(a,b,e), + RGXFW_LOG_SFIDLIST +#undef X +} RGXFW_LOG_SFids; + +/* Return the group id that the given (enum generated) id belongs to */ +#define RGXFW_SF_GID(x) (((IMG_UINT32)(x)>>12) & 0xfU) +/* Returns how many arguments the SF(string format) for the given (enum generated) id requires */ +#define RGXFW_SF_PARAMNUM(x) (((IMG_UINT32)(x)>>16) & 0xfU) + +#endif /* RGX_FWIF_SF_H */ diff --git a/drivers/gpu/drm/phytium/octopus/rgx_fwif_shared.h b/drivers/gpu/drm/phytium/octopus/rgx_fwif_shared.h new file mode 100644 index 000000000000..96f11a7ac542 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgx_fwif_shared.h @@ -0,0 +1,322 @@ +/*************************************************************************/ /*! +@File +@Title RGX firmware interface structures +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Description RGX firmware interface structures shared by both host client + and host server +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(RGX_FWIF_SHARED_H) +#define RGX_FWIF_SHARED_H + +#include "img_types.h" +#include "img_defs.h" +#include "rgx_common.h" +#include "phytiumvr/mem_types.h" + +/* Maximum number of UFOs in a CCB command. + * The number is based on having 32 sync prims (as originally), plus 32 sync + * checkpoints. + * Once the use of sync prims is no longer supported, we will retain + * the same total (64) as the number of sync checkpoints which may be + * supporting a fence is not visible to the client driver and has to + * allow for the number of different timelines involved in fence merges. + */ +#define RGXFWIF_CCB_CMD_MAX_UFOS (32U+32U) + +/* + * This is a generic limit imposed on any DM (TA,3D,CDM,TDM,2D,TRANSFER) + * command passed through the bridge. + * Just across the bridge in the server, any incoming kick command size is + * checked against this maximum limit. + * In case the incoming command size is larger than the specified limit, + * the bridge call is retired with error. 
+ */ +#define RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE (1024U) + +typedef struct RGXFWIF_DEV_VIRTADDR_ +{ + IMG_UINT32 ui32Addr; +} RGXFWIF_DEV_VIRTADDR; + +typedef struct +{ + IMG_DEV_VIRTADDR RGXFW_ALIGN psDevVirtAddr; + RGXFWIF_DEV_VIRTADDR pbyFWAddr; +} UNCACHED_ALIGN RGXFWIF_DMA_ADDR; + +typedef IMG_UINT8 RGXFWIF_CCCB; + +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_UFO_ADDR; +typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CLEANUP_CTL; + +/*! + * @InGroup ClientCCBTypes + * @Brief Command data for fence & update types Client CCB commands. + */ +typedef struct +{ + PRGXFWIF_UFO_ADDR puiAddrUFO; /*!< Address to be checked/updated */ + IMG_UINT32 ui32Value; /*!< Value to check-against/update-to */ +} RGXFWIF_UFO; + +typedef struct +{ + IMG_UINT32 ui32SubmittedCommands; /*!< Number of commands received by the FW */ + IMG_UINT32 ui32ExecutedCommands; /*!< Number of commands executed by the FW */ +} UNCACHED_ALIGN RGXFWIF_CLEANUP_CTL; + +#define RGXFWIF_PRBUFFER_START IMG_UINT32_C(0) +#define RGXFWIF_PRBUFFER_ZSBUFFER IMG_UINT32_C(0) +#define RGXFWIF_PRBUFFER_MSAABUFFER IMG_UINT32_C(1) +#define RGXFWIF_PRBUFFER_MAXSUPPORTED IMG_UINT32_C(2) + +typedef IMG_UINT32 RGXFWIF_PRBUFFER_TYPE; + +typedef enum +{ + RGXFWIF_PRBUFFER_UNBACKED = 0, + RGXFWIF_PRBUFFER_BACKED, + RGXFWIF_PRBUFFER_BACKING_PENDING, + RGXFWIF_PRBUFFER_UNBACKING_PENDING, +}RGXFWIF_PRBUFFER_STATE; + +typedef struct +{ + IMG_UINT32 ui32BufferID; /*!< Buffer ID*/ + IMG_BOOL bOnDemand; /*!< Needs On-demand Z/S/MSAA Buffer allocation */ + RGXFWIF_PRBUFFER_STATE eState; /*!< Z/S/MSAA -Buffer state */ + RGXFWIF_CLEANUP_CTL sCleanupState; /*!< Cleanup state */ + IMG_UINT32 ui32PRBufferFlags; /*!< Compatibility and other flags */ +} UNCACHED_ALIGN RGXFWIF_PRBUFFER; + +/* + * Used to share frame numbers across UM-KM-FW, + * frame number is set in UM, + * frame number is required in both KM for HTB and FW for FW trace. + * + * May be used to house Kick flags in the future. 
+ */ +typedef struct +{ + IMG_UINT32 ui32FrameNum; /*!< associated frame number */ +} CMD_COMMON; + +/* + * TA and 3D commands require set of firmware addresses that are stored in the + * Kernel. Client has handle(s) to Kernel containers storing these addresses, + * instead of raw addresses. We have to patch/write these addresses in KM to + * prevent UM from controlling FW addresses directly. + * Typedefs for TA and 3D commands are shared between Client and Firmware (both + * single-BVNC). Kernel is implemented in a multi-BVNC manner, so it can't use + * TA|3D CMD type definitions directly. Therefore we have a SHARED block that + * is shared between UM-KM-FW across all BVNC configurations. + */ +typedef struct +{ + CMD_COMMON sCmn; /*!< Common command attributes */ + RGXFWIF_DEV_VIRTADDR sHWRTData; /* RTData associated with this command, + this is used for context selection and for storing out HW-context, + when TA is switched out for continuing later */ + + RGXFWIF_DEV_VIRTADDR asPRBuffer[RGXFWIF_PRBUFFER_MAXSUPPORTED]; /* Supported PR Buffers like Z/S/MSAA Scratch */ + +} CMDTA3D_SHARED; + +/*! + * Client Circular Command Buffer (CCCB) control structure. + * This is shared between the Server and the Firmware and holds byte offsets + * into the CCCB as well as the wrapping mask to aid wrap around. A given + * snapshot of this queue with Cmd 1 running on the GPU might be: + * + * Roff Doff Woff + * [..........|-1----------|=2===|=3===|=4===|~5~~~~|~6~~~~|~7~~~~|..........] + * < runnable commands >< !ready to run > + * + * Cmd 1 : Currently executing on the GPU data master. + * Cmd 2,3,4: Fence dependencies met, commands runnable. + * Cmd 5... : Fence dependency not met yet. + */ +typedef struct +{ + IMG_UINT32 ui32WriteOffset; /*!< Host write offset into CCB. This + * must be aligned to 16 bytes. */ + IMG_UINT32 ui32ReadOffset; /*!< Firmware read offset into CCB. 
+ Points to the command that is + * runnable on GPU, if R!=W */ + IMG_UINT32 ui32DepOffset; /*!< Firmware fence dependency offset. + * Points to commands not ready, i.e. + * fence dependencies are not met. */ + IMG_UINT32 ui32WrapMask; /*!< Offset wrapping mask, total capacity + * in bytes of the CCB-1 */ +} UNCACHED_ALIGN RGXFWIF_CCCB_CTL; + + +typedef IMG_UINT32 RGXFW_FREELIST_TYPE; + +#define RGXFW_LOCAL_FREELIST IMG_UINT32_C(0) +#define RGXFW_GLOBAL_FREELIST IMG_UINT32_C(1) +#define RGXFW_MAX_FREELISTS (RGXFW_GLOBAL_FREELIST + 1U) + + +typedef struct +{ + IMG_UINT64 uTAReg_DCE_ROOT_CTRL_STREAM; + IMG_UINT64 uTAReg_DCE_CONTEXT_STATE_BASE_ADDR; + IMG_UINT64 uTAReg_TA_CONTEXT_STATE_BASE_ADDR; + + struct + { + IMG_UINT64 uTAReg_DCE_CONTEXT_STORE_TASK_VDM0; + IMG_UINT64 uTAReg_DCE_CONTEXT_STORE_TASK_VDM1; + IMG_UINT64 uTAReg_DCE_CONTEXT_STORE_TASK_VDM2; + + IMG_UINT64 uTAReg_DCE_CONTEXT_STORE_TASK_DDM0; + IMG_UINT64 uTAReg_DCE_CONTEXT_STORE_TASK_DDM1; + IMG_UINT64 uTAReg_DCE_CONTEXT_STORE_TASK_DDM2; + IMG_UINT64 uTAReg_DCE_CONTEXT_STORE_TASK_XFB; + + /* VDM resume state update controls */ + IMG_UINT64 uTAReg_DCE_CONTEXT_RESUME_TASK_VDM0; + IMG_UINT64 uTAReg_DCE_CONTEXT_RESUME_TASK_VDM1; + IMG_UINT64 uTAReg_DCE_CONTEXT_RESUME_TASK_VDM2; + + + IMG_UINT64 uTAReg_DCE_CONTEXT_RESUME_TASK_DDM0; + IMG_UINT64 uTAReg_DCE_CONTEXT_RESUME_TASK_DDM1; + IMG_UINT64 uTAReg_DCE_CONTEXT_RESUME_TASK_DDM2; + IMG_UINT64 uTAReg_DCE_CONTEXT_RESUME_TASK_XFB; + } asTAState[2]; + +} RGXFWIF_TAREGISTERS_CSWITCH; + +typedef struct +{ + IMG_UINT64 u3DReg_IPP_CONTEXT_ADDR; +} RGXFWIF_3DREGISTERS_CSWITCH; + +typedef struct +{ + IMG_UINT64 uCDMReg_CDM_CONTEXT_PDS0; + IMG_UINT64 uCDMReg_CDM_CONTEXT_PDS1; + IMG_UINT64 uCDMReg_CDM_TERMINATE_PDS; + IMG_UINT64 uCDMReg_CDM_TERMINATE_PDS1; + + /* CDM resume controls */ + IMG_UINT64 uCDMReg_CDM_RESUME_PDS0; + IMG_UINT64 uCDMReg_CDM_CONTEXT_PDS0_B; + IMG_UINT64 uCDMReg_CDM_RESUME_PDS0_B; + +} RGXFWIF_CDM_REGISTERS_CSWITCH; + 
+static_assert((sizeof(RGXFWIF_CDM_REGISTERS_CSWITCH) % 8U) == 0U, + "the size of the structure must be multiple of 8"); + +#define RGXFWIF_CDM_REGISTERS_CSWITCH_SIZE sizeof(RGXFWIF_CDM_REGISTERS_CSWITCH) + + +typedef struct +{ + RGXFWIF_TAREGISTERS_CSWITCH RGXFW_ALIGN sCtxSwitch_GeomRegs;/*!< Geometry registers for ctx switch */ + RGXFWIF_3DREGISTERS_CSWITCH RGXFW_ALIGN sCtxSwitch_3DRegs; /*!< 3D registers for ctx switch */ +} RGXFWIF_STATIC_RENDERCONTEXT_STATE; + +#define RGXFWIF_STATIC_RENDERCONTEXT_SIZE sizeof(RGXFWIF_STATIC_RENDERCONTEXT_STATE) + +typedef struct +{ + RGXFWIF_CDM_REGISTERS_CSWITCH RGXFW_ALIGN sCtxSwitch_Regs; /*!< CDM registers for ctx switch */ +} RGXFWIF_STATIC_COMPUTECONTEXT_STATE; + +#define RGXFWIF_STATIC_COMPUTECONTEXT_SIZE sizeof(RGXFWIF_STATIC_COMPUTECONTEXT_STATE) + +typedef struct +{ + IMG_UINT64 uRDMReg_RDM_CONTEXT_STATE_BASE_ADDR; +} RGXFWIF_RDM_REGISTERS_CSWITCH; + +static_assert((sizeof(RGXFWIF_RDM_REGISTERS_CSWITCH) % 8U) == 0U, + "the size of the structure must be multiple of 8"); + +#define RGXFWIF_RDM_REGISTERS_CSWITCH_SIZE sizeof(RGXFWIF_RDM_REGISTERS_CSWITCH) + +typedef struct +{ + RGXFWIF_RDM_REGISTERS_CSWITCH RGXFW_ALIGN sCtxSwitch_Regs; /*!< RDM registers for ctx switch */ +} RGXFWIF_STATIC_RAYCONTEXT_STATE; + +#define RGXFWIF_STATIC_RAYCONTEXT_SIZE sizeof(RGXFWIF_STATIC_RAYCONTEXT_STATE) + +/*! + @Brief Context reset reason. Last reset reason for a reset context. 
+*/ +typedef enum +{ + RGX_CONTEXT_RESET_REASON_NONE = 0, /*!< No reset reason recorded */ + RGX_CONTEXT_RESET_REASON_GUILTY_LOCKUP = 1, /*!< Caused a reset due to locking up */ + RGX_CONTEXT_RESET_REASON_INNOCENT_LOCKUP = 2, /*!< Affected by another context locking up */ + RGX_CONTEXT_RESET_REASON_GUILTY_OVERRUNING = 3, /*!< Overran the global deadline */ + RGX_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING = 4, /*!< Affected by another context overrunning */ + RGX_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH = 5, /*!< Forced reset to ensure scheduling requirements */ + RGX_CONTEXT_RESET_REASON_WGP_CHECKSUM = 6, /*!< CDM Mission/safety checksum mismatch */ + RGX_CONTEXT_RESET_REASON_TRP_CHECKSUM = 7, /*!< TRP checksum mismatch */ + RGX_CONTEXT_RESET_REASON_GPU_ECC_OK = 8, /*!< GPU ECC error (corrected, OK) */ + RGX_CONTEXT_RESET_REASON_GPU_ECC_HWR = 9, /*!< GPU ECC error (uncorrected, HWR) */ + RGX_CONTEXT_RESET_REASON_FW_ECC_OK = 10, /*!< FW ECC error (corrected, OK) */ + RGX_CONTEXT_RESET_REASON_FW_ECC_ERR = 11, /*!< FW ECC error (uncorrected, ERR) */ + RGX_CONTEXT_RESET_REASON_FW_WATCHDOG = 12, /*!< FW Safety watchdog triggered */ + RGX_CONTEXT_RESET_REASON_FW_PAGEFAULT = 13, /*!< FW page fault (no HWR) */ + RGX_CONTEXT_RESET_REASON_FW_EXEC_ERR = 14, /*!< FW execution error (GPU reset requested) */ + RGX_CONTEXT_RESET_REASON_HOST_WDG_FW_ERR = 15, /*!< Host watchdog detected FW error */ + RGX_CONTEXT_GEOM_OOM_DISABLED = 16, /*!< Geometry DM OOM event is not allowed */ +} RGX_CONTEXT_RESET_REASON; + +/*! 
+ @Brief Context reset data shared with the host +*/ +typedef struct +{ + RGX_CONTEXT_RESET_REASON eResetReason; /*!< Reset reason */ + IMG_UINT32 ui32ResetExtJobRef; /*!< External Job ID */ +} RGX_CONTEXT_RESET_REASON_DATA; +#endif /* RGX_FWIF_SHARED_H */ + +/****************************************************************************** + End of file (rgx_fwif_shared.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgx_heap_firmware.h b/drivers/gpu/drm/phytium/octopus/rgx_heap_firmware.h new file mode 100644 index 000000000000..cfd77a0c5a7c --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgx_heap_firmware.h @@ -0,0 +1,126 @@ +/*************************************************************************/ /*! +@File +@Title RGX FW heap definitions +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(RGX_HEAP_FIRMWARE_H) +#define RGX_HEAP_FIRMWARE_H + +/* Start at 903GiB. Size of 32MB per OSID (see rgxheapconfig.h) + * NOTE: + * The firmware heaps bases and sizes are defined here to + * simplify #include dependencies, see rgxheapconfig.h + * for the full RGX virtual address space layout. + */ + +/* + * The Config heap holds initialisation data shared between the + * the driver and firmware (e.g. pointers to the KCCB and FWCCB). + * The Main Firmware heap size is adjusted accordingly but most + * of the map / unmap functions must take into consideration + * the entire range (i.e. main and config heap). 
+ */ +#define RGX_FIRMWARE_NUMBER_OF_FW_HEAPS (2) +#define RGX_FIRMWARE_HEAP_SHIFT RGX_FW_HEAP_SHIFT +#define RGX_FIRMWARE_RAW_HEAP_BASE (0xE1C0000000ULL) +#define RGX_FIRMWARE_RAW_HEAP_SIZE (IMG_UINT32_C(1) << RGX_FIRMWARE_HEAP_SHIFT) + +/* To enable the firmware to compute the exact address of structures allocated by the KM + * in the Fw Config subheap, regardless of the KM's page size (and PMR granularity), + * objects allocated consecutively but from different PMRs (due to differing memalloc flags) + * are allocated with a 64kb offset. This way, all structures will be located at the same base + * addresses when the KM is running with a page size of 4k, 16k or 64k. */ +#define RGX_FIRMWARE_CONFIG_HEAP_ALLOC_GRANULARITY (IMG_UINT32_C(0x10000)) + +/* Ensure the heap can hold 3 PMRs of maximum supported granularity (192KB): + * 1st PMR: RGXFWIF_CONNECTION_CTL + * 2nd PMR: RGXFWIF_OSINIT + * 3rd PMR: RGXFWIF_SYSINIT */ +#define RGX_FIRMWARE_CONFIG_HEAP_SIZE (3*RGX_FIRMWARE_CONFIG_HEAP_ALLOC_GRANULARITY) + +#define RGX_FIRMWARE_META_MAIN_HEAP_SIZE (RGX_FIRMWARE_RAW_HEAP_SIZE - RGX_FIRMWARE_CONFIG_HEAP_SIZE) +/* + * MIPS FW needs space in the Main heap to map GPU memory. + * This space is taken from the MAIN heap, to avoid creating a new heap. 
+ */ +#define RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_NORMAL (IMG_UINT32_C(0x100000)) /* 1MB */ +#define RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_BRN65101 (IMG_UINT32_C(0x400000)) /* 4MB */ + +#define RGX_FIRMWARE_MIPS_MAIN_HEAP_SIZE_NORMAL (RGX_FIRMWARE_RAW_HEAP_SIZE - RGX_FIRMWARE_CONFIG_HEAP_SIZE - \ + RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_NORMAL) + +#define RGX_FIRMWARE_MIPS_MAIN_HEAP_SIZE_BRN65101 (RGX_FIRMWARE_RAW_HEAP_SIZE - RGX_FIRMWARE_CONFIG_HEAP_SIZE - \ + RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_BRN65101) + +#if !defined(__KERNEL__) +#if defined(FIX_HW_BRN_65101) +#define RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_BRN65101 +#define RGX_FIRMWARE_MIPS_MAIN_HEAP_SIZE RGX_FIRMWARE_MIPS_MAIN_HEAP_SIZE_BRN65101 + +#include "img_defs.h" +static_assert((RGX_FIRMWARE_RAW_HEAP_SIZE) >= IMG_UINT32_C(0x800000), "MIPS GPU map size cannot be increased due to BRN65101 with a small FW heap"); + +#else +#define RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_NORMAL +#define RGX_FIRMWARE_MIPS_MAIN_HEAP_SIZE RGX_FIRMWARE_MIPS_MAIN_HEAP_SIZE_NORMAL +#endif +#endif /* !defined(__KERNEL__) */ + +/* Host sub-heap order: MAIN + CONFIG */ +#define RGX_FIRMWARE_HOST_MAIN_HEAP_BASE RGX_FIRMWARE_RAW_HEAP_BASE +#define RGX_FIRMWARE_HOST_CONFIG_HEAP_BASE (RGX_FIRMWARE_HOST_MAIN_HEAP_BASE + \ + RGX_FIRMWARE_RAW_HEAP_SIZE - \ + RGX_FIRMWARE_CONFIG_HEAP_SIZE) + +/* Guest sub-heap order: CONFIG + MAIN */ +#define RGX_FIRMWARE_GUEST_CONFIG_HEAP_BASE RGX_FIRMWARE_RAW_HEAP_BASE +#define RGX_FIRMWARE_GUEST_MAIN_HEAP_BASE (RGX_FIRMWARE_GUEST_CONFIG_HEAP_BASE + \ + RGX_FIRMWARE_CONFIG_HEAP_SIZE) + +/* + * The maximum configurable size via RGX_FW_HEAP_SHIFT is 32MiB (1<<25) and + * the minimum is 4MiB (1<<22); the default firmware heap size is set to + * maximum 32MiB. 
+ */ +#if defined(RGX_FW_HEAP_SHIFT) && (RGX_FW_HEAP_SHIFT < 22 || RGX_FW_HEAP_SHIFT > 25) +#error "RGX_FW_HEAP_SHIFT is outside valid range [22, 25]" +#endif + +#endif /* RGX_HEAP_FIRMWARE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/rgx_heaps.h b/drivers/gpu/drm/phytium/octopus/rgx_heaps.h new file mode 100644 index 000000000000..cc77938395b5 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgx_heaps.h @@ -0,0 +1,65 @@ +/*************************************************************************/ /*! +@File +@Title RGX heap definitions +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(RGX_HEAPS_H) +#define RGX_HEAPS_H + +/* + Identify heaps by their names +*/ +#define RGX_GENERAL_SVM_HEAP_IDENT "General SVM" /*!< SVM (shared virtual memory) Heap Identifier */ +#define RGX_GENERAL_HEAP_IDENT "General" /*!< RGX General Heap Identifier */ +#define RGX_GENERAL_NON4K_HEAP_IDENT "General NON-4K" /*!< RGX General non-4K Heap Identifier */ +#define RGX_PDSCODEDATA_HEAP_IDENT "PDS Code and Data" /*!< RGX PDS Code/Data Heap Identifier */ +#define RGX_USCCODE_HEAP_IDENT "USC Code" /*!< RGX USC Code Heap Identifier */ +#define RGX_VK_CAPT_REPLAY_HEAP_IDENT "Vulkan Capture Replay" /*!< RGX vulkan capture replay buffer Heap Identifier */ +#define RGX_SIGNALS_HEAP_IDENT "Signals" /*!< Compute Signals Heap Identifier */ +#define RGX_COMPONENT_CTRL_HEAP_IDENT "Component Control" /*!< RGX DCE Component Control Heap Identifier */ +#define RGX_FBCDC_HEAP_IDENT "FBCDC" /*!< RGX FBCDC State Table Heap Identifier */ +#define RGX_FBCDC_LARGE_HEAP_IDENT "Large FBCDC" /*!< RGX Large FBCDC State Table Heap Identifier */ +#define RGX_PDS_INDIRECT_STATE_HEAP_IDENT "PDS Indirect State" /*!< PDS Indirect State Table Heap 
Identifier */ +#define RGX_CMP_MISSION_RMW_HEAP_IDENT "Compute Mission RMW" /*!< Compute Mission RMW Heap Identifier */ +#define RGX_CMP_SAFETY_RMW_HEAP_IDENT "Compute Safety RMW" /*!< Compute Safety RMW Heap Identifier */ +#define RGX_TEXTURE_STATE_HEAP_IDENT "Texture State" /*!< Texture State Heap Identifier */ +#define RGX_VISIBILITY_TEST_HEAP_IDENT "Visibility Test" /*!< Visibility Test Heap Identifier */ + +#endif /* RGX_HEAPS_H */ diff --git a/drivers/gpu/drm/phytium/octopus/rgx_hwperf.h b/drivers/gpu/drm/phytium/octopus/rgx_hwperf.h new file mode 100644 index 000000000000..fbdd8f397964 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgx_hwperf.h @@ -0,0 +1,1772 @@ +/*************************************************************************/ /*! +@File +@Title RGX HWPerf and Debug Types and Defines Header +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Common data types definitions for hardware performance API +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef RGX_HWPERF_H_ +#define RGX_HWPERF_H_ + +#if defined(__cplusplus) +extern "C" { +#endif + +/* These structures are used on both GPU and CPU and must be a size that is a + * multiple of 64 bits, 8 bytes to allow the FW to write 8 byte quantities at + * 8 byte aligned addresses. RGX_FW_STRUCT_*_ASSERT() is used to check this. 
+ */ + +/****************************************************************************** + * Includes and Defines + *****************************************************************************/ + +#include "img_types.h" +#include "img_defs.h" + +#include "rgx_common.h" +#include "pvrsrv_tlcommon.h" +#include "pvrsrv_sync_km.h" + + +#if defined(RGX_BVNC_CORE_KM_HEADER) && defined(RGX_BNC_CONFIG_KM_HEADER) +/* HWPerf interface assumption checks */ +static_assert(RGX_FEATURE_NUM_CLUSTERS <= 16U, + "Cluster count too large for HWPerf protocol definition"); +#endif + +/*! Perf counter control words */ +#define RGX_HWPERF_CTRL_NOP (0) /*!< only update HW counters */ +#define RGX_HWPERF_CTRL_STATE_UPDATE_EN (1U << 31) /*!< persistent state update; see other flags below */ +#define RGX_HWPERF_CTRL_GEOM_FULLRANGE (1U) /*!< selectable geom and 3D counters are full range */ +#define RGX_HWPERF_CTRL_COMP_FULLRANGE (2U) /*!< selectable compute counters are full range */ +#define RGX_HWPERF_CTRL_TDM_FULLRANGE (4U) /*!< selectable TDM counters are full range */ + +/****************************************************************************** + * Packet Event Type Enumerations + *****************************************************************************/ + +/*! Type used to encode the event that generated the packet. + * NOTE: When this type is updated the corresponding hwperfbin2json tool + * source needs to be updated as well. The RGX_HWPERF_EVENT_MASK_* macros will + * also need updating when adding new types. + * + * @par + * The event type values are incrementing integers for use as a shift ordinal + * in the event filtering process at the point events are generated. + * This scheme thus implies a limit of 63 event types. + */ + +typedef IMG_UINT32 RGX_HWPERF_EVENT_TYPE; + +#define RGX_HWPERF_INVALID 0x00U /*!< Invalid. Reserved value. */ + +/*! 
FW types 0x01..0x06 */ +#define RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE 0x01U + +#define RGX_HWPERF_FW_BGSTART 0x01U /*!< Background task processing start */ +#define RGX_HWPERF_FW_BGEND 0x02U /*!< Background task end */ +#define RGX_HWPERF_FW_IRQSTART 0x03U /*!< IRQ task processing start */ + +#define RGX_HWPERF_FW_IRQEND 0x04U /*!< IRQ task end */ +#define RGX_HWPERF_FW_DBGSTART 0x05U /*!< Debug event start */ +#define RGX_HWPERF_FW_DBGEND 0x06U /*!< Debug event end */ + +#define RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE 0x06U + +/*! HW types 0x07..0x19 */ +#define RGX_HWPERF_HW_EVENT_RANGE0_FIRST_TYPE 0x07U + +#define RGX_HWPERF_HW_PMOOM_TAPAUSE 0x07U /*!< TA Pause at PM Out of Memory */ + +#define RGX_HWPERF_HW_TAKICK 0x08U /*!< TA task started */ +#define RGX_HWPERF_HW_TAFINISHED 0x09U /*!< TA task finished */ +#define RGX_HWPERF_HW_3DTQKICK 0x0AU /*!< 3D TQ started */ +#define RGX_HWPERF_HW_3DKICK 0x0BU /*!< 3D task started */ +#define RGX_HWPERF_HW_3DFINISHED 0x0CU /*!< 3D task finished */ +#define RGX_HWPERF_HW_CDMKICK 0x0DU /*!< CDM task started */ +#define RGX_HWPERF_HW_CDMFINISHED 0x0EU /*!< CDM task finished */ +#define RGX_HWPERF_HW_TLAKICK 0x0FU /*!< TLA task started */ +#define RGX_HWPERF_HW_TLAFINISHED 0x10U /*!< TLA task finished */ +#define RGX_HWPERF_HW_3DSPMKICK 0x11U /*!< 3D SPM task started */ +#define RGX_HWPERF_HW_PERIODIC 0x12U /*!< Periodic event with updated HW counters */ +#define RGX_HWPERF_HW_RTUKICK 0x13U /*!< Reserved, future use */ +#define RGX_HWPERF_HW_RTUFINISHED 0x14U /*!< Reserved, future use */ +#define RGX_HWPERF_HW_SHGKICK 0x15U /*!< Reserved, future use */ +#define RGX_HWPERF_HW_SHGFINISHED 0x16U /*!< Reserved, future use */ +#define RGX_HWPERF_HW_3DTQFINISHED 0x17U /*!< 3D TQ finished */ +#define RGX_HWPERF_HW_3DSPMFINISHED 0x18U /*!< 3D SPM task finished */ + +#define RGX_HWPERF_HW_PMOOM_TARESUME 0x19U /*!< TA Resume after PM Out of Memory */ + +/*! HW_EVENT_RANGE0 used up. 
Use next empty range below to add new hardware events */ +#define RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE 0x19U + +/*! other types 0x1A..0x1F */ +#define RGX_HWPERF_CLKS_CHG 0x1AU /*!< Clock speed change in GPU */ +#define RGX_HWPERF_GPU_STATE_CHG 0x1BU /*!< GPU work state change */ + +/*! power types 0x20..0x27 */ +#define RGX_HWPERF_PWR_EST_RANGE_FIRST_TYPE 0x20U +#define RGX_HWPERF_PWR_EST_REQUEST 0x20U /*!< Power estimate requested (via GPIO) */ +#define RGX_HWPERF_PWR_EST_READY 0x21U /*!< Power estimate inputs ready */ +#define RGX_HWPERF_PWR_EST_RESULT 0x22U /*!< Power estimate result calculated */ +#define RGX_HWPERF_PWR_EST_RANGE_LAST_TYPE 0x22U + +#define RGX_HWPERF_PWR_CHG 0x23U /*!< Power state change */ + +/*! HW_EVENT_RANGE1 0x28..0x2F, for accommodating new hardware events */ +#define RGX_HWPERF_HW_EVENT_RANGE1_FIRST_TYPE 0x28U + +#define RGX_HWPERF_HW_TDMKICK 0x28U /*!< TDM task started */ +#define RGX_HWPERF_HW_TDMFINISHED 0x29U /*!< TDM task finished */ +#define RGX_HWPERF_HW_NULLKICK 0x2AU /*!< NULL event */ + +#define RGX_HWPERF_HW_EVENT_RANGE1_LAST_TYPE 0x2AU + +/*! context switch types 0x30..0x31 */ +#define RGX_HWPERF_CSW_START 0x30U /*!< HW context store started */ +#define RGX_HWPERF_CSW_FINISHED 0x31U /*!< HW context store finished */ + +/*! DVFS events */ +#define RGX_HWPERF_DVFS 0x32U /*!< Dynamic voltage/frequency scaling events */ + +/*! firmware misc 0x38..0x39 */ +#define RGX_HWPERF_UFO 0x38U /*!< FW UFO Check / Update */ +#define RGX_HWPERF_FWACT 0x39U /*!< FW Activity notification */ + +/*! last */ +#define RGX_HWPERF_LAST_TYPE 0x3BU + +/*! This enumeration must have a value that is a power of two as it is + * used in masks and a filter bit field (currently 64 bits long). + */ +#define RGX_HWPERF_MAX_TYPE 0x40U + +static_assert(RGX_HWPERF_LAST_TYPE < RGX_HWPERF_MAX_TYPE, "Too many HWPerf event types"); + +/*! 
Macro used to check if an event type ID is present in the known set of hardware type events */ +#define HWPERF_PACKET_IS_HW_TYPE(_etype) (((_etype) >= RGX_HWPERF_HW_EVENT_RANGE0_FIRST_TYPE && (_etype) <= RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE) || \ + ((_etype) >= RGX_HWPERF_HW_EVENT_RANGE1_FIRST_TYPE && (_etype) <= RGX_HWPERF_HW_EVENT_RANGE1_LAST_TYPE)) + +/*! Macro used to check if an event type ID is present in the known set of firmware type events */ +#define HWPERF_PACKET_IS_FW_TYPE(_etype) \ + ((_etype) >= RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE && \ + (_etype) <= RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE) + + +typedef enum { + RGX_HWPERF_HOST_INVALID = 0x00, /*!< Invalid, do not use. */ + RGX_HWPERF_HOST_ENQ = 0x01, /*!< ``0x01`` Kernel driver has queued GPU work. + See RGX_HWPERF_HOST_ENQ_DATA */ + RGX_HWPERF_HOST_UFO = 0x02, /*!< ``0x02`` UFO updated by the driver. + See RGX_HWPERF_HOST_UFO_DATA */ + RGX_HWPERF_HOST_ALLOC = 0x03, /*!< ``0x03`` Resource allocated. + See RGX_HWPERF_HOST_ALLOC_DATA */ + RGX_HWPERF_HOST_CLK_SYNC = 0x04, /*!< ``0x04`` GPU / Host clocks correlation data. + See RGX_HWPERF_HOST_CLK_SYNC_DATA */ + RGX_HWPERF_HOST_FREE = 0x05, /*!< ``0x05`` Resource freed, + See RGX_HWPERF_HOST_FREE_DATA */ + RGX_HWPERF_HOST_MODIFY = 0x06, /*!< ``0x06`` Resource modified / updated. + See RGX_HWPERF_HOST_MODIFY_DATA */ + RGX_HWPERF_HOST_DEV_INFO = 0x07, /*!< ``0x07`` Device Health status. + See RGX_HWPERF_HOST_DEV_INFO_DATA */ + RGX_HWPERF_HOST_INFO = 0x08, /*!< ``0x08`` Device memory usage information. + See RGX_HWPERF_HOST_INFO_DATA */ + RGX_HWPERF_HOST_SYNC_FENCE_WAIT = 0x09, /*!< ``0x09`` Wait for sync event. + See RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA */ + RGX_HWPERF_HOST_SYNC_SW_TL_ADVANCE = 0x0A, /*!< ``0x0A`` Software timeline advanced. + See RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA */ + + /*! last */ + RGX_HWPERF_HOST_LAST_TYPE, + + /*! 
This enumeration must have a value that is a power of two as it is + * used in masks and a filter bit field (currently 32 bits long). + */ + RGX_HWPERF_HOST_MAX_TYPE = 0x20 +} RGX_HWPERF_HOST_EVENT_TYPE; + +/*!< The event type values are incrementing integers for use as a shift ordinal + * in the event filtering process at the point events are generated. + * This scheme thus implies a limit of 31 event types. + */ +static_assert(RGX_HWPERF_HOST_LAST_TYPE < RGX_HWPERF_HOST_MAX_TYPE, "Too many HWPerf host event types"); + + +/****************************************************************************** + * Packet Header Format Version 2 Types + *****************************************************************************/ + +/*! Major version number of the protocol in operation + */ +#define RGX_HWPERF_V2_FORMAT 2 + +/*! Signature ASCII pattern 'HWP2' found in the first word of a HWPerfV2 packet + */ +#define HWPERF_PACKET_V2_SIG 0x48575032 + +/*! Signature ASCII pattern 'HWPA' found in the first word of a HWPerfV2a packet + */ +#define HWPERF_PACKET_V2A_SIG 0x48575041 + +/*! Signature ASCII pattern 'HWPB' found in the first word of a HWPerfV2b packet + */ +#define HWPERF_PACKET_V2B_SIG 0x48575042 + +/*! Signature ASCII pattern 'HWPC' found in the first word of a HWPerfV2c packet + */ +#define HWPERF_PACKET_V2C_SIG 0x48575043 + +#define HWPERF_PACKET_ISVALID(_val) (((_val) == HWPERF_PACKET_V2_SIG) || ((_val) == HWPERF_PACKET_V2A_SIG) || ((_val) == HWPERF_PACKET_V2B_SIG) || ((_val) == HWPERF_PACKET_V2C_SIG)) +/*!< Checks that the packet signature is one of the supported versions */ + +/*! Type defines the HWPerf packet header common to all events. 
*/ +typedef struct +{ + IMG_UINT32 ui32Sig; /*!< Always the value HWPERF_PACKET_SIG */ + IMG_UINT32 ui32Size; /*!< Overall packet size in bytes */ + IMG_UINT32 eTypeId; /*!< Event type information field */ + IMG_UINT32 ui32Ordinal; /*!< Sequential number of the packet */ + IMG_UINT64 ui64Timestamp; /*!< Event timestamp */ +} RGX_HWPERF_V2_PACKET_HDR, *RGX_PHWPERF_V2_PACKET_HDR; + +RGX_FW_STRUCT_OFFSET_ASSERT(RGX_HWPERF_V2_PACKET_HDR, ui64Timestamp); + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_V2_PACKET_HDR); + + +/*! Mask for use with the IMG_UINT32 ui32Size header field */ +#define RGX_HWPERF_SIZE_MASK 0xFFFFU + +/*! This macro defines an upper limit to which the size of the largest variable + * length HWPerf packet must fall within, currently 3KB. This constant may be + * used to allocate a buffer to hold one packet. + * This upper limit is policed by packet producing code. + */ +#define RGX_HWPERF_MAX_PACKET_SIZE 0xC00U + +/*! Defines an upper limit to the size of a variable length packet payload. + */ +#define RGX_HWPERF_MAX_PAYLOAD_SIZE ((IMG_UINT32)(RGX_HWPERF_MAX_PACKET_SIZE-\ + sizeof(RGX_HWPERF_V2_PACKET_HDR))) + +/*! Macro which takes a structure name and provides the packet size for + * a fixed size payload packet, rounded up to 8 bytes to align packets + * for 64 bit architectures. */ +#define RGX_HWPERF_MAKE_SIZE_FIXED(_struct) ((IMG_UINT32)(RGX_HWPERF_SIZE_MASK&(sizeof(RGX_HWPERF_V2_PACKET_HDR)+PVR_ALIGN(sizeof(_struct), PVRSRVTL_PACKET_ALIGNMENT)))) + +/*! Macro which takes the number of bytes written in the data payload of a + * packet for a variable size payload packet, rounded up to 8 bytes to + * align packets for 64 bit architectures. */ +#define RGX_HWPERF_MAKE_SIZE_VARIABLE(_size) ((IMG_UINT32)(RGX_HWPERF_SIZE_MASK&(sizeof(RGX_HWPERF_V2_PACKET_HDR)+PVR_ALIGN((_size), PVRSRVTL_PACKET_ALIGNMENT)))) + +/*! 
Macro to obtain the size of the packet */ +#define RGX_HWPERF_GET_SIZE(_packet_addr) ((IMG_UINT16)(((_packet_addr)->ui32Size) & RGX_HWPERF_SIZE_MASK)) + +/*! Macro to obtain the size of the packet data */ +#define RGX_HWPERF_GET_DATA_SIZE(_packet_addr) (RGX_HWPERF_GET_SIZE(_packet_addr) - sizeof(RGX_HWPERF_V2_PACKET_HDR)) + +/*! Masks for use with the IMG_UINT32 eTypeId header field */ +#define RGX_HWPERF_TYPEID_MASK 0x0007FFFFU +#define RGX_HWPERF_TYPEID_EVENT_MASK 0x00007FFFU +#define RGX_HWPERF_TYPEID_THREAD_MASK 0x00008000U +#define RGX_HWPERF_TYPEID_STREAM_MASK 0x00070000U +#define RGX_HWPERF_TYPEID_META_DMA_MASK 0x00080000U +#define RGX_HWPERF_TYPEID_M_CORE_MASK 0x00100000U +#define RGX_HWPERF_TYPEID_OSID_MASK 0x07000000U + +/*! Meta thread macros for encoding the ID into the type field of a packet */ +#define RGX_HWPERF_META_THREAD_SHIFT 15U +#define RGX_HWPERF_META_THREAD_ID0 0x0U /*!< Meta Thread 0 ID */ +#define RGX_HWPERF_META_THREAD_ID1 0x1U /*!< Meta Thread 1 ID */ +/*! Obsolete, kept for source compatibility */ +#define RGX_HWPERF_META_THREAD_MASK 0x1U +/*! Stream ID macros for encoding the ID into the type field of a packet */ +#define RGX_HWPERF_STREAM_SHIFT 16U +/*! Meta DMA macro for encoding how the packet was generated into the type field of a packet */ +#define RGX_HWPERF_META_DMA_SHIFT 19U +/*! Bit-shift macro used for encoding multi-core data into the type field of a packet */ +#define RGX_HWPERF_M_CORE_SHIFT 20U +/*! OSID bit-shift macro used for encoding OSID into type field of a packet */ +#define RGX_HWPERF_OSID_SHIFT 24U +typedef enum { + RGX_HWPERF_STREAM_ID0_FW, /*!< Events from the Firmware/GPU */ + RGX_HWPERF_STREAM_ID1_HOST, /*!< Events from the Server host driver component */ + RGX_HWPERF_STREAM_ID2_CLIENT, /*!< Events from the Client host driver component */ + RGX_HWPERF_STREAM_ID_LAST, +} RGX_HWPERF_STREAM_ID; + +/* Checks if all stream IDs can fit under RGX_HWPERF_TYPEID_STREAM_MASK. 
*/ +static_assert(((IMG_UINT32)RGX_HWPERF_STREAM_ID_LAST - 1U) < (RGX_HWPERF_TYPEID_STREAM_MASK >> RGX_HWPERF_STREAM_SHIFT), + "Too many HWPerf stream IDs."); + +/*! Compile-time value used to seed the Multi-Core (MC) bit in the typeID field. + * Only set by RGX_FIRMWARE builds. + */ +#if defined(RGX_FIRMWARE) +# if defined(RGX_FEATURE_GPU_MULTICORE_SUPPORT) +#define RGX_HWPERF_M_CORE_VALUE 1U /*!< 1 => Multi-core supported */ +# else +#define RGX_HWPERF_M_CORE_VALUE 0U /*!< 0 => Multi-core not supported */ +# endif +#else +#define RGX_HWPERF_M_CORE_VALUE 0U /*!< 0 => Multi-core not supported */ +#endif + +/*! Macros used to set the packet type and encode meta thread ID (0|1), + * HWPerf stream ID, multi-core capability and OSID within the typeID */ +#define RGX_HWPERF_MAKE_TYPEID(_stream, _type, _thread, _metadma, _osid)\ + ((IMG_UINT32) ((RGX_HWPERF_TYPEID_STREAM_MASK&((IMG_UINT32)(_stream) << RGX_HWPERF_STREAM_SHIFT)) | \ + (RGX_HWPERF_TYPEID_THREAD_MASK & ((IMG_UINT32)(_thread) << RGX_HWPERF_META_THREAD_SHIFT)) | \ + (RGX_HWPERF_TYPEID_EVENT_MASK & (IMG_UINT32)(_type)) | \ + (RGX_HWPERF_TYPEID_META_DMA_MASK & ((IMG_UINT32)(_metadma) << RGX_HWPERF_META_DMA_SHIFT)) | \ + (RGX_HWPERF_TYPEID_OSID_MASK & ((IMG_UINT32)(_osid) << RGX_HWPERF_OSID_SHIFT)) | \ + (RGX_HWPERF_TYPEID_M_CORE_MASK & ((IMG_UINT32)(RGX_HWPERF_M_CORE_VALUE) << RGX_HWPERF_M_CORE_SHIFT)))) + +/*! Obtains the event type that generated the packet */ +#define RGX_HWPERF_GET_TYPE(_packet_addr) (((_packet_addr)->eTypeId) & RGX_HWPERF_TYPEID_EVENT_MASK) + +/*! Obtains the META Thread number that generated the packet */ +#define RGX_HWPERF_GET_THREAD_ID(_packet_addr) (((((_packet_addr)->eTypeId) & RGX_HWPERF_TYPEID_THREAD_MASK) >> RGX_HWPERF_META_THREAD_SHIFT)) + +/*! Determines if the packet generated contains multi-core data */ +#define RGX_HWPERF_GET_M_CORE(_packet_addr) (((_packet_addr)->eTypeId & RGX_HWPERF_TYPEID_M_CORE_MASK) >> RGX_HWPERF_M_CORE_SHIFT) + +/*! 
Obtains the guest OSID which resulted in packet generation */ +#define RGX_HWPERF_GET_OSID(_packet_addr) (((_packet_addr)->eTypeId & RGX_HWPERF_TYPEID_OSID_MASK) >> RGX_HWPERF_OSID_SHIFT) + +/*! Obtain stream id */ +#define RGX_HWPERF_GET_STREAM_ID(_packet_addr) (((((_packet_addr)->eTypeId) & RGX_HWPERF_TYPEID_STREAM_MASK) >> RGX_HWPERF_STREAM_SHIFT)) + +/*! Obtain information about how the packet was generated, which might affect payload total size */ +#define RGX_HWPERF_GET_META_DMA_INFO(_packet_addr) (((((_packet_addr)->eTypeId) & RGX_HWPERF_TYPEID_META_DMA_MASK) >> RGX_HWPERF_META_DMA_SHIFT)) + +/*! Obtains a typed pointer to a packet given a buffer address */ +#define RGX_HWPERF_GET_PACKET(_buffer_addr) ((RGX_HWPERF_V2_PACKET_HDR *)(void *) (_buffer_addr)) +/*! Obtains a typed pointer to a data structure given a packet address */ +#define RGX_HWPERF_GET_PACKET_DATA_BYTES(_packet_addr) (IMG_OFFSET_ADDR((_packet_addr), sizeof(RGX_HWPERF_V2_PACKET_HDR))) +/*! Obtains a typed pointer to the next packet given a packet address */ +#define RGX_HWPERF_GET_NEXT_PACKET(_packet_addr) ((RGX_HWPERF_V2_PACKET_HDR *) (IMG_OFFSET_ADDR((_packet_addr), RGX_HWPERF_SIZE_MASK&((_packet_addr)->ui32Size)))) + +/*! Obtains a typed pointer to a packet header given the packet data address */ +#define RGX_HWPERF_GET_PACKET_HEADER(_packet_addr) ((RGX_HWPERF_V2_PACKET_HDR *) (IMG_OFFSET_ADDR((_packet_addr), -(IMG_INT32)sizeof(RGX_HWPERF_V2_PACKET_HDR)))) + + +/****************************************************************************** + * Other Common Defines + *****************************************************************************/ + +/*! This macro is not a real array size, but indicates the array has a variable + * length only known at run-time but always contains at least 1 element. The + * final size of the array is deduced from the size field of a packet header. + */ +#define RGX_HWPERF_ONE_OR_MORE_ELEMENTS 1U + +/*! 
This macro is not a real array size, but indicates the array is optional + * and if present has a variable length only known at run-time. The final + * size of the array is deduced from the size field of a packet header. */ +#define RGX_HWPERF_ZERO_OR_MORE_ELEMENTS 1U + + +/*! Masks for use with the IMG_UINT32 ui32BlkInfo field */ +#define RGX_HWPERF_BLKINFO_BLKCOUNT_MASK 0xFFFF0000U +#define RGX_HWPERF_BLKINFO_BLKOFFSET_MASK 0x0000FFFFU + +/*! Shift for the NumBlocks and counter block offset field in ui32BlkInfo */ +#define RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT 16U +#define RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT 0U + +/*! Macro used to set the block info word as a combination of two 16-bit integers */ +#define RGX_HWPERF_MAKE_BLKINFO(_numblks, _blkoffset) ((IMG_UINT32) ((RGX_HWPERF_BLKINFO_BLKCOUNT_MASK&((_numblks) << RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT)) | (RGX_HWPERF_BLKINFO_BLKOFFSET_MASK&((_blkoffset) << RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT)))) + +/*! Macro used to obtain the number of counter blocks present in the packet */ +#define RGX_HWPERF_GET_BLKCOUNT(_blkinfo) (((_blkinfo) & RGX_HWPERF_BLKINFO_BLKCOUNT_MASK) >> RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT) + +/*! Obtains the offset of the counter block stream in the packet */ +#define RGX_HWPERF_GET_BLKOFFSET(_blkinfo) (((_blkinfo) & RGX_HWPERF_BLKINFO_BLKOFFSET_MASK) >> RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT) + +/*! This macro gets the number of blocks depending on the packet version */ +#define RGX_HWPERF_GET_NUMBLKS(_sig, _packet_data, _numblocks) \ + do { \ + if (HWPERF_PACKET_V2B_SIG == _sig || HWPERF_PACKET_V2C_SIG == _sig) \ + { \ + (_numblocks) = RGX_HWPERF_GET_BLKCOUNT((_packet_data)->ui32BlkInfo);\ + } \ + else \ + { \ + IMG_UINT32 ui32VersionOffset = (((_sig) == HWPERF_PACKET_V2_SIG) ? 1 : 3);\ + (_numblocks) = *(IMG_UINT16 *)(IMG_OFFSET_ADDR(&(_packet_data)->ui32WorkTarget, ui32VersionOffset)); \ + } \ + } while (0) + +/*! 
This macro gets the counter stream pointer depending on the packet version */ +#define RGX_HWPERF_GET_CNTSTRM(_sig, _hw_packet_data, _cntstream_ptr) \ +{ \ + if (HWPERF_PACKET_V2B_SIG == _sig || HWPERF_PACKET_V2C_SIG == _sig) \ + { \ + (_cntstream_ptr) = (IMG_UINT32 *)(IMG_OFFSET_ADDR((_hw_packet_data), RGX_HWPERF_GET_BLKOFFSET((_hw_packet_data)->ui32BlkInfo))); \ + } \ + else \ + { \ + IMG_UINT32 ui32BlkStreamOffsetInWords = ((_sig == HWPERF_PACKET_V2_SIG) ? 6 : 8); \ + (_cntstream_ptr) = (IMG_UINT32 *)(IMG_OFFSET_ADDR((_hw_packet_data), ui32BlkStreamOffsetInWords)); \ + } \ +} + +/*! This is the maximum number of frame contexts that are supported in the + * driver at the moment */ +#define RGX_HWPERF_HW_MAX_WORK_CONTEXT 2 + +/*! Masks for use with the RGX_HWPERF_UFO_EV eEvType field */ +#define RGX_HWPERF_UFO_STREAMSIZE_MASK 0xFFFF0000U +#define RGX_HWPERF_UFO_STREAMOFFSET_MASK 0x0000FFFFU + +/*! Shift for the UFO count and data stream fields */ +#define RGX_HWPERF_UFO_STREAMSIZE_SHIFT 16U +#define RGX_HWPERF_UFO_STREAMOFFSET_SHIFT 0U + +/*! Macro used to set UFO stream info word as a combination of two 16-bit integers */ +#define RGX_HWPERF_MAKE_UFOPKTINFO(_ssize, _soff) \ + ((IMG_UINT32) ((RGX_HWPERF_UFO_STREAMSIZE_MASK&((_ssize) << RGX_HWPERF_UFO_STREAMSIZE_SHIFT)) | \ + (RGX_HWPERF_UFO_STREAMOFFSET_MASK&((_soff) << RGX_HWPERF_UFO_STREAMOFFSET_SHIFT)))) + +/*! Macro used to obtain UFO count*/ +#define RGX_HWPERF_GET_UFO_STREAMSIZE(_streaminfo) \ + (((_streaminfo) & RGX_HWPERF_UFO_STREAMSIZE_MASK) >> RGX_HWPERF_UFO_STREAMSIZE_SHIFT) + +/*! Obtains the offset of the UFO stream in the packet */ +#define RGX_HWPERF_GET_UFO_STREAMOFFSET(_streaminfo) \ + (((_streaminfo) & RGX_HWPERF_UFO_STREAMOFFSET_MASK) >> RGX_HWPERF_UFO_STREAMOFFSET_SHIFT) + + + +/****************************************************************************** + * Data Stream Common Types + *****************************************************************************/ + +/*! 
All the Data Masters HWPerf is aware of. When a new DM is added to this + * list, it should be appended at the end to maintain backward compatibility + * of HWPerf data. + */ +typedef enum { + + RGX_HWPERF_DM_GP, + RGX_HWPERF_DM_TDM, + RGX_HWPERF_DM_GEOM, + RGX_HWPERF_DM_3D, + RGX_HWPERF_DM_CDM, + RGX_HWPERF_DM_RTU, + + RGX_HWPERF_DM_LAST, + + RGX_HWPERF_DM_INVALID = 0x1FFFFFFF +} RGX_HWPERF_DM; + +/*! Enum containing bit position for 32bit feature flags used in hwperf and api */ +typedef enum { + RGX_HWPERF_FEATURE_PERFBUS_FLAG = 0x001, + RGX_HWPERF_FEATURE_S7_TOP_INFRASTRUCTURE_FLAG = 0x002, + RGX_HWPERF_FEATURE_XT_TOP_INFRASTRUCTURE_FLAG = 0x004, + RGX_HWPERF_FEATURE_PERF_COUNTER_BATCH_FLAG = 0x008, + RGX_HWPERF_FEATURE_ROGUEXE_FLAG = 0x010, + RGX_HWPERF_FEATURE_DUST_POWER_ISLAND_S7_FLAG = 0x020, + RGX_HWPERF_FEATURE_PBE2_IN_XE_FLAG = 0x040, + RGX_HWPERF_FEATURE_WORKLOAD_ESTIMATION = 0x080, + RGX_HWPERF_FEATURE_MULTICORE_FLAG = 0x100, + RGX_HWPERF_FEATURE_RAYTRACING_FLAG = 0x200, + RGX_HWPERF_FEATURE_CXT_TOP_INFRASTRUCTURE_FLAG = 0x400 +} RGX_HWPERF_FEATURE_FLAGS; + +/*! This structure holds the data of a firmware packet. */ +typedef struct +{ + RGX_HWPERF_DM eDM; /*!< DataMaster identifier, see RGX_HWPERF_DM */ + IMG_UINT32 ui32TxtActCyc; /*!< Meta TXTACTCYC register value */ + IMG_UINT32 ui32FWPerfCount0; /*!< Meta/MIPS PERF_COUNT0 register */ + IMG_UINT32 ui32FWPerfCount1; /*!< Meta/MIPS PERF_COUNT1 register */ + IMG_UINT32 ui32TimeCorrIndex; /*!< Internal field */ + IMG_UINT32 ui32Padding; /*!< Reserved */ +} RGX_HWPERF_FW_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FW_DATA); + +/*! This structure holds the data of a hardware packet, including counters. 
*/ +typedef struct +{ + IMG_UINT32 ui32DMCyc; /*!< DataMaster cycle count register, 0 if none */ + IMG_UINT32 ui32FrameNum; /*!< Frame number, undefined on some DataMasters */ + IMG_UINT32 ui32PID; /*!< Process identifier */ + IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */ + IMG_UINT32 ui32WorkTarget; /*!< RenderTarget for a TA,3D; Frame context for RTU, 0x0 otherwise */ + IMG_UINT32 ui32ExtJobRef; /*!< Client driver context job reference used for tracking/debugging */ + IMG_UINT32 ui32IntJobRef; /*!< RGX Data master context job reference used for tracking/debugging */ + IMG_UINT32 ui32TimeCorrIndex; /*!< Index to the time correlation at the time the packet was generated */ + IMG_UINT32 ui32BlkInfo; /*!< <31..16> NumBlocks <15..0> Counter block stream offset */ + IMG_UINT32 ui32WorkCtx; /*!< Work context: Render Context for TA/3D; RayTracing Context for RTU/SHG; 0x0 otherwise */ + IMG_UINT32 ui32CtxPriority; /*!< Context priority */ + IMG_UINT32 ui32GPUIdMask; /*!< GPU IDs active within this event */ + IMG_UINT32 aui32CountBlksStream[RGX_HWPERF_ZERO_OR_MORE_ELEMENTS]; /*!< Counter data */ + IMG_UINT32 ui32Padding2; /*!< Reserved. To ensure correct alignment */ +} RGX_HWPERF_HW_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_HW_DATA); + +/*! Mask for use with the aui32CountBlksStream field when decoding the + * counter block ID and mask word. */ +#define RGX_HWPERF_CNTBLK_ID_MASK 0xFFFF0000U +#define RGX_HWPERF_CNTBLK_ID_SHIFT 16U + +/*! MAX value used in server handling of counter config arrays */ +#if defined(SUPPORT_VALIDATION) +#define RGX_CNTBLK_COUNTERS_MAX 64 +#else +#define RGX_CNTBLK_COUNTERS_MAX 12 +#endif + + +/*! Obtains the counter block ID word from an aui32CountBlksStream field. + * The word combines Control bits (15-12), GPU-Id (11-8), Group (7-4), Unit + * within group (3-0) */ +#define RGX_HWPERF_GET_CNTBLK_IDW(_word) ((IMG_UINT16)(((_word)&RGX_HWPERF_CNTBLK_ID_MASK)>>RGX_HWPERF_CNTBLK_ID_SHIFT)) + +/*! 
Obtains the counter block ID from the supplied RGX_HWPERF_HW_DATA address + * and stream index. May be used in decoding the counter block stream words of + * a RGX_HWPERF_HW_DATA structure. */ +#define RGX_HWPERF_GET_CNTBLK_ID(_data_addr, _idx) RGX_HWPERF_GET_CNTBLK_IDW((_data_addr)->aui32CountBlksStream[(_idx)]) + +/*! Obtains the GPU ID from the supplied RGX_HWPERF_HW_DATA CNTBLK_IDW */ +#define RGX_HWPERF_GET_CNTBLK_GPUW(_word) ((IMG_UINT16)(((_word)&RGX_CNTBLK_ID_MC_GPU_MASK)>>RGX_CNTBLK_ID_MC_GPU_SHIFT)) + +#define RGX_HWPERF_GET_CNT_MASKW(_word) ((IMG_UINT16)((_word)&(~RGX_HWPERF_CNTBLK_ID_MASK))) + +/*! Obtains the counter mask from the supplied RGX_HWPERF_HW_DATA address + * and stream index. May be used in decoding the counter block stream words + * of a RGX_HWPERF_HW_DATA structure. */ +#define RGX_HWPERF_GET_CNT_MASK(_data_addr, _idx) RGX_HWPERF_GET_CNT_MASKW((_data_addr)->aui32CountBlksStream[(_idx)]) + +/*! Context switch packet event */ +typedef struct +{ + RGX_HWPERF_DM eDM; /*!< DataMaster identifier, see RGX_HWPERF_DM */ + IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */ + IMG_UINT32 ui32FrameNum; /*!< Client Frame number (TA, 3D only) */ + IMG_UINT32 ui32TxtActCyc; /*!< Meta TXTACTCYC register value */ + IMG_UINT32 ui32PerfCycle; /*!< Cycle count. Used to measure HW context store latency */ + IMG_UINT32 ui32PerfPhase; /*!< Phase. Used to determine geometry content */ + IMG_UINT32 ui32Padding[2]; /*!< Padding to 8 DWords */ +} RGX_HWPERF_CSW_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CSW_DATA); + +/*! Enumeration of clocks supporting this event */ +typedef enum +{ + RGX_HWPERF_CLKS_CHG_INVALID = 0, + + RGX_HWPERF_CLKS_CHG_NAME_CORE = 1, + + RGX_HWPERF_CLKS_CHG_LAST, +} RGX_HWPERF_CLKS_CHG_NAME; + +/*! This structure holds the data of a clocks change packet. 
*/ +typedef struct +{ + IMG_UINT64 ui64NewClockSpeed; /*!< New Clock Speed (in Hz) */ + RGX_HWPERF_CLKS_CHG_NAME eClockName; /*!< Clock name */ + IMG_UINT32 ui32CalibratedClockSpeed; /*!< Calibrated new GPU clock speed (in Hz) */ + IMG_UINT64 ui64OSTimeStamp; /*!< OSTimeStamp sampled by the host */ + IMG_UINT64 ui64CRTimeStamp; /*!< CRTimeStamp sampled by the host and + correlated to OSTimeStamp */ +} RGX_HWPERF_CLKS_CHG_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CLKS_CHG_DATA); + +/*! Enumeration of GPU utilisation states supported by this event */ +typedef IMG_UINT32 RGX_HWPERF_GPU_STATE; + +/*! This structure holds the data of a GPU utilisation state change packet. */ +typedef struct +{ + RGX_HWPERF_GPU_STATE eState; /*!< New GPU utilisation state */ + IMG_UINT32 uiUnused1; /*!< Padding */ + IMG_UINT32 uiUnused2; /*!< Padding */ + IMG_UINT32 uiUnused3; /*!< Padding */ +} RGX_HWPERF_GPU_STATE_CHG_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_GPU_STATE_CHG_DATA); + + +/*! Signature pattern 'HPE1' found in the first word of a PWR_EST packet data */ +#define HWPERF_PWR_EST_V1_SIG 0x48504531 + +/*! Macros to obtain a component field from a counter ID word */ +#define RGX_HWPERF_GET_PWR_EST_HIGH_FLAG(_word) (((_word)&0x80000000)>>31) +#define RGX_HWPERF_GET_PWR_EST_GPUID(_word) (((_word)&0x70000000)>>28) +/*!< Obtains the GPU ID from a counter ID word */ +#define RGX_HWPERF_GET_PWR_EST_UNIT(_word) (((_word)&0x0F000000)>>24) +#define RGX_HWPERF_GET_PWR_EST_NUMBER(_word) ((_word)&0x0000FFFF) + +#define RGX_HWPERF_PWR_EST_HIGH_OFFSET (31) +#define RGX_HWPERF_PWR_EST_GPUID_OFFSET (28) +#define RGX_HWPERF_PWR_EST_GPUID_MASK (0x7U) +#define RGX_HWPERF_PWR_EST_UNIT_OFFSET (24) +#define RGX_HWPERF_PWR_EST_UNIT_MASK (0xFU) +#define RGX_HWPERF_PWR_EST_VALUE_MASK (0xFFFFU) + +/*! 
This macro constructs a counter ID for a power estimate data stream from + * the component parts of: high word flag, unit id, GPU id, counter number */ +#define RGX_HWPERF_MAKE_PWR_EST_COUNTERID(_high, _unit, _core, _number) \ + ((IMG_UINT32)(((IMG_UINT32)((IMG_UINT32)(_high)&0x1U)<= RGX_BVNC_STR_SIZE_MAX), + "Space inside HWPerf packet data for BVNC string insufficient"); + +#define RGX_HWPERF_MAX_BVNC_BLOCK_LEN (20U) + +/*! BVNC Features */ +typedef struct +{ + /*! Counter block ID, see RGX_HWPERF_CNTBLK_ID */ + IMG_UINT16 ui16BlockID; + + /*! Number of counters in this block type */ + IMG_UINT16 ui16NumCounters; + + /*! Number of blocks of this type */ + IMG_UINT16 ui16NumBlocks; + + /*! Reserved for future use */ + IMG_UINT16 ui16Reserved; +} RGX_HWPERF_BVNC_BLOCK; + +/*! BVNC Features */ +typedef struct +{ + IMG_CHAR aszBvncString[RGX_HWPERF_MAX_BVNC_LEN]; /*!< BVNC string */ + IMG_UINT32 ui32BvncKmFeatureFlags; /*!< See RGX_HWPERF_FEATURE_FLAGS */ + IMG_UINT16 ui16BvncBlocks; /*!< Number of blocks described in aBvncBlocks */ + IMG_UINT16 ui16BvncGPUCores; /*!< Number of GPU cores present */ + RGX_HWPERF_BVNC_BLOCK aBvncBlocks[RGX_HWPERF_MAX_BVNC_BLOCK_LEN]; /*!< Supported Performance Blocks for BVNC. See RGX_HWPERF_BVNC_BLOCK */ +} RGX_HWPERF_BVNC; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_BVNC); + +/*! Performance Counter Configuration data element. */ +typedef struct +{ + IMG_UINT32 ui32BlockID; /*!< Counter Block ID. See RGX_HWPERF_CNTBLK_ID */ + IMG_UINT32 ui32NumCounters; /*!< Number of counters configured */ + IMG_UINT32 ui32CounterVals[RGX_CNTBLK_COUNTERS_MAX]; /*!< Counters configured (ui32NumCounters worth of entries) */ +} RGX_HWPERF_COUNTER_CFG_DATA_EL; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_COUNTER_CFG_DATA_EL); + +/*! Performance Counter Configuration data. */ +typedef struct +{ + IMG_UINT32 ui32EnabledBlocks; /*!< Number of Enabled Blocks. */ + RGX_HWPERF_COUNTER_CFG_DATA_EL uData; /*!< Start of variable length data. 
See RGX_HWPERF_COUNTER_CFG_DATA_EL */ + IMG_UINT32 ui32Padding; /*!< reserved */ +} RGX_HWPERF_COUNTER_CFG; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_COUNTER_CFG); + +/*! Sub-event's data. */ +typedef union +{ + struct + { + RGX_HWPERF_DM eDM; /*!< Data Master ID. */ + RGX_HWPERF_HWR_REASON eReason; /*!< Reason of the HWR. */ + IMG_UINT32 ui32DMContext; /*!< FW render context */ + } sHWR; /*!< HWR sub-event data. */ + + RGX_HWPERF_BVNC sBVNC; /*!< BVNC Features. See RGX_HWPERF_BVNC */ + struct + { + IMG_UINT32 ui32EvMaskLo; /*!< Low order 32 bits of Filter Mask */ + IMG_UINT32 ui32EvMaskHi; /*!< High order 32 bits of Filter Mask */ + } sEvMsk; /*!< HW Filter Mask */ + RGX_HWPERF_COUNTER_CFG sPCC; /*!< Performance Counter Config. See RGX_HWPERF_COUNTER_CFG */ +} RGX_HWPERF_FWACT_DETAIL; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FWACT_DETAIL); + +/*! This structure holds the data of a FW activity event packet */ +typedef struct +{ + RGX_HWPERF_FWACT_EV eEvType; /*!< Event type. */ + RGX_HWPERF_FWACT_DETAIL uFwActDetail; /*!< Data of the sub-event. */ + IMG_UINT32 ui32Padding; /*!< Reserved. */ +} RGX_HWPERF_FWACT_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FWACT_DATA); + + +typedef enum { + RGX_HWPERF_UFO_EV_UPDATE, /*!< Update on the UFO objects. */ + RGX_HWPERF_UFO_EV_CHECK_SUCCESS, /*!< Successful check on UFO objects. */ + RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS, /*!< Successful partial render check on UFO objects. */ + RGX_HWPERF_UFO_EV_CHECK_FAIL, /*!< Unsuccessful check on UFO objects. */ + RGX_HWPERF_UFO_EV_PRCHECK_FAIL, /*!< Unsuccessful partial render check on UFO objects. */ + RGX_HWPERF_UFO_EV_FORCE_UPDATE, /*!< Forced erroring of the UFO objects. */ + + RGX_HWPERF_UFO_EV_LAST /*!< Reserved. Do not use. */ +} RGX_HWPERF_UFO_EV; + +/*! Data stream tuple. 
*/ +typedef union +{ + struct + { + IMG_UINT32 ui32FWAddr; /*!< UFO's unique address */ + IMG_UINT32 ui32Value; /*!< Value of the UFO object */ + } sCheckSuccess; + struct + { + IMG_UINT32 ui32FWAddr; /*!< UFO's unique address */ + IMG_UINT32 ui32Value; /*!< Value of the UFO object */ + IMG_UINT32 ui32Required; /*!< Value of the UFO object required by the fence */ + } sCheckFail; + struct + { + IMG_UINT32 ui32FWAddr; /*!< UFO's unique address */ + IMG_UINT32 ui32OldValue; /*!< Value of UFO object before update */ + IMG_UINT32 ui32NewValue; /*!< Value of UFO object after update */ + } sUpdate; +} RGX_HWPERF_UFO_DATA_ELEMENT; + +/*! This structure holds the packet payload data for UFO event. */ +typedef struct +{ + RGX_HWPERF_UFO_EV eEvType; /*!< Subtype of the event. See RGX_HWPERF_UFO_EV */ + IMG_UINT32 ui32TimeCorrIndex; /*!< Index to the timer correlation data + at the time the packet was generated. + Used to approximate Host timestamps for + these events. */ + IMG_UINT32 ui32PID; /*!< Client process identifier */ + IMG_UINT32 ui32ExtJobRef; /*!< Reference used by callers of the RGX + API to track submitted work (for + debugging/trace purposes) */ + IMG_UINT32 ui32IntJobRef; /*!< Internal reference used to track + submitted work (for debugging / trace + purposes) */ + IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */ + IMG_UINT32 ui32StreamInfo; /*!< Encoded number of elements in the + stream and stream data offset in the + payload */ + RGX_HWPERF_DM eDM; /*!< Data Master number, see RGX_HWPERF_DM */ + IMG_UINT32 ui32Padding; /*!< Unused, reserved */ + IMG_UINT32 aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; /*!< Series of tuples holding UFO objects data */ +} RGX_HWPERF_UFO_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_UFO_DATA); + + +/*! + * RGX_HWPERF_KICK_TYPE describes the type of kick for events received / sent + * between KICK_START / KICK_END inclusively for all event types. 
+ */ +typedef enum +{ + RGX_HWPERF_KICK_TYPE_TA3D, /*!< TA 3D Kick */ + RGX_HWPERF_KICK_TYPE_CDM, /*!< Compute Data Master Kick */ + RGX_HWPERF_KICK_TYPE_RS, /*!< Ray Store Kick */ + RGX_HWPERF_KICK_TYPE_SHG, /*!< Scene Hierarchy Generator Kick */ + RGX_HWPERF_KICK_TYPE_TQTDM, /*!< TQ 2D Data Master Kick */ + RGX_HWPERF_KICK_TYPE_SYNC, /*!< Sync Kick */ + RGX_HWPERF_KICK_TYPE_LAST, + + RGX_HWPERF_KICK_TYPE_FORCE_32BIT = 0x7fffffff +} RGX_HWPERF_KICK_TYPE; + +typedef struct +{ + RGX_HWPERF_KICK_TYPE ui32EnqType; /*!< Workload type sent to FW for + scheduling on GPU hardware. + See RGX_HWPERF_KICK_TYPE */ + IMG_UINT32 ui32PID; /*!< Client process identifier */ + IMG_UINT32 ui32ExtJobRef; /*!< Reference used by callers of the RGX API + to track submitted work (for debugging / + trace purposes) */ + IMG_UINT32 ui32IntJobRef; /*!< internal reference used to track submitted + work (for debugging / trace purposes) */ + IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */ + IMG_UINT32 ui32Padding; /*!< Unused, reserved */ + IMG_UINT64 ui64CheckFence_UID; /*!< ID of fence gating work execution on GPU */ + IMG_UINT64 ui64UpdateFence_UID; /*!< ID of fence triggered after work completes on GPU */ + IMG_UINT64 ui64DeadlineInus; /*!< Workload deadline in system monotonic time */ + IMG_UINT64 ui64CycleEstimate; /*!< Estimated cycle time for the workload */ + PVRSRV_FENCE hCheckFence; /*!< Fence this enqueue task waits for, before starting */ + PVRSRV_FENCE hUpdateFence; /*!< Fence this enqueue task signals, on completion */ + PVRSRV_TIMELINE hUpdateTimeline; /*!< Timeline on which the above hUpdateFence is created */ + + IMG_UINT32 ui32Pad; /* Align structure size to 8 bytes */ +} RGX_HWPERF_HOST_ENQ_DATA; + +/* Payload size must be multiple of 8 bytes to align start of next packet. 
*/ +static_assert((sizeof(RGX_HWPERF_HOST_ENQ_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_ENQ_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +typedef struct +{ + RGX_HWPERF_UFO_EV eEvType; /*!< Subtype of the event */ + IMG_UINT32 ui32StreamInfo; /*!< Encoded number of elements in the stream and + stream data offset in the payload */ +#ifdef __CHECKER__ + /* Since we're not conforming to the C99 standard by not using a flexible + * array member need to add a special case for Smatch static code analyser. */ + IMG_UINT32 aui32StreamData[]; +#else + IMG_UINT32 aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; + /*!< Series of tuples holding UFO objects data */ + + IMG_UINT32 ui32Padding; /*!< Reserved, align structure size to 8 bytes */ +#endif +} RGX_HWPERF_HOST_UFO_DATA; + +/* Payload size must be multiple of 8 bytes to align start of next packet. */ +static_assert((sizeof(RGX_HWPERF_HOST_UFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_UFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +/*! + * RGX_HWPERF_HOST_RESOURCE_TYPE describes the type of resource which has been + * Allocated, Freed or Modified. The values are used to determine which event + * data structure to use to decode the data from the event stream + */ +typedef enum +{ + RGX_HWPERF_HOST_RESOURCE_TYPE_INVALID, /*!< Invalid */ + RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC, /*!< SyncPrim */ + RGX_HWPERF_HOST_RESOURCE_TYPE_TIMELINE_DEPRECATED, + /*!< Timeline resource packets are + now emitted in client hwperf buffer */ + RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR, /*!< Fence for use on GPU (SYNC_CP backed) */ + RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP, /*!< Sync Checkpoint */ + RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_SW, /*!< Fence created on SW timeline */ + + RGX_HWPERF_HOST_RESOURCE_TYPE_LAST /*!< End of enumeration */ +} RGX_HWPERF_HOST_RESOURCE_TYPE; + +typedef union +{ + /*! Data for TYPE_TIMELINE (*Deprecated*). 
This sub-event is no longer + * generated in the HOST stream. Timeline data is now provided in the + * CLIENT stream instead. + */ + struct + { + IMG_UINT32 uiPid; /*!< Identifier of owning process */ + IMG_UINT64 ui64Timeline_UID1; /*!< Unique identifier for timeline resource */ + IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; + /*!< Label or name given to the sync resource */ + IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ + } sTimelineAlloc; + + /*! Data for TYPE_FENCE_PVR */ + struct + { + IMG_PID uiPID; /*!< Identifier of owning process */ + PVRSRV_FENCE hFence; /*!< Unique identifier for the fence resource */ + IMG_UINT32 ui32CheckPt_FWAddr; /*!< Unique identifier of the check point + backing this fence on the GPU */ + IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; + /*!< Label or name given to the sync resource */ + } sFenceAlloc; + + /*! Data for TYPE_SYNC_CP */ + struct + { + IMG_UINT32 ui32CheckPt_FWAddr; /*!< Unique identifier for the check point resource */ + PVRSRV_TIMELINE hTimeline; /*!< Unique identifier for the timeline resource */ + IMG_PID uiPID; /*!< Identifier of owning process */ + PVRSRV_FENCE hFence; /*!< Unique identifier for the fence resource */ + IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; + /*!< Label or name given to the sync resource */ + } sSyncCheckPointAlloc; + + /*! Data for TYPE_FENCE_SW */ + struct + { + IMG_PID uiPID; /*!< Identifier of owning process */ + PVRSRV_FENCE hSWFence; /*!< Unique identifier for the SWFence resource */ + PVRSRV_TIMELINE hSWTimeline; /*!< Unique identifier for the timeline resource */ + IMG_UINT64 ui64SyncPtIndex; /*!< Sync-pt index where this SW timeline has reached */ + IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; + /*!< Label or name given to the sync resource */ + } sSWFenceAlloc; + + /*! 
Data for TYPE_SYNC */ + struct + { + IMG_UINT32 ui32FWAddr; /*!< Identifier of sync resource */ + IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; + /*!< Label or name given to the sync resource */ + } sSyncAlloc; +} RGX_HWPERF_HOST_ALLOC_DETAIL; + +typedef struct +{ + RGX_HWPERF_HOST_RESOURCE_TYPE ui32AllocType; + /*!< This describes the type of the resource + allocated in the driver. See + RGX_HWPERF_HOST_RESOURCE_TYPE */ + RGX_HWPERF_HOST_ALLOC_DETAIL RGXFW_ALIGN uAllocDetail; + /*!< Union of structures providing further + data regarding the resource allocated. + Size of data varies with union member that + is present, check ``ui32AllocType`` value + to decode */ +} RGX_HWPERF_HOST_ALLOC_DATA; + +/* Payload size must be multiple of 8 bytes to align start of next packet. */ +static_assert((sizeof(RGX_HWPERF_HOST_ALLOC_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_ALLOC_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +typedef union +{ + /*! Data for TYPE_TIMELINE (*Deprecated*) */ + struct + { + IMG_UINT32 uiPid; /*!< Identifier of owning process */ + IMG_UINT64 ui64Timeline_UID1; /*!< Unique identifier for the timeline resource */ + IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ + } sTimelineDestroy; + + /*! Data for TYPE_FENCE_PVR */ + struct + { + IMG_UINT64 ui64Fence_UID; /*!< Unique identifier for the fence resource */ + IMG_UINT32 ui32Padding; /*!< Reserved. */ + } sFenceDestroy; + + /*! Data for TYPE_SYNC_CP */ + struct + { + IMG_UINT32 ui32CheckPt_FWAddr; /*!< Unique identifier for the check point resource */ + } sSyncCheckPointFree; + + /*! Data for TYPE_SYNC */ + struct + { + IMG_UINT32 ui32FWAddr; /*!< Unique identifier for the sync resource */ + } sSyncFree; +} RGX_HWPERF_HOST_FREE_DETAIL; + +typedef struct +{ + RGX_HWPERF_HOST_RESOURCE_TYPE ui32FreeType; + /*!< This describes the type of the resource + freed or released by the driver. 
See + RGX_HWPERF_HOST_RESOURCE_TYPE */ + RGX_HWPERF_HOST_FREE_DETAIL uFreeDetail; + /*!< Union of structures providing further data + regarding the resource freed. Size of data + varies with union member that is present, + check ``ui32FreeType`` value to decode */ + IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ +} RGX_HWPERF_HOST_FREE_DATA; + +/* Payload size must be multiple of 8 bytes to align start of next packet. */ +static_assert((sizeof(RGX_HWPERF_HOST_FREE_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_FREE_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +typedef struct +{ + IMG_UINT64 ui64CRTimestamp; /*!< CR timer value from the latest entry of + the time domains correlation table */ + IMG_UINT64 ui64OSTimestamp; /*!< OS timestamp from the latest entry of the + time domains correlation table */ + IMG_UINT32 ui32ClockSpeed; /*!< GPU clock speed from the latest entry of + the time domains correlation table */ + IMG_UINT32 ui32Padding; /*!< Reserved, align structure size to 8 bytes */ +} RGX_HWPERF_HOST_CLK_SYNC_DATA; + +/* Payload size must be multiple of 8 bytes to align start of next packet. */ +static_assert((sizeof(RGX_HWPERF_HOST_CLK_SYNC_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_CLK_SYNC_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +typedef union +{ + /*! Data for TYPE_FENCE_PVR */ + struct + { + IMG_UINT64 ui64NewFence_UID; /*!< Unique identifier for the new merged fence + resource that has been created */ + IMG_UINT64 ui64InFence1_UID; /*!< Unique identifier for the fence resource */ + IMG_UINT64 ui64InFence2_UID; /*!< Unique identifier of the check point backing + the fence on the GPU */ + IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; + /*!< Label or name given to the sync resource */ + IMG_UINT32 ui32Padding; /*!< Reserved. 
Align structure size to 8 bytes */ + } sFenceMerge; +} RGX_HWPERF_HOST_MODIFY_DETAIL; + +typedef struct +{ + RGX_HWPERF_HOST_RESOURCE_TYPE ui32ModifyType; + /*!< Describes the type of the resource + modified by the driver. See + RGX_HWPERF_HOST_RESOURCE_TYPE */ + + RGX_HWPERF_HOST_MODIFY_DETAIL uModifyDetail; + /*!< Union of structures providing further + data regarding the resource modified. + Size of data varies with union member that + is present. + Check ``uiModifyType`` value to decode */ +} RGX_HWPERF_HOST_MODIFY_DATA; + +/* Payload size must be multiple of 8 bytes to align start of next packet. */ +static_assert((sizeof(RGX_HWPERF_HOST_MODIFY_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_MODIFY_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +typedef enum +{ + RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED = 0, /*!< Invalid */ + RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_OK, /*!< Device OK */ + RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_RESPONDING, /*!< Device responding to requests */ + RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_DEAD, /*!< Device not responding */ + RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_FAULT, /*!< Device has faulted */ + + RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_LAST +} RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS; + +typedef enum +{ + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_UNDEFINED = 0, /*!< Invalid */ + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_NONE, /*!< No underlying health reason. */ + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_ASSERTED, /*!< Device has asserted. */ + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_POLL_FAILING, /*!< Device poll has failed. */ + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_TIMEOUTS, /*!< Device timeout has fired. */ + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_CORRUPT, /*!< Queue has become corrupt. */ + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_STALLED, /*!< Queue has stalled. */ + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_IDLING, /*!< Device is idling. */ + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_RESTARTING, /*!< Device restarting. 
*/ + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS,/*!< Interrupts have been discarded. */ + + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_LAST +} RGX_HWPERF_HOST_DEVICE_HEALTH_REASON; + +/*! RGX_HWPERF_DEV_INFO_EV values */ +typedef enum +{ + RGX_HWPERF_DEV_INFO_EV_HEALTH, /*!< Health sub-event */ + + RGX_HWPERF_DEV_INFO_EV_LAST /*!< Last enumeration value */ +} RGX_HWPERF_DEV_INFO_EV; + +/*! RGX_HWPERF_HOST_DEV_INFO_DETAIL is a union of structures providing + * further data regarding the device's status + */ +typedef union +{ + /*! Data for device status event */ + struct + { + RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS eDeviceHealthStatus; + /*!< Device's health status */ + RGX_HWPERF_HOST_DEVICE_HEALTH_REASON eDeviceHealthReason; + /*!< Reason for device's health status */ + } sDeviceStatus; +} RGX_HWPERF_HOST_DEV_INFO_DETAIL; + +/*! RGX_HWPERF_HOST_DEV_INFO_DATA contains device health status information */ +typedef struct +{ + IMG_UINT32 ui32Padding; + /*!< Reserved. Align structure size to 8 bytes */ + RGX_HWPERF_DEV_INFO_EV eEvType; + /*!< Type of the sub-event. See + RGX_HWPERF_DEV_INFO_EV */ + RGX_HWPERF_HOST_DEV_INFO_DETAIL uDevInfoDetail; + /*!< Union of structures providing further data + regarding the device's status. Size of data + varies with union member that is present, + check ``eEvType`` value to decode */ +} RGX_HWPERF_HOST_DEV_INFO_DATA; + +/* Payload size must be multiple of 8 bytes to align start of next packet. */ +static_assert((sizeof(RGX_HWPERF_HOST_DEV_INFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_DEV_INFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +/*! RGX_HWPERF_INFO_EV event subtype for RGX_HWPERF_HOST_INFO_DATA events */ +typedef enum +{ + RGX_HWPERF_INFO_EV_MEM_USAGE, /*!< Memory usage event */ + RGX_HWPERF_INFO_EV_LAST /*!< End of enumeration */ +} RGX_HWPERF_INFO_EV; + +/*! RGX_HWPERF_HOST_INFO_DETAIL contains the data payload for the + * RGX_HWPERF_HOST_INFO_DATA event. 
+ */ +typedef union +{ + /*! Host Memory usage statistics */ + struct + { + IMG_UINT32 ui32TotalMemoryUsage; /*!< Total memory usage */ + /*! Detailed memory usage */ + struct + { + IMG_UINT32 ui32Pid; /*!< Process ID */ + IMG_UINT32 ui32KernelMemUsage; /*!< Kernel memory usage */ + IMG_UINT32 ui32GraphicsMemUsage; /*!< GPU memory usage */ + } sPerProcessUsage[RGX_HWPERF_ZERO_OR_MORE_ELEMENTS]; + } sMemUsageStats; +} RGX_HWPERF_HOST_INFO_DETAIL; + +/*! RGX_HWPERF_HOST_INFO_DATA. Host Info data event payload contains device + * memory usage information. + */ +typedef struct +{ + IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ + RGX_HWPERF_INFO_EV eEvType; /*!< Type of subevent. See RGX_HWPERF_INFO_EV */ + RGX_HWPERF_HOST_INFO_DETAIL uInfoDetail; + /*!< Union of structures providing further data + regarding memory usage. Size varies with union + member that is present, check ``eEvType`` + value to decode */ +} RGX_HWPERF_HOST_INFO_DATA; + +/* Payload size must be multiple of 8 bytes to align start of next packet. */ +static_assert((sizeof(RGX_HWPERF_HOST_INFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_INFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +/*! FENCE_WAIT_TYPE definitions */ +typedef enum +{ + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_BEGIN = 0, /*!< Begin */ + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_END, /*!< End */ + + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_LAST, /*!< Do not use */ +} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE; + +/*! FENCE_WAIT_RESULT definitions */ +typedef enum +{ + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_INVALID = 0, /*!< Invalid */ + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_TIMEOUT, /*!< Timed Out */ + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_PASSED, /*!< Passed */ + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_ERROR, /*!< Errored */ + + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_LAST, /*!< Do not use */ +} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT; + +/*! 
FENCE_WAIT_DETAIL Event Payload */ +typedef union +{ +/*! Data for SYNC_FENCE_WAIT_TYPE_BEGIN */ + struct + { + IMG_UINT32 ui32TimeoutInMs; /*!< Wait timeout (ms) */ + } sBegin; + + /*! Data for SYNC_FENCE_WAIT_TYPE_END */ + struct + { + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT eResult; /*!< Wait result */ + } sEnd; +} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DETAIL; + +/*! RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA Event Payload. This data structure + * is received whenever the host driver handles a wait for sync event request. + */ +typedef struct +{ + IMG_PID uiPID; /*!< Identifier of the owning process */ + PVRSRV_FENCE hFence; /*!< Unique identifier for the fence resource */ + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eType; + /*!< Type of the subevent, see + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE */ + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DETAIL uDetail; + /*!< Union of structures providing further data + regarding device's status. Size of data varies with + union member that is present, check ``eType`` value + to decode */ + +} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA; + +static_assert((sizeof(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +/*! RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA. + * Software Timeline Advanced Event Payload. This data structure is received + * whenever the host driver processes a Software Timeline Advanced event. 
+ */ +typedef struct +{ + IMG_PID uiPID; /*!< Identifier of the owning process */ + PVRSRV_TIMELINE hTimeline; /*!< Unique identifier for the timeline resource */ + IMG_UINT64 ui64SyncPtIndex; /*!< Index of the sync point to which the + timeline has advanced */ + +} RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA; + +static_assert((sizeof(RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, + "sizeof(RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); + +typedef enum +{ + RGX_HWPERF_RESOURCE_CAPTURE_TYPE_NONE, + RGX_HWPERF_RESOURCE_CAPTURE_TYPE_DEFAULT_FRAMEBUFFER, + RGX_HWPERF_RESOURCE_CAPTURE_TYPE_OFFSCREEN_FB_ATTACHMENTS, + RGX_HWPERF_RESOURCE_CAPTURE_TYPE_TILE_LIFETIME_DATA, + + RGX_HWPERF_RESOURCE_TYPE_COUNT +} RGX_HWPERF_RESOURCE_CAPTURE_TYPE; + +typedef struct +{ + IMG_UINT32 ui32Height; + IMG_UINT32 ui32Width; + IMG_UINT32 ui32BPP; + IMG_UINT32 ui32PixFormat; +} RGX_RESOURCE_PER_SURFACE_INFO, *PRGX_RESOURCE_PER_SURFACE_INFO; + +typedef struct +{ + IMG_INT32 i32XOffset; /*!< render surface X shift */ + IMG_INT32 i32YOffset; /*!< render surface Y shift */ + IMG_UINT32 ui32WidthInTiles; /*!< number of TLT data points in X */ + IMG_UINT32 ui32HeightInTiles; /*!< number of TLT data points in Y */ +} RGX_RESOURCE_PER_TLT_BUFFER_INFO, *PRGX_RESOURCE_PER_TLT_BUFFER_INFO; + +typedef union +{ + struct RGX_RESOURCE_CAPTURE_RENDER_SURFACES + { + IMG_UINT32 ui32RenderSurfaceCount; + RGX_RESOURCE_PER_SURFACE_INFO sSurface[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; + } sRenderSurfaces; + + struct RGX_RESOURCE_CAPTURE_TILE_LIFETIME_BUFFERS + { + RGX_RESOURCE_PER_TLT_BUFFER_INFO sTLTBufInfo[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; + } sTLTBuffers; +} RGX_RESOURCE_CAPTURE_DETAIL; + +typedef struct +{ + RGX_HWPERF_RESOURCE_CAPTURE_TYPE eType; + IMG_PID uPID; + IMG_UINT32 ui32ContextID; + IMG_UINT32 ui32FrameNum; + IMG_UINT32 ui32CapturedTaskJobRef; /* The job ref of the HW task that emitted the data */ + IMG_INT32 eClientModule; /* 
RGX_HWPERF_CLIENT_API - ID that the capture is originating from. */ + RGX_RESOURCE_CAPTURE_DETAIL uDetail; /* eType determines the value of the union */ +} RGX_RESOURCE_CAPTURE_INFO, *PRGX_RESOURCE_CAPTURE_INFO; + +#define RGX_RESOURCE_CAPTURE_INFO_BASE_SIZE() offsetof(RGX_RESOURCE_CAPTURE_INFO, uDetail) + +/*! Tile Lifetime Tracking header size. Only available if + * RGX_FEATURE_ISP_TILE_LIFETIME_TRACKING is present and enabled via + * SUPPORT_TLT_PERF + */ +#define RGX_TLT_HARDWARE_HDR_SIZE (16U) + +/* PVRSRVGetHWPerfResourceCaptureResult */ +typedef enum +{ + RGX_HWPERF_RESOURCE_CAPTURE_RESULT_NONE = 0, + RGX_HWPERF_RESOURCE_CAPTURE_RESULT_OK, /* We got data ok, expect more packets for this request. */ + RGX_HWPERF_RESOURCE_CAPTURE_RESULT_NOT_READY, /* Signals a timeout on the connection - no data available yet. */ + RGX_HWPERF_RESOURCE_CAPTURE_RESULT_COMPLETE_SUCCESS, /* The request completed successfully, signals the end of packets for the request. */ + RGX_HWPERF_RESOURCE_CAPTURE_RESULT_COMPLETE_FAILURE /* The request failed, signals the end of packets for the request. */ +} RGX_HWPERF_RESOURCE_CAPTURE_RESULT_STATUS; + +typedef struct +{ + IMG_PID uPID; /* In case of a failed request pass the caller the PID and context ID. */ + IMG_UINT32 ui32CtxID; + RGX_RESOURCE_CAPTURE_INFO *psInfo; /* Various meta-data regarding the captured resource which aid the requester when, + unpacking the resource data, valid if RGX_HWPERF_RESOURCE_CAPTURE_RESULT_OK is returned. */ + IMG_BYTE *pbData; /* Buffer containing the captured resource data, valid if RGX_HWPERF_RESOURCE_CAPTURE_RESULT_OK is returned. */ +} RGX_RESOURCE_CAPTURE_RESULT; + +/*! 
This type is a union of packet payload data structures associated with + * various FW and Host events */ +typedef union +{ + RGX_HWPERF_FW_DATA sFW; /*!< Firmware event packet data, + events ``0x01-0x06`` */ + RGX_HWPERF_HW_DATA sHW; /*!< Hardware event packet data, + events ``0x07-0x19``, ``0x28-0x29`` */ + RGX_HWPERF_CLKS_CHG_DATA sCLKSCHG; /*!< Clock change event packet + data, events ``0x1A`` */ + RGX_HWPERF_GPU_STATE_CHG_DATA sGPUSTATECHG; /*!< GPU utilisation state + change event packet data, + events ``0x1B`` */ + RGX_HWPERF_PWR_EST_DATA sPWREST; /*!< Power estimate event + packet data, + events ``0x20-0x22`` */ + RGX_HWPERF_PWR_CHG_DATA sPWR; /*!< Power event packet data, + events ``0x23`` */ + RGX_HWPERF_CSW_DATA sCSW; /*!< Context switch packet data, + events ``0x30-0x31`` */ + RGX_HWPERF_DVFS_DATA sDVFS; /*!< DVFS activity data, + events ``0x32`` */ + RGX_HWPERF_UFO_DATA sUFO; /*!< UFO data, events ``0x38`` */ + RGX_HWPERF_FWACT_DATA sFWACT; /*!< Firmware activity event + packet data, + events ``0x39`` */ + /* */ + RGX_HWPERF_HOST_ENQ_DATA sENQ; /*!< Host ENQ data, + events ``0x01`` (Host) */ + RGX_HWPERF_HOST_UFO_DATA sHUFO; /*!< Host UFO data, + events ``0x02`` (Host) */ + RGX_HWPERF_HOST_ALLOC_DATA sHALLOC; /*!< Host Alloc data, + events ``0x03`` (Host) */ + RGX_HWPERF_HOST_CLK_SYNC_DATA sHCLKSYNC; /*!< Host CLK_SYNC data, + events ``0x04`` (Host) */ + RGX_HWPERF_HOST_FREE_DATA sHFREE; /*!< Host Free data, + events ``0x05`` (Host) */ + RGX_HWPERF_HOST_MODIFY_DATA sHMOD; /*!< Host Modify data, + events ``0x06`` (Host) */ + RGX_HWPERF_HOST_DEV_INFO_DATA sHDEVINFO; /*!< Host device info data, + events ``0x07`` (Host) */ + RGX_HWPERF_HOST_INFO_DATA sHINFO; /*!< Host info data, + events ``0x08`` (Host) */ + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA sWAIT; /*!< Host fence-wait data, + events ``0x09`` (Host) */ + RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA sSWTLADV; /*!< Host SW-timeline advance + data, events ``0x0A`` (Host) */ +} RGX_HWPERF_V2_PACKET_DATA, 
*RGX_PHWPERF_V2_PACKET_DATA; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_V2_PACKET_DATA); + +#define RGX_HWPERF_GET_PACKET_DATA(_packet_addr) ((RGX_PHWPERF_V2_PACKET_DATA) (IMG_OFFSET_ADDR((_packet_addr), sizeof(RGX_HWPERF_V2_PACKET_HDR)))) + +#define RGX_HWPERF_GET_DVFS_EVENT_TYPE_PTR(_packet_addr) \ + ((RGX_HWPERF_DVFS_EV*) (IMG_OFFSET_ADDR((_packet_addr), sizeof(RGX_HWPERF_V2_PACKET_HDR) + offsetof(RGX_HWPERF_DVFS_DATA,eEventType)))) + +/****************************************************************************** + * API Types + *****************************************************************************/ + +/*! Counter block IDs for all the hardware blocks with counters. + * Directly addressable blocks must have a value between 0..15 [0..0xF]. + * Indirect groups have following encoding: + * First hex digit (LSB) represents a unit number within the group + * and the second hex digit represents the group number. + * Group 0 is the direct group, all others are indirect groups. + */ +typedef IMG_UINT32 RGX_HWPERF_CNTBLK_ID; + +/*! Directly addressable non bank-switched counter blocks */ +#define RGX_CNTBLK_ID_JONES 0x0000U +#define RGX_CNTBLK_ID_SLC 0x0001U /*!< SLC-specific counter control */ +#define RGX_CNTBLK_ID_FBCDC 0x0002U +#define RGX_CNTBLK_ID_FW_CUSTOM 0x0003U /*!< Custom FW provided counters */ + +/*! Directly addressable SLC counter blocks - presence depends on GPU. */ +#define RGX_CNTBLK_ID_SLCBANK0 0x0004U /*!< SLCBANK0 counter control */ +#define RGX_CNTBLK_ID_SLCBANK1 0x0005U /*!< SLCBANK1 counter control */ +#define RGX_CNTBLK_ID_SLCBANK2 0x0006U /*!< SLCBANK2 counter control */ +#define RGX_CNTBLK_ID_SLCBANK3 0x0007U /*!< SLCBANK3 counter control */ +#define RGX_CNTBLK_ID_SLCBANK_ALL 0x4004U /*!< SLC ALL block ID */ + +#define RGX_CNTBLK_ID_PIPELINE_STATS 0x0008U /*!< PIPELINE_STATS counters */ +#define RGX_CNTBLK_ID_DIRECT_LAST 0x0009U /*!< Indirect blocks start from here */ + +/*! 
Indirectly addressable counter blocks */ +#define RGX_CNTBLK_ID_ISP0 0x0010U /*!< ISP 1..N ISP */ +#define RGX_CNTBLK_ID_ISP1 0x0011U +#define RGX_CNTBLK_ID_ISP2 0x0012U +#define RGX_CNTBLK_ID_ISP3 0x0013U +#define RGX_CNTBLK_ID_ISP4 0x0014U +#define RGX_CNTBLK_ID_ISP5 0x0015U +#define RGX_CNTBLK_ID_ISP6 0x0016U +#define RGX_CNTBLK_ID_ISP7 0x0017U +#define RGX_CNTBLK_ID_ISP_ALL 0x4010U + +#define RGX_CNTBLK_ID_MERCER0 0x0020U /*!< MERCER 1..N MERCER */ +#define RGX_CNTBLK_ID_MERCER1 0x0021U +#define RGX_CNTBLK_ID_MERCER2 0x0022U +#define RGX_CNTBLK_ID_MERCER3 0x0023U +#define RGX_CNTBLK_ID_MERCER4 0x0024U +#define RGX_CNTBLK_ID_MERCER5 0x0025U +#define RGX_CNTBLK_ID_MERCER6 0x0026U +#define RGX_CNTBLK_ID_MERCER7 0x0027U +#define RGX_CNTBLK_ID_MERCER_ALL 0x4020U + +#define RGX_CNTBLK_ID_PBE0 0x0030U /*!< PBE 1..N PBE_PER_SPU x N SPU */ +#define RGX_CNTBLK_ID_PBE1 0x0031U +#define RGX_CNTBLK_ID_PBE2 0x0032U +#define RGX_CNTBLK_ID_PBE3 0x0033U +#define RGX_CNTBLK_ID_PBE4 0x0034U +#define RGX_CNTBLK_ID_PBE5 0x0035U +#define RGX_CNTBLK_ID_PBE6 0x0036U +#define RGX_CNTBLK_ID_PBE7 0x0037U +#define RGX_CNTBLK_ID_PBE_ALL 0x4030U + +#define RGX_CNTBLK_ID_PBE_SHARED0 0x0040U /*!< PBE_SHARED 1..N SPU */ +#define RGX_CNTBLK_ID_PBE_SHARED1 0x0041U +#define RGX_CNTBLK_ID_PBE_SHARED2 0x0042U +#define RGX_CNTBLK_ID_PBE_SHARED3 0x0043U +#define RGX_CNTBLK_ID_PBE_SHARED_ALL 0x4040U + +#define RGX_CNTBLK_ID_USC0 0x0050U /*!< USC 1..N USC */ +#define RGX_CNTBLK_ID_USC1 0x0051U +#define RGX_CNTBLK_ID_USC2 0x0052U +#define RGX_CNTBLK_ID_USC3 0x0053U +#define RGX_CNTBLK_ID_USC4 0x0054U +#define RGX_CNTBLK_ID_USC5 0x0055U +#define RGX_CNTBLK_ID_USC6 0x0056U +#define RGX_CNTBLK_ID_USC7 0x0057U +#define RGX_CNTBLK_ID_USC_ALL 0x4050U + +#define RGX_CNTBLK_ID_TPU0 0x0060U /*!< TPU 1..N TPU */ +#define RGX_CNTBLK_ID_TPU1 0x0061U +#define RGX_CNTBLK_ID_TPU2 0x0062U +#define RGX_CNTBLK_ID_TPU3 0x0063U +#define RGX_CNTBLK_ID_TPU4 0x0064U +#define RGX_CNTBLK_ID_TPU5 0x0065U +#define 
RGX_CNTBLK_ID_TPU6 0x0066U +#define RGX_CNTBLK_ID_TPU7 0x0067U +#define RGX_CNTBLK_ID_TPU_ALL 0x4060U + +#define RGX_CNTBLK_ID_SWIFT0 0x0070U /*!< SWIFT 1..N SWIFT */ +#define RGX_CNTBLK_ID_SWIFT1 0x0071U +#define RGX_CNTBLK_ID_SWIFT2 0x0072U +#define RGX_CNTBLK_ID_SWIFT3 0x0073U +#define RGX_CNTBLK_ID_SWIFT4 0x0074U +#define RGX_CNTBLK_ID_SWIFT5 0x0075U +#define RGX_CNTBLK_ID_SWIFT6 0x0076U +#define RGX_CNTBLK_ID_SWIFT7 0x0077U +#define RGX_CNTBLK_ID_SWIFT_ALL 0x4070U + +#define RGX_CNTBLK_ID_TEXAS0 0x0080U /*!< TEXAS 1..N TEXAS */ +#define RGX_CNTBLK_ID_TEXAS1 0x0081U +#define RGX_CNTBLK_ID_TEXAS2 0x0082U +#define RGX_CNTBLK_ID_TEXAS3 0x0083U +#define RGX_CNTBLK_ID_TEXAS_ALL 0x4080U + +#define RGX_CNTBLK_ID_RAC0 0x0090U /*!< RAC 1..N RAC */ +#define RGX_CNTBLK_ID_RAC1 0x0091U +#define RGX_CNTBLK_ID_RAC2 0x0092U +#define RGX_CNTBLK_ID_RAC3 0x0093U +#define RGX_CNTBLK_ID_RAC_ALL 0x4090U + +#define RGX_CNTBLK_ID_LAST 0x0094U /*!< End of RAC block */ + +/*! Masks for the counter block ID*/ +#define RGX_CNTBLK_ID_UNIT_MASK (0x000FU) /*!< Unit within group */ +#define RGX_CNTBLK_ID_GROUP_MASK (0x00F0U) /*!< Group value */ +#define RGX_CNTBLK_ID_GROUP_SHIFT (4U) +#define RGX_CNTBLK_ID_MC_GPU_MASK (0x0F00U) /*!< GPU ID for MC use */ +#define RGX_CNTBLK_ID_MC_GPU_SHIFT (8U) +#define RGX_CNTBLK_ID_UNIT_ALL_MASK (0x4000U) /*!< Program all units within a group */ + +static_assert( + ((RGX_CNTBLK_ID_DIRECT_LAST + ((RGX_CNTBLK_ID_LAST & RGX_CNTBLK_ID_GROUP_MASK) >> RGX_CNTBLK_ID_GROUP_SHIFT)) <= RGX_HWPERF_MAX_BVNC_BLOCK_LEN), + "RGX_HWPERF_MAX_BVNC_BLOCK_LEN insufficient"); + +#define RGX_HWPERF_EVENT_MASK_VALUE(e) (IMG_UINT64_C(1) << (e)) + +/* When adding new counters here, make sure changes are made to rgxfw_hwperf_fwblk_valid() as well */ +#define RGX_CUSTOM_FW_CNTRS \ + X(TA_LOCAL_FL_SIZE, 0x0, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAKICK) | \ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TAPAUSE) | \ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TARESUME) | 
\ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAFINISHED)) \ + \ + X(TA_GLOBAL_FL_SIZE, 0x1, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAKICK) | \ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TAPAUSE) | \ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TARESUME) | \ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAFINISHED)) \ + \ + X(3D_LOCAL_FL_SIZE, 0x2, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DKICK) | \ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DFINISHED)) \ + \ + X(3D_GLOBAL_FL_SIZE, 0x3, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DKICK) | \ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DFINISHED)) \ + \ + X(ISP_TILES_IN_FLIGHT, 0x4, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DKICK) | \ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DSPMKICK)) + +/*! Counter IDs for the firmware held statistics */ +typedef enum +{ +#define X(ctr, id, allow_mask) RGX_CUSTOM_FW_CNTR_##ctr = id, + RGX_CUSTOM_FW_CNTRS +#undef X + + /* always the last entry in the list */ + RGX_CUSTOM_FW_CNTR_LAST +} RGX_HWPERF_CUSTOM_FW_CNTR_ID; + +/*! Identifier for each counter in a performance counting module */ +typedef IMG_UINT32 RGX_HWPERF_CNTBLK_COUNTER_ID; + +/* sets all the bits from bit _b1 to _b2, in a IMG_UINT64 type */ +#define MASK_RANGE_IMPL(b1, b2) ((IMG_UINT64)((IMG_UINT64_C(1) << ((IMG_UINT32)(b2)-(IMG_UINT32)(b1) + 1U)) - 1U) << (IMG_UINT32)b1) +#define MASK_RANGE(R) MASK_RANGE_IMPL(R##_FIRST_TYPE, R##_LAST_TYPE) +#define RGX_HWPERF_HOST_EVENT_MASK_VALUE(e) (IMG_UINT32_C(1) << (e)) + +/*! Mask macros for use with RGXCtrlHWPerf() API. + */ +#define RGX_HWPERF_EVENT_MASK_NONE (IMG_UINT64_C(0x0000000000000000)) +#define RGX_HWPERF_EVENT_MASK_DEFAULT RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_FWACT) | \ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_PWR_CHG) | \ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CLKS_CHG) +#define RGX_HWPERF_EVENT_MASK_ALL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) + +/*! HWPerf Firmware event masks + * @par + * All FW Start/End/Debug (SED) events. 
*/ +#define RGX_HWPERF_EVENT_MASK_FW_SED (MASK_RANGE(RGX_HWPERF_FW_EVENT_RANGE)) + +#define RGX_HWPERF_EVENT_MASK_FW_UFO (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO)) +#define RGX_HWPERF_EVENT_MASK_FW_CSW (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CSW_START) |\ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CSW_FINISHED)) +/*! All FW events. */ +#define RGX_HWPERF_EVENT_MASK_ALL_FW (RGX_HWPERF_EVENT_MASK_FW_SED |\ + RGX_HWPERF_EVENT_MASK_FW_UFO |\ + RGX_HWPERF_EVENT_MASK_FW_CSW) + +/*! HW Periodic events (1ms interval). */ +#define RGX_HWPERF_EVENT_MASK_HW_PERIODIC (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PERIODIC)) +/*! All HW Kick/Finish events. */ +#define RGX_HWPERF_EVENT_MASK_HW_KICKFINISH ((MASK_RANGE(RGX_HWPERF_HW_EVENT_RANGE0) |\ + MASK_RANGE(RGX_HWPERF_HW_EVENT_RANGE1)) &\ + ~(RGX_HWPERF_EVENT_MASK_HW_PERIODIC)) + +#define RGX_HWPERF_EVENT_MASK_ALL_HW (RGX_HWPERF_EVENT_MASK_HW_KICKFINISH |\ + RGX_HWPERF_EVENT_MASK_HW_PERIODIC) + +#define RGX_HWPERF_EVENT_MASK_ALL_PWR_EST (MASK_RANGE(RGX_HWPERF_PWR_EST_RANGE)) + +#define RGX_HWPERF_EVENT_MASK_ALL_PWR (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CLKS_CHG) |\ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_GPU_STATE_CHG) |\ + RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_PWR_CHG)) + +/*! HWPerf Host event masks + */ +#define RGX_HWPERF_EVENT_MASK_HOST_WORK_ENQ (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_ENQ)) +#define RGX_HWPERF_EVENT_MASK_HOST_ALL_UFO (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_UFO)) +#define RGX_HWPERF_EVENT_MASK_HOST_ALL_PWR (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_CLK_SYNC)) + + +/*! Type used in the RGX API RGXConfigHWPerfCounters() */ +typedef struct +{ + /*! Reserved for future use */ + IMG_UINT32 ui32Reserved; + + /*! Counter block ID, see RGX_HWPERF_CNTBLK_ID */ + IMG_UINT16 ui16BlockID; + + /*! Number of configured counters within this block */ + IMG_UINT16 ui16NumCounters; + + /*! 
Counter register values */ + IMG_UINT16 ui16Counters[RGX_CNTBLK_COUNTERS_MAX]; + +} UNCACHED_ALIGN RGX_HWPERF_CONFIG_CNTBLK; + +RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CONFIG_CNTBLK); + + +#if defined(__cplusplus) +} +#endif + +#endif /* RGX_HWPERF_H_ */ + +/****************************************************************************** + End of file +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgx_hwperf_table.c b/drivers/gpu/drm/phytium/octopus/rgx_hwperf_table.c new file mode 100644 index 000000000000..4c9890146e60 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgx_hwperf_table.c @@ -0,0 +1,638 @@ +/*************************************************************************/ /*! +@File +@Title RGX HW Performance counter table +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description RGX HW Performance counters table +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ /**************************************************************************/ + +#include "img_defs.h" +#include "rgx_fwif_hwperf.h" +#if defined(__KERNEL__) +#include "rgxdefs_km.h" +#else +#include "rgxdefs.h" +#endif +#include "rgx_hwperf_table.h" + +/* Includes needed for PVRSRVKM (Server) context */ +# include "rgx_bvnc_defs_km.h" +# if defined(__KERNEL__) +# include "rgxdevice.h" +# endif + +/* Shared compile-time context ASSERT macro */ +#if defined(RGX_FIRMWARE) +# include "rgxfw_utils.h" +/* firmware context */ +# define DBG_ASSERT(_c) RGXFW_ASSERT((_c)) +#else +# include "pvr_debug.h" +/* host client/server context */ +# define DBG_ASSERT(_c) PVR_ASSERT((_c)) +#endif + +/***************************************************************************** + RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPowered() + + Referenced in gasCntBlkTypeModel[] table below and only called from + RGX_FIRMWARE run-time context. Therefore compile time configuration is used. + *****************************************************************************/ + +#if defined(RGX_FIRMWARE) && defined(RGX_FEATURE_PERFBUS) +# include "rgxfw_pow.h" +# include "rgxfw_utils.h" + +static bool rgxfw_hwperf_pow_st_direct(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 ui8UnitId) +{ + PVR_UNREFERENCED_PARAMETER(eBlkType); + PVR_UNREFERENCED_PARAMETER(ui8UnitId); + +#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) + /* S7XT: JONES */ + return (eBlkType == RGX_CNTBLK_ID_JONES); +#elif defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) + /* S6XT: TA, TORNADO */ + return true; +#else + /* S6 : TA, HUB, RASTER (RASCAL) */ + return (gsPowCtl.eUnitsPowState & RGXFW_POW_ST_RD_ON) != 0U; +#endif +} + +/* Only use conditional compilation when counter blocks appear in different + * islands for different Rogue families. 
+ */ +static bool rgxfw_hwperf_pow_st_indirect(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 ui8UnitId) +{ + IMG_UINT32 ui32NumDustsEnabled = rgxfw_pow_get_enabled_units(); + + if (((gsPowCtl.eUnitsPowState & RGXFW_POW_ST_RD_ON) != 0U) && + (ui32NumDustsEnabled > 0U)) + { +#if defined(RGX_FEATURE_DYNAMIC_DUST_POWER) + IMG_UINT32 ui32NumUscEnabled = ui32NumDustsEnabled*2U; + + switch (eBlkType) + { + case RGX_CNTBLK_ID_TPU_MCU0: /* S6 and S6XT */ +#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) + case RGX_CNTBLK_ID_TEXAS0: /* S7 */ +#endif + if (ui8UnitId >= ui32NumDustsEnabled) + { + return false; + } + break; + case RGX_CNTBLK_ID_USC0: /* S6, S6XT, S7 */ + case RGX_CNTBLK_ID_PBE0: /* S7, PBE2_IN_XE */ + /* Handle single cluster cores */ + if (ui8UnitId >= ((ui32NumUscEnabled > RGX_FEATURE_NUM_CLUSTERS) ? RGX_FEATURE_NUM_CLUSTERS : ui32NumUscEnabled)) + { + return false; + } + break; + case RGX_CNTBLK_ID_BLACKPEARL0: /* S7 */ + case RGX_CNTBLK_ID_RASTER0: /* S6XT */ +#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) + case RGX_CNTBLK_ID_TEXAS0: /* S6XT */ +#endif + if (ui8UnitId >= (RGX_REQ_NUM_PHANTOMS(ui32NumUscEnabled))) + { + return false; + } + break; + default: + RGXFW_ASSERT(false); /* should never get here, table error */ + break; + } +#else + /* Always true, no fused DUSTs, all powered so do not check unit */ + PVR_UNREFERENCED_PARAMETER(eBlkType); + PVR_UNREFERENCED_PARAMETER(ui8UnitId); +#endif + } + else + { + return false; + } + return true; +} + +#else /* !defined(RGX_FIRMWARE) || !defined(RGX_FEATURE_PERFBUS) */ + +# define rgxfw_hwperf_pow_st_direct ((void*)NULL) +# define rgxfw_hwperf_pow_st_indirect ((void*)NULL) +# define rgxfw_hwperf_pow_st_gandalf ((void*)NULL) + +#endif /* !defined(RGX_FIRMWARE) || !defined(RGX_FEATURE_PERFBUS) */ + +# define rgxfw_hwperf_pow_st_gandalf ((void*)NULL) + +/***************************************************************************** + RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPowered() end + 
*****************************************************************************/ + +/***************************************************************************** + RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPresent() start + + Referenced in gasCntBlkTypeModel[] table below and called from all build + contexts: + RGX_FIRMWARE, PVRSRVCTL (UM) and PVRSRVKM (Server). + + Therefore each function has two implementations, one for compile time and one + run time configuration depending on the context. The functions will inform the + caller whether this block is valid for this particular RGX device. Other + run-time dependent data is returned in psRtInfo for the caller to use. + *****************************************************************************/ + +/* Used for block types: USC */ +static IMG_BOOL rgx_hwperf_blk_present_perfbus(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, void *pvRtInfo) +{ + DBG_ASSERT(psBlkTypeDesc != NULL); + DBG_ASSERT(psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_USC0); + +#if defined(__KERNEL__) /* Server context */ + PVR_ASSERT(pvDev_km != NULL); + PVR_ASSERT(pvRtInfo != NULL); + { + RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo = (RGX_HWPERF_CNTBLK_RT_INFO *) pvRtInfo; + const PVRSRV_RGXDEV_INFO *psDevInfo = (const PVRSRV_RGXDEV_INFO *)pvDev_km; + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERFBUS)) + { + psRtInfo->ui32NumUnits = RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) ? 
RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS) : 0; + psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg; + return IMG_TRUE; + } + } +#else /* FW context */ + PVR_UNREFERENCED_PARAMETER(pvDev_km); + PVR_UNREFERENCED_PARAMETER(pvRtInfo); + PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc); +# if defined(RGX_FEATURE_PERFBUS) + return IMG_TRUE; +# endif +#endif + return IMG_FALSE; +} + +/* Used for block types: Direct RASTERISATION, HUB */ +static IMG_BOOL rgx_hwperf_blk_present_not_clustergrouping(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, void *pvRtInfo) +{ + DBG_ASSERT(psBlkTypeDesc != NULL); + DBG_ASSERT((psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_RASTER) || + (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_HUB)); + +#if defined(__KERNEL__) /* Server context */ + PVR_ASSERT(pvDev_km != NULL); + PVR_ASSERT(pvRtInfo != NULL); + { + RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo = (RGX_HWPERF_CNTBLK_RT_INFO *) pvRtInfo; + const PVRSRV_RGXDEV_INFO *psDevInfo = (const PVRSRV_RGXDEV_INFO *)pvDev_km; + if ((!RGX_IS_FEATURE_SUPPORTED(psDevInfo, CLUSTER_GROUPING)) && + (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERFBUS))) + { + psRtInfo->ui32NumUnits = 1; + psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg; + return IMG_TRUE; + } + } +#else /* FW context */ + PVR_UNREFERENCED_PARAMETER(pvDev_km); + PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc); + PVR_UNREFERENCED_PARAMETER(pvRtInfo); +# if !defined(RGX_FEATURE_CLUSTER_GROUPING) && defined(RGX_FEATURE_PERFBUS) + return IMG_TRUE; +# endif +#endif + return IMG_FALSE; +} + +#if defined(__KERNEL__) /* Server context */ +static IMG_UINT32 rgx_units_indirect_by_phantom(const PVRSRV_DEVICE_FEATURE_CONFIG *psFeatCfg) +{ + /* Run-time math for RGX_HWPERF_INDIRECT_BY_PHANTOM */ + return ((psFeatCfg->ui64Features & RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK) == 0) ? 
1 + : (psFeatCfg->ui32FeaturesValues[RGX_FEATURE_NUM_CLUSTERS_IDX]+3)/4; +} + +static IMG_UINT32 rgx_units_phantom_indirect_by_dust(const PVRSRV_DEVICE_FEATURE_CONFIG *psFeatCfg) +{ + /* Run-time math for RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST */ + return MAX((psFeatCfg->ui32FeaturesValues[RGX_FEATURE_NUM_CLUSTERS_IDX]>>1),1); +} + +static IMG_UINT32 rgx_units_phantom_indirect_by_cluster(const PVRSRV_DEVICE_FEATURE_CONFIG *psFeatCfg) +{ + /* Run-time math for RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER */ + return psFeatCfg->ui32FeaturesValues[RGX_FEATURE_NUM_CLUSTERS_IDX]; +} +#endif /* defined(__KERNEL__) */ + +/* Used for block types: TORNADO, TEXAS, Indirect RASTERISATION */ +static IMG_BOOL rgx_hwperf_blk_present_xttop(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, void *pvRtInfo) +{ + DBG_ASSERT(psBlkTypeDesc != NULL); + DBG_ASSERT((psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TORNADO) || + (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TEXAS0) || + (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_RASTER0)); + +#if defined(__KERNEL__) /* Server context */ + PVR_ASSERT(pvDev_km != NULL); + PVR_ASSERT(pvRtInfo != NULL); + { + RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo = (RGX_HWPERF_CNTBLK_RT_INFO *) pvRtInfo; + const PVRSRV_RGXDEV_INFO *psDevInfo = (const PVRSRV_RGXDEV_INFO *)pvDev_km; + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XT_TOP_INFRASTRUCTURE)) + { + if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TORNADO) + { + psRtInfo->ui32NumUnits = 1; + psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg; + return IMG_TRUE; + } + else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TEXAS0) + { + psRtInfo->ui32NumUnits = rgx_units_indirect_by_phantom(&psDevInfo->sDevFeatureCfg); + psRtInfo->ui32IndirectReg = RGX_CR_TEXAS_PERF_INDIRECT; + return IMG_TRUE; + } + else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_RASTER0) + { + psRtInfo->ui32NumUnits = rgx_units_indirect_by_phantom(&psDevInfo->sDevFeatureCfg); + 
psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg; + return IMG_TRUE; + } + } + } +#else /* FW context */ + PVR_UNREFERENCED_PARAMETER(pvDev_km); + PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc); + PVR_UNREFERENCED_PARAMETER(pvRtInfo); +# if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) && defined(RGX_FEATURE_PERFBUS) + return IMG_TRUE; +# endif +#endif + return IMG_FALSE; +} + +/* Used for block types: JONES, TPU_MCU, TEXAS, BLACKPERL, PBE */ +static IMG_BOOL rgx_hwperf_blk_present_s7top(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, void *pvRtInfo) +{ + DBG_ASSERT(psBlkTypeDesc != NULL); + DBG_ASSERT((psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_JONES) || + (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0) || + (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TEXAS0) || + (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_BLACKPEARL0) || + (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_PBE0)); + +#if defined(__KERNEL__) /* Server context */ + PVR_ASSERT(pvDev_km != NULL); + PVR_ASSERT(pvRtInfo != NULL); + { + RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo = (RGX_HWPERF_CNTBLK_RT_INFO *) pvRtInfo; + const PVRSRV_RGXDEV_INFO *psDevInfo = (const PVRSRV_RGXDEV_INFO *)pvDev_km; + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) + { + if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0) + { + psRtInfo->ui32NumUnits = rgx_units_phantom_indirect_by_dust(&psDevInfo->sDevFeatureCfg); + psRtInfo->ui32IndirectReg = RGX_CR_TPU_PERF_INDIRECT; + return IMG_TRUE; + } + else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TEXAS0) + { + psRtInfo->ui32NumUnits = rgx_units_phantom_indirect_by_dust(&psDevInfo->sDevFeatureCfg); + psRtInfo->ui32IndirectReg = RGX_CR_TEXAS3_PERF_INDIRECT; + return IMG_TRUE; + } + else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_BLACKPEARL0) + { + psRtInfo->ui32NumUnits = rgx_units_indirect_by_phantom(&psDevInfo->sDevFeatureCfg); + psRtInfo->ui32IndirectReg = 
psBlkTypeDesc->ui32IndirectReg; + return IMG_TRUE; + } + else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_PBE0) + { + psRtInfo->ui32NumUnits = rgx_units_phantom_indirect_by_cluster(&psDevInfo->sDevFeatureCfg); + psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg; + return IMG_TRUE; + } + else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_JONES) + { + psRtInfo->ui32NumUnits = 1; + psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg; + return IMG_TRUE; + } + } + } +#else /* FW context */ + PVR_UNREFERENCED_PARAMETER(pvDev_km); + PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc); + PVR_UNREFERENCED_PARAMETER(pvRtInfo); +# if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) + return IMG_TRUE; +# else +# endif +#endif + return IMG_FALSE; +} + +/* Used for block types: TA, TPU_MCU. Also PBE when PBE2_IN_XE is present */ +static IMG_BOOL rgx_hwperf_blk_present_not_s7top(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, void *pvRtInfo) +{ + DBG_ASSERT(psBlkTypeDesc != NULL); + DBG_ASSERT((psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TA) || + (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0) || + (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_PBE0)); + +#if defined(__KERNEL__) /* Server context */ + PVR_ASSERT(pvDev_km != NULL); + PVR_ASSERT(pvRtInfo != NULL); + { + RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo = (RGX_HWPERF_CNTBLK_RT_INFO *) pvRtInfo; + const PVRSRV_RGXDEV_INFO *psDevInfo = (const PVRSRV_RGXDEV_INFO *)pvDev_km; + if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE) && + RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERFBUS)) + { + if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TA) + { + psRtInfo->ui32NumUnits = 1; + psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg; + return IMG_TRUE; + } + else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_PBE0) + { + if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, PBE2_IN_XE)) + { + /* PBE counters are not present on this config */ + return IMG_FALSE; + } + 
psRtInfo->ui32NumUnits = rgx_units_phantom_indirect_by_cluster(&psDevInfo->sDevFeatureCfg); + psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg; + return IMG_TRUE; + } + else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0) + { + psRtInfo->ui32NumUnits = rgx_units_phantom_indirect_by_dust(&psDevInfo->sDevFeatureCfg); + psRtInfo->ui32IndirectReg = RGX_CR_TPU_MCU_L0_PERF_INDIRECT; + return IMG_TRUE; + } + } + } +#else /* FW context */ + PVR_UNREFERENCED_PARAMETER(pvDev_km); + PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc); + PVR_UNREFERENCED_PARAMETER(pvRtInfo); +# if !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && defined(RGX_FEATURE_PERFBUS) +# if !defined(RGX_FEATURE_PBE2_IN_XE) + if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_PBE0) + { + /* No support for PBE counters without PBE2_IN_XE */ + return IMG_FALSE; + } +# endif + return IMG_TRUE; +# endif +#endif + return IMG_FALSE; +} + +static IMG_BOOL rgx_hwperf_blk_present_check_s7top_or_not(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, void *pvRtInfo) +{ +#if defined(__KERNEL__) + return (rgx_hwperf_blk_present_s7top(psBlkTypeDesc, pvDev_km, pvRtInfo) + || rgx_hwperf_blk_present_not_s7top(psBlkTypeDesc, pvDev_km, pvRtInfo)); + +#elif defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) + return rgx_hwperf_blk_present_s7top(psBlkTypeDesc, pvDev_km, pvRtInfo); + +#elif defined(RGX_FEATURE_PBE2_IN_XE) || defined(RGX_FEATURE_PERFBUS) + return rgx_hwperf_blk_present_not_s7top(psBlkTypeDesc, pvDev_km, pvRtInfo); +#else + PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc); + PVR_UNREFERENCED_PARAMETER(pvDev_km); + PVR_UNREFERENCED_PARAMETER(pvRtInfo); + return IMG_FALSE; +#endif +} + +static IMG_BOOL rgx_hwperf_blk_present_check_s7top_or_xttop(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, void *pvRtInfo) +{ +#if defined(__KERNEL__) + return (rgx_hwperf_blk_present_s7top(psBlkTypeDesc, pvDev_km, pvRtInfo) + || rgx_hwperf_blk_present_xttop(psBlkTypeDesc, 
pvDev_km, pvRtInfo)); + +#elif defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) + return rgx_hwperf_blk_present_s7top(psBlkTypeDesc, pvDev_km, pvRtInfo); + +#elif defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) + return rgx_hwperf_blk_present_xttop(psBlkTypeDesc, pvDev_km, pvRtInfo); +#else + PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc); + PVR_UNREFERENCED_PARAMETER(pvDev_km); + PVR_UNREFERENCED_PARAMETER(pvRtInfo); + return IMG_FALSE; +#endif +} + +#if !defined(__KERNEL__) /* Firmware or User-mode context */ +static IMG_BOOL rgx_hwperf_blk_present_false(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, void *pvRtInfo) +{ + PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc); + PVR_UNREFERENCED_PARAMETER(pvDev_km); + PVR_UNREFERENCED_PARAMETER(pvRtInfo); + + /* Some functions not used on some BVNCs, silence compiler warnings */ + PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_perfbus); + PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_not_clustergrouping); + PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_xttop); + PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_s7top); + PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_not_s7top); + PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_check_s7top_or_not); + PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_check_s7top_or_xttop); + + return IMG_FALSE; +} + +/* Used to instantiate a null row in the block type model table below where the + * block is not supported for a given build BVNC in firmware/user mode context. + * This is needed as the blockid to block type lookup uses the table as well + * and clients may try to access blocks not in the hardware. 
*/ +#define RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(_blkid) {_blkid, 0, 0, 0, 0, 0, 0, 0, 0, #_blkid, NULL, rgx_hwperf_blk_present_false} + +#endif + + +/***************************************************************************** + RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPresent() end + *****************************************************************************/ + +#if defined(__KERNEL__) /* Values will be calculated at run-time */ +#define RGX_HWPERF_NUM_BLOCK_UNITS RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC +#define RGX_INDIRECT_REG_TEXAS 0xFFFFFFFF +#define RGX_INDIRECT_REG_TPU 0xFFFFFFFF + +#elif defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) +#define RGX_HWPERF_NUM_BLOCK_UNITS RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST +#define RGX_INDIRECT_REG_TEXAS RGX_CR_TEXAS3_PERF_INDIRECT +#define RGX_INDIRECT_REG_TPU RGX_CR_TPU_PERF_INDIRECT + +#else + +#if defined(RGX_FEATURE_PERFBUS) +#define RGX_INDIRECT_REG_TPU RGX_CR_TPU_MCU_L0_PERF_INDIRECT +#endif + +#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) +#define RGX_HWPERF_NUM_BLOCK_UNITS RGX_HWPERF_INDIRECT_BY_PHANTOM +#define RGX_INDIRECT_REG_TEXAS RGX_CR_TEXAS_PERF_INDIRECT +#endif + +#endif + + +/***************************************************************************** + RGXFW_HWPERF_CNTBLK_TYPE_MODEL gasCntBlkTypeModel[] table + + This table holds the entries for the performance counter block type model. + Where the block is not present on an RGX device in question the + pfnIsBlkPresent() returns false, if valid and present it returns true. + Columns in the table with a ** indicate the value is a default and the + value returned in RGX_HWPERF_CNTBLK_RT_INFO when calling pfnIsBlkPresent() + should be used at runtime by the caller. These columns are only valid for + compile time BVNC configured contexts. + + Order of table rows must match order of counter block IDs in the enumeration + RGX_HWPERF_CNTBLK_ID. 
+ *****************************************************************************/ + +static const RGXFW_HWPERF_CNTBLK_TYPE_MODEL gasCntBlkTypeModel[] = +{ + /* ui32CntBlkIdBase, ui32IndirectReg, ui32PerfReg, ui32Select0BaseReg, ui32Counter0BaseReg ui8NumCounters, ui32NumUnits**, ui8SelectRegModeShift, ui8SelectRegOffsetShift, pfnIsBlkPowered pfnIsBlkPresent + * pszBlockNameComment, */ + /*RGX_CNTBLK_ID_TA*/ +#if !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__) + {RGX_CNTBLK_ID_TA, 0, /* direct */ RGX_CR_TA_PERF, RGX_CR_TA_PERF_SELECT0, RGX_CR_TA_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_TA_PERF", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present_not_s7top }, +#else + RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TA), +#endif + + /*RGX_CNTBLK_ID_RASTER*/ +#if !defined(RGX_FEATURE_CLUSTER_GROUPING) && defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__) + {RGX_CNTBLK_ID_RASTER, 0, /* direct */ RGX_CR_RASTERISATION_PERF, RGX_CR_RASTERISATION_PERF_SELECT0, RGX_CR_RASTERISATION_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_RASTERISATION_PERF", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present_not_clustergrouping }, +#else + RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_RASTER), +#endif + + /*RGX_CNTBLK_ID_HUB*/ +#if !defined(RGX_FEATURE_CLUSTER_GROUPING) && defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__) + {RGX_CNTBLK_ID_HUB, 0, /* direct */ RGX_CR_HUB_BIFPMCACHE_PERF, RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0, RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_HUB_BIFPMCACHE_PERF", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present_not_clustergrouping }, +#else + RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_HUB), +#endif + + /*RGX_CNTBLK_ID_TORNADO*/ +#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) || defined(__KERNEL__) + {RGX_CNTBLK_ID_TORNADO, 0, /* direct */ RGX_CR_TORNADO_PERF, RGX_CR_TORNADO_PERF_SELECT0, RGX_CR_TORNADO_PERF_COUNTER_0, 4, 1, 21, 4, "RGX_CR_TORNADO_PERF", rgxfw_hwperf_pow_st_direct, 
rgx_hwperf_blk_present_xttop }, +#else + RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TORNADO), +#endif + + /*RGX_CNTBLK_ID_JONES*/ +#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(__KERNEL__) + {RGX_CNTBLK_ID_JONES, 0, /* direct */ RGX_CR_JONES_PERF, RGX_CR_JONES_PERF_SELECT0, RGX_CR_JONES_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_JONES_PERF", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present_s7top }, +#else + RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_JONES), +#endif + + /*RGX_CNTBLK_ID_TPU_MCU0*/ +#if defined(__KERNEL__) || (defined(RGX_FEATURE_PERFBUS) && !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)) || defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) + {RGX_CNTBLK_ID_TPU_MCU0, RGX_INDIRECT_REG_TPU, RGX_CR_TPU_MCU_L0_PERF, RGX_CR_TPU_MCU_L0_PERF_SELECT0, RGX_CR_TPU_MCU_L0_PERF_COUNTER_0, 4, RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST, 21, 3, "RGX_CR_TPU_MCU_L0_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_check_s7top_or_not }, +#else + RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TPU_MCU0), +#endif + + /*RGX_CNTBLK_ID_USC0*/ +#if defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__) + {RGX_CNTBLK_ID_USC0, RGX_CR_USC_PERF_INDIRECT, RGX_CR_USC_PERF, RGX_CR_USC_PERF_SELECT0, RGX_CR_USC_PERF_COUNTER_0, 4, RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER, 21, 3, "RGX_CR_USC_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_perfbus }, +#else + RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_USC0), +#endif + + /*RGX_CNTBLK_ID_TEXAS0*/ +#if defined(__KERNEL__) || defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) + {RGX_CNTBLK_ID_TEXAS0, RGX_INDIRECT_REG_TEXAS, RGX_CR_TEXAS_PERF, RGX_CR_TEXAS_PERF_SELECT0, RGX_CR_TEXAS_PERF_COUNTER_0, 6, RGX_HWPERF_NUM_BLOCK_UNITS, 31, 3, "RGX_CR_TEXAS_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_check_s7top_or_xttop }, +#else + RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TEXAS0), +#endif + + /*RGX_CNTBLK_ID_RASTER0*/ +#if 
defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) || defined(__KERNEL__) + {RGX_CNTBLK_ID_RASTER0, RGX_CR_RASTERISATION_PERF_INDIRECT, RGX_CR_RASTERISATION_PERF, RGX_CR_RASTERISATION_PERF_SELECT0, RGX_CR_RASTERISATION_PERF_COUNTER_0, 4, RGX_HWPERF_INDIRECT_BY_PHANTOM, 21, 3, "RGX_CR_RASTERISATION_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_xttop }, +#else + RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_RASTER0), +#endif + + /*RGX_CNTBLK_ID_BLACKPEARL0*/ +#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(__KERNEL__) + {RGX_CNTBLK_ID_BLACKPEARL0, RGX_CR_BLACKPEARL_PERF_INDIRECT, RGX_CR_BLACKPEARL_PERF, RGX_CR_BLACKPEARL_PERF_SELECT0, RGX_CR_BLACKPEARL_PERF_COUNTER_0, 6, RGX_HWPERF_INDIRECT_BY_PHANTOM, 21, 3, "RGX_CR_BLACKPEARL_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_s7top }, +#else + RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_BLACKPEARL0), +#endif + + /*RGX_CNTBLK_ID_PBE0*/ +#if defined(__KERNEL__) || defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(RGX_FEATURE_PBE2_IN_XE) + {RGX_CNTBLK_ID_PBE0, RGX_CR_PBE_PERF_INDIRECT, RGX_CR_PBE_PERF, RGX_CR_PBE_PERF_SELECT0, RGX_CR_PBE_PERF_COUNTER_0, 4, RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER, 21, 3, "RGX_CR_PBE_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_check_s7top_or_not }, +#else + RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_PBE0), +#endif +}; + + +IMG_INTERNAL IMG_UINT32 +RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **ppsModel) +{ + *ppsModel = gasCntBlkTypeModel; + return ARRAY_SIZE(gasCntBlkTypeModel); +} + +/****************************************************************************** + End of file (rgx_hwperf_table.c) + ******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgx_hwperf_table.h b/drivers/gpu/drm/phytium/octopus/rgx_hwperf_table.h new file mode 100644 index 000000000000..abe7c043d06c --- /dev/null +++ 
b/drivers/gpu/drm/phytium/octopus/rgx_hwperf_table.h @@ -0,0 +1,479 @@ +/*************************************************************************/ /*! +@File +@Title HWPerf counter table header +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Utility functions used internally for HWPerf data retrieval +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGX_HWPERF_TABLE_H +#define RGX_HWPERF_TABLE_H + +#include "img_types.h" +#include "img_defs.h" +#include "rgx_fwif_hwperf.h" + +/*****************************************************************************/ + +/* Forward declaration */ +typedef struct RGXFW_HWPERF_CNTBLK_TYPE_MODEL_ RGXFW_HWPERF_CNTBLK_TYPE_MODEL; + +/* Function pointer type for functions to check dynamic power state of + * counter block instance. Used only in firmware. */ +typedef IMG_BOOL (*PFN_RGXFW_HWPERF_CNTBLK_POWERED)( + RGX_HWPERF_CNTBLK_ID eBlkType, + IMG_UINT8 ui8UnitId); + +/* Counter block run-time info */ +typedef struct +{ + IMG_UINT32 uiNumUnits; /* Number of instances of this block type in the core */ +} RGX_HWPERF_CNTBLK_RT_INFO; + +/* Function pointer type for functions to check block is valid and present + * on that RGX Device at runtime. It may have compile logic or run-time + * logic depending on where the code executes: server, srvinit or firmware. + * Values in the psRtInfo output parameter are only valid if true returned. + */ +typedef IMG_BOOL (*PFN_RGXFW_HWPERF_CNTBLK_PRESENT)( + const struct RGXFW_HWPERF_CNTBLK_TYPE_MODEL_* psBlkTypeDesc, + const void *pvDev_km, + RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo); + +/* This structure encodes properties of a type of performance counter block. + * The structure is sometimes referred to as a block type descriptor. 
These + * properties contained in this structure represent the columns in the block + * type model table variable below. These values vary depending on the build + * BVNC and core type. + * Each direct block has a unique type descriptor and each indirect group has + * a type descriptor. + */ +struct RGXFW_HWPERF_CNTBLK_TYPE_MODEL_ +{ + IMG_UINT32 uiCntBlkIdBase; /* The starting block id for this block type */ + IMG_UINT32 uiIndirectReg; /* 0 if direct type otherwise the indirect register value to select indirect unit */ + IMG_UINT32 uiNumUnits; /* Number of instances of this block type in the core (compile time use) */ + const IMG_CHAR *pszBlockNameComment; /* Name of the PERF register. Used while dumping the perf counters to pdumps */ + PFN_RGXFW_HWPERF_CNTBLK_POWERED pfnIsBlkPowered; /* A function to determine dynamic power state for the block type */ + PFN_RGXFW_HWPERF_CNTBLK_PRESENT pfnIsBlkPresent; /* A function to determine presence on RGX Device at run-time */ + IMG_UINT16 *pszBlkCfgValid; /* Array of supported counters per block type */ +}; + +/*****************************************************************************/ + +/* Shared compile-time context ASSERT macro */ +#if defined(RGX_FIRMWARE) +/* firmware context */ +# define DBG_ASSERT(_c) RGXFW_ASSERT((_c)) +#else +/* host client/server context */ +# define DBG_ASSERT(_c) PVR_ASSERT((_c)) +#endif + +/***************************************************************************** + RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPowered() + + Referenced in gasCntBlkTypeModel[] table below and only called from + RGX_FIRMWARE run-time context. Therefore compile time configuration is used. 
+ *****************************************************************************/ + +#if defined(RGX_FIRMWARE) && defined(RGX_FEATURE_PERFBUS) +# include "rgxfw_pow.h" +# include "rgxfw_utils.h" + +static inline IMG_BOOL rgxfw_hwperf_pow_st_direct(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 ui8UnitId); +static inline IMG_BOOL rgxfw_hwperf_pow_st_direct(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 ui8UnitId) +{ + PVR_UNREFERENCED_PARAMETER(ui8UnitId); + + switch (eBlkType) + { + case RGX_CNTBLK_ID_JONES: + case RGX_CNTBLK_ID_SLC: + case RGX_CNTBLK_ID_SLCBANK0: + case RGX_CNTBLK_ID_FBCDC: + case RGX_CNTBLK_ID_FW_CUSTOM: + case RGX_CNTBLK_ID_PIPELINE_STATS: + return IMG_TRUE; + break; + + case RGX_CNTBLK_ID_SLCBANK1: + if (RGX_FEATURE_NUM_MEMBUS > 1U) + { + return IMG_TRUE; + } + else + { + return IMG_FALSE; + } + break; + + case RGX_CNTBLK_ID_SLCBANK2: + case RGX_CNTBLK_ID_SLCBANK3: + if (RGX_FEATURE_NUM_MEMBUS > 2U) + { + return IMG_TRUE; + } + else + { + return IMG_FALSE; + } + break; + + default: + return IMG_FALSE; + break; + } +} + +/* Only use conditional compilation when counter blocks appear in different + * islands for different Rogue families. + */ +static inline IMG_BOOL rgxfw_hwperf_pow_st_indirect(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 ui8UnitId); +static inline IMG_BOOL rgxfw_hwperf_pow_st_indirect(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 ui8UnitId) +{ + PVR_UNREFERENCED_PARAMETER(ui8UnitId); + + IMG_UINT32 ui32NumDustsEnabled = rgxfw_pow_get_enabled_units(); + + // We don't have any Dusts Enabled until first DC opens the GPU. This makes + // setting the PDump HWPerf trace buffers very difficult. 
+ // To work around this we special-case some of the 'have to be there' + // indirect registers (e.g., TPU0) + + switch (eBlkType) + { + case RGX_CNTBLK_ID_TPU0: + return IMG_TRUE; + /*NOTREACHED*/ + break; + default: + if (((gsPowCtl.eUnitsPowState & RGXFW_POW_ST_RD_ON) != 0U) && + (ui32NumDustsEnabled > 0U)) + { + return IMG_TRUE; + } + else + { + return IMG_FALSE; + } + /*NOTREACHED*/ + break; + } + return IMG_TRUE; +} + +#else /* !defined(RGX_FIRMWARE) || !defined(RGX_FEATURE_PERFBUS) */ + +# define rgxfw_hwperf_pow_st_direct ((void *)NULL) +# define rgxfw_hwperf_pow_st_indirect ((void *)NULL) + +#endif /* !defined(RGX_FIRMWARE) || !defined(RGX_FEATURE_PERFBUS) */ + +/***************************************************************************** + RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPowered() end + *****************************************************************************/ + +/***************************************************************************** + RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPresent() start + + Referenced in gasCntBlkTypeModel[] table below and called from all build + contexts: + RGX_FIRMWARE, PVRSRVCTL (UM) and PVRSRVKM (Server). + + Therefore each function has two implementations, one for compile time and one + run time configuration depending on the context. The functions will inform the + caller whether this block is valid for this particular RGX device. Other + run-time dependent data is returned in psRtInfo for the caller to use. 
+ *****************************************************************************/ + + +/* Used for all block types: Direct and Indirect */ +static inline IMG_BOOL rgx_hwperf_blk_present(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo) +{ +#if defined(__KERNEL__) /* Server context -- Run-time Only */ + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)pvDev_km; + PVRSRV_DEVICE_NODE *psNode; + IMG_UINT32 ui32MaxTPUPerSPU; + IMG_UINT32 ui32NumMemBus; + + DBG_ASSERT(psDevInfo != NULL); + DBG_ASSERT(psBlkTypeDesc != NULL); + DBG_ASSERT(psRtInfo != NULL); + + if (((psDevInfo == NULL) || (psBlkTypeDesc == NULL)) || (psRtInfo == NULL)) + { + return IMG_FALSE; + } + + psNode = psDevInfo->psDeviceNode; + DBG_ASSERT(psNode != NULL); + + if (psNode == NULL) + { + return IMG_FALSE; + } + + ui32MaxTPUPerSPU = + PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, MAX_TPU_PER_SPU); + + ui32NumMemBus = + PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, NUM_MEMBUS); + + switch (psBlkTypeDesc->uiCntBlkIdBase) + { + case RGX_CNTBLK_ID_JONES: + case RGX_CNTBLK_ID_SLC: + case RGX_CNTBLK_ID_SLCBANK0: + case RGX_CNTBLK_ID_FBCDC: + case RGX_CNTBLK_ID_FW_CUSTOM: + case RGX_CNTBLK_ID_PIPELINE_STATS: + psRtInfo->uiNumUnits = 1; + break; + + case RGX_CNTBLK_ID_SLCBANK1: + if (ui32NumMemBus >= 2U) + { + psRtInfo->uiNumUnits = 1; + } + else + { + psRtInfo->uiNumUnits = 0; + } + break; + + case RGX_CNTBLK_ID_SLCBANK2: + case RGX_CNTBLK_ID_SLCBANK3: + if (ui32NumMemBus > 2U) + { + psRtInfo->uiNumUnits = 1; + } + else + { + psRtInfo->uiNumUnits = 0; + } + break; + + case RGX_CNTBLK_ID_TPU0: + case RGX_CNTBLK_ID_SWIFT0: + psRtInfo->uiNumUnits = + PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, NUM_SPU); + psRtInfo->uiNumUnits *= ui32MaxTPUPerSPU; + break; + + case RGX_CNTBLK_ID_TEXAS0: + case RGX_CNTBLK_ID_PBE_SHARED0: + + psRtInfo->uiNumUnits = + PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, NUM_SPU); + break; + + case RGX_CNTBLK_ID_USC0: + case RGX_CNTBLK_ID_MERCER0: 
+ psRtInfo->uiNumUnits = + PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, NUM_CLUSTERS); + break; + + case RGX_CNTBLK_ID_PBE0: + + psRtInfo->uiNumUnits = + PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, PBE_PER_SPU); + psRtInfo->uiNumUnits *= + PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, NUM_SPU); + break; + + case RGX_CNTBLK_ID_ISP0: + + psRtInfo->uiNumUnits = + PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, NUM_ISP_PER_SPU); + /* Adjust by NUM_SPU */ + + psRtInfo->uiNumUnits *= + PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, NUM_SPU); + break; + + default: + return IMG_FALSE; + } + /* Verify that we have at least one unit present */ + if (psRtInfo->uiNumUnits > 0U) + { + return IMG_TRUE; + } + else + { + return IMG_FALSE; + } +#else /* FW context -- Compile-time only */ + PVR_UNREFERENCED_PARAMETER(pvDev_km); + DBG_ASSERT(psBlkTypeDesc != NULL); + + if (unlikely(psBlkTypeDesc == NULL)) + { + return IMG_FALSE; + } + + switch (psBlkTypeDesc->uiCntBlkIdBase) + { + /* Handle the dynamic-sized SLC blocks which are only present if + * RGX_FEATURE_NUM_MEMBUS is appropriately set. + */ + case RGX_CNTBLK_ID_SLCBANK1: + if (RGX_FEATURE_NUM_MEMBUS >= 2U) + { + psRtInfo->uiNumUnits = 1; + } + else + { + psRtInfo->uiNumUnits = 0; + } + break; + + case RGX_CNTBLK_ID_SLCBANK2: + case RGX_CNTBLK_ID_SLCBANK3: + if (RGX_FEATURE_NUM_MEMBUS > 2U) + { + psRtInfo->uiNumUnits = 1; + } + else + { + psRtInfo->uiNumUnits = 0; + } + break; + + default: + psRtInfo->uiNumUnits = psBlkTypeDesc->uiNumUnits; + break; + } + if (psRtInfo->uiNumUnits > 0U) + { + return IMG_TRUE; + } + else + { + return IMG_FALSE; + } +#endif /* defined(__KERNEL__) */ +} + +#if !defined(__KERNEL__) /* Firmware or User-mode context */ + +/* Used to instantiate a null row in the block type model table below where the + * block is not supported for a given build BVNC in firmware/user mode context. + * This is needed as the blockid to block type lookup uses the table as well + * and clients may try to access blocks not in the hardware. 
*/ +#define RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(_blkid) X(_blkid, 0, 0, #_blkid, NULL, NULL, NULL) + +#endif + +/***************************************************************************** + RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPresent() end + *****************************************************************************/ + +/***************************************************************************** + RGXFW_HWPERF_CNTBLK_TYPE_MODEL gasCntBlkTypeModel[] table + + This table holds the entries for the performance counter block type model. + Where the block is not present on an RGX device in question the + pfnIsBlkPresent() returns false, if valid and present it returns true. + Columns in the table with a ** indicate the value is a default and the value + returned in RGX_HWPERF_CNTBLK_RT_INFO when calling pfnIsBlkPresent()should + be used at runtime by the caller. These columns are only valid for compile + time BVNC configured contexts. + + Order of table rows must match order of counter block IDs in the enumeration + RGX_HWPERF_CNTBLK_ID. + + Table contains Xmacro styled entries. Each includer of this file must define + a gasCntBlkTypeModel[] structure which is local to itself. Only the layout is + defined here. 
+ + uiCntBlkIdBase : Block-ID + uiIndirectReg : 0 => Direct, non-zero => INDIRECT register address + uiNumUnits : Number of units present on the GPU + pszBlockNameComment : Name of the Performance Block + pfnIsBlkPowered : Function to determine power state of block + pfnIsBlkPresent : Function to determine block presence on the core + pszBlkCfgValid : Array of counters valid within this block type + *****************************************************************************/ + + // Furian 8XT V2 layout: + + /* uiCntBlkIdBase, uiIndirectReg, uiNumUnits**, pszBlockNameComment, pfnIsBlkPowered, pfnIsBlkPresent */ + + /* RGX_CNTBLK_ID_JONES */ +#if defined(RGX_FIRMWARE) || defined(__KERNEL__) + +/* Furian 8XT Direct Performance counter blocks */ + +#define RGX_CNT_BLK_TYPE_MODEL_DIRECT_LIST \ + /* uiCntBlkIdBase, uiIndirectReg, uiNumUnits**, pszBlockNameComment, pfnIsBlkPowered, pfnIsBlkPresent */ \ +X(RGX_CNTBLK_ID_JONES, 0, 1, "PERF_BLK_JONES", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present, g_auiJONES), \ +X(RGX_CNTBLK_ID_SLC, 0, 1, "PERF_BLK_SLC", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present, g_auiSLC), \ +X(RGX_CNTBLK_ID_FBCDC, 0, 1, "PERF_BLK_FBCDC", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present, g_auiFBCDC), \ +X(RGX_CNTBLK_ID_FW_CUSTOM, 0, 1, "PERF_BLK_FW_CUSTOM", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present, g_auiFWCUSTOM), \ +X(RGX_CNTBLK_ID_SLCBANK0, 0, 1, "PERF_BLK_SLC0", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present, g_auiSLC0), \ +X(RGX_CNTBLK_ID_SLCBANK1, 0, 1, "PERF_BLK_SLC1", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present, g_auiSLC1), \ +X(RGX_CNTBLK_ID_SLCBANK2, 0, 1, "PERF_BLK_SLC2", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present, g_auiSLC2), \ +X(RGX_CNTBLK_ID_SLCBANK3, 0, 1, "PERF_BLK_SLC3", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present, g_auiSLC3), \ +X(RGX_CNTBLK_ID_PIPELINE_STATS, 0, 1, "PERF_BLK_PIPELINE_STATS", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present, g_auiPIPELINE) + +/* Furian 8XT Indirect 
Performance counter blocks */ + +#define RGX_CNT_BLK_TYPE_MODEL_INDIRECT_LIST \ + /* uiCntBlkIdBase, uiIndirectReg, uiNumUnits**, pszBlockNameComment, pfnIsBlkPowered, pfnIsBlkPresent */ \ +X(RGX_CNTBLK_ID_ISP0, RGX_CR_ISP_INDIRECT, RGX_HWPERF_NUM_SPU * RGX_HWPERF_NUM_ISP_PER_SPU, "PERF_BLK_ISP", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present, g_auiISP), \ +X(RGX_CNTBLK_ID_MERCER0, RGX_CR_MERCER_INDIRECT, RGX_HWPERF_NUM_MERCER, "PERF_BLK_MERCER", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present, g_auiMERCER), \ +X(RGX_CNTBLK_ID_PBE0, RGX_CR_PBE_INDIRECT, RGX_HWPERF_NUM_PBE, "PERF_BLK_PBE", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present, g_auiPBE), \ +X(RGX_CNTBLK_ID_PBE_SHARED0, RGX_CR_PBE_SHARED_INDIRECT, RGX_HWPERF_NUM_PBE_SHARED, "PERF_BLK_PBE_SHARED", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present, g_auiPBE_SHARED), \ +X(RGX_CNTBLK_ID_USC0, RGX_CR_USC_INDIRECT, RGX_HWPERF_NUM_USC, "PERF_BLK_USC", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present, g_auiUSC), \ +X(RGX_CNTBLK_ID_TPU0, RGX_CR_TPU_INDIRECT, RGX_HWPERF_NUM_TPU, "PERF_BLK_TPU", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present, g_auiTPU), \ +X(RGX_CNTBLK_ID_SWIFT0, RGX_CR_SWIFT_INDIRECT, RGX_HWPERF_NUM_SWIFT, "PERF_BLK_SWIFT", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present, g_auiSWIFT), \ +X(RGX_CNTBLK_ID_TEXAS0, RGX_CR_TEXAS_INDIRECT, RGX_HWPERF_NUM_TEXAS, "PERF_BLK_TEXAS", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present, g_auiTEXAS) + +#else /* !defined(RGX_FIRMWARE) && !defined(__KERNEL__) */ + +#error "RGX_FIRMWARE or __KERNEL__ *MUST* be defined" + +#endif /* defined(RGX_FIRMWARE) || defined(__KERNEL__) */ + +#endif /* RGX_HWPERF_TABLE_H */ + +/****************************************************************************** + End of file (rgx_hwperf_table.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgx_memallocflags.h b/drivers/gpu/drm/phytium/octopus/rgx_memallocflags.h new 
file mode 100644 index 000000000000..2ccb3ae5e2f0 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgx_memallocflags.h @@ -0,0 +1,58 @@ +/**************************************************************************/ /*! +@File +@Title RGX device specific memory allocation flags +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGX_MEMALLOCFLAGS_H +#define RGX_MEMALLOCFLAGS_H + + +/* Include pvrsrv layer header as the flags below are used in the device + * field defined in this header inside Services code. + * See PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK */ +#include "pvrsrv_memallocflags.h" + + +/* Device specific MMU flags */ +#define PMMETA_PROTECT (1U << 0) /*!< Memory that only the PM and Meta can access */ +#define FIRMWARE_CACHED (1U << 1) /*!< Memory that is cached in META/MIPS */ + + +#endif /* RGX_MEMALLOCFLAGS_H */ diff --git a/drivers/gpu/drm/phytium/octopus/rgx_meta.h b/drivers/gpu/drm/phytium/octopus/rgx_meta.h new file mode 100644 index 000000000000..212646ec3b5e --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgx_meta.h @@ -0,0 +1,385 @@ +/*************************************************************************/ /*! +@File +@Title RGX META definitions +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description RGX META helper definitions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(RGX_META_H) +#define RGX_META_H + + +/***** The META HW register definitions in the file are updated manually *****/ + + +#include "img_defs.h" +#include "km/rgxdefs_km.h" + + +/****************************************************************************** +* META registers and MACROS +******************************************************************************/ +#define META_CR_CTRLREG_BASE(T) (0x04800000U + (0x1000U*(T))) + +#define META_CR_TXPRIVEXT (0x048000E8) +#define META_CR_TXPRIVEXT_MINIM_EN (IMG_UINT32_C(0x1) << 7) + +#define META_CR_SYSC_JTAG_THREAD (0x04830030) +#define META_CR_SYSC_JTAG_THREAD_PRIV_EN (0x00000004) + +#define META_CR_PERF_COUNT0 (0x0480FFE0) +#define META_CR_PERF_COUNT1 (0x0480FFE8) +#define META_CR_PERF_COUNT_CTRL_SHIFT (28) +#define META_CR_PERF_COUNT_CTRL_MASK (0xF0000000) +#define META_CR_PERF_COUNT_CTRL_DCACHEHITS (IMG_UINT32_C(0x8) << META_CR_PERF_COUNT_CTRL_SHIFT) +#define META_CR_PERF_COUNT_CTRL_ICACHEHITS (IMG_UINT32_C(0x9) << META_CR_PERF_COUNT_CTRL_SHIFT) +#define META_CR_PERF_COUNT_CTRL_ICACHEMISS (IMG_UINT32_C(0xA) << META_CR_PERF_COUNT_CTRL_SHIFT) +#define META_CR_PERF_COUNT_CTRL_ICORE (IMG_UINT32_C(0xD) << META_CR_PERF_COUNT_CTRL_SHIFT) +#define META_CR_PERF_COUNT_THR_SHIFT (24) +#define META_CR_PERF_COUNT_THR_MASK (0x0F000000) +#define META_CR_PERF_COUNT_THR_0 (IMG_UINT32_C(0x1) << META_CR_PERF_COUNT_THR_SHIFT) +#define META_CR_PERF_COUNT_THR_1 (IMG_UINT32_C(0x2) << META_CR_PERF_COUNT_THR_1) + +#define META_CR_TxVECINT_BHALT (0x04820500) +#define META_CR_PERF_ICORE0 (0x0480FFD0) +#define META_CR_PERF_ICORE1 (0x0480FFD8) +#define META_CR_PERF_ICORE_DCACHEMISS (0x8) + +#define META_CR_PERF_COUNT(CTRL, THR) ((META_CR_PERF_COUNT_CTRL_##CTRL << META_CR_PERF_COUNT_CTRL_SHIFT) | \ + (THR << META_CR_PERF_COUNT_THR_SHIFT)) + +#define META_CR_TXUXXRXDT_OFFSET (META_CR_CTRLREG_BASE(0U) + 0x0000FFF0U) +#define META_CR_TXUXXRXRQ_OFFSET 
(META_CR_CTRLREG_BASE(0U) + 0x0000FFF8U) + +#define META_CR_TXUXXRXRQ_DREADY_BIT (0x80000000U) /* Poll for done */ +#define META_CR_TXUXXRXRQ_RDnWR_BIT (0x00010000U) /* Set for read */ +#define META_CR_TXUXXRXRQ_TX_S (12) +#define META_CR_TXUXXRXRQ_RX_S (4) +#define META_CR_TXUXXRXRQ_UXX_S (0) + +#define META_CR_TXUIN_ID (0x0) /* Internal ctrl regs */ +#define META_CR_TXUD0_ID (0x1) /* Data unit regs */ +#define META_CR_TXUD1_ID (0x2) /* Data unit regs */ +#define META_CR_TXUA0_ID (0x3) /* Address unit regs */ +#define META_CR_TXUA1_ID (0x4) /* Address unit regs */ +#define META_CR_TXUPC_ID (0x5) /* PC registers */ + +/* Macros to calculate register access values */ +#define META_CR_CORE_REG(Thr, RegNum, Unit) (((IMG_UINT32)(Thr) << META_CR_TXUXXRXRQ_TX_S) | \ + ((IMG_UINT32)(RegNum) << META_CR_TXUXXRXRQ_RX_S) | \ + ((IMG_UINT32)(Unit) << META_CR_TXUXXRXRQ_UXX_S)) + +#define META_CR_THR0_PC META_CR_CORE_REG(0, 0, META_CR_TXUPC_ID) +#define META_CR_THR0_PCX META_CR_CORE_REG(0, 1, META_CR_TXUPC_ID) +#define META_CR_THR0_SP META_CR_CORE_REG(0, 0, META_CR_TXUA0_ID) + +#define META_CR_THR1_PC META_CR_CORE_REG(1, 0, META_CR_TXUPC_ID) +#define META_CR_THR1_PCX META_CR_CORE_REG(1, 1, META_CR_TXUPC_ID) +#define META_CR_THR1_SP META_CR_CORE_REG(1, 0, META_CR_TXUA0_ID) + +#define SP_ACCESS(Thread) META_CR_CORE_REG(Thread, 0, META_CR_TXUA0_ID) +#define PC_ACCESS(Thread) META_CR_CORE_REG(Thread, 0, META_CR_TXUPC_ID) + +#define META_CR_COREREG_ENABLE (0x0000000U) +#define META_CR_COREREG_STATUS (0x0000010U) +#define META_CR_COREREG_DEFR (0x00000A0U) +#define META_CR_COREREG_PRIVEXT (0x00000E8U) + +#define META_CR_T0ENABLE_OFFSET (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_ENABLE) +#define META_CR_T0STATUS_OFFSET (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_STATUS) +#define META_CR_T0DEFR_OFFSET (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_DEFR) +#define META_CR_T0PRIVEXT_OFFSET (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_PRIVEXT) + +#define META_CR_T1ENABLE_OFFSET 
(META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_ENABLE) +#define META_CR_T1STATUS_OFFSET (META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_STATUS) +#define META_CR_T1DEFR_OFFSET (META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_DEFR) +#define META_CR_T1PRIVEXT_OFFSET (META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_PRIVEXT) + +#define META_CR_TXENABLE_ENABLE_BIT (0x00000001U) /* Set if running */ +#define META_CR_TXSTATUS_PRIV (0x00020000U) +#define META_CR_TXPRIVEXT_MINIM (0x00000080U) + +#define META_MEM_GLOBAL_RANGE_BIT (0x80000000U) + +#define META_CR_TXCLKCTRL (0x048000B0) +#define META_CR_TXCLKCTRL_ALL_ON (0x55111111) +#define META_CR_TXCLKCTRL_ALL_AUTO (0xAA222222) + + +/****************************************************************************** +* META LDR Format +******************************************************************************/ +/* Block header structure */ +typedef struct +{ + IMG_UINT32 ui32DevID; + IMG_UINT32 ui32SLCode; + IMG_UINT32 ui32SLData; + IMG_UINT16 ui16PLCtrl; + IMG_UINT16 ui16CRC; + +} RGX_META_LDR_BLOCK_HDR; + +/* High level data stream block structure */ +typedef struct +{ + IMG_UINT16 ui16Cmd; + IMG_UINT16 ui16Length; + IMG_UINT32 ui32Next; + IMG_UINT32 aui32CmdData[4]; + +} RGX_META_LDR_L1_DATA_BLK; + +/* High level data stream block structure */ +typedef struct +{ + IMG_UINT16 ui16Tag; + IMG_UINT16 ui16Length; + IMG_UINT32 aui32BlockData[4]; + +} RGX_META_LDR_L2_DATA_BLK; + +/* Config command structure */ +typedef struct +{ + IMG_UINT32 ui32Type; + IMG_UINT32 aui32BlockData[4]; + +} RGX_META_LDR_CFG_BLK; + +/* Block type definitions */ +#define RGX_META_LDR_COMMENT_TYPE_MASK (0x0010U) +#define RGX_META_LDR_BLK_IS_COMMENT(X) ((X & RGX_META_LDR_COMMENT_TYPE_MASK) != 0U) + +/* Command definitions + * Value Name Description + * 0 LoadMem Load memory with binary data. + * 1 LoadCore Load a set of core registers. + * 2 LoadMMReg Load a set of memory mapped registers. + * 3 StartThreads Set each thread PC and SP, then enable threads. 
+ * 4 ZeroMem Zeros a memory region. + * 5 Config Perform a configuration command. + */ +#define RGX_META_LDR_CMD_MASK (0x000FU) + +#define RGX_META_LDR_CMD_LOADMEM (0x0000U) +#define RGX_META_LDR_CMD_LOADCORE (0x0001U) +#define RGX_META_LDR_CMD_LOADMMREG (0x0002U) +#define RGX_META_LDR_CMD_START_THREADS (0x0003U) +#define RGX_META_LDR_CMD_ZEROMEM (0x0004U) +#define RGX_META_LDR_CMD_CONFIG (0x0005U) + +/* Config Command definitions + * Value Name Description + * 0 Pause Pause for x times 100 instructions + * 1 Read Read a value from register - No value return needed. + * Utilises effects of issuing reads to certain registers + * 2 Write Write to mem location + * 3 MemSet Set mem to value + * 4 MemCheck check mem for specific value. + */ +#define RGX_META_LDR_CFG_PAUSE (0x0000) +#define RGX_META_LDR_CFG_READ (0x0001) +#define RGX_META_LDR_CFG_WRITE (0x0002) +#define RGX_META_LDR_CFG_MEMSET (0x0003) +#define RGX_META_LDR_CFG_MEMCHECK (0x0004) + + +/****************************************************************************** +* RGX FW segmented MMU definitions +******************************************************************************/ +/* All threads can access the segment */ +#define RGXFW_SEGMMU_ALLTHRS (IMG_UINT32_C(0xf) << 8U) +/* Writable */ +#define RGXFW_SEGMMU_WRITEABLE (0x1U << 1U) +/* All threads can access and writable */ +#define RGXFW_SEGMMU_ALLTHRS_WRITEABLE (RGXFW_SEGMMU_ALLTHRS | RGXFW_SEGMMU_WRITEABLE) + +/* Direct map region 10 used for mapping GPU memory - max 8MB */ +#define RGXFW_SEGMMU_DMAP_GPU_ID (10U) +#define RGXFW_SEGMMU_DMAP_GPU_ADDR_START (0x07000000U) +#define RGXFW_SEGMMU_DMAP_GPU_MAX_SIZE (0x00800000U) + +/* Segment IDs */ +#define RGXFW_SEGMMU_DATA_ID (1U) +#define RGXFW_SEGMMU_BOOTLDR_ID (2U) +#define RGXFW_SEGMMU_TEXT_ID (RGXFW_SEGMMU_BOOTLDR_ID) + +/* + * SLC caching strategy in S7 and octopus is emitted through the segment MMU. 
+ * All the segments configured through the macro RGXFW_SEGMMU_OUTADDR_TOP are + * CACHED in the SLC. + * The interface has been kept the same to simplify the code changes. + * The bifdm argument is ignored (no longer relevant) in S7 and octopus. + */ +#define RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC(pers, slc_policy, mmu_ctx) ((((IMG_UINT64) ((pers) & 0x3)) << 52) | \ + (((IMG_UINT64) ((mmu_ctx) & 0xFF)) << 44) | \ + (((IMG_UINT64) ((slc_policy) & 0x1)) << 40)) +#define RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_CACHED(mmu_ctx) RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC(0x3, 0x0, mmu_ctx) +#define RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_UNCACHED(mmu_ctx) RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC(0x0, 0x1, mmu_ctx) + +/* To configure the Page Catalog and BIF-DM fed into the BIF for Garten + * accesses through this segment + */ +#define RGXFW_SEGMMU_OUTADDR_TOP_SLC(pc, bifdm) (((IMG_UINT64)((IMG_UINT64)(pc) & 0xFU) << 44U) | \ + ((IMG_UINT64)((IMG_UINT64)(bifdm) & 0xFU) << 40U)) + +#define RGXFW_SEGMMU_META_BIFDM_ID (0x7U) +#if !defined(__KERNEL__) && defined(RGX_FEATURE_META) +#if defined(RGX_FEATURE_SLC_VIVT) +#define RGXFW_SEGMMU_OUTADDR_TOP_SLC_CACHED RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_CACHED +#define RGXFW_SEGMMU_OUTADDR_TOP_SLC_UNCACHED RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_UNCACHED +#define RGXFW_SEGMMU_OUTADDR_TOP_META RGXFW_SEGMMU_OUTADDR_TOP_SLC_CACHED +#else +#define RGXFW_SEGMMU_OUTADDR_TOP_SLC_CACHED RGXFW_SEGMMU_OUTADDR_TOP_SLC +#define RGXFW_SEGMMU_OUTADDR_TOP_SLC_UNCACHED RGXFW_SEGMMU_OUTADDR_TOP_SLC +#define RGXFW_SEGMMU_OUTADDR_TOP_META(pc) RGXFW_SEGMMU_OUTADDR_TOP_SLC(pc, RGXFW_SEGMMU_META_BIFDM_ID) +#endif +#endif + +/* META segments have 4kB minimum size */ +#define RGXFW_SEGMMU_ALIGN (0x1000U) + +/* Segmented MMU registers (n = segment id) */ +#define META_CR_MMCU_SEGMENTn_BASE(n) (0x04850000U + ((n)*0x10U)) +#define META_CR_MMCU_SEGMENTn_LIMIT(n) (0x04850004U + ((n)*0x10U)) +#define META_CR_MMCU_SEGMENTn_OUTA0(n) (0x04850008U + ((n)*0x10U)) +#define META_CR_MMCU_SEGMENTn_OUTA1(n) 
(0x0485000CU + ((n)*0x10U)) + +/* The following defines must be recalculated if the Meta MMU segments used + * to access Host-FW data are changed + * Current combinations are: + * - SLC uncached, META cached, FW base address 0x70000000 + * - SLC uncached, META uncached, FW base address 0xF0000000 + * - SLC cached, META cached, FW base address 0x10000000 + * - SLC cached, META uncached, FW base address 0x90000000 + */ +#define RGXFW_SEGMMU_DATA_BASE_ADDRESS (0x10000000U) +#define RGXFW_SEGMMU_DATA_META_CACHED (0x0U) +#define RGXFW_SEGMMU_DATA_META_UNCACHED (META_MEM_GLOBAL_RANGE_BIT) // 0x80000000 +#define RGXFW_SEGMMU_DATA_META_CACHE_MASK (META_MEM_GLOBAL_RANGE_BIT) +/* For non-VIVT SLCs the cacheability of the FW data in the SLC is selected in + * the PTEs for the FW data, not in the Meta Segment MMU, which means these + * defines have no real effect in those cases + */ +#define RGXFW_SEGMMU_DATA_VIVT_SLC_CACHED (0x0U) +#define RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED (0x60000000U) +#define RGXFW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK (0x60000000U) + + +#if defined(SECURE_FW_CODE_OSID) && defined(RGX_FEATURE_META) +#error "SECURE_FW_CODE_OSID is not supported on META cores" +#endif + + +/****************************************************************************** +* RGX FW Bootloader defaults +******************************************************************************/ +#define RGXFW_BOOTLDR_META_ADDR (0x40000000U) +#define RGXFW_BOOTLDR_DEVV_ADDR_0 (0xC0000000U) +#define RGXFW_BOOTLDR_DEVV_ADDR_1 (0x000000E1) +#define RGXFW_BOOTLDR_DEVV_ADDR ((((IMG_UINT64) RGXFW_BOOTLDR_DEVV_ADDR_1) << 32) | RGXFW_BOOTLDR_DEVV_ADDR_0) +#define RGXFW_BOOTLDR_LIMIT (0x1FFFF000) +#define RGXFW_MAX_BOOTLDR_OFFSET (0x1000) + +/* Bootloader configuration offset is in dwords (512 bytes) */ +#define RGXFW_BOOTLDR_CONF_OFFSET (0x80) + + +/****************************************************************************** +* RGX META Stack 
+******************************************************************************/ +#define RGX_META_STACK_SIZE (0x1000U) + +/****************************************************************************** + RGX META Core memory +******************************************************************************/ +/* code and data both map to the same physical memory */ +#define RGX_META_COREMEM_CODE_ADDR (0x80000000U) +#define RGX_META_COREMEM_DATA_ADDR (0x82000000U) +#define RGX_META_COREMEM_OFFSET_MASK (0x01ffffffU) + +#if defined(__KERNEL__) +#define RGX_META_IS_COREMEM_CODE(A, B) (((A) >= RGX_META_COREMEM_CODE_ADDR) && ((A) < (RGX_META_COREMEM_CODE_ADDR + (B)))) +#define RGX_META_IS_COREMEM_DATA(A, B) (((A) >= RGX_META_COREMEM_DATA_ADDR) && ((A) < (RGX_META_COREMEM_DATA_ADDR + (B)))) +#endif + +/****************************************************************************** +* 2nd thread +******************************************************************************/ +#define RGXFW_THR1_PC (0x18930000) +#define RGXFW_THR1_SP (0x78890000) + +/****************************************************************************** +* META compatibility +******************************************************************************/ + +#define META_CR_CORE_ID (0x04831000) +#define META_CR_CORE_ID_VER_SHIFT (16U) +#define META_CR_CORE_ID_VER_CLRMSK (0XFF00FFFFU) + +#if !defined(__KERNEL__) && defined(RGX_FEATURE_META) + + #if (RGX_FEATURE_META == MTP218) + #define RGX_CR_META_CORE_ID_VALUE 0x19 + #elif (RGX_FEATURE_META == MTP219) + #define RGX_CR_META_CORE_ID_VALUE 0x1E + #elif (RGX_FEATURE_META == LTP218) + #define RGX_CR_META_CORE_ID_VALUE 0x1C + #elif (RGX_FEATURE_META == LTP217) + #define RGX_CR_META_CORE_ID_VALUE 0x1F + #else + #error "Unknown META ID" + #endif +#else + + #define RGX_CR_META_MTP218_CORE_ID_VALUE 0x19 + #define RGX_CR_META_MTP219_CORE_ID_VALUE 0x1E + #define RGX_CR_META_LTP218_CORE_ID_VALUE 0x1C + #define RGX_CR_META_LTP217_CORE_ID_VALUE 0x1F + +#endif +#define 
RGXFW_PROCESSOR_META "META" + + +#endif /* RGX_META_H */ + +/****************************************************************************** + End of file (rgx_meta.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgx_mips.h b/drivers/gpu/drm/phytium/octopus/rgx_mips.h new file mode 100644 index 000000000000..e39322ce6e85 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgx_mips.h @@ -0,0 +1,374 @@ +/*************************************************************************/ /*! +@File rgx_mips.h +@Title +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Platform RGX +@Description RGX MIPS definitions, kernel/user space +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(RGX_MIPS_H) +#define RGX_MIPS_H + +/* + * Utility defines for memory management + */ +#define RGXMIPSFW_LOG2_PAGE_SIZE_4K (12) +#define RGXMIPSFW_PAGE_SIZE_4K (0x1 << RGXMIPSFW_LOG2_PAGE_SIZE_4K) +#define RGXMIPSFW_PAGE_MASK_4K (RGXMIPSFW_PAGE_SIZE_4K - 1) +#define RGXMIPSFW_LOG2_PAGE_SIZE_64K (16) +#define RGXMIPSFW_PAGE_SIZE_64K (0x1 << RGXMIPSFW_LOG2_PAGE_SIZE_64K) +#define RGXMIPSFW_PAGE_MASK_64K (RGXMIPSFW_PAGE_SIZE_64K - 1) +#define RGXMIPSFW_LOG2_PAGE_SIZE_256K (18) +#define RGXMIPSFW_PAGE_SIZE_256K (0x1 << RGXMIPSFW_LOG2_PAGE_SIZE_256K) +#define RGXMIPSFW_PAGE_MASK_256K (RGXMIPSFW_PAGE_SIZE_256K - 1) +#define RGXMIPSFW_LOG2_PAGE_SIZE_1MB (20) +#define RGXMIPSFW_PAGE_SIZE_1MB (0x1 << RGXMIPSFW_LOG2_PAGE_SIZE_1MB) +#define RGXMIPSFW_PAGE_MASK_1MB (RGXMIPSFW_PAGE_SIZE_1MB - 1) +#define RGXMIPSFW_LOG2_PAGE_SIZE_4MB (22) +#define RGXMIPSFW_PAGE_SIZE_4MB (0x1 << RGXMIPSFW_LOG2_PAGE_SIZE_4MB) +#define RGXMIPSFW_PAGE_MASK_4MB (RGXMIPSFW_PAGE_SIZE_4MB - 1) +#define RGXMIPSFW_LOG2_PTE_ENTRY_SIZE (2) +/* log2 page table sizes dependent on FW heap size and page size (for each OS) */ +#define RGXMIPSFW_LOG2_PAGETABLE_SIZE_4K (RGX_FIRMWARE_HEAP_SHIFT - RGXMIPSFW_LOG2_PAGE_SIZE_4K + RGXMIPSFW_LOG2_PTE_ENTRY_SIZE) +#define RGXMIPSFW_LOG2_PAGETABLE_SIZE_64K (RGX_FIRMWARE_HEAP_SHIFT - RGXMIPSFW_LOG2_PAGE_SIZE_64K + RGXMIPSFW_LOG2_PTE_ENTRY_SIZE) +/* Maximum number of page table pages (both Host and MIPS pages) */ +#define RGXMIPSFW_MAX_NUM_PAGETABLE_PAGES (4) +/* Total number of TLB entries */ +#define RGXMIPSFW_NUMBER_OF_TLB_ENTRIES (16) +/* "Uncached" caching policy */ +#define RGXMIPSFW_UNCACHED_CACHE_POLICY (0X00000002) +/* "Write-back write-allocate" caching policy */ +#define RGXMIPSFW_WRITEBACK_CACHE_POLICY (0X00000003) +/* "Write-through no write-allocate" caching policy */ +#define RGXMIPSFW_WRITETHROUGH_CACHE_POLICY (0X00000001) +/* Cached policy used by MIPS in case of 
physical bus on 32 bit */ +#define RGXMIPSFW_CACHED_POLICY (RGXMIPSFW_WRITEBACK_CACHE_POLICY) +/* Cached policy used by MIPS in case of physical bus on more than 32 bit */ +#define RGXMIPSFW_CACHED_POLICY_ABOVE_32BIT (RGXMIPSFW_WRITETHROUGH_CACHE_POLICY) +/* Total number of Remap entries */ +#define RGXMIPSFW_NUMBER_OF_REMAP_ENTRIES (2 * RGXMIPSFW_NUMBER_OF_TLB_ENTRIES) + + +/* + * MIPS EntryLo/PTE format + */ + +#define RGXMIPSFW_ENTRYLO_READ_INHIBIT_SHIFT (31U) +#define RGXMIPSFW_ENTRYLO_READ_INHIBIT_CLRMSK (0X7FFFFFFF) +#define RGXMIPSFW_ENTRYLO_READ_INHIBIT_EN (0X80000000) + +#define RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_SHIFT (30U) +#define RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_CLRMSK (0XBFFFFFFF) +#define RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_EN (0X40000000) + +/* Page Frame Number */ +#define RGXMIPSFW_ENTRYLO_PFN_SHIFT (6) +#define RGXMIPSFW_ENTRYLO_PFN_ALIGNSHIFT (12) +/* Mask used for the MIPS Page Table in case of physical bus on 32 bit */ +#define RGXMIPSFW_ENTRYLO_PFN_MASK (0x03FFFFC0) +#define RGXMIPSFW_ENTRYLO_PFN_SIZE (20) +/* Mask used for the MIPS Page Table in case of physical bus on more than 32 bit */ +#define RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT (0x3FFFFFC0) +#define RGXMIPSFW_ENTRYLO_PFN_SIZE_ABOVE_32BIT (24) +#define RGXMIPSFW_ADDR_TO_ENTRYLO_PFN_RSHIFT (RGXMIPSFW_ENTRYLO_PFN_ALIGNSHIFT - \ + RGXMIPSFW_ENTRYLO_PFN_SHIFT) + +#define RGXMIPSFW_ENTRYLO_CACHE_POLICY_SHIFT (3U) +#define RGXMIPSFW_ENTRYLO_CACHE_POLICY_CLRMSK (0XFFFFFFC7) + +#define RGXMIPSFW_ENTRYLO_DIRTY_SHIFT (2U) +#define RGXMIPSFW_ENTRYLO_DIRTY_CLRMSK (0XFFFFFFFB) +#define RGXMIPSFW_ENTRYLO_DIRTY_EN (0X00000004) + +#define RGXMIPSFW_ENTRYLO_VALID_SHIFT (1U) +#define RGXMIPSFW_ENTRYLO_VALID_CLRMSK (0XFFFFFFFD) +#define RGXMIPSFW_ENTRYLO_VALID_EN (0X00000002) + +#define RGXMIPSFW_ENTRYLO_GLOBAL_SHIFT (0U) +#define RGXMIPSFW_ENTRYLO_GLOBAL_CLRMSK (0XFFFFFFFE) +#define RGXMIPSFW_ENTRYLO_GLOBAL_EN (0X00000001) + +#define RGXMIPSFW_ENTRYLO_DVG (RGXMIPSFW_ENTRYLO_DIRTY_EN | \ + 
RGXMIPSFW_ENTRYLO_VALID_EN | \ + RGXMIPSFW_ENTRYLO_GLOBAL_EN) +#define RGXMIPSFW_ENTRYLO_UNCACHED (RGXMIPSFW_UNCACHED_CACHE_POLICY << \ + RGXMIPSFW_ENTRYLO_CACHE_POLICY_SHIFT) +#define RGXMIPSFW_ENTRYLO_DVG_UNCACHED (RGXMIPSFW_ENTRYLO_DVG | RGXMIPSFW_ENTRYLO_UNCACHED) + + +/* Remap Range Config Addr Out */ +/* These defines refer to the upper half of the Remap Range Config register */ +#define RGXMIPSFW_REMAP_RANGE_ADDR_OUT_MASK (0x0FFFFFF0) +#define RGXMIPSFW_REMAP_RANGE_ADDR_OUT_SHIFT (4) /* wrt upper half of the register */ +#define RGXMIPSFW_REMAP_RANGE_ADDR_OUT_ALIGNSHIFT (12) +#define RGXMIPSFW_ADDR_TO_RR_ADDR_OUT_RSHIFT (RGXMIPSFW_REMAP_RANGE_ADDR_OUT_ALIGNSHIFT - \ + RGXMIPSFW_REMAP_RANGE_ADDR_OUT_SHIFT) + +#if defined(SECURE_FW_CODE_OSID) && (SECURE_FW_CODE_OSID + 1 > 2) +#define MIPS_FW_CODE_OSID (SECURE_FW_CODE_OSID) +#elif defined(SECURE_FW_CODE_OSID) +#define MIPS_FW_CODE_OSID (1U) +#endif + + +/* + * Pages to trampoline problematic physical addresses: + * - RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN : 0x1FC0_0000 + * - RGXMIPSFW_DATA_REMAP_PHYS_ADDR_IN : 0x1FC0_1000 + * - RGXMIPSFW_CODE_REMAP_PHYS_ADDR_IN : 0x1FC0_2000 + * - (benign trampoline) : 0x1FC0_3000 + * that would otherwise be erroneously remapped by the MIPS wrapper + * (see "Firmware virtual layout and remap configuration" section below) + */ + +#define RGXMIPSFW_TRAMPOLINE_LOG2_NUMPAGES (2) +#define RGXMIPSFW_TRAMPOLINE_NUMPAGES (1 << RGXMIPSFW_TRAMPOLINE_LOG2_NUMPAGES) +#define RGXMIPSFW_TRAMPOLINE_SIZE (RGXMIPSFW_TRAMPOLINE_NUMPAGES << RGXMIPSFW_LOG2_PAGE_SIZE_4K) +#define RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE (RGXMIPSFW_TRAMPOLINE_LOG2_NUMPAGES + RGXMIPSFW_LOG2_PAGE_SIZE_4K) + +#define RGXMIPSFW_TRAMPOLINE_TARGET_PHYS_ADDR (RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN) +#define RGXMIPSFW_TRAMPOLINE_OFFSET(a) (a - RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN) + +#define RGXMIPSFW_SENSITIVE_ADDR(a) (RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN == (~((1<> 2) +#define RGXMIPSFW_C0_CAUSE_EXCCODE_FWERROR 9 +/* Use only when 
Coprocessor Unusable exception */ +#define RGXMIPSFW_C0_CAUSE_UNUSABLE_UNIT(CAUSE) (((CAUSE) >> 28) & 0x3) +#define RGXMIPSFW_C0_CAUSE_PENDING_HWIRQ(CAUSE) (((CAUSE) & 0x3fc00) >> 10) +#define RGXMIPSFW_C0_CAUSE_FDCIPENDING (1 << 21) +#define RGXMIPSFW_C0_CAUSE_IV (1 << 23) +#define RGXMIPSFW_C0_CAUSE_IC (1 << 25) +#define RGXMIPSFW_C0_CAUSE_PCIPENDING (1 << 26) +#define RGXMIPSFW_C0_CAUSE_TIPENDING (1 << 30) +#define RGXMIPSFW_C0_CAUSE_BRANCH_DELAY (1 << 31) + +/* Macros to decode C0_Debug register */ +#define RGXMIPSFW_C0_DEBUG_EXCCODE(DEBUG) (((DEBUG) >> 10) & 0x1f) +#define RGXMIPSFW_C0_DEBUG_DSS (1 << 0) +#define RGXMIPSFW_C0_DEBUG_DBP (1 << 1) +#define RGXMIPSFW_C0_DEBUG_DDBL (1 << 2) +#define RGXMIPSFW_C0_DEBUG_DDBS (1 << 3) +#define RGXMIPSFW_C0_DEBUG_DIB (1 << 4) +#define RGXMIPSFW_C0_DEBUG_DINT (1 << 5) +#define RGXMIPSFW_C0_DEBUG_DIBIMPR (1 << 6) +#define RGXMIPSFW_C0_DEBUG_DDBLIMPR (1 << 18) +#define RGXMIPSFW_C0_DEBUG_DDBSIMPR (1 << 19) +#define RGXMIPSFW_C0_DEBUG_IEXI (1 << 20) +#define RGXMIPSFW_C0_DEBUG_DBUSEP (1 << 21) +#define RGXMIPSFW_C0_DEBUG_CACHEEP (1 << 22) +#define RGXMIPSFW_C0_DEBUG_MCHECKP (1 << 23) +#define RGXMIPSFW_C0_DEBUG_IBUSEP (1 << 24) +#define RGXMIPSFW_C0_DEBUG_DM (1 << 30) +#define RGXMIPSFW_C0_DEBUG_DBD (1 << 31) + +/* Macros to decode TLB entries */ +#define RGXMIPSFW_TLB_GET_MASK(PAGE_MASK) (((PAGE_MASK) >> 13) & 0XFFFFU) +#define RGXMIPSFW_TLB_GET_PAGE_SIZE(PAGE_MASK) ((((PAGE_MASK) | 0x1FFF) + 1) >> 11) /* page size in KB */ +#define RGXMIPSFW_TLB_GET_PAGE_MASK(PAGE_SIZE) ((((PAGE_SIZE) << 11) - 1) & ~0x7FF) /* page size in KB */ +#define RGXMIPSFW_TLB_GET_VPN2(ENTRY_HI) ((ENTRY_HI) >> 13) +#define RGXMIPSFW_TLB_GET_COHERENCY(ENTRY_LO) (((ENTRY_LO) >> 3) & 0x7U) +#define RGXMIPSFW_TLB_GET_PFN(ENTRY_LO) (((ENTRY_LO) >> 6) & 0XFFFFFU) +/* GET_PA uses a non-standard PFN mask for 36 bit addresses */ +#define RGXMIPSFW_TLB_GET_PA(ENTRY_LO) (((IMG_UINT64)(ENTRY_LO) & RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT) << 6) +#define 
RGXMIPSFW_TLB_GET_INHIBIT(ENTRY_LO) (((ENTRY_LO) >> 30) & 0x3U) +#define RGXMIPSFW_TLB_GET_DGV(ENTRY_LO) ((ENTRY_LO) & 0x7U) +#define RGXMIPSFW_TLB_GLOBAL (1U) +#define RGXMIPSFW_TLB_VALID (1U << 1) +#define RGXMIPSFW_TLB_DIRTY (1U << 2) +#define RGXMIPSFW_TLB_XI (1U << 30) +#define RGXMIPSFW_TLB_RI (1U << 31) + +typedef struct { + IMG_UINT32 ui32TLBPageMask; + IMG_UINT32 ui32TLBHi; + IMG_UINT32 ui32TLBLo0; + IMG_UINT32 ui32TLBLo1; +} RGX_MIPS_TLB_ENTRY; + +typedef struct { + IMG_UINT32 ui32RemapAddrIn; /* always 4k aligned */ + IMG_UINT32 ui32RemapAddrOut; /* always 4k aligned */ + IMG_UINT32 ui32RemapRegionSize; +} RGX_MIPS_REMAP_ENTRY; + +typedef struct { + IMG_UINT32 ui32ErrorState; /* This must come first in the structure */ + IMG_UINT32 ui32ErrorEPC; + IMG_UINT32 ui32StatusRegister; + IMG_UINT32 ui32CauseRegister; + IMG_UINT32 ui32BadRegister; + IMG_UINT32 ui32EPC; + IMG_UINT32 ui32SP; + IMG_UINT32 ui32Debug; + IMG_UINT32 ui32DEPC; + IMG_UINT32 ui32BadInstr; + IMG_UINT32 ui32UnmappedAddress; + RGX_MIPS_TLB_ENTRY asTLB[RGXMIPSFW_NUMBER_OF_TLB_ENTRIES]; + RGX_MIPS_REMAP_ENTRY asRemap[RGXMIPSFW_NUMBER_OF_REMAP_ENTRIES]; +} RGX_MIPS_STATE; + +#endif /* RGXMIPSFW_ASSEMBLY_CODE */ + +#endif /* RGX_MIPS_H */ diff --git a/drivers/gpu/drm/phytium/octopus/rgx_options.h b/drivers/gpu/drm/phytium/octopus/rgx_options.h new file mode 100644 index 000000000000..005162d0b175 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgx_options.h @@ -0,0 +1,294 @@ +/*************************************************************************/ /*! +@File +@Title RGX build options +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +/* Each build option listed here is packed into a dword which provides up to + * log2(RGX_BUILD_OPTIONS_MASK_KM + 1) flags for KM and + * (32 - log2(RGX_BUILD_OPTIONS_MASK_KM + 1)) flags for UM. + * The corresponding bit is set if the build option was enabled at compile + * time. + * + * In order to extract the enabled build flags the INTERNAL_TEST switch should + * be enabled in a client program which includes this header. Then the client + * can test specific build flags by reading the bit value at + * ##OPTIONNAME##_SET_OFFSET + * in RGX_BUILD_OPTIONS_KM or RGX_BUILD_OPTIONS. + * + * IMPORTANT: add new options to unused bits or define a new dword + * (e.g. RGX_BUILD_OPTIONS_KM2 or RGX_BUILD_OPTIONS2) so that the bitfield + * remains backwards compatible. + */ + +#ifndef RGX_OPTIONS_H +#define RGX_OPTIONS_H + +#define RGX_BUILD_OPTIONS_MASK_KM 0x0000FFFFUL + +#define NO_HARDWARE_OPTION "NO_HARDWARE " +#if defined(NO_HARDWARE) || defined(INTERNAL_TEST) + #define NO_HARDWARE_SET_OFFSET OPTIONS_BIT0 + #define OPTIONS_BIT0 (0x1UL << 0) + #if OPTIONS_BIT0 > RGX_BUILD_OPTIONS_MASK_KM + #error "Bit exceeds reserved range" + #endif +#else + #define OPTIONS_BIT0 0x0UL +#endif /* NO_HARDWARE */ + +#define PDUMP_OPTION "PDUMP " +#if defined(PDUMP) || defined(INTERNAL_TEST) + #define PDUMP_SET_OFFSET OPTIONS_BIT1 + #define OPTIONS_BIT1 (0x1UL << 1) + #if OPTIONS_BIT1 > RGX_BUILD_OPTIONS_MASK_KM + #error "Bit exceeds reserved range" + #endif +#else + #define OPTIONS_BIT1 0x0UL +#endif /* PDUMP */ + +/* No longer used */ +#define INTERNAL_TEST_OPTION "INTERNAL_TEST " +#if defined(INTERNAL_TEST) + #define UNUSED_SET_OFFSET OPTIONS_BIT2 + #define OPTIONS_BIT2 (0x1UL << 2) + #if OPTIONS_BIT2 > RGX_BUILD_OPTIONS_MASK_KM + #error "Bit exceeds reserved range" + #endif +#else + #define OPTIONS_BIT2 0x0UL +#endif + +/* No longer used */ +#define UNUSED_OPTION " " +#if defined(INTERNAL_TEST) + #define 
OPTIONS_BIT3 (0x1UL << 3) + #if OPTIONS_BIT3 > RGX_BUILD_OPTIONS_MASK_KM + #error "Bit exceeds reserved range" + #endif +#else + #define OPTIONS_BIT3 0x0UL +#endif + +#define SUPPORT_RGX_OPTION " " +#if defined(SUPPORT_RGX) || defined(INTERNAL_TEST) + #define SUPPORT_RGX_SET_OFFSET OPTIONS_BIT4 + #define OPTIONS_BIT4 (0x1UL << 4) + #if OPTIONS_BIT4 > RGX_BUILD_OPTIONS_MASK_KM + #error "Bit exceeds reserved range" + #endif +#else + #define OPTIONS_BIT4 0x0UL +#endif /* SUPPORT_RGX */ + +#define SUPPORT_SECURE_EXPORT_OPTION "SECURE_EXPORTS " +#if defined(SUPPORT_SECURE_EXPORT) || defined(INTERNAL_TEST) + #define SUPPORT_SECURE_EXPORT_SET_OFFSET OPTIONS_BIT5 + #define OPTIONS_BIT5 (0x1UL << 5) + #if OPTIONS_BIT5 > RGX_BUILD_OPTIONS_MASK_KM + #error "Bit exceeds reserved range" + #endif +#else + #define OPTIONS_BIT5 0x0UL +#endif /* SUPPORT_SECURE_EXPORT */ + +#define SUPPORT_INSECURE_EXPORT_OPTION "INSECURE_EXPORTS " +#if defined(SUPPORT_INSECURE_EXPORT) || defined(INTERNAL_TEST) + #define SUPPORT_INSECURE_EXPORT_SET_OFFSET OPTIONS_BIT6 + #define OPTIONS_BIT6 (0x1UL << 6) + #if OPTIONS_BIT6 > RGX_BUILD_OPTIONS_MASK_KM + #error "Bit exceeds reserved range" + #endif +#else + #define OPTIONS_BIT6 0x0UL +#endif /* SUPPORT_INSECURE_EXPORT */ + +#define SUPPORT_VFP_OPTION "VFP " +#if defined(SUPPORT_VFP) || defined(INTERNAL_TEST) + #define SUPPORT_VFP_SET_OFFSET OPTIONS_BIT7 + #define OPTIONS_BIT7 (0x1UL << 7) + #if OPTIONS_BIT7 > RGX_BUILD_OPTIONS_MASK_KM + #error "Bit exceeds reserved range" + #endif +#else + #define OPTIONS_BIT7 0x0UL +#endif /* SUPPORT_VFP */ + +#define SUPPORT_WORKLOAD_ESTIMATION_OPTION "WORKLOAD_ESTIMATION " +#if defined(SUPPORT_WORKLOAD_ESTIMATION) || defined(INTERNAL_TEST) + #define SUPPORT_WORKLOAD_ESTIMATION_OFFSET OPTIONS_BIT8 + #define OPTIONS_BIT8 (0x1UL << 8) + #if OPTIONS_BIT8 > RGX_BUILD_OPTIONS_MASK_KM + #error "Bit exceeds reserved range" + #endif +#else + #define OPTIONS_BIT8 0x0UL +#endif /* SUPPORT_WORKLOAD_ESTIMATION */ +#define 
OPTIONS_WORKLOAD_ESTIMATION_MASK (0x1UL << 8) + +#define SUPPORT_PDVFS_OPTION "PDVFS " +#if defined(SUPPORT_PDVFS) || defined(INTERNAL_TEST) + #define SUPPORT_PDVFS_OFFSET OPTIONS_BIT9 + #define OPTIONS_BIT9 (0x1UL << 9) + #if OPTIONS_BIT9 > RGX_BUILD_OPTIONS_MASK_KM + #error "Bit exceeds reserved range" + #endif +#else + #define OPTIONS_BIT9 0x0UL +#endif /* SUPPORT_PDVFS */ +#define OPTIONS_PDVFS_MASK (0x1UL << 9) + +#define DEBUG_OPTION "DEBUG " +#if defined(DEBUG) || defined(INTERNAL_TEST) + #define DEBUG_SET_OFFSET OPTIONS_BIT10 + #define OPTIONS_BIT10 (0x1UL << 10) + #if OPTIONS_BIT10 > RGX_BUILD_OPTIONS_MASK_KM + #error "Bit exceeds reserved range" + #endif +#else + #define OPTIONS_BIT10 0x0UL +#endif /* DEBUG */ +/* The bit position of this should be the same as DEBUG_SET_OFFSET option + * when defined. + */ +#define OPTIONS_DEBUG_MASK (0x1UL << 10) + +#define SUPPORT_BUFFER_SYNC_OPTION "BUFFER_SYNC " +#if defined(SUPPORT_BUFFER_SYNC) || defined(INTERNAL_TEST) + #define SUPPORT_BUFFER_SYNC_SET_OFFSET OPTIONS_BIT11 + #define OPTIONS_BIT11 (0x1UL << 11) + #if OPTIONS_BIT11 > RGX_BUILD_OPTIONS_MASK_KM + #error "Bit exceeds reserved range" + #endif +#else + #define OPTIONS_BIT11 0x0UL +#endif /* SUPPORT_BUFFER_SYNC */ + +#define SUPPORT_AUTOVZ_OPTION "AUTOVZ " +#if defined(SUPPORT_AUTOVZ) + #define SUPPORT_AUTOVZ_OFFSET OPTIONS_BIT12 + #define OPTIONS_BIT12 (0x1UL << 12) + #if OPTIONS_BIT12 > RGX_BUILD_OPTIONS_MASK_KM + #error "Bit exceeds reserved range" + #endif +#else + #define OPTIONS_BIT12 0x0UL +#endif /* SUPPORT_AUTOVZ */ + +#define SUPPORT_AUTOVZ_HW_REGS_OPTION "AUTOVZ_HW_REGS " +#if defined(SUPPORT_AUTOVZ_HW_REGS) + #define SUPPORT_AUTOVZ_HW_REGS_OFFSET OPTIONS_BIT13 + #define OPTIONS_BIT13 (0x1UL << 13) + #if OPTIONS_BIT13 > RGX_BUILD_OPTIONS_MASK_KM + #error "Bit exceeds reserved range" + #endif +#else + #define OPTIONS_BIT13 0x0UL +#endif /* SUPPORT_AUTOVZ_HW_REGS */ + +/* Bit 14 reserved for compatibility with Rogue code base */ +#define 
OPTIONS_BIT14 0x0UL + +#define VALIDATION_EN_MASK (0x1UL << 15) +#define SUPPORT_VALIDATION_OPTION "VALIDATION " +#if defined(SUPPORT_VALIDATION) + #define SUPPORT_VALIDATION_OFFSET OPTIONS_BIT15 + #define OPTIONS_BIT15 (0x1UL << 15) + #if OPTIONS_BIT15 > RGX_BUILD_OPTIONS_MASK_KM + #error "Bit exceeds reserved range" + #endif +#else + #define OPTIONS_BIT15 0x0UL +#endif /* SUPPORT_VALIDATION */ + +#define RGX_BUILD_OPTIONS_KM \ + (OPTIONS_BIT0 |\ + OPTIONS_BIT1 |\ + OPTIONS_BIT2 |\ + OPTIONS_BIT3 |\ + OPTIONS_BIT4 |\ + OPTIONS_BIT6 |\ + OPTIONS_BIT7 |\ + OPTIONS_BIT8 |\ + OPTIONS_BIT9 |\ + OPTIONS_BIT10 |\ + OPTIONS_BIT11 |\ + OPTIONS_BIT12 |\ + OPTIONS_BIT13 |\ + OPTIONS_BIT14 |\ + OPTIONS_BIT15) + +#define RGX_BUILD_OPTIONS_LIST \ + { \ + NO_HARDWARE_OPTION, \ + PDUMP_OPTION, \ + INTERNAL_TEST_OPTION, \ + UNUSED_OPTION, \ + SUPPORT_RGX_OPTION, \ + SUPPORT_SECURE_EXPORT_OPTION, \ + SUPPORT_INSECURE_EXPORT_OPTION, \ + SUPPORT_VFP_OPTION, \ + SUPPORT_WORKLOAD_ESTIMATION_OPTION, \ + SUPPORT_PDVFS_OPTION, \ + DEBUG_OPTION, \ + SUPPORT_BUFFER_SYNC_OPTION, \ + SUPPORT_AUTOVZ_OPTION, \ + SUPPORT_AUTOVZ_HW_REGS_OPTION, \ + SUPPORT_VALIDATION_OPTION \ + } + +#define RGX_BUILD_OPTIONS_MASK_FW \ + (RGX_BUILD_OPTIONS_MASK_KM & \ + ~OPTIONS_BIT11) + +#define OPTIONS_BIT31 (0x1UL << 31) +#if OPTIONS_BIT31 <= RGX_BUILD_OPTIONS_MASK_KM +#error "Bit exceeds reserved range" +#endif +#define SUPPORT_PERCONTEXT_FREELIST_SET_OFFSET OPTIONS_BIT31 + +#define RGX_BUILD_OPTIONS (RGX_BUILD_OPTIONS_KM | OPTIONS_BIT31) + +#define OPTIONS_STRICT (RGX_BUILD_OPTIONS & \ + ~(OPTIONS_DEBUG_MASK | \ + OPTIONS_WORKLOAD_ESTIMATION_MASK | \ + OPTIONS_PDVFS_MASK)) + +#endif /* RGX_OPTIONS_H */ diff --git a/drivers/gpu/drm/phytium/octopus/rgx_pdump_panics.h b/drivers/gpu/drm/phytium/octopus/rgx_pdump_panics.h new file mode 100644 index 000000000000..2b5e9fc8172b --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgx_pdump_panics.h @@ -0,0 +1,64 @@ 
+/*************************************************************************/ /*! +@File +@Title RGX PDump panic definitions header +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description RGX PDump panic definitions header +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(RGX_PDUMP_PANICS_H_) +#define RGX_PDUMP_PANICS_H_ + +/*! Unique device specific IMG_UINT16 panic IDs to identify the cause of an + * RGX PDump panic in a PDump script. */ +typedef enum +{ + RGX_PDUMP_PANIC_UNDEFINED = 0, + + /* These panics occur when test parameters and driver configuration + * enable features that require the firmware and host driver to + * communicate. Such features are not supported with off-line playback. + */ + RGX_PDUMP_PANIC_ZSBUFFER_BACKING = 101, /*!< Requests ZSBuffer to be backed with physical pages */ + RGX_PDUMP_PANIC_ZSBUFFER_UNBACKING = 102, /*!< Requests ZSBuffer to be unbacked */ + RGX_PDUMP_PANIC_FREELIST_GROW = 103, /*!< Requests an on-demand freelist grow/shrink */ + RGX_PDUMP_PANIC_FREELISTS_RECONSTRUCTION = 104, /*!< Requests freelists reconstruction */ + RGX_PDUMP_PANIC_SPARSEMEM_SWAP = 105, /*!< Requests sparse remap memory swap feature */ +} RGX_PDUMP_PANIC; + +#endif /* RGX_PDUMP_PANICS_H_ */ diff --git a/drivers/gpu/drm/phytium/octopus/rgx_riscv.h b/drivers/gpu/drm/phytium/octopus/rgx_riscv.h new file mode 100644 index 000000000000..26e1440be944 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgx_riscv.h @@ -0,0 +1,250 @@ +/*************************************************************************/ /*! +@File rgx_riscv.h +@Title +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Platform RGX +@Description RGX RISCV definitions, kernel/user space +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(RGX_RISCV_H) +#define RGX_RISCV_H + +#include "km/rgxdefs_km.h" + + +/* Utility defines to convert regions to virtual addresses and remaps */ +#define RGXRISCVFW_GET_REGION_BASE(r) IMG_UINT32_C((r) << 28) +#define RGXRISCVFW_GET_REGION(a) IMG_UINT32_C((a) >> 28) +#define RGXRISCVFW_MAX_REGION_SIZE IMG_UINT32_C(1 << 28) +#define RGXRISCVFW_GET_REMAP(r) (RGX_CR_FWCORE_ADDR_REMAP_CONFIG0 + ((r) * 8U)) + +/* RISCV remap output is aligned to 4K */ +#define RGXRISCVFW_REMAP_CONFIG_DEVVADDR_ALIGN (0x1000U) + +/* + * FW bootloader defines + */ +#define RGXRISCVFW_BOOTLDR_CODE_REGION IMG_UINT32_C(0xC) +#define RGXRISCVFW_BOOTLDR_DATA_REGION IMG_UINT32_C(0x5) +#define RGXRISCVFW_BOOTLDR_CODE_BASE (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_BOOTLDR_CODE_REGION)) +#define RGXRISCVFW_BOOTLDR_DATA_BASE (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_BOOTLDR_DATA_REGION)) +#define RGXRISCVFW_BOOTLDR_CODE_REMAP (RGXRISCVFW_GET_REMAP(RGXRISCVFW_BOOTLDR_CODE_REGION)) +#define RGXRISCVFW_BOOTLDR_DATA_REMAP (RGXRISCVFW_GET_REMAP(RGXRISCVFW_BOOTLDR_DATA_REGION)) + +/* Bootloader data offset in dwords from the beginning of the FW data allocation */ +#define RGXRISCVFW_BOOTLDR_CONF_OFFSET (0x0) + +/* + * FW coremem region defines + */ +#define RGXRISCVFW_COREMEM_REGION IMG_UINT32_C(0x8) +#define RGXRISCVFW_COREMEM_MAX_SIZE IMG_UINT32_C(0x10000000) /* 256 MB */ +#define 
RGXRISCVFW_COREMEM_BASE (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_COREMEM_REGION)) +#define RGXRISCVFW_COREMEM_END (RGXRISCVFW_COREMEM_BASE + RGXRISCVFW_COREMEM_MAX_SIZE - 1) + + +/* + * Host-FW shared data defines + */ +#define RGXRISCVFW_SHARED_CACHED_DATA_REGION (0x6U) +#define RGXRISCVFW_SHARED_UNCACHED_DATA_REGION (0xDU) +#define RGXRISCVFW_SHARED_CACHED_DATA_BASE (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_SHARED_CACHED_DATA_REGION)) +#define RGXRISCVFW_SHARED_UNCACHED_DATA_BASE (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_SHARED_UNCACHED_DATA_REGION)) +#define RGXRISCVFW_SHARED_CACHED_DATA_REMAP (RGXRISCVFW_GET_REMAP(RGXRISCVFW_SHARED_CACHED_DATA_REGION)) +#define RGXRISCVFW_SHARED_UNCACHED_DATA_REMAP (RGXRISCVFW_GET_REMAP(RGXRISCVFW_SHARED_UNCACHED_DATA_REGION)) + + +/* + * GPU SOCIF access defines + */ +#define RGXRISCVFW_SOCIF_REGION (0x2U) +#define RGXRISCVFW_SOCIF_BASE (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_SOCIF_REGION)) + + +/* The things that follow are excluded when compiling assembly sources */ +#if !defined(RGXRISCVFW_ASSEMBLY_CODE) +#include "img_types.h" + +#define RGXFW_PROCESSOR_RISCV "RISCV" +#define RGXRISCVFW_CORE_ID_VALUE (0x00450B02U) +#define RGXRISCVFW_MISA_ADDR (0x301U) +#define RGXRISCVFW_MISA_VALUE (0x40001104U) +#define RGXRISCVFW_MSCRATCH_ADDR (0x340U) + +typedef struct +{ + IMG_UINT64 ui64CorememCodeDevVAddr; + IMG_UINT64 ui64CorememDataDevVAddr; + IMG_UINT32 ui32CorememCodeFWAddr; + IMG_UINT32 ui32CorememDataFWAddr; + IMG_UINT32 ui32CorememCodeSize; + IMG_UINT32 ui32CorememDataSize; + IMG_UINT32 ui32Flags; + IMG_UINT32 ui32Reserved; +} RGXRISCVFW_BOOT_DATA; + +/* + * List of registers to be printed in debug dump. 
+ * First column: register names (general purpose or control/status registers) + * Second column: register number to be used in abstract access register command + * (see RISC-V debug spec v0.13) + */ +#define RGXRISCVFW_DEBUG_DUMP_REGISTERS \ + X(pc, 0x7b1) /* dpc */ \ + X(ra, 0x1001) \ + X(sp, 0x1002) \ + X(mepc, 0x341) \ + X(mcause, 0x342) \ + X(mdseac, 0xfc0) \ + X(mstatus, 0x300) \ + X(mie, 0x304) \ + X(mip, 0x344) \ + X(mscratch, 0x340) \ + X(mbvnc0, 0xffe) \ + X(mbvnc1, 0xfff) \ + X(micect, 0x7f0) \ + X(mdcect, 0x7f3) \ + X(mdcrfct, 0x7f4) \ + +typedef struct +{ +#define X(name, address) \ + IMG_UINT32 name; + + RGXRISCVFW_DEBUG_DUMP_REGISTERS +#undef X +} RGXRISCVFW_STATE; + + +#define RGXRISCVFW_MCAUSE_INTERRUPT (1U << 31) + +#define RGXRISCVFW_MCAUSE_TABLE \ + X(0x00000000, IMG_FALSE, "NMI pin assertion") /* Also reset value */ \ + X(0x00000001, IMG_TRUE, "Instruction access fault") \ + X(0x00000002, IMG_TRUE, "Illegal instruction") \ + X(0x00000003, IMG_TRUE, "Breakpoint") \ + X(0x00000004, IMG_TRUE, "Load address misaligned") \ + X(0x00000005, IMG_TRUE, "Load access fault") \ + X(0x00000006, IMG_TRUE, "Store/AMO address misaligned") \ + X(0x00000007, IMG_TRUE, "Store/AMO access fault") \ + X(0x0000000B, IMG_TRUE, "Environment call from M-mode (FW assert)") \ + X(0x80000007, IMG_FALSE, "Machine timer interrupt") \ + X(0x8000000B, IMG_FALSE, "Machine external interrupt") \ + X(0x8000001E, IMG_FALSE, "Machine correctable error local interrupt") \ + X(0xF0000000, IMG_TRUE, "Machine D-bus store error NMI") \ + X(0xF0000001, IMG_TRUE, "Machine D-bus non-blocking load error NMI") \ + X(0xF0000002, IMG_TRUE, "dCache unrecoverable NMI") + + +/* Debug module HW defines */ +#define RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER (0U) +#define RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY (2U) +#define RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT (2U << 20) +#define RGXRISCVFW_DMI_COMMAND_WRITE (1U << 16) +#define RGXRISCVFW_DMI_COMMAND_READ (0U << 16) +#define 
RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT (2U) + +/* Abstract command error codes (descriptions from RISC-V debug spec v0.13) */ +typedef enum +{ + /* No error. */ + RISCV_ABSTRACT_CMD_NO_ERROR = 0, + + /* + * An abstract command was executing while command, abstractcs, or abstractauto + * was written, or when one of the data or progbuf registers was read or + * written. This status is only written if cmderr contains 0. + */ + RISCV_ABSTRACT_CMD_BUSY = 1, + + /* + * The requested command is not supported, regardless of whether + * the hart is running or not. + */ + RISCV_ABSTRACT_CMD_NOT_SUPPORTED = 2, + + /* + * An exception occurred while executing the command + * (e.g. while executing the Program Buffer). + */ + RISCV_ABSTRACT_CMD_EXCEPTION = 3, + + /* + * The abstract command couldn't execute because the hart wasn't in the required + * state (running/halted), or unavailable. + */ + RISCV_ABSTRACT_CMD_HALT_RESUME = 4, + + /* + * The abstract command failed due to a bus error + * (e.g. alignment, access size, or timeout). + */ + RISCV_ABSTRACT_CMD_BUS_ERROR = 5, + + /* The command failed for another reason. */ + RISCV_ABSTRACT_CMD_OTHER_ERROR = 7 + +} RGXRISCVFW_ABSTRACT_CMD_ERR; + +/* System Bus error codes (descriptions from RISC-V debug spec v0.13) */ +typedef enum +{ + /* There was no bus error. */ + RISCV_SYSBUS_NO_ERROR = 0, + + /* There was a timeout. */ + RISCV_SYSBUS_TIMEOUT = 1, + + /* A bad address was accessed. */ + RISCV_SYSBUS_BAD_ADDRESS = 2, + + /* There was an alignment error. */ + RISCV_SYSBUS_BAD_ALIGNMENT = 3, + + /* An access of unsupported size was requested. */ + RISCV_SYSBUS_UNSUPPORTED_SIZE = 4, + + /* Other. 
*/ + RISCV_SYSBUS_OTHER_ERROR = 7 + +} RGXRISCVFW_SYSBUS_ERR; + +#endif /* RGXRISCVFW_ASSEMBLY_CODE */ + +#endif /* RGX_RISCV_H */ diff --git a/drivers/gpu/drm/phytium/octopus/rgx_tq_shared.h b/drivers/gpu/drm/phytium/octopus/rgx_tq_shared.h new file mode 100644 index 000000000000..5fe4bf43de19 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgx_tq_shared.h @@ -0,0 +1,63 @@ +/*************************************************************************/ /*! +@File +@Title RGX transfer queue shared +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Shared definitions between client and server +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGX_TQ_SHARED_H +#define RGX_TQ_SHARED_H + +#define TQ_MAX_PREPARES_PER_SUBMIT 16U + +#define TQ_PREP_FLAGS_COMMAND_3D 0x0U +#define TQ_PREP_FLAGS_COMMAND_2D 0x1U +#define TQ_PREP_FLAGS_COMMAND_MASK (0xfU) +#define TQ_PREP_FLAGS_COMMAND_SHIFT 0 +#define TQ_PREP_FLAGS_PDUMPCONTINUOUS (1U << 4) +#define TQ_PREP_FLAGS_START (1U << 5) +#define TQ_PREP_FLAGS_END (1U << 6) + +#define TQ_PREP_FLAGS_COMMAND_SET(m) \ + ((TQ_PREP_FLAGS_COMMAND_##m << TQ_PREP_FLAGS_COMMAND_SHIFT) & TQ_PREP_FLAGS_COMMAND_MASK) + +#define TQ_PREP_FLAGS_COMMAND_IS(m,n) \ + (((m & TQ_PREP_FLAGS_COMMAND_MASK) >> TQ_PREP_FLAGS_COMMAND_SHIFT) == TQ_PREP_FLAGS_COMMAND_##n) + +#endif /* RGX_TQ_SHARED_H */ diff --git a/drivers/gpu/drm/phytium/octopus/rgxapi_km.h b/drivers/gpu/drm/phytium/octopus/rgxapi_km.h new file mode 100644 index 000000000000..91bc22c0ccfd --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxapi_km.h @@ -0,0 +1,309 @@ +/*************************************************************************/ /*! +@File +@Title RGX API Header kernel mode +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Description Exported RGX API details +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXAPI_KM_H +#define RGXAPI_KM_H + +#include "rgx_hwperf.h" + + +/****************************************************************************** + * RGX HW Performance Profiling Control API(s) + *****************************************************************************/ + +/*! HWPerf device identification structure */ +typedef struct _RGX_HWPERF_DEVICE_ +{ + IMG_CHAR pszName[20]; /*!< Helps identify this device uniquely */ + IMG_HANDLE hDevData; /*!< Handle for the server */ + + struct _RGX_HWPERF_DEVICE_ *psNext; /*!< Next device if any */ +} RGX_HWPERF_DEVICE; + +/*! HWPerf connection structure */ +typedef struct +{ + RGX_HWPERF_DEVICE *psHWPerfDevList; /*!< pointer to list of devices */ +} RGX_HWPERF_CONNECTION; + +/*************************************************************************/ /*! +@Function RGXHWPerfLazyConnect +@Description Obtain a HWPerf connection object to the RGX device(s). The + connections to devices are not actually opened until + HWPerfOpen() is called. + +@Output ppsHWPerfConnection Address of a HWPerf connection object +@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfLazyConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection); + + +/*************************************************************************/ /*! 
+@Function RGXHWPerfOpen +@Description Opens connection(s) to the RGX device(s). Valid handle to the + connection object has to be provided which means that this + function needs to be preceded by the call to + RGXHWPerfLazyConnect() function. + +@Input psHWPerfConnection HWPerf connection object +@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfOpen(RGX_HWPERF_CONNECTION* psHWPerfConnection); + + +/*************************************************************************/ /*! +@Function RGXHWPerfConnect +@Description Obtain a connection object to the RGX HWPerf module. Allocated + connection object(s) reference opened connection(s). Calling + this function is an equivalent of calling RGXHWPerfLazyConnect + and RGXHWPerfOpen. This connect should be used when the caller + will be retrieving event data. + +@Output ppsHWPerfConnection Address of HWPerf connection object +@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection); + + +/*************************************************************************/ /*! +@Function RGXHWPerfFreeConnection +@Description Frees the HWPerf connection object + +@Input psHWPerfConnection Pointer to connection object as returned + from RGXHWPerfLazyConnect() +@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfFreeConnection(RGX_HWPERF_CONNECTION** psHWPerfConnection); + + +/*************************************************************************/ /*!
+@Function RGXHWPerfClose +@Description Closes all the opened connection(s) to RGX device(s) + +@Input psHWPerfConnection Pointer to HWPerf connection object as + returned from RGXHWPerfConnect() or + RGXHWPerfOpen() +@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfClose(RGX_HWPERF_CONNECTION *psHWPerfConnection); + + +/*************************************************************************/ /*! +@Function RGXHWPerfDisconnect +@Description Disconnect from the RGX device + +@Input ppsHWPerfConnection Pointer to HWPerf connection object as + returned from RGXHWPerfConnect() or + RGXHWPerfOpen(). Calling this function is + an equivalent of calling RGXHWPerfClose() + and RGXHWPerfFreeConnection(). +@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfDisconnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection); + + +/*************************************************************************/ /*! +@Function RGXHWPerfControl +@Description Enable or disable the generation of RGX HWPerf event packets. + See RGXCtrlHWPerf(). + +@Input psHWPerfConnection Pointer to HWPerf connection object +@Input eStreamId ID of the HWPerf stream +@Input bToggle Switch to toggle or apply mask. +@Input ui64Mask Mask of events to control. +@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfControl( + RGX_HWPERF_CONNECTION *psHWPerfConnection, + RGX_HWPERF_STREAM_ID eStreamId, + IMG_BOOL bToggle, + IMG_UINT64 ui64Mask); + + +/*************************************************************************/ /*! +@Function RGXHWPerfGetFilter +@Description Reads HWPerf stream filter where stream is identified by the + given stream ID. 
+ +@Input hDevData Handle to connection/device object +@Input eStreamId ID of the HWPerf stream +@Output ui64Filter HWPerf filter value +@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfGetFilter( + IMG_HANDLE hDevData, + RGX_HWPERF_STREAM_ID eStreamId, + IMG_UINT64 *ui64Filter +); + + +/*************************************************************************/ /*! +@Function RGXHWPerfConfigureCounters +@Description Enable and configure the performance counter block for one or + more device layout modules. + See RGXConfigHWPerfCounters(). + +@Input psHWPerfConnection Pointer to HWPerf connection object +@Input ui32CtrlWord One of RGX_HWPERF_CTRL_NOP, + RGX_HWPERF_CTRL_GEOM_FULLRANGE, + RGX_HWPERF_CTRL_COMP_FULLRANGE, + RGX_HWPERF_CTRL_TDM_FULLRANGE +@Input ui32NumBlocks Number of elements in the array +@Input asBlockConfigs Address of the array of configuration blocks +@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfConfigureCounters( + RGX_HWPERF_CONNECTION *psHWPerfConnection, + IMG_UINT32 ui32CtrlWord, + IMG_UINT32 ui32NumBlocks, + RGX_HWPERF_CONFIG_CNTBLK* asBlockConfigs); + + +/*************************************************************************/ /*! +@Function RGXHWPerfDisableCounters +@Description Disable the performance counter block for one or more device + layout modules. + +@Input psHWPerfConnection Pointer to HWPerf connection object +@Input ui32NumBlocks Number of elements in the array +@Input aeBlockIDs An array of words with values taken from + the RGX_HWPERF_CNTBLK_ID enumeration.
+@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfDisableCounters( + RGX_HWPERF_CONNECTION *psHWPerfConnection, + IMG_UINT32 ui32NumBlocks, + IMG_UINT16* aeBlockIDs); + +/*************************************************************************/ /*! +@Function RGXHWPerfEnableCounters +@Description Enable the performance counter block for one or more device + layout modules. + +@Input psHWPerfConnection Pointer to HWPerf connection object +@Input ui32NumBlocks Number of elements in the array +@Input aeBlockIDs An array of words with values taken from the + RGX_HWPERF_CNTBLK_ID enumeration. +@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfEnableCounters( + RGX_HWPERF_CONNECTION *psHWPerfConnection, + IMG_UINT32 ui32NumBlocks, + IMG_UINT16* aeBlockIDs); + +/****************************************************************************** + * RGX HW Performance Profiling Retrieval API(s) + * + * The client must ensure their use of this acquire/release API for a single + * connection/stream must not be shared with multiple execution contexts e.g. + * between a kernel thread and an ISR handler. It is the client's + * responsibility to ensure this API is not interrupted by a high priority + * thread/ISR + *****************************************************************************/ + +/*************************************************************************/ /*! +@Function RGXHWPerfAcquireEvents +@Description When there is data available to read this call returns with OK + and the address and length of the data buffer the client can + safely read. This buffer may contain one or more event packets. + When there is no data to read, this call returns with OK and + sets *puiBufLen to 0 on exit. + Clients must pair this call with a RGXHWPerfReleaseEvents() + call. 
+ Data returned in ppBuf will be in the form of a sequence of + HWPerf packets which should be traversed using the pointers, + structures and macros provided in rgx_hwperf.h + +@Input hDevData Handle to connection/device object +@Input eStreamId ID of the HWPerf stream +@Output ppBuf Address of a pointer to a byte buffer. On exit it + contains the address of buffer to read from +@Output pui32BufLen Pointer to an integer. On exit it is the size of + the data to read from the buffer +@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfAcquireEvents( + IMG_HANDLE hDevData, + RGX_HWPERF_STREAM_ID eStreamId, + IMG_PBYTE* ppBuf, + IMG_UINT32* pui32BufLen); + + +/*************************************************************************/ /*! +@Function RGXHWPerfReleaseEvents +@Description Called after client has read the event data out of the buffer + retrieved from the Acquire Events call to release resources. +@Input hDevData Handle to connection/device object +@Input eStreamId ID of the HWPerf stream +@Return PVRSRV_ERROR System error code +*/ /**************************************************************************/ +IMG_INTERNAL +PVRSRV_ERROR RGXHWPerfReleaseEvents( + IMG_HANDLE hDevData, + RGX_HWPERF_STREAM_ID eStreamId); + + +/*************************************************************************/ /*! +@Function RGXHWPerfConvertCRTimeStamp +@Description Converts the timestamp given by FW events to the common OS + timestamp. The first three inputs are obtained via a CLK_SYNC + event, ui64CRTimeStamp is the CR timestamp from the FW event + to be converted. 
+@Input ui32ClkSpeed Clock speed given by sync event +@Input ui64CorrCRTimeStamp CR Timestamp given by sync event +@Input ui64CorrOSTimeStamp Correlating OS Timestamp given by sync + event +@Input ui64CRTimeStamp CR Timestamp to convert +@Return IMG_UINT64 Calculated OS Timestamp +*/ /**************************************************************************/ +IMG_UINT64 RGXHWPerfConvertCRTimeStamp( + IMG_UINT32 ui32ClkSpeed, + IMG_UINT64 ui64CorrCRTimeStamp, + IMG_UINT64 ui64CorrOSTimeStamp, + IMG_UINT64 ui64CRTimeStamp); + +#endif /* RGXAPI_KM_H */ + +/****************************************************************************** + End of file (rgxapi_km.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgxbreakpoint.c b/drivers/gpu/drm/phytium/octopus/rgxbreakpoint.c new file mode 100644 index 000000000000..d536b69e7a7f --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxbreakpoint.c @@ -0,0 +1,295 @@ +/*************************************************************************/ /*! +@File +@Title RGX Breakpoint routines +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description RGX Breakpoint routines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "rgxbreakpoint.h" +#include "pvr_debug.h" +#include "rgxutils.h" +#include "rgxfwutils.h" +#include "rgxmem.h" +#include "device.h" +#include "sync_internal.h" +#include "pdump_km.h" +#include "pvrsrv.h" + +PVRSRV_ERROR PVRSRVRGXSetBreakpointKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_HANDLE hMemCtxPrivData, + RGXFWIF_DM eFWDataMaster, + IMG_UINT32 ui32BPAddr, + IMG_UINT32 ui32HandlerAddr, + IMG_UINT32 ui32DataMaster) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_KCCB_CMD sBPCmd; + IMG_UINT32 ui32kCCBCommandSlot; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + OSLockAcquire(psDevInfo->hBPLock); + + if (psDevInfo->bBPSet) + { + eError = PVRSRV_ERROR_BP_ALREADY_SET; + goto unlock; + } + + sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP; + sBPCmd.uCmdData.sBPData.ui32BPAddr = ui32BPAddr; + sBPCmd.uCmdData.sBPData.ui32HandlerAddr = ui32HandlerAddr; + sBPCmd.uCmdData.sBPData.ui32BPDM = ui32DataMaster; + sBPCmd.uCmdData.sBPData.ui32BPDataFlags = RGXFWIF_BPDATA_FLAGS_WRITE | RGXFWIF_BPDATA_FLAGS_ENABLE; + sBPCmd.uCmdData.sBPData.eDM = eFWDataMaster; + + eError = RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext, + psFWMemContextMemDesc, + 0 , + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", unlock); + + eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, + eFWDataMaster, + &sBPCmd, + 0, + PDUMP_FLAGS_CONTINUOUS, + &ui32kCCBCommandSlot); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", unlock); + + /* Wait for FW to complete command execution */ + eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", unlock); + + psDevInfo->eBPDM = 
eFWDataMaster; + psDevInfo->bBPSet = IMG_TRUE; + +unlock: + OSLockRelease(psDevInfo->hBPLock); + + return eError; +} + +PVRSRV_ERROR PVRSRVRGXClearBreakpointKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_HANDLE hMemCtxPrivData) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_KCCB_CMD sBPCmd; + IMG_UINT32 ui32kCCBCommandSlot; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP; + sBPCmd.uCmdData.sBPData.ui32BPAddr = 0; + sBPCmd.uCmdData.sBPData.ui32HandlerAddr = 0; + sBPCmd.uCmdData.sBPData.ui32BPDataFlags = RGXFWIF_BPDATA_FLAGS_WRITE | RGXFWIF_BPDATA_FLAGS_CTL; + sBPCmd.uCmdData.sBPData.eDM = psDevInfo->eBPDM; + + OSLockAcquire(psDevInfo->hBPLock); + + eError = RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext, + psFWMemContextMemDesc, + 0 , + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", unlock); + + eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, + psDevInfo->eBPDM, + &sBPCmd, + 0, + PDUMP_FLAGS_CONTINUOUS, + &ui32kCCBCommandSlot); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", unlock); + + /* Wait for FW to complete command execution */ + eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", unlock); + + psDevInfo->bBPSet = IMG_FALSE; + +unlock: + OSLockRelease(psDevInfo->hBPLock); + + return eError; +} + +PVRSRV_ERROR PVRSRVRGXEnableBreakpointKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_HANDLE hMemCtxPrivData) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_KCCB_CMD sBPCmd; + IMG_UINT32 
ui32kCCBCommandSlot; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + OSLockAcquire(psDevInfo->hBPLock); + + if (psDevInfo->bBPSet == IMG_FALSE) + { + eError = PVRSRV_ERROR_BP_NOT_SET; + goto unlock; + } + + sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP; + sBPCmd.uCmdData.sBPData.ui32BPDataFlags = RGXFWIF_BPDATA_FLAGS_CTL | RGXFWIF_BPDATA_FLAGS_ENABLE; + sBPCmd.uCmdData.sBPData.eDM = psDevInfo->eBPDM; + + eError = RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext, + psFWMemContextMemDesc, + 0 , + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", unlock); + + eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, + psDevInfo->eBPDM, + &sBPCmd, + 0, + PDUMP_FLAGS_CONTINUOUS, + &ui32kCCBCommandSlot); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", unlock); + + /* Wait for FW to complete command execution */ + eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", unlock); + +unlock: + OSLockRelease(psDevInfo->hBPLock); + + return eError; +} + +PVRSRV_ERROR PVRSRVRGXDisableBreakpointKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_HANDLE hMemCtxPrivData) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_KCCB_CMD sBPCmd; + IMG_UINT32 ui32kCCBCommandSlot; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + OSLockAcquire(psDevInfo->hBPLock); + + if (psDevInfo->bBPSet == IMG_FALSE) + { + eError = PVRSRV_ERROR_BP_NOT_SET; + goto unlock; + } + + sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP; + sBPCmd.uCmdData.sBPData.ui32BPDataFlags = RGXFWIF_BPDATA_FLAGS_CTL; + sBPCmd.uCmdData.sBPData.eDM = psDevInfo->eBPDM; + + eError = RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext, + psFWMemContextMemDesc, + 0 , + RFW_FWADDR_NOREF_FLAG); + 
PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", unlock); + + eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, + psDevInfo->eBPDM, + &sBPCmd, + 0, + PDUMP_FLAGS_CONTINUOUS, + &ui32kCCBCommandSlot); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", unlock); + + /* Wait for FW to complete command execution */ + eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", unlock); + +unlock: + OSLockRelease(psDevInfo->hBPLock); + + return eError; +} + +PVRSRV_ERROR PVRSRVRGXOverallocateBPRegistersKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 ui32TempRegs, + IMG_UINT32 ui32SharedRegs) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_KCCB_CMD sBPCmd; + IMG_UINT32 ui32kCCBCommandSlot; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP; + sBPCmd.uCmdData.sBPData.ui32BPDataFlags = RGXFWIF_BPDATA_FLAGS_REGS; + sBPCmd.uCmdData.sBPData.ui32TempRegs = ui32TempRegs; + sBPCmd.uCmdData.sBPData.ui32SharedRegs = ui32SharedRegs; + sBPCmd.uCmdData.sBPData.psFWMemContext.ui32Addr = 0U; + sBPCmd.uCmdData.sBPData.eDM = RGXFWIF_DM_GP; + + OSLockAcquire(psDevInfo->hBPLock); + + eError = RGXScheduleCommandAndGetKCCBSlot(psDeviceNode->pvDevice, + RGXFWIF_DM_GP, + &sBPCmd, + 0, + PDUMP_FLAGS_CONTINUOUS, + &ui32kCCBCommandSlot); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", unlock); + + /* Wait for FW to complete command execution */ + eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", unlock); + +unlock: + OSLockRelease(psDevInfo->hBPLock); + + return eError; +} + +/****************************************************************************** + End of file (rgxbreakpoint.c) 
+******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgxbreakpoint.h b/drivers/gpu/drm/phytium/octopus/rgxbreakpoint.h new file mode 100644 index 000000000000..424575b7fdbc --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxbreakpoint.h @@ -0,0 +1,141 @@ +/*************************************************************************/ /*! +@File +@Title RGX breakpoint functionality +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Header for the RGX breakpoint functionality +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGXBREAKPOINT_H)
+#define RGXBREAKPOINT_H
+
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_km.h"
+
+/*!
+*******************************************************************************
+ @Function PVRSRVRGXSetBreakpointKM
+
+ @Description
+ Server-side implementation of RGXSetBreakpoint
+
+ @Input psDeviceNode - RGX Device node
+ @Input eFWDataMaster - Data Master to schedule command for
+ @Input hMemCtxPrivData - memory context private data
+ @Input ui32BPAddr - Address of breakpoint
+ @Input ui32HandlerAddr - Address of breakpoint handler
+ @Input ui32DataMaster - Data Master of the breakpoint
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXSetBreakpointKM(CONNECTION_DATA * psConnection,
+					PVRSRV_DEVICE_NODE * psDeviceNode,
+					IMG_HANDLE hMemCtxPrivData,
+					RGXFWIF_DM eFWDataMaster,
+					IMG_UINT32 ui32BPAddr,
+					IMG_UINT32 ui32HandlerAddr,
+					IMG_UINT32 ui32DataMaster);
+
+/*!
+******************************************************************************* + @Function PVRSRVRGXClearBreakpointKM + + @Description + Server-side implementation of RGXClearBreakpoint + + @Input psDeviceNode - RGX Device node + @Input hMemCtxPrivData - memory context private data + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXClearBreakpointKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_HANDLE hMemCtxPrivData); + +/*! +******************************************************************************* + @Function PVRSRVRGXEnableBreakpointKM + + @Description + Server-side implementation of RGXEnableBreakpoint + + @Input psDeviceNode - RGX Device node + @Input hMemCtxPrivData - memory context private data + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXEnableBreakpointKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_HANDLE hMemCtxPrivData); + +/*! +******************************************************************************* + @Function PVRSRVRGXDisableBreakpointKM + + @Description + Server-side implementation of RGXDisableBreakpoint + + @Input psDeviceNode - RGX Device node + @Input hMemCtxPrivData - memory context private data + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXDisableBreakpointKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_HANDLE hMemCtxPrivData); + +/*! 
+******************************************************************************* + @Function PVRSRVRGXOverallocateBPRegistersKM + + @Description + Server-side implementation of RGXOverallocateBPRegisters + + @Input psDeviceNode - RGX Device node + @Input ui32TempRegs - Number of temporary registers to overallocate + @Input ui32SharedRegs - Number of shared registers to overallocate + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXOverallocateBPRegistersKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 ui32TempRegs, + IMG_UINT32 ui32SharedRegs); +#endif /* RGXBREAKPOINT_H */ diff --git a/drivers/gpu/drm/phytium/octopus/rgxbvnc.c b/drivers/gpu/drm/phytium/octopus/rgxbvnc.c new file mode 100644 index 000000000000..dd8e0694b09b --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxbvnc.c @@ -0,0 +1,811 @@ +/*************************************************************************/ /*! +@File +@Title BVNC handling specific routines +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Functions used for BNVC related work +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "img_defs.h" +#include "rgxbvnc.h" +#define RGXBVNC_C +#include "rgx_bvnc_table_km.h" +#undef RGXBVNC_C +#include "oskm_apphint.h" +#include "pvrsrv.h" +#include "pdump_km.h" +#include "rgx_compat_bvnc.h" + +#define RGXBVNC_BUFFER_SIZE (((PVRSRV_MAX_DEVICES)*(RGX_BVNC_STR_SIZE_MAX))+1) + +/* This function searches the given array for a given search value */ +static IMG_UINT64* _RGXSearchBVNCTable( IMG_UINT64 *pui64Array, + IMG_UINT uiEnd, + IMG_UINT64 ui64SearchValue, + IMG_UINT uiRowCount) +{ + IMG_UINT uiStart = 0, index; + IMG_UINT64 value, *pui64Ptr = NULL; + + while (uiStart < uiEnd) + { + index = (uiStart + uiEnd)/2; + pui64Ptr = pui64Array + (index * uiRowCount); + value = *(pui64Ptr); + + if (value == ui64SearchValue) + { + return pui64Ptr; + } + + if (value > ui64SearchValue) + { + uiEnd = index; + }else + { + uiStart = index + 1; + } + } + return NULL; +} +#define RGX_SEARCH_BVNC_TABLE(t, b) (_RGXSearchBVNCTable((IMG_UINT64*)(t), \ + ARRAY_SIZE(t), (b), \ + sizeof((t)[0])/sizeof(IMG_UINT64)) ) + + +#if defined(DEBUG) + +#define PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, szShortName, Feature) \ + if ( psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX] != RGX_FEATURE_VALUE_DISABLED ) \ + { PVR_LOG(("%s %d", szShortName, psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX])); } \ + else \ + { PVR_LOG(("%s N/A", szShortName)); } + +static void _RGXBvncDumpParsedConfig(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; + IMG_UINT64 ui64Mask = 0, ui32IdOrNameIdx = 1; + + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "NC: ", NUM_CLUSTERS); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "CSF: ", CDM_CONTROL_STREAM_FORMAT); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "FBCDCA: ", FBCDC_ARCHITECTURE); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "MCMB: ", META_COREMEM_BANKS); + 
PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "MCMS: ", META_COREMEM_SIZE); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "MDMACnt: ", META_DMA_CHANNEL_COUNT); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "NIIP: ", NUM_ISP_IPP_PIPES); +#if defined(RGX_FEATURE_NUM_ISP_PER_SPU_MAX_VALUE_IDX) + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "NIPS: ", NUM_ISP_PER_SPU); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "PPS: ", PBE_PER_SPU); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "NSPU: ", NUM_SPU); +#endif + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "PBW: ", PHYS_BUS_WIDTH); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "STEArch: ", SCALABLE_TE_ARCH); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "SVCEA: ", SCALABLE_VCE); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "SLCBanks: ", SLC_BANKS); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "SLCCLS: ", SLC_CACHE_LINE_SIZE_BITS); + PVR_LOG(("SLCSize: %d", psDevInfo->sDevFeatureCfg.ui32SLCSizeInBytes)); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "VASB: ", VIRTUAL_ADDRESS_SPACE_BITS); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "META: ", META); + PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "NOSIDS: ", NUM_OSIDS); + +#if defined(FEATURE_NO_VALUES_NAMES_MAX_IDX) + /* Dump the features with no values */ + ui64Mask = psDevInfo->sDevFeatureCfg.ui64Features; + while (ui64Mask) + { + if (ui64Mask & 0x01) + { + if (ui32IdOrNameIdx <= FEATURE_NO_VALUES_NAMES_MAX_IDX) + { + PVR_LOG(("%s", gaszFeaturesNoValuesNames[ui32IdOrNameIdx - 1])); + } + else + { + PVR_DPF((PVR_DBG_WARNING, + "Feature with Mask doesn't exist: 0x%016" IMG_UINT64_FMTSPECx, + ((IMG_UINT64)1 << (ui32IdOrNameIdx - 1)))); + } + } + ui64Mask >>= 1; + ui32IdOrNameIdx++; + } +#endif + +#if defined(ERNSBRNS_IDS_MAX_IDX) + /* Dump the ERN and BRN flags for this core */ + ui64Mask = psDevInfo->sDevFeatureCfg.ui64ErnsBrns; + ui32IdOrNameIdx = 1; + + while (ui64Mask) + { + if (ui64Mask & 0x1) + { + if (ui32IdOrNameIdx <= ERNSBRNS_IDS_MAX_IDX) + { + PVR_LOG(("ERN/BRN : %d", gaui64ErnsBrnsIDs[ui32IdOrNameIdx - 1])); + } + else + { + PVR_LOG(("Unknown 
ErnBrn bit: 0x%0" IMG_UINT64_FMTSPECx, ((IMG_UINT64)1 << (ui32IdOrNameIdx - 1)))); + } + } + ui64Mask >>= 1; + ui32IdOrNameIdx++; + } +#endif + +} +#endif + +static void _RGXBvncParseFeatureValues(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT64 *pui64Cfg) +{ + IMG_UINT32 ui32Index; + + /* Read the feature values for the runtime BVNC */ + for (ui32Index = 0; ui32Index < RGX_FEATURE_WITH_VALUES_MAX_IDX; ui32Index++) + { + IMG_UINT16 bitPosition = aui16FeaturesWithValuesBitPositions[ui32Index]; + IMG_UINT64 ui64PackedValues = pui64Cfg[2 + bitPosition / 64]; + IMG_UINT16 ui16ValueIndex = (ui64PackedValues & aui64FeaturesWithValuesBitMasks[ui32Index]) >> (bitPosition % 64); + + if (ui16ValueIndex < gaFeaturesValuesMaxIndexes[ui32Index]) + { + if (gaFeaturesValues[ui32Index][ui16ValueIndex] == (IMG_UINT16)RGX_FEATURE_VALUE_DISABLED) + { + psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] = RGX_FEATURE_VALUE_DISABLED; + } + else + { + psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] = gaFeaturesValues[ui32Index][ui16ValueIndex]; + } + } + else + { + /* This case should never be reached */ + psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] = RGX_FEATURE_VALUE_INVALID; + PVR_DPF((PVR_DBG_ERROR, "%s: Feature with index (%d) decoded wrong value index (%d)", __func__, ui32Index, ui16ValueIndex)); + PVR_ASSERT(ui16ValueIndex < gaFeaturesValuesMaxIndexes[ui32Index]); + } + } + +#if defined(RGX_FEATURE_POWER_ISLAND_VERSION_MAX_VALUE_IDX) + /* Code path for Volcanic */ + + psDevInfo->sDevFeatureCfg.ui32MAXDMCount = RGXFWIF_DM_MIN_CNT; + psDevInfo->sDevFeatureCfg.ui32MAXDMMTSCount = RGXFWIF_DM_MIN_MTS_CNT; + + /* Get the max number of dusts in the core */ + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS)) + { + RGX_LAYER_PARAMS sParams; + + OSCachedMemSet(&sParams, 0, sizeof(RGX_LAYER_PARAMS)); + sParams.psDevInfo = psDevInfo; + + if (RGX_DEVICE_GET_FEATURE_VALUE(&sParams, POWER_ISLAND_VERSION) == 1) + { + psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount = 
MAX(1, (RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS) / 2)); + } + else if (RGX_DEVICE_GET_FEATURE_VALUE(&sParams, POWER_ISLAND_VERSION) == 2) + { + psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount = RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS); + } + else + { + /* All octopus cores support power islanding */ + psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount = RGX_FEATURE_VALUE_INVALID; + PVR_DPF((PVR_DBG_ERROR, "%s: Power island feature version not found!", __func__)); + PVR_ASSERT(0); + } + } + else + { + /* This case should never be reached as all cores have clusters */ + psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount = RGX_FEATURE_VALUE_INVALID; + PVR_DPF((PVR_DBG_ERROR, "%s: Number of clusters feature value missing!", __func__)); + PVR_ASSERT(0); + } +#else + /* Code path for Rogue */ + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) + { + psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_META_IDX] = RGX_FEATURE_VALUE_DISABLED; + } + + /* Get the max number of dusts in the core */ + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS)) + { + psDevInfo->sDevFeatureCfg.ui32MAXDustCount = MAX(1, (RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS) / 2)); + } + else + { + /* This case should never be reached as all cores have clusters */ + psDevInfo->sDevFeatureCfg.ui32MAXDustCount = RGX_FEATURE_VALUE_INVALID; + PVR_DPF((PVR_DBG_ERROR, "%s: Number of clusters feature value missing!", __func__)); + PVR_ASSERT(0); + } +#endif + + /* Transform the META coremem size info in bytes */ + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META_COREMEM_SIZE)) + { + psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_META_COREMEM_SIZE_IDX] *= 1024; + } +} + +static void _RGXBvncAcquireAppHint(IMG_CHAR *pszBVNC, const IMG_UINT32 ui32RGXDevCount) +{ + const IMG_CHAR *pszAppHintDefault = PVRSRV_APPHINT_RGXBVNC; + void *pvAppHintState = NULL; + IMG_UINT32 ui32BVNCCount = 0; + IMG_BOOL bRet; + IMG_CHAR szBVNCAppHint[RGXBVNC_BUFFER_SIZE]; + IMG_CHAR *pszCurrentBVNC = 
szBVNCAppHint; + szBVNCAppHint[0] = '\0'; + + OSCreateKMAppHintState(&pvAppHintState); + + bRet = (IMG_BOOL)OSGetKMAppHintSTRING(pvAppHintState, + RGXBVNC, + pszAppHintDefault, + szBVNCAppHint, + sizeof(szBVNCAppHint)); + + OSFreeKMAppHintState(pvAppHintState); + + if (!bRet || (szBVNCAppHint[0] == '\0')) + { + return; + } + + PVR_DPF((PVR_DBG_MESSAGE, "%s: BVNC module param list: %s",__func__, szBVNCAppHint)); + + while (*pszCurrentBVNC != '\0') + { + IMG_CHAR *pszNext = pszCurrentBVNC; + + if (ui32BVNCCount >= PVRSRV_MAX_DEVICES) + { + break; + } + + while (1) + { + if (*pszNext == ',') + { + pszNext[0] = '\0'; + pszNext++; + break; + } else if (*pszNext == '\0') + { + break; + } + pszNext++; + } + + if (ui32BVNCCount == ui32RGXDevCount) + { + OSStringLCopy(pszBVNC, pszCurrentBVNC, RGX_BVNC_STR_SIZE_MAX); + return; + } + + ui32BVNCCount++; + pszCurrentBVNC = pszNext; + } + + PVR_DPF((PVR_DBG_ERROR, "%s: Given module parameters list is shorter than " + "number of actual devices", __func__)); + + /* If only one BVNC parameter is specified, the same is applied for all RGX + * devices detected */ + if (1 == ui32BVNCCount) + { + OSStringLCopy(pszBVNC, szBVNCAppHint, RGX_BVNC_STR_SIZE_MAX); + } +} + +/* Function that parses the BVNC List passed as module parameter */ +static PVRSRV_ERROR _RGXBvncParseList(IMG_UINT32 *pB, + IMG_UINT32 *pV, + IMG_UINT32 *pN, + IMG_UINT32 *pC, + const IMG_UINT32 ui32RGXDevCount) +{ + unsigned int ui32ScanCount = 0; + IMG_CHAR aszBVNCString[RGX_BVNC_STR_SIZE_MAX]; + + aszBVNCString[0] = '\0'; + + /* 4 components of a BVNC string is B, V, N & C */ +#define RGX_BVNC_INFO_PARAMS (4) + + _RGXBvncAcquireAppHint(aszBVNCString, ui32RGXDevCount); + + if ('\0' == aszBVNCString[0]) + { + return PVRSRV_ERROR_INVALID_BVNC_PARAMS; + } + + /* Parse the given RGX_BVNC string */ + ui32ScanCount = OSVSScanf(aszBVNCString, RGX_BVNC_STR_FMTSPEC, pB, pV, pN, pC); + if (RGX_BVNC_INFO_PARAMS != ui32ScanCount) + { + ui32ScanCount = OSVSScanf(aszBVNCString, 
RGX_BVNC_STRP_FMTSPEC, pB, pV, pN, pC); + } + if (RGX_BVNC_INFO_PARAMS != ui32ScanCount) + { + return PVRSRV_ERROR_INVALID_BVNC_PARAMS; + } + PVR_LOG(("BVNC module parameter honoured: %s", aszBVNCString)); + + return PVRSRV_OK; +} + +#if !defined(NO_HARDWARE) +/* + * This function obtains the SLCSize from the physical device for GPUs which provide + * this information. If the GPU does not provide support we return a value of 0 which will + * result in the BVNC supplied definition being used to provide the SLCSize. + * Must only be called from driver-live with hardware powered-on. + */ +static IMG_UINT32 _RGXBvncReadSLCSize(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + IMG_UINT64 ui64SLCSize = 0ULL; + +#if defined(RGX_CR_CORE_ID__PBVNC) + /* Rogue hardware */ + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_SIZE_CONFIGURABLE)) + { + ui64SLCSize = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SLC_SIZE_IN_KB); + if (ui64SLCSize == 0ULL) + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: Unexpected 0 SLC size. Using default", __func__)); + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: RGX_CR_SIZE_IN_KB = %u", __func__, + (IMG_UINT32) ui64SLCSize)); + } + } +#else + /* Volcanic hardware */ + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_SIZE_ADJUSTMENT)) + { + ui64SLCSize = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SLC_STATUS2); + ui64SLCSize &= ~RGX_CR_SLC_STATUS2_SLC_SIZE_IN_KB_CLRMSK; + ui64SLCSize >>= RGX_CR_SLC_STATUS2_SLC_SIZE_IN_KB_SHIFT; + + if (ui64SLCSize == 0ULL) + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: Unexpected 0 SLC size. Using default", __func__)); + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: SLC_SIZE_IN_KILOBYTES = %u", __func__, + (IMG_UINT32) ui64SLCSize)); + } + } +#endif + + return (IMG_UINT32)ui64SLCSize * 1024U; +} +#endif /* !defined(NO_HARDWARE) */ + +/* This function detects the Rogue variant and configures the essential + * config info associated with such a device. 
 * The config info includes features, errata, etc
 */
PVRSRV_ERROR RGXBvncInitialiseConfiguration(PVRSRV_DEVICE_NODE *psDeviceNode)
{
	/* Per-driver device counter: indexes the per-device BVNC module
	 * parameter list and is incremented once per successful init. */
	static IMG_UINT32 ui32RGXDevCnt = 0;
	PVRSRV_ERROR eError;
	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
	IMG_UINT64 ui64BVNC=0;
	IMG_UINT32 B=0, V=0, N=0, C=0;
	IMG_UINT64 *pui64Cfg = NULL;
	IMG_UINT32 ui32Cores = 1U;
	IMG_UINT32 ui32SLCSize = 0;

	/* Check for load time RGX BVNC parameter */
	eError = _RGXBvncParseList(&B,&V,&N,&C, ui32RGXDevCnt);
	if (PVRSRV_OK == eError)
	{
		PVR_LOG(("Read BVNC " RGX_BVNC_STR_FMTSPEC
				" from driver load parameter", B, V, N, C));

		/* Extract the BVNC config from the Features table.
		 * V is deliberately packed as 0: gaFeatures is keyed on B.N.C only;
		 * V is used later for the ERN/BRN (gaErnsBrns) lookup. */
		ui64BVNC = BVNC_PACK(B,0,N,C);
		pui64Cfg = RGX_SEARCH_BVNC_TABLE(gaFeatures, ui64BVNC);
		PVR_LOG_IF_FALSE((pui64Cfg != NULL), "Driver parameter BVNC configuration not found!");
	}

	/* Read the IgnoreHWReportedBVNC app-hint, which can force us to skip
	 * runtime detection from the device registers below. */
	{
		void *pvAppHintState = NULL;
		const IMG_BOOL bAppHintDefault = PVRSRV_APPHINT_IGNOREHWREPORTEDBVNC;

		OSCreateKMAppHintState(&pvAppHintState);
		OSGetKMAppHintBOOL(pvAppHintState,
		                   IgnoreHWReportedBVNC,
		                   &bAppHintDefault,
		                   &psDevInfo->bIgnoreHWReportedBVNC);
		OSFreeKMAppHintState(pvAppHintState);
	}

#if !defined(NO_HARDWARE)

	/* Try to detect the RGX BVNC from the HW device */
	if ((NULL == pui64Cfg) && !psDevInfo->bIgnoreHWReportedBVNC)
	{
		/* NOTE(review): 64-bit value despite the ui32 prefix - the CORE_ID
		 * register read below is 64-bit (OSReadHWReg64). */
		IMG_UINT64 ui32ID;
		IMG_BOOL bPowerDown = (psDeviceNode->eCurrentSysPowerState == PVRSRV_SYS_POWER_STATE_OFF);

		/* Power-up the device as required to read the registers */
		if (bPowerDown)
		{
			eError = PVRSRVSetSystemPowerState(psDeviceNode->psDevConfig, PVRSRV_SYS_POWER_STATE_ON);
			PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVSetSystemPowerState ON");
		}

#if defined(RGX_CR_CORE_ID__PBVNC)
		/* Core ID reading code for Rogue */

		/* Read the BVNC, in to new way first, if B not set, use old scheme */
		ui32ID = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID__PBVNC);

		if (GET_B(ui32ID))
		{
			/* New (packed) scheme: all four fields live in CORE_ID__PBVNC */
			B = (ui32ID & ~RGX_CR_CORE_ID__PBVNC__BRANCH_ID_CLRMSK) >>
					RGX_CR_CORE_ID__PBVNC__BRANCH_ID_SHIFT;
			V = (ui32ID & ~RGX_CR_CORE_ID__PBVNC__VERSION_ID_CLRMSK) >>
					RGX_CR_CORE_ID__PBVNC__VERSION_ID_SHIFT;
			N = (ui32ID & ~RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_CLRMSK) >>
					RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_SHIFT;
			C = (ui32ID & ~RGX_CR_CORE_ID__PBVNC__CONFIG_ID_CLRMSK) >>
					RGX_CR_CORE_ID__PBVNC__CONFIG_ID_SHIFT;

		}
		else
		{
			/* Old scheme: B/V come from CORE_REVISION, N/C from CORE_ID */
			IMG_UINT64 ui32CoreID, ui32CoreRev;
			ui32CoreRev = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_REVISION);
			ui32CoreID = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID);
			B = (ui32CoreRev & ~RGX_CR_CORE_REVISION_MAJOR_CLRMSK) >>
					RGX_CR_CORE_REVISION_MAJOR_SHIFT;
			V = (ui32CoreRev & ~RGX_CR_CORE_REVISION_MINOR_CLRMSK) >>
					RGX_CR_CORE_REVISION_MINOR_SHIFT;
			N = (ui32CoreID & ~RGX_CR_CORE_ID_CONFIG_N_CLRMSK) >>
					RGX_CR_CORE_ID_CONFIG_N_SHIFT;
			C = (ui32CoreID & ~RGX_CR_CORE_ID_CONFIG_C_CLRMSK) >>
					RGX_CR_CORE_ID_CONFIG_C_SHIFT;
		}
#else
		/* Core ID reading code for Volcanic */

		ui32ID = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID);

		B = (ui32ID & ~RGX_CR_CORE_ID_BRANCH_ID_CLRMSK) >>
				RGX_CR_CORE_ID_BRANCH_ID_SHIFT;
		V = (ui32ID & ~RGX_CR_CORE_ID_VERSION_ID_CLRMSK) >>
				RGX_CR_CORE_ID_VERSION_ID_SHIFT;
		N = (ui32ID & ~RGX_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_CLRMSK) >>
				RGX_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_SHIFT;
		C = (ui32ID & ~RGX_CR_CORE_ID_CONFIG_ID_CLRMSK) >>
				RGX_CR_CORE_ID_CONFIG_ID_SHIFT;
#endif

		PVR_LOG(("Read BVNC " RGX_BVNC_STR_FMTSPEC
				" from HW device registers", B, V, N, C));

		if (!PVRSRV_VZ_MODE_IS(GUEST))
		{
			/* Read the number of cores in the system for newer BVNC (Branch ID > 20) */
			if (B > 20)
			{
				ui32Cores = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MULTICORE_SYSTEM);
			}
		}

		/* Obtain the SLC size from the device */
		ui32SLCSize = _RGXBvncReadSLCSize(psDeviceNode);
		PVR_DPF((PVR_DBG_MESSAGE, "%s: SLC Size reported as %u", __func__, ui32SLCSize));

		/* Restore the power state we found the device in */
		if (bPowerDown)
		{
			eError = PVRSRVSetSystemPowerState(psDeviceNode->psDevConfig, PVRSRV_SYS_POWER_STATE_OFF);
			PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVSetSystemPowerState OFF");
		}

		/* Extract the BVNC config from the Features table */
		ui64BVNC = BVNC_PACK(B,0,N,C);
		if (ui64BVNC != 0)
		{
			pui64Cfg = RGX_SEARCH_BVNC_TABLE(gaFeatures, ui64BVNC);
			PVR_LOG_IF_FALSE((pui64Cfg != NULL), "HW device BVNC configuration not found!");
		}
		else if (!PVRSRV_VZ_MODE_IS(GUEST))
		{
			/*
			 * On host OS we should not get here as CORE_ID should not be zero, so flag an error.
			 * On older cores, guest OS only has CORE_ID if defined(RGX_FEATURE_COREID_PER_OS)
			 */
			PVR_LOG_ERROR(PVRSRV_ERROR_DEVICE_REGISTER_FAILED, "CORE_ID register returns zero. Unknown BVNC");
		}
	}
#endif

#if defined(RGX_BVNC_KM_B) && defined(RGX_BVNC_KM_N) && defined(RGX_BVNC_KM_C)
	if (NULL == pui64Cfg)
	{
		/* We reach here if the HW is not present,
		 * or we are running in a guest OS with no COREID_PER_OS feature,
		 * or HW is unstable during register read giving invalid values,
		 * or runtime detection has been disabled - fall back to compile time BVNC
		 */
		B = RGX_BVNC_KM_B;
		N = RGX_BVNC_KM_N;
		C = RGX_BVNC_KM_C;
		{
			/* Parse V from its string form: plain "%u" first, then the
			 * "<num>p" variant used by some provisional cores; 0 if neither
			 * form matches. */
			IMG_UINT32 ui32ScanCount = 0;
			ui32ScanCount = OSVSScanf(RGX_BVNC_KM_V_ST, "%u", &V);
			if (1 != ui32ScanCount)
			{
				ui32ScanCount = OSVSScanf(RGX_BVNC_KM_V_ST, "%up", &V);
				if (1 != ui32ScanCount)
				{
					V = 0;
				}
			}
		}
		PVR_LOG(("Reverting to compile time BVNC %s", RGX_BVNC_KM));

		/* Extract the BVNC config from the Features table */
		ui64BVNC = BVNC_PACK(B,0,N,C);
		pui64Cfg = RGX_SEARCH_BVNC_TABLE(gaFeatures, ui64BVNC);
		PVR_LOG_IF_FALSE((pui64Cfg != NULL), "Compile time BVNC configuration not found!");
	}
#endif /* defined(RGX_BVNC_KM_B) && defined(RGX_BVNC_KM_N) && defined(RGX_BVNC_KM_C) */

	/* Have we failed to identify the BVNC to use? */
	if (NULL == pui64Cfg)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: BVNC Detection and feature lookup failed. "
				"Unsupported BVNC: 0x%016" IMG_UINT64_FMTSPECx, __func__, ui64BVNC));
		return PVRSRV_ERROR_BVNC_UNSUPPORTED;
	}

	PVR_DPF((PVR_DBG_MESSAGE, "%s: BVNC Feature found config: 0x%016"
			IMG_UINT64_FMTSPECx " 0x%016" IMG_UINT64_FMTSPECx " 0x%016"
			IMG_UINT64_FMTSPECx " 0x%016" IMG_UINT64_FMTSPECx "\n", __func__,
			pui64Cfg[0], pui64Cfg[1], pui64Cfg[2], pui64Cfg[3]));

	/* Parsing feature config depends on available features on the core
	 * hence this parsing should always follow the above feature assignment */
	psDevInfo->sDevFeatureCfg.ui64Features = pui64Cfg[1];
	_RGXBvncParseFeatureValues(psDevInfo, pui64Cfg);

	/* Add 'V' to the packed BVNC value to get the BVNC ERN and BRN config. */
	ui64BVNC = BVNC_PACK(B,V,N,C);
	pui64Cfg = RGX_SEARCH_BVNC_TABLE(gaErnsBrns, ui64BVNC);
	if (NULL == pui64Cfg)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: BVNC ERN/BRN lookup failed. "
				"Unsupported BVNC: 0x%016" IMG_UINT64_FMTSPECx, __func__, ui64BVNC));
		psDevInfo->sDevFeatureCfg.ui64ErnsBrns = 0;
		return PVRSRV_ERROR_BVNC_UNSUPPORTED;
	}

	PVR_DPF((PVR_DBG_MESSAGE, "%s: BVNC ERN/BRN Cfg: 0x%016" IMG_UINT64_FMTSPECx
			" 0x%016" IMG_UINT64_FMTSPECx, __func__, *pui64Cfg, pui64Cfg[1]));
	psDevInfo->sDevFeatureCfg.ui64ErnsBrns = pui64Cfg[1];

	psDevInfo->sDevFeatureCfg.ui32B = B;
	psDevInfo->sDevFeatureCfg.ui32V = V;
	psDevInfo->sDevFeatureCfg.ui32N = N;
	psDevInfo->sDevFeatureCfg.ui32C = C;


	/*
	 * Store the SLCSize in the device info field. If 0 it means the device uses the BVNC
	 * values so grab them here as we've already populated the internal structures.
	 */
	if (ui32SLCSize == 0U)
	{
		ui32SLCSize = RGX_GET_FEATURE_VALUE(psDevInfo, SLC_SIZE_IN_KILOBYTES) * 1024U;

		/* Verify that we have a valid value returned from the BVNC */
		PVR_ASSERT(ui32SLCSize != 0U);
	}
	psDevInfo->sDevFeatureCfg.ui32SLCSizeInBytes = ui32SLCSize;

	/* Message to confirm configuration look up was a success */
	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT))
	{
#if defined(NO_HARDWARE)
		{
			PVR_UNREFERENCED_PARAMETER(ui32Cores);
			PVR_LOG(("RGX Device registered with BVNC " RGX_BVNC_STR_FMTSPEC,
					B, V, N, C));
		}
#else
		{
			PVR_LOG(("RGX Device registered BVNC " RGX_BVNC_STR_FMTSPEC
					" with %u %s in the system", B ,V ,N ,C, ui32Cores ,
					((ui32Cores == 1U)?"core":"cores")));
		}
#endif
	}
	else
	{
		PVR_LOG(("RGX Device registered with BVNC " RGX_BVNC_STR_FMTSPEC,
				B, V, N, C));
	}

	ui32RGXDevCnt++;

#if defined(DEBUG)
	_RGXBvncDumpParsedConfig(psDeviceNode);
#endif
	return PVRSRV_OK;
}

/*
 * This function checks if a particular feature is available on the given rgx device.
 * Returns IMG_TRUE if any bit of ui64FeatureMask is set in the parsed feature set. */
IMG_BOOL RGXBvncCheckFeatureSupported(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64FeatureMask)
{
	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;

	if (psDevInfo->sDevFeatureCfg.ui64Features & ui64FeatureMask)
	{
		return IMG_TRUE;
	}
	return IMG_FALSE;
}

/*
 * This function returns the value of a feature on the given rgx device,
 * or -1 if the index is out of range or the feature is disabled on this core. */
IMG_INT32 RGXBvncGetSupportedFeatureValue(PVRSRV_DEVICE_NODE *psDeviceNode, RGX_FEATURE_WITH_VALUE_INDEX eFeatureIndex)
{
	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;

	if (eFeatureIndex >= RGX_FEATURE_WITH_VALUES_MAX_IDX)
	{
		return -1;
	}

	if (psDevInfo->sDevFeatureCfg.ui32FeaturesValues[eFeatureIndex] == RGX_FEATURE_VALUE_DISABLED)
	{
		return -1;
	}

	return psDevInfo->sDevFeatureCfg.ui32FeaturesValues[eFeatureIndex];
}

/**************************************************************************/ /*!
@Function       RGXVerifyBVNC
@Description    Checks that the device's BVNC registers have the correct values.
@Input          psDeviceNode Device node
@Return         PVRSRV_ERROR
*/ /***************************************************************************/
#define NUM_RGX_CORE_IDS 8
PVRSRV_ERROR RGXVerifyBVNC(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64GivenBVNC, IMG_UINT64 ui64CoreIdMask)
{
	PVRSRV_RGXDEV_INFO *psDevInfo;
	PVRSRV_ERROR eError = PVRSRV_OK;
	IMG_UINT64 ui64MatchBVNC;
	IMG_UINT32 i;

	PVR_ASSERT(psDeviceNode != NULL);
	PVR_ASSERT(psDeviceNode->pvDevice != NULL);

	/* The device info */
	psDevInfo = psDeviceNode->pvDevice;

	PDUMPCOMMENT("PDUMP VERIFY CORE_ID registers for all OSIDs\n");

	/* construct the value to match against */
	if ((ui64GivenBVNC | ui64CoreIdMask) == 0) /* both zero means use configured DDK value */
	{
		ui64MatchBVNC = rgx_bvnc_pack(psDevInfo->sDevFeatureCfg.ui32B,
		                              psDevInfo->sDevFeatureCfg.ui32V,
		                              psDevInfo->sDevFeatureCfg.ui32N,
		                              psDevInfo->sDevFeatureCfg.ui32C);
	}
	else
	{
		/* use the value in CORE_ID for any zero elements in the BVNC */
		ui64MatchBVNC = (ui64GivenBVNC & ~ui64CoreIdMask) | (OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID) & ui64CoreIdMask);
	}
	PVR_LOG(("matchBVNC %d.%d.%d.%d",
			(int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_B) & 0xffff),
			(int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_V) & 0xffff),
			(int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_N) & 0xffff),
			(int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_C) & 0xffff)));

	/* read in all the CORE_ID registers.
	 * NOTE(review): (i << 16) implies one CORE_ID register bank per OSID at a
	 * 64KB stride - confirm against the register map. */
	for (i = 0; i < NUM_RGX_CORE_IDS; ++i)
	{
#if !defined(NO_HARDWARE)
		IMG_UINT64 ui64BVNC = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID + (i << 16));

		PVR_LOG(("CORE_ID%d returned %d.%d.%d.%d", i,
				(int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_B) & 0xffff),
				(int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_V) & 0xffff),
				(int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_N) & 0xffff),
				(int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_C) & 0xffff)));

		if (ui64BVNC != ui64MatchBVNC)
		{
			/* First mismatch wins: record the error and stop scanning */
			eError = PVRSRV_ERROR_BVNC_MISMATCH;
			PVR_DPF((PVR_DBG_ERROR, "%s: Invalid CORE_ID%d %d.%d.%d.%d, Expected %d.%d.%d.%d", __func__, i,
					(int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_B) & 0xffff),
					(int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_V) & 0xffff),
					(int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_N) & 0xffff),
					(int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_C) & 0xffff),
					(int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_B) & 0xffff),
					(int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_V) & 0xffff),
					(int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_N) & 0xffff),
					(int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_C) & 0xffff)));
			break;
		}
#endif

#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) && defined(PDUMP)
		/* check upper DWORD */
		eError = PDUMPREGPOL(RGX_PDUMPREG_NAME,
		                     (RGX_CR_CORE_ID + 4) + (i << 16),
		                     (IMG_UINT32)(ui64MatchBVNC >> 32),
		                     0xFFFFFFFF,
		                     PDUMP_FLAGS_CONTINUOUS,
		                     PDUMP_POLL_OPERATOR_EQUAL);
		if (eError == PVRSRV_OK)
		{
			/* check lower DWORD */
			eError = PDUMPREGPOL(RGX_PDUMPREG_NAME,
			                     RGX_CR_CORE_ID + (i << 16),
			                     (IMG_UINT32)(ui64MatchBVNC & 0xFFFFFFFF),
			                     0xFFFFFFFF,
			                     PDUMP_FLAGS_CONTINUOUS,
			                     PDUMP_POLL_OPERATOR_EQUAL);
		}
#endif
	}

	return eError;
} diff --git a/drivers/gpu/drm/phytium/octopus/rgxbvnc.h b/drivers/gpu/drm/phytium/octopus/rgxbvnc.h new file mode 100644 index 000000000000..a59e76be9f94 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxbvnc.h @@ -0,0 +1,90 @@
/*************************************************************************/ /*!
@File
@Title          BVNC handling specific header file
@Copyright      Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved
@Description    Header for the BVNC related work
                (see hwdefs/km/rgx_bvnc_table_km.h and
                hwdefs/km/rgx_bvnc_defs_km.h)
@License        Dual MIT/GPLv2

The contents of this file are subject to the MIT license as set out below.
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ /**************************************************************************/

#if !defined(RGXBVNC_H)
#define RGXBVNC_H

#include "pvrsrv_error.h"
#include "img_types.h"
#include "rgxdevice.h"

/*************************************************************************/ /*!
@brief		This function detects the Rogue variant and configures the
			essential config info associated with such a device.
			The config info includes features, errata, etc
@param		psDeviceNode - Device Node pointer
@return		PVRSRV_ERROR
*/ /**************************************************************************/
PVRSRV_ERROR RGXBvncInitialiseConfiguration(PVRSRV_DEVICE_NODE *psDeviceNode);

/*************************************************************************/ /*!
@brief		This function checks if a particular feature is available on
			the given rgx device
@param		psDeviceNode - Device Node pointer
@param		ui64FeatureMask - feature to be checked
@return		true if feature is supported, false otherwise
*/ /**************************************************************************/
IMG_BOOL RGXBvncCheckFeatureSupported(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64FeatureMask);

/*************************************************************************/ /*!
@brief		This function returns the value of a feature on the given
			rgx device
@param		psDeviceNode - Device Node pointer
@param		eFeatureIndex - index of the feature for which to return the value
@return		the value for the specified feature, or -1 if unavailable
*/ /**************************************************************************/
IMG_INT32 RGXBvncGetSupportedFeatureValue(PVRSRV_DEVICE_NODE *psDeviceNode, RGX_FEATURE_WITH_VALUE_INDEX eFeatureIndex);

/*************************************************************************/ /*!
@brief		This function validates that the BVNC values in CORE_ID regs are
			consistent and correct.
@param		psDeviceNode - Device Node pointer
@param		ui64GivenBVNC - BVNC to be verified against as supplied by caller
@param		ui64CoreIdMask - mask of components to pull from CORE_ID register
@return		success or fail
*/ /**************************************************************************/
PVRSRV_ERROR RGXVerifyBVNC(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64GivenBVNC, IMG_UINT64 ui64CoreIdMask);

#endif /* RGXBVNC_H */ diff --git a/drivers/gpu/drm/phytium/octopus/rgxccb.c b/drivers/gpu/drm/phytium/octopus/rgxccb.c new file mode 100644 index 000000000000..4b90d58ca30e --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxccb.c @@ -0,0 +1,2723 @@
/*************************************************************************/ /*!
@File
@Title          RGX CCB routines
@Copyright      Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved
@Description    RGX CCB routines
@License        Dual MIT/GPLv2

The contents of this file are subject to the MIT license as set out below.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

Alternatively, the contents of this file may be used under the terms of
the GNU General Public License Version 2 ("GPL") in which case the provisions
of GPL are applicable instead of those above.
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "pvr_debug.h" +#include "rgxdevice.h" +#include "pdump_km.h" +#include "allocmem.h" +#include "devicemem.h" +#include "rgxfwutils.h" +#include "osfunc.h" +#include "rgxccb.h" +#include "rgx_memallocflags.h" +#include "devicemem_pdump.h" +#include "dllist.h" +#if defined(__linux__) +#include "trace_events.h" +#endif +#include "sync_checkpoint_external.h" +#include "sync_checkpoint.h" +#include "rgxutils.h" +#include "info_page.h" +#include "rgxtimerquery.h" + +#if defined(PVRSRV_FORCE_FLUSH_CCCB_ON_KICK) +#include "cache_km.h" +#endif + +/* + * Uncomment PVRSRV_ENABLE_CCCB_UTILISATION_INFO define for verbose + * info and statistics regarding CCB usage. 
 */
//#define PVRSRV_ENABLE_CCCB_UTILISATION_INFO

/* Default threshold (as a percentage) for the PVRSRV_ENABLE_CCCB_UTILISATION_INFO feature. */
#define PVRSRV_ENABLE_CCCB_UTILISATION_INFO_THRESHOLD (90)

/*
 * Defines the number of fence updates to record so that future fences in the
 * CCB can be checked to see if they are already known to be satisfied.
 */
#define RGX_CCCB_FENCE_UPDATE_LIST_SIZE (32)

/* Mask off the two LSBs to get the word-aligned UFO firmware address */
#define RGX_UFO_PTR_ADDR(ufoptr) \
	(((ufoptr)->puiAddrUFO.ui32Addr) & 0xFFFFFFFC)

/* Free space in a circular buffer; CCBSize must be a power of two */
#define GET_CCB_SPACE(WOff, ROff, CCBSize) \
	((((ROff) - (WOff)) + ((CCBSize) - 1)) & ((CCBSize) - 1))

/* Advance a circular-buffer offset with wrap-around */
#define UPDATE_CCB_OFFSET(Off, PacketSize, CCBSize) \
	(Off) = (((Off) + (PacketSize)) & ((CCBSize) - 1))

#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)

#define PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_THRESHOLD 0x1
#define PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_ACQUIRE_FAILED 0x2
#define PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_FULL_CCB 0x4

typedef struct _RGX_CLIENT_CCB_UTILISATION_
{
	/* the threshold in bytes.
	 * when the CCB utilisation hits the threshold then we will print
	 * a warning message.
	 */
	IMG_UINT32 ui32ThresholdBytes;
	/* Maximum cCCB usage at some point in time */
	IMG_UINT32 ui32HighWaterMark;
	/* keep track of the warnings already printed.
	 * bit mask of PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_xyz
	 */
	IMG_UINT32 ui32Warnings;
	/* Keep track how many times CCB was full.
	 * Counters are reset after every grow.
	 */
	IMG_UINT32 ui32CCBFull;
	IMG_UINT32 ui32CCBAcquired;
} RGX_CLIENT_CCB_UTILISATION;

#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */

/* Per-context client circular command buffer (cCCB) bookkeeping shared
 * between the driver (write side) and the firmware (read side). */
struct _RGX_CLIENT_CCB_ {
	volatile RGXFWIF_CCCB_CTL	*psClientCCBCtrl;	/*!< CPU mapping of the CCB control structure used by the fw */
	void						*pvClientCCB;		/*!< CPU mapping of the CCB */
	DEVMEM_MEMDESC				*psClientCCBMemDesc;	/*!< MemDesc for the CCB */
	DEVMEM_MEMDESC				*psClientCCBCtrlMemDesc;	/*!< MemDesc for the CCB control */
	IMG_UINT32					ui32HostWriteOffset;	/*!< CCB write offset from the driver side */
	IMG_UINT32					ui32LastPDumpWriteOffset;	/*!< CCB write offset from the last time we submitted a command in capture range */
	IMG_UINT32					ui32FinishedPDumpWriteOffset;	/*!< Trails LastPDumpWriteOffset for last finished command, used for HW CB driven DMs */
	IMG_UINT32					ui32LastROff;		/*!< Last CCB Read offset to help detect any CCB wedge */
	IMG_UINT32					ui32LastWOff;		/*!< Last CCB Write offset to help detect any CCB wedge */
	IMG_UINT32					ui32ByteCount;		/*!< Count of the number of bytes written to CCCB */
	IMG_UINT32					ui32LastByteCount;	/*!< Last value of ui32ByteCount to help detect any CCB wedge */
	IMG_UINT32					ui32Size;			/*!< Size of the CCB */
#if defined(PVRSRV_ENABLE_CCCB_GROW)
	POS_LOCK					hCCBGrowLock;		/*!< Prevents CCB Grow while DumpCCB() is called and vice versa */
	IMG_UINT32					ui32VirtualAllocSize;	/*!< Virtual size of the CCB */
	IMG_UINT32					ui32ChunkSize;		/*!< CCB Sparse allocation chunk size */
	IMG_PUINT32					pui32MappingTable;	/*!< Mapping table for sparse allocation of the CCB */
#endif
	DLLIST_NODE					sNode;				/*!< Node used to store this CCB on the per connection list */
	PDUMP_CONNECTION_DATA		*psPDumpConnectionData;	/*!< Pointer to the per connection data in which we reside */
	void						*hTransition;		/*!< Handle for Transition callback */
	IMG_CHAR					szName[MAX_CLIENT_CCB_NAME];	/*!< Name of this client CCB */
	RGX_SERVER_COMMON_CONTEXT	*psServerCommonContext;	/*!< Parent server common context that this CCB
														belongs to */
#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
	RGX_CCB_REQUESTOR_TYPE		eRGXCCBRequestor;
	RGX_CLIENT_CCB_UTILISATION	sUtilisation;		/*!< CCB utilisation data */
#endif
#if defined(DEBUG)
	IMG_UINT32					ui32UpdateEntries;	/*!< Number of Fence Updates in asFenceUpdateList */
	RGXFWIF_UFO					asFenceUpdateList[RGX_CCCB_FENCE_UPDATE_LIST_SIZE]; /*!< List of recent updates written in this CCB */
#endif
	IMG_UINT32					ui32CCBFlags;		/*!< Bitmask for various flags relating to CCB. Bit defines in rgxccb.h */
};

/* Forms a table, with array of strings for each requestor type (listed in RGX_CCB_REQUESTORS X macro), to be used for
   DevMemAllocation comments and PDump comments. Each tuple in the table consists of 3 strings:
	{ "FwClientCCB:" , "FwClientCCBControl:" , },
   The first string being used as comment when allocating ClientCCB for the given requestor, the second for CCBControl
   structure, and the 3rd one for use in PDUMP comments. The number of tuples in the table must adhere to the following
   build assert. */
const IMG_CHAR *const aszCCBRequestors[][3] =
{
#define REQUESTOR_STRING(prefix,req) #prefix ":" #req
#define FORM_REQUESTOR_TUPLE(req) { REQUESTOR_STRING(FwClientCCB,req), REQUESTOR_STRING(FwClientCCBControl,req), #req },
	RGX_CCB_REQUESTORS(FORM_REQUESTOR_TUPLE)
#undef FORM_REQUESTOR_TUPLE
};

/* PDump a poll on the firmware read offset so playback drains this client
 * CCB before continuing; the poll target depends on whether the CCB has an
 * open (unfinished) command. */
PVRSRV_ERROR RGXCCBPDumpDrainCCB(RGX_CLIENT_CCB *psClientCCB,
					IMG_UINT32 ui32PDumpFlags)
{

	IMG_UINT32 ui32PollOffset;

	if (BIT_ISSET(psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN))
	{
		/* Draining CCB on a command that hasn't finished, and FW isn't expected
		 * to have updated Roff up to Woff. Only drain to the first
		 * finished command prior to this. The Roff for this
		 * is stored in ui32FinishedPDumpWriteOffset.
		 */
		ui32PollOffset = psClientCCB->ui32FinishedPDumpWriteOffset;

		PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
							  "cCCB(%s@%p): Draining open CCB rgxfw_roff < woff (%d)",
							  psClientCCB->szName,
							  psClientCCB,
							  ui32PollOffset);
	}
	else
	{
		/* Command to a finished CCB stream and FW is drained to empty
		 * out remaining commands until R==W.
		 */
		ui32PollOffset = psClientCCB->ui32LastPDumpWriteOffset;

		PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
							  "cCCB(%s@%p): Draining CCB rgxfw_roff == woff (%d)",
							  psClientCCB->szName,
							  psClientCCB,
							  ui32PollOffset);
	}

	return DevmemPDumpDevmemPol32(psClientCCB->psClientCCBCtrlMemDesc,
								  offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
								  ui32PollOffset,
								  0xffffffff,
								  PDUMP_POLL_OPERATOR_EQUAL,
								  ui32PDumpFlags);
}

/******************************************************************************
 FUNCTION	: RGXCCBPDumpSyncCCB

 PURPOSE	: Synchronise Client CCBs from both live and playback contexts.
			  Waits for live-FW to empty live-CCB.
			  Waits for sim-FW to empty sim-CCB by adding POL

 PARAMETERS	: psClientCCB		- The client CCB
			  ui32PDumpFlags	- PDump flags

 RETURNS	: PVRSRV_ERROR
******************************************************************************/
static PVRSRV_ERROR RGXCCBPDumpSyncCCB(RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui32PDumpFlags)
{
	PVRSRV_ERROR eError;

	/* Wait for the live FW to catch up/empty CCB. This is done by returning
	 * retry which will get pushed back out to Services client where it
	 * waits on the event object and then resubmits the command.
	 */
	if (psClientCCB->psClientCCBCtrl->ui32ReadOffset != psClientCCB->ui32HostWriteOffset)
	{
		return PVRSRV_ERROR_RETRY;
	}

	/* Wait for the sim FW to catch up/empty sim CCB.
	 * We drain whenever capture range is entered, even if no commands
	 * have been issued on this CCB when out of capture range. We have to
	 * wait for commands that might have been issued in the last capture
	 * range to finish so the connection's sync block snapshot dumped after
	 * all the PDumpTransition callbacks have been executed doesn't clobber
	 * syncs which the sim FW is currently working on.
	 *
	 * Although this is sub-optimal for play-back - while out of capture
	 * range for every continuous operation we synchronise the sim
	 * play-back processing the script and the sim FW, there is no easy
	 * solution. Not all modules that work with syncs register a
	 * PDumpTransition callback and thus we have no way of knowing if we
	 * can skip this sim CCB drain and sync block dump or not.
	 */

	eError = RGXCCBPDumpDrainCCB(psClientCCB, ui32PDumpFlags);
	PVR_LOG_IF_ERROR(eError, "RGXCCBPDumpDrainCCB");
	PVR_ASSERT(eError == PVRSRV_OK);

	/* Live CCB and simulation CCB now empty, FW idle on CCB in both
	 * contexts.
	 */
	return PVRSRV_OK;
}

/******************************************************************************
 FUNCTION	: RGXCCBPDumpFastForwardCCB

 PURPOSE	: Fast-forward sim-CCB and live-CCB offsets to live app-thread
			  values.
			  This helps to skip any commands submitted when out of capture
			  range and start with first command in capture range in both
			  live and playback contexts. In case of Block mode, this helps
			  to playback any intermediate PDump block directly after first
			  block.

 PARAMETERS	: psClientCCB		- The client CCB
			  ui32PDumpFlags	- PDump flags

 RETURNS	: void
******************************************************************************/
static void RGXCCBPDumpFastForwardCCB(RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui32PDumpFlags)
{
	volatile RGXFWIF_CCCB_CTL *psCCBCtl = psClientCCB->psClientCCBCtrl;

	/* Make sure that we have synced live-FW and live-App threads */
	PVR_ASSERT(psCCBCtl->ui32ReadOffset == psClientCCB->ui32HostWriteOffset);

	psCCBCtl->ui32ReadOffset = psClientCCB->ui32HostWriteOffset;
	psCCBCtl->ui32DepOffset = psClientCCB->ui32HostWriteOffset;
	psCCBCtl->ui32WriteOffset = psClientCCB->ui32HostWriteOffset;

	PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
						  "cCCB(%s@%p): Fast-forward from %d to %d",
						  psClientCCB->szName,
						  psClientCCB,
						  psClientCCB->ui32LastPDumpWriteOffset,
						  psClientCCB->ui32HostWriteOffset);

	/* Re-dump the whole control structure so playback picks up the
	 * fast-forwarded offsets written above. */
	DevmemPDumpLoadMem(psClientCCB->psClientCCBCtrlMemDesc,
					   0,
					   sizeof(RGXFWIF_CCCB_CTL),
					   ui32PDumpFlags);

	/* Although we've entered capture range for this process connection
	 * we might not do any work on this CCB so update the
	 * ui32LastPDumpWriteOffset to reflect where we got to for next
	 * time so we start the drain from where we got to last time.
	 */
	psClientCCB->ui32LastPDumpWriteOffset = psClientCCB->ui32HostWriteOffset;

}

/* PDump transition callback for a client CCB: keeps live and simulated
 * CCB state consistent across capture-range and block-mode boundaries. */
static PVRSRV_ERROR _RGXCCBPDumpTransition(void *pvData, void *pvDevice, PDUMP_TRANSITION_EVENT eEvent, IMG_UINT32 ui32PDumpFlags)
{
	RGX_CLIENT_CCB *psClientCCB = (RGX_CLIENT_CCB *) pvData;
#if defined(PDUMP)
	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) pvDevice;
#endif
	PVRSRV_ERROR eError;

	/* Block mode:
	 * Here is block structure at transition (ui32BlockLength=N frames):
	 *
	 * ...
	 * ...
	 * PDUMP_BLOCK_START_0x0000000x{
	 *
	 *
	 * ...
	 * ...
	 * ... (N frames data)
	 * ...
	 * ...
	 * <(1) Drain sim-KCCB>                     ''|
	 * <(2) Sync live and sim CCCB>               |
	 * }PDUMP_BLOCK_END_0x0000000x                | <- BlockTransition Steps
	 * <(3) Split MAIN and BLOCK stream script>   |
	 * PDUMP_BLOCK_START_0x0000000y{              |
	 * <(4) Fast-forward sim-CCCB>                |
	 * <(5) Re-dump SyncBlocks>                 ,,|
	 * ...
	 * ...
	 * ... (N frames data)
	 * ...
	 * ...
	 *
	 *
	 * }PDUMP_BLOCK_END_0x0000000y
	 * ...
	 * ...
	 *
	 * Steps (3) and (5) are done in pdump_server.c
	 * */
	switch (eEvent)
	{
		case PDUMP_TRANSITION_EVENT_RANGE_ENTERED:
		{
			/* We're about to transition into capture range and we've submitted
			 * new commands since the last time we entered capture range so drain
			 * the live CCB and simulation (sim) CCB as required, i.e. leave CCB
			 * idle in both live and sim contexts.
			 * This requires the host driver to ensure the live FW & the sim FW
			 * have both emptied out the remaining commands until R==W (CCB empty).
			 */

			eError = RGXCCBPDumpSyncCCB(psClientCCB, ui32PDumpFlags);
			PVR_RETURN_IF_ERROR(eError);

			if (psClientCCB->ui32LastPDumpWriteOffset != psClientCCB->ui32HostWriteOffset)
			{
				/* If new commands have been written when out of capture range in
				 * the live CCB then we need to fast forward the sim CCBCtl
				 * offsets past uncaptured commands. This is done by PDUMPing
				 * the CCBCtl memory to align sim values with the live CCBCtl
				 * values. Both live and sim FWs can start with the 1st command
				 * which is in the new capture range.
				 */
				RGXCCBPDumpFastForwardCCB(psClientCCB, ui32PDumpFlags);
			}
			break;
		}
		case PDUMP_TRANSITION_EVENT_RANGE_EXITED:
		{
			/* Nothing to do */
			break;
		}
		case PDUMP_TRANSITION_EVENT_BLOCK_FINISHED:
		{
			/* (1) Drain KCCB from current block before starting new:
			 *
			 * At playback, this will ensure that sim-FW drains all commands in KCCB
			 * belongs to current block before 'jumping' to any future commands (from
			 * next block). This will synchronise script-thread and sim-FW thread KCCBs
			 * at end of each pdump block.
			 *
			 * This will additionally force redump of KCCBCtl structure at start of next/new block.
			 * */

#if defined(PDUMP)
			eError = RGXPdumpDrainKCCB(psDevInfo, psDevInfo->psKernelCCBCtl->ui32WriteOffset);
			PVR_LOG_RETURN_IF_ERROR(eError, "RGXPdumpDrainKCCB");
#endif

			/* (2) Synchronise Client CCBs from live and playback contexts before starting new block:
			 *
			 * This operation will,
			 * a. Force synchronisation between app-thread and live-FW thread (i.e. Wait
			 *    for live-FW to empty live Client CCB).
			 *
			 * b. Next, it will dump poll command to drain Client CCB at end of every
			 *    pdump block. At playback time this will synchronise sim-FW and
			 *    script-thread Client CCBs at end of each block.
			 *
			 * This is to ensure that all commands in CCB from current block are processed
			 * before moving on to future commands.
			 * */

			eError = RGXCCBPDumpSyncCCB(psClientCCB, ui32PDumpFlags);
			PVR_RETURN_IF_ERROR(eError);
			break;
		}
		case PDUMP_TRANSITION_EVENT_BLOCK_STARTED:
		{
			/* (4) Fast-forward CCB write offsets to current live values:
			 *
			 * We have already synchronised live-FW and app-thread above at end of each
			 * block (in Step 2a above), now fast-forward Client CCBCtl write offsets to that of
			 * current app-thread values at start of every block. This will allow us to
			 * skip any intermediate pdump blocks and start with last (or any next) block
			 * immediately after first pdump block.
			 * */

			RGXCCBPDumpFastForwardCCB(psClientCCB, ui32PDumpFlags);
			break;
		}
		case PDUMP_TRANSITION_EVENT_NONE:
			/* Invalid event for transition */
		default:
		{
			/* Unknown Transition event */
			return PVRSRV_ERROR_INVALID_PARAMS;
		}
	}
	return PVRSRV_OK;
}

#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)

/* Reset utilisation tracking for a freshly created (or grown) client CCB. */
static INLINE void _RGXInitCCBUtilisation(RGX_CLIENT_CCB *psClientCCB)
{
	psClientCCB->sUtilisation.ui32HighWaterMark = 0; /* initialize ui32HighWaterMark level to zero */
	psClientCCB->sUtilisation.ui32ThresholdBytes = (psClientCCB->ui32Size *
							PVRSRV_ENABLE_CCCB_UTILISATION_INFO_THRESHOLD) / 100;
	psClientCCB->sUtilisation.ui32Warnings = 0;
	psClientCCB->sUtilisation.ui32CCBAcquired = 0;
	psClientCCB->sUtilisation.ui32CCBFull = 0;
}

/* Log a one-time warning per warning type when a utilisation event fires. */
static INLINE void _RGXCCBUtilisationEvent(RGX_CLIENT_CCB *psClientCCB,
					IMG_UINT32 ui32WarningType,
					IMG_UINT32 ui32CmdSize)
{
	/* in VERBOSE mode we will print a message for each different
	 * event type as they happen.
	 */
	/* NOTE(review): this inner #if tests the same macro that already guards
	 * this whole section, so the #else branch below is dead code here. */
#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
	if (!(psClientCCB->sUtilisation.ui32Warnings & ui32WarningType))
	{
		if (ui32WarningType == PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_ACQUIRE_FAILED)
		{
			PVR_LOG(("Failed to acquire CCB space for %u byte command:", ui32CmdSize));
		}

		PVR_LOG(("%s: Client CCB (%s) watermark (%u) hit %d%% of its allocation size (%u)",
				__func__,
				psClientCCB->szName,
				psClientCCB->sUtilisation.ui32HighWaterMark,
				psClientCCB->sUtilisation.ui32HighWaterMark * 100 / psClientCCB->ui32Size,
				psClientCCB->ui32Size));

		/* record that we have issued a warning of this type */
		psClientCCB->sUtilisation.ui32Warnings |= ui32WarningType;
	}
#else
	PVR_UNREFERENCED_PARAMETER(psClientCCB);
	PVR_UNREFERENCED_PARAMETER(ui32WarningType);
	PVR_UNREFERENCED_PARAMETER(ui32CmdSize);
#endif
}

/* Check the current CCB utilisation. Print a one-time warning message if it is above the
 * specified threshold
 */
static INLINE void _RGXCheckCCBUtilisation(RGX_CLIENT_CCB *psClientCCB)
{
	/* Print a warning message if the cCCB watermark is above the threshold value */
	if (psClientCCB->sUtilisation.ui32HighWaterMark >= psClientCCB->sUtilisation.ui32ThresholdBytes)
	{
		_RGXCCBUtilisationEvent(psClientCCB,
					PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_THRESHOLD,
					0);
	}
}

/* Update the cCCB high watermark level if necessary */
static void _RGXUpdateCCBUtilisation(RGX_CLIENT_CCB *psClientCCB)
{
	IMG_UINT32 ui32FreeSpace, ui32MemCurrentUsage;

	ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset,
				      psClientCCB->psClientCCBCtrl->ui32ReadOffset,
				      psClientCCB->ui32Size);
	ui32MemCurrentUsage = psClientCCB->ui32Size - ui32FreeSpace;

	if (ui32MemCurrentUsage > psClientCCB->sUtilisation.ui32HighWaterMark)
	{
		psClientCCB->sUtilisation.ui32HighWaterMark = ui32MemCurrentUsage;

		/* The high water mark has increased. Check if it is above the
		 * threshold so we can print a warning if necessary.
		 */
		_RGXCheckCCBUtilisation(psClientCCB);
	}
}

#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */

PVRSRV_ERROR RGXCreateCCB(PVRSRV_RGXDEV_INFO	*psDevInfo,
						  IMG_UINT32			ui32CCBSizeLog2,
						  IMG_UINT32			ui32CCBMaxSizeLog2,
						  IMG_UINT32			ui32ContextFlags,
						  CONNECTION_DATA		*psConnectionData,
						  RGX_CCB_REQUESTOR_TYPE	eRGXCCBRequestor,
						  RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
						  RGX_CLIENT_CCB		**ppsClientCCB,
						  DEVMEM_MEMDESC		**ppsClientCCBMemDesc,
						  DEVMEM_MEMDESC		**ppsClientCCBCtrlMemDesc)
{
	PVRSRV_ERROR	eError = PVRSRV_OK;
	PVRSRV_MEMALLOCFLAGS_T	uiClientCCBMemAllocFlags, uiClientCCBCtlMemAllocFlags;
	IMG_UINT32		ui32FWMainLog2PageSize = DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap);
	IMG_UINT32		ui32ChunkSize = (1U << ui32FWMainLog2PageSize);
	IMG_UINT32		ui32AllocSize = MAX((1U << ui32CCBSizeLog2), ui32ChunkSize);
	IMG_UINT32		ui32MinAllocSize = MAX((1U << MIN_SAFE_CCB_SIZE_LOG2), ui32ChunkSize);
	RGX_CLIENT_CCB	*psClientCCB;
#if defined(PVRSRV_ENABLE_CCCB_GROW)
	IMG_UINT32		ui32NumChunks = ui32AllocSize / ui32ChunkSize;
	IMG_UINT32		ui32VirtualAllocSize = (1U << ui32CCBMaxSizeLog2);
	IMG_UINT32		ui32NumVirtChunks = ui32VirtualAllocSize / ui32ChunkSize;
	IMG_UINT32		i;

	/* For the allocation request to be valid, at least one page is required.
	 * This is relevant on systems where the page size is greater than the client CCB size. */
	ui32NumVirtChunks = MAX(1, ui32NumVirtChunks);
	PVR_ASSERT((ui32ChunkSize >= (1U << PAGE_SHIFT)));
#else
	PVR_UNREFERENCED_PARAMETER(ui32CCBMaxSizeLog2);
#endif /* defined(PVRSRV_ENABLE_CCCB_GROW) */

	/* All client CCBs should be at-least of the "minimum" size and not to exceed "maximum" */
	if ((ui32CCBSizeLog2 < MIN_SAFE_CCB_SIZE_LOG2) ||
		(ui32CCBSizeLog2 > MAX_SAFE_CCB_SIZE_LOG2))
	{
		PVR_DPF((PVR_DBG_ERROR,
				"%s: %s CCB size is invalid (%d). 
Should be from %d to %d", + __func__, + aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], + ui32CCBSizeLog2, MIN_SAFE_CCB_SIZE_LOG2, MAX_SAFE_CCB_SIZE_LOG2)); + return PVRSRV_ERROR_INVALID_PARAMS; + } +#if defined(PVRSRV_ENABLE_CCCB_GROW) + if ((ui32CCBMaxSizeLog2 < ui32CCBSizeLog2) || + (ui32CCBMaxSizeLog2 > MAX_SAFE_CCB_SIZE_LOG2)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s CCB maximum size is invalid (%d). Should be from %d to %d", + __func__, + aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], + ui32CCBMaxSizeLog2, ui32CCBSizeLog2, MAX_SAFE_CCB_SIZE_LOG2)); + return PVRSRV_ERROR_INVALID_PARAMS; + } +#endif + + psClientCCB = OSAllocMem(sizeof(*psClientCCB)); + if (psClientCCB == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_alloc; + } + psClientCCB->psServerCommonContext = psServerCommonContext; + +#if defined(PVRSRV_ENABLE_CCCB_GROW) + psClientCCB->ui32VirtualAllocSize = 0; + psClientCCB->pui32MappingTable = NULL; + psClientCCB->ui32ChunkSize = ui32ChunkSize; +#endif + + uiClientCCBMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN); + + uiClientCCBCtlMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_UNCACHED | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN); + + /* If connection data indicates Sync Lockup Recovery (SLR) should be disabled, + * or if the caller has set ui32ContextFlags to disable SLR for this context, + * indicate this in psClientCCB->ui32CCBFlags. 
+ */ + if ((psConnectionData->ui32ClientFlags & SRV_FLAGS_CLIENT_SLR_DISABLED) || + (ui32ContextFlags & RGX_CONTEXT_FLAG_DISABLESLR)) + { + BIT_SET(psClientCCB->ui32CCBFlags, CCB_FLAGS_SLR_DISABLED); + } + + PDUMPCOMMENT("Allocate RGXFW cCCB"); +#if defined(PVRSRV_ENABLE_CCCB_GROW) + if (BITMASK_HAS(psDevInfo->ui32DeviceFlags, RGXKM_DEVICE_STATE_CCB_GROW_EN)) + { + PHYS_HEAP *psPhysHeap = psDevInfo->psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]; + PHYS_HEAP_TYPE eHeapType = PhysHeapGetType(psPhysHeap); + + psClientCCB->ui32VirtualAllocSize = ui32VirtualAllocSize; + + /* + * Growing CCB is doubling the size. Last grow would require only ui32NumVirtChunks/2 new chunks + * because another ui32NumVirtChunks/2 is already allocated. + * Sometimes initial chunk count would be higher (when CCB size is equal to CCB maximum size) so MAX is needed. + */ + psClientCCB->pui32MappingTable = OSAllocMem(MAX(ui32NumChunks, ui32NumVirtChunks/2) * sizeof(IMG_UINT32)); + if (psClientCCB->pui32MappingTable == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_alloc_mtable; + } + for (i = 0; i < ui32NumChunks; i++) + { + psClientCCB->pui32MappingTable[i] = i; + } + + if (eHeapType == PHYS_HEAP_TYPE_LMA || + eHeapType == PHYS_HEAP_TYPE_DMA) + { + /* + * On LMA sparse memory can't be mapped to kernel. + * To work around this whole ccb memory is allocated at once as contiguous. 
+ */ + eError = DevmemFwAllocate(psDevInfo, + ui32VirtualAllocSize, + uiClientCCBMemAllocFlags, + aszCCBRequestors[eRGXCCBRequestor][REQ_RGX_FW_CLIENT_CCB_STRING], + &psClientCCB->psClientCCBMemDesc); + } + else + { + eError = DevmemFwAllocateSparse(psDevInfo, + ui32VirtualAllocSize, + ui32ChunkSize, + ui32NumChunks, + ui32NumVirtChunks, + psClientCCB->pui32MappingTable, + uiClientCCBMemAllocFlags, + aszCCBRequestors[eRGXCCBRequestor][REQ_RGX_FW_CLIENT_CCB_STRING], + &psClientCCB->psClientCCBMemDesc); + } + } + + if (eError != PVRSRV_OK) + { + OSFreeMem(psClientCCB->pui32MappingTable); + psClientCCB->pui32MappingTable = NULL; + psClientCCB->ui32VirtualAllocSize = 0; + } + + if (!BITMASK_HAS(psDevInfo->ui32DeviceFlags, RGXKM_DEVICE_STATE_CCB_GROW_EN) || + (eError != PVRSRV_OK)) +#endif /* defined(PVRSRV_ENABLE_CCCB_GROW) */ + { + /* Allocate ui32AllocSize, or the next best POT allocation */ + do + { + eError = DevmemFwAllocate(psDevInfo, + ui32AllocSize, + uiClientCCBMemAllocFlags, + aszCCBRequestors[eRGXCCBRequestor][REQ_RGX_FW_CLIENT_CCB_STRING], + &psClientCCB->psClientCCBMemDesc); + if (eError != PVRSRV_OK) + { + /* Failed to allocate - ensure CCB grow is disabled from + * now on for this device. 
+ */ + BITMASK_UNSET(psDevInfo->ui32DeviceFlags, RGXKM_DEVICE_STATE_CCB_GROW_EN); + + /* Failed to allocate, try next POT down */ + ui32AllocSize >>= 1; + } + } while ((eError != PVRSRV_OK) && (ui32AllocSize > ui32MinAllocSize)); + } + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate RGX client CCB (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_alloc_ccb; + } + + OSSNPrintf(psClientCCB->szName, MAX_CLIENT_CCB_NAME, "%s-P%lu-T%lu-%s", + aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], + (unsigned long) OSGetCurrentClientProcessIDKM(), + (unsigned long) OSGetCurrentClientThreadIDKM(), + OSGetCurrentClientProcessNameKM()); + + if (ui32AllocSize < (1U << ui32CCBSizeLog2)) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Unable to allocate %d bytes for RGX client CCB (%s) but allocated %d bytes", + __func__, + (1U << ui32CCBSizeLog2), + psClientCCB->szName, + ui32AllocSize)); + } + + eError = DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBMemDesc, + &psClientCCB->pvClientCCB); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map RGX client CCB (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_map_ccb; + } + + PDUMPCOMMENT("Allocate RGXFW cCCB control"); + eError = DevmemFwAllocate(psDevInfo, + sizeof(RGXFWIF_CCCB_CTL), + uiClientCCBCtlMemAllocFlags, + aszCCBRequestors[eRGXCCBRequestor][REQ_RGX_FW_CLIENT_CCB_CONTROL_STRING], + &psClientCCB->psClientCCBCtrlMemDesc); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate RGX client CCB control (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_alloc_ccbctrl; + } + + + eError = DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc, + (void **) &psClientCCB->psClientCCBCtrl); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map RGX client CCB control (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_map_ccbctrl; + } + + 
psClientCCB->psClientCCBCtrl->ui32WriteOffset = 0; + psClientCCB->psClientCCBCtrl->ui32ReadOffset = 0; + psClientCCB->psClientCCBCtrl->ui32DepOffset = 0; + psClientCCB->psClientCCBCtrl->ui32WrapMask = ui32AllocSize - 1; + + PDUMPCOMMENT("cCCB control"); + DevmemPDumpLoadMem(psClientCCB->psClientCCBCtrlMemDesc, + 0, + sizeof(RGXFWIF_CCCB_CTL), + PDUMP_FLAGS_CONTINUOUS); + PVR_ASSERT(eError == PVRSRV_OK); + + psClientCCB->ui32HostWriteOffset = 0; + psClientCCB->ui32LastPDumpWriteOffset = 0; + psClientCCB->ui32FinishedPDumpWriteOffset = 0; + psClientCCB->ui32Size = ui32AllocSize; + psClientCCB->ui32LastROff = ui32AllocSize - 1; + psClientCCB->ui32ByteCount = 0; + psClientCCB->ui32LastByteCount = 0; + BIT_UNSET(psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN); + +#if defined(PVRSRV_ENABLE_CCCB_GROW) + eError = OSLockCreate(&psClientCCB->hCCBGrowLock); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to create hCCBGrowLock (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_create_ccbgrow_lock; + } +#endif +#if defined(DEBUG) + psClientCCB->ui32UpdateEntries = 0; +#endif + +#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) + _RGXInitCCBUtilisation(psClientCCB); + psClientCCB->eRGXCCBRequestor = eRGXCCBRequestor; +#endif + eError = PDumpRegisterTransitionCallback(psConnectionData->psPDumpConnectionData, + _RGXCCBPDumpTransition, + psClientCCB, + psDevInfo, + &psClientCCB->hTransition); + if (eError != PVRSRV_OK) + { + goto fail_pdumpreg; + } + + /* + * Note: + * Save the PDump specific structure, which is ref counted unlike + * the connection data, to ensure it's not freed too early + */ + psClientCCB->psPDumpConnectionData = psConnectionData->psPDumpConnectionData; + PDUMPCOMMENT("New RGXFW cCCB(%s@%p) created", + psClientCCB->szName, + psClientCCB); + + *ppsClientCCB = psClientCCB; + *ppsClientCCBMemDesc = psClientCCB->psClientCCBMemDesc; + *ppsClientCCBCtrlMemDesc = psClientCCB->psClientCCBCtrlMemDesc; + return PVRSRV_OK; + 
+fail_pdumpreg: +#if defined(PVRSRV_ENABLE_CCCB_GROW) + OSLockDestroy(psClientCCB->hCCBGrowLock); +fail_create_ccbgrow_lock: +#endif + DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc); +fail_map_ccbctrl: + DevmemFwUnmapAndFree(psDevInfo, psClientCCB->psClientCCBCtrlMemDesc); +fail_alloc_ccbctrl: + DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBMemDesc); +fail_map_ccb: + DevmemFwUnmapAndFree(psDevInfo, psClientCCB->psClientCCBMemDesc); +#if defined(PVRSRV_ENABLE_CCCB_GROW) +fail_alloc_ccb: + if ( psClientCCB->ui32VirtualAllocSize > 0) + { + OSFreeMem(psClientCCB->pui32MappingTable); + } +fail_alloc_mtable: +#else +fail_alloc_ccb: +#endif + OSFreeMem(psClientCCB); +fail_alloc: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +void RGXDestroyCCB(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_CLIENT_CCB *psClientCCB) +{ +#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) + if (psClientCCB->sUtilisation.ui32CCBFull) + { + PVR_LOG(("CCBUtilisationInfo: GPU %s command buffer was full %d times out of %d. 
" + "This is not an error but the application may not run optimally.", + aszCCBRequestors[psClientCCB->eRGXCCBRequestor][REQ_PDUMP_COMMENT], + psClientCCB->sUtilisation.ui32CCBFull, + psClientCCB->sUtilisation.ui32CCBAcquired)); + } +#endif +#if defined(PVRSRV_ENABLE_CCCB_GROW) + OSLockDestroy(psClientCCB->hCCBGrowLock); +#endif + PDumpUnregisterTransitionCallback(psClientCCB->hTransition); + DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc); + DevmemFwUnmapAndFree(psDevInfo, psClientCCB->psClientCCBCtrlMemDesc); + DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBMemDesc); + DevmemFwUnmapAndFree(psDevInfo, psClientCCB->psClientCCBMemDesc); +#if defined(PVRSRV_ENABLE_CCCB_GROW) + if (psClientCCB->pui32MappingTable) + { + OSFreeMem(psClientCCB->pui32MappingTable); + } +#endif + OSFreeMem(psClientCCB); +} + +#if defined(PVRSRV_ENABLE_CCCB_GROW) +static PVRSRV_ERROR _RGXCCBMemChangeSparse(RGX_CLIENT_CCB *psClientCCB, + IMG_UINT32 ui32AllocPageCount) +{ + PVRSRV_ERROR eError; + IMG_UINT32 i; + +#ifdef PVRSRV_UNMAP_ON_SPARSE_CHANGE + DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBMemDesc); +#endif + + for (i = 0; i < ui32AllocPageCount; i++) + { + psClientCCB->pui32MappingTable[i] = ui32AllocPageCount + i; + } + + /* Double the CCB size (CCB must be POT) by adding ui32AllocPageCount new pages */ + eError = DeviceMemChangeSparse(psClientCCB->psClientCCBMemDesc, + ui32AllocPageCount, + psClientCCB->pui32MappingTable, + 0, + NULL, +#if !defined(PVRSRV_UNMAP_ON_SPARSE_CHANGE) + SPARSE_MAP_CPU_ADDR | +#endif + SPARSE_RESIZE_ALLOC); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXAcquireCCB: Failed to grow RGX client CCB (%s)", + PVRSRVGetErrorString(eError))); + +#ifdef PVRSRV_UNMAP_ON_SPARSE_CHANGE + if (DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBMemDesc, + &psClientCCB->pvClientCCB) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXAcquireCCB: Failed to reacquire CCB mapping")); + psClientCCB->pvClientCCB = NULL; + } +#endif + + return eError; 
+ } + +#ifdef PVRSRV_UNMAP_ON_SPARSE_CHANGE + eError = DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBMemDesc, + &psClientCCB->pvClientCCB); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXAcquireCCB: Failed to map RGX client CCB (%s)", + PVRSRVGetErrorString(eError))); + return eError; + } +#endif + + return PVRSRV_OK; +} +#endif /* defined(PVRSRV_ENABLE_CCCB_GROW) */ + +PVRSRV_ERROR RGXCheckSpaceCCB(RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui32CmdSize) +{ + IMG_UINT32 ui32FreeSpace; + + /* Check that the CCB can hold this command + padding */ + if ((ui32CmdSize + PADDING_COMMAND_SIZE + 1) > psClientCCB->ui32Size) + { + PVR_DPF((PVR_DBG_ERROR, "Command size (%d bytes) too big for CCB" + " (%d bytes)", ui32CmdSize, psClientCCB->ui32Size)); + return PVRSRV_ERROR_CMD_TOO_BIG; + } + + /* + Check we don't overflow the end of the buffer and make sure we have + enough space for the padding command. If we don't have enough space + (including the minimum amount for the padding command) we need to make + sure we insert a padding command now and wrap before adding the main + command. 
+ */ + if ((psClientCCB->ui32HostWriteOffset + ui32CmdSize + PADDING_COMMAND_SIZE) <= psClientCCB->ui32Size) + { + ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset, + psClientCCB->psClientCCBCtrl->ui32ReadOffset, + psClientCCB->ui32Size); + + /* Don't allow all the space to be used */ + if (ui32FreeSpace > ui32CmdSize) + { + return PVRSRV_OK; + } + + goto e_retry; + } + else + { + IMG_UINT32 ui32Remain = psClientCCB->ui32Size - psClientCCB->ui32HostWriteOffset; + + ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset, + psClientCCB->psClientCCBCtrl->ui32ReadOffset, + psClientCCB->ui32Size); + + /* Check there is space for both the command and the padding command */ + if (ui32FreeSpace > ui32Remain + ui32CmdSize) + { + return PVRSRV_OK; + } + + goto e_retry; + } + +e_retry: +#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) + _RGXCCBUtilisationEvent(psClientCCB, + PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_FULL_CCB, + ui32CmdSize); +#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */ + + return PVRSRV_ERROR_RETRY; +} + +/****************************************************************************** + FUNCTION : RGXAcquireCCB + + PURPOSE : Obtains access to write some commands to a CCB + + PARAMETERS : psClientCCB - The client CCB + ui32CmdSize - How much space is required + ppvBufferSpace - Pointer to space in the buffer + ui32PDumpFlags - Should this be PDump continuous? 
+ + RETURNS : PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXAcquireCCB(RGX_CLIENT_CCB *psClientCCB, + IMG_UINT32 ui32CmdSize, + void **ppvBufferSpace, + IMG_UINT32 ui32PDumpFlags) +{ +#if defined(PVRSRV_ENABLE_CCCB_GROW) + IMG_UINT32 ui32RetryCount = 2; +#endif + +#if defined(PDUMP) + PVRSRV_ERROR eError; + IMG_BOOL bInCaptureRange; + IMG_BOOL bPdumpEnabled; + IMG_UINT64 ui64PDumpState = 0; + + PDumpGetStateKM(&ui64PDumpState); + PDumpIsCaptureFrameKM(&bInCaptureRange); + bPdumpEnabled = (ui64PDumpState & PDUMP_STATE_CONNECTED) != 0 + && (bInCaptureRange || PDUMP_IS_CONTINUOUS(ui32PDumpFlags)); + + /* + PDumpSetFrame will detect as we Transition into capture range for + frame based data but if we are PDumping continuous data then we + need to inform the PDump layer ourselves + */ + if ((ui64PDumpState & PDUMP_STATE_CONNECTED) != 0 + && PDUMP_IS_CONTINUOUS(ui32PDumpFlags) + && !bInCaptureRange) + { + eError = PDumpTransition(psClientCCB->psPDumpConnectionData, PDUMP_TRANSITION_EVENT_RANGE_ENTERED, ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + return eError; + } + } +#endif + + /* Check that the CCB can hold this command + padding */ + if ((ui32CmdSize + PADDING_COMMAND_SIZE + 1) > psClientCCB->ui32Size) + { + PVR_DPF((PVR_DBG_ERROR, "Command size (%d bytes) too big for CCB (%d bytes)", + ui32CmdSize, psClientCCB->ui32Size)); + return PVRSRV_ERROR_CMD_TOO_BIG; + } + +#if defined(PVRSRV_ENABLE_CCCB_GROW) + while (ui32RetryCount--) +#endif + { +#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) + psClientCCB->sUtilisation.ui32CCBAcquired++; +#endif + + /* + Check we don't overflow the end of the buffer and make sure we have + enough space for the padding command. We don't have enough space (including the + minimum amount for the padding command) we will need to make sure we insert a + padding command now and wrap before adding the main command. 
+ */ + if ((psClientCCB->ui32HostWriteOffset + ui32CmdSize + PADDING_COMMAND_SIZE) <= psClientCCB->ui32Size) + { + /* The command can fit without wrapping... */ + IMG_UINT32 ui32FreeSpace; + +#if defined(PDUMP) + /* Wait for sufficient CCB space to become available */ + PDUMPCOMMENTWITHFLAGS(0, "Wait for %u bytes to become available according cCCB Ctl (woff=%x) for %s", + ui32CmdSize, psClientCCB->ui32HostWriteOffset, + psClientCCB->szName); + DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc, + offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset), + psClientCCB->ui32HostWriteOffset, + ui32CmdSize, + psClientCCB->ui32Size); +#endif + + ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset, + psClientCCB->psClientCCBCtrl->ui32ReadOffset, + psClientCCB->ui32Size); + + /* Can command fit? */ + if (ui32FreeSpace > ui32CmdSize) + { + *ppvBufferSpace = IMG_OFFSET_ADDR(psClientCCB->pvClientCCB, psClientCCB->ui32HostWriteOffset); + return PVRSRV_OK; + } + /* There is not enough free space in CCB. */ + goto e_retry; + } + else + { + /* + We're at the end of the buffer without enough contiguous space. + The command cannot fit without wrapping, we need to insert a + padding command and wrap. We need to do this in one go otherwise + we would be leaving unflushed commands and forcing the client to + deal with flushing the padding command but not the command they + wanted to write. Therefore we either do all or nothing. + */ + RGXFWIF_CCB_CMD_HEADER *psHeader; + IMG_UINT32 ui32FreeSpace; + IMG_UINT32 ui32Remain = psClientCCB->ui32Size - psClientCCB->ui32HostWriteOffset; + +#if defined(PVRSRV_ENABLE_CCCB_GROW) + /* Check this is a growable CCB */ + if (psClientCCB->ui32VirtualAllocSize > 0) + { + PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psClientCCB->psServerCommonContext); + + ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset, + psClientCCB->psClientCCBCtrl->ui32ReadOffset, + psClientCCB->ui32Size); + /* + * Check if CCB should grow or be wrapped. 
Wrap CCB if there is no need to grow (CCB is half empty) or the CCB can't grow,
+ * and when there is free space for the command and padding.
+ */ + if (eHeapType != PHYS_HEAP_TYPE_LMA && + eHeapType != PHYS_HEAP_TYPE_DMA) + { + IMG_UINT32 ui32AllocChunkCount = psClientCCB->ui32Size / psClientCCB->ui32ChunkSize; + + eErr = _RGXCCBMemChangeSparse(psClientCCB, ui32AllocChunkCount); + } + + /* Setup new CCB size */ + if (eErr == PVRSRV_OK) + { + psClientCCB->ui32Size += psClientCCB->ui32Size; + } + else + { + PVR_LOG(("%s: Client CCB (%s) grow failed (%s)", __func__, psClientCCB->szName, PVRSRVGetErrorString(eErr))); + OSLockRelease(psClientCCB->hCCBGrowLock); + goto e_retry; + } + +#if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "cCCB update for grow"); + if (bPdumpEnabled) + { + DevmemPDumpLoadMem(psClientCCB->psClientCCBCtrlMemDesc, + offsetof(RGXFWIF_CCCB_CTL, ui32WrapMask), + sizeof(psClientCCB->psClientCCBCtrl->ui32WrapMask), + ui32PDumpFlags); + DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc, + offsetof(RGX_CLIENT_CCB, ui32Size), + sizeof(psClientCCB->ui32Size), + ui32PDumpFlags); + } +#endif /* defined(PVRSRV_ENABLE_CCCB_GROW) */ + +#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) + PVR_LOG(("%s: Client CCB (%s) grew to %u", __func__, psClientCCB->szName, psClientCCB->ui32Size)); + /* Reset counters */ + _RGXInitCCBUtilisation(psClientCCB); +#endif + + /* CCB doubled the size so retry now. 
*/ + OSLockRelease(psClientCCB->hCCBGrowLock); + } + else + { + /* CCB can't grow anymore and can't be wrapped */ +#if defined(PDUMP) + /* Wait for sufficient CCB space to become available */ + PDUMPCOMMENTWITHFLAGS(0, "Wait for %u bytes to become available according cCCB Ctl (woff=%x) for %s", + ui32Remain, psClientCCB->ui32HostWriteOffset, + psClientCCB->szName); + DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc, + offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset), + psClientCCB->ui32HostWriteOffset, + ui32Remain, + psClientCCB->ui32Size); + PDUMPCOMMENTWITHFLAGS(0, "Wait for %u bytes to become available according cCCB Ctl (woff=%x) for %s", + ui32CmdSize, 0 /*ui32HostWriteOffset after wrap */, + psClientCCB->szName); + DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc, + offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset), + 0 /*ui32HostWriteOffset after wrap */, + ui32CmdSize, + psClientCCB->ui32Size); + /* CCB has now space for our command so try wrapping again. Retry now. */ +#else /* defined(PDUMP) */ + goto e_retry; +#endif /* defined(PDUMP) */ + } + } + else +#endif /* defined(PVRSRV_ENABLE_CCCB_GROW) */ + { +#if defined(PDUMP) + /* Wait for sufficient CCB space to become available */ + PDUMPCOMMENTWITHFLAGS(0, "Wait for %u bytes to become available according cCCB Ctl (woff=%x) for %s", + ui32Remain, psClientCCB->ui32HostWriteOffset, + psClientCCB->szName); + DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc, + offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset), + psClientCCB->ui32HostWriteOffset, + ui32Remain, + psClientCCB->ui32Size); + PDUMPCOMMENTWITHFLAGS(0, "Wait for %u bytes to become available according cCCB Ctl (woff=%x) for %s", + ui32CmdSize, 0 /*ui32HostWriteOffset after wrap */, + psClientCCB->szName); + DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc, + offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset), + 0 /*ui32HostWriteOffset after wrap */, + ui32CmdSize, + psClientCCB->ui32Size); +#endif + ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset, + 
psClientCCB->psClientCCBCtrl->ui32ReadOffset, + psClientCCB->ui32Size); + + if (ui32FreeSpace > ui32Remain + ui32CmdSize) + { + psHeader = IMG_OFFSET_ADDR(psClientCCB->pvClientCCB, psClientCCB->ui32HostWriteOffset); + psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_PADDING; + psHeader->ui32CmdSize = ui32Remain - sizeof(RGXFWIF_CCB_CMD_HEADER); +#if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "cCCB(%p): Padding cmd %d", psClientCCB, psHeader->ui32CmdSize); + if (bPdumpEnabled) + { + DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc, + psClientCCB->ui32HostWriteOffset, + ui32Remain, + ui32PDumpFlags); + } +#endif + + *ppvBufferSpace = psClientCCB->pvClientCCB; + return PVRSRV_OK; + } + + goto e_retry; + } + } + } +e_retry: +#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) + psClientCCB->sUtilisation.ui32CCBFull++; + _RGXCCBUtilisationEvent(psClientCCB, + PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_ACQUIRE_FAILED, + ui32CmdSize); +#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */ + return PVRSRV_ERROR_RETRY; +} + +/****************************************************************************** + FUNCTION : RGXReleaseCCB + + PURPOSE : Release a CCB that we have been writing to. + + PARAMETERS : psDevData - device data + psCCB - the CCB + + RETURNS : None +******************************************************************************/ +void RGXReleaseCCB(RGX_CLIENT_CCB *psClientCCB, + IMG_UINT32 ui32CmdSize, + IMG_UINT32 ui32PDumpFlags) +{ + IMG_BOOL bInCaptureRange; + IMG_BOOL bPdumpEnabled; + IMG_UINT64 ui64PDumpState = 0; + + PDumpGetStateKM(&ui64PDumpState); + PDumpIsCaptureFrameKM(&bInCaptureRange); + bPdumpEnabled = (ui64PDumpState & PDUMP_STATE_CONNECTED) != 0 + && (bInCaptureRange || PDUMP_IS_CONTINUOUS(ui32PDumpFlags)); + +#if defined(PVRSRV_ENABLE_CCCB_GROW) + OSLockAcquire(psClientCCB->hCCBGrowLock); +#endif + /* + * If a padding command was needed then we should now move ui32HostWriteOffset + * forward. The command has already be dumped (if bPdumpEnabled). 
+ */ + if ((psClientCCB->ui32HostWriteOffset + ui32CmdSize + PADDING_COMMAND_SIZE) > psClientCCB->ui32Size) + { + IMG_UINT32 ui32Remain = psClientCCB->ui32Size - psClientCCB->ui32HostWriteOffset; + + UPDATE_CCB_OFFSET(psClientCCB->ui32HostWriteOffset, + ui32Remain, + psClientCCB->ui32Size); + psClientCCB->ui32ByteCount += ui32Remain; + } + + /* Dump the CCB data */ + if (bPdumpEnabled) + { + DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc, + psClientCCB->ui32HostWriteOffset, + ui32CmdSize, + ui32PDumpFlags); + } + + /* + * Check if there any fences being written that will already be + * satisfied by the last written update command in this CCB. At the + * same time we can ASSERT that all sync addresses are not NULL. + */ +#if defined(DEBUG) + { + void *pvBufferStart = IMG_OFFSET_ADDR(psClientCCB->pvClientCCB, psClientCCB->ui32HostWriteOffset); + void *pvBufferEnd = IMG_OFFSET_ADDR(psClientCCB->pvClientCCB, psClientCCB->ui32HostWriteOffset + ui32CmdSize); + IMG_BOOL bMessagePrinted = IMG_FALSE; + + /* Walk through the commands in this section of CCB being released... */ + while (pvBufferStart < pvBufferEnd) + { + RGXFWIF_CCB_CMD_HEADER *psCmdHeader = pvBufferStart; + + if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_UPDATE) + { + /* If an UPDATE then record the values incase an adjacent fence uses it. */ + IMG_UINT32 ui32NumUFOs = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO); + RGXFWIF_UFO *psUFOPtr = IMG_OFFSET_ADDR(pvBufferStart, sizeof(RGXFWIF_CCB_CMD_HEADER)); + + psClientCCB->ui32UpdateEntries = 0; + while (ui32NumUFOs-- > 0) + { + PVR_ASSERT(psUFOPtr->puiAddrUFO.ui32Addr != 0); + if (psClientCCB->ui32UpdateEntries < RGX_CCCB_FENCE_UPDATE_LIST_SIZE) + { + psClientCCB->asFenceUpdateList[psClientCCB->ui32UpdateEntries++] = *psUFOPtr++; + } + } + } + else if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE) + { + /* If a FENCE then check the values against the last UPDATE issued. 
*/ + IMG_UINT32 ui32NumUFOs = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO); + RGXFWIF_UFO *psUFOPtr = IMG_OFFSET_ADDR(pvBufferStart, sizeof(RGXFWIF_CCB_CMD_HEADER)); + + while (ui32NumUFOs-- > 0) + { + PVR_ASSERT(psUFOPtr->puiAddrUFO.ui32Addr != 0); + + if (bMessagePrinted == IMG_FALSE) + { + RGXFWIF_UFO *psUpdatePtr = psClientCCB->asFenceUpdateList; + IMG_UINT32 ui32UpdateIndex; + + for (ui32UpdateIndex = 0; ui32UpdateIndex < psClientCCB->ui32UpdateEntries; ui32UpdateIndex++) + { + if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr)) + { + if (RGX_UFO_PTR_ADDR(psUFOPtr) == RGX_UFO_PTR_ADDR(psUpdatePtr)) + { + PVR_DPF((PVR_DBG_MESSAGE, "Redundant sync checkpoint check found in cCCB(%p) - 0x%x -> 0x%x", + psClientCCB, RGX_UFO_PTR_ADDR(psUFOPtr), psUFOPtr->ui32Value)); + bMessagePrinted = IMG_TRUE; + break; + } + } + else + { + if (psUFOPtr->puiAddrUFO.ui32Addr == psUpdatePtr->puiAddrUFO.ui32Addr && + psUFOPtr->ui32Value == psUpdatePtr->ui32Value) + { + PVR_DPF((PVR_DBG_MESSAGE, "Redundant fence check found in cCCB(%p) - 0x%x -> 0x%x", + psClientCCB, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value)); + bMessagePrinted = IMG_TRUE; + break; + } + } + psUpdatePtr++; + } + } + + psUFOPtr++; + } + } + else if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR || + psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE) + { + /* For all other UFO ops check the UFO address is not NULL. */ + IMG_UINT32 ui32NumUFOs = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO); + RGXFWIF_UFO *psUFOPtr = IMG_OFFSET_ADDR(pvBufferStart, sizeof(RGXFWIF_CCB_CMD_HEADER)); + + while (ui32NumUFOs-- > 0) + { + PVR_ASSERT(psUFOPtr->puiAddrUFO.ui32Addr != 0); + psUFOPtr++; + } + } + + /* Move to the next command in this section of CCB being released... 
*/ + pvBufferStart = IMG_OFFSET_ADDR(pvBufferStart, sizeof(RGXFWIF_CCB_CMD_HEADER) + psCmdHeader->ui32CmdSize); + } + } +#endif /* REDUNDANT_SYNCS_DEBUG */ + + +#if defined(PVRSRV_FORCE_FLUSH_CCCB_ON_KICK) + { + DEVMEM_MEMDESC* psClientCCBMemDesc = psClientCCB->psClientCCBMemDesc; + void *pvClientCCBAddr = psClientCCB->pvClientCCB; + PMR *psClientCCBMemDescPMR = NULL; + IMG_DEVMEM_OFFSET_T uiPMROffset; + + DevmemGetPMRData(psClientCCBMemDesc, + (IMG_HANDLE*)&psClientCCBMemDescPMR, + &uiPMROffset); + + CacheOpValExec(psClientCCBMemDescPMR, + (IMG_UINT64)(uintptr_t) pvClientCCBAddr, + uiPMROffset, + psClientCCBMemDesc->uiAllocSize, + PVRSRV_CACHE_OP_FLUSH); + + } +#endif + /* + * Update the CCB write offset. + */ + UPDATE_CCB_OFFSET(psClientCCB->ui32HostWriteOffset, + ui32CmdSize, + psClientCCB->ui32Size); + psClientCCB->ui32ByteCount += ui32CmdSize; + +#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) + _RGXUpdateCCBUtilisation(psClientCCB); +#endif + /* + PDumpSetFrame will detect as we Transition out of capture range for + frame based data but if we are PDumping continuous data then we + need to inform the PDump layer ourselves + */ + if ((ui64PDumpState & PDUMP_STATE_CONNECTED) != 0 + && PDUMP_IS_CONTINUOUS(ui32PDumpFlags) + && !bInCaptureRange) + { + PVRSRV_ERROR eError; + + /* Only Transitioning into capture range can cause an error */ + eError = PDumpTransition(psClientCCB->psPDumpConnectionData, PDUMP_TRANSITION_EVENT_RANGE_EXITED, ui32PDumpFlags); + PVR_ASSERT(eError == PVRSRV_OK); + } + + if (bPdumpEnabled) + { + if (!BIT_ISSET(psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN)) + { + /* Store offset to last finished CCB command. This offset can + * be needed when appending commands to a non finished CCB. 
+ */ + psClientCCB->ui32FinishedPDumpWriteOffset = psClientCCB->ui32LastPDumpWriteOffset; + } + + /* Update the PDump write offset to show we PDumped this command */ + psClientCCB->ui32LastPDumpWriteOffset = psClientCCB->ui32HostWriteOffset; + } + +#if defined(NO_HARDWARE) + /* + The firmware is not running, it cannot update these; we do here instead. + */ + psClientCCB->psClientCCBCtrl->ui32ReadOffset = psClientCCB->ui32HostWriteOffset; + psClientCCB->psClientCCBCtrl->ui32DepOffset = psClientCCB->ui32HostWriteOffset; +#endif + +#if defined(PVRSRV_ENABLE_CCCB_GROW) + OSLockRelease(psClientCCB->hCCBGrowLock); +#endif +} + +IMG_UINT32 RGXGetHostWriteOffsetCCB(RGX_CLIENT_CCB *psClientCCB) +{ + return psClientCCB->ui32HostWriteOffset; +} + +IMG_UINT32 RGXGetWrapMaskCCB(RGX_CLIENT_CCB *psClientCCB) +{ + return psClientCCB->ui32Size-1; +} + +PVRSRV_ERROR RGXSetCCBFlags(RGX_CLIENT_CCB *psClientCCB, + IMG_UINT32 ui32Flags) +{ + if ((ui32Flags & RGX_CONTEXT_FLAG_DISABLESLR)) + { + BIT_SET(psClientCCB->ui32CCBFlags, CCB_FLAGS_SLR_DISABLED); + } + else + { + BIT_UNSET(psClientCCB->ui32CCBFlags, CCB_FLAGS_SLR_DISABLED); + } + return PVRSRV_OK; +} + +void RGXCmdHelperInitCmdCCB_CommandSize(IMG_UINT64 ui64FBSCEntryMask, + IMG_UINT32 ui32ClientFenceCount, + IMG_UINT32 ui32ClientUpdateCount, + IMG_UINT32 ui32CmdSize, + PRGXFWIF_TIMESTAMP_ADDR *ppPreAddr, + PRGXFWIF_TIMESTAMP_ADDR *ppPostAddr, + PRGXFWIF_UFO_ADDR *ppRMWUFOAddr, + RGX_CCB_CMD_HELPER_DATA *psCmdHelperData) +{ + /* Init the generated data members */ + psCmdHelperData->ui32FBSCInvalCmdSize = 0; + psCmdHelperData->ui64FBSCEntryMask = 0; + psCmdHelperData->ui32FenceCmdSize = 0; + psCmdHelperData->ui32UpdateCmdSize = 0; + psCmdHelperData->ui32PreTimeStampCmdSize = 0; + psCmdHelperData->ui32PostTimeStampCmdSize = 0; + psCmdHelperData->ui32RMWUFOCmdSize = 0; + + /* Total FBSC invalidate command size (header plus command data) */ + + if (ui64FBSCEntryMask != 0) + { + psCmdHelperData->ui32FBSCInvalCmdSize = + 
RGX_CCB_FWALLOC_ALIGN(sizeof(psCmdHelperData->ui64FBSCEntryMask) + + sizeof(RGXFWIF_CCB_CMD_HEADER)); + psCmdHelperData->ui64FBSCEntryMask = ui64FBSCEntryMask; + } + + /* total DM command size (header plus command data) */ + + psCmdHelperData->ui32DMCmdSize = + RGX_CCB_FWALLOC_ALIGN(ui32CmdSize + sizeof(RGXFWIF_CCB_CMD_HEADER)); + + if (ui32ClientFenceCount != 0) + { + psCmdHelperData->ui32FenceCmdSize = + RGX_CCB_FWALLOC_ALIGN(ui32ClientFenceCount * sizeof(RGXFWIF_UFO) + + sizeof(RGXFWIF_CCB_CMD_HEADER)); + } + + if (ui32ClientUpdateCount != 0) + { + psCmdHelperData->ui32UpdateCmdSize = + RGX_CCB_FWALLOC_ALIGN(ui32ClientUpdateCount * sizeof(RGXFWIF_UFO) + + sizeof(RGXFWIF_CCB_CMD_HEADER)); + } + + if (ppPreAddr && (ppPreAddr->ui32Addr != 0)) + { + psCmdHelperData->ui32PreTimeStampCmdSize = sizeof(RGXFWIF_CCB_CMD_HEADER) + + ((sizeof(RGXFWIF_DEV_VIRTADDR) + RGXFWIF_FWALLOC_ALIGN - 1) & ~(RGXFWIF_FWALLOC_ALIGN - 1)); + } + + if (ppPostAddr && (ppPostAddr->ui32Addr != 0)) + { + psCmdHelperData->ui32PostTimeStampCmdSize = sizeof(RGXFWIF_CCB_CMD_HEADER) + + ((sizeof(RGXFWIF_DEV_VIRTADDR) + RGXFWIF_FWALLOC_ALIGN - 1) & ~(RGXFWIF_FWALLOC_ALIGN - 1)); + } + + if (ppRMWUFOAddr && (ppRMWUFOAddr->ui32Addr != 0)) + { + psCmdHelperData->ui32RMWUFOCmdSize = sizeof(RGXFWIF_CCB_CMD_HEADER) + sizeof(RGXFWIF_UFO); + } +} + +/* + Work out how much space this command will require +*/ +void RGXCmdHelperInitCmdCCB_OtherData(RGX_CLIENT_CCB *psClientCCB, + IMG_UINT32 ui32ClientFenceCount, + PRGXFWIF_UFO_ADDR *pauiFenceUFOAddress, + IMG_UINT32 *paui32FenceValue, + IMG_UINT32 ui32ClientUpdateCount, + PRGXFWIF_UFO_ADDR *pauiUpdateUFOAddress, + IMG_UINT32 *paui32UpdateValue, + IMG_UINT32 ui32CmdSize, + IMG_PBYTE pui8DMCmd, + PRGXFWIF_TIMESTAMP_ADDR *ppPreAddr, + PRGXFWIF_TIMESTAMP_ADDR *ppPostAddr, + PRGXFWIF_UFO_ADDR *ppRMWUFOAddr, + RGXFWIF_CCB_CMD_TYPE eType, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + IMG_UINT32 ui32PDumpFlags, + RGXFWIF_WORKEST_KICK_DATA 
*psWorkEstKickData, + IMG_CHAR *pszCommandName, + IMG_BOOL bCCBStateOpen, + RGX_CCB_CMD_HELPER_DATA *psCmdHelperData) +{ + /* Job reference values */ + psCmdHelperData->ui32ExtJobRef = ui32ExtJobRef; + psCmdHelperData->ui32IntJobRef = ui32IntJobRef; + + /* Save the data we require in the submit call */ + psCmdHelperData->psClientCCB = psClientCCB; +#if defined(PDUMP) + psCmdHelperData->ui32PDumpFlags = ui32PDumpFlags; +#endif + psCmdHelperData->pszCommandName = pszCommandName; + if (bCCBStateOpen) + { + BIT_SET(psCmdHelperData->psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN); + } + else + { + BIT_UNSET(psCmdHelperData->psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN); + } + + /* Client sync data */ + psCmdHelperData->ui32ClientFenceCount = ui32ClientFenceCount; + psCmdHelperData->pauiFenceUFOAddress = pauiFenceUFOAddress; + psCmdHelperData->paui32FenceValue = paui32FenceValue; + psCmdHelperData->ui32ClientUpdateCount = ui32ClientUpdateCount; + psCmdHelperData->pauiUpdateUFOAddress = pauiUpdateUFOAddress; + psCmdHelperData->paui32UpdateValue = paui32UpdateValue; + + /* Command data */ + psCmdHelperData->ui32CmdSize = ui32CmdSize; + psCmdHelperData->pui8DMCmd = pui8DMCmd; + psCmdHelperData->eType = eType; + + if (ppPreAddr) + { + psCmdHelperData->pPreTimestampAddr = *ppPreAddr; + } + + if (ppPostAddr) + { + psCmdHelperData->pPostTimestampAddr = *ppPostAddr; + } + + if (ppRMWUFOAddr) + { + psCmdHelperData->pRMWUFOAddr = *ppRMWUFOAddr; + } + + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, + "%s Command Server Init on FWCtx %08x", pszCommandName, + FWCommonContextGetFWAddress(psClientCCB->psServerCommonContext).ui32Addr); + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* Workload Data added */ + psCmdHelperData->psWorkEstKickData = psWorkEstKickData; +#endif +} + +/* + Work out how much space this command will require +*/ +void RGXCmdHelperInitCmdCCB(RGX_CLIENT_CCB *psClientCCB, + IMG_UINT64 ui64FBSCEntryMask, + IMG_UINT32 ui32ClientFenceCount, + PRGXFWIF_UFO_ADDR 
*pauiFenceUFOAddress, + IMG_UINT32 *paui32FenceValue, + IMG_UINT32 ui32ClientUpdateCount, + PRGXFWIF_UFO_ADDR *pauiUpdateUFOAddress, + IMG_UINT32 *paui32UpdateValue, + IMG_UINT32 ui32CmdSize, + IMG_PBYTE pui8DMCmd, + PRGXFWIF_TIMESTAMP_ADDR *ppPreAddr, + PRGXFWIF_TIMESTAMP_ADDR *ppPostAddr, + PRGXFWIF_UFO_ADDR *ppRMWUFOAddr, + RGXFWIF_CCB_CMD_TYPE eType, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + IMG_UINT32 ui32PDumpFlags, + RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData, + IMG_CHAR *pszCommandName, + IMG_BOOL bCCBStateOpen, + RGX_CCB_CMD_HELPER_DATA *psCmdHelperData) +{ + RGXCmdHelperInitCmdCCB_CommandSize(ui64FBSCEntryMask, + ui32ClientFenceCount, + ui32ClientUpdateCount, + ui32CmdSize, + ppPreAddr, + ppPostAddr, + ppRMWUFOAddr, + psCmdHelperData); + + RGXCmdHelperInitCmdCCB_OtherData(psClientCCB, + ui32ClientFenceCount, + pauiFenceUFOAddress, + paui32FenceValue, + ui32ClientUpdateCount, + pauiUpdateUFOAddress, + paui32UpdateValue, + ui32CmdSize, + pui8DMCmd, + ppPreAddr, + ppPostAddr, + ppRMWUFOAddr, + eType, + ui32ExtJobRef, + ui32IntJobRef, + ui32PDumpFlags, + psWorkEstKickData, + pszCommandName, + bCCBStateOpen, + psCmdHelperData); +} + +/* + Reserve space in the CCB and fill in the command and client sync data +*/ +PVRSRV_ERROR RGXCmdHelperAcquireCmdCCB(IMG_UINT32 ui32CmdCount, + RGX_CCB_CMD_HELPER_DATA *asCmdHelperData) +{ + const IMG_UINT32 ui32MaxUFOCmdSize = RGX_CCB_FWALLOC_ALIGN((RGXFWIF_CCB_CMD_MAX_UFOS * sizeof(RGXFWIF_UFO)) + + sizeof(RGXFWIF_CCB_CMD_HEADER)); + IMG_UINT32 ui32AllocSize = 0; + IMG_UINT32 i; + void *pvStartPtr; + PVRSRV_ERROR eError; + + /* + Check the number of fences & updates are valid. 
+ */ + for (i = 0; i < ui32CmdCount; i++) + { + RGX_CCB_CMD_HELPER_DATA *psCmdHelperData = &asCmdHelperData[i]; + + if (psCmdHelperData->ui32FenceCmdSize > ui32MaxUFOCmdSize || + psCmdHelperData->ui32UpdateCmdSize > ui32MaxUFOCmdSize) + { + return PVRSRV_ERROR_TOO_MANY_SYNCS; + } + } + + /* + Work out how much space we need for all the command(s) + */ + ui32AllocSize = RGXCmdHelperGetCommandSize(ui32CmdCount, asCmdHelperData); + +#if defined(PDUMP) + for (i = 0; i < ui32CmdCount; i++) + { + if ((asCmdHelperData[0].ui32PDumpFlags ^ asCmdHelperData[i].ui32PDumpFlags) & PDUMP_FLAGS_CONTINUOUS) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PDump continuous is not consistent (%s != %s) for command %d", + __func__, + PDUMP_IS_CONTINUOUS(asCmdHelperData[0].ui32PDumpFlags)?"IMG_TRUE":"IMG_FALSE", + PDUMP_IS_CONTINUOUS(asCmdHelperData[i].ui32PDumpFlags)?"IMG_TRUE":"IMG_FALSE", + ui32CmdCount)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + } +#endif + + /* + Acquire space in the CCB for all the command(s). + */ + eError = RGXAcquireCCB(asCmdHelperData[0].psClientCCB, + ui32AllocSize, + &pvStartPtr, + asCmdHelperData[0].ui32PDumpFlags); + if (unlikely(eError != PVRSRV_OK)) + { + return eError; + } + + /* + For each command fill in the fence, DM, and update command + + */ + for (i = 0; i < ui32CmdCount; i++) + { + RGX_CCB_CMD_HELPER_DATA *psCmdHelperData = & asCmdHelperData[i]; + void *pvCmdPtr; +#if defined(PDUMP) + IMG_UINT32 ui32CtxAddr = FWCommonContextGetFWAddress(asCmdHelperData->psClientCCB->psServerCommonContext).ui32Addr; + IMG_UINT32 ui32CcbWoff = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(asCmdHelperData->psClientCCB->psServerCommonContext)); +#endif + + if (psCmdHelperData->ui32ClientFenceCount+psCmdHelperData->ui32ClientUpdateCount != 0) + { + PDUMPCOMMENT("Start of %s client syncs for cmd[%d] on FWCtx %08x Woff 0x%x bytes", + psCmdHelperData->psClientCCB->szName, i, ui32CtxAddr, ui32CcbWoff); + } + + pvCmdPtr = pvStartPtr; + + /* + Create the fence command. 
+ */ + if (psCmdHelperData->ui32FenceCmdSize) + { + RGXFWIF_CCB_CMD_HEADER *psHeader; + IMG_UINT k, uiNextValueIndex; + + psHeader = pvCmdPtr; + psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_FENCE; + + psHeader->ui32CmdSize = psCmdHelperData->ui32FenceCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER); + psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef; + psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef; +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + psHeader->sWorkEstKickData.ui64ReturnDataIndex = 0; + psHeader->sWorkEstKickData.ui64Deadline = 0; + psHeader->sWorkEstKickData.ui64CyclesPrediction = 0; +#endif + + pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER)); + + /* Fill in the client fences */ + uiNextValueIndex = 0; + for (k = 0; k < psCmdHelperData->ui32ClientFenceCount; k++) + { + RGXFWIF_UFO *psUFOPtr = pvCmdPtr; + + psUFOPtr->puiAddrUFO = psCmdHelperData->pauiFenceUFOAddress[k]; + + if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr)) + { + psUFOPtr->ui32Value = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; + } + else + { + /* Only increment uiNextValueIndex for non sync checkpoints + * (as paui32FenceValue only contains values for sync prims) + */ + psUFOPtr->ui32Value = psCmdHelperData->paui32FenceValue[uiNextValueIndex++]; + } + pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_UFO)); + +#if defined(SYNC_COMMAND_DEBUG) + PVR_DPF((PVR_DBG_ERROR, "%s client sync fence - 0x%x -> 0x%x", + psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value)); +#endif + PDUMPCOMMENT(".. %s client sync fence - 0x%x -> 0x%x", + psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value); + + + } + } + + /* + Create the FBSC invalidate command. 
+ */ + if (psCmdHelperData->ui32FBSCInvalCmdSize) + { + RGXFWIF_CCB_CMD_HEADER *psHeader; + IMG_UINT64 *pui64FBSCInvalCmdData; + + /* pui8CmdPtr */ + + psHeader = pvCmdPtr; + psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_FBSC_INVALIDATE; + + psHeader->ui32CmdSize = psCmdHelperData->ui32FBSCInvalCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER); + psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef; + psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef; +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + psHeader->sWorkEstKickData.ui64ReturnDataIndex = 0; + psHeader->sWorkEstKickData.ui64Deadline = 0; + psHeader->sWorkEstKickData.ui64CyclesPrediction = 0; +#endif + pui64FBSCInvalCmdData = IMG_OFFSET_ADDR(psHeader, sizeof(RGXFWIF_CCB_CMD_HEADER)); + *pui64FBSCInvalCmdData = psCmdHelperData->ui64FBSCEntryMask; + /* leap over the FBSC invalidate command */ + pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, psCmdHelperData->ui32FBSCInvalCmdSize); + + } + + /* + Create the pre DM timestamp commands. Pre and Post timestamp commands are supposed to + sandwich the DM cmd. The padding code with the CCB wrap upsets the FW if we don't have + the task type bit cleared for POST_TIMESTAMPs. That's why we have 2 different cmd types. 
+ */ + if (psCmdHelperData->ui32PreTimeStampCmdSize != 0) + { + RGXWriteTimestampCommand(&pvCmdPtr, + RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP, + psCmdHelperData->pPreTimestampAddr); + } + + /* + Create the DM command + */ + if (psCmdHelperData->ui32DMCmdSize) + { + RGXFWIF_CCB_CMD_HEADER *psHeader; + + psHeader = pvCmdPtr; + psHeader->eCmdType = psCmdHelperData->eType; + + psHeader->ui32CmdSize = psCmdHelperData->ui32DMCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER); + psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef; + psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef; + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + if (psCmdHelperData->psWorkEstKickData != NULL && + psCmdHelperData->eType != RGXFWIF_CCB_CMD_TYPE_NULL) + { + PVR_ASSERT(psCmdHelperData->eType == RGXFWIF_CCB_CMD_TYPE_GEOM || + psCmdHelperData->eType == RGXFWIF_CCB_CMD_TYPE_3D || + psCmdHelperData->eType == RGXFWIF_CCB_CMD_TYPE_CDM || + psCmdHelperData->eType == RGXFWIF_CCB_CMD_TYPE_TQ_TDM); + psHeader->sWorkEstKickData = *psCmdHelperData->psWorkEstKickData; + } + else + { + psHeader->sWorkEstKickData.ui64ReturnDataIndex = 0; + psHeader->sWorkEstKickData.ui64Deadline = 0; + psHeader->sWorkEstKickData.ui64CyclesPrediction = 0; + } +#endif + + pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER)); + + /* The buffer is write-combine, so no special device memory treatment required. 
*/ + OSCachedMemCopy(pvCmdPtr, psCmdHelperData->pui8DMCmd, psCmdHelperData->ui32CmdSize); + pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, psCmdHelperData->ui32CmdSize); + } + + + if (psCmdHelperData->ui32PostTimeStampCmdSize != 0) + { + RGXWriteTimestampCommand(&pvCmdPtr, + RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP, + psCmdHelperData->pPostTimestampAddr); + } + + + if (psCmdHelperData->ui32RMWUFOCmdSize != 0) + { + RGXFWIF_CCB_CMD_HEADER * psHeader; + RGXFWIF_UFO * psUFO; + + psHeader = (RGXFWIF_CCB_CMD_HEADER *) pvCmdPtr; + psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE; + psHeader->ui32CmdSize = psCmdHelperData->ui32RMWUFOCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER); + psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef; + psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef; +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + psHeader->sWorkEstKickData.ui64ReturnDataIndex = 0; + psHeader->sWorkEstKickData.ui64Deadline = 0; + psHeader->sWorkEstKickData.ui64CyclesPrediction = 0; +#endif + pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER)); + + psUFO = (RGXFWIF_UFO *) pvCmdPtr; + psUFO->puiAddrUFO = psCmdHelperData->pRMWUFOAddr; + + pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_UFO)); + } + + /* + Create the update command. 
+ */ + if (psCmdHelperData->ui32UpdateCmdSize) + { + RGXFWIF_CCB_CMD_HEADER *psHeader; + IMG_UINT k, uiNextValueIndex; + + psHeader = pvCmdPtr; + psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_UPDATE; + psHeader->ui32CmdSize = psCmdHelperData->ui32UpdateCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER); + psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef; + psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef; +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + psHeader->sWorkEstKickData.ui64ReturnDataIndex = 0; + psHeader->sWorkEstKickData.ui64Deadline = 0; + psHeader->sWorkEstKickData.ui64CyclesPrediction = 0; +#endif + pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER)); + + /* Fill in the client updates */ + uiNextValueIndex = 0; + for (k = 0; k < psCmdHelperData->ui32ClientUpdateCount; k++) + { + RGXFWIF_UFO *psUFOPtr = pvCmdPtr; + + psUFOPtr->puiAddrUFO = psCmdHelperData->pauiUpdateUFOAddress[k]; + if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr)) + { + psUFOPtr->ui32Value = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; + } + else + { + /* Only increment uiNextValueIndex for non sync checkpoints + * (as paui32UpdateValue only contains values for sync prims) + */ + psUFOPtr->ui32Value = psCmdHelperData->paui32UpdateValue[uiNextValueIndex++]; + } + pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_UFO)); + +#if defined(SYNC_COMMAND_DEBUG) + PVR_DPF((PVR_DBG_ERROR, "%s client sync update - 0x%x -> 0x%x", + psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value)); +#endif + PDUMPCOMMENT(".. 
%s client sync update - 0x%x -> 0x%x", + psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value); + + } + } + + /* Set the start pointer for the next iteration around the loop */ + pvStartPtr = IMG_OFFSET_ADDR(pvStartPtr, + psCmdHelperData->ui32FenceCmdSize + + psCmdHelperData->ui32FBSCInvalCmdSize + + psCmdHelperData->ui32PreTimeStampCmdSize + + psCmdHelperData->ui32DMCmdSize + + psCmdHelperData->ui32PostTimeStampCmdSize + + psCmdHelperData->ui32RMWUFOCmdSize + + psCmdHelperData->ui32UpdateCmdSize ); + + if (psCmdHelperData->ui32ClientFenceCount+psCmdHelperData->ui32ClientUpdateCount != 0) + { + PDUMPCOMMENT("End of %s client syncs for cmd[%d] on FWCtx %08x Woff 0x%x bytes", + psCmdHelperData->psClientCCB->szName, i, ui32CtxAddr, ui32CcbWoff); + } + else + { + PDUMPCOMMENT("No %s client syncs for cmd[%d] on FWCtx %08x Woff 0x%x bytes", + psCmdHelperData->psClientCCB->szName, i, ui32CtxAddr, ui32CcbWoff); + } + } + + return PVRSRV_OK; +} + +/* + Fill in the server syncs data and release the CCB space +*/ +void RGXCmdHelperReleaseCmdCCB(IMG_UINT32 ui32CmdCount, + RGX_CCB_CMD_HELPER_DATA *asCmdHelperData, + const IMG_CHAR *pcszDMName, + IMG_UINT32 ui32CtxAddr) +{ + IMG_UINT32 ui32AllocSize = 0; + IMG_UINT32 i; +#if defined(__linux__) + IMG_BOOL bTraceChecks = trace_rogue_are_fence_checks_traced(); + IMG_BOOL bTraceUpdates = trace_rogue_are_fence_updates_traced(); +#endif + + /* + Work out how much space we need for all the command(s) + */ + ui32AllocSize = RGXCmdHelperGetCommandSize(ui32CmdCount, asCmdHelperData); + /* + For each command fill in the server sync info + */ + for (i=0;ipszCommandName, + pcszDMName, + ui32CtxAddr, + psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize, + psCmdHelperData->ui32ClientFenceCount, + psCmdHelperData->pauiFenceUFOAddress, + psCmdHelperData->paui32FenceValue); + } + if (bTraceUpdates) + { + trace_rogue_fence_updates(psCmdHelperData->pszCommandName, + pcszDMName, + ui32CtxAddr, + 
psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize, + psCmdHelperData->ui32ClientUpdateCount, + psCmdHelperData->pauiUpdateUFOAddress, + psCmdHelperData->paui32UpdateValue); + } +#endif + + /* + All the commands have been filled in so release the CCB space. + The FW still won't run this command until we kick it + */ + PDUMPCOMMENTWITHFLAGS(psCmdHelperData->ui32PDumpFlags, + "%s Command Server Release on FWCtx %08x", + psCmdHelperData->pszCommandName, ui32CtxAddr); + } + + RGXReleaseCCB(asCmdHelperData[0].psClientCCB, + ui32AllocSize, + asCmdHelperData[0].ui32PDumpFlags); + + BIT_UNSET(asCmdHelperData[0].psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN); +} + +IMG_UINT32 RGXCmdHelperGetCommandSize(IMG_UINT32 ui32CmdCount, + RGX_CCB_CMD_HELPER_DATA *asCmdHelperData) +{ + IMG_UINT32 ui32AllocSize = 0; + IMG_UINT32 i; + + /* + Work out how much space we need for all the command(s) + */ + for (i = 0; i < ui32CmdCount; i++) + { + ui32AllocSize += + asCmdHelperData[i].ui32FenceCmdSize + + asCmdHelperData[i].ui32FBSCInvalCmdSize + + asCmdHelperData[i].ui32DMCmdSize + + asCmdHelperData[i].ui32UpdateCmdSize + + asCmdHelperData[i].ui32PreTimeStampCmdSize + + asCmdHelperData[i].ui32PostTimeStampCmdSize + + asCmdHelperData[i].ui32RMWUFOCmdSize; + } + + return ui32AllocSize; +} + +/* Work out how much of an offset there is to a specific command. 
*/ +IMG_UINT32 RGXCmdHelperGetCommandOffset(RGX_CCB_CMD_HELPER_DATA *asCmdHelperData, + IMG_UINT32 ui32Cmdindex) +{ + IMG_UINT32 ui32Offset = 0; + IMG_UINT32 i; + + for (i = 0; i < ui32Cmdindex; i++) + { + ui32Offset += + asCmdHelperData[i].ui32FenceCmdSize + + asCmdHelperData[i].ui32FBSCInvalCmdSize + + asCmdHelperData[i].ui32DMCmdSize + + asCmdHelperData[i].ui32UpdateCmdSize + + asCmdHelperData[i].ui32PreTimeStampCmdSize + + asCmdHelperData[i].ui32PostTimeStampCmdSize + + asCmdHelperData[i].ui32RMWUFOCmdSize; + } + + return ui32Offset; +} + +/* Returns the offset of the data master command from a write offset */ +IMG_UINT32 RGXCmdHelperGetDMCommandHeaderOffset(RGX_CCB_CMD_HELPER_DATA *psCmdHelperData) +{ + return psCmdHelperData->ui32FenceCmdSize + + psCmdHelperData->ui32PreTimeStampCmdSize + + psCmdHelperData->ui32FBSCInvalCmdSize; +} + +static const char *_CCBCmdTypename(RGXFWIF_CCB_CMD_TYPE cmdType) +{ + switch (cmdType) + { + case RGXFWIF_CCB_CMD_TYPE_GEOM: return "TA"; + case RGXFWIF_CCB_CMD_TYPE_3D: return "3D"; + case RGXFWIF_CCB_CMD_TYPE_3D_PR: return "3D_PR"; + case RGXFWIF_CCB_CMD_TYPE_CDM: return "CDM"; + case RGXFWIF_CCB_CMD_TYPE_TQ_3D: return "TQ_3D"; + case RGXFWIF_CCB_CMD_TYPE_TQ_2D: return "TQ_2D"; + case RGXFWIF_CCB_CMD_TYPE_TQ_TDM: return "TQ_TDM"; + case RGXFWIF_CCB_CMD_TYPE_FBSC_INVALIDATE: return "FBSC_INVALIDATE"; + case RGXFWIF_CCB_CMD_TYPE_NULL: return "NULL"; + case RGXFWIF_CCB_CMD_TYPE_FENCE: return "FENCE"; + case RGXFWIF_CCB_CMD_TYPE_UPDATE: return "UPDATE"; + case RGXFWIF_CCB_CMD_TYPE_FENCE_PR: return "FENCE_PR"; + case RGXFWIF_CCB_CMD_TYPE_PRIORITY: return "PRIORITY"; + case RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE: return "UNFENCED_UPDATE"; + case RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP: return "PRE_TIMESTAMP"; + case RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE: return "RMW_UPDATE"; + case RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP: return "POST_TIMESTAMP"; + case RGXFWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE: return "UNFENCED_RMW_UPDATE"; + case 
RGXFWIF_CCB_CMD_TYPE_PADDING: return "PADDING"; + + default: + PVR_ASSERT(IMG_FALSE); + break; + } + + return "INVALID"; +} + +PVRSRV_ERROR CheckForStalledCCB(PVRSRV_DEVICE_NODE *psDevNode, RGX_CLIENT_CCB *psCurrentClientCCB, RGX_KICK_TYPE_DM eKickTypeDM) +{ + volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl; + IMG_UINT32 ui32SampledRdOff, ui32SampledDpOff, ui32SampledWrOff, ui32WrapMask; + PVRSRV_ERROR eError = PVRSRV_OK; + + if (psCurrentClientCCB == NULL) + { + PVR_DPF((PVR_DBG_WARNING, "CheckForStalledCCB: CCCB is NULL")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + +#if defined(PVRSRV_ENABLE_CCCB_GROW) + /* If CCB grow is enabled, take the lock while sampling offsets + * (to guard against a grow happening mid-sample) + */ + OSLockAcquire(psCurrentClientCCB->hCCBGrowLock); +#endif + /* NB. use psCurrentClientCCB->ui32Size as basis for wrap mask (rather than psClientCCBCtrl->ui32WrapMask) + * as if CCB grow happens, psCurrentClientCCB->ui32Size will have been updated but + * psClientCCBCtrl->ui32WrapMask is only updated once the firmware sees the CCB has grown. + * If we use the wrong value, we might incorrectly determine that the offsets are invalid. 
+ */ + ui32WrapMask = RGXGetWrapMaskCCB(psCurrentClientCCB); + psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl; + ui32SampledRdOff = psClientCCBCtrl->ui32ReadOffset; + ui32SampledDpOff = psClientCCBCtrl->ui32DepOffset; + ui32SampledWrOff = psCurrentClientCCB->ui32HostWriteOffset; +#if defined(PVRSRV_ENABLE_CCCB_GROW) + OSLockRelease(psCurrentClientCCB->hCCBGrowLock); +#endif + + if (ui32SampledRdOff > ui32WrapMask || + ui32SampledDpOff > ui32WrapMask || + ui32SampledWrOff > ui32WrapMask) + { + PVR_DPF((PVR_DBG_WARNING, "CheckForStalledCCB: CCCB has invalid offset (ROFF=%d DOFF=%d WOFF=%d)", + ui32SampledRdOff, ui32SampledDpOff, ui32SampledWrOff)); + return PVRSRV_ERROR_INVALID_OFFSET; + } + + if (ui32SampledRdOff != ui32SampledWrOff && + psCurrentClientCCB->ui32LastROff != psCurrentClientCCB->ui32LastWOff && + ui32SampledRdOff == psCurrentClientCCB->ui32LastROff && + (psCurrentClientCCB->ui32ByteCount - psCurrentClientCCB->ui32LastByteCount) < psCurrentClientCCB->ui32Size) + { + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*)psDevNode->pvDevice; + + /* Only log a stalled CCB if GPU is idle (any state other than POW_ON is considered idle). + * Guest drivers do not initialize psRGXFWIfFwSysData, so they assume FW internal state is ON. */ + if (((psDevInfo->psRGXFWIfFwSysData == NULL) || (psDevInfo->psRGXFWIfFwSysData->ePowState != RGXFWIF_POW_ON)) && + (psDevInfo->ui32SLRHoldoffCounter == 0)) + { + static __maybe_unused const char *pszStalledAction = +#if defined(PVRSRV_STALLED_CCB_ACTION) + "force"; +#else + "warn"; +#endif + /* Don't log this by default unless debugging since a higher up + * function will log the stalled condition. Helps avoid double + * messages in the log. 
+ */ + PVR_DPF((PVR_DBG_ERROR, "%s (%s): CCCB has not progressed (ROFF=%d DOFF=%d WOFF=%d) for \"%s\"", + __func__, pszStalledAction, ui32SampledRdOff, + ui32SampledDpOff, ui32SampledWrOff, + psCurrentClientCCB->szName)); + eError = PVRSRV_ERROR_CCCB_STALLED; + + { + void *pvClientCCBBuff = psCurrentClientCCB->pvClientCCB; + RGXFWIF_CCB_CMD_HEADER *psCommandHeader = IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledRdOff); + PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psCurrentClientCCB->psServerCommonContext); + + /* Special case - if readOffset is on a PADDING packet, CCB has wrapped. + * In this case, skip over the PADDING packet. + */ + if (psCommandHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_PADDING) + { + psCommandHeader = IMG_OFFSET_ADDR(pvClientCCBBuff, + ((ui32SampledRdOff + + psCommandHeader->ui32CmdSize + + sizeof(RGXFWIF_CCB_CMD_HEADER)) + & psCurrentClientCCB->psClientCCBCtrl->ui32WrapMask)); + } + + /* Only try to recover a 'stalled' context (ie one waiting on a fence), as some work (eg compute) could + * take a long time to complete, during which time the CCB ptrs would not advance. 
+ */ + if (((psCommandHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE) || + (psCommandHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR)) && + (psCommandHeader != IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledWrOff))) + { + /* Acquire the cCCB recovery lock */ + OSLockAcquire(psDevInfo->hCCBRecoveryLock); + + if (!psDevInfo->pvEarliestStalledClientCCB) + { + psDevInfo->pvEarliestStalledClientCCB = (void*)psCurrentClientCCB; + psDevInfo->ui32OldestSubmissionOrdinal = psCommandHeader->ui32IntJobRef; + } + else + { + /* Check if this fence cmd header has an older submission stamp than the one we are currently considering unblocking + * (account for submission stamp wrap by checking diff is less than 0x80000000) - if it is older, then this becomes + * our preferred fence to be unblocked/ + */ + if ((psCommandHeader->ui32IntJobRef < psDevInfo->ui32OldestSubmissionOrdinal) && + ((psDevInfo->ui32OldestSubmissionOrdinal - psCommandHeader->ui32IntJobRef) < 0x8000000)) + { + psDevInfo->pvEarliestStalledClientCCB = (void*)psCurrentClientCCB; + psDevInfo->ui32OldestSubmissionOrdinal = psCommandHeader->ui32IntJobRef; + } + } + + /* Release the cCCB recovery lock */ + OSLockRelease(psDevInfo->hCCBRecoveryLock); + } + } + } + } + + psCurrentClientCCB->ui32LastROff = ui32SampledRdOff; + psCurrentClientCCB->ui32LastWOff = ui32SampledWrOff; + psCurrentClientCCB->ui32LastByteCount = psCurrentClientCCB->ui32ByteCount; + + return eError; +} + +void DumpCCB(PVRSRV_RGXDEV_INFO *psDevInfo, + PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext, + RGX_CLIENT_CCB *psCurrentClientCCB, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; + volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl; + void *pvClientCCBBuff; + IMG_UINT32 ui32Offset; + IMG_UINT32 ui32DepOffset; + IMG_UINT32 ui32EndOffset; + IMG_UINT32 ui32WrapMask; + IMG_CHAR * pszState = "Ready"; + + /* Ensure hCCBGrowLock is acquired before reading + * 
psCurrentClientCCB->pvClientCCB as a CCB grow + * could remap the virtual addresses. + */ +#if defined(PVRSRV_ENABLE_CCCB_GROW) + OSLockAcquire(psCurrentClientCCB->hCCBGrowLock); +#endif + psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl; + pvClientCCBBuff = psCurrentClientCCB->pvClientCCB; + ui32EndOffset = psCurrentClientCCB->ui32HostWriteOffset; + OSMemoryBarrier(); + ui32Offset = psClientCCBCtrl->ui32ReadOffset; + ui32DepOffset = psClientCCBCtrl->ui32DepOffset; + /* NB. Use psCurrentClientCCB->ui32Size as basis for wrap mask (rather + * than psClientCCBCtrl->ui32WrapMask) as if CCB grow happened, + * psCurrentClientCCB->ui32Size will have been updated but + * psClientCCBCtrl->ui32WrapMask is only updated once the firmware + * sees the CCB has grown. If we use the wrong value, ui32NextOffset + * can end up being wrapped prematurely and pointing to garbage. + */ + ui32WrapMask = RGXGetWrapMaskCCB(psCurrentClientCCB); + + PVR_DUMPDEBUG_LOG("FWCtx 0x%08X (%s)", sFWCommonContext.ui32Addr, psCurrentClientCCB->szName); + if (ui32Offset == ui32EndOffset) + { + PVR_DUMPDEBUG_LOG(" `--"); + } + + while (ui32Offset != ui32EndOffset) + { + RGXFWIF_CCB_CMD_HEADER *psCmdHeader = IMG_OFFSET_ADDR(pvClientCCBBuff, ui32Offset); + IMG_UINT32 ui32NextOffset = (ui32Offset + psCmdHeader->ui32CmdSize + sizeof(RGXFWIF_CCB_CMD_HEADER)) & ui32WrapMask; + IMG_BOOL bLastCommand = (ui32NextOffset == ui32EndOffset)? IMG_TRUE: IMG_FALSE; + IMG_BOOL bLastUFO; + #define CCB_SYNC_INFO_LEN 80 + IMG_CHAR pszSyncInfo[CCB_SYNC_INFO_LEN]; + IMG_UINT32 ui32NoOfUpdates, i; + RGXFWIF_UFO *psUFOPtr; + + ui32NoOfUpdates = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO); + psUFOPtr = IMG_OFFSET_ADDR(pvClientCCBBuff, ui32Offset + sizeof(RGXFWIF_CCB_CMD_HEADER)); + pszSyncInfo[0] = '\0'; + + if (ui32Offset == ui32DepOffset) + { + pszState = "Waiting"; + } + + PVR_DUMPDEBUG_LOG(" %s--%s %s @ %u Int=%u Ext=%u", + bLastCommand? 
"`": "|", + pszState, _CCBCmdTypename(psCmdHeader->eCmdType), + ui32Offset, psCmdHeader->ui32IntJobRef, psCmdHeader->ui32ExtJobRef + ); + + /* switch on type and write checks and updates */ + switch (psCmdHeader->eCmdType) + { + case RGXFWIF_CCB_CMD_TYPE_UPDATE: + case RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE: + case RGXFWIF_CCB_CMD_TYPE_FENCE: + case RGXFWIF_CCB_CMD_TYPE_FENCE_PR: + { + for (i = 0; i < ui32NoOfUpdates; i++, psUFOPtr++) + { + bLastUFO = (ui32NoOfUpdates-1 == i)? IMG_TRUE: IMG_FALSE; + + if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) + { + if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr)) + { + SyncCheckpointRecordLookup(psDeviceNode, psUFOPtr->puiAddrUFO.ui32Addr, + pszSyncInfo, CCB_SYNC_INFO_LEN); + } + else + { + SyncRecordLookup(psDeviceNode, psUFOPtr->puiAddrUFO.ui32Addr, + pszSyncInfo, CCB_SYNC_INFO_LEN); + } + } + + PVR_DUMPDEBUG_LOG(" %s %s--Addr:0x%08x Val=0x%08x %s", + bLastCommand? " ": "|", + bLastUFO? "`": "|", + psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value, + pszSyncInfo + ); + } + break; + } + case RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE: + case RGXFWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE: + { + for (i = 0; i < ui32NoOfUpdates; i++, psUFOPtr++) + { + bLastUFO = (ui32NoOfUpdates-1 == i)? IMG_TRUE: IMG_FALSE; + + if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) + { + if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr)) + { + SyncCheckpointRecordLookup(psDeviceNode, psUFOPtr->puiAddrUFO.ui32Addr, + pszSyncInfo, CCB_SYNC_INFO_LEN); + } + else + { + SyncRecordLookup(psDeviceNode, psUFOPtr->puiAddrUFO.ui32Addr, + pszSyncInfo, CCB_SYNC_INFO_LEN); + } + } + + PVR_DUMPDEBUG_LOG(" %s %s--Addr:0x%08x Val++ %s", + bLastCommand? " ": "|", + bLastUFO? 
"`": "|", + psUFOPtr->puiAddrUFO.ui32Addr, + pszSyncInfo + ); + } + break; + } + default: + break; + } + ui32Offset = ui32NextOffset; + } + +#if defined(PVRSRV_ENABLE_CCCB_GROW) + OSLockRelease(psCurrentClientCCB->hCCBGrowLock); +#endif +} + +void DumpStalledCCBCommand(PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext, + RGX_CLIENT_CCB *psCurrentClientCCB, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl; + void *pvClientCCBBuff = psCurrentClientCCB->pvClientCCB; + volatile void *pvPtr; + IMG_UINT32 ui32SampledRdOff = psClientCCBCtrl->ui32ReadOffset; + IMG_UINT32 ui32SampledDepOff = psClientCCBCtrl->ui32DepOffset; + IMG_UINT32 ui32SampledWrOff = psCurrentClientCCB->ui32HostWriteOffset; + + pvPtr = IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledRdOff); + + if ((ui32SampledRdOff == ui32SampledDepOff) && + (ui32SampledRdOff != ui32SampledWrOff)) + { + volatile RGXFWIF_CCB_CMD_HEADER *psCommandHeader = IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledRdOff); + RGXFWIF_CCB_CMD_TYPE eCommandType = psCommandHeader->eCmdType; + volatile void *pvPtr = psCommandHeader; + + /* CCB is stalled on a fence... 
*/ + if ((eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE) || (eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR)) + { +#if defined(SUPPORT_EXTRA_METASP_DEBUG) + PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psCurrentClientCCB->psServerCommonContext); + IMG_UINT32 ui32Val; +#endif + RGXFWIF_UFO *psUFOPtr = IMG_OFFSET_ADDR(pvPtr, sizeof(*psCommandHeader)); + IMG_UINT32 jj; + + /* Display details of the fence object on which the context is pending */ + PVR_DUMPDEBUG_LOG("FWCtx 0x%08X @ %d (%s) pending on %s:", + sFWCommonContext.ui32Addr, + ui32SampledRdOff, + psCurrentClientCCB->szName, + _CCBCmdTypename(eCommandType)); + for (jj=0; jjui32CmdSize/sizeof(RGXFWIF_UFO); jj++) + { +#if !defined(SUPPORT_EXTRA_METASP_DEBUG) + PVR_DUMPDEBUG_LOG(" Addr:0x%08x Value=0x%08x",psUFOPtr[jj].puiAddrUFO.ui32Addr, psUFOPtr[jj].ui32Value); +#else + ui32Val = 0; + RGXReadWithSP(psDevInfo, psUFOPtr[jj].puiAddrUFO.ui32Addr, &ui32Val); + PVR_DUMPDEBUG_LOG(" Addr:0x%08x Value(Host)=0x%08x Value(FW)=0x%08x", + psUFOPtr[jj].puiAddrUFO.ui32Addr, + psUFOPtr[jj].ui32Value, ui32Val); +#endif + } + + /* Advance psCommandHeader past the FENCE to the next command header (this will be the TA/3D command that is fenced) */ + pvPtr = IMG_OFFSET_ADDR(psUFOPtr, psCommandHeader->ui32CmdSize); + psCommandHeader = pvPtr; + if (psCommandHeader != IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledWrOff)) + { + PVR_DUMPDEBUG_LOG(" FWCtx 0x%08X fenced command is of type %s",sFWCommonContext.ui32Addr, _CCBCmdTypename(psCommandHeader->eCmdType)); + /* Advance psCommandHeader past the TA/3D to the next command header (this will possibly be an UPDATE) */ + pvPtr = IMG_OFFSET_ADDR(pvPtr, sizeof(*psCommandHeader) + psCommandHeader->ui32CmdSize); + psCommandHeader = pvPtr; + /* If the next command is an update, display details of that so we can see what would then become unblocked */ + if (psCommandHeader != IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledWrOff)) + { + eCommandType = psCommandHeader->eCmdType; + + if 
(eCommandType == RGXFWIF_CCB_CMD_TYPE_UPDATE) + { + psUFOPtr = IMG_OFFSET_ADDR(psCommandHeader, sizeof(*psCommandHeader)); + PVR_DUMPDEBUG_LOG(" preventing %s:",_CCBCmdTypename(eCommandType)); + for (jj=0; jjui32CmdSize/sizeof(RGXFWIF_UFO); jj++) + { +#if !defined(SUPPORT_EXTRA_METASP_DEBUG) + PVR_DUMPDEBUG_LOG(" Addr:0x%08x Value=0x%08x",psUFOPtr[jj].puiAddrUFO.ui32Addr, psUFOPtr[jj].ui32Value); +#else + ui32Val = 0; + RGXReadWithSP(psDevInfo, psUFOPtr[jj].puiAddrUFO.ui32Addr, &ui32Val); + PVR_DUMPDEBUG_LOG(" Addr:0x%08x Value(Host)=0x%08x Value(FW)=0x%08x", + psUFOPtr[jj].puiAddrUFO.ui32Addr, + psUFOPtr[jj].ui32Value, + ui32Val); +#endif + } + } + } + else + { + PVR_DUMPDEBUG_LOG(" FWCtx 0x%08X has no further commands",sFWCommonContext.ui32Addr); + } + } + else + { + PVR_DUMPDEBUG_LOG(" FWCtx 0x%08X has no further commands",sFWCommonContext.ui32Addr); + } + } + } +} + +void DumpStalledContextInfo(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGX_CLIENT_CCB *psStalledClientCCB; + + PVR_ASSERT(psDevInfo); + + psStalledClientCCB = (RGX_CLIENT_CCB *)psDevInfo->pvEarliestStalledClientCCB; + + if (psStalledClientCCB) + { + volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl = psStalledClientCCB->psClientCCBCtrl; + IMG_UINT32 ui32SampledDepOffset = psClientCCBCtrl->ui32DepOffset; + void *pvPtr = IMG_OFFSET_ADDR(psStalledClientCCB->pvClientCCB, ui32SampledDepOffset); + RGXFWIF_CCB_CMD_HEADER *psCommandHeader = pvPtr; + RGXFWIF_CCB_CMD_TYPE eCommandType = psCommandHeader->eCmdType; + + if ((eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE) || (eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR)) + { + RGXFWIF_UFO *psUFOPtr = IMG_OFFSET_ADDR(pvPtr, sizeof(*psCommandHeader)); + IMG_UINT32 jj; + IMG_UINT32 ui32NumUnsignalledUFOs = 0; + IMG_UINT32 ui32UnsignalledUFOVaddrs[PVRSRV_MAX_SYNCS]; + +#if defined(PVRSRV_STALLED_CCB_ACTION) + if (!psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.aszCCBName[0]) + { + OSClockMonotonicns64(&psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.ui64Timestamp); + 
psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.ui32NumUFOs = (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO)); + psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.ui32FWCtxAddr = FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext).ui32Addr; + OSStringLCopy(psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.aszCCBName, + psStalledClientCCB->szName, + MAX_CLIENT_CCB_NAME); + } + else + { + OSClockMonotonicns64(&psDevInfo->psRGXFWIfFwOsData->sSLRLog[psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp].ui64Timestamp); + psDevInfo->psRGXFWIfFwOsData->sSLRLog[psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp].ui32NumUFOs = (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO)); + psDevInfo->psRGXFWIfFwOsData->sSLRLog[psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp].ui32FWCtxAddr = FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext).ui32Addr; + OSStringLCopy(psDevInfo->psRGXFWIfFwOsData->sSLRLog[psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp].aszCCBName, + psStalledClientCCB->szName, + MAX_CLIENT_CCB_NAME); + psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp = (psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp + 1) % PVR_SLR_LOG_ENTRIES; + } + psDevInfo->psRGXFWIfFwOsData->ui32ForcedUpdatesRequested++; +#endif + PVR_LOG(("Fence found on context 0x%x '%s' @ %d has %d UFOs", + FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext).ui32Addr, + psStalledClientCCB->szName, ui32SampledDepOffset, + (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO)))); + + for (jj=0; jjui32CmdSize/sizeof(RGXFWIF_UFO); jj++) + { + if (PVRSRV_UFO_IS_SYNC_CHECKPOINT((RGXFWIF_UFO *)&psUFOPtr[jj])) + { + IMG_UINT32 ui32ReadValue = SyncCheckpointStateFromUFO(psDevInfo->psDeviceNode, + psUFOPtr[jj].puiAddrUFO.ui32Addr); + PVR_LOG((" %d/%d FWAddr 0x%x requires 0x%x (currently 0x%x)", jj+1, + (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO)), + psUFOPtr[jj].puiAddrUFO.ui32Addr, + psUFOPtr[jj].ui32Value, + ui32ReadValue)); + /* If fence is unmet, dump debug info on it 
*/ + if (ui32ReadValue != psUFOPtr[jj].ui32Value) + { + /* Add to our list to pass to pvr_sync */ + ui32UnsignalledUFOVaddrs[ui32NumUnsignalledUFOs] = psUFOPtr[jj].puiAddrUFO.ui32Addr; + ui32NumUnsignalledUFOs++; + } + } + else + { + PVR_LOG((" %d/%d FWAddr 0x%x requires 0x%x", jj+1, + (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO)), + psUFOPtr[jj].puiAddrUFO.ui32Addr, + psUFOPtr[jj].ui32Value)); + } + } +#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC) + if (ui32NumUnsignalledUFOs > 0) + { + IMG_UINT32 ui32NumSyncsOwned; + PVRSRV_ERROR eErr = SyncCheckpointDumpInfoOnStalledUFOs(ui32NumUnsignalledUFOs, &ui32UnsignalledUFOVaddrs[0], &ui32NumSyncsOwned); + + PVR_LOG_IF_ERROR(eErr, "SyncCheckpointDumpInfoOnStalledUFOs() call failed."); + } +#endif +#if defined(PVRSRV_STALLED_CCB_ACTION) + if (BIT_ISSET(psStalledClientCCB->ui32CCBFlags, CCB_FLAGS_SLR_DISABLED)) + { + PRGXFWIF_FWCOMMONCONTEXT psContext = FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext); + + PVR_LOG(("SLR disabled for FWCtx 0x%08X", psContext.ui32Addr)); + } + else + { + if (ui32NumUnsignalledUFOs > 0) + { + RGXFWIF_KCCB_CMD sSignalFencesCmd; + + sSignalFencesCmd.eCmdType = RGXFWIF_KCCB_CMD_FORCE_UPDATE; + sSignalFencesCmd.ui32KCCBFlags = 0; + sSignalFencesCmd.uCmdData.sForceUpdateData.psContext = FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext); + sSignalFencesCmd.uCmdData.sForceUpdateData.ui32CCBFenceOffset = ui32SampledDepOffset; + + PVR_LOG(("Forced update command issued for FWCtx 0x%08X", sSignalFencesCmd.uCmdData.sForceUpdateData.psContext.ui32Addr)); + + RGXScheduleCommand(FWCommonContextGetRGXDevInfo(psStalledClientCCB->psServerCommonContext), + RGXFWIF_DM_GP, + &sSignalFencesCmd, + 0, + PDUMP_FLAGS_CONTINUOUS); + } + } +#endif + } + psDevInfo->pvEarliestStalledClientCCB = NULL; + } +} + +/****************************************************************************** + End of file (rgxccb.c) 
+******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgxccb.h b/drivers/gpu/drm/phytium/octopus/rgxccb.h new file mode 100644 index 000000000000..ae88d8697f5e --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxccb.h @@ -0,0 +1,347 @@ +/*************************************************************************/ /*! +@File +@Title RGX Circular Command Buffer functionality. +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Header for the RGX Circular Command Buffer functionality. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(RGXCCB_H) +#define RGXCCB_H + +#include "devicemem.h" +#include "device.h" +#include "rgxdevice.h" +#include "sync_server.h" +#include "connection_server.h" +#include "rgxdebug.h" +#include "rgxdefs_km.h" +#include "pvr_notifier.h" + +#define MAX_CLIENT_CCB_NAME 30 +#define SYNC_FLAG_MASK_ALL IMG_UINT32_MAX + +/* + * This size is to be used when a client CCB is found to consume very + * negligible space (e.g. a few hundred bytes to few KBs - less than a page). + * In such a case, instead of allocating CCB of size of only a few KBs, we + * allocate at-least this much to be future risk-free. 
+ */ +#define MIN_SAFE_CCB_SIZE_LOG2 13 /* 8K (2 Pages) */ +#define MAX_SAFE_CCB_SIZE_LOG2 18 /* 256K (64 Pages) */ + +#define RGX_TQ3D_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ3D +static_assert(RGX_TQ3D_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 && + RGX_TQ3D_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TQ3D CCB size is invalid"); +#define RGX_TQ3D_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TQ3D +static_assert(RGX_TQ3D_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ3D + && RGX_TQ3D_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TQ3D max CCB size is invalid"); + +#define RGX_TQ2D_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ2D +static_assert(RGX_TQ2D_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 && + RGX_TQ2D_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TQ2D CCB size is invalid"); +#define RGX_TQ2D_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TQ2D +static_assert(RGX_TQ2D_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ2D && + RGX_TQ2D_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TQ2D max CCB size is invalid"); + +#define RGX_CDM_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_CDM +static_assert(RGX_CDM_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 && + RGX_CDM_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "CDM CCB size is invalid"); +#define RGX_CDM_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_CDM +static_assert(RGX_CDM_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_CDM && + RGX_CDM_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "CDM max CCB size is invalid"); + +#define RGX_TA_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TA +static_assert(RGX_TA_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 && + RGX_TA_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TA CCB size is invalid"); +#define RGX_TA_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TA +static_assert(RGX_TA_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TA && + RGX_TA_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TA max CCB size is invalid"); + +#define RGX_3D_CCB_SIZE_LOG2 
PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_3D +static_assert(RGX_3D_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 && + RGX_3D_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "3D CCB size is invalid"); +#define RGX_3D_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_3D +static_assert(RGX_3D_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_3D && + RGX_3D_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "3D max CCB size is invalid"); + +#define RGX_KICKSYNC_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_KICKSYNC +static_assert(RGX_KICKSYNC_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 && + RGX_KICKSYNC_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "KickSync CCB size is invalid"); +#define RGX_KICKSYNC_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_KICKSYNC +static_assert(RGX_KICKSYNC_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_KICKSYNC && + RGX_KICKSYNC_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "KickSync max CCB size is invalid"); + +#define RGX_TDM_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TDM +static_assert(RGX_TDM_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 && + RGX_TDM_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TDM CCB size is invalid"); +#define RGX_TDM_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TDM +static_assert(RGX_TDM_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TDM && + RGX_TDM_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TDM max CCB size is invalid"); + +typedef struct _RGX_CLIENT_CCB_ RGX_CLIENT_CCB; + +/* + This structure is declared here as it's allocated on the heap by + the callers +*/ + +typedef struct _RGX_CCB_CMD_HELPER_DATA_ { + /* Data setup at command init time */ + RGX_CLIENT_CCB *psClientCCB; + IMG_CHAR *pszCommandName; + IMG_UINT32 ui32PDumpFlags; + + IMG_UINT32 ui32ClientFenceCount; + PRGXFWIF_UFO_ADDR *pauiFenceUFOAddress; + IMG_UINT32 *paui32FenceValue; + IMG_UINT32 ui32ClientUpdateCount; + PRGXFWIF_UFO_ADDR *pauiUpdateUFOAddress; + IMG_UINT32 *paui32UpdateValue; + RGXFWIF_CCB_CMD_TYPE eType; + IMG_UINT32 ui32CmdSize; + IMG_UINT8 
*pui8DMCmd; + IMG_UINT32 ui32FenceCmdSize; + IMG_UINT32 ui32FBSCInvalCmdSize; + IMG_UINT32 ui32DMCmdSize; + IMG_UINT32 ui32UpdateCmdSize; + + /* data for FBSC invalidate command */ + IMG_UINT64 ui64FBSCEntryMask; + + /* timestamp commands */ + PRGXFWIF_TIMESTAMP_ADDR pPreTimestampAddr; + IMG_UINT32 ui32PreTimeStampCmdSize; + PRGXFWIF_TIMESTAMP_ADDR pPostTimestampAddr; + IMG_UINT32 ui32PostTimeStampCmdSize; + PRGXFWIF_UFO_ADDR pRMWUFOAddr; + IMG_UINT32 ui32RMWUFOCmdSize; + + /* Job reference fields */ + IMG_UINT32 ui32ExtJobRef; + IMG_UINT32 ui32IntJobRef; + + /* FW Memdesc for Workload information */ + RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData; + +} RGX_CCB_CMD_HELPER_DATA; + +#define PADDING_COMMAND_SIZE (sizeof(RGXFWIF_CCB_CMD_HEADER)) + + +#define RGX_CCB_REQUESTORS(TYPE) \ + /* for debugging purposes */ TYPE(UNDEF) \ + TYPE(TA) \ + TYPE(3D) \ + TYPE(CDM) \ + TYPE(SH) \ + TYPE(RS) \ + TYPE(TQ_3D) \ + TYPE(TQ_2D) \ + TYPE(TQ_TDM) \ + TYPE(KICKSYNC) \ + TYPE(RAY) \ + +/* Forms an enum constant for each type present in RGX_CCB_REQUESTORS list. The enum is mainly used as + an index to the aszCCBRequestors table defined in rgxccb.c. The total number of enums must adhere + to the following build assert. 
+*/ +typedef enum _RGX_CCB_REQUESTOR_TYPE_ +{ +#define CONSTRUCT_ENUM(req) REQ_TYPE_##req, + RGX_CCB_REQUESTORS (CONSTRUCT_ENUM) +#undef CONSTRUCT_ENUM + + /* should always be at the end */ + REQ_TYPE_TOTAL_COUNT, +} RGX_CCB_REQUESTOR_TYPE; + +/* Tuple describing the columns of the following table */ +typedef enum _RGX_CCB_REQUESTOR_TUPLE_ +{ + REQ_RGX_FW_CLIENT_CCB_STRING, /* Index to comment to be dumped in DevMemAllocs when allocating FirmwareClientCCB for this requestor */ + REQ_RGX_FW_CLIENT_CCB_CONTROL_STRING, /* Index to comment to be dumped in DevMemAllocs when allocating FirmwareClientCCBControl for this requestor */ + REQ_PDUMP_COMMENT, /* Index to comment to be dumped in PDUMPs */ + + /* should always be at the end */ + REQ_TUPLE_CARDINALITY, +} RGX_CCB_REQUESTOR_TUPLE; + +/* Unpack U8 values from U32. */ +#define U32toU8_Unpack1(U32Packed) (U32Packed & 0xFF) +#define U32toU8_Unpack2(U32Packed) ((U32Packed>>8) & 0xFF) +#define U32toU8_Unpack3(U32Packed) ((U32Packed>>16) & 0xFF) +#define U32toU8_Unpack4(U32Packed) ((U32Packed>>24) & 0xFF) + +/* Defines for bit meanings within the ui32CCBFlags member of struct _RGX_CLIENT_CCB_ + * + * ( X = taken/in use, - = available/unused ) + * + * 31 10 + * | || + * ------------------------------XX + * Bit Meaning + * 0 = If set, CCB is still open and commands will be appended to it + * 1 = If set, do not perform Sync Lockup Recovery (SLR) for this CCB + */ +#define CCB_FLAGS_CCB_STATE_OPEN (0) /*!< This bit is set to indicate CCB is in the 'Open' state. */ +#define CCB_FLAGS_SLR_DISABLED (1) /*!< This bit is set to disable Sync Lockup Recovery (SLR) for this CCB. */ + + +/* Table containing an array of strings for each requestor type in the list of RGX_CCB_REQUESTORS. In addition to its use in + this module (rgxccb.c), this table is also used to access string to be dumped in PDUMP comments, hence, marking it extern for + use in other modules. 
+*/ +extern const IMG_CHAR *const aszCCBRequestors[][REQ_TUPLE_CARDINALITY]; + +PVRSRV_ERROR RGXCCBPDumpDrainCCB(RGX_CLIENT_CCB *psClientCCB, + IMG_UINT32 ui32PDumpFlags); + +PVRSRV_ERROR RGXCreateCCB(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32CCBSizeLog2, + IMG_UINT32 ui32CCBMaxSizeLog2, + IMG_UINT32 ui32ContextFlags, + CONNECTION_DATA *psConnectionData, + RGX_CCB_REQUESTOR_TYPE eCCBRequestor, + RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, + RGX_CLIENT_CCB **ppsClientCCB, + DEVMEM_MEMDESC **ppsClientCCBMemDesc, + DEVMEM_MEMDESC **ppsClientCCBCtlMemDesc); + +void RGXDestroyCCB(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_CLIENT_CCB *psClientCCB); + +PVRSRV_ERROR RGXCheckSpaceCCB(RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui32CmdSize); + +PVRSRV_ERROR RGXAcquireCCB(RGX_CLIENT_CCB *psClientCCB, + IMG_UINT32 ui32CmdSize, + void **ppvBufferSpace, + IMG_UINT32 ui32PDumpFlags); + +void RGXReleaseCCB(RGX_CLIENT_CCB *psClientCCB, + IMG_UINT32 ui32CmdSize, + IMG_UINT32 ui32PDumpFlags); + +IMG_UINT32 RGXGetHostWriteOffsetCCB(RGX_CLIENT_CCB *psClientCCB); +IMG_UINT32 RGXGetWrapMaskCCB(RGX_CLIENT_CCB *psClientCCB); + +PVRSRV_ERROR RGXSetCCBFlags(RGX_CLIENT_CCB *psClientCCB, + IMG_UINT32 ui32Flags); + +void RGXCmdHelperInitCmdCCB_CommandSize(IMG_UINT64 ui64FBSCEntryMask, + IMG_UINT32 ui32ClientFenceCount, + IMG_UINT32 ui32ClientUpdateCount, + IMG_UINT32 ui32CmdSize, + PRGXFWIF_TIMESTAMP_ADDR *ppPreAddr, + PRGXFWIF_TIMESTAMP_ADDR *ppPostAddr, + PRGXFWIF_UFO_ADDR *ppRMWUFOAddr, + RGX_CCB_CMD_HELPER_DATA *psCmdHelperData); + +void RGXCmdHelperInitCmdCCB_OtherData(RGX_CLIENT_CCB *psClientCCB, + IMG_UINT32 ui32ClientFenceCount, + PRGXFWIF_UFO_ADDR *pauiFenceUFOAddress, + IMG_UINT32 *paui32FenceValue, + IMG_UINT32 ui32ClientUpdateCount, + PRGXFWIF_UFO_ADDR *pauiUpdateUFOAddress, + IMG_UINT32 *paui32UpdateValue, + IMG_UINT32 ui32CmdSize, + IMG_PBYTE pui8DMCmd, + PRGXFWIF_TIMESTAMP_ADDR *ppPreAddr, + PRGXFWIF_TIMESTAMP_ADDR *ppPostAddr, + PRGXFWIF_UFO_ADDR *ppRMWUFOAddr, + 
RGXFWIF_CCB_CMD_TYPE eType, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + IMG_UINT32 ui32PDumpFlags, + RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData, + IMG_CHAR *pszCommandName, + IMG_BOOL bCCBStateOpen, + RGX_CCB_CMD_HELPER_DATA *psCmdHelperData); + +void RGXCmdHelperInitCmdCCB(RGX_CLIENT_CCB *psClientCCB, + IMG_UINT64 ui64FBSCEntryMask, + IMG_UINT32 ui32ClientFenceCount, + PRGXFWIF_UFO_ADDR *pauiFenceUFOAddress, + IMG_UINT32 *paui32FenceValue, + IMG_UINT32 ui32ClientUpdateCount, + PRGXFWIF_UFO_ADDR *pauiUpdateUFOAddress, + IMG_UINT32 *paui32UpdateValue, + IMG_UINT32 ui32CmdSize, + IMG_UINT8 *pui8DMCmd, + PRGXFWIF_TIMESTAMP_ADDR *ppPreAddr, + PRGXFWIF_TIMESTAMP_ADDR *ppPostAddr, + PRGXFWIF_UFO_ADDR *ppRMWUFOAddr, + RGXFWIF_CCB_CMD_TYPE eType, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + IMG_UINT32 ui32PDumpFlags, + RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData, + IMG_CHAR *pszCommandName, + IMG_BOOL bCCBStateOpen, + RGX_CCB_CMD_HELPER_DATA *psCmdHelperData); + +PVRSRV_ERROR RGXCmdHelperAcquireCmdCCB(IMG_UINT32 ui32CmdCount, + RGX_CCB_CMD_HELPER_DATA *asCmdHelperData); + +void RGXCmdHelperReleaseCmdCCB(IMG_UINT32 ui32CmdCount, + RGX_CCB_CMD_HELPER_DATA *asCmdHelperData, + const IMG_CHAR *pcszDMName, + IMG_UINT32 ui32CtxAddr); + +IMG_UINT32 RGXCmdHelperGetCommandSize(IMG_UINT32 ui32CmdCount, + RGX_CCB_CMD_HELPER_DATA *asCmdHelperData); + +IMG_UINT32 RGXCmdHelperGetCommandOffset(RGX_CCB_CMD_HELPER_DATA *asCmdHelperData, + IMG_UINT32 ui32Cmdindex); + +IMG_UINT32 RGXCmdHelperGetDMCommandHeaderOffset(RGX_CCB_CMD_HELPER_DATA *psCmdHelperData); + +void DumpStalledCCBCommand(PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext, + RGX_CLIENT_CCB *psCurrentClientCCB, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile); + +void DumpCCB(PVRSRV_RGXDEV_INFO *psDevInfo, + PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext, + RGX_CLIENT_CCB *psCurrentClientCCB, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile); + +PVRSRV_ERROR 
CheckForStalledCCB(PVRSRV_DEVICE_NODE *psDevNode, RGX_CLIENT_CCB *psCurrentClientCCB, RGX_KICK_TYPE_DM eKickTypeDM); + +void DumpStalledContextInfo(PVRSRV_RGXDEV_INFO *psDevInfo); +#endif /* RGXCCB_H */ diff --git a/drivers/gpu/drm/phytium/octopus/rgxcompute.c b/drivers/gpu/drm/phytium/octopus/rgxcompute.c new file mode 100644 index 000000000000..939d3c935d70 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxcompute.c @@ -0,0 +1,1411 @@ +/*************************************************************************/ /*! +@File +@Title RGX Compute routines +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description RGX Compute routines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "img_defs.h" +#include "srvkm.h" +#include "pdump_km.h" +#include "pvr_debug.h" +#include "rgxutils.h" +#include "rgxfwutils.h" +#include "rgxcompute.h" +#include "rgxmem.h" +#include "allocmem.h" +#include "devicemem.h" +#include "devicemem_pdump.h" +#include "osfunc.h" +#include "rgxccb.h" +#include "rgxhwperf.h" +#include "ospvr_gputrace.h" +#include "htbuffer.h" + +#include "sync_server.h" +#include "sync_internal.h" +#include "sync.h" +#include "rgx_memallocflags.h" + +#if defined(SUPPORT_BUFFER_SYNC) +#include "pvr_buffer_sync.h" +#endif + +#include "sync_checkpoint.h" +#include "sync_checkpoint_internal.h" + +#include "rgxtimerquery.h" + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) +#include "rgxworkest.h" + +#define HASH_CLEAN_LIMIT 6 +#endif + +/* Enable this to dump the compiled list of UFOs prior to kick call */ +#define ENABLE_CMP_UFO_DUMP 0 + +//#define CMP_CHECKPOINT_DEBUG 1 + +#if defined(CMP_CHECKPOINT_DEBUG) +#define CHKPT_DBG(X) PVR_DPF(X) +#else +#define CHKPT_DBG(X) +#endif + +typedef struct { + DEVMEM_MEMDESC *psContextStateMemDesc; + RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; + IMG_UINT32 
ui32Priority; +} RGX_SERVER_CC_CMP_DATA; + +struct _RGX_SERVER_COMPUTE_CONTEXT_ { + PVRSRV_DEVICE_NODE *psDeviceNode; + //RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; + //DEVMEM_MEMDESC *psFWComputeContextStateMemDesc; + DEVMEM_MEMDESC *psFWComputeContextMemDesc; + DEVMEM_MEMDESC *psFWFrameworkMemDesc; + RGX_SERVER_CC_CMP_DATA sComputeData; + DLLIST_NODE sListNode; + SYNC_ADDR_LIST sSyncAddrListFence; + SYNC_ADDR_LIST sSyncAddrListUpdate; + POS_LOCK hLock; +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + WORKEST_HOST_DATA sWorkEstData; +#endif +#if defined(SUPPORT_BUFFER_SYNC) + struct pvr_buffer_sync_context *psBufferSyncContext; +#endif +}; + +static +PVRSRV_ERROR _CreateComputeContext(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEM_MEMDESC *psAllocatedMemDesc, + IMG_UINT32 ui32AllocatedOffset, + SERVER_MMU_CONTEXT *psServerMMUContext, + DEVMEM_MEMDESC *psFWMemContextMemDesc, + IMG_UINT32 ui32PackedCCBSizeU88, + IMG_UINT32 ui32ContextFlags, + IMG_UINT32 ui32Priority, + IMG_UINT64 ui64RobustnessAddress, + IMG_UINT32 ui32MaxDeadlineMS, + RGX_COMMON_CONTEXT_INFO *psInfo, + RGX_SERVER_CC_CMP_DATA *psComputeData) +{ + IMG_UINT32 ui32CCBAllocSizeLog2, ui32CCBMaxAllocSizeLog2; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + + /* + Allocate device memory for the firmware GPU context suspend state. + Note: the FW reads/writes the state to memory by accessing the GPU register interface. 
+ */ + PDUMPCOMMENT("Allocate RGX firmware compute context suspend state"); + + eError = DevmemFwAllocate(psDevInfo, + sizeof(RGXFWIF_COMPUTECTX_STATE), + RGX_FWCOMCTX_ALLOCFLAGS, + "FwComputeContextState", + &psComputeData->psContextStateMemDesc); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate firmware GPU context suspend state (%d)", + __func__, + eError)); + goto fail_contextsuspendalloc; + } + + ui32CCBAllocSizeLog2 = U32toU8_Unpack1(ui32PackedCCBSizeU88); + ui32CCBMaxAllocSizeLog2 = U32toU8_Unpack2(ui32PackedCCBSizeU88); + eError = FWCommonContextAllocate(psConnection, + psDeviceNode, + REQ_TYPE_CDM, + RGXFWIF_DM_CDM, + psServerMMUContext, + psAllocatedMemDesc, + ui32AllocatedOffset, + psFWMemContextMemDesc, + psComputeData->psContextStateMemDesc, + ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_CDM_CCB_SIZE_LOG2, + ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_CDM_CCB_MAX_SIZE_LOG2, + ui32ContextFlags, + ui32Priority, + ui32MaxDeadlineMS, + ui64RobustnessAddress, + psInfo, + &psComputeData->psServerCommonContext); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to init Compute fw common context (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_computecommoncontext; + } + + /* + * Dump the FW compute context suspend state buffer + */ + PDUMPCOMMENT("Dump the compute context suspend state buffer"); + DevmemPDumpLoadMem(psComputeData->psContextStateMemDesc, + 0, + sizeof(RGXFWIF_COMPUTECTX_STATE), + PDUMP_FLAGS_CONTINUOUS); + + psComputeData->ui32Priority = ui32Priority; + return PVRSRV_OK; + +fail_computecommoncontext: + DevmemFree(psComputeData->psContextStateMemDesc); +fail_contextsuspendalloc: + PVR_ASSERT(eError != PVRSRV_OK); + + return eError; +} + +static +PVRSRV_ERROR _DestroyComputeContext(RGX_SERVER_CC_CMP_DATA *psComputeData, + PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + + /* Check if the FW has finished with this resource ... 
*/ + eError = RGXFWRequestCommonContextCleanUp(psDeviceNode, + psComputeData->psServerCommonContext, + RGXFWIF_DM_CDM, + PDUMP_FLAGS_NONE); + if (eError == PVRSRV_ERROR_RETRY) + { + return eError; + } + else if (eError != PVRSRV_OK) + { + PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", + __func__, + PVRSRVGetErrorString(eError))); + return eError; + } + + /* ... it has so we can free its resources */ + FWCommonContextFree(psComputeData->psServerCommonContext); + DevmemFwUnmapAndFree(psDeviceNode->pvDevice, psComputeData->psContextStateMemDesc); + psComputeData->psServerCommonContext = NULL; + return PVRSRV_OK; + } + +PVRSRV_ERROR PVRSRVRGXCreateComputeContextKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32Priority, + IMG_UINT32 ui32FrameworkCommandSize, + IMG_PBYTE pabyFrameworkCommand, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32StaticComputeContextStateSize, + IMG_PBYTE pStaticComputeContextState, + IMG_UINT32 ui32PackedCCBSizeU88, + IMG_UINT32 ui32ContextFlags, + IMG_UINT64 ui64RobustnessAddress, + IMG_UINT32 ui32MaxDeadlineMS, + RGX_SERVER_COMPUTE_CONTEXT **ppsComputeContext) +{ + DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_SERVER_COMPUTE_CONTEXT *psComputeContext; + RGX_COMMON_CONTEXT_INFO sInfo = {NULL}; + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_FWCOMPUTECONTEXT *psFWComputeContext; + + /* Prepare cleanup struct */ + *ppsComputeContext = NULL; + + if (ui32StaticComputeContextStateSize > RGXFWIF_STATIC_COMPUTECONTEXT_SIZE) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psComputeContext = OSAllocZMem(sizeof(*psComputeContext)); + if (psComputeContext == NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* + Create the FW compute context, this has the CDM common + context embedded within it + */ + eError = DevmemFwAllocate(psDevInfo, + 
sizeof(RGXFWIF_FWCOMPUTECONTEXT), + RGX_FWCOMCTX_ALLOCFLAGS, + "FwComputeContext", + &psComputeContext->psFWComputeContextMemDesc); + if (eError != PVRSRV_OK) + { + goto fail_fwcomputecontext; + } + + eError = OSLockCreate(&psComputeContext->hLock); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to create lock (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_createlock; + } + + psComputeContext->psDeviceNode = psDeviceNode; + + if (ui32FrameworkCommandSize) + { + eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode, + &psComputeContext->psFWFrameworkMemDesc, + ui32FrameworkCommandSize); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate firmware GPU framework state (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_frameworkcreate; + } + + /* Copy the Framework client data into the framework buffer */ + eError = PVRSRVRGXFrameworkCopyCommand(psComputeContext->psFWFrameworkMemDesc, + pabyFrameworkCommand, + ui32FrameworkCommandSize); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to populate the framework buffer (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_frameworkcopy; + } + sInfo.psFWFrameworkMemDesc = psComputeContext->psFWFrameworkMemDesc; + } + + eError = _CreateComputeContext(psConnection, + psDeviceNode, + psComputeContext->psFWComputeContextMemDesc, + offsetof(RGXFWIF_FWCOMPUTECONTEXT, sCDMContext), + hMemCtxPrivData, + psFWMemContextMemDesc, + ui32PackedCCBSizeU88, + ui32ContextFlags, + ui32Priority, + ui64RobustnessAddress, + ui32MaxDeadlineMS, + &sInfo, + &psComputeContext->sComputeData); + if (eError != PVRSRV_OK) + { + goto fail_computecontext; + } + + eError = DevmemAcquireCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc, + (void **)&psFWComputeContext); + if (eError != PVRSRV_OK) + { + goto fail_acquire_cpu_mapping; + } + + OSDeviceMemCopy(&psFWComputeContext->sStaticComputeContextState, pStaticComputeContextState, 
ui32StaticComputeContextStateSize); + DevmemPDumpLoadMem(psComputeContext->psFWComputeContextMemDesc, 0, sizeof(RGXFWIF_FWCOMPUTECONTEXT), PDUMP_FLAGS_CONTINUOUS); + DevmemReleaseCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc); + +#if defined(SUPPORT_BUFFER_SYNC) + psComputeContext->psBufferSyncContext = + pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice, + "rogue-cdm"); + if (IS_ERR(psComputeContext->psBufferSyncContext)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed to create buffer_sync context (err=%ld)", + __func__, PTR_ERR(psComputeContext->psBufferSyncContext))); + + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto fail_buffer_sync_context_create; + } +#endif + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + WorkEstInitCompute(psDevInfo, &psComputeContext->sWorkEstData); +#endif + + SyncAddrListInit(&psComputeContext->sSyncAddrListFence); + SyncAddrListInit(&psComputeContext->sSyncAddrListUpdate); + + { + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + OSWRLockAcquireWrite(psDevInfo->hComputeCtxListLock); + dllist_add_to_tail(&(psDevInfo->sComputeCtxtListHead), &(psComputeContext->sListNode)); + OSWRLockReleaseWrite(psDevInfo->hComputeCtxListLock); + } + + *ppsComputeContext = psComputeContext; + return PVRSRV_OK; + +#if defined(SUPPORT_BUFFER_SYNC) +fail_buffer_sync_context_create: +#endif +fail_acquire_cpu_mapping: + FWCommonContextFree(psComputeContext->sComputeData.psServerCommonContext); +fail_frameworkcopy: +fail_computecontext: + if (psComputeContext->psFWFrameworkMemDesc) + { + DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWFrameworkMemDesc); + } +fail_frameworkcreate: + OSLockDestroy(psComputeContext->hLock); +fail_createlock: + DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWComputeContextMemDesc); +fail_fwcomputecontext: + OSFreeMem(psComputeContext); + return eError; +} + +PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + 
PVRSRV_RGXDEV_INFO *psDevInfo = psComputeContext->psDeviceNode->pvDevice; +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + RGXFWIF_FWCOMPUTECONTEXT *psFWComputeContext; + IMG_UINT32 ui32WorkEstCCBSubmitted; + + eError = DevmemAcquireCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc, + (void **)&psFWComputeContext); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map firmware compute context (%s)", + __func__, + PVRSRVGetErrorString(eError))); + return eError; + } + + ui32WorkEstCCBSubmitted = psFWComputeContext->ui32WorkEstCCBSubmitted; + + DevmemReleaseCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc); + + /* Check if all of the workload estimation CCB commands for this workload are read */ + if (ui32WorkEstCCBSubmitted != psComputeContext->sWorkEstData.ui32WorkEstCCBReceived) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch", + __func__, ui32WorkEstCCBSubmitted, + psComputeContext->sWorkEstData.ui32WorkEstCCBReceived)); + + return PVRSRV_ERROR_RETRY; + } +#endif + + eError = _DestroyComputeContext(&psComputeContext->sComputeData, + psComputeContext->psDeviceNode); + if (eError != PVRSRV_OK) + { + return eError; + } + +#if defined(SUPPORT_BUFFER_SYNC) + /* remove after RGXFWRequestCommonContextCleanUp() because we might return + * RETRY and don't want to be calling this twice */ + if (psComputeContext->psBufferSyncContext != NULL) + { + pvr_buffer_sync_context_destroy(psComputeContext->psBufferSyncContext); + psComputeContext->psBufferSyncContext = NULL; + } +#endif + + OSWRLockAcquireWrite(psDevInfo->hComputeCtxListLock); + dllist_remove_node(&(psComputeContext->sListNode)); + OSWRLockReleaseWrite(psDevInfo->hComputeCtxListLock); + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + WorkEstDeInitCompute(psDevInfo, &psComputeContext->sWorkEstData); +#endif + + if (psComputeContext->psFWFrameworkMemDesc != NULL) + { + DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWFrameworkMemDesc); + } 
+ DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWComputeContextMemDesc); + + OSLockDestroy(psComputeContext->hLock); + OSFreeMem(psComputeContext); + + return PVRSRV_OK; +} + + +PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, + IMG_UINT32 ui32ClientCacheOpSeqNum, + IMG_UINT32 ui32ClientUpdateCount, + SYNC_PRIMITIVE_BLOCK **pauiClientUpdateUFODevVarBlock, + IMG_UINT32 *paui32ClientUpdateSyncOffset, + IMG_UINT32 *paui32ClientUpdateValue, + PVRSRV_FENCE iCheckFence, + PVRSRV_TIMELINE iUpdateTimeline, + PVRSRV_FENCE *piUpdateFence, + IMG_CHAR pszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH], + IMG_UINT32 ui32CmdSize, + IMG_PBYTE pui8DMCmd, + IMG_UINT32 ui32PDumpFlags, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32SyncPMRCount, + IMG_UINT32 *paui32SyncPMRFlags, + PMR **ppsSyncPMRs, + IMG_UINT32 ui32NumWorkgroups, + IMG_UINT32 ui32NumWorkitems, + IMG_UINT64 ui64DeadlineInus) +{ + RGXFWIF_KCCB_CMD sCmpKCCBCmd; + RGX_CCB_CMD_HELPER_DATA asCmdHelperData[1]; + PVRSRV_ERROR eError; + PVRSRV_ERROR eError2; + IMG_UINT32 ui32CDMCmdOffset = 0; + PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psComputeContext->sComputeData.psServerCommonContext); + RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psComputeContext->sComputeData.psServerCommonContext); + IMG_UINT32 ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal); + IMG_UINT32 ui32FWCtx; + IMG_BOOL bCCBStateOpen = IMG_FALSE; + + PRGXFWIF_TIMESTAMP_ADDR pPreAddr; + PRGXFWIF_TIMESTAMP_ADDR pPostAddr; + PRGXFWIF_UFO_ADDR pRMWUFOAddr; + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + RGXFWIF_WORKEST_KICK_DATA sWorkloadKickDataCompute = {0}; + IMG_UINT32 ui32CDMWorkloadDataRO = 0; + IMG_UINT32 ui32CDMCmdHeaderOffset = 0; + IMG_UINT32 ui32CDMCmdOffsetWrapCheck = 0; + RGX_WORKLOAD sWorkloadCharacteristics = {0}; +#endif + + IMG_UINT64 ui64FBSCEntryMask; + IMG_UINT32 ui32IntClientFenceCount = 0; + PRGXFWIF_UFO_ADDR *pauiIntFenceUFOAddress = NULL; + IMG_UINT32 
ui32IntClientUpdateCount = 0; + PRGXFWIF_UFO_ADDR *pauiIntUpdateUFOAddress = NULL; + IMG_UINT32 *paui32IntUpdateValue = NULL; + PVRSRV_FENCE iUpdateFence = PVRSRV_NO_FENCE; + IMG_UINT64 uiCheckFenceUID = 0; + IMG_UINT64 uiUpdateFenceUID = 0; + PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL; + PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL; + IMG_UINT32 ui32FenceSyncCheckpointCount = 0; + IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL; + PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL; + IMG_UINT32 ui32FenceTimelineUpdateValue = 0; + void *pvUpdateFenceFinaliseData = NULL; + +#if defined(SUPPORT_BUFFER_SYNC) + struct pvr_buffer_sync_append_data *psBufferSyncData = NULL; + PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL; + IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0; + PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL; +#endif /* defined(SUPPORT_BUFFER_SYNC) */ + + CMD_COMMON *psComputeCmdCmn = IMG_OFFSET_ADDR(pui8DMCmd, 0); + + if (iUpdateTimeline >= 0 && !piUpdateFence) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Ensure we haven't been given a null ptr to + * update values if we have been told we + * have updates + */ + if (ui32ClientUpdateCount > 0) + { + PVR_LOG_RETURN_IF_FALSE(paui32ClientUpdateValue != NULL, + "paui32ClientUpdateValue NULL but " + "ui32ClientUpdateCount > 0", + PVRSRV_ERROR_INVALID_PARAMS); + } + + /* Ensure the string is null-terminated (Required for safety) */ + pszUpdateFenceName[31] = '\0'; + + OSLockAcquire(psComputeContext->hLock); + + eError = SyncAddrListPopulate(&psComputeContext->sSyncAddrListFence, + 0, + NULL, + NULL); + if (eError != PVRSRV_OK) + { + goto err_populate_sync_addr_list; + } + + ui32IntClientUpdateCount = ui32ClientUpdateCount; + + eError = SyncAddrListPopulate(&psComputeContext->sSyncAddrListUpdate, + ui32ClientUpdateCount, + pauiClientUpdateUFODevVarBlock, + paui32ClientUpdateSyncOffset); + if (eError != PVRSRV_OK) + { + goto err_populate_sync_addr_list; + } + if 
(ui32IntClientUpdateCount && !pauiIntUpdateUFOAddress) + { + pauiIntUpdateUFOAddress = psComputeContext->sSyncAddrListUpdate.pasFWAddrs; + } + paui32IntUpdateValue = paui32ClientUpdateValue; + + if (ui32SyncPMRCount != 0) + { +#if defined(SUPPORT_BUFFER_SYNC) + int err; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: Calling " + "pvr_buffer_sync_resolve_and_create_fences", __func__)); + + err = pvr_buffer_sync_resolve_and_create_fences( + psComputeContext->psBufferSyncContext, + psComputeContext->psDeviceNode->hSyncCheckpointContext, + ui32SyncPMRCount, + ppsSyncPMRs, + paui32SyncPMRFlags, + &ui32BufferFenceSyncCheckpointCount, + &apsBufferFenceSyncCheckpoints, + &psBufferUpdateSyncCheckpoint, + &psBufferSyncData + ); + + if (unlikely(err)) + { + switch (err) + { + case -EINTR: + eError = PVRSRV_ERROR_RETRY; + break; + case -ENOMEM: + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + break; + default: + eError = PVRSRV_ERROR_INVALID_PARAMS; + break; + } + + if (eError != PVRSRV_ERROR_RETRY) + { + PVR_DPF((PVR_DBG_ERROR, "%s: " + "pvr_buffer_sync_resolve_and_create_fences failed (%d)", + __func__, eError)); + } + + goto fail_resolve_input_fence; + } + + /* Append buffer sync fences */ + if (ui32BufferFenceSyncCheckpointCount > 0) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d buffer sync checkpoints " + "to CDM Fence (&psTransferContext->sSyncAddrListFence=<%p>, " + "pauiIntFenceUFOAddress=<%p>)...", __func__, + ui32BufferFenceSyncCheckpointCount, + (void *) &psComputeContext->sSyncAddrListFence , + (void *) pauiIntFenceUFOAddress)); + + SyncAddrListAppendAndDeRefCheckpoints(&psComputeContext->sSyncAddrListFence, + ui32BufferFenceSyncCheckpointCount, + apsBufferFenceSyncCheckpoints); + if (pauiIntFenceUFOAddress == NULL) + { + pauiIntFenceUFOAddress = psComputeContext->sSyncAddrListFence.pasFWAddrs; + } + ui32IntClientFenceCount += ui32BufferFenceSyncCheckpointCount; + } + + /* Append the update (from output fence) */ + if (psBufferUpdateSyncCheckpoint) + { + 
SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListUpdate, + 1, &psBufferUpdateSyncCheckpoint); + if (pauiIntUpdateUFOAddress == NULL) + { + pauiIntUpdateUFOAddress = psComputeContext->sSyncAddrListUpdate.pasFWAddrs; + } + ui32IntClientUpdateCount++; + } +#else /* defined(SUPPORT_BUFFER_SYNC) */ + PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync not supported but got %u buffers", + __func__, ui32SyncPMRCount)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto err_populate_sync_addr_list; +#endif /* defined(SUPPORT_BUFFER_SYNC) */ + } + + CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), psComputeContext->psDeviceNode->hSyncCheckpointContext=<%p>...", __func__, iCheckFence, (void*)psComputeContext->psDeviceNode->hSyncCheckpointContext)); + /* Resolve the sync checkpoints that make up the input fence */ + eError = SyncCheckpointResolveFence(psComputeContext->psDeviceNode->hSyncCheckpointContext, + iCheckFence, + &ui32FenceSyncCheckpointCount, + &apsFenceSyncCheckpoints, + &uiCheckFenceUID, ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", __func__, eError)); + goto fail_free_buffer_sync_data; + } + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __func__, iCheckFence, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints)); +#if defined(CMP_CHECKPOINT_DEBUG) + if (ui32FenceSyncCheckpointCount > 0) + { + IMG_UINT32 ii; + for (ii=0; ii", __func__, ii, (void*)psNextCheckpoint)); + } + } +#endif + /* Create the output fence (if required) */ + if (iUpdateTimeline != PVRSRV_NO_TIMELINE) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointCreateFence (iUpdateFence=%d, iUpdateTimeline=%d, psComputeContext->psDeviceNode->hSyncCheckpointContext=<%p>)...", __func__, iUpdateFence, iUpdateTimeline, (void*)psComputeContext->psDeviceNode->hSyncCheckpointContext)); + eError = 
SyncCheckpointCreateFence(psComputeContext->psDeviceNode, + pszUpdateFenceName, + iUpdateTimeline, + psComputeContext->psDeviceNode->hSyncCheckpointContext, + &iUpdateFence, + &uiUpdateFenceUID, + &pvUpdateFenceFinaliseData, + &psUpdateSyncCheckpoint, + (void*)&psFenceTimelineUpdateSync, + &ui32FenceTimelineUpdateValue, + ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned error (%d)", __func__, eError)); + goto fail_create_output_fence; + } + + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned from SyncCheckpointCreateFence (iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, ui32FenceTimelineUpdateValue=%u)", __func__, iUpdateFence, psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue)); + + CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u, psFenceTimelineUpdateSync=<%p>", __func__, ui32IntClientUpdateCount, (void*)psFenceTimelineUpdateSync)); + /* Append the sync prim update for the timeline (if required) */ + if (psFenceTimelineUpdateSync) + { + IMG_UINT32 *pui32TimelineUpdateWp = NULL; + + /* Allocate memory to hold the list of update values (including our timeline update) */ + pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1)); + if (!pui32IntAllocatedUpdateValues) + { + /* Failed to allocate memory */ + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_alloc_update_values_mem; + } + OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1)); + /* Copy the update values into the new memory, then append our timeline update value */ + if (paui32IntUpdateValue) + { + OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount); + } +#if defined(CMP_CHECKPOINT_DEBUG) + if (ui32IntClientUpdateCount > 0) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues; + + CHKPT_DBG((PVR_DBG_ERROR, 
"%s: ui32IntClientUpdateCount=%u:", __func__, ui32IntClientUpdateCount)); + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + /* Now set the additional update value */ + pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32IntClientUpdateCount; + *pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue; + ui32IntClientUpdateCount++; + /* Now make sure paui32ClientUpdateValue points to pui32IntAllocatedUpdateValues */ + paui32ClientUpdateValue = pui32IntAllocatedUpdateValues; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: append the timeline sync prim addr <%p> to the compute context update list", __func__, (void*)psFenceTimelineUpdateSync)); + /* Now append the timeline sync prim addr to the compute context update list */ + SyncAddrListAppendSyncPrim(&psComputeContext->sSyncAddrListUpdate, + psFenceTimelineUpdateSync); +#if defined(CMP_CHECKPOINT_DEBUG) + if (ui32IntClientUpdateCount > 0) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u:", __func__, ui32IntClientUpdateCount)); + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + /* Ensure paui32IntUpdateValue is now pointing to our new array of update values */ + paui32IntUpdateValue = pui32IntAllocatedUpdateValues; + } + } + + /* Append the checks (from input fence) */ + if (ui32FenceSyncCheckpointCount > 0) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to Compute CDM Fence (&psComputeContext->sSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)&psComputeContext->sSyncAddrListFence)); +#if defined(CMP_CHECKPOINT_DEBUG) + if (ui32IntClientUpdateCount > 0) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress; + + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + 
SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListFence, + ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); + if (!pauiIntFenceUFOAddress) + { + pauiIntFenceUFOAddress = psComputeContext->sSyncAddrListFence.pasFWAddrs; + } + ui32IntClientFenceCount += ui32FenceSyncCheckpointCount; + } +#if defined(CMP_CHECKPOINT_DEBUG) + if (ui32IntClientUpdateCount > 0) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)paui32IntUpdateValue; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: Dumping %d update values (paui32IntUpdateValue=<%p>)...", __func__, ui32IntClientUpdateCount, (void*)paui32IntUpdateValue)); + for (iii=0; iii", __func__, iii, (void*)pui32Tmp)); + CHKPT_DBG((PVR_DBG_ERROR, "%s: *paui32IntUpdateValue[%d] = 0x%x", __func__, iii, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + + if (psUpdateSyncCheckpoint) + { + /* Append the update (from output fence) */ + CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint to Compute CDM Update (&psComputeContext->sSyncAddrListUpdate=<%p>, psUpdateSyncCheckpoint=<%p>)...", __func__, (void*)&psComputeContext->sSyncAddrListUpdate , (void*)psUpdateSyncCheckpoint)); + SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListUpdate, + 1, + &psUpdateSyncCheckpoint); + if (!pauiIntUpdateUFOAddress) + { + pauiIntUpdateUFOAddress = psComputeContext->sSyncAddrListUpdate.pasFWAddrs; + } + ui32IntClientUpdateCount++; +#if defined(CMP_CHECKPOINT_DEBUG) + if (ui32IntClientUpdateCount > 0) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntUpdateUFOAddress; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntUpdateUFOAddress=<%p>, pui32Tmp=<%p>, ui32IntClientUpdateCount=%u", __func__, (void*)pauiIntUpdateUFOAddress, (void*)pui32Tmp, ui32IntClientUpdateCount)); + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + } + CHKPT_DBG((PVR_DBG_ERROR, "%s: (after pvr_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __func__, ui32IntClientFenceCount, 
ui32IntClientUpdateCount)); + +#if (ENABLE_CMP_UFO_DUMP == 1) + PVR_DPF((PVR_DBG_ERROR, "%s: dumping Compute (CDM) fence/updates syncs...", __func__)); + { + IMG_UINT32 ii; + PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress; + PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress; + IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue; + + /* Dump Fence syncs and Update syncs */ + PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Compute (CDM) fence syncs (&psComputeContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __func__, ui32IntClientFenceCount, (void*)&psComputeContext->sSyncAddrListFence, (void*)pauiIntFenceUFOAddress)); + for (ii=0; ii. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr)); + psTmpIntFenceUFOAddress++; + } + PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Compute (CDM) update syncs (&psComputeContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __func__, ui32IntClientUpdateCount, (void*)&psComputeContext->sSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress)); + for (ii=0; iiui32Addr & 0x1) + { + PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr)); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. 
FWAddr=0x%x, UpdateValue=%d", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue)); + pui32TmpIntUpdateValue++; + } + psTmpIntUpdateUFOAddress++; + } + } +#endif + RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psComputeContext->psDeviceNode->pvDevice, + &pPreAddr, + &pPostAddr, + &pRMWUFOAddr); + /* + * Extract the FBSC entries from MMU Context for the deferred FBSC invalidate command, + * in other words, take the value and set it to zero afterwards. + * FBSC Entry Mask must be extracted from MMU ctx and updated just before the kick starts + * as it must be ready at the time of context activation. + */ + { + eError = RGXExtractFBSCEntryMaskFromMMUContext(psComputeContext->psDeviceNode, + FWCommonContextGetServerMMUCtx(psComputeContext->sComputeData.psServerCommonContext), + &ui64FBSCEntryMask); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to extract FBSC Entry Mask (%d)", eError)); + goto fail_cmdinvalfbsc; + } + } + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + sWorkloadCharacteristics.sCompute.ui32NumberOfWorkgroups = ui32NumWorkgroups; + sWorkloadCharacteristics.sCompute.ui32NumberOfWorkitems = ui32NumWorkitems; + + /* Prepare workload estimation */ + WorkEstPrepare(psComputeContext->psDeviceNode->pvDevice, + &psComputeContext->sWorkEstData, + &psComputeContext->sWorkEstData.uWorkloadMatchingData.sCompute.sDataCDM, + RGXFWIF_CCB_CMD_TYPE_CDM, + &sWorkloadCharacteristics, + ui64DeadlineInus, + &sWorkloadKickDataCompute); +#endif + + RGXCmdHelperInitCmdCCB(psClientCCB, + ui64FBSCEntryMask, + ui32IntClientFenceCount, + pauiIntFenceUFOAddress, + NULL, + ui32IntClientUpdateCount, + pauiIntUpdateUFOAddress, + paui32IntUpdateValue, + ui32CmdSize, + pui8DMCmd, + &pPreAddr, + &pPostAddr, + &pRMWUFOAddr, + RGXFWIF_CCB_CMD_TYPE_CDM, + ui32ExtJobRef, + ui32IntJobRef, + ui32PDumpFlags, +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + &sWorkloadKickDataCompute, +#else + NULL, 
+#endif + "Compute", + bCCBStateOpen, + asCmdHelperData); + + eError = RGXCmdHelperAcquireCmdCCB(ARRAY_SIZE(asCmdHelperData), asCmdHelperData); + if (eError != PVRSRV_OK) + { + goto fail_cmdaquire; + } + + + /* + We should reserve space in the kernel CCB here and fill in the command + directly. + This is so if there isn't space in the kernel CCB we can return with + retry back to services client before we take any operations + */ + + /* + We might only be kicking for flush out a padding packet so only submit + the command if the create was successful + */ + if (eError == PVRSRV_OK) + { + /* + All the required resources are ready at this point, we can't fail so + take the required server sync operations and commit all the resources + */ + + ui32CDMCmdOffset = RGXGetHostWriteOffsetCCB(psClientCCB); + RGXCmdHelperReleaseCmdCCB(1, asCmdHelperData, "CDM", FWCommonContextGetFWAddress(psComputeContext->sComputeData.psServerCommonContext).ui32Addr); + } + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* The following is used to determine the offset of the command header containing + the workload estimation data so that can be accessed when the KCCB is read */ + ui32CDMCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(asCmdHelperData); + + ui32CDMCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psComputeContext->sComputeData.psServerCommonContext)); + + /* This checks if the command would wrap around at the end of the CCB and + * therefore would start at an offset of 0 rather than the current command + * offset */ + if (ui32CDMCmdOffset < ui32CDMCmdOffsetWrapCheck) + { + ui32CDMWorkloadDataRO = ui32CDMCmdOffset; + } + else + { + ui32CDMWorkloadDataRO = 0; + } +#endif + + /* Construct the kernel compute CCB command. 
*/ + sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; + sCmpKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psComputeContext->sComputeData.psServerCommonContext); + sCmpKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB); + sCmpKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB); + sCmpKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; + + /* Add the Workload data into the KCCB kick */ +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* Store the offset to the CCCB command header so that it can be referenced + * when the KCCB command reaches the FW */ + sCmpKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = ui32CDMWorkloadDataRO + ui32CDMCmdHeaderOffset; +#else + sCmpKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0; +#endif + + ui32FWCtx = FWCommonContextGetFWAddress(psComputeContext->sComputeData.psServerCommonContext).ui32Addr; + + if (psComputeCmdCmn) + { + HTBLOGK(HTB_SF_MAIN_KICK_CDM, + sCmpKCCBCmd.uCmdData.sCmdKickData.psContext, + ui32CDMCmdOffset, + psComputeCmdCmn->ui32FrameNum, + ui32ExtJobRef, + ui32IntJobRef + ); + } + + RGXSRV_HWPERF_ENQ(psComputeContext, OSGetCurrentClientProcessIDKM(), + ui32FWCtx, ui32ExtJobRef, ui32IntJobRef, + RGX_HWPERF_KICK_TYPE_CDM, + iCheckFence, + iUpdateFence, + iUpdateTimeline, + uiCheckFenceUID, + uiUpdateFenceUID, + NO_DEADLINE, + NO_CYCEST); + + /* + * Submit the compute command to the firmware. 
+ */ + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError2 = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice, + RGXFWIF_DM_CDM, + &sCmpKCCBCmd, + ui32ClientCacheOpSeqNum, + ui32PDumpFlags); + if (eError2 != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + if (eError2 != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s failed to schedule kernel CCB command (%s)", + __func__, + PVRSRVGetErrorString(eError2))); + } + else + { + PVRGpuTraceEnqueueEvent(psComputeContext->psDeviceNode->pvDevice, + ui32FWCtx, ui32ExtJobRef, ui32IntJobRef, + RGX_HWPERF_KICK_TYPE_CDM); + } + /* + * Now check eError (which may have returned an error from our earlier call + * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first + * so we check it now... + */ + if (eError != PVRSRV_OK ) + { + goto fail_cmdaquire; + } + +#if defined(NO_HARDWARE) + /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */ + if (psUpdateSyncCheckpoint) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Signalling NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __func__, (void*)psUpdateSyncCheckpoint, SyncCheckpointGetId(psUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(psUpdateSyncCheckpoint))); + SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint); + } + if (psFenceTimelineUpdateSync) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Updating NOHW sync prim<%p> to %d", __func__, (void*)psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue)); + SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue); + } + SyncCheckpointNoHWUpdateTimelines(NULL); +#endif /* defined(NO_HARDWARE) */ + +#if defined(SUPPORT_BUFFER_SYNC) + if (psBufferSyncData) + { + pvr_buffer_sync_kick_succeeded(psBufferSyncData); + } + if (apsBufferFenceSyncCheckpoints) + { + kfree(apsBufferFenceSyncCheckpoints); + } +#endif /* defined(SUPPORT_BUFFER_SYNC) */ + + *piUpdateFence = iUpdateFence; + + if (pvUpdateFenceFinaliseData && 
(iUpdateFence != PVRSRV_NO_FENCE)) + { + SyncCheckpointFinaliseFence(psComputeContext->psDeviceNode, iUpdateFence, + pvUpdateFenceFinaliseData, + psUpdateSyncCheckpoint, pszUpdateFenceName); + } + /* Drop the references taken on the sync checkpoints in the + * resolved input fence */ + SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); + /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ + if (apsFenceSyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); + } + /* Free memory allocated to hold the internal list of update values */ + if (pui32IntAllocatedUpdateValues) + { + OSFreeMem(pui32IntAllocatedUpdateValues); + pui32IntAllocatedUpdateValues = NULL; + } + + OSLockRelease(psComputeContext->hLock); + + return PVRSRV_OK; + +fail_cmdaquire: +fail_cmdinvalfbsc: + SyncAddrListRollbackCheckpoints(psComputeContext->psDeviceNode, &psComputeContext->sSyncAddrListFence); + SyncAddrListRollbackCheckpoints(psComputeContext->psDeviceNode, &psComputeContext->sSyncAddrListUpdate); +fail_alloc_update_values_mem: + if (iUpdateFence != PVRSRV_NO_FENCE) + { + SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData); + } +fail_create_output_fence: + /* Drop the references taken on the sync checkpoints in the + * resolved input fence */ + SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); + +fail_free_buffer_sync_data: +#if defined(SUPPORT_BUFFER_SYNC) + if (psBufferSyncData) + { + pvr_buffer_sync_kick_failed(psBufferSyncData); + } + if (apsBufferFenceSyncCheckpoints) + { + kfree(apsBufferFenceSyncCheckpoints); + } + +fail_resolve_input_fence: +#endif /* defined(SUPPORT_BUFFER_SYNC) */ + +err_populate_sync_addr_list: + /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ + if (apsFenceSyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); + } + /* 
Free memory allocated to hold the internal list of update values */ + if (pui32IntAllocatedUpdateValues) + { + OSFreeMem(pui32IntAllocatedUpdateValues); + pui32IntAllocatedUpdateValues = NULL; + } + OSLockRelease(psComputeContext->hLock); + return eError; +} + +PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext) +{ + RGXFWIF_KCCB_CMD sFlushCmd; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 ui32kCCBCommandSlot; + PVRSRV_RGXDEV_INFO *psDevInfo = psComputeContext->psDeviceNode->pvDevice; + +#if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Submit Compute flush"); +#endif + sFlushCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL; + sFlushCmd.uCmdData.sSLCFlushInvalData.ui64Size = 0; + sFlushCmd.uCmdData.sSLCFlushInvalData.ui64Address = 0; + sFlushCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_FALSE; + sFlushCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_TRUE; + sFlushCmd.uCmdData.sSLCFlushInvalData.psContext = FWCommonContextGetFWAddress(psComputeContext->sComputeData.psServerCommonContext); + + OSLockAcquire(psComputeContext->hLock); + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, + RGXFWIF_DM_CDM, + &sFlushCmd, + 0, + PDUMP_FLAGS_CONTINUOUS, + &ui32kCCBCommandSlot); + /* Iterate if we hit a PVRSRV_ERROR_KERNEL_CCB_FULL error */ + if ((eError != PVRSRV_ERROR_RETRY) && + (eError != PVRSRV_ERROR_KERNEL_CCB_FULL)) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + if (eError != PVRSRV_OK) + { + /* If we hit a temporary KCCB exhaustion, return a RETRY to caller */ + if (eError == PVRSRV_ERROR_KERNEL_CCB_FULL) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Returning RETRY to caller", __func__)); + eError = PVRSRV_ERROR_RETRY; + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to schedule SLC flush command (%s)", + __func__, + PVRSRVGetErrorString(eError))); + } + } + else + { + /* Wait for the SLC flush to complete */ + eError = 
RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Compute flush aborted (%s)", + __func__, + PVRSRVGetErrorString(eError))); + } + else if (unlikely(psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] & + RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE)) + { + PVR_DPF((PVR_DBG_WARNING, "%s: FW poll on a HW operation failed", __func__)); + } + } + + OSLockRelease(psComputeContext->hLock); + return eError; +} + + +PVRSRV_ERROR PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext) +{ + RGXFWIF_KCCB_CMD sKCCBCmd; + PVRSRV_ERROR eError; + + OSLockAcquire(psComputeContext->hLock); + + /* Schedule the firmware command */ + sKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE; + sKCCBCmd.uCmdData.sWriteOffsetUpdateData.psContext = FWCommonContextGetFWAddress(psComputeContext->sComputeData.psServerCommonContext); + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice, + RGXFWIF_DM_CDM, + &sKCCBCmd, + 0, + PDUMP_FLAGS_NONE); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to schedule the FW command %d (%s)", + __func__, + eError, + PVRSRVGETERRORSTRING(eError))); + } + + OSLockRelease(psComputeContext->hLock); + + return eError; +} + + +PVRSRV_ERROR PVRSRVRGXSetComputeContextPriorityKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, + IMG_UINT32 ui32Priority) +{ + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + + OSLockAcquire(psComputeContext->hLock); + + eError = ContextSetPriority(psComputeContext->sComputeData.psServerCommonContext, + psConnection, + psComputeContext->psDeviceNode->pvDevice, + ui32Priority, + RGXFWIF_DM_CDM); + if (eError != 
PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the compute context (%s)", __func__, PVRSRVGetErrorString(eError))); + } + + OSLockRelease(psComputeContext->hLock); + return eError; +} + +/* + * PVRSRVRGXSetComputeContextPropertyKM + */ +PVRSRV_ERROR PVRSRVRGXSetComputeContextPropertyKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, + RGX_CONTEXT_PROPERTY eContextProperty, + IMG_UINT64 ui64Input, + IMG_UINT64 *pui64Output) +{ + PVRSRV_ERROR eError; + + switch (eContextProperty) + { + case RGX_CONTEXT_PROPERTY_FLAGS: + { + OSLockAcquire(psComputeContext->hLock); + eError = FWCommonContextSetFlags(psComputeContext->sComputeData.psServerCommonContext, + (IMG_UINT32)ui64Input); + OSLockRelease(psComputeContext->hLock); + PVR_LOG_IF_ERROR(eError, "FWCommonContextSetFlags"); + break; + } + + default: + { + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, eContextProperty)); + eError = PVRSRV_ERROR_NOT_SUPPORTED; + } + } + + return eError; +} + +void DumpComputeCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + IMG_UINT32 ui32VerbLevel) +{ + DLLIST_NODE *psNode, *psNext; + OSWRLockAcquireRead(psDevInfo->hComputeCtxListLock); + dllist_foreach_node(&psDevInfo->sComputeCtxtListHead, psNode, psNext) + { + RGX_SERVER_COMPUTE_CONTEXT *psCurrentServerComputeCtx = + IMG_CONTAINER_OF(psNode, RGX_SERVER_COMPUTE_CONTEXT, sListNode); + DumpFWCommonContextInfo(psCurrentServerComputeCtx->sComputeData.psServerCommonContext, + pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); + } + OSWRLockReleaseRead(psDevInfo->hComputeCtxListLock); +} + +IMG_UINT32 CheckForStalledClientComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + IMG_UINT32 ui32ContextBitMask = 0; + DLLIST_NODE *psNode, *psNext; + OSWRLockAcquireRead(psDevInfo->hComputeCtxListLock); + dllist_foreach_node(&psDevInfo->sComputeCtxtListHead, psNode, psNext) + { + RGX_SERVER_COMPUTE_CONTEXT 
*psCurrentServerComputeCtx = + IMG_CONTAINER_OF(psNode, RGX_SERVER_COMPUTE_CONTEXT, sListNode); + + if (CheckStalledClientCommonContext(psCurrentServerComputeCtx->sComputeData.psServerCommonContext, RGX_KICK_TYPE_DM_CDM) + == PVRSRV_ERROR_CCCB_STALLED) + { + ui32ContextBitMask |= RGX_KICK_TYPE_DM_CDM; + } + } + OSWRLockReleaseRead(psDevInfo->hComputeCtxListLock); + return ui32ContextBitMask; +} + +/* + * PVRSRVRGXGetLastDeviceErrorKM + */ +PVRSRV_ERROR PVRSRVRGXGetLastDeviceErrorKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 *ui32Error) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVR_UNREFERENCED_PARAMETER(psConnection); + + *ui32Error = psDevInfo->eLastDeviceError; + psDevInfo->eLastDeviceError = RGX_CONTEXT_RESET_REASON_NONE; + return PVRSRV_OK; +} + +/****************************************************************************** + End of file (rgxcompute.c) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgxcompute.h b/drivers/gpu/drm/phytium/octopus/rgxcompute.h new file mode 100644 index 000000000000..5b7c071c7cb2 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxcompute.h @@ -0,0 +1,183 @@ +/*************************************************************************/ /*! +@File +@Title RGX compute functionality +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Header for the RGX compute functionality +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(RGXCOMPUTE_H) +#define RGXCOMPUTE_H + +#include "devicemem.h" +#include "device.h" +#include "rgxfwutils.h" +#include "rgx_fwif_resetframework.h" +#include "rgxdebug.h" +#include "pvr_notifier.h" + +#include "sync_server.h" +#include "sync_internal.h" +#include "connection_server.h" + + +typedef struct _RGX_SERVER_COMPUTE_CONTEXT_ RGX_SERVER_COMPUTE_CONTEXT; + +/*! +******************************************************************************* + @Function PVRSRVRGXCreateComputeContextKM + + @Description + Creates a RGX device context for submitting commands to CDM. + + @Input pvDeviceNode - Services-managed device + @Input ui32Priority - Scheduling priority for commands on this context + @Input hMemCtxPrivData - private data + @Input ui32PackedCCBSizeU88 - packed CCB size. The first byte contains the + log2 CCB size and the second byte the log2 maximum CCB size. + @Input ui32ComputeCtxSwitchSize - Context control size + @Input pComputeCtxSwitch_Regs - Context control registers + @Output ppsComputeContext - cleanup data + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXCreateComputeContextKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32Priority, + IMG_UINT32 ui32FrameworkCommandSize, + IMG_PBYTE pabyFrameworkCommand, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32StaticComputeContextStateSize, + IMG_PBYTE pStaticComputeContextState, + IMG_UINT32 ui32PackedCCBSizeU88, + IMG_UINT32 ui32ContextFlags, + IMG_UINT64 ui64RobustnessAddress, + IMG_UINT32 ui32MaxDeadlineMS, + RGX_SERVER_COMPUTE_CONTEXT **ppsComputeContext); + +/*! 
+******************************************************************************* + @Function PVRSRVRGXDestroyComputeContextKM + + @Description + Server-side implementation of RGXDestroyComputeContext + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext); + + +/*! +******************************************************************************* + @Function PVRSRVRGXKickCDMKM + + @Description + Server-side implementation of RGXKickCDM + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, + IMG_UINT32 ui32ClientCacheOpSeqNum, + IMG_UINT32 ui32ClientUpdateCount, + SYNC_PRIMITIVE_BLOCK **pauiClientUpdateUFODevVarBlock, + IMG_UINT32 *paui32ClientUpdateSyncOffset, + IMG_UINT32 *paui32ClientUpdateValue, + PVRSRV_FENCE iCheckFence, + PVRSRV_TIMELINE iUpdateTimeline, + PVRSRV_FENCE *piUpdateFence, + IMG_CHAR pcszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH], + IMG_UINT32 ui32CmdSize, + IMG_PBYTE pui8DMCmd, + IMG_UINT32 ui32PDumpFlags, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32SyncPMRCount, + IMG_UINT32 *paui32SyncPMRFlags, + PMR **ppsSyncPMRs, + IMG_UINT32 ui32NumWorkgroups, + IMG_UINT32 ui32NumWorkitems, + IMG_UINT64 ui64DeadlineInus); + +/*! +******************************************************************************* + @Function PVRSRVRGXFlushComputeDataKM + + @Description + Server-side implementation of RGXFlushComputeData + + @Input psComputeContext - Compute context to flush + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext); + +/*! 
+******************************************************************************* + + @Function PVRSRVRGXNotifyComputeWriteOffsetUpdateKM + @Description Server-side implementation of RGXNotifyComputeWriteOffsetUpdate + + @Input psComputeContext - Compute context to flush + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext); + +PVRSRV_ERROR PVRSRVRGXSetComputeContextPriorityKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, + IMG_UINT32 ui32Priority); + +PVRSRV_ERROR PVRSRVRGXSetComputeContextPropertyKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, + RGX_CONTEXT_PROPERTY eContextProperty, + IMG_UINT64 ui64Input, + IMG_UINT64 *pui64Output); + +PVRSRV_ERROR PVRSRVRGXGetLastDeviceErrorKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 *ui32Error); + +/* Debug - Dump debug info of compute contexts on this device */ +void DumpComputeCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + IMG_UINT32 ui32VerbLevel); + +/* Debug/Watchdog - check if client compute contexts are stalled */ +IMG_UINT32 CheckForStalledClientComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo); + +#endif /* RGXCOMPUTE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/rgxdebug.c b/drivers/gpu/drm/phytium/octopus/rgxdebug.c new file mode 100644 index 000000000000..36f71ce6313e --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxdebug.c @@ -0,0 +1,3846 @@ +/*************************************************************************/ /*! +@File +@Title Rgx debug information +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description RGX debugging functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +//#define PVR_DPF_FUNCTION_TRACE_ON 1 +#undef PVR_DPF_FUNCTION_TRACE_ON + +#include "img_defs.h" +#include "rgxdefs_km.h" +#include "rgxdevice.h" +#include "rgxmem.h" +#include "allocmem.h" +#include "cache_km.h" +#include "osfunc.h" + +#include "rgxdebug.h" +#include "pvrversion.h" +#include "pvr_debug.h" +#include "srvkm.h" +#include "rgxutils.h" +#include "tlstream.h" +#include "rgxfwutils.h" +#include "pvrsrv.h" +#include "services_km.h" + +#include "devicemem.h" +#include "devicemem_pdump.h" +#include "devicemem_utils.h" +#include "rgx_fwif_km.h" +#include "rgx_fwif_sf.h" +#include "rgxfw_log_helper.h" +#include "fwtrace_string.h" +#include "rgxfwimageutils.h" +#include "fwload.h" + +#include "rgxta3d.h" +#include "rgxkicksync.h" +#include "rgxcompute.h" +#include "rgxtdmtransfer.h" +#include "rgxtimecorr.h" +#include "rgx_options.h" +#include "rgxinit.h" +#include "rgxlayer_impl.h" +#include "devicemem_history_server.h" +#include "info_page.h" + +#define PVR_DUMP_FIRMWARE_INFO(x) \ + PVR_DUMPDEBUG_LOG("FW info: %d.%d @ %8d (%s) build options: 0x%08x", \ + PVRVERSION_UNPACK_MAJ((x).ui32DDKVersion), \ + PVRVERSION_UNPACK_MIN((x).ui32DDKVersion), \ + (x).ui32DDKBuild, \ + ((x).ui32BuildOptions & OPTIONS_DEBUG_MASK) ? "debug":"release",\ + (x).ui32BuildOptions); + +#define DD_SUMMARY_INDENT "" +#define DD_NORMAL_INDENT " " + +#define RGX_DEBUG_STR_SIZE (150U) +#define MAX_FW_DESCRIPTION_LENGTH (600U) + + +#define RGX_TEXAS_BIF0_ID (0) +#define RGX_TEXAS_BIF1_ID (1) + +/* + * The first 7 or 8 cat bases are memory contexts used for PM + * or firmware. The rest are application contexts. The numbering + * is zero-based. 
+ */ +#if defined(SUPPORT_TRUSTED_DEVICE) +#define MAX_RESERVED_FW_MMU_CONTEXT (7) +#else +#define MAX_RESERVED_FW_MMU_CONTEXT (6) +#endif + +static const IMG_CHAR *const pszPowStateName[] = +{ +#define X(NAME) #NAME, + RGXFWIF_POW_STATES +#undef X +}; + +typedef struct _IMG_FLAGS2DESC_ +{ + IMG_UINT32 uiFlag; + const IMG_CHAR *pszLabel; +} IMG_FLAGS2DESC; + +static const IMG_CHAR * const apszFwOsStateName[RGXFW_CONNECTION_FW_STATE_COUNT] = +{ + "offline", + "ready", + "active", + "offloading" +}; + +#if defined(PVR_ENABLE_PHR) +static const IMG_FLAGS2DESC asPHRConfig2Description[] = +{ + {BIT_ULL(RGXFWIF_PHR_MODE_OFF), "off"}, + {BIT_ULL(RGXFWIF_PHR_MODE_RD_RESET), "reset RD hardware"}, + {BIT_ULL(RGXFWIF_PHR_MODE_FULL_RESET), "full gpu reset "}, +}; +#endif + +#if !defined(NO_HARDWARE) +static PVRSRV_ERROR +RGXPollMetaRegThroughSP(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32RegOffset, + IMG_UINT32 ui32PollValue, IMG_UINT32 ui32Mask) +{ + IMG_UINT32 ui32RegValue, ui32NumPolls = 0; + PVRSRV_ERROR eError; + + do + { + eError = RGXReadWithSP(psDevInfo, ui32RegOffset, &ui32RegValue); + if (eError != PVRSRV_OK) + { + return eError; + } + } while (((ui32RegValue & ui32Mask) != ui32PollValue) && (ui32NumPolls++ < 1000)); + + return ((ui32RegValue & ui32Mask) == ui32PollValue) ? PVRSRV_OK : PVRSRV_ERROR_RETRY; +} + +static PVRSRV_ERROR +RGXReadMetaCoreReg(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32RegAddr, IMG_UINT32 *pui32RegVal) +{ + PVRSRV_ERROR eError; + + /* Core Read Ready? */ + eError = RGXPollMetaRegThroughSP(psDevInfo, + META_CR_TXUXXRXRQ_OFFSET, + META_CR_TXUXXRXRQ_DREADY_BIT, + META_CR_TXUXXRXRQ_DREADY_BIT); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXPollMetaRegThroughSP"); + + /* Set the reg we are interested in reading */ + eError = RGXWriteWithSP(psDevInfo, META_CR_TXUXXRXRQ_OFFSET, + ui32RegAddr | META_CR_TXUXXRXRQ_RDnWR_BIT); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXWriteWithSP"); + + /* Core Read Done? 
*/ + eError = RGXPollMetaRegThroughSP(psDevInfo, + META_CR_TXUXXRXRQ_OFFSET, + META_CR_TXUXXRXRQ_DREADY_BIT, + META_CR_TXUXXRXRQ_DREADY_BIT); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXPollMetaRegThroughSP"); + + /* Read the value */ + return RGXReadWithSP(psDevInfo, META_CR_TXUXXRXDT_OFFSET, pui32RegVal); +} +#endif + +PVRSRV_ERROR +RGXReadWithSP(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FWAddr, IMG_UINT32 *pui32Value) +{ + PVRSRV_ERROR eError = RGXReadMETAAddr(psDevInfo, ui32FWAddr, pui32Value); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: %s", __func__, PVRSRVGetErrorString(eError))); + } + + return eError; +} + +PVRSRV_ERROR +RGXWriteWithSP(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FWAddr, IMG_UINT32 ui32Value) +{ + PVRSRV_ERROR eError = RGXWriteMETAAddr(psDevInfo, ui32FWAddr, ui32Value); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: %s", __func__, PVRSRVGetErrorString(eError))); + } + return eError; +} + +#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE) +static PVRSRV_ERROR _ValidateWithSP(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_DEV_VIRTADDR *psFWAddr, + void *pvHostCodeAddr, + IMG_UINT32 ui32MaxLen, + const IMG_CHAR *pszDesc, + IMG_UINT32 ui32StartOffset) +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32Value, i; + IMG_UINT32 ui32FWCodeDevVAAddr = psFWAddr->ui32Addr + ui32StartOffset; + IMG_UINT32 *pui32FWCode = (IMG_PUINT32) ((IMG_PBYTE)pvHostCodeAddr + ui32StartOffset); + + ui32MaxLen -= ui32StartOffset; + ui32MaxLen /= sizeof(IMG_UINT32); /* Byte -> 32 bit words */ + + for (i = 0; i < ui32MaxLen; i++) + { + eError = RGXReadMETAAddr(psDevInfo, ui32FWCodeDevVAAddr, &ui32Value); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: %s", __func__, PVRSRVGetErrorString(eError))); + return eError; + } + + PVR_DPF((PVR_DBG_VERBOSE, "0x%x: CPU 0x%08x, FW 0x%08x", i * 4, pui32FWCode[i], ui32Value)); + + if (pui32FWCode[i] != ui32Value) + { + 
PVR_DUMPDEBUG_LOG("_ValidateWithSP: Mismatch while validating %s at offset 0x%x: CPU 0x%08x (%p), FW 0x%08x (%x)", + pszDesc, + (i * 4) + ui32StartOffset, pui32FWCode[i], pui32FWCode, ui32Value, ui32FWCodeDevVAAddr); + return PVRSRV_ERROR_FW_IMAGE_MISMATCH; + } + + ui32FWCodeDevVAAddr += 4; + } + + PVR_DUMPDEBUG_LOG("Match between Host and Meta view of the %s", pszDesc); + return PVRSRV_OK; +} + +#if !defined(EMULATOR) +static PVRSRV_ERROR _ValidateWithDM(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_DEV_VIRTADDR *psFWAddr, + void *pvHostCodeAddr, + IMG_UINT32 ui32MaxLen, + const IMG_CHAR *pszDesc, + IMG_UINT32 ui32StartOffset) +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32Value, i; + IMG_UINT32 ui32FWCodeDevVAAddr = psFWAddr->ui32Addr + ui32StartOffset; + IMG_UINT32 *pui32FWCode = IMG_OFFSET_ADDR(pvHostCodeAddr, ui32StartOffset); + + ui32MaxLen -= ui32StartOffset; + ui32MaxLen /= sizeof(IMG_UINT32); /* Byte -> 32 bit words */ + + for (i = 0; i < ui32MaxLen; i++) + { + eError = RGXRiscvReadMem(psDevInfo, ui32FWCodeDevVAAddr, &ui32Value); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: %s", __func__, PVRSRVGetErrorString(eError))); + return eError; + } + + PVR_DPF((PVR_DBG_VERBOSE, "0x%x: CPU 0x%08x, FW 0x%08x", i * 4, pui32FWCode[i], ui32Value)); + + if (pui32FWCode[i] != ui32Value) + { + PVR_DUMPDEBUG_LOG("%s: Mismatch while validating %s at offset 0x%x: CPU 0x%08x (0x%p), FW 0x%08x (0x%x)", + __func__, pszDesc, + (i * 4) + ui32StartOffset, pui32FWCode[i], pui32FWCode, ui32Value, ui32FWCodeDevVAAddr); + + return PVRSRV_ERROR_FW_IMAGE_MISMATCH; + } + + ui32FWCodeDevVAAddr += 4; + } + + PVR_DUMPDEBUG_LOG("Match between Host and RISC-V view of the %s", pszDesc); + return PVRSRV_OK; +} +#endif +#endif /* !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE) */ + + +#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE) +static PVRSRV_ERROR 
_ValidateFWImageForMETA(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ + IMG_UINT32 *pui32HostFWCode = NULL, *pui32HostFWCoremem = NULL; + OS_FW_IMAGE *psRGXFW = NULL; + const IMG_BYTE *pbRGXFirmware = NULL; + RGXFWIF_DEV_VIRTADDR sFWAddr; + PVRSRV_ERROR eError; + + if (psDevInfo->pvRegsBaseKM == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: RGX registers not mapped yet!", __func__)); + return PVRSRV_ERROR_BAD_MAPPING; + } + + /* Load FW from system for code verification */ + pui32HostFWCode = OSAllocZMem(psDevInfo->ui32FWCodeSizeInBytes); + if (pui32HostFWCode == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed in allocating memory for FW code. " + "So skipping FW code verification", + __func__)); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + if (psDevInfo->ui32FWCorememCodeSizeInBytes) + { + pui32HostFWCoremem = OSAllocZMem(psDevInfo->ui32FWCorememCodeSizeInBytes); + if (pui32HostFWCoremem == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed in allocating memory for FW core code. 
" + "So skipping FW code verification", + __func__)); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto freeHostFWCode; + } + } + + /* Load FW image */ + eError = RGXLoadAndGetFWData(psDevInfo->psDeviceNode, &psRGXFW, &pbRGXFirmware); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to load FW image file (%s).", + __func__, PVRSRVGetErrorString(eError))); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto cleanup_initfw; + } + + eError = ProcessLDRCommandStream(&psDevInfo->sLayerParams, pbRGXFirmware, + (IMG_PBYTE) pui32HostFWCode, NULL, + (IMG_PBYTE) pui32HostFWCoremem, NULL, NULL); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed in parsing FW image file.", __func__)); + goto cleanup_initfw; + } + + /* starting checking after BOOT LOADER config */ + sFWAddr.ui32Addr = RGXFW_BOOTLDR_META_ADDR; + eError = _ValidateWithSP(pfnDumpDebugPrintf, pvDumpDebugFile, + psDevInfo, &sFWAddr, + pui32HostFWCode, psDevInfo->ui32FWCodeSizeInBytes, + "FW code", RGXFW_MAX_BOOTLDR_OFFSET); + if (eError != PVRSRV_OK) + { + goto cleanup_initfw; + } + + if (psDevInfo->ui32FWCorememCodeSizeInBytes) + { + sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, META_COREMEM_CODE); + + eError = _ValidateWithSP(pfnDumpDebugPrintf, pvDumpDebugFile, + psDevInfo, &sFWAddr, + pui32HostFWCoremem, psDevInfo->ui32FWCorememCodeSizeInBytes, + "FW coremem code", 0); + } + +cleanup_initfw: + if (psRGXFW) + { + OSUnloadFirmware(psRGXFW); + } + + if (pui32HostFWCoremem) + { + OSFreeMem(pui32HostFWCoremem); + } +freeHostFWCode: + if (pui32HostFWCode) + { + OSFreeMem(pui32HostFWCode); + } + return eError; +} + +#if !defined(EMULATOR) +static PVRSRV_ERROR _ValidateFWImageForRISCV(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ + IMG_UINT32 *pui32HostFWCode = NULL, *pui32HostFWCoremem = NULL; + OS_FW_IMAGE *psRGXFW = NULL; + const IMG_BYTE *pbRGXFirmware = NULL; + RGXFWIF_DEV_VIRTADDR sFWAddr; + PVRSRV_ERROR eError; + 
RGX_LAYER_PARAMS sLayerParams; + sLayerParams.psDevInfo = psDevInfo; + + if (psDevInfo->pvRegsBaseKM == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: RGX registers not mapped yet!", __func__)); + return PVRSRV_ERROR_BAD_MAPPING; + } + + /* Load FW from system for code verification */ + pui32HostFWCode = OSAllocZMem(psDevInfo->ui32FWCodeSizeInBytes); + if (pui32HostFWCode == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed in allocating memory for FW code. " + "So skipping FW code verification", + __func__)); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + if (psDevInfo->ui32FWCorememCodeSizeInBytes) + { + pui32HostFWCoremem = OSAllocZMem(psDevInfo->ui32FWCorememCodeSizeInBytes); + if (pui32HostFWCoremem == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed in allocating memory for FW core code. " + "So skipping FW code verification", + __func__)); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto freeHostFWCode; + } + } + + /* Load FW image */ + eError = RGXLoadAndGetFWData(psDevInfo->psDeviceNode, &psRGXFW, &pbRGXFirmware); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to load FW image file (%s).", + __func__, PVRSRVGetErrorString(eError))); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto cleanup_initfw; + } + + eError = ProcessELFCommandStream(&sLayerParams, pbRGXFirmware, + pui32HostFWCode, NULL, + pui32HostFWCoremem, NULL); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed in parsing FW image file.", __func__)); + goto cleanup_initfw; + } + + /* Use bootloader code remap which is always configured before the FW is started */ + sFWAddr.ui32Addr = RGXRISCVFW_BOOTLDR_CODE_BASE; + + eError = _ValidateWithDM(pfnDumpDebugPrintf, pvDumpDebugFile, + psDevInfo, &sFWAddr, + pui32HostFWCode, psDevInfo->ui32FWCodeSizeInBytes, + "FW code", 0); + if (eError != PVRSRV_OK) + { + goto cleanup_initfw; + } + + if (psDevInfo->ui32FWCorememCodeSizeInBytes) + { + sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, RISCV_COREMEM_CODE); + + /* Core must 
be halted while issuing abstract commands */ + eError = RGXRiscvHalt(psDevInfo); + PVR_GOTO_IF_ERROR(eError, cleanup_initfw); + + eError = _ValidateWithDM(pfnDumpDebugPrintf, pvDumpDebugFile, + psDevInfo, &sFWAddr, + pui32HostFWCoremem, psDevInfo->ui32FWCorememCodeSizeInBytes, + "FW coremem code", 0); + + eError = RGXRiscvResume(psDevInfo); + PVR_GOTO_IF_ERROR(eError, cleanup_initfw); + } + +cleanup_initfw: + if (psRGXFW) + { + OSUnloadFirmware(psRGXFW); + } + + if (pui32HostFWCoremem) + { + OSFreeMem(pui32HostFWCoremem); + } +freeHostFWCode: + if (pui32HostFWCode) + { + OSFreeMem(pui32HostFWCode); + } + return eError; +} +#endif +#endif + +#if defined(SUPPORT_EXTRA_METASP_DEBUG) +PVRSRV_ERROR ValidateFWOnLoad(PVRSRV_RGXDEV_INFO *psDevInfo) +{ +#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE) + IMG_PBYTE pbCodeMemoryPointer; + PVRSRV_ERROR eError; + RGXFWIF_DEV_VIRTADDR sFWAddr; + + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, (void **)&pbCodeMemoryPointer); + if (eError != PVRSRV_OK) + { + return eError; + } + + sFWAddr.ui32Addr = RGXFW_BOOTLDR_META_ADDR; + eError = _ValidateWithSP(NULL, NULL, psDevInfo, &sFWAddr, pbCodeMemoryPointer, psDevInfo->ui32FWCodeSizeInBytes, "FW code", 0); + if (eError != PVRSRV_OK) + { + goto releaseFWCodeMapping; + } + + if (psDevInfo->ui32FWCorememCodeSizeInBytes) + { + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCorememMemDesc, (void **)&pbCodeMemoryPointer); + if (eError != PVRSRV_OK) + { + goto releaseFWCoreCodeMapping; + } + + sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, META_COREMEM_CODE); + + eError = _ValidateWithSP(NULL, NULL, psDevInfo, &sFWAddr, pbCodeMemoryPointer, + psDevInfo->ui32FWCorememCodeSizeInBytes, "FW coremem code", 0); + } + +releaseFWCoreCodeMapping: + if (psDevInfo->ui32FWCorememCodeSizeInBytes) + { + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCorememMemDesc); + } +releaseFWCodeMapping: + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc); + + return eError; 
+#else + PVR_UNREFERENCED_PARAMETER(psDevInfo); + return PVRSRV_OK; +#endif +} +#endif + + +/*! +******************************************************************************* + + @Function _RGXDecodeMMULevel + + @Description + + Return the name for the MMU level that faulted. + + @Input ui32MMULevel - MMU level + + @Return IMG_CHAR* to the string describing the MMU level that faulted. + +******************************************************************************/ +static const IMG_CHAR* _RGXDecodeMMULevel(IMG_UINT32 ui32MMULevel) +{ + const IMG_CHAR* pszMMULevel = ""; + + switch (ui32MMULevel) + { + case 0x0: pszMMULevel = " (Page Table)"; break; + case 0x1: pszMMULevel = " (Page Directory)"; break; + case 0x2: pszMMULevel = " (Page Catalog)"; break; + case 0x3: pszMMULevel = " (Cat Base Reg)"; break; + } + + return pszMMULevel; +} + + +/*! +******************************************************************************* + + @Function _RGXDecodeMMUReqTags + + @Description + + Decodes the MMU Tag ID and Sideband data fields from RGX_CR_MMU_FAULT_META_STATUS and + RGX_CR_MMU_FAULT_STATUS regs.
+ + @Input ui32TagID - Tag ID value + @Input ui32BIFModule - BIF module + @Input bRead - Read flag + @Input bWriteBack - Write Back flag + @Output ppszTagID - Decoded string from the Tag ID + @Output ppszTagSB - Decoded string from the Tag SB + @Output pszScratchBuf - Buffer provided to the function to generate the debug strings + @Input ui32ScratchBufSize - Size of the provided buffer + + @Return void + +******************************************************************************/ +#define RGX_TEXAS_BIF0_MCU_L1_TAG_LAST__SERIES8 (12) +#define RGX_TEXAS_BIF0_TAG_IPFID_ARRAY_FIRST__SERIES8 (15) +#define RGX_TEXAS_BIF0_MCU_L1_TAG_LAST__ALBIORIX (6) +#define RGX_TEXAS_BIF0_TAG_IPFID_ARRAY_FIRST__ALBIORIX (9) +#define RGX_TEXAS_BIF0_TAG_IPFID_ARRAY_LAST (33) +#define RGX_TEXAS_BIF0_TAG_RTU_RAC_FIRST (41) +#define RGX_TEXAS_BIF0_TAG_RTU_RAC_LAST (48) +#define RGX_TEXAS_BIF0_TAG_LAST (51) + +#define RGX_TEXAS_BIF1_TAG_LAST (26) + +#define RGX_JONES_BIF_IPP_TAG (0) +#define RGX_JONES_BIF_DCE_TAG_FIRST (1) +#define RGX_JONES_BIF_DCE_TAG_LAST (14) +#define RGX_JONES_BIF_TDM_TAG_FIRST (15) +#define RGX_JONES_BIF_TDM_TAG_LAST (19) +#define RGX_JONES_BIF_PM_TAG (20) +#define RGX_JONES_BIF_CDM_TAG_FIRST (21) +#define RGX_JONES_BIF_CDM_TAG_LAST (31) +#define RGX_JONES_BIF_META_TAG (32) +#define RGX_JONES_BIF_META_DMA_TAG (33) +#define RGX_JONES_BIF_TE_TAG_FIRST (34) +#define RGX_JONES_BIF_TE_TAG_LAST (47) +#define RGX_JONES_BIF_RTU_TAG_FIRST (48) +#define RGX_JONES_BIF_RTU_TAG_LAST (53) +#define RGX_JONES_BIF_RPM_TAG (54) +#define RGX_JONES_BIF_TAG_LAST (54) + + +/* The MCU L1 requestors are common to all Texas BIFs so put them + * in their own function. 
*/ +static INLINE void _RGXDecodeMMUReqMCULevel1(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32TagID, + IMG_CHAR **ppszTagSB) +{ + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ALBIORIX_TOP_INFRASTRUCTURE)) + { + switch (ui32TagID) + { + case 0: *ppszTagSB = "IP0 PDS"; break; + case 1: *ppszTagSB = "IP0 Global"; break; + case 2: *ppszTagSB = "IP1 PDS"; break; + case 3: *ppszTagSB = "IP1 Global"; break; + case 4: *ppszTagSB = "IP2 PDS"; break; + case 5: *ppszTagSB = "IP2 Global"; break; + } + } + else + { + switch (ui32TagID) + { + case 0: *ppszTagSB = "IP0 PDS"; break; + case 1: *ppszTagSB = "IP0 Global"; break; + case 2: *ppszTagSB = "IP0 BSC"; break; + case 3: *ppszTagSB = "IP0 Constants"; break; + + case 4: *ppszTagSB = "IP1 PDS"; break; + case 5: *ppszTagSB = "IP1 Global"; break; + case 6: *ppszTagSB = "IP1 BSC"; break; + case 7: *ppszTagSB = "IP1 Constants"; break; + + case 8: *ppszTagSB = "IP2 PDS"; break; + case 9: *ppszTagSB = "IP2 Global"; break; + case 10: *ppszTagSB = "IP2 BSC"; break; + case 11: *ppszTagSB = "IP2 Constants"; break; + } + } +} + +static void _RGXDecodeMMUReqTags(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32TagID, + IMG_UINT32 ui32BIFModule, + IMG_BOOL bRead, + IMG_BOOL bWriteBack, + IMG_BOOL bFBMFault, + IMG_CHAR **ppszTagID, + IMG_CHAR **ppszTagSB, + IMG_CHAR *pszScratchBuf, + IMG_UINT32 ui32ScratchBufSize) +{ + IMG_UINT32 ui32BIFsPerSPU = 2; + IMG_CHAR *pszTagID = "-"; + IMG_CHAR *pszTagSB = "-"; + + PVR_ASSERT(ppszTagID != NULL); + PVR_ASSERT(ppszTagSB != NULL); + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ALBIORIX_TOP_INFRASTRUCTURE)) + { + ui32BIFsPerSPU = 4; + } + + if (bFBMFault) + { + pszTagID = "FBM"; + if (bWriteBack) + { + pszTagSB = "Header/state cache request"; + } + } + else if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_SPU) && + ui32BIFModule < RGX_GET_FEATURE_VALUE(psDevInfo, NUM_SPU)*ui32BIFsPerSPU) + { + if ((ui32BIFModule % ui32BIFsPerSPU) == 0) + { + IMG_UINT32 ui32Tag_RGX_TEXAS_BIF0_MCU_L1_TAG_LAST = + 
(RGX_IS_FEATURE_SUPPORTED(psDevInfo, ALBIORIX_TOP_INFRASTRUCTURE)) + ? RGX_TEXAS_BIF0_MCU_L1_TAG_LAST__ALBIORIX + : RGX_TEXAS_BIF0_MCU_L1_TAG_LAST__SERIES8; + IMG_UINT32 ui32Tag_RGX_TEXAS_BIF0_TAG_IPFID_ARRAY_FIRST = + (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ALBIORIX_TOP_INFRASTRUCTURE)) + ? RGX_TEXAS_BIF0_TAG_IPFID_ARRAY_FIRST__ALBIORIX + : RGX_TEXAS_BIF0_TAG_IPFID_ARRAY_FIRST__SERIES8; + + /* Texas 0 BIF */ + if (ui32TagID < ui32Tag_RGX_TEXAS_BIF0_MCU_L1_TAG_LAST) + { + pszTagID = "MCU L1"; + _RGXDecodeMMUReqMCULevel1(psDevInfo, ui32TagID, &pszTagSB); + } + else if (ui32TagID < ui32Tag_RGX_TEXAS_BIF0_TAG_IPFID_ARRAY_FIRST) + { + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ALBIORIX_TOP_INFRASTRUCTURE)) + { + switch (ui32TagID) + { + case 6: pszTagID = "TCU L1"; break; + case 7: + case 8: pszTagID = "PBE0"; break; + } + } + else + { + switch (ui32TagID) + { + case 12: pszTagID = "TCU L1"; break; + case 13: + case 14: pszTagID = "PBE0"; break; + } + } + } + else if (ui32TagID <= RGX_TEXAS_BIF0_TAG_IPFID_ARRAY_LAST) + { + pszTagID = "IPF ID Array"; + } + else if (ui32TagID < RGX_TEXAS_BIF0_TAG_RTU_RAC_FIRST) + { + switch (ui32TagID) + { + case 34: pszTagID = "IPF_CPF"; break; + case 35: pszTagID = "PPP"; break; + case 36: + case 37: pszTagID = "ISP0 ID Array"; break; + case 38: + case 39: pszTagID = "ISP2 ID Array"; break; + case 40: pszTagID = "VCE RTC"; break; + } + } + else if (ui32TagID <= RGX_TEXAS_BIF0_TAG_RTU_RAC_LAST) + { + pszTagID = "RTU RAC"; + } + else if (ui32TagID <= RGX_TEXAS_BIF0_TAG_LAST) + { + switch (ui32TagID) + { + case 49: pszTagID = "VCE AMC"; break; + case 50: + case 51: pszTagID = "SHF"; break; + } + } + else + { + PVR_DPF((PVR_DBG_WARNING, "%s: Unidentified Texas BIF Tag ID: %d", __func__, ui32TagID)); + } + } + else if ((ui32BIFModule % ui32BIFsPerSPU) == 1) + { + IMG_UINT32 ui32Tag_RGX_TEXAS_BIF0_MCU_L1_TAG_LAST = + (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ALBIORIX_TOP_INFRASTRUCTURE)) + ? 
RGX_TEXAS_BIF0_MCU_L1_TAG_LAST__ALBIORIX + : RGX_TEXAS_BIF0_MCU_L1_TAG_LAST__SERIES8; + + /* Texas 1 BIF */ + if (ui32TagID < ui32Tag_RGX_TEXAS_BIF0_MCU_L1_TAG_LAST) + { + pszTagID = "MCU L1"; + _RGXDecodeMMUReqMCULevel1(psDevInfo, ui32TagID, &pszTagSB); + } + else if (ui32TagID <= RGX_TEXAS_BIF1_TAG_LAST) + { + switch (ui32TagID) + { + /** Albiorix/NUM_TPU_PER_SPU > 1 **/ + case 6: + case 7: pszTagID = "BSC"; break; + /** All cores **/ + case 12: pszTagID = "TCU L1"; break; + case 13: pszTagID = "TPF"; break; + case 14: pszTagID = "TPF CPF"; break; + case 15: + case 16: pszTagID = "PBE1"; break; + case 17: pszTagID = "PDSRW cache"; break; + case 18: pszTagID = "PDS"; break; + case 19: + case 20: pszTagID = "ISP1 ID Array"; break; + case 21: pszTagID = "USC L2"; break; + case 22: pszTagID = "VDM L2"; break; + case 23: pszTagID = "RTU FBA L2"; break; + case 24: pszTagID = "RTU SHR L2"; break; + case 25: pszTagID = "RTU SHG L2"; break; + case 26: pszTagID = "RTU TUL L2"; break; + } + } + else + { + PVR_DPF((PVR_DBG_WARNING, "%s: Unidentified Texas BIF Tag ID: %d", __func__, ui32TagID)); + } + } + } + else if (ui32BIFModule == RGX_GET_FEATURE_VALUE(psDevInfo, NUM_SPU)*ui32BIFsPerSPU) + { + /* Jones BIF */ + + if ((ui32TagID >= RGX_JONES_BIF_DCE_TAG_FIRST) && (ui32TagID <= RGX_JONES_BIF_DCE_TAG_LAST)) + { + pszTagID = "DCE"; + } + else if ((ui32TagID >= RGX_JONES_BIF_TDM_TAG_FIRST) && (ui32TagID <= RGX_JONES_BIF_TDM_TAG_LAST)) + { + pszTagID = "TDM"; + } + else if ((ui32TagID >= RGX_JONES_BIF_CDM_TAG_FIRST) && (ui32TagID <= RGX_JONES_BIF_CDM_TAG_LAST)) + { + pszTagID = "CDM"; + } + else if ((ui32TagID >= RGX_JONES_BIF_TE_TAG_FIRST) && (ui32TagID <= RGX_JONES_BIF_TE_TAG_LAST)) + { + pszTagID = "Tiling Engine (TE3)"; + } + else if ((ui32TagID >= RGX_JONES_BIF_RTU_TAG_FIRST) && (ui32TagID <= RGX_JONES_BIF_RTU_TAG_LAST)) + { + pszTagID = "RTU"; + } + else if (ui32TagID <= RGX_JONES_BIF_TAG_LAST) + { + switch (ui32TagID) + { + case RGX_JONES_BIF_IPP_TAG: pszTagID = "IPP"; 
break; + case RGX_JONES_BIF_PM_TAG: pszTagID = "PM"; break; + case RGX_JONES_BIF_META_TAG: pszTagID = "META"; break; + case RGX_JONES_BIF_META_DMA_TAG:pszTagID = "META DMA"; break; + case RGX_JONES_BIF_RPM_TAG: pszTagID = "RPM"; break; + } + } + else + { + PVR_DPF((PVR_DBG_WARNING, "%s: Unidentified Jones BIF Tag ID: %d", __func__, ui32TagID)); + } + } + else if (bWriteBack) + { + pszTagID = ""; + pszTagSB = "Writeback of dirty cacheline"; + } + else + { + PVR_DPF((PVR_DBG_WARNING, "%s: Unidentified BIF Module: %d", __func__, ui32BIFModule)); + } + + *ppszTagID = pszTagID; + *ppszTagSB = pszTagSB; +} + + +static void ConvertOSTimestampToSAndNS(IMG_UINT64 ui64OSTimer, + IMG_UINT64 *pui64Seconds, + IMG_UINT64 *pui64Nanoseconds) +{ + IMG_UINT32 ui32Remainder; + + *pui64Seconds = OSDivide64r64(ui64OSTimer, 1000000000, &ui32Remainder); + *pui64Nanoseconds = ui64OSTimer - (*pui64Seconds * 1000000000ULL); +} + + +typedef enum _DEVICEMEM_HISTORY_QUERY_INDEX_ +{ + DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING, + DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED, + DEVICEMEM_HISTORY_QUERY_INDEX_NEXT, + DEVICEMEM_HISTORY_QUERY_INDEX_COUNT, +} DEVICEMEM_HISTORY_QUERY_INDEX; + + +/*! 
+******************************************************************************* + + @Function _PrintDevicememHistoryQueryResult + + @Description + + Print details of a single result from a DevicememHistory query + + @Input pfnDumpDebugPrintf - Debug printf function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psFaultProcessInfo - The process info derived from the page fault + @Input psResult - The DevicememHistory result to be printed + @Input ui32Index - The index of the result + + @Return void + +******************************************************************************/ +static void _PrintDevicememHistoryQueryResult(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + RGXMEM_PROCESS_INFO *psFaultProcessInfo, + DEVICEMEM_HISTORY_QUERY_OUT_RESULT *psResult, + IMG_UINT32 ui32Index, + const IMG_CHAR* pszIndent) +{ + IMG_UINT32 ui32Remainder; + IMG_UINT64 ui64Seconds, ui64Nanoseconds; + + ConvertOSTimestampToSAndNS(psResult->ui64When, + &ui64Seconds, + &ui64Nanoseconds); + + if (psFaultProcessInfo->uiPID != RGXMEM_SERVER_PID_FIRMWARE) + { + PVR_DUMPDEBUG_LOG("%s [%u] Name: %s Base address: " IMG_DEV_VIRTADDR_FMTSPEC + " Size: " IMG_DEVMEM_SIZE_FMTSPEC + " Operation: %s Modified: %" IMG_UINT64_FMTSPEC + " us ago (OS time %" IMG_UINT64_FMTSPEC + ".%09" IMG_UINT64_FMTSPEC " s)", + pszIndent, + ui32Index, + psResult->szString, + psResult->sBaseDevVAddr.uiAddr, + psResult->uiSize, + psResult->bMap ? 
"Map": "Unmap", + OSDivide64r64(psResult->ui64Age, 1000, &ui32Remainder), + ui64Seconds, + ui64Nanoseconds); + } + else + { + PVR_DUMPDEBUG_LOG("%s [%u] Name: %s Base address: " IMG_DEV_VIRTADDR_FMTSPEC + " Size: " IMG_DEVMEM_SIZE_FMTSPEC + " Operation: %s Modified: %" IMG_UINT64_FMTSPEC + " us ago (OS time %" IMG_UINT64_FMTSPEC + ".%09" IMG_UINT64_FMTSPEC + ") PID: %u (%s)", + pszIndent, + ui32Index, + psResult->szString, + psResult->sBaseDevVAddr.uiAddr, + psResult->uiSize, + psResult->bMap ? "Map": "Unmap", + OSDivide64r64(psResult->ui64Age, 1000, &ui32Remainder), + ui64Seconds, + ui64Nanoseconds, + psResult->sProcessInfo.uiPID, + psResult->sProcessInfo.szProcessName); + } + + if (!psResult->bRange) + { + PVR_DUMPDEBUG_LOG("%s Whole allocation was %s", pszIndent, psResult->bMap ? "mapped": "unmapped"); + } + else + { + PVR_DUMPDEBUG_LOG("%s Pages %u to %u (" IMG_DEV_VIRTADDR_FMTSPEC "-" IMG_DEV_VIRTADDR_FMTSPEC ") %s%s", + pszIndent, + psResult->ui32StartPage, + psResult->ui32StartPage + psResult->ui32PageCount - 1, + psResult->sMapStartAddr.uiAddr, + psResult->sMapEndAddr.uiAddr, + psResult->bAll ? "(whole allocation) " : "", + psResult->bMap ? "mapped": "unmapped"); + } +} + +/*! 
+/* table of HW page size values and the equivalent page size in bytes */
+ @Output asQueryOut - Storage for the query results
(uiPID == RGXMEM_SERVER_PID_FIRMWARE) + { + sQueryIn.uiPID = DEVICEMEM_HISTORY_PID_ANY; + } + else + { + sQueryIn.uiPID = uiPID; + } + + /* query the DevicememHistory about the preceding / faulting / next page */ + + for (i = DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++) + { + IMG_BOOL bHits; + + switch (i) + { + case DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING: + sQueryIn.sDevVAddr.uiAddr = (sFaultDevVAddr.uiAddr & ~(IMG_UINT64)(ui32PageSizeBytes - 1)) - 1; + break; + case DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED: + sQueryIn.sDevVAddr = sFaultDevVAddr; + break; + case DEVICEMEM_HISTORY_QUERY_INDEX_NEXT: + sQueryIn.sDevVAddr.uiAddr = (sFaultDevVAddr.uiAddr & ~(IMG_UINT64)(ui32PageSizeBytes - 1)) + ui32PageSizeBytes; + break; + } + + /* First try matching any record at the exact address... */ + bHits = DevicememHistoryQuery(&sQueryIn, &asQueryOut[i], ui32PageSizeBytes, IMG_FALSE); + if (!bHits) + { + /* If not matched then try matching any record in the same page... */ + bHits = DevicememHistoryQuery(&sQueryIn, &asQueryOut[i], ui32PageSizeBytes, IMG_TRUE); + } + + if (bHits) + { + bAnyHits = IMG_TRUE; + } + } + + + return bAnyHits; +} + +/* stored data about one page fault */ +typedef struct _FAULT_INFO_ +{ + /* the process info of the memory context that page faulted */ + RGXMEM_PROCESS_INFO sProcessInfo; + IMG_DEV_VIRTADDR sFaultDevVAddr; + MMU_FAULT_DATA sMMUFaultData; + DEVICEMEM_HISTORY_QUERY_OUT asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_COUNT]; + /* the CR timer value at the time of the fault, recorded by the FW. + * used to differentiate different page faults + */ + IMG_UINT64 ui64CRTimer; + /* time when this FAULT_INFO entry was added. used for timing + * reference against the map/unmap information + */ + IMG_UINT64 ui64When; + IMG_UINT32 ui32FaultInfoFlags; +} FAULT_INFO; + +/* history list of page faults. 
+ * Keeps the first `n` page faults and the last `n` page faults, like the FW + * HWR log + */ +typedef struct _FAULT_INFO_LOG_ +{ + IMG_UINT32 ui32Head; + /* the number of faults in this log need not correspond exactly to + * the HWINFO number of the FW, as the FW HWINFO log may contain + * non-page fault HWRs + */ + FAULT_INFO asFaults[RGXFWIF_HWINFO_MAX]; +} FAULT_INFO_LOG; + +#define FAULT_INFO_PROC_INFO (0x1U) +#define FAULT_INFO_DEVMEM_HIST (0x2U) + +static FAULT_INFO_LOG gsFaultInfoLog = { 0 }; + +static void _FillAppForFWFaults(PVRSRV_RGXDEV_INFO *psDevInfo, + FAULT_INFO *psInfo, + RGXMEM_PROCESS_INFO *psProcInfo) +{ + IMG_UINT32 i, j; + + for (i = 0; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++) + { + for (j = 0; j < DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS; j++) + { + IMG_BOOL bFound; + + RGXMEM_PROCESS_INFO *psProcInfo = &psInfo->asQueryOut[i].sResults[j].sProcessInfo; + bFound = RGXPCPIDToProcessInfo(psDevInfo, + psProcInfo->uiPID, + psProcInfo); + if (!bFound) + { + OSStringLCopy(psProcInfo->szProcessName, + "(unknown)", + sizeof(psProcInfo->szProcessName)); + } + } + } +} + +/*! 
+******************************************************************************* + + @Function _PrintFaultInfo + + @Description + + Print all the details of a page fault from a FAULT_INFO structure + + @Input pfnDumpDebugPrintf - The debug printf function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psInfo - The page fault occurrence to print + + @Return void + +******************************************************************************/ +static void _PrintFaultInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + FAULT_INFO *psInfo, + const IMG_CHAR* pszIndent) +{ + IMG_UINT32 i; + IMG_UINT64 ui64Seconds, ui64Nanoseconds; + + ConvertOSTimestampToSAndNS(psInfo->ui64When, &ui64Seconds, &ui64Nanoseconds); + + if (BITMASK_HAS(psInfo->ui32FaultInfoFlags, FAULT_INFO_PROC_INFO)) + { + IMG_PID uiPID = (psInfo->sProcessInfo.uiPID == RGXMEM_SERVER_PID_FIRMWARE || psInfo->sProcessInfo.uiPID == RGXMEM_SERVER_PID_PM) ? 
+ 0 : psInfo->sProcessInfo.uiPID; + + PVR_DUMPDEBUG_LOG("%sDevice memory history for page fault address " IMG_DEV_VIRTADDR_FMTSPEC + ", CRTimer: 0x%016" IMG_UINT64_FMTSPECX + ", PID: %u (%s, unregistered: %u) OS time: " + "%" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC, + pszIndent, + psInfo->sFaultDevVAddr.uiAddr, + psInfo->ui64CRTimer, + uiPID, + psInfo->sProcessInfo.szProcessName, + psInfo->sProcessInfo.bUnregistered, + ui64Seconds, + ui64Nanoseconds); + } + else + { + PVR_DUMPDEBUG_LOG("%sCould not find PID for device memory history on PC of the fault", pszIndent); + } + + if (BITMASK_HAS(psInfo->ui32FaultInfoFlags, FAULT_INFO_DEVMEM_HIST)) + { + for (i = DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++) + { + const IMG_CHAR *pszWhich = NULL; + + switch (i) + { + case DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING: + pszWhich = "Preceding page"; + break; + case DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED: + pszWhich = "Faulted page"; + break; + case DEVICEMEM_HISTORY_QUERY_INDEX_NEXT: + pszWhich = "Next page"; + break; + } + + PVR_DUMPDEBUG_LOG("%s %s:", pszIndent, pszWhich); + _PrintDevicememHistoryQueryOut(pfnDumpDebugPrintf, pvDumpDebugFile, + &psInfo->sProcessInfo, + &psInfo->asQueryOut[i], + pszIndent); + } + } + else + { + PVR_DUMPDEBUG_LOG("%s No matching Devmem History for fault address", pszIndent); + } +} + +static void _RecordFaultInfo(PVRSRV_RGXDEV_INFO *psDevInfo, + FAULT_INFO *psInfo, + IMG_DEV_VIRTADDR sFaultDevVAddr, + IMG_DEV_PHYADDR sPCDevPAddr, + IMG_UINT64 ui64CRTimer, + IMG_UINT32 ui32PageSizeBytes) +{ + IMG_BOOL bFound = IMG_FALSE, bIsPMFault = IMG_FALSE; + RGXMEM_PROCESS_INFO sProcessInfo; + + psInfo->ui32FaultInfoFlags = 0; + psInfo->sFaultDevVAddr = sFaultDevVAddr; + psInfo->ui64CRTimer = ui64CRTimer; + psInfo->ui64When = OSClockns64(); + + if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) + { + /* Check if this is PM fault */ + if (psInfo->sMMUFaultData.eType == MMU_FAULT_TYPE_PM) + { + 
bIsPMFault = IMG_TRUE; + bFound = IMG_TRUE; + sProcessInfo.uiPID = RGXMEM_SERVER_PID_PM; + OSStringLCopy(sProcessInfo.szProcessName, "PM", sizeof(sProcessInfo.szProcessName)); + sProcessInfo.szProcessName[sizeof(sProcessInfo.szProcessName) - 1] = '\0'; + sProcessInfo.bUnregistered = IMG_FALSE; + } + else + { + /* look up the process details for the faulting page catalogue */ + bFound = RGXPCAddrToProcessInfo(psDevInfo, sPCDevPAddr, &sProcessInfo); + } + + if (bFound) + { + IMG_BOOL bHits; + + psInfo->ui32FaultInfoFlags = FAULT_INFO_PROC_INFO; + psInfo->sProcessInfo = sProcessInfo; + + if (bIsPMFault) + { + bHits = IMG_TRUE; + } + else + { + /* get any DevicememHistory data for the faulting address */ + bHits = _GetDevicememHistoryData(sProcessInfo.uiPID, + sFaultDevVAddr, + psInfo->asQueryOut, + ui32PageSizeBytes); + + if (bHits) + { + psInfo->ui32FaultInfoFlags |= FAULT_INFO_DEVMEM_HIST; + + /* if the page fault was caused by the firmware then get information about + * which client application created the related allocations. + * + * Fill in the process info data for each query result. + */ + + if (sProcessInfo.uiPID == RGXMEM_SERVER_PID_FIRMWARE) + { + _FillAppForFWFaults(psDevInfo, psInfo, &sProcessInfo); + } + } + } + } + } +} + +/*! +******************************************************************************* + + @Function _DumpFaultAddressHostView + + @Description + + Dump FW HWR fault status in human readable form. 
+ @Input psFaultData - Host view of the MMU fault data to dump
+******************************************************************************* + + @Function _RGXDumpRGXMMUFaultStatus + + @Description + + Dump MMU Fault status in human readable form. + + @Input pfnDumpDebugPrintf - The debug printf function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psDevInfo - RGX device info + @Input ui64MMUStatus - MMU Status register value + @Input pszMetaOrCore - string representing call is for META or MMU core + @Return void + +******************************************************************************/ +static void _RGXDumpRGXMMUFaultStatus(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT64 aui64MMUStatus[], + const IMG_PCHAR pszMetaOrCore, + const IMG_CHAR *pszIndent) +{ + if (aui64MMUStatus[0] == 0x0) + { + PVR_DUMPDEBUG_LOG("%sMMU (%s) - OK", pszIndent, pszMetaOrCore); + } + else + { + IMG_UINT32 ui32PC = (aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS1_CONTEXT_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS1_CONTEXT_SHIFT; + IMG_UINT64 ui64Addr = ((aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS1_ADDRESS_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS1_ADDRESS_SHIFT) << 4; /* align shift */ + IMG_UINT32 ui32Requester = (aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS1_REQ_ID_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS1_REQ_ID_SHIFT; + IMG_UINT32 ui32MMULevel = (aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS1_LEVEL_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS1_LEVEL_SHIFT; + IMG_BOOL bRead = (aui64MMUStatus[0] & RGX_CR_MMU_FAULT_STATUS1_RNW_EN) != 0; + IMG_BOOL bFault = (aui64MMUStatus[0] & RGX_CR_MMU_FAULT_STATUS1_FAULT_EN) != 0; + IMG_BOOL bROFault = ((aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS1_TYPE_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS1_TYPE_SHIFT) == 0x2; + IMG_BOOL bProtFault = ((aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS1_TYPE_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS1_TYPE_SHIFT) == 0x3; + IMG_UINT32 ui32BIFModule; + IMG_BOOL bWriteBack, 
bFBMFault; + IMG_CHAR aszScratch[RGX_DEBUG_STR_SIZE]; + IMG_CHAR *pszTagID = NULL; + IMG_CHAR *pszTagSB = NULL; + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ALBIORIX_TOP_INFRASTRUCTURE)) + { + ui32BIFModule = (aui64MMUStatus[1] & ~RGX_CR_MMU_FAULT_STATUS2__ALBTOP__BIF_ID_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS2__ALBTOP__BIF_ID_SHIFT; + bWriteBack = (aui64MMUStatus[1] & RGX_CR_MMU_FAULT_STATUS2__ALBTOP__WRITEBACK_EN) != 0; + bFBMFault = (aui64MMUStatus[1] & RGX_CR_MMU_FAULT_STATUS2__ALBTOP__FBM_FAULT_EN) != 0; + } + else + { + ui32BIFModule = (aui64MMUStatus[1] & ~RGX_CR_MMU_FAULT_STATUS2_BIF_ID_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS2_BIF_ID_SHIFT; + bWriteBack = (aui64MMUStatus[1] & RGX_CR_MMU_FAULT_STATUS2_WRITEBACK_EN) != 0; + bFBMFault = (aui64MMUStatus[1] & RGX_CR_MMU_FAULT_STATUS2_FBM_FAULT_EN) != 0; + } + + if (strcmp(pszMetaOrCore, "Meta") == 0) + { + ui32PC = (aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT; + ui64Addr = ((aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT) << 4; /* align shift */ + ui32Requester = (aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT; + ui32MMULevel = (aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT; + bRead = (aui64MMUStatus[0] & RGX_CR_MMU_FAULT_STATUS_META_RNW_EN) != 0; + bFault = (aui64MMUStatus[0] & RGX_CR_MMU_FAULT_STATUS_META_FAULT_EN) != 0; + bROFault = ((aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT) == 0x2; + bProtFault = ((aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT) == 0x3; + } + else + { + _RGXDecodeMMUReqTags(psDevInfo, ui32Requester, ui32BIFModule, bRead, bWriteBack, bFBMFault, &pszTagID, &pszTagSB, aszScratch, RGX_DEBUG_STR_SIZE); + } + + 
PVR_DUMPDEBUG_LOG("%sMMU (%s) - FAULT:", pszIndent, pszMetaOrCore); + PVR_DUMPDEBUG_LOG("%s * MMU status (0x%016" IMG_UINT64_FMTSPECX " | 0x%08" IMG_UINT64_FMTSPECX "): PC = %d, %s 0x%010" IMG_UINT64_FMTSPECX ", %s(%s)%s%s%s%s.", + pszIndent, + aui64MMUStatus[0], + aui64MMUStatus[1], + ui32PC, + (bRead)?"Reading from":"Writing to", + ui64Addr, + (pszTagID)? pszTagID : "META", + (pszTagSB)? pszTagSB : "-", + (bFault)?", Fault":"", + (bROFault)?", Read Only fault":"", + (bProtFault)?", PM/META protection fault":"", + _RGXDecodeMMULevel(ui32MMULevel)); + + } +} + +static_assert((RGX_CR_MMU_FAULT_STATUS1_CONTEXT_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS1_CONTEXT_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS1_ADDRESS_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS1_ADDRESS_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS1_REQ_ID_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS1_REQ_ID_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS1_LEVEL_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS1_LEVEL_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS1_RNW_EN == RGX_CR_MMU_FAULT_STATUS_META_RNW_EN), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS1_FAULT_EN == RGX_CR_MMU_FAULT_STATUS_META_FAULT_EN), + 
"RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS1_TYPE_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); +static_assert((RGX_CR_MMU_FAULT_STATUS1_TYPE_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT), + "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); + + +static const IMG_FLAGS2DESC asCswOpts2Description[] = +{ + {RGXFWIF_INICFG_CTXSWITCH_PROFILE_FAST, " Fast CSW profile;"}, + {RGXFWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM, " Medium CSW profile;"}, + {RGXFWIF_INICFG_CTXSWITCH_PROFILE_SLOW, " Slow CSW profile;"}, + {RGXFWIF_INICFG_CTXSWITCH_PROFILE_NODELAY, " No Delay CSW profile;"}, + {RGXFWIF_INICFG_CTXSWITCH_MODE_RAND, " Random Csw enabled;"}, + {RGXFWIF_INICFG_CTXSWITCH_SRESET_EN, " SoftReset;"}, +}; + +static const IMG_FLAGS2DESC asMisc2Description[] = +{ + {RGXFWIF_INICFG_POW_RASCALDUST, " Power Rascal/Dust;"}, + {RGXFWIF_INICFG_HWPERF_EN, " HwPerf EN;"}, + {RGXFWIF_INICFG_FBCDC_V3_1_EN, " FBCDCv3.1;"}, + {RGXFWIF_INICFG_CHECK_MLIST_EN, " Check MList;"}, + {RGXFWIF_INICFG_DISABLE_CLKGATING_EN, " ClockGating Off;"}, + {RGXFWIF_INICFG_POLL_COUNTERS_EN, " Poll Counters;"}, + {RGXFWIF_INICFG_REGCONFIG_EN, " Register Config;"}, + {RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY, " Assert on OOM;"}, + {RGXFWIF_INICFG_HWP_DISABLE_FILTER, " HWP Filter Off;"}, + {RGXFWIF_INICFG_CUSTOM_PERF_TIMER_EN, " Custom PerfTimer;"}, + {RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN, " CDM Random kill;"}, + {RGXFWIF_INICFG_DISABLE_DM_OVERLAP, " DM Overlap Off;"}, + {RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER, " Assert on HWR;"}, + {RGXFWIF_INICFG_FABRIC_COHERENCY_ENABLED, " Coherent fabric on;"}, + {RGXFWIF_INICFG_VALIDATE_IRQ, " Validate IRQ;"}, + {RGXFWIF_INICFG_DISABLE_PDP_EN, " PDUMP Panic off;"}, + {RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN, " SPU Pow mask change on;"}, + {RGXFWIF_INICFG_WORKEST, " Workload Estim;"}, + {RGXFWIF_INICFG_PDVFS, " PDVFS;"}, + {RGXFWIF_INICFG_CDM_ARBITRATION_TASK_DEMAND, " CDM task demand 
arbitration;"}, + {RGXFWIF_INICFG_CDM_ARBITRATION_ROUND_ROBIN, " CDM round-robin arbitration;"}, + {RGXFWIF_INICFG_ISPSCHEDMODE_VER1_IPP, " ISP v1 scheduling;"}, + {RGXFWIF_INICFG_ISPSCHEDMODE_VER2_ISP, " ISP v2 scheduling;"}, + {RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER, " Validate SOC&USC timers;"} +}; + +static const IMG_FLAGS2DESC asFwOsCfg2Description[] = +{ + {RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN, " TDM;"}, + {RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN, " GEOM;"}, + {RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN, " 3D;"}, + {RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN, " CDM;"}, + {RGXFWIF_INICFG_OS_LOW_PRIO_CS_TDM, " LowPrio TDM;"}, + {RGXFWIF_INICFG_OS_LOW_PRIO_CS_GEOM, " LowPrio GEOM;"}, + {RGXFWIF_INICFG_OS_LOW_PRIO_CS_3D, " LowPrio 3D;"}, + {RGXFWIF_INICFG_OS_LOW_PRIO_CS_CDM, " LowPrio CDM;"}, +}; + +static const IMG_FLAGS2DESC asHwrState2Description[] = +{ + {RGXFWIF_HWR_HARDWARE_OK, " HWR OK;"}, + {RGXFWIF_HWR_RESET_IN_PROGRESS, " Reset ongoing;"}, + {RGXFWIF_HWR_GENERAL_LOCKUP, " General lockup;"}, + {RGXFWIF_HWR_DM_RUNNING_OK, " DM running ok;"}, + {RGXFWIF_HWR_DM_STALLING, " DM stalling;"}, + {RGXFWIF_HWR_FW_FAULT, " FW Fault;"}, + {RGXFWIF_HWR_RESTART_REQUESTED, " Restart requested;"}, +}; + +static const IMG_FLAGS2DESC asDmState2Description[] = +{ + {RGXFWIF_DM_STATE_READY_FOR_HWR, " ready for hwr;"}, + {RGXFWIF_DM_STATE_NEEDS_SKIP, " needs skip;"}, + {RGXFWIF_DM_STATE_NEEDS_PR_CLEANUP, " needs PR cleanup;"}, + {RGXFWIF_DM_STATE_NEEDS_TRACE_CLEAR, " needs trace clear;"}, + {RGXFWIF_DM_STATE_GUILTY_LOCKUP, " guilty lockup;"}, + {RGXFWIF_DM_STATE_INNOCENT_LOCKUP, " innocent lockup;"}, + {RGXFWIF_DM_STATE_GUILTY_OVERRUNING, " guilty overrunning;"}, + {RGXFWIF_DM_STATE_INNOCENT_OVERRUNING, " innocent overrunning;"}, + {RGXFWIF_DM_STATE_GPU_ECC_HWR, " GPU ECC hwr;"}, +}; + +static const IMG_FLAGS2DESC asHWErrorState[] = +{ + {RGX_HW_ERR_NA, "N/A"}, + {RGX_HW_ERR_PRIMID_FAILURE_DURING_DMKILL, "Primitive ID failure during DM kill."}, +}; + +static inline IMG_CHAR const 
*_GetRISCVException(IMG_UINT32 ui32Mcause) +{ + switch (ui32Mcause) + { +#define X(value, fatal, description) \ + case value: \ + if (fatal) \ + return description; \ + return NULL; + + RGXRISCVFW_MCAUSE_TABLE +#undef X + + default: + PVR_DPF((PVR_DBG_WARNING, "Invalid RISC-V FW mcause value 0x%08x", ui32Mcause)); + return NULL; + } +} + +/* + Appends flags strings to a null-terminated string buffer - each flag + description string starts with a space. +*/ +static void _Flags2Description(IMG_CHAR *psDesc, IMG_UINT32 ui32DescSize, const IMG_FLAGS2DESC *psConvTable, IMG_UINT32 ui32TableSize, IMG_UINT32 ui32Flags) +{ + IMG_UINT32 ui32Idx; + + for (ui32Idx = 0; ui32Idx < ui32TableSize; ui32Idx++) + { + if ((ui32Flags & psConvTable[ui32Idx].uiFlag) == psConvTable[ui32Idx].uiFlag) + { + OSStringLCat(psDesc, psConvTable[ui32Idx].pszLabel, ui32DescSize); + } + } +} + +/* + * Translate ID code to descriptive string. + * Returns on the first match. + */ +static void _ID2Description(IMG_CHAR *psDesc, IMG_UINT32 ui32DescSize, const IMG_FLAGS2DESC *psConvTable, IMG_UINT32 ui32TableSize, IMG_UINT32 ui32ID) +{ + IMG_UINT32 ui32Idx; + + for (ui32Idx = 0; ui32Idx < ui32TableSize; ui32Idx++) + { + if (ui32ID == psConvTable[ui32Idx].uiFlag) + { + OSStringLCopy(psDesc, psConvTable[ui32Idx].pszLabel, ui32DescSize); + return; + } + } +} + +/* + Writes flags strings to an uninitialised buffer. 
+*/ +static void _GetFwSysFlagsDescription(IMG_CHAR *psDesc, IMG_UINT32 ui32DescSize, IMG_UINT32 ui32RawFlags) +{ + const IMG_CHAR *psCswLabel = "Ctx switch options:"; + size_t uLabelLen = OSStringLength(psCswLabel); + const size_t uiBytesPerDesc = (ui32DescSize - uLabelLen) / 2U - 1U; + + OSStringLCopy(psDesc, psCswLabel, ui32DescSize); + + _Flags2Description(psDesc, uiBytesPerDesc + uLabelLen, asCswOpts2Description, ARRAY_SIZE(asCswOpts2Description), ui32RawFlags); + _Flags2Description(psDesc, ui32DescSize, asMisc2Description, ARRAY_SIZE(asMisc2Description), ui32RawFlags); +} + +static void _GetFwOsFlagsDescription(IMG_CHAR *psDesc, IMG_UINT32 ui32DescSize, IMG_UINT32 ui32RawFlags) +{ + const IMG_CHAR *psCswLabel = "Ctx switch:"; + size_t uLabelLen = OSStringLength(psCswLabel); + const size_t uiBytesPerDesc = (ui32DescSize - uLabelLen) / 2U - 1U; + + OSStringLCopy(psDesc, psCswLabel, ui32DescSize); + + _Flags2Description(psDesc, uiBytesPerDesc + uLabelLen, asFwOsCfg2Description, ARRAY_SIZE(asFwOsCfg2Description), ui32RawFlags); +} + +/*! +******************************************************************************* + + @Function _RGXDumpFWAssert + + @Description + + Dump FW assert strings when a thread asserts. 
+ + @Input pfnDumpDebugPrintf - The debug printf function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psRGXFWIfTraceBufCtl - RGX FW trace buffer + + @Return void + +******************************************************************************/ +static void _RGXDumpFWAssert(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl) +{ + IMG_CHAR *pszTraceAssertPath; + IMG_CHAR *pszTraceAssertInfo; + IMG_INT32 ui32TraceAssertLine; + IMG_UINT32 i; + + for (i = 0; i < RGXFW_THREAD_NUM; i++) + { + pszTraceAssertPath = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.szPath; + pszTraceAssertInfo = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.szInfo; + ui32TraceAssertLine = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.ui32LineNum; + + /* print non-null assert strings */ + if (*pszTraceAssertInfo) + { + PVR_DUMPDEBUG_LOG("FW-T%d Assert: %s (%s:%d)", + i, pszTraceAssertInfo, pszTraceAssertPath, ui32TraceAssertLine); + } + } +} + +/*! +******************************************************************************* + + @Function _RGXDumpFWFaults + + @Description + + Dump recorded FW fault strings when the firmware has registered faults. 
+ + @Input pfnDumpDebugPrintf - The debug printf function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psFwSysData - RGX FW shared system data + + @Return void + +******************************************************************************/ +static void _RGXDumpFWFaults(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + RGXFWIF_SYSDATA *psFwSysData) +{ + if (psFwSysData->ui32FWFaults > 0) + { + IMG_UINT32 ui32StartFault = psFwSysData->ui32FWFaults - RGXFWIF_FWFAULTINFO_MAX; + IMG_UINT32 ui32EndFault = psFwSysData->ui32FWFaults - 1; + IMG_UINT32 ui32Index; + + if (psFwSysData->ui32FWFaults < RGXFWIF_FWFAULTINFO_MAX) + { + ui32StartFault = 0; + } + + for (ui32Index = ui32StartFault; ui32Index <= ui32EndFault; ui32Index++) + { + RGX_FWFAULTINFO *psFaultInfo = &psFwSysData->sFaultInfo[ui32Index % RGXFWIF_FWFAULTINFO_MAX]; + IMG_UINT64 ui64Seconds, ui64Nanoseconds; + + /* Split OS timestamp in seconds and nanoseconds */ + ConvertOSTimestampToSAndNS(psFaultInfo->ui64OSTimer, &ui64Seconds, &ui64Nanoseconds); + + PVR_DUMPDEBUG_LOG("FW Fault %d: %s (%s:%d)", + ui32Index+1, psFaultInfo->sFaultBuf.szInfo, + psFaultInfo->sFaultBuf.szPath, + psFaultInfo->sFaultBuf.ui32LineNum); + PVR_DUMPDEBUG_LOG(" Data = 0x%08x, CRTimer = 0x%012"IMG_UINT64_FMTSPECX", OSTimer = %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC, + psFaultInfo->ui32Data, + psFaultInfo->ui64CRTimer, + ui64Seconds, ui64Nanoseconds); + } + } +} + +static void _RGXDumpFWPoll(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + RGXFWIF_SYSDATA *psFwSysData) +{ + IMG_UINT32 i; + for (i = 0; i < RGXFW_THREAD_NUM; i++) + { + if (psFwSysData->aui32CrPollAddr[i]) + { + PVR_DUMPDEBUG_LOG("T%u polling %s (reg:0x%08X mask:0x%08X)", + i, + ((psFwSysData->aui32CrPollAddr[i] & RGXFW_POLL_TYPE_SET)?("set"):("unset")), + psFwSysData->aui32CrPollAddr[i] & ~RGXFW_POLL_TYPE_SET, + psFwSysData->aui32CrPollMask[i]); + } + } + +} + 
+static void _RGXDumpFWHWRInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + RGXFWIF_SYSDATA *psFwSysData, + RGXFWIF_HWRINFOBUF *psHWRInfoBuf, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ + IMG_BOOL bAnyLocked = IMG_FALSE; + IMG_UINT32 dm, i; + IMG_UINT32 ui32LineSize; + IMG_CHAR *pszLine, *pszTemp; + const IMG_CHAR *apszDmNames[RGXFWIF_DM_MAX] = {"GP", "2D", "GEOM", "3D", "CDM", "RAY"}; + + const IMG_CHAR szMsgHeader[] = "Number of HWR: "; + const IMG_CHAR szMsgFalse[] = "FALSE("; + IMG_CHAR *pszLockupType = ""; + RGX_HWRINFO *psHWRInfo; + const IMG_UINT32 ui32MsgHeaderCharCount = ARRAY_SIZE(szMsgHeader) - 1; /* size includes the null */ + const IMG_UINT32 ui32MsgFalseCharCount = ARRAY_SIZE(szMsgFalse) - 1; + IMG_UINT32 ui32HWRRecoveryFlags; + IMG_UINT32 ui32ReadIndex; + + for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++) + { + if (psHWRInfoBuf->aui32HwrDmLockedUpCount[dm] || + psHWRInfoBuf->aui32HwrDmOverranCount[dm]) + { + bAnyLocked = IMG_TRUE; + break; + } + } + + if (!PVRSRV_VZ_MODE_IS(GUEST) && !bAnyLocked && (psFwSysData->ui32HWRStateFlags & RGXFWIF_HWR_HARDWARE_OK)) + { + /* No HWR situation, print nothing */ + return; + } + + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + IMG_BOOL bAnyHWROccured = IMG_FALSE; + + for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++) + { + if (psHWRInfoBuf->aui32HwrDmRecoveredCount[dm] != 0 || + psHWRInfoBuf->aui32HwrDmLockedUpCount[dm] != 0 || + psHWRInfoBuf->aui32HwrDmOverranCount[dm] !=0) + { + bAnyHWROccured = IMG_TRUE; + break; + } + } + + if (!bAnyHWROccured) + { + return; + } + } + + ui32LineSize = sizeof(IMG_CHAR) * ( + ui32MsgHeaderCharCount + + (psDevInfo->sDevFeatureCfg.ui32MAXDMCount*( 4/*DM name + left parenthesis*/ + + 10/*UINT32 max num of digits*/ + + 1/*slash*/ + + 10/*UINT32 max num of digits*/ + + 3/*right parenthesis + comma + space*/)) + + ui32MsgFalseCharCount + 1 + (psDevInfo->sDevFeatureCfg.ui32MAXDMCount*6) + 1 + /* 'FALSE(' + ')' + (UINT16 max num + comma) per DM + 
\0 */ + ); + + pszLine = OSAllocMem(ui32LineSize); + if (pszLine == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Out of mem allocating line string (size: %d)", + __func__, + ui32LineSize)); + return; + } + + OSStringLCopy(pszLine, szMsgHeader, ui32LineSize); + pszTemp = pszLine + ui32MsgHeaderCharCount; + + for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++) + { + pszTemp += OSSNPrintf(pszTemp, + 4 + 10 + 1 + 10 + 1 + 10 + 1 + 1 + 1 + 1 + /* (name + left parenthesis) + UINT32 + slash + UINT32 + plus + UINT32 + right parenthesis + comma + space + \0 */, + "%s(%u/%u+%u), ", + apszDmNames[dm], + psHWRInfoBuf->aui32HwrDmRecoveredCount[dm], + psHWRInfoBuf->aui32HwrDmLockedUpCount[dm], + psHWRInfoBuf->aui32HwrDmOverranCount[dm]); + } + + OSStringLCat(pszLine, szMsgFalse, ui32LineSize); + pszTemp += ui32MsgFalseCharCount; + + for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++) + { + pszTemp += OSSNPrintf(pszTemp, + 10 + 1 + 1 /* UINT32 max num + comma + \0 */, + (dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount-1 ? 
"%u," : "%u)"), + psHWRInfoBuf->aui32HwrDmFalseDetectCount[dm]); + } + + PVR_DUMPDEBUG_LOG("%s", pszLine); + + OSFreeMem(pszLine); + + /* Print out per HWR info */ + for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++) + { + if (dm == RGXFWIF_DM_GP) + { + PVR_DUMPDEBUG_LOG("DM %d (GP)", dm); + } + else + { + if (!PVRSRV_VZ_MODE_IS(GUEST)) + { + IMG_UINT32 ui32HWRRecoveryFlags = psFwSysData->aui32HWRRecoveryFlags[dm]; + IMG_CHAR sPerDmHwrDescription[RGX_DEBUG_STR_SIZE]; + sPerDmHwrDescription[0] = '\0'; + + if (ui32HWRRecoveryFlags == RGXFWIF_DM_STATE_WORKING) + { + OSStringLCopy(sPerDmHwrDescription, " working;", RGX_DEBUG_STR_SIZE); + } + else + { + _Flags2Description(sPerDmHwrDescription, RGX_DEBUG_STR_SIZE, + asDmState2Description, ARRAY_SIZE(asDmState2Description), + ui32HWRRecoveryFlags); + } + PVR_DUMPDEBUG_LOG("DM %d (HWRflags 0x%08x:%s)", dm, ui32HWRRecoveryFlags, sPerDmHwrDescription); + } + else + { + PVR_DUMPDEBUG_LOG("DM %d", dm); + } + } + + ui32ReadIndex = 0; + for (i = 0 ; i < RGXFWIF_HWINFO_MAX ; i++) + { + IMG_BOOL bPMFault = IMG_FALSE; + IMG_UINT32 ui32PC; + IMG_UINT32 ui32PageSize = 0; + IMG_DEV_PHYADDR sPCDevPAddr = { 0 }; + + psHWRInfo = &psHWRInfoBuf->sHWRInfo[ui32ReadIndex]; + + if ((psHWRInfo->eDM == dm) && (psHWRInfo->ui32HWRNumber != 0)) + { + IMG_CHAR aui8RecoveryNum[10+10+1]; + IMG_UINT64 ui64Seconds, ui64Nanoseconds; + IMG_BOOL bPageFault = IMG_FALSE; + IMG_DEV_VIRTADDR sFaultDevVAddr; + + /* Split OS timestamp in seconds and nanoseconds */ + ConvertOSTimestampToSAndNS(psHWRInfo->ui64OSTimer, &ui64Seconds, &ui64Nanoseconds); + + ui32HWRRecoveryFlags = psHWRInfo->ui32HWRRecoveryFlags; + if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GUILTY_LOCKUP) { pszLockupType = ", Guilty Lockup"; } + else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_INNOCENT_LOCKUP) { pszLockupType = ", Innocent Lockup"; } + else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GUILTY_OVERRUNING) { pszLockupType = ", Guilty Overrun"; } + else if 
(ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_INNOCENT_OVERRUNING) { pszLockupType = ", Innocent Overrun"; } + else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_HARD_CONTEXT_SWITCH) { pszLockupType = ", Hard Context Switch"; } + else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GPU_ECC_HWR) { pszLockupType = ", GPU ECC HWR"; } + + OSSNPrintf(aui8RecoveryNum, sizeof(aui8RecoveryNum), "Recovery %d:", psHWRInfo->ui32HWRNumber); + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT)) + { + PVR_DUMPDEBUG_LOG(" %s Core = %u, PID = %u, frame = %d, HWRTData = 0x%08X, EventStatus = 0x%08X%s", + aui8RecoveryNum, + psHWRInfo->ui32CoreID, + psHWRInfo->ui32PID, + psHWRInfo->ui32FrameNum, + psHWRInfo->ui32ActiveHWRTData, + psHWRInfo->ui32EventStatus, + pszLockupType); + } + else + { + PVR_DUMPDEBUG_LOG(" %s PID = %u, frame = %d, HWRTData = 0x%08X, EventStatus = 0x%08X%s", + aui8RecoveryNum, + psHWRInfo->ui32PID, + psHWRInfo->ui32FrameNum, + psHWRInfo->ui32ActiveHWRTData, + psHWRInfo->ui32EventStatus, + pszLockupType); + } + + if (psHWRInfo->eHWErrorCode != RGX_HW_ERR_NA) + { + IMG_CHAR sHWDebugInfo[RGX_DEBUG_STR_SIZE] = ""; + + _ID2Description(sHWDebugInfo, RGX_DEBUG_STR_SIZE, asHWErrorState, ARRAY_SIZE(asHWErrorState), + psHWRInfo->eHWErrorCode); + PVR_DUMPDEBUG_LOG(" HW error code = 0x%X: %s", + psHWRInfo->eHWErrorCode, sHWDebugInfo); + } + + pszTemp = &aui8RecoveryNum[0]; + while (*pszTemp != '\0') + { + *pszTemp++ = ' '; + } + + /* There's currently no time correlation for the Guest OSes on the Firmware so there's no point printing OS Timestamps on Guests */ + if (!PVRSRV_VZ_MODE_IS(GUEST)) + { + PVR_DUMPDEBUG_LOG(" %s CRTimer = 0x%012"IMG_UINT64_FMTSPECX", OSTimer = %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC ", CyclesElapsed = %" IMG_INT64_FMTSPECd, + aui8RecoveryNum, + psHWRInfo->ui64CRTimer, + ui64Seconds, + ui64Nanoseconds, + (psHWRInfo->ui64CRTimer-psHWRInfo->ui64CRTimeOfKick)*256); + } + else + { + PVR_DUMPDEBUG_LOG(" %s CRTimer = 0x%012"IMG_UINT64_FMTSPECX", 
CyclesElapsed = %" IMG_INT64_FMTSPECd, + aui8RecoveryNum, + psHWRInfo->ui64CRTimer, + (psHWRInfo->ui64CRTimer-psHWRInfo->ui64CRTimeOfKick)*256); + } + + if (psHWRInfo->ui64CRTimeHWResetFinish != 0) + { + if (psHWRInfo->ui64CRTimeFreelistReady != 0) + { + /* If ui64CRTimeFreelistReady is less than ui64CRTimeHWResetFinish it means APM kicked in and the time is not valid. */ + if (psHWRInfo->ui64CRTimeHWResetFinish < psHWRInfo->ui64CRTimeFreelistReady) + { + PVR_DUMPDEBUG_LOG(" %s PreResetTimeInCycles = %" IMG_INT64_FMTSPECd ", HWResetTimeInCycles = %" IMG_INT64_FMTSPECd ", FreelistReconTimeInCycles = %" IMG_INT64_FMTSPECd ", TotalRecoveryTimeInCycles = %" IMG_INT64_FMTSPECd, + aui8RecoveryNum, + (psHWRInfo->ui64CRTimeHWResetStart-psHWRInfo->ui64CRTimer)*256, + (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimeHWResetStart)*256, + (psHWRInfo->ui64CRTimeFreelistReady-psHWRInfo->ui64CRTimeHWResetFinish)*256, + (psHWRInfo->ui64CRTimeFreelistReady-psHWRInfo->ui64CRTimer)*256); + } + else + { + PVR_DUMPDEBUG_LOG(" %s PreResetTimeInCycles = %" IMG_INT64_FMTSPECd ", HWResetTimeInCycles = %" IMG_INT64_FMTSPECd ", FreelistReconTimeInCycles = , TotalResetTimeInCycles = %" IMG_INT64_FMTSPECd, + aui8RecoveryNum, + (psHWRInfo->ui64CRTimeHWResetStart-psHWRInfo->ui64CRTimer)*256, + (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimeHWResetStart)*256, + (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimer)*256); + } + } + else + { + PVR_DUMPDEBUG_LOG(" %s PreResetTimeInCycles = %" IMG_INT64_FMTSPECd ", HWResetTimeInCycles = %" IMG_INT64_FMTSPECd ", TotalResetTimeInCycles = %" IMG_INT64_FMTSPECd, + aui8RecoveryNum, + (psHWRInfo->ui64CRTimeHWResetStart-psHWRInfo->ui64CRTimer)*256, + (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimeHWResetStart)*256, + (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimer)*256); + } + } + + switch (psHWRInfo->eHWRType) + { + case RGX_HWRTYPE_ECCFAULT: + { + PVR_DUMPDEBUG_LOG(" ECC fault GPU=0x%08x", 
psHWRInfo->uHWRData.sECCInfo.ui32FaultGPU); + } + break; + + case RGX_HWRTYPE_MMUFAULT: + { + _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, + &psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0], + "Core", + DD_NORMAL_INDENT); + + bPageFault = IMG_TRUE; + sFaultDevVAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0]; + sFaultDevVAddr.uiAddr &= ~RGX_CR_MMU_FAULT_STATUS1_ADDRESS_CLRMSK; + sFaultDevVAddr.uiAddr >>= RGX_CR_MMU_FAULT_STATUS1_ADDRESS_SHIFT; + sFaultDevVAddr.uiAddr <<= 4; /* align shift */ + ui32PC = (psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS1_CONTEXT_CLRMSK) >> + RGX_CR_MMU_FAULT_STATUS1_CONTEXT_SHIFT; +#if defined(SUPPORT_TRUSTED_DEVICE) + ui32PC = ui32PC - 1; +#endif + bPMFault = (ui32PC <= 8); + sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.ui64PCAddress; + + } + break; + case RGX_HWRTYPE_MMUMETAFAULT: + { + _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, + &psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0], + "Meta", + DD_NORMAL_INDENT); + + bPageFault = IMG_TRUE; + sFaultDevVAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0]; + sFaultDevVAddr.uiAddr &= ~RGX_CR_MMU_FAULT_STATUS1_ADDRESS_CLRMSK; + sFaultDevVAddr.uiAddr >>= RGX_CR_MMU_FAULT_STATUS1_ADDRESS_SHIFT; + sFaultDevVAddr.uiAddr <<= 4; /* align shift */ + sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.ui64PCAddress; + } + break; + case RGX_HWRTYPE_POLLFAILURE: + { + PVR_DUMPDEBUG_LOG(" T%u polling %s (reg:0x%08X mask:0x%08X last:0x%08X)", + psHWRInfo->uHWRData.sPollInfo.ui32ThreadNum, + ((psHWRInfo->uHWRData.sPollInfo.ui32CrPollAddr & RGXFW_POLL_TYPE_SET)?("set"):("unset")), + psHWRInfo->uHWRData.sPollInfo.ui32CrPollAddr & ~RGXFW_POLL_TYPE_SET, + psHWRInfo->uHWRData.sPollInfo.ui32CrPollMask, + psHWRInfo->uHWRData.sPollInfo.ui32CrPollLastValue); + } + break; + + case RGX_HWRTYPE_OVERRUN: + case RGX_HWRTYPE_UNKNOWNFAILURE: + { + /* Nothing to dump */ + } + break; + + default: + { + 
PVR_DUMPDEBUG_LOG(" Unknown HWR Info type: 0x%x", psHWRInfo->eHWRType); + } + break; + } + + if (bPageFault) + { + + FAULT_INFO *psInfo; + + OSLockAcquire(psDevInfo->hDebugFaultInfoLock); + + /* Find the matching Fault Info for this HWRInfo */ + psInfo = &gsFaultInfoLog.asFaults[ui32ReadIndex]; + + /* if they do not match, we need to update the psInfo */ + if ((psInfo->ui64CRTimer != psHWRInfo->ui64CRTimer) || + (psInfo->sFaultDevVAddr.uiAddr != sFaultDevVAddr.uiAddr)) + { + MMU_FAULT_DATA *psFaultData = &psInfo->sMMUFaultData; + + psFaultData->eType = MMU_FAULT_TYPE_UNKNOWN; + + if (bPMFault) + { + /* PM fault and we dump PC details only */ + psFaultData->eTopLevel = MMU_LEVEL_0; + psFaultData->eType = MMU_FAULT_TYPE_PM; + psFaultData->sLevelData[MMU_LEVEL_0].ui64Address = sPCDevPAddr.uiAddr; + } + else + { + RGXCheckFaultAddress(psDevInfo, &sFaultDevVAddr, &sPCDevPAddr, psFaultData); + } + + _RecordFaultInfo(psDevInfo, psInfo, + sFaultDevVAddr, sPCDevPAddr, psHWRInfo->ui64CRTimer, + _PageSizeHWToBytes(ui32PageSize)); + + } + + _DumpFaultAddressHostView(&psInfo->sMMUFaultData, pfnDumpDebugPrintf, pvDumpDebugFile, DD_NORMAL_INDENT); + + if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) + { + _PrintFaultInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psInfo, DD_NORMAL_INDENT); + } + + OSLockRelease(psDevInfo->hDebugFaultInfoLock); + } + + } + + if (ui32ReadIndex == RGXFWIF_HWINFO_MAX_FIRST - 1) + ui32ReadIndex = psHWRInfoBuf->ui32WriteIndex; + else + ui32ReadIndex = (ui32ReadIndex + 1) - (ui32ReadIndex / RGXFWIF_HWINFO_LAST_INDEX) * RGXFWIF_HWINFO_MAX_LAST; + } + } +} + +#if !defined(NO_HARDWARE) + +/*! +******************************************************************************* + + @Function _CheckForPendingPage + + @Description + + Check if the MMU indicates it is blocked on a pending page + MMU4 does not support pending pages, so return false. 
+ + @Input psDevInfo - RGX device info + + @Return IMG_BOOL - IMG_TRUE if there is a pending page + +******************************************************************************/ +static INLINE IMG_BOOL _CheckForPendingPage(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + /* MMU4 doesn't support pending pages */ + return (RGX_GET_FEATURE_VALUE(psDevInfo, MMU_VERSION) < 4) && + (OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_ENTRY) & RGX_CR_MMU_ENTRY_PENDING_EN); +} + +/*! +******************************************************************************* + + @Function _GetPendingPageInfo + + @Description + + Get information about the pending page from the MMU status registers + + @Input psDevInfo - RGX device info + @Output psDevVAddr - The device virtual address of the pending MMU address translation + @Output pui32CatBase - The page catalog base + + @Return void + +******************************************************************************/ +static void _GetPendingPageInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_VIRTADDR *psDevVAddr, + IMG_UINT32 *pui32CatBase) +{ + IMG_UINT64 ui64BIFMMUEntryStatus; + + ui64BIFMMUEntryStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_ENTRY_STATUS); + + psDevVAddr->uiAddr = (ui64BIFMMUEntryStatus & ~RGX_CR_MMU_ENTRY_STATUS_ADDRESS_CLRMSK); + + *pui32CatBase = (ui64BIFMMUEntryStatus & ~RGX_CR_MMU_ENTRY_STATUS_CONTEXT_ID_CLRMSK) >> + RGX_CR_MMU_ENTRY_STATUS_CONTEXT_ID_SHIFT; +} + +#endif + +void RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_BOOL bRGXPoweredON) +{ + IMG_CHAR *pszState, *pszReason; + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; + IMG_UINT32 ui32OSid; + RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; + /* space for the current clock speed and 3 previous */ + RGXFWIF_TIME_CORR asTimeCorrs[4]; + IMG_UINT32 
ui32NumClockSpeedChanges; + +#if defined(NO_HARDWARE) + PVR_UNREFERENCED_PARAMETER(bRGXPoweredON); +#else + if ((bRGXPoweredON) && !PVRSRV_VZ_MODE_IS(GUEST)) + { + IMG_UINT64 aui64RegValMMUStatus[2]; + + aui64RegValMMUStatus[0] = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_FAULT_STATUS1); + aui64RegValMMUStatus[1] = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_FAULT_STATUS2); + _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, &aui64RegValMMUStatus[0], "Core", DD_SUMMARY_INDENT); + + aui64RegValMMUStatus[0] = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_FAULT_STATUS_META); + _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, &aui64RegValMMUStatus[0], "Meta", DD_SUMMARY_INDENT); + + if (_CheckForPendingPage(psDevInfo)) + { + IMG_UINT32 ui32CatBase; + IMG_DEV_VIRTADDR sDevVAddr; + + PVR_DUMPDEBUG_LOG("MMU Pending page: Yes"); + + _GetPendingPageInfo(psDevInfo, &sDevVAddr, &ui32CatBase); + + if (ui32CatBase <= MAX_RESERVED_FW_MMU_CONTEXT) + { + PVR_DUMPDEBUG_LOG("Cannot check address on PM cat base %u", ui32CatBase); + } + else + { + IMG_UINT64 ui64CBaseMapping; + IMG_DEV_PHYADDR sPCDevPAddr; + MMU_FAULT_DATA sFaultData; + IMG_BOOL bIsValid; + + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_CBASE_MAPPING_CONTEXT, ui32CatBase); + + ui64CBaseMapping = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_CBASE_MAPPING); + sPCDevPAddr.uiAddr = (((ui64CBaseMapping & ~RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK) + >> RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT) + << RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT); + bIsValid = !(ui64CBaseMapping & RGX_CR_MMU_CBASE_MAPPING_INVALID_EN); + + PVR_DUMPDEBUG_LOG("Checking device virtual address " IMG_DEV_VIRTADDR_FMTSPEC + " on cat base %u. PC Addr = 0x%llX is %s", + (unsigned long long) sDevVAddr.uiAddr, + ui32CatBase, + (unsigned long long) sPCDevPAddr.uiAddr, + bIsValid ? 
"valid":"invalid"); + RGXCheckFaultAddress(psDevInfo, &sDevVAddr, &sPCDevPAddr, &sFaultData); + _DumpFaultAddressHostView(&sFaultData, pfnDumpDebugPrintf, pvDumpDebugFile, DD_SUMMARY_INDENT); + } + } + } +#endif /* NO_HARDWARE */ + + /* Firmware state */ + switch (OSAtomicRead(&psDevInfo->psDeviceNode->eHealthStatus)) + { + case PVRSRV_DEVICE_HEALTH_STATUS_OK: pszState = "OK"; break; + case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING: pszState = "NOT RESPONDING"; break; + case PVRSRV_DEVICE_HEALTH_STATUS_DEAD: pszState = "DEAD"; break; + case PVRSRV_DEVICE_HEALTH_STATUS_FAULT: pszState = "FAULT"; break; + case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED: pszState = "UNDEFINED"; break; + default: pszState = "UNKNOWN"; break; + } + + switch (OSAtomicRead(&psDevInfo->psDeviceNode->eHealthReason)) + { + case PVRSRV_DEVICE_HEALTH_REASON_NONE: pszReason = ""; break; + case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED: pszReason = " - Asserted"; break; + case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING: pszReason = " - Poll failing"; break; + case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS: pszReason = " - Global Event Object timeouts rising"; break; + case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT: pszReason = " - KCCB offset invalid"; break; + case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED: pszReason = " - KCCB stalled"; break; + case PVRSRV_DEVICE_HEALTH_REASON_IDLING: pszReason = " - Idling"; break; + case PVRSRV_DEVICE_HEALTH_REASON_RESTARTING: pszReason = " - Restarting"; break; + case PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS: pszReason = " - Missing interrupts"; break; + default: pszReason = " - Unknown reason"; break; + } + +#if !defined(NO_HARDWARE) + /* Determine the type virtualisation support used */ +#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) + if (!PVRSRV_VZ_MODE_IS(NATIVE)) + { +#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) +#if defined(SUPPORT_AUTOVZ) +#if defined(SUPPORT_AUTOVZ_HW_REGS) + PVR_DUMPDEBUG_LOG("RGX Virtualisation type: AutoVz with HW 
register support"); +#else + PVR_DUMPDEBUG_LOG("RGX Virtualisation type: AutoVz with shared memory"); +#endif /* defined(SUPPORT_AUTOVZ_HW_REGS) */ +#else + PVR_DUMPDEBUG_LOG("RGX Virtualisation type: Hypervisor-assisted with static Fw heap allocation"); +#endif /* defined(SUPPORT_AUTOVZ) */ +#else + PVR_DUMPDEBUG_LOG("RGX Virtualisation type: Hypervisor-assisted with dynamic Fw heap allocation"); +#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */ + } +#endif /* (RGX_NUM_OS_SUPPORTED > 1) */ + +#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1)) + { + RGXFWIF_CONNECTION_FW_STATE eFwState = KM_GET_FW_CONNECTION(psDevInfo); + RGXFWIF_CONNECTION_OS_STATE eOsState = KM_GET_OS_CONNECTION(psDevInfo); + + PVR_DUMPDEBUG_LOG("RGX Virtualisation firmware connection state: %s (Fw=%s; OS=%s)", + ((eFwState == RGXFW_CONNECTION_FW_ACTIVE) && (eOsState == RGXFW_CONNECTION_OS_ACTIVE)) ? ("UP") : ("DOWN"), + (eFwState < RGXFW_CONNECTION_FW_STATE_COUNT) ? (apszFwOsStateName[eFwState]) : ("invalid"), + (eOsState < RGXFW_CONNECTION_OS_STATE_COUNT) ? 
(apszFwOsStateName[eOsState]) : ("invalid")); + + } +#endif + +#if defined(SUPPORT_AUTOVZ) && defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) + if (!PVRSRV_VZ_MODE_IS(NATIVE)) + { + IMG_UINT32 ui32FwAliveTS = KM_GET_FW_ALIVE_TOKEN(psDevInfo); + IMG_UINT32 ui32OsAliveTS = KM_GET_OS_ALIVE_TOKEN(psDevInfo); + + PVR_DUMPDEBUG_LOG("RGX Virtualisation watchdog timestamps (in GPU timer ticks): Fw=%u; OS=%u; diff(FW, OS) = %u", + ui32FwAliveTS, ui32OsAliveTS, ui32FwAliveTS - ui32OsAliveTS); + } +#endif +#endif /* !defined(NO_HARDWARE) */ + + if (!PVRSRV_VZ_MODE_IS(GUEST)) + { + IMG_CHAR sHwrStateDescription[RGX_DEBUG_STR_SIZE]; + IMG_BOOL bOsIsolationEnabled = IMG_FALSE; + + if (psFwSysData == NULL) + { + /* can't dump any more information */ + PVR_DUMPDEBUG_LOG("RGX FW State: %s%s", pszState, pszReason); + return; + } + + sHwrStateDescription[0] = '\0'; + + _Flags2Description(sHwrStateDescription, RGX_DEBUG_STR_SIZE, + asHwrState2Description, ARRAY_SIZE(asHwrState2Description), + psFwSysData->ui32HWRStateFlags); + PVR_DUMPDEBUG_LOG("RGX FW State: %s%s (HWRState 0x%08x:%s)", pszState, pszReason, psFwSysData->ui32HWRStateFlags, sHwrStateDescription); + PVR_DUMPDEBUG_LOG("RGX FW Power State: %s (APM %s: %d ok, %d denied, %d non-idle, %d retry, %d other, %d total. 
Latency: %u ms)", + pszPowStateName[psFwSysData->ePowState], + (psDevInfo->pvAPMISRData)?"enabled":"disabled", + psDevInfo->ui32ActivePMReqOk - psDevInfo->ui32ActivePMReqNonIdle, + psDevInfo->ui32ActivePMReqDenied, + psDevInfo->ui32ActivePMReqNonIdle, + psDevInfo->ui32ActivePMReqRetry, + psDevInfo->ui32ActivePMReqTotal - + psDevInfo->ui32ActivePMReqOk - + psDevInfo->ui32ActivePMReqDenied - + psDevInfo->ui32ActivePMReqRetry - + psDevInfo->ui32ActivePMReqNonIdle, + psDevInfo->ui32ActivePMReqTotal, + psRuntimeCfg->ui32ActivePMLatencyms); + + ui32NumClockSpeedChanges = (IMG_UINT32) OSAtomicRead(&psDevInfo->psDeviceNode->iNumClockSpeedChanges); + RGXGetTimeCorrData(psDevInfo->psDeviceNode, asTimeCorrs, ARRAY_SIZE(asTimeCorrs)); + + PVR_DUMPDEBUG_LOG("RGX DVFS: %u frequency changes. " + "Current frequency: %u.%03u MHz (sampled at %" IMG_UINT64_FMTSPEC " ns). " + "FW frequency: %u.%03u MHz.", + ui32NumClockSpeedChanges, + asTimeCorrs[0].ui32CoreClockSpeed / 1000000, + (asTimeCorrs[0].ui32CoreClockSpeed / 1000) % 1000, + asTimeCorrs[0].ui64OSTimeStamp, + psRuntimeCfg->ui32CoreClockSpeed / 1000000, + (psRuntimeCfg->ui32CoreClockSpeed / 1000) % 1000); + if (ui32NumClockSpeedChanges > 0) + { + PVR_DUMPDEBUG_LOG(" Previous frequencies: %u.%03u, %u.%03u, %u.%03u MHz (Sampled at " + "%" IMG_UINT64_FMTSPEC ", %" IMG_UINT64_FMTSPEC ", %" IMG_UINT64_FMTSPEC ")", + asTimeCorrs[1].ui32CoreClockSpeed / 1000000, + (asTimeCorrs[1].ui32CoreClockSpeed / 1000) % 1000, + asTimeCorrs[2].ui32CoreClockSpeed / 1000000, + (asTimeCorrs[2].ui32CoreClockSpeed / 1000) % 1000, + asTimeCorrs[3].ui32CoreClockSpeed / 1000000, + (asTimeCorrs[3].ui32CoreClockSpeed / 1000) % 1000, + asTimeCorrs[1].ui64OSTimeStamp, + asTimeCorrs[2].ui64OSTimeStamp, + asTimeCorrs[3].ui64OSTimeStamp); + } + + for (ui32OSid = 0; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) + { + RGXFWIF_OS_RUNTIME_FLAGS sFwRunFlags = psFwSysData->asOsRuntimeFlagsMirror[ui32OSid]; + + IMG_BOOL bMTSEnabled = (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, 
GPU_VIRTUALISATION)) ? + IMG_TRUE : ((OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_SCHEDULE_ENABLE) & BIT(ui32OSid)) != 0); + + PVR_DUMPDEBUG_LOG("RGX FW OS %u - State: %s; Freelists: %s%s; Priority: %d;%s %s", ui32OSid, + apszFwOsStateName[sFwRunFlags.bfOsState], + (sFwRunFlags.bfFLOk) ? "Ok" : "Not Ok", + (sFwRunFlags.bfFLGrowPending) ? "; Grow Request Pending" : "", + psDevInfo->psRGXFWIfRuntimeCfg->aui32OSidPriority[ui32OSid], + (sFwRunFlags.bfIsolatedOS) ? " Isolated;" : "", + (bMTSEnabled) ? "MTS on;" : "MTS off;" + ); + + bOsIsolationEnabled |= sFwRunFlags.bfIsolatedOS; + } + +#if defined(PVR_ENABLE_PHR) + { + IMG_CHAR sPHRConfigDescription[RGX_DEBUG_STR_SIZE]; + + sPHRConfigDescription[0] = '\0'; + _Flags2Description(sPHRConfigDescription, RGX_DEBUG_STR_SIZE, + asPHRConfig2Description, ARRAY_SIZE(asPHRConfig2Description), + BIT_ULL(psDevInfo->psRGXFWIfRuntimeCfg->ui32PHRMode)); + + PVR_DUMPDEBUG_LOG("RGX PHR configuration: (%d) %s", psDevInfo->psRGXFWIfRuntimeCfg->ui32PHRMode, sPHRConfigDescription); + } +#endif + + if (bOsIsolationEnabled) + { + PVR_DUMPDEBUG_LOG("RGX Hard Context Switch deadline: %u ms", psDevInfo->psRGXFWIfRuntimeCfg->ui32HCSDeadlineMS); + } + + _RGXDumpFWAssert(pfnDumpDebugPrintf, pvDumpDebugFile, psRGXFWIfTraceBufCtl); + _RGXDumpFWFaults(pfnDumpDebugPrintf, pvDumpDebugFile, psFwSysData); + _RGXDumpFWPoll(pfnDumpDebugPrintf, pvDumpDebugFile, psFwSysData); + } + else + { + PVR_DUMPDEBUG_LOG("RGX FW State: Unavailable under Guest Mode of operation"); + PVR_DUMPDEBUG_LOG("RGX FW Power State: Unavailable under Guest Mode of operation"); + } + + _RGXDumpFWHWRInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psFwSysData, psDevInfo->psRGXFWIfHWRInfoBufCtl, psDevInfo); + +#if defined(SUPPORT_RGXFW_STATS_FRAMEWORK) + /* Dump all non-zero values in lines of 8... 
*/ + { + IMG_CHAR pszLine[(9*RGXFWIF_STATS_FRAMEWORK_LINESIZE)+1]; + IMG_UINT32 *pui32FWStatsBuf = psFwSysData->aui32FWStatsBuf; + IMG_UINT32 ui32Index1, ui32Index2; + + PVR_DUMPDEBUG_LOG("STATS[START]: RGXFWIF_STATS_FRAMEWORK_MAX=%d", RGXFWIF_STATS_FRAMEWORK_MAX); + for (ui32Index1 = 0; ui32Index1 < RGXFWIF_STATS_FRAMEWORK_MAX; ui32Index1 += RGXFWIF_STATS_FRAMEWORK_LINESIZE) + { + IMG_UINT32 ui32OrOfValues = 0; + IMG_CHAR *pszBuf = pszLine; + + /* Print all values in this line and skip if all zero... */ + for (ui32Index2 = 0; ui32Index2 < RGXFWIF_STATS_FRAMEWORK_LINESIZE; ui32Index2++) + { + ui32OrOfValues |= pui32FWStatsBuf[ui32Index1+ui32Index2]; + OSSNPrintf(pszBuf, 9 + 1, " %08x", pui32FWStatsBuf[ui32Index1+ui32Index2]); + pszBuf += 9; /* write over the '\0' */ + } + + if (ui32OrOfValues != 0) + { + PVR_DUMPDEBUG_LOG("STATS[%08x]:%s", ui32Index1, pszLine); + } + } + PVR_DUMPDEBUG_LOG("STATS[END]"); + } +#endif +} + +#if !defined(NO_HARDWARE) +static void _RGXDumpMetaSPExtraDebugInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ +/* List of extra META Slave Port debug registers */ +/* Order in these two initialisers must match */ +#define RGX_META_SP_EXTRA_DEBUG \ + X(RGX_CR_META_SP_MSLVCTRL0) \ + X(RGX_CR_META_SP_MSLVCTRL1) \ + X(RGX_CR_META_SP_MSLVDATAX) \ + X(RGX_CR_META_SP_MSLVIRQSTATUS) \ + X(RGX_CR_META_SP_MSLVIRQENABLE) \ + X(RGX_CR_META_SP_MSLVIRQLEVEL) + +#define RGX_META_SP_EXTRA_DEBUG__UNPACKED_ACCESSES \ + X(RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES) \ + X(RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES) \ + X(RGX_CR_META_SP_MSLVDATAX__META_REGISTER_UNPACKED_ACCESSES) \ + X(RGX_CR_META_SP_MSLVIRQSTATUS__META_REGISTER_UNPACKED_ACCESSES) \ + X(RGX_CR_META_SP_MSLVIRQENABLE__META_REGISTER_UNPACKED_ACCESSES) \ + X(RGX_CR_META_SP_MSLVIRQLEVEL__META_REGISTER_UNPACKED_ACCESSES) + + IMG_UINT32 ui32Idx, ui32RegIdx; + IMG_UINT32 ui32RegVal; + IMG_UINT32 ui32RegAddr; + + const 
IMG_UINT32* pui32DebugRegAddr; + const IMG_UINT32 aui32DebugRegAddr[] = { +#define X(A) A, + RGX_META_SP_EXTRA_DEBUG +#undef X + }; + const IMG_UINT32 aui32DebugRegAddrUA[] = { +#define X(A) A, + RGX_META_SP_EXTRA_DEBUG__UNPACKED_ACCESSES +#undef X + }; + + const IMG_CHAR* apszDebugRegName[] = { +#define X(A) #A, + RGX_META_SP_EXTRA_DEBUG +#undef X + }; + + const IMG_UINT32 aui32Debug2RegAddr[] = {0xA28, 0x0A30, 0x0A38}; + + PVR_DUMPDEBUG_LOG("META Slave Port extra debug:"); + + /* array of register offset values depends on feature. But don't augment names in apszDebugRegName */ + PVR_ASSERT(sizeof(aui32DebugRegAddrUA) == sizeof(aui32DebugRegAddr)); + pui32DebugRegAddr = RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES) ? + aui32DebugRegAddrUA : aui32DebugRegAddr; + + /* dump first set of Slave Port debug registers */ + for (ui32Idx = 0; ui32Idx < sizeof(aui32DebugRegAddr)/sizeof(IMG_UINT32); ui32Idx++) + { + const IMG_CHAR* pszRegName = apszDebugRegName[ui32Idx]; + + ui32RegAddr = pui32DebugRegAddr[ui32Idx]; + ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr); + PVR_DUMPDEBUG_LOG(" * %s: 0x%8.8X", pszRegName, ui32RegVal); + } + + /* dump second set of Slave Port debug registers */ + for (ui32Idx = 0; ui32Idx < 4; ui32Idx++) + { + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, 0xA20, ui32Idx); + ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, 0xA20); + PVR_DUMPDEBUG_LOG(" * 0xA20[%d]: 0x%8.8X", ui32Idx, ui32RegVal); + + } + + for (ui32RegIdx = 0; ui32RegIdx < sizeof(aui32Debug2RegAddr)/sizeof(IMG_UINT32); ui32RegIdx++) + { + ui32RegAddr = aui32Debug2RegAddr[ui32RegIdx]; + for (ui32Idx = 0; ui32Idx < 2; ui32Idx++) + { + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr, ui32Idx); + ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr); + PVR_DUMPDEBUG_LOG(" * 0x%X[%d]: 0x%8.8X", ui32RegAddr, ui32Idx, ui32RegVal); + } + } + +} +#endif /* !defined(NO_HARDWARE) */ + +/* + * Array of all the Firmware Trace log IDs used to 
convert the trace data. + */ +typedef struct _TRACEBUF_LOG_ { + RGXFW_LOG_SFids eSFId; + const IMG_CHAR *pszName; + const IMG_CHAR *pszFmt; + IMG_UINT32 ui32ArgNum; +} TRACEBUF_LOG; + +static const TRACEBUF_LOG aLogDefinitions[] = +{ +#define X(a, b, c, d, e) {RGXFW_LOG_CREATESFID(a,b,e), #c, d, e}, + RGXFW_LOG_SFIDLIST +#undef X +}; + +#define NARGS_MASK ~(0xF<<16) +static IMG_BOOL _FirmwareTraceIntegrityCheck(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + const TRACEBUF_LOG *psLogDef = &aLogDefinitions[0]; + IMG_BOOL bIntegrityOk = IMG_TRUE; + + /* + * For every log ID, check the format string and number of arguments is valid. + */ + while (psLogDef->eSFId != RGXFW_SF_LAST) + { + const TRACEBUF_LOG *psLogDef2; + const IMG_CHAR *pszString; + IMG_UINT32 ui32Count; + + /* + * Check the number of arguments matches the number of '%' in the string and + * check that no string uses %s which is not supported as it requires a + * pointer to memory that is not going to be valid. + */ + pszString = psLogDef->pszFmt; + ui32Count = 0; + + while (*pszString != '\0') + { + if (*pszString++ == '%') + { + ui32Count++; + if (*pszString == 's') + { + bIntegrityOk = IMG_FALSE; + PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s has an unsupported type not recognized (fmt: %%%c). Please fix.", + psLogDef->pszName, *pszString); + } + else if (*pszString == '%') + { + /* Double % is a printable % sign and not a format string... */ + ui32Count--; + } + } + } + + if (ui32Count != psLogDef->ui32ArgNum) + { + bIntegrityOk = IMG_FALSE; + PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s has %d arguments but only %d are specified. Please fix.", + psLogDef->pszName, ui32Count, psLogDef->ui32ArgNum); + } + + /* RGXDumpFirmwareTrace() has a hardcoded limit of supporting up to 20 arguments... */ + if (ui32Count > 20) + { + bIntegrityOk = IMG_FALSE; + PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s has %d arguments but a maximum of 20 are supported. 
Please fix.", + psLogDef->pszName, ui32Count); + } + + /* Check the id number is unique (don't take into account the number of arguments) */ + ui32Count = 0; + psLogDef2 = &aLogDefinitions[0]; + + while (psLogDef2->eSFId != RGXFW_SF_LAST) + { + if ((psLogDef->eSFId & NARGS_MASK) == (psLogDef2->eSFId & NARGS_MASK)) + { + ui32Count++; + } + psLogDef2++; + } + + if (ui32Count != 1) + { + bIntegrityOk = IMG_FALSE; + PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s id %x is not unique, there are %d more. Please fix.", + psLogDef->pszName, psLogDef->eSFId, ui32Count - 1); + } + + /* Move to the next log ID... */ + psLogDef++; + } + + return bIntegrityOk; +} + +typedef struct { + IMG_UINT16 ui16Mask; + const IMG_CHAR *pszStr; +} RGXFWT_DEBUG_INFO_MSKSTR; /* pair of bit mask and debug info message string */ + + +/*! +******************************************************************************* + + @Function RGXPrepareExtraDebugInfo + + @Description + + Prepares debug info string by decoding ui16DebugInfo value passed + + @Input pszBuffer - pointer to debug info string buffer + + @Return void + +******************************************************************************/ +static void RGXPrepareExtraDebugInfo(IMG_CHAR *pszBuffer, IMG_UINT32 ui32BufferSize, IMG_UINT16 ui16DebugInfo) +{ + const RGXFWT_DEBUG_INFO_MSKSTR aDebugInfoMskStr[] = + { +#define X(a, b) {a, b}, + RGXFWT_DEBUG_INFO_MSKSTRLIST +#undef X + }; + + IMG_UINT32 ui32NumFields = sizeof(aDebugInfoMskStr)/sizeof(RGXFWT_DEBUG_INFO_MSKSTR); + IMG_UINT32 i; + IMG_BOOL bHasExtraDebugInfo = IMG_FALSE; + + /* Add prepend string */ + OSStringLCopy(pszBuffer, RGXFWT_DEBUG_INFO_STR_PREPEND, ui32BufferSize); + + /* Add debug info strings */ + for (i = 0; i < ui32NumFields; i++) + { + if (ui16DebugInfo & aDebugInfoMskStr[i].ui16Mask) + { + if (bHasExtraDebugInfo) + { + OSStringLCat(pszBuffer, ", ", ui32BufferSize); /* Add comma separator */ + } + OSStringLCat(pszBuffer, aDebugInfoMskStr[i].pszStr, ui32BufferSize); + 
bHasExtraDebugInfo = IMG_TRUE; + } + } + + /* Add append string */ + OSStringLCat(pszBuffer, RGXFWT_DEBUG_INFO_STR_APPEND, ui32BufferSize); +} + +void RGXDumpFirmwareTrace(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; + static IMG_BOOL bIntegrityCheckPassed = IMG_FALSE; + + /* Check that the firmware trace is correctly defined... */ + if (!bIntegrityCheckPassed) + { + bIntegrityCheckPassed = _FirmwareTraceIntegrityCheck(pfnDumpDebugPrintf, pvDumpDebugFile); + if (!bIntegrityCheckPassed) + { + return; + } + } + + /* Dump FW trace information... */ + if (psRGXFWIfTraceBufCtl != NULL) + { + IMG_UINT32 tid; + IMG_UINT32 ui32TraceBufSizeInDWords = psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords; + + /* Print the log type settings... */ + if (psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK) + { + PVR_DUMPDEBUG_LOG("Debug log type: %s ( " RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC ")", + ((psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)?("trace"):("tbi")), + RGXFWIF_LOG_ENABLED_GROUPS_LIST(psRGXFWIfTraceBufCtl->ui32LogType) + ); + } + else + { + PVR_DUMPDEBUG_LOG("Debug log type: none"); + } + + /* Print the decoded log for each thread... */ + for (tid = 0; tid < RGXFW_THREAD_NUM; tid++) + { + IMG_UINT32 *pui32TraceBuf = psRGXFWIfTraceBufCtl->sTraceBuf[tid].pui32TraceBuffer; + IMG_UINT32 ui32TracePtr = psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32TracePointer; + IMG_UINT32 ui32Count = 0; + + if (pui32TraceBuf == NULL) + { + /* trace buffer not yet allocated */ + continue; + } + + while (ui32Count < ui32TraceBufSizeInDWords) + { + IMG_UINT32 ui32Data, ui32DataToId; + + /* Find the first valid log ID, skipping whitespace... */ + do + { + ui32Data = pui32TraceBuf[ui32TracePtr]; + ui32DataToId = idToStringID(ui32Data, SFs); + + /* If an unrecognized id is found it may be inconsistent data or a firmware trace error. 
*/ + if (ui32DataToId == RGXFW_SF_LAST && RGXFW_LOG_VALIDID(ui32Data)) + { + PVR_DUMPDEBUG_LOG("WARNING: Unrecognized id (%x). From here on the trace might be wrong!", ui32Data); + } + + /* Update the trace pointer... */ + ui32TracePtr = (ui32TracePtr + 1) % ui32TraceBufSizeInDWords; + ui32Count++; + } while ((RGXFW_SF_LAST == ui32DataToId || ui32DataToId >= RGXFW_SF_FIRST) && + ui32Count < ui32TraceBufSizeInDWords); + + if (ui32Count < ui32TraceBufSizeInDWords) + { + IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN] = "%" IMG_UINT64_FMTSPEC ":T%u-%s> "; + IMG_CHAR szDebugInfoBuffer[RGXFWT_DEBUG_INFO_STR_MAXLEN] = ""; + IMG_UINT64 ui64Timestamp; + IMG_UINT16 ui16DebugInfo; + + /* If we hit the ASSERT message then this is the end of the log... */ + if (ui32Data == RGXFW_SF_MAIN_ASSERT_FAILED) + { + PVR_DUMPDEBUG_LOG("ASSERTION %s failed at %s:%u", + psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.szInfo, + psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.szPath, + psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.ui32LineNum); + break; + } + + ui64Timestamp = (IMG_UINT64)(pui32TraceBuf[(ui32TracePtr + 0) % ui32TraceBufSizeInDWords]) << 32 | + (IMG_UINT64)(pui32TraceBuf[(ui32TracePtr + 1) % ui32TraceBufSizeInDWords]); + + ui16DebugInfo = (IMG_UINT16) ((ui64Timestamp & ~RGXFWT_TIMESTAMP_DEBUG_INFO_CLRMSK) >> RGXFWT_TIMESTAMP_DEBUG_INFO_SHIFT); + ui64Timestamp = (ui64Timestamp & ~RGXFWT_TIMESTAMP_TIME_CLRMSK) >> RGXFWT_TIMESTAMP_TIME_SHIFT; + + /* + * Print the trace string and provide up to 20 arguments which + * printf function will be able to use. We have already checked + * that no string uses more than this. 
+ */ + OSStringLCat(szBuffer, SFs[ui32DataToId].psName, PVR_MAX_DEBUG_MESSAGE_LEN); + + /* Check and append any extra debug info available */ + if (ui16DebugInfo) + { + /* Prepare debug info string */ + RGXPrepareExtraDebugInfo(szDebugInfoBuffer, RGXFWT_DEBUG_INFO_STR_MAXLEN, ui16DebugInfo); + + /* Append debug info string */ + OSStringLCat(szBuffer, szDebugInfoBuffer, PVR_MAX_DEBUG_MESSAGE_LEN); + } + + PVR_DUMPDEBUG_LOG(szBuffer, ui64Timestamp, tid, groups[RGXFW_SF_GID(ui32Data)], + pui32TraceBuf[(ui32TracePtr + 2) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 3) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 4) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 5) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 6) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 7) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 8) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 9) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 10) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 11) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 12) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 13) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 14) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 15) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 16) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 17) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 18) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 19) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 20) % ui32TraceBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 21) % ui32TraceBufSizeInDWords]); + + /* Update the trace pointer... 
*/ + ui32TracePtr = (ui32TracePtr + 2 + RGXFW_SF_PARAMNUM(ui32Data)) % ui32TraceBufSizeInDWords; + ui32Count = (ui32Count + 2 + RGXFW_SF_PARAMNUM(ui32Data)); + } + } + } + } +} + +#if defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS) +void RGXDumpPowerMonitoring(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + + /* Print the power monitoring counters... */ + if (psFwSysData != NULL) + { + IMG_UINT32 *pui32TraceBuf = psFwSysData->sPowerMonBuf.pui32TraceBuffer; + IMG_UINT32 ui32TracePtr = 0; //psFwSysData->sPowerMonBuf.ui32TracePointer; + IMG_UINT32 ui32PowerMonBufSizeInDWords = psFwSysData->ui32PowerMonBufSizeInDWords; + IMG_UINT32 ui32Count = 0; + IMG_UINT64 ui64Timestamp; + + if (pui32TraceBuf == NULL) + { + /* power monitoring buffer not yet allocated */ + return; + } + + if (pui32TraceBuf[ui32TracePtr] != RGX_CR_TIMER) + { + PVR_DPF((PVR_DBG_WARNING, "Power monitoring data not available.")); + return; + } + ui64Timestamp = (IMG_UINT64)(pui32TraceBuf[(ui32TracePtr + 1) % ui32PowerMonBufSizeInDWords]) << 32 | + (IMG_UINT64)(pui32TraceBuf[(ui32TracePtr + 2) % ui32PowerMonBufSizeInDWords]); + + /* Update the trace pointer... 
*/ + ui32TracePtr = (ui32TracePtr + 3) % ui32PowerMonBufSizeInDWords; + ui32Count = (ui32Count + 3); + + PVR_DPF((PVR_DBG_WARNING, "Dumping power monitoring buffer: CPUVAddr = %p, pointer = 0x%x, size = 0x%x", + pui32TraceBuf, + ui32TracePtr, + ui32PowerMonBufSizeInDWords)); + + while (ui32Count < ui32PowerMonBufSizeInDWords) + { + /* power monitoring data is (register, value) dword pairs */ + PVR_DUMPDEBUG_LOG("%" IMG_UINT64_FMTSPEC ":POWMON 0x%08x 0x%08x 0x%08x 0x%08x", + ui64Timestamp, + pui32TraceBuf[(ui32TracePtr + 0) % ui32PowerMonBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 1) % ui32PowerMonBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 2) % ui32PowerMonBufSizeInDWords], + pui32TraceBuf[(ui32TracePtr + 3) % ui32PowerMonBufSizeInDWords]); + + if (pui32TraceBuf[(ui32TracePtr + 0) % ui32PowerMonBufSizeInDWords] == RGXFWIF_TIMEDIFF_ID || + pui32TraceBuf[(ui32TracePtr + 2) % ui32PowerMonBufSizeInDWords] == RGXFWIF_TIMEDIFF_ID) + { + /* end of buffer */ + break; + } + + /* Update the trace pointer... 
*/ + ui32TracePtr = (ui32TracePtr + 4) % ui32PowerMonBufSizeInDWords; + ui32Count = (ui32Count + 4); + } + } +} +#endif + +static const IMG_CHAR *_RGXGetDebugDevStateString(PVRSRV_DEVICE_STATE eDevState) +{ + switch (eDevState) + { + case PVRSRV_DEVICE_STATE_INIT: + return "Initialising"; + case PVRSRV_DEVICE_STATE_ACTIVE: + return "Active"; + case PVRSRV_DEVICE_STATE_DEINIT: + return "De-initialising"; + case PVRSRV_DEVICE_STATE_BAD: + return "Bad"; + case PVRSRV_DEVICE_STATE_UNDEFINED: + PVR_ASSERT(!"Device has undefined state"); + __fallthrough; + default: + return "Unknown"; + } +} + +static const IMG_CHAR* _RGXGetDebugDevPowerStateString(PVRSRV_DEV_POWER_STATE ePowerState) +{ + switch (ePowerState) + { + case PVRSRV_DEV_POWER_STATE_DEFAULT: return "DEFAULT"; + case PVRSRV_DEV_POWER_STATE_OFF: return "OFF"; + case PVRSRV_DEV_POWER_STATE_ON: return "ON"; + default: return "UNKNOWN"; + } +} + +/* Helper macros to emit data */ +#define REG32_FMTSPEC "%-30s: 0x%08X" +#define REG64_FMTSPEC "%-30s: 0x%016" IMG_UINT64_FMTSPECX +#define DDLOG32(R) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, #R, OSReadHWReg32(pvRegsBaseKM, RGX_CR_##R)); +#define DDLOG64(R) PVR_DUMPDEBUG_LOG(REG64_FMTSPEC, #R, OSReadHWReg64(pvRegsBaseKM, RGX_CR_##R)); +#define DDLOG32_DPX(R) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, #R, OSReadHWReg32(pvRegsBaseKM, DPX_CR_##R)); +#define DDLOG64_DPX(R) PVR_DUMPDEBUG_LOG(REG64_FMTSPEC, #R, OSReadHWReg64(pvRegsBaseKM, DPX_CR_##R)); +#define DDLOGVAL32(S,V) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, S, V); +#define DDLOG32UNPACKED(R) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, #R, OSReadHWReg32(pvRegsBaseKM, RGX_CR_##R##__META_REGISTER_UNPACKED_ACCESSES)); + +#if !defined(NO_HARDWARE) +static PVRSRV_ERROR RGXDumpRISCVState(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ + void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; + RGXRISCVFW_STATE sRiscvState; + const IMG_CHAR *pszException; + PVRSRV_ERROR eError; + + /* Limit dump to what is 
currently being used */ + DDLOG64(FWCORE_ADDR_REMAP_CONFIG4); + DDLOG64(FWCORE_ADDR_REMAP_CONFIG5); + DDLOG64(FWCORE_ADDR_REMAP_CONFIG6); + DDLOG64(FWCORE_ADDR_REMAP_CONFIG12); + DDLOG64(FWCORE_ADDR_REMAP_CONFIG13); + DDLOG64(FWCORE_ADDR_REMAP_CONFIG14); + + PVR_DUMPDEBUG_LOG("---- [ RISC-V internal state ] ----"); + + eError = RGXRiscvHalt(psDevInfo); + PVR_GOTO_IF_ERROR(eError, _RISCVDMError); + +#define X(name, address) \ + eError = RGXRiscvReadReg(psDevInfo, address, &sRiscvState.name); \ + PVR_LOG_GOTO_IF_ERROR(eError, "RGXRiscvReadReg", _RISCVDMError); \ + DDLOGVAL32(#name, sRiscvState.name); + + RGXRISCVFW_DEBUG_DUMP_REGISTERS +#undef X + + eError = RGXRiscvResume(psDevInfo); + PVR_GOTO_IF_ERROR(eError, _RISCVDMError); + + pszException = _GetRISCVException(sRiscvState.mcause); + if (pszException != NULL) + { + PVR_DUMPDEBUG_LOG("RISC-V FW hit an exception: %s", pszException); + +#if !defined(SUPPORT_TRUSTED_DEVICE) && !defined(EMULATOR) + eError = _ValidateFWImageForRISCV(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); + + if (eError != PVRSRV_OK) + { + PVR_DUMPDEBUG_LOG("Failed to validate any FW code corruption"); + } +#elif defined(EMULATOR) + PVR_DUMPDEBUG_LOG("Validation of RISC-V FW code is disabled on emulator"); +#endif + } + + return PVRSRV_OK; + +_RISCVDMError: + PVR_DPF((PVR_DBG_ERROR, "Failed to communicate with the Debug Module")); + + return eError; +} +#endif + +PVRSRV_ERROR RGXDumpRGXRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ +#if !defined(NO_HARDWARE) + IMG_UINT32 ui32Meta = RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) ? 
RGX_GET_FEATURE_VALUE(psDevInfo, META) : 0; + IMG_UINT32 ui32RegVal; + PVRSRV_ERROR eError; +#endif + IMG_BOOL bFirmwarePerf; + IMG_BOOL bMulticore = RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT); + void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; + + /* Check if firmware perf was set at Init time */ + bFirmwarePerf = (psDevInfo->psRGXFWIfSysInit->eFirmwarePerf != FW_PERF_CONF_NONE); + +#if defined(NO_HARDWARE) + /* OSReadHWReg variants don't use params passed in NoHW builds */ + PVR_UNREFERENCED_PARAMETER(pvRegsBaseKM); +#endif + + DDLOG64(CORE_ID); + + if (bMulticore) + { + DDLOG64(MULTICORE); + DDLOG32(MULTICORE_SYSTEM); + DDLOG32(MULTICORE_DOMAIN); + } + DDLOG32(EVENT_STATUS); + DDLOG64(TIMER); + DDLOG64(CLK_CTRL0); + DDLOG64(CLK_STATUS0); + DDLOG64(CLK_CTRL1); + DDLOG64(CLK_STATUS1); + DDLOG64(MMU_FAULT_STATUS1); + DDLOG64(MMU_FAULT_STATUS2); + DDLOG64(MMU_FAULT_STATUS_PM); + DDLOG64(MMU_FAULT_STATUS_META); + DDLOG64(SLC_STATUS1); + DDLOG64(SLC_STATUS2); + DDLOG64(SLC_STATUS_DEBUG); + DDLOG64(MMU_STATUS); + DDLOG32(BIF_PFS); + DDLOG32(BIF_TEXAS0_PFS); + DDLOG32(BIF_TEXAS1_PFS); + DDLOG32(BIF_OUTSTANDING_READ); + DDLOG32(BIF_TEXAS0_OUTSTANDING_READ); + DDLOG32(BIF_TEXAS1_OUTSTANDING_READ); + DDLOG32(FBCDC_IDLE); + DDLOG32(FBCDC_STATUS); + DDLOG32(SPU_ENABLE); + + DDLOG64(CONTEXT_MAPPING0); + DDLOG64(CONTEXT_MAPPING2); + DDLOG64(CONTEXT_MAPPING3); + DDLOG64(CONTEXT_MAPPING4); + + if (bMulticore) + { +#if !defined(RGX_CR_MULTICORE_AXI) +#define RGX_CR_MULTICORE_AXI (0x2508U) +#define RGX_CR_MULTICORE_AXI_ERROR (0x2510U) +#endif + DDLOG32(MULTICORE_AXI); + DDLOG32(MULTICORE_AXI_ERROR); + DDLOG32(MULTICORE_TDM_CTRL_COMMON); + DDLOG32(MULTICORE_FRAGMENT_CTRL_COMMON); + DDLOG32(MULTICORE_COMPUTE_CTRL_COMMON); + } + + DDLOG32(PERF_PHASE_2D); + DDLOG32(PERF_CYCLE_2D_TOTAL); + DDLOG32(PERF_PHASE_GEOM); + DDLOG32(PERF_CYCLE_GEOM_TOTAL); + DDLOG32(PERF_PHASE_FRAG); + DDLOG32(PERF_CYCLE_FRAG_TOTAL); + DDLOG32(PERF_CYCLE_GEOM_OR_FRAG_TOTAL); + 
DDLOG32(PERF_CYCLE_GEOM_AND_FRAG_TOTAL); + DDLOG32(PERF_PHASE_COMP); + DDLOG32(PERF_CYCLE_COMP_TOTAL); + DDLOG32(PM_PARTIAL_RENDER_ENABLE); + + DDLOG32(ISP_RENDER); + DDLOG32(ISP_CTL); + + DDLOG32(MTS_INTCTX); + DDLOG32(MTS_BGCTX); + DDLOG32(MTS_BGCTX_COUNTED_SCHEDULE); + DDLOG32(MTS_SCHEDULE); + DDLOG32(MTS_GPU_INT_STATUS); + + DDLOG32(CDM_CONTEXT_STORE_STATUS); + DDLOG64(CDM_CONTEXT_PDS0); + DDLOG64(CDM_CONTEXT_PDS1); + DDLOG64(CDM_TERMINATE_PDS); + DDLOG64(CDM_TERMINATE_PDS1); + DDLOG64(CDM_CONTEXT_LOAD_PDS0); + DDLOG64(CDM_CONTEXT_LOAD_PDS1); + + DDLOG32(JONES_IDLE); + DDLOG32(SLC_IDLE); + DDLOG32(SLC_FAULT_STOP_STATUS); + + DDLOG64(SCRATCH0); + DDLOG64(SCRATCH1); + DDLOG64(SCRATCH2); + DDLOG64(SCRATCH3); + DDLOG64(SCRATCH4); + DDLOG64(SCRATCH5); + DDLOG64(SCRATCH6); + DDLOG64(SCRATCH7); + DDLOG64(SCRATCH8); + DDLOG64(SCRATCH9); + DDLOG64(SCRATCH10); + DDLOG64(SCRATCH11); + DDLOG64(SCRATCH12); + DDLOG64(SCRATCH13); + DDLOG64(SCRATCH14); + DDLOG64(SCRATCH15); + DDLOG32(IRQ_OS0_EVENT_STATUS); + +#if !defined(NO_HARDWARE) + if (ui32Meta) + { + IMG_BOOL bIsT0Enabled = IMG_FALSE, bIsFWFaulted = IMG_FALSE; + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES)) + { + DDLOG32UNPACKED(META_SP_MSLVIRQSTATUS); + } + else + { + DDLOG32(META_SP_MSLVIRQSTATUS); + } + + eError = RGXReadWithSP(psDevInfo, META_CR_T0ENABLE_OFFSET, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadWithSP", _METASPError); + DDLOGVAL32("T0 TXENABLE", ui32RegVal); + if (ui32RegVal & META_CR_TXENABLE_ENABLE_BIT) + { + bIsT0Enabled = IMG_TRUE; + } + + eError = RGXReadWithSP(psDevInfo, META_CR_T0STATUS_OFFSET, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadWithSP", _METASPError); + DDLOGVAL32("T0 TXSTATUS", ui32RegVal); + + /* check for FW fault */ + if (((ui32RegVal >> 20) & 0x3) == 0x2) + { + bIsFWFaulted = IMG_TRUE; + } + + eError = RGXReadWithSP(psDevInfo, META_CR_T0DEFR_OFFSET, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadWithSP", _METASPError); + 
DDLOGVAL32("T0 TXDEFR", ui32RegVal); + + eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_PC, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); + DDLOGVAL32("T0 PC", ui32RegVal); + + eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_PCX, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); + DDLOGVAL32("T0 PCX", ui32RegVal); + + eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_SP, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); + DDLOGVAL32("T0 SP", ui32RegVal); + + if ((ui32Meta == MTP218) || (ui32Meta == MTP219)) + { + eError = RGXReadWithSP(psDevInfo, META_CR_T1ENABLE_OFFSET, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadWithSP", _METASPError); + DDLOGVAL32("T1 TXENABLE", ui32RegVal); + + eError = RGXReadWithSP(psDevInfo, META_CR_T1STATUS_OFFSET, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadWithSP", _METASPError); + DDLOGVAL32("T1 TXSTATUS", ui32RegVal); + + eError = RGXReadWithSP(psDevInfo, META_CR_T1DEFR_OFFSET, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadWithSP", _METASPError); + DDLOGVAL32("T1 TXDEFR", ui32RegVal); + + eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_PC, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); + DDLOGVAL32("T1 PC", ui32RegVal); + + eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_PCX, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); + DDLOGVAL32("T1 PCX", ui32RegVal); + + eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_SP, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); + DDLOGVAL32("T1 SP", ui32RegVal); + } + + if (bFirmwarePerf) + { + eError = RGXReadWithSP(psDevInfo, META_CR_PERF_COUNT0, &ui32RegVal); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadWithSP", _METASPError); + DDLOGVAL32("META_CR_PERF_COUNT0", ui32RegVal); + + eError = RGXReadWithSP(psDevInfo, META_CR_PERF_COUNT1, &ui32RegVal); + 
PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadWithSP", _METASPError); + DDLOGVAL32("META_CR_PERF_COUNT1", ui32RegVal); + } + +#if !defined(SUPPORT_TRUSTED_DEVICE) + if (bIsT0Enabled & bIsFWFaulted) + { + eError = _ValidateFWImageForMETA(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); + if (eError != PVRSRV_OK) + { + PVR_DUMPDEBUG_LOG("Failed to validate any FW code corruption"); + } + } + else if (bIsFWFaulted) + { + PVR_DUMPDEBUG_LOG("Skipping FW code memory corruption checking as META is disabled"); + } +#endif + } + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) + { + eError = RGXDumpRISCVState(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); + PVR_RETURN_IF_ERROR(eError); + } +#endif + + return PVRSRV_OK; + +#if !defined(NO_HARDWARE) +_METASPError: + PVR_DUMPDEBUG_LOG("Dump Slave Port debug information"); + _RGXDumpMetaSPExtraDebugInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); + return eError; +#endif +} + +#undef REG32_FMTSPEC +#undef REG64_FMTSPEC +#undef DDLOG32 +#undef DDLOG64 +#undef DDLOG32_DPX +#undef DDLOG64_DPX +#undef DDLOGVAL32 +#undef DDLOG32UNPACKED + +/*! 
+******************************************************************************* + + @Function RGXDebugRequestProcess + + @Description + + This function will print out the debug for the specified level of verbosity + + @Input pfnDumpDebugPrintf - Optional replacement print function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psDevInfo - RGX device info + @Input ui32VerbLevel - Verbosity level + + @Return void + +******************************************************************************/ +static +void RGXDebugRequestProcess(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32VerbLevel) +{ + PVRSRV_ERROR eError; + PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; + PVRSRV_DEV_POWER_STATE ePowerState; + IMG_BOOL bRGXPoweredON; + IMG_UINT8 ui8FwOsCount; + RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; + RGXFWIF_OSDATA *psFwOsData = psDevInfo->psRGXFWIfFwOsData; + IMG_BOOL bPwrLockAlreadyHeld; + + bPwrLockAlreadyHeld = PVRSRVPwrLockIsLockedByMe(psDeviceNode); + if (!bPwrLockAlreadyHeld) + { + /* Only acquire the power-lock if not already held by the calling context */ + eError = PVRSRVPowerLock(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire lock (%s)", + __func__, + PVRSRVGetErrorString(eError))); + return; + } + } + + ui8FwOsCount = psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.sInitOptions.ui8OsCountSupport; + + eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Error retrieving RGX power state. 
No debug info dumped.", + __func__)); + goto Exit; + } + + if ((PVRSRV_VZ_MODE_IS(NATIVE) && (ui8FwOsCount > 1)) || + (PVRSRV_VZ_MODE_IS(HOST) && (ui8FwOsCount != RGX_NUM_OS_SUPPORTED))) + { + PVR_DUMPDEBUG_LOG("Mismatch between the number of Operating Systems supported by KM driver (%d) and FW (%d)", + (PVRSRV_VZ_MODE_IS(NATIVE)) ? (1) : (RGX_NUM_OS_SUPPORTED), ui8FwOsCount); + } + + PVR_DUMPDEBUG_LOG("------[ RGX Device ID:%d Start ]------", psDevInfo->psDeviceNode->sDevId.ui32InternalID); + + bRGXPoweredON = (ePowerState == PVRSRV_DEV_POWER_STATE_ON); + + PVR_DUMPDEBUG_LOG("------[ RGX Info ]------"); + PVR_DUMPDEBUG_LOG("Device Node (Info): %p (%p)", psDevInfo->psDeviceNode, psDevInfo); + PVR_DUMPDEBUG_LOG("RGX BVNC: %d.%d.%d.%d (%s)", psDevInfo->sDevFeatureCfg.ui32B, + psDevInfo->sDevFeatureCfg.ui32V, + psDevInfo->sDevFeatureCfg.ui32N, + psDevInfo->sDevFeatureCfg.ui32C, + PVR_ARCH_NAME); + PVR_DUMPDEBUG_LOG("RGX Device State: %s", _RGXGetDebugDevStateString(psDeviceNode->eDevState)); + PVR_DUMPDEBUG_LOG("RGX Power State: %s", _RGXGetDebugDevPowerStateString(ePowerState)); + if (psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated) + { + PVR_DUMP_FIRMWARE_INFO(psDevInfo->psRGXFWIfOsInit->sRGXCompChecks); + } + else + { + PVR_DUMPDEBUG_LOG("FW info: UNINITIALIZED"); + } + + RGXDumpRGXDebugSummary(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, bRGXPoweredON); + + /* Dump out the kernel CCB. */ + { + RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl; + + if (psKCCBCtl != NULL) + { + PVR_DUMPDEBUG_LOG("RGX Kernel CCB WO:0x%X RO:0x%X", + psKCCBCtl->ui32WriteOffset, + psKCCBCtl->ui32ReadOffset); + } + } + + /* Dump out the firmware CCB. 
*/ + { + RGXFWIF_CCB_CTL *psFCCBCtl = psDevInfo->psFirmwareCCBCtl; + + if (psFCCBCtl != NULL) + { + PVR_DUMPDEBUG_LOG("RGX Firmware CCB WO:0x%X RO:0x%X", + psFCCBCtl->ui32WriteOffset, + psFCCBCtl->ui32ReadOffset); + } + } + + if (psFwOsData != NULL) + { + IMG_UINT32 ui32TID; + + /* Dump the KCCB commands executed */ + PVR_DUMPDEBUG_LOG("RGX Kernel CCB commands executed = %d", + psFwOsData->ui32KCCBCmdsExecuted); + +#if defined(PVRSRV_STALLED_CCB_ACTION) + /* Dump the number of times we have performed a forced UFO update, + * and (if non-zero) the timestamp of the most recent occurrence/ + */ + PVR_DUMPDEBUG_LOG("RGX SLR: Forced UFO updates requested = %d", + psFwOsData->ui32ForcedUpdatesRequested); + if (psFwOsData->ui32ForcedUpdatesRequested > 0) + { + IMG_UINT8 ui8Idx; + IMG_UINT64 ui64Seconds, ui64Nanoseconds; + + if (psFwOsData->ui64LastForcedUpdateTime > 0ULL) + { + ConvertOSTimestampToSAndNS(psFwOsData->ui64LastForcedUpdateTime, &ui64Seconds, &ui64Nanoseconds); + PVR_DUMPDEBUG_LOG("RGX SLR: (most recent forced update was around %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC ")", + ui64Seconds, ui64Nanoseconds); + } + else + { + PVR_DUMPDEBUG_LOG("RGX SLR: (unable to force update as fence contained no sync checkpoints)"); + } + /* Dump SLR log */ + if (psFwOsData->sSLRLogFirst.aszCCBName[0]) + { + ConvertOSTimestampToSAndNS(psFwOsData->sSLRLogFirst.ui64Timestamp, &ui64Seconds, &ui64Nanoseconds); + PVR_DUMPDEBUG_LOG("RGX SLR:{%" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC + "} Fence found on context 0x%x '%s' has %d UFOs", + ui64Seconds, ui64Nanoseconds, + psFwOsData->sSLRLogFirst.ui32FWCtxAddr, + psFwOsData->sSLRLogFirst.aszCCBName, + psFwOsData->sSLRLogFirst.ui32NumUFOs); + } + for (ui8Idx=0; ui8Idx<PVR_SLR_LOG_ENTRIES;ui8Idx++) + { + if (psFwOsData->sSLRLog[ui8Idx].aszCCBName[0]) + { + ConvertOSTimestampToSAndNS(psFwOsData->sSLRLog[ui8Idx].ui64Timestamp, &ui64Seconds, &ui64Nanoseconds); + PVR_DUMPDEBUG_LOG("RGX SLR:[%" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC + "] Fence found on context 0x%x '%s' has %d 
UFOs", + ui64Seconds, ui64Nanoseconds, + psFwOsData->sSLRLog[ui8Idx].ui32FWCtxAddr, + psFwOsData->sSLRLog[ui8Idx].aszCCBName, + psFwOsData->sSLRLog[ui8Idx].ui32NumUFOs); + } + } + } +#else + PVR_DUMPDEBUG_LOG("RGX SLR: Disabled"); +#endif + + /* Dump the error counts */ + PVR_DUMPDEBUG_LOG("RGX Errors: WGP:%d, TRP:%d", + psDevInfo->sErrorCounts.ui32WGPErrorCount, + psDevInfo->sErrorCounts.ui32TRPErrorCount); + + for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++) + { + /* Dump the IRQ info for threads */ + PVR_DUMPDEBUG_LOG("RGX FW thread %u: FW IRQ count = %u, Last sampled IRQ count in LISR = %u", + ui32TID, + psDevInfo->psRGXFWIfFwOsData->aui32InterruptCount[ui32TID], + psDevInfo->aui32SampleIRQCount[ui32TID]); + } + } + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* Dump out the Workload estimation CCB. */ + { + RGXFWIF_CCB_CTL *psWorkEstCCBCtl = psDevInfo->psWorkEstFirmwareCCBCtl; + + if (psWorkEstCCBCtl != NULL) + { + PVR_DUMPDEBUG_LOG("RGX WorkEst CCB WO:0x%X RO:0x%X", + psWorkEstCCBCtl->ui32WriteOffset, + psWorkEstCCBCtl->ui32ReadOffset); + } + } +#endif + + /* Dump the FW Sys config flags on the Host */ + if (!PVRSRV_VZ_MODE_IS(GUEST)) + { + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + IMG_CHAR sFwSysFlagsDescription[MAX_FW_DESCRIPTION_LENGTH]; + + if (!psFwSysData) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Fw Sys Data is not mapped into CPU space", __func__)); + goto Exit; + } + + _GetFwSysFlagsDescription(sFwSysFlagsDescription, MAX_FW_DESCRIPTION_LENGTH, psFwSysData->ui32ConfigFlags); + PVR_DUMPDEBUG_LOG("FW System config flags = 0x%08X (%s)", psFwSysData->ui32ConfigFlags, sFwSysFlagsDescription); + } + + /* Dump the FW OS config flags */ + { + RGXFWIF_OSDATA *psFwOsData = psDevInfo->psRGXFWIfFwOsData; + IMG_CHAR sFwOsFlagsDescription[MAX_FW_DESCRIPTION_LENGTH]; + + if (!psFwOsData) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Fw Os Data is not mapped into CPU space", __func__)); + goto Exit; + } + + _GetFwOsFlagsDescription(sFwOsFlagsDescription, 
MAX_FW_DESCRIPTION_LENGTH, psFwOsData->ui32FwOsConfigFlags); + PVR_DUMPDEBUG_LOG("FW OS config flags = 0x%08X (%s)", psFwOsData->ui32FwOsConfigFlags, sFwOsFlagsDescription); + } + + if ((bRGXPoweredON) && !PVRSRV_VZ_MODE_IS(GUEST)) + { + + PVR_DUMPDEBUG_LOG("------[ RGX registers ]------"); + PVR_DUMPDEBUG_LOG("RGX Register Base Address (Linear): 0x%p", psDevInfo->pvRegsBaseKM); + PVR_DUMPDEBUG_LOG("RGX Register Base Address (Physical): 0x%08lX", (unsigned long)psDevInfo->sRegsPhysBase.uiAddr); + + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) + { + /* Forcing bit 6 of MslvCtrl1 to 0 to avoid internal reg read going through the core */ + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES)) + { + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES, 0x0); + } + else + { + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL1, 0x0); + } + } + + eError = RGXDumpRGXRegisters(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: RGXDumpRegisters failed (%s)", + __func__, + PVRSRVGetErrorString(eError))); +#if defined(SUPPORT_EXTRA_METASP_DEBUG) + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) + { + PVR_DUMPDEBUG_LOG("Dump Slave Port debug information"); + _RGXDumpMetaSPExtraDebugInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); + } +#endif + } + } + else + { + PVR_DUMPDEBUG_LOG(" (!) %s. No registers dumped", PVRSRV_VZ_MODE_IS(GUEST) ? 
"Guest Mode of operation" : "RGX power is down"); + } + + PVR_DUMPDEBUG_LOG("------[ RGX FW Trace Info ]------"); + + if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM)) + { + IMG_INT tid; + /* Dump FW trace information */ + if (psRGXFWIfTraceBufCtl != NULL) + { + for (tid = 0 ; tid < RGXFW_THREAD_NUM ; tid++) + { + IMG_UINT32 i; + IMG_BOOL bPrevLineWasZero = IMG_FALSE; + IMG_BOOL bLineIsAllZeros = IMG_FALSE; + IMG_UINT32 ui32CountLines = 0; + IMG_UINT32 *pui32TraceBuffer; + IMG_CHAR *pszLine; + + if (psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK) + { + PVR_DUMPDEBUG_LOG("Debug log type: %s ( " RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC ")", + ((psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)?("trace"):("tbi")), + RGXFWIF_LOG_ENABLED_GROUPS_LIST(psRGXFWIfTraceBufCtl->ui32LogType) + ); + } + else + { + PVR_DUMPDEBUG_LOG("Debug log type: none"); + } + + pui32TraceBuffer = psRGXFWIfTraceBufCtl->sTraceBuf[tid].pui32TraceBuffer; + + /* Skip if trace buffer is not allocated */ + if (pui32TraceBuffer == NULL) + { + PVR_DUMPDEBUG_LOG("RGX FW thread %d: Trace buffer not yet allocated",tid); + continue; + } + +/* Max number of DWords to be printed per line, in debug dump output */ +#define PVR_DD_FW_TRACEBUF_LINESIZE 30U + /* each element in the line is 8 characters plus a space. The '+ 1' is because of the final trailing '\0'. 
*/ + pszLine = OSAllocMem(9 * PVR_DD_FW_TRACEBUF_LINESIZE + 1); + if (pszLine == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Out of mem allocating line string (size: %d)", + __func__, + 9 * PVR_DD_FW_TRACEBUF_LINESIZE + 1)); + goto Exit; + } + + PVR_DUMPDEBUG_LOG("------[ RGX FW thread %d trace START ]------", tid); + PVR_DUMPDEBUG_LOG("FWT[traceptr]: %X", psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32TracePointer); + PVR_DUMPDEBUG_LOG("FWT[tracebufsize]: %X", psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords); + + for (i = 0; i < psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords; i += PVR_DD_FW_TRACEBUF_LINESIZE) + { + IMG_UINT32 k = 0; + IMG_UINT32 ui32Line = 0x0; + IMG_UINT32 ui32LineOffset = i*sizeof(IMG_UINT32); + IMG_CHAR *pszBuf = pszLine; + + for (k = 0; k < PVR_DD_FW_TRACEBUF_LINESIZE; k++) + { + if ((i + k) >= psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords) + { + /* Stop reading when the index goes beyond trace buffer size. This condition is + * hit during printing the last line in DD when ui32TraceBufSizeInDWords is not + * a multiple of PVR_DD_FW_TRACEBUF_LINESIZE */ + break; + } + + ui32Line |= pui32TraceBuffer[i + k]; + + /* prepare the line to print it. The '+1' is because of the trailing '\0' added */ + OSSNPrintf(pszBuf, 9 + 1, " %08x", pui32TraceBuffer[i + k]); + pszBuf += 9; /* write over the '\0' */ + } + + bLineIsAllZeros = (ui32Line == 0x0); + + if (bLineIsAllZeros) + { + if (bPrevLineWasZero) + { + ui32CountLines++; + } + else + { + bPrevLineWasZero = IMG_TRUE; + ui32CountLines = 1; + PVR_DUMPDEBUG_LOG("FWT[%08x]: 00000000 ... 
00000000", ui32LineOffset); + } + } + else + { + if (bPrevLineWasZero && ui32CountLines > 1) + { + PVR_DUMPDEBUG_LOG("FWT[...]: %d lines were all zero", ui32CountLines); + } + bPrevLineWasZero = IMG_FALSE; + + PVR_DUMPDEBUG_LOG("FWT[%08x]:%s", ui32LineOffset, pszLine); + } + + } + if (bPrevLineWasZero) + { + PVR_DUMPDEBUG_LOG("FWT[END]: %d lines were all zero", ui32CountLines); + } + + PVR_DUMPDEBUG_LOG("------[ RGX FW thread %d trace END ]------", tid); + + OSFreeMem(pszLine); + } + } + + { + if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH)) + { + PVR_DUMPDEBUG_LOG("------[ Full CCB Status ]------"); + } + else + { + PVR_DUMPDEBUG_LOG("------[ Stalled FWCtxs ]------"); + } + + DumpRenderCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); + DumpComputeCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); + + DumpTDMTransferCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); + + DumpKickSyncCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); + } + } + + PVR_DUMPDEBUG_LOG("------[ RGX Device ID:%d End ]------", psDevInfo->psDeviceNode->sDevId.ui32InternalID); + +Exit: + if (!bPwrLockAlreadyHeld) + { + PVRSRVPowerUnlock(psDeviceNode); + } +} + +/*! 
+ ****************************************************************************** + + @Function RGXDebugRequestNotify + + @Description Dump the debug data for RGX + + ******************************************************************************/ +static void RGXDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDbgRequestHandle, + IMG_UINT32 ui32VerbLevel, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = hDbgRequestHandle; + + /* Only action the request if we've fully init'ed */ + if (psDevInfo->bDevInit2Done) + { + RGXDebugRequestProcess(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, ui32VerbLevel); + } +} + +PVRSRV_ERROR RGXDebugInit(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + return PVRSRVRegisterDbgRequestNotify(&psDevInfo->hDbgReqNotify, + psDevInfo->psDeviceNode, + RGXDebugRequestNotify, + DEBUG_REQUEST_SYS, + psDevInfo); +} + +PVRSRV_ERROR RGXDebugDeinit(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + if (psDevInfo->hDbgReqNotify) + { + return PVRSRVUnregisterDbgRequestNotify(psDevInfo->hDbgReqNotify); + } + + /* No notifier registered */ + return PVRSRV_OK; +} + +/****************************************************************************** + End of file (rgxdebug.c) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgxdebug.h b/drivers/gpu/drm/phytium/octopus/rgxdebug.h new file mode 100644 index 000000000000..ae985ebd26ca --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxdebug.h @@ -0,0 +1,227 @@ +/*************************************************************************/ /*! +@File +@Title RGX debug header file +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Header for the RGX debugging functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(RGXDEBUG_H) +#define RGXDEBUG_H + +#include "pvrsrv_error.h" +#include "img_types.h" +#include "device.h" +#include "pvr_notifier.h" +#include "pvrsrv.h" +#include "rgxdevice.h" + +/** + * Debug utility macro for printing FW IRQ count and Last sampled IRQ count in + * LISR for each RGX FW thread. + * Macro takes pointer to PVRSRV_RGXDEV_INFO as input. + */ +#define RGXDEBUG_PRINT_IRQ_COUNT(psRgxDevInfo) \ + do \ + { \ + IMG_UINT32 ui32TID; \ + for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++) \ + { \ + PVR_DPF((DBGPRIV_VERBOSE, \ + "RGX FW thread %u: FW IRQ count = %u, Last sampled IRQ count in LISR = %u)", \ + ui32TID, \ + (psRgxDevInfo)->psRGXFWIfFwOsData->aui32InterruptCount[ui32TID], \ + (psRgxDevInfo)->aui32SampleIRQCount[ui32TID])); \ + } \ + } while (0) + +/*! +******************************************************************************* + + @Function RGXDumpRGXRegisters + + @Description + + Dumps an extensive list of RGX registers required for debugging + + @Input pfnDumpDebugPrintf - Optional replacement print function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psDevInfo - RGX device info + + @Return PVRSRV_ERROR PVRSRV_OK on success, error code otherwise + +******************************************************************************/ +PVRSRV_ERROR RGXDumpRGXRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo); + +/*! +******************************************************************************* + + @Function RGXDumpFirmwareTrace + + @Description Dumps the decoded version of the firmware trace buffer. 
+ + Dump useful debugging info + + @Input pfnDumpDebugPrintf - Optional replacement print function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psDevInfo - RGX device info + + @Return void + +******************************************************************************/ +void RGXDumpFirmwareTrace(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo); + +#if defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS) +void RGXDumpPowerMonitoring(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo); +#endif + +/*! +******************************************************************************* + + @Function RGXReadWithSP + + @Description + + Reads data from a memory location (FW memory map) using the META Slave Port + + @Input psDevInfo - Pointer to RGX DevInfo to be used while reading + @Input ui32FWAddr - 32 bit FW address + @Input pui32Value - When the read is successful, value at above FW address + is returned at this location + + @Return PVRSRV_ERROR PVRSRV_OK if read success, error code otherwise. +******************************************************************************/ +PVRSRV_ERROR RGXReadWithSP(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FWAddr, IMG_UINT32 *pui32Value); + +/*! +******************************************************************************* + + @Function RGXWriteWithSP + + @Description + + Writes data to a memory location (FW memory map) using the META Slave Port + + @Input psDevInfo - Pointer to RGX DevInfo to be used while writing + @Input ui32FWAddr - 32 bit FW address + + @Input ui32Value - 32 bit Value to write + + @Return PVRSRV_ERROR PVRSRV_OK if write success, error code otherwise. 
+******************************************************************************/ +PVRSRV_ERROR RGXWriteWithSP(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FWAddr, IMG_UINT32 ui32Value); + +#if defined(SUPPORT_EXTRA_METASP_DEBUG) +/*! +******************************************************************************* + + @Function ValidateFWOnLoad + + @Description Compare the Firmware image as seen from the CPU point of view + against the same memory area as seen from the META point of view + after first power up. + + @Input psDevInfo - Device Info + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR ValidateFWOnLoad(PVRSRV_RGXDEV_INFO *psDevInfo); +#endif + +/*! +******************************************************************************* + + @Function RGXDumpRGXDebugSummary + + @Description + + Dump a summary in human readable form with the RGX state + + @Input pfnDumpDebugPrintf - The debug printf function + @Input pvDumpDebugFile - Optional file identifier to be passed to the + 'printf' function if required + @Input psDevInfo - RGX device info + @Input bRGXPoweredON - IMG_TRUE if RGX device is on + + @Return void + +******************************************************************************/ +void RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_BOOL bRGXPoweredON); + +/*! +******************************************************************************* + + @Function RGXDebugInit + + @Description + + Setup debug requests, calls into PVRSRVRegisterDbgRequestNotify + + @Input psDevInfo RGX device info + @Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error + +******************************************************************************/ +PVRSRV_ERROR RGXDebugInit(PVRSRV_RGXDEV_INFO *psDevInfo); + +/*! 
+******************************************************************************* + + @Function RGXDebugDeinit + + @Description + + Remove debug requests, calls into PVRSRVUnregisterDbgRequestNotify + + @Output phNotify Points to debug notifier handle + @Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error + +******************************************************************************/ +PVRSRV_ERROR RGXDebugDeinit(PVRSRV_RGXDEV_INFO *psDevInfo); + +#endif /* RGXDEBUG_H */ diff --git a/drivers/gpu/drm/phytium/octopus/rgxdevice.h b/drivers/gpu/drm/phytium/octopus/rgxdevice.h new file mode 100644 index 000000000000..12048258b3a4 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxdevice.h @@ -0,0 +1,824 @@ +/*************************************************************************/ /*! +@File +@Title RGX device node header file +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Header for the RGX device node +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(RGXDEVICE_H) +#define RGXDEVICE_H + +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv_device_types.h" +#include "mmu_common.h" +#include "rgx_fwif_km.h" +#include "cache_ops.h" +#include "device.h" +#include "osfunc.h" +#include "rgxlayer_impl.h" +#if defined(SUPPORT_WORKLOAD_ESTIMATION) +#include "hash.h" +#endif +typedef struct _RGX_SERVER_COMMON_CONTEXT_ RGX_SERVER_COMMON_CONTEXT; + +typedef struct { + DEVMEM_MEMDESC *psFWFrameworkMemDesc; +} RGX_COMMON_CONTEXT_INFO; + + +/*! 
+ ****************************************************************************** + * Device state flags + *****************************************************************************/ +#define RGXKM_DEVICE_STATE_ZERO_FREELIST (0x1) /*!< Zeroing the physical pages of reconstructed free lists */ +#define RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN (0x2) /*!< Used to disable the Devices Watchdog logging */ +#define RGXKM_DEVICE_STATE_GPU_UNITS_POWER_CHANGE_EN (0x4) /*!< Used for validation to inject SPU power state mask change every DM kick */ +#define RGXKM_DEVICE_STATE_CCB_GROW_EN (0x8) /*!< Used to indicate CCB grow is permitted */ +#define RGXKM_DEVICE_STATE_ENABLE_SPU_UNITS_POWER_MASK_CHANGE_EN (0x10) /*!< Used for validation to enable SPU power state mask change */ +#define RGXKM_DEVICE_STATE_MASK (0x1F) + +/*! + ****************************************************************************** + * GPU DVFS Table + *****************************************************************************/ + +#define RGX_GPU_DVFS_TABLE_SIZE 32 +#define RGX_GPU_DVFS_FIRST_CALIBRATION_TIME_US 25000 /* Time required to calibrate a clock frequency the first time */ +#define RGX_GPU_DVFS_TRANSITION_CALIBRATION_TIME_US 150000 /* Time required for a recalibration after a DVFS transition */ +#define RGX_GPU_DVFS_PERIODIC_CALIBRATION_TIME_US 10000000 /* Time before the next periodic calibration and correlation */ + + +/*! + ****************************************************************************** + * Global flags for driver validation + *****************************************************************************/ +#define RGX_VAL_LS_EN (0x1U) /*!< Enable dual lockstep firmware */ +#define RGX_VAL_FBDC_SIG_CHECK_NOERR_EN (0x2U) /*!< Enable FBDC signature check. Signatures must match */ +#define RGX_VAL_FBDC_SIG_CHECK_ERR_EN (0x4U) /*!< Enable FBDC signature check. 
Signatures must not match */ +#define RGX_VAL_GPUSTATEPIN_EN (0x8U) /*!< Enable GPU state pin check */ +#define RGX_VAL_KZ_SIG_CHECK_NOERR_EN (0x10U) /*!< Enable KZ signature check. Signatures must match */ +#define RGX_VAL_KZ_SIG_CHECK_ERR_EN (0x20U) /*!< Enable KZ signature check. Signatures must not match */ +#define RGX_VAL_SIG_CHECK_ERR_EN (RGX_VAL_FBDC_SIG_CHECK_ERR_EN) + +typedef struct _GPU_FREQ_TRACKING_DATA_ +{ + /* Core clock speed estimated by the driver */ + IMG_UINT32 ui32EstCoreClockSpeed; + + /* Amount of successful calculations of the estimated core clock speed */ + IMG_UINT32 ui32CalibrationCount; +} GPU_FREQ_TRACKING_DATA; + +#if defined(PVRSRV_TIMER_CORRELATION_HISTORY) +#define RGX_GPU_FREQ_TRACKING_SIZE 16 + +typedef struct +{ + IMG_UINT64 ui64BeginCRTimestamp; + IMG_UINT64 ui64BeginOSTimestamp; + + IMG_UINT64 ui64EndCRTimestamp; + IMG_UINT64 ui64EndOSTimestamp; + + IMG_UINT32 ui32EstCoreClockSpeed; + IMG_UINT32 ui32CoreClockSpeed; +} GPU_FREQ_TRACKING_HISTORY; +#endif + +typedef struct _RGX_GPU_DVFS_TABLE_ +{ + /* Beginning of current calibration period (in us) */ + IMG_UINT64 ui64CalibrationCRTimestamp; + IMG_UINT64 ui64CalibrationOSTimestamp; + + /* Calculated calibration period (in us) */ + IMG_UINT64 ui64CalibrationCRTimediff; + IMG_UINT64 ui64CalibrationOSTimediff; + + /* Current calibration period (in us) */ + IMG_UINT32 ui32CalibrationPeriod; + + /* System layer frequency table and frequency tracking data */ + IMG_UINT32 ui32FreqIndex; + IMG_UINT32 aui32GPUFrequency[RGX_GPU_DVFS_TABLE_SIZE]; + GPU_FREQ_TRACKING_DATA asTrackingData[RGX_GPU_DVFS_TABLE_SIZE]; + +#if defined(PVRSRV_TIMER_CORRELATION_HISTORY) + IMG_UINT32 ui32HistoryIndex; + GPU_FREQ_TRACKING_HISTORY asTrackingHistory[RGX_GPU_FREQ_TRACKING_SIZE]; +#endif +} RGX_GPU_DVFS_TABLE; + + +/*! 
+ ****************************************************************************** + * GPU utilisation statistics + *****************************************************************************/ + +typedef struct _RGXFWIF_GPU_UTIL_STATS_ +{ + IMG_BOOL bValid; /* If TRUE, statistics are valid. + FALSE if the driver couldn't get reliable stats. */ + IMG_UINT64 ui64GpuStatActive; /* GPU active statistic */ + IMG_UINT64 ui64GpuStatBlocked; /* GPU blocked statistic */ + IMG_UINT64 ui64GpuStatIdle; /* GPU idle statistic */ + IMG_UINT64 ui64GpuStatCumulative; /* Sum of active/blocked/idle stats */ + IMG_UINT64 ui64TimeStamp; /* Timestamp of the most recent sample of the GPU stats */ +} RGXFWIF_GPU_UTIL_STATS; + + +typedef struct _RGX_REG_CONFIG_ +{ + IMG_BOOL bEnabled; + RGXFWIF_REG_CFG_TYPE eRegCfgTypeToPush; + IMG_UINT32 ui32NumRegRecords; + POS_LOCK hLock; +} RGX_REG_CONFIG; + +typedef struct _PVRSRV_STUB_PBDESC_ PVRSRV_STUB_PBDESC; + +#if defined(SUPPORT_VALIDATION) +/** + * Structure containing information for calculating next SPU power domain state. + */ +typedef struct _RGX_POWER_DOMAIN_STATE_ +{ + /** + * Total number of power units in the core. + */ + IMG_UINT32 ui32PowUnitsCount; + /** + * Current power domain state + */ + IMG_UINT32 ui32CurrentState; + /** + * Stores last transition that happened for each power domain state. + */ + IMG_UINT32 *paui32LastTransition; +} RGX_POWER_DOMAIN_STATE; +#endif + +typedef struct _PVRSRV_DEVICE_FEATURE_CONFIG_ +{ + IMG_UINT64 ui64ErnsBrns; + IMG_UINT64 ui64Features; + IMG_UINT32 ui32B; + IMG_UINT32 ui32V; + IMG_UINT32 ui32N; + IMG_UINT32 ui32C; + IMG_UINT32 ui32FeaturesValues[RGX_FEATURE_WITH_VALUES_MAX_IDX]; + IMG_UINT32 ui32MAXDMCount; + IMG_UINT32 ui32MAXDMMTSCount; + IMG_UINT32 ui32MAXPowUnitCount; + IMG_UINT32 ui32SLCSizeInBytes; + IMG_PCHAR pszBVNCString; +}PVRSRV_DEVICE_FEATURE_CONFIG; + +/* This is used to get the value of a specific feature. + * Note that it will assert if the feature is disabled or value is invalid. 
*/ +#define RGX_GET_FEATURE_VALUE(psDevInfo, Feature) \ + ( psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX] ) + +/* This is used to check if the feature value (e.g. with an integer value) is available for the currently running BVNC or not */ +#define RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, Feature) \ + ( psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX] < RGX_FEATURE_VALUE_DISABLED ) + +/* This is used to check if the Boolean feature (e.g. WITHOUT an integer value) is available for the currently running BVNC or not */ +#define RGX_IS_FEATURE_SUPPORTED(psDevInfo, Feature) \ + BITMASK_HAS(psDevInfo->sDevFeatureCfg.ui64Features, RGX_FEATURE_##Feature##_BIT_MASK) + +/* This is used to check if the ERN is available for the currently running BVNC or not */ +#define RGX_IS_ERN_SUPPORTED(psDevInfo, ERN) \ + BITMASK_HAS(psDevInfo->sDevFeatureCfg.ui64ErnsBrns, HW_ERN_##ERN##_BIT_MASK) + +/* This is used to check if the BRN is available for the currently running BVNC or not */ +#define RGX_IS_BRN_SUPPORTED(psDevInfo, BRN) \ + BITMASK_HAS(psDevInfo->sDevFeatureCfg.ui64ErnsBrns, FIX_HW_BRN_##BRN##_BIT_MASK) + +/* there is a corresponding define in rgxapi.h */ +#define RGX_MAX_TIMER_QUERIES 16U + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) +/*! + * The host maintains a 512-deep cache of submitted workloads per device, + * i.e. 
a global look-up table for TA, 3D and compute (depending on the RGX + * hardware support present) + */ + +/* + * For the workload estimation return data array, the max amount of commands the + * MTS can have is 255, therefore 512 (LOG2 = 9) is large enough to account for + * all corner cases + */ +#define RETURN_DATA_ARRAY_SIZE_LOG2 (9) +#define RETURN_DATA_ARRAY_SIZE ((1UL) << RETURN_DATA_ARRAY_SIZE_LOG2) +#define RETURN_DATA_ARRAY_WRAP_MASK (RETURN_DATA_ARRAY_SIZE - 1) + +#define WORKLOAD_HASH_SIZE_LOG2 6 +#define WORKLOAD_HASH_SIZE ((1UL) << WORKLOAD_HASH_SIZE_LOG2) +#define WORKLOAD_HASH_WRAP_MASK (WORKLOAD_HASH_SIZE - 1) + +/*! + * Workload characteristics for supported data masters. + * All characteristics must match for the workload estimate to be used/updated. + */ +typedef union _RGX_WORKLOAD_ +{ + struct + { + IMG_UINT32 ui32RenderTargetSize; + IMG_UINT32 ui32NumberOfDrawCalls; + IMG_UINT32 ui32NumberOfIndices; + IMG_UINT32 ui32NumberOfMRTs; + } sTA3D; + + struct + { + IMG_UINT32 ui32NumberOfWorkgroups; + IMG_UINT32 ui32NumberOfWorkitems; + } sCompute; + + struct + { + IMG_UINT32 ui32Characteristic1; + IMG_UINT32 ui32Characteristic2; + } sTransfer; +} RGX_WORKLOAD; + +/*! + * Host data used to match the return data (actual cycles count) to the + * submitted command packet. + * The hash table is a per-DM circular buffer containing a key based on the + * workload characteristics. On job completion, the oldest workload data + * is evicted if the CB is full and the driver matches the characteristics + * to the matching data. + * + * o If the driver finds a match the existing cycle estimate is averaged with + * the actual cycles used. + * o Otherwise a new hash entry is created with the actual cycles for this + * workload. + * + * Subsequently if a match is found during command submission, the estimate + * is passed to the scheduler, e.g. adjust the GPU frequency if PDVFS is enabled. 
+ */ +typedef struct _WORKLOAD_MATCHING_DATA_ +{ + POS_LOCK psHashLock; + HASH_TABLE *psHashTable; /*! existing workload cycle estimates for this DM */ + RGX_WORKLOAD asHashKeys[WORKLOAD_HASH_SIZE]; + IMG_UINT64 aui64HashData[WORKLOAD_HASH_SIZE]; + IMG_UINT32 ui32HashArrayWO; /*! track the most recent workload estimates */ +} WORKLOAD_MATCHING_DATA; + +/*! + * A generic container for the workload matching data for GPU contexts: + * rendering (TA, 3D), compute, etc. + */ +typedef struct _WORKEST_HOST_DATA_ +{ + union + { + struct + { + WORKLOAD_MATCHING_DATA sDataTA; /*!< matching data for TA commands */ + WORKLOAD_MATCHING_DATA sData3D; /*!< matching data for 3D commands */ + } sTA3D; + + struct + { + WORKLOAD_MATCHING_DATA sDataCDM; /*!< matching data for CDM commands */ + } sCompute; + + struct + { + WORKLOAD_MATCHING_DATA sDataTDM; /*!< matching data for TDM-TQ commands */ + } sTransfer; + } uWorkloadMatchingData; + + /* + * This is a per-context property, hence the TA and 3D share the same + * per render context counter. + */ + IMG_UINT32 ui32WorkEstCCBReceived; /*!< Used to ensure all submitted work + estimation commands are received + by the host before clean up. */ +} WORKEST_HOST_DATA; + +/*! + * Entries in the list of submitted workloads, used when the completed command + * returns data to the host. + * + * - the matching data is needed as it holds the hash data + * - the host data is needed for completion updates, ensuring memory is not + * freed while workload estimates are in-flight. + * - the workload characteristic is used in the hash table look-up. + */ +typedef struct _WORKEST_RETURN_DATA_ +{ + WORKEST_HOST_DATA *psWorkEstHostData; + WORKLOAD_MATCHING_DATA *psWorkloadMatchingData; + RGX_WORKLOAD sWorkloadCharacteristics; +} WORKEST_RETURN_DATA; +#endif + + +#define RGX_MAX_NUM_MMU_PAGE_SIZE_RANGES 4 + + +/*! 
+ ****************************************************************************** + * RGX Device error counts + *****************************************************************************/ +typedef struct _PVRSRV_RGXDEV_ERROR_COUNTS_ +{ + IMG_UINT32 ui32WGPErrorCount; /*!< count of the number of WGP checksum errors */ + IMG_UINT32 ui32TRPErrorCount; /*!< count of the number of TRP checksum errors */ +} PVRSRV_RGXDEV_ERROR_COUNTS; + +/*! + ****************************************************************************** + * RGX Device info + *****************************************************************************/ +typedef struct _PVRSRV_RGXDEV_INFO_ +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + + PVRSRV_DEVICE_FEATURE_CONFIG sDevFeatureCfg; + + IMG_BOOL bDevInit2Done; + + IMG_BOOL bFirmwareInitialised; + IMG_BOOL bPDPEnabled; + + IMG_HANDLE hDbgReqNotify; + + /* Kernel mode linear address of device registers */ + void __iomem *pvRegsBaseKM; + + IMG_HANDLE hRegMapping; + + /* System physical address of device registers */ + IMG_CPU_PHYADDR sRegsPhysBase; + /* Register region size in bytes */ + IMG_UINT32 ui32RegSize; + + PVRSRV_STUB_PBDESC *psStubPBDescListKM; + + /* Firmware memory context info */ + DEVMEM_CONTEXT *psKernelDevmemCtx; + DEVMEM_HEAP *psFirmwareMainHeap; + DEVMEM_HEAP *psFirmwareConfigHeap; + MMU_CONTEXT *psKernelMMUCtx; + + void *pvDeviceMemoryHeap; + + /* Kernel CCB */ + DEVMEM_MEMDESC *psKernelCCBCtlMemDesc; /*!< memdesc for Kernel CCB control */ + RGXFWIF_CCB_CTL *psKernelCCBCtl; /*!< kernel mapping for Kernel CCB control */ + DEVMEM_MEMDESC *psKernelCCBMemDesc; /*!< memdesc for Kernel CCB */ + IMG_UINT8 *psKernelCCB; /*!< kernel mapping for Kernel CCB */ + DEVMEM_MEMDESC *psKernelCCBRtnSlotsMemDesc; /*!< Return slot array for Kernel CCB commands */ + IMG_UINT32 *pui32KernelCCBRtnSlots; /*!< kernel mapping for return slot array */ + + /* Firmware CCB */ + DEVMEM_MEMDESC *psFirmwareCCBCtlMemDesc; /*!< memdesc for Firmware CCB control */ + 
RGXFWIF_CCB_CTL *psFirmwareCCBCtl; /*!< kernel mapping for Firmware CCB control */ + DEVMEM_MEMDESC *psFirmwareCCBMemDesc; /*!< memdesc for Firmware CCB */ + IMG_UINT8 *psFirmwareCCB; /*!< kernel mapping for Firmware CCB */ + + /* Workload Estimation Firmware CCB */ + DEVMEM_MEMDESC *psWorkEstFirmwareCCBCtlMemDesc; /*!< memdesc for Workload Estimation Firmware CCB control */ + RGXFWIF_CCB_CTL *psWorkEstFirmwareCCBCtl; /*!< kernel mapping for Workload Estimation Firmware CCB control */ + DEVMEM_MEMDESC *psWorkEstFirmwareCCBMemDesc; /*!< memdesc for Workload Estimation Firmware CCB */ + IMG_UINT8 *psWorkEstFirmwareCCB; /*!< kernel mapping for Workload Estimation Firmware CCB */ + + PVRSRV_MEMALLOCFLAGS_T uiFWPoisonOnFreeFlag; /*!< Flag for poisoning FW allocations when freed */ + + IMG_BOOL bIgnoreHWReportedBVNC; /*!< Ignore BVNC reported by HW */ + + /* + if we don't preallocate the pagetables we must + insert newly allocated page tables dynamically + */ + void *pvMMUContextList; + + IMG_UINT32 ui32ClkGateStatusReg; + IMG_UINT32 ui32ClkGateStatusMask; + + DEVMEM_MEMDESC *psRGXFWCodeMemDesc; + IMG_DEV_VIRTADDR sFWCodeDevVAddrBase; + IMG_UINT32 ui32FWCodeSizeInBytes; + DEVMEM_MEMDESC *psRGXFWDataMemDesc; + IMG_DEV_VIRTADDR sFWDataDevVAddrBase; + + DEVMEM_MEMDESC *psRGXFWCorememCodeMemDesc; + IMG_DEV_VIRTADDR sFWCorememCodeDevVAddrBase; + RGXFWIF_DEV_VIRTADDR sFWCorememCodeFWAddr; + IMG_UINT32 ui32FWCorememCodeSizeInBytes; + + DEVMEM_MEMDESC *psRGXFWIfCorememDataStoreMemDesc; + IMG_DEV_VIRTADDR sFWCorememDataStoreDevVAddrBase; + RGXFWIF_DEV_VIRTADDR sFWCorememDataStoreFWAddr; + + DEVMEM_MEMDESC *psRGXFWAlignChecksMemDesc; + +#if defined(PDUMP) + DEVMEM_MEMDESC *psRGXFWSigTAChecksMemDesc; + IMG_UINT32 ui32SigTAChecksSize; + + DEVMEM_MEMDESC *psRGXFWSig3DChecksMemDesc; + IMG_UINT32 ui32Sig3DChecksSize; + + DEVMEM_MEMDESC *psRGXFWSigCDMChecksMemDesc; + IMG_UINT32 ui32SigCDMChecksSize; + + DEVMEM_MEMDESC *psRGXFWSigTDMChecksMemDesc; + IMG_UINT32 ui32SigTDMChecksSize; + 
+#if defined(SUPPORT_VALIDATION) + DEVMEM_MEMDESC *psRGXFWValidationSigMemDesc; + IMG_UINT32 ui32ValidationSigSize; +#endif + + IMG_BOOL bDumpedKCCBCtlAlready; + + POS_SPINLOCK hSyncCheckpointSignalSpinLock; /*!< Guards data shared between an atomic & sleepable-context */ +#endif + + POS_LOCK hRGXFWIfBufInitLock; /*!< trace buffer lock for initialisation phase */ + + DEVMEM_MEMDESC *psRGXFWIfTraceBufCtlMemDesc; /*!< memdesc of trace buffer control structure */ + DEVMEM_MEMDESC *psRGXFWIfTraceBufferMemDesc[RGXFW_THREAD_NUM]; /*!< memdesc of actual FW trace (log) buffer(s) */ + DEVMEM_MEMDESC *psRGXFWIfPowMonBufferMemDesc; /*!< memdesc of FW power monitoring data */ + RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl; /*!< structure containing trace control data and actual trace buffer */ + + DEVMEM_MEMDESC *psRGXFWIfFwSysDataMemDesc; /*!< memdesc of the firmware-shared system data structure */ + RGXFWIF_SYSDATA *psRGXFWIfFwSysData; /*!< structure containing trace control data and actual trace buffer */ + + DEVMEM_MEMDESC *psRGXFWIfFwOsDataMemDesc; /*!< memdesc of the firmware-shared os structure */ + RGXFWIF_OSDATA *psRGXFWIfFwOsData; /*!< structure containing trace control data and actual trace buffer */ + +#if defined(SUPPORT_TBI_INTERFACE) + DEVMEM_MEMDESC *psRGXFWIfTBIBufferMemDesc; /*!< memdesc of actual FW TBI buffer */ + RGXFWIF_DEV_VIRTADDR sRGXFWIfTBIBuffer; /*!< TBI buffer data */ + IMG_UINT32 ui32FWIfTBIBufferSize; +#endif + + DEVMEM_MEMDESC *psRGXFWIfHWRInfoBufCtlMemDesc; + RGXFWIF_HWRINFOBUF *psRGXFWIfHWRInfoBufCtl; + + DEVMEM_MEMDESC *psRGXFWIfGpuUtilFWCbCtlMemDesc; + RGXFWIF_GPU_UTIL_FWCB *psRGXFWIfGpuUtilFWCb; + + DEVMEM_MEMDESC *psRGXFWIfHWPerfBufMemDesc; + IMG_BYTE *psRGXFWIfHWPerfBuf; + IMG_UINT32 ui32RGXFWIfHWPerfBufSize; /* in bytes */ + + DEVMEM_MEMDESC *psRGXFWIfRegCfgMemDesc; + + DEVMEM_MEMDESC *psRGXFWIfHWPerfCountersMemDesc; + + DEVMEM_MEMDESC *psRGXFWIfConnectionCtlMemDesc; + RGXFWIF_CONNECTION_CTL *psRGXFWIfConnectionCtl; + + DEVMEM_MEMDESC 
*psRGXFWIfSysInitMemDesc; + RGXFWIF_SYSINIT *psRGXFWIfSysInit; + + DEVMEM_MEMDESC *psRGXFWIfOsInitMemDesc; + RGXFWIF_OSINIT *psRGXFWIfOsInit; + + DEVMEM_MEMDESC *psRGXFWIfRuntimeCfgMemDesc; + RGXFWIF_RUNTIME_CFG *psRGXFWIfRuntimeCfg; + + /* Additional guest firmware memory context info */ + DEVMEM_HEAP *psGuestFirmwareRawHeap[RGX_NUM_OS_SUPPORTED]; + DEVMEM_MEMDESC *psGuestFirmwareRawMemDesc[RGX_NUM_OS_SUPPORTED]; + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* Array to store data needed for workload estimation when a workload + has finished and its cycle time is returned to the host. */ + WORKEST_RETURN_DATA asReturnData[RETURN_DATA_ARRAY_SIZE]; + IMG_UINT32 ui32ReturnDataWO; + POS_LOCK hWorkEstLock; +#endif + +#if defined(SUPPORT_PDVFS) + /** + * Host memdesc and pointer to memory containing core clock rate in Hz. + * Firmware (PDVFS) updates the memory on changing the core clock rate over + * GPIO. + * Note: Shared memory needs atomic access from Host driver and firmware, + * hence size should not be greater than memory transaction granularity. + * Currently it is chosen to be 32 bits. + */ + DEVMEM_MEMDESC *psRGXFWIFCoreClkRateMemDesc; + volatile IMG_UINT32 *pui32RGXFWIFCoreClkRate; + /** + * Last sampled core clk rate. + */ + volatile IMG_UINT32 ui32CoreClkRateSnapshot; +#endif + + /* + HWPerf data for the RGX device + */ + + POS_LOCK hHWPerfLock; /*! Critical section lock that protects HWPerf code + * from multiple thread duplicate init/deinit + * and loss/freeing of FW & Host resources while in + * use in another thread e.g. MSIR. */ + + IMG_UINT64 ui64HWPerfFilter; /*! Event filter for FW events (settable by AppHint) */ + IMG_HANDLE hHWPerfStream; /*! TL Stream buffer (L2) for firmware event stream */ + IMG_UINT32 ui32L2BufMaxPacketSize;/*!< Max allowed packet size in FW HWPerf TL (L2) buffer */ + IMG_BOOL bSuspendHWPerfL2DataCopy; /*! Flag to indicate if copying HWPerf data is suspended */ + + IMG_UINT32 ui32HWPerfHostFilter; /*! 
Event filter for HWPerfHost stream (settable by AppHint) */ + POS_LOCK hLockHWPerfHostStream; /*! Lock guarding access to HWPerfHost stream from multiple threads */ + IMG_HANDLE hHWPerfHostStream; /*! TL Stream buffer for host only event stream */ + IMG_UINT32 ui32HWPerfHostBufSize; /*! Host side buffer size in bytes */ + IMG_UINT32 ui32HWPerfHostLastOrdinal; /*! Ordinal of the last packet emitted in HWPerfHost TL stream. + * Guarded by hLockHWPerfHostStream */ + IMG_UINT32 ui32HWPerfHostNextOrdinal; /*! Ordinal number for HWPerfHost events. Guarded by hHWPerfHostSpinLock */ + IMG_UINT8 *pui8DeferredEvents; /*! List of HWPerfHost events yet to be emitted in the TL stream. + * Events generated from atomic context are deferred "emitted" + * as the "emission" code can sleep */ + IMG_UINT16 ui16DEReadIdx; /*! Read index in the above deferred events buffer */ + IMG_UINT16 ui16DEWriteIdx; /*! Write index in the above deferred events buffer */ + void *pvHostHWPerfMISR; /*! MISR to emit pending/deferred events in HWPerfHost TL stream */ + POS_SPINLOCK hHWPerfHostSpinLock; /*! Guards data shared between an atomic & sleepable-context */ +#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) + IMG_UINT32 ui32DEHighWatermark; /*! High watermark of deferred events buffer usage. Protected by + *! hHWPerfHostSpinLock */ + /* Max number of times DeferredEmission waited for an atomic-context to "finish" packet write */ + IMG_UINT32 ui32WaitForAtomicCtxPktHighWatermark; /*! Protected by hLockHWPerfHostStream */ + /* Whether warning has been logged about an atomic-context packet loss (due to too long wait for "write" finish) */ + IMG_BOOL bWarnedAtomicCtxPktLost; + /* Max number of times DeferredEmission scheduled-out to give a chance to the right-ordinal packet to be emitted */ + IMG_UINT32 ui32WaitForRightOrdPktHighWatermark; /*! 
Protected by hLockHWPerfHostStream */ + /* Whether warning has been logged about an packet loss (due to too long wait for right ordinal to emit) */ + IMG_BOOL bWarnedPktOrdinalBroke; +#endif + + void *pvGpuFtraceData; + + /* Poll data for detecting firmware fatal errors */ + IMG_UINT32 aui32CrLastPollCount[RGXFW_THREAD_NUM]; + IMG_UINT32 ui32KCCBCmdsExecutedLastTime; + IMG_BOOL bKCCBCmdsWaitingLastTime; + IMG_UINT32 ui32GEOTimeoutsLastTime; + IMG_UINT32 ui32InterruptCountLastTime; + IMG_UINT32 ui32MissingInterruptsLastTime; + + /* Client stall detection */ + IMG_UINT32 ui32StalledClientMask; + + IMG_BOOL bWorkEstEnabled; + IMG_BOOL bPDVFSEnabled; + + void *pvLISRData; + void *pvMISRData; + void *pvAPMISRData; + RGX_ACTIVEPM_CONF eActivePMConf; + + volatile IMG_UINT32 aui32SampleIRQCount[RGXFW_THREAD_NUM]; + + DEVMEM_MEMDESC *psRGXFaultAddressMemDesc; + + DEVMEM_MEMDESC *psSLC3FenceMemDesc; + + /* If we do 10 deferred memory allocations per second, then the ID would wrap around after 13 years */ + IMG_UINT32 ui32ZSBufferCurrID; /*!< ID assigned to the next deferred devmem allocation */ + IMG_UINT32 ui32FreelistCurrID; /*!< ID assigned to the next freelist */ + + POS_LOCK hLockZSBuffer; /*!< Lock to protect simultaneous access to ZSBuffers */ + DLLIST_NODE sZSBufferHead; /*!< List of on-demand ZSBuffers */ + POS_LOCK hLockFreeList; /*!< Lock to protect simultaneous access to Freelists */ + DLLIST_NODE sFreeListHead; /*!< List of growable Freelists */ + PSYNC_PRIM_CONTEXT hSyncPrimContext; + PVRSRV_CLIENT_SYNC_PRIM *psPowSyncPrim; + + IMG_UINT32 ui32ActivePMReqOk; + IMG_UINT32 ui32ActivePMReqDenied; + IMG_UINT32 ui32ActivePMReqNonIdle; + IMG_UINT32 ui32ActivePMReqRetry; + IMG_UINT32 ui32ActivePMReqTotal; + + IMG_HANDLE hProcessQueuesMISR; + + IMG_UINT32 ui32DeviceFlags; /*!< Flags to track general device state */ + + /* GPU DVFS Table */ + RGX_GPU_DVFS_TABLE *psGpuDVFSTable; + + /* Pointer to function returning the GPU utilisation statistics since the last + * time 
the function was called. Supports different users at the same time. + * + * psReturnStats [out]: GPU utilisation statistics (active high/active low/idle/blocked) + * in microseconds since the last time the function was called + * by a specific user (identified by hGpuUtilUser) + * + * Returns PVRSRV_OK in case the call completed without errors, + * some other value otherwise. + */ + PVRSRV_ERROR (*pfnGetGpuUtilStats) (PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_HANDLE hGpuUtilUser, + RGXFWIF_GPU_UTIL_STATS *psReturnStats); + + /* Pointer to function that checks if the physical GPU IRQ + * line has been asserted and clears it if so */ + IMG_BOOL (*pfnRGXAckIrq) (struct _PVRSRV_RGXDEV_INFO_ *psDevInfo); + + POS_LOCK hGPUUtilLock; + + /* Register configuration */ + RGX_REG_CONFIG sRegCongfig; + + IMG_BOOL bRGXPowered; + DLLIST_NODE sMemoryContextList; + + POSWR_LOCK hRenderCtxListLock; + POSWR_LOCK hComputeCtxListLock; + POSWR_LOCK hTransferCtxListLock; + POSWR_LOCK hTDMCtxListLock; + POSWR_LOCK hMemoryCtxListLock; + POSWR_LOCK hKickSyncCtxListLock; + + /* Linked list of deferred KCCB commands due to a full KCCB. + * Access to members sKCCBDeferredCommandsListHead and ui32KCCBDeferredCommandsCount + * are protected by the hLockKCCBDeferredCommandsList spin lock. 
*/ + POS_SPINLOCK hLockKCCBDeferredCommandsList; /*!< Protects deferred KCCB commands list */ + DLLIST_NODE sKCCBDeferredCommandsListHead; + IMG_UINT32 ui32KCCBDeferredCommandsCount; /*!< No of commands in the deferred list */ + + /* Linked lists of contexts on this device */ + DLLIST_NODE sRenderCtxtListHead; + DLLIST_NODE sComputeCtxtListHead; + DLLIST_NODE sTDMCtxtListHead; + DLLIST_NODE sKickSyncCtxtListHead; + + DLLIST_NODE sCommonCtxtListHead; + POSWR_LOCK hCommonCtxtListLock; + IMG_UINT32 ui32CommonCtxtCurrentID; /*!< ID assigned to the next common context */ + + POS_LOCK hDebugFaultInfoLock; /*!< Lock to protect the debug fault info list */ + POS_LOCK hMMUCtxUnregLock; /*!< Lock to protect list of unregistered MMU contexts */ + +#if defined(SUPPORT_VALIDATION) + RGX_POWER_DOMAIN_STATE sPowerDomainState; /*!< Power island sequence */ + IMG_UINT32 ui32PowDomainKickInterval; /*!< Power island transition interval */ + IMG_UINT32 ui32ValidationFlags; /*!< Validation flags for host driver */ +#endif + IMG_UINT32 ui32AvailablePowUnitsMask; + + RGX_LAYER_PARAMS sLayerParams; + + RGXFWIF_DM eBPDM; /*!< Current breakpoint data master */ + IMG_BOOL bBPSet; /*!< A Breakpoint has been set */ + POS_LOCK hBPLock; /*!< Lock for break point operations */ + + IMG_UINT32 ui32CoherencyTestsDone; + + ATOMIC_T iCCBSubmissionOrdinal; /* Rolling count used to indicate CCB submission order (all CCBs) */ + POS_LOCK hCCBRecoveryLock; /* Lock to protect pvEarliestStalledClientCCB and ui32OldestSubmissionOrdinal variables */ + void *pvEarliestStalledClientCCB; /* Will point to cCCB command to unblock in the event of a stall */ + IMG_UINT32 ui32OldestSubmissionOrdinal; /* Earliest submission ordinal of CCB entry found so far */ + IMG_UINT32 ui32SLRHoldoffCounter; /* Decremented each time health check is called until zero. SLR only happen when zero. 
*/ + + POS_LOCK hCCBStallCheckLock; /* Lock used to guard against multiple threads simultaneously checking for stalled CCBs */ + +#if defined(SUPPORT_FIRMWARE_GCOV) + /* Firmware gcov buffer */ + DEVMEM_MEMDESC *psFirmwareGcovBufferMemDesc; /*!< mem desc for Firmware gcov dumping buffer */ + IMG_UINT32 ui32FirmwareGcovSize; +#endif + /* Value to store for each page size range config register in MMU4 */ + IMG_UINT64 aui64MMUPageSizeRangeValue[RGX_MAX_NUM_MMU_PAGE_SIZE_RANGES]; + +#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) + struct + { + IMG_UINT64 ui64timerGray; + IMG_UINT64 ui64timerBinary; + IMG_UINT64 *pui64uscTimers; + } sRGXTimerValues; +#endif + +#if defined(SUPPORT_VALIDATION) + struct + { + IMG_UINT64 ui64RegVal; + struct completion sRegComp; + } sFwRegs; +#endif + + IMG_HANDLE hTQCLISharedMem; /*!< TQ Client Shared Mem PMR */ + IMG_HANDLE hTQUSCSharedMem; /*!< TQ USC Shared Mem PMR */ + +#if defined(SUPPORT_VALIDATION) + IMG_UINT32 ui32TestSLRInterval; /* Don't enqueue an update sync checkpoint every nth kick */ + IMG_UINT32 ui32TestSLRCount; /* (used to test SLR operation) */ + IMG_UINT32 ui32SLRSkipFWAddr; +#endif + +#if defined(SUPPORT_SECURITY_VALIDATION) + DEVMEM_MEMDESC *psRGXFWIfSecureBufMemDesc; + DEVMEM_MEMDESC *psRGXFWIfNonSecureBufMemDesc; +#endif + + /* Timer Queries */ + IMG_UINT32 ui32ActiveQueryId; /*!< id of the active line */ + IMG_BOOL bSaveStart; /*!< save the start time of the next kick on the device*/ + IMG_BOOL bSaveEnd; /*!< save the end time of the next kick on the device*/ + + DEVMEM_MEMDESC *psStartTimeMemDesc; /*!< memdesc for Start Times */ + IMG_UINT64 *pui64StartTimeById; /*!< CPU mapping of the above */ + + DEVMEM_MEMDESC *psEndTimeMemDesc; /*!< memdesc for End Timer */ + IMG_UINT64 *pui64EndTimeById; /*!< CPU mapping of the above */ + + IMG_UINT32 aui32ScheduledOnId[RGX_MAX_TIMER_QUERIES]; /*!< kicks Scheduled on QueryId */ + DEVMEM_MEMDESC *psCompletedMemDesc; /*!< kicks Completed on QueryId */ + 
IMG_UINT32 *pui32CompletedById; /*!< CPU mapping of the above */ + +#if !defined(PVRSRV_USE_BRIDGE_LOCK) + POS_LOCK hTimerQueryLock; /*!< lock to protect simultaneous access to timer query members */ +#endif + + PVRSRV_RGXDEV_ERROR_COUNTS sErrorCounts; /*!< struct containing device error counts */ + + IMG_UINT32 ui32HostSafetyEventMask;/*!< mask of the safety events handled by the driver */ + + RGX_CONTEXT_RESET_REASON eLastDeviceError; /*!< device error reported to client */ + + IMG_UINT32 ui32Log2Non4KPgSize; /* Page size of Non4k heap in log2 form */ +} PVRSRV_RGXDEV_INFO; + + + +typedef struct _RGX_TIMING_INFORMATION_ +{ + /*! GPU default core clock speed in Hz */ + IMG_UINT32 ui32CoreClockSpeed; + + /*! Active Power Management: GPU actively requests the host driver to be powered off */ + IMG_BOOL bEnableActivePM; + + /*! Enable the GPU to power off internal Power Islands independently from the host driver */ + IMG_BOOL bEnableRDPowIsland; + + /*! Active Power Management: Delay between the GPU idle and the request to the host */ + IMG_UINT32 ui32ActivePMLatencyms; + +} RGX_TIMING_INFORMATION; + +typedef struct _RGX_DATA_ +{ + /*! Timing information */ + RGX_TIMING_INFORMATION *psRGXTimingInfo; +} RGX_DATA; + + +/* + RGX PDUMP register bank name (prefix) +*/ +#define RGX_PDUMPREG_NAME "RGXREG" + +#endif /* RGXDEVICE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/rgxfw_log_helper.h b/drivers/gpu/drm/phytium/octopus/rgxfw_log_helper.h new file mode 100644 index 000000000000..fa9bca9912cb --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxfw_log_helper.h @@ -0,0 +1,79 @@ +/*************************************************************************/ /*! +@File rgxfw_log_helper.h +@Title Firmware TBI logging helper function +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Platform Generic +@Description This file contains some helper code to make TBI logging possible + Specifically, it uses the SFIDLIST xmacro to trace ids back to + the original strings. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef RGXFW_LOG_HELPER_H +#define RGXFW_LOG_HELPER_H + +#include "rgx_fwif_sf.h" + +static IMG_CHAR *const groups[]= { +#define X(A,B) #B, + RGXFW_LOG_SFGROUPLIST +#undef X +}; + +/* idToStringID : Search SFs tuples {id,string} for a matching id. + * return index to array if found or RGXFW_SF_LAST if none found. + * bsearch could be used as ids are in increasing order. */ +#if defined(RGX_FIRMWARE) +static IMG_UINT32 idToStringID(IMG_UINT32 ui32CheckData, const RGXFW_STID_FMT *const psSFs) +#else +static IMG_UINT32 idToStringID(IMG_UINT32 ui32CheckData, const RGXKM_STID_FMT *const psSFs) +#endif +{ + IMG_UINT32 i = 0, ui32Id = (IMG_UINT32)RGXFW_SF_LAST; + + for ( i = 0 ; psSFs[i].ui32Id != (IMG_UINT32)RGXFW_SF_LAST ; i++) + { + if ( ui32CheckData == psSFs[i].ui32Id ) + { + ui32Id = i; + break; + } + } + return ui32Id; +} + +#endif /* RGXFW_LOG_HELPER_H */ diff --git a/drivers/gpu/drm/phytium/octopus/rgxfwdbg.c b/drivers/gpu/drm/phytium/octopus/rgxfwdbg.c new file mode 100644 index 000000000000..3bbbcec1600b --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxfwdbg.c @@ -0,0 +1,282 @@ +/*************************************************************************/ /*! +@File +@Title Debugging and miscellaneous functions server implementation +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Description Kernel services functions for debugging and other + miscellaneous functionality. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "pvrsrv.h" +#include "pvr_debug.h" +#include "rgxfwdbg.h" +#include "rgxfwutils.h" +#include "rgxta3d.h" +#include "pdump_km.h" +#include "mmu_common.h" +#include "devicemem_server.h" +#include "osfunc.h" + +PVRSRV_ERROR +PVRSRVRGXFWDebugQueryFWLogKM( + const CONNECTION_DATA *psConnection, + const PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 *pui32RGXFWLogType) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_UNREFERENCED_PARAMETER(psConnection); + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + if (!psDeviceNode || !pui32RGXFWLogType) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDevInfo = psDeviceNode->pvDevice; + + if (!psDevInfo || !psDevInfo->psRGXFWIfTraceBufCtl) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + *pui32RGXFWLogType = psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType; + return PVRSRV_OK; +} + + +PVRSRV_ERROR +PVRSRVRGXFWDebugSetFWLogKM( + const CONNECTION_DATA * psConnection, + const PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32RGXFWLogType) +{ + RGXFWIF_KCCB_CMD sLogTypeUpdateCmd; + PVRSRV_DEV_POWER_STATE ePowerState; + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; + IMG_UINT32 ui32OldRGXFWLogTpe; + IMG_UINT32 ui32kCCBCommandSlot; + IMG_BOOL bWaitForFwUpdate = IMG_FALSE; + + PVR_UNREFERENCED_PARAMETER(psConnection); + PVRSRV_VZ_RET_IF_MODE(GUEST, 
PVRSRV_ERROR_NOT_IMPLEMENTED); + + ui32OldRGXFWLogTpe = psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType; + + /* check log type is valid */ + if (ui32RGXFWLogType & ~RGXFWIF_LOG_TYPE_MASK) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + OSLockAcquire(psDevInfo->hRGXFWIfBufInitLock); + + /* set the new log type and ensure the new log type is written to memory + * before requesting the FW to read it + */ + psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType = ui32RGXFWLogType; + OSMemoryBarrier(); + + /* Allocate firmware trace buffer resource(s) if not already done */ + if (RGXTraceBufferIsInitRequired(psDevInfo)) + { + eError = RGXTraceBufferInitOnDemandResources(psDevInfo, RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS); + } +#if defined(SUPPORT_TBI_INTERFACE) + /* Check if LogType is TBI then allocate resource on demand and copy + * SFs to it + */ + else if (RGXTBIBufferIsInitRequired(psDevInfo)) + { + eError = RGXTBIBufferInitOnDemandResources(psDevInfo); + } + + /* TBI buffer address will be 0 if not initialised */ + sLogTypeUpdateCmd.uCmdData.sTBIBuffer = psDevInfo->sRGXFWIfTBIBuffer; +#else + sLogTypeUpdateCmd.uCmdData.sTBIBuffer.ui32Addr = 0; +#endif + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate resource on-demand. 
Reverting to old value", + __func__)); + psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType = ui32OldRGXFWLogTpe; + OSMemoryBarrier(); + + OSLockRelease(psDevInfo->hRGXFWIfBufInitLock); + + return eError; + } + + OSLockRelease(psDevInfo->hRGXFWIfBufInitLock); + + eError = PVRSRVPowerLock((const PPVRSRV_DEVICE_NODE) psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire power lock (%u)", + __func__, + eError)); + return eError; + } + + eError = PVRSRVGetDevicePowerState((const PPVRSRV_DEVICE_NODE) psDeviceNode, &ePowerState); + + if ((eError == PVRSRV_OK) && (ePowerState != PVRSRV_DEV_POWER_STATE_OFF)) + { + /* Ask the FW to update its cached version of logType value */ + sLogTypeUpdateCmd.eCmdType = RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE; + + eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, + &sLogTypeUpdateCmd, + PDUMP_FLAGS_CONTINUOUS, + &ui32kCCBCommandSlot); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSendCommandAndGetKCCBSlot", unlock); + bWaitForFwUpdate = IMG_TRUE; + } + +unlock: + PVRSRVPowerUnlock((const PPVRSRV_DEVICE_NODE) psDeviceNode); + if (bWaitForFwUpdate) + { + /* Wait for the LogType value to be updated in FW */ + eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + PVR_LOG_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate"); + } + return eError; +} + +PVRSRV_ERROR +PVRSRVRGXFWDebugSetHCSDeadlineKM( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32HCSDeadlineMS) +{ + PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; + PVR_UNREFERENCED_PARAMETER(psConnection); + + return RGXFWSetHCSDeadline(psDevInfo, ui32HCSDeadlineMS); +} + +PVRSRV_ERROR +PVRSRVRGXFWDebugSetOSidPriorityKM( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32OSid, + IMG_UINT32 ui32OSidPriority) +{ + PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; + PVR_UNREFERENCED_PARAMETER(psConnection); + + return RGXFWChangeOSidPriority(psDevInfo, ui32OSid, 
ui32OSidPriority); +} + +PVRSRV_ERROR +PVRSRVRGXFWDebugSetOSNewOnlineStateKM( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32OSid, + IMG_UINT32 ui32OSNewState) +{ + PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_OS_STATE_CHANGE eState; + PVR_UNREFERENCED_PARAMETER(psConnection); + + eState = (ui32OSNewState) ? (RGXFWIF_OS_ONLINE) : (RGXFWIF_OS_OFFLINE); + return RGXFWSetFwOsState(psDevInfo, ui32OSid, eState); +} + +PVRSRV_ERROR +PVRSRVRGXFWDebugPHRConfigureKM( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32PHRMode) +{ + PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; + PVR_UNREFERENCED_PARAMETER(psConnection); + + return RGXFWConfigPHR(psDevInfo, + ui32PHRMode); +} + +PVRSRV_ERROR +PVRSRVRGXFWDebugWdgConfigureKM( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32WdgPeriodUs) +{ + PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; + PVR_UNREFERENCED_PARAMETER(psConnection); + + return RGXFWConfigWdg(psDevInfo, + ui32WdgPeriodUs); +} + +PVRSRV_ERROR +PVRSRVRGXFWDebugDumpFreelistPageListKM( + CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; + DLLIST_NODE *psNode, *psNext; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + if (dllist_is_empty(&psDevInfo->sFreeListHead)) + { + return PVRSRV_OK; + } + + PVR_LOG(("---------------[ Begin Freelist Page List Dump ]------------------")); + + OSLockAcquire(psDevInfo->hLockFreeList); + dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext) + { + RGX_FREELIST *psFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode); + RGXDumpFreeListPageList(psFreeList); + } + OSLockRelease(psDevInfo->hLockFreeList); + + PVR_LOG(("----------------[ End Freelist Page List Dump ]-------------------")); + + return PVRSRV_OK; + +} diff --git a/drivers/gpu/drm/phytium/octopus/rgxfwdbg.h 
b/drivers/gpu/drm/phytium/octopus/rgxfwdbg.h new file mode 100644 index 000000000000..ee488592c087 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxfwdbg.h @@ -0,0 +1,113 @@ +/*************************************************************************/ /*! +@File +@Title Debugging and miscellaneous functions server interface +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Kernel services functions for debugging and other + miscellaneous functionality. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(RGXFWDBG_H) +#define RGXFWDBG_H + +#include "img_defs.h" +#include "pvrsrv_error.h" +#include "device.h" +#include "pmr.h" + +#include "connection_server.h" + + +PVRSRV_ERROR +PVRSRVRGXFWDebugInitFWImageKM( + PMR *psFWImgDestPMR, + PMR *psFWImgSrcPMR, + IMG_UINT64 ui64FWImgLen, + PMR *psFWImgSigPMR, + IMG_UINT64 ui64FWSigLen); + +PVRSRV_ERROR +PVRSRVRGXFWDebugQueryFWLogKM( + const CONNECTION_DATA *psConnection, + const PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 *pui32RGXFWLogType); + +PVRSRV_ERROR +PVRSRVRGXFWDebugSetFWLogKM( + const CONNECTION_DATA *psConnection, + const PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32RGXFWLogType); + +PVRSRV_ERROR +PVRSRVRGXFWDebugSetHCSDeadlineKM( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32HCSDeadlineMS); + +PVRSRV_ERROR +PVRSRVRGXFWDebugSetOSidPriorityKM( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32OSid, + IMG_UINT32 ui32OSidPriority); + +PVRSRV_ERROR +PVRSRVRGXFWDebugSetOSNewOnlineStateKM( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32OSid, + IMG_UINT32 ui32OSNewState); + +PVRSRV_ERROR +PVRSRVRGXFWDebugPHRConfigureKM( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32PHRMode); + +PVRSRV_ERROR +PVRSRVRGXFWDebugWdgConfigureKM( + 
CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32WdgPeriodUs); + +PVRSRV_ERROR +PVRSRVRGXFWDebugDumpFreelistPageListKM( + CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode); + +#endif diff --git a/drivers/gpu/drm/phytium/octopus/rgxfwimageutils.c b/drivers/gpu/drm/phytium/octopus/rgxfwimageutils.c new file mode 100644 index 000000000000..59e80ddc0cf5 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxfwimageutils.c @@ -0,0 +1,1066 @@ +/*************************************************************************/ /*! +@File +@Title Services Firmware image utilities used at init time +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Services Firmware image utilities used at init time +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/* The routines implemented here are built on top of an abstraction layer to + * hide DDK/OS-specific details in case they are used outside of the DDK + * (e.g. when trusted device is enabled). + * Any new dependency should be added to rgxlayer.h. + * Any new code should be built on top of the existing abstraction layer, + * which should be extended when necessary. 
*/ +#include "rgxfwimageutils.h" +#include "pvrsrv.h" + + +/************************************************************************ +* FW layout information +************************************************************************/ +#define MAX_NUM_ENTRIES (8) +static RGX_FW_LAYOUT_ENTRY asRGXFWLayoutTable[MAX_NUM_ENTRIES]; +static IMG_UINT32 ui32LayoutEntryNum; + + +static RGX_FW_LAYOUT_ENTRY* GetTableEntry(const void *hPrivate, RGX_FW_SECTION_ID eId) +{ + IMG_UINT32 i; + + for (i = 0; i < ui32LayoutEntryNum; i++) + { + if (asRGXFWLayoutTable[i].eId == eId) + { + return &asRGXFWLayoutTable[i]; + } + } + + RGXErrorLog(hPrivate, "%s: id %u not found, returning entry 0\n", + __func__, eId); + + return &asRGXFWLayoutTable[0]; +} + +/*! +******************************************************************************* + + @Function FindMMUSegment + + @Description Given a 32 bit FW address attempt to find the corresponding + pointer to FW allocation + + @Input ui32OffsetIn : 32 bit FW address + @Input pvHostFWCodeAddr : Pointer to FW code + @Input pvHostFWDataAddr : Pointer to FW data + @Input pvHostFWCorememCodeAddr : Pointer to FW coremem code + @Input pvHostFWCorememDataAddr : Pointer to FW coremem code + @Input uiHostAddrOut : CPU pointer equivalent to ui32OffsetIn + + @Return PVRSRV_ERROR + +******************************************************************************/ +static PVRSRV_ERROR FindMMUSegment(IMG_UINT32 ui32OffsetIn, + void *pvHostFWCodeAddr, + void *pvHostFWDataAddr, + void *pvHostFWCorememCodeAddr, + void *pvHostFWCorememDataAddr, + void **uiHostAddrOut) +{ + IMG_UINT32 i; + + for (i = 0; i < ui32LayoutEntryNum; i++) + { + if ((ui32OffsetIn >= asRGXFWLayoutTable[i].ui32BaseAddr) && + (ui32OffsetIn < (asRGXFWLayoutTable[i].ui32BaseAddr + asRGXFWLayoutTable[i].ui32AllocSize))) + { + switch (asRGXFWLayoutTable[i].eType) + { + case FW_CODE: + *uiHostAddrOut = pvHostFWCodeAddr; + break; + + case FW_DATA: + *uiHostAddrOut = pvHostFWDataAddr; + break; + + 
case FW_COREMEM_CODE: + *uiHostAddrOut = pvHostFWCorememCodeAddr; + break; + + case FW_COREMEM_DATA: + *uiHostAddrOut = pvHostFWCorememDataAddr; + break; + + default: + return PVRSRV_ERROR_INIT_FAILURE; + } + + goto found; + } + } + + return PVRSRV_ERROR_INIT_FAILURE; + +found: + if (*uiHostAddrOut == NULL) + { + return PVRSRV_OK; + } + + /* Direct Mem write to mapped memory */ + ui32OffsetIn -= asRGXFWLayoutTable[i].ui32BaseAddr; + ui32OffsetIn += asRGXFWLayoutTable[i].ui32AllocOffset; + + /* Add offset to pointer to FW allocation only if + * that allocation is available + */ + if (*uiHostAddrOut) + { + *(IMG_UINT8 **)uiHostAddrOut += ui32OffsetIn; + } + + return PVRSRV_OK; +} + +/*! +******************************************************************************* + + @Function RGXFWConfigureSegID + + @Description Configures a single segment of the Segment MMU + (base, limit and out_addr) + + @Input hPrivate : Implementation specific data + @Input ui64SegOutAddr : Segment output base address (40 bit devVaddr) + @Input ui32SegBase : Segment input base address (32 bit FW address) + @Input ui32SegLimit : Segment size + @Input ui32SegID : Segment ID + @Input pszName : Segment name + @Input ppui32BootConf : Pointer to bootloader data + + @Return void + +******************************************************************************/ +static void RGXFWConfigureSegID(const void *hPrivate, + IMG_UINT64 ui64SegOutAddr, + IMG_UINT32 ui32SegBase, + IMG_UINT32 ui32SegLimit, + IMG_UINT32 ui32SegID, + IMG_UINT32 **ppui32BootConf) +{ + IMG_UINT32 *pui32BootConf = *ppui32BootConf; + IMG_UINT32 ui32SegOutAddr0 = ui64SegOutAddr & 0x00000000FFFFFFFFUL; + IMG_UINT32 ui32SegOutAddr1 = (ui64SegOutAddr >> 32) & 0x00000000FFFFFFFFUL; + + /* META segments have a minimum size */ + IMG_UINT32 ui32LimitOff = (ui32SegLimit < RGXFW_SEGMMU_ALIGN) ? 
+ RGXFW_SEGMMU_ALIGN : ui32SegLimit; + /* the limit is an offset, therefore off = size - 1 */ + ui32LimitOff -= 1; + + RGXCommentLog(hPrivate, + "* Seg%d: meta_addr = 0x%08x, devv_addr = 0x%" IMG_UINT64_FMTSPECx ", limit = 0x%x", + ui32SegID, + ui32SegBase, + ui64SegOutAddr, + ui32LimitOff); + + ui32SegBase |= RGXFW_SEGMMU_ALLTHRS_WRITEABLE; + + *pui32BootConf++ = META_CR_MMCU_SEGMENTn_BASE(ui32SegID); + *pui32BootConf++ = ui32SegBase; + + *pui32BootConf++ = META_CR_MMCU_SEGMENTn_LIMIT(ui32SegID); + *pui32BootConf++ = ui32LimitOff; + + *pui32BootConf++ = META_CR_MMCU_SEGMENTn_OUTA0(ui32SegID); + *pui32BootConf++ = ui32SegOutAddr0; + + *pui32BootConf++ = META_CR_MMCU_SEGMENTn_OUTA1(ui32SegID); + *pui32BootConf++ = ui32SegOutAddr1; + + *ppui32BootConf = pui32BootConf; +} + +/*! +******************************************************************************* + + @Function RGXFWConfigureSegMMU + + @Description Configures META's Segment MMU + + @Input hPrivate : Implementation specific data + @Input psFWCodeDevVAddrBase : FW code base device virtual address + @Input psFWDataDevVAddrBase : FW data base device virtual address + @Input ppui32BootConf : Pointer to bootloader data + + @Return void + +******************************************************************************/ +static void RGXFWConfigureSegMMU(const void *hPrivate, + IMG_DEV_VIRTADDR *psFWCodeDevVAddrBase, + IMG_DEV_VIRTADDR *psFWDataDevVAddrBase, + IMG_UINT32 **ppui32BootConf) +{ + IMG_UINT64 ui64SegOutAddrTop; + IMG_UINT32 i; + + PVR_UNREFERENCED_PARAMETER(psFWCodeDevVAddrBase); + + /* Configure Segment MMU */ + RGXCommentLog(hPrivate, "********** FW configure Segment MMU **********"); + + if (RGX_DEVICE_HAS_FEATURE(hPrivate, SLC_VIVT)) + { + ui64SegOutAddrTop = RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_CACHED(MMU_CONTEXT_MAPPING_FWPRIV); + } + else + { + ui64SegOutAddrTop = RGXFW_SEGMMU_OUTADDR_TOP_SLC(MMU_CONTEXT_MAPPING_FWPRIV, RGXFW_SEGMMU_META_BIFDM_ID); + } + + for (i = 0; i < ui32LayoutEntryNum; i++) + { 
+ /* + * FW code is using the bootloader segment which is already configured on boot. + * FW coremem code and data don't use the segment MMU. + * Only the FW data segment needs to be configured. + */ + + if (asRGXFWLayoutTable[i].eType == FW_DATA) + { + IMG_UINT64 ui64SegOutAddr; + IMG_UINT32 ui32SegId = RGXFW_SEGMMU_DATA_ID; + + ui64SegOutAddr = (psFWDataDevVAddrBase->uiAddr | ui64SegOutAddrTop) + + asRGXFWLayoutTable[i].ui32AllocOffset; + + RGXFWConfigureSegID(hPrivate, + ui64SegOutAddr, + asRGXFWLayoutTable[i].ui32BaseAddr, + asRGXFWLayoutTable[i].ui32AllocSize, + ui32SegId, + ppui32BootConf); /*write the sequence to the bootldr */ + + break; + } + } +} + +/*! +******************************************************************************* + + @Function RGXFWConfigureMetaCaches + + @Description Configure and enable the Meta instruction and data caches + + @Input hPrivate : Implementation specific data + @Input ui32NumThreads : Number of FW threads in use + @Input ppui32BootConf : Pointer to bootloader data + + @Return void + +******************************************************************************/ +static void RGXFWConfigureMetaCaches(const void *hPrivate, + IMG_UINT32 ui32NumThreads, + IMG_UINT32 **ppui32BootConf) +{ + IMG_UINT32 *pui32BootConf = *ppui32BootConf; + IMG_UINT32 ui32DCacheT0, ui32ICacheT0; + IMG_UINT32 ui32DCacheT1, ui32ICacheT1; + IMG_UINT32 ui32DCacheT2, ui32ICacheT2; + IMG_UINT32 ui32DCacheT3, ui32ICacheT3; + +#define META_CR_MMCU_LOCAL_EBCTRL (0x04830600) +#define META_CR_MMCU_LOCAL_EBCTRL_ICWIN (0x3 << 14) +#define META_CR_MMCU_LOCAL_EBCTRL_DCWIN (0x3 << 6) +#define META_CR_SYSC_DCPART(n) (0x04830200 + (n)*0x8) +#define META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE (0x1 << 31) +#define META_CR_SYSC_ICPART(n) (0x04830220 + (n)*0x8) +#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_OFFSET_TOP_HALF (0x8 << 16) +#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE (0xF) +#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE (0x7) +#define 
META_CR_MMCU_DCACHE_CTRL (0x04830018) +#define META_CR_MMCU_ICACHE_CTRL (0x04830020) +#define META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN (0x1) + + RGXCommentLog(hPrivate, "********** Meta caches configuration *********"); + + /* Initialise I/Dcache settings */ + ui32DCacheT0 = ui32DCacheT1 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE; + ui32DCacheT2 = ui32DCacheT3 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE; + ui32ICacheT0 = ui32ICacheT1 = ui32ICacheT2 = ui32ICacheT3 = 0; + + if (ui32NumThreads == 1) + { + ui32DCacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE; + ui32ICacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE; + } + else + { + ui32DCacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE; + ui32ICacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE; + + ui32DCacheT1 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE | + META_CR_SYSC_XCPARTX_LOCAL_ADDR_OFFSET_TOP_HALF; + ui32ICacheT1 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE | + META_CR_SYSC_XCPARTX_LOCAL_ADDR_OFFSET_TOP_HALF; + } + + /* Local region MMU enhanced bypass: WIN-3 mode for code and data caches */ + *pui32BootConf++ = META_CR_MMCU_LOCAL_EBCTRL; + *pui32BootConf++ = META_CR_MMCU_LOCAL_EBCTRL_ICWIN | + META_CR_MMCU_LOCAL_EBCTRL_DCWIN; + + RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", + META_CR_MMCU_LOCAL_EBCTRL, + META_CR_MMCU_LOCAL_EBCTRL_ICWIN | META_CR_MMCU_LOCAL_EBCTRL_DCWIN); + + /* Data cache partitioning thread 0 to 3 */ + *pui32BootConf++ = META_CR_SYSC_DCPART(0); + *pui32BootConf++ = ui32DCacheT0; + *pui32BootConf++ = META_CR_SYSC_DCPART(1); + *pui32BootConf++ = ui32DCacheT1; + *pui32BootConf++ = META_CR_SYSC_DCPART(2); + *pui32BootConf++ = ui32DCacheT2; + *pui32BootConf++ = META_CR_SYSC_DCPART(3); + *pui32BootConf++ = ui32DCacheT3; + + RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", + META_CR_SYSC_DCPART(0), ui32DCacheT0); + RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", + META_CR_SYSC_DCPART(1), ui32DCacheT1); + RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", + 
META_CR_SYSC_DCPART(2), ui32DCacheT2); + RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", + META_CR_SYSC_DCPART(3), ui32DCacheT3); + + /* Enable data cache hits */ + *pui32BootConf++ = META_CR_MMCU_DCACHE_CTRL; + *pui32BootConf++ = META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN; + + RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", + META_CR_MMCU_DCACHE_CTRL, + META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN); + + /* Instruction cache partitioning thread 0 to 3 */ + *pui32BootConf++ = META_CR_SYSC_ICPART(0); + *pui32BootConf++ = ui32ICacheT0; + *pui32BootConf++ = META_CR_SYSC_ICPART(1); + *pui32BootConf++ = ui32ICacheT1; + *pui32BootConf++ = META_CR_SYSC_ICPART(2); + *pui32BootConf++ = ui32ICacheT2; + *pui32BootConf++ = META_CR_SYSC_ICPART(3); + *pui32BootConf++ = ui32ICacheT3; + + RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", + META_CR_SYSC_ICPART(0), ui32ICacheT0); + RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", + META_CR_SYSC_ICPART(1), ui32ICacheT1); + RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", + META_CR_SYSC_ICPART(2), ui32ICacheT2); + RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", + META_CR_SYSC_ICPART(3), ui32ICacheT3); + + /* Enable instruction cache hits */ + *pui32BootConf++ = META_CR_MMCU_ICACHE_CTRL; + *pui32BootConf++ = META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN; + + RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", + META_CR_MMCU_ICACHE_CTRL, + META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN); + + *pui32BootConf++ = 0x040000C0; + *pui32BootConf++ = 0; + + RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", 0x040000C0, 0); + + *ppui32BootConf = pui32BootConf; +} + +/*! 
/*!
*******************************************************************************

 @Function      ProcessLDRCommandStream

 @Description   Process the output of the Meta toolchain in the .LDR format
                copying code and data sections into their final location and
                passing some information to the Meta bootloader

 @Input         hPrivate : Implementation specific data
 @Input         pbLDR : Pointer to FW blob
 @Input         pvHostFWCodeAddr : Pointer to FW code
 @Input         pvHostFWDataAddr : Pointer to FW data
 @Input         pvHostFWCorememCodeAddr : Pointer to FW coremem code
 @Input         pvHostFWCorememDataAddr : Pointer to FW coremem data
 @Input         ppui32BootConf : Pointer to bootloader data

 @Return        PVRSRV_ERROR

******************************************************************************/
PVRSRV_ERROR ProcessLDRCommandStream(const void *hPrivate,
                                     const IMG_BYTE* pbLDR,
                                     void* pvHostFWCodeAddr,
                                     void* pvHostFWDataAddr,
                                     void* pvHostFWCorememCodeAddr,
                                     void* pvHostFWCorememDataAddr,
                                     IMG_UINT32 **ppui32BootConf)
{
	/* The LDR header gives the offset of the first L1 block; L1 blocks form a
	 * linked list via ui32Next, each optionally referencing an L2 payload.
	 */
	RGX_META_LDR_BLOCK_HDR *psHeader = (RGX_META_LDR_BLOCK_HDR *) pbLDR;
	RGX_META_LDR_L1_DATA_BLK *psL1Data =
	    (RGX_META_LDR_L1_DATA_BLK*) ((IMG_UINT8 *) pbLDR + psHeader->ui32SLData);

	/* Bootloader configuration output is optional (NULL when the caller has
	 * no FW code allocation to write it into).
	 */
	IMG_UINT32 *pui32BootConf = ppui32BootConf ? *ppui32BootConf : NULL;
	IMG_UINT32 ui32CorememSize = RGXGetFWCorememSize(hPrivate);

	RGXCommentLog(hPrivate, "**********************************************");
	RGXCommentLog(hPrivate, "************** Begin LDR Parsing *************");
	RGXCommentLog(hPrivate, "**********************************************");

	while (psL1Data != NULL)
	{
		if (RGX_META_LDR_BLK_IS_COMMENT(psL1Data->ui16Cmd))
		{
			/* Don't process comment blocks */
			goto NextBlock;
		}

		switch (psL1Data->ui16Cmd & RGX_META_LDR_CMD_MASK)
		{
			case RGX_META_LDR_CMD_LOADMEM:
			{
				/* Copy an L2 data payload into the FW allocation that backs
				 * the target FW address.
				 */
				RGX_META_LDR_L2_DATA_BLK *psL2Block =
				    (RGX_META_LDR_L2_DATA_BLK*) (((IMG_UINT8 *) pbLDR) + psL1Data->aui32CmdData[1]);
				IMG_UINT32 ui32Offset = psL1Data->aui32CmdData[0];
				IMG_UINT32 ui32DataSize = psL2Block->ui16Length - 6 /* L2 Tag length and checksum */;
				void *pvWriteAddr;
				PVRSRV_ERROR eError;

				if (!RGX_META_IS_COREMEM_CODE(ui32Offset, ui32CorememSize) &&
				    !RGX_META_IS_COREMEM_DATA(ui32Offset, ui32CorememSize))
				{
					/* Global range is aliased to local range */
					ui32Offset &= ~META_MEM_GLOBAL_RANGE_BIT;
				}

				eError = FindMMUSegment(ui32Offset,
				                        pvHostFWCodeAddr,
				                        pvHostFWDataAddr,
				                        pvHostFWCorememCodeAddr,
				                        pvHostFWCorememDataAddr,
				                        &pvWriteAddr);

				if (eError != PVRSRV_OK)
				{
					RGXErrorLog(hPrivate,
					            "ProcessLDRCommandStream: Addr 0x%x (size: %d) not found in any segment",
					            ui32Offset, ui32DataSize);
					return eError;
				}

				/* Write to FW allocation only if available */
				if (pvWriteAddr)
				{
					RGXMemCopy(hPrivate,
					           pvWriteAddr,
					           psL2Block->aui32BlockData,
					           ui32DataSize);
				}

				break;
			}
			case RGX_META_LDR_CMD_LOADCORE:
			case RGX_META_LDR_CMD_LOADMMREG:
			{
				/* Not supported by this loader */
				return PVRSRV_ERROR_INIT_FAILURE;
			}
			case RGX_META_LDR_CMD_START_THREADS:
			{
				/* Don't process this block */
				break;
			}
			case RGX_META_LDR_CMD_ZEROMEM:
			{
				IMG_UINT32 ui32Offset = psL1Data->aui32CmdData[0];
				IMG_UINT32 ui32ByteCount = psL1Data->aui32CmdData[1];
				void *pvWriteAddr;
				PVRSRV_ERROR eError;

				if (RGX_META_IS_COREMEM_DATA(ui32Offset, ui32CorememSize))
				{
					/* cannot zero coremem directly */
					break;
				}

				/* Global range is aliased to local range */
				ui32Offset &= ~META_MEM_GLOBAL_RANGE_BIT;

				eError = FindMMUSegment(ui32Offset,
				                        pvHostFWCodeAddr,
				                        pvHostFWDataAddr,
				                        pvHostFWCorememCodeAddr,
				                        pvHostFWCorememDataAddr,
				                        &pvWriteAddr);

				if (eError != PVRSRV_OK)
				{
					RGXErrorLog(hPrivate,
					            "ProcessLDRCommandStream: Addr 0x%x (size: %d) not found in any segment",
					            ui32Offset, ui32ByteCount);
					return eError;
				}

				/* Write to FW allocation only if available */
				if (pvWriteAddr)
				{
					RGXMemSet(hPrivate, pvWriteAddr, 0, ui32ByteCount);
				}

				break;
			}
			case RGX_META_LDR_CMD_CONFIG:
			{
				/* Walk the list of config sub-commands in the L2 block,
				 * emitting register writes into the bootloader stream.
				 */
				RGX_META_LDR_L2_DATA_BLK *psL2Block =
				    (RGX_META_LDR_L2_DATA_BLK*) (((IMG_UINT8 *) pbLDR) + psL1Data->aui32CmdData[0]);
				RGX_META_LDR_CFG_BLK *psConfigCommand = (RGX_META_LDR_CFG_BLK*) psL2Block->aui32BlockData;
				IMG_UINT32 ui32L2BlockSize = psL2Block->ui16Length - 6 /* L2 Tag length and checksum */;
				IMG_UINT32 ui32CurrBlockSize = 0;

				while (ui32L2BlockSize)
				{
					switch (psConfigCommand->ui32Type)
					{
						case RGX_META_LDR_CFG_PAUSE:
						case RGX_META_LDR_CFG_READ:
						{
							/* Unsupported; NOTE(review): the size assignment
							 * below is dead code — the return precedes its use.
							 */
							ui32CurrBlockSize = 8;
							return PVRSRV_ERROR_INIT_FAILURE;
						}
						case RGX_META_LDR_CFG_WRITE:
						{
							IMG_UINT32 ui32RegisterOffset = psConfigCommand->aui32BlockData[0];
							IMG_UINT32 ui32RegisterValue = psConfigCommand->aui32BlockData[1];

							/* Only write to bootloader if we got a valid
							 * pointer to the FW code allocation
							 */
							if (pui32BootConf)
							{
								/* Do register write */
								*pui32BootConf++ = ui32RegisterOffset;
								*pui32BootConf++ = ui32RegisterValue;
							}

							RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
							              ui32RegisterOffset, ui32RegisterValue);

							ui32CurrBlockSize = 12;
							break;
						}
						case RGX_META_LDR_CFG_MEMSET:
						case RGX_META_LDR_CFG_MEMCHECK:
						{
							/* Unsupported; NOTE(review): dead assignment, as above. */
							ui32CurrBlockSize = 20;
							return PVRSRV_ERROR_INIT_FAILURE;
						}
						default:
						{
							return PVRSRV_ERROR_INIT_FAILURE;
						}
					}
					/* Advance to the next config sub-command */
					ui32L2BlockSize -= ui32CurrBlockSize;
					psConfigCommand = (RGX_META_LDR_CFG_BLK*) (((IMG_UINT8*) psConfigCommand) + ui32CurrBlockSize);
				}

				break;
			}
			default:
			{
				return PVRSRV_ERROR_INIT_FAILURE;
			}
		}

NextBlock:
		/* 0xFFFFFFFF terminates the L1 block list */
		if (psL1Data->ui32Next == 0xFFFFFFFF)
		{
			psL1Data = NULL;
		}
		else
		{
			psL1Data = (RGX_META_LDR_L1_DATA_BLK*) (((IMG_UINT8 *) pbLDR) + psL1Data->ui32Next);
		}
	}

	/* Hand the advanced bootloader cursor back to the caller */
	if (pui32BootConf)
	{
		*ppui32BootConf = pui32BootConf;
	}

	RGXCommentLog(hPrivate, "**********************************************");
	RGXCommentLog(hPrivate, "************** End Loader Parsing ************");
	RGXCommentLog(hPrivate, "**********************************************");

	return PVRSRV_OK;
}
(psProgramHeader->ui32Ptype != ELF_PT_LOAD) continue; + + eError = FindMMUSegment(psProgramHeader->ui32Pvaddr, + pvHostFWCodeAddr, + pvHostFWDataAddr, + pvHostFWCorememCodeAddr, + pvHostFWCorememDataAddr, + &pvWriteAddr); + + if (eError != PVRSRV_OK) + { + RGXErrorLog(hPrivate, + "%s: Addr 0x%x (size: %d) not found in any segment",__func__, + psProgramHeader->ui32Pvaddr, + psProgramHeader->ui32Pfilesz); + return eError; + } + + /* Write to FW allocation only if available */ + if (pvWriteAddr) + { + RGXMemCopy(hPrivate, + pvWriteAddr, + (IMG_PBYTE)(pbELF + psProgramHeader->ui32Poffset), + psProgramHeader->ui32Pfilesz); + + RGXMemSet(hPrivate, + (IMG_PBYTE)pvWriteAddr + psProgramHeader->ui32Pfilesz, + 0, + psProgramHeader->ui32Pmemsz - psProgramHeader->ui32Pfilesz); + } + } + + return PVRSRV_OK; +} + +IMG_UINT32 RGXGetFWImageSectionOffset(const void *hPrivate, RGX_FW_SECTION_ID eId) +{ + RGX_FW_LAYOUT_ENTRY *psEntry = GetTableEntry(hPrivate, eId); + + return psEntry->ui32AllocOffset; +} + +IMG_UINT32 RGXGetFWImageSectionMaxSize(const void *hPrivate, RGX_FW_SECTION_ID eId) +{ + RGX_FW_LAYOUT_ENTRY *psEntry = GetTableEntry(hPrivate, eId); + + return psEntry->ui32MaxSize; +} + +IMG_UINT32 RGXGetFWImageSectionAllocSize(const void *hPrivate, RGX_FW_SECTION_ID eId) +{ + RGX_FW_LAYOUT_ENTRY *psEntry = GetTableEntry(hPrivate, eId); + + return psEntry->ui32AllocSize; +} + +IMG_UINT32 RGXGetFWImageSectionAddress(const void *hPrivate, RGX_FW_SECTION_ID eId) +{ + RGX_FW_LAYOUT_ENTRY *psEntry = GetTableEntry(hPrivate, eId); + + return psEntry->ui32BaseAddr; +} + +PVRSRV_ERROR RGXGetFWImageAllocSize(const void *hPrivate, + const IMG_BYTE *pbRGXFirmware, + const IMG_UINT32 ui32RGXFirmwareSize, + IMG_DEVMEM_SIZE_T *puiFWCodeAllocSize, + IMG_DEVMEM_SIZE_T *puiFWDataAllocSize, + IMG_DEVMEM_SIZE_T *puiFWCorememCodeAllocSize, + IMG_DEVMEM_SIZE_T *puiFWCorememDataAllocSize) +{ + RGX_FW_INFO_HEADER *psInfoHeader; + const IMG_BYTE *pbRGXFirmwareInfo; + const IMG_BYTE *pbRGXFirmwareLayout; 
+ IMG_UINT32 i; + + if (pbRGXFirmware == NULL || ui32RGXFirmwareSize == 0 || ui32RGXFirmwareSize <= FW_BLOCK_SIZE) + { + RGXErrorLog(hPrivate, "%s: Invalid FW binary at %p, size %u", + __func__, pbRGXFirmware, ui32RGXFirmwareSize); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + + /* + * Acquire pointer to the FW info header within the FW image. + * The format of the header in the FW image might not be the one expected + * by the driver, but the driver should still be able to correctly read + * the information below, as long as new/incompatible elements are added + * at the end of the header (they will be ignored by the driver). + */ + + pbRGXFirmwareInfo = pbRGXFirmware + ui32RGXFirmwareSize - FW_BLOCK_SIZE; + psInfoHeader = (RGX_FW_INFO_HEADER*)pbRGXFirmwareInfo; + + /* If any of the following checks fails, the FW will likely not work properly */ + + if (psInfoHeader->ui32InfoVersion != FW_INFO_VERSION) + { + RGXErrorLog(hPrivate, "%s: FW info version mismatch (expected: %u, found: %u)", + __func__, + (IMG_UINT32) FW_INFO_VERSION, + psInfoHeader->ui32InfoVersion); + } + + if (psInfoHeader->ui32HeaderLen != sizeof(RGX_FW_INFO_HEADER)) + { + RGXErrorLog(hPrivate, "%s: FW info header sizes mismatch (expected: %u, found: %u)", + __func__, + (IMG_UINT32) sizeof(RGX_FW_INFO_HEADER), + psInfoHeader->ui32HeaderLen); + } + + if (psInfoHeader->ui32LayoutEntrySize != sizeof(RGX_FW_LAYOUT_ENTRY)) + { + RGXErrorLog(hPrivate, "%s: FW layout entry sizes mismatch (expected: %u, found: %u)", + __func__, + (IMG_UINT32) sizeof(RGX_FW_LAYOUT_ENTRY), + psInfoHeader->ui32LayoutEntrySize); + } + + if (psInfoHeader->ui32LayoutEntryNum > MAX_NUM_ENTRIES) + { + RGXErrorLog(hPrivate, "%s: Not enough storage for the FW layout table (max: %u entries, found: %u)", + __func__, + MAX_NUM_ENTRIES, + psInfoHeader->ui32LayoutEntryNum); + } + +#if defined(RGX_FEATURE_MIPS_BIT_MASK) + if (RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS)) + { + if (psInfoHeader->ui32FwPageSize != RGXGetOSPageSize(hPrivate)) + { 
+ RGXErrorLog(hPrivate, "%s: FW page size mismatch (expected: %u, found: %u)", + __func__, + (IMG_UINT32) RGXGetOSPageSize(hPrivate), + psInfoHeader->ui32FwPageSize); + return PVRSRV_ERROR_INVALID_PARAMS; + } + } +#endif + + ui32LayoutEntryNum = psInfoHeader->ui32LayoutEntryNum; + + + /* + * Copy FW layout table from FW image to local array. + * One entry is copied at a time and the copy is limited to what the driver + * expects to find in it. Assuming that new/incompatible elements + * are added at the end of each entry, the loop below adapts the table + * in the FW image into the format expected by the driver. + */ + + pbRGXFirmwareLayout = pbRGXFirmwareInfo + psInfoHeader->ui32HeaderLen; + + for (i = 0; i < ui32LayoutEntryNum; i++) + { + RGX_FW_LAYOUT_ENTRY *psOutEntry = &asRGXFWLayoutTable[i]; + + RGX_FW_LAYOUT_ENTRY *psInEntry = (RGX_FW_LAYOUT_ENTRY*) + (pbRGXFirmwareLayout + i * psInfoHeader->ui32LayoutEntrySize); + + RGXMemCopy(hPrivate, + (void*)psOutEntry, + (void*)psInEntry, + sizeof(RGX_FW_LAYOUT_ENTRY)); + } + + + /* Calculate how much memory the FW needs for its code and data segments */ + + *puiFWCodeAllocSize = 0; + *puiFWDataAllocSize = 0; + *puiFWCorememCodeAllocSize = 0; + *puiFWCorememDataAllocSize = 0; + + for (i = 0; i < ui32LayoutEntryNum; i++) + { + switch (asRGXFWLayoutTable[i].eType) + { + case FW_CODE: + *puiFWCodeAllocSize += asRGXFWLayoutTable[i].ui32AllocSize; + break; + + case FW_DATA: + *puiFWDataAllocSize += asRGXFWLayoutTable[i].ui32AllocSize; + break; + + case FW_COREMEM_CODE: + *puiFWCorememCodeAllocSize += asRGXFWLayoutTable[i].ui32AllocSize; + break; + + case FW_COREMEM_DATA: + *puiFWCorememDataAllocSize += asRGXFWLayoutTable[i].ui32AllocSize; + break; + + default: + RGXErrorLog(hPrivate, "%s: Unknown FW section type %u\n", + __func__, asRGXFWLayoutTable[i].eType); + break; + } + } + + return PVRSRV_OK; +} + + +PVRSRV_ERROR RGXProcessFWImage(const void *hPrivate, + const IMG_BYTE *pbRGXFirmware, + void *pvFWCode, + void 
/* Load and configure the firmware image for whichever FW processor the
 * device uses (META, MIPS or RISC-V), copying sections into the provided
 * host-mapped allocations and writing processor-specific boot parameters.
 *
 * hPrivate       : Implementation specific data
 * pbRGXFirmware  : Pointer to the FW blob
 * pvFWCode/Data/CorememCode/CorememData : host pointers to the FW
 *                  allocations (any may be NULL to skip that part)
 * puFWParams     : boot parameters; the union member read matches the
 *                  detected FW processor (sMeta / sMips / sRISCV)
 */
PVRSRV_ERROR RGXProcessFWImage(const void *hPrivate,
                               const IMG_BYTE *pbRGXFirmware,
                               void *pvFWCode,
                               void *pvFWData,
                               void *pvFWCorememCode,
                               void *pvFWCorememData,
                               RGX_FW_BOOT_PARAMS *puFWParams)
{
	PVRSRV_ERROR eError = PVRSRV_OK;
	IMG_BOOL bMIPS = IMG_FALSE;
	IMG_BOOL bRISCV = RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR);
	IMG_BOOL bMETA;

#if defined(RGX_FEATURE_MIPS_BIT_MASK)
	bMIPS = RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS);
#endif
	/* META is the default when neither MIPS nor RISC-V is present */
	bMETA = !bMIPS && !bRISCV;

	if (bMETA)
	{
		IMG_UINT32 *pui32BootConf = NULL;
		/* Skip bootloader configuration if a pointer to the FW code
		 * allocation is not available
		 */
		if (pvFWCode)
		{
			/* This variable points to the bootloader code which is mostly
			 * a sequence of <register address, register value> pairs
			 */
			pui32BootConf = ((IMG_UINT32*) pvFWCode) + RGXFW_BOOTLDR_CONF_OFFSET;

			/* Slave port and JTAG accesses are privileged */
			*pui32BootConf++ = META_CR_SYSC_JTAG_THREAD;
			*pui32BootConf++ = META_CR_SYSC_JTAG_THREAD_PRIV_EN;

			RGXFWConfigureSegMMU(hPrivate,
			                     &puFWParams->sMeta.sFWCodeDevVAddr,
			                     &puFWParams->sMeta.sFWDataDevVAddr,
			                     &pui32BootConf);
		}

		/* Process FW image data stream (.LDR format for META) */
		eError = ProcessLDRCommandStream(hPrivate,
		                                 pbRGXFirmware,
		                                 pvFWCode,
		                                 pvFWData,
		                                 pvFWCorememCode,
		                                 pvFWCorememData,
		                                 &pui32BootConf);
		if (eError != PVRSRV_OK)
		{
			RGXErrorLog(hPrivate, "RGXProcessFWImage: Processing FW image failed (%d)", eError);
			return eError;
		}

		/* Skip bootloader configuration if a pointer to the FW code
		 * allocation is not available
		 */
		if (pvFWCode)
		{
			IMG_UINT32 ui32NumThreads = puFWParams->sMeta.ui32NumThreads;

			/* Only 1 or 2 META threads are supported; fall back to 1 */
			if ((ui32NumThreads == 0) || (ui32NumThreads > 2))
			{
				RGXErrorLog(hPrivate,
				            "ProcessFWImage: Wrong Meta threads configuration, using one thread only");

				ui32NumThreads = 1;
			}

			RGXFWConfigureMetaCaches(hPrivate,
			                         ui32NumThreads,
			                         &pui32BootConf);

			/* Signal the end of the conf sequence */
			*pui32BootConf++ = 0x0;
			*pui32BootConf++ = 0x0;

			/* Coremem code location/size words follow the end marker;
			 * zeroed when no coremem code is in use.
			 */
			if (puFWParams->sMeta.uiFWCorememCodeSize && (puFWParams->sMeta.sFWCorememCodeFWAddr.ui32Addr != 0))
			{
				*pui32BootConf++ = puFWParams->sMeta.sFWCorememCodeFWAddr.ui32Addr;
				*pui32BootConf++ = puFWParams->sMeta.uiFWCorememCodeSize;
			}
			else
			{
				*pui32BootConf++ = 0;
				*pui32BootConf++ = 0;
			}

			/* With META DMA, also pass the 64-bit device VA of the coremem
			 * code (high word first).
			 */
			if (RGX_DEVICE_HAS_FEATURE(hPrivate, META_DMA))
			{
				*pui32BootConf++ = (IMG_UINT32) (puFWParams->sMeta.sFWCorememCodeDevVAddr.uiAddr >> 32);
				*pui32BootConf++ = (IMG_UINT32) puFWParams->sMeta.sFWCorememCodeDevVAddr.uiAddr;
			}
			else
			{
				*pui32BootConf++ = 0;
				*pui32BootConf++ = 0;
			}
		}
	}
#if defined(RGXMIPSFW_MAX_NUM_PAGETABLE_PAGES)
	else if (bMIPS)
	{
		/* Process FW image data stream (ELF; MIPS has no coremem) */
		eError = ProcessELFCommandStream(hPrivate,
		                                 pbRGXFirmware,
		                                 pvFWCode,
		                                 pvFWData,
		                                 NULL,
		                                 NULL);
		if (eError != PVRSRV_OK)
		{
			RGXErrorLog(hPrivate, "RGXProcessFWImage: Processing FW image failed (%d)", eError);
			return eError;
		}

		if (pvFWData)
		{
			RGXMIPSFW_BOOT_DATA *psBootData = (RGXMIPSFW_BOOT_DATA*)
			    /* To get a pointer to the bootloader configuration data start from a pointer to the FW image... */
			    IMG_OFFSET_ADDR(pvFWData,
			    /* ... jump to the boot/NMI data page... */
			    (RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA)
			    /* ... and then jump to the bootloader data offset within the page */
			    + RGXMIPSFW_BOOTLDR_CONF_OFFSET));

			/* Rogue Registers physical address */
			psBootData->ui64RegBase = puFWParams->sMips.sGPURegAddr.uiAddr;

			/* MIPS Page Table physical address */
			psBootData->ui32PTLog2PageSize = puFWParams->sMips.ui32FWPageTableLog2PageSize;
			psBootData->ui32PTNumPages = puFWParams->sMips.ui32FWPageTableNumPages;
			psBootData->aui64PTPhyAddr[0U] = puFWParams->sMips.asFWPageTableAddr[0U].uiAddr;
			psBootData->aui64PTPhyAddr[1U] = puFWParams->sMips.asFWPageTableAddr[1U].uiAddr;
			psBootData->aui64PTPhyAddr[2U] = puFWParams->sMips.asFWPageTableAddr[2U].uiAddr;
			psBootData->aui64PTPhyAddr[3U] = puFWParams->sMips.asFWPageTableAddr[3U].uiAddr;

			/* MIPS Stack Pointer Physical Address */
			psBootData->ui64StackPhyAddr = puFWParams->sMips.sFWStackAddr.uiAddr;

			/* Reserved for future use */
			psBootData->ui32Reserved1 = 0;
			psBootData->ui32Reserved2 = 0;
		}
	}
#endif /* #if defined(RGXMIPSFW_MAX_NUM_PAGETABLE_PAGES) */
	else
	{
		/* RISC-V: process FW image data stream (ELF, with coremem) */
		eError = ProcessELFCommandStream(hPrivate,
		                                 pbRGXFirmware,
		                                 pvFWCode,
		                                 pvFWData,
		                                 pvFWCorememCode,
		                                 pvFWCorememData);
		if (eError != PVRSRV_OK)
		{
			RGXErrorLog(hPrivate, "RGXProcessFWImage: Processing FW image failed (%d)", eError);
			return eError;
		}

		if (pvFWData)
		{
			/* Fill the RISC-V boot data block inside the FW data allocation */
			RGXRISCVFW_BOOT_DATA *psBootData = (RGXRISCVFW_BOOT_DATA*)
			    IMG_OFFSET_ADDR(pvFWData, RGXRISCVFW_BOOTLDR_CONF_OFFSET);

			psBootData->ui64CorememCodeDevVAddr = puFWParams->sRISCV.sFWCorememCodeDevVAddr.uiAddr;
			psBootData->ui32CorememCodeFWAddr   = puFWParams->sRISCV.sFWCorememCodeFWAddr.ui32Addr;
			psBootData->ui32CorememCodeSize     = puFWParams->sRISCV.uiFWCorememCodeSize;

			psBootData->ui64CorememDataDevVAddr = puFWParams->sRISCV.sFWCorememDataDevVAddr.uiAddr;
			psBootData->ui32CorememDataFWAddr   = puFWParams->sRISCV.sFWCorememDataFWAddr.ui32Addr;
			psBootData->ui32CorememDataSize     = puFWParams->sRISCV.uiFWCorememDataSize;
		}
	}

	return eError;
}
+ } + + return eError; +} diff --git a/drivers/gpu/drm/phytium/octopus/rgxfwimageutils.h b/drivers/gpu/drm/phytium/octopus/rgxfwimageutils.h new file mode 100644 index 000000000000..539ca4493b0b --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxfwimageutils.h @@ -0,0 +1,262 @@ +/*************************************************************************/ /*! +@File +@Title Header for Services Firmware image utilities used at init time +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Header for Services Firmware image utilities used at init time +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXFWIMAGEUTILS_H +#define RGXFWIMAGEUTILS_H + +/* The routines declared here are built on top of an abstraction layer to + * hide DDK/OS-specific details in case they are used outside of the DDK + * (e.g. when DRM security is enabled). + * Any new dependency should be added to rgxlayer.h. + * Any new code should be built on top of the existing abstraction layer, + * which should be extended when necessary. 
+ */ +#include "rgxlayer.h" + + +typedef union _RGX_FW_BOOT_PARAMS_ +{ + struct + { + IMG_DEV_VIRTADDR sFWCodeDevVAddr; + IMG_DEV_VIRTADDR sFWDataDevVAddr; + IMG_DEV_VIRTADDR sFWCorememCodeDevVAddr; + RGXFWIF_DEV_VIRTADDR sFWCorememCodeFWAddr; + IMG_DEVMEM_SIZE_T uiFWCorememCodeSize; + IMG_DEV_VIRTADDR sFWCorememDataDevVAddr; + RGXFWIF_DEV_VIRTADDR sFWCorememDataFWAddr; + IMG_UINT32 ui32NumThreads; + } sMeta; + +#if defined(RGXMIPSFW_MAX_NUM_PAGETABLE_PAGES) + struct + { + IMG_DEV_PHYADDR sGPURegAddr; + IMG_DEV_PHYADDR asFWPageTableAddr[RGXMIPSFW_MAX_NUM_PAGETABLE_PAGES]; + IMG_DEV_PHYADDR sFWStackAddr; + IMG_UINT32 ui32FWPageTableLog2PageSize; + IMG_UINT32 ui32FWPageTableNumPages; + } sMips; +#endif + + struct + { + IMG_DEV_VIRTADDR sFWCorememCodeDevVAddr; + RGXFWIF_DEV_VIRTADDR sFWCorememCodeFWAddr; + IMG_DEVMEM_SIZE_T uiFWCorememCodeSize; + + IMG_DEV_VIRTADDR sFWCorememDataDevVAddr; + RGXFWIF_DEV_VIRTADDR sFWCorememDataFWAddr; + IMG_DEVMEM_SIZE_T uiFWCorememDataSize; + } sRISCV; + +} RGX_FW_BOOT_PARAMS; + +/*! +******************************************************************************* + + @Function RGXGetFWImageSectionOffset + + @Input hPrivate : Implementation specific data + @Input eId : Section id + + @Description Return offset of a Firmware section, relative to the beginning + of the code or data allocation (depending on the section id) + +******************************************************************************/ +IMG_UINT32 RGXGetFWImageSectionOffset(const void *hPrivate, + RGX_FW_SECTION_ID eId); + +/*! 
+******************************************************************************* + + @Function RGXGetFWImageSectionMaxSize + + @Input hPrivate : Implementation specific data + @Input eId : Section id + + @Description Return maximum size (not allocation size) of a Firmware section + +******************************************************************************/ +IMG_UINT32 RGXGetFWImageSectionMaxSize(const void *hPrivate, + RGX_FW_SECTION_ID eId); + +/*! +******************************************************************************* + + @Function RGXGetFWImageSectionAllocSize + + @Input hPrivate : Implementation specific data + @Input eId : Section id + + @Description Return allocation size of a Firmware section + +******************************************************************************/ +IMG_UINT32 RGXGetFWImageSectionAllocSize(const void *hPrivate, + RGX_FW_SECTION_ID eId); + +/*! +******************************************************************************* + + @Function RGXGetFWImageSectionAddress + + @Input hPrivate : Implementation specific data + @Input eId : Section id + + @Description Return base address of a Firmware section + +******************************************************************************/ +IMG_UINT32 RGXGetFWImageSectionAddress(const void *hPrivate, + RGX_FW_SECTION_ID eId); + +/*! 
+******************************************************************************* + + @Function RGXGetFWImageAllocSize + + @Description Return size of Firmware code/data/coremem code allocations + + @Input hPrivate : Implementation specific data + @Input pbRGXFirmware : Pointer to FW binary + @Input ui32RGXFirmwareSize : FW binary size + @Output puiFWCodeAllocSize : Code size + @Output puiFWDataAllocSize : Data size + @Output puiFWCorememCodeAllocSize : Coremem code size (0 if N/A) + @Output puiFWCorememDataAllocSize : Coremem data size (0 if N/A) + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXGetFWImageAllocSize(const void *hPrivate, + const IMG_BYTE *pbRGXFirmware, + const IMG_UINT32 ui32RGXFirmwareSize, + IMG_DEVMEM_SIZE_T *puiFWCodeAllocSize, + IMG_DEVMEM_SIZE_T *puiFWDataAllocSize, + IMG_DEVMEM_SIZE_T *puiFWCorememCodeAllocSize, + IMG_DEVMEM_SIZE_T *puiFWCorememDataAllocSize); + +/*! +******************************************************************************* + + @Function ProcessLDRCommandStream + + @Description Process the output of the Meta toolchain in the .LDR format + copying code and data sections into their final location and + passing some information to the Meta bootloader + + @Input hPrivate : Implementation specific data + @Input pbLDR : Pointer to FW blob + @Input pvHostFWCodeAddr : Pointer to FW code + @Input pvHostFWDataAddr : Pointer to FW data + @Input pvHostFWCorememCodeAddr : Pointer to FW coremem code + @Input pvHostFWCorememDataAddr : Pointer to FW coremem data + @Input ppui32BootConf : Pointer to bootloader data + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR ProcessLDRCommandStream(const void *hPrivate, + const IMG_BYTE* pbLDR, + void* pvHostFWCodeAddr, + void* pvHostFWDataAddr, + void* pvHostFWCorememCodeAddr, + void* pvHostFWCorememDataAddr, + IMG_UINT32 **ppui32BootConf); + +/*! 
+******************************************************************************* + + @Function ProcessELFCommandStream + + @Description Process a file in .ELF format copying code and data sections + into their final location + + @Input hPrivate : Implementation specific data + @Input pbELF : Pointer to FW blob + @Input pvHostFWCodeAddr : Pointer to FW code + @Input pvHostFWDataAddr : Pointer to FW data + @Input pvHostFWCorememCodeAddr : Pointer to FW coremem code + @Input pvHostFWCorememDataAddr : Pointer to FW coremem data + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR ProcessELFCommandStream(const void *hPrivate, + const IMG_BYTE *pbELF, + void *pvHostFWCodeAddr, + void *pvHostFWDataAddr, + void* pvHostFWCorememCodeAddr, + void* pvHostFWCorememDataAddr); + +/*! +******************************************************************************* + + @Function RGXProcessFWImage + + @Description Process the Firmware binary blob copying code and data + sections into their final location and passing some + information to the Firmware bootloader. + If a pointer to the final memory location for FW code or data + is not valid (NULL) then the relative section will not be + processed. 
+ + @Input hPrivate : Implementation specific data + @Input pbRGXFirmware : Pointer to FW blob + @Input pvFWCode : Pointer to FW code + @Input pvFWData : Pointer to FW data + @Input pvFWCorememCode : Pointer to FW coremem code + @Input pvFWCorememData : Pointer to FW coremem data + @Input puFWParams : Parameters used by the FW at boot time + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXProcessFWImage(const void *hPrivate, + const IMG_BYTE *pbRGXFirmware, + void *pvFWCode, + void *pvFWData, + void *pvFWCorememCode, + void *pvFWCorememData, + RGX_FW_BOOT_PARAMS *puFWParams); + +#endif /* RGXFWIMAGEUTILS_H */ diff --git a/drivers/gpu/drm/phytium/octopus/rgxfwtrace_strings.c b/drivers/gpu/drm/phytium/octopus/rgxfwtrace_strings.c new file mode 100644 index 000000000000..a948442603b0 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxfwtrace_strings.c @@ -0,0 +1,56 @@ +/*************************************************************************/ /*! +@File rgxfwtrace_strings.c +@Title RGX Firmware trace strings +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "img_defs.h" +#include "rgx_fwif_sf.h" +#include "fwtrace_string.h" + +/* The tuple pairs that will be generated using XMacros will be stored here. 
+ * This macro definition must match the definition of SFids in rgx_fwif_sf.h + */ +const RGXKM_STID_FMT SFs[]= { +#define X(a, b, c, d, e) { RGXFW_LOG_CREATESFID(a,b,e), d }, + RGXFW_LOG_SFIDLIST +#undef X +}; + +const IMG_UINT32 g_ui32SFsCount = ARRAY_SIZE(SFs); diff --git a/drivers/gpu/drm/phytium/octopus/rgxfwutils.c b/drivers/gpu/drm/phytium/octopus/rgxfwutils.c new file mode 100644 index 000000000000..8d980cd6ebe8 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxfwutils.c @@ -0,0 +1,7548 @@ +/*************************************************************************/ /*! +@File +@Title Rogue firmware utility routines +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Rogue firmware utility routines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if defined(__linux__) +#include +#else +#include +#endif + +#include "img_defs.h" + +#include "rgxdefs_km.h" +#include "rgx_fwif_km.h" +#include "pdump_km.h" +#include "osfunc.h" +#if defined(__linux__) +#include "km_apphint.h" +#endif +#include "cache_km.h" +#include "allocmem.h" +#include "physheap.h" +#include "devicemem.h" +#include "devicemem_pdump.h" +#include "devicemem_server.h" + +#include "pvr_debug.h" +#include "pvr_notifier.h" +#include "rgxfwutils.h" +#include "rgx_options.h" +#include "rgx_fwif_alignchecks.h" +#include "rgx_fwif_resetframework.h" +#include "rgx_pdump_panics.h" +#include "fwtrace_string.h" +#include "rgxheapconfig.h" +#include "pvrsrv.h" +#include "rgxdebug.h" +#include "rgxhwperf.h" +#include "rgxccb.h" +#include "rgxcompute.h" +#include "rgxtdmtransfer.h" +#include "rgxpower.h" +#if defined(SUPPORT_DISPLAY_CLASS) +#include "dc_server.h" +#endif +#include "rgxmem.h" +#include "rgxta3d.h" +#include "rgxkicksync.h" +#include "rgxutils.h" +#include "rgxtimecorr.h" +#include "rgxfwimageutils.h" +#include "sync_internal.h" +#include "sync.h" +#include "sync_checkpoint.h" +#include "sync_checkpoint_external.h" +#include "tlstream.h" +#include "devicemem_server_utils.h" +#include "htbuffer.h" +#include "info_page.h" + +#include "physmem_lma.h" +#include "physmem_osmem.h" +#include "oskm_apphint.h" + +#ifdef __linux__ +#include /* sprintf */ +#include "rogue_trace_events.h" +#else +#include +#endif +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#include "process_stats.h" +#endif + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) +#include "rgxworkest.h" +#endif + +#if defined(SUPPORT_PDVFS) +#include "rgxpdvfs.h" +#endif + +#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) +#include "validation_soc.h" +#endif + +#include "vz_vmm_pvz.h" +#include "rgx_heaps.h" + +/*! 
+ ****************************************************************************** + * HWPERF + *****************************************************************************/ +/* Size of the Firmware L1 HWPERF buffer in bytes (2MB). Accessed by the + * Firmware and host driver. */ +#define RGXFW_HWPERF_L1_SIZE_MIN (16U) +#define RGXFW_HWPERF_L1_SIZE_DEFAULT PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB +#define RGXFW_HWPERF_L1_SIZE_MAX (12288U) +#if defined(DEBUG) +/* Catch the use of auto-increment when meta_registers_unpacked_accesses feature is + * present in case we ever use it. No WA exists so it must not be used */ +#define CHECK_HWBRN_68777(v) \ + do { \ + PVR_ASSERT(((v) & RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_EN) == 0); \ + } while (0) +#else +#define CHECK_HWBRN_68777(v) +#endif + +/* Kernel CCB length */ +#define RGXFW_KCCB_SIZE_MIN_LOG2 (4) +#define RGXFW_KCCB_SIZE_MAX_LOG2 (16) + +#if PVRSRV_APPHINT_KCCB_SIZE_LOG2 > RGXFW_KCCB_SIZE_MAX_LOG2 +#define RGXFWIF_KCCB_NUMCMDS_LOG2 RGXFW_KCCB_SIZE_MAX_LOG2 +#warning PVRSRV_APPHINT_KCCB_SIZE_LOG2 is too high. +#elif PVRSRV_APPHINT_KCCB_SIZE_LOG2 < RGXFW_KCCB_SIZE_MIN_LOG2 +#define RGXFWIF_KCCB_NUMCMDS_LOG2 RGXFW_KCCB_SIZE_MIN_LOG2 +#warning PVRSRV_APPHINT_KCCB_SIZE_LOG2 is too low. +#else +#define RGXFWIF_KCCB_NUMCMDS_LOG2 PVRSRV_APPHINT_KCCB_SIZE_LOG2 +#endif + +/* Firmware CCB length */ +#if defined(NO_HARDWARE) && defined(PDUMP) +#define RGXFWIF_FWCCB_NUMCMDS_LOG2 (10) +#elif defined(SUPPORT_PDVFS) || defined(SUPPORT_WORKLOAD_ESTIMATION) +#define RGXFWIF_FWCCB_NUMCMDS_LOG2 (8) +#else +#define RGXFWIF_FWCCB_NUMCMDS_LOG2 (5) +#endif + +/* + * Maximum length of time a DM can run for before the DM will be marked + * as out-of-time. CDM has an increased value due to longer running kernels. + * + * These deadlines are increased on FPGA, EMU and VP due to the slower + * execution time of these platforms. PDUMPS are also included since they + * are often run on EMU, FPGA or in CSim. 
+ */ +#if defined(FPGA) || defined(EMULATOR) || defined(VIRTUAL_PLATFORM) || defined(PDUMP) +#define RGXFWIF_MAX_WORKLOAD_DEADLINE_MS (60000) +#define RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS (1000000) +#else +#define RGXFWIF_MAX_WORKLOAD_DEADLINE_MS (30000) +#define RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS (90000) +#endif + +/* Workload Estimation Firmware CCB length */ +#define RGXFWIF_WORKEST_FWCCB_NUMCMDS_LOG2 (7) + +/* Size of memory buffer for firmware gcov data + * The actual data size is several hundred kilobytes. The buffer is an order of magnitude larger. */ +#define RGXFWIF_FIRMWARE_GCOV_BUFFER_SIZE (4*1024*1024) + +typedef struct +{ + RGXFWIF_KCCB_CMD sKCCBcmd; + DLLIST_NODE sListNode; + PDUMP_FLAGS_T uiPdumpFlags; + PVRSRV_RGXDEV_INFO *psDevInfo; +} RGX_DEFERRED_KCCB_CMD; + +#if defined(PDUMP) +/* ensure PIDs are 32-bit because a 32-bit PDump load is generated for the + * PID filter example entries + */ +static_assert(sizeof(IMG_PID) == sizeof(IMG_UINT32), + "FW PID filtering assumes the IMG_PID type is 32-bits wide as it " + "generates WRW commands for loading the PID values"); +#endif + +static void RGXFreeFwOsData(PVRSRV_RGXDEV_INFO *psDevInfo); +static void RGXFreeFwSysData(PVRSRV_RGXDEV_INFO *psDevInfo); + +static PVRSRV_ERROR _AllocateSLC3Fence(PVRSRV_RGXDEV_INFO* psDevInfo, RGXFWIF_SYSINIT* psFwSysInit) +{ + PVRSRV_ERROR eError; + DEVMEM_MEMDESC** ppsSLC3FenceMemDesc = &psDevInfo->psSLC3FenceMemDesc; + IMG_UINT32 ui32CacheLineSize = GET_ROGUE_CACHE_LINE_SIZE( + RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS)); + + PVR_DPF_ENTERED; + + eError = DevmemAllocate(psDevInfo->psFirmwareMainHeap, + 1, + ui32CacheLineSize, + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), + "FwSLC3FenceWA", + ppsSLC3FenceMemDesc); + if (eError != PVRSRV_OK) + { + PVR_DPF_RETURN_RC(eError); + } + + /* We need to map it so the heap for this allocation is set */ + 
eError = DevmemMapToDevice(*ppsSLC3FenceMemDesc, + psDevInfo->psFirmwareMainHeap, + &psFwSysInit->sSLC3FenceDevVAddr); + if (eError != PVRSRV_OK) + { + DevmemFree(*ppsSLC3FenceMemDesc); + *ppsSLC3FenceMemDesc = NULL; + } + + PVR_DPF_RETURN_RC1(eError, *ppsSLC3FenceMemDesc); +} + +static void _FreeSLC3Fence(PVRSRV_RGXDEV_INFO* psDevInfo) +{ + DEVMEM_MEMDESC* psSLC3FenceMemDesc = psDevInfo->psSLC3FenceMemDesc; + + if (psSLC3FenceMemDesc) + { + DevmemReleaseDevVirtAddr(psSLC3FenceMemDesc); + DevmemFree(psSLC3FenceMemDesc); + } +} + +static void __MTSScheduleWrite(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Value) +{ + /* ensure memory is flushed before kicking MTS */ + OSWriteMemoryBarrier(); + + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_SCHEDULE, ui32Value); + + /* ensure the MTS kick goes through before continuing */ + OSMemoryBarrier(); +} + +/*************************************************************************/ /*! +@Function RGXSetupFwAllocation + +@Description Sets a pointer in a firmware data structure. 
+ +@Input psDevInfo Device Info struct +@Input uiAllocFlags Flags determining type of memory allocation +@Input ui32Size Size of memory allocation +@Input pszName Allocation label +@Input ppsMemDesc pointer to the allocation's memory descriptor +@Input psFwPtr Address of the firmware pointer to set +@Input ppvCpuPtr Address of the cpu pointer to set +@Input ui32DevVAFlags Any combination of RFW_FWADDR_*_FLAG + +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR RGXSetupFwAllocation(PVRSRV_RGXDEV_INFO* psDevInfo, + PVRSRV_MEMALLOCFLAGS_T uiAllocFlags, + IMG_UINT32 ui32Size, + const IMG_CHAR *pszName, + DEVMEM_MEMDESC **ppsMemDesc, + RGXFWIF_DEV_VIRTADDR *psFwPtr, + void **ppvCpuPtr, + IMG_UINT32 ui32DevVAFlags) +{ + PVRSRV_ERROR eError; +#if defined(SUPPORT_AUTOVZ) + IMG_BOOL bClearByMemset; + if (PVRSRV_CHECK_ZERO_ON_ALLOC(uiAllocFlags)) + { + /* Under AutoVz the ZERO_ON_ALLOC flag is avoided as it causes the memory to + * be allocated from a different PMR than an allocation without the flag. + * When the content of an allocation needs to be recovered from physical memory + * on a later driver reboot, the memory then cannot be zeroed but the allocation + * addresses must still match. + * If the memory requires clearing, perform a memset after the allocation. 
*/ + uiAllocFlags &= ~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC; + bClearByMemset = IMG_TRUE; + } + else + { + bClearByMemset = IMG_FALSE; + } +#endif + + PDUMPCOMMENT("Allocate %s", pszName); + eError = DevmemFwAllocate(psDevInfo, + ui32Size, + uiAllocFlags, + pszName, + ppsMemDesc); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate %u bytes for %s (%u)", + __func__, + ui32Size, + pszName, + eError)); + goto fail_alloc; + } + + if (psFwPtr) + { + eError = RGXSetFirmwareAddress(psFwPtr, *ppsMemDesc, 0, ui32DevVAFlags); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire firmware virtual address for %s (%u)", + __func__, + pszName, + eError)); + goto fail_fwaddr; + } + } + +#if defined(SUPPORT_AUTOVZ) + if ((bClearByMemset) || (ppvCpuPtr)) +#else + if (ppvCpuPtr) +#endif + { + void *pvTempCpuPtr; + + eError = DevmemAcquireCpuVirtAddr(*ppsMemDesc, &pvTempCpuPtr); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire CPU virtual address for %s (%u)", + __func__, + pszName, + eError)); + goto fail_cpuva; + } + +#if defined(SUPPORT_AUTOVZ) + if (bClearByMemset) + { + if (PVRSRV_CHECK_CPU_WRITE_COMBINE(uiAllocFlags)) + { + OSCachedMemSetWMB(pvTempCpuPtr, 0, ui32Size); + } + else + { + OSDeviceMemSet(pvTempCpuPtr, 0, ui32Size); + } + } + if (ppvCpuPtr) +#endif + { + *ppvCpuPtr = pvTempCpuPtr; + } +#if defined(SUPPORT_AUTOVZ) + else + { + DevmemReleaseCpuVirtAddr(*ppsMemDesc); + pvTempCpuPtr = NULL; + } +#endif + } + + PVR_DPF((PVR_DBG_MESSAGE, "%s: %s set up at Fw VA 0x%x and CPU VA 0x%p with alloc flags 0x%" IMG_UINT64_FMTSPECX, + __func__, pszName, + (psFwPtr) ? (psFwPtr->ui32Addr) : (0), + (ppvCpuPtr) ? 
(*ppvCpuPtr) : (NULL), + uiAllocFlags)); + + return eError; + +fail_cpuva: + if (psFwPtr) + { + RGXUnsetFirmwareAddress(*ppsMemDesc); + } +fail_fwaddr: + DevmemFree(*ppsMemDesc); +fail_alloc: + return eError; +} + +/*************************************************************************/ /*! +@Function GetHwPerfBufferSize + +@Description Computes the effective size of the HW Perf Buffer +@Input ui32HWPerfFWBufSizeKB Device Info struct +@Return HwPerfBufferSize +*/ /**************************************************************************/ +static IMG_UINT32 GetHwPerfBufferSize(IMG_UINT32 ui32HWPerfFWBufSizeKB) +{ + IMG_UINT32 HwPerfBufferSize; + + /* HWPerf: Determine the size of the FW buffer */ + if (ui32HWPerfFWBufSizeKB == 0 || + ui32HWPerfFWBufSizeKB == RGXFW_HWPERF_L1_SIZE_DEFAULT) + { + /* Under pvrsrvctl 0 size implies AppHint not set or is set to zero, + * use default size from driver constant. Set it to the default + * size, no logging. + */ + HwPerfBufferSize = RGXFW_HWPERF_L1_SIZE_DEFAULT<<10; + } + else if (ui32HWPerfFWBufSizeKB > (RGXFW_HWPERF_L1_SIZE_MAX)) + { + /* Size specified as a AppHint but it is too big */ + PVR_DPF((PVR_DBG_WARNING, + "%s: HWPerfFWBufSizeInKB value (%u) too big, using maximum (%u)", + __func__, + ui32HWPerfFWBufSizeKB, RGXFW_HWPERF_L1_SIZE_MAX)); + HwPerfBufferSize = RGXFW_HWPERF_L1_SIZE_MAX<<10; + } + else if (ui32HWPerfFWBufSizeKB > (RGXFW_HWPERF_L1_SIZE_MIN)) + { + /* Size specified as in AppHint HWPerfFWBufSizeInKB */ + PVR_DPF((PVR_DBG_WARNING, + "%s: Using HWPerf FW buffer size of %u KB", + __func__, + ui32HWPerfFWBufSizeKB)); + HwPerfBufferSize = ui32HWPerfFWBufSizeKB<<10; + } + else + { + /* Size specified as a AppHint but it is too small */ + PVR_DPF((PVR_DBG_WARNING, + "%s: HWPerfFWBufSizeInKB value (%u) too small, using minimum (%u)", + __func__, + ui32HWPerfFWBufSizeKB, RGXFW_HWPERF_L1_SIZE_MIN)); + HwPerfBufferSize = RGXFW_HWPERF_L1_SIZE_MIN<<10; + } + + return HwPerfBufferSize; +} + +#if defined(PDUMP) +/*! 
+******************************************************************************* + @Function RGXFWSetupSignatureChecks + @Description + @Input psDevInfo + + @Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR RGXFWSetupSignatureChecks(PVRSRV_RGXDEV_INFO* psDevInfo, + DEVMEM_MEMDESC** ppsSigChecksMemDesc, + IMG_UINT32 ui32SigChecksBufSize, + RGXFWIF_SIGBUF_CTL* psSigBufCtl) +{ + PVRSRV_ERROR eError; + + /* Allocate memory for the checks */ + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS, + ui32SigChecksBufSize, + "FwSignatureChecks", + ppsSigChecksMemDesc, + &psSigBufCtl->sBuffer, + NULL, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); + + DevmemPDumpLoadMem( *ppsSigChecksMemDesc, + 0, + ui32SigChecksBufSize, + PDUMP_FLAGS_CONTINUOUS); + + psSigBufCtl->ui32LeftSizeInRegs = ui32SigChecksBufSize / sizeof(IMG_UINT32); +fail: + return eError; +} +#endif + + +#if defined(SUPPORT_FIRMWARE_GCOV) +/*! 
+******************************************************************************* + @Function RGXFWSetupFirmwareGcovBuffer + @Description + @Input psDevInfo + + @Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR RGXFWSetupFirmwareGcovBuffer(PVRSRV_RGXDEV_INFO* psDevInfo, + DEVMEM_MEMDESC** ppsBufferMemDesc, + IMG_UINT32 ui32FirmwareGcovBufferSize, + RGXFWIF_FIRMWARE_GCOV_CTL* psFirmwareGcovCtl, + const IMG_CHAR* pszBufferName) +{ + PVRSRV_ERROR eError; + + /* Allocate memory for gcov */ + eError = RGXSetupFwAllocation(psDevInfo, + (RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)), + ui32FirmwareGcovBufferSize, + pszBufferName, + ppsBufferMemDesc, + &psFirmwareGcovCtl->sBuffer, + NULL, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); + + psFirmwareGcovCtl->ui32Size = ui32FirmwareGcovBufferSize; + + return PVRSRV_OK; +} +#endif + +/*! +******************************************************************************* + @Function RGXFWSetupAlignChecks + @Description This functions allocates and fills memory needed for the + aligns checks of the UM and KM structures shared with the + firmware. The format of the data in the memory is as follows: + + + + + The UM array is passed from the user side. Now the firmware is + is responsible for filling this part of the memory. If that + happens the check of the UM structures will be performed + by the host driver on client's connect. + If the macro is not defined the client driver fills the memory + and the firmware checks for the alignment of all structures. 
+ @Input psDeviceNode + + @Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR RGXFWSetupAlignChecks(PVRSRV_DEVICE_NODE *psDeviceNode, + RGXFWIF_DEV_VIRTADDR *psAlignChecksDevFW) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + IMG_UINT32 aui32RGXFWAlignChecksKM[] = { RGXFW_ALIGN_CHECKS_INIT_KM }; + IMG_UINT32 ui32RGXFWAlignChecksTotal; + IMG_UINT32* paui32AlignChecks; + PVRSRV_ERROR eError; + + /* In this case we don't know the number of elements in UM array. + * We have to assume something so we assume RGXFW_ALIGN_CHECKS_UM_MAX. + */ + ui32RGXFWAlignChecksTotal = sizeof(aui32RGXFWAlignChecksKM) + + RGXFW_ALIGN_CHECKS_UM_MAX * sizeof(IMG_UINT32) + + 2 * sizeof(IMG_UINT32); + + /* Allocate memory for the checks */ + eError = RGXSetupFwAllocation(psDevInfo, + (RGX_FWINITDATA_WC_ALLOCFLAGS & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp)), + ui32RGXFWAlignChecksTotal, + "FwAlignmentChecks", + &psDevInfo->psRGXFWAlignChecksMemDesc, + psAlignChecksDevFW, + (void**) &paui32AlignChecks, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); + + if (!psDeviceNode->bAutoVzFwIsUp) + { + /* Copy the values */ + *paui32AlignChecks++ = ARRAY_SIZE(aui32RGXFWAlignChecksKM); + OSCachedMemCopy(paui32AlignChecks, &aui32RGXFWAlignChecksKM[0], + sizeof(aui32RGXFWAlignChecksKM)); + paui32AlignChecks += ARRAY_SIZE(aui32RGXFWAlignChecksKM); + + *paui32AlignChecks = 0; + } + + OSWriteMemoryBarrier(); + + DevmemPDumpLoadMem( psDevInfo->psRGXFWAlignChecksMemDesc, + 0, + ui32RGXFWAlignChecksTotal, + PDUMP_FLAGS_CONTINUOUS); + + return PVRSRV_OK; + +fail: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +static void RGXFWFreeAlignChecks(PVRSRV_RGXDEV_INFO* psDevInfo) +{ + if (psDevInfo->psRGXFWAlignChecksMemDesc != NULL) + { + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc); + DevmemFwUnmapAndFree(psDevInfo, 
psDevInfo->psRGXFWAlignChecksMemDesc); + psDevInfo->psRGXFWAlignChecksMemDesc = NULL; + } +} + +PVRSRV_ERROR RGXSetFirmwareAddress(RGXFWIF_DEV_VIRTADDR *ppDest, + DEVMEM_MEMDESC *psSrc, + IMG_UINT32 uiExtraOffset, + IMG_UINT32 ui32Flags) +{ + PVRSRV_ERROR eError; + IMG_DEV_VIRTADDR psDevVirtAddr; + PVRSRV_DEVICE_NODE *psDeviceNode; + PVRSRV_RGXDEV_INFO *psDevInfo; + + psDeviceNode = (PVRSRV_DEVICE_NODE *) DevmemGetConnection(psSrc); + psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; + + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) + { + IMG_UINT32 ui32Offset; + IMG_BOOL bCachedInMETA; + PVRSRV_MEMALLOCFLAGS_T uiDevFlags; + IMG_UINT32 uiGPUCacheMode; + + eError = DevmemAcquireDevVirtAddr(psSrc, &psDevVirtAddr); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireDevVirtAddr", failDevVAAcquire); + + /* Convert to an address in META memmap */ + ui32Offset = psDevVirtAddr.uiAddr + uiExtraOffset - RGX_FIRMWARE_RAW_HEAP_BASE; + + /* Check in the devmem flags whether this memory is cached/uncached */ + DevmemGetFlags(psSrc, &uiDevFlags); + + /* Honour the META cache flags */ + bCachedInMETA = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) & uiDevFlags) != 0; + + /* Honour the SLC cache flags */ + eError = DevmemDeviceCacheMode(psDeviceNode, uiDevFlags, &uiGPUCacheMode); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemDeviceCacheMode", failDevCacheMode); + + /* + * Choose Meta virtual address based on Meta and SLC cacheability. 
+ */ + ui32Offset += RGXFW_SEGMMU_DATA_BASE_ADDRESS; + + if (bCachedInMETA) + { + ui32Offset |= RGXFW_SEGMMU_DATA_META_CACHED; + } + else + { + ui32Offset |= RGXFW_SEGMMU_DATA_META_UNCACHED; + } + + if (PVRSRV_CHECK_GPU_CACHED(uiGPUCacheMode)) + { + ui32Offset |= RGXFW_SEGMMU_DATA_VIVT_SLC_CACHED; + } + else + { + ui32Offset |= RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED; + } + + ppDest->ui32Addr = ui32Offset; + } + else + { + IMG_UINT32 ui32Offset; + IMG_BOOL bCachedInRISCV; + PVRSRV_MEMALLOCFLAGS_T uiDevFlags; + + eError = DevmemAcquireDevVirtAddr(psSrc, &psDevVirtAddr); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireDevVirtAddr", failDevVAAcquire); + + /* Convert to an address in RISCV memmap */ + ui32Offset = psDevVirtAddr.uiAddr + uiExtraOffset - RGX_FIRMWARE_RAW_HEAP_BASE; + + /* Check in the devmem flags whether this memory is cached/uncached */ + DevmemGetFlags(psSrc, &uiDevFlags); + + /* Honour the RISCV cache flags */ + bCachedInRISCV = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) & uiDevFlags) != 0; + + if (bCachedInRISCV) + { + ui32Offset |= RGXRISCVFW_SHARED_CACHED_DATA_BASE; + } + else + { + ui32Offset |= RGXRISCVFW_SHARED_UNCACHED_DATA_BASE; + } + + ppDest->ui32Addr = ui32Offset; + } + + if ((ppDest->ui32Addr & 0x3U) != 0) + { + IMG_CHAR *pszAnnotation; + /* It is expected that the annotation returned by DevmemGetAnnotation() is always valid */ + DevmemGetAnnotation(psSrc, &pszAnnotation); + + PVR_DPF((PVR_DBG_ERROR, "%s: %s @ 0x%x is not aligned to 32 bit", + __func__, pszAnnotation, ppDest->ui32Addr)); + + return PVRSRV_ERROR_INVALID_ALIGNMENT; + } + + if (ui32Flags & RFW_FWADDR_NOREF_FLAG) + { + DevmemReleaseDevVirtAddr(psSrc); + } + + return PVRSRV_OK; + +failDevCacheMode: + DevmemReleaseDevVirtAddr(psSrc); +failDevVAAcquire: + return eError; +} + +void RGXSetMetaDMAAddress(RGXFWIF_DMA_ADDR *psDest, + DEVMEM_MEMDESC *psSrcMemDesc, + RGXFWIF_DEV_VIRTADDR *psSrcFWDevVAddr, + IMG_UINT32 uiOffset) +{ + PVRSRV_ERROR eError; + IMG_DEV_VIRTADDR 
sDevVirtAddr; + + eError = DevmemAcquireDevVirtAddr(psSrcMemDesc, &sDevVirtAddr); + PVR_ASSERT(eError == PVRSRV_OK); + + psDest->psDevVirtAddr.uiAddr = sDevVirtAddr.uiAddr; + psDest->psDevVirtAddr.uiAddr += uiOffset; + psDest->pbyFWAddr.ui32Addr = psSrcFWDevVAddr->ui32Addr; + + DevmemReleaseDevVirtAddr(psSrcMemDesc); +} + + +void RGXUnsetFirmwareAddress(DEVMEM_MEMDESC *psSrc) +{ + DevmemReleaseDevVirtAddr(psSrc); +} + +PVRSRV_ERROR RGXWriteMetaRegThroughSP(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + PVRSRV_ERROR eError = PVRSRV_OK; + + /* Wait for Slave Port to be Ready */ + psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES)) + { + eError = RGXPollReg32(hPrivate, + RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES, + RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN); + if (eError == PVRSRV_OK) + { + /* Issue a Write */ + CHECK_HWBRN_68777(ui32RegAddr); + RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES, ui32RegAddr); + (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES); /* Fence write */ + RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT__META_REGISTER_UNPACKED_ACCESSES, ui32RegValue); + (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT__META_REGISTER_UNPACKED_ACCESSES); /* Fence write */ + } + } + else + { + eError = RGXPollReg32(hPrivate, + RGX_CR_META_SP_MSLVCTRL1, + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN); + if (eError == PVRSRV_OK) + { + /* Issue a Write */ + RGXWriteReg32(hPrivate, 
RGX_CR_META_SP_MSLVCTRL0, ui32RegAddr); + (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0); /* Fence write */ + RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT, ui32RegValue); + (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT); /* Fence write */ + } + } + + return eError; +} + +PVRSRV_ERROR RGXReadMetaRegThroughSP(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32* ui32RegValue) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + PVRSRV_ERROR eError = PVRSRV_OK; + + /* Wait for Slave Port to be Ready */ + psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES)) + { + eError = RGXPollReg32(hPrivate, + RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES, + RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN); + if (eError == PVRSRV_OK) + { + /* Issue a Read */ + CHECK_HWBRN_68777(ui32RegAddr); + RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES, + ui32RegAddr | RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES__RD_EN); + (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES); /* Fence write */ + + /* Wait for Slave Port to be Ready */ + eError = RGXPollReg32(hPrivate, + RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES, + RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN); + if (eError != PVRSRV_OK) return eError; + } +#if !defined(NO_HARDWARE) + *ui32RegValue = RGXReadReg32(hPrivate, 
RGX_CR_META_SP_MSLVDATAX__META_REGISTER_UNPACKED_ACCESSES); +#else + *ui32RegValue = 0xFFFFFFFF; +#endif + } + else + { + eError = RGXPollReg32(hPrivate, + RGX_CR_META_SP_MSLVCTRL1, + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN); + if (eError == PVRSRV_OK) + { + /* Issue a Read */ + RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0, ui32RegAddr | RGX_CR_META_SP_MSLVCTRL0_RD_EN); + (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0); /* Fence write */ + + /* Wait for Slave Port to be Ready */ + eError = RGXPollReg32(hPrivate, + RGX_CR_META_SP_MSLVCTRL1, + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN); + if (eError != PVRSRV_OK) return eError; + } +#if !defined(NO_HARDWARE) + *ui32RegValue = RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAX); +#else + *ui32RegValue = 0xFFFFFFFF; +#endif + } + + return eError; +} + + +struct _RGX_SERVER_COMMON_CONTEXT_ { + PVRSRV_RGXDEV_INFO *psDevInfo; + DEVMEM_MEMDESC *psFWCommonContextMemDesc; + PRGXFWIF_FWCOMMONCONTEXT sFWCommonContextFWAddr; + SERVER_MMU_CONTEXT *psServerMMUContext; + DEVMEM_MEMDESC *psFWMemContextMemDesc; + DEVMEM_MEMDESC *psFWFrameworkMemDesc; + DEVMEM_MEMDESC *psContextStateMemDesc; + RGX_CLIENT_CCB *psClientCCB; + DEVMEM_MEMDESC *psClientCCBMemDesc; + DEVMEM_MEMDESC *psClientCCBCtrlMemDesc; + IMG_BOOL bCommonContextMemProvided; + IMG_UINT32 ui32ContextID; + DLLIST_NODE sListNode; + RGX_CONTEXT_RESET_REASON eLastResetReason; + IMG_UINT32 ui32LastResetJobRef; + IMG_UINT32 ui32Priority; + RGX_CCB_REQUESTOR_TYPE eRequestor; +}; + +/*************************************************************************/ /*! 
+@Function _CheckPriority +@Description Check if priority is allowed for requestor type +@Input psDevInfo pointer to DevInfo struct +@Input ui32Priority Requested priority +@Input eRequestor Requestor type specifying data master +@Return PVRSRV_ERROR PVRSRV_OK on success +*/ /**************************************************************************/ +static PVRSRV_ERROR _CheckPriority(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32Priority, + RGX_CCB_REQUESTOR_TYPE eRequestor) +{ + /* Only one context allowed with real time priority (highest priority) */ + if (ui32Priority == RGX_CTX_PRIORITY_REALTIME) + { + DLLIST_NODE *psNode, *psNext; + + dllist_foreach_node(&psDevInfo->sCommonCtxtListHead, psNode, psNext) + { + RGX_SERVER_COMMON_CONTEXT *psThisContext = + IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode); + + if (psThisContext->ui32Priority == RGX_CTX_PRIORITY_REALTIME && + psThisContext->eRequestor == eRequestor) + { + PVR_LOG(("Only one context with real time priority allowed")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + } + } + + return PVRSRV_OK; +} + +PVRSRV_ERROR FWCommonContextAllocate(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor, + RGXFWIF_DM eDM, + SERVER_MMU_CONTEXT *psServerMMUContext, + DEVMEM_MEMDESC *psAllocatedMemDesc, + IMG_UINT32 ui32AllocatedOffset, + DEVMEM_MEMDESC *psFWMemContextMemDesc, + DEVMEM_MEMDESC *psContextStateMemDesc, + IMG_UINT32 ui32CCBAllocSize, + IMG_UINT32 ui32CCBMaxAllocSize, + IMG_UINT32 ui32ContextFlags, + IMG_UINT32 ui32Priority, + IMG_UINT32 ui32MaxDeadlineMS, + IMG_UINT64 ui64RobustnessAddress, + RGX_COMMON_CONTEXT_INFO *psInfo, + RGX_SERVER_COMMON_CONTEXT **ppsServerCommonContext) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; + RGXFWIF_FWCOMMONCONTEXT *psFWCommonContext; + IMG_UINT32 ui32FWCommonContextOffset; + IMG_UINT8 *pui8Ptr; + PVRSRV_ERROR eError; + + /* + * Allocate 
all the resources that are required + */ + psServerCommonContext = OSAllocMem(sizeof(*psServerCommonContext)); + if (psServerCommonContext == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_alloc; + } + + psServerCommonContext->psDevInfo = psDevInfo; + psServerCommonContext->psServerMMUContext = psServerMMUContext; + + if (psAllocatedMemDesc) + { + PDUMPCOMMENT("Using existing MemDesc for Rogue firmware %s context (offset = %d)", + aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], + ui32AllocatedOffset); + ui32FWCommonContextOffset = ui32AllocatedOffset; + psServerCommonContext->psFWCommonContextMemDesc = psAllocatedMemDesc; + psServerCommonContext->bCommonContextMemProvided = IMG_TRUE; + } + else + { + /* Allocate device memory for the firmware context */ + PDUMPCOMMENT("Allocate Rogue firmware %s context", aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT]); + eError = DevmemFwAllocate(psDevInfo, + sizeof(*psFWCommonContext), + RGX_FWCOMCTX_ALLOCFLAGS, + "FwContext", + &psServerCommonContext->psFWCommonContextMemDesc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate firmware %s context (%s)", + __func__, + aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], + PVRSRVGetErrorString(eError))); + goto fail_contextalloc; + } + ui32FWCommonContextOffset = 0; + psServerCommonContext->bCommonContextMemProvided = IMG_FALSE; + } + + /* Record this context so we can refer to it if the FW needs to tell us it was reset. 
*/ + psServerCommonContext->eLastResetReason = RGX_CONTEXT_RESET_REASON_NONE; + psServerCommonContext->ui32LastResetJobRef = 0; + psServerCommonContext->ui32ContextID = psDevInfo->ui32CommonCtxtCurrentID++; + + /* + * Temporarily map the firmware context to the kernel and initialise it + */ + eError = DevmemAcquireCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc, + (void **)&pui8Ptr); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map firmware %s context to CPU (%s)", + __func__, + aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], + PVRSRVGetErrorString(eError))); + goto fail_cpuvirtacquire; + } + + /* Allocate the client CCB */ + eError = RGXCreateCCB(psDevInfo, + ui32CCBAllocSize, + ui32CCBMaxAllocSize, + ui32ContextFlags, + psConnection, + eRGXCCBRequestor, + psServerCommonContext, + &psServerCommonContext->psClientCCB, + &psServerCommonContext->psClientCCBMemDesc, + &psServerCommonContext->psClientCCBCtrlMemDesc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed to create CCB for %s context (%s)", + __func__, + aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], + PVRSRVGetErrorString(eError))); + goto fail_allocateccb; + } + + psFWCommonContext = (RGXFWIF_FWCOMMONCONTEXT *) (pui8Ptr + ui32FWCommonContextOffset); + psFWCommonContext->eDM = eDM; + + /* Set the firmware CCB device addresses in the firmware common context */ + eError = RGXSetFirmwareAddress(&psFWCommonContext->psCCB, + psServerCommonContext->psClientCCBMemDesc, + 0, RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:1", fail_cccbfwaddr); + + eError = RGXSetFirmwareAddress(&psFWCommonContext->psCCBCtl, + psServerCommonContext->psClientCCBCtrlMemDesc, + 0, RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:2", fail_cccbctrlfwaddr); + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA)) + { + RGXSetMetaDMAAddress(&psFWCommonContext->sCCBMetaDMAAddr, + 
psServerCommonContext->psClientCCBMemDesc, + &psFWCommonContext->psCCB, + 0); + } + + /* Set the memory context device address */ + psServerCommonContext->psFWMemContextMemDesc = psFWMemContextMemDesc; + eError = RGXSetFirmwareAddress(&psFWCommonContext->psFWMemContext, + psFWMemContextMemDesc, + 0, RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:3", fail_fwmemctxfwaddr); + + /* Set the framework register updates address */ + psServerCommonContext->psFWFrameworkMemDesc = psInfo->psFWFrameworkMemDesc; + if (psInfo->psFWFrameworkMemDesc != NULL) + { + eError = RGXSetFirmwareAddress(&psFWCommonContext->psRFCmd, + psInfo->psFWFrameworkMemDesc, + 0, RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:4", fail_fwframeworkfwadd); + } + else + { + /* This should never be touched in this contexts without a framework + * memdesc, but ensure it is zero so we see crashes if it is. + */ + psFWCommonContext->psRFCmd.ui32Addr = 0; + } + + eError = _CheckPriority(psDevInfo, ui32Priority, eRGXCCBRequestor); + PVR_LOG_GOTO_IF_ERROR(eError, "_CheckPriority", fail_checkpriority); + + psServerCommonContext->ui32Priority = ui32Priority; + psServerCommonContext->eRequestor = eRGXCCBRequestor; + + /* Store the FWMemContext device virtual address in server mmu context + * to be used in schedule command path */ + RGXSetFWMemContextDevVirtAddr(psServerMMUContext, psFWCommonContext->psFWMemContext); + + psFWCommonContext->ui32Priority = ui32Priority; + psFWCommonContext->ui32PrioritySeqNum = 0; + psFWCommonContext->ui32MaxDeadlineMS = MIN(ui32MaxDeadlineMS, + (eDM == RGXFWIF_DM_CDM ? + RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS : + RGXFWIF_MAX_WORKLOAD_DEADLINE_MS)); + psFWCommonContext->ui64RobustnessAddress = ui64RobustnessAddress; + + /* Store a references to Server Common Context and PID for notifications back from the FW. 
*/ + psFWCommonContext->ui32ServerCommonContextID = psServerCommonContext->ui32ContextID; + psFWCommonContext->ui32PID = OSGetCurrentClientProcessIDKM(); + + /* Set the firmware GPU context state buffer */ + psServerCommonContext->psContextStateMemDesc = psContextStateMemDesc; + if (psContextStateMemDesc) + { + eError = RGXSetFirmwareAddress(&psFWCommonContext->psContextState, + psContextStateMemDesc, + 0, + RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:5", fail_ctxstatefwaddr); + } + + /* + * Dump the created context + */ + PDUMPCOMMENT("Dump %s context", aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT]); + DevmemPDumpLoadMem(psServerCommonContext->psFWCommonContextMemDesc, + ui32FWCommonContextOffset, + sizeof(*psFWCommonContext), + PDUMP_FLAGS_CONTINUOUS); + + /* We've finished the setup so release the CPU mapping */ + DevmemReleaseCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc); + + /* Map this allocation into the FW */ + eError = RGXSetFirmwareAddress(&psServerCommonContext->sFWCommonContextFWAddr, + psServerCommonContext->psFWCommonContextMemDesc, + ui32FWCommonContextOffset, + RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:6", fail_fwcommonctxfwaddr); + +#if defined(__linux__) + { + IMG_UINT32 ui32FWAddr; + switch (eDM) { + case RGXFWIF_DM_GEOM: + ui32FWAddr = (IMG_UINT32) ((uintptr_t) IMG_CONTAINER_OF((void *) ((uintptr_t) + psServerCommonContext->sFWCommonContextFWAddr.ui32Addr), RGXFWIF_FWRENDERCONTEXT, sTAContext)); + break; + case RGXFWIF_DM_3D: + ui32FWAddr = (IMG_UINT32) ((uintptr_t) IMG_CONTAINER_OF((void *) ((uintptr_t) + psServerCommonContext->sFWCommonContextFWAddr.ui32Addr), RGXFWIF_FWRENDERCONTEXT, s3DContext)); + break; + default: + ui32FWAddr = psServerCommonContext->sFWCommonContextFWAddr.ui32Addr; + break; + } + + trace_rogue_create_fw_context(OSGetCurrentClientProcessNameKM(), + aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], + ui32FWAddr); + } +#endif + 
/*Add the node to the list when finalised */ + OSWRLockAcquireWrite(psDevInfo->hCommonCtxtListLock); + dllist_add_to_tail(&(psDevInfo->sCommonCtxtListHead), &(psServerCommonContext->sListNode)); + OSWRLockReleaseWrite(psDevInfo->hCommonCtxtListLock); + + *ppsServerCommonContext = psServerCommonContext; + return PVRSRV_OK; + +fail_fwcommonctxfwaddr: + if (psContextStateMemDesc) + { + RGXUnsetFirmwareAddress(psContextStateMemDesc); + } +fail_ctxstatefwaddr: +fail_checkpriority: + if (psInfo->psFWFrameworkMemDesc != NULL) + { + RGXUnsetFirmwareAddress(psInfo->psFWFrameworkMemDesc); + } +fail_fwframeworkfwadd: + RGXUnsetFirmwareAddress(psFWMemContextMemDesc); +fail_fwmemctxfwaddr: + RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBCtrlMemDesc); +fail_cccbctrlfwaddr: + RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBMemDesc); +fail_cccbfwaddr: + RGXDestroyCCB(psDevInfo, psServerCommonContext->psClientCCB); +fail_allocateccb: + DevmemReleaseCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc); +fail_cpuvirtacquire: + RGXUnsetFirmwareAddress(psServerCommonContext->psFWCommonContextMemDesc); + if (!psServerCommonContext->bCommonContextMemProvided) + { + DevmemFwUnmapAndFree(psDevInfo, psServerCommonContext->psFWCommonContextMemDesc); + psServerCommonContext->psFWCommonContextMemDesc = NULL; + } +fail_contextalloc: + OSFreeMem(psServerCommonContext); +fail_alloc: + return eError; +} + +void FWCommonContextFree(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext) +{ + + OSWRLockAcquireWrite(psServerCommonContext->psDevInfo->hCommonCtxtListLock); + /* Remove the context from the list of all contexts. 
*/ + dllist_remove_node(&psServerCommonContext->sListNode); + OSWRLockReleaseWrite(psServerCommonContext->psDevInfo->hCommonCtxtListLock); + + /* + Unmap the context itself and then all its resources + */ + + /* Unmap the FW common context */ + RGXUnsetFirmwareAddress(psServerCommonContext->psFWCommonContextMemDesc); + /* Umap context state buffer (if there was one) */ + if (psServerCommonContext->psContextStateMemDesc) + { + RGXUnsetFirmwareAddress(psServerCommonContext->psContextStateMemDesc); + } + /* Unmap the framework buffer */ + if (psServerCommonContext->psFWFrameworkMemDesc != NULL) + { + RGXUnsetFirmwareAddress(psServerCommonContext->psFWFrameworkMemDesc); + } + /* Unmap client CCB and CCB control */ + RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBCtrlMemDesc); + RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBMemDesc); + /* Unmap the memory context */ + RGXUnsetFirmwareAddress(psServerCommonContext->psFWMemContextMemDesc); + + /* Destroy the client CCB */ + RGXDestroyCCB(psServerCommonContext->psDevInfo, psServerCommonContext->psClientCCB); + + + /* Free the FW common context (if there was one) */ + if (!psServerCommonContext->bCommonContextMemProvided) + { + DevmemFwUnmapAndFree(psServerCommonContext->psDevInfo, + psServerCommonContext->psFWCommonContextMemDesc); + psServerCommonContext->psFWCommonContextMemDesc = NULL; + } + /* Free the hosts representation of the common context */ + OSFreeMem(psServerCommonContext); +} + +PRGXFWIF_FWCOMMONCONTEXT FWCommonContextGetFWAddress(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext) +{ + return psServerCommonContext->sFWCommonContextFWAddr; +} + +RGX_CLIENT_CCB *FWCommonContextGetClientCCB(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext) +{ + return psServerCommonContext->psClientCCB; +} + +SERVER_MMU_CONTEXT *FWCommonContextGetServerMMUCtx(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext) +{ + return psServerCommonContext->psServerMMUContext; +} + +RGX_CONTEXT_RESET_REASON 
FWCommonContextGetLastResetReason(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, + IMG_UINT32 *pui32LastResetJobRef) +{ + RGX_CONTEXT_RESET_REASON eLastResetReason; + + PVR_ASSERT(psServerCommonContext != NULL); + PVR_ASSERT(pui32LastResetJobRef != NULL); + + /* Take the most recent reason & job ref and reset for next time... */ + eLastResetReason = psServerCommonContext->eLastResetReason; + *pui32LastResetJobRef = psServerCommonContext->ui32LastResetJobRef; + psServerCommonContext->eLastResetReason = RGX_CONTEXT_RESET_REASON_NONE; + psServerCommonContext->ui32LastResetJobRef = 0; + + if (eLastResetReason == RGX_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH) + { + PVR_DPF((PVR_DBG_WARNING, + "A Hard Context Switch was triggered on the GPU to ensure Quality of Service.")); + } + + return eLastResetReason; +} + +PVRSRV_RGXDEV_INFO* FWCommonContextGetRGXDevInfo(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext) +{ + return psServerCommonContext->psDevInfo; +} + +PVRSRV_ERROR RGXGetFWCommonContextAddrFromServerMMUCtx(PVRSRV_RGXDEV_INFO *psDevInfo, + SERVER_MMU_CONTEXT *psServerMMUContext, + PRGXFWIF_FWCOMMONCONTEXT *psFWCommonContextFWAddr) +{ + DLLIST_NODE *psNode, *psNext; + dllist_foreach_node(&psDevInfo->sCommonCtxtListHead, psNode, psNext) + { + RGX_SERVER_COMMON_CONTEXT *psThisContext = + IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode); + + if (psThisContext->psServerMMUContext == psServerMMUContext) + { + psFWCommonContextFWAddr->ui32Addr = psThisContext->sFWCommonContextFWAddr.ui32Addr; + return PVRSRV_OK; + } + } + return PVRSRV_ERROR_INVALID_PARAMS; +} + +PVRSRV_ERROR FWCommonContextSetFlags(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, + IMG_UINT32 ui32ContextFlags) +{ + return RGXSetCCBFlags(psServerCommonContext->psClientCCB, + ui32ContextFlags); +} + +/*! 
+******************************************************************************* + @Function RGXFreeCCB + @Description Free the kernel or firmware CCB + @Input psDevInfo + @Input ppsCCBCtl + @Input ppsCCBCtlMemDesc + @Input ppsCCBMemDesc + @Input psCCBCtlFWAddr +******************************************************************************/ +static void RGXFreeCCB(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_CCB_CTL **ppsCCBCtl, + DEVMEM_MEMDESC **ppsCCBCtlMemDesc, + IMG_UINT8 **ppui8CCB, + DEVMEM_MEMDESC **ppsCCBMemDesc) +{ + if (*ppsCCBMemDesc != NULL) + { + if (*ppui8CCB != NULL) + { + DevmemReleaseCpuVirtAddr(*ppsCCBMemDesc); + *ppui8CCB = NULL; + } + DevmemFwUnmapAndFree(psDevInfo, *ppsCCBMemDesc); + *ppsCCBMemDesc = NULL; + } + if (*ppsCCBCtlMemDesc != NULL) + { + if (*ppsCCBCtl != NULL) + { + DevmemReleaseCpuVirtAddr(*ppsCCBCtlMemDesc); + *ppsCCBCtl = NULL; + } + DevmemFwUnmapAndFree(psDevInfo, *ppsCCBCtlMemDesc); + *ppsCCBCtlMemDesc = NULL; + } +} + +/*! +******************************************************************************* + @Function RGXFreeCCBReturnSlots + @Description Free the kernel CCB's return slot array and associated mappings + @Input psDevInfo Device Info struct + @Input ppui32CCBRtnSlots CPU mapping of slot array + @Input ppsCCBRtnSlotsMemDesc Slot array's device memdesc +******************************************************************************/ +static void RGXFreeCCBReturnSlots(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 **ppui32CCBRtnSlots, + DEVMEM_MEMDESC **ppsCCBRtnSlotsMemDesc) +{ + /* Free the return slot array if allocated */ + if (*ppsCCBRtnSlotsMemDesc != NULL) + { + /* Before freeing, ensure the CPU mapping as well is released */ + if (*ppui32CCBRtnSlots != NULL) + { + DevmemReleaseCpuVirtAddr(*ppsCCBRtnSlotsMemDesc); + *ppui32CCBRtnSlots = NULL; + } + DevmemFwUnmapAndFree(psDevInfo, *ppsCCBRtnSlotsMemDesc); + *ppsCCBRtnSlotsMemDesc = NULL; + } +} + +/*! 
+******************************************************************************* + @Function RGXSetupCCB + @Description Allocate and initialise a circular command buffer + @Input psDevInfo + @Input ppsCCBCtl + @Input ppsCCBCtlMemDesc + @Input ppui8CCB + @Input ppsCCBMemDesc + @Input psCCBCtlFWAddr + @Input ui32NumCmdsLog2 + @Input ui32CmdSize + @Input uiCCBMemAllocFlags + @Input pszName + + @Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR RGXSetupCCB(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_CCB_CTL **ppsCCBCtl, + DEVMEM_MEMDESC **ppsCCBCtlMemDesc, + IMG_UINT8 **ppui8CCB, + DEVMEM_MEMDESC **ppsCCBMemDesc, + PRGXFWIF_CCB_CTL *psCCBCtlFWAddr, + PRGXFWIF_CCB *psCCBFWAddr, + IMG_UINT32 ui32NumCmdsLog2, + IMG_UINT32 ui32CmdSize, + PVRSRV_MEMALLOCFLAGS_T uiCCBMemAllocFlags, + const IMG_CHAR *pszName) +{ + PVRSRV_ERROR eError; + RGXFWIF_CCB_CTL *psCCBCtl; + IMG_UINT32 ui32CCBSize = (1U << ui32NumCmdsLog2); + IMG_CHAR szCCBCtlName[DEVMEM_ANNOTATION_MAX_LEN]; + IMG_INT32 iStrLen; + + /* Append "Control" to the name for the control struct. */ + iStrLen = OSSNPrintf(szCCBCtlName, sizeof(szCCBCtlName), "%sControl", pszName); + PVR_ASSERT(iStrLen < sizeof(szCCBCtlName)); + + if (unlikely(iStrLen < 0)) + { + szCCBCtlName[0] = '\0'; + } + + /* Allocate memory for the CCB control.*/ + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS, + sizeof(RGXFWIF_CCB_CTL), + szCCBCtlName, + ppsCCBCtlMemDesc, + psCCBCtlFWAddr, + (void**) ppsCCBCtl, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); + + /* + * Allocate memory for the CCB. 
+ * (this will reference further command data in non-shared CCBs) + */ + eError = RGXSetupFwAllocation(psDevInfo, + uiCCBMemAllocFlags, + ui32CCBSize * ui32CmdSize, + pszName, + ppsCCBMemDesc, + psCCBFWAddr, + (void**) ppui8CCB, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); + + /* + * Initialise the CCB control. + */ + psCCBCtl = *ppsCCBCtl; + psCCBCtl->ui32WriteOffset = 0; + psCCBCtl->ui32ReadOffset = 0; + psCCBCtl->ui32WrapMask = ui32CCBSize - 1; + psCCBCtl->ui32CmdSize = ui32CmdSize; + + /* Pdump the CCB control */ + PDUMPCOMMENT("Initialise %s", szCCBCtlName); + DevmemPDumpLoadMem(*ppsCCBCtlMemDesc, + 0, + sizeof(RGXFWIF_CCB_CTL), + 0); + + return PVRSRV_OK; + +fail: + RGXFreeCCB(psDevInfo, + ppsCCBCtl, + ppsCCBCtlMemDesc, + ppui8CCB, + ppsCCBMemDesc); + + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +#if defined(RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_BIT_MASK) +static void RGXSetupFaultReadRegisterRollback(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + PMR *psPMR; + + /* Run-time check feature support */ + if (PVRSRV_IS_FEATURE_SUPPORTED(psDevInfo->psDeviceNode, SLC_FAULT_ACCESS_ADDR_PHYS)) + { + if (psDevInfo->psRGXFaultAddressMemDesc) + { + if (DevmemServerGetImportHandle(psDevInfo->psRGXFaultAddressMemDesc, (void **)&psPMR) == PVRSRV_OK) + { + PMRUnlockSysPhysAddresses(psPMR); + } + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFaultAddressMemDesc); + psDevInfo->psRGXFaultAddressMemDesc = NULL; + } + } +} + +static PVRSRV_ERROR RGXSetupFaultReadRegister(PVRSRV_DEVICE_NODE *psDeviceNode, RGXFWIF_SYSINIT *psFwSysInit) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 *pui32MemoryVirtAddr; + IMG_UINT32 i; + size_t ui32PageSize = OSGetPageSize(); + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PMR *psPMR; + + /* Run-time check feature support */ + if (!PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, SLC_FAULT_ACCESS_ADDR_PHYS)) + { + return PVRSRV_OK; + } + + /* Allocate page of memory to use for page faults on 
non-blocking memory transactions. + * Doesn't need to be cleared as it is initialised with the 0xDEADBEEF pattern below. */ + psDevInfo->psRGXFaultAddressMemDesc = NULL; + eError = DevmemFwAllocateExportable(psDeviceNode, + ui32PageSize, + ui32PageSize, + RGX_FWINITDATA_WC_ALLOCFLAGS & ~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC, + "FwExFaultAddress", + &psDevInfo->psRGXFaultAddressMemDesc); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate mem for fault address (%u)", + __func__, eError)); + goto failFaultAddressDescAlloc; + } + + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc, + (void **)&pui32MemoryVirtAddr); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire mem for fault address (%u)", + __func__, eError)); + goto failFaultAddressDescAqCpuVirt; + } + + if (!psDeviceNode->bAutoVzFwIsUp) + { + /* fill the page with a known pattern when booting the firmware */ + for (i = 0; i < ui32PageSize/sizeof(IMG_UINT32); i++) + { + *(pui32MemoryVirtAddr + i) = 0xDEADBEEF; + } + } + + OSWriteMemoryBarrier(); + + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc); + + eError = DevmemServerGetImportHandle(psDevInfo->psRGXFaultAddressMemDesc, (void **)&psPMR); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Error getting PMR for fault address (%u)", + __func__, eError)); + + goto failFaultAddressDescGetPMR; + } + else + { + IMG_BOOL bValid; + IMG_UINT32 ui32Log2PageSize = OSGetPageShift(); + + eError = PMRLockSysPhysAddresses(psPMR); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Error locking physical address for fault address MemDesc (%u)", + __func__, eError)); + + goto failFaultAddressDescLockPhys; + } + + eError = PMR_DevPhysAddr(psPMR,ui32Log2PageSize, 1, 0, &(psFwSysInit->sFaultPhysAddr), &bValid); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Error getting physical address for fault address MemDesc (%u)", + __func__, 
eError)); + + goto failFaultAddressDescGetPhys; + } + + if (!bValid) + { + psFwSysInit->sFaultPhysAddr.uiAddr = 0; + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed getting physical address for fault address MemDesc - invalid page (0x%" IMG_UINT64_FMTSPECX ")", + __func__, psFwSysInit->sFaultPhysAddr.uiAddr)); + + goto failFaultAddressDescGetPhys; + } + } + + return PVRSRV_OK; + +failFaultAddressDescGetPhys: + PMRUnlockSysPhysAddresses(psPMR); + +failFaultAddressDescLockPhys: +failFaultAddressDescGetPMR: +failFaultAddressDescAqCpuVirt: + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFaultAddressMemDesc); + psDevInfo->psRGXFaultAddressMemDesc = NULL; + +failFaultAddressDescAlloc: + + return eError; +} + +#if defined(PDUMP) +/* Replace the DevPhy address with the one Pdump allocates at pdump_player run time */ +static PVRSRV_ERROR RGXPDumpFaultReadRegister(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + PVRSRV_ERROR eError; + PMR *psFWInitPMR, *psFaultAddrPMR; + IMG_UINT32 ui32Dstoffset; + + /* Run-time check feature support */ + if (!PVRSRV_IS_FEATURE_SUPPORTED(psDevInfo->psDeviceNode, SLC_FAULT_ACCESS_ADDR_PHYS)) + { + return PVRSRV_OK; + } + + psFWInitPMR = (PMR *)(psDevInfo->psRGXFWIfSysInitMemDesc->psImport->hPMR); + ui32Dstoffset = psDevInfo->psRGXFWIfSysInitMemDesc->uiOffset + offsetof(RGXFWIF_SYSINIT, sFaultPhysAddr.uiAddr); + + psFaultAddrPMR = (PMR *)(psDevInfo->psRGXFaultAddressMemDesc->psImport->hPMR); + + eError = PDumpMemLabelToMem64(psFaultAddrPMR, + psFWInitPMR, + 0, + ui32Dstoffset, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Dump of Fault Page Phys address failed(%u)", __func__, eError)); + } + return eError; +} +#endif +#endif /* defined(RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_BIT_MASK) */ + +#if defined(SUPPORT_TBI_INTERFACE) +/*************************************************************************/ /*! 
+@Function RGXTBIBufferIsInitRequired + +@Description Returns true if the firmware tbi buffer is not allocated and + might be required by the firmware soon. TBI buffer allocated + on-demand to reduce RAM footprint on systems not needing + tbi. + +@Input psDevInfo RGX device info + +@Return IMG_BOOL Whether on-demand allocation(s) is/are needed + or not +*/ /**************************************************************************/ +INLINE IMG_BOOL RGXTBIBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFWIF_TRACEBUF* psTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; + + /* The firmware expects a tbi buffer only when: + * - Logtype is "tbi" + */ + if ((psDevInfo->psRGXFWIfTBIBufferMemDesc == NULL) + && (psTraceBufCtl->ui32LogType & ~RGXFWIF_LOG_TYPE_TRACE) + && (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK)) + { + return IMG_TRUE; + } + + return IMG_FALSE; +} + +/*************************************************************************/ /*! +@Function RGXTBIBufferDeinit + +@Description Deinitialises all the allocations and references that are made + for the FW tbi buffer + +@Input ppsDevInfo RGX device info +@Return void +*/ /**************************************************************************/ +static void RGXTBIBufferDeinit(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfTBIBufferMemDesc); + psDevInfo->psRGXFWIfTBIBufferMemDesc = NULL; + psDevInfo->ui32RGXFWIfHWPerfBufSize = 0; +} + +/*************************************************************************/ /*! +@Function RGXTBIBufferInitOnDemandResources + +@Description Allocates the firmware TBI buffer required for reading SFs + strings and initialize it with SFs. + +@Input psDevInfo RGX device info + +@Return PVRSRV_OK If all went good, PVRSRV_ERROR otherwise. 
+*/ /**************************************************************************/ +PVRSRV_ERROR RGXTBIBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 i, ui32Len; + const IMG_UINT32 ui32FWTBIBufsize = g_ui32SFsCount * sizeof(RGXFW_STID_FMT); + RGXFW_STID_FMT *psFW_SFs = NULL; + + /* Firmware address should not be already set */ + if (psDevInfo->sRGXFWIfTBIBuffer.ui32Addr) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: FW address for FWTBI is already set. Resetting it with newly allocated one", + __func__)); + } + + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWSHAREDMEM_GPU_RO_ALLOCFLAGS, + ui32FWTBIBufsize, + "FwTBIBuffer", + &psDevInfo->psRGXFWIfTBIBufferMemDesc, + &psDevInfo->sRGXFWIfTBIBuffer, + (void**)&psFW_SFs, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); + + /* Copy SFs entries to FW buffer */ + for (i = 0; i < g_ui32SFsCount; i++) + { + OSDeviceMemCopy(&psFW_SFs[i].ui32Id, &SFs[i].ui32Id, sizeof(SFs[i].ui32Id)); + ui32Len = OSStringLength(SFs[i].psName); + OSDeviceMemCopy(psFW_SFs[i].sName, SFs[i].psName, MIN(ui32Len, IMG_SF_STRING_MAX_SIZE - 1)); + } + + /* flush write buffers for psFW_SFs */ + OSWriteMemoryBarrier(); + + /* Set size of TBI buffer */ + psDevInfo->ui32FWIfTBIBufferSize = ui32FWTBIBufsize; + + /* release CPU mapping */ + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTBIBufferMemDesc); + + return PVRSRV_OK; +fail: + RGXTBIBufferDeinit(psDevInfo); + return eError; +} +#endif + +/*************************************************************************/ /*! +@Function RGXTraceBufferIsInitRequired + +@Description Returns true if the firmware trace buffer is not allocated and + might be required by the firmware soon. Trace buffer allocated + on-demand to reduce RAM footprint on systems not needing + firmware trace. 
+
+@Input          psDevInfo  RGX device info
+
+@Return         IMG_BOOL   Whether on-demand allocation(s) is/are needed
+                or not
+*/ /**************************************************************************/
+INLINE IMG_BOOL RGXTraceBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	RGXFWIF_TRACEBUF* psTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl;
+
+	/* The firmware expects a trace buffer only when:
+	 * - Logtype is "trace" AND
+	 * - at least one LogGroup is configured
+	 * - the Driver Mode is not Guest
+	 */
+	if ((psDevInfo->psRGXFWIfTraceBufferMemDesc[0] == NULL)
+		&& (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)
+		&& (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK)
+		&& !PVRSRV_VZ_MODE_IS(GUEST))
+	{
+		return IMG_TRUE;
+	}
+
+	return IMG_FALSE;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXTraceBufferDeinit
+
+@Description    Deinitialises all the allocations and references that are made
+                for the FW trace buffer(s)
+
+@Input          ppsDevInfo RGX device info
+@Return         void
+*/ /**************************************************************************/
+static void RGXTraceBufferDeinit(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	RGXFWIF_TRACEBUF* psTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl;
+	IMG_UINT32 i;
+
+	/* One buffer per firmware thread; each may be independently unallocated */
+	for (i = 0; i < RGXFW_THREAD_NUM; i++)
+	{
+		if (psDevInfo->psRGXFWIfTraceBufferMemDesc[i])
+		{
+			/* Drop the CPU mapping (if any) before freeing the FW allocation */
+			if (psTraceBufCtl->sTraceBuf[i].pui32TraceBuffer != NULL)
+			{
+				DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufferMemDesc[i]);
+				psTraceBufCtl->sTraceBuf[i].pui32TraceBuffer = NULL;
+			}
+
+			DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfTraceBufferMemDesc[i]);
+			psDevInfo->psRGXFWIfTraceBufferMemDesc[i] = NULL;
+		}
+	}
+}
+
+/*************************************************************************/ /*!
+@Function       RGXTraceBufferInitOnDemandResources
+
+@Description    Allocates the firmware trace buffer required for dumping trace
+                info from the firmware.
+
+@Input          psDevInfo  RGX device info
+
+@Return         PVRSRV_OK  If all went good, PVRSRV_ERROR otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXTraceBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO* psDevInfo,
+						 PVRSRV_MEMALLOCFLAGS_T uiAllocFlags)
+{
+	RGXFWIF_TRACEBUF* psTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_UINT32 ui32FwThreadNum;
+	IMG_UINT32 ui32DefaultTraceBufSize;
+	IMG_DEVMEM_SIZE_T uiTraceBufSizeInBytes;
+	void *pvAppHintState = NULL;
+	IMG_CHAR pszBufferName[] = "FwTraceBuffer_Thread0";
+
+	/* Check AppHint value for module-param FWTraceBufSizeInDWords */
+	OSCreateKMAppHintState(&pvAppHintState);
+	ui32DefaultTraceBufSize = RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS;
+	OSGetKMAppHintUINT32(pvAppHintState,
+						 FWTraceBufSizeInDWords,
+						 &ui32DefaultTraceBufSize,
+						 &psTraceBufCtl->ui32TraceBufSizeInDWords);
+	OSFreeKMAppHintState(pvAppHintState);
+	pvAppHintState = NULL;
+
+	uiTraceBufSizeInBytes = psTraceBufCtl->ui32TraceBufSizeInDWords * sizeof(IMG_UINT32);
+
+	/* Allocate one trace buffer per firmware thread */
+	for (ui32FwThreadNum = 0; ui32FwThreadNum < RGXFW_THREAD_NUM; ui32FwThreadNum++)
+	{
+#if !defined(SUPPORT_AUTOVZ)
+		/* Ensure allocation API is only called when not already allocated */
+		PVR_ASSERT(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32FwThreadNum] == NULL);
+		/* Firmware address should not be already set */
+		PVR_ASSERT(psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32RGXFWIfTraceBuffer.ui32Addr == 0x0);
+#endif
+
+		/* update the firmware thread number in the Trace Buffer's name.
+		 * FIX: the original used "+= ui32FwThreadNum" which accumulates across
+		 * iterations (thread 2 would be named "...Thread3"); assign the digit
+		 * absolutely instead. Identical result for thread counts <= 2. */
+		pszBufferName[sizeof(pszBufferName) - 2] = (IMG_CHAR)('0' + ui32FwThreadNum);
+
+		eError = RGXSetupFwAllocation(psDevInfo,
+									  uiAllocFlags,
+									  uiTraceBufSizeInBytes,
+									  pszBufferName,
+									  &psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32FwThreadNum],
+									  &psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32RGXFWIfTraceBuffer,
+									  (void**)&psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32TraceBuffer,
+									  RFW_FWADDR_NOREF_FLAG);
+		PVR_LOG_GOTO_IF_ERROR(eError,
"RGXSetupFwAllocation", fail);
+	}
+
+	return PVRSRV_OK;
+
+fail:
+	/* Free any per-thread buffers already allocated by this call */
+	RGXTraceBufferDeinit(psDevInfo);
+	return eError;
+}
+
+#if defined(SUPPORT_POWMON_COMPONENT)
+/*************************************************************************/ /*!
+@Function       RGXPowmonBufferIsInitRequired
+
+@Description    Returns true if the power monitoring buffer is not allocated and
+                might be required by the firmware soon. Powmon buffer allocated
+                on-demand to reduce RAM footprint on systems not needing
+                power monitoring.
+
+@Input          psDevInfo  RGX device info
+
+@Return         IMG_BOOL   Whether on-demand allocation(s) is/are needed
+                or not
+*/ /**************************************************************************/
+INLINE IMG_BOOL RGXPowmonBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	/* The firmware expects a power monitoring buffer only when:
+	 * - Single-shot power counters are enabled with RGX_HWPERF_PWR_EST_REQUEST
+	 * - the Driver Mode is not Guest
+	 */
+	if ((psDevInfo->psRGXFWIfPowMonBufferMemDesc == NULL)
+		&& (psDevInfo->ui64HWPerfFilter & RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_PWR_EST_REQUEST))
+		&& !PVRSRV_VZ_MODE_IS(GUEST))
+	{
+		return IMG_TRUE;
+	}
+
+	return IMG_FALSE;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXPowmonBufferDeinit
+
+@Description    Deinitialises all the allocations and references that are made
+                for the FW power monitoring buffer
+
+@Input          ppsDevInfo RGX device info
+@Return         void
+*/ /**************************************************************************/
+static void RGXPowmonBufferDeinit(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
+
+	/* Safe to call when nothing is allocated (guarded by the NULL check) */
+	if (psDevInfo->psRGXFWIfPowMonBufferMemDesc)
+	{
+		/* Drop the CPU mapping (if any) before freeing the FW allocation */
+		if (psFwSysData->sPowerMonBuf.pui32TraceBuffer != NULL)
+		{
+			DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfPowMonBufferMemDesc);
+			psFwSysData->sPowerMonBuf.pui32TraceBuffer = NULL;
+		}
+
+		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfPowMonBufferMemDesc);
+		psDevInfo->psRGXFWIfPowMonBufferMemDesc = NULL;
+	}
+}
+
+/*************************************************************************/ /*!
+@Function       RGXPowmonBufferInitOnDemandResources
+
+@Description    Allocates the power monitoring buffer.
+
+@Input          psDevInfo  RGX device info
+
+@Return         PVRSRV_OK  If all went good, PVRSRV_ERROR otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXPowmonBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+/* Fixed-size buffer: 512 bytes (128 dwords). NOTE(review): function-local
+ * #define with no matching #undef - leaks into the rest of the TU. */
+#define POWER_MON_BUF_SIZE (512UL)
+	/* Ensure allocation API is only called when not already allocated */
+	PVR_ASSERT(psDevInfo->psRGXFWIfPowMonBufferMemDesc == NULL);
+
+	eError = RGXSetupFwAllocation(psDevInfo,
+								  RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS,
+								  POWER_MON_BUF_SIZE,
+								  "FwPowMonBuffer",
+								  &psDevInfo->psRGXFWIfPowMonBufferMemDesc,
+								  &psFwSysData->sPowerMonBuf.pui32RGXFWIfTraceBuffer,
+								  (void **)&psFwSysData->sPowerMonBuf.pui32TraceBuffer,
+								  RFW_FWADDR_NOREF_FLAG);
+	PVR_LOG_GOTO_IF_ERROR(eError, "Power Monitoring Buffer allocation", fail);
+
+	/* Size published to the firmware in dwords, hence the >> 2 */
+	psFwSysData->ui32PowerMonBufSizeInDWords = POWER_MON_BUF_SIZE >> 2;
+
+	return PVRSRV_OK;
+fail:
+	RGXPowmonBufferDeinit(psDevInfo);
+	return eError;
+}
+#endif
+
+#if defined(PDUMP)
+/*************************************************************************/ /*!
+@Function       RGXPDumpLoadFWInitData
+
+@Description    Dumps the initial contents and editable run-time configuration
+                of the FW init data structures to the PDump stream.
+
+@Input          psDevInfo  RGX device info
+ */ /*************************************************************************/
+static void RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo,
+								   IMG_UINT32 ui32HWPerfCountersDataSize,
+								   IMG_UINT32 ui32RenderKillingCtl,
+								   IMG_UINT32 ui32CDMTDMKillingCtl,
+								   IMG_BOOL bEnableSignatureChecks)
+{
+	IMG_UINT32 ui32ConfigFlags = psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags;
+	IMG_UINT32 ui32FwOsCfgFlags = psDevInfo->psRGXFWIfFwOsData->ui32FwOsConfigFlags;
+
+	PDUMPCOMMENT("Dump RGXFW Init data");
+	if (!bEnableSignatureChecks)
+	{
+		PDUMPCOMMENT("(to enable rgxfw signatures place the following line after the RTCONF line)");
+		DevmemPDumpLoadMem(psDevInfo->psRGXFWIfSysInitMemDesc,
+						   offsetof(RGXFWIF_SYSINIT, asSigBufCtl),
+						   sizeof(RGXFWIF_SIGBUF_CTL)*(RGXFWIF_DM_MAX),
+						   PDUMP_FLAGS_CONTINUOUS);
+	}
+
+	PDUMPCOMMENT("Dump initial state of FW runtime configuration");
+	DevmemPDumpLoadMem(psDevInfo->psRGXFWIfRuntimeCfgMemDesc,
+					   0,
+					   sizeof(RGXFWIF_RUNTIME_CFG),
+					   PDUMP_FLAGS_CONTINUOUS);
+
+	PDUMPCOMMENT("Dump rgxfw hwperfctl structure");
+	DevmemPDumpLoadZeroMem (psDevInfo->psRGXFWIfHWPerfCountersMemDesc,
+							0,
+							ui32HWPerfCountersDataSize,
+							PDUMP_FLAGS_CONTINUOUS);
+
+	PDUMPCOMMENT("Dump rgxfw trace control structure");
+	DevmemPDumpLoadMem(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+					   0,
+					   sizeof(RGXFWIF_TRACEBUF),
+					   PDUMP_FLAGS_CONTINUOUS);
+
+	PDUMPCOMMENT("Dump firmware system data structure");
+	DevmemPDumpLoadMem(psDevInfo->psRGXFWIfFwSysDataMemDesc,
+					   0,
+					   sizeof(RGXFWIF_SYSDATA),
+					   PDUMP_FLAGS_CONTINUOUS);
+
+	PDUMPCOMMENT("Dump firmware OS data structure");
+	DevmemPDumpLoadMem(psDevInfo->psRGXFWIfFwOsDataMemDesc,
+					   0,
+					   sizeof(RGXFWIF_OSDATA),
+					   PDUMP_FLAGS_CONTINUOUS);
+
+#if defined(SUPPORT_TBI_INTERFACE)
+	PDUMPCOMMENT("Dump rgx TBI buffer");
+	DevmemPDumpLoadMem(psDevInfo->psRGXFWIfTBIBufferMemDesc,
+					   0,
+					   psDevInfo->ui32FWIfTBIBufferSize,
+					   PDUMP_FLAGS_CONTINUOUS);
+#endif /* defined(SUPPORT_TBI_INTERFACE) */
+
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+	PDUMPCOMMENT("Dump rgxfw register configuration buffer");
+	DevmemPDumpLoadMem(psDevInfo->psRGXFWIfRegCfgMemDesc,
+					   0,
+					   sizeof(RGXFWIF_REG_CFG),
+					   PDUMP_FLAGS_CONTINUOUS);
+#endif /* defined(SUPPORT_USER_REGISTER_CONFIGURATION) */
+	PDUMPCOMMENT("Dump rgxfw system init structure");
+	DevmemPDumpLoadMem(psDevInfo->psRGXFWIfSysInitMemDesc,
+					   0,
+					   sizeof(RGXFWIF_SYSINIT),
+					   PDUMP_FLAGS_CONTINUOUS);
+
+	PDUMPCOMMENT("Dump rgxfw os init structure");
+	DevmemPDumpLoadMem(psDevInfo->psRGXFWIfOsInitMemDesc,
+					   0,
+					   sizeof(RGXFWIF_OSINIT),
+					   PDUMP_FLAGS_CONTINUOUS);
+
+#if defined(RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_BIT_MASK)
+	/* RGXFW Init structure needs to be loaded before we overwrite FaultPhysAddr, else this address patching won't have any effect */
+	PDUMPCOMMENT("Overwrite FaultPhysAddr of FwSysInit in pdump with actual physical address");
+	RGXPDumpFaultReadRegister(psDevInfo);
+#endif /* defined(RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_BIT_MASK) */
+
+	PDUMPCOMMENT("RTCONF: run-time configuration");
+
+	/* Dump the config options so they can be edited.
+	 */
+
+	PDUMPCOMMENT("(Set the FW system config options here)");
+	PDUMPCOMMENT("( Ctx Switch Rand mode: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_MODE_RAND);
+	PDUMPCOMMENT("( Ctx Switch Soft Reset Enable: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_SRESET_EN);
+	PDUMPCOMMENT("( Enable HWPerf: 0x%08x)", RGXFWIF_INICFG_HWPERF_EN);
+#if defined(SUPPORT_VALIDATION)
+	PDUMPCOMMENT("( Enable generic DM Killing Rand mode: 0x%08x)", RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN);
+#endif /* defined(SUPPORT_VALIDATION) */
+	PDUMPCOMMENT("( Rascal+Dust Power Island: 0x%08x)", RGXFWIF_INICFG_POW_RASCALDUST);
+	PDUMPCOMMENT("( FBCDC Version 3.1 Enable: 0x%08x)", RGXFWIF_INICFG_FBCDC_V3_1_EN);
+	PDUMPCOMMENT("( Check MList: 0x%08x)", RGXFWIF_INICFG_CHECK_MLIST_EN);
+	PDUMPCOMMENT("( Disable Auto Clock Gating: 0x%08x)", RGXFWIF_INICFG_DISABLE_CLKGATING_EN);
+	PDUMPCOMMENT("( Enable HWPerf Polling Perf Counter: 0x%08x)", RGXFWIF_INICFG_POLL_COUNTERS_EN);
+	PDUMPCOMMENT("( Enable register configuration: 0x%08x)", RGXFWIF_INICFG_REGCONFIG_EN);
+	PDUMPCOMMENT("( Assert on TA Out-of-Memory: 0x%08x)", RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY);
+	PDUMPCOMMENT("( Disable HWPerf counter filter: 0x%08x)", RGXFWIF_INICFG_HWP_DISABLE_FILTER);
+	PDUMPCOMMENT("( Enable HWPerf custom performance timer: 0x%08x)", RGXFWIF_INICFG_CUSTOM_PERF_TIMER_EN);
+	PDUMPCOMMENT("( Enable Ctx Switch profile mode: 0x%08x (none=d'0, fast=d'1, medium=d'2, slow=d'3, nodelay=d'4))", RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK);
+	PDUMPCOMMENT("( Disable DM overlap (except TA during SPM): 0x%08x)", RGXFWIF_INICFG_DISABLE_DM_OVERLAP);
+	PDUMPCOMMENT("( Assert on HWR trigger (page fault, lockup, overrun or poll failure): 0x%08x)", RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER);
+	PDUMPCOMMENT("( Enable coherent memory accesses: 0x%08x)", RGXFWIF_INICFG_FABRIC_COHERENCY_ENABLED);
+	PDUMPCOMMENT("( Enable IRQ validation: 0x%08x)", RGXFWIF_INICFG_VALIDATE_IRQ);
+	PDUMPCOMMENT("( SPU power state mask change Enable: 0x%08x)",
RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN);
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+	PDUMPCOMMENT("( Enable Workload Estimation: 0x%08x)", RGXFWIF_INICFG_WORKEST);
+#if defined(SUPPORT_PDVFS)
+	PDUMPCOMMENT("( Enable Proactive DVFS: 0x%08x)", RGXFWIF_INICFG_PDVFS);
+#endif /* defined(SUPPORT_PDVFS) */
+#endif /* defined(SUPPORT_WORKLOAD_ESTIMATION) */
+	PDUMPCOMMENT("( CDM Arbitration Mode (task demand=b'01, round robin=b'10): 0x%08x)", RGXFWIF_INICFG_CDM_ARBITRATION_MASK);
+	PDUMPCOMMENT("( ISP Scheduling Mode (v1=b'01, v2=b'10): 0x%08x)", RGXFWIF_INICFG_ISPSCHEDMODE_MASK);
+	PDUMPCOMMENT("( Validate SOC & USC timers: 0x%08x)", RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER);
+
+	/* Commit the (editable) system config flags to the pdump stream */
+	DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfFwSysDataMemDesc,
+							  offsetof(RGXFWIF_SYSDATA, ui32ConfigFlags),
+							  ui32ConfigFlags,
+							  PDUMP_FLAGS_CONTINUOUS);
+
+	PDUMPCOMMENT("( Extended FW system config options not used.)");
+
+	PDUMPCOMMENT("(Set the FW OS config options here)");
+	PDUMPCOMMENT("( Ctx Switch TDM Enable: 0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN);
+	PDUMPCOMMENT("( Ctx Switch GEOM Enable: 0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN);
+	PDUMPCOMMENT("( Ctx Switch 3D Enable: 0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN);
+	PDUMPCOMMENT("( Ctx Switch CDM Enable: 0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN);
+	PDUMPCOMMENT("( Lower Priority Ctx Switch 2D Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_TDM);
+	PDUMPCOMMENT("( Lower Priority Ctx Switch TA Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_GEOM);
+	PDUMPCOMMENT("( Lower Priority Ctx Switch 3D Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_3D);
+	PDUMPCOMMENT("( Lower Priority Ctx Switch CDM Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_CDM);
+
+	/* Commit the (editable) per-OS config flags to the pdump stream */
+	DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfFwOsDataMemDesc,
+							  offsetof(RGXFWIF_OSDATA, ui32FwOsConfigFlags),
+							  ui32FwOsCfgFlags,
+							  PDUMP_FLAGS_CONTINUOUS);
+
+	{
+		PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS;
+		IMG_UINT32 ui32AllPowUnitsMask = (1 << psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount) - 1;
+#if defined(SUPPORT_VALIDATION)
+		IMG_BOOL bRunTimeUpdate = (RGX_GET_FEATURE_VALUE(psDevInfo, POWER_ISLAND_VERSION) == 1);
+#else
+		IMG_BOOL bRunTimeUpdate = IMG_FALSE;
+#endif
+		IMG_UINT32 ui32DstOffset = psDevInfo->psRGXFWIfRuntimeCfgMemDesc->uiOffset + offsetof(RGXFWIF_RUNTIME_CFG, ui32PowUnitsStateMask);
+		IMG_CHAR aszPowUnitsMaskRegVar[] = ":SYSMEM:$1";
+		IMG_CHAR aszPowUnitsEnable[] = "RUNTIME_POW_UNITS_MASK";
+		PMR *psPMR = (PMR *)(psDevInfo->psRGXFWIfRuntimeCfgMemDesc->psImport->hPMR);
+
+
+		/* Either emit a static mask, or (validation builds with power island
+		 * v1) an IF/ELSE pdump construct that reads the mask from HW */
+		if (bRunTimeUpdate)
+		{
+			PDUMPIF(aszPowUnitsEnable, ui32PDumpFlags);
+		}
+
+		PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Load initial value power units mask in FW runtime configuration");
+		DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc,
+								  ui32DstOffset,
+								  psDevInfo->psRGXFWIfRuntimeCfg->ui32PowUnitsStateMask,
+								  ui32PDumpFlags);
+
+		if (bRunTimeUpdate)
+		{
+			PDUMPELSE(aszPowUnitsEnable, ui32PDumpFlags);
+			PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Read initial SPU mask value from HW registers");
+			PDumpRegRead32ToInternalVar(RGX_PDUMPREG_NAME, RGX_CR_SPU_ENABLE, aszPowUnitsMaskRegVar, ui32PDumpFlags);
+			PDumpWriteVarANDValueOp(aszPowUnitsMaskRegVar, ui32AllPowUnitsMask, ui32PDumpFlags);
+			PDumpInternalVarToMemLabel(psPMR, ui32DstOffset, aszPowUnitsMaskRegVar, ui32PDumpFlags);
+			PDUMPFI(aszPowUnitsEnable, ui32PDumpFlags);
+		}
+	}
+
+#if defined(SUPPORT_SECURITY_VALIDATION)
+	PDUMPCOMMENT("(Select one or more security tests here)");
+	PDUMPCOMMENT("( Read/write FW private data from non-FW contexts: 0x%08x)", RGXFWIF_SECURE_ACCESS_TEST_READ_WRITE_FW_DATA);
+	PDUMPCOMMENT("( Read/write FW code from non-FW contexts: 0x%08x)", RGXFWIF_SECURE_ACCESS_TEST_READ_WRITE_FW_CODE);
+	PDUMPCOMMENT("( Execute FW code from non-secure memory: 0x%08x)", RGXFWIF_SECURE_ACCESS_TEST_RUN_FROM_NONSECURE);
+	PDUMPCOMMENT("( Execute FW code from secure (non-FW) memory: 0x%08x)", RGXFWIF_SECURE_ACCESS_TEST_RUN_FROM_SECURE);
+
+	DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc,
+							  offsetof(RGXFWIF_SYSINIT, ui32SecurityTestFlags),
+							  psDevInfo->psRGXFWIfSysInit->ui32SecurityTestFlags,
+							  PDUMP_FLAGS_CONTINUOUS);
+#endif /* defined(SUPPORT_SECURITY_VALIDATION) */
+
+	PDUMPCOMMENT("( PID filter type: %X=INCLUDE_ALL_EXCEPT, %X=EXCLUDE_ALL_EXCEPT)",
+				 RGXFW_PID_FILTER_INCLUDE_ALL_EXCEPT,
+				 RGXFW_PID_FILTER_EXCLUDE_ALL_EXCEPT);
+
+	DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc,
+							  offsetof(RGXFWIF_SYSINIT, sPIDFilter.eMode),
+							  psDevInfo->psRGXFWIfSysInit->sPIDFilter.eMode,
+							  PDUMP_FLAGS_CONTINUOUS);
+
+	PDUMPCOMMENT("( PID filter PID/OSID list (Up to %u entries. Terminate with a zero PID))",
+				 RGXFWIF_PID_FILTER_MAX_NUM_PIDS);
+	{
+		IMG_UINT32 i;
+
+		/* generate a few WRWs in the pdump stream as an example */
+		for (i = 0; i < MIN(RGXFWIF_PID_FILTER_MAX_NUM_PIDS, 8); i++)
+		{
+			/*
+			 * Some compilers cannot cope with the uses of offsetof() below - the specific problem being the use of
+			 * a non-const variable in the expression, which it needs to be const. Typical compiler output is
+			 * "expression must have a constant value".
+			 */
+			const IMG_DEVMEM_OFFSET_T uiPIDOff
+			= (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_SYSINIT *)0)->sPIDFilter.asItems[i].uiPID);
+
+			const IMG_DEVMEM_OFFSET_T uiOSIDOff
+			= (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_SYSINIT *)0)->sPIDFilter.asItems[i].ui32OSID);
+
+			PDUMPCOMMENT("(PID and OSID pair %u)", i);
+
+			PDUMPCOMMENT("(PID)");
+			DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc,
+									  uiPIDOff,
+									  0,
+									  PDUMP_FLAGS_CONTINUOUS);
+
+			PDUMPCOMMENT("(OSID)");
+			DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc,
+									  uiOSIDOff,
+									  0,
+									  PDUMP_FLAGS_CONTINUOUS);
+		}
+	}
+#if defined(SUPPORT_VALIDATION)
+	PDUMPCOMMENT("(Set the FW GEOM/3D Killing Control.)");
+	DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfFwSysDataMemDesc,
+							  offsetof(RGXFWIF_SYSDATA, ui32RenderKillingCtl),
+							  ui32RenderKillingCtl,
+							  PDUMP_FLAGS_CONTINUOUS);
+
+	PDUMPCOMMENT("(Set the FW CDM/TDM Killing Control.)");
+	DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfFwSysDataMemDesc,
+							  offsetof(RGXFWIF_SYSDATA, ui32CDMTDMKillingCtl),
+							  ui32CDMTDMKillingCtl,
+							  PDUMP_FLAGS_CONTINUOUS);
+#endif /* defined(SUPPORT_VALIDATION) */
+	/*
+	 * Dump the log config so it can be edited.
+	 */
+	PDUMPCOMMENT("(Set the log config here)");
+	PDUMPCOMMENT("( Log Type: set bit 0 for TRACE, reset for TBI)");
+	PDUMPCOMMENT("( MAIN Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MAIN);
+	PDUMPCOMMENT("( MTS Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MTS);
+	PDUMPCOMMENT("( CLEANUP Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_CLEANUP);
+	PDUMPCOMMENT("( CSW Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_CSW);
+	PDUMPCOMMENT("( BIF Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_BIF);
+	PDUMPCOMMENT("( PM Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_PM);
+	PDUMPCOMMENT("( RTD Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_RTD);
+	PDUMPCOMMENT("( SPM Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_SPM);
+	PDUMPCOMMENT("( POW Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_POW);
+	PDUMPCOMMENT("( HWR Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_HWR);
+	PDUMPCOMMENT("( HWP Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_HWP);
+
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA))
+	{
+		PDUMPCOMMENT("( DMA Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_DMA);
+	}
+
+	PDUMPCOMMENT("( MISC Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MISC);
+	PDUMPCOMMENT("( DEBUG Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_DEBUG);
+	DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+							  offsetof(RGXFWIF_TRACEBUF, ui32LogType),
+							  psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType,
+							  PDUMP_FLAGS_CONTINUOUS);
+
+	PDUMPCOMMENT("Set the HWPerf Filter config here");
+	DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfSysInitMemDesc,
+							  offsetof(RGXFWIF_SYSINIT, ui64HWPerfFilter),
+							  psDevInfo->psRGXFWIfSysInit->ui64HWPerfFilter,
+							  PDUMP_FLAGS_CONTINUOUS);
+
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+	PDUMPCOMMENT("(Number of registers configurations for types(byte index): pow on(%d), dust change(%d), ta(%d), 3d(%d), cdm(%d), TDM(%d))",
+				 RGXFWIF_REG_CFG_TYPE_PWR_ON,
+				 RGXFWIF_REG_CFG_TYPE_DUST_CHANGE,
+				 RGXFWIF_REG_CFG_TYPE_TA,
+				 RGXFWIF_REG_CFG_TYPE_3D,
+				 RGXFWIF_REG_CFG_TYPE_CDM,
+				 RGXFWIF_REG_CFG_TYPE_TDM);
+
+	{
+		IMG_UINT32 i;
+
+		/* Write 32 bits in each iteration as required by PDUMP WRW command */
+		for (i = 0; i < RGXFWIF_REG_CFG_TYPE_ALL; i += sizeof(IMG_UINT32))
+		{
+			DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRegCfgMemDesc,
+									  offsetof(RGXFWIF_REG_CFG, aui8NumRegsType[i]),
+									  0,
+									  PDUMP_FLAGS_CONTINUOUS);
+		}
+	}
+
+	PDUMPCOMMENT("(Set registers here: address, mask, value)");
+	DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfRegCfgMemDesc,
+							  offsetof(RGXFWIF_REG_CFG, asRegConfigs[0].ui64Addr),
+							  0,
+							  PDUMP_FLAGS_CONTINUOUS);
+	DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfRegCfgMemDesc,
+							  offsetof(RGXFWIF_REG_CFG, asRegConfigs[0].ui64Mask),
+							  0,
+							  PDUMP_FLAGS_CONTINUOUS);
+	DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfRegCfgMemDesc,
+							  offsetof(RGXFWIF_REG_CFG, asRegConfigs[0].ui64Value),
+							  0,
+							  PDUMP_FLAGS_CONTINUOUS);
+#endif /* SUPPORT_USER_REGISTER_CONFIGURATION */
+}
+#endif /* defined(PDUMP) */
+
+/*!
+*******************************************************************************
+ @Function    RGXSetupFwSysData
+
+ @Description Setups all system-wide firmware related data
+
+ @Input       psDevInfo
+
+ @Return      PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE       *psDeviceNode,
+									  IMG_BOOL                 bEnableSignatureChecks,
+									  IMG_UINT32               ui32SignatureChecksBufSize,
+									  IMG_UINT32               ui32HWPerfFWBufSizeKB,
+									  IMG_UINT64               ui64HWPerfFilter,
+									  IMG_UINT32               ui32ConfigFlags,
+									  IMG_UINT32               ui32ConfigFlagsExt,
+									  IMG_UINT32               ui32LogType,
+									  IMG_UINT32               ui32FilterFlags,
+									  IMG_UINT32               ui32JonesDisableMask,
+									  IMG_UINT32               ui32HWPerfCountersDataSize,
+									  IMG_UINT32               ui32RenderKillingCtl,
+									  IMG_UINT32               ui32CDMTDMKillingCtl,
+									  IMG_UINT32               *pui32TPUTrilinearFracMask,
+									  IMG_UINT32               *pui32USRMNumRegions,
+									  IMG_UINT64               *pui64UVBRMNumRegions,
+									  RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf,
+									  FW_PERF_CONF             eFirmwarePerf,
+									  IMG_UINT32               ui32AvailablePowUnitsMask)
+{
+
PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + IMG_UINT32 ui32AllPowUnitsMask = (1 << psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount) - 1; + RGXFWIF_SYSINIT *psFwSysInitScratch = NULL; + + psFwSysInitScratch = OSAllocZMem(sizeof(*psFwSysInitScratch)); + PVR_LOG_GOTO_IF_NOMEM(psFwSysInitScratch, eError, fail); + + /* Sys Fw init data */ + eError = RGXSetupFwAllocation(psDevInfo, + (RGX_FWSHAREDMEM_CONFIG_ALLOCFLAGS | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)) & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), + sizeof(RGXFWIF_SYSINIT), + "FwSysInitStructure", + &psDevInfo->psRGXFWIfSysInitMemDesc, + NULL, + (void**) &psDevInfo->psRGXFWIfSysInit, + RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "Firmware Sys Init structure allocation", fail); + +#if defined(RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_BIT_MASK) + /* Setup Fault read register */ + eError = RGXSetupFaultReadRegister(psDeviceNode, psFwSysInitScratch); + PVR_LOG_GOTO_IF_ERROR(eError, "Fault read register setup", fail); +#endif + + /* RD Power Island */ + { + RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData; + IMG_BOOL bSysEnableRDPowIsland = psRGXData->psRGXTimingInfo->bEnableRDPowIsland; + IMG_BOOL bEnableRDPowIsland = ((eRGXRDPowerIslandConf == RGX_RD_POWER_ISLAND_DEFAULT) && bSysEnableRDPowIsland) || + (eRGXRDPowerIslandConf == RGX_RD_POWER_ISLAND_FORCE_ON); + + ui32ConfigFlags |= bEnableRDPowIsland? RGXFWIF_INICFG_POW_RASCALDUST : 0; + } + + /* Make sure to inform firmware if the device supports fullace fabric coherency */ + ui32ConfigFlags |= (PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig) && + PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig)) ? 
+ RGXFWIF_INICFG_FABRIC_COHERENCY_ENABLED : 0; + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + ui32ConfigFlags |= RGXFWIF_INICFG_WORKEST; +#if defined(SUPPORT_PDVFS) + { + RGXFWIF_PDVFS_OPP *psPDVFSOPPInfo; + IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg; + + /* Pro-active DVFS depends on Workload Estimation */ + psPDVFSOPPInfo = &psFwSysInitScratch->sPDVFSOPPInfo; + psDVFSDeviceCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg; + PVR_LOG_IF_FALSE(psDVFSDeviceCfg->pasOPPTable, "RGXSetupFwSysData: Missing OPP Table"); + + if (psDVFSDeviceCfg->pasOPPTable != NULL) + { + if (psDVFSDeviceCfg->ui32OPPTableSize > ARRAY_SIZE(psPDVFSOPPInfo->asOPPValues)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: OPP Table too large: Size = %u, Maximum size = %lu", + __func__, + psDVFSDeviceCfg->ui32OPPTableSize, + (unsigned long)(ARRAY_SIZE(psPDVFSOPPInfo->asOPPValues)))); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto fail; + } + + OSDeviceMemCopy(psPDVFSOPPInfo->asOPPValues, + psDVFSDeviceCfg->pasOPPTable, + sizeof(psPDVFSOPPInfo->asOPPValues)); + + psPDVFSOPPInfo->ui32MaxOPPPoint = psDVFSDeviceCfg->ui32OPPTableSize - 1; + + ui32ConfigFlags |= RGXFWIF_INICFG_PDVFS; + } + } +#endif /* defined(SUPPORT_PDVFS) */ +#endif /* defined(SUPPORT_WORKLOAD_ESTIMATION) */ + + /* FW trace control structure */ + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWINITDATA_WC_ALLOCFLAGS & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), + sizeof(RGXFWIF_TRACEBUF), + "FwTraceCtlStruct", + &psDevInfo->psRGXFWIfTraceBufCtlMemDesc, + &psFwSysInitScratch->sTraceBufCtl, + (void**) &psDevInfo->psRGXFWIfTraceBufCtl, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); + + if (!psDeviceNode->bAutoVzFwIsUp) + { + /* Set initial firmware log type/group(s) */ + if (ui32LogType & ~RGXFWIF_LOG_TYPE_MASK) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid initial log type (0x%X)", + __func__, ui32LogType)); + goto fail; + } + 
psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType = ui32LogType; + } + + /* When PDUMP is enabled, ALWAYS allocate on-demand trace buffer resource + * (irrespective of loggroup(s) enabled), given that logtype/loggroups can + * be set during PDump playback in logconfig, at any point of time, + * Otherwise, allocate only if required. */ +#if !defined(PDUMP) +#if defined(SUPPORT_AUTOVZ) + /* always allocate trace buffer for AutoVz Host drivers to allow + * deterministic addresses of all SysData structures */ + if ((PVRSRV_VZ_MODE_IS(HOST)) || (RGXTraceBufferIsInitRequired(psDevInfo))) +#else + if (RGXTraceBufferIsInitRequired(psDevInfo)) +#endif +#endif + { + eError = RGXTraceBufferInitOnDemandResources(psDevInfo, + RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp)); + } + PVR_LOG_GOTO_IF_ERROR(eError, "RGXTraceBufferInitOnDemandResources", fail); + + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), + sizeof(RGXFWIF_SYSDATA), + "FwSysData", + &psDevInfo->psRGXFWIfFwSysDataMemDesc, + &psFwSysInitScratch->sFwSysData, + (void**) &psDevInfo->psRGXFWIfFwSysData, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); + + /* GPIO validation setup */ + psFwSysInitScratch->eGPIOValidationMode = RGXFWIF_GPIO_VAL_OFF; +#if defined(SUPPORT_VALIDATION) + { + IMG_INT32 ui32GPIOValidationMode; + + /* Check AppHint for GPIO validation mode */ + pvr_apphint_get_uint32(APPHINT_ID_GPIOValidationMode, &ui32GPIOValidationMode); + + if (ui32GPIOValidationMode >= RGXFWIF_GPIO_VAL_LAST) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid GPIO validation mode: %d, only valid if smaller than %d. 
Disabling GPIO validation.", + __func__, + ui32GPIOValidationMode, + RGXFWIF_GPIO_VAL_LAST)); + } + else + { + psFwSysInitScratch->eGPIOValidationMode = (RGXFWIF_GPIO_VAL_MODE) ui32GPIOValidationMode; + } + + psFwSysInitScratch->eGPIOValidationMode = ui32GPIOValidationMode; + } + + //if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_STATE_PIN)) + { + IMG_BOOL bGPUStatePin; + + /* Check AppHint for GPU state pin */ + pvr_apphint_get_bool(APPHINT_ID_GPUStatePin, &bGPUStatePin); + + psDevInfo->ui32ValidationFlags |= (bGPUStatePin) ? RGX_VAL_GPUSTATEPIN_EN : 0; + } + + { + IMG_UINT32 ui32EnablePollOnChecksumErrorStatus; + + /* Check AppHint for polling on GPU Checksum status */ + pvr_apphint_get_uint32(APPHINT_ID_EnablePollOnChecksumErrorStatus, &ui32EnablePollOnChecksumErrorStatus); + + switch (ui32EnablePollOnChecksumErrorStatus) + { + case 1: psDevInfo->ui32ValidationFlags |= RGX_VAL_FBDC_SIG_CHECK_NOERR_EN; break; + case 2: psDevInfo->ui32ValidationFlags |= RGX_VAL_FBDC_SIG_CHECK_ERR_EN; break; + case 3: psDevInfo->ui32ValidationFlags |= RGX_VAL_KZ_SIG_CHECK_NOERR_EN; break; + case 4: psDevInfo->ui32ValidationFlags |= RGX_VAL_KZ_SIG_CHECK_ERR_EN; break; + default: + PVR_DPF((PVR_DBG_WARNING, "Unsupported value in EnablePollOnChecksumErrorStatus (%d)", ui32EnablePollOnChecksumErrorStatus)); + break; + } + } + + /* Check AppHint for power island transition interval */ + pvr_apphint_get_uint32(APPHINT_ID_PowerDomainKickInterval, &psDevInfo->ui32PowDomainKickInterval); + +#if defined(SUPPORT_RAY_TRACING) + { + IMG_UINT64 ui64RCEDisableMask; + pvr_apphint_get_uint64(APPHINT_ID_RCEDisableMask, &ui64RCEDisableMask); + psFwSysInitScratch->ui64RCEDisableMask = ui64RCEDisableMask; + + } +#endif + +#endif /* defined(SUPPORT_VALIDATION) */ + +#if defined(SUPPORT_FIRMWARE_GCOV) + eError = RGXFWSetupFirmwareGcovBuffer(psDevInfo, + &psDevInfo->psFirmwareGcovBufferMemDesc, + RGXFWIF_FIRMWARE_GCOV_BUFFER_SIZE, + &psFwSysInitScratch->sFirmwareGcovCtl, + "FirmwareGcovBuffer"); + 
PVR_LOG_GOTO_IF_ERROR(eError, "Firmware GCOV buffer allocation", fail); + psDevInfo->ui32FirmwareGcovSize = RGXFWIF_FIRMWARE_GCOV_BUFFER_SIZE; +#endif /* defined(SUPPORT_FIRMWARE_GCOV) */ + +#if defined(PDUMP) + /* Require a minimum amount of memory for the signature buffers */ + if (ui32SignatureChecksBufSize < RGXFW_SIG_BUFFER_SIZE_MIN) + { + ui32SignatureChecksBufSize = RGXFW_SIG_BUFFER_SIZE_MIN; + } + + /* Setup Signature and Checksum Buffers for TDM, GEOM, 3D and CDM */ + eError = RGXFWSetupSignatureChecks(psDevInfo, + &psDevInfo->psRGXFWSigTDMChecksMemDesc, + ui32SignatureChecksBufSize, + &psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_TDM]); + PVR_LOG_GOTO_IF_ERROR(eError, "TDM Signature check setup", fail); + psDevInfo->ui32SigTDMChecksSize = ui32SignatureChecksBufSize; + + eError = RGXFWSetupSignatureChecks(psDevInfo, + &psDevInfo->psRGXFWSigTAChecksMemDesc, + ui32SignatureChecksBufSize, + &psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_GEOM]); + PVR_LOG_GOTO_IF_ERROR(eError, "GEOM Signature check setup", fail); + psDevInfo->ui32SigTAChecksSize = ui32SignatureChecksBufSize; + + eError = RGXFWSetupSignatureChecks(psDevInfo, + &psDevInfo->psRGXFWSig3DChecksMemDesc, + ui32SignatureChecksBufSize, + &psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_3D]); + PVR_LOG_GOTO_IF_ERROR(eError, "3D Signature check setup", fail); + psDevInfo->ui32Sig3DChecksSize = ui32SignatureChecksBufSize; + + eError = RGXFWSetupSignatureChecks(psDevInfo, + &psDevInfo->psRGXFWSigCDMChecksMemDesc, + ui32SignatureChecksBufSize, + &psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_CDM]); + PVR_LOG_GOTO_IF_ERROR(eError, "CDM Signature check setup", fail); + psDevInfo->ui32SigCDMChecksSize = ui32SignatureChecksBufSize; + +#if defined(SUPPORT_VALIDATION) + eError = RGXFWSetupSignatureChecks(psDevInfo, + &psDevInfo->psRGXFWValidationSigMemDesc, + ui32SignatureChecksBufSize, + &psFwSysInitScratch->asValidationSigBufCtl[RGXFWIF_DM_3D]); + psFwSysInitScratch->asValidationSigBufCtl[RGXFWIF_DM_CDM] = 
psFwSysInitScratch->asValidationSigBufCtl[RGXFWIF_DM_3D]; + PVR_LOG_GOTO_IF_ERROR(eError, "FBCDC/TRP/WGP Signature check setup", fail); + psDevInfo->ui32ValidationSigSize = ui32SignatureChecksBufSize; +#endif +#endif + + if (!bEnableSignatureChecks) + { + psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_TDM].sBuffer.ui32Addr = 0x0; + psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_GEOM].sBuffer.ui32Addr = 0x0; + psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_3D].sBuffer.ui32Addr = 0x0; + psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_CDM].sBuffer.ui32Addr = 0x0; + } + + eError = RGXFWSetupAlignChecks(psDeviceNode, + &psFwSysInitScratch->sAlignChecks); + PVR_LOG_GOTO_IF_ERROR(eError, "Alignment checks setup", fail); + + psFwSysInitScratch->ui32FilterFlags = ui32FilterFlags; + + /* Fill the remaining bits of fw the init data */ + psFwSysInitScratch->sPDSExecBase.uiAddr = RGX_PDSCODEDATA_HEAP_BASE; + psFwSysInitScratch->sUSCExecBase.uiAddr = RGX_USCCODE_HEAP_BASE; + psFwSysInitScratch->sFBCDCStateTableBase.uiAddr = RGX_FBCDC_HEAP_BASE; + psFwSysInitScratch->sFBCDCLargeStateTableBase.uiAddr = RGX_FBCDC_LARGE_HEAP_BASE; + psFwSysInitScratch->sTextureHeapBase.uiAddr = RGX_TEXTURE_STATE_HEAP_BASE; + psFwSysInitScratch->sPDSIndirectHeapBase.uiAddr = RGX_PDS_INDIRECT_STATE_HEAP_BASE; + + psFwSysInitScratch->ui32JonesDisableMask = ui32JonesDisableMask; + + eError = _AllocateSLC3Fence(psDevInfo, psFwSysInitScratch); + PVR_LOG_GOTO_IF_ERROR(eError, "SLC3Fence memory allocation", fail); + +#if defined(SUPPORT_PDVFS) + /* Core clock rate */ + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), + sizeof(IMG_UINT32), + "FwPDVFSCoreClkRate", + &psDevInfo->psRGXFWIFCoreClkRateMemDesc, + &psFwSysInitScratch->sCoreClockRate, + (void**) &psDevInfo->pui32RGXFWIFCoreClkRate, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "PDVFS core clock rate memory setup", fail); +#endif + { + PVRSRV_MEMALLOCFLAGS_T 
uiMemAllocFlags; + /* Timestamps */ + uiMemAllocFlags = + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN) | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | /* XXX ?? */ + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | + PVRSRV_MEMALLOCFLAG_UNCACHED | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC; + + /* + the timer query arrays + */ + PDUMPCOMMENT("Allocate timer query arrays (FW)"); + eError = DevmemFwAllocate(psDevInfo, + sizeof(IMG_UINT64) * RGX_MAX_TIMER_QUERIES, + uiMemAllocFlags, + "FwStartTimesArray", + & psDevInfo->psStartTimeMemDesc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map start times array", + __func__)); + goto fail; + } + + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psStartTimeMemDesc, + (void **)& psDevInfo->pui64StartTimeById); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map start times array", + __func__)); + goto fail; + } + + eError = DevmemFwAllocate(psDevInfo, + sizeof(IMG_UINT64) * RGX_MAX_TIMER_QUERIES, + uiMemAllocFlags, + "FwEndTimesArray", + & psDevInfo->psEndTimeMemDesc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map end times array", + __func__)); + goto fail; + } + + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psEndTimeMemDesc, + (void **)& psDevInfo->pui64EndTimeById); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map end times array", + __func__)); + goto fail; + } + + eError = DevmemFwAllocate(psDevInfo, + sizeof(IMG_UINT32) * RGX_MAX_TIMER_QUERIES, + uiMemAllocFlags, + "FwCompletedOpsArray", + & psDevInfo->psCompletedMemDesc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to completed ops array", + __func__)); + goto fail; + } + + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psCompletedMemDesc, + (void **)& psDevInfo->pui32CompletedById); + + if (eError != PVRSRV_OK) + { + 
PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map completed ops array", + __func__)); + goto fail; + } + +#if !defined(PVRSRV_USE_BRIDGE_LOCK) + eError = OSLockCreate(&psDevInfo->hTimerQueryLock); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate log for timer query", + __func__)); + goto fail; + } +#endif + } +#if defined(SUPPORT_TBI_INTERFACE) +#if !defined(PDUMP) + /* allocate only if required */ + if (RGXTBIBufferIsInitRequired(psDevInfo)) +#endif /* !defined(PDUMP) */ + { + /* When PDUMP is enabled, ALWAYS allocate on-demand TBI buffer resource + * (irrespective of loggroup(s) enabled), given that logtype/loggroups + * can be set during PDump playback in logconfig, at any point of time + */ + eError = RGXTBIBufferInitOnDemandResources(psDevInfo); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXTBIBufferInitOnDemandResources", fail); + } + + psFwSysInitScratch->sTBIBuf = psDevInfo->sRGXFWIfTBIBuffer; +#endif /* defined(SUPPORT_TBI_INTERFACE) */ + + /* Allocate shared buffer for GPU utilisation */ + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), + sizeof(RGXFWIF_GPU_UTIL_FWCB), + "FwGPUUtilisationBuffer", + &psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc, + &psFwSysInitScratch->sGpuUtilFWCbCtl, + (void**) &psDevInfo->psRGXFWIfGpuUtilFWCb, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "GPU Utilisation Buffer ctl allocation", fail); + + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWSHAREDMEM_GPU_RO_ALLOCFLAGS & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), + sizeof(RGXFWIF_RUNTIME_CFG), + "FwRuntimeCfg", + &psDevInfo->psRGXFWIfRuntimeCfgMemDesc, + &psFwSysInitScratch->sRuntimeCfg, + (void**) &psDevInfo->psRGXFWIfRuntimeCfg, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "Firmware runtime configuration memory allocation", fail); + +#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) + eError = RGXSetupFwAllocation(psDevInfo, 
+ RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), + sizeof(RGXFWIF_REG_CFG), + "FwRegisterConfigStructure", + &psDevInfo->psRGXFWIfRegCfgMemDesc, + &psFwSysInitScratch->sRegCfg, + NULL, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "Firmware register user configuration structure allocation", fail); +#endif + + psDevInfo->ui32RGXFWIfHWPerfBufSize = GetHwPerfBufferSize(ui32HWPerfFWBufSizeKB); + /* Second stage initialisation or HWPerf, hHWPerfLock created in first + * stage. See RGXRegisterDevice() call to RGXHWPerfInit(). */ + if (psDevInfo->ui64HWPerfFilter == 0) + { + psDevInfo->ui64HWPerfFilter = ui64HWPerfFilter; + psFwSysInitScratch->ui64HWPerfFilter = ui64HWPerfFilter; + } + else + { + /* The filter has already been modified. This can happen if + * pvr/apphint/EnableFTraceGPU was enabled. */ + psFwSysInitScratch->ui64HWPerfFilter = psDevInfo->ui64HWPerfFilter; + } + +#if !defined(PDUMP) + /* Allocate if HWPerf filter has already been set. This is possible either + * by setting a proper AppHint or enabling GPU ftrace events. */ + if (psDevInfo->ui64HWPerfFilter != 0) +#endif + { + /* When PDUMP is enabled, ALWAYS allocate on-demand HWPerf resources + * (irrespective of HWPerf enabled or not), given that HWPerf can be + * enabled during PDump playback via RTCONF at any point of time. 
*/ + eError = RGXHWPerfInitOnDemandResources(psDevInfo); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfInitOnDemandResources", fail); +#if defined(SUPPORT_POWMON_COMPONENT) + if (RGXPowmonBufferIsInitRequired(psDevInfo)) + { + /* Allocate power monitoring log buffer if enabled */ + eError = RGXPowmonBufferInitOnDemandResources(psDevInfo); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXPowmonBufferInitOnDemandResources", fail); + } +#endif + } + + RGXHWPerfInitAppHintCallbacks(psDeviceNode); + + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWINITDATA_WC_ALLOCFLAGS & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), + ui32HWPerfCountersDataSize, + "FwHWPerfControlStructure", + &psDevInfo->psRGXFWIfHWPerfCountersMemDesc, + &psFwSysInitScratch->sHWPerfCtl, + NULL, + RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "Firmware HW Perf control struct allocation", fail); + + psDevInfo->bPDPEnabled = (ui32ConfigFlags & RGXFWIF_INICFG_DISABLE_PDP_EN) + ? IMG_FALSE : IMG_TRUE; + + psFwSysInitScratch->eFirmwarePerf = eFirmwarePerf; + +#if defined(PDUMP) + /* default: no filter */ + psFwSysInitScratch->sPIDFilter.eMode = RGXFW_PID_FILTER_INCLUDE_ALL_EXCEPT; + psFwSysInitScratch->sPIDFilter.asItems[0].uiPID = 0; +#endif + +#if defined(SUPPORT_VALIDATION) + { + IMG_UINT32 dm; + + /* TPU trilinear rounding mask override */ + for (dm = 0; dm < RGXFWIF_TPU_DM_LAST; dm++) + { + psFwSysInitScratch->aui32TPUTrilinearFracMask[dm] = pui32TPUTrilinearFracMask[dm]; + } + + /* USRM Config override */ + for (dm = 0; dm < RGXFWIF_USRM_DM_LAST; dm++) + { + psFwSysInitScratch->aui32USRMNumRegions[dm] = pui32USRMNumRegions[dm]; + } + + /* UVBRM Config override */ + for (dm = 0; dm < RGXFWIF_UVBRM_DM_LAST; dm++) + { + psFwSysInitScratch->aui64UVBRMNumRegions[dm] = pui64UVBRMNumRegions[dm]; + } + } +#endif + +#if defined(SUPPORT_SECURITY_VALIDATION) + { + PVRSRV_MEMALLOCFLAGS_T uiFlags = RGX_FWSHAREDMEM_ALLOCFLAGS; + PVRSRV_SET_PHYS_HEAP_HINT(GPU_SECURE, uiFlags); + + PDUMPCOMMENT("Allocate 
non-secure buffer for security validation test"); + eError = DevmemFwAllocateExportable(psDeviceNode, + OSGetPageSize(), + OSGetPageSize(), + RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS, + "FwExNonSecureBuffer", + &psDevInfo->psRGXFWIfNonSecureBufMemDesc); + PVR_LOG_GOTO_IF_ERROR(eError, "Non-secure buffer allocation", fail); + + eError = RGXSetFirmwareAddress(&psFwSysInitScratch->pbNonSecureBuffer, + psDevInfo->psRGXFWIfNonSecureBufMemDesc, + 0, RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:1", fail); + + PDUMPCOMMENT("Allocate secure buffer for security validation test"); + eError = DevmemFwAllocateExportable(psDeviceNode, + OSGetPageSize(), + OSGetPageSize(), + uiFlags, + "FwExSecureBuffer", + &psDevInfo->psRGXFWIfSecureBufMemDesc); + PVR_LOG_GOTO_IF_ERROR(eError, "Secure buffer allocation", fail); + + eError = RGXSetFirmwareAddress(&psFwSysInitScratch->pbSecureBuffer, + psDevInfo->psRGXFWIfSecureBufMemDesc, + 0, RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:2", fail); + } +#endif /* SUPPORT_SECURITY_VALIDATION */ + + /* Initialize FW started flag */ + psFwSysInitScratch->bFirmwareStarted = IMG_FALSE; + psFwSysInitScratch->ui32MarkerVal = 1; + + if (!psDeviceNode->bAutoVzFwIsUp) + { + IMG_UINT32 ui32OSIndex; + + RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData; + RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; + + /* Required info by FW to calculate the ActivePM idle timer latency */ + psFwSysInitScratch->ui32InitialCoreClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed; + psFwSysInitScratch->ui32InitialActivePMLatencyms = psRGXData->psRGXTimingInfo->ui32ActivePMLatencyms; + + /* Initialise variable runtime configuration to the system defaults */ + psRuntimeCfg->ui32CoreClockSpeed = psFwSysInitScratch->ui32InitialCoreClockSpeed; + psRuntimeCfg->ui32ActivePMLatencyms = psFwSysInitScratch->ui32InitialActivePMLatencyms; + psRuntimeCfg->bActivePMLatencyPersistant 
= IMG_TRUE; + psRuntimeCfg->ui32WdgPeriodUs = RGXFW_SAFETY_WATCHDOG_PERIOD_IN_US; + psRuntimeCfg->ui32HCSDeadlineMS = RGX_HCS_DEFAULT_DEADLINE_MS; + + for (ui32OSIndex = 0; ui32OSIndex < RGX_NUM_OS_SUPPORTED; ui32OSIndex++) + { + const IMG_INT32 ai32DefaultOsPriority[RGXFW_MAX_NUM_OS] = + {RGX_OSID_0_DEFAULT_PRIORITY, RGX_OSID_1_DEFAULT_PRIORITY, RGX_OSID_2_DEFAULT_PRIORITY, RGX_OSID_3_DEFAULT_PRIORITY, + RGX_OSID_4_DEFAULT_PRIORITY, RGX_OSID_5_DEFAULT_PRIORITY, RGX_OSID_6_DEFAULT_PRIORITY, RGX_OSID_7_DEFAULT_PRIORITY}; + + /* Set up initial priorities between different OSes */ + psRuntimeCfg->aui32OSidPriority[ui32OSIndex] = (IMG_UINT32)ai32DefaultOsPriority[ui32OSIndex]; + } + +#if defined(PVR_ENABLE_PHR) && defined(PDUMP) + psRuntimeCfg->ui32PHRMode = RGXFWIF_PHR_MODE_RD_RESET; +#else + psRuntimeCfg->ui32PHRMode = 0; +#endif + + /* Validate the power units mask and initialize to number of units to power up */ + if ((ui32AvailablePowUnitsMask & ui32AllPowUnitsMask) == 0) + { + eError = PVRSRV_ERROR_INVALID_SPU_MASK; + PVR_DPF((PVR_DBG_ERROR, + "%s:Invalid power units mask (All=0x%X, Non Fused=0x%X). 
At-least one power unit must to be powered up.", + __func__, + ui32AllPowUnitsMask, + ui32AvailablePowUnitsMask)); + goto fail; + } + psRuntimeCfg->ui32PowUnitsStateMask = ui32AvailablePowUnitsMask & ui32AllPowUnitsMask; + + /* flush write buffers for psDevInfo->psRGXFWIfRuntimeCfg */ + OSWriteMemoryBarrier(); + + /* Setup FW coremem data */ + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META_COREMEM_SIZE)) + { + psFwSysInitScratch->sCorememDataStore.pbyFWAddr = psDevInfo->sFWCorememDataStoreFWAddr; + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA)) + { + RGXSetMetaDMAAddress(&psFwSysInitScratch->sCorememDataStore, + psDevInfo->psRGXFWIfCorememDataStoreMemDesc, + &psFwSysInitScratch->sCorememDataStore.pbyFWAddr, + 0); + } + } + + psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags = ui32ConfigFlags & RGXFWIF_INICFG_ALL; + psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlagsExt = ui32ConfigFlagsExt & RGXFWIF_INICFG_EXT_ALL; +#if defined(SUPPORT_VALIDATION) + psDevInfo->psRGXFWIfFwSysData->ui32RenderKillingCtl = ui32RenderKillingCtl; + psDevInfo->psRGXFWIfFwSysData->ui32CDMTDMKillingCtl = ui32CDMTDMKillingCtl; +#else + PVR_UNREFERENCED_PARAMETER(ui32RenderKillingCtl); + PVR_UNREFERENCED_PARAMETER(ui32CDMTDMKillingCtl); +#endif + + /* Initialise GPU utilisation buffer */ + psDevInfo->psRGXFWIfGpuUtilFWCb->ui64LastWord = + RGXFWIF_GPU_UTIL_MAKE_WORD(OSClockns64(),RGXFWIF_GPU_UTIL_STATE_IDLE); + + /* init HWPERF data */ + psDevInfo->psRGXFWIfFwSysData->ui32HWPerfRIdx = 0; + psDevInfo->psRGXFWIfFwSysData->ui32HWPerfWIdx = 0; + psDevInfo->psRGXFWIfFwSysData->ui32HWPerfWrapCount = 0; + psDevInfo->psRGXFWIfFwSysData->ui32HWPerfSize = psDevInfo->ui32RGXFWIfHWPerfBufSize; + psDevInfo->psRGXFWIfFwSysData->ui32HWPerfUt = 0; + psDevInfo->psRGXFWIfFwSysData->ui32HWPerfDropCount = 0; + psDevInfo->psRGXFWIfFwSysData->ui32FirstDropOrdinal = 0; + psDevInfo->psRGXFWIfFwSysData->ui32LastDropOrdinal = 0; + + /*Send through the BVNC Feature Flags*/ + eError = 
RGXServerFeatureFlagsToHWPerfFlags(psDevInfo, &psFwSysInitScratch->sBvncKmFeatureFlags); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXServerFeatureFlagsToHWPerfFlags", fail); + + /* populate the real FwOsInit structure with the values stored in the scratch copy */ + OSCachedMemCopyWMB(psDevInfo->psRGXFWIfSysInit, psFwSysInitScratch, sizeof(RGXFWIF_SYSINIT)); + } + + OSFreeMem(psFwSysInitScratch); + + return PVRSRV_OK; + +fail: + if (psFwSysInitScratch) + { + OSFreeMem(psFwSysInitScratch); + } + + RGXFreeFwSysData(psDevInfo); + + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +/*! +******************************************************************************* + @Function RGXSetupFwOsData + + @Description Sets up all os-specific firmware related data + + @Input psDevInfo + + @Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR RGXSetupFwOsData(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32HWRDebugDumpLimit, + IMG_UINT32 ui32FwOsCfgFlags) +{ + PVRSRV_ERROR eError; + RGXFWIF_OSINIT sFwOsInitScratch; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + OSCachedMemSet(&sFwOsInitScratch, 0, sizeof(RGXFWIF_OSINIT)); + + /* Memory tracking the connection state should be non-volatile and + * is not cleared on allocation to prevent loss of pre-reset information */ + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWSHAREDMEM_CONFIG_ALLOCFLAGS & + ~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC, + sizeof(RGXFWIF_CONNECTION_CTL), + "FwConnectionCtl", + &psDevInfo->psRGXFWIfConnectionCtlMemDesc, + NULL, + (void**) &psDevInfo->psRGXFWIfConnectionCtl, + RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "Firmware Connection Control structure allocation", fail); + + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWSHAREDMEM_CONFIG_ALLOCFLAGS | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED), + sizeof(RGXFWIF_OSINIT), + "FwOsInitStructure", + &psDevInfo->psRGXFWIfOsInitMemDesc, + NULL, + (void**) 
&psDevInfo->psRGXFWIfOsInit, + RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "Firmware Os Init structure allocation", fail); + + /* init HWR frame info */ + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS, + sizeof(RGXFWIF_HWRINFOBUF), + "FwHWRInfoBuffer", + &psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc, + &sFwOsInitScratch.sRGXFWIfHWRInfoBufCtl, + (void**) &psDevInfo->psRGXFWIfHWRInfoBufCtl, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "HWR Info Buffer allocation", fail); + + /* Might be uncached. Be conservative and use a DeviceMemSet */ + OSDeviceMemSet(psDevInfo->psRGXFWIfHWRInfoBufCtl, 0, sizeof(RGXFWIF_HWRINFOBUF)); + + /* Allocate a sync for power management */ + eError = SyncPrimContextCreate(psDevInfo->psDeviceNode, + &psDevInfo->hSyncPrimContext); + PVR_LOG_GOTO_IF_ERROR(eError, "Sync primitive context allocation", fail); + + eError = SyncPrimAlloc(psDevInfo->hSyncPrimContext, &psDevInfo->psPowSyncPrim, "fw power ack"); + PVR_LOG_GOTO_IF_ERROR(eError, "Sync primitive allocation", fail); + + /* Set up kernel CCB */ + eError = RGXSetupCCB(psDevInfo, + &psDevInfo->psKernelCCBCtl, + &psDevInfo->psKernelCCBCtlMemDesc, + &psDevInfo->psKernelCCB, + &psDevInfo->psKernelCCBMemDesc, + &sFwOsInitScratch.psKernelCCBCtl, + &sFwOsInitScratch.psKernelCCB, + RGXFWIF_KCCB_NUMCMDS_LOG2, + sizeof(RGXFWIF_KCCB_CMD), + (RGX_FWSHAREDMEM_GPU_RO_ALLOCFLAGS | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)), + "FwKernelCCB"); + PVR_LOG_GOTO_IF_ERROR(eError, "Kernel CCB allocation", fail); + + /* KCCB additionally uses a return slot array for FW to be able to send back + * return codes for each required command + */ + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS, + (1U << RGXFWIF_KCCB_NUMCMDS_LOG2) * sizeof(IMG_UINT32), + "FwKernelCCBRtnSlots", + &psDevInfo->psKernelCCBRtnSlotsMemDesc, + &sFwOsInitScratch.psKernelCCBRtnSlots, + (void**) &psDevInfo->pui32KernelCCBRtnSlots, + RFW_FWADDR_NOREF_FLAG); + 
PVR_LOG_GOTO_IF_ERROR(eError, "Kernel CCB return slot array allocation", fail); + + /* Set up firmware CCB */ + eError = RGXSetupCCB(psDevInfo, + &psDevInfo->psFirmwareCCBCtl, + &psDevInfo->psFirmwareCCBCtlMemDesc, + &psDevInfo->psFirmwareCCB, + &psDevInfo->psFirmwareCCBMemDesc, + &sFwOsInitScratch.psFirmwareCCBCtl, + &sFwOsInitScratch.psFirmwareCCB, + RGXFWIF_FWCCB_NUMCMDS_LOG2, + sizeof(RGXFWIF_FWCCB_CMD), + RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS, + "FwCCB"); + PVR_LOG_GOTO_IF_ERROR(eError, "Firmware CCB allocation", fail); + + eError = RGXSetupFwAllocation(psDevInfo, + RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS, + sizeof(RGXFWIF_OSDATA), + "FwOsData", + &psDevInfo->psRGXFWIfFwOsDataMemDesc, + &sFwOsInitScratch.sFwOsData, + (void**) &psDevInfo->psRGXFWIfFwOsData, + RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); + + psDevInfo->psRGXFWIfFwOsData->ui32FwOsConfigFlags = ui32FwOsCfgFlags & RGXFWIF_INICFG_OS_ALL; + + eError = SyncPrimGetFirmwareAddr(psDevInfo->psPowSyncPrim, &psDevInfo->psRGXFWIfFwOsData->sPowerSync.ui32Addr); + PVR_LOG_GOTO_IF_ERROR(eError, "Get Sync Prim FW address", fail); + + sFwOsInitScratch.ui32HWRDebugDumpLimit = ui32HWRDebugDumpLimit; + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* Set up Workload Estimation firmware CCB */ + eError = RGXSetupCCB(psDevInfo, + &psDevInfo->psWorkEstFirmwareCCBCtl, + &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc, + &psDevInfo->psWorkEstFirmwareCCB, + &psDevInfo->psWorkEstFirmwareCCBMemDesc, + &sFwOsInitScratch.psWorkEstFirmwareCCBCtl, + &sFwOsInitScratch.psWorkEstFirmwareCCB, + RGXFWIF_WORKEST_FWCCB_NUMCMDS_LOG2, + sizeof(RGXFWIF_WORKEST_FWCCB_CMD), + RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS, + "FwWEstCCB"); + PVR_LOG_GOTO_IF_ERROR(eError, "Workload Estimation Firmware CCB allocation", fail); +#endif /* defined(SUPPORT_WORKLOAD_ESTIMATION) */ + + /* Initialise the compatibility check data */ + RGXFWIF_COMPCHECKS_BVNC_INIT(sFwOsInitScratch.sRGXCompChecks.sFWBVNC); + 
RGXFWIF_COMPCHECKS_BVNC_INIT(sFwOsInitScratch.sRGXCompChecks.sHWBVNC); + + /* populate the real FwOsInit structure with the values stored in the scratch copy */ + OSCachedMemCopyWMB(psDevInfo->psRGXFWIfOsInit, &sFwOsInitScratch, sizeof(RGXFWIF_OSINIT)); + + return PVRSRV_OK; + +fail: + RGXFreeFwOsData(psDevInfo); + + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +/*! +******************************************************************************* + @Function RGXSetupFirmware + + @Description Setups all firmware related data + + @Input psDevInfo + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXSetupFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_BOOL bEnableSignatureChecks, + IMG_UINT32 ui32SignatureChecksBufSize, + IMG_UINT32 ui32HWPerfFWBufSizeKB, + IMG_UINT64 ui64HWPerfFilter, + IMG_UINT32 ui32ConfigFlags, + IMG_UINT32 ui32ConfigFlagsExt, + IMG_UINT32 ui32FwOsCfgFlags, + IMG_UINT32 ui32LogType, + IMG_UINT32 ui32FilterFlags, + IMG_UINT32 ui32JonesDisableMask, + IMG_UINT32 ui32HWRDebugDumpLimit, + IMG_UINT32 ui32HWPerfCountersDataSize, + IMG_UINT32 ui32RenderKillingCtl, + IMG_UINT32 ui32CDMTDMKillingCtl, + IMG_UINT32 *pui32TPUTrilinearFracMask, + IMG_UINT32 *pui32USRMNumRegions, + IMG_UINT64 *pui64UVBRMNumRegions, + RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf, + FW_PERF_CONF eFirmwarePerf, + IMG_UINT32 ui32AvailablePowUnitsMask) +{ + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + eError = RGXSetupFwOsData(psDeviceNode, ui32HWRDebugDumpLimit, ui32FwOsCfgFlags); + PVR_LOG_GOTO_IF_ERROR(eError, "Setting up firmware os data", fail); + + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + /* Guest drivers do not configure system-wide firmware data */ + psDevInfo->psRGXFWIfSysInit = NULL; + } + else + { + /* Native and Host drivers must initialise the firmware's system data */ + eError = RGXSetupFwSysData(psDeviceNode, + bEnableSignatureChecks, + 
ui32SignatureChecksBufSize, + ui32HWPerfFWBufSizeKB, + ui64HWPerfFilter, + ui32ConfigFlags, + ui32ConfigFlagsExt, + ui32LogType, + ui32FilterFlags, + ui32JonesDisableMask, + ui32HWPerfCountersDataSize, + ui32RenderKillingCtl, + ui32CDMTDMKillingCtl, + pui32TPUTrilinearFracMask, + pui32USRMNumRegions, + pui64UVBRMNumRegions, + eRGXRDPowerIslandConf, + eFirmwarePerf, + ui32AvailablePowUnitsMask); + PVR_LOG_GOTO_IF_ERROR(eError, "Setting up firmware system data", fail); + } + + psDevInfo->bFirmwareInitialised = IMG_TRUE; + +#if defined(PDUMP) + RGXPDumpLoadFWInitData(psDevInfo, + ui32HWPerfCountersDataSize, + ui32RenderKillingCtl, + ui32CDMTDMKillingCtl, + bEnableSignatureChecks); +#endif /* PDUMP */ + +fail: + return eError; +} + +/*! +******************************************************************************* + @Function RGXFreeFwSysData + + @Description Frees all system-wide firmware related data + + @Input psDevInfo +******************************************************************************/ +static void RGXFreeFwSysData(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + psDevInfo->bFirmwareInitialised = IMG_FALSE; + + if (psDevInfo->psRGXFWAlignChecksMemDesc) + { + RGXFWFreeAlignChecks(psDevInfo); + } + +#if defined(PDUMP) + if (psDevInfo->psRGXFWSigTDMChecksMemDesc) + { + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWSigTDMChecksMemDesc); + psDevInfo->psRGXFWSigTDMChecksMemDesc = NULL; + } + + if (psDevInfo->psRGXFWSigTAChecksMemDesc) + { + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWSigTAChecksMemDesc); + psDevInfo->psRGXFWSigTAChecksMemDesc = NULL; + } + + if (psDevInfo->psRGXFWSig3DChecksMemDesc) + { + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWSig3DChecksMemDesc); + psDevInfo->psRGXFWSig3DChecksMemDesc = NULL; + } + + if (psDevInfo->psRGXFWSigCDMChecksMemDesc) + { + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWSigCDMChecksMemDesc); + psDevInfo->psRGXFWSigCDMChecksMemDesc = NULL; + } + +#if defined(SUPPORT_VALIDATION) + if 
(psDevInfo->psRGXFWValidationSigMemDesc) + { + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWValidationSigMemDesc); + psDevInfo->psRGXFWValidationSigMemDesc = NULL; + } +#endif +#endif + +#if defined(SUPPORT_FIRMWARE_GCOV) + if (psDevInfo->psFirmwareGcovBufferMemDesc) + { + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psFirmwareGcovBufferMemDesc); + psDevInfo->psFirmwareGcovBufferMemDesc = NULL; + } +#endif + +#if defined(RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_BIT_MASK) + RGXSetupFaultReadRegisterRollback(psDevInfo); +#endif + + if (psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc) + { + if (psDevInfo->psRGXFWIfGpuUtilFWCb != NULL) + { + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc); + psDevInfo->psRGXFWIfGpuUtilFWCb = NULL; + } + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc); + psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc = NULL; + } + + RGXHWPerfDeinit(psDevInfo); + + if (psDevInfo->psRGXFWIfRuntimeCfgMemDesc) + { + if (psDevInfo->psRGXFWIfRuntimeCfg != NULL) + { + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfRuntimeCfgMemDesc); + psDevInfo->psRGXFWIfRuntimeCfg = NULL; + } + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfRuntimeCfgMemDesc); + psDevInfo->psRGXFWIfRuntimeCfgMemDesc = NULL; + } + + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META_COREMEM_SIZE)) + { + if (psDevInfo->psRGXFWIfCorememDataStoreMemDesc) + { + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfCorememDataStoreMemDesc); + psDevInfo->psRGXFWIfCorememDataStoreMemDesc = NULL; + } + } + + if (psDevInfo->psRGXFWIfTraceBufCtlMemDesc) + { + if (psDevInfo->psRGXFWIfTraceBufCtl != NULL) + { + /* first deinit/free the tracebuffer allocation */ + RGXTraceBufferDeinit(psDevInfo); + +#if defined(SUPPORT_POWMON_COMPONENT) + /* second free the powmon log buffer if used */ + RGXPowmonBufferDeinit(psDevInfo); +#endif + + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufCtlMemDesc); + psDevInfo->psRGXFWIfTraceBufCtl = NULL; + } + 
DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfTraceBufCtlMemDesc); + psDevInfo->psRGXFWIfTraceBufCtlMemDesc = NULL; + } + + if (psDevInfo->psRGXFWIfFwSysDataMemDesc) + { + if (psDevInfo->psRGXFWIfFwSysData != NULL) + { + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfFwSysDataMemDesc); + psDevInfo->psRGXFWIfFwSysData = NULL; + } + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfFwSysDataMemDesc); + psDevInfo->psRGXFWIfFwSysDataMemDesc = NULL; + } + +#if defined(SUPPORT_TBI_INTERFACE) + if (psDevInfo->psRGXFWIfTBIBufferMemDesc) + { + RGXTBIBufferDeinit(psDevInfo); + } +#endif + +#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) + if (psDevInfo->psRGXFWIfRegCfgMemDesc) + { + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfRegCfgMemDesc); + psDevInfo->psRGXFWIfRegCfgMemDesc = NULL; + } +#endif + if (psDevInfo->psRGXFWIfHWPerfCountersMemDesc) + { + RGXUnsetFirmwareAddress(psDevInfo->psRGXFWIfHWPerfCountersMemDesc); + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfHWPerfCountersMemDesc); + psDevInfo->psRGXFWIfHWPerfCountersMemDesc = NULL; + } + +#if defined(SUPPORT_SECURITY_VALIDATION) + if (psDevInfo->psRGXFWIfNonSecureBufMemDesc) + { + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfNonSecureBufMemDesc); + psDevInfo->psRGXFWIfNonSecureBufMemDesc = NULL; + } + + if (psDevInfo->psRGXFWIfSecureBufMemDesc) + { + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfSecureBufMemDesc); + psDevInfo->psRGXFWIfSecureBufMemDesc = NULL; + } +#endif + + /* Free the SLC3 fence object */ + _FreeSLC3Fence(psDevInfo); + +#if defined(SUPPORT_PDVFS) + if (psDevInfo->psRGXFWIFCoreClkRateMemDesc) + { + if (psDevInfo->pui32RGXFWIFCoreClkRate != NULL) + { + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIFCoreClkRateMemDesc); + psDevInfo->pui32RGXFWIFCoreClkRate = NULL; + } + + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIFCoreClkRateMemDesc); + psDevInfo->psRGXFWIFCoreClkRateMemDesc = NULL; + } +#endif +} + +/*! 
+******************************************************************************* + @Function RGXFreeFwOsData + + @Description Frees all os-specific firmware related data + + @Input psDevInfo +******************************************************************************/ +static void RGXFreeFwOsData(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFreeCCBReturnSlots(psDevInfo, + &psDevInfo->pui32KernelCCBRtnSlots, + &psDevInfo->psKernelCCBRtnSlotsMemDesc); + RGXFreeCCB(psDevInfo, + &psDevInfo->psKernelCCBCtl, + &psDevInfo->psKernelCCBCtlMemDesc, + &psDevInfo->psKernelCCB, + &psDevInfo->psKernelCCBMemDesc); + + RGXFreeCCB(psDevInfo, + &psDevInfo->psFirmwareCCBCtl, + &psDevInfo->psFirmwareCCBCtlMemDesc, + &psDevInfo->psFirmwareCCB, + &psDevInfo->psFirmwareCCBMemDesc); + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + RGXFreeCCB(psDevInfo, + &psDevInfo->psWorkEstFirmwareCCBCtl, + &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc, + &psDevInfo->psWorkEstFirmwareCCB, + &psDevInfo->psWorkEstFirmwareCCBMemDesc); +#endif + + if (psDevInfo->psPowSyncPrim != NULL) + { + SyncPrimFree(psDevInfo->psPowSyncPrim); + psDevInfo->psPowSyncPrim = NULL; + } + + if (psDevInfo->hSyncPrimContext != (IMG_HANDLE) NULL) + { + SyncPrimContextDestroy(psDevInfo->hSyncPrimContext); + psDevInfo->hSyncPrimContext = (IMG_HANDLE) NULL; + } + + if (psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc) + { + if (psDevInfo->psRGXFWIfHWRInfoBufCtl != NULL) + { + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc); + psDevInfo->psRGXFWIfHWRInfoBufCtl = NULL; + } + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc); + psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc = NULL; + } + + if (psDevInfo->psRGXFWIfFwOsDataMemDesc) + { + if (psDevInfo->psRGXFWIfFwOsData != NULL) + { + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfFwOsDataMemDesc); + psDevInfo->psRGXFWIfFwOsData = NULL; + } + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfFwOsDataMemDesc); + psDevInfo->psRGXFWIfFwOsDataMemDesc = NULL; + } + + if 
(psDevInfo->psCompletedMemDesc) + { + if (psDevInfo->pui32CompletedById) + { + DevmemReleaseCpuVirtAddr(psDevInfo->psCompletedMemDesc); + psDevInfo->pui32CompletedById = NULL; + } + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psCompletedMemDesc); + psDevInfo->psCompletedMemDesc = NULL; + } + if (psDevInfo->psEndTimeMemDesc) + { + if (psDevInfo->pui64EndTimeById) + { + DevmemReleaseCpuVirtAddr(psDevInfo->psEndTimeMemDesc); + psDevInfo->pui64EndTimeById = NULL; + } + + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psEndTimeMemDesc); + psDevInfo->psEndTimeMemDesc = NULL; + } + if (psDevInfo->psStartTimeMemDesc) + { + if (psDevInfo->pui64StartTimeById) + { + DevmemReleaseCpuVirtAddr(psDevInfo->psStartTimeMemDesc); + psDevInfo->pui64StartTimeById = NULL; + } + + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psStartTimeMemDesc); + psDevInfo->psStartTimeMemDesc = NULL; + } +#if !defined(PVRSRV_USE_BRIDGE_LOCK) + if (psDevInfo->hTimerQueryLock) + { + OSLockDestroy(psDevInfo->hTimerQueryLock); + psDevInfo->hTimerQueryLock = NULL; + } +#endif +} + +/*! 
+******************************************************************************* + @Function RGXFreeFirmware + + @Description Frees all the firmware-related allocations + + @Input psDevInfo +******************************************************************************/ +void RGXFreeFirmware(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFreeFwOsData(psDevInfo); + + if (psDevInfo->psRGXFWIfConnectionCtl) + { + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfConnectionCtlMemDesc); + psDevInfo->psRGXFWIfConnectionCtl = NULL; + } + + if (psDevInfo->psRGXFWIfConnectionCtlMemDesc) + { + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfConnectionCtlMemDesc); + psDevInfo->psRGXFWIfConnectionCtlMemDesc = NULL; + } + + if (psDevInfo->psRGXFWIfOsInit) + { + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfOsInitMemDesc); + psDevInfo->psRGXFWIfOsInit = NULL; + } + + if (psDevInfo->psRGXFWIfOsInitMemDesc) + { + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfOsInitMemDesc); + psDevInfo->psRGXFWIfOsInitMemDesc = NULL; + } + + RGXFreeFwSysData(psDevInfo); + if (psDevInfo->psRGXFWIfSysInit) + { + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfSysInitMemDesc); + psDevInfo->psRGXFWIfSysInit = NULL; + } + + if (psDevInfo->psRGXFWIfSysInitMemDesc) + { + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfSysInitMemDesc); + psDevInfo->psRGXFWIfSysInitMemDesc = NULL; + } +} + +/****************************************************************************** + FUNCTION : RGXAcquireKernelCCBSlot + + PURPOSE : Attempts to obtain a slot in the Kernel CCB + + PARAMETERS : psCCB - the CCB + : Address of space if available, NULL otherwise + + RETURNS : PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR RGXAcquireKernelCCBSlot(DEVMEM_MEMDESC *psKCCBCtrlMemDesc, + RGXFWIF_CCB_CTL *psKCCBCtl, + IMG_UINT32 *pui32Offset) +{ + IMG_UINT32 ui32OldWriteOffset, ui32NextWriteOffset; + + ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset; + 
ui32NextWriteOffset = (ui32OldWriteOffset + 1) & psKCCBCtl->ui32WrapMask; + + /* + * KCCB size is determined by the log2size as the MSB rounded up to the (next power of 2) -1 + * The MTS can queue up to (next power of 2) -1 kicks + * i.e. RGXFWIF_KCCB_NUMCMDS_LOG2=7, 2^7 = 128, next power of 2= 256, Max Kicks = 255 + * (254 pending kicks and 1 executing kick), + * hence the kernel CCB should not queue more than (1<<(RGXFWIF_KCCB_NUMCMDS_LOG2+1))-1 commands. + */ + PVR_ASSERT(psKCCBCtl->ui32WrapMask < ((1<<(RGXFWIF_KCCB_NUMCMDS_LOG2+1))-1)); + +#if defined(PDUMP) + /* Wait for sufficient CCB space to become available */ + PDUMPCOMMENTWITHFLAGS(0, "Wait for kCCB woff=%u", ui32NextWriteOffset); + DevmemPDumpCBP(psKCCBCtrlMemDesc, + offsetof(RGXFWIF_CCB_CTL, ui32ReadOffset), + ui32NextWriteOffset, + 1, + (psKCCBCtl->ui32WrapMask + 1)); +#endif + + if (ui32NextWriteOffset == psKCCBCtl->ui32ReadOffset) + { + return PVRSRV_ERROR_KERNEL_CCB_FULL; + } + *pui32Offset = ui32NextWriteOffset; + return PVRSRV_OK; +} + +/****************************************************************************** + FUNCTION : RGXPollKernelCCBSlot + + PURPOSE : Poll for space in Kernel CCB + + PARAMETERS : psCCB - the CCB + : Address of space if available, NULL otherwise + + RETURNS : PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR RGXPollKernelCCBSlot(DEVMEM_MEMDESC *psKCCBCtrlMemDesc, + RGXFWIF_CCB_CTL *psKCCBCtl) +{ + IMG_UINT32 ui32OldWriteOffset, ui32NextWriteOffset; + + ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset; + ui32NextWriteOffset = (ui32OldWriteOffset + 1) & psKCCBCtl->ui32WrapMask; + + /* + * KCCB size is determined by the log2size as the MSB rounded up to the (next power of 2) -1 + * The MTS can queue up to (next power of 2) -1 kicks + * i.e. 
RGXFWIF_KCCB_NUMCMDS_LOG2=7, 2^7 = 128, next power of 2= 256, Max Kicks = 255 + * (254 pending kicks and 1 executing kick), + * hence the kernel CCB should not queue more than (1<<(RGXFWIF_KCCB_NUMCMDS_LOG2+1))-1 commands. + */ + PVR_ASSERT(psKCCBCtl->ui32WrapMask < ((1<<(RGXFWIF_KCCB_NUMCMDS_LOG2+1))-1)); + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + + if (ui32NextWriteOffset != psKCCBCtl->ui32ReadOffset) + { + return PVRSRV_OK; + } + + /* + * The following check doesn't impact performance, since the + * CPU has to wait for the GPU anyway (full kernel CCB). + */ + if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK) + { + return PVRSRV_ERROR_KERNEL_CCB_FULL; + } + + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + return PVRSRV_ERROR_KERNEL_CCB_FULL; +} + +/****************************************************************************** + FUNCTION : RGXGetCmdMemCopySize + + PURPOSE : Calculates actual size of KCCB command getting used + + PARAMETERS : eCmdType Type of KCCB command + + RETURNS : Returns actual size of KCCB command on success else zero +******************************************************************************/ +static IMG_UINT32 RGXGetCmdMemCopySize(RGXFWIF_KCCB_CMD_TYPE eCmdType) +{ + /* First get offset of uCmdData inside the struct RGXFWIF_KCCB_CMD + * This will account alignment requirement of uCmdData union + * + * Then add command-data size depending on command type to calculate actual + * command size required to do mem copy + * + * NOTE: Make sure that uCmdData is the last member of RGXFWIF_KCCB_CMD struct. 
+ */ + switch (eCmdType) + { + case RGXFWIF_KCCB_CMD_KICK: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_KCCB_CMD_KICK_DATA); + } + case RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK_DATA); + } + case RGXFWIF_KCCB_CMD_MMUCACHE: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_MMUCACHEDATA); + } +#if defined(SUPPORT_USC_BREAKPOINT) + case RGXFWIF_KCCB_CMD_BP: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_BPDATA); + } +#endif + case RGXFWIF_KCCB_CMD_SLCFLUSHINVAL: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_SLCFLUSHINVALDATA); + } + case RGXFWIF_KCCB_CMD_CLEANUP: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_CLEANUP_REQUEST); + } + case RGXFWIF_KCCB_CMD_POW: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_POWER_REQUEST); + } + case RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE: + case RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_ZSBUFFER_BACKING_DATA); + } + case RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_FREELIST_GS_DATA); + } + case RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_FREELISTS_RECONSTRUCTION_DATA); + } + case RGXFWIF_KCCB_CMD_NOTIFY_SIGNAL_UPDATE: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_SIGNAL_UPDATE_DATA); + } + case RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_WRITE_OFFSET_UPDATE_DATA); + } + case RGXFWIF_KCCB_CMD_FORCE_UPDATE: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA); + } +#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) + case RGXFWIF_KCCB_CMD_REGCONFIG: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + 
sizeof(RGXFWIF_REGCONFIG_DATA); + } +#endif +#if defined(SUPPORT_PDVFS) + case RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_PDVFS_MAX_FREQ_DATA); + } +#endif + case RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_OS_STATE_CHANGE_DATA); + } + case RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HWPERF_CTRL); + } + case RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS); + } + case RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HWPERF_CTRL_BLKS); + } + case RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_CORECLKSPEEDCHANGE_DATA); + } + case RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_DEV_VIRTADDR); + } +#if defined(SUPPORT_VALIDATION) + case RGXFWIF_KCCB_CMD_RGXREG: + { + return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_RGXREG_DATA); + } +#endif + case RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE: + case RGXFWIF_KCCB_CMD_WDG_CFG: + case RGXFWIF_KCCB_CMD_HEALTH_CHECK: + case RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL: + case RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS_DIRECT: + { + /* No command specific data */ + return offsetof(RGXFWIF_KCCB_CMD, uCmdData); + } + default: + { + /* Invalid (OR) Unused (OR) Newly added command type */ + return 0; /* Error */ + } + } +} + +PVRSRV_ERROR RGXWaitForKCCBSlotUpdate(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32SlotNum, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + + eError = PVRSRVWaitForValueKM( + (IMG_UINT32 __iomem *)&psDevInfo->pui32KernelCCBRtnSlots[ui32SlotNum], + RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED, + RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED); + PVR_LOG_RETURN_IF_ERROR(eError, 
"PVRSRVWaitForValueKM"); + +#if defined(PDUMP) + /* PDumping conditions same as RGXSendCommandRaw for the actual command and poll command to go in harmony */ + if (PDumpIsContCaptureOn()) + { + IMG_BOOL bIsInCaptureRange; + + PDumpIsCaptureFrameKM(&bIsInCaptureRange); + + if ((bIsInCaptureRange || PDUMP_IS_CONTINUOUS(ui32PDumpFlags)) && !PDUMPPOWCMDINTRANS()) + { + PDUMPCOMMENT("Poll on KCCB slot %u for value %u (mask: 0x%x)", ui32SlotNum, + RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED, RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED); + + eError = DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBRtnSlotsMemDesc, + ui32SlotNum * sizeof(IMG_UINT32), + RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED, + RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED, + PDUMP_POLL_OPERATOR_EQUAL, + ui32PDumpFlags); + PVR_LOG_IF_ERROR(eError, "DevmemPDumpDevmemPol32"); + } + } +#else + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); +#endif + + return eError; +} + +static PVRSRV_ERROR RGXSendCommandRaw(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_KCCB_CMD *psKCCBCmd, + IMG_UINT32 uiPdumpFlags, + IMG_UINT32 *pui32CmdKCCBSlot) +{ + PVRSRV_ERROR eError; + PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; + RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl; + IMG_UINT8 *pui8KCCB = psDevInfo->psKernelCCB; + IMG_UINT32 ui32NewWriteOffset; + IMG_UINT32 ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset; + IMG_UINT32 ui32CmdMemCopySize; + +#if !defined(PDUMP) + PVR_UNREFERENCED_PARAMETER(uiPdumpFlags); +#else + IMG_BOOL bPdumpEnabled = IMG_FALSE; + IMG_BOOL bPDumpPowTrans = PDUMPPOWCMDINTRANS(); + IMG_BOOL bContCaptureOn = PDumpIsContCaptureOn(); /* client connected or in pdump init phase */ + + if (bContCaptureOn) + { + IMG_BOOL bIsInCaptureRange; + + PDumpIsCaptureFrameKM(&bIsInCaptureRange); + bPdumpEnabled = (bIsInCaptureRange || PDUMP_IS_CONTINUOUS(uiPdumpFlags)) && !bPDumpPowTrans; + + /* in capture range */ + if (!PVRSRV_VZ_MODE_IS(GUEST) && bPdumpEnabled) + { + if (!psDevInfo->bDumpedKCCBCtlAlready) + { + /* entering capture range */ 
+ psDevInfo->bDumpedKCCBCtlAlready = IMG_TRUE; + + /* Wait for the live FW to catch up */ + PVR_DPF((PVR_DBG_MESSAGE, "%s: waiting on fw to catch-up, roff: %d, woff: %d", + __func__, + psKCCBCtl->ui32ReadOffset, ui32OldWriteOffset)); + PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + (IMG_UINT32 __iomem *)&psKCCBCtl->ui32ReadOffset, + ui32OldWriteOffset, 0xFFFFFFFF, + POLL_FLAG_LOG_ERROR | POLL_FLAG_DEBUG_DUMP); + + /* Dump Init state of Kernel CCB control (read and write offset) */ + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Initial state of kernel CCB Control, roff: %d, woff: %d", + psKCCBCtl->ui32ReadOffset, psKCCBCtl->ui32WriteOffset); + + DevmemPDumpLoadMem(psDevInfo->psKernelCCBCtlMemDesc, + 0, + sizeof(RGXFWIF_CCB_CTL), + PDUMP_FLAGS_CONTINUOUS); + } + } + } +#endif + +#if defined(SUPPORT_AUTOVZ) + if (!((KM_FW_CONNECTION_IS(READY, psDevInfo) && KM_OS_CONNECTION_IS(READY, psDevInfo)) || + (KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) && KM_OS_CONNECTION_IS(ACTIVE, psDevInfo)))) + { + PVR_DPF((PVR_DBG_ERROR, "%s: The firmware-driver connection is invalid:" + "driver state = %u / firmware state = %u;" + "expected READY (%u/%u) or ACTIVE (%u/%u);", + __func__, KM_GET_OS_CONNECTION(psDevInfo), KM_GET_FW_CONNECTION(psDevInfo), + RGXFW_CONNECTION_OS_READY, RGXFW_CONNECTION_FW_READY, + RGXFW_CONNECTION_OS_ACTIVE, RGXFW_CONNECTION_FW_ACTIVE)); + eError = PVRSRV_ERROR_PVZ_OSID_IS_OFFLINE; + goto _RGXSendCommandRaw_Exit; + } +#endif + + PVR_ASSERT(sizeof(RGXFWIF_KCCB_CMD) == psKCCBCtl->ui32CmdSize); + if (!OSLockIsLocked(psDeviceNode->hPowerLock)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s called without power lock held!", + __func__)); + PVR_ASSERT(OSLockIsLocked(psDeviceNode->hPowerLock)); + } + + /* Acquire a slot in the CCB */ + eError = RGXAcquireKernelCCBSlot(psDevInfo->psKernelCCBCtlMemDesc, psKCCBCtl, &ui32NewWriteOffset); + if (eError != PVRSRV_OK) + { + goto _RGXSendCommandRaw_Exit; + } + + /* Calculate actual size of command to optimize device mem copy */ + 
ui32CmdMemCopySize = RGXGetCmdMemCopySize(psKCCBCmd->eCmdType); + PVR_LOG_RETURN_IF_FALSE(ui32CmdMemCopySize !=0, "RGXGetCmdMemCopySize failed", PVRSRV_ERROR_INVALID_CCB_COMMAND); + + /* Copy the command into the CCB */ + OSCachedMemCopy(&pui8KCCB[ui32OldWriteOffset * psKCCBCtl->ui32CmdSize], + psKCCBCmd, ui32CmdMemCopySize); + OSWriteMemoryBarrier(); + + /* If non-NULL pui32CmdKCCBSlot passed-in, return the kCCB slot in which the command was enqueued */ + if (pui32CmdKCCBSlot) + { + *pui32CmdKCCBSlot = ui32OldWriteOffset; + + /* Each such command enqueue needs to reset the slot value first. This is so that a caller + * doesn't get to see stale/false value in allotted slot */ + psDevInfo->pui32KernelCCBRtnSlots[ui32OldWriteOffset] = RGXFWIF_KCCB_RTN_SLOT_NO_RESPONSE; +#if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(uiPdumpFlags, "Reset kCCB slot number %u", ui32OldWriteOffset); + DevmemPDumpLoadMem(psDevInfo->psKernelCCBRtnSlotsMemDesc, + ui32OldWriteOffset * sizeof(IMG_UINT32), + sizeof(IMG_UINT32), + uiPdumpFlags); +#endif + PVR_DPF((PVR_DBG_MESSAGE, "%s: Device (%p) KCCB slot %u reset with value %u for command type %x", + __func__, psDevInfo, ui32OldWriteOffset, RGXFWIF_KCCB_RTN_SLOT_NO_RESPONSE, psKCCBCmd->eCmdType)); + } + + /* ensure kCCB data is written before the offsets */ + OSWriteMemoryBarrier(); + + /* Move past the current command */ + psKCCBCtl->ui32WriteOffset = ui32NewWriteOffset; + /* Force a read-back to memory to avoid posted writes on certain buses */ + (void) psKCCBCtl->ui32WriteOffset; + + +#if defined(PDUMP) + if (bContCaptureOn) + { + /* in capture range */ + if (bPdumpEnabled) + { + /* Dump new Kernel CCB content */ + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Dump kCCB cmd woff = %d", + ui32OldWriteOffset); + DevmemPDumpLoadMem(psDevInfo->psKernelCCBMemDesc, + ui32OldWriteOffset * psKCCBCtl->ui32CmdSize, + ui32CmdMemCopySize, + PDUMP_FLAGS_CONTINUOUS); + + /* Dump new kernel CCB write offset */ + PDUMPCOMMENTWITHFLAGS(uiPdumpFlags, "Dump 
kCCBCtl woff: %d", + ui32NewWriteOffset); + DevmemPDumpLoadMem(psDevInfo->psKernelCCBCtlMemDesc, + offsetof(RGXFWIF_CCB_CTL, ui32WriteOffset), + sizeof(IMG_UINT32), + uiPdumpFlags); + + /* mimic the read-back of the write from above */ + DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBCtlMemDesc, + offsetof(RGXFWIF_CCB_CTL, ui32WriteOffset), + ui32NewWriteOffset, + 0xFFFFFFFF, + PDUMP_POLL_OPERATOR_EQUAL, + uiPdumpFlags); + + } + /* out of capture range */ + else + { + eError = RGXPdumpDrainKCCB(psDevInfo, ui32OldWriteOffset); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXPdumpDrainKCCB", _RGXSendCommandRaw_Exit); + } + } +#endif + + + PDUMPCOMMENTWITHFLAGS(uiPdumpFlags, "MTS kick for kernel CCB"); + /* + * Kick the MTS to schedule the firmware. + */ + __MTSScheduleWrite(psDevInfo, RGXFWIF_DM_GP & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK); + + PDUMPREG32(RGX_PDUMPREG_NAME, RGX_CR_MTS_SCHEDULE, RGXFWIF_DM_GP & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK, uiPdumpFlags); + +#if defined(SUPPORT_AUTOVZ) + RGXUpdateAutoVzWdgToken(psDevInfo); +#endif + +#if defined(NO_HARDWARE) + /* keep the roff updated because fw isn't there to update it */ + psKCCBCtl->ui32ReadOffset = psKCCBCtl->ui32WriteOffset; +#endif + +_RGXSendCommandRaw_Exit: + return eError; +} + +/****************************************************************************** + FUNCTION : _AllocDeferredCommand + + PURPOSE : Allocate a KCCB command and add it to KCCB deferred list + + PARAMETERS : psDevInfo RGX device info + : eKCCBType Firmware Command type + : psKCCBCmd Firmware Command + : uiPdumpFlags Pdump flags + + RETURNS : PVRSRV_OK If all went good, PVRSRV_ERROR_RETRY otherwise. 
+******************************************************************************/ +static PVRSRV_ERROR _AllocDeferredCommand(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_KCCB_CMD *psKCCBCmd, + IMG_UINT32 uiPdumpFlags) +{ + RGX_DEFERRED_KCCB_CMD *psDeferredCommand; + OS_SPINLOCK_FLAGS uiFlags; + + psDeferredCommand = OSAllocMem(sizeof(*psDeferredCommand)); + + if (!psDeferredCommand) + { + PVR_DPF((PVR_DBG_ERROR, + "Deferring a KCCB command failed: allocation failure: requesting retry")); + return PVRSRV_ERROR_RETRY; + } + + psDeferredCommand->sKCCBcmd = *psKCCBCmd; + psDeferredCommand->uiPdumpFlags = uiPdumpFlags; + psDeferredCommand->psDevInfo = psDevInfo; + + OSSpinLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); + dllist_add_to_tail(&(psDevInfo->sKCCBDeferredCommandsListHead), &(psDeferredCommand->sListNode)); + psDevInfo->ui32KCCBDeferredCommandsCount++; + OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); + + return PVRSRV_OK; +} + +/****************************************************************************** + FUNCTION : _FreeDeferredCommand + + PURPOSE : Remove from the deferred list the sent deferred KCCB command + + PARAMETERS : psNode Node in deferred list + : psDeferredKCCBCmd KCCB Command to free + + RETURNS : None +******************************************************************************/ +static void _FreeDeferredCommand(DLLIST_NODE *psNode, RGX_DEFERRED_KCCB_CMD *psDeferredKCCBCmd) +{ + dllist_remove_node(psNode); + psDeferredKCCBCmd->psDevInfo->ui32KCCBDeferredCommandsCount--; + OSFreeMem(psDeferredKCCBCmd); +} + +/****************************************************************************** + FUNCTION : RGXSendCommandsFromDeferredList + + PURPOSE : Try send KCCB commands in deferred list to KCCB + Should be called by holding PowerLock + + PARAMETERS : psDevInfo RGX device info + : bPoll Poll for space in KCCB + + RETURNS : PVRSRV_OK If all commands in deferred list are sent to KCCB, + 
PVRSRV_ERROR_KERNEL_CCB_FULL otherwise. +******************************************************************************/ +PVRSRV_ERROR RGXSendCommandsFromDeferredList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bPoll) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + DLLIST_NODE *psNode, *psNext; + RGX_DEFERRED_KCCB_CMD *psTempDeferredKCCBCmd; + DLLIST_NODE sCommandList; + OS_SPINLOCK_FLAGS uiFlags; + + PVR_ASSERT(PVRSRVPwrLockIsLockedByMe(psDevInfo->psDeviceNode)); + + /* !!! Important !!! + * + * The idea of moving the whole list hLockKCCBDeferredCommandsList below + * to the temporary list is only valid under the principle that all of the + * operations are also protected by the power lock. It must be held + * so that the order of the commands doesn't get messed up while we're + * performing the operations on the local list. + * + * The necessity of releasing the hLockKCCBDeferredCommandsList comes from + * the fact that _FreeDeferredCommand() is allocating memory and it can't + * be done in atomic context (inside section protected by a spin lock). + * + * We're using spin lock here instead of mutex to quickly perform a check + * if the list is empty in MISR without a risk that the MISR is going + * to sleep due to a lock. 
+ */ + + /* move the whole list to a local list so it can be processed without lock */ + OSSpinLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); + dllist_replace_head(&psDevInfo->sKCCBDeferredCommandsListHead, &sCommandList); + OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + if (dllist_is_empty(&sCommandList)) + { + return PVRSRV_OK; + } + + /* For every deferred KCCB command, try to send it*/ + dllist_foreach_node(&sCommandList, psNode, psNext) + { + psTempDeferredKCCBCmd = IMG_CONTAINER_OF(psNode, RGX_DEFERRED_KCCB_CMD, sListNode); + eError = RGXSendCommandRaw(psTempDeferredKCCBCmd->psDevInfo, + &psTempDeferredKCCBCmd->sKCCBcmd, + psTempDeferredKCCBCmd->uiPdumpFlags, + NULL /* We surely aren't interested in kCCB slot number of deferred command */); + if (eError != PVRSRV_OK) + { + if (!bPoll) + { + eError = PVRSRV_ERROR_KERNEL_CCB_FULL; + goto cleanup_; + } + break; + } + + _FreeDeferredCommand(psNode, psTempDeferredKCCBCmd); + } + + if (bPoll) + { + PVRSRV_ERROR eErrPollForKCCBSlot; + + /* Don't overwrite eError because if RGXPollKernelCCBSlot returns OK and the + * outer loop times-out, we'll still want to return KCCB_FULL to caller + */ + eErrPollForKCCBSlot = RGXPollKernelCCBSlot(psDevInfo->psKernelCCBCtlMemDesc, + psDevInfo->psKernelCCBCtl); + if (eErrPollForKCCBSlot == PVRSRV_ERROR_KERNEL_CCB_FULL) + { + eError = PVRSRV_ERROR_KERNEL_CCB_FULL; + goto cleanup_; + } + } + } END_LOOP_UNTIL_TIMEOUT(); + +cleanup_: + /* if the local list is not empty put it back to the deferred list head + * so that the old order of commands is retained */ + OSSpinLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); + dllist_insert_list_at_head(&psDevInfo->sKCCBDeferredCommandsListHead, &sCommandList); + OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); + + return eError; +} + +PVRSRV_ERROR RGXSendCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_KCCB_CMD 
*psKCCBCmd, + IMG_UINT32 uiPdumpFlags, + IMG_UINT32 *pui32CmdKCCBSlot) +{ + IMG_BOOL bPoll = (pui32CmdKCCBSlot != NULL); + PVRSRV_ERROR eError; + + /* + * First try to Flush all the cmds in deferred list. + * + * We cannot defer an incoming command if the caller is interested in + * knowing the command's kCCB slot: it plans to poll/wait for a + * response from the FW just after the command is enqueued, so we must + * poll for space to be available. + */ + eError = RGXSendCommandsFromDeferredList(psDevInfo, bPoll); + if (eError == PVRSRV_OK) + { + eError = RGXSendCommandRaw(psDevInfo, + psKCCBCmd, + uiPdumpFlags, + pui32CmdKCCBSlot); + } + + /* + * If we don't manage to enqueue one of the deferred commands or the command + * passed as argument because the KCCB is full, insert the latter into the deferred commands list. + * The deferred commands will also be flushed eventually by: + * - one more KCCB command sent for any DM + * - RGX_MISRHandler_CheckFWActivePowerState + */ + if (eError == PVRSRV_ERROR_KERNEL_CCB_FULL) + { + if (pui32CmdKCCBSlot == NULL) + { + eError = _AllocDeferredCommand(psDevInfo, psKCCBCmd, uiPdumpFlags); + } + else + { + /* Let the caller retry. Otherwise if we deferred the command and returned OK, + * the caller can end up looking in a stale CCB slot. + */ + PVR_DPF((PVR_DBG_ERROR, "%s: Couldn't flush the deferred queue for a command (Type:%d) " + "that needed the kCCB command slot number! 
Returning kCCB FULL", + __func__, psKCCBCmd->eCmdType)); + } + } + + return eError; +} + +PVRSRV_ERROR RGXSendCommandWithPowLockAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_KCCB_CMD *psKCCBCmd, + IMG_UINT32 ui32PDumpFlags, + IMG_UINT32 *pui32CmdKCCBSlot) +{ + PVRSRV_ERROR eError; + PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; + + /* Ensure Rogue is powered up before kicking MTS */ + eError = PVRSRVPowerLock(psDeviceNode); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: failed to acquire powerlock (%s)", + __func__, + PVRSRVGetErrorString(eError))); + + goto _PVRSRVPowerLock_Exit; + } + + PDUMPPOWCMDSTART(); + eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, + PVRSRV_DEV_POWER_STATE_ON, + IMG_FALSE, IMG_FALSE); + PDUMPPOWCMDEND(); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition Rogue to ON (%s)", + __func__, + PVRSRVGetErrorString(eError))); + + goto _PVRSRVSetDevicePowerStateKM_Exit; + } + + eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, + psKCCBCmd, + ui32PDumpFlags, + pui32CmdKCCBSlot); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: failed to schedule command (%s)", + __func__, + PVRSRVGetErrorString(eError))); +#if defined(DEBUG) + /* PVRSRVDebugRequest must be called without powerlock */ + PVRSRVPowerUnlock(psDeviceNode); + PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); + goto _PVRSRVPowerLock_Exit; +#endif + } + +_PVRSRVSetDevicePowerStateKM_Exit: + PVRSRVPowerUnlock(psDeviceNode); + +_PVRSRVPowerLock_Exit: + return eError; +} + +void RGXScheduleProcessQueuesKM(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE*) hCmdCompHandle; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + OSScheduleMISR(psDevInfo->hProcessQueuesMISR); +} + +#if defined(SUPPORT_VALIDATION) +PVRSRV_ERROR RGXScheduleRgxRegCommand(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT64 ui64RegVal, + IMG_UINT64 
ui64Size, + IMG_UINT32 ui32Offset, + IMG_BOOL bWriteOp) +{ + RGXFWIF_KCCB_CMD sRgxRegsCmd = {0}; + IMG_UINT32 ui32kCCBCommandSlot; + PVRSRV_ERROR eError; + + sRgxRegsCmd.eCmdType = RGXFWIF_KCCB_CMD_RGXREG; + sRgxRegsCmd.uCmdData.sFwRgxData.ui64RegVal = ui64RegVal; + sRgxRegsCmd.uCmdData.sFwRgxData.ui32RegWidth = ui64Size; + sRgxRegsCmd.uCmdData.sFwRgxData.ui32RegAddr = ui32Offset; + sRgxRegsCmd.uCmdData.sFwRgxData.bWriteOp = bWriteOp; + + eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, + RGXFWIF_DM_GP, + &sRgxRegsCmd, + 0, + PDUMP_FLAGS_CONTINUOUS, + &ui32kCCBCommandSlot); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot"); + + if (bWriteOp) + { + eError = RGXWaitForKCCBSlotUpdate(psDevInfo, + ui32kCCBCommandSlot, + PDUMP_FLAGS_CONTINUOUS); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate"); + } + + return eError; +} +#endif + +/*! +******************************************************************************* + + @Function RGX_MISRHandler_ScheduleProcessQueues + + @Description - Sends uncounted kick to all the DMs (the FW will process all + the queue for all the DMs) +******************************************************************************/ +static void RGX_MISRHandler_ScheduleProcessQueues(void *pvData) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = pvData; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + PVRSRV_DEV_POWER_STATE ePowerState; + + eError = PVRSRVPowerLock(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)", + __func__, PVRSRVGetErrorString(eError))); + return; + } + + /* Check whether it's worth waking up the GPU */ + eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); + + if (!PVRSRV_VZ_MODE_IS(GUEST) && + (eError == PVRSRV_OK) && (ePowerState == PVRSRV_DEV_POWER_STATE_OFF)) + { + /* For now, guest drivers will always wake-up the GPU */ + RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb; + 
IMG_BOOL bGPUHasWorkWaiting; + + bGPUHasWorkWaiting = + (RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64LastWord) == RGXFWIF_GPU_UTIL_STATE_BLOCKED); + + if (!bGPUHasWorkWaiting) + { + /* all queues are empty, don't wake up the GPU */ + PVRSRVPowerUnlock(psDeviceNode); + return; + } + } + + PDUMPPOWCMDSTART(); + /* wake up the GPU */ + eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, + PVRSRV_DEV_POWER_STATE_ON, + IMG_FALSE, IMG_FALSE); + PDUMPPOWCMDEND(); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition Rogue to ON (%s)", + __func__, PVRSRVGetErrorString(eError))); + + PVRSRVPowerUnlock(psDeviceNode); + return; + } + + /* uncounted kick to the FW */ + HTBLOGK(HTB_SF_MAIN_KICK_UNCOUNTED); + __MTSScheduleWrite(psDevInfo, (RGXFWIF_DM_GP & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK) | RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED); + + PVRSRVPowerUnlock(psDeviceNode); +} + +PVRSRV_ERROR RGXInstallProcessQueuesMISR(IMG_HANDLE *phMISR, PVRSRV_DEVICE_NODE *psDeviceNode) +{ + return OSInstallMISR(phMISR, + RGX_MISRHandler_ScheduleProcessQueues, + psDeviceNode, + "RGX_ScheduleProcessQueues"); +} + +PVRSRV_ERROR RGXScheduleCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_DM eKCCBType, + RGXFWIF_KCCB_CMD *psKCCBCmd, + IMG_UINT32 ui32CacheOpFence, + IMG_UINT32 ui32PDumpFlags, + IMG_UINT32 *pui32CmdKCCBSlot) +{ + PVRSRV_ERROR eError; + IMG_UINT32 uiMMUSyncUpdate; +#if defined(SUPPORT_VALIDATION) + static IMG_UINT32 ui32PowDomainFrameCounter; +#endif + + /* Don't send the command/power up request if the device is de-initialising. + * The de-init thread could destroy the device whilst the power up + * sequence below is accessing the HW registers. 
+ */ + if (unlikely((psDevInfo == NULL) || + (psDevInfo->psDeviceNode == NULL) || + (psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT))) + { + return PVRSRV_ERROR_INVALID_DEVICE; + } + + eError = CacheOpFence(eKCCBType, ui32CacheOpFence); + if (unlikely(eError != PVRSRV_OK)) goto RGXScheduleCommand_exit; + + /* PVRSRVPowerLock guarantees atomicity between commands. This is helpful + in a scenario with several applications allocating resources. */ + eError = PVRSRVPowerLock(psDevInfo->psDeviceNode); + if (unlikely(eError != PVRSRV_OK)) + { + PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)", + __func__, PVRSRVGetErrorString(eError))); + + /* If system is found powered OFF, Retry scheduling the command */ + if (likely(eError == PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF)) + { + eError = PVRSRV_ERROR_RETRY; + } + + goto RGXScheduleCommand_exit; + } + + if (unlikely(psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT)) + { + /* If we have the power lock the device is valid but the deinit + * thread could be waiting for the lock. 
*/ + PVRSRVPowerUnlock(psDevInfo->psDeviceNode); + return PVRSRV_ERROR_INVALID_DEVICE; + } + + /* Ensure device is powered up before sending any commands */ + PDUMPPOWCMDSTART(); + eError = PVRSRVSetDevicePowerStateKM(psDevInfo->psDeviceNode, + PVRSRV_DEV_POWER_STATE_ON, + IMG_FALSE, IMG_FALSE); + PDUMPPOWCMDEND(); + if (unlikely(eError != PVRSRV_OK)) + { + PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition RGX to ON (%s)", + __func__, PVRSRVGetErrorString(eError))); + goto _PVRSRVSetDevicePowerStateKM_Exit; + } + + eError = RGXPreKickCacheCommand(psDevInfo, eKCCBType, &uiMMUSyncUpdate); + if (unlikely(eError != PVRSRV_OK)) goto _PVRSRVSetDevicePowerStateKM_Exit; + + eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, psKCCBCmd, ui32PDumpFlags, pui32CmdKCCBSlot); + if (unlikely(eError != PVRSRV_OK)) goto _PVRSRVSetDevicePowerStateKM_Exit; + +_PVRSRVSetDevicePowerStateKM_Exit: + PVRSRVPowerUnlock(psDevInfo->psDeviceNode); + +#if defined(SUPPORT_VALIDATION) + /** + * For validation, force the core to different powered units between + * DM kicks. PVRSRVDeviceGPUUnitsPowerChange acquires the power lock, hence + * ensure that this is done after the power lock is released. 
+ */ + if ((eError == PVRSRV_OK) && (eKCCBType != RGXFWIF_DM_GP)) + { + IMG_BOOL bInsertPowerDomainTransition = + (psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_GPU_UNITS_POWER_CHANGE_EN); + if (psDevInfo->ui32PowDomainKickInterval > 0) + { + if (eKCCBType == RGXFWIF_DM_3D) + { + /* Insert a power domain transition every N '3D' frames */ + ui32PowDomainFrameCounter++; + if ((ui32PowDomainFrameCounter % psDevInfo->ui32PowDomainKickInterval) == 0) + { + bInsertPowerDomainTransition = IMG_TRUE; + } + } + } + + if (bInsertPowerDomainTransition) + { + IMG_UINT32 ui32PowerDomainState; + IMG_BOOL bIsValid; + do { + ui32PowerDomainState = RGXPowerDomainGetNextState(&psDevInfo->sPowerDomainState); + bIsValid = ui32PowerDomainState && + ((ui32PowerDomainState & ~(psDevInfo->ui32AvailablePowUnitsMask)) == 0); + } while (!bIsValid); + + eError = PVRSRVDeviceGPUUnitsPowerChange(psDevInfo->psDeviceNode, ui32PowerDomainState); + if (eError != PVRSRV_OK) + goto RGXScheduleCommand_exit; + } + } +#endif + +RGXScheduleCommand_exit: + return eError; +} + +/* + * RGXCheckFirmwareCCB + */ +void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFWIF_FWCCB_CMD *psFwCCBCmd; + + RGXFWIF_CCB_CTL *psFWCCBCtl = psDevInfo->psFirmwareCCBCtl; + IMG_UINT8 *psFWCCB = psDevInfo->psFirmwareCCB; + + while (psFWCCBCtl->ui32ReadOffset != psFWCCBCtl->ui32WriteOffset) + { + /* Point to the next command */ + psFwCCBCmd = ((RGXFWIF_FWCCB_CMD *)psFWCCB) + psFWCCBCtl->ui32ReadOffset; + + HTBLOGK(HTB_SF_MAIN_FWCCB_CMD, psFwCCBCmd->eCmdType); + switch (psFwCCBCmd->eCmdType) + { + case RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING: + { + if (psDevInfo->bPDPEnabled) + { + PDUMP_PANIC(ZSBUFFER_BACKING, "Request to add backing to ZSBuffer"); + } + RGXProcessRequestZSBufferBacking(psDevInfo, + psFwCCBCmd->uCmdData.sCmdZSBufferBacking.ui32ZSBufferID); + break; + } + + case RGXFWIF_FWCCB_CMD_ZSBUFFER_UNBACKING: + { + if (psDevInfo->bPDPEnabled) + { + PDUMP_PANIC(ZSBUFFER_UNBACKING, "Request to remove backing from 
ZSBuffer"); + } + RGXProcessRequestZSBufferUnbacking(psDevInfo, + psFwCCBCmd->uCmdData.sCmdZSBufferBacking.ui32ZSBufferID); + break; + } + + case RGXFWIF_FWCCB_CMD_FREELIST_GROW: + { + if (psDevInfo->bPDPEnabled) + { + PDUMP_PANIC(FREELIST_GROW, "Request to grow the free list"); + } + RGXProcessRequestGrow(psDevInfo, + psFwCCBCmd->uCmdData.sCmdFreeListGS.ui32FreelistID); + break; + } + + case RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION: + { + if (psDevInfo->bPDPEnabled) + { + PDUMP_PANIC(FREELISTS_RECONSTRUCTION, "Request to reconstruct free lists"); + } + + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: Freelist reconstruction request (%d) for %d freelists", + __func__, + psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32HwrCounter+1, + psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount)); + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: Freelist reconstruction request (%d/%d) for %d freelists", + __func__, + psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32HwrCounter+1, + psDevInfo->psRGXFWIfHWRInfoBufCtl->ui32HwrCounter+1, + psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount)); + } + + RGXProcessRequestFreelistsReconstruction(psDevInfo, + psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount, + psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.aui32FreelistIDs); + break; + } + + case RGXFWIF_FWCCB_CMD_CONTEXT_FW_PF_NOTIFICATION: + { + /* Notify client drivers */ + /* Client notification of device error will be achieved by + * clients calling UM function RGXGetLastDeviceError() */ + psDevInfo->eLastDeviceError = RGX_CONTEXT_RESET_REASON_FW_PAGEFAULT; + + /* Notify system layer */ + { + PVRSRV_DEVICE_NODE *psDevNode = psDevInfo->psDeviceNode; + PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; + RGXFWIF_FWCCB_CMD_FW_PAGEFAULT_DATA *psCmdFwPagefault = + &psFwCCBCmd->uCmdData.sCmdFWPagefault; + + if (psDevConfig->pfnSysDevErrorNotify) + { + PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = 
{0}; + + sErrorData.eResetReason = RGX_CONTEXT_RESET_REASON_FW_PAGEFAULT; + sErrorData.uErrData.sFwPFErrData.sFWFaultAddr.uiAddr = psCmdFwPagefault->sFWFaultAddr.uiAddr; + + psDevConfig->pfnSysDevErrorNotify(psDevConfig, + &sErrorData); + } + } + break; + } + + case RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION: + { + DLLIST_NODE *psNode, *psNext; + RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA *psCmdContextResetNotification = + &psFwCCBCmd->uCmdData.sCmdContextResetNotification; + RGX_SERVER_COMMON_CONTEXT *psServerCommonContext = NULL; + IMG_UINT32 ui32ErrorPid = 0; + + OSWRLockAcquireRead(psDevInfo->hCommonCtxtListLock); + dllist_foreach_node(&psDevInfo->sCommonCtxtListHead, psNode, psNext) + { + RGX_SERVER_COMMON_CONTEXT *psThisContext = + IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode); + + /* If the notification applies to all contexts update reset info + * for all contexts, otherwise only do so for the appropriate ID. + */ + if (psCmdContextResetNotification->ui32Flags & RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_ALL_CTXS) + { + /* Notification applies to all contexts */ + psThisContext->eLastResetReason = psCmdContextResetNotification->eResetReason; + psThisContext->ui32LastResetJobRef = psCmdContextResetNotification->ui32ResetJobRef; + } + else + { + /* Notification applies to one context only */ + if (psThisContext->ui32ContextID == psCmdContextResetNotification->ui32ServerCommonContextID) + { + psServerCommonContext = psThisContext; + psServerCommonContext->eLastResetReason = psCmdContextResetNotification->eResetReason; + psServerCommonContext->ui32LastResetJobRef = psCmdContextResetNotification->ui32ResetJobRef; + ui32ErrorPid = RGXGetPIDFromServerMMUContext(psServerCommonContext->psServerMMUContext); + break; + } + } + } + + if (psCmdContextResetNotification->ui32Flags & RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_ALL_CTXS) + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: All contexts reset (Reason=%d, JobRef=0x%08x)", + __func__, + 
(IMG_UINT32)(psCmdContextResetNotification->eResetReason), + psCmdContextResetNotification->ui32ResetJobRef)); + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: Context 0x%p reset (ID=0x%08x, Reason=%d, JobRef=0x%08x)", + __func__, + psServerCommonContext, + psCmdContextResetNotification->ui32ServerCommonContextID, + (IMG_UINT32)(psCmdContextResetNotification->eResetReason), + psCmdContextResetNotification->ui32ResetJobRef)); + } + + /* Increment error counter (if appropriate) */ + if (psCmdContextResetNotification->eResetReason == RGX_CONTEXT_RESET_REASON_WGP_CHECKSUM) + { + /* Avoid wrapping the error count (which would then + * make it appear we had far fewer errors), by limiting + * it to IMG_UINT32_MAX. + */ + if (psDevInfo->sErrorCounts.ui32WGPErrorCount < IMG_UINT32_MAX) + { + psDevInfo->sErrorCounts.ui32WGPErrorCount++; + } + } + else if (psCmdContextResetNotification->eResetReason == RGX_CONTEXT_RESET_REASON_TRP_CHECKSUM) + { + /* Avoid wrapping the error count (which would then + * make it appear we had far fewer errors), by limiting + * it to IMG_UINT32_MAX. 
+ */ + if (psDevInfo->sErrorCounts.ui32TRPErrorCount < IMG_UINT32_MAX) + { + psDevInfo->sErrorCounts.ui32TRPErrorCount++; + } + } + OSWRLockReleaseRead(psDevInfo->hCommonCtxtListLock); + + /* Notify system layer */ + { + PVRSRV_DEVICE_NODE *psDevNode = psDevInfo->psDeviceNode; + PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; + + if (psDevConfig->pfnSysDevErrorNotify) + { + PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = {0}; + + sErrorData.eResetReason = psCmdContextResetNotification->eResetReason; + sErrorData.pid = ui32ErrorPid; + + /* Populate error data according to reset reason */ + switch (psCmdContextResetNotification->eResetReason) + { + case RGX_CONTEXT_RESET_REASON_WGP_CHECKSUM: + case RGX_CONTEXT_RESET_REASON_TRP_CHECKSUM: + { + sErrorData.uErrData.sChecksumErrData.ui32ExtJobRef = psCmdContextResetNotification->ui32ResetJobRef; + sErrorData.uErrData.sChecksumErrData.eDM = psCmdContextResetNotification->eDM; + break; + } + default: + { + break; + } + } + + psDevConfig->pfnSysDevErrorNotify(psDevConfig, + &sErrorData); + } + } + + /* Notify if a page fault */ + if (psCmdContextResetNotification->ui32Flags & RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_PF) + { + DevmemIntPFNotify(psDevInfo->psDeviceNode, + psCmdContextResetNotification->ui64PCAddress, + psCmdContextResetNotification->sFaultAddress); + } + break; + } + + case RGXFWIF_FWCCB_CMD_DEBUG_DUMP: + { + PVRSRV_ERROR eError; + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + OSAtomicWrite(&psDevInfo->psDeviceNode->eDebugDumpRequested, PVRSRV_DEVICE_DEBUG_DUMP_CAPTURE); + eError = OSEventObjectSignal(psPVRSRVData->hDevicesWatchdogEvObj); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to signal FW Cmd debug dump event, dumping now instead", __func__)); + PVRSRVDebugRequest(psDevInfo->psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); + } + break; + } + + case RGXFWIF_FWCCB_CMD_UPDATE_STATS: + { +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + IMG_PID pidTmp = 
psFwCCBCmd->uCmdData.sCmdUpdateStatsData.pidOwner; + IMG_INT32 i32AdjustmentValue = psFwCCBCmd->uCmdData.sCmdUpdateStatsData.i32AdjustmentValue; + + switch (psFwCCBCmd->uCmdData.sCmdUpdateStatsData.eElementToUpdate) + { + case RGXFWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS: + { + PVRSRVStatsUpdateRenderContextStats(i32AdjustmentValue,0,0,0,0,0,pidTmp); + break; + } + case RGXFWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY: + { + PVRSRVStatsUpdateRenderContextStats(0,i32AdjustmentValue,0,0,0,0,pidTmp); + break; + } + case RGXFWIF_FWCCB_CMD_UPDATE_NUM_TA_STORES: + { + PVRSRVStatsUpdateRenderContextStats(0,0,i32AdjustmentValue,0,0,0,pidTmp); + break; + } + case RGXFWIF_FWCCB_CMD_UPDATE_NUM_3D_STORES: + { + PVRSRVStatsUpdateRenderContextStats(0,0,0,i32AdjustmentValue,0,0,pidTmp); + break; + } + case RGXFWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES: + { + PVRSRVStatsUpdateRenderContextStats(0,0,0,0,i32AdjustmentValue,0,pidTmp); + break; + } + case RGXFWIF_FWCCB_CMD_UPDATE_NUM_TDM_STORES: + { + PVRSRVStatsUpdateRenderContextStats(0,0,0,0,0,i32AdjustmentValue,pidTmp); + break; + } + } +#endif + break; + } +#if defined(SUPPORT_PDVFS) + case RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE: + { + PDVFS_PROCESS_CORE_CLK_RATE_CHANGE(psDevInfo, + psFwCCBCmd->uCmdData.sCmdCoreClkRateChange.ui32CoreClkRate); + break; + } +#endif + case RGXFWIF_FWCCB_CMD_REQUEST_GPU_RESTART: + { + if (psDevInfo->psRGXFWIfFwSysData != NULL && + psDevInfo->psRGXFWIfFwSysData->ePowState != RGXFWIF_POW_OFF) + { + PVRSRV_ERROR eError; + + /* Power down... */ + eError = PVRSRVSetDeviceSystemPowerState(psDevInfo->psDeviceNode, + PVRSRV_SYS_POWER_STATE_OFF, + IMG_FALSE); + if (eError == PVRSRV_OK) + { + /* Clear the FW faulted flags... */ + psDevInfo->psRGXFWIfFwSysData->ui32HWRStateFlags &= ~(RGXFWIF_HWR_FW_FAULT|RGXFWIF_HWR_RESTART_REQUESTED); + + /* Power back up again... 
*/ + eError = PVRSRVSetDeviceSystemPowerState(psDevInfo->psDeviceNode, + PVRSRV_SYS_POWER_STATE_ON, + IMG_FALSE); + + /* Send a dummy KCCB command to ensure the FW wakes up and checks the queues... */ + if (eError == PVRSRV_OK) + { + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXFWHealthCheckCmd(psDevInfo); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + } + } + + /* Notify client drivers and system layer of FW fault */ + { + PVRSRV_DEVICE_NODE *psDevNode = psDevInfo->psDeviceNode; + PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; + + /* Client notification of device error will be achieved by + * clients calling UM function RGXGetLastDeviceError() */ + psDevInfo->eLastDeviceError = RGX_CONTEXT_RESET_REASON_FW_EXEC_ERR; + + /* Notify system layer */ + if (psDevConfig->pfnSysDevErrorNotify) + { + PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = {0}; + + sErrorData.eResetReason = RGX_CONTEXT_RESET_REASON_FW_EXEC_ERR; + psDevConfig->pfnSysDevErrorNotify(psDevConfig, + &sErrorData); + } + } + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed firmware restart (%s)", + __func__, PVRSRVGetErrorString(eError))); + } + } + break; + } +#if defined(SUPPORT_VALIDATION) + case RGXFWIF_FWCCB_CMD_REG_READ: + { + psDevInfo->sFwRegs.ui64RegVal = psFwCCBCmd->uCmdData.sCmdRgxRegReadData.ui64RegValue; + complete(&psDevInfo->sFwRegs.sRegComp); + break; + } +#if defined(SUPPORT_SOC_TIMER) + case RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS: + { + if (psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags & RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER) + { + PVRSRV_ERROR eSOCtimerErr = PVRSRVValidateSOCUSCTimer(psDevInfo, + PDUMP_NONE, + psFwCCBCmd->uCmdData.sCmdTimers.ui64timerGray, + psFwCCBCmd->uCmdData.sCmdTimers.ui64timerBinary, + psFwCCBCmd->uCmdData.sCmdTimers.aui64uscTimers); + if (PVRSRV_OK == eSOCtimerErr) + { + PVR_DPF((PVR_DBG_WARNING, "SoC or USC Timers have increased over time")); + } + else + 
{ + PVR_DPF((PVR_DBG_WARNING, "SoC or USC Timers have NOT increased over time")); + } + } + break; + } +#endif +#endif + default: + { + /* unknown command */ + PVR_DPF((PVR_DBG_WARNING, "%s: Unknown Command (eCmdType=0x%08x)", + __func__, psFwCCBCmd->eCmdType)); + /* Assert on magic value corruption */ + PVR_ASSERT((((IMG_UINT32)psFwCCBCmd->eCmdType & RGX_CMD_MAGIC_DWORD_MASK) >> RGX_CMD_MAGIC_DWORD_SHIFT) == RGX_CMD_MAGIC_DWORD); + } + } + + /* Update read offset */ + psFWCCBCtl->ui32ReadOffset = (psFWCCBCtl->ui32ReadOffset + 1) & psFWCCBCtl->ui32WrapMask; + } +} + +/* + * PVRSRVRGXFrameworkCopyCommand +*/ +PVRSRV_ERROR PVRSRVRGXFrameworkCopyCommand(DEVMEM_MEMDESC *psFWFrameworkMemDesc, + IMG_PBYTE pbyGPUFRegisterList, + IMG_UINT32 ui32FrameworkRegisterSize) +{ + PVRSRV_ERROR eError; + RGXFWIF_RF_REGISTERS *psRFReg; + + eError = DevmemAcquireCpuVirtAddr(psFWFrameworkMemDesc, + (void **)&psRFReg); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map firmware render context state (%u)", + __func__, eError)); + return eError; + } + + OSDeviceMemCopy(psRFReg, pbyGPUFRegisterList, ui32FrameworkRegisterSize); + + /* Release the CPU mapping */ + DevmemReleaseCpuVirtAddr(psFWFrameworkMemDesc); + + /* + * Dump the FW framework buffer + */ +#if defined(PDUMP) + PDUMPCOMMENT("Dump FWFramework buffer"); + DevmemPDumpLoadMem(psFWFrameworkMemDesc, 0, ui32FrameworkRegisterSize, PDUMP_FLAGS_CONTINUOUS); +#endif + + return PVRSRV_OK; +} + +/* + * PVRSRVRGXFrameworkCreateKM +*/ +PVRSRV_ERROR PVRSRVRGXFrameworkCreateKM(PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEM_MEMDESC **ppsFWFrameworkMemDesc, + IMG_UINT32 ui32FrameworkCommandSize) +{ + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + /* + Allocate device memory for the firmware GPU framework state. 
+ Sufficient info to kick one or more DMs should be contained in this buffer + */ + PDUMPCOMMENT("Allocate Volcanic firmware framework state"); + + eError = DevmemFwAllocate(psDevInfo, + ui32FrameworkCommandSize, + RGX_FWCOMCTX_ALLOCFLAGS, + "FwGPUFrameworkState", + ppsFWFrameworkMemDesc); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate firmware framework state (%u)", + __func__, eError)); + return eError; + } + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXPollForGPCommandCompletion(PVRSRV_DEVICE_NODE *psDevNode, + volatile IMG_UINT32 __iomem *pui32LinMemAddr, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_CCB_CTL *psKCCBCtl; + IMG_UINT32 ui32CurrentQueueLength, ui32MaxRetries; + PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice; + + psKCCBCtl = psDevInfo->psKernelCCBCtl; + ui32CurrentQueueLength = (psKCCBCtl->ui32WrapMask+1 + + psKCCBCtl->ui32WriteOffset - + psKCCBCtl->ui32ReadOffset) & psKCCBCtl->ui32WrapMask; + ui32CurrentQueueLength += psDevInfo->ui32KCCBDeferredCommandsCount; + + for (ui32MaxRetries = ui32CurrentQueueLength + 1; + ui32MaxRetries > 0; + ui32MaxRetries--) + { + + /* + * PVRSRVPollForValueKM flags are set to POLL_FLAG_NONE in this case so that the function + * does not generate an error message. In this case, the PollForValueKM is expected to + * timeout as there is work ongoing on the GPU which may take longer than the timeout period. + */ + eError = PVRSRVPollForValueKM(psDevNode, pui32LinMemAddr, ui32Value, ui32Mask, POLL_FLAG_NONE); + if (eError != PVRSRV_ERROR_TIMEOUT) + { + break; + } + + RGXSendCommandsFromDeferredList(psDevInfo, IMG_FALSE); + } + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed! 
Error(%s) CPU linear address(%p) Expected value(%u)", + __func__, PVRSRVGetErrorString(eError), + pui32LinMemAddr, ui32Value)); + } + + return eError; +} + +PVRSRV_ERROR RGXStateFlagCtrl(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32Config, + IMG_UINT32 *pui32ConfigState, + IMG_BOOL bSetNotClear) +{ + PVRSRV_ERROR eError; + PVRSRV_DEV_POWER_STATE ePowerState; + RGXFWIF_KCCB_CMD sStateFlagCmd = { 0 }; + PVRSRV_DEVICE_NODE *psDeviceNode; + RGXFWIF_SYSDATA *psFwSysData; + IMG_UINT32 ui32kCCBCommandSlot; + IMG_BOOL bWaitForFwUpdate = IMG_FALSE; + + if (!psDevInfo) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + psDeviceNode = psDevInfo->psDeviceNode; + psFwSysData = psDevInfo->psRGXFWIfFwSysData; + + if (NULL == psFwSysData) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Fw Sys Config is not mapped into CPU space", + __func__)); + return PVRSRV_ERROR_INVALID_CPU_ADDR; + } + + /* apply change and ensure the new data is written to memory + * before requesting the FW to read it + */ + ui32Config = ui32Config & RGXFWIF_INICFG_ALL; + if (bSetNotClear) + { + psFwSysData->ui32ConfigFlags |= ui32Config; + } + else + { + psFwSysData->ui32ConfigFlags &= ~ui32Config; + } + + /* return current/new value to caller */ + if (pui32ConfigState) + { + *pui32ConfigState = psFwSysData->ui32ConfigFlags; + } + + OSMemoryBarrier(); + + eError = PVRSRVPowerLock(psDeviceNode); + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVPowerLock"); + + /* notify FW to update setting */ + eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); + + if ((eError == PVRSRV_OK) && (ePowerState != PVRSRV_DEV_POWER_STATE_OFF)) + { + /* Ask the FW to update its cached version of the value */ + sStateFlagCmd.eCmdType = RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL; + + eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, + &sStateFlagCmd, + PDUMP_FLAGS_CONTINUOUS, + &ui32kCCBCommandSlot); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSendCommandAndGetKCCBSlot", unlock); + bWaitForFwUpdate = IMG_TRUE; + } + +unlock: + 
PVRSRVPowerUnlock(psDeviceNode); + if (bWaitForFwUpdate) + { + /* Wait for the value to be updated as the FW validates + * the parameters and modifies the ui32ConfigFlags + * accordingly + * (for completeness as registered callbacks should also + * not permit invalid transitions) + */ + eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + PVR_LOG_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate"); + } + return eError; +} + +static +PVRSRV_ERROR RGXScheduleCleanupCommand(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_DM eDM, + RGXFWIF_KCCB_CMD *psKCCBCmd, + RGXFWIF_CLEANUP_TYPE eCleanupType, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32kCCBCommandSlot; + + psKCCBCmd->eCmdType = RGXFWIF_KCCB_CMD_CLEANUP; + psKCCBCmd->uCmdData.sCleanupData.eCleanupType = eCleanupType; + + /* + Send the cleanup request to the firmware. If the resource is still busy + the firmware will tell us and we'll drop out with a retry. + */ + eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, + eDM, + psKCCBCmd, + 0, + ui32PDumpFlags, + &ui32kCCBCommandSlot); + if (eError != PVRSRV_OK) + { + /* If we hit a temporary KCCB exhaustion, return a RETRY to caller */ + if (eError == PVRSRV_ERROR_KERNEL_CCB_FULL) + { + eError = PVRSRV_ERROR_RETRY; + } + else + { + PVR_LOG_VA(PVR_DBG_ERROR, + "failed to schedule cleanup command %d for %d", + eCleanupType, eDM); + } + + goto fail_command; + } + + /* Wait for command kCCB slot to be updated by FW */ + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Wait for the firmware to reply to the cleanup command"); + eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, + ui32PDumpFlags); + /* + If the firmware hasn't got back to us in a timely manner + then bail and let the caller retry the command. + */ + if (eError == PVRSRV_ERROR_TIMEOUT) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: RGXWaitForKCCBSlotUpdate timed out. 
Dump debug information.", + __func__)); + + eError = PVRSRV_ERROR_RETRY; +#if defined(DEBUG) + PVRSRVDebugRequest(psDevInfo->psDeviceNode, + DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); +#endif + goto fail_poll; + } + else if (eError != PVRSRV_OK) + { + goto fail_poll; + } + +#if defined(PDUMP) + /* + * The cleanup request to the firmware will tell us if a given resource is busy or not. + * If the RGXFWIF_KCCB_RTN_SLOT_CLEANUP_BUSY flag is set, this means that the resource is + * still in use. In this case we return a PVRSRV_ERROR_RETRY error to the client drivers + * and they will re-issue the cleanup request until it succeed. + * + * Since this retry mechanism doesn't work for pdumps, client drivers should ensure + * that cleanup requests are only submitted if the resource is unused. + * If this is not the case, the following poll will block infinitely, making sure + * the issue doesn't go unnoticed. + */ + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Cleanup: If this poll fails, the following resource is still in use (DM=%u, type=%u, address=0x%08x), which is incorrect in pdumps", + eDM, + psKCCBCmd->uCmdData.sCleanupData.eCleanupType, + psKCCBCmd->uCmdData.sCleanupData.uCleanupData.psContext.ui32Addr); + eError = DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBRtnSlotsMemDesc, + ui32kCCBCommandSlot * sizeof(IMG_UINT32), + 0, + RGXFWIF_KCCB_RTN_SLOT_CLEANUP_BUSY, + PDUMP_POLL_OPERATOR_EQUAL, + ui32PDumpFlags); + PVR_LOG_IF_ERROR(eError, "DevmemPDumpDevmemPol32"); +#endif + + /* + If the command has was run but a resource was busy, then the request + will need to be retried. 
+ */ + if (unlikely(psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] & RGXFWIF_KCCB_RTN_SLOT_CLEANUP_BUSY)) + { + if (psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] & RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE) + { + PVR_DPF((PVR_DBG_WARNING, "%s: FW poll on a HW operation failed", __func__)); + } + eError = PVRSRV_ERROR_RETRY; + goto fail_requestbusy; + } + + return PVRSRV_OK; + +fail_requestbusy: +fail_poll: +fail_command: + PVR_ASSERT(eError != PVRSRV_OK); + + return eError; +} + +/* + RGXRequestCommonContextCleanUp +*/ +PVRSRV_ERROR RGXFWRequestCommonContextCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, + RGXFWIF_DM eDM, + IMG_UINT32 ui32PDumpFlags) +{ + RGXFWIF_KCCB_CMD sRCCleanUpCmd = {0}; + PVRSRV_ERROR eError; + PRGXFWIF_FWCOMMONCONTEXT psFWCommonContextFWAddr; + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*)psDeviceNode->pvDevice; + + /* Force retry if this context's CCB is currently being dumped + * as part of the stalled CCB debug */ + if (psDevInfo->pvEarliestStalledClientCCB == (void*)psServerCommonContext->psClientCCB) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Forcing retry as psDevInfo->pvEarliestStalledClientCCB = psServerCommonContext->psClientCCB <%p>", + __func__, + (void*)psServerCommonContext->psClientCCB)); + return PVRSRV_ERROR_RETRY; + } + + psFWCommonContextFWAddr = FWCommonContextGetFWAddress(psServerCommonContext); +#if defined(PDUMP) + PDUMPCOMMENT("Common ctx cleanup Request DM%d [context = 0x%08x]", + eDM, psFWCommonContextFWAddr.ui32Addr); + PDUMPCOMMENT("Wait for CCB to be empty before common ctx cleanup"); + + RGXCCBPDumpDrainCCB(FWCommonContextGetClientCCB(psServerCommonContext), ui32PDumpFlags); +#endif + + /* Setup our command data, the cleanup call will fill in the rest */ + sRCCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psContext = psFWCommonContextFWAddr; + + /* Request cleanup of the firmware resource */ + eError = RGXScheduleCleanupCommand(psDeviceNode->pvDevice, + 
eDM, + &sRCCleanUpCmd, + RGXFWIF_CLEANUP_FWCOMMONCONTEXT, + ui32PDumpFlags); + + if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to schedule a memory context cleanup with error (%u)", + __func__, eError)); + } + + return eError; +} + +/* + * RGXFWRequestHWRTDataCleanUp + */ + +PVRSRV_ERROR RGXFWRequestHWRTDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode, + PRGXFWIF_HWRTDATA psHWRTData) +{ + RGXFWIF_KCCB_CMD sHWRTDataCleanUpCmd = {0}; + PVRSRV_ERROR eError; + + PDUMPCOMMENT("HW RTData cleanup Request [HWRTData = 0x%08x]", psHWRTData.ui32Addr); + + sHWRTDataCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psHWRTData = psHWRTData; + + eError = RGXScheduleCleanupCommand(psDeviceNode->pvDevice, + RGXFWIF_DM_GP, + &sHWRTDataCleanUpCmd, + RGXFWIF_CLEANUP_HWRTDATA, + PDUMP_FLAGS_NONE); + + if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to schedule a HWRTData cleanup with error (%u)", + __func__, eError)); + } + + return eError; +} + +/* + RGXFWRequestFreeListCleanUp +*/ +PVRSRV_ERROR RGXFWRequestFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo, + PRGXFWIF_FREELIST psFWFreeList) +{ + RGXFWIF_KCCB_CMD sFLCleanUpCmd = {0}; + PVRSRV_ERROR eError; + + PDUMPCOMMENT("Free list cleanup Request [FreeList = 0x%08x]", psFWFreeList.ui32Addr); + + /* Setup our command data, the cleanup call will fill in the rest */ + sFLCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psFreelist = psFWFreeList; + + /* Request cleanup of the firmware resource */ + eError = RGXScheduleCleanupCommand(psDevInfo, + RGXFWIF_DM_GP, + &sFLCleanUpCmd, + RGXFWIF_CLEANUP_FREELIST, + PDUMP_FLAGS_NONE); + + if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to schedule a memory context cleanup with error (%u)", + __func__, eError)); + } + + return eError; +} + +/* + RGXFWRequestZSBufferCleanUp +*/ +PVRSRV_ERROR RGXFWRequestZSBufferCleanUp(PVRSRV_RGXDEV_INFO 
*psDevInfo, + PRGXFWIF_ZSBUFFER psFWZSBuffer) +{ + RGXFWIF_KCCB_CMD sZSBufferCleanUpCmd = {0}; + PVRSRV_ERROR eError; + + PDUMPCOMMENT("ZS Buffer cleanup Request [ZS Buffer = 0x%08x]", psFWZSBuffer.ui32Addr); + + /* Setup our command data, the cleanup call will fill in the rest */ + sZSBufferCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psZSBuffer = psFWZSBuffer; + + /* Request cleanup of the firmware resource */ + eError = RGXScheduleCleanupCommand(psDevInfo, + RGXFWIF_DM_3D, + &sZSBufferCleanUpCmd, + RGXFWIF_CLEANUP_ZSBUFFER, + PDUMP_FLAGS_NONE); + + if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to schedule a memory context cleanup with error (%u)", + __func__, eError)); + } + + return eError; +} + +PVRSRV_ERROR RGXFWSetHCSDeadline(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32HCSDeadlineMs) +{ + psDevInfo->psRGXFWIfRuntimeCfg->ui32HCSDeadlineMS = ui32HCSDeadlineMs; + OSWriteMemoryBarrier(); + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXFWHealthCheckCmd(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFWIF_KCCB_CMD sCmpKCCBCmd; + + sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_HEALTH_CHECK; + + return RGXScheduleCommand(psDevInfo, + RGXFWIF_DM_GP, + &sCmpKCCBCmd, + 0, + PDUMP_FLAGS_CONTINUOUS); +} + +PVRSRV_ERROR RGXFWSetFwOsState(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32OSid, + RGXFWIF_OS_STATE_CHANGE eOSOnlineState) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_KCCB_CMD sOSOnlineStateCmd = { 0 }; + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + sOSOnlineStateCmd.eCmdType = RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE; + sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.ui32OSid = ui32OSid; + sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.eNewOSState = eOSOnlineState; + + if (eOSOnlineState == RGXFWIF_OS_ONLINE) + { + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psDevInfo, + RGXFWIF_DM_GP, + &sOSOnlineStateCmd, + 0, + 
PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_ERROR_RETRY) break; + + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + } + else if (psFwSysData) + { + IMG_UINT32 ui32kCCBCommandSlot; + volatile RGXFWIF_OS_RUNTIME_FLAGS *psFwRunFlags; + + psFwRunFlags = (volatile RGXFWIF_OS_RUNTIME_FLAGS*) &psFwSysData->asOsRuntimeFlagsMirror[ui32OSid]; + /* Attempt several times until the FW manages to offload the OS */ + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + /* Send request */ + eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, + RGXFWIF_DM_GP, + &sOSOnlineStateCmd, + 0, + PDUMP_FLAGS_CONTINUOUS, + &ui32kCCBCommandSlot); + if (unlikely(eError == PVRSRV_ERROR_RETRY)) continue; + PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommand", return_); + + /* Wait for FW to process the cmd */ + eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", return_); + + /* read the OS state */ + OSMemoryBarrier(); + /* check if FW finished offloading the OSID and is stopped */ + if (psFwRunFlags->bfOsState == RGXFW_CONNECTION_FW_OFFLINE) + { + eError = PVRSRV_OK; + break; + } + else + { + eError = PVRSRV_ERROR_TIMEOUT; + } + + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + } + else + { + eError = PVRSRV_ERROR_NOT_INITIALISED; + } + +return_ : + return eError; +} + +PVRSRV_ERROR RGXFWChangeOSidPriority(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32OSid, + IMG_UINT32 ui32Priority) +{ + PVRSRV_ERROR eError; + RGXFWIF_KCCB_CMD sOSidPriorityCmd = { 0 }; + + sOSidPriorityCmd.eCmdType = RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE; + psDevInfo->psRGXFWIfRuntimeCfg->aui32OSidPriority[ui32OSid] = ui32Priority; + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psDevInfo, + RGXFWIF_DM_GP, + &sOSidPriorityCmd, + 0, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } 
END_LOOP_UNTIL_TIMEOUT(); + + return eError; +} + +PVRSRV_ERROR ContextSetPriority(RGX_SERVER_COMMON_CONTEXT *psContext, + CONNECTION_DATA *psConnection, + PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32Priority, + RGXFWIF_DM eDM) +{ + IMG_UINT32 ui32CmdSize; + IMG_UINT8 *pui8CmdPtr; + RGXFWIF_KCCB_CMD sPriorityCmd = { 0 }; + RGXFWIF_CCB_CMD_HEADER *psCmdHeader; + RGXFWIF_CMD_PRIORITY *psCmd; + PVRSRV_ERROR eError; + RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psContext); + + eError = _CheckPriority(psDevInfo, ui32Priority, psContext->eRequestor); + PVR_LOG_GOTO_IF_ERROR(eError, "_CheckPriority", fail_checkpriority); + + /* + Get space for command + */ + ui32CmdSize = RGX_CCB_FWALLOC_ALIGN(sizeof(RGXFWIF_CCB_CMD_HEADER) + sizeof(RGXFWIF_CMD_PRIORITY)); + + eError = RGXAcquireCCB(psClientCCB, + ui32CmdSize, + (void **) &pui8CmdPtr, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + if (eError != PVRSRV_ERROR_RETRY) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire space for client CCB", __func__)); + } + goto fail_ccbacquire; + } + + /* + Write the command header and command + */ + psCmdHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8CmdPtr; + psCmdHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_PRIORITY; + psCmdHeader->ui32CmdSize = RGX_CCB_FWALLOC_ALIGN(sizeof(RGXFWIF_CMD_PRIORITY)); + pui8CmdPtr += sizeof(*psCmdHeader); + + psCmd = (RGXFWIF_CMD_PRIORITY *) pui8CmdPtr; + psCmd->ui32Priority = ui32Priority; + pui8CmdPtr += sizeof(*psCmd); + + /* + We should reserve space in the kernel CCB here and fill in the command + directly. + This is so if there isn't space in the kernel CCB we can return with + retry back to services client before we take any operations + */ + + /* + Submit the command + */ + RGXReleaseCCB(psClientCCB, + ui32CmdSize, + PDUMP_FLAGS_CONTINUOUS); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to release space in client CCB", __func__)); + return eError; + } + + /* Construct the priority command. 
*/ + sPriorityCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; + sPriorityCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psContext); + sPriorityCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB); + sPriorityCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB); + sPriorityCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; + sPriorityCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0; + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psDevInfo, + eDM, + &sPriorityCmd, + 0, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to submit set priority command with error (%u)", + __func__, + eError)); + } + + psContext->ui32Priority = ui32Priority; + + return PVRSRV_OK; + +fail_ccbacquire: +fail_checkpriority: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +PVRSRV_ERROR RGXFWConfigPHR(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32PHRMode) +{ + PVRSRV_ERROR eError; + RGXFWIF_KCCB_CMD sCfgPHRCmd = { 0 }; + + sCfgPHRCmd.eCmdType = RGXFWIF_KCCB_CMD_PHR_CFG; + psDevInfo->psRGXFWIfRuntimeCfg->ui32PHRMode = ui32PHRMode; + OSWriteMemoryBarrier(); + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psDevInfo, + RGXFWIF_DM_GP, + &sCfgPHRCmd, + 0, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + return eError; +} + +PVRSRV_ERROR RGXFWConfigWdg(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32WdgPeriodUs) +{ + PVRSRV_ERROR eError; + RGXFWIF_KCCB_CMD sCfgWdgCmd = { 0 }; + + sCfgWdgCmd.eCmdType = RGXFWIF_KCCB_CMD_WDG_CFG; + psDevInfo->psRGXFWIfRuntimeCfg->ui32WdgPeriodUs = ui32WdgPeriodUs; + OSWriteMemoryBarrier(); + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = 
RGXScheduleCommand(psDevInfo, + RGXFWIF_DM_GP, + &sCfgWdgCmd, + 0, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + return eError; +} + +/* + RGXReadMETAAddr +*/ +PVRSRV_ERROR RGXReadMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32METAAddr, IMG_UINT32 *pui32Value) +{ + IMG_UINT8 __iomem *pui8RegBase = psDevInfo->pvRegsBaseKM; + IMG_UINT32 ui32PollValue; + IMG_UINT32 ui32PollMask; + IMG_UINT32 ui32PollRegOffset; + IMG_UINT32 ui32ReadOffset; + IMG_UINT32 ui32WriteOffset; + IMG_UINT32 ui32WriteValue; + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES)) + { + ui32PollValue = RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN; + ui32PollMask = RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN; + ui32PollRegOffset = RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES; + ui32WriteOffset = RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES; + ui32WriteValue = ui32METAAddr | RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES__RD_EN; + CHECK_HWBRN_68777(ui32WriteValue); + ui32ReadOffset = RGX_CR_META_SP_MSLVDATAX__META_REGISTER_UNPACKED_ACCESSES; + } + else + { + ui32PollValue = RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN; + ui32PollMask = RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN; + ui32PollRegOffset = RGX_CR_META_SP_MSLVCTRL1; + ui32WriteOffset = RGX_CR_META_SP_MSLVCTRL0; + ui32WriteValue = ui32METAAddr | RGX_CR_META_SP_MSLVCTRL0_RD_EN; + ui32ReadOffset = RGX_CR_META_SP_MSLVDATAX; + } + + /* Wait for Slave Port to be Ready */ + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + (IMG_UINT32 __iomem *) (pui8RegBase + ui32PollRegOffset), + ui32PollValue, + 
ui32PollMask, + POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + { + return PVRSRV_ERROR_TIMEOUT; + } + + /* Issue the Read */ + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32WriteOffset, ui32WriteValue); + (void)OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32WriteOffset); + + /* Wait for Slave Port to be Ready: read complete */ + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + (IMG_UINT32 __iomem *) (pui8RegBase + ui32PollRegOffset), + ui32PollValue, + ui32PollMask, + POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + { + return PVRSRV_ERROR_TIMEOUT; + } + + /* Read the value */ + *pui32Value = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32ReadOffset); + + return PVRSRV_OK; +} + +/* + RGXWriteMETAAddr +*/ +PVRSRV_ERROR RGXWriteMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32METAAddr, IMG_UINT32 ui32Value) +{ + IMG_UINT8 __iomem *pui8RegBase = psDevInfo->pvRegsBaseKM; + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES)) + { + /* Wait for Slave Port to be Ready */ + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + (IMG_UINT32 __iomem *)(pui8RegBase + RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES), + RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN, + POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + { + return PVRSRV_ERROR_TIMEOUT; + } + + /* Issue the Write */ + CHECK_HWBRN_68777(ui32METAAddr); + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES, ui32METAAddr); + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVDATAT__META_REGISTER_UNPACKED_ACCESSES, ui32Value); + } + else + { + /* Wait for Slave Port to be Ready */ + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + (IMG_UINT32 __iomem *)(pui8RegBase + RGX_CR_META_SP_MSLVCTRL1), + 
RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, + POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + { + return PVRSRV_ERROR_TIMEOUT; + } + + /* Issue the Write */ + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL0, ui32METAAddr); + (void) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL0); /* Fence write */ + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVDATAT, ui32Value); + (void) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVDATAT); /* Fence write */ + } + + return PVRSRV_OK; +} + +void RGXCheckForStalledClientContexts(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bIgnorePrevious) +{ + /* Attempt to detect and deal with any stalled client contexts. + * bIgnorePrevious may be set by the caller if they know a context to be + * stalled, as otherwise this function will only identify stalled + * contexts which have not been previously reported. + */ + + IMG_UINT32 ui32StalledClientMask = 0; + + if (!(OSTryLockAcquire(psDevInfo->hCCBStallCheckLock))) + { + PVR_LOG(("RGXCheckForStalledClientContexts: Failed to acquire hCCBStallCheckLock, returning...")); + return; + } + + ui32StalledClientMask |= CheckForStalledClientTDMTransferCtxt(psDevInfo); + + ui32StalledClientMask |= CheckForStalledClientRenderCtxt(psDevInfo); + + ui32StalledClientMask |= CheckForStalledClientComputeCtxt(psDevInfo); + + ui32StalledClientMask |= CheckForStalledClientKickSyncCtxt(psDevInfo); + + /* If at least one DM stalled bit is different than before */ + if (bIgnorePrevious || (psDevInfo->ui32StalledClientMask != ui32StalledClientMask)) + { + if (ui32StalledClientMask > 0) + { + static __maybe_unused const char *pszStalledAction = +#if defined(PVRSRV_STALLED_CCB_ACTION) + "force"; +#else + "warn"; +#endif + /* Print all the stalled DMs */ + PVR_LOG(("Possible stalled client RGX contexts detected: %s%s%s%s%s%s%s", + 
RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_GP), + RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TDM_2D), + RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TA), + RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_3D), + RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_CDM), + RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TQ2D), + RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TQ3D))); + + PVR_LOG(("Trying to identify stalled context...(%s) [%d]", + pszStalledAction, bIgnorePrevious)); + + DumpStalledContextInfo(psDevInfo); + } + else + { + if (psDevInfo->ui32StalledClientMask> 0) + { + /* Indicate there are no stalled DMs */ + PVR_LOG(("No further stalled client contexts exist")); + } + } + psDevInfo->ui32StalledClientMask = ui32StalledClientMask; + psDevInfo->pvEarliestStalledClientCCB = NULL; + } + OSLockRelease(psDevInfo->hCCBStallCheckLock); +} + +/* + RGXUpdateHealthStatus +*/ +PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode, + IMG_BOOL bCheckAfterTimePassed) +{ + PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DEVICE_HEALTH_STATUS eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_OK; + PVRSRV_DEVICE_HEALTH_REASON eNewReason = PVRSRV_DEVICE_HEALTH_REASON_NONE; + PVRSRV_RGXDEV_INFO* psDevInfo; + RGXFWIF_TRACEBUF* psRGXFWIfTraceBufCtl; + RGXFWIF_SYSDATA* psFwSysData; + RGXFWIF_OSDATA* psFwOsData; + RGXFWIF_CCB_CTL *psKCCBCtl; + IMG_UINT32 ui32ThreadCount; + IMG_BOOL bKCCBCmdsWaiting; + + PVR_ASSERT(psDevNode != NULL); + psDevInfo = psDevNode->pvDevice; + + /* If the firmware is not yet initialised or has already deinitialised, stop here */ + if (psDevInfo == NULL || !psDevInfo->bFirmwareInitialised || psDevInfo->pvRegsBaseKM == NULL || + psDevInfo->psDeviceNode == NULL || psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT) + { + return 
PVRSRV_OK; + } + + psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; + psFwSysData = psDevInfo->psRGXFWIfFwSysData; + psFwOsData = psDevInfo->psRGXFWIfFwOsData; + + /* If this is a quick update, then include the last current value... */ + if (!bCheckAfterTimePassed) + { + eNewStatus = OSAtomicRead(&psDevNode->eHealthStatus); + eNewReason = OSAtomicRead(&psDevNode->eHealthReason); + } + + /* Decrement the SLR holdoff counter (if non-zero) */ + if (psDevInfo->ui32SLRHoldoffCounter > 0) + { + psDevInfo->ui32SLRHoldoffCounter--; + } + + + + /* If Rogue is not powered on, just skip ahead and check for stalled client CCBs */ + if (PVRSRVIsDevicePowered(psDevNode)) + { + /* + Firmware thread checks... + */ + if (psRGXFWIfTraceBufCtl != NULL) + { + for (ui32ThreadCount = 0; ui32ThreadCount < RGXFW_THREAD_NUM; ui32ThreadCount++) + { + IMG_CHAR* pszTraceAssertInfo = psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.szInfo; + + /* + Check if the FW has hit an assert... + */ + if (*pszTraceAssertInfo != '\0') + { + PVR_DPF((PVR_DBG_WARNING, "%s: Firmware thread %d has asserted: %s (%s:%d)", + __func__, ui32ThreadCount, pszTraceAssertInfo, + psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.szPath, + psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.ui32LineNum)); + eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_DEAD; + eNewReason = PVRSRV_DEVICE_HEALTH_REASON_ASSERTED; + goto _RGXUpdateHealthStatus_Exit; + } + + /* + Check the threads to see if they are in the same poll locations as last time... 
+ */ + if (bCheckAfterTimePassed) + { + if (psFwSysData->aui32CrPollAddr[ui32ThreadCount] != 0 && + psFwSysData->aui32CrPollCount[ui32ThreadCount] == psDevInfo->aui32CrLastPollCount[ui32ThreadCount]) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Firmware stuck on CR poll: T%u polling %s (reg:0x%08X mask:0x%08X)", + __func__, ui32ThreadCount, + ((psFwSysData->aui32CrPollAddr[ui32ThreadCount] & RGXFW_POLL_TYPE_SET)?("set"):("unset")), + psFwSysData->aui32CrPollAddr[ui32ThreadCount] & ~RGXFW_POLL_TYPE_SET, + psFwSysData->aui32CrPollMask[ui32ThreadCount])); + eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING; + eNewReason = PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING; + goto _RGXUpdateHealthStatus_Exit; + } + psDevInfo->aui32CrLastPollCount[ui32ThreadCount] = psFwSysData->aui32CrPollCount[ui32ThreadCount]; + } + } + + /* + Check if the FW has faulted... + */ + if (psFwSysData->ui32HWRStateFlags & RGXFWIF_HWR_FW_FAULT) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Firmware has faulted and needs to restart", + __func__)); + eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_FAULT; + if (psFwSysData->ui32HWRStateFlags & RGXFWIF_HWR_RESTART_REQUESTED) + { + eNewReason = PVRSRV_DEVICE_HEALTH_REASON_RESTARTING; + } + else + { + eNewReason = PVRSRV_DEVICE_HEALTH_REASON_IDLING; + } + goto _RGXUpdateHealthStatus_Exit; + } + } + + /* + Event Object Timeouts check... + */ + if (!bCheckAfterTimePassed) + { + if (psDevInfo->ui32GEOTimeoutsLastTime > 1 && psPVRSRVData->ui32GEOConsecutiveTimeouts > psDevInfo->ui32GEOTimeoutsLastTime) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Global Event Object Timeouts have risen (from %d to %d)", + __func__, + psDevInfo->ui32GEOTimeoutsLastTime, psPVRSRVData->ui32GEOConsecutiveTimeouts)); + eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING; + eNewReason = PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS; + } + psDevInfo->ui32GEOTimeoutsLastTime = psPVRSRVData->ui32GEOConsecutiveTimeouts; + } + + /* + Check the Kernel CCB pointer is valid. 
If any commands were waiting last time, then check + that some have executed since then. + */ + bKCCBCmdsWaiting = IMG_FALSE; + psKCCBCtl = psDevInfo->psKernelCCBCtl; + + if (psKCCBCtl != NULL) + { + if (psKCCBCtl->ui32ReadOffset > psKCCBCtl->ui32WrapMask || + psKCCBCtl->ui32WriteOffset > psKCCBCtl->ui32WrapMask) + { + PVR_DPF((PVR_DBG_WARNING, "%s: KCCB has invalid offset (ROFF=%d WOFF=%d)", + __func__, psKCCBCtl->ui32ReadOffset, psKCCBCtl->ui32WriteOffset)); + eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_DEAD; + eNewReason = PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT; + } + + if (psKCCBCtl->ui32ReadOffset != psKCCBCtl->ui32WriteOffset) + { + bKCCBCmdsWaiting = IMG_TRUE; + } + } + + if (bCheckAfterTimePassed && psFwOsData != NULL) + { + IMG_UINT32 ui32KCCBCmdsExecuted = psFwOsData->ui32KCCBCmdsExecuted; + + if (psDevInfo->ui32KCCBCmdsExecutedLastTime == ui32KCCBCmdsExecuted) + { + /* + If something was waiting last time then the Firmware has stopped processing commands. + */ + if (psDevInfo->bKCCBCmdsWaitingLastTime) + { + PVR_DPF((PVR_DBG_WARNING, "%s: No KCCB commands executed since check!", + __func__)); + eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING; + eNewReason = PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED; + } + + /* + If no commands are currently pending and nothing happened since the last poll, then + schedule a dummy command to ping the firmware so we know it is alive and processing. + */ + if (!bKCCBCmdsWaiting) + { + PVRSRV_ERROR eError = RGXFWHealthCheckCmd(psDevNode->pvDevice); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Cannot schedule Health Check command! (0x%x)", + __func__, eError)); + } + else + { + bKCCBCmdsWaiting = IMG_TRUE; + } + } + } + + psDevInfo->bKCCBCmdsWaitingLastTime = bKCCBCmdsWaiting; + psDevInfo->ui32KCCBCmdsExecutedLastTime = ui32KCCBCmdsExecuted; + } + } + + /* + Interrupt counts check... 
+ */ + if (bCheckAfterTimePassed && psFwOsData != NULL) + { + IMG_UINT32 ui32LISRCount = 0; + IMG_UINT32 ui32FWCount = 0; + IMG_UINT32 ui32MissingInts = 0; + IMG_UINT32 ui32Index; + + /* Add up the total number of interrupts issued, sampled/received and missed... */ + for (ui32Index = 0; ui32Index < RGXFW_THREAD_NUM; ui32Index++) + { + ui32LISRCount += psDevInfo->aui32SampleIRQCount[ui32Index]; + ui32FWCount += psFwOsData->aui32InterruptCount[ui32Index]; + } + + if (ui32LISRCount < ui32FWCount) + { + ui32MissingInts = (ui32FWCount-ui32LISRCount); + } + + if (ui32LISRCount == psDevInfo->ui32InterruptCountLastTime && + ui32MissingInts >= psDevInfo->ui32MissingInterruptsLastTime && + psDevInfo->ui32MissingInterruptsLastTime > 1) + { + PVR_DPF((PVR_DBG_ERROR, "%s: LISR has not received the last %d interrupts", + __func__, ui32MissingInts)); + eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING; + eNewReason = PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS; + + /* Schedule the MISRs to help mitigate the problems of missing interrupts. */ + OSScheduleMISR(psDevInfo->pvMISRData); + if (psDevInfo->pvAPMISRData != NULL) + { + OSScheduleMISR(psDevInfo->pvAPMISRData); + } + } + psDevInfo->ui32InterruptCountLastTime = ui32LISRCount; + psDevInfo->ui32MissingInterruptsLastTime = ui32MissingInts; + } + + /* + Stalled CCB check... 
+ */ + if (bCheckAfterTimePassed && (PVRSRV_DEVICE_HEALTH_STATUS_OK==eNewStatus)) + { + RGXCheckForStalledClientContexts(psDevInfo, IMG_FALSE); + } + + /* Notify client driver and system layer of any eNewStatus errors */ + if (eNewStatus > PVRSRV_DEVICE_HEALTH_STATUS_OK) + { + /* Client notification of device error will be achieved by + * clients calling UM function RGXGetLastDeviceError() */ + psDevInfo->eLastDeviceError = RGX_CONTEXT_RESET_REASON_HOST_WDG_FW_ERR; + + /* Notify system layer */ + { + PVRSRV_DEVICE_NODE *psDevNode = psDevInfo->psDeviceNode; + PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; + + if (psDevConfig->pfnSysDevErrorNotify) + { + PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = {0}; + + sErrorData.eResetReason = RGX_CONTEXT_RESET_REASON_HOST_WDG_FW_ERR; + sErrorData.uErrData.sHostWdgData.ui32Status = (IMG_UINT32)eNewStatus; + sErrorData.uErrData.sHostWdgData.ui32Reason = (IMG_UINT32)eNewReason; + + psDevConfig->pfnSysDevErrorNotify(psDevConfig, + &sErrorData); + } + } + } + + /* + Finished, save the new status... + */ +_RGXUpdateHealthStatus_Exit: + OSAtomicWrite(&psDevNode->eHealthStatus, eNewStatus); + OSAtomicWrite(&psDevNode->eHealthReason, eNewReason); + RGXSRV_HWPERF_DEVICE_INFO(psDevInfo, RGX_HWPERF_DEV_INFO_EV_HEALTH, eNewStatus, eNewReason); + + /* + * Attempt to service the HWPerf buffer to regularly transport idle/periodic + * packets to host buffer. 
+ */ + if (psDevNode->pfnServiceHWPerf != NULL) + { + PVRSRV_ERROR eError = psDevNode->pfnServiceHWPerf(psDevNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "%s: " + "Error occurred when servicing HWPerf buffer (%d)", + __func__, eError)); + } + } + + /* Attempt to refresh timer correlation data */ + RGXTimeCorrRestartPeriodic(psDevNode); + + return PVRSRV_OK; +} /* RGXUpdateHealthStatus */ + +#if defined(SUPPORT_AUTOVZ) +void RGXUpdateAutoVzWdgToken(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + if (likely(KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) && KM_OS_CONNECTION_IS(ACTIVE, psDevInfo))) + { + /* read and write back the alive token value to confirm to the + * virtualisation watchdog that this connection is healthy */ + KM_SET_OS_ALIVE_TOKEN(KM_GET_FW_ALIVE_TOKEN(psDevInfo), psDevInfo); + } +} + +/* + RGXUpdateAutoVzWatchdog +*/ +void RGXUpdateAutoVzWatchdog(PVRSRV_DEVICE_NODE* psDevNode) +{ + if (likely(psDevNode != NULL)) + { + PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice; + + if (unlikely((psDevInfo == NULL || !psDevInfo->bFirmwareInitialised || !psDevInfo->bRGXPowered || + psDevInfo->pvRegsBaseKM == NULL || psDevNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT))) + { + /* If the firmware is not initialised, stop here */ + return; + } + else + { + PVRSRV_ERROR eError = PVRSRVPowerLock(psDevNode); + PVR_LOG_RETURN_VOID_IF_ERROR(eError, "PVRSRVPowerLock"); + + RGXUpdateAutoVzWdgToken(psDevInfo); + PVRSRVPowerUnlock(psDevNode); + } + } +} +#endif /* SUPPORT_AUTOVZ */ + +PVRSRV_ERROR CheckStalledClientCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, RGX_KICK_TYPE_DM eKickTypeDM) +{ + if (psCurrentServerCommonContext == NULL) + { + /* the context has already been freed so there is nothing to do here */ + return PVRSRV_OK; + } + + return CheckForStalledCCB(psCurrentServerCommonContext->psDevInfo->psDeviceNode, + psCurrentServerCommonContext->psClientCCB, + eKickTypeDM); +} + +void DumpFWCommonContextInfo(RGX_SERVER_COMMON_CONTEXT 
*psCurrentServerCommonContext, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + IMG_UINT32 ui32VerbLevel) +{ + if (psCurrentServerCommonContext == NULL) + { + /* the context has already been freed so there is nothing to do here */ + return; + } + + if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH)) + { + /* If high verbosity requested, dump whole CCB */ + DumpCCB(psCurrentServerCommonContext->psDevInfo, + psCurrentServerCommonContext->sFWCommonContextFWAddr, + psCurrentServerCommonContext->psClientCCB, + pfnDumpDebugPrintf, + pvDumpDebugFile); + } + else + { + /* Otherwise, only dump first stalled command in the CCB */ + DumpStalledCCBCommand(psCurrentServerCommonContext->sFWCommonContextFWAddr, + psCurrentServerCommonContext->psClientCCB, + pfnDumpDebugPrintf, + pvDumpDebugFile); + } +} + +PVRSRV_ERROR AttachKickResourcesCleanupCtls(PRGXFWIF_CLEANUP_CTL *apsCleanupCtl, + IMG_UINT32 *pui32NumCleanupCtl, + RGXFWIF_DM eDM, + IMG_BOOL bKick, + RGX_KM_HW_RT_DATASET *psKMHWRTDataSet, + RGX_ZSBUFFER_DATA *psZSBuffer, + RGX_ZSBUFFER_DATA *psMSAAScratchBuffer) +{ + PVRSRV_ERROR eError; + PRGXFWIF_CLEANUP_CTL *psCleanupCtlWrite = apsCleanupCtl; + + PVR_ASSERT((eDM == RGXFWIF_DM_GEOM) || (eDM == RGXFWIF_DM_3D)); + PVR_RETURN_IF_INVALID_PARAM((eDM == RGXFWIF_DM_GEOM) || (eDM == RGXFWIF_DM_3D)); + + if (bKick) + { + if (psKMHWRTDataSet) + { + PRGXFWIF_CLEANUP_CTL psCleanupCtl; + + eError = RGXSetFirmwareAddress(&psCleanupCtl, psKMHWRTDataSet->psHWRTDataFwMemDesc, + offsetof(RGXFWIF_HWRTDATA, sCleanupState), + RFW_FWADDR_NOREF_FLAG); + PVR_RETURN_IF_ERROR(eError); + + *(psCleanupCtlWrite++) = psCleanupCtl; + } + + if (eDM == RGXFWIF_DM_3D) + { + RGXFWIF_PRBUFFER_TYPE eBufferType; + RGX_ZSBUFFER_DATA *psBuffer = NULL; + + for (eBufferType = RGXFWIF_PRBUFFER_START; eBufferType < RGXFWIF_PRBUFFER_MAXSUPPORTED; eBufferType++) + { + switch (eBufferType) + { + case RGXFWIF_PRBUFFER_ZSBUFFER: + psBuffer = psZSBuffer; + break; + case 
RGXFWIF_PRBUFFER_MSAABUFFER: + psBuffer = psMSAAScratchBuffer; + break; + case RGXFWIF_PRBUFFER_MAXSUPPORTED: + psBuffer = NULL; + break; + } + if (psBuffer) + { + (psCleanupCtlWrite++)->ui32Addr = psBuffer->sZSBufferFWDevVAddr.ui32Addr + + offsetof(RGXFWIF_PRBUFFER, sCleanupState); + psBuffer = NULL; + } + } + } + } + + *pui32NumCleanupCtl = psCleanupCtlWrite - apsCleanupCtl; + PVR_ASSERT(*pui32NumCleanupCtl <= RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS); + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXResetHWRLogs(PVRSRV_DEVICE_NODE *psDevNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + RGXFWIF_HWRINFOBUF *psHWRInfoBuf; + IMG_UINT32 i; + + if (psDevNode->pvDevice == NULL) + { + return PVRSRV_ERROR_INVALID_DEVINFO; + } + psDevInfo = psDevNode->pvDevice; + + psHWRInfoBuf = psDevInfo->psRGXFWIfHWRInfoBufCtl; + + for (i = 0 ; i < psDevInfo->sDevFeatureCfg.ui32MAXDMCount ; i++) + { + /* Reset the HWR numbers */ + psHWRInfoBuf->aui32HwrDmLockedUpCount[i] = 0; + psHWRInfoBuf->aui32HwrDmFalseDetectCount[i] = 0; + psHWRInfoBuf->aui32HwrDmRecoveredCount[i] = 0; + psHWRInfoBuf->aui32HwrDmOverranCount[i] = 0; + } + + for (i = 0 ; i < RGXFWIF_HWINFO_MAX ; i++) + { + psHWRInfoBuf->sHWRInfo[i].ui32HWRNumber = 0; + } + + psHWRInfoBuf->ui32WriteIndex = 0; + psHWRInfoBuf->ui32DDReqCount = 0; + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXGetPhyAddr(PMR *psPMR, + IMG_DEV_PHYADDR *psPhyAddr, + IMG_UINT32 ui32LogicalOffset, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32NumOfPages, + IMG_BOOL *bValid) +{ + + PVRSRV_ERROR eError; + + eError = PMRLockSysPhysAddresses(psPMR); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: PMRLockSysPhysAddresses failed (%u)", + __func__, + eError)); + return eError; + } + + eError = PMR_DevPhysAddr(psPMR, + ui32Log2PageSize, + ui32NumOfPages, + ui32LogicalOffset, + psPhyAddr, + bValid); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: PMR_DevPhysAddr failed (%u)", + __func__, + eError)); + return eError; + } + + + eError = 
PMRUnlockSysPhysAddresses(psPMR); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: PMRUnLockSysPhysAddresses failed (%u)", + __func__, + eError)); + return eError; + } + + return eError; +} + +#if defined(PDUMP) +PVRSRV_ERROR RGXPdumpDrainKCCB(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32WriteOffset) +{ + RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl; + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + if (psDevInfo->bDumpedKCCBCtlAlready) + { + /* exiting capture range or pdump block */ + psDevInfo->bDumpedKCCBCtlAlready = IMG_FALSE; + + /* make sure previous cmd is drained in pdump in case we will 'jump' over some future cmds */ + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER, + "kCCB(%p): Draining rgxfw_roff (0x%x) == woff (0x%x)", + psKCCBCtl, + ui32WriteOffset, + ui32WriteOffset); + eError = DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBCtlMemDesc, + offsetof(RGXFWIF_CCB_CTL, ui32ReadOffset), + ui32WriteOffset, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: problem pdumping POL for kCCBCtl (%d)", __func__, eError)); + } + } + + return eError; + +} +#endif + +/*! +******************************************************************************* + + @Function RGXClientConnectCompatCheck_ClientAgainstFW + + @Description + + Check compatibility of client and firmware (build options) + at the connection time. 
+ + @Input psDeviceNode - device node + @Input ui32ClientBuildOptions - build options for the client + + @Return PVRSRV_ERROR - depending on mismatch found + +******************************************************************************/ +PVRSRV_ERROR RGXClientConnectCompatCheck_ClientAgainstFW(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32ClientBuildOptions) +{ +#if !defined(NO_HARDWARE) || defined(PDUMP) +#if !defined(NO_HARDWARE) + IMG_UINT32 ui32BuildOptionsMismatch; + IMG_UINT32 ui32BuildOptionsFW; +#endif + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; +#endif + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + +#if !defined(NO_HARDWARE) + if (psDevInfo == NULL || psDevInfo->psRGXFWIfOsInitMemDesc == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Cannot acquire kernel fw compatibility check info, RGXFWIF_OSINIT structure not allocated.", + __func__)); + return PVRSRV_ERROR_NOT_INITIALISED; + } + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + if (*((volatile IMG_BOOL *) &psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated)) + { + /* No need to wait if the FW has already updated the values */ + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); +#endif + +#if defined(PDUMP) + { + PVRSRV_ERROR eError; + + PDUMPCOMMENT("Compatibility check: client and FW build options"); + eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc, + offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + + offsetof(RGXFWIF_COMPCHECKS, ui32BuildOptions), + ui32ClientBuildOptions, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", + __func__, + eError)); + return eError; + } + } +#endif + +#if !defined(NO_HARDWARE) + ui32BuildOptionsFW = psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.ui32BuildOptions; + ui32BuildOptionsMismatch = ui32ClientBuildOptions ^ ui32BuildOptionsFW; + + if (ui32BuildOptionsMismatch != 0) + { + if 
((ui32ClientBuildOptions & ui32BuildOptionsMismatch) != 0) + { + PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and client build options; " + "extra options present in client: (0x%x). Please check rgx_options.h", + ui32ClientBuildOptions & ui32BuildOptionsMismatch )); + } + + if ((ui32BuildOptionsFW & ui32BuildOptionsMismatch) != 0) + { + PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and client build options; " + "extra options present in Firmware: (0x%x). Please check rgx_options.h", + ui32BuildOptionsFW & ui32BuildOptionsMismatch )); + } + + return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH; + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware and client build options match. [ OK ]", __func__)); + } +#endif + + return PVRSRV_OK; +} + +/*! +******************************************************************************* + + @Function RGXFwRawHeapAllocMap + + @Description Register firmware heap for the specified guest OSID + + @Input psDeviceNode - device node + @Input ui32OSID - Guest OSID + @Input sDevPAddr - Heap address + @Input ui64DevPSize - Heap size + + @Return PVRSRV_ERROR - PVRSRV_OK if heap setup was successful. 

******************************************************************************/
PVRSRV_ERROR RGXFwRawHeapAllocMap(PVRSRV_DEVICE_NODE *psDeviceNode,
                                  IMG_UINT32 ui32OSID,
                                  IMG_DEV_PHYADDR sDevPAddr,
                                  IMG_UINT64 ui64DevPSize)
{
	PVRSRV_ERROR eError;
	IMG_CHAR szRegionRAName[RA_MAX_NAME_LENGTH];
	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
	/* Hint the allocation at the per-OSID premap physheap (FW_PREMAP0 + ui32OSID). */
	PVRSRV_MEMALLOCFLAGS_T uiRawFwHeapAllocFlags = (RGX_FWSHAREDMEM_ALLOCFLAGS |
	                                                PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_PREMAP0 + ui32OSID));
	PHYS_HEAP_CONFIG *psFwMainConfig = FindPhysHeapConfig(psDeviceNode->psDevConfig,
	                                                      PHYS_HEAP_USAGE_FW_MAIN);
	PHYS_HEAP_CONFIG sFwHeapConfig;

	/* Only the virtualisation Host performs this premapping; other modes succeed as a no-op. */
	PVRSRV_VZ_RET_IF_NOT_MODE(HOST, PVRSRV_OK);

	if (psFwMainConfig == NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "FW_MAIN heap config not found."));
		return PVRSRV_ERROR_NOT_SUPPORTED;
	}

	OSSNPrintf(szRegionRAName, sizeof(szRegionRAName), RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32OSID);

	/* A guest raw heap must have a non-zero base, a valid OSID, and be exactly
	 * RGX_FIRMWARE_RAW_HEAP_SIZE bytes — anything else is rejected outright. */
	if (!ui64DevPSize ||
	    !sDevPAddr.uiAddr ||
	    ui32OSID >= RGX_NUM_OS_SUPPORTED ||
	    ui64DevPSize != RGX_FIRMWARE_RAW_HEAP_SIZE)
	{
		PVR_DPF((PVR_DBG_ERROR, "Invalid parameters for %s", szRegionRAName));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	/* Clone the FW_MAIN heap config and rebase it onto the guest heap's card address. */
	sFwHeapConfig = *psFwMainConfig;
	sFwHeapConfig.sStartAddr.uiAddr = 0;
	sFwHeapConfig.sCardBase.uiAddr = sDevPAddr.uiAddr;
	sFwHeapConfig.uiSize = RGX_FIRMWARE_RAW_HEAP_SIZE;

	eError = PhysmemCreateHeapLMA(psDeviceNode, &sFwHeapConfig, szRegionRAName, &psDeviceNode->apsFWPremapPhysHeap[ui32OSID]);
	PVR_LOG_RETURN_IF_ERROR_VA(eError, "PhysmemCreateHeapLMA:PREMAP [%d]", ui32OSID);

	eError = PhysHeapAcquire(psDeviceNode->apsFWPremapPhysHeap[ui32OSID]);
	PVR_LOG_RETURN_IF_ERROR_VA(eError, "PhysHeapAcquire:PREMAP [%d]", ui32OSID);

	psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP0 + ui32OSID] = psDeviceNode->apsFWPremapPhysHeap[ui32OSID];

	PDUMPCOMMENT("Allocate and map raw firmware heap for OSID: [%d]", ui32OSID);

#if (RGX_NUM_OS_SUPPORTED > 1)
	/* don't clear the heap of other guests on allocation */
	uiRawFwHeapAllocFlags &= (ui32OSID > RGXFW_HOST_OS) ? (~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) : (~0ULL);
#endif

	/* if the firmware is already powered up, consider the firmware heaps are pre-mapped. */
	if (psDeviceNode->bAutoVzFwIsUp)
	{
		uiRawFwHeapAllocFlags &= RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp);
		DevmemHeapSetPremapStatus(psDevInfo->psGuestFirmwareRawHeap[ui32OSID], IMG_TRUE);
	}

	eError = DevmemFwAllocate(psDevInfo,
	                          RGX_FIRMWARE_RAW_HEAP_SIZE,
	                          uiRawFwHeapAllocFlags,
	                          psDevInfo->psGuestFirmwareRawHeap[ui32OSID]->pszName,
	                          &psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID]);
	PVR_LOG_RETURN_IF_ERROR(eError, "DevmemFwAllocate");

	/* Mark this devmem heap as premapped so allocations will not require device mapping. */
	DevmemHeapSetPremapStatus(psDevInfo->psGuestFirmwareRawHeap[ui32OSID], IMG_TRUE);

	if (ui32OSID == RGXFW_HOST_OS)
	{
		/* if the Host's raw fw heap is premapped, mark its main & config sub-heaps accordingly
		 * No memory allocated from these sub-heaps will be individually mapped into the device's
		 * address space so they can remain marked permanently as premapped. */
		DevmemHeapSetPremapStatus(psDevInfo->psFirmwareMainHeap, IMG_TRUE);
		DevmemHeapSetPremapStatus(psDevInfo->psFirmwareConfigHeap, IMG_TRUE);
	}

	return eError;
}

/*!
*******************************************************************************

 @Function RGXFwRawHeapUnmapFree

 @Description Unregister firmware heap for the specified guest OSID

 @Input psDeviceNode - device node
 @Input ui32OSID - Guest OSID

******************************************************************************/
void RGXFwRawHeapUnmapFree(PVRSRV_DEVICE_NODE *psDeviceNode,
                           IMG_UINT32 ui32OSID)
{
	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;

	/* remove the premap status, so the heap can be unmapped and freed */
	if (psDevInfo->psGuestFirmwareRawHeap[ui32OSID])
	{
		DevmemHeapSetPremapStatus(psDevInfo->psGuestFirmwareRawHeap[ui32OSID], IMG_FALSE);
	}

	if (psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID])
	{
		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID]);
		/* Null the descriptor so a repeated call becomes a harmless no-op. */
		psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID] = NULL;
	}
}

/*!
*******************************************************************************
@Function RGXRiscvHalt

@Description Halt the RISC-V FW core (required for certain operations
             done through Debug Module)

@Input psDevInfo Pointer to device info

@Return PVRSRV_ERROR
******************************************************************************/
PVRSRV_ERROR RGXRiscvHalt(PVRSRV_RGXDEV_INFO *psDevInfo)
{
#if defined(NO_HARDWARE) && defined(PDUMP)
	/* No live registers in nohw builds: emit the equivalent PDump script instead. */
	PVR_UNREFERENCED_PARAMETER(psDevInfo);

	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Halt RISC-V FW");

	/* Send halt request (no need to select one or more harts on this RISC-V core) */
	PDUMPREG32(RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DMCONTROL,
	           RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_EN |
	           RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN,
	           PDUMP_FLAGS_CONTINUOUS);

	/* Wait until hart is halted */
	PDUMPREGPOL(RGX_PDUMPREG_NAME,
	            RGX_CR_FWCORE_DMI_DMSTATUS,
	            RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN,
	            RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN,
	            PDUMP_FLAGS_CONTINUOUS,
	            PDUMP_POLL_OPERATOR_EQUAL);

	/* Clear halt request */
	PDUMPREG32(RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DMCONTROL,
	           RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN,
	           PDUMP_FLAGS_CONTINUOUS);
#else
	IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM;

	/* Send halt request (no need to select one or more harts on this RISC-V core) */
	OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMCONTROL,
	               RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_EN |
	               RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN);

	/* Wait until hart is halted.
	 * NOTE: the byte offset is divided by sizeof(IMG_UINT32) because
	 * pui32RegsBase is an IMG_UINT32 pointer, not a byte pointer. */
	if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
	                         pui32RegsBase + RGX_CR_FWCORE_DMI_DMSTATUS/sizeof(IMG_UINT32),
	                         RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN,
	                         RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN,
	                         POLL_FLAG_LOG_ERROR) != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Hart not halted (0x%x)",
		         __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMSTATUS)));
		return PVRSRV_ERROR_TIMEOUT;
	}

	/* Clear halt request */
	OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMCONTROL,
	               RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN);
#endif

	return PVRSRV_OK;
}

/*!
*******************************************************************************
@Function RGXRiscvResume

@Description Resume the RISC-V FW core

@Input psDevInfo Pointer to device info

@Return PVRSRV_ERROR
******************************************************************************/
PVRSRV_ERROR RGXRiscvResume(PVRSRV_RGXDEV_INFO *psDevInfo)
{
#if defined(NO_HARDWARE) && defined(PDUMP)
	/* No live registers in nohw builds: emit the equivalent PDump script instead. */
	PVR_UNREFERENCED_PARAMETER(psDevInfo);

	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Resume RISC-V FW");

	/* Send resume request (no need to select one or more harts on this RISC-V core) */
	PDUMPREG32(RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DMCONTROL,
	           RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_EN |
	           RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN,
	           PDUMP_FLAGS_CONTINUOUS);

	/* Wait until hart is resumed */
	PDUMPREGPOL(RGX_PDUMPREG_NAME,
	            RGX_CR_FWCORE_DMI_DMSTATUS,
	            RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN,
	            RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN,
	            PDUMP_FLAGS_CONTINUOUS,
	            PDUMP_POLL_OPERATOR_EQUAL);

	/* Clear resume request */
	PDUMPREG32(RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DMCONTROL,
	           RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN,
	           PDUMP_FLAGS_CONTINUOUS);
#else
	IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM;

	/* Send resume request (no need to select one or more harts on this RISC-V core) */
	OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMCONTROL,
	               RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_EN |
	               RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN);

	/* Wait until hart is resumed.
	 * NOTE: the byte offset is divided by sizeof(IMG_UINT32) because
	 * pui32RegsBase is an IMG_UINT32 pointer, not a byte pointer. */
	if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
	                         pui32RegsBase + RGX_CR_FWCORE_DMI_DMSTATUS/sizeof(IMG_UINT32),
	                         RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN,
	                         RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN,
	                         POLL_FLAG_LOG_ERROR) != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Hart not resumed (0x%x)",
		         __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMSTATUS)));
		return PVRSRV_ERROR_TIMEOUT;
	}

	/* Clear resume request */
	OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMCONTROL,
	               RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN);
#endif

	return PVRSRV_OK;
}

/*!
*******************************************************************************
@Function RGXRiscvCheckAbstractCmdError

@Description Check for RISC-V abstract command errors and clear them

@Input pvRegsBaseKM Pointer to GPU register base

@Return RGXRISCVFW_ABSTRACT_CMD_ERR
******************************************************************************/
static RGXRISCVFW_ABSTRACT_CMD_ERR RGXRiscvCheckAbstractCmdError(void __iomem *pvRegsBaseKM)
{
	RGXRISCVFW_ABSTRACT_CMD_ERR eCmdErr;

#if defined(NO_HARDWARE) && defined(PDUMP)
	/* nohw builds cannot read back an error: script a poll for "no error" instead. */
	PVR_UNREFERENCED_PARAMETER(pvRegsBaseKM);
	eCmdErr = RISCV_ABSTRACT_CMD_NO_ERROR;

	/* Check error status */
	PDUMPREGPOL(RGX_PDUMPREG_NAME,
	            RGX_CR_FWCORE_DMI_ABSTRACTCS,
	            RISCV_ABSTRACT_CMD_NO_ERROR << RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_SHIFT,
	            ~RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_CLRMSK,
	            PDUMP_FLAGS_CONTINUOUS,
	            PDUMP_POLL_OPERATOR_EQUAL);
#else
	/* Check error status: extract the CMDERR field from ABSTRACTCS. */
	eCmdErr = (OSReadHWReg32(pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS)
	          & ~RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_CLRMSK)
	          >> RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_SHIFT;

	if (eCmdErr != RISCV_ABSTRACT_CMD_NO_ERROR)
	{
		PVR_DPF((PVR_DBG_WARNING, "RISC-V FW abstract command error %u", eCmdErr));

		/* Clear the error (note CMDERR field is write-1-to-clear) */
		OSWriteHWReg32(pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS,
		               ~RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_CLRMSK);
	}
#endif

	return eCmdErr;
}

/*!
+******************************************************************************* +@Function RGXRiscvReadReg + +@Description Read a value from the given RISC-V register (GPR or CSR) + +@Input psDevInfo Pointer to device info +@Input ui32RegAddr RISC-V register address + +@Output pui32Value Read value + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXRiscvReadReg(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 *pui32Value) +{ +#if defined(NO_HARDWARE) && defined(PDUMP) + PVR_UNREFERENCED_PARAMETER(psDevInfo); + PVR_UNREFERENCED_PARAMETER(ui32RegAddr); + PVR_UNREFERENCED_PARAMETER(pui32Value); + + /* Reading HW registers is not supported in nohw/pdump */ + return PVRSRV_ERROR_NOT_SUPPORTED; +#else + IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM; + + /* Send abstract register read command */ + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, + RGX_CR_FWCORE_DMI_COMMAND, + (RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | + RGXRISCVFW_DMI_COMMAND_READ | + RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT | + ui32RegAddr); + + /* Wait until abstract command is completed */ + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + pui32RegsBase + RGX_CR_FWCORE_DMI_ABSTRACTCS/sizeof(IMG_UINT32), + 0U, + RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, + POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Abstract command did not complete in time (abstractcs = 0x%x)", + __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS))); + return PVRSRV_ERROR_TIMEOUT; + } + + if (RGXRiscvCheckAbstractCmdError(psDevInfo->pvRegsBaseKM) == RISCV_ABSTRACT_CMD_NO_ERROR) + { + /* Read register value */ + *pui32Value = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA0); + } + else + { + *pui32Value = 0U; + } + + return PVRSRV_OK; +#endif +} + +/*! 
+******************************************************************************* +@Function RGXRiscvPollReg + +@Description Poll for a value from the given RISC-V register (GPR or CSR) + +@Input psDevInfo Pointer to device info +@Input ui32RegAddr RISC-V register address +@Input ui32Value Expected value + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXRiscvPollReg(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32Value) +{ +#if defined(NO_HARDWARE) && defined(PDUMP) + void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; + + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "Poll RISC-V register 0x%x (expected 0x%08x)", + ui32RegAddr, ui32Value); + + /* Send abstract register read command */ + PDUMPREG32(RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_COMMAND, + (RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | + RGXRISCVFW_DMI_COMMAND_READ | + RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT | + ui32RegAddr, + PDUMP_FLAGS_CONTINUOUS); + + /* Wait until abstract command is completed */ + PDUMPREGPOL(RGX_PDUMPREG_NAME, + RGX_CR_FWCORE_DMI_ABSTRACTCS, + 0U, + RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, + PDUMP_FLAGS_CONTINUOUS, + PDUMP_POLL_OPERATOR_EQUAL); + + RGXRiscvCheckAbstractCmdError(pvRegsBaseKM); + + /* Check read value */ + PDUMPREGPOL(RGX_PDUMPREG_NAME, + RGX_CR_FWCORE_DMI_DATA0, + ui32Value, + 0xFFFFFFFF, + PDUMP_FLAGS_CONTINUOUS, + PDUMP_POLL_OPERATOR_EQUAL); + + return PVRSRV_OK; +#else + PVR_UNREFERENCED_PARAMETER(psDevInfo); + PVR_UNREFERENCED_PARAMETER(ui32RegAddr); + PVR_UNREFERENCED_PARAMETER(ui32Value); + + /* Polling HW registers is currently not required driverlive */ + return PVRSRV_ERROR_NOT_SUPPORTED; +#endif +} + +/*! 
+******************************************************************************* +@Function RGXRiscvWriteReg + +@Description Write a value to the given RISC-V register (GPR or CSR) + +@Input psDevInfo Pointer to device info +@Input ui32RegAddr RISC-V register address +@Input ui32Value Write value + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXRiscvWriteReg(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32Value) +{ +#if defined(NO_HARDWARE) && defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "Write RISC-V register 0x%x (value 0x%08x)", + ui32RegAddr, ui32Value); + + /* Prepare data to be written to register */ + PDUMPREG32(RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DATA0, ui32Value, PDUMP_FLAGS_CONTINUOUS); + + /* Send abstract register write command */ + PDUMPREG32(RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_COMMAND, + (RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | + RGXRISCVFW_DMI_COMMAND_WRITE | + RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT | + ui32RegAddr, + PDUMP_FLAGS_CONTINUOUS); + + /* Wait until abstract command is completed */ + PDUMPREGPOL(RGX_PDUMPREG_NAME, + RGX_CR_FWCORE_DMI_ABSTRACTCS, + 0U, + RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, + PDUMP_FLAGS_CONTINUOUS, + PDUMP_POLL_OPERATOR_EQUAL); +#else + IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM; + + /* Prepare data to be written to register */ + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA0, ui32Value); + + /* Send abstract register write command */ + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, + RGX_CR_FWCORE_DMI_COMMAND, + (RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | + RGXRISCVFW_DMI_COMMAND_WRITE | + RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT | + ui32RegAddr); + + /* Wait until abstract command is completed */ + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + pui32RegsBase + 
RGX_CR_FWCORE_DMI_ABSTRACTCS/sizeof(IMG_UINT32), + 0U, + RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, + POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Abstract command did not complete in time (abstractcs = 0x%x)", + __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS))); + return PVRSRV_ERROR_TIMEOUT; + } +#endif + + return PVRSRV_OK; +} + +/*! +******************************************************************************* +@Function RGXRiscvCheckSysBusError + +@Description Check for RISC-V system bus errors and clear them + +@Input pvRegsBaseKM Pointer to GPU register base + +@Return RGXRISCVFW_SYSBUS_ERR +******************************************************************************/ +static RGXRISCVFW_SYSBUS_ERR RGXRiscvCheckSysBusError(void __iomem *pvRegsBaseKM) +{ + RGXRISCVFW_SYSBUS_ERR eSBError; + +#if defined(NO_HARDWARE) && defined(PDUMP) + PVR_UNREFERENCED_PARAMETER(pvRegsBaseKM); + eSBError = RISCV_SYSBUS_NO_ERROR; + + PDUMPREGPOL(RGX_PDUMPREG_NAME, + RGX_CR_FWCORE_DMI_SBCS, + RISCV_SYSBUS_NO_ERROR << RGX_CR_FWCORE_DMI_SBCS_SBERROR_SHIFT, + ~RGX_CR_FWCORE_DMI_SBCS_SBERROR_CLRMSK, + PDUMP_FLAGS_CONTINUOUS, + PDUMP_POLL_OPERATOR_EQUAL); +#else + eSBError = (OSReadHWReg32(pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBCS) + & ~RGX_CR_FWCORE_DMI_SBCS_SBERROR_CLRMSK) + >> RGX_CR_FWCORE_DMI_SBCS_SBERROR_SHIFT; + + if (eSBError != RISCV_SYSBUS_NO_ERROR) + { + PVR_DPF((PVR_DBG_WARNING, "RISC-V FW system bus error %u", eSBError)); + + /* Clear the error (note SBERROR field is write-1-to-clear) */ + OSWriteHWReg32(pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBCS, + ~RGX_CR_FWCORE_DMI_SBCS_SBERROR_CLRMSK); + } +#endif + + return eSBError; +} + +/*! 
+******************************************************************************* +@Function RGXRiscvReadAbstractMem + +@Description Read a value at the given address in RISC-V memory space + using RISC-V abstract memory commands + +@Input psDevInfo Pointer to device info +@Input ui32Addr Address in RISC-V memory space + +@Output pui32Value Read value + +@Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR +RGXRiscvReadAbstractMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 *pui32Value) +{ +#if defined(NO_HARDWARE) && defined(PDUMP) + PVR_UNREFERENCED_PARAMETER(psDevInfo); + PVR_UNREFERENCED_PARAMETER(ui32Addr); + PVR_UNREFERENCED_PARAMETER(pui32Value); + + /* Reading memory is not supported in nohw/pdump */ + return PVRSRV_ERROR_NOT_SUPPORTED; +#else + IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM; + + /* Prepare read address */ + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA1, ui32Addr); + + /* Send abstract memory read command */ + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, + RGX_CR_FWCORE_DMI_COMMAND, + (RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | + RGXRISCVFW_DMI_COMMAND_READ | + RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT); + + /* Wait until abstract command is completed */ + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + pui32RegsBase + RGX_CR_FWCORE_DMI_ABSTRACTCS/sizeof(IMG_UINT32), + 0U, + RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, + POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Abstract command did not complete in time (abstractcs = 0x%x)", + __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS))); + return PVRSRV_ERROR_TIMEOUT; + } + + if (RGXRiscvCheckAbstractCmdError(psDevInfo->pvRegsBaseKM) == RISCV_ABSTRACT_CMD_NO_ERROR) + { + /* Read memory value */ + *pui32Value = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA0); + } + else + { + 
*pui32Value = 0U; + } + + return PVRSRV_OK; +#endif +} + +/*! +******************************************************************************* +@Function RGXRiscvPollAbstractMem + +@Description Poll for a value at the given address in RISC-V memory space + using RISC-V abstract memory commands + +@Input psDevInfo Pointer to device info +@Input ui32Addr Address in RISC-V memory space +@Input ui32Value Expected value + +@Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR +RGXRiscvPollAbstractMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 ui32Value) +{ +#if defined(NO_HARDWARE) && defined(PDUMP) + void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; + + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "Poll RISC-V address 0x%x (expected 0x%08x)", + ui32Addr, ui32Value); + + /* Prepare read address */ + PDUMPREG32(RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DATA1, ui32Addr, PDUMP_FLAGS_CONTINUOUS); + + /* Send abstract memory read command */ + PDUMPREG32(RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_COMMAND, + (RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | + RGXRISCVFW_DMI_COMMAND_READ | + RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT, + PDUMP_FLAGS_CONTINUOUS); + + /* Wait until abstract command is completed */ + PDUMPREGPOL(RGX_PDUMPREG_NAME, + RGX_CR_FWCORE_DMI_ABSTRACTCS, + 0U, + RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, + PDUMP_FLAGS_CONTINUOUS, + PDUMP_POLL_OPERATOR_EQUAL); + + RGXRiscvCheckAbstractCmdError(pvRegsBaseKM); + + /* Check read value */ + PDUMPREGPOL(RGX_PDUMPREG_NAME, + RGX_CR_FWCORE_DMI_DATA0, + ui32Value, + 0xFFFFFFFF, + PDUMP_FLAGS_CONTINUOUS, + PDUMP_POLL_OPERATOR_EQUAL); + + return PVRSRV_OK; +#else + PVR_UNREFERENCED_PARAMETER(psDevInfo); + PVR_UNREFERENCED_PARAMETER(ui32Addr); + PVR_UNREFERENCED_PARAMETER(ui32Value); + + /* Polling memory is currently not required driverlive */ + return PVRSRV_ERROR_NOT_SUPPORTED; +#endif +} + +/*! 
+******************************************************************************* +@Function RGXRiscvReadSysBusMem + +@Description Read a value at the given address in RISC-V memory space + using the RISC-V system bus + +@Input psDevInfo Pointer to device info +@Input ui32Addr Address in RISC-V memory space + +@Output pui32Value Read value + +@Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR +RGXRiscvReadSysBusMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 *pui32Value) +{ +#if defined(NO_HARDWARE) && defined(PDUMP) + PVR_UNREFERENCED_PARAMETER(psDevInfo); + PVR_UNREFERENCED_PARAMETER(ui32Addr); + PVR_UNREFERENCED_PARAMETER(pui32Value); + + /* Reading memory is not supported in nohw/pdump */ + return PVRSRV_ERROR_NOT_SUPPORTED; +#else + IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM; + + /* Configure system bus to read 32 bit every time a new address is provided */ + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, + RGX_CR_FWCORE_DMI_SBCS, + (RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT << RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT) | + RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_EN); + + /* Perform read */ + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBADDRESS0, ui32Addr); + + /* Wait until system bus is idle */ + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + pui32RegsBase + RGX_CR_FWCORE_DMI_SBCS/sizeof(IMG_UINT32), + 0U, + RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN, + POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: System Bus did not go idle in time (sbcs = 0x%x)", + __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBCS))); + return PVRSRV_ERROR_TIMEOUT; + } + + if (RGXRiscvCheckSysBusError(psDevInfo->pvRegsBaseKM) == RISCV_SYSBUS_NO_ERROR) + { + /* Read value from debug system bus */ + *pui32Value = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBDATA0); + } + else + { + *pui32Value = 0U; + } + + return PVRSRV_OK; +#endif +} + 
+/*! +******************************************************************************* +@Function RGXRiscvPollSysBusMem + +@Description Poll for a value at the given address in RISC-V memory space + using the RISC-V system bus + +@Input psDevInfo Pointer to device info +@Input ui32Addr Address in RISC-V memory space +@Input ui32Value Expected value + +@Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR +RGXRiscvPollSysBusMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 ui32Value) +{ +#if defined(NO_HARDWARE) && defined(PDUMP) + void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; + + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "Poll RISC-V address 0x%x (expected 0x%08x)", + ui32Addr, ui32Value); + + /* Configure system bus to read 32 bit every time a new address is provided */ + PDUMPREG32(RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_SBCS, + (RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT << RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT) | + RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_EN, + PDUMP_FLAGS_CONTINUOUS); + + /* Perform read */ + PDUMPREG32(RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_SBADDRESS0, + ui32Addr, + PDUMP_FLAGS_CONTINUOUS); + + /* Wait until system bus is idle */ + PDUMPREGPOL(RGX_PDUMPREG_NAME, + RGX_CR_FWCORE_DMI_SBCS, + 0U, + RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN, + PDUMP_FLAGS_CONTINUOUS, + PDUMP_POLL_OPERATOR_EQUAL); + + RGXRiscvCheckSysBusError(pvRegsBaseKM); + + /* Check read value */ + PDUMPREGPOL(RGX_PDUMPREG_NAME, + RGX_CR_FWCORE_DMI_SBDATA0, + ui32Value, + 0xFFFFFFFF, + PDUMP_FLAGS_CONTINUOUS, + PDUMP_POLL_OPERATOR_EQUAL); + + return PVRSRV_OK; +#else + PVR_UNREFERENCED_PARAMETER(psDevInfo); + PVR_UNREFERENCED_PARAMETER(ui32Addr); + PVR_UNREFERENCED_PARAMETER(ui32Value); + + /* Polling memory is currently not required driverlive */ + return PVRSRV_ERROR_NOT_SUPPORTED; +#endif +} + +/*! 
+******************************************************************************* +@Function RGXRiscvReadMem + +@Description Read a value at the given address in RISC-V memory space + +@Input psDevInfo Pointer to device info +@Input ui32Addr Address in RISC-V memory space + +@Output pui32Value Read value + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXRiscvReadMem(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32Addr, + IMG_UINT32 *pui32Value) +{ + if (ui32Addr >= RGXRISCVFW_COREMEM_BASE && ui32Addr <= RGXRISCVFW_COREMEM_END) + { + return RGXRiscvReadAbstractMem(psDevInfo, ui32Addr, pui32Value); + } + + return RGXRiscvReadSysBusMem(psDevInfo, ui32Addr, pui32Value); +} + +/*! +******************************************************************************* +@Function RGXRiscvPollMem + +@Description Poll a value at the given address in RISC-V memory space + +@Input psDevInfo Pointer to device info +@Input ui32Addr Address in RISC-V memory space +@Input ui32Value Expected value + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXRiscvPollMem(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32Addr, + IMG_UINT32 ui32Value) +{ + if (ui32Addr >= RGXRISCVFW_COREMEM_BASE && ui32Addr <= RGXRISCVFW_COREMEM_END) + { + return RGXRiscvPollAbstractMem(psDevInfo, ui32Addr, ui32Value); + } + + return RGXRiscvPollSysBusMem(psDevInfo, ui32Addr, ui32Value); +} + +/*! 
+******************************************************************************* +@Function RGXRiscvWriteAbstractMem + +@Description Write a value at the given address in RISC-V memory space + using RISC-V abstract memory commands + +@Input psDevInfo Pointer to device info +@Input ui32Addr Address in RISC-V memory space +@Input ui32Value Write value + +@Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR +RGXRiscvWriteAbstractMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 ui32Value) +{ +#if defined(NO_HARDWARE) && defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "Write RISC-V address 0x%x (value 0x%08x)", + ui32Addr, ui32Value); + + /* Prepare write address */ + PDUMPREG32(RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DATA1, ui32Addr, PDUMP_FLAGS_CONTINUOUS); + + /* Prepare write data */ + PDUMPREG32(RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DATA0, ui32Value, PDUMP_FLAGS_CONTINUOUS); + + /* Send abstract register write command */ + PDUMPREG32(RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_COMMAND, + (RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | + RGXRISCVFW_DMI_COMMAND_WRITE | + RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT, + PDUMP_FLAGS_CONTINUOUS); + + /* Wait until abstract command is completed */ + PDUMPREGPOL(RGX_PDUMPREG_NAME, + RGX_CR_FWCORE_DMI_ABSTRACTCS, + 0U, + RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, + PDUMP_FLAGS_CONTINUOUS, + PDUMP_POLL_OPERATOR_EQUAL); +#else + IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM; + + /* Prepare write address */ + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA1, ui32Addr); + + /* Prepare write data */ + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA0, ui32Value); + + /* Send abstract memory write command */ + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, + RGX_CR_FWCORE_DMI_COMMAND, + (RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | + 
RGXRISCVFW_DMI_COMMAND_WRITE | + RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT); + + /* Wait until abstract command is completed */ + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + pui32RegsBase + RGX_CR_FWCORE_DMI_ABSTRACTCS/sizeof(IMG_UINT32), + 0U, + RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, + POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Abstract command did not complete in time (abstractcs = 0x%x)", + __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS))); + return PVRSRV_ERROR_TIMEOUT; + } +#endif + + return PVRSRV_OK; +} + +/*! +******************************************************************************* +@Function RGXRiscvWriteSysBusMem + +@Description Write a value at the given address in RISC-V memory space + using the RISC-V system bus + +@Input psDevInfo Pointer to device info +@Input ui32Addr Address in RISC-V memory space +@Input ui32Value Write value + +@Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR +RGXRiscvWriteSysBusMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 ui32Value) +{ +#if defined(NO_HARDWARE) && defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "Write RISC-V address 0x%x (value 0x%08x)", + ui32Addr, ui32Value); + + /* Configure system bus to read 32 bit every time a new address is provided */ + PDUMPREG32(RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_SBCS, + RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT << RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT, + PDUMP_FLAGS_CONTINUOUS); + + /* Prepare write address */ + PDUMPREG32(RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_SBADDRESS0, ui32Addr, PDUMP_FLAGS_CONTINUOUS); + + /* Prepare write data and initiate write */ + PDUMPREG32(RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_SBDATA0, ui32Value, PDUMP_FLAGS_CONTINUOUS); + + /* Wait until system bus is idle */ + PDUMPREGPOL(RGX_PDUMPREG_NAME, + RGX_CR_FWCORE_DMI_SBCS, + 0U, + RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN, + PDUMP_FLAGS_CONTINUOUS, + 
PDUMP_POLL_OPERATOR_EQUAL); +#else + IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM; + + /* Configure system bus for 32 bit accesses */ + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, + RGX_CR_FWCORE_DMI_SBCS, + RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT << RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT); + + /* Prepare write address */ + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBADDRESS0, ui32Addr); + + /* Prepare write data and initiate write */ + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBDATA0, ui32Value); + + /* Wait until system bus is idle */ + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + pui32RegsBase + RGX_CR_FWCORE_DMI_SBCS/sizeof(IMG_UINT32), + 0U, + RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN, + POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: System Bus did not go idle in time (sbcs = 0x%x)", + __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBCS))); + return PVRSRV_ERROR_TIMEOUT; + } +#endif + + return PVRSRV_OK; +} + +/*! 
+******************************************************************************* +@Function RGXRiscvWriteMem + +@Description Write a value to the given address in RISC-V memory space + +@Input psDevInfo Pointer to device info +@Input ui32Addr Address in RISC-V memory space +@Input ui32Value Write value + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXRiscvWriteMem(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32Addr, + IMG_UINT32 ui32Value) +{ + if (ui32Addr >= RGXRISCVFW_COREMEM_BASE && ui32Addr <= RGXRISCVFW_COREMEM_END) + { + return RGXRiscvWriteAbstractMem(psDevInfo, ui32Addr, ui32Value); + } + + return RGXRiscvWriteSysBusMem(psDevInfo, ui32Addr, ui32Value); +} + +/****************************************************************************** + End of file (rgxfwutils.c) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgxfwutils.h b/drivers/gpu/drm/phytium/octopus/rgxfwutils.h new file mode 100644 index 000000000000..484902153958 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxfwutils.h @@ -0,0 +1,1353 @@ +/*************************************************************************/ /*! +@File +@Title RGX firmware utility routines +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description RGX firmware utility routines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RGXFWUTILS_H +#define RGXFWUTILS_H + +#include "rgx_memallocflags.h" +#include "rgxdevice.h" +#include "rgxccb.h" +#include "devicemem.h" +#include "device.h" +#include "pvr_notifier.h" +#include "pvrsrv.h" +#include "connection_server.h" +#include "rgxta3d.h" +#include "devicemem_utils.h" +#include "rgxmem.h" + +#define RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT "FwRawOSID%d" /*!< RGX Raw Firmware Heap identifier */ + +static INLINE PVRSRV_ERROR _SelectDevMemHeap(PVRSRV_RGXDEV_INFO *psDevInfo, + PVRSRV_MEMALLOCFLAGS_T *puiFlags, + DEVMEM_HEAP **ppsFwHeap) +{ + PVRSRV_PHYS_HEAP ePhysHeap = PVRSRV_GET_PHYS_HEAP_HINT(*puiFlags); + PVRSRV_ERROR eError = PVRSRV_OK; + + switch (ePhysHeap) + { +#if defined(SUPPORT_SECURITY_VALIDATION) + /* call with GPU_SECURE from RGXSetupFwSysData */ + case PVRSRV_PHYS_HEAP_GPU_SECURE: +#endif + case PVRSRV_PHYS_HEAP_FW_CODE: + case PVRSRV_PHYS_HEAP_FW_PRIV_DATA: + case PVRSRV_PHYS_HEAP_FW_MAIN: + { + *ppsFwHeap = psDevInfo->psFirmwareMainHeap; + break; + } + case PVRSRV_PHYS_HEAP_FW_CONFIG: + { + *ppsFwHeap = psDevInfo->psFirmwareConfigHeap; + break; + } + case PVRSRV_PHYS_HEAP_FW_PREMAP0: + case PVRSRV_PHYS_HEAP_FW_PREMAP1: + case PVRSRV_PHYS_HEAP_FW_PREMAP2: + case PVRSRV_PHYS_HEAP_FW_PREMAP3: + case PVRSRV_PHYS_HEAP_FW_PREMAP4: + case PVRSRV_PHYS_HEAP_FW_PREMAP5: + case PVRSRV_PHYS_HEAP_FW_PREMAP6: + case PVRSRV_PHYS_HEAP_FW_PREMAP7: + { + IMG_UINT32 ui32OSID = ePhysHeap - PVRSRV_PHYS_HEAP_FW_PREMAP0; + + PVR_LOG_RETURN_IF_INVALID_PARAM(ui32OSID < RGX_NUM_OS_SUPPORTED, "ui32OSID"); + *ppsFwHeap = psDevInfo->psGuestFirmwareRawHeap[ui32OSID]; + break; + } + default: + { + PVR_DPF((PVR_DBG_ERROR, "%s: invalid phys heap", __func__)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + break; + } + } + + return eError; +} + +/* + * Firmware-only allocation (which are initialised by the host) must be aligned to the SLC cache line size. 
+ * This is because firmware-only allocations are GPU_CACHE_INCOHERENT and this causes problems + * if two allocations share the same cache line; e.g. the initialisation of the second allocation won't + * make it into the SLC cache because it has been already loaded when accessing the content of the first allocation. + */ +static INLINE PVRSRV_ERROR DevmemFwAllocate(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_DEVMEM_SIZE_T uiSize, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + const IMG_CHAR *pszText, + DEVMEM_MEMDESC **ppsMemDescPtr) +{ + IMG_DEV_VIRTADDR sTmpDevVAddr; + PVRSRV_ERROR eError; + DEVMEM_HEAP *psFwHeap; + IMG_DEVMEM_ALIGN_T uiAlign; + + PVR_DPF_ENTERED; + + /* Enforce the standard pre-fix naming scheme callers must follow */ + PVR_ASSERT((pszText != NULL) && (pszText[0] == 'F') && (pszText[1] == 'w')); + + /* Imported from AppHint , flag to poison allocations when freed */ + uiFlags |= psDevInfo->uiFWPoisonOnFreeFlag; + + eError = _SelectDevMemHeap(psDevInfo, &uiFlags, &psFwHeap); + if (eError != PVRSRV_OK) + { + PVR_DPF_RETURN_RC(eError); + } + + uiAlign = (psFwHeap == psDevInfo->psFirmwareConfigHeap) ? 
+ (RGX_FIRMWARE_CONFIG_HEAP_ALLOC_GRANULARITY) : + (GET_ROGUE_CACHE_LINE_SIZE(RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS))); + + eError = DevmemAllocateAndMap(psFwHeap, + uiSize, + uiAlign, + uiFlags, + pszText, + ppsMemDescPtr, + &sTmpDevVAddr); + + PVR_DPF_RETURN_RC(eError); +} + +static INLINE PVRSRV_ERROR DevmemFwAllocateExportable(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_ALIGN_T uiAlign, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + const IMG_CHAR *pszText, + DEVMEM_MEMDESC **ppsMemDescPtr) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice; + IMG_DEV_VIRTADDR sTmpDevVAddr; + PVRSRV_ERROR eError; + DEVMEM_HEAP *psFwHeap; + + PVR_DPF_ENTERED; + + /* Enforce the standard pre-fix naming scheme callers must follow */ + PVR_ASSERT((pszText != NULL) && + (pszText[0] == 'F') && (pszText[1] == 'w') && + (pszText[2] == 'E') && (pszText[3] == 'x')); + + /* Imported from AppHint , flag to poison allocations when freed */ + uiFlags |= psDevInfo->uiFWPoisonOnFreeFlag; + + eError = _SelectDevMemHeap(psDevInfo, &uiFlags, &psFwHeap); + if (eError != PVRSRV_OK) + { + PVR_DPF_RETURN_RC(eError); + } + + eError = DevmemAllocateExportable(psDeviceNode, + uiSize, + uiAlign, + DevmemGetHeapLog2PageSize(psFwHeap), + uiFlags, + pszText, + ppsMemDescPtr); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "FW DevmemAllocateExportable failed (%u)", eError)); + PVR_DPF_RETURN_RC(eError); + } + + /* + We need to map it so the heap for this allocation + is set + */ + eError = DevmemMapToDevice(*ppsMemDescPtr, + psDevInfo->psFirmwareMainHeap, + &sTmpDevVAddr); + if (eError != PVRSRV_OK) + { + DevmemFree(*ppsMemDescPtr); + PVR_DPF((PVR_DBG_ERROR, "FW DevmemMapToDevice failed (%u)", eError)); + } + + PVR_DPF_RETURN_RC1(eError, *ppsMemDescPtr); +} + +static INLINE PVRSRV_ERROR DevmemFwAllocateSparse(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_DEVMEM_SIZE_T uiSize, + IMG_DEVMEM_SIZE_T uiChunkSize, + IMG_UINT32 
ui32NumPhysChunks, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 *pui32MappingTable, + PVRSRV_MEMALLOCFLAGS_T uiFlags, + const IMG_CHAR *pszText, + DEVMEM_MEMDESC **ppsMemDescPtr) +{ + IMG_DEV_VIRTADDR sTmpDevVAddr; + PVRSRV_ERROR eError; + DEVMEM_HEAP *psFwHeap; + IMG_UINT32 ui32Align; + + PVR_DPF_ENTERED; + + /* Enforce the standard pre-fix naming scheme callers must follow */ + PVR_ASSERT((pszText != NULL) && (pszText[0] == 'F') && (pszText[1] == 'w')); + ui32Align = GET_ROGUE_CACHE_LINE_SIZE(RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS)); + + /* Imported from AppHint , flag to poison allocations when freed */ + uiFlags |= psDevInfo->uiFWPoisonOnFreeFlag; + + eError = _SelectDevMemHeap(psDevInfo, &uiFlags, &psFwHeap); + if (eError != PVRSRV_OK) + { + PVR_DPF_RETURN_RC(eError); + } + + eError = DevmemAllocateSparse(psDevInfo->psDeviceNode, + uiSize, + uiChunkSize, + ui32NumPhysChunks, + ui32NumVirtChunks, + pui32MappingTable, + ui32Align, + DevmemGetHeapLog2PageSize(psFwHeap), + uiFlags | PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING, + pszText, + ppsMemDescPtr); + if (eError != PVRSRV_OK) + { + PVR_DPF_RETURN_RC(eError); + } + /* + We need to map it so the heap for this allocation + is set + */ + eError = DevmemMapToDevice(*ppsMemDescPtr, + psFwHeap, + &sTmpDevVAddr); + if (eError != PVRSRV_OK) + { + DevmemFree(*ppsMemDescPtr); + PVR_DPF_RETURN_RC(eError); + } + + PVR_DPF_RETURN_RC(eError); +} + + +static INLINE void DevmemFwUnmapAndFree(PVRSRV_RGXDEV_INFO *psDevInfo, + DEVMEM_MEMDESC *psMemDesc) +{ + PVR_DPF_ENTERED1(psMemDesc); + + DevmemReleaseDevVirtAddr(psMemDesc); + DevmemFree(psMemDesc); + + PVR_DPF_RETURN; +} + + +/* + * This function returns the value of the hardware register RGX_CR_TIMER + * which is a timer counting in ticks. 
+ */ + +static INLINE IMG_UINT64 RGXReadHWTimerReg(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + IMG_UINT64 ui64Time = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TIMER); + + /* + * In order to avoid having to issue three 32-bit reads to detect the + * lower 32-bits wrapping, the MSB of the low 32-bit word is duplicated + * in the MSB of the high 32-bit word. If the wrap happens, we just read + * the register again (it will not wrap again so soon). + */ + if ((ui64Time ^ (ui64Time << 32)) & ~RGX_CR_TIMER_BIT31_CLRMSK) + { + ui64Time = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TIMER); + } + + return (ui64Time & ~RGX_CR_TIMER_VALUE_CLRMSK) >> RGX_CR_TIMER_VALUE_SHIFT; +} + +/* + * This FW Common Context is only mapped into kernel for initialisation and cleanup purposes. + * Otherwise this allocation is only used by the FW. + * Therefore the GPU cache doesn't need coherency, and write-combine will + * suffice on the CPU side (WC buffer will be flushed at the first kick) + */ +#define RGX_FWCOMCTX_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \ + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)| \ + PVRSRV_MEMALLOCFLAG_GPU_READABLE | \ + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \ + PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | \ + PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | \ + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN)) + +#define RGX_FWSHAREDMEM_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \ + PVRSRV_MEMALLOCFLAG_GPU_READABLE | \ + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ + PVRSRV_MEMALLOCFLAG_UNCACHED | \ + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) + +#define RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS (RGX_FWSHAREDMEM_ALLOCFLAGS | \ + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN)) + 
+#define RGX_FWSHAREDMEM_CONFIG_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \ + PVRSRV_MEMALLOCFLAG_GPU_READABLE | \ + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \ + PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | \ + PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED | \ + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_CONFIG)) +/* + * This FW Init Data is initialised on the CPU and then passed to the FW. We need + * to make the CPU mapping write-combined to avoid CPU-specific alignment issues + * for device memory. + */ +#define RGX_FWINITDATA_WC_ALLOCFLAGS ((RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS & (~PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK)) | PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC) + +#define RGX_FWSHAREDMEM_GPU_RO_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \ + PVRSRV_MEMALLOCFLAG_GPU_READABLE | \ + PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | \ + PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | \ + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN)) + +/* Firmware shared memory that is supposed to be read-only to the CPU. + * In reality it isn't due to ZERO_ON_ALLOC which enforces CPU_WRITEABLE + * flag on the allocations. 
*/ +#define RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \ + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN) | \ + PVRSRV_MEMALLOCFLAG_GPU_READABLE | \ + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \ + PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ + PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | \ + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | \ + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) + +/* data content being kept from previous boot cycles from physical memory must not be cleared during allocation */ +#define RGX_AUTOVZ_KEEP_FW_DATA_MASK(bKeepMem) ((bKeepMem) ? (~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) : (~0ULL)) + +/****************************************************************************** + * RGXSetFirmwareAddress Flags + *****************************************************************************/ +#define RFW_FWADDR_FLAG_NONE (0) /*!< Void flag */ +#define RFW_FWADDR_NOREF_FLAG (1U << 0) /*!< It is safe to immediately release the reference to the pointer, + otherwise RGXUnsetFirmwareAddress() must be called when finished. 
*/ +IMG_BOOL RGXTraceBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo); +PVRSRV_ERROR RGXTraceBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO* psDevInfo, PVRSRV_MEMALLOCFLAGS_T uiAllocFlags); + +#if defined(SUPPORT_POWMON_COMPONENT) +IMG_BOOL RGXPowmonBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo); +PVRSRV_ERROR RGXPowmonBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo); +#endif + +#if defined(SUPPORT_TBI_INTERFACE) +IMG_BOOL RGXTBIBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo); +PVRSRV_ERROR RGXTBIBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo); +#endif + +PVRSRV_ERROR RGXSetupFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_BOOL bEnableSignatureChecks, + IMG_UINT32 ui32SignatureChecksBufSize, + IMG_UINT32 ui32HWPerfFWBufSizeKB, + IMG_UINT64 ui64HWPerfFilter, + IMG_UINT32 ui32ConfigFlags, + IMG_UINT32 ui32ConfigFlagsExt, + IMG_UINT32 ui32FwOsCfgFlags, + IMG_UINT32 ui32LogType, + IMG_UINT32 ui32FilterFlags, + IMG_UINT32 ui32JonesDisableMask, + IMG_UINT32 ui32HWRDebugDumpLimit, + IMG_UINT32 ui32HWPerfCountersDataSize, + IMG_UINT32 ui32RenderKillingCtl, + IMG_UINT32 ui32CDMTDMKillingCtl, + IMG_UINT32 *pui32TPUTrilinearFracMask, + IMG_UINT32 *pui32USRMNumRegions, + IMG_UINT64 *pui64UVBRMNumRegions, + RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf, + FW_PERF_CONF eFirmwarePerf, + IMG_UINT32 ui32AvailableSPUMask); + + + +void RGXFreeFirmware(PVRSRV_RGXDEV_INFO *psDevInfo); + +/*************************************************************************/ /*! +@Function RGXSetupFwAllocation + +@Description Allocates a block of firmware memory and optionally returns + its firmware virtual address and/or a kernel CPU mapping. 
+ +@Input psDevInfo Device Info struct +@Input uiAllocFlags Flags determining type of memory allocation +@Input ui32Size Size of memory allocation +@Input pszName Allocation label +@Input psFwPtr Address of the firmware pointer to set +@Input ppvCpuPtr Address of the cpu pointer to set +@Input ui32DevVAFlags Any combination of RFW_FWADDR_*_FLAG + +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR RGXSetupFwAllocation(PVRSRV_RGXDEV_INFO *psDevInfo, + PVRSRV_MEMALLOCFLAGS_T uiAllocFlags, + IMG_UINT32 ui32Size, + const IMG_CHAR *pszName, + DEVMEM_MEMDESC **ppsMemDesc, + RGXFWIF_DEV_VIRTADDR *psFwPtr, + void **ppvCpuPtr, + IMG_UINT32 ui32DevVAFlags); + +/*************************************************************************/ /*! +@Function RGXSetFirmwareAddress + +@Description Sets a pointer in a firmware data structure. + +@Input ppDest Address of the pointer to set +@Input psSrc MemDesc describing the pointer +@Input ui32Flags Any combination of RFW_FWADDR_*_FLAG + +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR RGXSetFirmwareAddress(RGXFWIF_DEV_VIRTADDR *ppDest, + DEVMEM_MEMDESC *psSrc, + IMG_UINT32 uiOffset, + IMG_UINT32 ui32Flags); + + +/*************************************************************************/ /*! +@Function RGXSetMetaDMAAddress + +@Description Fills a Firmware structure used to setup the Meta DMA with two + pointers to the same data, one on 40 bit and one on 32 bit + (pointer in the FW memory space). 
+ +@Input psDest Address of the structure to set +@Input psSrcMemDesc MemDesc describing the pointer +@Input psSrcFWDevVAddr Firmware memory space pointer + +@Return void +*/ /**************************************************************************/ +void RGXSetMetaDMAAddress(RGXFWIF_DMA_ADDR *psDest, + DEVMEM_MEMDESC *psSrcMemDesc, + RGXFWIF_DEV_VIRTADDR *psSrcFWDevVAddr, + IMG_UINT32 uiOffset); + + +/*************************************************************************/ /*! +@Function RGXUnsetFirmwareAddress + +@Description Unsets a pointer in a firmware data structure + +@Input psSrc MemDesc describing the pointer + +@Return void +*/ /**************************************************************************/ +void RGXUnsetFirmwareAddress(DEVMEM_MEMDESC *psSrc); + +PVRSRV_ERROR RGXWriteMetaRegThroughSP(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue); +PVRSRV_ERROR RGXReadMetaRegThroughSP(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32* ui32RegValue); + +/*************************************************************************/ /*! +@Function FWCommonContextAllocate + +@Description Allocate a FW common context. This allocates the HW memory + for the context, the CCB and wires it all together. + +@Input psConnection Connection this context is being created on +@Input psDeviceNode Device node to create the FW context on + (must be RGX device node) +@Input eRGXCCBRequestor RGX_CCB_REQUESTOR_TYPE enum constant + which represents the requestor of this FWCC +@Input eDM Data Master type +@Input psServerMMUContext Server MMU memory context. +@Input psAllocatedMemDesc Pointer to pre-allocated MemDesc to use + as the FW context or NULL if this function + should allocate it +@Input ui32AllocatedOffset Offset into pre-allocated MemDesc to use + as the FW context. 
If psAllocatedMemDesc + is NULL then this parameter is ignored +@Input psFWMemContextMemDesc MemDesc of the FW memory context this + common context resides on +@Input psContextStateMemDesc FW context state (context switch) MemDesc +@Input ui32CCBAllocSizeLog2 Size of the CCB for this context +@Input ui32CCBMaxAllocSizeLog2 Maximum size to which CCB can grow for this context +@Input ui32ContextFlags Flags which specify properties of the context +@Input ui32Priority Priority of the context +@Input ui32MaxDeadlineMS Max deadline limit in MS that the workload can run +@Input ui64RobustnessAddress Address for FW to signal a context reset +@Input psInfo Structure that contains extra info + required for the creation of the context + (elements might change from core to core) +@Return PVRSRV_OK if the context was successfully created +*/ /**************************************************************************/ +PVRSRV_ERROR FWCommonContextAllocate(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor, + RGXFWIF_DM eDM, + SERVER_MMU_CONTEXT *psServerMMUContext, + DEVMEM_MEMDESC *psAllocatedMemDesc, + IMG_UINT32 ui32AllocatedOffset, + DEVMEM_MEMDESC *psFWMemContextMemDesc, + DEVMEM_MEMDESC *psContextStateMemDesc, + IMG_UINT32 ui32CCBAllocSizeLog2, + IMG_UINT32 ui32CCBMaxAllocSizeLog2, + IMG_UINT32 ui32ContextFlags, + IMG_UINT32 ui32Priority, + IMG_UINT32 ui32MaxDeadlineMS, + IMG_UINT64 ui64RobustnessAddress, + RGX_COMMON_CONTEXT_INFO *psInfo, + RGX_SERVER_COMMON_CONTEXT **ppsServerCommonContext); + +void FWCommonContextFree(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); + +PRGXFWIF_FWCOMMONCONTEXT FWCommonContextGetFWAddress(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); + +RGX_CLIENT_CCB *FWCommonContextGetClientCCB(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); + +SERVER_MMU_CONTEXT *FWCommonContextGetServerMMUCtx(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); + +RGX_CONTEXT_RESET_REASON 
FWCommonContextGetLastResetReason(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, + IMG_UINT32 *pui32LastResetJobRef); + +PVRSRV_RGXDEV_INFO* FWCommonContextGetRGXDevInfo(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); + +PVRSRV_ERROR RGXGetFWCommonContextAddrFromServerMMUCtx(PVRSRV_RGXDEV_INFO *psDevInfo, + SERVER_MMU_CONTEXT *psServerMMUContext, + PRGXFWIF_FWCOMMONCONTEXT *psFWCommonContextFWAddr); + +PVRSRV_ERROR FWCommonContextSetFlags(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, + IMG_UINT32 ui32ContextFlags); + +/*! +******************************************************************************* +@Function RGXScheduleProcessQueuesKM + +@Description Software command complete handler + (sends uncounted kicks for all the DMs through the MISR) + +@Input hCmdCompHandle RGX device node + +@Return None +******************************************************************************/ +void RGXScheduleProcessQueuesKM(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle); + +#if defined(SUPPORT_VALIDATION) +/*! +******************************************************************************* +@Function RGXScheduleRgxRegCommand + +@Input psDevInfo Device Info struct +@Input ui64RegVal Value to write into FW register +@Input ui64Size Register size +@Input ui32Offset Register Offset +@Input bWriteOp Register Write or Read toggle + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXScheduleRgxRegCommand(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT64 ui64RegVal, + IMG_UINT64 ui64Size, + IMG_UINT32 ui32Offset, + IMG_BOOL bWriteOp); + +#endif + +/*! 
+******************************************************************************* + +@Function RGXInstallProcessQueuesMISR + +@Description Installs the MISR to handle Process Queues operations + +@Input phMISR Pointer to the MISR handler +@Input psDeviceNode RGX Device node + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXInstallProcessQueuesMISR(IMG_HANDLE *phMISR, PVRSRV_DEVICE_NODE *psDeviceNode); + +PVRSRV_ERROR RGXSendCommandsFromDeferredList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bPoll); + +/*************************************************************************/ /*! +@Function RGXSendCommandWithPowLockAndGetKCCBSlot + +@Description Sends a command to a particular DM without honouring + pending cache operations but taking the power lock. + +@Input psDevInfo Device Info +@Input psKCCBCmd The cmd to send. +@Input ui32PDumpFlags Pdump flags +@Output pui32CmdKCCBSlot When non-NULL: + - Pointer on return contains the kCCB slot + number in which the command was enqueued. + - Resets the value of the allotted slot to + RGXFWIF_KCCB_RTN_SLOT_RST +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR RGXSendCommandWithPowLockAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_KCCB_CMD *psKCCBCmd, + IMG_UINT32 ui32PDumpFlags, + IMG_UINT32 *pui32CmdKCCBSlot); + +#define RGXSendCommandWithPowLock(psDevInfo, psKCCBCmd, ui32PDumpFlags) \ + RGXSendCommandWithPowLockAndGetKCCBSlot(psDevInfo, psKCCBCmd, ui32PDumpFlags, NULL) + +/*************************************************************************/ /*! +@Function RGXSendCommandAndGetKCCBSlot + +@Description Sends a command to a particular DM without honouring + pending cache operations or the power lock. + The function flushes any deferred KCCB commands first. + +@Input psDevInfo Device Info +@Input psKCCBCmd The cmd to send. +@Input uiPdumpFlags PDump flags. 
+@Output pui32CmdKCCBSlot When non-NULL: + - Pointer on return contains the kCCB slot + number in which the command was enqueued. + - Resets the value of the allotted slot to + RGXFWIF_KCCB_RTN_SLOT_RST +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR RGXSendCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_KCCB_CMD *psKCCBCmd, + PDUMP_FLAGS_T uiPdumpFlags, + IMG_UINT32 *pui32CmdKCCBSlot); + +#define RGXSendCommand(psDevInfo, psKCCBCmd, ui32PDumpFlags) \ + RGXSendCommandAndGetKCCBSlot(psDevInfo, psKCCBCmd, ui32PDumpFlags, NULL) + +/*************************************************************************/ /*! +@Function RGXScheduleCommandAndGetKCCBSlot + +@Description Sends a command to a particular DM and kicks the firmware but + first schedules any commands which have to happen before + handle + +@Input psDevInfo Device Info +@Input eDM To which DM the cmd is sent. +@Input psKCCBCmd The cmd to send. +@Input ui32CacheOpFence Pending cache op. fence value. +@Input ui32PDumpFlags PDump flags +@Output pui32CmdKCCBSlot When non-NULL: + - Pointer on return contains the kCCB slot + number in which the command was enqueued. + - Resets the value of the allotted slot to + RGXFWIF_KCCB_RTN_SLOT_RST + +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR RGXScheduleCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_DM eKCCBType, + RGXFWIF_KCCB_CMD *psKCCBCmd, + IMG_UINT32 ui32CacheOpFence, + IMG_UINT32 ui32PDumpFlags, + IMG_UINT32 *pui32CmdKCCBSlot); +#define RGXScheduleCommand(psDevInfo, eKCCBType, psKCCBCmd, ui32CacheOpFence, ui32PDumpFlags) \ + RGXScheduleCommandAndGetKCCBSlot(psDevInfo, eKCCBType, psKCCBCmd, ui32CacheOpFence, ui32PDumpFlags, NULL) + +/*************************************************************************/ /*! 
+@Function RGXWaitForKCCBSlotUpdate + +@Description Waits until the required kCCB slot value is updated by the FW + (signifies command completion). Additionally, dumps a relevant + PDump poll command. + +@Input psDevInfo Device Info +@Input ui32SlotNum The kCCB slot number to wait for an update on +@Input ui32PDumpFlags + +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR RGXWaitForKCCBSlotUpdate(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32SlotNum, + IMG_UINT32 ui32PDumpFlags); + +PVRSRV_ERROR RGXFirmwareUnittests(PVRSRV_RGXDEV_INFO *psDevInfo); + +/*************************************************************************/ /*! +@Function PVRSRVRGXFrameworkCopyCommand + +@Description Copy framework command into FW addressable buffer + +@param psFWFrameworkMemDesc +@param pbyGPUFRegisterList +@param ui32FrameworkRegisterSize + +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR PVRSRVRGXFrameworkCopyCommand(DEVMEM_MEMDESC *psFWFrameworkMemDesc, + IMG_PBYTE pbyGPUFRegisterList, + IMG_UINT32 ui32FrameworkRegisterSize); + + +/*************************************************************************/ /*! +@Function PVRSRVRGXFrameworkCreateKM + +@Description Create FW addressable buffer for framework + +@param psDeviceNode +@param ppsFWFrameworkMemDesc +@param ui32FrameworkRegisterSize + +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR PVRSRVRGXFrameworkCreateKM(PVRSRV_DEVICE_NODE * psDeviceNode, + DEVMEM_MEMDESC ** ppsFWFrameworkMemDesc, + IMG_UINT32 ui32FrameworkRegisterSize); + + +/*************************************************************************/ /*! +@Function RGXPollForGPCommandCompletion + +@Description Polls for completion of a submitted GP command. Poll is done + on a value matching a masked read from the address. 
+ +@Input psDevNode Pointer to device node struct +@Input pui32LinMemAddr CPU linear address to poll +@Input ui32Value Required value +@Input ui32Mask Mask + +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR RGXPollForGPCommandCompletion(PVRSRV_DEVICE_NODE *psDevNode, + volatile IMG_UINT32 __iomem *pui32LinMemAddr, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask); + +/*************************************************************************/ /*! +@Function RGXStateFlagCtrl + +@Description Set and return FW internal state flags. + +@Input psDevInfo Device Info +@Input ui32Config AppHint config flags +@Output pui32State Current AppHint state flag configuration +@Input bSetNotClear Set or clear the provided config flags + +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR RGXStateFlagCtrl(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32Config, + IMG_UINT32 *pui32State, + IMG_BOOL bSetNotClear); + +/*! +******************************************************************************* +@Function RGXFWRequestCommonContextCleanUp + +@Description Schedules a FW common context cleanup. The firmware doesn't + block waiting for the resource to become idle but rather + notifies the host that the resources is busy. + +@Input psDeviceNode pointer to device node +@Input psServerCommonContext context to be cleaned up +@Input eDM Data master, to which the cleanup command should + be sent +@Input ui32PDumpFlags PDump continuous flag + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXFWRequestCommonContextCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, + RGXFWIF_DM eDM, + IMG_UINT32 ui32PDumpFlags); + +/*! 
+******************************************************************************* +@Function RGXFWRequestHWRTDataCleanUp + +@Description Schedules a FW HWRTData memory cleanup. The firmware doesn't + block waiting for the resource to become idle but rather + notifies the host that the resources is busy. + +@Input psDeviceNode pointer to device node +@Input psHWRTData firmware address of the HWRTData for clean-up + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXFWRequestHWRTDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode, + PRGXFWIF_HWRTDATA psHWRTData); + +/*! +******************************************************************************* +@Function RGXFWRequestFreeListCleanUp + +@Description Schedules a FW FreeList cleanup. The firmware doesn't block + waiting for the resource to become idle but rather notifies the + host that the resources is busy. + +@Input psDeviceNode pointer to device node +@Input psFWFreeList firmware address of the FreeList for clean-up + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXFWRequestFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDeviceNode, + PRGXFWIF_FREELIST psFWFreeList); + +/*! +******************************************************************************* +@Function RGXFWRequestZSBufferCleanUp + +@Description Schedules a FW ZS Buffer cleanup. The firmware doesn't block + waiting for the resource to become idle but rather notifies the + host that the resources is busy. 
+ +@Input psDevInfo pointer to device info +@Input psFWZSBuffer firmware address of the ZS Buffer for clean-up + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXFWRequestZSBufferCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo, + PRGXFWIF_ZSBUFFER psFWZSBuffer); + +PVRSRV_ERROR ContextSetPriority(RGX_SERVER_COMMON_CONTEXT *psContext, + CONNECTION_DATA *psConnection, + PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32Priority, + RGXFWIF_DM eDM); + +/*! +******************************************************************************* +@Function RGXFWSetHCSDeadline + +@Description Requests the Firmware to set a new Hard Context Switch timeout + deadline. Context switches that surpass that deadline cause the + system to kill the currently running workloads. + +@Input psDevInfo pointer to device info +@Input ui32HCSDeadlineMs The deadline in milliseconds. + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXFWSetHCSDeadline(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32HCSDeadlineMs); + +/*! +******************************************************************************* +@Function RGXFWChangeOSidPriority + +@Description Requests the Firmware to change the priority of an operating + system. Higher priority number equals higher priority on the + scheduling system. + +@Input psDevInfo pointer to device info +@Input ui32OSid The OSid whose priority is to be altered +@Input ui32Priority The new priority number for the specified OSid + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXFWChangeOSidPriority(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32OSid, + IMG_UINT32 ui32Priority); + +/*! +******************************************************************************* +@Function RGXFWHealthCheckCmd + +@Description Ping the firmware to check if it is responsive. 
+ +@Input psDevInfo pointer to device info + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXFWHealthCheckCmd(PVRSRV_RGXDEV_INFO *psDevInfo); + +/*! +******************************************************************************* +@Function RGXFWSetFwOsState + +@Description Requests the Firmware to change the guest OS Online states. + This should be initiated by the VMM when a guest VM comes + online or goes offline. If offline, the FW offloads any current + resource from that OSID. The request is repeated until the FW + has had time to free all the resources or has waited for + workloads to finish. + +@Input psDevInfo pointer to device info +@Input ui32OSid The Guest OSid whose state is being altered +@Input eOSOnlineState The new state (Online or Offline) + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXFWSetFwOsState(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32OSid, + RGXFWIF_OS_STATE_CHANGE eOSOnlineState); + +#if defined(SUPPORT_AUTOVZ) +/*! +******************************************************************************* +@Function RGXUpdateAutoVzWdgToken + +@Description If the driver-firmware connection is active, read the + firmware's watchdog token and copy its value back into the OS + token. This indicates to the firmware that this driver is alive + and responsive. + +@Input psDevInfo pointer to device info +******************************************************************************/ +void RGXUpdateAutoVzWdgToken(PVRSRV_RGXDEV_INFO *psDevInfo); +#endif + +/*! 
+******************************************************************************* +@Function RGXFWConfigPHR + +@Description Configure the Periodic Hardware Reset functionality + +@Input psDevInfo pointer to device info +@Input ui32PHRMode desired PHR mode + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXFWConfigPHR(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32PHRMode); +/*! +******************************************************************************* +@Function RGXFWConfigWdg + +@Description Configure the Safety watchdog trigger period + +@Input psDevInfo pointer to device info +@Input ui32WdgPeriodUs requested period in microseconds + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXFWConfigWdg(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32WdgPeriod); + +/*! +******************************************************************************* +@Function RGXReadMETAAddr + +@Description Reads a value at given address in META memory space + (it can be either a memory location or a META register) + +@Input psDevInfo pointer to device info +@Input ui32METAAddr address in META memory space + +@Output pui32Value value + +@Return PVRSRV_ERROR +******************************************************************************/ + +PVRSRV_ERROR RGXReadMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32METAAddr, + IMG_UINT32 *pui32Value); + +/*! 
+******************************************************************************* +@Function RGXWriteMETAAddr + +@Description Write a value to the given address in META memory space + (it can be either a memory location or a META register) + +@Input psDevInfo pointer to device info +@Input ui32METAAddr address in META memory space +@Input ui32Value Value to write to address in META memory space + +@Return PVRSRV_ERROR +******************************************************************************/ + +PVRSRV_ERROR RGXWriteMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32METAAddr, + IMG_UINT32 ui32Value); + +/*! +******************************************************************************* +@Function RGXCheckFirmwareCCB + +@Description Processes all commands that are found in the Firmware CCB. + +@Input psDevInfo pointer to device + +@Return None +******************************************************************************/ +void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo); + +/*! +******************************************************************************* +@Function RGXCheckForStalledClientContexts + +@Description Checks all client contexts, for the device with device info + provided, to see if any are waiting for a fence to signal and + optionally force signalling of the fence for the context which + has been waiting the longest. + This function is called by RGXUpdateHealthStatus() and also + may be invoked from other trigger points. + +@Input psDevInfo pointer to device info +@Input bIgnorePrevious If IMG_TRUE, any stalled contexts will be + indicated immediately, rather than only + checking against any previous stalled contexts + +@Return None +******************************************************************************/ +void RGXCheckForStalledClientContexts(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bIgnorePrevious); + +/*! 
+******************************************************************************* +@Function RGXUpdateHealthStatus + +@Description Tests a number of conditions which might indicate a fatal error + has occurred in the firmware. The result is stored in the + device node eHealthStatus. + +@Input psDevNode Pointer to device node structure. +@Input bCheckAfterTimePassed When TRUE, the function will also test + for firmware queues and polls not changing + since the previous test. + + Note: if not enough time has passed since the + last call, false positives may occur. + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode, + IMG_BOOL bCheckAfterTimePassed); + +#if defined(SUPPORT_AUTOVZ) +/*! +******************************************************************************* +@Function RGXUpdateAutoVzWatchdog + +@Description Updates AutoVz watchdog that maintains the fw-driver connection + +@Input psDevNode Pointer to device node structure. +******************************************************************************/ +void RGXUpdateAutoVzWatchdog(PVRSRV_DEVICE_NODE* psDevNode); +#endif /* SUPPORT_AUTOVZ */ + +PVRSRV_ERROR CheckStalledClientCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, RGX_KICK_TYPE_DM eKickTypeDM); + +void DumpFWCommonContextInfo(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + IMG_UINT32 ui32VerbLevel); + +/*! +******************************************************************************* +@Function AttachKickResourcesCleanupCtls + +@Description Attaches the cleanup structures to a kick command so that + submission reference counting can be performed when the + firmware processes the command + +@Output apsCleanupCtl Array of CleanupCtl structure pointers to populate. 
+@Output pui32NumCleanupCtl Number of CleanupCtl structure pointers written out. +@Input eDM Which data master is the subject of the command. +@Input bKick TRUE if the client originally wanted to kick this DM. +@Input psRTDataCleanup Optional RTData cleanup associated with the command. +@Input psZBuffer Optional ZSBuffer associated with the command. + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR AttachKickResourcesCleanupCtls(PRGXFWIF_CLEANUP_CTL *apsCleanupCtl, + IMG_UINT32 *pui32NumCleanupCtl, + RGXFWIF_DM eDM, + IMG_BOOL bKick, + RGX_KM_HW_RT_DATASET *psKMHWRTDataSet, + RGX_ZSBUFFER_DATA *psZSBuffer, + RGX_ZSBUFFER_DATA *psMSAAScratchBuffer); + +/*! +******************************************************************************* +@Function RGXResetHWRLogs + +@Description Resets the HWR Logs buffer + (the hardware recovery count is not reset) + +@Input psDevNode Pointer to the device + +@Return PVRSRV_ERROR PVRSRV_OK on success. + Otherwise, a PVRSRV error code +******************************************************************************/ +PVRSRV_ERROR RGXResetHWRLogs(PVRSRV_DEVICE_NODE *psDevNode); + +/*! +******************************************************************************* +@Function RGXGetPhyAddr + +@Description Get the physical address of a PMR at an offset within it + +@Input psPMR PMR of the allocation +@Input ui32LogicalOffset Logical offset + +@Output psPhyAddr Physical address of the allocation + +@Return PVRSRV_ERROR PVRSRV_OK on success. + Otherwise, a PVRSRV error code +******************************************************************************/ +PVRSRV_ERROR RGXGetPhyAddr(PMR *psPMR, + IMG_DEV_PHYADDR *psPhyAddr, + IMG_UINT32 ui32LogicalOffset, + IMG_UINT32 ui32Log2PageSize, + IMG_UINT32 ui32NumOfPages, + IMG_BOOL *bValid); + +#if defined(PDUMP) +/*! 
+******************************************************************************* +@Function RGXPdumpDrainKCCB + +@Description Wait for the firmware to execute all the commands in the kCCB + +@Input psDevInfo Pointer to the device +@Input ui32WriteOffset Woff we have to POL for the Roff to be equal to + +@Return PVRSRV_ERROR PVRSRV_OK on success. + Otherwise, a PVRSRV error code +******************************************************************************/ +PVRSRV_ERROR RGXPdumpDrainKCCB(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32WriteOffset); +#endif /* PDUMP */ + +/*! +******************************************************************************* +@Function RGXFwRawHeapAllocMap + +@Description Register and maps to device, a raw firmware physheap + +@Return PVRSRV_ERROR PVRSRV_OK on success. + Otherwise, a PVRSRV error code +******************************************************************************/ +PVRSRV_ERROR RGXFwRawHeapAllocMap(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32OSID, + IMG_DEV_PHYADDR sDevPAddr, + IMG_UINT64 ui64DevPSize); + +/*! +******************************************************************************* +@Function RGXFwRawHeapUnmapFree + +@Description Unregister and unmap from device, a raw firmware physheap + +******************************************************************************/ +void RGXFwRawHeapUnmapFree(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32OSID); + +/*! +******************************************************************************* +@Function RGXRiscvHalt + +@Description Halt the RISC-V FW core (required for certain operations + done through Debug Module) + +@Input psDevInfo Pointer to device info + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXRiscvHalt(PVRSRV_RGXDEV_INFO *psDevInfo); + +/*! 
+******************************************************************************* +@Function RGXRiscvResume + +@Description Resume the RISC-V FW core + +@Input psDevInfo Pointer to device info + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXRiscvResume(PVRSRV_RGXDEV_INFO *psDevInfo); + +/*! +******************************************************************************* +@Function RGXRiscvReadReg + +@Description Read a value from the given RISC-V register (GPR or CSR) + +@Input psDevInfo Pointer to device info +@Input ui32RegAddr RISC-V register address + +@Output pui32Value Read value + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXRiscvReadReg(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 *pui32Value); + +/*! +******************************************************************************* +@Function RGXRiscvPollReg + +@Description Poll for a value from the given RISC-V register (GPR or CSR) + +@Input psDevInfo Pointer to device info +@Input ui32RegAddr RISC-V register address +@Input ui32Value Expected value + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXRiscvPollReg(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32Value); + +/*! +******************************************************************************* +@Function RGXRiscvWriteReg + +@Description Write a value to the given RISC-V register (GPR or CSR) + +@Input psDevInfo Pointer to device info +@Input ui32RegAddr RISC-V register address +@Input ui32Value Write value + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXRiscvWriteReg(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32Value); + +/*! 
+******************************************************************************* +@Function RGXRiscvReadMem + +@Description Read a value at the given address in RISC-V memory space + +@Input psDevInfo Pointer to device info +@Input ui32Addr Address in RISC-V memory space + +@Output pui32Value Read value + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXRiscvReadMem(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32Addr, + IMG_UINT32 *pui32Value); + +/*! +******************************************************************************* +@Function RGXRiscvPollMem + +@Description Poll for a value at the given address in RISC-V memory space + +@Input psDevInfo Pointer to device info +@Input ui32Addr Address in RISC-V memory space +@Input ui32Value Expected value + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXRiscvPollMem(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32Addr, + IMG_UINT32 ui32Value); + +/*! +******************************************************************************* +@Function RGXRiscvWriteMem + +@Description Write a value to the given address in RISC-V memory space + +@Input psDevInfo Pointer to device info +@Input ui32Addr Address in RISC-V memory space +@Input ui32Value Write value + +@Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RGXRiscvWriteMem(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32MemAddr, + IMG_UINT32 ui32Value); + +#if defined(SUPPORT_AUTOVZ_HW_REGS) && !defined(SUPPORT_AUTOVZ) +#error "VZ build configuration error: use of OS scratch registers supported only in AutoVz drivers." 
+#endif + +#if defined(SUPPORT_AUTOVZ_HW_REGS) +/* AutoVz with hw support */ +#define KM_GET_FW_CONNECTION(psDevInfo) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH3) +#define KM_GET_OS_CONNECTION(psDevInfo) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH2) +#define KM_SET_OS_CONNECTION(val, psDevInfo) OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH2, RGXFW_CONNECTION_OS_##val) + +#define KM_GET_FW_ALIVE_TOKEN(psDevInfo) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH1) +#define KM_GET_OS_ALIVE_TOKEN(psDevInfo) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH0) +#define KM_SET_OS_ALIVE_TOKEN(val, psDevInfo) OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH0, val) +#else + +#if defined(SUPPORT_AUTOVZ) +#define KM_GET_FW_ALIVE_TOKEN(psDevInfo) (psDevInfo->psRGXFWIfConnectionCtl->ui32AliveFwToken) +#define KM_GET_OS_ALIVE_TOKEN(psDevInfo) (psDevInfo->psRGXFWIfConnectionCtl->ui32AliveOsToken) +#define KM_SET_OS_ALIVE_TOKEN(val, psDevInfo) OSWriteDeviceMem32WithWMB(&psDevInfo->psRGXFWIfConnectionCtl->ui32AliveOsToken, val) +#endif /* defined(SUPPORT_AUTOVZ) */ + +#if !defined(NO_HARDWARE) && (defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED == 1))) +/* native, static-vz and AutoVz using shared memory */ +#define KM_GET_FW_CONNECTION(psDevInfo) (psDevInfo->psRGXFWIfConnectionCtl->eConnectionFwState) +#define KM_GET_OS_CONNECTION(psDevInfo) (psDevInfo->psRGXFWIfConnectionCtl->eConnectionOsState) +#define KM_SET_OS_CONNECTION(val, psDevInfo) OSWriteDeviceMem32WithWMB(&psDevInfo->psRGXFWIfConnectionCtl->eConnectionOsState, RGXFW_CONNECTION_OS_##val) +#else +/* dynamic-vz & nohw */ +#define KM_GET_FW_CONNECTION(psDevInfo) (RGXFW_CONNECTION_FW_ACTIVE) +#define KM_GET_OS_CONNECTION(psDevInfo) (RGXFW_CONNECTION_OS_ACTIVE) +#define KM_SET_OS_CONNECTION(val, psDevInfo) +#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (RGX_NUM_OS_SUPPORTED == 1) */ +#endif /* 
defined(SUPPORT_AUTOVZ_HW_REGS) */ + +#if defined(SUPPORT_AUTOVZ) +#define RGX_FIRST_RAW_HEAP_OSID RGXFW_HOST_OS +#else +#define RGX_FIRST_RAW_HEAP_OSID RGXFW_GUEST_OSID_START +#endif + +#define KM_OS_CONNECTION_IS(val, psDevInfo) (KM_GET_OS_CONNECTION(psDevInfo) == RGXFW_CONNECTION_OS_##val) +#define KM_FW_CONNECTION_IS(val, psDevInfo) (KM_GET_FW_CONNECTION(psDevInfo) == RGXFW_CONNECTION_FW_##val) + +#endif /* RGXFWUTILS_H */ +/****************************************************************************** + End of file (rgxfwutils.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgxheapconfig.h b/drivers/gpu/drm/phytium/octopus/rgxheapconfig.h new file mode 100644 index 000000000000..21e929ead85b --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxheapconfig.h @@ -0,0 +1,278 @@ +/*************************************************************************/ /*! +@File +@Title RGX Device virtual memory map +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Memory heaps device specific configuration +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RGXHEAPCONFIG_H +#define RGXHEAPCONFIG_H + +#include "rgxdefs_km.h" + + +#define RGX_HEAP_SIZE_4KiB IMG_UINT64_C(0x0000001000) +#define RGX_HEAP_SIZE_64KiB IMG_UINT64_C(0x0000010000) +#define RGX_HEAP_SIZE_256KiB IMG_UINT64_C(0x0000040000) + +#define RGX_HEAP_SIZE_1MiB IMG_UINT64_C(0x0000100000) +#define RGX_HEAP_SIZE_2MiB IMG_UINT64_C(0x0000200000) +#define RGX_HEAP_SIZE_4MiB IMG_UINT64_C(0x0000400000) +#define RGX_HEAP_SIZE_16MiB IMG_UINT64_C(0x0001000000) +#define RGX_HEAP_SIZE_256MiB IMG_UINT64_C(0x0010000000) + +#define RGX_HEAP_SIZE_1GiB IMG_UINT64_C(0x0040000000) +#define RGX_HEAP_SIZE_2GiB IMG_UINT64_C(0x0080000000) +#define RGX_HEAP_SIZE_4GiB IMG_UINT64_C(0x0100000000) +#define RGX_HEAP_SIZE_16GiB IMG_UINT64_C(0x0400000000) +#define RGX_HEAP_SIZE_32GiB IMG_UINT64_C(0x0800000000) +#define RGX_HEAP_SIZE_64GiB IMG_UINT64_C(0x1000000000) +#define RGX_HEAP_SIZE_128GiB IMG_UINT64_C(0x2000000000) +#define RGX_HEAP_SIZE_256GiB IMG_UINT64_C(0x4000000000) +#define RGX_HEAP_SIZE_512GiB IMG_UINT64_C(0x8000000000) + +/* + RGX Device Virtual Address Space Definitions + + NOTES: + Base addresses have to be a multiple of 4MiB + + This file defines the RGX virtual address heaps that are used in + application memory contexts. It also shows where the Firmware memory heap + fits into this, but the firmware heap is only ever created in the + Services KM/server component. + + RGX_PDSCODEDATA_HEAP_BASE and RGX_USCCODE_HEAP_BASE will be programmed, + on a global basis, into RGX_CR_PDS_EXEC_BASE and RGX_CR_USC_CODE_BASE_* + respectively. Therefore if clients use multiple configs they must still + be consistent with their definitions for these heaps. + + Shared virtual memory (GENERAL_SVM) support requires half of the address + space (512 GiB) be reserved for SVM allocations to mirror application CPU + addresses. 
+
+	The GENERAL non-SVM region is 512 GiB to 768 GiB and is shared between the
+	general (4KiB) heap and the general non-4K heap. The first 128 GiB is used
+	for the GENERAL_HEAP (4KiB) and the last 32 GiB is used for the
+	GENERAL_NON4K_HEAP. This heap has a default page-size of 16K.
+	AppHint PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE can be used to force it
+	to these values: 4K,64K,256K,1M,2M.
+
+	Heaps must not start at 0x0000000000, as this is reserved for internal
+	use within device memory layer.
+	Range comments, those starting in column 0 below are a section heading of
+	sorts and are above the heaps in that range. Often this is the reserved
+	size of the heap within the range.
+*/
+
+
+/* 0x00_0000_0000 ************************************************************/
+
+/* 0x00_0000_0000 - 0x00_0040_0000 **/
+	/* 0 MiB to 4 MiB, size of 4 MiB : RESERVED **/
+
+/* 0x00_0040_0000 - 0x7F_FFC0_0000 **/
+	/* 4 MiB to 512 GiB, size of 512 GiB less 4 MiB : GENERAL_SVM_HEAP **/
+	#define RGX_GENERAL_SVM_HEAP_BASE	IMG_UINT64_C(0x0000400000)
+	#define RGX_GENERAL_SVM_HEAP_SIZE	(RGX_HEAP_SIZE_512GiB - RGX_HEAP_SIZE_4MiB)
+
+
+/* 0x80_0000_0000 ************************************************************/
+
+/* 0x80_0000_0000 - 0x9F_FFFF_FFFF **/
+	/* 512 GiB to 640 GiB, size of 128 GiB : GENERAL_HEAP **/
+	#define RGX_GENERAL_HEAP_BASE	IMG_UINT64_C(0x8000000000)
+	#define RGX_GENERAL_HEAP_SIZE	RGX_HEAP_SIZE_128GiB
+
+/* 0xA0_0000_0000 - 0xAF_FFFF_FFFF **/
+	/* 640 GiB to 704 GiB, size of 64 GiB : FREE **/
+
+/* 0xB0_0000_0000 - 0xB3_FFFF_FFFF **/
+	/* 704 GiB to 720 GiB, size of 16 GiB : RESERVED ROGUE **/
+
+/* 0xB4_0000_0000 - 0xB7_FFFF_FFFF **/
+	/* 720 GiB to 736 GiB, size of 16 GiB : FREE **/
+
+/* 0xB8_0000_0000 - 0xBF_FFFF_FFFF **/
+	/* 736 GiB to 768 GiB, size of 32 GiB : GENERAL_NON4K_HEAP **/
+	#define RGX_GENERAL_NON4K_HEAP_BASE	IMG_UINT64_C(0xB800000000)
+	#define RGX_GENERAL_NON4K_HEAP_SIZE	RGX_HEAP_SIZE_32GiB
+
+
+/* 0xC0_0000_0000 
************************************************************/
+
+/* 0xC0_0000_0000 - 0xD9_FFFF_FFFF **/
+	/* 768 GiB to 872 GiB, size of 104 GiB : FREE **/
+
+/* 0xDA_0000_0000 - 0xDA_FFFF_FFFF **/
+	/* 872 GiB to 876 GiB, size of 4 GiB : PDSCODEDATA_HEAP **/
+	#define RGX_PDSCODEDATA_HEAP_BASE	IMG_UINT64_C(0xDA00000000)
+	#define RGX_PDSCODEDATA_HEAP_SIZE	RGX_HEAP_SIZE_4GiB
+
+/* 0xDB_0000_0000 - 0xDC_FFFF_FFFF **/
+	/* 876 GiB to 884 GiB, size of 8 GiB : RESERVED ROGUE **/
+
+/* 0xDD_0000_0000 - 0xDF_FFFF_FFFF **/
+	/* 884 GiB to 896 GiB, size of 12 GiB : FREE **/
+
+/* 0xE0_0000_0000 - 0xE0_FFFF_FFFF **/
+	/* 896 GiB to 900 GiB, size of 4 GiB : USCCODE_HEAP **/
+	#define RGX_USCCODE_HEAP_BASE	IMG_UINT64_C(0xE000000000)
+	#define RGX_USCCODE_HEAP_SIZE	RGX_HEAP_SIZE_4GiB
+
+/* 0xE1_0000_0000 - 0xE1_BFFF_FFFF **/
+	/* 900 GiB to 903 GiB, size of 3 GiB : RESERVED **/
+
+/* 0xE1_C000_0000 - 0xE1_FFFF_FFFF **/
+	/* 903 GiB to 904 GiB, reserved 1 GiB, : FIRMWARE_HEAP **/
+
+	/* Firmware heaps defined in rgx_heap_firmware.h as they are not present in
+	   application memory contexts, see:
+	   RGX_FIRMWARE_RAW_HEAP_BASE
+	   RGX_FIRMWARE_RAW_HEAP_SIZE
+	   See header for other sub-heaps details
+	*/
+
+/* 0xE2_0000_0000 - 0xE2_FFFF_FFFF **/
+	/* 904 GiB to 908 GiB, size of 4 GiB : RESERVED ROGUE **/
+
+/* 0xE3_0000_0000 - 0xE3_FFFF_FFFF **/
+	/* 908 GiB to 912 GiB, size of 4 GiB : FREE **/
+
+/* 0xE4_0000_0000 - 0xE7_FFFF_FFFF **/
+	/* 912 GiB to 928 GiB, size 16 GiB : RESERVED_ROGUE **/
+
+/* 0xE8_0000_0000 - 0xE8_FFFF_FFFF **/
+	/* 928 GiB to 932 GiB, size of 4 GiB : FREE **/
+
+/* 0xE9_0000_0000 - 0xE9_3FFF_FFFF **/
+	/* 932 GiB to 933 GiB, size of 1 GiB : VK_CAPT_REPLAY_HEAP **/
+	#define RGX_VK_CAPT_REPLAY_HEAP_BASE	IMG_UINT64_C(0xE900000000)
+	#define RGX_VK_CAPT_REPLAY_HEAP_SIZE	RGX_HEAP_SIZE_1GiB
+
+/* 0xE9_4000_0000 - 0xE9_FFFF_FFFF **/
+	/* 933 GiB to 936 GiB, size of 3 GiB : FREE **/
+
+/* 0xEA_0000_0000 - 0xEA_0000_0FFF **/
+	/* 936 GiB to 937 GiB, size of min heap size : 
SIGNALS_HEAP **/ + /* CDM Signals heap (31 signals less one reserved for Services). + * Size 960B rounded up to minimum heap size */ + #define RGX_SIGNALS_HEAP_BASE IMG_UINT64_C(0xEA00000000) + #define RGX_SIGNALS_HEAP_SIZE DEVMEM_HEAP_MINIMUM_SIZE + +/* 0xEA_4000_0000 - 0xEA_FFFF_FFFF **/ + /* 937 GiB to 940 GiB, size of 3 GiB : FREE **/ + +/* 0xEB_0000_0000 - 0xEB_FFFF_FFFF **/ + /* 940 GiB to 944 GiB, size 4 GiB : COMPONENT_CTRL_HEAP **/ + #define RGX_COMPONENT_CTRL_HEAP_BASE IMG_UINT64_C(0xEB00000000) + #define RGX_COMPONENT_CTRL_HEAP_SIZE RGX_HEAP_SIZE_4GiB + +/* 0xEC_0000_0000 - 0xEC_001F_FFFF **/ + /* 944 GiB to 945 GiB, size 2 MiB : FBCDC_HEAP **/ + #define RGX_FBCDC_HEAP_BASE IMG_UINT64_C(0xEC00000000) + #define RGX_FBCDC_HEAP_SIZE RGX_HEAP_SIZE_2MiB + +/* 0xEC_4000_0000 - 0xEC_401F_FFFF **/ + /* 945 GiB to 946 GiB, size 2 MiB : FBCDC_LARGE_HEAP **/ + #define RGX_FBCDC_LARGE_HEAP_BASE IMG_UINT64_C(0xEC40000000) + #define RGX_FBCDC_LARGE_HEAP_SIZE RGX_HEAP_SIZE_2MiB + +/* 0xEC_8000_0000 - 0xEC_FFFF_FFFF **/ + /* 946 GiB to 948 GiB, size of 3 GiB : FREE **/ + +/* 0xED_0000_0000 - 0xED_00FF_FFFF */ + /* 948 GiB to 949 GiB, size 16 MiB : PDS_INDIRECT_STATE_HEAP */ + #define RGX_PDS_INDIRECT_STATE_HEAP_BASE IMG_UINT64_C(0xED00000000) + #define RGX_PDS_INDIRECT_STATE_HEAP_SIZE RGX_HEAP_SIZE_16MiB + +/* 0xED_4000_0000 - 0xED_FFFF_FFFF **/ + /* 949 GiB to 952 GiB, size of 3 GiB : FREE **/ + +/* 0xEE_0000_0000 - 0xEE_3FFF_FFFF **/ + /* 952 GiB to 953 GiB, size of 1 GiB : CMP_MISSION_RMW_HEAP **/ + #define RGX_CMP_MISSION_RMW_HEAP_BASE IMG_UINT64_C(0xEE00000000) + #define RGX_CMP_MISSION_RMW_HEAP_SIZE RGX_HEAP_SIZE_1GiB + +/* 0xEE_4000_0000 - 0xEE_FFFF_FFFF **/ + /* 953 GiB to 956 GiB, size of 3 GiB : RESERVED **/ + +/* 0xEF_0000_0000 - 0xEF_3FFF_FFFF **/ + /* 956 GiB to 957 GiB, size of 1 GiB : CMP_SAFETY_RMW_HEAP **/ + #define RGX_CMP_SAFETY_RMW_HEAP_BASE IMG_UINT64_C(0xEF00000000) + #define RGX_CMP_SAFETY_RMW_HEAP_SIZE RGX_HEAP_SIZE_1GiB + +/* 0xEF_4000_0000 - 
0xEF_FFFF_FFFF **/ + /* 957 GiB to 960 GiB, size of 3 GiB : RESERVED **/ + +/* 0xF0_0000_0000 - 0xF0_FFFF_FFFF **/ + /* 960 GiB to 964 GiB, size of 4 GiB : TEXTURE_STATE_HEAP (36-bit aligned) */ + #define RGX_TEXTURE_STATE_HEAP_BASE IMG_UINT64_C(0xF000000000) + #define RGX_TEXTURE_STATE_HEAP_SIZE RGX_HEAP_SIZE_4GiB + +/* 0xF1_0000_0000 - 0xF1_FFFF_FFFF **/ + /* 964 GiB to 968 GiB, size of 4 GiB : FREE **/ + +/* 0xF2_0000_0000 - 0xF2_001F_FFFF **/ + /* 968 GiB to 969 GiB, size of 2 MiB : VISIBILITY_TEST_HEAP **/ + #define RGX_VISIBILITY_TEST_HEAP_BASE IMG_UINT64_C(0xF200000000) + #define RGX_VISIBILITY_TEST_HEAP_SIZE RGX_HEAP_SIZE_2MiB + +/* 0xF2_4000_0000 - 0xF2_FFFF_FFFF **/ + /* 969 GiB to 972 GiB, size of 3 GiB : FREE **/ + +/* 0xF3_0000_0000 - 0xF7_FFFF_FFFF **/ + /* 972 GiB to 992 GiB, size of 20 GiB : FREE **/ + +/* 0xF8_0000_0000 - 0xF9_FFFF_FFFF **/ + /* 992 GiB to 1000 GiB, size 8 GiB : RESERVED ROGUE **/ + +/* 0xFA_0000_0000 - 0xFF_FFFF_FFFF **/ + /* 1000 GiB to 1024 GiB, size of 24 GiB : FREE **/ + + +/* 0xFF_FFFF_FFFF ************************************************************/ + +/* End of RGX Device Virtual Address Space definitions */ + +#endif /* RGXHEAPCONFIG_H */ + +/****************************************************************************** + End of file (rgxheapconfig.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgxhwperf.c b/drivers/gpu/drm/phytium/octopus/rgxhwperf.c new file mode 100644 index 000000000000..fc5e627ac997 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxhwperf.c @@ -0,0 +1,391 @@ +/*************************************************************************/ /*! +@File +@Title RGX HW Performance implementation +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Description RGX HW Performance implementation +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ /**************************************************************************/ + +//#define PVR_DPF_FUNCTION_TRACE_ON 1 +#undef PVR_DPF_FUNCTION_TRACE_ON + +#include "img_defs.h" +#include "pvr_debug.h" +#include "rgxdevice.h" +#include "pvrsrv_error.h" +#include "pvr_notifier.h" +#include "osfunc.h" +#include "allocmem.h" + +#include "pvrsrv.h" +#include "pvrsrv_tlstreams.h" +#include "pvrsrv_tlcommon.h" +#include "tlclient.h" +#include "tlstream.h" + +#include "rgxhwperf.h" +#include "rgxapi_km.h" +#include "rgxfwutils.h" +#include "rgxtimecorr.h" +#include "devicemem.h" +#include "devicemem_pdump.h" +#include "pdump_km.h" +#include "pvrsrv_apphint.h" +#include "process_stats.h" +#include "rgx_hwperf_table.h" +#include "rgxinit.h" + +#include "info_page_defs.h" + +/* Maximum enum value to prevent access to RGX_HWPERF_STREAM_ID2_CLIENT stream */ +#define RGX_HWPERF_MAX_STREAM_ID (RGX_HWPERF_STREAM_ID2_CLIENT) + + +IMG_INTERNAL /*static inline*/ IMG_UINT32 RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **); + +static IMG_BOOL RGXServerFeatureFlagsToHWPerfFlagsAddBlock( + RGX_HWPERF_BVNC_BLOCK * const psBlocks, + IMG_UINT16 * const pui16Count, + const IMG_UINT16 ui16BlockID, /* see RGX_HWPERF_CNTBLK_ID */ + const IMG_UINT16 ui16NumCounters, + const IMG_UINT16 ui16NumBlocks) +{ + const IMG_UINT16 ui16Count = *pui16Count; + + if (ui16Count < RGX_HWPERF_MAX_BVNC_BLOCK_LEN) + { + RGX_HWPERF_BVNC_BLOCK * const psBlock = 
&psBlocks[ui16Count]; + + /* If the GROUP is non-zero, convert from e.g. RGX_CNTBLK_ID_USC0 to + * RGX_CNTBLK_ID_USC_ALL. The table stores the former (plus the + * number of blocks and counters) but PVRScopeServices expects the + * latter (plus the number of blocks and counters). The conversion + * could always be moved to PVRScopeServices, but it's less code this + * way. + * For SLC0 we generate a single SLCBANK_ALL which has NUM_MEMBUS + * instances. + * This replaces the SLC0 .. SLC3 entries. + */ + if ((ui16BlockID == RGX_CNTBLK_ID_SLCBANK0) || (ui16BlockID & RGX_CNTBLK_ID_GROUP_MASK)) + { + psBlock->ui16BlockID = ui16BlockID | RGX_CNTBLK_ID_UNIT_ALL_MASK; + } + else + { + psBlock->ui16BlockID = ui16BlockID; + } + psBlock->ui16NumCounters = ui16NumCounters; + psBlock->ui16NumBlocks = ui16NumBlocks; + + *pui16Count = ui16Count + 1; + return IMG_TRUE; + } + return IMG_FALSE; +} + +PVRSRV_ERROR RGXServerFeatureFlagsToHWPerfFlags(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_HWPERF_BVNC *psBVNC) +{ + IMG_PCHAR pszBVNC; + PVR_LOG_RETURN_IF_FALSE((NULL != psDevInfo), "psDevInfo invalid", PVRSRV_ERROR_INVALID_PARAMS); + + if ((pszBVNC = RGXDevBVNCString(psDevInfo))) + { + size_t uiStringLength = OSStringNLength(pszBVNC, RGX_HWPERF_MAX_BVNC_LEN - 1); + OSStringLCopy(psBVNC->aszBvncString, pszBVNC, uiStringLength + 1); + memset(&psBVNC->aszBvncString[uiStringLength], 0, RGX_HWPERF_MAX_BVNC_LEN - uiStringLength); + } + else + { + *psBVNC->aszBvncString = 0; + } + + psBVNC->ui32BvncKmFeatureFlags = 0x0; + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERFBUS)) + { + psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_PERFBUS_FLAG; + } + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT)) + { + psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_MULTICORE_FLAG; + } + +#ifdef SUPPORT_WORKLOAD_ESTIMATION + /* Not a part of BVNC feature line and so doesn't need the feature supported check */ + psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_WORKLOAD_ESTIMATION; +#endif 
+ + /* Define the HW counter block counts. */ + { + RGX_HWPERF_BVNC_BLOCK * const psBlocks = psBVNC->aBvncBlocks; + IMG_UINT16 * const pui16Count = &psBVNC->ui16BvncBlocks; + const RGXFW_HWPERF_CNTBLK_TYPE_MODEL *asCntBlkTypeModel; + const IMG_UINT32 ui32CntBlkModelLen = RGXGetHWPerfBlockConfig(&asCntBlkTypeModel); + IMG_UINT32 ui32BlkCfgIdx; + size_t uiCount; + IMG_BOOL bOk = IMG_TRUE; + + // Initialise to zero blocks + *pui16Count = 0; + + // Add all the blocks + for (ui32BlkCfgIdx = 0; ui32BlkCfgIdx < ui32CntBlkModelLen; ui32BlkCfgIdx++) + { + const RGXFW_HWPERF_CNTBLK_TYPE_MODEL * const psCntBlkInfo = &asCntBlkTypeModel[ui32BlkCfgIdx]; + RGX_HWPERF_CNTBLK_RT_INFO sCntBlkRtInfo; + /* psCntBlkInfo->uiNumUnits gives compile-time info. For BVNC agnosticism, we use this: */ + if (psCntBlkInfo->pfnIsBlkPresent(psCntBlkInfo, psDevInfo, &sCntBlkRtInfo)) + { + IMG_UINT32 uiNumUnits; + + switch (psCntBlkInfo->uiCntBlkIdBase) + { + case RGX_CNTBLK_ID_SLCBANK0: + /* Generate the SLCBANK_ALL block for SLC0..SLC3 + * we have to special-case this as the iteration will + * generate entries starting at SLC0 and we need to + * defer until we are processing the last 'present' + * entry. + * The SLC_BLKID_ALL is keyed from SLC0. Need to access + * the NUM_MEMBUS feature to see how many are physically + * present. 
+ */ + uiNumUnits = RGX_GET_FEATURE_VALUE(psDevInfo, NUM_MEMBUS); + break; + case RGX_CNTBLK_ID_SLCBANK1: + case RGX_CNTBLK_ID_SLCBANK2: + case RGX_CNTBLK_ID_SLCBANK3: + /* These are contained within SLCBANK_ALL block */ + continue; + default: + uiNumUnits = sCntBlkRtInfo.uiNumUnits; + break; + } + bOk &= RGXServerFeatureFlagsToHWPerfFlagsAddBlock(psBlocks, pui16Count, psCntBlkInfo->uiCntBlkIdBase, RGX_CNTBLK_COUNTERS_MAX, uiNumUnits); + } + } + + /* If this fails, consider why the static_assert didn't fail, and consider increasing RGX_HWPERF_MAX_BVNC_BLOCK_LEN */ + PVR_ASSERT(bOk); + + // Zero the remaining entries + uiCount = *pui16Count; + OSDeviceMemSet(&psBlocks[uiCount], 0, (RGX_HWPERF_MAX_BVNC_BLOCK_LEN - uiCount) * sizeof(*psBlocks)); + } + + return PVRSRV_OK; +} + +/* + PVRSRVRGXConfigureHWPerfBlocksKM + */ +PVRSRV_ERROR PVRSRVRGXConfigureHWPerfBlocksKM( + CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 ui32CtrlWord, + IMG_UINT32 ui32ArrayLen, + RGX_HWPERF_CONFIG_CNTBLK * psBlockConfigs) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_KCCB_CMD sKccbCmd; + DEVMEM_MEMDESC* psFwBlkConfigsMemDesc; + RGX_HWPERF_CONFIG_CNTBLK* psFwArray; + IMG_UINT32 ui32kCCBCommandSlot; + PVRSRV_RGXDEV_INFO *psDevice; + + PVR_LOG_RETURN_IF_FALSE(psDeviceNode != NULL, "psDeviceNode is NULL", + PVRSRV_ERROR_INVALID_PARAMS); + + psDevice = psDeviceNode->pvDevice; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + PVR_LOG_RETURN_IF_FALSE(ui32ArrayLen > 0, "ui32ArrayLen is 0", + PVRSRV_ERROR_INVALID_PARAMS); + PVR_LOG_RETURN_IF_FALSE(psBlockConfigs != NULL, "psBlockConfigs is NULL", + PVRSRV_ERROR_INVALID_PARAMS); + + PVR_DPF_ENTERED; + + /* Fill in the command structure with the parameters needed + */ + sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS; + sKccbCmd.uCmdData.sHWPerfCfgEnableBlks.ui32CtrlWord = ui32CtrlWord; + sKccbCmd.uCmdData.sHWPerfCfgEnableBlks.ui32NumBlocks = 
ui32ArrayLen; + + eError = DevmemFwAllocate(psDevice, + sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen, + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | + PVRSRV_MEMALLOCFLAG_UNCACHED | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), + "FwHWPerfCountersConfigBlock", + &psFwBlkConfigsMemDesc); + PVR_LOG_RETURN_IF_ERROR(eError, "DevmemFwAllocate"); + + eError = RGXSetFirmwareAddress(&sKccbCmd.uCmdData.sHWPerfCfgEnableBlks.sBlockConfigs, + psFwBlkConfigsMemDesc, 0, RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", fail1); + + eError = DevmemAcquireCpuVirtAddr(psFwBlkConfigsMemDesc, (void **)&psFwArray); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", fail2); + + OSDeviceMemCopy(psFwArray, psBlockConfigs, sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen); + DevmemPDumpLoadMem(psFwBlkConfigsMemDesc, + 0, + sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen, + PDUMP_FLAGS_CONTINUOUS); + + /* Ask the FW to carry out the HWPerf configuration command + */ + eError = RGXScheduleCommandAndGetKCCBSlot(psDevice, + RGXFWIF_DM_GP, + &sKccbCmd, + 0, + PDUMP_FLAGS_CONTINUOUS, + &ui32kCCBCommandSlot); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", fail2); + + /* Wait for FW to complete */ + eError = RGXWaitForKCCBSlotUpdate(psDevice, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", fail3); + + /* Release temporary memory used for block configuration + */ + RGXUnsetFirmwareAddress(psFwBlkConfigsMemDesc); + DevmemReleaseCpuVirtAddr(psFwBlkConfigsMemDesc); + DevmemFwUnmapAndFree(psDevice, psFwBlkConfigsMemDesc); + + PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks configured and ENABLED", ui32ArrayLen)); + + PVR_DPF_RETURN_OK; + +fail3: + 
DevmemReleaseCpuVirtAddr(psFwBlkConfigsMemDesc); +fail2: + RGXUnsetFirmwareAddress(psFwBlkConfigsMemDesc); +fail1: + DevmemFwUnmapAndFree(psDevice, psFwBlkConfigsMemDesc); + + PVR_DPF_RETURN_RC(eError); +} + +/****************************************************************************** + * Currently only implemented on Linux. Feature can be enabled to provide + * an interface to 3rd-party kernel modules that wish to access the + * HWPerf data. The API is documented in the rgxapi_km.h header and + * the rgx_hwperf* headers. + *****************************************************************************/ + +/* Internal HWPerf kernel connection/device data object to track the state + * of a client session. + */ +typedef struct +{ + PVRSRV_DEVICE_NODE* psRgxDevNode; + PVRSRV_RGXDEV_INFO* psRgxDevInfo; + + /* TL Open/close state */ + IMG_HANDLE hSD[RGX_HWPERF_MAX_STREAM_ID]; + + /* TL Acquire/release state */ + IMG_PBYTE pHwpBuf[RGX_HWPERF_MAX_STREAM_ID]; /*!< buffer returned to user in acquire call */ + IMG_PBYTE pHwpBufEnd[RGX_HWPERF_MAX_STREAM_ID]; /*!< pointer to end of HwpBuf */ + IMG_PBYTE pTlBuf[RGX_HWPERF_MAX_STREAM_ID]; /*!< buffer obtained via TlAcquireData */ + IMG_PBYTE pTlBufPos[RGX_HWPERF_MAX_STREAM_ID]; /*!< initial position in TlBuf to acquire packets */ + IMG_PBYTE pTlBufRead[RGX_HWPERF_MAX_STREAM_ID]; /*!< pointer to the last packet read */ + IMG_UINT32 ui32AcqDataLen[RGX_HWPERF_MAX_STREAM_ID]; /*!< length of acquired TlBuf */ + IMG_BOOL bRelease[RGX_HWPERF_MAX_STREAM_ID]; /*!< used to determine whether or not to release currently held TlBuf */ + + +} RGX_KM_HWPERF_DEVDATA; + +PVRSRV_ERROR RGXHWPerfConfigureCounters( + RGX_HWPERF_CONNECTION *psHWPerfConnection, + IMG_UINT32 ui32CtrlWord, + IMG_UINT32 ui32NumBlocks, + RGX_HWPERF_CONFIG_CNTBLK* asBlockConfigs) +{ + PVRSRV_ERROR eError; + RGX_KM_HWPERF_DEVDATA* psDevData; + RGX_HWPERF_DEVICE *psHWPerfDev; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + /* Validate input argument 
values supplied by the caller */ + if (!psHWPerfConnection || ui32NumBlocks==0 || !asBlockConfigs) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (ui32NumBlocks > RGXFWIF_HWPERF_CTRL_BLKS_MAX) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psHWPerfDev = psHWPerfConnection->psHWPerfDevList; + + while (psHWPerfDev) + { + psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData; + + /* Call the internal server API */ + eError = PVRSRVRGXConfigureHWPerfBlocksKM(NULL, + psDevData->psRgxDevNode, + ui32CtrlWord, + ui32NumBlocks, + asBlockConfigs); + + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVRGXConfigureHWPerfBlocksKM"); + + psHWPerfDev = psHWPerfDev->psNext; + } + + return PVRSRV_OK; +} + +/****************************************************************************** + End of file (rgxhwperf.c) + ******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgxhwperf.h b/drivers/gpu/drm/phytium/octopus/rgxhwperf.h new file mode 100644 index 000000000000..d594afca88bb --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxhwperf.h @@ -0,0 +1,60 @@ +/*************************************************************************/ /*! +@File +@Title RGX HW Performance header file +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Header for the RGX HWPerf functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RGXHWPERF_H_ +#define RGXHWPERF_H_ + +#include "rgxhwperf_common.h" + +/****************************************************************************** + * RGX HW Performance Profiling API(s) - Volcanic specific + *****************************************************************************/ + +PVRSRV_ERROR PVRSRVRGXConfigureHWPerfBlocksKM( + CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 ui32CtrlWord, + IMG_UINT32 ui32ArrayLen, + RGX_HWPERF_CONFIG_CNTBLK * psBlockConfigs); + +#endif /* RGXHWPERF_H_ */ diff --git a/drivers/gpu/drm/phytium/octopus/rgxhwperf_common.c b/drivers/gpu/drm/phytium/octopus/rgxhwperf_common.c new file mode 100644 index 000000000000..4b2ef8fe3d02 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxhwperf_common.c @@ -0,0 +1,3576 @@ +/*************************************************************************/ /*! +@File +@Title RGX HW Performance implementation +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description RGX HW Performance implementation +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ /**************************************************************************/ + +//#define PVR_DPF_FUNCTION_TRACE_ON 1 +#undef PVR_DPF_FUNCTION_TRACE_ON + +#include "img_defs.h" +#include "pvr_debug.h" +#include "rgxdevice.h" +#include "pvrsrv_error.h" +#include "pvr_notifier.h" +#include "osfunc.h" +#include "allocmem.h" + +#include "pvrsrv.h" +#include "pvrsrv_tlstreams.h" +#include "pvrsrv_tlcommon.h" +#include "tlclient.h" +#include "tlstream.h" + +#include "rgxhwperf.h" +#include "rgxapi_km.h" +#include "rgxfwutils.h" +#include "rgxtimecorr.h" +#include "devicemem.h" +#include "devicemem_pdump.h" +#include "pdump_km.h" +#include "pvrsrv_apphint.h" +#include "process_stats.h" +#include "rgx_hwperf_table.h" +#include "rgxinit.h" + +#include "info_page_defs.h" + +/* This is defined by default to enable producer callbacks. + * Clients of the TL interface can disable the use of the callback + * with PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK. */ +#define SUPPORT_TL_PRODUCER_CALLBACK 1 + +/* Maximum enum value to prevent access to RGX_HWPERF_STREAM_ID2_CLIENT stream */ +#define RGX_HWPERF_MAX_STREAM_ID (RGX_HWPERF_STREAM_ID2_CLIENT) + +/* Defines size of buffers returned from acquire/release calls */ +#define FW_STREAM_BUFFER_SIZE (0x80000) +#define HOST_STREAM_BUFFER_SIZE (0x20000) + +/* Must be at least as large as two tl packets of maximum size */ +static_assert(HOST_STREAM_BUFFER_SIZE >= (PVRSRVTL_MAX_PACKET_SIZE<<1), + "HOST_STREAM_BUFFER_SIZE is less than (PVRSRVTL_MAX_PACKET_SIZE<<1)"); +static_assert(FW_STREAM_BUFFER_SIZE >= (PVRSRVTL_MAX_PACKET_SIZE<<1), + "FW_STREAM_BUFFER_SIZE is less than (PVRSRVTL_MAX_PACKET_SIZE<<1)"); + +IMG_INTERNAL /*static inline*/ IMG_UINT32 RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **); + +static inline IMG_UINT32 +RGXHWPerfGetPackets(IMG_UINT32 ui32BytesExp, + IMG_UINT32 ui32AllowedSize, + RGX_PHWPERF_V2_PACKET_HDR psCurPkt ) +{ + IMG_UINT32 sizeSum = 0; + + /* Traverse the array to find how many packets 
will fit in the available space. */ + while ( sizeSum < ui32BytesExp && + sizeSum + RGX_HWPERF_GET_SIZE(psCurPkt) < ui32AllowedSize ) + { + sizeSum += RGX_HWPERF_GET_SIZE(psCurPkt); + psCurPkt = RGX_HWPERF_GET_NEXT_PACKET(psCurPkt); + } + + return sizeSum; +} + +static inline void +RGXSuspendHWPerfL2DataCopy(PVRSRV_RGXDEV_INFO* psDeviceInfo, + IMG_BOOL bIsReaderConnected) +{ + if (!bIsReaderConnected) + { + PVR_DPF((PVR_DBG_WARNING, "%s : HWPerf FW events enabled but host buffer for FW events is full " + "and no reader is currently connected, suspending event collection. " + "Connect a reader or restart driver to avoid event loss.", __func__)); + psDeviceInfo->bSuspendHWPerfL2DataCopy = IMG_TRUE; + } +} + +/* + RGXHWPerfCopyDataL1toL2 + */ +static IMG_UINT32 RGXHWPerfCopyDataL1toL2(PVRSRV_RGXDEV_INFO* psDeviceInfo, + IMG_BYTE *pbFwBuffer, + IMG_UINT32 ui32BytesExp) +{ + IMG_HANDLE hHWPerfStream = psDeviceInfo->hHWPerfStream; + IMG_BYTE * pbL2Buffer; + IMG_UINT32 ui32L2BufFree; + IMG_UINT32 ui32BytesCopied = 0; + IMG_UINT32 ui32BytesExpMin = RGX_HWPERF_GET_SIZE(RGX_HWPERF_GET_PACKET(pbFwBuffer)); + PVRSRV_ERROR eError; + IMG_BOOL bIsReaderConnected; + + /* HWPERF_MISR_FUNC_DEBUG enables debug code for investigating HWPerf issues */ +#ifdef HWPERF_MISR_FUNC_DEBUG + static IMG_UINT32 gui32Ordinal = IMG_UINT32_MAX; +#endif + + PVR_DPF_ENTERED; + +#ifdef HWPERF_MISR_FUNC_DEBUG + PVR_DPF((PVR_DBG_VERBOSE, "EVENTS to copy from 0x%p length:%05d", + pbFwBuffer, ui32BytesExp)); +#endif + +#ifdef HWPERF_MISR_FUNC_DEBUG + { + /* Check the incoming buffer of data has not lost any packets */ + IMG_BYTE *pbFwBufferIter = pbFwBuffer; + IMG_BYTE *pbFwBufferEnd = pbFwBuffer+ui32BytesExp; + do + { + RGX_HWPERF_V2_PACKET_HDR *asCurPos = RGX_HWPERF_GET_PACKET(pbFwBufferIter); + IMG_UINT32 ui32CurOrdinal = asCurPos->ui32Ordinal; + if (gui32Ordinal != IMG_UINT32_MAX) + { + if ((gui32Ordinal+1) != ui32CurOrdinal) + { + if (gui32Ordinal < ui32CurOrdinal) + { + PVR_DPF((PVR_DBG_WARNING, + 
"HWPerf [%p] packets lost (%u packets) between ordinal %u...%u", + pbFwBufferIter, + ui32CurOrdinal - gui32Ordinal - 1, + gui32Ordinal, + ui32CurOrdinal)); + } + else + { + PVR_DPF((PVR_DBG_WARNING, + "HWPerf [%p] packet ordinal out of sequence last: %u, current: %u", + pbFwBufferIter, + gui32Ordinal, + ui32CurOrdinal)); + } + } + } + gui32Ordinal = asCurPos->ui32Ordinal; + pbFwBufferIter += RGX_HWPERF_GET_SIZE(asCurPos); + } while (pbFwBufferIter < pbFwBufferEnd); + } +#endif + + if (ui32BytesExp > psDeviceInfo->ui32L2BufMaxPacketSize) + { + IMG_UINT32 sizeSum = RGXHWPerfGetPackets(ui32BytesExp, + psDeviceInfo->ui32L2BufMaxPacketSize, + RGX_HWPERF_GET_PACKET(pbFwBuffer)); + + if (0 != sizeSum) + { + ui32BytesExp = sizeSum; + } + else + { + PVR_DPF((PVR_DBG_ERROR, "Failed to write data into host buffer as " + "packet is too big and hence it breaches TL " + "packet size limit (TLBufferSize / 2.5)")); + goto e0; + } + } + + /* Try submitting all data in one TL packet. */ + eError = TLStreamReserve2(hHWPerfStream, + &pbL2Buffer, + (size_t)ui32BytesExp, ui32BytesExpMin, + &ui32L2BufFree, &bIsReaderConnected); + if ( eError == PVRSRV_OK ) + { + OSDeviceMemCopy( pbL2Buffer, pbFwBuffer, (size_t)ui32BytesExp ); + eError = TLStreamCommit(hHWPerfStream, (size_t)ui32BytesExp); + if ( eError != PVRSRV_OK ) + { + PVR_DPF((PVR_DBG_ERROR, + "TLStreamCommit() failed (%d) in %s(), unable to copy packet from L1 to L2 buffer", + eError, __func__)); + goto e0; + } + /* Data were successfully written */ + ui32BytesCopied = ui32BytesExp; + } + else if (eError == PVRSRV_ERROR_STREAM_FULL) + { + /* There was not enough space for all data, copy as much as possible */ + IMG_UINT32 sizeSum = RGXHWPerfGetPackets(ui32BytesExp, ui32L2BufFree, RGX_HWPERF_GET_PACKET(pbFwBuffer)); + + PVR_DPF((PVR_DBG_MESSAGE, "Unable to reserve space (%d) in host buffer on first attempt, remaining free space: %d", ui32BytesExp, ui32L2BufFree)); + + if ( 0 != sizeSum ) + { + eError = TLStreamReserve( 
hHWPerfStream, &pbL2Buffer, (size_t)sizeSum); + + if ( eError == PVRSRV_OK ) + { + OSDeviceMemCopy( pbL2Buffer, pbFwBuffer, (size_t)sizeSum ); + eError = TLStreamCommit(hHWPerfStream, (size_t)sizeSum); + if ( eError != PVRSRV_OK ) + { + PVR_DPF((PVR_DBG_ERROR, + "TLStreamCommit() failed (%d) in %s(), unable to copy packet from L1 to L2 buffer", + eError, __func__)); + goto e0; + } + /* sizeSum bytes of hwperf packets have been successfully written */ + ui32BytesCopied = sizeSum; + } + else if ( PVRSRV_ERROR_STREAM_FULL == eError ) + { + PVR_DPF((PVR_DBG_WARNING, "Cannot write HWPerf packet into host buffer, check data in case of packet loss, remaining free space: %d", ui32L2BufFree)); + RGXSuspendHWPerfL2DataCopy(psDeviceInfo, bIsReaderConnected); + } + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "Cannot find space in host buffer, check data in case of packet loss, remaining free space: %d", ui32L2BufFree)); + RGXSuspendHWPerfL2DataCopy(psDeviceInfo, bIsReaderConnected); + } + } + if ( PVRSRV_OK != eError && /* Some other error occurred */ + PVRSRV_ERROR_STREAM_FULL != eError ) /* Full error handled by caller, we returning the copied bytes count to caller */ + { + PVR_DPF((PVR_DBG_ERROR, + "HWPerf enabled: Unexpected Error ( %d ) while copying FW buffer to TL buffer.", + eError)); + } + +e0: + /* Return the remaining packets left to be transported. */ + PVR_DPF_RETURN_VAL(ui32BytesCopied); +} + + +static INLINE IMG_UINT32 RGXHWPerfAdvanceRIdx( + const IMG_UINT32 ui32BufSize, + const IMG_UINT32 ui32Pos, + const IMG_UINT32 ui32Size) +{ + return ( ui32Pos + ui32Size < ui32BufSize ? 
ui32Pos + ui32Size : 0 ); +} + + +/* + RGXHWPerfDataStore + */ +static IMG_UINT32 RGXHWPerfDataStore(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + IMG_BYTE* psHwPerfInfo = psDevInfo->psRGXFWIfHWPerfBuf; + IMG_UINT32 ui32SrcRIdx, ui32SrcWIdx, ui32SrcWrapCount; + IMG_UINT32 ui32BytesExp = 0, ui32BytesCopied = 0, ui32BytesCopiedSum = 0; +#ifdef HWPERF_MISR_FUNC_DEBUG + IMG_UINT32 ui32BytesExpSum = 0; +#endif + + PVR_DPF_ENTERED; + + /* Caller should check this member is valid before calling */ + PVR_ASSERT(psDevInfo->hHWPerfStream); + + if (psDevInfo->bSuspendHWPerfL2DataCopy) + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s : Copying data to host buffer for FW events is " + "suspended. Start HWPerf consumer or restart driver if " + "HWPerf FW events are needed", __func__)); + + PVR_DPF_RETURN_VAL(ui32BytesCopiedSum); + } + + /* Get a copy of the current + * read (first packet to read) + * write (empty location for the next write to be inserted) + * WrapCount (size in bytes of the buffer at or past end) + * indexes of the FW buffer */ + ui32SrcRIdx = psFwSysData->ui32HWPerfRIdx; + ui32SrcWIdx = psFwSysData->ui32HWPerfWIdx; + OSMemoryBarrier(); + ui32SrcWrapCount = psFwSysData->ui32HWPerfWrapCount; + +#if defined(HWPERF_MISR_FUNC_DEBUG) || defined(EMULATOR) + { + IMG_UINT32 ui32SrcBufSize = psDevInfo->ui32RGXFWIfHWPerfBufSize; + + if (ui32SrcRIdx >= ui32SrcBufSize || ui32SrcWIdx >= ui32SrcBufSize) + { + PVR_DPF((PVR_DBG_ERROR, "%s : Invalid read/write offsets found! srcRIdx:%u srcWIdx:%u srcBufSize:%u", + __func__, ui32SrcRIdx, ui32SrcWIdx, ui32SrcBufSize)); + + PVR_DPF_RETURN_VAL(ui32BytesCopiedSum); + } + } +#endif + + /* Is there any data in the buffer not yet retrieved? */ + if ( ui32SrcRIdx != ui32SrcWIdx ) + { + PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfDataStore EVENTS found srcRIdx:%d srcWIdx: %d", ui32SrcRIdx, ui32SrcWIdx)); + + /* Is the write position higher than the read position? 
*/ + if ( ui32SrcWIdx > ui32SrcRIdx ) + { + /* Yes, buffer has not wrapped */ + ui32BytesExp = ui32SrcWIdx - ui32SrcRIdx; +#ifdef HWPERF_MISR_FUNC_DEBUG + ui32BytesExpSum += ui32BytesExp; +#endif + ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo, + psHwPerfInfo + ui32SrcRIdx, + ui32BytesExp); + + /* Advance the read index and the free bytes counter by the number + * of bytes transported. Items will be left in buffer if not all data + * could be transported. Exit to allow buffer to drain. */ + psFwSysData->ui32HWPerfRIdx = RGXHWPerfAdvanceRIdx( + psDevInfo->ui32RGXFWIfHWPerfBufSize, ui32SrcRIdx, + ui32BytesCopied); + + ui32BytesCopiedSum += ui32BytesCopied; + } + /* No, buffer has wrapped and write position is behind read position */ + else + { + /* Byte count equal to + * number of bytes from read position to the end of the buffer, + * + data in the extra space in the end of the buffer. */ + ui32BytesExp = ui32SrcWrapCount - ui32SrcRIdx; + +#ifdef HWPERF_MISR_FUNC_DEBUG + ui32BytesExpSum += ui32BytesExp; +#endif + /* Attempt to transfer the packets to the TL stream buffer */ + ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo, + psHwPerfInfo + ui32SrcRIdx, + ui32BytesExp); + + /* Advance read index as before and Update the local copy of the + * read index as it might be used in the last if branch*/ + ui32SrcRIdx = RGXHWPerfAdvanceRIdx( + psDevInfo->ui32RGXFWIfHWPerfBufSize, ui32SrcRIdx, + ui32BytesCopied); + + /* Update Wrap Count */ + if ( ui32SrcRIdx == 0) + { + psFwSysData->ui32HWPerfWrapCount = psDevInfo->ui32RGXFWIfHWPerfBufSize; + } + psFwSysData->ui32HWPerfRIdx = ui32SrcRIdx; + + ui32BytesCopiedSum += ui32BytesCopied; + + /* If all the data in the end of the array was copied, try copying + * wrapped data in the beginning of the array, assuming there is + * any and the RIdx was wrapped. 
*/ + if ( (ui32BytesCopied == ui32BytesExp) + && (ui32SrcWIdx > 0) + && (ui32SrcRIdx == 0) ) + { + ui32BytesExp = ui32SrcWIdx; +#ifdef HWPERF_MISR_FUNC_DEBUG + ui32BytesExpSum += ui32BytesExp; +#endif + ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo, + psHwPerfInfo, + ui32BytesExp); + /* Advance the FW buffer read position. */ + psFwSysData->ui32HWPerfRIdx = RGXHWPerfAdvanceRIdx( + psDevInfo->ui32RGXFWIfHWPerfBufSize, ui32SrcRIdx, + ui32BytesCopied); + + ui32BytesCopiedSum += ui32BytesCopied; + } + } +#ifdef HWPERF_MISR_FUNC_DEBUG + if (ui32BytesCopiedSum != ui32BytesExpSum) + { + PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfDataStore: FW L1 RIdx:%u. Not all bytes copied to L2: %u bytes out of %u expected", psFwSysData->ui32HWPerfRIdx, ui32BytesCopiedSum, ui32BytesExpSum)); + } +#endif + + } + else + { + PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfDataStore NO EVENTS to transport")); + } + + PVR_DPF_RETURN_VAL(ui32BytesCopiedSum); +} + + +PVRSRV_ERROR RGXHWPerfDataStoreCB(PVRSRV_DEVICE_NODE *psDevInfo) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_RGXDEV_INFO* psRgxDevInfo; + IMG_UINT32 ui32BytesCopied; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + PVR_DPF_ENTERED; + + PVR_ASSERT(psDevInfo); + psRgxDevInfo = psDevInfo->pvDevice; + + /* Store FW event data if the destination buffer exists.*/ + if (psRgxDevInfo->hHWPerfStream != (IMG_HANDLE) NULL) + { + OSLockAcquire(psRgxDevInfo->hHWPerfLock); + ui32BytesCopied = RGXHWPerfDataStore(psRgxDevInfo); + if ( ui32BytesCopied ) + { /* Signal consumers that packets may be available to read when + * running from a HW kick, not when called by client APP thread + * via the transport layer CB as this can lead to stream + * corruption.*/ + eError = TLStreamSync(psRgxDevInfo->hHWPerfStream); + PVR_ASSERT(eError == PVRSRV_OK); + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfDataStoreCB: Zero bytes copied")); + RGXDEBUG_PRINT_IRQ_COUNT(psRgxDevInfo); + } + OSLockRelease(psRgxDevInfo->hHWPerfLock); + } + + + PVR_DPF_RETURN_OK; +} + + 
+/* Currently supported by default */ +#if defined(SUPPORT_TL_PRODUCER_CALLBACK) +static PVRSRV_ERROR RGXHWPerfTLCB(IMG_HANDLE hStream, + IMG_UINT32 ui32ReqOp, IMG_UINT32* ui32Resp, void* pvUser) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_RGXDEV_INFO* psRgxDevInfo = (PVRSRV_RGXDEV_INFO*)pvUser; + + PVR_UNREFERENCED_PARAMETER(hStream); + PVR_UNREFERENCED_PARAMETER(ui32Resp); + + PVR_ASSERT(psRgxDevInfo); + + switch (ui32ReqOp) + { + case TL_SOURCECB_OP_CLIENT_EOS: + /* Keep HWPerf resource init check and use of + * resources atomic, they may not be freed during use + */ + + /* This solution is for avoiding a deadlock situation where - + * in DoTLStreamReserve(), writer has acquired HWPerfLock and + * ReadLock and is waiting on ReadPending (which will be reset + * by reader), And + * the reader after setting ReadPending in TLStreamAcquireReadPos(), + * is waiting for HWPerfLock in RGXHWPerfTLCB(). + * So here in RGXHWPerfTLCB(), if HWPerfLock is already acquired we + * will return to the reader without waiting to acquire HWPerfLock. + */ + if (!OSTryLockAcquire(psRgxDevInfo->hHWPerfLock)) + { + PVR_DPF((PVR_DBG_MESSAGE, "hHWPerfLock is already acquired, a write " + "operation might already be in process")); + return PVRSRV_OK; + } + + if (psRgxDevInfo->hHWPerfStream != (IMG_HANDLE) NULL) + { + (void) RGXHWPerfDataStore(psRgxDevInfo); + } + OSLockRelease(psRgxDevInfo->hHWPerfLock); + break; + + default: + break; + } + + return eError; +} +#endif + + +static void RGXHWPerfL1BufferDeinit(PVRSRV_RGXDEV_INFO *psRgxDevInfo) +{ + if (psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc) + { + if (psRgxDevInfo->psRGXFWIfHWPerfBuf != NULL) + { + DevmemReleaseCpuVirtAddr(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc); + psRgxDevInfo->psRGXFWIfHWPerfBuf = NULL; + } + DevmemFwUnmapAndFree(psRgxDevInfo, psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc); + psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc = NULL; + } +} + +/*************************************************************************/ /*! 
+@Function RGXHWPerfInit + +@Description Called during driver init for initialization of HWPerf module + in the Rogue device driver. This function keeps allocated + only the minimal necessary resources, which are required for + functioning of HWPerf server module. + +@Input psRgxDevInfo RGX Device Info + +@Return PVRSRV_ERROR + */ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo) +{ + PVRSRV_ERROR eError; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + PVR_DPF_ENTERED; + + /* expecting a valid device info */ + PVR_ASSERT(psRgxDevInfo); + + /* Create a lock for HWPerf server module used for serializing, L1 to L2 + * copy calls (e.g. in case of TL producer callback) and L1, L2 resource + * allocation */ + eError = OSLockCreate(&psRgxDevInfo->hHWPerfLock); + PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate"); + + /* avoid uninitialised data */ + psRgxDevInfo->hHWPerfStream = (IMG_HANDLE) NULL; + psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc = NULL; + + PVR_DPF_RETURN_OK; +} + +/*************************************************************************/ /*! +@Function RGXHWPerfIsInitRequired + +@Description Returns true if the HWperf firmware buffer (L1 buffer) and host + driver TL buffer (L2 buffer) are not already allocated. Caller + must possess hHWPerfLock lock before calling this + function so the state tested is not inconsistent. + +@Input psRgxDevInfo RGX Device Info, on which init requirement is + checked. + +@Return IMG_BOOL Whether initialization (allocation) is required + */ /**************************************************************************/ +static INLINE IMG_BOOL RGXHWPerfIsInitRequired(PVRSRV_RGXDEV_INFO *psRgxDevInfo) +{ + PVR_ASSERT(OSLockIsLocked(psRgxDevInfo->hHWPerfLock)); + +#if !defined(NO_HARDWARE) + /* Both L1 and L2 buffers are required (for HWPerf functioning) on driver + * built for actual hardware (TC, EMU, etc.) 
+ */ + if (psRgxDevInfo->hHWPerfStream == (IMG_HANDLE) NULL) + { + /* The allocation API (RGXHWPerfInitOnDemandResources) allocates + * device memory for both L1 and L2 without any checks. Hence, + * either both should be allocated or both be NULL. + * + * In-case this changes in future (for e.g. a situation where one + * of the 2 buffers is already allocated and other is required), + * add required checks before allocation calls to avoid memory leaks. + */ + PVR_ASSERT(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc == NULL); + return IMG_TRUE; + } + PVR_ASSERT(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc != NULL); +#else + /* On a NO-HW driver L2 is not allocated. So, no point in checking its + * allocation */ + if (psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc == NULL) + { + return IMG_TRUE; + } +#endif + return IMG_FALSE; +} +#if !defined(NO_HARDWARE) +static void _HWPerfFWOnReaderOpenCB(void *pvArg) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_RGXDEV_INFO* psRgxDevInfo = (PVRSRV_RGXDEV_INFO*) pvArg; + PVRSRV_DEVICE_NODE* psDevNode = (PVRSRV_DEVICE_NODE*) psRgxDevInfo->psDeviceNode; + RGXFWIF_KCCB_CMD sKccbCmd; + IMG_UINT32 ui32kCCBCommandSlot; + + /* Clear any previously suspended state for bSuspendHWPerfL2DataCopy as we + * now have a reader attached so the data will be delivered upstream. 
*/ + if (psRgxDevInfo->bSuspendHWPerfL2DataCopy) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Resuming HWPerf FW event collection.", + __func__)); + psRgxDevInfo->bSuspendHWPerfL2DataCopy = IMG_FALSE; + } + + sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG; + sKccbCmd.uCmdData.sHWPerfCtrl.eOpCode = RGXFWIF_HWPERF_CTRL_EMIT_FEATURES_EV; + sKccbCmd.uCmdData.sHWPerfCtrl.ui64Mask = 0; + + eError = RGXScheduleCommandAndGetKCCBSlot(psDevNode->pvDevice, + RGXFWIF_DM_GP, + &sKccbCmd, + 0, + PDUMP_FLAGS_CONTINUOUS, + &ui32kCCBCommandSlot); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to generate feature packet in " + "firmware (error = %d)", __func__, eError)); + return; + } + + eError = RGXWaitForKCCBSlotUpdate(psRgxDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + PVR_LOG_RETURN_VOID_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate"); +} +#endif +/*************************************************************************/ /*! +@Function RGXHWPerfInitOnDemandResources + +@Description This function allocates the HWperf firmware buffer (L1 buffer) + and host driver TL buffer (L2 buffer) if HWPerf is enabled at + driver load time. Otherwise, these buffers are allocated + on-demand as and when required. Caller + must possess hHWPerfLock lock before calling this + function so the state tested is not inconsistent if called + outside of driver initialisation. 
+ +@Input psRgxDevInfo RGX Device Info, on which init is done + +@Return PVRSRV_ERROR + */ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfInitOnDemandResources(PVRSRV_RGXDEV_INFO* psRgxDevInfo) +{ + IMG_HANDLE hStream = NULL; /* Init required for noHW */ + PVRSRV_ERROR eError; + IMG_UINT32 ui32L2BufferSize = 0; + PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags; + IMG_CHAR pszHWPerfStreamName[sizeof(PVRSRV_TL_HWPERF_RGX_FW_STREAM) + 5]; /* 5 seems reasonable as it can hold + names up to "hwperf_9999", which is enough */ + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + PVR_DPF_ENTERED; + + /* Create the L1 HWPerf buffer on demand */ + uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) + | PVRSRV_MEMALLOCFLAG_GPU_READABLE + | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE + | PVRSRV_MEMALLOCFLAG_CPU_READABLE + | PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE + | PVRSRV_MEMALLOCFLAG_UNCACHED +#if defined(PDUMP) /* Helps show where the packet data ends */ + | PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC +#else /* Helps show corruption issues in driver-live */ + | PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC +#endif + | PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN); + + /* Allocate HWPerf FW L1 buffer */ + eError = DevmemFwAllocate(psRgxDevInfo, + /* Pad it enough to hold the biggest variable sized packet. */ + psRgxDevInfo->ui32RGXFWIfHWPerfBufSize+RGX_HWPERF_MAX_PACKET_SIZE, + uiMemAllocFlags, + "FwHWPerfBuffer", + &psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate kernel fw hwperf buffer (%u)", + __func__, eError)); + goto e0; + } + + /* Expecting the RuntimeCfg structure is mapped into CPU virtual memory. 
+ * Also, make sure the FW address is not already set */ + PVR_ASSERT(psRgxDevInfo->psRGXFWIfRuntimeCfg && psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfBuf.ui32Addr == 0x0); + + /* Meta cached flag removed from this allocation as it was found + * FW performance was better without it. */ + eError = RGXSetFirmwareAddress(&psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfBuf, + psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc, + 0, RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", e0); + +#if defined(RGX_FEATURE_HWPERF_VOLCANIC) + RGXSetMetaDMAAddress(&psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfDMABuf, + psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc, + &psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfBuf, + 0); +#endif + + /* flush write buffers for psRgxDevInfo->psRGXFWIfRuntimeCfg */ + OSWriteMemoryBarrier(); + + eError = DevmemAcquireCpuVirtAddr(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc, + (void**)&psRgxDevInfo->psRGXFWIfHWPerfBuf); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire kernel hwperf buffer (%u)", + __func__, eError)); + goto e0; + } + + /* On NO-HW driver, there is no MISR installed to copy data from L1 to L2. Hence, + * L2 buffer is not allocated */ +#if !defined(NO_HARDWARE) + /* Host L2 HWPERF buffer size in bytes must be bigger than the L1 buffer + * accessed by the FW. The MISR may try to write one packet the size of the L1 + * buffer in some scenarios. When logging is enabled in the MISR, it can be seen + * if the L2 buffer hits a full condition. The closer in size the L2 and L1 buffers + * are the more chance of this happening. + * Size chosen to allow MISR to write an L1 sized packet and for the client + * application/daemon to drain a L1 sized packet e.g. ~ 1.5*L1. 
+ */ + ui32L2BufferSize = psRgxDevInfo->ui32RGXFWIfHWPerfBufSize + + (psRgxDevInfo->ui32RGXFWIfHWPerfBufSize>>1); + + /* form the HWPerf stream name, corresponding to this DevNode; which can make sense in the UM */ + if (OSSNPrintf(pszHWPerfStreamName, sizeof(pszHWPerfStreamName), "%s%d", + PVRSRV_TL_HWPERF_RGX_FW_STREAM, + psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID) < 0) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to form HWPerf stream name for device %d", + __func__, + psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + eError = TLStreamCreate(&hStream, + pszHWPerfStreamName, + ui32L2BufferSize, + TL_OPMODE_DROP_NEWER | TL_FLAG_NO_SIGNAL_ON_COMMIT, + _HWPerfFWOnReaderOpenCB, psRgxDevInfo, +#if !defined(SUPPORT_TL_PRODUCER_CALLBACK) + NULL, NULL +#else + /* Not enabled by default */ + RGXHWPerfTLCB, psRgxDevInfo +#endif + ); + PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamCreate", e1); + + eError = TLStreamSetNotifStream(hStream, + PVRSRVGetPVRSRVData()->hTLCtrlStream); + /* we can still discover host stream so leave it as is and just log error */ + PVR_LOG_IF_ERROR(eError, "TLStreamSetNotifStream"); + + /* send the event here because host stream is implicitly opened for write + * in TLStreamCreate and TLStreamOpen is never called (so the event is + * never emitted) */ + TLStreamMarkStreamOpen(hStream); + + { + TL_STREAM_INFO sTLStreamInfo; + + TLStreamInfo(hStream, &sTLStreamInfo); + psRgxDevInfo->ui32L2BufMaxPacketSize = sTLStreamInfo.maxTLpacketSize; + + psRgxDevInfo->bSuspendHWPerfL2DataCopy = IMG_FALSE; + } + + PVR_DPF((PVR_DBG_MESSAGE, "HWPerf buffer size in bytes: L1: %d L2: %d", + psRgxDevInfo->ui32RGXFWIfHWPerfBufSize, ui32L2BufferSize)); + +#else /* defined(NO_HARDWARE) */ + PVR_UNREFERENCED_PARAMETER(ui32L2BufferSize); + PVR_UNREFERENCED_PARAMETER(RGXHWPerfTLCB); + PVR_UNREFERENCED_PARAMETER(pszHWPerfStreamName); + ui32L2BufferSize = 0; +#endif + + psRgxDevInfo->hHWPerfStream = hStream; + PVR_DPF_RETURN_OK; + 
+#if !defined(NO_HARDWARE) +e1: /* L2 buffer initialisation failures */ + psRgxDevInfo->hHWPerfStream = NULL; +#endif +e0: /* L1 buffer initialisation failures */ + RGXHWPerfL1BufferDeinit(psRgxDevInfo); + + PVR_DPF_RETURN_RC(eError); +} + + +void RGXHWPerfDeinit(PVRSRV_RGXDEV_INFO *psRgxDevInfo) +{ + IMG_HANDLE hStream = psRgxDevInfo->hHWPerfStream; + + PVRSRV_VZ_RETN_IF_MODE(GUEST); + + PVR_DPF_ENTERED; + + PVR_ASSERT(psRgxDevInfo); + psRgxDevInfo->hHWPerfStream = NULL; + + /* Clean up the L2 buffer stream object if allocated */ + if (hStream) + { + /* send the event here because host stream is implicitly opened for + * write in TLStreamCreate and TLStreamClose is never called (so the + * event is never emitted) */ + TLStreamMarkStreamClose(hStream); + TLStreamClose(hStream); + } + + /* Cleanup L1 buffer resources */ + RGXHWPerfL1BufferDeinit(psRgxDevInfo); + + /* Cleanup the HWPerf server module lock resource */ + if (psRgxDevInfo->hHWPerfLock) + { + OSLockDestroy(psRgxDevInfo->hHWPerfLock); + psRgxDevInfo->hHWPerfLock = NULL; + } + + PVR_DPF_RETURN; +} + + +/****************************************************************************** + * RGX HW Performance Profiling Server API(s) + *****************************************************************************/ + +static PVRSRV_ERROR RGXHWPerfCtrlFwBuffer(const PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_BOOL bToggle, + IMG_UINT64 ui64Mask) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_RGXDEV_INFO* psDevice = psDeviceNode->pvDevice; + RGXFWIF_KCCB_CMD sKccbCmd; + IMG_UINT32 ui32kCCBCommandSlot; + + /* If this method is being used whether to enable or disable + * then the hwperf buffers (host and FW) are likely to be needed + * eventually so create them, also helps unit testing. Buffers + * allocated on demand to reduce RAM foot print on systems not + * needing HWPerf resources. + * Obtain lock first, test and init if required. 
*/ + OSLockAcquire(psDevice->hHWPerfLock); + + if (!psDevice->bFirmwareInitialised) + { + psDevice->ui64HWPerfFilter = ui64Mask; // at least set filter + eError = PVRSRV_ERROR_NOT_INITIALISED; + + PVR_DPF((PVR_DBG_ERROR, + "HWPerf has NOT been initialised yet. Mask has been SET to " + "(%" IMG_UINT64_FMTSPECx ")", + ui64Mask)); + + goto unlock_and_return; + } + + if (RGXHWPerfIsInitRequired(psDevice)) + { + eError = RGXHWPerfInitOnDemandResources(psDevice); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation of on-demand HWPerfFW " + "resources failed", __func__)); + goto unlock_and_return; + } + } + +#if defined(RGX_FEATURE_HWPERF_VOLCANIC) && defined(SUPPORT_POWMON_COMPONENT) + if (RGXPowmonBufferIsInitRequired(psDeviceNode->pvDevice)) + { + /* Allocate power monitoring log buffer if enabled */ + eError = RGXPowmonBufferInitOnDemandResources(psDeviceNode->pvDevice); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation of on-demand power monitoring " + "resources failed", __func__)); + goto unlock_and_return; + } + } +#endif + + /* Unlock here as no further HWPerf resources are used below that would be + * affected if freed by another thread */ + OSLockRelease(psDevice->hHWPerfLock); + + /* Return if the filter is the same */ + if (!bToggle && psDevice->ui64HWPerfFilter == ui64Mask) + goto return_; + + /* Prepare command parameters ... */ + sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG; + sKccbCmd.uCmdData.sHWPerfCtrl.eOpCode = bToggle ? 
RGXFWIF_HWPERF_CTRL_TOGGLE : RGXFWIF_HWPERF_CTRL_SET; + sKccbCmd.uCmdData.sHWPerfCtrl.ui64Mask = ui64Mask; + + /* Ask the FW to carry out the HWPerf configuration command */ + eError = RGXScheduleCommandAndGetKCCBSlot(psDevice, + RGXFWIF_DM_GP, + &sKccbCmd, + 0, + IMG_TRUE, + &ui32kCCBCommandSlot); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set new HWPerfFW filter in " + "firmware (error = %d)", __func__, eError)); + goto return_; + } + + psDevice->ui64HWPerfFilter = bToggle ? + psDevice->ui64HWPerfFilter ^ ui64Mask : ui64Mask; + + /* Wait for FW to complete */ + eError = RGXWaitForKCCBSlotUpdate(psDevice, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", return_); + +#if defined(DEBUG) + if (bToggle) + { + PVR_DPF((PVR_DBG_WARNING, "HWPerfFW events (%" IMG_UINT64_FMTSPECx ") have been TOGGLED", + ui64Mask)); + } + else + { + PVR_DPF((PVR_DBG_WARNING, "HWPerfFW mask has been SET to (%" IMG_UINT64_FMTSPECx ")", + ui64Mask)); + } +#endif + + return PVRSRV_OK; + +unlock_and_return: + OSLockRelease(psDevice->hHWPerfLock); + +return_: + return eError; +} + +#define HWPERF_HOST_MAX_DEFERRED_PACKETS 800 + +static PVRSRV_ERROR RGXHWPerfCtrlHostBuffer(const PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_BOOL bToggle, + IMG_UINT32 ui32Mask) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_RGXDEV_INFO* psDevice = psDeviceNode->pvDevice; +#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) + IMG_UINT32 ui32OldFilter = psDevice->ui32HWPerfHostFilter; +#endif + + OSLockAcquire(psDevice->hLockHWPerfHostStream); + if (psDevice->hHWPerfHostStream == NULL) + { + eError = RGXHWPerfHostInitOnDemandResources(psDevice); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Initialisation of on-demand HWPerfHost resources failed", + __func__)); + OSLockRelease(psDevice->hLockHWPerfHostStream); + return eError; + } + } + + psDevice->ui32HWPerfHostFilter = bToggle ? 
+ psDevice->ui32HWPerfHostFilter ^ ui32Mask : ui32Mask; + + // Deferred creation of host periodic events thread + if (psDevice->ui32HWPerfHostFilter & RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HOST_INFO)) + { + eError = PVRSRVCreateHWPerfHostThread(PVRSRV_APPHINT_HWPERFHOSTTHREADTIMEOUTINMS); + PVR_LOG_IF_ERROR(eError, "PVRSRVCreateHWPerfHostThread"); + } + else if (!(psDevice->ui32HWPerfHostFilter & RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HOST_INFO))) + { + eError = PVRSRVDestroyHWPerfHostThread(); + PVR_LOG_IF_ERROR(eError, "PVRSRVDestroyHWPerfHostThread"); + } + +#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) + // Log deferred events stats if filter changed from non-zero to zero + if ((ui32OldFilter != 0) && (psDevice->ui32HWPerfHostFilter == 0)) + { + PVR_LOG(("HWPerfHost deferred events buffer high-watermark / size: (%u / %u)", + psDevice->ui32DEHighWatermark, HWPERF_HOST_MAX_DEFERRED_PACKETS)); + + PVR_LOG(("HWPerfHost deferred event retries: WaitForAtomicCtxPktHighWatermark(%u) " + "WaitForRightOrdPktHighWatermark(%u)", + psDevice->ui32WaitForAtomicCtxPktHighWatermark, + psDevice->ui32WaitForRightOrdPktHighWatermark)); + } +#endif + + OSLockRelease(psDevice->hLockHWPerfHostStream); + +#if defined(DEBUG) + if (bToggle) + { + PVR_DPF((PVR_DBG_WARNING, "HWPerfHost events (%x) have been TOGGLED", + ui32Mask)); + } + else + { + PVR_DPF((PVR_DBG_WARNING, "HWPerfHost mask has been SET to (%x)", + ui32Mask)); + } +#endif + + return PVRSRV_OK; +} + +static PVRSRV_ERROR RGXHWPerfCtrlClientBuffer(IMG_BOOL bToggle, + IMG_UINT32 ui32InfoPageIdx, + IMG_UINT32 ui32Mask) +{ + PVRSRV_DATA *psData = PVRSRVGetPVRSRVData(); + + PVR_LOG_RETURN_IF_FALSE(ui32InfoPageIdx >= HWPERF_INFO_IDX_START && + ui32InfoPageIdx < HWPERF_INFO_IDX_END, "invalid info" + " page index", PVRSRV_ERROR_INVALID_PARAMS); + + OSLockAcquire(psData->hInfoPageLock); + psData->pui32InfoPage[ui32InfoPageIdx] = bToggle ? 
+ psData->pui32InfoPage[ui32InfoPageIdx] ^ ui32Mask : ui32Mask; + OSLockRelease(psData->hInfoPageLock); + +#if defined(DEBUG) + if (bToggle) + { + PVR_DPF((PVR_DBG_WARNING, "HWPerfClient (%u) events (%x) have been TOGGLED", + ui32InfoPageIdx, ui32Mask)); + } + else + { + PVR_DPF((PVR_DBG_WARNING, "HWPerfClient (%u) mask has been SET to (%x)", + ui32InfoPageIdx, ui32Mask)); + } +#endif + + return PVRSRV_OK; +} + +PVRSRV_ERROR PVRSRVRGXGetHWPerfBvncFeatureFlagsKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_HWPERF_BVNC *psBVNC) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + PVRSRV_ERROR eError; + + PVR_LOG_RETURN_IF_FALSE((NULL != psDeviceNode), "psDeviceNode invalid", PVRSRV_ERROR_INVALID_PARAMS); + + psDevInfo = psDeviceNode->pvDevice; + eError = RGXServerFeatureFlagsToHWPerfFlags(psDevInfo, psBVNC); + + return eError; +} + +/* + AppHint interfaces + */ +static +PVRSRV_ERROR RGXHWPerfSetFwFilter(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *psPrivate, + IMG_UINT64 ui64Value) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DEVICE_NODE *psDevNode; + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(psPrivate); + + psDevNode = psPVRSRVData->psDeviceNodeList; + /* Control HWPerf on all the devices */ + while (psDevNode) + { + eError = RGXHWPerfCtrlFwBuffer(psDevNode, IMG_FALSE, ui64Value); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to set HWPerf firmware filter for device (%d)", psDevNode->sDevId.i32OsDeviceID)); + return eError; + } + psDevNode = psDevNode->psNext; + } + return PVRSRV_OK; +} + +static +PVRSRV_ERROR RGXHWPerfReadFwFilter(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *psPrivate, + IMG_UINT64 *pui64Value) +{ + PVRSRV_RGXDEV_INFO *psDevice; + + PVR_UNREFERENCED_PARAMETER(psPrivate); + + if (!psDeviceNode || !psDeviceNode->pvDevice) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Configuration command is applied for all devices, so 
filter value should + * be same for all */ + psDevice = psDeviceNode->pvDevice; + *pui64Value = psDevice->ui64HWPerfFilter; + return PVRSRV_OK; +} + +static +PVRSRV_ERROR RGXHWPerfSetHostFilter(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *psPrivate, + IMG_UINT32 ui32Value) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DEVICE_NODE *psDevNode; + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(psPrivate); + + psDevNode = psPVRSRVData->psDeviceNodeList; + /* Control HWPerf on all the devices */ + while (psDevNode) + { + eError = RGXHWPerfCtrlHostBuffer(psDevNode, IMG_FALSE, ui32Value); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to set HWPerf firmware filter for device (%d)", psDevNode->sDevId.i32OsDeviceID)); + return eError; + } + psDevNode = psDevNode->psNext; + } + return PVRSRV_OK; +} + +static +PVRSRV_ERROR RGXHWPerfReadHostFilter(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *psPrivate, + IMG_UINT32 *pui32Value) +{ + PVRSRV_RGXDEV_INFO *psDevice; + + PVR_UNREFERENCED_PARAMETER(psPrivate); + + if (!psDeviceNode || !psDeviceNode->pvDevice) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDevice = psDeviceNode->pvDevice; + *pui32Value = psDevice->ui32HWPerfHostFilter; + return PVRSRV_OK; +} + +static PVRSRV_ERROR _ReadClientFilter(const PVRSRV_DEVICE_NODE *psDevice, + const void *psPrivData, + IMG_UINT32 *pui32Value) +{ + PVRSRV_DATA *psData = PVRSRVGetPVRSRVData(); + IMG_UINT32 ui32Idx = (IMG_UINT32) (uintptr_t) psPrivData; + PVR_UNREFERENCED_PARAMETER(psDevice); + + OSLockAcquire(psData->hInfoPageLock); + *pui32Value = psData->pui32InfoPage[ui32Idx]; + OSLockRelease(psData->hInfoPageLock); + + return PVRSRV_OK; +} + +static PVRSRV_ERROR _WriteClientFilter(const PVRSRV_DEVICE_NODE *psDevice, + const void *psPrivData, + IMG_UINT32 ui32Value) +{ + IMG_UINT32 ui32Idx = (IMG_UINT32) (uintptr_t) psPrivData; + PVR_UNREFERENCED_PARAMETER(psDevice); + + return 
RGXHWPerfCtrlClientBuffer(IMG_FALSE, ui32Idx, ui32Value); +} + +void RGXHWPerfInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRVAppHintRegisterHandlersUINT64(APPHINT_ID_HWPerfFWFilter, + RGXHWPerfReadFwFilter, + RGXHWPerfSetFwFilter, + psDeviceNode, + NULL); + PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfHostFilter, + RGXHWPerfReadHostFilter, + RGXHWPerfSetHostFilter, + psDeviceNode, + NULL); +} + +void RGXHWPerfClientInitAppHintCallbacks(void) +{ + PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_Services, + _ReadClientFilter, + _WriteClientFilter, + APPHINT_OF_DRIVER_NO_DEVICE, + (void *) HWPERF_FILTER_SERVICES_IDX); + PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_EGL, + _ReadClientFilter, + _WriteClientFilter, + APPHINT_OF_DRIVER_NO_DEVICE, + (void *) HWPERF_FILTER_EGL_IDX); + PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_OpenGLES, + _ReadClientFilter, + _WriteClientFilter, + APPHINT_OF_DRIVER_NO_DEVICE, + (void *) HWPERF_FILTER_OPENGLES_IDX); + PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_OpenCL, + _ReadClientFilter, + _WriteClientFilter, + APPHINT_OF_DRIVER_NO_DEVICE, + (void *) HWPERF_FILTER_OPENCL_IDX); + PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_Vulkan, + _ReadClientFilter, + _WriteClientFilter, + APPHINT_OF_DRIVER_NO_DEVICE, + (void *) HWPERF_FILTER_VULKAN_IDX); + PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_OpenGL, + _ReadClientFilter, + _WriteClientFilter, + APPHINT_OF_DRIVER_NO_DEVICE, + (void *) HWPERF_FILTER_OPENGL_IDX); +} + +static INLINE IMG_UINT32 _RGXHWPerfFixBufferSize(IMG_UINT32 ui32BufSizeKB) +{ + if (ui32BufSizeKB > HWPERF_HOST_TL_STREAM_SIZE_MAX) + { + /* Size specified as a AppHint but it is too big */ + PVR_DPF((PVR_DBG_WARNING, + "RGXHWPerfHostInit: HWPerf Host buffer size " + "value (%u) too big, using maximum (%u)", + ui32BufSizeKB, HWPERF_HOST_TL_STREAM_SIZE_MAX)); + return 
HWPERF_HOST_TL_STREAM_SIZE_MAX<<10; + } + else if (ui32BufSizeKB >= HWPERF_HOST_TL_STREAM_SIZE_MIN) + { + return ui32BufSizeKB<<10; + } + else if (ui32BufSizeKB > 0) + { + /* Size specified as a AppHint but it is too small */ + PVR_DPF((PVR_DBG_WARNING, + "RGXHWPerfHostInit: HWPerf Host buffer size " + "value (%u) too small, using minimum (%u)", + ui32BufSizeKB, HWPERF_HOST_TL_STREAM_SIZE_MIN)); + return HWPERF_HOST_TL_STREAM_SIZE_MIN<<10; + } + else + { + /* 0 size implies AppHint not set or is set to zero, + * use default size from driver constant. */ + return HWPERF_HOST_TL_STREAM_SIZE_DEFAULT<<10; + } +} + +/****************************************************************************** + * RGX HW Performance Host Stream API + *****************************************************************************/ + +/*************************************************************************/ /*! +@Function RGXHWPerfHostInit + +@Description Called during driver init for initialisation of HWPerfHost + stream in the Rogue device driver. This function keeps allocated + only the minimal necessary resources, which are required for + functioning of HWPerf server module. 
+ +@Return PVRSRV_ERROR + */ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfHostInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32BufSizeKB) +{ + PVRSRV_ERROR eError; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + PVR_ASSERT(psRgxDevInfo != NULL); + + eError = OSLockCreate(&psRgxDevInfo->hLockHWPerfHostStream); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", error); + + psRgxDevInfo->hHWPerfHostStream = NULL; + psRgxDevInfo->ui32HWPerfHostFilter = 0; /* disable all events */ + psRgxDevInfo->ui32HWPerfHostNextOrdinal = 1; + psRgxDevInfo->ui32HWPerfHostBufSize = _RGXHWPerfFixBufferSize(ui32BufSizeKB); + psRgxDevInfo->pvHostHWPerfMISR = NULL; + psRgxDevInfo->pui8DeferredEvents = NULL; + /* First packet has ordinal=1, so LastOrdinal=0 will ensure ordering logic + * is maintained */ + psRgxDevInfo->ui32HWPerfHostLastOrdinal = 0; + psRgxDevInfo->hHWPerfHostSpinLock = NULL; + +error: + return eError; +} + +static void _HWPerfHostOnConnectCB(void *pvArg) +{ + PVRSRV_RGXDEV_INFO* psDevice; + PVRSRV_ERROR eError; + + RGXSRV_HWPERF_CLK_SYNC(pvArg); + + psDevice = (PVRSRV_RGXDEV_INFO*) pvArg; + + /* Handle the case where the RGX_HWPERF_HOST_INFO bit is set in the event filter + * before the host stream is opened for reading by a HWPerf client. + * Which can result in the host periodic thread sleeping for a long duration as TLStreamIsOpenForReading may return false. */ + if (psDevice->ui32HWPerfHostFilter & RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HOST_INFO)) + { + eError = PVRSRVCreateHWPerfHostThread(PVRSRV_APPHINT_HWPERFHOSTTHREADTIMEOUTINMS); + PVR_LOG_IF_ERROR(eError, "PVRSRVCreateHWPerfHostThread"); + } +} + +/* Avoiding a holder struct using fields below, as a struct gets along padding, + * packing, and other compiler dependencies, and we want a continuous stream of + * bytes for (header+data) for use in TLStreamWrite. See + * _HWPerfHostDeferredEventsEmitter(). 
+ * + * A deferred (UFO) packet is represented in memory as: + * - IMG_BOOL --> Indicates whether a packet write is + * "complete" by atomic context or not. + * - RGX_HWPERF_V2_PACKET_HDR --. + * |--> Fed together to TLStreamWrite for + * | deferred packet to be written to + * | HWPerfHost buffer + * - RGX_HWPERF_HOST_UFO_DATA---` + * + * PS: Currently only UFO events are supported in deferred list */ +#define HWPERF_HOST_DEFERRED_UFO_PACKET_SIZE (sizeof(IMG_BOOL) +\ + sizeof(RGX_HWPERF_V2_PACKET_HDR) +\ + sizeof(RGX_HWPERF_HOST_UFO_DATA)) + +static void RGX_MISRHandler_HWPerfPostDeferredHostEvents(void *pvData); +static void _HWPerfHostDeferredEventsEmitter(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + IMG_UINT32 ui32MaxOrdinal); + +/*************************************************************************/ /*! +@Function RGXHWPerfHostInitOnDemandResources + +@Description This function allocates the HWPerfHost buffer if HWPerf is + enabled at driver load time. Otherwise, these buffers are + allocated on-demand as and when required. 
+ +@Return PVRSRV_ERROR + */ /**************************************************************************/ +PVRSRV_ERROR RGXHWPerfHostInitOnDemandResources(PVRSRV_RGXDEV_INFO *psRgxDevInfo) +{ + PVRSRV_ERROR eError; + IMG_CHAR pszHWPerfHostStreamName[sizeof(PVRSRV_TL_HWPERF_HOST_SERVER_STREAM) + 5]; /* 5 makes space up to "hwperf_host_9999" streams */ + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + /* form the HWPerf host stream name, corresponding to this DevNode; which can make sense in the UM */ + if (OSSNPrintf(pszHWPerfHostStreamName, sizeof(pszHWPerfHostStreamName), "%s%d", + PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, + psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID) < 0) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to form HWPerf host stream name for device %d", + __func__, + psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + eError = TLStreamCreate(&psRgxDevInfo->hHWPerfHostStream, + pszHWPerfHostStreamName, psRgxDevInfo->ui32HWPerfHostBufSize, + TL_OPMODE_DROP_NEWER, + _HWPerfHostOnConnectCB, psRgxDevInfo, + NULL, NULL); + PVR_LOG_RETURN_IF_ERROR(eError, "TLStreamCreate"); + + eError = TLStreamSetNotifStream(psRgxDevInfo->hHWPerfHostStream, + PVRSRVGetPVRSRVData()->hTLCtrlStream); + /* we can still discover host stream so leave it as is and just log error */ + PVR_LOG_IF_ERROR(eError, "TLStreamSetNotifStream"); + + /* send the event here because host stream is implicitly opened for write + * in TLStreamCreate and TLStreamOpen is never called (so the event is + * never emitted) */ + eError = TLStreamMarkStreamOpen(psRgxDevInfo->hHWPerfHostStream); + PVR_LOG_IF_ERROR(eError, "TLStreamMarkStreamOpen"); + + /* HWPerfHost deferred events specific initialization */ + eError = OSInstallMISR(&psRgxDevInfo->pvHostHWPerfMISR, + RGX_MISRHandler_HWPerfPostDeferredHostEvents, + psRgxDevInfo, + "RGX_HWPerfDeferredEventPoster"); + PVR_LOG_GOTO_IF_ERROR(eError, "OSInstallMISR", err_install_misr); + + eError = 
OSSpinLockCreate(&psRgxDevInfo->hHWPerfHostSpinLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSSpinLockCreate", err_spinlock_create); + + psRgxDevInfo->pui8DeferredEvents = OSAllocMem(HWPERF_HOST_MAX_DEFERRED_PACKETS + * HWPERF_HOST_DEFERRED_UFO_PACKET_SIZE); + if (NULL == psRgxDevInfo->pui8DeferredEvents) + { + PVR_DPF((PVR_DBG_ERROR, "%s: OUT OF MEMORY. Could not allocate memory for " + "HWPerfHost deferred events array", __func__)); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_alloc_deferred_events; + } + psRgxDevInfo->ui16DEReadIdx = 0; + psRgxDevInfo->ui16DEWriteIdx = 0; +#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) + psRgxDevInfo->ui32DEHighWatermark = 0; + psRgxDevInfo->ui32WaitForRightOrdPktHighWatermark = 0; + psRgxDevInfo->ui32WaitForAtomicCtxPktHighWatermark = 0; +#endif + + PVR_DPF((DBGPRIV_MESSAGE, "HWPerf Host buffer size is %uKB", + psRgxDevInfo->ui32HWPerfHostBufSize)); + + return PVRSRV_OK; + +err_alloc_deferred_events: + OSSpinLockDestroy(psRgxDevInfo->hHWPerfHostSpinLock); + psRgxDevInfo->hHWPerfHostSpinLock = NULL; + +err_spinlock_create: + (void) OSUninstallMISR(psRgxDevInfo->pvHostHWPerfMISR); + psRgxDevInfo->pvHostHWPerfMISR = NULL; + +err_install_misr: + TLStreamMarkStreamClose(psRgxDevInfo->hHWPerfHostStream); + TLStreamClose(psRgxDevInfo->hHWPerfHostStream); + psRgxDevInfo->hHWPerfHostStream = NULL; + + return eError; +} + +void RGXHWPerfHostDeInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo) +{ + PVRSRV_VZ_RETN_IF_MODE(GUEST); + + PVR_ASSERT (psRgxDevInfo); + + if (psRgxDevInfo->pui8DeferredEvents) + { + OSFreeMem(psRgxDevInfo->pui8DeferredEvents); + psRgxDevInfo->pui8DeferredEvents = NULL; + } + + if (psRgxDevInfo->hHWPerfHostSpinLock) + { + OSSpinLockDestroy(psRgxDevInfo->hHWPerfHostSpinLock); + psRgxDevInfo->hHWPerfHostSpinLock = NULL; + } + + if (psRgxDevInfo->pvHostHWPerfMISR) + { + (void) OSUninstallMISR(psRgxDevInfo->pvHostHWPerfMISR); + psRgxDevInfo->pvHostHWPerfMISR = NULL; + } + + if (psRgxDevInfo->hHWPerfHostStream) + { + /* send 
the event here because host stream is implicitly opened for + * write in TLStreamCreate and TLStreamClose is never called (so the + * event is never emitted) */ + TLStreamMarkStreamClose(psRgxDevInfo->hHWPerfHostStream); + TLStreamClose(psRgxDevInfo->hHWPerfHostStream); + psRgxDevInfo->hHWPerfHostStream = NULL; + } + + if (psRgxDevInfo->hLockHWPerfHostStream) + { + OSLockDestroy(psRgxDevInfo->hLockHWPerfHostStream); + psRgxDevInfo->hLockHWPerfHostStream = NULL; + } +} + +inline void RGXHWPerfHostSetEventFilter(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32Filter) +{ + PVRSRV_VZ_RETN_IF_MODE(GUEST); + psRgxDevInfo->ui32HWPerfHostFilter = ui32Filter; +} + +inline IMG_BOOL RGXHWPerfHostIsEventEnabled(PVRSRV_RGXDEV_INFO *psRgxDevInfo, RGX_HWPERF_HOST_EVENT_TYPE eEvent) +{ + PVR_ASSERT(psRgxDevInfo); + return (psRgxDevInfo->ui32HWPerfHostFilter & RGX_HWPERF_EVENT_MASK_VALUE(eEvent)) ? IMG_TRUE : IMG_FALSE; +} + +#define MAX_RETRY_COUNT 80 +static inline void _PostFunctionPrologue(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + IMG_UINT32 ui32CurrentOrdinal) +{ + IMG_UINT32 ui32Retry = MAX_RETRY_COUNT; + + PVR_ASSERT(psRgxDevInfo->hLockHWPerfHostStream != NULL); + PVR_ASSERT(psRgxDevInfo->hHWPerfHostStream != NULL); + + OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream); + + /* First, flush pending events (if any) */ + _HWPerfHostDeferredEventsEmitter(psRgxDevInfo, ui32CurrentOrdinal); + + while ((ui32CurrentOrdinal != psRgxDevInfo->ui32HWPerfHostLastOrdinal + 1) + && (--ui32Retry != 0)) + { + /* Release lock and give a chance to a waiting context to emit the + * expected packet */ + OSLockRelease (psRgxDevInfo->hLockHWPerfHostStream); + OSSleepms(100); + OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream); + } + +#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) + if ((ui32Retry == 0) && !(psRgxDevInfo->bWarnedPktOrdinalBroke)) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Will warn only once! 
Potential packet(s) lost after ordinal" + " %u (Current ordinal = %u)", + __func__, + psRgxDevInfo->ui32HWPerfHostLastOrdinal, ui32CurrentOrdinal)); + psRgxDevInfo->bWarnedPktOrdinalBroke = IMG_TRUE; + } + + if (psRgxDevInfo->ui32WaitForRightOrdPktHighWatermark < (MAX_RETRY_COUNT - ui32Retry)) + { + psRgxDevInfo->ui32WaitForRightOrdPktHighWatermark = MAX_RETRY_COUNT - ui32Retry; + } +#endif +} + +static inline void _PostFunctionEpilogue(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + IMG_UINT32 ui32CurrentOrdinal) +{ + /* update last ordinal emitted */ + psRgxDevInfo->ui32HWPerfHostLastOrdinal = ui32CurrentOrdinal; + + PVR_ASSERT(OSLockIsLocked(psRgxDevInfo->hLockHWPerfHostStream)); + OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream); +} + +static inline IMG_UINT8 *_ReserveHWPerfStream(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32Size) +{ + IMG_UINT8 *pui8Dest; + + PVRSRV_ERROR eError = TLStreamReserve(psRgxDevInfo->hHWPerfHostStream, + &pui8Dest, ui32Size); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: Could not reserve space in %s buffer" + " (%d). 
Dropping packet.", + __func__, PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, eError)); + return NULL; + } + PVR_ASSERT(pui8Dest != NULL); + + return pui8Dest; +} + +static inline void _CommitHWPerfStream(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32Size) +{ + PVRSRV_ERROR eError = TLStreamCommit(psRgxDevInfo->hHWPerfHostStream, + ui32Size); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: Could not commit data to %s" + " (%d)", __func__, PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, eError)); + } +} + +/* Returns IMG_TRUE if packet write passes, IMG_FALSE otherwise */ +static inline IMG_BOOL _WriteHWPerfStream(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_V2_PACKET_HDR *psHeader) +{ + PVRSRV_ERROR eError = TLStreamWrite(psRgxDevInfo->hHWPerfHostStream, + IMG_OFFSET_ADDR(psHeader, 0), psHeader->ui32Size); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: Could not write packet in %s buffer" + " (%d). Dropping packet.", + __func__, PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, eError)); + } + + /* Regardless of whether write passed/failed, we consider it "written" */ + psRgxDevInfo->ui32HWPerfHostLastOrdinal = psHeader->ui32Ordinal; + + return (eError == PVRSRV_OK); +} + +/* Helper macros for deferred events operations */ +#define GET_DE_NEXT_IDX(_curridx) ((_curridx + 1) % HWPERF_HOST_MAX_DEFERRED_PACKETS) +#define GET_DE_EVENT_BASE(_idx) (IMG_OFFSET_ADDR(psRgxDevInfo->pui8DeferredEvents, \ + (_idx) * HWPERF_HOST_DEFERRED_UFO_PACKET_SIZE)) + +#define GET_DE_EVENT_WRITE_STATUS(_base) ((IMG_BOOL*)((void *)(_base))) +#define GET_DE_EVENT_DATA(_base) (IMG_OFFSET_ADDR((_base), sizeof(IMG_BOOL))) + +/* Emits HWPerfHost event packets present in the deferred list stopping when one + * of the following cases is hit: + * case 1: Packet ordering breaks i.e. a packet found doesn't meet ordering + * criteria (ordinal == last_ordinal + 1) + * + * case 2: A packet with ordinal > ui32MaxOrdinal is found + * + * case 3: Deferred list's (read == write) i.e. 
no more deferred packets. + * + * NOTE: Caller must possess the hLockHWPerfHostStream lock before calling + * this function.*/ +static void _HWPerfHostDeferredEventsEmitter(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + IMG_UINT32 ui32MaxOrdinal) +{ + RGX_HWPERF_V2_PACKET_HDR *psHeader; + IMG_UINT32 ui32Retry; + IMG_UINT8 *pui8DeferredEvent; + IMG_BOOL *pbPacketWritten; + IMG_BOOL bWritePassed; + + PVR_ASSERT(OSLockIsLocked(psRgxDevInfo->hLockHWPerfHostStream)); + + while (psRgxDevInfo->ui16DEReadIdx != psRgxDevInfo->ui16DEWriteIdx) + { + pui8DeferredEvent = GET_DE_EVENT_BASE(psRgxDevInfo->ui16DEReadIdx); + pbPacketWritten = GET_DE_EVENT_WRITE_STATUS(pui8DeferredEvent); + psHeader = (RGX_HWPERF_V2_PACKET_HDR*) GET_DE_EVENT_DATA(pui8DeferredEvent); + + for (ui32Retry = MAX_RETRY_COUNT; !(*pbPacketWritten) && (ui32Retry != 0); ui32Retry--) + { + /* Packet not yet written, re-check after a while. Wait for a short period as + * atomic contexts are generally expected to finish fast */ + OSWaitus(10); + } + +#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) + if ((ui32Retry == 0) && !(psRgxDevInfo->bWarnedAtomicCtxPktLost)) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Will warn only once. 
Dropping a deferred packet as atomic context" + " took too long to write it", + __func__)); + psRgxDevInfo->bWarnedAtomicCtxPktLost = IMG_TRUE; + } + + if (psRgxDevInfo->ui32WaitForAtomicCtxPktHighWatermark < (MAX_RETRY_COUNT - ui32Retry)) + { + psRgxDevInfo->ui32WaitForAtomicCtxPktHighWatermark = MAX_RETRY_COUNT - ui32Retry; + } +#endif + + if (*pbPacketWritten) + { + if ((psHeader->ui32Ordinal > ui32MaxOrdinal) || + (psHeader->ui32Ordinal != (psRgxDevInfo->ui32HWPerfHostLastOrdinal + 1))) + { + /* Leave remaining events to be emitted by next call to this function */ + break; + } + bWritePassed = _WriteHWPerfStream(psRgxDevInfo, psHeader); + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: Atomic context packet lost!", __func__)); + bWritePassed = IMG_FALSE; + } + + /* Move on to next packet */ + psRgxDevInfo->ui16DEReadIdx = GET_DE_NEXT_IDX(psRgxDevInfo->ui16DEReadIdx); + + if (!bWritePassed // if write failed + && ui32MaxOrdinal == IMG_UINT32_MAX // and we are from MISR + && psRgxDevInfo->ui16DEReadIdx != psRgxDevInfo->ui16DEWriteIdx) // and there are more events + { + /* Stop emitting here and re-schedule MISR */ + OSScheduleMISR(psRgxDevInfo->pvHostHWPerfMISR); + break; + } + } +} + +static void RGX_MISRHandler_HWPerfPostDeferredHostEvents(void *pvData) +{ + PVRSRV_RGXDEV_INFO *psRgxDevInfo = pvData; + + OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream); + + /* Since we're called from MISR, there is no upper cap of ordinal to be emitted. + * Send IMG_UINT32_MAX to signify all possible packets. 
*/ + _HWPerfHostDeferredEventsEmitter(psRgxDevInfo, IMG_UINT32_MAX); + + OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream); +} + +#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) +static inline void _UpdateDEBufferHighWatermark(PVRSRV_RGXDEV_INFO *psRgxDevInfo) +{ + IMG_UINT32 ui32DEWatermark; + IMG_UINT16 ui16LRead = psRgxDevInfo->ui16DEReadIdx; + IMG_UINT16 ui16LWrite = psRgxDevInfo->ui16DEWriteIdx; + + if (ui16LWrite >= ui16LRead) + { + ui32DEWatermark = ui16LWrite - ui16LRead; + } + else + { + ui32DEWatermark = (HWPERF_HOST_MAX_DEFERRED_PACKETS - ui16LRead) + (ui16LWrite); + } + + if (ui32DEWatermark > psRgxDevInfo->ui32DEHighWatermark) + { + psRgxDevInfo->ui32DEHighWatermark = ui32DEWatermark; + } +} +#endif + +/* @Description Gets the data/members that concerns the accuracy of a packet in HWPerfHost + buffer. Since the data returned by this function is required in both, an + atomic as well as a process/sleepable context, it is protected under spinlock + + @Output pui32Ordinal Pointer to ordinal number assigned to this packet + @Output pui64Timestamp Timestamp value for this packet + @Output ppui8Dest If the current context cannot sleep, pointer to a place in + deferred events buffer where the packet data should be written. + Don't care, otherwise. + */ +static void _GetHWPerfHostPacketSpecifics(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + IMG_UINT32 *pui32Ordinal, + IMG_UINT64 *pui64Timestamp, + IMG_UINT8 **ppui8Dest, + IMG_BOOL bSleepAllowed) +{ + OS_SPINLOCK_FLAGS uiFlags; + + /* Spin lock is required to avoid getting scheduled out by a higher priority + * context while we're getting header specific details and packet place in + * HWPerf buffer (when in atomic context) for ourselves */ + OSSpinLockAcquire(psRgxDevInfo->hHWPerfHostSpinLock, uiFlags); + + *pui32Ordinal = psRgxDevInfo->ui32HWPerfHostNextOrdinal++; + *pui64Timestamp = RGXTimeCorrGetClockus64(); + + if (!bSleepAllowed) + { + /* We're in an atomic context. 
So return the next position available in + * deferred events buffer */ + IMG_UINT16 ui16NewWriteIdx; + IMG_BOOL *pbPacketWritten; + + PVR_ASSERT(ppui8Dest != NULL); + + ui16NewWriteIdx = GET_DE_NEXT_IDX(psRgxDevInfo->ui16DEWriteIdx); + if (ui16NewWriteIdx == psRgxDevInfo->ui16DEReadIdx) + { + /* This shouldn't happen. HWPERF_HOST_MAX_DEFERRED_PACKETS should be + * big enough to avoid any such scenario */ +#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) + /* PVR_LOG/printk isn't recommended in atomic context. Perhaps we'll do + * this debug output here when trace_printk support is added to DDK */ +// PVR_LOG(("%s: No more space in deferred events buffer (%u/%u) W=%u,R=%u", +// __func__, psRgxDevInfo->ui32DEHighWatermark, +// HWPERF_HOST_MAX_DEFERRED_PACKETS, psRgxDevInfo->ui16DEWriteIdx, +// psRgxDevInfo->ui16DEReadIdx)); +#endif + *ppui8Dest = NULL; + } + else + { + /* Return the position where deferred event would be written */ + *ppui8Dest = GET_DE_EVENT_BASE(psRgxDevInfo->ui16DEWriteIdx); + + /* Make sure packet write "state" is "write-pending" _before_ moving write + * pointer forward */ + pbPacketWritten = GET_DE_EVENT_WRITE_STATUS(*ppui8Dest); + *pbPacketWritten = IMG_FALSE; + + psRgxDevInfo->ui16DEWriteIdx = ui16NewWriteIdx; + +#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) + _UpdateDEBufferHighWatermark(psRgxDevInfo); +#endif + } + } + + OSSpinLockRelease(psRgxDevInfo->hHWPerfHostSpinLock, uiFlags); +} + +static inline void _SetupHostPacketHeader(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + IMG_UINT8 *pui8Dest, + RGX_HWPERF_HOST_EVENT_TYPE eEvType, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32Ordinal, + IMG_UINT64 ui64Timestamp) +{ + RGX_HWPERF_V2_PACKET_HDR *psHeader = (RGX_HWPERF_V2_PACKET_HDR *) ((void *)pui8Dest); + + PVR_ASSERT(ui32Size<=RGX_HWPERF_MAX_PACKET_SIZE); + + psHeader->ui32Ordinal = ui32Ordinal; + psHeader->ui64Timestamp = ui64Timestamp; + psHeader->ui32Sig = HWPERF_PACKET_V2B_SIG; + psHeader->eTypeId = 
RGX_HWPERF_MAKE_TYPEID(RGX_HWPERF_STREAM_ID1_HOST, + eEvType, 0, 0, 0); + psHeader->ui32Size = ui32Size; +} + +static inline void _SetupHostEnqPacketData(IMG_UINT8 *pui8Dest, + RGX_HWPERF_KICK_TYPE eEnqType, + IMG_UINT32 ui32Pid, + IMG_UINT32 ui32FWDMContext, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + PVRSRV_FENCE hCheckFence, + PVRSRV_FENCE hUpdateFence, + PVRSRV_TIMELINE hUpdateTimeline, + IMG_UINT64 ui64CheckFenceUID, + IMG_UINT64 ui64UpdateFenceUID, + IMG_UINT64 ui64DeadlineInus, + IMG_UINT64 ui64CycleEstimate) +{ + RGX_HWPERF_HOST_ENQ_DATA *psData = (RGX_HWPERF_HOST_ENQ_DATA *) + IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); + psData->ui32EnqType = eEnqType; + psData->ui32PID = ui32Pid; + psData->ui32ExtJobRef = ui32ExtJobRef; + psData->ui32IntJobRef = ui32IntJobRef; + psData->ui32DMContext = ui32FWDMContext; + psData->hCheckFence = hCheckFence; + psData->hUpdateFence = hUpdateFence; + psData->hUpdateTimeline = hUpdateTimeline; + psData->ui64CheckFence_UID = ui64CheckFenceUID; + psData->ui64UpdateFence_UID = ui64UpdateFenceUID; + psData->ui64DeadlineInus = ui64DeadlineInus; + psData->ui64CycleEstimate = ui64CycleEstimate; +} + +void RGXHWPerfHostPostEnqEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_KICK_TYPE eEnqType, + IMG_UINT32 ui32Pid, + IMG_UINT32 ui32FWDMContext, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + PVRSRV_FENCE hCheckFence, + PVRSRV_FENCE hUpdateFence, + PVRSRV_TIMELINE hUpdateTimeline, + IMG_UINT64 ui64CheckFenceUID, + IMG_UINT64 ui64UpdateFenceUID, + IMG_UINT64 ui64DeadlineInus, + IMG_UINT64 ui64CycleEstimate ) +{ + IMG_UINT8 *pui8Dest; + IMG_UINT32 ui32Size = RGX_HWPERF_MAKE_SIZE_FIXED(RGX_HWPERF_HOST_ENQ_DATA); + IMG_UINT32 ui32Ordinal; + IMG_UINT64 ui64Timestamp; + + _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, + NULL, IMG_TRUE); + + _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); + + if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) + { + 
goto cleanup; + } + + _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_ENQ, ui32Size, + ui32Ordinal, ui64Timestamp); + _SetupHostEnqPacketData(pui8Dest, + eEnqType, + ui32Pid, + ui32FWDMContext, + ui32ExtJobRef, + ui32IntJobRef, + hCheckFence, + hUpdateFence, + hUpdateTimeline, + ui64CheckFenceUID, + ui64UpdateFenceUID, + ui64DeadlineInus, + ui64CycleEstimate); + + _CommitHWPerfStream(psRgxDevInfo, ui32Size); + +cleanup: + _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); +} + +static inline IMG_UINT32 _CalculateHostUfoPacketSize(RGX_HWPERF_UFO_EV eUfoType) +{ + IMG_UINT32 ui32Size = + (IMG_UINT32) offsetof(RGX_HWPERF_HOST_UFO_DATA, aui32StreamData); + RGX_HWPERF_UFO_DATA_ELEMENT *puData; + + switch (eUfoType) + { + case RGX_HWPERF_UFO_EV_CHECK_SUCCESS: + case RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS: + ui32Size += sizeof(puData->sCheckSuccess); + break; + case RGX_HWPERF_UFO_EV_CHECK_FAIL: + case RGX_HWPERF_UFO_EV_PRCHECK_FAIL: + ui32Size += sizeof(puData->sCheckFail); + break; + case RGX_HWPERF_UFO_EV_UPDATE: + ui32Size += sizeof(puData->sUpdate); + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostUfoEvent: Invalid UFO" + " event type")); + PVR_ASSERT(IMG_FALSE); + break; + } + + return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size); +} + +static inline void _SetupHostUfoPacketData(IMG_UINT8 *pui8Dest, + RGX_HWPERF_UFO_EV eUfoType, + RGX_HWPERF_UFO_DATA_ELEMENT *psUFOData) +{ + RGX_HWPERF_HOST_UFO_DATA *psData = (RGX_HWPERF_HOST_UFO_DATA *) + IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); + RGX_HWPERF_UFO_DATA_ELEMENT *puData = (RGX_HWPERF_UFO_DATA_ELEMENT *) + psData->aui32StreamData; + + psData->eEvType = eUfoType; + /* HWPerfHost always emits 1 UFO at a time, since each UFO has 1-to-1 mapping + * with an underlying DevNode, and each DevNode has a dedicated HWPerf buffer */ + psData->ui32StreamInfo = RGX_HWPERF_MAKE_UFOPKTINFO(1, + offsetof(RGX_HWPERF_HOST_UFO_DATA, aui32StreamData)); + + 
switch (eUfoType) + { + case RGX_HWPERF_UFO_EV_CHECK_SUCCESS: + case RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS: + puData->sCheckSuccess.ui32FWAddr = + psUFOData->sCheckSuccess.ui32FWAddr; + puData->sCheckSuccess.ui32Value = + psUFOData->sCheckSuccess.ui32Value; + + puData = (RGX_HWPERF_UFO_DATA_ELEMENT *) + IMG_OFFSET_ADDR(puData, sizeof(puData->sCheckSuccess)); + break; + case RGX_HWPERF_UFO_EV_CHECK_FAIL: + case RGX_HWPERF_UFO_EV_PRCHECK_FAIL: + puData->sCheckFail.ui32FWAddr = + psUFOData->sCheckFail.ui32FWAddr; + puData->sCheckFail.ui32Value = + psUFOData->sCheckFail.ui32Value; + puData->sCheckFail.ui32Required = + psUFOData->sCheckFail.ui32Required; + + puData = (RGX_HWPERF_UFO_DATA_ELEMENT *) + IMG_OFFSET_ADDR(puData, sizeof(puData->sCheckFail)); + break; + case RGX_HWPERF_UFO_EV_UPDATE: + puData->sUpdate.ui32FWAddr = + psUFOData->sUpdate.ui32FWAddr; + puData->sUpdate.ui32OldValue = + psUFOData->sUpdate.ui32OldValue; + puData->sUpdate.ui32NewValue = + psUFOData->sUpdate.ui32NewValue; + + puData = (RGX_HWPERF_UFO_DATA_ELEMENT *) + IMG_OFFSET_ADDR(puData, sizeof(puData->sUpdate)); + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostUfoEvent: Invalid UFO" + " event type")); + PVR_ASSERT(IMG_FALSE); + break; + } +} + +void RGXHWPerfHostPostUfoEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_UFO_EV eUfoType, + RGX_HWPERF_UFO_DATA_ELEMENT *psUFOData, + const IMG_BOOL bSleepAllowed) +{ + IMG_UINT8 *pui8Dest; + IMG_UINT32 ui32Size = _CalculateHostUfoPacketSize(eUfoType); + IMG_UINT32 ui32Ordinal; + IMG_UINT64 ui64Timestamp; + IMG_BOOL *pbPacketWritten = NULL; + + _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, + &pui8Dest, bSleepAllowed); + + if (bSleepAllowed) + { + _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); + + if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) + { + goto cleanup; + } + } + else + { + if (pui8Dest == NULL) + { + // Give-up if we couldn't get a 
place in deferred events buffer + goto cleanup; + } + pbPacketWritten = GET_DE_EVENT_WRITE_STATUS(pui8Dest); + pui8Dest = GET_DE_EVENT_DATA(pui8Dest); + } + + _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_UFO, ui32Size, + ui32Ordinal, ui64Timestamp); + _SetupHostUfoPacketData(pui8Dest, eUfoType, psUFOData); + + if (bSleepAllowed) + { + _CommitHWPerfStream(psRgxDevInfo, ui32Size); + } + else + { + *pbPacketWritten = IMG_TRUE; + OSScheduleMISR(psRgxDevInfo->pvHostHWPerfMISR); + } + +cleanup: + if (bSleepAllowed) + { + _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); + } +} + +#define UNKNOWN_SYNC_NAME "UnknownSync" + +static_assert(PVRSRV_SYNC_NAME_LENGTH==PVRSRV_SYNC_NAME_LENGTH, "Sync class name max does not match Fence Sync name max"); + +static inline IMG_UINT32 _FixNameAndCalculateHostAllocPacketSize( + RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType, + const IMG_CHAR **ppsName, + IMG_UINT32 *ui32NameSize) +{ + RGX_HWPERF_HOST_ALLOC_DATA *psData; + IMG_UINT32 ui32Size = offsetof(RGX_HWPERF_HOST_ALLOC_DATA, uAllocDetail); + + if (*ppsName != NULL && *ui32NameSize > 0) + { + /* if string longer than maximum cut it (leave space for '\0') */ + if (*ui32NameSize >= PVRSRV_SYNC_NAME_LENGTH) + *ui32NameSize = PVRSRV_SYNC_NAME_LENGTH; + } + else + { + PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfHostPostAllocEvent: Invalid" + " resource name given.")); + *ppsName = UNKNOWN_SYNC_NAME; + *ui32NameSize = sizeof(UNKNOWN_SYNC_NAME); + } + + switch (eAllocType) + { + case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC: + ui32Size += sizeof(psData->uAllocDetail.sSyncAlloc) - PVRSRV_SYNC_NAME_LENGTH + + *ui32NameSize; + break; + case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR: + ui32Size += sizeof(psData->uAllocDetail.sFenceAlloc) - PVRSRV_SYNC_NAME_LENGTH + + *ui32NameSize; + break; + case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_SW: + ui32Size += sizeof(psData->uAllocDetail.sSWFenceAlloc) - PVRSRV_SYNC_NAME_LENGTH + + *ui32NameSize; + break; + case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP: + 
ui32Size += sizeof(psData->uAllocDetail.sSyncCheckPointAlloc) - PVRSRV_SYNC_NAME_LENGTH + + *ui32NameSize; + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, + "RGXHWPerfHostPostAllocEvent: Invalid alloc event type")); + PVR_ASSERT(IMG_FALSE); + break; + } + + return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size); +} + +static inline void _SetupHostAllocPacketData(IMG_UINT8 *pui8Dest, + RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType, + RGX_HWPERF_HOST_ALLOC_DETAIL *puAllocDetail, + const IMG_CHAR *psName, + IMG_UINT32 ui32NameSize) +{ + RGX_HWPERF_HOST_ALLOC_DATA *psData = (RGX_HWPERF_HOST_ALLOC_DATA *) + IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); + + IMG_CHAR *acName = NULL; + + psData->ui32AllocType = eAllocType; + + switch (eAllocType) + { + case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC: + psData->uAllocDetail.sSyncAlloc = puAllocDetail->sSyncAlloc; + acName = psData->uAllocDetail.sSyncAlloc.acName; + break; + case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR: + psData->uAllocDetail.sFenceAlloc = puAllocDetail->sFenceAlloc; + acName = psData->uAllocDetail.sFenceAlloc.acName; + break; + case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_SW: + psData->uAllocDetail.sSWFenceAlloc = puAllocDetail->sSWFenceAlloc; + acName = psData->uAllocDetail.sSWFenceAlloc.acName; + break; + case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP: + psData->uAllocDetail.sSyncCheckPointAlloc = puAllocDetail->sSyncCheckPointAlloc; + acName = psData->uAllocDetail.sSyncCheckPointAlloc.acName; + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, + "RGXHWPerfHostPostAllocEvent: Invalid alloc event type")); + PVR_ASSERT(IMG_FALSE); + } + + + if (acName != NULL) + { + if (ui32NameSize) + { + OSStringLCopy(acName, psName, ui32NameSize); + } + else + { + /* In case no name was given make sure we don't access random + * memory */ + acName[0] = '\0'; + } + } +} + +void RGXHWPerfHostPostAllocEvent(PVRSRV_RGXDEV_INFO* psRgxDevInfo, + 
RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType, + const IMG_CHAR *psName, + IMG_UINT32 ui32NameSize, + RGX_HWPERF_HOST_ALLOC_DETAIL *puAllocDetail) +{ + IMG_UINT8 *pui8Dest; + IMG_UINT64 ui64Timestamp; + IMG_UINT32 ui32Ordinal; + IMG_UINT32 ui32Size = _FixNameAndCalculateHostAllocPacketSize(eAllocType, + &psName, + &ui32NameSize); + + _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, + NULL, IMG_TRUE); + + _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); + + if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) + { + goto cleanup; + } + + _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_ALLOC, ui32Size, + ui32Ordinal, ui64Timestamp); + + _SetupHostAllocPacketData(pui8Dest, + eAllocType, + puAllocDetail, + psName, + ui32NameSize); + + _CommitHWPerfStream(psRgxDevInfo, ui32Size); + +cleanup: + _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); +} + +static inline void _SetupHostFreePacketData(IMG_UINT8 *pui8Dest, + RGX_HWPERF_HOST_RESOURCE_TYPE eFreeType, + IMG_UINT64 ui64UID, + IMG_UINT32 ui32PID, + IMG_UINT32 ui32FWAddr) +{ + RGX_HWPERF_HOST_FREE_DATA *psData = (RGX_HWPERF_HOST_FREE_DATA *) + IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); + + psData->ui32FreeType = eFreeType; + + switch (eFreeType) + { + case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC: + psData->uFreeDetail.sSyncFree.ui32FWAddr = ui32FWAddr; + break; + case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR: + psData->uFreeDetail.sFenceDestroy.ui64Fence_UID = ui64UID; + break; + case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP: + psData->uFreeDetail.sSyncCheckPointFree.ui32CheckPt_FWAddr = ui32FWAddr; + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, + "RGXHWPerfHostPostFreeEvent: Invalid free event type")); + PVR_ASSERT(IMG_FALSE); + } +} + +void RGXHWPerfHostPostFreeEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_HOST_RESOURCE_TYPE eFreeType, + IMG_UINT64 ui64UID, + IMG_UINT32 ui32PID, + IMG_UINT32 ui32FWAddr) +{ 
+ IMG_UINT8 *pui8Dest; + IMG_UINT32 ui32Size = RGX_HWPERF_MAKE_SIZE_FIXED(RGX_HWPERF_HOST_FREE_DATA); + IMG_UINT32 ui32Ordinal; + IMG_UINT64 ui64Timestamp; + + _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, + NULL, IMG_TRUE); + _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); + + if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) + { + goto cleanup; + } + + _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_FREE, ui32Size, + ui32Ordinal, ui64Timestamp); + _SetupHostFreePacketData(pui8Dest, + eFreeType, + ui64UID, + ui32PID, + ui32FWAddr); + + _CommitHWPerfStream(psRgxDevInfo, ui32Size); + +cleanup: + _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); +} + +static inline IMG_UINT32 _FixNameAndCalculateHostModifyPacketSize( + RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType, + const IMG_CHAR **ppsName, + IMG_UINT32 *ui32NameSize) +{ + RGX_HWPERF_HOST_MODIFY_DATA *psData; + RGX_HWPERF_HOST_MODIFY_DETAIL *puData; + IMG_UINT32 ui32Size = sizeof(psData->ui32ModifyType); + + if (*ppsName != NULL && *ui32NameSize > 0) + { + /* first strip the terminator */ + if ((*ppsName)[*ui32NameSize - 1] == '\0') + *ui32NameSize -= 1; + /* if string longer than maximum cut it (leave space for '\0') */ + if (*ui32NameSize >= PVRSRV_SYNC_NAME_LENGTH) + *ui32NameSize = PVRSRV_SYNC_NAME_LENGTH - 1; + } + else + { + PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfHostPostModifyEvent: Invalid" + " resource name given.")); + *ppsName = UNKNOWN_SYNC_NAME; + *ui32NameSize = sizeof(UNKNOWN_SYNC_NAME) - 1; + } + + switch (eModifyType) + { + case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR: + ui32Size += sizeof(puData->sFenceMerge) - PVRSRV_SYNC_NAME_LENGTH + + *ui32NameSize + 1; /* +1 for '\0' */ + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, + "RGXHWPerfHostPostModifyEvent: Invalid modify event type")); + PVR_ASSERT(IMG_FALSE); + break; + } + + return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size); +} + +static inline void 
_SetupHostModifyPacketData(IMG_UINT8 *pui8Dest, + RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType, + IMG_UINT64 ui64NewUID, + IMG_UINT64 ui64UID1, + IMG_UINT64 ui64UID2, + const IMG_CHAR *psName, + IMG_UINT32 ui32NameSize) +{ + RGX_HWPERF_HOST_MODIFY_DATA *psData = (RGX_HWPERF_HOST_MODIFY_DATA *)IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); + + IMG_CHAR *acName = NULL; + + psData->ui32ModifyType = eModifyType; + + switch (eModifyType) + { + case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR: + psData->uModifyDetail.sFenceMerge.ui64NewFence_UID = ui64NewUID; + psData->uModifyDetail.sFenceMerge.ui64InFence1_UID = ui64UID1; + psData->uModifyDetail.sFenceMerge.ui64InFence2_UID = ui64UID2; + acName = psData->uModifyDetail.sFenceMerge.acName; + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, + "RGXHWPerfHostPostModifyEvent: Invalid modify event type")); + PVR_ASSERT(IMG_FALSE); + } + + if (acName != NULL) + { + if (ui32NameSize) + { + OSStringLCopy(acName, psName, ui32NameSize); + } + else + { + /* In case no name was given make sure we don't access random + * memory */ + acName[0] = '\0'; + } + } +} + +void RGXHWPerfHostPostModifyEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType, + IMG_UINT64 ui64NewUID, + IMG_UINT64 ui64UID1, + IMG_UINT64 ui64UID2, + const IMG_CHAR *psName, + IMG_UINT32 ui32NameSize) +{ + IMG_UINT8 *pui8Dest; + IMG_UINT64 ui64Timestamp; + IMG_UINT32 ui32Ordinal; + IMG_UINT32 ui32Size = _FixNameAndCalculateHostModifyPacketSize(eModifyType, + &psName, + &ui32NameSize); + + _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, + NULL, IMG_TRUE); + _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); + + if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) + { + goto cleanup; + } + + _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_MODIFY, ui32Size, + ui32Ordinal, ui64Timestamp); + _SetupHostModifyPacketData(pui8Dest, + 
eModifyType, + ui64NewUID, + ui64UID1, + ui64UID2, + psName, + ui32NameSize); + + _CommitHWPerfStream(psRgxDevInfo, ui32Size); + +cleanup: + _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); +} + +static inline void _SetupHostClkSyncPacketData(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT8 *pui8Dest) +{ + RGX_HWPERF_HOST_CLK_SYNC_DATA *psData = (RGX_HWPERF_HOST_CLK_SYNC_DATA *) + IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); + RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psRgxDevInfo->psRGXFWIfGpuUtilFWCb; + IMG_UINT32 ui32CurrIdx = + RGXFWIF_TIME_CORR_CURR_INDEX(psGpuUtilFWCB->ui32TimeCorrSeqCount); + RGXFWIF_TIME_CORR *psTimeCorr = &psGpuUtilFWCB->sTimeCorr[ui32CurrIdx]; + + psData->ui64CRTimestamp = psTimeCorr->ui64CRTimeStamp; + psData->ui64OSTimestamp = psTimeCorr->ui64OSTimeStamp; + psData->ui32ClockSpeed = psTimeCorr->ui32CoreClockSpeed; +} + +void RGXHWPerfHostPostClkSyncEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo) +{ + IMG_UINT8 *pui8Dest; + IMG_UINT32 ui32Size = + RGX_HWPERF_MAKE_SIZE_FIXED(RGX_HWPERF_HOST_CLK_SYNC_DATA); + IMG_UINT32 ui32Ordinal; + IMG_UINT64 ui64Timestamp; + + _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, + NULL, IMG_TRUE); + _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); + + if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) + { + goto cleanup; + } + + _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_CLK_SYNC, ui32Size, + ui32Ordinal, ui64Timestamp); + _SetupHostClkSyncPacketData(psRgxDevInfo, pui8Dest); + + _CommitHWPerfStream(psRgxDevInfo, ui32Size); + +cleanup: + _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); +} + +static inline RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS _ConvDeviceHealthStatus(PVRSRV_DEVICE_HEALTH_STATUS eDeviceHealthStatus) +{ + switch (eDeviceHealthStatus) + { + case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED; + case PVRSRV_DEVICE_HEALTH_STATUS_OK: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_OK; + case 
PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_RESPONDING; + case PVRSRV_DEVICE_HEALTH_STATUS_DEAD: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_DEAD; + case PVRSRV_DEVICE_HEALTH_STATUS_FAULT: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_FAULT; + default: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED; + } +} + +static inline RGX_HWPERF_HOST_DEVICE_HEALTH_REASON _ConvDeviceHealthReason(PVRSRV_DEVICE_HEALTH_REASON eDeviceHealthReason) +{ + switch (eDeviceHealthReason) + { + case PVRSRV_DEVICE_HEALTH_REASON_NONE: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_NONE; + case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_ASSERTED; + case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_POLL_FAILING; + case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_TIMEOUTS; + case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_CORRUPT; + case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_STALLED; + case PVRSRV_DEVICE_HEALTH_REASON_IDLING: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_IDLING; + case PVRSRV_DEVICE_HEALTH_REASON_RESTARTING: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_RESTARTING; + case PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS:return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS; + default: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_UNDEFINED; + } +} + +static inline void _SetupHostDeviceInfoPacketData(RGX_HWPERF_DEV_INFO_EV eEvType, + PVRSRV_DEVICE_HEALTH_STATUS eDeviceHealthStatus, + PVRSRV_DEVICE_HEALTH_REASON eDeviceHealthReason, + IMG_UINT8 *pui8Dest) +{ + RGX_HWPERF_HOST_DEV_INFO_DATA *psData = (RGX_HWPERF_HOST_DEV_INFO_DATA *)IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); + psData->eEvType = eEvType; + + switch (eEvType) + { + case RGX_HWPERF_DEV_INFO_EV_HEALTH: + 
psData->uDevInfoDetail.sDeviceStatus.eDeviceHealthStatus = _ConvDeviceHealthStatus(eDeviceHealthStatus); + psData->uDevInfoDetail.sDeviceStatus.eDeviceHealthReason = _ConvDeviceHealthReason(eDeviceHealthReason); + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostDeviceInfo: Invalid event type")); + PVR_ASSERT(IMG_FALSE); + break; + } +} + +static inline IMG_UINT32 _CalculateHostDeviceInfoPacketSize(RGX_HWPERF_DEV_INFO_EV eEvType) +{ + IMG_UINT32 ui32Size = offsetof(RGX_HWPERF_HOST_DEV_INFO_DATA, uDevInfoDetail); + + switch (eEvType) + { + case RGX_HWPERF_DEV_INFO_EV_HEALTH: + ui32Size += sizeof(((RGX_HWPERF_HOST_DEV_INFO_DATA*)0)->uDevInfoDetail.sDeviceStatus); + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostDeviceInfo: Invalid event type")); + PVR_ASSERT(IMG_FALSE); + break; + } + return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size); +} + +void RGXHWPerfHostPostDeviceInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_DEV_INFO_EV eEvType, + PVRSRV_DEVICE_HEALTH_STATUS eDeviceHealthStatus, + PVRSRV_DEVICE_HEALTH_REASON eDeviceHealthReason) +{ + IMG_UINT8 *pui8Dest; + IMG_UINT32 ui32Ordinal; + IMG_UINT64 ui64Timestamp; + IMG_UINT32 ui32Size; + + OSLockAcquire(psRgxDevInfo->hHWPerfLock); + + if (psRgxDevInfo->hHWPerfHostStream != (IMG_HANDLE) NULL) + { + _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, NULL, IMG_TRUE); + _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); + ui32Size = _CalculateHostDeviceInfoPacketSize(eEvType); + + if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) != NULL) + { + _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_DEV_INFO, ui32Size, ui32Ordinal, ui64Timestamp); + _SetupHostDeviceInfoPacketData(eEvType, eDeviceHealthStatus, eDeviceHealthReason, pui8Dest); + _CommitHWPerfStream(psRgxDevInfo, ui32Size); + } + + _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); + } + + 
OSLockRelease(psRgxDevInfo->hHWPerfLock); +} + +static inline void _SetupHostInfoPacketData(RGX_HWPERF_INFO_EV eEvType, + IMG_UINT32 ui32TotalMemoryUsage, + IMG_UINT32 ui32LivePids, + PVRSRV_PER_PROCESS_MEM_USAGE *psPerProcessMemUsage, + IMG_UINT8 *pui8Dest) +{ + IMG_INT i; + RGX_HWPERF_HOST_INFO_DATA *psData = (RGX_HWPERF_HOST_INFO_DATA *)IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); + psData->eEvType = eEvType; + + switch (eEvType) + { + case RGX_HWPERF_INFO_EV_MEM_USAGE: + psData->uInfoDetail.sMemUsageStats.ui32TotalMemoryUsage = ui32TotalMemoryUsage; + + if (psPerProcessMemUsage) + { + for (i = 0; i < ui32LivePids; ++i) + { + psData->uInfoDetail.sMemUsageStats.sPerProcessUsage[i].ui32Pid = psPerProcessMemUsage[i].ui32Pid; + psData->uInfoDetail.sMemUsageStats.sPerProcessUsage[i].ui32KernelMemUsage = psPerProcessMemUsage[i].ui32KernelMemUsage; + psData->uInfoDetail.sMemUsageStats.sPerProcessUsage[i].ui32GraphicsMemUsage = psPerProcessMemUsage[i].ui32GraphicsMemUsage; + } + } + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostInfo: Invalid event type")); + PVR_ASSERT(IMG_FALSE); + break; + } +} + +static inline IMG_UINT32 _CalculateHostInfoPacketSize(RGX_HWPERF_INFO_EV eEvType, + IMG_UINT32 *pui32TotalMemoryUsage, + IMG_UINT32 *pui32LivePids, + PVRSRV_PER_PROCESS_MEM_USAGE **ppsPerProcessMemUsage) +{ + IMG_UINT32 ui32Size = offsetof(RGX_HWPERF_HOST_INFO_DATA, uInfoDetail); + + switch (eEvType) + { + case RGX_HWPERF_INFO_EV_MEM_USAGE: +#if !defined(__QNXNTO__) + if (PVRSRVGetProcessMemUsage(pui32TotalMemoryUsage, pui32LivePids, ppsPerProcessMemUsage) == PVRSRV_OK) + { + ui32Size += ((offsetof(RGX_HWPERF_HOST_INFO_DATA, uInfoDetail.sMemUsageStats.ui32TotalMemoryUsage) - ui32Size) + + ((*pui32LivePids) * sizeof(((RGX_HWPERF_HOST_INFO_DATA*)0)->uInfoDetail.sMemUsageStats.sPerProcessUsage))); + } +#else + PVR_DPF((PVR_DBG_ERROR, "This functionality is not yet implemented for this platform")); 
+#endif + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostInfo: Invalid event type")); + PVR_ASSERT(IMG_FALSE); + break; + } + return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size); +} + +void RGXHWPerfHostPostInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_INFO_EV eEvType) +{ + IMG_UINT8 *pui8Dest; + IMG_UINT32 ui32Size; + IMG_UINT32 ui32Ordinal; + IMG_UINT64 ui64Timestamp; + IMG_UINT32 ui32TotalMemoryUsage = 0; + PVRSRV_PER_PROCESS_MEM_USAGE *psPerProcessMemUsage = NULL; + IMG_UINT32 ui32LivePids = 0; + + OSLockAcquire(psRgxDevInfo->hHWPerfLock); + + if (psRgxDevInfo->hHWPerfHostStream != (IMG_HANDLE) NULL) + { + _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, NULL, IMG_TRUE); + _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); + + ui32Size = _CalculateHostInfoPacketSize(eEvType, &ui32TotalMemoryUsage, &ui32LivePids, &psPerProcessMemUsage); + + if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) != NULL) + { + _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_INFO, ui32Size, ui32Ordinal, ui64Timestamp); + _SetupHostInfoPacketData(eEvType, ui32TotalMemoryUsage, ui32LivePids, psPerProcessMemUsage, pui8Dest); + _CommitHWPerfStream(psRgxDevInfo, ui32Size); + } + + _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); + + if (psPerProcessMemUsage) + OSFreeMemNoStats(psPerProcessMemUsage); // psPerProcessMemUsage was allocated with OSAllocZMemNoStats + } + + OSLockRelease(psRgxDevInfo->hHWPerfLock); +} + +static inline IMG_UINT32 +_CalculateHostFenceWaitPacketSize(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eWaitType) +{ + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA *psSizeCalculator; + IMG_UINT32 ui32Size = offsetof(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA, uDetail); + + switch (eWaitType) + { + case RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_BEGIN: + ui32Size += sizeof(psSizeCalculator->uDetail.sBegin); + break; + case RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_END: + ui32Size += 
sizeof(psSizeCalculator->uDetail.sEnd); + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid wait event type (%u)", __func__, + eWaitType)); + PVR_ASSERT(IMG_FALSE); + break; + } + return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size); +} + +static inline void +_SetupHostFenceWaitPacketData(IMG_UINT8 *pui8Dest, + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eWaitType, + IMG_PID uiPID, + PVRSRV_FENCE hFence, + IMG_UINT32 ui32Data) +{ + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA *psData = (RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA *) + IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); + + psData->eType = eWaitType; + psData->uiPID = uiPID; + psData->hFence = hFence; + + switch (eWaitType) + { + case RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_BEGIN: + psData->uDetail.sBegin.ui32TimeoutInMs = ui32Data; + break; + case RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_END: + psData->uDetail.sEnd.eResult = + (RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT) ui32Data; + break; + default: + // unknown type - this should never happen + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid fence-wait event type", __func__)); + PVR_ASSERT(IMG_FALSE); + } +} + +void RGXHWPerfHostPostFenceWait(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eType, + IMG_PID uiPID, + PVRSRV_FENCE hFence, + IMG_UINT32 ui32Data) +{ + IMG_UINT8 *pui8Dest; + IMG_UINT32 ui32Size; + IMG_UINT32 ui32Ordinal; + IMG_UINT64 ui64Timestamp; + + _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, + NULL, IMG_TRUE); + + _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); + + ui32Size = _CalculateHostFenceWaitPacketSize(eType); + if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) + { + goto cleanup; + } + + _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_SYNC_FENCE_WAIT, + ui32Size, ui32Ordinal, ui64Timestamp); + _SetupHostFenceWaitPacketData(pui8Dest, eType, uiPID, hFence, ui32Data); + + _CommitHWPerfStream(psRgxDevInfo, ui32Size); + 
+cleanup: + _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); +} + +static inline IMG_UINT32 _CalculateHostSWTimelineAdvPacketSize(void) +{ + IMG_UINT32 ui32Size = sizeof(RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA); + return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size); +} + +static inline void +_SetupHostSWTimelineAdvPacketData(IMG_UINT8 *pui8Dest, + IMG_PID uiPID, + PVRSRV_TIMELINE hSWTimeline, + IMG_UINT64 ui64SyncPtIndex) + +{ + RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA *psData = (RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA *) + IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); + + psData->uiPID = uiPID; + psData->hTimeline = hSWTimeline; + psData->ui64SyncPtIndex = ui64SyncPtIndex; +} + +void RGXHWPerfHostPostSWTimelineAdv(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + IMG_PID uiPID, + PVRSRV_TIMELINE hSWTimeline, + IMG_UINT64 ui64SyncPtIndex) +{ + IMG_UINT8 *pui8Dest; + IMG_UINT32 ui32Size; + IMG_UINT32 ui32Ordinal; + IMG_UINT64 ui64Timestamp; + + _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, + NULL, IMG_TRUE); + + _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); + + ui32Size = _CalculateHostSWTimelineAdvPacketSize(); + if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) + { + goto cleanup; + } + + _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_SYNC_SW_TL_ADVANCE, + ui32Size, ui32Ordinal, ui64Timestamp); + _SetupHostSWTimelineAdvPacketData(pui8Dest, uiPID, hSWTimeline, ui64SyncPtIndex); + + _CommitHWPerfStream(psRgxDevInfo, ui32Size); + +cleanup: + _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); + +} + +/****************************************************************************** + * Currently only implemented on Linux. Feature can be enabled to provide + * an interface to 3rd-party kernel modules that wish to access the + * HWPerf data. The API is documented in the rgxapi_km.h header and + * the rgx_hwperf* headers. 
+ *****************************************************************************/ + +/* Internal HWPerf kernel connection/device data object to track the state + * of a client session. + */ +typedef struct +{ + PVRSRV_DEVICE_NODE* psRgxDevNode; + PVRSRV_RGXDEV_INFO* psRgxDevInfo; + + /* TL Open/close state */ + IMG_HANDLE hSD[RGX_HWPERF_MAX_STREAM_ID]; + + /* TL Acquire/release state */ + IMG_PBYTE pHwpBuf[RGX_HWPERF_MAX_STREAM_ID]; /*!< buffer returned to user in acquire call */ + IMG_PBYTE pHwpBufEnd[RGX_HWPERF_MAX_STREAM_ID]; /*!< pointer to end of HwpBuf */ + IMG_PBYTE pTlBuf[RGX_HWPERF_MAX_STREAM_ID]; /*!< buffer obtained via TlAcquireData */ + IMG_PBYTE pTlBufPos[RGX_HWPERF_MAX_STREAM_ID]; /*!< initial position in TlBuf to acquire packets */ + IMG_PBYTE pTlBufRead[RGX_HWPERF_MAX_STREAM_ID]; /*!< pointer to the last packet read */ + IMG_UINT32 ui32AcqDataLen[RGX_HWPERF_MAX_STREAM_ID]; /*!< length of acquired TlBuf */ + IMG_BOOL bRelease[RGX_HWPERF_MAX_STREAM_ID]; /*!< used to determine whether or not to release currently held TlBuf */ + + +} RGX_KM_HWPERF_DEVDATA; + +PVRSRV_ERROR RGXHWPerfLazyConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DEVICE_NODE *psDeviceNode; + RGX_KM_HWPERF_DEVDATA *psDevData; + RGX_HWPERF_DEVICE *psNewHWPerfDevice; + RGX_HWPERF_CONNECTION* psHWPerfConnection; + IMG_BOOL bFWActive = IMG_FALSE; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + /* avoid uninitialised data */ + PVR_ASSERT(*ppsHWPerfConnection == NULL); + PVR_ASSERT(psPVRSRVData); + + /* Allocate connection object */ + psHWPerfConnection = OSAllocZMem(sizeof(*psHWPerfConnection)); + if (!psHWPerfConnection) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + /* early save the return pointer to aid clean-up if failure occurs */ + *ppsHWPerfConnection = psHWPerfConnection; + + psDeviceNode = psPVRSRVData->psDeviceNodeList; + while (psDeviceNode) + { + if (psDeviceNode->eDevState != 
PVRSRV_DEVICE_STATE_ACTIVE) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: HWPerf: Device not currently active. ID:%u", + __func__, + psDeviceNode->sDevId.i32OsDeviceID)); + psDeviceNode = psDeviceNode->psNext; + continue; + } + /* Create a list node to be attached to connection object's list */ + psNewHWPerfDevice = OSAllocMem(sizeof(*psNewHWPerfDevice)); + if (!psNewHWPerfDevice) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + /* Insert node at head of the list */ + psNewHWPerfDevice->psNext = psHWPerfConnection->psHWPerfDevList; + psHWPerfConnection->psHWPerfDevList = psNewHWPerfDevice; + + /* create a device data object for kernel server */ + psDevData = OSAllocZMem(sizeof(*psDevData)); + psNewHWPerfDevice->hDevData = (IMG_HANDLE)psDevData; + if (!psDevData) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + if (OSSNPrintf(psNewHWPerfDevice->pszName, sizeof(psNewHWPerfDevice->pszName), + "hwperf_device_%d", psDeviceNode->sDevId.i32OsDeviceID) < 0) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to form HWPerf device name for device %d", + __func__, + psDeviceNode->sDevId.i32OsDeviceID)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDevData->psRgxDevNode = psDeviceNode; + psDevData->psRgxDevInfo = psDeviceNode->pvDevice; + + psDeviceNode = psDeviceNode->psNext; + + /* At least one device is active */ + bFWActive = IMG_TRUE; + } + + if (!bFWActive) + { + return PVRSRV_ERROR_NOT_READY; + } + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXHWPerfOpen(RGX_HWPERF_CONNECTION *psHWPerfConnection) +{ + RGX_KM_HWPERF_DEVDATA *psDevData; + RGX_HWPERF_DEVICE *psHWPerfDev; + PVRSRV_RGXDEV_INFO *psRgxDevInfo; + PVRSRV_ERROR eError; + IMG_CHAR pszHWPerfFwStreamName[sizeof(PVRSRV_TL_HWPERF_RGX_FW_STREAM) + 5]; + IMG_CHAR pszHWPerfHostStreamName[sizeof(PVRSRV_TL_HWPERF_HOST_SERVER_STREAM) + 5]; + IMG_UINT32 ui32BufSize; + + /* Disable producer callback by default for the Kernel API. 
*/ + IMG_UINT32 ui32StreamFlags = PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING | + PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + /* Validate input argument values supplied by the caller */ + if (!psHWPerfConnection) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psHWPerfDev = psHWPerfConnection->psHWPerfDevList; + while (psHWPerfDev) + { + psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData; + psRgxDevInfo = psDevData->psRgxDevInfo; + + /* In the case where the AppHint has not been set we need to + * initialise the HWPerf resources here. Allocated on-demand + * to reduce RAM foot print on systems not needing HWPerf. + */ + OSLockAcquire(psRgxDevInfo->hHWPerfLock); + if (RGXHWPerfIsInitRequired(psRgxDevInfo)) + { + eError = RGXHWPerfInitOnDemandResources(psRgxDevInfo); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Initialisation of on-demand HWPerfFW resources failed", + __func__)); + OSLockRelease(psRgxDevInfo->hHWPerfLock); + return eError; + } + } + OSLockRelease(psRgxDevInfo->hHWPerfLock); + + OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream); + if (psRgxDevInfo->hHWPerfHostStream == NULL) + { + eError = RGXHWPerfHostInitOnDemandResources(psRgxDevInfo); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Initialisation of on-demand HWPerfHost resources failed", + __func__)); + OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream); + return eError; + } + } + OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream); + + /* form the HWPerf stream name, corresponding to this DevNode; which can make sense in the UM */ + if (OSSNPrintf(pszHWPerfFwStreamName, sizeof(pszHWPerfFwStreamName), "%s%d", + PVRSRV_TL_HWPERF_RGX_FW_STREAM, + psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID) < 0) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to form HWPerf stream name for device %d", + __func__, + psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID)); + return PVRSRV_ERROR_INVALID_PARAMS; + 
} + /* Open the RGX TL stream for reading in this session */ + eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE, + pszHWPerfFwStreamName, + ui32StreamFlags, + &psDevData->hSD[RGX_HWPERF_STREAM_ID0_FW]); + PVR_LOG_RETURN_IF_ERROR(eError, "TLClientOpenStream(RGX_HWPerf)"); + + /* form the HWPerf host stream name, corresponding to this DevNode; which can make sense in the UM */ + if (OSSNPrintf(pszHWPerfHostStreamName, sizeof(pszHWPerfHostStreamName), "%s%d", + PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, + psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID) < 0) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to form HWPerf host stream name for device %d", + __func__, + psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Open the host TL stream for reading in this session */ + eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE, + pszHWPerfHostStreamName, + PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING, + &psDevData->hSD[RGX_HWPERF_STREAM_ID1_HOST]); + PVR_LOG_RETURN_IF_ERROR(eError, "TLClientOpenStream(Host_HWPerf)"); + + /* Allocate a large enough buffer for use during the entire session to + * avoid the need to resize in the Acquire call as this might be in an ISR + * Choose size that can contain at least one packet. 
+ */ + /* Allocate buffer for FW Stream */ + ui32BufSize = FW_STREAM_BUFFER_SIZE; + psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW] = OSAllocMem(ui32BufSize); + if (psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW] == NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + psDevData->pHwpBufEnd[RGX_HWPERF_STREAM_ID0_FW] = psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW]+ui32BufSize; + + /* Allocate buffer for Host Stream */ + ui32BufSize = HOST_STREAM_BUFFER_SIZE; + psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID1_HOST] = OSAllocMem(ui32BufSize); + if (psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID1_HOST] == NULL) + { + OSFreeMem(psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW]); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + psDevData->pHwpBufEnd[RGX_HWPERF_STREAM_ID1_HOST] = psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID1_HOST]+ui32BufSize; + + psHWPerfDev = psHWPerfDev->psNext; + } + + return PVRSRV_OK; +} + + +PVRSRV_ERROR RGXHWPerfConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection) +{ + PVRSRV_ERROR eError; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + eError = RGXHWPerfLazyConnect(ppsHWPerfConnection); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfLazyConnect", e0); + + eError = RGXHWPerfOpen(*ppsHWPerfConnection); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfOpen", e1); + + return PVRSRV_OK; + +e1: /* HWPerfOpen might have opened some, and then failed */ + RGXHWPerfClose(*ppsHWPerfConnection); +e0: /* LazyConnect might have allocated some resources and then failed, + * make sure they are cleaned up */ + RGXHWPerfFreeConnection(ppsHWPerfConnection); + return eError; +} + +/* + PVRSRVRGXControlHWPerfBlocksKM + */ +PVRSRV_ERROR PVRSRVRGXControlHWPerfBlocksKM( + CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_BOOL bEnable, + IMG_UINT32 ui32ArrayLen, + IMG_UINT16 * psBlockIDs) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_KCCB_CMD sKccbCmd; + IMG_UINT32 ui32kCCBCommandSlot; + PVRSRV_RGXDEV_INFO *psDevice; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + 
PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + PVR_DPF_ENTERED; + + PVR_LOG_RETURN_IF_INVALID_PARAM(psBlockIDs != NULL, "psBlockIDs"); + PVR_LOG_RETURN_IF_INVALID_PARAM((ui32ArrayLen>0) && (ui32ArrayLen <= RGXFWIF_HWPERF_CTRL_BLKS_MAX), "ui32ArrayLen"); + + PVR_ASSERT(psDeviceNode); + psDevice = psDeviceNode->pvDevice; + + /* Fill in the command structure with the parameters needed + */ + sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS; + sKccbCmd.uCmdData.sHWPerfCtrlBlks.bEnable = bEnable; + sKccbCmd.uCmdData.sHWPerfCtrlBlks.ui32NumBlocks = ui32ArrayLen; + + OSDeviceMemCopy(sKccbCmd.uCmdData.sHWPerfCtrlBlks.aeBlockIDs, psBlockIDs, sizeof(IMG_UINT16) * ui32ArrayLen); + + + /* Ask the FW to carry out the HWPerf configuration command + */ + eError = RGXScheduleCommandAndGetKCCBSlot(psDevice, + RGXFWIF_DM_GP, + &sKccbCmd, + 0, + PDUMP_FLAGS_CONTINUOUS, + &ui32kCCBCommandSlot); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot"); + + /* Wait for FW to complete */ + eError = RGXWaitForKCCBSlotUpdate(psDevice, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate"); + + +#if defined(DEBUG) + if (bEnable) + PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks have been ENABLED", ui32ArrayLen)); + else + PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks have been DISABLED", ui32ArrayLen)); +#endif + + PVR_DPF_RETURN_OK; +} + +/* + PVRSRVRGXCtrlHWPerfKM + */ +PVRSRV_ERROR PVRSRVRGXCtrlHWPerfKM( + CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_HWPERF_STREAM_ID eStreamId, + IMG_BOOL bToggle, + IMG_UINT64 ui64Mask) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + PVR_DPF_ENTERED; + PVR_ASSERT(psDeviceNode); + + if (eStreamId == RGX_HWPERF_STREAM_ID0_FW) + { + return RGXHWPerfCtrlFwBuffer(psDeviceNode, bToggle, ui64Mask); + } + else if (eStreamId == RGX_HWPERF_STREAM_ID1_HOST) + { + return 
RGXHWPerfCtrlHostBuffer(psDeviceNode, bToggle, (IMG_UINT32) ui64Mask); + } + else if (eStreamId == RGX_HWPERF_STREAM_ID2_CLIENT) + { + IMG_UINT32 ui32Index = (IMG_UINT32) (ui64Mask >> 32); + IMG_UINT32 ui32Mask = (IMG_UINT32) ui64Mask; + + return RGXHWPerfCtrlClientBuffer(bToggle, ui32Index, ui32Mask); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXCtrlHWPerfKM: Unknown stream id.")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + PVR_DPF_RETURN_OK; +} + +PVRSRV_ERROR RGXHWPerfControl( + RGX_HWPERF_CONNECTION *psHWPerfConnection, + RGX_HWPERF_STREAM_ID eStreamId, + IMG_BOOL bToggle, + IMG_UINT64 ui64Mask) +{ + PVRSRV_ERROR eError; + RGX_KM_HWPERF_DEVDATA* psDevData; + RGX_HWPERF_DEVICE* psHWPerfDev; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + /* Validate input argument values supplied by the caller */ + if (!psHWPerfConnection) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psHWPerfDev = psHWPerfConnection->psHWPerfDevList; + + while (psHWPerfDev) + { + psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData; + + /* Call the internal server API */ + eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDevData->psRgxDevNode, eStreamId, bToggle, ui64Mask); + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM"); + + psHWPerfDev = psHWPerfDev->psNext; + } + + return PVRSRV_OK; +} + + +IMG_INTERNAL PVRSRV_ERROR RGXHWPerfToggleCounters( + RGX_HWPERF_CONNECTION *psHWPerfConnection, + IMG_UINT32 ui32NumBlocks, + IMG_UINT16* aeBlockIDs, + IMG_BOOL bToggle, + const char* szFunctionString); + +IMG_INTERNAL PVRSRV_ERROR RGXHWPerfToggleCounters( + RGX_HWPERF_CONNECTION *psHWPerfConnection, + IMG_UINT32 ui32NumBlocks, + IMG_UINT16* aeBlockIDs, + IMG_BOOL bToggle, + const char* szFunctionString) +{ + PVRSRV_ERROR eError; + RGX_KM_HWPERF_DEVDATA* psDevData; + RGX_HWPERF_DEVICE* psHWPerfDev; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + if (!psHWPerfConnection || ui32NumBlocks==0 || !aeBlockIDs) + { + return PVRSRV_ERROR_INVALID_PARAMS; 
/* Convenience wrapper: disable the listed counter blocks on all devices. */
PVRSRV_ERROR RGXHWPerfDisableCounters(
		RGX_HWPERF_CONNECTION *psHWPerfConnection,
		IMG_UINT32 ui32NumBlocks,
		IMG_UINT16* aeBlockIDs)
{
	return RGXHWPerfToggleCounters(psHWPerfConnection,
	                               ui32NumBlocks,
	                               aeBlockIDs,
	                               IMG_FALSE,
	                               __func__);
}


/* Convenience wrapper: enable the listed counter blocks on all devices. */
PVRSRV_ERROR RGXHWPerfEnableCounters(
		RGX_HWPERF_CONNECTION *psHWPerfConnection,
		IMG_UINT32 ui32NumBlocks,
		IMG_UINT16* aeBlockIDs)
{
	return RGXHWPerfToggleCounters(psHWPerfConnection,
	                               ui32NumBlocks,
	                               aeBlockIDs,
	                               IMG_TRUE,
	                               __func__);
}


/*
 * Acquire HWPerf event data for one stream of a session.
 *
 * Acquires a buffer from the TL stream (if one is not already held from a
 * previous partial read), walks the TL packets it contains and copies the
 * payload of each DATA packet into the session's client buffer. Returns
 * that client buffer and its used length via ppBuf/pui32BufLen.
 *
 * If the client buffer fills before all TL packets are consumed, the walk
 * stops early and bRelease is left IMG_FALSE so that the next call resumes
 * from pTlBufRead instead of releasing the TL buffer (see
 * RGXHWPerfReleaseEvents).
 */
PVRSRV_ERROR RGXHWPerfAcquireEvents(
		IMG_HANDLE hDevData,
		RGX_HWPERF_STREAM_ID eStreamId,
		IMG_PBYTE* ppBuf,
		IMG_UINT32* pui32BufLen)
{
	PVRSRV_ERROR eError;
	RGX_KM_HWPERF_DEVDATA* psDevData = (RGX_KM_HWPERF_DEVDATA*)hDevData;
	IMG_PBYTE pDataDest;          /* write cursor into the client buffer */
	IMG_UINT32 ui32TlPackets = 0; /* number of TL packets processed */
	IMG_PBYTE pBufferEnd;
	PVRSRVTL_PPACKETHDR psHDRptr;
	PVRSRVTL_PACKETTYPE ui16TlType;

	PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);

	/* Reset the output arguments in case we discover an error */
	*ppBuf = NULL;
	*pui32BufLen = 0;

	/* Validate input argument values supplied by the caller */
	if (!psDevData || eStreamId >= RGX_HWPERF_MAX_STREAM_ID)
	{
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	if (psDevData->pTlBuf[eStreamId] == NULL)
	{
		/* Acquire some data to read from the HWPerf TL stream */
		eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE,
		                             psDevData->hSD[eStreamId],
		                             &psDevData->pTlBuf[eStreamId],
		                             &psDevData->ui32AcqDataLen[eStreamId]);
		PVR_LOG_RETURN_IF_ERROR(eError, "TLClientAcquireData");

		psDevData->pTlBufPos[eStreamId] = psDevData->pTlBuf[eStreamId];
	}

	/* TL indicates no data exists so return OK and zero. */
	if ((psDevData->pTlBufPos[eStreamId] == NULL) || (psDevData->ui32AcqDataLen[eStreamId] == 0))
	{
		return PVRSRV_OK;
	}

	/* Process each TL packet in the data buffer we have acquired */
	pBufferEnd = psDevData->pTlBuf[eStreamId]+psDevData->ui32AcqDataLen[eStreamId];
	pDataDest = psDevData->pHwpBuf[eStreamId];
	psHDRptr = GET_PACKET_HDR(psDevData->pTlBufPos[eStreamId]);
	psDevData->pTlBufRead[eStreamId] = psDevData->pTlBufPos[eStreamId];
	while (psHDRptr < (PVRSRVTL_PPACKETHDR)((void *)pBufferEnd))
	{
		ui16TlType = GET_PACKET_TYPE(psHDRptr);
		if (ui16TlType == PVRSRVTL_PACKETTYPE_DATA)
		{
			IMG_UINT16 ui16DataLen = GET_PACKET_DATA_LEN(psHDRptr);
			if (0 == ui16DataLen)
			{
				PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfAcquireEvents: ZERO Data in TL data packet: %p", psHDRptr));
			}
			else
			{
				/* Check next packet does not fill buffer */
				if (pDataDest + ui16DataLen > psDevData->pHwpBufEnd[eStreamId])
				{
					/* Client buffer full: stop here and leave the rest of
					 * the TL buffer for the next acquire call. */
					break;
				}

				/* For valid data copy it into the client buffer and move
				 * the write position on */
				OSDeviceMemCopy(pDataDest, GET_PACKET_DATA_PTR(psHDRptr), ui16DataLen);
				pDataDest += ui16DataLen;
			}
		}
		else if (ui16TlType == PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED)
		{
			PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfAcquireEvents: Indication that the transport buffer was full"));
		}
		else
		{
			/* else Ignore padding packet type and others */
			PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfAcquireEvents: Ignoring TL packet, type %d", ui16TlType ));
		}

		/* Update loop variable to the next packet and increment counts */
		psHDRptr = GET_NEXT_PACKET_ADDR(psHDRptr);
		/* Updated to keep track of the next packet to be read. */
		psDevData->pTlBufRead[eStreamId] = (IMG_PBYTE) ((void *)psHDRptr);
		ui32TlPackets++;
	}

	PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfAcquireEvents: TL Packets processed %03d", ui32TlPackets));

	/* Release the TL buffer only if we walked all the way to its end */
	psDevData->bRelease[eStreamId] = IMG_FALSE;
	if (psHDRptr >= (PVRSRVTL_PPACKETHDR)((void *)pBufferEnd))
	{
		psDevData->bRelease[eStreamId] = IMG_TRUE;
	}

	/* Update output arguments with client buffer details and true length */
	*ppBuf = psDevData->pHwpBuf[eStreamId];
	*pui32BufLen = pDataDest - psDevData->pHwpBuf[eStreamId];

	return PVRSRV_OK;
}
*/ + psDevData->pTlBufRead[eStreamId] = (IMG_PBYTE) ((void *)psHDRptr); + ui32TlPackets++; + } + + PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfAcquireEvents: TL Packets processed %03d", ui32TlPackets)); + + psDevData->bRelease[eStreamId] = IMG_FALSE; + if (psHDRptr >= (PVRSRVTL_PPACKETHDR)((void *)pBufferEnd)) + { + psDevData->bRelease[eStreamId] = IMG_TRUE; + } + + /* Update output arguments with client buffer details and true length */ + *ppBuf = psDevData->pHwpBuf[eStreamId]; + *pui32BufLen = pDataDest - psDevData->pHwpBuf[eStreamId]; + + return PVRSRV_OK; +} + + +PVRSRV_ERROR RGXHWPerfReleaseEvents( + IMG_HANDLE hDevData, + RGX_HWPERF_STREAM_ID eStreamId) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + RGX_KM_HWPERF_DEVDATA* psDevData = (RGX_KM_HWPERF_DEVDATA*)hDevData; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + /* Valid input argument values supplied by the caller */ + if (!psDevData || eStreamId >= RGX_HWPERF_MAX_STREAM_ID) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (psDevData->bRelease[eStreamId]) + { + /* Inform the TL that we are done with reading the data. */ + eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, psDevData->hSD[eStreamId]); + psDevData->ui32AcqDataLen[eStreamId] = 0; + psDevData->pTlBuf[eStreamId] = NULL; + } + else + { + psDevData->pTlBufPos[eStreamId] = psDevData->pTlBufRead[eStreamId]; + } + return eError; +} + + +PVRSRV_ERROR RGXHWPerfGetFilter( + IMG_HANDLE hDevData, + RGX_HWPERF_STREAM_ID eStreamId, + IMG_UINT64 *ui64Filter) +{ + PVRSRV_RGXDEV_INFO* psRgxDevInfo = + hDevData ? 
((RGX_KM_HWPERF_DEVDATA*) hDevData)->psRgxDevInfo : NULL; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + /* Valid input argument values supplied by the caller */ + if (!psRgxDevInfo) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid pointer to the RGX device", + __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* No need to take hHWPerfLock here since we are only reading data + * from always existing integers to return to debugfs which is an + * atomic operation. + */ + switch (eStreamId) { + case RGX_HWPERF_STREAM_ID0_FW: + *ui64Filter = psRgxDevInfo->ui64HWPerfFilter; + break; + case RGX_HWPERF_STREAM_ID1_HOST: + *ui64Filter = psRgxDevInfo->ui32HWPerfHostFilter; + break; + default: + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid stream ID", + __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + return PVRSRV_OK; +} + + +PVRSRV_ERROR RGXHWPerfFreeConnection(RGX_HWPERF_CONNECTION** ppsHWPerfConnection) +{ + RGX_HWPERF_DEVICE *psHWPerfDev, *psHWPerfNextDev; + RGX_HWPERF_CONNECTION *psHWPerfConnection = *ppsHWPerfConnection; + + /* if connection object itself is NULL, nothing to free */ + if (psHWPerfConnection == NULL) + { + return PVRSRV_OK; + } + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + psHWPerfNextDev = psHWPerfConnection->psHWPerfDevList; + while (psHWPerfNextDev) + { + psHWPerfDev = psHWPerfNextDev; + psHWPerfNextDev = psHWPerfNextDev->psNext; + + /* Free the session memory */ + if (psHWPerfDev->hDevData) + OSFreeMem(psHWPerfDev->hDevData); + OSFreeMem(psHWPerfDev); + } + OSFreeMem(psHWPerfConnection); + *ppsHWPerfConnection = NULL; + + return PVRSRV_OK; +} + + +PVRSRV_ERROR RGXHWPerfClose(RGX_HWPERF_CONNECTION *psHWPerfConnection) +{ + RGX_HWPERF_DEVICE *psHWPerfDev; + RGX_KM_HWPERF_DEVDATA* psDevData; + IMG_UINT uiStreamId; + PVRSRV_ERROR eError; + + /* Check session connection is not zero */ + if (!psHWPerfConnection) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + PVRSRV_VZ_RET_IF_MODE(GUEST, 
PVRSRV_ERROR_NOT_IMPLEMENTED); + + psHWPerfDev = psHWPerfConnection->psHWPerfDevList; + while (psHWPerfDev) + { + psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData; + for (uiStreamId = 0; uiStreamId < RGX_HWPERF_MAX_STREAM_ID; uiStreamId++) + { + /* If the TL buffer exists they have not called ReleaseData + * before disconnecting so clean it up */ + if (psDevData->pTlBuf[uiStreamId]) + { + /* TLClientReleaseData call and null out the buffer fields + * and length */ + eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, psDevData->hSD[uiStreamId]); + psDevData->ui32AcqDataLen[uiStreamId] = 0; + psDevData->pTlBuf[uiStreamId] = NULL; + PVR_LOG_IF_ERROR(eError, "TLClientReleaseData"); + /* Packets may be lost if release was not required */ + if (!psDevData->bRelease[uiStreamId]) + { + PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfClose: Events in buffer waiting to be read, remaining events may be lost.")); + } + } + + /* Close the TL stream, ignore the error if it occurs as we + * are disconnecting */ + if (psDevData->hSD[uiStreamId]) + { + eError = TLClientCloseStream(DIRECT_BRIDGE_HANDLE, + psDevData->hSD[uiStreamId]); + PVR_LOG_IF_ERROR(eError, "TLClientCloseStream"); + psDevData->hSD[uiStreamId] = NULL; + } + + /* Free the client buffer used in session */ + if (psDevData->pHwpBuf[uiStreamId]) + { + OSFreeMem(psDevData->pHwpBuf[uiStreamId]); + psDevData->pHwpBuf[uiStreamId] = NULL; + } + } + psHWPerfDev = psHWPerfDev->psNext; + } + + return PVRSRV_OK; +} + + +PVRSRV_ERROR RGXHWPerfDisconnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); + + eError = RGXHWPerfClose(*ppsHWPerfConnection); + PVR_LOG_IF_ERROR(eError, "RGXHWPerfClose"); + + eError = RGXHWPerfFreeConnection(ppsHWPerfConnection); + PVR_LOG_IF_ERROR(eError, "RGXHWPerfFreeConnection"); + + return eError; +} + +IMG_UINT64 RGXHWPerfConvertCRTimeStamp( + IMG_UINT32 ui32ClkSpeed, + IMG_UINT64 ui64CorrCRTimeStamp, + 
IMG_UINT64 ui64CorrOSTimeStamp, + IMG_UINT64 ui64CRTimeStamp) +{ + IMG_UINT64 ui64CRDeltaToOSDeltaKNs; + IMG_UINT64 ui64EventOSTimestamp, deltaRgxTimer, delta_ns; + + if (!(ui64CRTimeStamp) || !(ui32ClkSpeed) || !(ui64CorrCRTimeStamp) || !(ui64CorrOSTimeStamp)) + { + return 0; + } + + ui64CRDeltaToOSDeltaKNs = RGXTimeCorrGetConversionFactor(ui32ClkSpeed); + + /* RGX CR timer ticks delta */ + deltaRgxTimer = ui64CRTimeStamp - ui64CorrCRTimeStamp; + /* RGX time delta in nanoseconds */ + delta_ns = RGXFWIF_GET_DELTA_OSTIME_NS(deltaRgxTimer, ui64CRDeltaToOSDeltaKNs); + /* Calculate OS time of HWPerf event */ + ui64EventOSTimestamp = ui64CorrOSTimeStamp + delta_ns; + + return ui64EventOSTimestamp; +} + +/****************************************************************************** + End of file (rgxhwperf_common.c) + ******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgxhwperf_common.h b/drivers/gpu/drm/phytium/octopus/rgxhwperf_common.h new file mode 100644 index 000000000000..0d7a01734632 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxhwperf_common.h @@ -0,0 +1,488 @@ +/*************************************************************************/ /*! +@File +@Title RGX HW Performance header file +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Header for the RGX HWPerf functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RGXHWPERF_COMMON_H_ +#define RGXHWPERF_COMMON_H_ + +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv_error.h" + +#include "device.h" +#include "connection_server.h" +#include "rgxdevice.h" +#include "rgx_hwperf.h" + +/* HWPerf host buffer size constraints in KBs */ +#define HWPERF_HOST_TL_STREAM_SIZE_DEFAULT PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB +#define HWPERF_HOST_TL_STREAM_SIZE_MIN (32U) +#define HWPERF_HOST_TL_STREAM_SIZE_MAX (3072U) + +/****************************************************************************** + * RGX HW Performance decode Bvnc Features for HWPerf + *****************************************************************************/ +PVRSRV_ERROR RGXServerFeatureFlagsToHWPerfFlags(PVRSRV_RGXDEV_INFO *psDevInfo, + RGX_HWPERF_BVNC *psBVNC); + +PVRSRV_ERROR PVRSRVRGXGetHWPerfBvncFeatureFlagsKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_HWPERF_BVNC *psBVNC); + +/****************************************************************************** + * RGX HW Performance Data Transport Routines + *****************************************************************************/ + +PVRSRV_ERROR RGXHWPerfDataStoreCB(PVRSRV_DEVICE_NODE* psDevInfo); + +PVRSRV_ERROR RGXHWPerfInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo); +PVRSRV_ERROR RGXHWPerfInitOnDemandResources(PVRSRV_RGXDEV_INFO* psRgxDevInfo); +void RGXHWPerfDeinit(PVRSRV_RGXDEV_INFO *psRgxDevInfo); +void RGXHWPerfInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode); +void RGXHWPerfClientInitAppHintCallbacks(void); + +/****************************************************************************** + * RGX HW Performance Profiling API(s) + *****************************************************************************/ + +PVRSRV_ERROR PVRSRVRGXCtrlHWPerfKM( + CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + RGX_HWPERF_STREAM_ID eStreamId, + IMG_BOOL bToggle, 
+ IMG_UINT64 ui64Mask); + +PVRSRV_ERROR PVRSRVRGXControlHWPerfBlocksKM( + CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_BOOL bEnable, + IMG_UINT32 ui32ArrayLen, + IMG_UINT16 * psBlockIDs); + +/****************************************************************************** + * RGX HW Performance Host Stream API + *****************************************************************************/ + +PVRSRV_ERROR RGXHWPerfHostInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32BufSizeKB); +PVRSRV_ERROR RGXHWPerfHostInitOnDemandResources(PVRSRV_RGXDEV_INFO* psRgxDevInfo); +void RGXHWPerfHostDeInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo); + +void RGXHWPerfHostSetEventFilter(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + IMG_UINT32 ui32Filter); + +void RGXHWPerfHostPostEnqEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_KICK_TYPE eEnqType, + IMG_UINT32 ui32Pid, + IMG_UINT32 ui32FWDMContext, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + PVRSRV_FENCE hCheckFence, + PVRSRV_FENCE hUpdateFence, + PVRSRV_TIMELINE hUpdateTimeline, + IMG_UINT64 ui64CheckFenceUID, + IMG_UINT64 ui64UpdateFenceUID, + IMG_UINT64 ui64DeadlineInus, + IMG_UINT64 ui64CycleEstimate); + +void RGXHWPerfHostPostAllocEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType, + const IMG_CHAR *psName, + IMG_UINT32 ui32NameSize, + RGX_HWPERF_HOST_ALLOC_DETAIL *puAllocDetail); + +void RGXHWPerfHostPostFreeEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_HOST_RESOURCE_TYPE eFreeType, + IMG_UINT64 ui64UID, + IMG_UINT32 ui32PID, + IMG_UINT32 ui32FWAddr); + +void RGXHWPerfHostPostModifyEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType, + IMG_UINT64 ui64NewUID, + IMG_UINT64 ui64UID1, + IMG_UINT64 ui64UID2, + const IMG_CHAR *psName, + IMG_UINT32 ui32NameSize); + +void RGXHWPerfHostPostUfoEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_UFO_EV eUfoType, + RGX_HWPERF_UFO_DATA_ELEMENT *psUFOData, + const IMG_BOOL bSleepAllowed); + 
+void RGXHWPerfHostPostClkSyncEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo); + +void RGXHWPerfHostPostDeviceInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_DEV_INFO_EV eEvType, + PVRSRV_DEVICE_HEALTH_STATUS eDeviceHealthStatus, + PVRSRV_DEVICE_HEALTH_REASON eDeviceHeathReason); + +void RGXHWPerfHostPostInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_INFO_EV eEvType); + +void RGXHWPerfHostPostFenceWait(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eType, + IMG_PID uiPID, + PVRSRV_FENCE hFence, + IMG_UINT32 ui32Data); + +void RGXHWPerfHostPostSWTimelineAdv(PVRSRV_RGXDEV_INFO *psRgxDevInfo, + IMG_PID uiPID, + PVRSRV_TIMELINE hSWTimeline, + IMG_UINT64 ui64SyncPtIndex); + +IMG_BOOL RGXHWPerfHostIsEventEnabled(PVRSRV_RGXDEV_INFO *psRgxDevInfo, RGX_HWPERF_HOST_EVENT_TYPE eEvent); + +#define _RGX_HWPERF_HOST_FILTER(CTX, EV) \ + (((PVRSRV_RGXDEV_INFO *)CTX->psDeviceNode->pvDevice)->ui32HWPerfHostFilter \ + & RGX_HWPERF_EVENT_MASK_VALUE(EV)) + +#define _RGX_DEVICE_INFO_FROM_CTX(CTX) \ + ((PVRSRV_RGXDEV_INFO *)CTX->psDeviceNode->pvDevice) + +#define _RGX_DEVICE_INFO_FROM_NODE(DEVNODE) \ + ((PVRSRV_RGXDEV_INFO *)DEVNODE->pvDevice) + +/* Deadline and cycle estimate is not supported for all ENQ events */ +#define NO_DEADLINE 0 +#define NO_CYCEST 0 + + +#if defined(SUPPORT_RGX) + +/** + * This macro checks if HWPerfHost and the event are enabled and if they are + * it posts event to the HWPerfHost stream. 
+ * + * @param C Kick context + * @param P Pid of kicking process + * @param X Related FW context + * @param E External job reference + * @param I Job ID + * @param K Kick type + * @param CF Check fence handle + * @param UF Update fence handle + * @param UT Update timeline (on which above UF was created) handle + * @param CHKUID Check fence UID + * @param UPDUID Update fence UID + * @param D Deadline + * @param CE Cycle estimate + */ +#define RGXSRV_HWPERF_ENQ(C, P, X, E, I, K, CF, UF, UT, CHKUID, UPDUID, D, CE) \ + do { \ + if (_RGX_HWPERF_HOST_FILTER(C, RGX_HWPERF_HOST_ENQ)) \ + { \ + RGXHWPerfHostPostEnqEvent(_RGX_DEVICE_INFO_FROM_CTX(C), \ + (K), (P), (X), (E), (I), \ + (CF), (UF), (UT), \ + (CHKUID), (UPDUID), (D), (CE)); \ + } \ + } while (0) + +/** + * This macro checks if HWPerfHost and the event are enabled and if they are + * it posts event to the HWPerfHost stream. + * + * @param I Device Info pointer + * @param T Host UFO event type + * @param D Pointer to UFO data + * @param S Is sleeping allowed? + */ +#define RGXSRV_HWPERF_UFO(I, T, D, S) \ + do { \ + if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_UFO)) \ + { \ + RGXHWPerfHostPostUfoEvent((I), (T), (D), (S)); \ + } \ + } while (0) + +/** + * This macro checks if HWPerfHost and the event are enabled and if they are + * it posts event to the HWPerfHost stream. 
+ * + * @param D Device node pointer + * @param T Host ALLOC event type + * @param FWADDR sync firmware address + * @param N string containing sync name + * @param Z string size including null terminating character + */ +#define RGXSRV_HWPERF_ALLOC(D, T, FWADDR, N, Z) \ + do { \ + if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_ALLOC)) \ + { \ + RGX_HWPERF_HOST_ALLOC_DETAIL uAllocDetail; \ + uAllocDetail.sSyncAlloc.ui32FWAddr = (FWADDR); \ + RGXHWPerfHostPostAllocEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \ + RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \ + (N), (Z), &uAllocDetail); \ + } \ + } while (0) + +/** + * This macro checks if HWPerfHost and the event are enabled and if they are + * it posts event to the HWPerfHost stream. + * + * @param D Device Node pointer + * @param PID ID of allocating process + * @param FENCE PVRSRV_FENCE object + * @param FWADDR sync firmware address + * @param N string containing sync name + * @param Z string size including null terminating character + */ +#define RGXSRV_HWPERF_ALLOC_FENCE(D, PID, FENCE, FWADDR, N, Z) \ + do { \ + if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_ALLOC)) \ + { \ + RGX_HWPERF_HOST_ALLOC_DETAIL uAllocDetail; \ + uAllocDetail.sFenceAlloc.uiPID = (PID); \ + uAllocDetail.sFenceAlloc.hFence = (FENCE); \ + uAllocDetail.sFenceAlloc.ui32CheckPt_FWAddr = (FWADDR); \ + RGXHWPerfHostPostAllocEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \ + RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR, \ + N, Z, &uAllocDetail); \ + } \ + } while (0) + +/** + * @param D Device Node pointer + * @param TL PVRSRV_TIMELINE on which CP is allocated + * @param PID Allocating process ID of this TL/FENCE + * @param FENCE PVRSRV_FENCE as passed to SyncCheckpointResolveFence OR PVRSRV_NO_FENCE + * @param FWADDR sync firmware address + * @param N string containing sync name + * @param Z string size including null terminating character + */ +#define RGXSRV_HWPERF_ALLOC_SYNC_CP(D, TL, PID, FENCE, FWADDR, N, Z) 
\ + do { \ + if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_ALLOC)) \ + { \ + RGX_HWPERF_HOST_ALLOC_DETAIL uAllocDetail; \ + uAllocDetail.sSyncCheckPointAlloc.ui32CheckPt_FWAddr = (FWADDR); \ + uAllocDetail.sSyncCheckPointAlloc.hTimeline = (TL); \ + uAllocDetail.sSyncCheckPointAlloc.uiPID = (PID); \ + uAllocDetail.sSyncCheckPointAlloc.hFence = (FENCE); \ + RGXHWPerfHostPostAllocEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \ + RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP, \ + N, Z, &uAllocDetail); \ + } \ + } while (0) + +/** + * @param D Device Node pointer + * @param PID ID of allocating process + * @param SW_FENCE PVRSRV_FENCE object + * @param SW_TL PVRSRV_TIMELINE on which SW_FENCE is allocated + * @param SPI Sync point index on the SW_TL on which this SW_FENCE is allocated + * @param N string containing sync name + * @param Z string size including null terminating character + */ +#define RGXSRV_HWPERF_ALLOC_SW_FENCE(D, PID, SW_FENCE, SW_TL, SPI, N, Z) \ + do { \ + if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_ALLOC)) \ + { \ + RGX_HWPERF_HOST_ALLOC_DETAIL uAllocDetail; \ + uAllocDetail.sSWFenceAlloc.uiPID = (PID); \ + uAllocDetail.sSWFenceAlloc.hSWFence = (SW_FENCE); \ + uAllocDetail.sSWFenceAlloc.hSWTimeline = (SW_TL); \ + uAllocDetail.sSWFenceAlloc.ui64SyncPtIndex = (SPI); \ + RGXHWPerfHostPostAllocEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \ + RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_SW, \ + N, Z, &uAllocDetail); \ + } \ + } while (0) + +/** + * This macro checks if HWPerfHost and the event are enabled and if they are + * it posts event to the HWPerfHost stream. 
+ * + * @param D Device Node pointer + * @param T Host ALLOC event type + * @param FWADDR sync firmware address + */ +#define RGXSRV_HWPERF_FREE(D, T, FWADDR) \ + do { \ + if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_FREE)) \ + { \ + RGXHWPerfHostPostFreeEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \ + RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \ + (0), (0), (FWADDR)); \ + } \ + } while (0) + +/** + * This macro checks if HWPerfHost and the event are enabled and if they are + * it posts event to the HWPerfHost stream. + * + * @param D Device Node pointer + * @param T Host ALLOC event type + * @param UID ID of input object + * @param PID ID of allocating process + * @param FWADDR sync firmware address + */ +#define RGXSRV_HWPERF_FREE_FENCE_SYNC(D, T, UID, PID, FWADDR) \ + do { \ + if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_FREE)) \ + { \ + RGXHWPerfHostPostFreeEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \ + RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \ + (UID), (PID), (FWADDR)); \ + } \ + } while (0) + +/** + * This macro checks if HWPerfHost and the event are enabled and if they are + * it posts event to the HWPerfHost stream. + * + * @param D Device Node pointer + * @param T Host ALLOC event type + * @param NEWUID ID of output object + * @param UID1 ID of first input object + * @param UID2 ID of second input object + * @param N string containing new object's name + * @param Z string size including null terminating character + */ +#define RGXSRV_HWPERF_MODIFY_FENCE_SYNC(D, T, NEWUID, UID1, UID2, N, Z) \ + do { \ + if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_MODIFY)) \ + { \ + RGXHWPerfHostPostModifyEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \ + RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \ + (NEWUID), (UID1), (UID2), N, Z); \ + } \ + } while (0) + + +/** + * This macro checks if HWPerfHost and the event are enabled and if they are + * it posts event to the HWPerfHost stream. 
+ * + * @param I Device info pointer + */ +#define RGXSRV_HWPERF_CLK_SYNC(I) \ + do { \ + if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_CLK_SYNC)) \ + { \ + RGXHWPerfHostPostClkSyncEvent((I)); \ + } \ + } while (0) + + +/** + * This macro checks if HWPerfHost and the event are enabled and if they are + * it posts a device info event to the HWPerfHost stream. + * + * @param I Device info pointer + * @param T Event type + * @param H Health status enum + * @param R Health reason enum + */ +#define RGXSRV_HWPERF_DEVICE_INFO(I, T, H, R) \ + do { \ + if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_DEV_INFO)) \ + { \ + RGXHWPerfHostPostDeviceInfo((I), (T), (H), (R)); \ + } \ + } while (0) + +/** + * This macro checks if HWPerfHost and the event are enabled and if they are + * it posts event to the HWPerfHost stream. + * + * @param I Device info pointer + * @param T Event type + */ +#define RGXSRV_HWPERF_HOST_INFO(I, T) \ +do { \ + if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_INFO)) \ + { \ + RGXHWPerfHostPostInfo((I), (T)); \ + } \ +} while (0) + +/** + * @param I Device info pointer + * @param T Wait Event type + * @param PID Process ID that the following fence belongs to + * @param F Fence handle + * @param D Data for this wait event type + */ +#define RGXSRV_HWPERF_SYNC_FENCE_WAIT(I, T, PID, F, D) \ +do { \ + if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_SYNC_FENCE_WAIT)) \ + { \ + RGXHWPerfHostPostFenceWait(I, RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_##T, \ + (PID), (F), (D)); \ + } \ +} while (0) + +/** + * @param I Device info pointer + * @param PID Process ID that the following timeline belongs to + * @param F SW-timeline handle + * @param SPI Sync-pt index where this SW-timeline has reached + */ +#define RGXSRV_HWPERF_SYNC_SW_TL_ADV(I, PID, SW_TL, SPI)\ +do { \ + if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_SYNC_SW_TL_ADVANCE)) \ + { \ + RGXHWPerfHostPostSWTimelineAdv((I), (PID), (SW_TL), (SPI)); \ + } \ +} while (0) +#else + 
+#define RGXSRV_HWPERF_ENQ(C, P, X, E, I, K, CF, UF, UT, CHKUID, UPDUID, D, CE) +#define RGXSRV_HWPERF_UFO(I, T, D, S) +#define RGXSRV_HWPERF_ALLOC(D, T, FWADDR, N, Z) +#define RGXSRV_HWPERF_ALLOC_FENCE(D, PID, FENCE, FWADDR, N, Z) +#define RGXSRV_HWPERF_ALLOC_SYNC_CP(D, TL, PID, FENCE, FWADDR, N, Z) +#define RGXSRV_HWPERF_ALLOC_SW_FENCE(D, PID, SW_FENCE, SW_TL, SPI, N, Z) +#define RGXSRV_HWPERF_FREE(D, T, FWADDR) +#define RGXSRV_HWPERF_FREE_FENCE_SYNC(D, T, UID, PID, FWADDR) +#define RGXSRV_HWPERF_MODIFY_FENCE_SYNC(D, T, NEWUID, UID1, UID2, N, Z) +#define RGXSRV_HWPERF_CLK_SYNC(I) +#define RGXSRV_HWPERF_DEVICE_INFO(I, T, H, R) +#define RGXSRV_HWPERF_HOST_INFO(I, T) +#define RGXSRV_HWPERF_SYNC_FENCE_WAIT(I, T, PID, F, D) +#define RGXSRV_HWPERF_SYNC_SW_TL_ADV(I, PID, SW_TL, SPI) + +#endif + +#endif /* RGXHWPERF_COMMON_H_ */ diff --git a/drivers/gpu/drm/phytium/octopus/rgxinit.c b/drivers/gpu/drm/phytium/octopus/rgxinit.c new file mode 100644 index 000000000000..4391ebc9585a --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxinit.c @@ -0,0 +1,4690 @@ +/*************************************************************************/ /*! +@File +@Title Device specific initialisation routines +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Device specific functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if defined(__linux__) +#include +#else +#include +#endif + +#include "log2.h" +#include "img_defs.h" +#include "pvr_notifier.h" +#include "pvrsrv.h" +#include "pvrsrv_bridge_init.h" +#include "syscommon.h" +#include "rgx_heaps.h" +#include "rgxheapconfig.h" +#include "rgxdefs_km.h" +#include "rgxpower.h" +#include "tlstream.h" +#include "pvrsrv_tlstreams.h" + +#include "rgxinit.h" +#include "rgxbvnc.h" +#include "rgxmulticore.h" + +#include "pdump_km.h" +#include "handle.h" +#include "allocmem.h" +#include "devicemem.h" +#include "devicemem_pdump.h" +#include "rgxmem.h" +#include "sync_internal.h" +#include "pvrsrv_apphint.h" +#include "oskm_apphint.h" +#include "rgxfwdbg.h" +#include "info_page.h" + +#include "rgxutils.h" +#include "rgxfwutils.h" +#include "rgx_fwif_km.h" + +#include "rgxmmuinit.h" +#include "devicemem_utils.h" +#include "devicemem_server.h" +#include "physmem_osmem.h" +#include "physmem_lma.h" + +#include "rgxdebug.h" +#include "rgxhwperf.h" +#include "htbserver.h" + +#include "rgx_options.h" +#include "pvrversion.h" + +#include "rgx_compat_bvnc.h" + +#include "rgx_heaps.h" + +#include "rgxta3d.h" +#include "rgxtimecorr.h" +#include "rgxshader.h" + +#if defined(PDUMP) +#include "rgxstartstop.h" +#endif + +#include "rgx_fwif_alignchecks.h" +#include "vmm_pvz_client.h" + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) +#include "rgxworkest.h" +#endif +#if defined(SUPPORT_PDVFS) +#include "rgxpdvfs.h" +#endif + +#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) +#include "validation_soc.h" +#endif + +#if defined(PDUMP) && defined(SUPPORT_SECURITY_VALIDATION) +#include "pdump_physmem.h" +#endif + +static PVRSRV_ERROR RGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode); +static PVRSRV_ERROR RGXDevVersionString(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_CHAR **ppszVersionString); +static PVRSRV_ERROR RGXDevClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_PUINT32 
pui32RGXClockSpeed); +static PVRSRV_ERROR RGXSoftReset(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64ResetValue, IMG_UINT64 ui64SPUResetValue); +static PVRSRV_ERROR RGXPhysMemDeviceHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode); + +#if (RGX_NUM_OS_SUPPORTED > 1) +static PVRSRV_ERROR RGXInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap, IMG_UINT32 ui32OSid); +static void RGXDeInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap); +#endif + +#if defined(SUPPORT_AUTOVZ) +#define RGX_FW_MMU_RESERVED_MEM_SETUP(devnode) (MMU_PX_SETUP) { \ + LMA_PhyContigPagesAlloc, \ + LMA_PhyContigPagesFree, \ + LMA_PhyContigPagesMap, \ + LMA_PhyContigPagesUnmap, \ + LMA_PhyContigPagesClean, \ + OSGetPageShift(), \ + (devnode)->psFwMMUReservedMemArena \ + } +#endif + +/* Services internal heap identification used in this file only */ +#define RGX_FIRMWARE_MAIN_HEAP_IDENT "FwMain" /*!< RGX Main Firmware Heap identifier */ +#define RGX_FIRMWARE_CONFIG_HEAP_IDENT "FwConfig" /*!< RGX Config firmware Heap identifier */ + +#define RGX_MMU_PAGE_SIZE_4KB ( 4 * 1024) +#define RGX_MMU_PAGE_SIZE_16KB ( 16 * 1024) +#define RGX_MMU_PAGE_SIZE_64KB ( 64 * 1024) +#define RGX_MMU_PAGE_SIZE_256KB ( 256 * 1024) +#define RGX_MMU_PAGE_SIZE_1MB (1024 * 1024) +#define RGX_MMU_PAGE_SIZE_2MB (2048 * 1024) +#define RGX_MMU_PAGE_SIZE_MIN RGX_MMU_PAGE_SIZE_4KB +#define RGX_MMU_PAGE_SIZE_MAX RGX_MMU_PAGE_SIZE_2MB + +#define VAR(x) #x + +#define MAX_BVNC_LEN (12) +#define RGXBVNC_BUFFER_SIZE (((PVRSRV_MAX_DEVICES)*(MAX_BVNC_LEN))+1) + +static void RGXDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo); + +#if !defined(NO_HARDWARE) +#if defined(PVRSRV_DEBUG_LISR_EXECUTION) +#define RGX_LISR_INIT (0U) +#define RGX_LISR_DEVICE_NOT_POWERED (1U) +#define RGX_LISR_NOT_TRIGGERED_BY_HW (2U) +#define RGX_LISR_FW_IRQ_COUNTER_NOT_UPDATED (3U) +#define RGX_LISR_PROCESSED (4U) + +typedef IMG_UINT32 LISR_STATUS; + +typedef struct _LISR_EXECUTION_INFO_ +{ + /* status of last LISR invocation */ + LISR_STATUS ui32Status; + + /* snapshot 
from the last LISR invocation */ + IMG_UINT32 aui32InterruptCountSnapshot[RGXFW_THREAD_NUM]; + + /* time of the last LISR invocation */ + IMG_UINT64 ui64Clockns; +} LISR_EXECUTION_INFO; + +/* information about the last execution of the LISR */ +static LISR_EXECUTION_INFO g_sLISRExecutionInfo; + +#define UPDATE_LISR_DBG_STATUS(status) g_sLISRExecutionInfo.ui32Status = (status) +#define UPDATE_LISR_DBG_SNAPSHOT(idx, val) g_sLISRExecutionInfo.aui32InterruptCountSnapshot[idx] = (val) +#define UPDATE_LISR_DBG_TIMESTAMP() g_sLISRExecutionInfo.ui64Clockns = OSClockns64() +#define UPDATE_LISR_DBG_COUNTER() psDeviceNode->ui64nLISR++ +#define UPDATE_MISR_DBG_COUNTER() psDeviceNode->ui64nMISR++ +#else +#define UPDATE_LISR_DBG_STATUS(status) +#define UPDATE_LISR_DBG_SNAPSHOT(idx, val) +#define UPDATE_LISR_DBG_TIMESTAMP() +#define UPDATE_LISR_DBG_COUNTER() +#define UPDATE_MISR_DBG_COUNTER() +#endif /* defined(PVRSRV_DEBUG_LISR_EXECUTION) */ + +/*************************************************************************/ /*! +@Function SampleIRQCount +@Description Utility function taking snapshots of RGX FW interrupt count. +@Input paui32Input A pointer to RGX FW IRQ count array. + Size of the array should be equal to RGX FW thread + count. +@Input paui32Output A pointer to array containing sampled RGX FW + IRQ counts +@Return IMG_BOOL Returns IMG_TRUE, if RGX FW IRQ is not equal to + sampled RGX FW IRQ count for any RGX FW thread. 
+*/ /**************************************************************************/ +static INLINE IMG_BOOL SampleIRQCount(volatile IMG_UINT32 *paui32Input, + volatile IMG_UINT32 *paui32Output) +{ + IMG_UINT32 ui32TID; + IMG_BOOL bReturnVal = IMG_FALSE; + + for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++) + { + if (paui32Output[ui32TID] != paui32Input[ui32TID]) + { + /** + * we are handling any unhandled interrupts here so align the host + * count with the FW count + */ + + /* Sample the current count from the FW _after_ we've cleared the interrupt. */ + paui32Output[ui32TID] = paui32Input[ui32TID]; + bReturnVal = IMG_TRUE; + } + } + + return bReturnVal; +} + +/*************************************************************************/ /*! +@Function RGXHostSafetyEvents +@Description Returns the event status masked to keep only the safety + events handled by the Host +@Input psDevInfo Device Info structure +@Return IMG_UINT32 Status of Host-handled safety events + */ /**************************************************************************/ +static INLINE IMG_UINT32 RGXHostSafetyEvents(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + if (PVRSRV_VZ_MODE_IS(GUEST) || (psDevInfo->ui32HostSafetyEventMask == 0)) + { + return 0; + } + else + { + IMG_UINT32 ui32EventStatus = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_EVENT_STATUS); + + return (ui32EventStatus & psDevInfo->ui32HostSafetyEventMask); + } +} + +/*************************************************************************/ /*! 
+@Function RGXSafetyEventHandler
+@Description Reads, clears and reports the safety events the Host handles
+@Input psDevInfo Device Info structure
+ */ /**************************************************************************/
+static void RGXSafetyEventHandler(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ IMG_UINT32 ui32HostSafetyStatus = RGXHostSafetyEvents(psDevInfo);
+ RGX_CONTEXT_RESET_REASON eResetReason = RGX_CONTEXT_RESET_REASON_NONE;
+
+ if (ui32HostSafetyStatus != 0)
+ {
+ /* clear the safety events handled by the Host */
+ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_EVENT_CLEAR, ui32HostSafetyStatus);
+
+ if (BIT_ISSET(ui32HostSafetyStatus, RGX_CR_EVENT_STATUS_FAULT_FW_SHIFT))
+ {
+ IMG_UINT32 ui32FaultFlag;
+ IMG_UINT32 ui32FaultFW = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FAULT_FW_STATUS);
+ IMG_UINT32 ui32CorrectedBitOffset = RGX_CR_FAULT_FW_STATUS_MMU_CORRECT_SHIFT -
+ RGX_CR_FAULT_FW_STATUS_MMU_DETECT_SHIFT;
+
+ PVR_DPF((PVR_DBG_ERROR, "%s: Firmware safety fault status: 0x%X", __func__, ui32FaultFW));
+
+ for (ui32FaultFlag = 0; ui32FaultFlag < ui32CorrectedBitOffset; ui32FaultFlag++)
+ {
+ if (BIT_ISSET(ui32FaultFW, ui32FaultFlag))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Firmware safety hardware fault detected (0x%lX).",
+ __func__, BIT(ui32FaultFlag)));
+ eResetReason = RGX_CONTEXT_RESET_REASON_FW_ECC_ERR;
+ }
+ else if (BIT_ISSET(ui32FaultFW, ui32FaultFlag + ui32CorrectedBitOffset))
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: Firmware safety hardware fault corrected.(0x%lX).",
+ __func__, BIT(ui32FaultFlag)));
+
+ /* Only report this if we haven't detected a more serious error */
+ if (eResetReason != RGX_CONTEXT_RESET_REASON_FW_ECC_ERR)
+ {
+ eResetReason = RGX_CONTEXT_RESET_REASON_FW_ECC_OK;
+ }
+ }
+ }
+
+ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FAULT_FW_CLEAR, ui32FaultFW);
+ }
+
+ if (BIT_ISSET(ui32HostSafetyStatus, RGX_CR_EVENT_STATUS_WDT_TIMEOUT_SHIFT))
+ {
+ volatile RGXFWIF_POW_STATE ePowState = psDevInfo->psRGXFWIfFwSysData->ePowState;
+
+ if (ePowState == RGXFWIF_POW_ON)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
"%s: Safety Watchdog Trigger !", __func__)); + + /* Only report this if we haven't detected a more serious error */ + if (eResetReason != RGX_CONTEXT_RESET_REASON_FW_ECC_ERR) + { + eResetReason = RGX_CONTEXT_RESET_REASON_FW_WATCHDOG; + } + } + } + + /* Notify client and system layer of any error */ + if (eResetReason != RGX_CONTEXT_RESET_REASON_NONE) + { + PVRSRV_DEVICE_NODE *psDevNode = psDevInfo->psDeviceNode; + PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; + + /* Client notification of device error will be achieved by + * clients calling UM function RGXGetLastDeviceError() */ + psDevInfo->eLastDeviceError = eResetReason; + + /* Notify system layer of any error */ + if (psDevConfig->pfnSysDevErrorNotify) + { + PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = {0}; + + sErrorData.eResetReason = eResetReason; + + psDevConfig->pfnSysDevErrorNotify(psDevConfig, + &sErrorData); + } + } + } +} + +static IMG_BOOL _WaitForInterruptsTimeoutCheck(PVRSRV_RGXDEV_INFO *psDevInfo) +{ +#if defined(PVRSRV_DEBUG_LISR_EXECUTION) + IMG_UINT32 ui32TID; +#endif + + RGXDEBUG_PRINT_IRQ_COUNT(psDevInfo); + +#if defined(PVRSRV_DEBUG_LISR_EXECUTION) + PVR_DPF((PVR_DBG_ERROR, + "Last RGX_LISRHandler State: 0x%08X Clock: %llu", + g_sLISRExecutionInfo.ui32Status, + g_sLISRExecutionInfo.ui64Clockns)); + + for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++) + { + PVR_DPF((PVR_DBG_ERROR, + "RGX FW thread %u: InterruptCountSnapshot: 0x%X", + ui32TID, g_sLISRExecutionInfo.aui32InterruptCountSnapshot[ui32TID])); + } +#else + PVR_DPF((PVR_DBG_ERROR, "No further information available. 
Please enable PVRSRV_DEBUG_LISR_EXECUTION")); +#endif + + return SampleIRQCount(psDevInfo->psRGXFWIfFwOsData->aui32InterruptCount, + psDevInfo->aui32SampleIRQCount); +} + +void RGX_WaitForInterruptsTimeout(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + IMG_BOOL bScheduleMISR; + + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + bScheduleMISR = IMG_TRUE; + } + else + { + bScheduleMISR = _WaitForInterruptsTimeoutCheck(psDevInfo); + } + + if (bScheduleMISR) + { + OSScheduleMISR(psDevInfo->pvMISRData); + + if (psDevInfo->pvAPMISRData != NULL) + { + OSScheduleMISR(psDevInfo->pvAPMISRData); + } + } +} + +static inline IMG_BOOL RGXAckHwIrq(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32IRQStatusReg, + IMG_UINT32 ui32IRQStatusEventMsk, + IMG_UINT32 ui32IRQClearReg, + IMG_UINT32 ui32IRQClearMask) +{ + IMG_UINT32 ui32IRQStatus = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32IRQStatusReg); + + /* clear only the pending bit of the thread that triggered this interrupt */ + ui32IRQClearMask &= ui32IRQStatus; + + if (ui32IRQStatus & ui32IRQStatusEventMsk) + { + /* acknowledge and clear the interrupt */ + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32IRQClearReg, ui32IRQClearMask); + return IMG_TRUE; + } + else + { + /* spurious interrupt */ + return IMG_FALSE; + } +} + +static IMG_BOOL RGXAckIrqDedicated(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + /* status & clearing registers are available on both Host and Guests + * and are agnostic of the Fw CPU type. 
Due to the remappings done by + * the 2nd stage device MMU, all drivers assume they are accessing + * register bank 0 */ + return RGXAckHwIrq(psDevInfo, + RGX_CR_IRQ_OS0_EVENT_STATUS, + ~RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_CLRMSK, + RGX_CR_IRQ_OS0_EVENT_CLEAR, + ~RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_CLRMSK); +} + +static IMG_BOOL RGX_LISRHandler(void *pvData) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = pvData; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_OSDATA *psFwOsData = psDevInfo->psRGXFWIfFwOsData; + IMG_BOOL bIrqAcknowledged = IMG_FALSE; + +#if defined(PVRSRV_DEBUG_LISR_EXECUTION) + IMG_UINT32 ui32TID; + + for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++) + { + UPDATE_LISR_DBG_SNAPSHOT(ui32TID, psFwOsData->aui32InterruptCount[ui32TID]); + } + + UPDATE_LISR_DBG_STATUS(RGX_LISR_INIT); + UPDATE_LISR_DBG_TIMESTAMP(); +#endif + + UPDATE_LISR_DBG_COUNTER(); + + if (psDevInfo->bRGXPowered) + { + IMG_BOOL bSafetyEvent = (RGXHostSafetyEvents(psDevInfo) != 0); + + if ((psDevInfo->pfnRGXAckIrq == NULL) || psDevInfo->pfnRGXAckIrq(psDevInfo) || bSafetyEvent) + { + bIrqAcknowledged = IMG_TRUE; + + if (SampleIRQCount(psFwOsData->aui32InterruptCount, + psDevInfo->aui32SampleIRQCount) || bSafetyEvent) + { + UPDATE_LISR_DBG_STATUS(RGX_LISR_PROCESSED); + UPDATE_MISR_DBG_COUNTER(); + + OSScheduleMISR(psDevInfo->pvMISRData); + +#if defined(SUPPORT_AUTOVZ) + RGXUpdateAutoVzWdgToken(psDevInfo); +#endif + if (psDevInfo->pvAPMISRData != NULL) + { + OSScheduleMISR(psDevInfo->pvAPMISRData); + } + } + else + { + UPDATE_LISR_DBG_STATUS(RGX_LISR_FW_IRQ_COUNTER_NOT_UPDATED); + } + } + else + { + UPDATE_LISR_DBG_STATUS(RGX_LISR_NOT_TRIGGERED_BY_HW); + } + } + else + { + /* AutoVz drivers rebooting while the firmware is active, must acknowledge + * and clear the hw IRQ line before the RGXInit() has finished. 
*/ + if (!(psDevInfo->psDeviceNode->bAutoVzFwIsUp && + (psDevInfo->pfnRGXAckIrq != NULL) && + psDevInfo->pfnRGXAckIrq(psDevInfo))) + { + UPDATE_LISR_DBG_STATUS(RGX_LISR_DEVICE_NOT_POWERED); + } + } + + return bIrqAcknowledged; +} + +static void RGX_MISR_ProcessKCCBDeferredList(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + OS_SPINLOCK_FLAGS uiFlags; + + /* First check whether there are pending commands in Deferred KCCB List */ + OSSpinLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); + if (dllist_is_empty(&psDevInfo->sKCCBDeferredCommandsListHead)) + { + OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); + return; + } + OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); + + /* Powerlock to avoid further Power transition requests + while KCCB deferred list is being processed */ + eError = PVRSRVPowerLock(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire PowerLock (device: %p, error: %s)", + __func__, psDeviceNode, PVRSRVGetErrorString(eError))); + goto _RGX_MISR_ProcessKCCBDeferredList_PowerLock_failed; + } + + /* Try to send deferred KCCB commands Do not Poll from here*/ + eError = RGXSendCommandsFromDeferredList(psDevInfo, IMG_FALSE); + + PVRSRVPowerUnlock(psDeviceNode); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s could not flush Deferred KCCB list, KCCB is full.", + __func__)); + } + +_RGX_MISR_ProcessKCCBDeferredList_PowerLock_failed: + + return; +} + +static void RGX_MISRHandler_CheckFWActivePowerState(void *psDevice) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = psDevice; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + PVRSRV_ERROR eError = PVRSRV_OK; + + if (psFwSysData->ePowState == RGXFWIF_POW_ON || psFwSysData->ePowState == RGXFWIF_POW_IDLE) + { + 
RGX_MISR_ProcessKCCBDeferredList(psDeviceNode); + } + + if (psFwSysData->ePowState == RGXFWIF_POW_IDLE) + { + /* The FW is IDLE and therefore could be shut down */ + eError = RGXActivePowerRequest(psDeviceNode); + + if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED)) + { + if (eError != PVRSRV_ERROR_RETRY) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Failed RGXActivePowerRequest call (device: %p) with %s", + __func__, psDeviceNode, PVRSRVGetErrorString(eError))); + PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); + } + else + { + /* Re-schedule the power down request as it was deferred. */ + OSScheduleMISR(psDevInfo->pvAPMISRData); + } + } + } + +} + +/* Shorter defines to keep the code a bit shorter */ +#define GPU_IDLE RGXFWIF_GPU_UTIL_STATE_IDLE +#define GPU_ACTIVE RGXFWIF_GPU_UTIL_STATE_ACTIVE +#define GPU_BLOCKED RGXFWIF_GPU_UTIL_STATE_BLOCKED +#define MAX_ITERATIONS 64 + +static PVRSRV_ERROR RGXGetGpuUtilStats(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_HANDLE hGpuUtilUser, + RGXFWIF_GPU_UTIL_STATS *psReturnStats) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + volatile RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb; + RGXFWIF_GPU_UTIL_STATS *psAggregateStats; + IMG_UINT64 ui64TimeNow; + IMG_UINT32 ui32Attempts; + IMG_UINT32 ui32Remainder; + + + /***** (1) Initialise return stats *****/ + + psReturnStats->bValid = IMG_FALSE; + psReturnStats->ui64GpuStatIdle = 0; + psReturnStats->ui64GpuStatActive = 0; + psReturnStats->ui64GpuStatBlocked = 0; + psReturnStats->ui64GpuStatCumulative = 0; + + if (hGpuUtilUser == NULL) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + psAggregateStats = hGpuUtilUser; + + + /* Try to acquire GPU utilisation counters and repeat if the FW is in the middle of an update */ + for (ui32Attempts = 0; ui32Attempts < 4; ui32Attempts++) + { + IMG_UINT64 aui64TmpCounters[RGXFWIF_GPU_UTIL_STATE_NUM] = {0}; + IMG_UINT64 ui64LastPeriod = 0, ui64LastWord = 0, 
ui64LastState = 0, ui64LastTime = 0; + IMG_UINT32 i = 0; + + + /***** (2) Get latest data from shared area *****/ + + OSLockAcquire(psDevInfo->hGPUUtilLock); + + /* + * First attempt at detecting if the FW is in the middle of an update. + * This should also help if the FW is in the middle of a 64 bit variable update. + */ + while (((ui64LastWord != psUtilFWCb->ui64LastWord) || + (aui64TmpCounters[ui64LastState] != + psUtilFWCb->aui64StatsCounters[ui64LastState])) && + (i < MAX_ITERATIONS)) + { + ui64LastWord = psUtilFWCb->ui64LastWord; + ui64LastState = RGXFWIF_GPU_UTIL_GET_STATE(ui64LastWord); + aui64TmpCounters[GPU_IDLE] = psUtilFWCb->aui64StatsCounters[GPU_IDLE]; + aui64TmpCounters[GPU_ACTIVE] = psUtilFWCb->aui64StatsCounters[GPU_ACTIVE]; + aui64TmpCounters[GPU_BLOCKED] = psUtilFWCb->aui64StatsCounters[GPU_BLOCKED]; + i++; + } + + OSLockRelease(psDevInfo->hGPUUtilLock); + + if (i == MAX_ITERATIONS) + { + PVR_DPF((PVR_DBG_WARNING, + "RGXGetGpuUtilStats could not get reliable data after trying %u times", i)); + return PVRSRV_ERROR_TIMEOUT; + } + + + /***** (3) Compute return stats *****/ + + /* Update temp counters to account for the time since the last update to the shared ones */ + OSMemoryBarrier(); /* Ensure the current time is read after the loop above */ + ui64TimeNow = RGXFWIF_GPU_UTIL_GET_TIME(RGXTimeCorrGetClockns64()); + ui64LastTime = RGXFWIF_GPU_UTIL_GET_TIME(ui64LastWord); + ui64LastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64LastTime); + aui64TmpCounters[ui64LastState] += ui64LastPeriod; + + /* Get statistics for a user since its last request */ + psReturnStats->ui64GpuStatIdle = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_IDLE], + psAggregateStats->ui64GpuStatIdle); + psReturnStats->ui64GpuStatActive = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_ACTIVE], + psAggregateStats->ui64GpuStatActive); + psReturnStats->ui64GpuStatBlocked = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_BLOCKED], + psAggregateStats->ui64GpuStatBlocked); 
+ psReturnStats->ui64GpuStatCumulative = psReturnStats->ui64GpuStatIdle + + psReturnStats->ui64GpuStatActive + + psReturnStats->ui64GpuStatBlocked; + + if (psAggregateStats->ui64TimeStamp != 0) + { + IMG_UINT64 ui64TimeSinceLastCall = ui64TimeNow - psAggregateStats->ui64TimeStamp; + /* We expect to return at least 75% of the time since the last call in GPU stats */ + IMG_UINT64 ui64MinReturnedStats = ui64TimeSinceLastCall - (ui64TimeSinceLastCall / 4); + + /* + * If the returned stats are substantially lower than the time since + * the last call, then the Host might have read a partial update from the FW. + * If this happens, try sampling the shared counters again. + */ + if (psReturnStats->ui64GpuStatCumulative < ui64MinReturnedStats) + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Return stats (%" IMG_UINT64_FMTSPEC ") too low " + "(call period %" IMG_UINT64_FMTSPEC ")", + __func__, psReturnStats->ui64GpuStatCumulative, ui64TimeSinceLastCall)); + PVR_DPF((PVR_DBG_MESSAGE, "%s: Attempt #%u has failed, trying again", + __func__, ui32Attempts)); + continue; + } + } + + break; + } + + + /***** (4) Update aggregate stats for the current user *****/ + + psAggregateStats->ui64GpuStatIdle += psReturnStats->ui64GpuStatIdle; + psAggregateStats->ui64GpuStatActive += psReturnStats->ui64GpuStatActive; + psAggregateStats->ui64GpuStatBlocked += psReturnStats->ui64GpuStatBlocked; + psAggregateStats->ui64TimeStamp = ui64TimeNow; + + + /***** (5) Convert return stats to microseconds *****/ + + psReturnStats->ui64GpuStatIdle = OSDivide64(psReturnStats->ui64GpuStatIdle, 1000, &ui32Remainder); + psReturnStats->ui64GpuStatActive = OSDivide64(psReturnStats->ui64GpuStatActive, 1000, &ui32Remainder); + psReturnStats->ui64GpuStatBlocked = OSDivide64(psReturnStats->ui64GpuStatBlocked, 1000, &ui32Remainder); + psReturnStats->ui64GpuStatCumulative = OSDivide64(psReturnStats->ui64GpuStatCumulative, 1000, &ui32Remainder); + + /* Check that the return stats make sense */ + if 
(psReturnStats->ui64GpuStatCumulative == 0) + { + /* We can enter here only if all the RGXFWIF_GPU_UTIL_GET_PERIOD + * returned 0. This could happen if the GPU frequency value + * is not well calibrated and the FW is updating the GPU state + * while the Host is reading it. + * When such an event happens frequently, timers or the aggregate + * stats might not be accurate... + */ + PVR_DPF((PVR_DBG_WARNING, "RGXGetGpuUtilStats could not get reliable data.")); + return PVRSRV_ERROR_RESOURCE_UNAVAILABLE; + } + + psReturnStats->bValid = IMG_TRUE; + + return PVRSRV_OK; +} + +PVRSRV_ERROR SORgxGpuUtilStatsRegister(IMG_HANDLE *phGpuUtilUser) +{ + RGXFWIF_GPU_UTIL_STATS *psAggregateStats; + + /* NoStats used since this may be called outside of the register/de-register + * process calls which track memory use. */ + psAggregateStats = OSAllocMemNoStats(sizeof(RGXFWIF_GPU_UTIL_STATS)); + if (psAggregateStats == NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + psAggregateStats->ui64GpuStatIdle = 0; + psAggregateStats->ui64GpuStatActive = 0; + psAggregateStats->ui64GpuStatBlocked = 0; + psAggregateStats->ui64TimeStamp = 0; + + /* Not used */ + psAggregateStats->bValid = IMG_FALSE; + psAggregateStats->ui64GpuStatCumulative = 0; + + *phGpuUtilUser = psAggregateStats; + + return PVRSRV_OK; +} + +PVRSRV_ERROR SORgxGpuUtilStatsUnregister(IMG_HANDLE hGpuUtilUser) +{ + RGXFWIF_GPU_UTIL_STATS *psAggregateStats; + + if (hGpuUtilUser == NULL) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psAggregateStats = hGpuUtilUser; + OSFreeMemNoStats(psAggregateStats); + + return PVRSRV_OK; +} + +/* + RGX MISR Handler +*/ +static void RGX_MISRHandler_Main (void *pvData) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = pvData; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + /* Give the HWPerf service a chance to transfer some data from the FW + * buffer to the host driver transport layer buffer. 
+ */ + RGXHWPerfDataStoreCB(psDeviceNode); + + /* Inform other services devices that we have finished an operation */ + PVRSRVNotifyCommandCompletion(psDeviceNode); + +#if defined(SUPPORT_PDVFS) && defined(RGXFW_META_SUPPORT_2ND_THREAD) + /* + * Firmware CCB only exists for primary FW thread. Only requirement for + * non primary FW thread(s) to communicate with host driver is in the case + * of PDVFS running on non primary FW thread. + * This requirement is directly handled by the below + */ + RGXPDVFSCheckCoreClkRateChange(psDeviceNode->pvDevice); +#endif + + /* Handle Safety events if necessary */ + RGXSafetyEventHandler(psDeviceNode->pvDevice); + + /* Signal the global event object */ + PVRSRVSignalGlobalEO(); + + /* Process the Firmware CCB for pending commands */ + RGXCheckFirmwareCCB(psDeviceNode->pvDevice); + + /* Calibrate the GPU frequency and recorrelate Host and GPU timers (done every few seconds) */ + RGXTimeCorrRestartPeriodic(psDeviceNode); + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* Process Workload Estimation Specific commands from the FW */ + WorkEstCheckFirmwareCCB(psDeviceNode->pvDevice); +#endif + + if (psDevInfo->pvAPMISRData == NULL) + { + RGX_MISR_ProcessKCCBDeferredList(psDeviceNode); + } +} +#endif /* !defined(NO_HARDWARE) */ + +static PVRSRV_ERROR RGXSetPowerParams(PVRSRV_RGXDEV_INFO *psDevInfo, + PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + /* Save information used on power transitions for later + * (when RGXStart and RGXStop are executed) + */ + psDevInfo->sLayerParams.psDevInfo = psDevInfo; + psDevInfo->sLayerParams.psDevConfig = psDevConfig; +#if defined(PDUMP) + psDevInfo->sLayerParams.ui32PdumpFlags = PDUMP_FLAGS_CONTINUOUS; +#endif + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) || + RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) + { + IMG_DEV_PHYADDR sKernelMMUCtxPCAddr; + + if (psDevInfo->psDeviceNode->bAutoVzFwIsUp) + { + /* If AutoVz firmware is up at this stage, the driver 
initialised it + * during a previous life-cycle. The firmware's memory is already pre-mapped + * and the MMU page tables reside in the predetermined memory carveout. + * The Kernel MMU Context created in this life-cycle is a dummy structure + * that is not used for mapping. + * To program the Device's BIF with the correct PC address, use the base + * address of the carveout reserved for MMU mappings as Kernel MMU PC Address */ +#if defined(PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR) + sKernelMMUCtxPCAddr.uiAddr = PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR; +#else + PHYS_HEAP_CONFIG *psFwHeapCfg = FindPhysHeapConfig(psDevConfig, + PHYS_HEAP_USAGE_FW_MAIN); + eError = (psFwHeapCfg != NULL) ? PVRSRV_OK : PVRSRV_ERROR_PHYSHEAP_CONFIG; + PVR_LOG_RETURN_IF_ERROR(eError, "FindPhysHeapConfig(PHYS_HEAP_USAGE_FW_MAIN)"); + + sKernelMMUCtxPCAddr.uiAddr = psFwHeapCfg->sCardBase.uiAddr + + (RGX_FIRMWARE_RAW_HEAP_SIZE * RGX_NUM_OS_SUPPORTED); +#endif /* PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR */ + } + else + { + eError = MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, + &sKernelMMUCtxPCAddr); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire Kernel MMU Ctx page catalog")); + return eError; + } + } + + psDevInfo->sLayerParams.sPCAddr = sKernelMMUCtxPCAddr; + } + +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) + /* Send information used on power transitions to the trusted device as + * in this setup the driver cannot start/stop the GPU and perform resets + */ + if (psDevConfig->pfnTDSetPowerParams) + { + PVRSRV_TD_POWER_PARAMS sTDPowerParams; + + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) || + RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) + { + sTDPowerParams.sPCAddr = psDevInfo->sLayerParams.sPCAddr; + } + + eError = psDevConfig->pfnTDSetPowerParams(psDevConfig->hSysData, + &sTDPowerParams); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: 
TDSetPowerParams not implemented!")); + eError = PVRSRV_ERROR_NOT_IMPLEMENTED; + } +#endif + + return eError; +} + +/* + RGXSystemGetFabricCoherency +*/ +PVRSRV_ERROR RGXSystemGetFabricCoherency(PVRSRV_DEVICE_CONFIG *psDevConfig, + IMG_CPU_PHYADDR sRegsCpuPBase, + IMG_UINT32 ui32RegsSize, + PVRSRV_DEVICE_FABRIC_TYPE *peDevFabricType, + PVRSRV_DEVICE_SNOOP_MODE *peCacheSnoopingMode) +{ + IMG_CHAR *aszLabels[] = {"none", "acelite", "fullace", "unknown"}; + PVRSRV_DEVICE_SNOOP_MODE eAppHintCacheSnoopingMode; + PVRSRV_DEVICE_SNOOP_MODE eDeviceCacheSnoopingMode; + IMG_UINT32 ui32AppHintFabricCoherency; + IMG_UINT32 ui32DeviceFabricCoherency; + void *pvAppHintState = NULL; + IMG_UINT32 ui32AppHintDefault; +#if !defined(NO_HARDWARE) + void *pvRegsBaseKM; + IMG_BOOL bPowerDown = IMG_TRUE; + PVRSRV_ERROR eError; +#endif + + if (!sRegsCpuPBase.uiAddr || !ui32RegsSize) + { + PVR_DPF((PVR_DBG_ERROR, + "RGXSystemGetFabricCoherency: Invalid RGX register base/size parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + +#if !defined(NO_HARDWARE) + pvRegsBaseKM = OSMapPhysToLin(sRegsCpuPBase, ui32RegsSize, PVRSRV_MEMALLOCFLAG_CPU_UNCACHED); + if (!pvRegsBaseKM) + { + PVR_DPF((PVR_DBG_ERROR, + "RGXSystemGetFabricCoherency: Failed to create RGX register mapping")); + return PVRSRV_ERROR_BAD_MAPPING; + } + + if (psDevConfig->psDevNode != NULL) + { + bPowerDown = (psDevConfig->psDevNode->eCurrentSysPowerState == PVRSRV_SYS_POWER_STATE_OFF); + } + + /* Power-up the device as required to read the registers */ + if (bPowerDown) + { + eError = PVRSRVSetSystemPowerState(psDevConfig, PVRSRV_SYS_POWER_STATE_ON); + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVSetSystemPowerState ON"); + } + + /* AXI support within the SoC, bitfield COHERENCY_SUPPORT [1 .. 
0] + value NO_COHERENCY 0x0 {SoC does not support any form of Coherency} + value ACE_LITE_COHERENCY 0x1 {SoC supports ACE-Lite or I/O Coherency} + value FULL_ACE_COHERENCY 0x2 {SoC supports full ACE or 2-Way Coherency} */ + ui32DeviceFabricCoherency = OSReadHWReg32(pvRegsBaseKM, RGX_CR_SOC_AXI); + PVR_LOG(("AXI fabric coherency (RGX_CR_SOC_AXI): 0x%x", ui32DeviceFabricCoherency)); +#if defined(DEBUG) + if (ui32DeviceFabricCoherency & ~((IMG_UINT32)RGX_CR_SOC_AXI_MASKFULL)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid RGX_CR_SOC_AXI value.", __func__)); + return PVRSRV_ERROR_INVALID_DEVICE; + } +#endif + ui32DeviceFabricCoherency &= ~((IMG_UINT32)RGX_CR_SOC_AXI_COHERENCY_SUPPORT_CLRMSK); + ui32DeviceFabricCoherency >>= RGX_CR_SOC_AXI_COHERENCY_SUPPORT_SHIFT; + + if (bPowerDown) + { + eError = PVRSRVSetSystemPowerState(psDevConfig, PVRSRV_SYS_POWER_STATE_OFF); + PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVSetSystemPowerState OFF"); + } + + /* UnMap Regs */ + OSUnMapPhysToLin(pvRegsBaseKM, ui32RegsSize); + + switch (ui32DeviceFabricCoherency) + { + case RGX_CR_SOC_AXI_COHERENCY_SUPPORT_FULL_ACE_COHERENCY: + eDeviceCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CROSS; + *peDevFabricType = PVRSRV_DEVICE_FABRIC_FULLACE; + break; + + case RGX_CR_SOC_AXI_COHERENCY_SUPPORT_ACE_LITE_COHERENCY: + eDeviceCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CPU_ONLY; + *peDevFabricType = PVRSRV_DEVICE_FABRIC_ACELITE; + break; + + case RGX_CR_SOC_AXI_COHERENCY_SUPPORT_NO_COHERENCY: + default: + eDeviceCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE; + *peDevFabricType = PVRSRV_DEVICE_FABRIC_NONE; + break; + } +#else /* !defined(NO_HARDWARE) */ +#if defined(RGX_FEATURE_GPU_CPU_COHERENCY) + *peDevFabricType = PVRSRV_DEVICE_FABRIC_FULLACE; + eDeviceCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CROSS; + ui32DeviceFabricCoherency = RGX_CR_SOC_AXI_COHERENCY_SUPPORT_FULL_ACE_COHERENCY; +#else + *peDevFabricType = PVRSRV_DEVICE_FABRIC_ACELITE; + eDeviceCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CPU_ONLY; + 
ui32DeviceFabricCoherency = RGX_CR_SOC_AXI_COHERENCY_SUPPORT_ACE_LITE_COHERENCY; +#endif +#endif /* !defined(NO_HARDWARE) */ + + OSCreateKMAppHintState(&pvAppHintState); + ui32AppHintDefault = RGX_CR_SOC_AXI_COHERENCY_SUPPORT_FULL_ACE_COHERENCY; + OSGetKMAppHintUINT32(pvAppHintState, FabricCoherencyOverride, + &ui32AppHintDefault, &ui32AppHintFabricCoherency); + OSFreeKMAppHintState(pvAppHintState); + +#if defined(SUPPORT_SECURITY_VALIDATION) + /* Temporarily disable coherency */ + ui32AppHintFabricCoherency = RGX_CR_SOC_AXI_COHERENCY_SUPPORT_NO_COHERENCY; +#endif + + /* Suppress invalid AppHint value */ + switch (ui32AppHintFabricCoherency) + { + case RGX_CR_SOC_AXI_COHERENCY_SUPPORT_NO_COHERENCY: + eAppHintCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE; + break; + + case RGX_CR_SOC_AXI_COHERENCY_SUPPORT_ACE_LITE_COHERENCY: + eAppHintCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CPU_ONLY; + break; + + case RGX_CR_SOC_AXI_COHERENCY_SUPPORT_FULL_ACE_COHERENCY: + eAppHintCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CROSS; + break; + + default: + PVR_DPF((PVR_DBG_ERROR, + "Invalid FabricCoherencyOverride AppHint %d, ignoring", + ui32AppHintFabricCoherency)); + eAppHintCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CROSS; + ui32AppHintFabricCoherency = RGX_CR_SOC_AXI_COHERENCY_SUPPORT_FULL_ACE_COHERENCY; + break; + } + + if (ui32AppHintFabricCoherency < ui32DeviceFabricCoherency) + { + PVR_LOG(("Downgrading device fabric coherency from %s to %s", + aszLabels[ui32DeviceFabricCoherency], + aszLabels[ui32AppHintFabricCoherency])); + eDeviceCacheSnoopingMode = eAppHintCacheSnoopingMode; + } + else if (ui32AppHintFabricCoherency > ui32DeviceFabricCoherency) + { + PVR_DPF((PVR_DBG_WARNING, + "Cannot upgrade device fabric coherency from %s to %s, not supported by device!", + aszLabels[ui32DeviceFabricCoherency], + aszLabels[ui32AppHintFabricCoherency])); + + /* Override requested-for app-hint with actual app-hint value being used */ + ui32AppHintFabricCoherency = ui32DeviceFabricCoherency; + } + 
+ *peCacheSnoopingMode = eDeviceCacheSnoopingMode; + return PVRSRV_OK; +} + +/* + RGXSystemHasFBCDCVersion31 +*/ +static IMG_BOOL RGXSystemHasFBCDCVersion31(PVRSRV_DEVICE_NODE *psDeviceNode) +{ +#if defined(SUPPORT_VALIDATION) + IMG_UINT32 ui32FBCDCVersionOverride = 0; +#endif + + { + +#if defined(SUPPORT_VALIDATION) + if (ui32FBCDCVersionOverride == 2) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: FBCDCVersionOverride forces FBC3.1 but this core doesn't support it!", + __func__)); + } +#endif + +#if !defined(NO_HARDWARE) + if (psDeviceNode->psDevConfig->bHasFBCDCVersion31) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: System uses FBCDC3.1 but GPU doesn't support it!", + __func__)); + } +#endif + } + + return IMG_FALSE; +} + +/* + RGXDevMMUAttributes +*/ +static MMU_DEVICEATTRIBS *RGXDevMMUAttributes(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_BOOL bKernelMemoryCtx) +{ + MMU_DEVICEATTRIBS *psMMUDevAttrs = NULL; + + /* bKernelMemoryCtx is only used for rogue cores */ + PVR_UNREFERENCED_PARAMETER(bKernelMemoryCtx); + + if (psDeviceNode->pfnCheckDeviceFeature) + { + psMMUDevAttrs = psDeviceNode->psMMUDevAttrs; + } + + return psMMUDevAttrs; +} + +/* + * RGXInitDevPart2 + */ +PVRSRV_ERROR RGXInitDevPart2 (PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32DeviceFlags, + IMG_UINT32 ui32HWPerfHostBufSizeKB, + IMG_UINT32 ui32HWPerfHostFilter, + RGX_ACTIVEPM_CONF eActivePMConf, + IMG_UINT32 ui32AvailablePowUnitsMask) +{ + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_DEV_POWER_STATE eDefaultPowerState = PVRSRV_DEV_POWER_STATE_ON; + PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; + IMG_UINT32 ui32AllPowUnitsMask = (1 << psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount) - 1; + + /* Assume system layer has turned power on by this point, required before powering device */ + psDeviceNode->eCurrentSysPowerState = PVRSRV_SYS_POWER_STATE_ON; + +#if defined(TIMING) || defined(DEBUG) + OSUserModeAccessToPerfCountersEn(); +#endif + + PDUMPCOMMENT("RGX 
Initialisation Part 2"); + + /* Initialise Device Flags */ + psDevInfo->ui32DeviceFlags = 0; + RGXSetDeviceFlags(psDevInfo, ui32DeviceFlags, IMG_TRUE); + + /* Allocate DVFS Table (needs to be allocated before GPU trace events + * component is initialised because there is a dependency between them) */ + psDevInfo->psGpuDVFSTable = OSAllocZMem(sizeof(*(psDevInfo->psGpuDVFSTable))); + PVR_LOG_GOTO_IF_NOMEM(psDevInfo->psGpuDVFSTable, eError, ErrorExit); + + /* Initialise HWPerfHost buffer. */ + if (RGXHWPerfHostInit(psDevInfo, ui32HWPerfHostBufSizeKB) == PVRSRV_OK) + { + if (psDevInfo->ui32HWPerfHostFilter == 0) + { + RGXHWPerfHostSetEventFilter(psDevInfo, ui32HWPerfHostFilter); + } + + /* If HWPerf enabled allocate all resources for the host side buffer. */ + if (psDevInfo->ui32HWPerfHostFilter != 0) + { + if (RGXHWPerfHostInitOnDemandResources(psDevInfo) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "HWPerfHost buffer on demand" + " initialisation failed.")); + } + } + } + else + { + PVR_DPF((PVR_DBG_WARNING, "HWPerfHost buffer initialisation failed.")); + } + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* Initialise work estimation lock */ + eError = OSLockCreate(&psDevInfo->hWorkEstLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(WorkEstLock)", ErrorExit); +#endif + + /* Initialise lists of ZSBuffers */ + eError = OSLockCreate(&psDevInfo->hLockZSBuffer); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(LockZSBuffer)", ErrorExit); + dllist_init(&psDevInfo->sZSBufferHead); + psDevInfo->ui32ZSBufferCurrID = 1; + + /* Initialise lists of growable Freelists */ + eError = OSLockCreate(&psDevInfo->hLockFreeList); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(LockFreeList)", ErrorExit); + dllist_init(&psDevInfo->sFreeListHead); + psDevInfo->ui32FreelistCurrID = 1; + + eError = OSLockCreate(&psDevInfo->hDebugFaultInfoLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(DebugFaultInfoLock)", ErrorExit); + + if (GetInfoPageDebugFlagsKM() & 
DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) + { + eError = OSLockCreate(&psDevInfo->hMMUCtxUnregLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(MMUCtxUnregLock)", ErrorExit); + } + + /* Setup GPU utilisation stats update callback */ + eError = OSLockCreate(&psDevInfo->hGPUUtilLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(GPUUtilLock)", ErrorExit); +#if !defined(NO_HARDWARE) + psDevInfo->pfnGetGpuUtilStats = RGXGetGpuUtilStats; +#endif + + eDefaultPowerState = PVRSRV_DEV_POWER_STATE_ON; + psDevInfo->eActivePMConf = eActivePMConf; + + /* Validate the SPU mask and initialize to number of SPUs to power up */ + if ((ui32AvailablePowUnitsMask & ui32AllPowUnitsMask) == 0) + { + PVR_DPF((PVR_DBG_ERROR, + "%s:Invalid SPU mask (All=0x%X, Non Fused=0x%X). At-least one SPU must to be powered up.", + __func__, + ui32AllPowUnitsMask, + ui32AvailablePowUnitsMask)); + PVR_LOG_GOTO_WITH_ERROR("ui32AvailablePowUnitsMask", eError, PVRSRV_ERROR_INVALID_SPU_MASK, ErrorExit); + } + + psDevInfo->ui32AvailablePowUnitsMask = ui32AvailablePowUnitsMask & ui32AllPowUnitsMask; + +#if !defined(NO_HARDWARE) + /* set-up the Active Power Mgmt callback */ + { + RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData; + IMG_BOOL bSysEnableAPM = psRGXData->psRGXTimingInfo->bEnableActivePM; + IMG_BOOL bEnableAPM = ((eActivePMConf == RGX_ACTIVEPM_DEFAULT) && bSysEnableAPM) || + (eActivePMConf == RGX_ACTIVEPM_FORCE_ON); + + if (bEnableAPM && (!PVRSRV_VZ_MODE_IS(NATIVE))) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Active Power Management disabled in virtualization mode", __func__)); + bEnableAPM = false; + } + +#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) && defined(SUPPORT_AUTOVZ) + /* The AutoVz driver enable a virtualisation watchdog not compatible with APM */ + PVR_ASSERT(bEnableAPM == IMG_FALSE); +#endif + + if (bEnableAPM) + { + eError = OSInstallMISR(&psDevInfo->pvAPMISRData, + RGX_MISRHandler_CheckFWActivePowerState, + psDeviceNode, + "RGX_CheckFWActivePower"); 
+ PVR_LOG_GOTO_IF_ERROR(eError, "OSInstallMISR(APMISR)", ErrorExit); + + /* Prevent the device being woken up before there is something to do. */ + eDefaultPowerState = PVRSRV_DEV_POWER_STATE_OFF; + } + } +#endif + + PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_EnableAPM, + RGXQueryAPMState, + RGXSetAPMState, + psDeviceNode, + NULL); + + RGXTimeCorrInitAppHintCallbacks(psDeviceNode); + + /* Register the device with the power manager */ + eError = PVRSRVRegisterPowerDevice(psDeviceNode, + (PVRSRV_VZ_MODE_IS(NATIVE)) ? &RGXPrePowerState : &RGXVzPrePowerState, + (PVRSRV_VZ_MODE_IS(NATIVE)) ? &RGXPostPowerState : &RGXVzPostPowerState, + psDevConfig->pfnPrePowerState, psDevConfig->pfnPostPowerState, + &RGXPreClockSpeedChange, &RGXPostClockSpeedChange, + &RGXForcedIdleRequest, &RGXCancelForcedIdleRequest, + &RGXPowUnitsStateMaskChange, + (IMG_HANDLE)psDeviceNode, + PVRSRV_DEV_POWER_STATE_OFF, + eDefaultPowerState); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVRegisterPowerDevice", ErrorExit); + + eError = RGXSetPowerParams(psDevInfo, psDevConfig); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetPowerParams", ErrorExit); + +#if defined(SUPPORT_VALIDATION) + { + void *pvAppHintState = NULL; + + IMG_UINT32 ui32AppHintDefault; + + OSCreateKMAppHintState(&pvAppHintState); + ui32AppHintDefault = PVRSRV_APPHINT_TESTSLRINTERVAL; + OSGetKMAppHintUINT32(pvAppHintState, TestSLRInterval, + &ui32AppHintDefault, &psDevInfo->ui32TestSLRInterval); + PVR_LOG(("OSGetKMAppHintUINT32(TestSLRInterval) ui32AppHintDefault=%d, psDevInfo->ui32TestSLRInterval=%d", + ui32AppHintDefault, psDevInfo->ui32TestSLRInterval)); + OSFreeKMAppHintState(pvAppHintState); + psDevInfo->ui32TestSLRCount = psDevInfo->ui32TestSLRInterval; + psDevInfo->ui32SLRSkipFWAddr = 0; + } +#endif + +#if defined(PDUMP) +#if defined(NO_HARDWARE) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_DEINIT, "Wait for the FW to signal idle"); + + /* Kick the FW once, in case it still needs to detect and set the idle state */ + PDUMPREG32(RGX_PDUMPREG_NAME, + 
RGX_CR_MTS_SCHEDULE, + RGXFWIF_DM_GP & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK, + PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_DEINIT); + + eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfFwSysDataMemDesc, + offsetof(RGXFWIF_SYSDATA, ePowState), + RGXFWIF_POW_IDLE, + 0xFFFFFFFFU, + PDUMP_POLL_OPERATOR_EQUAL, + PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_DEINIT); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemPDumpDevmemPol32", ErrorExit); +#endif + + /* Run RGXStop with the correct PDump flags to feed the last-frame deinit buffer */ + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_DEINIT, "RGX deinitialisation commands"); + + psDevInfo->sLayerParams.ui32PdumpFlags |= PDUMP_FLAGS_DEINIT | PDUMP_FLAGS_NOHW; + + if (! PVRSRV_VZ_MODE_IS(GUEST)) + { + eError = RGXStop(&psDevInfo->sLayerParams); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXStop", ErrorExit); + } + + psDevInfo->sLayerParams.ui32PdumpFlags &= ~(PDUMP_FLAGS_DEINIT | PDUMP_FLAGS_NOHW); +#endif + +#if !defined(NO_HARDWARE) + eError = RGXInstallProcessQueuesMISR(&psDevInfo->hProcessQueuesMISR, psDeviceNode); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXInstallProcessQueuesMISR", ErrorExit); + + /* Register the interrupt handlers */ + eError = OSInstallMISR(&psDevInfo->pvMISRData, + RGX_MISRHandler_Main, + psDeviceNode, + "RGX_Main"); + PVR_LOG_GOTO_IF_ERROR(eError, "OSInstallMISR(MISR)", ErrorExit); + + /* only the HOST_IRQ bus is supported on octopus for IRQ delivery */ + psDevInfo->pfnRGXAckIrq = RGXAckIrqDedicated; + + eError = SysInstallDeviceLISR(psDevConfig->hSysData, + psDevConfig->ui32IRQ, + PVRSRV_MODNAME, + RGX_LISRHandler, + psDeviceNode, + &psDevInfo->pvLISRData); + PVR_LOG_GOTO_IF_ERROR(eError, "SysInstallDeviceLISR", ErrorExit); +#endif + +#if defined(PDUMP) +/* We need to wrap the check for S7_CACHE_HIERARCHY being supported inside + * #if defined(RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK)...#endif, as the + * RGX_IS_FEATURE_SUPPORTED macro references a bitmask define derived from its + * last parameter which will not exist on architectures which do not 
have this + * feature. + * Note we check for RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK rather than for + * RGX_FEATURE_S7_CACHE_HIERARCHY (which might seem a better choice) as this + * means we can build the kernel driver without having to worry about the BVNC + * (the BIT_MASK is defined in rgx_bvnc_defs_km.h for all BVNCs for a given + * architecture, whereas the FEATURE is only defined for those BVNCs that + * support it). + */ +#if defined(RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK) + if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_CACHE_HIERARCHY))) +#endif + { + if (!PVRSRVSystemSnoopingOfCPUCache(psDevConfig) && + !PVRSRVSystemSnoopingOfDeviceCache(psDevConfig)) + { + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "System has NO cache snooping"); + } + else + { + if (PVRSRVSystemSnoopingOfCPUCache(psDevConfig)) + { + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "System has CPU cache snooping"); + } + if (PVRSRVSystemSnoopingOfDeviceCache(psDevConfig)) + { + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "System has DEVICE cache snooping"); + } + } + } +#endif + + eError = PVRSRVTQLoadShaders(psDeviceNode); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVTQLoadShaders", ErrorExit); + + psDevInfo->bDevInit2Done = IMG_TRUE; + + return PVRSRV_OK; + +ErrorExit: +#if !defined(NO_HARDWARE) + if (psDevInfo->pvLISRData != NULL) + { + (void) SysUninstallDeviceLISR(psDevInfo->pvLISRData); + } + if (psDevInfo->pvMISRData != NULL) + { + (void) OSUninstallMISR(psDevInfo->pvMISRData); + } + if (psDevInfo->hProcessQueuesMISR != NULL) + { + (void) OSUninstallMISR(psDevInfo->hProcessQueuesMISR); + } + if (psDevInfo->pvAPMISRData != NULL) + { + (void) OSUninstallMISR(psDevInfo->pvAPMISRData); + } +#endif /* !defined(NO_HARDWARE) */ + + return eError; +} + +#define VZ_RGX_FW_FILENAME_SUFFIX ".vz" +#define RGX_FW_FILENAME_MAX_SIZE ((sizeof(RGX_FW_FILENAME)+ \ + RGX_BVNC_STR_SIZE_MAX+sizeof(VZ_RGX_FW_FILENAME_SUFFIX))) + +static void _GetFWFileName(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_CHAR 
*pszFWFilenameStr, + IMG_CHAR *pszFWpFilenameStr) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + const IMG_CHAR * const pszFWFilenameSuffix = + PVRSRV_VZ_MODE_IS(NATIVE) ? "" : VZ_RGX_FW_FILENAME_SUFFIX; + + OSSNPrintf(pszFWFilenameStr, RGX_FW_FILENAME_MAX_SIZE, + "%s." RGX_BVNC_STR_FMTSPEC "%s", + RGX_FW_FILENAME, + psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V, + psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C, + pszFWFilenameSuffix); + + OSSNPrintf(pszFWpFilenameStr, RGX_FW_FILENAME_MAX_SIZE, + "%s." RGX_BVNC_STRP_FMTSPEC "%s", + RGX_FW_FILENAME, + psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V, + psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C, + pszFWFilenameSuffix); +} + +PVRSRV_ERROR RGXLoadAndGetFWData(PVRSRV_DEVICE_NODE *psDeviceNode, + OS_FW_IMAGE **ppsRGXFW, + const IMG_BYTE **ppbFWData) +{ + IMG_CHAR aszFWFilenameStr[RGX_FW_FILENAME_MAX_SIZE]; + IMG_CHAR aszFWpFilenameStr[RGX_FW_FILENAME_MAX_SIZE]; + IMG_CHAR *pszLoadedFwStr; + PVRSRV_ERROR eErr; + + /* Prepare the image filenames to use in the following code */ + _GetFWFileName(psDeviceNode, aszFWFilenameStr, aszFWpFilenameStr); + + /* Get pointer to Firmware image */ + pszLoadedFwStr = aszFWFilenameStr; + eErr = OSLoadFirmware(psDeviceNode, pszLoadedFwStr, OS_FW_VERIFY_FUNCTION, ppsRGXFW); + if (eErr == PVRSRV_ERROR_NOT_FOUND) + { + pszLoadedFwStr = aszFWpFilenameStr; + eErr = OSLoadFirmware(psDeviceNode, pszLoadedFwStr, OS_FW_VERIFY_FUNCTION, ppsRGXFW); + if (eErr == PVRSRV_ERROR_NOT_FOUND) + { + pszLoadedFwStr = RGX_FW_FILENAME; + eErr = OSLoadFirmware(psDeviceNode, pszLoadedFwStr, OS_FW_VERIFY_FUNCTION, ppsRGXFW); + if (eErr == PVRSRV_ERROR_NOT_FOUND) + { + PVR_DPF((PVR_DBG_FATAL, "All RGX Firmware image loads failed for '%s' (%s)", + aszFWFilenameStr, PVRSRVGetErrorString(eErr))); + } + } + } + + if (eErr == PVRSRV_OK) + { + PVR_LOG(("RGX Firmware image '%s' loaded", pszLoadedFwStr)); + *ppbFWData = (const 
IMG_BYTE*)OSFirmwareData(*ppsRGXFW); + } + else + { + *ppbFWData = NULL; + } + + return eErr; + +} + +#if defined(PDUMP) +PVRSRV_ERROR RGXInitHWPerfCounters(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + RGXFWIF_KCCB_CMD sKccbCmd; + PVRSRV_ERROR eError; + + /* Fill in the command structure with the parameters needed */ + sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS_DIRECT; + + eError = RGXSendCommandWithPowLock(psDeviceNode->pvDevice, + &sKccbCmd, + PDUMP_FLAGS_CONTINUOUS); + + return eError; +} +#endif + +PVRSRV_ERROR RGXInitCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + /* set up fw memory contexts */ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; + PVRSRV_ERROR eError; + +#if defined(SUPPORT_AUTOVZ) + MMU_PX_SETUP sDefaultPxSetup = psDeviceNode->sDevMMUPxSetup; + + if (PVRSRV_VZ_MODE_IS(HOST) && (!psDeviceNode->bAutoVzFwIsUp)) + { + /* Temporarily swap the MMU Px methods and default LMA region of GPU physheap to + * allow the page tables of all memory mapped by the FwKernel context to be placed + * in a dedicated memory carveout. This should allow the firmware mappings to + * persist after a Host kernel crash or driver reset. 
*/ + + psDeviceNode->sDevMMUPxSetup = RGX_FW_MMU_RESERVED_MEM_SETUP(psDeviceNode); + } +#endif + + /* Set the device fabric coherency before FW context creation */ + eError = RGXSystemGetFabricCoherency(psDevConfig, + psDevConfig->sRegsCpuPBase, + psDevConfig->ui32RegsSize, + &psDeviceNode->eDevFabricType, + &psDevConfig->eCacheSnoopingMode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed RGXSystemGetFabricCoherency (%u)", + __func__, + eError)); + goto failed_to_create_ctx; + } + + /* Register callbacks for creation of device memory contexts */ + psDeviceNode->pfnRegisterMemoryContext = RGXRegisterMemoryContext; + psDeviceNode->pfnUnregisterMemoryContext = RGXUnregisterMemoryContext; + + /* Create the memory context for the firmware. */ + eError = DevmemCreateContext(psDeviceNode, DEVMEM_HEAPCFG_META, + &psDevInfo->psKernelDevmemCtx); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed DevmemCreateContext (%u)", + __func__, + eError)); + goto failed_to_create_ctx; + } + + eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx, RGX_FIRMWARE_MAIN_HEAP_IDENT, + &psDevInfo->psFirmwareMainHeap); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed DevmemFindHeapByName (%u)", + __func__, + eError)); + goto failed_to_find_heap; + } + + eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx, RGX_FIRMWARE_CONFIG_HEAP_IDENT, + &psDevInfo->psFirmwareConfigHeap); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed DevmemFindHeapByName (%u)", + __func__, + eError)); + goto failed_to_find_heap; + } + +#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) + if (PVRSRV_VZ_MODE_IS(HOST)) + { + IMG_UINT32 ui32OSID; + for (ui32OSID = RGX_FIRST_RAW_HEAP_OSID; ui32OSID < RGX_NUM_OS_SUPPORTED; ui32OSID++) + { + IMG_CHAR szHeapName[RA_MAX_NAME_LENGTH]; + + OSSNPrintf(szHeapName, sizeof(szHeapName), RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32OSID); + eError = 
DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx, szHeapName, + &psDevInfo->psGuestFirmwareRawHeap[ui32OSID]); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemFindHeapByName", failed_to_find_heap); + } + } +#endif + +#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) + if (PVRSRV_VZ_MODE_IS(HOST)) + { + IMG_DEV_PHYADDR sPhysHeapBase; + IMG_UINT32 ui32OSID; + + eError = PhysHeapGetDevPAddr(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN], &sPhysHeapBase); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapGetDevPAddr", failed_to_find_heap); + + for (ui32OSID = RGX_FIRST_RAW_HEAP_OSID; ui32OSID < RGX_NUM_OS_SUPPORTED; ui32OSID++) + { + IMG_DEV_PHYADDR sRawFwHeapBase = {sPhysHeapBase.uiAddr + (ui32OSID * RGX_FIRMWARE_RAW_HEAP_SIZE)}; + + eError = RGXFwRawHeapAllocMap(psDeviceNode, + ui32OSID, + sRawFwHeapBase, + RGX_FIRMWARE_RAW_HEAP_SIZE); + if (eError != PVRSRV_OK) + { + for (; ui32OSID > RGX_FIRST_RAW_HEAP_OSID; ui32OSID--) + { + RGXFwRawHeapUnmapFree(psDeviceNode, ui32OSID); + } + PVR_LOG_GOTO_IF_ERROR(eError, "RGXFwRawHeapAllocMap", failed_to_find_heap); + } + } + +#if defined(SUPPORT_AUTOVZ) + /* restore default Px setup */ + psDeviceNode->sDevMMUPxSetup = sDefaultPxSetup; +#endif + } +#else + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + eError = PvzClientMapDevPhysHeap(psDeviceNode->psDevConfig); + PVR_LOG_GOTO_IF_ERROR(eError, "PvzClientMapDevPhysHeap", failed_to_find_heap); + } +#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */ + + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + DevmemHeapSetPremapStatus(psDevInfo->psFirmwareMainHeap, IMG_TRUE); + DevmemHeapSetPremapStatus(psDevInfo->psFirmwareConfigHeap, IMG_TRUE); + } + + return eError; + +failed_to_find_heap: + /* + * Clear the mem context create callbacks before destroying the RGX firmware + * context to avoid a spurious callback. 
+ */ + psDeviceNode->pfnRegisterMemoryContext = NULL; + psDeviceNode->pfnUnregisterMemoryContext = NULL; + DevmemDestroyContext(psDevInfo->psKernelDevmemCtx); + psDevInfo->psKernelDevmemCtx = NULL; +failed_to_create_ctx: + return eError; +} + +void RGXDeInitDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + +#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) + if (PVRSRV_VZ_MODE_IS(HOST)) + { +#if defined(SUPPORT_AUTOVZ) + MMU_PX_SETUP sDefaultPxSetup = psDeviceNode->sDevMMUPxSetup; + + psDeviceNode->sDevMMUPxSetup = RGX_FW_MMU_RESERVED_MEM_SETUP(psDeviceNode); + + if (!psDeviceNode->bAutoVzFwIsUp) +#endif + { + IMG_UINT32 ui32OSID; + + for (ui32OSID = RGX_FIRST_RAW_HEAP_OSID; ui32OSID < RGX_NUM_OS_SUPPORTED; ui32OSID++) + { + RGXFwRawHeapUnmapFree(psDeviceNode, ui32OSID); + } + } +#if defined(SUPPORT_AUTOVZ) + psDeviceNode->sDevMMUPxSetup = sDefaultPxSetup; +#endif + } +#else + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + (void) PvzClientUnmapDevPhysHeap(psDeviceNode->psDevConfig); + + if (psDevInfo->psFirmwareMainHeap) + { + DevmemHeapSetPremapStatus(psDevInfo->psFirmwareMainHeap, IMG_FALSE); + } + if (psDevInfo->psFirmwareConfigHeap) + { + DevmemHeapSetPremapStatus(psDevInfo->psFirmwareConfigHeap, IMG_FALSE); + } + } +#endif + + /* + * Clear the mem context create callbacks before destroying the RGX firmware + * context to avoid a spurious callback. 
+ */ + psDeviceNode->pfnRegisterMemoryContext = NULL; + psDeviceNode->pfnUnregisterMemoryContext = NULL; + + if (psDevInfo->psKernelDevmemCtx) + { + eError = DevmemDestroyContext(psDevInfo->psKernelDevmemCtx); + PVR_ASSERT(eError == PVRSRV_OK); + } +} + +static PVRSRV_ERROR RGXAlignmentCheck(PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 ui32AlignChecksSize, + IMG_UINT32 aui32AlignChecks[]) +{ + static IMG_UINT32 aui32AlignChecksKM[] = {RGXFW_ALIGN_CHECKS_INIT_KM}; + PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice; + IMG_UINT32 i, *paui32FWAlignChecks; + PVRSRV_ERROR eError = PVRSRV_OK; + + /* Skip the alignment check if the driver is guest + since there is no firmware to check against */ + PVRSRV_VZ_RET_IF_MODE(GUEST, eError); + + if (psDevInfo->psRGXFWAlignChecksMemDesc == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: FW Alignment Check Mem Descriptor is NULL", + __func__)); + return PVRSRV_ERROR_ALIGNMENT_ARRAY_NOT_AVAILABLE; + } + + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc, + (void **) &paui32FWAlignChecks); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire kernel address for alignment checks (%u)", + __func__, + eError)); + return eError; + } + + paui32FWAlignChecks += ARRAY_SIZE(aui32AlignChecksKM) + 1; + if (*paui32FWAlignChecks++ != ui32AlignChecksSize) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Mismatch in number of structures to check.", + __func__)); + eError = PVRSRV_ERROR_INVALID_ALIGNMENT; + goto return_; + } + + for (i = 0; i < ui32AlignChecksSize; i++) + { + if (aui32AlignChecks[i] != paui32FWAlignChecks[i]) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Check for structured alignment failed.", + __func__)); + eError = PVRSRV_ERROR_INVALID_ALIGNMENT; + goto return_; + } + } + +return_: + + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc); + + return eError; +} + +static +PVRSRV_ERROR RGXAllocateFWMemoryRegion(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEVMEM_SIZE_T ui32Size, + 
PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags, + const IMG_PCHAR pszText, + DEVMEM_MEMDESC **ppsMemDescPtr) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_DEVMEM_LOG2ALIGN_T uiLog2Align = OSGetPageShift(); + + uiMemAllocFlags = (uiMemAllocFlags | + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) & + RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp); + +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(SUPPORT_SECURITY_VALIDATION) + uiMemAllocFlags &= PVRSRV_MEMALLOCFLAGS_TDFWMASK; +#endif + + PVR_UNREFERENCED_PARAMETER(uiLog2Align); + + PDUMPCOMMENT("Allocate FW %s memory", pszText); + + eError = DevmemFwAllocate(psDeviceNode->pvDevice, + ui32Size, + uiMemAllocFlags, + pszText, + ppsMemDescPtr); + + return eError; +} + +/*! +******************************************************************************* + + @Function RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver + + @Description + + Validate the FW build options against KM driver build options (KM build options only) + + Following check is redundant, because next check checks the same bits. + Redundancy occurs because if client-server are build-compatible and client-firmware are + build-compatible then server-firmware are build-compatible as well. + + This check is left for clarity in error messages if any incompatibility occurs. 
+ + @Input psFwOsInit - FW init data + + @Return PVRSRV_ERROR - depending on mismatch found + +******************************************************************************/ +static PVRSRV_ERROR RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(RGXFWIF_OSINIT *psFwOsInit) +{ +#if !defined(NO_HARDWARE) + IMG_UINT32 ui32BuildOptions, ui32BuildOptionsFWKMPart, ui32BuildOptionsMismatch; + + if (psFwOsInit == NULL) + return PVRSRV_ERROR_INVALID_PARAMS; + + ui32BuildOptions = (RGX_BUILD_OPTIONS_KM & RGX_BUILD_OPTIONS_MASK_FW); + + ui32BuildOptionsFWKMPart = psFwOsInit->sRGXCompChecks.ui32BuildOptions & RGX_BUILD_OPTIONS_MASK_FW; + + /* Check if the FW is missing support for any features required by the driver */ + if (~ui32BuildOptionsFWKMPart & ui32BuildOptions) + { + ui32BuildOptionsMismatch = ui32BuildOptions ^ ui32BuildOptionsFWKMPart; +#if !defined(PVRSRV_STRICT_COMPAT_CHECK) + /*Mask the debug flag option out as we do support combinations of debug vs release in um & km*/ + ui32BuildOptionsMismatch &= ~OPTIONS_DEBUG_MASK; +#endif + if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0) + { + PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and KM driver build options; " + "extra options present in the KM driver: (0x%x). Please check rgx_options.h", + ui32BuildOptions & ui32BuildOptionsMismatch )); + return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH; + } + + if ( (ui32BuildOptionsFWKMPart & ui32BuildOptionsMismatch) != 0) + { + PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware-side and KM driver build options; " + "extra options present in Firmware: (0x%x). Please check rgx_options.h", + ui32BuildOptionsFWKMPart & ui32BuildOptionsMismatch )); + return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH; + } + PVR_DPF((PVR_DBG_WARNING, "RGXDevInitCompatCheck: Firmware and KM driver build options differ.")); + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Firmware and KM driver build options match. 
[ OK ]")); + } +#endif + + return PVRSRV_OK; +} + +/*! +******************************************************************************* + + @Function RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver + + @Description + + Validate FW DDK version against driver DDK version + + @Input psDevInfo - device info + @Input psFwOsInit - FW init data + + @Return PVRSRV_ERROR - depending on mismatch found + +******************************************************************************/ +static PVRSRV_ERROR RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_OSINIT *psFwOsInit) +{ +#if defined(PDUMP)||(!defined(NO_HARDWARE)) + IMG_UINT32 ui32DDKVersion; + PVRSRV_ERROR eError; + + ui32DDKVersion = PVRVERSION_PACK(PVRVERSION_MAJ, PVRVERSION_MIN); +#endif + +#if defined(PDUMP) + PDUMPCOMMENT("Compatibility check: KM driver and FW DDK version"); + eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc, + offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + + offsetof(RGXFWIF_COMPCHECKS, ui32DDKVersion), + ui32DDKVersion, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); + return eError; + } +#endif + +#if !defined(NO_HARDWARE) + if (psFwOsInit == NULL) + return PVRSRV_ERROR_INVALID_PARAMS; + + if (psFwOsInit->sRGXCompChecks.ui32DDKVersion != ui32DDKVersion) + { + PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Incompatible driver DDK version (%u.%u) / Firmware DDK revision (%u.%u).", + PVRVERSION_MAJ, PVRVERSION_MIN, + PVRVERSION_UNPACK_MAJ(psFwOsInit->sRGXCompChecks.ui32DDKVersion), + PVRVERSION_UNPACK_MIN(psFwOsInit->sRGXCompChecks.ui32DDKVersion))); + eError = PVRSRV_ERROR_DDK_VERSION_MISMATCH; + PVR_DBG_BREAK; + return eError; + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: driver DDK version (%u.%u) and Firmware DDK revision (%u.%u) match. 
[ OK ]", + PVRVERSION_MAJ, PVRVERSION_MIN, + PVRVERSION_MAJ, PVRVERSION_MIN)); + } +#endif + + return PVRSRV_OK; +} + +/*! +******************************************************************************* + + @Function RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver + + @Description + + Validate FW DDK build against driver DDK build + + @Input psDevInfo - device info + @Input psFwOsInit - FW init data + + @Return PVRSRV_ERROR - depending on mismatch found + +******************************************************************************/ +static PVRSRV_ERROR RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_OSINIT *psFwOsInit) +{ + PVRSRV_ERROR eError=PVRSRV_OK; +#if defined(PDUMP)||(!defined(NO_HARDWARE)) + IMG_UINT32 ui32DDKBuild; + + ui32DDKBuild = PVRVERSION_BUILD; +#endif + +#if defined(PDUMP) && defined(PVRSRV_STRICT_COMPAT_CHECK) + PDUMPCOMMENT("Compatibility check: KM driver and FW DDK build"); + eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc, + offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + + offsetof(RGXFWIF_COMPCHECKS, ui32DDKBuild), + ui32DDKBuild, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); + return eError; + } +#endif + +#if !defined(NO_HARDWARE) + if (psFwOsInit == NULL) + return PVRSRV_ERROR_INVALID_PARAMS; + + if (psFwOsInit->sRGXCompChecks.ui32DDKBuild != ui32DDKBuild) + { + PVR_LOG(("(WARN) RGXDevInitCompatCheck: Different driver DDK build version (%d) / Firmware DDK build version (%d).", + ui32DDKBuild, psFwOsInit->sRGXCompChecks.ui32DDKBuild)); +#if defined(PVRSRV_STRICT_COMPAT_CHECK) + eError = PVRSRV_ERROR_DDK_BUILD_MISMATCH; + PVR_DBG_BREAK; + return eError; +#endif + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: driver DDK build version (%d) and Firmware DDK build version (%d) match. 
[ OK ]", + ui32DDKBuild, psFwOsInit->sRGXCompChecks.ui32DDKBuild)); + } +#endif + return eError; +} + +/*! +******************************************************************************* + + @Function RGXDevInitCompatCheck_BVNC_FWAgainstDriver + + @Description + + Validate FW BVNC against driver BVNC + + @Input psDevInfo - device info + @Input psFwOsInit - FW init data + + @Return PVRSRV_ERROR - depending on mismatch found + +******************************************************************************/ +static PVRSRV_ERROR RGXDevInitCompatCheck_BVNC_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_OSINIT *psFwOsInit) +{ +#if !defined(NO_HARDWARE) + IMG_BOOL bCompatibleAll, bCompatibleVersion, bCompatibleBVNC; +#endif +#if defined(PDUMP)||(!defined(NO_HARDWARE)) + RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sBVNC); + PVRSRV_ERROR eError; + + sBVNC.ui64BVNC = rgx_bvnc_pack(psDevInfo->sDevFeatureCfg.ui32B, + psDevInfo->sDevFeatureCfg.ui32V, + psDevInfo->sDevFeatureCfg.ui32N, + psDevInfo->sDevFeatureCfg.ui32C); +#endif + +#if defined(PDUMP) + PDUMPCOMMENT("Compatibility check: KM driver and FW BVNC (struct version)"); + eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc, + offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + + offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) + + offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32LayoutVersion), + sBVNC.ui32LayoutVersion, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); + } + + + PDUMPCOMMENT("Compatibility check: KM driver and FW BVNC (BNC part - lower 32 bits)"); + eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc, + offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + + offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) + + offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC), + (IMG_UINT32)sBVNC.ui64BVNC, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + PDUMP_FLAGS_CONTINUOUS); 
+ if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); + } + + PDUMPCOMMENT("Compatibility check: KM driver and FW BVNC (BNC part - Higher 32 bits)"); + eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc, + offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + + offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) + + offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC) + + sizeof(IMG_UINT32), + (IMG_UINT32)(sBVNC.ui64BVNC >> 32), + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); + } +#endif + +#if !defined(NO_HARDWARE) + if (psFwOsInit == NULL) + return PVRSRV_ERROR_INVALID_PARAMS; + + RGX_BVNC_EQUAL(sBVNC, psFwOsInit->sRGXCompChecks.sFWBVNC, bCompatibleAll, bCompatibleVersion, bCompatibleBVNC); + + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + bCompatibleAll = IMG_TRUE; + } + + if (!bCompatibleAll) + { + if (!bCompatibleVersion) + { + PVR_LOG(("(FAIL) %s: Incompatible compatibility struct version of driver (%u) and firmware (%u).", + __func__, + sBVNC.ui32LayoutVersion, + psFwOsInit->sRGXCompChecks.sFWBVNC.ui32LayoutVersion)); + eError = PVRSRV_ERROR_BVNC_MISMATCH; + return eError; + } + + if (!bCompatibleBVNC) + { + PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in KM driver BVNC (%u.%u.%u.%u) and Firmware BVNC (%u.%u.%u.%u)", + RGX_BVNC_PACKED_EXTR_B(sBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_V(sBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_N(sBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_C(sBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_B(psFwOsInit->sRGXCompChecks.sFWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_V(psFwOsInit->sRGXCompChecks.sFWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_N(psFwOsInit->sRGXCompChecks.sFWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_C(psFwOsInit->sRGXCompChecks.sFWBVNC.ui64BVNC))); + eError = PVRSRV_ERROR_BVNC_MISMATCH; + return eError; + } + } 
+ else + { + PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Firmware BVNC and KM driver BNVC match. [ OK ]")); + } +#endif + return PVRSRV_OK; +} + +/*! +******************************************************************************* + + @Function RGXDevInitCompatCheck_BVNC_HWAgainstDriver + + @Description + + Validate HW BVNC against driver BVNC + + @Input psDevInfo - device info + @Input psFwOsInit - FW init data + + @Return PVRSRV_ERROR - depending on mismatch found + +******************************************************************************/ +static PVRSRV_ERROR RGXDevInitCompatCheck_BVNC_HWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_OSINIT *psFwOsInit) +{ +#if defined(PDUMP) || !defined(NO_HARDWARE) + IMG_UINT64 ui64MaskBVNC = RGX_BVNC_PACK_MASK_B | + RGX_BVNC_PACK_MASK_V | + RGX_BVNC_PACK_MASK_N | + RGX_BVNC_PACK_MASK_C; + + PVRSRV_ERROR eError; + RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sSWBVNC); +#endif + +#if !defined(NO_HARDWARE) + RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sHWBVNC); + IMG_BOOL bCompatibleAll, bCompatibleVersion, bCompatibleBVNC; +#endif + +#if defined(PDUMP) + PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; +#endif + + if (psDevInfo->bIgnoreHWReportedBVNC) + { + PVR_LOG(("BVNC compatibility checks between driver and HW are disabled (AppHint override)")); + return PVRSRV_OK; + } + +#if defined(PDUMP) || !defined(NO_HARDWARE) +#if defined(COMPAT_BVNC_MASK_B) + ui64MaskBNC &= ~RGX_BVNC_PACK_MASK_B; +#endif +#if defined(COMPAT_BVNC_MASK_V) + ui64MaskBVNC &= ~RGX_BVNC_PACK_MASK_V; +#endif +#if defined(COMPAT_BVNC_MASK_N) + ui64MaskBVNC &= ~RGX_BVNC_PACK_MASK_N; +#endif +#if defined(COMPAT_BVNC_MASK_C) + ui64MaskBVNC &= ~RGX_BVNC_PACK_MASK_C; +#endif + + sSWBVNC.ui64BVNC = rgx_bvnc_pack(psDevInfo->sDevFeatureCfg.ui32B, + psDevInfo->sDevFeatureCfg.ui32V, + psDevInfo->sDevFeatureCfg.ui32N, + psDevInfo->sDevFeatureCfg.ui32C); + + + + if (ui64MaskBVNC != (RGX_BVNC_PACK_MASK_B | RGX_BVNC_PACK_MASK_V | RGX_BVNC_PACK_MASK_N | 
RGX_BVNC_PACK_MASK_C)) + { + PVR_LOG(("Compatibility checks: Ignoring fields: '%s%s%s%s' of HW BVNC.", + ((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_B))?("B"):("")), + ((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_V))?("V"):("")), + ((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_N))?("N"):("")), + ((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_C))?("C"):("")))); + } +#endif + +#if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Compatibility check: Layout version of compchecks struct"); + eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc, + offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + + offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) + + offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32LayoutVersion), + sSWBVNC.ui32LayoutVersion, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); + return eError; + } + + PDUMPCOM(ui32PDumpFlags, "BVNC compatibility check started"); + if (ui64MaskBVNC & (RGX_BVNC_PACK_MASK_B | RGX_BVNC_PACK_MASK_N | RGX_BVNC_PACK_MASK_C)) + { + PDUMPIF("DISABLE_HWBNC_CHECK", ui32PDumpFlags); + PDUMPELSE("DISABLE_HWBNC_CHECK", ui32PDumpFlags); + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Compatibility check: HW BNC and FW BNC (Lower 32 bits)"); + eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc, + offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + + offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) + + offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC), + (IMG_UINT32)sSWBVNC.ui64BVNC , + (IMG_UINT32)(ui64MaskBVNC & ~RGX_BVNC_PACK_MASK_V), + PDUMP_POLL_OPERATOR_EQUAL, + ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); + return eError; + } + + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Compatibility check: HW BNC and FW BNC (Higher 32 bits)"); + eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc, + 
offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + + offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) + + offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC) + + sizeof(IMG_UINT32), + (IMG_UINT32)(sSWBVNC.ui64BVNC >> 32), + (IMG_UINT32)((ui64MaskBVNC & ~RGX_BVNC_PACK_MASK_V) >> 32), + PDUMP_POLL_OPERATOR_EQUAL, + ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); + return eError; + } + + PDUMPFI("DISABLE_HWBNC_CHECK", ui32PDumpFlags); + } + if (ui64MaskBVNC & RGX_BVNC_PACK_MASK_V) + { + PDUMPIF("DISABLE_HWV_CHECK", ui32PDumpFlags); + PDUMPELSE("DISABLE_HWV_CHECK", ui32PDumpFlags); + + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Compatibility check: HW V and FW V"); + eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc, + offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + + offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) + + offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC) + + ((RGX_BVNC_PACK_SHIFT_V >= 32) ? sizeof(IMG_UINT32) : 0), + (IMG_UINT32)(sSWBVNC.ui64BVNC >> ((RGX_BVNC_PACK_SHIFT_V >= 32) ? 32 : 0)), + RGX_BVNC_PACK_MASK_V >> ((RGX_BVNC_PACK_SHIFT_V >= 32) ? 
32 : 0), + PDUMP_POLL_OPERATOR_EQUAL, + ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); + return eError; + } + PDUMPFI("DISABLE_HWV_CHECK", ui32PDumpFlags); + } + PDUMPCOM(ui32PDumpFlags, "BVNC compatibility check finished"); +#endif + +#if !defined(NO_HARDWARE) + if (psFwOsInit == NULL) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + sHWBVNC = psFwOsInit->sRGXCompChecks.sHWBVNC; + + sHWBVNC.ui64BVNC &= ui64MaskBVNC; + sSWBVNC.ui64BVNC &= ui64MaskBVNC; + + RGX_BVNC_EQUAL(sSWBVNC, sHWBVNC, bCompatibleAll, bCompatibleVersion, bCompatibleBVNC); + + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + bCompatibleAll = IMG_TRUE; + } + + if (!bCompatibleAll) + { + if (!bCompatibleVersion) + { + PVR_LOG(("(FAIL) %s: Incompatible compatibility struct version of HW (%d) and FW (%d).", + __func__, + sHWBVNC.ui32LayoutVersion, + sSWBVNC.ui32LayoutVersion)); + eError = PVRSRV_ERROR_BVNC_MISMATCH; + return eError; + } + + if (!bCompatibleBVNC) + { + PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Incompatible HW BVNC (%d.%d.%d.%d) and FW BVNC (%d.%d.%d.%d).", + RGX_BVNC_PACKED_EXTR_B(sHWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_V(sHWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_N(sHWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_C(sHWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_B(sSWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_V(sSWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_N(sSWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_C(sSWBVNC.ui64BVNC))); + eError = PVRSRV_ERROR_BVNC_MISMATCH; + return eError; + } + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: HW BVNC (%d.%d.%d.%d) and FW BVNC (%d.%d.%d.%d) match. 
[ OK ]", + RGX_BVNC_PACKED_EXTR_B(sHWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_V(sHWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_N(sHWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_C(sHWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_B(sSWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_V(sSWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_N(sSWBVNC.ui64BVNC), + RGX_BVNC_PACKED_EXTR_C(sSWBVNC.ui64BVNC))); + } +#endif + + return PVRSRV_OK; +} + +/*! +******************************************************************************* + + @Function RGXDevInitCompatCheck_METACoreVersion_AgainstDriver + + @Description + + Validate HW META version against driver META version + + @Input psDevInfo - device info + @Input psFwOsInit - FW init data + + @Return PVRSRV_ERROR - depending on mismatch found + +******************************************************************************/ +static PVRSRV_ERROR RGXDevInitCompatCheck_FWProcessorVersion_AgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_OSINIT *psFwOsInit) +{ +#if defined(PDUMP)||(!defined(NO_HARDWARE)) + PVRSRV_ERROR eError; +#endif + +#if defined(PDUMP) + PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; +#endif + + IMG_UINT32 ui32FWCoreIDValue = 0; + IMG_CHAR *pcRGXFW_PROCESSOR = NULL; + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) + { + switch (RGX_GET_FEATURE_VALUE(psDevInfo, META)) + { + case MTP218: ui32FWCoreIDValue = RGX_CR_META_MTP218_CORE_ID_VALUE; break; + case MTP219: ui32FWCoreIDValue = RGX_CR_META_MTP219_CORE_ID_VALUE; break; + case LTP218: ui32FWCoreIDValue = RGX_CR_META_LTP218_CORE_ID_VALUE; break; + case LTP217: ui32FWCoreIDValue = RGX_CR_META_LTP217_CORE_ID_VALUE; break; + default: + PVR_DPF((PVR_DBG_ERROR, "%s: Undefined FW_CORE_ID_VALUE", __func__)); + PVR_ASSERT(0); + } + pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_META; + } + else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) + { + ui32FWCoreIDValue = RGXRISCVFW_CORE_ID_VALUE; + pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_RISCV; + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: 
Undefined FW_CORE_ID_VALUE", __func__)); + PVR_ASSERT(0); + } + +#if defined(PDUMP) + PDUMPIF("DISABLE_HWMETA_CHECK", ui32PDumpFlags); + PDUMPELSE("DISABLE_HWMETA_CHECK", ui32PDumpFlags); + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Compatibility check: KM driver and HW FW Processor version"); + eError = DevmemPDumpDevmemCheck32(psDevInfo->psRGXFWIfOsInitMemDesc, + offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + + offsetof(RGXFWIF_COMPCHECKS, ui32FWProcessorVersion), + ui32FWCoreIDValue, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); + return eError; + } + PDUMPFI("DISABLE_HWMETA_CHECK", ui32PDumpFlags); +#endif + +#if !defined(NO_HARDWARE) + if (psFwOsInit == NULL) + return PVRSRV_ERROR_INVALID_PARAMS; + + if (psFwOsInit->sRGXCompChecks.ui32FWProcessorVersion != ui32FWCoreIDValue) + { + PVR_LOG(("RGXDevInitCompatCheck: Incompatible driver %s version (%d) / HW %s version (%d).", + pcRGXFW_PROCESSOR, + ui32FWCoreIDValue, + pcRGXFW_PROCESSOR, + psFwOsInit->sRGXCompChecks.ui32FWProcessorVersion)); + eError = PVRSRV_ERROR_FWPROCESSOR_MISMATCH; + PVR_DBG_BREAK; + return eError; + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Compatible driver %s version (%d) / HW %s version (%d) [OK].", + pcRGXFW_PROCESSOR, + ui32FWCoreIDValue, + pcRGXFW_PROCESSOR, + psFwOsInit->sRGXCompChecks.ui32FWProcessorVersion)); + } +#endif + return PVRSRV_OK; +} + +/*! 
*******************************************************************************
*******************************************************************************

 @Function		RGXDevInitCompatCheck

 @Description

 Check compatibility of host driver and firmware (DDK and build options)
 for RGX devices at services/device initialisation

 @Input psDeviceNode - device node

 @Return PVRSRV_ERROR - depending on mismatch found

******************************************************************************/
static PVRSRV_ERROR RGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode)
{
	PVRSRV_ERROR eError;
	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
#if !defined(NO_HARDWARE)
	IMG_UINT32 ui32RegValue;
	IMG_UINT8 ui8FwOsCount;
	IMG_UINT32 ui32FwTimeout = MAX_HW_TIME_US;

	/* Poll until the FW flags its compatibility data as updated, or the timeout elapses */
	LOOP_UNTIL_TIMEOUT(ui32FwTimeout)
	{
		if (*((volatile IMG_BOOL *)&psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated))
		{
			/* No need to wait if the FW has already updated the values */
			break;
		}
		OSWaitus(ui32FwTimeout/WAIT_TRY_COUNT);
	} END_LOOP_UNTIL_TIMEOUT();

	ui32RegValue = 0;

	/* On META-based devices (non-guest), verify thread 0 is actually running
	 * before blaming the FW for not publishing compatibility data */
	if ((!PVRSRV_VZ_MODE_IS(GUEST)) &&
		RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
	{
		eError = RGXReadMETAAddr(psDevInfo, META_CR_T0ENABLE_OFFSET, &ui32RegValue);

		if (eError != PVRSRV_OK)
		{
			PVR_LOG(("%s: Reading RGX META register failed. Is the GPU correctly powered up? (%u)",
					__func__, eError));
			goto chk_exit;
		}

		if (!(ui32RegValue & META_CR_TXENABLE_ENABLE_BIT))
		{
			eError = PVRSRV_ERROR_META_THREAD0_NOT_ENABLED;
			PVR_DPF((PVR_DBG_ERROR,
					"%s: RGX META is not running. Is the GPU correctly powered up? %d (%u)",
					__func__, psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated, eError));
			goto chk_exit;
		}
	}

	/* Re-check the updated flag once more after the wait loop */
	if (!*((volatile IMG_BOOL *)&psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated))
	{
		eError = PVRSRV_ERROR_TIMEOUT;
		PVR_DPF((PVR_DBG_ERROR, "%s: GPU Firmware not responding: failed to supply compatibility info (%u)",
				__func__, eError));
		if (PVRSRV_VZ_MODE_IS(GUEST))
		{
			PVR_DPF((PVR_DBG_ERROR, "%s: Potential causes: firmware not initialised or the current Guest driver's "
					"OsConfig initialisation data was not accepted by the firmware", __func__));
		}
		goto chk_exit;
	}

	/* Warn (non-fatal) if the FW supports a different OS count than this driver expects */
	ui8FwOsCount = psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.sInitOptions.ui8OsCountSupport;
	if ((PVRSRV_VZ_MODE_IS(NATIVE) && (ui8FwOsCount > 1)) ||
		(PVRSRV_VZ_MODE_IS(HOST) && (ui8FwOsCount != RGX_NUM_OS_SUPPORTED)))
	{
		PVR_DPF((PVR_DBG_WARNING, "%s: Mismatch between the number of Operating Systems supported by KM driver (%d) and FW (%d)",
				__func__, (PVRSRV_VZ_MODE_IS(NATIVE)) ? (1) : (RGX_NUM_OS_SUPPORTED), ui8FwOsCount));
	}
#endif /* !defined(NO_HARDWARE) */

	/* Run each individual compatibility check; the first failure aborts */
	eError = RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(psDevInfo->psRGXFWIfOsInit);
	if (eError != PVRSRV_OK)
	{
		goto chk_exit;
	}

	eError = RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit);
	if (eError != PVRSRV_OK)
	{
		goto chk_exit;
	}

	eError = RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit);
	if (eError != PVRSRV_OK)
	{
		goto chk_exit;
	}

	/* BVNC checks are skipped for guest drivers (host owns the hardware) */
	if (!PVRSRV_VZ_MODE_IS(GUEST))
	{
		eError = RGXDevInitCompatCheck_BVNC_FWAgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit);
		if (eError != PVRSRV_OK)
		{
			goto chk_exit;
		}

		eError = RGXDevInitCompatCheck_BVNC_HWAgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit);
		if (eError != PVRSRV_OK)
		{
			goto chk_exit;
		}
	}

	eError = RGXDevInitCompatCheck_FWProcessorVersion_AgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit);
	if (eError != PVRSRV_OK)
	{
		goto chk_exit;
	}

	eError = PVRSRV_OK;
chk_exit:

	return eError;
}

/* Write the soft-reset registers with the given values and read them back to
 * fence the writes (clears the SOCIF pipeline). The SPU register only exists
 * on POWER_ISLAND_VERSION 1 hardware. */
static void _RGXSoftResetToggle(PVRSRV_RGXDEV_INFO *psDevInfo,
				IMG_UINT64 ui64ResetValue,
				IMG_UINT64 ui64SPUResetValue)
{
	OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET, ui64ResetValue);
	if (RGX_GET_FEATURE_VALUE(psDevInfo, POWER_ISLAND_VERSION) == 1)
	{
		OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET_SPU, ui64SPUResetValue);
	}

	/* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */
	(void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET);
	if (RGX_GET_FEATURE_VALUE(psDevInfo, POWER_ISLAND_VERSION) == 1)
	{
		(void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET_SPU);
	}
}

/**************************************************************************/ /*!
@Function       RGXSoftReset
@Description    Resets some modules of the RGX device
@Input          psDeviceNode     Device node
@Input          ui64ResetValue   A mask for which each bit set corresponds
                                 to a module to reset (via the SOFT_RESET
                                 register).
@Input          ui64SPUResetValue A mask for which each bit set corresponds
                                 to a module to reset (via the SOFT_RESET_SPU
                                 register).
@Return         PVRSRV_ERROR
*/ /***************************************************************************/
static PVRSRV_ERROR RGXSoftReset(PVRSRV_DEVICE_NODE *psDeviceNode,
				IMG_UINT64 ui64ResetValue,
				IMG_UINT64 ui64SPUResetValue)
{
	PVRSRV_RGXDEV_INFO *psDevInfo;

	PVR_ASSERT(psDeviceNode != NULL);
	PVR_ASSERT(psDeviceNode->pvDevice != NULL);
	PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);

	/* Reject masks with bits outside the registers' valid fields */
	if (((ui64ResetValue & RGX_CR_SOFT_RESET_MASKFULL) != ui64ResetValue)
		|| (ui64SPUResetValue & RGX_CR_SOFT_RESET_SPU_MASKFULL) != ui64SPUResetValue)
	{
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	/* The device info */
	psDevInfo = psDeviceNode->pvDevice;

	/* Set in soft-reset */
	_RGXSoftResetToggle(psDevInfo, ui64ResetValue, ui64SPUResetValue);

	/* Take the modules out of reset... */
	_RGXSoftResetToggle(psDevInfo, 0, 0);

	return PVRSRV_OK;
}

/* Allocate the firmware image memory regions (code, data and the optional
 * coremem code/data sections) and acquire their device virtual addresses.
 * On any failure, everything allocated so far is unwound via the goto chain. */
PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE *psDeviceNode,
				IMG_DEVMEM_SIZE_T uiFWCodeLen,
				IMG_DEVMEM_SIZE_T uiFWDataLen,
				IMG_DEVMEM_SIZE_T uiFWCorememCodeLen,
				IMG_DEVMEM_SIZE_T uiFWCorememDataLen)
{
	PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags;
	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
	PVRSRV_ERROR eError;

	/*
	 * Set up Allocation for FW code section
	 */
	uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
	                  PVRSRV_MEMALLOCFLAG_GPU_READABLE |
	                  PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
	                  PVRSRV_MEMALLOCFLAG_CPU_READABLE |
	                  PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
	                  PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
	                  PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
	                  PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
	                  PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_CODE);

	eError = RGXAllocateFWMemoryRegion(psDeviceNode,
	                                   uiFWCodeLen,
	                                   uiMemAllocFlags,
	                                   "FwCodeRegion",
	                                   &psDevInfo->psRGXFWCodeMemDesc);

	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,
		         "Failed to allocate fw code mem (%u)",
		         eError));
		goto failFWCodeMemDescAlloc;
	}

	eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWCodeMemDesc,
	                                  &psDevInfo->sFWCodeDevVAddrBase);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,
		         "Failed to acquire devVAddr for fw code mem (%u)",
		         eError));
		goto failFWCodeMemDescAqDevVirt;
	}

	/*
	 * The FW code must be the first allocation in the firmware heap, otherwise
	 * the bootloader will not work (the FW will not be able to find the bootloader).
	 */
	PVR_ASSERT(psDevInfo->sFWCodeDevVAddrBase.uiAddr == RGX_FIRMWARE_HOST_MAIN_HEAP_BASE);

	/*
	 * Set up Allocation for FW data section
	 */
	uiMemAllocFlags = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
	                   PVRSRV_MEMALLOCFLAG_GPU_READABLE |
	                   PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
	                   PVRSRV_MEMALLOCFLAG_CPU_READABLE |
	                   PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
	                   PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
	                   PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
	                   PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
	                   PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC |
	                   PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
	                   PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_PRIV_DATA)) &
	                  RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp);

	eError = RGXAllocateFWMemoryRegion(psDeviceNode,
	                                   uiFWDataLen,
	                                   uiMemAllocFlags,
	                                   "FwDataRegion",
	                                   &psDevInfo->psRGXFWDataMemDesc);

	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,
		         "Failed to allocate fw data mem (%u)",
		         eError));
		goto failFWDataMemDescAlloc;
	}

	eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWDataMemDesc,
	                                  &psDevInfo->sFWDataDevVAddrBase);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,
		         "Failed to acquire devVAddr for fw data mem (%u)",
		         eError));
		goto failFWDataMemDescAqDevVirt;
	}

	if (uiFWCorememCodeLen != 0)
	{
		/*
		 * Set up Allocation for FW coremem code section
		 */
		uiMemAllocFlags = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
		                   PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
		                   PVRSRV_MEMALLOCFLAG_GPU_READABLE |
		                   PVRSRV_MEMALLOCFLAG_CPU_READABLE |
		                   PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
		                   PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
		                   PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
		                   PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC |
		                   PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
		                   PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_CODE)) &
		                  RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp);

		eError = RGXAllocateFWMemoryRegion(psDeviceNode,
		                                   uiFWCorememCodeLen,
		                                   uiMemAllocFlags,
		                                   "FwCorememCodeRegion",
		                                   &psDevInfo->psRGXFWCorememCodeMemDesc);

		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,
			         "Failed to allocate fw coremem code mem, size: %" IMG_INT64_FMTSPECd ", flags: %" PVRSRV_MEMALLOCFLAGS_FMTSPEC " (%u)",
			         uiFWCorememCodeLen, uiMemAllocFlags, eError));
			goto failFWCorememMemDescAlloc;
		}

		eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc,
		                                  &psDevInfo->sFWCorememCodeDevVAddrBase);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,
			         "Failed to acquire devVAddr for fw coremem mem code (%u)",
			         eError));
			goto failFWCorememCodeMemDescAqDevVirt;
		}

		eError = RGXSetFirmwareAddress(&psDevInfo->sFWCorememCodeFWAddr,
		                               psDevInfo->psRGXFWCorememCodeMemDesc,
		                               0, RFW_FWADDR_NOREF_FLAG);
		PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:1", failFWCorememCodeMemDescFwAddr);
	}
	else
	{
		/* No coremem code section on this configuration */
		psDevInfo->sFWCorememCodeDevVAddrBase.uiAddr = 0;
		psDevInfo->sFWCorememCodeFWAddr.ui32Addr = 0;
	}

	if (uiFWCorememDataLen != 0)
	{
		/*
		 * Set up Allocation for FW coremem data section
		 */
		uiMemAllocFlags = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
		                   PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
		                   PVRSRV_MEMALLOCFLAG_GPU_READABLE |
		                   PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
		                   PVRSRV_MEMALLOCFLAG_CPU_READABLE |
		                   PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
		                   PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
		                   PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
		                   PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_PRIV_DATA)) &
		                  RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp);

		eError = RGXAllocateFWMemoryRegion(psDeviceNode,
		                                   uiFWCorememDataLen,
		                                   uiMemAllocFlags,
		                                   "FwCorememDataRegion",
		                                   &psDevInfo->psRGXFWIfCorememDataStoreMemDesc);

		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,
			         "Failed to allocate fw coremem data mem, "
			         "size: %" IMG_INT64_FMTSPECd ", flags: %" PVRSRV_MEMALLOCFLAGS_FMTSPEC " (%u)",
			         uiFWCorememDataLen,
			         uiMemAllocFlags,
			         eError));
			goto failFWCorememDataMemDescAlloc;
		}

		eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc,
		                                  &psDevInfo->sFWCorememDataStoreDevVAddrBase);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,
			         "Failed to acquire devVAddr for fw coremem mem data (%u)",
			         eError));
			goto failFWCorememDataMemDescAqDevVirt;
		}

		eError = RGXSetFirmwareAddress(&psDevInfo->sFWCorememDataStoreFWAddr,
		                               psDevInfo->psRGXFWIfCorememDataStoreMemDesc,
		                               0, RFW_FWADDR_NOREF_FLAG);
		PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:2", failFWCorememDataMemDescFwAddr);
	}
	else
	{
		/* No coremem data section on this configuration */
		psDevInfo->sFWCorememDataStoreDevVAddrBase.uiAddr = 0;
		psDevInfo->sFWCorememDataStoreFWAddr.ui32Addr = 0;
	}

	return PVRSRV_OK;

	/* Unwind chain: each label frees what the stages above it allocated */
failFWCorememDataMemDescFwAddr:
failFWCorememDataMemDescAqDevVirt:
	if (uiFWCorememDataLen != 0)
	{
		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfCorememDataStoreMemDesc);
		psDevInfo->psRGXFWIfCorememDataStoreMemDesc = NULL;
	}
failFWCorememDataMemDescAlloc:
failFWCorememCodeMemDescFwAddr:
failFWCorememCodeMemDescAqDevVirt:
	if (uiFWCorememCodeLen != 0)
	{
		DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWCorememCodeMemDesc);
		psDevInfo->psRGXFWCorememCodeMemDesc = NULL;
	}
failFWCorememMemDescAlloc:
failFWDataMemDescAqDevVirt:
	DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWDataMemDesc);
	psDevInfo->psRGXFWDataMemDesc = NULL;
failFWDataMemDescAlloc:
failFWCodeMemDescAqDevVirt:
	DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWCodeMemDesc);
	psDevInfo->psRGXFWCodeMemDesc = NULL;
failFWCodeMemDescAlloc:
	return eError;
}

/*
	AppHint parameter interface
*/

/* Query the FW trace group filter bits (log type with the TRACE/TBI mode bit masked off) */
static
PVRSRV_ERROR RGXFWTraceQueryFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
				const void *psPrivate,
				IMG_UINT32 *pui32Value)
{
	PVRSRV_ERROR eResult;

	eResult = PVRSRVRGXFWDebugQueryFWLogKM(NULL, psDeviceNode, pui32Value);
	*pui32Value &= RGXFWIF_LOG_TYPE_GROUP_MASK;
	return eResult;
}

/* Query whether FW logging is in Trace (0) or TBI (1) mode */
static
PVRSRV_ERROR RGXFWTraceQueryLogType(const PVRSRV_DEVICE_NODE *psDeviceNode,
				const void *psPrivate,
				IMG_UINT32 *pui32Value)
{
	PVRSRV_ERROR eResult;

	eResult = PVRSRVRGXFWDebugQueryFWLogKM(NULL, psDeviceNode, pui32Value);
	if (PVRSRV_OK == eResult)
	{
		if (*pui32Value & RGXFWIF_LOG_TYPE_TRACE)
		{
			*pui32Value = 0; /* Trace */
		}
		else
		{
			*pui32Value = 1; /* TBI */
		}
	}
	return eResult;
}

/* Set the FW trace group filter, preserving the current Trace/TBI mode bit */
static
PVRSRV_ERROR RGXFWTraceSetFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
				const void *psPrivate,
				IMG_UINT32 ui32Value)
{
	PVRSRV_ERROR eResult;
	IMG_UINT32 ui32RGXFWLogType;

	eResult = RGXFWTraceQueryLogType(psDeviceNode, NULL, &ui32RGXFWLogType);
	if (PVRSRV_OK == eResult)
	{
		if (0 == ui32RGXFWLogType)
		{
			/* Currently in Trace mode: keep the TRACE bit set in the new value */
			BITMASK_SET(ui32Value, RGXFWIF_LOG_TYPE_TRACE);
		}
		eResult = PVRSRVRGXFWDebugSetFWLogKM(NULL, psDeviceNode, ui32Value);
	}
	return eResult;
}

/* Switch FW logging between Trace (0) and TBI (1) mode, preserving the group filter */
static
PVRSRV_ERROR RGXFWTraceSetLogType(const PVRSRV_DEVICE_NODE *psDeviceNode,
				const void *psPrivate,
				IMG_UINT32 ui32Value)
{
	PVRSRV_ERROR eResult;
	IMG_UINT32 ui32RGXFWLogType = ui32Value;

	eResult = RGXFWTraceQueryFilter(psDeviceNode, NULL, &ui32RGXFWLogType);
	if (PVRSRV_OK != eResult)
	{
		return eResult;
	}

	/* 0 - trace, 1 - tbi */
	if (0 == ui32Value)
	{
		BITMASK_SET(ui32RGXFWLogType, RGXFWIF_LOG_TYPE_TRACE);
	}
#if defined(SUPPORT_TBI_INTERFACE)
	else if (1 == ui32Value)
	{
		BITMASK_UNSET(ui32RGXFWLogType, RGXFWIF_LOG_TYPE_TRACE);
	}
#endif
	else
	{
		PVR_DPF((PVR_DBG_ERROR,
		         "%s: Invalid parameter %u specified to set FW log type AppHint.",
		         __func__, ui32Value));
		return
PVRSRV_ERROR_INVALID_PARAMS; + } + + eResult = PVRSRVRGXFWDebugSetFWLogKM(NULL, psDeviceNode, ui32RGXFWLogType); + return eResult; +} + +static +PVRSRV_ERROR RGXQueryFWPoisonOnFree(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *psPrivate, + IMG_BOOL *pbValue) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice; + + *pbValue = (PVRSRV_MEMALLOCFLAG_POISON_ON_FREE == psDevInfo->uiFWPoisonOnFreeFlag) + ? IMG_TRUE + : IMG_FALSE; + return PVRSRV_OK; +} + +static +PVRSRV_ERROR RGXSetFWPoisonOnFree(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *psPrivate, + IMG_BOOL bValue) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice; + psDevInfo->uiFWPoisonOnFreeFlag = bValue + ? PVRSRV_MEMALLOCFLAG_POISON_ON_FREE + : 0ULL; + + return PVRSRV_OK; +} + +/* + * RGXInitFirmware + */ +PVRSRV_ERROR +RGXInitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_BOOL bEnableSignatureChecks, + IMG_UINT32 ui32SignatureChecksBufSize, + IMG_UINT32 ui32HWPerfFWBufSizeKB, + IMG_UINT64 ui64HWPerfFilter, + IMG_UINT32 ui32ConfigFlags, + IMG_UINT32 ui32LogType, + IMG_UINT32 ui32FilterFlags, + IMG_UINT32 ui32JonesDisableMask, + IMG_UINT32 ui32HWRDebugDumpLimit, + IMG_UINT32 ui32RenderKillingCtl, + IMG_UINT32 ui32CDMTDMKillingCtl, + IMG_UINT32 *pui32TPUTrilinearFracMask, + IMG_UINT32 *pui32USRMNumRegions, + IMG_UINT64 *pui64UVBRMNumRegions, + IMG_UINT32 ui32HWPerfCountersDataSize, + RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandingConf, + FW_PERF_CONF eFirmwarePerf, + IMG_UINT32 ui32ConfigFlagsExt, + IMG_UINT32 ui32AvailablePowUnitsMask, + IMG_UINT32 ui32FwOsCfgFlags) +{ + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; + IMG_BOOL bEnableFWPoisonOnFree = IMG_FALSE; + + eError = RGXSetupFirmware(psDeviceNode, + bEnableSignatureChecks, + ui32SignatureChecksBufSize, + ui32HWPerfFWBufSizeKB, + ui64HWPerfFilter, + ui32ConfigFlags, + ui32ConfigFlagsExt, + ui32FwOsCfgFlags, + ui32LogType, + 
ui32FilterFlags, + ui32JonesDisableMask, + ui32HWRDebugDumpLimit, + ui32HWPerfCountersDataSize, + ui32RenderKillingCtl, + ui32CDMTDMKillingCtl, + pui32TPUTrilinearFracMask, + pui32USRMNumRegions, + pui64UVBRMNumRegions, + eRGXRDPowerIslandingConf, + eFirmwarePerf, + ui32AvailablePowUnitsMask); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVRGXInitFirmwareKM: RGXSetupFirmware failed (%u)", + eError)); + goto failed_init_firmware; + } + + if (!PVRSRV_VZ_MODE_IS(GUEST)) + { + PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_EnableLogGroup, + RGXFWTraceQueryFilter, + RGXFWTraceSetFilter, + psDeviceNode, + NULL); + PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_FirmwareLogType, + RGXFWTraceQueryLogType, + RGXFWTraceSetLogType, + psDeviceNode, + NULL); + } + + bEnableFWPoisonOnFree = PVRSRV_APPHINT_ENABLEFWPOISONONFREE; + + PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_EnableFWPoisonOnFree, + RGXQueryFWPoisonOnFree, + RGXSetFWPoisonOnFree, + psDeviceNode, + NULL); + + psDevInfo->uiFWPoisonOnFreeFlag = bEnableFWPoisonOnFree + ? 
PVRSRV_MEMALLOCFLAG_POISON_ON_FREE + : 0ULL; + + return PVRSRV_OK; + +failed_init_firmware: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +/* See device.h for function declaration */ +static PVRSRV_ERROR RGXAllocUFOBlock(PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEM_MEMDESC **psMemDesc, + IMG_UINT32 *puiSyncPrimVAddr, + IMG_UINT32 *puiSyncPrimBlockSize) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + PVRSRV_ERROR eError; + RGXFWIF_DEV_VIRTADDR pFirmwareAddr; + IMG_DEVMEM_SIZE_T uiUFOBlockSize = sizeof(IMG_UINT32); + IMG_DEVMEM_ALIGN_T ui32UFOBlockAlign = sizeof(IMG_UINT32); + IMG_UINT32 ui32CoherencyFlag = 0; + + psDevInfo = psDeviceNode->pvDevice; + + /* Size and align are 'expanded' because we request an Exportalign allocation */ + eError = DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap), + &uiUFOBlockSize, + &ui32UFOBlockAlign); + + if (eError != PVRSRV_OK) + { + goto e0; + } + + if (PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig) && + PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig)) + { + ui32CoherencyFlag = PVRSRV_MEMALLOCFLAG_CACHE_COHERENT; + } + else + { + ui32CoherencyFlag = PVRSRV_MEMALLOCFLAG_UNCACHED; + } + + eError = DevmemFwAllocateExportable(psDeviceNode, + uiUFOBlockSize, + ui32UFOBlockAlign, + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN) | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + ui32CoherencyFlag, + "FwExUFOBlock", + psMemDesc); + if (eError != PVRSRV_OK) + { + goto e0; + } + + eError = RGXSetFirmwareAddress(&pFirmwareAddr, *psMemDesc, 0, RFW_FWADDR_FLAG_NONE); + PVR_GOTO_IF_ERROR(eError, e1); + + *puiSyncPrimVAddr = pFirmwareAddr.ui32Addr; + *puiSyncPrimBlockSize = TRUNCATE_64BITS_TO_32BITS(uiUFOBlockSize); + + return PVRSRV_OK; + +e1: + 
DevmemFwUnmapAndFree(psDevInfo, *psMemDesc); +e0: + return eError; +} + +/* See device.h for function declaration */ +static void RGXFreeUFOBlock(PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEM_MEMDESC *psMemDesc) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + /* + If the system has snooping of the device cache then the UFO block + might be in the cache so we need to flush it out before freeing + the memory + + When the device is being shutdown/destroyed we don't care anymore. + Several necessary data structures to issue a flush were destroyed + already. + */ + if (PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig) && + psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_DEINIT) + { + RGXFWIF_KCCB_CMD sFlushInvalCmd; + PVRSRV_ERROR eError; + IMG_UINT32 ui32kCCBCommandSlot; + + /* Schedule the SLC flush command ... */ +#if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Submit SLC flush and invalidate"); +#endif + sFlushInvalCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL; + sFlushInvalCmd.uCmdData.sSLCFlushInvalData.ui64Size = 0; + sFlushInvalCmd.uCmdData.sSLCFlushInvalData.ui64Address = 0; + sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_TRUE; + sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_FALSE; + sFlushInvalCmd.uCmdData.sSLCFlushInvalData.psContext.ui32Addr = 0; + + eError = RGXSendCommandWithPowLockAndGetKCCBSlot(psDevInfo, + &sFlushInvalCmd, + PDUMP_FLAGS_CONTINUOUS, + &ui32kCCBCommandSlot); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to schedule SLC flush command with error (%u)", + __func__, + eError)); + } + else + { + /* Wait for the SLC flush to complete */ + eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: SLC flush and invalidate aborted with error (%u)", + __func__, + eError)); + } + else if (unlikely(psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] & + 
RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE)) + { + PVR_DPF((PVR_DBG_WARNING, "%s: FW poll on a HW operation failed", __func__)); + } + } + } + + RGXUnsetFirmwareAddress(psMemDesc); + DevmemFwUnmapAndFree(psDevInfo, psMemDesc); +} + +/* + DevDeInitRGX +*/ +PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*)psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + DEVICE_MEMORY_INFO *psDevMemoryInfo; + IMG_UINT32 ui32Temp=0; + + if (!psDevInfo) + { + /* Can happen if DevInitRGX failed */ + PVR_DPF((PVR_DBG_ERROR, "DevDeInitRGX: Null DevInfo")); + return PVRSRV_OK; + } + + if (psDevInfo->psRGXFWIfOsInit) + { + KM_SET_OS_CONNECTION(OFFLINE, psDevInfo); + } + + eError = DeviceDepBridgeDeInit(psDevInfo->sDevFeatureCfg.ui64Features); + PVR_LOG_IF_ERROR(eError, "DeviceDepBridgeDeInit"); +#if defined(PDUMP) + DevmemIntFreeDefBackingPage(psDeviceNode, + &psDeviceNode->sDummyPage, + DUMMY_PAGE); + DevmemIntFreeDefBackingPage(psDeviceNode, + &psDeviceNode->sDevZeroPage, + DEV_ZERO_PAGE); +#endif + +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) + if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK) + { + OSAtomicWrite(&psDeviceNode->sDummyPage.atRefCounter, 0); + PVR_UNREFERENCED_PARAMETER(ui32Temp); + } + else +#else + { + /*Delete the Dummy page related info */ + ui32Temp = (IMG_UINT32)OSAtomicRead(&psDeviceNode->sDummyPage.atRefCounter); + if (0 != ui32Temp) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Dummy page reference counter is non zero (%u)", + __func__, + ui32Temp)); + PVR_ASSERT(0); + } + } +#endif + + /*Delete the Dummy page related info */ + ui32Temp = (IMG_UINT32)OSAtomicRead(&psDeviceNode->sDevZeroPage.atRefCounter); + if (0 != ui32Temp) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Zero page reference counter is non zero (%u)", + __func__, + ui32Temp)); + } + +#if defined(PDUMP) + if (NULL != psDeviceNode->sDummyPage.hPdumpPg) + { + PDUMPCOMMENT("Error dummy page handle is still active"); + } + + if (NULL != 
psDeviceNode->sDevZeroPage.hPdumpPg) + { + PDUMPCOMMENT("Error Zero page handle is still active"); + } +#endif + + /*The lock type need to be dispatch type here because it can be acquired from MISR (Z-buffer) path */ + OSLockDestroy(psDeviceNode->sDummyPage.psPgLock); + + /* Destroy the zero page lock */ + OSLockDestroy(psDeviceNode->sDevZeroPage.psPgLock); + + /* Unregister debug request notifiers first as they could depend on anything. */ + + RGXDebugDeinit(psDevInfo); + + + /* Cancel notifications to this device */ + PVRSRVUnregisterCmdCompleteNotify(psDeviceNode->hCmdCompNotify); + psDeviceNode->hCmdCompNotify = NULL; + + /* + * De-initialise in reverse order, so stage 2 init is undone first. + */ + if (psDevInfo->bDevInit2Done) + { + psDevInfo->bDevInit2Done = IMG_FALSE; + + eError = PVRSRVTQUnloadShaders(psDeviceNode); + if (eError != PVRSRV_OK) + { + return eError; + } + +#if !defined(NO_HARDWARE) + (void) SysUninstallDeviceLISR(psDevInfo->pvLISRData); + (void) OSUninstallMISR(psDevInfo->pvMISRData); + (void) OSUninstallMISR(psDevInfo->hProcessQueuesMISR); + if (psDevInfo->pvAPMISRData != NULL) + { + (void) OSUninstallMISR(psDevInfo->pvAPMISRData); + } +#endif /* !NO_HARDWARE */ + + /* Remove the device from the power manager */ + eError = PVRSRVRemovePowerDevice(psDeviceNode); + if (eError != PVRSRV_OK) + { + return eError; + } + + psDevInfo->pfnGetGpuUtilStats = NULL; + OSLockDestroy(psDevInfo->hGPUUtilLock); + + /* Free DVFS Table */ + if (psDevInfo->psGpuDVFSTable != NULL) + { + OSFreeMem(psDevInfo->psGpuDVFSTable); + psDevInfo->psGpuDVFSTable = NULL; + } + + /* De-init Freelists/ZBuffers... 
*/ + OSLockDestroy(psDevInfo->hLockFreeList); + OSLockDestroy(psDevInfo->hLockZSBuffer); + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* De-init work estimation lock */ + OSLockDestroy(psDevInfo->hWorkEstLock); +#endif + + /* Unregister MMU related stuff */ + eError = RGXMMUInit_Unregister(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "DevDeInitRGX: Failed RGXMMUInit_Unregister (0x%x)", + eError)); + return eError; + } + } + + /* UnMap Regs */ + if (psDevInfo->pvRegsBaseKM != NULL) + { +#if !defined(NO_HARDWARE) + OSUnMapPhysToLin((void __force *) psDevInfo->pvRegsBaseKM, + psDevInfo->ui32RegSize); +#endif /* !NO_HARDWARE */ + psDevInfo->pvRegsBaseKM = NULL; + } + +#if 0 /* not required at this time */ + if (psDevInfo->hTimer) + { + eError = OSRemoveTimer(psDevInfo->hTimer); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "DevDeInitRGX: Failed to remove timer")); + return eError; + } + psDevInfo->hTimer = NULL; + } +#endif + + psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo; + + RGXDeInitHeaps(psDevMemoryInfo); + + if (psDevInfo->psRGXFWCodeMemDesc) + { + /* Free fw code */ + PDUMPCOMMENT("Freeing FW code memory"); + DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWCodeMemDesc); + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWCodeMemDesc); + psDevInfo->psRGXFWCodeMemDesc = NULL; + } + if (psDevInfo->psRGXFWDataMemDesc) + { + /* Free fw data */ + PDUMPCOMMENT("Freeing FW data memory"); + DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWDataMemDesc); + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWDataMemDesc); + psDevInfo->psRGXFWDataMemDesc = NULL; + } + if (psDevInfo->psRGXFWCorememCodeMemDesc) + { + /* Free fw core mem code */ + PDUMPCOMMENT("Freeing FW coremem code memory"); + DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc); + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWCorememCodeMemDesc); + psDevInfo->psRGXFWCorememCodeMemDesc = NULL; + } + + if (psDevInfo->psRGXFWIfCorememDataStoreMemDesc) + { + /* Free 
fw core mem data */ + PDUMPCOMMENT("Freeing FW coremem data store memory"); + DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc); + DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfCorememDataStoreMemDesc); + psDevInfo->psRGXFWIfCorememDataStoreMemDesc = NULL; + } + + /* + Free the firmware allocations. + */ + RGXFreeFirmware(psDevInfo); + RGXDeInitDestroyFWKernelMemoryContext(psDeviceNode); + +#if defined(SUPPORT_VALIDATION) + RGXPowerDomainDeInitState(&psDevInfo->sPowerDomainState); +#endif + + RGXDeInitMultiCoreInfo(psDeviceNode); + + /* De-initialise non-device specific (TL) users of RGX device memory */ + RGXHWPerfHostDeInit(psDevInfo); + eError = HTBDeInit(); + PVR_LOG_IF_ERROR(eError, "HTBDeInit"); + + /* destroy the stalled CCB locks */ + OSLockDestroy(psDevInfo->hCCBRecoveryLock); + OSLockDestroy(psDevInfo->hCCBStallCheckLock); + + /* destroy the context list locks */ + OSLockDestroy(psDevInfo->sRegCongfig.hLock); + OSLockDestroy(psDevInfo->hBPLock); + OSLockDestroy(psDevInfo->hRGXFWIfBufInitLock); + OSWRLockDestroy(psDevInfo->hRenderCtxListLock); + OSWRLockDestroy(psDevInfo->hComputeCtxListLock); + OSWRLockDestroy(psDevInfo->hTransferCtxListLock); + OSWRLockDestroy(psDevInfo->hTDMCtxListLock); + OSWRLockDestroy(psDevInfo->hKickSyncCtxListLock); + OSWRLockDestroy(psDevInfo->hMemoryCtxListLock); + OSSpinLockDestroy(psDevInfo->hLockKCCBDeferredCommandsList); + OSWRLockDestroy(psDevInfo->hCommonCtxtListLock); + + if (psDevInfo->hDebugFaultInfoLock != NULL) + { + OSLockDestroy(psDevInfo->hDebugFaultInfoLock); + } + + if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) + { + if (psDevInfo->hMMUCtxUnregLock != NULL) + { + OSLockDestroy(psDevInfo->hMMUCtxUnregLock); + } + } + + /* Free device BVNC string */ + if (NULL != psDevInfo->sDevFeatureCfg.pszBVNCString) + { + OSFreeMem(psDevInfo->sDevFeatureCfg.pszBVNCString); + } + +#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) + if (NULL != 
psDevInfo->sRGXTimerValues.pui64uscTimers) + { + OSFreeMem(psDevInfo->sRGXTimerValues.pui64uscTimers); + psDevInfo->sRGXTimerValues.pui64uscTimers = NULL; + } +#endif + + /* DeAllocate devinfo */ + OSFreeMem(psDevInfo); + + psDeviceNode->pvDevice = NULL; + + return PVRSRV_OK; +} + +#if defined(PDUMP) +static +PVRSRV_ERROR RGXResetPDump(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)(psDeviceNode->pvDevice); + + psDevInfo->bDumpedKCCBCtlAlready = IMG_FALSE; + + return PVRSRV_OK; +} +#endif /* PDUMP */ + +/* Takes a log2 page size parameter and calculates a suitable page size + * for the RGX heaps. Returns 0 if parameter is wrong.*/ +static INLINE IMG_UINT32 RGXHeapDerivePageSize(IMG_UINT32 uiLog2PageSize) +{ + IMG_BOOL bFound = IMG_FALSE; + + /* OS page shift must be at least RGX_HEAP_4KB_PAGE_SHIFT, + * max RGX_HEAP_2MB_PAGE_SHIFT, non-zero and a power of two*/ + if (uiLog2PageSize == 0U || + (uiLog2PageSize < RGX_HEAP_4KB_PAGE_SHIFT) || + (uiLog2PageSize > RGX_HEAP_2MB_PAGE_SHIFT)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Provided incompatible log2 page size %u", + __func__, + uiLog2PageSize)); + PVR_ASSERT(0); + return 0; + } + + do + { + switch (uiLog2PageSize) + { + case RGX_HEAP_4KB_PAGE_SHIFT: + case RGX_HEAP_16KB_PAGE_SHIFT: + case RGX_HEAP_64KB_PAGE_SHIFT: + case RGX_HEAP_256KB_PAGE_SHIFT: + case RGX_HEAP_1MB_PAGE_SHIFT: + case RGX_HEAP_2MB_PAGE_SHIFT: + /* All good, RGX page size equals given page size + * => use it as default for heaps */ + bFound = IMG_TRUE; + break; + default: + /* We have to fall back to a smaller device + * page size than given page size because there + * is no exact match for any supported size. 
*/ + uiLog2PageSize -= 1U; + break; + } + } while (!bFound); + + return uiLog2PageSize; +} + +/* First 16-bits define possible types */ +#define HEAP_INST_VALUE_MASK (0xFFFF) +#define HEAP_INST_DEFAULT_VALUE (1U) /* Used to show either the heap is always instantiated by default (pfn = NULL) + OR + that this is the default configuration of the heap with an Alternative BRN */ +#define HEAP_INST_BRN_DEP_VALUE (2U) /* The inclusion of this heap is dependent on the brn being present */ +#define HEAP_INST_FEAT_DEP_VALUE (3U) /* The inclusion of this heap is dependent on the feature being present */ +#define HEAP_INST_BRN_ALT_VALUE (4U) /* This entry is a possible alternative to the default determined by a BRN */ +#define HEAP_INST_FEAT_ALT_VALUE (5U) /* The entry is a possible alternative to the default determined by a Feature define */ + +/* Latter 16-bits define other flags we may need */ +#define HEAP_INST_NON4K_FLAG (1 << 16U) /* This is a possible NON4K Entry and we should use the device + NON4K size when instantiating */ + +typedef struct RGX_HEAP_INFO_TAG RGX_HEAP_INFO; // Forward declaration +typedef IMG_BOOL (*PFN_IS_PRESENT)(PVRSRV_RGXDEV_INFO*, const RGX_HEAP_INFO*); + +typedef struct RGX_HEAP_INFO_TAG +{ + IMG_CHAR *pszName; + IMG_UINT64 ui64HeapBase; + IMG_DEVMEM_SIZE_T uiHeapLength; + IMG_DEVMEM_SIZE_T uiHeapReservedRegionLength; + IMG_UINT32 ui32Log2ImportAlignment; + PFN_IS_PRESENT pfnIsHeapPresent; + IMG_UINT32 ui32HeapInstanceFlags; +} RGX_HEAP_INFO; + +/* Feature Present function prototypes */ + +/* FW Feature Present function prototypes */ + +static IMG_BOOL FWVZConfigPresent(PVRSRV_RGXDEV_INFO* psDevInfo, const RGX_HEAP_INFO* pksHeapInfo) +{ + /* Used to determine the correct table row to instantiate as a heap by checking + * the Heap base at run time VS the current table instance + */ + IMG_UINT64 ui64VZTypeBase; + + /* Check VZ Type */ + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + ui64VZTypeBase = RGX_FIRMWARE_GUEST_CONFIG_HEAP_BASE; + } + else + { + 
ui64VZTypeBase = RGX_FIRMWARE_HOST_CONFIG_HEAP_BASE; + } + + /* Determine if we should include this entry based upon previous checks */ + return (pksHeapInfo->ui64HeapBase == ui64VZTypeBase) ? IMG_TRUE : IMG_FALSE; +} + +static IMG_BOOL FWVZMainHeapPresent(PVRSRV_RGXDEV_INFO* psDevInfo, const RGX_HEAP_INFO* pksHeapInfo) +{ + /* Used to determine the correct table row to instantiate as a heap by checking + * the Heap base at run time VS the current table instance + */ + IMG_UINT64 ui64VZTypeBase; + + /* Check VZ Type */ + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + ui64VZTypeBase = RGX_FIRMWARE_GUEST_MAIN_HEAP_BASE; + } + else + { + ui64VZTypeBase = RGX_FIRMWARE_HOST_MAIN_HEAP_BASE; + } + + /* Determine if we should include this entry based upon previous checks */ + return (pksHeapInfo->ui64HeapBase == ui64VZTypeBase) ? IMG_TRUE : IMG_FALSE; +} + +static const RGX_HEAP_INFO gasRGXHeapLayoutApp[] = +{ + /* Name HeapBase HeapLength HeapReservedRegionLength Log2ImportAlignment pfnIsHeapPresent HeapInstanceFlags */ + {RGX_GENERAL_SVM_HEAP_IDENT, RGX_GENERAL_SVM_HEAP_BASE, RGX_GENERAL_SVM_HEAP_SIZE, 0, 0, NULL, HEAP_INST_DEFAULT_VALUE}, + {RGX_GENERAL_HEAP_IDENT, RGX_GENERAL_HEAP_BASE, RGX_GENERAL_HEAP_SIZE, (1 * DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY), 0, NULL, HEAP_INST_DEFAULT_VALUE}, + {RGX_GENERAL_NON4K_HEAP_IDENT, RGX_GENERAL_NON4K_HEAP_BASE, RGX_GENERAL_NON4K_HEAP_SIZE, 0, 0, NULL, HEAP_INST_DEFAULT_VALUE | HEAP_INST_NON4K_FLAG}, + {RGX_PDSCODEDATA_HEAP_IDENT, RGX_PDSCODEDATA_HEAP_BASE, RGX_PDSCODEDATA_HEAP_SIZE, (1 * DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY), 0, NULL, HEAP_INST_DEFAULT_VALUE}, + {RGX_USCCODE_HEAP_IDENT, RGX_USCCODE_HEAP_BASE, RGX_USCCODE_HEAP_SIZE, (1 * DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY), 0, NULL, HEAP_INST_DEFAULT_VALUE}, + {RGX_VK_CAPT_REPLAY_HEAP_IDENT, RGX_VK_CAPT_REPLAY_HEAP_BASE, RGX_VK_CAPT_REPLAY_HEAP_SIZE, 0, 0, NULL, HEAP_INST_DEFAULT_VALUE}, + {RGX_SIGNALS_HEAP_IDENT, RGX_SIGNALS_HEAP_BASE, RGX_SIGNALS_HEAP_SIZE, 0, 0, NULL, 
HEAP_INST_DEFAULT_VALUE}, + {RGX_COMPONENT_CTRL_HEAP_IDENT, RGX_COMPONENT_CTRL_HEAP_BASE, RGX_COMPONENT_CTRL_HEAP_SIZE, 0, 0, NULL, HEAP_INST_DEFAULT_VALUE}, + {RGX_FBCDC_HEAP_IDENT, RGX_FBCDC_HEAP_BASE, RGX_FBCDC_HEAP_SIZE, 0, 0, NULL, HEAP_INST_DEFAULT_VALUE}, + {RGX_FBCDC_LARGE_HEAP_IDENT, RGX_FBCDC_LARGE_HEAP_BASE, RGX_FBCDC_LARGE_HEAP_SIZE, 0, 0, NULL, HEAP_INST_DEFAULT_VALUE}, + {RGX_PDS_INDIRECT_STATE_HEAP_IDENT, RGX_PDS_INDIRECT_STATE_HEAP_BASE, RGX_PDS_INDIRECT_STATE_HEAP_SIZE, 0, 0, NULL, HEAP_INST_DEFAULT_VALUE}, + {RGX_CMP_MISSION_RMW_HEAP_IDENT, RGX_CMP_MISSION_RMW_HEAP_BASE, RGX_CMP_MISSION_RMW_HEAP_SIZE, 0, 0, NULL, HEAP_INST_DEFAULT_VALUE}, + {RGX_CMP_SAFETY_RMW_HEAP_IDENT, RGX_CMP_SAFETY_RMW_HEAP_BASE, RGX_CMP_SAFETY_RMW_HEAP_SIZE, 0, 0, NULL, HEAP_INST_DEFAULT_VALUE}, + {RGX_TEXTURE_STATE_HEAP_IDENT, RGX_TEXTURE_STATE_HEAP_BASE, RGX_TEXTURE_STATE_HEAP_SIZE, 0, 0, NULL, HEAP_INST_DEFAULT_VALUE}, + {RGX_VISIBILITY_TEST_HEAP_IDENT, RGX_VISIBILITY_TEST_HEAP_BASE, RGX_VISIBILITY_TEST_HEAP_SIZE, 0, 0, NULL, HEAP_INST_DEFAULT_VALUE} +}; + +static const RGX_HEAP_INFO gasRGXHeapLayoutFW[] = +{ + /* Name HeapBase HeapLength HeapReservedRegionLength Log2ImportAlignment pfnPresent HeapInstanceFlags*/ + {RGX_FIRMWARE_CONFIG_HEAP_IDENT, RGX_FIRMWARE_GUEST_CONFIG_HEAP_BASE, RGX_FIRMWARE_CONFIG_HEAP_SIZE, 0, 0, FWVZConfigPresent, HEAP_INST_DEFAULT_VALUE}, + {RGX_FIRMWARE_MAIN_HEAP_IDENT, RGX_FIRMWARE_GUEST_MAIN_HEAP_BASE, RGX_FIRMWARE_META_MAIN_HEAP_SIZE, 0, 0, FWVZMainHeapPresent, HEAP_INST_DEFAULT_VALUE}, + {RGX_FIRMWARE_MAIN_HEAP_IDENT, RGX_FIRMWARE_HOST_MAIN_HEAP_BASE, RGX_FIRMWARE_META_MAIN_HEAP_SIZE, 0, 0, FWVZMainHeapPresent, HEAP_INST_DEFAULT_VALUE}, + {RGX_FIRMWARE_CONFIG_HEAP_IDENT, RGX_FIRMWARE_HOST_CONFIG_HEAP_BASE, RGX_FIRMWARE_CONFIG_HEAP_SIZE, 0, 0, FWVZConfigPresent, HEAP_INST_DEFAULT_VALUE} +}; + +/* Generic counting method. 
*/ +static void _CountRequiredHeaps(PVRSRV_RGXDEV_INFO *psDevInfo, + const RGX_HEAP_INFO pksHeapInfo[], + IMG_UINT32 ui32HeapListSize, + IMG_UINT32* ui32HeapCount) +{ + IMG_UINT32 i; + + /* Loop over rows in the heap data array using callback to decide if we + * should include the heap + */ + for (i = 0; i < ui32HeapListSize; i++) + { + const RGX_HEAP_INFO *psHeapInfo = &pksHeapInfo[i]; + + if (psHeapInfo->pfnIsHeapPresent) + { + if (!psHeapInfo->pfnIsHeapPresent(psDevInfo, psHeapInfo)) + { + /* We don't need to create this heap */ + continue; + } + } + + (*ui32HeapCount)++; + } +} +/* Generic heap instantiator */ +static void _InstantiateRequiredHeaps(PVRSRV_RGXDEV_INFO *psDevInfo, + const RGX_HEAP_INFO pksHeapInfo[], + IMG_UINT32 ui32HeapListSize, + DEVMEM_HEAP_BLUEPRINT **psDeviceMemoryHeapCursor) +{ + IMG_UINT32 i; + /* We now have a list of the heaps to include and so we should loop over this + * list and instantiate. + */ + for (i = 0; i < ui32HeapListSize; i++) + { + IMG_UINT32 ui32Log2RgxDefaultPageShift = RGXHeapDerivePageSize(OSGetPageShift()); + IMG_UINT32 ui32Log2DataPageSize = 0; + + const RGX_HEAP_INFO *psHeapInfo = &pksHeapInfo[i]; + + if (psHeapInfo->pfnIsHeapPresent) + { + if (!psHeapInfo->pfnIsHeapPresent(psDevInfo, psHeapInfo)) + { + /* We don't need to create this heap */ + continue; + } + } + + if (psHeapInfo->ui32HeapInstanceFlags & HEAP_INST_NON4K_FLAG) + { + ui32Log2DataPageSize = psDevInfo->ui32Log2Non4KPgSize; + } + else + { + ui32Log2DataPageSize = ui32Log2RgxDefaultPageShift; + } + + HeapCfgBlueprintInit(psHeapInfo->pszName, + psHeapInfo->ui64HeapBase, + psHeapInfo->uiHeapLength, + psHeapInfo->uiHeapReservedRegionLength, + ui32Log2DataPageSize, + psHeapInfo->ui32Log2ImportAlignment, + *psDeviceMemoryHeapCursor); + + (*psDeviceMemoryHeapCursor)++; + } +} + +static PVRSRV_ERROR RGXInitHeaps(PVRSRV_RGXDEV_INFO *psDevInfo, + DEVICE_MEMORY_INFO *psNewMemoryInfo) +{ + PVRSRV_ERROR eError; + DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeapCursor; + + 
IMG_UINT32 ui32HeapListSize = ARRAY_SIZE(gasRGXHeapLayoutApp); + IMG_UINT32 ui32FWHeapListSize = ARRAY_SIZE(gasRGXHeapLayoutFW); + IMG_UINT32 ui32CountedHeapSize; + + IMG_UINT32 ui32HeapCount = 0; + IMG_UINT32 ui32FWHeapCount = 0; + + /* Count heaps required for the app heaps */ + _CountRequiredHeaps(psDevInfo, + gasRGXHeapLayoutApp, + ui32HeapListSize, + &ui32HeapCount); + + /* Count heaps required for the FW heaps */ + _CountRequiredHeaps(psDevInfo, + gasRGXHeapLayoutFW, + ui32FWHeapListSize, + &ui32FWHeapCount); + + ui32CountedHeapSize = (ui32HeapCount + ui32FWHeapCount + RGX_NUM_OS_SUPPORTED); + + psNewMemoryInfo->psDeviceMemoryHeap = OSAllocMem(sizeof(DEVMEM_HEAP_BLUEPRINT) * ui32CountedHeapSize); + PVR_LOG_GOTO_IF_NOMEM(psNewMemoryInfo->psDeviceMemoryHeap, eError, e0); + + /* Initialise the heaps */ + psDeviceMemoryHeapCursor = psNewMemoryInfo->psDeviceMemoryHeap; + + /* Instantiate App Heaps */ + _InstantiateRequiredHeaps(psDevInfo, + gasRGXHeapLayoutApp, + ui32HeapListSize, + &psDeviceMemoryHeapCursor); + + /* Instantiate FW Heaps */ + _InstantiateRequiredHeaps(psDevInfo, + gasRGXHeapLayoutFW, + ui32FWHeapListSize, + &psDeviceMemoryHeapCursor); + + /* set the heap count */ + psNewMemoryInfo->ui32HeapCount = (IMG_UINT32)(psDeviceMemoryHeapCursor - psNewMemoryInfo->psDeviceMemoryHeap); + + /* Check we have allocated the correct # of heaps, minus any VZ heaps as these + * have not been created at this point + */ + PVR_ASSERT(psNewMemoryInfo->ui32HeapCount == (ui32CountedHeapSize - RGX_NUM_OS_SUPPORTED)); + + /* + In the new heap setup, we initialise 2 configurations: + 1 - One will be for the firmware only (index 1 in array) + a. This primarily has the firmware heap in it. + b. It also has additional guest OSID firmware heap(s) + - Only if the number of support firmware OSID > 1 + 2 - Others shall be for clients only (index 0 in array) + a. This has all the other client heaps in it. 
+ */ + psNewMemoryInfo->uiNumHeapConfigs = 2; + psNewMemoryInfo->psDeviceMemoryHeapConfigArray = OSAllocMem(sizeof(DEVMEM_HEAP_CONFIG) * psNewMemoryInfo->uiNumHeapConfigs); + PVR_LOG_GOTO_IF_NOMEM(psNewMemoryInfo->psDeviceMemoryHeapConfigArray, eError, e1); + + psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].pszName = "Default Heap Configuration"; + psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].uiNumHeaps = psNewMemoryInfo->ui32HeapCount - RGX_FIRMWARE_NUMBER_OF_FW_HEAPS; + psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].psHeapBlueprintArray = psNewMemoryInfo->psDeviceMemoryHeap; + + psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].pszName = "Firmware Heap Configuration"; + psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].uiNumHeaps = RGX_FIRMWARE_NUMBER_OF_FW_HEAPS; + psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].psHeapBlueprintArray = psDeviceMemoryHeapCursor - RGX_FIRMWARE_NUMBER_OF_FW_HEAPS; + + if (RGX_GET_FEATURE_VALUE(psDevInfo, MMU_VERSION) >= 4) + { + IMG_UINT32 i; + const IMG_UINT32 ui32GeneralNon4KHeapPageSize = (1 << psDevInfo->ui32Log2Non4KPgSize); + const IMG_UINT32 ui32RgxDefaultPageSize = (1 << RGXHeapDerivePageSize(OSGetPageShift())); + + /* + * Initialise all MMU Page Size Range Config register to the default page size + * used by the OS, leaving the address range 0; + */ + for (i = 0; i < ARRAY_SIZE(psDevInfo->aui64MMUPageSizeRangeValue); ++i) + { + psDevInfo->aui64MMUPageSizeRangeValue[i] = + RGXMMUInit_GetConfigRangeValue(ui32RgxDefaultPageSize, + 0, + (1 << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT)); + } + + + /* set the last MMU config range covering the entire virtual memory to the OS's page size */ + psDevInfo->aui64MMUPageSizeRangeValue[RGX_MAX_NUM_MMU_PAGE_SIZE_RANGES - 1] = + RGXMMUInit_GetConfigRangeValue(ui32RgxDefaultPageSize, 0, (1ULL << 40)); + + /* + * If the Non4K heap has a different page size than the OS's page size + * (used as default for all other heaps), configure one MMU config range + * for the 
Non4K heap + */ + if (ui32GeneralNon4KHeapPageSize != ui32RgxDefaultPageSize) + { + psDevInfo->aui64MMUPageSizeRangeValue[0] = + RGXMMUInit_GetConfigRangeValue(ui32GeneralNon4KHeapPageSize, + RGX_GENERAL_NON4K_HEAP_BASE, + RGX_GENERAL_NON4K_HEAP_SIZE); + } + } + +#if (RGX_NUM_OS_SUPPORTED > 1) + if (PVRSRV_VZ_MODE_IS(HOST)) + { + IMG_UINT32 ui32OSid; + + /* Create additional raw firmware heaps */ + for (ui32OSid = RGX_FIRST_RAW_HEAP_OSID; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) + { + if (RGXInitFwRawHeap(psDeviceMemoryHeapCursor, ui32OSid) != PVRSRV_OK) + { + /* if any allocation fails, free previously allocated heaps and abandon initialisation */ + for (; ui32OSid > RGX_FIRST_RAW_HEAP_OSID; ui32OSid--) + { + RGXDeInitFwRawHeap(psDeviceMemoryHeapCursor); + psDeviceMemoryHeapCursor--; + } + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto e1; + } + + /* Append additional firmware heaps to host driver firmware context heap configuration */ + psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].uiNumHeaps += 1; + + /* advance to the next heap */ + psDeviceMemoryHeapCursor++; + } + } +#endif /* (RGX_NUM_OS_SUPPORTED > 1) */ + + return PVRSRV_OK; +e1: + OSFreeMem(psNewMemoryInfo->psDeviceMemoryHeap); +e0: + return eError; +} + +static void RGXDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo) +{ +#if (RGX_NUM_OS_SUPPORTED > 1) + if (PVRSRV_VZ_MODE_IS(HOST)) + { + IMG_UINT32 ui32OSid; + DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeapCursor = psDevMemoryInfo->psDeviceMemoryHeap; + + /* Delete all guest firmware heaps */ + for (ui32OSid = RGX_FIRST_RAW_HEAP_OSID; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) + { + RGXDeInitFwRawHeap(psDeviceMemoryHeapCursor); + psDeviceMemoryHeapCursor++; + } + } +#endif /* (RGX_NUM_OS_SUPPORTED > 1) */ + + OSFreeMem(psDevMemoryInfo->psDeviceMemoryHeapConfigArray); + OSFreeMem(psDevMemoryInfo->psDeviceMemoryHeap); +} + +static PVRSRV_ERROR RGXPhysMemDeviceHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT64 
uPhysheapSize; + IMG_CPU_PHYADDR sCpuPAddr; + IMG_DEV_PHYADDR sDevPAddr; + PHYS_HEAP_CONFIG *psFwMainConfig = FindPhysHeapConfig(psDeviceNode->psDevConfig, + PHYS_HEAP_USAGE_FW_MAIN); + +#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) + /* VZ heap validation */ + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + PVR_LOG_RETURN_IF_FALSE(psFwMainConfig != NULL, + "FW Main heap is required for VZ Guest.", + PVRSRV_ERROR_PHYSHEAP_CONFIG); + } +#endif + + if (psFwMainConfig != NULL) + { + /* Check FW_MAIN for multiple usage flags. Because FW_MAIN is divided + into subheaps, shared usage with other heaps is not allowed. */ + PVR_LOG_RETURN_IF_FALSE(psFwMainConfig->ui32UsageFlags == PHYS_HEAP_USAGE_FW_MAIN, + "FW Main phys heap config specified with more than one usage. FW Main must be FW Main only.", + PVRSRV_ERROR_PHYSHEAP_CONFIG); + } + + if (psFwMainConfig == NULL) + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware physical heap not set", __func__)); + } + else if (psFwMainConfig->eType == PHYS_HEAP_TYPE_UMA) + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware physical heap uses OS System memory (UMA)", __func__)); + } + else /* PHYS_HEAP_TYPE_LMA or PHYS_HEAP_TYPE_DMA */ + { + IMG_UINT64 uRawHeapBase; + RA_BASE_T uFwCfgSubHeapBase, uFwMainSubHeapBase; + const IMG_UINT64 ui64ExpectedHeapSize = RGX_FIRMWARE_RAW_HEAP_SIZE; + const RA_LENGTH_T uFwCfgSubHeapSize = RGX_FIRMWARE_CONFIG_HEAP_SIZE; + RA_LENGTH_T uFwMainSubHeapSize; + PHYS_HEAP_CONFIG sFwHeapConfig; + + uFwMainSubHeapSize = RGX_FIRMWARE_META_MAIN_HEAP_SIZE; + + PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware physical heap uses local memory managed by the driver (LMA)", __func__)); + + sCpuPAddr = psFwMainConfig->sStartAddr; + sDevPAddr = psFwMainConfig->sCardBase; + uPhysheapSize = psFwMainConfig->uiSize; + + PVR_LOG_GOTO_IF_FALSE(uPhysheapSize >= ui64ExpectedHeapSize, + "Invalid firmware physical heap size.", ErrorDeinit); + + /* Now we construct RAs to manage the FW heaps */ + uRawHeapBase = sDevPAddr.uiAddr; + + if 
(PVRSRV_VZ_MODE_IS(GUEST)) + { + /* Guest subheap layout: Config + Main */ + uFwCfgSubHeapBase = uRawHeapBase; + uFwMainSubHeapBase = uFwCfgSubHeapBase + uFwCfgSubHeapSize; + } + else + { + /* Native/Host subheap layout: Main + (optional MIPS reserved range) + Config */ + uFwMainSubHeapBase = uRawHeapBase; + uFwCfgSubHeapBase = uRawHeapBase + RGX_FIRMWARE_RAW_HEAP_SIZE - uFwCfgSubHeapSize; + } + +#if defined(SUPPORT_AUTOVZ) + if (PVRSRV_VZ_MODE_IS(HOST)) + { + /* 1 Mb can hold the maximum amount of page tables for the memory shared between the firmware and all KM drivers: + * MAX(RAW_HEAP_SIZE) = 32 Mb; MAX(NUMBER_OS) = 8; Total shared memory = 256 Mb; + * MMU objects required: 65536 PTEs; 16 PDEs; 1 PCE; */ + RA_LENGTH_T uMaxFwMmuPageTableSize = 1 * 1024 * 1024; + + /* By default the firmware MMU's page tables are allocated from the same carveout memory as the firmware heap. + * If a different base address is specified for this reserved range, use the overriding define instead. */ +#if defined(PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR) + RA_BASE_T uFwMmuReservedMemStart = PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR; +#else + RA_BASE_T uFwMmuReservedMemStart = uRawHeapBase + (RGX_FIRMWARE_RAW_HEAP_SIZE * RGX_NUM_OS_SUPPORTED); +#endif + + psDeviceNode->psFwMMUReservedMemArena = RA_Create_With_Span("Fw MMU Mem 0", + OSGetPageShift(), + 0, + uFwMmuReservedMemStart, + uMaxFwMmuPageTableSize); + PVR_LOG_GOTO_IF_NOMEM(psDeviceNode->psFwMMUReservedMemArena, eError, ErrorDeinit); + } +#endif + + sFwHeapConfig = *psFwMainConfig; + sFwHeapConfig.sStartAddr.uiAddr = sCpuPAddr.uiAddr + (uFwMainSubHeapBase - uRawHeapBase); + sFwHeapConfig.sCardBase.uiAddr = uFwMainSubHeapBase; + sFwHeapConfig.uiSize = uFwMainSubHeapSize; + sFwHeapConfig.ui32UsageFlags = PHYS_HEAP_USAGE_FW_MAIN; + + eError = PhysmemCreateHeapLMA(psDeviceNode, &sFwHeapConfig, "Fw Main subheap", &psDeviceNode->psFWMainPhysHeap); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemCreateHeapLMA:MAIN", ErrorDeinit); + + 
sFwHeapConfig = *psFwMainConfig; + sFwHeapConfig.sStartAddr.uiAddr = sCpuPAddr.uiAddr + (uFwCfgSubHeapBase - uRawHeapBase); + sFwHeapConfig.sCardBase.uiAddr = uFwCfgSubHeapBase; + sFwHeapConfig.uiSize = uFwCfgSubHeapSize; + sFwHeapConfig.ui32UsageFlags = PHYS_HEAP_USAGE_FW_CONFIG; + + eError = PhysmemCreateHeapLMA(psDeviceNode, &sFwHeapConfig, "Fw Cfg subheap", &psDeviceNode->psFWCfgPhysHeap); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemCreateHeapLMA:CFG", ErrorDeinit); + } + + /* Acquire FW heaps */ + eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_FW_MAIN, psDeviceNode, + &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_MAIN", ErrorDeinit); + + eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_FW_CONFIG, psDeviceNode, + &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CONFIG]); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_CONFIG", ErrorDeinit); + + eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_FW_CODE, psDeviceNode, + &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CODE]); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_CODE", ErrorDeinit); + + eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_FW_PRIV_DATA, psDeviceNode, + &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PRIV_DATA]); + PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_DATA", ErrorDeinit); + + return eError; + +ErrorDeinit: + PVR_ASSERT(IMG_FALSE); + PVRSRVPhysMemHeapsDeinit(psDeviceNode); + + return eError; +} + +static void _ReadNon4KHeapPageSize(IMG_UINT32 *pui32Log2Non4KPgSize) +{ + void *pvAppHintState = NULL; + IMG_UINT32 ui32AppHintDefault = PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE; + IMG_UINT32 ui32GeneralNon4KHeapPageSize; + + /* Get the page size for the dummy page from the NON4K heap apphint */ + OSCreateKMAppHintState(&pvAppHintState); + OSGetKMAppHintUINT32(pvAppHintState, GeneralNon4KHeapPageSize, + &ui32AppHintDefault, &ui32GeneralNon4KHeapPageSize); + *pui32Log2Non4KPgSize = 
ExactLog2(ui32GeneralNon4KHeapPageSize); + OSFreeKMAppHintState(pvAppHintState); +} + +/* + RGXRegisterDevice +*/ +PVRSRV_ERROR RGXRegisterDevice (PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + DEVICE_MEMORY_INFO *psDevMemoryInfo; + PVRSRV_RGXDEV_INFO *psDevInfo; + + PDUMPCOMMENT("Device Name: %s", psDeviceNode->psDevConfig->pszName); + + if (psDeviceNode->psDevConfig->pszVersion) + { + PDUMPCOMMENT("Device Version: %s", psDeviceNode->psDevConfig->pszVersion); + } + + PDUMPCOMMENT("RGX Initialisation (Part 1)"); + + /********************* + * Device node setup * + *********************/ + /* Setup static data and callbacks on the device agnostic device node */ +#if defined(PDUMP) + psDeviceNode->sDevId.pszPDumpRegName = RGX_PDUMPREG_NAME; + psDeviceNode->sDevId.pszPDumpDevName = PhysHeapPDumpMemspaceName(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_GPU_LOCAL]); + psDeviceNode->pfnPDumpInitDevice = &RGXResetPDump; + psDeviceNode->ui64FBCClearColour = RGX_FBC_CC_DEFAULT; + +#endif /* PDUMP */ + + OSAtomicWrite(&psDeviceNode->eHealthStatus, PVRSRV_DEVICE_HEALTH_STATUS_OK); + OSAtomicWrite(&psDeviceNode->eHealthReason, PVRSRV_DEVICE_HEALTH_REASON_NONE); + + psDeviceNode->pfnDevSLCFlushRange = RGXSLCFlushRange; + psDeviceNode->pfnInvalFBSCTable = RGXInvalidateFBSCTable; + + psDeviceNode->pfnValidateOrTweakPhysAddrs = NULL; + + psDeviceNode->pfnMMUCacheInvalidate = RGXMMUCacheInvalidate; + + psDeviceNode->pfnMMUCacheInvalidateKick = RGXMMUCacheInvalidateKick; + + /* Register RGX to receive notifies when other devices complete some work */ + PVRSRVRegisterCmdCompleteNotify(&psDeviceNode->hCmdCompNotify, &RGXScheduleProcessQueuesKM, psDeviceNode); + + psDeviceNode->pfnInitDeviceCompatCheck = &RGXDevInitCompatCheck; + + /* Register callbacks for creation of device memory contexts */ + psDeviceNode->pfnRegisterMemoryContext = RGXRegisterMemoryContext; + psDeviceNode->pfnUnregisterMemoryContext = RGXUnregisterMemoryContext; + + /* Register callbacks for Unified 
Fence Objects */ + psDeviceNode->pfnAllocUFOBlock = RGXAllocUFOBlock; + psDeviceNode->pfnFreeUFOBlock = RGXFreeUFOBlock; + + /* Register callback for checking the device's health */ + psDeviceNode->pfnUpdateHealthStatus = PVRSRV_VZ_MODE_IS(GUEST) ? NULL : RGXUpdateHealthStatus; + +#if defined(SUPPORT_AUTOVZ) + /* Register callback for updating the virtualization watchdog */ + psDeviceNode->pfnUpdateAutoVzWatchdog = RGXUpdateAutoVzWatchdog; +#endif + + /* Register method to service the FW HWPerf buffer */ + psDeviceNode->pfnServiceHWPerf = RGXHWPerfDataStoreCB; + + /* Register callback for getting the device version information string */ + psDeviceNode->pfnDeviceVersionString = RGXDevVersionString; + + /* Register callback for getting the device clock speed */ + psDeviceNode->pfnDeviceClockSpeed = RGXDevClockSpeed; + + /* Register callback for soft resetting some device modules */ + psDeviceNode->pfnSoftReset = RGXSoftReset; + + /* Register callback for resetting the HWR logs */ + psDeviceNode->pfnResetHWRLogs = RGXResetHWRLogs; + + /* Register callback for resetting the HWR logs */ + psDeviceNode->pfnVerifyBVNC = RGXVerifyBVNC; + + /* Register callback for checking alignment of UM structures */ + psDeviceNode->pfnAlignmentCheck = RGXAlignmentCheck; + + /*Register callback for checking the supported features and getting the + * corresponding values */ + psDeviceNode->pfnCheckDeviceFeature = RGXBvncCheckFeatureSupported; + psDeviceNode->pfnGetDeviceFeatureValue = RGXBvncGetSupportedFeatureValue; + + /* Callback for checking if system layer supports FBC 3.1 */ + psDeviceNode->pfnHasFBCDCVersion31 = NULL; + + /* Callback for getting the MMU device attributes */ + psDeviceNode->pfnGetMMUDeviceAttributes = RGXDevMMUAttributes; + + /* Register callback for initialising device-specific physical memory heaps */ + psDeviceNode->pfnPhysMemDeviceHeapsInit = RGXPhysMemDeviceHeapsInit; + + /* Set up required support for dummy page */ + 
OSAtomicWrite(&(psDeviceNode->sDummyPage.atRefCounter), 0); + OSAtomicWrite(&(psDeviceNode->sDevZeroPage.atRefCounter), 0); + + /* Set the order to 0 */ + psDeviceNode->sDummyPage.sPageHandle.uiOrder = 0; + psDeviceNode->sDevZeroPage.sPageHandle.uiOrder = 0; + + /* Set the size of the Dummy page to zero */ + psDeviceNode->sDummyPage.ui32Log2PgSize = 0; + + /* Set the size of the Zero page to zero */ + psDeviceNode->sDevZeroPage.ui32Log2PgSize = 0; + + /* Set the Dummy page phys addr */ + psDeviceNode->sDummyPage.ui64PgPhysAddr = MMU_BAD_PHYS_ADDR; + + /* Set the Zero page phys addr */ + psDeviceNode->sDevZeroPage.ui64PgPhysAddr = MMU_BAD_PHYS_ADDR; + + /* The lock can be acquired from MISR (Z-buffer) path */ + eError = OSLockCreate(&psDeviceNode->sDummyPage.psPgLock); + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create dummy page lock", __func__)); + return eError; + } + + /* Create the lock for zero page */ + eError = OSLockCreate(&psDeviceNode->sDevZeroPage.psPgLock); + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create Zero page lock", __func__)); + goto free_dummy_page; + } +#if defined(PDUMP) + psDeviceNode->sDummyPage.hPdumpPg = NULL; + psDeviceNode->sDevZeroPage.hPdumpPg = NULL; +#endif + + psDeviceNode->pfnHasFBCDCVersion31 = RGXSystemHasFBCDCVersion31; + + /* The device shared-virtual-memory heap address-space size is stored here for faster + look-up without having to walk the device heap configuration structures during + client device connection (i.e. 
this size is relative to a zero-based offset) */ + psDeviceNode->ui64GeneralSVMHeapTopVA = RGX_GENERAL_SVM_HEAP_BASE + RGX_GENERAL_SVM_HEAP_SIZE; + + /********************* + * Device info setup * + *********************/ + /* Allocate device control block */ + psDevInfo = OSAllocZMem(sizeof(*psDevInfo)); + if (psDevInfo == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "DevInitRGXPart1 : Failed to alloc memory for DevInfo")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* create locks for the context lists stored in the DevInfo structure. + * these lists are modified on context create/destroy and read by the + * watchdog thread + */ + + eError = OSWRLockCreate(&(psDevInfo->hRenderCtxListLock)); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create render context list lock", __func__)); + goto e0; + } + + eError = OSWRLockCreate(&(psDevInfo->hComputeCtxListLock)); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create compute context list lock", __func__)); + goto e1; + } + + eError = OSWRLockCreate(&(psDevInfo->hTransferCtxListLock)); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create transfer context list lock", __func__)); + goto e2; + } + + eError = OSWRLockCreate(&(psDevInfo->hTDMCtxListLock)); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create TDM context list lock", __func__)); + goto e3; + } + + eError = OSWRLockCreate(&(psDevInfo->hKickSyncCtxListLock)); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create kick sync context list lock", __func__)); + goto e4; + } + + eError = OSWRLockCreate(&(psDevInfo->hMemoryCtxListLock)); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create memory context list lock", __func__)); + goto e5; + } + + eError = OSSpinLockCreate(&psDevInfo->hLockKCCBDeferredCommandsList); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to KCCB deferred commands list lock", 
__func__)); + goto e6; + } + dllist_init(&(psDevInfo->sKCCBDeferredCommandsListHead)); + + dllist_init(&(psDevInfo->sRenderCtxtListHead)); + dllist_init(&(psDevInfo->sComputeCtxtListHead)); + dllist_init(&(psDevInfo->sTDMCtxtListHead)); + dllist_init(&(psDevInfo->sKickSyncCtxtListHead)); + + dllist_init(&(psDevInfo->sCommonCtxtListHead)); + psDevInfo->ui32CommonCtxtCurrentID = 1; + + + eError = OSWRLockCreate(&psDevInfo->hCommonCtxtListLock); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create common context list lock", __func__)); + goto e7; + } + + eError = OSLockCreate(&psDevInfo->sRegCongfig.hLock); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create register configuration lock", __func__)); + goto e8; + } + + eError = OSLockCreate(&psDevInfo->hBPLock); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock for break points", __func__)); + goto e9; + } + + eError = OSLockCreate(&psDevInfo->hRGXFWIfBufInitLock); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock for trace buffers", __func__)); + goto e10; + } + + eError = OSLockCreate(&psDevInfo->hCCBStallCheckLock); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create stalled CCB checking lock", __func__)); + goto e11; + } + eError = OSLockCreate(&psDevInfo->hCCBRecoveryLock); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create stalled CCB recovery lock", __func__)); + goto e12; + } + + dllist_init(&psDevInfo->sMemoryContextList); + + /* initialise ui32SLRHoldoffCounter */ + if (RGX_INITIAL_SLR_HOLDOFF_PERIOD_MS > DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT) + { + psDevInfo->ui32SLRHoldoffCounter = RGX_INITIAL_SLR_HOLDOFF_PERIOD_MS / DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT; + } + else + { + psDevInfo->ui32SLRHoldoffCounter = 0; + } + + /* Setup static data and callbacks on the device specific device info */ + psDevInfo->psDeviceNode = 
psDeviceNode; + + psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo; + psDevInfo->pvDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap; + + /* + * Map RGX Registers + */ + psDevInfo->ui32RegSize = psDeviceNode->psDevConfig->ui32RegsSize; + psDevInfo->sRegsPhysBase = psDeviceNode->psDevConfig->sRegsCpuPBase; + +#if !defined(NO_HARDWARE) + psDevInfo->pvRegsBaseKM = (void __iomem *) OSMapPhysToLin(psDeviceNode->psDevConfig->sRegsCpuPBase, + psDeviceNode->psDevConfig->ui32RegsSize, + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED); + + if (psDevInfo->pvRegsBaseKM == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVRGXInitDevPart2KM: Failed to create RGX register mapping")); + eError = PVRSRV_ERROR_BAD_MAPPING; + goto e13; + } +#else + psDevInfo->pvRegsBaseKM = NULL; +#endif /* !NO_HARDWARE */ + + psDeviceNode->pvDevice = psDevInfo; + + + eError = RGXBvncInitialiseConfiguration(psDeviceNode); + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Unsupported HW device detected by driver", + __func__)); + goto e14; + } + + /* pdump info about the core */ + PDUMPCOMMENT("RGX Version Information (KM): %d.%d.%d.%d", + psDevInfo->sDevFeatureCfg.ui32B, + psDevInfo->sDevFeatureCfg.ui32V, + psDevInfo->sDevFeatureCfg.ui32N, + psDevInfo->sDevFeatureCfg.ui32C); + + _ReadNon4KHeapPageSize(&psDevInfo->ui32Log2Non4KPgSize); + + /*Set the zero & dummy page sizes as needed for the heap with largest page size */ + psDeviceNode->sDevZeroPage.ui32Log2PgSize = psDevInfo->ui32Log2Non4KPgSize; + psDeviceNode->sDummyPage.ui32Log2PgSize = psDevInfo->ui32Log2Non4KPgSize; + + RGXInitMultiCoreInfo(psDeviceNode); + + /* Configure MMU specific stuff */ + RGXMMUInit_Register(psDeviceNode); + + eError = RGXInitHeaps(psDevInfo, psDevMemoryInfo); + if (eError != PVRSRV_OK) + { + goto e14; + } + + eError = RGXHWPerfInit(psDevInfo); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfInit", e14); + +#if defined(SUPPORT_VALIDATION) + eError = RGXPowerDomainInitState(&psDevInfo->sPowerDomainState, + 
psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount); + if (eError != PVRSRV_OK) + { + goto e15; + } + + /* This completion will be signaled by the ISR when processing + * the answer CCB command carrying an RGX Register read value */ + init_completion(&psDevInfo->sFwRegs.sRegComp); + psDevInfo->sFwRegs.ui64RegVal = 0; + +#if defined(SUPPORT_SOC_TIMER) + { + IMG_BOOL ui32AppHintDefault = IMG_FALSE; + IMG_BOOL bInitSocTimer; + void *pvAppHintState = NULL; + + OSCreateKMAppHintState(&pvAppHintState); + OSGetKMAppHintBOOL(pvAppHintState, ValidateSOCUSCTimer, &ui32AppHintDefault, &bInitSocTimer); + OSFreeKMAppHintState(pvAppHintState); + + if (bInitSocTimer) + { + eError = PVRSRVInitSOCUSCTimer(psDeviceNode); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVInitSOCUSCTimer", e16); + } + } +#endif +#endif + + /* Register callback for dumping debug info */ + eError = RGXDebugInit(psDevInfo); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXDebugInit", e16); + +#if defined(PDUMP) + eError = DevmemIntAllocDefBackingPage(psDeviceNode, + &psDeviceNode->sDummyPage, + PVR_DUMMY_PAGE_INIT_VALUE, + DUMMY_PAGE, + IMG_TRUE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate dummy page.", __func__)); + goto e17; + } + eError = DevmemIntAllocDefBackingPage(psDeviceNode, + &psDeviceNode->sDevZeroPage, + PVR_ZERO_PAGE_INIT_VALUE, + DEV_ZERO_PAGE, + IMG_TRUE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate Zero page.", __func__)); + goto e18; + } +#endif + + /* Initialise the device dependent bridges */ + eError = DeviceDepBridgeInit(psDevInfo->sDevFeatureCfg.ui64Features); + PVR_LOG_IF_ERROR(eError, "DeviceDepBridgeInit"); + + /* Initialise error counters */ + memset(&psDevInfo->sErrorCounts, 0, sizeof(PVRSRV_RGXDEV_ERROR_COUNTS)); + + return PVRSRV_OK; + +#if defined(PDUMP) +e18: + DevmemIntFreeDefBackingPage(psDeviceNode, + &psDeviceNode->sDummyPage, + DUMMY_PAGE); +e17: + RGXDebugDeinit(psDevInfo); +#endif +e16: +#if defined(SUPPORT_VALIDATION) + 
RGXPowerDomainDeInitState(&psDevInfo->sPowerDomainState); +e15: +#endif + RGXHWPerfDeinit(psDevInfo); +e14: +#if !defined(NO_HARDWARE) + OSUnMapPhysToLin((void __force *) psDevInfo->pvRegsBaseKM, + psDevInfo->ui32RegSize); + +e13: +#endif /* !NO_HARDWARE */ + OSLockDestroy(psDevInfo->hCCBRecoveryLock); +e12: + OSLockDestroy(psDevInfo->hCCBStallCheckLock); +e11: + OSLockDestroy(psDevInfo->hRGXFWIfBufInitLock); +e10: + OSLockDestroy(psDevInfo->hBPLock); +e9: + OSLockDestroy(psDevInfo->sRegCongfig.hLock); +e8: + OSWRLockDestroy(psDevInfo->hCommonCtxtListLock); +e7: + OSSpinLockDestroy(psDevInfo->hLockKCCBDeferredCommandsList); +e6: + OSWRLockDestroy(psDevInfo->hMemoryCtxListLock); +e5: + OSWRLockDestroy(psDevInfo->hKickSyncCtxListLock); +e4: + OSWRLockDestroy(psDevInfo->hTDMCtxListLock); +e3: + OSWRLockDestroy(psDevInfo->hTransferCtxListLock); +e2: + OSWRLockDestroy(psDevInfo->hComputeCtxListLock); +e1: + OSWRLockDestroy(psDevInfo->hRenderCtxListLock); +e0: + OSFreeMem(psDevInfo); + + /* Destroy the zero page lock created above */ + OSLockDestroy(psDeviceNode->sDevZeroPage.psPgLock); + +free_dummy_page: + /* Destroy the dummy page lock created above */ + OSLockDestroy(psDeviceNode->sDummyPage.psPgLock); + + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +IMG_PCHAR RGXDevBVNCString(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + IMG_PCHAR psz = psDevInfo->sDevFeatureCfg.pszBVNCString; + if (NULL == psz) + { + IMG_CHAR pszBVNCInfo[RGX_HWPERF_MAX_BVNC_LEN]; + size_t uiBVNCStringSize; + size_t uiStringLength; + + uiStringLength = OSSNPrintf(pszBVNCInfo, RGX_HWPERF_MAX_BVNC_LEN, "%d.%d.%d.%d", + psDevInfo->sDevFeatureCfg.ui32B, + psDevInfo->sDevFeatureCfg.ui32V, + psDevInfo->sDevFeatureCfg.ui32N, + psDevInfo->sDevFeatureCfg.ui32C); + PVR_ASSERT(uiStringLength < RGX_HWPERF_MAX_BVNC_LEN); + + uiBVNCStringSize = (uiStringLength + 1) * sizeof(IMG_CHAR); + psz = OSAllocMem(uiBVNCStringSize); + if (NULL != psz) + { + OSCachedMemCopy(psz, pszBVNCInfo, uiBVNCStringSize); + 
psDevInfo->sDevFeatureCfg.pszBVNCString = psz; + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Allocating memory for BVNC Info string failed", + __func__)); + } + } + + return psz; +} + +/*************************************************************************/ /*! +@Function RGXDevVersionString +@Description Gets the version string for the given device node and returns + a pointer to it in ppszVersionString. It is then the + responsibility of the caller to free this memory. +@Input psDeviceNode Device node from which to obtain the + version string +@Output ppszVersionString Contains the version string upon return +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +static PVRSRV_ERROR RGXDevVersionString(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_CHAR **ppszVersionString) +{ +#if defined(COMPAT_BVNC_MASK_B) || defined(COMPAT_BVNC_MASK_V) || defined(COMPAT_BVNC_MASK_N) || defined(COMPAT_BVNC_MASK_C) || defined(NO_HARDWARE) || defined(EMULATOR) + const IMG_CHAR szFormatString[] = "GPU variant BVNC: %s (SW)"; +#else + const IMG_CHAR szFormatString[] = "GPU variant BVNC: %s (HW)"; +#endif + PVRSRV_RGXDEV_INFO *psDevInfo; + IMG_PCHAR pszBVNC; + size_t uiStringLength; + + if (psDeviceNode == NULL || ppszVersionString == NULL) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; + pszBVNC = RGXDevBVNCString(psDevInfo); + + if (NULL == pszBVNC) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + uiStringLength = OSStringLength(pszBVNC); + uiStringLength += (sizeof(szFormatString) - 2); /* sizeof includes the null, -2 for "%s" */ + *ppszVersionString = OSAllocMem(uiStringLength * sizeof(IMG_CHAR)); + if (*ppszVersionString == NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + OSSNPrintf(*ppszVersionString, uiStringLength, szFormatString, + pszBVNC); + + return PVRSRV_OK; +} + +/**************************************************************************/ /*! 
+@Function RGXDevClockSpeed +@Description Gets the clock speed for the given device node and returns + it in pui32RGXClockSpeed. +@Input psDeviceNode Device node +@Output pui32RGXClockSpeed Variable for storing the clock speed +@Return PVRSRV_ERROR +*/ /***************************************************************************/ +static PVRSRV_ERROR RGXDevClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_PUINT32 pui32RGXClockSpeed) +{ + RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData; + + /* get clock speed */ + *pui32RGXClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed; + + return PVRSRV_OK; +} + +#if (RGX_NUM_OS_SUPPORTED > 1) +/*! + ******************************************************************************* + + @Function RGXInitFwRawHeap + + @Description Called to perform additional initialisation + ******************************************************************************/ +static PVRSRV_ERROR RGXInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap, IMG_UINT32 ui32OSid) +{ + IMG_UINT32 uiStringLength; + IMG_UINT32 uiStringLengthMax = 32; + + IMG_UINT32 ui32Log2RgxDefaultPageShift = RGXHeapDerivePageSize(OSGetPageShift()); + + uiStringLength = MIN(sizeof(RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT), uiStringLengthMax + 1); + + /* Start by allocating memory for this OSID heap identification string */ + psDevMemHeap->pszName = OSAllocMem(uiStringLength * sizeof(IMG_CHAR)); + if (psDevMemHeap->pszName == NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* Append the OSID number to the RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT string */ + OSSNPrintf((IMG_CHAR *)psDevMemHeap->pszName, uiStringLength, RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32OSid); + + /* Use the common blueprint template support function to initialise the heap */ + HeapCfgBlueprintInit(psDevMemHeap->pszName, + RGX_FIRMWARE_RAW_HEAP_BASE + (ui32OSid * RGX_FIRMWARE_RAW_HEAP_SIZE), + RGX_FIRMWARE_RAW_HEAP_SIZE, + 0, + ui32Log2RgxDefaultPageShift, + 0, + psDevMemHeap); + + return 
PVRSRV_OK; +} + +/*! + ******************************************************************************* + + @Function RGXDeInitFwRawHeap + + @Description Called to perform additional deinitialisation + ******************************************************************************/ +static void RGXDeInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap) +{ + IMG_UINT64 uiBase = RGX_FIRMWARE_RAW_HEAP_BASE + RGX_FIRMWARE_RAW_HEAP_SIZE; + IMG_UINT64 uiSpan = uiBase + ((RGX_NUM_OS_SUPPORTED - 1) * RGX_FIRMWARE_RAW_HEAP_SIZE); + + /* Safe to do as the guest firmware heaps are last in the list */ + if (psDevMemHeap->sHeapBaseAddr.uiAddr >= uiBase && + psDevMemHeap->sHeapBaseAddr.uiAddr < uiSpan) + { + void *pszName = (void*)psDevMemHeap->pszName; + OSFreeMem(pszName); + } +} +#endif /* (RGX_NUM_OS_SUPPORTED > 1) */ + +/****************************************************************************** + End of file (rgxinit.c) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgxinit.h b/drivers/gpu/drm/phytium/octopus/rgxinit.h new file mode 100644 index 000000000000..384e367e00d5 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxinit.h @@ -0,0 +1,306 @@ +/*************************************************************************/ /*! +@File +@Title RGX initialisation header file +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Header for the RGX initialisation +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(RGXINIT_H) +#define RGXINIT_H + +#include "connection_server.h" +#include "pvrsrv_error.h" +#include "img_types.h" +#include "device.h" +#include "rgxdevice.h" +#include "rgx_bridge.h" +#include "fwload.h" + +#if defined(__linux__) +#define OS_FW_VERIFY_FUNCTION OSVerifyFirmware +#else +#define OS_FW_VERIFY_FUNCTION NULL +#endif + +/*! +******************************************************************************* + + @Function RGXInitDevPart2 + + @Description + + Second part of server-side RGX initialisation + + @Input psDeviceNode - device node + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXInitDevPart2 (PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32DeviceFlags, + IMG_UINT32 ui32HWPerfHostBufSizeKB, + IMG_UINT32 ui32HWPerfHostFilter, + RGX_ACTIVEPM_CONF eActivePMConf, + IMG_UINT32 ui32AvailableSPUMask); + +PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEVMEM_SIZE_T ui32FWCodeLen, + IMG_DEVMEM_SIZE_T ui32FWDataLen, + IMG_DEVMEM_SIZE_T uiFWCorememCodeLen, + IMG_DEVMEM_SIZE_T uiFWCorememDataLen); + + +/*! 
+******************************************************************************* + + @Function RGXInitFirmware + + @Description + + Server-side RGX firmware initialisation + + @Input psDeviceNode - device node + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR +RGXInitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_BOOL bEnableSignatureChecks, + IMG_UINT32 ui32SignatureChecksBufSize, + IMG_UINT32 ui32HWPerfFWBufSizeKB, + IMG_UINT64 ui64HWPerfFilter, + IMG_UINT32 ui32ConfigFlags, + IMG_UINT32 ui32LogType, + IMG_UINT32 ui32FilterFlags, + IMG_UINT32 ui32JonesDisableMask, + IMG_UINT32 ui32HWRDebugDumpLimit, + IMG_UINT32 ui32RenderKillingCtl, + IMG_UINT32 ui32CDMTDMKillingCtl, + IMG_UINT32 *pui32TPUTrilinearFracMask, + IMG_UINT32 *pui32USRMNumRegions, + IMG_UINT64 *pui64UVBRMNumRegions, + IMG_UINT32 ui32HWPerfCountersDataSize, + RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandingConf, + FW_PERF_CONF eFirmwarePerf, + IMG_UINT32 ui32ConfigFlagsExt, + IMG_UINT32 ui32AvailableSPUMask, + IMG_UINT32 ui32FwOsCfgFlags); + + +/*! +******************************************************************************* + + @Function RGXLoadAndGetFWData + + @Description + + Load FW and return pointer to FW data. + + @Input psDeviceNode - device node + + @Input ppsRGXFW - fw pointer + + @Output ppbFWData - pointer to FW data (NULL if an error occurred) + + @Return PVRSRV_ERROR - PVRSRV_OK on success + PVRSRV_ERROR_NOT_READY if filesystem is not ready + PVRSRV_ERROR_NOT_FOUND if no suitable FW image found + PVRSRV_ERROR_OUT_OF_MEMORY if unable to alloc memory for FW image + PVRSRV_ERROR_NOT_AUTHENTICATED if FW image failed verification + +******************************************************************************/ +PVRSRV_ERROR RGXLoadAndGetFWData(PVRSRV_DEVICE_NODE *psDeviceNode, + OS_FW_IMAGE **ppsRGXFW, + const IMG_BYTE **ppbFWData); +#if defined(PDUMP) +/*! 
+******************************************************************************* + + @Function RGXInitHWPerfCounters + + @Description + + Initialisation of the performance counters + + @Input psDeviceNode - device node + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXInitHWPerfCounters(PVRSRV_DEVICE_NODE *psDeviceNode); +#endif + +/*! +******************************************************************************* + + @Function RGXRegisterDevice + + @Description + + Registers the device with the system + + @Input: psDeviceNode - device node + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode); + +/*! +******************************************************************************* + + @Function RGXDevBVNCString + + @Description + + Returns the Device BVNC string. It will allocate and fill it first, if necessary. + + @Input: psDevInfo - device info (must not be null) + + @Return IMG_PCHAR - pointer to BVNC string + +******************************************************************************/ +IMG_PCHAR RGXDevBVNCString(PVRSRV_RGXDEV_INFO *psDevInfo); + +/*! +******************************************************************************* + + @Function DevDeInitRGX + + @Description + + Reset and deinitialise Chip + + @Input psDeviceNode - device info. structure + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode); + + +#if !defined(NO_HARDWARE) + +void RGX_WaitForInterruptsTimeout(PVRSRV_RGXDEV_INFO *psDevInfo); + +/*! +******************************************************************************* + + @Function SORgxGpuUtilStatsRegister + + @Description SO Interface function called from the OS layer implementation. 
+ Initialise data used to compute GPU utilisation statistics + for a particular user (identified by the handle passed as + argument). This function must be called only once for each + different user/handle. + + @Input phGpuUtilUser - Pointer to handle used to identify a user of + RGXGetGpuUtilStats + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR SORgxGpuUtilStatsRegister(IMG_HANDLE *phGpuUtilUser); + + +/*! +******************************************************************************* + + @Function SORgxGpuUtilStatsUnregister + + @Description SO Interface function called from the OS layer implementation. + Free data previously used to compute GPU utilisation statistics + for a particular user (identified by the handle passed as + argument). + + @Input hGpuUtilUser - Handle used to identify a user of + RGXGetGpuUtilStats + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR SORgxGpuUtilStatsUnregister(IMG_HANDLE hGpuUtilUser); +#endif /* !defined(NO_HARDWARE) */ + +/*! +************************************************************************************ + @Function RGXSystemGetFabricCoherency + + @Description Get the system fabric coherency for the device by reading default + configuration from device register, subject to AppHint overrides. + + @Input sRegsCpuPBase : Device register CPU physical address base + ui32RegsSize : Device register size + peDevFabricType : Device memory bus fabric type + peCacheSnoopingMode : Fabric coherency override + + @Return PVRSRV_ERROR +************************************************************************************/ +PVRSRV_ERROR RGXSystemGetFabricCoherency(PVRSRV_DEVICE_CONFIG *psDeviceConfig, + IMG_CPU_PHYADDR sRegsCpuPBase, + IMG_UINT32 ui32RegsSize, + PVRSRV_DEVICE_FABRIC_TYPE *peDevFabricType, + PVRSRV_DEVICE_SNOOP_MODE *peCacheSnoopingMode); + +/*! 
+ ******************************************************************************* + + @Function RGXInitCreateFWKernelMemoryContext + + @Description Called to perform initialisation during firmware kernel context + creation. + + @Input psDeviceNode device node + ******************************************************************************/ +PVRSRV_ERROR RGXInitCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode); + +/*! + ******************************************************************************* + + @Function RGXDeInitDestroyFWKernelMemoryContext + + @Description Called to perform deinitialisation during firmware kernel + context destruction. + + @Input psDeviceNode device node + ******************************************************************************/ +void RGXDeInitDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode); + +#endif /* RGXINIT_H */ diff --git a/drivers/gpu/drm/phytium/octopus/rgxkicksync.c b/drivers/gpu/drm/phytium/octopus/rgxkicksync.c new file mode 100644 index 000000000000..3441d2ae729e --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxkicksync.c @@ -0,0 +1,790 @@ +/*************************************************************************/ /*! +@File rgxkicksync.c +@Title Server side of the sync only kick API +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "img_defs.h" +#include "rgxkicksync.h" + +#include "rgxdevice.h" +#include "rgxmem.h" +#include "rgxfwutils.h" +#include "allocmem.h" +#include "sync.h" +#include "rgxhwperf.h" +#include "ospvr_gputrace.h" + +#include "sync_checkpoint.h" +#include "sync_checkpoint_internal.h" + +/* Enable this to dump the compiled list of UFOs prior to kick call */ +#define ENABLE_KICKSYNC_UFO_DUMP 0 + +//#define KICKSYNC_CHECKPOINT_DEBUG 1 + +#if defined(KICKSYNC_CHECKPOINT_DEBUG) +#define CHKPT_DBG(X) PVR_DPF(X) +#else +#define CHKPT_DBG(X) +#endif + +struct _RGX_SERVER_KICKSYNC_CONTEXT_ +{ + PVRSRV_DEVICE_NODE * psDeviceNode; + RGX_SERVER_COMMON_CONTEXT * psServerCommonContext; + DLLIST_NODE sListNode; + SYNC_ADDR_LIST sSyncAddrListFence; + SYNC_ADDR_LIST sSyncAddrListUpdate; + POS_LOCK hLock; +}; + + +PVRSRV_ERROR PVRSRVRGXCreateKickSyncContextKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32PackedCCBSizeU88, + IMG_UINT32 ui32ContextFlags, + RGX_SERVER_KICKSYNC_CONTEXT **ppsKickSyncContext) +{ + PVRSRV_RGXDEV_INFO * psDevInfo = psDeviceNode->pvDevice; + DEVMEM_MEMDESC * psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); + RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext; + RGX_COMMON_CONTEXT_INFO sInfo; + PVRSRV_ERROR eError; + IMG_UINT32 ui32CCBAllocSizeLog2, ui32CCBMaxAllocSizeLog2; + + memset(&sInfo, 0, sizeof(sInfo)); + + /* Prepare cleanup struct */ + * ppsKickSyncContext = NULL; + psKickSyncContext = OSAllocZMem(sizeof(*psKickSyncContext)); + if (psKickSyncContext == NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + eError = OSLockCreate(&psKickSyncContext->hLock); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto err_lockcreate; + } + + psKickSyncContext->psDeviceNode = psDeviceNode; + + 
ui32CCBAllocSizeLog2 = U32toU8_Unpack1(ui32PackedCCBSizeU88); + ui32CCBMaxAllocSizeLog2 = U32toU8_Unpack2(ui32PackedCCBSizeU88); + eError = FWCommonContextAllocate(psConnection, + psDeviceNode, + REQ_TYPE_KICKSYNC, + RGXFWIF_DM_GP, + hMemCtxPrivData, + NULL, + 0, + psFWMemContextMemDesc, + NULL, + ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_KICKSYNC_CCB_SIZE_LOG2, + ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_KICKSYNC_CCB_MAX_SIZE_LOG2, + ui32ContextFlags, + 0, /* priority */ + 0, /* max deadline MS */ + 0, /* robustness address */ + & sInfo, + & psKickSyncContext->psServerCommonContext); + if (eError != PVRSRV_OK) + { + goto fail_contextalloc; + } + + OSWRLockAcquireWrite(psDevInfo->hKickSyncCtxListLock); + dllist_add_to_tail(&(psDevInfo->sKickSyncCtxtListHead), &(psKickSyncContext->sListNode)); + OSWRLockReleaseWrite(psDevInfo->hKickSyncCtxListLock); + + SyncAddrListInit(&psKickSyncContext->sSyncAddrListFence); + SyncAddrListInit(&psKickSyncContext->sSyncAddrListUpdate); + + * ppsKickSyncContext = psKickSyncContext; + return PVRSRV_OK; + +fail_contextalloc: + OSLockDestroy(psKickSyncContext->hLock); +err_lockcreate: + OSFreeMem(psKickSyncContext); + return eError; +} + + +PVRSRV_ERROR PVRSRVRGXDestroyKickSyncContextKM(RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext) +{ + PVRSRV_RGXDEV_INFO * psDevInfo = psKickSyncContext->psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + + /* Check if the FW has finished with this resource ... */ + eError = RGXFWRequestCommonContextCleanUp(psKickSyncContext->psDeviceNode, + psKickSyncContext->psServerCommonContext, + RGXFWIF_DM_GP, + PDUMP_FLAGS_NONE); + + if (eError == PVRSRV_ERROR_RETRY) + { + return eError; + } + else if (eError != PVRSRV_OK) + { + PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", + __func__, + PVRSRVGetErrorString(eError))); + return eError; + } + + /* ... 
it has so we can free its resources */ + + OSWRLockAcquireWrite(psDevInfo->hKickSyncCtxListLock); + dllist_remove_node(&(psKickSyncContext->sListNode)); + OSWRLockReleaseWrite(psDevInfo->hKickSyncCtxListLock); + + FWCommonContextFree(psKickSyncContext->psServerCommonContext); + + SyncAddrListDeinit(&psKickSyncContext->sSyncAddrListFence); + SyncAddrListDeinit(&psKickSyncContext->sSyncAddrListUpdate); + + OSLockDestroy(psKickSyncContext->hLock); + + OSFreeMem(psKickSyncContext); + + return PVRSRV_OK; +} + +PVRSRV_ERROR PVRSRVRGXSetKickSyncContextPropertyKM(RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContext, + RGX_CONTEXT_PROPERTY eContextProperty, + IMG_UINT64 ui64Input, + IMG_UINT64 *pui64Output) +{ + PVRSRV_ERROR eError; + + switch (eContextProperty) + { + case RGX_CONTEXT_PROPERTY_FLAGS: + { + OSLockAcquire(psKickSyncContext->hLock); + eError = FWCommonContextSetFlags(psKickSyncContext->psServerCommonContext, + (IMG_UINT32)ui64Input); + + OSLockRelease(psKickSyncContext->hLock); + PVR_LOG_IF_ERROR(eError, "FWCommonContextSetFlags"); + break; + } + + default: + { + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, eContextProperty)); + eError = PVRSRV_ERROR_NOT_SUPPORTED; + } + } + + return eError; +} + +void DumpKickSyncCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + IMG_UINT32 ui32VerbLevel) +{ + DLLIST_NODE *psNode, *psNext; + OSWRLockAcquireRead(psDevInfo->hKickSyncCtxListLock); + dllist_foreach_node(&psDevInfo->sKickSyncCtxtListHead, psNode, psNext) + { + RGX_SERVER_KICKSYNC_CONTEXT *psCurrentServerKickSyncCtx = + IMG_CONTAINER_OF(psNode, RGX_SERVER_KICKSYNC_CONTEXT, sListNode); + + if (NULL != psCurrentServerKickSyncCtx->psServerCommonContext) + { + DumpFWCommonContextInfo(psCurrentServerKickSyncCtx->psServerCommonContext, + pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); + } + } + OSWRLockReleaseRead(psDevInfo->hKickSyncCtxListLock); +} + 
+IMG_UINT32 CheckForStalledClientKickSyncCtxt(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + DLLIST_NODE *psNode, *psNext; + IMG_UINT32 ui32ContextBitMask = 0; + + OSWRLockAcquireRead(psDevInfo->hKickSyncCtxListLock); + + dllist_foreach_node(&psDevInfo->sKickSyncCtxtListHead, psNode, psNext) + { + RGX_SERVER_KICKSYNC_CONTEXT *psCurrentServerKickSyncCtx = + IMG_CONTAINER_OF(psNode, RGX_SERVER_KICKSYNC_CONTEXT, sListNode); + + if (NULL != psCurrentServerKickSyncCtx->psServerCommonContext) + { + if (CheckStalledClientCommonContext(psCurrentServerKickSyncCtx->psServerCommonContext, RGX_KICK_TYPE_DM_GP) == PVRSRV_ERROR_CCCB_STALLED) + { + ui32ContextBitMask |= RGX_KICK_TYPE_DM_GP; + } + } + } + + OSWRLockReleaseRead(psDevInfo->hKickSyncCtxListLock); + return ui32ContextBitMask; +} + +PVRSRV_ERROR PVRSRVRGXKickSyncKM(RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext, + IMG_UINT32 ui32ClientCacheOpSeqNum, + IMG_UINT32 ui32ClientUpdateCount, + SYNC_PRIMITIVE_BLOCK ** pauiClientUpdateUFODevVarBlock, + IMG_UINT32 * paui32ClientUpdateOffset, + IMG_UINT32 * paui32ClientUpdateValue, + PVRSRV_FENCE iCheckFence, + PVRSRV_TIMELINE iUpdateTimeline, + PVRSRV_FENCE * piUpdateFence, + IMG_CHAR szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH], + IMG_UINT32 ui32ExtJobRef) +{ + RGXFWIF_KCCB_CMD sKickSyncKCCBCmd; + RGX_CCB_CMD_HELPER_DATA asCmdHelperData[1]; + PVRSRV_ERROR eError; + PVRSRV_ERROR eError2; + IMG_BOOL bCCBStateOpen = IMG_FALSE; + PRGXFWIF_UFO_ADDR *pauiClientFenceUFOAddress = NULL; + PRGXFWIF_UFO_ADDR *pauiClientUpdateUFOAddress = NULL; + IMG_UINT32 ui32ClientFenceCount = 0; + IMG_UINT32 *paui32ClientFenceValue = NULL; + PVRSRV_FENCE iUpdateFence = PVRSRV_NO_FENCE; + IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psKickSyncContext->psServerCommonContext).ui32Addr; + PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psKickSyncContext->psServerCommonContext); + RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psKickSyncContext->psServerCommonContext); + IMG_UINT32 
ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal); + IMG_UINT64 uiCheckFenceUID = 0; + IMG_UINT64 uiUpdateFenceUID = 0; + PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL; + PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL; + IMG_UINT32 ui32FenceSyncCheckpointCount = 0; + IMG_UINT32 ui32FenceTimelineUpdateValue = 0; + IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL; + PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL; + void *pvUpdateFenceFinaliseData = NULL; + + /* Ensure we haven't been given a null ptr to + * update values if we have been told we + * have dev var updates + */ + if (ui32ClientUpdateCount > 0) + { + PVR_LOG_RETURN_IF_FALSE(paui32ClientUpdateValue != NULL, + "paui32ClientUpdateValue NULL but ui32ClientUpdateCount > 0", + PVRSRV_ERROR_INVALID_PARAMS); + } + + OSLockAcquire(psKickSyncContext->hLock); + eError = SyncAddrListPopulate(&psKickSyncContext->sSyncAddrListUpdate, + ui32ClientUpdateCount, + pauiClientUpdateUFODevVarBlock, + paui32ClientUpdateOffset); + + if (eError != PVRSRV_OK) + { + goto fail_syncaddrlist; + } + + if (ui32ClientUpdateCount > 0) + { + pauiClientUpdateUFOAddress = psKickSyncContext->sSyncAddrListUpdate.pasFWAddrs; + } + /* Ensure the string is null-terminated (Required for safety) */ + szUpdateFenceName[31] = '\0'; + + /* This will never be true if called from the bridge since piUpdateFence will always be valid */ + if (iUpdateTimeline >= 0 && !piUpdateFence) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto out_unlock; + } + + CHKPT_DBG((PVR_DBG_ERROR, + "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), " + "psKickSyncContext->psDeviceNode->hSyncCheckpointContext=<%p>...", + __func__, iCheckFence, + (void*)psKickSyncContext->psDeviceNode->hSyncCheckpointContext)); + /* Resolve the sync checkpoints that make up the input fence */ + eError = SyncCheckpointResolveFence(psKickSyncContext->psDeviceNode->hSyncCheckpointContext, + iCheckFence, + &ui32FenceSyncCheckpointCount, + 
&apsFenceSyncCheckpoints, + &uiCheckFenceUID, + PDUMP_FLAGS_NONE); + if (eError != PVRSRV_OK) + { + goto fail_resolve_fence; + } + + /* Create the output fence (if required) */ + if (iUpdateTimeline != PVRSRV_NO_TIMELINE) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: calling SyncCheckpointCreateFence (iUpdateTimeline=%d)...", + __func__, iUpdateTimeline)); + eError = SyncCheckpointCreateFence(psKickSyncContext->psDeviceNode, + szUpdateFenceName, + iUpdateTimeline, + psKickSyncContext->psDeviceNode->hSyncCheckpointContext, + &iUpdateFence, + &uiUpdateFenceUID, + &pvUpdateFenceFinaliseData, + &psUpdateSyncCheckpoint, + (void*)&psFenceTimelineUpdateSync, + &ui32FenceTimelineUpdateValue, + PDUMP_FLAGS_NONE); + if (eError != PVRSRV_OK) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned error (%d)", + __func__, eError)); + goto fail_create_output_fence; + } + CHKPT_DBG((PVR_DBG_ERROR, + "%s: ...returned from SyncCheckpointCreateFence " + "(iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, " + "ui32FenceTimelineUpdateValue=%u)", + __func__, iUpdateFence, psFenceTimelineUpdateSync, + ui32FenceTimelineUpdateValue)); + + /* Append the sync prim update for the timeline (if required) */ + if (psFenceTimelineUpdateSync) + { + IMG_UINT32 *pui32TimelineUpdateWp = NULL; + + /* Allocate memory to hold the list of update values (including our timeline update) */ + pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*paui32ClientUpdateValue) * (ui32ClientUpdateCount+1)); + if (!pui32IntAllocatedUpdateValues) + { + /* Failed to allocate memory */ + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_alloc_update_values_mem; + } + OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32ClientUpdateCount+1)); + /* Copy the update values into the new memory, then append our timeline update value */ + OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32ClientUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32ClientUpdateCount); + /* Now set the 
additional update value */ + pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32ClientUpdateCount; + *pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue; + ui32ClientUpdateCount++; + /* Now make sure paui32ClientUpdateValue points to pui32IntAllocatedUpdateValues */ + paui32ClientUpdateValue = pui32IntAllocatedUpdateValues; +#if defined(KICKSYNC_CHECKPOINT_DEBUG) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues; + + for (iii=0; iii) = 0x%x", + __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + /* Now append the timeline sync prim addr to the kicksync context update list */ + SyncAddrListAppendSyncPrim(&psKickSyncContext->sSyncAddrListUpdate, + psFenceTimelineUpdateSync); + } + } + + /* Reset number of fence syncs in kicksync context fence list to 0 */ + SyncAddrListPopulate(&psKickSyncContext->sSyncAddrListFence, + 0, NULL, NULL); + + if (ui32FenceSyncCheckpointCount > 0) + { + /* Append the checks (from input fence) */ + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Append %d sync checkpoints to KickSync Fence " + "(&psKickSyncContext->sSyncAddrListFence=<%p>)...", + __func__, ui32FenceSyncCheckpointCount, + (void*)&psKickSyncContext->sSyncAddrListFence)); + SyncAddrListAppendCheckpoints(&psKickSyncContext->sSyncAddrListFence, + ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); + if (!pauiClientFenceUFOAddress) + { + pauiClientFenceUFOAddress = psKickSyncContext->sSyncAddrListFence.pasFWAddrs; + } + ui32ClientFenceCount += ui32FenceSyncCheckpointCount; +#if defined(KICKSYNC_CHECKPOINT_DEBUG) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiClientFenceUFOAddress; + + for (iii=0; iii) = 0x%x", + __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + } + + if (psUpdateSyncCheckpoint) + { + PVRSRV_ERROR eErr; + + /* Append the update (from output fence) */ + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Append 1 sync checkpoint to KickSync Update " + 
"(&psKickSyncContext->sSyncAddrListUpdate=<%p>)...", + __func__, (void*)&psKickSyncContext->sSyncAddrListUpdate)); + eErr = SyncAddrListAppendCheckpoints(&psKickSyncContext->sSyncAddrListUpdate, + 1, + &psUpdateSyncCheckpoint); + if (eErr != PVRSRV_OK) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: ...done. SyncAddrListAppendCheckpoints() returned error (%d)", + __func__, eErr)); + } + else + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done.", __func__)); + } + if (!pauiClientUpdateUFOAddress) + { + pauiClientUpdateUFOAddress = psKickSyncContext->sSyncAddrListUpdate.pasFWAddrs; + } + ui32ClientUpdateCount++; +#if defined(KICKSYNC_CHECKPOINT_DEBUG) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiClientUpdateUFOAddress; + + for (iii=0; iii) = 0x%x", + __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + } + +#if (ENABLE_KICKSYNC_UFO_DUMP == 1) + PVR_DPF((PVR_DBG_ERROR, "%s: dumping KICKSYNC fence/updates syncs...", + __func__)); + { + IMG_UINT32 ii; + PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiClientFenceUFOAddress; + IMG_UINT32 *pui32TmpIntFenceValue = paui32ClientFenceValue; + PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiClientUpdateUFOAddress; + IMG_UINT32 *pui32TmpIntUpdateValue = paui32ClientUpdateValue; + + /* Dump Fence syncs and Update syncs */ + PVR_DPF((PVR_DBG_ERROR, + "%s: Prepared %d KickSync fence syncs " + "(&psKickSyncContext->sSyncAddrListFence=<%p>, " + "pauiClientFenceUFOAddress=<%p>):", + __func__, ui32ClientFenceCount, + (void*)&psKickSyncContext->sSyncAddrListFence, + (void*)pauiClientFenceUFOAddress)); + for (ii=0; iiui32Addr & 0x1) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %d/%d<%p>. FWAddr=0x%x, " + "CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", + __func__, ii + 1, ui32ClientFenceCount, + (void*)psTmpIntFenceUFOAddress, + psTmpIntFenceUFOAddress->ui32Addr)); + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %d/%d<%p>. 
FWAddr=0x%x, CheckValue=%d(0x%x)", + __func__, ii + 1, ui32ClientFenceCount, + (void*)psTmpIntFenceUFOAddress, + psTmpIntFenceUFOAddress->ui32Addr, + *pui32TmpIntFenceValue, + *pui32TmpIntFenceValue)); + pui32TmpIntFenceValue++; + } + psTmpIntFenceUFOAddress++; + } + PVR_DPF((PVR_DBG_ERROR, + "%s: Prepared %d KickSync update syncs " + "(&psKickSyncContext->sSyncAddrListUpdate=<%p>, " + "pauiClientUpdateUFOAddress=<%p>):", + __func__, ui32ClientUpdateCount, + (void*)&psKickSyncContext->sSyncAddrListUpdate, + (void*)pauiClientUpdateUFOAddress)); + for (ii=0; ii", + __func__, __LINE__, + (void*)psTmpIntUpdateUFOAddress)); + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Line %d, pui32TmpIntUpdateValue=<%p>", + __func__, __LINE__, + (void*)pui32TmpIntUpdateValue)); + if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %d/%d<%p>. FWAddr=0x%x, " + "UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", + __func__, ii + 1, ui32ClientUpdateCount, + (void*)psTmpIntUpdateUFOAddress, + psTmpIntUpdateUFOAddress->ui32Addr)); + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %d/%d<%p>. 
FWAddr=0x%x, UpdateValue=%d", + __func__, ii + 1, ui32ClientUpdateCount, + (void*)psTmpIntUpdateUFOAddress, + psTmpIntUpdateUFOAddress->ui32Addr, + *pui32TmpIntUpdateValue)); + pui32TmpIntUpdateValue++; + } + psTmpIntUpdateUFOAddress++; + } + } +#endif + + RGXCmdHelperInitCmdCCB(psClientCCB, + 0, /* empty ui64FBSCEntryMask */ + ui32ClientFenceCount, + pauiClientFenceUFOAddress, + paui32ClientFenceValue, + ui32ClientUpdateCount, + pauiClientUpdateUFOAddress, + paui32ClientUpdateValue, + 0, + NULL, + NULL, + NULL, + NULL, + RGXFWIF_CCB_CMD_TYPE_NULL, + ui32ExtJobRef, + ui32IntJobRef, + PDUMP_FLAGS_NONE, + NULL, + "KickSync", + bCCBStateOpen, + asCmdHelperData); + + eError = RGXCmdHelperAcquireCmdCCB(ARRAY_SIZE(asCmdHelperData), asCmdHelperData); + if (eError != PVRSRV_OK) + { + goto fail_cmdaquire; + } + + /* + * We should reserve space in the kernel CCB here and fill in the command + * directly. + * This is so if there isn't space in the kernel CCB we can return with + * retry back to services client before we take any operations + */ + + /* + * We might only be kicking for flush out a padding packet so only submit + * the command if the create was successful + */ + if (eError == PVRSRV_OK) + { + /* + * All the required resources are ready at this point, we can't fail so + * take the required server sync operations and commit all the resources + */ + RGXCmdHelperReleaseCmdCCB(1, + asCmdHelperData, + "KickSync", + FWCommonContextGetFWAddress(psKickSyncContext->psServerCommonContext).ui32Addr); + } + + /* Construct the kernel kicksync CCB command. 
*/ + sKickSyncKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; + sKickSyncKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psKickSyncContext->psServerCommonContext); + sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB); + sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB); + + sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; + sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0; + + /* + * Submit the kicksync command to the firmware. + */ + RGXSRV_HWPERF_ENQ(psKickSyncContext, + OSGetCurrentClientProcessIDKM(), + ui32FWCtx, + ui32ExtJobRef, + ui32IntJobRef, + RGX_HWPERF_KICK_TYPE_SYNC, + iCheckFence, + iUpdateFence, + iUpdateTimeline, + uiCheckFenceUID, + uiUpdateFenceUID, + NO_DEADLINE, + NO_CYCEST); + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError2 = RGXScheduleCommand(psKickSyncContext->psDeviceNode->pvDevice, + RGXFWIF_DM_GP, + & sKickSyncKCCBCmd, + ui32ClientCacheOpSeqNum, + PDUMP_FLAGS_NONE); + if (eError2 != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + PVRGpuTraceEnqueueEvent(psKickSyncContext->psDeviceNode->pvDevice, + ui32FWCtx, ui32ExtJobRef, ui32IntJobRef, + RGX_HWPERF_KICK_TYPE_SYNC); + + if (eError2 != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVRGXKickSync failed to schedule kernel CCB command. (0x%x)", + eError)); + } + + /* + * Now check eError (which may have returned an error from our earlier call + * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first + * so we check it now... 
+ */ + if (eError != PVRSRV_OK ) + { + goto fail_cmdaquire; + } + +#if defined(NO_HARDWARE) + /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */ + if (psUpdateSyncCheckpoint) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Signalling NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", + __func__, (void*)psUpdateSyncCheckpoint, + SyncCheckpointGetId(psUpdateSyncCheckpoint), + SyncCheckpointGetFirmwareAddr(psUpdateSyncCheckpoint))); + SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint); + } + if (psFenceTimelineUpdateSync) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Updating NOHW sync prim<%p> to %d", + __func__, (void*)psFenceTimelineUpdateSync, + ui32FenceTimelineUpdateValue)); + SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue); + } + SyncCheckpointNoHWUpdateTimelines(NULL); +#endif + /* Drop the references taken on the sync checkpoints in the + * resolved input fence */ + SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); + /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ + if (apsFenceSyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); + } + /* Free memory allocated to hold the internal list of update values */ + if (pui32IntAllocatedUpdateValues) + { + OSFreeMem(pui32IntAllocatedUpdateValues); + pui32IntAllocatedUpdateValues = NULL; + } + + *piUpdateFence = iUpdateFence; + if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_NO_FENCE)) + { + SyncCheckpointFinaliseFence(psKickSyncContext->psDeviceNode, iUpdateFence, + pvUpdateFenceFinaliseData, + psUpdateSyncCheckpoint, szUpdateFenceName); + } + + OSLockRelease(psKickSyncContext->hLock); + return PVRSRV_OK; + +fail_cmdaquire: + SyncAddrListRollbackCheckpoints(psKickSyncContext->psDeviceNode, &psKickSyncContext->sSyncAddrListFence); + SyncAddrListRollbackCheckpoints(psKickSyncContext->psDeviceNode, &psKickSyncContext->sSyncAddrListUpdate); + if (iUpdateFence != 
PVRSRV_NO_FENCE) + { + SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData); + } + + /* Free memory allocated to hold update values */ + if (pui32IntAllocatedUpdateValues) + { + OSFreeMem(pui32IntAllocatedUpdateValues); + } +fail_alloc_update_values_mem: +fail_create_output_fence: + /* Drop the references taken on the sync checkpoints in the + * resolved input fence */ + SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); + /* Free memory allocated to hold the resolved fence's checkpoints */ + if (apsFenceSyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); + } +fail_resolve_fence: +fail_syncaddrlist: +out_unlock: + OSLockRelease(psKickSyncContext->hLock); + return eError; +} + +/**************************************************************************//** + End of file (rgxkicksync.c) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgxkicksync.h b/drivers/gpu/drm/phytium/octopus/rgxkicksync.h new file mode 100644 index 000000000000..649f756d4a87 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxkicksync.h @@ -0,0 +1,129 @@ +/*************************************************************************/ /*! +@File rgxkicksync.h +@Title Server side of the sync only kick API +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(RGXKICKSYNC_H) +#define RGXKICKSYNC_H + +#include "pvrsrv_error.h" +#include "connection_server.h" +#include "sync_server.h" +#include "rgxdevice.h" + + +typedef struct _RGX_SERVER_KICKSYNC_CONTEXT_ RGX_SERVER_KICKSYNC_CONTEXT; + +/**************************************************************************/ /*! +@Function DumpKickSyncCtxtsInfo +@Description Function that dumps debug info of kick sync ctxs on this device +@Return none +*/ /**************************************************************************/ +void +DumpKickSyncCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + IMG_UINT32 ui32VerbLevel); + +/**************************************************************************/ /*! +@Function CheckForStalledClientKickSyncCtxt +@Description Function that checks if a kick sync client is stalled +@Return RGX_KICK_TYPE_DM_GP on stalled context. Otherwise, 0 +*/ /**************************************************************************/ +IMG_UINT32 CheckForStalledClientKickSyncCtxt(PVRSRV_RGXDEV_INFO *psDevInfo); + +/**************************************************************************/ /*! +@Function PVRSRVRGXCreateKickSyncContextKM +@Description Server-side implementation of RGXCreateKicksyncContext +@Return PVRSRV_OK on success. Otherwise, a PVRSRV_ error code +*/ /**************************************************************************/ +PVRSRV_ERROR +PVRSRVRGXCreateKickSyncContextKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32PackedCCBSizeU88, + IMG_UINT32 ui32ContextFlags, + RGX_SERVER_KICKSYNC_CONTEXT ** ppsKicksyncContext); + + + +/**************************************************************************/ /*! 
+@Function PVRSRVRGXDestroyKickSyncContextKM +@Description Server-side implementation of RGXDestroyKicksyncContext +@Return PVRSRV_OK on success. Otherwise, a PVRSRV_ error code +*/ /**************************************************************************/ +PVRSRV_ERROR +PVRSRVRGXDestroyKickSyncContextKM(RGX_SERVER_KICKSYNC_CONTEXT * psKicksyncContext); + +/**************************************************************************/ /*! +@Function PVRSRVRGXSetKickSyncContextPropertyKM +@Description Server-side implementation of RGXSetKickSyncContextProperty +@Return PVRSRV_OK on success. Otherwise, a PVRSRV_ error code + */ /**************************************************************************/ +PVRSRV_ERROR PVRSRVRGXSetKickSyncContextPropertyKM(RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContext, + RGX_CONTEXT_PROPERTY eContextProperty, + IMG_UINT64 ui64Input, + IMG_UINT64 *pui64Output); + +/**************************************************************************/ /*! +@Function PVRSRVRGXKickSyncKM +@Description Kicks a sync only command +@Return PVRSRV_OK on success. 
Otherwise, a PVRSRV_ error code +*/ /**************************************************************************/ +PVRSRV_ERROR +PVRSRVRGXKickSyncKM(RGX_SERVER_KICKSYNC_CONTEXT * psKicksyncContext, + IMG_UINT32 ui32ClientCacheOpSeqNum, + IMG_UINT32 ui32ClientUpdateCount, + SYNC_PRIMITIVE_BLOCK ** pauiClientUpdateUFODevVarBlock, + IMG_UINT32 * paui32ClientUpdateDevVarOffset, + IMG_UINT32 * paui32ClientUpdateValue, + PVRSRV_FENCE iCheckFence, + PVRSRV_TIMELINE iUpdateTimeline, + PVRSRV_FENCE * piUpdateFence, + IMG_CHAR szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH], + + IMG_UINT32 ui32ExtJobRef); + +#endif /* RGXKICKSYNC_H */ + +/**************************************************************************//** + End of file (rgxkicksync.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgxlayer.h b/drivers/gpu/drm/phytium/octopus/rgxlayer.h new file mode 100644 index 000000000000..d7cff0ae309f --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxlayer.h @@ -0,0 +1,509 @@ +/*************************************************************************/ /*! +@File +@Title Header for Services abstraction layer +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Declaration of an interface layer used to abstract code that + can be compiled outside of the DDK, potentially in a + completely different OS. + All the headers included by this file must also be copied to + the alternative source tree. + All the functions declared here must have a DDK implementation + inside the DDK source tree (e.g. rgxlayer_impl.h/.c) and + another different implementation in case they are used outside + of the DDK. + All of the functions accept as a first parameter a + "const void *hPrivate" argument. It should be used to pass + around any implementation specific data required. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(RGXLAYER_H) +#define RGXLAYER_H + +#if defined(__cplusplus) +extern "C" { +#endif + + +#include "img_defs.h" +#include "img_types.h" +#include "img_elf.h" +#include "pvrsrv_error.h" /* includes pvrsrv_errors.h */ +#include "rgx_bvnc_defs_km.h" +#include "rgx_fw_info.h" +#include "rgx_fwif_shared.h" /* includes rgx_common.h and mem_types.h */ +#include "rgx_meta.h" +#include "rgx_riscv.h" + +#include "rgxdefs_km.h" +/* includes: + * rgx_cr_defs_km.h, + * RGX_BVNC_CORE_KM_HEADER (rgxcore_km_B.V.N.C.h), + * RGX_BNC_CONFIG_KM_HEADER (rgxconfig_km_B.V.N.C.h) + */ + + +/*! +******************************************************************************* + + @Function RGXMemCopy + + @Description MemCopy implementation + + @Input hPrivate : Implementation specific data + @Input pvDst : Pointer to the destination + @Input pvSrc : Pointer to the source location + @Input uiSize : The amount of memory to copy in bytes + + @Return void + +******************************************************************************/ +void RGXMemCopy(const void *hPrivate, + void *pvDst, + void *pvSrc, + size_t uiSize); + +/*! +******************************************************************************* + + @Function RGXMemSet + + @Description MemSet implementation + + @Input hPrivate : Implementation specific data + @Input pvDst : Pointer to the start of the memory region + @Input ui8Value : The value to be written + @Input uiSize : The number of bytes to be set to ui8Value + + @Return void + +******************************************************************************/ +void RGXMemSet(const void *hPrivate, + void *pvDst, + IMG_UINT8 ui8Value, + size_t uiSize); + +/*! 
+******************************************************************************* + + @Function RGXCommentLog + + @Description Generic log function used for debugging or other purposes + + @Input hPrivate : Implementation specific data + @Input pszString : Message to be printed + @Input ... : Variadic arguments + + @Return void + +******************************************************************************/ +__printf(2, 3) +void RGXCommentLog(const void *hPrivate, + const IMG_CHAR *pszString, + ...); + +/*! +******************************************************************************* + + @Function RGXErrorLog + + @Description Generic error log function used for debugging or other purposes + + @Input hPrivate : Implementation specific data + @Input pszString : Message to be printed + @Input ... : Variadic arguments + + @Return void + +******************************************************************************/ +__printf(2, 3) +void RGXErrorLog(const void *hPrivate, + const IMG_CHAR *pszString, + ...); + +/* This is used to check if a specific feature is enabled. + * Should be used instead of calling RGXDeviceHasFeature. */ +#define RGX_DEVICE_HAS_FEATURE(hPrivate, Feature) \ + RGXDeviceHasFeature(hPrivate, RGX_FEATURE_##Feature##_BIT_MASK) + +/* This is used to check if a specific feature with value is enabled. + * Should be used instead of calling RGXDeviceGetFeatureValue. */ +#define RGX_DEVICE_HAS_FEATURE_VALUE(hPrivate, Feature) \ + (RGXDeviceGetFeatureValue(hPrivate, RGX_FEATURE_##Feature##_IDX) >= 0) + +/* This is used to get the value of a specific feature from hPrivate. + * Should be used instead of calling RGXDeviceGetFeatureValue. */ +#define RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, Feature) \ + RGXDeviceGetFeatureValue(hPrivate, RGX_FEATURE_##Feature##_IDX) + +/* This is used to get the value of a specific ERN from hPrivate. + * Should be used instead of calling RGXDeviceHasErnBrn. 
*/ +#define RGX_DEVICE_HAS_ERN(hPrivate, FixNum) \ + RGXDeviceHasErnBrn(hPrivate, HW_##FixNum##_BIT_MASK) + +/* This is used to get the value of a specific BRN from hPrivate. + * Should be used instead of calling RGXDeviceHasErnBrn. */ +#define RGX_DEVICE_HAS_BRN(hPrivate, FixNum) \ + RGXDeviceHasErnBrn(hPrivate, FIX_HW_##FixNum##_BIT_MASK) + +#define CLK_CTRL_FORCE_ON(X, Module) \ + X = (((X) & RGX_CR_##Module##_CLRMSK) | RGX_CR_##Module##_ON) +/*! +******************************************************************************* + + @Function RGXDeviceGetFeatureValue + + @Description Checks if a device has a particular feature with values + + @Input hPrivate : Implementation specific data + @Input ui64Feature : Feature with values to check + + @Return Value >= 0 if the given feature is available, -1 otherwise + +******************************************************************************/ +IMG_INT32 RGXDeviceGetFeatureValue(const void *hPrivate, IMG_UINT64 ui64Feature); + +/*! +******************************************************************************* + + @Function RGXDeviceHasFeature + + @Description Checks if a device has a particular feature + + @Input hPrivate : Implementation specific data + @Input ui64Feature : Feature to check + + @Return IMG_TRUE if the given feature is available, IMG_FALSE otherwise + +******************************************************************************/ +IMG_BOOL RGXDeviceHasFeature(const void *hPrivate, IMG_UINT64 ui64Feature); + +/*! 
+******************************************************************************* + + @Function RGXDeviceHasErnBrn + + @Description Checks if a device has a particular errata + + @Input hPrivate : Implementation specific data + @Input ui64ErnsBrns : Flags to check + + @Return IMG_TRUE if the given errata is available, IMG_FALSE otherwise + +******************************************************************************/ +IMG_BOOL RGXDeviceHasErnBrn(const void *hPrivate, IMG_UINT64 ui64ErnsBrns); + +/*! +******************************************************************************* + + @Function RGXGetFWCorememSize + + @Description Get the FW coremem size + + @Input hPrivate : Implementation specific data + + @Return FW coremem size + +******************************************************************************/ +IMG_INTERNAL +IMG_UINT32 RGXGetFWCorememSize(const void *hPrivate); + +/*! +******************************************************************************* + + @Function RGXWriteReg32/64 + + @Description Write a value to a 32/64 bit RGX register + + @Input hPrivate : Implementation specific data + @Input ui32RegAddr : Register offset inside the register bank + @Input ui32/64RegValue : New register value + + @Return void + +******************************************************************************/ +void RGXWriteReg32(const void *hPrivate, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32RegValue); + +void RGXWriteReg64(const void *hPrivate, + IMG_UINT32 ui32RegAddr, + IMG_UINT64 ui64RegValue); + +/*! 
+*******************************************************************************
+
+ @Function                RGXReadReg32/64
+
+ @Description             Read a 32/64 bit RGX register
+
+ @Input                   hPrivate         : Implementation specific data
+ @Input                   ui32RegAddr      : Register offset inside the register bank
+
+ @Return                  Register value
+
+******************************************************************************/
+IMG_UINT32 RGXReadReg32(const void *hPrivate,
+                        IMG_UINT32 ui32RegAddr);
+
+IMG_UINT64 RGXReadReg64(const void *hPrivate,
+                        IMG_UINT32 ui32RegAddr);
+
+/*!
+*******************************************************************************
+
+ @Function                RGXReadModifyWriteReg64
+
+ @Description             Read-modify-write a 64 bit RGX register
+
+ @Input                   hPrivate         : Implementation specific data.
+ @Input                   ui32RegAddr      : Register offset inside the register bank.
+ @Input                   ui64RegValue     : New register value.
+ @Input                   ui64RegKeepMask  : Keep the bits set in the mask.
+
+ @Return                  Always returns PVRSRV_OK
+
+******************************************************************************/
+IMG_UINT32 RGXReadModifyWriteReg64(const void *hPrivate,
+                                   IMG_UINT32 ui32RegAddr,
+                                   IMG_UINT64 ui64RegValue,
+                                   IMG_UINT64 ui64RegKeepMask);
+
+/*!
+******************************************************************************* + + @Function RGXPollReg32/64 + + @Description Poll on a 32/64 bit RGX register until some bits are set/unset + + @Input hPrivate : Implementation specific data + @Input ui32RegAddr : Register offset inside the register bank + @Input ui32/64RegValue : Value expected from the register + @Input ui32/64RegMask : Only the bits set in this mask will be + checked against uiRegValue + + @Return PVRSRV_OK if the poll succeeds, + PVRSRV_ERROR_TIMEOUT if the poll takes too long + +******************************************************************************/ +PVRSRV_ERROR RGXPollReg32(const void *hPrivate, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32RegValue, + IMG_UINT32 ui32RegMask); + +PVRSRV_ERROR RGXPollReg64(const void *hPrivate, + IMG_UINT32 ui32RegAddr, + IMG_UINT64 ui64RegValue, + IMG_UINT64 ui64RegMask); + +/*! +******************************************************************************* + + @Function RGXWaitCycles + + @Description Wait for a number of GPU cycles and/or microseconds + + @Input hPrivate : Implementation specific data + @Input ui32Cycles : Number of GPU cycles to wait for in pdumps, + it can also be used when running driver-live + if desired (ignoring the next parameter) + @Input ui32WaitUs : Number of microseconds to wait for when running + driver-live + + @Return void + +******************************************************************************/ +void RGXWaitCycles(const void *hPrivate, + IMG_UINT32 ui32Cycles, + IMG_UINT32 ui32WaitUs); + +/*! 
+******************************************************************************* + + @Function RGXAcquireKernelMMUPC + + @Description Acquire the Kernel MMU Page Catalogue device physical address + + @Input hPrivate : Implementation specific data + @Input psPCAddr : Returned page catalog address + + @Return void + +******************************************************************************/ +void RGXAcquireKernelMMUPC(const void *hPrivate, IMG_DEV_PHYADDR *psPCAddr); + +/*! +******************************************************************************* + + @Function RGXWriteKernelMMUPC32/64 + + @Description Write the Kernel MMU Page Catalogue to the 32/64 bit + RGX register passed as argument. + In a driver-live scenario without PDump these functions + are the same as RGXWriteReg32/64 and they don't need + to be reimplemented. + + @Input hPrivate : Implementation specific data + @Input ui32PCReg : Register offset inside the register bank + @Input ui32AlignShift : PC register alignshift + @Input ui32Shift : PC register shift + @Input ui32/64PCVal : Page catalog value (aligned and shifted) + + @Return void + +******************************************************************************/ +#if defined(PDUMP) +void RGXWriteKernelMMUPC32(const void *hPrivate, + IMG_UINT32 ui32PCReg, + IMG_UINT32 ui32PCRegAlignShift, + IMG_UINT32 ui32PCRegShift, + IMG_UINT32 ui32PCVal); + +#else /* defined(PDUMP) */ +#define RGXWriteKernelMMUPC32(priv, pcreg, alignshift, shift, pcval) \ + RGXWriteReg32(priv, pcreg, pcval) +#endif /* defined(PDUMP) */ + +/*! +******************************************************************************* + + @Function RGXDoFWSlaveBoot + + @Description Returns whether or not a FW Slave Boot is required + while powering on + + @Input hPrivate : Implementation specific data + + @Return IMG_BOOL + +******************************************************************************/ +IMG_BOOL RGXDoFWSlaveBoot(const void *hPrivate); + +/*! 
+******************************************************************************* + + @Function RGXFabricCoherencyTest + + @Description Performs fabric coherency test + + @Input hPrivate : Implementation specific data + + @Return PVRSRV_OK if the test succeeds, + PVRSRV_ERROR_INIT_FAILURE if the test fails at some point + +******************************************************************************/ +PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate); + +/*! +******************************************************************************* + + @Function RGXGetDeviceSLCBanks + + @Description Returns the number of SLC banks used by the device + + @Input hPrivate : Implementation specific data + + @Return Number of SLC banks + +******************************************************************************/ +IMG_UINT32 RGXGetDeviceSLCBanks(const void *hPrivate); + +/*! +******************************************************************************* + + @Function RGXGetDeviceCacheLineSize + + @Description Returns the device cache line size + + @Input hPrivate : Implementation specific data + + @Return Cache line size + +******************************************************************************/ +IMG_UINT32 RGXGetDeviceCacheLineSize(const void *hPrivate); + +/*! +******************************************************************************* + + @Function RGXAcquireBootCodeAddr + + @Description Acquire the device virtual address of the RISCV boot code + + @Input hPrivate : Implementation specific data + @Output psBootCodeAddr : Boot code base address + + @Return void + +******************************************************************************/ +void RGXAcquireBootCodeAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootCodeAddr); + +/*! 
+******************************************************************************* + + @Function RGXAcquireBootDataAddr + + @Description Acquire the device virtual address of the RISCV boot data + + @Input hPrivate : Implementation specific data + @Output psBootDataAddr : Boot data base address + + @Return void + +******************************************************************************/ +void RGXAcquireBootDataAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootDataAddr); + +/*! +******************************************************************************* + + @Function RGXDeviceAckIrq + + @Description Checks the implementation specific IRQ status register, + clearing it if necessary and returning the IRQ status. + + @Input hPrivate : Implementation specific data + + @Return: IRQ status + +******************************************************************************/ +IMG_BOOL RGXDeviceAckIrq(const void *hPrivate); + +#if defined(__cplusplus) +} +#endif + +#endif /* RGXLAYER_H */ diff --git a/drivers/gpu/drm/phytium/octopus/rgxlayer_impl.c b/drivers/gpu/drm/phytium/octopus/rgxlayer_impl.c new file mode 100644 index 000000000000..647653ae6bc6 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxlayer_impl.c @@ -0,0 +1,967 @@ +/*************************************************************************/ /*! +@File +@Title DDK implementation of the Services abstraction layer +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description DDK implementation of the Services abstraction layer +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/
+
+#include "rgxlayer_impl.h"
+#include "osfunc.h"
+#include "pdump_km.h"
+#include "rgxfwutils.h"
+#include "cache_km.h"
+
+#if defined(PDUMP)
+#include <stdarg.h>
+#endif
+
+void RGXMemCopy(const void *hPrivate,
+                void *pvDst,
+                void *pvSrc,
+                size_t uiSize)
+{
+	PVR_UNREFERENCED_PARAMETER(hPrivate);
+	OSDeviceMemCopy(pvDst, pvSrc, uiSize);
+}
+
+void RGXMemSet(const void *hPrivate,
+               void *pvDst,
+               IMG_UINT8 ui8Value,
+               size_t uiSize)
+{
+	PVR_UNREFERENCED_PARAMETER(hPrivate);
+	OSDeviceMemSet(pvDst, ui8Value, uiSize);
+}
+
+void RGXCommentLog(const void *hPrivate,
+                   const IMG_CHAR *pszString,
+                   ...)
+{
+#if defined(PDUMP)
+	va_list argList;
+	va_start(argList, pszString);
+	PDumpCommentWithFlagsVA(PDUMP_FLAGS_CONTINUOUS, pszString, argList);
+	va_end(argList);
+	PVR_UNREFERENCED_PARAMETER(hPrivate);
+#else
+	PVR_UNREFERENCED_PARAMETER(hPrivate);
+	PVR_UNREFERENCED_PARAMETER(pszString);
+#endif
+}
+
+void RGXErrorLog(const void *hPrivate,
+                 const IMG_CHAR *pszString,
+                 ...)
+{ + IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]; + va_list argList; + + PVR_UNREFERENCED_PARAMETER(hPrivate); + + va_start(argList, pszString); + vsnprintf(szBuffer, sizeof(szBuffer), pszString, argList); + va_end(argList); + + PVR_DPF((PVR_DBG_ERROR, "%s", szBuffer)); +} + +IMG_INT32 RGXDeviceGetFeatureValue(const void *hPrivate, IMG_UINT64 ui64Feature) +{ + IMG_INT32 i32Ret = -1; + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + PVRSRV_DEVICE_NODE *psDeviceNode; + + PVR_ASSERT(hPrivate != NULL); + + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + psDeviceNode = psDevInfo->psDeviceNode; + + if ((psDeviceNode->pfnGetDeviceFeatureValue)) + { + i32Ret = psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode, ui64Feature); + } + + return i32Ret; +} + +IMG_BOOL RGXDeviceHasFeature(const void *hPrivate, IMG_UINT64 ui64Feature) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + + return (psDevInfo->sDevFeatureCfg.ui64Features & ui64Feature) != 0; +} + +IMG_UINT32 RGXGetFWCorememSize(const void *hPrivate) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META_COREMEM_SIZE)) + { + return RGX_GET_FEATURE_VALUE(psDevInfo, META_COREMEM_SIZE); + } + return 0; +} + +void RGXWriteReg32(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + void __iomem *pvRegsBase; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + pvRegsBase = psDevInfo->pvRegsBaseKM; + +#if defined(PDUMP) + if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) +#endif + { + OSWriteHWReg32(pvRegsBase, ui32RegAddr, 
ui32RegValue); + } + + PDUMPREG32(RGX_PDUMPREG_NAME, ui32RegAddr, ui32RegValue, psParams->ui32PdumpFlags); +} + +void RGXWriteReg64(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT64 ui64RegValue) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + void __iomem *pvRegsBase; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + pvRegsBase = psDevInfo->pvRegsBaseKM; + +#if defined(PDUMP) + if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) +#endif + { + OSWriteHWReg64(pvRegsBase, ui32RegAddr, ui64RegValue); + } + + PDUMPREG64(RGX_PDUMPREG_NAME, ui32RegAddr, ui64RegValue, psParams->ui32PdumpFlags); +} + +IMG_UINT32 RGXReadReg32(const void *hPrivate, IMG_UINT32 ui32RegAddr) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + void __iomem *pvRegsBase; + IMG_UINT32 ui32RegValue; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + pvRegsBase = psDevInfo->pvRegsBaseKM; + +#if defined(PDUMP) + if (psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW) + { + ui32RegValue = IMG_UINT32_MAX; + } + else +#endif + { + ui32RegValue = OSReadHWReg32(pvRegsBase, ui32RegAddr); + } + + PDUMPREGREAD32(RGX_PDUMPREG_NAME, ui32RegAddr, psParams->ui32PdumpFlags); + + return ui32RegValue; +} + +IMG_UINT64 RGXReadReg64(const void *hPrivate, IMG_UINT32 ui32RegAddr) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + void __iomem *pvRegsBase; + IMG_UINT64 ui64RegValue; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + pvRegsBase = psDevInfo->pvRegsBaseKM; + +#if defined(PDUMP) + if (psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW) + { + ui64RegValue = IMG_UINT64_MAX; + } + else +#endif + { + ui64RegValue = OSReadHWReg64(pvRegsBase, ui32RegAddr); + } + + PDUMPREGREAD64(RGX_PDUMPREG_NAME, ui32RegAddr, PDUMP_FLAGS_CONTINUOUS); + + return ui64RegValue; +} + +IMG_UINT32 
RGXReadModifyWriteReg64(const void *hPrivate, + IMG_UINT32 ui32RegAddr, + IMG_UINT64 uiRegValueNew, + IMG_UINT64 uiRegKeepMask) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + void __iomem *pvRegsBase; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + pvRegsBase = psDevInfo->pvRegsBaseKM; + + /* only use the new values for bits we update according to the keep mask */ + uiRegValueNew &= ~uiRegKeepMask; + +#if defined(PDUMP) + /* Store register offset to temp PDump variable */ + PDumpRegRead64ToInternalVar(RGX_PDUMPREG_NAME, ":SYSMEM:$1", ui32RegAddr, PDUMP_FLAGS_CONTINUOUS); + + /* Keep the bits set in the mask */ + PDumpWriteVarANDValueOp(":SYSMEM:$1", uiRegKeepMask, PDUMP_FLAGS_CONTINUOUS); + + /* OR the new values */ + PDumpWriteVarORValueOp(":SYSMEM:$1", uiRegValueNew, PDUMP_FLAGS_CONTINUOUS); + + /* Do the actual register write */ + PDumpInternalVarToReg64(RGX_PDUMPREG_NAME, ui32RegAddr, ":SYSMEM:$1", 0); + + if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) +#endif + + { + IMG_UINT64 uiRegValue = OSReadHWReg64(pvRegsBase, ui32RegAddr); + uiRegValue &= uiRegKeepMask; + OSWriteHWReg64(pvRegsBase, ui32RegAddr, uiRegValue | uiRegValueNew); + } + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXPollReg32(const void *hPrivate, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32RegValue, + IMG_UINT32 ui32RegMask) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + void __iomem *pvRegsBase; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + pvRegsBase = psDevInfo->pvRegsBaseKM; + +#if defined(PDUMP) + if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) +#endif + { + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + (IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr), + ui32RegValue, + ui32RegMask, + POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXPollReg32: Poll for Reg (0x%x) 
failed", ui32RegAddr)); + return PVRSRV_ERROR_TIMEOUT; + } + } + + PDUMPREGPOL(RGX_PDUMPREG_NAME, + ui32RegAddr, + ui32RegValue, + ui32RegMask, + psParams->ui32PdumpFlags, + PDUMP_POLL_OPERATOR_EQUAL); + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXPollReg64(const void *hPrivate, + IMG_UINT32 ui32RegAddr, + IMG_UINT64 ui64RegValue, + IMG_UINT64 ui64RegMask) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + void __iomem *pvRegsBase; + + /* Split lower and upper words */ + IMG_UINT32 ui32UpperValue = (IMG_UINT32) (ui64RegValue >> 32); + IMG_UINT32 ui32LowerValue = (IMG_UINT32) (ui64RegValue); + IMG_UINT32 ui32UpperMask = (IMG_UINT32) (ui64RegMask >> 32); + IMG_UINT32 ui32LowerMask = (IMG_UINT32) (ui64RegMask); + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + pvRegsBase = psDevInfo->pvRegsBaseKM; + +#if defined(PDUMP) + if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) +#endif + { + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + (IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr + 4), + ui32UpperValue, + ui32UpperMask, + POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXPollReg64: Poll for upper part of Reg (0x%x) failed", ui32RegAddr)); + return PVRSRV_ERROR_TIMEOUT; + } + + if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, + (IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr), + ui32LowerValue, + ui32LowerMask, + POLL_FLAG_LOG_ERROR) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXPollReg64: Poll for lower part of Reg (0x%x) failed", ui32RegAddr)); + return PVRSRV_ERROR_TIMEOUT; + } + } + + PDUMPREGPOL(RGX_PDUMPREG_NAME, + ui32RegAddr + 4, + ui32UpperValue, + ui32UpperMask, + psParams->ui32PdumpFlags, + PDUMP_POLL_OPERATOR_EQUAL); + + + PDUMPREGPOL(RGX_PDUMPREG_NAME, + ui32RegAddr, + ui32LowerValue, + ui32LowerMask, + psParams->ui32PdumpFlags, + PDUMP_POLL_OPERATOR_EQUAL); + + return PVRSRV_OK; +} + +void RGXWaitCycles(const 
void *hPrivate, IMG_UINT32 ui32Cycles, IMG_UINT32 ui32TimeUs) +{ + PVR_UNREFERENCED_PARAMETER(hPrivate); + OSWaitus(ui32TimeUs); + PDUMPIDLWITHFLAGS(ui32Cycles, PDUMP_FLAGS_CONTINUOUS); +} + +void RGXAcquireKernelMMUPC(const void *hPrivate, IMG_DEV_PHYADDR *psPCAddr) +{ + PVR_ASSERT(hPrivate != NULL); + *psPCAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sPCAddr; +} + +#if defined(PDUMP) +void RGXWriteKernelMMUPC32(const void *hPrivate, + IMG_UINT32 ui32PCReg, + IMG_UINT32 ui32PCRegAlignShift, + IMG_UINT32 ui32PCRegShift, + IMG_UINT32 ui32PCVal) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + + /* Write the cat-base address */ + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32PCReg, ui32PCVal); + + /* Pdump catbase address */ + MMU_PDumpWritePageCatBase(psDevInfo->psKernelMMUCtx, + RGX_PDUMPREG_NAME, + ui32PCReg, + 4, + ui32PCRegAlignShift, + ui32PCRegShift, + PDUMP_FLAGS_CONTINUOUS); +} +#endif /* defined(PDUMP) */ + +#define MAX_NUM_COHERENCY_TESTS (10) +IMG_BOOL RGXDoFWSlaveBoot(const void *hPrivate) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + PVRSRV_DEVICE_NODE *psDeviceNode; + + PVR_ASSERT(hPrivate != NULL); + psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + + if (psDevInfo->ui32CoherencyTestsDone >= MAX_NUM_COHERENCY_TESTS) + { + return IMG_FALSE; + } + + psDeviceNode = psDevInfo->psDeviceNode; +#if !defined(NO_HARDWARE) + return (PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig) && + PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig)); +#else + return IMG_FALSE; +#endif +} + +/* + * The fabric coherency test is performed when platform supports fabric coherency + * either in the form of ACE-lite or Full-ACE. This test is done quite early + * with the firmware processor quiescent and makes exclusive use of the slave + * port interface for reading/writing through the device memory hierarchy. 
The + * rationale for the test is to ensure that what the CPU writes to its dcache + * is visible to the GPU via coherency snoop miss/hit and vice-versa without + * any intervening cache maintenance by the writing agent. + */ +PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + IMG_UINT32 *pui32FabricCohTestBufferCpuVA = NULL; + IMG_UINT32 *pui32FabricCohCcTestBufferCpuVA = NULL; + IMG_UINT32 *pui32FabricCohNcTestBufferCpuVA = NULL; + DEVMEM_MEMDESC *psFabricCohTestBufferMemDesc = NULL; + DEVMEM_MEMDESC *psFabricCohCcTestBufferMemDesc = NULL; + DEVMEM_MEMDESC *psFabricCohNcTestBufferMemDesc = NULL; + RGXFWIF_DEV_VIRTADDR sFabricCohCcTestBufferDevVA; + RGXFWIF_DEV_VIRTADDR sFabricCohNcTestBufferDevVA; + RGXFWIF_DEV_VIRTADDR *psFabricCohTestBufferDevVA = NULL; + IMG_DEVMEM_SIZE_T uiFabricCohTestBlockSize = sizeof(IMG_UINT64); + IMG_DEVMEM_ALIGN_T uiFabricCohTestBlockAlign = sizeof(IMG_UINT64); + IMG_UINT64 ui64SegOutAddrTopCached = 0; + IMG_UINT64 ui64SegOutAddrTopUncached = 0; + IMG_UINT32 ui32OddEven; + IMG_UINT32 ui32OddEvenSeed = 1; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_BOOL bFullTestPassed = IMG_TRUE; + IMG_BOOL bSubTestPassed = IMG_FALSE; + IMG_BOOL bExit = IMG_FALSE; + enum TEST_TYPE { + CPU_WRITE_GPU_READ_SM=0, GPU_WRITE_CPU_READ_SM, + CPU_WRITE_GPU_READ_SH, GPU_WRITE_CPU_READ_SH + } eTestType; + + PVR_ASSERT(hPrivate != NULL); + psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + + PVR_LOG(("Starting fabric coherency test .....")); + + /* Size and align are 'expanded' because we request an export align allocation */ + eError = DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap), + &uiFabricCohTestBlockSize, + &uiFabricCohTestBlockAlign); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "DevmemExportalignAdjustSizeAndAlign() error: %s, exiting", + PVRSRVGetErrorString(eError))); + goto e0; + } + + /* Allocate, acquire cpu address and set firmware address for 
cc=1 buffer */ + eError = DevmemFwAllocateExportable(psDevInfo->psDeviceNode, + uiFabricCohTestBlockSize, + uiFabricCohTestBlockAlign, + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | + PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT | + PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), + "FwExFabricCoherencyCcTestBuffer", + &psFabricCohCcTestBufferMemDesc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "DevmemFwAllocateExportable() error: %s, exiting", + PVRSRVGetErrorString(eError))); + goto e0; + } + + eError = DevmemAcquireCpuVirtAddr(psFabricCohCcTestBufferMemDesc, (void **) &pui32FabricCohCcTestBufferCpuVA); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "DevmemAcquireCpuVirtAddr() error: %s, exiting", + PVRSRVGetErrorString(eError))); + goto e1; + } + + /* Create a FW address which is uncached in the Meta DCache and in the SLC using the Meta bootloader segment. 
+ This segment is the only one configured correctly out of reset (when this test is meant to be executed) */ + eError = RGXSetFirmwareAddress(&sFabricCohCcTestBufferDevVA, + psFabricCohCcTestBufferMemDesc, + 0, + RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:1", e2); + + /* Undo most of the FW mappings done by RGXSetFirmwareAddress */ + sFabricCohCcTestBufferDevVA.ui32Addr &= ~RGXFW_SEGMMU_DATA_META_CACHE_MASK; + sFabricCohCcTestBufferDevVA.ui32Addr &= ~RGXFW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK; + sFabricCohCcTestBufferDevVA.ui32Addr -= RGXFW_SEGMMU_DATA_BASE_ADDRESS; + + /* Map the buffer in the bootloader segment as uncached */ + sFabricCohCcTestBufferDevVA.ui32Addr |= RGXFW_BOOTLDR_META_ADDR; + sFabricCohCcTestBufferDevVA.ui32Addr |= RGXFW_SEGMMU_DATA_META_UNCACHED; + + /* Allocate, acquire cpu address and set firmware address for cc=0 buffer */ + eError = DevmemFwAllocateExportable(psDevInfo->psDeviceNode, + uiFabricCohTestBlockSize, + uiFabricCohTestBlockAlign, + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | + PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | + PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), + "FwExFabricCoherencyNcTestBuffer", + &psFabricCohNcTestBufferMemDesc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "DevmemFwAllocateExportable() error: %s, exiting", + PVRSRVGetErrorString(eError))); + goto e3; + } + + eError = DevmemAcquireCpuVirtAddr(psFabricCohNcTestBufferMemDesc, (void **) &pui32FabricCohNcTestBufferCpuVA); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "DevmemAcquireCpuVirtAddr() error: %s, exiting", + PVRSRVGetErrorString(eError))); + goto e4; + } + + eError = RGXSetFirmwareAddress(&sFabricCohNcTestBufferDevVA, 
+ psFabricCohNcTestBufferMemDesc, + 0, + RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:2", e5); + + /* Undo most of the FW mappings done by RGXSetFirmwareAddress */ + sFabricCohNcTestBufferDevVA.ui32Addr &= ~RGXFW_SEGMMU_DATA_META_CACHE_MASK; + sFabricCohNcTestBufferDevVA.ui32Addr &= ~RGXFW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK; + sFabricCohNcTestBufferDevVA.ui32Addr -= RGXFW_SEGMMU_DATA_BASE_ADDRESS; + + /* Map the buffer in the bootloader segment as uncached */ + sFabricCohNcTestBufferDevVA.ui32Addr |= RGXFW_BOOTLDR_META_ADDR; + sFabricCohNcTestBufferDevVA.ui32Addr |= RGXFW_SEGMMU_DATA_META_UNCACHED; + + /* Obtain the META segment addresses corresponding to cached and uncached windows into SLC */ + ui64SegOutAddrTopCached = RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_CACHED(MMU_CONTEXT_MAPPING_FWIF); + ui64SegOutAddrTopUncached = RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_UNCACHED(MMU_CONTEXT_MAPPING_FWIF); + + /* At the top level, we perform snoop-miss (i.e. to verify slave port) & snoop-hit (i.e. to verify ACE) test. + NOTE: For now, skip snoop-miss test as Services currently forces all firmware allocations to be coherent */ + for (eTestType = CPU_WRITE_GPU_READ_SH; eTestType <= GPU_WRITE_CPU_READ_SH && bExit == IMG_FALSE; eTestType++) + { + IMG_CPU_PHYADDR sCpuPhyAddr; + IMG_BOOL bValid; + PMR *psPMR; + + if (eTestType == CPU_WRITE_GPU_READ_SM) + { + /* All snoop miss test must bypass the SLC, here memory is region of coherence so + configure META to use SLC bypass cache policy for the bootloader segment. Note + this cannot be done on a cache-coherent (i.e. 
CC=1) VA, as this violates ACE + standard as one cannot issue a non-coherent request into the bus fabric for + an allocation's VA that is cache-coherent in SLC, so use non-coherent buffer */ + RGXWriteMetaRegThroughSP(hPrivate, META_CR_MMCU_SEGMENTn_OUTA1(6), + (ui64SegOutAddrTopUncached | RGXFW_BOOTLDR_DEVV_ADDR) >> 32); + pui32FabricCohTestBufferCpuVA = pui32FabricCohNcTestBufferCpuVA; + psFabricCohTestBufferMemDesc = psFabricCohNcTestBufferMemDesc; + psFabricCohTestBufferDevVA = &sFabricCohNcTestBufferDevVA; + } + else if (eTestType == CPU_WRITE_GPU_READ_SH) + { + /* All snoop hit test must obviously use SLC, here SLC is region of coherence so + configure META not to bypass the SLC for the bootloader segment */ + RGXWriteMetaRegThroughSP(hPrivate, META_CR_MMCU_SEGMENTn_OUTA1(6), + (ui64SegOutAddrTopCached | RGXFW_BOOTLDR_DEVV_ADDR) >> 32); + pui32FabricCohTestBufferCpuVA = pui32FabricCohCcTestBufferCpuVA; + psFabricCohTestBufferMemDesc = psFabricCohCcTestBufferMemDesc; + psFabricCohTestBufferDevVA = &sFabricCohCcTestBufferDevVA; + } + + if (eTestType == GPU_WRITE_CPU_READ_SH && + !PVRSRVSystemSnoopingOfDeviceCache(psDevInfo->psDeviceNode->psDevConfig)) + { + /* Cannot perform this test if there is no snooping of device cache */ + continue; + } + + /* Acquire underlying PMR CpuPA in preparation for cache maintenance */ + (void) DevmemLocalGetImportHandle(psFabricCohTestBufferMemDesc, (void**)&psPMR); + eError = PMR_CpuPhysAddr(psPMR, OSGetPageShift(), 1, 0, &sCpuPhyAddr, &bValid); + if (eError != PVRSRV_OK || bValid == IMG_FALSE) + { + PVR_DPF((PVR_DBG_ERROR, + "PMR_CpuPhysAddr error: %s, exiting", + PVRSRVGetErrorString(eError))); + bExit = IMG_TRUE; + continue; + } + + /* Here we do two passes mostly to account for the effects of using a different + seed (i.e. 
ui32OddEvenSeed) value to read and write */ + for (ui32OddEven = 1; ui32OddEven < 3 && bExit == IMG_FALSE; ui32OddEven++) + { + IMG_UINT32 i; + + /* Do multiple sub-dword cache line tests */ + for (i = 0; i < 2 && bExit == IMG_FALSE; i++) + { + IMG_UINT32 ui32FWAddr; + IMG_UINT32 ui32FWValue; + IMG_UINT32 ui32FWValue2; + IMG_UINT32 ui32LastFWValue = ~0; + IMG_UINT32 ui32Offset = i * sizeof(IMG_UINT32); + + /* Calculate next address and seed value to write/read from slave-port */ + ui32FWAddr = psFabricCohTestBufferDevVA->ui32Addr + ui32Offset; + ui32OddEvenSeed += 1; + + if (eTestType == GPU_WRITE_CPU_READ_SM || eTestType == GPU_WRITE_CPU_READ_SH) + { + /* Clean dcache to ensure there is no stale data in dcache that might over-write + what we are about to write via slave-port here because if it drains from the CPU + dcache before we read it, it would corrupt what we are going to read back via + the CPU */ + CacheOpValExec(psPMR, 0, ui32Offset, sizeof(IMG_UINT32), PVRSRV_CACHE_OP_CLEAN); + + /* Calculate a new value to write */ + ui32FWValue = i + ui32OddEvenSeed; + + /* Write the value using the RGX slave-port interface */ + eError = RGXWriteMETAAddr(psDevInfo, ui32FWAddr, ui32FWValue); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "RGXWriteMETAAddr error: %s, exiting", + PVRSRVGetErrorString(eError))); + bExit = IMG_TRUE; + continue; + } + + /* Read back value using RGX slave-port interface, this is used + as a sort of memory barrier for the above write */ + eError = RGXReadMETAAddr(psDevInfo, ui32FWAddr, &ui32FWValue2); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "RGXReadMETAAddr error: %s, exiting", + PVRSRVGetErrorString(eError))); + bExit = IMG_TRUE; + continue; + } + else if (ui32FWValue != ui32FWValue2) + { + //IMG_UINT32 ui32FWValue3; + //RGXReadMETAAddr(psDevInfo, 0xC1F00000, &ui32FWValue3); + + /* Fatal error, we should abort */ + PVR_DPF((PVR_DBG_ERROR, + "At Offset: %d, RAW via SlavePort failed: expected: %x, got: %x", + i, + 
ui32FWValue, + ui32FWValue2)); + eError = PVRSRV_ERROR_INIT_FAILURE; + bExit = IMG_TRUE; + continue; + } + + if (!PVRSRVSystemSnoopingOfDeviceCache(psDevInfo->psDeviceNode->psDevConfig)) + { + /* Invalidate dcache to ensure that any prefetched data by the CPU from this memory + region is discarded before we read (i.e. next read must trigger a cache miss). + If there is snooping of device cache, then any prefetching done by the CPU + will reflect the most up to date datum writing by GPU into said location, + that is to say prefetching must be coherent so CPU d-flush is not needed */ + CacheOpValExec(psPMR, 0, ui32Offset, sizeof(IMG_UINT32), PVRSRV_CACHE_OP_INVALIDATE); + } + } + else + { + IMG_UINT32 ui32RAWCpuValue; + + /* Ensures line is in dcache */ + ui32FWValue = IMG_UINT32_MAX; + + /* Dirty allocation in dcache */ + ui32RAWCpuValue = i + ui32OddEvenSeed; + pui32FabricCohTestBufferCpuVA[i] = i + ui32OddEvenSeed; + + /* Flush possible cpu store-buffer(ing) on LMA */ + OSWriteMemoryBarrier(); + + switch (eTestType) + { + case CPU_WRITE_GPU_READ_SM: + /* Flush dcache to force subsequent incoming CPU-bound snoop to miss so + memory is coherent before the SlavePort reads */ + CacheOpValExec(psPMR, 0, ui32Offset, sizeof(IMG_UINT32), PVRSRV_CACHE_OP_FLUSH); + break; + default: + break; + } + + /* Read back value using RGX slave-port interface */ + eError = RGXReadMETAAddr(psDevInfo, ui32FWAddr, &ui32FWValue); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "RGXReadWithSP error: %s, exiting", + PVRSRVGetErrorString(eError))); + bExit = IMG_TRUE; + continue; + } + + /* Being mostly paranoid here, verify that CPU RAW operation is valid + after the above slave port read */ + CacheOpValExec(psPMR, 0, ui32Offset, sizeof(IMG_UINT32), PVRSRV_CACHE_OP_INVALIDATE); + if (pui32FabricCohTestBufferCpuVA[i] != ui32RAWCpuValue) + { + /* Fatal error, we should abort */ + PVR_DPF((PVR_DBG_ERROR, + "At Offset: %d, RAW by CPU failed: expected: %x, got: %x", + i, + 
ui32RAWCpuValue, + pui32FabricCohTestBufferCpuVA[i])); + eError = PVRSRV_ERROR_INIT_FAILURE; + bExit = IMG_TRUE; + continue; + } + } + + /* Compare to see if sub-test passed */ + if (pui32FabricCohTestBufferCpuVA[i] == ui32FWValue) + { + bSubTestPassed = IMG_TRUE; + } + else + { + bSubTestPassed = IMG_FALSE; + bFullTestPassed = IMG_FALSE; + eError = PVRSRV_ERROR_INIT_FAILURE; + if (ui32LastFWValue != ui32FWValue) + { +#if defined(DEBUG) + PVR_LOG(("At Offset: %d, Expected: %x, Got: %x", + i, + (eTestType & 0x1) ? ui32FWValue : pui32FabricCohTestBufferCpuVA[i], + (eTestType & 0x1) ? pui32FabricCohTestBufferCpuVA[i] : ui32FWValue)); +#endif + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "test encountered unexpected error, exiting")); + eError = PVRSRV_ERROR_INIT_FAILURE; + bExit = IMG_TRUE; + continue; + } + } + + ui32LastFWValue = (eTestType & 0x1) ? ui32FWValue : pui32FabricCohTestBufferCpuVA[i]; + } + +#if defined(DEBUG) + bSubTestPassed = bExit ? IMG_FALSE : bSubTestPassed; + switch (eTestType) + { + case CPU_WRITE_GPU_READ_SM: + PVR_LOG(("CPU:Write/GPU:Read Snoop Miss Test: completed [run #%u]: %s", + ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED")); + break; + case GPU_WRITE_CPU_READ_SM: + PVR_LOG(("GPU:Write/CPU:Read Snoop Miss Test: completed [run #%u]: %s", + ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED")); + break; + case CPU_WRITE_GPU_READ_SH: + PVR_LOG(("CPU:Write/GPU:Read Snoop Hit Test: completed [run #%u]: %s", + ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED")); + break; + case GPU_WRITE_CPU_READ_SH: + PVR_LOG(("GPU:Write/CPU:Read Snoop Hit Test: completed [run #%u]: %s", + ui32OddEven, bSubTestPassed ? 
"PASSED" : "FAILED")); + break; + default: + PVR_LOG(("Internal error, exiting test")); + eError = PVRSRV_ERROR_INIT_FAILURE; + bExit = IMG_TRUE; + continue; + } +#endif + } + } + + /* Release and free NC/CC test buffers */ + RGXUnsetFirmwareAddress(psFabricCohCcTestBufferMemDesc); +e5: + DevmemReleaseCpuVirtAddr(psFabricCohCcTestBufferMemDesc); +e4: + DevmemFwUnmapAndFree(psDevInfo, psFabricCohCcTestBufferMemDesc); + +e3: + RGXUnsetFirmwareAddress(psFabricCohNcTestBufferMemDesc); +e2: + DevmemReleaseCpuVirtAddr(psFabricCohNcTestBufferMemDesc); +e1: + DevmemFwUnmapAndFree(psDevInfo, psFabricCohNcTestBufferMemDesc); + +e0: + /* Restore bootloader segment settings */ + RGXWriteMetaRegThroughSP(hPrivate, META_CR_MMCU_SEGMENTn_OUTA1(6), + (ui64SegOutAddrTopCached | RGXFW_BOOTLDR_DEVV_ADDR) >> 32); + + bFullTestPassed = bExit ? IMG_FALSE: bFullTestPassed; + if (bFullTestPassed) + { + PVR_LOG(("fabric coherency test: PASSED")); + psDevInfo->ui32CoherencyTestsDone = MAX_NUM_COHERENCY_TESTS + 1; + } + else + { + PVR_LOG(("fabric coherency test: FAILED")); + psDevInfo->ui32CoherencyTestsDone++; + } + + return eError; +} + +IMG_BOOL RGXDeviceHasErnBrn(const void *hPrivate, IMG_UINT64 ui64ErnsBrns) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + + return (psDevInfo->sDevFeatureCfg.ui64ErnsBrns & ui64ErnsBrns) != 0; +} + +IMG_UINT32 RGXGetDeviceSLCBanks(const void *hPrivate) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + + if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_BANKS)) + { + return 0; + } + return RGX_GET_FEATURE_VALUE(psDevInfo, SLC_BANKS); +} + +IMG_UINT32 RGXGetDeviceCacheLineSize(const void *hPrivate) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + 
psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + + if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_CACHE_LINE_SIZE_BITS)) + { + return 0; + } + return RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS); +} + +void RGXAcquireBootCodeAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootCodeAddr) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + + *psBootCodeAddr = psDevInfo->sFWCodeDevVAddrBase; +} + +void RGXAcquireBootDataAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootDataAddr) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; + + *psBootDataAddr = psDevInfo->sFWDataDevVAddrBase; +} + +IMG_BOOL RGXDeviceAckIrq(const void *hPrivate) +{ + RGX_LAYER_PARAMS *psParams; + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_ASSERT(hPrivate != NULL); + psParams = (RGX_LAYER_PARAMS*)hPrivate; + psDevInfo = psParams->psDevInfo; + + return (psDevInfo->pfnRGXAckIrq != NULL) ? + psDevInfo->pfnRGXAckIrq(psDevInfo) : IMG_TRUE; +} diff --git a/drivers/gpu/drm/phytium/octopus/rgxlayer_impl.h b/drivers/gpu/drm/phytium/octopus/rgxlayer_impl.h new file mode 100644 index 000000000000..0b2bb24a39ac --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxlayer_impl.h @@ -0,0 +1,61 @@ +/*************************************************************************/ /*! +@File +@Title Header for DDK implementation of the Services abstraction layer +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Header for DDK implementation of the Services abstraction layer +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(RGXLAYER_IMPL_H) +#define RGXLAYER_IMPL_H + +#include "rgxlayer.h" +#include "device_connection.h" + +typedef struct _RGX_LAYER_PARAMS_ +{ + void *psDevInfo; + void *psDevConfig; +#if defined(PDUMP) + IMG_UINT32 ui32PdumpFlags; +#endif + + IMG_DEV_PHYADDR sPCAddr; +} RGX_LAYER_PARAMS; + +#endif /* RGXLAYER_IMPL_H */ diff --git a/drivers/gpu/drm/phytium/octopus/rgxmem.c b/drivers/gpu/drm/phytium/octopus/rgxmem.c new file mode 100644 index 000000000000..257d893c3d34 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxmem.c @@ -0,0 +1,936 @@ +/*************************************************************************/ /*! +@File +@Title RGX memory context management +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description RGX memory context management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "pvr_debug.h" +#include "rgxmem.h" +#include "allocmem.h" +#include "devicemem.h" +#include "devicemem_server_utils.h" +#include "devicemem_pdump.h" +#include "rgxdevice.h" +#include "rgx_fwif_km.h" +#include "rgxfwutils.h" +#include "pdump_km.h" +#include "pdump_physmem.h" +#include "pvr_notifier.h" +#include "pvrsrv.h" +#include "sync_internal.h" +#include "rgx_memallocflags.h" +#include "rgx_bvnc_defs_km.h" +#include "info_page.h" + +#if defined(PDUMP) +#include "sync.h" +#endif + +typedef struct SERVER_MMU_CONTEXT_TAG +{ + DEVMEM_MEMDESC *psFWMemContextMemDesc; + PRGXFWIF_FWMEMCONTEXT sFWMemContextDevVirtAddr; + MMU_CONTEXT *psMMUContext; + IMG_PID uiPID; + IMG_CHAR szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME]; + IMG_UINT64 ui64FBSCEntryMask; + DLLIST_NODE sNode; + PVRSRV_RGXDEV_INFO *psDevInfo; +} SERVER_MMU_CONTEXT; + +PVRSRV_ERROR RGXSLCFlushRange(PVRSRV_DEVICE_NODE *psDeviceNode, + MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiSize, + IMG_BOOL bInvalidate) +{ + PVRSRV_ERROR eError; + DLLIST_NODE *psNode, *psNext; + RGXFWIF_KCCB_CMD sFlushInvalCmd; + SERVER_MMU_CONTEXT *psServerMMUContext = NULL; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + IMG_UINT32 ui32kCCBCommandSlot; + + OSWRLockAcquireRead(psDevInfo->hMemoryCtxListLock); + + dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext) + { + SERVER_MMU_CONTEXT *psIter = IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode); + if (psIter->psMMUContext == psMMUContext) + { + psServerMMUContext = psIter; + } + } + + OSWRLockReleaseRead(psDevInfo->hMemoryCtxListLock); + + if (! 
psServerMMUContext) + { + return PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND; + } + + /* Schedule the SLC flush command */ +#if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Submit SLC flush and invalidate"); +#endif + sFlushInvalCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL; + sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bInval = bInvalidate; + sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_FALSE; + sFlushInvalCmd.uCmdData.sSLCFlushInvalData.ui64Size = uiSize; + sFlushInvalCmd.uCmdData.sSLCFlushInvalData.ui64Address = sDevVAddr.uiAddr; + eError = RGXGetFWCommonContextAddrFromServerMMUCtx(psDevInfo, + psServerMMUContext, + &sFlushInvalCmd.uCmdData.sSLCFlushInvalData.psContext); + if (eError != PVRSRV_OK) + { + return eError; + } + + eError = RGXSendCommandWithPowLockAndGetKCCBSlot(psDevInfo, + &sFlushInvalCmd, + PDUMP_FLAGS_CONTINUOUS, + &ui32kCCBCommandSlot); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "RGXSLCFlush: Failed to schedule SLC flush command with error (%u)", + eError)); + } + else + { + /* Wait for the SLC flush to complete */ + eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "RGXSLCFlush: SLC flush and invalidate aborted with error (%u)", + eError)); + } + } + + return eError; +} + +PVRSRV_ERROR RGXInvalidateFBSCTable(PVRSRV_DEVICE_NODE *psDeviceNode, + MMU_CONTEXT *psMMUContext, + IMG_UINT64 ui64FBSCEntryMask) +{ + DLLIST_NODE *psNode, *psNext; + SERVER_MMU_CONTEXT *psServerMMUContext = NULL; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + OSWRLockAcquireRead(psDevInfo->hMemoryCtxListLock); + + dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext) + { + SERVER_MMU_CONTEXT *psIter = IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode); + if (psIter->psMMUContext == psMMUContext) + { + psServerMMUContext = psIter; + } + } + + OSWRLockReleaseRead(psDevInfo->hMemoryCtxListLock); + + if (! 
psServerMMUContext) + { + return PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND; + } + + /* Accumulate the FBSC invalidate request */ + psServerMMUContext->ui64FBSCEntryMask |= ui64FBSCEntryMask; + + return PVRSRV_OK; +} + +/* + * RGXExtractFBSCEntryMaskFromMMUContext + * + */ +PVRSRV_ERROR RGXExtractFBSCEntryMaskFromMMUContext(PVRSRV_DEVICE_NODE *psDeviceNode, + SERVER_MMU_CONTEXT *psServerMMUContext, + IMG_UINT64 *pui64FBSCEntryMask) +{ + if (!psServerMMUContext) + { + return PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND; + } + + *pui64FBSCEntryMask = psServerMMUContext->ui64FBSCEntryMask; + psServerMMUContext->ui64FBSCEntryMask = 0; + + return PVRSRV_OK; +} + +void RGXMMUCacheInvalidate(PVRSRV_DEVICE_NODE *psDeviceNode, + MMU_CONTEXT *psMMUContext, + MMU_LEVEL eMMULevel, + IMG_BOOL bUnmap) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; + IMG_UINT32 ui32NewCacheFlags; + + PVR_UNREFERENCED_PARAMETER(bUnmap); + + switch (eMMULevel) + { + case MMU_LEVEL_3: + ui32NewCacheFlags = RGXFWIF_MMUCACHEDATA_FLAGS_PC; + + break; + case MMU_LEVEL_2: + ui32NewCacheFlags = RGXFWIF_MMUCACHEDATA_FLAGS_PD; + + break; + case MMU_LEVEL_1: + ui32NewCacheFlags = RGXFWIF_MMUCACHEDATA_FLAGS_PT; + + if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT))) + { + ui32NewCacheFlags |= RGXFWIF_MMUCACHEDATA_FLAGS_TLB; + } + + break; + default: + ui32NewCacheFlags = 0; + PVR_ASSERT(0); + + break; + } + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT)) + { + MMU_AppendCacheFlags(psMMUContext, ui32NewCacheFlags); + } + else + { + MMU_AppendCacheFlags(psDevInfo->psKernelMMUCtx, ui32NewCacheFlags); + } +} + +static inline void _GetAndResetCacheOpsPending(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 *pui32FWCacheFlags) +{ + /* + * Atomically exchange flags and 0 to ensure we never accidentally read + * state inconsistently or overwrite valid cache flags with 0. 
+ */ + *pui32FWCacheFlags = MMU_ExchangeCacheFlags(psDevInfo->psKernelMMUCtx, 0); +} + +static +PVRSRV_ERROR _PrepareAndSubmitCacheCommand(PVRSRV_DEVICE_NODE *psDeviceNode, + RGXFWIF_DM eDM, + IMG_UINT32 ui32CacheFlags, + IMG_BOOL bInterrupt, + IMG_UINT32 *pui32MMUInvalidateUpdate) +{ + PVRSRV_ERROR eError; + RGXFWIF_KCCB_CMD sFlushCmd; + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; + + *pui32MMUInvalidateUpdate = psDeviceNode->ui32NextMMUInvalidateUpdate++; + + /* Setup cmd and add the device nodes sync object */ + sFlushCmd.eCmdType = RGXFWIF_KCCB_CMD_MMUCACHE; + sFlushCmd.uCmdData.sMMUCacheData.ui32MMUCacheSyncUpdateValue = *pui32MMUInvalidateUpdate; + SyncPrimGetFirmwareAddr(psDeviceNode->psMMUCacheSyncPrim, + &sFlushCmd.uCmdData.sMMUCacheData.sMMUCacheSync.ui32Addr); + + /* Indicate the firmware should signal command completion to the host */ + if (bInterrupt) + { + ui32CacheFlags |= RGXFWIF_MMUCACHEDATA_FLAGS_INTERRUPT; + } + + sFlushCmd.uCmdData.sMMUCacheData.ui32CacheFlags = ui32CacheFlags; + +#if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "Submit MMU flush and invalidate (flags = 0x%08x)", + ui32CacheFlags); +#endif + + /* Schedule MMU cache command */ + eError = RGXSendCommand(psDevInfo, + &sFlushCmd, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to schedule MMU cache command to " + "DM=%d with error (%u)", + __func__, eDM, eError)); + psDeviceNode->ui32NextMMUInvalidateUpdate--; + } + + return eError; +} + +PVRSRV_ERROR RGXMMUCacheInvalidateKick(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 *pui32MMUInvalidateUpdate) +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32FWCacheFlags; + + eError = PVRSRVPowerLock(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)", + __func__, PVRSRVGetErrorString(eError))); + goto RGXMMUCacheInvalidateKick_exit; + } + + 
_GetAndResetCacheOpsPending(psDeviceNode->pvDevice, &ui32FWCacheFlags); + if (ui32FWCacheFlags == 0) + { + /* Nothing to do if no cache ops pending */ + eError = PVRSRV_OK; + goto _PowerUnlockAndReturnErr; + } + + /* Ensure device is powered up before sending cache command */ + PDUMPPOWCMDSTART(); + eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, + PVRSRV_DEV_POWER_STATE_ON, + IMG_FALSE, IMG_FALSE); + PDUMPPOWCMDEND(); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition RGX to ON (%s)", + __func__, PVRSRVGetErrorString(eError))); + goto _PowerUnlockAndReturnErr; + } + + eError = _PrepareAndSubmitCacheCommand(psDeviceNode, RGXFWIF_DM_GP, ui32FWCacheFlags, + IMG_TRUE, pui32MMUInvalidateUpdate); + if (eError != PVRSRV_OK) + { + /* failed to submit cache operations, return failure */ + goto _PowerUnlockAndReturnErr; + } + +_PowerUnlockAndReturnErr: + PVRSRVPowerUnlock(psDeviceNode); + +RGXMMUCacheInvalidateKick_exit: + return eError; +} + +PVRSRV_ERROR RGXPreKickCacheCommand(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_DM eDM, + IMG_UINT32 *pui32MMUInvalidateUpdate) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; + IMG_UINT32 ui32FWCacheFlags; + + /* Caller should ensure that power lock is held before calling this function */ + PVR_ASSERT(OSLockIsLocked(psDeviceNode->hPowerLock)); + + _GetAndResetCacheOpsPending(psDeviceNode->pvDevice, &ui32FWCacheFlags); + if (ui32FWCacheFlags == 0) + { + /* Nothing to do if no cache ops pending */ + return PVRSRV_OK; + } + + return _PrepareAndSubmitCacheCommand(psDeviceNode, eDM, ui32FWCacheFlags, + IMG_FALSE, pui32MMUInvalidateUpdate); +} + +/* page fault debug is the only current use case for needing to find process info + * after that process device memory context has been destroyed + */ + +typedef struct _UNREGISTERED_MEMORY_CONTEXT_ +{ + IMG_PID uiPID; + IMG_CHAR szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME]; + IMG_DEV_PHYADDR sPCDevPAddr; +} UNREGISTERED_MEMORY_CONTEXT; + +/* 
must be a power of two */ +#define UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE (1 << 3) + +static UNREGISTERED_MEMORY_CONTEXT gasUnregisteredMemCtxs[UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE]; +static IMG_UINT32 gui32UnregisteredMemCtxsHead; + +/* record a device memory context being unregistered. + * the list of unregistered contexts can be used to find the PID and process name + * belonging to a memory context which has been destroyed + */ +static void _RecordUnregisteredMemoryContext(PVRSRV_RGXDEV_INFO *psDevInfo, SERVER_MMU_CONTEXT *psServerMMUContext) +{ + UNREGISTERED_MEMORY_CONTEXT *psRecord; + + OSLockAcquire(psDevInfo->hMMUCtxUnregLock); + + psRecord = &gasUnregisteredMemCtxs[gui32UnregisteredMemCtxsHead]; + + gui32UnregisteredMemCtxsHead = (gui32UnregisteredMemCtxsHead + 1) + & (UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - 1); + + OSLockRelease(psDevInfo->hMMUCtxUnregLock); + + psRecord->uiPID = psServerMMUContext->uiPID; + if (MMU_AcquireBaseAddr(psServerMMUContext->psMMUContext, &psRecord->sPCDevPAddr) != PVRSRV_OK) + { + PVR_LOG(("_RecordUnregisteredMemoryContext: Failed to get PC address for memory context")); + } + OSStringLCopy(psRecord->szProcessName, psServerMMUContext->szProcessName, sizeof(psRecord->szProcessName)); +} + + +void RGXUnregisterMemoryContext(IMG_HANDLE hPrivData) +{ + SERVER_MMU_CONTEXT *psServerMMUContext = hPrivData; + PVRSRV_RGXDEV_INFO *psDevInfo = psServerMMUContext->psDevInfo; + +#if defined(PDUMP) + { + RGXFWIF_DEV_VIRTADDR sFWAddr; + + RGXSetFirmwareAddress(&sFWAddr, + psServerMMUContext->psFWMemContextMemDesc, + 0, + RFW_FWADDR_NOREF_FLAG); + + /* + * MMU cache commands (always dumped) might have a pointer to this FW + * memory context, wait until the FW has caught-up to the latest command. 
	 */
		PDUMPCOMMENT("Ensure FW has executed all MMU invalidations on FW memory "
		             "context 0x%x before freeing it", sFWAddr.ui32Addr);
		SyncPrimPDumpPol(psDevInfo->psDeviceNode->psMMUCacheSyncPrim,
		                 psDevInfo->psDeviceNode->ui32NextMMUInvalidateUpdate - 1,
		                 0xFFFFFFFF,
		                 PDUMP_POLL_OPERATOR_GREATEREQUAL,
		                 PDUMP_FLAGS_CONTINUOUS);
	}
#endif

	/* Remove this context from the device's active-context list. */
	OSWRLockAcquireWrite(psDevInfo->hMemoryCtxListLock);
	dllist_remove_node(&psServerMMUContext->sNode);
	OSWRLockReleaseWrite(psDevInfo->hMemoryCtxListLock);

	/* Keep a historical record so page-fault analysis can still name the
	 * owning process after the context is gone. */
	if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED)
	{
		_RecordUnregisteredMemoryContext(psDevInfo, psServerMMUContext);
	}

	/*
	 * Release the page catalogue address acquired in RGXRegisterMemoryContext().
	 */
	MMU_ReleaseBaseAddr(NULL);

	/*
	 * Free the firmware memory context.
	 */
	PDUMPCOMMENT("Free FW memory context");
	DevmemFwUnmapAndFree(psDevInfo, psServerMMUContext->psFWMemContextMemDesc);

	OSFreeMem(psServerMMUContext);
}

/*
 * RGXRegisterMemoryContext
 *
 * Called for every new device memory context. The first call registers the
 * kernel MMU context; subsequent calls allocate and initialise a firmware
 * memory context for a client and add it to the device's context list.
 * *hPrivData receives the SERVER_MMU_CONTEXT handle (client path only).
 */
PVRSRV_ERROR RGXRegisterMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode,
                                      MMU_CONTEXT *psMMUContext,
                                      IMG_HANDLE *hPrivData)
{
	PVRSRV_ERROR eError;
	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
	PVRSRV_MEMALLOCFLAGS_T uiFWMemContextMemAllocFlags;
	RGXFWIF_FWMEMCONTEXT *psFWMemContext;
	DEVMEM_MEMDESC *psFWMemContextMemDesc;
	SERVER_MMU_CONTEXT *psServerMMUContext;

	if (psDevInfo->psKernelMMUCtx == NULL)
	{
		/*
		 * This must be the creation of the Kernel memory context. Take a copy
		 * of the MMU context for use when programming the BIF.
		 */
		psDevInfo->psKernelMMUCtx = psMMUContext;

#if defined(RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR)
		/* Setup the BRN71422 mapping in the FW memory context.
		 */
		if (RGX_IS_BRN_SUPPORTED(psDevInfo, 71422))
		{
			RGXMapBRN71422TargetPhysicalAddress(psMMUContext);
		}
#endif
	}
	else
	{
		psServerMMUContext = OSAllocMem(sizeof(*psServerMMUContext));
		if (psServerMMUContext == NULL)
		{
			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
			goto fail_alloc_server_ctx;
		}

		psServerMMUContext->psDevInfo = psDevInfo;
		psServerMMUContext->ui64FBSCEntryMask = 0;
		psServerMMUContext->sFWMemContextDevVirtAddr.ui32Addr = 0;

		/*
		 * This FW MemContext is only mapped into kernel for initialisation purposes.
		 * Otherwise this allocation is only used by the FW.
		 * Therefore the GPU cache doesn't need coherency, and write-combine
		 * will suffice on the CPU side (WC buffer will be flushed at any kick)
		 */
		uiFWMemContextMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
		                              PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
		                              PVRSRV_MEMALLOCFLAG_GPU_READABLE |
		                              PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
		                              PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
		                              PVRSRV_MEMALLOCFLAG_CPU_READABLE |
		                              PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
		                              PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC |
		                              PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
		                              PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN);

		/*
		  Allocate device memory for the firmware memory context for the new
		  application.
		*/
		PDUMPCOMMENT("Allocate RGX firmware memory context");
		eError = DevmemFwAllocate(psDevInfo,
		                          sizeof(*psFWMemContext),
		                          uiFWMemContextMemAllocFlags | PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC,
		                          "FwMemoryContext",
		                          &psFWMemContextMemDesc);

		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,
			         "%s: Failed to allocate firmware memory context (%u)",
			         __func__,
			         eError));
			goto fail_alloc_fw_ctx;
		}

		/*
		  Temporarily map the firmware memory context to the kernel.
		 */
		eError = DevmemAcquireCpuVirtAddr(psFWMemContextMemDesc,
		                                  (void **)&psFWMemContext);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,
			         "%s: Failed to map firmware memory context (%u)",
			         __func__,
			         eError));
			goto fail_acquire_cpu_addr;
		}

		/*
		 * Write the new memory context's page catalogue into the firmware memory
		 * context for the client.
		 */
		eError = MMU_AcquireBaseAddr(psMMUContext, &psFWMemContext->sPCDevPAddr);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,
			         "%s: Failed to acquire Page Catalogue address (%u)",
			         __func__,
			         eError));
			DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
			goto fail_acquire_base_addr;
		}

		/*
		 * Set default values for the rest of the structure.
		 */
		psFWMemContext->uiPageCatBaseRegID = RGXFW_BIF_INVALID_PCREG;
		psFWMemContext->uiBreakpointAddr = 0;
		psFWMemContext->uiBPHandlerAddr = 0;
		psFWMemContext->uiBreakpointCtl = 0;

#if defined(SUPPORT_GPUVIRT_VALIDATION)
{
		IMG_UINT32 ui32OSid = 0, ui32OSidReg = 0;
		IMG_BOOL bOSidAxiProt;

		MMU_GetOSids(psMMUContext, &ui32OSid, &ui32OSidReg, &bOSidAxiProt);

		psFWMemContext->ui32OSid = ui32OSidReg;
		psFWMemContext->bOSidAxiProt = bOSidAxiProt;
}
#endif

#if defined(PDUMP)
		{
			IMG_CHAR aszName[PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH];
			IMG_DEVMEM_OFFSET_T uiOffset = 0;

			/*
			 * Dump the Mem context allocation
			 */
			DevmemPDumpLoadMem(psFWMemContextMemDesc, 0, sizeof(*psFWMemContext), PDUMP_FLAGS_CONTINUOUS);


			/*
			 * Obtain a symbolic addr of the mem context structure
			 */
			eError = DevmemPDumpPageCatBaseToSAddr(psFWMemContextMemDesc,
			                                       &uiOffset,
			                                       aszName,
			                                       PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH);

			if (eError != PVRSRV_OK)
			{
				PVR_DPF((PVR_DBG_ERROR,
				         "%s: Failed to generate a Dump Page Catalogue address (%u)",
				         __func__,
				         eError));
				DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
				goto fail_pdump_cat_base_addr;
			}

			/*
			 * Dump the Page Cat tag in the mem context (symbolic address)
			 */
			eError = MMU_PDumpWritePageCatBase(psMMUContext,
			                                   aszName,
			                                   uiOffset,
			                                   8, /* 64-bit register write */
			                                   0,
			                                   0,
			                                   0);
			if (eError != PVRSRV_OK)
			{
				PVR_DPF((PVR_DBG_ERROR,
				         "%s: Failed to acquire Page Catalogue address (%u)",
				         __func__,
				         eError));
				DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
				goto fail_pdump_cat_base;
			}
		}
#endif

		/*
		 * Release kernel address acquired above.
		 */
		DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);

		/*
		 * Store the process information for this device memory context
		 * for use with the host page-fault analysis.
		 */
		psServerMMUContext->uiPID = OSGetCurrentClientProcessIDKM();
		psServerMMUContext->psMMUContext = psMMUContext;
		psServerMMUContext->psFWMemContextMemDesc = psFWMemContextMemDesc;
		OSStringLCopy(psServerMMUContext->szProcessName,
		              OSGetCurrentClientProcessNameKM(),
		              sizeof(psServerMMUContext->szProcessName));

		PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "New memory context: Process Name: %s PID: %u (0x%08X)",
		                      psServerMMUContext->szProcessName,
		                      psServerMMUContext->uiPID,
		                      psServerMMUContext->uiPID);

		OSWRLockAcquireWrite(psDevInfo->hMemoryCtxListLock);
		dllist_add_to_tail(&psDevInfo->sMemoryContextList, &psServerMMUContext->sNode);
		OSWRLockReleaseWrite(psDevInfo->hMemoryCtxListLock);

		*hPrivData = psServerMMUContext;
	}

	return PVRSRV_OK;

	/* Error paths: unwind in reverse order of acquisition. */
#if defined(PDUMP)
fail_pdump_cat_base:
fail_pdump_cat_base_addr:
	MMU_ReleaseBaseAddr(NULL);
#endif
fail_acquire_base_addr:
	/* Done before jumping to the fail point as the release is done before exit */
fail_acquire_cpu_addr:
	DevmemFwUnmapAndFree(psDevInfo, psServerMMUContext->psFWMemContextMemDesc);
fail_alloc_fw_ctx:
	OSFreeMem(psServerMMUContext);
fail_alloc_server_ctx:
	PVR_ASSERT(eError != PVRSRV_OK);
	return eError;
}

DEVMEM_MEMDESC *RGXGetFWMemDescFromMemoryContextHandle(IMG_HANDLE hPriv)
{
	SERVER_MMU_CONTEXT *psMMUContext = (SERVER_MMU_CONTEXT *) hPriv;

	return
psMMUContext->psFWMemContextMemDesc; +} + +void RGXSetFWMemContextDevVirtAddr(SERVER_MMU_CONTEXT *psServerMMUContext, + RGXFWIF_DEV_VIRTADDR sFWMemContextAddr) +{ + psServerMMUContext->sFWMemContextDevVirtAddr.ui32Addr = sFWMemContextAddr.ui32Addr; +} + +void RGXCheckFaultAddress(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_DEV_VIRTADDR *psDevVAddr, + IMG_DEV_PHYADDR *psDevPAddr, + MMU_FAULT_DATA *psOutFaultData) +{ + IMG_DEV_PHYADDR sPCDevPAddr; + DLLIST_NODE *psNode, *psNext; + + OSWRLockAcquireRead(psDevInfo->hMemoryCtxListLock); + + dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext) + { + SERVER_MMU_CONTEXT *psServerMMUContext = + IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode); + + if (MMU_AcquireBaseAddr(psServerMMUContext->psMMUContext, &sPCDevPAddr) != PVRSRV_OK) + { + PVR_LOG(("Failed to get PC address for memory context")); + continue; + } + + if (psDevPAddr->uiAddr == sPCDevPAddr.uiAddr) + { + MMU_CheckFaultAddress(psServerMMUContext->psMMUContext, psDevVAddr, psOutFaultData); + goto out_unlock; + } + } + + /* Lastly check for fault in the kernel allocated memory */ + if (MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sPCDevPAddr) != PVRSRV_OK) + { + PVR_LOG(("Failed to get PC address for kernel memory context")); + } + + if (psDevPAddr->uiAddr == sPCDevPAddr.uiAddr) + { + MMU_CheckFaultAddress(psDevInfo->psKernelMMUCtx, psDevVAddr, psOutFaultData); + } + +out_unlock: + OSWRLockReleaseRead(psDevInfo->hMemoryCtxListLock); +} + +/* given the physical address of a page catalogue, searches for a corresponding + * MMU context and if found, provides the caller details of the process. + * Returns IMG_TRUE if a process is found. 
 */
IMG_BOOL RGXPCAddrToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_PHYADDR sPCAddress,
                                RGXMEM_PROCESS_INFO *psInfo)
{
	IMG_BOOL bRet = IMG_FALSE;
	DLLIST_NODE *psNode, *psNext;
	SERVER_MMU_CONTEXT *psServerMMUContext = NULL;

	/* check if the input PC addr corresponds to an active memory context */
	/* NOTE(review): this list walk does not take hMemoryCtxListLock, unlike
	 * RGXCheckFaultAddress above — presumably the caller serialises; confirm. */
	dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext)
	{
		SERVER_MMU_CONTEXT *psThisMMUContext =
			IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode);
		IMG_DEV_PHYADDR sPCDevPAddr;

		if (MMU_AcquireBaseAddr(psThisMMUContext->psMMUContext, &sPCDevPAddr) != PVRSRV_OK)
		{
			PVR_LOG(("Failed to get PC address for memory context"));
			continue;
		}

		if (sPCAddress.uiAddr == sPCDevPAddr.uiAddr)
		{
			psServerMMUContext = psThisMMUContext;
			break;
		}
	}

	if (psServerMMUContext != NULL)
	{
		/* Active context found: report its process details. */
		psInfo->uiPID = psServerMMUContext->uiPID;
		OSStringLCopy(psInfo->szProcessName, psServerMMUContext->szProcessName, sizeof(psInfo->szProcessName));
		psInfo->bUnregistered = IMG_FALSE;
		bRet = IMG_TRUE;
	}
	/* else check if the input PC addr corresponds to the firmware */
	else
	{
		IMG_DEV_PHYADDR sKernelPCDevPAddr;
		PVRSRV_ERROR eError;

		eError = MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sKernelPCDevPAddr);

		if (eError != PVRSRV_OK)
		{
			PVR_LOG(("Failed to get PC address for kernel memory context"));
		}
		else
		{
			if (sPCAddress.uiAddr == sKernelPCDevPAddr.uiAddr)
			{
				/* The PC belongs to the firmware's own context. */
				psInfo->uiPID = RGXMEM_SERVER_PID_FIRMWARE;
				OSStringLCopy(psInfo->szProcessName, "Firmware", sizeof(psInfo->szProcessName));
				psInfo->bUnregistered = IMG_FALSE;
				bRet = IMG_TRUE;
			}
		}
	}

	if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) &&
	    (bRet == IMG_FALSE))
	{
		/* no active memory context found with the given PC address.
		 * Check the list of most recently freed memory contexts.
		 */
		IMG_UINT32 i;

		OSLockAcquire(psDevInfo->hMMUCtxUnregLock);

		/* iterate through the list of unregistered memory contexts
		 * from newest (one before the head) to the oldest (the current head)
		 */
		i = gui32UnregisteredMemCtxsHead;

		do
		{
			UNREGISTERED_MEMORY_CONTEXT *psRecord;

			/* Step backwards, wrapping from 0 to the last slot. */
			i ? i-- : (i = (UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - 1));

			psRecord = &gasUnregisteredMemCtxs[i];

			if (psRecord->sPCDevPAddr.uiAddr == sPCAddress.uiAddr)
			{
				psInfo->uiPID = psRecord->uiPID;
				OSStringLCopy(psInfo->szProcessName, psRecord->szProcessName, sizeof(psInfo->szProcessName));
				psInfo->bUnregistered = IMG_TRUE;
				bRet = IMG_TRUE;
				break;
			}
		} while (i != gui32UnregisteredMemCtxsHead);

		OSLockRelease(psDevInfo->hMMUCtxUnregLock);

	}

	return bRet;
}

/* Look up process details (PID, name) by PID: first among active memory
 * contexts, then the firmware pseudo-PID, then — when page-fault debug is
 * enabled — the history of recently unregistered contexts.
 * Returns IMG_TRUE when a match is found and psInfo is filled in. */
IMG_BOOL RGXPCPIDToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_PID uiPID,
                               RGXMEM_PROCESS_INFO *psInfo)
{
	IMG_BOOL bRet = IMG_FALSE;
	DLLIST_NODE *psNode, *psNext;
	SERVER_MMU_CONTEXT *psServerMMUContext = NULL;

	/* check if the input PID corresponds to an active memory context */
	dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext)
	{
		SERVER_MMU_CONTEXT *psThisMMUContext =
			IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode);

		if (psThisMMUContext->uiPID == uiPID)
		{
			psServerMMUContext = psThisMMUContext;
			break;
		}
	}

	if (psServerMMUContext != NULL)
	{
		psInfo->uiPID = psServerMMUContext->uiPID;
		OSStringLCopy(psInfo->szProcessName, psServerMMUContext->szProcessName, sizeof(psInfo->szProcessName));
		psInfo->bUnregistered = IMG_FALSE;
		bRet = IMG_TRUE;
	}
	/* else check if the input PID corresponds to the firmware */
	else if (uiPID == RGXMEM_SERVER_PID_FIRMWARE)
	{
		psInfo->uiPID = RGXMEM_SERVER_PID_FIRMWARE;
		OSStringLCopy(psInfo->szProcessName, "Firmware", sizeof(psInfo->szProcessName));
		psInfo->bUnregistered = IMG_FALSE;
		bRet = IMG_TRUE;
	}

	if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) &&
+ (bRet == IMG_FALSE)) + { + /* if the PID didn't correspond to an active context or the + * FW address then see if it matches a recently unregistered context + */ + const IMG_UINT32 ui32Mask = UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - 1; + IMG_UINT32 i, j; + + OSLockAcquire(psDevInfo->hMMUCtxUnregLock); + + for (i = (gui32UnregisteredMemCtxsHead - 1) & ui32Mask, j = 0; + j < UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE; + i = (gui32UnregisteredMemCtxsHead - 1) & ui32Mask, j++) + { + UNREGISTERED_MEMORY_CONTEXT *psRecord = &gasUnregisteredMemCtxs[i]; + + if (psRecord->uiPID == uiPID) + { + psInfo->uiPID = psRecord->uiPID; + OSStringLCopy(psInfo->szProcessName, psRecord->szProcessName, sizeof(psInfo->szProcessName)); + psInfo->bUnregistered = IMG_TRUE; + bRet = IMG_TRUE; + break; + } + } + + OSLockRelease(psDevInfo->hMMUCtxUnregLock); + } + + return bRet; +} + +IMG_PID RGXGetPIDFromServerMMUContext(SERVER_MMU_CONTEXT *psServerMMUContext) +{ + if (psServerMMUContext) + { + return psServerMMUContext->uiPID; + } + return 0; +} + +/****************************************************************************** + End of file (rgxmem.c) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgxmem.h b/drivers/gpu/drm/phytium/octopus/rgxmem.h new file mode 100644 index 000000000000..33f96348a11b --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxmem.h @@ -0,0 +1,147 @@ +/*************************************************************************/ /*! +@File +@Title RGX memory context management +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Header for RGX memory context management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(RGXMEM_H) +#define RGXMEM_H + +#include "pvrsrv_error.h" +#include "device.h" +#include "mmu_common.h" +#include "rgxdevice.h" + +#define RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME 16 + +/* this PID denotes the firmware */ +#define RGXMEM_SERVER_PID_FIRMWARE 0xFFFFFFFF + +/* this PID denotes the PM */ +#define RGXMEM_SERVER_PID_PM 0xEFFFFFFF + +typedef struct _RGXMEM_PROCESS_INFO_ +{ + IMG_PID uiPID; + IMG_CHAR szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME]; + IMG_BOOL bUnregistered; +} RGXMEM_PROCESS_INFO; + +typedef struct SERVER_MMU_CONTEXT_TAG SERVER_MMU_CONTEXT; + +IMG_DEV_PHYADDR GetPC(MMU_CONTEXT * psContext); + +void RGXSetFWMemContextDevVirtAddr(SERVER_MMU_CONTEXT *psServerMMUContext, + RGXFWIF_DEV_VIRTADDR sFWMemContextAddr); + +void RGXMMUSyncPrimAlloc(PVRSRV_DEVICE_NODE *psDevNode); +void RGXMMUSyncPrimFree(void); + +PVRSRV_ERROR RGXSLCFlushRange(PVRSRV_DEVICE_NODE *psDevNode, + MMU_CONTEXT *psMMUContext, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEVMEM_SIZE_T uiLength, + IMG_BOOL bInvalidate); + +PVRSRV_ERROR RGXInvalidateFBSCTable(PVRSRV_DEVICE_NODE *psDeviceNode, + MMU_CONTEXT *psMMUContext, + IMG_UINT64 ui64FBSCEntryMask); + +PVRSRV_ERROR RGXExtractFBSCEntryMaskFromMMUContext(PVRSRV_DEVICE_NODE *psDeviceNode, + SERVER_MMU_CONTEXT *psServerMMUContext, + IMG_UINT64 *pui64FBSCEntryMask); + +void RGXMMUCacheInvalidate(PVRSRV_DEVICE_NODE *psDevNode, + MMU_CONTEXT *psMMUContext, + MMU_LEVEL eMMULevel, + IMG_BOOL bUnmap); + +/*************************************************************************/ /*! +@Function RGXMMUCacheInvalidateKick + +@Description Sends a flush command to a particular DM but first takes + the power lock. 
+ +@Input psDevNode Device Node pointer +@Input pui32NextMMUInvalidateUpdate + +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR RGXMMUCacheInvalidateKick(PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 *pui32NextMMUInvalidateUpdate); + +/*************************************************************************/ /*! +@Function RGXPreKickCacheCommand + +@Description Sends a cache flush command to a particular DM without + honouring the power lock. It's the caller's responsibility + to ensure power lock is held before calling this function. + +@Input psDevInfo Device Info +@Input eDM To which DM the cmd is sent. +@Input pui32MMUInvalidateUpdate + +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +PVRSRV_ERROR RGXPreKickCacheCommand(PVRSRV_RGXDEV_INFO *psDevInfo, + RGXFWIF_DM eDM, + IMG_UINT32 *pui32MMUInvalidateUpdate); + +void RGXUnregisterMemoryContext(IMG_HANDLE hPrivData); +PVRSRV_ERROR RGXRegisterMemoryContext(PVRSRV_DEVICE_NODE *psDevNode, + MMU_CONTEXT *psMMUContext, + IMG_HANDLE *hPrivData); + +DEVMEM_MEMDESC *RGXGetFWMemDescFromMemoryContextHandle(IMG_HANDLE hPriv); + +void RGXCheckFaultAddress(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_DEV_VIRTADDR *psDevVAddr, + IMG_DEV_PHYADDR *psDevPAddr, + MMU_FAULT_DATA *psOutFaultData); + +IMG_BOOL RGXPCAddrToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_PHYADDR sPCAddress, + RGXMEM_PROCESS_INFO *psInfo); + +IMG_BOOL RGXPCPIDToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_PID uiPID, + RGXMEM_PROCESS_INFO *psInfo); + +IMG_PID RGXGetPIDFromServerMMUContext(SERVER_MMU_CONTEXT *psServerMMUContext); + +#endif /* RGXMEM_H */ diff --git a/drivers/gpu/drm/phytium/octopus/rgxmipsmmuinit.c b/drivers/gpu/drm/phytium/octopus/rgxmipsmmuinit.c new file mode 100644 index 000000000000..7c486371f9f0 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxmipsmmuinit.c @@ -0,0 +1,1007 @@ 
+/*************************************************************************/ /*! +@File +@Title Device specific initialisation routines +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Device specific MMU initialisation +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#include "rgxmipsmmuinit.h" + +#include "device.h" +#include "img_types.h" +#include "img_defs.h" +#include "mmu_common.h" +#include "pdump_mmu.h" +#include "rgxheapconfig.h" +#include "pvr_debug.h" +#include "pvrsrv_error.h" +#include "rgx_memallocflags.h" +#include "pdump_km.h" +#include "rgxdevice.h" +#include "log2.h" + +/* + * Bits of PT, PD and PC not involving addresses + */ + +/* Currently there is no page directory for MIPS MMU */ +#define RGX_MIPS_MMUCTRL_PDE_PROTMASK 0 +/* Currently there is no page catalog for MIPS MMU */ +#define RGX_MIPS_MMUCTRL_PCE_PROTMASK 0 + + +static MMU_PxE_CONFIG sRGXMMUPCEConfig; +static MMU_DEVVADDR_CONFIG sRGXMMUTopLevelDevVAddrConfig; + + +/* + * + * Configuration for heaps with 4kB Data-Page size + * + */ + +static MMU_PxE_CONFIG sRGXMMUPDEConfig_4KBDP; +static MMU_PxE_CONFIG sRGXMMUPTEConfig_4KBDP; +static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_4KBDP; +static MMU_PAGESIZECONFIG gsPageSizeConfig4KB; + + +/* + * + * Configuration for heaps with 16kB Data-Page size + * + */ + +static MMU_PxE_CONFIG sRGXMMUPDEConfig_16KBDP; +static MMU_PxE_CONFIG sRGXMMUPTEConfig_16KBDP; +static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_16KBDP; +static MMU_PAGESIZECONFIG gsPageSizeConfig16KB; + + +/* + * + * Configuration for heaps with 64kB Data-Page size + * + */ + +static MMU_PxE_CONFIG sRGXMMUPDEConfig_64KBDP; +static 
MMU_PxE_CONFIG sRGXMMUPTEConfig_64KBDP; +static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_64KBDP; +static MMU_PAGESIZECONFIG gsPageSizeConfig64KB; + + +/* + * + * Configuration for heaps with 256kB Data-Page size + * + */ + +static MMU_PxE_CONFIG sRGXMMUPDEConfig_256KBDP; +static MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP; +static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_256KBDP; +static MMU_PAGESIZECONFIG gsPageSizeConfig256KB; + + +/* + * + * Configuration for heaps with 1MB Data-Page size + * + */ + +static MMU_PxE_CONFIG sRGXMMUPDEConfig_1MBDP; +static MMU_PxE_CONFIG sRGXMMUPTEConfig_1MBDP; +static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_1MBDP; +static MMU_PAGESIZECONFIG gsPageSizeConfig1MB; + + +/* + * + * Configuration for heaps with 2MB Data-Page size + * + */ + +static MMU_PxE_CONFIG sRGXMMUPDEConfig_2MBDP; +static MMU_PxE_CONFIG sRGXMMUPTEConfig_2MBDP; +static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_2MBDP; +static MMU_PAGESIZECONFIG gsPageSizeConfig2MB; + + +/* Forward declaration of protection bits derivation functions, for + the following structure */ +static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); +static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags); +static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); +static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags); +static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); +static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags); + +static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize, + const MMU_PxE_CONFIG **ppsMMUPDEConfig, + const MMU_PxE_CONFIG **ppsMMUPTEConfig, + const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig, + IMG_HANDLE *phPriv); + +static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv); + +static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize); +static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 
ui64PDE, IMG_UINT32 *pui32Log2PageSize); + +static MMU_DEVICEATTRIBS sRGXMMUDeviceAttributes; + +/* Cached policy */ +static IMG_UINT32 gui32CachedPolicy; + +static PVRSRV_ERROR RGXCheckTrampolineAddrs(struct _PVRSRV_DEVICE_NODE_ *psDevNode, + MMU_DEVICEATTRIBS *psDevAttrs, + IMG_UINT64 *pui64Addr); + +PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + IMG_BOOL bPhysBusAbove32Bit = 0; + + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, PHYS_BUS_WIDTH)) + { + bPhysBusAbove32Bit = RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) > 32; + } + + sRGXMMUDeviceAttributes.pszMMUPxPDumpMemSpaceName = + PhysHeapPDumpMemspaceName(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]); + + /* + * Setup sRGXMMUPCEConfig, no PC in MIPS MMU currently + */ + sRGXMMUPCEConfig.uiBytesPerEntry = 0; /* 32 bit entries */ + sRGXMMUPCEConfig.uiAddrMask = 0; /* Mask to get significant address bits of PC entry */ + + sRGXMMUPCEConfig.uiAddrShift = 0; /* Shift this many bits to get PD address in PC entry */ + sRGXMMUPCEConfig.uiAddrLog2Align = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_4K; /* Alignment of PD AND PC */ + + sRGXMMUPCEConfig.uiProtMask = RGX_MIPS_MMUCTRL_PCE_PROTMASK; /* Mask to get the status bits of the PC */ + sRGXMMUPCEConfig.uiProtShift = 0; /* Shift this many bits to have status bits starting with bit 0 */ + + sRGXMMUPCEConfig.uiValidEnMask = RGX_MIPS_MMUCTRL_PC_DATA_VALID_EN; /* Mask to get entry valid bit of the PC */ + sRGXMMUPCEConfig.uiValidEnShift = RGX_MIPS_MMUCTRL_PC_DATA_VALID_SHIFT; /* Shift this many bits to have entry valid bit starting with bit 0 */ + + /* + * Setup sRGXMMUTopLevelDevVAddrConfig + */ + sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask = 0; /* Get the PC address bits from a 40 bit virt. 
address (in a 64bit UINT) */ + sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift = 0; + sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPC = 0; + + sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask = 0; /* Get the PD address bits from a 40 bit virt. address (in a 64bit UINT) */ + sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift = 0; + sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPD = 0; + + sRGXMMUTopLevelDevVAddrConfig.uiPTIndexMask = IMG_UINT64_C(0xfffffff000); /* Get the PT address bits from a 40 bit virt. address (in a 64bit UINT) */ + sRGXMMUTopLevelDevVAddrConfig.uiPTIndexShift = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_4K; + sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPT = (RGX_NUM_OS_SUPPORTED << RGXMIPSFW_LOG2_PAGETABLE_SIZE_4K) >> RGXMIPSFW_LOG2_PTE_ENTRY_SIZE; + +/* + * + * Configuration for heaps with 4kB Data-Page size + * + */ + + /* + * Setup sRGXMMUPDEConfig_4KBDP. No PD in MIPS MMU currently + */ + sRGXMMUPDEConfig_4KBDP.uiBytesPerEntry = 0; + + /* No PD used for MIPS */ + sRGXMMUPDEConfig_4KBDP.uiAddrMask = 0; + sRGXMMUPDEConfig_4KBDP.uiAddrShift = 0; + sRGXMMUPDEConfig_4KBDP.uiAddrLog2Align = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_4K; + + sRGXMMUPDEConfig_4KBDP.uiVarCtrlMask = IMG_UINT64_C(0x0); + sRGXMMUPDEConfig_4KBDP.uiVarCtrlShift = 0; + + sRGXMMUPDEConfig_4KBDP.uiProtMask = RGX_MIPS_MMUCTRL_PDE_PROTMASK; + sRGXMMUPDEConfig_4KBDP.uiProtShift = 0; + + sRGXMMUPDEConfig_4KBDP.uiValidEnMask = RGX_MIPS_MMUCTRL_PD_DATA_VALID_EN; + sRGXMMUPDEConfig_4KBDP.uiValidEnShift = RGX_MIPS_MMUCTRL_PD_DATA_VALID_SHIFT; + + /* + * Setup sRGXMMUPTEConfig_4KBDP. 
+ */ + sRGXMMUPTEConfig_4KBDP.uiBytesPerEntry = 1 << RGXMIPSFW_LOG2_PTE_ENTRY_SIZE; + + + if (bPhysBusAbove32Bit) + { + sRGXMMUPTEConfig_4KBDP.uiAddrMask = RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT; + gui32CachedPolicy = RGXMIPSFW_CACHED_POLICY_ABOVE_32BIT; + } + else + { + sRGXMMUPTEConfig_4KBDP.uiAddrMask = RGXMIPSFW_ENTRYLO_PFN_MASK; + gui32CachedPolicy = RGXMIPSFW_CACHED_POLICY; + } + + sRGXMMUPTEConfig_4KBDP.uiAddrShift = RGXMIPSFW_ENTRYLO_PFN_SHIFT; + sRGXMMUPTEConfig_4KBDP.uiAddrLog2Align = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_4K; + + sRGXMMUPTEConfig_4KBDP.uiProtMask = RGXMIPSFW_ENTRYLO_DVG | ~RGXMIPSFW_ENTRYLO_CACHE_POLICY_CLRMSK | + RGXMIPSFW_ENTRYLO_READ_INHIBIT_EN | RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_EN; + sRGXMMUPTEConfig_4KBDP.uiProtShift = 0; + + sRGXMMUPTEConfig_4KBDP.uiValidEnMask = RGXMIPSFW_ENTRYLO_VALID_EN; + sRGXMMUPTEConfig_4KBDP.uiValidEnShift = RGXMIPSFW_ENTRYLO_VALID_SHIFT; + + /* + * Setup sRGXMMUDevVAddrConfig_4KBDP + */ + sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask = 0; + sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift = 0; + sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPC = 0; + + + sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask = 0; + sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift = 0; + sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPD = 0; + + sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask = ~RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift = RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPT = (RGX_NUM_OS_SUPPORTED << RGXMIPSFW_LOG2_PAGETABLE_SIZE_4K) >> RGXMIPSFW_LOG2_PTE_ENTRY_SIZE; + + + sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000000fff); + sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetShift = 0; + sRGXMMUDevVAddrConfig_4KBDP.uiOffsetInBytes = RGX_FIRMWARE_RAW_HEAP_BASE & IMG_UINT64_C(0x00ffffffff); + + /* + * Setup gsPageSizeConfig4KB + */ + gsPageSizeConfig4KB.psPDEConfig = &sRGXMMUPDEConfig_4KBDP; + gsPageSizeConfig4KB.psPTEConfig = &sRGXMMUPTEConfig_4KBDP; + 
gsPageSizeConfig4KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_4KBDP; + gsPageSizeConfig4KB.uiRefCount = 0; + gsPageSizeConfig4KB.uiMaxRefCount = 0; + + +/* + * + * Configuration for heaps with 16kB Data-Page size + * + */ + + /* + * Setup sRGXMMUPDEConfig_16KBDP + */ + sRGXMMUPDEConfig_16KBDP.uiBytesPerEntry = 0; + + sRGXMMUPDEConfig_16KBDP.uiAddrMask = 0; + sRGXMMUPDEConfig_16KBDP.uiAddrShift = 0; /* These are for a page directory ENTRY, meaning the address of a PT cropped to suit the PD */ + sRGXMMUPDEConfig_16KBDP.uiAddrLog2Align = 0; /* Alignment of the page tables NOT directories */ + + sRGXMMUPDEConfig_16KBDP.uiVarCtrlMask = 0; + sRGXMMUPDEConfig_16KBDP.uiVarCtrlShift = 0; + + sRGXMMUPDEConfig_16KBDP.uiProtMask = 0; + sRGXMMUPDEConfig_16KBDP.uiProtShift = 0; + + sRGXMMUPDEConfig_16KBDP.uiValidEnMask = 0; + sRGXMMUPDEConfig_16KBDP.uiValidEnShift = 0; + + /* + * Setup sRGXMMUPTEConfig_16KBDP. Not supported yet + */ + sRGXMMUPTEConfig_16KBDP.uiBytesPerEntry = 0; + + sRGXMMUPTEConfig_16KBDP.uiAddrMask = 0; + sRGXMMUPTEConfig_16KBDP.uiAddrShift = 0; /* These are for a page table ENTRY, meaning the address of a PAGE cropped to suit the PD */ + sRGXMMUPTEConfig_16KBDP.uiAddrLog2Align = 0; /* Alignment of the pages NOT tables */ + + sRGXMMUPTEConfig_16KBDP.uiProtMask = 0; + sRGXMMUPTEConfig_16KBDP.uiProtShift = 0; + + sRGXMMUPTEConfig_16KBDP.uiValidEnMask = 0; + sRGXMMUPTEConfig_16KBDP.uiValidEnShift = 0; + + /* + * Setup sRGXMMUDevVAddrConfig_16KBDP + */ + sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask = 0; + sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift = 0; + sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPC = 0; + + sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask = 0; + sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift = 0; + sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPD = 0; + + sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask = 0; + sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift = 0; + sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPT = 0; + + sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetMask = 0; + 
sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetShift = 0; + sRGXMMUDevVAddrConfig_16KBDP.uiOffsetInBytes = 0; + + /* + * Setup gsPageSizeConfig16KB + */ + gsPageSizeConfig16KB.psPDEConfig = &sRGXMMUPDEConfig_16KBDP; + gsPageSizeConfig16KB.psPTEConfig = &sRGXMMUPTEConfig_16KBDP; + gsPageSizeConfig16KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_16KBDP; + gsPageSizeConfig16KB.uiRefCount = 0; + gsPageSizeConfig16KB.uiMaxRefCount = 0; + + +/* + * + * Configuration for heaps with 64kB Data-Page size. Not supported yet + * + */ + + /* + * Setup sRGXMMUPDEConfig_64KBDP + */ + sRGXMMUPDEConfig_64KBDP.uiBytesPerEntry = 0; + + sRGXMMUPDEConfig_64KBDP.uiAddrMask = 0; + sRGXMMUPDEConfig_64KBDP.uiAddrShift = 0; + sRGXMMUPDEConfig_64KBDP.uiAddrLog2Align = 0; + + sRGXMMUPDEConfig_64KBDP.uiVarCtrlMask = 0; + sRGXMMUPDEConfig_64KBDP.uiVarCtrlShift = 0; + + sRGXMMUPDEConfig_64KBDP.uiProtMask = 0; + sRGXMMUPDEConfig_64KBDP.uiProtShift = 0; + + sRGXMMUPDEConfig_64KBDP.uiValidEnMask = 0; + sRGXMMUPDEConfig_64KBDP.uiValidEnShift = 0; + + /* + * Setup sRGXMMUPTEConfig_64KBDP. 
+ * + */ + sRGXMMUPTEConfig_64KBDP.uiBytesPerEntry = 1 << RGXMIPSFW_LOG2_PTE_ENTRY_SIZE; + + if (bPhysBusAbove32Bit) + { + sRGXMMUPTEConfig_64KBDP.uiAddrMask = RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT; + gui32CachedPolicy = RGXMIPSFW_CACHED_POLICY_ABOVE_32BIT; + } + else + { + sRGXMMUPTEConfig_64KBDP.uiAddrMask = RGXMIPSFW_ENTRYLO_PFN_MASK; + gui32CachedPolicy = RGXMIPSFW_CACHED_POLICY; + } + + /* Even while using 64K pages, MIPS still aligns addresses to 4K */ + sRGXMMUPTEConfig_64KBDP.uiAddrShift = RGXMIPSFW_ENTRYLO_PFN_SHIFT; + sRGXMMUPTEConfig_64KBDP.uiAddrLog2Align = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_4K; + + sRGXMMUPTEConfig_64KBDP.uiProtMask = RGXMIPSFW_ENTRYLO_DVG | ~RGXMIPSFW_ENTRYLO_CACHE_POLICY_CLRMSK | + RGXMIPSFW_ENTRYLO_READ_INHIBIT_EN | RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_EN; + sRGXMMUPTEConfig_64KBDP.uiProtShift = 0; + + sRGXMMUPTEConfig_64KBDP.uiValidEnMask = RGXMIPSFW_ENTRYLO_VALID_EN; + sRGXMMUPTEConfig_64KBDP.uiValidEnShift = RGXMIPSFW_ENTRYLO_VALID_SHIFT; + + /* + * Setup sRGXMMUDevVAddrConfig_64KBDP. + */ + sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask = 0; + sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift = 0; + sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPC = 0; + + sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask = 0; + sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift = 0; + sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPD = 0; + + sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask = IMG_UINT64_C(0x00ffff0000); + sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_64K; + sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPT = (RGX_NUM_OS_SUPPORTED << RGXMIPSFW_LOG2_PAGETABLE_SIZE_64K) >> RGXMIPSFW_LOG2_PTE_ENTRY_SIZE; + + sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetMask = IMG_UINT64_C(0x000000ffff); + sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetShift = 0; + sRGXMMUDevVAddrConfig_64KBDP.uiOffsetInBytes = RGX_FIRMWARE_RAW_HEAP_BASE & IMG_UINT64_C(0x00ffffffff); + + /* + * Setup gsPageSizeConfig64KB. 
+ */ + gsPageSizeConfig64KB.psPDEConfig = &sRGXMMUPDEConfig_64KBDP; + gsPageSizeConfig64KB.psPTEConfig = &sRGXMMUPTEConfig_64KBDP; + gsPageSizeConfig64KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_64KBDP; + gsPageSizeConfig64KB.uiRefCount = 0; + gsPageSizeConfig64KB.uiMaxRefCount = 0; + + +/* + * + * Configuration for heaps with 256kB Data-Page size. Not supported yet + * + */ + + /* + * Setup sRGXMMUPDEConfig_256KBDP + */ + sRGXMMUPDEConfig_256KBDP.uiBytesPerEntry = 0; + + sRGXMMUPDEConfig_256KBDP.uiAddrMask = 0; + sRGXMMUPDEConfig_256KBDP.uiAddrShift = 0; + sRGXMMUPDEConfig_256KBDP.uiAddrLog2Align = 0; + + sRGXMMUPDEConfig_256KBDP.uiVarCtrlMask = 0; + sRGXMMUPDEConfig_256KBDP.uiVarCtrlShift = 0; + + sRGXMMUPDEConfig_256KBDP.uiProtMask = 0; + sRGXMMUPDEConfig_256KBDP.uiProtShift = 0; + + sRGXMMUPDEConfig_256KBDP.uiValidEnMask = 0; + sRGXMMUPDEConfig_256KBDP.uiValidEnShift = 0; + + /* + * Setup MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP + */ + sRGXMMUPTEConfig_256KBDP.uiBytesPerEntry = 0; + + sRGXMMUPTEConfig_256KBDP.uiAddrMask = 0; + sRGXMMUPTEConfig_256KBDP.uiAddrShift = 0; + sRGXMMUPTEConfig_256KBDP.uiAddrLog2Align = 0; + + sRGXMMUPTEConfig_256KBDP.uiProtMask = 0; + sRGXMMUPTEConfig_256KBDP.uiProtShift = 0; + + sRGXMMUPTEConfig_256KBDP.uiValidEnMask = 0; + sRGXMMUPTEConfig_256KBDP.uiValidEnShift = 0; + + /* + * Setup sRGXMMUDevVAddrConfig_256KBDP + */ + sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask = 0; + sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift = 0; + sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPC = 0; + + sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask = 0; + sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift = 0; + sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPD = 0; + + sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask = 0; + sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift = 0; + sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPT = 0; + + sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetMask = 0; + sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetShift = 0; + 
sRGXMMUDevVAddrConfig_256KBDP.uiOffsetInBytes = 0; + + /* + * Setup gsPageSizeConfig256KB + */ + gsPageSizeConfig256KB.psPDEConfig = &sRGXMMUPDEConfig_256KBDP; + gsPageSizeConfig256KB.psPTEConfig = &sRGXMMUPTEConfig_256KBDP; + gsPageSizeConfig256KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_256KBDP; + gsPageSizeConfig256KB.uiRefCount = 0; + gsPageSizeConfig256KB.uiMaxRefCount = 0; + + /* + * Setup sRGXMMUPDEConfig_1MBDP. Not supported yet + */ + sRGXMMUPDEConfig_1MBDP.uiBytesPerEntry = 0; + + sRGXMMUPDEConfig_1MBDP.uiAddrMask = 0; + sRGXMMUPDEConfig_1MBDP.uiAddrShift = 0; + sRGXMMUPDEConfig_1MBDP.uiAddrLog2Align = 0; + + sRGXMMUPDEConfig_1MBDP.uiVarCtrlMask = 0; + sRGXMMUPDEConfig_1MBDP.uiVarCtrlShift = 0; + + sRGXMMUPDEConfig_1MBDP.uiProtMask = 0; + sRGXMMUPDEConfig_1MBDP.uiProtShift = 0; + + sRGXMMUPDEConfig_1MBDP.uiValidEnMask = 0; + sRGXMMUPDEConfig_1MBDP.uiValidEnShift = 0; + + /* + * Setup sRGXMMUPTEConfig_1MBDP + */ + sRGXMMUPTEConfig_1MBDP.uiBytesPerEntry = 8; + + sRGXMMUPTEConfig_1MBDP.uiAddrMask = 0; + sRGXMMUPTEConfig_1MBDP.uiAddrShift = 0; + sRGXMMUPTEConfig_1MBDP.uiAddrLog2Align = 0; + + sRGXMMUPTEConfig_1MBDP.uiProtMask = 0; + sRGXMMUPTEConfig_1MBDP.uiProtShift = 0; + + sRGXMMUPTEConfig_1MBDP.uiValidEnMask = 0; + sRGXMMUPTEConfig_1MBDP.uiValidEnShift = 0; + + /* + * Setup sRGXMMUDevVAddrConfig_1MBDP + */ + sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask = 0; + sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift = 0; + sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPC = 0; + + sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask = 0; + sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift = 0; + sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPD = 0; + + sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask = 0; + sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift = 0; + sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPT = 0; + + sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetMask = 0; + sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetShift = 0; + sRGXMMUDevVAddrConfig_1MBDP.uiOffsetInBytes = 0; + + /* + * Setup gsPageSizeConfig1MB + */ + 
gsPageSizeConfig1MB.psPDEConfig = &sRGXMMUPDEConfig_1MBDP; + gsPageSizeConfig1MB.psPTEConfig = &sRGXMMUPTEConfig_1MBDP; + gsPageSizeConfig1MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_1MBDP; + gsPageSizeConfig1MB.uiRefCount = 0; + gsPageSizeConfig1MB.uiMaxRefCount = 0; + + /* + * Setup sRGXMMUPDEConfig_2MBDP. Not supported yet + */ + sRGXMMUPDEConfig_2MBDP.uiBytesPerEntry = 0; + + sRGXMMUPDEConfig_2MBDP.uiAddrMask = 0; + sRGXMMUPDEConfig_2MBDP.uiAddrShift = 0; + sRGXMMUPDEConfig_2MBDP.uiAddrLog2Align = 0; + + sRGXMMUPDEConfig_2MBDP.uiVarCtrlMask = 0; + sRGXMMUPDEConfig_2MBDP.uiVarCtrlShift = 0; + + sRGXMMUPDEConfig_2MBDP.uiProtMask = 0; + sRGXMMUPDEConfig_2MBDP.uiProtShift = 0; + + sRGXMMUPDEConfig_2MBDP.uiValidEnMask = 0; + sRGXMMUPDEConfig_2MBDP.uiValidEnShift = 0; + + /* + * Setup sRGXMMUPTEConfig_2MBDP + */ + sRGXMMUPTEConfig_2MBDP.uiBytesPerEntry = 0; + + sRGXMMUPTEConfig_2MBDP.uiAddrMask = 0; + sRGXMMUPTEConfig_2MBDP.uiAddrShift = 0; + sRGXMMUPTEConfig_2MBDP.uiAddrLog2Align = 0; + + sRGXMMUPTEConfig_2MBDP.uiProtMask = 0; + sRGXMMUPTEConfig_2MBDP.uiProtShift = 0; + + sRGXMMUPTEConfig_2MBDP.uiValidEnMask = 0; + sRGXMMUPTEConfig_2MBDP.uiValidEnShift = 0; + + /* + * Setup sRGXMMUDevVAddrConfig_2MBDP + */ + sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask = 0; + sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift = 0; + sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPC = 0; + + sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask = 0; + sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift = 0; + sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPD = 0; + + sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask = 0; + sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift = 0; + sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPT = 0; + + sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetMask = 0; + sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetShift = 0; + sRGXMMUDevVAddrConfig_2MBDP.uiOffsetInBytes = 0; + + /* + * Setup gsPageSizeConfig2MB + */ + gsPageSizeConfig2MB.psPDEConfig = &sRGXMMUPDEConfig_2MBDP; + gsPageSizeConfig2MB.psPTEConfig = &sRGXMMUPTEConfig_2MBDP; 
+ gsPageSizeConfig2MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_2MBDP; + gsPageSizeConfig2MB.uiRefCount = 0; + gsPageSizeConfig2MB.uiMaxRefCount = 0; + + /* + * Setup sRGXMMUDeviceAttributes + */ + sRGXMMUDeviceAttributes.eMMUType = PDUMP_MMU_TYPE_MIPS_MICROAPTIV; + sRGXMMUDeviceAttributes.eTopLevel = MMU_LEVEL_1; + + /* + * The page table fits in one or more big physically adjacent pages, + * at most as big as the page table itself. + * To calculate its alignment/page size, calculate the log2 size of the page + * table taking into account all OSes, then round that down to a valid MIPS + * log2 page size (12, 14, 16 for a 4K, 16K, 64K page size). + */ + sRGXMMUDeviceAttributes.ui32BaseAlign = + (CeilLog2(RGX_NUM_OS_SUPPORTED) + RGXMIPSFW_LOG2_PAGETABLE_SIZE_4K) & ~1U; + + /* 256K alignment might be too hard to achieve, fall back to 64K */ + sRGXMMUDeviceAttributes.ui32BaseAlign = + MIN(sRGXMMUDeviceAttributes.ui32BaseAlign, RGXMIPSFW_LOG2_PAGE_SIZE_64K); + + + + /* The base configuration is set to 4kB pages*/ + sRGXMMUDeviceAttributes.psBaseConfig = &sRGXMMUPTEConfig_4KBDP; + sRGXMMUDeviceAttributes.psTopLevelDevVAddrConfig = &sRGXMMUTopLevelDevVAddrConfig; + + /* Functions for deriving page table/dir/cat protection bits */ + sRGXMMUDeviceAttributes.pfnDerivePCEProt8 = RGXDerivePCEProt8; + sRGXMMUDeviceAttributes.pfnDerivePCEProt4 = RGXDerivePCEProt4; + sRGXMMUDeviceAttributes.pfnDerivePDEProt8 = RGXDerivePDEProt8; + sRGXMMUDeviceAttributes.pfnDerivePDEProt4 = RGXDerivePDEProt4; + sRGXMMUDeviceAttributes.pfnDerivePTEProt8 = RGXDerivePTEProt8; + sRGXMMUDeviceAttributes.pfnDerivePTEProt4 = RGXDerivePTEProt4; + + /* Functions for establishing configurations for PDE/PTE/DEVVADDR + on per-heap basis */ + sRGXMMUDeviceAttributes.pfnGetPageSizeConfiguration = RGXGetPageSizeConfigCB; + sRGXMMUDeviceAttributes.pfnPutPageSizeConfiguration = RGXPutPageSizeConfigCB; + + sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE4 = RGXGetPageSizeFromPDE4; + 
sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE8 = RGXGetPageSizeFromPDE8; + + psDeviceNode->psFirmwareMMUDevAttrs = &sRGXMMUDeviceAttributes; + + psDeviceNode->pfnValidateOrTweakPhysAddrs = RGXCheckTrampolineAddrs; + + return PVRSRV_OK; +} + +static PVRSRV_ERROR RGXCheckTrampolineAddrs(struct _PVRSRV_DEVICE_NODE_ *psDevNode, + MMU_DEVICEATTRIBS *psDevAttrs, + IMG_UINT64 *pui64Addr) +{ + if (PVRSRV_IS_FEATURE_SUPPORTED(psDevNode, MIPS)) + { + /* + * If mapping for the MIPS FW context, check for sensitive PAs + */ + if (psDevAttrs == psDevNode->psFirmwareMMUDevAttrs) + { + PVRSRV_RGXDEV_INFO *psDevice = (PVRSRV_RGXDEV_INFO *)psDevNode->pvDevice; + + if (RGXMIPSFW_SENSITIVE_ADDR(*pui64Addr)) + { + *pui64Addr = psDevice->psTrampoline->sPhysAddr.uiAddr + RGXMIPSFW_TRAMPOLINE_OFFSET(*pui64Addr); + } + /* FIX_HW_BRN_63553 is mainlined for all MIPS cores */ + else if (*pui64Addr == 0x0 && !psDevice->sLayerParams.bDevicePA0IsValid) + { + PVR_DPF((PVR_DBG_ERROR, "%s attempt to map addr 0x0 in the FW but 0x0 is not considered valid.", __func__)); + return PVRSRV_ERROR_MMU_FAILED_TO_MAP_PAGE_TABLE; + } + } + } + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXMipsMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + + eError = PVRSRV_OK; + +#if defined(PDUMP) + psDeviceNode->pfnMMUGetContextID = NULL; +#endif + + psDeviceNode->psFirmwareMMUDevAttrs = NULL; + +#if defined(DEBUG) + PVR_DPF((PVR_DBG_MESSAGE, "Variable Page Size Heap Stats:")); + PVR_DPF((PVR_DBG_MESSAGE, "Max 4K page heaps: %d", + gsPageSizeConfig4KB.uiMaxRefCount)); + PVR_DPF((PVR_DBG_VERBOSE, "Current 4K page heaps (should be 0): %d", + gsPageSizeConfig4KB.uiRefCount)); + PVR_DPF((PVR_DBG_MESSAGE, "Max 16K page heaps: %d", + gsPageSizeConfig16KB.uiMaxRefCount)); + PVR_DPF((PVR_DBG_VERBOSE, "Current 16K page heaps (should be 0): %d", + gsPageSizeConfig16KB.uiRefCount)); + PVR_DPF((PVR_DBG_MESSAGE, "Max 64K page heaps: %d", + gsPageSizeConfig64KB.uiMaxRefCount)); + PVR_DPF((PVR_DBG_VERBOSE, 
"Current 64K page heaps (should be 0): %d", + gsPageSizeConfig64KB.uiRefCount)); + PVR_DPF((PVR_DBG_MESSAGE, "Max 256K page heaps: %d", + gsPageSizeConfig256KB.uiMaxRefCount)); + PVR_DPF((PVR_DBG_VERBOSE, "Current 256K page heaps (should be 0): %d", + gsPageSizeConfig256KB.uiRefCount)); + PVR_DPF((PVR_DBG_MESSAGE, "Max 1M page heaps: %d", + gsPageSizeConfig1MB.uiMaxRefCount)); + PVR_DPF((PVR_DBG_VERBOSE, "Current 1M page heaps (should be 0): %d", + gsPageSizeConfig1MB.uiRefCount)); + PVR_DPF((PVR_DBG_MESSAGE, "Max 2M page heaps: %d", + gsPageSizeConfig2MB.uiMaxRefCount)); + PVR_DPF((PVR_DBG_VERBOSE, "Current 2M page heaps (should be 0): %d", + gsPageSizeConfig2MB.uiRefCount)); +#endif + if (gsPageSizeConfig4KB.uiRefCount > 0 || + gsPageSizeConfig16KB.uiRefCount > 0 || + gsPageSizeConfig64KB.uiRefCount > 0 || + gsPageSizeConfig256KB.uiRefCount > 0 || + gsPageSizeConfig1MB.uiRefCount > 0 || + gsPageSizeConfig2MB.uiRefCount > 0 + ) + { + PVR_DPF((PVR_DBG_ERROR, "RGXMMUInit_Unregister: Unbalanced MMU API Usage (Internal error)")); + } + + return eError; +} + +/*************************************************************************/ /*! +@Function RGXDerivePCEProt4 +@Description calculate the PCE protection flags based on a 4 byte entry +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags) +{ + PVR_DPF((PVR_DBG_ERROR, "Page Catalog not supported on MIPS MMU")); + return 0; +} + + +/*************************************************************************/ /*! 
+@Function RGXDerivePCEProt8
+@Description calculate the PCE protection flags based on an 8 byte entry
+@Return IMG_UINT64 The PCE protection bits (always 0: no Page Catalog on the MIPS MMU)
+*/ /**************************************************************************/
+static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+ PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+ PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);
+
+ PVR_DPF((PVR_DBG_ERROR, "Page Catalog not supported on MIPS MMU"));
+ return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function RGXDerivePDEProt4
+@Description derive the PDE protection flags based on a 4 byte entry
+@Return IMG_UINT32 The PDE protection bits (always 0: no Page Directory on the MIPS MMU)
+*/ /**************************************************************************/
+static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+ PVR_DPF((PVR_DBG_ERROR, "Page Directory not supported on MIPS MMU"));
+ return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function RGXDerivePDEProt8
+@Description derive the PDE protection flags based on an 8 byte entry
+
+@Input uiLog2DataPageSize The log2 of the required page size.
+ E.g., for 4KiB pages, this parameter must be 12.
+ For 2MiB pages, it must be set to 21.
+
+@Return IMG_UINT64 The PDE protection bits (always 0: no Page Directory on the MIPS MMU)
+*/ /**************************************************************************/
+static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+ PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+ PVR_DPF((PVR_DBG_ERROR, "Page Directory not supported on MIPS MMU"));
+ return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function RGXDerivePTEProt4 +@Description calculate the PTE protection flags based on a 4 byte entry +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags) +{ + IMG_UINT32 ui32MMUFlags = 0; + + if (((MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE) & uiProtFlags) == (MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE)) + { + /* read/write */ + ui32MMUFlags |= RGXMIPSFW_ENTRYLO_DIRTY_EN; + } + else if (MMU_PROTFLAGS_READABLE & uiProtFlags) + { + /* read only */ + } + else if (MMU_PROTFLAGS_WRITEABLE & uiProtFlags) + { + /* write only */ + ui32MMUFlags |= RGXMIPSFW_ENTRYLO_READ_INHIBIT_EN; + } + else if ((MMU_PROTFLAGS_INVALID & uiProtFlags) == 0) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt4: neither read nor write specified...")); + } + + /* cache coherency */ + if (MMU_PROTFLAGS_CACHE_COHERENT & uiProtFlags) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt4: cache coherency not supported for MIPS caches")); + } + + /* cache setup */ + if ((MMU_PROTFLAGS_CACHED & uiProtFlags) == 0) + { + ui32MMUFlags |= RGXMIPSFW_ENTRYLO_UNCACHED; + } + else + { + ui32MMUFlags |= gui32CachedPolicy << + RGXMIPSFW_ENTRYLO_CACHE_POLICY_SHIFT; + } + + if ((uiProtFlags & MMU_PROTFLAGS_INVALID) == 0) + { + ui32MMUFlags |= RGXMIPSFW_ENTRYLO_VALID_EN; + ui32MMUFlags |= RGXMIPSFW_ENTRYLO_GLOBAL_EN; + } + + if (MMU_PROTFLAGS_DEVICE(PMMETA_PROTECT) & uiProtFlags) + { + /* PVR_DPF((PVR_DBG_WARNING, "RGXDerivePTEProt4: PMMETA Protect not existent for MIPS, option discarded")); */ + } + + return ui32MMUFlags; +} + +/*************************************************************************/ /*! 
+@Function RGXDerivePTEProt8 +@Description calculate the PTE protection flags based on an 8 byte entry +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize) +{ + PVR_UNREFERENCED_PARAMETER(uiProtFlags); + PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize); + + PVR_DPF((PVR_DBG_ERROR, "8-byte PTE not supported on this device")); + + return 0; +} + + +/*************************************************************************/ /*! +@Function RGXGetPageSizeConfig +@Description Set up configuration for variable sized data pages. + RGXPutPageSizeConfigCB has to be called to ensure correct + refcounting. +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize, + const MMU_PxE_CONFIG **ppsMMUPDEConfig, + const MMU_PxE_CONFIG **ppsMMUPTEConfig, + const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig, + IMG_HANDLE *phPriv) +{ + MMU_PAGESIZECONFIG *psPageSizeConfig; + + switch (uiLog2DataPageSize) + { + case RGXMIPSFW_LOG2_PAGE_SIZE_64K: + psPageSizeConfig = &gsPageSizeConfig64KB; + break; + case RGXMIPSFW_LOG2_PAGE_SIZE_4K: + psPageSizeConfig = &gsPageSizeConfig4KB; + break; + default: + PVR_DPF((PVR_DBG_ERROR, + "RGXGetPageSizeConfigCB: Invalid Data Page Size 1<<0x%x", + uiLog2DataPageSize)); + *phPriv = NULL; + return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; + } + + /* Refer caller's pointers to the data */ + *ppsMMUPDEConfig = psPageSizeConfig->psPDEConfig; + *ppsMMUPTEConfig = psPageSizeConfig->psPTEConfig; + *ppsMMUDevVAddrConfig = psPageSizeConfig->psDevVAddrConfig; + +#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT) + /* Increment ref-count - not that we're allocating anything here + (I'm using static structs), but one day we might, so we want + the Get/Put code to be balanced properly */ + 
psPageSizeConfig->uiRefCount++; + + /* This is purely for debug statistics */ + psPageSizeConfig->uiMaxRefCount = MAX(psPageSizeConfig->uiMaxRefCount, + psPageSizeConfig->uiRefCount); +#endif + + *phPriv = (IMG_HANDLE)(uintptr_t)uiLog2DataPageSize; + PVR_ASSERT (uiLog2DataPageSize == (IMG_UINT32)(uintptr_t)*phPriv); + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! +@Function RGXPutPageSizeConfig +@Description Tells this code that the mmu module is done with the + configurations set in RGXGetPageSizeConfig. This can + be a no-op. + Called after RGXGetPageSizeConfigCB. +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv) +{ +#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT) + MMU_PAGESIZECONFIG *psPageSizeConfig; + IMG_UINT32 uiLog2DataPageSize; + + uiLog2DataPageSize = (IMG_UINT32)(uintptr_t) hPriv; + + switch (uiLog2DataPageSize) + { + case RGXMIPSFW_LOG2_PAGE_SIZE_64K: + psPageSizeConfig = &gsPageSizeConfig64KB; + break; + case RGXMIPSFW_LOG2_PAGE_SIZE_4K: + psPageSizeConfig = &gsPageSizeConfig4KB; + break; + default: + PVR_DPF((PVR_DBG_ERROR, + "RGXPutPageSizeConfigCB: Invalid Data Page Size 1<<0x%x", + uiLog2DataPageSize)); + return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; + } + + /* Ref-count here is not especially useful, but it's an extra + check that the API is being used correctly */ + psPageSizeConfig->uiRefCount--; +#else + PVR_UNREFERENCED_PARAMETER(hPriv); +#endif + return PVRSRV_OK; +} + +static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize) +{ + PVR_UNREFERENCED_PARAMETER(ui32PDE); + PVR_UNREFERENCED_PARAMETER(pui32Log2PageSize); + PVR_DPF((PVR_DBG_ERROR, "PDE not supported on MIPS")); + return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; +} + +static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize) 
+{ + PVR_UNREFERENCED_PARAMETER(ui64PDE); + PVR_UNREFERENCED_PARAMETER(pui32Log2PageSize); + PVR_DPF((PVR_DBG_ERROR, "PDE not supported on MIPS")); + return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; +} diff --git a/drivers/gpu/drm/phytium/octopus/rgxmipsmmuinit.h b/drivers/gpu/drm/phytium/octopus/rgxmipsmmuinit.h new file mode 100644 index 000000000000..62e92dba59f9 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxmipsmmuinit.h @@ -0,0 +1,94 @@ +/*************************************************************************/ /*! +@File +@Title Device specific initialisation routines +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Device specific MMU initialisation for the MIPS firmware +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/* NB: this file is not to be included arbitrarily. It exists solely + for the linkage between rgxinit.c and rgxmmuinit.c, the former + being otherwise cluttered by the contents of the latter */ + +#ifndef SRVKM_RGXMIPSMMUINIT_H +#define SRVKM_RGXMIPSMMUINIT_H + +#include "device.h" +#include "img_types.h" +#include "mmu_common.h" +#include "img_defs.h" +#include "rgx_mips.h" + +/* + + Labelling of fields within virtual address. 
No PD and PC are used currently for + the MIPS MMU +*/ +/* +Page Table entry # +*/ +#define RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_SHIFT (12U) +#define RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000FFF)) + + +/* PC entries related definitions */ +/* No PC is currently used for MIPS MMU */ +#define RGX_MIPS_MMUCTRL_PC_DATA_VALID_EN (0U) +#define RGX_MIPS_MMUCTRL_PC_DATA_VALID_SHIFT (0U) +#define RGX_MIPS_MMUCTRL_PC_DATA_VALID_CLRMSK (0U) + +#define RGX_MIPS_MMUCTRL_PC_DATA_READ_ONLY_SHIFT (0U) +#define RGX_MIPS_MMUCTRL_PC_DATA_READ_ONLY_CLRMSK (0U) +#define RGX_MIPS_MMUCTRL_PC_DATA_READ_ONLY_EN (0U) + +/* PD entries related definitions */ +/* No PD is currently used for MIPS MMU */ +#define RGX_MIPS_MMUCTRL_PD_DATA_VALID_EN (0U) +#define RGX_MIPS_MMUCTRL_PD_DATA_VALID_SHIFT (0U) +#define RGX_MIPS_MMUCTRL_PD_DATA_VALID_CLRMSK (0U) + +#define RGX_MIPS_MMUCTRL_PD_DATA_READ_ONLY_SHIFT (0U) +#define RGX_MIPS_MMUCTRL_PD_DATA_READ_ONLY_CLRMSK (0U) +#define RGX_MIPS_MMUCTRL_PD_DATA_READ_ONLY_EN (0U) + + +PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode); +PVRSRV_ERROR RGXMipsMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode); + + +#endif /* #ifndef SRVKM_RGXMIPSMMUINIT_H */ diff --git a/drivers/gpu/drm/phytium/octopus/rgxmmuinit.c b/drivers/gpu/drm/phytium/octopus/rgxmmuinit.c new file mode 100644 index 000000000000..e6bac6ffcc69 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxmmuinit.c @@ -0,0 +1,1272 @@ +/*************************************************************************/ /*! +@File +@Title Device specific initialisation routines +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Device specific MMU initialisation +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ /**************************************************************************/ +#include "rgxmmuinit.h" +#include "rgxmmudefs_km.h" + +#include "rgxdevice.h" +#include "img_types.h" +#include "img_defs.h" +#include "mmu_common.h" +#include "pdump_mmu.h" + +#include "pvr_debug.h" +#include "pvrsrv_error.h" +#include "rgx_memallocflags.h" +#include "rgx_heaps.h" +#include "pdump_km.h" + + +/* useful macros */ +/* units represented in a bitfield */ +#define UNITS_IN_BITFIELD(Mask, Shift) ((Mask >> Shift) + 1) + + +/* + * Bits of PT, PD and PC not involving addresses + */ + + + +/* protection bits for MMU_VERSION <= 3 */ +#define RGX_MMUCTRL_PTE_PROTMASK (RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN | \ + ~RGX_MMUCTRL_PT_DATA_AXCACHE_CLRMSK | \ + RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN | \ + RGX_MMUCTRL_PT_DATA_PM_SRC_EN | \ + RGX_MMUCTRL_PT_DATA_CC_EN | \ + RGX_MMUCTRL_PT_DATA_READ_ONLY_EN | \ + RGX_MMUCTRL_PT_DATA_VALID_EN) + +#define RGX_MMUCTRL_PDE_PROTMASK (RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_EN | \ + ~RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK | \ + RGX_MMUCTRL_PD_DATA_VALID_EN) + +#define RGX_MMUCTRL_PCE_PROTMASK (RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_EN | \ + RGX_MMUCTRL_PC_DATA_VALID_EN) + + +/* + * protection bits for MMU_VERSION >= 4 + * MMU4 has no PENDING or PAGE_SIZE fields in PxE + */ +#define RGX_MMU4CTRL_PTE_PROTMASK (RGX_MMUCTRL_PTE_PROTMASK & ~RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN) + +#define RGX_MMU4CTRL_PDE_PROTMASK (RGX_MMUCTRL_PD_DATA_VALID_EN) + +#define RGX_MMU4CTRL_PCE_PROTMASK (RGX_MMUCTRL_PC_DATA_VALID_EN) + + + + +static MMU_PxE_CONFIG sRGXMMUPCEConfig; +static MMU_DEVVADDR_CONFIG sRGXMMUTopLevelDevVAddrConfig; + + +/* + * + * Configuration for heaps with 4kB Data-Page size + * + */ + +static MMU_PxE_CONFIG sRGXMMUPDEConfig_4KBDP; +static MMU_PxE_CONFIG sRGXMMUPTEConfig_4KBDP; +static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_4KBDP; +static MMU_PAGESIZECONFIG gsPageSizeConfig4KB; + + +/* + * + * Configuration for heaps with 16kB Data-Page size + * + */ + 
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_16KBDP; +static MMU_PxE_CONFIG sRGXMMUPTEConfig_16KBDP; +static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_16KBDP; +static MMU_PAGESIZECONFIG gsPageSizeConfig16KB; + + +/* + * + * Configuration for heaps with 64kB Data-Page size + * + */ + +static MMU_PxE_CONFIG sRGXMMUPDEConfig_64KBDP; +static MMU_PxE_CONFIG sRGXMMUPTEConfig_64KBDP; +static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_64KBDP; +static MMU_PAGESIZECONFIG gsPageSizeConfig64KB; + + +/* + * + * Configuration for heaps with 256kB Data-Page size + * + */ + +static MMU_PxE_CONFIG sRGXMMUPDEConfig_256KBDP; +static MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP; +static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_256KBDP; +static MMU_PAGESIZECONFIG gsPageSizeConfig256KB; + + +/* + * + * Configuration for heaps with 1MB Data-Page size + * + */ + +static MMU_PxE_CONFIG sRGXMMUPDEConfig_1MBDP; +static MMU_PxE_CONFIG sRGXMMUPTEConfig_1MBDP; +static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_1MBDP; +static MMU_PAGESIZECONFIG gsPageSizeConfig1MB; + + +/* + * + * Configuration for heaps with 2MB Data-Page size + * + */ + +static MMU_PxE_CONFIG sRGXMMUPDEConfig_2MBDP; +static MMU_PxE_CONFIG sRGXMMUPTEConfig_2MBDP; +static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_2MBDP; +static MMU_PAGESIZECONFIG gsPageSizeConfig2MB; + + +/* Forward declaration of protection bits derivation functions, for + the following structure */ +static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); +static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags); +static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); +static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags); +static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); +static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags); + +/* protection bits derivation functions for MMUv4 */ +static IMG_UINT64 RGXMMU4DerivePDEProt8(IMG_UINT32 
uiProtFlags, IMG_UINT32 uiLog2DataPageSize); +static PVRSRV_ERROR RGXMMU4GetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize); +static PVRSRV_ERROR RGXMMU4GetPageSizeFromVirtAddr(struct _PVRSRV_DEVICE_NODE_ *psDevNode, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_UINT32 *pui32Log2PageSize); + + +static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize, + const MMU_PxE_CONFIG **ppsMMUPDEConfig, + const MMU_PxE_CONFIG **ppsMMUPTEConfig, + const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig, + IMG_HANDLE *phPriv); + +static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv); + +static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize); +static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize); + +static MMU_DEVICEATTRIBS sRGXMMUDeviceAttributes; + +PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + IMG_BOOL bHaveMMU4 = (RGX_GET_FEATURE_VALUE(psDevInfo, MMU_VERSION) >= 4); + + /* Setup of Px Entries: + * + * + * PAGE TABLE (8 Byte): + * + * | 62 | 61...40 | 39...12 (varies) | 11...6 | 5 | 4 | 3 | 2 | 1 | 0 | + * | PM/Meta protect | VP Page (39:18) | Physical Page | VP Page (17:12) | Entry Pending | PM src | SLC Bypass Ctrl | Cache Coherency | Read Only | Valid | + * + * + * PAGE DIRECTORY (8 Byte): + * + * | 40 | 39...5 (varies) | 4 | 3...1 | 0 | + * | Entry Pending | Page Table base address | (reserved) | Page Size | Valid | + * + * + * PAGE CATALOGUE (4 Byte): + * + * | 31...4 | 3...2 | 1 | 0 | + * | Page Directory base address | (reserved) | Entry Pending | Valid | + * + */ + + + /* Example how to get the PD address from a PC entry. 
+ * The procedure is the same for PD and PT entries to retrieve PT and Page addresses: + * + * 1) sRGXMMUPCEConfig.uiAddrMask applied to PC entry with '&': + * | 31...4 | 3...2 | 1 | 0 | + * | PD Addr | 0 | 0 | 0 | + * + * 2) sRGXMMUPCEConfig.uiAddrShift applied with '>>': + * | 27...0 | + * | PD Addr | + * + * 3) sRGXMMUPCEConfig.uiAddrLog2Align applied with '<<': + * | 39...0 | + * | PD Addr | + * + */ + + + sRGXMMUDeviceAttributes.pszMMUPxPDumpMemSpaceName = + PhysHeapPDumpMemspaceName(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_GPU_LOCAL]); + + /* + * Setup sRGXMMUPCEConfig + */ + sRGXMMUPCEConfig.uiBytesPerEntry = 4; /* 32 bit entries */ + sRGXMMUPCEConfig.uiAddrMask = 0xfffffff0; /* Mask to get significant address bits of PC entry i.e. the address of the PD */ + + sRGXMMUPCEConfig.uiAddrShift = 4; /* Shift this many bits to get PD address */ + sRGXMMUPCEConfig.uiAddrLog2Align = 12; /* Alignment of PD physical addresses. */ + + sRGXMMUPCEConfig.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PCE_PROTMASK : RGX_MMUCTRL_PCE_PROTMASK; /* Mask to get the status bits */ + sRGXMMUPCEConfig.uiProtShift = 0; /* Shift this many bits to get the status bits */ + + sRGXMMUPCEConfig.uiValidEnMask = RGX_MMUCTRL_PC_DATA_VALID_EN; /* Mask to get entry valid bit of the PC */ + sRGXMMUPCEConfig.uiValidEnShift = RGX_MMUCTRL_PC_DATA_VALID_SHIFT; /* Shift this many bits to get entry valid bit */ + + /* + * Setup sRGXMMUTopLevelDevVAddrConfig + */ + sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; /* Mask to get PC index applied to a 40 bit virt. device address */ + sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; /* Shift a 40 bit virt. 
device address by this amount to get the PC index */ + sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask, + sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift)); + + sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; /* Mask to get PD index applied to a 40 bit virt. device address */ + sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; /* Shift a 40 bit virt. device address by this amount to get the PD index */ + sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask, + sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift)); + + /* + * + * Configuration for heaps with 4kB Data-Page size + * + */ + + /* + * Setup sRGXMMUPDEConfig_4KBDP + */ + sRGXMMUPDEConfig_4KBDP.uiBytesPerEntry = 8; + + sRGXMMUPDEConfig_4KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); + sRGXMMUPDEConfig_4KBDP.uiAddrShift = 12; + sRGXMMUPDEConfig_4KBDP.uiAddrLog2Align = 12; + + sRGXMMUPDEConfig_4KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); + sRGXMMUPDEConfig_4KBDP.uiVarCtrlShift = 1; + + sRGXMMUPDEConfig_4KBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PDE_PROTMASK : RGX_MMUCTRL_PDE_PROTMASK; + sRGXMMUPDEConfig_4KBDP.uiProtShift = 0; + + sRGXMMUPDEConfig_4KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; + sRGXMMUPDEConfig_4KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; + + /* + * Setup sRGXMMUPTEConfig_4KBDP + */ + sRGXMMUPTEConfig_4KBDP.uiBytesPerEntry = 8; + + sRGXMMUPTEConfig_4KBDP.uiAddrMask = IMG_UINT64_C(0xfffffff000); + sRGXMMUPTEConfig_4KBDP.uiAddrShift = 12; + sRGXMMUPTEConfig_4KBDP.uiAddrLog2Align = 12; /* Alignment of the physical addresses of the pages NOT PTs */ + + sRGXMMUPTEConfig_4KBDP.uiProtMask = bHaveMMU4 ? 
RGX_MMU4CTRL_PTE_PROTMASK : RGX_MMUCTRL_PTE_PROTMASK; + sRGXMMUPTEConfig_4KBDP.uiProtShift = 0; + + sRGXMMUPTEConfig_4KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; + sRGXMMUPTEConfig_4KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; + + /* + * Setup sRGXMMUDevVAddrConfig_4KBDP + */ + sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask, + sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift)); + + sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask, + sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift)); + + sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask = ~RGX_MMUCTRL_VADDR_PT_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift = RGX_MMUCTRL_VADDR_PT_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask, + sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift)); + + sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000000fff); + sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetShift = 0; + sRGXMMUDevVAddrConfig_4KBDP.uiOffsetInBytes = 0; + + /* + * Setup gsPageSizeConfig4KB + */ + gsPageSizeConfig4KB.psPDEConfig = &sRGXMMUPDEConfig_4KBDP; + gsPageSizeConfig4KB.psPTEConfig = &sRGXMMUPTEConfig_4KBDP; + gsPageSizeConfig4KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_4KBDP; + gsPageSizeConfig4KB.uiRefCount = 0; + gsPageSizeConfig4KB.uiMaxRefCount = 0; + + + /* + * + * Configuration for heaps with 16kB Data-Page size + * + */ + + /* + * Setup sRGXMMUPDEConfig_16KBDP + */ + sRGXMMUPDEConfig_16KBDP.uiBytesPerEntry = 8; + 
+ sRGXMMUPDEConfig_16KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); + sRGXMMUPDEConfig_16KBDP.uiAddrShift = 10; + sRGXMMUPDEConfig_16KBDP.uiAddrLog2Align = 10; + + sRGXMMUPDEConfig_16KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); + sRGXMMUPDEConfig_16KBDP.uiVarCtrlShift = 1; + + sRGXMMUPDEConfig_16KBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PDE_PROTMASK : RGX_MMUCTRL_PDE_PROTMASK; + sRGXMMUPDEConfig_16KBDP.uiProtShift = 0; + + sRGXMMUPDEConfig_16KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; + sRGXMMUPDEConfig_16KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; + + /* + * Setup sRGXMMUPTEConfig_16KBDP + */ + sRGXMMUPTEConfig_16KBDP.uiBytesPerEntry = 8; + + sRGXMMUPTEConfig_16KBDP.uiAddrMask = IMG_UINT64_C(0xffffffc000); + sRGXMMUPTEConfig_16KBDP.uiAddrShift = 14; + sRGXMMUPTEConfig_16KBDP.uiAddrLog2Align = 14; + + sRGXMMUPTEConfig_16KBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PTE_PROTMASK : RGX_MMUCTRL_PTE_PROTMASK; + sRGXMMUPTEConfig_16KBDP.uiProtShift = 0; + + sRGXMMUPTEConfig_16KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; + sRGXMMUPTEConfig_16KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; + + /* + * Setup sRGXMMUDevVAddrConfig_16KBDP + */ + sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask, + sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift)); + + + sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask, + sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift)); + + + sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001fc000); + 
sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift = 14; + sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask, + sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift)); + + sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000003fff); + sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetShift = 0; + sRGXMMUDevVAddrConfig_16KBDP.uiOffsetInBytes = 0; + + /* + * Setup gsPageSizeConfig16KB + */ + gsPageSizeConfig16KB.psPDEConfig = &sRGXMMUPDEConfig_16KBDP; + gsPageSizeConfig16KB.psPTEConfig = &sRGXMMUPTEConfig_16KBDP; + gsPageSizeConfig16KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_16KBDP; + gsPageSizeConfig16KB.uiRefCount = 0; + gsPageSizeConfig16KB.uiMaxRefCount = 0; + + + /* + * + * Configuration for heaps with 64kB Data-Page size + * + */ + + /* + * Setup sRGXMMUPDEConfig_64KBDP + */ + sRGXMMUPDEConfig_64KBDP.uiBytesPerEntry = 8; + + sRGXMMUPDEConfig_64KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); + sRGXMMUPDEConfig_64KBDP.uiAddrShift = 8; + sRGXMMUPDEConfig_64KBDP.uiAddrLog2Align = 8; + + sRGXMMUPDEConfig_64KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); + sRGXMMUPDEConfig_64KBDP.uiVarCtrlShift = 1; + + sRGXMMUPDEConfig_64KBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PDE_PROTMASK : RGX_MMUCTRL_PDE_PROTMASK; + sRGXMMUPDEConfig_64KBDP.uiProtShift = 0; + + sRGXMMUPDEConfig_64KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; + sRGXMMUPDEConfig_64KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; + + /* + * Setup sRGXMMUPTEConfig_64KBDP + */ + sRGXMMUPTEConfig_64KBDP.uiBytesPerEntry = 8; + + sRGXMMUPTEConfig_64KBDP.uiAddrMask = IMG_UINT64_C(0xffffff0000); + sRGXMMUPTEConfig_64KBDP.uiAddrShift = 16; + sRGXMMUPTEConfig_64KBDP.uiAddrLog2Align = 16; + + sRGXMMUPTEConfig_64KBDP.uiProtMask = bHaveMMU4 ? 
RGX_MMU4CTRL_PTE_PROTMASK : RGX_MMUCTRL_PTE_PROTMASK; + sRGXMMUPTEConfig_64KBDP.uiProtShift = 0; + + sRGXMMUPTEConfig_64KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; + sRGXMMUPTEConfig_64KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; + + /* + * Setup sRGXMMUDevVAddrConfig_64KBDP + */ + sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask, + sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift)); + + + sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask, + sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift)); + + + sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001f0000); + sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift = 16; + sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask, + sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift)); + + + sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetMask = IMG_UINT64_C(0x000000ffff); + sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetShift = 0; + sRGXMMUDevVAddrConfig_64KBDP.uiOffsetInBytes = 0; + + /* + * Setup gsPageSizeConfig64KB + */ + gsPageSizeConfig64KB.psPDEConfig = &sRGXMMUPDEConfig_64KBDP; + gsPageSizeConfig64KB.psPTEConfig = &sRGXMMUPTEConfig_64KBDP; + gsPageSizeConfig64KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_64KBDP; + gsPageSizeConfig64KB.uiRefCount = 0; + gsPageSizeConfig64KB.uiMaxRefCount = 0; + + + /* + * + * Configuration for heaps with 256kB Data-Page size + * + */ + + /* + * Setup sRGXMMUPDEConfig_256KBDP + */ + sRGXMMUPDEConfig_256KBDP.uiBytesPerEntry = 8; 
+ + sRGXMMUPDEConfig_256KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); + sRGXMMUPDEConfig_256KBDP.uiAddrShift = 6; + sRGXMMUPDEConfig_256KBDP.uiAddrLog2Align = 6; + + sRGXMMUPDEConfig_256KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); + sRGXMMUPDEConfig_256KBDP.uiVarCtrlShift = 1; + + sRGXMMUPDEConfig_256KBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PDE_PROTMASK : RGX_MMUCTRL_PDE_PROTMASK; + sRGXMMUPDEConfig_256KBDP.uiProtShift = 0; + + sRGXMMUPDEConfig_256KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; + sRGXMMUPDEConfig_256KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; + + /* + * Setup MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP + */ + sRGXMMUPTEConfig_256KBDP.uiBytesPerEntry = 8; + + sRGXMMUPTEConfig_256KBDP.uiAddrMask = IMG_UINT64_C(0xfffffc0000); + sRGXMMUPTEConfig_256KBDP.uiAddrShift = 18; + sRGXMMUPTEConfig_256KBDP.uiAddrLog2Align = 18; + + sRGXMMUPTEConfig_256KBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PTE_PROTMASK : RGX_MMUCTRL_PTE_PROTMASK; + sRGXMMUPTEConfig_256KBDP.uiProtShift = 0; + + sRGXMMUPTEConfig_256KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; + sRGXMMUPTEConfig_256KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; + + /* + * Setup sRGXMMUDevVAddrConfig_256KBDP + */ + sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask, + sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift)); + + + sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask, + sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift)); + + + sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask = 
IMG_UINT64_C(0x00001c0000); + sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift = 18; + sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask, + sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift)); + + + sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetMask = IMG_UINT64_C(0x000003ffff); + sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetShift = 0; + sRGXMMUDevVAddrConfig_256KBDP.uiOffsetInBytes = 0; + + /* + * Setup gsPageSizeConfig256KB + */ + gsPageSizeConfig256KB.psPDEConfig = &sRGXMMUPDEConfig_256KBDP; + gsPageSizeConfig256KB.psPTEConfig = &sRGXMMUPTEConfig_256KBDP; + gsPageSizeConfig256KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_256KBDP; + gsPageSizeConfig256KB.uiRefCount = 0; + gsPageSizeConfig256KB.uiMaxRefCount = 0; + + /* + * Setup sRGXMMUPDEConfig_1MBDP + */ + sRGXMMUPDEConfig_1MBDP.uiBytesPerEntry = 8; + + sRGXMMUPDEConfig_1MBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); + /* + * The hardware requires that PT tables need be 1<<6 = 64 byte aligned even + * if they contain fewer entries. + */ + sRGXMMUPDEConfig_1MBDP.uiAddrShift = 6; + sRGXMMUPDEConfig_1MBDP.uiAddrLog2Align = 6; + + sRGXMMUPDEConfig_1MBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); + sRGXMMUPDEConfig_1MBDP.uiVarCtrlShift = 1; + + sRGXMMUPDEConfig_1MBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PDE_PROTMASK : RGX_MMUCTRL_PDE_PROTMASK; + sRGXMMUPDEConfig_1MBDP.uiProtShift = 0; + + sRGXMMUPDEConfig_1MBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; + sRGXMMUPDEConfig_1MBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; + + /* + * Setup sRGXMMUPTEConfig_1MBDP + */ + sRGXMMUPTEConfig_1MBDP.uiBytesPerEntry = 8; + + sRGXMMUPTEConfig_1MBDP.uiAddrMask = IMG_UINT64_C(0xfffff00000); + sRGXMMUPTEConfig_1MBDP.uiAddrShift = 20; + sRGXMMUPTEConfig_1MBDP.uiAddrLog2Align = 20; + + sRGXMMUPTEConfig_1MBDP.uiProtMask = bHaveMMU4 ? 
RGX_MMU4CTRL_PTE_PROTMASK : RGX_MMUCTRL_PTE_PROTMASK; + sRGXMMUPTEConfig_1MBDP.uiProtShift = 0; + + sRGXMMUPTEConfig_1MBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; + sRGXMMUPTEConfig_1MBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; + + /* + * Setup sRGXMMUDevVAddrConfig_1MBDP + */ + sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask, + sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift)); + + + sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask, + sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift)); + + + sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask = IMG_UINT64_C(0x0000100000); + sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift = 20; + sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask, + sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift)); + + + sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetMask = IMG_UINT64_C(0x00000fffff); + sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetShift = 0; + sRGXMMUDevVAddrConfig_1MBDP.uiOffsetInBytes = 0; + + /* + * Setup gsPageSizeConfig1MB + */ + gsPageSizeConfig1MB.psPDEConfig = &sRGXMMUPDEConfig_1MBDP; + gsPageSizeConfig1MB.psPTEConfig = &sRGXMMUPTEConfig_1MBDP; + gsPageSizeConfig1MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_1MBDP; + gsPageSizeConfig1MB.uiRefCount = 0; + gsPageSizeConfig1MB.uiMaxRefCount = 0; + + /* + * Setup sRGXMMUPDEConfig_2MBDP + */ + sRGXMMUPDEConfig_2MBDP.uiBytesPerEntry = 8; + + sRGXMMUPDEConfig_2MBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); + /* + * The hardware requires that PT 
tables need be 1<<6 = 64 byte aligned even + * if they contain fewer entries. + */ + sRGXMMUPDEConfig_2MBDP.uiAddrShift = 6; + sRGXMMUPDEConfig_2MBDP.uiAddrLog2Align = 6; + + sRGXMMUPDEConfig_2MBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); + sRGXMMUPDEConfig_2MBDP.uiVarCtrlShift = 1; + + sRGXMMUPDEConfig_2MBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PDE_PROTMASK : RGX_MMUCTRL_PDE_PROTMASK; + sRGXMMUPDEConfig_2MBDP.uiProtShift = 0; + + sRGXMMUPDEConfig_2MBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; + sRGXMMUPDEConfig_2MBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; + + /* + * Setup sRGXMMUPTEConfig_2MBDP + */ + sRGXMMUPTEConfig_2MBDP.uiBytesPerEntry = 8; + + sRGXMMUPTEConfig_2MBDP.uiAddrMask = IMG_UINT64_C(0xffffe00000); + sRGXMMUPTEConfig_2MBDP.uiAddrShift = 21; + sRGXMMUPTEConfig_2MBDP.uiAddrLog2Align = 21; + + sRGXMMUPTEConfig_2MBDP.uiProtMask = bHaveMMU4 ? RGX_MMU4CTRL_PTE_PROTMASK : RGX_MMUCTRL_PTE_PROTMASK; + sRGXMMUPTEConfig_2MBDP.uiProtShift = 0; + + sRGXMMUPTEConfig_2MBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; + sRGXMMUPTEConfig_2MBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; + + /* + * Setup sRGXMMUDevVAddrConfig_2MBDP + */ + sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask, + sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift)); + + + sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; + sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; + sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask, + sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift)); + + + sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask = IMG_UINT64_C(0x0000000000); + sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift = 
21; + sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask, + sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift)); + + + sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetMask = IMG_UINT64_C(0x00001fffff); + sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetShift = 0; + sRGXMMUDevVAddrConfig_2MBDP.uiOffsetInBytes = 0; + + /* + * Setup gsPageSizeConfig2MB + */ + gsPageSizeConfig2MB.psPDEConfig = &sRGXMMUPDEConfig_2MBDP; + gsPageSizeConfig2MB.psPTEConfig = &sRGXMMUPTEConfig_2MBDP; + gsPageSizeConfig2MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_2MBDP; + gsPageSizeConfig2MB.uiRefCount = 0; + gsPageSizeConfig2MB.uiMaxRefCount = 0; + + /* + * Setup sRGXMMUDeviceAttributes + */ + sRGXMMUDeviceAttributes.eMMUType = PDUMP_MMU_TYPE_VARPAGE_40BIT; + sRGXMMUDeviceAttributes.eTopLevel = MMU_LEVEL_3; + sRGXMMUDeviceAttributes.ui32BaseAlign = RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT; + sRGXMMUDeviceAttributes.psBaseConfig = &sRGXMMUPCEConfig; + sRGXMMUDeviceAttributes.psTopLevelDevVAddrConfig = &sRGXMMUTopLevelDevVAddrConfig; + + /* Functions for deriving page table/dir/cat protection bits */ + sRGXMMUDeviceAttributes.pfnDerivePCEProt8 = RGXDerivePCEProt8; + sRGXMMUDeviceAttributes.pfnDerivePCEProt4 = RGXDerivePCEProt4; + sRGXMMUDeviceAttributes.pfnDerivePDEProt8 = RGXDerivePDEProt8; + sRGXMMUDeviceAttributes.pfnDerivePDEProt4 = RGXDerivePDEProt4; + sRGXMMUDeviceAttributes.pfnDerivePTEProt8 = RGXDerivePTEProt8; + sRGXMMUDeviceAttributes.pfnDerivePTEProt4 = RGXDerivePTEProt4; + + /* Functions for establishing configurations for PDE/PTE/DEVVADDR + on per-heap basis */ + sRGXMMUDeviceAttributes.pfnGetPageSizeConfiguration = RGXGetPageSizeConfigCB; + sRGXMMUDeviceAttributes.pfnPutPageSizeConfiguration = RGXPutPageSizeConfigCB; + + sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE4 = RGXGetPageSizeFromPDE4; + sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE8 = RGXGetPageSizeFromPDE8; + sRGXMMUDeviceAttributes.pfnGetPageSizeFromVirtAddr = NULL; 
	if (bHaveMMU4)
	{
		/* override some of these functions for MMU4 as page size is not stored in PD entries */
		sRGXMMUDeviceAttributes.pfnDerivePDEProt8 = RGXMMU4DerivePDEProt8;
		sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE8 = RGXMMU4GetPageSizeFromPDE8;
		sRGXMMUDeviceAttributes.pfnGetPageSizeFromVirtAddr = RGXMMU4GetPageSizeFromVirtAddr;
	}

	/* Publish the attribute set on the device node; note this is a pointer to
	 * file-scope static state, so only one device instance is supported here. */
	psDeviceNode->psMMUDevAttrs = &sRGXMMUDeviceAttributes;

	return PVRSRV_OK;
}

/* Tear down what RGXMMUInit_Register published: clear the device node's MMU
 * attribute/callback pointers and, in DEBUG builds, report per-page-size heap
 * reference-count statistics (all current counts should be zero by now). */
PVRSRV_ERROR RGXMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode)
{
	PVRSRV_ERROR eError;

	eError = PVRSRV_OK;

#if defined(PDUMP)
	psDeviceNode->pfnMMUGetContextID = NULL;
#endif

	psDeviceNode->psMMUDevAttrs = NULL;

#if defined(DEBUG)
	PVR_DPF((PVR_DBG_MESSAGE, "Variable Page Size Heap Stats:"));
	PVR_DPF((PVR_DBG_MESSAGE, "Max 4K page heaps: %d",
	         gsPageSizeConfig4KB.uiMaxRefCount));
	PVR_DPF((PVR_DBG_VERBOSE, "Current 4K page heaps (should be 0): %d",
	         gsPageSizeConfig4KB.uiRefCount));
	PVR_DPF((PVR_DBG_MESSAGE, "Max 16K page heaps: %d",
	         gsPageSizeConfig16KB.uiMaxRefCount));
	PVR_DPF((PVR_DBG_VERBOSE, "Current 16K page heaps (should be 0): %d",
	         gsPageSizeConfig16KB.uiRefCount));
	PVR_DPF((PVR_DBG_MESSAGE, "Max 64K page heaps: %d",
	         gsPageSizeConfig64KB.uiMaxRefCount));
	PVR_DPF((PVR_DBG_VERBOSE, "Current 64K page heaps (should be 0): %d",
	         gsPageSizeConfig64KB.uiRefCount));
	PVR_DPF((PVR_DBG_MESSAGE, "Max 256K page heaps: %d",
	         gsPageSizeConfig256KB.uiMaxRefCount));
	PVR_DPF((PVR_DBG_VERBOSE, "Current 256K page heaps (should be 0): %d",
	         gsPageSizeConfig256KB.uiRefCount));
	PVR_DPF((PVR_DBG_MESSAGE, "Max 1M page heaps: %d",
	         gsPageSizeConfig1MB.uiMaxRefCount));
	PVR_DPF((PVR_DBG_VERBOSE, "Current 1M page heaps (should be 0): %d",
	         gsPageSizeConfig1MB.uiRefCount));
	PVR_DPF((PVR_DBG_MESSAGE, "Max 2M page heaps: %d",
	         gsPageSizeConfig2MB.uiMaxRefCount));
	PVR_DPF((PVR_DBG_VERBOSE, "Current 2M page heaps (should be 0): %d",
	         gsPageSizeConfig2MB.uiRefCount));
#endif
	/* A non-zero refcount here means a heap was not released (unbalanced
	 * get/put of a page-size configuration) — logged below, not fatal. */
	if
	    (gsPageSizeConfig4KB.uiRefCount > 0 ||
	     gsPageSizeConfig16KB.uiRefCount > 0 ||
	     gsPageSizeConfig64KB.uiRefCount > 0 ||
	     gsPageSizeConfig256KB.uiRefCount > 0 ||
	     gsPageSizeConfig1MB.uiRefCount > 0 ||
	     gsPageSizeConfig2MB.uiRefCount > 0
	    )
	{
		PVR_DPF((PVR_DBG_ERROR, "RGXMMUInit_Unregister: Unbalanced MMU API Usage (Internal error)"));
	}

	return eError;
}

/*************************************************************************/ /*!
@Function       RGXMMUInit_GetConfigRangeValue
@Description    Helper Function
                For a given virtual address range and page size, return the
                value to load into an MMU_PAGE_SIZE_RANGE config register.
@Return         64-bit register value
*/ /**************************************************************************/
IMG_UINT64 RGXMMUInit_GetConfigRangeValue(IMG_UINT32 ui32DataPageSize, IMG_UINT64 ui64BaseAddress, IMG_UINT64 ui64RangeSize)
{
	/* end address of range is inclusive */
	IMG_UINT64 ui64EndAddress = ui64BaseAddress + ui64RangeSize - (1 << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT);
	IMG_UINT64 ui64RegValue = 0;

	/* Encode the page size as the register's PAGE_SIZE field value
	 * (0 = 4K ... 5 = 2M). */
	switch (ui32DataPageSize)
	{
		case 16*1024:
			ui64RegValue = 1;
			break;
		case 64*1024:
			ui64RegValue = 2;
			break;
		case 256*1024:
			ui64RegValue = 3;
			break;
		case 1024*1024:
			ui64RegValue = 4;
			break;
		case 2*1024*1024:
			ui64RegValue = 5;
			break;
		case 4*1024:
			/* fall through */
		default:
			/* anything we don't support, use 4K */
			break;
	}

	/* check that the range is defined by valid 40 bit virtual addresses */
	PVR_ASSERT((ui64BaseAddress & ~((1ULL << 40) - 1)) == 0);
	PVR_ASSERT((ui64EndAddress & ~((1ULL << 40) - 1)) == 0);

	/* the range config register addresses are in 2MB chunks so check 21 lsb are zero */
	PVR_ASSERT((ui64BaseAddress & ((1 << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_ALIGNSHIFT) - 1)) == 0);
	PVR_ASSERT((ui64EndAddress & ((1 << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT) - 1)) == 0);

	ui64BaseAddress >>=
RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_ALIGNSHIFT;
	ui64EndAddress >>= RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT;

	/* Pack page-size code, inclusive end address and base address into their
	 * register fields. */
	ui64RegValue = (ui64RegValue << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_PAGE_SIZE_SHIFT) |
	               (ui64EndAddress << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_SHIFT) |
	               (ui64BaseAddress << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_SHIFT);
	return ui64RegValue;
}


/*************************************************************************/ /*!
@Function       RGXDerivePCEProt4
@Description    calculate the PCE protection flags based on a 4 byte entry
@Return         IMG_UINT32 PCE protection bits: VALID when the mapping is
                valid, 0 otherwise
*/ /**************************************************************************/
static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags)
{
	return (uiProtFlags & MMU_PROTFLAGS_INVALID)?0:RGX_MMUCTRL_PC_DATA_VALID_EN;
}


/*************************************************************************/ /*!
@Function       RGXDerivePCEProt8
@Description    calculate the PCE protection flags based on an 8 byte entry
@Return         IMG_UINT64 — always 0; 8-byte PCEs are not supported on this
                device (an error is logged)
*/ /**************************************************************************/
static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
{
	PVR_UNREFERENCED_PARAMETER(uiProtFlags);
	PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);

	PVR_DPF((PVR_DBG_ERROR, "8-byte PCE not supported on this device"));
	return 0;
}


/*************************************************************************/ /*!
@Function       RGXDerivePDEProt4
@Description    derive the PDE protection flags based on a 4 byte entry
@Return         IMG_UINT32 — always 0; 4-byte PDEs are not supported on this
                device (an error is logged)
*/ /**************************************************************************/
static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags)
{
	PVR_UNREFERENCED_PARAMETER(uiProtFlags);
	PVR_DPF((PVR_DBG_ERROR, "4-byte PDE not supported on this device"));
	return 0;
}


/*************************************************************************/ /*!
+@Function RGXDerivePDEProt8 +@Description derive the PDE protection flags based on an 8 byte entry + +@Input uiLog2DataPageSize The log2 of the required page size. + E.g, for 4KiB pages, this parameter must be 12. + For 2MiB pages, it must be set to 21. + +@Return PVRSRV_ERROR + */ /**************************************************************************/ +static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize) +{ + IMG_UINT64 ret_value = 0; /* 0 means invalid */ + + if (!(uiProtFlags & MMU_PROTFLAGS_INVALID)) /* if not invalid */ + { + switch (uiLog2DataPageSize) + { + case RGX_HEAP_4KB_PAGE_SHIFT: + ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB; + break; + case RGX_HEAP_16KB_PAGE_SHIFT: + ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB; + break; + case RGX_HEAP_64KB_PAGE_SHIFT: + ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB; + break; + case RGX_HEAP_256KB_PAGE_SHIFT: + ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB; + break; + case RGX_HEAP_1MB_PAGE_SHIFT: + ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB; + break; + case RGX_HEAP_2MB_PAGE_SHIFT: + ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB; + break; + default: + PVR_DPF((PVR_DBG_ERROR, + "%s:%d: in function<%s>: Invalid parameter log2_page_size. Expected {12, 14, 16, 18, 20, 21}. Got [%u]", + __FILE__, __LINE__, __func__, uiLog2DataPageSize)); + } + } + return ret_value; +} + + +/*************************************************************************/ /*! 
+@Function RGXDerivePTEProt4 +@Description calculate the PTE protection flags based on a 4 byte entry +@Return PVRSRV_ERROR + */ /**************************************************************************/ +static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags) +{ + PVR_UNREFERENCED_PARAMETER(uiProtFlags); + PVR_DPF((PVR_DBG_ERROR, "4-byte PTE not supported on this device")); + + return 0; +} + +/*************************************************************************/ /*! +@Function RGXDerivePTEProt8 +@Description calculate the PTE protection flags based on an 8 byte entry +@Return PVRSRV_ERROR + */ /**************************************************************************/ +static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize) +{ + IMG_UINT64 ui64MMUFlags=0; + + PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize); + + if (((MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE) & uiProtFlags) == (MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE)) + { + /* read/write */ + } + else if (MMU_PROTFLAGS_READABLE & uiProtFlags) + { + /* read only */ + ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_READ_ONLY_EN; + } + else if (MMU_PROTFLAGS_WRITEABLE & uiProtFlags) + { + /* write only */ + PVR_DPF((PVR_DBG_MESSAGE, "RGXDerivePTEProt8: write-only is not possible on this device")); + } + else if ((MMU_PROTFLAGS_INVALID & uiProtFlags) == 0) + { + PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt8: neither read nor write specified...")); + } + + /* cache coherency */ + if (MMU_PROTFLAGS_CACHE_COHERENT & uiProtFlags) + { + ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_CC_EN; + } + + if ((uiProtFlags & MMU_PROTFLAGS_INVALID) == 0) + { + ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_VALID_EN; + } + + if (MMU_PROTFLAGS_DEVICE(PMMETA_PROTECT) & uiProtFlags) + { + ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN; + } + + /** + * Always enable caching on the fabric level cache irrespective of type of + * cache coherent interconnect and memory cache attributes. 
+ * This needs to be updated, if selective caching policy needs to be + * implemented based on cache attributes requested by caller and based on + * cache coherent interconnect. + */ + ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_AXCACHE_WBRWALLOC; + + return ui64MMUFlags; +} + + +/*************************************************************************/ /*! +@Function RGXGetPageSizeConfig +@Description Set up configuration for variable sized data pages. + RGXPutPageSizeConfigCB has to be called to ensure correct + refcounting. +@Return PVRSRV_ERROR + */ /**************************************************************************/ +static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize, + const MMU_PxE_CONFIG **ppsMMUPDEConfig, + const MMU_PxE_CONFIG **ppsMMUPTEConfig, + const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig, + IMG_HANDLE *phPriv) +{ + MMU_PAGESIZECONFIG *psPageSizeConfig; + + switch (uiLog2DataPageSize) + { + case RGX_HEAP_4KB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig4KB; + break; + case RGX_HEAP_16KB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig16KB; + break; + case RGX_HEAP_64KB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig64KB; + break; + case RGX_HEAP_256KB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig256KB; + break; + case RGX_HEAP_1MB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig1MB; + break; + case RGX_HEAP_2MB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig2MB; + break; + default: + PVR_DPF((PVR_DBG_ERROR, + "RGXGetPageSizeConfigCB: Invalid Data Page Size 1<<0x%x", + uiLog2DataPageSize)); + *phPriv = NULL; + return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; + } + + /* Refer caller's pointers to the data */ + *ppsMMUPDEConfig = psPageSizeConfig->psPDEConfig; + *ppsMMUPTEConfig = psPageSizeConfig->psPTEConfig; + *ppsMMUDevVAddrConfig = psPageSizeConfig->psDevVAddrConfig; + +#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT) + /* Increment ref-count - not that we're allocating anything here + (I'm using 
static structs), but one day we might, so we want + the Get/Put code to be balanced properly */ + psPageSizeConfig->uiRefCount++; + + /* This is purely for debug statistics */ + psPageSizeConfig->uiMaxRefCount = MAX(psPageSizeConfig->uiMaxRefCount, + psPageSizeConfig->uiRefCount); +#endif + + *phPriv = (IMG_HANDLE)(uintptr_t)uiLog2DataPageSize; + PVR_ASSERT (uiLog2DataPageSize == (IMG_UINT32)(uintptr_t)*phPriv); + + return PVRSRV_OK; +} + +/*************************************************************************/ /*! +@Function RGXPutPageSizeConfig +@Description Tells this code that the mmu module is done with the + configurations set in RGXGetPageSizeConfig. This can + be a no-op. + Called after RGXGetPageSizeConfigCB. +@Return PVRSRV_ERROR + */ /**************************************************************************/ +static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv) +{ +#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT) + MMU_PAGESIZECONFIG *psPageSizeConfig; + IMG_UINT32 uiLog2DataPageSize; + + uiLog2DataPageSize = (IMG_UINT32)(uintptr_t) hPriv; + + switch (uiLog2DataPageSize) + { + case RGX_HEAP_4KB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig4KB; + break; + case RGX_HEAP_16KB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig16KB; + break; + case RGX_HEAP_64KB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig64KB; + break; + case RGX_HEAP_256KB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig256KB; + break; + case RGX_HEAP_1MB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig1MB; + break; + case RGX_HEAP_2MB_PAGE_SHIFT: + psPageSizeConfig = &gsPageSizeConfig2MB; + break; + default: + PVR_DPF((PVR_DBG_ERROR, + "RGXPutPageSizeConfigCB: Invalid Data Page Size 1<<0x%x", + uiLog2DataPageSize)); + return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; + } + + /* Ref-count here is not especially useful, but it's an extra + check that the API is being used correctly */ + psPageSizeConfig->uiRefCount--; +#else + PVR_UNREFERENCED_PARAMETER(hPriv); 
+#endif + return PVRSRV_OK; +} + +static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize) +{ + PVR_UNREFERENCED_PARAMETER(ui32PDE); + PVR_UNREFERENCED_PARAMETER(pui32Log2PageSize); + PVR_DPF((PVR_DBG_ERROR, "4-byte PDE not supported on this device")); + return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; +} + +static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize) +{ + IMG_UINT64 ui64PageSizeBits = ui64PDE & (~RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK); + + switch (ui64PageSizeBits) + { + case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB: + *pui32Log2PageSize = RGX_HEAP_4KB_PAGE_SHIFT; + break; + case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB: + *pui32Log2PageSize = RGX_HEAP_16KB_PAGE_SHIFT; + break; + case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB: + *pui32Log2PageSize = RGX_HEAP_64KB_PAGE_SHIFT; + break; + case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB: + *pui32Log2PageSize = RGX_HEAP_256KB_PAGE_SHIFT; + break; + case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB: + *pui32Log2PageSize = RGX_HEAP_1MB_PAGE_SHIFT; + break; + case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB: + *pui32Log2PageSize = RGX_HEAP_2MB_PAGE_SHIFT; + break; + default: + PVR_DPF((PVR_DBG_ERROR, + "RGXGetPageSizeFromPDE8: Invalid page size bitfield %" IMG_UINT64_FMTSPECx " in PDE", + ui64PageSizeBits)); + + return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; + } + return PVRSRV_OK; +} + + + + +/*************************************************************************/ /*! +@Function RGXMMU4DerivePDEProt8 +@Description derive the PDE protection flags based on an 8 byte entry + +@Input uiLog2DataPageSize: ignored as MMU4 doesn't put page size in PD entries. 
+ +@Return PVRSRV_ERROR +*/ /**************************************************************************/ +static IMG_UINT64 RGXMMU4DerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize) +{ + IMG_UINT64 ret_value = 0; /* 0 means invalid */ + PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize); + + if (!(uiProtFlags & MMU_PROTFLAGS_INVALID)) /* if not invalid */ + { + /* page size in range config registers. Bits in PD entries are reserved */ + ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN; + } + return ret_value; +} + + +/*************************************************************************/ /*! +@Function RGXMMU4GetPageSizeFromPDE8 +@Description The upper layers should be such that this function is never called + as pages size are not stored in PD entries for MMU4. +*/ /**************************************************************************/ +static PVRSRV_ERROR RGXMMU4GetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize) +{ + PVR_UNREFERENCED_PARAMETER(ui64PDE); + PVR_UNREFERENCED_PARAMETER(pui32Log2PageSize); + + PVR_ASSERT(0 && "RGXMMU4GetPageSizeFromPDE8 called in error. MMU4 does not store page sizes in PDT."); + return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; +} + + +/*************************************************************************/ /*! +@Function RGXMMU4GetPageSizeFromVirtAddr +@Description Get page size by walking through range config registers + looking for a match against the virtual address. 
+*/ /**************************************************************************/ +static PVRSRV_ERROR RGXMMU4GetPageSizeFromVirtAddr(struct _PVRSRV_DEVICE_NODE_ *psDevNode, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_UINT32 *pui32Log2PageSize) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 i; + + /* assume default of 4KB page size */ + *pui32Log2PageSize = 12; + + /* Loop through the range registers looking for the given target address */ + for (i = 0; i < ARRAY_SIZE(psDevInfo->aui64MMUPageSizeRangeValue); ++i) + { + IMG_UINT64 ui64RangeVal = psDevInfo->aui64MMUPageSizeRangeValue[i]; + + if (ui64RangeVal != 0) + { + /* end addr in register is inclusive in the range so add 1 to move it over the end */ + IMG_UINT64 ui64Base = ((ui64RangeVal & ~RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_CLRMSK) + >> RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_SHIFT) + << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_ALIGNSHIFT; + IMG_UINT64 ui64End = (((ui64RangeVal & ~RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_CLRMSK) + >> RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_SHIFT) + 1) + << RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT; + + if ((sDevVAddr.uiAddr >= ui64Base) && (sDevVAddr.uiAddr < ui64End)) + { + IMG_UINT32 ui32PageSizeField = (IMG_UINT32)((ui64RangeVal & ~RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_PAGE_SIZE_CLRMSK) + >> RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_PAGE_SIZE_SHIFT); + if (ui32PageSizeField < 5) + { + *pui32Log2PageSize = (ui32PageSizeField << 1) + 12; /* 12 (4K), 14 (16K), 16 (64K), 18 (256K), 20 (1MB) */ + } + else if (ui32PageSizeField == 5) + { + *pui32Log2PageSize = 21; /* 2MB */ + } + else + { + eError = PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; + } + break; + } + } + } + + return eError; +} diff --git a/drivers/gpu/drm/phytium/octopus/rgxmmuinit.h b/drivers/gpu/drm/phytium/octopus/rgxmmuinit.h new file mode 100644 index 000000000000..c5944b6610bb --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxmmuinit.h @@ -0,0 +1,61 @@ 
+/*************************************************************************/ /*! +@File +@Title Device specific initialisation routines +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Device specific MMU initialisation +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/* NB: this file is not to be included arbitrarily. It exists solely + for the linkage between rgxinit.c and rgxmmuinit.c, the former + being otherwise cluttered by the contents of the latter */ + +#ifndef SRVKM_RGXMMUINIT_H +#define SRVKM_RGXMMUINIT_H + +#include "device.h" +#include "img_types.h" +#include "mmu_common.h" +#include "img_defs.h" + +PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode); +PVRSRV_ERROR RGXMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode); + +IMG_UINT64 RGXMMUInit_GetConfigRangeValue(IMG_UINT32 ui32DataPageSize, IMG_UINT64 ui64BaseAddress, IMG_UINT64 ui64RangeSize); + +#endif /* #ifndef SRVKM_RGXMMUINIT_H */ diff --git a/drivers/gpu/drm/phytium/octopus/rgxmulticore.c b/drivers/gpu/drm/phytium/octopus/rgxmulticore.c new file mode 100644 index 000000000000..5d5168a479b9 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxmulticore.c @@ -0,0 +1,213 @@ +/*************************************************************************/ /*! +@File rgxmulticore.c +@Title Functions related to multicore devices +@Codingstyle IMG +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Kernel mode workload estimation functionality. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "rgxdevice.h" +#include "rgxdefs_km.h" +#include "pdump_km.h" +#include "rgxmulticore.h" +#include "multicore_defs.h" +#include "allocmem.h" +#include "pvr_debug.h" + + + +static PVRSRV_ERROR RGXGetMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32CapsSize, + IMG_UINT32 *pui32NumCores, + IMG_UINT64 *pui64Caps); + + +/* + * RGXInitMultiCoreInfo: + * Return multicore information to clients. + * Return not supported on cores without multicore. + */ +static PVRSRV_ERROR RGXGetMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32CapsSize, + IMG_UINT32 *pui32NumCores, + IMG_UINT64 *pui64Caps) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + if (psDeviceNode->ui32MultiCoreNumCores == 0) + { + /* MULTICORE not supported on this device */ + eError = PVRSRV_ERROR_NOT_SUPPORTED; + } + else + { + *pui32NumCores = psDeviceNode->ui32MultiCoreNumCores; + if (ui32CapsSize > 0) + { + if (ui32CapsSize < psDeviceNode->ui32MultiCoreNumCores) + { + PVR_DPF((PVR_DBG_ERROR, "Multicore caps buffer too small")); + eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; + } + else + { + IMG_UINT32 i; + + for (i = 0; i < psDeviceNode->ui32MultiCoreNumCores; ++i) + { + pui64Caps[i] = psDeviceNode->pui64MultiCoreCapabilities[i]; + } + } + } + } + + return eError; +} + + + +/* + * RGXInitMultiCoreInfo: + * Read multicore HW registers and fill in data structure for clients. + * Return not_supported on cores without multicore. 
+ */ +PVRSRV_ERROR RGXInitMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError = PVRSRV_OK; + + if (psDeviceNode->pfnGetMultiCoreInfo != NULL) + { + /* we only set this up once */ + return PVRSRV_OK; + } + + /* defaults for non-multicore devices */ + psDeviceNode->ui32MultiCoreNumCores = 0; + psDeviceNode->ui32MultiCorePrimaryId = (IMG_UINT32)(-1); + psDeviceNode->pui64MultiCoreCapabilities = NULL; + psDeviceNode->pfnGetMultiCoreInfo = NULL; + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT)) + { + IMG_UINT32 ui32MulticoreInfo; + IMG_UINT32 ui32PrimaryCoreIds; + IMG_UINT32 ui32PrimaryId; + IMG_UINT32 ui32TotalCores; + IMG_UINT32 ui32NumCores; + IMG_UINT32 id, i; + + ui32NumCores = (OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MULTICORE_DOMAIN) + & ~RGX_CR_MULTICORE_DOMAIN_GPU_COUNT_CLRMSK) + >> RGX_CR_MULTICORE_DOMAIN_GPU_COUNT_SHIFT; + + ui32TotalCores = (OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MULTICORE_SYSTEM) + & ~RGX_CR_MULTICORE_SYSTEM_GPU_COUNT_CLRMSK) + >> RGX_CR_MULTICORE_SYSTEM_GPU_COUNT_SHIFT; + ui32MulticoreInfo = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MULTICORE); +#if defined(NO_HARDWARE) + /* override to defaults if no hardware */ + ui32NumCores = 8;//RGX_MULTICORE_MAX_NOHW_CORES; + ui32TotalCores = RGX_MULTICORE_MAX_NOHW_CORES; + ui32MulticoreInfo = 0; /* primary id 0 with 7 secondaries */ +#endif + /* ID for this primary is in this register */ + ui32PrimaryId = (ui32MulticoreInfo & ~RGX_CR_MULTICORE_ID_CLRMSK) >> RGX_CR_MULTICORE_ID_SHIFT; + + /* allocate storage for capabilities */ + psDeviceNode->pui64MultiCoreCapabilities = OSAllocMem(ui32NumCores * sizeof(psDeviceNode->pui64MultiCoreCapabilities[0])); + if (psDeviceNode->pui64MultiCoreCapabilities == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to alloc memory for Multicore info", __func__)); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + ui32PrimaryCoreIds = (ui32MulticoreInfo & 
~RGX_CR_MULTICORE_PRIMARY_CORE_ID_CLRMSK) + >> RGX_CR_MULTICORE_PRIMARY_CORE_ID_SHIFT; + + psDeviceNode->ui32MultiCorePrimaryId = ui32PrimaryId; + psDeviceNode->ui32MultiCoreNumCores = ui32NumCores; + + PVR_DPF((PVR_DBG_MESSAGE, "Multicore domain has %d cores with primary id %u\n", ui32NumCores, ui32PrimaryId)); + PDUMPCOMMENT("RGX Multicore domain has %d cores with primary id %u\n", ui32NumCores, ui32PrimaryId); + for (i = 0, id = 0; id < ui32TotalCores; ++id) + { + if ((ui32PrimaryCoreIds & 0x7) == ui32PrimaryId) + { + /* currently all cores are identical so have the same capabilities */ + psDeviceNode->pui64MultiCoreCapabilities[i] = id + | ((id == ui32PrimaryId) ? RGX_MULTICORE_CAPABILITY_PRIMARY_EN : 0) + | RGX_MULTICORE_CAPABILITY_GEOMETRY_EN + | RGX_MULTICORE_CAPABILITY_COMPUTE_EN + | RGX_MULTICORE_CAPABILITY_FRAGMENT_EN; + PDUMPCOMMENT("\tCore %u has caps 0x%08x", id, (IMG_UINT32)psDeviceNode->pui64MultiCoreCapabilities[i]); + PVR_DPF((PVR_DBG_MESSAGE, "Core %u has caps 0x%08x", id, (IMG_UINT32)psDeviceNode->pui64MultiCoreCapabilities[i])); + ++i; + } + ui32PrimaryCoreIds >>= 3; + } + + /* Register callback to return info about multicore setup to client bridge */ + psDeviceNode->pfnGetMultiCoreInfo = RGXGetMultiCoreInfo; + } + else + { + /* MULTICORE not supported on this device */ + eError = PVRSRV_ERROR_NOT_SUPPORTED; + } + + return eError; +} + + +/* + * RGXDeinitMultiCoreInfo: + * Release resources and clear the MultiCore values in the DeviceNode. 
+ */ +void RGXDeInitMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + if (psDeviceNode->pui64MultiCoreCapabilities != NULL) + { + OSFreeMem(psDeviceNode->pui64MultiCoreCapabilities); + psDeviceNode->pui64MultiCoreCapabilities = NULL; + psDeviceNode->ui32MultiCoreNumCores = 0; + psDeviceNode->ui32MultiCorePrimaryId = (IMG_UINT32)(-1); + } + psDeviceNode->pfnGetMultiCoreInfo = NULL; +} diff --git a/drivers/gpu/drm/phytium/octopus/rgxmulticore.h b/drivers/gpu/drm/phytium/octopus/rgxmulticore.h new file mode 100644 index 000000000000..ae43afeb261e --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxmulticore.h @@ -0,0 +1,54 @@ +/*************************************************************************/ /*! +@File rgxmulticore.h +@Title Functions related to multicore devices +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description General purpose memory shared between kernel driver and user + mode. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXMULTICORE_H +#define RGXMULTICORE_H + +#include "pvrsrv_error.h" +#include "pvrsrv.h" + +PVRSRV_ERROR RGXInitMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode); +void RGXDeInitMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode); + +#endif /* RGXMULTICORE_H */ diff --git a/drivers/gpu/drm/phytium/octopus/rgxpmdefs.h b/drivers/gpu/drm/phytium/octopus/rgxpmdefs.h new file mode 100644 index 000000000000..5b5f9c83ca05 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxpmdefs.h @@ -0,0 +1,5019 @@ +/*************************************************************************/ /*! +@Title Hardware definition file rgxpmdefs.h +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/* **** Autogenerated C -- do not edit **** */ + +/* + * rogue_pm.def: #13 + */ + + +#ifndef RGXPMDEFS_H +#define RGXPMDEFS_H + +#include "img_types.h" +#include "img_defs.h" + + +#define RGXPMDEFS_REVISION 13 + +/* +The mini PB size on a per-RT basis +*/ +typedef struct PM_DATA_MINI_PB_TAG { + IMG_UINT32 u32_0; +} PM_DATA_MINI_PB; + +/* + +*/ +#define PM_DATA_MINI_PB_SIZE_WOFF (0U) +#define PM_DATA_MINI_PB_SIZE_SHIFT (0U) +#define PM_DATA_MINI_PB_SIZE_CLRMSK (0xFFFFFC00U) +#define PM_DATA_MINI_PB_SET_SIZE(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_MINI_PB_SIZE_CLRMSK ) | (((_x_) & (0x000003ffU)) << (PM_DATA_MINI_PB_SIZE_SHIFT)))) +#define PM_DATA_MINI_PB_GET_SIZE(_ft_) (((_ft_).u32_0 >> (PM_DATA_MINI_PB_SIZE_SHIFT)) & 0x000003ffU) + + +/* +The minimum PB size for the WDDM driver only. It is consistent with the OPENGL/OPENGLES. However, it is breaking down as two parts: the pagable memory and non pagable memory. 
+*/ +typedef struct PM_DATA_WDDM_MINI_PB_TAG { + IMG_UINT32 u32_0; +} PM_DATA_WDDM_MINI_PB; + +/* + +*/ +#define PM_DATA_WDDM_MINI_PB_NON_PAGABLE_SIZE_WOFF (0U) +#define PM_DATA_WDDM_MINI_PB_NON_PAGABLE_SIZE_SHIFT (10U) +#define PM_DATA_WDDM_MINI_PB_NON_PAGABLE_SIZE_CLRMSK (0xFFF003FFU) +#define PM_DATA_WDDM_MINI_PB_SET_NON_PAGABLE_SIZE(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_WDDM_MINI_PB_NON_PAGABLE_SIZE_CLRMSK ) | (((_x_) & (0x000003ffU)) << (PM_DATA_WDDM_MINI_PB_NON_PAGABLE_SIZE_SHIFT)))) +#define PM_DATA_WDDM_MINI_PB_GET_NON_PAGABLE_SIZE(_ft_) (((_ft_).u32_0 >> (PM_DATA_WDDM_MINI_PB_NON_PAGABLE_SIZE_SHIFT)) & 0x000003ffU) +/* + +*/ +#define PM_DATA_WDDM_MINI_PB_PAGABLE_SIZE_WOFF (0U) +#define PM_DATA_WDDM_MINI_PB_PAGABLE_SIZE_SHIFT (0U) +#define PM_DATA_WDDM_MINI_PB_PAGABLE_SIZE_CLRMSK (0xFFFFFC00U) +#define PM_DATA_WDDM_MINI_PB_SET_PAGABLE_SIZE(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_WDDM_MINI_PB_PAGABLE_SIZE_CLRMSK ) | (((_x_) & (0x000003ffU)) << (PM_DATA_WDDM_MINI_PB_PAGABLE_SIZE_SHIFT)))) +#define PM_DATA_WDDM_MINI_PB_GET_PAGABLE_SIZE(_ft_) (((_ft_).u32_0 >> (PM_DATA_WDDM_MINI_PB_PAGABLE_SIZE_SHIFT)) & 0x000003ffU) + + +/* +the mini number of the reserve pages when only the local free list is used */ +#define PM_DATA_PM_RESERVE_PAGES_MIN_SIZE (0x00000007U) + + +/* +the mini number of the reserve pages when unified free list is present */ +#define PM_DATA_PM_RESERVE_PAGES_MIN_UNIFIED_SIZE (0x0000000bU) + + +/* +This defines the format of entries in the FSTACK, UFSTACK and MMUSTACK +*/ +typedef struct PM_DATA_FSTACK_ENTRY_TAG { + IMG_UINT32 u32_0; +} PM_DATA_FSTACK_ENTRY; + +/* +Reserved for future use +*/ +#define PM_DATA_FSTACK_ENTRY_RSV_WOFF (0U) +#define PM_DATA_FSTACK_ENTRY_RSV_SHIFT (28U) +#define PM_DATA_FSTACK_ENTRY_RSV_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_FSTACK_ENTRY_SET_RSV(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_FSTACK_ENTRY_RSV_CLRMSK ) | (((_x_) & (0x0000000fU)) << (PM_DATA_FSTACK_ENTRY_RSV_SHIFT)))) +#define 
PM_DATA_FSTACK_ENTRY_GET_RSV(_ft_) (((_ft_).u32_0 >> (PM_DATA_FSTACK_ENTRY_RSV_SHIFT)) & 0x0000000fU) +/* +Address of 4 kB physical page +*/ +#define PM_DATA_FSTACK_ENTRY_PPAGE_WOFF (0U) +#define PM_DATA_FSTACK_ENTRY_PPAGE_SHIFT (0U) +#define PM_DATA_FSTACK_ENTRY_PPAGE_CLRMSK (0xF0000000U) +#define PM_DATA_FSTACK_ENTRY_SET_PPAGE(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_FSTACK_ENTRY_PPAGE_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_FSTACK_ENTRY_PPAGE_SHIFT)))) +#define PM_DATA_FSTACK_ENTRY_GET_PPAGE(_ft_) (((_ft_).u32_0 >> (PM_DATA_FSTACK_ENTRY_PPAGE_SHIFT)) & 0x0fffffffU) + + +/* +This defines the format of an ALIST (Allocation List) entry +*/ +typedef struct PM_DATA_ALIST_ENTRY_TAG { + IMG_UINT32 u32_0; + IMG_UINT32 u32_1; +} PM_DATA_ALIST_ENTRY; + +/* +Valid bit. Indicates whether this ALIST entry is valid. +*/ +#define PM_DATA_ALIST_ENTRY_VAL_WOFF (1U) +#define PM_DATA_ALIST_ENTRY_VAL_SHIFT (31U) +#define PM_DATA_ALIST_ENTRY_VAL_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_ALIST_ENTRY_SET_VAL(_ft_,_x_) ((_ft_).u32_1 = (((_ft_).u32_1 & PM_DATA_ALIST_ENTRY_VAL_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_ALIST_ENTRY_VAL_SHIFT)))) +#define PM_DATA_ALIST_ENTRY_GET_VAL(_ft_) (((_ft_).u32_1 >> (PM_DATA_ALIST_ENTRY_VAL_SHIFT)) & 0x00000001U) +/* +The "data master" of the virtual page. 0=VCE, 1=TE, 2,3=reserved. +*/ +#define PM_DATA_ALIST_ENTRY_DM_INDEX_WOFF (1U) +#define PM_DATA_ALIST_ENTRY_DM_INDEX_SHIFT (26U) +#define PM_DATA_ALIST_ENTRY_DM_INDEX_CLRMSK (0xF3FFFFFFU) +#define PM_DATA_ALIST_ENTRY_SET_DM_INDEX(_ft_,_x_) ((_ft_).u32_1 = (((_ft_).u32_1 & PM_DATA_ALIST_ENTRY_DM_INDEX_CLRMSK ) | (((_x_) & (0x00000003U)) << (PM_DATA_ALIST_ENTRY_DM_INDEX_SHIFT)))) +#define PM_DATA_ALIST_ENTRY_GET_DM_INDEX(_ft_) (((_ft_).u32_1 >> (PM_DATA_ALIST_ENTRY_DM_INDEX_SHIFT)) & 0x00000003U) +/* +Render Target Array index. Up to 2 k Render Target Arrays are supported. 
+*/ +#define PM_DATA_ALIST_ENTRY_RTA_INDEX_WOFF (1U) +#define PM_DATA_ALIST_ENTRY_RTA_INDEX_SHIFT (14U) +#define PM_DATA_ALIST_ENTRY_RTA_INDEX_CLRMSK (0xFE003FFFU) +#define PM_DATA_ALIST_ENTRY_SET_RTA_INDEX(_ft_,_x_) ((_ft_).u32_1 = (((_ft_).u32_1 & PM_DATA_ALIST_ENTRY_RTA_INDEX_CLRMSK ) | (((_x_) & (0x000007ffU)) << (PM_DATA_ALIST_ENTRY_RTA_INDEX_SHIFT)))) +#define PM_DATA_ALIST_ENTRY_GET_RTA_INDEX(_ft_) (((_ft_).u32_1 >> (PM_DATA_ALIST_ENTRY_RTA_INDEX_SHIFT)) & 0x000007ffU) +/* +The virtual page number (16 kB virtual page). +*/ +#define PM_DATA_ALIST_ENTRY_VRP_PPAGE_W0_WOFF (0U) +#define PM_DATA_ALIST_ENTRY_VRP_PPAGE_W1_WOFF (1U) +#define PM_DATA_ALIST_ENTRY_VRP_PPAGE_W0_SHIFT (16U) +#define PM_DATA_ALIST_ENTRY_VRP_PPAGE_W1_SHIFT (0U) +#define PM_DATA_ALIST_ENTRY_VRP_PPAGE_W0_CLRMSK (0x0000FFFFU) +#define PM_DATA_ALIST_ENTRY_VRP_PPAGE_W1_CLRMSK (0xFFFFFFF0U) +#define PM_DATA_ALIST_ENTRY_SET_VRP_PPAGE(_ft_,_x_) { ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_ALIST_ENTRY_VRP_PPAGE_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000ffff))) << 16))); \ + ((_ft_).u32_1 = (((_ft_).u32_1 & PM_DATA_ALIST_ENTRY_VRP_PPAGE_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000f0000))) >> 16))); } +#define PM_DATA_ALIST_ENTRY_GET_VRP_PPAGE(_ft_) (((_ft_).u32_0 >> (16)) | ((IMG_UINT64)((_ft_).u32_1 & 0x0000000fU ) << (16))) +/* +The 16-bit macrotile mask. 
Indicates which macrotile(s) are using this 16 kB page +*/ +#define PM_DATA_ALIST_ENTRY_MTILE_MASK_WOFF (0U) +#define PM_DATA_ALIST_ENTRY_MTILE_MASK_SHIFT (0U) +#define PM_DATA_ALIST_ENTRY_MTILE_MASK_CLRMSK (0xFFFF0000U) +#define PM_DATA_ALIST_ENTRY_SET_MTILE_MASK(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_ALIST_ENTRY_MTILE_MASK_CLRMSK ) | (((_x_) & (0x0000ffffU)) << (PM_DATA_ALIST_ENTRY_MTILE_MASK_SHIFT)))) +#define PM_DATA_ALIST_ENTRY_GET_MTILE_MASK(_ft_) (((_ft_).u32_0 >> (PM_DATA_ALIST_ENTRY_MTILE_MASK_SHIFT)) & 0x0000ffffU) + + +/* +This defines the format of entries in the MLIST +*/ +typedef struct PM_DATA_MLIST_ENTRY_TAG { + IMG_UINT32 u32_0; +} PM_DATA_MLIST_ENTRY; + +/* +Original source of the MMU page: +0=Page was allocated from the FSTACK, +1=Page was allocated from the UFSTACK. +This bit is ignored when RGX_CR_PM_MMU_STACK_POLICY=1 +*/ +#define PM_DATA_MLIST_ENTRY_SRC_WOFF (0U) +#define PM_DATA_MLIST_ENTRY_SRC_SHIFT (31U) +#define PM_DATA_MLIST_ENTRY_SRC_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_MLIST_ENTRY_SET_SRC(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_MLIST_ENTRY_SRC_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_MLIST_ENTRY_SRC_SHIFT)))) +#define PM_DATA_MLIST_ENTRY_GET_SRC(_ft_) (((_ft_).u32_0 >> (PM_DATA_MLIST_ENTRY_SRC_SHIFT)) & 0x00000001U) +/* +Address of Physical Page allocated to MMU +*/ +#define PM_DATA_MLIST_ENTRY_PPAGE_WOFF (0U) +#define PM_DATA_MLIST_ENTRY_PPAGE_SHIFT (0U) +#define PM_DATA_MLIST_ENTRY_PPAGE_CLRMSK (0xF0000000U) +#define PM_DATA_MLIST_ENTRY_SET_PPAGE(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_MLIST_ENTRY_PPAGE_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_MLIST_ENTRY_PPAGE_SHIFT)))) +#define PM_DATA_MLIST_ENTRY_GET_PPAGE(_ft_) (((_ft_).u32_0 >> (PM_DATA_MLIST_ENTRY_PPAGE_SHIFT)) & 0x0fffffffU) + + +/* +This defines the format of entries in the VFP Table +*/ +typedef struct PM_DATA_VFP_TABLE_ENTRY_TAG { + IMG_UINT32 u32_0; + IMG_UINT32 u32_1; +} PM_DATA_VFP_TABLE_ENTRY; + +/* +Valid bit. 
0=VFP is unmapped, 1=VFP is mapped. +*/ +#define PM_DATA_VFP_TABLE_ENTRY_VALID_WOFF (1U) +#define PM_DATA_VFP_TABLE_ENTRY_VALID_SHIFT (31U) +#define PM_DATA_VFP_TABLE_ENTRY_VALID_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VFP_TABLE_ENTRY_SET_VALID(_ft_,_x_) ((_ft_).u32_1 = (((_ft_).u32_1 & PM_DATA_VFP_TABLE_ENTRY_VALID_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VFP_TABLE_ENTRY_VALID_SHIFT)))) +#define PM_DATA_VFP_TABLE_ENTRY_GET_VALID(_ft_) (((_ft_).u32_1 >> (PM_DATA_VFP_TABLE_ENTRY_VALID_SHIFT)) & 0x00000001U) +/* +Address of MMU Page Table Entry. 8 Byte Granular. +*/ +#define PM_DATA_VFP_TABLE_ENTRY_PTE_PTR_WOFF (1U) +#define PM_DATA_VFP_TABLE_ENTRY_PTE_PTR_SHIFT (0U) +#define PM_DATA_VFP_TABLE_ENTRY_PTE_PTR_CLRMSK (0x80000000U) +#define PM_DATA_VFP_TABLE_ENTRY_SET_PTE_PTR(_ft_,_x_) ((_ft_).u32_1 = (((_ft_).u32_1 & PM_DATA_VFP_TABLE_ENTRY_PTE_PTR_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (PM_DATA_VFP_TABLE_ENTRY_PTE_PTR_SHIFT)))) +#define PM_DATA_VFP_TABLE_ENTRY_GET_PTE_PTR(_ft_) (((_ft_).u32_1 >> (PM_DATA_VFP_TABLE_ENTRY_PTE_PTR_SHIFT)) & 0x7fffffffU) +/* +Reserved for future use. +*/ +#define PM_DATA_VFP_TABLE_ENTRY_RSV_WOFF (0U) +#define PM_DATA_VFP_TABLE_ENTRY_RSV_SHIFT (28U) +#define PM_DATA_VFP_TABLE_ENTRY_RSV_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VFP_TABLE_ENTRY_SET_RSV(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_VFP_TABLE_ENTRY_RSV_CLRMSK ) | (((_x_) & (0x0000000fU)) << (PM_DATA_VFP_TABLE_ENTRY_RSV_SHIFT)))) +#define PM_DATA_VFP_TABLE_ENTRY_GET_RSV(_ft_) (((_ft_).u32_0 >> (PM_DATA_VFP_TABLE_ENTRY_RSV_SHIFT)) & 0x0000000fU) +/* +Address of 1 kB Physical Page. 1 TB addressable. 
+*/ +#define PM_DATA_VFP_TABLE_ENTRY_PPAGE_WOFF (0U) +#define PM_DATA_VFP_TABLE_ENTRY_PPAGE_SHIFT (0U) +#define PM_DATA_VFP_TABLE_ENTRY_PPAGE_CLRMSK (0xF0000000U) +#define PM_DATA_VFP_TABLE_ENTRY_SET_PPAGE(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_VFP_TABLE_ENTRY_PPAGE_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VFP_TABLE_ENTRY_PPAGE_SHIFT)))) +#define PM_DATA_VFP_TABLE_ENTRY_GET_PPAGE(_ft_) (((_ft_).u32_0 >> (PM_DATA_VFP_TABLE_ENTRY_PPAGE_SHIFT)) & 0x0fffffffU) + + +/* +PerPipe Segment SIZE, it has a fixed mapping as follows: +PIPE Number - Segment Size +1 16G +2 8G +4 4G +8 2G +*/ +typedef struct PM_DATA_PERPIPE_SEGSIZE_TAG { + IMG_UINT32 u32_0; +} PM_DATA_PERPIPE_SEGSIZE; + +/* +PerSegment Size 2G +*/ +#define PM_DATA_PERPIPE_SEGSIZE_PIPE8_SEGSZIZE_WOFF (0U) +#define PM_DATA_PERPIPE_SEGSIZE_PIPE8_SEGSZIZE_SHIFT (3U) +#define PM_DATA_PERPIPE_SEGSIZE_PIPE8_SEGSZIZE_CLRMSK (0xFFFFFFF7U) +#define PM_DATA_PERPIPE_SEGSIZE_SET_PIPE8_SEGSZIZE(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_PERPIPE_SEGSIZE_PIPE8_SEGSZIZE_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_PERPIPE_SEGSIZE_PIPE8_SEGSZIZE_SHIFT)))) +#define PM_DATA_PERPIPE_SEGSIZE_GET_PIPE8_SEGSZIZE(_ft_) (((_ft_).u32_0 >> (PM_DATA_PERPIPE_SEGSIZE_PIPE8_SEGSZIZE_SHIFT)) & 0x00000001U) +/* +PerSegment Size 4G +*/ +#define PM_DATA_PERPIPE_SEGSIZE_PIPE4_SEGSZIZE_WOFF (0U) +#define PM_DATA_PERPIPE_SEGSIZE_PIPE4_SEGSZIZE_SHIFT (2U) +#define PM_DATA_PERPIPE_SEGSIZE_PIPE4_SEGSZIZE_CLRMSK (0xFFFFFFFBU) +#define PM_DATA_PERPIPE_SEGSIZE_SET_PIPE4_SEGSZIZE(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_PERPIPE_SEGSIZE_PIPE4_SEGSZIZE_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_PERPIPE_SEGSIZE_PIPE4_SEGSZIZE_SHIFT)))) +#define PM_DATA_PERPIPE_SEGSIZE_GET_PIPE4_SEGSZIZE(_ft_) (((_ft_).u32_0 >> (PM_DATA_PERPIPE_SEGSIZE_PIPE4_SEGSZIZE_SHIFT)) & 0x00000001U) +/* +PerSegment Size 8G +*/ +#define PM_DATA_PERPIPE_SEGSIZE_PIPE2_SEGSZIZE_WOFF (0U) +#define PM_DATA_PERPIPE_SEGSIZE_PIPE2_SEGSZIZE_SHIFT (1U) 
+#define PM_DATA_PERPIPE_SEGSIZE_PIPE2_SEGSZIZE_CLRMSK (0xFFFFFFFDU) +#define PM_DATA_PERPIPE_SEGSIZE_SET_PIPE2_SEGSZIZE(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_PERPIPE_SEGSIZE_PIPE2_SEGSZIZE_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_PERPIPE_SEGSIZE_PIPE2_SEGSZIZE_SHIFT)))) +#define PM_DATA_PERPIPE_SEGSIZE_GET_PIPE2_SEGSZIZE(_ft_) (((_ft_).u32_0 >> (PM_DATA_PERPIPE_SEGSIZE_PIPE2_SEGSZIZE_SHIFT)) & 0x00000001U) +/* +PerSegment Size 16G +*/ +#define PM_DATA_PERPIPE_SEGSIZE_PIPE1_SEGSZIZE_WOFF (0U) +#define PM_DATA_PERPIPE_SEGSIZE_PIPE1_SEGSZIZE_SHIFT (0U) +#define PM_DATA_PERPIPE_SEGSIZE_PIPE1_SEGSZIZE_CLRMSK (0xFFFFFFFEU) +#define PM_DATA_PERPIPE_SEGSIZE_SET_PIPE1_SEGSZIZE(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_PERPIPE_SEGSIZE_PIPE1_SEGSZIZE_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_PERPIPE_SEGSIZE_PIPE1_SEGSZIZE_SHIFT)))) +#define PM_DATA_PERPIPE_SEGSIZE_GET_PIPE1_SEGSZIZE(_ft_) (((_ft_).u32_0 >> (PM_DATA_PERPIPE_SEGSIZE_PIPE1_SEGSZIZE_SHIFT)) & 0x00000001U) + + +/* +PM Virtual Heap Buffer Offset. This buffer contains all the meta-data associated with each render target. +size is 11904/8 = 1488 Bytes. + +Natively the buffer supports up to 8-VCEs and 8-TEs scaling without changing HW. + +In case relevant PIPE N is not present, the corresponding space is just reserved. 
+*/ +typedef struct PM_DATA_VHEAP_BUFFER_TAG { + IMG_UINT32 u32_0; + IMG_UINT32 u32_1; + IMG_UINT32 u32_2; + IMG_UINT32 u32_3; + IMG_UINT32 u32_4; + IMG_UINT32 u32_5; + IMG_UINT32 u32_6; + IMG_UINT32 u32_7; + IMG_UINT32 u32_8; + IMG_UINT32 u32_9; + IMG_UINT32 u32_10; + IMG_UINT32 u32_11; + IMG_UINT32 u32_12; + IMG_UINT32 u32_13; + IMG_UINT32 u32_14; + IMG_UINT32 u32_15; + IMG_UINT32 u32_16; + IMG_UINT32 u32_17; + IMG_UINT32 u32_18; + IMG_UINT32 u32_19; + IMG_UINT32 u32_20; + IMG_UINT32 u32_21; + IMG_UINT32 u32_22; + IMG_UINT32 u32_23; + IMG_UINT32 u32_24; + IMG_UINT32 u32_25; + IMG_UINT32 u32_26; + IMG_UINT32 u32_27; + IMG_UINT32 u32_28; + IMG_UINT32 u32_29; + IMG_UINT32 u32_30; + IMG_UINT32 u32_31; + IMG_UINT32 u32_32; + IMG_UINT32 u32_33; + IMG_UINT32 u32_34; + IMG_UINT32 u32_35; + IMG_UINT32 u32_36; + IMG_UINT32 u32_37; + IMG_UINT32 u32_38; + IMG_UINT32 u32_39; + IMG_UINT32 u32_40; + IMG_UINT32 u32_41; + IMG_UINT32 u32_42; + IMG_UINT32 u32_43; + IMG_UINT32 u32_44; + IMG_UINT32 u32_45; + IMG_UINT32 u32_46; + IMG_UINT32 u32_47; + IMG_UINT32 u32_48; + IMG_UINT32 u32_49; + IMG_UINT32 u32_50; + IMG_UINT32 u32_51; + IMG_UINT32 u32_52; + IMG_UINT32 u32_53; + IMG_UINT32 u32_54; + IMG_UINT32 u32_55; + IMG_UINT32 u32_56; + IMG_UINT32 u32_57; + IMG_UINT32 u32_58; + IMG_UINT32 u32_59; + IMG_UINT32 u32_60; + IMG_UINT32 u32_61; + IMG_UINT32 u32_62; + IMG_UINT32 u32_63; + IMG_UINT32 u32_64; + IMG_UINT32 u32_65; + IMG_UINT32 u32_66; + IMG_UINT32 u32_67; + IMG_UINT32 u32_68; + IMG_UINT32 u32_69; + IMG_UINT32 u32_70; + IMG_UINT32 u32_71; + IMG_UINT32 u32_72; + IMG_UINT32 u32_73; + IMG_UINT32 u32_74; + IMG_UINT32 u32_75; + IMG_UINT32 u32_76; + IMG_UINT32 u32_77; + IMG_UINT32 u32_78; + IMG_UINT32 u32_79; + IMG_UINT32 u32_80; + IMG_UINT32 u32_81; + IMG_UINT32 u32_82; + IMG_UINT32 u32_83; + IMG_UINT32 u32_84; + IMG_UINT32 u32_85; + IMG_UINT32 u32_86; + IMG_UINT32 u32_87; + IMG_UINT32 u32_88; + IMG_UINT32 u32_89; + IMG_UINT32 u32_90; + IMG_UINT32 u32_91; + IMG_UINT32 u32_92; + 
IMG_UINT32 u32_93; + IMG_UINT32 u32_94; + IMG_UINT32 u32_95; + IMG_UINT32 u32_96; + IMG_UINT32 u32_97; + IMG_UINT32 u32_98; + IMG_UINT32 u32_99; + IMG_UINT32 u32_100; + IMG_UINT32 u32_101; + IMG_UINT32 u32_102; + IMG_UINT32 u32_103; + IMG_UINT32 u32_104; + IMG_UINT32 u32_105; + IMG_UINT32 u32_106; + IMG_UINT32 u32_107; + IMG_UINT32 u32_108; + IMG_UINT32 u32_109; + IMG_UINT32 u32_110; + IMG_UINT32 u32_111; + IMG_UINT32 u32_112; + IMG_UINT32 u32_113; + IMG_UINT32 u32_114; + IMG_UINT32 u32_115; + IMG_UINT32 u32_116; + IMG_UINT32 u32_117; + IMG_UINT32 u32_118; + IMG_UINT32 u32_119; + IMG_UINT32 u32_120; + IMG_UINT32 u32_121; + IMG_UINT32 u32_122; + IMG_UINT32 u32_123; + IMG_UINT32 u32_124; + IMG_UINT32 u32_125; + IMG_UINT32 u32_126; + IMG_UINT32 u32_127; + IMG_UINT32 u32_128; + IMG_UINT32 u32_129; + IMG_UINT32 u32_130; + IMG_UINT32 u32_131; + IMG_UINT32 u32_132; + IMG_UINT32 u32_133; + IMG_UINT32 u32_134; + IMG_UINT32 u32_135; + IMG_UINT32 u32_136; + IMG_UINT32 u32_137; + IMG_UINT32 u32_138; + IMG_UINT32 u32_139; + IMG_UINT32 u32_140; + IMG_UINT32 u32_141; + IMG_UINT32 u32_142; + IMG_UINT32 u32_143; + IMG_UINT32 u32_144; + IMG_UINT32 u32_145; + IMG_UINT32 u32_146; + IMG_UINT32 u32_147; + IMG_UINT32 u32_148; + IMG_UINT32 u32_149; + IMG_UINT32 u32_150; + IMG_UINT32 u32_151; + IMG_UINT32 u32_152; + IMG_UINT32 u32_153; + IMG_UINT32 u32_154; + IMG_UINT32 u32_155; + IMG_UINT32 u32_156; + IMG_UINT32 u32_157; + IMG_UINT32 u32_158; + IMG_UINT32 u32_159; + IMG_UINT32 u32_160; + IMG_UINT32 u32_161; + IMG_UINT32 u32_162; + IMG_UINT32 u32_163; + IMG_UINT32 u32_164; + IMG_UINT32 u32_165; + IMG_UINT32 u32_166; + IMG_UINT32 u32_167; + IMG_UINT32 u32_168; + IMG_UINT32 u32_169; + IMG_UINT32 u32_170; + IMG_UINT32 u32_171; + IMG_UINT32 u32_172; + IMG_UINT32 u32_173; + IMG_UINT32 u32_174; + IMG_UINT32 u32_175; + IMG_UINT32 u32_176; + IMG_UINT32 u32_177; + IMG_UINT32 u32_178; + IMG_UINT32 u32_179; + IMG_UINT32 u32_180; + IMG_UINT32 u32_181; + IMG_UINT32 u32_182; + IMG_UINT32 u32_183; + 
IMG_UINT32 u32_184; + IMG_UINT32 u32_185; + IMG_UINT32 u32_186; + IMG_UINT32 u32_187; + IMG_UINT32 u32_188; + IMG_UINT32 u32_189; + IMG_UINT32 u32_190; + IMG_UINT32 u32_191; + IMG_UINT32 u32_192; + IMG_UINT32 u32_193; + IMG_UINT32 u32_194; + IMG_UINT32 u32_195; + IMG_UINT32 u32_196; + IMG_UINT32 u32_197; + IMG_UINT32 u32_198; + IMG_UINT32 u32_199; + IMG_UINT32 u32_200; + IMG_UINT32 u32_201; + IMG_UINT32 u32_202; + IMG_UINT32 u32_203; + IMG_UINT32 u32_204; + IMG_UINT32 u32_205; + IMG_UINT32 u32_206; + IMG_UINT32 u32_207; + IMG_UINT32 u32_208; + IMG_UINT32 u32_209; + IMG_UINT32 u32_210; + IMG_UINT32 u32_211; + IMG_UINT32 u32_212; + IMG_UINT32 u32_213; + IMG_UINT32 u32_214; + IMG_UINT32 u32_215; + IMG_UINT32 u32_216; + IMG_UINT32 u32_217; + IMG_UINT32 u32_218; + IMG_UINT32 u32_219; + IMG_UINT32 u32_220; + IMG_UINT32 u32_221; + IMG_UINT32 u32_222; + IMG_UINT32 u32_223; + IMG_UINT32 u32_224; + IMG_UINT32 u32_225; + IMG_UINT32 u32_226; + IMG_UINT32 u32_227; + IMG_UINT32 u32_228; + IMG_UINT32 u32_229; + IMG_UINT32 u32_230; + IMG_UINT32 u32_231; + IMG_UINT32 u32_232; + IMG_UINT32 u32_233; + IMG_UINT32 u32_234; + IMG_UINT32 u32_235; + IMG_UINT32 u32_236; + IMG_UINT32 u32_237; + IMG_UINT32 u32_238; + IMG_UINT32 u32_239; + IMG_UINT32 u32_240; + IMG_UINT32 u32_241; + IMG_UINT32 u32_242; + IMG_UINT32 u32_243; + IMG_UINT32 u32_244; + IMG_UINT32 u32_245; + IMG_UINT32 u32_246; + IMG_UINT32 u32_247; + IMG_UINT32 u32_248; + IMG_UINT32 u32_249; + IMG_UINT32 u32_250; + IMG_UINT32 u32_251; + IMG_UINT32 u32_252; + IMG_UINT32 u32_253; + IMG_UINT32 u32_254; + IMG_UINT32 u32_255; + IMG_UINT32 u32_256; + IMG_UINT32 u32_257; + IMG_UINT32 u32_258; + IMG_UINT32 u32_259; + IMG_UINT32 u32_260; + IMG_UINT32 u32_261; + IMG_UINT32 u32_262; + IMG_UINT32 u32_263; + IMG_UINT32 u32_264; + IMG_UINT32 u32_265; + IMG_UINT32 u32_266; + IMG_UINT32 u32_267; + IMG_UINT32 u32_268; + IMG_UINT32 u32_269; + IMG_UINT32 u32_270; + IMG_UINT32 u32_271; + IMG_UINT32 u32_272; + IMG_UINT32 u32_273; + IMG_UINT32 u32_274; 
+ IMG_UINT32 u32_275; + IMG_UINT32 u32_276; + IMG_UINT32 u32_277; + IMG_UINT32 u32_278; + IMG_UINT32 u32_279; + IMG_UINT32 u32_280; + IMG_UINT32 u32_281; + IMG_UINT32 u32_282; + IMG_UINT32 u32_283; + IMG_UINT32 u32_284; + IMG_UINT32 u32_285; + IMG_UINT32 u32_286; + IMG_UINT32 u32_287; + IMG_UINT32 u32_288; + IMG_UINT32 u32_289; + IMG_UINT32 u32_290; + IMG_UINT32 u32_291; + IMG_UINT32 u32_292; + IMG_UINT32 u32_293; + IMG_UINT32 u32_294; + IMG_UINT32 u32_295; + IMG_UINT32 u32_296; + IMG_UINT32 u32_297; + IMG_UINT32 u32_298; + IMG_UINT32 u32_299; + IMG_UINT32 u32_300; + IMG_UINT32 u32_301; + IMG_UINT32 u32_302; + IMG_UINT32 u32_303; + IMG_UINT32 u32_304; + IMG_UINT32 u32_305; + IMG_UINT32 u32_306; + IMG_UINT32 u32_307; + IMG_UINT32 u32_308; + IMG_UINT32 u32_309; + IMG_UINT32 u32_310; + IMG_UINT32 u32_311; + IMG_UINT32 u32_312; + IMG_UINT32 u32_313; + IMG_UINT32 u32_314; + IMG_UINT32 u32_315; + IMG_UINT32 u32_316; + IMG_UINT32 u32_317; + IMG_UINT32 u32_318; + IMG_UINT32 u32_319; + IMG_UINT32 u32_320; + IMG_UINT32 u32_321; + IMG_UINT32 u32_322; + IMG_UINT32 u32_323; + IMG_UINT32 u32_324; + IMG_UINT32 u32_325; + IMG_UINT32 u32_326; + IMG_UINT32 u32_327; + IMG_UINT32 u32_328; + IMG_UINT32 u32_329; + IMG_UINT32 u32_330; + IMG_UINT32 u32_331; + IMG_UINT32 u32_332; + IMG_UINT32 u32_333; + IMG_UINT32 u32_334; + IMG_UINT32 u32_335; + IMG_UINT32 u32_336; + IMG_UINT32 u32_337; + IMG_UINT32 u32_338; + IMG_UINT32 u32_339; + IMG_UINT32 u32_340; + IMG_UINT32 u32_341; + IMG_UINT32 u32_342; + IMG_UINT32 u32_343; + IMG_UINT32 u32_344; + IMG_UINT32 u32_345; + IMG_UINT32 u32_346; + IMG_UINT32 u32_347; + IMG_UINT32 u32_348; + IMG_UINT32 u32_349; + IMG_UINT32 u32_350; + IMG_UINT32 u32_351; + IMG_UINT32 u32_352; + IMG_UINT32 u32_353; + IMG_UINT32 u32_354; + IMG_UINT32 u32_355; + IMG_UINT32 u32_356; + IMG_UINT32 u32_357; + IMG_UINT32 u32_358; + IMG_UINT32 u32_359; + IMG_UINT32 u32_360; + IMG_UINT32 u32_361; + IMG_UINT32 u32_362; + IMG_UINT32 u32_363; + IMG_UINT32 u32_364; + IMG_UINT32 
u32_365; + IMG_UINT32 u32_366; + IMG_UINT32 u32_367; + IMG_UINT32 u32_368; + IMG_UINT32 u32_369; + IMG_UINT32 u32_370; + IMG_UINT32 u32_371; +} PM_DATA_VHEAP_BUFFER; + +/* +TE7 opened page0 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_UFSTACK_WOFF (371U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_TE7_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_371 = (((_ft_).u32_371 & PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE7_INFLIGHT0_UFSTACK(_ft_) (((_ft_).u32_371 >> (PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U) +/* +TE7 opened page0 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE3_W0_WOFF (370U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE3_W1_WOFF (371U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE7_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_370 = (((_ft_).u32_370 & PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_371 = (((_ft_).u32_371 & PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE7_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_370 >> (20)) | ((IMG_UINT64)((_ft_).u32_371 & 0x0000ffffU ) << (12))) +/* +TE7 opened page0 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE2_W0_WOFF (369U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE2_W1_WOFF (370U) +#define 
PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE7_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_369 = (((_ft_).u32_369 & PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_370 = (((_ft_).u32_370 & PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE7_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_369 >> (24)) | ((IMG_UINT64)((_ft_).u32_370 & 0x000fffffU ) << (8))) +/* +TE7 opened page0 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE1_W0_WOFF (368U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE1_W1_WOFF (369U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE7_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_368 = (((_ft_).u32_368 & PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_369 = (((_ft_).u32_369 & PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE7_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_368 >> (28)) | ((IMG_UINT64)((_ft_).u32_369 & 0x00ffffffU ) << (4))) +/* +TE7 opened page0 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE0_WOFF (368U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE0_CLRMSK 
(0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE7_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_368 = (((_ft_).u32_368 & PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE7_INFLIGHT0_PPAGE0(_ft_) (((_ft_).u32_368 >> (PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +TE7 opened page0 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PAGE_WOFF (364U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE7_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_364 = (((_ft_).u32_364 & PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE7_INFLIGHT0_PAGE(_ft_) (((_ft_).u32_364 >> (PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU) +/* +TE6 opened page0 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_UFSTACK_WOFF (363U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_TE6_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_363 = (((_ft_).u32_363 & PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE6_INFLIGHT0_UFSTACK(_ft_) (((_ft_).u32_363 >> (PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U) +/* +TE6 opened page0 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE3_W0_WOFF (362U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE3_W1_WOFF (363U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE3_W1_SHIFT (0U) +#define 
PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE6_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_362 = (((_ft_).u32_362 & PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_363 = (((_ft_).u32_363 & PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE6_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_362 >> (20)) | ((IMG_UINT64)((_ft_).u32_363 & 0x0000ffffU ) << (12))) +/* +TE6 opened page0 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE2_W0_WOFF (361U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE2_W1_WOFF (362U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE6_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_361 = (((_ft_).u32_361 & PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_362 = (((_ft_).u32_362 & PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE6_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_361 >> (24)) | ((IMG_UINT64)((_ft_).u32_362 & 0x000fffffU ) << (8))) +/* +TE6 opened page0 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE1_W0_WOFF (360U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE1_W1_WOFF (361U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE1_W1_SHIFT (0U) +#define 
PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE6_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_360 = (((_ft_).u32_360 & PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_361 = (((_ft_).u32_361 & PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE6_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_360 >> (28)) | ((IMG_UINT64)((_ft_).u32_361 & 0x00ffffffU ) << (4))) +/* +TE6 opened page0 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE0_WOFF (360U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE6_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_360 = (((_ft_).u32_360 & PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE6_INFLIGHT0_PPAGE0(_ft_) (((_ft_).u32_360 >> (PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +TE6 opened page0 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PAGE_WOFF (356U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE6_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_356 = (((_ft_).u32_356 & PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE6_INFLIGHT0_PAGE(_ft_) (((_ft_).u32_356 >> (PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU) +/* +TE5 opened page0 struct: unified stack bit +*/ +#define 
PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_UFSTACK_WOFF (355U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_TE5_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_355 = (((_ft_).u32_355 & PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE5_INFLIGHT0_UFSTACK(_ft_) (((_ft_).u32_355 >> (PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U) +/* +TE5 opened page0 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE3_W0_WOFF (354U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE3_W1_WOFF (355U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE5_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_354 = (((_ft_).u32_354 & PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_355 = (((_ft_).u32_355 & PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE5_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_354 >> (20)) | ((IMG_UINT64)((_ft_).u32_355 & 0x0000ffffU ) << (12))) +/* +TE5 opened page0 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE2_W0_WOFF (353U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE2_W1_WOFF (354U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define 
PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE5_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_353 = (((_ft_).u32_353 & PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_354 = (((_ft_).u32_354 & PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE5_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_353 >> (24)) | ((IMG_UINT64)((_ft_).u32_354 & 0x000fffffU ) << (8))) +/* +TE5 opened page0 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE1_W0_WOFF (352U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE1_W1_WOFF (353U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE5_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_352 = (((_ft_).u32_352 & PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_353 = (((_ft_).u32_353 & PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE5_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_352 >> (28)) | ((IMG_UINT64)((_ft_).u32_353 & 0x00ffffffU ) << (4))) +/* +TE5 opened page0 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE0_WOFF (352U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE5_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_352 = (((_ft_).u32_352 & PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << 
(PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE5_INFLIGHT0_PPAGE0(_ft_) (((_ft_).u32_352 >> (PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +TE5 opened page0 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PAGE_WOFF (348U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE5_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_348 = (((_ft_).u32_348 & PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE5_INFLIGHT0_PAGE(_ft_) (((_ft_).u32_348 >> (PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU) +/* +TE4 opened page0 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_UFSTACK_WOFF (347U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_TE4_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_347 = (((_ft_).u32_347 & PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE4_INFLIGHT0_UFSTACK(_ft_) (((_ft_).u32_347 >> (PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U) +/* +TE4 opened page0 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE3_W0_WOFF (346U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE3_W1_WOFF (347U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE4_INFLIGHT0_PPAGE3(_ft_,_x_) { 
((_ft_).u32_346 = (((_ft_).u32_346 & PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_347 = (((_ft_).u32_347 & PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE4_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_346 >> (20)) | ((IMG_UINT64)((_ft_).u32_347 & 0x0000ffffU ) << (12))) +/* +TE4 opened page0 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE2_W0_WOFF (345U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE2_W1_WOFF (346U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE4_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_345 = (((_ft_).u32_345 & PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_346 = (((_ft_).u32_346 & PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE4_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_345 >> (24)) | ((IMG_UINT64)((_ft_).u32_346 & 0x000fffffU ) << (8))) +/* +TE4 opened page0 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE1_W0_WOFF (344U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE1_W1_WOFF (345U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE4_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_344 = (((_ft_).u32_344 & 
PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_345 = (((_ft_).u32_345 & PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE4_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_344 >> (28)) | ((IMG_UINT64)((_ft_).u32_345 & 0x00ffffffU ) << (4))) +/* +TE4 opened page0 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE0_WOFF (344U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE4_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_344 = (((_ft_).u32_344 & PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE4_INFLIGHT0_PPAGE0(_ft_) (((_ft_).u32_344 >> (PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +TE4 opened page0 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PAGE_WOFF (340U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE4_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_340 = (((_ft_).u32_340 & PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE4_INFLIGHT0_PAGE(_ft_) (((_ft_).u32_340 >> (PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU) +/* +TE3 opened page0 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_UFSTACK_WOFF (339U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_TE3_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_339 = 
(((_ft_).u32_339 & PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE3_INFLIGHT0_UFSTACK(_ft_) (((_ft_).u32_339 >> (PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U) +/* +TE3 opened page0 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE3_W0_WOFF (338U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE3_W1_WOFF (339U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE3_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_338 = (((_ft_).u32_338 & PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_339 = (((_ft_).u32_339 & PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE3_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_338 >> (20)) | ((IMG_UINT64)((_ft_).u32_339 & 0x0000ffffU ) << (12))) +/* +TE3 opened page0 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE2_W0_WOFF (337U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE2_W1_WOFF (338U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE3_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_337 = (((_ft_).u32_337 & PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_338 = (((_ft_).u32_338 
& PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE3_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_337 >> (24)) | ((IMG_UINT64)((_ft_).u32_338 & 0x000fffffU ) << (8))) +/* +TE3 opened page0 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE1_W0_WOFF (336U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE1_W1_WOFF (337U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE3_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_336 = (((_ft_).u32_336 & PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_337 = (((_ft_).u32_337 & PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE3_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_336 >> (28)) | ((IMG_UINT64)((_ft_).u32_337 & 0x00ffffffU ) << (4))) +/* +TE3 opened page0 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE0_WOFF (336U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE3_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_336 = (((_ft_).u32_336 & PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE3_INFLIGHT0_PPAGE0(_ft_) (((_ft_).u32_336 >> (PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +TE3 opened page0 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PAGE_WOFF (332U) +#define 
PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE3_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_332 = (((_ft_).u32_332 & PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE3_INFLIGHT0_PAGE(_ft_) (((_ft_).u32_332 >> (PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU) +/* +TE2 opened page0 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_UFSTACK_WOFF (331U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_TE2_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_331 = (((_ft_).u32_331 & PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE2_INFLIGHT0_UFSTACK(_ft_) (((_ft_).u32_331 >> (PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U) +/* +TE2 opened page0 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE3_W0_WOFF (330U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE3_W1_WOFF (331U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE2_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_330 = (((_ft_).u32_330 & PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_331 = (((_ft_).u32_331 & PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define 
PM_DATA_VHEAP_BUFFER_GET_TE2_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_330 >> (20)) | ((IMG_UINT64)((_ft_).u32_331 & 0x0000ffffU ) << (12))) +/* +TE2 opened page0 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE2_W0_WOFF (329U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE2_W1_WOFF (330U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE2_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_329 = (((_ft_).u32_329 & PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_330 = (((_ft_).u32_330 & PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE2_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_329 >> (24)) | ((IMG_UINT64)((_ft_).u32_330 & 0x000fffffU ) << (8))) +/* +TE2 opened page0 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE1_W0_WOFF (328U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE1_W1_WOFF (329U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE2_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_328 = (((_ft_).u32_328 & PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_329 = (((_ft_).u32_329 & PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define 
PM_DATA_VHEAP_BUFFER_GET_TE2_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_328 >> (28)) | ((IMG_UINT64)((_ft_).u32_329 & 0x00ffffffU ) << (4))) +/* +TE2 opened page0 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE0_WOFF (328U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE2_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_328 = (((_ft_).u32_328 & PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE2_INFLIGHT0_PPAGE0(_ft_) (((_ft_).u32_328 >> (PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +TE2 opened page0 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PAGE_WOFF (324U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE2_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_324 = (((_ft_).u32_324 & PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE2_INFLIGHT0_PAGE(_ft_) (((_ft_).u32_324 >> (PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU) +/* +TE1 opened page0 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_UFSTACK_WOFF (323U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_TE1_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_323 = (((_ft_).u32_323 & PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE1_INFLIGHT0_UFSTACK(_ft_) (((_ft_).u32_323 >> 
(PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U) +/* +TE1 opened page0 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE3_W0_WOFF (322U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE3_W1_WOFF (323U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE1_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_322 = (((_ft_).u32_322 & PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_323 = (((_ft_).u32_323 & PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE1_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_322 >> (20)) | ((IMG_UINT64)((_ft_).u32_323 & 0x0000ffffU ) << (12))) +/* +TE1 opened page0 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE2_W0_WOFF (321U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE2_W1_WOFF (322U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE1_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_321 = (((_ft_).u32_321 & PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_322 = (((_ft_).u32_322 & PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE1_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_321 >> (24)) | ((IMG_UINT64)((_ft_).u32_322 & 
0x000fffffU ) << (8))) +/* +TE1 opened page0 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE1_W0_WOFF (320U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE1_W1_WOFF (321U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE1_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_320 = (((_ft_).u32_320 & PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_321 = (((_ft_).u32_321 & PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE1_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_320 >> (28)) | ((IMG_UINT64)((_ft_).u32_321 & 0x00ffffffU ) << (4))) +/* +TE1 opened page0 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE0_WOFF (320U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE1_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_320 = (((_ft_).u32_320 & PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE1_INFLIGHT0_PPAGE0(_ft_) (((_ft_).u32_320 >> (PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +TE1 opened page0 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PAGE_WOFF (316U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE1_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_316 = (((_ft_).u32_316 & 
PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE1_INFLIGHT0_PAGE(_ft_) (((_ft_).u32_316 >> (PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU) +/* +TE0 opened page0 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_UFSTACK_WOFF (315U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_TE0_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_315 = (((_ft_).u32_315 & PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE0_INFLIGHT0_UFSTACK(_ft_) (((_ft_).u32_315 >> (PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U) +/* +TE0 opened page0 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE3_W0_WOFF (314U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE3_W1_WOFF (315U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE0_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_314 = (((_ft_).u32_314 & PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_315 = (((_ft_).u32_315 & PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE0_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_314 >> (20)) | ((IMG_UINT64)((_ft_).u32_315 & 0x0000ffffU ) << (12))) +/* +TE0 opened page0 struct: physical page 2 +*/ +#define 
PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE2_W0_WOFF (313U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE2_W1_WOFF (314U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE0_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_313 = (((_ft_).u32_313 & PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_314 = (((_ft_).u32_314 & PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE0_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_313 >> (24)) | ((IMG_UINT64)((_ft_).u32_314 & 0x000fffffU ) << (8))) +/* +TE0 opened page0 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE1_W0_WOFF (312U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE1_W1_WOFF (313U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE0_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_312 = (((_ft_).u32_312 & PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_313 = (((_ft_).u32_313 & PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_TE0_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_312 >> (28)) | ((IMG_UINT64)((_ft_).u32_313 & 0x00ffffffU ) << (4))) +/* +TE0 opened page0 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE0_WOFF 
(312U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE0_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_312 = (((_ft_).u32_312 & PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE0_INFLIGHT0_PPAGE0(_ft_) (((_ft_).u32_312 >> (PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +TE0 opened page0 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PAGE_WOFF (308U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_TE0_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_308 = (((_ft_).u32_308 & PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE0_INFLIGHT0_PAGE(_ft_) (((_ft_).u32_308 >> (PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE7 opened page3 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_UFSTACK_WOFF (307U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT3_UFSTACK(_ft_,_x_) ((_ft_).u32_307 = (((_ft_).u32_307 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT3_UFSTACK(_ft_) (((_ft_).u32_307 >> (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE7 opened page3 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE3_W0_WOFF (306U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE3_W1_WOFF (307U) +#define 
PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT3_PPAGE3(_ft_,_x_) { ((_ft_).u32_306 = (((_ft_).u32_306 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_307 = (((_ft_).u32_307 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT3_PPAGE3(_ft_) (((_ft_).u32_306 >> (20)) | ((IMG_UINT64)((_ft_).u32_307 & 0x0000ffffU ) << (12))) +/* +VCE7 opened page3 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE2_W0_WOFF (305U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE2_W1_WOFF (306U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT3_PPAGE2(_ft_,_x_) { ((_ft_).u32_305 = (((_ft_).u32_305 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_306 = (((_ft_).u32_306 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT3_PPAGE2(_ft_) (((_ft_).u32_305 >> (24)) | ((IMG_UINT64)((_ft_).u32_306 & 0x000fffffU ) << (8))) +/* +VCE7 opened page3 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE1_W0_WOFF (304U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE1_W1_WOFF (305U) +#define 
PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT3_PPAGE1(_ft_,_x_) { ((_ft_).u32_304 = (((_ft_).u32_304 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_305 = (((_ft_).u32_305 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT3_PPAGE1(_ft_) (((_ft_).u32_304 >> (28)) | ((IMG_UINT64)((_ft_).u32_305 & 0x00ffffffU ) << (4))) +/* +VCE7 opened page3 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE0_WOFF (304U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT3_PPAGE0(_ft_,_x_) ((_ft_).u32_304 = (((_ft_).u32_304 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT3_PPAGE0(_ft_) (((_ft_).u32_304 >> (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE7 opened page3 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PAGE_WOFF (300U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT3_PAGE(_ft_,_x_) ((_ft_).u32_300 = (((_ft_).u32_300 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT3_PAGE(_ft_) (((_ft_).u32_300 >> 
(PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE7 opened page2 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_UFSTACK_WOFF (299U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT2_UFSTACK(_ft_,_x_) ((_ft_).u32_299 = (((_ft_).u32_299 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT2_UFSTACK(_ft_) (((_ft_).u32_299 >> (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE7 opened page2 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE3_W0_WOFF (298U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE3_W1_WOFF (299U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT2_PPAGE3(_ft_,_x_) { ((_ft_).u32_298 = (((_ft_).u32_298 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_299 = (((_ft_).u32_299 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT2_PPAGE3(_ft_) (((_ft_).u32_298 >> (20)) | ((IMG_UINT64)((_ft_).u32_299 & 0x0000ffffU ) << (12))) +/* +VCE7 opened page2 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE2_W0_WOFF (297U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE2_W1_WOFF (298U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE2_W0_SHIFT (24U) +#define 
PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT2_PPAGE2(_ft_,_x_) { ((_ft_).u32_297 = (((_ft_).u32_297 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_298 = (((_ft_).u32_298 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT2_PPAGE2(_ft_) (((_ft_).u32_297 >> (24)) | ((IMG_UINT64)((_ft_).u32_298 & 0x000fffffU ) << (8))) +/* +VCE7 opened page2 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE1_W0_WOFF (296U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE1_W1_WOFF (297U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT2_PPAGE1(_ft_,_x_) { ((_ft_).u32_296 = (((_ft_).u32_296 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_297 = (((_ft_).u32_297 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT2_PPAGE1(_ft_) (((_ft_).u32_296 >> (28)) | ((IMG_UINT64)((_ft_).u32_297 & 0x00ffffffU ) << (4))) +/* +VCE7 opened page2 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE0_WOFF (296U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE0_CLRMSK (0xF0000000U) +#define 
PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT2_PPAGE0(_ft_,_x_) ((_ft_).u32_296 = (((_ft_).u32_296 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT2_PPAGE0(_ft_) (((_ft_).u32_296 >> (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE7 opened page2 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PAGE_WOFF (292U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT2_PAGE(_ft_,_x_) ((_ft_).u32_292 = (((_ft_).u32_292 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT2_PAGE(_ft_) (((_ft_).u32_292 >> (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE7 opened page1 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_UFSTACK_WOFF (291U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT1_UFSTACK(_ft_,_x_) ((_ft_).u32_291 = (((_ft_).u32_291 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT1_UFSTACK(_ft_) (((_ft_).u32_291 >> (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE7 opened page1 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE3_W0_WOFF (290U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE3_W1_WOFF (291U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE3_W1_SHIFT (0U) +#define 
PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT1_PPAGE3(_ft_,_x_) { ((_ft_).u32_290 = (((_ft_).u32_290 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_291 = (((_ft_).u32_291 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT1_PPAGE3(_ft_) (((_ft_).u32_290 >> (20)) | ((IMG_UINT64)((_ft_).u32_291 & 0x0000ffffU ) << (12))) +/* +VCE7 opened page1 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE2_W0_WOFF (289U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE2_W1_WOFF (290U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT1_PPAGE2(_ft_,_x_) { ((_ft_).u32_289 = (((_ft_).u32_289 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_290 = (((_ft_).u32_290 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT1_PPAGE2(_ft_) (((_ft_).u32_289 >> (24)) | ((IMG_UINT64)((_ft_).u32_290 & 0x000fffffU ) << (8))) +/* +VCE7 opened page1 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE1_W0_WOFF (288U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE1_W1_WOFF (289U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE1_W1_SHIFT (0U) +#define 
PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT1_PPAGE1(_ft_,_x_) { ((_ft_).u32_288 = (((_ft_).u32_288 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_289 = (((_ft_).u32_289 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT1_PPAGE1(_ft_) (((_ft_).u32_288 >> (28)) | ((IMG_UINT64)((_ft_).u32_289 & 0x00ffffffU ) << (4))) +/* +VCE7 opened page1 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE0_WOFF (288U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT1_PPAGE0(_ft_,_x_) ((_ft_).u32_288 = (((_ft_).u32_288 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT1_PPAGE0(_ft_) (((_ft_).u32_288 >> (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE7 opened page1 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PAGE_WOFF (284U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT1_PAGE(_ft_,_x_) ((_ft_).u32_284 = (((_ft_).u32_284 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT1_PAGE(_ft_) (((_ft_).u32_284 >> (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE7 opened page0 struct: unified stack bit +*/ +#define 
PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_UFSTACK_WOFF (283U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_283 = (((_ft_).u32_283 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT0_UFSTACK(_ft_) (((_ft_).u32_283 >> (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE7 opened page0 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE3_W0_WOFF (282U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE3_W1_WOFF (283U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_282 = (((_ft_).u32_282 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_283 = (((_ft_).u32_283 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_282 >> (20)) | ((IMG_UINT64)((_ft_).u32_283 & 0x0000ffffU ) << (12))) +/* +VCE7 opened page0 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE2_W0_WOFF (281U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE2_W1_WOFF (282U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define 
PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_281 = (((_ft_).u32_281 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_282 = (((_ft_).u32_282 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_281 >> (24)) | ((IMG_UINT64)((_ft_).u32_282 & 0x000fffffU ) << (8))) +/* +VCE7 opened page0 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE1_W0_WOFF (280U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE1_W1_WOFF (281U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_280 = (((_ft_).u32_280 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_281 = (((_ft_).u32_281 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_280 >> (28)) | ((IMG_UINT64)((_ft_).u32_281 & 0x00ffffffU ) << (4))) +/* +VCE7 opened page0 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE0_WOFF (280U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_280 = (((_ft_).u32_280 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & 
(0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT0_PPAGE0(_ft_) (((_ft_).u32_280 >> (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE7 opened page0 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PAGE_WOFF (276U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_276 = (((_ft_).u32_276 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT0_PAGE(_ft_) (((_ft_).u32_276 >> (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE6 opened page1 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_UFSTACK_WOFF (259U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT1_UFSTACK(_ft_,_x_) ((_ft_).u32_259 = (((_ft_).u32_259 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT1_UFSTACK(_ft_) (((_ft_).u32_259 >> (PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE6 opened page1 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE3_W0_WOFF (258U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE3_W1_WOFF (259U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define 
PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT1_PPAGE3(_ft_,_x_) { ((_ft_).u32_258 = (((_ft_).u32_258 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_259 = (((_ft_).u32_259 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT1_PPAGE3(_ft_) (((_ft_).u32_258 >> (20)) | ((IMG_UINT64)((_ft_).u32_259 & 0x0000ffffU ) << (12))) +/* +VCE6 opened page1 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE2_W0_WOFF (257U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE2_W1_WOFF (258U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT1_PPAGE2(_ft_,_x_) { ((_ft_).u32_257 = (((_ft_).u32_257 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_258 = (((_ft_).u32_258 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT1_PPAGE2(_ft_) (((_ft_).u32_257 >> (24)) | ((IMG_UINT64)((_ft_).u32_258 & 0x000fffffU ) << (8))) +/* +VCE6 opened page1 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE1_W0_WOFF (256U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE1_W1_WOFF (257U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE1_W1_CLRMSK (0xFF000000U) +#define 
PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT1_PPAGE1(_ft_,_x_) { ((_ft_).u32_256 = (((_ft_).u32_256 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_257 = (((_ft_).u32_257 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT1_PPAGE1(_ft_) (((_ft_).u32_256 >> (28)) | ((IMG_UINT64)((_ft_).u32_257 & 0x00ffffffU ) << (4))) +/* +VCE6 opened page1 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE0_WOFF (256U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT1_PPAGE0(_ft_,_x_) ((_ft_).u32_256 = (((_ft_).u32_256 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT1_PPAGE0(_ft_) (((_ft_).u32_256 >> (PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE6 opened page1 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PAGE_WOFF (252U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT1_PAGE(_ft_,_x_) ((_ft_).u32_252 = (((_ft_).u32_252 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT1_PAGE(_ft_) (((_ft_).u32_252 >> (PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE6 opened page0 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_UFSTACK_WOFF (251U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_UFSTACK_SHIFT (16U) +#define 
PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_251 = (((_ft_).u32_251 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT0_UFSTACK(_ft_) (((_ft_).u32_251 >> (PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE6 opened page0 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE3_W0_WOFF (250U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE3_W1_WOFF (251U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_250 = (((_ft_).u32_250 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_251 = (((_ft_).u32_251 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_250 >> (20)) | ((IMG_UINT64)((_ft_).u32_251 & 0x0000ffffU ) << (12))) +/* +VCE6 opened page0 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE2_W0_WOFF (249U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE2_W1_WOFF (250U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT0_PPAGE2(_ft_,_x_) { 
((_ft_).u32_249 = (((_ft_).u32_249 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_250 = (((_ft_).u32_250 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_249 >> (24)) | ((IMG_UINT64)((_ft_).u32_250 & 0x000fffffU ) << (8))) +/* +VCE6 opened page0 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE1_W0_WOFF (248U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE1_W1_WOFF (249U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_248 = (((_ft_).u32_248 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_249 = (((_ft_).u32_249 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_248 >> (28)) | ((IMG_UINT64)((_ft_).u32_249 & 0x00ffffffU ) << (4))) +/* +VCE6 opened page0 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE0_WOFF (248U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_248 = (((_ft_).u32_248 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT0_PPAGE0(_ft_) 
(((_ft_).u32_248 >> (PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE6 opened page0 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PAGE_WOFF (244U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_244 = (((_ft_).u32_244 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT0_PAGE(_ft_) (((_ft_).u32_244 >> (PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE5 opened page3 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_UFSTACK_WOFF (243U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT3_UFSTACK(_ft_,_x_) ((_ft_).u32_243 = (((_ft_).u32_243 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT3_UFSTACK(_ft_) (((_ft_).u32_243 >> (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE5 opened page3 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE3_W0_WOFF (242U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE3_W1_WOFF (243U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT3_PPAGE3(_ft_,_x_) { ((_ft_).u32_242 = (((_ft_).u32_242 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE3_W0_CLRMSK ) | 
(((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_243 = (((_ft_).u32_243 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT3_PPAGE3(_ft_) (((_ft_).u32_242 >> (20)) | ((IMG_UINT64)((_ft_).u32_243 & 0x0000ffffU ) << (12))) +/* +VCE5 opened page3 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE2_W0_WOFF (241U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE2_W1_WOFF (242U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT3_PPAGE2(_ft_,_x_) { ((_ft_).u32_241 = (((_ft_).u32_241 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_242 = (((_ft_).u32_242 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT3_PPAGE2(_ft_) (((_ft_).u32_241 >> (24)) | ((IMG_UINT64)((_ft_).u32_242 & 0x000fffffU ) << (8))) +/* +VCE5 opened page3 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE1_W0_WOFF (240U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE1_W1_WOFF (241U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT3_PPAGE1(_ft_,_x_) { ((_ft_).u32_240 = (((_ft_).u32_240 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE1_W0_CLRMSK ) | (((_x_) & 
(IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_241 = (((_ft_).u32_241 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT3_PPAGE1(_ft_) (((_ft_).u32_240 >> (28)) | ((IMG_UINT64)((_ft_).u32_241 & 0x00ffffffU ) << (4))) +/* +VCE5 opened page3 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE0_WOFF (240U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT3_PPAGE0(_ft_,_x_) ((_ft_).u32_240 = (((_ft_).u32_240 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT3_PPAGE0(_ft_) (((_ft_).u32_240 >> (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE5 opened page3 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PAGE_WOFF (236U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT3_PAGE(_ft_,_x_) ((_ft_).u32_236 = (((_ft_).u32_236 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT3_PAGE(_ft_) (((_ft_).u32_236 >> (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE5 opened page2 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_UFSTACK_WOFF (235U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT2_UFSTACK(_ft_,_x_) ((_ft_).u32_235 = (((_ft_).u32_235 & 
PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT2_UFSTACK(_ft_) (((_ft_).u32_235 >> (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE5 opened page2 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE3_W0_WOFF (234U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE3_W1_WOFF (235U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT2_PPAGE3(_ft_,_x_) { ((_ft_).u32_234 = (((_ft_).u32_234 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_235 = (((_ft_).u32_235 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT2_PPAGE3(_ft_) (((_ft_).u32_234 >> (20)) | ((IMG_UINT64)((_ft_).u32_235 & 0x0000ffffU ) << (12))) +/* +VCE5 opened page2 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE2_W0_WOFF (233U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE2_W1_WOFF (234U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT2_PPAGE2(_ft_,_x_) { ((_ft_).u32_233 = (((_ft_).u32_233 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_234 = 
(((_ft_).u32_234 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT2_PPAGE2(_ft_) (((_ft_).u32_233 >> (24)) | ((IMG_UINT64)((_ft_).u32_234 & 0x000fffffU ) << (8))) +/* +VCE5 opened page2 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE1_W0_WOFF (232U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE1_W1_WOFF (233U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT2_PPAGE1(_ft_,_x_) { ((_ft_).u32_232 = (((_ft_).u32_232 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_233 = (((_ft_).u32_233 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT2_PPAGE1(_ft_) (((_ft_).u32_232 >> (28)) | ((IMG_UINT64)((_ft_).u32_233 & 0x00ffffffU ) << (4))) +/* +VCE5 opened page2 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE0_WOFF (232U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT2_PPAGE0(_ft_,_x_) ((_ft_).u32_232 = (((_ft_).u32_232 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT2_PPAGE0(_ft_) (((_ft_).u32_232 >> (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE5 opened page2 struct: virtual page number +*/ +#define 
PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PAGE_WOFF (228U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT2_PAGE(_ft_,_x_) ((_ft_).u32_228 = (((_ft_).u32_228 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT2_PAGE(_ft_) (((_ft_).u32_228 >> (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE5 opened page1 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_UFSTACK_WOFF (227U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT1_UFSTACK(_ft_,_x_) ((_ft_).u32_227 = (((_ft_).u32_227 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT1_UFSTACK(_ft_) (((_ft_).u32_227 >> (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE5 opened page1 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE3_W0_WOFF (226U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE3_W1_WOFF (227U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT1_PPAGE3(_ft_,_x_) { ((_ft_).u32_226 = (((_ft_).u32_226 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_227 = (((_ft_).u32_227 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE3_W1_CLRMSK ) | 
(((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT1_PPAGE3(_ft_) (((_ft_).u32_226 >> (20)) | ((IMG_UINT64)((_ft_).u32_227 & 0x0000ffffU ) << (12))) +/* +VCE5 opened page1 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE2_W0_WOFF (225U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE2_W1_WOFF (226U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT1_PPAGE2(_ft_,_x_) { ((_ft_).u32_225 = (((_ft_).u32_225 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_226 = (((_ft_).u32_226 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT1_PPAGE2(_ft_) (((_ft_).u32_225 >> (24)) | ((IMG_UINT64)((_ft_).u32_226 & 0x000fffffU ) << (8))) +/* +VCE5 opened page1 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE1_W0_WOFF (224U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE1_W1_WOFF (225U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT1_PPAGE1(_ft_,_x_) { ((_ft_).u32_224 = (((_ft_).u32_224 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_225 = (((_ft_).u32_225 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE1_W1_CLRMSK ) | (((_x_) & 
(IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT1_PPAGE1(_ft_) (((_ft_).u32_224 >> (28)) | ((IMG_UINT64)((_ft_).u32_225 & 0x00ffffffU ) << (4))) +/* +VCE5 opened page1 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE0_WOFF (224U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT1_PPAGE0(_ft_,_x_) ((_ft_).u32_224 = (((_ft_).u32_224 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT1_PPAGE0(_ft_) (((_ft_).u32_224 >> (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE5 opened page1 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PAGE_WOFF (220U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT1_PAGE(_ft_,_x_) ((_ft_).u32_220 = (((_ft_).u32_220 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT1_PAGE(_ft_) (((_ft_).u32_220 >> (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE5 opened page0 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_UFSTACK_WOFF (219U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_219 = (((_ft_).u32_219 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_UFSTACK_SHIFT)))) +#define 
PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT0_UFSTACK(_ft_) (((_ft_).u32_219 >> (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE5 opened page0 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE3_W0_WOFF (218U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE3_W1_WOFF (219U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_218 = (((_ft_).u32_218 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_219 = (((_ft_).u32_219 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_218 >> (20)) | ((IMG_UINT64)((_ft_).u32_219 & 0x0000ffffU ) << (12))) +/* +VCE5 opened page0 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE2_W0_WOFF (217U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE2_W1_WOFF (218U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_217 = (((_ft_).u32_217 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_218 = (((_ft_).u32_218 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define 
PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_217 >> (24)) | ((IMG_UINT64)((_ft_).u32_218 & 0x000fffffU ) << (8))) +/* +VCE5 opened page0 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE1_W0_WOFF (216U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE1_W1_WOFF (217U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_216 = (((_ft_).u32_216 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_217 = (((_ft_).u32_217 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_216 >> (28)) | ((IMG_UINT64)((_ft_).u32_217 & 0x00ffffffU ) << (4))) +/* +VCE5 opened page0 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE0_WOFF (216U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_216 = (((_ft_).u32_216 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT0_PPAGE0(_ft_) (((_ft_).u32_216 >> (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE5 opened page0 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PAGE_WOFF (212U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PAGE_CLRMSK 
(0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_212 = (((_ft_).u32_212 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT0_PAGE(_ft_) (((_ft_).u32_212 >> (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE4 opened page3 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_UFSTACK_WOFF (211U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT3_UFSTACK(_ft_,_x_) ((_ft_).u32_211 = (((_ft_).u32_211 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT3_UFSTACK(_ft_) (((_ft_).u32_211 >> (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE4 opened page3 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE3_W0_WOFF (210U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE3_W1_WOFF (211U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT3_PPAGE3(_ft_,_x_) { ((_ft_).u32_210 = (((_ft_).u32_210 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_211 = (((_ft_).u32_211 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT3_PPAGE3(_ft_) (((_ft_).u32_210 >> (20)) | 
((IMG_UINT64)((_ft_).u32_211 & 0x0000ffffU ) << (12))) +/* +VCE4 opened page3 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE2_W0_WOFF (209U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE2_W1_WOFF (210U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT3_PPAGE2(_ft_,_x_) { ((_ft_).u32_209 = (((_ft_).u32_209 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_210 = (((_ft_).u32_210 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT3_PPAGE2(_ft_) (((_ft_).u32_209 >> (24)) | ((IMG_UINT64)((_ft_).u32_210 & 0x000fffffU ) << (8))) +/* +VCE4 opened page3 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE1_W0_WOFF (208U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE1_W1_WOFF (209U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT3_PPAGE1(_ft_,_x_) { ((_ft_).u32_208 = (((_ft_).u32_208 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_209 = (((_ft_).u32_209 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT3_PPAGE1(_ft_) (((_ft_).u32_208 >> (28)) | 
((IMG_UINT64)((_ft_).u32_209 & 0x00ffffffU ) << (4))) +/* +VCE4 opened page3 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE0_WOFF (208U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT3_PPAGE0(_ft_,_x_) ((_ft_).u32_208 = (((_ft_).u32_208 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT3_PPAGE0(_ft_) (((_ft_).u32_208 >> (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE4 opened page3 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PAGE_WOFF (204U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT3_PAGE(_ft_,_x_) ((_ft_).u32_204 = (((_ft_).u32_204 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT3_PAGE(_ft_) (((_ft_).u32_204 >> (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE4 opened page2 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_UFSTACK_WOFF (203U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT2_UFSTACK(_ft_,_x_) ((_ft_).u32_203 = (((_ft_).u32_203 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT2_UFSTACK(_ft_) (((_ft_).u32_203 >> (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE4 opened page2 
struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE3_W0_WOFF (202U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE3_W1_WOFF (203U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT2_PPAGE3(_ft_,_x_) { ((_ft_).u32_202 = (((_ft_).u32_202 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_203 = (((_ft_).u32_203 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT2_PPAGE3(_ft_) (((_ft_).u32_202 >> (20)) | ((IMG_UINT64)((_ft_).u32_203 & 0x0000ffffU ) << (12))) +/* +VCE4 opened page2 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE2_W0_WOFF (201U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE2_W1_WOFF (202U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT2_PPAGE2(_ft_,_x_) { ((_ft_).u32_201 = (((_ft_).u32_201 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_202 = (((_ft_).u32_202 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT2_PPAGE2(_ft_) (((_ft_).u32_201 >> (24)) | ((IMG_UINT64)((_ft_).u32_202 & 0x000fffffU ) << (8))) +/* +VCE4 opened page2 struct: physical page 1 
+*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE1_W0_WOFF (200U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE1_W1_WOFF (201U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT2_PPAGE1(_ft_,_x_) { ((_ft_).u32_200 = (((_ft_).u32_200 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_201 = (((_ft_).u32_201 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT2_PPAGE1(_ft_) (((_ft_).u32_200 >> (28)) | ((IMG_UINT64)((_ft_).u32_201 & 0x00ffffffU ) << (4))) +/* +VCE4 opened page2 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE0_WOFF (200U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT2_PPAGE0(_ft_,_x_) ((_ft_).u32_200 = (((_ft_).u32_200 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT2_PPAGE0(_ft_) (((_ft_).u32_200 >> (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE4 opened page2 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PAGE_WOFF (196U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT2_PAGE(_ft_,_x_) ((_ft_).u32_196 = (((_ft_).u32_196 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PAGE_CLRMSK ) | (((_x_) & 
(0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT2_PAGE(_ft_) (((_ft_).u32_196 >> (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE4 opened page1 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_UFSTACK_WOFF (195U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT1_UFSTACK(_ft_,_x_) ((_ft_).u32_195 = (((_ft_).u32_195 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT1_UFSTACK(_ft_) (((_ft_).u32_195 >> (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE4 opened page1 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE3_W0_WOFF (194U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE3_W1_WOFF (195U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT1_PPAGE3(_ft_,_x_) { ((_ft_).u32_194 = (((_ft_).u32_194 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_195 = (((_ft_).u32_195 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT1_PPAGE3(_ft_) (((_ft_).u32_194 >> (20)) | ((IMG_UINT64)((_ft_).u32_195 & 0x0000ffffU ) << (12))) +/* +VCE4 opened page1 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE2_W0_WOFF (193U) +#define 
PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE2_W1_WOFF (194U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT1_PPAGE2(_ft_,_x_) { ((_ft_).u32_193 = (((_ft_).u32_193 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_194 = (((_ft_).u32_194 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT1_PPAGE2(_ft_) (((_ft_).u32_193 >> (24)) | ((IMG_UINT64)((_ft_).u32_194 & 0x000fffffU ) << (8))) +/* +VCE4 opened page1 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE1_W0_WOFF (192U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE1_W1_WOFF (193U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT1_PPAGE1(_ft_,_x_) { ((_ft_).u32_192 = (((_ft_).u32_192 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_193 = (((_ft_).u32_193 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT1_PPAGE1(_ft_) (((_ft_).u32_192 >> (28)) | ((IMG_UINT64)((_ft_).u32_193 & 0x00ffffffU ) << (4))) +/* +VCE4 opened page1 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE0_WOFF (192U) +#define 
PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT1_PPAGE0(_ft_,_x_) ((_ft_).u32_192 = (((_ft_).u32_192 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT1_PPAGE0(_ft_) (((_ft_).u32_192 >> (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE4 opened page1 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PAGE_WOFF (188U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT1_PAGE(_ft_,_x_) ((_ft_).u32_188 = (((_ft_).u32_188 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT1_PAGE(_ft_) (((_ft_).u32_188 >> (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE4 opened page0 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_UFSTACK_WOFF (187U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_187 = (((_ft_).u32_187 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT0_UFSTACK(_ft_) (((_ft_).u32_187 >> (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE4 opened page0 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE3_W0_WOFF (186U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE3_W1_WOFF (187U) +#define 
PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_186 = (((_ft_).u32_186 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_187 = (((_ft_).u32_187 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_186 >> (20)) | ((IMG_UINT64)((_ft_).u32_187 & 0x0000ffffU ) << (12))) +/* +VCE4 opened page0 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE2_W0_WOFF (185U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE2_W1_WOFF (186U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_185 = (((_ft_).u32_185 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_186 = (((_ft_).u32_186 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_185 >> (24)) | ((IMG_UINT64)((_ft_).u32_186 & 0x000fffffU ) << (8))) +/* +VCE4 opened page0 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE1_W0_WOFF (184U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE1_W1_WOFF (185U) +#define 
PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_184 = (((_ft_).u32_184 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_185 = (((_ft_).u32_185 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_184 >> (28)) | ((IMG_UINT64)((_ft_).u32_185 & 0x00ffffffU ) << (4))) +/* +VCE4 opened page0 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE0_WOFF (184U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_184 = (((_ft_).u32_184 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT0_PPAGE0(_ft_) (((_ft_).u32_184 >> (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE4 opened page0 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PAGE_WOFF (180U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_180 = (((_ft_).u32_180 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT0_PAGE(_ft_) (((_ft_).u32_180 >> 
(PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE3 opened page3 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_UFSTACK_WOFF (179U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT3_UFSTACK(_ft_,_x_) ((_ft_).u32_179 = (((_ft_).u32_179 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT3_UFSTACK(_ft_) (((_ft_).u32_179 >> (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE3 opened page3 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE3_W0_WOFF (178U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE3_W1_WOFF (179U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT3_PPAGE3(_ft_,_x_) { ((_ft_).u32_178 = (((_ft_).u32_178 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_179 = (((_ft_).u32_179 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT3_PPAGE3(_ft_) (((_ft_).u32_178 >> (20)) | ((IMG_UINT64)((_ft_).u32_179 & 0x0000ffffU ) << (12))) +/* +VCE3 opened page3 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE2_W0_WOFF (177U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE2_W1_WOFF (178U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE2_W0_SHIFT (24U) +#define 
PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT3_PPAGE2(_ft_,_x_) { ((_ft_).u32_177 = (((_ft_).u32_177 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_178 = (((_ft_).u32_178 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT3_PPAGE2(_ft_) (((_ft_).u32_177 >> (24)) | ((IMG_UINT64)((_ft_).u32_178 & 0x000fffffU ) << (8))) +/* +VCE3 opened page3 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE1_W0_WOFF (176U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE1_W1_WOFF (177U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT3_PPAGE1(_ft_,_x_) { ((_ft_).u32_176 = (((_ft_).u32_176 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_177 = (((_ft_).u32_177 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT3_PPAGE1(_ft_) (((_ft_).u32_176 >> (28)) | ((IMG_UINT64)((_ft_).u32_177 & 0x00ffffffU ) << (4))) +/* +VCE3 opened page3 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE0_WOFF (176U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE0_CLRMSK (0xF0000000U) +#define 
PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT3_PPAGE0(_ft_,_x_) ((_ft_).u32_176 = (((_ft_).u32_176 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT3_PPAGE0(_ft_) (((_ft_).u32_176 >> (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE3 opened page3 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PAGE_WOFF (172U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT3_PAGE(_ft_,_x_) ((_ft_).u32_172 = (((_ft_).u32_172 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT3_PAGE(_ft_) (((_ft_).u32_172 >> (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE3 opened page2 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_UFSTACK_WOFF (171U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT2_UFSTACK(_ft_,_x_) ((_ft_).u32_171 = (((_ft_).u32_171 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT2_UFSTACK(_ft_) (((_ft_).u32_171 >> (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE3 opened page2 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE3_W0_WOFF (170U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE3_W1_WOFF (171U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE3_W1_SHIFT (0U) +#define 
PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT2_PPAGE3(_ft_,_x_) { ((_ft_).u32_170 = (((_ft_).u32_170 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_171 = (((_ft_).u32_171 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT2_PPAGE3(_ft_) (((_ft_).u32_170 >> (20)) | ((IMG_UINT64)((_ft_).u32_171 & 0x0000ffffU ) << (12))) +/* +VCE3 opened page2 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE2_W0_WOFF (169U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE2_W1_WOFF (170U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT2_PPAGE2(_ft_,_x_) { ((_ft_).u32_169 = (((_ft_).u32_169 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_170 = (((_ft_).u32_170 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT2_PPAGE2(_ft_) (((_ft_).u32_169 >> (24)) | ((IMG_UINT64)((_ft_).u32_170 & 0x000fffffU ) << (8))) +/* +VCE3 opened page2 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE1_W0_WOFF (168U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE1_W1_WOFF (169U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE1_W1_SHIFT (0U) +#define 
PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT2_PPAGE1(_ft_,_x_) { ((_ft_).u32_168 = (((_ft_).u32_168 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_169 = (((_ft_).u32_169 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT2_PPAGE1(_ft_) (((_ft_).u32_168 >> (28)) | ((IMG_UINT64)((_ft_).u32_169 & 0x00ffffffU ) << (4))) +/* +VCE3 opened page2 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE0_WOFF (168U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT2_PPAGE0(_ft_,_x_) ((_ft_).u32_168 = (((_ft_).u32_168 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT2_PPAGE0(_ft_) (((_ft_).u32_168 >> (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE3 opened page2 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PAGE_WOFF (164U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT2_PAGE(_ft_,_x_) ((_ft_).u32_164 = (((_ft_).u32_164 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT2_PAGE(_ft_) (((_ft_).u32_164 >> (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE3 opened page1 struct: unified stack bit +*/ +#define 
PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_UFSTACK_WOFF (163U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT1_UFSTACK(_ft_,_x_) ((_ft_).u32_163 = (((_ft_).u32_163 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT1_UFSTACK(_ft_) (((_ft_).u32_163 >> (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE3 opened page1 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE3_W0_WOFF (162U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE3_W1_WOFF (163U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT1_PPAGE3(_ft_,_x_) { ((_ft_).u32_162 = (((_ft_).u32_162 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_163 = (((_ft_).u32_163 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT1_PPAGE3(_ft_) (((_ft_).u32_162 >> (20)) | ((IMG_UINT64)((_ft_).u32_163 & 0x0000ffffU ) << (12))) +/* +VCE3 opened page1 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE2_W0_WOFF (161U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE2_W1_WOFF (162U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define 
PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT1_PPAGE2(_ft_,_x_) { ((_ft_).u32_161 = (((_ft_).u32_161 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_162 = (((_ft_).u32_162 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT1_PPAGE2(_ft_) (((_ft_).u32_161 >> (24)) | ((IMG_UINT64)((_ft_).u32_162 & 0x000fffffU ) << (8))) +/* +VCE3 opened page1 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE1_W0_WOFF (160U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE1_W1_WOFF (161U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT1_PPAGE1(_ft_,_x_) { ((_ft_).u32_160 = (((_ft_).u32_160 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_161 = (((_ft_).u32_161 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT1_PPAGE1(_ft_) (((_ft_).u32_160 >> (28)) | ((IMG_UINT64)((_ft_).u32_161 & 0x00ffffffU ) << (4))) +/* +VCE3 opened page1 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE0_WOFF (160U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT1_PPAGE0(_ft_,_x_) ((_ft_).u32_160 = (((_ft_).u32_160 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE0_CLRMSK ) | (((_x_) & 
(0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT1_PPAGE0(_ft_) (((_ft_).u32_160 >> (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE3 opened page1 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PAGE_WOFF (156U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT1_PAGE(_ft_,_x_) ((_ft_).u32_156 = (((_ft_).u32_156 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT1_PAGE(_ft_) (((_ft_).u32_156 >> (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE3 opened page0 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_UFSTACK_WOFF (155U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_155 = (((_ft_).u32_155 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT0_UFSTACK(_ft_) (((_ft_).u32_155 >> (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE3 opened page0 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE3_W0_WOFF (154U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE3_W1_WOFF (155U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define 
PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_154 = (((_ft_).u32_154 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_155 = (((_ft_).u32_155 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_154 >> (20)) | ((IMG_UINT64)((_ft_).u32_155 & 0x0000ffffU ) << (12))) +/* +VCE3 opened page0 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE2_W0_WOFF (153U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE2_W1_WOFF (154U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_153 = (((_ft_).u32_153 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_154 = (((_ft_).u32_154 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_153 >> (24)) | ((IMG_UINT64)((_ft_).u32_154 & 0x000fffffU ) << (8))) +/* +VCE3 opened page0 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE1_W0_WOFF (152U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE1_W1_WOFF (153U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U) +#define 
PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_152 = (((_ft_).u32_152 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_153 = (((_ft_).u32_153 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_152 >> (28)) | ((IMG_UINT64)((_ft_).u32_153 & 0x00ffffffU ) << (4))) +/* +VCE3 opened page0 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE0_WOFF (152U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_152 = (((_ft_).u32_152 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT0_PPAGE0(_ft_) (((_ft_).u32_152 >> (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE3 opened page0 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PAGE_WOFF (148U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_148 = (((_ft_).u32_148 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT0_PAGE(_ft_) (((_ft_).u32_148 >> (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE2 opened page3 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_UFSTACK_WOFF (147U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_UFSTACK_SHIFT (16U) +#define 
PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT3_UFSTACK(_ft_,_x_) ((_ft_).u32_147 = (((_ft_).u32_147 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT3_UFSTACK(_ft_) (((_ft_).u32_147 >> (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE2 opened page3 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE3_W0_WOFF (146U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE3_W1_WOFF (147U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT3_PPAGE3(_ft_,_x_) { ((_ft_).u32_146 = (((_ft_).u32_146 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_147 = (((_ft_).u32_147 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT3_PPAGE3(_ft_) (((_ft_).u32_146 >> (20)) | ((IMG_UINT64)((_ft_).u32_147 & 0x0000ffffU ) << (12))) +/* +VCE2 opened page3 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE2_W0_WOFF (145U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE2_W1_WOFF (146U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT3_PPAGE2(_ft_,_x_) { 
((_ft_).u32_145 = (((_ft_).u32_145 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_146 = (((_ft_).u32_146 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT3_PPAGE2(_ft_) (((_ft_).u32_145 >> (24)) | ((IMG_UINT64)((_ft_).u32_146 & 0x000fffffU ) << (8))) +/* +VCE2 opened page3 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE1_W0_WOFF (144U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE1_W1_WOFF (145U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT3_PPAGE1(_ft_,_x_) { ((_ft_).u32_144 = (((_ft_).u32_144 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_145 = (((_ft_).u32_145 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT3_PPAGE1(_ft_) (((_ft_).u32_144 >> (28)) | ((IMG_UINT64)((_ft_).u32_145 & 0x00ffffffU ) << (4))) +/* +VCE2 opened page3 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE0_WOFF (144U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT3_PPAGE0(_ft_,_x_) ((_ft_).u32_144 = (((_ft_).u32_144 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT3_PPAGE0(_ft_) 
(((_ft_).u32_144 >> (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE2 opened page3 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PAGE_WOFF (140U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT3_PAGE(_ft_,_x_) ((_ft_).u32_140 = (((_ft_).u32_140 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT3_PAGE(_ft_) (((_ft_).u32_140 >> (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE2 opened page2 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_UFSTACK_WOFF (139U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT2_UFSTACK(_ft_,_x_) ((_ft_).u32_139 = (((_ft_).u32_139 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT2_UFSTACK(_ft_) (((_ft_).u32_139 >> (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE2 opened page2 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE3_W0_WOFF (138U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE3_W1_WOFF (139U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT2_PPAGE3(_ft_,_x_) { ((_ft_).u32_138 = (((_ft_).u32_138 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE3_W0_CLRMSK ) | 
(((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_139 = (((_ft_).u32_139 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT2_PPAGE3(_ft_) (((_ft_).u32_138 >> (20)) | ((IMG_UINT64)((_ft_).u32_139 & 0x0000ffffU ) << (12))) +/* +VCE2 opened page2 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE2_W0_WOFF (137U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE2_W1_WOFF (138U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT2_PPAGE2(_ft_,_x_) { ((_ft_).u32_137 = (((_ft_).u32_137 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_138 = (((_ft_).u32_138 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT2_PPAGE2(_ft_) (((_ft_).u32_137 >> (24)) | ((IMG_UINT64)((_ft_).u32_138 & 0x000fffffU ) << (8))) +/* +VCE2 opened page2 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE1_W0_WOFF (136U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE1_W1_WOFF (137U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT2_PPAGE1(_ft_,_x_) { ((_ft_).u32_136 = (((_ft_).u32_136 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE1_W0_CLRMSK ) | (((_x_) & 
(IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_137 = (((_ft_).u32_137 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT2_PPAGE1(_ft_) (((_ft_).u32_136 >> (28)) | ((IMG_UINT64)((_ft_).u32_137 & 0x00ffffffU ) << (4))) +/* +VCE2 opened page2 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE0_WOFF (136U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT2_PPAGE0(_ft_,_x_) ((_ft_).u32_136 = (((_ft_).u32_136 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT2_PPAGE0(_ft_) (((_ft_).u32_136 >> (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE2 opened page2 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PAGE_WOFF (132U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT2_PAGE(_ft_,_x_) ((_ft_).u32_132 = (((_ft_).u32_132 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT2_PAGE(_ft_) (((_ft_).u32_132 >> (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE2 opened page1 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_UFSTACK_WOFF (131U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT1_UFSTACK(_ft_,_x_) ((_ft_).u32_131 = (((_ft_).u32_131 & 
PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT1_UFSTACK(_ft_) (((_ft_).u32_131 >> (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE2 opened page1 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE3_W0_WOFF (130U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE3_W1_WOFF (131U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT1_PPAGE3(_ft_,_x_) { ((_ft_).u32_130 = (((_ft_).u32_130 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_131 = (((_ft_).u32_131 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT1_PPAGE3(_ft_) (((_ft_).u32_130 >> (20)) | ((IMG_UINT64)((_ft_).u32_131 & 0x0000ffffU ) << (12))) +/* +VCE2 opened page1 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE2_W0_WOFF (129U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE2_W1_WOFF (130U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT1_PPAGE2(_ft_,_x_) { ((_ft_).u32_129 = (((_ft_).u32_129 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_130 = 
(((_ft_).u32_130 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT1_PPAGE2(_ft_) (((_ft_).u32_129 >> (24)) | ((IMG_UINT64)((_ft_).u32_130 & 0x000fffffU ) << (8))) +/* +VCE2 opened page1 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE1_W0_WOFF (128U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE1_W1_WOFF (129U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT1_PPAGE1(_ft_,_x_) { ((_ft_).u32_128 = (((_ft_).u32_128 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_129 = (((_ft_).u32_129 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT1_PPAGE1(_ft_) (((_ft_).u32_128 >> (28)) | ((IMG_UINT64)((_ft_).u32_129 & 0x00ffffffU ) << (4))) +/* +VCE2 opened page1 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE0_WOFF (128U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT1_PPAGE0(_ft_,_x_) ((_ft_).u32_128 = (((_ft_).u32_128 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT1_PPAGE0(_ft_) (((_ft_).u32_128 >> (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE2 opened page1 struct: virtual page number +*/ +#define 
PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PAGE_WOFF (124U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT1_PAGE(_ft_,_x_) ((_ft_).u32_124 = (((_ft_).u32_124 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT1_PAGE(_ft_) (((_ft_).u32_124 >> (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE2 opened page0 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_UFSTACK_WOFF (123U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_123 = (((_ft_).u32_123 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT0_UFSTACK(_ft_) (((_ft_).u32_123 >> (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE2 opened page0 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE3_W0_WOFF (122U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE3_W1_WOFF (123U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_122 = (((_ft_).u32_122 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_123 = (((_ft_).u32_123 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE3_W1_CLRMSK ) | 
(((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_122 >> (20)) | ((IMG_UINT64)((_ft_).u32_123 & 0x0000ffffU ) << (12))) +/* +VCE2 opened page0 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE2_W0_WOFF (121U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE2_W1_WOFF (122U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_121 = (((_ft_).u32_121 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_122 = (((_ft_).u32_122 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_121 >> (24)) | ((IMG_UINT64)((_ft_).u32_122 & 0x000fffffU ) << (8))) +/* +VCE2 opened page0 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE1_W0_WOFF (120U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE1_W1_WOFF (121U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_120 = (((_ft_).u32_120 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_121 = (((_ft_).u32_121 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & 
(IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_120 >> (28)) | ((IMG_UINT64)((_ft_).u32_121 & 0x00ffffffU ) << (4))) +/* +VCE2 opened page0 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE0_WOFF (120U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_120 = (((_ft_).u32_120 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT0_PPAGE0(_ft_) (((_ft_).u32_120 >> (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE2 opened page0 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PAGE_WOFF (116U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_116 = (((_ft_).u32_116 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT0_PAGE(_ft_) (((_ft_).u32_116 >> (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE1 opened page3 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_UFSTACK_WOFF (115U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT3_UFSTACK(_ft_,_x_) ((_ft_).u32_115 = (((_ft_).u32_115 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_UFSTACK_SHIFT)))) +#define 
PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT3_UFSTACK(_ft_) (((_ft_).u32_115 >> (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE1 opened page3 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE3_W0_WOFF (114U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE3_W1_WOFF (115U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT3_PPAGE3(_ft_,_x_) { ((_ft_).u32_114 = (((_ft_).u32_114 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_115 = (((_ft_).u32_115 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT3_PPAGE3(_ft_) (((_ft_).u32_114 >> (20)) | ((IMG_UINT64)((_ft_).u32_115 & 0x0000ffffU ) << (12))) +/* +VCE1 opened page3 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE2_W0_WOFF (113U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE2_W1_WOFF (114U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT3_PPAGE2(_ft_,_x_) { ((_ft_).u32_113 = (((_ft_).u32_113 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_114 = (((_ft_).u32_114 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define 
PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT3_PPAGE2(_ft_) (((_ft_).u32_113 >> (24)) | ((IMG_UINT64)((_ft_).u32_114 & 0x000fffffU ) << (8))) +/* +VCE1 opened page3 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE1_W0_WOFF (112U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE1_W1_WOFF (113U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT3_PPAGE1(_ft_,_x_) { ((_ft_).u32_112 = (((_ft_).u32_112 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_113 = (((_ft_).u32_113 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT3_PPAGE1(_ft_) (((_ft_).u32_112 >> (28)) | ((IMG_UINT64)((_ft_).u32_113 & 0x00ffffffU ) << (4))) +/* +VCE1 opened page3 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE0_WOFF (112U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT3_PPAGE0(_ft_,_x_) ((_ft_).u32_112 = (((_ft_).u32_112 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT3_PPAGE0(_ft_) (((_ft_).u32_112 >> (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE1 opened page3 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PAGE_WOFF (108U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PAGE_CLRMSK 
(0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT3_PAGE(_ft_,_x_) ((_ft_).u32_108 = (((_ft_).u32_108 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT3_PAGE(_ft_) (((_ft_).u32_108 >> (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE1 opened page2 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_UFSTACK_WOFF (107U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT2_UFSTACK(_ft_,_x_) ((_ft_).u32_107 = (((_ft_).u32_107 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT2_UFSTACK(_ft_) (((_ft_).u32_107 >> (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE1 opened page2 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE3_W0_WOFF (106U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE3_W1_WOFF (107U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT2_PPAGE3(_ft_,_x_) { ((_ft_).u32_106 = (((_ft_).u32_106 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_107 = (((_ft_).u32_107 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT2_PPAGE3(_ft_) (((_ft_).u32_106 >> (20)) | 
((IMG_UINT64)((_ft_).u32_107 & 0x0000ffffU ) << (12))) +/* +VCE1 opened page2 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE2_W0_WOFF (105U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE2_W1_WOFF (106U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT2_PPAGE2(_ft_,_x_) { ((_ft_).u32_105 = (((_ft_).u32_105 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_106 = (((_ft_).u32_106 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT2_PPAGE2(_ft_) (((_ft_).u32_105 >> (24)) | ((IMG_UINT64)((_ft_).u32_106 & 0x000fffffU ) << (8))) +/* +VCE1 opened page2 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE1_W0_WOFF (104U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE1_W1_WOFF (105U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT2_PPAGE1(_ft_,_x_) { ((_ft_).u32_104 = (((_ft_).u32_104 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_105 = (((_ft_).u32_105 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT2_PPAGE1(_ft_) (((_ft_).u32_104 >> (28)) | 
((IMG_UINT64)((_ft_).u32_105 & 0x00ffffffU ) << (4))) +/* +VCE1 opened page2 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE0_WOFF (104U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT2_PPAGE0(_ft_,_x_) ((_ft_).u32_104 = (((_ft_).u32_104 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT2_PPAGE0(_ft_) (((_ft_).u32_104 >> (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE1 opened page2 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PAGE_WOFF (100U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT2_PAGE(_ft_,_x_) ((_ft_).u32_100 = (((_ft_).u32_100 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT2_PAGE(_ft_) (((_ft_).u32_100 >> (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE1 opened page1 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_UFSTACK_WOFF (99U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT1_UFSTACK(_ft_,_x_) ((_ft_).u32_99 = (((_ft_).u32_99 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT1_UFSTACK(_ft_) (((_ft_).u32_99 >> (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE1 opened page1 struct: 
physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE3_W0_WOFF (98U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE3_W1_WOFF (99U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT1_PPAGE3(_ft_,_x_) { ((_ft_).u32_98 = (((_ft_).u32_98 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_99 = (((_ft_).u32_99 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT1_PPAGE3(_ft_) (((_ft_).u32_98 >> (20)) | ((IMG_UINT64)((_ft_).u32_99 & 0x0000ffffU ) << (12))) +/* +VCE1 opened page1 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE2_W0_WOFF (97U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE2_W1_WOFF (98U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT1_PPAGE2(_ft_,_x_) { ((_ft_).u32_97 = (((_ft_).u32_97 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_98 = (((_ft_).u32_98 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT1_PPAGE2(_ft_) (((_ft_).u32_97 >> (24)) | ((IMG_UINT64)((_ft_).u32_98 & 0x000fffffU ) << (8))) +/* +VCE1 opened page1 struct: physical page 1 +*/ +#define 
PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE1_W0_WOFF (96U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE1_W1_WOFF (97U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT1_PPAGE1(_ft_,_x_) { ((_ft_).u32_96 = (((_ft_).u32_96 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_97 = (((_ft_).u32_97 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT1_PPAGE1(_ft_) (((_ft_).u32_96 >> (28)) | ((IMG_UINT64)((_ft_).u32_97 & 0x00ffffffU ) << (4))) +/* +VCE1 opened page1 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE0_WOFF (96U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT1_PPAGE0(_ft_,_x_) ((_ft_).u32_96 = (((_ft_).u32_96 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT1_PPAGE0(_ft_) (((_ft_).u32_96 >> (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE1 opened page1 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PAGE_WOFF (92U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT1_PAGE(_ft_,_x_) ((_ft_).u32_92 = (((_ft_).u32_92 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << 
(PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT1_PAGE(_ft_) (((_ft_).u32_92 >> (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE1 opened page0 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_UFSTACK_WOFF (91U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_91 = (((_ft_).u32_91 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT0_UFSTACK(_ft_) (((_ft_).u32_91 >> (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE1 opened page0 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE3_W0_WOFF (90U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE3_W1_WOFF (91U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_90 = (((_ft_).u32_90 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_91 = (((_ft_).u32_91 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_90 >> (20)) | ((IMG_UINT64)((_ft_).u32_91 & 0x0000ffffU ) << (12))) +/* +VCE1 opened page0 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE2_W0_WOFF (89U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE2_W1_WOFF 
(90U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_89 = (((_ft_).u32_89 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_90 = (((_ft_).u32_90 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_89 >> (24)) | ((IMG_UINT64)((_ft_).u32_90 & 0x000fffffU ) << (8))) +/* +VCE1 opened page0 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE1_W0_WOFF (88U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE1_W1_WOFF (89U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_88 = (((_ft_).u32_88 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_89 = (((_ft_).u32_89 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_88 >> (28)) | ((IMG_UINT64)((_ft_).u32_89 & 0x00ffffffU ) << (4))) +/* +VCE1 opened page0 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE0_WOFF (88U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE0_SHIFT (0U) +#define 
PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_88 = (((_ft_).u32_88 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT0_PPAGE0(_ft_) (((_ft_).u32_88 >> (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE1 opened page0 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PAGE_WOFF (84U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_84 = (((_ft_).u32_84 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT0_PAGE(_ft_) (((_ft_).u32_84 >> (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE0 opened page3 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_UFSTACK_WOFF (83U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT3_UFSTACK(_ft_,_x_) ((_ft_).u32_83 = (((_ft_).u32_83 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT3_UFSTACK(_ft_) (((_ft_).u32_83 >> (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE0 opened page3 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE3_W0_WOFF (82U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE3_W1_WOFF (83U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE3_W0_SHIFT (20U) +#define 
PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT3_PPAGE3(_ft_,_x_) { ((_ft_).u32_82 = (((_ft_).u32_82 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_83 = (((_ft_).u32_83 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT3_PPAGE3(_ft_) (((_ft_).u32_82 >> (20)) | ((IMG_UINT64)((_ft_).u32_83 & 0x0000ffffU ) << (12))) +/* +VCE0 opened page3 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE2_W0_WOFF (81U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE2_W1_WOFF (82U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT3_PPAGE2(_ft_,_x_) { ((_ft_).u32_81 = (((_ft_).u32_81 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_82 = (((_ft_).u32_82 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT3_PPAGE2(_ft_) (((_ft_).u32_81 >> (24)) | ((IMG_UINT64)((_ft_).u32_82 & 0x000fffffU ) << (8))) +/* +VCE0 opened page3 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE1_W0_WOFF (80U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE1_W1_WOFF (81U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE1_W0_SHIFT (28U) +#define 
PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT3_PPAGE1(_ft_,_x_) { ((_ft_).u32_80 = (((_ft_).u32_80 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_81 = (((_ft_).u32_81 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT3_PPAGE1(_ft_) (((_ft_).u32_80 >> (28)) | ((IMG_UINT64)((_ft_).u32_81 & 0x00ffffffU ) << (4))) +/* +VCE0 opened page3 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE0_WOFF (80U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT3_PPAGE0(_ft_,_x_) ((_ft_).u32_80 = (((_ft_).u32_80 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT3_PPAGE0(_ft_) (((_ft_).u32_80 >> (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE0 opened page3 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PAGE_WOFF (76U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT3_PAGE(_ft_,_x_) ((_ft_).u32_76 = (((_ft_).u32_76 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT3_PAGE(_ft_) (((_ft_).u32_76 >> (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE0 opened 
page2 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_UFSTACK_WOFF (75U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT2_UFSTACK(_ft_,_x_) ((_ft_).u32_75 = (((_ft_).u32_75 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT2_UFSTACK(_ft_) (((_ft_).u32_75 >> (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE0 opened page2 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE3_W0_WOFF (74U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE3_W1_WOFF (75U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT2_PPAGE3(_ft_,_x_) { ((_ft_).u32_74 = (((_ft_).u32_74 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_75 = (((_ft_).u32_75 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT2_PPAGE3(_ft_) (((_ft_).u32_74 >> (20)) | ((IMG_UINT64)((_ft_).u32_75 & 0x0000ffffU ) << (12))) +/* +VCE0 opened page2 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE2_W0_WOFF (73U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE2_W1_WOFF (74U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE2_W0_CLRMSK 
(0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT2_PPAGE2(_ft_,_x_) { ((_ft_).u32_73 = (((_ft_).u32_73 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_74 = (((_ft_).u32_74 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT2_PPAGE2(_ft_) (((_ft_).u32_73 >> (24)) | ((IMG_UINT64)((_ft_).u32_74 & 0x000fffffU ) << (8))) +/* +VCE0 opened page2 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE1_W0_WOFF (72U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE1_W1_WOFF (73U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT2_PPAGE1(_ft_,_x_) { ((_ft_).u32_72 = (((_ft_).u32_72 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_73 = (((_ft_).u32_73 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT2_PPAGE1(_ft_) (((_ft_).u32_72 >> (28)) | ((IMG_UINT64)((_ft_).u32_73 & 0x00ffffffU ) << (4))) +/* +VCE0 opened page2 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE0_WOFF (72U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT2_PPAGE0(_ft_,_x_) ((_ft_).u32_72 = (((_ft_).u32_72 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE0_CLRMSK ) | (((_x_) & 
(0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT2_PPAGE0(_ft_) (((_ft_).u32_72 >> (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE0 opened page2 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PAGE_WOFF (68U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT2_PAGE(_ft_,_x_) ((_ft_).u32_68 = (((_ft_).u32_68 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT2_PAGE(_ft_) (((_ft_).u32_68 >> (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE0 opened page1 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_UFSTACK_WOFF (67U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_UFSTACK_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT1_UFSTACK(_ft_,_x_) ((_ft_).u32_67 = (((_ft_).u32_67 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT1_UFSTACK(_ft_) (((_ft_).u32_67 >> (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE0 opened page1 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE3_W0_WOFF (66U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE3_W1_WOFF (67U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define 
PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT1_PPAGE3(_ft_,_x_) { ((_ft_).u32_66 = (((_ft_).u32_66 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_67 = (((_ft_).u32_67 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT1_PPAGE3(_ft_) (((_ft_).u32_66 >> (20)) | ((IMG_UINT64)((_ft_).u32_67 & 0x0000ffffU ) << (12))) +/* +VCE0 opened page1 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE2_W0_WOFF (65U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE2_W1_WOFF (66U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT1_PPAGE2(_ft_,_x_) { ((_ft_).u32_65 = (((_ft_).u32_65 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_66 = (((_ft_).u32_66 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT1_PPAGE2(_ft_) (((_ft_).u32_65 >> (24)) | ((IMG_UINT64)((_ft_).u32_66 & 0x000fffffU ) << (8))) +/* +VCE0 opened page1 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE1_W0_WOFF (64U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE1_W1_WOFF (65U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE1_W1_CLRMSK (0xFF000000U) +#define 
PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT1_PPAGE1(_ft_,_x_) { ((_ft_).u32_64 = (((_ft_).u32_64 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_65 = (((_ft_).u32_65 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT1_PPAGE1(_ft_) (((_ft_).u32_64 >> (28)) | ((IMG_UINT64)((_ft_).u32_65 & 0x00ffffffU ) << (4))) +/* +VCE0 opened page1 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE0_WOFF (64U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT1_PPAGE0(_ft_,_x_) ((_ft_).u32_64 = (((_ft_).u32_64 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT1_PPAGE0(_ft_) (((_ft_).u32_64 >> (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE0 opened page1 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PAGE_WOFF (60U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT1_PAGE(_ft_,_x_) ((_ft_).u32_60 = (((_ft_).u32_60 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT1_PAGE(_ft_) (((_ft_).u32_60 >> (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PAGE_SHIFT)) & 0x000fffffU) +/* +VCE0 opened page0 struct: unified stack bit +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_UFSTACK_WOFF (59U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_UFSTACK_SHIFT (16U) +#define 
PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_59 = (((_ft_).u32_59 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_UFSTACK_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT0_UFSTACK(_ft_) (((_ft_).u32_59 >> (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U) +/* +VCE0 opened page0 struct: physical page 3 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE3_W0_WOFF (58U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE3_W1_WOFF (59U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE3_W0_SHIFT (20U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE3_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_58 = (((_ft_).u32_58 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \ + ((_ft_).u32_59 = (((_ft_).u32_59 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_58 >> (20)) | ((IMG_UINT64)((_ft_).u32_59 & 0x0000ffffU ) << (12))) +/* +VCE0 opened page0 struct: physical page 2 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE2_W0_WOFF (57U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE2_W1_WOFF (58U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE2_W0_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_57 = 
(((_ft_).u32_57 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \ + ((_ft_).u32_58 = (((_ft_).u32_58 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_57 >> (24)) | ((IMG_UINT64)((_ft_).u32_58 & 0x000fffffU ) << (8))) +/* +VCE0 opened page0 struct: physical page 1 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE1_W0_WOFF (56U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE1_W1_WOFF (57U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE1_W0_SHIFT (28U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_56 = (((_ft_).u32_56 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \ + ((_ft_).u32_57 = (((_ft_).u32_57 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); } +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_56 >> (28)) | ((IMG_UINT64)((_ft_).u32_57 & 0x00ffffffU ) << (4))) +/* +VCE0 opened page0 struct: physical page 0 +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE0_WOFF (56U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_56 = (((_ft_).u32_56 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT0_PPAGE0(_ft_) (((_ft_).u32_56 >> 
(PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU) +/* +VCE0 opened page0 struct: virtual page number +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PAGE_WOFF (52U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PAGE_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_52 = (((_ft_).u32_52 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PAGE_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT0_PAGE(_ft_) (((_ft_).u32_52 >> (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU) +/* +Rsv2 area +*/ +#define PM_DATA_VHEAP_BUFFER_RESV2_W0_WOFF (42U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W1_WOFF (43U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W2_WOFF (44U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W2_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W3_WOFF (45U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W3_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W4_WOFF (46U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W4_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W5_WOFF (47U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W5_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W6_WOFF (48U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W6_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W7_WOFF (49U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W7_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W8_WOFF (50U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W8_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W9_WOFF (51U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W9_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W0_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W1_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W2_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W3_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W4_CLRMSK 
(0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W5_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W6_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W7_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W8_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV2_W9_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_SET_RESV2_W0(_ft_,_x_) ((_ft_).u32_42 = (((_ft_).u32_42 & PM_DATA_VHEAP_BUFFER_RESV2_W0_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV2_W0_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV2_W0(_ft_) (((_ft_).u32_42 >> (PM_DATA_VHEAP_BUFFER_RESV2_W0_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV2_W1(_ft_,_x_) ((_ft_).u32_43 = (((_ft_).u32_43 & PM_DATA_VHEAP_BUFFER_RESV2_W1_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV2_W1_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV2_W1(_ft_) (((_ft_).u32_43 >> (PM_DATA_VHEAP_BUFFER_RESV2_W1_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV2_W2(_ft_,_x_) ((_ft_).u32_44 = (((_ft_).u32_44 & PM_DATA_VHEAP_BUFFER_RESV2_W2_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV2_W2_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV2_W2(_ft_) (((_ft_).u32_44 >> (PM_DATA_VHEAP_BUFFER_RESV2_W2_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV2_W3(_ft_,_x_) ((_ft_).u32_45 = (((_ft_).u32_45 & PM_DATA_VHEAP_BUFFER_RESV2_W3_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV2_W3_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV2_W3(_ft_) (((_ft_).u32_45 >> (PM_DATA_VHEAP_BUFFER_RESV2_W3_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV2_W4(_ft_,_x_) ((_ft_).u32_46 = (((_ft_).u32_46 & PM_DATA_VHEAP_BUFFER_RESV2_W4_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV2_W4_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV2_W4(_ft_) (((_ft_).u32_46 >> (PM_DATA_VHEAP_BUFFER_RESV2_W4_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV2_W5(_ft_,_x_) ((_ft_).u32_47 = (((_ft_).u32_47 & 
PM_DATA_VHEAP_BUFFER_RESV2_W5_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV2_W5_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV2_W5(_ft_) (((_ft_).u32_47 >> (PM_DATA_VHEAP_BUFFER_RESV2_W5_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV2_W6(_ft_,_x_) ((_ft_).u32_48 = (((_ft_).u32_48 & PM_DATA_VHEAP_BUFFER_RESV2_W6_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV2_W6_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV2_W6(_ft_) (((_ft_).u32_48 >> (PM_DATA_VHEAP_BUFFER_RESV2_W6_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV2_W7(_ft_,_x_) ((_ft_).u32_49 = (((_ft_).u32_49 & PM_DATA_VHEAP_BUFFER_RESV2_W7_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV2_W7_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV2_W7(_ft_) (((_ft_).u32_49 >> (PM_DATA_VHEAP_BUFFER_RESV2_W7_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV2_W8(_ft_,_x_) ((_ft_).u32_50 = (((_ft_).u32_50 & PM_DATA_VHEAP_BUFFER_RESV2_W8_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV2_W8_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV2_W8(_ft_) (((_ft_).u32_50 >> (PM_DATA_VHEAP_BUFFER_RESV2_W8_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV2_W9(_ft_,_x_) ((_ft_).u32_51 = (((_ft_).u32_51 & PM_DATA_VHEAP_BUFFER_RESV2_W9_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV2_W9_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV2_W9(_ft_) (((_ft_).u32_51 >> (PM_DATA_VHEAP_BUFFER_RESV2_W9_SHIFT)) & 0xffffffffU) +/* +Number of pages allocated to TE7 but not yet closed +*/ +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT_CNT_WOFF (41U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT_CNT_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT_CNT_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_TE7_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_41 = (((_ft_).u32_41 & PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT_CNT_SHIFT)))) +#define 
PM_DATA_VHEAP_BUFFER_GET_TE7_INFLIGHT_CNT(_ft_) (((_ft_).u32_41 >> (PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT_CNT_SHIFT)) & 0x000000ffU) +/* +Number of pages allocated to TE6 but not yet closed +*/ +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT_CNT_WOFF (41U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT_CNT_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT_CNT_CLRMSK (0xFF00FFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_TE6_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_41 = (((_ft_).u32_41 & PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT_CNT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE6_INFLIGHT_CNT(_ft_) (((_ft_).u32_41 >> (PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT_CNT_SHIFT)) & 0x000000ffU) +/* +Number of pages allocated to TE5 but not yet closed +*/ +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT_CNT_WOFF (41U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT_CNT_SHIFT (8U) +#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT_CNT_CLRMSK (0xFFFF00FFU) +#define PM_DATA_VHEAP_BUFFER_SET_TE5_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_41 = (((_ft_).u32_41 & PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT_CNT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE5_INFLIGHT_CNT(_ft_) (((_ft_).u32_41 >> (PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT_CNT_SHIFT)) & 0x000000ffU) +/* +Number of pages allocated to TE4 but not yet closed +*/ +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT_CNT_WOFF (41U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT_CNT_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT_CNT_CLRMSK (0xFFFFFF00U) +#define PM_DATA_VHEAP_BUFFER_SET_TE4_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_41 = (((_ft_).u32_41 & PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT_CNT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE4_INFLIGHT_CNT(_ft_) (((_ft_).u32_41 >> (PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT_CNT_SHIFT)) & 0x000000ffU) +/* +Number of pages allocated to TE3 but not yet 
closed +*/ +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT_CNT_WOFF (40U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT_CNT_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT_CNT_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_TE3_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_40 = (((_ft_).u32_40 & PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT_CNT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE3_INFLIGHT_CNT(_ft_) (((_ft_).u32_40 >> (PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT_CNT_SHIFT)) & 0x000000ffU) +/* +Number of pages allocated to TE2 but not yet closed +*/ +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT_CNT_WOFF (40U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT_CNT_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT_CNT_CLRMSK (0xFF00FFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_TE2_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_40 = (((_ft_).u32_40 & PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT_CNT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE2_INFLIGHT_CNT(_ft_) (((_ft_).u32_40 >> (PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT_CNT_SHIFT)) & 0x000000ffU) +/* +Number of pages allocated to TE1 but not yet closed +*/ +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT_CNT_WOFF (40U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT_CNT_SHIFT (8U) +#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT_CNT_CLRMSK (0xFFFF00FFU) +#define PM_DATA_VHEAP_BUFFER_SET_TE1_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_40 = (((_ft_).u32_40 & PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT_CNT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE1_INFLIGHT_CNT(_ft_) (((_ft_).u32_40 >> (PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT_CNT_SHIFT)) & 0x000000ffU) +/* +Number of pages allocated to TE0 but not yet closed +*/ +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT_CNT_WOFF (40U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT_CNT_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT_CNT_CLRMSK 
(0xFFFFFF00U) +#define PM_DATA_VHEAP_BUFFER_SET_TE0_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_40 = (((_ft_).u32_40 & PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT_CNT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_TE0_INFLIGHT_CNT(_ft_) (((_ft_).u32_40 >> (PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT_CNT_SHIFT)) & 0x000000ffU) +/* +Number of pages allocated to VCE7 but not yet closed +*/ +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT_CNT_WOFF (39U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT_CNT_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT_CNT_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_39 = (((_ft_).u32_39 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT_CNT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT_CNT(_ft_) (((_ft_).u32_39 >> (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT_CNT_SHIFT)) & 0x000000ffU) +/* +Number of pages allocated to VCE6 but not yet closed +*/ +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT_CNT_WOFF (39U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT_CNT_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT_CNT_CLRMSK (0xFF00FFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_39 = (((_ft_).u32_39 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT_CNT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT_CNT(_ft_) (((_ft_).u32_39 >> (PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT_CNT_SHIFT)) & 0x000000ffU) +/* +Number of pages allocated to VCE5 but not yet closed +*/ +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT_CNT_WOFF (39U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT_CNT_SHIFT (8U) +#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT_CNT_CLRMSK (0xFFFF00FFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_39 = (((_ft_).u32_39 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT_CNT_CLRMSK ) | 
(((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT_CNT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT_CNT(_ft_) (((_ft_).u32_39 >> (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT_CNT_SHIFT)) & 0x000000ffU) +/* +Number of pages allocated to VCE4 but not yet closed +*/ +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT_CNT_WOFF (39U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT_CNT_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT_CNT_CLRMSK (0xFFFFFF00U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_39 = (((_ft_).u32_39 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT_CNT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT_CNT(_ft_) (((_ft_).u32_39 >> (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT_CNT_SHIFT)) & 0x000000ffU) +/* +Number of pages allocated to VCE3 but not yet closed +*/ +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT_CNT_WOFF (38U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT_CNT_SHIFT (24U) +#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT_CNT_CLRMSK (0x00FFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_38 = (((_ft_).u32_38 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT_CNT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT_CNT(_ft_) (((_ft_).u32_38 >> (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT_CNT_SHIFT)) & 0x000000ffU) +/* +Number of pages allocated to VCE2 but not yet closed +*/ +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT_CNT_WOFF (38U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT_CNT_SHIFT (16U) +#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT_CNT_CLRMSK (0xFF00FFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_38 = (((_ft_).u32_38 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT_CNT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT_CNT(_ft_) (((_ft_).u32_38 >> 
(PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT_CNT_SHIFT)) & 0x000000ffU) +/* +Number of pages allocated to VCE1 but not yet closed +*/ +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT_CNT_WOFF (38U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT_CNT_SHIFT (8U) +#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT_CNT_CLRMSK (0xFFFF00FFU) +#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_38 = (((_ft_).u32_38 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT_CNT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT_CNT(_ft_) (((_ft_).u32_38 >> (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT_CNT_SHIFT)) & 0x000000ffU) +/* +Number of pages allocated to VCE0 but not yet closed +*/ +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT_CNT_WOFF (38U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT_CNT_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT_CNT_CLRMSK (0xFFFFFF00U) +#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_38 = (((_ft_).u32_38 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT_CNT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT_CNT(_ft_) (((_ft_).u32_38 >> (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT_CNT_SHIFT)) & 0x000000ffU) +/* +1=The PM ran out of memory during processing +*/ +#define PM_DATA_VHEAP_BUFFER_PM_OUTOFMEM_R_WOFF (37U) +#define PM_DATA_VHEAP_BUFFER_PM_OUTOFMEM_R_SHIFT (31U) +#define PM_DATA_VHEAP_BUFFER_PM_OUTOFMEM_R_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_PM_OUTOFMEM_R(_ft_,_x_) ((_ft_).u32_37 = (((_ft_).u32_37 & PM_DATA_VHEAP_BUFFER_PM_OUTOFMEM_R_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_PM_OUTOFMEM_R_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_PM_OUTOFMEM_R(_ft_) (((_ft_).u32_37 >> (PM_DATA_VHEAP_BUFFER_PM_OUTOFMEM_R_SHIFT)) & 0x00000001U) +/* +A copy of rgx_cr_pm_outofmem_abortall (at the point the VHEAP buffer was written) +*/ +#define 
PM_DATA_VHEAP_BUFFER_OUTOFMEM_ABORT_WOFF (37U) +#define PM_DATA_VHEAP_BUFFER_OUTOFMEM_ABORT_SHIFT (30U) +#define PM_DATA_VHEAP_BUFFER_OUTOFMEM_ABORT_CLRMSK (0xBFFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_OUTOFMEM_ABORT(_ft_,_x_) ((_ft_).u32_37 = (((_ft_).u32_37 & PM_DATA_VHEAP_BUFFER_OUTOFMEM_ABORT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_OUTOFMEM_ABORT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_OUTOFMEM_ABORT(_ft_) (((_ft_).u32_37 >> (PM_DATA_VHEAP_BUFFER_OUTOFMEM_ABORT_SHIFT)) & 0x00000001U) +/* +When running out of memory, indicates which of the free stacks have run out of memory. +If bit 2 is set, MMUSTACK has run out of memory. +Bit 2 is reserved. +If bit 1 is set, UFSTACK has run out of memory. +If bit 0 is set, FSTACK has run out of memory. +*/ +#define PM_DATA_VHEAP_BUFFER_OUTOFMEM_SRC_WOFF (37U) +#define PM_DATA_VHEAP_BUFFER_OUTOFMEM_SRC_SHIFT (2U) +#define PM_DATA_VHEAP_BUFFER_OUTOFMEM_SRC_CLRMSK (0xFFFFFFE3U) +#define PM_DATA_VHEAP_BUFFER_SET_OUTOFMEM_SRC(_ft_,_x_) ((_ft_).u32_37 = (((_ft_).u32_37 & PM_DATA_VHEAP_BUFFER_OUTOFMEM_SRC_CLRMSK ) | (((_x_) & (0x00000007U)) << (PM_DATA_VHEAP_BUFFER_OUTOFMEM_SRC_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_OUTOFMEM_SRC(_ft_) (((_ft_).u32_37 >> (PM_DATA_VHEAP_BUFFER_OUTOFMEM_SRC_SHIFT)) & 0x00000007U) +/* +When running out of memory, indicates the source of the request that caused the OOM event +If bit 1 is set, TE caused the OOM. +If bit 0 is set, VCE caused the OOM. 
+*/ +#define PM_DATA_VHEAP_BUFFER_REQ_SRC_WOFF (37U) +#define PM_DATA_VHEAP_BUFFER_REQ_SRC_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_REQ_SRC_CLRMSK (0xFFFFFFFCU) +#define PM_DATA_VHEAP_BUFFER_SET_REQ_SRC(_ft_,_x_) ((_ft_).u32_37 = (((_ft_).u32_37 & PM_DATA_VHEAP_BUFFER_REQ_SRC_CLRMSK ) | (((_x_) & (0x00000003U)) << (PM_DATA_VHEAP_BUFFER_REQ_SRC_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_REQ_SRC(_ft_) (((_ft_).u32_37 >> (PM_DATA_VHEAP_BUFFER_REQ_SRC_SHIFT)) & 0x00000003U) +/* +MAX RTA index dword in TA stream +*/ +#define PM_DATA_VHEAP_BUFFER_MAX_RTA_WOFF (36U) +#define PM_DATA_VHEAP_BUFFER_MAX_RTA_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_MAX_RTA_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_SET_MAX_RTA(_ft_,_x_) ((_ft_).u32_36 = (((_ft_).u32_36 & PM_DATA_VHEAP_BUFFER_MAX_RTA_CLRMSK ) | (((_x_) & (0xffffffffU)) << (PM_DATA_VHEAP_BUFFER_MAX_RTA_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_MAX_RTA(_ft_) (((_ft_).u32_36 >> (PM_DATA_VHEAP_BUFFER_MAX_RTA_SHIFT)) & 0xffffffffU) +/* +Rsv1 area +*/ +#define PM_DATA_VHEAP_BUFFER_RESV1_W0_WOFF (20U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W1_WOFF (21U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W2_WOFF (22U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W2_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W3_WOFF (23U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W3_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W4_WOFF (24U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W4_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W5_WOFF (25U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W5_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W6_WOFF (26U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W6_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W7_WOFF (27U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W7_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W8_WOFF (28U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W8_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W9_WOFF (29U) +#define 
PM_DATA_VHEAP_BUFFER_RESV1_W9_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W10_WOFF (30U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W10_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W11_WOFF (31U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W11_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W12_WOFF (32U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W12_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W13_WOFF (33U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W13_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W14_WOFF (34U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W14_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W15_WOFF (35U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W15_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W0_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W1_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W2_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W3_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W4_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W5_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W6_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W7_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W8_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W9_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W10_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W11_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W12_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W13_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W14_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_RESV1_W15_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W0(_ft_,_x_) ((_ft_).u32_20 = (((_ft_).u32_20 & PM_DATA_VHEAP_BUFFER_RESV1_W0_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W0_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W0(_ft_) (((_ft_).u32_20 >> (PM_DATA_VHEAP_BUFFER_RESV1_W0_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W1(_ft_,_x_) 
((_ft_).u32_21 = (((_ft_).u32_21 & PM_DATA_VHEAP_BUFFER_RESV1_W1_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W1_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W1(_ft_) (((_ft_).u32_21 >> (PM_DATA_VHEAP_BUFFER_RESV1_W1_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W2(_ft_,_x_) ((_ft_).u32_22 = (((_ft_).u32_22 & PM_DATA_VHEAP_BUFFER_RESV1_W2_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W2_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W2(_ft_) (((_ft_).u32_22 >> (PM_DATA_VHEAP_BUFFER_RESV1_W2_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W3(_ft_,_x_) ((_ft_).u32_23 = (((_ft_).u32_23 & PM_DATA_VHEAP_BUFFER_RESV1_W3_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W3_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W3(_ft_) (((_ft_).u32_23 >> (PM_DATA_VHEAP_BUFFER_RESV1_W3_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W4(_ft_,_x_) ((_ft_).u32_24 = (((_ft_).u32_24 & PM_DATA_VHEAP_BUFFER_RESV1_W4_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W4_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W4(_ft_) (((_ft_).u32_24 >> (PM_DATA_VHEAP_BUFFER_RESV1_W4_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W5(_ft_,_x_) ((_ft_).u32_25 = (((_ft_).u32_25 & PM_DATA_VHEAP_BUFFER_RESV1_W5_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W5_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W5(_ft_) (((_ft_).u32_25 >> (PM_DATA_VHEAP_BUFFER_RESV1_W5_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W6(_ft_,_x_) ((_ft_).u32_26 = (((_ft_).u32_26 & PM_DATA_VHEAP_BUFFER_RESV1_W6_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W6_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W6(_ft_) (((_ft_).u32_26 >> (PM_DATA_VHEAP_BUFFER_RESV1_W6_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W7(_ft_,_x_) ((_ft_).u32_27 = (((_ft_).u32_27 & PM_DATA_VHEAP_BUFFER_RESV1_W7_CLRMSK ) | (((_x_) & 
(0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W7_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W7(_ft_) (((_ft_).u32_27 >> (PM_DATA_VHEAP_BUFFER_RESV1_W7_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W8(_ft_,_x_) ((_ft_).u32_28 = (((_ft_).u32_28 & PM_DATA_VHEAP_BUFFER_RESV1_W8_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W8_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W8(_ft_) (((_ft_).u32_28 >> (PM_DATA_VHEAP_BUFFER_RESV1_W8_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W9(_ft_,_x_) ((_ft_).u32_29 = (((_ft_).u32_29 & PM_DATA_VHEAP_BUFFER_RESV1_W9_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W9_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W9(_ft_) (((_ft_).u32_29 >> (PM_DATA_VHEAP_BUFFER_RESV1_W9_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W10(_ft_,_x_) ((_ft_).u32_30 = (((_ft_).u32_30 & PM_DATA_VHEAP_BUFFER_RESV1_W10_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W10_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W10(_ft_) (((_ft_).u32_30 >> (PM_DATA_VHEAP_BUFFER_RESV1_W10_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W11(_ft_,_x_) ((_ft_).u32_31 = (((_ft_).u32_31 & PM_DATA_VHEAP_BUFFER_RESV1_W11_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W11_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W11(_ft_) (((_ft_).u32_31 >> (PM_DATA_VHEAP_BUFFER_RESV1_W11_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W12(_ft_,_x_) ((_ft_).u32_32 = (((_ft_).u32_32 & PM_DATA_VHEAP_BUFFER_RESV1_W12_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W12_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W12(_ft_) (((_ft_).u32_32 >> (PM_DATA_VHEAP_BUFFER_RESV1_W12_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W13(_ft_,_x_) ((_ft_).u32_33 = (((_ft_).u32_33 & PM_DATA_VHEAP_BUFFER_RESV1_W13_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W13_SHIFT))) +#define 
PM_DATA_VHEAP_BUFFER_GET_RESV1_W13(_ft_) (((_ft_).u32_33 >> (PM_DATA_VHEAP_BUFFER_RESV1_W13_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W14(_ft_,_x_) ((_ft_).u32_34 = (((_ft_).u32_34 & PM_DATA_VHEAP_BUFFER_RESV1_W14_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W14_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W14(_ft_) (((_ft_).u32_34 >> (PM_DATA_VHEAP_BUFFER_RESV1_W14_SHIFT)) & 0xffffffffU) +#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W15(_ft_,_x_) ((_ft_).u32_35 = (((_ft_).u32_35 & PM_DATA_VHEAP_BUFFER_RESV1_W15_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W15_SHIFT))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W15(_ft_) (((_ft_).u32_35 >> (PM_DATA_VHEAP_BUFFER_RESV1_W15_SHIFT)) & 0xffffffffU) +/* +Rsv0 area +*/ +#define PM_DATA_VHEAP_BUFFER_RESV0_WOFF (19U) +#define PM_DATA_VHEAP_BUFFER_RESV0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_RESV0_CLRMSK (0x00000000U) +#define PM_DATA_VHEAP_BUFFER_SET_RESV0(_ft_,_x_) ((_ft_).u32_19 = (((_ft_).u32_19 & PM_DATA_VHEAP_BUFFER_RESV0_CLRMSK ) | (((_x_) & (0xffffffffU)) << (PM_DATA_VHEAP_BUFFER_RESV0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_RESV0(_ft_) (((_ft_).u32_19 >> (PM_DATA_VHEAP_BUFFER_RESV0_SHIFT)) & 0xffffffffU) +/* +Init Bit Sent Flag for TE7 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_TE7_INIT_WOFF (18U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE7_INIT_SHIFT (31U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE7_INIT_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE7_INIT(_ft_,_x_) ((_ft_).u32_18 = (((_ft_).u32_18 & PM_DATA_VHEAP_BUFFER_VPTR_TE7_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE7_INIT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE7_INIT(_ft_) (((_ft_).u32_18 >> (PM_DATA_VHEAP_BUFFER_VPTR_TE7_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for TE7 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_TE7_WOFF (18U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE7_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE7_CLRMSK 
(0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE7(_ft_,_x_) ((_ft_).u32_18 = (((_ft_).u32_18 & PM_DATA_VHEAP_BUFFER_VPTR_TE7_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE7_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE7(_ft_) (((_ft_).u32_18 >> (PM_DATA_VHEAP_BUFFER_VPTR_TE7_SHIFT)) & 0x000fffffU) +/* +Init Bit Sent Flag for VCE7 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE7_INIT_WOFF (17U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE7_INIT_SHIFT (31U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE7_INIT_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE7_INIT(_ft_,_x_) ((_ft_).u32_17 = (((_ft_).u32_17 & PM_DATA_VHEAP_BUFFER_VPTR_VCE7_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE7_INIT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE7_INIT(_ft_) (((_ft_).u32_17 >> (PM_DATA_VHEAP_BUFFER_VPTR_VCE7_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for VCE7 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE7_WOFF (17U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE7_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE7_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE7(_ft_,_x_) ((_ft_).u32_17 = (((_ft_).u32_17 & PM_DATA_VHEAP_BUFFER_VPTR_VCE7_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE7_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE7(_ft_) (((_ft_).u32_17 >> (PM_DATA_VHEAP_BUFFER_VPTR_VCE7_SHIFT)) & 0x000fffffU) +/* +Init Bit Sent Flag for TE6 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_TE6_INIT_WOFF (16U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE6_INIT_SHIFT (31U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE6_INIT_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE6_INIT(_ft_,_x_) ((_ft_).u32_16 = (((_ft_).u32_16 & PM_DATA_VHEAP_BUFFER_VPTR_TE6_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE6_INIT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE6_INIT(_ft_) (((_ft_).u32_16 >> (PM_DATA_VHEAP_BUFFER_VPTR_TE6_INIT_SHIFT)) & 0x00000001U) +/* 
+16KB aligned virtual top pointer for TE6 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_TE6_WOFF (16U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE6_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE6_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE6(_ft_,_x_) ((_ft_).u32_16 = (((_ft_).u32_16 & PM_DATA_VHEAP_BUFFER_VPTR_TE6_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE6_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE6(_ft_) (((_ft_).u32_16 >> (PM_DATA_VHEAP_BUFFER_VPTR_TE6_SHIFT)) & 0x000fffffU) +/* +Init Bit Sent Flag for VCE6 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE6_INIT_WOFF (15U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE6_INIT_SHIFT (31U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE6_INIT_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE6_INIT(_ft_,_x_) ((_ft_).u32_15 = (((_ft_).u32_15 & PM_DATA_VHEAP_BUFFER_VPTR_VCE6_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE6_INIT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE6_INIT(_ft_) (((_ft_).u32_15 >> (PM_DATA_VHEAP_BUFFER_VPTR_VCE6_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for VCE6 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE6_WOFF (15U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE6_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE6_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE6(_ft_,_x_) ((_ft_).u32_15 = (((_ft_).u32_15 & PM_DATA_VHEAP_BUFFER_VPTR_VCE6_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE6_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE6(_ft_) (((_ft_).u32_15 >> (PM_DATA_VHEAP_BUFFER_VPTR_VCE6_SHIFT)) & 0x000fffffU) +/* +Init Bit Sent Flag for TE5 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_TE5_INIT_WOFF (14U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE5_INIT_SHIFT (31U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE5_INIT_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE5_INIT(_ft_,_x_) ((_ft_).u32_14 = (((_ft_).u32_14 & PM_DATA_VHEAP_BUFFER_VPTR_TE5_INIT_CLRMSK ) | (((_x_) & 
(0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE5_INIT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE5_INIT(_ft_) (((_ft_).u32_14 >> (PM_DATA_VHEAP_BUFFER_VPTR_TE5_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for TE5 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_TE5_WOFF (14U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE5_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE5_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE5(_ft_,_x_) ((_ft_).u32_14 = (((_ft_).u32_14 & PM_DATA_VHEAP_BUFFER_VPTR_TE5_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE5_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE5(_ft_) (((_ft_).u32_14 >> (PM_DATA_VHEAP_BUFFER_VPTR_TE5_SHIFT)) & 0x000fffffU) +/* +Init Bit Sent Flag for VCE5 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE5_INIT_WOFF (13U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE5_INIT_SHIFT (31U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE5_INIT_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE5_INIT(_ft_,_x_) ((_ft_).u32_13 = (((_ft_).u32_13 & PM_DATA_VHEAP_BUFFER_VPTR_VCE5_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE5_INIT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE5_INIT(_ft_) (((_ft_).u32_13 >> (PM_DATA_VHEAP_BUFFER_VPTR_VCE5_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for VCE5 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE5_WOFF (13U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE5_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE5_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE5(_ft_,_x_) ((_ft_).u32_13 = (((_ft_).u32_13 & PM_DATA_VHEAP_BUFFER_VPTR_VCE5_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE5_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE5(_ft_) (((_ft_).u32_13 >> (PM_DATA_VHEAP_BUFFER_VPTR_VCE5_SHIFT)) & 0x000fffffU) +/* +Init Bit Sent Flag for TE4 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_TE4_INIT_WOFF (12U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE4_INIT_SHIFT (31U) +#define 
PM_DATA_VHEAP_BUFFER_VPTR_TE4_INIT_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE4_INIT(_ft_,_x_) ((_ft_).u32_12 = (((_ft_).u32_12 & PM_DATA_VHEAP_BUFFER_VPTR_TE4_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE4_INIT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE4_INIT(_ft_) (((_ft_).u32_12 >> (PM_DATA_VHEAP_BUFFER_VPTR_TE4_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for TE4 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_TE4_WOFF (12U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE4_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE4_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE4(_ft_,_x_) ((_ft_).u32_12 = (((_ft_).u32_12 & PM_DATA_VHEAP_BUFFER_VPTR_TE4_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE4_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE4(_ft_) (((_ft_).u32_12 >> (PM_DATA_VHEAP_BUFFER_VPTR_TE4_SHIFT)) & 0x000fffffU) +/* +Init Bit Sent Flag for VCE4 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE4_INIT_WOFF (11U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE4_INIT_SHIFT (31U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE4_INIT_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE4_INIT(_ft_,_x_) ((_ft_).u32_11 = (((_ft_).u32_11 & PM_DATA_VHEAP_BUFFER_VPTR_VCE4_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE4_INIT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE4_INIT(_ft_) (((_ft_).u32_11 >> (PM_DATA_VHEAP_BUFFER_VPTR_VCE4_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for VCE4 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE4_WOFF (11U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE4_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE4_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE4(_ft_,_x_) ((_ft_).u32_11 = (((_ft_).u32_11 & PM_DATA_VHEAP_BUFFER_VPTR_VCE4_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE4_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE4(_ft_) (((_ft_).u32_11 >> 
(PM_DATA_VHEAP_BUFFER_VPTR_VCE4_SHIFT)) & 0x000fffffU) +/* +Init Bit Sent Flag for TE3 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_TE3_INIT_WOFF (10U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE3_INIT_SHIFT (31U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE3_INIT_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE3_INIT(_ft_,_x_) ((_ft_).u32_10 = (((_ft_).u32_10 & PM_DATA_VHEAP_BUFFER_VPTR_TE3_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE3_INIT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE3_INIT(_ft_) (((_ft_).u32_10 >> (PM_DATA_VHEAP_BUFFER_VPTR_TE3_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for TE3 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_TE3_WOFF (10U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE3_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE3_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE3(_ft_,_x_) ((_ft_).u32_10 = (((_ft_).u32_10 & PM_DATA_VHEAP_BUFFER_VPTR_TE3_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE3_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE3(_ft_) (((_ft_).u32_10 >> (PM_DATA_VHEAP_BUFFER_VPTR_TE3_SHIFT)) & 0x000fffffU) +/* +Init Bit Sent Flag for VCE3 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE3_INIT_WOFF (9U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE3_INIT_SHIFT (31U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE3_INIT_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE3_INIT(_ft_,_x_) ((_ft_).u32_9 = (((_ft_).u32_9 & PM_DATA_VHEAP_BUFFER_VPTR_VCE3_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE3_INIT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE3_INIT(_ft_) (((_ft_).u32_9 >> (PM_DATA_VHEAP_BUFFER_VPTR_VCE3_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for VCE3 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE3_WOFF (9U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE3_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE3_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE3(_ft_,_x_) ((_ft_).u32_9 = 
(((_ft_).u32_9 & PM_DATA_VHEAP_BUFFER_VPTR_VCE3_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE3_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE3(_ft_) (((_ft_).u32_9 >> (PM_DATA_VHEAP_BUFFER_VPTR_VCE3_SHIFT)) & 0x000fffffU) +/* +Init Bit Sent Flag for TE2 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_TE2_INIT_WOFF (8U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE2_INIT_SHIFT (31U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE2_INIT_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE2_INIT(_ft_,_x_) ((_ft_).u32_8 = (((_ft_).u32_8 & PM_DATA_VHEAP_BUFFER_VPTR_TE2_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE2_INIT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE2_INIT(_ft_) (((_ft_).u32_8 >> (PM_DATA_VHEAP_BUFFER_VPTR_TE2_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for TE2 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_TE2_WOFF (8U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE2_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE2_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE2(_ft_,_x_) ((_ft_).u32_8 = (((_ft_).u32_8 & PM_DATA_VHEAP_BUFFER_VPTR_TE2_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE2_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE2(_ft_) (((_ft_).u32_8 >> (PM_DATA_VHEAP_BUFFER_VPTR_TE2_SHIFT)) & 0x000fffffU) +/* +Init Bit Sent Flag for VCE2 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE2_INIT_WOFF (7U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE2_INIT_SHIFT (31U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE2_INIT_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE2_INIT(_ft_,_x_) ((_ft_).u32_7 = (((_ft_).u32_7 & PM_DATA_VHEAP_BUFFER_VPTR_VCE2_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE2_INIT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE2_INIT(_ft_) (((_ft_).u32_7 >> (PM_DATA_VHEAP_BUFFER_VPTR_VCE2_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for VCE2 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE2_WOFF (7U) 
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE2_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE2_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE2(_ft_,_x_) ((_ft_).u32_7 = (((_ft_).u32_7 & PM_DATA_VHEAP_BUFFER_VPTR_VCE2_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE2_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE2(_ft_) (((_ft_).u32_7 >> (PM_DATA_VHEAP_BUFFER_VPTR_VCE2_SHIFT)) & 0x000fffffU) +/* +Init Bit Sent Flag for TE1 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_TE1_INIT_WOFF (6U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE1_INIT_SHIFT (31U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE1_INIT_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE1_INIT(_ft_,_x_) ((_ft_).u32_6 = (((_ft_).u32_6 & PM_DATA_VHEAP_BUFFER_VPTR_TE1_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE1_INIT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE1_INIT(_ft_) (((_ft_).u32_6 >> (PM_DATA_VHEAP_BUFFER_VPTR_TE1_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for TE1 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_TE1_WOFF (6U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE1(_ft_,_x_) ((_ft_).u32_6 = (((_ft_).u32_6 & PM_DATA_VHEAP_BUFFER_VPTR_TE1_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE1_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE1(_ft_) (((_ft_).u32_6 >> (PM_DATA_VHEAP_BUFFER_VPTR_TE1_SHIFT)) & 0x000fffffU) +/* +Init Bit Sent Flag for VCE1 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE1_INIT_WOFF (5U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE1_INIT_SHIFT (31U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE1_INIT_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE1_INIT(_ft_,_x_) ((_ft_).u32_5 = (((_ft_).u32_5 & PM_DATA_VHEAP_BUFFER_VPTR_VCE1_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE1_INIT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE1_INIT(_ft_) 
(((_ft_).u32_5 >> (PM_DATA_VHEAP_BUFFER_VPTR_VCE1_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for VCE1 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE1_WOFF (5U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE1_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE1_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE1(_ft_,_x_) ((_ft_).u32_5 = (((_ft_).u32_5 & PM_DATA_VHEAP_BUFFER_VPTR_VCE1_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE1_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE1(_ft_) (((_ft_).u32_5 >> (PM_DATA_VHEAP_BUFFER_VPTR_VCE1_SHIFT)) & 0x000fffffU) +/* +4KB aligned top pointer for MMU +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_MMU_WOFF (4U) +#define PM_DATA_VHEAP_BUFFER_VPTR_MMU_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_MMU_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_MMU(_ft_,_x_) ((_ft_).u32_4 = (((_ft_).u32_4 & PM_DATA_VHEAP_BUFFER_VPTR_MMU_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_MMU_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_MMU(_ft_) (((_ft_).u32_4 >> (PM_DATA_VHEAP_BUFFER_VPTR_MMU_SHIFT)) & 0x000fffffU) +/* +Init Bit Sent Flag for ALIST +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_ALIST_INIT_WOFF (3U) +#define PM_DATA_VHEAP_BUFFER_VPTR_ALIST_INIT_SHIFT (31U) +#define PM_DATA_VHEAP_BUFFER_VPTR_ALIST_INIT_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_ALIST_INIT(_ft_,_x_) ((_ft_).u32_3 = (((_ft_).u32_3 & PM_DATA_VHEAP_BUFFER_VPTR_ALIST_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_ALIST_INIT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_ALIST_INIT(_ft_) (((_ft_).u32_3 >> (PM_DATA_VHEAP_BUFFER_VPTR_ALIST_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for ALIST +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_ALIST_WOFF (3U) +#define PM_DATA_VHEAP_BUFFER_VPTR_ALIST_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_ALIST_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_ALIST(_ft_,_x_) ((_ft_).u32_3 = 
(((_ft_).u32_3 & PM_DATA_VHEAP_BUFFER_VPTR_ALIST_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_ALIST_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_ALIST(_ft_) (((_ft_).u32_3 >> (PM_DATA_VHEAP_BUFFER_VPTR_ALIST_SHIFT)) & 0x000fffffU) +/* +Init Bit Sent Flag for TE0 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_TE0_INIT_WOFF (1U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE0_INIT_SHIFT (31U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE0_INIT_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE0_INIT(_ft_,_x_) ((_ft_).u32_1 = (((_ft_).u32_1 & PM_DATA_VHEAP_BUFFER_VPTR_TE0_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE0_INIT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE0_INIT(_ft_) (((_ft_).u32_1 >> (PM_DATA_VHEAP_BUFFER_VPTR_TE0_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for TE0 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_TE0_WOFF (1U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_TE0_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE0(_ft_,_x_) ((_ft_).u32_1 = (((_ft_).u32_1 & PM_DATA_VHEAP_BUFFER_VPTR_TE0_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE0(_ft_) (((_ft_).u32_1 >> (PM_DATA_VHEAP_BUFFER_VPTR_TE0_SHIFT)) & 0x000fffffU) +/* +Init Bit Sent Flag for VCE0 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE0_INIT_WOFF (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE0_INIT_SHIFT (31U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE0_INIT_CLRMSK (0x7FFFFFFFU) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE0_INIT(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_VHEAP_BUFFER_VPTR_VCE0_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE0_INIT_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE0_INIT(_ft_) (((_ft_).u32_0 >> (PM_DATA_VHEAP_BUFFER_VPTR_VCE0_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for VCE0 +*/ +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE0_WOFF (0U) 
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE0_SHIFT (0U) +#define PM_DATA_VHEAP_BUFFER_VPTR_VCE0_CLRMSK (0xFFF00000U) +#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE0(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_VHEAP_BUFFER_VPTR_VCE0_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE0_SHIFT)))) +#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE0(_ft_) (((_ft_).u32_0 >> (PM_DATA_VHEAP_BUFFER_VPTR_VCE0_SHIFT)) & 0x000fffffU) + + +#if defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) +/* +The PM FreeListState Buffer Layout - this will apply to 3 resources - FSTACK, UFSTACK and MMUSTACK +*/ +typedef struct RGX_PM_FREELISTSTATE_BUFFER_TAG { + IMG_UINT32 u32_0; + IMG_UINT32 u32_1; + IMG_UINT32 u32_2; + IMG_UINT32 u32_3; + IMG_UINT32 u32_4; + IMG_UINT32 u32_5; + IMG_UINT32 u32_6; + IMG_UINT32 u32_7; +} RGX_PM_FREELISTSTATE_BUFFER; + +/* +Reserved field word 2 +*/ +#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_WOFF (7U) +#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_SHIFT (0U) +#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_CLRMSK (0x00000000U) +#define RGX_PM_FREELISTSTATE_BUFFER_SET_RSV_STUFF32_2(_ft_,_x_) ((_ft_).u32_7 = (((_ft_).u32_7 & RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_SHIFT)))) +#define RGX_PM_FREELISTSTATE_BUFFER_GET_RSV_STUFF32_2(_ft_) (((_ft_).u32_7 >> (RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_SHIFT)) & 0xffffffffU) +/* +Reserved field word 1 +*/ +#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_WOFF (6U) +#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_SHIFT (0U) +#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_CLRMSK (0x00000000U) +#define RGX_PM_FREELISTSTATE_BUFFER_SET_RSV_STUFF32_1(_ft_,_x_) ((_ft_).u32_6 = (((_ft_).u32_6 & RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_SHIFT)))) +#define RGX_PM_FREELISTSTATE_BUFFER_GET_RSV_STUFF32_1(_ft_) (((_ft_).u32_6 >> 
(RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_SHIFT)) & 0xffffffffU) +/* +Reserved field word 0 +*/ +#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_WOFF (5U) +#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_SHIFT (0U) +#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_CLRMSK (0x00000000U) +#define RGX_PM_FREELISTSTATE_BUFFER_SET_RSV_STUFF32_0(_ft_,_x_) ((_ft_).u32_5 = (((_ft_).u32_5 & RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_SHIFT)))) +#define RGX_PM_FREELISTSTATE_BUFFER_GET_RSV_STUFF32_0(_ft_) (((_ft_).u32_5 >> (RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_SHIFT)) & 0xffffffffU) +/* +The number of pages consumed for the MMU Page Table. Must be initialised to zero. +*/ +#define RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_WOFF (4U) +#define RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_SHIFT (0U) +#define RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_CLRMSK (0x00000000U) +#define RGX_PM_FREELISTSTATE_BUFFER_SET_MMUPAGE_STATUS(_ft_,_x_) ((_ft_).u32_4 = (((_ft_).u32_4 & RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_SHIFT)))) +#define RGX_PM_FREELISTSTATE_BUFFER_GET_MMUPAGE_STATUS(_ft_) (((_ft_).u32_4 >> (RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_SHIFT)) & 0xffffffffU) +/* +The total number of pages consumed from the free stack. Must be initialised to zero. This field is unused in the MMUSTACK. 
+*/ +#define RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_WOFF (3U) +#define RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_SHIFT (0U) +#define RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_CLRMSK (0x00000000U) +#define RGX_PM_FREELISTSTATE_BUFFER_SET_PAGE_STATUS(_ft_,_x_) ((_ft_).u32_3 = (((_ft_).u32_3 & RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_SHIFT)))) +#define RGX_PM_FREELISTSTATE_BUFFER_GET_PAGE_STATUS(_ft_) (((_ft_).u32_3 >> (RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_SHIFT)) & 0xffffffffU) +/* +Stack pointer for the free stack - the location of the next free page relative to the BaseAddr, in number of DWORDs. Must be initialised to zero. +*/ +#define RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_WOFF (2U) +#define RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_SHIFT (0U) +#define RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_CLRMSK (0x00000000U) +#define RGX_PM_FREELISTSTATE_BUFFER_SET_STACK_PTR(_ft_,_x_) ((_ft_).u32_2 = (((_ft_).u32_2 & RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_SHIFT)))) +#define RGX_PM_FREELISTSTATE_BUFFER_GET_STACK_PTR(_ft_) (((_ft_).u32_2 >> (RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_SHIFT)) & 0xffffffffU) +/* +Base address of the free stack - points to the bottom of the stack. 
+*/ +#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W0_WOFF (0U) +#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W0_SHIFT (5U) +#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W1_WOFF (1U) +#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W1_SHIFT (5U) +#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W0_CLRMSK (0x0000001FU) +#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W1_CLRMSK (0x00000000U) +#define RGX_PM_FREELISTSTATE_BUFFER_SET_BASE_ADDR(_ft_,_x_) { ((_ft_).u32_0 = (((_ft_).u32_0 & RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000007ffffff))) << 5))); \ + ((_ft_).u32_1 = (((_ft_).u32_1 & RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x07fffffff8000000))) >> 27))); } +#define RGX_PM_FREELISTSTATE_BUFFER_GET_BASE_ADDR(_ft_) (((_ft_).u32_0 >> (5)) | ((IMG_UINT64)((_ft_).u32_1 & 0xffffffffU ) << (27))) +#endif /* RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES */ + + +#if !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) +/* +The PM FreeListState Buffer Layout - this will apply to 3 resources - FSTACK, UFSTACK and MMUSTACK +*/ +typedef struct RGX_PM_FREELISTSTATE_BUFFER_TAG { + IMG_UINT32 u32_0; + IMG_UINT32 u32_1; + IMG_UINT32 u32_2; + IMG_UINT32 u32_3; + IMG_UINT32 u32_4; + IMG_UINT32 u32_5; + IMG_UINT32 u32_6; + IMG_UINT32 u32_7; +} RGX_PM_FREELISTSTATE_BUFFER; + +/* +Reserved field word 2 +*/ +#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_WOFF (7U) +#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_SHIFT (0U) +#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_CLRMSK (0x00000000U) +#define RGX_PM_FREELISTSTATE_BUFFER_SET_RSV_STUFF32_2(_ft_,_x_) ((_ft_).u32_7 = (((_ft_).u32_7 & RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_SHIFT)))) +#define RGX_PM_FREELISTSTATE_BUFFER_GET_RSV_STUFF32_2(_ft_) (((_ft_).u32_7 >> (RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_SHIFT)) & 0xffffffffU) +/* +Reserved field word 1 +*/ +#define 
RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_WOFF (6U) +#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_SHIFT (0U) +#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_CLRMSK (0x00000000U) +#define RGX_PM_FREELISTSTATE_BUFFER_SET_RSV_STUFF32_1(_ft_,_x_) ((_ft_).u32_6 = (((_ft_).u32_6 & RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_SHIFT)))) +#define RGX_PM_FREELISTSTATE_BUFFER_GET_RSV_STUFF32_1(_ft_) (((_ft_).u32_6 >> (RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_SHIFT)) & 0xffffffffU) +/* +Reserved field word 0 +*/ +#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_WOFF (5U) +#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_SHIFT (0U) +#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_CLRMSK (0x00000000U) +#define RGX_PM_FREELISTSTATE_BUFFER_SET_RSV_STUFF32_0(_ft_,_x_) ((_ft_).u32_5 = (((_ft_).u32_5 & RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_SHIFT)))) +#define RGX_PM_FREELISTSTATE_BUFFER_GET_RSV_STUFF32_0(_ft_) (((_ft_).u32_5 >> (RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_SHIFT)) & 0xffffffffU) +/* +The number of pages consumed for the MMU Page Table. Must be initialised to zero. +*/ +#define RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_WOFF (4U) +#define RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_SHIFT (0U) +#define RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_CLRMSK (0x00000000U) +#define RGX_PM_FREELISTSTATE_BUFFER_SET_MMUPAGE_STATUS(_ft_,_x_) ((_ft_).u32_4 = (((_ft_).u32_4 & RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_SHIFT)))) +#define RGX_PM_FREELISTSTATE_BUFFER_GET_MMUPAGE_STATUS(_ft_) (((_ft_).u32_4 >> (RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_SHIFT)) & 0xffffffffU) +/* +The total number of pages consumed from the free stack. Must be initialised to zero. This field is unused in the MMUSTACK. 
+*/ +#define RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_WOFF (3U) +#define RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_SHIFT (0U) +#define RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_CLRMSK (0x00000000U) +#define RGX_PM_FREELISTSTATE_BUFFER_SET_PAGE_STATUS(_ft_,_x_) ((_ft_).u32_3 = (((_ft_).u32_3 & RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_SHIFT)))) +#define RGX_PM_FREELISTSTATE_BUFFER_GET_PAGE_STATUS(_ft_) (((_ft_).u32_3 >> (RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_SHIFT)) & 0xffffffffU) +/* +Stack pointer for the free stack - the location of the next free page relative to the BaseAddr, in number of DWORDs. Must be initialised to zero. +*/ +#define RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_WOFF (2U) +#define RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_SHIFT (0U) +#define RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_CLRMSK (0x00000000U) +#define RGX_PM_FREELISTSTATE_BUFFER_SET_STACK_PTR(_ft_,_x_) ((_ft_).u32_2 = (((_ft_).u32_2 & RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_SHIFT)))) +#define RGX_PM_FREELISTSTATE_BUFFER_GET_STACK_PTR(_ft_) (((_ft_).u32_2 >> (RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_SHIFT)) & 0xffffffffU) +/* +Base address of the free stack - points to the bottom of the stack. 
+*/ +#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W0_WOFF (0U) +#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W0_SHIFT (0U) +#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W1_WOFF (1U) +#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W1_SHIFT (0U) +#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W0_CLRMSK (0x00000000U) +#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W1_CLRMSK (0x00000000U) +#define RGX_PM_FREELISTSTATE_BUFFER_SET_BASE_ADDR(_ft_,_x_) { ((_ft_).u32_0 = (((_ft_).u32_0 & RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000ffffffff))) << 0))); \ + ((_ft_).u32_1 = (((_ft_).u32_1 & RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0xffffffff00000000))) >> 32))); } +#define RGX_PM_FREELISTSTATE_BUFFER_GET_BASE_ADDR(_ft_) (((_ft_).u32_0 >> (0)) | ((IMG_UINT64)((_ft_).u32_1 & 0xffffffffU ) << (32))) +#endif /* !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) */ + + +#if defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) +/* +256-bit granular, lower bits ignored. +Maximum addressable range supported by hardware is 1 TB. +*/ +#define RGX_PM_FREELISTSTATE_BASE_ADDR_ALIGNSHIFT (5U) +#define RGX_PM_FREELISTSTATE_BASE_ADDR_ALIGNSIZE (32U) +#define RGX_PM_FREELISTSTATE_BASE_ADDR_BASE_ADDR (0U) +#define RGX_PM_FREELISTSTATE_BASE_ADDR_BASE_ADDR_LOWER (0U) +#define RGX_PM_FREELISTSTATE_BASE_ADDR_BASE_ADDR_UPPER (68719476735ULL) +#endif /* RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES */ + + +#if !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) +/* +128-bit aligned. +Maximum addressable range supported by hardware is 1 TB. +The 40-bit, 16-byte-aligned address is packed into bits 35:0 of the two DWORDs. 
+*/ +#define RGX_PM_FREELISTSTATE_BASE_ADDR_ALIGNSHIFT (4U) +#define RGX_PM_FREELISTSTATE_BASE_ADDR_ALIGNSIZE (16U) +#define RGX_PM_FREELISTSTATE_BASE_ADDR_BASE_ADDR (0U) +#define RGX_PM_FREELISTSTATE_BASE_ADDR_BASE_ADDR_LOWER (0U) +#define RGX_PM_FREELISTSTATE_BASE_ADDR_BASE_ADDR_UPPER (68719476735ULL) +#endif /* !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) */ + + +/* +Maximum range supported by hardware is 23 bits. +*/ +#define RGX_PM_FREELISTSTATE_STACK_PTR_STACK_PTR (0U) +#define RGX_PM_FREELISTSTATE_STACK_PTR_STACK_PTR_LOWER (0U) +#define RGX_PM_FREELISTSTATE_STACK_PTR_STACK_PTR_UPPER (16777215U) + + +/* +Maximum range supported by hardware is 23 bits. +*/ +#define RGX_PM_FREELISTSTATE_PAGE_STATUS_PAGE_STATUS (0U) +#define RGX_PM_FREELISTSTATE_PAGE_STATUS_PAGE_STATUS_LOWER (0U) +#define RGX_PM_FREELISTSTATE_PAGE_STATUS_PAGE_STATUS_UPPER (16777215U) + + +/* +Maximum range supported by hardware is 23 bits. +*/ +#define RGX_PM_FREELISTSTATE_MMUPAGE_STATUS_MMUPAGE_STATUS (0U) +#define RGX_PM_FREELISTSTATE_MMUPAGE_STATUS_MMUPAGE_STATUS_LOWER (0U) +#define RGX_PM_FREELISTSTATE_MMUPAGE_STATUS_MMUPAGE_STATUS_UPPER (16777215U) + + +#if defined(RGX_FEATURE_PM_REGISTER_CONFIG_MODE)&&defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES)&&!defined(RGX_FEATURE_SINGLE_TE_VSPACE) +/* +The PM Render Context Buffer Layout +*/ +typedef struct RGX_PM_RENDERSTATE_BUFFER_TAG { + IMG_UINT32 u32_0; + IMG_UINT32 u32_1; + IMG_UINT32 u32_2; + IMG_UINT32 u32_3; + IMG_UINT32 u32_4; + IMG_UINT32 u32_5; + IMG_UINT32 u32_6; + IMG_UINT32 u32_7; + IMG_UINT32 u32_8; + IMG_UINT32 u32_9; + IMG_UINT32 u32_10; + IMG_UINT32 u32_11; + IMG_UINT32 u32_12; + IMG_UINT32 u32_13; + IMG_UINT32 u32_14; + IMG_UINT32 u32_15; + IMG_UINT32 u32_16; + IMG_UINT32 u32_17; + IMG_UINT32 u32_18; + IMG_UINT32 u32_19; + IMG_UINT32 u32_20; + IMG_UINT32 u32_21; + IMG_UINT32 u32_22; + IMG_UINT32 u32_23; + IMG_UINT32 u32_24; + IMG_UINT32 u32_25; + IMG_UINT32 u32_26; + IMG_UINT32 u32_27; + IMG_UINT32 u32_28; + 
IMG_UINT32 u32_29; + IMG_UINT32 u32_30; + IMG_UINT32 u32_31; + IMG_UINT32 u32_32; + IMG_UINT32 u32_33; + IMG_UINT32 u32_34; + IMG_UINT32 u32_35; + IMG_UINT32 u32_36; + IMG_UINT32 u32_37; + IMG_UINT32 u32_38; + IMG_UINT32 u32_39; + IMG_UINT32 u32_40; + IMG_UINT32 u32_41; + IMG_UINT32 u32_42; + IMG_UINT32 u32_43; + IMG_UINT32 u32_44; + IMG_UINT32 u32_45; +} RGX_PM_RENDERSTATE_BUFFER; + +/* +The base address of the Virtual-Physical Page Translation Table. +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W0_WOFF (10U) +#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W0_SHIFT (4U) +#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W1_WOFF (11U) +#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W1_SHIFT (4U) +#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W0_CLRMSK (0x0000000FU) +#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W1_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VFP_BASE_ADDR(_ft_,_x_) { ((_ft_).u32_10 = (((_ft_).u32_10 & RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffffff))) << 4))); \ + ((_ft_).u32_11 = (((_ft_).u32_11 & RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0ffffffff0000000))) >> 28))); } +#define RGX_PM_RENDERSTATE_BUFFER_GET_VFP_BASE_ADDR(_ft_) (((_ft_).u32_10 >> (4)) | ((IMG_UINT64)((_ft_).u32_11 & 0xffffffffU ) << (28))) +/* +A 16-bit macrotile mask indicating which macrotiles have been freed by the ISP. +A '1' in a bit position indicates that the ISP has signalled that the corresponding macrotile can be freed. +Only the least-significant 16 bits are valid. +Only used in the 3D phase. +Must be initialised to zero. 
+*/ +#define RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_WOFF (9U) +#define RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MTILEFREE_STATUS(_ft_,_x_) ((_ft_).u32_9 = (((_ft_).u32_9 & RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MTILEFREE_STATUS(_ft_) (((_ft_).u32_9 >> (RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_SHIFT)) & 0xffffffffU) +/* +A 16-bit macrotile mask indicating which macrotiles have been freed by the PM. +A '1' in a bit position indicates that the corresponding macrotile has been freed, and its pages released back to the appropriate free stack. +Only the least-significant 16 bits are valid. +Only used in the 3D phase. +Must be initialised to zero. +*/ +#define RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_WOFF (8U) +#define RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_DEALLOC_MASK_STATUS(_ft_,_x_) ((_ft_).u32_8 = (((_ft_).u32_8 & RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_DEALLOC_MASK_STATUS(_ft_) (((_ft_).u32_8 >> (RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_SHIFT)) & 0xffffffffU) +/* +The base address of the VHEAP buffer. +Must be initialised to point to the location of the VHEAP buffer in memory. 
+*/ +#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W0_WOFF (6U) +#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W0_SHIFT (4U) +#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W1_WOFF (7U) +#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W1_SHIFT (4U) +#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W0_CLRMSK (0x0000000FU) +#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W1_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VHEAP_BASE_ADDR(_ft_,_x_) { ((_ft_).u32_6 = (((_ft_).u32_6 & RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffffff))) << 4))); \ + ((_ft_).u32_7 = (((_ft_).u32_7 & RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0ffffffff0000000))) >> 28))); } +#define RGX_PM_RENDERSTATE_BUFFER_GET_VHEAP_BASE_ADDR(_ft_) (((_ft_).u32_6 >> (4)) | ((IMG_UINT64)((_ft_).u32_7 & 0xffffffffU ) << (28))) +/* +Reserved bits, un-used. +*/ +#define RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_WOFF (5U) +#define RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_RSV_STUFF32(_ft_,_x_) ((_ft_).u32_5 = (((_ft_).u32_5 & RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_RSV_STUFF32(_ft_) (((_ft_).u32_5 >> (RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_SHIFT)) & 0xffffffffU) +/* +The number of entries on the MLIST. Must be initialised to zero, meaning no pages allocated. 
+*/ +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_WOFF (4U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MLIST_TAIL(_ft_,_x_) ((_ft_).u32_4 = (((_ft_).u32_4 & RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MLIST_TAIL(_ft_) (((_ft_).u32_4 >> (RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_SHIFT)) & 0xffffffffU) +/* +The base address of the MLIST. +Must be initialised to point to a block of memory where the PM can write the MLIST. +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_WOFF (2U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_SHIFT (5U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_WOFF (3U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_SHIFT (5U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_CLRMSK (0x0000001FU) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MLIST_BASE_ADDR(_ft_,_x_) { ((_ft_).u32_2 = (((_ft_).u32_2 & RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000007ffffff))) << 5))); \ + ((_ft_).u32_3 = (((_ft_).u32_3 & RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x07fffffff8000000))) >> 27))); } +#define RGX_PM_RENDERSTATE_BUFFER_GET_MLIST_BASE_ADDR(_ft_) (((_ft_).u32_2 >> (5)) | ((IMG_UINT64)((_ft_).u32_3 & 0xffffffffU ) << (27))) +/* +The number of entries on the ALIST. Must be initialised to zero, meaning no pages allocated. 
+*/ +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_WOFF (0U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_WOFF (1U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_ALIST_TAIL(_ft_,_x_) { ((_ft_).u32_0 = (((_ft_).u32_0 & RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000ffffffff))) << 0))); \ + ((_ft_).u32_1 = (((_ft_).u32_1 & RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0xffffffff00000000))) >> 32))); } +#define RGX_PM_RENDERSTATE_BUFFER_GET_ALIST_TAIL(_ft_) (((_ft_).u32_0 >> (0)) | ((IMG_UINT64)((_ft_).u32_1 & 0xffffffffU ) << (32))) +#endif /* RGX_FEATURE_PM_REGISTER_CONFIG_MODE&&PM_BYTE_ALIGNED_BASE_ADDRESSES&&!SINGLE_TE_VSPACE */ + + +#if defined(RGX_FEATURE_PM_REGISTER_CONFIG_MODE)&&!defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES)&&!defined(RGX_FEATURE_SINGLE_TE_VSPACE) +/* +The PM Render Context Buffer Layout +*/ +typedef struct RGX_PM_RENDERSTATE_BUFFER_TAG { + IMG_UINT32 u32_0; + IMG_UINT32 u32_1; + IMG_UINT32 u32_2; + IMG_UINT32 u32_3; + IMG_UINT32 u32_4; + IMG_UINT32 u32_5; + IMG_UINT32 u32_6; + IMG_UINT32 u32_7; + IMG_UINT32 u32_8; + IMG_UINT32 u32_9; + IMG_UINT32 u32_10; + IMG_UINT32 u32_11; +} RGX_PM_RENDERSTATE_BUFFER; + +/* +The base address of the Virtual-Physical Page Translation Table. 
+*/ +#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W0_WOFF (10U) +#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W0_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W1_WOFF (11U) +#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W1_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W0_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W1_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VFP_BASE_ADDR(_ft_,_x_) { ((_ft_).u32_10 = (((_ft_).u32_10 & RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000ffffffff))) << 0))); \ + ((_ft_).u32_11 = (((_ft_).u32_11 & RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0xffffffff00000000))) >> 32))); } +#define RGX_PM_RENDERSTATE_BUFFER_GET_VFP_BASE_ADDR(_ft_) (((_ft_).u32_10 >> (0)) | ((IMG_UINT64)((_ft_).u32_11 & 0xffffffffU ) << (32))) +/* +A 16-bit macrotile mask indicating which macrotiles have been freed by the ISP. +A '1' in a bit position indicates that the ISP has signalled that the corresponding macrotile can be freed. +Only the least-significant 16 bits are valid. +Only used in the 3D phase. +Must be initialised to zero. +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_WOFF (9U) +#define RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MTILEFREE_STATUS(_ft_,_x_) ((_ft_).u32_9 = (((_ft_).u32_9 & RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MTILEFREE_STATUS(_ft_) (((_ft_).u32_9 >> (RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_SHIFT)) & 0xffffffffU) +/* +A 16-bit macrotile mask indicating which macrotiles have been freed by the PM. 
+A '1' in a bit position indicates that the corresponding macrotile has been freed, and its pages released back to the appropriate free stack. +Only the least-significant 16 bits are valid. +Only used in the 3D phase. +Must be initialised to zero. +*/ +#define RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_WOFF (8U) +#define RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_DEALLOC_MASK_STATUS(_ft_,_x_) ((_ft_).u32_8 = (((_ft_).u32_8 & RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_DEALLOC_MASK_STATUS(_ft_) (((_ft_).u32_8 >> (RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_SHIFT)) & 0xffffffffU) +/* +The base address of the VHEAP buffer. +Must be initialised to point to the location of the VHEAP buffer in memory. +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W0_WOFF (6U) +#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W0_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W1_WOFF (7U) +#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W1_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W0_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W1_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VHEAP_BASE_ADDR(_ft_,_x_) { ((_ft_).u32_6 = (((_ft_).u32_6 & RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000ffffffff))) << 0))); \ + ((_ft_).u32_7 = (((_ft_).u32_7 & RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0xffffffff00000000))) >> 32))); } +#define RGX_PM_RENDERSTATE_BUFFER_GET_VHEAP_BASE_ADDR(_ft_) (((_ft_).u32_6 >> (0)) | ((IMG_UINT64)((_ft_).u32_7 & 0xffffffffU ) << (32))) +/* +Reserved bits, un-used. 
+*/ +#define RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_WOFF (5U) +#define RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_RSV_STUFF32(_ft_,_x_) ((_ft_).u32_5 = (((_ft_).u32_5 & RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_RSV_STUFF32(_ft_) (((_ft_).u32_5 >> (RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_SHIFT)) & 0xffffffffU) +/* +The number of entries on the MLIST. Must be initialised to zero, meaning no pages allocated. +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_WOFF (4U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MLIST_TAIL(_ft_,_x_) ((_ft_).u32_4 = (((_ft_).u32_4 & RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MLIST_TAIL(_ft_) (((_ft_).u32_4 >> (RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_SHIFT)) & 0xffffffffU) +/* +The base address of the MLIST. +Must be initialised to point to a block of memory where the PM can write the MLIST. 
+*/ +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_WOFF (2U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_WOFF (3U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MLIST_BASE_ADDR(_ft_,_x_) { ((_ft_).u32_2 = (((_ft_).u32_2 & RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000ffffffff))) << 0))); \ + ((_ft_).u32_3 = (((_ft_).u32_3 & RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0xffffffff00000000))) >> 32))); } +#define RGX_PM_RENDERSTATE_BUFFER_GET_MLIST_BASE_ADDR(_ft_) (((_ft_).u32_2 >> (0)) | ((IMG_UINT64)((_ft_).u32_3 & 0xffffffffU ) << (32))) +/* +The number of entries on the ALIST. Must be initialised to zero, meaning no pages allocated. 
+*/ +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_WOFF (0U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_WOFF (1U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_ALIST_TAIL(_ft_,_x_) { ((_ft_).u32_0 = (((_ft_).u32_0 & RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000ffffffff))) << 0))); \ + ((_ft_).u32_1 = (((_ft_).u32_1 & RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0xffffffff00000000))) >> 32))); } +#define RGX_PM_RENDERSTATE_BUFFER_GET_ALIST_TAIL(_ft_) (((_ft_).u32_0 >> (0)) | ((IMG_UINT64)((_ft_).u32_1 & 0xffffffffU ) << (32))) +#endif /* RGX_FEATURE_PM_REGISTER_CONFIG_MODE&&!PM_BYTE_ALIGNED_BASE_ADDRESSES&&!SINGLE_TE_VSPACE */ + + +#if defined(RGX_FEATURE_SINGLE_TE_VSPACE) +/* +The PM Render Context Buffer Layout +*/ +typedef struct RGX_PM_RENDERSTATE_BUFFER_TAG { + IMG_UINT32 u32_0; + IMG_UINT32 u32_1; + IMG_UINT32 u32_2; + IMG_UINT32 u32_3; + IMG_UINT32 u32_4; + IMG_UINT32 u32_5; + IMG_UINT32 u32_6; + IMG_UINT32 u32_7; + IMG_UINT32 u32_8; + IMG_UINT32 u32_9; + IMG_UINT32 u32_10; + IMG_UINT32 u32_11; + IMG_UINT32 u32_12; + IMG_UINT32 u32_13; + IMG_UINT32 u32_14; + IMG_UINT32 u32_15; + IMG_UINT32 u32_16; + IMG_UINT32 u32_17; + IMG_UINT32 u32_18; + IMG_UINT32 u32_19; + IMG_UINT32 u32_20; + IMG_UINT32 u32_21; + IMG_UINT32 u32_22; + IMG_UINT32 u32_23; + IMG_UINT32 u32_24; + IMG_UINT32 u32_25; + IMG_UINT32 u32_26; + IMG_UINT32 u32_27; + IMG_UINT32 u32_28; + IMG_UINT32 u32_29; + IMG_UINT32 u32_30; + IMG_UINT32 u32_31; + IMG_UINT32 u32_32; + IMG_UINT32 u32_33; + IMG_UINT32 u32_34; + IMG_UINT32 u32_35; + IMG_UINT32 u32_36; + IMG_UINT32 u32_37; + IMG_UINT32 u32_38; + IMG_UINT32 u32_39; + IMG_UINT32 u32_40; + IMG_UINT32 u32_41; + IMG_UINT32 u32_42; + 
IMG_UINT32 u32_43; + IMG_UINT32 u32_44; + IMG_UINT32 u32_45; +} RGX_PM_RENDERSTATE_BUFFER; + +/* +MMU catalogue base address for VCE pipe 3 LAST_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE_WOFF (37U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE(_ft_,_x_) ((_ft_).u32_37 = (((_ft_).u32_37 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE(_ft_) (((_ft_).u32_37 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for VCE pipe 3 MAPPED +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_MAPPED_WOFF (36U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_MAPPED_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_MAPPED_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE3_MAPPED(_ft_,_x_) ((_ft_).u32_36 = (((_ft_).u32_36 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_MAPPED_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE3_MAPPED(_ft_) (((_ft_).u32_36 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_MAPPED_SHIFT)) & 0x00000001U) +/* +MMU catalogue base address for VCE pipe 3 INIT_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE_WOFF (36U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE_CLRMSK (0xFFF00000U) +#define 
RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE(_ft_,_x_) ((_ft_).u32_36 = (((_ft_).u32_36 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE(_ft_) (((_ft_).u32_36 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for VCE pipe 2 LAST_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE_WOFF (35U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE(_ft_,_x_) ((_ft_).u32_35 = (((_ft_).u32_35 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE(_ft_) (((_ft_).u32_35 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for VCE pipe 2 MAPPED +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_MAPPED_WOFF (34U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_MAPPED_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_MAPPED_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE2_MAPPED(_ft_,_x_) ((_ft_).u32_34 = (((_ft_).u32_34 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_MAPPED_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE2_MAPPED(_ft_) (((_ft_).u32_34 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_MAPPED_SHIFT)) & 0x00000001U) +/* +MMU catalogue base 
address for VCE pipe 2 INIT_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE_WOFF (34U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE(_ft_,_x_) ((_ft_).u32_34 = (((_ft_).u32_34 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE(_ft_) (((_ft_).u32_34 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for VCE pipe 1 LAST_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE_WOFF (33U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE(_ft_,_x_) ((_ft_).u32_33 = (((_ft_).u32_33 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE(_ft_) (((_ft_).u32_33 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for VCE pipe 1 MAPPED +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_MAPPED_WOFF (32U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_MAPPED_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_MAPPED_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE1_MAPPED(_ft_,_x_) ((_ft_).u32_32 = (((_ft_).u32_32 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_MAPPED_CLRMSK ) | 
(((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_MAPPED_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE1_MAPPED(_ft_) (((_ft_).u32_32 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_MAPPED_SHIFT)) & 0x00000001U) +/* +MMU catalogue base address for VCE pipe 1 INIT_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE_WOFF (32U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE(_ft_,_x_) ((_ft_).u32_32 = (((_ft_).u32_32 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE(_ft_) (((_ft_).u32_32 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for VCE pipe 0 LAST_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE_WOFF (30U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE(_ft_,_x_) ((_ft_).u32_30 = (((_ft_).u32_30 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE(_ft_) (((_ft_).u32_30 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for VCE pipe 0 ADDR +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_ADDR_WOFF (29U) +#define 
RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_ADDR_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_ADDR_CLRMSK (0xF0000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE0_ADDR(_ft_,_x_) ((_ft_).u32_29 = (((_ft_).u32_29 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_ADDR_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_ADDR_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE0_ADDR(_ft_) (((_ft_).u32_29 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_ADDR_SHIFT)) & 0x0fffffffU) +/* +MMU catalogue base address for VCE pipe 0 MAPPED +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_MAPPED_WOFF (28U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_MAPPED_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_MAPPED_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE0_MAPPED(_ft_,_x_) ((_ft_).u32_28 = (((_ft_).u32_28 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_MAPPED_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE0_MAPPED(_ft_) (((_ft_).u32_28 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_MAPPED_SHIFT)) & 0x00000001U) +/* +MMU catalogue base address for VCE pipe 0 INIT_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE_WOFF (28U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE(_ft_,_x_) ((_ft_).u32_28 = (((_ft_).u32_28 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE_SHIFT)))) +#define 
RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE(_ft_) (((_ft_).u32_28 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for TE LAST_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE_WOFF (26U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE(_ft_,_x_) ((_ft_).u32_26 = (((_ft_).u32_26 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE(_ft_) (((_ft_).u32_26 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for TE ADDR +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_ADDR_WOFF (25U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_ADDR_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_ADDR_CLRMSK (0xF0000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE0_ADDR(_ft_,_x_) ((_ft_).u32_25 = (((_ft_).u32_25 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_ADDR_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_ADDR_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE0_ADDR(_ft_) (((_ft_).u32_25 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_ADDR_SHIFT)) & 0x0fffffffU) +/* +MMU catalogue base address for TE MAPPED +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_MAPPED_WOFF (24U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_MAPPED_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_MAPPED_CLRMSK (0x7FFFFFFFU) +#define 
RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE0_MAPPED(_ft_,_x_) ((_ft_).u32_24 = (((_ft_).u32_24 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_MAPPED_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE0_MAPPED(_ft_) (((_ft_).u32_24 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_MAPPED_SHIFT)) & 0x00000001U) +/* +MMU catalogue base address for TE INIT_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE_WOFF (24U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE(_ft_,_x_) ((_ft_).u32_24 = (((_ft_).u32_24 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE(_ft_) (((_ft_).u32_24 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for ALIST LAST_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_LAST_PAGE_WOFF (18U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_LAST_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_LAST_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_ALIST_LAST_PAGE(_ft_,_x_) ((_ft_).u32_18 = (((_ft_).u32_18 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_LAST_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_ALIST_LAST_PAGE(_ft_) (((_ft_).u32_18 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_LAST_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for ALIST ADDR +*/ +#define 
RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_ADDR_WOFF (17U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_ADDR_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_ADDR_CLRMSK (0xF0000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_ALIST_ADDR(_ft_,_x_) ((_ft_).u32_17 = (((_ft_).u32_17 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_ADDR_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_ADDR_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_ALIST_ADDR(_ft_) (((_ft_).u32_17 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_ADDR_SHIFT)) & 0x0fffffffU) +/* +MMU catalogue base address for ALIST MAPPED +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_MAPPED_WOFF (16U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_MAPPED_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_MAPPED_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_ALIST_MAPPED(_ft_,_x_) ((_ft_).u32_16 = (((_ft_).u32_16 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_MAPPED_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_ALIST_MAPPED(_ft_) (((_ft_).u32_16 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_MAPPED_SHIFT)) & 0x00000001U) +/* +MMU catalogue base address for ALIST INIT_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_INIT_PAGE_WOFF (16U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_INIT_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_INIT_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_ALIST_INIT_PAGE(_ft_,_x_) ((_ft_).u32_16 = (((_ft_).u32_16 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_INIT_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_ALIST_INIT_PAGE(_ft_) (((_ft_).u32_16 >> 
(RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_INIT_PAGE_SHIFT)) & 0x000fffffU) +/* +Init Bit Sent Flag for TE0 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_INIT_WOFF (12U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_INIT_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_INIT_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_TE0_INIT(_ft_,_x_) ((_ft_).u32_12 = (((_ft_).u32_12 & RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_INIT_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_TE0_INIT(_ft_) (((_ft_).u32_12 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for TE0 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_WOFF (12U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_CLRMSK (0x80000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_TE0(_ft_,_x_) ((_ft_).u32_12 = (((_ft_).u32_12 & RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_TE0(_ft_) (((_ft_).u32_12 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_SHIFT)) & 0x7fffffffU) +/* +Init Bit Sent Flag for VCE3 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_INIT_WOFF (11U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_INIT_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_INIT_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE3_INIT(_ft_,_x_) ((_ft_).u32_11 = (((_ft_).u32_11 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_INIT_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE3_INIT(_ft_) (((_ft_).u32_11 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for VCE3 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_WOFF (11U) +#define 
RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_CLRMSK (0x80000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE3(_ft_,_x_) ((_ft_).u32_11 = (((_ft_).u32_11 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE3(_ft_) (((_ft_).u32_11 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_SHIFT)) & 0x7fffffffU) +/* +Init Bit Sent Flag for VCE2 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_INIT_WOFF (10U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_INIT_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_INIT_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE2_INIT(_ft_,_x_) ((_ft_).u32_10 = (((_ft_).u32_10 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_INIT_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE2_INIT(_ft_) (((_ft_).u32_10 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for VCE2 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_WOFF (10U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_CLRMSK (0x80000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE2(_ft_,_x_) ((_ft_).u32_10 = (((_ft_).u32_10 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE2(_ft_) (((_ft_).u32_10 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_SHIFT)) & 0x7fffffffU) +/* +Init Bit Sent Flag for VCE1 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_INIT_WOFF (9U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_INIT_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_INIT_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE1_INIT(_ft_,_x_) ((_ft_).u32_9 = (((_ft_).u32_9 & 
RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_INIT_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE1_INIT(_ft_) (((_ft_).u32_9 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for VCE1 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_WOFF (9U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_CLRMSK (0x80000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE1(_ft_,_x_) ((_ft_).u32_9 = (((_ft_).u32_9 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE1(_ft_) (((_ft_).u32_9 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_SHIFT)) & 0x7fffffffU) +/* +Init Bit Sent Flag for VCE0 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_INIT_WOFF (8U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_INIT_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_INIT_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE0_INIT(_ft_,_x_) ((_ft_).u32_8 = (((_ft_).u32_8 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_INIT_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE0_INIT(_ft_) (((_ft_).u32_8 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for VCE0 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_WOFF (8U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_CLRMSK (0x80000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE0(_ft_,_x_) ((_ft_).u32_8 = (((_ft_).u32_8 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE0(_ft_) (((_ft_).u32_8 >> 
(RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_SHIFT)) & 0x7fffffffU) +/* +A 16-bit macrotile mask indicating which macrotiles have been freed by the ISP. +A '1' in a bit position indicates that the ISP has signalled that the corresponding macrotile can be freed. +Only the least-significant 16 bits are valid. +Only used in the 3D phase. +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_WOFF (7U) +#define RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MTILEFREE_STATUS(_ft_,_x_) ((_ft_).u32_7 = (((_ft_).u32_7 & RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MTILEFREE_STATUS(_ft_) (((_ft_).u32_7 >> (RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_SHIFT)) & 0xffffffffU) +/* +A 16-bit macrotile mask indicating which macrotiles have been freed by the PM. +A '1' in a bit position indicates that the corresponding macrotile has been freed, and its pages released back to the appropriate free stack. +Only the least-significant 16 bits are valid. +Only used in the 3D phase. +*/ +#define RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_WOFF (6U) +#define RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_DEALLOC_MASK_STATUS(_ft_,_x_) ((_ft_).u32_6 = (((_ft_).u32_6 & RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_DEALLOC_MASK_STATUS(_ft_) (((_ft_).u32_6 >> (RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_SHIFT)) & 0xffffffffU) +/* +Reserved bits, un-used. 
+*/ +#define RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_WOFF (5U) +#define RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_RSV_STUFF32(_ft_,_x_) ((_ft_).u32_5 = (((_ft_).u32_5 & RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_RSV_STUFF32(_ft_) (((_ft_).u32_5 >> (RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_SHIFT)) & 0xffffffffU) +/* +The number of entries on the MLIST. +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_WOFF (4U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MLIST_TAIL(_ft_,_x_) ((_ft_).u32_4 = (((_ft_).u32_4 & RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MLIST_TAIL(_ft_) (((_ft_).u32_4 >> (RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_SHIFT)) & 0xffffffffU) +/* +The base address of the MLIST. +Must be initialised to point to a block of memory where the PM can write the MLIST. 
+*/ +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_WOFF (2U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_SHIFT (5U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_WOFF (3U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_SHIFT (5U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_CLRMSK (0x0000001FU) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MLIST_BASE_ADDR(_ft_,_x_) { ((_ft_).u32_2 = (((_ft_).u32_2 & RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000007ffffff))) << 5))); \ + ((_ft_).u32_3 = (((_ft_).u32_3 & RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x07fffffff8000000))) >> 27))); } +#define RGX_PM_RENDERSTATE_BUFFER_GET_MLIST_BASE_ADDR(_ft_) (((_ft_).u32_2 >> (5)) | ((IMG_UINT64)((_ft_).u32_3 & 0xffffffffU ) << (27))) +/* +Init bit sent flag for ALIST +*/ +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_INIT_WOFF (1U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_INIT_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_INIT_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_ALIST_INIT(_ft_,_x_) ((_ft_).u32_1 = (((_ft_).u32_1 & RGX_PM_RENDERSTATE_BUFFER_ALIST_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_ALIST_INIT_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_ALIST_INIT(_ft_) (((_ft_).u32_1 >> (RGX_PM_RENDERSTATE_BUFFER_ALIST_INIT_SHIFT)) & 0x00000001U) +/* +The number of entries on the ALIST. 
+*/ +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_WOFF (0U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_WOFF (1U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_CLRMSK (0x80000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_ALIST_TAIL(_ft_,_x_) { ((_ft_).u32_0 = (((_ft_).u32_0 & RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000ffffffff))) << 0))); \ + ((_ft_).u32_1 = (((_ft_).u32_1 & RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x7fffffff00000000))) >> 32))); } +#define RGX_PM_RENDERSTATE_BUFFER_GET_ALIST_TAIL(_ft_) (((_ft_).u32_0 >> (0)) | ((IMG_UINT64)((_ft_).u32_1 & 0x7fffffffU ) << (32))) +#endif /* RGX_FEATURE_SINGLE_TE_VSPACE */ + + +#if !(defined(RGX_FEATURE_PM_REGISTER_CONFIG_MODE)&&defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES)&&!defined(RGX_FEATURE_SINGLE_TE_VSPACE)) && !(defined(RGX_FEATURE_PM_REGISTER_CONFIG_MODE)&&!defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES)&&!defined(RGX_FEATURE_SINGLE_TE_VSPACE)) && !(defined(RGX_FEATURE_SINGLE_TE_VSPACE)) +/* +The PM Render Context Buffer Layout +*/ +typedef struct RGX_PM_RENDERSTATE_BUFFER_TAG { + IMG_UINT32 u32_0; + IMG_UINT32 u32_1; + IMG_UINT32 u32_2; + IMG_UINT32 u32_3; + IMG_UINT32 u32_4; + IMG_UINT32 u32_5; + IMG_UINT32 u32_6; + IMG_UINT32 u32_7; + IMG_UINT32 u32_8; + IMG_UINT32 u32_9; + IMG_UINT32 u32_10; + IMG_UINT32 u32_11; + IMG_UINT32 u32_12; + IMG_UINT32 u32_13; + IMG_UINT32 u32_14; + IMG_UINT32 u32_15; + IMG_UINT32 u32_16; + IMG_UINT32 u32_17; + IMG_UINT32 u32_18; + IMG_UINT32 u32_19; + IMG_UINT32 u32_20; + IMG_UINT32 u32_21; + IMG_UINT32 u32_22; + IMG_UINT32 u32_23; + IMG_UINT32 u32_24; + IMG_UINT32 u32_25; + IMG_UINT32 u32_26; + IMG_UINT32 u32_27; + IMG_UINT32 u32_28; + IMG_UINT32 u32_29; + IMG_UINT32 u32_30; + IMG_UINT32 u32_31; 
+ IMG_UINT32 u32_32; + IMG_UINT32 u32_33; + IMG_UINT32 u32_34; + IMG_UINT32 u32_35; + IMG_UINT32 u32_36; + IMG_UINT32 u32_37; + IMG_UINT32 u32_38; + IMG_UINT32 u32_39; + IMG_UINT32 u32_40; + IMG_UINT32 u32_41; + IMG_UINT32 u32_42; + IMG_UINT32 u32_43; + IMG_UINT32 u32_44; + IMG_UINT32 u32_45; +} RGX_PM_RENDERSTATE_BUFFER; + +/* +MMU catalogue base address for VCE pipe 3 LAST_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE_WOFF (45U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE(_ft_,_x_) ((_ft_).u32_45 = (((_ft_).u32_45 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE(_ft_) (((_ft_).u32_45 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for VCE pipe 3 MAPPED +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_MAPPED_WOFF (44U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_MAPPED_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_MAPPED_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE3_MAPPED(_ft_,_x_) ((_ft_).u32_44 = (((_ft_).u32_44 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_MAPPED_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE3_MAPPED(_ft_) (((_ft_).u32_44 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_MAPPED_SHIFT)) & 0x00000001U) +/* +MMU catalogue base address for VCE pipe 3 INIT_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE_WOFF (44U) +#define 
RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE(_ft_,_x_) ((_ft_).u32_44 = (((_ft_).u32_44 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE(_ft_) (((_ft_).u32_44 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for VCE pipe 2 LAST_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE_WOFF (43U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE(_ft_,_x_) ((_ft_).u32_43 = (((_ft_).u32_43 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE(_ft_) (((_ft_).u32_43 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for VCE pipe 2 MAPPED +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_MAPPED_WOFF (42U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_MAPPED_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_MAPPED_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE2_MAPPED(_ft_,_x_) ((_ft_).u32_42 = (((_ft_).u32_42 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_MAPPED_SHIFT)))) +#define 
RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE2_MAPPED(_ft_) (((_ft_).u32_42 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_MAPPED_SHIFT)) & 0x00000001U) +/* +MMU catalogue base address for VCE pipe 2 INIT_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE_WOFF (42U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE(_ft_,_x_) ((_ft_).u32_42 = (((_ft_).u32_42 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE(_ft_) (((_ft_).u32_42 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for VCE pipe 1 LAST_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE_WOFF (41U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE(_ft_,_x_) ((_ft_).u32_41 = (((_ft_).u32_41 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE(_ft_) (((_ft_).u32_41 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for VCE pipe 1 MAPPED +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_MAPPED_WOFF (40U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_MAPPED_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_MAPPED_CLRMSK 
(0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE1_MAPPED(_ft_,_x_) ((_ft_).u32_40 = (((_ft_).u32_40 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_MAPPED_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE1_MAPPED(_ft_) (((_ft_).u32_40 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_MAPPED_SHIFT)) & 0x00000001U) +/* +MMU catalogue base address for VCE pipe 1 INIT_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE_WOFF (40U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE(_ft_,_x_) ((_ft_).u32_40 = (((_ft_).u32_40 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE(_ft_) (((_ft_).u32_40 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for TE pipe 3 LAST_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_LAST_PAGE_WOFF (37U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_LAST_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_LAST_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE3_LAST_PAGE(_ft_,_x_) ((_ft_).u32_37 = (((_ft_).u32_37 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_LAST_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE3_LAST_PAGE(_ft_) (((_ft_).u32_37 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_LAST_PAGE_SHIFT)) & 
0x000fffffU) +/* +MMU catalogue base address for TE pipe 3 MAPPED +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_MAPPED_WOFF (36U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_MAPPED_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_MAPPED_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE3_MAPPED(_ft_,_x_) ((_ft_).u32_36 = (((_ft_).u32_36 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_MAPPED_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE3_MAPPED(_ft_) (((_ft_).u32_36 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_MAPPED_SHIFT)) & 0x00000001U) +/* +MMU catalogue base address for TE pipe 3 INIT_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_INIT_PAGE_WOFF (36U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_INIT_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_INIT_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE3_INIT_PAGE(_ft_,_x_) ((_ft_).u32_36 = (((_ft_).u32_36 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_INIT_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE3_INIT_PAGE(_ft_) (((_ft_).u32_36 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_INIT_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for TE pipe 2 LAST_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_LAST_PAGE_WOFF (35U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_LAST_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_LAST_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE2_LAST_PAGE(_ft_,_x_) ((_ft_).u32_35 = (((_ft_).u32_35 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_LAST_PAGE_CLRMSK 
) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_LAST_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE2_LAST_PAGE(_ft_) (((_ft_).u32_35 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_LAST_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for TE pipe 2 MAPPED +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_MAPPED_WOFF (34U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_MAPPED_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_MAPPED_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE2_MAPPED(_ft_,_x_) ((_ft_).u32_34 = (((_ft_).u32_34 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_MAPPED_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE2_MAPPED(_ft_) (((_ft_).u32_34 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_MAPPED_SHIFT)) & 0x00000001U) +/* +MMU catalogue base address for TE pipe 2 INIT_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_INIT_PAGE_WOFF (34U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_INIT_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_INIT_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE2_INIT_PAGE(_ft_,_x_) ((_ft_).u32_34 = (((_ft_).u32_34 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_INIT_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE2_INIT_PAGE(_ft_) (((_ft_).u32_34 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_INIT_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for TE pipe 1 LAST_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_LAST_PAGE_WOFF (33U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_LAST_PAGE_SHIFT (0U) 
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_LAST_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE1_LAST_PAGE(_ft_,_x_) ((_ft_).u32_33 = (((_ft_).u32_33 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_LAST_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE1_LAST_PAGE(_ft_) (((_ft_).u32_33 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_LAST_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for TE pipe 1 MAPPED +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_MAPPED_WOFF (32U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_MAPPED_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_MAPPED_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE1_MAPPED(_ft_,_x_) ((_ft_).u32_32 = (((_ft_).u32_32 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_MAPPED_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE1_MAPPED(_ft_) (((_ft_).u32_32 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_MAPPED_SHIFT)) & 0x00000001U) +/* +MMU catalogue base address for TE pipe 1 INIT_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_INIT_PAGE_WOFF (32U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_INIT_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_INIT_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE1_INIT_PAGE(_ft_,_x_) ((_ft_).u32_32 = (((_ft_).u32_32 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_INIT_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE1_INIT_PAGE(_ft_) (((_ft_).u32_32 >> 
(RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_INIT_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for VCE pipe 0 LAST_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE_WOFF (30U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE(_ft_,_x_) ((_ft_).u32_30 = (((_ft_).u32_30 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE(_ft_) (((_ft_).u32_30 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for VCE pipe 0 ADDR +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_ADDR_WOFF (29U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_ADDR_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_ADDR_CLRMSK (0xF0000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE0_ADDR(_ft_,_x_) ((_ft_).u32_29 = (((_ft_).u32_29 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_ADDR_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_ADDR_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE0_ADDR(_ft_) (((_ft_).u32_29 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_ADDR_SHIFT)) & 0x0fffffffU) +/* +MMU catalogue base address for VCE pipe 0 MAPPED +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_MAPPED_WOFF (28U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_MAPPED_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_MAPPED_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE0_MAPPED(_ft_,_x_) ((_ft_).u32_28 = (((_ft_).u32_28 & 
RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_MAPPED_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE0_MAPPED(_ft_) (((_ft_).u32_28 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_MAPPED_SHIFT)) & 0x00000001U) +/* +MMU catalogue base address for VCE pipe 0 INIT_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE_WOFF (28U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE(_ft_,_x_) ((_ft_).u32_28 = (((_ft_).u32_28 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE(_ft_) (((_ft_).u32_28 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for TE pipe 0 LAST_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE_WOFF (26U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE(_ft_,_x_) ((_ft_).u32_26 = (((_ft_).u32_26 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE(_ft_) (((_ft_).u32_26 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for TE pipe 0 ADDR +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_ADDR_WOFF 
(25U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_ADDR_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_ADDR_CLRMSK (0xF0000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE0_ADDR(_ft_,_x_) ((_ft_).u32_25 = (((_ft_).u32_25 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_ADDR_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_ADDR_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE0_ADDR(_ft_) (((_ft_).u32_25 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_ADDR_SHIFT)) & 0x0fffffffU) +/* +MMU catalogue base address for TE pipe 0 MAPPED +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_MAPPED_WOFF (24U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_MAPPED_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_MAPPED_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE0_MAPPED(_ft_,_x_) ((_ft_).u32_24 = (((_ft_).u32_24 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_MAPPED_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE0_MAPPED(_ft_) (((_ft_).u32_24 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_MAPPED_SHIFT)) & 0x00000001U) +/* +MMU catalogue base address for TE pipe 0 INIT_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE_WOFF (24U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE(_ft_,_x_) ((_ft_).u32_24 = (((_ft_).u32_24 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE(_ft_) 
(((_ft_).u32_24 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for ALIST LAST_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_LAST_PAGE_WOFF (18U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_LAST_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_LAST_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_ALIST_LAST_PAGE(_ft_,_x_) ((_ft_).u32_18 = (((_ft_).u32_18 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_LAST_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_ALIST_LAST_PAGE(_ft_) (((_ft_).u32_18 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_LAST_PAGE_SHIFT)) & 0x000fffffU) +/* +MMU catalogue base address for ALIST ADDR +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_ADDR_WOFF (17U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_ADDR_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_ADDR_CLRMSK (0xF0000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_ALIST_ADDR(_ft_,_x_) ((_ft_).u32_17 = (((_ft_).u32_17 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_ADDR_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_ADDR_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_ALIST_ADDR(_ft_) (((_ft_).u32_17 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_ADDR_SHIFT)) & 0x0fffffffU) +/* +MMU catalogue base address for ALIST MAPPED +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_MAPPED_WOFF (16U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_MAPPED_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_MAPPED_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_ALIST_MAPPED(_ft_,_x_) ((_ft_).u32_16 = (((_ft_).u32_16 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_MAPPED_CLRMSK ) | (((_x_) & 
(0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_MAPPED_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_ALIST_MAPPED(_ft_) (((_ft_).u32_16 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_MAPPED_SHIFT)) & 0x00000001U) +/* +MMU catalogue base address for ALIST INIT_PAGE +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_INIT_PAGE_WOFF (16U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_INIT_PAGE_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_INIT_PAGE_CLRMSK (0xFFF00000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_ALIST_INIT_PAGE(_ft_,_x_) ((_ft_).u32_16 = (((_ft_).u32_16 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_INIT_PAGE_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_ALIST_INIT_PAGE(_ft_) (((_ft_).u32_16 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_INIT_PAGE_SHIFT)) & 0x000fffffU) +/* +Init Bit Sent Flag for TE3 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_INIT_WOFF (15U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_INIT_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_INIT_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_TE3_INIT(_ft_,_x_) ((_ft_).u32_15 = (((_ft_).u32_15 & RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_INIT_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_TE3_INIT(_ft_) (((_ft_).u32_15 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for TE3 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_WOFF (15U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_CLRMSK (0x80000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_TE3(_ft_,_x_) ((_ft_).u32_15 = (((_ft_).u32_15 & RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_CLRMSK ) | (((_x_) & (0x7fffffffU)) << 
(RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_TE3(_ft_) (((_ft_).u32_15 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_SHIFT)) & 0x7fffffffU) +/* +Init Bit Sent Flag for VCE3 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_INIT_WOFF (14U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_INIT_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_INIT_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE3_INIT(_ft_,_x_) ((_ft_).u32_14 = (((_ft_).u32_14 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_INIT_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE3_INIT(_ft_) (((_ft_).u32_14 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for VCE3 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_WOFF (14U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_CLRMSK (0x80000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE3(_ft_,_x_) ((_ft_).u32_14 = (((_ft_).u32_14 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE3(_ft_) (((_ft_).u32_14 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_SHIFT)) & 0x7fffffffU) +/* +Init Bit Sent Flag for TE2 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_INIT_WOFF (13U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_INIT_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_INIT_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_TE2_INIT(_ft_,_x_) ((_ft_).u32_13 = (((_ft_).u32_13 & RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_INIT_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_TE2_INIT(_ft_) (((_ft_).u32_13 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer 
for TE2 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_WOFF (13U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_CLRMSK (0x80000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_TE2(_ft_,_x_) ((_ft_).u32_13 = (((_ft_).u32_13 & RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_TE2(_ft_) (((_ft_).u32_13 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_SHIFT)) & 0x7fffffffU) +/* +Init Bit Sent Flag for VCE2 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_INIT_WOFF (12U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_INIT_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_INIT_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE2_INIT(_ft_,_x_) ((_ft_).u32_12 = (((_ft_).u32_12 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_INIT_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE2_INIT(_ft_) (((_ft_).u32_12 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for VCE2 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_WOFF (12U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_CLRMSK (0x80000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE2(_ft_,_x_) ((_ft_).u32_12 = (((_ft_).u32_12 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE2(_ft_) (((_ft_).u32_12 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_SHIFT)) & 0x7fffffffU) +/* +Init Bit Sent Flag for TE1 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_INIT_WOFF (11U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_INIT_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_INIT_CLRMSK (0x7FFFFFFFU) +#define 
RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_TE1_INIT(_ft_,_x_) ((_ft_).u32_11 = (((_ft_).u32_11 & RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_INIT_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_TE1_INIT(_ft_) (((_ft_).u32_11 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for TE1 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_WOFF (11U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_CLRMSK (0x80000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_TE1(_ft_,_x_) ((_ft_).u32_11 = (((_ft_).u32_11 & RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_TE1(_ft_) (((_ft_).u32_11 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_SHIFT)) & 0x7fffffffU) +/* +Init Bit Sent Flag for VCE1 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_INIT_WOFF (10U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_INIT_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_INIT_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE1_INIT(_ft_,_x_) ((_ft_).u32_10 = (((_ft_).u32_10 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_INIT_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE1_INIT(_ft_) (((_ft_).u32_10 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for VCE1 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_WOFF (10U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_CLRMSK (0x80000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE1(_ft_,_x_) ((_ft_).u32_10 = (((_ft_).u32_10 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_SHIFT)))) +#define 
RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE1(_ft_) (((_ft_).u32_10 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_SHIFT)) & 0x7fffffffU) +/* +Init Bit Sent Flag for TE0 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_INIT_WOFF (9U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_INIT_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_INIT_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_TE0_INIT(_ft_,_x_) ((_ft_).u32_9 = (((_ft_).u32_9 & RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_INIT_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_TE0_INIT(_ft_) (((_ft_).u32_9 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for TE0 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_WOFF (9U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_CLRMSK (0x80000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_TE0(_ft_,_x_) ((_ft_).u32_9 = (((_ft_).u32_9 & RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_TE0(_ft_) (((_ft_).u32_9 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_SHIFT)) & 0x7fffffffU) +/* +Init Bit Sent Flag for VCE0 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_INIT_WOFF (8U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_INIT_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_INIT_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE0_INIT(_ft_,_x_) ((_ft_).u32_8 = (((_ft_).u32_8 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_INIT_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE0_INIT(_ft_) (((_ft_).u32_8 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_INIT_SHIFT)) & 0x00000001U) +/* +16KB aligned virtual top pointer for VCE0 +*/ +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_WOFF (8U) 
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_CLRMSK (0x80000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE0(_ft_,_x_) ((_ft_).u32_8 = (((_ft_).u32_8 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE0(_ft_) (((_ft_).u32_8 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_SHIFT)) & 0x7fffffffU) +/* +A 16-bit macrotile mask indicating which macrotiles have been freed by the ISP. +A '1' in a bit position indicates that the ISP has signalled that the corresponding macrotile can be freed. +Only the least-significant 16 bits are valid. +Only used in the 3D phase. +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_WOFF (7U) +#define RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MTILEFREE_STATUS(_ft_,_x_) ((_ft_).u32_7 = (((_ft_).u32_7 & RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MTILEFREE_STATUS(_ft_) (((_ft_).u32_7 >> (RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_SHIFT)) & 0xffffffffU) +/* +A 16-bit macrotile mask indicating which macrotiles have been freed by the PM. +A '1' in a bit position indicates that the corresponding macrotile has been freed, and its pages released back to the appropriate free stack. +Only the least-significant 16 bits are valid. +Only used in the 3D phase. 
+*/ +#define RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_WOFF (6U) +#define RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_DEALLOC_MASK_STATUS(_ft_,_x_) ((_ft_).u32_6 = (((_ft_).u32_6 & RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_DEALLOC_MASK_STATUS(_ft_) (((_ft_).u32_6 >> (RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_SHIFT)) & 0xffffffffU) +/* +Reserved bits, un-used. +*/ +#define RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_WOFF (5U) +#define RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_RSV_STUFF32(_ft_,_x_) ((_ft_).u32_5 = (((_ft_).u32_5 & RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_RSV_STUFF32(_ft_) (((_ft_).u32_5 >> (RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_SHIFT)) & 0xffffffffU) +/* +The number of entries on the MLIST. +*/ +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_WOFF (4U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MLIST_TAIL(_ft_,_x_) ((_ft_).u32_4 = (((_ft_).u32_4 & RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_MLIST_TAIL(_ft_) (((_ft_).u32_4 >> (RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_SHIFT)) & 0xffffffffU) +/* +The base address of the MLIST. +Must be initialised to point to a block of memory where the PM can write the MLIST. 
+*/ +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_WOFF (2U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_SHIFT (5U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_WOFF (3U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_SHIFT (5U) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_CLRMSK (0x0000001FU) +#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_MLIST_BASE_ADDR(_ft_,_x_) { ((_ft_).u32_2 = (((_ft_).u32_2 & RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000007ffffff))) << 5))); \ + ((_ft_).u32_3 = (((_ft_).u32_3 & RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x07fffffff8000000))) >> 27))); } +#define RGX_PM_RENDERSTATE_BUFFER_GET_MLIST_BASE_ADDR(_ft_) (((_ft_).u32_2 >> (5)) | ((IMG_UINT64)((_ft_).u32_3 & 0xffffffffU ) << (27))) +/* +Init bit sent flag for ALIST +*/ +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_INIT_WOFF (1U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_INIT_SHIFT (31U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_INIT_CLRMSK (0x7FFFFFFFU) +#define RGX_PM_RENDERSTATE_BUFFER_SET_ALIST_INIT(_ft_,_x_) ((_ft_).u32_1 = (((_ft_).u32_1 & RGX_PM_RENDERSTATE_BUFFER_ALIST_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_ALIST_INIT_SHIFT)))) +#define RGX_PM_RENDERSTATE_BUFFER_GET_ALIST_INIT(_ft_) (((_ft_).u32_1 >> (RGX_PM_RENDERSTATE_BUFFER_ALIST_INIT_SHIFT)) & 0x00000001U) +/* +The number of entries on the ALIST. 
+*/ +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_WOFF (0U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_WOFF (1U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_SHIFT (0U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_CLRMSK (0x00000000U) +#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_CLRMSK (0x80000000U) +#define RGX_PM_RENDERSTATE_BUFFER_SET_ALIST_TAIL(_ft_,_x_) { ((_ft_).u32_0 = (((_ft_).u32_0 & RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000ffffffff))) << 0))); \ + ((_ft_).u32_1 = (((_ft_).u32_1 & RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x7fffffff00000000))) >> 32))); } +#define RGX_PM_RENDERSTATE_BUFFER_GET_ALIST_TAIL(_ft_) (((_ft_).u32_0 >> (0)) | ((IMG_UINT64)((_ft_).u32_1 & 0x7fffffffU ) << (32))) +#endif /* !(defined(RGX_FEATURE_PM_REGISTER_CONFIG_MODE)&&defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES)&&!defined(RGX_FEATURE_SINGLE_TE_VSPACE)) && !(defined(RGX_FEATURE_PM_REGISTER_CONFIG_MODE)&&!defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES)&&!defined(RGX_FEATURE_SINGLE_TE_VSPACE)) && !(defined(RGX_FEATURE_SINGLE_TE_VSPACE)) */ + + +/* +Maximum range supported by hardware is 33 bits. +*/ +#define RGX_PM_RENDERSTATE_ALIST_TAIL_ALIST_TAIL (0U) +#define RGX_PM_RENDERSTATE_ALIST_TAIL_ALIST_TAIL_LOWER (0U) +#define RGX_PM_RENDERSTATE_ALIST_TAIL_ALIST_TAIL_UPPER (8589934591ULL) + + +#if defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) +/* +256-bit granular, lower bits ignored. +Maximum addressable range supported by hardware is 1 TB. 
+*/ +#define RGX_PM_RENDERSTATE_MLIST_BASE_ADDR_ALIGNSHIFT (5U) +#define RGX_PM_RENDERSTATE_MLIST_BASE_ADDR_ALIGNSIZE (32U) +#define RGX_PM_RENDERSTATE_MLIST_BASE_ADDR_MLIST_BASE_ADDR (0U) +#define RGX_PM_RENDERSTATE_MLIST_BASE_ADDR_MLIST_BASE_ADDR_LOWER (0U) +#define RGX_PM_RENDERSTATE_MLIST_BASE_ADDR_MLIST_BASE_ADDR_UPPER (68719476735ULL) +#endif /* RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES */ + + +#if !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) +/* +128-bit aligned. +Maximum addressable range supported by hardware is 1 TB. +The 40-bit, 16-byte-aligned address is packed into bits 35:0 of the two DWORDs. +*/ +#define RGX_PM_RENDERSTATE_MLIST_BASE_ADDR_ALIGNSHIFT (4U) +#define RGX_PM_RENDERSTATE_MLIST_BASE_ADDR_ALIGNSIZE (16U) +#define RGX_PM_RENDERSTATE_MLIST_BASE_ADDR_MLIST_BASE_ADDR (0U) +#define RGX_PM_RENDERSTATE_MLIST_BASE_ADDR_MLIST_BASE_ADDR_LOWER (0U) +#define RGX_PM_RENDERSTATE_MLIST_BASE_ADDR_MLIST_BASE_ADDR_UPPER (68719476735ULL) +#endif /* !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) */ + + +/* +Maximum range supported by hardware is 33 bits. +*/ +#define RGX_PM_RENDERSTATE_MLIST_TAIL_MLIST_TAIL (0U) +#define RGX_PM_RENDERSTATE_MLIST_TAIL_MLIST_TAIL_LOWER (0U) +#define RGX_PM_RENDERSTATE_MLIST_TAIL_MLIST_TAIL_UPPER (8589934591ULL) + + +#if defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) +/* +128-bit granular, lower bits ignored. +Maximum addressable range supported by hardware is 1 TB. +*/ +#define RGX_PM_RENDERSTATE_VHEAP_BASE_ADDR_ALIGNSHIFT (4U) +#define RGX_PM_RENDERSTATE_VHEAP_BASE_ADDR_ALIGNSIZE (16U) +#define RGX_PM_RENDERSTATE_VHEAP_BASE_ADDR_VHEAP_BASE_ADDR (0U) +#define RGX_PM_RENDERSTATE_VHEAP_BASE_ADDR_VHEAP_BASE_ADDR_LOWER (0U) +#define RGX_PM_RENDERSTATE_VHEAP_BASE_ADDR_VHEAP_BASE_ADDR_UPPER (68719476735ULL) +#endif /* RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES */ + + +#if !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) +/* +128-bit aligned. +Maximum addressable range supported by hardware is 1 TB. 
+The 40-bit, 16-byte-aligned address is packed into bits 35:0 of the two DWORDs. +*/ +#define RGX_PM_RENDERSTATE_VHEAP_BASE_ADDR_ALIGNSHIFT (4U) +#define RGX_PM_RENDERSTATE_VHEAP_BASE_ADDR_ALIGNSIZE (16U) +#define RGX_PM_RENDERSTATE_VHEAP_BASE_ADDR_VHEAP_BASE_ADDR (0U) +#define RGX_PM_RENDERSTATE_VHEAP_BASE_ADDR_VHEAP_BASE_ADDR_LOWER (0U) +#define RGX_PM_RENDERSTATE_VHEAP_BASE_ADDR_VHEAP_BASE_ADDR_UPPER (68719476735ULL) +#endif /* !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) */ + + +/* +Only the 16 least-significant bits are used +*/ +#define RGX_PM_RENDERSTATE_DEALLOC_MASK_STATUS_DEALLOC_MASK_STATUS (0U) +#define RGX_PM_RENDERSTATE_DEALLOC_MASK_STATUS_DEALLOC_MASK_STATUS_LOWER (0U) +#define RGX_PM_RENDERSTATE_DEALLOC_MASK_STATUS_DEALLOC_MASK_STATUS_UPPER (65535U) + + +/* +Only the 16 least-significant bits are used +*/ +#define RGX_PM_RENDERSTATE_MTILEFREE_STATUS_MTILEFREE_STATUS (0U) +#define RGX_PM_RENDERSTATE_MTILEFREE_STATUS_MTILEFREE_STATUS_LOWER (0U) +#define RGX_PM_RENDERSTATE_MTILEFREE_STATUS_MTILEFREE_STATUS_UPPER (65535U) + + +#if defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) +/* +128-bit granular, lower bits ignored. +Maximum addressable range supported by hardware is 1 TB. +*/ +#define RGX_PM_RENDERSTATE_VFP_BASE_ADDR_ALIGNSHIFT (4U) +#define RGX_PM_RENDERSTATE_VFP_BASE_ADDR_ALIGNSIZE (16U) +#define RGX_PM_RENDERSTATE_VFP_BASE_ADDR_VFP_BASE_ADDR (0U) +#define RGX_PM_RENDERSTATE_VFP_BASE_ADDR_VFP_BASE_ADDR_LOWER (0U) +#define RGX_PM_RENDERSTATE_VFP_BASE_ADDR_VFP_BASE_ADDR_UPPER (68719476735ULL) +#endif /* RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES */ + + +#if !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) +/* +128-bit aligned. +Maximum addressable range supported by hardware is 1 TB. +The 40-bit, 16-byte-aligned address is packed into bits 35:0 of the two DWORDs. 
+*/ +#define RGX_PM_RENDERSTATE_VFP_BASE_ADDR_ALIGNSHIFT (4U) +#define RGX_PM_RENDERSTATE_VFP_BASE_ADDR_ALIGNSIZE (16U) +#define RGX_PM_RENDERSTATE_VFP_BASE_ADDR_VFP_BASE_ADDR (0U) +#define RGX_PM_RENDERSTATE_VFP_BASE_ADDR_VFP_BASE_ADDR_LOWER (0U) +#define RGX_PM_RENDERSTATE_VFP_BASE_ADDR_VFP_BASE_ADDR_UPPER (68719476735ULL) +#endif /* !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) */ + + +#endif /* RGXPMDEFS_H */ +/***************************************************************************** + End of file (rgxpmdefs.h) +*****************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgxpower.c b/drivers/gpu/drm/phytium/octopus/rgxpower.c new file mode 100644 index 000000000000..6453da1fe006 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxpower.c @@ -0,0 +1,1562 @@ +/*************************************************************************/ /*! +@File +@Title Device specific power routines +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Device specific functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if defined(__linux__) +#include +#else +#include +#endif + +#include "rgxpower.h" +#include "rgxinit.h" +#include "rgx_fwif_km.h" +#include "rgxfwutils.h" +#include "pdump_km.h" +#include "pvr_debug.h" +#include "osfunc.h" +#include "rgxdebug.h" +#include "devicemem.h" +#include "devicemem_pdump.h" +#include "rgxtimecorr.h" +#include "devicemem_utils.h" +#include "htbserver.h" +#include "rgxstartstop.h" +#include "rgxfwimageutils.h" +#include "sync.h" +#include "rgxdefs_km.h" + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) +#include "process_stats.h" +#endif +#if defined(SUPPORT_LINUX_DVFS) +#include "pvr_dvfs_device.h" +#endif +#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) && defined(PDUMP) +#include "validation_soc.h" +#include "oskm_apphint.h" +#endif + +static PVRSRV_ERROR RGXFWNotifyHostTimeout(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFWIF_KCCB_CMD sCmd; + PVRSRV_ERROR eError; + IMG_UINT32 ui32CmdKCCBSlot; + + /* Send the Timeout notification to the FW */ + sCmd.eCmdType = RGXFWIF_KCCB_CMD_POW; + sCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ; + sCmd.uCmdData.sPowData.uPowerReqData.ePowRequestType = RGXFWIF_POWER_HOST_TIMEOUT; + + eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, + &sCmd, + PDUMP_FLAGS_NONE, + &ui32CmdKCCBSlot); + + return eError; +} + +static void _RGXUpdateGPUUtilStats(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb; + IMG_UINT64 *paui64StatsCounters; + IMG_UINT64 ui64LastPeriod; + IMG_UINT64 ui64LastState; + IMG_UINT64 ui64LastTime; + IMG_UINT64 ui64TimeNow; + + psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb; + paui64StatsCounters = &psUtilFWCb->aui64StatsCounters[0]; + + OSLockAcquire(psDevInfo->hGPUUtilLock); + + ui64TimeNow = RGXFWIF_GPU_UTIL_GET_TIME(RGXTimeCorrGetClockns64()); + + /* Update counters to account for the time since the last update */ + ui64LastState = 
RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64LastWord); + ui64LastTime = RGXFWIF_GPU_UTIL_GET_TIME(psUtilFWCb->ui64LastWord); + ui64LastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64LastTime); + paui64StatsCounters[ui64LastState] += ui64LastPeriod; + + /* Update state and time of the latest update */ + psUtilFWCb->ui64LastWord = RGXFWIF_GPU_UTIL_MAKE_WORD(ui64TimeNow, ui64LastState); + + OSLockRelease(psDevInfo->hGPUUtilLock); +} + +static INLINE PVRSRV_ERROR RGXDoStop(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) + PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + if (psDevConfig->pfnTDRGXStop == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "RGXPrePowerState: TDRGXStop not implemented!")); + return PVRSRV_ERROR_NOT_IMPLEMENTED; + } + + eError = psDevConfig->pfnTDRGXStop(psDevConfig->hSysData); +#else + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + eError = RGXStop(&psDevInfo->sLayerParams); +#endif + + return eError; +} + +/* + RGXPrePowerState +*/ +PVRSRV_ERROR RGXPrePowerState(IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState, + IMG_BOOL bForced) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + + if ((eNewPowerState != eCurrentPowerState) && + (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON)) + { + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_KCCB_CMD sPowCmd; + IMG_UINT32 ui32CmdKCCBSlot; + + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + + /* Send the Power off request to the FW */ + sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW; + sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_OFF_REQ; + sPowCmd.uCmdData.sPowData.uPowerReqData.bForced = bForced; + + eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0); + if 
(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim", + __func__)); + return eError; + } + + eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, + &sPowCmd, + PDUMP_FLAGS_NONE, + &ui32CmdKCCBSlot); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to send Power off request", + __func__)); + return eError; + } + + /* Wait for the firmware to complete processing. It cannot use PVRSRVWaitForValueKM as it relies + on the EventObject which is signalled in this MISR */ + eError = RGXPollForGPCommandCompletion(psDeviceNode, + psDevInfo->psPowSyncPrim->pui32LinAddr, + 0x1, 0xFFFFFFFF); + + /* Check the Power state after the answer */ + if (eError == PVRSRV_OK) + { + /* Finally, de-initialise some registers. */ + if (psFwSysData->ePowState == RGXFWIF_POW_OFF) + { +#if !defined(NO_HARDWARE) + IMG_UINT32 ui32TID; + RGXFWIF_OSDATA *psFwOsData = psDevInfo->psRGXFWIfFwOsData; + + /* Driver takes the VZ Fw-KM connection down, preventing the + * firmware from submitting further interrupts */ + KM_SET_OS_CONNECTION(OFFLINE, psDevInfo); + + for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++) + { + /* Wait for the pending FW processor to host interrupts to come back. */ + eError = PVRSRVPollForValueKM(psDeviceNode, + (IMG_UINT32 __iomem *)&psDevInfo->aui32SampleIRQCount[ui32TID], + psFwOsData->aui32InterruptCount[ui32TID], + 0xffffffff, + POLL_FLAG_LOG_ERROR); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Wait for pending interrupts failed. 
Thread %u: Host: %u, FW: %u", + __func__, + ui32TID, + psDevInfo->aui32SampleIRQCount[ui32TID], + psFwOsData->aui32InterruptCount[ui32TID])); + + RGX_WaitForInterruptsTimeout(psDevInfo); + break; + } + } +#endif /* NO_HARDWARE */ + + /* Update GPU frequency and timer correlation related data */ + RGXTimeCorrEnd(psDeviceNode, RGXTIMECORR_EVENT_POWER); + + /* Update GPU state counters */ + _RGXUpdateGPUUtilStats(psDevInfo); + +#if defined(SUPPORT_LINUX_DVFS) + eError = SuspendDVFS(); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to suspend DVFS", __func__)); + return eError; + } +#endif + + psDevInfo->bRGXPowered = IMG_FALSE; + + eError = RGXDoStop(psDeviceNode); + if (eError != PVRSRV_OK) + { + /* Power down failures are treated as successful since the power was removed but logged. */ + PVR_DPF((PVR_DBG_WARNING, "%s: RGXDoStop failed (%s)", + __func__, PVRSRVGetErrorString(eError))); + psDevInfo->ui32ActivePMReqNonIdle++; + eError = PVRSRV_OK; + } + } + else + { + /* the sync was updated but the pow state isn't off -> the FW denied the transition */ + eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED; + + if (bForced) + { /* It is an error for a forced request to be denied */ + PVR_DPF((PVR_DBG_ERROR, + "%s: Failure to power off during a forced power off. 
FW: %d", + __func__, psFwSysData->ePowState)); + } + } + } + else if (eError == PVRSRV_ERROR_TIMEOUT) + { + /* timeout waiting for the FW to ack the request: return timeout */ + PVR_DPF((PVR_DBG_WARNING, + "%s: Timeout waiting for powoff ack from the FW", + __func__)); + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Error waiting for powoff ack from the FW (%s)", + __func__, PVRSRVGetErrorString(eError))); + eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE; + } + } + + return eError; +} + +#if defined(SUPPORT_AUTOVZ) +static PVRSRV_ERROR _RGXWaitForGuestsToDisconnect(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError = PVRSRV_ERROR_TIMEOUT; + IMG_UINT32 ui32FwTimeout = (20 * SECONDS_TO_MICROSECONDS); + + LOOP_UNTIL_TIMEOUT(ui32FwTimeout) + { + IMG_UINT32 ui32OSid; + IMG_BOOL bGuestOnline = IMG_FALSE; + + for (ui32OSid = RGXFW_GUEST_OSID_START; + ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) + { + RGXFWIF_CONNECTION_FW_STATE eGuestState = (volatile RGXFWIF_CONNECTION_FW_STATE) + psDevInfo->psRGXFWIfFwSysData->asOsRuntimeFlagsMirror[ui32OSid].bfOsState; + + if ((eGuestState == RGXFW_CONNECTION_FW_ACTIVE) || + (eGuestState == RGXFW_CONNECTION_FW_OFFLOADING)) + { + bGuestOnline = IMG_TRUE; + PVR_DPF((PVR_DBG_WARNING, "%s: Guest OS %u still online.", __func__, ui32OSid)); + } + } + + if (!bGuestOnline) + { + /* Allow Guests to finish reading Connection state registers before disconnecting. */ + OSSleepms(100); + + PVR_DPF((PVR_DBG_WARNING, "%s: All Guest connections are down. " + "Host can power down the GPU.", __func__)); + eError = PVRSRV_OK; + break; + } + else + { + PVR_DPF((PVR_DBG_WARNING, "%s: Waiting for Guests to disconnect " + "before powering down GPU.", __func__)); + + if (PVRSRVPwrLockIsLockedByMe(psDeviceNode)) + { + /* Don't wait with the power lock held as this prevents the vz + * watchdog thread from keeping the fw-km connection alive. 
*/ + PVRSRVPowerUnlock(psDeviceNode); + } + } + + OSSleepms(10); + } END_LOOP_UNTIL_TIMEOUT(); + + if (!PVRSRVPwrLockIsLockedByMe(psDeviceNode)) + { + /* Take back power lock after waiting for Guests */ + eError = PVRSRVPowerLock(psDeviceNode); + } + + return eError; +} +#endif /* defined(SUPPORT_AUTOVZ) */ + +/* + RGXVzPrePowerState +*/ +PVRSRV_ERROR RGXVzPrePowerState(IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState, + IMG_BOOL bForced) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + + PVR_LOG_RETURN_IF_FALSE((eNewPowerState != eCurrentPowerState), "no power change", eError); + + if (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON) + { + /* powering down */ +#if defined(SUPPORT_AUTOVZ) + PVR_LOG_RETURN_IF_FALSE((!psDeviceNode->bAutoVzFwIsUp), "AutoVz Fw active, power not changed", eError); + if (PVRSRV_VZ_MODE_IS(HOST)) + { + /* The Host must ensure all Guest drivers have disconnected from the GPU before powering it down. + * Guest drivers regularly access hardware registers during runtime. If an attempt is made to + * access a GPU register while the GPU is down, the SoC might lock up. */ + eError = _RGXWaitForGuestsToDisconnect(psDeviceNode); + PVR_LOG_RETURN_IF_ERROR(eError, "_RGXWaitForGuestsToDisconnect"); + + /* Temporarily restore all power callbacks used by the driver to fully power down the GPU. + * Under AutoVz, power transitions requests (e.g. on driver deinitialisation and unloading) + * are generally ignored and the GPU power state is unaffected. Special power requests like + * those triggered by Suspend/Resume calls must reinstate the callbacks when needed. 
*/ + PVRSRVSetPowerCallbacks(psDeviceNode, psDeviceNode->psPowerDev, + &RGXVzPrePowerState, &RGXVzPostPowerState, + psDeviceNode->psDevConfig->pfnPrePowerState, + psDeviceNode->psDevConfig->pfnPostPowerState, + &RGXForcedIdleRequest, &RGXCancelForcedIdleRequest); + } + else if (PVRSRV_VZ_MODE_IS(GUEST)) + { + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + IMG_BOOL bFwOffline = IMG_FALSE; + RGXFWIF_KCCB_CMD sOfflineCmd = { 0 }; + + sOfflineCmd.eCmdType = RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE; + sOfflineCmd.uCmdData.sCmdOSOnlineStateData.eNewOSState = RGXFWIF_OS_OFFLINE; + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + /* Update the watchdog token to prevent the firmware from resetting this driver's + * state to READY. It needs to stay OFFLINE until the driver can confirm it. */ + KM_SET_OS_ALIVE_TOKEN(KM_GET_FW_ALIVE_TOKEN(psDevInfo), psDevInfo); + + eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, &sOfflineCmd, PDUMP_FLAGS_CONTINUOUS, NULL); + if (eError != PVRSRV_ERROR_RETRY) break; + + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + if (eError == PVRSRV_OK) + { + LOOP_UNTIL_TIMEOUT(SECONDS_TO_MICROSECONDS) + { + if (KM_FW_CONNECTION_IS(OFFLINE, psDevInfo)) + { + bFwOffline = IMG_TRUE; + break; + } + } END_LOOP_UNTIL_TIMEOUT(); + } + + if (!bFwOffline) + { + /* The firmware failed to take down the connection, break it from the driver's end as a last resort. */ + KM_SET_OS_CONNECTION(OFFLINE, psDevInfo); + } + } +#endif + PVR_DPF((PVR_DBG_WARNING, "%s: %s driver powering down: bAutoVzFwIsUp = %s", + __func__, PVRSRV_VZ_MODE_IS(GUEST)? "GUEST" : "HOST", + psDeviceNode->bAutoVzFwIsUp ? "TRUE" : "FALSE")); + } + else if (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON) + { + /* powering up */ + PVR_DPF((PVR_DBG_WARNING, "%s: %s driver powering up: bAutoVzFwIsUp = %s", + __func__, PVRSRV_VZ_MODE_IS(GUEST)? "GUEST" : "HOST", + psDeviceNode->bAutoVzFwIsUp ? 
"TRUE" : "FALSE")); + + } + + if (!(PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp))) + { + /* call regular device power function */ + eError = RGXPrePowerState(hDevHandle, eNewPowerState, eCurrentPowerState, bForced); + } + + return eError; +} + +/* + RGXVzPostPowerState +*/ +PVRSRV_ERROR RGXVzPostPowerState(IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState, + IMG_BOOL bForced) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + PVR_LOG_RETURN_IF_FALSE((eNewPowerState != eCurrentPowerState), "no power change", eError); + + if (!(PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp))) + { + /* call regular device power function */ + eError = RGXPostPowerState(hDevHandle, eNewPowerState, eCurrentPowerState, bForced); + } + + if (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON) + { + /* powering down */ + PVR_LOG_RETURN_IF_FALSE((!psDeviceNode->bAutoVzFwIsUp), "AutoVz Fw active, power not changed", eError); + PVR_DPF((PVR_DBG_WARNING, "%s: %s driver powering down: bAutoVzFwIsUp = %s", + __func__, PVRSRV_VZ_MODE_IS(GUEST)? "GUEST" : "HOST", + psDeviceNode->bAutoVzFwIsUp ? "TRUE" : "FALSE")); + +#if !defined(SUPPORT_AUTOVZ_HW_REGS) + /* The connection states must be reset on a GPU power cycle. If the states are kept + * in hardware scratch registers, they will be cleared on power down. When using shared + * memory the connection data must be explicitly cleared by the driver. */ + OSCachedMemSetWMB(psDevInfo->psRGXFWIfConnectionCtl, 0, sizeof(RGXFWIF_CONNECTION_CTL)); +#endif /* defined(SUPPORT_AUTOVZ) && !defined(SUPPORT_AUTOVZ_HW_REGS) */ + + if (PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp)) + { +#if defined(SUPPORT_AUTOVZ) + /* AutoVz Guests attempting to suspend have updated their connections earlier in RGXVzPrePowerState. 
+ * Skip this redundant register write, as the Host could have powered down the GPU by now. */ + if (psDeviceNode->bAutoVzFwIsUp) +#endif + { + /* Take the VZ connection down to prevent firmware from submitting further interrupts */ + KM_SET_OS_CONNECTION(OFFLINE, psDevInfo); + } + /* Power transition callbacks were not executed, update RGXPowered flag here */ + psDevInfo->bRGXPowered = IMG_FALSE; + } + } + else if (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON) + { + /* powering up */ + IMG_UINT32 ui32FwTimeout = (3 * SECONDS_TO_MICROSECONDS); + volatile IMG_BOOL *pbUpdatedFlag = &psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated; + + PVR_DPF((PVR_DBG_WARNING, "%s: %s driver powering up: bAutoVzFwIsUp = %s", + __func__, PVRSRV_VZ_MODE_IS(GUEST)? "GUEST" : "HOST", + psDeviceNode->bAutoVzFwIsUp ? "TRUE" : "FALSE")); + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + /* Guests don't execute the power transition callbacks, so update their RGXPowered flag here */ + psDevInfo->bRGXPowered = IMG_TRUE; + +#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) + /* Guest drivers expect the firmware to have set its end of the + * connection to Ready state by now. Poll indefinitely otherwise. */ + if (!KM_FW_CONNECTION_IS(READY, psDevInfo)) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is not in Ready state. Waiting for Firmware ...", __func__)); + } + while (!KM_FW_CONNECTION_IS(READY, psDevInfo)) + { + OSSleepms(10); + } + PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is Ready. Initialisation proceeding.", __func__)); +#endif /* RGX_VZ_STATIC_CARVEOUT_FW_HEAPS */ + + /* Guests can only access the register holding the connection states, + * after the GPU is confirmed to be powered up */ + KM_SET_OS_CONNECTION(READY, psDevInfo); + + OSWriteDeviceMem32(pbUpdatedFlag, IMG_FALSE); + + /* Kick an initial dummy command to make the firmware initialise all + * its internal guest OS data structures and compatibility information. 
+ * Use the lower-level RGXSendCommandAndGetKCCBSlot() for the job, to make + * sure only 1 KCCB command is issued to the firmware. + * The default RGXFWHealthCheckCmd() prefaces each HealthCheck command with + * a pre-kick cache command which can interfere with the FW-KM init handshake. */ + { + RGXFWIF_KCCB_CMD sCmpKCCBCmd; + sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_HEALTH_CHECK; + + eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, &sCmpKCCBCmd, PDUMP_FLAGS_CONTINUOUS, NULL); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXSendCommandAndGetKCCBSlot()"); + } + } + else + { + KM_SET_OS_CONNECTION(READY, psDevInfo); + + /* Disable power callbacks that should not be run on virtualised drivers after the GPU + * is fully initialised: system layer pre/post functions and driver idle requests. + * The original device RGX Pre/Post functions are called from this Vz wrapper. */ + PVRSRVSetPowerCallbacks(psDeviceNode, psDeviceNode->psPowerDev, + &RGXVzPrePowerState, &RGXVzPostPowerState, + NULL, NULL, NULL, NULL); + +#if defined(SUPPORT_AUTOVZ) + /* During first-time boot the flag is set here, while subsequent reboots will already + * have set it earlier in RGXInit. Set to true from this point onwards in any case. */ + psDeviceNode->bAutoVzFwIsUp = IMG_TRUE; +#endif + } + + /* Wait for the firmware to accept and enable the connection with this OS by setting its state to Active */ + while (!KM_FW_CONNECTION_IS(ACTIVE, psDevInfo)) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is not in Active state. Waiting for Firmware ...", __func__)); + OSSleepms(100); + } + PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is Active. Initialisation proceeding.", __func__)); + + /* poll on the Firmware supplying the compatibility data */ + LOOP_UNTIL_TIMEOUT(ui32FwTimeout) + { + if (*pbUpdatedFlag) + { + break; + } + OSSleepms(10); + } END_LOOP_UNTIL_TIMEOUT(); + + PVR_LOG_RETURN_IF_FALSE(*pbUpdatedFlag, "Firmware does not respond with compatibility data. 
", PVRSRV_ERROR_TIMEOUT); + + KM_SET_OS_CONNECTION(ACTIVE, psDevInfo); + } + + return PVRSRV_OK; +} + +#if defined(TRACK_FW_BOOT) +static INLINE void RGXCheckFWBootStage(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + FW_BOOT_STAGE eStage; + + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) + { + /* Boot stage temporarily stored to the register below */ + eStage = OSReadHWReg32(psDevInfo->pvRegsBaseKM, + RGX_FW_BOOT_STAGE_REGISTER); + } + else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) + { + eStage = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_SCRATCH14); + } + else + { + return; + } + + PVR_LOG(("%s: FW reached boot stage %i/%i.", + __func__, eStage, FW_BOOT_INIT_DONE)); +} +#endif + +static INLINE PVRSRV_ERROR RGXDoStart(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) + PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; + + if (psDevConfig->pfnTDRGXStart == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: TDRGXStart not implemented!")); + return PVRSRV_ERROR_NOT_IMPLEMENTED; + } + + eError = psDevConfig->pfnTDRGXStart(psDevConfig->hSysData); +#else + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + eError = RGXStart(&psDevInfo->sLayerParams); +#endif + + return eError; +} + +#if defined(SUPPORT_GPUVIRT_VALIDATION) && !defined(NO_HARDWARE) +/* + * To validate the MTS unit we do the following: + * - Immediately after firmware loading for each OSID + * - Write the OSid to a memory location shared with FW + * - Kick the register of that OSid + * (Uncounted, DM 0) + * - FW clears the memory location if OSid matches + * - Host checks that memory location is cleared + * + * See firmware/devices/rgx/rgxfw_bg.c + */ +static PVRSRV_ERROR RGXVirtualisationPowerupSidebandTest(PVRSRV_DEVICE_NODE *psDeviceNode, + RGXFWIF_SYSINIT *psFwSysInit, + PVRSRV_RGXDEV_INFO *psDevInfo) +{ + IMG_UINT32 ui32ScheduleRegister; + 
IMG_UINT32 ui32OSid; + IMG_UINT32 ui32KickType; + IMG_UINT32 ui32OsRegBanksMapped = (psDeviceNode->psDevConfig->ui32RegsSize / RGX_VIRTUALISATION_REG_SIZE_PER_OS); + + /* Nothing to do if device does not support GPU_VIRTUALISATION */ + if (!PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, GPU_VIRTUALISATION)) + { + return PVRSRV_OK; + } + + PVR_DPF((PVR_DBG_MESSAGE, "Testing per-os kick registers:")); + + /* Need to get the maximum supported OSid value from the per-device info. + * This can change according to how much memory is physically present and + * what the carve-out mapping looks like (provided by the module load-time + * parameters). + */ + ui32OsRegBanksMapped = MIN(ui32OsRegBanksMapped, psDeviceNode->ui32NumOSId); + + if (ui32OsRegBanksMapped != RGXFW_MAX_NUM_OS) + { + PVR_DPF((PVR_DBG_WARNING, "The register bank mapped into kernel VA does not cover all OS' registers:")); + PVR_DPF((PVR_DBG_WARNING, "Maximum OS count = %d / Per-os register banks mapped = %d", RGXFW_MAX_NUM_OS, ui32OsRegBanksMapped)); + PVR_DPF((PVR_DBG_WARNING, "Only first %d MTS registers will be tested", ui32OsRegBanksMapped)); + } + + ui32KickType = RGX_CR_MTS_SCHEDULE_DM_DM0 | RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED; + + for (ui32OSid = 0; ui32OSid < ui32OsRegBanksMapped; ui32OSid++) + { + /* set Test field */ + psFwSysInit->ui32OSKickTest = (ui32OSid << RGXFWIF_KICK_TEST_OSID_SHIFT) | RGXFWIF_KICK_TEST_ENABLED_BIT; + /* Force a read-back to memory to avoid posted writes on certain buses */ + (void) psFwSysInit->ui32OSKickTest; + OSWriteMemoryBarrier(); + + /* kick register */ + ui32ScheduleRegister = RGX_CR_MTS_SCHEDULE + (ui32OSid * RGX_VIRTUALISATION_REG_SIZE_PER_OS); + PVR_DPF((PVR_DBG_MESSAGE, " Testing OS: %u, Kick Reg: %X", + ui32OSid, + ui32ScheduleRegister)); + OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32ScheduleRegister, ui32KickType); + OSMemoryBarrier(); + + /* Wait test enable bit to be unset */ + if (PVRSRVPollForValueKM(psDeviceNode, + (IMG_UINT32 *)&psFwSysInit->ui32OSKickTest, 
+ 0, + RGXFWIF_KICK_TEST_ENABLED_BIT, + POLL_FLAG_LOG_ERROR | POLL_FLAG_DEBUG_DUMP) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Testing OS %u kick register failed: firmware did not clear test location (contents: 0x%X)", + ui32OSid, + psFwSysInit->ui32OSKickTest)); + + return PVRSRV_ERROR_TIMEOUT; + } + + /* Check that the value is what we expect */ + if (psFwSysInit->ui32OSKickTest != 0) + { + PVR_DPF((PVR_DBG_ERROR, "Testing OS %u kick register failed: firmware wrote 0x%X to test location", + ui32OSid, + psFwSysInit->ui32OSKickTest)); + return PVRSRV_ERROR_INIT_FAILURE; + } + + PVR_DPF((PVR_DBG_MESSAGE, " PASS")); + } + + PVR_LOG(("MTS passed sideband tests")); + return PVRSRV_OK; +} +#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) && !defined(NO_HARDWARE) */ + +#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) && defined(PDUMP) +#define SCRATCH_VALUE (0x12345678U) + +static void RGXRiscvDebugModuleTest(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + void *pvAppHintState = NULL; + IMG_UINT32 ui32AppHintDefault = 0; + IMG_BOOL bRunRiscvDmiTest; + + IMG_UINT32 *pui32FWCode = NULL; + PVRSRV_ERROR eError; + + OSCreateKMAppHintState(&pvAppHintState); + OSGetKMAppHintBOOL(pvAppHintState, RiscvDmiTest, + &ui32AppHintDefault, &bRunRiscvDmiTest); + OSFreeKMAppHintState(pvAppHintState); + + if (bRunRiscvDmiTest == IMG_FALSE) + { + return; + } + + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, (void **)&pui32FWCode); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Error acquiring FW code memory pointer (%s)", + __func__, + PVRSRVGetErrorString(eError))); + } + + PDumpIfKM("ENABLE_RISCV_DMI_TEST", PDUMP_FLAGS_CONTINUOUS); + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "DMI_TEST BEGIN"); + + RGXRiscvHalt(psDevInfo); + + /* + * Test RISC-V register reads/writes. + * RGXRiscv[Write/Poll]Reg are used to access internal RISC-V registers + * via debug module. 
+ */ + + /* Write RISC-V mscratch register */ + RGXRiscvWriteReg(psDevInfo, RGXRISCVFW_MSCRATCH_ADDR, SCRATCH_VALUE); + /* Read RISC-V misa register (compare against default standard value) */ + RGXRiscvPollReg(psDevInfo, RGXRISCVFW_MISA_ADDR, RGXRISCVFW_MISA_VALUE); + /* Read RISC-V mscratch register (compare against previously written value) */ + RGXRiscvPollReg(psDevInfo, RGXRISCVFW_MSCRATCH_ADDR, SCRATCH_VALUE); + + /* + * Test RISC-V memory reads/writes. + * RGXRiscv[Write/Poll]Mem are used to access system memory via debug module + * (from RISC-V point of view). + */ + + if (pui32FWCode != NULL) + { + IMG_UINT32 ui32Tmp; + + /* Acquire pointer to FW code (bootloader) */ + pui32FWCode += RGXGetFWImageSectionOffset(NULL, RISCV_UNCACHED_CODE) / sizeof(IMG_UINT32); + /* Save FW code at address (bootloader) */ + ui32Tmp = *pui32FWCode; + + /* Write FW code at address (bootloader) */ + RGXRiscvWriteMem(psDevInfo, RGXRISCVFW_BOOTLDR_CODE_BASE, SCRATCH_VALUE); + /* Read FW code at address (bootloader + 4) (compare against value read from Host) */ + RGXRiscvPollMem(psDevInfo, RGXRISCVFW_BOOTLDR_CODE_BASE + 4, *(pui32FWCode + 1)); + /* Read FW code at address (bootloader) (compare against previously written value) */ + RGXRiscvPollMem(psDevInfo, RGXRISCVFW_BOOTLDR_CODE_BASE, SCRATCH_VALUE); + /* Restore FW code at address (bootloader) */ + RGXRiscvWriteMem(psDevInfo, RGXRISCVFW_BOOTLDR_CODE_BASE, ui32Tmp); + + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc); + } + + /* + * Test GPU register reads/writes. + * RGXRiscv[Write/Poll]Mem are used to access GPU registers via debug module + * (from RISC-V point of view). + * Note that system memory and GPU register accesses both use the same + * debug module interface, targeting different address ranges. 
+ */ + + /* Write SCRATCH0 from the Host */ + PDUMPREG32(RGX_PDUMPREG_NAME, RGX_CR_SCRATCH0, SCRATCH_VALUE, PDUMP_FLAGS_CONTINUOUS); + /* Read SCRATCH0 */ + RGXRiscvPollMem(psDevInfo, RGXRISCVFW_SOCIF_BASE | RGX_CR_SCRATCH0, SCRATCH_VALUE); + /* Write SCRATCH0 */ + RGXRiscvWriteMem(psDevInfo, RGXRISCVFW_SOCIF_BASE | RGX_CR_SCRATCH0, ~SCRATCH_VALUE); + /* Read SCRATCH0 from the Host */ + PDUMPREGPOL(RGX_PDUMPREG_NAME, RGX_CR_SCRATCH0, ~SCRATCH_VALUE, 0xFFFFFFFFU, + PDUMP_FLAGS_CONTINUOUS, PDUMP_POLL_OPERATOR_EQUAL); + + RGXRiscvResume(psDevInfo); + + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "DMI_TEST END"); + PDumpFiKM("ENABLE_RISCV_DMI_TEST", PDUMP_FLAGS_CONTINUOUS); +} +#endif + +/* + RGXPostPowerState +*/ +PVRSRV_ERROR RGXPostPowerState(IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState, + IMG_BOOL bForced) +{ + if ((eNewPowerState != eCurrentPowerState) && + (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON)) + { + PVRSRV_ERROR eError; + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; +#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) && defined(PDUMP) + IMG_UINT32 ui32ConfigFlags; +#endif + + if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF) + { + /* Update timer correlation related data */ + RGXTimeCorrBegin(psDeviceNode, RGXTIMECORR_EVENT_POWER); + + /* Update GPU state counters */ + _RGXUpdateGPUUtilStats(psDevInfo); + + eError = RGXDoStart(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: RGXDoStart failed")); + return eError; + } + + OSMemoryBarrier(); + + /* + * Check whether the FW has started by polling on bFirmwareStarted flag + */ + if (PVRSRVPollForValueKM(psDeviceNode, + (IMG_UINT32 __iomem *)&psDevInfo->psRGXFWIfSysInit->bFirmwareStarted, + IMG_TRUE, + 0xFFFFFFFF, + POLL_FLAG_LOG_ERROR | POLL_FLAG_DEBUG_DUMP) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: Polling 
for 'FW started' flag failed.")); + eError = PVRSRV_ERROR_TIMEOUT; + +#if defined(TRACK_FW_BOOT) + RGXCheckFWBootStage(psDevInfo); +#endif + + /* + * When bFirmwareStarted fails some info may be gained by doing the following + * debug dump but unfortunately it could be potentially dangerous if the reason + * for not booting is the GPU power is not ON. However, if we have reached this + * point the System Layer has returned without errors, we assume the GPU power + * is indeed ON. + */ + RGXDumpRGXDebugSummary(NULL, NULL, psDeviceNode->pvDevice, IMG_TRUE); + RGXDumpRGXRegisters(NULL, NULL, psDeviceNode->pvDevice); + + return eError; + } + +#if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Wait for the Firmware to start."); + eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfSysInitMemDesc, + offsetof(RGXFWIF_SYSINIT, bFirmwareStarted), + IMG_TRUE, + 0xFFFFFFFFU, + PDUMP_POLL_OPERATOR_EQUAL, + PDUMP_FLAGS_CONTINUOUS); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "RGXPostPowerState: problem pdumping POL for psRGXFWIfSysInitMemDesc (%d)", + eError)); + return eError; + } + +#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) + { + PVRSRV_ERROR eError; + RGXFWIF_SYSDATA *psFwSysData; + + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfFwSysDataMemDesc, (void **)&psFwSysData); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to acquire OS Config (%u)", + __func__, + eError)); + return eError; + } + + ui32ConfigFlags = psFwSysData->ui32ConfigFlags; + + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfFwSysDataMemDesc); + } + + /* Check if the Validation IRQ flag is set */ + if ((ui32ConfigFlags & RGXFWIF_INICFG_VALIDATE_IRQ) != 0) + { + eError = PVRSRVValidateIrqs(psDeviceNode); + if (eError != PVRSRV_OK) + { + return eError; + } + } +#endif /* defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) */ + +#endif /* defined(PDUMP) */ + +#if defined(SUPPORT_GPUVIRT_VALIDATION) && !defined(NO_HARDWARE) + eError = 
RGXVirtualisationPowerupSidebandTest(psDeviceNode, psDevInfo->psRGXFWIfSysInit, psDevInfo); + if (eError != PVRSRV_OK) + { + return eError; + } +#endif + +#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) && defined(PDUMP) + RGXRiscvDebugModuleTest(psDevInfo); +#endif + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + SetFirmwareStartTime(psDevInfo->psRGXFWIfSysInit->ui32FirmwareStartedTimeStamp); +#endif + + HTBSyncPartitionMarker(psDevInfo->psRGXFWIfSysInit->ui32MarkerVal); + + psDevInfo->bRGXPowered = IMG_TRUE; + +#if defined(SUPPORT_LINUX_DVFS) + eError = ResumeDVFS(); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: Failed to resume DVFS")); + return eError; + } +#endif + } + } + + PDUMPCOMMENT("RGXPostPowerState: Current state: %d, New state: %d", eCurrentPowerState, eNewPowerState); + + return PVRSRV_OK; +} + +/* + RGXPreClockSpeedChange +*/ +PVRSRV_ERROR RGXPreClockSpeedChange(IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eCurrentPowerState) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_DATA *psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData; + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + PVR_UNREFERENCED_PARAMETER(psRGXData); + + PVR_DPF((PVR_DBG_MESSAGE, "RGXPreClockSpeedChange: RGX clock speed was %uHz", + psRGXData->psRGXTimingInfo->ui32CoreClockSpeed)); + + if ((eCurrentPowerState != PVRSRV_DEV_POWER_STATE_OFF) && + (psFwSysData->ePowState != RGXFWIF_POW_OFF)) + { + /* Update GPU frequency and timer correlation related data */ + RGXTimeCorrEnd(psDeviceNode, RGXTIMECORR_EVENT_DVFS); + } + + return eError; +} + +/* + RGXPostClockSpeedChange +*/ +PVRSRV_ERROR RGXPostClockSpeedChange(IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eCurrentPowerState) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_RGXDEV_INFO *psDevInfo = 
psDeviceNode->pvDevice; + RGX_DATA *psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData; + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + IMG_UINT32 ui32NewClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed; + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + /* Update runtime configuration with the new value */ + psDevInfo->psRGXFWIfRuntimeCfg->ui32CoreClockSpeed = ui32NewClockSpeed; + OSWriteMemoryBarrier(); + + if ((eCurrentPowerState != PVRSRV_DEV_POWER_STATE_OFF) && + (psFwSysData->ePowState != RGXFWIF_POW_OFF)) + { + RGXFWIF_KCCB_CMD sCOREClkSpeedChangeCmd; + IMG_UINT32 ui32CmdKCCBSlot; + + RGXTimeCorrBegin(psDeviceNode, RGXTIMECORR_EVENT_DVFS); + + sCOREClkSpeedChangeCmd.eCmdType = RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE; + sCOREClkSpeedChangeCmd.uCmdData.sCoreClkSpeedChangeData.ui32NewClockSpeed = ui32NewClockSpeed; + + /* Ensure the new clock speed is written to memory before requesting the FW to read it */ + OSMemoryBarrier(); + + PDUMPCOMMENT("Scheduling CORE clock speed change command"); + + PDUMPPOWCMDSTART(); + eError = RGXSendCommandAndGetKCCBSlot(psDeviceNode->pvDevice, + &sCOREClkSpeedChangeCmd, + PDUMP_FLAGS_NONE, + &ui32CmdKCCBSlot); + PDUMPPOWCMDEND(); + + if (eError != PVRSRV_OK) + { + PDUMPCOMMENT("Scheduling CORE clock speed change command failed"); + PVR_DPF((PVR_DBG_ERROR, "RGXPostClockSpeedChange: Scheduling KCCB command failed. Error:%u", eError)); + return eError; + } + + PVR_DPF((PVR_DBG_MESSAGE, "RGXPostClockSpeedChange: RGX clock speed changed to %uHz", + psRGXData->psRGXTimingInfo->ui32CoreClockSpeed)); + } + + return eError; +} + +/*************************************************************************/ /*! +@Function RGXPowUnitsStateMaskChange +@Description Changes power state of power units/islands +@Input hDevHandle RGX Device Node. +@Input ui32PowUnitsStateMask Mask containing power state of PUs. + Each bit corresponds to an PU. 
+ Bit position corresponds to PU number i.e. Bit0 is PU0, Bit1 is PU1 etc. + '1' indicates ON and '0' indicates OFF. + Value must be non-zero. +@Return PVRSRV_ERROR. +*/ /**************************************************************************/ +PVRSRV_ERROR RGXPowUnitsStateMaskChange(IMG_HANDLE hDevHandle, IMG_UINT32 ui32PowUnitsStateMask) +{ + + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + RGXFWIF_KCCB_CMD sPowUnitsStateMaskChange; + IMG_UINT32 ui32PowUnitsMask = psDevInfo->ui32AvailablePowUnitsMask; + IMG_UINT32 ui32CmdKCCBSlot; + RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + /** + * Validate the input. At-least one PU must be powered on and all requested + * PU's must be a subset of full PU mask. + */ + if ((ui32PowUnitsStateMask == 0) || (ui32PowUnitsStateMask & ~ui32PowUnitsMask)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Invalid Power Units mask requested (0x%X). 
Value should be non-zero and sub-set of 0x%X mask", + __func__, + ui32PowUnitsStateMask, + ui32PowUnitsMask)); + return PVRSRV_ERROR_INVALID_SPU_MASK; + } + + psRuntimeCfg->ui32PowUnitsStateMask = ui32PowUnitsStateMask; + OSWriteMemoryBarrier(); + +#if !defined(NO_HARDWARE) + { + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + + if (psFwSysData->ePowState == RGXFWIF_POW_OFF) + { + return PVRSRV_OK; + } + + if (psFwSysData->ePowState != RGXFWIF_POW_FORCED_IDLE) + { + eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED; + PVR_DPF((PVR_DBG_ERROR, + "%s: Powered units state can not be changed, when not IDLE", + __func__)); + return eError; + } + } +#endif + + eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim", + __func__)); + return eError; + } + + sPowUnitsStateMaskChange.eCmdType = RGXFWIF_KCCB_CMD_POW; + sPowUnitsStateMaskChange.uCmdData.sPowData.ePowType = RGXFWIF_POW_NUM_UNITS_CHANGE; + sPowUnitsStateMaskChange.uCmdData.sPowData.uPowerReqData.ui32PowUnitsStateMask = ui32PowUnitsStateMask; + + PDUMPCOMMENT("Scheduling command to change power units state to 0x%X", ui32PowUnitsStateMask); + eError = RGXSendCommandAndGetKCCBSlot(psDeviceNode->pvDevice, + &sPowUnitsStateMaskChange, + PDUMP_FLAGS_NONE, + &ui32CmdKCCBSlot); + + if (eError != PVRSRV_OK) + { + PDUMPCOMMENT("Scheduling command to change power units state. Error:%u", eError); + PVR_DPF((PVR_DBG_ERROR, + "%s: Scheduling KCCB to change power units state. Error:%u", + __func__, eError)); + return eError; + } + + /* Wait for the firmware to answer. 
*/ + eError = RGXPollForGPCommandCompletion(psDeviceNode, + psDevInfo->psPowSyncPrim->pui32LinAddr, + 0x1, 0xFFFFFFFF); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Timeout waiting for idle request", __func__)); + return eError; + } + +#if defined(PDUMP) + PDUMPCOMMENT("%s: Poll for Kernel SyncPrim [0x%p] on DM %d", __func__, psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP); + + SyncPrimPDumpPol(psDevInfo->psPowSyncPrim, + 1, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + 0); +#endif + + return PVRSRV_OK; +} + +/* + @Function RGXAPMLatencyChange +*/ +PVRSRV_ERROR RGXAPMLatencyChange(IMG_HANDLE hDevHandle, + IMG_UINT32 ui32ActivePMLatencyms, + IMG_BOOL bActivePMLatencyPersistant) +{ + + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; + IMG_UINT32 ui32CmdKCCBSlot; + PVRSRV_DEV_POWER_STATE ePowerState; + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + eError = PVRSRVPowerLock(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "RGXAPMLatencyChange: Failed to acquire power lock")); + return eError; + } + + /* Update runtime configuration with the new values and ensure the + * new APM latency is written to memory before requesting the FW to + * read it + */ + psRuntimeCfg->ui32ActivePMLatencyms = ui32ActivePMLatencyms; + psRuntimeCfg->bActivePMLatencyPersistant = bActivePMLatencyPersistant; + OSWriteMemoryBarrier(); + + eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); + + if ((eError == PVRSRV_OK) && (ePowerState != PVRSRV_DEV_POWER_STATE_OFF)) + { + RGXFWIF_KCCB_CMD sActivePMLatencyChange; + sActivePMLatencyChange.eCmdType = RGXFWIF_KCCB_CMD_POW; + sActivePMLatencyChange.uCmdData.sPowData.ePowType = RGXFWIF_POW_APM_LATENCY_CHANGE; + + PDUMPCOMMENT("Scheduling command to change APM latency to %u", ui32ActivePMLatencyms); + eError = 
RGXSendCommandAndGetKCCBSlot(psDeviceNode->pvDevice, + &sActivePMLatencyChange, + PDUMP_FLAGS_NONE, + &ui32CmdKCCBSlot); + + if (eError != PVRSRV_OK) + { + PDUMPCOMMENT("Scheduling command to change APM latency failed. Error:%u", eError); + PVR_DPF((PVR_DBG_ERROR, "RGXAPMLatencyChange: Scheduling KCCB to change APM latency failed. Error:%u", eError)); + goto ErrorExit; + } + } + +ErrorExit: + PVRSRVPowerUnlock(psDeviceNode); + + return eError; +} + +/* + RGXActivePowerRequest +*/ +PVRSRV_ERROR RGXActivePowerRequest(IMG_HANDLE hDevHandle) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + + psDevInfo->ui32ActivePMReqTotal++; + + /* Powerlock to avoid further requests from racing with the FW hand-shake + * from now on (previous kicks to this point are detected by the FW) + * PVRSRVPowerLock is replaced with PVRSRVPowerTryLock to avoid + * potential dead lock between PDumpWriteLock and PowerLock + * during 'DriverLive + PDUMP=1 + EnableAPM=1'. 
+ */ + eError = PVRSRVPowerTryLock(psDeviceNode); + if (eError != PVRSRV_OK) + { + if (eError != PVRSRV_ERROR_RETRY) + { + PVR_LOG_ERROR(eError, "PVRSRVPowerTryLock"); + } + else + { + psDevInfo->ui32ActivePMReqRetry++; + } + goto _RGXActivePowerRequest_PowerLock_failed; + } + + /* Check again for IDLE once we have the power lock */ + if (psFwSysData->ePowState == RGXFWIF_POW_IDLE) + { +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + SetFirmwareHandshakeIdleTime(RGXReadHWTimerReg(psDevInfo)-psFwSysData->ui64StartIdleTime); +#endif + + PDUMPPOWCMDSTART(); + eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, + PVRSRV_DEV_POWER_STATE_OFF, + IMG_FALSE, /* forced */ + IMG_FALSE); + PDUMPPOWCMDEND(); + + if (eError == PVRSRV_OK) + { + psDevInfo->ui32ActivePMReqOk++; + } + else if (eError == PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED) + { + psDevInfo->ui32ActivePMReqDenied++; + } + } + else + { + psDevInfo->ui32ActivePMReqNonIdle++; + } + + PVRSRVPowerUnlock(psDeviceNode); + +_RGXActivePowerRequest_PowerLock_failed: + + return eError; +} +/* + RGXForcedIdleRequest +*/ + +#define RGX_FORCED_IDLE_RETRY_COUNT 10 + +PVRSRV_ERROR RGXForcedIdleRequest(IMG_HANDLE hDevHandle, IMG_BOOL bDeviceOffPermitted) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_KCCB_CMD sPowCmd; + PVRSRV_ERROR eError; + IMG_UINT32 ui32RetryCount = 0; + IMG_UINT32 ui32CmdKCCBSlot; +#if !defined(NO_HARDWARE) + RGXFWIF_SYSDATA *psFwSysData; +#endif + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + +#if !defined(NO_HARDWARE) + psFwSysData = psDevInfo->psRGXFWIfFwSysData; + + /* Firmware already forced idle */ + if (psFwSysData->ePowState == RGXFWIF_POW_FORCED_IDLE) + { + return PVRSRV_OK; + } + + /* Firmware is not powered. Sometimes this is permitted, for instance we were forcing idle to power down. */ + if (psFwSysData->ePowState == RGXFWIF_POW_OFF) + { + return (bDeviceOffPermitted) ? 
PVRSRV_OK : PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED; + } +#endif + + eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim", + __func__)); + return eError; + } + sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW; + sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ; + sPowCmd.uCmdData.sPowData.uPowerReqData.ePowRequestType = RGXFWIF_POWER_FORCE_IDLE; + + PDUMPCOMMENT("RGXForcedIdleRequest: Sending forced idle command"); + + /* Send one forced IDLE command to GP */ + eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, + &sPowCmd, + PDUMP_FLAGS_NONE, + &ui32CmdKCCBSlot); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to send idle request", __func__)); + return eError; + } + + /* Wait for GPU to finish current workload */ + do { + eError = RGXPollForGPCommandCompletion(psDeviceNode, + psDevInfo->psPowSyncPrim->pui32LinAddr, + 0x1, 0xFFFFFFFF); + if ((eError == PVRSRV_OK) || (ui32RetryCount == RGX_FORCED_IDLE_RETRY_COUNT)) + { + break; + } + ui32RetryCount++; + PVR_DPF((PVR_DBG_WARNING, + "%s: Request timeout. Retry %d of %d", + __func__, ui32RetryCount, RGX_FORCED_IDLE_RETRY_COUNT)); + } while (IMG_TRUE); + + if (eError != PVRSRV_OK) + { + RGXFWNotifyHostTimeout(psDevInfo); + PVR_DPF((PVR_DBG_ERROR, + "%s: Idle request failed. 
Firmware potentially left in forced idle state", + __func__)); + return eError; + } + +#if defined(PDUMP) + PDUMPCOMMENT("RGXForcedIdleRequest: Poll for Kernel SyncPrim [0x%p] on DM %d", psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP); + + SyncPrimPDumpPol(psDevInfo->psPowSyncPrim, + 1, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + 0); +#endif + +#if !defined(NO_HARDWARE) + /* Check the firmware state for idleness */ + if (psFwSysData->ePowState != RGXFWIF_POW_FORCED_IDLE) + { + return PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED; + } +#endif + + return PVRSRV_OK; +} + +/* + RGXCancelForcedIdleRequest +*/ +PVRSRV_ERROR RGXCancelForcedIdleRequest(IMG_HANDLE hDevHandle) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_KCCB_CMD sPowCmd; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 ui32CmdKCCBSlot; + PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); + + eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim", + __func__)); + goto ErrorExit; + } + + /* Send the IDLE request to the FW */ + sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW; + sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ; + sPowCmd.uCmdData.sPowData.uPowerReqData.ePowRequestType = RGXFWIF_POWER_CANCEL_FORCED_IDLE; + + PDUMPCOMMENT("RGXForcedIdleRequest: Sending cancel forced idle command"); + + /* Send cancel forced IDLE command to GP */ + eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, + &sPowCmd, + PDUMP_FLAGS_NONE, + &ui32CmdKCCBSlot); + + if (eError != PVRSRV_OK) + { + PDUMPCOMMENT("RGXCancelForcedIdleRequest: Failed to send cancel IDLE request for DM%d", RGXFWIF_DM_GP); + goto ErrorExit; + } + + /* Wait for the firmware to answer. 
*/ + eError = RGXPollForGPCommandCompletion(psDeviceNode, + psDevInfo->psPowSyncPrim->pui32LinAddr, + 1, 0xFFFFFFFF); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Timeout waiting for cancel idle request", __func__)); + goto ErrorExit; + } + +#if defined(PDUMP) + PDUMPCOMMENT("RGXCancelForcedIdleRequest: Poll for Kernel SyncPrim [0x%p] on DM %d", psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP); + + SyncPrimPDumpPol(psDevInfo->psPowSyncPrim, + 1, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + 0); +#endif + + return eError; + +ErrorExit: + PVR_DPF((PVR_DBG_ERROR, "%s: Firmware potentially left in forced idle state", __func__)); + return eError; +} + +#if defined(SUPPORT_VALIDATION) +#define RGX_POWER_DOMAIN_STATE_INVALID (0xFFFFFFFF) + +PVRSRV_ERROR RGXPowerDomainInitState(RGX_POWER_DOMAIN_STATE *psState, + IMG_UINT32 ui32MaxPowUnitsCount) +{ + /* + * Total power domain states = 2^(Max power unit count) + */ + IMG_UINT32 ui32TotalStates = 1 << ui32MaxPowUnitsCount; + IMG_UINT32 i; + + /** + * Allocate memory for storing last transition for each power domain + * state. 
+ */ + psState->paui32LastTransition = OSAllocMem(ui32TotalStates * + sizeof(*psState->paui32LastTransition)); + + if (!psState->paui32LastTransition) + { + PVR_DPF((PVR_DBG_ERROR, "%s: failed to allocate memory ", __func__)); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /** + * Initialize last transition of each state to invalid + */ + for (i=0; i<ui32TotalStates; i++) + { + psState->paui32LastTransition[i] = RGX_POWER_DOMAIN_STATE_INVALID; + } + + psState->ui32PowUnitsCount = ui32MaxPowUnitsCount; + psState->ui32CurrentState = RGX_POWER_DOMAIN_STATE_INVALID; + + return PVRSRV_OK; +} + +void RGXPowerDomainDeInitState(RGX_POWER_DOMAIN_STATE *psState) +{ + psState->ui32PowUnitsCount = 0; + + if (psState->paui32LastTransition) + { + OSFreeMem(psState->paui32LastTransition); + } +} + +IMG_UINT32 RGXPowerDomainGetNextState(RGX_POWER_DOMAIN_STATE *psState) +{ + IMG_UINT32 ui32NextState, ui32CurrentState = psState->ui32CurrentState; + IMG_UINT32 ui32TotalStates = 1 << psState->ui32PowUnitsCount; + + if (ui32CurrentState == RGX_POWER_DOMAIN_STATE_INVALID) + { + /** + * Start with all units powered off. 
+ */ + ui32NextState = 0; + } + else if (psState->paui32LastTransition[ui32CurrentState] == RGX_POWER_DOMAIN_STATE_INVALID) + { + ui32NextState = ui32CurrentState; + psState->paui32LastTransition[ui32CurrentState] = ui32CurrentState; + } + else + { + ui32NextState = (psState->paui32LastTransition[ui32CurrentState] + 1) % ui32TotalStates; + psState->paui32LastTransition[ui32CurrentState] = ui32NextState; + } + + psState->ui32CurrentState = ui32NextState; + return ui32NextState; +} +#endif +/****************************************************************************** + End of file (rgxpower.c) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgxpower.h b/drivers/gpu/drm/phytium/octopus/rgxpower.h new file mode 100644 index 000000000000..ea48ae956ecc --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxpower.h @@ -0,0 +1,272 @@ +/*************************************************************************/ /*! +@File +@Title RGX power header file +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Header for the RGX power +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(RGXPOWER_H) +#define RGXPOWER_H + +#include "pvrsrv_error.h" +#include "img_types.h" +#include "servicesext.h" +#include "rgxdevice.h" + + +/*! 
+****************************************************************************** + + @Function RGXPrePowerState + + @Description + + does necessary preparation before power state transition + + @Input hDevHandle : RGX Device Node + @Input eNewPowerState : New power state + @Input eCurrentPowerState : Current power state + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR RGXPrePowerState(IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState, + IMG_BOOL bForced); + +/*! +****************************************************************************** + + @Function RGXPostPowerState + + @Description + + does necessary preparation after power state transition + + @Input hDevHandle : RGX Device Node + @Input eNewPowerState : New power state + @Input eCurrentPowerState : Current power state + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR RGXPostPowerState(IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState, + IMG_BOOL bForced); + +/*! +****************************************************************************** + + @Function RGXVzPrePowerState + + @Description + + does necessary preparation before power state transition on a vz driver + + @Input hDevHandle : RGX Device Node + @Input eNewPowerState : New power state + @Input eCurrentPowerState : Current power state + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR RGXVzPrePowerState(IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState, + IMG_BOOL bForced); + +/*! 
+****************************************************************************** + + @Function RGXVzPostPowerState + + @Description + + does necessary preparation after power state transition on a vz driver + + @Input hDevHandle : RGX Device Node + @Input eNewPowerState : New power state + @Input eCurrentPowerState : Current power state + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR RGXVzPostPowerState(IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState, + IMG_BOOL bForced); + +/*! +****************************************************************************** + + @Function RGXPreClockSpeedChange + + @Description + + Does processing required before an RGX clock speed change. + + @Input hDevHandle : RGX Device Node + @Input eCurrentPowerState : Power state of the device + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR RGXPreClockSpeedChange(IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eCurrentPowerState); + +/*! +****************************************************************************** + + @Function RGXPostClockSpeedChange + + @Description + + Does processing required after an RGX clock speed change. + + @Input hDevHandle : RGX Device Node + @Input eCurrentPowerState : Power state of the device + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR RGXPostClockSpeedChange(IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eCurrentPowerState); + + +/*! +****************************************************************************** + + @Function RGXPowUnitsStateMaskChange + + @Description Changes power state of SPUs + + @Input hDevHandle RGX Device Node. + @Input ui32PowUnitsStateMask Mask containing power state of SPUs. + Each bit corresponds to an SPU. 
+ Bit position corresponds to SPU number + i.e. Bit0 is SPU0, Bit1 is SPU1 etc. + '1' indicates ON and '0' indicates OFF. + Value must be non-zero. + @Return PVRSRV_ERROR. + +******************************************************************************/ +PVRSRV_ERROR RGXPowUnitsStateMaskChange(IMG_HANDLE hDevHandle, + IMG_UINT32 ui32PowUnitsStateMask); + +/*! +****************************************************************************** + + @Function RGXAPMLatencyChange + + @Description + + Changes the wait duration used before firmware indicates IDLE. + Reducing this value will cause the firmware to shut off faster and + more often but may increase bubbles in GPU scheduling due to the added + power management activity. If bPersistent is NOT set, APM latency will + return back to system default on power up. + + @Input hDevHandle : RGX Device Node + @Input ui32ActivePMLatencyms : Number of milliseconds to wait + @Input bActivePMLatencyPersistant : Set to ensure new value is not reset + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR RGXAPMLatencyChange(IMG_HANDLE hDevHandle, + IMG_UINT32 ui32ActivePMLatencyms, + IMG_BOOL bActivePMLatencyPersistant); + +/*! +****************************************************************************** + + @Function RGXActivePowerRequest + + @Description Initiate a handshake with the FW to power off the GPU + + @Input hDevHandle : RGX Device Node + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR RGXActivePowerRequest(IMG_HANDLE hDevHandle); + +/*! +****************************************************************************** + + @Function RGXForcedIdleRequest + + @Description Initiate a handshake with the FW to idle the GPU + + @Input hDevHandle : RGX Device Node + + @Input bDeviceOffPermitted : Set to indicate device state being off is not + erroneous. 
+ + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR RGXForcedIdleRequest(IMG_HANDLE hDevHandle, IMG_BOOL bDeviceOffPermitted); + +/*! +****************************************************************************** + + @Function RGXCancelForcedIdleRequest + + @Description Send a request to cancel idle to the firmware. + + @Input hDevHandle : RGX Device Node + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR RGXCancelForcedIdleRequest(IMG_HANDLE hDevHandle); + + +#if defined(SUPPORT_VALIDATION) +PVRSRV_ERROR RGXPowerDomainInitState(RGX_POWER_DOMAIN_STATE *psState, + IMG_UINT32 ui32MaxPowUnitsCount); + +void RGXPowerDomainDeInitState(RGX_POWER_DOMAIN_STATE *psState); + +IMG_UINT32 RGXPowerDomainGetNextState(RGX_POWER_DOMAIN_STATE *psState); +#endif +#endif /* RGXPOWER_H */ diff --git a/drivers/gpu/drm/phytium/octopus/rgxray.c b/drivers/gpu/drm/phytium/octopus/rgxray.c new file mode 100644 index 000000000000..d9d0cee93266 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxray.c @@ -0,0 +1,732 @@ +/*************************************************************************/ /*! +@File +@Title RGX Ray routines +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description RGX Ray routines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "img_defs.h" +#include "srvkm.h" +#include "pdump_km.h" +#include "pvr_debug.h" +#include "rgxutils.h" +#include "rgxfwutils.h" +#include "rgxray.h" +#include "rgxmem.h" +#include "allocmem.h" +#include "devicemem.h" +#include "devicemem_pdump.h" +#include "osfunc.h" +#include "rgxccb.h" +#include "rgxhwperf.h" +#include "ospvr_gputrace.h" +#include "htbuffer.h" + +#include "sync_server.h" +#include "sync_internal.h" +#include "sync.h" +#include "rgx_memallocflags.h" + +#include "sync_checkpoint.h" +#include "sync_checkpoint_internal.h" + +#include "rgxtimerquery.h" + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) +#include "rgxworkest.h" +#endif + +/* Enable this to dump the compiled list of UFOs prior to kick call */ +#define ENABLE_CMP_UFO_DUMP 0 + +//#define CMP_CHECKPOINT_DEBUG 1 +//#define CMP_CHECKPOINT_DEBUG 1 + +#if defined(CMP_CHECKPOINT_DEBUG) +#define CHKPT_DBG(X) PVR_DPF(X) +#else +#define CHKPT_DBG(X) +#endif + +struct _RGX_SERVER_RAY_CONTEXT_ { + PVRSRV_DEVICE_NODE *psDeviceNode; + DEVMEM_MEMDESC *psFWRayContextMemDesc; + DEVMEM_MEMDESC *psContextStateMemDesc; + POS_LOCK hLock; +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + WORKEST_HOST_DATA sWorkEstData; +#endif + RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; + SYNC_ADDR_LIST sSyncAddrListFence; + SYNC_ADDR_LIST sSyncAddrListUpdate; +}; + +PVRSRV_ERROR PVRSRVRGXCreateRayContextKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32Priority, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32ContextFlags, + IMG_UINT32 ui32StaticRayContextStateSize, + IMG_PBYTE pStaticRayContextState, + RGX_SERVER_RAY_CONTEXT **ppsRayContext) +{ + DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_SERVER_RAY_CONTEXT *psRayContext; + RGXFWIF_FWRAYCONTEXT *psFWRayContext; + RGX_COMMON_CONTEXT_INFO 
sInfo = {NULL}; + PVRSRV_ERROR eError; + + *ppsRayContext = NULL; + + psRayContext = OSAllocZMem(sizeof(*psRayContext)); + if (psRayContext == NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + psRayContext->psDeviceNode = psDeviceNode; + /* + Create the FW ray context, this has the RDM common + context embedded within it + */ + eError = DevmemFwAllocate(psDevInfo, + sizeof(RGXFWIF_FWRAYCONTEXT), + RGX_FWCOMCTX_ALLOCFLAGS, + "FwRayContext", + &psRayContext->psFWRayContextMemDesc); + if (eError != PVRSRV_OK) + { + goto fail_fwraycontext; + } + + eError = OSLockCreate(&psRayContext->hLock); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to create lock (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_createlock; + } + + PDUMPCOMMENT("Allocate RGX firmware ray context suspend state"); + + eError = DevmemFwAllocate(psDevInfo, + sizeof(RGXFWIF_COMPUTECTX_STATE), + RGX_FWCOMCTX_ALLOCFLAGS, + "FwRayContextState", + &psRayContext->psContextStateMemDesc); + + eError = FWCommonContextAllocate(psConnection, + psDeviceNode, + REQ_TYPE_RAY, + RGXFWIF_DM_RAY, + hMemCtxPrivData, + psRayContext->psFWRayContextMemDesc, + offsetof(RGXFWIF_FWRAYCONTEXT, sRDMContext), + psFWMemContextMemDesc, + psRayContext->psContextStateMemDesc, + RGX_CDM_CCB_SIZE_LOG2, + RGX_CDM_CCB_MAX_SIZE_LOG2, + ui32ContextFlags, + ui32Priority, + UINT_MAX, + 0, + &sInfo, + &psRayContext->psServerCommonContext); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to init Ray fw common context (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_raycommoncontext; + } + + eError = DevmemAcquireCpuVirtAddr(psRayContext->psFWRayContextMemDesc, + (void **)&psFWRayContext); + if (eError != PVRSRV_OK) + { + goto fail_acquire_cpu_mapping; + } + + OSDeviceMemCopy(&psFWRayContext->sStaticRayContextState, pStaticRayContextState, ui32StaticRayContextStateSize); + DevmemPDumpLoadMem(psRayContext->psFWRayContextMemDesc, 0, 
sizeof(RGXFWIF_FWCOMPUTECONTEXT), PDUMP_FLAGS_CONTINUOUS); + DevmemReleaseCpuVirtAddr(psRayContext->psFWRayContextMemDesc); + + SyncAddrListInit(&psRayContext->sSyncAddrListFence); + SyncAddrListInit(&psRayContext->sSyncAddrListUpdate); + + *ppsRayContext = psRayContext; + + return PVRSRV_OK; +fail_acquire_cpu_mapping: +fail_raycommoncontext: + OSLockDestroy(psRayContext->hLock); +fail_createlock: + DevmemFwUnmapAndFree(psDevInfo, psRayContext->psFWRayContextMemDesc); +fail_fwraycontext: + OSFreeMem(psRayContext); + + return eError; +} + +PVRSRV_ERROR PVRSRVRGXDestroyRayContextKM(RGX_SERVER_RAY_CONTEXT *psRayContext) +{ + + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_RGXDEV_INFO *psDevInfo = psRayContext->psDeviceNode->pvDevice; + + + /* Check if the FW has finished with this resource ... */ + eError = RGXFWRequestCommonContextCleanUp(psRayContext->psDeviceNode, + psRayContext->psServerCommonContext, + RGXFWIF_DM_RAY, + PDUMP_FLAGS_NONE); + if (eError == PVRSRV_ERROR_RETRY) + { + return eError; + } + else if (eError != PVRSRV_OK) + { + PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", + __func__, + PVRSRVGetErrorString(eError))); + return eError; + } + + /* ... 
it has so we can free its resources */ + FWCommonContextFree(psRayContext->psServerCommonContext); + //DevmemFwUnmapAndFree(psDevInfo, psRayContext->psContextStateMemDesc); + psRayContext->psServerCommonContext = NULL; + + + DevmemFwUnmapAndFree(psDevInfo, psRayContext->psFWRayContextMemDesc); + + OSLockDestroy(psRayContext->hLock); + OSFreeMem(psRayContext); + + return PVRSRV_OK; +} + + +PVRSRV_ERROR PVRSRVRGXKickRDMKM(RGX_SERVER_RAY_CONTEXT *psRayContext, + IMG_UINT32 ui32ClientCacheOpSeqNum, + IMG_UINT32 ui32ClientUpdateCount, + SYNC_PRIMITIVE_BLOCK **pauiClientUpdateUFODevVarBlock, + IMG_UINT32 *paui32ClientUpdateSyncOffset, + IMG_UINT32 *paui32ClientUpdateValue, + PVRSRV_FENCE iCheckFence, + PVRSRV_TIMELINE iUpdateTimeline, + PVRSRV_FENCE *piUpdateFence, + IMG_CHAR pcszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH], + IMG_UINT32 ui32CmdSize, + IMG_PBYTE pui8DMCmd, + IMG_UINT32 ui32PDumpFlags, + IMG_UINT32 ui32ExtJobRef) +{ + + RGXFWIF_KCCB_CMD sRayKCCBCmd; + RGX_CCB_CMD_HELPER_DATA asCmdHelperData[1]; + PVRSRV_ERROR eError, eError2; + IMG_UINT32 ui32FWCtx; + + PRGXFWIF_TIMESTAMP_ADDR pPreAddr; + PRGXFWIF_TIMESTAMP_ADDR pPostAddr; + PRGXFWIF_UFO_ADDR pRMWUFOAddr; + PVRSRV_RGXDEV_INFO *psDevInfo; + RGX_CLIENT_CCB *psClientCCB; + IMG_UINT32 ui32IntJobRef; + + IMG_BOOL bCCBStateOpen = IMG_FALSE; + + IMG_UINT32 ui32IntClientFenceCount = 0; + PRGXFWIF_UFO_ADDR *pauiIntFenceUFOAddress = NULL; + IMG_UINT32 ui32IntClientUpdateCount = 0; + PRGXFWIF_UFO_ADDR *pauiIntUpdateUFOAddress = NULL; + IMG_UINT32 *paui32IntUpdateValue = NULL; + PVRSRV_FENCE iUpdateFence = PVRSRV_NO_FENCE; + IMG_UINT64 uiCheckFenceUID = 0; + IMG_UINT64 uiUpdateFenceUID = 0; + PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL; + PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL; + IMG_UINT32 ui32FenceSyncCheckpointCount = 0; + IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL; + PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL; + IMG_UINT32 ui32FenceTimelineUpdateValue = 0; + void 
*pvUpdateFenceFinaliseData = NULL; + + psDevInfo = FWCommonContextGetRGXDevInfo(psRayContext->psServerCommonContext); + psClientCCB = FWCommonContextGetClientCCB(psRayContext->psServerCommonContext); + ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal); + + if (iUpdateTimeline >= 0 && !piUpdateFence) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Ensure we haven't been given a null ptr to + * update values if we have been told we + * have updates + */ + if (ui32ClientUpdateCount > 0) + { + PVR_LOG_RETURN_IF_FALSE(paui32ClientUpdateValue != NULL, + "paui32ClientUpdateValue NULL but " + "ui32ClientUpdateCount > 0", + PVRSRV_ERROR_INVALID_PARAMS); + } + + /* Ensure the string is null-terminated (Required for safety) */ + pcszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH-1] = '\0'; + + OSLockAcquire(psRayContext->hLock); + + eError = SyncAddrListPopulate(&psRayContext->sSyncAddrListFence, + 0, + NULL, + NULL); + if (eError != PVRSRV_OK) + { + goto err_populate_sync_addr_list; + } + + ui32IntClientUpdateCount = ui32ClientUpdateCount; + + eError = SyncAddrListPopulate(&psRayContext->sSyncAddrListUpdate, + ui32ClientUpdateCount, + pauiClientUpdateUFODevVarBlock, + paui32ClientUpdateSyncOffset); + if (eError != PVRSRV_OK) + { + goto err_populate_sync_addr_list; + } + if (ui32IntClientUpdateCount && !pauiIntUpdateUFOAddress) + { + pauiIntUpdateUFOAddress = psRayContext->sSyncAddrListUpdate.pasFWAddrs; + } + paui32IntUpdateValue = paui32ClientUpdateValue; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), psRayContext->psDeviceNode->hSyncCheckpointContext=<%p>...", __func__, iCheckFence, (void*)psRayContext->psDeviceNode->hSyncCheckpointContext)); + /* Resolve the sync checkpoints that make up the input fence */ + eError = SyncCheckpointResolveFence(psRayContext->psDeviceNode->hSyncCheckpointContext, + iCheckFence, + &ui32FenceSyncCheckpointCount, + &apsFenceSyncCheckpoints, + &uiCheckFenceUID, ui32PDumpFlags); + if 
(eError != PVRSRV_OK) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", __func__, eError)); + goto fail_resolve_input_fence; + } + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __func__, iCheckFence, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints)); +#if defined(CMP_CHECKPOINT_DEBUG) + if (ui32FenceSyncCheckpointCount > 0) + { + IMG_UINT32 ii; + for (ii=0; ii", __func__, ii, (void*)psNextCheckpoint)); + } + } +#endif + /* Create the output fence (if required) */ + if (iUpdateTimeline != PVRSRV_NO_TIMELINE) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointCreateFence (iUpdateFence=%d, iUpdateTimeline=%d, psRayContext->psDeviceNode->hSyncCheckpointContext=<%p>)...", __func__, iUpdateFence, iUpdateTimeline, (void*)psRayContext->psDeviceNode->hSyncCheckpointContext)); + eError = SyncCheckpointCreateFence(psRayContext->psDeviceNode, + pcszUpdateFenceName, + iUpdateTimeline, + psRayContext->psDeviceNode->hSyncCheckpointContext, + &iUpdateFence, + &uiUpdateFenceUID, + &pvUpdateFenceFinaliseData, + &psUpdateSyncCheckpoint, + (void*)&psFenceTimelineUpdateSync, + &ui32FenceTimelineUpdateValue, + ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned error (%d)", __func__, eError)); + goto fail_create_output_fence; + } + + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned from SyncCheckpointCreateFence (iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, ui32FenceTimelineUpdateValue=%u)", __func__, iUpdateFence, psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue)); + + CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u, psFenceTimelineUpdateSync=<%p>", __func__, ui32IntClientUpdateCount, (void*)psFenceTimelineUpdateSync)); + /* Append the sync prim update for the timeline (if required) */ + if (psFenceTimelineUpdateSync) + { + IMG_UINT32 *pui32TimelineUpdateWp = NULL; + + /* Allocate memory to hold the list of update values 
(including our timeline update) */ + pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1)); + if (!pui32IntAllocatedUpdateValues) + { + /* Failed to allocate memory */ + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_alloc_update_values_mem; + } + OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1)); + /* Copy the update values into the new memory, then append our timeline update value */ + OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount); +#if defined(CMP_CHECKPOINT_DEBUG) + if (ui32IntClientUpdateCount > 0) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u:", __func__, ui32IntClientUpdateCount)); + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + /* Now set the additional update value */ + pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32IntClientUpdateCount; + *pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue; + ui32IntClientUpdateCount++; + /* Now make sure paui32ClientUpdateValue points to pui32IntAllocatedUpdateValues */ + paui32ClientUpdateValue = pui32IntAllocatedUpdateValues; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: append the timeline sync prim addr <%p> to the compute context update list", __func__, (void*)psFenceTimelineUpdateSync)); + /* Now append the timeline sync prim addr to the compute context update list */ + SyncAddrListAppendSyncPrim(&psRayContext->sSyncAddrListUpdate, + psFenceTimelineUpdateSync); +#if defined(CMP_CHECKPOINT_DEBUG) + if (ui32IntClientUpdateCount > 0) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u:", __func__, ui32IntClientUpdateCount)); + for 
(iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + /* Ensure paui32IntUpdateValue is now pointing to our new array of update values */ + paui32IntUpdateValue = pui32IntAllocatedUpdateValues; + } + } + + /* Append the checks (from input fence) */ + if (ui32FenceSyncCheckpointCount > 0) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to Ray RDM Fence (&psRayContext->sSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)&psRayContext->sSyncAddrListFence)); +#if defined(CMP_CHECKPOINT_DEBUG) + if (ui32IntClientUpdateCount > 0) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress; + + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + SyncAddrListAppendCheckpoints(&psRayContext->sSyncAddrListFence, + ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); + if (!pauiIntFenceUFOAddress) + { + pauiIntFenceUFOAddress = psRayContext->sSyncAddrListFence.pasFWAddrs; + } + ui32IntClientFenceCount += ui32FenceSyncCheckpointCount; + } +#if defined(CMP_CHECKPOINT_DEBUG) + if (ui32IntClientUpdateCount > 0) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)paui32IntUpdateValue; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: Dumping %d update values (paui32IntUpdateValue=<%p>)...", __func__, ui32IntClientUpdateCount, (void*)paui32IntUpdateValue)); + for (iii=0; iii", __func__, iii, (void*)pui32Tmp)); + CHKPT_DBG((PVR_DBG_ERROR, "%s: *paui32IntUpdateValue[%d] = 0x%x", __func__, iii, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + + if (psUpdateSyncCheckpoint) + { + /* Append the update (from output fence) */ + CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint to Ray RDM Update (&psRayContext->sSyncAddrListUpdate=<%p>, psUpdateSyncCheckpoint=<%p>)...", __func__, (void*)&psRayContext->sSyncAddrListUpdate , (void*)psUpdateSyncCheckpoint)); + SyncAddrListAppendCheckpoints(&psRayContext->sSyncAddrListUpdate, + 1, + 
&psUpdateSyncCheckpoint); + if (!pauiIntUpdateUFOAddress) + { + pauiIntUpdateUFOAddress = psRayContext->sSyncAddrListUpdate.pasFWAddrs; + } + ui32IntClientUpdateCount++; +#if defined(CMP_CHECKPOINT_DEBUG) + if (ui32IntClientUpdateCount > 0) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntUpdateUFOAddress; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntUpdateUFOAddress=<%p>, pui32Tmp=<%p>, ui32IntClientUpdateCount=%u", __func__, (void*)pauiIntUpdateUFOAddress, (void*)pui32Tmp, ui32IntClientUpdateCount)); + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + } + CHKPT_DBG((PVR_DBG_ERROR, "%s: (after pvr_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __func__, ui32IntClientFenceCount, ui32IntClientUpdateCount)); + +#if (ENABLE_CMP_UFO_DUMP == 1) + PVR_DPF((PVR_DBG_ERROR, "%s: dumping Ray (RDM) fence/updates syncs...", __func__)); + { + IMG_UINT32 ii; + PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress; + PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress; + IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue; + + /* Dump Fence syncs and Update syncs */ + PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Ray (RDM) fence syncs (&psRayContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __func__, ui32IntClientFenceCount, (void*)&psRayContext->sSyncAddrListFence, (void*)pauiIntFenceUFOAddress)); + for (ii=0; ii. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr)); + psTmpIntFenceUFOAddress++; + } + PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Ray (RDM) update syncs (&psRayContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __func__, ui32IntClientUpdateCount, (void*)&psRayContext->sSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress)); + for (ii=0; iiui32Addr & 0x1) + { + PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. 
FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr)); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue)); + pui32TmpIntUpdateValue++; + } + psTmpIntUpdateUFOAddress++; + } + } +#endif + + RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psRayContext->psDeviceNode->pvDevice, + &pPreAddr, + &pPostAddr, + &pRMWUFOAddr); + + RGXCmdHelperInitCmdCCB(psClientCCB, + 0, // ui64FBSCEntryMask, + ui32IntClientFenceCount, + pauiIntFenceUFOAddress, + NULL, + ui32IntClientUpdateCount, + pauiIntUpdateUFOAddress, + paui32IntUpdateValue, + ui32CmdSize, + pui8DMCmd, + &pPreAddr, + &pPostAddr, + &pRMWUFOAddr, + RGXFWIF_CCB_CMD_TYPE_RAY, + ui32ExtJobRef, + ui32IntJobRef, + ui32PDumpFlags, +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + NULL, +#else + NULL, +#endif + "Ray", + bCCBStateOpen, + asCmdHelperData); + eError = RGXCmdHelperAcquireCmdCCB(ARRAY_SIZE(asCmdHelperData), asCmdHelperData); + if (eError != PVRSRV_OK) + { + goto fail_cmdaquire; + } + + if (eError == PVRSRV_OK) + { + /* + All the required resources are ready at this point, we can't fail so + take the required server sync operations and commit all the resources + */ + RGXCmdHelperReleaseCmdCCB(1, asCmdHelperData, "RDM", FWCommonContextGetFWAddress(psRayContext->psServerCommonContext).ui32Addr); + } + + /* Construct the kernel compute CCB command. 
*/ + sRayKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; + sRayKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psRayContext->psServerCommonContext); + sRayKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB); + sRayKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB); + sRayKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; + sRayKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0; + + ui32FWCtx = FWCommonContextGetFWAddress(psRayContext->psServerCommonContext).ui32Addr; + + /* + * Submit the compute command to the firmware. + */ + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError2 = RGXScheduleCommand(psRayContext->psDeviceNode->pvDevice, + RGXFWIF_DM_RAY, + &sRayKCCBCmd, + ui32ClientCacheOpSeqNum, + ui32PDumpFlags); + if (eError2 != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + if (eError2 != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s failed to schedule kernel CCB command (%s)", + __func__, + PVRSRVGetErrorString(eError2))); + } + else + { + /* + PVRGpuTraceEnqueueEvent(psRayContext->psDeviceNode->pvDevice, + ui32FWCtx, ui32ExtJobRef, ui32IntJobRef, + RGX_HWPERF_KICK_TYPE_CDM); + */ + } + /* + * Now check eError (which may have returned an error from our earlier call + * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first + * so we check it now... 
+ */ + if (eError != PVRSRV_OK ) + { + goto fail_cmdaquire; + } + +#if defined(NO_HARDWARE) + /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */ + if (psUpdateSyncCheckpoint) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Signalling NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __func__, (void*)psUpdateSyncCheckpoint, SyncCheckpointGetId(psUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(psUpdateSyncCheckpoint))); + SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint); + } + if (psFenceTimelineUpdateSync) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Updating NOHW sync prim<%p> to %d", __func__, (void*)psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue)); + SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue); + } + SyncCheckpointNoHWUpdateTimelines(NULL); +#endif /* defined(NO_HARDWARE) */ + + *piUpdateFence = iUpdateFence; + + if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_NO_FENCE)) + { + SyncCheckpointFinaliseFence(psRayContext->psDeviceNode, iUpdateFence, + pvUpdateFenceFinaliseData, + psUpdateSyncCheckpoint, pcszUpdateFenceName); + } + /* Drop the references taken on the sync checkpoints in the + * resolved input fence */ + SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); + /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ + if (apsFenceSyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); + } + /* Free memory allocated to hold the internal list of update values */ + if (pui32IntAllocatedUpdateValues) + { + OSFreeMem(pui32IntAllocatedUpdateValues); + pui32IntAllocatedUpdateValues = NULL; + } + OSLockRelease(psRayContext->hLock); + + return PVRSRV_OK; + +fail_cmdaquire: + SyncAddrListRollbackCheckpoints(psRayContext->psDeviceNode, &psRayContext->sSyncAddrListFence); + SyncAddrListRollbackCheckpoints(psRayContext->psDeviceNode, &psRayContext->sSyncAddrListUpdate); +fail_alloc_update_values_mem: + if 
(iUpdateFence != PVRSRV_NO_FENCE) + { + SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData); + } +fail_create_output_fence: + /* Drop the references taken on the sync checkpoints in the + * resolved input fence */ + SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); +fail_resolve_input_fence: + +err_populate_sync_addr_list: + /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ + if (apsFenceSyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); + } + /* Free memory allocated to hold the internal list of update values */ + if (pui32IntAllocatedUpdateValues) + { + OSFreeMem(pui32IntAllocatedUpdateValues); + pui32IntAllocatedUpdateValues = NULL; + } + + OSLockRelease(psRayContext->hLock); + return eError; +} + +/****************************************************************************** + End of file (rgxray.c) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgxray.h b/drivers/gpu/drm/phytium/octopus/rgxray.h new file mode 100644 index 000000000000..36ed79357414 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxray.h @@ -0,0 +1,112 @@ +/*************************************************************************/ /*! +@File +@Title RGX ray functionality +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Header for the RGX ray functionality +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(RGXRAY_H_) +#define RGXRAY_H_ + +#include "devicemem.h" +#include "device.h" +#include "rgxfwutils.h" +#include "rgxdebug.h" +#include "pvr_notifier.h" + +#include "sync_server.h" +#include "sync_internal.h" +#include "connection_server.h" + + +typedef struct _RGX_SERVER_RAY_CONTEXT_ RGX_SERVER_RAY_CONTEXT; + +/*! +******************************************************************************* + @Function PVRSRVRGXCreateRayContextKM + + @Description Server-side implementation of RGXCreateRayContext +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXCreateRayContextKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32Priority, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32ContextFlags, + IMG_UINT32 ui32StaticRayContextStateSize, + IMG_PBYTE pStaticRayContextState, + RGX_SERVER_RAY_CONTEXT **ppsRayContext); + +/*! +******************************************************************************* + @Function PVRSRVRGXDestroyRayContextKM + + @Description + Server-side implementation of RGXDestroyRayContext + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXDestroyRayContextKM(RGX_SERVER_RAY_CONTEXT *psRayContext); + + +/*! 
+******************************************************************************* + @Function PVRSRVRGXKickRDMKM + + @Description + Server-side implementation of RGXKickRDM + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXKickRDMKM(RGX_SERVER_RAY_CONTEXT *psRayContext, + IMG_UINT32 ui32ClientCacheOpSeqNum, + IMG_UINT32 ui32ClientUpdateCount, + SYNC_PRIMITIVE_BLOCK **pauiClientUpdateUFODevVarBlock, + IMG_UINT32 *paui32ClientUpdateSyncOffset, + IMG_UINT32 *paui32ClientUpdateValue, + PVRSRV_FENCE iCheckFence, + PVRSRV_TIMELINE iUpdateTimeline, + PVRSRV_FENCE *piUpdateFence, + IMG_CHAR pcszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH], + IMG_UINT32 ui32CmdSize, + IMG_PBYTE pui8DMCmd, + IMG_UINT32 ui32PDumpFlags, + IMG_UINT32 ui32ExtJobRef); + + +#endif /* RGXRAY_H_ */ diff --git a/drivers/gpu/drm/phytium/octopus/rgxregconfig.c b/drivers/gpu/drm/phytium/octopus/rgxregconfig.c new file mode 100644 index 000000000000..7b3786d1383f --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxregconfig.c @@ -0,0 +1,315 @@ +/*************************************************************************/ /*! +@File +@Title RGX Register configuration +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description RGX Regconfig routines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "rgxregconfig.h" +#include "pvr_debug.h" +#include "rgxutils.h" +#include "rgxfwutils.h" +#include "device.h" +#include "sync_internal.h" +#include "pdump_km.h" +#include "pvrsrv.h" + +PVRSRV_ERROR PVRSRVRGXSetRegConfigTypeKM(CONNECTION_DATA * psDevConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT8 ui8RegCfgType) +{ +#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig; + RGXFWIF_REG_CFG_TYPE eRegCfgType = (RGXFWIF_REG_CFG_TYPE) ui8RegCfgType; + + PVR_UNREFERENCED_PARAMETER(psDevConnection); + + OSLockAcquire(psRegCfg->hLock); + + if (eRegCfgType < psRegCfg->eRegCfgTypeToPush) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Register configuration requested (%d) is not valid since it has to be at least %d." + " Configurations of different types need to go in order", + __func__, + eRegCfgType, + psRegCfg->eRegCfgTypeToPush)); + OSLockRelease(psRegCfg->hLock); + return PVRSRV_ERROR_REG_CONFIG_INVALID_TYPE; + } + + psRegCfg->eRegCfgTypeToPush = eRegCfgType; + + OSLockRelease(psRegCfg->hLock); + + return eError; +#else + PVR_UNREFERENCED_PARAMETER(psDevConnection); + + PVR_DPF((PVR_DBG_ERROR, + "%s: Feature disabled. 
Compile with SUPPORT_USER_REGISTER_CONFIGURATION", + __func__)); + return PVRSRV_ERROR_FEATURE_DISABLED; +#endif +} + +PVRSRV_ERROR PVRSRVRGXAddRegConfigKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32RegAddr, + IMG_UINT64 ui64RegValue, + IMG_UINT64 ui64RegMask) +{ +#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_KCCB_CMD sRegCfgCmd; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + OSLockAcquire(psRegCfg->hLock); + + if (psRegCfg->bEnabled) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Cannot add record whilst register configuration active.", + __func__)); + OSLockRelease(psRegCfg->hLock); + return PVRSRV_ERROR_REG_CONFIG_ENABLED; + } + if (psRegCfg->ui32NumRegRecords == RGXFWIF_REG_CFG_MAX_SIZE) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Register configuration full.", + __func__)); + OSLockRelease(psRegCfg->hLock); + return PVRSRV_ERROR_REG_CONFIG_FULL; + } + + sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG; + sRegCfgCmd.uCmdData.sRegConfigData.sRegConfig.ui64Addr = (IMG_UINT64) ui32RegAddr; + sRegCfgCmd.uCmdData.sRegConfigData.sRegConfig.ui64Value = ui64RegValue; + sRegCfgCmd.uCmdData.sRegConfigData.sRegConfig.ui64Mask = ui64RegMask; + sRegCfgCmd.uCmdData.sRegConfigData.eRegConfigType = psRegCfg->eRegCfgTypeToPush; + sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_ADD; + + eError = RGXScheduleCommand(psDeviceNode->pvDevice, + RGXFWIF_DM_GP, + &sRegCfgCmd, + 0, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: RGXScheduleCommand failed. Error:%u", + __func__, + eError)); + OSLockRelease(psRegCfg->hLock); + return eError; + } + + psRegCfg->ui32NumRegRecords++; + + OSLockRelease(psRegCfg->hLock); + + return eError; +#else + PVR_UNREFERENCED_PARAMETER(psConnection); + + PVR_DPF((PVR_DBG_ERROR, + "%s: Feature disabled. 
Compile with SUPPORT_USER_REGISTER_CONFIGURATION", + __func__)); + return PVRSRV_ERROR_FEATURE_DISABLED; +#endif +} + +PVRSRV_ERROR PVRSRVRGXClearRegConfigKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode) +{ +#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_KCCB_CMD sRegCfgCmd; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + OSLockAcquire(psRegCfg->hLock); + + if (psRegCfg->bEnabled) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Attempt to clear register configuration whilst active.", + __func__)); + OSLockRelease(psRegCfg->hLock); + return PVRSRV_ERROR_REG_CONFIG_ENABLED; + } + + sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG; + sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_CLEAR; + + eError = RGXScheduleCommand(psDeviceNode->pvDevice, + RGXFWIF_DM_GP, + &sRegCfgCmd, + 0, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: RGXScheduleCommand failed. Error:%u", + __func__, + eError)); + OSLockRelease(psRegCfg->hLock); + return eError; + } + + psRegCfg->ui32NumRegRecords = 0; + psRegCfg->eRegCfgTypeToPush = RGXFWIF_REG_CFG_TYPE_PWR_ON; + + OSLockRelease(psRegCfg->hLock); + + return eError; +#else + PVR_DPF((PVR_DBG_ERROR, + "%s: Feature disabled. 
Compile with SUPPORT_USER_REGISTER_CONFIGURATION", + __func__)); + + PVR_UNREFERENCED_PARAMETER(psConnection); + + return PVRSRV_ERROR_FEATURE_DISABLED; +#endif +} + +PVRSRV_ERROR PVRSRVRGXEnableRegConfigKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode) +{ +#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_KCCB_CMD sRegCfgCmd; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + OSLockAcquire(psRegCfg->hLock); + + sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG; + sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_ENABLE; + + eError = RGXScheduleCommand(psDeviceNode->pvDevice, + RGXFWIF_DM_GP, + &sRegCfgCmd, + 0, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: RGXScheduleCommand failed. Error:%u", + __func__, + eError)); + OSLockRelease(psRegCfg->hLock); + return eError; + } + + psRegCfg->bEnabled = IMG_TRUE; + + OSLockRelease(psRegCfg->hLock); + + return eError; +#else + PVR_UNREFERENCED_PARAMETER(psConnection); + + PVR_DPF((PVR_DBG_ERROR, + "%s: Feature disabled. 
Compile with SUPPORT_USER_REGISTER_CONFIGURATION", + __func__)); + return PVRSRV_ERROR_FEATURE_DISABLED; +#endif +} + +PVRSRV_ERROR PVRSRVRGXDisableRegConfigKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode) +{ +#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) + PVRSRV_ERROR eError = PVRSRV_OK; + RGXFWIF_KCCB_CMD sRegCfgCmd; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + OSLockAcquire(psRegCfg->hLock); + + sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG; + sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_DISABLE; + + eError = RGXScheduleCommand(psDeviceNode->pvDevice, + RGXFWIF_DM_GP, + &sRegCfgCmd, + 0, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: RGXScheduleCommand failed. Error:%u", + __func__, + eError)); + OSLockRelease(psRegCfg->hLock); + return eError; + } + + psRegCfg->bEnabled = IMG_FALSE; + + OSLockRelease(psRegCfg->hLock); + + return eError; +#else + PVR_DPF((PVR_DBG_ERROR, + "%s: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION", + __func__)); + PVR_UNREFERENCED_PARAMETER(psConnection); + + return PVRSRV_ERROR_FEATURE_DISABLED; +#endif +} + +/****************************************************************************** + End of file (rgxregconfig.c) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgxregconfig.h b/drivers/gpu/drm/phytium/octopus/rgxregconfig.h new file mode 100644 index 000000000000..9b17a45c036a --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxregconfig.h @@ -0,0 +1,130 @@ +/*************************************************************************/ /*! +@File +@Title RGX register configuration functionality +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Description Header for the RGX register configuration functionality +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(RGXREGCONFIG_H) +#define RGXREGCONFIG_H + +#include "pvr_debug.h" +#include "rgxutils.h" +#include "rgxfwutils.h" +#include "rgx_fwif_km.h" + +/*! +******************************************************************************* + @Function PVRSRVRGXSetRegConfigTypeKM + + @Description + Server-side implementation of RGXSetRegConfig + + @Input psDeviceNode - RGX Device node + @Input ui8RegPowerIsland - Reg configuration + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXSetRegConfigTypeKM(CONNECTION_DATA * psDevConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT8 ui8RegPowerIsland); +/*! +******************************************************************************* + @Function PVRSRVRGXAddRegConfigKM + + @Description + Server-side implementation of RGXAddRegConfig + + @Input psDeviceNode - RGX Device node + @Input ui64RegAddr - Register address + @Input ui64RegValue - Reg value + @Input ui64RegMask - Reg mask + + @Return PVRSRV_ERROR +******************************************************************************/ + +PVRSRV_ERROR PVRSRVRGXAddRegConfigKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui64RegAddr, + IMG_UINT64 ui64RegValue, + IMG_UINT64 ui64RegMask); + +/*! 
+******************************************************************************* + @Function PVRSRVRGXClearRegConfigKM + + @Description + Server-side implementation of RGXClearRegConfig + + @Input psDeviceNode - RGX Device node + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXClearRegConfigKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode); + +/*! +******************************************************************************* + @Function PVRSRVRGXEnableRegConfigKM + + @Description + Server-side implementation of RGXEnableRegConfig + + @Input psDeviceNode - RGX Device node + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXEnableRegConfigKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode); + +/*! +******************************************************************************* + @Function PVRSRVRGXDisableRegConfigKM + + @Description + Server-side implementation of RGXDisableRegConfig + + @Input psDeviceNode - RGX Device node + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXDisableRegConfigKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode); + +#endif /* RGXREGCONFIG_H */ diff --git a/drivers/gpu/drm/phytium/octopus/rgxshader.c b/drivers/gpu/drm/phytium/octopus/rgxshader.c new file mode 100644 index 000000000000..c5d7e463abe7 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxshader.c @@ -0,0 +1,313 @@ +/*************************************************************************/ /*! +@File rgxshader.c +@Title TQ Shader Load +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Shader code and info are shared for all context on the device. + If allocation doesn't already exist, read shader data from file + and allocate PMR memory. 
PMR memory is not deallocated until + device deinit. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "rgxshader.h" +#include "osfunc_common.h" +#include "rgxdevice.h" +#include "pdump_km.h" +#include "physmem.h" +#include "ri_server.h" +#include "pvr_ricommon.h" + +static void +RGXShaderReadHeader(OS_FW_IMAGE *psShaderFW, RGX_SHADER_HEADER *psHeader) +{ + const void * pvData; + + pvData = OSFirmwareData(psShaderFW); + + OSDeviceMemCopy(psHeader, pvData, sizeof(RGX_SHADER_HEADER)); +} + +static size_t +RGXShaderCLIMemSize(OS_FW_IMAGE *psShaderFW) +{ + RGX_SHADER_HEADER sHeader; + + RGXShaderReadHeader(psShaderFW, &sHeader); + + return sHeader.ui32SizeClientMem; +} + +static size_t +RGXShaderUSCMemSize(OS_FW_IMAGE *psShaderFW) +{ + RGX_SHADER_HEADER sHeader; + + RGXShaderReadHeader(psShaderFW, &sHeader); + + return sHeader.ui32SizeFragment; +} + +static void * +RGXShaderCLIMem(OS_FW_IMAGE *psShaderFW) +{ + return (void*)OSFirmwareData(psShaderFW); +} + +static void * +RGXShaderUSCMem(OS_FW_IMAGE *psShaderFW) +{ + IMG_PBYTE pui8Data; + + pui8Data = (IMG_PBYTE)OSFirmwareData(psShaderFW); + + pui8Data += RGXShaderCLIMemSize(psShaderFW); + + return (void*) pui8Data; +} + +#define RGX_SHADER_FILENAME_MAX_SIZE ((sizeof(RGX_SH_FILENAME)+ \ + RGX_BVNC_STR_SIZE_MAX)) + +static void +_GetShaderFileName(PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_CHAR * pszShaderFilenameStr, + IMG_CHAR * pszShaderpFilenameStr) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = 
psDeviceNode->pvDevice; + + OSSNPrintf(pszShaderFilenameStr, RGX_SHADER_FILENAME_MAX_SIZE, + "%s." RGX_BVNC_STR_FMTSPEC, + RGX_SH_FILENAME, + psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V, + psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C); + + OSSNPrintf(pszShaderpFilenameStr, RGX_SHADER_FILENAME_MAX_SIZE, + "%s." RGX_BVNC_STRP_FMTSPEC, + RGX_SH_FILENAME, + psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V, + psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C); +} + +PVRSRV_ERROR +PVRSRVTQLoadShaders(PVRSRV_DEVICE_NODE * psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + OS_FW_IMAGE *psShaderFW; + RGX_SHADER_HEADER sHeader; + IMG_UINT32 ui32MappingTable = 0; + IMG_UINT32 ui32NumPages; + IMG_CHAR aszShaderFilenameStr[RGX_SHADER_FILENAME_MAX_SIZE]; + IMG_CHAR aszShaderpFilenameStr[RGX_SHADER_FILENAME_MAX_SIZE]; + const IMG_CHAR *pszShaderFilenameStr = aszShaderFilenameStr; + size_t uiNumBytes; + PVRSRV_ERROR eError; + + _GetShaderFileName(psDeviceNode, aszShaderFilenameStr, aszShaderpFilenameStr); + + eError = OSLoadFirmware(psDeviceNode, aszShaderFilenameStr, NULL, &psShaderFW); + + if (eError != PVRSRV_OK) + { + eError = OSLoadFirmware(psDeviceNode, aszShaderpFilenameStr, + NULL, &psShaderFW); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to load shader binary file %s (%s)", + __func__, + aszShaderpFilenameStr, + PVRSRVGetErrorString(eError))); + eError = PVRSRV_ERROR_UNABLE_TO_FIND_RESOURCE; + goto failed_init; + } + + pszShaderFilenameStr = aszShaderpFilenameStr; + } + + PVR_LOG(("Shader binary image '%s' loaded", pszShaderFilenameStr)); + + RGXShaderReadHeader(psShaderFW, &sHeader); + + ui32NumPages = (sHeader.ui32SizeFragment / RGX_BIF_PM_PHYSICAL_PAGE_SIZE) + 1; + + PDUMPCOMMENT("Allocate TDM USC PMR Block (Pages %08X)", ui32NumPages); + + eError = PhysmemNewRamBackedPMR(NULL, + psDeviceNode, + (IMG_DEVMEM_SIZE_T)ui32NumPages * 
RGX_BIF_PM_PHYSICAL_PAGE_SIZE, + (IMG_DEVMEM_SIZE_T)ui32NumPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE, + 1, + 1, + &ui32MappingTable, + RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT, + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE + | PVRSRV_MEMALLOCFLAG_GPU_READABLE + | PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT + | PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER, + sizeof("tquscpmr"), + "tquscpmr", + PVR_SYS_ALLOC_PID, + (PMR**)&psDevInfo->hTQUSCSharedMem, + PDUMP_NONE); + if (eError != PVRSRV_OK) + { + PVR_LOG(("%s: Unexpected error from PhysmemNewRamBackedPMR (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto failed_firmware; + } + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + eError = RIWritePMREntryWithOwnerKM(psDevInfo->hTQUSCSharedMem, PVR_SYS_ALLOC_PID); + if (eError != PVRSRV_OK) + { + PVR_LOG(("%s: Unexpected error from RIWritePMREntryWithOwnerKM (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto failed_uscpmr; + } +#endif + + eError = PMR_WriteBytes(psDevInfo->hTQUSCSharedMem, 0, RGXShaderUSCMem(psShaderFW), RGXShaderUSCMemSize(psShaderFW), &uiNumBytes); + if (eError != PVRSRV_OK) + { + PVR_LOG(("%s: Unexpected error from PMR_WriteBytes (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto failed_uscpmr; + } + + ui32NumPages = (sHeader.ui32SizeClientMem / RGX_BIF_PM_PHYSICAL_PAGE_SIZE) + 1; + + PDUMPCOMMENT("Allocate TDM Client PMR Block (Pages %08X)", ui32NumPages); + + eError = PhysmemNewRamBackedPMR(NULL, + psDeviceNode, + (IMG_DEVMEM_SIZE_T)ui32NumPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE, + (IMG_DEVMEM_SIZE_T)ui32NumPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE, + 1, + 1, + &ui32MappingTable, + RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT, + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE + | PVRSRV_MEMALLOCFLAG_CPU_READABLE + | PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT + | PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER, + sizeof("tqclipmr"), + "tqclipmr", + PVR_SYS_ALLOC_PID, + (PMR**)&psDevInfo->hTQCLISharedMem, + PDUMP_NONE); + if (eError != PVRSRV_OK) + { + PVR_LOG(("%s: Unexpected error 
from PhysmemNewRamBackedPMR (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto failed_uscpmr; + } + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + eError = RIWritePMREntryWithOwnerKM(psDevInfo->hTQCLISharedMem, PVR_SYS_ALLOC_PID); + if (eError != PVRSRV_OK) + { + PVR_LOG(("%s: Unexpected error from RIWritePMREntryWithOwnerKM (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto failed_clipmr; + } +#endif + + eError = PMR_WriteBytes(psDevInfo->hTQCLISharedMem, 0, RGXShaderCLIMem(psShaderFW), RGXShaderCLIMemSize(psShaderFW), &uiNumBytes); + if (eError != PVRSRV_OK) + { + PVR_LOG(("%s: Unexpected error from PMR_WriteBytes (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto failed_clipmr; + } + + OSUnloadFirmware(psShaderFW); + + PVR_ASSERT(psDevInfo->hTQUSCSharedMem != NULL); + PVR_ASSERT(psDevInfo->hTQCLISharedMem != NULL); + + return PVRSRV_OK; + +failed_clipmr: + PMRUnrefPMR(psDevInfo->hTQCLISharedMem); +failed_uscpmr: + PMRUnrefPMR(psDevInfo->hTQUSCSharedMem); +failed_firmware: + OSUnloadFirmware(psShaderFW); +failed_init: + return eError; +} + +void +PVRSRVTQAcquireShaders(PVRSRV_DEVICE_NODE * psDeviceNode, + PMR ** ppsCLIPMRMem, + PMR ** ppsUSCPMRMem) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + PVR_ASSERT(psDevInfo->hTQUSCSharedMem != NULL); + PVR_ASSERT(psDevInfo->hTQCLISharedMem != NULL); + + *ppsUSCPMRMem = psDevInfo->hTQUSCSharedMem; + *ppsCLIPMRMem = psDevInfo->hTQCLISharedMem; +} + +PVRSRV_ERROR +PVRSRVTQUnloadShaders(PVRSRV_DEVICE_NODE * psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + + eError = PMRUnrefPMR(psDevInfo->hTQUSCSharedMem); + if (eError != PVRSRV_OK) + { + return eError; + } + + eError = PMRUnrefPMR(psDevInfo->hTQCLISharedMem); + if (eError != PVRSRV_OK) + { + return eError; + } + + return PVRSRV_OK; +} diff --git a/drivers/gpu/drm/phytium/octopus/rgxshader.h b/drivers/gpu/drm/phytium/octopus/rgxshader.h new file mode 100644 index 
000000000000..05cb3b6bcf84 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxshader.h @@ -0,0 +1,85 @@ +/*************************************************************************/ /*! +@File rgxshader.h +@Title TQ Shader Load +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Shader code and info are shared for all context on the device. + If allocation doesn't already exist, read shader data from file + and allocate PMR memory. PMR memory is not deallocated until + device deinit. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(RGXSHADER_H) +#define RGXSHADER_H + +#include "fwload.h" +#include "rgxtransfer_shader.h" +#include "connection_server.h" + +/*************************************************************************/ /*! +@Function PVRSRVTQLoadShaders +@Description If PMR is not allocated, reads shader binary data from file + and allocates new PMR memory. +@Input psDeviceNode Device node +@Return PVRSRV_ERROR Returns PVRSRV_OK on success. +*/ /**************************************************************************/ +PVRSRV_ERROR +PVRSRVTQLoadShaders(PVRSRV_DEVICE_NODE *psDeviceNode); + +/*************************************************************************/ /*! +@Function PVRSRVTQAcquireShaders +@Description Get handle to ready allocated shader PMR memory +@Input psDeviceNode Device node +@Output ppsCLIPMRMem Shader data used by CPU client side. +@Output ppsUSCPMRMem Shader usc code used by GPU. +*/ /**************************************************************************/ +void +PVRSRVTQAcquireShaders(PVRSRV_DEVICE_NODE *psDeviceNode, + PMR **ppsCLIPMRMem, + PMR **ppsUSCPMRMem); + +/*************************************************************************/ /*! +@Function PVRSRVTQUnLoadShaders +@Description Unref PMR memory. 
+@Input psDeviceNode Device node +@Return PVRSRV_ERROR Returns PVRSRV_OK on success. +*/ /**************************************************************************/ +PVRSRV_ERROR +PVRSRVTQUnloadShaders(PVRSRV_DEVICE_NODE *psDeviceNode); + +#endif /* RGXSHADER_H */ diff --git a/drivers/gpu/drm/phytium/octopus/rgxsignals.c b/drivers/gpu/drm/phytium/octopus/rgxsignals.c new file mode 100644 index 000000000000..580af799717d --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxsignals.c @@ -0,0 +1,99 @@ +/*************************************************************************/ /*! +@File rgxsignals.c +@Title RGX Signals routines +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description RGX Signals routines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "rgxsignals.h" + +#include "rgxmem.h" +#include "rgx_fwif_km.h" +#include "mmu_common.h" +#include "devicemem.h" +#include "rgxfwutils.h" + + +PVRSRV_ERROR +PVRSRVRGXNotifySignalUpdateKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_HANDLE hMemCtxPrivData, + IMG_DEV_VIRTADDR sDevSignalAddress) +{ + DEVMEM_MEMDESC *psFWMemContextMemDesc; + RGXFWIF_KCCB_CMD sKCCBCmd; + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); + + /* Schedule the firmware command */ + sKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_NOTIFY_SIGNAL_UPDATE; + sKCCBCmd.uCmdData.sSignalUpdateData.sDevSignalAddress = sDevSignalAddress; + eError = RGXSetFirmwareAddress(&sKCCBCmd.uCmdData.sSignalUpdateData.psFWMemContext, + psFWMemContextMemDesc, + 0, RFW_FWADDR_NOREF_FLAG); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", fail_fwaddr); + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand((PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice, + RGXFWIF_DM_GP, + &sKCCBCmd, + 0, + PDUMP_FLAGS_NONE); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to schedule the FW command %d (%s)", + __func__, + eError, PVRSRVGETERRORSTRING(eError))); + } + +fail_fwaddr: + return eError; +} diff --git a/drivers/gpu/drm/phytium/octopus/rgxsignals.h b/drivers/gpu/drm/phytium/octopus/rgxsignals.h new file mode 100644 index 000000000000..6eea1ebb3145 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxsignals.h @@ -0,0 +1,71 @@ +/*************************************************************************/ /*! +@File rgxsignals.h +@Title RGX Signals routines +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Description RGX Signals routines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(RGX_SIGNALS_H) +#define RGX_SIGNALS_H + +#include "img_types.h" +#include "pvrsrv_error.h" +#include "connection_server.h" +#include "device.h" + +/*! +******************************************************************************* + + @Function PVRSRVRGXNotifySignalUpdateKM + + @Description Server-side implementation of RGXNotifySignalUpdate + + @Input hMemCtxPrivData - memory context private data + @Input sDevSignalAddress - device virtual address of the updated signal + + @Return PVRSRV_ERROR + +******************************************************************************/ + +PVRSRV_ERROR PVRSRVRGXNotifySignalUpdateKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_HANDLE hMemCtxPrivData, + IMG_DEV_VIRTADDR sDevSignalAddress); + +#endif diff --git a/drivers/gpu/drm/phytium/octopus/rgxsrvinit.c b/drivers/gpu/drm/phytium/octopus/rgxsrvinit.c new file mode 100644 index 000000000000..e37a6b2d7714 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxsrvinit.c @@ -0,0 +1,1423 @@ +/*************************************************************************/ /*! +@File +@Title Services initialisation routines +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Description Device specific functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "img_defs.h" +#include "srvinit.h" +#include "pvr_debug.h" +#include "osfunc.h" +#include "km_apphint_defs.h" + +#include "htbuffer_types.h" +#include "htbuffer_init.h" + +#include "devicemem.h" +#include "devicemem_pdump.h" + +#include "rgx_fwif_km.h" +#include "pdump_km.h" +#include "rgx_compat_bvnc.h" + +#include "rgxdefs_km.h" +#include "pvrsrv.h" + +#include "rgxinit.h" + +#include "rgx_compat_bvnc.h" + +#include "osfunc.h" + +#include "rgxdefs_km.h" + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +#include "virt_validation_defs.h" +#endif + +#include "rgx_fwif_hwperf.h" +#include "rgx_hwperf_table.h" + +static const RGXFW_HWPERF_CNTBLK_TYPE_MODEL gasCntBlkTypeModel[] = +{ +#define X(a, b, c, d, e, f, g) {a, b, 0xFF, d, e, f, NULL} +RGX_CNT_BLK_TYPE_MODEL_DIRECT_LIST, +RGX_CNT_BLK_TYPE_MODEL_INDIRECT_LIST +#undef X +}; + +#include "fwload.h" +#include "rgxlayer_impl.h" +#include "rgxfwimageutils.h" +#include "rgxfwutils.h" + +#include "rgx_bvnc_defs_km.h" + +#include "rgxdevice.h" +#include "pvrsrv.h" + +#if defined(SUPPORT_TRUSTED_DEVICE) +#include "rgxdevice.h" +#include "pvrsrv_device.h" +#endif + +#define DRIVER_MODE_HOST 0 /* AppHint value for host driver mode */ + +#define HW_PERF_FILTER_DEFAULT 0x00000000 /* Default to no HWPerf */ +#define HW_PERF_FILTER_DEFAULT_ALL_ON 0xFFFFFFFF /* All events */ +#define AVAIL_POW_UNITS_MASK_DEFAULT 
(PVRSRV_APPHINT_HWVALAVAILABLESPUMASK) + +#if defined(SUPPORT_VALIDATION) +#include "pvrsrv_apphint.h" +#endif + +#include "os_srvinit_param.h" + +#if defined(__linux__) +#include "km_apphint.h" +#else +/*! +******************************************************************************* + * AppHint mnemonic data type helper tables +******************************************************************************/ +/* apphint map of name vs. enable flag */ +static SRV_INIT_PARAM_UINT32_LOOKUP htb_loggroup_tbl[] = { +#define X(a, b) { #b, HTB_LOG_GROUP_FLAG(a) }, + HTB_LOG_SFGROUPLIST +#undef X +}; +/* apphint map of arg vs. OpMode */ +static SRV_INIT_PARAM_UINT32_LOOKUP htb_opmode_tbl[] = { + { "droplatest", HTB_OPMODE_DROPLATEST}, + { "dropoldest", HTB_OPMODE_DROPOLDEST}, + /* HTB should never be started in HTB_OPMODE_BLOCK + * as this can lead to deadlocks + */ +}; + +static SRV_INIT_PARAM_UINT32_LOOKUP fwt_logtype_tbl[] = { + { "trace", 0}, + { "none", 0} +#if defined(SUPPORT_TBI_INTERFACE) + , { "tbi", 1} +#endif +}; + +static SRV_INIT_PARAM_UINT32_LOOKUP timecorr_clk_tbl[] = { + { "mono", 0 }, + { "mono_raw", 1 }, + { "sched", 2 } +}; + +static SRV_INIT_PARAM_UINT32_LOOKUP fwt_loggroup_tbl[] = { RGXFWIF_LOG_GROUP_NAME_VALUE_MAP }; + +/* + * Services AppHints initialisation + */ +#define X(a, b, c, d, e) SrvInitParamInit ## b(a, d, e) +APPHINT_LIST_ALL +#undef X +#endif /* defined(__linux__) */ + +/* + * Container for all the apphints used by this module + */ +typedef struct _RGX_SRVINIT_APPHINTS_ +{ + IMG_UINT32 ui32DriverMode; + IMG_BOOL bEnableSignatureChecks; + IMG_UINT32 ui32SignatureChecksBufSize; + + IMG_BOOL bAssertOnOutOfMem; + IMG_BOOL bAssertOnHWRTrigger; +#if defined(SUPPORT_VALIDATION) + IMG_UINT32 ui32RenderKillingCtl; + IMG_UINT32 ui32CDMTDMKillingCtl; + IMG_BOOL bValidateIrq; + IMG_BOOL bValidateSOCUSCTimer; + IMG_UINT32 ui32AvailablePowUnitsMask; + IMG_BOOL bInjectPowUnitsStateMaskChange; + IMG_BOOL bEnablePowUnitsStateMaskChange; + IMG_UINT32 
ui32FBCDCVersionOverride; + IMG_UINT32 aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_LAST]; + IMG_UINT32 aui32USRMNumRegions[RGXFWIF_USRM_DM_LAST]; + IMG_UINT64 aui64UVBRMNumRegions[RGXFWIF_UVBRM_DM_LAST]; +#endif + IMG_BOOL bCheckMlist; + IMG_BOOL bDisableClockGating; + IMG_BOOL bDisableDMOverlap; + IMG_BOOL bDisableFEDLogging; + IMG_BOOL bDisablePDP; + IMG_BOOL bEnableDMKillRand; + IMG_BOOL bEnableRandomCsw; + IMG_BOOL bEnableSoftResetCsw; + IMG_BOOL bFilteringMode; + IMG_BOOL bHWPerfDisableCounterFilter; + IMG_BOOL bZeroFreelist; + IMG_UINT32 ui32EnableFWContextSwitch; + IMG_UINT32 ui32FWContextSwitchProfile; + IMG_UINT32 ui32ISPSchedulingLatencyMode; + IMG_UINT32 ui32HWPerfFWBufSize; + IMG_UINT32 ui32HWPerfHostBufSize; + IMG_UINT32 ui32HWPerfFilter0; + IMG_UINT32 ui32HWPerfFilter1; + IMG_UINT32 ui32HWPerfHostFilter; + IMG_UINT32 ui32TimeCorrClock; + IMG_UINT32 ui32HWRDebugDumpLimit; + IMG_UINT32 ui32JonesDisableMask; + IMG_UINT32 ui32LogType; + IMG_UINT32 ui32TruncateMode; + IMG_UINT32 ui32KCCBSizeLog2; + IMG_UINT32 ui32CDMArbitrationMode; + FW_PERF_CONF eFirmwarePerf; + RGX_ACTIVEPM_CONF eRGXActivePMConf; + RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf; + + IMG_BOOL bEnableTrustedDeviceAceConfig; + IMG_UINT32 ui32FWContextSwitchCrossDM; +#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__) + IMG_UINT32 ui32PhysMemTestPasses; +#endif +} RGX_SRVINIT_APPHINTS; + +/*! 
+******************************************************************************* + + @Function GetApphints + + @Description Read init time apphints and initialise internal variables + + @Input psHints : Pointer to apphints container + + @Return void + +******************************************************************************/ +static INLINE void GetApphints(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_SRVINIT_APPHINTS *psHints) +{ + void *pvParamState = SrvInitParamOpen(); + IMG_UINT32 ui32ParamTemp; + + /* + * NB AppHints initialised to a default value via SrvInitParamInit* macros above + */ + SrvInitParamGetUINT32(pvParamState, DriverMode, psHints->ui32DriverMode); + SrvInitParamGetBOOL(pvParamState, EnableSignatureChecks, psHints->bEnableSignatureChecks); + SrvInitParamGetUINT32(pvParamState, SignatureChecksBufSize, psHints->ui32SignatureChecksBufSize); + + SrvInitParamGetBOOL(pvParamState, AssertOutOfMemory, psHints->bAssertOnOutOfMem); + SrvInitParamGetBOOL(pvParamState, AssertOnHWRTrigger, psHints->bAssertOnHWRTrigger); + SrvInitParamGetBOOL(pvParamState, CheckMList, psHints->bCheckMlist); + SrvInitParamGetBOOL(pvParamState, DisableClockGating, psHints->bDisableClockGating); + SrvInitParamGetBOOL(pvParamState, DisableDMOverlap, psHints->bDisableDMOverlap); + SrvInitParamGetBOOL(pvParamState, DisableFEDLogging, psHints->bDisableFEDLogging); + SrvInitParamGetUINT32(pvParamState, EnableAPM, ui32ParamTemp); + psHints->eRGXActivePMConf = ui32ParamTemp; + SrvInitParamGetBOOL(pvParamState, EnableGenericDMKillingRandMode, psHints->bEnableDMKillRand); + SrvInitParamGetBOOL(pvParamState, EnableRandomContextSwitch, psHints->bEnableRandomCsw); + SrvInitParamGetBOOL(pvParamState, EnableSoftResetContextSwitch, psHints->bEnableSoftResetCsw); + SrvInitParamGetUINT32(pvParamState, EnableFWContextSwitch, psHints->ui32EnableFWContextSwitch); + SrvInitParamGetUINT32(pvParamState, EnableRDPowerIsland, ui32ParamTemp); + psHints->eRGXRDPowerIslandConf = ui32ParamTemp; + 
SrvInitParamGetUINT32(pvParamState, FirmwarePerf, ui32ParamTemp); + psHints->eFirmwarePerf = ui32ParamTemp; + SrvInitParamGetUINT32(pvParamState, FWContextSwitchProfile, psHints->ui32FWContextSwitchProfile); + SrvInitParamGetBOOL(pvParamState, HWPerfDisableCounterFilter, psHints->bHWPerfDisableCounterFilter); + SrvInitParamGetUINT32(pvParamState, HWPerfHostBufSizeInKB, psHints->ui32HWPerfHostBufSize); + SrvInitParamGetUINT32(pvParamState, HWPerfFWBufSizeInKB, psHints->ui32HWPerfFWBufSize); + SrvInitParamGetUINT32(pvParamState, KernelCCBSizeLog2, psHints->ui32KCCBSizeLog2); + SrvInitParamGetUINT32(pvParamState, ISPSchedulingLatencyMode, psHints->ui32ISPSchedulingLatencyMode); + SrvInitParamGetUINT32(pvParamState, CDMArbitrationOverride, psHints->ui32CDMArbitrationMode); +#if defined(__linux__) + /* name changes */ + { + IMG_UINT64 ui64Tmp; + SrvInitParamGetBOOL(pvParamState, DisablePDumpPanic, psHints->bDisablePDP); + SrvInitParamGetUINT64(pvParamState, HWPerfFWFilter, ui64Tmp); + psHints->ui32HWPerfFilter0 = (IMG_UINT32)(ui64Tmp & 0xffffffffllu); + psHints->ui32HWPerfFilter1 = (IMG_UINT32)((ui64Tmp >> 32) & 0xffffffffllu); + } +#else + SrvInitParamUnreferenced(DisablePDumpPanic); + SrvInitParamUnreferenced(HWPerfFWFilter); + SrvInitParamUnreferenced(RGXBVNC); +#endif + SrvInitParamGetUINT32(pvParamState, HWPerfHostFilter, psHints->ui32HWPerfHostFilter); + SrvInitParamGetUINT32List(pvParamState, TimeCorrClock, psHints->ui32TimeCorrClock); + SrvInitParamGetUINT32(pvParamState, HWRDebugDumpLimit, ui32ParamTemp); + psHints->ui32HWRDebugDumpLimit = MIN(ui32ParamTemp, RGXFWIF_HWR_DEBUG_DUMP_ALL); + SrvInitParamGetUINT32(pvParamState, JonesDisableMask, ui32ParamTemp); + psHints->ui32JonesDisableMask = ui32ParamTemp & RGX_CR_JONES_FIX__ROGUE3__DISABLE_CLRMSK; + + SrvInitParamGetBOOL(pvParamState, NewFilteringMode, psHints->bFilteringMode); + SrvInitParamGetUINT32(pvParamState, TruncateMode, psHints->ui32TruncateMode); + + SrvInitParamGetBOOL(pvParamState, ZeroFreelist, 
psHints->bZeroFreelist); +#if defined(__linux__) + SrvInitParamGetUINT32(pvParamState, FWContextSwitchCrossDM, psHints->ui32FWContextSwitchCrossDM); +#else + SrvInitParamUnreferenced(FWContextSwitchCrossDM); +#endif + +#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__) + SrvInitParamGetUINT32(pvParamState, PhysMemTestPasses, psHints->ui32PhysMemTestPasses); +#endif + +#if defined(SUPPORT_VALIDATION) + SrvInitParamGetUINT32(pvParamState, KillingCtl, psHints->ui32RenderKillingCtl); + SrvInitParamGetUINT32(pvParamState, CDMTDMKillingCtl, psHints->ui32CDMTDMKillingCtl); + SrvInitParamGetBOOL(pvParamState, ValidateIrq, psHints->bValidateIrq); + SrvInitParamGetBOOL(pvParamState, ValidateSOCUSCTimer, psHints->bValidateSOCUSCTimer); + SrvInitParamGetUINT32(pvParamState, HWValAvailableSPUMask, psHints->ui32AvailablePowUnitsMask); + SrvInitParamGetBOOL(pvParamState, GPUUnitsPowerChange, psHints->bInjectPowUnitsStateMaskChange); + SrvInitParamGetBOOL(pvParamState, HWValEnableSPUPowerMaskChange, psHints->bEnablePowUnitsStateMaskChange); + SrvInitParamGetUINT32(pvParamState, FBCDCVersionOverride, psHints->ui32FBCDCVersionOverride); + + /* Apphints for Unified Store virtual partitioning. */ + SrvInitParamGetUINT32(pvParamState, USRMNumRegionsVDM, psHints->aui32USRMNumRegions[RGXFWIF_USRM_DM_VDM]); + SrvInitParamGetUINT32(pvParamState, USRMNumRegionsDDM, psHints->aui32USRMNumRegions[RGXFWIF_USRM_DM_DDM]); + SrvInitParamGetUINT32(pvParamState, USRMNumRegionsCDM, psHints->aui32USRMNumRegions[RGXFWIF_USRM_DM_CDM]); + SrvInitParamGetUINT32(pvParamState, USRMNumRegionsPDM, psHints->aui32USRMNumRegions[RGXFWIF_USRM_DM_PDM]); + SrvInitParamGetUINT32(pvParamState, USRMNumRegionsTDM, psHints->aui32USRMNumRegions[RGXFWIF_USRM_DM_TDM]); + + /* Apphints for UVB virtual partitioning. 
*/ + SrvInitParamGetUINT64(pvParamState, UVBRMNumRegionsVDM, psHints->aui64UVBRMNumRegions[RGXFWIF_UVBRM_DM_VDM]); + SrvInitParamGetUINT64(pvParamState, UVBRMNumRegionsDDM, psHints->aui64UVBRMNumRegions[RGXFWIF_UVBRM_DM_DDM]); + + /* Apphints for TPU trilinear frac masking */ + SrvInitParamGetUINT32(pvParamState, TPUTrilinearFracMaskPDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_PDM]); + SrvInitParamGetUINT32(pvParamState, TPUTrilinearFracMaskVDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_VDM]); + SrvInitParamGetUINT32(pvParamState, TPUTrilinearFracMaskCDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_CDM]); + SrvInitParamGetUINT32(pvParamState, TPUTrilinearFracMaskTDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_TDM]); +#endif + + /* + * FW logs apphints + */ + { + IMG_UINT32 ui32LogGroup, ui32TraceOrTBI; + + SrvInitParamGetUINT32BitField(pvParamState, EnableLogGroup, ui32LogGroup); + SrvInitParamGetUINT32List(pvParamState, FirmwareLogType, ui32TraceOrTBI); + + /* Defaulting to TRACE */ + BITMASK_SET(ui32LogGroup, RGXFWIF_LOG_TYPE_TRACE); + +#if defined(SUPPORT_TBI_INTERFACE) + if (ui32TraceOrTBI == 1 /* TBI */) + { + if ((ui32LogGroup & RGXFWIF_LOG_TYPE_GROUP_MASK) == 0) + { + /* No groups configured - defaulting to MAIN group */ + BITMASK_SET(ui32LogGroup, RGXFWIF_LOG_TYPE_GROUP_MAIN); + } + BITMASK_UNSET(ui32LogGroup, RGXFWIF_LOG_TYPE_TRACE); + } +#endif + + psHints->ui32LogType = ui32LogGroup; + } + + SrvInitParamGetBOOL(pvParamState, EnableTrustedDeviceAceConfig, psHints->bEnableTrustedDeviceAceConfig); + + SrvInitParamClose(pvParamState); +} + + +/*! 
+******************************************************************************* + + @Function GetFWConfigFlags + + @Description Initialise and return FW config flags + + @Input psHints : Apphints container + @Input pui32FWConfigFlags : Pointer to config flags + + @Return void + +******************************************************************************/ +static INLINE void GetFWConfigFlags(PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_SRVINIT_APPHINTS *psHints, + IMG_UINT32 *pui32FWConfigFlags, + IMG_UINT32 *pui32FWConfigFlagsExt, + IMG_UINT32 *pui32FwOsCfgFlags) +{ + IMG_UINT32 ui32FWConfigFlags = 0; + IMG_UINT32 ui32FWConfigFlagsExt = 0; + IMG_UINT32 ui32FwOsCfgFlags = psHints->ui32FWContextSwitchCrossDM | + (psHints->ui32EnableFWContextSwitch & ~RGXFWIF_INICFG_OS_CTXSWITCH_CLRMSK); + + if (PVRSRV_VZ_MODE_IS(GUEST)) + { + ui32FWConfigFlags = 0; + ui32FWConfigFlagsExt = 0; + } + else + { + ui32FWConfigFlags |= psHints->bAssertOnOutOfMem ? RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY : 0; + ui32FWConfigFlags |= psHints->bAssertOnHWRTrigger ? RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER : 0; + ui32FWConfigFlags |= psHints->bCheckMlist ? RGXFWIF_INICFG_CHECK_MLIST_EN : 0; + ui32FWConfigFlags |= psHints->bDisableClockGating ? RGXFWIF_INICFG_DISABLE_CLKGATING_EN : 0; + ui32FWConfigFlags |= psHints->bDisableDMOverlap ? RGXFWIF_INICFG_DISABLE_DM_OVERLAP : 0; + ui32FWConfigFlags |= psHints->bDisablePDP ? RGXFWIF_INICFG_DISABLE_PDP_EN : 0; + ui32FWConfigFlags |= psHints->bEnableDMKillRand ? RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN : 0; + ui32FWConfigFlags |= psHints->bEnableRandomCsw ? RGXFWIF_INICFG_CTXSWITCH_MODE_RAND : 0; + ui32FWConfigFlags |= psHints->bEnableSoftResetCsw ? RGXFWIF_INICFG_CTXSWITCH_SRESET_EN : 0; + ui32FWConfigFlags |= (psHints->ui32HWPerfFilter0 != 0 || psHints->ui32HWPerfFilter1 != 0) ? 
RGXFWIF_INICFG_HWPERF_EN : 0; + ui32FWConfigFlags |= (psHints->ui32ISPSchedulingLatencyMode << RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT) & RGXFWIF_INICFG_ISPSCHEDMODE_MASK; +#if defined(SUPPORT_VALIDATION) +#if defined(NO_HARDWARE) && defined(PDUMP) + ui32FWConfigFlags |= psHints->bValidateIrq ? RGXFWIF_INICFG_VALIDATE_IRQ : 0; +#endif +#endif + ui32FWConfigFlags |= psHints->bHWPerfDisableCounterFilter ? RGXFWIF_INICFG_HWP_DISABLE_FILTER : 0; + ui32FWConfigFlags |= (psHints->eFirmwarePerf == FW_PERF_CONF_CUSTOM_TIMER) ? RGXFWIF_INICFG_CUSTOM_PERF_TIMER_EN : 0; + ui32FWConfigFlags |= (psHints->eFirmwarePerf == FW_PERF_CONF_POLLS) ? RGXFWIF_INICFG_POLL_COUNTERS_EN : 0; + ui32FWConfigFlags |= (psHints->ui32FWContextSwitchProfile << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) & RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK; + +#if defined(SUPPORT_VALIDATION) + ui32FWConfigFlags |= psHints->bEnablePowUnitsStateMaskChange ? RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN : 0; + ui32FWConfigFlags |= psHints->bValidateSOCUSCTimer ? RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER : 0; + + if ((ui32FWConfigFlags & RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER) && + ((psHints->eRGXActivePMConf != 0) || (psHints->eRGXRDPowerIslandConf != 0))) + { + psHints->eRGXActivePMConf = 0; + psHints->eRGXRDPowerIslandConf = 0; + PVR_DPF((PVR_DBG_WARNING, "SoC/USC Timer test needs to run with both EnableAPM and EnableRDPowerIsland disabled.\n" + "Overriding current value for both with new value 0.")); + } +#endif + ui32FWConfigFlags |= psDeviceNode->pfnHasFBCDCVersion31(psDeviceNode) ? 
RGXFWIF_INICFG_FBCDC_V3_1_EN : 0; + ui32FWConfigFlags |= (psHints->ui32CDMArbitrationMode << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT) & RGXFWIF_INICFG_CDM_ARBITRATION_MASK; + } + + if ((ui32FwOsCfgFlags & RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN) && + ((ui32FWConfigFlags & RGXFWIF_INICFG_ISPSCHEDMODE_MASK) == RGXFWIF_INICFG_ISPSCHEDMODE_NONE)) + { + ui32FwOsCfgFlags &= ~RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN; + PVR_DPF((PVR_DBG_WARNING, "ISPSchedulingLatencyMode=0 implies context switching is inoperable on DM_3D.\n" + "Overriding current value EnableFWContextSwitch=0x%x with new value 0x%x", + psHints->ui32EnableFWContextSwitch, + ui32FwOsCfgFlags & RGXFWIF_INICFG_OS_CTXSWITCH_DM_ALL)); + } + + *pui32FWConfigFlags = ui32FWConfigFlags; + *pui32FWConfigFlagsExt = ui32FWConfigFlagsExt; + *pui32FwOsCfgFlags = ui32FwOsCfgFlags; +} + + +/*! +******************************************************************************* + + @Function GetFilterFlags + + @Description Initialise and return filter flags + + @Input psHints : Apphints container + + @Return IMG_UINT32 : Filter flags + +******************************************************************************/ +static INLINE IMG_UINT32 GetFilterFlags(RGX_SRVINIT_APPHINTS *psHints) +{ + IMG_UINT32 ui32FilterFlags = 0; + + ui32FilterFlags |= psHints->bFilteringMode ? RGXFWIF_FILTCFG_NEW_FILTER_MODE : 0; + if (psHints->ui32TruncateMode == 2) + { + ui32FilterFlags |= RGXFWIF_FILTCFG_TRUNCATE_INT; + } + else if (psHints->ui32TruncateMode == 3) + { + ui32FilterFlags |= RGXFWIF_FILTCFG_TRUNCATE_HALF; + } + + return ui32FilterFlags; +} + + +/*! 
+******************************************************************************* + + @Function InitDeviceFlags + + @Description Initialise and return device flags + + @Input psHints : Apphints container + @Input pui32DeviceFlags : Pointer to device flags + + @Return void + +******************************************************************************/ +static INLINE void InitDeviceFlags(RGX_SRVINIT_APPHINTS *psHints, + IMG_UINT32 *pui32DeviceFlags) +{ + IMG_UINT32 ui32DeviceFlags = 0; + +#if defined(SUPPORT_VALIDATION) + ui32DeviceFlags |= psHints->bInjectPowUnitsStateMaskChange? RGXKM_DEVICE_STATE_GPU_UNITS_POWER_CHANGE_EN : 0; +#endif + ui32DeviceFlags |= psHints->bZeroFreelist ? RGXKM_DEVICE_STATE_ZERO_FREELIST : 0; + ui32DeviceFlags |= psHints->bDisableFEDLogging ? RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN : 0; +#if defined(SUPPORT_VALIDATION) + ui32DeviceFlags |= psHints->bEnablePowUnitsStateMaskChange ? RGXKM_DEVICE_STATE_ENABLE_SPU_UNITS_POWER_MASK_CHANGE_EN : 0; +#endif +#if defined(PVRSRV_ENABLE_CCCB_GROW) + BITMASK_SET(ui32DeviceFlags, RGXKM_DEVICE_STATE_CCB_GROW_EN); +#endif + + *pui32DeviceFlags = ui32DeviceFlags; +} + +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) +/*! 
+******************************************************************************* + + @Function RGXTDProcessFWImage + + @Description Fetch and send data used by the trusted device to complete + the FW image setup + + @Input psDeviceNode : Device node + @Input psRGXFW : Firmware blob + @Input puFWParams : Parameters used by the FW at boot time + + @Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR RGXTDProcessFWImage(PVRSRV_DEVICE_NODE *psDeviceNode, + OS_FW_IMAGE *psRGXFW, + RGX_FW_BOOT_PARAMS *puFWParams) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_TD_FW_PARAMS sTDFWParams; + PVRSRV_ERROR eError; + + if (psDevConfig->pfnTDSendFWImage == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: TDSendFWImage not implemented!", __func__)); + return PVRSRV_ERROR_NOT_IMPLEMENTED; + } + + sTDFWParams.pvFirmware = OSFirmwareData(psRGXFW); + sTDFWParams.ui32FirmwareSize = OSFirmwareSize(psRGXFW); + + if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) + { + sTDFWParams.uFWP.sMeta.sFWCodeDevVAddr = puFWParams->sMeta.sFWCodeDevVAddr; + sTDFWParams.uFWP.sMeta.sFWDataDevVAddr = puFWParams->sMeta.sFWDataDevVAddr; + sTDFWParams.uFWP.sMeta.sFWCorememCodeDevVAddr = puFWParams->sMeta.sFWCorememCodeDevVAddr; + sTDFWParams.uFWP.sMeta.sFWCorememCodeFWAddr = puFWParams->sMeta.sFWCorememCodeFWAddr; + sTDFWParams.uFWP.sMeta.uiFWCorememCodeSize = puFWParams->sMeta.uiFWCorememCodeSize; + sTDFWParams.uFWP.sMeta.sFWCorememDataDevVAddr = puFWParams->sMeta.sFWCorememDataDevVAddr; + sTDFWParams.uFWP.sMeta.sFWCorememDataFWAddr = puFWParams->sMeta.sFWCorememDataFWAddr; + sTDFWParams.uFWP.sMeta.ui32NumThreads = puFWParams->sMeta.ui32NumThreads; + } + + eError = psDevConfig->pfnTDSendFWImage(psDevConfig->hSysData, &sTDFWParams); + + return eError; +} +#endif + +/*! 
*******************************************************************************

 @Function      InitFirmware

 @Description   Allocate, initialise and pdump Firmware code and data memory

 @Input         psDeviceNode : Device Node
 @Input         psHints : Apphints

 @Return        PVRSRV_ERROR

******************************************************************************/
static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode,
                                 RGX_SRVINIT_APPHINTS *psHints)
{
	OS_FW_IMAGE       *psRGXFW = NULL;
	const IMG_BYTE    *pbRGXFirmware = NULL;

	/* FW code memory */
	IMG_DEVMEM_SIZE_T uiFWCodeAllocSize;
	void              *pvFWCodeHostAddr;

	/* FW data memory */
	IMG_DEVMEM_SIZE_T uiFWDataAllocSize;
	void              *pvFWDataHostAddr;

	/* FW coremem code memory */
	IMG_DEVMEM_SIZE_T uiFWCorememCodeAllocSize;
	void              *pvFWCorememCodeHostAddr = NULL;

	/* FW coremem data memory */
	IMG_DEVMEM_SIZE_T uiFWCorememDataAllocSize;
	void              *pvFWCorememDataHostAddr = NULL;

	RGX_FW_BOOT_PARAMS uFWParams;
	RGX_LAYER_PARAMS sLayerParams;
	PVRSRV_ERROR eError = PVRSRV_OK;

	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;

	/*
	 * Get pointer to Firmware image
	 */
	eError = RGXLoadAndGetFWData(psDeviceNode, &psRGXFW, &pbRGXFirmware);

	if (eError != PVRSRV_OK)
	{
		/* Error or confirmation message generated in RGXLoadAndGetFWData */
		goto fw_load_fail;
	}

	sLayerParams.psDevInfo = psDevInfo;

	/*
	 * Allocate Firmware memory
	 */

	/* Parse the FW blob headers to learn how much device memory each
	 * segment (code/data/coremem code/coremem data) requires. */
	eError = RGXGetFWImageAllocSize(&sLayerParams,
	                                pbRGXFirmware,
	                                OSFirmwareSize(psRGXFW),
	                                &uiFWCodeAllocSize,
	                                &uiFWDataAllocSize,
	                                &uiFWCorememCodeAllocSize,
	                                &uiFWCorememDataAllocSize);

	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,
		         "%s: RGXGetFWImageAllocSize failed",
		         __func__));
		goto cleanup_initfw;
	}

	psDevInfo->ui32FWCodeSizeInBytes = uiFWCodeAllocSize;

#if defined(SUPPORT_TRUSTED_DEVICE)
	/* Without META DMA the trusted device cannot load coremem segments,
	 * so force their sizes to zero. */
	if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA))
	{
		PVR_DPF((PVR_DBG_WARNING,
		         "%s: META DMA not available, disabling core memory code/data",
		         __func__));
		uiFWCorememCodeAllocSize = 0;
		uiFWCorememDataAllocSize = 0;
	}
#endif

	psDevInfo->ui32FWCorememCodeSizeInBytes = uiFWCorememCodeAllocSize;

	eError = RGXInitAllocFWImgMem(psDeviceNode,
	                              uiFWCodeAllocSize,
	                              uiFWDataAllocSize,
	                              uiFWCorememCodeAllocSize,
	                              uiFWCorememDataAllocSize);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,
		         "%s: PVRSRVRGXInitAllocFWImgMem failed (%d)",
		         __func__,
		         eError));
		goto cleanup_initfw;
	}

	/*
	 * Acquire pointers to Firmware allocations
	 */

	/* With a trusted device on real hardware (and no security validation)
	 * the FW code lives in secure memory the DDK cannot map, so the host
	 * pointers below stay NULL in that configuration. */
#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION)
	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, &pvFWCodeHostAddr);
	PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", cleanup_initfw);

#else
	/* We can't get a pointer to a secure FW allocation from within the DDK */
	pvFWCodeHostAddr = NULL;
#endif

#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION)
	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc, &pvFWDataHostAddr);
	PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", release_code);

#else
	/* We can't get a pointer to a secure FW allocation from within the DDK */
	pvFWDataHostAddr = NULL;
#endif

#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION)
	if (uiFWCorememCodeAllocSize != 0)
	{
		eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc, &pvFWCorememCodeHostAddr);
		PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", release_data);
	}
	else
	{
		pvFWCorememCodeHostAddr = NULL;
	}
#else
	/* We can't get a pointer to a secure FW allocation from within the DDK */
	pvFWCorememCodeHostAddr = NULL;
#endif

#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION)
	if (uiFWCorememDataAllocSize != 0)
	{
		eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc, &pvFWCorememDataHostAddr);
		PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", release_corememcode);
	}
	else
#endif
	{
		pvFWCorememDataHostAddr = NULL;
	}

	/*
	 * Prepare FW boot parameters
	 */

	if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
	{
		uFWParams.sMeta.sFWCodeDevVAddr = psDevInfo->sFWCodeDevVAddrBase;
		uFWParams.sMeta.sFWDataDevVAddr = psDevInfo->sFWDataDevVAddrBase;
		uFWParams.sMeta.sFWCorememCodeDevVAddr = psDevInfo->sFWCorememCodeDevVAddrBase;
		uFWParams.sMeta.sFWCorememCodeFWAddr = psDevInfo->sFWCorememCodeFWAddr;
		uFWParams.sMeta.uiFWCorememCodeSize = uiFWCorememCodeAllocSize;
		uFWParams.sMeta.sFWCorememDataDevVAddr = psDevInfo->sFWCorememDataStoreDevVAddrBase;
		uFWParams.sMeta.sFWCorememDataFWAddr = psDevInfo->sFWCorememDataStoreFWAddr;
#if defined(RGXFW_META_SUPPORT_2ND_THREAD)
		uFWParams.sMeta.ui32NumThreads = 2;
#else
		uFWParams.sMeta.ui32NumThreads = 1;
#endif
	}
	else
	{
		uFWParams.sRISCV.sFWCorememCodeDevVAddr = psDevInfo->sFWCorememCodeDevVAddrBase;
		uFWParams.sRISCV.sFWCorememCodeFWAddr = psDevInfo->sFWCorememCodeFWAddr;
		uFWParams.sRISCV.uiFWCorememCodeSize = uiFWCorememCodeAllocSize;

		uFWParams.sRISCV.sFWCorememDataDevVAddr = psDevInfo->sFWCorememDataStoreDevVAddrBase;
		uFWParams.sRISCV.sFWCorememDataFWAddr = psDevInfo->sFWCorememDataStoreFWAddr;
		uFWParams.sRISCV.uiFWCorememDataSize = uiFWCorememDataAllocSize;
	}


	/*
	 * Process the Firmware image and setup code and data segments.
	 *
	 * When the trusted device is enabled and the FW code lives
	 * in secure memory we will only setup the data segments here,
	 * while the code segments will be loaded to secure memory
	 * by the trusted device.
	 */
	if (!psDeviceNode->bAutoVzFwIsUp)
	{
		eError = RGXProcessFWImage(&sLayerParams,
		                           pbRGXFirmware,
		                           pvFWCodeHostAddr,
		                           pvFWDataHostAddr,
		                           pvFWCorememCodeHostAddr,
		                           pvFWCorememDataHostAddr,
		                           &uFWParams);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,
			         "%s: RGXProcessFWImage failed (%d)",
			         __func__,
			         eError));
			goto release_corememdata;
		}
	}

#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION)
	if (psRGXFW)
	{
		/* NOTE(review): return value of RGXTDProcessFWImage is ignored here -
		 * confirm a secure-load failure is meant to be non-fatal. */
		RGXTDProcessFWImage(psDeviceNode, psRGXFW, &uFWParams);
	}
#endif


	/*
	 * PDump Firmware allocations
	 */

#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION)
	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Dump firmware code image");
	DevmemPDumpLoadMem(psDevInfo->psRGXFWCodeMemDesc,
	                   0,
	                   uiFWCodeAllocSize,
	                   PDUMP_FLAGS_CONTINUOUS);
#endif

	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Dump firmware data image");
	DevmemPDumpLoadMem(psDevInfo->psRGXFWDataMemDesc,
	                   0,
	                   uiFWDataAllocSize,
	                   PDUMP_FLAGS_CONTINUOUS);

#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION)
	if (uiFWCorememCodeAllocSize != 0)
	{
		PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Dump firmware coremem code image");
		DevmemPDumpLoadMem(psDevInfo->psRGXFWCorememCodeMemDesc,
		                   0,
		                   uiFWCorememCodeAllocSize,
		                   PDUMP_FLAGS_CONTINUOUS);
	}
#endif

#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION)
	if (uiFWCorememDataAllocSize != 0)
	{
		PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Dump firmware coremem data store image");
		DevmemPDumpLoadMem(psDevInfo->psRGXFWIfCorememDataStoreMemDesc,
		                   0,
		                   uiFWCorememDataAllocSize,
		                   PDUMP_FLAGS_CONTINUOUS);
	}
#endif

	/*
	 * Release Firmware allocations and clean up
	 */
	/* Success path intentionally falls through the release labels below;
	 * they undo the CPU mappings acquired above in reverse order. */
release_corememdata:
#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION)
	if (uiFWCorememDataAllocSize !=0)
	{
		DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc);
	}

release_corememcode:
	if (uiFWCorememCodeAllocSize != 0)
	{
		DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc);
	}
#endif

#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION)
release_data:
	DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc);
#endif

#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION)
release_code:
	DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc);
#endif
cleanup_initfw:
	OSUnloadFirmware(psRGXFW);
fw_load_fail:

	return eError;
}

/* Forward declaration plus an externally-visible accessor for the counter
 * block model table. NOTE(review): 'IMG_INTERNAL static inline' on a
 * forward declaration paired with a non-static definition looks
 * inconsistent - confirm intended linkage. */
IMG_INTERNAL static inline IMG_UINT32 RGXHWPerfMaxDefinedBlks(PVRSRV_RGXDEV_INFO *);
IMG_INTERNAL /*static inline*/ IMG_UINT32 RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **);

IMG_INTERNAL /*static inline*/ IMG_UINT32
RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **ppsModel)
{
	/* Expose the static counter-block model table and its length. */
	*ppsModel = gasCntBlkTypeModel;
	return ARRAY_SIZE(gasCntBlkTypeModel);
}

/*!
*******************************************************************************
 @Function      RGXHWPerfMaxDefinedBlks

 @Description   Return the number of valid block-IDs for the given device node

 @Input         (PVRSRV_RGXDEV_INFO *)  pvDevice device-node to query

 @Returns       (IMG_UINT32)    Number of block-IDs (RGX_CNTBLK_ID)
                                valid for this device.
+******************************************************************************/ +IMG_INTERNAL static inline IMG_UINT32 +RGXHWPerfMaxDefinedBlks(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGX_HWPERF_CNTBLK_RT_INFO sRtInfo; + IMG_UINT32 uiRetVal; + const RGXFW_HWPERF_CNTBLK_TYPE_MODEL *psHWPBlkConfig; + IMG_UINT32 uiNumArrayEls, ui; + + uiRetVal = RGX_CNTBLK_ID_DIRECT_LAST; + + uiNumArrayEls = RGXGetHWPerfBlockConfig(&psHWPBlkConfig); + + if (psHWPBlkConfig == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Unexpected NULL Config Block", __func__)); + return 0; + } + PVR_ASSERT(uiNumArrayEls > 0); + + /* Iterate over each block-ID and find the number of instances of each + * block which are present for this device type. We only query the + * Indirect blocks as their presence varies according to GPU. All direct + * blocks have an entry - but they may not be physically present. + */ + for (ui = RGX_CNTBLK_ID_DIRECT_LAST; ui < uiNumArrayEls; ui++) + { + if (rgx_hwperf_blk_present(&psHWPBlkConfig[ui], (void *)psDevInfo, &sRtInfo)) + { + uiRetVal += sRtInfo.uiNumUnits; + PVR_DPF((PVR_DBG_VERBOSE, "%s: Block %u, NumUnits %u, Total %u", + __func__, ui, sRtInfo.uiNumUnits, uiRetVal)); + } +#ifdef DEBUG + else + { + PVR_DPF((PVR_DBG_WARNING, "%s: Block %u *NOT* present", + __func__, ui)); + } +#endif + } + + PVR_DPF((PVR_DBG_VERBOSE, "%s: Num Units = %u", __func__, uiRetVal)); + + return uiRetVal; +} + +/*! +******************************************************************************* + + @Function InitialiseHWPerfCounters + + @Description Initialisation of hardware performance counters and dumping + them out to pdump, so that they can be modified at a later + point. 

 @Input         pvDevice
 @Input         psHWPerfDataMemDesc
 @Input         psHWPerfInitDataInt

 @Return        void

******************************************************************************/

static void InitialiseHWPerfCounters(void *pvDevice, DEVMEM_MEMDESC *psHWPerfDataMemDesc, RGXFWIF_HWPERF_CTL *psHWPerfInitDataInt)
{
	RGXFWIF_HWPERF_CTL_BLK *psHWPerfInitBlkData;
	IMG_UINT32 ui32CntBlkModelLen;
	const RGXFW_HWPERF_CNTBLK_TYPE_MODEL *asCntBlkTypeModel;
	const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc;
	IMG_UINT32 ui32BlockID, ui32BlkCfgIdx, ui32CounterIdx;
	RGX_HWPERF_CNTBLK_RT_INFO sCntBlkRtInfo;
	IMG_UINT32 uiUnit;
	IMG_BOOL bDirect;

	ui32CntBlkModelLen = RGXGetHWPerfBlockConfig(&asCntBlkTypeModel);

	PVR_DPF((PVR_DBG_VERBOSE, "%s: #BlockConfig entries = %d", __func__, ui32CntBlkModelLen));

	/* Initialise the number of blocks in the RGXFWIF_HWPERF_CTL structure.
	 * This allows Firmware to validate that it has been correctly configured.
	 */
	psHWPerfInitDataInt->ui32NumBlocks = RGXHWPerfMaxDefinedBlks(pvDevice);

	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "HWPerf Block count = %u.",
	                      psHWPerfInitDataInt->ui32NumBlocks);
#if defined(PDUMP)
	/* Ensure that we record the BVNC specific ui32NumBlocks in the PDUMP data
	 * so that when we playback we have the correct value present.
	 */
	DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
	                          (size_t)&(psHWPerfInitDataInt->ui32NumBlocks) - (size_t)(psHWPerfInitDataInt),
	                          psHWPerfInitDataInt->ui32NumBlocks, PDUMP_FLAGS_CONTINUOUS);
#endif /* defined(PDUMP) */

	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
	                      "HWPerf Counter config starts here.");

	/* Simply iterate over all the RGXFWIW_HWPERF_CTL blocks in order */
	psHWPerfInitBlkData = &psHWPerfInitDataInt->sBlkCfg[0];

	for (ui32BlkCfgIdx = 0; ui32BlkCfgIdx < ui32CntBlkModelLen;
	     ui32BlkCfgIdx++, psHWPerfInitBlkData++)
	{
		IMG_BOOL bSingleton;

		/* Exit early if this core does not have any of these counter blocks
		 * due to core type/BVNC features.... */
		psBlkTypeDesc = &asCntBlkTypeModel[ui32BlkCfgIdx];

		/* NOTE(review): the address of an array element can never be NULL,
		 * so this guard is dead code - confirm and consider removing. */
		if (psBlkTypeDesc == NULL)
		{
			PVR_DPF((PVR_DBG_ERROR, "%s: Unexpected NULL - Index %d / %d",
			         __func__, ui32BlkCfgIdx, ui32CntBlkModelLen));
			continue;
		}

		PVR_DPF((PVR_DBG_VERBOSE,
		         "%s: CfgIdx = %u, InitBlkData @ 0x%p, BlkTypeDesc @ 0x%p",
		         __func__, ui32BlkCfgIdx, psHWPerfInitBlkData, psBlkTypeDesc));

		if (psBlkTypeDesc->pfnIsBlkPresent(psBlkTypeDesc, pvDevice, &sCntBlkRtInfo) == IMG_FALSE)
		{
			PVR_DPF((PVR_DBG_VERBOSE, "%s: %s [ID 0x%x] NOT present", __func__,
			         psBlkTypeDesc->pszBlockNameComment,
			         psBlkTypeDesc->uiCntBlkIdBase ));
			/* Block isn't present, but has an entry in the table. Populate
			 * the Init data so that we can track the block later.
			 */
			psHWPerfInitBlkData->uiBlockID = psBlkTypeDesc->uiCntBlkIdBase;
			continue;
		}
#ifdef DEBUG
		else
		{
			PVR_DPF((PVR_DBG_VERBOSE, "%s: %s has %d %s", __func__,
			         psBlkTypeDesc->pszBlockNameComment, sCntBlkRtInfo.uiNumUnits,
			         (sCntBlkRtInfo.uiNumUnits > 1) ? "units" : "unit"));
		}
#endif /* DEBUG */

		/* Program all counters in one block so those already on may
		 * be configured off and vice-versa.
		 */
		bDirect = psBlkTypeDesc->uiIndirectReg == 0;

		/* Set if there is only one instance of this block-ID present */
		bSingleton = sCntBlkRtInfo.uiNumUnits == 1;

		/* One RGXFWIF_HWPERF_CTL_BLK entry per physical unit of the block. */
		for (ui32BlockID = psBlkTypeDesc->uiCntBlkIdBase, uiUnit = 0;
		     ui32BlockID < psBlkTypeDesc->uiCntBlkIdBase+sCntBlkRtInfo.uiNumUnits;
		     ui32BlockID++, uiUnit++)
		{

			if (bDirect)
			{
				PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
				                      "Block : %s", psBlkTypeDesc->pszBlockNameComment);
			}
			else
			{
				PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
				                      "Unit %d Block : %s%d",
				                      ui32BlockID-psBlkTypeDesc->uiCntBlkIdBase,
				                      psBlkTypeDesc->pszBlockNameComment, uiUnit);
			}

			psHWPerfInitBlkData->uiBlockID = ui32BlockID;
			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "uiBlockID: The Block ID for the layout block. See RGX_CNTBLK_ID for further information.");
#if defined(PDUMP)
			DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
			                          (size_t)&(psHWPerfInitBlkData->uiBlockID) - (size_t)(psHWPerfInitDataInt),
			                          psHWPerfInitBlkData->uiBlockID,
			                          PDUMP_FLAGS_CONTINUOUS);
#endif /* PDUMP */

			psHWPerfInitBlkData->uiNumCounters = 0;
			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
			                      "uiNumCounters (X): Specifies the number of valid counters"
			                      " [0..%d] which follow.", RGX_CNTBLK_COUNTERS_MAX);
#if defined(PDUMP)
			DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
			                          (size_t)&(psHWPerfInitBlkData->uiNumCounters) - (size_t)(psHWPerfInitDataInt),
			                          psHWPerfInitBlkData->uiNumCounters,
			                          PDUMP_FLAGS_CONTINUOUS);
#endif /* PDUMP */

			psHWPerfInitBlkData->uiEnabled = 0;
			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "uiEnabled: Set to 0x1 if the block needs to be enabled during playback.");
#if defined(PDUMP)
			DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
			                          (size_t)&(psHWPerfInitBlkData->uiEnabled) - (size_t)(psHWPerfInitDataInt),
			                          psHWPerfInitBlkData->uiEnabled,
			                          PDUMP_FLAGS_CONTINUOUS);
#endif /* PDUMP */

			for (ui32CounterIdx = 0; ui32CounterIdx < RGX_CNTBLK_COUNTERS_MAX; ui32CounterIdx++)
			{
				psHWPerfInitBlkData->aui32CounterCfg[ui32CounterIdx] = IMG_UINT32_C(0x00000000);

				if (bDirect)
				{
					PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
					                      "%s_COUNTER_%d",
					                      psBlkTypeDesc->pszBlockNameComment, ui32CounterIdx);
				}
				else
				{
					PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
					                      "%s%d_COUNTER_%d",
					                      psBlkTypeDesc->pszBlockNameComment,
					                      uiUnit, ui32CounterIdx);
				}
#if defined(PDUMP)
				DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
				                          (size_t)&(psHWPerfInitBlkData->aui32CounterCfg[ui32CounterIdx]) - (size_t)(psHWPerfInitDataInt),
				                          psHWPerfInitBlkData->aui32CounterCfg[ui32CounterIdx],
				                          PDUMP_FLAGS_CONTINUOUS);
#endif /* PDUMP */

			}

			/* Update our block reference for indirect units which have more
			 * than a single unit present. Only increment if we have more than
			 * one unit left to process as the external loop counter will be
			 * incremented after final unit is processed.
			 */
			if (!bSingleton && (uiUnit < (sCntBlkRtInfo.uiNumUnits - 1)))
			{
				psHWPerfInitBlkData++;
			}
		}
	}
	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
	                      "HWPerf Counter config finishes here.");
}

/*!
+******************************************************************************* + + @Function InitialiseAllCounters + + @Description Initialise HWPerf and custom counters + + @Input psDeviceNode : Device Node + + @Return PVRSRV_ERROR + +******************************************************************************/ +static PVRSRV_ERROR InitialiseAllCounters(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; + RGXFWIF_HWPERF_CTL *psHWPerfInitData; + PVRSRV_ERROR eError; + + eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfHWPerfCountersMemDesc, (void **)&psHWPerfInitData); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", failHWPerfCountersMemDescAqCpuVirt); + + InitialiseHWPerfCounters(psDevInfo, psDevInfo->psRGXFWIfHWPerfCountersMemDesc, psHWPerfInitData); + +failHWPerfCountersMemDescAqCpuVirt: + DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfHWPerfCountersMemDesc); + + return eError; +} + +/* + * _ParseHTBAppHints: + * + * Generate necessary references to the globally visible AppHints which are + * declared in the above #include "km_apphint_defs.h" + * Without these local references some compiler tool-chains will treat + * unreferenced declarations as fatal errors. This function duplicates the + * HTB_specific apphint references which are made in htbserver.c:HTBInit() + * However, it makes absolutely *NO* use of these hints. + */ +static void +_ParseHTBAppHints(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + void *pvParamState = NULL; + IMG_UINT32 ui32LogType; + IMG_BOOL bAnyLogGroupConfigured; + IMG_UINT32 ui32BufferSize; + IMG_UINT32 ui32OpMode; + + /* Services initialisation parameters */ + pvParamState = SrvInitParamOpen(); + if (pvParamState == NULL) + return; + + SrvInitParamGetUINT32BitField(pvParamState, EnableHTBLogGroup, ui32LogType); + bAnyLogGroupConfigured = ui32LogType ? 
IMG_TRUE : IMG_FALSE; + SrvInitParamGetUINT32List(pvParamState, HTBOperationMode, ui32OpMode); + SrvInitParamGetUINT32(pvParamState, HTBufferSizeInKB, ui32BufferSize); + + SrvInitParamClose(pvParamState); +} + +#if defined(SUPPORT_TRUSTED_DEVICE) +static PVRSRV_ERROR RGXValidateTDHeap(PVRSRV_DEVICE_NODE *psDeviceNode, + PVRSRV_PHYS_HEAP ePhysHeap, + PHYS_HEAP_USAGE_FLAGS ui32RequiredFlags) +{ + PHYS_HEAP *psHeap = psDeviceNode->apsPhysHeap[ePhysHeap]; + PHYS_HEAP_USAGE_FLAGS ui32HeapFlags = PhysHeapGetFlags(psHeap); + PHYS_HEAP_USAGE_FLAGS ui32InvalidFlags = ~(PHYS_HEAP_USAGE_FW_PRIV_DATA | PHYS_HEAP_USAGE_FW_CODE + | PHYS_HEAP_USAGE_GPU_SECURE); + + PVR_LOG_RETURN_IF_FALSE_VA((ui32HeapFlags & ui32RequiredFlags) != 0, + PVRSRV_ERROR_NOT_SUPPORTED, + "TD heap is missing required flags. flags: 0x%x / required:0x%x", + ui32HeapFlags, + ui32RequiredFlags); + + PVR_LOG_RETURN_IF_FALSE_VA((ui32HeapFlags & ui32InvalidFlags) == 0, + PVRSRV_ERROR_NOT_SUPPORTED, + "TD heap uses invalid flags. flags: 0x%x / invalid:0x%x", + ui32HeapFlags, + ui32InvalidFlags); + + return PVRSRV_OK; +} + +static PVRSRV_ERROR RGXValidateTDHeaps(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + + eError = RGXValidateTDHeap(psDeviceNode, PVRSRV_PHYS_HEAP_FW_PRIV_DATA, PHYS_HEAP_USAGE_FW_PRIV_DATA); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXValidateTDHeap:FW_PRIV_DATA"); + + eError = RGXValidateTDHeap(psDeviceNode, PVRSRV_PHYS_HEAP_FW_CODE, PHYS_HEAP_USAGE_FW_CODE); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXValidateTDHeap:FW_CODE"); + + eError = RGXValidateTDHeap(psDeviceNode, PVRSRV_PHYS_HEAP_GPU_SECURE, PHYS_HEAP_USAGE_GPU_SECURE); + PVR_LOG_RETURN_IF_ERROR(eError, "RGXValidateTDHeap:GPU_SECURE"); + + return PVRSRV_OK; +} +#endif + +/*! 
*******************************************************************************

 @Function      RGXInit

 @Description   RGX Initialisation

 @Input         psDeviceNode

 @Return        PVRSRV_ERROR

******************************************************************************/
PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode)
{
	PVRSRV_ERROR eError;

	/* Services initialisation parameters */
	RGX_SRVINIT_APPHINTS sApphints = {0};
	IMG_UINT32 ui32FWConfigFlags, ui32FWConfigFlagsExt, ui32FwOsCfgFlags;
	IMG_UINT32 ui32DeviceFlags;
	IMG_UINT32 ui32AvailablePowUnitsMask;

	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;

	/* Number of HWPerf Block-IDs (RGX_CNTBLK_ID) which are available */
	IMG_UINT32 ui32NumHWPerfBlocks;

	/* Size of the RGXFWIF_HWPERF_CTL_BLK structure - varies by BVNC */
	IMG_UINT32 ui32HWPerfBlkSize;
	RGX_LAYER_PARAMS sLayerParams;

	sLayerParams.psDevInfo = psDevInfo;

#if defined(SUPPORT_TRUSTED_DEVICE)
	eError = RGXValidateTDHeaps(psDeviceNode);
	PVR_LOG_RETURN_IF_ERROR(eError, "RGXValidateTDHeaps");
#endif

#if defined(SUPPORT_AUTOVZ)
	if (PVRSRV_VZ_MODE_IS(HOST))
	{
		/* The RGX_CR_MTS_DM0_INTERRUPT_ENABLE register is always set by the firmware during initialisation
		 * and it provides a good method of determining if the firmware has been booted previously */
		psDeviceNode->bAutoVzFwIsUp = (OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_DM0_INTERRUPT_ENABLE) != 0);

		PVR_LOG(("AutoVz startup check: firmware is %s;",
		        (psDeviceNode->bAutoVzFwIsUp) ? "already running" : "powered down"));
	}
	else if (PVRSRV_VZ_MODE_IS(GUEST))
	{
		/* Guest assumes the firmware is always available */
		psDeviceNode->bAutoVzFwIsUp = IMG_TRUE;
	}
	else
#endif
	{
		/* Firmware does not follow the AutoVz life-cycle */
		psDeviceNode->bAutoVzFwIsUp = IMG_FALSE;
	}

	if (PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp))
	{
		/* set the device power state here as the regular power
		 * callbacks will not be executed on this driver */
		psDevInfo->bRGXPowered = IMG_TRUE;
	}

	/* Set which HW Safety Events will be handled by the driver */
	psDevInfo->ui32HostSafetyEventMask |= RGX_IS_FEATURE_SUPPORTED(psDevInfo, WATCHDOG_TIMER) ?
	                                      RGX_CR_EVENT_STATUS_WDT_TIMEOUT_EN : 0;
	psDevInfo->ui32HostSafetyEventMask |= (RGX_DEVICE_HAS_FEATURE_VALUE(&sLayerParams, ECC_RAMS)
	                                      && (RGX_DEVICE_GET_FEATURE_VALUE(&sLayerParams, ECC_RAMS) > 0)) ?
	                                      RGX_CR_EVENT_STATUS_FAULT_FW_EN : 0;

#if defined(PDUMP)
	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Register defs revision: %d", RGX_CR_DEFS_KM_REVISION);
#endif

	ui32NumHWPerfBlocks = RGXHWPerfMaxDefinedBlks((void *)psDevInfo);

	/* NOTE(review): the "- 1" implies RGXFWIF_HWPERF_CTL embeds one
	 * RGXFWIF_HWPERF_CTL_BLK and assumes ui32NumHWPerfBlocks >= 1 - confirm. */
	ui32HWPerfBlkSize = sizeof(RGXFWIF_HWPERF_CTL) +
	                    (ui32NumHWPerfBlocks - 1) * sizeof(RGXFWIF_HWPERF_CTL_BLK);

	/* Services initialisation parameters */
	_ParseHTBAppHints(psDeviceNode);
	GetApphints(psDevInfo, &sApphints);
	InitDeviceFlags(&sApphints, &ui32DeviceFlags);

#if defined(SUPPORT_GPUVIRT_VALIDATION)
#if defined(EMULATOR)
	if ((sApphints.bEnableTrustedDeviceAceConfig) &&
	    (RGX_IS_FEATURE_SUPPORTED(psDevInfo, AXI_ACE)))
	{
		SetTrustedDeviceAceEnabled();
	}
#endif
#endif

	eError = RGXInitCreateFWKernelMemoryContext(psDeviceNode);

	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create FW kernel memory context (%u)",
		         __func__, eError));
		goto cleanup;
	}

	/* Guests rely on the host having loaded the firmware already. */
	if (!PVRSRV_VZ_MODE_IS(GUEST))
	{
		eError = InitFirmware(psDeviceNode, &sApphints);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,
			         "%s: InitFirmware failed (%d)",
			         __func__,
			         eError));
			goto cleanup;
		}
	}

	/*
	 * Setup Firmware initialisation data
	 */

	GetFWConfigFlags(psDeviceNode, &sApphints, &ui32FWConfigFlags, &ui32FWConfigFlagsExt, &ui32FwOsCfgFlags);

#if defined(SUPPORT_VALIDATION)
	ui32AvailablePowUnitsMask = sApphints.ui32AvailablePowUnitsMask;
#else
	ui32AvailablePowUnitsMask = AVAIL_POW_UNITS_MASK_DEFAULT;
#endif

	eError = RGXInitFirmware(psDeviceNode,
	                         sApphints.bEnableSignatureChecks,
	                         sApphints.ui32SignatureChecksBufSize,
	                         sApphints.ui32HWPerfFWBufSize,
	                         (IMG_UINT64)sApphints.ui32HWPerfFilter0 |
	                         ((IMG_UINT64)sApphints.ui32HWPerfFilter1 << 32),
	                         ui32FWConfigFlags,
	                         sApphints.ui32LogType,
	                         GetFilterFlags(&sApphints),
	                         sApphints.ui32JonesDisableMask,
	                         sApphints.ui32HWRDebugDumpLimit,
#if defined(SUPPORT_VALIDATION)
	                         sApphints.ui32RenderKillingCtl,
	                         sApphints.ui32CDMTDMKillingCtl,
	                         &sApphints.aui32TPUTrilinearFracMask[0],
	                         &sApphints.aui32USRMNumRegions[0],
	                         (IMG_PUINT64)&sApphints.aui64UVBRMNumRegions[0],
#else
	                         0, 0,
	                         NULL, NULL, NULL,
#endif
	                         ui32HWPerfBlkSize,
	                         sApphints.eRGXRDPowerIslandConf,
	                         sApphints.eFirmwarePerf,
	                         ui32FWConfigFlagsExt,
	                         ui32AvailablePowUnitsMask,
	                         ui32FwOsCfgFlags);

	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVRGXInitFirmware failed (%d)",
		         __func__,
		         eError));
		goto cleanup;
	}

	if (!PVRSRV_VZ_MODE_IS(GUEST))
	{
		eError = InitialiseAllCounters(psDeviceNode);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,
			         "%s: InitialiseAllCounters failed (%d)",
			         __func__,
			         eError));
			goto cleanup;
		}
	}

	/*
	 * Perform second stage of RGX initialisation
	 */
	eError = RGXInitDevPart2(psDeviceNode,
	                         ui32DeviceFlags,
	                         sApphints.ui32HWPerfHostBufSize,
	                         sApphints.ui32HWPerfHostFilter,
	                         sApphints.eRGXActivePMConf,
	                         ui32AvailablePowUnitsMask);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,
		         "%s: PVRSRVRGXInitDevPart2KM failed (%d)",
		         __func__,
		         eError));
		goto cleanup;
	}

#if defined(SUPPORT_VALIDATION)
	PVRSRVAppHintDumpState();
#endif

	eError = PVRSRV_OK;

cleanup:
	return eError;
}

/******************************************************************************
 End of file (rgxsrvinit.c)
******************************************************************************/
diff --git a/drivers/gpu/drm/phytium/octopus/rgxstartstop.c b/drivers/gpu/drm/phytium/octopus/rgxstartstop.c
new file mode 100644
index 000000000000..15ef53392063
--- /dev/null
+++ b/drivers/gpu/drm/phytium/octopus/rgxstartstop.c
@@ -0,0 +1,851 @@
/*************************************************************************/ /*!
@File
@Title          Device specific start/stop routines
@Copyright      Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved
@Description    Device specific start/stop routines
@License        Dual MIT/GPLv2

The contents of this file are subject to the MIT license as set out below.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

Alternatively, the contents of this file may be used under the terms of
the GNU General Public License Version 2 ("GPL") in which case the provisions
of GPL are applicable instead of those above.
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/* The routines implemented here are built on top of an abstraction layer to + * hide DDK/OS-specific details in case they are used outside of the DDK + * (e.g. when trusted device is enabled). + * Any new dependency should be added to rgxlayer.h. + * Any new code should be built on top of the existing abstraction layer, + * which should be extended when necessary. 
*/ +#include "rgxstartstop.h" +#include "rgxfwutils.h" + +/* + * Specific fields for RGX_CR_IDLE must not be polled in pdumps + * (technical reasons) + */ +#define CR_IDLE_UNSELECTED_MASK ((~RGX_CR_SLC_IDLE_ACE_CONVERTERS_CLRMSK) | \ + (~RGX_CR_SLC_IDLE_OWDB_CLRMSK) | \ + (RGX_CR_SLC_IDLE_FBCDC_ARB_EN)) + +static PVRSRV_ERROR RGXWriteMetaCoreRegThoughSP(const void *hPrivate, + IMG_UINT32 ui32CoreReg, + IMG_UINT32 ui32Value) +{ + IMG_UINT32 i = 0; + + RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXUXXRXDT_OFFSET, ui32Value); + RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXUXXRXRQ_OFFSET, ui32CoreReg & ~META_CR_TXUXXRXRQ_RDnWR_BIT); + + do + { + RGXReadMetaRegThroughSP(hPrivate, META_CR_TXUXXRXRQ_OFFSET, &ui32Value); + } while (((ui32Value & META_CR_TXUXXRXRQ_DREADY_BIT) != META_CR_TXUXXRXRQ_DREADY_BIT) && (i++ < 1000)); + + if (i == 1000) + { + RGXCommentLog(hPrivate, "RGXWriteMetaCoreRegThoughSP: Timeout"); + return PVRSRV_ERROR_TIMEOUT; + } + + return PVRSRV_OK; +} + +static PVRSRV_ERROR RGXStartFirmware(const void *hPrivate) +{ + PVRSRV_ERROR eError; + + /* Give privilege to debug and slave port */ + RGXWriteMetaRegThroughSP(hPrivate, META_CR_SYSC_JTAG_THREAD, META_CR_SYSC_JTAG_THREAD_PRIV_EN); + + /* Point Meta to the bootloader address, global (uncached) range */ + eError = RGXWriteMetaCoreRegThoughSP(hPrivate, + PC_ACCESS(0), + RGXFW_BOOTLDR_META_ADDR | META_MEM_GLOBAL_RANGE_BIT); + + if (eError != PVRSRV_OK) + { + RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Slave boot Start failed!"); + return eError; + } + + /* Enable minim encoding */ + RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXPRIVEXT, META_CR_TXPRIVEXT_MINIM_EN); + + /* Enable Meta thread */ + RGXWriteMetaRegThroughSP(hPrivate, META_CR_T0ENABLE_OFFSET, META_CR_TXENABLE_ENABLE_BIT); + + return PVRSRV_OK; +} + +/*! 
+******************************************************************************* + + @Function RGXInitMetaProcWrapper + + @Description Configures the hardware wrapper of the META processor + + @Input hPrivate : Implementation specific data + + @Return void + +******************************************************************************/ +static void RGXInitMetaProcWrapper(const void *hPrivate) +{ + IMG_UINT64 ui64GartenConfig; + + /* Garten IDLE bit controlled by META */ + ui64GartenConfig = RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META; + + RGXCommentLog(hPrivate, "RGXStart: Configure META wrapper"); + RGXWriteReg64(hPrivate, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG, ui64GartenConfig); +} + + +/*! +******************************************************************************* + + @Function RGXInitRiscvProcWrapper + + @Description Configures the hardware wrapper of the RISCV processor + + @Input hPrivate : Implementation specific data + + @Return void + +******************************************************************************/ +static void RGXInitRiscvProcWrapper(const void *hPrivate) +{ + IMG_DEV_VIRTADDR sTmp; + + RGXCommentLog(hPrivate, "RGXStart: Configure RISCV wrapper"); + + RGXCommentLog(hPrivate, "RGXStart: Write boot code remap"); + RGXAcquireBootCodeAddr(hPrivate, &sTmp); + RGXWriteReg64(hPrivate, + RGXRISCVFW_BOOTLDR_CODE_REMAP, + sTmp.uiAddr | + (IMG_UINT64) (RGX_FIRMWARE_RAW_HEAP_SIZE >> FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT) + << RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT | + (IMG_UINT64) MMU_CONTEXT_MAPPING_FWPRIV << FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT | + RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_EN); + + RGXCommentLog(hPrivate, "RGXStart: Write boot data remap"); + RGXAcquireBootDataAddr(hPrivate, &sTmp); + RGXWriteReg64(hPrivate, + RGXRISCVFW_BOOTLDR_DATA_REMAP, + sTmp.uiAddr | + (IMG_UINT64) (RGX_FIRMWARE_RAW_HEAP_SIZE >> FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT) + << RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT | + (IMG_UINT64) 
MMU_CONTEXT_MAPPING_FWPRIV << FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT | + RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_EN); + + /* Garten IDLE bit controlled by RISCV */ + RGXCommentLog(hPrivate, "RGXStart: Set GARTEN_IDLE type to RISCV"); + RGXWriteReg64(hPrivate, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META); +} + + +/*! +******************************************************************************* + + @Function RGXInitBIF + + @Description Initialise RGX BIF + + @Input hPrivate : Implementation specific data + + @Return void + +******************************************************************************/ +static void RGXInitBIF(const void *hPrivate) +{ + IMG_DEV_PHYADDR sPCAddr; + IMG_UINT32 uiPCAddr; + + /* + * Acquire the address of the Kernel Page Catalogue. + */ + RGXAcquireKernelMMUPC(hPrivate, &sPCAddr); + uiPCAddr = (((sPCAddr.uiAddr >> RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT) + << RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT) + & ~RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK); + + /* + * Write the kernel catalogue base. 
+ */ + RGXCommentLog(hPrivate, "RGX firmware MMU Page Catalogue"); + + + /* Set the mapping context */ + RGXWriteReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT, MMU_CONTEXT_MAPPING_FWPRIV); + (void)RGXReadReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT); /* Fence write */ + + /* Write the cat-base address */ + RGXWriteKernelMMUPC32(hPrivate, + RGX_CR_MMU_CBASE_MAPPING, + RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT, + RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT, + uiPCAddr); + +#if (MMU_CONTEXT_MAPPING_FWIF != MMU_CONTEXT_MAPPING_FWPRIV) + /* Set-up different MMU ID mapping to the same PC used above */ + RGXWriteReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT, MMU_CONTEXT_MAPPING_FWIF); + (void)RGXReadReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT); /* Fence write */ + + RGXWriteKernelMMUPC32(hPrivate, + RGX_CR_MMU_CBASE_MAPPING, + RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT, + RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT, + uiPCAddr); +#endif +} + + +/**************************************************************************/ /*! +@Function RGXInitMMURangeRegisters +@Description Initialises MMU range registers for Non4K pages. +@Input hPrivate Implementation specific data +@Return void + */ /**************************************************************************/ +static void RGXInitMMURangeRegisters(const void *hPrivate) +{ + RGX_LAYER_PARAMS *psParams = (RGX_LAYER_PARAMS*)hPrivate; + PVRSRV_RGXDEV_INFO *psDevInfo = psParams->psDevInfo; + IMG_UINT32 ui32RegAddr = RGX_CR_MMU_PAGE_SIZE_RANGE_ONE; + IMG_UINT32 i; + + for (i = 0; i < ARRAY_SIZE(psDevInfo->aui64MMUPageSizeRangeValue); ++i, ui32RegAddr += sizeof(IMG_UINT64)) + { + RGXWriteReg64(hPrivate, ui32RegAddr, psDevInfo->aui64MMUPageSizeRangeValue[i]); + } +} + + +/**************************************************************************/ /*! 
+@Function RGXInitAXIACE +@Description Initialises AXI ACE registers +@Input hPrivate Implementation specific data +@Return void + */ /**************************************************************************/ +static void RGXInitAXIACE(const void *hPrivate) +{ + IMG_UINT64 ui64RegVal; + + /** + * The below configuration is only applicable for RGX core's supporting + * ACE/ACE-lite protocol and connected to ACE coherent interconnect. + */ + + /** + * Configure AxDomain and AxCache for MMU transactions. + * AxDomain set to non sharable (0x0). + */ + ui64RegVal = RGX_CR_ACE_CTRL_MMU_AWCACHE_WRITE_BACK_WRITE_ALLOCATE | + RGX_CR_ACE_CTRL_MMU_ARCACHE_WRITE_BACK_READ_ALLOCATE; + + /** + * Configure AxCache for PM/MMU transactions. + * Set to same value (i.e WBRWALLOC caching, rgxmmunit.c:RGXDerivePTEProt8) + * as non-coherent PTEs + */ + ui64RegVal |= (IMG_UINT64_C(0xF)) << RGX_CR_ACE_CTRL_PM_MMU_AXCACHE_SHIFT; + + /** + * Configure AxDomain for non MMU transactions. + */ + ui64RegVal |= (IMG_UINT64)(RGX_CR_ACE_CTRL_COH_DOMAIN_OUTER_SHAREABLE | + RGX_CR_ACE_CTRL_NON_COH_DOMAIN_NON_SHAREABLE); + + RGXCommentLog(hPrivate, "Init AXI-ACE interface"); + RGXWriteReg64(hPrivate, RGX_CR_ACE_CTRL, ui64RegVal); +} + +static void RGXMercerSoftResetSet(const void *hPrivate, IMG_UINT64 ui32MercerFlags) +{ + RGXWriteReg64(hPrivate, RGX_CR_MERCER_SOFT_RESET, ui32MercerFlags & RGX_CR_MERCER_SOFT_RESET_MASKFULL); + + /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */ + (void) RGXReadReg64(hPrivate, RGX_CR_MERCER_SOFT_RESET); +} + +static void RGXSPUSoftResetAssert(const void *hPrivate) +{ + /* Assert Mercer0 */ + RGXMercerSoftResetSet(hPrivate, RGX_CR_MERCER0_SOFT_RESET_SPU_EN); + /* Assert Mercer1 */ + RGXMercerSoftResetSet(hPrivate, RGX_CR_MERCER0_SOFT_RESET_SPU_EN | RGX_CR_MERCER1_SOFT_RESET_SPU_EN); + /* Assert Mercer2 */ + RGXMercerSoftResetSet(hPrivate, RGX_CR_MERCER0_SOFT_RESET_SPU_EN | RGX_CR_MERCER1_SOFT_RESET_SPU_EN | 
RGX_CR_MERCER2_SOFT_RESET_SPU_EN); + + RGXWriteReg32(hPrivate, RGX_CR_SWIFT_SOFT_RESET, RGX_CR_SWIFT_SOFT_RESET_MASKFULL); + /* Fence the previous write */ + (void) RGXReadReg32(hPrivate, RGX_CR_SWIFT_SOFT_RESET); + + RGXWriteReg32(hPrivate, RGX_CR_TEXAS_SOFT_RESET, RGX_CR_TEXAS_SOFT_RESET_MASKFULL); + /* Fence the previous write */ + (void) RGXReadReg32(hPrivate, RGX_CR_TEXAS_SOFT_RESET); +} + +static void RGXSPUSoftResetDeAssert(const void *hPrivate) +{ + RGXWriteReg32(hPrivate, RGX_CR_TEXAS_SOFT_RESET, 0); + /* Fence the previous write */ + (void) RGXReadReg32(hPrivate, RGX_CR_TEXAS_SOFT_RESET); + + + RGXWriteReg32(hPrivate, RGX_CR_SWIFT_SOFT_RESET, 0); + /* Fence the previous write */ + (void) RGXReadReg32(hPrivate, RGX_CR_SWIFT_SOFT_RESET); + + /* Deassert Mercer2 */ + RGXMercerSoftResetSet(hPrivate, RGX_CR_MERCER0_SOFT_RESET_SPU_EN | RGX_CR_MERCER1_SOFT_RESET_SPU_EN); + /* Deassert Mercer1 */ + RGXMercerSoftResetSet(hPrivate, RGX_CR_MERCER0_SOFT_RESET_SPU_EN); + /* Deassert Mercer0 */ + RGXMercerSoftResetSet(hPrivate, 0); +} + +static void RGXResetSequence(const void *hPrivate, const IMG_CHAR *pcRGXFW_PROCESSOR) +{ + /* Set RGX in soft-reset */ + RGXCommentLog(hPrivate, "RGXStart: soft reset assert step 1"); + RGXSPUSoftResetAssert(hPrivate); + + RGXCommentLog(hPrivate, "RGXStart: soft reset assert step 2"); + RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_SOFT_RESET_JONES_ALL); + + /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */ + (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); + + RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_SOFT_RESET_JONES_ALL | RGX_SOFT_RESET_EXTRA); + + (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); + + /* Take everything out of reset but the FW processor */ + RGXCommentLog(hPrivate, "RGXStart: soft reset de-assert step 1 excluding %s", pcRGXFW_PROCESSOR); + RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_SOFT_RESET_EXTRA | RGX_CR_SOFT_RESET_GARTEN_EN); + + (void) RGXReadReg64(hPrivate, 
RGX_CR_SOFT_RESET); + + RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_GARTEN_EN); + + (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); + + RGXCommentLog(hPrivate, "RGXStart: soft reset de-assert step 2 excluding %s", pcRGXFW_PROCESSOR); + RGXSPUSoftResetDeAssert(hPrivate); + + (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); +} + +static void DeassertMetaReset(const void *hPrivate) +{ + /* Need to wait for at least 16 cycles before taking the FW processor out of reset ... */ + RGXWaitCycles(hPrivate, 32, 3); + + RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, 0x0); + (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); + + /* ... and afterwards */ + RGXWaitCycles(hPrivate, 32, 3); +} + +static PVRSRV_ERROR InitJonesECCRAM(const void *hPrivate) +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32Value; + IMG_BOOL bMetaFW = RGX_DEVICE_HAS_FEATURE_VALUE(hPrivate, META); + IMG_UINT32 ui32Mask; + + if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, ECC_RAMS) == 0) + { + return PVRSRV_ERROR_NOT_SUPPORTED; + } + + if (bMetaFW) + { + /* META must be taken out of reset (without booting) during Coremem initialization. */ + RGXWriteReg32(hPrivate, RGX_CR_META_BOOT, 0); + DeassertMetaReset(hPrivate); + } + + /* Clocks must be set to "on" during RAMs initialization. */ + RGXWriteReg64(hPrivate, RGX_CR_CLK_CTRL0, RGX_CR_CLK_CTRL0_ALL_ON); + RGXWriteReg64(hPrivate, RGX_CR_CLK_CTRL1, RGX_CR_CLK_CTRL1_ALL_ON); + + if (bMetaFW) + { + RGXWriteMetaRegThroughSP(hPrivate, META_CR_SYSC_JTAG_THREAD, META_CR_SYSC_JTAG_THREAD_PRIV_EN); + RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXCLKCTRL, META_CR_TXCLKCTRL_ALL_ON); + RGXReadMetaRegThroughSP(hPrivate, META_CR_TXCLKCTRL, &ui32Value); + } + + ui32Mask = bMetaFW ? 
+ RGX_CR_JONES_RAM_INIT_KICK_MASKFULL + : RGX_CR_JONES_RAM_INIT_KICK_MASKFULL & ~RGX_CR_JONES_RAM_INIT_KICK_GARTEN_EN; + RGXWriteReg64(hPrivate, RGX_CR_JONES_RAM_INIT_KICK, ui32Mask); + eError = RGXPollReg64(hPrivate, RGX_CR_JONES_RAM_STATUS, ui32Mask, ui32Mask); + + if (bMetaFW) + { + RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXCLKCTRL, META_CR_TXCLKCTRL_ALL_AUTO); + RGXReadMetaRegThroughSP(hPrivate, META_CR_TXCLKCTRL, &ui32Value); + } + + RGXWriteReg64(hPrivate, RGX_CR_CLK_CTRL0, RGX_CR_CLK_CTRL0_ALL_AUTO); + RGXWriteReg64(hPrivate, RGX_CR_CLK_CTRL1, RGX_CR_CLK_CTRL1_ALL_AUTO); + + if (bMetaFW) + { + RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_GARTEN_EN); + RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); + } + + return eError; +} + +PVRSRV_ERROR RGXStart(const void *hPrivate) +{ + RGX_LAYER_PARAMS *psParams = (RGX_LAYER_PARAMS*)hPrivate; + PVRSRV_RGXDEV_INFO *psDevInfo = psParams->psDevInfo; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_BOOL bDoFWSlaveBoot = IMG_FALSE; + IMG_CHAR *pcRGXFW_PROCESSOR; + IMG_BOOL bMetaFW = IMG_FALSE; + + if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR)) + { + pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_RISCV; + bMetaFW = IMG_FALSE; + bDoFWSlaveBoot = IMG_FALSE; + } + else + { + pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_META; + bMetaFW = IMG_TRUE; + bDoFWSlaveBoot = RGXDoFWSlaveBoot(hPrivate); + } + + /* Disable the default sys_bus_secure protection to perform minimal setup */ + RGXWriteReg32(hPrivate, RGX_CR_SYS_BUS_SECURE, 0); + (void) RGXReadReg32(hPrivate, RGX_CR_SYS_BUS_SECURE); + + /* Only bypass HMMU if the module is present */ + if (RGXDeviceHasFeature(hPrivate, RGX_FEATURE_HYPERVISOR_MMU_BIT_MASK)) + { + if (PVRSRV_VZ_MODE_IS(NATIVE)) + { + /* Always set HMMU in bypass mode */ + RGXWriteReg32(hPrivate, RGX_CR_HMMU_BYPASS, RGX_CR_HMMU_BYPASS_MASKFULL); + (void) RGXReadReg32(hPrivate, RGX_CR_HMMU_BYPASS); + } +#if defined(PVRSRV_VZ_BYPASS_HMMU) + if (PVRSRV_VZ_MODE_IS(HOST)) + { + /* Also set HMMU in bypass mode */ + 
RGXWriteReg32(hPrivate, RGX_CR_HMMU_BYPASS, RGX_CR_HMMU_BYPASS_MASKFULL); + (void) RGXReadReg32(hPrivate, RGX_CR_HMMU_BYPASS); + } +#endif + } + +#if defined(SUPPORT_VALIDATION) +#if !defined(RGX_CR_FIRMWARE_PROCESSOR_LS) +#define RGX_CR_FIRMWARE_PROCESSOR_LS (0x01A0U) +#define RGX_CR_FIRMWARE_PROCESSOR_LS_ENABLE_EN (0x00000001U) +#endif + { + if (psDevInfo->ui32ValidationFlags & RGX_VAL_LS_EN) + { + /* Set the dual LS mode */ + RGXWriteReg32(hPrivate, RGX_CR_FIRMWARE_PROCESSOR_LS, RGX_CR_FIRMWARE_PROCESSOR_LS_ENABLE_EN); + (void) RGXReadReg32(hPrivate, RGX_CR_FIRMWARE_PROCESSOR_LS); + } + } +#endif + + /*! + * Start series8 FW init sequence + */ + RGXResetSequence(hPrivate, pcRGXFW_PROCESSOR); + + if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, ECC_RAMS) > 0) + { + RGXCommentLog(hPrivate, "RGXStart: Init Jones ECC RAM"); + eError = InitJonesECCRAM(hPrivate); + if (eError != PVRSRV_OK) + { + return eError; + } + } + + if (RGX_DEVICE_HAS_BRN(hPrivate, BRN_66927)) + { + IMG_UINT64 ui64ClockCtrl; + + ui64ClockCtrl = RGXReadReg64(hPrivate, RGX_CR_CLK_CTRL0); + CLK_CTRL_FORCE_ON(ui64ClockCtrl, CLK_CTRL0_MCU_L0); + CLK_CTRL_FORCE_ON(ui64ClockCtrl, CLK_CTRL0_PM); + CLK_CTRL_FORCE_ON(ui64ClockCtrl, CLK_CTRL0_FBDC); + RGXWriteReg64(hPrivate, RGX_CR_CLK_CTRL0, ui64ClockCtrl); + + ui64ClockCtrl = RGXReadReg64(hPrivate, RGX_CR_CLK_CTRL1); + CLK_CTRL_FORCE_ON(ui64ClockCtrl, CLK_CTRL1_PIXEL); + CLK_CTRL_FORCE_ON(ui64ClockCtrl, CLK_CTRL1_GEO_VERTEX); + RGXWriteReg64(hPrivate, RGX_CR_CLK_CTRL1, ui64ClockCtrl); + } + + if (bMetaFW) + { + if (bDoFWSlaveBoot) + { + /* Configure META to Slave boot */ + RGXCommentLog(hPrivate, "RGXStart: META Slave boot"); + RGXWriteReg32(hPrivate, RGX_CR_META_BOOT, 0); + } + else + { + /* Configure META to Master boot */ + RGXCommentLog(hPrivate, "RGXStart: META Master boot"); + RGXWriteReg32(hPrivate, RGX_CR_META_BOOT, RGX_CR_META_BOOT_MODE_EN); + } + } + + /* + * Initialise Firmware wrapper + */ + if (bMetaFW) + { + RGXInitMetaProcWrapper(hPrivate); + } 
+ else + { + RGXInitRiscvProcWrapper(hPrivate); + } + + if (RGX_GET_FEATURE_VALUE(psDevInfo, MMU_VERSION) >= 4) + { + // initialise the MMU range based config registers for Non4K pages. + RGXInitMMURangeRegisters(hPrivate); + } + + RGXInitAXIACE(hPrivate); + /* + * Initialise BIF. + */ + RGXInitBIF(hPrivate); + + RGXCommentLog(hPrivate, "RGXStart: Take %s out of reset", pcRGXFW_PROCESSOR); + DeassertMetaReset(hPrivate); + + if (bMetaFW) + { + if (bDoFWSlaveBoot) + { + eError = RGXFabricCoherencyTest(hPrivate); + if (eError != PVRSRV_OK) return eError; + + RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Slave boot Start"); + eError = RGXStartFirmware(hPrivate); + if (eError != PVRSRV_OK) return eError; + } + else + { + RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Master boot Start"); + } + } + else + { + /* Bring Debug Module out of reset */ + RGXWriteReg32(hPrivate, RGX_CR_FWCORE_DMI_DMCONTROL, RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN); + + /* Boot the FW */ + RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Master boot Start"); + RGXWriteReg32(hPrivate, RGX_CR_FWCORE_BOOT, 1); + RGXWaitCycles(hPrivate, 32, 3); + } + +#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(SUPPORT_SECURITY_VALIDATION) + RGXCommentLog(hPrivate, "RGXStart: Enable sys_bus_secure"); + RGXWriteReg32(hPrivate, RGX_CR_SYS_BUS_SECURE, RGX_CR_SYS_BUS_SECURE_ENABLE_EN); + (void) RGXReadReg32(hPrivate, RGX_CR_SYS_BUS_SECURE); /* Fence write */ +#endif + + /*! 
+ * End series8 FW init sequence + */ + + return eError; +} + +PVRSRV_ERROR RGXStop(const void *hPrivate) +{ + RGX_LAYER_PARAMS *psParams = (RGX_LAYER_PARAMS*)hPrivate; + PVRSRV_RGXDEV_INFO *psDevInfo = psParams->psDevInfo; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_BOOL bMetaFW = RGX_DEVICE_HAS_FEATURE_VALUE(hPrivate, META); + IMG_UINT32 ui32JonesIdleMask = RGX_CR_JONES_IDLE_MASKFULL^RGX_CR_JONES_IDLE_AXI2IMG_EN; + + RGXDeviceAckIrq(hPrivate); + +#if defined(SUPPORT_VALIDATION) && !defined(TC_MEMORY_CONFIG) +#if !defined(RGX_CR_POWER_EVENT) +#define RGX_CR_POWER_EVENT (0x0038U) +#define RGX_CR_POWER_EVENT_GPU_MASK_CLRMSK (IMG_UINT64_C(0x00FFFFFFFFFFFFFF)) +#define RGX_CR_POWER_EVENT_GPU_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF1F)) +#define RGX_CR_POWER_EVENT_DOMAIN_SPU0_SHIFT (9U) +#define RGX_CR_POWER_EVENT_DOMAIN_CLUSTER0_SHIFT (8U) +#define RGX_CR_POWER_EVENT_TYPE_SHIFT (0U) +#define RGX_CR_POWER_EVENT_TYPE_POWER_DOWN (0x00000000U) +#define RGX_CR_POWER_EVENT_REQ_EN (0x00000002U) +#endif + + /* Power off any enabled SPUs */ + if (BITMASK_HAS(psDevInfo->ui32DeviceFlags, RGXKM_DEVICE_STATE_ENABLE_SPU_UNITS_POWER_MASK_CHANGE_EN)) + { + if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, POWER_ISLAND_VERSION) == 2) + { + IMG_UINT64 ui64PowUnitOffMask; + IMG_UINT64 ui64RegVal; + + ui64PowUnitOffMask = (1 << RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, NUM_CLUSTERS)) -1; + ui64RegVal = (~RGX_CR_POWER_EVENT_GPU_MASK_CLRMSK) | // GPU_MASK specifies all cores + (~RGX_CR_POWER_EVENT_GPU_ID_CLRMSK) | // GPU_ID all set means use the GPU_MASK + (ui64PowUnitOffMask << RGX_CR_POWER_EVENT_DOMAIN_CLUSTER0_SHIFT) | + RGX_CR_POWER_EVENT_TYPE_POWER_DOWN; + + RGXWriteReg64(hPrivate, + RGX_CR_POWER_EVENT, + ui64RegVal); + + RGXWriteReg64(hPrivate, + RGX_CR_POWER_EVENT, + ui64RegVal | RGX_CR_POWER_EVENT_REQ_EN); + } + else + { + IMG_UINT32 ui32PowUnitOffMask; + IMG_UINT32 ui32RegVal; + + ui32PowUnitOffMask = (1 << RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, NUM_SPU)) -1; + ui32RegVal = (ui32PowUnitOffMask 
<< RGX_CR_POWER_EVENT_DOMAIN_SPU0_SHIFT) | + RGX_CR_POWER_EVENT_TYPE_POWER_DOWN; + + RGXWriteReg32(hPrivate, + RGX_CR_POWER_EVENT, + ui32RegVal); + + RGXWriteReg32(hPrivate, + RGX_CR_POWER_EVENT, + ui32RegVal | RGX_CR_POWER_EVENT_REQ_EN); + } + + /* Poll on complete */ + eError = RGXPollReg32(hPrivate, + RGX_CR_EVENT_STATUS, + RGX_CR_EVENT_STATUS_POWER_COMPLETE_EN, + RGX_CR_EVENT_STATUS_POWER_COMPLETE_EN); + if (eError != PVRSRV_OK) return eError; + + /* Update the SPU_ENABLE mask */ + if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, POWER_ISLAND_VERSION) == 1) + { + RGXWriteReg32(hPrivate, RGX_CR_SPU_ENABLE, 0); + } + RGXWriteReg32(hPrivate, 0xF020, 0); + } +#endif + + /* Wait for Sidekick/Jones to signal IDLE except for the Garten Wrapper */ + if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, RAY_TRACING_ARCH) || + RGX_GET_FEATURE_VALUE(psDevInfo, RAY_TRACING_ARCH) < 2) + { + ui32JonesIdleMask ^= (RGX_CR_JONES_IDLE_ASC_EN|RGX_CR_JONES_IDLE_RCE_EN); + } + + eError = RGXPollReg32(hPrivate, + RGX_CR_JONES_IDLE, + ui32JonesIdleMask^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN), + ui32JonesIdleMask^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN)); + + if (eError != PVRSRV_OK) return eError; + + + /* Wait for SLC to signal IDLE */ + eError = RGXPollReg32(hPrivate, + RGX_CR_SLC_IDLE, + RGX_CR_SLC_IDLE_MASKFULL^(CR_IDLE_UNSELECTED_MASK), + RGX_CR_SLC_IDLE_MASKFULL^(CR_IDLE_UNSELECTED_MASK)); + if (eError != PVRSRV_OK) return eError; + + + /* Unset MTS DM association with threads */ + RGXWriteReg32(hPrivate, + RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC, + RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK + & RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL); + RGXWriteReg32(hPrivate, + RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC, + RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK + & RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL); + RGXWriteReg32(hPrivate, + RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC, + RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK + & 
RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL); + RGXWriteReg32(hPrivate, + RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC, + RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK + & RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL); + + +#if defined(PDUMP) + if (bMetaFW) + { + /* Disabling threads is only required for pdumps to stop the fw gracefully */ + + /* Disable thread 0 */ + eError = RGXWriteMetaRegThroughSP(hPrivate, + META_CR_T0ENABLE_OFFSET, + ~META_CR_TXENABLE_ENABLE_BIT); + if (eError != PVRSRV_OK) return eError; + + /* Disable thread 1 */ + eError = RGXWriteMetaRegThroughSP(hPrivate, + META_CR_T1ENABLE_OFFSET, + ~META_CR_TXENABLE_ENABLE_BIT); + if (eError != PVRSRV_OK) return eError; + + /* Wait for the Slave Port to finish all the transactions */ + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES)) + { + /* Clear down any irq raised by META (done after disabling the FW + * threads to avoid a race condition). + * This is only really needed for PDumps but we do it anyway driver-live. + */ + RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS__META_REGISTER_UNPACKED_ACCESSES, 0x0); + (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS__META_REGISTER_UNPACKED_ACCESSES); /* Fence write */ + + eError = RGXPollReg32(hPrivate, + RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES, + RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN + | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN); + } + else + { + /* Clear down any irq raised by META (done after disabling the FW + * threads to avoid a race condition). + * This is only really needed for PDumps but we do it anyway driver-live. 
+ */ + RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS, 0x0); + (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS); /* Fence write */ + + eError = RGXPollReg32(hPrivate, + RGX_CR_META_SP_MSLVCTRL1, + RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, + RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN); + } + if (eError != PVRSRV_OK) return eError; + } +#endif + + + eError = RGXPollReg64(hPrivate, + RGX_CR_SLC_STATUS1, + 0, + (~RGX_CR_SLC_STATUS1_BUS0_OUTSTANDING_READS_CLRMSK | + ~RGX_CR_SLC_STATUS1_BUS1_OUTSTANDING_READS_CLRMSK | + ~RGX_CR_SLC_STATUS1_BUS0_OUTSTANDING_WRITES_CLRMSK | + ~RGX_CR_SLC_STATUS1_BUS1_OUTSTANDING_WRITES_CLRMSK)); + if (eError != PVRSRV_OK) return eError; + + eError = RGXPollReg64(hPrivate, + RGX_CR_SLC_STATUS2, + 0, + (~RGX_CR_SLC_STATUS2_BUS2_OUTSTANDING_READS_CLRMSK | + ~RGX_CR_SLC_STATUS2_BUS3_OUTSTANDING_READS_CLRMSK | + ~RGX_CR_SLC_STATUS2_BUS2_OUTSTANDING_WRITES_CLRMSK | + ~RGX_CR_SLC_STATUS2_BUS3_OUTSTANDING_WRITES_CLRMSK)); + if (eError != PVRSRV_OK) return eError; + + + /* Wait for SLC to signal IDLE */ + eError = RGXPollReg32(hPrivate, + RGX_CR_SLC_IDLE, + RGX_CR_SLC_IDLE_MASKFULL^(CR_IDLE_UNSELECTED_MASK), + RGX_CR_SLC_IDLE_MASKFULL^(CR_IDLE_UNSELECTED_MASK)); + if (eError != PVRSRV_OK) return eError; + + + /* Wait for Jones to signal IDLE except for the Garten Wrapper */ + eError = RGXPollReg32(hPrivate, + RGX_CR_JONES_IDLE, + ui32JonesIdleMask^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN), + ui32JonesIdleMask^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN)); + + if (eError != PVRSRV_OK) return eError; + + + if (bMetaFW) + { + IMG_UINT32 ui32RegValue; + + eError = RGXReadMetaRegThroughSP(hPrivate, + META_CR_TxVECINT_BHALT, + &ui32RegValue); + if (eError != PVRSRV_OK) return eError; + + if ((ui32RegValue & 0xFFFFFFFFU) == 0x0) + { + /* Wait for Sidekick/Jones to signal IDLE including + * the Garten Wrapper if there is no debugger attached + * 
(TxVECINT_BHALT = 0x0) */ + eError = RGXPollReg32(hPrivate, + RGX_CR_JONES_IDLE, + ui32JonesIdleMask^RGX_CR_JONES_IDLE_SOCIF_EN, + ui32JonesIdleMask^RGX_CR_JONES_IDLE_SOCIF_EN); + if (eError != PVRSRV_OK) return eError; + } + } + else + { + eError = RGXPollReg32(hPrivate, + RGX_CR_JONES_IDLE, + ui32JonesIdleMask^RGX_CR_JONES_IDLE_SOCIF_EN, + ui32JonesIdleMask^RGX_CR_JONES_IDLE_SOCIF_EN); + if (eError != PVRSRV_OK) return eError; + } + + return eError; +} diff --git a/drivers/gpu/drm/phytium/octopus/rgxstartstop.h b/drivers/gpu/drm/phytium/octopus/rgxstartstop.h new file mode 100644 index 000000000000..1047bf004125 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxstartstop.h @@ -0,0 +1,84 @@ +/*************************************************************************/ /*! +@File +@Title RGX start/stop header file +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Header for the RGX start/stop functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(RGXSTARTSTOP_H) +#define RGXSTARTSTOP_H + +/* The routines declared here are built on top of an abstraction layer to + * hide DDK/OS-specific details in case they are used outside of the DDK + * (e.g. when DRM security is enabled). + * Any new dependency should be added to rgxlayer.h. + * Any new code should be built on top of the existing abstraction layer, + * which should be extended when necessary. + */ +#include "rgxlayer.h" + +/*! 
+******************************************************************************* + + @Function RGXStart + + @Description Perform GPU reset and initialisation + + @Input hPrivate : Implementation specific data + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXStart(const void *hPrivate); + +/*! +******************************************************************************* + + @Function RGXStop + + @Description Stop Rogue in preparation for power down + + @Input hPrivate : Implementation specific data + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXStop(const void *hPrivate); + +#endif /* RGXSTARTSTOP_H */ diff --git a/drivers/gpu/drm/phytium/octopus/rgxsyncutils.c b/drivers/gpu/drm/phytium/octopus/rgxsyncutils.c new file mode 100644 index 000000000000..e1ce1e8e606b --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxsyncutils.c @@ -0,0 +1,184 @@ +/*************************************************************************/ /*! +@File rgxsyncutils.c +@Title RGX Sync Utilities +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description RGX Sync helper functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#include "rgxsyncutils.h" + +#include "sync_server.h" +#include "sync_internal.h" +#include "sync.h" +#include "allocmem.h" + +#if defined(SUPPORT_BUFFER_SYNC) +#include "pvr_buffer_sync.h" +#endif + +#include "sync_checkpoint.h" +#include "sync_checkpoint_internal.h" + +//#define TA3D_CHECKPOINT_DEBUG + +#if defined(TA3D_CHECKPOINT_DEBUG) +#define CHKPT_DBG(X) PVR_DPF(X) +static +void _DebugSyncValues(IMG_UINT32 *pui32UpdateValues, + IMG_UINT32 ui32Count) +{ + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32UpdateValues; + + for (iii = 0; iii < ui32Count; iii++) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } +} +#else +#define CHKPT_DBG(X) +#endif + + +PVRSRV_ERROR RGXSyncAppendTimelineUpdate(IMG_UINT32 ui32FenceTimelineUpdateValue, + SYNC_ADDR_LIST *psSyncList, + SYNC_ADDR_LIST *psPRSyncList, + PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync, + RGX_SYNC_DATA *psSyncData, + IMG_BOOL bKick3D) +{ + IMG_UINT32 *pui32TimelineUpdateWOff = NULL; + IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL; + + IMG_UINT32 ui32ClientUpdateValueCount = psSyncData->ui32ClientUpdateValueCount; + + /* Space for original client updates, and the one new update */ + size_t uiUpdateSize = sizeof(*pui32IntAllocatedUpdateValues) * (ui32ClientUpdateValueCount + 1); + + if (!bKick3D) + { + /* Additional space for one PR update, only the newest one */ + uiUpdateSize += sizeof(*pui32IntAllocatedUpdateValues) * 1; + } + + CHKPT_DBG((PVR_DBG_ERROR, + "%s: About to allocate memory to hold updates in pui32IntAllocatedUpdateValues(<%p>)", + __func__, + (void*)pui32IntAllocatedUpdateValues)); + + /* Allocate memory to hold the list of update values (including our timeline update) */ + pui32IntAllocatedUpdateValues = OSAllocMem(uiUpdateSize); + if (!pui32IntAllocatedUpdateValues) + { + /* Failed to allocate 
memory */ + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xcc, uiUpdateSize); + pui32TimelineUpdateWOff = pui32IntAllocatedUpdateValues; + + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Copying %d %s update values into pui32IntAllocatedUpdateValues(<%p>)", + __func__, + ui32ClientUpdateValueCount, + bKick3D ? "TA/3D" : "TA/PR", + (void*)pui32IntAllocatedUpdateValues)); + /* Copy the update values into the new memory, then append our timeline update value */ + OSCachedMemCopy(pui32TimelineUpdateWOff, psSyncData->paui32ClientUpdateValue, ui32ClientUpdateValueCount * sizeof(*psSyncData->paui32ClientUpdateValue)); + +#if defined(TA3D_CHECKPOINT_DEBUG) + _DebugSyncValues(pui32TimelineUpdateWOff, ui32ClientUpdateValueCount); +#endif + + pui32TimelineUpdateWOff += ui32ClientUpdateValueCount; + } + + /* Now set the additional update value and append the timeline sync prim addr to either the + * render context 3D (or TA) update list + */ + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Appending the additional update value (0x%x) to psRenderContext->sSyncAddrList%sUpdate...", + __func__, + ui32FenceTimelineUpdateValue, + bKick3D ? 
"TA/3D" : "TA/PR")); + + /* Append the TA/3D update */ + { + *pui32TimelineUpdateWOff++ = ui32FenceTimelineUpdateValue; + psSyncData->ui32ClientUpdateValueCount++; + psSyncData->ui32ClientUpdateCount++; + SyncAddrListAppendSyncPrim(psSyncList, psFenceTimelineUpdateSync); + + if (!psSyncData->pauiClientUpdateUFOAddress) + { + psSyncData->pauiClientUpdateUFOAddress = psSyncList->pasFWAddrs; + } + /* Update paui32ClientUpdateValue to point to our new list of update values */ + psSyncData->paui32ClientUpdateValue = pui32IntAllocatedUpdateValues; + +#if defined(TA3D_CHECKPOINT_DEBUG) + _DebugSyncValues(pui32IntAllocatedUpdateValues, psSyncData->ui32ClientUpdateValueCount); +#endif + } + + if (!bKick3D) + { + /* Use the sSyncAddrList3DUpdate for PR (as it doesn't have one of its own) */ + *pui32TimelineUpdateWOff++ = ui32FenceTimelineUpdateValue; + psSyncData->ui32ClientPRUpdateValueCount = 1; + psSyncData->ui32ClientPRUpdateCount = 1; + SyncAddrListAppendSyncPrim(psPRSyncList, psFenceTimelineUpdateSync); + + if (!psSyncData->pauiClientPRUpdateUFOAddress) + { + psSyncData->pauiClientPRUpdateUFOAddress = psPRSyncList->pasFWAddrs; + } + /* Update paui32ClientPRUpdateValue to point to our new list of update values */ + psSyncData->paui32ClientPRUpdateValue = &pui32IntAllocatedUpdateValues[psSyncData->ui32ClientUpdateValueCount]; + +#if defined(TA3D_CHECKPOINT_DEBUG) + _DebugSyncValues(psSyncData->paui32ClientPRUpdateValue, psSyncData->ui32ClientPRUpdateValueCount); +#endif + } + + /* Do not free the old psSyncData->ui32ClientUpdateValueCount, + * as it was constant data passed through the bridge down to PVRSRVRGXKickTA3DKM() */ + + return PVRSRV_OK; +} diff --git a/drivers/gpu/drm/phytium/octopus/rgxsyncutils.h b/drivers/gpu/drm/phytium/octopus/rgxsyncutils.h new file mode 100644 index 000000000000..ec82454b75d9 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxsyncutils.h @@ -0,0 +1,76 @@ +/*************************************************************************/ /*! 
+@File rgxsyncutils.h +@Title RGX Sync Utilities +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description RGX Sync helper functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXSYNCUTILS_H +#define RGXSYNCUTILS_H + +#include "rgxdevice.h" +#include "sync_server.h" +#include "rgxdebug.h" +#include "rgx_fwif_km.h" + +typedef struct _RGX_SYNC_DATA_ +{ + PRGXFWIF_UFO_ADDR *pauiClientUpdateUFOAddress; + IMG_UINT32 *paui32ClientUpdateValue; + IMG_UINT32 ui32ClientUpdateValueCount; + IMG_UINT32 ui32ClientUpdateCount; + + PRGXFWIF_UFO_ADDR *pauiClientPRUpdateUFOAddress; + IMG_UINT32 *paui32ClientPRUpdateValue; + IMG_UINT32 ui32ClientPRUpdateValueCount; + IMG_UINT32 ui32ClientPRUpdateCount; +} RGX_SYNC_DATA; + +PVRSRV_ERROR RGXSyncAppendTimelineUpdate(IMG_UINT32 ui32FenceTimelineUpdateValue, + SYNC_ADDR_LIST *psSyncList, + SYNC_ADDR_LIST *psPRSyncList, + PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync, + RGX_SYNC_DATA *psSyncData, + IMG_BOOL bKick3D); + +#endif /* RGXSYNCUTILS_H */ + +/****************************************************************************** + End of file (rgxsyncutils.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgxta3d.c b/drivers/gpu/drm/phytium/octopus/rgxta3d.c new file mode 100644 index 000000000000..c5d322932959 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxta3d.c @@ -0,0 +1,5481 @@ +/*************************************************************************/ /*! 
+@File +@Title RGX TA/3D routines +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description RGX TA/3D routines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +/* for the offsetof macro */ +#if defined(__linux__) +#include +#else +#include +#endif + +#include "pdump_km.h" +#include "pvr_debug.h" +#include "rgxutils.h" +#include "rgxfwutils.h" +#include "rgxta3d.h" +#include "rgxmem.h" +#include "allocmem.h" +#include "devicemem.h" +#include "devicemem_pdump.h" +#include "osfunc.h" +#include "pvrsrv.h" +#include "rgx_memallocflags.h" +#include "rgxccb.h" +#include "rgxhwperf.h" +#include "ospvr_gputrace.h" +#include "rgxsyncutils.h" +#include "htbuffer.h" + +#include "rgxdefs_km.h" +#include "rgx_fwif_km.h" +#include "physmem.h" +#include "sync_server.h" +#include "sync_internal.h" +#include "sync.h" +#include "process_stats.h" + +#include "rgxpmdefs.h" + +#include "rgxtimerquery.h" + +#if defined(SUPPORT_BUFFER_SYNC) +#include "pvr_buffer_sync.h" +#endif + +#include "sync_checkpoint.h" +#include "sync_checkpoint_internal.h" + +#if defined(SUPPORT_PDVFS) +#include "rgxpdvfs.h" +#endif + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) +#include "rgxworkest.h" + +#define HASH_CLEAN_LIMIT 6 +#endif + +/* Enable this to dump the compiled list of UFOs prior to kick call */ +#define ENABLE_TA3D_UFO_DUMP 0 + +//#define TA3D_CHECKPOINT_DEBUG + +#if defined(TA3D_CHECKPOINT_DEBUG) +#define CHKPT_DBG(X) PVR_DPF(X) +static INLINE +void _DebugSyncValues(const IMG_CHAR *pszFunction, + const IMG_UINT32 *pui32UpdateValues, + const 
IMG_UINT32 ui32Count) +{ + IMG_UINT32 i; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32UpdateValues; + + for (i = 0; i < ui32Count; i++) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", pszFunction, i, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } +} + +static INLINE +void _DebugSyncCheckpoints(const IMG_CHAR *pszFunction, + const IMG_CHAR *pszDMName, + const PSYNC_CHECKPOINT *apsSyncCheckpoints, + const IMG_UINT32 ui32Count) +{ + IMG_UINT32 i; + + for (i = 0; i < ui32Count; i++) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: apsFence%sSyncCheckpoints[%d]=<%p>", pszFunction, pszDMName, i, *(apsSyncCheckpoints + i))); + } +} + +#else +#define CHKPT_DBG(X) +#endif + +/* define the number of commands required to be set up by the CCB helper */ +/* 1 command for the TA */ +#define CCB_CMD_HELPER_NUM_TA_COMMANDS 1 +/* Up to 3 commands for the 3D (partial render fence, partial render, and render) */ +#define CCB_CMD_HELPER_NUM_3D_COMMANDS 3 + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) +#define WORKEST_CYCLES_PREDICTION_GET(x) ((x).ui64CyclesPrediction) +#else +#define WORKEST_CYCLES_PREDICTION_GET(x) (NO_CYCEST) +#endif + +typedef struct { + DEVMEM_MEMDESC *psContextStateMemDesc; + RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; + IMG_UINT32 ui32Priority; +} RGX_SERVER_RC_TA_DATA; + +typedef struct { + DEVMEM_MEMDESC *psContextStateMemDesc; + RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; + IMG_UINT32 ui32Priority; +} RGX_SERVER_RC_3D_DATA; + +struct _RGX_SERVER_RENDER_CONTEXT_ { + /* this lock protects usage of the render context. 
+ * it ensures only one kick is being prepared and/or submitted on + * this render context at any time + */ + POS_LOCK hLock; + RGX_CCB_CMD_HELPER_DATA asTACmdHelperData[CCB_CMD_HELPER_NUM_TA_COMMANDS]; + RGX_CCB_CMD_HELPER_DATA as3DCmdHelperData[CCB_CMD_HELPER_NUM_3D_COMMANDS]; + PVRSRV_DEVICE_NODE *psDeviceNode; + DEVMEM_MEMDESC *psFWRenderContextMemDesc; + DEVMEM_MEMDESC *psFWFrameworkMemDesc; + RGX_SERVER_RC_TA_DATA sTAData; + RGX_SERVER_RC_3D_DATA s3DData; + IMG_UINT32 ui32CleanupStatus; +#define RC_CLEANUP_TA_COMPLETE (1 << 0) +#define RC_CLEANUP_3D_COMPLETE (1 << 1) + DLLIST_NODE sListNode; + SYNC_ADDR_LIST sSyncAddrListTAFence; + SYNC_ADDR_LIST sSyncAddrListTAUpdate; + SYNC_ADDR_LIST sSyncAddrList3DFence; + SYNC_ADDR_LIST sSyncAddrList3DUpdate; + ATOMIC_T hIntJobRef; +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + WORKEST_HOST_DATA sWorkEstData; +#endif +#if defined(SUPPORT_BUFFER_SYNC) + struct pvr_buffer_sync_context *psBufferSyncContext; +#endif +}; + + +/* + Static functions used by render context code +*/ + +static +PVRSRV_ERROR _DestroyTAContext(RGX_SERVER_RC_TA_DATA *psTAData, + PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + + /* Check if the FW has finished with this resource ... */ + eError = RGXFWRequestCommonContextCleanUp(psDeviceNode, + psTAData->psServerCommonContext, + RGXFWIF_DM_GEOM, + PDUMP_FLAGS_CONTINUOUS); + if (eError == PVRSRV_ERROR_RETRY) + { + return eError; + } + else if (eError != PVRSRV_OK) + { + PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", + __func__, + PVRSRVGetErrorString(eError))); + return eError; + } + + /* ... 
it has so we can free its resources */ + FWCommonContextFree(psTAData->psServerCommonContext); + DevmemFwUnmapAndFree(psDeviceNode->pvDevice, psTAData->psContextStateMemDesc); + psTAData->psServerCommonContext = NULL; + return PVRSRV_OK; +} + +static +PVRSRV_ERROR _Destroy3DContext(RGX_SERVER_RC_3D_DATA *ps3DData, + PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + + /* Check if the FW has finished with this resource ... */ + eError = RGXFWRequestCommonContextCleanUp(psDeviceNode, + ps3DData->psServerCommonContext, + RGXFWIF_DM_3D, + PDUMP_FLAGS_CONTINUOUS); + if (eError == PVRSRV_ERROR_RETRY) + { + return eError; + } + else if (eError != PVRSRV_OK) + { + PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", + __func__, + PVRSRVGetErrorString(eError))); + return eError; + } + + /* ... it has so we can free its resources */ + FWCommonContextFree(ps3DData->psServerCommonContext); + DevmemFwUnmapAndFree(psDeviceNode->pvDevice, ps3DData->psContextStateMemDesc); + ps3DData->psServerCommonContext = NULL; + return PVRSRV_OK; +} + +static void _RGXDumpPMRPageList(DLLIST_NODE *psNode) +{ + RGX_PMR_NODE *psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock); + PVRSRV_ERROR eError; + + eError = PMRDumpPageList(psPMRNode->psPMR, + RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "Error (%s) printing pmr %p", + PVRSRVGetErrorString(eError), + psPMRNode->psPMR)); + } +} + +IMG_BOOL RGXDumpFreeListPageList(RGX_FREELIST *psFreeList) +{ + DLLIST_NODE *psNode, *psNext; + + PVR_LOG(("Freelist FWAddr 0x%08x, ID = %d, CheckSum 0x%016" IMG_UINT64_FMTSPECx, + psFreeList->sFreeListFWDevVAddr.ui32Addr, + psFreeList->ui32FreelistID, + psFreeList->ui64FreelistChecksum)); + + /* Dump Init FreeList page list */ + PVR_LOG((" Initial Memory block")); + dllist_foreach_node(&psFreeList->sMemoryBlockInitHead, psNode, psNext) + { + _RGXDumpPMRPageList(psNode); + } + + /* Dump Grow FreeList page list */ + 
PVR_LOG((" Grow Memory blocks")); + dllist_foreach_node(&psFreeList->sMemoryBlockHead, psNode, psNext) + { + _RGXDumpPMRPageList(psNode); + } + + return IMG_TRUE; +} + +static void _CheckFreelist(RGX_FREELIST *psFreeList, + IMG_UINT32 ui32NumOfPagesToCheck, + IMG_UINT64 ui64ExpectedCheckSum, + IMG_UINT64 *pui64CalculatedCheckSum) +{ +#if defined(NO_HARDWARE) + /* No checksum needed as we have all information in the pdumps */ + PVR_UNREFERENCED_PARAMETER(psFreeList); + PVR_UNREFERENCED_PARAMETER(ui32NumOfPagesToCheck); + PVR_UNREFERENCED_PARAMETER(ui64ExpectedCheckSum); + *pui64CalculatedCheckSum = 0; +#else + PVRSRV_ERROR eError; + size_t uiNumBytes; + IMG_UINT8* pui8Buffer; + IMG_UINT32* pui32Buffer; + IMG_UINT32 ui32CheckSumAdd = 0; + IMG_UINT32 ui32CheckSumXor = 0; + IMG_UINT32 ui32Entry; + IMG_UINT32 ui32Entry2; + IMG_BOOL bFreelistBad = IMG_FALSE; + + *pui64CalculatedCheckSum = 0; + + PVR_ASSERT(ui32NumOfPagesToCheck <= (psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages)); + + /* Allocate Buffer of the size of the freelist */ + pui8Buffer = OSAllocMem(ui32NumOfPagesToCheck * sizeof(IMG_UINT32)); + if (pui8Buffer == NULL) + { + PVR_LOG(("%s: Failed to allocate buffer to check freelist %p!", + __func__, psFreeList)); + PVR_ASSERT(0); + return; + } + + /* Copy freelist content into Buffer */ + eError = PMR_ReadBytes(psFreeList->psFreeListPMR, + psFreeList->uiFreeListPMROffset + + (((psFreeList->ui32MaxFLPages - + psFreeList->ui32CurrentFLPages - psFreeList->ui32ReadyFLPages) * sizeof(IMG_UINT32)) & + ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1)), + pui8Buffer, + ui32NumOfPagesToCheck * sizeof(IMG_UINT32), + &uiNumBytes); + if (eError != PVRSRV_OK) + { + OSFreeMem(pui8Buffer); + PVR_LOG(("%s: Failed to get freelist data for freelist %p!", + __func__, psFreeList)); + PVR_ASSERT(0); + return; + } + + PVR_ASSERT(uiNumBytes == ui32NumOfPagesToCheck * sizeof(IMG_UINT32)); + + /* Generate checksum (skipping the first page if not allocated) */ + 
pui32Buffer = (IMG_UINT32 *)pui8Buffer; + ui32Entry = ((psFreeList->ui32GrowFLPages == 0 && psFreeList->ui32CurrentFLPages > 1) ? 1 : 0); + for (/*ui32Entry*/ ; ui32Entry < ui32NumOfPagesToCheck; ui32Entry++) + { + ui32CheckSumAdd += pui32Buffer[ui32Entry]; + ui32CheckSumXor ^= pui32Buffer[ui32Entry]; + + /* Check for double entries */ + for (ui32Entry2 = ui32Entry+1; ui32Entry2 < ui32NumOfPagesToCheck; ui32Entry2++) + { + if (pui32Buffer[ui32Entry] == pui32Buffer[ui32Entry2]) + { + PVR_LOG(("%s: Freelist consistency failure: FW addr: 0x%08X, Double entry found 0x%08x on idx: %d and %d of %d", + __func__, + psFreeList->sFreeListFWDevVAddr.ui32Addr, + pui32Buffer[ui32Entry2], + ui32Entry, + ui32Entry2, + psFreeList->ui32CurrentFLPages)); + bFreelistBad = IMG_TRUE; + break; + } + } + } + + OSFreeMem(pui8Buffer); + + /* Check the calculated checksum against the expected checksum... */ + *pui64CalculatedCheckSum = ((IMG_UINT64)ui32CheckSumXor << 32) | ui32CheckSumAdd; + + if (ui64ExpectedCheckSum != 0 && ui64ExpectedCheckSum != *pui64CalculatedCheckSum) + { + PVR_LOG(("%s: Checksum mismatch for freelist %p! Expected 0x%016llx calculated 0x%016llx", + __func__, psFreeList, ui64ExpectedCheckSum, *pui64CalculatedCheckSum)); + bFreelistBad = IMG_TRUE; + } + + if (bFreelistBad) + { + PVR_LOG(("%s: Sleeping for ever!", __func__)); + PVR_ASSERT(!bFreelistBad); + } +#endif +} + + +/* + * Function to work out the number of freelist pages to reserve for growing + * within the FW without having to wait for the host to progress a grow + * request. + * + * The number of pages must be a multiple of 4 to align the PM addresses + * for the initial freelist allocation and also be less than the grow size. + * + * If the threshold or grow size means less than 4 pages, then the feature + * is not used. 
+ */ +static IMG_UINT32 _CalculateFreelistReadyPages(RGX_FREELIST *psFreeList, + IMG_UINT32 ui32FLPages) +{ + IMG_UINT32 ui32ReadyFLPages = ((ui32FLPages * psFreeList->ui32GrowThreshold) / 100) & + ~((RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE/sizeof(IMG_UINT32))-1); + + if (ui32ReadyFLPages > psFreeList->ui32GrowFLPages) + { + ui32ReadyFLPages = psFreeList->ui32GrowFLPages; + } + + return ui32ReadyFLPages; +} + + +PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList, + IMG_UINT32 ui32NumPages, + PDLLIST_NODE pListHeader) +{ + RGX_PMR_NODE *psPMRNode; + IMG_DEVMEM_SIZE_T uiSize; + IMG_UINT32 ui32MappingTable = 0; + IMG_DEVMEM_OFFSET_T uiOffset; + IMG_DEVMEM_SIZE_T uiLength; + IMG_DEVMEM_SIZE_T uistartPage; + PVRSRV_ERROR eError; + static const IMG_CHAR szAllocName[] = "Free List"; + + /* Are we allowed to grow ? */ + if (psFreeList->ui32MaxFLPages - (psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages) < ui32NumPages) + { + PVR_DPF((PVR_DBG_WARNING, + "Freelist [0x%p]: grow by %u pages denied. " + "Max PB size reached (current pages %u+%u/%u)", + psFreeList, + ui32NumPages, + psFreeList->ui32CurrentFLPages, + psFreeList->ui32ReadyFLPages, + psFreeList->ui32MaxFLPages)); + return PVRSRV_ERROR_PBSIZE_ALREADY_MAX; + } + + /* Allocate kernel memory block structure */ + psPMRNode = OSAllocMem(sizeof(*psPMRNode)); + if (psPMRNode == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed to allocate host data structure", + __func__)); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto ErrorAllocHost; + } + + /* + * Lock protects simultaneous manipulation of: + * - the memory block list + * - the freelist's ui32CurrentFLPages + */ + OSLockAcquire(psFreeList->psDevInfo->hLockFreeList); + + + /* + * The PM never takes the last page in a freelist, so if this block + * of pages is the first one and there is no ability to grow, then + * we can skip allocating one 4K page for the lowest entry. 
+ */ + if (OSGetPageSize() > RGX_BIF_PM_PHYSICAL_PAGE_SIZE) + { + /* + * Allocation size will be rounded up to the OS page size, + * any attempt to change it a bit now will be invalidated later. + */ + psPMRNode->bFirstPageMissing = IMG_FALSE; + } + else + { + psPMRNode->bFirstPageMissing = (psFreeList->ui32GrowFLPages == 0 && ui32NumPages > 1); + } + + psPMRNode->ui32NumPages = ui32NumPages; + psPMRNode->psFreeList = psFreeList; + + /* Allocate Memory Block */ + PDUMPCOMMENT("Allocate PB Block (Pages %08X)", ui32NumPages); + uiSize = (IMG_DEVMEM_SIZE_T)ui32NumPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE; + if (psPMRNode->bFirstPageMissing) + { + uiSize -= RGX_BIF_PM_PHYSICAL_PAGE_SIZE; + } + eError = PhysmemNewRamBackedPMR(psFreeList->psConnection, + psFreeList->psDevInfo->psDeviceNode, + uiSize, + uiSize, + 1, + 1, + &ui32MappingTable, + RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT, + PVRSRV_MEMALLOCFLAG_GPU_READABLE, + sizeof(szAllocName), + szAllocName, + psFreeList->ownerPid, + &psPMRNode->psPMR, + PDUMP_NONE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate PB block of size: 0x%016" IMG_UINT64_FMTSPECX, + __func__, + (IMG_UINT64)uiSize)); + goto ErrorBlockAlloc; + } + + /* Zeroing physical pages pointed by the PMR */ + if (psFreeList->psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_ZERO_FREELIST) + { + eError = PMRZeroingPMR(psPMRNode->psPMR, RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to zero PMR %p of freelist %p (%s)", + __func__, + psPMRNode->psPMR, + psFreeList, + PVRSRVGetErrorString(eError))); + PVR_ASSERT(0); + } + } + + uiLength = psPMRNode->ui32NumPages * sizeof(IMG_UINT32); + uistartPage = (psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages - psPMRNode->ui32NumPages); + uiOffset = psFreeList->uiFreeListPMROffset + ((uistartPage * sizeof(IMG_UINT32)) & ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1)); + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + 
+ eError = RIWritePMREntryWithOwnerKM(psPMRNode->psPMR, + psFreeList->ownerPid); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: call to RIWritePMREntryWithOwnerKM failed (%s)", + __func__, + PVRSRVGetErrorString(eError))); + } + + /* Attach RI information */ + eError = RIWriteMEMDESCEntryKM(psPMRNode->psPMR, + OSStringNLength(szAllocName, DEVMEM_ANNOTATION_MAX_LEN), + szAllocName, + 0, + uiSize, + IMG_FALSE, + IMG_FALSE, + &psPMRNode->hRIHandle); + PVR_LOG_IF_ERROR(eError, "RIWriteMEMDESCEntryKM"); + +#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ + + /* write Freelist with Memory Block physical addresses */ + eError = PMRWritePMPageList( + /* Target PMR, offset, and length */ + psFreeList->psFreeListPMR, + (psPMRNode->bFirstPageMissing ? uiOffset + sizeof(IMG_UINT32) : uiOffset), + (psPMRNode->bFirstPageMissing ? uiLength - sizeof(IMG_UINT32) : uiLength), + /* Referenced PMR, and "page" granularity */ + psPMRNode->psPMR, + RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT, + &psPMRNode->psPageList); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to write pages of Node %p", + __func__, + psPMRNode)); + goto ErrorPopulateFreelist; + } + +#if defined(SUPPORT_SHADOW_FREELISTS) + /* Copy freelist memory to shadow freelist */ + { + const IMG_UINT32 ui32FLMaxSize = psFreeList->ui32MaxFLPages * sizeof(IMG_UINT32); + const IMG_UINT32 ui32MapSize = ui32FLMaxSize * 2; + const IMG_UINT32 ui32CopyOffset = uiOffset - psFreeList->uiFreeListPMROffset; + IMG_BYTE *pFLMapAddr; + size_t uiNumBytes; + PVRSRV_ERROR res; + IMG_HANDLE hMapHandle; + + /* Map both the FL and the shadow FL */ + res = PMRAcquireKernelMappingData(psFreeList->psFreeListPMR, psFreeList->uiFreeListPMROffset, ui32MapSize, + (void**) &pFLMapAddr, &uiNumBytes, &hMapHandle); + if (res != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map freelist (ID=%d)", + __func__, + psFreeList->ui32FreelistID)); + goto ErrorPopulateFreelist; + } + + /* Copy only the newly added 
memory */ + OSCachedMemCopy(pFLMapAddr + ui32FLMaxSize + ui32CopyOffset, pFLMapAddr + ui32CopyOffset , uiLength); + + if (PVRSRV_CHECK_CPU_WRITE_COMBINE(PMR_Flags(psFreeList->psFreeListPMR))) + { + OSWriteMemoryBarrier(); + } + +#if defined(PDUMP) + PDUMPCOMMENT("Initialize shadow freelist"); + + /* Translate memcpy to pdump */ + { + IMG_DEVMEM_OFFSET_T uiCurrOffset; + + for (uiCurrOffset = uiOffset; (uiCurrOffset - uiOffset) < uiLength; uiCurrOffset += sizeof(IMG_UINT32)) + { + PMRPDumpCopyMem32(psFreeList->psFreeListPMR, + uiCurrOffset + ui32FLMaxSize, + psFreeList->psFreeListPMR, + uiCurrOffset, + ":SYSMEM:$1", + PDUMP_FLAGS_CONTINUOUS); + } + + if (PVRSRV_CHECK_CPU_WRITE_COMBINE(PMR_Flags(psFreeList->psFreeListPMR))) + { + OSWriteMemoryBarrier(); + } + } +#endif + + + res = PMRReleaseKernelMappingData(psFreeList->psFreeListPMR, hMapHandle); + + if (res != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to release freelist mapping (ID=%d)", + __func__, + psFreeList->ui32FreelistID)); + goto ErrorPopulateFreelist; + } + } +#endif + + /* We add It must be added to the tail, otherwise the freelist population won't work */ + dllist_add_to_head(pListHeader, &psPMRNode->sMemoryBlock); + + /* Update number of available pages */ + psFreeList->ui32CurrentFLPages += ui32NumPages; + + /* Update statistics (needs to happen before the ReadyFL calculation to also count those pages) */ + if (psFreeList->ui32NumHighPages < psFreeList->ui32CurrentFLPages) + { + psFreeList->ui32NumHighPages = psFreeList->ui32CurrentFLPages; + } + + /* Reserve a number ready pages to allow the FW to process OOM quickly and asynchronously request a grow. */ + psFreeList->ui32ReadyFLPages = _CalculateFreelistReadyPages(psFreeList, psFreeList->ui32CurrentFLPages); + psFreeList->ui32CurrentFLPages -= psFreeList->ui32ReadyFLPages; + + if (psFreeList->bCheckFreelist) + { + /* + * We can only calculate the freelist checksum when the list is full + * (e.g. at initial creation time). 
At other times the checksum cannot + * be calculated and has to be disabled for this freelist. + */ + if ((psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages) == ui32NumPages) + { + _CheckFreelist(psFreeList, ui32NumPages, 0, &psFreeList->ui64FreelistChecksum); + } + else + { + psFreeList->ui64FreelistChecksum = 0; + } + } + OSLockRelease(psFreeList->psDevInfo->hLockFreeList); + + PVR_DPF((PVR_DBG_MESSAGE, "Freelist [%p]: %s %u pages (pages=%u+%u/%u checksum=0x%016" IMG_UINT64_FMTSPECx "%s)", + psFreeList, + ((psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages) == ui32NumPages ? "Create initial" : "Grow by"), + ui32NumPages, + psFreeList->ui32CurrentFLPages, + psFreeList->ui32ReadyFLPages, + psFreeList->ui32MaxFLPages, + psFreeList->ui64FreelistChecksum, + (psPMRNode->bFirstPageMissing ? " - lowest page not allocated" : ""))); + + return PVRSRV_OK; + + /* Error handling */ +ErrorPopulateFreelist: + PMRUnrefPMR(psPMRNode->psPMR); + +ErrorBlockAlloc: + OSFreeMem(psPMRNode); + OSLockRelease(psFreeList->psDevInfo->hLockFreeList); + +ErrorAllocHost: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; + +} + +static PVRSRV_ERROR RGXShrinkFreeList(PDLLIST_NODE pListHeader, + RGX_FREELIST *psFreeList) +{ + DLLIST_NODE *psNode; + RGX_PMR_NODE *psPMRNode; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 ui32OldValue; + + /* + * Lock protects simultaneous manipulation of: + * - the memory block list + * - the freelist's ui32CurrentFLPages value + */ + PVR_ASSERT(pListHeader); + PVR_ASSERT(psFreeList); + PVR_ASSERT(psFreeList->psDevInfo); + PVR_ASSERT(psFreeList->psDevInfo->hLockFreeList); + + OSLockAcquire(psFreeList->psDevInfo->hLockFreeList); + + /* Get node from head of list and remove it */ + psNode = dllist_get_next_node(pListHeader); + if (psNode) + { + dllist_remove_node(psNode); + + psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock); + PVR_ASSERT(psPMRNode); + PVR_ASSERT(psPMRNode->psPMR); + PVR_ASSERT(psPMRNode->psFreeList); + + /* 
remove block from freelist list */ + + /* Unwrite Freelist with Memory Block physical addresses */ + eError = PMRUnwritePMPageList(psPMRNode->psPageList); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to unwrite pages of Node %p", + __func__, + psPMRNode)); + PVR_ASSERT(IMG_FALSE); + } + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + + if (psPMRNode->hRIHandle) + { + PVRSRV_ERROR eError; + + eError = RIDeleteMEMDESCEntryKM(psPMRNode->hRIHandle); + PVR_LOG_IF_ERROR(eError, "RIDeleteMEMDESCEntryKM"); + } + +#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ + + /* Free PMR (We should be the only one that holds a ref on the PMR) */ + eError = PMRUnrefPMR(psPMRNode->psPMR); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to free PB block %p (%s)", + __func__, + psPMRNode->psPMR, + PVRSRVGetErrorString(eError))); + PVR_ASSERT(IMG_FALSE); + } + + /* update available pages in freelist */ + ui32OldValue = psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages; + + /* + * Deallocated pages should first be deducted from ReadyPages bank, once + * there are no more left, start deducting them from CurrentPage bank. + */ + if (psPMRNode->ui32NumPages > psFreeList->ui32ReadyFLPages) + { + psFreeList->ui32CurrentFLPages -= psPMRNode->ui32NumPages - psFreeList->ui32ReadyFLPages; + psFreeList->ui32ReadyFLPages = 0; + } + else + { + psFreeList->ui32ReadyFLPages -= psPMRNode->ui32NumPages; + } + + /* check underflow */ + PVR_ASSERT(ui32OldValue > (psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages)); + + PVR_DPF((PVR_DBG_MESSAGE, "Freelist [%p]: shrink by %u pages (current pages %u/%u)", + psFreeList, + psPMRNode->ui32NumPages, + psFreeList->ui32CurrentFLPages, + psFreeList->ui32MaxFLPages)); + + OSFreeMem(psPMRNode); + } + else + { + PVR_DPF((PVR_DBG_WARNING, + "Freelist [0x%p]: shrink denied. 
PB already at initial PB size (%u pages)", + psFreeList, + psFreeList->ui32InitFLPages)); + eError = PVRSRV_ERROR_PBSIZE_ALREADY_MIN; + } + + OSLockRelease(psFreeList->psDevInfo->hLockFreeList); + + return eError; +} + +static RGX_FREELIST *FindFreeList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FreelistID) +{ + DLLIST_NODE *psNode, *psNext; + RGX_FREELIST *psFreeList = NULL; + + OSLockAcquire(psDevInfo->hLockFreeList); + + dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext) + { + RGX_FREELIST *psThisFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode); + + if (psThisFreeList->ui32FreelistID == ui32FreelistID) + { + psFreeList = psThisFreeList; + break; + } + } + + OSLockRelease(psDevInfo->hLockFreeList); + return psFreeList; +} + +void RGXProcessRequestGrow(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32FreelistID) +{ + RGX_FREELIST *psFreeList = NULL; + RGXFWIF_KCCB_CMD s3DCCBCmd; + IMG_UINT32 ui32GrowValue; + PVRSRV_ERROR eError; + + PVR_ASSERT(psDevInfo); + + psFreeList = FindFreeList(psDevInfo, ui32FreelistID); + if (psFreeList == NULL) + { + /* Should never happen */ + PVR_DPF((PVR_DBG_ERROR, + "FreeList Lookup for FreeList ID 0x%08x failed (Populate)", + ui32FreelistID)); + PVR_ASSERT(IMG_FALSE); + + return; + } + + /* Since the FW made the request, it has already consumed the ready pages, update the host struct */ + psFreeList->ui32CurrentFLPages += psFreeList->ui32ReadyFLPages; + psFreeList->ui32ReadyFLPages = 0; + + + /* Try to grow the freelist */ + eError = RGXGrowFreeList(psFreeList, + psFreeList->ui32GrowFLPages, + &psFreeList->sMemoryBlockHead); + + if (eError == PVRSRV_OK) + { + /* Grow successful, return size of grow size */ + ui32GrowValue = psFreeList->ui32GrowFLPages; + + psFreeList->ui32NumGrowReqByFW++; + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + /* Update Stats */ + PVRSRVStatsUpdateFreelistStats(0, + 1, /* Add 1 to the appropriate counter (Requests by FW) */ + psFreeList->ui32InitFLPages, + psFreeList->ui32NumHighPages, 
+ psFreeList->ownerPid); + +#endif + + } + else + { + /* Grow failed */ + ui32GrowValue = 0; + PVR_DPF((PVR_DBG_ERROR, + "Grow for FreeList %p failed (%s)", + psFreeList, + PVRSRVGetErrorString(eError))); + } + + /* send feedback */ + s3DCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE; + s3DCCBCmd.uCmdData.sFreeListGSData.sFreeListFWDevVAddr.ui32Addr = psFreeList->sFreeListFWDevVAddr.ui32Addr; + s3DCCBCmd.uCmdData.sFreeListGSData.ui32DeltaPages = ui32GrowValue; + s3DCCBCmd.uCmdData.sFreeListGSData.ui32NewPages = psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages; + s3DCCBCmd.uCmdData.sFreeListGSData.ui32ReadyPages = psFreeList->ui32ReadyFLPages; + + + PVR_DPF((PVR_DBG_MESSAGE, "Freelist [%p]: Grow pages=%u, new pages=%u, ready pages=%u, counter=%d", + psFreeList, + ui32GrowValue, + s3DCCBCmd.uCmdData.sFreeListGSData.ui32NewPages, + s3DCCBCmd.uCmdData.sFreeListGSData.ui32ReadyPages, + psFreeList->ui32NumGrowReqByFW)); + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psDevInfo, + RGXFWIF_DM_3D, + &s3DCCBCmd, + 0, + PDUMP_FLAGS_NONE); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + /* Kernel CCB should never fill up, as the FW is processing them right away */ + + PVR_ASSERT(eError == PVRSRV_OK); +} + +static void _RGXFreeListReconstruction(PDLLIST_NODE psNode) +{ + + PVRSRV_RGXDEV_INFO *psDevInfo; + RGX_FREELIST *psFreeList; + RGX_PMR_NODE *psPMRNode; + PVRSRV_ERROR eError; + IMG_DEVMEM_OFFSET_T uiOffset; + IMG_DEVMEM_SIZE_T uiLength; + IMG_UINT32 ui32StartPage; + + psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock); + psFreeList = psPMRNode->psFreeList; + PVR_ASSERT(psFreeList); + psDevInfo = psFreeList->psDevInfo; + PVR_ASSERT(psDevInfo); + + uiLength = psPMRNode->ui32NumPages * sizeof(IMG_UINT32); + ui32StartPage = (psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages - psPMRNode->ui32NumPages); + uiOffset = 
psFreeList->uiFreeListPMROffset + ((ui32StartPage * sizeof(IMG_UINT32)) & ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1)); + + PMRUnwritePMPageList(psPMRNode->psPageList); + psPMRNode->psPageList = NULL; + eError = PMRWritePMPageList( + /* Target PMR, offset, and length */ + psFreeList->psFreeListPMR, + (psPMRNode->bFirstPageMissing ? uiOffset + sizeof(IMG_UINT32) : uiOffset), + (psPMRNode->bFirstPageMissing ? uiLength - sizeof(IMG_UINT32) : uiLength), + /* Referenced PMR, and "page" granularity */ + psPMRNode->psPMR, + RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT, + &psPMRNode->psPageList); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Error (%s) writing FL 0x%08x", + __func__, + PVRSRVGetErrorString(eError), + (IMG_UINT32)psFreeList->ui32FreelistID)); + } + + /* Zeroing physical pages pointed by the reconstructed freelist */ + if (psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_ZERO_FREELIST) + { + eError = PMRZeroingPMR(psPMRNode->psPMR, RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to zero PMR %p of freelist %p (%s)", + __func__, + psPMRNode->psPMR, + psFreeList, + PVRSRVGetErrorString(eError))); + PVR_ASSERT(0); + } + } + + psFreeList->ui32CurrentFLPages += psPMRNode->ui32NumPages; +} + + +static PVRSRV_ERROR RGXReconstructFreeList(RGX_FREELIST *psFreeList) +{ + IMG_UINT32 ui32OriginalFLPages; + DLLIST_NODE *psNode, *psNext; + PVRSRV_ERROR eError; +#if !defined(PM_INTERACTIVE_MODE) + IMG_DEV_VIRTADDR sFreeListBaseDevVAddr; +#endif + + //PVR_DPF((PVR_DBG_ERROR, "FreeList RECONSTRUCTION: Reconstructing freelist %p (ID=%u)", psFreeList, psFreeList->ui32FreelistID)); + + /* Do the FreeList Reconstruction */ + ui32OriginalFLPages = psFreeList->ui32CurrentFLPages; + psFreeList->ui32CurrentFLPages = 0; + + /* Reconstructing Init FreeList pages */ + dllist_foreach_node(&psFreeList->sMemoryBlockInitHead, psNode, psNext) + { + _RGXFreeListReconstruction(psNode); + } + + /* Reconstructing 
Grow FreeList pages */ + dllist_foreach_node(&psFreeList->sMemoryBlockHead, psNode, psNext) + { + _RGXFreeListReconstruction(psNode); + } + + /* Ready pages are allocated but kept hidden until OOM occurs. */ + psFreeList->ui32CurrentFLPages -= psFreeList->ui32ReadyFLPages; + if (psFreeList->ui32CurrentFLPages != ui32OriginalFLPages) + { + PVR_ASSERT(psFreeList->ui32CurrentFLPages == ui32OriginalFLPages); + return PVRSRV_ERROR_FREELIST_RECONSTRUCTION_FAILED; + } + + { + RGXFWIF_FREELIST *psFWFreeList; + + /* Update firmware freelist structure */ + eError = DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList); + if (eError != PVRSRV_OK) + { + return eError; + } + +#if defined(PM_INTERACTIVE_MODE) + psFWFreeList->ui32CurrentStackTop = psFWFreeList->ui32CurrentPages - 1; + psFWFreeList->ui32AllocatedPageCount = 0; + psFWFreeList->ui32AllocatedMMUPageCount = 0; +#else + sFreeListBaseDevVAddr = psFWFreeList->sFreeListBaseDevVAddr; + psFWFreeList->bUpdatePending = IMG_FALSE; + psFWFreeList->ui32UpdateNewPages = 0; + psFWFreeList->ui32UpdateNewReadyPages = 0; + psFWFreeList->sFreeListLastGrowDevVAddr.uiAddr = 0; +#endif + + DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc); + } + +#if !defined(PM_INTERACTIVE_MODE) + /* Reset freelist state buffer */ + { + RGX_PM_FREELISTSTATE_BUFFER sFLState; + size_t uiNbBytes; + IMG_DEV_VIRTADDR sFLBaseAddr; + + eError = PMR_ReadBytes(psFreeList->psFreeListStatePMR, psFreeList->uiFreeListStatePMROffset, (IMG_UINT8*)&sFLState, sizeof(sFLState), &uiNbBytes); + + if (eError != PVRSRV_OK) + { + return eError; + } + + PVR_ASSERT(uiNbBytes == sizeof(sFLState)); + + sFLBaseAddr.uiAddr = (sFreeListBaseDevVAddr.uiAddr + + ((psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages) * sizeof(IMG_UINT32))) & + ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1); + /* Note: Freelist base address is packed shifted down. 
*/ + RGX_PM_FREELISTSTATE_BUFFER_SET_BASE_ADDR(sFLState, sFLBaseAddr.uiAddr >> RGX_PM_FREELISTSTATE_BASE_ADDR_ALIGNSHIFT); + RGX_PM_FREELISTSTATE_BUFFER_SET_STACK_PTR(sFLState, psFreeList->ui32CurrentFLPages - 1); + RGX_PM_FREELISTSTATE_BUFFER_SET_PAGE_STATUS(sFLState, 0); + RGX_PM_FREELISTSTATE_BUFFER_SET_MMUPAGE_STATUS(sFLState, 0); + + eError = PMR_WriteBytes(psFreeList->psFreeListStatePMR, psFreeList->uiFreeListStatePMROffset, (IMG_UINT8*)&sFLState, sizeof(sFLState), &uiNbBytes); + + if (eError != PVRSRV_OK) + { + return eError; + } + + PVR_ASSERT(uiNbBytes == sizeof(sFLState)); + } +#endif + + /* Check the Freelist checksum if required (as the list is fully populated) */ + if (psFreeList->bCheckFreelist) + { + IMG_UINT64 ui64CheckSum; + + _CheckFreelist(psFreeList, psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages, psFreeList->ui64FreelistChecksum, &ui64CheckSum); + } + + return eError; +} + + +void RGXProcessRequestFreelistsReconstruction(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32FreelistsCount, + IMG_UINT32 *paui32Freelists) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + DLLIST_NODE *psNode, *psNext; + IMG_UINT32 ui32Loop; + RGXFWIF_KCCB_CMD sTACCBCmd; +#if !defined(SUPPORT_SHADOW_FREELISTS) + DLLIST_NODE *psNodeHWRTData, *psNextHWRTData; + RGX_KM_HW_RT_DATASET *psKMHWRTDataSet; + RGXFWIF_HWRTDATA *psHWRTData; +#endif + IMG_UINT32 ui32FinalFreelistsCount = 0; + IMG_UINT32 aui32FinalFreelists[RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT * 2]; /* Worst-case is double what we are sent */ + + PVR_ASSERT(psDevInfo != NULL); + PVR_ASSERT(ui32FreelistsCount <= RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT); + if (ui32FreelistsCount > RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT) + { + ui32FreelistsCount = RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT; + } + + //PVR_DPF((PVR_DBG_ERROR, "FreeList RECONSTRUCTION: %u freelist(s) requested for reconstruction", ui32FreelistsCount)); + + /* + * Initialise the response command (in case we don't find a freelist ID). 
+ * Also copy the list to the 'final' freelist array. + */ + sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE; + sTACCBCmd.uCmdData.sFreeListsReconstructionData.ui32FreelistsCount = ui32FreelistsCount; + + for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++) + { + sTACCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] = paui32Freelists[ui32Loop] | + RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG; + aui32FinalFreelists[ui32Loop] = paui32Freelists[ui32Loop]; + } + + ui32FinalFreelistsCount = ui32FreelistsCount; + + /* + * The list of freelists we have been given for reconstruction will + * consist of local and global freelists (maybe MMU as well). Any + * local freelists should have their global list specified as well. + * There may be cases where the global freelist is not given (in + * cases of partial setups before a poll failure for example). To + * handle that we must first ensure every local freelist has a global + * freelist specified, otherwise we add that to the 'final' list. + * This final list of freelists is created in a first pass. + * + * Even with the global freelists listed, there may be other local + * freelists not listed, which are going to have their global freelist + * reconstructed. Therefore we have to find those freelists as well + * meaning we will have to iterate the entire list of freelists to + * find which must be reconstructed. This is the second pass. + */ + OSLockAcquire(psDevInfo->hLockFreeList); + dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext) + { + RGX_FREELIST *psFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode); + IMG_BOOL bInList = IMG_FALSE; + IMG_BOOL bGlobalInList = IMG_FALSE; + + /* Check if this local freelist is in the list and ensure its global is too. 
*/ + if (psFreeList->ui32FreelistGlobalID != 0) + { + for (ui32Loop = 0; ui32Loop < ui32FinalFreelistsCount; ui32Loop++) + { + if (aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistID) + { + bInList = IMG_TRUE; + } + if (aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistGlobalID) + { + bGlobalInList = IMG_TRUE; + } + } + + if (bInList && !bGlobalInList) + { + aui32FinalFreelists[ui32FinalFreelistsCount] = psFreeList->ui32FreelistGlobalID; + ui32FinalFreelistsCount++; + } + } + } + dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext) + { + RGX_FREELIST *psFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode); + IMG_BOOL bReconstruct = IMG_FALSE; + + /* + * Check if this freelist needs to be reconstructed (was it requested + * or is its global freelist going to be reconstructed)... + */ + for (ui32Loop = 0; ui32Loop < ui32FinalFreelistsCount; ui32Loop++) + { + if (aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistID || + aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistGlobalID) + { + bReconstruct = IMG_TRUE; + break; + } + } + + if (bReconstruct) + { + eError = RGXReconstructFreeList(psFreeList); + if (eError == PVRSRV_OK) + { +#if !defined(SUPPORT_SHADOW_FREELISTS) + /* Mark all HWRTData's of reconstructing local freelists as HWR (applies to TA/3D's not finished yet) */ + dllist_foreach_node(&psFreeList->sNodeHWRTDataHead, psNodeHWRTData, psNextHWRTData) + { + psKMHWRTDataSet = IMG_CONTAINER_OF(psNodeHWRTData, RGX_KM_HW_RT_DATASET, sNodeHWRTData); + eError = DevmemAcquireCpuVirtAddr(psKMHWRTDataSet->psHWRTDataFwMemDesc, (void **)&psHWRTData); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "Devmem AcquireCpuVirtAddr Failed during Reconstructing of FreeList, FwMemDesc(%p),psHWRTData(%p)", + psKMHWRTDataSet->psHWRTDataFwMemDesc, + psHWRTData)); + continue; + } + + psHWRTData->eState = RGXFWIF_RTDATA_STATE_HWR; + psHWRTData->ui32HWRTDataFlags &= ~HWRTDATA_HAS_LAST_TA; + + 
DevmemReleaseCpuVirtAddr(psKMHWRTDataSet->psHWRTDataFwMemDesc); + } +#endif + + /* Update the response for this freelist if it was specifically requested for reconstruction. */ + for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++) + { + if (paui32Freelists[ui32Loop] == psFreeList->ui32FreelistID) + { + /* Reconstruction of this requested freelist was successful... */ + sTACCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] &= ~RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG; + break; + } + } + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "Reconstructing of FreeList %p failed (%s)", + psFreeList, + PVRSRVGetErrorString(eError))); + } + } + } + OSLockRelease(psDevInfo->hLockFreeList); + + /* Check that all freelists were found and reconstructed... */ + for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++) + { + PVR_ASSERT((sTACCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] & + RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG) == 0); + } + + /* send feedback */ + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psDevInfo, + RGXFWIF_DM_GEOM, + &sTACCBCmd, + 0, + PDUMP_FLAGS_NONE); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + /* Kernel CCB should never fill up, as the FW is processing them right away */ + PVR_ASSERT(eError == PVRSRV_OK); +} + +/* Create HWRTDataSet */ +static PVRSRV_ERROR RGXCreateHWRTData_aux(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEV_VIRTADDR psVHeapTableDevVAddr, + IMG_DEV_VIRTADDR sPMDataDevVAddr, /* per-HWRTData */ + IMG_DEV_VIRTADDR sPMSecureDataDevVAddr, /* per-HWRTData */ + RGX_FREELIST *apsFreeLists[RGXFW_MAX_FREELISTS], + IMG_UINT32 ui32ScreenPixelMax, + IMG_UINT64 ui64PPPMultiSampleCtl, + IMG_UINT32 ui32TEStride, + IMG_DEV_VIRTADDR sTailPtrsDevVAddr, + IMG_UINT32 ui32TPCSize, + IMG_UINT32 ui32TEScreen, + IMG_UINT32 ui32TEAA, + IMG_UINT32 ui32TEMTILE1, + 
IMG_UINT32 ui32TEMTILE2, + IMG_UINT32 ui32RgnStride, + IMG_UINT32 ui32ISPMergeLowerX, + IMG_UINT32 ui32ISPMergeLowerY, + IMG_UINT32 ui32ISPMergeUpperX, + IMG_UINT32 ui32ISPMergeUpperY, + IMG_UINT32 ui32ISPMergeScaleX, + IMG_UINT32 ui32ISPMergeScaleY, + IMG_UINT16 ui16MaxRTs, + RGX_HWRTDATA_COMMON_COOKIE *psHWRTDataCommonCookie, + RGX_KM_HW_RT_DATASET **ppsKMHWRTDataSet) /* per-HWRTData */ +{ + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo; + IMG_UINT32 ui32Loop; + + /* KM cookie storing all the FW/HW data */ + RGX_KM_HW_RT_DATASET *psKMHWRTDataSet; + + /* local pointers for memory descriptors of FW allocations */ + DEVMEM_MEMDESC *psHWRTDataFwMemDesc = NULL; + DEVMEM_MEMDESC *psRTArrayFwMemDesc = NULL; + DEVMEM_MEMDESC *psRendersAccArrayFwMemDesc = NULL; + + /* local pointer for CPU-mapped [FW]HWRTData */ + RGXFWIF_HWRTDATA *psHWRTData = NULL; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + /* Prepare the HW RT DataSet struct */ + psKMHWRTDataSet = OSAllocZMem(sizeof(*psKMHWRTDataSet)); + if (psKMHWRTDataSet == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto AllocError; + } + + *ppsKMHWRTDataSet = psKMHWRTDataSet; + psKMHWRTDataSet->psDeviceNode = psDeviceNode; + + psKMHWRTDataSet->psHWRTDataCommonCookie = psHWRTDataCommonCookie; + + psDevInfo = psDeviceNode->pvDevice; + + /* + * This FW RT-Data is only mapped into kernel for initialisation. + * Otherwise this allocation is only used by the FW. + * Therefore the GPU cache doesn't need coherency and write-combine will + * suffice on the CPU side. 
(WC buffer will be flushed at the first TA-kick) + */ + eError = DevmemFwAllocate( psDevInfo, + sizeof(RGXFWIF_HWRTDATA), + RGX_FWCOMCTX_ALLOCFLAGS, + "FwHwRTData", + &psHWRTDataFwMemDesc ); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: DevmemAllocate for RGX_FWIF_HWRTDATA failed", + __func__)); + goto FWRTDataAllocateError; + } + + psKMHWRTDataSet->psHWRTDataFwMemDesc = psHWRTDataFwMemDesc; + eError = RGXSetFirmwareAddress( &psKMHWRTDataSet->sHWRTDataFwAddr, + psHWRTDataFwMemDesc, + 0, + RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:1", FWRTDataFwAddrError); + + eError = DevmemAcquireCpuVirtAddr(psHWRTDataFwMemDesc, + (void **)&psHWRTData); + PVR_LOG_GOTO_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", FWRTDataCpuMapError); + +#if defined(PM_INTERACTIVE_MODE) + psHWRTData->psVHeapTableDevVAddr = psVHeapTableDevVAddr; +#endif + + psHWRTData->sHWRTDataCommonFwAddr = psHWRTDataCommonCookie->sHWRTDataCommonFwAddr; + + psHWRTData->sPMSecureRenderStateDevVAddr = sPMSecureDataDevVAddr; + +#if defined(PM_INTERACTIVE_MODE) + psHWRTData->sPMMListDevVAddr = sPMDataDevVAddr; +#else + psHWRTData->sPMRenderStateDevVAddr = sPMDataDevVAddr; +#endif + + psHWRTData->ui32ScreenPixelMax = ui32ScreenPixelMax; + psHWRTData->ui64PPPMultiSampleCtl = ui64PPPMultiSampleCtl; + psHWRTData->ui32TEStride = ui32TEStride; + psHWRTData->sTailPtrsDevVAddr = sTailPtrsDevVAddr; + psHWRTData->ui32TPCSize = ui32TPCSize; + psHWRTData->ui32TEScreen = ui32TEScreen; + psHWRTData->ui32TEAA = ui32TEAA; + psHWRTData->ui32TEMTILE1 = ui32TEMTILE1; + psHWRTData->ui32TEMTILE2 = ui32TEMTILE2; + psHWRTData->ui32RgnStride = ui32RgnStride; /* Region stride in Bytes */ + psHWRTData->ui32ISPMergeLowerX = ui32ISPMergeLowerX; + psHWRTData->ui32ISPMergeLowerY = ui32ISPMergeLowerY; + psHWRTData->ui32ISPMergeUpperX = ui32ISPMergeUpperX; + psHWRTData->ui32ISPMergeUpperY = ui32ISPMergeUpperY; + psHWRTData->ui32ISPMergeScaleX = ui32ISPMergeScaleX; + 
psHWRTData->ui32ISPMergeScaleY = ui32ISPMergeScaleY; + + OSLockAcquire(psDevInfo->hLockFreeList); + for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++) + { + psKMHWRTDataSet->apsFreeLists[ui32Loop] = apsFreeLists[ui32Loop]; + psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount++; + psHWRTData->apsFreeLists[ui32Loop].ui32Addr = psKMHWRTDataSet->apsFreeLists[ui32Loop]->sFreeListFWDevVAddr.ui32Addr; + /* invalid initial snapshot value, the snapshot is always taken during first kick + * and hence the value get replaced during the first kick anyway. So it's safe to set it 0. + */ + psHWRTData->aui32FreeListHWRSnapshot[ui32Loop] = 0; + psHWRTData->bRenderStateNeedsReset = IMG_FALSE; + } +#if !defined(SUPPORT_SHADOW_FREELISTS) + dllist_add_to_tail(&apsFreeLists[RGXFW_LOCAL_FREELIST]->sNodeHWRTDataHead, &(psKMHWRTDataSet->sNodeHWRTData)); +#endif + OSLockRelease(psDevInfo->hLockFreeList); + + { + RGXFWIF_RTA_CTL *psRTACtl = &psHWRTData->sRTACtl; + + psRTACtl->ui32RenderTargetIndex = 0; + psRTACtl->ui32ActiveRenderTargets = 0; + psRTACtl->sValidRenderTargets.ui32Addr = 0; + psRTACtl->sRTANumPartialRenders.ui32Addr = 0; + psRTACtl->ui32MaxRTs = (IMG_UINT32) ui16MaxRTs; + + if (ui16MaxRTs > 1) + { + /* Allocate memory for the checks */ + PDUMPCOMMENT("Allocate memory for shadow render target cache"); + eError = DevmemFwAllocate( psDevInfo, + ui16MaxRTs * sizeof(IMG_UINT32), + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_UNCACHED| + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), + "FwShadowRTCache", + &psRTArrayFwMemDesc ); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate %u bytes for render target array (%s)", + __func__, + ui16MaxRTs, PVRSRVGetErrorString(eError))); + goto FWAllocateRTArryError; + } + + 
psKMHWRTDataSet->psRTArrayFwMemDesc = psRTArrayFwMemDesc; + + eError = RGXSetFirmwareAddress( &psRTACtl->sValidRenderTargets, + psRTArrayFwMemDesc, + 0, + RFW_FWADDR_FLAG_NONE ); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:2", FWAllocateRTArryFwAddrError); + + /* Allocate memory for the checks */ + PDUMPCOMMENT("Allocate memory for tracking renders accumulation"); + eError = DevmemFwAllocate(psDevInfo, + ui16MaxRTs * sizeof(IMG_UINT32), + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_UNCACHED| + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), + "FwRendersAccumulation", + &psRendersAccArrayFwMemDesc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate %u bytes for render target array (%s) (renders accumulation)", + __func__, + ui16MaxRTs, PVRSRVGetErrorString(eError))); + goto FWAllocateRTAccArryError; + } + + psKMHWRTDataSet->psRendersAccArrayFwMemDesc = psRendersAccArrayFwMemDesc; + + eError = RGXSetFirmwareAddress( &psRTACtl->sRTANumPartialRenders, + psRendersAccArrayFwMemDesc, + 0, + RFW_FWADDR_FLAG_NONE ); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:3", FWAllocRTAccArryFwAddrError); + } + } + +#if defined(PDUMP) + PDUMPCOMMENT("Dump HWRTData 0x%08X", psKMHWRTDataSet->sHWRTDataFwAddr.ui32Addr); + DevmemPDumpLoadMem(psKMHWRTDataSet->psHWRTDataFwMemDesc, 0, sizeof(*psHWRTData), PDUMP_FLAGS_CONTINUOUS); +#endif + + DevmemReleaseCpuVirtAddr(psKMHWRTDataSet->psHWRTDataFwMemDesc); + return PVRSRV_OK; + +FWAllocRTAccArryFwAddrError: + DevmemFwUnmapAndFree(psDevInfo, psRendersAccArrayFwMemDesc); +FWAllocateRTAccArryError: + RGXUnsetFirmwareAddress(psRTArrayFwMemDesc); +FWAllocateRTArryFwAddrError: + DevmemFwUnmapAndFree(psDevInfo, psRTArrayFwMemDesc); +FWAllocateRTArryError: + 
OSLockAcquire(psDevInfo->hLockFreeList); + for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++) + { + PVR_ASSERT(psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount > 0); + psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount--; + } + OSLockRelease(psDevInfo->hLockFreeList); + DevmemReleaseCpuVirtAddr(psKMHWRTDataSet->psHWRTDataFwMemDesc); +FWRTDataCpuMapError: + RGXUnsetFirmwareAddress(psKMHWRTDataSet->psHWRTDataFwMemDesc); +FWRTDataFwAddrError: + DevmemFwUnmapAndFree(psDevInfo, psKMHWRTDataSet->psHWRTDataFwMemDesc); +FWRTDataAllocateError: + *ppsKMHWRTDataSet = NULL; + OSFreeMem(psKMHWRTDataSet); + +AllocError: + return eError; +} + +/* Destroy HWRTDataSet */ +static PVRSRV_ERROR RGXDestroyHWRTData_aux(RGX_KM_HW_RT_DATASET *psKMHWRTDataSet) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + PVRSRV_ERROR eError; + PRGXFWIF_HWRTDATA psHWRTData; + IMG_UINT32 ui32Loop; + + PVR_ASSERT(psKMHWRTDataSet); + + psDevInfo = psKMHWRTDataSet->psDeviceNode->pvDevice; + + eError = RGXSetFirmwareAddress(&psHWRTData, psKMHWRTDataSet->psHWRTDataFwMemDesc, 0, RFW_FWADDR_NOREF_FLAG); + PVR_RETURN_IF_ERROR(eError); + + /* Cleanup HWRTData */ + eError = RGXFWRequestHWRTDataCleanUp(psKMHWRTDataSet->psDeviceNode, psHWRTData); + + if (eError == PVRSRV_ERROR_RETRY) + { + return eError; + } + + if (psKMHWRTDataSet->psRTArrayFwMemDesc) + { + RGXUnsetFirmwareAddress(psKMHWRTDataSet->psRTArrayFwMemDesc); + DevmemFwUnmapAndFree(psDevInfo, psKMHWRTDataSet->psRTArrayFwMemDesc); + } + + if (psKMHWRTDataSet->psRendersAccArrayFwMemDesc) + { + RGXUnsetFirmwareAddress(psKMHWRTDataSet->psRendersAccArrayFwMemDesc); + DevmemFwUnmapAndFree(psDevInfo, psKMHWRTDataSet->psRendersAccArrayFwMemDesc); + } + + /* decrease freelist refcount */ + OSLockAcquire(psDevInfo->hLockFreeList); + for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++) + { + PVR_ASSERT(psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount > 0); + psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount--; + } +#if 
!defined(SUPPORT_SHADOW_FREELISTS) + dllist_remove_node(&psKMHWRTDataSet->sNodeHWRTData); +#endif + OSLockRelease(psDevInfo->hLockFreeList); + + /* Freeing the memory has to happen _after_ removing the HWRTData from the freelist + * otherwise we risk traversing the freelist to find a pointer from a freed data structure */ + RGXUnsetFirmwareAddress(psKMHWRTDataSet->psHWRTDataFwMemDesc); + DevmemFwUnmapAndFree(psDevInfo, psKMHWRTDataSet->psHWRTDataFwMemDesc); + + OSFreeMem(psKMHWRTDataSet); + + return PVRSRV_OK; +} + +/* Create set of HWRTData(s) and bind it with a shared FW HWRTDataCommon */ +PVRSRV_ERROR RGXCreateHWRTDataSet(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEV_VIRTADDR psVHeapTableDevVAddr, + IMG_DEV_VIRTADDR sPMDataDevVAddr_0, + IMG_DEV_VIRTADDR sPMDataDevVAddr_1, + IMG_DEV_VIRTADDR sPMSecureDataDevVAddr_0, + IMG_DEV_VIRTADDR sPMSecureDataDevVAddr_1, + RGX_FREELIST *apsFreeLists[RGXFW_MAX_FREELISTS], + IMG_UINT32 ui32ScreenPixelMax, + IMG_UINT64 ui64PPPMultiSampleCtl, + IMG_UINT32 ui32TEStride, + IMG_DEV_VIRTADDR sTailPtrsDevVAddr, + IMG_UINT32 ui32TPCSize, + IMG_UINT32 ui32TEScreen, + IMG_UINT32 ui32TEAA, + IMG_UINT32 ui32TEMTILE1, + IMG_UINT32 ui32TEMTILE2, + IMG_UINT32 ui32RgnStride, + IMG_UINT32 ui32ISPMergeLowerX, + IMG_UINT32 ui32ISPMergeLowerY, + IMG_UINT32 ui32ISPMergeUpperX, + IMG_UINT32 ui32ISPMergeUpperY, + IMG_UINT32 ui32ISPMergeScaleX, + IMG_UINT32 ui32ISPMergeScaleY, + IMG_UINT16 ui16MaxRTs, + RGX_KM_HW_RT_DATASET **ppsKMHWRTDataSet_0, + RGX_KM_HW_RT_DATASET **ppsKMHWRTDataSet_1) +{ + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + RGX_HWRTDATA_COMMON_COOKIE *psHWRTDataCommonCookie; + RGXFWIF_HWRTDATA_COMMON *psHWRTDataCommon; + DEVMEM_MEMDESC *psHWRTDataCommonFwMemDesc; + RGXFWIF_DEV_VIRTADDR sHWRTDataCommonFwAddr; + + /* Prepare KM cleanup object for HWRTDataCommon FW object */ + psHWRTDataCommonCookie = OSAllocZMem(sizeof(*psHWRTDataCommonCookie)); + if 
(psHWRTDataCommonCookie == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_HWRTDataCommonCookieAlloc; + } + + /* + * This FW common context is only mapped into kernel for initialisation. + * Otherwise this allocation is only used by the FW. + * Therefore the GPU cache doesn't need coherency, and write-combine will + * suffice on the CPU side (WC buffer will be flushed at the first TA-kick) + */ + eError = DevmemFwAllocate(psDevInfo, + sizeof(RGXFWIF_HWRTDATA_COMMON), + RGX_FWCOMCTX_ALLOCFLAGS, + "FwHWRTDataCommon", + &psHWRTDataCommonFwMemDesc); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: DevmemAllocate for FwHWRTDataCommon failed", __func__)); + goto err_HWRTDataCommonAlloc; + } + eError = RGXSetFirmwareAddress(&sHWRTDataCommonFwAddr, psHWRTDataCommonFwMemDesc, 0, RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", err_HWRTDataCommonFwAddr); + + eError = DevmemAcquireCpuVirtAddr(psHWRTDataCommonFwMemDesc, (void **)&psHWRTDataCommon); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", err_HWRTDataCommonVA); + + psHWRTDataCommon->bTACachesNeedZeroing = IMG_FALSE; +#if defined(PDUMP) + PDUMPCOMMENT("Dump HWRTDataCommon"); + DevmemPDumpLoadMem(psHWRTDataCommonFwMemDesc, 0, sizeof(*psHWRTDataCommon), PDUMP_FLAGS_CONTINUOUS); +#endif + DevmemReleaseCpuVirtAddr(psHWRTDataCommonFwMemDesc); + + psHWRTDataCommonCookie->ui32RefCount = 0; + psHWRTDataCommonCookie->psHWRTDataCommonFwMemDesc = psHWRTDataCommonFwMemDesc; + psHWRTDataCommonCookie->sHWRTDataCommonFwAddr = sHWRTDataCommonFwAddr; + + /* Here we are creating a set of HWRTData(s) + the number of elements in the set equals RGXMKIF_NUM_RTDATAS. 
+ */ + + eError = RGXCreateHWRTData_aux( + psConnection, + psDeviceNode, + psVHeapTableDevVAddr, + sPMDataDevVAddr_0, + sPMSecureDataDevVAddr_0, + apsFreeLists, + ui32ScreenPixelMax, + ui64PPPMultiSampleCtl, + ui32TEStride, + sTailPtrsDevVAddr, + ui32TPCSize, + ui32TEScreen, + ui32TEAA, + ui32TEMTILE1, + ui32TEMTILE2, + ui32RgnStride, + ui32ISPMergeLowerX, + ui32ISPMergeLowerY, + ui32ISPMergeUpperX, + ui32ISPMergeUpperY, + ui32ISPMergeScaleX, + ui32ISPMergeScaleY, + ui16MaxRTs, + psHWRTDataCommonCookie, + ppsKMHWRTDataSet_0); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to create HWRTData [slot 0] (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto err_HWRTDataAlloc_0; + } + psHWRTDataCommonCookie->ui32RefCount += 1; + + eError = RGXCreateHWRTData_aux( + psConnection, + psDeviceNode, + psVHeapTableDevVAddr, + sPMDataDevVAddr_1, + sPMSecureDataDevVAddr_1, + apsFreeLists, + ui32ScreenPixelMax, + ui64PPPMultiSampleCtl, + ui32TEStride, + sTailPtrsDevVAddr, + ui32TPCSize, + ui32TEScreen, + ui32TEAA, + ui32TEMTILE1, + ui32TEMTILE2, + ui32RgnStride, + ui32ISPMergeLowerX, + ui32ISPMergeLowerY, + ui32ISPMergeUpperX, + ui32ISPMergeUpperY, + ui32ISPMergeScaleX, + ui32ISPMergeScaleY, + ui16MaxRTs, + psHWRTDataCommonCookie, + ppsKMHWRTDataSet_1); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to create HWRTData [slot 1] (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto err_HWRTDataAlloc_1; + } + psHWRTDataCommonCookie->ui32RefCount += 1; + + return PVRSRV_OK; + +err_HWRTDataAlloc_1: + RGXDestroyHWRTData_aux(*ppsKMHWRTDataSet_0); + *ppsKMHWRTDataSet_0 = NULL; +err_HWRTDataAlloc_0: +err_HWRTDataCommonVA: + RGXUnsetFirmwareAddress(psHWRTDataCommonFwMemDesc); +err_HWRTDataCommonFwAddr: + DevmemFwUnmapAndFree(psDevInfo, psHWRTDataCommonFwMemDesc); +err_HWRTDataCommonAlloc: + OSFreeMem(psHWRTDataCommonCookie); +err_HWRTDataCommonCookieAlloc: + + return eError; +} + +/* Destroy a single instance of HWRTData. 
+ Additionally, destroy the HWRTDataCommon{Cookie} objects + when it is the last HWRTData within a corresponding set of HWRTDatas. +*/ +PVRSRV_ERROR RGXDestroyHWRTDataSet(RGX_KM_HW_RT_DATASET *psKMHWRTDataSet) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + PVRSRV_DEVICE_NODE *psDevNode; + PVRSRV_ERROR eError; + PRGXFWIF_HWRTDATA psHWRTData; + RGX_HWRTDATA_COMMON_COOKIE *psCommonCookie; + + PVR_ASSERT(psKMHWRTDataSet); + + psDevNode = psKMHWRTDataSet->psDeviceNode; + psDevInfo = psDevNode->pvDevice; + + eError = RGXSetFirmwareAddress(&psHWRTData, + psKMHWRTDataSet->psHWRTDataFwMemDesc, 0, + RFW_FWADDR_NOREF_FLAG); + PVR_RETURN_IF_ERROR(eError); + + /* Cleanup HWRTData */ + eError = RGXFWRequestHWRTDataCleanUp(psDevNode, psHWRTData); + if (eError == PVRSRV_ERROR_RETRY) + { + return eError; + } + + psCommonCookie = psKMHWRTDataSet->psHWRTDataCommonCookie; + + RGXDestroyHWRTData_aux(psKMHWRTDataSet); + + /* We've got past potential PVRSRV_ERROR_RETRY events, so we are sure + that the HWRTDATA instance will be destroyed during this call. + Consequently, we decrease the ref count for HWRTDataCommonCookie. + + NOTE: This ref count does not require locks or atomics. + ------------------------------------------------------- + HWRTDatas bound into one pair are always destroyed sequentially, + within a single loop on the Client side. + The Common/Cookie objects always belong to only one pair of + HWRTDatas, and ref count is used to ensure that the Common/Cookie + objects will be destroyed after destruction of all HWRTDatas + within a single pair. + */ + psCommonCookie->ui32RefCount--; + + /* When ref count for HWRTDataCommonCookie hits ZERO + * we have to destroy the HWRTDataCommon [FW object] and the cookie + * [KM object] afterwards. */ + if (psCommonCookie->ui32RefCount == 0) + { + RGXUnsetFirmwareAddress(psCommonCookie->psHWRTDataCommonFwMemDesc); + + /* We don't need to flush the SLC before freeing. + * FW RequestCleanUp has already done that for HWRTData, so we're fine + * now. 
*/ + + DevmemFwUnmapAndFree(psDevNode->pvDevice, + psCommonCookie->psHWRTDataCommonFwMemDesc); + OSFreeMem(psCommonCookie); + } + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32MaxFLPages, + IMG_UINT32 ui32InitFLPages, + IMG_UINT32 ui32GrowFLPages, + IMG_UINT32 ui32GrowParamThreshold, + RGX_FREELIST *psGlobalFreeList, + IMG_BOOL bCheckFreelist, + IMG_DEV_VIRTADDR sFreeListBaseDevVAddr, + IMG_DEV_VIRTADDR sFreeListStateDevVAddr, + PMR *psFreeListPMR, + IMG_DEVMEM_OFFSET_T uiFreeListPMROffset, + PMR *psFreeListStatePMR, + IMG_DEVMEM_OFFSET_T uiFreeListStatePMROffset, + RGX_FREELIST **ppsFreeList) +{ + PVRSRV_ERROR eError; + RGXFWIF_FREELIST *psFWFreeList; + DEVMEM_MEMDESC *psFWFreelistMemDesc; + RGX_FREELIST *psFreeList; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + if (OSGetPageShift() > RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT) + { + IMG_UINT32 ui32Size, ui32NewInitFLPages, ui32NewMaxFLPages, ui32NewGrowFLPages; + + /* Round up number of FL pages to the next multiple of the OS page size */ + + ui32Size = ui32InitFLPages << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT; + ui32Size = PVR_ALIGN(ui32Size, (IMG_DEVMEM_SIZE_T)OSGetPageSize()); + ui32NewInitFLPages = ui32Size >> RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT; + + ui32Size = ui32GrowFLPages << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT; + ui32Size = PVR_ALIGN(ui32Size, (IMG_DEVMEM_SIZE_T)OSGetPageSize()); + ui32NewGrowFLPages = ui32Size >> RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT; + + ui32Size = ui32MaxFLPages << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT; + ui32Size = PVR_ALIGN(ui32Size, (IMG_DEVMEM_SIZE_T)OSGetPageSize()); + ui32NewMaxFLPages = ui32Size >> RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT; + + PVR_DPF((PVR_DBG_WARNING, "%s: Increased number of PB pages: Init %u -> %u, Grow %u -> %u, Max %u -> %u", + __func__, ui32InitFLPages, ui32NewInitFLPages, ui32GrowFLPages, ui32NewGrowFLPages, ui32MaxFLPages, 
ui32NewMaxFLPages)); + + ui32InitFLPages = ui32NewInitFLPages; + ui32GrowFLPages = ui32NewGrowFLPages; + ui32MaxFLPages = ui32NewMaxFLPages; + } + + /* Allocate kernel freelist struct */ + psFreeList = OSAllocZMem(sizeof(*psFreeList)); + if (psFreeList == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed to allocate host data structure", + __func__)); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto ErrorAllocHost; + } + + /* + * This FW FreeList context is only mapped into kernel for initialisation + * and reconstruction (at other times it is not mapped and only used by the + * FW). + * Therefore the GPU cache doesn't need coherency, and write-combine will + * suffice on the CPU side (WC buffer will be flushed at the first TA-kick) + */ + eError = DevmemFwAllocate(psDevInfo, + sizeof(*psFWFreeList), + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), + "FwFreeList", + &psFWFreelistMemDesc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: DevmemAllocate for RGXFWIF_FREELIST failed", + __func__)); + goto FWFreeListAlloc; + } + + /* Initialise host data structures */ + psFreeList->psDevInfo = psDevInfo; + psFreeList->psConnection = psConnection; + psFreeList->psFreeListPMR = psFreeListPMR; + psFreeList->uiFreeListPMROffset = uiFreeListPMROffset; + psFreeList->psFreeListStatePMR = psFreeListStatePMR; + psFreeList->uiFreeListStatePMROffset = uiFreeListStatePMROffset; + psFreeList->psFWFreelistMemDesc = psFWFreelistMemDesc; + eError = RGXSetFirmwareAddress(&psFreeList->sFreeListFWDevVAddr, psFWFreelistMemDesc, 0, RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", 
ErrorSetFwAddr); + + /* psFreeList->ui32FreelistID set below with lock... */ + psFreeList->ui32FreelistGlobalID = (psGlobalFreeList ? psGlobalFreeList->ui32FreelistID : 0); + psFreeList->ui32MaxFLPages = ui32MaxFLPages; + psFreeList->ui32InitFLPages = ui32InitFLPages; + psFreeList->ui32GrowFLPages = ui32GrowFLPages; + psFreeList->ui32CurrentFLPages = 0; + psFreeList->ui32ReadyFLPages = 0; + psFreeList->ui32GrowThreshold = ui32GrowParamThreshold; + psFreeList->ui64FreelistChecksum = 0; + psFreeList->ui32RefCount = 0; + psFreeList->bCheckFreelist = bCheckFreelist; + dllist_init(&psFreeList->sMemoryBlockHead); + dllist_init(&psFreeList->sMemoryBlockInitHead); +#if !defined(SUPPORT_SHADOW_FREELISTS) + dllist_init(&psFreeList->sNodeHWRTDataHead); +#endif + psFreeList->ownerPid = OSGetCurrentClientProcessIDKM(); + + + /* Add to list of freelists */ + OSLockAcquire(psDevInfo->hLockFreeList); + psFreeList->ui32FreelistID = psDevInfo->ui32FreelistCurrID++; + dllist_add_to_tail(&psDevInfo->sFreeListHead, &psFreeList->sNode); + OSLockRelease(psDevInfo->hLockFreeList); + + + /* Initialise FW data structure */ + eError = DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList); + PVR_LOG_GOTO_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", FWFreeListCpuMap); + + { + const IMG_UINT32 ui32ReadyPages = _CalculateFreelistReadyPages(psFreeList, ui32InitFLPages); + + psFWFreeList->ui32MaxPages = ui32MaxFLPages; + psFWFreeList->ui32CurrentPages = ui32InitFLPages - ui32ReadyPages; + psFWFreeList->ui32GrowPages = ui32GrowFLPages; + psFWFreeList->bUpdatePending = IMG_FALSE; + psFWFreeList->ui32UpdateNewPages = 0; + psFWFreeList->ui32UpdateNewReadyPages = 0; + psFWFreeList->sFreeListBaseDevVAddr = sFreeListBaseDevVAddr; +#if defined(PM_INTERACTIVE_MODE) + psFWFreeList->ui32CurrentStackTop = psFWFreeList->ui32CurrentPages - 1; + psFWFreeList->ui64CurrentDevVAddr = (sFreeListBaseDevVAddr.uiAddr + + ((ui32MaxFLPages - psFWFreeList->ui32CurrentPages) * 
sizeof(IMG_UINT32))) & + ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1); +#endif + psFWFreeList->ui32FreeListID = psFreeList->ui32FreelistID; + psFWFreeList->bGrowPending = IMG_FALSE; + psFWFreeList->ui32ReadyPages = ui32ReadyPages; + +#if defined(SUPPORT_SHADOW_FREELISTS) + /* Get the FW Memory Context address... */ + eError = RGXSetFirmwareAddress(&psFWFreeList->psFWMemContext, + RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData), + 0, RFW_FWADDR_NOREF_FLAG); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: RGXSetFirmwareAddress for RGXFWIF_FWMEMCONTEXT failed", + __func__)); + DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc); + goto FWFreeListCpuMap; + } +#else + PVR_UNREFERENCED_PARAMETER(hMemCtxPrivData); +#endif + + /* + * Only the PM state buffer address is needed which contains the PM + * state including the freelist base address. + * + * Access to the physical PMR will be used to update the contents of the + * PM state buffer when PB grow occurs following OOM. + */ + psFWFreeList->sFreeListLastGrowDevVAddr.uiAddr = 0; + psFWFreeList->sFreeListStateDevVAddr = sFreeListStateDevVAddr; + } + + PVR_DPF((PVR_DBG_MESSAGE, + "Freelist [%p]: Created: Max pages 0x%08x, Init pages 0x%08x, FL base address 0x%016" IMG_UINT64_FMTSPECx ", Current FL base address 0x%016" IMG_UINT64_FMTSPECx ", Current pages %u", + psFreeList, + ui32MaxFLPages, + ui32InitFLPages, + sFreeListBaseDevVAddr.uiAddr, + (sFreeListBaseDevVAddr.uiAddr + + ((ui32MaxFLPages - psFWFreeList->ui32CurrentPages) * sizeof(IMG_UINT32))) & + ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1), + psFWFreeList->ui32CurrentPages - 1)); +#if defined(PDUMP) + PDUMPCOMMENT("Dump FW FreeList"); + DevmemPDumpLoadMem(psFreeList->psFWFreelistMemDesc, 0, sizeof(*psFWFreeList), PDUMP_FLAGS_CONTINUOUS); + +#if defined(PM_INTERACTIVE_MODE) + /* + * Separate dump of the Freelist's number of Pages and stack pointer. 
+ * This allows to easily modify the PB size in the out2.txt files. + */ + PDUMPCOMMENT("FreeList TotalPages"); + DevmemPDumpLoadMemValue32(psFreeList->psFWFreelistMemDesc, + offsetof(RGXFWIF_FREELIST, ui32CurrentPages), + psFWFreeList->ui32CurrentPages, + PDUMP_FLAGS_CONTINUOUS); + PDUMPCOMMENT("FreeList StackPointer"); + DevmemPDumpLoadMemValue32(psFreeList->psFWFreelistMemDesc, + offsetof(RGXFWIF_FREELIST, ui32CurrentStackTop), + psFWFreeList->ui32CurrentStackTop, + PDUMP_FLAGS_CONTINUOUS); +#endif +#endif + DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc); + + /* Add initial PB block */ + eError = RGXGrowFreeList(psFreeList, + ui32InitFLPages, + &psFreeList->sMemoryBlockInitHead); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed to allocate initial memory block for free list 0x%016" IMG_UINT64_FMTSPECx " (%s)", + __func__, + sFreeListBaseDevVAddr.uiAddr, + PVRSRVGetErrorString(eError))); + goto FWFreeListCpuMap; + } +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + /* Update Stats */ + PVRSRVStatsUpdateFreelistStats(1, /* Add 1 to the appropriate counter (Requests by App)*/ + 0, + psFreeList->ui32InitFLPages, + psFreeList->ui32NumHighPages, + psFreeList->ownerPid); + +#endif + + /* return values */ + *ppsFreeList = psFreeList; + + return PVRSRV_OK; + + /* Error handling */ + +FWFreeListCpuMap: + /* Remove freelists from list */ + OSLockAcquire(psDevInfo->hLockFreeList); + dllist_remove_node(&psFreeList->sNode); + OSLockRelease(psDevInfo->hLockFreeList); + RGXUnsetFirmwareAddress(psFWFreelistMemDesc); + +ErrorSetFwAddr: + DevmemFwUnmapAndFree(psDevInfo, psFWFreelistMemDesc); + +FWFreeListAlloc: + OSFreeMem(psFreeList); + +ErrorAllocHost: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + + +/* + RGXDestroyFreeList +*/ +PVRSRV_ERROR RGXDestroyFreeList(RGX_FREELIST *psFreeList) +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32RefCount; + + PVR_ASSERT(psFreeList); + + OSLockAcquire(psFreeList->psDevInfo->hLockFreeList); + ui32RefCount = 
psFreeList->ui32RefCount; + OSLockRelease(psFreeList->psDevInfo->hLockFreeList); + + if (ui32RefCount != 0) + { + /* Freelist still busy */ + return PVRSRV_ERROR_RETRY; + } + + /* Freelist is not in use => start firmware cleanup */ + eError = RGXFWRequestFreeListCleanUp(psFreeList->psDevInfo, + psFreeList->sFreeListFWDevVAddr); + if (eError != PVRSRV_OK) + { + /* Can happen if the firmware took too long to handle the cleanup request, + * or if SLC-flushes didn't went through (due to some GPU lockup) */ + return eError; + } + + /* Remove FreeList from linked list before we destroy it... */ + OSLockAcquire(psFreeList->psDevInfo->hLockFreeList); + dllist_remove_node(&psFreeList->sNode); +#if !defined(SUPPORT_SHADOW_FREELISTS) + /* Confirm all HWRTData nodes are freed before releasing freelist */ + PVR_ASSERT(dllist_is_empty(&psFreeList->sNodeHWRTDataHead)); +#endif + OSLockRelease(psFreeList->psDevInfo->hLockFreeList); + +#if defined(PM_INTERACTIVE_MODE) + if (psFreeList->bCheckFreelist) + { + RGXFWIF_FREELIST *psFWFreeList; + IMG_UINT64 ui32CurrentStackTop; + IMG_UINT64 ui64CheckSum; + + /* Get the current stack pointer for this free list */ + DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList); + ui32CurrentStackTop = psFWFreeList->ui32CurrentStackTop; + DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc); + + if (ui32CurrentStackTop == psFreeList->ui32CurrentFLPages-1) + { + /* Do consistency tests (as the list is fully populated) */ + _CheckFreelist(psFreeList, psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages, psFreeList->ui64FreelistChecksum, &ui64CheckSum); + } + else + { + /* Check for duplicate pages, but don't check the checksum as the list is not fully populated */ + _CheckFreelist(psFreeList, ui32CurrentStackTop+1, 0, &ui64CheckSum); + } + } +#endif + + /* Destroy FW structures */ + RGXUnsetFirmwareAddress(psFreeList->psFWFreelistMemDesc); + DevmemFwUnmapAndFree(psFreeList->psDevInfo, 
psFreeList->psFWFreelistMemDesc); + + /* Remove grow shrink blocks */ + while (!dllist_is_empty(&psFreeList->sMemoryBlockHead)) + { + eError = RGXShrinkFreeList(&psFreeList->sMemoryBlockHead, psFreeList); + PVR_ASSERT(eError == PVRSRV_OK); + } + + /* Remove initial PB block */ + eError = RGXShrinkFreeList(&psFreeList->sMemoryBlockInitHead, psFreeList); + PVR_ASSERT(eError == PVRSRV_OK); + + /* consistency checks */ + PVR_ASSERT(dllist_is_empty(&psFreeList->sMemoryBlockInitHead)); + PVR_ASSERT(psFreeList->ui32CurrentFLPages == 0); + + /* free Freelist */ + OSFreeMem(psFreeList); + + return eError; +} + + +/* + RGXCreateZSBuffer +*/ +PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEMINT_RESERVATION *psReservation, + PMR *psPMR, + PVRSRV_MEMALLOCFLAGS_T uiMapFlags, + RGX_ZSBUFFER_DATA **ppsZSBuffer) +{ + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_PRBUFFER *psFWZSBuffer; + RGX_ZSBUFFER_DATA *psZSBuffer; + DEVMEM_MEMDESC *psFWZSBufferMemDesc; + IMG_BOOL bOnDemand = PVRSRV_CHECK_ON_DEMAND(uiMapFlags) ? IMG_TRUE : IMG_FALSE; + + /* Allocate host data structure */ + psZSBuffer = OSAllocZMem(sizeof(*psZSBuffer)); + if (psZSBuffer == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate cleanup data structure for ZS-Buffer", + __func__)); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto ErrorAllocCleanup; + } + + /* Populate Host data */ + psZSBuffer->psDevInfo = psDevInfo; + psZSBuffer->psReservation = psReservation; + psZSBuffer->psPMR = psPMR; + psZSBuffer->uiMapFlags = uiMapFlags; + psZSBuffer->ui32RefCount = 0; + psZSBuffer->bOnDemand = bOnDemand; + if (bOnDemand) + { + /* psZSBuffer->ui32ZSBufferID set below with lock... 
*/ + psZSBuffer->psMapping = NULL; + + OSLockAcquire(psDevInfo->hLockZSBuffer); + psZSBuffer->ui32ZSBufferID = psDevInfo->ui32ZSBufferCurrID++; + dllist_add_to_tail(&psDevInfo->sZSBufferHead, &psZSBuffer->sNode); + OSLockRelease(psDevInfo->hLockZSBuffer); + } + + /* Allocate firmware memory for ZS-Buffer. */ + PDUMPCOMMENT("Allocate firmware ZS-Buffer data structure"); + eError = DevmemFwAllocate(psDevInfo, + sizeof(*psFWZSBuffer), + PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | + PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | + PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), + "FwZSBuffer", + &psFWZSBufferMemDesc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate firmware ZS-Buffer (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto ErrorAllocFWZSBuffer; + } + psZSBuffer->psFWZSBufferMemDesc = psFWZSBufferMemDesc; + + /* Temporarily map the firmware render context to the kernel. */ + eError = DevmemAcquireCpuVirtAddr(psFWZSBufferMemDesc, + (void **)&psFWZSBuffer); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map firmware ZS-Buffer (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto ErrorAcquireFWZSBuffer; + } + + /* Populate FW ZS-Buffer data structure */ + psFWZSBuffer->bOnDemand = bOnDemand; + psFWZSBuffer->eState = (bOnDemand) ? RGXFWIF_PRBUFFER_UNBACKED : RGXFWIF_PRBUFFER_BACKED; + psFWZSBuffer->ui32BufferID = psZSBuffer->ui32ZSBufferID; + + /* Get firmware address of ZS-Buffer. 
*/ + eError = RGXSetFirmwareAddress(&psZSBuffer->sZSBufferFWDevVAddr, psFWZSBufferMemDesc, 0, RFW_FWADDR_FLAG_NONE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", ErrorSetFwAddr); + + /* Dump the ZS-Buffer and the memory content */ +#if defined(PDUMP) + PDUMPCOMMENT("Dump firmware ZS-Buffer"); + DevmemPDumpLoadMem(psFWZSBufferMemDesc, 0, sizeof(*psFWZSBuffer), PDUMP_FLAGS_CONTINUOUS); +#endif + + /* Release address acquired above. */ + DevmemReleaseCpuVirtAddr(psFWZSBufferMemDesc); + + + /* define return value */ + *ppsZSBuffer = psZSBuffer; + + PVR_DPF((PVR_DBG_MESSAGE, "ZS-Buffer [%p] created (%s)", + psZSBuffer, + (bOnDemand) ? "On-Demand": "Up-front")); + + psZSBuffer->owner=OSGetCurrentClientProcessIDKM(); + + return PVRSRV_OK; + + /* error handling */ + +ErrorSetFwAddr: + DevmemReleaseCpuVirtAddr(psFWZSBufferMemDesc); +ErrorAcquireFWZSBuffer: + DevmemFwUnmapAndFree(psDevInfo, psFWZSBufferMemDesc); + +ErrorAllocFWZSBuffer: + OSFreeMem(psZSBuffer); + +ErrorAllocCleanup: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + + +/* + RGXDestroyZSBuffer +*/ +PVRSRV_ERROR RGXDestroyZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer) +{ + POS_LOCK hLockZSBuffer; + PVRSRV_ERROR eError; + + PVR_ASSERT(psZSBuffer); + hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer; + + /* Request ZS Buffer cleanup */ + eError = RGXFWRequestZSBufferCleanUp(psZSBuffer->psDevInfo, + psZSBuffer->sZSBufferFWDevVAddr); + if (eError != PVRSRV_ERROR_RETRY) + { + /* Free the firmware render context. 
*/ + RGXUnsetFirmwareAddress(psZSBuffer->psFWZSBufferMemDesc); + DevmemFwUnmapAndFree(psZSBuffer->psDevInfo, psZSBuffer->psFWZSBufferMemDesc); + + /* Remove Deferred Allocation from list */ + if (psZSBuffer->bOnDemand) + { + OSLockAcquire(hLockZSBuffer); + PVR_ASSERT(dllist_node_is_in_list(&psZSBuffer->sNode)); + dllist_remove_node(&psZSBuffer->sNode); + OSLockRelease(hLockZSBuffer); + } + + PVR_ASSERT(psZSBuffer->ui32RefCount == 0); + + PVR_DPF((PVR_DBG_MESSAGE, "ZS-Buffer [%p] destroyed", psZSBuffer)); + + /* Free ZS-Buffer host data structure */ + OSFreeMem(psZSBuffer); + + } + + return eError; +} + +PVRSRV_ERROR +RGXBackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer) +{ + POS_LOCK hLockZSBuffer; + PVRSRV_ERROR eError; + + if (!psZSBuffer) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (!psZSBuffer->bOnDemand) + { + /* Only deferred allocations can be populated */ + return PVRSRV_ERROR_INVALID_PARAMS; + } + + PVR_DPF((PVR_DBG_MESSAGE, + "ZS Buffer [%p, ID=0x%08x]: Physical backing requested", + psZSBuffer, + psZSBuffer->ui32ZSBufferID)); + hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer; + + OSLockAcquire(hLockZSBuffer); + + if (psZSBuffer->ui32RefCount == 0) + { + if (psZSBuffer->bOnDemand) + { + IMG_HANDLE hDevmemHeap = (IMG_HANDLE)NULL; + + PVR_ASSERT(psZSBuffer->psMapping == NULL); + + /* Get Heap */ + eError = DevmemServerGetHeapHandle(psZSBuffer->psReservation, &hDevmemHeap); + PVR_ASSERT(psZSBuffer->psMapping == NULL); + if (unlikely(hDevmemHeap == (IMG_HANDLE)NULL)) + { + OSLockRelease(hLockZSBuffer); + return PVRSRV_ERROR_INVALID_HEAP; + } + + eError = DevmemIntMapPMR(hDevmemHeap, + psZSBuffer->psReservation, + psZSBuffer->psPMR, + psZSBuffer->uiMapFlags, + &psZSBuffer->psMapping); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "Unable populate ZS Buffer [%p, ID=0x%08x] (%s)", + psZSBuffer, + psZSBuffer->ui32ZSBufferID, + PVRSRVGetErrorString(eError))); + OSLockRelease(hLockZSBuffer); + return eError; + + } + PVR_DPF((PVR_DBG_MESSAGE, 
"ZS Buffer [%p, ID=0x%08x]: Physical backing acquired", + psZSBuffer, + psZSBuffer->ui32ZSBufferID)); + } + } + + /* Increase refcount*/ + psZSBuffer->ui32RefCount++; + + OSLockRelease(hLockZSBuffer); + + return PVRSRV_OK; +} + + +PVRSRV_ERROR +RGXPopulateZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer, + RGX_POPULATION **ppsPopulation) +{ + RGX_POPULATION *psPopulation; + PVRSRV_ERROR eError; + + psZSBuffer->ui32NumReqByApp++; + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + PVRSRVStatsUpdateZSBufferStats(1, 0, psZSBuffer->owner); +#endif + + /* Do the backing */ + eError = RGXBackingZSBuffer(psZSBuffer); + if (eError != PVRSRV_OK) + { + goto OnErrorBacking; + } + + /* Create the handle to the backing */ + psPopulation = OSAllocMem(sizeof(*psPopulation)); + if (psPopulation == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto OnErrorAlloc; + } + + psPopulation->psZSBuffer = psZSBuffer; + + /* return value */ + *ppsPopulation = psPopulation; + + return PVRSRV_OK; + +OnErrorAlloc: + RGXUnbackingZSBuffer(psZSBuffer); + +OnErrorBacking: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +PVRSRV_ERROR +RGXUnbackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer) +{ + POS_LOCK hLockZSBuffer; + PVRSRV_ERROR eError; + + if (!psZSBuffer) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + PVR_ASSERT(psZSBuffer->ui32RefCount); + + PVR_DPF((PVR_DBG_MESSAGE, + "ZS Buffer [%p, ID=0x%08x]: Physical backing removal requested", + psZSBuffer, + psZSBuffer->ui32ZSBufferID)); + + hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer; + + OSLockAcquire(hLockZSBuffer); + + if (psZSBuffer->bOnDemand) + { + if (psZSBuffer->ui32RefCount == 1) + { + PVR_ASSERT(psZSBuffer->psMapping); + + eError = DevmemIntUnmapPMR(psZSBuffer->psMapping); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "Unable to unpopulate ZS Buffer [%p, ID=0x%08x] (%s)", + psZSBuffer, + psZSBuffer->ui32ZSBufferID, + PVRSRVGetErrorString(eError))); + OSLockRelease(hLockZSBuffer); + return eError; + } + + 
PVR_DPF((PVR_DBG_MESSAGE, "ZS Buffer [%p, ID=0x%08x]: Physical backing removed", + psZSBuffer, + psZSBuffer->ui32ZSBufferID)); + } + } + + /* Decrease refcount*/ + psZSBuffer->ui32RefCount--; + + OSLockRelease(hLockZSBuffer); + + return PVRSRV_OK; +} + +PVRSRV_ERROR +RGXUnpopulateZSBufferKM(RGX_POPULATION *psPopulation) +{ + PVRSRV_ERROR eError; + + if (!psPopulation) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + eError = RGXUnbackingZSBuffer(psPopulation->psZSBuffer); + if (eError != PVRSRV_OK) + { + return eError; + } + + OSFreeMem(psPopulation); + + return PVRSRV_OK; +} + +static RGX_ZSBUFFER_DATA *FindZSBuffer(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32ZSBufferID) +{ + DLLIST_NODE *psNode, *psNext; + RGX_ZSBUFFER_DATA *psZSBuffer = NULL; + + OSLockAcquire(psDevInfo->hLockZSBuffer); + + dllist_foreach_node(&psDevInfo->sZSBufferHead, psNode, psNext) + { + RGX_ZSBUFFER_DATA *psThisZSBuffer = IMG_CONTAINER_OF(psNode, RGX_ZSBUFFER_DATA, sNode); + + if (psThisZSBuffer->ui32ZSBufferID == ui32ZSBufferID) + { + psZSBuffer = psThisZSBuffer; + break; + } + } + + OSLockRelease(psDevInfo->hLockZSBuffer); + return psZSBuffer; +} + +void RGXProcessRequestZSBufferBacking(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32ZSBufferID) +{ + IMG_BOOL bBackingDone = IMG_TRUE; + RGX_ZSBUFFER_DATA *psZSBuffer; + RGXFWIF_KCCB_CMD sTACCBCmd; + PVRSRV_ERROR eError; + + PVR_ASSERT(psDevInfo); + + /* scan all deferred allocations */ + psZSBuffer = FindZSBuffer(psDevInfo, ui32ZSBufferID); + + if (psZSBuffer == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "ZS Buffer Lookup for ZS Buffer ID 0x%08x failed (Populate)", + ui32ZSBufferID)); + + return; + } + + /* Populate ZLS */ + eError = RGXBackingZSBuffer(psZSBuffer); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "Populating ZS-Buffer failed with error %u (ID = 0x%08x)", + eError, ui32ZSBufferID)); + bBackingDone = IMG_FALSE; + } + + /* send confirmation */ + sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE; + 
sTACCBCmd.uCmdData.sZSBufferBackingData.sZSBufferFWDevVAddr.ui32Addr = psZSBuffer->sZSBufferFWDevVAddr.ui32Addr; + sTACCBCmd.uCmdData.sZSBufferBackingData.bDone = bBackingDone; + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psDevInfo, + RGXFWIF_DM_GEOM, + &sTACCBCmd, + 0, + PDUMP_FLAGS_NONE); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + /* Kernel CCB should never fill up, as the FW is processing them right away */ + PVR_ASSERT(eError == PVRSRV_OK); + + psZSBuffer->ui32NumReqByFW++; + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + PVRSRVStatsUpdateZSBufferStats(0, 1, psZSBuffer->owner); +#endif +} + +void RGXProcessRequestZSBufferUnbacking(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32ZSBufferID) +{ + RGX_ZSBUFFER_DATA *psZSBuffer; + RGXFWIF_KCCB_CMD sTACCBCmd; + PVRSRV_ERROR eError; + + PVR_ASSERT(psDevInfo); + + /* scan all deferred allocations */ + psZSBuffer = FindZSBuffer(psDevInfo, ui32ZSBufferID); + + if (psZSBuffer == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "ZS Buffer Lookup for ZS Buffer ID 0x%08x failed (UnPopulate)", + ui32ZSBufferID)); + + return; + } + + /* Unpopulate ZLS */ + eError = RGXUnbackingZSBuffer(psZSBuffer); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "UnPopulating ZS-Buffer failed with error %u (ID = 0x%08x)", + eError, ui32ZSBufferID)); + PVR_ASSERT(IMG_FALSE); + } + + /* send confirmation */ + sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE; + sTACCBCmd.uCmdData.sZSBufferBackingData.sZSBufferFWDevVAddr.ui32Addr = psZSBuffer->sZSBufferFWDevVAddr.ui32Addr; + sTACCBCmd.uCmdData.sZSBufferBackingData.bDone = IMG_TRUE; + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psDevInfo, + RGXFWIF_DM_GEOM, + &sTACCBCmd, + 0, + PDUMP_FLAGS_NONE); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + /* Kernel CCB should 
never fill up, as the FW is processing them right away */ + PVR_ASSERT(eError == PVRSRV_OK); +} + +static +PVRSRV_ERROR _CreateTAContext(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + SERVER_MMU_CONTEXT *psServerMMUContext, + DEVMEM_MEMDESC *psAllocatedMemDesc, + IMG_UINT32 ui32AllocatedOffset, + DEVMEM_MEMDESC *psFWMemContextMemDesc, + IMG_UINT32 ui32Priority, + IMG_UINT32 ui32MaxDeadlineMS, + IMG_UINT64 ui64RobustnessAddress, + RGX_COMMON_CONTEXT_INFO *psInfo, + RGX_SERVER_RC_TA_DATA *psTAData, + IMG_UINT32 ui32CCBAllocSizeLog2, + IMG_UINT32 ui32CCBMaxAllocSizeLog2, + IMG_UINT32 ui32ContextFlags) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + + /* + Allocate device memory for the firmware GPU context suspend state. + Note: the FW reads/writes the state to memory by accessing the GPU register interface. + */ + PDUMPCOMMENT("Allocate RGX firmware TA context suspend state"); + + eError = DevmemFwAllocate(psDevInfo, + sizeof(RGXFWIF_TACTX_STATE), + RGX_FWCOMCTX_ALLOCFLAGS, + "FwTAContextState", + &psTAData->psContextStateMemDesc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate firmware GPU context suspend state (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_tacontextsuspendalloc; + } + + eError = FWCommonContextAllocate(psConnection, + psDeviceNode, + REQ_TYPE_TA, + RGXFWIF_DM_GEOM, + psServerMMUContext, + psAllocatedMemDesc, + ui32AllocatedOffset, + psFWMemContextMemDesc, + psTAData->psContextStateMemDesc, + ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TA_CCB_SIZE_LOG2, + ui32CCBMaxAllocSizeLog2 ? 
ui32CCBMaxAllocSizeLog2 : RGX_TA_CCB_MAX_SIZE_LOG2, + ui32ContextFlags, + ui32Priority, + ui32MaxDeadlineMS, + ui64RobustnessAddress, + psInfo, + &psTAData->psServerCommonContext); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to init TA fw common context (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_tacommoncontext; + } + + /* + * Dump the FW 3D context suspend state buffer + */ +#if defined(PDUMP) + PDUMPCOMMENT("Dump the TA context suspend state buffer"); + DevmemPDumpLoadMem(psTAData->psContextStateMemDesc, + 0, + sizeof(RGXFWIF_TACTX_STATE), + PDUMP_FLAGS_CONTINUOUS); +#endif + + psTAData->ui32Priority = ui32Priority; + return PVRSRV_OK; + +fail_tacommoncontext: + DevmemFree(psTAData->psContextStateMemDesc); +fail_tacontextsuspendalloc: + PVR_ASSERT(eError != PVRSRV_OK); + + return eError; +} + +static +PVRSRV_ERROR _Create3DContext(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + SERVER_MMU_CONTEXT *psServerMMUContext, + DEVMEM_MEMDESC *psAllocatedMemDesc, + IMG_UINT32 ui32AllocatedOffset, + DEVMEM_MEMDESC *psFWMemContextMemDesc, + IMG_UINT32 ui32Priority, + IMG_UINT32 ui32MaxDeadlineMS, + IMG_UINT64 ui64RobustnessAddress, + RGX_COMMON_CONTEXT_INFO *psInfo, + RGX_SERVER_RC_3D_DATA *ps3DData, + IMG_UINT32 ui32CCBAllocSizeLog2, + IMG_UINT32 ui32CCBMaxAllocSizeLog2, + IMG_UINT32 ui32ContextFlags) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + IMG_UINT uiNumISPStoreRegs = RGXFWIF_IPP_RESUME_REG_COUNT; /* default 1 register for IPP_resume */ + IMG_UINT ui3DRegISPStateStoreSize = 0; + + /* + Allocate device memory for the firmware GPU context suspend state. + Note: the FW reads/writes the state to memory by accessing the GPU register interface. 
+ */ + PDUMPCOMMENT("Allocate RGX firmware 3D context suspend state"); + + uiNumISPStoreRegs += (RGX_GET_FEATURE_VALUE(psDevInfo, NUM_SPU) * + RGX_GET_FEATURE_VALUE(psDevInfo, NUM_ISP_PER_SPU) * + RGXFWIF_PIPE_COUNT_PER_ISP); + + + if (uiNumISPStoreRegs > (RGXFWIF_ISP_PIPE_COUNT_MAX + RGXFWIF_IPP_RESUME_REG_COUNT)) + { + return PVRSRV_ERROR_NOT_SUPPORTED; + } + + /* Size of the CS buffer */ + /* Calculate the size of the 3DCTX ISP state */ + ui3DRegISPStateStoreSize = sizeof(RGXFWIF_3DCTX_STATE) + + (uiNumISPStoreRegs * sizeof(((RGXFWIF_3DCTX_STATE *)0)->au3DReg_ISP_STORE[0])); + + eError = DevmemFwAllocate(psDevInfo, + ui3DRegISPStateStoreSize, + RGX_FWCOMCTX_ALLOCFLAGS, + "Fw3DContextState", + &ps3DData->psContextStateMemDesc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate firmware GPU context suspend state (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_3dcontextsuspendalloc; + } + + eError = FWCommonContextAllocate(psConnection, + psDeviceNode, + REQ_TYPE_3D, + RGXFWIF_DM_3D, + psServerMMUContext, + psAllocatedMemDesc, + ui32AllocatedOffset, + psFWMemContextMemDesc, + ps3DData->psContextStateMemDesc, + ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_3D_CCB_SIZE_LOG2, + ui32CCBMaxAllocSizeLog2 ? 
ui32CCBMaxAllocSizeLog2 : RGX_3D_CCB_MAX_SIZE_LOG2, + ui32ContextFlags, + ui32Priority, + ui32MaxDeadlineMS, + ui64RobustnessAddress, + psInfo, + &ps3DData->psServerCommonContext); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to init 3D fw common context (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_3dcommoncontext; + } + + /* + * Dump the FW 3D context suspend state buffer + */ + PDUMPCOMMENT("Dump the 3D context suspend state buffer"); + DevmemPDumpLoadMem(ps3DData->psContextStateMemDesc, + 0, + sizeof(RGXFWIF_3DCTX_STATE), + PDUMP_FLAGS_CONTINUOUS); + + ps3DData->ui32Priority = ui32Priority; + return PVRSRV_OK; + +fail_3dcommoncontext: + DevmemFree(ps3DData->psContextStateMemDesc); +fail_3dcontextsuspendalloc: + PVR_ASSERT(eError != PVRSRV_OK); + + return eError; +} + + +/* + * PVRSRVRGXCreateRenderContextKM + */ +PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32Priority, + IMG_UINT32 ui32FrameworkRegisterSize, + IMG_PBYTE pabyFrameworkRegisters, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32StaticRenderContextStateSize, + IMG_PBYTE pStaticRenderContextState, + IMG_UINT32 ui32PackedCCBSizeU8888, + IMG_UINT32 ui32ContextFlags, + IMG_UINT64 ui64RobustnessAddress, + IMG_UINT32 ui32MaxTADeadlineMS, + IMG_UINT32 ui32Max3DDeadlineMS, + RGX_SERVER_RENDER_CONTEXT **ppsRenderContext) +{ + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_SERVER_RENDER_CONTEXT *psRenderContext; + DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); + RGX_COMMON_CONTEXT_INFO sInfo = {NULL}; + RGXFWIF_FWRENDERCONTEXT *psFWRenderContext; + + *ppsRenderContext = NULL; + + if (ui32StaticRenderContextStateSize > RGXFWIF_STATIC_RENDERCONTEXT_SIZE) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psRenderContext = OSAllocZMem(sizeof(*psRenderContext)); + if (psRenderContext == NULL) + { + return 
PVRSRV_ERROR_OUT_OF_MEMORY; + } + + eError = OSLockCreate(&psRenderContext->hLock); + + if (eError != PVRSRV_OK) + { + goto fail_lock; + } + + psRenderContext->psDeviceNode = psDeviceNode; + + /* + Create the FW render context, this has the TA and 3D FW common + contexts embedded within it + */ + eError = DevmemFwAllocate(psDevInfo, + sizeof(RGXFWIF_FWRENDERCONTEXT), + RGX_FWCOMCTX_ALLOCFLAGS, + "FwRenderContext", + &psRenderContext->psFWRenderContextMemDesc); + if (eError != PVRSRV_OK) + { + goto fail_fwrendercontext; + } + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + WorkEstInitTA3D(psDevInfo, &psRenderContext->sWorkEstData); +#endif + + if (ui32FrameworkRegisterSize) + { + /* + * Create the FW framework buffer + */ + eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode, + &psRenderContext->psFWFrameworkMemDesc, + ui32FrameworkRegisterSize); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate firmware GPU framework state (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_frameworkcreate; + } + + /* Copy the Framework client data into the framework buffer */ + eError = PVRSRVRGXFrameworkCopyCommand(psRenderContext->psFWFrameworkMemDesc, + pabyFrameworkRegisters, + ui32FrameworkRegisterSize); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to populate the framework buffer (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_frameworkcopy; + } + sInfo.psFWFrameworkMemDesc = psRenderContext->psFWFrameworkMemDesc; + } + + eError = _Create3DContext(psConnection, + psDeviceNode, + hMemCtxPrivData, + psRenderContext->psFWRenderContextMemDesc, + offsetof(RGXFWIF_FWRENDERCONTEXT, s3DContext), + psFWMemContextMemDesc, + ui32Priority, + ui32Max3DDeadlineMS, + ui64RobustnessAddress, + &sInfo, + &psRenderContext->s3DData, + U32toU8_Unpack3(ui32PackedCCBSizeU8888), + U32toU8_Unpack4(ui32PackedCCBSizeU8888), + ui32ContextFlags); + if (eError != PVRSRV_OK) + { + goto fail_3dcontext; + } + + eError = 
_CreateTAContext(psConnection, + psDeviceNode, + hMemCtxPrivData, + psRenderContext->psFWRenderContextMemDesc, + offsetof(RGXFWIF_FWRENDERCONTEXT, sTAContext), + psFWMemContextMemDesc, + ui32Priority, + ui32MaxTADeadlineMS, + ui64RobustnessAddress, + &sInfo, + &psRenderContext->sTAData, + U32toU8_Unpack1(ui32PackedCCBSizeU8888), + U32toU8_Unpack2(ui32PackedCCBSizeU8888), + ui32ContextFlags); + if (eError != PVRSRV_OK) + { + goto fail_tacontext; + } + + eError = DevmemAcquireCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc, + (void **)&psFWRenderContext); + if (eError != PVRSRV_OK) + { + goto fail_acquire_cpu_mapping; + } + + OSDeviceMemCopy(&psFWRenderContext->sStaticRenderContextState, pStaticRenderContextState, ui32StaticRenderContextStateSize); + DevmemPDumpLoadMem(psRenderContext->psFWRenderContextMemDesc, 0, sizeof(RGXFWIF_FWRENDERCONTEXT), PDUMP_FLAGS_CONTINUOUS); + DevmemReleaseCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc); + +#if defined(SUPPORT_BUFFER_SYNC) + psRenderContext->psBufferSyncContext = + pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice, + "rogue-ta3d"); + if (IS_ERR(psRenderContext->psBufferSyncContext)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed to create buffer_sync context (err=%ld)", + __func__, PTR_ERR(psRenderContext->psBufferSyncContext))); + + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto fail_buffer_sync_context_create; + } +#endif + + SyncAddrListInit(&psRenderContext->sSyncAddrListTAFence); + SyncAddrListInit(&psRenderContext->sSyncAddrListTAUpdate); + SyncAddrListInit(&psRenderContext->sSyncAddrList3DFence); + SyncAddrListInit(&psRenderContext->sSyncAddrList3DUpdate); + + { + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock); + dllist_add_to_tail(&(psDevInfo->sRenderCtxtListHead), &(psRenderContext->sListNode)); + OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock); + } + + *ppsRenderContext = psRenderContext; + return PVRSRV_OK; + +#if 
defined(SUPPORT_BUFFER_SYNC) +fail_buffer_sync_context_create: +#endif +fail_acquire_cpu_mapping: + _DestroyTAContext(&psRenderContext->sTAData, + psDeviceNode); +fail_tacontext: + _Destroy3DContext(&psRenderContext->s3DData, + psRenderContext->psDeviceNode); +fail_3dcontext: +fail_frameworkcopy: + if (psRenderContext->psFWFrameworkMemDesc != NULL) + { + DevmemFwUnmapAndFree(psDevInfo, psRenderContext->psFWFrameworkMemDesc); + } +fail_frameworkcreate: + DevmemFwUnmapAndFree(psDevInfo, psRenderContext->psFWRenderContextMemDesc); +fail_fwrendercontext: + OSLockDestroy(psRenderContext->hLock); +fail_lock: + OSFreeMem(psRenderContext); + PVR_ASSERT(eError != PVRSRV_OK); + + return eError; +} + +/* + * PVRSRVRGXDestroyRenderContextKM + */ +PVRSRV_ERROR PVRSRVRGXDestroyRenderContextKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext) +{ + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo = psRenderContext->psDeviceNode->pvDevice; +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + RGXFWIF_FWRENDERCONTEXT *psFWRenderContext; + IMG_UINT32 ui32WorkEstCCBSubmitted; +#endif + + /* remove node from list before calling destroy - as destroy, if successful + * will invalidate the node + * must be re-added if destroy fails + */ + OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock); + dllist_remove_node(&(psRenderContext->sListNode)); + OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock); + +#if defined(SUPPORT_BUFFER_SYNC) + /* Check psBufferSyncContext has not been destroyed already (by a previous + * call to this function which then later returned PVRSRV_ERROR_RETRY) + */ + if (psRenderContext->psBufferSyncContext != NULL) + { + pvr_buffer_sync_context_destroy(psRenderContext->psBufferSyncContext); + psRenderContext->psBufferSyncContext = NULL; + } +#endif + + /* Cleanup the TA if we haven't already */ + if ((psRenderContext->ui32CleanupStatus & RC_CLEANUP_TA_COMPLETE) == 0) + { + eError = _DestroyTAContext(&psRenderContext->sTAData, + psRenderContext->psDeviceNode); + if (eError == 
PVRSRV_OK) + { + psRenderContext->ui32CleanupStatus |= RC_CLEANUP_TA_COMPLETE; + } + else + { + goto e0; + } + } + + /* Cleanup the 3D if we haven't already */ + if ((psRenderContext->ui32CleanupStatus & RC_CLEANUP_3D_COMPLETE) == 0) + { + eError = _Destroy3DContext(&psRenderContext->s3DData, + psRenderContext->psDeviceNode); + if (eError == PVRSRV_OK) + { + psRenderContext->ui32CleanupStatus |= RC_CLEANUP_3D_COMPLETE; + } + else + { + goto e0; + } + } + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + eError = DevmemAcquireCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc, + (void **)&psFWRenderContext); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map firmware render context (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto e0; + } + + ui32WorkEstCCBSubmitted = psFWRenderContext->ui32WorkEstCCBSubmitted; + + DevmemReleaseCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc); + + /* Check if all of the workload estimation CCB commands for this workload are read */ + if (ui32WorkEstCCBSubmitted != psRenderContext->sWorkEstData.ui32WorkEstCCBReceived) + { + + PVR_DPF((PVR_DBG_WARNING, + "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch", + __func__, ui32WorkEstCCBSubmitted, + psRenderContext->sWorkEstData.ui32WorkEstCCBReceived)); + + eError = PVRSRV_ERROR_RETRY; + goto e0; + } +#endif + + /* + Only if both TA and 3D contexts have been cleaned up can we + free the shared resources + */ + if (psRenderContext->ui32CleanupStatus == (RC_CLEANUP_3D_COMPLETE | RC_CLEANUP_TA_COMPLETE)) + { + if (psRenderContext->psFWFrameworkMemDesc != NULL) + { + /* Free the framework buffer */ + DevmemFwUnmapAndFree(psDevInfo, psRenderContext->psFWFrameworkMemDesc); + } + + /* Free the firmware render context */ + DevmemFwUnmapAndFree(psDevInfo, psRenderContext->psFWRenderContextMemDesc); + + SyncAddrListDeinit(&psRenderContext->sSyncAddrListTAFence); + SyncAddrListDeinit(&psRenderContext->sSyncAddrListTAUpdate); + 
SyncAddrListDeinit(&psRenderContext->sSyncAddrList3DFence); + SyncAddrListDeinit(&psRenderContext->sSyncAddrList3DUpdate); + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + WorkEstDeInitTA3D(psDevInfo, &psRenderContext->sWorkEstData); +#endif + OSLockDestroy(psRenderContext->hLock); + + OSFreeMem(psRenderContext); + } + + return PVRSRV_OK; + +e0: + OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock); + dllist_add_to_tail(&(psDevInfo->sRenderCtxtListHead), &(psRenderContext->sListNode)); + OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock); + return eError; +} + + +#if (ENABLE_TA3D_UFO_DUMP == 1) +static void DumpUfoList(IMG_UINT32 ui32ClientTAFenceCount, + IMG_UINT32 ui32ClientTAUpdateCount, + IMG_UINT32 ui32Client3DFenceCount, + IMG_UINT32 ui32Client3DUpdateCount, + PRGXFWIF_UFO_ADDR *pauiClientTAFenceUFOAddress, + IMG_UINT32 *paui32ClientTAFenceValue, + PRGXFWIF_UFO_ADDR *pauiClientTAUpdateUFOAddress, + IMG_UINT32 *paui32ClientTAUpdateValue, + PRGXFWIF_UFO_ADDR *pauiClient3DFenceUFOAddress, + IMG_UINT32 *paui32Client3DFenceValue, + PRGXFWIF_UFO_ADDR *pauiClient3DUpdateUFOAddress, + IMG_UINT32 *paui32Client3DUpdateValue) +{ + IMG_UINT32 i; + + PVR_DPF((PVR_DBG_ERROR, "%s: ~~~ After populating sync prims ~~~", + __func__)); + + /* Dump Fence syncs, Update syncs and PR Update syncs */ + PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TA fence syncs:", + __func__, ui32ClientTAFenceCount)); + for (i = 0; i < ui32ClientTAFenceCount; i++) + { + if (BITMASK_HAS(pauiClientTAFenceUFOAddress->ui32Addr, 1)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %d/%d<%p>. FWAddr=0x%x," + " CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", + __func__, i + 1, ui32ClientTAFenceCount, + (void *) pauiClientTAFenceUFOAddress, + pauiClientTAFenceUFOAddress->ui32Addr)); + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %d/%d<%p>. 
FWAddr=0x%x, CheckValue=%d(0x%x)", + __func__, i + 1, ui32ClientTAFenceCount, + (void *) pauiClientTAFenceUFOAddress, + pauiClientTAFenceUFOAddress->ui32Addr, + *paui32ClientTAFenceValue, + *paui32ClientTAFenceValue)); + paui32ClientTAFenceValue++; + } + pauiClientTAFenceUFOAddress++; + } + + PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TA update syncs:", + __func__, ui32ClientTAUpdateCount)); + for (i = 0; i < ui32ClientTAUpdateCount; i++) + { + if (BITMASK_HAS(pauiClientTAUpdateUFOAddress->ui32Addr, 1)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %d/%d<%p>. FWAddr=0x%x," + " UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", + __func__, i + 1, ui32ClientTAUpdateCount, + (void *) pauiClientTAUpdateUFOAddress, + pauiClientTAUpdateUFOAddress->ui32Addr)); + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d(0x%x)", + __func__, i + 1, ui32ClientTAUpdateCount, + (void *) pauiClientTAUpdateUFOAddress, + pauiClientTAUpdateUFOAddress->ui32Addr, + *paui32ClientTAUpdateValue, + *paui32ClientTAUpdateValue)); + paui32ClientTAUpdateValue++; + } + pauiClientTAUpdateUFOAddress++; + } + + PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d 3D fence syncs:", + __func__, ui32Client3DFenceCount)); + for (i = 0; i < ui32Client3DFenceCount; i++) + { + if (BITMASK_HAS(pauiClient3DFenceUFOAddress->ui32Addr, 1)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %d/%d<%p>. FWAddr=0x%x," + " CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", + __func__, i + 1, ui32Client3DFenceCount, + (void *) pauiClient3DFenceUFOAddress, + pauiClient3DFenceUFOAddress->ui32Addr)); + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %d/%d<%p>. 
FWAddr=0x%x, CheckValue=%d(0x%x)", + __func__, i + 1, ui32Client3DFenceCount, + (void *) pauiClient3DFenceUFOAddress, + pauiClient3DFenceUFOAddress->ui32Addr, + *paui32Client3DFenceValue, + *paui32Client3DFenceValue)); + paui32Client3DFenceValue++; + } + pauiClient3DFenceUFOAddress++; + } + + PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d 3D update syncs:", + __func__, ui32Client3DUpdateCount)); + for (i = 0; i < ui32Client3DUpdateCount; i++) + { + if (BITMASK_HAS(pauiClient3DUpdateUFOAddress->ui32Addr, 1)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %d/%d<%p>. FWAddr=0x%x," + " UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", + __func__, i + 1, ui32Client3DUpdateCount, + (void *) pauiClient3DUpdateUFOAddress, + pauiClient3DUpdateUFOAddress->ui32Addr)); + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d(0x%x)", + __func__, i + 1, ui32Client3DUpdateCount, + (void *) pauiClient3DUpdateUFOAddress, + pauiClient3DUpdateUFOAddress->ui32Addr, + *paui32Client3DUpdateValue, + *paui32Client3DUpdateValue)); + paui32Client3DUpdateValue++; + } + pauiClient3DUpdateUFOAddress++; + } +} +#endif /* (ENABLE_TA3D_UFO_DUMP == 1) */ + +/* + * PVRSRVRGXKickTA3DKM + */ +PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, + IMG_UINT32 ui32ClientCacheOpSeqNum, + IMG_UINT32 ui32ClientTAFenceCount, + SYNC_PRIMITIVE_BLOCK **apsClientTAFenceSyncPrimBlock, + IMG_UINT32 *paui32ClientTAFenceSyncOffset, + IMG_UINT32 *paui32ClientTAFenceValue, + IMG_UINT32 ui32ClientTAUpdateCount, + SYNC_PRIMITIVE_BLOCK **apsClientTAUpdateSyncPrimBlock, + IMG_UINT32 *paui32ClientTAUpdateSyncOffset, + IMG_UINT32 *paui32ClientTAUpdateValue, + IMG_UINT32 ui32Client3DUpdateCount, + SYNC_PRIMITIVE_BLOCK **apsClient3DUpdateSyncPrimBlock, + IMG_UINT32 *paui32Client3DUpdateSyncOffset, + IMG_UINT32 *paui32Client3DUpdateValue, + SYNC_PRIMITIVE_BLOCK *psPRFenceSyncPrimBlock, + IMG_UINT32 ui32PRFenceSyncOffset, + IMG_UINT32 ui32PRFenceValue, + PVRSRV_FENCE iCheckTAFence, + 
PVRSRV_TIMELINE iUpdateTATimeline, + PVRSRV_FENCE *piUpdateTAFence, + IMG_CHAR szFenceNameTA[PVRSRV_SYNC_NAME_LENGTH], + PVRSRV_FENCE iCheck3DFence, + PVRSRV_TIMELINE iUpdate3DTimeline, + PVRSRV_FENCE *piUpdate3DFence, + IMG_CHAR szFenceName3D[PVRSRV_SYNC_NAME_LENGTH], + IMG_UINT32 ui32TACmdSize, + IMG_PBYTE pui8TADMCmd, + IMG_UINT32 ui323DPRCmdSize, + IMG_PBYTE pui83DPRDMCmd, + IMG_UINT32 ui323DCmdSize, + IMG_PBYTE pui83DDMCmd, + IMG_UINT32 ui32ExtJobRef, + IMG_BOOL bKickTA, + IMG_BOOL bKickPR, + IMG_BOOL bKick3D, + IMG_BOOL bAbort, + IMG_UINT32 ui32PDumpFlags, + RGX_KM_HW_RT_DATASET *psKMHWRTDataSet, + RGX_ZSBUFFER_DATA *psZSBuffer, + RGX_ZSBUFFER_DATA *psMSAAScratchBuffer, + IMG_UINT32 ui32SyncPMRCount, + IMG_UINT32 *paui32SyncPMRFlags, + PMR **ppsSyncPMRs, + IMG_UINT32 ui32RenderTargetSize, + IMG_UINT32 ui32NumberOfDrawCalls, + IMG_UINT32 ui32NumberOfIndices, + IMG_UINT32 ui32NumberOfMRTs, + IMG_UINT64 ui64DeadlineInus) +{ + /* per-context helper structures */ + RGX_CCB_CMD_HELPER_DATA *pasTACmdHelperData = psRenderContext->asTACmdHelperData; + RGX_CCB_CMD_HELPER_DATA *pas3DCmdHelperData = psRenderContext->as3DCmdHelperData; + + IMG_UINT64 ui64FBSCEntryMask; + + IMG_UINT32 ui32TACmdCount=0; + IMG_UINT32 ui323DCmdCount=0; + IMG_UINT32 ui32TACmdOffset=0; + IMG_UINT32 ui323DCmdOffset=0; + RGXFWIF_UFO sPRUFO; + IMG_UINT32 i; + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_ERROR eError2; + + PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psRenderContext->s3DData.psServerCommonContext); + IMG_UINT32 ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal); + IMG_BOOL bCCBStateOpen = IMG_FALSE; + + IMG_UINT32 ui32ClientPRUpdateCount = 0; + PRGXFWIF_UFO_ADDR *pauiClientPRUpdateUFOAddress = NULL; + IMG_UINT32 *paui32ClientPRUpdateValue = NULL; + + PRGXFWIF_TIMESTAMP_ADDR pPreAddr; + PRGXFWIF_TIMESTAMP_ADDR pPostAddr; + PRGXFWIF_UFO_ADDR pRMWUFOAddr; + + PRGXFWIF_UFO_ADDR *pauiClientTAFenceUFOAddress = NULL; + PRGXFWIF_UFO_ADDR 
*pauiClientTAUpdateUFOAddress = NULL; + PRGXFWIF_UFO_ADDR *pauiClient3DFenceUFOAddress = NULL; + PRGXFWIF_UFO_ADDR *pauiClient3DUpdateUFOAddress = NULL; + PRGXFWIF_UFO_ADDR uiPRFenceUFOAddress; + + IMG_UINT64 uiCheckTAFenceUID = 0; + IMG_UINT64 uiCheck3DFenceUID = 0; + IMG_UINT64 uiUpdateTAFenceUID = 0; + IMG_UINT64 uiUpdate3DFenceUID = 0; + + IMG_BOOL bUseCombined3DAnd3DPR = bKickPR && bKick3D && !pui83DPRDMCmd; + + IMG_UINT32 ui32TACmdSizeTmp = 0, ui323DCmdSizeTmp = 0; + + IMG_BOOL bTAFenceOnSyncCheckpointsOnly = IMG_FALSE; + + RGXFWIF_KCCB_CMD_KICK_DATA sTACmdKickData; + RGXFWIF_KCCB_CMD_KICK_DATA s3DCmdKickData; + IMG_BOOL bUseSingleFWCommand = bKickTA && (bKickPR || bKick3D); + + PVRSRV_FENCE iUpdateTAFence = PVRSRV_NO_FENCE; + PVRSRV_FENCE iUpdate3DFence = PVRSRV_NO_FENCE; + + IMG_BOOL b3DFenceOnSyncCheckpointsOnly = IMG_FALSE; + IMG_UINT32 ui32TAFenceTimelineUpdateValue = 0; + IMG_UINT32 ui323DFenceTimelineUpdateValue = 0; + + /* + * Count of the number of TA and 3D update values (may differ from number of + * TA and 3D updates later, as sync checkpoints do not need to specify a value) + */ + IMG_UINT32 ui32ClientPRUpdateValueCount = 0; + IMG_UINT32 ui32ClientTAUpdateValueCount = ui32ClientTAUpdateCount; + IMG_UINT32 ui32Client3DUpdateValueCount = ui32Client3DUpdateCount; + PSYNC_CHECKPOINT *apsFenceTASyncCheckpoints = NULL; /*!< TA fence checkpoints */ + PSYNC_CHECKPOINT *apsFence3DSyncCheckpoints = NULL; /*!< 3D fence checkpoints */ + IMG_UINT32 ui32FenceTASyncCheckpointCount = 0; + IMG_UINT32 ui32Fence3DSyncCheckpointCount = 0; + PSYNC_CHECKPOINT psUpdateTASyncCheckpoint = NULL; /*!< TA update checkpoint (output) */ + PSYNC_CHECKPOINT psUpdate3DSyncCheckpoint = NULL; /*!< 3D update checkpoint (output) */ + PVRSRV_CLIENT_SYNC_PRIM *psTAFenceTimelineUpdateSync = NULL; + PVRSRV_CLIENT_SYNC_PRIM *ps3DFenceTimelineUpdateSync = NULL; + void *pvTAUpdateFenceFinaliseData = NULL; + void *pv3DUpdateFenceFinaliseData = NULL; + + RGX_SYNC_DATA sTASyncData = {NULL}; 
/*!< Contains internal update syncs for TA */ + RGX_SYNC_DATA s3DSyncData = {NULL}; /*!< Contains internal update syncs for 3D */ + + IMG_BOOL bTestSLRAdd3DCheck = IMG_FALSE; +#if defined(SUPPORT_VALIDATION) + PVRSRV_FENCE hTestSLRTmpFence = PVRSRV_NO_FENCE; + PSYNC_CHECKPOINT psDummySyncCheckpoint = NULL; +#endif + +#if defined(SUPPORT_BUFFER_SYNC) + PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL; + IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0; + PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL; + struct pvr_buffer_sync_append_data *psBufferSyncData = NULL; +#endif /* defined(SUPPORT_BUFFER_SYNC) */ + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + RGXFWIF_WORKEST_KICK_DATA sWorkloadKickDataTA = {0}; + RGXFWIF_WORKEST_KICK_DATA sWorkloadKickData3D = {0}; + IMG_UINT32 ui32TACommandOffset = 0; + IMG_UINT32 ui323DCommandOffset = 0; + IMG_UINT32 ui32TACmdHeaderOffset = 0; + IMG_UINT32 ui323DCmdHeaderOffset = 0; + IMG_UINT32 ui323DFullRenderCommandOffset = 0; + IMG_UINT32 ui32TACmdOffsetWrapCheck = 0; + IMG_UINT32 ui323DCmdOffsetWrapCheck = 0; + RGX_WORKLOAD sWorkloadCharacteristics = {0}; +#endif + + IMG_UINT32 ui32TAFenceCount, ui323DFenceCount; + IMG_UINT32 ui32TAUpdateCount, ui323DUpdateCount; + IMG_UINT32 ui32PRUpdateCount; + + IMG_PID uiCurrentProcess = OSGetCurrentClientProcessIDKM(); + + IMG_UINT32 ui32Client3DFenceCount = 0; + + /* Ensure we haven't been given a null ptr to + * TA fence values if we have been told we + * have TA sync prim fences + */ + if (ui32ClientTAFenceCount > 0) + { + PVR_LOG_RETURN_IF_FALSE(paui32ClientTAFenceValue != NULL, + "paui32ClientTAFenceValue NULL but " + "ui32ClientTAFenceCount > 0", + PVRSRV_ERROR_INVALID_PARAMS); + } + /* Ensure we haven't been given a null ptr to + * TA update values if we have been told we + * have TA updates + */ + if (ui32ClientTAUpdateCount > 0) + { + PVR_LOG_RETURN_IF_FALSE(paui32ClientTAUpdateValue != NULL, + "paui32ClientTAUpdateValue NULL but " + "ui32ClientTAUpdateCount > 0", + 
PVRSRV_ERROR_INVALID_PARAMS); + } + /* Ensure we haven't been given a null ptr to + * 3D update values if we have been told we + * have 3D updates + */ + if (ui32Client3DUpdateCount > 0) + { + PVR_LOG_RETURN_IF_FALSE(paui32Client3DUpdateValue != NULL, + "paui32Client3DUpdateValue NULL but " + "ui32Client3DUpdateCount > 0", + PVRSRV_ERROR_INVALID_PARAMS); + } + + /* Write FW addresses into CMD SHARED BLOCKs */ + { + CMDTA3D_SHARED *psGeomCmdShared = (CMDTA3D_SHARED *)pui8TADMCmd; + CMDTA3D_SHARED *ps3DCmdShared = (CMDTA3D_SHARED *)pui83DDMCmd; + CMDTA3D_SHARED *psPR3DCmdShared = (CMDTA3D_SHARED *)pui83DPRDMCmd; + + if (psKMHWRTDataSet == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "KMHWRTDataSet is a null-pointer")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Write FW address for TA CMD + */ + if (psGeomCmdShared != NULL) + { + psGeomCmdShared->sHWRTData = psKMHWRTDataSet->sHWRTDataFwAddr; + + if (psZSBuffer != NULL) + { + psGeomCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_ZSBUFFER] = psZSBuffer->sZSBufferFWDevVAddr; + } + if (psMSAAScratchBuffer != NULL) + { + psGeomCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_MSAABUFFER] = psMSAAScratchBuffer->sZSBufferFWDevVAddr; + } + } + + /* Write FW address for 3D CMD + */ + if (ps3DCmdShared != NULL) + { + ps3DCmdShared->sHWRTData = psKMHWRTDataSet->sHWRTDataFwAddr; + + if (psZSBuffer != NULL) + { + ps3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_ZSBUFFER] = psZSBuffer->sZSBufferFWDevVAddr; + } + if (psMSAAScratchBuffer != NULL) + { + ps3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_MSAABUFFER] = psMSAAScratchBuffer->sZSBufferFWDevVAddr; + } + } + + /* Write FW address for PR3D CMD + */ + if (psPR3DCmdShared != NULL) + { + psPR3DCmdShared->sHWRTData = psKMHWRTDataSet->sHWRTDataFwAddr; + + if (psZSBuffer != NULL) + { + psPR3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_ZSBUFFER] = psZSBuffer->sZSBufferFWDevVAddr; + } + if (psMSAAScratchBuffer != NULL) + { + psPR3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_MSAABUFFER] = psMSAAScratchBuffer->sZSBufferFWDevVAddr; 
+ } + } + } + + if (unlikely(iUpdateTATimeline >= 0 && !piUpdateTAFence)) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + if (unlikely(iUpdate3DTimeline >= 0 && !piUpdate3DFence)) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + CHKPT_DBG((PVR_DBG_ERROR, + "%s: ui32ClientTAFenceCount=%d, ui32ClientTAUpdateCount=%d, " + "ui32Client3DFenceCount=%d, ui32Client3DUpdateCount=%d", + __func__, + ui32ClientTAFenceCount, ui32ClientTAUpdateCount, + ui32Client3DFenceCount, ui32Client3DUpdateCount)); + + RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psRenderContext->psDeviceNode->pvDevice, + &pPreAddr, + &pPostAddr, + &pRMWUFOAddr); + + /* Double-check we have a PR kick if there are client fences */ + if (unlikely(!bKickPR && ui32Client3DFenceCount != 0)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: 3D fence passed without a PR kick", + __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Ensure the string is null-terminated (Required for safety) */ + szFenceNameTA[PVRSRV_SYNC_NAME_LENGTH-1] = '\0'; + szFenceName3D[PVRSRV_SYNC_NAME_LENGTH-1] = '\0'; + + OSLockAcquire(psRenderContext->hLock); + + ui32TAFenceCount = ui32ClientTAFenceCount; + ui323DFenceCount = ui32Client3DFenceCount; + ui32TAUpdateCount = ui32ClientTAUpdateCount; + ui323DUpdateCount = ui32Client3DUpdateCount; + ui32PRUpdateCount = ui32ClientPRUpdateCount; + +#if defined(SUPPORT_BUFFER_SYNC) + if (ui32SyncPMRCount) + { + int err; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: Calling" + " pvr_buffer_sync_resolve_and_create_fences", __func__)); + + err = pvr_buffer_sync_resolve_and_create_fences( + psRenderContext->psBufferSyncContext, + psRenderContext->psDeviceNode->hSyncCheckpointContext, + ui32SyncPMRCount, + ppsSyncPMRs, + paui32SyncPMRFlags, + &ui32BufferFenceSyncCheckpointCount, + &apsBufferFenceSyncCheckpoints, + &psBufferUpdateSyncCheckpoint, + &psBufferSyncData + ); + + if (unlikely(err)) + { + switch (err) + { + case -EINTR: + eError = PVRSRV_ERROR_RETRY; + break; + case -ENOMEM: + eError = 
PVRSRV_ERROR_OUT_OF_MEMORY; + break; + default: + eError = PVRSRV_ERROR_INVALID_PARAMS; + break; + } + + if (eError != PVRSRV_ERROR_RETRY) + { + PVR_DPF((PVR_DBG_ERROR, "%s: " + "pvr_buffer_sync_resolve_and_create_fences failed (%d)", + __func__, eError)); + } + OSLockRelease(psRenderContext->hLock); + return eError; + } + +#if !defined(SUPPORT_STRIP_RENDERING) + if (bKickTA) + { + ui32TAFenceCount += ui32BufferFenceSyncCheckpointCount; + } + else + { + ui323DFenceCount += ui32BufferFenceSyncCheckpointCount; + } +#else /* !defined(SUPPORT_STRIP_RENDERING) */ + ui323DFenceCount += ui32BufferFenceSyncCheckpointCount; + + PVR_UNREFERENCED_PARAMETER(bTAFenceOnSyncCheckpointsOnly); +#endif /* !defined(SUPPORT_STRIP_RENDERING) */ + + if (psBufferUpdateSyncCheckpoint != NULL) + { + if (bKick3D) + { + ui323DUpdateCount++; + } + else + { + ui32PRUpdateCount++; + } + } + } +#endif /* defined(SUPPORT_BUFFER_SYNC) */ + +#if !defined(UPDATE_FENCE_CHECKPOINT_COUNT) || UPDATE_FENCE_CHECKPOINT_COUNT != 1 && UPDATE_FENCE_CHECKPOINT_COUNT != 2 +#error "Invalid value for UPDATE_FENCE_CHECKPOINT_COUNT. Must be either 1 or 2." 
+#endif /* !defined(UPDATE_FENCE_CHECKPOINT_COUNT) || UPDATE_FENCE_CHECKPOINT_COUNT != 1 && UPDATE_FENCE_CHECKPOINT_COUNT != 2 */ + + if (iCheckTAFence != PVRSRV_NO_FENCE) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence[TA]" + " (iCheckFence=%d)," + " psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>...", + __func__, iCheckTAFence, + (void *) psRenderContext->psDeviceNode->hSyncCheckpointContext)); + + /* Resolve the sync checkpoints that make up the input fence */ + eError = SyncCheckpointResolveFence( + psRenderContext->psDeviceNode->hSyncCheckpointContext, + iCheckTAFence, + &ui32FenceTASyncCheckpointCount, + &apsFenceTASyncCheckpoints, + &uiCheckTAFenceUID, + ui32PDumpFlags + ); + if (unlikely(eError != PVRSRV_OK)) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", + __func__, eError)); + goto fail_resolve_input_ta_fence; + } + + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d " + "checkpoints (apsFenceSyncCheckpoints=<%p>)", + __func__, iCheckTAFence, ui32FenceTASyncCheckpointCount, + (void *) apsFenceTASyncCheckpoints)); + +#if defined(TA3D_CHECKPOINT_DEBUG) + if (apsFenceTASyncCheckpoints) + { + _DebugSyncCheckpoints(__func__, "TA", apsFenceTASyncCheckpoints, + ui32FenceTASyncCheckpointCount); + } +#endif /* defined(TA3D_CHECKPOINT_DEBUG) */ + } + + if (iCheck3DFence != PVRSRV_NO_FENCE) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence[3D]" + " (iCheckFence=%d), " + "psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>...", + __func__, iCheck3DFence, + (void*)psRenderContext->psDeviceNode->hSyncCheckpointContext)); + + /* Resolve the sync checkpoints that make up the input fence */ + eError = SyncCheckpointResolveFence( + psRenderContext->psDeviceNode->hSyncCheckpointContext, + iCheck3DFence, + &ui32Fence3DSyncCheckpointCount, + &apsFence3DSyncCheckpoints, + &uiCheck3DFenceUID, + ui32PDumpFlags + ); + if (unlikely(eError != PVRSRV_OK)) + { + 
CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", + __func__, eError)); + goto fail_resolve_input_3d_fence; + } + + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d " + "checkpoints (apsFenceSyncCheckpoints=<%p>)", + __func__, iCheck3DFence, ui32Fence3DSyncCheckpointCount, + (void*)apsFence3DSyncCheckpoints)); + +#if defined(TA3D_CHECKPOINT_DEBUG) + if (apsFence3DSyncCheckpoints) + { + _DebugSyncCheckpoints(__func__, "3D", apsFence3DSyncCheckpoints, + ui32Fence3DSyncCheckpointCount); + } +#endif /* defined(TA3D_CHECKPOINT_DEBUG) */ + } + + if (iCheckTAFence >= 0 || iUpdateTATimeline >= 0 || + iCheck3DFence >= 0 || iUpdate3DTimeline >= 0) + { + IMG_UINT32 i; + + if (bKickTA) + { + ui32TAFenceCount += ui32FenceTASyncCheckpointCount; + + for (i = 0; i < ui32Fence3DSyncCheckpointCount; i++) + { + if (SyncCheckpointGetCreator(apsFence3DSyncCheckpoints[i]) != + uiCurrentProcess) + { + ui32TAFenceCount++; + } + } + } + + if (bKick3D) + { + ui323DFenceCount += ui32Fence3DSyncCheckpointCount; + } + + ui32TAUpdateCount += iUpdateTATimeline != PVRSRV_NO_TIMELINE ? + UPDATE_FENCE_CHECKPOINT_COUNT : 0; + ui323DUpdateCount += iUpdate3DTimeline != PVRSRV_NO_TIMELINE ? + UPDATE_FENCE_CHECKPOINT_COUNT : 0; + ui32PRUpdateCount += iUpdate3DTimeline != PVRSRV_NO_TIMELINE && !bKick3D ? 
+ UPDATE_FENCE_CHECKPOINT_COUNT : 0; + } + +#if defined(SUPPORT_VALIDATION) + /* Check if TestingSLR is adding an extra sync checkpoint to the + * 3D fence check (which we won't signal) + */ + if ((psDevInfo->ui32TestSLRInterval > 0) && + (--psDevInfo->ui32TestSLRCount == 0)) + { + bTestSLRAdd3DCheck = IMG_TRUE; + psDevInfo->ui32TestSLRCount = psDevInfo->ui32TestSLRInterval; + } + + if ((bTestSLRAdd3DCheck) && (iUpdate3DTimeline != PVRSRV_NO_TIMELINE)) + { + if (iUpdate3DTimeline == PVRSRV_NO_TIMELINE) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Would append additional SLR checkpoint " + "to 3D fence but no update 3D timeline provided", __func__)); + } + else + { + SyncCheckpointAlloc(psRenderContext->psDeviceNode->hSyncCheckpointContext, + iUpdate3DTimeline, + hTestSLRTmpFence, + "TestSLRCheck", + &psDummySyncCheckpoint); + PVR_DPF((PVR_DBG_WARNING, "%s: Appending additional SLR checkpoint to 3D fence " + "checkpoints (psDummySyncCheckpoint=<%p>)", + __func__, (void*)psDummySyncCheckpoint)); + SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DFence, + 1, + &psDummySyncCheckpoint); + if (!pauiClient3DFenceUFOAddress) + { + pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs; + } + + if (ui32Client3DFenceCount == 0) + { + b3DFenceOnSyncCheckpointsOnly = IMG_TRUE; + } + ui323DFenceCount++; + } + } +#endif /* defined(SUPPORT_VALIDATION) */ + + /* + * Extract the FBSC entries from MMU Context for the deferred FBSC invalidate command, + * in other words, take the value and set it to zero afterwards. + * FBSC Entry Mask must be extracted from MMU ctx and updated just before the kick starts + * as it must be ready at the time of context activation. + * + * NOTE: We use sTAData to get the ServerCommonContext giving us the ServerMMUCtx, + * should we use s3DData in some cases? + * Under assumption that sTAData and s3DData share the same psServerCommonContext, + * the answer is NO. 
+ * + * The ui64FBSCEntryMask filled by the following call gets cleared + * after the first KICK command and is ignored in the context of partial renders. + */ + eError = RGXExtractFBSCEntryMaskFromMMUContext( + psRenderContext->psDeviceNode, + FWCommonContextGetServerMMUCtx(psRenderContext->sTAData.psServerCommonContext), + &ui64FBSCEntryMask + ); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to extract FBSC Entry Mask (%d)", eError)); + goto fail_tacmdinvalfbsc; + } + + if (bKickTA) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: calling RGXCmdHelperInitCmdCCB()," + " ui32ClientTAFenceCount=%d, ui32ClientTAUpdateCount=%d", + __func__, ui32TAFenceCount, ui32TAUpdateCount)); + + RGXCmdHelperInitCmdCCB_CommandSize( + ui64FBSCEntryMask, + ui32TAFenceCount, + ui32TAUpdateCount, + ui32TACmdSize, + &pPreAddr, + (bKick3D ? NULL : &pPostAddr), + (bKick3D ? NULL : &pRMWUFOAddr), + pasTACmdHelperData + ); + + /* Clear the mask as we don't want to invalidate the FBSC multiple times + * with the same value of ui64FBSCEntryMask. + */ + ui64FBSCEntryMask = 0; + } + + if (bKickPR) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: calling RGXCmdHelperInitCmdCCB()," + " ui32Client3DFenceCount=%d", __func__, + ui323DFenceCount)); + + RGXCmdHelperInitCmdCCB_CommandSize( + 0, /* empty ui64FBSCEntryMask it is assumed that PRs should + * not invalidate FBSC */ + ui323DFenceCount, + 0, + sizeof(sPRUFO), + NULL, + NULL, + NULL, + &pas3DCmdHelperData[ui323DCmdCount++] + ); + } + + if (bKickPR && !bUseCombined3DAnd3DPR) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: calling RGXCmdHelperInitCmdCCB()," + " ui32PRUpdateCount=%d", __func__, + ui32PRUpdateCount)); + + RGXCmdHelperInitCmdCCB_CommandSize( + 0, /* empty ui64FBSCEntryMask it is assumed that PRs should + * not invalidate FBSC */ + 0, + ui32PRUpdateCount, + /* if the client has not provided a 3DPR command, the regular 3D + * command should be used instead */ + pui83DPRDMCmd ? 
ui323DPRCmdSize : ui323DCmdSize, + NULL, + NULL, + NULL, + &pas3DCmdHelperData[ui323DCmdCount++] + ); + } + + if (bKick3D || bAbort) + { + if (!bKickTA) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: calling RGXCmdHelperInitCmdCCB()," + " ui32Client3DFenceCount=%d", __func__, + ui323DFenceCount)); + } + + RGXCmdHelperInitCmdCCB_CommandSize( + ui64FBSCEntryMask, /* equals: [a] 0 if 3D is preceded by TA + * [b] value from the MMU ctx otherwise */ + bKickTA ? 0 : ui323DFenceCount, + ui323DUpdateCount, + ui323DCmdSize, + (bKickTA ? NULL : & pPreAddr), + &pPostAddr, + &pRMWUFOAddr, + &pas3DCmdHelperData[ui323DCmdCount++] + ); + } + + if (bKickTA) + { + ui32TACmdSizeTmp = RGXCmdHelperGetCommandSize(1, pasTACmdHelperData); + + eError = RGXCheckSpaceCCB( + FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext), + ui32TACmdSizeTmp + ); + if (eError != PVRSRV_OK) + { + goto err_not_enough_space; + } + } + + if (ui323DCmdCount > 0) + { + ui323DCmdSizeTmp = RGXCmdHelperGetCommandSize(ui323DCmdCount, pas3DCmdHelperData); + + eError = RGXCheckSpaceCCB( + FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext), + ui323DCmdSizeTmp + ); + if (eError != PVRSRV_OK) + { + goto err_not_enough_space; + } + } + + /* need to reset the counter here */ + + ui323DCmdCount = 0; + + CHKPT_DBG((PVR_DBG_ERROR, + "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrListTAFence, %d fences)...", + __func__, ui32ClientTAFenceCount)); + eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrListTAFence, + ui32ClientTAFenceCount, + apsClientTAFenceSyncPrimBlock, + paui32ClientTAFenceSyncOffset); + if (unlikely(eError != PVRSRV_OK)) + { + goto err_populate_sync_addr_list_ta_fence; + } + + if (ui32ClientTAFenceCount) + { + pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs; + } + + CHKPT_DBG((PVR_DBG_ERROR, + "%s: pauiClientTAFenceUFOAddress=<%p> ", + __func__, (void*)pauiClientTAFenceUFOAddress)); + + CHKPT_DBG((PVR_DBG_ERROR, + "%s: 
SyncAddrListPopulate(psRenderContext->sSyncAddrListTAUpdate, %d updates)...", + __func__, ui32ClientTAUpdateCount)); + eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrListTAUpdate, + ui32ClientTAUpdateCount, + apsClientTAUpdateSyncPrimBlock, + paui32ClientTAUpdateSyncOffset); + if (unlikely(eError != PVRSRV_OK)) + { + goto err_populate_sync_addr_list_ta_update; + } + + if (ui32ClientTAUpdateCount) + { + pauiClientTAUpdateUFOAddress = psRenderContext->sSyncAddrListTAUpdate.pasFWAddrs; + } + CHKPT_DBG((PVR_DBG_ERROR, + "%s: pauiClientTAUpdateUFOAddress=<%p> ", + __func__, (void*)pauiClientTAUpdateUFOAddress)); + + CHKPT_DBG((PVR_DBG_ERROR, + "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrList3DFence, %d fences)...", + __func__, ui32Client3DFenceCount)); + eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrList3DFence, + ui32Client3DFenceCount, + NULL, + NULL); + if (unlikely(eError != PVRSRV_OK)) + { + goto err_populate_sync_addr_list_3d_fence; + } + + CHKPT_DBG((PVR_DBG_ERROR, + "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrList3DUpdate, %d updates)...", + __func__, ui32Client3DUpdateCount)); + eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrList3DUpdate, + ui32Client3DUpdateCount, + apsClient3DUpdateSyncPrimBlock, + paui32Client3DUpdateSyncOffset); + if (unlikely(eError != PVRSRV_OK)) + { + goto err_populate_sync_addr_list_3d_update; + } + + if (ui32Client3DUpdateCount || (iUpdate3DTimeline != PVRSRV_NO_TIMELINE && piUpdate3DFence && bKick3D)) + { + pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs; + } + CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiClient3DUpdateUFOAddress=<%p> ", + __func__, (void*)pauiClient3DUpdateUFOAddress)); + + eError = SyncPrimitiveBlockToFWAddr(psPRFenceSyncPrimBlock, + ui32PRFenceSyncOffset, + &uiPRFenceUFOAddress); + + if (unlikely(eError != PVRSRV_OK)) + { + goto err_pr_fence_address; + } + +#if (ENABLE_TA3D_UFO_DUMP == 1) + DumpUfoList(ui32ClientTAFenceCount, 
ui32ClientTAUpdateCount, + ui32Client3DFenceCount + (bTestSLRAdd3DCheck ? 1 : 0), + ui32Client3DUpdateCount, + pauiClientTAFenceUFOAddress, paui32ClientTAFenceValue, + pauiClientTAUpdateUFOAddress, paui32ClientTAUpdateValue, + pauiClient3DFenceUFOAddress, NULL, + pauiClient3DUpdateUFOAddress, paui32Client3DUpdateValue); +#endif /* (ENABLE_TA3D_UFO_DUMP == 1) */ + + if (ui32SyncPMRCount) + { +#if defined(SUPPORT_BUFFER_SYNC) +#if !defined(SUPPORT_STRIP_RENDERING) + /* Append buffer sync fences to TA fences */ + if (ui32BufferFenceSyncCheckpointCount > 0 && bKickTA) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Append %d buffer sync checkpoints to TA Fence " + "(&psRenderContext->sSyncAddrListTAFence=<%p>, " + "pauiClientTAFenceUFOAddress=<%p>)...", + __func__, + ui32BufferFenceSyncCheckpointCount, + (void*)&psRenderContext->sSyncAddrListTAFence , + (void*)pauiClientTAFenceUFOAddress)); + SyncAddrListAppendAndDeRefCheckpoints(&psRenderContext->sSyncAddrListTAFence, + ui32BufferFenceSyncCheckpointCount, + apsBufferFenceSyncCheckpoints); + if (!pauiClientTAFenceUFOAddress) + { + pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs; + } + if (ui32ClientTAFenceCount == 0) + { + bTAFenceOnSyncCheckpointsOnly = IMG_TRUE; + } + ui32ClientTAFenceCount += ui32BufferFenceSyncCheckpointCount; + } + else +#endif /* !defined(SUPPORT_STRIP_RENDERING) */ + /* Append buffer sync fences to 3D fences */ + if (ui32BufferFenceSyncCheckpointCount > 0) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Append %d buffer sync checkpoints to 3D Fence " + "(&psRenderContext->sSyncAddrList3DFence=<%p>, " + "pauiClient3DFenceUFOAddress=<%p>)...", + __func__, + ui32BufferFenceSyncCheckpointCount, + (void*)&psRenderContext->sSyncAddrList3DFence, + (void*)pauiClient3DFenceUFOAddress)); + SyncAddrListAppendAndDeRefCheckpoints(&psRenderContext->sSyncAddrList3DFence, + ui32BufferFenceSyncCheckpointCount, + apsBufferFenceSyncCheckpoints); + if (!pauiClient3DFenceUFOAddress) + { + 
pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs; + } + if (ui32Client3DFenceCount == 0) + { + b3DFenceOnSyncCheckpointsOnly = IMG_TRUE; + } + ui32Client3DFenceCount += ui32BufferFenceSyncCheckpointCount; + } + + if (psBufferUpdateSyncCheckpoint) + { + /* If we have a 3D kick append update to the 3D updates else append to the PR update */ + if (bKick3D) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Append 1 buffer sync checkpoint<%p> to 3D Update" + " (&psRenderContext->sSyncAddrList3DUpdate=<%p>," + " pauiClient3DUpdateUFOAddress=<%p>)...", + __func__, + (void*)psBufferUpdateSyncCheckpoint, + (void*)&psRenderContext->sSyncAddrList3DUpdate, + (void*)pauiClient3DUpdateUFOAddress)); + /* Append buffer sync update to 3D updates */ + SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate, + 1, + &psBufferUpdateSyncCheckpoint); + if (!pauiClient3DUpdateUFOAddress) + { + pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs; + } + ui32Client3DUpdateCount++; + } + else + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Append 1 buffer sync checkpoint<%p> to PR Update" + " (&psRenderContext->sSyncAddrList3DUpdate=<%p>," + " pauiClientPRUpdateUFOAddress=<%p>)...", + __func__, + (void*)psBufferUpdateSyncCheckpoint, + (void*)&psRenderContext->sSyncAddrList3DUpdate, + (void*)pauiClientPRUpdateUFOAddress)); + /* Attach update to the 3D (used for PR) Updates */ + SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate, + 1, + &psBufferUpdateSyncCheckpoint); + if (!pauiClientPRUpdateUFOAddress) + { + pauiClientPRUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs; + } + ui32ClientPRUpdateCount++; + } + } + CHKPT_DBG((PVR_DBG_ERROR, + "%s: (after buffer_sync) ui32ClientTAFenceCount=%d, " + "ui32ClientTAUpdateCount=%d, ui32Client3DFenceCount=%d, " + "ui32Client3DUpdateCount=%d, ui32ClientPRUpdateCount=%d,", + __func__, ui32ClientTAFenceCount, ui32ClientTAUpdateCount, + ui32Client3DFenceCount, 
ui32Client3DUpdateCount, + ui32ClientPRUpdateCount)); + +#else /* defined(SUPPORT_BUFFER_SYNC) */ + PVR_DPF((PVR_DBG_ERROR, + "%s: Buffer sync not supported but got %u buffers", + __func__, ui32SyncPMRCount)); + + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto err_no_buffer_sync_invalid_params; +#endif /* defined(SUPPORT_BUFFER_SYNC) */ + } + + /* + * The hardware requires a PR to be submitted if there is a TA (otherwise + * it can wedge if we run out of PB space with no PR to run) + * + * If we only have a TA, attach native checks to the TA and updates to the PR + * If we have a TA and 3D, attach checks to TA, updates to 3D + * If we only have a 3D, attach checks and updates to the 3D + * + * Note that 'updates' includes the cleanup syncs for 'check' fence FDs, in + * addition to the update fence FD (if supplied) + * + * Currently, the client driver never kicks only the 3D, so we only support + * that for the time being. + */ + if (iCheckTAFence >= 0 || iUpdateTATimeline >= 0 || + iCheck3DFence >= 0 || iUpdate3DTimeline >= 0) + { + PRGXFWIF_UFO_ADDR *pauiClientTAIntUpdateUFOAddress = NULL; + PRGXFWIF_UFO_ADDR *pauiClient3DIntUpdateUFOAddress = NULL; + + CHKPT_DBG((PVR_DBG_ERROR, + "%s: [TA] iCheckFence = %d, iUpdateTimeline = %d", + __func__, iCheckTAFence, iUpdateTATimeline)); + CHKPT_DBG((PVR_DBG_ERROR, + "%s: [3D] iCheckFence = %d, iUpdateTimeline = %d", + __func__, iCheck3DFence, iUpdate3DTimeline)); + + { + /* Create the output fence for TA (if required) */ + if (iUpdateTATimeline != PVRSRV_NO_TIMELINE) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: calling SyncCheckpointCreateFence[TA] " + "(iUpdateFence=%d, iUpdateTimeline=%d, " + "psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>)", + __func__, iUpdateTAFence, iUpdateTATimeline, + (void*)psRenderContext->psDeviceNode->hSyncCheckpointContext)); + eError = SyncCheckpointCreateFence(psRenderContext->psDeviceNode, + szFenceNameTA, + iUpdateTATimeline, + psRenderContext->psDeviceNode->hSyncCheckpointContext, + 
&iUpdateTAFence, + &uiUpdateTAFenceUID, + &pvTAUpdateFenceFinaliseData, + &psUpdateTASyncCheckpoint, + (void*)&psTAFenceTimelineUpdateSync, + &ui32TAFenceTimelineUpdateValue, + ui32PDumpFlags); + if (unlikely(eError != PVRSRV_OK)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: SyncCheckpointCreateFence[TA] failed (%d)", __func__, eError)); + goto fail_create_ta_fence; + } + + CHKPT_DBG((PVR_DBG_ERROR, + "%s: returned from SyncCheckpointCreateFence[TA] " + "(iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, " + "ui32FenceTimelineUpdateValue=0x%x)", + __func__, iUpdateTAFence, + (void*)psTAFenceTimelineUpdateSync, + ui32TAFenceTimelineUpdateValue)); + + /* Store the FW address of the update sync checkpoint in pauiClientTAIntUpdateUFOAddress */ + pauiClientTAIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdateTASyncCheckpoint); + CHKPT_DBG((PVR_DBG_ERROR, + "%s: pauiClientIntUpdateUFOAddress[TA]->ui32Addr=0x%x", + __func__, pauiClientTAIntUpdateUFOAddress->ui32Addr)); + } + + /* Append the sync prim update for the TA timeline (if required) */ + if (psTAFenceTimelineUpdateSync) + { + sTASyncData.ui32ClientUpdateCount = ui32ClientTAUpdateCount; + sTASyncData.ui32ClientUpdateValueCount = ui32ClientTAUpdateValueCount; + sTASyncData.ui32ClientPRUpdateValueCount = (bKick3D) ? 0 : ui32ClientPRUpdateValueCount; + sTASyncData.paui32ClientUpdateValue = paui32ClientTAUpdateValue; + + eError = RGXSyncAppendTimelineUpdate(ui32TAFenceTimelineUpdateValue, + &psRenderContext->sSyncAddrListTAUpdate, + (bKick3D) ? 
NULL : &psRenderContext->sSyncAddrList3DUpdate, + psTAFenceTimelineUpdateSync, + &sTASyncData, + bKick3D); + if (unlikely(eError != PVRSRV_OK)) + { + goto fail_alloc_update_values_mem_TA; + } + + paui32ClientTAUpdateValue = sTASyncData.paui32ClientUpdateValue; + ui32ClientTAUpdateValueCount = sTASyncData.ui32ClientUpdateValueCount; + pauiClientTAUpdateUFOAddress = sTASyncData.pauiClientUpdateUFOAddress; + ui32ClientTAUpdateCount = sTASyncData.ui32ClientUpdateCount; + } + + /* Create the output fence for 3D (if required) */ + if (iUpdate3DTimeline != PVRSRV_NO_TIMELINE) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: calling SyncCheckpointCreateFence[3D] " + "(iUpdateFence=%d, iUpdateTimeline=%d, " + "psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>)", + __func__, iUpdate3DFence, iUpdate3DTimeline, + (void*)psRenderContext->psDeviceNode->hSyncCheckpointContext)); + eError = SyncCheckpointCreateFence(psRenderContext->psDeviceNode, + szFenceName3D, + iUpdate3DTimeline, + psRenderContext->psDeviceNode->hSyncCheckpointContext, + &iUpdate3DFence, + &uiUpdate3DFenceUID, + &pv3DUpdateFenceFinaliseData, + &psUpdate3DSyncCheckpoint, + (void*)&ps3DFenceTimelineUpdateSync, + &ui323DFenceTimelineUpdateValue, + ui32PDumpFlags); + if (unlikely(eError != PVRSRV_OK)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: SyncCheckpointCreateFence[3D] failed (%d)", __func__, eError)); + goto fail_create_3d_fence; + } + + CHKPT_DBG((PVR_DBG_ERROR, + "%s: returned from SyncCheckpointCreateFence[3D] " + "(iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, " + "ui32FenceTimelineUpdateValue=0x%x)", + __func__, iUpdate3DFence, + (void*)ps3DFenceTimelineUpdateSync, + ui323DFenceTimelineUpdateValue)); + + /* Store the FW address of the update sync checkpoint in pauiClient3DIntUpdateUFOAddress */ + pauiClient3DIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdate3DSyncCheckpoint); + CHKPT_DBG((PVR_DBG_ERROR, + "%s: pauiClientIntUpdateUFOAddress[3D]->ui32Addr=0x%x", + __func__, 
pauiClient3DIntUpdateUFOAddress->ui32Addr)); + } + + /* Append the sync prim update for the 3D timeline (if required) */ + if (ps3DFenceTimelineUpdateSync) + { + s3DSyncData.ui32ClientUpdateCount = ui32Client3DUpdateCount; + s3DSyncData.ui32ClientUpdateValueCount = ui32Client3DUpdateValueCount; + s3DSyncData.ui32ClientPRUpdateValueCount = ui32ClientPRUpdateValueCount; + s3DSyncData.paui32ClientUpdateValue = paui32Client3DUpdateValue; + + eError = RGXSyncAppendTimelineUpdate(ui323DFenceTimelineUpdateValue, + &psRenderContext->sSyncAddrList3DUpdate, + &psRenderContext->sSyncAddrList3DUpdate, /*!< PR update: is this required? */ + ps3DFenceTimelineUpdateSync, + &s3DSyncData, + bKick3D); + if (unlikely(eError != PVRSRV_OK)) + { + goto fail_alloc_update_values_mem_3D; + } + + paui32Client3DUpdateValue = s3DSyncData.paui32ClientUpdateValue; + ui32Client3DUpdateValueCount = s3DSyncData.ui32ClientUpdateValueCount; + pauiClient3DUpdateUFOAddress = s3DSyncData.pauiClientUpdateUFOAddress; + ui32Client3DUpdateCount = s3DSyncData.ui32ClientUpdateCount; + + if (!bKick3D) + { + paui32ClientPRUpdateValue = s3DSyncData.paui32ClientPRUpdateValue; + ui32ClientPRUpdateValueCount = s3DSyncData.ui32ClientPRUpdateValueCount; + pauiClientPRUpdateUFOAddress = s3DSyncData.pauiClientPRUpdateUFOAddress; + ui32ClientPRUpdateCount = s3DSyncData.ui32ClientPRUpdateCount; + } + } + + /* + * The hardware requires a PR to be submitted if there is a TA OOM. + * If we only have a TA, attach native checks and updates to the TA + * and 3D updates to the PR. + * If we have a TA and 3D, attach the native TA checks and updates + * to the TA and similarly for the 3D. + * Note that 'updates' includes the cleanup syncs for 'check' fence + * FDs, in addition to the update fence FD (if supplied). + * Currently, the client driver never kicks only the 3D, so we don't + * support that for the time being. 
+ */ + + { + if (bKickTA) + { + /* Attach checks and updates to TA */ + + /* Checks (from input fence) */ + if (ui32FenceTASyncCheckpointCount > 0) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Append %d sync checkpoints to TA Fence (apsFenceSyncCheckpoints=<%p>)...", + __func__, + ui32FenceTASyncCheckpointCount, + (void*)apsFenceTASyncCheckpoints)); + SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrListTAFence, + ui32FenceTASyncCheckpointCount, + apsFenceTASyncCheckpoints); + if (!pauiClientTAFenceUFOAddress) + { + pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs; + } + CHKPT_DBG((PVR_DBG_ERROR, + "%s: {ui32ClientTAFenceCount was %d, now %d}", + __func__, ui32ClientTAFenceCount, + ui32ClientTAFenceCount + ui32FenceTASyncCheckpointCount)); + if (ui32ClientTAFenceCount == 0) + { + bTAFenceOnSyncCheckpointsOnly = IMG_TRUE; + } + ui32ClientTAFenceCount += ui32FenceTASyncCheckpointCount; + } + CHKPT_DBG((PVR_DBG_ERROR, + "%s: {ui32ClientTAFenceCount now %d}", + __func__, ui32ClientTAFenceCount)); + + if (psUpdateTASyncCheckpoint) + { + /* Update (from output fence) */ + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Append 1 sync checkpoint<%p> (ID=%d) to TA Update...", + __func__, (void*)psUpdateTASyncCheckpoint, + SyncCheckpointGetId(psUpdateTASyncCheckpoint))); + SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrListTAUpdate, + 1, + &psUpdateTASyncCheckpoint); + if (!pauiClientTAUpdateUFOAddress) + { + pauiClientTAUpdateUFOAddress = psRenderContext->sSyncAddrListTAUpdate.pasFWAddrs; + } + ui32ClientTAUpdateCount++; + } + + if (!bKick3D && psUpdate3DSyncCheckpoint) + { + /* Attach update to the 3D (used for PR) Updates */ + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Append 1 sync checkpoint<%p> (ID=%d) to 3D(PR) Update...", + __func__, (void*)psUpdate3DSyncCheckpoint, + SyncCheckpointGetId(psUpdate3DSyncCheckpoint))); + SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate, + 1, + &psUpdate3DSyncCheckpoint); + if 
(!pauiClientPRUpdateUFOAddress) + { + pauiClientPRUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs; + } + ui32ClientPRUpdateCount++; + } + } + + if (bKick3D) + { + /* Attach checks and updates to the 3D */ + + /* Checks (from input fence) */ + if (ui32Fence3DSyncCheckpointCount > 0) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Append %d sync checkpoints to 3D Fence...", + __func__, ui32Fence3DSyncCheckpointCount)); + SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DFence, + ui32Fence3DSyncCheckpointCount, + apsFence3DSyncCheckpoints); + if (!pauiClient3DFenceUFOAddress) + { + pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs; + } + CHKPT_DBG((PVR_DBG_ERROR, + "%s: {ui32Client3DFenceCount was %d, now %d}", + __func__, ui32Client3DFenceCount, + ui32Client3DFenceCount + ui32Fence3DSyncCheckpointCount)); + if (ui32Client3DFenceCount == 0) + { + b3DFenceOnSyncCheckpointsOnly = IMG_TRUE; + } + ui32Client3DFenceCount += ui32Fence3DSyncCheckpointCount; + } + CHKPT_DBG((PVR_DBG_ERROR, + "%s: {ui32Client3DFenceCount was %d}", + __func__, ui32Client3DFenceCount)); + + if (psUpdate3DSyncCheckpoint) + { + /* Update (from output fence) */ + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Append 1 sync checkpoint<%p> (ID=%d) to 3D Update...", + __func__, (void*)psUpdate3DSyncCheckpoint, + SyncCheckpointGetId(psUpdate3DSyncCheckpoint))); + SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate, + 1, + &psUpdate3DSyncCheckpoint); + if (!pauiClient3DUpdateUFOAddress) + { + pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs; + } + ui32Client3DUpdateCount++; + } + } + + /* + * Relocate sync check points from the 3D fence that are + * external to the current process, to the TA fence. + * This avoids a sync lockup when dependent renders are + * submitted out-of-order and a PR must be scheduled. 
+ */ + if (bKickTA) + { + /* Search for external timeline dependencies */ + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Checking 3D fence for external sync points (%d)...", + __func__, ui32Fence3DSyncCheckpointCount)); + + for (i=0; i (ID=%d) to TA Fence...", + __func__, (void*)apsFence3DSyncCheckpoints[i], + SyncCheckpointGetId(apsFence3DSyncCheckpoints[i]))); + + SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrListTAFence, + 1, + &apsFence3DSyncCheckpoints[i]); + + if (!pauiClientTAFenceUFOAddress) + { + pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs; + } + + CHKPT_DBG((PVR_DBG_ERROR, + "%s: {ui32ClientTAFenceCount was %d, now %d}", + __func__, + ui32ClientTAFenceCount, + ui32ClientTAFenceCount + 1)); + + if (ui32ClientTAFenceCount == 0) + { + bTAFenceOnSyncCheckpointsOnly = IMG_TRUE; + } + + ui32ClientTAFenceCount++; + } + } + } + + CHKPT_DBG((PVR_DBG_ERROR, + "%s: (after pvr_sync) ui32ClientTAFenceCount=%d, " + "ui32ClientTAUpdateCount=%d, ui32Client3DFenceCount=%d, " + "ui32Client3DUpdateCount=%d, ui32ClientPRUpdateCount=%d,", + __func__, + ui32ClientTAFenceCount, ui32ClientTAUpdateCount, + ui32Client3DFenceCount, ui32Client3DUpdateCount, + ui32ClientPRUpdateCount)); + } + } + + if (ui32ClientTAFenceCount) + { + PVR_ASSERT(pauiClientTAFenceUFOAddress); + if (!bTAFenceOnSyncCheckpointsOnly) + { + PVR_ASSERT(paui32ClientTAFenceValue); + } + } + if (ui32ClientTAUpdateCount) + { + PVR_ASSERT(pauiClientTAUpdateUFOAddress); + if (ui32ClientTAUpdateValueCount>0) + { + PVR_ASSERT(paui32ClientTAUpdateValue); + } + } + if (ui32Client3DFenceCount) + { + PVR_ASSERT(pauiClient3DFenceUFOAddress); + PVR_ASSERT(b3DFenceOnSyncCheckpointsOnly); + } + if (ui32Client3DUpdateCount) + { + PVR_ASSERT(pauiClient3DUpdateUFOAddress); + if (ui32Client3DUpdateValueCount>0) + { + PVR_ASSERT(paui32Client3DUpdateValue); + } + } + if (ui32ClientPRUpdateCount) + { + PVR_ASSERT(pauiClientPRUpdateUFOAddress); + if (ui32ClientPRUpdateValueCount>0) + { + 
PVR_ASSERT(paui32ClientPRUpdateValue); + } + } + + } + + CHKPT_DBG((PVR_DBG_ERROR, + "%s: ui32ClientTAFenceCount=%d, pauiClientTAFenceUFOAddress=<%p> Line ", + __func__, + ui32ClientTAFenceCount, + (void*)paui32ClientTAFenceValue)); + CHKPT_DBG((PVR_DBG_ERROR, + "%s: ui32ClientTAUpdateCount=%d, pauiClientTAUpdateUFOAddress=<%p> Line ", + __func__, + ui32ClientTAUpdateCount, + (void*)pauiClientTAUpdateUFOAddress)); +#if (ENABLE_TA3D_UFO_DUMP == 1) + DumpUfoList(ui32ClientTAFenceCount, ui32ClientTAUpdateCount, + ui32Client3DFenceCount + (bTestSLRAdd3DCheck ? 1 : 0), + ui32Client3DUpdateCount, + pauiClientTAFenceUFOAddress, paui32ClientTAFenceValue, + pauiClientTAUpdateUFOAddress, paui32ClientTAUpdateValue, + pauiClient3DFenceUFOAddress, NULL, + pauiClient3DUpdateUFOAddress, paui32Client3DUpdateValue); +#endif /* (ENABLE_TA3D_UFO_DUMP == 1) */ + + /* Command size check */ + if (ui32TAFenceCount != ui32ClientTAFenceCount) + { + PVR_DPF((PVR_DBG_ERROR, "TA pre-calculated number of fences" + " is different than the actual number (%u != %u)", + ui32TAFenceCount, ui32ClientTAFenceCount)); + } + if (ui32TAUpdateCount != ui32ClientTAUpdateCount) + { + PVR_DPF((PVR_DBG_ERROR, "TA pre-calculated number of updates" + " is different than the actual number (%u != %u)", + ui32TAUpdateCount, ui32ClientTAUpdateCount)); + } + if (!bTestSLRAdd3DCheck && (ui323DFenceCount != ui32Client3DFenceCount)) + { + PVR_DPF((PVR_DBG_ERROR, "3D pre-calculated number of fences" + " is different than the actual number (%u != %u)", + ui323DFenceCount, ui32Client3DFenceCount)); + } + if (ui323DUpdateCount != ui32Client3DUpdateCount) + { + PVR_DPF((PVR_DBG_ERROR, "3D pre-calculated number of updates" + " is different than the actual number (%u != %u)", + ui323DUpdateCount, ui32Client3DUpdateCount)); + } + if (ui32PRUpdateCount != ui32ClientPRUpdateCount) + { + PVR_DPF((PVR_DBG_ERROR, "PR pre-calculated number of updates" + " is different than the actual number (%u != %u)", + ui32PRUpdateCount, 
ui32ClientPRUpdateCount)); + } + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + if (bKickTA || bKick3D || bAbort) + { + sWorkloadCharacteristics.sTA3D.ui32RenderTargetSize = ui32RenderTargetSize; + sWorkloadCharacteristics.sTA3D.ui32NumberOfDrawCalls = ui32NumberOfDrawCalls; + sWorkloadCharacteristics.sTA3D.ui32NumberOfIndices = ui32NumberOfIndices; + sWorkloadCharacteristics.sTA3D.ui32NumberOfMRTs = ui32NumberOfMRTs; + } +#endif + + /* Init and acquire to TA command if required */ + if (bKickTA) + { + RGX_SERVER_RC_TA_DATA *psTAData = &psRenderContext->sTAData; + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* Prepare workload estimation */ + WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice, + &psRenderContext->sWorkEstData, + &psRenderContext->sWorkEstData.uWorkloadMatchingData.sTA3D.sDataTA, + RGXFWIF_CCB_CMD_TYPE_GEOM, + &sWorkloadCharacteristics, + ui64DeadlineInus, + &sWorkloadKickDataTA); +#endif + + /* Init the TA command helper */ + CHKPT_DBG((PVR_DBG_ERROR, + "%s: calling RGXCmdHelperInitCmdCCB(), ui32ClientTAFenceCount=%d, ui32ClientTAUpdateCount=%d", + __func__, ui32ClientTAFenceCount, ui32ClientTAUpdateCount)); + RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(psTAData->psServerCommonContext), + ui32ClientTAFenceCount, + pauiClientTAFenceUFOAddress, + paui32ClientTAFenceValue, + ui32ClientTAUpdateCount, + pauiClientTAUpdateUFOAddress, + paui32ClientTAUpdateValue, + ui32TACmdSize, + pui8TADMCmd, + &pPreAddr, + (bKick3D ? NULL : & pPostAddr), + (bKick3D ? 
NULL : & pRMWUFOAddr), + RGXFWIF_CCB_CMD_TYPE_GEOM, + ui32ExtJobRef, + ui32IntJobRef, + ui32PDumpFlags, +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + &sWorkloadKickDataTA, +#else + NULL, +#endif + "TA", + bCCBStateOpen, + pasTACmdHelperData); + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* The following is used to determine the offset of the command header containing + the workload estimation data so that can be accessed when the KCCB is read */ + ui32TACmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(pasTACmdHelperData); +#endif + + eError = RGXCmdHelperAcquireCmdCCB(CCB_CMD_HELPER_NUM_TA_COMMANDS, pasTACmdHelperData); + if (unlikely(eError != PVRSRV_OK)) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", + __func__, eError)); + goto fail_taacquirecmd; + } + else + { + ui32TACmdCount++; + } + } + + /* Only kick the 3D if required */ + if (bKickPR) + { + RGX_SERVER_RC_3D_DATA *ps3DData = &psRenderContext->s3DData; + + /* + The command helper doesn't know about the PR fence so create + the command with all the fences against it and later create + the PR command itself which _must_ come after the PR fence. + */ + sPRUFO.puiAddrUFO = uiPRFenceUFOAddress; + sPRUFO.ui32Value = ui32PRFenceValue; + + /* Init the PR fence command helper */ + CHKPT_DBG((PVR_DBG_ERROR, + "%s: calling RGXCmdHelperInitCmdCCB(), ui32Client3DFenceCount=%d", + __func__, ui32Client3DFenceCount)); + RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext), + ui32Client3DFenceCount + (bTestSLRAdd3DCheck ? 
1 : 0), + pauiClient3DFenceUFOAddress, + NULL, + 0, + NULL, + NULL, + sizeof(sPRUFO), + (IMG_UINT8*) &sPRUFO, + NULL, + NULL, + NULL, + RGXFWIF_CCB_CMD_TYPE_FENCE_PR, + ui32ExtJobRef, + ui32IntJobRef, + ui32PDumpFlags, + NULL, + "3D-PR-Fence", + bCCBStateOpen, + &pas3DCmdHelperData[ui323DCmdCount++]); + + /* Init the 3D PR command helper */ + /* + Updates for Android (fence sync and Timeline sync prim) are provided in the PR-update + if no 3D is present. This is so the timeline update cannot happen out of order with any + other 3D already in flight for the same timeline (PR-updates are done in the 3D cCCB). + This out of order timeline sync prim update could happen if we attach it to the TA update. + */ + if (ui32ClientPRUpdateCount) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Line %d, ui32ClientPRUpdateCount=%d, " + "pauiClientPRUpdateUFOAddress=0x%x, " + "ui32ClientPRUpdateValueCount=%d, " + "paui32ClientPRUpdateValue=0x%x", + __func__, __LINE__, ui32ClientPRUpdateCount, + pauiClientPRUpdateUFOAddress->ui32Addr, + ui32ClientPRUpdateValueCount, + (ui32ClientPRUpdateValueCount == 0) ? PVRSRV_SYNC_CHECKPOINT_SIGNALLED : *paui32ClientPRUpdateValue)); + } + if (!bUseCombined3DAnd3DPR) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: calling RGXCmdHelperInitCmdCCB(), ui32ClientPRUpdateCount=%d", + __func__, ui32ClientPRUpdateCount)); + RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext), + 0, + NULL, + NULL, + ui32ClientPRUpdateCount, + pauiClientPRUpdateUFOAddress, + paui32ClientPRUpdateValue, + pui83DPRDMCmd ? ui323DPRCmdSize : ui323DCmdSize, // If the client has not provided a 3DPR command, the regular 3D command should be used instead + pui83DPRDMCmd ? 
pui83DPRDMCmd : pui83DDMCmd, + NULL, + NULL, + NULL, + RGXFWIF_CCB_CMD_TYPE_3D_PR, + ui32ExtJobRef, + ui32IntJobRef, + ui32PDumpFlags, + NULL, + "3D-PR", + bCCBStateOpen, + &pas3DCmdHelperData[ui323DCmdCount++]); + } + } + + if (bKick3D || bAbort) + { + RGX_SERVER_RC_3D_DATA *ps3DData = &psRenderContext->s3DData; + const RGXFWIF_CCB_CMD_TYPE e3DCmdType = bAbort ? RGXFWIF_CCB_CMD_TYPE_ABORT : RGXFWIF_CCB_CMD_TYPE_3D; + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* Prepare workload estimation */ + WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice, + &psRenderContext->sWorkEstData, + &psRenderContext->sWorkEstData.uWorkloadMatchingData.sTA3D.sData3D, + e3DCmdType, + &sWorkloadCharacteristics, + ui64DeadlineInus, + &sWorkloadKickData3D); +#endif + + /* Init the 3D command helper */ + RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext), + bKickTA ? 0 : ui32Client3DFenceCount, /* For a kick with a TA, the 3D fences are added before the PR command instead */ + bKickTA ? NULL : pauiClient3DFenceUFOAddress, + NULL, + ui32Client3DUpdateCount, + pauiClient3DUpdateUFOAddress, + paui32Client3DUpdateValue, + ui323DCmdSize, + pui83DDMCmd, + (bKickTA ? 
NULL : & pPreAddr), + &pPostAddr, + &pRMWUFOAddr, + e3DCmdType, + ui32ExtJobRef, + ui32IntJobRef, + ui32PDumpFlags, +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + &sWorkloadKickData3D, +#else + NULL, +#endif + "3D", + bCCBStateOpen, + &pas3DCmdHelperData[ui323DCmdCount++]); + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* The following are used to determine the offset of the command header containing the workload estimation + data so that can be accessed when the KCCB is read */ + ui323DCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(&pas3DCmdHelperData[ui323DCmdCount - 1]); + ui323DFullRenderCommandOffset = RGXCmdHelperGetCommandOffset(pas3DCmdHelperData, ui323DCmdCount - 1); +#endif + } + + /* Protect against array overflow in RGXCmdHelperAcquireCmdCCB() */ + if (unlikely(ui323DCmdCount > CCB_CMD_HELPER_NUM_3D_COMMANDS)) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", __func__, eError)); + goto fail_3dcmdinit; + } + + if (ui323DCmdCount) + { + PVR_ASSERT(bKickPR || bKick3D); + + /* Acquire space for all the 3D command(s) */ + eError = RGXCmdHelperAcquireCmdCCB(ui323DCmdCount, pas3DCmdHelperData); + if (unlikely(eError != PVRSRV_OK)) + { + /* If RGXCmdHelperAcquireCmdCCB fails we skip the scheduling + * of a new TA command with the same Write offset in Kernel CCB. 
+ */ + CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", __func__, eError)); + goto fail_3dacquirecmd; + } + } + + /* + We should acquire the space in the kernel CCB here as after this point + we release the commands which will take operations on server syncs + which can't be undone + */ + + /* + Everything is ready to go now, release the commands + */ + if (ui32TACmdCount) + { + ui32TACmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext)); + RGXCmdHelperReleaseCmdCCB(ui32TACmdCount, + pasTACmdHelperData, + "TA", + FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext).ui32Addr); + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + ui32TACmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext)); + + /* This checks if the command would wrap around at the end of the CCB and therefore would start at an + offset of 0 rather than the current command offset */ + if (ui32TACmdOffset < ui32TACmdOffsetWrapCheck) + { + ui32TACommandOffset = ui32TACmdOffset; + } + else + { + ui32TACommandOffset = 0; + } +#endif + } + + if (ui323DCmdCount) + { + ui323DCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext)); + RGXCmdHelperReleaseCmdCCB(ui323DCmdCount, + pas3DCmdHelperData, + "3D", + FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext).ui32Addr); + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + ui323DCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext)); + + if (ui323DCmdOffset < ui323DCmdOffsetWrapCheck) + { + ui323DCommandOffset = ui323DCmdOffset; + } + else + { + ui323DCommandOffset = 0; + } +#endif + } + + if (ui32TACmdCount) + { + IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext).ui32Addr; + RGX_CLIENT_CCB *psClientCCB = 
FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext); + CMDTA3D_SHARED *psGeomCmdShared = IMG_OFFSET_ADDR(pui8TADMCmd, 0); + + sTACmdKickData.psContext = FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext); + sTACmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB); + sTACmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB); + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* Add the Workload data into the KCCB kick */ + sTACmdKickData.ui32WorkEstCmdHeaderOffset = ui32TACommandOffset + ui32TACmdHeaderOffset; +#else + sTACmdKickData.ui32WorkEstCmdHeaderOffset = 0; +#endif + + eError = AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &sTACmdKickData.apsCleanupCtl, + &sTACmdKickData.ui32NumCleanupCtl, + RGXFWIF_DM_GEOM, + bKickTA, + psKMHWRTDataSet, + psZSBuffer, + psMSAAScratchBuffer); + if (unlikely(eError != PVRSRV_OK)) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", + __func__, eError)); + goto fail_taattachcleanupctls; + } + + if (psGeomCmdShared) + { + HTBLOGK(HTB_SF_MAIN_KICK_TA, + sTACmdKickData.psContext, + ui32TACmdOffset, + psGeomCmdShared->sCmn.ui32FrameNum, + ui32ExtJobRef, + ui32IntJobRef + ); + } + + RGXSRV_HWPERF_ENQ(psRenderContext, OSGetCurrentClientProcessIDKM(), + ui32FWCtx, ui32ExtJobRef, ui32IntJobRef, + RGX_HWPERF_KICK_TYPE_TA3D, + iCheckTAFence, + iUpdateTAFence, + iUpdateTATimeline, + uiCheckTAFenceUID, uiUpdateTAFenceUID, + NO_DEADLINE, + NO_CYCEST); + + if (!bUseSingleFWCommand) + { + /* Construct the kernel TA CCB command. 
*/ + RGXFWIF_KCCB_CMD sTAKCCBCmd; + sTAKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; + sTAKCCBCmd.uCmdData.sCmdKickData = sTACmdKickData; + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError2 = RGXScheduleCommand(psRenderContext->psDeviceNode->pvDevice, + RGXFWIF_DM_GEOM, + &sTAKCCBCmd, + ui32ClientCacheOpSeqNum, + ui32PDumpFlags); + if (eError2 != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + } + + PVRGpuTraceEnqueueEvent(psRenderContext->psDeviceNode->pvDevice, + ui32FWCtx, ui32ExtJobRef, ui32IntJobRef, + RGX_HWPERF_KICK_TYPE_TA3D); + } + + if (ui323DCmdCount) + { + RGXFWIF_KCCB_CMD s3DKCCBCmd = { 0 }; + RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext); + CMDTA3D_SHARED *ps3DCmdShared = IMG_OFFSET_ADDR(pui83DDMCmd, 0); + + s3DCmdKickData.psContext = FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext); + s3DCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB); + s3DCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB); + + /* Add the Workload data into the KCCB kick */ +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* Store the offset to the CCCB command header so that it can be referenced when the KCCB command reaches the FW */ + s3DCmdKickData.ui32WorkEstCmdHeaderOffset = ui323DCommandOffset + ui323DCmdHeaderOffset + ui323DFullRenderCommandOffset; +#else + s3DCmdKickData.ui32WorkEstCmdHeaderOffset = 0; +#endif + + eError = AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &s3DCmdKickData.apsCleanupCtl, + &s3DCmdKickData.ui32NumCleanupCtl, + RGXFWIF_DM_3D, + bKick3D, + psKMHWRTDataSet, + psZSBuffer, + psMSAAScratchBuffer); + if (unlikely(eError != PVRSRV_OK)) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", + __func__, eError)); + goto fail_3dattachcleanupctls; + } + + if (ps3DCmdShared) + { + HTBLOGK(HTB_SF_MAIN_KICK_3D, + s3DCmdKickData.psContext, + ui323DCmdOffset, + 
ps3DCmdShared->sCmn.ui32FrameNum, + ui32ExtJobRef, + ui32IntJobRef); + } + + if (bUseSingleFWCommand) + { + /* Construct the kernel TA/3D CCB command. */ + s3DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK; + s3DKCCBCmd.uCmdData.sCombinedTA3DCmdKickData.sTACmdKickData = sTACmdKickData; + s3DKCCBCmd.uCmdData.sCombinedTA3DCmdKickData.s3DCmdKickData = s3DCmdKickData; + } + else + { + /* Construct the kernel 3D CCB command. */ + s3DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; + s3DKCCBCmd.uCmdData.sCmdKickData = s3DCmdKickData; + } + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError2 = RGXScheduleCommand(psRenderContext->psDeviceNode->pvDevice, + RGXFWIF_DM_3D, + &s3DKCCBCmd, + ui32ClientCacheOpSeqNum, + ui32PDumpFlags); + if (eError2 != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + } + + /* + * Now check eError (which may have returned an error from our earlier calls + * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first + * so we check it now... 
+ */ + if (unlikely(eError != PVRSRV_OK )) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", + __func__, eError)); + goto fail_3dacquirecmd; + } + +#if defined(NO_HARDWARE) + /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */ + if (psUpdateTASyncCheckpoint) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Signalling NOHW sync checkpoint [TA] <%p>, ID:%d, FwAddr=0x%x", + __func__, (void*)psUpdateTASyncCheckpoint, + SyncCheckpointGetId(psUpdateTASyncCheckpoint), + SyncCheckpointGetFirmwareAddr(psUpdateTASyncCheckpoint))); + SyncCheckpointSignalNoHW(psUpdateTASyncCheckpoint); + } + if (psTAFenceTimelineUpdateSync) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Updating NOHW sync prim [TA] <%p> to %d", + __func__, (void*)psTAFenceTimelineUpdateSync, + ui32TAFenceTimelineUpdateValue)); + SyncPrimNoHwUpdate(psTAFenceTimelineUpdateSync, ui32TAFenceTimelineUpdateValue); + } + + if (psUpdate3DSyncCheckpoint) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Signalling NOHW sync checkpoint [3D] <%p>, ID:%d, FwAddr=0x%x", + __func__, (void*)psUpdate3DSyncCheckpoint, + SyncCheckpointGetId(psUpdate3DSyncCheckpoint), + SyncCheckpointGetFirmwareAddr(psUpdate3DSyncCheckpoint))); + SyncCheckpointSignalNoHW(psUpdate3DSyncCheckpoint); + } + if (ps3DFenceTimelineUpdateSync) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: Updating NOHW sync prim [3D] <%p> to %d", + __func__, (void*)ps3DFenceTimelineUpdateSync, + ui323DFenceTimelineUpdateValue)); + SyncPrimNoHwUpdate(ps3DFenceTimelineUpdateSync, ui323DFenceTimelineUpdateValue); + } + SyncCheckpointNoHWUpdateTimelines(NULL); + +#endif /* defined(NO_HARDWARE) */ + +#if defined(SUPPORT_BUFFER_SYNC) + if (psBufferSyncData) + { + CHKPT_DBG((PVR_DBG_ERROR, + "%s: calling pvr_buffer_sync_kick_succeeded(psBufferSyncData=<%p>)...", + __func__, (void*)psBufferSyncData)); + pvr_buffer_sync_kick_succeeded(psBufferSyncData); + } + if (apsBufferFenceSyncCheckpoints) + { + kfree(apsBufferFenceSyncCheckpoints); + } +#endif /* 
defined(SUPPORT_BUFFER_SYNC) */ + + if (piUpdateTAFence) + { + *piUpdateTAFence = iUpdateTAFence; + } + if (piUpdate3DFence) + { + *piUpdate3DFence = iUpdate3DFence; + } + + /* Drop the references taken on the sync checkpoints in the + * resolved input fence. + * NOTE: 3D fence is always submitted, either via 3D or TA(PR). + */ + if (bKickTA) + { + SyncAddrListDeRefCheckpoints(ui32FenceTASyncCheckpointCount, apsFenceTASyncCheckpoints); + } + SyncAddrListDeRefCheckpoints(ui32Fence3DSyncCheckpointCount, apsFence3DSyncCheckpoints); + + if (pvTAUpdateFenceFinaliseData && (iUpdateTAFence != PVRSRV_NO_FENCE)) + { + SyncCheckpointFinaliseFence(psRenderContext->psDeviceNode, iUpdateTAFence, + pvTAUpdateFenceFinaliseData, + psUpdateTASyncCheckpoint, szFenceNameTA); + } + if (pv3DUpdateFenceFinaliseData && (iUpdate3DFence != PVRSRV_NO_FENCE)) + { + SyncCheckpointFinaliseFence(psRenderContext->psDeviceNode, iUpdate3DFence, + pv3DUpdateFenceFinaliseData, + psUpdate3DSyncCheckpoint, szFenceName3D); + } + + /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ + if (apsFenceTASyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFenceTASyncCheckpoints); + } + if (apsFence3DSyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFence3DSyncCheckpoints); + } + + if (sTASyncData.paui32ClientUpdateValue) + { + OSFreeMem(sTASyncData.paui32ClientUpdateValue); + } + if (s3DSyncData.paui32ClientUpdateValue) + { + OSFreeMem(s3DSyncData.paui32ClientUpdateValue); + } + +#if defined(SUPPORT_VALIDATION) + if (bTestSLRAdd3DCheck) + { + SyncCheckpointFree(psDummySyncCheckpoint); + } +#endif + OSLockRelease(psRenderContext->hLock); + + return PVRSRV_OK; + +fail_3dattachcleanupctls: +fail_taattachcleanupctls: +fail_3dacquirecmd: +fail_3dcmdinit: +fail_taacquirecmd: + SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrListTAFence); + SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, 
&psRenderContext->sSyncAddrListTAUpdate); + SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrList3DFence); + SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrList3DUpdate); + /* Where a TA-only kick (ie no 3D) is submitted, the PR update will make use of the unused 3DUpdate list. + * If this has happened, performing a rollback on pauiClientPRUpdateUFOAddress will simply repeat what + * has already been done for the sSyncAddrList3DUpdate above and result in a double decrement of the + * sync checkpoint's hEnqueuedCCBCount, so we need to check before rolling back the PRUpdate. + */ + if (pauiClientPRUpdateUFOAddress && (pauiClientPRUpdateUFOAddress != psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs)) + { + SyncCheckpointRollbackFromUFO(psRenderContext->psDeviceNode, pauiClientPRUpdateUFOAddress->ui32Addr); + } + +fail_alloc_update_values_mem_3D: + if (iUpdate3DFence != PVRSRV_NO_FENCE) + { + SyncCheckpointRollbackFenceData(iUpdate3DFence, pv3DUpdateFenceFinaliseData); + } +fail_create_3d_fence: +fail_alloc_update_values_mem_TA: + if (iUpdateTAFence != PVRSRV_NO_FENCE) + { + SyncCheckpointRollbackFenceData(iUpdateTAFence, pvTAUpdateFenceFinaliseData); + } +fail_create_ta_fence: +#if !defined(SUPPORT_BUFFER_SYNC) +err_no_buffer_sync_invalid_params: +#endif /* !defined(SUPPORT_BUFFER_SYNC) */ +err_pr_fence_address: +err_populate_sync_addr_list_3d_update: +err_populate_sync_addr_list_3d_fence: +err_populate_sync_addr_list_ta_update: +err_populate_sync_addr_list_ta_fence: +err_not_enough_space: +fail_tacmdinvalfbsc: + /* Drop the references taken on the sync checkpoints in the + * resolved input fence. + * NOTE: 3D fence is always submitted, either via 3D or TA(PR). 
+ */ + SyncAddrListDeRefCheckpoints(ui32Fence3DSyncCheckpointCount, apsFence3DSyncCheckpoints); +fail_resolve_input_3d_fence: + if (bKickTA) + { + SyncAddrListDeRefCheckpoints(ui32FenceTASyncCheckpointCount, apsFenceTASyncCheckpoints); + } +fail_resolve_input_ta_fence: + /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ + if (apsFenceTASyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFenceTASyncCheckpoints); + } + if (apsFence3DSyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFence3DSyncCheckpoints); + } + if (sTASyncData.paui32ClientUpdateValue) + { + OSFreeMem(sTASyncData.paui32ClientUpdateValue); + } + if (s3DSyncData.paui32ClientUpdateValue) + { + OSFreeMem(s3DSyncData.paui32ClientUpdateValue); + } +#if defined(SUPPORT_VALIDATION) + if (bTestSLRAdd3DCheck) + { + SyncCheckpointFree(psDummySyncCheckpoint); + } +#endif +#if defined(SUPPORT_BUFFER_SYNC) + if (psBufferSyncData) + { + pvr_buffer_sync_kick_failed(psBufferSyncData); + } + if (apsBufferFenceSyncCheckpoints) + { + kfree(apsBufferFenceSyncCheckpoints); + } +#endif /* defined(SUPPORT_BUFFER_SYNC) */ + PVR_ASSERT(eError != PVRSRV_OK); + OSLockRelease(psRenderContext->hLock); + return eError; +} + +PVRSRV_ERROR PVRSRVRGXSetRenderContextPriorityKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + RGX_SERVER_RENDER_CONTEXT *psRenderContext, + IMG_UINT32 ui32Priority) +{ + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + + OSLockAcquire(psRenderContext->hLock); + + if (psRenderContext->sTAData.ui32Priority != ui32Priority) + { + eError = ContextSetPriority(psRenderContext->sTAData.psServerCommonContext, + psConnection, + psRenderContext->psDeviceNode->pvDevice, + ui32Priority, + RGXFWIF_DM_GEOM); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to set the priority of the TA part of the rendercontext (%s)", + __func__, PVRSRVGetErrorString(eError))); + goto fail_tacontext; + } + 
psRenderContext->sTAData.ui32Priority = ui32Priority; + } + + if (psRenderContext->s3DData.ui32Priority != ui32Priority) + { + eError = ContextSetPriority(psRenderContext->s3DData.psServerCommonContext, + psConnection, + psRenderContext->psDeviceNode->pvDevice, + ui32Priority, + RGXFWIF_DM_3D); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to set the priority of the 3D part of the rendercontext (%s)", + __func__, PVRSRVGetErrorString(eError))); + goto fail_3dcontext; + } + psRenderContext->s3DData.ui32Priority = ui32Priority; + } + + OSLockRelease(psRenderContext->hLock); + return PVRSRV_OK; + +fail_3dcontext: +fail_tacontext: + OSLockRelease(psRenderContext->hLock); + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + + +PVRSRV_ERROR PVRSRVRGXSetRenderContextPropertyKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, + RGX_CONTEXT_PROPERTY eContextProperty, + IMG_UINT64 ui64Input, + IMG_UINT64 *pui64Output) +{ + PVRSRV_ERROR eError; + PVRSRV_ERROR eError2 = PVRSRV_OK; + + switch (eContextProperty) + { + case RGX_CONTEXT_PROPERTY_FLAGS: + { + OSLockAcquire(psRenderContext->hLock); + eError = FWCommonContextSetFlags(psRenderContext->sTAData.psServerCommonContext, + (IMG_UINT32)ui64Input); + if (eError == PVRSRV_OK) + { + eError2 = FWCommonContextSetFlags(psRenderContext->s3DData.psServerCommonContext, + (IMG_UINT32)ui64Input); + } + OSLockRelease(psRenderContext->hLock); + PVR_LOG_IF_ERROR(eError, "FWCommonContextSetFlags eError"); + PVR_LOG_IF_ERROR(eError2, "FWCommonContextSetFlags eError2"); + break; + } + + default: + { + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, eContextProperty)); + eError = PVRSRV_ERROR_NOT_SUPPORTED; + } + } + + return eError; +} + +void DumpRenderCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + IMG_UINT32 ui32VerbLevel) +{ + DLLIST_NODE *psNode, *psNext; + 
OSWRLockAcquireRead(psDevInfo->hRenderCtxListLock); + dllist_foreach_node(&psDevInfo->sRenderCtxtListHead, psNode, psNext) + { + RGX_SERVER_RENDER_CONTEXT *psCurrentServerRenderCtx = + IMG_CONTAINER_OF(psNode, RGX_SERVER_RENDER_CONTEXT, sListNode); + + DumpFWCommonContextInfo(psCurrentServerRenderCtx->sTAData.psServerCommonContext, + pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); + DumpFWCommonContextInfo(psCurrentServerRenderCtx->s3DData.psServerCommonContext, + pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); + } + OSWRLockReleaseRead(psDevInfo->hRenderCtxListLock); +} + +IMG_UINT32 CheckForStalledClientRenderCtxt(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + DLLIST_NODE *psNode, *psNext; + IMG_UINT32 ui32ContextBitMask = 0; + + OSWRLockAcquireRead(psDevInfo->hRenderCtxListLock); + + dllist_foreach_node(&psDevInfo->sRenderCtxtListHead, psNode, psNext) + { + RGX_SERVER_RENDER_CONTEXT *psCurrentServerRenderCtx = + IMG_CONTAINER_OF(psNode, RGX_SERVER_RENDER_CONTEXT, sListNode); + if (NULL != psCurrentServerRenderCtx->sTAData.psServerCommonContext) + { + if (CheckStalledClientCommonContext(psCurrentServerRenderCtx->sTAData.psServerCommonContext, RGX_KICK_TYPE_DM_TA) == PVRSRV_ERROR_CCCB_STALLED) + { + ui32ContextBitMask |= RGX_KICK_TYPE_DM_TA; + } + } + + if (NULL != psCurrentServerRenderCtx->s3DData.psServerCommonContext) + { + if (CheckStalledClientCommonContext(psCurrentServerRenderCtx->s3DData.psServerCommonContext, RGX_KICK_TYPE_DM_3D) == PVRSRV_ERROR_CCCB_STALLED) + { + ui32ContextBitMask |= RGX_KICK_TYPE_DM_3D; + } + } + } + + OSWRLockReleaseRead(psDevInfo->hRenderCtxListLock); + return ui32ContextBitMask; +} + +/* + * RGXRenderContextStalledKM + */ +PVRSRV_ERROR RGXRenderContextStalledKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext) +{ + RGXCheckForStalledClientContexts((PVRSRV_RGXDEV_INFO *) psRenderContext->psDeviceNode->pvDevice, IMG_TRUE); + return PVRSRV_OK; +} + +/****************************************************************************** + End of file 
(rgxta3d.c) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgxta3d.h b/drivers/gpu/drm/phytium/octopus/rgxta3d.h new file mode 100644 index 000000000000..b03175c18aae --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxta3d.h @@ -0,0 +1,507 @@ +/*************************************************************************/ /*! +@File +@Title RGX TA and 3D Functionality +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Header for the RGX TA and 3D Functionality +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RGXTA3D_H +#define RGXTA3D_H + +#include "devicemem.h" +#include "devicemem_server.h" +#include "device.h" +#include "rgxdevice.h" +#include "rgx_fwif_shared.h" +#include "rgx_fwif_resetframework.h" +#include "sync_server.h" +#include "connection_server.h" +#include "rgxdebug.h" +#include "pvr_notifier.h" +#include "ri_server.h" + +typedef struct _RGX_SERVER_RENDER_CONTEXT_ RGX_SERVER_RENDER_CONTEXT; +typedef struct _RGX_FREELIST_ RGX_FREELIST; +typedef struct _RGX_PMR_NODE_ RGX_PMR_NODE; + +/***************************************************************************** + * The Design of Data Storage System for Render Targets * + * ==================================================== * + * Relevant for * + * understanding RGXCreateHWRTDataSet & RGXDestroyHWRTDataSet * + * * + * * + * +=========================================+ * + * | RenderTargetDataSet | * + * +---------------|---------|---------------+ * + * | | * + * V V * + * +- - - - - - - - - - - - + +- - - - - - - - - - - - + * + * | KM_HW_RT_DATA_HANDLE_0 | | KM_HW_RT_DATA_HANDLE_1 | * + * +- - -|- - - - - - - - - + +- - - - - - - - - | - - + * + * | | * + * | | [UM]Client * + * ------|-----------------------------------------|----------------------- * + * | | Bridge * + * 
------|-----------------------------------------|----------------------- * + * | | [KM]Server * + * | | * + * | KM-ptr | KM-ptr * + * V V * + * +====================+ +====================+ * + * | KM_HW_RT_DATA_0 | | KM_HW_RT_DATA_1 | * + * +-----|------------|-+ +-|------------|-----+ * + * | | | | * + * | | | | * + * | | | | * + * | | | | * + * | | KM-ptr | KM-ptr | * + * | V V | * + * | +==========================+ | * + * | | HW_RT_DATA_COMMON_COOKIE | | * + * | +--------------------------+ | * + * | | | * + * | | | * + * ------|-------------------|---------------------|----------------------- * + * | | | [FW]Firmware * + * | | | * + * | FW-addr | | FW-addr * + * V | V * + * +===============+ | +===============+ * + * | HW_RT_DATA_0 | | | HW_RT_DATA_1 | * + * +------------|--+ | +--|------------+ * + * | | | * + * | FW-addr | FW-addr | FW-addr * + * V V V * + * +=========================================+ * + * | HW_RT_DATA_COMMON | * + * +-----------------------------------------+ * + * * + *****************************************************************************/ + +typedef struct _RGX_HWRTDATA_COMMON_COOKIE_ +{ + DEVMEM_MEMDESC *psHWRTDataCommonFwMemDesc; + RGXFWIF_DEV_VIRTADDR sHWRTDataCommonFwAddr; + IMG_UINT32 ui32RefCount; + +} RGX_HWRTDATA_COMMON_COOKIE; + +typedef struct _RGX_KM_HW_RT_DATASET_ +{ + RGX_HWRTDATA_COMMON_COOKIE *psHWRTDataCommonCookie; + + /* RGX_RTDATA_CLEANUP_DATA */ + /* RGXMKIF_NUM_RTDATAS */ + PVRSRV_DEVICE_NODE *psDeviceNode; + RGXFWIF_DEV_VIRTADDR sHWRTDataFwAddr; + + DEVMEM_MEMDESC *psHWRTDataFwMemDesc; + DEVMEM_MEMDESC *psRTArrayFwMemDesc; + DEVMEM_MEMDESC *psRendersAccArrayFwMemDesc; + + RGX_FREELIST *apsFreeLists[RGXFW_MAX_FREELISTS]; +#if !defined(SUPPORT_SHADOW_FREELISTS) + DLLIST_NODE sNodeHWRTData; +#endif + +} RGX_KM_HW_RT_DATASET; + +struct _RGX_FREELIST_ { + PVRSRV_RGXDEV_INFO *psDevInfo; + CONNECTION_DATA *psConnection; + + /* Free list PMR */ + PMR *psFreeListPMR; + IMG_DEVMEM_OFFSET_T uiFreeListPMROffset; + + /* 
Free list PM state PMR */ + PMR *psFreeListStatePMR; + IMG_DEVMEM_OFFSET_T uiFreeListStatePMROffset; + + /* Freelist config */ + IMG_UINT32 ui32MaxFLPages; + IMG_UINT32 ui32InitFLPages; + IMG_UINT32 ui32CurrentFLPages; + IMG_UINT32 ui32GrowFLPages; + IMG_UINT32 ui32ReadyFLPages; + IMG_UINT32 ui32GrowThreshold; /* Percentage of FL memory used that should trigger a new grow request */ + IMG_UINT32 ui32FreelistID; + IMG_UINT32 ui32FreelistGlobalID; /* related global freelist for this freelist */ + IMG_UINT64 ui64FreelistChecksum; /* checksum over freelist content */ + IMG_BOOL bCheckFreelist; /* freelist check enabled */ + IMG_UINT32 ui32RefCount; /* freelist reference counting */ + + IMG_UINT32 ui32NumGrowReqByApp; /* Total number of grow requests by Application */ + IMG_UINT32 ui32NumGrowReqByFW; /* Total Number of grow requests by Firmware */ + IMG_UINT32 ui32NumHighPages; /* High Mark of pages in the freelist */ + + IMG_PID ownerPid; /* Pid of the owner of the list */ + + /* Memory Blocks */ + DLLIST_NODE sMemoryBlockHead; + DLLIST_NODE sMemoryBlockInitHead; + DLLIST_NODE sNode; +#if !defined(SUPPORT_SHADOW_FREELISTS) + /* HWRTData nodes linked to local freelist */ + DLLIST_NODE sNodeHWRTDataHead; +#endif + + /* FW data structures */ + DEVMEM_MEMDESC *psFWFreelistMemDesc; + RGXFWIF_DEV_VIRTADDR sFreeListFWDevVAddr; + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + HASH_TABLE* psWorkloadHashTable; +#endif +}; + +struct _RGX_PMR_NODE_ { + RGX_FREELIST *psFreeList; + PMR *psPMR; + PMR_PAGELIST *psPageList; + DLLIST_NODE sMemoryBlock; + IMG_UINT32 ui32NumPages; + IMG_BOOL bFirstPageMissing; +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + RI_HANDLE hRIHandle; +#endif +}; + +typedef struct { + PVRSRV_RGXDEV_INFO *psDevInfo; + DEVMEM_MEMDESC *psFWZSBufferMemDesc; + RGXFWIF_DEV_VIRTADDR sZSBufferFWDevVAddr; + + DEVMEMINT_RESERVATION *psReservation; + PMR *psPMR; + DEVMEMINT_MAPPING *psMapping; + PVRSRV_MEMALLOCFLAGS_T uiMapFlags; + IMG_UINT32 ui32ZSBufferID; + IMG_UINT32 
ui32RefCount; + IMG_BOOL bOnDemand; + + IMG_BOOL ui32NumReqByApp; /* Number of Backing Requests from Application */ + IMG_BOOL ui32NumReqByFW; /* Number of Backing Requests from Firmware */ + + IMG_PID owner; + + DLLIST_NODE sNode; +}RGX_ZSBUFFER_DATA; + +typedef struct { + RGX_ZSBUFFER_DATA *psZSBuffer; +} RGX_POPULATION; + +/* Dump the physical pages of a freelist */ +IMG_BOOL RGXDumpFreeListPageList(RGX_FREELIST *psFreeList); + + + +/* Create HWRTDataSet */ +PVRSRV_ERROR RGXCreateHWRTDataSet(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEV_VIRTADDR psVHeapTableDevVAddr, + IMG_DEV_VIRTADDR sPMDataDevVAddr0, + IMG_DEV_VIRTADDR sPMDataDevVAddr1, + IMG_DEV_VIRTADDR sPMSecureDataDevVAddr0, + IMG_DEV_VIRTADDR sPMSecureDataDevVAddr1, + RGX_FREELIST *apsFreeLists[RGXFW_MAX_FREELISTS], + IMG_UINT32 ui32ScreenPixelMax, + IMG_UINT64 ui64PPPMultiSampleCtl, + IMG_UINT32 ui32TEStride, + IMG_DEV_VIRTADDR sTailPtrsDevVAddr, + IMG_UINT32 ui32TPCSize, + IMG_UINT32 ui32TEScreen, + IMG_UINT32 ui32TEAA, + IMG_UINT32 ui32TEMTILE1, + IMG_UINT32 ui32TEMTILE2, + IMG_UINT32 ui32RgnStride, + IMG_UINT32 ui32ISPMergeLowerX, + IMG_UINT32 ui32ISPMergeLowerY, + IMG_UINT32 ui32ISPMergeUpperX, + IMG_UINT32 ui32ISPMergeUpperY, + IMG_UINT32 ui32ISPMergeScaleX, + IMG_UINT32 ui32ISPMergeScaleY, + IMG_UINT16 ui16MaxRTs, + RGX_KM_HW_RT_DATASET **ppsKmHwRTDataSet0, + RGX_KM_HW_RT_DATASET **ppsKmHwRTDataSet1); + +/* Destroy HWRTDataSet */ +PVRSRV_ERROR RGXDestroyHWRTDataSet(RGX_KM_HW_RT_DATASET *psKmHwRTDataSet); + +/* + RGXCreateZSBuffer +*/ +PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + DEVMEMINT_RESERVATION *psReservation, + PMR *psPMR, + PVRSRV_MEMALLOCFLAGS_T uiMapFlags, + RGX_ZSBUFFER_DATA **ppsZSBuffer); + +/* + RGXDestroyZSBufferKM +*/ +PVRSRV_ERROR RGXDestroyZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer); + + +/* + * RGXBackingZSBuffer() + * + * Backs ZS-Buffer with physical pages + */ +PVRSRV_ERROR 
+RGXBackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer); + +/* + * RGXPopulateZSBufferKM() + * + * Backs ZS-Buffer with physical pages (called by Bridge calls) + */ +PVRSRV_ERROR RGXPopulateZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer, + RGX_POPULATION **ppsPopulation); + +/* + * RGXUnbackingZSBuffer() + * + * Frees ZS-Buffer's physical pages + */ +PVRSRV_ERROR RGXUnbackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer); + +/* + * RGXUnpopulateZSBufferKM() + * + * Frees ZS-Buffer's physical pages (called by Bridge calls) + */ +PVRSRV_ERROR RGXUnpopulateZSBufferKM(RGX_POPULATION *psPopulation); + +/* + RGXProcessRequestZSBufferBacking +*/ +void RGXProcessRequestZSBufferBacking(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32ZSBufferID); + +/* + RGXProcessRequestZSBufferUnbacking +*/ +void RGXProcessRequestZSBufferUnbacking(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32ZSBufferID); + +/* + RGXGrowFreeList +*/ +PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList, + IMG_UINT32 ui32NumPages, + PDLLIST_NODE pListHeader); + +/* Create free list */ +PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32MaxFLPages, + IMG_UINT32 ui32InitFLPages, + IMG_UINT32 ui32GrowFLPages, + IMG_UINT32 ui32GrowParamThreshold, + RGX_FREELIST *psGlobalFreeList, + IMG_BOOL bCheckFreelist, + IMG_DEV_VIRTADDR sFreeListBaseDevVAddr, + IMG_DEV_VIRTADDR sFreeListStateDevVAddr, + PMR *psFreeListPMR, + IMG_DEVMEM_OFFSET_T uiFreeListPMROffset, + PMR *psFreeListStatePMR, + IMG_DEVMEM_OFFSET_T uiFreeListStatePMROffset, + RGX_FREELIST **ppsFreeList); + +/* Destroy free list */ +PVRSRV_ERROR RGXDestroyFreeList(RGX_FREELIST *psFreeList); + +/* + RGXProcessRequestGrow +*/ +void RGXProcessRequestGrow(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32FreelistID); + + +/* Reconstruct free list after Hardware Recovery */ +void RGXProcessRequestFreelistsReconstruction(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32FreelistsCount, + 
IMG_UINT32 *paui32Freelists); + +/*! +******************************************************************************* + + @Function PVRSRVRGXCreateRenderContextKM + + @Description + Server-side implementation of RGXCreateRenderContext + + @Input pvDeviceNode - device node + @Input ui32Priority - context priority + @Input hMemCtxPrivData - memory context private data + @Input ui32PackedCCBSizeU8888 : + ui8TACCBAllocSizeLog2 - TA CCB size + ui8TACCBMaxAllocSizeLog2 - maximum size to which TA CCB can grow + ui83DCCBAllocSizeLog2 - 3D CCB size + ui83DCCBMaxAllocSizeLog2 - maximum size to which 3D CCB can grow + @Input ui32ContextFlags - flags which specify properties of the context + @Output ppsRenderContext - clean up data + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32Priority, + IMG_UINT32 ui32FrameworkCommandSize, + IMG_PBYTE pabyFrameworkCommand, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32StaticRenderContextStateSize, + IMG_PBYTE pStaticRenderContextState, + IMG_UINT32 ui32PackedCCBSizeU8888, + IMG_UINT32 ui32ContextFlags, + IMG_UINT64 ui64RobustnessAddress, + IMG_UINT32 ui32MaxTADeadlineMS, + IMG_UINT32 ui32Max3DDeadlineMS, + RGX_SERVER_RENDER_CONTEXT **ppsRenderContext); + + +/*! +******************************************************************************* + + @Function PVRSRVRGXDestroyRenderContextKM + + @Description + Server-side implementation of RGXDestroyRenderContext + + @Input psRenderContext - + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXDestroyRenderContextKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext); + + +/*! 
+******************************************************************************* + + @Function PVRSRVRGXKickTA3DKM + + @Description + Server-side implementation of RGXKickTA3D + + @Input psRTDataCleanup - RT data associated with the kick (or NULL) + @Input psZBuffer - Z-buffer associated with the kick (or NULL) + @Input psSBuffer - S-buffer associated with the kick (or NULL) + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, + IMG_UINT32 ui32ClientCacheOpSeqNum, + IMG_UINT32 ui32ClientTAFenceCount, + SYNC_PRIMITIVE_BLOCK **apsClientTAFenceSyncPrimBlock, + IMG_UINT32 *paui32ClientTAFenceSyncOffset, + IMG_UINT32 *paui32ClientTAFenceValue, + IMG_UINT32 ui32ClientTAUpdateCount, + SYNC_PRIMITIVE_BLOCK **apsClientUpdateSyncPrimBlock, + IMG_UINT32 *paui32ClientUpdateSyncOffset, + IMG_UINT32 *paui32ClientTAUpdateValue, + IMG_UINT32 ui32Client3DUpdateCount, + SYNC_PRIMITIVE_BLOCK **apsClient3DUpdateSyncPrimBlock, + IMG_UINT32 *paui32Client3DUpdateSyncOffset, + IMG_UINT32 *paui32Client3DUpdateValue, + SYNC_PRIMITIVE_BLOCK *psPRSyncPrimBlock, + IMG_UINT32 ui32PRSyncOffset, + IMG_UINT32 ui32PRFenceValue, + PVRSRV_FENCE iCheckFence, + PVRSRV_TIMELINE iUpdateTimeline, + PVRSRV_FENCE *piUpdateFence, + IMG_CHAR szFenceName[PVRSRV_SYNC_NAME_LENGTH], + PVRSRV_FENCE iCheckFence3D, + PVRSRV_TIMELINE iUpdateTimeline3D, + PVRSRV_FENCE *piUpdateFence3D, + IMG_CHAR szFenceName3D[PVRSRV_SYNC_NAME_LENGTH], + IMG_UINT32 ui32TACmdSize, + IMG_PBYTE pui8TADMCmd, + IMG_UINT32 ui323DPRCmdSize, + IMG_PBYTE pui83DPRDMCmd, + IMG_UINT32 ui323DCmdSize, + IMG_PBYTE pui83DDMCmd, + IMG_UINT32 ui32ExtJobRef, + IMG_BOOL bKickTA, + IMG_BOOL bKickPR, + IMG_BOOL bKick3D, + IMG_BOOL bAbort, + IMG_UINT32 ui32PDumpFlags, + RGX_KM_HW_RT_DATASET *psKMHWRTDataSet, + RGX_ZSBUFFER_DATA *psZSBuffer, + RGX_ZSBUFFER_DATA *psMSAAScratchBuffer, + IMG_UINT32 ui32SyncPMRCount, + IMG_UINT32 
*paui32SyncPMRFlags, + PMR **ppsSyncPMRs, + IMG_UINT32 ui32RenderTargetSize, + IMG_UINT32 ui32NumberOfDrawCalls, + IMG_UINT32 ui32NumberOfIndices, + IMG_UINT32 ui32NumberOfMRTs, + IMG_UINT64 ui64DeadlineInus); + + +PVRSRV_ERROR PVRSRVRGXSetRenderContextPriorityKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDevNode, + RGX_SERVER_RENDER_CONTEXT *psRenderContext, + IMG_UINT32 ui32Priority); + +PVRSRV_ERROR PVRSRVRGXSetRenderContextPropertyKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, + RGX_CONTEXT_PROPERTY eContextProperty, + IMG_UINT64 ui64Input, + IMG_UINT64 *pui64Output); + +/* Debug - Dump debug info of render contexts on this device */ +void DumpRenderCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + IMG_UINT32 ui32VerbLevel); + +/* Debug/Watchdog - check if client contexts are stalled */ +IMG_UINT32 CheckForStalledClientRenderCtxt(PVRSRV_RGXDEV_INFO *psDevInfo); + +PVRSRV_ERROR RGXRenderContextStalledKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext); + +#endif /* RGXTA3D_H */ diff --git a/drivers/gpu/drm/phytium/octopus/rgxtdmtransfer.c b/drivers/gpu/drm/phytium/octopus/rgxtdmtransfer.c new file mode 100644 index 000000000000..cbd08699fe41 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxtdmtransfer.c @@ -0,0 +1,1325 @@ +/*************************************************************************/ /*! +@File rgxtdmtransfer.c +@Title Device specific TDM transfer queue routines +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Device specific functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "pdump_km.h" +#include "rgxdevice.h" +#include "rgxccb.h" +#include "rgxutils.h" +#include "rgxfwutils.h" +#include "rgxtdmtransfer.h" +#include "rgx_tq_shared.h" +#include "rgxmem.h" +#include "allocmem.h" +#include "devicemem.h" +#include "devicemem_pdump.h" +#include "osfunc.h" +#include "pvr_debug.h" +#include "pvrsrv.h" +#include "rgx_memallocflags.h" +#include "rgxhwperf.h" +#include "ospvr_gputrace.h" +#include "htbuffer.h" +#include "rgxshader.h" + +#include "pdump_km.h" + +#include "sync_server.h" +#include "sync_internal.h" +#include "sync.h" + +#if defined(SUPPORT_BUFFER_SYNC) +#include "pvr_buffer_sync.h" +#endif + +#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) +#include "validation_soc.h" +#endif + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) +#include "rgxworkest.h" +#endif + +#include "rgxtimerquery.h" + +/* Enable this to dump the compiled list of UFOs prior to kick call */ +#define ENABLE_TDM_UFO_DUMP 0 + +//#define TDM_CHECKPOINT_DEBUG 1 + +#if defined(TDM_CHECKPOINT_DEBUG) +#define CHKPT_DBG(X) PVR_DPF(X) +#else +#define CHKPT_DBG(X) +#endif + +typedef struct { + RGX_SERVER_COMMON_CONTEXT * psServerCommonContext; + IMG_UINT32 ui32Priority; +#if defined(SUPPORT_BUFFER_SYNC) + struct pvr_buffer_sync_context *psBufferSyncContext; +#endif +} RGX_SERVER_TQ_TDM_DATA; + + +struct _RGX_SERVER_TQ_TDM_CONTEXT_ { + PVRSRV_DEVICE_NODE *psDeviceNode; + DEVMEM_MEMDESC *psFWTransferContextMemDesc; + DEVMEM_MEMDESC *psFWFrameworkMemDesc; + IMG_UINT32 ui32Flags; + RGX_SERVER_TQ_TDM_DATA sTDMData; + DLLIST_NODE sListNode; + SYNC_ADDR_LIST sSyncAddrListFence; + SYNC_ADDR_LIST sSyncAddrListUpdate; + POS_LOCK hLock; +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + WORKEST_HOST_DATA sWorkEstData; +#endif +}; + +static PVRSRV_ERROR _CreateTDMTransferContext( + CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + DEVMEM_MEMDESC * psAllocatedMemDesc, + 
IMG_UINT32 ui32AllocatedOffset, + SERVER_MMU_CONTEXT * psServerMMUContext, + DEVMEM_MEMDESC * psFWMemContextMemDesc, + IMG_UINT32 ui32Priority, + RGX_COMMON_CONTEXT_INFO * psInfo, + RGX_SERVER_TQ_TDM_DATA * psTDMData, + IMG_UINT32 ui32CCBAllocSizeLog2, + IMG_UINT32 ui32CCBMaxAllocSizeLog2, + IMG_UINT32 ui32ContextFlags) +{ + PVRSRV_ERROR eError; + +#if defined(SUPPORT_BUFFER_SYNC) + psTDMData->psBufferSyncContext = + pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice, + "rogue-tdm"); + if (IS_ERR(psTDMData->psBufferSyncContext)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed to create buffer_sync context (err=%ld)", + __func__, PTR_ERR(psTDMData->psBufferSyncContext))); + + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto fail_buffer_sync_context_create; + } +#endif + + eError = FWCommonContextAllocate( + psConnection, + psDeviceNode, + REQ_TYPE_TQ_TDM, + RGXFWIF_DM_TDM, + psServerMMUContext, + psAllocatedMemDesc, + ui32AllocatedOffset, + psFWMemContextMemDesc, + NULL, + ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TDM_CCB_SIZE_LOG2, + ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_TDM_CCB_MAX_SIZE_LOG2, + ui32ContextFlags, + ui32Priority, + UINT_MAX, /* max deadline MS */ + 0, /* robustness address */ + psInfo, + &psTDMData->psServerCommonContext); + if (eError != PVRSRV_OK) + { + goto fail_contextalloc; + } + + psTDMData->ui32Priority = ui32Priority; + return PVRSRV_OK; + +fail_contextalloc: +#if defined(SUPPORT_BUFFER_SYNC) + pvr_buffer_sync_context_destroy(psTDMData->psBufferSyncContext); + psTDMData->psBufferSyncContext = NULL; +fail_buffer_sync_context_create: +#endif + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + + +static PVRSRV_ERROR _DestroyTDMTransferContext( + RGX_SERVER_TQ_TDM_DATA * psTDMData, + PVRSRV_DEVICE_NODE * psDeviceNode) +{ + PVRSRV_ERROR eError; + + /* Check if the FW has finished with this resource ... 
*/ + eError = RGXFWRequestCommonContextCleanUp( + psDeviceNode, + psTDMData->psServerCommonContext, + RGXFWIF_DM_TDM, + PDUMP_FLAGS_CONTINUOUS); + if (eError == PVRSRV_ERROR_RETRY) + { + return eError; + } + else if (eError != PVRSRV_OK) + { + PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", + __func__, + PVRSRVGetErrorString(eError))); + return eError; + } + + /* ... it has so we can free it's resources */ + FWCommonContextFree(psTDMData->psServerCommonContext); + +#if defined(SUPPORT_BUFFER_SYNC) + pvr_buffer_sync_context_destroy(psTDMData->psBufferSyncContext); + psTDMData->psBufferSyncContext = NULL; +#endif + + return PVRSRV_OK; +} + +/* + * PVRSRVCreateTransferContextKM + */ +PVRSRV_ERROR PVRSRVRGXTDMCreateTransferContextKM( + CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 ui32Priority, + IMG_UINT32 ui32FrameworkCommandSize, + IMG_PBYTE pabyFrameworkCommand, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32PackedCCBSizeU88, + IMG_UINT32 ui32ContextFlags, + RGX_SERVER_TQ_TDM_CONTEXT ** ppsTransferContext) +{ + RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext; + + DEVMEM_MEMDESC * psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); + PVRSRV_RGXDEV_INFO * psDevInfo = psDeviceNode->pvDevice; + RGX_COMMON_CONTEXT_INFO sInfo = {NULL}; + PVRSRV_ERROR eError = PVRSRV_OK; + + /* Allocate the server side structure */ + *ppsTransferContext = NULL; + psTransferContext = OSAllocZMem(sizeof(*psTransferContext)); + if (psTransferContext == NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* + Create the FW transfer context, this has the TDM common + context embedded within it + */ + eError = DevmemFwAllocate(psDevInfo, + sizeof(RGXFWIF_FWTDMCONTEXT), + RGX_FWCOMCTX_ALLOCFLAGS, + "FwTransferContext", + &psTransferContext->psFWTransferContextMemDesc); + if (eError != PVRSRV_OK) + { + goto fail_fwtransfercontext; + } + + eError = OSLockCreate(&psTransferContext->hLock); + + if (eError 
!= PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_lockcreate; + } + + psTransferContext->psDeviceNode = psDeviceNode; + + if (ui32FrameworkCommandSize) + { + eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode, + &psTransferContext->psFWFrameworkMemDesc, + ui32FrameworkCommandSize); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate firmware GPU framework state (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_frameworkcreate; + } + + /* Copy the Framework client data into the framework buffer */ + eError = PVRSRVRGXFrameworkCopyCommand(psTransferContext->psFWFrameworkMemDesc, + pabyFrameworkCommand, + ui32FrameworkCommandSize); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to populate the framework buffer (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_frameworkcopy; + } + sInfo.psFWFrameworkMemDesc = psTransferContext->psFWFrameworkMemDesc; + } + + eError = _CreateTDMTransferContext(psConnection, + psDeviceNode, + psTransferContext->psFWTransferContextMemDesc, + offsetof(RGXFWIF_FWTDMCONTEXT, sTDMContext), + hMemCtxPrivData, + psFWMemContextMemDesc, + ui32Priority, + &sInfo, + &psTransferContext->sTDMData, + U32toU8_Unpack1(ui32PackedCCBSizeU88), + U32toU8_Unpack2(ui32PackedCCBSizeU88), + ui32ContextFlags); + if (eError != PVRSRV_OK) + { + goto fail_tdmtransfercontext; + } + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + WorkEstInitTDM(psDevInfo, &psTransferContext->sWorkEstData); +#endif + + SyncAddrListInit(&psTransferContext->sSyncAddrListFence); + SyncAddrListInit(&psTransferContext->sSyncAddrListUpdate); + + OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock); + dllist_add_to_tail(&(psDevInfo->sTDMCtxtListHead), &(psTransferContext->sListNode)); + OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock); + *ppsTransferContext = psTransferContext; + + return PVRSRV_OK; + +fail_tdmtransfercontext: 
+fail_frameworkcopy: + if (psTransferContext->psFWFrameworkMemDesc != NULL) + { + DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc); + } +fail_frameworkcreate: + OSLockDestroy(psTransferContext->hLock); +fail_lockcreate: + DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWTransferContextMemDesc); +fail_fwtransfercontext: + OSFreeMem(psTransferContext); + PVR_ASSERT(eError != PVRSRV_OK); + *ppsTransferContext = NULL; + return eError; +} + +PVRSRV_ERROR PVRSRVRGXTDMGetSharedMemoryKM( + CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + PMR ** ppsCLIPMRMem, + PMR ** ppsUSCPMRMem) +{ + PVRSRVTQAcquireShaders(psDeviceNode, ppsCLIPMRMem, ppsUSCPMRMem); + + return PVRSRV_OK; +} + +PVRSRV_ERROR PVRSRVRGXTDMReleaseSharedMemoryKM(PMR * psPMRMem) +{ + PVR_UNREFERENCED_PARAMETER(psPMRMem); + + return PVRSRV_OK; +} + +PVRSRV_ERROR PVRSRVRGXTDMDestroyTransferContextKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext) +{ + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo = psTransferContext->psDeviceNode->pvDevice; +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + RGXFWIF_FWTDMCONTEXT *psFWTransferContext; + IMG_UINT32 ui32WorkEstCCBSubmitted; + + eError = DevmemAcquireCpuVirtAddr(psTransferContext->psFWTransferContextMemDesc, + (void **)&psFWTransferContext); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to map firmware transfer context (%s)", + __func__, + PVRSRVGetErrorString(eError))); + return eError; + } + + ui32WorkEstCCBSubmitted = psFWTransferContext->ui32WorkEstCCBSubmitted; + + DevmemReleaseCpuVirtAddr(psTransferContext->psFWTransferContextMemDesc); + + /* Check if all of the workload estimation CCB commands for this workload are read */ + if (ui32WorkEstCCBSubmitted != psTransferContext->sWorkEstData.ui32WorkEstCCBReceived) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch", + __func__, ui32WorkEstCCBSubmitted, + 
psTransferContext->sWorkEstData.ui32WorkEstCCBReceived)); + + return PVRSRV_ERROR_RETRY; + } +#endif + + + /* remove node from list before calling destroy - as destroy, if successful + * will invalidate the node + * must be re-added if destroy fails + */ + OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock); + dllist_remove_node(&(psTransferContext->sListNode)); + OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock); + + + eError = _DestroyTDMTransferContext(&psTransferContext->sTDMData, + psTransferContext->psDeviceNode); + if (eError != PVRSRV_OK) + { + goto fail_destroyTDM; + } + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + WorkEstDeInitTDM(psDevInfo, &psTransferContext->sWorkEstData); +#endif + + SyncAddrListDeinit(&psTransferContext->sSyncAddrListFence); + SyncAddrListDeinit(&psTransferContext->sSyncAddrListUpdate); + + if (psTransferContext->psFWFrameworkMemDesc != NULL) + { + DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc); + } + DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWTransferContextMemDesc); + + OSLockDestroy(psTransferContext->hLock); + + OSFreeMem(psTransferContext); + + return PVRSRV_OK; + +fail_destroyTDM: + + OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock); + dllist_add_to_tail(&(psDevInfo->sTDMCtxtListHead), &(psTransferContext->sListNode)); + OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock); + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + + +/* + * PVRSRVSubmitTQ3DKickKM + */ +PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM( + RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext, + IMG_UINT32 ui32PDumpFlags, + IMG_UINT32 ui32ClientCacheOpSeqNum, + IMG_UINT32 ui32ClientUpdateCount, + SYNC_PRIMITIVE_BLOCK ** pauiClientUpdateUFODevVarBlock, + IMG_UINT32 * paui32ClientUpdateSyncOffset, + IMG_UINT32 * paui32ClientUpdateValue, + PVRSRV_FENCE iCheckFence, + PVRSRV_TIMELINE iUpdateTimeline, + PVRSRV_FENCE * piUpdateFence, + IMG_CHAR szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH], + IMG_UINT32 ui32FWCommandSize, + IMG_UINT8 * 
pui8FWCommand, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32SyncPMRCount, + IMG_UINT32 * paui32SyncPMRFlags, + PMR ** ppsSyncPMRs, + IMG_UINT32 ui32TDMCharacteristic1, + IMG_UINT32 ui32TDMCharacteristic2, + IMG_UINT64 ui64DeadlineInus) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = psTransferContext->psDeviceNode; + RGX_CCB_CMD_HELPER_DATA *psCmdHelper; + PRGXFWIF_UFO_ADDR * pauiIntFenceUFOAddress = NULL; + PRGXFWIF_UFO_ADDR * pauiIntUpdateUFOAddress = NULL; + IMG_UINT32 ui32IntClientFenceCount = 0; + IMG_UINT32 * paui32IntUpdateValue = paui32ClientUpdateValue; + IMG_UINT32 ui32IntClientUpdateCount = ui32ClientUpdateCount; + PVRSRV_ERROR eError; + PVRSRV_ERROR eError2; + PVRSRV_FENCE iUpdateFence = PVRSRV_NO_FENCE; + PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psTransferContext->sTDMData.psServerCommonContext); + RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext); + IMG_UINT32 ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal); + + IMG_UINT64 ui64FBSCEntryMask; + + IMG_UINT32 ui32CmdOffset = 0; + IMG_BOOL bCCBStateOpen; + + PRGXFWIF_TIMESTAMP_ADDR pPreAddr; + PRGXFWIF_TIMESTAMP_ADDR pPostAddr; + PRGXFWIF_UFO_ADDR pRMWUFOAddr; + + IMG_UINT64 uiCheckFenceUID = 0; + IMG_UINT64 uiUpdateFenceUID = 0; +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + RGXFWIF_WORKEST_KICK_DATA sWorkloadKickDataTransfer = {0}; + IMG_UINT32 ui32TDMWorkloadDataRO = 0; + IMG_UINT32 ui32TDMCmdHeaderOffset = 0; + IMG_UINT32 ui32TDMCmdOffsetWrapCheck = 0; + RGX_WORKLOAD sWorkloadCharacteristics = {0}; +#endif + +#if defined(SUPPORT_BUFFER_SYNC) + struct pvr_buffer_sync_append_data *psBufferSyncData = NULL; + PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL; + IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0; + PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL; +#endif + + PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL; + PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL; + IMG_UINT32 
ui32FenceSyncCheckpointCount = 0; + IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL; + PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL; + IMG_UINT32 ui32FenceTimelineUpdateValue = 0; + void *pvUpdateFenceFinaliseData = NULL; + + if (iUpdateTimeline >= 0 && !piUpdateFence) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + +#if !defined(SUPPORT_WORKLOAD_ESTIMATION) + PVR_UNREFERENCED_PARAMETER(ui32TDMCharacteristic1); + PVR_UNREFERENCED_PARAMETER(ui32TDMCharacteristic2); + PVR_UNREFERENCED_PARAMETER(ui64DeadlineInus); +#endif + + /* Ensure we haven't been given a null ptr to + * update values if we have been told we + * have updates + */ + if (ui32ClientUpdateCount > 0) + { + PVR_LOG_RETURN_IF_FALSE(paui32ClientUpdateValue != NULL, + "paui32ClientUpdateValue NULL but " + "ui32ClientUpdateCount > 0", + PVRSRV_ERROR_INVALID_PARAMS); + } + + /* Ensure the string is null-terminated (Required for safety) */ + szUpdateFenceName[31] = '\0'; + + if (ui32SyncPMRCount != 0) + { + if (!ppsSyncPMRs) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + } + + OSLockAcquire(psTransferContext->hLock); + + /* We can't allocate the required amount of stack space on all consumer architectures */ + psCmdHelper = OSAllocMem(sizeof(RGX_CCB_CMD_HELPER_DATA)); + if (psCmdHelper == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_allochelper; + } + + + /* + Init the command helper commands for all the prepares + */ + { + IMG_CHAR *pszCommandName; + RGXFWIF_CCB_CMD_TYPE eType; +#if defined(SUPPORT_BUFFER_SYNC) + struct pvr_buffer_sync_context *psBufferSyncContext; +#endif + + pszCommandName = "TQ-TDM"; + + if (ui32FWCommandSize == 0) + { + /* A NULL CMD for TDM is used to append updates to a non finished + * FW command. bCCBStateOpen is used in case capture range is + * entered on this command, to not drain CCB up to the Roff for this + * command, but the finished command prior to this. 
+ */ + bCCBStateOpen = IMG_TRUE; + eType = RGXFWIF_CCB_CMD_TYPE_NULL; + } + else + { + bCCBStateOpen = IMG_FALSE; + eType = RGXFWIF_CCB_CMD_TYPE_TQ_TDM; + } + +#if defined(SUPPORT_BUFFER_SYNC) + psBufferSyncContext = psTransferContext->sTDMData.psBufferSyncContext; +#endif + + eError = SyncAddrListPopulate(&psTransferContext->sSyncAddrListFence, + 0, + NULL, + NULL); + if (eError != PVRSRV_OK) + { + goto fail_populate_sync_addr_list; + } + + eError = SyncAddrListPopulate(&psTransferContext->sSyncAddrListUpdate, + ui32ClientUpdateCount, + pauiClientUpdateUFODevVarBlock, + paui32ClientUpdateSyncOffset); + if (eError != PVRSRV_OK) + { + goto fail_populate_sync_addr_list; + } + paui32IntUpdateValue = paui32ClientUpdateValue; + pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs; + + + if (ui32SyncPMRCount) + { +#if defined(SUPPORT_BUFFER_SYNC) + int err; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: Calling pvr_buffer_sync_resolve_and_create_fences", __func__)); + err = pvr_buffer_sync_resolve_and_create_fences(psBufferSyncContext, + psTransferContext->psDeviceNode->hSyncCheckpointContext, + ui32SyncPMRCount, + ppsSyncPMRs, + paui32SyncPMRFlags, + &ui32BufferFenceSyncCheckpointCount, + &apsBufferFenceSyncCheckpoints, + &psBufferUpdateSyncCheckpoint, + &psBufferSyncData); + if (err) + { + switch (err) + { + case -EINTR: + eError = PVRSRV_ERROR_RETRY; + break; + case -ENOMEM: + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + break; + default: + eError = PVRSRV_ERROR_INVALID_PARAMS; + break; + } + + if (eError != PVRSRV_ERROR_RETRY) + { + PVR_DPF((PVR_DBG_ERROR, "%s: pvr_buffer_sync_resolve_and_create_fences failed (%s)", __func__, PVRSRVGetErrorString(eError))); + } + goto fail_resolve_input_fence; + } + + /* Append buffer sync fences */ + if (ui32BufferFenceSyncCheckpointCount > 0) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d buffer sync checkpoints to TQ Fence (&psTransferContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>)...", __func__, 
ui32BufferFenceSyncCheckpointCount, (void*)&psTransferContext->sSyncAddrListFence , (void*)pauiIntFenceUFOAddress)); + SyncAddrListAppendAndDeRefCheckpoints(&psTransferContext->sSyncAddrListFence, + ui32BufferFenceSyncCheckpointCount, + apsBufferFenceSyncCheckpoints); + if (!pauiIntFenceUFOAddress) + { + pauiIntFenceUFOAddress = psTransferContext->sSyncAddrListFence.pasFWAddrs; + } + ui32IntClientFenceCount += ui32BufferFenceSyncCheckpointCount; + } + + if (psBufferUpdateSyncCheckpoint) + { + /* Append the update (from output fence) */ + SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListUpdate, + 1, + &psBufferUpdateSyncCheckpoint); + if (!pauiIntUpdateUFOAddress) + { + pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs; + } + ui32IntClientUpdateCount++; + } +#else /* defined(SUPPORT_BUFFER_SYNC) */ + PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync not supported but got %u buffers", __func__, ui32SyncPMRCount)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto fail_populate_sync_addr_list; +#endif /* defined(SUPPORT_BUFFER_SYNC) */ + } + + /* Resolve the sync checkpoints that make up the input fence */ + eError = SyncCheckpointResolveFence(psTransferContext->psDeviceNode->hSyncCheckpointContext, + iCheckFence, + &ui32FenceSyncCheckpointCount, + &apsFenceSyncCheckpoints, + &uiCheckFenceUID, + ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + goto fail_resolve_input_fence; + } +#if defined(TDM_CHECKPOINT_DEBUG) + { + IMG_UINT32 ii; + for (ii=0; ii<32; ii++) + { + PSYNC_CHECKPOINT psNextCheckpoint = *(apsFenceSyncCheckpoints + ii); + CHKPT_DBG((PVR_DBG_ERROR, "%s: apsFenceSyncCheckpoints[%d]=<%p>", __func__, ii, (void*)psNextCheckpoint)); //psFenceSyncCheckpoints[ii])); + } + } +#endif + /* Create the output fence (if required) */ + if (iUpdateTimeline != PVRSRV_NO_TIMELINE) + { + eError = SyncCheckpointCreateFence(psTransferContext->psDeviceNode, + szUpdateFenceName, + iUpdateTimeline, + 
psTransferContext->psDeviceNode->hSyncCheckpointContext, + &iUpdateFence, + &uiUpdateFenceUID, + &pvUpdateFenceFinaliseData, + &psUpdateSyncCheckpoint, + (void*)&psFenceTimelineUpdateSync, + &ui32FenceTimelineUpdateValue, + ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + goto fail_create_output_fence; + } + + /* Append the sync prim update for the timeline (if required) */ + if (psFenceTimelineUpdateSync) + { + IMG_UINT32 *pui32TimelineUpdateWp = NULL; + + /* Allocate memory to hold the list of update values (including our timeline update) */ + pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1)); + if (!pui32IntAllocatedUpdateValues) + { + /* Failed to allocate memory */ + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_alloc_update_values_mem; + } + OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1)); + /* Copy the update values into the new memory, then append our timeline update value */ + if (paui32IntUpdateValue) + { + OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount); + } + /* Now set the additional update value */ + pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32IntClientUpdateCount; + *pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue; + ui32IntClientUpdateCount++; +#if defined(TDM_CHECKPOINT_DEBUG) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues; + + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + /* Now append the timeline sync prim addr to the transfer context update list */ + SyncAddrListAppendSyncPrim(&psTransferContext->sSyncAddrListUpdate, + psFenceTimelineUpdateSync); +#if defined(TDM_CHECKPOINT_DEBUG) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues; + + for (iii=0; iii) = 0x%x", 
__func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + /* Ensure paui32IntUpdateValue is now pointing to our new array of update values */ + paui32IntUpdateValue = pui32IntAllocatedUpdateValues; + } + } + + if (ui32FenceSyncCheckpointCount) + { + /* Append the checks (from input fence) */ + if (ui32FenceSyncCheckpointCount > 0) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to TQ Fence (&psTransferContext->sSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)&psTransferContext->sSyncAddrListFence)); +#if defined(TDM_CHECKPOINT_DEBUG) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress; + + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListFence, + ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); + if (!pauiIntFenceUFOAddress) + { + pauiIntFenceUFOAddress = psTransferContext->sSyncAddrListFence.pasFWAddrs; + } + ui32IntClientFenceCount += ui32FenceSyncCheckpointCount; + } +#if defined(TDM_CHECKPOINT_DEBUG) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues; + + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + } + if (psUpdateSyncCheckpoint) + { + /* Append the update (from output fence) */ + CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint to TQ Update (&psTransferContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>)...", __func__, (void*)&psTransferContext->sSyncAddrListUpdate , (void*)pauiIntUpdateUFOAddress)); + SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListUpdate, + 1, + &psUpdateSyncCheckpoint); + if (!pauiIntUpdateUFOAddress) + { + pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs; + } + ui32IntClientUpdateCount++; +#if defined(TDM_CHECKPOINT_DEBUG) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp 
= (IMG_UINT32*)pui32IntAllocatedUpdateValues; + + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + } + +#if (ENABLE_TDM_UFO_DUMP == 1) + PVR_DPF((PVR_DBG_ERROR, "%s: dumping TDM fence/updates syncs...", __func__)); + { + IMG_UINT32 ii; + PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress; + PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress; + IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue; + + /* Dump Fence syncs and Update syncs */ + PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TDM fence syncs (&psTransferContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __func__, ui32IntClientFenceCount, (void*)&psTransferContext->sSyncAddrListFence, (void*)pauiIntFenceUFOAddress)); + for (ii=0; ii. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr)); + psTmpIntFenceUFOAddress++; + } + PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TDM update syncs (&psTransferContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __func__, ui32IntClientUpdateCount, (void*)&psTransferContext->sSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress)); + for (ii=0; iiui32Addr & 0x1) + { + PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr)); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. 
FWAddr=0x%x, UpdateValue=%d", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue)); + pui32TmpIntUpdateValue++; + } + psTmpIntUpdateUFOAddress++; + } + } +#endif + + RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psTransferContext->psDeviceNode->pvDevice, + &pPreAddr, + &pPostAddr, + &pRMWUFOAddr); + /* + * Extract the FBSC entries from MMU Context for the deferred FBSC invalidate command, + * in other words, take the value and set it to zero afterwards. + * FBSC Entry Mask must be extracted from MMU ctx and updated just before the kick starts + * as it must be ready at the time of context activation. + */ + { + eError = RGXExtractFBSCEntryMaskFromMMUContext(psTransferContext->psDeviceNode, + FWCommonContextGetServerMMUCtx(psTransferContext->sTDMData.psServerCommonContext), + &ui64FBSCEntryMask); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to extract FBSC Entry Mask (%d)", eError)); + goto fail_invalfbsc; + } + } + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + sWorkloadCharacteristics.sTransfer.ui32Characteristic1 = ui32TDMCharacteristic1; + sWorkloadCharacteristics.sTransfer.ui32Characteristic2 = ui32TDMCharacteristic2; + + /* Prepare workload estimation */ + WorkEstPrepare(psDeviceNode->pvDevice, + &psTransferContext->sWorkEstData, + &psTransferContext->sWorkEstData.uWorkloadMatchingData.sTransfer.sDataTDM, + eType, + &sWorkloadCharacteristics, + ui64DeadlineInus, + &sWorkloadKickDataTransfer); +#endif + + /* + Create the command helper data for this command + */ + RGXCmdHelperInitCmdCCB(psClientCCB, + ui64FBSCEntryMask, + ui32IntClientFenceCount, + pauiIntFenceUFOAddress, + NULL, + ui32IntClientUpdateCount, + pauiIntUpdateUFOAddress, + paui32IntUpdateValue, + ui32FWCommandSize, + pui8FWCommand, + &pPreAddr, + &pPostAddr, + &pRMWUFOAddr, + eType, + ui32ExtJobRef, + ui32IntJobRef, + ui32PDumpFlags, +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + 
&sWorkloadKickDataTransfer, +#else /* SUPPORT_WORKLOAD_ESTIMATION */ + NULL, +#endif /* SUPPORT_WORKLOAD_ESTIMATION */ + pszCommandName, + bCCBStateOpen, + psCmdHelper); + } + + /* + Acquire space for all the commands in one go + */ + + eError = RGXCmdHelperAcquireCmdCCB(1, psCmdHelper); + if (eError != PVRSRV_OK) + { + goto fail_3dcmdacquire; + } + + + /* + We should acquire the kernel CCB(s) space here as the schedule could fail + and we would have to roll back all the syncs + */ + + /* + Only do the command helper release (which takes the server sync + operations if the acquire succeeded + */ + ui32CmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext)); + RGXCmdHelperReleaseCmdCCB(1, + psCmdHelper, + "TQ_TDM", + FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext).ui32Addr); + + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* The following is used to determine the offset of the command header containing + the workload estimation data so that can be accessed when the KCCB is read */ + ui32TDMCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(psCmdHelper); + + ui32TDMCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext)); + + /* This checks if the command would wrap around at the end of the CCB and + * therefore would start at an offset of 0 rather than the current command + * offset */ + if (ui32CmdOffset < ui32TDMCmdOffsetWrapCheck) + { + ui32TDMWorkloadDataRO = ui32CmdOffset; + } + else + { + ui32TDMWorkloadDataRO = 0; + } +#endif + + /* + Even if we failed to acquire the client CCB space we might still need + to kick the HW to process a padding packet to release space for us next + time round + */ + { + RGXFWIF_KCCB_CMD sTDMKCCBCmd; + IMG_UINT32 ui32FWAddr = FWCommonContextGetFWAddress( + psTransferContext->sTDMData.psServerCommonContext).ui32Addr; + + /* Construct the kernel 3D CCB command. 
*/ + sTDMKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; + sTDMKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext); + sTDMKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB); + sTDMKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB); + sTDMKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; + + /* Add the Workload data into the KCCB kick */ +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* Store the offset to the CCCB command header so that it can be referenced + * when the KCCB command reaches the FW */ + sTDMKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = ui32TDMWorkloadDataRO + ui32TDMCmdHeaderOffset; +#else + sTDMKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0; +#endif + + /* HTBLOGK(HTB_SF_MAIN_KICK_TDM, */ + /* s3DKCCBCmd.uCmdData.sCmdKickData.psContext, */ + /* ui323DCmdOffset); */ + RGXSRV_HWPERF_ENQ(psTransferContext, + OSGetCurrentClientProcessIDKM(), + FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext).ui32Addr, + ui32ExtJobRef, + ui32IntJobRef, + RGX_HWPERF_KICK_TYPE_TQTDM, + iCheckFence, + iUpdateFence, + iUpdateTimeline, + uiCheckFenceUID, + uiUpdateFenceUID, + NO_DEADLINE, + NO_CYCEST); + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError2 = RGXScheduleCommand(psDeviceNode->pvDevice, + RGXFWIF_DM_TDM, + & sTDMKCCBCmd, + ui32ClientCacheOpSeqNum, + ui32PDumpFlags); + if (eError2 != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + PVRGpuTraceEnqueueEvent(psDeviceNode->pvDevice, ui32FWAddr, ui32ExtJobRef, + ui32IntJobRef, RGX_HWPERF_KICK_TYPE_TQTDM); + } + + /* + * Now check eError (which may have returned an error from our earlier calls + * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first + * so we check it now... 
+ */ + if (eError != PVRSRV_OK ) + { + goto fail_2dcmdacquire; + } + +#if defined(NO_HARDWARE) + /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */ + if (psUpdateSyncCheckpoint) + { + SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint); + } + if (psFenceTimelineUpdateSync) + { + SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue); + } + SyncCheckpointNoHWUpdateTimelines(NULL); +#endif /* defined(NO_HARDWARE) */ + +#if defined(SUPPORT_BUFFER_SYNC) + if (psBufferSyncData) + { + pvr_buffer_sync_kick_succeeded(psBufferSyncData); + } + if (apsBufferFenceSyncCheckpoints) + { + kfree(apsBufferFenceSyncCheckpoints); + } +#endif /* defined(SUPPORT_BUFFER_SYNC) */ + + * piUpdateFence = iUpdateFence; + if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_NO_FENCE)) + { + SyncCheckpointFinaliseFence(psDeviceNode, iUpdateFence, pvUpdateFenceFinaliseData, + psUpdateSyncCheckpoint, szUpdateFenceName); + } + + OSFreeMem(psCmdHelper); + + /* Drop the references taken on the sync checkpoints in the + * resolved input fence */ + SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); + /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ + if (apsFenceSyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); + } + /* Free memory allocated to hold the internal list of update values */ + if (pui32IntAllocatedUpdateValues) + { + OSFreeMem(pui32IntAllocatedUpdateValues); + pui32IntAllocatedUpdateValues = NULL; + } + + OSLockRelease(psTransferContext->hLock); + return PVRSRV_OK; + +/* + No resources are created in this function so there is nothing to free + unless we had to merge syncs. 
+ If we fail after the client CCB acquire there is still nothing to do + as only the client CCB release will modify the client CCB +*/ +fail_2dcmdacquire: +fail_3dcmdacquire: + +fail_invalfbsc: + SyncAddrListRollbackCheckpoints(psTransferContext->psDeviceNode, &psTransferContext->sSyncAddrListFence); + SyncAddrListRollbackCheckpoints(psTransferContext->psDeviceNode, &psTransferContext->sSyncAddrListUpdate); +fail_alloc_update_values_mem: + +/* fail_pdumpcheck: */ +/* fail_cmdtype: */ + + if (iUpdateFence != PVRSRV_NO_FENCE) + { + SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData); + } +fail_create_output_fence: + /* Drop the references taken on the sync checkpoints in the + * resolved input fence */ + SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); + +fail_resolve_input_fence: + +#if defined(SUPPORT_BUFFER_SYNC) + if (psBufferSyncData) + { + pvr_buffer_sync_kick_failed(psBufferSyncData); + } + if (apsBufferFenceSyncCheckpoints) + { + kfree(apsBufferFenceSyncCheckpoints); + } +#endif /* defined(SUPPORT_BUFFER_SYNC) */ + +fail_populate_sync_addr_list: + PVR_ASSERT(eError != PVRSRV_OK); + OSFreeMem(psCmdHelper); +fail_allochelper: + + if (apsFenceSyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); + } + OSLockRelease(psTransferContext->hLock); + return eError; +} + + +PVRSRV_ERROR PVRSRVRGXTDMNotifyWriteOffsetUpdateKM( + RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext, + IMG_UINT32 ui32PDumpFlags) +{ + RGXFWIF_KCCB_CMD sKCCBCmd; + PVRSRV_ERROR eError; + + OSLockAcquire(psTransferContext->hLock); + + /* Schedule the firmware command */ + sKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE; + sKCCBCmd.uCmdData.sWriteOffsetUpdateData.psContext = FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext); + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError = RGXScheduleCommand(psTransferContext->psDeviceNode->pvDevice, + RGXFWIF_DM_TDM, + &sKCCBCmd, + 
0, + ui32PDumpFlags); + if (eError != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to schedule the FW command %d (%s)", + __func__, eError, PVRSRVGETERRORSTRING(eError))); + } + + OSLockRelease(psTransferContext->hLock); + return eError; +} + +PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPriorityKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDevNode, + RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext, + IMG_UINT32 ui32Priority) +{ + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(psDevNode); + + OSLockAcquire(psTransferContext->hLock); + + if (psTransferContext->sTDMData.ui32Priority != ui32Priority) + { + eError = ContextSetPriority(psTransferContext->sTDMData.psServerCommonContext, + psConnection, + psTransferContext->psDeviceNode->pvDevice, + ui32Priority, + RGXFWIF_DM_TDM); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority (%s)", __func__, PVRSRVGetErrorString(eError))); + + OSLockRelease(psTransferContext->hLock); + return eError; + } + } + + OSLockRelease(psTransferContext->hLock); + return PVRSRV_OK; +} + +PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPropertyKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext, + RGX_CONTEXT_PROPERTY eContextProperty, + IMG_UINT64 ui64Input, + IMG_UINT64 *pui64Output) +{ + PVRSRV_ERROR eError; + + switch (eContextProperty) + { + case RGX_CONTEXT_PROPERTY_FLAGS: + { + OSLockAcquire(psTransferContext->hLock); + eError = FWCommonContextSetFlags(psTransferContext->sTDMData.psServerCommonContext, + (IMG_UINT32)ui64Input); + if (eError == PVRSRV_OK) + { + psTransferContext->ui32Flags = (IMG_UINT32)ui64Input; + } + OSLockRelease(psTransferContext->hLock); + PVR_LOG_IF_ERROR(eError, "FWCommonContextSetFlags"); + break; + } + + default: + { + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, 
eContextProperty)); + eError = PVRSRV_ERROR_NOT_SUPPORTED; + } + } + + return eError; +} + +void DumpTDMTransferCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + IMG_UINT32 ui32VerbLevel) +{ + DLLIST_NODE *psNode, *psNext; + + OSWRLockAcquireRead(psDevInfo->hTDMCtxListLock); + + dllist_foreach_node(&psDevInfo->sTDMCtxtListHead, psNode, psNext) + { + RGX_SERVER_TQ_TDM_CONTEXT *psCurrentServerTransferCtx = + IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_TDM_CONTEXT, sListNode); + + DumpFWCommonContextInfo(psCurrentServerTransferCtx->sTDMData.psServerCommonContext, + pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); + } + + OSWRLockReleaseRead(psDevInfo->hTDMCtxListLock); +} + + +IMG_UINT32 CheckForStalledClientTDMTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + DLLIST_NODE *psNode, *psNext; + IMG_UINT32 ui32ContextBitMask = 0; + + OSWRLockAcquireRead(psDevInfo->hTDMCtxListLock); + + dllist_foreach_node(&psDevInfo->sTDMCtxtListHead, psNode, psNext) + { + RGX_SERVER_TQ_TDM_CONTEXT *psCurrentServerTransferCtx = + IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_TDM_CONTEXT, sListNode); + + if (CheckStalledClientCommonContext( + psCurrentServerTransferCtx->sTDMData.psServerCommonContext, RGX_KICK_TYPE_DM_TDM_2D) + == PVRSRV_ERROR_CCCB_STALLED) { + ui32ContextBitMask = RGX_KICK_TYPE_DM_TDM_2D; + } + } + + OSWRLockReleaseRead(psDevInfo->hTDMCtxListLock); + return ui32ContextBitMask; +} + +/**************************************************************************//** + End of file (rgxtdmtransfer.c) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgxtdmtransfer.h b/drivers/gpu/drm/phytium/octopus/rgxtdmtransfer.h new file mode 100644 index 000000000000..4eeab0eed628 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxtdmtransfer.h @@ -0,0 +1,132 @@ +/*************************************************************************/ /*! 
+@File rgxtdmtransfer.h +@Title RGX Transfer queue 2 Functionality +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Header for the RGX Transfer queue Functionality +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(RGXTDMTRANSFER_H) +#define RGXTDMTRANSFER_H + +#include "devicemem.h" +#include "device.h" +#include "rgxdevice.h" +#include "rgxfwutils.h" +#include "rgx_fwif_resetframework.h" +#include "rgxdebug.h" +#include "pvr_notifier.h" + +#include "sync_server.h" +#include "connection_server.h" + +typedef struct _RGX_SERVER_TQ_TDM_CONTEXT_ RGX_SERVER_TQ_TDM_CONTEXT; + + +PVRSRV_ERROR PVRSRVRGXTDMCreateTransferContextKM( + CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 ui32Priority, + IMG_UINT32 ui32FrameworkCommandSize, + IMG_PBYTE pabyFrameworkCommand, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32PackedCCBSizeU88, + IMG_UINT32 ui32ContextFlags, + RGX_SERVER_TQ_TDM_CONTEXT **ppsTransferContext); + + +PVRSRV_ERROR PVRSRVRGXTDMGetSharedMemoryKM( + CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + PMR ** ppsCLIPMRMem, + PMR ** ppsUSCPMRMem); + + +PVRSRV_ERROR PVRSRVRGXTDMReleaseSharedMemoryKM(PMR * psUSCPMRMem); + + +PVRSRV_ERROR PVRSRVRGXTDMDestroyTransferContextKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext); + + +PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM( + RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext, + IMG_UINT32 ui32PDumpFlags, + IMG_UINT32 ui32ClientCacheOpSeqNum, + IMG_UINT32 ui32ClientUpdateCount, + SYNC_PRIMITIVE_BLOCK ** pauiClientUpdateUFODevVarBlock, + IMG_UINT32 * 
paui32ClientUpdateSyncOffset, + IMG_UINT32 * paui32ClientUpdateValue, + PVRSRV_FENCE iCheckFence, + PVRSRV_TIMELINE iUpdateTimeline, + PVRSRV_FENCE * piUpdateFence, + IMG_CHAR szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH], + IMG_UINT32 ui32FWCommandSize, + IMG_UINT8 * pui8FWCommand, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32SyncPMRCount, + IMG_UINT32 * pui32SyncPMRFlags, + PMR ** ppsSyncPMRs, + IMG_UINT32 ui32TDMCharacteristic1, + IMG_UINT32 ui32TDMCharacteristic2, + IMG_UINT64 ui64DeadlineInus); + +PVRSRV_ERROR PVRSRVRGXTDMNotifyWriteOffsetUpdateKM( + RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext, + IMG_UINT32 ui32PDumpFlags); + +PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPriorityKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext, + IMG_UINT32 ui32Priority); + +PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPropertyKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext, + RGX_CONTEXT_PROPERTY eContextProperty, + IMG_UINT64 ui64Input, + IMG_UINT64 *pui64Output); + +/* Debug - Dump debug info of TDM transfer contexts on this device */ +void DumpTDMTransferCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + IMG_UINT32 ui32VerbLevel); + +/* Debug/Watchdog - check if client transfer contexts are stalled */ +IMG_UINT32 CheckForStalledClientTDMTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo); + + +#endif /* RGXTDMTRANSFER_H */ diff --git a/drivers/gpu/drm/phytium/octopus/rgxtimecorr.c b/drivers/gpu/drm/phytium/octopus/rgxtimecorr.c new file mode 100644 index 000000000000..e3a3ddb940ba --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxtimecorr.c @@ -0,0 +1,645 @@ +/*************************************************************************/ /*! +@File +@Title Device specific time correlation and calibration routines +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Description Device specific time correlation and calibration routines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "img_defs.h" +#include "rgxtimecorr.h" +#include "rgxfwutils.h" +#include "htbserver.h" +#include "pvrsrv_apphint.h" + +/****************************************************************************** + * + * - A calibration period is started on power-on and after a DVFS transition, + * and it's closed before a power-off and before a DVFS transition + * (so power-on -> dvfs -> dvfs -> power-off , power on -> dvfs -> dvfs..., + * where each arrow is a calibration period). + * + * - The timers on the Host and on the FW are correlated at the beginning of + * each period together with the current GPU frequency. + * + * - Correlation and calibration are also done at regular intervals using + * a best effort approach. 
+ * + *****************************************************************************/ + +static IMG_UINT32 g_ui32ClockSource = PVRSRV_APPHINT_TIMECORRCLOCK; + +/* + AppHint interfaces +*/ + +static PVRSRV_ERROR _SetClock(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *psPrivate, + IMG_UINT32 ui32Value) +{ + static __maybe_unused const char* const apszClocks[] = { + "mono", "mono_raw", "sched" + }; + + PVR_UNREFERENCED_PARAMETER(psPrivate); + + if (ui32Value >= RGXTIMECORR_CLOCK_LAST) + { + PVR_DPF((PVR_DBG_ERROR, "Invalid clock source type (%u)", ui32Value)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + RGXTimeCorrEnd((PVRSRV_DEVICE_NODE *) psDeviceNode, + RGXTIMECORR_EVENT_CLOCK_CHANGE); + + PVR_DPF((PVR_DBG_WARNING, "Setting time correlation clock from \"%s\" to \"%s\"", + apszClocks[g_ui32ClockSource], + apszClocks[ui32Value])); + + g_ui32ClockSource = ui32Value; + + RGXTimeCorrBegin((PVRSRV_DEVICE_NODE *) psDeviceNode, + RGXTIMECORR_EVENT_CLOCK_CHANGE); + + return PVRSRV_OK; +} + +static PVRSRV_ERROR _GetClock(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *psPrivate, + IMG_UINT32 *pui32Value) +{ + *pui32Value = g_ui32ClockSource; + + PVR_UNREFERENCED_PARAMETER(psPrivate); + + return PVRSRV_OK; +} + +void RGXTimeCorrInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_TimeCorrClock, _GetClock, + _SetClock, psDeviceNode, NULL); +} + +/* + End of AppHint interface +*/ + +IMG_UINT64 RGXTimeCorrGetClockns64(void) +{ + IMG_UINT64 ui64Clock; + + switch (g_ui32ClockSource) { + case RGXTIMECORR_CLOCK_MONO: + return ((void) OSClockMonotonicns64(&ui64Clock), ui64Clock); + case RGXTIMECORR_CLOCK_MONO_RAW: + return OSClockMonotonicRawns64(); + case RGXTIMECORR_CLOCK_SCHED: + return OSClockns64(); + default: + PVR_ASSERT(IMG_FALSE); + return 0; + } +} + +IMG_UINT64 RGXTimeCorrGetClockus64(void) +{ + IMG_UINT32 rem; + return OSDivide64r64(RGXTimeCorrGetClockns64(), 1000, &rem); +} + +void 
RGXGetTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode, + RGXFWIF_TIME_CORR *psTimeCorrs, + IMG_UINT32 ui32NumOut) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb; + IMG_UINT32 ui32CurrentIndex = psGpuUtilFWCB->ui32TimeCorrSeqCount; + + while (ui32NumOut--) + { + *(psTimeCorrs++) = psGpuUtilFWCB->sTimeCorr[RGXFWIF_TIME_CORR_CURR_INDEX(ui32CurrentIndex)]; + ui32CurrentIndex--; + } +} + +static __maybe_unused const IMG_CHAR* _EventToString(RGXTIMECORR_EVENT eEvent) +{ + switch (eEvent) + { + case RGXTIMECORR_EVENT_POWER: + return "power"; + case RGXTIMECORR_EVENT_DVFS: + return "dvfs"; + case RGXTIMECORR_EVENT_PERIODIC: + return "periodic"; + case RGXTIMECORR_EVENT_CLOCK_CHANGE: + return "clock source"; + default: + return "n/a"; + } +} + +static inline IMG_UINT32 _RGXGetSystemLayerGPUClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + RGX_DATA *psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData; + + return psRGXData->psRGXTimingInfo->ui32CoreClockSpeed; +} + +static inline IMG_UINT32 _RGXGetEstimatedGPUClockSpeed(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable; + GPU_FREQ_TRACKING_DATA *psTrackingData; + + psTrackingData = &psGpuDVFSTable->asTrackingData[psGpuDVFSTable->ui32FreqIndex]; + + return psTrackingData->ui32EstCoreClockSpeed; +} + +#if defined(PVRSRV_TIMER_CORRELATION_HISTORY) +static inline void _DumpTimerCorrelationHistory(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable; + IMG_UINT32 i = psGpuDVFSTable->ui32HistoryIndex; + + PVR_DPF((PVR_DBG_ERROR, "Dumping history of timer correlation data (latest first):")); + + do + { + PVR_DPF((PVR_DBG_ERROR, + " Begin times: OS %" IMG_UINT64_FMTSPEC ", CR %" IMG_UINT64_FMTSPEC ", " + "End times: OS %" IMG_UINT64_FMTSPEC ", CR %" IMG_UINT64_FMTSPEC ", " + "Core clk %u, Estimated clk %u", + 
psGpuDVFSTable->asTrackingHistory[i].ui64BeginOSTimestamp, + psGpuDVFSTable->asTrackingHistory[i].ui64BeginCRTimestamp, + psGpuDVFSTable->asTrackingHistory[i].ui64EndOSTimestamp, + psGpuDVFSTable->asTrackingHistory[i].ui64EndCRTimestamp, + psGpuDVFSTable->asTrackingHistory[i].ui32CoreClockSpeed, + psGpuDVFSTable->asTrackingHistory[i].ui32EstCoreClockSpeed)); + + i = (i - 1) % RGX_GPU_FREQ_TRACKING_SIZE; + + } while (i != psGpuDVFSTable->ui32HistoryIndex); +} +#endif + +static void _RGXMakeTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode, RGXTIMECORR_EVENT eEvent) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb; + IMG_UINT32 ui32NewSeqCount = psGpuUtilFWCB->ui32TimeCorrSeqCount + 1; + RGXFWIF_TIME_CORR *psTimeCorr = &psGpuUtilFWCB->sTimeCorr[RGXFWIF_TIME_CORR_CURR_INDEX(ui32NewSeqCount)]; + + /* + * The following reads must be done as close together as possible, because + * they represent the same current time sampled from different clock sources. 
+ */ +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + if (OSClockMonotonicns64(&psTimeCorr->ui64OSMonoTimeStamp) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "_RGXMakeTimeCorrData: System Monotonic Clock not available.")); + PVR_ASSERT(0); + } +#endif + psTimeCorr->ui64CRTimeStamp = RGXReadHWTimerReg(psDevInfo); + psTimeCorr->ui64OSTimeStamp = RGXTimeCorrGetClockns64(); + psTimeCorr->ui32CoreClockSpeed = _RGXGetEstimatedGPUClockSpeed(psDevInfo); + psTimeCorr->ui64CRDeltaToOSDeltaKNs = RGXTimeCorrGetConversionFactor(psTimeCorr->ui32CoreClockSpeed); + + if (psTimeCorr->ui64CRDeltaToOSDeltaKNs == 0) + { +#if defined(PVRSRV_TIMER_CORRELATION_HISTORY) + _DumpTimerCorrelationHistory(psDevInfo); +#endif + + /* Revert to original clock speed (error already printed) */ + psTimeCorr->ui32CoreClockSpeed = _RGXGetSystemLayerGPUClockSpeed(psDeviceNode); + psTimeCorr->ui64CRDeltaToOSDeltaKNs = RGXTimeCorrGetConversionFactor(psTimeCorr->ui32CoreClockSpeed); + } + + /* Make sure the values are written to memory before updating the index of the current entry */ + OSWriteMemoryBarrier(); + + /* Update the index of the current entry in the timer correlation array */ + psGpuUtilFWCB->ui32TimeCorrSeqCount = ui32NewSeqCount; + + PVR_DPF((PVR_DBG_MESSAGE, + "Timer correlation data (post %s event): OS %" IMG_UINT64_FMTSPEC " ns, " + "CR %" IMG_UINT64_FMTSPEC ", GPU freq. %u Hz (given as %u Hz)", + _EventToString(eEvent), + psTimeCorr->ui64OSTimeStamp, + psTimeCorr->ui64CRTimeStamp, + RGXFWIF_ROUND_TO_KHZ(psTimeCorr->ui32CoreClockSpeed), + _RGXGetSystemLayerGPUClockSpeed(psDeviceNode))); + + /* + * Don't log timing data to the HTB log after a power(-on) event. + * Otherwise this will be logged before the HTB partition marker, breaking + * the log sync grammar. This data will be automatically repeated when the + * partition marker is written. 
+ */ + HTBSyncScale(eEvent != RGXTIMECORR_EVENT_POWER, + psTimeCorr->ui64OSTimeStamp, + psTimeCorr->ui64CRTimeStamp, + psTimeCorr->ui32CoreClockSpeed); +} + +static void _RGXCheckTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_GPU_DVFS_TABLE *psGpuDVFSTable) +{ +#if !defined(NO_HARDWARE) && !defined(VIRTUAL_PLATFORM) && defined(DEBUG) +#define SCALING_FACTOR (10) + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb; + IMG_UINT32 ui32Index = RGXFWIF_TIME_CORR_CURR_INDEX(psGpuUtilFWCB->ui32TimeCorrSeqCount); + RGXFWIF_TIME_CORR *psTimeCorr = &psGpuUtilFWCB->sTimeCorr[ui32Index]; + IMG_UINT64 ui64EstimatedTime, ui64CRTimeStamp, ui64OSTimeStamp; + IMG_UINT64 ui64CRTimeDiff, ui64OSTimeDiff; + IMG_INT64 i64Diff; + IMG_UINT32 ui32Ratio, ui32Remainder; + + /* + * The following reads must be done as close together as possible, because + * they represent the same current time sampled from different clock sources. + */ + ui64CRTimeStamp = RGXReadHWTimerReg(psDevInfo); + ui64OSTimeStamp = RGXTimeCorrGetClockns64(); + + if ((ui64OSTimeStamp - psTimeCorr->ui64OSTimeStamp) < (1 << SCALING_FACTOR)) + { + /* + * Less than ~1us has passed since the timer correlation data was generated. + * A time frame this short is probably not enough to get an estimate + * of how good the timer correlation data was. + * Skip calculations for the above reason and to avoid a division by 0 below. 
+ */ + return; + } + + + /* Calculate an estimated timestamp based on the latest timer correlation data */ + ui64CRTimeDiff = ui64CRTimeStamp - psTimeCorr->ui64CRTimeStamp; + ui64OSTimeDiff = RGXFWIF_GET_DELTA_OSTIME_NS(ui64CRTimeDiff, + psTimeCorr->ui64CRDeltaToOSDeltaKNs); + ui64EstimatedTime = psTimeCorr->ui64OSTimeStamp + ui64OSTimeDiff; + + /* Get difference between estimated timestamp and current timestamp, in ns */ + i64Diff = ui64EstimatedTime - ui64OSTimeStamp; + + /* + * Calculate ratio between estimated time diff and real time diff: + * ratio% : 100% = (OSestimate - OStimecorr) : (OSreal - OStimecorr) + * + * The operands are scaled down (approximately from ns to us) so at least + * the divisor fits on 32 bit. + */ + ui32Ratio = OSDivide64(((ui64EstimatedTime - psTimeCorr->ui64OSTimeStamp) * 100ULL) >> SCALING_FACTOR, + (ui64OSTimeStamp - psTimeCorr->ui64OSTimeStamp) >> SCALING_FACTOR, + &ui32Remainder); + + PVR_DPF((PVR_DBG_MESSAGE, + "Estimated timestamp check: diff %" IMG_INT64_FMTSPECd " ns over " + "period %" IMG_UINT64_FMTSPEC " ns, estimated timer speed %u%%", + i64Diff, + ui64OSTimeStamp - psTimeCorr->ui64OSTimeStamp, + ui32Ratio)); + + /* Warn if the estimated timestamp is not within +/- 1% of the current time */ + if (ui32Ratio < 99 || ui32Ratio > 101) + { + PVR_DPF((PVR_DBG_WARNING, + "Estimated timestamps generated in the last %" IMG_UINT64_FMTSPEC " ns " + "were %s the real time (increasing at %u%% speed)", + ui64OSTimeStamp - psTimeCorr->ui64OSTimeStamp, + i64Diff > 0 ? "ahead of" : "behind", + ui32Ratio)); + + /* Higher ratio == higher delta OS == higher delta CR == frequency higher than expected (and viceversa) */ + PVR_DPF((PVR_DBG_WARNING, + "Current GPU frequency %u Hz (given as %u Hz) is probably %s than expected", + RGXFWIF_ROUND_TO_KHZ(psTimeCorr->ui32CoreClockSpeed), + _RGXGetSystemLayerGPUClockSpeed(psDeviceNode), + i64Diff > 0 ? 
"lower" : "higher")); + } +#else + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(psGpuDVFSTable); +#endif +} + +static inline IMG_UINT32 _RGXGPUFreqGetIndex(RGX_GPU_DVFS_TABLE *psGpuDVFSTable, IMG_UINT32 ui32CoreClockSpeed) +{ + IMG_UINT32 *paui32GPUFrequencies = psGpuDVFSTable->aui32GPUFrequency; + IMG_UINT32 i; + + for (i = 0; i < RGX_GPU_DVFS_TABLE_SIZE; i++) + { + if (paui32GPUFrequencies[i] == ui32CoreClockSpeed) + { + return i; + } + + if (paui32GPUFrequencies[i] == 0) + { + paui32GPUFrequencies[i] = ui32CoreClockSpeed; + return i; + } + } + + i--; + + PVR_DPF((PVR_DBG_ERROR, "GPU frequency table in the driver is full! " + "Table size should be increased! Overriding last entry (%u) with %u", + paui32GPUFrequencies[i], ui32CoreClockSpeed)); + + paui32GPUFrequencies[i] = ui32CoreClockSpeed; + + return i; +} + +static void _RGXGPUFreqCalibrationPeriodStart(PVRSRV_DEVICE_NODE *psDeviceNode, RGX_GPU_DVFS_TABLE *psGpuDVFSTable) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + GPU_FREQ_TRACKING_DATA *psTrackingData; + IMG_UINT32 ui32CoreClockSpeed, ui32Index; + + IMG_UINT64 ui64CRTimestamp = RGXReadHWTimerReg(psDevInfo); + IMG_UINT64 ui64OSTimestamp = RGXTimeCorrGetClockus64(); + + psGpuDVFSTable->ui64CalibrationCRTimestamp = ui64CRTimestamp; + psGpuDVFSTable->ui64CalibrationOSTimestamp = ui64OSTimestamp; + + ui32CoreClockSpeed = _RGXGetSystemLayerGPUClockSpeed(psDeviceNode); + ui32Index = _RGXGPUFreqGetIndex(psGpuDVFSTable, ui32CoreClockSpeed); + psTrackingData = &psGpuDVFSTable->asTrackingData[ui32Index]; + + /* Set the time needed to (re)calibrate the GPU frequency */ + if (psTrackingData->ui32CalibrationCount == 0) /* We never met this frequency */ + { + psTrackingData->ui32EstCoreClockSpeed = ui32CoreClockSpeed; + psGpuDVFSTable->ui32CalibrationPeriod = RGX_GPU_DVFS_FIRST_CALIBRATION_TIME_US; + } + else if (psTrackingData->ui32CalibrationCount == 1) /* We calibrated this frequency only once */ + { + 
psGpuDVFSTable->ui32CalibrationPeriod = RGX_GPU_DVFS_TRANSITION_CALIBRATION_TIME_US; + } + else + { + psGpuDVFSTable->ui32CalibrationPeriod = RGX_GPU_DVFS_PERIODIC_CALIBRATION_TIME_US; + } + + /* Update the index to the DVFS table */ + psGpuDVFSTable->ui32FreqIndex = ui32Index; + +#if defined(PVRSRV_TIMER_CORRELATION_HISTORY) + /* Update tracking history */ + { + GPU_FREQ_TRACKING_HISTORY *psTrackingHistory; + + psTrackingHistory = &psGpuDVFSTable->asTrackingHistory[psGpuDVFSTable->ui32HistoryIndex]; + psTrackingHistory->ui32CoreClockSpeed = ui32CoreClockSpeed; + psTrackingHistory->ui32EstCoreClockSpeed = psTrackingData->ui32EstCoreClockSpeed; + psTrackingHistory->ui64BeginCRTimestamp = ui64CRTimestamp; + psTrackingHistory->ui64BeginOSTimestamp = ui64OSTimestamp; + psTrackingHistory->ui64EndCRTimestamp = 0ULL; + psTrackingHistory->ui64EndOSTimestamp = 0ULL; + } +#endif +} + +static void _RGXGPUFreqCalibrationPeriodStop(PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_GPU_DVFS_TABLE *psGpuDVFSTable) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + IMG_UINT64 ui64CRTimestamp = RGXReadHWTimerReg(psDevInfo); + IMG_UINT64 ui64OSTimestamp = RGXTimeCorrGetClockus64(); + + psGpuDVFSTable->ui64CalibrationCRTimediff = + ui64CRTimestamp - psGpuDVFSTable->ui64CalibrationCRTimestamp; + psGpuDVFSTable->ui64CalibrationOSTimediff = + ui64OSTimestamp - psGpuDVFSTable->ui64CalibrationOSTimestamp; + + /* Check if the current timer correlation data is good enough */ + _RGXCheckTimeCorrData(psDeviceNode, psGpuDVFSTable); + +#if defined(PVRSRV_TIMER_CORRELATION_HISTORY) + /* Update tracking history */ + { + GPU_FREQ_TRACKING_HISTORY *psTrackingHistory; + + psTrackingHistory = &psGpuDVFSTable->asTrackingHistory[psGpuDVFSTable->ui32HistoryIndex]; + psTrackingHistory->ui64EndCRTimestamp = ui64CRTimestamp; + psTrackingHistory->ui64EndOSTimestamp = ui64OSTimestamp; + } +#endif +} + +static void _RGXGPUFreqCalibrationCalculate(PVRSRV_DEVICE_NODE *psDeviceNode, + RGX_GPU_DVFS_TABLE 
*psGpuDVFSTable, + RGXTIMECORR_EVENT eEvent) +{ +#if !defined(DISABLE_GPU_FREQUENCY_CALIBRATION) + GPU_FREQ_TRACKING_DATA *psTrackingData; + IMG_UINT32 ui32EstCoreClockSpeed, ui32PrevCoreClockSpeed; + IMG_INT32 i32Diff; + IMG_UINT32 ui32Remainder; + + /* + * Find out what the GPU frequency was in the last period. + * This should return a value very close to the frequency passed by the system layer. + */ + ui32EstCoreClockSpeed = + RGXFWIF_GET_GPU_CLOCK_FREQUENCY_HZ(psGpuDVFSTable->ui64CalibrationCRTimediff, + psGpuDVFSTable->ui64CalibrationOSTimediff, + ui32Remainder); + + /* Update GPU frequency used by the driver for a given system layer frequency */ + psTrackingData = &psGpuDVFSTable->asTrackingData[psGpuDVFSTable->ui32FreqIndex]; + + ui32PrevCoreClockSpeed = psTrackingData->ui32EstCoreClockSpeed; + psTrackingData->ui32EstCoreClockSpeed = ui32EstCoreClockSpeed; + psTrackingData->ui32CalibrationCount++; + + i32Diff = (IMG_INT32) (ui32EstCoreClockSpeed - ui32PrevCoreClockSpeed); + + if ((i32Diff < -1000000) || (i32Diff > 1000000)) + { + /* Warn if the frequency changed by more than 1 MHz between recalculations */ + PVR_DPF((PVR_DBG_WARNING, + "GPU frequency calibration of system layer frequency %u Hz (pre %s event): " + "more than 1 MHz difference between old and new value " + "(%u Hz -> %u Hz over %" IMG_UINT64_FMTSPEC " us)", + _RGXGetSystemLayerGPUClockSpeed(psDeviceNode), + _EventToString(eEvent), + RGXFWIF_ROUND_TO_KHZ(ui32PrevCoreClockSpeed), + RGXFWIF_ROUND_TO_KHZ(ui32EstCoreClockSpeed), + psGpuDVFSTable->ui64CalibrationOSTimediff)); + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, + "GPU frequency calibration of system layer frequency %u Hz (pre %s event): " + "%u Hz -> %u Hz done over %" IMG_UINT64_FMTSPEC " us", + _RGXGetSystemLayerGPUClockSpeed(psDeviceNode), + _EventToString(eEvent), + RGXFWIF_ROUND_TO_KHZ(ui32PrevCoreClockSpeed), + RGXFWIF_ROUND_TO_KHZ(ui32EstCoreClockSpeed), + psGpuDVFSTable->ui64CalibrationOSTimediff)); + } + + /* Reset time deltas to 
avoid recalibrating the same frequency over and over again */ + psGpuDVFSTable->ui64CalibrationCRTimediff = 0; + psGpuDVFSTable->ui64CalibrationOSTimediff = 0; + +#if defined(PVRSRV_TIMER_CORRELATION_HISTORY) + /* Update tracking history */ + { + GPU_FREQ_TRACKING_HISTORY *psTrackingHistory; + + psTrackingHistory = &psGpuDVFSTable->asTrackingHistory[psGpuDVFSTable->ui32HistoryIndex]; + psTrackingHistory->ui32EstCoreClockSpeed = ui32EstCoreClockSpeed; + psGpuDVFSTable->ui32HistoryIndex = + (psGpuDVFSTable->ui32HistoryIndex + 1) % RGX_GPU_FREQ_TRACKING_SIZE; + } +#endif + +#else + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(psGpuDVFSTable); + PVR_UNREFERENCED_PARAMETER(eEvent); +#endif +} + +void RGXTimeCorrBegin(IMG_HANDLE hDevHandle, RGXTIMECORR_EVENT eEvent) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable; + PVRSRV_VZ_RETN_IF_MODE(GUEST); + + _RGXGPUFreqCalibrationPeriodStart(psDeviceNode, psGpuDVFSTable); + _RGXMakeTimeCorrData(psDeviceNode, eEvent); +} + +void RGXTimeCorrEnd(IMG_HANDLE hDevHandle, RGXTIMECORR_EVENT eEvent) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable; + PVRSRV_VZ_RETN_IF_MODE(GUEST); + + _RGXGPUFreqCalibrationPeriodStop(psDeviceNode, psGpuDVFSTable); + + if (psGpuDVFSTable->ui64CalibrationOSTimediff >= psGpuDVFSTable->ui32CalibrationPeriod) + { + _RGXGPUFreqCalibrationCalculate(psDeviceNode, psGpuDVFSTable, eEvent); + } +} + +void RGXTimeCorrRestartPeriodic(IMG_HANDLE hDevHandle) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable; + IMG_UINT64 ui64TimeNow = RGXTimeCorrGetClockus64(); + PVRSRV_DEV_POWER_STATE ePowerState = 
PVRSRV_DEV_POWER_STATE_DEFAULT; + PVRSRV_VZ_RETN_IF_MODE(GUEST); + + if (psGpuDVFSTable == NULL) + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: Required data not initialised yet", __func__)); + return; + } + + /* Check if it's the right time to recalibrate the GPU clock frequency */ + if ((ui64TimeNow - psGpuDVFSTable->ui64CalibrationOSTimestamp) < psGpuDVFSTable->ui32CalibrationPeriod) return; + + /* Try to acquire the powerlock, if not possible then don't wait */ + if (PVRSRVPowerTryLock(psDeviceNode) != PVRSRV_OK) return; + + /* If the GPU is off then we can't do anything */ + PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); + if (ePowerState != PVRSRV_DEV_POWER_STATE_ON) + { + PVRSRVPowerUnlock(psDeviceNode); + return; + } + + /* All checks passed, we can calibrate and correlate */ + RGXTimeCorrEnd(psDeviceNode, RGXTIMECORR_EVENT_PERIODIC); + RGXTimeCorrBegin(psDeviceNode, RGXTIMECORR_EVENT_PERIODIC); + + PVRSRVPowerUnlock(psDeviceNode); +} + +/* + RGXTimeCorrGetClockSource +*/ +RGXTIMECORR_CLOCK_TYPE RGXTimeCorrGetClockSource(void) +{ + return g_ui32ClockSource; +} + +/* + RGXTimeCorrSetClockSource +*/ +PVRSRV_ERROR RGXTimeCorrSetClockSource(PVRSRV_DEVICE_NODE *psDeviceNode, + RGXTIMECORR_CLOCK_TYPE eClockType) +{ + return _SetClock(psDeviceNode, NULL, eClockType); +} + +PVRSRV_ERROR +PVRSRVRGXCurrentTime(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT64 * pui64Time) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + + *pui64Time = RGXTimeCorrGetClockns64(); + + return PVRSRV_OK; +} + +/****************************************************************************** + End of file (rgxtimecorr.c) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgxtimecorr.h b/drivers/gpu/drm/phytium/octopus/rgxtimecorr.h new file mode 100644 index 000000000000..b0bbca228214 --- /dev/null +++ 
b/drivers/gpu/drm/phytium/octopus/rgxtimecorr.h @@ -0,0 +1,269 @@ +/*************************************************************************/ /*! +@File +@Title RGX time correlation and calibration header file +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Header for the RGX time correlation and calibration routines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(RGXTIMECORR_H) +#define RGXTIMECORR_H + +#include "img_types.h" +#include "device.h" +#include "osfunc.h" +#include "connection_server.h" + +typedef enum +{ + RGXTIMECORR_CLOCK_MONO, + RGXTIMECORR_CLOCK_MONO_RAW, + RGXTIMECORR_CLOCK_SCHED, + + RGXTIMECORR_CLOCK_LAST +} RGXTIMECORR_CLOCK_TYPE; + +typedef enum +{ + RGXTIMECORR_EVENT_POWER, + RGXTIMECORR_EVENT_DVFS, + RGXTIMECORR_EVENT_PERIODIC, + RGXTIMECORR_EVENT_CLOCK_CHANGE +} RGXTIMECORR_EVENT; + +/* + * Calibrated GPU frequencies are rounded to the nearest multiple of 1 KHz + * before use, to reduce the noise introduced by calculations done with + * imperfect operands (correlated timers not sampled at exactly the same + * time, GPU CR timer incrementing only once every 256 GPU cycles). + * This also helps reducing the variation between consecutive calculations. 
+ */ +#define RGXFWIF_CONVERT_TO_KHZ(freq) (((freq) + 500) / 1000) +#define RGXFWIF_ROUND_TO_KHZ(freq) ((((freq) + 500) / 1000) * 1000) + +/* Constants used in different calculations */ +#define SECONDS_TO_MICROSECONDS (1000000ULL) +#define CRTIME_TO_CYCLES_WITH_US_SCALE (RGX_CRTIME_TICK_IN_CYCLES * SECONDS_TO_MICROSECONDS) + +/* + * Use this macro to get a more realistic GPU core clock speed than the one + * given by the upper layers (used when doing GPU frequency calibration) + */ +#define RGXFWIF_GET_GPU_CLOCK_FREQUENCY_HZ(deltacr_us, deltaos_us, remainder) \ + OSDivide64((deltacr_us) * CRTIME_TO_CYCLES_WITH_US_SCALE, (deltaos_us), &(remainder)) + + +/*! +****************************************************************************** + + @Function RGXTimeCorrGetConversionFactor + + @Description Generate constant used to convert a GPU time difference into + an OS time difference (for more info see rgx_fwif_km.h). + + @Input ui32ClockSpeed : GPU clock speed + + @Return 0 on failure, conversion factor otherwise + +******************************************************************************/ +static inline IMG_UINT64 RGXTimeCorrGetConversionFactor(IMG_UINT32 ui32ClockSpeed) +{ + IMG_UINT32 ui32Remainder; + + if (RGXFWIF_CONVERT_TO_KHZ(ui32ClockSpeed) == 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: GPU clock frequency %u is too low", + __func__, ui32ClockSpeed)); + + return 0; + } + + return OSDivide64r64(CRTIME_TO_CYCLES_WITH_US_SCALE << RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT, + RGXFWIF_CONVERT_TO_KHZ(ui32ClockSpeed), &ui32Remainder); +} + +/*! +****************************************************************************** + + @Function RGXTimeCorrBegin + + @Description Generate new timer correlation data, and start tracking + the current GPU frequency. 
+ + @Input hDevHandle : RGX Device Node + @Input eEvent : Event associated with the beginning of a timer + correlation period + + @Return void + +******************************************************************************/ +void RGXTimeCorrBegin(IMG_HANDLE hDevHandle, RGXTIMECORR_EVENT eEvent); + +/*! +****************************************************************************** + + @Function RGXTimeCorrEnd + + @Description Stop tracking the CPU and GPU timers, and if possible + recalculate the GPU frequency to a value which makes the timer + correlation data more accurate. + + @Input hDevHandle : RGX Device Node + @Input eEvent : Event associated with the end of a timer + correlation period + + @Return void + +******************************************************************************/ +void RGXTimeCorrEnd(IMG_HANDLE hDevHandle, RGXTIMECORR_EVENT eEvent); + +/*! +****************************************************************************** + + @Function RGXTimeCorrRestartPeriodic + + @Description Perform actions from RGXTimeCorrEnd and RGXTimeCorrBegin, + but only if enough time has passed since the last timer + correlation data was generated. + + @Input hDevHandle : RGX Device Node + + @Return void + +******************************************************************************/ +void RGXTimeCorrRestartPeriodic(IMG_HANDLE hDevHandle); + +/*! +****************************************************************************** + + @Function RGXTimeCorrGetClockns64 + + @Description Returns value of currently selected clock (in ns). + + @Return clock value from currently selected clock source + +******************************************************************************/ +IMG_UINT64 RGXTimeCorrGetClockns64(void); + +/*! +****************************************************************************** + + @Function RGXTimeCorrGetClockus64 + + @Description Returns value of currently selected clock (in us). 
+ + @Return clock value from currently selected clock source + +******************************************************************************/ +IMG_UINT64 RGXTimeCorrGetClockus64(void); + +/*! +****************************************************************************** + + @Function RGXTimeCorrGetClockSource + + @Description Returns currently selected clock source + + @Return clock source type + +******************************************************************************/ +RGXTIMECORR_CLOCK_TYPE RGXTimeCorrGetClockSource(void); + +/*! +****************************************************************************** + + @Function RGXTimeCorrSetClockSource + + @Description Sets clock source for correlation data. + + @Input psDeviceNode : RGX Device Node + @Input eClockType : clock source type + + @Return error code + +******************************************************************************/ +PVRSRV_ERROR RGXTimeCorrSetClockSource(PVRSRV_DEVICE_NODE *psDeviceNode, + RGXTIMECORR_CLOCK_TYPE eClockType); + +/*! +****************************************************************************** + + @Function RGXTimeCorrInitAppHintCallbacks + + @Description Initialise apphint callbacks for timer correlation + related apphints. + + @Input psDeviceNode : RGX Device Node + + @Return void + +******************************************************************************/ +void RGXTimeCorrInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode); + +/*! 
+****************************************************************************** + + @Function RGXGetTimeCorrData + + @Description Get a number of the most recent time correlation data points + + @Input psDeviceNode : RGX Device Node + @Output psTimeCorrs : Output array of RGXFWIF_TIME_CORR elements + for data to be written to + @Input ui32NumOut : Number of elements to be written out + + @Return void + +******************************************************************************/ +void RGXGetTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode, + RGXFWIF_TIME_CORR *psTimeCorrs, + IMG_UINT32 ui32NumOut); + +/**************************************************************************/ /*! +@Function PVRSRVRGXCurrentTime +@Description Returns the current state of the device timer +@Input psDevData Device data. +@Out pui64Time +@Return PVRSRV_OK on success. +*/ /***************************************************************************/ +PVRSRV_ERROR +PVRSRVRGXCurrentTime(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT64 * pui64Time); + +#endif /* RGXTIMECORR_H */ diff --git a/drivers/gpu/drm/phytium/octopus/rgxtimerquery.c b/drivers/gpu/drm/phytium/octopus/rgxtimerquery.c new file mode 100644 index 000000000000..3f683eb301b7 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxtimerquery.c @@ -0,0 +1,243 @@ +/*************************************************************************/ /*! +@File +@Title RGX Timer queries +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description RGX Timer queries +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "rgxtimerquery.h" +#include "rgxdevice.h" +#include "rgxtimecorr.h" + +#include "rgxfwutils.h" +#include "pdump_km.h" + +PVRSRV_ERROR +PVRSRVRGXBeginTimerQueryKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 ui32QueryId) +{ + PVRSRV_RGXDEV_INFO * psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + if (ui32QueryId >= RGX_MAX_TIMER_QUERIES) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + +#if !defined(PVRSRV_USE_BRIDGE_LOCK) + OSLockAcquire(psDevInfo->hTimerQueryLock); +#endif + + psDevInfo->bSaveStart = IMG_TRUE; + psDevInfo->bSaveEnd = IMG_TRUE; + + /* clear the stamps, in case there is no Kick */ + psDevInfo->pui64StartTimeById[ui32QueryId] = 0UL; + psDevInfo->pui64EndTimeById[ui32QueryId] = 0UL; + + /* save off the active query index */ + psDevInfo->ui32ActiveQueryId = ui32QueryId; + +#if !defined(PVRSRV_USE_BRIDGE_LOCK) + OSLockRelease(psDevInfo->hTimerQueryLock); +#endif + + return PVRSRV_OK; +} + + +PVRSRV_ERROR +PVRSRVRGXEndTimerQueryKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode) +{ + PVRSRV_RGXDEV_INFO * psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; + + PVR_UNREFERENCED_PARAMETER(psConnection); + +#if !defined(PVRSRV_USE_BRIDGE_LOCK) + OSLockAcquire(psDevInfo->hTimerQueryLock); +#endif + + /* clear off the flags set by Begin(). 
Note that _START_TIME is + * probably already cleared by Kick() + */ + psDevInfo->bSaveStart = IMG_FALSE; + psDevInfo->bSaveEnd = IMG_FALSE; + +#if !defined(PVRSRV_USE_BRIDGE_LOCK) + OSLockRelease(psDevInfo->hTimerQueryLock); +#endif + + return PVRSRV_OK; +} + + +PVRSRV_ERROR +PVRSRVRGXQueryTimerKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 ui32QueryId, + IMG_UINT64 * pui64StartTime, + IMG_UINT64 * pui64EndTime) +{ + PVRSRV_RGXDEV_INFO * psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; + IMG_UINT32 ui32Scheduled; + IMG_UINT32 ui32Completed; + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + if (ui32QueryId >= RGX_MAX_TIMER_QUERIES) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + +#if !defined(PVRSRV_USE_BRIDGE_LOCK) + OSLockAcquire(psDevInfo->hTimerQueryLock); +#endif + + ui32Scheduled = psDevInfo->aui32ScheduledOnId[ui32QueryId]; + ui32Completed = psDevInfo->pui32CompletedById[ui32QueryId]; + + /* if there was no kick since the Begin() on this id we return 0-s as Begin cleared + * the stamps. 
If there was no begin the returned data is undefined - but still + * safe from services pov + */ + if (ui32Completed >= ui32Scheduled) + { + * pui64StartTime = psDevInfo->pui64StartTimeById[ui32QueryId]; + * pui64EndTime = psDevInfo->pui64EndTimeById[ui32QueryId]; + + eError = PVRSRV_OK; + } + else + { + eError = PVRSRV_ERROR_RESOURCE_UNAVAILABLE; + } + +#if !defined(PVRSRV_USE_BRIDGE_LOCK) + OSLockRelease(psDevInfo->hTimerQueryLock); +#endif + return eError; +} + + + +/****************************************************************************** + NOT BRIDGED/EXPORTED FUNCS +******************************************************************************/ +/* writes a time stamp command in the client CCB */ +void +RGXWriteTimestampCommand(void ** ppvPtr, + RGXFWIF_CCB_CMD_TYPE eCmdType, + PRGXFWIF_TIMESTAMP_ADDR pAddr) +{ + RGXFWIF_CCB_CMD_HEADER * psHeader; + PRGXFWIF_TIMESTAMP_ADDR * psTimestampAddr; + + psHeader = (RGXFWIF_CCB_CMD_HEADER *) (*ppvPtr); + + PVR_ASSERT(eCmdType == RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP + || eCmdType == RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP); + + psHeader->eCmdType = eCmdType; + psHeader->ui32CmdSize = (sizeof(RGXFWIF_DEV_VIRTADDR) + RGXFWIF_FWALLOC_ALIGN - 1) & ~(RGXFWIF_FWALLOC_ALIGN - 1); + + (*ppvPtr) = IMG_OFFSET_ADDR(*ppvPtr, sizeof(RGXFWIF_CCB_CMD_HEADER)); + + psTimestampAddr = (PRGXFWIF_TIMESTAMP_ADDR *) *ppvPtr; + psTimestampAddr->ui32Addr = pAddr.ui32Addr; + + (*ppvPtr) = IMG_OFFSET_ADDR(*ppvPtr, psHeader->ui32CmdSize); +} + + +void +RGX_GetTimestampCmdHelper(PVRSRV_RGXDEV_INFO * psDevInfo, + PRGXFWIF_TIMESTAMP_ADDR * ppPreAddr, + PRGXFWIF_TIMESTAMP_ADDR * ppPostAddr, + PRGXFWIF_UFO_ADDR * ppUpdate) +{ + if (ppPreAddr != NULL) + { + if (psDevInfo->bSaveStart) + { + /* drop the SaveStart on the first Kick */ + psDevInfo->bSaveStart = IMG_FALSE; + + RGXSetFirmwareAddress(ppPreAddr, + psDevInfo->psStartTimeMemDesc, + sizeof(IMG_UINT64) * psDevInfo->ui32ActiveQueryId, + RFW_FWADDR_NOREF_FLAG); + } + else + { + ppPreAddr->ui32Addr = 
0; + } + } + + if (ppPostAddr != NULL && ppUpdate != NULL) + { + if (psDevInfo->bSaveEnd) + { + RGXSetFirmwareAddress(ppPostAddr, + psDevInfo->psEndTimeMemDesc, + sizeof(IMG_UINT64) * psDevInfo->ui32ActiveQueryId, + RFW_FWADDR_NOREF_FLAG); + + psDevInfo->aui32ScheduledOnId[psDevInfo->ui32ActiveQueryId]++; + + RGXSetFirmwareAddress(ppUpdate, + psDevInfo->psCompletedMemDesc, + sizeof(IMG_UINT32) * psDevInfo->ui32ActiveQueryId, + RFW_FWADDR_NOREF_FLAG); + } + else + { + ppUpdate->ui32Addr = 0; + ppPostAddr->ui32Addr = 0; + } + } +} + + +/****************************************************************************** + End of file (rgxtimerquery.c) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgxtimerquery.h b/drivers/gpu/drm/phytium/octopus/rgxtimerquery.h new file mode 100644 index 000000000000..b7151a42ec4a --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxtimerquery.h @@ -0,0 +1,123 @@ +/*************************************************************************/ /*! +@File +@Title RGX Timer queries +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Header for the RGX Timer queries functionality +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(RGX_TIMERQUERIES_H) +#define RGX_TIMERQUERIES_H + +#include "pvrsrv_error.h" +#include "img_types.h" +#include "device.h" +#include "rgxdevice.h" + +#include "connection_server.h" + +/*************************************************************************/ /*! +@Function PVRSRVRGXBeginTimerQueryKM +@Description Opens a new timer query. + +@Input ui32QueryId an identifier between [ 0 and RGX_MAX_TIMER_QUERIES - 1 ] +@Return PVRSRV_OK on success. 
+*/ /**************************************************************************/ +PVRSRV_ERROR +PVRSRVRGXBeginTimerQueryKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 ui32QueryId); + + +/*************************************************************************/ /*! +@Function PVRSRVRGXEndTimerQueryKM +@Description Closes a timer query + + The lack of ui32QueryId argument expresses the fact that there + can't be overlapping queries open. +@Return PVRSRV_OK on success. +*/ /**************************************************************************/ +PVRSRV_ERROR +PVRSRVRGXEndTimerQueryKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode); + + + +/*************************************************************************/ /*! +@Function PVRSRVRGXQueryTimerKM +@Description Queries the state of the specified timer + +@Input ui32QueryId an identifier between [ 0 and RGX_MAX_TIMER_QUERIES - 1 ] +@Out pui64StartTime +@Out pui64EndTime +@Return PVRSRV_OK on success. 
+ PVRSRV_ERROR_RESOURCE_UNAVAILABLE if the device is still busy with + operations from the queried period + other error code otherwise +*/ /**************************************************************************/ +PVRSRV_ERROR +PVRSRVRGXQueryTimerKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 ui32QueryId, + IMG_UINT64 * pui64StartTime, + IMG_UINT64 * pui64EndTime); + + + +/****************************************************************************** + NON BRIDGED/EXPORTED interface +******************************************************************************/ + +/* write the timestamp cmd from the helper*/ +void +RGXWriteTimestampCommand(void ** ppvCmd, + RGXFWIF_CCB_CMD_TYPE eCmdType, + PRGXFWIF_TIMESTAMP_ADDR pAddr); + +/* get the relevant data from the Kick to the helper*/ +void +RGX_GetTimestampCmdHelper(PVRSRV_RGXDEV_INFO * psDevInfo, + PRGXFWIF_TIMESTAMP_ADDR * ppPreAddr, + PRGXFWIF_TIMESTAMP_ADDR * ppPostAddr, + PRGXFWIF_UFO_ADDR * ppUpdate); + +#endif /* RGX_TIMERQUERIES_H */ + +/****************************************************************************** + End of file (rgxtimerquery.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgxtransfer.c b/drivers/gpu/drm/phytium/octopus/rgxtransfer.c new file mode 100644 index 000000000000..02fa430612c1 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxtransfer.c @@ -0,0 +1,1712 @@ +/*************************************************************************/ /*! +@File +@Title Device specific transfer queue routines +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Device specific functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "pdump_km.h" +#include "rgxdevice.h" +#include "rgxccb.h" +#include "rgxutils.h" +#include "rgxfwutils.h" +#include "rgxtransfer.h" +#include "rgx_tq_shared.h" +#include "rgxmem.h" +#include "allocmem.h" +#include "devicemem.h" +#include "devicemem_pdump.h" +#include "osfunc.h" +#include "pvr_debug.h" +#include "pvrsrv.h" +#include "rgx_fwif_resetframework.h" +#include "rgx_memallocflags.h" +#include "rgxhwperf.h" +#include "ospvr_gputrace.h" +#include "htbuffer.h" +#include "rgxshader.h" + +#include "pdump_km.h" + +#include "sync_server.h" +#include "sync_internal.h" +#include "sync.h" +#include "rgx_bvnc_defs_km.h" + +#if defined(SUPPORT_BUFFER_SYNC) +#include "pvr_buffer_sync.h" +#endif + +#include "sync_checkpoint.h" +#include "sync_checkpoint_internal.h" + +#include "rgxtimerquery.h" + +/* Enable this to dump the compiled list of UFOs prior to kick call */ +#define ENABLE_TQ_UFO_DUMP 0 + +//#define TRANSFER_CHECKPOINT_DEBUG 1 + +#if defined(TRANSFER_CHECKPOINT_DEBUG) +#define CHKPT_DBG(X) PVR_DPF(X) +#else +#define CHKPT_DBG(X) +#endif + +typedef struct { + DEVMEM_MEMDESC *psFWContextStateMemDesc; + RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; + IMG_UINT32 ui32Priority; +#if defined(SUPPORT_BUFFER_SYNC) + struct pvr_buffer_sync_context *psBufferSyncContext; +#endif +} RGX_SERVER_TQ_3D_DATA; + +typedef struct { + RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; + IMG_UINT32 ui32Priority; +#if defined(SUPPORT_BUFFER_SYNC) + struct pvr_buffer_sync_context *psBufferSyncContext; +#endif +} RGX_SERVER_TQ_2D_DATA; + +struct _RGX_SERVER_TQ_CONTEXT_ { + PVRSRV_DEVICE_NODE *psDeviceNode; + DEVMEM_MEMDESC *psFWFrameworkMemDesc; + DEVMEM_MEMDESC *psFWTransferContextMemDesc; + IMG_UINT32 ui32Flags; +#define RGX_SERVER_TQ_CONTEXT_FLAGS_2D (1<<0) +#define RGX_SERVER_TQ_CONTEXT_FLAGS_3D (1<<1) + RGX_SERVER_TQ_3D_DATA s3DData; + RGX_SERVER_TQ_2D_DATA s2DData; + DLLIST_NODE 
sListNode; + ATOMIC_T hIntJobRef; + IMG_UINT32 ui32PDumpFlags; + /* per-prepare sync address lists */ + SYNC_ADDR_LIST asSyncAddrListFence[TQ_MAX_PREPARES_PER_SUBMIT]; + SYNC_ADDR_LIST asSyncAddrListUpdate[TQ_MAX_PREPARES_PER_SUBMIT]; + POS_LOCK hLock; +}; + +/* + Static functions used by transfer context code +*/ +static PVRSRV_ERROR _Create3DTransferContext(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEM_MEMDESC *psAllocatedMemDesc, + IMG_UINT32 ui32AllocatedOffset, + DEVMEM_MEMDESC *psFWMemContextMemDesc, + IMG_UINT32 ui32Priority, + RGX_COMMON_CONTEXT_INFO *psInfo, + RGX_SERVER_TQ_3D_DATA *ps3DData, + IMG_UINT32 ui32CCBAllocSizeLog2, + IMG_UINT32 ui32CCBMaxAllocSizeLog2, + IMG_UINT32 ui32ContextFlags) +{ + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + IMG_UINT ui3DRegISPStateStoreSize = 0; + IMG_UINT uiNumISPStoreRegs = 1; /* default value 1 expected */ + /* + Allocate device memory for the firmware GPU context suspend state. + Note: the FW reads/writes the state to memory by accessing the GPU register interface. 
+ */ + PDUMPCOMMENT("Allocate RGX firmware TQ/3D context suspend state"); + + if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, XE_MEMORY_HIERARCHY)) + { + uiNumISPStoreRegs = psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode, + RGX_FEATURE_NUM_ISP_IPP_PIPES_IDX); + } + + /* Calculate the size of the 3DCTX ISP state */ + ui3DRegISPStateStoreSize = sizeof(RGXFWIF_3DCTX_STATE) + + uiNumISPStoreRegs * sizeof(((RGXFWIF_3DCTX_STATE *)0)->au3DReg_ISP_STORE[0]); + +#if defined(SUPPORT_BUFFER_SYNC) + ps3DData->psBufferSyncContext = + pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice, + "rogue-tq3d"); + if (IS_ERR(ps3DData->psBufferSyncContext)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed to create buffer_sync context (err=%ld)", + __func__, PTR_ERR(ps3DData->psBufferSyncContext))); + + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto fail_buffer_sync_context_create; + } +#endif + + eError = DevmemFwAllocate(psDevInfo, + ui3DRegISPStateStoreSize, + RGX_FWCOMCTX_ALLOCFLAGS, + "FwTQ3DContext", + &ps3DData->psFWContextStateMemDesc); + if (eError != PVRSRV_OK) + { + goto fail_contextswitchstate; + } + + eError = FWCommonContextAllocate(psConnection, + psDeviceNode, + REQ_TYPE_TQ_3D, + RGXFWIF_DM_3D, + NULL, + psAllocatedMemDesc, + ui32AllocatedOffset, + psFWMemContextMemDesc, + ps3DData->psFWContextStateMemDesc, + ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TQ3D_CCB_SIZE_LOG2, + ui32CCBMaxAllocSizeLog2 ? 
ui32CCBMaxAllocSizeLog2 : RGX_TQ3D_CCB_MAX_SIZE_LOG2, + ui32ContextFlags, + ui32Priority, + UINT_MAX, /* max deadline MS */ + 0, /* robustness address */ + psInfo, + &ps3DData->psServerCommonContext); + if (eError != PVRSRV_OK) + { + goto fail_contextalloc; + } + + + PDUMPCOMMENT("Dump 3D context suspend state buffer"); + DevmemPDumpLoadMem(ps3DData->psFWContextStateMemDesc, 0, sizeof(RGXFWIF_3DCTX_STATE), PDUMP_FLAGS_CONTINUOUS); + + ps3DData->ui32Priority = ui32Priority; + return PVRSRV_OK; + +fail_contextalloc: + DevmemFwUnmapAndFree(psDevInfo, ps3DData->psFWContextStateMemDesc); +fail_contextswitchstate: +#if defined(SUPPORT_BUFFER_SYNC) + pvr_buffer_sync_context_destroy(ps3DData->psBufferSyncContext); + ps3DData->psBufferSyncContext = NULL; +fail_buffer_sync_context_create: +#endif + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + + +static PVRSRV_ERROR _Create2DTransferContext(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + DEVMEM_MEMDESC *psFWMemContextMemDesc, + IMG_UINT32 ui32Priority, + RGX_COMMON_CONTEXT_INFO *psInfo, + RGX_SERVER_TQ_2D_DATA *ps2DData, + IMG_UINT32 ui32CCBAllocSizeLog2, + IMG_UINT32 ui32CCBMaxAllocSizeLog2, + IMG_UINT32 ui32ContextFlags) +{ + PVRSRV_ERROR eError; + +#if defined(SUPPORT_BUFFER_SYNC) + ps2DData->psBufferSyncContext = + pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice, + "rogue-tqtla"); + if (IS_ERR(ps2DData->psBufferSyncContext)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed to create buffer_sync context (err=%ld)", + __func__, PTR_ERR(ps2DData->psBufferSyncContext))); + + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto fail_buffer_sync_context_create; + } +#endif + + eError = FWCommonContextAllocate(psConnection, + psDeviceNode, + REQ_TYPE_TQ_2D, + RGXFWIF_DM_2D, + NULL, + NULL, + 0, + psFWMemContextMemDesc, + NULL, + ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TQ2D_CCB_SIZE_LOG2, + ui32CCBMaxAllocSizeLog2 ? 
ui32CCBMaxAllocSizeLog2 : RGX_TQ2D_CCB_MAX_SIZE_LOG2, + ui32ContextFlags, + ui32Priority, + UINT_MAX, /* max deadline MS */ + 0, /* robustness address */ + psInfo, + &ps2DData->psServerCommonContext); + if (eError != PVRSRV_OK) + { + goto fail_contextalloc; + } + + ps2DData->ui32Priority = ui32Priority; + return PVRSRV_OK; + +fail_contextalloc: +#if defined(SUPPORT_BUFFER_SYNC) + pvr_buffer_sync_context_destroy(ps2DData->psBufferSyncContext); + ps2DData->psBufferSyncContext = NULL; +fail_buffer_sync_context_create: +#endif + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + + +static PVRSRV_ERROR _Destroy2DTransferContext(RGX_SERVER_TQ_2D_DATA *ps2DData, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + + /* Check if the FW has finished with this resource ... */ + eError = RGXFWRequestCommonContextCleanUp(psDeviceNode, + ps2DData->psServerCommonContext, + RGXFWIF_DM_2D, + ui32PDumpFlags); + if (eError == PVRSRV_ERROR_RETRY) + { + return eError; + } + else if (eError != PVRSRV_OK) + { + PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", + __func__, + PVRSRVGetErrorString(eError))); + return eError; + } + + /* ... it has so we can free it's resources */ + FWCommonContextFree(ps2DData->psServerCommonContext); + ps2DData->psServerCommonContext = NULL; + +#if defined(SUPPORT_BUFFER_SYNC) + pvr_buffer_sync_context_destroy(ps2DData->psBufferSyncContext); + ps2DData->psBufferSyncContext = NULL; +#endif + + return PVRSRV_OK; +} + +static PVRSRV_ERROR _Destroy3DTransferContext(RGX_SERVER_TQ_3D_DATA *ps3DData, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eError; + + /* Check if the FW has finished with this resource ... 
*/ + eError = RGXFWRequestCommonContextCleanUp(psDeviceNode, + ps3DData->psServerCommonContext, + RGXFWIF_DM_3D, + ui32PDumpFlags); + if (eError == PVRSRV_ERROR_RETRY) + { + return eError; + } + else if (eError != PVRSRV_OK) + { + PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", + __func__, + PVRSRVGetErrorString(eError))); + return eError; + } + + /* ... it has so we can free it's resources */ + DevmemFwUnmapAndFree(psDeviceNode->pvDevice, ps3DData->psFWContextStateMemDesc); + FWCommonContextFree(ps3DData->psServerCommonContext); + ps3DData->psServerCommonContext = NULL; + +#if defined(SUPPORT_BUFFER_SYNC) + pvr_buffer_sync_context_destroy(ps3DData->psBufferSyncContext); + ps3DData->psBufferSyncContext = NULL; +#endif + + return PVRSRV_OK; +} + + +/* + * PVRSRVCreateTransferContextKM + */ +PVRSRV_ERROR PVRSRVRGXCreateTransferContextKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32Priority, + IMG_UINT32 ui32FrameworkCommandSize, + IMG_PBYTE pabyFrameworkCommand, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32PackedCCBSizeU8888, + IMG_UINT32 ui32ContextFlags, + RGX_SERVER_TQ_CONTEXT **ppsTransferContext, + PMR **ppsCLIPMRMem, + PMR **ppsUSCPMRMem) +{ + RGX_SERVER_TQ_CONTEXT *psTransferContext; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); + RGX_COMMON_CONTEXT_INFO sInfo; + PVRSRV_ERROR eError = PVRSRV_OK; + + /* Allocate the server side structure */ + *ppsTransferContext = NULL; + psTransferContext = OSAllocZMem(sizeof(*psTransferContext)); + if (psTransferContext == NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* + Create the FW transfer context, this has the TQ common + context embedded within it + */ + eError = DevmemFwAllocate(psDevInfo, + sizeof(RGXFWIF_FWTRANSFERCONTEXT), + RGX_FWCOMCTX_ALLOCFLAGS, + "FwTransferContext", + &psTransferContext->psFWTransferContextMemDesc); + if 
(eError != PVRSRV_OK) + { + goto fail_fwtransfercontext; + } + + eError = OSLockCreate(&psTransferContext->hLock); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_createlock; + } + + psTransferContext->psDeviceNode = psDeviceNode; + + if (ui32FrameworkCommandSize) + { + /* + * Create the FW framework buffer + */ + eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode, + &psTransferContext->psFWFrameworkMemDesc, + ui32FrameworkCommandSize); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to allocate firmware GPU framework state (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_frameworkcreate; + } + + /* Copy the Framework client data into the framework buffer */ + eError = PVRSRVRGXFrameworkCopyCommand(psTransferContext->psFWFrameworkMemDesc, + pabyFrameworkCommand, + ui32FrameworkCommandSize); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to populate the framework buffer (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_frameworkcopy; + } + + sInfo.psFWFrameworkMemDesc = psTransferContext->psFWFrameworkMemDesc; + } + + eError = _Create3DTransferContext(psConnection, + psDeviceNode, + psTransferContext->psFWTransferContextMemDesc, + offsetof(RGXFWIF_FWTRANSFERCONTEXT, sTQContext), + psFWMemContextMemDesc, + ui32Priority, + &sInfo, + &psTransferContext->s3DData, + U32toU8_Unpack3(ui32PackedCCBSizeU8888), + U32toU8_Unpack4(ui32PackedCCBSizeU8888), + ui32ContextFlags); + if (eError != PVRSRV_OK) + { + goto fail_3dtransfercontext; + } + psTransferContext->ui32Flags |= RGX_SERVER_TQ_CONTEXT_FLAGS_3D; + + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA)) + { + eError = _Create2DTransferContext(psConnection, + psDeviceNode, + psFWMemContextMemDesc, + ui32Priority, + &sInfo, + &psTransferContext->s2DData, + U32toU8_Unpack1(ui32PackedCCBSizeU8888), + U32toU8_Unpack2(ui32PackedCCBSizeU8888), + 
ui32ContextFlags); + if (eError != PVRSRV_OK) + { + goto fail_2dtransfercontext; + } + psTransferContext->ui32Flags |= RGX_SERVER_TQ_CONTEXT_FLAGS_2D; + } + + PVRSRVTQAcquireShaders(psDeviceNode, ppsCLIPMRMem, ppsUSCPMRMem); + + { + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + OSWRLockAcquireWrite(psDevInfo->hTransferCtxListLock); + dllist_add_to_tail(&(psDevInfo->sTransferCtxtListHead), &(psTransferContext->sListNode)); + OSWRLockReleaseWrite(psDevInfo->hTransferCtxListLock); + *ppsTransferContext = psTransferContext; + } + + return PVRSRV_OK; + +fail_2dtransfercontext: + _Destroy3DTransferContext(&psTransferContext->s3DData, + psTransferContext->psDeviceNode, + psTransferContext->ui32PDumpFlags); +fail_3dtransfercontext: +fail_frameworkcopy: + if (psTransferContext->psFWFrameworkMemDesc) + { + DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc); + } +fail_frameworkcreate: + OSLockDestroy(psTransferContext->hLock); +fail_createlock: + DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWTransferContextMemDesc); +fail_fwtransfercontext: + OSFreeMem(psTransferContext); + PVR_ASSERT(eError != PVRSRV_OK); + *ppsTransferContext = NULL; + return eError; +} + +PVRSRV_ERROR PVRSRVRGXDestroyTransferContextKM(RGX_SERVER_TQ_CONTEXT *psTransferContext) +{ + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo = psTransferContext->psDeviceNode->pvDevice; + IMG_UINT32 i; + + /* remove node from list before calling destroy - as destroy, if successful + * will invalidate the node + * must be re-added if destroy fails + */ + OSWRLockAcquireWrite(psDevInfo->hTransferCtxListLock); + dllist_remove_node(&(psTransferContext->sListNode)); + OSWRLockReleaseWrite(psDevInfo->hTransferCtxListLock); + + if ((psTransferContext->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_2D) && + (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA))) + { + eError = _Destroy2DTransferContext(&psTransferContext->s2DData, + psTransferContext->psDeviceNode, + PDUMP_FLAGS_CONTINUOUS); + if 
(eError != PVRSRV_OK) + { + goto fail_destroy2d; + } + /* We've freed the 2D context, don't try to free it again */ + psTransferContext->ui32Flags &= ~RGX_SERVER_TQ_CONTEXT_FLAGS_2D; + } + + if (psTransferContext->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_3D) + { + eError = _Destroy3DTransferContext(&psTransferContext->s3DData, + psTransferContext->psDeviceNode, + PDUMP_FLAGS_CONTINUOUS); + if (eError != PVRSRV_OK) + { + goto fail_destroy3d; + } + /* We've freed the 3D context, don't try to free it again */ + psTransferContext->ui32Flags &= ~RGX_SERVER_TQ_CONTEXT_FLAGS_3D; + } + + /* free any resources within the per-prepare UFO address stores */ + for (i = 0; i < TQ_MAX_PREPARES_PER_SUBMIT; i++) + { + SyncAddrListDeinit(&psTransferContext->asSyncAddrListFence[i]); + SyncAddrListDeinit(&psTransferContext->asSyncAddrListUpdate[i]); + } + + if (psTransferContext->psFWFrameworkMemDesc) + { + DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc); + } + + DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWTransferContextMemDesc); + + OSLockDestroy(psTransferContext->hLock); + + OSFreeMem(psTransferContext); + + return PVRSRV_OK; + +fail_destroy3d: + +fail_destroy2d: + OSWRLockAcquireWrite(psDevInfo->hTransferCtxListLock); + dllist_add_to_tail(&(psDevInfo->sTransferCtxtListHead), &(psTransferContext->sListNode)); + OSWRLockReleaseWrite(psDevInfo->hTransferCtxListLock); + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +/* + * PVRSRVSubmitTQ3DKickKM + */ +PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, + IMG_UINT32 ui32ClientCacheOpSeqNum, + IMG_UINT32 ui32PrepareCount, + IMG_UINT32 *paui32ClientUpdateCount, + SYNC_PRIMITIVE_BLOCK ***papauiClientUpdateUFODevVarBlock, + IMG_UINT32 **papaui32ClientUpdateSyncOffset, + IMG_UINT32 **papaui32ClientUpdateValue, + PVRSRV_FENCE iCheckFence, + PVRSRV_TIMELINE i2DUpdateTimeline, + PVRSRV_FENCE *pi2DUpdateFence, + PVRSRV_TIMELINE i3DUpdateTimeline, + PVRSRV_FENCE 
*pi3DUpdateFence, + IMG_CHAR szFenceName[32], + IMG_UINT32 *paui32FWCommandSize, + IMG_UINT8 **papaui8FWCommand, + IMG_UINT32 *pui32TQPrepareFlags, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32SyncPMRCount, + IMG_UINT32 *paui32SyncPMRFlags, + PMR **ppsSyncPMRs) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = psTransferContext->psDeviceNode; + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + RGX_CCB_CMD_HELPER_DATA *pas3DCmdHelper; + RGX_CCB_CMD_HELPER_DATA *pas2DCmdHelper; + IMG_UINT32 ui323DCmdCount = 0; + IMG_UINT32 ui322DCmdCount = 0; + IMG_UINT32 ui323DCmdLast = 0; + IMG_UINT32 ui322DCmdLast = 0; + IMG_UINT32 ui323DCmdOffset = 0; + IMG_UINT32 ui322DCmdOffset = 0; + IMG_UINT32 ui32PDumpFlags = PDUMP_FLAGS_NONE; + IMG_UINT32 i; + IMG_UINT64 uiCheckFenceUID = 0; + IMG_UINT64 ui2DUpdateFenceUID = 0; + IMG_UINT64 ui3DUpdateFenceUID = 0; + + PSYNC_CHECKPOINT ps2DUpdateSyncCheckpoint = NULL; + PSYNC_CHECKPOINT ps3DUpdateSyncCheckpoint = NULL; + PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL; + IMG_UINT32 ui32FenceSyncCheckpointCount = 0; + IMG_UINT32 *pui322DIntAllocatedUpdateValues = NULL; + IMG_UINT32 *pui323DIntAllocatedUpdateValues = NULL; + PVRSRV_CLIENT_SYNC_PRIM *ps2DFenceTimelineUpdateSync = NULL; + PVRSRV_CLIENT_SYNC_PRIM *ps3DFenceTimelineUpdateSync = NULL; + IMG_UINT32 ui322DFenceTimelineUpdateValue = 0; + IMG_UINT32 ui323DFenceTimelineUpdateValue = 0; + void *pv2DUpdateFenceFinaliseData = NULL; + void *pv3DUpdateFenceFinaliseData = NULL; +#if defined(SUPPORT_BUFFER_SYNC) + PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL; + struct pvr_buffer_sync_append_data *psBufferSyncData = NULL; + PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL; + IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0; +#endif /* defined(SUPPORT_BUFFER_SYNC) */ + + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_ERROR eError2; + PVRSRV_FENCE i2DUpdateFence = PVRSRV_NO_FENCE; + PVRSRV_FENCE i3DUpdateFence = PVRSRV_NO_FENCE; + IMG_UINT32 ui32IntJobRef = 
OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal); + IMG_UINT32 ui32PreparesDone = 0; + + + PRGXFWIF_TIMESTAMP_ADDR pPreAddr; + PRGXFWIF_TIMESTAMP_ADDR pPostAddr; + PRGXFWIF_UFO_ADDR pRMWUFOAddr; + + RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psDeviceNode->pvDevice, + &pPreAddr, + &pPostAddr, + &pRMWUFOAddr); + + if (i2DUpdateTimeline != PVRSRV_NO_TIMELINE && !pi2DUpdateFence) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + if (i3DUpdateTimeline != PVRSRV_NO_TIMELINE && !pi3DUpdateFence) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Validate sync prim fence/update value ptrs + * for each prepare. + */ + { + IMG_UINT32 ui32Prepare; + IMG_UINT32 *pui32UpdateCount = paui32ClientUpdateCount; + IMG_UINT32 **papui32UpdateValue = papaui32ClientUpdateValue; + + /* Check that we have not been given a null ptr for + * update count parameters. + */ + PVR_LOG_RETURN_IF_FALSE((paui32ClientUpdateCount != NULL), + "paui32ClientUpdateCount NULL", + PVRSRV_ERROR_INVALID_PARAMS); + + for (ui32Prepare=0; ui32Prepare 0) + { + PVR_LOG_RETURN_IF_FALSE(*papui32UpdateValue != NULL, + "paui32ClientUpdateValue NULL but " + "ui32ClientUpdateCount > 0", + PVRSRV_ERROR_INVALID_PARAMS); + } + /* Advance local ptr to update values ptr for next prepare. */ + papui32UpdateValue++; + /* Advance local ptr to update count for next prepare. 
*/ + pui32UpdateCount++; + } + } + + /* Ensure the string is null-terminated (Required for safety) */ + szFenceName[31] = '\0'; + + if ((ui32PrepareCount == 0) || (ui32PrepareCount > TQ_MAX_PREPARES_PER_SUBMIT)) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (ui32SyncPMRCount != 0) + { + if (!ppsSyncPMRs) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + +#if defined(SUPPORT_BUFFER_SYNC) + /* PMR sync is valid only when there is no batching */ + if ((ui32PrepareCount != 1)) +#endif + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + } + + OSLockAcquire(psTransferContext->hLock); + + /* We can't allocate the required amount of stack space on all consumer architectures */ + pas3DCmdHelper = OSAllocMem(sizeof(*pas3DCmdHelper) * ui32PrepareCount); + if (pas3DCmdHelper == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_alloc3dhelper; + } + pas2DCmdHelper = OSAllocMem(sizeof(*pas2DCmdHelper) * ui32PrepareCount); + if (pas2DCmdHelper == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_alloc2dhelper; + } + + if (iCheckFence != PVRSRV_NO_FENCE) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), psDeviceNode->hSyncCheckpointContext=<%p>...", __func__, iCheckFence, (void*)psDeviceNode->hSyncCheckpointContext)); + /* Resolve the sync checkpoints that make up the input fence */ + eError = SyncCheckpointResolveFence(psDeviceNode->hSyncCheckpointContext, + iCheckFence, + &ui32FenceSyncCheckpointCount, + &apsFenceSyncCheckpoints, + &uiCheckFenceUID, + ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", __func__, eError)); + goto fail_resolve_fencesync_input_fence; + } + CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __func__, iCheckFence, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints)); +#if defined(TRANSFER_CHECKPOINT_DEBUG) + if (ui32FenceSyncCheckpointCount > 0) + { + 
IMG_UINT32 ii; + for (ii=0; ii", __func__, ii, (void*)psNextCheckpoint)); + } + } +#endif + } + /* + Ensure we do the right thing for server syncs which cross call boundaries + */ + for (i=0;iasSyncAddrListFence[i]; + SYNC_ADDR_LIST *psSyncAddrListUpdate = &psTransferContext->asSyncAddrListUpdate[i]; + IMG_UINT32 ui32IntClientFenceCount = 0U; + IMG_UINT32 ui32IntClientUpdateCount = paui32ClientUpdateCount[i]; + IMG_UINT32 *paui32IntUpdateValue = papaui32ClientUpdateValue[i]; +#if defined(SUPPORT_BUFFER_SYNC) + struct pvr_buffer_sync_context *psBufferSyncContext; +#endif + + PVRSRV_FENCE *piUpdateFence = NULL; + PVRSRV_TIMELINE iUpdateTimeline = PVRSRV_NO_TIMELINE; + void **ppvUpdateFenceFinaliseData = NULL; + PSYNC_CHECKPOINT * ppsUpdateSyncCheckpoint = NULL; + PVRSRV_CLIENT_SYNC_PRIM **ppsFenceTimelineUpdateSync = NULL; + IMG_UINT32 *pui32FenceTimelineUpdateValue = NULL; + IMG_UINT32 **ppui32IntAllocatedUpdateValues = NULL; + IMG_BOOL bCheckFence = IMG_FALSE; + IMG_BOOL bUpdateFence = IMG_FALSE; + IMG_UINT64 *puiUpdateFenceUID = NULL; + + IMG_BOOL bCCBStateOpen = IMG_FALSE; + + if (TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[i], 3D)) + { + psServerCommonCtx = psTransferContext->s3DData.psServerCommonContext; + psClientCCB = FWCommonContextGetClientCCB(psServerCommonCtx); + pszCommandName = "TQ-3D"; + psCmdHelper = &pas3DCmdHelper[ui323DCmdCount++]; + eType = RGXFWIF_CCB_CMD_TYPE_TQ_3D; +#if defined(SUPPORT_BUFFER_SYNC) + psBufferSyncContext = psTransferContext->s3DData.psBufferSyncContext; +#endif + bCheckFence = ui323DCmdCount == 1; + bUpdateFence = ui323DCmdCount == ui323DCmdLast + && i3DUpdateTimeline != PVRSRV_NO_TIMELINE; + + if (bUpdateFence) + { + piUpdateFence = &i3DUpdateFence; + iUpdateTimeline = i3DUpdateTimeline; + ppvUpdateFenceFinaliseData = &pv3DUpdateFenceFinaliseData; + ppsUpdateSyncCheckpoint = &ps3DUpdateSyncCheckpoint; + ppsFenceTimelineUpdateSync = &ps3DFenceTimelineUpdateSync; + pui32FenceTimelineUpdateValue = 
&ui323DFenceTimelineUpdateValue; + ppui32IntAllocatedUpdateValues = &pui323DIntAllocatedUpdateValues; + puiUpdateFenceUID = &ui3DUpdateFenceUID; + } + } + else if (TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[i], 2D) && + (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA))) + { + psServerCommonCtx = psTransferContext->s2DData.psServerCommonContext; + psClientCCB = FWCommonContextGetClientCCB(psServerCommonCtx); + pszCommandName = "TQ-2D"; + psCmdHelper = &pas2DCmdHelper[ui322DCmdCount++]; + eType = RGXFWIF_CCB_CMD_TYPE_TQ_2D; +#if defined(SUPPORT_BUFFER_SYNC) + psBufferSyncContext = psTransferContext->s2DData.psBufferSyncContext; +#endif + bCheckFence = ui322DCmdCount == 1; + bUpdateFence = ui322DCmdCount == ui322DCmdLast + && i2DUpdateTimeline != PVRSRV_NO_TIMELINE; + + if (bUpdateFence) + { + piUpdateFence = &i2DUpdateFence; + iUpdateTimeline = i2DUpdateTimeline; + ppvUpdateFenceFinaliseData = &pv2DUpdateFenceFinaliseData; + ppsUpdateSyncCheckpoint = &ps2DUpdateSyncCheckpoint; + ppsFenceTimelineUpdateSync = &ps2DFenceTimelineUpdateSync; + pui32FenceTimelineUpdateValue = &ui322DFenceTimelineUpdateValue; + ppui32IntAllocatedUpdateValues = &pui322DIntAllocatedUpdateValues; + puiUpdateFenceUID = &ui2DUpdateFenceUID; + } + } + else + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto fail_prepare_loop; + } + + if (i == 0) + { + ui32PDumpFlags = ((pui32TQPrepareFlags[i] & TQ_PREP_FLAGS_PDUMPCONTINUOUS) != 0) ? PDUMP_FLAGS_CONTINUOUS : PDUMP_FLAGS_NONE; + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, + "%s Command Server Submit on FWCtx %08x", pszCommandName, FWCommonContextGetFWAddress(psServerCommonCtx).ui32Addr); + psTransferContext->ui32PDumpFlags |= ui32PDumpFlags; + } + else + { + IMG_UINT32 ui32NewPDumpFlags = ((pui32TQPrepareFlags[i] & TQ_PREP_FLAGS_PDUMPCONTINUOUS) != 0) ? 
PDUMP_FLAGS_CONTINUOUS : PDUMP_FLAGS_NONE; + if (ui32NewPDumpFlags != ui32PDumpFlags) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + PVR_DPF((PVR_DBG_ERROR, "%s: Mixing of continuous and non-continuous command in a batch is not permitted", __func__)); + goto fail_prepare_loop; + } + } + + CHKPT_DBG((PVR_DBG_ERROR, "%s: SyncAddrListPopulate(psTransferContext->sSyncAddrListFence, %d fences)", __func__, ui32IntClientFenceCount)); + eError = SyncAddrListPopulate(psSyncAddrListFence, + 0, + NULL, + NULL); + if (eError != PVRSRV_OK) + { + goto fail_prepare_loop; + } + + CHKPT_DBG((PVR_DBG_ERROR, "%s: SyncAddrListPopulate(psTransferContext->asSyncAddrListUpdate[], %d updates)", __func__, ui32IntClientUpdateCount)); + eError = SyncAddrListPopulate(psSyncAddrListUpdate, + ui32IntClientUpdateCount, + papauiClientUpdateUFODevVarBlock[i], + papaui32ClientUpdateSyncOffset[i]); + if (eError != PVRSRV_OK) + { + goto fail_prepare_loop; + } + if (!pauiIntUpdateUFOAddress) + { + pauiIntUpdateUFOAddress = psSyncAddrListUpdate->pasFWAddrs; + } + + CHKPT_DBG((PVR_DBG_ERROR, "%s: (after sync prims) ui32IntClientUpdateCount=%d", __func__, ui32IntClientUpdateCount)); + if (ui32SyncPMRCount) + { +#if defined(SUPPORT_BUFFER_SYNC) + int err; + + CHKPT_DBG((PVR_DBG_ERROR, "%s: Calling pvr_buffer_sync_resolve_and_create_fences", __func__)); + err = pvr_buffer_sync_resolve_and_create_fences(psBufferSyncContext, + psTransferContext->psDeviceNode->hSyncCheckpointContext, + ui32SyncPMRCount, + ppsSyncPMRs, + paui32SyncPMRFlags, + &ui32BufferFenceSyncCheckpointCount, + &apsBufferFenceSyncCheckpoints, + &psBufferUpdateSyncCheckpoint, + &psBufferSyncData); + if (err) + { + switch (err) + { + case -EINTR: + eError = PVRSRV_ERROR_RETRY; + break; + case -ENOMEM: + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + break; + default: + eError = PVRSRV_ERROR_INVALID_PARAMS; + break; + } + + if (eError != PVRSRV_ERROR_RETRY) + { + PVR_DPF((PVR_DBG_ERROR, "%s: pvr_buffer_sync_resolve_and_create_fences failed (%s)", 
__func__, PVRSRVGetErrorString(eError))); + } + goto fail_resolve_buffersync_input_fence; + } + + /* Append buffer sync fences */ + if (ui32BufferFenceSyncCheckpointCount > 0) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d buffer sync checkpoints to TQ Fence (psSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>)...", __func__, ui32BufferFenceSyncCheckpointCount, (void*)psSyncAddrListFence , (void*)pauiIntFenceUFOAddress)); + SyncAddrListAppendAndDeRefCheckpoints(psSyncAddrListFence, + ui32BufferFenceSyncCheckpointCount, + apsBufferFenceSyncCheckpoints); + if (!pauiIntFenceUFOAddress) + { + pauiIntFenceUFOAddress = psSyncAddrListFence->pasFWAddrs; + } + ui32IntClientFenceCount += ui32BufferFenceSyncCheckpointCount; + } + + if (psBufferUpdateSyncCheckpoint) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 buffer sync checkpoint<%p> to TQ Update (&psTransferContext->asSyncAddrListUpdate[i]=<%p>, pauiIntUpdateUFOAddress=<%p>)...", __func__, (void*)psBufferUpdateSyncCheckpoint, (void*)psSyncAddrListUpdate , (void*)pauiIntUpdateUFOAddress)); + /* Append the update (from output fence) */ + SyncAddrListAppendCheckpoints(psSyncAddrListUpdate, + 1, + &psBufferUpdateSyncCheckpoint); + if (!pauiIntUpdateUFOAddress) + { + pauiIntUpdateUFOAddress = psSyncAddrListUpdate->pasFWAddrs; + } + ui32IntClientUpdateCount++; + } + CHKPT_DBG((PVR_DBG_ERROR, "%s: (after buffer_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __func__, ui32IntClientFenceCount, ui32IntClientUpdateCount)); +#else /* defined(SUPPORT_BUFFER_SYNC) */ + PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync not supported but got %u buffers", __func__, ui32SyncPMRCount)); + PVR_DPF((PVR_DBG_ERROR, "%s: <--EXIT(%d)", __func__, PVRSRV_ERROR_INVALID_PARAMS)); + OSLockRelease(psTransferContext->hLock); + return PVRSRV_ERROR_INVALID_PARAMS; +#endif /* defined(SUPPORT_BUFFER_SYNC) */ + } + + /* Create the output fence (if required) */ + if (bUpdateFence) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: calling 
SyncCheckpointCreateFence (piUpdateFence=%p, iUpdateTimeline=%d, psTranserContext->psDeviceNode->hSyncCheckpointContext=<%p>)", __func__, piUpdateFence, iUpdateTimeline, (void*)psDeviceNode->hSyncCheckpointContext)); + eError = SyncCheckpointCreateFence(psDeviceNode, + szFenceName, + iUpdateTimeline, + psDeviceNode->hSyncCheckpointContext, + piUpdateFence, + puiUpdateFenceUID, + ppvUpdateFenceFinaliseData, + ppsUpdateSyncCheckpoint, + (void*)ppsFenceTimelineUpdateSync, + pui32FenceTimelineUpdateValue, + ui32PDumpFlags); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: SyncCheckpointCreateFence failed (%s)", + __func__, + PVRSRVGetErrorString(eError))); + goto fail_prepare_loop; + } + + CHKPT_DBG((PVR_DBG_ERROR, "%s: returned from SyncCheckpointCreateFence (piUpdateFence=%p)", __func__, piUpdateFence)); + + /* Append the sync prim update for the timeline (if required) */ + if (*ppsFenceTimelineUpdateSync) + { + IMG_UINT32 *pui32TimelineUpdateWp = NULL; + + /* Allocate memory to hold the list of update values (including our timeline update) */ + *ppui32IntAllocatedUpdateValues = OSAllocMem(sizeof(**ppui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1)); + if (!*ppui32IntAllocatedUpdateValues) + { + /* Failed to allocate memory */ + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_prepare_loop; + } + OSCachedMemSet(*ppui32IntAllocatedUpdateValues, 0xbb, sizeof(**ppui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1)); +#if defined(SUPPORT_BUFFER_SYNC) + if (psBufferUpdateSyncCheckpoint) + { + /* Copy the update values into the new memory, then append our timeline update value */ + OSCachedMemCopy(*ppui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(**ppui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount-1)); + pui32TimelineUpdateWp = *ppui32IntAllocatedUpdateValues + (ui32IntClientUpdateCount-1); + } + else +#endif + { + /* Copy the update values into the new memory, then append our timeline update value */ + 
OSCachedMemCopy(*ppui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(**ppui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount); + pui32TimelineUpdateWp = *ppui32IntAllocatedUpdateValues + ui32IntClientUpdateCount; + } + CHKPT_DBG((PVR_DBG_ERROR, "%s: Appending the additional update value 0x%x)", __func__, *pui32FenceTimelineUpdateValue)); + /* Now set the additional update value */ + *pui32TimelineUpdateWp = *pui32FenceTimelineUpdateValue; +#if defined(TRANSFER_CHECKPOINT_DEBUG) + if (ui32IntClientUpdateCount > 0) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)*ppui32IntAllocatedUpdateValues; + + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + /* Now append the timeline sync prim addr to the transfer context update list */ + SyncAddrListAppendSyncPrim(psSyncAddrListUpdate, + *ppsFenceTimelineUpdateSync); + ui32IntClientUpdateCount++; +#if defined(TRANSFER_CHECKPOINT_DEBUG) + if (ui32IntClientUpdateCount > 0) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)*ppui32IntAllocatedUpdateValues; + + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + /* Ensure paui32IntUpdateValue is now pointing to our new array of update values */ + CHKPT_DBG((PVR_DBG_ERROR, "%s: set paui32IntUpdateValue<%p> to point to *ppui32IntAllocatedUpdateValues<%p>", __func__, (void*)paui32IntUpdateValue, (void*)*ppui32IntAllocatedUpdateValues)); + paui32IntUpdateValue = *ppui32IntAllocatedUpdateValues; + } + } + + if (bCheckFence && ui32FenceSyncCheckpointCount) + { + /* Append the checks (from input fence) */ + if (ui32FenceSyncCheckpointCount > 0) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to TQ Fence (psSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)psSyncAddrListFence)); + SyncAddrListAppendCheckpoints(psSyncAddrListFence, + ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); + if 
(!pauiIntFenceUFOAddress) + { + pauiIntFenceUFOAddress = psSyncAddrListFence->pasFWAddrs; + } + ui32IntClientFenceCount += ui32FenceSyncCheckpointCount; + } +#if defined(TRANSFER_CHECKPOINT_DEBUG) + if (ui32IntClientFenceCount > 0) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress; + + for (iii=0; iiipasFWAddrs[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + } + if (bUpdateFence && *ppsUpdateSyncCheckpoint) + { + /* Append the update (from output fence) */ + CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint to TQ Update (psSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>)...", __func__, (void*)&psTransferContext->asSyncAddrListUpdate , (void*)pauiIntUpdateUFOAddress)); + SyncAddrListAppendCheckpoints(psSyncAddrListUpdate, + 1, + ppsUpdateSyncCheckpoint); + if (!pauiIntUpdateUFOAddress) + { + pauiIntUpdateUFOAddress = psSyncAddrListUpdate->pasFWAddrs; + } + ui32IntClientUpdateCount++; +#if defined(TRANSFER_CHECKPOINT_DEBUG) + { + IMG_UINT32 iii; + IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntUpdateUFOAddress; + + for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); + pui32Tmp++; + } + } +#endif + } + CHKPT_DBG((PVR_DBG_ERROR, "%s: (after pvr_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __func__, ui32IntClientFenceCount, ui32IntClientUpdateCount)); + +#if (ENABLE_TQ_UFO_DUMP == 1) + PVR_DPF((PVR_DBG_ERROR, "%s: dumping TQ fence/updates syncs...", __func__)); + { + IMG_UINT32 ii; + PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress; + PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress; + IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue; + + /* Dump Fence syncs and Update syncs */ + PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TQ fence syncs (&psTransferContext->asSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __func__, ui32IntClientFenceCount, (void*)&psTransferContext->asSyncAddrListFence, 
(void*)pauiIntFenceUFOAddress)); + for (ii=0; iiui32Addr & 0x1); + PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr)); + + psTmpIntFenceUFOAddress++; + } + PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TQ update syncs (&psTransferContext->asSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __func__, ui32IntClientUpdateCount, (void*)&psTransferContext->asSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress)); + for (ii=0; iiui32Addr & 0x1) + { + PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr)); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue)); + pui32TmpIntUpdateValue++; + } + psTmpIntUpdateUFOAddress++; + } + } +#endif + + ui32PreparesDone++; + + /* + Create the command helper data for this command + */ + RGXCmdHelperInitCmdCCB(psClientCCB, + 0, + ui32IntClientFenceCount, + pauiIntFenceUFOAddress, + NULL, /* fence value */ + ui32IntClientUpdateCount, + pauiIntUpdateUFOAddress, + paui32IntUpdateValue, + paui32FWCommandSize[i], + papaui8FWCommand[i], + &pPreAddr, + &pPostAddr, + &pRMWUFOAddr, + eType, + ui32ExtJobRef, + ui32IntJobRef, + ui32PDumpFlags, + NULL, + pszCommandName, + bCCBStateOpen, + psCmdHelper); + } + + /* + Acquire space for all the commands in one go + */ + if (ui323DCmdCount) + { + eError = RGXCmdHelperAcquireCmdCCB(ui323DCmdCount, + &pas3DCmdHelper[0]); + if (eError != PVRSRV_OK) + { + goto fail_cmdacquire; + } + } + + if (ui322DCmdCount) + { + eError = RGXCmdHelperAcquireCmdCCB(ui322DCmdCount, + &pas2DCmdHelper[0]); + if (eError != PVRSRV_OK) + { + goto 
fail_cmdacquire; + } + } + + /* + We should acquire the kernel CCB(s) space here as the schedule could fail + and we would have to roll back all the syncs + */ + + if (ui323DCmdCount) + { + ui323DCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->s3DData.psServerCommonContext)); + RGXCmdHelperReleaseCmdCCB(ui323DCmdCount, + &pas3DCmdHelper[0], + "TQ_3D", + FWCommonContextGetFWAddress(psTransferContext->s3DData.psServerCommonContext).ui32Addr); + } + + if ((ui322DCmdCount) && (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA))) + { + ui322DCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->s2DData.psServerCommonContext)); + RGXCmdHelperReleaseCmdCCB(ui322DCmdCount, + &pas2DCmdHelper[0], + "TQ_2D", + FWCommonContextGetFWAddress(psTransferContext->s2DData.psServerCommonContext).ui32Addr); + } + + if (ui323DCmdCount) + { + RGXFWIF_KCCB_CMD s3DKCCBCmd; + IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psTransferContext->s3DData.psServerCommonContext).ui32Addr; + RGX_CLIENT_CCB *ps3DTQCCB = FWCommonContextGetClientCCB(psTransferContext->s3DData.psServerCommonContext); + + RGX_CCB_CMD_HELPER_DATA *psCmdHelper = &pas3DCmdHelper[ui323DCmdCount]; + CMD_COMMON *psTransferCmdCmn = IMG_OFFSET_ADDR(psCmdHelper->pui8DMCmd, 0); + + /* Construct the kernel 3D CCB command. 
*/ + s3DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; + s3DKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->s3DData.psServerCommonContext); + s3DKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(ps3DTQCCB); + s3DKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(ps3DTQCCB); + s3DKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; + s3DKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0; + + HTBLOGK(HTB_SF_MAIN_KICK_3D, + s3DKCCBCmd.uCmdData.sCmdKickData.psContext, + ui323DCmdOffset, + psTransferCmdCmn->ui32FrameNum, + ui32ExtJobRef, + ui32IntJobRef + ); + + RGXSRV_HWPERF_ENQ(psTransferContext, + OSGetCurrentClientProcessIDKM(), + ui32FWCtx, + ui32ExtJobRef, + ui32IntJobRef, + RGX_HWPERF_KICK_TYPE_TQ3D, + iCheckFence, + i3DUpdateFence, + i3DUpdateTimeline, + uiCheckFenceUID, + ui3DUpdateFenceUID, + NO_DEADLINE, + NO_CYCEST); + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError2 = RGXScheduleCommand(psDevInfo, + RGXFWIF_DM_3D, + &s3DKCCBCmd, + ui32ClientCacheOpSeqNum, + ui32PDumpFlags); + if (eError2 != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + PVRGpuTraceEnqueueEvent(psDeviceNode, ui32FWCtx, ui32ExtJobRef, + ui32IntJobRef, RGX_HWPERF_KICK_TYPE_TQ3D); + } + + if ((ui322DCmdCount) && (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA))) + { + RGXFWIF_KCCB_CMD s2DKCCBCmd; + IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psTransferContext->s2DData.psServerCommonContext).ui32Addr; + RGX_CLIENT_CCB *ps2DTQCCB = FWCommonContextGetClientCCB(psTransferContext->s2DData.psServerCommonContext); + RGX_CCB_CMD_HELPER_DATA *psCmdHelper = &pas2DCmdHelper[ui322DCmdCount]; + CMD_COMMON *psTransferCmdCmn = IMG_OFFSET_ADDR(psCmdHelper->pui8DMCmd, 0); + + /* Construct the kernel 2D CCB command. 
*/ + s2DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; + s2DKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->s2DData.psServerCommonContext); + s2DKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(ps2DTQCCB); + s2DKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(ps2DTQCCB); + s2DKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; + + HTBLOGK(HTB_SF_MAIN_KICK_2D, + s2DKCCBCmd.uCmdData.sCmdKickData.psContext, + ui322DCmdOffset, + psTransferCmdCmn->ui32FrameNum, + ui32ExtJobRef, + ui32IntJobRef); + + RGXSRV_HWPERF_ENQ(psTransferContext, + OSGetCurrentClientProcessIDKM(), + ui32FWCtx, + ui32ExtJobRef, + ui32IntJobRef, + RGX_HWPERF_KICK_TYPE_TQ2D, + iCheckFence, + i2DUpdateFence, + i2DUpdateTimeline, + uiCheckFenceUID, + ui2DUpdateFenceUID, + NO_DEADLINE, + NO_CYCEST); + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + eError2 = RGXScheduleCommand(psDevInfo, + RGXFWIF_DM_2D, + &s2DKCCBCmd, + ui32ClientCacheOpSeqNum, + ui32PDumpFlags); + if (eError2 != PVRSRV_ERROR_RETRY) + { + break; + } + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); + } END_LOOP_UNTIL_TIMEOUT(); + + PVRGpuTraceEnqueueEvent(psDeviceNode, ui32FWCtx, ui32ExtJobRef, + ui32IntJobRef, RGX_HWPERF_KICK_TYPE_TQ2D); + } + + /* + * Now check eError (which may have returned an error from our earlier calls + * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first + * so we check it now... 
+ */ + if (eError != PVRSRV_OK ) + { + goto fail_cmdacquire; + } + +#if defined(NO_HARDWARE) + /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */ + if (ps2DUpdateSyncCheckpoint) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Signalling TLA NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __func__, (void*)ps2DUpdateSyncCheckpoint, SyncCheckpointGetId(ps2DUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(ps2DUpdateSyncCheckpoint))); + SyncCheckpointSignalNoHW(ps2DUpdateSyncCheckpoint); + } + if (ps2DFenceTimelineUpdateSync) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Updating TLA NOHW sync prim<%p> to %d", __func__, (void*)ps2DFenceTimelineUpdateSync, ui322DFenceTimelineUpdateValue)); + SyncPrimNoHwUpdate(ps2DFenceTimelineUpdateSync, ui322DFenceTimelineUpdateValue); + } + if (ps3DUpdateSyncCheckpoint) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Signalling TQ3D NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __func__, (void*)ps3DUpdateSyncCheckpoint, SyncCheckpointGetId(ps3DUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(ps3DUpdateSyncCheckpoint))); + SyncCheckpointSignalNoHW(ps3DUpdateSyncCheckpoint); + } + if (ps3DFenceTimelineUpdateSync) + { + CHKPT_DBG((PVR_DBG_ERROR, "%s: Updating TQ3D NOHW sync prim<%p> to %d", __func__, (void*)ps3DFenceTimelineUpdateSync, ui323DFenceTimelineUpdateValue)); + SyncPrimNoHwUpdate(ps3DFenceTimelineUpdateSync, ui323DFenceTimelineUpdateValue); + } + SyncCheckpointNoHWUpdateTimelines(NULL); +#endif /* defined(NO_HARDWARE) */ + +#if defined(SUPPORT_BUFFER_SYNC) + if (psBufferSyncData) + { + pvr_buffer_sync_kick_succeeded(psBufferSyncData); + } + if (apsBufferFenceSyncCheckpoints) + { + kfree(apsBufferFenceSyncCheckpoints); + } +#endif /* defined(SUPPORT_BUFFER_SYNC) */ + + if (pi2DUpdateFence) + { + *pi2DUpdateFence = i2DUpdateFence; + } + if (pi3DUpdateFence) + { + *pi3DUpdateFence = i3DUpdateFence; + } + if (pv2DUpdateFenceFinaliseData && (i2DUpdateFence != PVRSRV_NO_FENCE)) + { + SyncCheckpointFinaliseFence(psDeviceNode, 
i2DUpdateFence, pv2DUpdateFenceFinaliseData, + ps2DUpdateSyncCheckpoint, szFenceName); + } + if (pv3DUpdateFenceFinaliseData && (i3DUpdateFence != PVRSRV_NO_FENCE)) + { + SyncCheckpointFinaliseFence(psDeviceNode, i3DUpdateFence, pv3DUpdateFenceFinaliseData, + ps3DUpdateSyncCheckpoint, szFenceName); + } + + OSFreeMem(pas2DCmdHelper); + OSFreeMem(pas3DCmdHelper); + + /* Drop the references taken on the sync checkpoints in the + * resolved input fence */ + SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); + /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ + if (apsFenceSyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); + } + /* Free memory allocated to hold the internal list of update values */ + if (pui322DIntAllocatedUpdateValues) + { + OSFreeMem(pui322DIntAllocatedUpdateValues); + pui322DIntAllocatedUpdateValues = NULL; + } + if (pui323DIntAllocatedUpdateValues) + { + OSFreeMem(pui323DIntAllocatedUpdateValues); + pui323DIntAllocatedUpdateValues = NULL; + } + + OSLockRelease(psTransferContext->hLock); + return PVRSRV_OK; + +/* + No resources are created in this function so there is nothing to free + unless we had to merge syncs. + If we fail after the client CCB acquire there is still nothing to do + as only the client CCB release will modify the client CCB +*/ +fail_cmdacquire: +fail_prepare_loop: + + PVR_ASSERT(eError != PVRSRV_OK); + + for (i=0;iasSyncAddrListFence[i]); + SyncAddrListRollbackCheckpoints(psDeviceNode, &psTransferContext->asSyncAddrListUpdate[i]); + } +#if defined(SUPPORT_BUFFER_SYNC) + if (ui32PreparesDone > 0) + { + /* Prevent duplicate rollback in case of buffer sync. 
*/ + psBufferUpdateSyncCheckpoint = NULL; + } +#endif + + /* Free memory allocated to hold the internal list of update values */ + if (pui322DIntAllocatedUpdateValues) + { + OSFreeMem(pui322DIntAllocatedUpdateValues); + pui322DIntAllocatedUpdateValues = NULL; + } + if (pui323DIntAllocatedUpdateValues) + { + OSFreeMem(pui323DIntAllocatedUpdateValues); + pui323DIntAllocatedUpdateValues = NULL; + } + + if (i2DUpdateFence != PVRSRV_NO_FENCE) + { + SyncCheckpointRollbackFenceData(i2DUpdateFence, pv2DUpdateFenceFinaliseData); + } + if (i3DUpdateFence != PVRSRV_NO_FENCE) + { + SyncCheckpointRollbackFenceData(i3DUpdateFence, pv3DUpdateFenceFinaliseData); + } +#if defined(SUPPORT_BUFFER_SYNC) + if (psBufferUpdateSyncCheckpoint) + { + SyncAddrListRollbackCheckpoints(psDeviceNode, &psTransferContext->asSyncAddrListUpdate[0]); + } + if (psBufferSyncData) + { + pvr_buffer_sync_kick_failed(psBufferSyncData); + } + if (apsBufferFenceSyncCheckpoints) + { + kfree(apsBufferFenceSyncCheckpoints); + } +fail_resolve_buffersync_input_fence: +#endif /* defined(SUPPORT_BUFFER_SYNC) */ + + /* Drop the references taken on the sync checkpoints in the + * resolved input fence */ + SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, + apsFenceSyncCheckpoints); + /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ + if (apsFenceSyncCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); + } +fail_resolve_fencesync_input_fence: + OSFreeMem(pas2DCmdHelper); +fail_alloc2dhelper: + OSFreeMem(pas3DCmdHelper); +fail_alloc3dhelper: + + OSLockRelease(psTransferContext->hLock); + return eError; +} + + +PVRSRV_ERROR PVRSRVRGXSetTransferContextPriorityKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDevNode, + RGX_SERVER_TQ_CONTEXT *psTransferContext, + IMG_UINT32 ui32Priority) +{ + PVRSRV_ERROR eError; + PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice; + + PVR_UNREFERENCED_PARAMETER(psDevNode); + + 
OSLockAcquire(psTransferContext->hLock); + + if ((psTransferContext->s2DData.ui32Priority != ui32Priority) && + (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA))) + { + eError = ContextSetPriority(psTransferContext->s2DData.psServerCommonContext, + psConnection, + psTransferContext->psDeviceNode->pvDevice, + ui32Priority, + RGXFWIF_DM_2D); + if (eError != PVRSRV_OK) + { + if (eError != PVRSRV_ERROR_RETRY) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the 2D part of the transfercontext (%s)", __func__, PVRSRVGetErrorString(eError))); + } + goto fail_2dcontext; + } + psTransferContext->s2DData.ui32Priority = ui32Priority; + } + + if (psTransferContext->s3DData.ui32Priority != ui32Priority) + { + eError = ContextSetPriority(psTransferContext->s3DData.psServerCommonContext, + psConnection, + psTransferContext->psDeviceNode->pvDevice, + ui32Priority, + RGXFWIF_DM_3D); + if (eError != PVRSRV_OK) + { + if (eError != PVRSRV_ERROR_RETRY) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the 3D part of the transfercontext (%s)", __func__, PVRSRVGetErrorString(eError))); + } + goto fail_3dcontext; + } + psTransferContext->s3DData.ui32Priority = ui32Priority; + } + + OSLockRelease(psTransferContext->hLock); + return PVRSRV_OK; + +fail_3dcontext: + +fail_2dcontext: + OSLockRelease(psTransferContext->hLock); + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +PVRSRV_ERROR PVRSRVRGXSetTransferContextPropertyKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, + RGX_CONTEXT_PROPERTY eContextProperty, + IMG_UINT64 ui64Input, + IMG_UINT64 *pui64Output) +{ + PVRSRV_ERROR eError; + PVRSRV_ERROR eError2 = PVRSRV_OK; + + switch (eContextProperty) + { + case RGX_CONTEXT_PROPERTY_FLAGS: + { + OSLockAcquire(psTransferContext->hLock); + eError = FWCommonContextSetFlags(psTransferContext->s2DData.psServerCommonContext, + (IMG_UINT32)ui64Input); + if (eError == PVRSRV_OK) + { + eError2 = FWCommonContextSetFlags(psTransferContext->s3DData.psServerCommonContext, + 
(IMG_UINT32)ui64Input); + } + if ((eError == PVRSRV_OK) && (eError2 == PVRSRV_OK)) + { + psTransferContext->ui32Flags = (IMG_UINT32)ui64Input; + } + OSLockRelease(psTransferContext->hLock); + PVR_LOG_IF_ERROR(eError, "FWCommonContextSetFlags eError"); + PVR_LOG_IF_ERROR(eError2, "FWCommonContextSetFlags eError2"); + break; + } + + default: + { + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, eContextProperty)); + eError = PVRSRV_ERROR_NOT_SUPPORTED; + } + } + + return eError; +} + +void DumpTransferCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + IMG_UINT32 ui32VerbLevel) +{ + DLLIST_NODE *psNode, *psNext; + + OSWRLockAcquireRead(psDevInfo->hTransferCtxListLock); + + dllist_foreach_node(&psDevInfo->sTransferCtxtListHead, psNode, psNext) + { + RGX_SERVER_TQ_CONTEXT *psCurrentServerTransferCtx = + IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_CONTEXT, sListNode); + + if ((psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_2D) && + (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA))) + { + DumpFWCommonContextInfo(psCurrentServerTransferCtx->s2DData.psServerCommonContext, + pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); + } + + if (psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_3D) + { + DumpFWCommonContextInfo(psCurrentServerTransferCtx->s3DData.psServerCommonContext, + pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); + } + } + + OSWRLockReleaseRead(psDevInfo->hTransferCtxListLock); +} + +IMG_UINT32 CheckForStalledClientTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo) +{ + DLLIST_NODE *psNode, *psNext; + IMG_UINT32 ui32ContextBitMask = 0; + + OSWRLockAcquireRead(psDevInfo->hTransferCtxListLock); + + dllist_foreach_node(&psDevInfo->sTransferCtxtListHead, psNode, psNext) + { + RGX_SERVER_TQ_CONTEXT *psCurrentServerTransferCtx = + IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_CONTEXT, sListNode); + + if 
((psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_2D) && + (NULL != psCurrentServerTransferCtx->s2DData.psServerCommonContext) && + (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA))) + { + if (CheckStalledClientCommonContext(psCurrentServerTransferCtx->s2DData.psServerCommonContext, RGX_KICK_TYPE_DM_TQ2D) == PVRSRV_ERROR_CCCB_STALLED) + { + ui32ContextBitMask |= RGX_KICK_TYPE_DM_TQ2D; + } + } + + if ((psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_3D) && (NULL != psCurrentServerTransferCtx->s3DData.psServerCommonContext)) + { + if ((CheckStalledClientCommonContext(psCurrentServerTransferCtx->s3DData.psServerCommonContext, RGX_KICK_TYPE_DM_TQ3D) == PVRSRV_ERROR_CCCB_STALLED)) + { + ui32ContextBitMask |= RGX_KICK_TYPE_DM_TQ3D; + } + } + } + + OSWRLockReleaseRead(psDevInfo->hTransferCtxListLock); + return ui32ContextBitMask; +} + +/**************************************************************************//** + End of file (rgxtransfer.c) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgxtransfer.h b/drivers/gpu/drm/phytium/octopus/rgxtransfer.h new file mode 100644 index 000000000000..c10e9f64fff9 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxtransfer.h @@ -0,0 +1,153 @@ +/*************************************************************************/ /*! +@File +@Title RGX Transfer queue Functionality +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Header for the RGX Transfer queue Functionality +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(RGXTRANSFER_H) +#define RGXTRANSFER_H + +#include "devicemem.h" +#include "device.h" +#include "rgxdevice.h" +#include "rgxfwutils.h" +#include "rgx_fwif_resetframework.h" +#include "rgxdebug.h" +#include "pvr_notifier.h" + +#include "sync_server.h" +#include "connection_server.h" + +typedef struct _RGX_SERVER_TQ_CONTEXT_ RGX_SERVER_TQ_CONTEXT; + +/*! +******************************************************************************* + + @Function PVRSRVRGXCreateTransferContextKM + + @Description + Server-side implementation of RGXCreateTransferContext + + @Input pvDeviceNode - device node + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXCreateTransferContextKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32Priority, + IMG_UINT32 ui32FrameworkCommandSize, + IMG_PBYTE pabyFrameworkCommand, + IMG_HANDLE hMemCtxPrivData, + IMG_UINT32 ui32PackedCCBSizeU8888, + IMG_UINT32 ui32ContextFlags, + RGX_SERVER_TQ_CONTEXT **ppsTransferContext, + PMR **ppsCLIPMRMem, + PMR **ppsUSCPMRMem); + + +/*! +******************************************************************************* + + @Function PVRSRVRGXDestroyTransferContextKM + + @Description + Server-side implementation of RGXDestroyTransferContext + + @Input psTransferContext - Transfer context + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXDestroyTransferContextKM(RGX_SERVER_TQ_CONTEXT *psTransferContext); + +/*! 
+******************************************************************************* + + @Function PVRSRVSubmitTransferKM + + @Description + Schedules one or more 2D or 3D HW commands on the firmware + + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, + IMG_UINT32 ui32ClientCacheOpSeqNum, + IMG_UINT32 ui32PrepareCount, + IMG_UINT32 *paui32ClientUpdateCount, + SYNC_PRIMITIVE_BLOCK ***papauiClientUpdateUFODevVarBlock, + IMG_UINT32 **papaui32ClientUpdateSyncOffset, + IMG_UINT32 **papaui32ClientUpdateValue, + PVRSRV_FENCE iCheckFence, + PVRSRV_TIMELINE i2DUpdateTimeline, + PVRSRV_FENCE *pi2DUpdateFence, + PVRSRV_TIMELINE i3DUpdateTimeline, + PVRSRV_FENCE *pi3DUpdateFence, + IMG_CHAR szFenceName[32], + IMG_UINT32 *paui32FWCommandSize, + IMG_UINT8 **papaui8FWCommand, + IMG_UINT32 *pui32TQPrepareFlags, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32SyncPMRCount, + IMG_UINT32 *paui32SyncPMRFlags, + PMR **ppsSyncPMRs); + +PVRSRV_ERROR PVRSRVRGXSetTransferContextPriorityKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDevNode, + RGX_SERVER_TQ_CONTEXT *psTransferContext, + IMG_UINT32 ui32Priority); + +PVRSRV_ERROR PVRSRVRGXSetTransferContextPropertyKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, + RGX_CONTEXT_PROPERTY eContextProperty, + IMG_UINT64 ui64Input, + IMG_UINT64 *pui64Output); + +/* Debug - Dump debug info of transfer contexts on this device */ +void DumpTransferCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile, + IMG_UINT32 ui32VerbLevel); + +/* Debug/Watchdog - check if client transfer contexts are stalled */ +IMG_UINT32 CheckForStalledClientTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo); + +#endif /* RGXTRANSFER_H */ diff --git a/drivers/gpu/drm/phytium/octopus/rgxtransfer_shader.h b/drivers/gpu/drm/phytium/octopus/rgxtransfer_shader.h new file mode 100644 index 
000000000000..a10c64b954e6 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxtransfer_shader.h @@ -0,0 +1,61 @@ +/*************************************************************************/ /*! +@File rgxtransfer_shader.h +@Title TQ binary shader file info +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description This header holds info about TQ binary shader file generated + by the TQ shader factory. This header is need by shader factory + when generating the file; by services KM when reading and + loading the file into memory; and by services UM when + constructing blits using the shaders. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(RGXSHADERHEADER_H) +#define RGXSHADERHEADER_H + +typedef struct _RGX_SHADER_HEADER_ +{ + IMG_UINT32 ui32Version; + IMG_UINT32 ui32NumFragment; + IMG_UINT32 ui32SizeFragment; + IMG_UINT32 ui32NumTDMFragment; + IMG_UINT32 ui32SizeTDMFragment; + IMG_UINT32 ui32SizeClientMem; +} RGX_SHADER_HEADER; + +#endif /* RGXSHADERHEADER_H */ diff --git a/drivers/gpu/drm/phytium/octopus/rgxutils.c b/drivers/gpu/drm/phytium/octopus/rgxutils.c new file mode 100644 index 000000000000..57ed9d92ec67 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxutils.c @@ -0,0 +1,221 @@ +/*************************************************************************/ /*! +@File +@Title Device specific utility routines +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Device specific functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "rgx_fwif_km.h" +#include "pdump_km.h" +#include "osfunc.h" +#include "allocmem.h" +#include "pvr_debug.h" +#include "rgxutils.h" +#include "power.h" +#include "pvrsrv.h" +#include "sync_internal.h" +#include "rgxfwutils.h" + + +PVRSRV_ERROR RGXQueryAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *pvPrivateData, + IMG_UINT32 *pui32State) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_UNREFERENCED_PARAMETER(pvPrivateData); + + if (!psDeviceNode) + return PVRSRV_ERROR_INVALID_PARAMS; + + psDevInfo = psDeviceNode->pvDevice; + *pui32State = psDevInfo->eActivePMConf; + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXSetAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *pvPrivateData, + IMG_UINT32 ui32State) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_UNREFERENCED_PARAMETER(pvPrivateData); + + if (!psDeviceNode || !psDeviceNode->pvDevice) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDevInfo = psDeviceNode->pvDevice; + + if (RGX_ACTIVEPM_FORCE_OFF != ui32State + || !psDevInfo->pvAPMISRData) + { + return PVRSRV_ERROR_NOT_SUPPORTED; + } + +#if !defined(NO_HARDWARE) + eError = OSUninstallMISR(psDevInfo->pvAPMISRData); + if (PVRSRV_OK == eError) + { + psDevInfo->eActivePMConf = RGX_ACTIVEPM_FORCE_OFF; + psDevInfo->pvAPMISRData = NULL; + eError = PVRSRVSetDeviceDefaultPowerState((const PPVRSRV_DEVICE_NODE)psDeviceNode, + PVRSRV_DEV_POWER_STATE_ON); + } +#endif + + return eError; +} + +PVRSRV_ERROR RGXQueryPdumpPanicDisable(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *pvPrivateData, + IMG_BOOL *pbDisabled) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_UNREFERENCED_PARAMETER(pvPrivateData); + + if (!psDeviceNode || !psDeviceNode->pvDevice) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDevInfo = psDeviceNode->pvDevice; + + *pbDisabled = !psDevInfo->bPDPEnabled; + + return PVRSRV_OK; +} + +PVRSRV_ERROR 
RGXSetPdumpPanicDisable(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *pvPrivateData, + IMG_BOOL bDisable) +{ + PVRSRV_RGXDEV_INFO *psDevInfo; + + PVR_UNREFERENCED_PARAMETER(pvPrivateData); + + if (!psDeviceNode || !psDeviceNode->pvDevice) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDevInfo = psDeviceNode->pvDevice; + + psDevInfo->bPDPEnabled = !bDisable; + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXGetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 *pui32DeviceFlags) +{ + if (!pui32DeviceFlags || !psDevInfo) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + *pui32DeviceFlags = psDevInfo->ui32DeviceFlags; + + return PVRSRV_OK; +} + +PVRSRV_ERROR RGXSetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32Config, + IMG_BOOL bSetNotClear) +{ + if (!psDevInfo) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if ((ui32Config & ~RGXKM_DEVICE_STATE_MASK) != 0) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Bits outside of device state mask set (input: 0x%x, mask: 0x%x)", + __func__, ui32Config, RGXKM_DEVICE_STATE_MASK)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (bSetNotClear) + { + psDevInfo->ui32DeviceFlags |= ui32Config; + } + else + { + psDevInfo->ui32DeviceFlags &= ~ui32Config; + } + + return PVRSRV_OK; +} + +inline const char * RGXStringifyKickTypeDM(RGX_KICK_TYPE_DM eKickTypeDM) +{ + PVR_ASSERT(eKickTypeDM < RGX_KICK_TYPE_DM_LAST); + + switch (eKickTypeDM) { + case RGX_KICK_TYPE_DM_GP: + return "GP "; + case RGX_KICK_TYPE_DM_TDM_2D: + return "TDM/2D "; + case RGX_KICK_TYPE_DM_TA: + return "TA "; + case RGX_KICK_TYPE_DM_3D: + return "3D "; + case RGX_KICK_TYPE_DM_CDM: + return "CDM "; + case RGX_KICK_TYPE_DM_RTU: + return "RTU "; + case RGX_KICK_TYPE_DM_SHG: + return "SHG "; + case RGX_KICK_TYPE_DM_TQ2D: + return "TQ2D "; + case RGX_KICK_TYPE_DM_TQ3D: + return "TQ3D "; + default: + return "Invalid DM "; + } +} + +/****************************************************************************** + End of file (rgxutils.c) 
+******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/rgxutils.h b/drivers/gpu/drm/phytium/octopus/rgxutils.h new file mode 100644 index 000000000000..9baf832cc24d --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rgxutils.h @@ -0,0 +1,185 @@ +/*************************************************************************/ /*! +@File +@Title Device specific utility routines declarations +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Inline functions/structures specific to RGX +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "device.h" +#include "rgxdevice.h" +#include "rgxdebug.h" +#include "pvr_notifier.h" +#include "pvrsrv.h" + +/*! +****************************************************************************** + + @Function RGXQueryAPMState + + @Description Query the state of the APM configuration + + @Input psDeviceNode : The device node + + @Input pvPrivateData: Unused (required for AppHint callback) + + @Output pui32State : The APM configuration state + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXQueryAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *pvPrivateData, + IMG_UINT32 *pui32State); + +/*! +****************************************************************************** + + @Function RGXSetAPMState + + @Description Set the APM configuration state. 
Currently only 'OFF' is + supported + + @Input psDeviceNode : The device node + + @Input pvPrivateData: Unused (required for AppHint callback) + + @Input ui32State : The requested APM configuration state + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXSetAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *pvPrivateData, + IMG_UINT32 ui32State); + +/*! +****************************************************************************** + + @Function RGXQueryPdumpPanicDisable + + @Description Get the PDump Panic Enable configuration state. + + @Input psDeviceNode : The device node + + @Input pvPrivateData: Unused (required for AppHint callback) + + @Output pbDisabled : IMG_TRUE if PDump Panic is disabled + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXQueryPdumpPanicDisable(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *pvPrivateData, + IMG_BOOL *pbDisabled); + +/*! +****************************************************************************** + + @Function RGXSetPdumpPanicDisable + + @Description Set the PDump Panic Enable flag + + @Input psDeviceNode : The device node + + @Input pvPrivateData: Unused (required for AppHint callback) + + @Input bDisable : The requested configuration state + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXSetPdumpPanicDisable(const PVRSRV_DEVICE_NODE *psDeviceNode, + const void *pvPrivateData, + IMG_BOOL bDisable); + +/*!
+****************************************************************************** + + @Function RGXGetDeviceFlags + + @Description Get the device flags for a given device + + @Input psDevInfo : The device descriptor query + + @Output pui32DeviceFlags : The current state of the device flags + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXGetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 *pui32DeviceFlags); + +/*! +****************************************************************************** + + @Function RGXSetDeviceFlags + + @Description Set the device flags for a given device + + @Input psDevInfo : The device descriptor to modify + + @Input ui32Config : The device flags to modify + + @Input bSetNotClear : Set or clear the specified flags + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RGXSetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32Config, + IMG_BOOL bSetNotClear); + +/*! +****************************************************************************** + + @Function RGXStringifyKickTypeDM + + @Description Gives the kick type DM name stringified + + @Input Kick type DM + + @Return Array containing the kick type DM name + +******************************************************************************/ +const char* RGXStringifyKickTypeDM(RGX_KICK_TYPE_DM eKickTypeDM); + +#define RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(bitmask, eKickTypeDM) bitmask & eKickTypeDM ? 
RGXStringifyKickTypeDM(eKickTypeDM) : "" +/****************************************************************************** + End of file (rgxutils.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/ri_server.c b/drivers/gpu/drm/phytium/octopus/ri_server.c new file mode 100644 index 000000000000..df4c71e16a4a --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/ri_server.c @@ -0,0 +1,2114 @@ +/*************************************************************************/ /*! +@File ri_server.c +@Title Resource Information (RI) server implementation +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Resource Information (RI) server functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include +#include "img_defs.h" +#include "allocmem.h" +#include "pvr_debug.h" +#include "pvrsrv_error.h" +#include "osfunc.h" + +#include "srvkm.h" +#include "lock.h" + +/* services/include */ +#include "pvr_ricommon.h" + +/* services/server/include/ */ +#include "ri_server.h" + +/* services/include/shared/ */ +#include "hash.h" +/* services/shared/include/ */ +#include "dllist.h" + +#include "pmr.h" + +/* include/device.h */ +#include "device.h" + +#if !defined(RI_UNIT_TEST) +#include "pvrsrv.h" +#endif + + +#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) + +#define USE_RI_LOCK 1 + +/* + * Initial size use for Hash table. (Used to index the RI list entries). 
+ */ +#define _RI_INITIAL_HASH_TABLE_SIZE 64 + +/* + * Values written to the 'valid' field of RI structures when created and + * cleared prior to being destroyed. The code can then check this value + * before accessing the provided pointer contents as a valid RI structure. + */ +#define _VALID_RI_LIST_ENTRY 0x66bccb66 +#define _VALID_RI_SUBLIST_ENTRY 0x77cddc77 +#define _INVALID 0x00000000 + +/* + * If this define is set to 1, details of the linked lists (addresses, + * prev/next ptrs, etc) are also output when function RIDumpList() is called. + */ +#define _DUMP_LINKEDLIST_INFO 0 + + +typedef IMG_UINT64 _RI_BASE_T; + + +/* No +1 in SIZE macros since sizeof includes \0 byte in size */ + +#define RI_PROC_BUF_SIZE 16 + +#define RI_MEMDESC_SUM_FRMT "PID %d %s MEMDESCs Alloc'd:0x%010" IMG_UINT64_FMTSPECx " (%" IMG_UINT64_FMTSPEC "K) + "\ + "Imported:0x%010" IMG_UINT64_FMTSPECx " (%" IMG_UINT64_FMTSPEC "K) = "\ + "Total:0x%010" IMG_UINT64_FMTSPECx " (%" IMG_UINT64_FMTSPEC "K)\n" +#define RI_MEMDESC_SUM_BUF_SIZE (sizeof(RI_MEMDESC_SUM_FRMT)+5+RI_PROC_BUF_SIZE+30+60) + + +#define RI_PMR_SUM_FRMT "PID %d %s PMRs Alloc'd:0x%010" IMG_UINT64_FMTSPECx ", %" IMG_UINT64_FMTSPEC "K "\ + "[Physical: 0x%010" IMG_UINT64_FMTSPECx ", %" IMG_UINT64_FMTSPEC "K]\n" +#define RI_PMR_SUM_BUF_SIZE (sizeof(RI_PMR_SUM_FRMT)+(20+40)) + +#define RI_PMR_ENTRY_FRMT "%%sPID:%%-5d <%%p>\t%%-%ds\t0x%%010" IMG_UINT64_FMTSPECx "\t[0x%%010" IMG_UINT64_FMTSPECx "]\t%%c" +#define RI_PMR_ENTRY_BUF_SIZE (sizeof(RI_PMR_ENTRY_FRMT)+(3+5+16+PVR_ANNOTATION_MAX_LEN+10+10)) +#define RI_PMR_ENTRY_FRMT_SIZE (sizeof(RI_PMR_ENTRY_FRMT)) + +/* Use %5d rather than %d so the output aligns in server/kernel.log, debugFS sees extra spaces */ +#define RI_MEMDESC_ENTRY_PROC_FRMT "[%5d:%s]" +#define RI_MEMDESC_ENTRY_PROC_BUF_SIZE (sizeof(RI_MEMDESC_ENTRY_PROC_FRMT)+5+16) + +#define RI_SYS_ALLOC_IMPORT_FRMT "{Import from PID %d}" +#define RI_SYS_ALLOC_IMPORT_FRMT_SIZE (sizeof(RI_SYS_ALLOC_IMPORT_FRMT)+5) +static IMG_CHAR 
g_szSysAllocImport[RI_SYS_ALLOC_IMPORT_FRMT_SIZE]; + +#define RI_MEMDESC_ENTRY_IMPORT_FRMT "{Import from PID %d}" +#define RI_MEMDESC_ENTRY_IMPORT_BUF_SIZE (sizeof(RI_MEMDESC_ENTRY_IMPORT_FRMT)+5) + +#define RI_MEMDESC_ENTRY_UNPINNED_FRMT "{Unpinned}" +#define RI_MEMDESC_ENTRY_UNPINNED_BUF_SIZE (sizeof(RI_MEMDESC_ENTRY_UNPINNED_FRMT)) + +#define RI_MEMDESC_ENTRY_FRMT "%%sPID:%%-5d 0x%%010" IMG_UINT64_FMTSPECx "\t%%-%ds %%s\t0x%%010" IMG_UINT64_FMTSPECx "\t<%%p> %%s%%s%%s%%c" +#define RI_MEMDESC_ENTRY_BUF_SIZE (sizeof(RI_MEMDESC_ENTRY_FRMT)+(3+5+10+PVR_ANNOTATION_MAX_LEN+RI_MEMDESC_ENTRY_PROC_BUF_SIZE+16+\ + RI_MEMDESC_ENTRY_IMPORT_BUF_SIZE+RI_SYS_ALLOC_IMPORT_FRMT_SIZE+RI_MEMDESC_ENTRY_UNPINNED_BUF_SIZE)) +#define RI_MEMDESC_ENTRY_FRMT_SIZE (sizeof(RI_MEMDESC_ENTRY_FRMT)) + + +#define RI_FRMT_SIZE_MAX (MAX(RI_MEMDESC_ENTRY_BUF_SIZE,\ + MAX(RI_PMR_ENTRY_BUF_SIZE,\ + MAX(RI_MEMDESC_SUM_BUF_SIZE,\ + RI_PMR_SUM_BUF_SIZE)))) + + + + +/* Structure used to make linked sublist of memory allocations (MEMDESC) */ +struct _RI_SUBLIST_ENTRY_ +{ + DLLIST_NODE sListNode; + struct _RI_LIST_ENTRY_ *psRI; + IMG_UINT32 valid; + IMG_BOOL bIsImport; + IMG_BOOL bIsSuballoc; + IMG_PID pid; + IMG_CHAR ai8ProcName[RI_PROC_BUF_SIZE]; + IMG_DEV_VIRTADDR sVAddr; + IMG_UINT64 ui64Offset; + IMG_UINT64 ui64Size; + IMG_CHAR ai8TextB[DEVMEM_ANNOTATION_MAX_LEN+1]; + DLLIST_NODE sProcListNode; +}; + +/* + * Structure used to make linked list of PMRs. Sublists of allocations + * (MEMDESCs) made from these PMRs are chained off these entries. 
+ */ +struct _RI_LIST_ENTRY_ +{ + DLLIST_NODE sListNode; + DLLIST_NODE sSysAllocListNode; + DLLIST_NODE sSubListFirst; + IMG_UINT32 valid; + PMR *psPMR; + IMG_PID pid; + IMG_CHAR ai8ProcName[RI_PROC_BUF_SIZE]; + IMG_UINT16 ui16SubListCount; + IMG_UINT16 ui16MaxSubListCount; + IMG_UINT32 ui32RIPMRFlags; /* Flags used to indicate the type of allocation */ + IMG_UINT32 ui32Flags; /* Flags used to indicate if PMR appears in ri debugfs output */ +}; + +typedef struct _RI_LIST_ENTRY_ RI_LIST_ENTRY; +typedef struct _RI_SUBLIST_ENTRY_ RI_SUBLIST_ENTRY; + +static IMG_UINT16 g_ui16RICount; +static HASH_TABLE *g_pRIHashTable; +static IMG_UINT16 g_ui16ProcCount; +static HASH_TABLE *g_pProcHashTable; + +static POS_LOCK g_hRILock; + +/* Linked list of PMR allocations made against the PVR_SYS_ALLOC_PID and lock + * to prevent concurrent access to it. + */ +static POS_LOCK g_hSysAllocPidListLock; +static DLLIST_NODE g_sSysAllocPidListHead; + +/* + * Flag used to indicate if RILock should be destroyed when final PMR entry is + * deleted, i.e. if RIDeInitKM() has already been called before that point but + * the handle manager has deferred deletion of RI entries. 
+ */ +static IMG_BOOL bRIDeInitDeferred = IMG_FALSE; + +/* + * Used as head of linked-list of PMR RI entries - this is useful when we wish + * to iterate all PMR list entries (when we don't have a PMR ref) + */ +static DLLIST_NODE sListFirst; + +/* Function used to produce string containing info for MEMDESC RI entries (used for both debugfs and kernel log output) */ +static void _GenerateMEMDESCEntryString(RI_SUBLIST_ENTRY *psRISubEntry, IMG_BOOL bDebugFs, IMG_UINT16 ui16MaxStrLen, IMG_CHAR *pszEntryString); +/* Function used to produce string containing info for PMR RI entries (used for both debugfs and kernel log output) */ +static void _GeneratePMREntryString(RI_LIST_ENTRY *psRIEntry, IMG_BOOL bDebugFs, IMG_UINT16 ui16MaxStrLen, IMG_CHAR *pszEntryString); + +static PVRSRV_ERROR _DumpAllEntries (uintptr_t k, uintptr_t v, void* pvPriv); +static PVRSRV_ERROR _DeleteAllEntries (uintptr_t k, uintptr_t v, void* pvPriv); +static PVRSRV_ERROR _DeleteAllProcEntries (uintptr_t k, uintptr_t v, void* pvPriv); +static PVRSRV_ERROR _DumpList(PMR *psPMR, IMG_PID pid); +#define _RIOutput(x) PVR_LOG(x) + +#define RI_FLAG_PMR_PHYS_COUNTED_BY_DEBUGFS 0x1 +#define RI_FLAG_SYSALLOC_PMR 0x2 + +static IMG_UINT32 +_ProcHashFunc(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen); + +static IMG_UINT32 +_ProcHashFunc(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen) +{ + IMG_UINT32 *p = (IMG_UINT32 *)pKey; + IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINT32); + IMG_UINT32 ui; + IMG_UINT32 uHashKey = 0; + + PVR_UNREFERENCED_PARAMETER(uHashTabLen); + + for (ui = 0; ui < uKeyLen; ui++) + { + IMG_UINT32 uHashPart = *p++; + + uHashPart += (uHashPart << 12); + uHashPart ^= (uHashPart >> 22); + uHashPart += (uHashPart << 4); + uHashPart ^= (uHashPart >> 9); + uHashPart += (uHashPart << 10); + uHashPart ^= (uHashPart >> 2); + uHashPart += (uHashPart << 7); + uHashPart ^= (uHashPart >> 12); + + uHashKey += uHashPart; + } + + return uHashKey; +} + +static IMG_BOOL +_ProcHashComp(size_t 
uKeySize, void *pKey1, void *pKey2); + +static IMG_BOOL +_ProcHashComp(size_t uKeySize, void *pKey1, void *pKey2) +{ + IMG_UINT32 *p1 = (IMG_UINT32 *)pKey1; + IMG_UINT32 *p2 = (IMG_UINT32 *)pKey2; + IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINT32); + IMG_UINT32 ui; + + for (ui = 0; ui < uKeyLen; ui++) + { + if (*p1++ != *p2++) + return IMG_FALSE; + } + + return IMG_TRUE; +} + +static void _RILock(void) +{ +#if (USE_RI_LOCK == 1) + OSLockAcquire(g_hRILock); +#endif +} + +static void _RIUnlock(void) +{ +#if (USE_RI_LOCK == 1) + OSLockRelease(g_hRILock); +#endif +} + +/* This value maintains a count of the number of PMRs attributed to the + * PVR_SYS_ALLOC_PID. Access to this value is protected by g_hRILock, so it + * does not need to be an ATOMIC_T. + */ +static IMG_UINT32 g_ui32SysAllocPMRCount; + + +PVRSRV_ERROR RIInitKM(void) +{ + IMG_INT iCharsWritten; + PVRSRV_ERROR eError; + + bRIDeInitDeferred = IMG_FALSE; + + iCharsWritten = OSSNPrintf(g_szSysAllocImport, + RI_SYS_ALLOC_IMPORT_FRMT_SIZE, + RI_SYS_ALLOC_IMPORT_FRMT, + PVR_SYS_ALLOC_PID); + PVR_LOG_IF_FALSE((iCharsWritten>0 && iCharsWritten<(IMG_INT32)RI_SYS_ALLOC_IMPORT_FRMT_SIZE), \ + "OSSNPrintf failed to initialise g_szSysAllocImport"); + + eError = OSLockCreate(&g_hSysAllocPidListLock); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: OSLockCreate (g_hSysAllocPidListLock) failed (returned %d)", + __func__, + eError)); + } + dllist_init(&(g_sSysAllocPidListHead)); +#if (USE_RI_LOCK == 1) + eError = OSLockCreate(&g_hRILock); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: OSLockCreate (g_hRILock) failed (returned %d)", + __func__, + eError)); + } +#endif + return eError; +} +void RIDeInitKM(void) +{ +#if (USE_RI_LOCK == 1) + if (g_ui16RICount > 0) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: called with %d entries remaining - deferring OSLockDestroy()", + __func__, + g_ui16RICount)); + bRIDeInitDeferred = IMG_TRUE; + } + else + { + OSLockDestroy(g_hRILock); + 
OSLockDestroy(g_hSysAllocPidListLock); + } +#endif +} + +/*! +******************************************************************************* + + @Function RILockAcquireKM + + @Description + Acquires the RI Lock (which protects the integrity of the RI + linked lists). Caller will be suspended until lock is acquired. + + @Return None + +******************************************************************************/ +void RILockAcquireKM(void) +{ + _RILock(); +} + +/*! +******************************************************************************* + + @Function RILockReleaseKM + + @Description + Releases the RI Lock (which protects the integrity of the RI + linked lists). + + @Return None + +******************************************************************************/ +void RILockReleaseKM(void) +{ + _RIUnlock(); +} + +/*! +******************************************************************************* + + @Function RIWritePMREntryWithOwnerKM + + @Description + Writes a new Resource Information list entry. + The new entry will be inserted at the head of the list of + PMR RI entries and assigned the values provided. + + @input psPMR - Reference (handle) to the PMR to which this reference relates + + @input ui32Owner - PID of the process which owns the allocation. This + may not be the current process (e.g. 
a request to + grow a buffer may happen in the context of a kernel + thread, or we may import further resource for a + suballocation made from the FW heap which can then + also be utilized by other processes) + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RIWritePMREntryWithOwnerKM(PMR *psPMR, + IMG_PID ui32Owner) +{ + PMR *pPMRHashKey = psPMR; + RI_LIST_ENTRY *psRIEntry; + uintptr_t hashData; + + /* if Hash table has not been created, create it now */ + if (!g_pRIHashTable) + { + g_pRIHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(PMR*), HASH_Func_Default, HASH_Key_Comp_Default); + g_pProcHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(IMG_PID), _ProcHashFunc, _ProcHashComp); + } + PVR_RETURN_IF_NOMEM(g_pRIHashTable); + PVR_RETURN_IF_NOMEM(g_pProcHashTable); + + PVR_RETURN_IF_INVALID_PARAM(psPMR); + + /* Acquire RI Lock */ + _RILock(); + + /* Look-up psPMR in Hash Table */ + hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey); + psRIEntry = (RI_LIST_ENTRY *)hashData; + if (!psRIEntry) + { + /* + * If failed to find a matching existing entry, create a new one + */ + psRIEntry = (RI_LIST_ENTRY *)OSAllocZMemNoStats(sizeof(RI_LIST_ENTRY)); + if (!psRIEntry) + { + /* Release RI Lock */ + _RIUnlock(); + /* Error - no memory to allocate for new RI entry */ + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + else + { + PMR_FLAGS_T uiPMRFlags = PMR_Flags(psPMR); + PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)PMR_DeviceNode(psPMR); + + /* + * Add new RI Entry + */ + if (g_ui16RICount == 0) + { + /* Initialise PMR entry linked-list head */ + dllist_init(&sListFirst); + } + g_ui16RICount++; + + dllist_init (&(psRIEntry->sSysAllocListNode)); + dllist_init (&(psRIEntry->sSubListFirst)); + psRIEntry->ui16SubListCount = 0; + psRIEntry->ui16MaxSubListCount = 0; + psRIEntry->valid = _VALID_RI_LIST_ENTRY; + + /* Check if this PMR should be 
accounted for under the + * PVR_SYS_ALLOC_PID debugFS entry. This should happen if + * we are in the driver init phase, the flags indicate + * this is a FW Main allocation (made from FW heap) + * or the owner PID is PVR_SYS_ALLOC_PID. + * Also record host dev node allocs on the system PID. + */ + if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_INIT || + PVRSRV_CHECK_FW_MAIN(uiPMRFlags) || + ui32Owner == PVR_SYS_ALLOC_PID || + psDeviceNode == PVRSRVGetPVRSRVData()->psHostMemDeviceNode) + { + psRIEntry->ui32RIPMRFlags = RI_FLAG_SYSALLOC_PMR; + OSSNPrintf(psRIEntry->ai8ProcName, + RI_PROC_BUF_SIZE, + "SysProc"); + psRIEntry->pid = PVR_SYS_ALLOC_PID; + OSLockAcquire(g_hSysAllocPidListLock); + /* Add this psRIEntry to the list of entries for PVR_SYS_ALLOC_PID */ + dllist_add_to_tail(&g_sSysAllocPidListHead, (PDLLIST_NODE)&(psRIEntry->sSysAllocListNode)); + OSLockRelease(g_hSysAllocPidListLock); + g_ui32SysAllocPMRCount++; + } + else + { + psRIEntry->ui32RIPMRFlags = 0; + psRIEntry->pid = ui32Owner; + } + + OSSNPrintf(psRIEntry->ai8ProcName, + RI_PROC_BUF_SIZE, + "%s", + OSGetCurrentClientProcessNameKM()); + /* Add PMR entry to linked-list of all PMR entries */ + dllist_init (&(psRIEntry->sListNode)); + dllist_add_to_tail(&sListFirst, (PDLLIST_NODE)&(psRIEntry->sListNode)); + } + + psRIEntry->psPMR = psPMR; + psRIEntry->ui32Flags = 0; + + /* Create index entry in Hash Table */ + HASH_Insert_Extended (g_pRIHashTable, (void *)&pPMRHashKey, (uintptr_t)psRIEntry); + + /* Store phRIHandle in PMR structure, so it can delete the associated RI entry when it destroys the PMR */ + PMRStoreRIHandle(psPMR, psRIEntry); + } + /* Release RI Lock */ + _RIUnlock(); + + return PVRSRV_OK; +} + +/*! +******************************************************************************* + + @Function RIWritePMREntryKM + + @Description + Writes a new Resource Information list entry. + The new entry will be inserted at the head of the list of + PMR RI entries and assigned the values provided. 
+ + @input psPMR - Reference (handle) to the PMR to which this reference relates + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RIWritePMREntryKM(PMR *psPMR) +{ + return RIWritePMREntryWithOwnerKM(psPMR, + OSGetCurrentClientProcessIDKM()); +} + +/*! +******************************************************************************* + + @Function RIWriteMEMDESCEntryKM + + @Description + Writes a new Resource Information sublist entry. + The new entry will be inserted at the head of the sublist of + the indicated PMR list entry, and assigned the values provided. + + @input psPMR - Reference (handle) to the PMR to which this MEMDESC RI entry relates + @input ui32TextBSize - Length of string provided in psz8TextB parameter + @input psz8TextB - String describing this secondary reference (may be null) + @input ui64Offset - Offset from the start of the PMR at which this allocation begins + @input ui64Size - Size of this allocation + @input bIsImport - Flag indicating if this is an allocation or an import + @input bIsSuballoc - Flag indicating if this is a sub-allocation + @output phRIHandle - Handle to the created RI entry + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RIWriteMEMDESCEntryKM(PMR *psPMR, + IMG_UINT32 ui32TextBSize, + const IMG_CHAR *psz8TextB, + IMG_UINT64 ui64Offset, + IMG_UINT64 ui64Size, + IMG_BOOL bIsImport, + IMG_BOOL bIsSuballoc, + RI_HANDLE *phRIHandle) +{ + RI_SUBLIST_ENTRY *psRISubEntry; + RI_LIST_ENTRY *psRIEntry; + PMR *pPMRHashKey = psPMR; + uintptr_t hashData; + IMG_PID pid; + + /* Check Hash tables have been created (meaning at least one PMR has been defined) */ + PVR_RETURN_IF_INVALID_PARAM(g_pRIHashTable); + PVR_RETURN_IF_INVALID_PARAM(g_pProcHashTable); + + PVR_RETURN_IF_INVALID_PARAM(psPMR); + PVR_RETURN_IF_INVALID_PARAM(phRIHandle); + + /* Acquire RI Lock */ + _RILock(); + + *phRIHandle = NULL; 
+ + /* Look-up psPMR in Hash Table */ + hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey); + psRIEntry = (RI_LIST_ENTRY *)hashData; + if (!psRIEntry) + { + /* Release RI Lock */ + _RIUnlock(); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psRISubEntry = (RI_SUBLIST_ENTRY *)OSAllocZMemNoStats(sizeof(RI_SUBLIST_ENTRY)); + if (!psRISubEntry) + { + /* Release RI Lock */ + _RIUnlock(); + /* Error - no memory to allocate for new RI sublist entry */ + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + else + { + /* + * Insert new entry in sublist + */ + PDLLIST_NODE currentNode = dllist_get_next_node(&(psRIEntry->sSubListFirst)); + + /* + * Insert new entry before currentNode + */ + if (!currentNode) + { + currentNode = &(psRIEntry->sSubListFirst); + } + dllist_add_to_tail(currentNode, (PDLLIST_NODE)&(psRISubEntry->sListNode)); + + psRISubEntry->psRI = psRIEntry; + + /* Increment number of entries in sublist */ + psRIEntry->ui16SubListCount++; + if (psRIEntry->ui16SubListCount > psRIEntry->ui16MaxSubListCount) + { + psRIEntry->ui16MaxSubListCount = psRIEntry->ui16SubListCount; + } + psRISubEntry->valid = _VALID_RI_SUBLIST_ENTRY; + } + + /* If allocation is made during device or driver initialisation, + * track the MEMDESC entry under PVR_SYS_ALLOC_PID, otherwise use + * the current PID. + * Record host dev node allocations on the system PID. + */ + { + PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)PMR_DeviceNode(psRISubEntry->psRI->psPMR); + + if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_INIT || + psDeviceNode == PVRSRVGetPVRSRVData()->psHostMemDeviceNode) + { + psRISubEntry->pid = psRISubEntry->psRI->pid; + } + else + { + psRISubEntry->pid = OSGetCurrentClientProcessIDKM(); + } + } + + if (ui32TextBSize > sizeof(psRISubEntry->ai8TextB)-1) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: TextBSize too long (%u). 
Text will be truncated " + "to %zu characters", __func__, + ui32TextBSize, sizeof(psRISubEntry->ai8TextB)-1)); + } + + /* copy ai8TextB field data */ + OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8TextB, sizeof(psRISubEntry->ai8TextB), "%s", psz8TextB); + + psRISubEntry->ui64Offset = ui64Offset; + psRISubEntry->ui64Size = ui64Size; + psRISubEntry->bIsImport = bIsImport; + psRISubEntry->bIsSuballoc = bIsSuballoc; + OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8ProcName, RI_PROC_BUF_SIZE, "%s", OSGetCurrentClientProcessNameKM()); + dllist_init (&(psRISubEntry->sProcListNode)); + + /* + * Now insert this MEMDESC into the proc list + */ + /* look-up pid in Hash Table */ + pid = psRISubEntry->pid; + hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&pid); + if (!hashData) + { + /* + * No allocations for this pid yet + */ + HASH_Insert_Extended (g_pProcHashTable, (void *)&pid, (uintptr_t)&(psRISubEntry->sProcListNode)); + /* Increment number of entries in proc hash table */ + g_ui16ProcCount++; + } + else + { + /* + * Insert allocation into pid allocations linked list + */ + PDLLIST_NODE currentNode = (PDLLIST_NODE)hashData; + + /* + * Insert new entry + */ + dllist_add_to_tail(currentNode, (PDLLIST_NODE)&(psRISubEntry->sProcListNode)); + } + *phRIHandle = (RI_HANDLE)psRISubEntry; + /* Release RI Lock */ + _RIUnlock(); + + return PVRSRV_OK; +} + +/*! +******************************************************************************* + + @Function RIWriteProcListEntryKM + + @Description + Write a new entry in the process list directly. We have to do this + because there might be no, multiple or changing PMR handles. + + In the common case we have a PMR that will be added to the PMR list + and one or several MemDescs that are associated to it in a sub-list. + Additionally these MemDescs will be inserted in the per-process list. + + There might be special descriptors from e.g. new user APIs that + are associated with no or multiple PMRs and not just one. 
+ These can be now added to the per-process list (as RI_SUBLIST_ENTRY) + directly with this function and won't be listed in the PMR list (RIEntry) + because there might be no PMR. + + To remove entries from the per-process list, just use + RIDeleteMEMDESCEntryKM(). + + @input psz8TextB - String describing this secondary reference (may be null) + @input ui64Size - Size of this allocation + @input ui64DevVAddr - Virtual address of this entry + @output phRIHandle - Handle to the created RI entry + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RIWriteProcListEntryKM(IMG_UINT32 ui32TextBSize, + const IMG_CHAR *psz8TextB, + IMG_UINT64 ui64Size, + IMG_UINT64 ui64DevVAddr, + RI_HANDLE *phRIHandle) +{ + uintptr_t hashData = 0; + IMG_PID pid; + RI_SUBLIST_ENTRY *psRISubEntry = NULL; + + if (!g_pRIHashTable) + { + g_pRIHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(PMR*), HASH_Func_Default, HASH_Key_Comp_Default); + g_pProcHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(IMG_PID), _ProcHashFunc, _ProcHashComp); + + if (!g_pRIHashTable || !g_pProcHashTable) + { + /* Error - no memory to allocate for Hash table(s) */ + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + } + + /* Acquire RI Lock */ + _RILock(); + + *phRIHandle = NULL; + + psRISubEntry = (RI_SUBLIST_ENTRY *)OSAllocZMemNoStats(sizeof(RI_SUBLIST_ENTRY)); + if (!psRISubEntry) + { + /* Release RI Lock */ + _RIUnlock(); + /* Error - no memory to allocate for new RI sublist entry */ + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + psRISubEntry->valid = _VALID_RI_SUBLIST_ENTRY; + + psRISubEntry->pid = OSGetCurrentClientProcessIDKM(); + + if (ui32TextBSize > sizeof(psRISubEntry->ai8TextB)-1) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: TextBSize too long (%u). 
Text will be truncated " + "to %zu characters", __func__, + ui32TextBSize, sizeof(psRISubEntry->ai8TextB)-1)); + } + + /* copy ai8TextB field data */ + OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8TextB, sizeof(psRISubEntry->ai8TextB), "%s", psz8TextB); + + psRISubEntry->ui64Offset = 0; + psRISubEntry->ui64Size = ui64Size; + psRISubEntry->sVAddr.uiAddr = ui64DevVAddr; + psRISubEntry->bIsImport = IMG_FALSE; + psRISubEntry->bIsSuballoc = IMG_FALSE; + OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8ProcName, RI_PROC_BUF_SIZE, "%s", OSGetCurrentClientProcessNameKM()); + dllist_init (&(psRISubEntry->sProcListNode)); + + /* + * Now insert this MEMDESC into the proc list + */ + /* look-up pid in Hash Table */ + pid = psRISubEntry->pid; + hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&pid); + if (!hashData) + { + /* + * No allocations for this pid yet + */ + HASH_Insert_Extended (g_pProcHashTable, (void *)&pid, (uintptr_t)&(psRISubEntry->sProcListNode)); + /* Increment number of entries in proc hash table */ + g_ui16ProcCount++; + } + else + { + /* + * Insert allocation into pid allocations linked list + */ + PDLLIST_NODE currentNode = (PDLLIST_NODE)hashData; + + /* + * Insert new entry + */ + dllist_add_to_tail(currentNode, (PDLLIST_NODE)&(psRISubEntry->sProcListNode)); + } + *phRIHandle = (RI_HANDLE)psRISubEntry; + /* Release RI Lock */ + _RIUnlock(); + + return PVRSRV_OK; +} + +/*! +******************************************************************************* + + @Function RIUpdateMEMDESCAddrKM + + @Description + Update a Resource Information entry. 
+ + @input hRIHandle - Handle of object whose reference info is to be updated + @input sVAddr - New address for the RI entry + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RIUpdateMEMDESCAddrKM(RI_HANDLE hRIHandle, + IMG_DEV_VIRTADDR sVAddr) +{ + RI_SUBLIST_ENTRY *psRISubEntry; + + PVR_RETURN_IF_INVALID_PARAM(hRIHandle); + + psRISubEntry = (RI_SUBLIST_ENTRY *)hRIHandle; + if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY) + { + /* Pointer does not point to valid structure */ + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Acquire RI lock*/ + _RILock(); + + psRISubEntry->sVAddr.uiAddr = sVAddr.uiAddr; + + /* Release RI lock */ + _RIUnlock(); + + return PVRSRV_OK; +} + +/*! +******************************************************************************* + + @Function RIDeletePMREntryKM + + @Description + Delete a Resource Information entry. + + @input hRIHandle - Handle of object whose reference info is to be deleted + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RIDeletePMREntryKM(RI_HANDLE hRIHandle) +{ + RI_LIST_ENTRY *psRIEntry; + PMR *pPMRHashKey; + PVRSRV_ERROR eResult = PVRSRV_OK; + + PVR_RETURN_IF_INVALID_PARAM(hRIHandle); + + psRIEntry = (RI_LIST_ENTRY *)hRIHandle; + + if (psRIEntry->valid != _VALID_RI_LIST_ENTRY) + { + /* Pointer does not point to valid structure */ + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (psRIEntry->ui16SubListCount == 0) + { + /* Acquire RI lock*/ + _RILock(); + + /* Remove the HASH table index entry */ + pPMRHashKey = psRIEntry->psPMR; + HASH_Remove_Extended(g_pRIHashTable, (void *)&pPMRHashKey); + + psRIEntry->valid = _INVALID; + + /* Remove PMR entry from linked-list of PMR entries */ + dllist_remove_node((PDLLIST_NODE)&(psRIEntry->sListNode)); + + if (psRIEntry->ui32RIPMRFlags & RI_FLAG_SYSALLOC_PMR) + { + dllist_remove_node((PDLLIST_NODE)&(psRIEntry->sSysAllocListNode)); + 
g_ui32SysAllocPMRCount--; + } + + /* Now, free the memory used to store the RI entry */ + OSFreeMemNoStats(psRIEntry); + psRIEntry = NULL; + + /* + * Decrement number of RI entries - if this is now zero, + * we can delete the RI hash table + */ + if (--g_ui16RICount == 0) + { + HASH_Delete(g_pRIHashTable); + g_pRIHashTable = NULL; + + _RIUnlock(); + + /* If deInit has been deferred, we can now destroy the RI Lock */ + if (bRIDeInitDeferred) + { + OSLockDestroy(g_hRILock); + } + } + else + { + /* Release RI lock*/ + _RIUnlock(); + } + /* + * Make the handle NULL once PMR RI entry is deleted + */ + hRIHandle = NULL; + } + else + { + eResult = PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP; + } + + return eResult; +} + +/*! +******************************************************************************* + + @Function RIDeleteMEMDESCEntryKM + + @Description + Delete a Resource Information entry. + Entry can be from RIEntry list or ProcList. + + @input hRIHandle - Handle of object whose reference info is to be deleted + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RIDeleteMEMDESCEntryKM(RI_HANDLE hRIHandle) +{ + RI_LIST_ENTRY *psRIEntry = NULL; + RI_SUBLIST_ENTRY *psRISubEntry; + uintptr_t hashData; + IMG_PID pid; + + PVR_RETURN_IF_INVALID_PARAM(hRIHandle); + + psRISubEntry = (RI_SUBLIST_ENTRY *)hRIHandle; + if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY) + { + /* Pointer does not point to valid structure */ + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Acquire RI lock*/ + _RILock(); + + /* For entries which do have a parent PMR remove the node from the sublist */ + if (psRISubEntry->psRI) + { + psRIEntry = (RI_LIST_ENTRY *)psRISubEntry->psRI; + + /* Now, remove entry from the sublist */ + dllist_remove_node(&(psRISubEntry->sListNode)); + } + + psRISubEntry->valid = _INVALID; + + /* Remove the entry from the proc allocations linked list */ + pid = psRISubEntry->pid; + /* If this is the 
only allocation for this pid, just remove it from the hash table */ + if (dllist_get_next_node(&(psRISubEntry->sProcListNode)) == NULL) + { + HASH_Remove_Extended(g_pProcHashTable, (void *)&pid); + /* Decrement number of entries in proc hash table, and delete the hash table if there are now none */ + if (--g_ui16ProcCount == 0) + { + HASH_Delete(g_pProcHashTable); + g_pProcHashTable = NULL; + } + } + else + { + hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&pid); + if ((PDLLIST_NODE)hashData == &(psRISubEntry->sProcListNode)) + { + HASH_Remove_Extended(g_pProcHashTable, (void *)&pid); + HASH_Insert_Extended (g_pProcHashTable, (void *)&pid, (uintptr_t)dllist_get_next_node(&(psRISubEntry->sProcListNode))); + } + } + dllist_remove_node(&(psRISubEntry->sProcListNode)); + + /* Now, free the memory used to store the sublist entry */ + OSFreeMemNoStats(psRISubEntry); + psRISubEntry = NULL; + + /* + * Decrement number of entries in sublist if this MemDesc had a parent entry. + */ + if (psRIEntry) + { + psRIEntry->ui16SubListCount--; + } + + /* Release RI lock*/ + _RIUnlock(); + + /* + * Make the handle NULL once MEMDESC RI entry is deleted + */ + hRIHandle = NULL; + + return PVRSRV_OK; +} + +/*! +******************************************************************************* + + @Function RIDeleteListKM + + @Description + Delete all Resource Information entries and free associated + memory. + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RIDeleteListKM(void) +{ + PVRSRV_ERROR eResult = PVRSRV_OK; + + _RILock(); + + if (g_pRIHashTable) + { + eResult = HASH_Iterate(g_pRIHashTable, (HASH_pfnCallback)_DeleteAllEntries, NULL); + if (eResult == PVRSRV_ERROR_RESOURCE_UNAVAILABLE) + { + /* + * PVRSRV_ERROR_RESOURCE_UNAVAILABLE is used to stop the Hash iterator when + * the hash table gets deleted as a result of deleting the final PMR entry, + * so this is not a real error condition... 
+ */ + eResult = PVRSRV_OK; + } + } + + /* After the run through the RIHashTable that holds the PMR entries there might be + * still entries left in the per-process hash table because they were added with + * RIWriteProcListEntryKM() and have no PMR parent associated. + */ + if (g_pProcHashTable) + { + eResult = HASH_Iterate(g_pProcHashTable, (HASH_pfnCallback) _DeleteAllProcEntries, NULL); + if (eResult == PVRSRV_ERROR_RESOURCE_UNAVAILABLE) + { + /* + * PVRSRV_ERROR_RESOURCE_UNAVAILABLE is used to stop the Hash iterator when + * the hash table gets deleted as a result of deleting the final PMR entry, + * so this is not a real error condition... + */ + eResult = PVRSRV_OK; + } + } + + _RIUnlock(); + + return eResult; +} + +/*! +******************************************************************************* + + @Function RIDumpListKM + + @Description + Dumps out the contents of the RI List entry for the + specified PMR, and all MEMDESC allocation entries + in the associated sub linked list. + At present, output is directed to Kernel log + via PVR_DPF. + + @input psPMR - PMR for which RI entry details are to be output + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RIDumpListKM(PMR *psPMR) +{ + PVRSRV_ERROR eError; + + /* Acquire RI lock*/ + _RILock(); + + eError = _DumpList(psPMR, 0); + + /* Release RI lock*/ + _RIUnlock(); + + return eError; +} + +/*! +******************************************************************************* + + @Function RIGetListEntryKM + + @Description + Returns pointer to a formatted string with details of the specified + list entry. If no entry exists (e.g. it may have been deleted + since the previous call), NULL is returned. + + @input pid - pid for which RI entry details are to be output + @input ppHandle - handle to the entry, if NULL, the first entry will be + returned. 
+ @output pszEntryString - string to be output for the entry + @output hEntry - hEntry will be returned pointing to the next entry + (or NULL if there is no next entry) + + @Return PVRSRV_ERROR + +******************************************************************************/ +IMG_BOOL RIGetListEntryKM(IMG_PID pid, + IMG_HANDLE **ppHandle, + IMG_CHAR **ppszEntryString) +{ + RI_SUBLIST_ENTRY *psRISubEntry = NULL; + RI_LIST_ENTRY *psRIEntry = NULL; + uintptr_t hashData = 0; + IMG_PID hashKey = pid; + + static IMG_CHAR acStringBuffer[RI_FRMT_SIZE_MAX]; + + static IMG_UINT64 ui64TotalMemdescAlloc; + static IMG_UINT64 ui64TotalImport; + static IMG_UINT64 ui64TotalPMRAlloc; + static IMG_UINT64 ui64TotalPMRBacked; + static enum { + RI_GET_STATE_MEMDESCS_LIST_START, + RI_GET_STATE_MEMDESCS_SUMMARY, + RI_GET_STATE_PMR_LIST, + RI_GET_STATE_PMR_SUMMARY, + RI_GET_STATE_END, + RI_GET_STATE_LAST + } g_bNextGetState = RI_GET_STATE_MEMDESCS_LIST_START; + + static DLLIST_NODE *psNode; + static DLLIST_NODE *psSysAllocNode; + static IMG_CHAR szProcName[RI_PROC_BUF_SIZE]; + static IMG_UINT32 ui32ProcessedSysAllocPMRCount; + + acStringBuffer[0] = '\0'; + + switch (g_bNextGetState) + { + case RI_GET_STATE_MEMDESCS_LIST_START: + /* look-up pid in Hash Table, to obtain first entry for pid */ + hashData = HASH_Retrieve_Extended(g_pProcHashTable, (void *)&hashKey); + if (hashData) + { + if (*ppHandle) + { + psRISubEntry = (RI_SUBLIST_ENTRY *)*ppHandle; + if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY) + { + psRISubEntry = NULL; + } + } + else + { + psRISubEntry = IMG_CONTAINER_OF((PDLLIST_NODE)hashData, RI_SUBLIST_ENTRY, sProcListNode); + if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY) + { + psRISubEntry = NULL; + } + } + } + + if (psRISubEntry) + { + PDLLIST_NODE psNextProcListNode = dllist_get_next_node(&psRISubEntry->sProcListNode); + + if (psRISubEntry->bIsImport) + { + ui64TotalImport += psRISubEntry->ui64Size; + } + else + { + ui64TotalMemdescAlloc += psRISubEntry->ui64Size; + 
} + + _GenerateMEMDESCEntryString(psRISubEntry, + IMG_TRUE, + RI_MEMDESC_ENTRY_BUF_SIZE, + acStringBuffer); + + if (szProcName[0] == '\0') + { + OSStringLCopy(szProcName, (pid == PVR_SYS_ALLOC_PID) ? + PVRSRV_MODNAME : psRISubEntry->ai8ProcName, RI_PROC_BUF_SIZE); + } + + + *ppszEntryString = acStringBuffer; + *ppHandle = (IMG_HANDLE)IMG_CONTAINER_OF(psNextProcListNode, RI_SUBLIST_ENTRY, sProcListNode); + + if (psNextProcListNode == NULL || + psNextProcListNode == (PDLLIST_NODE)hashData) + { + g_bNextGetState = RI_GET_STATE_MEMDESCS_SUMMARY; + } + /* else continue to list MEMDESCs */ + } + else + { + if (ui64TotalMemdescAlloc == 0) + { + acStringBuffer[0] = '\0'; + *ppszEntryString = acStringBuffer; + g_bNextGetState = RI_GET_STATE_MEMDESCS_SUMMARY; + } + /* else continue to list MEMDESCs */ + } + break; + + case RI_GET_STATE_MEMDESCS_SUMMARY: + OSSNPrintf(acStringBuffer, + RI_MEMDESC_SUM_BUF_SIZE, + RI_MEMDESC_SUM_FRMT, + pid, + szProcName, + ui64TotalMemdescAlloc, + ui64TotalMemdescAlloc >> 10, + ui64TotalImport, + ui64TotalImport >> 10, + (ui64TotalMemdescAlloc + ui64TotalImport), + (ui64TotalMemdescAlloc + ui64TotalImport) >> 10); + + *ppszEntryString = acStringBuffer; + ui64TotalMemdescAlloc = 0; + ui64TotalImport = 0; + szProcName[0] = '\0'; + + g_bNextGetState = RI_GET_STATE_PMR_LIST; + break; + + case RI_GET_STATE_PMR_LIST: + if (pid == PVR_SYS_ALLOC_PID) + { + OSLockAcquire(g_hSysAllocPidListLock); + acStringBuffer[0] = '\0'; + if (!psSysAllocNode) + { + psSysAllocNode = &g_sSysAllocPidListHead; + ui32ProcessedSysAllocPMRCount = 0; + } + psSysAllocNode = dllist_get_next_node(psSysAllocNode); + + if (szProcName[0] == '\0') + { + OSStringLCopy(szProcName, PVRSRV_MODNAME, RI_PROC_BUF_SIZE); + } + if (psSysAllocNode != NULL && psSysAllocNode != &g_sSysAllocPidListHead) + { + IMG_DEVMEM_SIZE_T uiPMRPhysicalBacking, uiPMRLogicalSize = 0; + + psRIEntry = IMG_CONTAINER_OF((PDLLIST_NODE)psSysAllocNode, RI_LIST_ENTRY, sSysAllocListNode); + 
_GeneratePMREntryString(psRIEntry, + IMG_TRUE, + RI_PMR_ENTRY_BUF_SIZE, + acStringBuffer); + PMR_LogicalSize(psRIEntry->psPMR, + &uiPMRLogicalSize); + ui64TotalPMRAlloc += uiPMRLogicalSize; + PMR_PhysicalSize(psRIEntry->psPMR, &uiPMRPhysicalBacking); + ui64TotalPMRBacked += uiPMRPhysicalBacking; + + ui32ProcessedSysAllocPMRCount++; + if (ui32ProcessedSysAllocPMRCount > g_ui32SysAllocPMRCount+1) + { + g_bNextGetState = RI_GET_STATE_PMR_SUMMARY; + } + /* else continue to list PMRs */ + } + else + { + g_bNextGetState = RI_GET_STATE_PMR_SUMMARY; + } + *ppszEntryString = (IMG_CHAR *)acStringBuffer; + OSLockRelease(g_hSysAllocPidListLock); + } + else + { + IMG_BOOL bPMRToDisplay = IMG_FALSE; + + /* Iterate through the 'touched' PMRs and display details */ + if (!psNode) + { + psNode = dllist_get_next_node(&sListFirst); + } + else + { + psNode = dllist_get_next_node(psNode); + } + + while ((psNode != NULL && psNode != &sListFirst) && + !bPMRToDisplay) + { + psRIEntry = IMG_CONTAINER_OF(psNode, RI_LIST_ENTRY, sListNode); + if (psRIEntry->pid == pid) + { + IMG_DEVMEM_SIZE_T uiPMRPhysicalBacking, uiPMRLogicalSize = 0; + + /* This PMR was 'touched', so display details and unflag it*/ + _GeneratePMREntryString(psRIEntry, + IMG_TRUE, + RI_PMR_ENTRY_BUF_SIZE, + acStringBuffer); + PMR_LogicalSize(psRIEntry->psPMR, &uiPMRLogicalSize); + ui64TotalPMRAlloc += uiPMRLogicalSize; + PMR_PhysicalSize(psRIEntry->psPMR, &uiPMRPhysicalBacking); + ui64TotalPMRBacked += uiPMRPhysicalBacking; + + /* Remember the name of the process for 1 PMR for the summary */ + if (szProcName[0] == '\0') + { + OSStringLCopy(szProcName, psRIEntry->ai8ProcName, RI_PROC_BUF_SIZE); + } + bPMRToDisplay = IMG_TRUE; + } + else + { + psNode = dllist_get_next_node(psNode); + } + } + + if (psNode == NULL || (psNode == &sListFirst)) + { + g_bNextGetState = RI_GET_STATE_PMR_SUMMARY; + } + /* else continue listing PMRs */ + } + break; + + case RI_GET_STATE_PMR_SUMMARY: + OSSNPrintf(acStringBuffer, + RI_PMR_SUM_BUF_SIZE, + 
RI_PMR_SUM_FRMT, + pid, + szProcName, + ui64TotalPMRAlloc, + ui64TotalPMRAlloc >> 10, + ui64TotalPMRBacked, + ui64TotalPMRBacked >> 10); + + *ppszEntryString = acStringBuffer; + ui64TotalPMRAlloc = 0; + ui64TotalPMRBacked = 0; + szProcName[0] = '\0'; + psSysAllocNode = NULL; + + g_bNextGetState = RI_GET_STATE_END; + break; + + default: + PVR_DPF((PVR_DBG_ERROR, "%s: Bad %d)",__func__, g_bNextGetState)); + + __fallthrough; + case RI_GET_STATE_END: + /* Reset state ready for the next gpu_mem_area file to display */ + *ppszEntryString = NULL; + *ppHandle = NULL; + psNode = NULL; + szProcName[0] = '\0'; + + g_bNextGetState = RI_GET_STATE_MEMDESCS_LIST_START; + return IMG_FALSE; + break; + } + + return IMG_TRUE; +} + +/* Function used to produce string containing info for MEMDESC RI entries (used for both debugfs and kernel log output) */ +static void _GenerateMEMDESCEntryString(RI_SUBLIST_ENTRY *psRISubEntry, + IMG_BOOL bDebugFs, + IMG_UINT16 ui16MaxStrLen, + IMG_CHAR *pszEntryString) +{ + IMG_CHAR szProc[RI_MEMDESC_ENTRY_PROC_BUF_SIZE]; + IMG_CHAR szImport[RI_MEMDESC_ENTRY_IMPORT_BUF_SIZE]; + IMG_CHAR szEntryFormat[RI_MEMDESC_ENTRY_FRMT_SIZE]; + const IMG_CHAR *pszAnnotationText; + IMG_PID uiRIPid = 0; + PMR* psRIPMR = NULL; + IMG_UINT32 ui32RIPMRFlags = 0; + + if (psRISubEntry->psRI != NULL) + { + uiRIPid = psRISubEntry->psRI->pid; + psRIPMR = psRISubEntry->psRI->psPMR; + ui32RIPMRFlags = psRISubEntry->psRI->ui32RIPMRFlags; + } + + OSSNPrintf(szEntryFormat, + RI_MEMDESC_ENTRY_FRMT_SIZE, + RI_MEMDESC_ENTRY_FRMT, + DEVMEM_ANNOTATION_MAX_LEN); + + if (!bDebugFs) + { + /* we don't include process ID info for debugfs output */ + OSSNPrintf(szProc, + RI_MEMDESC_ENTRY_PROC_BUF_SIZE, + RI_MEMDESC_ENTRY_PROC_FRMT, + psRISubEntry->pid, + psRISubEntry->ai8ProcName); + } + + if (psRISubEntry->bIsImport && psRIPMR) + { + OSSNPrintf((IMG_CHAR *)&szImport, + RI_MEMDESC_ENTRY_IMPORT_BUF_SIZE, + RI_MEMDESC_ENTRY_IMPORT_FRMT, + uiRIPid); + /* Set pszAnnotationText to that of the 
'parent' PMR RI entry */ + pszAnnotationText = PMR_GetAnnotation(psRIPMR); + } + else if (!psRISubEntry->bIsSuballoc && psRIPMR) + { + /* Set pszAnnotationText to that of the 'parent' PMR RI entry */ + pszAnnotationText = PMR_GetAnnotation(psRIPMR); + } + else + { + /* Set pszAnnotationText to that of the MEMDESC RI entry */ + pszAnnotationText = psRISubEntry->ai8TextB; + } + + /* Don't print memdescs if they are local imports + * (i.e. imported PMRs allocated by this process) + */ + if (bDebugFs && + ((psRISubEntry->sVAddr.uiAddr + psRISubEntry->ui64Offset) == 0) && + (psRISubEntry->bIsImport && ((psRISubEntry->pid == uiRIPid) + || (psRISubEntry->pid == PVR_SYS_ALLOC_PID)))) + { + /* Don't print this entry */ + pszEntryString[0] = '\0'; + } + else + { + OSSNPrintf(pszEntryString, + ui16MaxStrLen, + szEntryFormat, + (bDebugFs ? "" : " "), + psRISubEntry->pid, + (psRISubEntry->sVAddr.uiAddr + psRISubEntry->ui64Offset), + pszAnnotationText, + (bDebugFs ? "" : (char *)szProc), + psRISubEntry->ui64Size, + psRIPMR, + (psRISubEntry->bIsImport ? (char *)&szImport : ""), + (!psRISubEntry->bIsImport && (ui32RIPMRFlags & RI_FLAG_SYSALLOC_PMR) && (psRISubEntry->pid != PVR_SYS_ALLOC_PID)) ? g_szSysAllocImport : "", + (psRIPMR && PMR_IsUnpinned(psRIPMR)) ? RI_MEMDESC_ENTRY_UNPINNED_FRMT : "", + (bDebugFs ? 
'\n' : ' ')); + } +} + +/* Function used to produce string containing info for PMR RI entries (used for debugfs and kernel log output) */ +static void _GeneratePMREntryString(RI_LIST_ENTRY *psRIEntry, + IMG_BOOL bDebugFs, + IMG_UINT16 ui16MaxStrLen, + IMG_CHAR *pszEntryString) +{ + const IMG_CHAR* pszAnnotationText; + IMG_DEVMEM_SIZE_T uiLogicalSize = 0; + IMG_DEVMEM_SIZE_T uiPhysicalSize = 0; + IMG_CHAR szEntryFormat[RI_PMR_ENTRY_FRMT_SIZE]; + + PMR_LogicalSize(psRIEntry->psPMR, &uiLogicalSize); + + PMR_PhysicalSize(psRIEntry->psPMR, &uiPhysicalSize); + + OSSNPrintf(szEntryFormat, + RI_PMR_ENTRY_FRMT_SIZE, + RI_PMR_ENTRY_FRMT, + DEVMEM_ANNOTATION_MAX_LEN); + + /* Set pszAnnotationText to that PMR RI entry */ + pszAnnotationText = (IMG_PCHAR) PMR_GetAnnotation(psRIEntry->psPMR); + + OSSNPrintf(pszEntryString, + ui16MaxStrLen, + szEntryFormat, + (bDebugFs ? "" : " "), + psRIEntry->pid, + (void*)psRIEntry->psPMR, + pszAnnotationText, + uiLogicalSize, + uiPhysicalSize, + (bDebugFs ? '\n' : ' ')); +} + +/*! +******************************************************************************* + + @Function _DumpList + + @Description + Dumps out RI List entries according to parameters passed. + + @input psPMR - If not NULL, function will output the RI entries for + the specified PMR only + @input pid - If non-zero, the function will only output MEMDESC RI + entries made by the process with ID pid. + If zero, all MEMDESC RI entries will be output. 
+ + @Return PVRSRV_ERROR + +******************************************************************************/ +static PVRSRV_ERROR _DumpList(PMR *psPMR, IMG_PID pid) +{ + RI_LIST_ENTRY *psRIEntry = NULL; + RI_SUBLIST_ENTRY *psRISubEntry = NULL; + IMG_UINT16 ui16SubEntriesParsed = 0; + uintptr_t hashData = 0; + IMG_PID hashKey; + PMR *pPMRHashKey = psPMR; + IMG_BOOL bDisplayedThisPMR = IMG_FALSE; + IMG_UINT64 ui64LogicalSize = 0; + + PVR_RETURN_IF_INVALID_PARAM(psPMR); + + if (g_pRIHashTable && g_pProcHashTable) + { + if (pid != 0) + { + /* look-up pid in Hash Table */ + hashKey = pid; + hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&hashKey); + if (hashData) + { + psRISubEntry = IMG_CONTAINER_OF((PDLLIST_NODE)hashData, RI_SUBLIST_ENTRY, sProcListNode); + if (psRISubEntry) + { + psRIEntry = psRISubEntry->psRI; + } + } + } + else + { + /* Look-up psPMR in Hash Table */ + hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey); + psRIEntry = (RI_LIST_ENTRY *)hashData; + } + if (!psRIEntry) + { + /* No entry found in hash table */ + return PVRSRV_ERROR_NOT_FOUND; + } + while (psRIEntry) + { + bDisplayedThisPMR = IMG_FALSE; + /* Output details for RI entry */ + if (!pid) + { + PMR_LogicalSize(psPMR, (IMG_DEVMEM_SIZE_T*)&ui64LogicalSize); + + _RIOutput (("%s <%p> suballocs:%d size:0x%010" IMG_UINT64_FMTSPECx, + PMR_GetAnnotation(psRIEntry->psPMR), + psRIEntry->psPMR, + (IMG_UINT)psRIEntry->ui16SubListCount, + ui64LogicalSize)); + bDisplayedThisPMR = IMG_TRUE; + } + ui16SubEntriesParsed = 0; + if (psRIEntry->ui16SubListCount) + { +#if _DUMP_LINKEDLIST_INFO + _RIOutput (("RI LIST: {sSubListFirst.psNextNode:0x%p}\n", + psRIEntry->sSubListFirst.psNextNode)); +#endif /* _DUMP_LINKEDLIST_INFO */ + if (!pid) + { + psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRIEntry->sSubListFirst)), + RI_SUBLIST_ENTRY, sListNode); + } + /* Traverse RI sublist and output details for each entry */ + while (psRISubEntry) + { + if (psRIEntry) + { + if 
((ui16SubEntriesParsed >= psRIEntry->ui16SubListCount)) + { + break; + } + if (!bDisplayedThisPMR) + { + PMR_LogicalSize(psPMR, (IMG_DEVMEM_SIZE_T*)&ui64LogicalSize); + + _RIOutput (("%s <%p> suballocs:%d size:0x%010" IMG_UINT64_FMTSPECx, + PMR_GetAnnotation(psRIEntry->psPMR), + psRIEntry->psPMR, + (IMG_UINT)psRIEntry->ui16SubListCount, + ui64LogicalSize)); + bDisplayedThisPMR = IMG_TRUE; + } + } +#if _DUMP_LINKEDLIST_INFO + _RIOutput (("RI LIST: [this subentry:0x%p]\n",psRISubEntry)); + _RIOutput (("RI LIST: psRI:0x%p\n",psRISubEntry->psRI)); +#endif /* _DUMP_LINKEDLIST_INFO */ + + { + IMG_CHAR szEntryString[RI_MEMDESC_ENTRY_BUF_SIZE]; + + _GenerateMEMDESCEntryString(psRISubEntry, + IMG_FALSE, + RI_MEMDESC_ENTRY_BUF_SIZE, + szEntryString); + _RIOutput (("%s",szEntryString)); + } + + if (pid) + { + if ((dllist_get_next_node(&(psRISubEntry->sProcListNode)) == NULL) || + (dllist_get_next_node(&(psRISubEntry->sProcListNode)) == (PDLLIST_NODE)hashData)) + { + psRISubEntry = NULL; + } + else + { + psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sProcListNode)), + RI_SUBLIST_ENTRY, sProcListNode); + if (psRISubEntry) + { + if (psRIEntry != psRISubEntry->psRI) + { + /* + * The next MEMDESC in the process linked list is in a different PMR + */ + psRIEntry = psRISubEntry->psRI; + bDisplayedThisPMR = IMG_FALSE; + } + } + } + } + else + { + ui16SubEntriesParsed++; + psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sListNode)), + RI_SUBLIST_ENTRY, sListNode); + } + } + } + if (!pid && psRIEntry) + { + if (ui16SubEntriesParsed != psRIEntry->ui16SubListCount) + { + /* + * Output error message as sublist does not contain the + * number of entries indicated by sublist count + */ + _RIOutput (("RI ERROR: RI sublist contains %d entries, not %d entries\n", + ui16SubEntriesParsed, psRIEntry->ui16SubListCount)); + } + else if (psRIEntry->ui16SubListCount && !dllist_get_next_node(&(psRIEntry->sSubListFirst))) + { + /* + * Output error message as 
sublist is empty but sublist count + * is not zero + */ + _RIOutput (("RI ERROR: ui16SubListCount=%d for empty RI sublist\n", + psRIEntry->ui16SubListCount)); + } + } + psRIEntry = NULL; + } + } + return PVRSRV_OK; +} + +/*! +******************************************************************************* + + @Function RIDumpAllKM + + @Description + Dumps out the contents of all RI List entries (i.e. for all + MEMDESC allocations for each PMR). + At present, output is directed to Kernel log + via PVR_DPF. + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RIDumpAllKM(void) +{ + if (g_pRIHashTable) + { + return HASH_Iterate(g_pRIHashTable, (HASH_pfnCallback)_DumpAllEntries, NULL); + } + return PVRSRV_OK; +} + +/*! +******************************************************************************* + + @Function RIDumpProcessKM + + @Description + Dumps out the contents of all MEMDESC RI List entries (for every + PMR) which have been allocate by the specified process only. + At present, output is directed to Kernel log + via PVR_DPF. + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RIDumpProcessKM(IMG_PID pid) +{ + PVRSRV_ERROR eError; + IMG_UINT32 dummyPMR; + + if (!g_pProcHashTable) + { + return PVRSRV_OK; + } + + /* Acquire RI lock*/ + _RILock(); + + eError = _DumpList((PMR *)&dummyPMR, pid); + + /* Release RI lock*/ + _RIUnlock(); + + return eError; +} + +/*! +******************************************************************************* + + @Function _TotalAllocsForProcess + + @Description + Totals all PMR physical backing for given process. + + @input pid - ID of process. + + @input ePhysHeapType - type of Physical Heap for which to total allocs + + @Return Size of all physical backing for PID's PMRs allocated from the + specified heap type (in bytes). 
+ +******************************************************************************/ +static IMG_INT32 _TotalAllocsForProcess(IMG_PID pid, PHYS_HEAP_TYPE ePhysHeapType) +{ + RI_LIST_ENTRY *psRIEntry = NULL; + RI_SUBLIST_ENTRY *psInitialRISubEntry = NULL; + RI_SUBLIST_ENTRY *psRISubEntry = NULL; + uintptr_t hashData = 0; + IMG_PID hashKey; + IMG_INT32 i32TotalPhysical = 0; + + if (g_pRIHashTable && g_pProcHashTable) + { + if (pid == PVR_SYS_ALLOC_PID) + { + IMG_UINT32 ui32ProcessedSysAllocPMRCount = 0; + DLLIST_NODE *psSysAllocNode = NULL; + + OSLockAcquire(g_hSysAllocPidListLock); + psSysAllocNode = dllist_get_next_node(&g_sSysAllocPidListHead); + while (psSysAllocNode && psSysAllocNode != &g_sSysAllocPidListHead) + { + psRIEntry = IMG_CONTAINER_OF((PDLLIST_NODE)psSysAllocNode, RI_LIST_ENTRY, sSysAllocListNode); + ui32ProcessedSysAllocPMRCount++; + if (PhysHeapGetType(PMR_PhysHeap(psRIEntry->psPMR)) == ePhysHeapType) + { + IMG_UINT64 ui64PhysicalSize; + + PMR_PhysicalSize(psRIEntry->psPMR, (IMG_DEVMEM_SIZE_T*)&ui64PhysicalSize); + if (((IMG_UINT64)i32TotalPhysical + ui64PhysicalSize > 0x7fffffff)) + { + PVR_DPF((PVR_DBG_WARNING, "%s: i32TotalPhysical exceeding size for i32",__func__)); + } + i32TotalPhysical += (IMG_INT32)(ui64PhysicalSize & 0x00000000ffffffff); + } + psSysAllocNode = dllist_get_next_node(psSysAllocNode); + } + OSLockRelease(g_hSysAllocPidListLock); + } + else + { + if (pid != 0) + { + /* look-up pid in Hash Table */ + hashKey = pid; + hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&hashKey); + if (hashData) + { + psInitialRISubEntry = IMG_CONTAINER_OF((PDLLIST_NODE)hashData, RI_SUBLIST_ENTRY, sProcListNode); + psRISubEntry = psInitialRISubEntry; + if (psRISubEntry) + { + psRIEntry = psRISubEntry->psRI; + } + } + } + + while (psRISubEntry && psRIEntry) + { + if (!psRISubEntry->bIsImport && !(psRIEntry->ui32RIPMRFlags & RI_FLAG_PMR_PHYS_COUNTED_BY_DEBUGFS) && + (pid == PVR_SYS_ALLOC_PID || !(psRIEntry->ui32RIPMRFlags & 
RI_FLAG_SYSALLOC_PMR)) && + (PhysHeapGetType(PMR_PhysHeap(psRIEntry->psPMR)) == ePhysHeapType)) + { + IMG_UINT64 ui64PhysicalSize; + + + PMR_PhysicalSize(psRIEntry->psPMR, (IMG_DEVMEM_SIZE_T*)&ui64PhysicalSize); + if (((IMG_UINT64)i32TotalPhysical + ui64PhysicalSize > 0x7fffffff)) + { + PVR_DPF((PVR_DBG_WARNING, "%s: i32TotalPhysical exceeding size for i32",__func__)); + } + i32TotalPhysical += (IMG_INT32)(ui64PhysicalSize & 0x00000000ffffffff); + psRIEntry->ui32RIPMRFlags |= RI_FLAG_PMR_PHYS_COUNTED_BY_DEBUGFS; + } + if ((dllist_get_next_node(&(psRISubEntry->sProcListNode)) == NULL) || + (dllist_get_next_node(&(psRISubEntry->sProcListNode)) == (PDLLIST_NODE)hashData)) + { + psRISubEntry = NULL; + psRIEntry = NULL; + } + else + { + psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sProcListNode)), + RI_SUBLIST_ENTRY, sProcListNode); + if (psRISubEntry) + { + psRIEntry = psRISubEntry->psRI; + } + } + } + psRISubEntry = psInitialRISubEntry; + if (psRISubEntry) + { + psRIEntry = psRISubEntry->psRI; + } + while (psRISubEntry && psRIEntry) + { + psRIEntry->ui32RIPMRFlags &= ~RI_FLAG_PMR_PHYS_COUNTED_BY_DEBUGFS; + if ((dllist_get_next_node(&(psRISubEntry->sProcListNode)) == NULL) || + (dllist_get_next_node(&(psRISubEntry->sProcListNode)) == (PDLLIST_NODE)hashData)) + { + psRISubEntry = NULL; + psRIEntry = NULL; + } + else + { + psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sProcListNode)), + RI_SUBLIST_ENTRY, sProcListNode); + if (psRISubEntry) + { + psRIEntry = psRISubEntry->psRI; + } + } + } + } + } + return i32TotalPhysical; +} + +/*! +******************************************************************************* + + @Function RITotalAllocProcessKM + + @Description + Returns the total of allocated GPU memory (backing for PMRs) + which has been allocated from the specific heap by the specified + process only. 
+ + @Return Amount of physical backing allocated (in bytes) + +******************************************************************************/ +IMG_INT32 RITotalAllocProcessKM(IMG_PID pid, PHYS_HEAP_TYPE ePhysHeapType) +{ + IMG_INT32 i32BackingTotal = 0; + + if (g_pProcHashTable) + { + /* Acquire RI lock*/ + _RILock(); + + i32BackingTotal = _TotalAllocsForProcess(pid, ePhysHeapType); + + /* Release RI lock*/ + _RIUnlock(); + } + return i32BackingTotal; +} + +#if defined(DEBUG) +/*! +******************************************************************************* + + @Function _DumpProcessList + + @Description + Dumps out RI List entries according to parameters passed. + + @input psPMR - If not NULL, function will output the RI entries for + the specified PMR only + @input pid - If non-zero, the function will only output MEMDESC RI + entries made by the process with ID pid. + If zero, all MEMDESC RI entries will be output. + + @Return PVRSRV_ERROR + +******************************************************************************/ +static PVRSRV_ERROR _DumpProcessList(PMR *psPMR, + IMG_PID pid, + IMG_UINT64 ui64Offset, + IMG_DEV_VIRTADDR *psDevVAddr) +{ + RI_LIST_ENTRY *psRIEntry = NULL; + RI_SUBLIST_ENTRY *psRISubEntry = NULL; + IMG_UINT16 ui16SubEntriesParsed = 0; + uintptr_t hashData = 0; + PMR *pPMRHashKey = psPMR; + + psDevVAddr->uiAddr = 0; + + PVR_RETURN_IF_INVALID_PARAM(psPMR); + + if (g_pRIHashTable && g_pProcHashTable) + { + PVR_ASSERT(psPMR && pid); + + /* Look-up psPMR in Hash Table */ + hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey); + psRIEntry = (RI_LIST_ENTRY *)hashData; + + if (!psRIEntry) + { + /* No entry found in hash table */ + return PVRSRV_ERROR_NOT_FOUND; + } + + if (psRIEntry->ui16SubListCount) + { + psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRIEntry->sSubListFirst)), + RI_SUBLIST_ENTRY, sListNode); + + /* Traverse RI sublist and output details for each entry */ + while (psRISubEntry && 
(ui16SubEntriesParsed < psRIEntry->ui16SubListCount)) + { + if (pid == psRISubEntry->pid) + { + IMG_UINT64 ui64StartOffset = psRISubEntry->ui64Offset; + IMG_UINT64 ui64EndOffset = psRISubEntry->ui64Offset + psRISubEntry->ui64Size; + + if (ui64Offset >= ui64StartOffset && ui64Offset < ui64EndOffset) + { + psDevVAddr->uiAddr = psRISubEntry->sVAddr.uiAddr; + return PVRSRV_OK; + } + } + + ui16SubEntriesParsed++; + psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sListNode)), + RI_SUBLIST_ENTRY, sListNode); + } + } + } + + return PVRSRV_ERROR_INVALID_PARAMS; +} + +/*! +******************************************************************************* + + @Function RIDumpProcessListKM + + @Description + Dumps out selected contents of all MEMDESC RI List entries (for a + PMR) which have been allocate by the specified process only. + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR RIDumpProcessListKM(PMR *psPMR, + IMG_PID pid, + IMG_UINT64 ui64Offset, + IMG_DEV_VIRTADDR *psDevVAddr) +{ + PVRSRV_ERROR eError; + + if (!g_pProcHashTable) + { + return PVRSRV_OK; + } + + /* Acquire RI lock*/ + _RILock(); + + eError = _DumpProcessList(psPMR, + pid, + ui64Offset, + psDevVAddr); + + /* Release RI lock*/ + _RIUnlock(); + + return eError; +} +#endif + +static PVRSRV_ERROR _DumpAllEntries (uintptr_t k, uintptr_t v, void* pvPriv) +{ + RI_LIST_ENTRY *psRIEntry = (RI_LIST_ENTRY *)v; + + PVR_UNREFERENCED_PARAMETER (k); + PVR_UNREFERENCED_PARAMETER (pvPriv); + + return RIDumpListKM(psRIEntry->psPMR); +} + +static PVRSRV_ERROR _DeleteAllEntries (uintptr_t k, uintptr_t v, void* pvPriv) +{ + RI_LIST_ENTRY *psRIEntry = (RI_LIST_ENTRY *)v; + RI_SUBLIST_ENTRY *psRISubEntry; + PVRSRV_ERROR eResult = PVRSRV_OK; + + PVR_UNREFERENCED_PARAMETER (k); + PVR_UNREFERENCED_PARAMETER (pvPriv); + + while ((eResult == PVRSRV_OK) && (psRIEntry->ui16SubListCount > 0)) + { + psRISubEntry = 
IMG_CONTAINER_OF(dllist_get_next_node(&(psRIEntry->sSubListFirst)), RI_SUBLIST_ENTRY, sListNode); + eResult = RIDeleteMEMDESCEntryKM((RI_HANDLE)psRISubEntry); + } + if (eResult == PVRSRV_OK) + { + eResult = RIDeletePMREntryKM((RI_HANDLE)psRIEntry); + /* + * If we've deleted the Hash table, return + * an error to stop the iterator... + */ + if (!g_pRIHashTable) + { + eResult = PVRSRV_ERROR_RESOURCE_UNAVAILABLE; + } + } + return eResult; +} + +static PVRSRV_ERROR _DeleteAllProcEntries (uintptr_t k, uintptr_t v, void* pvPriv) +{ + RI_SUBLIST_ENTRY *psRISubEntry = (RI_SUBLIST_ENTRY *)v; + PVRSRV_ERROR eResult; + + PVR_UNREFERENCED_PARAMETER (k); + PVR_UNREFERENCED_PARAMETER (pvPriv); + + eResult = RIDeleteMEMDESCEntryKM((RI_HANDLE) psRISubEntry); + if (eResult == PVRSRV_OK && !g_pProcHashTable) + { + /* + * If we've deleted the Hash table, return + * an error to stop the iterator... + */ + eResult = PVRSRV_ERROR_RESOURCE_UNAVAILABLE; + } + + return eResult; +} + +#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ diff --git a/drivers/gpu/drm/phytium/octopus/ri_server.h b/drivers/gpu/drm/phytium/octopus/ri_server.h new file mode 100644 index 000000000000..73b4dfba4c16 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/ri_server.h @@ -0,0 +1,106 @@ +/*************************************************************************/ /*! +@File ri_server.h +@Title Resource Information abstraction +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Resource Information (RI) functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef RI_SERVER_H +#define RI_SERVER_H + +#include "img_defs.h" +#include "ri_typedefs.h" +#include "pmr.h" +#include "pvrsrv_error.h" +#include "physheap.h" + +PVRSRV_ERROR RIInitKM(void); +void RIDeInitKM(void); + +void RILockAcquireKM(void); +void RILockReleaseKM(void); + +PVRSRV_ERROR RIWritePMREntryKM(PMR *psPMR); + +PVRSRV_ERROR RIWritePMREntryWithOwnerKM(PMR *psPMR, + IMG_PID ui32Owner); + +PVRSRV_ERROR RIWriteMEMDESCEntryKM(PMR *psPMR, + IMG_UINT32 ui32TextBSize, + const IMG_CHAR ai8TextB[DEVMEM_ANNOTATION_MAX_LEN], + IMG_UINT64 uiOffset, + IMG_UINT64 uiSize, + IMG_BOOL bIsImport, + IMG_BOOL bIsSuballoc, + RI_HANDLE *phRIHandle); + +PVRSRV_ERROR RIWriteProcListEntryKM(IMG_UINT32 ui32TextBSize, + const IMG_CHAR *psz8TextB, + IMG_UINT64 ui64Size, + IMG_UINT64 ui64DevVAddr, + RI_HANDLE *phRIHandle); + +PVRSRV_ERROR RIUpdateMEMDESCAddrKM(RI_HANDLE hRIHandle, + IMG_DEV_VIRTADDR sVAddr); + +PVRSRV_ERROR RIDeletePMREntryKM(RI_HANDLE hRIHandle); +PVRSRV_ERROR RIDeleteMEMDESCEntryKM(RI_HANDLE hRIHandle); + +PVRSRV_ERROR RIDeleteListKM(void); + +PVRSRV_ERROR RIDumpListKM(PMR *psPMR); + +PVRSRV_ERROR RIDumpAllKM(void); + +PVRSRV_ERROR RIDumpProcessKM(IMG_PID pid); + +#if defined(DEBUG) +PVRSRV_ERROR RIDumpProcessListKM(PMR *psPMR, + IMG_PID pid, + IMG_UINT64 ui64Offset, + IMG_DEV_VIRTADDR *psDevVAddr); +#endif + +IMG_BOOL RIGetListEntryKM(IMG_PID pid, + IMG_HANDLE **ppHandle, + IMG_CHAR **ppszEntryString); + +IMG_INT32 RITotalAllocProcessKM(IMG_PID pid, PHYS_HEAP_TYPE ePhysHeapType); + +#endif /* RI_SERVER_H */ diff --git a/drivers/gpu/drm/phytium/octopus/ri_typedefs.h b/drivers/gpu/drm/phytium/octopus/ri_typedefs.h new file mode 100644 index 000000000000..5f71f5d37ea4 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/ri_typedefs.h @@ -0,0 +1,52 @@ +/*************************************************************************/ /*! 
+@File +@Title Resource Information (RI) Management +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Client side part of RI management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef RI_TYPEDEFS_H +#define RI_TYPEDEFS_H + +#include "img_types.h" + +typedef struct RI_SUBLIST_ENTRY RI_ENTRY; +typedef RI_ENTRY* RI_HANDLE; + +#endif /* #ifndef RI_TYPEDEFS_H */ diff --git a/drivers/gpu/drm/phytium/octopus/rogue_trace_events.h b/drivers/gpu/drm/phytium/octopus/rogue_trace_events.h new file mode 100644 index 000000000000..0e4d1c1c1c4f --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/rogue_trace_events.h @@ -0,0 +1,543 @@ +/*************************************************************************/ /*! +@File +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM rogue + +#if !defined(ROGUE_TRACE_EVENTS_H) || defined(TRACE_HEADER_MULTI_READ) +#define ROGUE_TRACE_EVENTS_H + +#include +#include +#include +#include + +#define show_secs_from_ns(ns) \ + ({ \ + u64 t = ns + (NSEC_PER_USEC / 2); \ + do_div(t, NSEC_PER_SEC); \ + t; \ + }) + +#define show_usecs_from_ns(ns) \ + ({ \ + u64 t = ns + (NSEC_PER_USEC / 2); \ + u32 rem; \ + do_div(t, NSEC_PER_USEC); \ + rem = do_div(t, USEC_PER_SEC); \ + }) + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) +int trace_fence_update_enabled_callback(void); +#else +void trace_fence_update_enabled_callback(void); +#endif +void trace_fence_update_disabled_callback(void); + +TRACE_EVENT_FN(rogue_fence_update, + + TP_PROTO(const char *comm, const char *cmd, const char *dm, u32 ctx_id, u32 offset, + u32 sync_fwaddr, u32 sync_value), + + TP_ARGS(comm, cmd, dm, ctx_id, offset, sync_fwaddr, sync_value), + + TP_STRUCT__entry( + __string( comm, comm ) + __string( cmd, cmd ) + __string( dm, dm ) + __field( u32, ctx_id ) + __field( u32, offset ) + __field( u32, sync_fwaddr ) + __field( u32, sync_value ) + ), + + TP_fast_assign( + __assign_str(comm, comm); + __assign_str(cmd, cmd); + __assign_str(dm, dm); + __entry->ctx_id = ctx_id; + __entry->offset = offset; + __entry->sync_fwaddr = sync_fwaddr; + __entry->sync_value = sync_value; + ), + + TP_printk("comm=%s cmd=%s dm=%s ctx_id=%lu offset=%lu sync_fwaddr=%#lx sync_value=%#lx", + __get_str(comm), + __get_str(cmd), + __get_str(dm), + (unsigned long)__entry->ctx_id, + (unsigned long)__entry->offset, + (unsigned long)__entry->sync_fwaddr, + (unsigned long)__entry->sync_value), + + trace_fence_update_enabled_callback, + trace_fence_update_disabled_callback +); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) +int trace_fence_check_enabled_callback(void); +#else +void trace_fence_check_enabled_callback(void); +#endif +void 
trace_fence_check_disabled_callback(void); + +TRACE_EVENT_FN(rogue_fence_check, + + TP_PROTO(const char *comm, const char *cmd, const char *dm, u32 ctx_id, u32 offset, + u32 sync_fwaddr, u32 sync_value), + + TP_ARGS(comm, cmd, dm, ctx_id, offset, sync_fwaddr, sync_value), + + TP_STRUCT__entry( + __string( comm, comm ) + __string( cmd, cmd ) + __string( dm, dm ) + __field( u32, ctx_id ) + __field( u32, offset ) + __field( u32, sync_fwaddr ) + __field( u32, sync_value ) + ), + + TP_fast_assign( + __assign_str(comm, comm); + __assign_str(cmd, cmd); + __assign_str(dm, dm); + __entry->ctx_id = ctx_id; + __entry->offset = offset; + __entry->sync_fwaddr = sync_fwaddr; + __entry->sync_value = sync_value; + ), + + TP_printk("comm=%s cmd=%s dm=%s ctx_id=%lu offset=%lu sync_fwaddr=%#lx sync_value=%#lx", + __get_str(comm), + __get_str(cmd), + __get_str(dm), + (unsigned long)__entry->ctx_id, + (unsigned long)__entry->offset, + (unsigned long)__entry->sync_fwaddr, + (unsigned long)__entry->sync_value), + + trace_fence_check_enabled_callback, + trace_fence_check_disabled_callback +); + +TRACE_EVENT(rogue_job_enqueue, + + TP_PROTO(u32 ctx_id, u32 int_id, u32 ext_id, + const char *kick_type), + + TP_ARGS(ctx_id, int_id, ext_id, kick_type), + + TP_STRUCT__entry( + __field(u32, ctx_id) + __field(u32, int_id) + __field(u32, ext_id) + __string(kick_type, kick_type) + ), + + TP_fast_assign( + __entry->ctx_id = ctx_id; + __entry->int_id = int_id; + __entry->ext_id = ext_id; + __assign_str(kick_type, kick_type); + ), + + TP_printk("ctx_id=%lu int_id=%lu ext_id=%lu kick_type=%s", + (unsigned long) __entry->ctx_id, + (unsigned long) __entry->int_id, + (unsigned long) __entry->ext_id, + __get_str(kick_type) + ) +); + +TRACE_EVENT(rogue_sched_switch, + + TP_PROTO(const char *work_type, u32 switch_type, u64 timestamp, u32 next_ctx_id, + u32 next_prio, u32 next_int_id, u32 next_ext_id), + + TP_ARGS(work_type, switch_type, timestamp, next_ctx_id, next_prio, next_int_id, next_ext_id), + + 
TP_STRUCT__entry( + __string(work_type, work_type) + __field(u32, switch_type) + __field(u64, timestamp) + __field(u32, next_ctx_id) + __field(u32, next_prio) + __field(u32, next_int_id) + __field(u32, next_ext_id) + ), + + TP_fast_assign( + __assign_str(work_type, work_type); + __entry->switch_type = switch_type; + __entry->timestamp = timestamp; + __entry->next_ctx_id = next_ctx_id; + __entry->next_prio = next_prio; + __entry->next_int_id = next_int_id; + __entry->next_ext_id = next_ext_id; + ), + + TP_printk("ts=%llu.%06lu next_ctx_id=%lu next_int_id=%lu next_ext_id=%lu" + " next_prio=%lu work_type=%s switch_type=%s", + (unsigned long long) show_secs_from_ns(__entry->timestamp), + (unsigned long) show_usecs_from_ns(__entry->timestamp), + (unsigned long) __entry->next_ctx_id, + (unsigned long) __entry->next_int_id, + (unsigned long) __entry->next_ext_id, + (unsigned long) __entry->next_prio, + __get_str(work_type), + __print_symbolic(__entry->switch_type, + /* These values are from ospvr_gputrace.h. 
*/ + { 1, "begin" }, + { 2, "end" }) + ) +); + +TRACE_EVENT(rogue_create_fw_context, + + TP_PROTO(const char *comm, const char *dm, u32 ctx_id), + + TP_ARGS(comm, dm, ctx_id), + + TP_STRUCT__entry( + __string( comm, comm ) + __string( dm, dm ) + __field( u32, ctx_id ) + ), + + TP_fast_assign( + __assign_str(comm, comm); + __assign_str(dm, dm); + __entry->ctx_id = ctx_id; + ), + + TP_printk("comm=%s dm=%s ctx_id=%lu", + __get_str(comm), + __get_str(dm), + (unsigned long)__entry->ctx_id) +); + +void PVRGpuTraceEnableUfoCallback(void); +void PVRGpuTraceDisableUfoCallback(void); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) +int PVRGpuTraceEnableUfoCallbackWrapper(void); +#else +#define PVRGpuTraceEnableUfoCallbackWrapper \ + PVRGpuTraceEnableUfoCallback +#endif + +TRACE_EVENT_FN(rogue_ufo_update, + + TP_PROTO(u64 timestamp, u32 ctx_id, u32 int_id, u32 ext_id, + u32 fwaddr, u32 old_value, u32 new_value), + + TP_ARGS(timestamp, ctx_id, int_id, ext_id, fwaddr, old_value, + new_value), + + TP_STRUCT__entry( + __field( u64, timestamp ) + __field( u32, ctx_id ) + __field( u32, int_id ) + __field( u32, ext_id ) + __field( u32, fwaddr ) + __field( u32, old_value ) + __field( u32, new_value ) + ), + + TP_fast_assign( + __entry->timestamp = timestamp; + __entry->ctx_id = ctx_id; + __entry->int_id = int_id; + __entry->ext_id = ext_id; + __entry->fwaddr = fwaddr; + __entry->old_value = old_value; + __entry->new_value = new_value; + ), + + TP_printk("ts=%llu.%06lu ctx_id=%lu int_id=%lu ext_id=%lu" + " fwaddr=%#lx old_value=%#lx new_value=%#lx", + (unsigned long long)show_secs_from_ns(__entry->timestamp), + (unsigned long)show_usecs_from_ns(__entry->timestamp), + (unsigned long)__entry->ctx_id, + (unsigned long)__entry->int_id, + (unsigned long)__entry->ext_id, + (unsigned long)__entry->fwaddr, + (unsigned long)__entry->old_value, + (unsigned long)__entry->new_value), + PVRGpuTraceEnableUfoCallbackWrapper, + PVRGpuTraceDisableUfoCallback +); + 
+TRACE_EVENT_FN(rogue_ufo_check_fail, + + TP_PROTO(u64 timestamp, u32 ctx_id, u32 int_id, u32 ext_id, + u32 fwaddr, u32 value, u32 required), + + TP_ARGS(timestamp, ctx_id, int_id, ext_id, fwaddr, value, required), + + TP_STRUCT__entry( + __field( u64, timestamp ) + __field( u32, ctx_id ) + __field( u32, int_id ) + __field( u32, ext_id ) + __field( u32, fwaddr ) + __field( u32, value ) + __field( u32, required ) + ), + + TP_fast_assign( + __entry->timestamp = timestamp; + __entry->ctx_id = ctx_id; + __entry->int_id = int_id; + __entry->ext_id = ext_id; + __entry->fwaddr = fwaddr; + __entry->value = value; + __entry->required = required; + ), + + TP_printk("ts=%llu.%06lu ctx_id=%lu int_id=%lu ext_id=%lu" + " fwaddr=%#lx value=%#lx required=%#lx", + (unsigned long long)show_secs_from_ns(__entry->timestamp), + (unsigned long)show_usecs_from_ns(__entry->timestamp), + (unsigned long)__entry->ctx_id, + (unsigned long)__entry->int_id, + (unsigned long)__entry->ext_id, + (unsigned long)__entry->fwaddr, + (unsigned long)__entry->value, + (unsigned long)__entry->required), + PVRGpuTraceEnableUfoCallbackWrapper, + PVRGpuTraceDisableUfoCallback +); + +TRACE_EVENT_FN(rogue_ufo_pr_check_fail, + + TP_PROTO(u64 timestamp, u32 ctx_id, u32 int_id, u32 ext_id, + u32 fwaddr, u32 value, u32 required), + + TP_ARGS(timestamp, ctx_id, int_id, ext_id, fwaddr, value, required), + + TP_STRUCT__entry( + __field( u64, timestamp ) + __field( u32, ctx_id ) + __field( u32, int_id ) + __field( u32, ext_id ) + __field( u32, fwaddr ) + __field( u32, value ) + __field( u32, required ) + ), + + TP_fast_assign( + __entry->timestamp = timestamp; + __entry->ctx_id = ctx_id; + __entry->int_id = int_id; + __entry->ext_id = ext_id; + __entry->fwaddr = fwaddr; + __entry->value = value; + __entry->required = required; + ), + + TP_printk("ts=%llu.%06lu ctx_id=%lu int_id=%lu ext_id=%lu" + " fwaddr=%#lx value=%#lx required=%#lx", + (unsigned long long)show_secs_from_ns(__entry->timestamp), + (unsigned 
long)show_usecs_from_ns(__entry->timestamp), + (unsigned long)__entry->ctx_id, + (unsigned long)__entry->int_id, + (unsigned long)__entry->ext_id, + (unsigned long)__entry->fwaddr, + (unsigned long)__entry->value, + (unsigned long)__entry->required), + PVRGpuTraceEnableUfoCallbackWrapper, + PVRGpuTraceDisableUfoCallback +); + +TRACE_EVENT_FN(rogue_ufo_check_success, + + TP_PROTO(u64 timestamp, u32 ctx_id, u32 int_id, u32 ext_id, + u32 fwaddr, u32 value), + + TP_ARGS(timestamp, ctx_id, int_id, ext_id, fwaddr, value), + + TP_STRUCT__entry( + __field( u64, timestamp ) + __field( u32, ctx_id ) + __field( u32, int_id ) + __field( u32, ext_id ) + __field( u32, fwaddr ) + __field( u32, value ) + ), + + TP_fast_assign( + __entry->timestamp = timestamp; + __entry->ctx_id = ctx_id; + __entry->int_id = int_id; + __entry->ext_id = ext_id; + __entry->fwaddr = fwaddr; + __entry->value = value; + ), + + TP_printk("ts=%llu.%06lu ctx_id=%lu int_id=%lu ext_id=%lu" + " fwaddr=%#lx value=%#lx", + (unsigned long long)show_secs_from_ns(__entry->timestamp), + (unsigned long)show_usecs_from_ns(__entry->timestamp), + (unsigned long)__entry->ctx_id, + (unsigned long)__entry->int_id, + (unsigned long)__entry->ext_id, + (unsigned long)__entry->fwaddr, + (unsigned long)__entry->value), + PVRGpuTraceEnableUfoCallbackWrapper, + PVRGpuTraceDisableUfoCallback +); + +TRACE_EVENT_FN(rogue_ufo_pr_check_success, + + TP_PROTO(u64 timestamp, u32 ctx_id, u32 int_id, u32 ext_id, + u32 fwaddr, u32 value), + + TP_ARGS(timestamp, ctx_id, int_id, ext_id, fwaddr, value), + + TP_STRUCT__entry( + __field( u64, timestamp ) + __field( u32, ctx_id ) + __field( u32, int_id ) + __field( u32, ext_id ) + __field( u32, fwaddr ) + __field( u32, value ) + ), + + TP_fast_assign( + __entry->timestamp = timestamp; + __entry->ctx_id = ctx_id; + __entry->int_id = int_id; + __entry->ext_id = ext_id; + __entry->fwaddr = fwaddr; + __entry->value = value; + ), + + TP_printk("ts=%llu.%06lu ctx_id=%lu int_id=%lu ext_id=%lu" + " 
fwaddr=%#lx value=%#lx", + (unsigned long long)show_secs_from_ns(__entry->timestamp), + (unsigned long)show_usecs_from_ns(__entry->timestamp), + (unsigned long)__entry->ctx_id, + (unsigned long)__entry->int_id, + (unsigned long)__entry->ext_id, + (unsigned long)__entry->fwaddr, + (unsigned long)__entry->value), + PVRGpuTraceEnableUfoCallbackWrapper, + PVRGpuTraceDisableUfoCallback +); + +TRACE_EVENT(rogue_events_lost, + + TP_PROTO(u32 event_source, u32 last_ordinal, u32 curr_ordinal), + + TP_ARGS(event_source, last_ordinal, curr_ordinal), + + TP_STRUCT__entry( + __field( u32, event_source ) + __field( u32, last_ordinal ) + __field( u32, curr_ordinal ) + ), + + TP_fast_assign( + __entry->event_source = event_source; + __entry->last_ordinal = last_ordinal; + __entry->curr_ordinal = curr_ordinal; + ), + + TP_printk("event_source=%s last_ordinal=%u curr_ordinal=%u", + __print_symbolic(__entry->event_source, {0, "GPU"}, {1, "Host"}), + __entry->last_ordinal, + __entry->curr_ordinal) +); + +void PVRGpuTraceEnableFirmwareActivityCallback(void); +void PVRGpuTraceDisableFirmwareActivityCallback(void); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) +int PVRGpuTraceEnableFirmwareActivityCallbackWrapper(void); +#else +#define PVRGpuTraceEnableFirmwareActivityCallbackWrapper \ + PVRGpuTraceEnableFirmwareActivityCallback +#endif + +TRACE_EVENT_FN(rogue_firmware_activity, + + TP_PROTO(u64 timestamp, const char *task, u32 fw_event), + + TP_ARGS(timestamp, task, fw_event), + + TP_STRUCT__entry( + __field( u64, timestamp ) + __string( task, task ) + __field( u32, fw_event ) + ), + + TP_fast_assign( + __entry->timestamp = timestamp; + __assign_str(task, task); + __entry->fw_event = fw_event; + ), + + TP_printk("ts=%llu.%06lu task=%s event=%s", + (unsigned long long)show_secs_from_ns(__entry->timestamp), + (unsigned long)show_usecs_from_ns(__entry->timestamp), + __get_str(task), + __print_symbolic(__entry->fw_event, + /* These values are from ospvr_gputrace.h. 
*/ + { 1, "begin" }, + { 2, "end" })), + + PVRGpuTraceEnableFirmwareActivityCallbackWrapper, + PVRGpuTraceDisableFirmwareActivityCallback +); + +#undef show_secs_from_ns +#undef show_usecs_from_ns + +#endif /* ROGUE_TRACE_EVENTS_H */ + +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_PATH . + +/* This is needed because the name of this file doesn't match TRACE_SYSTEM. */ +#define TRACE_INCLUDE_FILE rogue_trace_events + +/* This part must be outside protection */ +#include diff --git a/drivers/gpu/drm/phytium/octopus/server_cache_bridge.c b/drivers/gpu/drm/phytium/octopus/server_cache_bridge.c new file mode 100644 index 000000000000..14f9da5c11dc --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/server_cache_bridge.c @@ -0,0 +1,448 @@ +/******************************************************************************* +@File +@Title Server bridge for cache +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for cache +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "cache_km.h" + +#include "common_cache_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgeCacheOpQueue(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psCacheOpQueueIN_UI8, + IMG_UINT8 * psCacheOpQueueOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_CACHEOPQUEUE *psCacheOpQueueIN = + (PVRSRV_BRIDGE_IN_CACHEOPQUEUE *) IMG_OFFSET_ADDR(psCacheOpQueueIN_UI8, 0); + PVRSRV_BRIDGE_OUT_CACHEOPQUEUE *psCacheOpQueueOUT = + (PVRSRV_BRIDGE_OUT_CACHEOPQUEUE *) IMG_OFFSET_ADDR(psCacheOpQueueOUT_UI8, 0); + + PMR **psPMRInt = NULL; + IMG_HANDLE *hPMRInt2 = NULL; + IMG_UINT64 *ui64AddressInt = NULL; + IMG_DEVMEM_OFFSET_T *uiOffsetInt = NULL; + IMG_DEVMEM_SIZE_T *uiSizeInt = NULL; + PVRSRV_CACHE_OP *iuCacheOpInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psCacheOpQueueIN->ui32NumCacheOps * sizeof(PMR *)) + + (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE)) + + (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64)) + + (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T)) + + (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T)) + + (psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP)) + 0; + + if (unlikely(psCacheOpQueueIN->ui32NumCacheOps > CACHE_BATCH_MAX)) + { + psCacheOpQueueOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto CacheOpQueue_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* 
Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psCacheOpQueueIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psCacheOpQueueIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psCacheOpQueueOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto CacheOpQueue_exit; + } + } + } + + if (psCacheOpQueueIN->ui32NumCacheOps != 0) + { + psPMRInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(PMR *); + hPMRInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hPMRInt2, (const void __user *)psCacheOpQueueIN->phPMR, + psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto CacheOpQueue_exit; + } + } + if (psCacheOpQueueIN->ui32NumCacheOps != 0) + { + ui64AddressInt = (IMG_UINT64 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64); + } + + /* Copy the data over */ + if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64) > 0) + { + if (OSCopyFromUser + (NULL, ui64AddressInt, (const void __user *)psCacheOpQueueIN->pui64Address, + psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64)) != PVRSRV_OK) + { + psCacheOpQueueOUT->eError = 
PVRSRV_ERROR_INVALID_PARAMS; + + goto CacheOpQueue_exit; + } + } + if (psCacheOpQueueIN->ui32NumCacheOps != 0) + { + uiOffsetInt = + (IMG_DEVMEM_OFFSET_T *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T); + } + + /* Copy the data over */ + if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T) > 0) + { + if (OSCopyFromUser + (NULL, uiOffsetInt, (const void __user *)psCacheOpQueueIN->puiOffset, + psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T)) != PVRSRV_OK) + { + psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto CacheOpQueue_exit; + } + } + if (psCacheOpQueueIN->ui32NumCacheOps != 0) + { + uiSizeInt = (IMG_DEVMEM_SIZE_T *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T); + } + + /* Copy the data over */ + if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T) > 0) + { + if (OSCopyFromUser + (NULL, uiSizeInt, (const void __user *)psCacheOpQueueIN->puiSize, + psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T)) != PVRSRV_OK) + { + psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto CacheOpQueue_exit; + } + } + if (psCacheOpQueueIN->ui32NumCacheOps != 0) + { + iuCacheOpInt = + (PVRSRV_CACHE_OP *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP); + } + + /* Copy the data over */ + if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP) > 0) + { + if (OSCopyFromUser + (NULL, iuCacheOpInt, (const void __user *)psCacheOpQueueIN->piuCacheOp, + psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP)) != PVRSRV_OK) + { + psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto CacheOpQueue_exit; + } + } + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + { + IMG_UINT32 i; + + for (i = 0; i < psCacheOpQueueIN->ui32NumCacheOps; i++) + { + /* Look up the address from the handle */ + psCacheOpQueueOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt[i], + hPMRInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psCacheOpQueueOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto CacheOpQueue_exit; + } + } + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psCacheOpQueueOUT->eError = + CacheOpQueue(psConnection, OSGetDevNode(psConnection), + psCacheOpQueueIN->ui32NumCacheOps, + psPMRInt, + ui64AddressInt, + uiOffsetInt, + uiSizeInt, + iuCacheOpInt, + psCacheOpQueueIN->ui32OpTimeline, + psCacheOpQueueIN->ui32CurrentFenceSeqNum, + &psCacheOpQueueOUT->ui32NextFenceSeqNum); + +CacheOpQueue_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + if (hPMRInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psCacheOpQueueIN->ui32NumCacheOps; i++) + { + + /* Unreference the previously looked up handle */ + if (hPMRInt2[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMRInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + } + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeCacheOpExec(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psCacheOpExecIN_UI8, + IMG_UINT8 * psCacheOpExecOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_CACHEOPEXEC *psCacheOpExecIN = + (PVRSRV_BRIDGE_IN_CACHEOPEXEC *) IMG_OFFSET_ADDR(psCacheOpExecIN_UI8, 0); + PVRSRV_BRIDGE_OUT_CACHEOPEXEC *psCacheOpExecOUT = + (PVRSRV_BRIDGE_OUT_CACHEOPEXEC *) IMG_OFFSET_ADDR(psCacheOpExecOUT_UI8, 0); + + IMG_HANDLE hPMR = psCacheOpExecIN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psCacheOpExecOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psCacheOpExecOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto CacheOpExec_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psCacheOpExecOUT->eError = + CacheOpValExec(psPMRInt, + psCacheOpExecIN->ui64Address, + psCacheOpExecIN->uiOffset, + psCacheOpExecIN->uiSize, psCacheOpExecIN->iuCacheOp); + +CacheOpExec_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeCacheOpLog(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psCacheOpLogIN_UI8, + IMG_UINT8 * psCacheOpLogOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_CACHEOPLOG *psCacheOpLogIN = + (PVRSRV_BRIDGE_IN_CACHEOPLOG *) IMG_OFFSET_ADDR(psCacheOpLogIN_UI8, 0); + PVRSRV_BRIDGE_OUT_CACHEOPLOG *psCacheOpLogOUT = + (PVRSRV_BRIDGE_OUT_CACHEOPLOG *) IMG_OFFSET_ADDR(psCacheOpLogOUT_UI8, 0); + + IMG_HANDLE hPMR = psCacheOpLogIN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psCacheOpLogOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psCacheOpLogOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto CacheOpLog_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psCacheOpLogOUT->eError = + CacheOpLog(psPMRInt, + psCacheOpLogIN->ui64Address, + psCacheOpLogIN->uiOffset, + psCacheOpLogIN->uiSize, + psCacheOpLogIN->i64QueuedTimeUs, + psCacheOpLogIN->i64ExecuteTimeUs, + psCacheOpLogIN->i32NumRBF, psCacheOpLogIN->iuCacheOp); + +CacheOpLog_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitCACHEBridge(void); +PVRSRV_ERROR DeinitCACHEBridge(void); + +/* + * Register all CACHE functions with services + */ +PVRSRV_ERROR InitCACHEBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE, + PVRSRVBridgeCacheOpQueue, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPEXEC, + PVRSRVBridgeCacheOpExec, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPLOG, + PVRSRVBridgeCacheOpLog, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all cache functions with services + */ +PVRSRV_ERROR DeinitCACHEBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPEXEC); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPLOG); + + return PVRSRV_OK; +} diff --git a/drivers/gpu/drm/phytium/octopus/server_cmm_bridge.c b/drivers/gpu/drm/phytium/octopus/server_cmm_bridge.c new file mode 100644 index 000000000000..91f6e9708eaf --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/server_cmm_bridge.c @@ -0,0 +1,410 @@ +/******************************************************************************* +@File +@Title Server bridge for cmm +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for cmm +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include <linux/uaccess.h> + +#include "img_defs.h" + +#include "pmr.h" +#include "devicemem_server.h" + +#include "common_cmm_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include <linux/slab.h> + +#if !defined(EXCLUDE_CMM_BRIDGE) + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static PVRSRV_ERROR _DevmemIntExportCtxpsContextExportIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = DevmemIntUnexportCtx((DEVMEMINT_CTX_EXPORT *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeDevmemIntExportCtx(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntExportCtxIN_UI8, + IMG_UINT8 * psDevmemIntExportCtxOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX *psDevmemIntExportCtxIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX *) IMG_OFFSET_ADDR(psDevmemIntExportCtxIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX *psDevmemIntExportCtxOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX *) IMG_OFFSET_ADDR(psDevmemIntExportCtxOUT_UI8, + 0); + + IMG_HANDLE hContext = psDevmemIntExportCtxIN->hContext; + DEVMEMINT_CTX *psContextInt = NULL; + IMG_HANDLE hPMR = psDevmemIntExportCtxIN->hPMR; + PMR *psPMRInt = NULL; + DEVMEMINT_CTX_EXPORT *psContextExportInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntExportCtxOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psContextInt, + hContext, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE); + if (unlikely(psDevmemIntExportCtxOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntExportCtx_exit; + } + + /* Look up the address from the handle */ + psDevmemIntExportCtxOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psDevmemIntExportCtxOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntExportCtx_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntExportCtxOUT->eError = + DevmemIntExportCtx(psContextInt, psPMRInt, &psContextExportInt); + /* Exit early if bridged call fails */ + if (unlikely(psDevmemIntExportCtxOUT->eError != PVRSRV_OK)) + { + goto DevmemIntExportCtx_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psDevmemIntExportCtxOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psDevmemIntExportCtxOUT-> + hContextExport, + (void *)psContextExportInt, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + (PFN_HANDLE_RELEASE) & + _DevmemIntExportCtxpsContextExportIntRelease); + if (unlikely(psDevmemIntExportCtxOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntExportCtx_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +DevmemIntExportCtx_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hContext, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + } + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psDevmemIntExportCtxOUT->eError != PVRSRV_OK) + { + if (psContextExportInt) + { + LockHandle(KERNEL_HANDLE_BASE); + DevmemIntUnexportCtx(psContextExportInt); + UnlockHandle(KERNEL_HANDLE_BASE); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntUnexportCtx(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntUnexportCtxIN_UI8, + IMG_UINT8 * psDevmemIntUnexportCtxOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX *psDevmemIntUnexportCtxIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX *) IMG_OFFSET_ADDR(psDevmemIntUnexportCtxIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX *psDevmemIntUnexportCtxOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX *) + IMG_OFFSET_ADDR(psDevmemIntUnexportCtxOUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psDevmemIntUnexportCtxOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) psDevmemIntUnexportCtxIN->hContextExport, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT); + if (unlikely((psDevmemIntUnexportCtxOUT->eError != PVRSRV_OK) && + (psDevmemIntUnexportCtxOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psDevmemIntUnexportCtxOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntUnexportCtx_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +DevmemIntUnexportCtx_exit: + + return 0; +} + +static PVRSRV_ERROR _DevmemIntAcquireRemoteCtxpsContextIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = DevmemIntCtxDestroy((DEVMEMINT_CTX *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeDevmemIntAcquireRemoteCtx(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntAcquireRemoteCtxIN_UI8, + IMG_UINT8 * psDevmemIntAcquireRemoteCtxOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX *psDevmemIntAcquireRemoteCtxIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX *) + IMG_OFFSET_ADDR(psDevmemIntAcquireRemoteCtxIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX *psDevmemIntAcquireRemoteCtxOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX *) + IMG_OFFSET_ADDR(psDevmemIntAcquireRemoteCtxOUT_UI8, 0); + + IMG_HANDLE hPMR = psDevmemIntAcquireRemoteCtxIN->hPMR; + PMR *psPMRInt = NULL; + DEVMEMINT_CTX *psContextInt = NULL; + IMG_HANDLE hPrivDataInt = NULL; + + psDevmemIntAcquireRemoteCtxOUT->hContext = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntAcquireRemoteCtxOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntAcquireRemoteCtx_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntAcquireRemoteCtxOUT->eError = + DevmemIntAcquireRemoteCtx(psPMRInt, &psContextInt, &hPrivDataInt); + /* Exit early if bridged call fails */ + if (unlikely(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)) + { + goto DevmemIntAcquireRemoteCtx_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psDevmemIntAcquireRemoteCtxOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psDevmemIntAcquireRemoteCtxOUT->hContext, + (void *)psContextInt, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + (PFN_HANDLE_RELEASE) & + _DevmemIntAcquireRemoteCtxpsContextIntRelease); + if (unlikely(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntAcquireRemoteCtx_exit; + } + + psDevmemIntAcquireRemoteCtxOUT->eError = + PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase, + &psDevmemIntAcquireRemoteCtxOUT->hPrivData, + (void *)hPrivDataInt, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + psDevmemIntAcquireRemoteCtxOUT->hContext); + if (unlikely(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntAcquireRemoteCtx_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +DevmemIntAcquireRemoteCtx_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK) + { + if (psDevmemIntAcquireRemoteCtxOUT->hContext) + { + PVRSRV_ERROR eError; + + /* Lock over handle creation cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + eError = PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + (IMG_HANDLE) + psDevmemIntAcquireRemoteCtxOUT-> + hContext, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", __func__, PVRSRVGetErrorString(eError))); + } + /* Releasing the handle should free/destroy/release the resource. + * This should never fail... */ + PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); + + /* Avoid freeing/destroying/releasing the resource a second time below */ + psContextInt = NULL; + /* Release now we have cleaned up creation handles. */ + UnlockHandle(psConnection->psHandleBase); + + } + + if (psContextInt) + { + DevmemIntCtxDestroy(psContextInt); + } + } + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +#endif /* EXCLUDE_CMM_BRIDGE */ + +#if !defined(EXCLUDE_CMM_BRIDGE) +PVRSRV_ERROR InitCMMBridge(void); +PVRSRV_ERROR DeinitCMMBridge(void); + +/* + * Register all CMM functions with services + */ +PVRSRV_ERROR InitCMMBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX, + PVRSRVBridgeDevmemIntExportCtx, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX, + PVRSRVBridgeDevmemIntUnexportCtx, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX, + PVRSRVBridgeDevmemIntAcquireRemoteCtx, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all cmm functions with services + */ +PVRSRV_ERROR DeinitCMMBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX); + + return 
PVRSRV_OK; +} +#else /* EXCLUDE_CMM_BRIDGE */ +/* This bridge is conditional on EXCLUDE_CMM_BRIDGE - when defined, + * do not populate the dispatch table with its functions + */ +#define InitCMMBridge() \ + PVRSRV_OK + +#define DeinitCMMBridge() \ + PVRSRV_OK + +#endif /* EXCLUDE_CMM_BRIDGE */ diff --git a/drivers/gpu/drm/phytium/octopus/server_devicememhistory_bridge.c b/drivers/gpu/drm/phytium/octopus/server_devicememhistory_bridge.c new file mode 100644 index 000000000000..891b4906378d --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/server_devicememhistory_bridge.c @@ -0,0 +1,772 @@ +/******************************************************************************* +@File +@Title Server bridge for devicememhistory +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for devicememhistory +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include <linux/uaccess.h> + +#include "img_defs.h" + +#include "devicemem_history_server.h" + +#include "common_devicememhistory_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include <linux/slab.h> + +#include "lock.h" + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgeDevicememHistoryMap(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevicememHistoryMapIN_UI8, + IMG_UINT8 * psDevicememHistoryMapOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP *psDevicememHistoryMapIN = + (PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP *) IMG_OFFSET_ADDR(psDevicememHistoryMapIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP *psDevicememHistoryMapOUT = + (PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP *) IMG_OFFSET_ADDR(psDevicememHistoryMapOUT_UI8, + 0); + + IMG_HANDLE hPMR = psDevicememHistoryMapIN->hPMR; + PMR *psPMRInt = NULL; + IMG_CHAR *uiTextInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0; + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psDevicememHistoryMapIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDevicememHistoryMapIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psDevicememHistoryMapOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto DevicememHistoryMap_exit; + } + } + } + + { + uiTextInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiTextInt, (const void __user *)psDevicememHistoryMapIN->puiText, + DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psDevicememHistoryMapOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto DevicememHistoryMap_exit; + } + ((IMG_CHAR *) uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) - 1] = '\0'; + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevicememHistoryMapOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psDevicememHistoryMapOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevicememHistoryMap_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psDevicememHistoryMapOUT->eError = + DevicememHistoryMapKM(psPMRInt, + psDevicememHistoryMapIN->uiOffset, + psDevicememHistoryMapIN->sDevVAddr, + psDevicememHistoryMapIN->uiSize, + uiTextInt, + psDevicememHistoryMapIN->ui32Log2PageSize, + psDevicememHistoryMapIN->ui32AllocationIndex, + &psDevicememHistoryMapOUT->ui32AllocationIndexOut); + +DevicememHistoryMap_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevicememHistoryUnmap(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevicememHistoryUnmapIN_UI8, + IMG_UINT8 * psDevicememHistoryUnmapOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP *psDevicememHistoryUnmapIN = + (PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP *) + IMG_OFFSET_ADDR(psDevicememHistoryUnmapIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP *psDevicememHistoryUnmapOUT = + (PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP *) + IMG_OFFSET_ADDR(psDevicememHistoryUnmapOUT_UI8, 0); + + IMG_HANDLE hPMR = psDevicememHistoryUnmapIN->hPMR; + PMR *psPMRInt = NULL; + IMG_CHAR *uiTextInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0; + + if (ui32BufferSize != 0) + { 
+#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psDevicememHistoryUnmapIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDevicememHistoryUnmapIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psDevicememHistoryUnmapOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto DevicememHistoryUnmap_exit; + } + } + } + + { + uiTextInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiTextInt, (const void __user *)psDevicememHistoryUnmapIN->puiText, + DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psDevicememHistoryUnmapOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto DevicememHistoryUnmap_exit; + } + ((IMG_CHAR *) uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) - 1] = '\0'; + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevicememHistoryUnmapOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psDevicememHistoryUnmapOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevicememHistoryUnmap_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psDevicememHistoryUnmapOUT->eError = + DevicememHistoryUnmapKM(psPMRInt, + psDevicememHistoryUnmapIN->uiOffset, + psDevicememHistoryUnmapIN->sDevVAddr, + psDevicememHistoryUnmapIN->uiSize, + uiTextInt, + psDevicememHistoryUnmapIN->ui32Log2PageSize, + psDevicememHistoryUnmapIN->ui32AllocationIndex, + &psDevicememHistoryUnmapOUT->ui32AllocationIndexOut); + +DevicememHistoryUnmap_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevicememHistoryMapVRange(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevicememHistoryMapVRangeIN_UI8, + IMG_UINT8 * psDevicememHistoryMapVRangeOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE *psDevicememHistoryMapVRangeIN = + (PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE *) + IMG_OFFSET_ADDR(psDevicememHistoryMapVRangeIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE *psDevicememHistoryMapVRangeOUT = + (PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE *) + IMG_OFFSET_ADDR(psDevicememHistoryMapVRangeOUT_UI8, 0); + + IMG_CHAR *uiTextInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + 
if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psDevicememHistoryMapVRangeIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDevicememHistoryMapVRangeIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psDevicememHistoryMapVRangeOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto DevicememHistoryMapVRange_exit; + } + } + } + + { + uiTextInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiTextInt, (const void __user *)psDevicememHistoryMapVRangeIN->puiText, + DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psDevicememHistoryMapVRangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto DevicememHistoryMapVRange_exit; + } + ((IMG_CHAR *) uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) - 1] = '\0'; + } + + psDevicememHistoryMapVRangeOUT->eError = + DevicememHistoryMapVRangeKM(psDevicememHistoryMapVRangeIN->sBaseDevVAddr, + psDevicememHistoryMapVRangeIN->ui32ui32StartPage, + psDevicememHistoryMapVRangeIN->ui32NumPages, + psDevicememHistoryMapVRangeIN->uiAllocSize, + uiTextInt, + psDevicememHistoryMapVRangeIN->ui32Log2PageSize, + psDevicememHistoryMapVRangeIN->ui32AllocationIndex, + &psDevicememHistoryMapVRangeOUT->ui32AllocationIndexOut); + +DevicememHistoryMapVRange_exit: + + /* Allocated space should be 
equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevicememHistoryUnmapVRange(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevicememHistoryUnmapVRangeIN_UI8, + IMG_UINT8 * psDevicememHistoryUnmapVRangeOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE *psDevicememHistoryUnmapVRangeIN = + (PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE *) + IMG_OFFSET_ADDR(psDevicememHistoryUnmapVRangeIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE *psDevicememHistoryUnmapVRangeOUT = + (PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE *) + IMG_OFFSET_ADDR(psDevicememHistoryUnmapVRangeOUT_UI8, 0); + + IMG_CHAR *uiTextInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psDevicememHistoryUnmapVRangeIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psDevicememHistoryUnmapVRangeIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psDevicememHistoryUnmapVRangeOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto DevicememHistoryUnmapVRange_exit; + } + } + } + + { + uiTextInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiTextInt, (const void __user *)psDevicememHistoryUnmapVRangeIN->puiText, + DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psDevicememHistoryUnmapVRangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto DevicememHistoryUnmapVRange_exit; + } + ((IMG_CHAR *) uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) - 1] = '\0'; + } + + psDevicememHistoryUnmapVRangeOUT->eError = + DevicememHistoryUnmapVRangeKM(psDevicememHistoryUnmapVRangeIN->sBaseDevVAddr, + psDevicememHistoryUnmapVRangeIN->ui32ui32StartPage, + psDevicememHistoryUnmapVRangeIN->ui32NumPages, + psDevicememHistoryUnmapVRangeIN->uiAllocSize, + uiTextInt, + psDevicememHistoryUnmapVRangeIN->ui32Log2PageSize, + psDevicememHistoryUnmapVRangeIN->ui32AllocationIndex, + &psDevicememHistoryUnmapVRangeOUT-> + ui32AllocationIndexOut); + +DevicememHistoryUnmapVRange_exit: + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT 
+PVRSRVBridgeDevicememHistorySparseChange(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevicememHistorySparseChangeIN_UI8, + IMG_UINT8 * psDevicememHistorySparseChangeOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE *psDevicememHistorySparseChangeIN = + (PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE *) + IMG_OFFSET_ADDR(psDevicememHistorySparseChangeIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE *psDevicememHistorySparseChangeOUT = + (PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE *) + IMG_OFFSET_ADDR(psDevicememHistorySparseChangeOUT_UI8, 0); + + IMG_HANDLE hPMR = psDevicememHistorySparseChangeIN->hPMR; + PMR *psPMRInt = NULL; + IMG_CHAR *uiTextInt = NULL; + IMG_UINT32 *ui32AllocPageIndicesInt = NULL; + IMG_UINT32 *ui32FreePageIndicesInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + + (psDevicememHistorySparseChangeIN->ui32AllocPageCount * sizeof(IMG_UINT32)) + + (psDevicememHistorySparseChangeIN->ui32FreePageCount * sizeof(IMG_UINT32)) + 0; + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psDevicememHistorySparseChangeIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psDevicememHistorySparseChangeIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psDevicememHistorySparseChangeOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto DevicememHistorySparseChange_exit; + } + } + } + + { + uiTextInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiTextInt, + (const void __user *)psDevicememHistorySparseChangeIN->puiText, + DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto DevicememHistorySparseChange_exit; + } + ((IMG_CHAR *) uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) - 1] = '\0'; + } + if (psDevicememHistorySparseChangeIN->ui32AllocPageCount != 0) + { + ui32AllocPageIndicesInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psDevicememHistorySparseChangeIN->ui32AllocPageCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psDevicememHistorySparseChangeIN->ui32AllocPageCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32AllocPageIndicesInt, + (const void __user *)psDevicememHistorySparseChangeIN->pui32AllocPageIndices, + psDevicememHistorySparseChangeIN->ui32AllocPageCount * sizeof(IMG_UINT32)) != + PVRSRV_OK) + { + psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto DevicememHistorySparseChange_exit; + } + } + if (psDevicememHistorySparseChangeIN->ui32FreePageCount != 0) + { + ui32FreePageIndicesInt = + 
(IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psDevicememHistorySparseChangeIN->ui32FreePageCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psDevicememHistorySparseChangeIN->ui32FreePageCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32FreePageIndicesInt, + (const void __user *)psDevicememHistorySparseChangeIN->pui32FreePageIndices, + psDevicememHistorySparseChangeIN->ui32FreePageCount * sizeof(IMG_UINT32)) != + PVRSRV_OK) + { + psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto DevicememHistorySparseChange_exit; + } + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevicememHistorySparseChangeOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psDevicememHistorySparseChangeOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevicememHistorySparseChange_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevicememHistorySparseChangeOUT->eError = + DevicememHistorySparseChangeKM(psPMRInt, + psDevicememHistorySparseChangeIN->uiOffset, + psDevicememHistorySparseChangeIN->sDevVAddr, + psDevicememHistorySparseChangeIN->uiSize, + uiTextInt, + psDevicememHistorySparseChangeIN->ui32Log2PageSize, + psDevicememHistorySparseChangeIN->ui32AllocPageCount, + ui32AllocPageIndicesInt, + psDevicememHistorySparseChangeIN->ui32FreePageCount, + ui32FreePageIndicesInt, + psDevicememHistorySparseChangeIN->ui32AllocationIndex, + &psDevicememHistorySparseChangeOUT-> + ui32AllocationIndexOut); + +DevicememHistorySparseChange_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +static POS_LOCK pDEVICEMEMHISTORYBridgeLock; + +PVRSRV_ERROR InitDEVICEMEMHISTORYBridge(void); +PVRSRV_ERROR DeinitDEVICEMEMHISTORYBridge(void); + +/* + * Register all DEVICEMEMHISTORY functions with services + */ +PVRSRV_ERROR InitDEVICEMEMHISTORYBridge(void) +{ + PVR_LOG_RETURN_IF_ERROR(OSLockCreate(&pDEVICEMEMHISTORYBridgeLock), "OSLockCreate"); + + SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, + PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP, + PVRSRVBridgeDevicememHistoryMap, pDEVICEMEMHISTORYBridgeLock); + + SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, + PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP, + PVRSRVBridgeDevicememHistoryUnmap, pDEVICEMEMHISTORYBridgeLock); + + SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, + PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE, + PVRSRVBridgeDevicememHistoryMapVRange, pDEVICEMEMHISTORYBridgeLock); + + SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, + PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE, + PVRSRVBridgeDevicememHistoryUnmapVRange, pDEVICEMEMHISTORYBridgeLock); + + SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, + PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE, + PVRSRVBridgeDevicememHistorySparseChange, + 
pDEVICEMEMHISTORYBridgeLock); + + return PVRSRV_OK; +} + +/* + * Unregister all devicememhistory functions with services + */ +PVRSRV_ERROR DeinitDEVICEMEMHISTORYBridge(void) +{ + PVR_LOG_RETURN_IF_ERROR(OSLockDestroy(pDEVICEMEMHISTORYBridgeLock), "OSLockDestroy"); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, + PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, + PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, + PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, + PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, + PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE); + + return PVRSRV_OK; +} diff --git a/drivers/gpu/drm/phytium/octopus/server_di_bridge.c b/drivers/gpu/drm/phytium/octopus/server_di_bridge.c new file mode 100644 index 000000000000..77bb456789f4 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/server_di_bridge.c @@ -0,0 +1,594 @@ +/******************************************************************************* +@File +@Title Server bridge for di +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for di +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "di_impl_brg.h" + +#include "common_di_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static PVRSRV_ERROR _DICreateContextpsContextIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = DIDestroyContextKM((DI_CONTEXT *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeDICreateContext(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDICreateContextIN_UI8, + IMG_UINT8 * psDICreateContextOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DICREATECONTEXT *psDICreateContextIN = + (PVRSRV_BRIDGE_IN_DICREATECONTEXT *) IMG_OFFSET_ADDR(psDICreateContextIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DICREATECONTEXT *psDICreateContextOUT = + (PVRSRV_BRIDGE_OUT_DICREATECONTEXT *) IMG_OFFSET_ADDR(psDICreateContextOUT_UI8, 0); + + IMG_CHAR *puiStreamNameInt = NULL; + DI_CONTEXT *psContextInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) + 0; + + PVR_UNREFERENCED_PARAMETER(psDICreateContextIN); + + psDICreateContextOUT->puiStreamName = psDICreateContextIN->puiStreamName; + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psDICreateContextIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDICreateContextIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psDICreateContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto DICreateContext_exit; + } + } + } + + if (IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset) != NULL) + { + puiStreamNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR); + } + + psDICreateContextOUT->eError = DICreateContextKM(puiStreamNameInt, &psContextInt); + /* Exit early if bridged call fails */ + if (unlikely(psDICreateContextOUT->eError != PVRSRV_OK)) + { + goto DICreateContext_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psDICreateContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psDICreateContextOUT->hContext, + (void *)psContextInt, + PVRSRV_HANDLE_TYPE_DI_CONTEXT, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + (PFN_HANDLE_RELEASE) & + _DICreateContextpsContextIntRelease); + if (unlikely(psDICreateContextOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DICreateContext_exit; + } + + /* Release now we have created handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + /* If dest ptr is non-null and we have data to copy */ + if ((puiStreamNameInt) && ((PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) > 0)) + { + if (unlikely + (OSCopyToUser + (NULL, (void __user *)psDICreateContextOUT->puiStreamName, puiStreamNameInt, + (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR))) != PVRSRV_OK)) + { + psDICreateContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto DICreateContext_exit; + } + } + +DICreateContext_exit: + + if (psDICreateContextOUT->eError != PVRSRV_OK) + { + if (psContextInt) + { + DIDestroyContextKM(psContextInt); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDIDestroyContext(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDIDestroyContextIN_UI8, + IMG_UINT8 * psDIDestroyContextOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DIDESTROYCONTEXT *psDIDestroyContextIN = + (PVRSRV_BRIDGE_IN_DIDESTROYCONTEXT *) IMG_OFFSET_ADDR(psDIDestroyContextIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DIDESTROYCONTEXT *psDIDestroyContextOUT = + (PVRSRV_BRIDGE_OUT_DIDESTROYCONTEXT *) IMG_OFFSET_ADDR(psDIDestroyContextOUT_UI8, 0); + + /* Lock over handle destruction. 
*/ + LockHandle(psConnection->psHandleBase); + + psDIDestroyContextOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) psDIDestroyContextIN->hContext, + PVRSRV_HANDLE_TYPE_DI_CONTEXT); + if (unlikely((psDIDestroyContextOUT->eError != PVRSRV_OK) && + (psDIDestroyContextOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", __func__, PVRSRVGetErrorString(psDIDestroyContextOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto DIDestroyContext_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +DIDestroyContext_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgeDIReadEntry(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDIReadEntryIN_UI8, + IMG_UINT8 * psDIReadEntryOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DIREADENTRY *psDIReadEntryIN = + (PVRSRV_BRIDGE_IN_DIREADENTRY *) IMG_OFFSET_ADDR(psDIReadEntryIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DIREADENTRY *psDIReadEntryOUT = + (PVRSRV_BRIDGE_OUT_DIREADENTRY *) IMG_OFFSET_ADDR(psDIReadEntryOUT_UI8, 0); + + IMG_HANDLE hContext = psDIReadEntryIN->hContext; + DI_CONTEXT *psContextInt = NULL; + IMG_CHAR *uiEntryPathInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = (DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR)) + 0; + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psDIReadEntryIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDIReadEntryIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psDIReadEntryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto DIReadEntry_exit; + } + } + } + + { + uiEntryPathInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiEntryPathInt, (const void __user *)psDIReadEntryIN->puiEntryPath, + DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psDIReadEntryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto DIReadEntry_exit; + } + ((IMG_CHAR *) uiEntryPathInt)[(DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR)) - 1] = '\0'; + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDIReadEntryOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psContextInt, + hContext, PVRSRV_HANDLE_TYPE_DI_CONTEXT, IMG_TRUE); + if (unlikely(psDIReadEntryOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DIReadEntry_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDIReadEntryOUT->eError = + DIReadEntryKM(psContextInt, + uiEntryPathInt, psDIReadEntryIN->ui64Offset, psDIReadEntryIN->ui64Size); + +DIReadEntry_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hContext, PVRSRV_HANDLE_TYPE_DI_CONTEXT); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDIWriteEntry(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDIWriteEntryIN_UI8, + IMG_UINT8 * psDIWriteEntryOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DIWRITEENTRY *psDIWriteEntryIN = + (PVRSRV_BRIDGE_IN_DIWRITEENTRY *) IMG_OFFSET_ADDR(psDIWriteEntryIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DIWRITEENTRY *psDIWriteEntryOUT = + (PVRSRV_BRIDGE_OUT_DIWRITEENTRY *) IMG_OFFSET_ADDR(psDIWriteEntryOUT_UI8, 0); + + IMG_HANDLE hContext = psDIWriteEntryIN->hContext; + DI_CONTEXT *psContextInt = NULL; + IMG_CHAR *uiEntryPathInt = NULL; + IMG_CHAR *uiValueInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR)) + + (psDIWriteEntryIN->ui64ValueSize * sizeof(IMG_CHAR)) + 0; + + if (unlikely(psDIWriteEntryIN->ui64ValueSize > DI_IMPL_BRG_PATH_LEN)) + { + psDIWriteEntryOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto DIWriteEntry_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psDIWriteEntryIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDIWriteEntryIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psDIWriteEntryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto DIWriteEntry_exit; + } + } + } + + { + uiEntryPathInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiEntryPathInt, (const void __user *)psDIWriteEntryIN->puiEntryPath, + DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psDIWriteEntryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto DIWriteEntry_exit; + } + ((IMG_CHAR *) uiEntryPathInt)[(DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR)) - 1] = '\0'; + } + if (psDIWriteEntryIN->ui64ValueSize != 0) + { + uiValueInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psDIWriteEntryIN->ui64ValueSize * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (psDIWriteEntryIN->ui64ValueSize * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiValueInt, (const void __user *)psDIWriteEntryIN->puiValue, + psDIWriteEntryIN->ui64ValueSize * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psDIWriteEntryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto DIWriteEntry_exit; + } + ((IMG_CHAR *) uiValueInt)[(psDIWriteEntryIN->ui64ValueSize * sizeof(IMG_CHAR)) - + 1] = '\0'; + } + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDIWriteEntryOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psContextInt, + hContext, PVRSRV_HANDLE_TYPE_DI_CONTEXT, IMG_TRUE); + if (unlikely(psDIWriteEntryOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DIWriteEntry_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDIWriteEntryOUT->eError = + DIWriteEntryKM(psContextInt, + uiEntryPathInt, psDIWriteEntryIN->ui64ValueSize, uiValueInt); + +DIWriteEntry_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hContext, PVRSRV_HANDLE_TYPE_DI_CONTEXT); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDIListAllEntries(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDIListAllEntriesIN_UI8, + IMG_UINT8 * psDIListAllEntriesOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DILISTALLENTRIES *psDIListAllEntriesIN = + (PVRSRV_BRIDGE_IN_DILISTALLENTRIES *) IMG_OFFSET_ADDR(psDIListAllEntriesIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DILISTALLENTRIES *psDIListAllEntriesOUT = + (PVRSRV_BRIDGE_OUT_DILISTALLENTRIES *) IMG_OFFSET_ADDR(psDIListAllEntriesOUT_UI8, 0); + + IMG_HANDLE hContext = psDIListAllEntriesIN->hContext; + DI_CONTEXT *psContextInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDIListAllEntriesOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psContextInt, + hContext, PVRSRV_HANDLE_TYPE_DI_CONTEXT, IMG_TRUE); + if (unlikely(psDIListAllEntriesOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DIListAllEntries_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDIListAllEntriesOUT->eError = DIListAllEntriesKM(psContextInt); + +DIListAllEntries_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hContext, PVRSRV_HANDLE_TYPE_DI_CONTEXT); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitDIBridge(void); +PVRSRV_ERROR DeinitDIBridge(void); + +/* + * Register all DI functions with services + */ +PVRSRV_ERROR InitDIBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DICREATECONTEXT, + PVRSRVBridgeDICreateContext, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIDESTROYCONTEXT, + PVRSRVBridgeDIDestroyContext, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIREADENTRY, + PVRSRVBridgeDIReadEntry, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIWRITEENTRY, + PVRSRVBridgeDIWriteEntry, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DILISTALLENTRIES, + PVRSRVBridgeDIListAllEntries, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all di functions with services + */ +PVRSRV_ERROR DeinitDIBridge(void) +{ + + 
UnsetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DICREATECONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIDESTROYCONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIREADENTRY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIWRITEENTRY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DILISTALLENTRIES); + + return PVRSRV_OK; +} diff --git a/drivers/gpu/drm/phytium/octopus/server_dma_bridge.c b/drivers/gpu/drm/phytium/octopus/server_dma_bridge.c new file mode 100644 index 000000000000..d8874fe5b7d5 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/server_dma_bridge.c @@ -0,0 +1,469 @@ +/******************************************************************************* +@File +@Title Server bridge for dma +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for dma +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "dma_km.h" + +#include "common_dma_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgeDmaTransfer(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDmaTransferIN_UI8, + IMG_UINT8 * psDmaTransferOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DMATRANSFER *psDmaTransferIN = + (PVRSRV_BRIDGE_IN_DMATRANSFER *) IMG_OFFSET_ADDR(psDmaTransferIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DMATRANSFER *psDmaTransferOUT = + (PVRSRV_BRIDGE_OUT_DMATRANSFER *) IMG_OFFSET_ADDR(psDmaTransferOUT_UI8, 0); + + PMR **psPMRInt = NULL; + IMG_HANDLE *hPMRInt2 = NULL; + IMG_UINT64 *ui64AddressInt = NULL; + IMG_DEVMEM_OFFSET_T *uiOffsetInt = NULL; + IMG_DEVMEM_SIZE_T *uiSizeInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psDmaTransferIN->ui32NumDMAs * sizeof(PMR *)) + + (psDmaTransferIN->ui32NumDMAs * sizeof(IMG_HANDLE)) + + (psDmaTransferIN->ui32NumDMAs * sizeof(IMG_UINT64)) + + (psDmaTransferIN->ui32NumDMAs * sizeof(IMG_DEVMEM_OFFSET_T)) + + (psDmaTransferIN->ui32NumDMAs * sizeof(IMG_DEVMEM_SIZE_T)) + 0; + + if (unlikely(psDmaTransferIN->ui32NumDMAs > MAX_DMA_OPS)) + { + psDmaTransferOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto DmaTransfer_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psDmaTransferIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDmaTransferIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psDmaTransferOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto DmaTransfer_exit; + } + } + } + + if (psDmaTransferIN->ui32NumDMAs != 0) + { + psPMRInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psDmaTransferIN->ui32NumDMAs * sizeof(PMR *); + hPMRInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psDmaTransferIN->ui32NumDMAs * sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psDmaTransferIN->ui32NumDMAs * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hPMRInt2, (const void __user *)psDmaTransferIN->phPMR, + psDmaTransferIN->ui32NumDMAs * sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psDmaTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto DmaTransfer_exit; + } + } + if (psDmaTransferIN->ui32NumDMAs != 0) + { + ui64AddressInt = (IMG_UINT64 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psDmaTransferIN->ui32NumDMAs * sizeof(IMG_UINT64); + } + + /* Copy the data over */ + if (psDmaTransferIN->ui32NumDMAs * sizeof(IMG_UINT64) > 0) + { + if (OSCopyFromUser + (NULL, ui64AddressInt, (const void __user *)psDmaTransferIN->pui64Address, + psDmaTransferIN->ui32NumDMAs * sizeof(IMG_UINT64)) != PVRSRV_OK) + { + psDmaTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto DmaTransfer_exit; + } + } + if (psDmaTransferIN->ui32NumDMAs != 0) + { + uiOffsetInt = + (IMG_DEVMEM_OFFSET_T *) 
IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psDmaTransferIN->ui32NumDMAs * sizeof(IMG_DEVMEM_OFFSET_T); + } + + /* Copy the data over */ + if (psDmaTransferIN->ui32NumDMAs * sizeof(IMG_DEVMEM_OFFSET_T) > 0) + { + if (OSCopyFromUser + (NULL, uiOffsetInt, (const void __user *)psDmaTransferIN->puiOffset, + psDmaTransferIN->ui32NumDMAs * sizeof(IMG_DEVMEM_OFFSET_T)) != PVRSRV_OK) + { + psDmaTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto DmaTransfer_exit; + } + } + if (psDmaTransferIN->ui32NumDMAs != 0) + { + uiSizeInt = (IMG_DEVMEM_SIZE_T *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psDmaTransferIN->ui32NumDMAs * sizeof(IMG_DEVMEM_SIZE_T); + } + + /* Copy the data over */ + if (psDmaTransferIN->ui32NumDMAs * sizeof(IMG_DEVMEM_SIZE_T) > 0) + { + if (OSCopyFromUser + (NULL, uiSizeInt, (const void __user *)psDmaTransferIN->puiSize, + psDmaTransferIN->ui32NumDMAs * sizeof(IMG_DEVMEM_SIZE_T)) != PVRSRV_OK) + { + psDmaTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto DmaTransfer_exit; + } + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + { + IMG_UINT32 i; + + for (i = 0; i < psDmaTransferIN->ui32NumDMAs; i++) + { + /* Look up the address from the handle */ + psDmaTransferOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt[i], + hPMRInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psDmaTransferOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DmaTransfer_exit; + } + } + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDmaTransferOUT->eError = + DmaTransfer(psConnection, OSGetDevNode(psConnection), + psDmaTransferIN->ui32NumDMAs, + psPMRInt, + ui64AddressInt, + uiOffsetInt, + uiSizeInt, psDmaTransferIN->ui32uiFlags, psDmaTransferIN->hUpdateTimeline); + +DmaTransfer_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + if (hPMRInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psDmaTransferIN->ui32NumDMAs; i++) + { + + /* Unreference the previously looked up handle */ + if (hPMRInt2[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMRInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + } + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDmaSparseMappingTable(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDmaSparseMappingTableIN_UI8, + IMG_UINT8 * psDmaSparseMappingTableOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DMASPARSEMAPPINGTABLE *psDmaSparseMappingTableIN = + (PVRSRV_BRIDGE_IN_DMASPARSEMAPPINGTABLE *) + IMG_OFFSET_ADDR(psDmaSparseMappingTableIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DMASPARSEMAPPINGTABLE *psDmaSparseMappingTableOUT = + (PVRSRV_BRIDGE_OUT_DMASPARSEMAPPINGTABLE *) + IMG_OFFSET_ADDR(psDmaSparseMappingTableOUT_UI8, 0); + + IMG_HANDLE hPMR = psDmaSparseMappingTableIN->hPMR; + PMR *psPMRInt = NULL; + IMG_BOOL *pbTableInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psDmaSparseMappingTableIN->ui32SizeInPages * sizeof(IMG_BOOL)) + 0; + + if (psDmaSparseMappingTableIN->ui32SizeInPages > 32) + { + psDmaSparseMappingTableOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto DmaSparseMappingTable_exit; + } + + psDmaSparseMappingTableOUT->pbTable = psDmaSparseMappingTableIN->pbTable; + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input 
buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psDmaSparseMappingTableIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDmaSparseMappingTableIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psDmaSparseMappingTableOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto DmaSparseMappingTable_exit; + } + } + } + + if (psDmaSparseMappingTableIN->ui32SizeInPages != 0) + { + pbTableInt = (IMG_BOOL *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psDmaSparseMappingTableIN->ui32SizeInPages * sizeof(IMG_BOOL); + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDmaSparseMappingTableOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psDmaSparseMappingTableOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DmaSparseMappingTable_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psDmaSparseMappingTableOUT->eError = + DmaSparseMappingTable(psPMRInt, + psDmaSparseMappingTableIN->uiOffset, + psDmaSparseMappingTableIN->ui32SizeInPages, pbTableInt); + + /* If dest ptr is non-null and we have data to copy */ + if ((pbTableInt) && ((psDmaSparseMappingTableIN->ui32SizeInPages * sizeof(IMG_BOOL)) > 0)) + { + if (unlikely + (OSCopyToUser + (NULL, (void __user *)psDmaSparseMappingTableOUT->pbTable, pbTableInt, + (psDmaSparseMappingTableIN->ui32SizeInPages * sizeof(IMG_BOOL))) != + PVRSRV_OK)) + { + psDmaSparseMappingTableOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto DmaSparseMappingTable_exit; + } + } + +DmaSparseMappingTable_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDmaDeviceParams(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDmaDeviceParamsIN_UI8, + IMG_UINT8 * psDmaDeviceParamsOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DMADEVICEPARAMS *psDmaDeviceParamsIN = + (PVRSRV_BRIDGE_IN_DMADEVICEPARAMS *) IMG_OFFSET_ADDR(psDmaDeviceParamsIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DMADEVICEPARAMS *psDmaDeviceParamsOUT = + (PVRSRV_BRIDGE_OUT_DMADEVICEPARAMS *) IMG_OFFSET_ADDR(psDmaDeviceParamsOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psDmaDeviceParamsIN); + + psDmaDeviceParamsOUT->eError = + DmaDeviceParams(psConnection, OSGetDevNode(psConnection), + &psDmaDeviceParamsOUT->ui32DmaBuffAlign, + &psDmaDeviceParamsOUT->ui32DmaTransferMult); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitDMABridge(void); +PVRSRV_ERROR DeinitDMABridge(void); + +/* + * Register all DMA functions with services + */ +PVRSRV_ERROR InitDMABridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_DMA, PVRSRV_BRIDGE_DMA_DMATRANSFER, + PVRSRVBridgeDmaTransfer, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_DMA, PVRSRV_BRIDGE_DMA_DMASPARSEMAPPINGTABLE, + PVRSRVBridgeDmaSparseMappingTable, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_DMA, PVRSRV_BRIDGE_DMA_DMADEVICEPARAMS, + PVRSRVBridgeDmaDeviceParams, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all dma functions with services + */ +PVRSRV_ERROR DeinitDMABridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMA, PVRSRV_BRIDGE_DMA_DMATRANSFER); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMA, 
PVRSRV_BRIDGE_DMA_DMASPARSEMAPPINGTABLE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMA, PVRSRV_BRIDGE_DMA_DMADEVICEPARAMS); + + return PVRSRV_OK; +} diff --git a/drivers/gpu/drm/phytium/octopus/server_dmabuf_bridge.c b/drivers/gpu/drm/phytium/octopus/server_dmabuf_bridge.c new file mode 100644 index 000000000000..fd6d7cce9454 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/server_dmabuf_bridge.c @@ -0,0 +1,492 @@ +/******************************************************************************* +@File +@Title Server bridge for dmabuf +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for dmabuf +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "physmem_dmabuf.h" +#include "pmr.h" + +#include "common_dmabuf_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static PVRSRV_ERROR _PhysmemImportDmaBufpsPMRPtrIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PMRUnrefPMR((PMR *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgePhysmemImportDmaBuf(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPhysmemImportDmaBufIN_UI8, + IMG_UINT8 * psPhysmemImportDmaBufOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF *psPhysmemImportDmaBufIN = + (PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF *) IMG_OFFSET_ADDR(psPhysmemImportDmaBufIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF *psPhysmemImportDmaBufOUT = + (PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF *) 
IMG_OFFSET_ADDR(psPhysmemImportDmaBufOUT_UI8, + 0); + + IMG_CHAR *uiNameInt = NULL; + PMR *psPMRPtrInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = (psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) + 0; + + if (unlikely(psPhysmemImportDmaBufIN->ui32NameSize > DEVMEM_ANNOTATION_MAX_LEN)) + { + psPhysmemImportDmaBufOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto PhysmemImportDmaBuf_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psPhysmemImportDmaBufIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPhysmemImportDmaBufIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psPhysmemImportDmaBufOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto PhysmemImportDmaBuf_exit; + } + } + } + + if (psPhysmemImportDmaBufIN->ui32NameSize != 0) + { + uiNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiNameInt, (const void __user *)psPhysmemImportDmaBufIN->puiName, + psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psPhysmemImportDmaBufOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto PhysmemImportDmaBuf_exit; 
+ } + ((IMG_CHAR *) uiNameInt)[(psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) + - 1] = '\0'; + } + + psPhysmemImportDmaBufOUT->eError = + PhysmemImportDmaBuf(psConnection, OSGetDevNode(psConnection), + psPhysmemImportDmaBufIN->ifd, + psPhysmemImportDmaBufIN->uiFlags, + psPhysmemImportDmaBufIN->ui32NameSize, + uiNameInt, + &psPMRPtrInt, + &psPhysmemImportDmaBufOUT->uiSize, + &psPhysmemImportDmaBufOUT->uiAlign); + /* Exit early if bridged call fails */ + if (unlikely(psPhysmemImportDmaBufOUT->eError != PVRSRV_OK)) + { + goto PhysmemImportDmaBuf_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psPhysmemImportDmaBufOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psPhysmemImportDmaBufOUT-> + hPMRPtr, (void *)psPMRPtrInt, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _PhysmemImportDmaBufpsPMRPtrIntRelease); + if (unlikely(psPhysmemImportDmaBufOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PhysmemImportDmaBuf_exit; + } + + /* Release now we have created handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +PhysmemImportDmaBuf_exit: + + if (psPhysmemImportDmaBufOUT->eError != PVRSRV_OK) + { + if (psPMRPtrInt) + { + LockHandle(KERNEL_HANDLE_BASE); + PMRUnrefPMR(psPMRPtrInt); + UnlockHandle(KERNEL_HANDLE_BASE); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgePhysmemExportDmaBuf(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPhysmemExportDmaBufIN_UI8, + IMG_UINT8 * psPhysmemExportDmaBufOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF *psPhysmemExportDmaBufIN = + (PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF *) IMG_OFFSET_ADDR(psPhysmemExportDmaBufIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF *psPhysmemExportDmaBufOUT = + (PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF *) IMG_OFFSET_ADDR(psPhysmemExportDmaBufOUT_UI8, + 0); + + IMG_HANDLE hPMR = psPhysmemExportDmaBufIN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psPhysmemExportDmaBufOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psPhysmemExportDmaBufOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PhysmemExportDmaBuf_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psPhysmemExportDmaBufOUT->eError = + PhysmemExportDmaBuf(psConnection, OSGetDevNode(psConnection), + psPMRInt, &psPhysmemExportDmaBufOUT->iFd); + +PhysmemExportDmaBuf_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static PVRSRV_ERROR _PhysmemImportSparseDmaBufpsPMRPtrIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PMRUnrefPMR((PMR *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgePhysmemImportSparseDmaBuf(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPhysmemImportSparseDmaBufIN_UI8, + IMG_UINT8 * psPhysmemImportSparseDmaBufOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF *psPhysmemImportSparseDmaBufIN = + (PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF *) + IMG_OFFSET_ADDR(psPhysmemImportSparseDmaBufIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF *psPhysmemImportSparseDmaBufOUT = + (PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF *) + IMG_OFFSET_ADDR(psPhysmemImportSparseDmaBufOUT_UI8, 0); + + IMG_UINT32 *ui32MappingTableInt = NULL; + IMG_CHAR *uiNameInt = NULL; + PMR *psPMRPtrInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * sizeof(IMG_UINT32)) + + (psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) + 0; + + if (unlikely + (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks > PMR_MAX_SUPPORTED_PAGE_COUNT)) + { + psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto PhysmemImportSparseDmaBuf_exit; + } + + if (unlikely(psPhysmemImportSparseDmaBufIN->ui32NameSize > DEVMEM_ANNOTATION_MAX_LEN)) + { + psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto PhysmemImportSparseDmaBuf_exit; + } + + if 
(ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psPhysmemImportSparseDmaBufIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPhysmemImportSparseDmaBufIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto PhysmemImportSparseDmaBuf_exit; + } + } + } + + if (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks != 0) + { + ui32MappingTableInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32MappingTableInt, + (const void __user *)psPhysmemImportSparseDmaBufIN->pui32MappingTable, + psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * sizeof(IMG_UINT32)) != + PVRSRV_OK) + { + psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto PhysmemImportSparseDmaBuf_exit; + } + } + if (psPhysmemImportSparseDmaBufIN->ui32NameSize != 0) + { + uiNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiNameInt, (const void __user 
*)psPhysmemImportSparseDmaBufIN->puiName, + psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto PhysmemImportSparseDmaBuf_exit; + } + ((IMG_CHAR *) + uiNameInt)[(psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) - 1] = + '\0'; + } + + psPhysmemImportSparseDmaBufOUT->eError = + PhysmemImportSparseDmaBuf(psConnection, OSGetDevNode(psConnection), + psPhysmemImportSparseDmaBufIN->ifd, + psPhysmemImportSparseDmaBufIN->uiFlags, + psPhysmemImportSparseDmaBufIN->uiChunkSize, + psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks, + psPhysmemImportSparseDmaBufIN->ui32NumVirtChunks, + ui32MappingTableInt, + psPhysmemImportSparseDmaBufIN->ui32NameSize, + uiNameInt, + &psPMRPtrInt, + &psPhysmemImportSparseDmaBufOUT->uiSize, + &psPhysmemImportSparseDmaBufOUT->uiAlign); + /* Exit early if bridged call fails */ + if (unlikely(psPhysmemImportSparseDmaBufOUT->eError != PVRSRV_OK)) + { + goto PhysmemImportSparseDmaBuf_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psPhysmemImportSparseDmaBufOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psPhysmemImportSparseDmaBufOUT->hPMRPtr, (void *)psPMRPtrInt, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _PhysmemImportSparseDmaBufpsPMRPtrIntRelease); + if (unlikely(psPhysmemImportSparseDmaBufOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PhysmemImportSparseDmaBuf_exit; + } + + /* Release now we have created handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +PhysmemImportSparseDmaBuf_exit: + + if (psPhysmemImportSparseDmaBufOUT->eError != PVRSRV_OK) + { + if (psPMRPtrInt) + { + LockHandle(KERNEL_HANDLE_BASE); + PMRUnrefPMR(psPMRPtrInt); + UnlockHandle(KERNEL_HANDLE_BASE); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitDMABUFBridge(void); +PVRSRV_ERROR DeinitDMABUFBridge(void); + +/* + * Register all DMABUF functions with services + */ +PVRSRV_ERROR InitDMABUFBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF, + PVRSRVBridgePhysmemImportDmaBuf, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF, + PVRSRVBridgePhysmemExportDmaBuf, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF, + PVRSRVBridgePhysmemImportSparseDmaBuf, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all dmabuf functions with services + */ +PVRSRV_ERROR DeinitDMABUFBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, + PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF); + + return PVRSRV_OK; +} diff --git a/drivers/gpu/drm/phytium/octopus/server_htbuffer_bridge.c b/drivers/gpu/drm/phytium/octopus/server_htbuffer_bridge.c new file mode 100644 index 000000000000..70fec9dcdeb0 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/server_htbuffer_bridge.c @@ -0,0 +1,322 @@ 
+/******************************************************************************* +@File +@Title Server bridge for htbuffer +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for htbuffer +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "htbserver.h" + +#include "common_htbuffer_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +#include "lock.h" + +#if !defined(EXCLUDE_HTBUFFER_BRIDGE) + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgeHTBControl(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psHTBControlIN_UI8, + IMG_UINT8 * psHTBControlOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_HTBCONTROL *psHTBControlIN = + (PVRSRV_BRIDGE_IN_HTBCONTROL *) IMG_OFFSET_ADDR(psHTBControlIN_UI8, 0); + PVRSRV_BRIDGE_OUT_HTBCONTROL *psHTBControlOUT = + (PVRSRV_BRIDGE_OUT_HTBCONTROL *) IMG_OFFSET_ADDR(psHTBControlOUT_UI8, 0); + + IMG_UINT32 *ui32GroupEnableInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = (psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32)) + 0; + + if (unlikely(psHTBControlIN->ui32NumGroups > HTB_FLAG_NUM_EL)) + { + psHTBControlOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto HTBControl_exit; 
+ } + + PVR_UNREFERENCED_PARAMETER(psConnection); + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psHTBControlIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psHTBControlIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psHTBControlOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto HTBControl_exit; + } + } + } + + if (psHTBControlIN->ui32NumGroups != 0) + { + ui32GroupEnableInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32GroupEnableInt, + (const void __user *)psHTBControlIN->pui32GroupEnable, + psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psHTBControlOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto HTBControl_exit; + } + } + + psHTBControlOUT->eError = + HTBControlKM(psHTBControlIN->ui32NumGroups, + ui32GroupEnableInt, + psHTBControlIN->ui32LogLevel, + psHTBControlIN->ui32EnablePID, + psHTBControlIN->ui32LogMode, psHTBControlIN->ui32OpMode); + +HTBControl_exit: + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT 
+PVRSRVBridgeHTBLog(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psHTBLogIN_UI8, + IMG_UINT8 * psHTBLogOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_HTBLOG *psHTBLogIN = + (PVRSRV_BRIDGE_IN_HTBLOG *) IMG_OFFSET_ADDR(psHTBLogIN_UI8, 0); + PVRSRV_BRIDGE_OUT_HTBLOG *psHTBLogOUT = + (PVRSRV_BRIDGE_OUT_HTBLOG *) IMG_OFFSET_ADDR(psHTBLogOUT_UI8, 0); + + IMG_UINT32 *ui32ArgsInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = (psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32)) + 0; + + if (unlikely(psHTBLogIN->ui32NumArgs > HTB_LOG_MAX_PARAMS)) + { + psHTBLogOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto HTBLog_exit; + } + + PVR_UNREFERENCED_PARAMETER(psConnection); + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psHTBLogIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psHTBLogIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psHTBLogOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto HTBLog_exit; + } + } + } + + if (psHTBLogIN->ui32NumArgs != 0) + { + ui32ArgsInt = (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32ArgsInt, (const void __user *)psHTBLogIN->pui32Args, + psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psHTBLogOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto HTBLog_exit; + } + } + + psHTBLogOUT->eError = + HTBLogKM(psHTBLogIN->ui32PID, + psHTBLogIN->ui32TID, + psHTBLogIN->ui64TimeStamp, + psHTBLogIN->ui32SF, psHTBLogIN->ui32NumArgs, ui32ArgsInt); + +HTBLog_exit: + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +static POS_LOCK pHTBUFFERBridgeLock; + +#endif /* EXCLUDE_HTBUFFER_BRIDGE */ + +#if !defined(EXCLUDE_HTBUFFER_BRIDGE) +PVRSRV_ERROR InitHTBUFFERBridge(void); +PVRSRV_ERROR DeinitHTBUFFERBridge(void); + +/* + * Register all HTBUFFER functions with services + */ +PVRSRV_ERROR InitHTBUFFERBridge(void) +{ + PVR_LOG_RETURN_IF_ERROR(OSLockCreate(&pHTBUFFERBridgeLock), "OSLockCreate"); + + 
SetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL, + PVRSRVBridgeHTBControl, pHTBUFFERBridgeLock); + + SetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBLOG, + PVRSRVBridgeHTBLog, pHTBUFFERBridgeLock); + + return PVRSRV_OK; +} + +/* + * Unregister all htbuffer functions with services + */ +PVRSRV_ERROR DeinitHTBUFFERBridge(void) +{ + PVR_LOG_RETURN_IF_ERROR(OSLockDestroy(pHTBUFFERBridgeLock), "OSLockDestroy"); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBLOG); + + return PVRSRV_OK; +} +#else /* EXCLUDE_HTBUFFER_BRIDGE */ +/* This bridge is conditional on EXCLUDE_HTBUFFER_BRIDGE - when defined, + * do not populate the dispatch table with its functions + */ +#define InitHTBUFFERBridge() \ + PVRSRV_OK + +#define DeinitHTBUFFERBridge() \ + PVRSRV_OK + +#endif /* EXCLUDE_HTBUFFER_BRIDGE */ diff --git a/drivers/gpu/drm/phytium/octopus/server_mm_bridge.c b/drivers/gpu/drm/phytium/octopus/server_mm_bridge.c new file mode 100644 index 000000000000..4d08977ed2f3 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/server_mm_bridge.c @@ -0,0 +1,3093 @@ +/******************************************************************************* +@File +@Title Server bridge for mm +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for mm +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "devicemem.h"
+#include "devicemem_server.h"
+#include "pmr.h"
+#include "devicemem_heapcfg.h"
+#include "physmem.h"
+#include "devicemem_utils.h"
+#include "process_stats.h"
+
+#include "common_mm_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+static PVRSRV_ERROR ReleasePMRExport(void *pvData)
+{
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	return PVRSRV_OK;
+}
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static PVRSRV_ERROR _PMRExportPMRpsPMRExportIntRelease(void *pvData)
+{
+	PVRSRV_ERROR eError;
+	eError = PMRUnexportPMR((PMR_EXPORT *) pvData);
+	return eError;
+}
+
+static IMG_INT
+PVRSRVBridgePMRExportPMR(IMG_UINT32 ui32DispatchTableEntry,
+			 IMG_UINT8 * psPMRExportPMRIN_UI8,
+			 IMG_UINT8 * psPMRExportPMROUT_UI8, CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_PMREXPORTPMR *psPMRExportPMRIN =
+	    (PVRSRV_BRIDGE_IN_PMREXPORTPMR *) IMG_OFFSET_ADDR(psPMRExportPMRIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_PMREXPORTPMR *psPMRExportPMROUT =
+	    (PVRSRV_BRIDGE_OUT_PMREXPORTPMR *) IMG_OFFSET_ADDR(psPMRExportPMROUT_UI8, 0);
+
+	IMG_HANDLE hPMR = psPMRExportPMRIN->hPMR;
+	PMR *psPMRInt = NULL;
+	PMR_EXPORT *psPMRExportInt = NULL;
+	IMG_HANDLE hPMRExportInt = NULL;
+
+	/* Lock over handle lookup.
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psPMRExportPMROUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psPMRExportPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PMRExportPMR_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psPMRExportPMROUT->eError = + PMRExportPMR(psPMRInt, + &psPMRExportInt, + &psPMRExportPMROUT->ui64Size, + &psPMRExportPMROUT->ui32Log2Contig, &psPMRExportPMROUT->ui64Password); + /* Exit early if bridged call fails */ + if (unlikely(psPMRExportPMROUT->eError != PVRSRV_OK)) + { + goto PMRExportPMR_exit; + } + + /* + * For cases where we need a cross process handle we actually allocate two. + * + * The first one is a connection specific handle and it gets given the real + * release function. This handle does *NOT* get returned to the caller. It's + * purpose is to release any leaked resources when we either have a bad or + * abnormally terminated client. If we didn't do this then the resource + * wouldn't be freed until driver unload. If the resource is freed normally, + * this handle can be looked up via the cross process handle and then + * released accordingly. + * + * The second one is a cross process handle and it gets given a noop release + * function. This handle does get returned to the caller. + */ + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psProcessHandleBase->psHandleBase); + + psPMRExportPMROUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase, + &hPMRExportInt, (void *)psPMRExportInt, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & _PMRExportPMRpsPMRExportIntRelease); + if (unlikely(psPMRExportPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + goto PMRExportPMR_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + + /* Lock over handle creation. */ + LockHandle(KERNEL_HANDLE_BASE); + psPMRExportPMROUT->eError = PVRSRVAllocHandleUnlocked(KERNEL_HANDLE_BASE, + &psPMRExportPMROUT->hPMRExport, + (void *)psPMRExportInt, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + ReleasePMRExport); + if (unlikely(psPMRExportPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(KERNEL_HANDLE_BASE); + goto PMRExportPMR_exit; + } + /* Release now we have created handles. */ + UnlockHandle(KERNEL_HANDLE_BASE); + +PMRExportPMR_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psPMRExportPMROUT->eError != PVRSRV_OK) + { + if (psPMRExportPMROUT->hPMRExport) + { + PVRSRV_ERROR eError; + + /* Lock over handle creation cleanup. 
*/ + LockHandle(KERNEL_HANDLE_BASE); + + eError = PVRSRVReleaseHandleUnlocked(KERNEL_HANDLE_BASE, + (IMG_HANDLE) psPMRExportPMROUT-> + hPMRExport, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT); + if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", __func__, PVRSRVGetErrorString(eError))); + } + /* Releasing the handle should free/destroy/release the resource. + * This should never fail... */ + PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); + + /* Release now we have cleaned up creation handles. */ + UnlockHandle(KERNEL_HANDLE_BASE); + + } + + if (hPMRExportInt) + { + PVRSRV_ERROR eError; + /* Lock over handle creation cleanup. */ + LockHandle(psConnection->psProcessHandleBase->psHandleBase); + + eError = + PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase-> + psHandleBase, hPMRExportInt, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT); + if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", __func__, PVRSRVGetErrorString(eError))); + } + /* Releasing the handle should free/destroy/release the resource. + * This should never fail... */ + PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); + + /* Avoid freeing/destroying/releasing the resource a second time below */ + psPMRExportInt = NULL; + /* Release now we have cleaned up creation handles. 
*/ + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + } + + if (psPMRExportInt) + { + LockHandle(KERNEL_HANDLE_BASE); + PMRUnexportPMR(psPMRExportInt); + UnlockHandle(KERNEL_HANDLE_BASE); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgePMRUnexportPMR(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRUnexportPMRIN_UI8, + IMG_UINT8 * psPMRUnexportPMROUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR *psPMRUnexportPMRIN = + (PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR *) IMG_OFFSET_ADDR(psPMRUnexportPMRIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR *psPMRUnexportPMROUT = + (PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR *) IMG_OFFSET_ADDR(psPMRUnexportPMROUT_UI8, 0); + + PMR_EXPORT *psPMRExportInt = NULL; + IMG_HANDLE hPMRExportInt = NULL; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + /* Lock over handle destruction. */ + LockHandle(KERNEL_HANDLE_BASE); + psPMRUnexportPMROUT->eError = + PVRSRVLookupHandleUnlocked(KERNEL_HANDLE_BASE, + (void **)&psPMRExportInt, + (IMG_HANDLE) psPMRUnexportPMRIN->hPMRExport, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT, IMG_FALSE); + if (unlikely(psPMRUnexportPMROUT->eError != PVRSRV_OK)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", __func__, PVRSRVGetErrorString(psPMRUnexportPMROUT->eError))); + } + PVR_ASSERT(psPMRUnexportPMROUT->eError == PVRSRV_OK); + + /* Release now we have destroyed handles. */ + UnlockHandle(KERNEL_HANDLE_BASE); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psProcessHandleBase->psHandleBase); + /* + * Find the connection specific handle that represents the same data + * as the cross process handle as releasing it will actually call the + * data's real release function (see the function where the cross + * process handle is allocated for more details). 
+ */ + psPMRUnexportPMROUT->eError = + PVRSRVFindHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase, + &hPMRExportInt, + psPMRExportInt, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT); + if (unlikely(psPMRUnexportPMROUT->eError != PVRSRV_OK)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", __func__, PVRSRVGetErrorString(psPMRUnexportPMROUT->eError))); + } + PVR_ASSERT(psPMRUnexportPMROUT->eError == PVRSRV_OK); + + psPMRUnexportPMROUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psProcessHandleBase->psHandleBase, + hPMRExportInt, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT); + if (unlikely((psPMRUnexportPMROUT->eError != PVRSRV_OK) && + (psPMRUnexportPMROUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", __func__, PVRSRVGetErrorString(psPMRUnexportPMROUT->eError))); + } + PVR_ASSERT((psPMRUnexportPMROUT->eError == PVRSRV_OK) || + (psPMRUnexportPMROUT->eError == PVRSRV_ERROR_RETRY)); + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + + /* Lock over handle destruction. */ + LockHandle(KERNEL_HANDLE_BASE); + + psPMRUnexportPMROUT->eError = + PVRSRVReleaseHandleStagedUnlock(KERNEL_HANDLE_BASE, + (IMG_HANDLE) psPMRUnexportPMRIN->hPMRExport, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT); + if (unlikely((psPMRUnexportPMROUT->eError != PVRSRV_OK) && + (psPMRUnexportPMROUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", __func__, PVRSRVGetErrorString(psPMRUnexportPMROUT->eError))); + UnlockHandle(KERNEL_HANDLE_BASE); + goto PMRUnexportPMR_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(KERNEL_HANDLE_BASE); + +PMRUnexportPMR_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgePMRGetUID(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRGetUIDIN_UI8, + IMG_UINT8 * psPMRGetUIDOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRGETUID *psPMRGetUIDIN = + (PVRSRV_BRIDGE_IN_PMRGETUID *) IMG_OFFSET_ADDR(psPMRGetUIDIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRGETUID *psPMRGetUIDOUT = + (PVRSRV_BRIDGE_OUT_PMRGETUID *) IMG_OFFSET_ADDR(psPMRGetUIDOUT_UI8, 0); + + IMG_HANDLE hPMR = psPMRGetUIDIN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psPMRGetUIDOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psPMRGetUIDOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PMRGetUID_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psPMRGetUIDOUT->eError = PMRGetUID(psPMRInt, &psPMRGetUIDOUT->ui64UID); + +PMRGetUID_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static PVRSRV_ERROR _PMRMakeLocalImportHandlepsExtMemIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PMRUnmakeLocalImportHandle((PMR *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgePMRMakeLocalImportHandle(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRMakeLocalImportHandleIN_UI8, + IMG_UINT8 * psPMRMakeLocalImportHandleOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE *psPMRMakeLocalImportHandleIN = + (PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE *) + IMG_OFFSET_ADDR(psPMRMakeLocalImportHandleIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE *psPMRMakeLocalImportHandleOUT = + (PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE *) + IMG_OFFSET_ADDR(psPMRMakeLocalImportHandleOUT_UI8, 0); + + IMG_HANDLE hBuffer = psPMRMakeLocalImportHandleIN->hBuffer; + PMR *psBufferInt = NULL; + PMR *psExtMemInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psPMRMakeLocalImportHandleOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psBufferInt, + hBuffer, + PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, IMG_TRUE); + if (unlikely(psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PMRMakeLocalImportHandle_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psPMRMakeLocalImportHandleOUT->eError = PMRMakeLocalImportHandle(psBufferInt, &psExtMemInt); + /* Exit early if bridged call fails */ + if (unlikely(psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK)) + { + goto PMRMakeLocalImportHandle_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psProcessHandleBase->psHandleBase); + + psPMRMakeLocalImportHandleOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase, + &psPMRMakeLocalImportHandleOUT->hExtMem, (void *)psExtMemInt, + PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _PMRMakeLocalImportHandlepsExtMemIntRelease); + if (unlikely(psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + goto PMRMakeLocalImportHandle_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + +PMRMakeLocalImportHandle_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psBufferInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hBuffer, PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK) + { + if (psExtMemInt) + { + LockHandle(KERNEL_HANDLE_BASE); + PMRUnmakeLocalImportHandle(psExtMemInt); + UnlockHandle(KERNEL_HANDLE_BASE); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgePMRUnmakeLocalImportHandle(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRUnmakeLocalImportHandleIN_UI8, + IMG_UINT8 * psPMRUnmakeLocalImportHandleOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE *psPMRUnmakeLocalImportHandleIN = + (PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE *) + IMG_OFFSET_ADDR(psPMRUnmakeLocalImportHandleIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE *psPMRUnmakeLocalImportHandleOUT = + (PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE *) + IMG_OFFSET_ADDR(psPMRUnmakeLocalImportHandleOUT_UI8, 0); + + /* Lock over handle destruction. 
*/ + LockHandle(psConnection->psProcessHandleBase->psHandleBase); + + psPMRUnmakeLocalImportHandleOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psProcessHandleBase->psHandleBase, + (IMG_HANDLE) psPMRUnmakeLocalImportHandleIN->hExtMem, + PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT); + if (unlikely((psPMRUnmakeLocalImportHandleOUT->eError != PVRSRV_OK) && + (psPMRUnmakeLocalImportHandleOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psPMRUnmakeLocalImportHandleOUT->eError))); + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + goto PMRUnmakeLocalImportHandle_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + +PMRUnmakeLocalImportHandle_exit: + + return 0; +} + +static PVRSRV_ERROR _PMRImportPMRpsPMRIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PMRUnrefPMR((PMR *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgePMRImportPMR(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRImportPMRIN_UI8, + IMG_UINT8 * psPMRImportPMROUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRIMPORTPMR *psPMRImportPMRIN = + (PVRSRV_BRIDGE_IN_PMRIMPORTPMR *) IMG_OFFSET_ADDR(psPMRImportPMRIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRIMPORTPMR *psPMRImportPMROUT = + (PVRSRV_BRIDGE_OUT_PMRIMPORTPMR *) IMG_OFFSET_ADDR(psPMRImportPMROUT_UI8, 0); + + IMG_HANDLE hPMRExport = psPMRImportPMRIN->hPMRExport; + PMR_EXPORT *psPMRExportInt = NULL; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(KERNEL_HANDLE_BASE); + + /* Look up the address from the handle */ + psPMRImportPMROUT->eError = + PVRSRVLookupHandleUnlocked(KERNEL_HANDLE_BASE, + (void **)&psPMRExportInt, + hPMRExport, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT, IMG_TRUE); + if (unlikely(psPMRImportPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(KERNEL_HANDLE_BASE); + goto PMRImportPMR_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(KERNEL_HANDLE_BASE); + + psPMRImportPMROUT->eError = + PhysmemImportPMR(psConnection, OSGetDevNode(psConnection), + psPMRExportInt, + psPMRImportPMRIN->ui64uiPassword, + psPMRImportPMRIN->ui64uiSize, + psPMRImportPMRIN->ui32uiLog2Contig, &psPMRInt); + /* Exit early if bridged call fails */ + if (unlikely(psPMRImportPMROUT->eError != PVRSRV_OK)) + { + goto PMRImportPMR_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psPMRImportPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psPMRImportPMROUT->hPMR, + (void *)psPMRInt, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _PMRImportPMRpsPMRIntRelease); + if (unlikely(psPMRImportPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PMRImportPMR_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +PMRImportPMR_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(KERNEL_HANDLE_BASE); + + /* Unreference the previously looked up handle */ + if (psPMRExportInt) + { + PVRSRVReleaseHandleUnlocked(KERNEL_HANDLE_BASE, + hPMRExport, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(KERNEL_HANDLE_BASE); + + if (psPMRImportPMROUT->eError != PVRSRV_OK) + { + if (psPMRInt) + { + LockHandle(KERNEL_HANDLE_BASE); + PMRUnrefPMR(psPMRInt); + UnlockHandle(KERNEL_HANDLE_BASE); + } + } + + return 0; +} + +static PVRSRV_ERROR _PMRLocalImportPMRpsPMRIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PMRUnrefPMR((PMR *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgePMRLocalImportPMR(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRLocalImportPMRIN_UI8, + IMG_UINT8 * psPMRLocalImportPMROUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR *psPMRLocalImportPMRIN = + (PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR *) IMG_OFFSET_ADDR(psPMRLocalImportPMRIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR *psPMRLocalImportPMROUT = + (PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR *) IMG_OFFSET_ADDR(psPMRLocalImportPMROUT_UI8, 0); + + IMG_HANDLE hExtHandle = psPMRLocalImportPMRIN->hExtHandle; + PMR *psExtHandleInt = NULL; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psProcessHandleBase->psHandleBase); + + /* Look up the address from the handle */ + psPMRLocalImportPMROUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase, + (void **)&psExtHandleInt, + hExtHandle, PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT, IMG_TRUE); + if (unlikely(psPMRLocalImportPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + goto PMRLocalImportPMR_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + + psPMRLocalImportPMROUT->eError = + PMRLocalImportPMR(psExtHandleInt, + &psPMRInt, + &psPMRLocalImportPMROUT->uiSize, &psPMRLocalImportPMROUT->uiAlign); + /* Exit early if bridged call fails */ + if (unlikely(psPMRLocalImportPMROUT->eError != PVRSRV_OK)) + { + goto PMRLocalImportPMR_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psPMRLocalImportPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psPMRLocalImportPMROUT->hPMR, + (void *)psPMRInt, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _PMRLocalImportPMRpsPMRIntRelease); + if (unlikely(psPMRLocalImportPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PMRLocalImportPMR_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +PMRLocalImportPMR_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psProcessHandleBase->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psExtHandleInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase, + hExtHandle, PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + + if (psPMRLocalImportPMROUT->eError != PVRSRV_OK) + { + if (psPMRInt) + { + LockHandle(KERNEL_HANDLE_BASE); + PMRUnrefPMR(psPMRInt); + UnlockHandle(KERNEL_HANDLE_BASE); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgePMRUnrefPMR(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRUnrefPMRIN_UI8, + IMG_UINT8 * psPMRUnrefPMROUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRUNREFPMR *psPMRUnrefPMRIN = + (PVRSRV_BRIDGE_IN_PMRUNREFPMR *) IMG_OFFSET_ADDR(psPMRUnrefPMRIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRUNREFPMR *psPMRUnrefPMROUT = + (PVRSRV_BRIDGE_OUT_PMRUNREFPMR *) IMG_OFFSET_ADDR(psPMRUnrefPMROUT_UI8, 0); + + /* Lock over handle destruction. 
*/ + LockHandle(psConnection->psHandleBase); + + psPMRUnrefPMROUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) psPMRUnrefPMRIN->hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + if (unlikely((psPMRUnrefPMROUT->eError != PVRSRV_OK) && + (psPMRUnrefPMROUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", __func__, PVRSRVGetErrorString(psPMRUnrefPMROUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto PMRUnrefPMR_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +PMRUnrefPMR_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgePMRUnrefUnlockPMR(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPMRUnrefUnlockPMRIN_UI8, + IMG_UINT8 * psPMRUnrefUnlockPMROUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR *psPMRUnrefUnlockPMRIN = + (PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR *) IMG_OFFSET_ADDR(psPMRUnrefUnlockPMRIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR *psPMRUnrefUnlockPMROUT = + (PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR *) IMG_OFFSET_ADDR(psPMRUnrefUnlockPMROUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psPMRUnrefUnlockPMROUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) psPMRUnrefUnlockPMRIN->hPMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + if (unlikely((psPMRUnrefUnlockPMROUT->eError != PVRSRV_OK) && + (psPMRUnrefUnlockPMROUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", __func__, PVRSRVGetErrorString(psPMRUnrefUnlockPMROUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto PMRUnrefUnlockPMR_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +PMRUnrefUnlockPMR_exit: + + return 0; +} + +static PVRSRV_ERROR _PhysmemNewRamBackedPMRpsPMRPtrIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PMRUnrefPMR((PMR *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgePhysmemNewRamBackedPMR(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPhysmemNewRamBackedPMRIN_UI8, + IMG_UINT8 * psPhysmemNewRamBackedPMROUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR *psPhysmemNewRamBackedPMRIN = + (PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR *) + IMG_OFFSET_ADDR(psPhysmemNewRamBackedPMRIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR *psPhysmemNewRamBackedPMROUT = + (PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR *) + IMG_OFFSET_ADDR(psPhysmemNewRamBackedPMROUT_UI8, 0); + + IMG_UINT32 *ui32MappingTableInt = NULL; + IMG_CHAR *uiAnnotationInt = NULL; + PMR *psPMRPtrInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32)) + + (psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR)) + 0; + + if (unlikely(psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks > PMR_MAX_SUPPORTED_PAGE_COUNT)) + { + psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto PhysmemNewRamBackedPMR_exit; + } + + if (unlikely(psPhysmemNewRamBackedPMRIN->ui32AnnotationLength > DEVMEM_ANNOTATION_MAX_LEN)) + { + psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto PhysmemNewRamBackedPMR_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psPhysmemNewRamBackedPMRIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPhysmemNewRamBackedPMRIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto PhysmemNewRamBackedPMR_exit; + } + } + } + + if (psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks != 0) + { + ui32MappingTableInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32MappingTableInt, + (const void __user *)psPhysmemNewRamBackedPMRIN->pui32MappingTable, + psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32)) != + PVRSRV_OK) + { + psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto PhysmemNewRamBackedPMR_exit; + } + } + if (psPhysmemNewRamBackedPMRIN->ui32AnnotationLength != 0) + { + uiAnnotationInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiAnnotationInt, + (const void __user *)psPhysmemNewRamBackedPMRIN->puiAnnotation, + psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR)) != + PVRSRV_OK) + { + psPhysmemNewRamBackedPMROUT->eError = 
PVRSRV_ERROR_INVALID_PARAMS; + + goto PhysmemNewRamBackedPMR_exit; + } + ((IMG_CHAR *) + uiAnnotationInt)[(psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * + sizeof(IMG_CHAR)) - 1] = '\0'; + } + + psPhysmemNewRamBackedPMROUT->eError = + PhysmemNewRamBackedPMR(psConnection, OSGetDevNode(psConnection), + psPhysmemNewRamBackedPMRIN->uiSize, + psPhysmemNewRamBackedPMRIN->uiChunkSize, + psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks, + psPhysmemNewRamBackedPMRIN->ui32NumVirtChunks, + ui32MappingTableInt, + psPhysmemNewRamBackedPMRIN->ui32Log2PageSize, + psPhysmemNewRamBackedPMRIN->uiFlags, + psPhysmemNewRamBackedPMRIN->ui32AnnotationLength, + uiAnnotationInt, + psPhysmemNewRamBackedPMRIN->ui32PID, + &psPMRPtrInt, psPhysmemNewRamBackedPMRIN->ui32PDumpFlags); + /* Exit early if bridged call fails */ + if (unlikely(psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK)) + { + goto PhysmemNewRamBackedPMR_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psPhysmemNewRamBackedPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psPhysmemNewRamBackedPMROUT-> + hPMRPtr, + (void *)psPMRPtrInt, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _PhysmemNewRamBackedPMRpsPMRPtrIntRelease); + if (unlikely(psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PhysmemNewRamBackedPMR_exit; + } + + /* Release now we have created handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +PhysmemNewRamBackedPMR_exit: + + if (psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK) + { + if (psPMRPtrInt) + { + LockHandle(KERNEL_HANDLE_BASE); + PMRUnrefPMR(psPMRPtrInt); + UnlockHandle(KERNEL_HANDLE_BASE); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static PVRSRV_ERROR _PhysmemNewRamBackedLockedPMRpsPMRPtrIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PMRUnrefUnlockPMR((PMR *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgePhysmemNewRamBackedLockedPMR(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPhysmemNewRamBackedLockedPMRIN_UI8, + IMG_UINT8 * psPhysmemNewRamBackedLockedPMROUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR *psPhysmemNewRamBackedLockedPMRIN = + (PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR *) + IMG_OFFSET_ADDR(psPhysmemNewRamBackedLockedPMRIN_UI8, 0); + PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR *psPhysmemNewRamBackedLockedPMROUT = + (PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR *) + IMG_OFFSET_ADDR(psPhysmemNewRamBackedLockedPMROUT_UI8, 0); + + IMG_UINT32 *ui32MappingTableInt = NULL; + IMG_CHAR *uiAnnotationInt = NULL; + PMR *psPMRPtrInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * sizeof(IMG_UINT32)) + + (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR)) + 0; + + if (unlikely + (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks > PMR_MAX_SUPPORTED_PAGE_COUNT)) + { + psPhysmemNewRamBackedLockedPMROUT->eError = 
PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto PhysmemNewRamBackedLockedPMR_exit; + } + + if (unlikely + (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength > DEVMEM_ANNOTATION_MAX_LEN)) + { + psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto PhysmemNewRamBackedLockedPMR_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psPhysmemNewRamBackedLockedPMRIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psPhysmemNewRamBackedLockedPMRIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psPhysmemNewRamBackedLockedPMROUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto PhysmemNewRamBackedLockedPMR_exit; + } + } + } + + if (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks != 0) + { + ui32MappingTableInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32MappingTableInt, + (const void __user *)psPhysmemNewRamBackedLockedPMRIN->pui32MappingTable, + psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * sizeof(IMG_UINT32)) != + PVRSRV_OK) + { + psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto PhysmemNewRamBackedLockedPMR_exit; + } + } + if 
(psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength != 0) + { + uiAnnotationInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiAnnotationInt, + (const void __user *)psPhysmemNewRamBackedLockedPMRIN->puiAnnotation, + psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR)) != + PVRSRV_OK) + { + psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto PhysmemNewRamBackedLockedPMR_exit; + } + ((IMG_CHAR *) + uiAnnotationInt)[(psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * + sizeof(IMG_CHAR)) - 1] = '\0'; + } + + psPhysmemNewRamBackedLockedPMROUT->eError = + PhysmemNewRamBackedLockedPMR(psConnection, OSGetDevNode(psConnection), + psPhysmemNewRamBackedLockedPMRIN->uiSize, + psPhysmemNewRamBackedLockedPMRIN->uiChunkSize, + psPhysmemNewRamBackedLockedPMRIN->ui32NumPhysChunks, + psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks, + ui32MappingTableInt, + psPhysmemNewRamBackedLockedPMRIN->ui32Log2PageSize, + psPhysmemNewRamBackedLockedPMRIN->uiFlags, + psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength, + uiAnnotationInt, + psPhysmemNewRamBackedLockedPMRIN->ui32PID, + &psPMRPtrInt, + psPhysmemNewRamBackedLockedPMRIN->ui32PDumpFlags); + /* Exit early if bridged call fails */ + if (unlikely(psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK)) + { + goto PhysmemNewRamBackedLockedPMR_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psPhysmemNewRamBackedLockedPMROUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psPhysmemNewRamBackedLockedPMROUT->hPMRPtr, + (void *)psPMRPtrInt, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _PhysmemNewRamBackedLockedPMRpsPMRPtrIntRelease); + if (unlikely(psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto PhysmemNewRamBackedLockedPMR_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +PhysmemNewRamBackedLockedPMR_exit: + + if (psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK) + { + if (psPMRPtrInt) + { + LockHandle(KERNEL_HANDLE_BASE); + PMRUnrefUnlockPMR(psPMRPtrInt); + UnlockHandle(KERNEL_HANDLE_BASE); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntPin(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntPinIN_UI8, + IMG_UINT8 * psDevmemIntPinOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTPIN *psDevmemIntPinIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTPIN *) IMG_OFFSET_ADDR(psDevmemIntPinIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTPIN *psDevmemIntPinOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTPIN *) IMG_OFFSET_ADDR(psDevmemIntPinOUT_UI8, 0); + + IMG_HANDLE hPMR = psDevmemIntPinIN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntPinOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psDevmemIntPinOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntPin_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntPinOUT->eError = DevmemIntPin(psPMRInt); + +DevmemIntPin_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntUnpin(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntUnpinIN_UI8, + IMG_UINT8 * psDevmemIntUnpinOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN *psDevmemIntUnpinIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN *) IMG_OFFSET_ADDR(psDevmemIntUnpinIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN *psDevmemIntUnpinOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN *) IMG_OFFSET_ADDR(psDevmemIntUnpinOUT_UI8, 0); + + IMG_HANDLE hPMR = psDevmemIntUnpinIN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntUnpinOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psDevmemIntUnpinOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntUnpin_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntUnpinOUT->eError = DevmemIntUnpin(psPMRInt); + +DevmemIntUnpin_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntPinValidate(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntPinValidateIN_UI8, + IMG_UINT8 * psDevmemIntPinValidateOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE *psDevmemIntPinValidateIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE *) IMG_OFFSET_ADDR(psDevmemIntPinValidateIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE *psDevmemIntPinValidateOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE *) + IMG_OFFSET_ADDR(psDevmemIntPinValidateOUT_UI8, 0); + + IMG_HANDLE hMapping = psDevmemIntPinValidateIN->hMapping; + DEVMEMINT_MAPPING *psMappingInt = NULL; + IMG_HANDLE hPMR = psDevmemIntPinValidateIN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntPinValidateOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psMappingInt, + hMapping, PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING, IMG_TRUE); + if (unlikely(psDevmemIntPinValidateOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntPinValidate_exit; + } + + /* Look up the address from the handle */ + psDevmemIntPinValidateOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psDevmemIntPinValidateOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntPinValidate_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntPinValidateOUT->eError = DevmemIntPinValidate(psMappingInt, psPMRInt); + +DevmemIntPinValidate_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psMappingInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hMapping, PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING); + } + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntUnpinInvalidate(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntUnpinInvalidateIN_UI8, + IMG_UINT8 * psDevmemIntUnpinInvalidateOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE *psDevmemIntUnpinInvalidateIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE *) + IMG_OFFSET_ADDR(psDevmemIntUnpinInvalidateIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE *psDevmemIntUnpinInvalidateOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE *) + IMG_OFFSET_ADDR(psDevmemIntUnpinInvalidateOUT_UI8, 0); + + IMG_HANDLE hMapping = psDevmemIntUnpinInvalidateIN->hMapping; + DEVMEMINT_MAPPING *psMappingInt = NULL; + IMG_HANDLE hPMR = psDevmemIntUnpinInvalidateIN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntUnpinInvalidateOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psMappingInt, + hMapping, PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING, IMG_TRUE); + if (unlikely(psDevmemIntUnpinInvalidateOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntUnpinInvalidate_exit; + } + + /* Look up the address from the handle */ + psDevmemIntUnpinInvalidateOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psDevmemIntUnpinInvalidateOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntUnpinInvalidate_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntUnpinInvalidateOUT->eError = DevmemIntUnpinInvalidate(psMappingInt, psPMRInt); + +DevmemIntUnpinInvalidate_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psMappingInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hMapping, PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING); + } + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static PVRSRV_ERROR _DevmemIntCtxCreatepsDevMemServerContextIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = DevmemIntCtxDestroy((DEVMEMINT_CTX *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeDevmemIntCtxCreate(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntCtxCreateIN_UI8, + IMG_UINT8 * psDevmemIntCtxCreateOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE *psDevmemIntCtxCreateIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE *) IMG_OFFSET_ADDR(psDevmemIntCtxCreateIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE *psDevmemIntCtxCreateOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE *) IMG_OFFSET_ADDR(psDevmemIntCtxCreateOUT_UI8, + 0); + + DEVMEMINT_CTX *psDevMemServerContextInt = NULL; + IMG_HANDLE hPrivDataInt = NULL; + + psDevmemIntCtxCreateOUT->hDevMemServerContext = NULL; + + psDevmemIntCtxCreateOUT->eError = + DevmemIntCtxCreate(psConnection, OSGetDevNode(psConnection), + psDevmemIntCtxCreateIN->bbKernelMemoryCtx, + &psDevMemServerContextInt, + &hPrivDataInt, &psDevmemIntCtxCreateOUT->ui32CPUCacheLineSize); + /* Exit early if bridged call fails */ + if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)) + { + goto DevmemIntCtxCreate_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psDevmemIntCtxCreateOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psDevmemIntCtxCreateOUT-> + hDevMemServerContext, + (void *) + psDevMemServerContextInt, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _DevmemIntCtxCreatepsDevMemServerContextIntRelease); + if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntCtxCreate_exit; + } + + psDevmemIntCtxCreateOUT->eError = PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase, + &psDevmemIntCtxCreateOUT-> + hPrivData, + (void *)hPrivDataInt, + PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + psDevmemIntCtxCreateOUT-> + hDevMemServerContext); + if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntCtxCreate_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +DevmemIntCtxCreate_exit: + + if (psDevmemIntCtxCreateOUT->eError != PVRSRV_OK) + { + if (psDevmemIntCtxCreateOUT->hDevMemServerContext) + { + PVRSRV_ERROR eError; + + /* Lock over handle creation cleanup. */ + LockHandle(psConnection->psHandleBase); + + eError = PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + (IMG_HANDLE) psDevmemIntCtxCreateOUT-> + hDevMemServerContext, + PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", __func__, PVRSRVGetErrorString(eError))); + } + /* Releasing the handle should free/destroy/release the resource. + * This should never fail... */ + PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); + + /* Avoid freeing/destroying/releasing the resource a second time below */ + psDevMemServerContextInt = NULL; + /* Release now we have cleaned up creation handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + } + + if (psDevMemServerContextInt) + { + DevmemIntCtxDestroy(psDevMemServerContextInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntCtxDestroy(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntCtxDestroyIN_UI8, + IMG_UINT8 * psDevmemIntCtxDestroyOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY *psDevmemIntCtxDestroyIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY *) IMG_OFFSET_ADDR(psDevmemIntCtxDestroyIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY *psDevmemIntCtxDestroyOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY *) IMG_OFFSET_ADDR(psDevmemIntCtxDestroyOUT_UI8, + 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psDevmemIntCtxDestroyOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) psDevmemIntCtxDestroyIN-> + hDevmemServerContext, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + if (unlikely + ((psDevmemIntCtxDestroyOUT->eError != PVRSRV_OK) + && (psDevmemIntCtxDestroyOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psDevmemIntCtxDestroyOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntCtxDestroy_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +DevmemIntCtxDestroy_exit: + + return 0; +} + +static PVRSRV_ERROR _DevmemIntHeapCreatepsDevmemHeapPtrIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = DevmemIntHeapDestroy((DEVMEMINT_HEAP *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeDevmemIntHeapCreate(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntHeapCreateIN_UI8, + IMG_UINT8 * psDevmemIntHeapCreateOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE *psDevmemIntHeapCreateIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE *) IMG_OFFSET_ADDR(psDevmemIntHeapCreateIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE *psDevmemIntHeapCreateOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE *) IMG_OFFSET_ADDR(psDevmemIntHeapCreateOUT_UI8, + 0); + + IMG_HANDLE hDevmemCtx = psDevmemIntHeapCreateIN->hDevmemCtx; + DEVMEMINT_CTX *psDevmemCtxInt = NULL; + DEVMEMINT_HEAP *psDevmemHeapPtrInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntHeapCreateOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psDevmemCtxInt, + hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE); + if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntHeapCreate_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntHeapCreateOUT->eError = + DevmemIntHeapCreate(psDevmemCtxInt, + psDevmemIntHeapCreateIN->sHeapBaseAddr, + psDevmemIntHeapCreateIN->uiHeapLength, + psDevmemIntHeapCreateIN->ui32Log2DataPageSize, &psDevmemHeapPtrInt); + /* Exit early if bridged call fails */ + if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)) + { + goto DevmemIntHeapCreate_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psDevmemIntHeapCreateOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psDevmemIntHeapCreateOUT-> + hDevmemHeapPtr, + (void *)psDevmemHeapPtrInt, + PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _DevmemIntHeapCreatepsDevmemHeapPtrIntRelease); + if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntHeapCreate_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +DevmemIntHeapCreate_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psDevmemCtxInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psDevmemIntHeapCreateOUT->eError != PVRSRV_OK) + { + if (psDevmemHeapPtrInt) + { + DevmemIntHeapDestroy(psDevmemHeapPtrInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntHeapDestroy(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntHeapDestroyIN_UI8, + IMG_UINT8 * psDevmemIntHeapDestroyOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY *psDevmemIntHeapDestroyIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY *) IMG_OFFSET_ADDR(psDevmemIntHeapDestroyIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY *psDevmemIntHeapDestroyOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY *) + IMG_OFFSET_ADDR(psDevmemIntHeapDestroyOUT_UI8, 0); + + /* Lock over handle destruction. 
*/ + LockHandle(psConnection->psHandleBase); + + psDevmemIntHeapDestroyOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) psDevmemIntHeapDestroyIN->hDevmemHeap, + PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); + if (unlikely((psDevmemIntHeapDestroyOUT->eError != PVRSRV_OK) && + (psDevmemIntHeapDestroyOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psDevmemIntHeapDestroyOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntHeapDestroy_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +DevmemIntHeapDestroy_exit: + + return 0; +} + +static PVRSRV_ERROR _DevmemIntMapPMRpsMappingIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = DevmemIntUnmapPMR((DEVMEMINT_MAPPING *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeDevmemIntMapPMR(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntMapPMRIN_UI8, + IMG_UINT8 * psDevmemIntMapPMROUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR *psDevmemIntMapPMRIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntMapPMRIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR *psDevmemIntMapPMROUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntMapPMROUT_UI8, 0); + + IMG_HANDLE hDevmemServerHeap = psDevmemIntMapPMRIN->hDevmemServerHeap; + DEVMEMINT_HEAP *psDevmemServerHeapInt = NULL; + IMG_HANDLE hReservation = psDevmemIntMapPMRIN->hReservation; + DEVMEMINT_RESERVATION *psReservationInt = NULL; + IMG_HANDLE hPMR = psDevmemIntMapPMRIN->hPMR; + PMR *psPMRInt = NULL; + DEVMEMINT_MAPPING *psMappingInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntMapPMROUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psDevmemServerHeapInt, + hDevmemServerHeap, + PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, IMG_TRUE); + if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntMapPMR_exit; + } + + /* Look up the address from the handle */ + psDevmemIntMapPMROUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psReservationInt, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE); + if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntMapPMR_exit; + } + + /* Look up the address from the handle */ + psDevmemIntMapPMROUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntMapPMR_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntMapPMROUT->eError = + DevmemIntMapPMR(psDevmemServerHeapInt, + psReservationInt, + psPMRInt, psDevmemIntMapPMRIN->uiMapFlags, &psMappingInt); + /* Exit early if bridged call fails */ + if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) + { + goto DevmemIntMapPMR_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psDevmemIntMapPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psDevmemIntMapPMROUT->hMapping, + (void *)psMappingInt, + PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _DevmemIntMapPMRpsMappingIntRelease); + if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntMapPMR_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +DevmemIntMapPMR_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psDevmemServerHeapInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hDevmemServerHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); + } + + /* Unreference the previously looked up handle */ + if (psReservationInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hReservation, PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); + } + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + if (psDevmemIntMapPMROUT->eError != PVRSRV_OK) + { + if (psMappingInt) + { + DevmemIntUnmapPMR(psMappingInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntUnmapPMR(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntUnmapPMRIN_UI8, + IMG_UINT8 * psDevmemIntUnmapPMROUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR *psDevmemIntUnmapPMRIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntUnmapPMRIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR *psDevmemIntUnmapPMROUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntUnmapPMROUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psDevmemIntUnmapPMROUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) psDevmemIntUnmapPMRIN->hMapping, + PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING); + if (unlikely((psDevmemIntUnmapPMROUT->eError != PVRSRV_OK) && + (psDevmemIntUnmapPMROUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", __func__, PVRSRVGetErrorString(psDevmemIntUnmapPMROUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntUnmapPMR_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +DevmemIntUnmapPMR_exit: + + return 0; +} + +static PVRSRV_ERROR _DevmemIntReserveRangepsReservationIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = DevmemIntUnreserveRange((DEVMEMINT_RESERVATION *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeDevmemIntReserveRange(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntReserveRangeIN_UI8, + IMG_UINT8 * psDevmemIntReserveRangeOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE *psDevmemIntReserveRangeIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE *) + IMG_OFFSET_ADDR(psDevmemIntReserveRangeIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE *psDevmemIntReserveRangeOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE *) + IMG_OFFSET_ADDR(psDevmemIntReserveRangeOUT_UI8, 0); + + IMG_HANDLE hDevmemServerHeap = psDevmemIntReserveRangeIN->hDevmemServerHeap; + DEVMEMINT_HEAP *psDevmemServerHeapInt = NULL; + DEVMEMINT_RESERVATION *psReservationInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntReserveRangeOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psDevmemServerHeapInt, + hDevmemServerHeap, + PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, IMG_TRUE); + if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntReserveRange_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntReserveRangeOUT->eError = + DevmemIntReserveRange(psDevmemServerHeapInt, + psDevmemIntReserveRangeIN->sAddress, + psDevmemIntReserveRangeIN->uiLength, &psReservationInt); + /* Exit early if bridged call fails */ + if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)) + { + goto DevmemIntReserveRange_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psDevmemIntReserveRangeOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psDevmemIntReserveRangeOUT-> + hReservation, + (void *)psReservationInt, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _DevmemIntReserveRangepsReservationIntRelease); + if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntReserveRange_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +DevmemIntReserveRange_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psDevmemServerHeapInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hDevmemServerHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psDevmemIntReserveRangeOUT->eError != PVRSRV_OK) + { + if (psReservationInt) + { + DevmemIntUnreserveRange(psReservationInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntUnreserveRange(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntUnreserveRangeIN_UI8, + IMG_UINT8 * psDevmemIntUnreserveRangeOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE *psDevmemIntUnreserveRangeIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE *) + IMG_OFFSET_ADDR(psDevmemIntUnreserveRangeIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE *psDevmemIntUnreserveRangeOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE *) + IMG_OFFSET_ADDR(psDevmemIntUnreserveRangeOUT_UI8, 0); + + /* Lock over handle destruction. 
*/ + LockHandle(psConnection->psHandleBase); + + psDevmemIntUnreserveRangeOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) psDevmemIntUnreserveRangeIN->hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); + if (unlikely((psDevmemIntUnreserveRangeOUT->eError != PVRSRV_OK) && + (psDevmemIntUnreserveRangeOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psDevmemIntUnreserveRangeOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntUnreserveRange_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +DevmemIntUnreserveRange_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgeChangeSparseMem(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psChangeSparseMemIN_UI8, + IMG_UINT8 * psChangeSparseMemOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_CHANGESPARSEMEM *psChangeSparseMemIN = + (PVRSRV_BRIDGE_IN_CHANGESPARSEMEM *) IMG_OFFSET_ADDR(psChangeSparseMemIN_UI8, 0); + PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM *psChangeSparseMemOUT = + (PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM *) IMG_OFFSET_ADDR(psChangeSparseMemOUT_UI8, 0); + + IMG_HANDLE hSrvDevMemHeap = psChangeSparseMemIN->hSrvDevMemHeap; + DEVMEMINT_HEAP *psSrvDevMemHeapInt = NULL; + IMG_HANDLE hPMR = psChangeSparseMemIN->hPMR; + PMR *psPMRInt = NULL; + IMG_UINT32 *ui32AllocPageIndicesInt = NULL; + IMG_UINT32 *ui32FreePageIndicesInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32)) + + (psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32)) + 0; + + if (unlikely(psChangeSparseMemIN->ui32AllocPageCount > PMR_MAX_SUPPORTED_PAGE_COUNT)) + { + psChangeSparseMemOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto 
ChangeSparseMem_exit; + } + + if (unlikely(psChangeSparseMemIN->ui32FreePageCount > PMR_MAX_SUPPORTED_PAGE_COUNT)) + { + psChangeSparseMemOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto ChangeSparseMem_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psChangeSparseMemIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psChangeSparseMemIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psChangeSparseMemOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto ChangeSparseMem_exit; + } + } + } + + if (psChangeSparseMemIN->ui32AllocPageCount != 0) + { + ui32AllocPageIndicesInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32AllocPageIndicesInt, + (const void __user *)psChangeSparseMemIN->pui32AllocPageIndices, + psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psChangeSparseMemOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto ChangeSparseMem_exit; + } + } + if (psChangeSparseMemIN->ui32FreePageCount != 0) + { + ui32FreePageIndicesInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if 
(psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32FreePageIndicesInt, + (const void __user *)psChangeSparseMemIN->pui32FreePageIndices, + psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psChangeSparseMemOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto ChangeSparseMem_exit; + } + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psChangeSparseMemOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSrvDevMemHeapInt, + hSrvDevMemHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, IMG_TRUE); + if (unlikely(psChangeSparseMemOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto ChangeSparseMem_exit; + } + + /* Look up the address from the handle */ + psChangeSparseMemOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psChangeSparseMemOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto ChangeSparseMem_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psChangeSparseMemOUT->eError = + DevmemIntChangeSparse(psSrvDevMemHeapInt, + psPMRInt, + psChangeSparseMemIN->ui32AllocPageCount, + ui32AllocPageIndicesInt, + psChangeSparseMemIN->ui32FreePageCount, + ui32FreePageIndicesInt, + psChangeSparseMemIN->ui32SparseFlags, + psChangeSparseMemIN->uiFlags, + psChangeSparseMemIN->sDevVAddr, + psChangeSparseMemIN->ui64CPUVAddr); + +ChangeSparseMem_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psSrvDevMemHeapInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSrvDevMemHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); + } + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntMapPages(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntMapPagesIN_UI8, + IMG_UINT8 * psDevmemIntMapPagesOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES *psDevmemIntMapPagesIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES *) IMG_OFFSET_ADDR(psDevmemIntMapPagesIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES *psDevmemIntMapPagesOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES *) IMG_OFFSET_ADDR(psDevmemIntMapPagesOUT_UI8, 0); + + IMG_HANDLE hReservation = psDevmemIntMapPagesIN->hReservation; + DEVMEMINT_RESERVATION *psReservationInt = NULL; + IMG_HANDLE hPMR = psDevmemIntMapPagesIN->hPMR; + PMR *psPMRInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntMapPagesOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psReservationInt, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE); + if (unlikely(psDevmemIntMapPagesOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntMapPages_exit; + } + + /* Look up the address from the handle */ + psDevmemIntMapPagesOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psDevmemIntMapPagesOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntMapPages_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntMapPagesOUT->eError = + DevmemIntMapPages(psReservationInt, + psPMRInt, + psDevmemIntMapPagesIN->ui32PageCount, + psDevmemIntMapPagesIN->ui32PhysicalPgOffset, + psDevmemIntMapPagesIN->uiFlags, psDevmemIntMapPagesIN->sDevVAddr); + +DevmemIntMapPages_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psReservationInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hReservation, PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); + } + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntUnmapPages(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntUnmapPagesIN_UI8, + IMG_UINT8 * psDevmemIntUnmapPagesOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES *psDevmemIntUnmapPagesIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES *) IMG_OFFSET_ADDR(psDevmemIntUnmapPagesIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES *psDevmemIntUnmapPagesOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES *) IMG_OFFSET_ADDR(psDevmemIntUnmapPagesOUT_UI8, + 0); + + IMG_HANDLE hReservation = psDevmemIntUnmapPagesIN->hReservation; + DEVMEMINT_RESERVATION *psReservationInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntUnmapPagesOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psReservationInt, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE); + if (unlikely(psDevmemIntUnmapPagesOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntUnmapPages_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntUnmapPagesOUT->eError = + DevmemIntUnmapPages(psReservationInt, + psDevmemIntUnmapPagesIN->sDevVAddr, + psDevmemIntUnmapPagesIN->ui32PageCount); + +DevmemIntUnmapPages_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psReservationInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hReservation, PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIsVDevAddrValid(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIsVDevAddrValidIN_UI8, + IMG_UINT8 * psDevmemIsVDevAddrValidOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID *psDevmemIsVDevAddrValidIN = + (PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID *) + IMG_OFFSET_ADDR(psDevmemIsVDevAddrValidIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID *psDevmemIsVDevAddrValidOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID *) + IMG_OFFSET_ADDR(psDevmemIsVDevAddrValidOUT_UI8, 0); + + IMG_HANDLE hDevmemCtx = psDevmemIsVDevAddrValidIN->hDevmemCtx; + DEVMEMINT_CTX *psDevmemCtxInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIsVDevAddrValidOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psDevmemCtxInt, + hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE); + if (unlikely(psDevmemIsVDevAddrValidOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIsVDevAddrValid_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIsVDevAddrValidOUT->eError = + DevmemIntIsVDevAddrValid(psConnection, OSGetDevNode(psConnection), + psDevmemCtxInt, psDevmemIsVDevAddrValidIN->sAddress); + +DevmemIsVDevAddrValid_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psDevmemCtxInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +#if defined(RGX_SRV_SLC_RANGEBASED_CFI_SUPPORTED) + +static IMG_INT +PVRSRVBridgeDevmemFlushDevSLCRange(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemFlushDevSLCRangeIN_UI8, + IMG_UINT8 * psDevmemFlushDevSLCRangeOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMFLUSHDEVSLCRANGE *psDevmemFlushDevSLCRangeIN = + (PVRSRV_BRIDGE_IN_DEVMEMFLUSHDEVSLCRANGE *) + IMG_OFFSET_ADDR(psDevmemFlushDevSLCRangeIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMFLUSHDEVSLCRANGE *psDevmemFlushDevSLCRangeOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMFLUSHDEVSLCRANGE *) + IMG_OFFSET_ADDR(psDevmemFlushDevSLCRangeOUT_UI8, 0); + + IMG_HANDLE hDevmemCtx = psDevmemFlushDevSLCRangeIN->hDevmemCtx; + DEVMEMINT_CTX *psDevmemCtxInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemFlushDevSLCRangeOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psDevmemCtxInt, + hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE); + if (unlikely(psDevmemFlushDevSLCRangeOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemFlushDevSLCRange_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemFlushDevSLCRangeOUT->eError = + DevmemIntFlushDevSLCRange(psDevmemCtxInt, + psDevmemFlushDevSLCRangeIN->sAddress, + psDevmemFlushDevSLCRangeIN->uiSize, + psDevmemFlushDevSLCRangeIN->bInvalidate); + +DevmemFlushDevSLCRange_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psDevmemCtxInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +#else +#define PVRSRVBridgeDevmemFlushDevSLCRange NULL +#endif + +#if defined(RGX_FEATURE_FBCDC) + +static IMG_INT +PVRSRVBridgeDevmemInvalidateFBSCTable(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemInvalidateFBSCTableIN_UI8, + IMG_UINT8 * psDevmemInvalidateFBSCTableOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINVALIDATEFBSCTABLE *psDevmemInvalidateFBSCTableIN = + (PVRSRV_BRIDGE_IN_DEVMEMINVALIDATEFBSCTABLE *) + IMG_OFFSET_ADDR(psDevmemInvalidateFBSCTableIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINVALIDATEFBSCTABLE *psDevmemInvalidateFBSCTableOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINVALIDATEFBSCTABLE *) + IMG_OFFSET_ADDR(psDevmemInvalidateFBSCTableOUT_UI8, 0); + + IMG_HANDLE hDevmemCtx = psDevmemInvalidateFBSCTableIN->hDevmemCtx; + DEVMEMINT_CTX *psDevmemCtxInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemInvalidateFBSCTableOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psDevmemCtxInt, + hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE); + if (unlikely(psDevmemInvalidateFBSCTableOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemInvalidateFBSCTable_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemInvalidateFBSCTableOUT->eError = + DevmemIntInvalidateFBSCTable(psDevmemCtxInt, + psDevmemInvalidateFBSCTableIN->ui64FBSCEntries); + +DevmemInvalidateFBSCTable_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psDevmemCtxInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +#else +#define PVRSRVBridgeDevmemInvalidateFBSCTable NULL +#endif + +static IMG_INT +PVRSRVBridgeHeapCfgHeapConfigCount(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psHeapCfgHeapConfigCountIN_UI8, + IMG_UINT8 * psHeapCfgHeapConfigCountOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT *psHeapCfgHeapConfigCountIN = + (PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT *) + IMG_OFFSET_ADDR(psHeapCfgHeapConfigCountIN_UI8, 0); + PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT *psHeapCfgHeapConfigCountOUT = + (PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT *) + IMG_OFFSET_ADDR(psHeapCfgHeapConfigCountOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psHeapCfgHeapConfigCountIN); + + psHeapCfgHeapConfigCountOUT->eError = + HeapCfgHeapConfigCount(psConnection, OSGetDevNode(psConnection), + &psHeapCfgHeapConfigCountOUT->ui32NumHeapConfigs); + + return 0; +} + +static IMG_INT +PVRSRVBridgeHeapCfgHeapCount(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psHeapCfgHeapCountIN_UI8, + IMG_UINT8 * psHeapCfgHeapCountOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT *psHeapCfgHeapCountIN = + (PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT *) IMG_OFFSET_ADDR(psHeapCfgHeapCountIN_UI8, 0); + PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT *psHeapCfgHeapCountOUT = + (PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT *) IMG_OFFSET_ADDR(psHeapCfgHeapCountOUT_UI8, 0); + + psHeapCfgHeapCountOUT->eError = + HeapCfgHeapCount(psConnection, OSGetDevNode(psConnection), + psHeapCfgHeapCountIN->ui32HeapConfigIndex, + &psHeapCfgHeapCountOUT->ui32NumHeaps); + + return 0; +} + +static IMG_INT +PVRSRVBridgeHeapCfgHeapConfigName(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psHeapCfgHeapConfigNameIN_UI8, + IMG_UINT8 * psHeapCfgHeapConfigNameOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME *psHeapCfgHeapConfigNameIN = + (PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME *) + 
IMG_OFFSET_ADDR(psHeapCfgHeapConfigNameIN_UI8, 0); + PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME *psHeapCfgHeapConfigNameOUT = + (PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME *) + IMG_OFFSET_ADDR(psHeapCfgHeapConfigNameOUT_UI8, 0); + + IMG_CHAR *puiHeapConfigNameInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * sizeof(IMG_CHAR)) + 0; + + if (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz > DEVMEM_HEAPNAME_MAXLENGTH) + { + psHeapCfgHeapConfigNameOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto HeapCfgHeapConfigName_exit; + } + + psHeapCfgHeapConfigNameOUT->puiHeapConfigName = + psHeapCfgHeapConfigNameIN->puiHeapConfigName; + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psHeapCfgHeapConfigNameIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psHeapCfgHeapConfigNameIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psHeapCfgHeapConfigNameOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto HeapCfgHeapConfigName_exit; + } + } + } + + if (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz != 0) + { + puiHeapConfigNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * sizeof(IMG_CHAR); + } + + psHeapCfgHeapConfigNameOUT->eError = + HeapCfgHeapConfigName(psConnection, OSGetDevNode(psConnection), + psHeapCfgHeapConfigNameIN->ui32HeapConfigIndex, + psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz, + puiHeapConfigNameInt); + + /* If dest ptr is non-null and we have data to copy */ + if ((puiHeapConfigNameInt) && + ((psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * sizeof(IMG_CHAR)) > 0)) + { + if (unlikely + (OSCopyToUser + (NULL, (void __user *)psHeapCfgHeapConfigNameOUT->puiHeapConfigName, + puiHeapConfigNameInt, + (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * sizeof(IMG_CHAR))) != + PVRSRV_OK)) + { + psHeapCfgHeapConfigNameOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto HeapCfgHeapConfigName_exit; + } + } + +HeapCfgHeapConfigName_exit: + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeHeapCfgHeapDetails(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psHeapCfgHeapDetailsIN_UI8, + IMG_UINT8 * psHeapCfgHeapDetailsOUT_UI8, + 
CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS *psHeapCfgHeapDetailsIN = + (PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS *) IMG_OFFSET_ADDR(psHeapCfgHeapDetailsIN_UI8, 0); + PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS *psHeapCfgHeapDetailsOUT = + (PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS *) IMG_OFFSET_ADDR(psHeapCfgHeapDetailsOUT_UI8, + 0); + + IMG_CHAR *puiHeapNameOutInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR)) + 0; + + if (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz > DEVMEM_HEAPNAME_MAXLENGTH) + { + psHeapCfgHeapDetailsOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto HeapCfgHeapDetails_exit; + } + + psHeapCfgHeapDetailsOUT->puiHeapNameOut = psHeapCfgHeapDetailsIN->puiHeapNameOut; + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psHeapCfgHeapDetailsIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psHeapCfgHeapDetailsIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psHeapCfgHeapDetailsOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto HeapCfgHeapDetails_exit; + } + } + } + + if (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz != 0) + { + puiHeapNameOutInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR); + } + + psHeapCfgHeapDetailsOUT->eError = + HeapCfgHeapDetails(psConnection, OSGetDevNode(psConnection), + psHeapCfgHeapDetailsIN->ui32HeapConfigIndex, + psHeapCfgHeapDetailsIN->ui32HeapIndex, + psHeapCfgHeapDetailsIN->ui32HeapNameBufSz, + puiHeapNameOutInt, + &psHeapCfgHeapDetailsOUT->sDevVAddrBase, + &psHeapCfgHeapDetailsOUT->uiHeapLength, + &psHeapCfgHeapDetailsOUT->uiReservedRegionLength, + &psHeapCfgHeapDetailsOUT->ui32Log2DataPageSizeOut, + &psHeapCfgHeapDetailsOUT->ui32Log2ImportAlignmentOut); + + /* If dest ptr is non-null and we have data to copy */ + if ((puiHeapNameOutInt) && + ((psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR)) > 0)) + { + if (unlikely + (OSCopyToUser + (NULL, (void __user *)psHeapCfgHeapDetailsOUT->puiHeapNameOut, + puiHeapNameOutInt, + (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR))) != PVRSRV_OK)) + { + psHeapCfgHeapDetailsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto HeapCfgHeapDetails_exit; + } + } + +HeapCfgHeapDetails_exit: + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + 
+ return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemIntRegisterPFNotifyKM(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemIntRegisterPFNotifyKMIN_UI8, + IMG_UINT8 * psDevmemIntRegisterPFNotifyKMOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM *psDevmemIntRegisterPFNotifyKMIN = + (PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM *) + IMG_OFFSET_ADDR(psDevmemIntRegisterPFNotifyKMIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM *psDevmemIntRegisterPFNotifyKMOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM *) + IMG_OFFSET_ADDR(psDevmemIntRegisterPFNotifyKMOUT_UI8, 0); + + IMG_HANDLE hDevmemCtx = psDevmemIntRegisterPFNotifyKMIN->hDevmemCtx; + DEVMEMINT_CTX *psDevmemCtxInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemIntRegisterPFNotifyKMOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psDevmemCtxInt, + hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE); + if (unlikely(psDevmemIntRegisterPFNotifyKMOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemIntRegisterPFNotifyKM_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psDevmemIntRegisterPFNotifyKMOUT->eError = + DevmemIntRegisterPFNotifyKM(psDevmemCtxInt, + psDevmemIntRegisterPFNotifyKMIN->ui32PID, + psDevmemIntRegisterPFNotifyKMIN->bRegister); + +DevmemIntRegisterPFNotifyKM_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psDevmemCtxInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeGetMaxDevMemSize(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psGetMaxDevMemSizeIN_UI8, + IMG_UINT8 * psGetMaxDevMemSizeOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_GETMAXDEVMEMSIZE *psGetMaxDevMemSizeIN = + (PVRSRV_BRIDGE_IN_GETMAXDEVMEMSIZE *) IMG_OFFSET_ADDR(psGetMaxDevMemSizeIN_UI8, 0); + PVRSRV_BRIDGE_OUT_GETMAXDEVMEMSIZE *psGetMaxDevMemSizeOUT = + (PVRSRV_BRIDGE_OUT_GETMAXDEVMEMSIZE *) IMG_OFFSET_ADDR(psGetMaxDevMemSizeOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psGetMaxDevMemSizeIN); + + psGetMaxDevMemSizeOUT->eError = + PVRSRVGetMaxDevMemSizeKM(psConnection, OSGetDevNode(psConnection), + &psGetMaxDevMemSizeOUT->uiLMASize, + &psGetMaxDevMemSizeOUT->uiUMASize); + + return 0; +} + +static IMG_INT +PVRSRVBridgeDevmemGetFaultAddress(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psDevmemGetFaultAddressIN_UI8, + IMG_UINT8 * psDevmemGetFaultAddressOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS *psDevmemGetFaultAddressIN = + (PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS *) + IMG_OFFSET_ADDR(psDevmemGetFaultAddressIN_UI8, 0); + PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS *psDevmemGetFaultAddressOUT = + (PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS *) + IMG_OFFSET_ADDR(psDevmemGetFaultAddressOUT_UI8, 0); + + IMG_HANDLE hDevmemCtx = psDevmemGetFaultAddressIN->hDevmemCtx; + DEVMEMINT_CTX *psDevmemCtxInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psDevmemGetFaultAddressOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psDevmemCtxInt, + hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE); + if (unlikely(psDevmemGetFaultAddressOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto DevmemGetFaultAddress_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psDevmemGetFaultAddressOUT->eError = + DevmemIntGetFaultAddress(psConnection, OSGetDevNode(psConnection), + psDevmemCtxInt, &psDevmemGetFaultAddressOUT->sFaultAddress); + +DevmemGetFaultAddress_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psDevmemCtxInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +#if defined(PVRSRV_ENABLE_PROCESS_STATS) + +static IMG_INT +PVRSRVBridgePVRSRVUpdateOOMStats(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psPVRSRVUpdateOOMStatsIN_UI8, + IMG_UINT8 * psPVRSRVUpdateOOMStatsOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_PVRSRVUPDATEOOMSTATS *psPVRSRVUpdateOOMStatsIN = + (PVRSRV_BRIDGE_IN_PVRSRVUPDATEOOMSTATS *) IMG_OFFSET_ADDR(psPVRSRVUpdateOOMStatsIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_PVRSRVUPDATEOOMSTATS *psPVRSRVUpdateOOMStatsOUT = + (PVRSRV_BRIDGE_OUT_PVRSRVUPDATEOOMSTATS *) + IMG_OFFSET_ADDR(psPVRSRVUpdateOOMStatsOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psConnection); + + psPVRSRVUpdateOOMStatsOUT->eError = + PVRSRVServerUpdateOOMStats(psPVRSRVUpdateOOMStatsIN->ui32ui32StatType, + psPVRSRVUpdateOOMStatsIN->ui32pid); + + return 0; +} + +#else +#define PVRSRVBridgePVRSRVUpdateOOMStats NULL +#endif + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitMMBridge(void); +PVRSRV_ERROR DeinitMMBridge(void); + +/* + * Register all MM functions with services + */ +PVRSRV_ERROR InitMMBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMREXPORTPMR, + PVRSRVBridgePMRExportPMR, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR, + 
PVRSRVBridgePMRUnexportPMR, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRGETUID, PVRSRVBridgePMRGetUID, + NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE, + PVRSRVBridgePMRMakeLocalImportHandle, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE, + PVRSRVBridgePMRUnmakeLocalImportHandle, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRIMPORTPMR, + PVRSRVBridgePMRImportPMR, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR, + PVRSRVBridgePMRLocalImportPMR, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFPMR, + PVRSRVBridgePMRUnrefPMR, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR, + PVRSRVBridgePMRUnrefUnlockPMR, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR, + PVRSRVBridgePhysmemNewRamBackedPMR, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR, + PVRSRVBridgePhysmemNewRamBackedLockedPMR, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTPIN, + PVRSRVBridgeDevmemIntPin, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNPIN, + PVRSRVBridgeDevmemIntUnpin, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTPINVALIDATE, + PVRSRVBridgeDevmemIntPinValidate, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNPININVALIDATE, + PVRSRVBridgeDevmemIntUnpinInvalidate, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE, + PVRSRVBridgeDevmemIntCtxCreate, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY, + PVRSRVBridgeDevmemIntCtxDestroy, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE, + PVRSRVBridgeDevmemIntHeapCreate, NULL); + + 
SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY, + PVRSRVBridgeDevmemIntHeapDestroy, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR, + PVRSRVBridgeDevmemIntMapPMR, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR, + PVRSRVBridgeDevmemIntUnmapPMR, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE, + PVRSRVBridgeDevmemIntReserveRange, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE, + PVRSRVBridgeDevmemIntUnreserveRange, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_CHANGESPARSEMEM, + PVRSRVBridgeChangeSparseMem, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES, + PVRSRVBridgeDevmemIntMapPages, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES, + PVRSRVBridgeDevmemIntUnmapPages, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID, + PVRSRVBridgeDevmemIsVDevAddrValid, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMFLUSHDEVSLCRANGE, + PVRSRVBridgeDevmemFlushDevSLCRange, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE, + PVRSRVBridgeDevmemInvalidateFBSCTable, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT, + PVRSRVBridgeHeapCfgHeapConfigCount, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT, + PVRSRVBridgeHeapCfgHeapCount, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME, + PVRSRVBridgeHeapCfgHeapConfigName, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS, + PVRSRVBridgeHeapCfgHeapDetails, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM, + 
PVRSRVBridgeDevmemIntRegisterPFNotifyKM, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETMAXDEVMEMSIZE, + PVRSRVBridgeGetMaxDevMemSize, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS, + PVRSRVBridgeDevmemGetFaultAddress, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PVRSRVUPDATEOOMSTATS, + PVRSRVBridgePVRSRVUpdateOOMStats, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all mm functions with services + */ +PVRSRV_ERROR DeinitMMBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMREXPORTPMR); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRGETUID); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRIMPORTPMR); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFPMR); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTPIN); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNPIN); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTPINVALIDATE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNPININVALIDATE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY); + + 
UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_CHANGESPARSEMEM); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMFLUSHDEVSLCRANGE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETMAXDEVMEMSIZE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PVRSRVUPDATEOOMSTATS); + + return PVRSRV_OK; +} diff --git a/drivers/gpu/drm/phytium/octopus/server_pvrtl_bridge.c b/drivers/gpu/drm/phytium/octopus/server_pvrtl_bridge.c new file mode 100644 index 000000000000..a62ada5a8537 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/server_pvrtl_bridge.c @@ -0,0 
+1,785 @@ +/******************************************************************************* +@File +@Title Server bridge for pvrtl +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for pvrtl +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "tlserver.h" + +#include "common_pvrtl_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static PVRSRV_ERROR _TLOpenStreampsSDIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = TLServerCloseStreamKM((TL_STREAM_DESC *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeTLOpenStream(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psTLOpenStreamIN_UI8, + IMG_UINT8 * psTLOpenStreamOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_TLOPENSTREAM *psTLOpenStreamIN = + (PVRSRV_BRIDGE_IN_TLOPENSTREAM *) IMG_OFFSET_ADDR(psTLOpenStreamIN_UI8, 0); + PVRSRV_BRIDGE_OUT_TLOPENSTREAM *psTLOpenStreamOUT = + (PVRSRV_BRIDGE_OUT_TLOPENSTREAM *) IMG_OFFSET_ADDR(psTLOpenStreamOUT_UI8, 0); + + IMG_CHAR *uiNameInt = NULL; + TL_STREAM_DESC *psSDInt = NULL; + PMR *psTLPMRInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = (PRVSRVTL_MAX_STREAM_NAME_SIZE * 
sizeof(IMG_CHAR)) + 0; + + psTLOpenStreamOUT->hSD = NULL; + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psTLOpenStreamIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psTLOpenStreamIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psTLOpenStreamOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto TLOpenStream_exit; + } + } + } + + { + uiNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiNameInt, (const void __user *)psTLOpenStreamIN->puiName, + PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psTLOpenStreamOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto TLOpenStream_exit; + } + ((IMG_CHAR *) uiNameInt)[(PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) - 1] = + '\0'; + } + + psTLOpenStreamOUT->eError = + TLServerOpenStreamKM(uiNameInt, psTLOpenStreamIN->ui32Mode, &psSDInt, &psTLPMRInt); + /* Exit early if bridged call fails */ + if (unlikely(psTLOpenStreamOUT->eError != PVRSRV_OK)) + { + goto TLOpenStream_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psTLOpenStreamOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psTLOpenStreamOUT->hSD, + (void *)psSDInt, + PVRSRV_HANDLE_TYPE_PVR_TL_SD, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _TLOpenStreampsSDIntRelease); + if (unlikely(psTLOpenStreamOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto TLOpenStream_exit; + } + + psTLOpenStreamOUT->eError = PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase, + &psTLOpenStreamOUT->hTLPMR, + (void *)psTLPMRInt, + PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + psTLOpenStreamOUT->hSD); + if (unlikely(psTLOpenStreamOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto TLOpenStream_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +TLOpenStream_exit: + + if (psTLOpenStreamOUT->eError != PVRSRV_OK) + { + if (psTLOpenStreamOUT->hSD) + { + PVRSRV_ERROR eError; + + /* Lock over handle creation cleanup. */ + LockHandle(psConnection->psHandleBase); + + eError = PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + (IMG_HANDLE) psTLOpenStreamOUT->hSD, + PVRSRV_HANDLE_TYPE_PVR_TL_SD); + if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", __func__, PVRSRVGetErrorString(eError))); + } + /* Releasing the handle should free/destroy/release the resource. + * This should never fail... */ + PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); + + /* Avoid freeing/destroying/releasing the resource a second time below */ + psSDInt = NULL; + /* Release now we have cleaned up creation handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + } + + if (psSDInt) + { + TLServerCloseStreamKM(psSDInt); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeTLCloseStream(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psTLCloseStreamIN_UI8, + IMG_UINT8 * psTLCloseStreamOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_TLCLOSESTREAM *psTLCloseStreamIN = + (PVRSRV_BRIDGE_IN_TLCLOSESTREAM *) IMG_OFFSET_ADDR(psTLCloseStreamIN_UI8, 0); + PVRSRV_BRIDGE_OUT_TLCLOSESTREAM *psTLCloseStreamOUT = + (PVRSRV_BRIDGE_OUT_TLCLOSESTREAM *) IMG_OFFSET_ADDR(psTLCloseStreamOUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psTLCloseStreamOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) psTLCloseStreamIN->hSD, + PVRSRV_HANDLE_TYPE_PVR_TL_SD); + if (unlikely((psTLCloseStreamOUT->eError != PVRSRV_OK) && + (psTLCloseStreamOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", __func__, PVRSRVGetErrorString(psTLCloseStreamOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto TLCloseStream_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +TLCloseStream_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgeTLAcquireData(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psTLAcquireDataIN_UI8, + IMG_UINT8 * psTLAcquireDataOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_TLACQUIREDATA *psTLAcquireDataIN = + (PVRSRV_BRIDGE_IN_TLACQUIREDATA *) IMG_OFFSET_ADDR(psTLAcquireDataIN_UI8, 0); + PVRSRV_BRIDGE_OUT_TLACQUIREDATA *psTLAcquireDataOUT = + (PVRSRV_BRIDGE_OUT_TLACQUIREDATA *) IMG_OFFSET_ADDR(psTLAcquireDataOUT_UI8, 0); + + IMG_HANDLE hSD = psTLAcquireDataIN->hSD; + TL_STREAM_DESC *psSDInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psTLAcquireDataOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSDInt, + hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE); + if (unlikely(psTLAcquireDataOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto TLAcquireData_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psTLAcquireDataOUT->eError = + TLServerAcquireDataKM(psSDInt, + &psTLAcquireDataOUT->ui32ReadOffset, + &psTLAcquireDataOUT->ui32ReadLen); + +TLAcquireData_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psSDInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeTLReleaseData(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psTLReleaseDataIN_UI8, + IMG_UINT8 * psTLReleaseDataOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_TLRELEASEDATA *psTLReleaseDataIN = + (PVRSRV_BRIDGE_IN_TLRELEASEDATA *) IMG_OFFSET_ADDR(psTLReleaseDataIN_UI8, 0); + PVRSRV_BRIDGE_OUT_TLRELEASEDATA *psTLReleaseDataOUT = + (PVRSRV_BRIDGE_OUT_TLRELEASEDATA *) IMG_OFFSET_ADDR(psTLReleaseDataOUT_UI8, 0); + + IMG_HANDLE hSD = psTLReleaseDataIN->hSD; + TL_STREAM_DESC *psSDInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psTLReleaseDataOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSDInt, + hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE); + if (unlikely(psTLReleaseDataOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto TLReleaseData_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psTLReleaseDataOUT->eError = + TLServerReleaseDataKM(psSDInt, + psTLReleaseDataIN->ui32ReadOffset, + psTLReleaseDataIN->ui32ReadLen); + +TLReleaseData_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psSDInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeTLDiscoverStreams(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psTLDiscoverStreamsIN_UI8, + IMG_UINT8 * psTLDiscoverStreamsOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS *psTLDiscoverStreamsIN = + (PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS *) IMG_OFFSET_ADDR(psTLDiscoverStreamsIN_UI8, 0); + PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS *psTLDiscoverStreamsOUT = + (PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS *) IMG_OFFSET_ADDR(psTLDiscoverStreamsOUT_UI8, 0); + + IMG_CHAR *uiNamePatternInt = NULL; + IMG_CHAR *puiStreamsInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) + + (psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR)) + 0; + + if (psTLDiscoverStreamsIN->ui32Size > PVRSRVTL_MAX_DISCOVERABLE_STREAMS_BUFFER) + { + psTLDiscoverStreamsOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto TLDiscoverStreams_exit; + } + + PVR_UNREFERENCED_PARAMETER(psConnection); + + psTLDiscoverStreamsOUT->puiStreams = psTLDiscoverStreamsIN->puiStreams; + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psTLDiscoverStreamsIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psTLDiscoverStreamsIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psTLDiscoverStreamsOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto TLDiscoverStreams_exit; + } + } + } + + { + uiNamePatternInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiNamePatternInt, + (const void __user *)psTLDiscoverStreamsIN->puiNamePattern, + PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psTLDiscoverStreamsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto TLDiscoverStreams_exit; + } + ((IMG_CHAR *) uiNamePatternInt)[(PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) - + 1] = '\0'; + } + if (psTLDiscoverStreamsIN->ui32Size != 0) + { + puiStreamsInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR); + } + + psTLDiscoverStreamsOUT->eError = + TLServerDiscoverStreamsKM(uiNamePatternInt, + psTLDiscoverStreamsIN->ui32Size, + puiStreamsInt, &psTLDiscoverStreamsOUT->ui32NumFound); + + /* If dest ptr is non-null and we have data to copy */ + if ((puiStreamsInt) && ((psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR)) > 0)) + { + if (unlikely + (OSCopyToUser + (NULL, (void __user *)psTLDiscoverStreamsOUT->puiStreams, puiStreamsInt, + (psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR))) != PVRSRV_OK)) + { + psTLDiscoverStreamsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto TLDiscoverStreams_exit; + } + } + +TLDiscoverStreams_exit: + + /* Allocated space 
should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeTLReserveStream(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psTLReserveStreamIN_UI8, + IMG_UINT8 * psTLReserveStreamOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_TLRESERVESTREAM *psTLReserveStreamIN = + (PVRSRV_BRIDGE_IN_TLRESERVESTREAM *) IMG_OFFSET_ADDR(psTLReserveStreamIN_UI8, 0); + PVRSRV_BRIDGE_OUT_TLRESERVESTREAM *psTLReserveStreamOUT = + (PVRSRV_BRIDGE_OUT_TLRESERVESTREAM *) IMG_OFFSET_ADDR(psTLReserveStreamOUT_UI8, 0); + + IMG_HANDLE hSD = psTLReserveStreamIN->hSD; + TL_STREAM_DESC *psSDInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psTLReserveStreamOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSDInt, + hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE); + if (unlikely(psTLReserveStreamOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto TLReserveStream_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psTLReserveStreamOUT->eError = + TLServerReserveStreamKM(psSDInt, + &psTLReserveStreamOUT->ui32BufferOffset, + psTLReserveStreamIN->ui32Size, + psTLReserveStreamIN->ui32SizeMin, + &psTLReserveStreamOUT->ui32Available); + +TLReserveStream_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psSDInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeTLCommitStream(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psTLCommitStreamIN_UI8, + IMG_UINT8 * psTLCommitStreamOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_TLCOMMITSTREAM *psTLCommitStreamIN = + (PVRSRV_BRIDGE_IN_TLCOMMITSTREAM *) IMG_OFFSET_ADDR(psTLCommitStreamIN_UI8, 0); + PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM *psTLCommitStreamOUT = + (PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM *) IMG_OFFSET_ADDR(psTLCommitStreamOUT_UI8, 0); + + IMG_HANDLE hSD = psTLCommitStreamIN->hSD; + TL_STREAM_DESC *psSDInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psTLCommitStreamOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSDInt, + hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE); + if (unlikely(psTLCommitStreamOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto TLCommitStream_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psTLCommitStreamOUT->eError = + TLServerCommitStreamKM(psSDInt, psTLCommitStreamIN->ui32ReqSize); + +TLCommitStream_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psSDInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeTLWriteData(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psTLWriteDataIN_UI8, + IMG_UINT8 * psTLWriteDataOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_TLWRITEDATA *psTLWriteDataIN = + (PVRSRV_BRIDGE_IN_TLWRITEDATA *) IMG_OFFSET_ADDR(psTLWriteDataIN_UI8, 0); + PVRSRV_BRIDGE_OUT_TLWRITEDATA *psTLWriteDataOUT = + (PVRSRV_BRIDGE_OUT_TLWRITEDATA *) IMG_OFFSET_ADDR(psTLWriteDataOUT_UI8, 0); + + IMG_HANDLE hSD = psTLWriteDataIN->hSD; + TL_STREAM_DESC *psSDInt = NULL; + IMG_BYTE *ui8DataInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = (psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE)) + 0; + + if (unlikely(psTLWriteDataIN->ui32Size > PVRSRVTL_MAX_PACKET_SIZE)) + { + psTLWriteDataOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto TLWriteData_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psTLWriteDataIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psTLWriteDataIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psTLWriteDataOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto TLWriteData_exit; + } + } + } + + if (psTLWriteDataIN->ui32Size != 0) + { + ui8DataInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, ui8DataInt, (const void __user *)psTLWriteDataIN->pui8Data, + psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE)) != PVRSRV_OK) + { + psTLWriteDataOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto TLWriteData_exit; + } + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psTLWriteDataOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSDInt, + hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE); + if (unlikely(psTLWriteDataOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto TLWriteData_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psTLWriteDataOUT->eError = + TLServerWriteDataKM(psSDInt, psTLWriteDataIN->ui32Size, ui8DataInt); + +TLWriteData_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psSDInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitPVRTLBridge(void); +PVRSRV_ERROR DeinitPVRTLBridge(void); + +/* + * Register all PVRTL functions with services + */ +PVRSRV_ERROR InitPVRTLBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM, + PVRSRVBridgeTLOpenStream, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM, + PVRSRVBridgeTLCloseStream, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA, + PVRSRVBridgeTLAcquireData, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA, + PVRSRVBridgeTLReleaseData, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS, + PVRSRVBridgeTLDiscoverStreams, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM, + PVRSRVBridgeTLReserveStream, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM, + PVRSRVBridgeTLCommitStream, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLWRITEDATA, + PVRSRVBridgeTLWriteData, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all pvrtl functions with services + */ +PVRSRV_ERROR DeinitPVRTLBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA); + + 
UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLWRITEDATA); + + return PVRSRV_OK; +} diff --git a/drivers/gpu/drm/phytium/octopus/server_rgxbreakpoint_bridge.c b/drivers/gpu/drm/phytium/octopus/server_rgxbreakpoint_bridge.c new file mode 100644 index 000000000000..a394b8ca911b --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/server_rgxbreakpoint_bridge.c @@ -0,0 +1,372 @@ +/******************************************************************************* +@File +@Title Server bridge for rgxbreakpoint +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for rgxbreakpoint +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include <linux/uaccess.h> + +#include "img_defs.h" + +#include "rgxbreakpoint.h" + +#include "common_rgxbreakpoint_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include <linux/slab.h> + +#if !defined(EXCLUDE_RGXBREAKPOINT_BRIDGE) + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgeRGXSetBreakpoint(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXSetBreakpointIN_UI8, + IMG_UINT8 * psRGXSetBreakpointOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT *psRGXSetBreakpointIN = + (PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT *) IMG_OFFSET_ADDR(psRGXSetBreakpointIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT *psRGXSetBreakpointOUT = + (PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT *) IMG_OFFSET_ADDR(psRGXSetBreakpointOUT_UI8, 0); + + IMG_HANDLE hPrivData = psRGXSetBreakpointIN->hPrivData; + IMG_HANDLE hPrivDataInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXSetBreakpointOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hPrivDataInt, + hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); + if (unlikely(psRGXSetBreakpointOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXSetBreakpoint_exit; + } + /* Release now we have looked up handles.
*/ + UnlockHandle(psConnection->psHandleBase); + + psRGXSetBreakpointOUT->eError = + PVRSRVRGXSetBreakpointKM(psConnection, OSGetDevNode(psConnection), + hPrivDataInt, + psRGXSetBreakpointIN->eFWDataMaster, + psRGXSetBreakpointIN->ui32BreakpointAddr, + psRGXSetBreakpointIN->ui32HandlerAddr, + psRGXSetBreakpointIN->ui32DM); + +RGXSetBreakpoint_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (hPrivDataInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXClearBreakpoint(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXClearBreakpointIN_UI8, + IMG_UINT8 * psRGXClearBreakpointOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT *psRGXClearBreakpointIN = + (PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT *) IMG_OFFSET_ADDR(psRGXClearBreakpointIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT *psRGXClearBreakpointOUT = + (PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT *) IMG_OFFSET_ADDR(psRGXClearBreakpointOUT_UI8, + 0); + + IMG_HANDLE hPrivData = psRGXClearBreakpointIN->hPrivData; + IMG_HANDLE hPrivDataInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXClearBreakpointOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hPrivDataInt, + hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); + if (unlikely(psRGXClearBreakpointOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXClearBreakpoint_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psRGXClearBreakpointOUT->eError = + PVRSRVRGXClearBreakpointKM(psConnection, OSGetDevNode(psConnection), hPrivDataInt); + +RGXClearBreakpoint_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (hPrivDataInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXEnableBreakpoint(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXEnableBreakpointIN_UI8, + IMG_UINT8 * psRGXEnableBreakpointOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT *psRGXEnableBreakpointIN = + (PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT *) IMG_OFFSET_ADDR(psRGXEnableBreakpointIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT *psRGXEnableBreakpointOUT = + (PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT *) IMG_OFFSET_ADDR(psRGXEnableBreakpointOUT_UI8, + 0); + + IMG_HANDLE hPrivData = psRGXEnableBreakpointIN->hPrivData; + IMG_HANDLE hPrivDataInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXEnableBreakpointOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hPrivDataInt, + hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); + if (unlikely(psRGXEnableBreakpointOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXEnableBreakpoint_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXEnableBreakpointOUT->eError = + PVRSRVRGXEnableBreakpointKM(psConnection, OSGetDevNode(psConnection), hPrivDataInt); + +RGXEnableBreakpoint_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (hPrivDataInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXDisableBreakpoint(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXDisableBreakpointIN_UI8, + IMG_UINT8 * psRGXDisableBreakpointOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT *psRGXDisableBreakpointIN = + (PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT *) IMG_OFFSET_ADDR(psRGXDisableBreakpointIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT *psRGXDisableBreakpointOUT = + (PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT *) + IMG_OFFSET_ADDR(psRGXDisableBreakpointOUT_UI8, 0); + + IMG_HANDLE hPrivData = psRGXDisableBreakpointIN->hPrivData; + IMG_HANDLE hPrivDataInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXDisableBreakpointOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hPrivDataInt, + hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); + if (unlikely(psRGXDisableBreakpointOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXDisableBreakpoint_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXDisableBreakpointOUT->eError = + PVRSRVRGXDisableBreakpointKM(psConnection, OSGetDevNode(psConnection), hPrivDataInt); + +RGXDisableBreakpoint_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (hPrivDataInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXOverallocateBPRegisters(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXOverallocateBPRegistersIN_UI8, + IMG_UINT8 * psRGXOverallocateBPRegistersOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS *psRGXOverallocateBPRegistersIN = + (PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS *) + IMG_OFFSET_ADDR(psRGXOverallocateBPRegistersIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS *psRGXOverallocateBPRegistersOUT = + (PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS *) + IMG_OFFSET_ADDR(psRGXOverallocateBPRegistersOUT_UI8, 0); + + psRGXOverallocateBPRegistersOUT->eError = + PVRSRVRGXOverallocateBPRegistersKM(psConnection, OSGetDevNode(psConnection), + psRGXOverallocateBPRegistersIN->ui32TempRegs, + psRGXOverallocateBPRegistersIN->ui32SharedRegs); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +#endif /* EXCLUDE_RGXBREAKPOINT_BRIDGE */ + +#if !defined(EXCLUDE_RGXBREAKPOINT_BRIDGE) +PVRSRV_ERROR InitRGXBREAKPOINTBridge(void); +PVRSRV_ERROR DeinitRGXBREAKPOINTBridge(void); + +/* + * Register all RGXBREAKPOINT functions with services + */ +PVRSRV_ERROR InitRGXBREAKPOINTBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, + PVRSRV_BRIDGE_RGXBREAKPOINT_RGXSETBREAKPOINT, + PVRSRVBridgeRGXSetBreakpoint, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, + PVRSRV_BRIDGE_RGXBREAKPOINT_RGXCLEARBREAKPOINT, + PVRSRVBridgeRGXClearBreakpoint, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, + 
PVRSRV_BRIDGE_RGXBREAKPOINT_RGXENABLEBREAKPOINT, + PVRSRVBridgeRGXEnableBreakpoint, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, + PVRSRV_BRIDGE_RGXBREAKPOINT_RGXDISABLEBREAKPOINT, + PVRSRVBridgeRGXDisableBreakpoint, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, + PVRSRV_BRIDGE_RGXBREAKPOINT_RGXOVERALLOCATEBPREGISTERS, + PVRSRVBridgeRGXOverallocateBPRegisters, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all rgxbreakpoint functions with services + */ +PVRSRV_ERROR DeinitRGXBREAKPOINTBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, + PVRSRV_BRIDGE_RGXBREAKPOINT_RGXSETBREAKPOINT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, + PVRSRV_BRIDGE_RGXBREAKPOINT_RGXCLEARBREAKPOINT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, + PVRSRV_BRIDGE_RGXBREAKPOINT_RGXENABLEBREAKPOINT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, + PVRSRV_BRIDGE_RGXBREAKPOINT_RGXDISABLEBREAKPOINT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, + PVRSRV_BRIDGE_RGXBREAKPOINT_RGXOVERALLOCATEBPREGISTERS); + + return PVRSRV_OK; +} +#else /* EXCLUDE_RGXBREAKPOINT_BRIDGE */ +/* This bridge is conditional on EXCLUDE_RGXBREAKPOINT_BRIDGE - when defined, + * do not populate the dispatch table with its functions + */ +#define InitRGXBREAKPOINTBridge() \ + PVRSRV_OK + +#define DeinitRGXBREAKPOINTBridge() \ + PVRSRV_OK + +#endif /* EXCLUDE_RGXBREAKPOINT_BRIDGE */ diff --git a/drivers/gpu/drm/phytium/octopus/server_rgxcmp_bridge.c b/drivers/gpu/drm/phytium/octopus/server_rgxcmp_bridge.c new file mode 100644 index 000000000000..653395493a8c --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/server_rgxcmp_bridge.c @@ -0,0 +1,1013 @@ +/******************************************************************************* +@File +@Title Server bridge for rgxcmp +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Description Implements the server side of the bridge for rgxcmp +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include <linux/uaccess.h> + +#include "img_defs.h" + +#include "rgxcompute.h" + +#include "common_rgxcmp_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include <linux/slab.h> + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static PVRSRV_ERROR _RGXCreateComputeContextpsComputeContextIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PVRSRVRGXDestroyComputeContextKM((RGX_SERVER_COMPUTE_CONTEXT *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRGXCreateComputeContext(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCreateComputeContextIN_UI8, + IMG_UINT8 * psRGXCreateComputeContextOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT *psRGXCreateComputeContextIN = + (PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT *) + IMG_OFFSET_ADDR(psRGXCreateComputeContextIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT *psRGXCreateComputeContextOUT = + (PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT *) + IMG_OFFSET_ADDR(psRGXCreateComputeContextOUT_UI8, 0); + + IMG_BYTE *ui8FrameworkCmdInt = NULL; + IMG_HANDLE hPrivData = psRGXCreateComputeContextIN->hPrivData; + IMG_HANDLE hPrivDataInt = NULL; +
IMG_BYTE *ui8StaticComputeContextStateInt = NULL; + RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRGXCreateComputeContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) + + (psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize * sizeof(IMG_BYTE)) + 0; + + if (unlikely(psRGXCreateComputeContextIN->ui32FrameworkCmdSize > RGXFWIF_RF_CMD_SIZE)) + { + psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXCreateComputeContext_exit; + } + + if (unlikely + (psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize > + RGXFWIF_STATIC_COMPUTECONTEXT_SIZE)) + { + psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXCreateComputeContext_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXCreateComputeContextIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXCreateComputeContextIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXCreateComputeContext_exit; + } + } + } + + if (psRGXCreateComputeContextIN->ui32FrameworkCmdSize != 0) + { + ui8FrameworkCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXCreateComputeContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXCreateComputeContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, ui8FrameworkCmdInt, + (const void __user *)psRGXCreateComputeContextIN->pui8FrameworkCmd, + psRGXCreateComputeContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) != + PVRSRV_OK) + { + psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXCreateComputeContext_exit; + } + } + if (psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize != 0) + { + ui8StaticComputeContextStateInt = + (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize * + sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize * sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, ui8StaticComputeContextStateInt, + (const void __user *)psRGXCreateComputeContextIN-> + pui8StaticComputeContextState, + psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize * + sizeof(IMG_BYTE)) != PVRSRV_OK) + { + psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXCreateComputeContext_exit; + } + } + + /* 
Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXCreateComputeContextOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hPrivDataInt, + hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); + if (unlikely(psRGXCreateComputeContextOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateComputeContext_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXCreateComputeContextOUT->eError = + PVRSRVRGXCreateComputeContextKM(psConnection, OSGetDevNode(psConnection), + psRGXCreateComputeContextIN->ui32Priority, + psRGXCreateComputeContextIN->ui32FrameworkCmdSize, + ui8FrameworkCmdInt, + hPrivDataInt, + psRGXCreateComputeContextIN-> + ui32StaticComputeContextStateSize, + ui8StaticComputeContextStateInt, + psRGXCreateComputeContextIN->ui32PackedCCBSizeU88, + psRGXCreateComputeContextIN->ui32ContextFlags, + psRGXCreateComputeContextIN->ui64RobustnessAddress, + psRGXCreateComputeContextIN->ui32MaxDeadlineMS, + &psComputeContextInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXCreateComputeContextOUT->eError != PVRSRV_OK)) + { + goto RGXCreateComputeContext_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psRGXCreateComputeContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXCreateComputeContextOUT-> + hComputeContext, + (void *) + psComputeContextInt, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXCreateComputeContextpsComputeContextIntRelease); + if (unlikely(psRGXCreateComputeContextOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateComputeContext_exit; + } + + /* Release now we have created handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +RGXCreateComputeContext_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (hPrivDataInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psRGXCreateComputeContextOUT->eError != PVRSRV_OK) + { + if (psComputeContextInt) + { + PVRSRVRGXDestroyComputeContextKM(psComputeContextInt); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXDestroyComputeContext(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXDestroyComputeContextIN_UI8, + IMG_UINT8 * psRGXDestroyComputeContextOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT *psRGXDestroyComputeContextIN = + (PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT *) + IMG_OFFSET_ADDR(psRGXDestroyComputeContextIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT *psRGXDestroyComputeContextOUT = + (PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT *) + IMG_OFFSET_ADDR(psRGXDestroyComputeContextOUT_UI8, 0); + + /* Lock over handle destruction. 
*/ + LockHandle(psConnection->psHandleBase); + + psRGXDestroyComputeContextOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) psRGXDestroyComputeContextIN-> + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); + if (unlikely + ((psRGXDestroyComputeContextOUT->eError != PVRSRV_OK) + && (psRGXDestroyComputeContextOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psRGXDestroyComputeContextOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXDestroyComputeContext_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXDestroyComputeContext_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXFlushComputeData(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXFlushComputeDataIN_UI8, + IMG_UINT8 * psRGXFlushComputeDataOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA *psRGXFlushComputeDataIN = + (PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA *) IMG_OFFSET_ADDR(psRGXFlushComputeDataIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA *psRGXFlushComputeDataOUT = + (PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA *) IMG_OFFSET_ADDR(psRGXFlushComputeDataOUT_UI8, + 0); + + IMG_HANDLE hComputeContext = psRGXFlushComputeDataIN->hComputeContext; + RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXFlushComputeDataOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psComputeContextInt, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE); + if (unlikely(psRGXFlushComputeDataOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXFlushComputeData_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psRGXFlushComputeDataOUT->eError = PVRSRVRGXFlushComputeDataKM(psComputeContextInt); + +RGXFlushComputeData_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psComputeContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXSetComputeContextPriority(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXSetComputeContextPriorityIN_UI8, + IMG_UINT8 * psRGXSetComputeContextPriorityOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY *psRGXSetComputeContextPriorityIN = + (PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY *) + IMG_OFFSET_ADDR(psRGXSetComputeContextPriorityIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY *psRGXSetComputeContextPriorityOUT = + (PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY *) + IMG_OFFSET_ADDR(psRGXSetComputeContextPriorityOUT_UI8, 0); + + IMG_HANDLE hComputeContext = psRGXSetComputeContextPriorityIN->hComputeContext; + RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXSetComputeContextPriorityOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psComputeContextInt, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE); + if (unlikely(psRGXSetComputeContextPriorityOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXSetComputeContextPriority_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psRGXSetComputeContextPriorityOUT->eError = + PVRSRVRGXSetComputeContextPriorityKM(psConnection, OSGetDevNode(psConnection), + psComputeContextInt, + psRGXSetComputeContextPriorityIN->ui32Priority); + +RGXSetComputeContextPriority_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psComputeContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXNotifyComputeWriteOffsetUpdate(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXNotifyComputeWriteOffsetUpdateIN_UI8, + IMG_UINT8 * + psRGXNotifyComputeWriteOffsetUpdateOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE *psRGXNotifyComputeWriteOffsetUpdateIN = + (PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE *) + IMG_OFFSET_ADDR(psRGXNotifyComputeWriteOffsetUpdateIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE *psRGXNotifyComputeWriteOffsetUpdateOUT + = + (PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE *) + IMG_OFFSET_ADDR(psRGXNotifyComputeWriteOffsetUpdateOUT_UI8, 0); + + IMG_HANDLE hComputeContext = psRGXNotifyComputeWriteOffsetUpdateIN->hComputeContext; + RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXNotifyComputeWriteOffsetUpdateOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psComputeContextInt, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE); + if (unlikely(psRGXNotifyComputeWriteOffsetUpdateOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXNotifyComputeWriteOffsetUpdate_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXNotifyComputeWriteOffsetUpdateOUT->eError = + PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(psComputeContextInt); + +RGXNotifyComputeWriteOffsetUpdate_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psComputeContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXKickCDM2(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXKickCDM2IN_UI8, + IMG_UINT8 * psRGXKickCDM2OUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXKICKCDM2 *psRGXKickCDM2IN = + (PVRSRV_BRIDGE_IN_RGXKICKCDM2 *) IMG_OFFSET_ADDR(psRGXKickCDM2IN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXKICKCDM2 *psRGXKickCDM2OUT = + (PVRSRV_BRIDGE_OUT_RGXKICKCDM2 *) IMG_OFFSET_ADDR(psRGXKickCDM2OUT_UI8, 0); + + IMG_HANDLE hComputeContext = psRGXKickCDM2IN->hComputeContext; + RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; + SYNC_PRIMITIVE_BLOCK **psClientUpdateUFOSyncPrimBlockInt = NULL; + IMG_HANDLE *hClientUpdateUFOSyncPrimBlockInt2 = NULL; + IMG_UINT32 *ui32ClientUpdateOffsetInt = NULL; + IMG_UINT32 *ui32ClientUpdateValueInt = NULL; + IMG_CHAR *uiUpdateFenceNameInt = NULL; + IMG_BYTE *ui8DMCmdInt = NULL; + IMG_UINT32 *ui32SyncPMRFlagsInt = NULL; + PMR **psSyncPMRsInt = NULL; + IMG_HANDLE *hSyncPMRsInt2 = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) + + (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) + + (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + + (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + + (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + + (psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE)) + + (psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) + + (psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(PMR *)) + + (psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0; + + if (unlikely(psRGXKickCDM2IN->ui32ClientUpdateCount > PVRSRV_MAX_SYNCS)) + { + psRGXKickCDM2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickCDM2_exit; + } + + if (unlikely(psRGXKickCDM2IN->ui32CmdSize > 
RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) + { + psRGXKickCDM2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickCDM2_exit; + } + + if (unlikely(psRGXKickCDM2IN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS)) + { + psRGXKickCDM2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickCDM2_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXKickCDM2IN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXKickCDM2IN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXKickCDM2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXKickCDM2_exit; + } + } + } + + if (psRGXKickCDM2IN->ui32ClientUpdateCount != 0) + { + psClientUpdateUFOSyncPrimBlockInt = + (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *); + hClientUpdateUFOSyncPrimBlockInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hClientUpdateUFOSyncPrimBlockInt2, + (const void __user *)psRGXKickCDM2IN->phClientUpdateUFOSyncPrimBlock, + psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickCDM2_exit; + 
} + } + if (psRGXKickCDM2IN->ui32ClientUpdateCount != 0) + { + ui32ClientUpdateOffsetInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32ClientUpdateOffsetInt, + (const void __user *)psRGXKickCDM2IN->pui32ClientUpdateOffset, + psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickCDM2_exit; + } + } + if (psRGXKickCDM2IN->ui32ClientUpdateCount != 0) + { + ui32ClientUpdateValueInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32ClientUpdateValueInt, + (const void __user *)psRGXKickCDM2IN->pui32ClientUpdateValue, + psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickCDM2_exit; + } + } + + { + uiUpdateFenceNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiUpdateFenceNameInt, + (const void __user *)psRGXKickCDM2IN->puiUpdateFenceName, + PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickCDM2_exit; + } + ((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) - + 1] = '\0'; + } + if (psRGXKickCDM2IN->ui32CmdSize != 0) + { + ui8DMCmdInt = (IMG_BYTE *) 
IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, ui8DMCmdInt, (const void __user *)psRGXKickCDM2IN->pui8DMCmd, + psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK) + { + psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickCDM2_exit; + } + } + if (psRGXKickCDM2IN->ui32SyncPMRCount != 0) + { + ui32SyncPMRFlagsInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32SyncPMRFlagsInt, + (const void __user *)psRGXKickCDM2IN->pui32SyncPMRFlags, + psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickCDM2_exit; + } + } + if (psRGXKickCDM2IN->ui32SyncPMRCount != 0) + { + psSyncPMRsInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(PMR *); + hSyncPMRsInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hSyncPMRsInt2, (const void __user *)psRGXKickCDM2IN->phSyncPMRs, + psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickCDM2_exit; + } + } + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXKickCDM2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psComputeContextInt, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE); + if (unlikely(psRGXKickCDM2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickCDM2_exit; + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickCDM2IN->ui32ClientUpdateCount; i++) + { + /* Look up the address from the handle */ + psRGXKickCDM2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **) + &psClientUpdateUFOSyncPrimBlockInt[i], + hClientUpdateUFOSyncPrimBlockInt2[i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psRGXKickCDM2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickCDM2_exit; + } + } + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickCDM2IN->ui32SyncPMRCount; i++) + { + /* Look up the address from the handle */ + psRGXKickCDM2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSyncPMRsInt[i], + hSyncPMRsInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psRGXKickCDM2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickCDM2_exit; + } + } + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psRGXKickCDM2OUT->eError = + PVRSRVRGXKickCDMKM(psComputeContextInt, + psRGXKickCDM2IN->ui32ClientCacheOpSeqNum, + psRGXKickCDM2IN->ui32ClientUpdateCount, + psClientUpdateUFOSyncPrimBlockInt, + ui32ClientUpdateOffsetInt, + ui32ClientUpdateValueInt, + psRGXKickCDM2IN->hCheckFenceFd, + psRGXKickCDM2IN->hUpdateTimeline, + &psRGXKickCDM2OUT->hUpdateFence, + uiUpdateFenceNameInt, + psRGXKickCDM2IN->ui32CmdSize, + ui8DMCmdInt, + psRGXKickCDM2IN->ui32PDumpFlags, + psRGXKickCDM2IN->ui32ExtJobRef, + psRGXKickCDM2IN->ui32SyncPMRCount, + ui32SyncPMRFlagsInt, + psSyncPMRsInt, + psRGXKickCDM2IN->ui32NumOfWorkgroups, + psRGXKickCDM2IN->ui32NumOfWorkitems, + psRGXKickCDM2IN->ui64DeadlineInus); + +RGXKickCDM2_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psComputeContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); + } + + if (hClientUpdateUFOSyncPrimBlockInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickCDM2IN->ui32ClientUpdateCount; i++) + { + + /* Unreference the previously looked up handle */ + if (hClientUpdateUFOSyncPrimBlockInt2[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hClientUpdateUFOSyncPrimBlockInt2[i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + } + } + + if (hSyncPMRsInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickCDM2IN->ui32SyncPMRCount; i++) + { + + /* Unreference the previously looked up handle */ + if (hSyncPMRsInt2[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSyncPMRsInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + } + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXSetComputeContextProperty(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXSetComputeContextPropertyIN_UI8, + IMG_UINT8 * psRGXSetComputeContextPropertyOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY *psRGXSetComputeContextPropertyIN = + (PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXSetComputeContextPropertyIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY *psRGXSetComputeContextPropertyOUT = + (PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXSetComputeContextPropertyOUT_UI8, 0); + + IMG_HANDLE hComputeContext = psRGXSetComputeContextPropertyIN->hComputeContext; + RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXSetComputeContextPropertyOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psComputeContextInt, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE); + if (unlikely(psRGXSetComputeContextPropertyOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXSetComputeContextProperty_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psRGXSetComputeContextPropertyOUT->eError = + PVRSRVRGXSetComputeContextPropertyKM(psComputeContextInt, + psRGXSetComputeContextPropertyIN->ui32Property, + psRGXSetComputeContextPropertyIN->ui64Input, + &psRGXSetComputeContextPropertyOUT->ui64Output); + +RGXSetComputeContextProperty_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psComputeContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hComputeContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXGetLastDeviceError(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXGetLastDeviceErrorIN_UI8, + IMG_UINT8 * psRGXGetLastDeviceErrorOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR *psRGXGetLastDeviceErrorIN = + (PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR *) + IMG_OFFSET_ADDR(psRGXGetLastDeviceErrorIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR *psRGXGetLastDeviceErrorOUT = + (PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR *) + IMG_OFFSET_ADDR(psRGXGetLastDeviceErrorOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psRGXGetLastDeviceErrorIN); + + psRGXGetLastDeviceErrorOUT->eError = + PVRSRVRGXGetLastDeviceErrorKM(psConnection, OSGetDevNode(psConnection), + &psRGXGetLastDeviceErrorOUT->ui32Error); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitRGXCMPBridge(void); +PVRSRV_ERROR DeinitRGXCMPBridge(void); + +/* + * Register all RGXCMP functions with services + */ +PVRSRV_ERROR InitRGXCMPBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT, + PVRSRVBridgeRGXCreateComputeContext, NULL); + + 
SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT, + PVRSRVBridgeRGXDestroyComputeContext, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA, + PVRSRVBridgeRGXFlushComputeData, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY, + PVRSRVBridgeRGXSetComputeContextPriority, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE, + PVRSRVBridgeRGXNotifyComputeWriteOffsetUpdate, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2, + PVRSRVBridgeRGXKickCDM2, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPROPERTY, + PVRSRVBridgeRGXSetComputeContextProperty, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXGETLASTDEVICEERROR, + PVRSRVBridgeRGXGetLastDeviceError, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all rgxcmp functions with services + */ +PVRSRV_ERROR DeinitRGXCMPBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, + PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPROPERTY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXGETLASTDEVICEERROR); + + return PVRSRV_OK; +} diff --git a/drivers/gpu/drm/phytium/octopus/server_rgxfwdbg_bridge.c 
b/drivers/gpu/drm/phytium/octopus/server_rgxfwdbg_bridge.c new file mode 100644 index 000000000000..1f90d7a3c1f0 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/server_rgxfwdbg_bridge.c @@ -0,0 +1,306 @@ +/******************************************************************************* +@File +@Title Server bridge for rgxfwdbg +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for rgxfwdbg +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "devicemem_server.h" +#include "rgxfwdbg.h" +#include "pmr.h" +#include "rgxtimecorr.h" + +#include "common_rgxfwdbg_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgeRGXFWDebugSetFWLog(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXFWDebugSetFWLogIN_UI8, + IMG_UINT8 * psRGXFWDebugSetFWLogOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXFWDEBUGSETFWLOG *psRGXFWDebugSetFWLogIN = + (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETFWLOG *) IMG_OFFSET_ADDR(psRGXFWDebugSetFWLogIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETFWLOG *psRGXFWDebugSetFWLogOUT = + (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETFWLOG *) IMG_OFFSET_ADDR(psRGXFWDebugSetFWLogOUT_UI8, + 0); + + psRGXFWDebugSetFWLogOUT->eError = + PVRSRVRGXFWDebugSetFWLogKM(psConnection, OSGetDevNode(psConnection), + psRGXFWDebugSetFWLogIN->ui32RGXFWLogType); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXFWDebugDumpFreelistPageList(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXFWDebugDumpFreelistPageListIN_UI8, + IMG_UINT8 * 
psRGXFWDebugDumpFreelistPageListOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXFWDEBUGDUMPFREELISTPAGELIST *psRGXFWDebugDumpFreelistPageListIN = + (PVRSRV_BRIDGE_IN_RGXFWDEBUGDUMPFREELISTPAGELIST *) + IMG_OFFSET_ADDR(psRGXFWDebugDumpFreelistPageListIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST *psRGXFWDebugDumpFreelistPageListOUT = + (PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST *) + IMG_OFFSET_ADDR(psRGXFWDebugDumpFreelistPageListOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psRGXFWDebugDumpFreelistPageListIN); + + psRGXFWDebugDumpFreelistPageListOUT->eError = + PVRSRVRGXFWDebugDumpFreelistPageListKM(psConnection, OSGetDevNode(psConnection)); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXFWDebugSetHCSDeadline(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXFWDebugSetHCSDeadlineIN_UI8, + IMG_UINT8 * psRGXFWDebugSetHCSDeadlineOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXFWDEBUGSETHCSDEADLINE *psRGXFWDebugSetHCSDeadlineIN = + (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETHCSDEADLINE *) + IMG_OFFSET_ADDR(psRGXFWDebugSetHCSDeadlineIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE *psRGXFWDebugSetHCSDeadlineOUT = + (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE *) + IMG_OFFSET_ADDR(psRGXFWDebugSetHCSDeadlineOUT_UI8, 0); + + psRGXFWDebugSetHCSDeadlineOUT->eError = + PVRSRVRGXFWDebugSetHCSDeadlineKM(psConnection, OSGetDevNode(psConnection), + psRGXFWDebugSetHCSDeadlineIN->ui32RGXHCSDeadline); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXFWDebugSetOSidPriority(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXFWDebugSetOSidPriorityIN_UI8, + IMG_UINT8 * psRGXFWDebugSetOSidPriorityOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSIDPRIORITY *psRGXFWDebugSetOSidPriorityIN = + (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSIDPRIORITY *) + IMG_OFFSET_ADDR(psRGXFWDebugSetOSidPriorityIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY *psRGXFWDebugSetOSidPriorityOUT = + 
(PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY *) + IMG_OFFSET_ADDR(psRGXFWDebugSetOSidPriorityOUT_UI8, 0); + + psRGXFWDebugSetOSidPriorityOUT->eError = + PVRSRVRGXFWDebugSetOSidPriorityKM(psConnection, OSGetDevNode(psConnection), + psRGXFWDebugSetOSidPriorityIN->ui32OSid, + psRGXFWDebugSetOSidPriorityIN->ui32Priority); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXFWDebugSetOSNewOnlineState(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXFWDebugSetOSNewOnlineStateIN_UI8, + IMG_UINT8 * psRGXFWDebugSetOSNewOnlineStateOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE *psRGXFWDebugSetOSNewOnlineStateIN = + (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE *) + IMG_OFFSET_ADDR(psRGXFWDebugSetOSNewOnlineStateIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE *psRGXFWDebugSetOSNewOnlineStateOUT = + (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE *) + IMG_OFFSET_ADDR(psRGXFWDebugSetOSNewOnlineStateOUT_UI8, 0); + + psRGXFWDebugSetOSNewOnlineStateOUT->eError = + PVRSRVRGXFWDebugSetOSNewOnlineStateKM(psConnection, OSGetDevNode(psConnection), + psRGXFWDebugSetOSNewOnlineStateIN->ui32OSid, + psRGXFWDebugSetOSNewOnlineStateIN-> + ui32OSNewState); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXFWDebugPHRConfigure(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXFWDebugPHRConfigureIN_UI8, + IMG_UINT8 * psRGXFWDebugPHRConfigureOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXFWDEBUGPHRCONFIGURE *psRGXFWDebugPHRConfigureIN = + (PVRSRV_BRIDGE_IN_RGXFWDEBUGPHRCONFIGURE *) + IMG_OFFSET_ADDR(psRGXFWDebugPHRConfigureIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXFWDEBUGPHRCONFIGURE *psRGXFWDebugPHRConfigureOUT = + (PVRSRV_BRIDGE_OUT_RGXFWDEBUGPHRCONFIGURE *) + IMG_OFFSET_ADDR(psRGXFWDebugPHRConfigureOUT_UI8, 0); + + psRGXFWDebugPHRConfigureOUT->eError = + PVRSRVRGXFWDebugPHRConfigureKM(psConnection, OSGetDevNode(psConnection), + psRGXFWDebugPHRConfigureIN->ui32ui32PHRMode); + + return 0; +} + +static IMG_INT 
+PVRSRVBridgeRGXFWDebugWdgConfigure(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXFWDebugWdgConfigureIN_UI8, + IMG_UINT8 * psRGXFWDebugWdgConfigureOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXFWDEBUGWDGCONFIGURE *psRGXFWDebugWdgConfigureIN = + (PVRSRV_BRIDGE_IN_RGXFWDEBUGWDGCONFIGURE *) + IMG_OFFSET_ADDR(psRGXFWDebugWdgConfigureIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXFWDEBUGWDGCONFIGURE *psRGXFWDebugWdgConfigureOUT = + (PVRSRV_BRIDGE_OUT_RGXFWDEBUGWDGCONFIGURE *) + IMG_OFFSET_ADDR(psRGXFWDebugWdgConfigureOUT_UI8, 0); + + psRGXFWDebugWdgConfigureOUT->eError = + PVRSRVRGXFWDebugWdgConfigureKM(psConnection, OSGetDevNode(psConnection), + psRGXFWDebugWdgConfigureIN->ui32ui32WdgPeriodUs); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXCurrentTime(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCurrentTimeIN_UI8, + IMG_UINT8 * psRGXCurrentTimeOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCURRENTTIME *psRGXCurrentTimeIN = + (PVRSRV_BRIDGE_IN_RGXCURRENTTIME *) IMG_OFFSET_ADDR(psRGXCurrentTimeIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCURRENTTIME *psRGXCurrentTimeOUT = + (PVRSRV_BRIDGE_OUT_RGXCURRENTTIME *) IMG_OFFSET_ADDR(psRGXCurrentTimeOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psRGXCurrentTimeIN); + + psRGXCurrentTimeOUT->eError = + PVRSRVRGXCurrentTime(psConnection, OSGetDevNode(psConnection), + &psRGXCurrentTimeOUT->ui64Time); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitRGXFWDBGBridge(void); +PVRSRV_ERROR DeinitRGXFWDBGBridge(void); + +/* + * Register all RGXFWDBG functions with services + */ +PVRSRV_ERROR InitRGXFWDBGBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG, + PVRSRVBridgeRGXFWDebugSetFWLog, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST, + 
PVRSRVBridgeRGXFWDebugDumpFreelistPageList, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE, + PVRSRVBridgeRGXFWDebugSetHCSDeadline, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSIDPRIORITY, + PVRSRVBridgeRGXFWDebugSetOSidPriority, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE, + PVRSRVBridgeRGXFWDebugSetOSNewOnlineState, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE, + PVRSRVBridgeRGXFWDebugPHRConfigure, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGWDGCONFIGURE, + PVRSRVBridgeRGXFWDebugWdgConfigure, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME, + PVRSRVBridgeRGXCurrentTime, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all rgxfwdbg functions with services + */ +PVRSRV_ERROR DeinitRGXFWDBGBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSIDPRIORITY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, + PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGWDGCONFIGURE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME); + + return PVRSRV_OK; +} diff --git a/drivers/gpu/drm/phytium/octopus/server_rgxhwperf_bridge.c 
b/drivers/gpu/drm/phytium/octopus/server_rgxhwperf_bridge.c new file mode 100644 index 000000000000..56830b4e4707 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/server_rgxhwperf_bridge.c @@ -0,0 +1,363 @@ +/******************************************************************************* +@File +@Title Server bridge for rgxhwperf +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for rgxhwperf +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "rgxhwperf.h" +#include "rgx_fwif_km.h" + +#include "common_rgxhwperf_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgeRGXCtrlHWPerf(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCtrlHWPerfIN_UI8, + IMG_UINT8 * psRGXCtrlHWPerfOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCTRLHWPERF *psRGXCtrlHWPerfIN = + (PVRSRV_BRIDGE_IN_RGXCTRLHWPERF *) IMG_OFFSET_ADDR(psRGXCtrlHWPerfIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF *psRGXCtrlHWPerfOUT = + (PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF *) IMG_OFFSET_ADDR(psRGXCtrlHWPerfOUT_UI8, 0); + + psRGXCtrlHWPerfOUT->eError = + PVRSRVRGXCtrlHWPerfKM(psConnection, OSGetDevNode(psConnection), + psRGXCtrlHWPerfIN->ui32StreamId, + psRGXCtrlHWPerfIN->bToggle, psRGXCtrlHWPerfIN->ui64Mask); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXConfigureHWPerfBlocks(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXConfigureHWPerfBlocksIN_UI8, + IMG_UINT8 * psRGXConfigureHWPerfBlocksOUT_UI8, + CONNECTION_DATA * psConnection) +{ + 
PVRSRV_BRIDGE_IN_RGXCONFIGUREHWPERFBLOCKS *psRGXConfigureHWPerfBlocksIN = + (PVRSRV_BRIDGE_IN_RGXCONFIGUREHWPERFBLOCKS *) + IMG_OFFSET_ADDR(psRGXConfigureHWPerfBlocksIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCONFIGUREHWPERFBLOCKS *psRGXConfigureHWPerfBlocksOUT = + (PVRSRV_BRIDGE_OUT_RGXCONFIGUREHWPERFBLOCKS *) + IMG_OFFSET_ADDR(psRGXConfigureHWPerfBlocksOUT_UI8, 0); + + RGX_HWPERF_CONFIG_CNTBLK *psBlockConfigsInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRGXConfigureHWPerfBlocksIN->ui32ArrayLen * sizeof(RGX_HWPERF_CONFIG_CNTBLK)) + 0; + + if (unlikely(psRGXConfigureHWPerfBlocksIN->ui32ArrayLen > RGXFWIF_HWPERF_CTRL_BLKS_MAX + 3)) + { + psRGXConfigureHWPerfBlocksOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXConfigureHWPerfBlocks_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXConfigureHWPerfBlocksIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXConfigureHWPerfBlocksIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXConfigureHWPerfBlocksOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXConfigureHWPerfBlocks_exit; + } + } + } + + if (psRGXConfigureHWPerfBlocksIN->ui32ArrayLen != 0) + { + psBlockConfigsInt = + (RGX_HWPERF_CONFIG_CNTBLK *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXConfigureHWPerfBlocksIN->ui32ArrayLen * sizeof(RGX_HWPERF_CONFIG_CNTBLK); + } + + /* Copy the data over */ + if (psRGXConfigureHWPerfBlocksIN->ui32ArrayLen * sizeof(RGX_HWPERF_CONFIG_CNTBLK) > 0) + { + if (OSCopyFromUser + (NULL, psBlockConfigsInt, + (const void __user *)psRGXConfigureHWPerfBlocksIN->psBlockConfigs, + psRGXConfigureHWPerfBlocksIN->ui32ArrayLen * + sizeof(RGX_HWPERF_CONFIG_CNTBLK)) != PVRSRV_OK) + { + psRGXConfigureHWPerfBlocksOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXConfigureHWPerfBlocks_exit; + } + } + + psRGXConfigureHWPerfBlocksOUT->eError = + PVRSRVRGXConfigureHWPerfBlocksKM(psConnection, OSGetDevNode(psConnection), + psRGXConfigureHWPerfBlocksIN->ui32CtrlWord, + psRGXConfigureHWPerfBlocksIN->ui32ArrayLen, + psBlockConfigsInt); + +RGXConfigureHWPerfBlocks_exit: + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXGetHWPerfBvncFeatureFlags(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXGetHWPerfBvncFeatureFlagsIN_UI8, + IMG_UINT8 * psRGXGetHWPerfBvncFeatureFlagsOUT_UI8, + 
CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXGETHWPERFBVNCFEATUREFLAGS *psRGXGetHWPerfBvncFeatureFlagsIN = + (PVRSRV_BRIDGE_IN_RGXGETHWPERFBVNCFEATUREFLAGS *) + IMG_OFFSET_ADDR(psRGXGetHWPerfBvncFeatureFlagsIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS *psRGXGetHWPerfBvncFeatureFlagsOUT = + (PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS *) + IMG_OFFSET_ADDR(psRGXGetHWPerfBvncFeatureFlagsOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psRGXGetHWPerfBvncFeatureFlagsIN); + + psRGXGetHWPerfBvncFeatureFlagsOUT->eError = + PVRSRVRGXGetHWPerfBvncFeatureFlagsKM(psConnection, OSGetDevNode(psConnection), + &psRGXGetHWPerfBvncFeatureFlagsOUT->sBVNC); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXControlHWPerfBlocks(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXControlHWPerfBlocksIN_UI8, + IMG_UINT8 * psRGXControlHWPerfBlocksOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCONTROLHWPERFBLOCKS *psRGXControlHWPerfBlocksIN = + (PVRSRV_BRIDGE_IN_RGXCONTROLHWPERFBLOCKS *) + IMG_OFFSET_ADDR(psRGXControlHWPerfBlocksIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCONTROLHWPERFBLOCKS *psRGXControlHWPerfBlocksOUT = + (PVRSRV_BRIDGE_OUT_RGXCONTROLHWPERFBLOCKS *) + IMG_OFFSET_ADDR(psRGXControlHWPerfBlocksOUT_UI8, 0); + + IMG_UINT16 *ui16BlockIDsInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRGXControlHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT16)) + 0; + + if (unlikely(psRGXControlHWPerfBlocksIN->ui32ArrayLen > RGXFWIF_HWPERF_CTRL_BLKS_MAX)) + { + psRGXControlHWPerfBlocksOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXControlHWPerfBlocks_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXControlHWPerfBlocksIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXControlHWPerfBlocksIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXControlHWPerfBlocksOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXControlHWPerfBlocks_exit; + } + } + } + + if (psRGXControlHWPerfBlocksIN->ui32ArrayLen != 0) + { + ui16BlockIDsInt = (IMG_UINT16 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXControlHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT16); + } + + /* Copy the data over */ + if (psRGXControlHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT16) > 0) + { + if (OSCopyFromUser + (NULL, ui16BlockIDsInt, + (const void __user *)psRGXControlHWPerfBlocksIN->pui16BlockIDs, + psRGXControlHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT16)) != PVRSRV_OK) + { + psRGXControlHWPerfBlocksOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXControlHWPerfBlocks_exit; + } + } + + psRGXControlHWPerfBlocksOUT->eError = + PVRSRVRGXControlHWPerfBlocksKM(psConnection, OSGetDevNode(psConnection), + psRGXControlHWPerfBlocksIN->bEnable, + psRGXControlHWPerfBlocksIN->ui32ArrayLen, + ui16BlockIDsInt); + +RGXControlHWPerfBlocks_exit: + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +/* *************************************************************************** + * Server bridge 
dispatch related glue + */ + +PVRSRV_ERROR InitRGXHWPERFBridge(void); +PVRSRV_ERROR DeinitRGXHWPERFBridge(void); + +/* + * Register all RGXHWPERF functions with services + */ +PVRSRV_ERROR InitRGXHWPERFBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF, + PVRSRVBridgeRGXCtrlHWPerf, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGUREHWPERFBLOCKS, + PVRSRVBridgeRGXConfigureHWPerfBlocks, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS, + PVRSRVBridgeRGXGetHWPerfBvncFeatureFlags, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXCONTROLHWPERFBLOCKS, + PVRSRVBridgeRGXControlHWPerfBlocks, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all rgxhwperf functions with services + */ +PVRSRV_ERROR DeinitRGXHWPERFBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGUREHWPERFBLOCKS); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, + PVRSRV_BRIDGE_RGXHWPERF_RGXCONTROLHWPERFBLOCKS); + + return PVRSRV_OK; +} diff --git a/drivers/gpu/drm/phytium/octopus/server_rgxkicksync_bridge.c b/drivers/gpu/drm/phytium/octopus/server_rgxkicksync_bridge.c new file mode 100644 index 000000000000..ec8c98fec3ec --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/server_rgxkicksync_bridge.c @@ -0,0 +1,559 @@ +/******************************************************************************* +@File +@Title Server bridge for rgxkicksync +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Description Implements the server side of the bridge for rgxkicksync +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "rgxkicksync.h" + +#include "common_rgxkicksync_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static PVRSRV_ERROR _RGXCreateKickSyncContextpsKickSyncContextIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PVRSRVRGXDestroyKickSyncContextKM((RGX_SERVER_KICKSYNC_CONTEXT *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRGXCreateKickSyncContext(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCreateKickSyncContextIN_UI8, + IMG_UINT8 * psRGXCreateKickSyncContextOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT *psRGXCreateKickSyncContextIN = + (PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT *) + IMG_OFFSET_ADDR(psRGXCreateKickSyncContextIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT *psRGXCreateKickSyncContextOUT = + (PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT *) + IMG_OFFSET_ADDR(psRGXCreateKickSyncContextOUT_UI8, 0); + + IMG_HANDLE hPrivData = psRGXCreateKickSyncContextIN->hPrivData; + IMG_HANDLE hPrivDataInt = NULL; + 
RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContextInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXCreateKickSyncContextOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hPrivDataInt, + hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); + if (unlikely(psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateKickSyncContext_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXCreateKickSyncContextOUT->eError = + PVRSRVRGXCreateKickSyncContextKM(psConnection, OSGetDevNode(psConnection), + hPrivDataInt, + psRGXCreateKickSyncContextIN->ui32PackedCCBSizeU88, + psRGXCreateKickSyncContextIN->ui32ContextFlags, + &psKickSyncContextInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK)) + { + goto RGXCreateKickSyncContext_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psRGXCreateKickSyncContextOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXCreateKickSyncContextOUT->hKickSyncContext, + (void *)psKickSyncContextInt, + PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXCreateKickSyncContextpsKickSyncContextIntRelease); + if (unlikely(psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateKickSyncContext_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXCreateKickSyncContext_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (hPrivDataInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK) + { + if (psKickSyncContextInt) + { + PVRSRVRGXDestroyKickSyncContextKM(psKickSyncContextInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXDestroyKickSyncContext(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXDestroyKickSyncContextIN_UI8, + IMG_UINT8 * psRGXDestroyKickSyncContextOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT *psRGXDestroyKickSyncContextIN = + (PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT *) + IMG_OFFSET_ADDR(psRGXDestroyKickSyncContextIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT *psRGXDestroyKickSyncContextOUT = + (PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT *) + IMG_OFFSET_ADDR(psRGXDestroyKickSyncContextOUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psRGXDestroyKickSyncContextOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) psRGXDestroyKickSyncContextIN-> + hKickSyncContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT); + if (unlikely + ((psRGXDestroyKickSyncContextOUT->eError != PVRSRV_OK) + && (psRGXDestroyKickSyncContextOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psRGXDestroyKickSyncContextOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXDestroyKickSyncContext_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +RGXDestroyKickSyncContext_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXKickSync2(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXKickSync2IN_UI8, + IMG_UINT8 * psRGXKickSync2OUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXKICKSYNC2 *psRGXKickSync2IN = + (PVRSRV_BRIDGE_IN_RGXKICKSYNC2 *) IMG_OFFSET_ADDR(psRGXKickSync2IN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXKICKSYNC2 *psRGXKickSync2OUT = + (PVRSRV_BRIDGE_OUT_RGXKICKSYNC2 *) IMG_OFFSET_ADDR(psRGXKickSync2OUT_UI8, 0); + + IMG_HANDLE hKickSyncContext = psRGXKickSync2IN->hKickSyncContext; + RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContextInt = NULL; + SYNC_PRIMITIVE_BLOCK **psUpdateUFODevVarBlockInt = NULL; + IMG_HANDLE *hUpdateUFODevVarBlockInt2 = NULL; + IMG_UINT32 *ui32UpdateDevVarOffsetInt = NULL; + IMG_UINT32 *ui32UpdateValueInt = NULL; + IMG_CHAR *uiUpdateFenceNameInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) + + (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) + + (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + + (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + + (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + 0; + + if (unlikely(psRGXKickSync2IN->ui32ClientUpdateCount > PVRSRV_MAX_DEV_VARS)) + { + psRGXKickSync2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickSync2_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXKickSync2IN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXKickSync2IN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXKickSync2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXKickSync2_exit; + } + } + } + + if (psRGXKickSync2IN->ui32ClientUpdateCount != 0) + { + psUpdateUFODevVarBlockInt = + (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *); + hUpdateUFODevVarBlockInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hUpdateUFODevVarBlockInt2, + (const void __user *)psRGXKickSync2IN->phUpdateUFODevVarBlock, + psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXKickSync2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickSync2_exit; + } + } + if (psRGXKickSync2IN->ui32ClientUpdateCount != 0) + { + ui32UpdateDevVarOffsetInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32UpdateDevVarOffsetInt, + (const void __user *)psRGXKickSync2IN->pui32UpdateDevVarOffset, + psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickSync2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickSync2_exit; + } + } + if 
(psRGXKickSync2IN->ui32ClientUpdateCount != 0) + { + ui32UpdateValueInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32UpdateValueInt, + (const void __user *)psRGXKickSync2IN->pui32UpdateValue, + psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickSync2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickSync2_exit; + } + } + + { + uiUpdateFenceNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiUpdateFenceNameInt, + (const void __user *)psRGXKickSync2IN->puiUpdateFenceName, + PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psRGXKickSync2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickSync2_exit; + } + ((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) - + 1] = '\0'; + } + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXKickSync2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psKickSyncContextInt, + hKickSyncContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT, IMG_TRUE); + if (unlikely(psRGXKickSync2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickSync2_exit; + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickSync2IN->ui32ClientUpdateCount; i++) + { + /* Look up the address from the handle */ + psRGXKickSync2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psUpdateUFODevVarBlockInt[i], + hUpdateUFODevVarBlockInt2[i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psRGXKickSync2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickSync2_exit; + } + } + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXKickSync2OUT->eError = + PVRSRVRGXKickSyncKM(psKickSyncContextInt, + psRGXKickSync2IN->ui32ClientCacheOpSeqNum, + psRGXKickSync2IN->ui32ClientUpdateCount, + psUpdateUFODevVarBlockInt, + ui32UpdateDevVarOffsetInt, + ui32UpdateValueInt, + psRGXKickSync2IN->hCheckFenceFD, + psRGXKickSync2IN->hTimelineFenceFD, + &psRGXKickSync2OUT->hUpdateFenceFD, + uiUpdateFenceNameInt, psRGXKickSync2IN->ui32ExtJobRef); + +RGXKickSync2_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psKickSyncContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hKickSyncContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT); + } + + if (hUpdateUFODevVarBlockInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickSync2IN->ui32ClientUpdateCount; i++) + { + + /* Unreference the previously looked up handle */ + if (hUpdateUFODevVarBlockInt2[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hUpdateUFODevVarBlockInt2[i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + } + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXSetKickSyncContextProperty(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXSetKickSyncContextPropertyIN_UI8, + IMG_UINT8 * psRGXSetKickSyncContextPropertyOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXSETKICKSYNCCONTEXTPROPERTY *psRGXSetKickSyncContextPropertyIN = + (PVRSRV_BRIDGE_IN_RGXSETKICKSYNCCONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXSetKickSyncContextPropertyIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSETKICKSYNCCONTEXTPROPERTY *psRGXSetKickSyncContextPropertyOUT = + (PVRSRV_BRIDGE_OUT_RGXSETKICKSYNCCONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXSetKickSyncContextPropertyOUT_UI8, 0); + + IMG_HANDLE hKickSyncContext = psRGXSetKickSyncContextPropertyIN->hKickSyncContext; + RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContextInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXSetKickSyncContextPropertyOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psKickSyncContextInt, + hKickSyncContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT, IMG_TRUE); + if (unlikely(psRGXSetKickSyncContextPropertyOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXSetKickSyncContextProperty_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXSetKickSyncContextPropertyOUT->eError = + PVRSRVRGXSetKickSyncContextPropertyKM(psKickSyncContextInt, + psRGXSetKickSyncContextPropertyIN->ui32Property, + psRGXSetKickSyncContextPropertyIN->ui64Input, + &psRGXSetKickSyncContextPropertyOUT->ui64Output); + +RGXSetKickSyncContextProperty_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psKickSyncContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hKickSyncContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitRGXKICKSYNCBridge(void); +PVRSRV_ERROR DeinitRGXKICKSYNCBridge(void); + +/* + * Register all RGXKICKSYNC functions with services + */ +PVRSRV_ERROR InitRGXKICKSYNCBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, + PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT, + PVRSRVBridgeRGXCreateKickSyncContext, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, + PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT, + PVRSRVBridgeRGXDestroyKickSyncContext, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC2, + PVRSRVBridgeRGXKickSync2, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, + PVRSRV_BRIDGE_RGXKICKSYNC_RGXSETKICKSYNCCONTEXTPROPERTY, + PVRSRVBridgeRGXSetKickSyncContextProperty, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all rgxkicksync functions with services + */ +PVRSRV_ERROR DeinitRGXKICKSYNCBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, + PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, + PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC2); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, + PVRSRV_BRIDGE_RGXKICKSYNC_RGXSETKICKSYNCCONTEXTPROPERTY); + + return PVRSRV_OK; +} diff --git a/drivers/gpu/drm/phytium/octopus/server_rgxray_bridge.c b/drivers/gpu/drm/phytium/octopus/server_rgxray_bridge.c new file mode 100644 index 000000000000..de6a55d6f6d6 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/server_rgxray_bridge.c @@ -0,0 +1,600 @@ +/******************************************************************************* +@File +@Title Server bridge for rgxray +@Copyright 
Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for rgxray +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "rgxray.h" + +#include "common_rgxray_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static PVRSRV_ERROR _RGXCreateRayContextpsRayContextIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PVRSRVRGXDestroyRayContextKM((RGX_SERVER_RAY_CONTEXT *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRGXCreateRayContext(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCreateRayContextIN_UI8, + IMG_UINT8 * psRGXCreateRayContextOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCREATERAYCONTEXT *psRGXCreateRayContextIN = + (PVRSRV_BRIDGE_IN_RGXCREATERAYCONTEXT *) IMG_OFFSET_ADDR(psRGXCreateRayContextIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_RGXCREATERAYCONTEXT *psRGXCreateRayContextOUT = + (PVRSRV_BRIDGE_OUT_RGXCREATERAYCONTEXT *) IMG_OFFSET_ADDR(psRGXCreateRayContextOUT_UI8, + 0); + + IMG_HANDLE hPrivData = psRGXCreateRayContextIN->hPrivData; + IMG_HANDLE hPrivDataInt = NULL; + IMG_BYTE *ui8sStaticRayContextStateInt = NULL; + RGX_SERVER_RAY_CONTEXT *psRayContextInt = NULL; + + 
IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRGXCreateRayContextIN->ui32StaticRayContextStateSize * sizeof(IMG_BYTE)) + 0; + + if (unlikely + (psRGXCreateRayContextIN->ui32StaticRayContextStateSize > + RGXFWIF_STATIC_RAYCONTEXT_SIZE)) + { + psRGXCreateRayContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXCreateRayContext_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXCreateRayContextIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXCreateRayContextIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXCreateRayContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXCreateRayContext_exit; + } + } + } + + if (psRGXCreateRayContextIN->ui32StaticRayContextStateSize != 0) + { + ui8sStaticRayContextStateInt = + (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXCreateRayContextIN->ui32StaticRayContextStateSize * sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXCreateRayContextIN->ui32StaticRayContextStateSize * sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, ui8sStaticRayContextStateInt, + (const void __user *)psRGXCreateRayContextIN->pui8sStaticRayContextState, + psRGXCreateRayContextIN->ui32StaticRayContextStateSize * sizeof(IMG_BYTE)) != + PVRSRV_OK) + { + psRGXCreateRayContextOUT->eError = 
PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXCreateRayContext_exit; + } + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXCreateRayContextOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hPrivDataInt, + hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); + if (unlikely(psRGXCreateRayContextOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateRayContext_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXCreateRayContextOUT->eError = + PVRSRVRGXCreateRayContextKM(psConnection, OSGetDevNode(psConnection), + psRGXCreateRayContextIN->ui32ui32Priority, + hPrivDataInt, + psRGXCreateRayContextIN->ui32ContextFlags, + psRGXCreateRayContextIN->ui32StaticRayContextStateSize, + ui8sStaticRayContextStateInt, &psRayContextInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXCreateRayContextOUT->eError != PVRSRV_OK)) + { + goto RGXCreateRayContext_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psRGXCreateRayContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXCreateRayContextOUT-> + hRayContext, + (void *)psRayContextInt, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXCreateRayContextpsRayContextIntRelease); + if (unlikely(psRGXCreateRayContextOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateRayContext_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXCreateRayContext_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (hPrivDataInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psRGXCreateRayContextOUT->eError != PVRSRV_OK) + { + if (psRayContextInt) + { + PVRSRVRGXDestroyRayContextKM(psRayContextInt); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXDestroyRayContext(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXDestroyRayContextIN_UI8, + IMG_UINT8 * psRGXDestroyRayContextOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXDESTROYRAYCONTEXT *psRGXDestroyRayContextIN = + (PVRSRV_BRIDGE_IN_RGXDESTROYRAYCONTEXT *) IMG_OFFSET_ADDR(psRGXDestroyRayContextIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_RGXDESTROYRAYCONTEXT *psRGXDestroyRayContextOUT = + (PVRSRV_BRIDGE_OUT_RGXDESTROYRAYCONTEXT *) + IMG_OFFSET_ADDR(psRGXDestroyRayContextOUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psRGXDestroyRayContextOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) psRGXDestroyRayContextIN->hRayContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT); + if (unlikely((psRGXDestroyRayContextOUT->eError != PVRSRV_OK) && + (psRGXDestroyRayContextOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psRGXDestroyRayContextOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXDestroyRayContext_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +RGXDestroyRayContext_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXKickRDM(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXKickRDMIN_UI8, + IMG_UINT8 * psRGXKickRDMOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXKICKRDM *psRGXKickRDMIN = + (PVRSRV_BRIDGE_IN_RGXKICKRDM *) IMG_OFFSET_ADDR(psRGXKickRDMIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXKICKRDM *psRGXKickRDMOUT = + (PVRSRV_BRIDGE_OUT_RGXKICKRDM *) IMG_OFFSET_ADDR(psRGXKickRDMOUT_UI8, 0); + + IMG_HANDLE hRayContext = psRGXKickRDMIN->hRayContext; + RGX_SERVER_RAY_CONTEXT *psRayContextInt = NULL; + SYNC_PRIMITIVE_BLOCK **psClientUpdateUFOSyncPrimBlockInt = NULL; + IMG_HANDLE *hClientUpdateUFOSyncPrimBlockInt2 = NULL; + IMG_UINT32 *ui32ClientUpdateOffsetInt = NULL; + IMG_UINT32 *ui32ClientUpdateValueInt = NULL; + IMG_CHAR *uiUpdateFenceNameInt = NULL; + IMG_BYTE *ui8DMCmdInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRGXKickRDMIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) + + (psRGXKickRDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) + + (psRGXKickRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + + (psRGXKickRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + + (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + + (psRGXKickRDMIN->ui32CmdSize * sizeof(IMG_BYTE)) + 0; + + if (unlikely(psRGXKickRDMIN->ui32ClientUpdateCount > PVRSRV_MAX_SYNCS)) + { + psRGXKickRDMOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickRDM_exit; + } + + if (unlikely(psRGXKickRDMIN->ui32CmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) + { + psRGXKickRDMOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickRDM_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXKickRDMIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXKickRDMIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXKickRDMOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXKickRDM_exit; + } + } + } + + if (psRGXKickRDMIN->ui32ClientUpdateCount != 0) + { + psClientUpdateUFOSyncPrimBlockInt = + (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXKickRDMIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *); + hClientUpdateUFOSyncPrimBlockInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickRDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXKickRDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hClientUpdateUFOSyncPrimBlockInt2, + (const void __user *)psRGXKickRDMIN->phClientUpdateUFOSyncPrimBlock, + psRGXKickRDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXKickRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickRDM_exit; + } + } + if (psRGXKickRDMIN->ui32ClientUpdateCount != 0) + { + ui32ClientUpdateOffsetInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32ClientUpdateOffsetInt, + (const void __user *)psRGXKickRDMIN->pui32ClientUpdateOffset, + 
psRGXKickRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickRDM_exit; + } + } + if (psRGXKickRDMIN->ui32ClientUpdateCount != 0) + { + ui32ClientUpdateValueInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32ClientUpdateValueInt, + (const void __user *)psRGXKickRDMIN->pui32ClientUpdateValue, + psRGXKickRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickRDM_exit; + } + } + + { + uiUpdateFenceNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiUpdateFenceNameInt, + (const void __user *)psRGXKickRDMIN->puiUpdateFenceName, + PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psRGXKickRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickRDM_exit; + } + ((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) - + 1] = '\0'; + } + if (psRGXKickRDMIN->ui32CmdSize != 0) + { + ui8DMCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickRDMIN->ui32CmdSize * sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXKickRDMIN->ui32CmdSize * sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, ui8DMCmdInt, (const void __user *)psRGXKickRDMIN->pui8DMCmd, + psRGXKickRDMIN->ui32CmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK) + { + psRGXKickRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickRDM_exit; + } + } + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXKickRDMOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psRayContextInt, + hRayContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT, IMG_TRUE); + if (unlikely(psRGXKickRDMOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickRDM_exit; + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickRDMIN->ui32ClientUpdateCount; i++) + { + /* Look up the address from the handle */ + psRGXKickRDMOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **) + &psClientUpdateUFOSyncPrimBlockInt[i], + hClientUpdateUFOSyncPrimBlockInt2[i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psRGXKickRDMOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickRDM_exit; + } + } + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXKickRDMOUT->eError = + PVRSRVRGXKickRDMKM(psRayContextInt, + psRGXKickRDMIN->ui32ClientCacheOpSeqNum, + psRGXKickRDMIN->ui32ClientUpdateCount, + psClientUpdateUFOSyncPrimBlockInt, + ui32ClientUpdateOffsetInt, + ui32ClientUpdateValueInt, + psRGXKickRDMIN->hCheckFenceFd, + psRGXKickRDMIN->hUpdateTimeline, + &psRGXKickRDMOUT->hUpdateFence, + uiUpdateFenceNameInt, + psRGXKickRDMIN->ui32CmdSize, + ui8DMCmdInt, + psRGXKickRDMIN->ui32PDumpFlags, psRGXKickRDMIN->ui32ExtJobRef); + +RGXKickRDM_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psRayContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hRayContext, PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT); + } + + if (hClientUpdateUFOSyncPrimBlockInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickRDMIN->ui32ClientUpdateCount; i++) + { + + /* Unreference the previously looked up handle */ + if (hClientUpdateUFOSyncPrimBlockInt2[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hClientUpdateUFOSyncPrimBlockInt2[i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + } + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitRGXRAYBridge(void); +PVRSRV_ERROR DeinitRGXRAYBridge(void); + +/* + * Register all RGXRAY functions with services + */ +PVRSRV_ERROR InitRGXRAYBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXCREATERAYCONTEXT, + PVRSRVBridgeRGXCreateRayContext, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXDESTROYRAYCONTEXT, + PVRSRVBridgeRGXDestroyRayContext, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXKICKRDM, + PVRSRVBridgeRGXKickRDM, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all rgxray functions with services + */ +PVRSRV_ERROR DeinitRGXRAYBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXCREATERAYCONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, 
PVRSRV_BRIDGE_RGXRAY_RGXDESTROYRAYCONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXKICKRDM); + + return PVRSRV_OK; +} diff --git a/drivers/gpu/drm/phytium/octopus/server_rgxregconfig_bridge.c b/drivers/gpu/drm/phytium/octopus/server_rgxregconfig_bridge.c new file mode 100644 index 000000000000..90ebd988ab2c --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/server_rgxregconfig_bridge.c @@ -0,0 +1,241 @@ +/******************************************************************************* +@File +@Title Server bridge for rgxregconfig +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for rgxregconfig +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "rgxregconfig.h" + +#include "common_rgxregconfig_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE) + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgeRGXSetRegConfigType(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXSetRegConfigTypeIN_UI8, + IMG_UINT8 * psRGXSetRegConfigTypeOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE *psRGXSetRegConfigTypeIN = + (PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE *) IMG_OFFSET_ADDR(psRGXSetRegConfigTypeIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE *psRGXSetRegConfigTypeOUT = + (PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE *) IMG_OFFSET_ADDR(psRGXSetRegConfigTypeOUT_UI8, + 0); + + psRGXSetRegConfigTypeOUT->eError = + PVRSRVRGXSetRegConfigTypeKM(psConnection, OSGetDevNode(psConnection), + 
psRGXSetRegConfigTypeIN->ui8RegPowerIsland); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXAddRegconfig(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXAddRegconfigIN_UI8, + IMG_UINT8 * psRGXAddRegconfigOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXADDREGCONFIG *psRGXAddRegconfigIN = + (PVRSRV_BRIDGE_IN_RGXADDREGCONFIG *) IMG_OFFSET_ADDR(psRGXAddRegconfigIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG *psRGXAddRegconfigOUT = + (PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG *) IMG_OFFSET_ADDR(psRGXAddRegconfigOUT_UI8, 0); + + psRGXAddRegconfigOUT->eError = + PVRSRVRGXAddRegConfigKM(psConnection, OSGetDevNode(psConnection), + psRGXAddRegconfigIN->ui32RegAddr, + psRGXAddRegconfigIN->ui64RegValue, + psRGXAddRegconfigIN->ui64RegMask); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXClearRegConfig(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXClearRegConfigIN_UI8, + IMG_UINT8 * psRGXClearRegConfigOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG *psRGXClearRegConfigIN = + (PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG *) IMG_OFFSET_ADDR(psRGXClearRegConfigIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG *psRGXClearRegConfigOUT = + (PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG *) IMG_OFFSET_ADDR(psRGXClearRegConfigOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psRGXClearRegConfigIN); + + psRGXClearRegConfigOUT->eError = + PVRSRVRGXClearRegConfigKM(psConnection, OSGetDevNode(psConnection)); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXEnableRegConfig(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXEnableRegConfigIN_UI8, + IMG_UINT8 * psRGXEnableRegConfigOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG *psRGXEnableRegConfigIN = + (PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG *) IMG_OFFSET_ADDR(psRGXEnableRegConfigIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG *psRGXEnableRegConfigOUT = + (PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG *) IMG_OFFSET_ADDR(psRGXEnableRegConfigOUT_UI8, + 0); + + 
PVR_UNREFERENCED_PARAMETER(psRGXEnableRegConfigIN); + + psRGXEnableRegConfigOUT->eError = + PVRSRVRGXEnableRegConfigKM(psConnection, OSGetDevNode(psConnection)); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXDisableRegConfig(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXDisableRegConfigIN_UI8, + IMG_UINT8 * psRGXDisableRegConfigOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG *psRGXDisableRegConfigIN = + (PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG *) IMG_OFFSET_ADDR(psRGXDisableRegConfigIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG *psRGXDisableRegConfigOUT = + (PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG *) IMG_OFFSET_ADDR(psRGXDisableRegConfigOUT_UI8, + 0); + + PVR_UNREFERENCED_PARAMETER(psRGXDisableRegConfigIN); + + psRGXDisableRegConfigOUT->eError = + PVRSRVRGXDisableRegConfigKM(psConnection, OSGetDevNode(psConnection)); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +#endif /* EXCLUDE_RGXREGCONFIG_BRIDGE */ + +#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE) +PVRSRV_ERROR InitRGXREGCONFIGBridge(void); +PVRSRV_ERROR DeinitRGXREGCONFIGBridge(void); + +/* + * Register all RGXREGCONFIG functions with services + */ +PVRSRV_ERROR InitRGXREGCONFIGBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, + PVRSRV_BRIDGE_RGXREGCONFIG_RGXSETREGCONFIGTYPE, + PVRSRVBridgeRGXSetRegConfigType, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, + PVRSRV_BRIDGE_RGXREGCONFIG_RGXADDREGCONFIG, + PVRSRVBridgeRGXAddRegconfig, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, + PVRSRV_BRIDGE_RGXREGCONFIG_RGXCLEARREGCONFIG, + PVRSRVBridgeRGXClearRegConfig, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, + PVRSRV_BRIDGE_RGXREGCONFIG_RGXENABLEREGCONFIG, + PVRSRVBridgeRGXEnableRegConfig, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, + PVRSRV_BRIDGE_RGXREGCONFIG_RGXDISABLEREGCONFIG, + 
PVRSRVBridgeRGXDisableRegConfig, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all rgxregconfig functions with services + */ +PVRSRV_ERROR DeinitRGXREGCONFIGBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, + PVRSRV_BRIDGE_RGXREGCONFIG_RGXSETREGCONFIGTYPE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, + PVRSRV_BRIDGE_RGXREGCONFIG_RGXADDREGCONFIG); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, + PVRSRV_BRIDGE_RGXREGCONFIG_RGXCLEARREGCONFIG); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, + PVRSRV_BRIDGE_RGXREGCONFIG_RGXENABLEREGCONFIG); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, + PVRSRV_BRIDGE_RGXREGCONFIG_RGXDISABLEREGCONFIG); + + return PVRSRV_OK; +} +#else /* EXCLUDE_RGXREGCONFIG_BRIDGE */ +/* This bridge is conditional on EXCLUDE_RGXREGCONFIG_BRIDGE - when defined, + * do not populate the dispatch table with its functions + */ +#define InitRGXREGCONFIGBridge() \ + PVRSRV_OK + +#define DeinitRGXREGCONFIGBridge() \ + PVRSRV_OK + +#endif /* EXCLUDE_RGXREGCONFIG_BRIDGE */ diff --git a/drivers/gpu/drm/phytium/octopus/server_rgxsignals_bridge.c b/drivers/gpu/drm/phytium/octopus/server_rgxsignals_bridge.c new file mode 100644 index 000000000000..052878087327 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/server_rgxsignals_bridge.c @@ -0,0 +1,168 @@ +/******************************************************************************* +@File +@Title Server bridge for rgxsignals +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for rgxsignals +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "rgxsignals.h" + +#include "common_rgxsignals_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +#include "rgx_bvnc_defs_km.h" + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgeRGXNotifySignalUpdate(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXNotifySignalUpdateIN_UI8, + IMG_UINT8 * psRGXNotifySignalUpdateOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXNOTIFYSIGNALUPDATE *psRGXNotifySignalUpdateIN = + (PVRSRV_BRIDGE_IN_RGXNOTIFYSIGNALUPDATE *) + IMG_OFFSET_ADDR(psRGXNotifySignalUpdateIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXNOTIFYSIGNALUPDATE *psRGXNotifySignalUpdateOUT = + (PVRSRV_BRIDGE_OUT_RGXNOTIFYSIGNALUPDATE *) + IMG_OFFSET_ADDR(psRGXNotifySignalUpdateOUT_UI8, 0); + + IMG_HANDLE hPrivData = psRGXNotifySignalUpdateIN->hPrivData; + IMG_HANDLE hPrivDataInt = NULL; + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK)) + { + psRGXNotifySignalUpdateOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; + + goto RGXNotifySignalUpdate_exit; + } + } + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXNotifySignalUpdateOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hPrivDataInt, + hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); + if (unlikely(psRGXNotifySignalUpdateOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXNotifySignalUpdate_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXNotifySignalUpdateOUT->eError = + PVRSRVRGXNotifySignalUpdateKM(psConnection, OSGetDevNode(psConnection), + hPrivDataInt, + psRGXNotifySignalUpdateIN->sDevSignalAddress); + +RGXNotifySignalUpdate_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (hPrivDataInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitRGXSIGNALSBridge(void); +PVRSRV_ERROR DeinitRGXSIGNALSBridge(void); + +/* + * Register all RGXSIGNALS functions with services + */ +PVRSRV_ERROR InitRGXSIGNALSBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXSIGNALS, + PVRSRV_BRIDGE_RGXSIGNALS_RGXNOTIFYSIGNALUPDATE, + PVRSRVBridgeRGXNotifySignalUpdate, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all rgxsignals functions with services + */ +PVRSRV_ERROR DeinitRGXSIGNALSBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXSIGNALS, + PVRSRV_BRIDGE_RGXSIGNALS_RGXNOTIFYSIGNALUPDATE); + + return PVRSRV_OK; +} diff --git a/drivers/gpu/drm/phytium/octopus/server_rgxta3d_bridge.c b/drivers/gpu/drm/phytium/octopus/server_rgxta3d_bridge.c new file mode 100644 index 000000000000..2fee63d5f710 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/server_rgxta3d_bridge.c @@ -0,0 +1,2172 @@ +/******************************************************************************* +@File +@Title Server bridge for rgxta3d +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for rgxta3d +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "rgxta3d.h" + +#include "common_rgxta3d_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static PVRSRV_ERROR _RGXCreateHWRTDataSetpsKmHwRTDataSet0IntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = RGXDestroyHWRTDataSet((RGX_KM_HW_RT_DATASET *) pvData); + return eError; +} + +static PVRSRV_ERROR _RGXCreateHWRTDataSetpsKmHwRTDataSet1IntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = RGXDestroyHWRTDataSet((RGX_KM_HW_RT_DATASET *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRGXCreateHWRTDataSet(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCreateHWRTDataSetIN_UI8, + IMG_UINT8 * psRGXCreateHWRTDataSetOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET *psRGXCreateHWRTDataSetIN = + (PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET *) IMG_OFFSET_ADDR(psRGXCreateHWRTDataSetIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET *psRGXCreateHWRTDataSetOUT = + (PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET *) + IMG_OFFSET_ADDR(psRGXCreateHWRTDataSetOUT_UI8, 0); + + RGX_FREELIST **psapsFreeListsInt = NULL; + IMG_HANDLE *hapsFreeListsInt2 = NULL; + RGX_KM_HW_RT_DATASET *psKmHwRTDataSet0Int = NULL; + RGX_KM_HW_RT_DATASET *psKmHwRTDataSet1Int = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (RGXFW_MAX_FREELISTS * sizeof(RGX_FREELIST *)) + + (RGXFW_MAX_FREELISTS * sizeof(IMG_HANDLE)) + 0; + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + 
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXCreateHWRTDataSetIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXCreateHWRTDataSetIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXCreateHWRTDataSet_exit; + } + } + } + + { + psapsFreeListsInt = + (RGX_FREELIST **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += RGXFW_MAX_FREELISTS * sizeof(RGX_FREELIST *); + hapsFreeListsInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += RGXFW_MAX_FREELISTS * sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (RGXFW_MAX_FREELISTS * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hapsFreeListsInt2, + (const void __user *)psRGXCreateHWRTDataSetIN->phapsFreeLists, + RGXFW_MAX_FREELISTS * sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXCreateHWRTDataSet_exit; + } + } + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + { + IMG_UINT32 i; + + for (i = 0; i < RGXFW_MAX_FREELISTS; i++) + { + /* Look up the address from the handle */ + psRGXCreateHWRTDataSetOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psapsFreeListsInt[i], + hapsFreeListsInt2[i], + PVRSRV_HANDLE_TYPE_RGX_FREELIST, IMG_TRUE); + if (unlikely(psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateHWRTDataSet_exit; + } + } + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXCreateHWRTDataSetOUT->eError = + RGXCreateHWRTDataSet(psConnection, OSGetDevNode(psConnection), + psRGXCreateHWRTDataSetIN->sVHeapTableDevVAddr, + psRGXCreateHWRTDataSetIN->sPMDataAddr0, + psRGXCreateHWRTDataSetIN->sPMDataAddr1, + psRGXCreateHWRTDataSetIN->sPMSecureDataAddr0, + psRGXCreateHWRTDataSetIN->sPMSecureDataAddr1, + psapsFreeListsInt, + psRGXCreateHWRTDataSetIN->ui32PPPScreen, + psRGXCreateHWRTDataSetIN->ui64PPPMultiSampleCtl, + psRGXCreateHWRTDataSetIN->ui32TPCStride, + psRGXCreateHWRTDataSetIN->sTailPtrsDevVAddr, + psRGXCreateHWRTDataSetIN->ui32TPCSize, + psRGXCreateHWRTDataSetIN->ui32TEScreen, + psRGXCreateHWRTDataSetIN->ui32TEAA, + psRGXCreateHWRTDataSetIN->ui32TEMTILE1, + psRGXCreateHWRTDataSetIN->ui32TEMTILE2, + psRGXCreateHWRTDataSetIN->ui32RgnStride, + psRGXCreateHWRTDataSetIN->ui32ISPMergeLowerX, + psRGXCreateHWRTDataSetIN->ui32ISPMergeLowerY, + psRGXCreateHWRTDataSetIN->ui32ISPMergeUpperX, + psRGXCreateHWRTDataSetIN->ui32ISPMergeUpperY, + psRGXCreateHWRTDataSetIN->ui32ISPMergeScaleX, + psRGXCreateHWRTDataSetIN->ui32ISPMergeScaleY, + psRGXCreateHWRTDataSetIN->ui16MaxRTs, + &psKmHwRTDataSet0Int, &psKmHwRTDataSet1Int); + /* Exit early if bridged call fails */ + if (unlikely(psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK)) + { + goto RGXCreateHWRTDataSet_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psRGXCreateHWRTDataSetOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXCreateHWRTDataSetOUT-> + hKmHwRTDataSet0, + (void *)psKmHwRTDataSet0Int, + PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXCreateHWRTDataSetpsKmHwRTDataSet0IntRelease); + if (unlikely(psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateHWRTDataSet_exit; + } + + psRGXCreateHWRTDataSetOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXCreateHWRTDataSetOUT-> + hKmHwRTDataSet1, + (void *)psKmHwRTDataSet1Int, + PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXCreateHWRTDataSetpsKmHwRTDataSet1IntRelease); + if (unlikely(psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateHWRTDataSet_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXCreateHWRTDataSet_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + if (hapsFreeListsInt2) + { + IMG_UINT32 i; + + for (i = 0; i < RGXFW_MAX_FREELISTS; i++) + { + + /* Unreference the previously looked up handle */ + if (hapsFreeListsInt2[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hapsFreeListsInt2[i], + PVRSRV_HANDLE_TYPE_RGX_FREELIST); + } + } + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + if (psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK) + { + if (psKmHwRTDataSet0Int) + { + RGXDestroyHWRTDataSet(psKmHwRTDataSet0Int); + } + if (psKmHwRTDataSet1Int) + { + RGXDestroyHWRTDataSet(psKmHwRTDataSet1Int); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXDestroyHWRTDataSet(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXDestroyHWRTDataSetIN_UI8, + IMG_UINT8 * psRGXDestroyHWRTDataSetOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET *psRGXDestroyHWRTDataSetIN = + (PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET *) + IMG_OFFSET_ADDR(psRGXDestroyHWRTDataSetIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET *psRGXDestroyHWRTDataSetOUT = + (PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET *) + IMG_OFFSET_ADDR(psRGXDestroyHWRTDataSetOUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psRGXDestroyHWRTDataSetOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) psRGXDestroyHWRTDataSetIN->hKmHwRTDataSet, + PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET); + if (unlikely((psRGXDestroyHWRTDataSetOUT->eError != PVRSRV_OK) && + (psRGXDestroyHWRTDataSetOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psRGXDestroyHWRTDataSetOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXDestroyHWRTDataSet_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +RGXDestroyHWRTDataSet_exit: + + return 0; +} + +static PVRSRV_ERROR _RGXCreateZSBufferpssZSBufferKMIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = RGXDestroyZSBufferKM((RGX_ZSBUFFER_DATA *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRGXCreateZSBuffer(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCreateZSBufferIN_UI8, + IMG_UINT8 * psRGXCreateZSBufferOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER *psRGXCreateZSBufferIN = + (PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXCreateZSBufferIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER *psRGXCreateZSBufferOUT = + (PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXCreateZSBufferOUT_UI8, 0); + + IMG_HANDLE hReservation = psRGXCreateZSBufferIN->hReservation; + DEVMEMINT_RESERVATION *psReservationInt = NULL; + IMG_HANDLE hPMR = psRGXCreateZSBufferIN->hPMR; + PMR *psPMRInt = NULL; + RGX_ZSBUFFER_DATA *pssZSBufferKMInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXCreateZSBufferOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psReservationInt, + hReservation, + PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE); + if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateZSBuffer_exit; + } + + /* Look up the address from the handle */ + psRGXCreateZSBufferOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRInt, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateZSBuffer_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psRGXCreateZSBufferOUT->eError = + RGXCreateZSBufferKM(psConnection, OSGetDevNode(psConnection), + psReservationInt, + psPMRInt, psRGXCreateZSBufferIN->uiMapFlags, &pssZSBufferKMInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) + { + goto RGXCreateZSBuffer_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psRGXCreateZSBufferOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXCreateZSBufferOUT-> + hsZSBufferKM, + (void *)pssZSBufferKMInt, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXCreateZSBufferpssZSBufferKMIntRelease); + if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateZSBuffer_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXCreateZSBuffer_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psReservationInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hReservation, PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); + } + + /* Unreference the previously looked up handle */ + if (psPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + if (psRGXCreateZSBufferOUT->eError != PVRSRV_OK) + { + if (pssZSBufferKMInt) + { + RGXDestroyZSBufferKM(pssZSBufferKMInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXDestroyZSBuffer(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXDestroyZSBufferIN_UI8, + IMG_UINT8 * psRGXDestroyZSBufferOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER *psRGXDestroyZSBufferIN = + (PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER *) IMG_OFFSET_ADDR(psRGXDestroyZSBufferIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER *psRGXDestroyZSBufferOUT = + (PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER *) IMG_OFFSET_ADDR(psRGXDestroyZSBufferOUT_UI8, + 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psRGXDestroyZSBufferOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) psRGXDestroyZSBufferIN->hsZSBufferMemDesc, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); + if (unlikely((psRGXDestroyZSBufferOUT->eError != PVRSRV_OK) && + (psRGXDestroyZSBufferOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psRGXDestroyZSBufferOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXDestroyZSBuffer_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +RGXDestroyZSBuffer_exit: + + return 0; +} + +static PVRSRV_ERROR _RGXPopulateZSBufferpssPopulationIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = RGXUnpopulateZSBufferKM((RGX_POPULATION *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRGXPopulateZSBuffer(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXPopulateZSBufferIN_UI8, + IMG_UINT8 * psRGXPopulateZSBufferOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER *psRGXPopulateZSBufferIN = + (PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXPopulateZSBufferIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER *psRGXPopulateZSBufferOUT = + (PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXPopulateZSBufferOUT_UI8, + 0); + + IMG_HANDLE hsZSBufferKM = psRGXPopulateZSBufferIN->hsZSBufferKM; + RGX_ZSBUFFER_DATA *pssZSBufferKMInt = NULL; + RGX_POPULATION *pssPopulationInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXPopulateZSBufferOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&pssZSBufferKMInt, + hsZSBufferKM, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, IMG_TRUE); + if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXPopulateZSBuffer_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXPopulateZSBufferOUT->eError = + RGXPopulateZSBufferKM(pssZSBufferKMInt, &pssPopulationInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)) + { + goto RGXPopulateZSBuffer_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psRGXPopulateZSBufferOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXPopulateZSBufferOUT-> + hsPopulation, + (void *)pssPopulationInt, + PVRSRV_HANDLE_TYPE_RGX_POPULATION, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXPopulateZSBufferpssPopulationIntRelease); + if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXPopulateZSBuffer_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXPopulateZSBuffer_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (pssZSBufferKMInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hsZSBufferKM, PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psRGXPopulateZSBufferOUT->eError != PVRSRV_OK) + { + if (pssPopulationInt) + { + RGXUnpopulateZSBufferKM(pssPopulationInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXUnpopulateZSBuffer(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXUnpopulateZSBufferIN_UI8, + IMG_UINT8 * psRGXUnpopulateZSBufferOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER *psRGXUnpopulateZSBufferIN = + (PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER *) + IMG_OFFSET_ADDR(psRGXUnpopulateZSBufferIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER *psRGXUnpopulateZSBufferOUT = + (PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER *) + IMG_OFFSET_ADDR(psRGXUnpopulateZSBufferOUT_UI8, 0); + + /* Lock over handle destruction. 
*/ + LockHandle(psConnection->psHandleBase); + + psRGXUnpopulateZSBufferOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) psRGXUnpopulateZSBufferIN->hsPopulation, + PVRSRV_HANDLE_TYPE_RGX_POPULATION); + if (unlikely((psRGXUnpopulateZSBufferOUT->eError != PVRSRV_OK) && + (psRGXUnpopulateZSBufferOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psRGXUnpopulateZSBufferOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXUnpopulateZSBuffer_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXUnpopulateZSBuffer_exit: + + return 0; +} + +static PVRSRV_ERROR _RGXCreateFreeListpsCleanupCookieIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = RGXDestroyFreeList((RGX_FREELIST *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRGXCreateFreeList(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCreateFreeListIN_UI8, + IMG_UINT8 * psRGXCreateFreeListOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCREATEFREELIST *psRGXCreateFreeListIN = + (PVRSRV_BRIDGE_IN_RGXCREATEFREELIST *) IMG_OFFSET_ADDR(psRGXCreateFreeListIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST *psRGXCreateFreeListOUT = + (PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST *) IMG_OFFSET_ADDR(psRGXCreateFreeListOUT_UI8, 0); + + IMG_HANDLE hMemCtxPrivData = psRGXCreateFreeListIN->hMemCtxPrivData; + IMG_HANDLE hMemCtxPrivDataInt = NULL; + IMG_HANDLE hsGlobalFreeList = psRGXCreateFreeListIN->hsGlobalFreeList; + RGX_FREELIST *pssGlobalFreeListInt = NULL; + IMG_HANDLE hsFreeListPMR = psRGXCreateFreeListIN->hsFreeListPMR; + PMR *pssFreeListPMRInt = NULL; + IMG_HANDLE hsFreeListStatePMR = psRGXCreateFreeListIN->hsFreeListStatePMR; + PMR *pssFreeListStatePMRInt = NULL; + RGX_FREELIST *psCleanupCookieInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXCreateFreeListOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hMemCtxPrivDataInt, + hMemCtxPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); + if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateFreeList_exit; + } + + if (psRGXCreateFreeListIN->hsGlobalFreeList) + { + /* Look up the address from the handle */ + psRGXCreateFreeListOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&pssGlobalFreeListInt, + hsGlobalFreeList, + PVRSRV_HANDLE_TYPE_RGX_FREELIST, IMG_TRUE); + if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateFreeList_exit; + } + } + + /* Look up the address from the handle */ + psRGXCreateFreeListOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&pssFreeListPMRInt, + hsFreeListPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateFreeList_exit; + } + + /* Look up the address from the handle */ + psRGXCreateFreeListOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&pssFreeListStatePMRInt, + hsFreeListStatePMR, + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateFreeList_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psRGXCreateFreeListOUT->eError = + RGXCreateFreeList(psConnection, OSGetDevNode(psConnection), + hMemCtxPrivDataInt, + psRGXCreateFreeListIN->ui32MaxFLPages, + psRGXCreateFreeListIN->ui32InitFLPages, + psRGXCreateFreeListIN->ui32GrowFLPages, + psRGXCreateFreeListIN->ui32GrowParamThreshold, + pssGlobalFreeListInt, + psRGXCreateFreeListIN->bbFreeListCheck, + psRGXCreateFreeListIN->spsFreeListBaseDevVAddr, + psRGXCreateFreeListIN->spsFreeListStateDevVAddr, + pssFreeListPMRInt, + psRGXCreateFreeListIN->uiPMROffset, + pssFreeListStatePMRInt, + psRGXCreateFreeListIN->uiPMRStateOffset, &psCleanupCookieInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) + { + goto RGXCreateFreeList_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psRGXCreateFreeListOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXCreateFreeListOUT-> + hCleanupCookie, + (void *)psCleanupCookieInt, + PVRSRV_HANDLE_TYPE_RGX_FREELIST, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXCreateFreeListpsCleanupCookieIntRelease); + if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateFreeList_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXCreateFreeList_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (hMemCtxPrivDataInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hMemCtxPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); + } + + if (psRGXCreateFreeListIN->hsGlobalFreeList) + { + + /* Unreference the previously looked up handle */ + if (pssGlobalFreeListInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hsGlobalFreeList, + PVRSRV_HANDLE_TYPE_RGX_FREELIST); + } + } + + /* Unreference the previously looked up handle */ + if (pssFreeListPMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hsFreeListPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + + /* Unreference the previously looked up handle */ + if (pssFreeListStatePMRInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hsFreeListStatePMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psRGXCreateFreeListOUT->eError != PVRSRV_OK) + { + if (psCleanupCookieInt) + { + RGXDestroyFreeList(psCleanupCookieInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXDestroyFreeList(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXDestroyFreeListIN_UI8, + IMG_UINT8 * psRGXDestroyFreeListOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST *psRGXDestroyFreeListIN = + (PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST *) IMG_OFFSET_ADDR(psRGXDestroyFreeListIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST *psRGXDestroyFreeListOUT = + (PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST *) IMG_OFFSET_ADDR(psRGXDestroyFreeListOUT_UI8, + 0); + + /* Lock over handle destruction. 
*/ + LockHandle(psConnection->psHandleBase); + + psRGXDestroyFreeListOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) psRGXDestroyFreeListIN->hCleanupCookie, + PVRSRV_HANDLE_TYPE_RGX_FREELIST); + if (unlikely((psRGXDestroyFreeListOUT->eError != PVRSRV_OK) && + (psRGXDestroyFreeListOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psRGXDestroyFreeListOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXDestroyFreeList_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXDestroyFreeList_exit: + + return 0; +} + +static PVRSRV_ERROR _RGXCreateRenderContextpsRenderContextIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PVRSRVRGXDestroyRenderContextKM((RGX_SERVER_RENDER_CONTEXT *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRGXCreateRenderContext(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXCreateRenderContextIN_UI8, + IMG_UINT8 * psRGXCreateRenderContextOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT *psRGXCreateRenderContextIN = + (PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT *) + IMG_OFFSET_ADDR(psRGXCreateRenderContextIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT *psRGXCreateRenderContextOUT = + (PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT *) + IMG_OFFSET_ADDR(psRGXCreateRenderContextOUT_UI8, 0); + + IMG_BYTE *ui8FrameworkCmdInt = NULL; + IMG_HANDLE hPrivData = psRGXCreateRenderContextIN->hPrivData; + IMG_HANDLE hPrivDataInt = NULL; + IMG_BYTE *ui8StaticRenderContextStateInt = NULL; + RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) + + 
(psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * sizeof(IMG_BYTE)) + 0; + + if (unlikely(psRGXCreateRenderContextIN->ui32FrameworkCmdSize > RGXFWIF_RF_CMD_SIZE)) + { + psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXCreateRenderContext_exit; + } + + if (unlikely + (psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize > + RGXFWIF_STATIC_RENDERCONTEXT_SIZE)) + { + psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXCreateRenderContext_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXCreateRenderContextIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXCreateRenderContextIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXCreateRenderContext_exit; + } + } + } + + if (psRGXCreateRenderContextIN->ui32FrameworkCmdSize != 0) + { + ui8FrameworkCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, ui8FrameworkCmdInt, + (const void __user *)psRGXCreateRenderContextIN->pui8FrameworkCmd, + psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) != + PVRSRV_OK) + { + 
psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXCreateRenderContext_exit; + } + } + if (psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize != 0) + { + ui8StaticRenderContextStateInt = + (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, ui8StaticRenderContextStateInt, + (const void __user *)psRGXCreateRenderContextIN->pui8StaticRenderContextState, + psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * + sizeof(IMG_BYTE)) != PVRSRV_OK) + { + psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXCreateRenderContext_exit; + } + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXCreateRenderContextOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hPrivDataInt, + hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); + if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateRenderContext_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psRGXCreateRenderContextOUT->eError = + PVRSRVRGXCreateRenderContextKM(psConnection, OSGetDevNode(psConnection), + psRGXCreateRenderContextIN->ui32Priority, + psRGXCreateRenderContextIN->ui32FrameworkCmdSize, + ui8FrameworkCmdInt, + hPrivDataInt, + psRGXCreateRenderContextIN-> + ui32StaticRenderContextStateSize, + ui8StaticRenderContextStateInt, + psRGXCreateRenderContextIN->ui32PackedCCBSizeU8888, + psRGXCreateRenderContextIN->ui32ContextFlags, + psRGXCreateRenderContextIN->ui64RobustnessAddress, + psRGXCreateRenderContextIN->ui32MaxTADeadlineMS, + psRGXCreateRenderContextIN->ui32Max3DDeadlineMS, + &psRenderContextInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)) + { + goto RGXCreateRenderContext_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psRGXCreateRenderContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXCreateRenderContextOUT-> + hRenderContext, + (void *)psRenderContextInt, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXCreateRenderContextpsRenderContextIntRelease); + if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateRenderContext_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXCreateRenderContext_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (hPrivDataInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + if (psRGXCreateRenderContextOUT->eError != PVRSRV_OK) + { + if (psRenderContextInt) + { + PVRSRVRGXDestroyRenderContextKM(psRenderContextInt); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXDestroyRenderContext(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXDestroyRenderContextIN_UI8, + IMG_UINT8 * psRGXDestroyRenderContextOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT *psRGXDestroyRenderContextIN = + (PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT *) + IMG_OFFSET_ADDR(psRGXDestroyRenderContextIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT *psRGXDestroyRenderContextOUT = + (PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT *) + IMG_OFFSET_ADDR(psRGXDestroyRenderContextOUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psRGXDestroyRenderContextOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) psRGXDestroyRenderContextIN-> + hCleanupCookie, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); + if (unlikely + ((psRGXDestroyRenderContextOUT->eError != PVRSRV_OK) + && (psRGXDestroyRenderContextOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psRGXDestroyRenderContextOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXDestroyRenderContext_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +RGXDestroyRenderContext_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXSetRenderContextPriority(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXSetRenderContextPriorityIN_UI8, + IMG_UINT8 * psRGXSetRenderContextPriorityOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY *psRGXSetRenderContextPriorityIN = + (PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY *) + IMG_OFFSET_ADDR(psRGXSetRenderContextPriorityIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY *psRGXSetRenderContextPriorityOUT = + (PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY *) + IMG_OFFSET_ADDR(psRGXSetRenderContextPriorityOUT_UI8, 0); + + IMG_HANDLE hRenderContext = psRGXSetRenderContextPriorityIN->hRenderContext; + RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXSetRenderContextPriorityOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psRenderContextInt, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE); + if (unlikely(psRGXSetRenderContextPriorityOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXSetRenderContextPriority_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXSetRenderContextPriorityOUT->eError = + PVRSRVRGXSetRenderContextPriorityKM(psConnection, OSGetDevNode(psConnection), + psRenderContextInt, + psRGXSetRenderContextPriorityIN->ui32Priority); + +RGXSetRenderContextPriority_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psRenderContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXRenderContextStalled(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXRenderContextStalledIN_UI8, + IMG_UINT8 * psRGXRenderContextStalledOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED *psRGXRenderContextStalledIN = + (PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED *) + IMG_OFFSET_ADDR(psRGXRenderContextStalledIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED *psRGXRenderContextStalledOUT = + (PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED *) + IMG_OFFSET_ADDR(psRGXRenderContextStalledOUT_UI8, 0); + + IMG_HANDLE hRenderContext = psRGXRenderContextStalledIN->hRenderContext; + RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXRenderContextStalledOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psRenderContextInt, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE); + if (unlikely(psRGXRenderContextStalledOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXRenderContextStalled_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXRenderContextStalledOUT->eError = RGXRenderContextStalledKM(psRenderContextInt); + +RGXRenderContextStalled_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psRenderContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXKickTA3D2(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXKickTA3D2IN_UI8, + IMG_UINT8 * psRGXKickTA3D2OUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXKICKTA3D2 *psRGXKickTA3D2IN = + (PVRSRV_BRIDGE_IN_RGXKICKTA3D2 *) IMG_OFFSET_ADDR(psRGXKickTA3D2IN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXKICKTA3D2 *psRGXKickTA3D2OUT = + (PVRSRV_BRIDGE_OUT_RGXKICKTA3D2 *) IMG_OFFSET_ADDR(psRGXKickTA3D2OUT_UI8, 0); + + IMG_HANDLE hRenderContext = psRGXKickTA3D2IN->hRenderContext; + RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; + SYNC_PRIMITIVE_BLOCK **psClientTAFenceSyncPrimBlockInt = NULL; + IMG_HANDLE *hClientTAFenceSyncPrimBlockInt2 = NULL; + IMG_UINT32 *ui32ClientTAFenceSyncOffsetInt = NULL; + IMG_UINT32 *ui32ClientTAFenceValueInt = NULL; + SYNC_PRIMITIVE_BLOCK **psClientTAUpdateSyncPrimBlockInt = NULL; + IMG_HANDLE *hClientTAUpdateSyncPrimBlockInt2 = NULL; + IMG_UINT32 *ui32ClientTAUpdateSyncOffsetInt = NULL; + IMG_UINT32 *ui32ClientTAUpdateValueInt = NULL; + SYNC_PRIMITIVE_BLOCK **psClient3DUpdateSyncPrimBlockInt = NULL; + IMG_HANDLE *hClient3DUpdateSyncPrimBlockInt2 = NULL; + IMG_UINT32 *ui32Client3DUpdateSyncOffsetInt = NULL; + IMG_UINT32 *ui32Client3DUpdateValueInt = NULL; + IMG_HANDLE hPRFenceUFOSyncPrimBlock = psRGXKickTA3D2IN->hPRFenceUFOSyncPrimBlock; + SYNC_PRIMITIVE_BLOCK *psPRFenceUFOSyncPrimBlockInt = NULL; + IMG_CHAR *uiUpdateFenceNameInt = NULL; + IMG_CHAR *uiUpdateFenceName3DInt = NULL; + IMG_BYTE *ui8TACmdInt = NULL; + IMG_BYTE *ui83DPRCmdInt = NULL; + IMG_BYTE *ui83DCmdInt = NULL; + IMG_HANDLE hKMHWRTDataSet = 
psRGXKickTA3D2IN->hKMHWRTDataSet; + RGX_KM_HW_RT_DATASET *psKMHWRTDataSetInt = NULL; + IMG_HANDLE hZSBuffer = psRGXKickTA3D2IN->hZSBuffer; + RGX_ZSBUFFER_DATA *psZSBufferInt = NULL; + IMG_HANDLE hMSAAScratchBuffer = psRGXKickTA3D2IN->hMSAAScratchBuffer; + RGX_ZSBUFFER_DATA *psMSAAScratchBufferInt = NULL; + IMG_UINT32 *ui32SyncPMRFlagsInt = NULL; + PMR **psSyncPMRsInt = NULL; + IMG_HANDLE *hSyncPMRsInt2 = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) + + (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE)) + + (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) + + (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) + + (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) + + (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE)) + + (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) + + (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) + + (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) + + (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE)) + + (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) + + (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) + + (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + + (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + + (psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE)) + + (psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE)) + + (psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE)) + + (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) + + (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(PMR *)) + + (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0; + + if (unlikely(psRGXKickTA3D2IN->ui32ClientTAFenceCount > PVRSRV_MAX_SYNCS)) + { + 
psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTA3D2_exit; + } + + if (unlikely(psRGXKickTA3D2IN->ui32ClientTAUpdateCount > PVRSRV_MAX_SYNCS)) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTA3D2_exit; + } + + if (unlikely(psRGXKickTA3D2IN->ui32Client3DUpdateCount > PVRSRV_MAX_SYNCS)) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTA3D2_exit; + } + + if (unlikely(psRGXKickTA3D2IN->ui32TACmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTA3D2_exit; + } + + if (unlikely(psRGXKickTA3D2IN->ui323DPRCmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTA3D2_exit; + } + + if (unlikely(psRGXKickTA3D2IN->ui323DCmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTA3D2_exit; + } + + if (unlikely(psRGXKickTA3D2IN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS)) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXKickTA3D2_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXKickTA3D2IN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXKickTA3D2IN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXKickTA3D2_exit; + } + } + } + + if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0) + { + psClientTAFenceSyncPrimBlockInt = + (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *); + hClientTAFenceSyncPrimBlockInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hClientTAFenceSyncPrimBlockInt2, + (const void __user *)psRGXKickTA3D2IN->phClientTAFenceSyncPrimBlock, + psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0) + { + ui32ClientTAFenceSyncOffsetInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32ClientTAFenceSyncOffsetInt, + (const void __user *)psRGXKickTA3D2IN->pui32ClientTAFenceSyncOffset, + psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto 
RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0) + { + ui32ClientTAFenceValueInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32ClientTAFenceValueInt, + (const void __user *)psRGXKickTA3D2IN->pui32ClientTAFenceValue, + psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0) + { + psClientTAUpdateSyncPrimBlockInt = + (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *); + hClientTAUpdateSyncPrimBlockInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hClientTAUpdateSyncPrimBlockInt2, + (const void __user *)psRGXKickTA3D2IN->phClientTAUpdateSyncPrimBlock, + psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0) + { + ui32ClientTAUpdateSyncOffsetInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32ClientTAUpdateSyncOffsetInt, + (const void 
__user *)psRGXKickTA3D2IN->pui32ClientTAUpdateSyncOffset, + psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0) + { + ui32ClientTAUpdateValueInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32ClientTAUpdateValueInt, + (const void __user *)psRGXKickTA3D2IN->pui32ClientTAUpdateValue, + psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32Client3DUpdateCount != 0) + { + psClient3DUpdateSyncPrimBlockInt = + (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *); + hClient3DUpdateSyncPrimBlockInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hClient3DUpdateSyncPrimBlockInt2, + (const void __user *)psRGXKickTA3D2IN->phClient3DUpdateSyncPrimBlock, + psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32Client3DUpdateCount != 0) + { + ui32Client3DUpdateSyncOffsetInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += 
psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32Client3DUpdateSyncOffsetInt, + (const void __user *)psRGXKickTA3D2IN->pui32Client3DUpdateSyncOffset, + psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32Client3DUpdateCount != 0) + { + ui32Client3DUpdateValueInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32Client3DUpdateValueInt, + (const void __user *)psRGXKickTA3D2IN->pui32Client3DUpdateValue, + psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + + { + uiUpdateFenceNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiUpdateFenceNameInt, + (const void __user *)psRGXKickTA3D2IN->puiUpdateFenceName, + PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + ((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) - + 1] = '\0'; + } + + { + uiUpdateFenceName3DInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (PVRSRV_SYNC_NAME_LENGTH * 
sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiUpdateFenceName3DInt, + (const void __user *)psRGXKickTA3D2IN->puiUpdateFenceName3D, + PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + ((IMG_CHAR *) uiUpdateFenceName3DInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) - + 1] = '\0'; + } + if (psRGXKickTA3D2IN->ui32TACmdSize != 0) + { + ui8TACmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, ui8TACmdInt, (const void __user *)psRGXKickTA3D2IN->pui8TACmd, + psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui323DPRCmdSize != 0) + { + ui83DPRCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, ui83DPRCmdInt, (const void __user *)psRGXKickTA3D2IN->pui83DPRCmd, + psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui323DCmdSize != 0) + { + ui83DCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, ui83DCmdInt, (const void __user *)psRGXKickTA3D2IN->pui83DCmd, + psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK) + { + 
psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32SyncPMRCount != 0) + { + ui32SyncPMRFlagsInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32SyncPMRFlagsInt, + (const void __user *)psRGXKickTA3D2IN->pui32SyncPMRFlags, + psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + if (psRGXKickTA3D2IN->ui32SyncPMRCount != 0) + { + psSyncPMRsInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(PMR *); + hSyncPMRsInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hSyncPMRsInt2, (const void __user *)psRGXKickTA3D2IN->phSyncPMRs, + psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXKickTA3D2_exit; + } + } + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psRenderContextInt, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE); + if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAFenceCount; i++) + { + /* Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psClientTAFenceSyncPrimBlockInt[i], + hClientTAFenceSyncPrimBlockInt2[i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + } + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAUpdateCount; i++) + { + /* Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **) + &psClientTAUpdateSyncPrimBlockInt[i], + hClientTAUpdateSyncPrimBlockInt2[i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + } + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickTA3D2IN->ui32Client3DUpdateCount; i++) + { + /* Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **) + &psClient3DUpdateSyncPrimBlockInt[i], + hClient3DUpdateSyncPrimBlockInt2[i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + } + } + + /* Look up the address from the handle 
*/ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPRFenceUFOSyncPrimBlockInt, + hPRFenceUFOSyncPrimBlock, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE); + if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + + if (psRGXKickTA3D2IN->hKMHWRTDataSet) + { + /* Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psKMHWRTDataSetInt, + hKMHWRTDataSet, + PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET, IMG_TRUE); + if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + } + + if (psRGXKickTA3D2IN->hZSBuffer) + { + /* Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psZSBufferInt, + hZSBuffer, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, IMG_TRUE); + if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + } + + if (psRGXKickTA3D2IN->hMSAAScratchBuffer) + { + /* Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psMSAAScratchBufferInt, + hMSAAScratchBuffer, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, IMG_TRUE); + if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickTA3D2IN->ui32SyncPMRCount; i++) + { + /* Look up the address from the handle */ + psRGXKickTA3D2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSyncPMRsInt[i], + hSyncPMRsInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) + { + 
UnlockHandle(psConnection->psHandleBase); + goto RGXKickTA3D2_exit; + } + } + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXKickTA3D2OUT->eError = + PVRSRVRGXKickTA3DKM(psRenderContextInt, + psRGXKickTA3D2IN->ui32ClientCacheOpSeqNum, + psRGXKickTA3D2IN->ui32ClientTAFenceCount, + psClientTAFenceSyncPrimBlockInt, + ui32ClientTAFenceSyncOffsetInt, + ui32ClientTAFenceValueInt, + psRGXKickTA3D2IN->ui32ClientTAUpdateCount, + psClientTAUpdateSyncPrimBlockInt, + ui32ClientTAUpdateSyncOffsetInt, + ui32ClientTAUpdateValueInt, + psRGXKickTA3D2IN->ui32Client3DUpdateCount, + psClient3DUpdateSyncPrimBlockInt, + ui32Client3DUpdateSyncOffsetInt, + ui32Client3DUpdateValueInt, + psPRFenceUFOSyncPrimBlockInt, + psRGXKickTA3D2IN->ui32FRFenceUFOSyncOffset, + psRGXKickTA3D2IN->ui32FRFenceValue, + psRGXKickTA3D2IN->hCheckFence, + psRGXKickTA3D2IN->hUpdateTimeline, + &psRGXKickTA3D2OUT->hUpdateFence, + uiUpdateFenceNameInt, + psRGXKickTA3D2IN->hCheckFence3D, + psRGXKickTA3D2IN->hUpdateTimeline3D, + &psRGXKickTA3D2OUT->hUpdateFence3D, + uiUpdateFenceName3DInt, + psRGXKickTA3D2IN->ui32TACmdSize, + ui8TACmdInt, + psRGXKickTA3D2IN->ui323DPRCmdSize, + ui83DPRCmdInt, + psRGXKickTA3D2IN->ui323DCmdSize, + ui83DCmdInt, + psRGXKickTA3D2IN->ui32ExtJobRef, + psRGXKickTA3D2IN->bbKickTA, + psRGXKickTA3D2IN->bbKickPR, + psRGXKickTA3D2IN->bbKick3D, + psRGXKickTA3D2IN->bbAbort, + psRGXKickTA3D2IN->ui32PDumpFlags, + psKMHWRTDataSetInt, + psZSBufferInt, + psMSAAScratchBufferInt, + psRGXKickTA3D2IN->ui32SyncPMRCount, + ui32SyncPMRFlagsInt, + psSyncPMRsInt, + psRGXKickTA3D2IN->ui32RenderTargetSize, + psRGXKickTA3D2IN->ui32NumberOfDrawCalls, + psRGXKickTA3D2IN->ui32NumberOfIndices, + psRGXKickTA3D2IN->ui32NumberOfMRTs, psRGXKickTA3D2IN->ui64Deadline); + +RGXKickTA3D2_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psRenderContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); + } + + if (hClientTAFenceSyncPrimBlockInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAFenceCount; i++) + { + + /* Unreference the previously looked up handle */ + if (hClientTAFenceSyncPrimBlockInt2[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hClientTAFenceSyncPrimBlockInt2[i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + } + } + + if (hClientTAUpdateSyncPrimBlockInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAUpdateCount; i++) + { + + /* Unreference the previously looked up handle */ + if (hClientTAUpdateSyncPrimBlockInt2[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hClientTAUpdateSyncPrimBlockInt2[i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + } + } + + if (hClient3DUpdateSyncPrimBlockInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickTA3D2IN->ui32Client3DUpdateCount; i++) + { + + /* Unreference the previously looked up handle */ + if (hClient3DUpdateSyncPrimBlockInt2[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hClient3DUpdateSyncPrimBlockInt2[i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + } + } + + /* Unreference the previously looked up handle */ + if (psPRFenceUFOSyncPrimBlockInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPRFenceUFOSyncPrimBlock, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + + if (psRGXKickTA3D2IN->hKMHWRTDataSet) + { + + /* Unreference the previously looked up handle */ + if (psKMHWRTDataSetInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hKMHWRTDataSet, + PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET); + } + } + + if (psRGXKickTA3D2IN->hZSBuffer) + { + + /* Unreference the previously looked up handle */ 
+ if (psZSBufferInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hZSBuffer, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); + } + } + + if (psRGXKickTA3D2IN->hMSAAScratchBuffer) + { + + /* Unreference the previously looked up handle */ + if (psMSAAScratchBufferInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hMSAAScratchBuffer, + PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); + } + } + + if (hSyncPMRsInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXKickTA3D2IN->ui32SyncPMRCount; i++) + { + + /* Unreference the previously looked up handle */ + if (hSyncPMRsInt2[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSyncPMRsInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + } + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXSetRenderContextProperty(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXSetRenderContextPropertyIN_UI8, + IMG_UINT8 * psRGXSetRenderContextPropertyOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY *psRGXSetRenderContextPropertyIN = + (PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXSetRenderContextPropertyIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY *psRGXSetRenderContextPropertyOUT = + (PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXSetRenderContextPropertyOUT_UI8, 0); + + IMG_HANDLE hRenderContext = psRGXSetRenderContextPropertyIN->hRenderContext; + RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXSetRenderContextPropertyOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psRenderContextInt, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE); + if (unlikely(psRGXSetRenderContextPropertyOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXSetRenderContextProperty_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXSetRenderContextPropertyOUT->eError = + PVRSRVRGXSetRenderContextPropertyKM(psRenderContextInt, + psRGXSetRenderContextPropertyIN->ui32Property, + psRGXSetRenderContextPropertyIN->ui64Input, + &psRGXSetRenderContextPropertyOUT->ui64Output); + +RGXSetRenderContextProperty_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psRenderContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hRenderContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitRGXTA3DBridge(void); +PVRSRV_ERROR DeinitRGXTA3DBridge(void); + +/* + * Register all RGXTA3D functions with services + */ +PVRSRV_ERROR InitRGXTA3DBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET, + PVRSRVBridgeRGXCreateHWRTDataSet, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET, + PVRSRVBridgeRGXDestroyHWRTDataSet, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER, + PVRSRVBridgeRGXCreateZSBuffer, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER, + PVRSRVBridgeRGXDestroyZSBuffer, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER, + PVRSRVBridgeRGXPopulateZSBuffer, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER, + PVRSRVBridgeRGXUnpopulateZSBuffer, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST, + PVRSRVBridgeRGXCreateFreeList, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST, + PVRSRVBridgeRGXDestroyFreeList, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT, + PVRSRVBridgeRGXCreateRenderContext, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT, + PVRSRVBridgeRGXDestroyRenderContext, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY, + PVRSRVBridgeRGXSetRenderContextPriority, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED, + 
PVRSRVBridgeRGXRenderContextStalled, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2, + PVRSRVBridgeRGXKickTA3D2, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPROPERTY, + PVRSRVBridgeRGXSetRenderContextProperty, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all rgxta3d functions with services + */ +PVRSRV_ERROR DeinitRGXTA3DBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, + PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPROPERTY); + + return PVRSRV_OK; +} diff --git a/drivers/gpu/drm/phytium/octopus/server_rgxtimerquery_bridge.c b/drivers/gpu/drm/phytium/octopus/server_rgxtimerquery_bridge.c new file mode 100644 index 
000000000000..42369570e165 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/server_rgxtimerquery_bridge.c @@ -0,0 +1,168 @@ +/******************************************************************************* +@File +@Title Server bridge for rgxtimerquery +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for rgxtimerquery +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "rgxtimerquery.h" + +#include "common_rgxtimerquery_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgeRGXBeginTimerQuery(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXBeginTimerQueryIN_UI8, + IMG_UINT8 * psRGXBeginTimerQueryOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXBEGINTIMERQUERY *psRGXBeginTimerQueryIN = + (PVRSRV_BRIDGE_IN_RGXBEGINTIMERQUERY *) IMG_OFFSET_ADDR(psRGXBeginTimerQueryIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXBEGINTIMERQUERY *psRGXBeginTimerQueryOUT = + (PVRSRV_BRIDGE_OUT_RGXBEGINTIMERQUERY *) IMG_OFFSET_ADDR(psRGXBeginTimerQueryOUT_UI8, + 0); + + psRGXBeginTimerQueryOUT->eError = + PVRSRVRGXBeginTimerQueryKM(psConnection, OSGetDevNode(psConnection), + psRGXBeginTimerQueryIN->ui32QueryId); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXEndTimerQuery(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXEndTimerQueryIN_UI8, + IMG_UINT8 * psRGXEndTimerQueryOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXENDTIMERQUERY 
*psRGXEndTimerQueryIN = + (PVRSRV_BRIDGE_IN_RGXENDTIMERQUERY *) IMG_OFFSET_ADDR(psRGXEndTimerQueryIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXENDTIMERQUERY *psRGXEndTimerQueryOUT = + (PVRSRV_BRIDGE_OUT_RGXENDTIMERQUERY *) IMG_OFFSET_ADDR(psRGXEndTimerQueryOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psRGXEndTimerQueryIN); + + psRGXEndTimerQueryOUT->eError = + PVRSRVRGXEndTimerQueryKM(psConnection, OSGetDevNode(psConnection)); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXQueryTimer(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXQueryTimerIN_UI8, + IMG_UINT8 * psRGXQueryTimerOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXQUERYTIMER *psRGXQueryTimerIN = + (PVRSRV_BRIDGE_IN_RGXQUERYTIMER *) IMG_OFFSET_ADDR(psRGXQueryTimerIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXQUERYTIMER *psRGXQueryTimerOUT = + (PVRSRV_BRIDGE_OUT_RGXQUERYTIMER *) IMG_OFFSET_ADDR(psRGXQueryTimerOUT_UI8, 0); + + psRGXQueryTimerOUT->eError = + PVRSRVRGXQueryTimerKM(psConnection, OSGetDevNode(psConnection), + psRGXQueryTimerIN->ui32QueryId, + &psRGXQueryTimerOUT->ui64StartTime, + &psRGXQueryTimerOUT->ui64EndTime); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitRGXTIMERQUERYBridge(void); +PVRSRV_ERROR DeinitRGXTIMERQUERYBridge(void); + +/* + * Register all RGXTIMERQUERY functions with services + */ +PVRSRV_ERROR InitRGXTIMERQUERYBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY, + PVRSRV_BRIDGE_RGXTIMERQUERY_RGXBEGINTIMERQUERY, + PVRSRVBridgeRGXBeginTimerQuery, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY, + PVRSRV_BRIDGE_RGXTIMERQUERY_RGXENDTIMERQUERY, + PVRSRVBridgeRGXEndTimerQuery, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY, + PVRSRV_BRIDGE_RGXTIMERQUERY_RGXQUERYTIMER, PVRSRVBridgeRGXQueryTimer, + NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all rgxtimerquery functions with services + */ +PVRSRV_ERROR 
DeinitRGXTIMERQUERYBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY, + PVRSRV_BRIDGE_RGXTIMERQUERY_RGXBEGINTIMERQUERY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY, + PVRSRV_BRIDGE_RGXTIMERQUERY_RGXENDTIMERQUERY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY, + PVRSRV_BRIDGE_RGXTIMERQUERY_RGXQUERYTIMER); + + return PVRSRV_OK; +} diff --git a/drivers/gpu/drm/phytium/octopus/server_rgxtq2_bridge.c b/drivers/gpu/drm/phytium/octopus/server_rgxtq2_bridge.c new file mode 100644 index 000000000000..5079c1f1bbae --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/server_rgxtq2_bridge.c @@ -0,0 +1,1166 @@ +/******************************************************************************* +@File +@Title Server bridge for rgxtq2 +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for rgxtq2 +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "rgxtdmtransfer.h" + +#include "common_rgxtq2_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +#include "rgx_bvnc_defs_km.h" + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static PVRSRV_ERROR _RGXTDMCreateTransferContextpsTransferContextIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PVRSRVRGXTDMDestroyTransferContextKM((RGX_SERVER_TQ_TDM_CONTEXT *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRGXTDMCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXTDMCreateTransferContextIN_UI8, + IMG_UINT8 * psRGXTDMCreateTransferContextOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT *psRGXTDMCreateTransferContextIN = + (PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT *) + IMG_OFFSET_ADDR(psRGXTDMCreateTransferContextIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT *psRGXTDMCreateTransferContextOUT = + (PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT *) + IMG_OFFSET_ADDR(psRGXTDMCreateTransferContextOUT_UI8, 0); + + IMG_BYTE *ui8FrameworkCmdInt = NULL; + IMG_HANDLE hPrivData = psRGXTDMCreateTransferContextIN->hPrivData; + IMG_HANDLE hPrivDataInt = NULL; + RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRGXTDMCreateTransferContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) + 0; + + if (unlikely(psRGXTDMCreateTransferContextIN->ui32FrameworkCmdSize > RGXFWIF_RF_CMD_SIZE)) + { + 
psRGXTDMCreateTransferContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXTDMCreateTransferContext_exit; + } + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_FASTRENDER_DM_BIT_MASK)) + { + psRGXTDMCreateTransferContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; + + goto RGXTDMCreateTransferContext_exit; + } + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXTDMCreateTransferContextIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = + (IMG_BYTE *) (void *)psRGXTDMCreateTransferContextIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXTDMCreateTransferContextOUT->eError = + PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXTDMCreateTransferContext_exit; + } + } + } + + if (psRGXTDMCreateTransferContextIN->ui32FrameworkCmdSize != 0) + { + ui8FrameworkCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXTDMCreateTransferContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXTDMCreateTransferContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, ui8FrameworkCmdInt, + (const void __user *)psRGXTDMCreateTransferContextIN->pui8FrameworkCmd, + psRGXTDMCreateTransferContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) != + PVRSRV_OK) 
+ { + psRGXTDMCreateTransferContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMCreateTransferContext_exit; + } + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXTDMCreateTransferContextOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hPrivDataInt, + hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); + if (unlikely(psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMCreateTransferContext_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXTDMCreateTransferContextOUT->eError = + PVRSRVRGXTDMCreateTransferContextKM(psConnection, OSGetDevNode(psConnection), + psRGXTDMCreateTransferContextIN->ui32Priority, + psRGXTDMCreateTransferContextIN-> + ui32FrameworkCmdSize, ui8FrameworkCmdInt, + hPrivDataInt, + psRGXTDMCreateTransferContextIN-> + ui32PackedCCBSizeU88, + psRGXTDMCreateTransferContextIN->ui32ContextFlags, + &psTransferContextInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK)) + { + goto RGXTDMCreateTransferContext_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psRGXTDMCreateTransferContextOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXTDMCreateTransferContextOUT->hTransferContext, + (void *)psTransferContextInt, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXTDMCreateTransferContextpsTransferContextIntRelease); + if (unlikely(psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMCreateTransferContext_exit; + } + + /* Release now we have created handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +RGXTDMCreateTransferContext_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (hPrivDataInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + if (psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK) + { + if (psTransferContextInt) + { + PVRSRVRGXTDMDestroyTransferContextKM(psTransferContextInt); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXTDMDestroyTransferContext(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXTDMDestroyTransferContextIN_UI8, + IMG_UINT8 * psRGXTDMDestroyTransferContextOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT *psRGXTDMDestroyTransferContextIN = + (PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT *) + IMG_OFFSET_ADDR(psRGXTDMDestroyTransferContextIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT *psRGXTDMDestroyTransferContextOUT = + (PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT *) + IMG_OFFSET_ADDR(psRGXTDMDestroyTransferContextOUT_UI8, 0); + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_FASTRENDER_DM_BIT_MASK)) + { + psRGXTDMDestroyTransferContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; + + goto RGXTDMDestroyTransferContext_exit; + } + } + + /* Lock over handle destruction. 
*/ + LockHandle(psConnection->psHandleBase); + + psRGXTDMDestroyTransferContextOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) psRGXTDMDestroyTransferContextIN-> + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT); + if (unlikely + ((psRGXTDMDestroyTransferContextOUT->eError != PVRSRV_OK) + && (psRGXTDMDestroyTransferContextOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, + PVRSRVGetErrorString(psRGXTDMDestroyTransferContextOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMDestroyTransferContext_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXTDMDestroyTransferContext_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXTDMSetTransferContextPriority(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXTDMSetTransferContextPriorityIN_UI8, + IMG_UINT8 * psRGXTDMSetTransferContextPriorityOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY *psRGXTDMSetTransferContextPriorityIN = + (PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY *) + IMG_OFFSET_ADDR(psRGXTDMSetTransferContextPriorityIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY *psRGXTDMSetTransferContextPriorityOUT = + (PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY *) + IMG_OFFSET_ADDR(psRGXTDMSetTransferContextPriorityOUT_UI8, 0); + + IMG_HANDLE hTransferContext = psRGXTDMSetTransferContextPriorityIN->hTransferContext; + RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL; + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_FASTRENDER_DM_BIT_MASK)) + { + psRGXTDMSetTransferContextPriorityOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; + + goto 
RGXTDMSetTransferContextPriority_exit; + } + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXTDMSetTransferContextPriorityOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psTransferContextInt, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, IMG_TRUE); + if (unlikely(psRGXTDMSetTransferContextPriorityOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMSetTransferContextPriority_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXTDMSetTransferContextPriorityOUT->eError = + PVRSRVRGXTDMSetTransferContextPriorityKM(psConnection, OSGetDevNode(psConnection), + psTransferContextInt, + psRGXTDMSetTransferContextPriorityIN-> + ui32Priority); + +RGXTDMSetTransferContextPriority_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psTransferContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXTDMNotifyWriteOffsetUpdate(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXTDMNotifyWriteOffsetUpdateIN_UI8, + IMG_UINT8 * psRGXTDMNotifyWriteOffsetUpdateOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE *psRGXTDMNotifyWriteOffsetUpdateIN = + (PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE *) + IMG_OFFSET_ADDR(psRGXTDMNotifyWriteOffsetUpdateIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE *psRGXTDMNotifyWriteOffsetUpdateOUT = + (PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE *) + IMG_OFFSET_ADDR(psRGXTDMNotifyWriteOffsetUpdateOUT_UI8, 0); + + IMG_HANDLE hTransferContext = psRGXTDMNotifyWriteOffsetUpdateIN->hTransferContext; + RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL; + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_FASTRENDER_DM_BIT_MASK)) + { + psRGXTDMNotifyWriteOffsetUpdateOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; + + goto RGXTDMNotifyWriteOffsetUpdate_exit; + } + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXTDMNotifyWriteOffsetUpdateOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psTransferContextInt, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, IMG_TRUE); + if (unlikely(psRGXTDMNotifyWriteOffsetUpdateOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMNotifyWriteOffsetUpdate_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psRGXTDMNotifyWriteOffsetUpdateOUT->eError = + PVRSRVRGXTDMNotifyWriteOffsetUpdateKM(psTransferContextInt, + psRGXTDMNotifyWriteOffsetUpdateIN-> + ui32PDumpFlags); + +RGXTDMNotifyWriteOffsetUpdate_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psTransferContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXTDMSubmitTransfer2(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXTDMSubmitTransfer2IN_UI8, + IMG_UINT8 * psRGXTDMSubmitTransfer2OUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER2 *psRGXTDMSubmitTransfer2IN = + (PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER2 *) + IMG_OFFSET_ADDR(psRGXTDMSubmitTransfer2IN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER2 *psRGXTDMSubmitTransfer2OUT = + (PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER2 *) + IMG_OFFSET_ADDR(psRGXTDMSubmitTransfer2OUT_UI8, 0); + + IMG_HANDLE hTransferContext = psRGXTDMSubmitTransfer2IN->hTransferContext; + RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL; + SYNC_PRIMITIVE_BLOCK **psUpdateUFOSyncPrimBlockInt = NULL; + IMG_HANDLE *hUpdateUFOSyncPrimBlockInt2 = NULL; + IMG_UINT32 *ui32UpdateSyncOffsetInt = NULL; + IMG_UINT32 *ui32UpdateValueInt = NULL; + IMG_CHAR *uiUpdateFenceNameInt = NULL; + IMG_UINT8 *ui8FWCommandInt = NULL; + IMG_UINT32 *ui32SyncPMRFlagsInt = NULL; + PMR **psSyncPMRsInt = NULL; + IMG_HANDLE *hSyncPMRsInt2 = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * 
sizeof(SYNC_PRIMITIVE_BLOCK *)) + + (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) + + (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + + (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + + (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + + (psRGXTDMSubmitTransfer2IN->ui32CommandSize * sizeof(IMG_UINT8)) + + (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) + + (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(PMR *)) + + (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0; + + if (unlikely(psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount > PVRSRV_MAX_SYNCS)) + { + psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXTDMSubmitTransfer2_exit; + } + + if (unlikely + (psRGXTDMSubmitTransfer2IN->ui32CommandSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) + { + psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXTDMSubmitTransfer2_exit; + } + + if (unlikely(psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS)) + { + psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXTDMSubmitTransfer2_exit; + } + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_FASTRENDER_DM_BIT_MASK)) + { + psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; + + goto RGXTDMSubmitTransfer2_exit; + } + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXTDMSubmitTransfer2IN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXTDMSubmitTransfer2IN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXTDMSubmitTransfer2_exit; + } + } + } + + if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount != 0) + { + psUpdateUFOSyncPrimBlockInt = + (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * + sizeof(SYNC_PRIMITIVE_BLOCK *); + hUpdateUFOSyncPrimBlockInt2 = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hUpdateUFOSyncPrimBlockInt2, + (const void __user *)psRGXTDMSubmitTransfer2IN->phUpdateUFOSyncPrimBlock, + psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != + PVRSRV_OK) + { + psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMSubmitTransfer2_exit; + } + } + if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount != 0) + { + ui32UpdateSyncOffsetInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32UpdateSyncOffsetInt, + (const void __user *)psRGXTDMSubmitTransfer2IN->pui32UpdateSyncOffset, + psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * 
sizeof(IMG_UINT32)) != + PVRSRV_OK) + { + psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMSubmitTransfer2_exit; + } + } + if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount != 0) + { + ui32UpdateValueInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32UpdateValueInt, + (const void __user *)psRGXTDMSubmitTransfer2IN->pui32UpdateValue, + psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != + PVRSRV_OK) + { + psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMSubmitTransfer2_exit; + } + } + + { + uiUpdateFenceNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiUpdateFenceNameInt, + (const void __user *)psRGXTDMSubmitTransfer2IN->puiUpdateFenceName, + PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMSubmitTransfer2_exit; + } + ((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) - + 1] = '\0'; + } + if (psRGXTDMSubmitTransfer2IN->ui32CommandSize != 0) + { + ui8FWCommandInt = (IMG_UINT8 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXTDMSubmitTransfer2IN->ui32CommandSize * sizeof(IMG_UINT8); + } + + /* Copy the data over */ + if (psRGXTDMSubmitTransfer2IN->ui32CommandSize * sizeof(IMG_UINT8) > 0) + { + if (OSCopyFromUser + (NULL, ui8FWCommandInt, + (const void __user *)psRGXTDMSubmitTransfer2IN->pui8FWCommand, + psRGXTDMSubmitTransfer2IN->ui32CommandSize * 
sizeof(IMG_UINT8)) != PVRSRV_OK) + { + psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMSubmitTransfer2_exit; + } + } + if (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount != 0) + { + ui32SyncPMRFlagsInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32SyncPMRFlagsInt, + (const void __user *)psRGXTDMSubmitTransfer2IN->pui32SyncPMRFlags, + psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMSubmitTransfer2_exit; + } + } + if (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount != 0) + { + psSyncPMRsInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(PMR *); + hSyncPMRsInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hSyncPMRsInt2, + (const void __user *)psRGXTDMSubmitTransfer2IN->phSyncPMRs, + psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXTDMSubmitTransfer2_exit; + } + } + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXTDMSubmitTransfer2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psTransferContextInt, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, IMG_TRUE); + if (unlikely(psRGXTDMSubmitTransfer2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMSubmitTransfer2_exit; + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount; i++) + { + /* Look up the address from the handle */ + psRGXTDMSubmitTransfer2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psUpdateUFOSyncPrimBlockInt[i], + hUpdateUFOSyncPrimBlockInt2[i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psRGXTDMSubmitTransfer2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMSubmitTransfer2_exit; + } + } + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount; i++) + { + /* Look up the address from the handle */ + psRGXTDMSubmitTransfer2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSyncPMRsInt[i], + hSyncPMRsInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psRGXTDMSubmitTransfer2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMSubmitTransfer2_exit; + } + } + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psRGXTDMSubmitTransfer2OUT->eError = + PVRSRVRGXTDMSubmitTransferKM(psTransferContextInt, + psRGXTDMSubmitTransfer2IN->ui32PDumpFlags, + psRGXTDMSubmitTransfer2IN->ui32ClientCacheOpSeqNum, + psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount, + psUpdateUFOSyncPrimBlockInt, + ui32UpdateSyncOffsetInt, + ui32UpdateValueInt, + psRGXTDMSubmitTransfer2IN->hCheckFenceFD, + psRGXTDMSubmitTransfer2IN->hUpdateTimeline, + &psRGXTDMSubmitTransfer2OUT->hUpdateFence, + uiUpdateFenceNameInt, + psRGXTDMSubmitTransfer2IN->ui32CommandSize, + ui8FWCommandInt, + psRGXTDMSubmitTransfer2IN->ui32ExternalJobReference, + psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount, + ui32SyncPMRFlagsInt, + psSyncPMRsInt, + psRGXTDMSubmitTransfer2IN->ui32Characteristic1, + psRGXTDMSubmitTransfer2IN->ui32Characteristic2, + psRGXTDMSubmitTransfer2IN->ui64DeadlineInus); + +RGXTDMSubmitTransfer2_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psTransferContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT); + } + + if (hUpdateUFOSyncPrimBlockInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount; i++) + { + + /* Unreference the previously looked up handle */ + if (hUpdateUFOSyncPrimBlockInt2[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hUpdateUFOSyncPrimBlockInt2[i], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + } + } + + if (hSyncPMRsInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount; i++) + { + + /* Unreference the previously looked up handle */ + if (hSyncPMRsInt2[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSyncPMRsInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + } + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static PVRSRV_ERROR _RGXTDMGetSharedMemorypsCLIPMRMemIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PVRSRVRGXTDMReleaseSharedMemoryKM((PMR *) pvData); + return eError; +} + +static PVRSRV_ERROR _RGXTDMGetSharedMemorypsUSCPMRMemIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PVRSRVRGXTDMReleaseSharedMemoryKM((PMR *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRGXTDMGetSharedMemory(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXTDMGetSharedMemoryIN_UI8, + IMG_UINT8 * psRGXTDMGetSharedMemoryOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXTDMGETSHAREDMEMORY *psRGXTDMGetSharedMemoryIN = + (PVRSRV_BRIDGE_IN_RGXTDMGETSHAREDMEMORY *) + IMG_OFFSET_ADDR(psRGXTDMGetSharedMemoryIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXTDMGETSHAREDMEMORY *psRGXTDMGetSharedMemoryOUT = + (PVRSRV_BRIDGE_OUT_RGXTDMGETSHAREDMEMORY *) + IMG_OFFSET_ADDR(psRGXTDMGetSharedMemoryOUT_UI8, 0); + + PMR *psCLIPMRMemInt = NULL; + PMR *psUSCPMRMemInt = NULL; + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_FASTRENDER_DM_BIT_MASK)) + { + psRGXTDMGetSharedMemoryOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; + + goto RGXTDMGetSharedMemory_exit; + } + } + + PVR_UNREFERENCED_PARAMETER(psRGXTDMGetSharedMemoryIN); + + psRGXTDMGetSharedMemoryOUT->eError = + PVRSRVRGXTDMGetSharedMemoryKM(psConnection, OSGetDevNode(psConnection), + &psCLIPMRMemInt, &psUSCPMRMemInt); + /* Exit early if bridged call fails */ + if 
(unlikely(psRGXTDMGetSharedMemoryOUT->eError != PVRSRV_OK)) + { + goto RGXTDMGetSharedMemory_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psRGXTDMGetSharedMemoryOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXTDMGetSharedMemoryOUT-> + hCLIPMRMem, + (void *)psCLIPMRMemInt, + PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXTDMGetSharedMemorypsCLIPMRMemIntRelease); + if (unlikely(psRGXTDMGetSharedMemoryOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMGetSharedMemory_exit; + } + + psRGXTDMGetSharedMemoryOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXTDMGetSharedMemoryOUT-> + hUSCPMRMem, + (void *)psUSCPMRMemInt, + PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXTDMGetSharedMemorypsUSCPMRMemIntRelease); + if (unlikely(psRGXTDMGetSharedMemoryOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMGetSharedMemory_exit; + } + + /* Release now we have created handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +RGXTDMGetSharedMemory_exit: + + if (psRGXTDMGetSharedMemoryOUT->eError != PVRSRV_OK) + { + if (psCLIPMRMemInt) + { + PVRSRVRGXTDMReleaseSharedMemoryKM(psCLIPMRMemInt); + } + if (psUSCPMRMemInt) + { + PVRSRVRGXTDMReleaseSharedMemoryKM(psUSCPMRMemInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXTDMReleaseSharedMemory(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXTDMReleaseSharedMemoryIN_UI8, + IMG_UINT8 * psRGXTDMReleaseSharedMemoryOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXTDMRELEASESHAREDMEMORY *psRGXTDMReleaseSharedMemoryIN = + (PVRSRV_BRIDGE_IN_RGXTDMRELEASESHAREDMEMORY *) + IMG_OFFSET_ADDR(psRGXTDMReleaseSharedMemoryIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXTDMRELEASESHAREDMEMORY *psRGXTDMReleaseSharedMemoryOUT = + (PVRSRV_BRIDGE_OUT_RGXTDMRELEASESHAREDMEMORY *) + IMG_OFFSET_ADDR(psRGXTDMReleaseSharedMemoryOUT_UI8, 0); + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_FASTRENDER_DM_BIT_MASK)) + { + psRGXTDMReleaseSharedMemoryOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; + + goto RGXTDMReleaseSharedMemory_exit; + } + } + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psRGXTDMReleaseSharedMemoryOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) psRGXTDMReleaseSharedMemoryIN->hPMRMem, + PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE); + if (unlikely((psRGXTDMReleaseSharedMemoryOUT->eError != PVRSRV_OK) && + (psRGXTDMReleaseSharedMemoryOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psRGXTDMReleaseSharedMemoryOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMReleaseSharedMemory_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +RGXTDMReleaseSharedMemory_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXTDMSetTransferContextProperty(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXTDMSetTransferContextPropertyIN_UI8, + IMG_UINT8 * psRGXTDMSetTransferContextPropertyOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPROPERTY *psRGXTDMSetTransferContextPropertyIN = + (PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXTDMSetTransferContextPropertyIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPROPERTY *psRGXTDMSetTransferContextPropertyOUT = + (PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXTDMSetTransferContextPropertyOUT_UI8, 0); + + IMG_HANDLE hTransferContext = psRGXTDMSetTransferContextPropertyIN->hTransferContext; + RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL; + + { + PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); + + /* Check that device supports the required feature */ + if ((psDeviceNode->pfnCheckDeviceFeature) && + !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, + RGX_FEATURE_FASTRENDER_DM_BIT_MASK)) + { + psRGXTDMSetTransferContextPropertyOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; + + goto RGXTDMSetTransferContextProperty_exit; + } + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXTDMSetTransferContextPropertyOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psTransferContextInt, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, IMG_TRUE); + if (unlikely(psRGXTDMSetTransferContextPropertyOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXTDMSetTransferContextProperty_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psRGXTDMSetTransferContextPropertyOUT->eError = + PVRSRVRGXTDMSetTransferContextPropertyKM(psTransferContextInt, + psRGXTDMSetTransferContextPropertyIN-> + ui32Property, + psRGXTDMSetTransferContextPropertyIN-> + ui64Input, + &psRGXTDMSetTransferContextPropertyOUT-> + ui64Output); + +RGXTDMSetTransferContextProperty_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psTransferContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitRGXTQ2Bridge(void); +PVRSRV_ERROR DeinitRGXTQ2Bridge(void); + +/* + * Register all RGXTQ2 functions with services + */ +PVRSRV_ERROR InitRGXTQ2Bridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT, + PVRSRVBridgeRGXTDMCreateTransferContext, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT, + PVRSRVBridgeRGXTDMDestroyTransferContext, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY, + PVRSRVBridgeRGXTDMSetTransferContextPriority, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE, + PVRSRVBridgeRGXTDMNotifyWriteOffsetUpdate, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER2, + PVRSRVBridgeRGXTDMSubmitTransfer2, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMGETSHAREDMEMORY, + PVRSRVBridgeRGXTDMGetSharedMemory, NULL); + + 
SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMRELEASESHAREDMEMORY, + PVRSRVBridgeRGXTDMReleaseSharedMemory, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPROPERTY, + PVRSRVBridgeRGXTDMSetTransferContextProperty, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all rgxtq2 functions with services + */ +PVRSRV_ERROR DeinitRGXTQ2Bridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER2); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMGETSHAREDMEMORY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMRELEASESHAREDMEMORY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, + PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPROPERTY); + + return PVRSRV_OK; +} diff --git a/drivers/gpu/drm/phytium/octopus/server_rgxtq_bridge.c b/drivers/gpu/drm/phytium/octopus/server_rgxtq_bridge.c new file mode 100644 index 000000000000..356293965c44 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/server_rgxtq_bridge.c @@ -0,0 +1,1162 @@ +/******************************************************************************* +@File +@Title Server bridge for rgxtq +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for rgxtq +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxtransfer.h"
+#include "rgx_tq_shared.h"
+
+#include "common_rgxtq_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
RGXFWIF_RF_CMD_SIZE)) + { + psRGXCreateTransferContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXCreateTransferContext_exit; + } + + psRGXCreateTransferContextOUT->hTransferContext = NULL; + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXCreateTransferContextIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXCreateTransferContextIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXCreateTransferContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXCreateTransferContext_exit; + } + } + } + + if (psRGXCreateTransferContextIN->ui32FrameworkCmdize != 0) + { + ui8FrameworkCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE); + } + + /* Copy the data over */ + if (psRGXCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE) > 0) + { + if (OSCopyFromUser + (NULL, ui8FrameworkCmdInt, + (const void __user *)psRGXCreateTransferContextIN->pui8FrameworkCmd, + psRGXCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) != + PVRSRV_OK) + { + psRGXCreateTransferContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXCreateTransferContext_exit; + } + } + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXCreateTransferContextOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&hPrivDataInt, + hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); + if (unlikely(psRGXCreateTransferContextOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateTransferContext_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXCreateTransferContextOUT->eError = + PVRSRVRGXCreateTransferContextKM(psConnection, OSGetDevNode(psConnection), + psRGXCreateTransferContextIN->ui32Priority, + psRGXCreateTransferContextIN->ui32FrameworkCmdize, + ui8FrameworkCmdInt, + hPrivDataInt, + psRGXCreateTransferContextIN->ui32PackedCCBSizeU8888, + psRGXCreateTransferContextIN->ui32ContextFlags, + &psTransferContextInt, + &psCLIPMRMemInt, &psUSCPMRMemInt); + /* Exit early if bridged call fails */ + if (unlikely(psRGXCreateTransferContextOUT->eError != PVRSRV_OK)) + { + goto RGXCreateTransferContext_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psRGXCreateTransferContextOUT->eError = + PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRGXCreateTransferContextOUT->hTransferContext, + (void *)psTransferContextInt, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RGXCreateTransferContextpsTransferContextIntRelease); + if (unlikely(psRGXCreateTransferContextOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateTransferContext_exit; + } + + psRGXCreateTransferContextOUT->eError = + PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase, + &psRGXCreateTransferContextOUT->hCLIPMRMem, + (void *)psCLIPMRMemInt, + PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + psRGXCreateTransferContextOUT->hTransferContext); + if (unlikely(psRGXCreateTransferContextOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateTransferContext_exit; + } + + psRGXCreateTransferContextOUT->eError = + PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase, + &psRGXCreateTransferContextOUT->hUSCPMRMem, + (void *)psUSCPMRMemInt, + PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + psRGXCreateTransferContextOUT->hTransferContext); + if (unlikely(psRGXCreateTransferContextOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXCreateTransferContext_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXCreateTransferContext_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (hPrivDataInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + if (psRGXCreateTransferContextOUT->eError != PVRSRV_OK) + { + if (psRGXCreateTransferContextOUT->hTransferContext) + { + PVRSRV_ERROR eError; + + /* Lock over handle creation cleanup. */ + LockHandle(psConnection->psHandleBase); + + eError = PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + (IMG_HANDLE) + psRGXCreateTransferContextOUT-> + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT); + if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", __func__, PVRSRVGetErrorString(eError))); + } + /* Releasing the handle should free/destroy/release the resource. + * This should never fail... */ + PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); + + /* Avoid freeing/destroying/releasing the resource a second time below */ + psTransferContextInt = NULL; + /* Release now we have cleaned up creation handles. */ + UnlockHandle(psConnection->psHandleBase); + + } + + if (psTransferContextInt) + { + PVRSRVRGXDestroyTransferContextKM(psTransferContextInt); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXDestroyTransferContext(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXDestroyTransferContextIN_UI8, + IMG_UINT8 * psRGXDestroyTransferContextOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT *psRGXDestroyTransferContextIN = + (PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT *) + IMG_OFFSET_ADDR(psRGXDestroyTransferContextIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT *psRGXDestroyTransferContextOUT = + (PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT *) + IMG_OFFSET_ADDR(psRGXDestroyTransferContextOUT_UI8, 0); + 
+ /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psRGXDestroyTransferContextOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) psRGXDestroyTransferContextIN-> + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT); + if (unlikely + ((psRGXDestroyTransferContextOUT->eError != PVRSRV_OK) + && (psRGXDestroyTransferContextOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psRGXDestroyTransferContextOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto RGXDestroyTransferContext_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +RGXDestroyTransferContext_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXSetTransferContextPriority(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXSetTransferContextPriorityIN_UI8, + IMG_UINT8 * psRGXSetTransferContextPriorityOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY *psRGXSetTransferContextPriorityIN = + (PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY *) + IMG_OFFSET_ADDR(psRGXSetTransferContextPriorityIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY *psRGXSetTransferContextPriorityOUT = + (PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY *) + IMG_OFFSET_ADDR(psRGXSetTransferContextPriorityOUT_UI8, 0); + + IMG_HANDLE hTransferContext = psRGXSetTransferContextPriorityIN->hTransferContext; + RGX_SERVER_TQ_CONTEXT *psTransferContextInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXSetTransferContextPriorityOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psTransferContextInt, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT, IMG_TRUE); + if (unlikely(psRGXSetTransferContextPriorityOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXSetTransferContextPriority_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXSetTransferContextPriorityOUT->eError = + PVRSRVRGXSetTransferContextPriorityKM(psConnection, OSGetDevNode(psConnection), + psTransferContextInt, + psRGXSetTransferContextPriorityIN->ui32Priority); + +RGXSetTransferContextPriority_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psTransferContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXSubmitTransfer2(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXSubmitTransfer2IN_UI8, + IMG_UINT8 * psRGXSubmitTransfer2OUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER2 *psRGXSubmitTransfer2IN = + (PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER2 *) IMG_OFFSET_ADDR(psRGXSubmitTransfer2IN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER2 *psRGXSubmitTransfer2OUT = + (PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER2 *) IMG_OFFSET_ADDR(psRGXSubmitTransfer2OUT_UI8, + 0); + + IMG_HANDLE hTransferContext = psRGXSubmitTransfer2IN->hTransferContext; + RGX_SERVER_TQ_CONTEXT *psTransferContextInt = NULL; + IMG_UINT32 *ui32ClientUpdateCountInt = NULL; + SYNC_PRIMITIVE_BLOCK ***psUpdateUFOSyncPrimBlockInt = NULL; + IMG_HANDLE **hUpdateUFOSyncPrimBlockInt2 = NULL; + IMG_UINT32 **ui32UpdateSyncOffsetInt = NULL; + IMG_UINT32 **ui32UpdateValueInt = NULL; + IMG_CHAR *uiUpdateFenceNameInt = NULL; + IMG_UINT32 *ui32CommandSizeInt = NULL; + IMG_UINT8 **ui8FWCommandInt = NULL; + IMG_UINT32 *ui32TQPrepareFlagsInt = NULL; + IMG_UINT32 *ui32SyncPMRFlagsInt = NULL; + PMR **psSyncPMRsInt = NULL; + IMG_HANDLE *hSyncPMRsInt2 = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; + IMG_BYTE *pArrayArgsBuffer2 = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32)) + + (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + + (psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32)) + + (psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32)) + + (psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) + + (psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(PMR *)) + + (psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0; + IMG_UINT32 ui32BufferSize2 = 0; + IMG_UINT32 ui32NextOffset2 = 0; + + if 
(psRGXSubmitTransfer2IN->ui32PrepareCount != 0) + { + + ui32BufferSize += + psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(SYNC_PRIMITIVE_BLOCK **); + ui32BufferSize += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_HANDLE **); + ui32BufferSize += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32 *); + ui32BufferSize += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32 *); + ui32BufferSize += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT8 *); + } + + if (unlikely(psRGXSubmitTransfer2IN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS)) + { + psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXSubmitTransfer2_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRGXSubmitTransfer2IN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXSubmitTransfer2IN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXSubmitTransfer2_exit; + } + } + } + + if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) + { + ui32ClientUpdateCountInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32ClientUpdateCountInt, + (const void __user *)psRGXSubmitTransfer2IN->pui32ClientUpdateCount, + psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer2_exit; + } + } + if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) + { + /* Assigning psUpdateUFOSyncPrimBlockInt to the right offset in the pool buffer for first dimension */ + psUpdateUFOSyncPrimBlockInt = + (SYNC_PRIMITIVE_BLOCK ***) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += + psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(SYNC_PRIMITIVE_BLOCK **); + /* Assigning hUpdateUFOSyncPrimBlockInt2 to the right offset in the pool buffer for first dimension */ + hUpdateUFOSyncPrimBlockInt2 = + (IMG_HANDLE **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_HANDLE); + } + + if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) + { + /* Assigning ui32UpdateSyncOffsetInt to the right offset in the pool buffer for first dimension */ + 
ui32UpdateSyncOffsetInt = + (IMG_UINT32 **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32 *); + } + + if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) + { + /* Assigning ui32UpdateValueInt to the right offset in the pool buffer for first dimension */ + ui32UpdateValueInt = + (IMG_UINT32 **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32 *); + } + + { + uiUpdateFenceNameInt = + (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiUpdateFenceNameInt, + (const void __user *)psRGXSubmitTransfer2IN->puiUpdateFenceName, + PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer2_exit; + } + ((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) - + 1] = '\0'; + } + if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) + { + ui32CommandSizeInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32CommandSizeInt, + (const void __user *)psRGXSubmitTransfer2IN->pui32CommandSize, + psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer2_exit; + } + } + if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) + { + /* Assigning ui8FWCommandInt to the right offset in the pool buffer for first dimension */ + ui8FWCommandInt = (IMG_UINT8 **) 
IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT8 *); + } + + if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) + { + ui32TQPrepareFlagsInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32TQPrepareFlagsInt, + (const void __user *)psRGXSubmitTransfer2IN->pui32TQPrepareFlags, + psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer2_exit; + } + } + if (psRGXSubmitTransfer2IN->ui32SyncPMRCount != 0) + { + ui32SyncPMRFlagsInt = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32); + } + + /* Copy the data over */ + if (psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0) + { + if (OSCopyFromUser + (NULL, ui32SyncPMRFlagsInt, + (const void __user *)psRGXSubmitTransfer2IN->pui32SyncPMRFlags, + psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK) + { + psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer2_exit; + } + } + if (psRGXSubmitTransfer2IN->ui32SyncPMRCount != 0) + { + psSyncPMRsInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(PMR *); + hSyncPMRsInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE); + } + + /* Copy the data over */ + if (psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0) + { + if (OSCopyFromUser + (NULL, hSyncPMRsInt2, (const void __user 
*)psRGXSubmitTransfer2IN->phSyncPMRs, + psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer2_exit; + } + } + + if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) + { + IMG_UINT32 i; + for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) + { + ui32BufferSize2 += + ui32ClientUpdateCountInt[i] * sizeof(SYNC_PRIMITIVE_BLOCK *); + ui32BufferSize2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE *); + ui32BufferSize2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32); + ui32BufferSize2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32); + ui32BufferSize2 += ui32CommandSizeInt[i] * sizeof(IMG_UINT8); + } + } + + if (ui32BufferSize2 != 0) + { + pArrayArgsBuffer2 = OSAllocZMemNoStats(ui32BufferSize2); + + if (!pArrayArgsBuffer2) + { + psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RGXSubmitTransfer2_exit; + } + } + + if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) + { + IMG_UINT32 i; + for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) + { + if (ui32ClientUpdateCountInt[i] > PVRSRV_MAX_SYNCS) + { + psRGXSubmitTransfer2OUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXSubmitTransfer2_exit; + } + + /* Assigning each psUpdateUFOSyncPrimBlockInt to the right offset in the pool buffer (this is the second dimension) */ + psUpdateUFOSyncPrimBlockInt[i] = + (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer2, + ui32NextOffset2); + ui32NextOffset2 += + ui32ClientUpdateCountInt[i] * sizeof(SYNC_PRIMITIVE_BLOCK *); + /* Assigning each hUpdateUFOSyncPrimBlockInt2 to the right offset in the pool buffer (this is the second dimension) */ + hUpdateUFOSyncPrimBlockInt2[i] = + (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer2, ui32NextOffset2); + ui32NextOffset2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE); + } + } + if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) + { + IMG_UINT32 i; + for 
(i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) + { + /* Assigning each ui32UpdateSyncOffsetInt to the right offset in the pool buffer (this is the second dimension) */ + ui32UpdateSyncOffsetInt[i] = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer2, ui32NextOffset2); + ui32NextOffset2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32); + } + } + if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) + { + IMG_UINT32 i; + for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) + { + /* Assigning each ui32UpdateValueInt to the right offset in the pool buffer (this is the second dimension) */ + ui32UpdateValueInt[i] = + (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer2, ui32NextOffset2); + ui32NextOffset2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32); + } + } + if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) + { + IMG_UINT32 i; + for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) + { + if (ui32CommandSizeInt[i] > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE) + { + psRGXSubmitTransfer2OUT->eError = + PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RGXSubmitTransfer2_exit; + } + + /* Assigning each ui8FWCommandInt to the right offset in the pool buffer (this is the second dimension) */ + ui8FWCommandInt[i] = + (IMG_UINT8 *) IMG_OFFSET_ADDR(pArrayArgsBuffer2, ui32NextOffset2); + ui32NextOffset2 += ui32CommandSizeInt[i] * sizeof(IMG_UINT8); + } + } + + { + IMG_UINT32 i; + IMG_HANDLE **psPtr; + + /* Loop over all the pointers in the array copying the data into the kernel */ + for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) + { + /* Copy the pointer over from the client side */ + if (OSCopyFromUser + (NULL, &psPtr, + (const void __user *)&psRGXSubmitTransfer2IN-> + phUpdateUFOSyncPrimBlock[i], sizeof(IMG_HANDLE **)) != PVRSRV_OK) + { + psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer2_exit; + } + + /* Copy the data over */ + if ((ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE)) > 0) + { + 
if (OSCopyFromUser + (NULL, (hUpdateUFOSyncPrimBlockInt2[i]), + (const void __user *)psPtr, + (ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE))) != + PVRSRV_OK) + { + psRGXSubmitTransfer2OUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer2_exit; + } + } + } + } + + { + IMG_UINT32 i; + IMG_UINT32 **psPtr; + + /* Loop over all the pointers in the array copying the data into the kernel */ + for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) + { + /* Copy the pointer over from the client side */ + if (OSCopyFromUser + (NULL, &psPtr, + (const void __user *)&psRGXSubmitTransfer2IN->pui32UpdateSyncOffset[i], + sizeof(IMG_UINT32 **)) != PVRSRV_OK) + { + psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer2_exit; + } + + /* Copy the data over */ + if ((ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32)) > 0) + { + if (OSCopyFromUser + (NULL, (ui32UpdateSyncOffsetInt[i]), (const void __user *)psPtr, + (ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32))) != + PVRSRV_OK) + { + psRGXSubmitTransfer2OUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer2_exit; + } + } + } + } + + { + IMG_UINT32 i; + IMG_UINT32 **psPtr; + + /* Loop over all the pointers in the array copying the data into the kernel */ + for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) + { + /* Copy the pointer over from the client side */ + if (OSCopyFromUser + (NULL, &psPtr, + (const void __user *)&psRGXSubmitTransfer2IN->pui32UpdateValue[i], + sizeof(IMG_UINT32 **)) != PVRSRV_OK) + { + psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer2_exit; + } + + /* Copy the data over */ + if ((ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32)) > 0) + { + if (OSCopyFromUser + (NULL, (ui32UpdateValueInt[i]), (const void __user *)psPtr, + (ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32))) != + PVRSRV_OK) + { + psRGXSubmitTransfer2OUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto 
RGXSubmitTransfer2_exit; + } + } + } + } + + { + IMG_UINT32 i; + IMG_UINT8 **psPtr; + + /* Loop over all the pointers in the array copying the data into the kernel */ + for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) + { + /* Copy the pointer over from the client side */ + if (OSCopyFromUser + (NULL, &psPtr, + (const void __user *)&psRGXSubmitTransfer2IN->pui8FWCommand[i], + sizeof(IMG_UINT8 **)) != PVRSRV_OK) + { + psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer2_exit; + } + + /* Copy the data over */ + if ((ui32CommandSizeInt[i] * sizeof(IMG_UINT8)) > 0) + { + if (OSCopyFromUser + (NULL, (ui8FWCommandInt[i]), (const void __user *)psPtr, + (ui32CommandSizeInt[i] * sizeof(IMG_UINT8))) != PVRSRV_OK) + { + psRGXSubmitTransfer2OUT->eError = + PVRSRV_ERROR_INVALID_PARAMS; + + goto RGXSubmitTransfer2_exit; + } + } + } + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXSubmitTransfer2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psTransferContextInt, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT, IMG_TRUE); + if (unlikely(psRGXSubmitTransfer2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXSubmitTransfer2_exit; + } + + { + IMG_UINT32 i; + + for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) + { + IMG_UINT32 j; + for (j = 0; j < ui32ClientUpdateCountInt[i]; j++) + { + /* Look up the address from the handle */ + psRGXSubmitTransfer2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **) + &psUpdateUFOSyncPrimBlockInt[i][j], + hUpdateUFOSyncPrimBlockInt2[i][j], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + IMG_TRUE); + if (unlikely(psRGXSubmitTransfer2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXSubmitTransfer2_exit; + } + } + } + } + + { + IMG_UINT32 i; + + for (i = 0; i 
< psRGXSubmitTransfer2IN->ui32SyncPMRCount; i++) + { + /* Look up the address from the handle */ + psRGXSubmitTransfer2OUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSyncPMRsInt[i], + hSyncPMRsInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psRGXSubmitTransfer2OUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXSubmitTransfer2_exit; + } + } + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRGXSubmitTransfer2OUT->eError = + PVRSRVRGXSubmitTransferKM(psTransferContextInt, + psRGXSubmitTransfer2IN->ui32ClientCacheOpSeqNum, + psRGXSubmitTransfer2IN->ui32PrepareCount, + ui32ClientUpdateCountInt, + psUpdateUFOSyncPrimBlockInt, + ui32UpdateSyncOffsetInt, + ui32UpdateValueInt, + psRGXSubmitTransfer2IN->hCheckFenceFD, + psRGXSubmitTransfer2IN->h2DUpdateTimeline, + &psRGXSubmitTransfer2OUT->h2DUpdateFence, + psRGXSubmitTransfer2IN->h3DUpdateTimeline, + &psRGXSubmitTransfer2OUT->h3DUpdateFence, + uiUpdateFenceNameInt, + ui32CommandSizeInt, + ui8FWCommandInt, + ui32TQPrepareFlagsInt, + psRGXSubmitTransfer2IN->ui32ExtJobRef, + psRGXSubmitTransfer2IN->ui32SyncPMRCount, + ui32SyncPMRFlagsInt, psSyncPMRsInt); + +RGXSubmitTransfer2_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psTransferContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT); + } + + if (hUpdateUFOSyncPrimBlockInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) + { + IMG_UINT32 j; + for (j = 0; j < ui32ClientUpdateCountInt[i]; j++) + { + + /* Unreference the previously looked up handle */ + if (hUpdateUFOSyncPrimBlockInt2[i][j]) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hUpdateUFOSyncPrimBlockInt2[i] + [j], + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + } + } + } + + if (hSyncPMRsInt2) + { + IMG_UINT32 i; + + for (i = 0; i < psRGXSubmitTransfer2IN->ui32SyncPMRCount; i++) + { + + /* Unreference the previously looked up handle */ + if (hSyncPMRsInt2[i]) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSyncPMRsInt2[i], + PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + } + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize2 == ui32NextOffset2); + + if (pArrayArgsBuffer2) + OSFreeMemNoStats(pArrayArgsBuffer2); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRGXSetTransferContextProperty(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRGXSetTransferContextPropertyIN_UI8, + IMG_UINT8 * psRGXSetTransferContextPropertyOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPROPERTY *psRGXSetTransferContextPropertyIN = + (PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXSetTransferContextPropertyIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPROPERTY *psRGXSetTransferContextPropertyOUT = + (PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPROPERTY *) + IMG_OFFSET_ADDR(psRGXSetTransferContextPropertyOUT_UI8, 0); + + IMG_HANDLE hTransferContext = psRGXSetTransferContextPropertyIN->hTransferContext; + RGX_SERVER_TQ_CONTEXT *psTransferContextInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRGXSetTransferContextPropertyOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psTransferContextInt, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT, IMG_TRUE); + if (unlikely(psRGXSetTransferContextPropertyOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RGXSetTransferContextProperty_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psRGXSetTransferContextPropertyOUT->eError = + PVRSRVRGXSetTransferContextPropertyKM(psTransferContextInt, + psRGXSetTransferContextPropertyIN->ui32Property, + psRGXSetTransferContextPropertyIN->ui64Input, + &psRGXSetTransferContextPropertyOUT->ui64Output); + +RGXSetTransferContextProperty_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psTransferContextInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hTransferContext, + PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +#endif /* SUPPORT_RGXTQ_BRIDGE */ + +#if defined(SUPPORT_RGXTQ_BRIDGE) +PVRSRV_ERROR InitRGXTQBridge(void); +PVRSRV_ERROR DeinitRGXTQBridge(void); + +/* + * Register all RGXTQ functions with services + */ +PVRSRV_ERROR InitRGXTQBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT, + PVRSRVBridgeRGXCreateTransferContext, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT, + PVRSRVBridgeRGXDestroyTransferContext, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, + PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY, + PVRSRVBridgeRGXSetTransferContextPriority, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2, + PVRSRVBridgeRGXSubmitTransfer2, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, + PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPROPERTY, + PVRSRVBridgeRGXSetTransferContextProperty, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all rgxtq functions with services + */ +PVRSRV_ERROR DeinitRGXTQBridge(void) +{ + + 
UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, + PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, + PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPROPERTY); + + return PVRSRV_OK; +} +#else /* SUPPORT_RGXTQ_BRIDGE */ +/* This bridge is conditional on SUPPORT_RGXTQ_BRIDGE - when not defined, + * do not populate the dispatch table with its functions + */ +#define InitRGXTQBridge() \ + PVRSRV_OK + +#define DeinitRGXTQBridge() \ + PVRSRV_OK + +#endif /* SUPPORT_RGXTQ_BRIDGE */ diff --git a/drivers/gpu/drm/phytium/octopus/server_ri_bridge.c b/drivers/gpu/drm/phytium/octopus/server_ri_bridge.c new file mode 100644 index 000000000000..1de5d27b917b --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/server_ri_bridge.c @@ -0,0 +1,729 @@ +/******************************************************************************* +@File +@Title Server bridge for ri +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for ri +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "ri_server.h" + +#include "common_ri_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgeRIWritePMREntry(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRIWritePMREntryIN_UI8, + IMG_UINT8 * psRIWritePMREntryOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY *psRIWritePMREntryIN = + (PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY *) IMG_OFFSET_ADDR(psRIWritePMREntryIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY *psRIWritePMREntryOUT = + (PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY *) IMG_OFFSET_ADDR(psRIWritePMREntryOUT_UI8, 0); + + IMG_HANDLE hPMRHandle = psRIWritePMREntryIN->hPMRHandle; + PMR *psPMRHandleInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRIWritePMREntryOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRHandleInt, + hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psRIWritePMREntryOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RIWritePMREntry_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRIWritePMREntryOUT->eError = RIWritePMREntryKM(psPMRHandleInt); + +RIWritePMREntry_exit: + + /* Lock over handle lookup cleanup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRHandleInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static PVRSRV_ERROR _RIWriteMEMDESCEntrypsRIHandleIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = RIDeleteMEMDESCEntryKM((RI_HANDLE) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRIWriteMEMDESCEntry(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRIWriteMEMDESCEntryIN_UI8, + IMG_UINT8 * psRIWriteMEMDESCEntryOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY *psRIWriteMEMDESCEntryIN = + (PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY *) IMG_OFFSET_ADDR(psRIWriteMEMDESCEntryIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY *psRIWriteMEMDESCEntryOUT = + (PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY *) IMG_OFFSET_ADDR(psRIWriteMEMDESCEntryOUT_UI8, + 0); + + IMG_HANDLE hPMRHandle = psRIWriteMEMDESCEntryIN->hPMRHandle; + PMR *psPMRHandleInt = NULL; + IMG_CHAR *uiTextBInt = NULL; + RI_HANDLE psRIHandleInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = (psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) + 0; + + if (unlikely(psRIWriteMEMDESCEntryIN->ui32TextBSize > DEVMEM_ANNOTATION_MAX_LEN)) + { + psRIWriteMEMDESCEntryOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RIWriteMEMDESCEntry_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRIWriteMEMDESCEntryIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRIWriteMEMDESCEntryIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRIWriteMEMDESCEntryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RIWriteMEMDESCEntry_exit; + } + } + } + + if (psRIWriteMEMDESCEntryIN->ui32TextBSize != 0) + { + uiTextBInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiTextBInt, (const void __user *)psRIWriteMEMDESCEntryIN->puiTextB, + psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psRIWriteMEMDESCEntryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RIWriteMEMDESCEntry_exit; + } + ((IMG_CHAR *) + uiTextBInt)[(psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) - 1] = + '\0'; + } + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRIWriteMEMDESCEntryOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRHandleInt, + hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RIWriteMEMDESCEntry_exit; + } + /* Release now we have looked up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + psRIWriteMEMDESCEntryOUT->eError = + RIWriteMEMDESCEntryKM(psPMRHandleInt, + psRIWriteMEMDESCEntryIN->ui32TextBSize, + uiTextBInt, + psRIWriteMEMDESCEntryIN->ui64Offset, + psRIWriteMEMDESCEntryIN->ui64Size, + psRIWriteMEMDESCEntryIN->bIsImport, + psRIWriteMEMDESCEntryIN->bIsSuballoc, &psRIHandleInt); + /* Exit early if bridged call fails */ + if (unlikely(psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)) + { + goto RIWriteMEMDESCEntry_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psRIWriteMEMDESCEntryOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRIWriteMEMDESCEntryOUT-> + hRIHandle, + (void *)psRIHandleInt, + PVRSRV_HANDLE_TYPE_RI_HANDLE, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RIWriteMEMDESCEntrypsRIHandleIntRelease); + if (unlikely(psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RIWriteMEMDESCEntry_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +RIWriteMEMDESCEntry_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psPMRHandleInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + if (psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK) + { + if (psRIHandleInt) + { + RIDeleteMEMDESCEntryKM(psRIHandleInt); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static PVRSRV_ERROR _RIWriteProcListEntrypsRIHandleIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = RIDeleteMEMDESCEntryKM((RI_HANDLE) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeRIWriteProcListEntry(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRIWriteProcListEntryIN_UI8, + IMG_UINT8 * psRIWriteProcListEntryOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY *psRIWriteProcListEntryIN = + (PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY *) IMG_OFFSET_ADDR(psRIWriteProcListEntryIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY *psRIWriteProcListEntryOUT = + (PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY *) + IMG_OFFSET_ADDR(psRIWriteProcListEntryOUT_UI8, 0); + + IMG_CHAR *uiTextBInt = NULL; + RI_HANDLE psRIHandleInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = + (psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) + 0; + + if (unlikely(psRIWriteProcListEntryIN->ui32TextBSize > DEVMEM_ANNOTATION_MAX_LEN)) + { + psRIWriteProcListEntryOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto RIWriteProcListEntry_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psRIWriteProcListEntryIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRIWriteProcListEntryIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psRIWriteProcListEntryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto RIWriteProcListEntry_exit; + } + } + } + + if (psRIWriteProcListEntryIN->ui32TextBSize != 0) + { + uiTextBInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiTextBInt, (const void __user *)psRIWriteProcListEntryIN->puiTextB, + psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psRIWriteProcListEntryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto RIWriteProcListEntry_exit; + } + ((IMG_CHAR *) + uiTextBInt)[(psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) - 1] = + '\0'; + } + + psRIWriteProcListEntryOUT->eError = + RIWriteProcListEntryKM(psRIWriteProcListEntryIN->ui32TextBSize, + uiTextBInt, + psRIWriteProcListEntryIN->ui64Size, + psRIWriteProcListEntryIN->ui64DevVAddr, &psRIHandleInt); + /* Exit early if bridged call fails */ + if (unlikely(psRIWriteProcListEntryOUT->eError != PVRSRV_OK)) + { + goto RIWriteProcListEntry_exit; + } + + /* Lock over handle creation. 
*/ + LockHandle(psConnection->psHandleBase); + + psRIWriteProcListEntryOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psRIWriteProcListEntryOUT-> + hRIHandle, + (void *)psRIHandleInt, + PVRSRV_HANDLE_TYPE_RI_HANDLE, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _RIWriteProcListEntrypsRIHandleIntRelease); + if (unlikely(psRIWriteProcListEntryOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RIWriteProcListEntry_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +RIWriteProcListEntry_exit: + + if (psRIWriteProcListEntryOUT->eError != PVRSRV_OK) + { + if (psRIHandleInt) + { + RIDeleteMEMDESCEntryKM(psRIHandleInt); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRIUpdateMEMDESCAddr(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRIUpdateMEMDESCAddrIN_UI8, + IMG_UINT8 * psRIUpdateMEMDESCAddrOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR *psRIUpdateMEMDESCAddrIN = + (PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR *) IMG_OFFSET_ADDR(psRIUpdateMEMDESCAddrIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR *psRIUpdateMEMDESCAddrOUT = + (PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR *) IMG_OFFSET_ADDR(psRIUpdateMEMDESCAddrOUT_UI8, + 0); + + IMG_HANDLE hRIHandle = psRIUpdateMEMDESCAddrIN->hRIHandle; + RI_HANDLE psRIHandleInt = NULL; + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRIUpdateMEMDESCAddrOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psRIHandleInt, + hRIHandle, PVRSRV_HANDLE_TYPE_RI_HANDLE, IMG_TRUE); + if (unlikely(psRIUpdateMEMDESCAddrOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RIUpdateMEMDESCAddr_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRIUpdateMEMDESCAddrOUT->eError = + RIUpdateMEMDESCAddrKM(psRIHandleInt, psRIUpdateMEMDESCAddrIN->sAddr); + +RIUpdateMEMDESCAddr_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psRIHandleInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hRIHandle, PVRSRV_HANDLE_TYPE_RI_HANDLE); + } + /* Release now we have cleaned up look up handles. */ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +static IMG_INT +PVRSRVBridgeRIDeleteMEMDESCEntry(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRIDeleteMEMDESCEntryIN_UI8, + IMG_UINT8 * psRIDeleteMEMDESCEntryOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY *psRIDeleteMEMDESCEntryIN = + (PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY *) IMG_OFFSET_ADDR(psRIDeleteMEMDESCEntryIN_UI8, + 0); + PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY *psRIDeleteMEMDESCEntryOUT = + (PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY *) + IMG_OFFSET_ADDR(psRIDeleteMEMDESCEntryOUT_UI8, 0); + + /* Lock over handle destruction. 
*/ + LockHandle(psConnection->psHandleBase); + + psRIDeleteMEMDESCEntryOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) psRIDeleteMEMDESCEntryIN->hRIHandle, + PVRSRV_HANDLE_TYPE_RI_HANDLE); + if (unlikely((psRIDeleteMEMDESCEntryOUT->eError != PVRSRV_OK) && + (psRIDeleteMEMDESCEntryOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psRIDeleteMEMDESCEntryOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto RIDeleteMEMDESCEntry_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +RIDeleteMEMDESCEntry_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgeRIDumpList(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psRIDumpListIN_UI8, + IMG_UINT8 * psRIDumpListOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RIDUMPLIST *psRIDumpListIN = + (PVRSRV_BRIDGE_IN_RIDUMPLIST *) IMG_OFFSET_ADDR(psRIDumpListIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RIDUMPLIST *psRIDumpListOUT = + (PVRSRV_BRIDGE_OUT_RIDUMPLIST *) IMG_OFFSET_ADDR(psRIDumpListOUT_UI8, 0); + + IMG_HANDLE hPMRHandle = psRIDumpListIN->hPMRHandle; + PMR *psPMRHandleInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psRIDumpListOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psPMRHandleInt, + hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); + if (unlikely(psRIDumpListOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto RIDumpList_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psRIDumpListOUT->eError = RIDumpListKM(psPMRHandleInt); + +RIDumpList_exit: + + /* Lock over handle lookup cleanup. 
 */
	LockHandle(psConnection->psHandleBase);

	/* Unreference the previously looked up handle */
	if (psPMRHandleInt)
	{
		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
					    hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
	}
	/* Release now we have cleaned up look up handles. */
	UnlockHandle(psConnection->psHandleBase);

	return 0;
}

/* Bridge entry for RIDumpAll: takes no inputs and forwards straight to
 * RIDumpAllKM(); both the connection and the IN structure are unused. */
static IMG_INT
PVRSRVBridgeRIDumpAll(IMG_UINT32 ui32DispatchTableEntry,
		      IMG_UINT8 * psRIDumpAllIN_UI8,
		      IMG_UINT8 * psRIDumpAllOUT_UI8, CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_RIDUMPALL *psRIDumpAllIN =
	    (PVRSRV_BRIDGE_IN_RIDUMPALL *) IMG_OFFSET_ADDR(psRIDumpAllIN_UI8, 0);
	PVRSRV_BRIDGE_OUT_RIDUMPALL *psRIDumpAllOUT =
	    (PVRSRV_BRIDGE_OUT_RIDUMPALL *) IMG_OFFSET_ADDR(psRIDumpAllOUT_UI8, 0);

	PVR_UNREFERENCED_PARAMETER(psConnection);
	PVR_UNREFERENCED_PARAMETER(psRIDumpAllIN);

	psRIDumpAllOUT->eError = RIDumpAllKM();

	return 0;
}

/* Bridge entry for RIDumpProcess: forwards the caller-supplied PID to
 * RIDumpProcessKM(); no handle lookup is needed. */
static IMG_INT
PVRSRVBridgeRIDumpProcess(IMG_UINT32 ui32DispatchTableEntry,
			  IMG_UINT8 * psRIDumpProcessIN_UI8,
			  IMG_UINT8 * psRIDumpProcessOUT_UI8, CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_RIDUMPPROCESS *psRIDumpProcessIN =
	    (PVRSRV_BRIDGE_IN_RIDUMPPROCESS *) IMG_OFFSET_ADDR(psRIDumpProcessIN_UI8, 0);
	PVRSRV_BRIDGE_OUT_RIDUMPPROCESS *psRIDumpProcessOUT =
	    (PVRSRV_BRIDGE_OUT_RIDUMPPROCESS *) IMG_OFFSET_ADDR(psRIDumpProcessOUT_UI8, 0);

	PVR_UNREFERENCED_PARAMETER(psConnection);

	psRIDumpProcessOUT->eError = RIDumpProcessKM(psRIDumpProcessIN->ui32Pid);

	return 0;
}

/* Bridge entry for RIWritePMREntryWithOwner: looks up the PMR behind the
 * client handle and records an RI entry tagged with ui32Owner via
 * RIWritePMREntryWithOwnerKM(). Lock discipline mirrors RIDumpList above. */
static IMG_INT
PVRSRVBridgeRIWritePMREntryWithOwner(IMG_UINT32 ui32DispatchTableEntry,
				     IMG_UINT8 * psRIWritePMREntryWithOwnerIN_UI8,
				     IMG_UINT8 * psRIWritePMREntryWithOwnerOUT_UI8,
				     CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER *psRIWritePMREntryWithOwnerIN =
	    (PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER *)
	    IMG_OFFSET_ADDR(psRIWritePMREntryWithOwnerIN_UI8, 0);
	PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER *psRIWritePMREntryWithOwnerOUT =
	    (PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER *)
	    IMG_OFFSET_ADDR(psRIWritePMREntryWithOwnerOUT_UI8, 0);

	IMG_HANDLE hPMRHandle = psRIWritePMREntryWithOwnerIN->hPMRHandle;
	PMR *psPMRHandleInt = NULL;

	/* Lock over handle lookup. */
	LockHandle(psConnection->psHandleBase);

	/* Look up the address from the handle */
	psRIWritePMREntryWithOwnerOUT->eError =
	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
				       (void **)&psPMRHandleInt,
				       hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
	if (unlikely(psRIWritePMREntryWithOwnerOUT->eError != PVRSRV_OK))
	{
		UnlockHandle(psConnection->psHandleBase);
		goto RIWritePMREntryWithOwner_exit;
	}
	/* Release now we have looked up handles. */
	UnlockHandle(psConnection->psHandleBase);

	psRIWritePMREntryWithOwnerOUT->eError =
	    RIWritePMREntryWithOwnerKM(psPMRHandleInt, psRIWritePMREntryWithOwnerIN->ui32Owner);

RIWritePMREntryWithOwner_exit:

	/* Lock over handle lookup cleanup. */
	LockHandle(psConnection->psHandleBase);

	/* Unreference the previously looked up handle */
	if (psPMRHandleInt)
	{
		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
					    hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
	}
	/* Release now we have cleaned up look up handles.
 */
	UnlockHandle(psConnection->psHandleBase);

	return 0;
}

/* ***************************************************************************
 * Server bridge dispatch related glue
 */

PVRSRV_ERROR InitRIBridge(void);
PVRSRV_ERROR DeinitRIBridge(void);

/*
 * Register all RI functions with services
 */
PVRSRV_ERROR InitRIBridge(void)
{
	/* Each entry maps a (bridge group, function id) pair onto its handler.
	 * The trailing NULL is the per-entry lock; NULL means no extra lock. */

	SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPMRENTRY,
			      PVRSRVBridgeRIWritePMREntry, NULL);

	SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEMEMDESCENTRY,
			      PVRSRVBridgeRIWriteMEMDESCEntry, NULL);

	SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPROCLISTENTRY,
			      PVRSRVBridgeRIWriteProcListEntry, NULL);

	SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCADDR,
			      PVRSRVBridgeRIUpdateMEMDESCAddr, NULL);

	SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDELETEMEMDESCENTRY,
			      PVRSRVBridgeRIDeleteMEMDESCEntry, NULL);

	SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPLIST, PVRSRVBridgeRIDumpList,
			      NULL);

	SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPALL, PVRSRVBridgeRIDumpAll,
			      NULL);

	SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPPROCESS,
			      PVRSRVBridgeRIDumpProcess, NULL);

	SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPMRENTRYWITHOWNER,
			      PVRSRVBridgeRIWritePMREntryWithOwner, NULL);

	return PVRSRV_OK;
}

/*
 * Unregister all ri functions with services
 */
PVRSRV_ERROR DeinitRIBridge(void)
{
	/* Mirror of InitRIBridge: remove every entry registered above. */

	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPMRENTRY);

	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEMEMDESCENTRY);

	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPROCLISTENTRY);

	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCADDR);

	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDELETEMEMDESCENTRY);

	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPLIST);

	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPALL);

	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPPROCESS);

	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPMRENTRYWITHOWNER);

	return PVRSRV_OK;
}
diff --git a/drivers/gpu/drm/phytium/octopus/server_srvcore_bridge.c b/drivers/gpu/drm/phytium/octopus/server_srvcore_bridge.c
new file mode 100644
index 000000000000..66eb9b36536c
--- /dev/null
+++ b/drivers/gpu/drm/phytium/octopus/server_srvcore_bridge.c
@@ -0,0 +1,1019 @@
/*******************************************************************************
@File
@Title          Server bridge for srvcore
@Copyright      Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved
@Description    Implements the server side of the bridge for srvcore
@License        Dual MIT/GPLv2

The contents of this file are subject to the MIT license as set out below.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

Alternatively, the contents of this file may be used under the terms of
the GNU General Public License Version 2 ("GPL") in which case the provisions
of GPL are applicable instead of those above.
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*******************************************************************************/

/* NOTE(review): the next include lost its header name in patch transport
 * (angle-bracket payloads were stripped); restore the original header
 * before building — TODO confirm against the pristine patch. */
#include

#include "img_defs.h"

#include "srvcore.h"
#include "info_page.h"
#include "proc_stats.h"
#include "rgx_fwif_alignchecks.h"

#include "common_srvcore_bridge.h"

#include "allocmem.h"
#include "pvr_debug.h"
#include "connection_server.h"
#include "pvr_bridge.h"
#if defined(SUPPORT_RGX)
#include "rgx_bridge.h"
#endif
#include "srvcore.h"
#include "handle.h"

/* NOTE(review): header name also stripped here — TODO confirm. */
#include

/* ***************************************************************************
 * Server-side bridge entry points
 */

/* Bridge entry for Connect: performs the initial client handshake, passing
 * the client's flags and DDK build/version numbers to PVRSRVConnectKM() and
 * returning the kernel architecture, capability flags and packed BVNC. */
static IMG_INT
PVRSRVBridgeConnect(IMG_UINT32 ui32DispatchTableEntry,
		    IMG_UINT8 * psConnectIN_UI8,
		    IMG_UINT8 * psConnectOUT_UI8, CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_CONNECT *psConnectIN =
	    (PVRSRV_BRIDGE_IN_CONNECT *) IMG_OFFSET_ADDR(psConnectIN_UI8, 0);
	PVRSRV_BRIDGE_OUT_CONNECT *psConnectOUT =
	    (PVRSRV_BRIDGE_OUT_CONNECT *) IMG_OFFSET_ADDR(psConnectOUT_UI8, 0);

	psConnectOUT->eError =
	    PVRSRVConnectKM(psConnection, OSGetDevNode(psConnection),
			    psConnectIN->ui32Flags,
			    psConnectIN->ui32ClientBuildOptions,
			    psConnectIN->ui32ClientDDKVersion,
			    psConnectIN->ui32ClientDDKBuild,
			    &psConnectOUT->ui8KernelArch,
			    &psConnectOUT->ui32CapabilityFlags, &psConnectOUT->ui64PackedBvnc);

	return 0;
}

/* Bridge entry for Disconnect: no parameters are used; the per-connection
 * teardown is done elsewhere and PVRSRVDisconnectKM() takes no arguments. */
static IMG_INT
PVRSRVBridgeDisconnect(IMG_UINT32 ui32DispatchTableEntry,
		       IMG_UINT8 * psDisconnectIN_UI8,
		       IMG_UINT8 * psDisconnectOUT_UI8, CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_DISCONNECT *psDisconnectIN =
	    (PVRSRV_BRIDGE_IN_DISCONNECT *) IMG_OFFSET_ADDR(psDisconnectIN_UI8, 0);
	PVRSRV_BRIDGE_OUT_DISCONNECT *psDisconnectOUT =
	    (PVRSRV_BRIDGE_OUT_DISCONNECT *) IMG_OFFSET_ADDR(psDisconnectOUT_UI8, 0);

	PVR_UNREFERENCED_PARAMETER(psConnection);
	PVR_UNREFERENCED_PARAMETER(psDisconnectIN);

	psDisconnectOUT->eError = PVRSRVDisconnectKM();

	return 0;
}

/* Handle-release callback: invoked by the handle manager when the shared
 * global event object handle is destroyed. */
static PVRSRV_ERROR
_AcquireGlobalEventObjecthGlobalEventObjectIntRelease(void *pvData)
{
	PVRSRV_ERROR eError;
	eError = PVRSRVReleaseGlobalEventObjectKM((IMG_HANDLE) pvData);
	return eError;
}

/* Bridge entry for AcquireGlobalEventObject: acquires the driver-wide event
 * object and wraps it in a per-connection handle whose release callback
 * drops the acquisition again. */
static IMG_INT
PVRSRVBridgeAcquireGlobalEventObject(IMG_UINT32 ui32DispatchTableEntry,
				     IMG_UINT8 * psAcquireGlobalEventObjectIN_UI8,
				     IMG_UINT8 * psAcquireGlobalEventObjectOUT_UI8,
				     CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT *psAcquireGlobalEventObjectIN =
	    (PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT *)
	    IMG_OFFSET_ADDR(psAcquireGlobalEventObjectIN_UI8, 0);
	PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT *psAcquireGlobalEventObjectOUT =
	    (PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT *)
	    IMG_OFFSET_ADDR(psAcquireGlobalEventObjectOUT_UI8, 0);

	IMG_HANDLE hGlobalEventObjectInt = NULL;

	PVR_UNREFERENCED_PARAMETER(psAcquireGlobalEventObjectIN);

	psAcquireGlobalEventObjectOUT->eError =
	    PVRSRVAcquireGlobalEventObjectKM(&hGlobalEventObjectInt);
	/* Exit early if bridged call fails */
	if (unlikely(psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK))
	{
		goto AcquireGlobalEventObject_exit;
	}

	/* Lock over handle creation. */
	LockHandle(psConnection->psHandleBase);

	psAcquireGlobalEventObjectOUT->eError =
	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
				      &psAcquireGlobalEventObjectOUT->hGlobalEventObject,
				      (void *)hGlobalEventObjectInt,
				      PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
				      (PFN_HANDLE_RELEASE) &
				      _AcquireGlobalEventObjecthGlobalEventObjectIntRelease);
	if (unlikely(psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK))
	{
		UnlockHandle(psConnection->psHandleBase);
		goto AcquireGlobalEventObject_exit;
	}

	/* Release now we have created handles.
 */
	UnlockHandle(psConnection->psHandleBase);

AcquireGlobalEventObject_exit:

	/* On failure, undo the acquisition that never made it into a handle. */
	if (psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK)
	{
		if (hGlobalEventObjectInt)
		{
			PVRSRVReleaseGlobalEventObjectKM(hGlobalEventObjectInt);
		}
	}

	return 0;
}

/* Bridge entry for ReleaseGlobalEventObject: destroys the client's shared
 * event-object handle; the release callback registered at acquire time
 * performs the actual PVRSRVReleaseGlobalEventObjectKM(). PVRSRV_ERROR_RETRY
 * is an expected staged-destroy outcome and is not logged. */
static IMG_INT
PVRSRVBridgeReleaseGlobalEventObject(IMG_UINT32 ui32DispatchTableEntry,
				     IMG_UINT8 * psReleaseGlobalEventObjectIN_UI8,
				     IMG_UINT8 * psReleaseGlobalEventObjectOUT_UI8,
				     CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT *psReleaseGlobalEventObjectIN =
	    (PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT *)
	    IMG_OFFSET_ADDR(psReleaseGlobalEventObjectIN_UI8, 0);
	PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT *psReleaseGlobalEventObjectOUT =
	    (PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT *)
	    IMG_OFFSET_ADDR(psReleaseGlobalEventObjectOUT_UI8, 0);

	/* Lock over handle destruction. */
	LockHandle(psConnection->psHandleBase);

	psReleaseGlobalEventObjectOUT->eError =
	    PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase,
					    (IMG_HANDLE) psReleaseGlobalEventObjectIN->
					    hGlobalEventObject,
					    PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
	if (unlikely
	    ((psReleaseGlobalEventObjectOUT->eError != PVRSRV_OK)
	     && (psReleaseGlobalEventObjectOUT->eError != PVRSRV_ERROR_RETRY)))
	{
		PVR_DPF((PVR_DBG_ERROR,
			 "%s: %s",
			 __func__, PVRSRVGetErrorString(psReleaseGlobalEventObjectOUT->eError)));
		UnlockHandle(psConnection->psHandleBase);
		goto ReleaseGlobalEventObject_exit;
	}

	/* Release now we have destroyed handles.
 */
	UnlockHandle(psConnection->psHandleBase);

ReleaseGlobalEventObject_exit:

	return 0;
}

/* Handle-release callback for an opened OS event: closes the per-open
 * OS event handle when its bridge handle is destroyed. */
static PVRSRV_ERROR _EventObjectOpenhOSEventIntRelease(void *pvData)
{
	PVRSRV_ERROR eError;
	eError = OSEventObjectClose((IMG_HANDLE) pvData);
	return eError;
}

/* Bridge entry for EventObjectOpen: resolves the shared event object handle,
 * opens a per-connection OS event on it via OSEventObjectOpen(), and returns
 * a new handle of type EVENT_OBJECT_CONNECT for the opened event. */
static IMG_INT
PVRSRVBridgeEventObjectOpen(IMG_UINT32 ui32DispatchTableEntry,
			    IMG_UINT8 * psEventObjectOpenIN_UI8,
			    IMG_UINT8 * psEventObjectOpenOUT_UI8, CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN *psEventObjectOpenIN =
	    (PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN *) IMG_OFFSET_ADDR(psEventObjectOpenIN_UI8, 0);
	PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN *psEventObjectOpenOUT =
	    (PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN *) IMG_OFFSET_ADDR(psEventObjectOpenOUT_UI8, 0);

	IMG_HANDLE hEventObject = psEventObjectOpenIN->hEventObject;
	IMG_HANDLE hEventObjectInt = NULL;
	IMG_HANDLE hOSEventInt = NULL;

	/* Lock over handle lookup. */
	LockHandle(psConnection->psHandleBase);

	/* Look up the address from the handle */
	psEventObjectOpenOUT->eError =
	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
				       (void **)&hEventObjectInt,
				       hEventObject,
				       PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT, IMG_TRUE);
	if (unlikely(psEventObjectOpenOUT->eError != PVRSRV_OK))
	{
		UnlockHandle(psConnection->psHandleBase);
		goto EventObjectOpen_exit;
	}
	/* Release now we have looked up handles. */
	UnlockHandle(psConnection->psHandleBase);

	psEventObjectOpenOUT->eError = OSEventObjectOpen(hEventObjectInt, &hOSEventInt);
	/* Exit early if bridged call fails */
	if (unlikely(psEventObjectOpenOUT->eError != PVRSRV_OK))
	{
		goto EventObjectOpen_exit;
	}

	/* Lock over handle creation.
 */
	LockHandle(psConnection->psHandleBase);

	/* Wrap the opened OS event in a connection handle; the release
	 * callback closes the event when the handle dies. */
	psEventObjectOpenOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
								 &psEventObjectOpenOUT->hOSEvent,
								 (void *)hOSEventInt,
								 PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
								 PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
								 (PFN_HANDLE_RELEASE) &
								 _EventObjectOpenhOSEventIntRelease);
	if (unlikely(psEventObjectOpenOUT->eError != PVRSRV_OK))
	{
		UnlockHandle(psConnection->psHandleBase);
		goto EventObjectOpen_exit;
	}

	/* Release now we have created handles. */
	UnlockHandle(psConnection->psHandleBase);

EventObjectOpen_exit:

	/* Lock over handle lookup cleanup. */
	LockHandle(psConnection->psHandleBase);

	/* Unreference the previously looked up handle */
	if (hEventObjectInt)
	{
		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
					    hEventObject, PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
	}
	/* Release now we have cleaned up look up handles. */
	UnlockHandle(psConnection->psHandleBase);

	/* On failure, close the OS event that never got a handle. */
	if (psEventObjectOpenOUT->eError != PVRSRV_OK)
	{
		if (hOSEventInt)
		{
			OSEventObjectClose(hOSEventInt);
		}
	}

	return 0;
}

/* Bridge entry for EventObjectWait: resolves the opened-event handle and
 * blocks in OSEventObjectWait() until the event is signalled. */
static IMG_INT
PVRSRVBridgeEventObjectWait(IMG_UINT32 ui32DispatchTableEntry,
			    IMG_UINT8 * psEventObjectWaitIN_UI8,
			    IMG_UINT8 * psEventObjectWaitOUT_UI8, CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT *psEventObjectWaitIN =
	    (PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT *) IMG_OFFSET_ADDR(psEventObjectWaitIN_UI8, 0);
	PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT *psEventObjectWaitOUT =
	    (PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT *) IMG_OFFSET_ADDR(psEventObjectWaitOUT_UI8, 0);

	IMG_HANDLE hOSEventKM = psEventObjectWaitIN->hOSEventKM;
	IMG_HANDLE hOSEventKMInt = NULL;

	/* Lock over handle lookup.
 */
	LockHandle(psConnection->psHandleBase);

	/* Look up the address from the handle */
	psEventObjectWaitOUT->eError =
	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
				       (void **)&hOSEventKMInt,
				       hOSEventKM,
				       PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT, IMG_TRUE);
	if (unlikely(psEventObjectWaitOUT->eError != PVRSRV_OK))
	{
		UnlockHandle(psConnection->psHandleBase);
		goto EventObjectWait_exit;
	}
	/* Release now we have looked up handles. */
	UnlockHandle(psConnection->psHandleBase);

	/* Note: the lock is dropped before waiting, so the wait cannot
	 * stall other handle operations on this connection. */
	psEventObjectWaitOUT->eError = OSEventObjectWait(hOSEventKMInt);

EventObjectWait_exit:

	/* Lock over handle lookup cleanup. */
	LockHandle(psConnection->psHandleBase);

	/* Unreference the previously looked up handle */
	if (hOSEventKMInt)
	{
		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
					    hOSEventKM, PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
	}
	/* Release now we have cleaned up look up handles. */
	UnlockHandle(psConnection->psHandleBase);

	return 0;
}

/* Bridge entry for EventObjectClose: destroys the opened-event handle; the
 * release callback performs the actual OSEventObjectClose(). */
static IMG_INT
PVRSRVBridgeEventObjectClose(IMG_UINT32 ui32DispatchTableEntry,
			     IMG_UINT8 * psEventObjectCloseIN_UI8,
			     IMG_UINT8 * psEventObjectCloseOUT_UI8, CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE *psEventObjectCloseIN =
	    (PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE *) IMG_OFFSET_ADDR(psEventObjectCloseIN_UI8, 0);
	PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE *psEventObjectCloseOUT =
	    (PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE *) IMG_OFFSET_ADDR(psEventObjectCloseOUT_UI8, 0);

	/* Lock over handle destruction.
 */
	LockHandle(psConnection->psHandleBase);

	psEventObjectCloseOUT->eError =
	    PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase,
					    (IMG_HANDLE) psEventObjectCloseIN->hOSEventKM,
					    PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
	if (unlikely((psEventObjectCloseOUT->eError != PVRSRV_OK) &&
		     (psEventObjectCloseOUT->eError != PVRSRV_ERROR_RETRY)))
	{
		PVR_DPF((PVR_DBG_ERROR,
			 "%s: %s", __func__, PVRSRVGetErrorString(psEventObjectCloseOUT->eError)));
		UnlockHandle(psConnection->psHandleBase);
		goto EventObjectClose_exit;
	}

	/* Release now we have destroyed handles. */
	UnlockHandle(psConnection->psHandleBase);

EventObjectClose_exit:

	return 0;
}

/* Bridge entry for DumpDebugInfo: forwards the verbosity level to
 * PVRSRVDumpDebugInfoKM() for the connection's device node. */
static IMG_INT
PVRSRVBridgeDumpDebugInfo(IMG_UINT32 ui32DispatchTableEntry,
			  IMG_UINT8 * psDumpDebugInfoIN_UI8,
			  IMG_UINT8 * psDumpDebugInfoOUT_UI8, CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_DUMPDEBUGINFO *psDumpDebugInfoIN =
	    (PVRSRV_BRIDGE_IN_DUMPDEBUGINFO *) IMG_OFFSET_ADDR(psDumpDebugInfoIN_UI8, 0);
	PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO *psDumpDebugInfoOUT =
	    (PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO *) IMG_OFFSET_ADDR(psDumpDebugInfoOUT_UI8, 0);

	psDumpDebugInfoOUT->eError =
	    PVRSRVDumpDebugInfoKM(psConnection, OSGetDevNode(psConnection),
				  psDumpDebugInfoIN->ui32VerbLevel);

	return 0;
}

/* Bridge entry for GetDevClockSpeed: returns the device clock speed via
 * PVRSRVGetDevClockSpeedKM(). */
static IMG_INT
PVRSRVBridgeGetDevClockSpeed(IMG_UINT32 ui32DispatchTableEntry,
			     IMG_UINT8 * psGetDevClockSpeedIN_UI8,
			     IMG_UINT8 * psGetDevClockSpeedOUT_UI8, CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED *psGetDevClockSpeedIN =
	    (PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED *) IMG_OFFSET_ADDR(psGetDevClockSpeedIN_UI8, 0);
	PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED *psGetDevClockSpeedOUT =
	    (PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED *) IMG_OFFSET_ADDR(psGetDevClockSpeedOUT_UI8, 0);

	PVR_UNREFERENCED_PARAMETER(psGetDevClockSpeedIN);

	psGetDevClockSpeedOUT->eError =
	    PVRSRVGetDevClockSpeedKM(psConnection, OSGetDevNode(psConnection),
				     &psGetDevClockSpeedOUT->ui32ClockSpeed);

	return 0;
}

/* Bridge entry for HWOpTimeout: notifies the server of a hardware-operation
 * timeout observed by the client. */
static IMG_INT
PVRSRVBridgeHWOpTimeout(IMG_UINT32 ui32DispatchTableEntry,
			IMG_UINT8 * psHWOpTimeoutIN_UI8,
			IMG_UINT8 * psHWOpTimeoutOUT_UI8, CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_HWOPTIMEOUT *psHWOpTimeoutIN =
	    (PVRSRV_BRIDGE_IN_HWOPTIMEOUT *) IMG_OFFSET_ADDR(psHWOpTimeoutIN_UI8, 0);
	PVRSRV_BRIDGE_OUT_HWOPTIMEOUT *psHWOpTimeoutOUT =
	    (PVRSRV_BRIDGE_OUT_HWOPTIMEOUT *) IMG_OFFSET_ADDR(psHWOpTimeoutOUT_UI8, 0);

	PVR_UNREFERENCED_PARAMETER(psHWOpTimeoutIN);

	psHWOpTimeoutOUT->eError = PVRSRVHWOpTimeoutKM(psConnection, OSGetDevNode(psConnection));

	return 0;
}

/* Bridge entry for AlignmentCheck: copies the client's array of structure
 * alignment values in from user space (bounded by RGXFW_ALIGN_CHECKS_UM_MAX)
 * and has PVRSRVAlignmentCheckKM() verify them against the firmware's. The
 * array is staged either in the spare tail of the bridge IN buffer or in a
 * temporary allocation. */
static IMG_INT
PVRSRVBridgeAlignmentCheck(IMG_UINT32 ui32DispatchTableEntry,
			   IMG_UINT8 * psAlignmentCheckIN_UI8,
			   IMG_UINT8 * psAlignmentCheckOUT_UI8, CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_ALIGNMENTCHECK *psAlignmentCheckIN =
	    (PVRSRV_BRIDGE_IN_ALIGNMENTCHECK *) IMG_OFFSET_ADDR(psAlignmentCheckIN_UI8, 0);
	PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK *psAlignmentCheckOUT =
	    (PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK *) IMG_OFFSET_ADDR(psAlignmentCheckOUT_UI8, 0);

	IMG_UINT32 *ui32AlignChecksInt = NULL;

	IMG_UINT32 ui32NextOffset = 0;
	IMG_BYTE *pArrayArgsBuffer = NULL;
#if !defined(INTEGRITY_OS)
	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
#endif

	IMG_UINT32 ui32BufferSize =
	    (psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32)) + 0;

	/* Reject before allocating anything from the client-supplied size. */
	if (unlikely(psAlignmentCheckIN->ui32AlignChecksSize > RGXFW_ALIGN_CHECKS_UM_MAX))
	{
		psAlignmentCheckOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
		goto AlignmentCheck_exit;
	}

	if (ui32BufferSize != 0)
	{
#if !defined(INTEGRITY_OS)
		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
		IMG_UINT32 ui32InBufferOffset =
		    PVR_ALIGN(sizeof(*psAlignmentCheckIN), sizeof(unsigned long));
		IMG_UINT32 ui32InBufferExcessSize =
		    ui32InBufferOffset >=
		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;

		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
		if (bHaveEnoughSpace)
		{
			IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psAlignmentCheckIN;

			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
		}
		else
#endif
		{
			pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize);

			if (!pArrayArgsBuffer)
			{
				psAlignmentCheckOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
				goto AlignmentCheck_exit;
			}
		}
	}

	if (psAlignmentCheckIN->ui32AlignChecksSize != 0)
	{
		ui32AlignChecksInt =
		    (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
		ui32NextOffset += psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32);
	}

	/* Copy the data over */
	if (psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32) > 0)
	{
		if (OSCopyFromUser
		    (NULL, ui32AlignChecksInt,
		     (const void __user *)psAlignmentCheckIN->pui32AlignChecks,
		     psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32)) != PVRSRV_OK)
		{
			psAlignmentCheckOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;

			goto AlignmentCheck_exit;
		}
	}

	psAlignmentCheckOUT->eError =
	    PVRSRVAlignmentCheckKM(psConnection, OSGetDevNode(psConnection),
				   psAlignmentCheckIN->ui32AlignChecksSize, ui32AlignChecksInt);

AlignmentCheck_exit:

	/* Allocated space should be equal to the last updated offset */
	PVR_ASSERT(ui32BufferSize == ui32NextOffset);

#if defined(INTEGRITY_OS)
	if (pArrayArgsBuffer)
#else
	if (!bHaveEnoughSpace && pArrayArgsBuffer)
#endif
		OSFreeMemNoStats(pArrayArgsBuffer);

	return 0;
}

/* Bridge entry for GetDeviceStatus: returns the device status word.
 * NOTE(review): the OUT field is spelled "ui32DeviceSatus" in the generated
 * headers; kept as-is to match them. */
static IMG_INT
PVRSRVBridgeGetDeviceStatus(IMG_UINT32 ui32DispatchTableEntry,
			    IMG_UINT8 * psGetDeviceStatusIN_UI8,
			    IMG_UINT8 * psGetDeviceStatusOUT_UI8, CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_GETDEVICESTATUS *psGetDeviceStatusIN =
	    (PVRSRV_BRIDGE_IN_GETDEVICESTATUS *) IMG_OFFSET_ADDR(psGetDeviceStatusIN_UI8, 0);
	PVRSRV_BRIDGE_OUT_GETDEVICESTATUS *psGetDeviceStatusOUT =
	    (PVRSRV_BRIDGE_OUT_GETDEVICESTATUS *) IMG_OFFSET_ADDR(psGetDeviceStatusOUT_UI8, 0);

	PVR_UNREFERENCED_PARAMETER(psGetDeviceStatusIN);

	psGetDeviceStatusOUT->eError =
	    PVRSRVGetDeviceStatusKM(psConnection, OSGetDevNode(psConnection),
				    &psGetDeviceStatusOUT->ui32DeviceSatus);

	return 0;
}

/* Bridge entry for GetMultiCoreInfo: fills a caller-sized array (capped at
 * 8 entries) of per-core capability words and the core count, then copies
 * the array back out to the user pointer echoed in the OUT structure. */
static IMG_INT
PVRSRVBridgeGetMultiCoreInfo(IMG_UINT32 ui32DispatchTableEntry,
			     IMG_UINT8 * psGetMultiCoreInfoIN_UI8,
			     IMG_UINT8 * psGetMultiCoreInfoOUT_UI8, CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_GETMULTICOREINFO *psGetMultiCoreInfoIN =
	    (PVRSRV_BRIDGE_IN_GETMULTICOREINFO *) IMG_OFFSET_ADDR(psGetMultiCoreInfoIN_UI8, 0);
	PVRSRV_BRIDGE_OUT_GETMULTICOREINFO *psGetMultiCoreInfoOUT =
	    (PVRSRV_BRIDGE_OUT_GETMULTICOREINFO *) IMG_OFFSET_ADDR(psGetMultiCoreInfoOUT_UI8, 0);

	IMG_UINT64 *pui64CapsInt = NULL;

	IMG_UINT32 ui32NextOffset = 0;
	IMG_BYTE *pArrayArgsBuffer = NULL;
#if !defined(INTEGRITY_OS)
	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
#endif

	IMG_UINT32 ui32BufferSize = (psGetMultiCoreInfoIN->ui32CapsSize * sizeof(IMG_UINT64)) + 0;

	if (psGetMultiCoreInfoIN->ui32CapsSize > 8)
	{
		psGetMultiCoreInfoOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
		goto GetMultiCoreInfo_exit;
	}

	/* Echo the user-space destination pointer so the copy-out below and
	 * the client both address the same buffer. */
	psGetMultiCoreInfoOUT->pui64Caps = psGetMultiCoreInfoIN->pui64Caps;

	if (ui32BufferSize != 0)
	{
#if !defined(INTEGRITY_OS)
		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
		IMG_UINT32 ui32InBufferOffset =
		    PVR_ALIGN(sizeof(*psGetMultiCoreInfoIN), sizeof(unsigned long));
		IMG_UINT32 ui32InBufferExcessSize =
		    ui32InBufferOffset >=
		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;

		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
		if (bHaveEnoughSpace)
		{
			IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psGetMultiCoreInfoIN;

			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
		}
		else
#endif
		{
			pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize);

			if (!pArrayArgsBuffer)
			{
				psGetMultiCoreInfoOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
				goto GetMultiCoreInfo_exit;
			}
		}
	}

	if (psGetMultiCoreInfoIN->ui32CapsSize != 0)
	{
		pui64CapsInt = (IMG_UINT64 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
		ui32NextOffset += psGetMultiCoreInfoIN->ui32CapsSize * sizeof(IMG_UINT64);
	}

	psGetMultiCoreInfoOUT->eError =
	    PVRSRVGetMultiCoreInfoKM(psConnection, OSGetDevNode(psConnection),
				     psGetMultiCoreInfoIN->ui32CapsSize,
				     &psGetMultiCoreInfoOUT->ui32NumCores, pui64CapsInt);

	/* If dest ptr is non-null and we have data to copy */
	if ((pui64CapsInt) && ((psGetMultiCoreInfoIN->ui32CapsSize * sizeof(IMG_UINT64)) > 0))
	{
		if (unlikely
		    (OSCopyToUser
		     (NULL, (void __user *)psGetMultiCoreInfoOUT->pui64Caps, pui64CapsInt,
		      (psGetMultiCoreInfoIN->ui32CapsSize * sizeof(IMG_UINT64))) != PVRSRV_OK))
		{
			psGetMultiCoreInfoOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;

			goto GetMultiCoreInfo_exit;
		}
	}

GetMultiCoreInfo_exit:

	/* Allocated space should be equal to the last updated offset */
	PVR_ASSERT(ui32BufferSize == ui32NextOffset);

#if defined(INTEGRITY_OS)
	if (pArrayArgsBuffer)
#else
	if (!bHaveEnoughSpace && pArrayArgsBuffer)
#endif
		OSFreeMemNoStats(pArrayArgsBuffer);

	return 0;
}

/* Bridge entry for EventObjectWaitTimeout: like EventObjectWait, but bounded
 * by the caller-supplied timeout in microseconds. */
static IMG_INT
PVRSRVBridgeEventObjectWaitTimeout(IMG_UINT32 ui32DispatchTableEntry,
				   IMG_UINT8 * psEventObjectWaitTimeoutIN_UI8,
				   IMG_UINT8 * psEventObjectWaitTimeoutOUT_UI8,
				   CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT *psEventObjectWaitTimeoutIN =
	    (PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT *)
	    IMG_OFFSET_ADDR(psEventObjectWaitTimeoutIN_UI8, 0);
	PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT *psEventObjectWaitTimeoutOUT =
	    (PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT *)
	    IMG_OFFSET_ADDR(psEventObjectWaitTimeoutOUT_UI8, 0);

	IMG_HANDLE hOSEventKM = psEventObjectWaitTimeoutIN->hOSEventKM;
	IMG_HANDLE hOSEventKMInt = NULL;

	/* Lock over handle lookup. */
	LockHandle(psConnection->psHandleBase);

	/* Look up the address from the handle */
	psEventObjectWaitTimeoutOUT->eError =
	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
				       (void **)&hOSEventKMInt,
				       hOSEventKM,
				       PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT, IMG_TRUE);
	if (unlikely(psEventObjectWaitTimeoutOUT->eError != PVRSRV_OK))
	{
		UnlockHandle(psConnection->psHandleBase);
		goto EventObjectWaitTimeout_exit;
	}
	/* Release now we have looked up handles. */
	UnlockHandle(psConnection->psHandleBase);

	psEventObjectWaitTimeoutOUT->eError =
	    OSEventObjectWaitTimeout(hOSEventKMInt, psEventObjectWaitTimeoutIN->ui64uiTimeoutus);

EventObjectWaitTimeout_exit:

	/* Lock over handle lookup cleanup. */
	LockHandle(psConnection->psHandleBase);

	/* Unreference the previously looked up handle */
	if (hOSEventKMInt)
	{
		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
					    hOSEventKM, PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
	}
	/* Release now we have cleaned up look up handles.
 */
	UnlockHandle(psConnection->psHandleBase);

	return 0;
}

/* Bridge entry for FindProcessMemStats: fills a caller-sized array (bounded
 * by PVRSRV_PROCESS_STAT_TYPE_COUNT) with memory statistics for ui32PID via
 * PVRSRVFindProcessMemStatsKM(), then copies the array back out to the user
 * pointer echoed in the OUT structure. */
static IMG_INT
PVRSRVBridgeFindProcessMemStats(IMG_UINT32 ui32DispatchTableEntry,
				IMG_UINT8 * psFindProcessMemStatsIN_UI8,
				IMG_UINT8 * psFindProcessMemStatsOUT_UI8,
				CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS *psFindProcessMemStatsIN =
	    (PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS *) IMG_OFFSET_ADDR(psFindProcessMemStatsIN_UI8,
								     0);
	PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS *psFindProcessMemStatsOUT =
	    (PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS *) IMG_OFFSET_ADDR(psFindProcessMemStatsOUT_UI8,
								      0);

	IMG_UINT32 *pui32MemStatsArrayInt = NULL;

	IMG_UINT32 ui32NextOffset = 0;
	IMG_BYTE *pArrayArgsBuffer = NULL;
#if !defined(INTEGRITY_OS)
	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
#endif

	IMG_UINT32 ui32BufferSize = (psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32)) + 0;

	if (psFindProcessMemStatsIN->ui32ArrSize > PVRSRV_PROCESS_STAT_TYPE_COUNT)
	{
		psFindProcessMemStatsOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
		goto FindProcessMemStats_exit;
	}

	PVR_UNREFERENCED_PARAMETER(psConnection);

	/* Echo the user-space destination pointer for the copy-out below. */
	psFindProcessMemStatsOUT->pui32MemStatsArray = psFindProcessMemStatsIN->pui32MemStatsArray;

	if (ui32BufferSize != 0)
	{
#if !defined(INTEGRITY_OS)
		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
		IMG_UINT32 ui32InBufferOffset =
		    PVR_ALIGN(sizeof(*psFindProcessMemStatsIN), sizeof(unsigned long));
		IMG_UINT32 ui32InBufferExcessSize =
		    ui32InBufferOffset >=
		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;

		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
		if (bHaveEnoughSpace)
		{
			IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psFindProcessMemStatsIN;

			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
		}
		else
#endif
		{
			pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize);

			if (!pArrayArgsBuffer)
			{
				psFindProcessMemStatsOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
				goto FindProcessMemStats_exit;
			}
		}
	}

	if (psFindProcessMemStatsIN->ui32ArrSize != 0)
	{
		pui32MemStatsArrayInt =
		    (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
		ui32NextOffset += psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32);
	}

	psFindProcessMemStatsOUT->eError =
	    PVRSRVFindProcessMemStatsKM(psFindProcessMemStatsIN->ui32PID,
					psFindProcessMemStatsIN->ui32ArrSize,
					psFindProcessMemStatsIN->bbAllProcessStats,
					pui32MemStatsArrayInt);

	/* If dest ptr is non-null and we have data to copy */
	if ((pui32MemStatsArrayInt) &&
	    ((psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32)) > 0))
	{
		if (unlikely
		    (OSCopyToUser
		     (NULL, (void __user *)psFindProcessMemStatsOUT->pui32MemStatsArray,
		      pui32MemStatsArrayInt,
		      (psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32))) != PVRSRV_OK))
		{
			psFindProcessMemStatsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;

			goto FindProcessMemStats_exit;
		}
	}

FindProcessMemStats_exit:

	/* Allocated space should be equal to the last updated offset */
	PVR_ASSERT(ui32BufferSize == ui32NextOffset);

#if defined(INTEGRITY_OS)
	if (pArrayArgsBuffer)
#else
	if (!bHaveEnoughSpace && pArrayArgsBuffer)
#endif
		OSFreeMemNoStats(pArrayArgsBuffer);

	return 0;
}

/* Handle-release callback: drops the info-page PMR acquisition when its
 * handle is destroyed. */
static PVRSRV_ERROR _AcquireInfoPagepsPMRIntRelease(void *pvData)
{
	PVRSRV_ERROR eError;
	eError = PVRSRVReleaseInfoPageKM((PMR *) pvData);
	return eError;
}

/* Bridge entry for AcquireInfoPage: acquires the driver info-page PMR and
 * wraps it in a handle. Note this uses the per-PROCESS handle base
 * (psProcessHandleBase->psHandleBase), unlike the per-connection base used
 * by the other entry points in this file. */
static IMG_INT
PVRSRVBridgeAcquireInfoPage(IMG_UINT32 ui32DispatchTableEntry,
			    IMG_UINT8 * psAcquireInfoPageIN_UI8,
			    IMG_UINT8 * psAcquireInfoPageOUT_UI8, CONNECTION_DATA * psConnection)
{
	PVRSRV_BRIDGE_IN_ACQUIREINFOPAGE *psAcquireInfoPageIN =
	    (PVRSRV_BRIDGE_IN_ACQUIREINFOPAGE *) IMG_OFFSET_ADDR(psAcquireInfoPageIN_UI8, 0);
	PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE *psAcquireInfoPageOUT =
	    (PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE *) IMG_OFFSET_ADDR(psAcquireInfoPageOUT_UI8, 0);

	PMR *psPMRInt = NULL;

	PVR_UNREFERENCED_PARAMETER(psAcquireInfoPageIN);

	psAcquireInfoPageOUT->eError = PVRSRVAcquireInfoPageKM(&psPMRInt);
	/* Exit early if bridged call fails */
	if (unlikely(psAcquireInfoPageOUT->eError != PVRSRV_OK))
	{
		goto AcquireInfoPage_exit;
	}

	/* Lock over handle creation. */
	LockHandle(psConnection->psProcessHandleBase->psHandleBase);

	psAcquireInfoPageOUT->eError =
	    PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
				      &psAcquireInfoPageOUT->hPMR, (void *)psPMRInt,
				      PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT,
				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
				      (PFN_HANDLE_RELEASE) & _AcquireInfoPagepsPMRIntRelease);
	if (unlikely(psAcquireInfoPageOUT->eError != PVRSRV_OK))
	{
		UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
		goto AcquireInfoPage_exit;
	}

	/* Release now we have created handles.
*/ + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + +AcquireInfoPage_exit: + + if (psAcquireInfoPageOUT->eError != PVRSRV_OK) + { + if (psPMRInt) + { + PVRSRVReleaseInfoPageKM(psPMRInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeReleaseInfoPage(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psReleaseInfoPageIN_UI8, + IMG_UINT8 * psReleaseInfoPageOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_RELEASEINFOPAGE *psReleaseInfoPageIN = + (PVRSRV_BRIDGE_IN_RELEASEINFOPAGE *) IMG_OFFSET_ADDR(psReleaseInfoPageIN_UI8, 0); + PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE *psReleaseInfoPageOUT = + (PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE *) IMG_OFFSET_ADDR(psReleaseInfoPageOUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psProcessHandleBase->psHandleBase); + + psReleaseInfoPageOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psProcessHandleBase->psHandleBase, + (IMG_HANDLE) psReleaseInfoPageIN->hPMR, + PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT); + if (unlikely((psReleaseInfoPageOUT->eError != PVRSRV_OK) && + (psReleaseInfoPageOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", __func__, PVRSRVGetErrorString(psReleaseInfoPageOUT->eError))); + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + goto ReleaseInfoPage_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); + +ReleaseInfoPage_exit: + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitSRVCOREBridge(void); +PVRSRV_ERROR DeinitSRVCOREBridge(void); + +/* + * Register all SRVCORE functions with services + */ +PVRSRV_ERROR InitSRVCOREBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_CONNECT, + PVRSRVBridgeConnect, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_DISCONNECT, + PVRSRVBridgeDisconnect, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT, + PVRSRVBridgeAcquireGlobalEventObject, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT, + PVRSRVBridgeReleaseGlobalEventObject, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN, + PVRSRVBridgeEventObjectOpen, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT, + PVRSRVBridgeEventObjectWait, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE, + PVRSRVBridgeEventObjectClose, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO, + PVRSRVBridgeDumpDebugInfo, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED, + PVRSRVBridgeGetDevClockSpeed, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT, + PVRSRVBridgeHWOpTimeout, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK, + PVRSRVBridgeAlignmentCheck, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS, + PVRSRVBridgeGetDeviceStatus, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, 
PVRSRV_BRIDGE_SRVCORE_GETMULTICOREINFO, + PVRSRVBridgeGetMultiCoreInfo, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT, + PVRSRVBridgeEventObjectWaitTimeout, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS, + PVRSRVBridgeFindProcessMemStats, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ACQUIREINFOPAGE, + PVRSRVBridgeAcquireInfoPage, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_RELEASEINFOPAGE, + PVRSRVBridgeReleaseInfoPage, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all srvcore functions with services + */ +PVRSRV_ERROR DeinitSRVCOREBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_CONNECT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_DISCONNECT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETMULTICOREINFO); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, + 
PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ACQUIREINFOPAGE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_RELEASEINFOPAGE); + + return PVRSRV_OK; +} diff --git a/drivers/gpu/drm/phytium/octopus/server_sync_bridge.c b/drivers/gpu/drm/phytium/octopus/server_sync_bridge.c new file mode 100644 index 000000000000..ec731b909408 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/server_sync_bridge.c @@ -0,0 +1,730 @@ +/******************************************************************************* +@File +@Title Server bridge for sync +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for sync +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "sync.h" +#include "sync_server.h" +#include "pdump.h" +#include "pvrsrv_sync_km.h" +#include "sync_fallback_server.h" +#include "sync_checkpoint.h" + +#include "common_sync_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static PVRSRV_ERROR _AllocSyncPrimitiveBlockpsSyncHandleIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PVRSRVFreeSyncPrimitiveBlockKM((SYNC_PRIMITIVE_BLOCK *) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeAllocSyncPrimitiveBlock(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psAllocSyncPrimitiveBlockIN_UI8, + IMG_UINT8 * psAllocSyncPrimitiveBlockOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK *psAllocSyncPrimitiveBlockIN = + (PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK *) + IMG_OFFSET_ADDR(psAllocSyncPrimitiveBlockIN_UI8, 0); + PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK *psAllocSyncPrimitiveBlockOUT = + (PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK *) + IMG_OFFSET_ADDR(psAllocSyncPrimitiveBlockOUT_UI8, 0); + + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL; + PMR *pshSyncPMRInt = NULL; + + PVR_UNREFERENCED_PARAMETER(psAllocSyncPrimitiveBlockIN); + + psAllocSyncPrimitiveBlockOUT->hSyncHandle = NULL; + + psAllocSyncPrimitiveBlockOUT->eError = + PVRSRVAllocSyncPrimitiveBlockKM(psConnection, OSGetDevNode(psConnection), + &psSyncHandleInt, + &psAllocSyncPrimitiveBlockOUT->ui32SyncPrimVAddr, + &psAllocSyncPrimitiveBlockOUT->ui32SyncPrimBlockSize, + &pshSyncPMRInt); + /* Exit early if bridged call fails */ + if (unlikely(psAllocSyncPrimitiveBlockOUT->eError != 
PVRSRV_OK)) + { + goto AllocSyncPrimitiveBlock_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psAllocSyncPrimitiveBlockOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psAllocSyncPrimitiveBlockOUT-> + hSyncHandle, + (void *)psSyncHandleInt, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + (PFN_HANDLE_RELEASE) & + _AllocSyncPrimitiveBlockpsSyncHandleIntRelease); + if (unlikely(psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto AllocSyncPrimitiveBlock_exit; + } + + psAllocSyncPrimitiveBlockOUT->eError = + PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase, + &psAllocSyncPrimitiveBlockOUT->hhSyncPMR, + (void *)pshSyncPMRInt, + PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + psAllocSyncPrimitiveBlockOUT->hSyncHandle); + if (unlikely(psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto AllocSyncPrimitiveBlock_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +AllocSyncPrimitiveBlock_exit: + + if (psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK) + { + if (psAllocSyncPrimitiveBlockOUT->hSyncHandle) + { + PVRSRV_ERROR eError; + + /* Lock over handle creation cleanup. */ + LockHandle(psConnection->psHandleBase); + + eError = PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + (IMG_HANDLE) + psAllocSyncPrimitiveBlockOUT-> + hSyncHandle, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", __func__, PVRSRVGetErrorString(eError))); + } + /* Releasing the handle should free/destroy/release the resource. + * This should never fail... 
*/ + PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); + + /* Avoid freeing/destroying/releasing the resource a second time below */ + psSyncHandleInt = NULL; + /* Release now we have cleaned up creation handles. */ + UnlockHandle(psConnection->psHandleBase); + + } + + if (psSyncHandleInt) + { + PVRSRVFreeSyncPrimitiveBlockKM(psSyncHandleInt); + } + } + + return 0; +} + +static IMG_INT +PVRSRVBridgeFreeSyncPrimitiveBlock(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psFreeSyncPrimitiveBlockIN_UI8, + IMG_UINT8 * psFreeSyncPrimitiveBlockOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK *psFreeSyncPrimitiveBlockIN = + (PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK *) + IMG_OFFSET_ADDR(psFreeSyncPrimitiveBlockIN_UI8, 0); + PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK *psFreeSyncPrimitiveBlockOUT = + (PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK *) + IMG_OFFSET_ADDR(psFreeSyncPrimitiveBlockOUT_UI8, 0); + + /* Lock over handle destruction. */ + LockHandle(psConnection->psHandleBase); + + psFreeSyncPrimitiveBlockOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) psFreeSyncPrimitiveBlockIN->hSyncHandle, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + if (unlikely((psFreeSyncPrimitiveBlockOUT->eError != PVRSRV_OK) && + (psFreeSyncPrimitiveBlockOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psFreeSyncPrimitiveBlockOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto FreeSyncPrimitiveBlock_exit; + } + + /* Release now we have destroyed handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + +FreeSyncPrimitiveBlock_exit: + + return 0; +} + +static IMG_INT +PVRSRVBridgeSyncPrimSet(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psSyncPrimSetIN_UI8, + IMG_UINT8 * psSyncPrimSetOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_SYNCPRIMSET *psSyncPrimSetIN = + (PVRSRV_BRIDGE_IN_SYNCPRIMSET *) IMG_OFFSET_ADDR(psSyncPrimSetIN_UI8, 0); + PVRSRV_BRIDGE_OUT_SYNCPRIMSET *psSyncPrimSetOUT = + (PVRSRV_BRIDGE_OUT_SYNCPRIMSET *) IMG_OFFSET_ADDR(psSyncPrimSetOUT_UI8, 0); + + IMG_HANDLE hSyncHandle = psSyncPrimSetIN->hSyncHandle; + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psSyncPrimSetOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSyncHandleInt, + hSyncHandle, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE); + if (unlikely(psSyncPrimSetOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto SyncPrimSet_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psSyncPrimSetOUT->eError = + PVRSRVSyncPrimSetKM(psSyncHandleInt, + psSyncPrimSetIN->ui32Index, psSyncPrimSetIN->ui32Value); + +SyncPrimSet_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psSyncHandleInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSyncHandle, PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +#if defined(PDUMP) + +static IMG_INT +PVRSRVBridgeSyncPrimPDump(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psSyncPrimPDumpIN_UI8, + IMG_UINT8 * psSyncPrimPDumpOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP *psSyncPrimPDumpIN = + (PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP *) IMG_OFFSET_ADDR(psSyncPrimPDumpIN_UI8, 0); + PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP *psSyncPrimPDumpOUT = + (PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP *) IMG_OFFSET_ADDR(psSyncPrimPDumpOUT_UI8, 0); + + IMG_HANDLE hSyncHandle = psSyncPrimPDumpIN->hSyncHandle; + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psSyncPrimPDumpOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSyncHandleInt, + hSyncHandle, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE); + if (unlikely(psSyncPrimPDumpOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto SyncPrimPDump_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psSyncPrimPDumpOUT->eError = + PVRSRVSyncPrimPDumpKM(psSyncHandleInt, psSyncPrimPDumpIN->ui32Offset); + +SyncPrimPDump_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psSyncHandleInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSyncHandle, PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +#else +#define PVRSRVBridgeSyncPrimPDump NULL +#endif + +#if defined(PDUMP) + +static IMG_INT +PVRSRVBridgeSyncPrimPDumpValue(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psSyncPrimPDumpValueIN_UI8, + IMG_UINT8 * psSyncPrimPDumpValueOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE *psSyncPrimPDumpValueIN = + (PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE *) IMG_OFFSET_ADDR(psSyncPrimPDumpValueIN_UI8, 0); + PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE *psSyncPrimPDumpValueOUT = + (PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE *) IMG_OFFSET_ADDR(psSyncPrimPDumpValueOUT_UI8, + 0); + + IMG_HANDLE hSyncHandle = psSyncPrimPDumpValueIN->hSyncHandle; + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psSyncPrimPDumpValueOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSyncHandleInt, + hSyncHandle, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE); + if (unlikely(psSyncPrimPDumpValueOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto SyncPrimPDumpValue_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psSyncPrimPDumpValueOUT->eError = + PVRSRVSyncPrimPDumpValueKM(psSyncHandleInt, + psSyncPrimPDumpValueIN->ui32Offset, + psSyncPrimPDumpValueIN->ui32Value); + +SyncPrimPDumpValue_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psSyncHandleInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSyncHandle, PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +#else +#define PVRSRVBridgeSyncPrimPDumpValue NULL +#endif + +#if defined(PDUMP) + +static IMG_INT +PVRSRVBridgeSyncPrimPDumpPol(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psSyncPrimPDumpPolIN_UI8, + IMG_UINT8 * psSyncPrimPDumpPolOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL *psSyncPrimPDumpPolIN = + (PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL *) IMG_OFFSET_ADDR(psSyncPrimPDumpPolIN_UI8, 0); + PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL *psSyncPrimPDumpPolOUT = + (PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL *) IMG_OFFSET_ADDR(psSyncPrimPDumpPolOUT_UI8, 0); + + IMG_HANDLE hSyncHandle = psSyncPrimPDumpPolIN->hSyncHandle; + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psSyncPrimPDumpPolOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSyncHandleInt, + hSyncHandle, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE); + if (unlikely(psSyncPrimPDumpPolOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto SyncPrimPDumpPol_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psSyncPrimPDumpPolOUT->eError = + PVRSRVSyncPrimPDumpPolKM(psSyncHandleInt, + psSyncPrimPDumpPolIN->ui32Offset, + psSyncPrimPDumpPolIN->ui32Value, + psSyncPrimPDumpPolIN->ui32Mask, + psSyncPrimPDumpPolIN->eOperator, + psSyncPrimPDumpPolIN->uiPDumpFlags); + +SyncPrimPDumpPol_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psSyncHandleInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSyncHandle, PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +#else +#define PVRSRVBridgeSyncPrimPDumpPol NULL +#endif + +#if defined(PDUMP) + +static IMG_INT +PVRSRVBridgeSyncPrimPDumpCBP(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psSyncPrimPDumpCBPIN_UI8, + IMG_UINT8 * psSyncPrimPDumpCBPOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP *psSyncPrimPDumpCBPIN = + (PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP *) IMG_OFFSET_ADDR(psSyncPrimPDumpCBPIN_UI8, 0); + PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP *psSyncPrimPDumpCBPOUT = + (PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP *) IMG_OFFSET_ADDR(psSyncPrimPDumpCBPOUT_UI8, 0); + + IMG_HANDLE hSyncHandle = psSyncPrimPDumpCBPIN->hSyncHandle; + SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL; + + /* Lock over handle lookup. */ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psSyncPrimPDumpCBPOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&psSyncHandleInt, + hSyncHandle, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE); + if (unlikely(psSyncPrimPDumpCBPOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto SyncPrimPDumpCBP_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psSyncPrimPDumpCBPOUT->eError = + PVRSRVSyncPrimPDumpCBPKM(psSyncHandleInt, + psSyncPrimPDumpCBPIN->ui32Offset, + psSyncPrimPDumpCBPIN->uiWriteOffset, + psSyncPrimPDumpCBPIN->uiPacketSize, + psSyncPrimPDumpCBPIN->uiBufferSize); + +SyncPrimPDumpCBP_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (psSyncHandleInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hSyncHandle, PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + return 0; +} + +#else +#define PVRSRVBridgeSyncPrimPDumpCBP NULL +#endif + +static IMG_INT +PVRSRVBridgeSyncAllocEvent(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psSyncAllocEventIN_UI8, + IMG_UINT8 * psSyncAllocEventOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_SYNCALLOCEVENT *psSyncAllocEventIN = + (PVRSRV_BRIDGE_IN_SYNCALLOCEVENT *) IMG_OFFSET_ADDR(psSyncAllocEventIN_UI8, 0); + PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT *psSyncAllocEventOUT = + (PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT *) IMG_OFFSET_ADDR(psSyncAllocEventOUT_UI8, 0); + + IMG_CHAR *uiClassNameInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = (psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR)) + 0; + + if (unlikely(psSyncAllocEventIN->ui32ClassNameSize > PVRSRV_SYNC_NAME_LENGTH)) + { + psSyncAllocEventOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto SyncAllocEvent_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psSyncAllocEventIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psSyncAllocEventIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psSyncAllocEventOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto SyncAllocEvent_exit; + } + } + } + + if (psSyncAllocEventIN->ui32ClassNameSize != 0) + { + uiClassNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiClassNameInt, (const void __user *)psSyncAllocEventIN->puiClassName, + psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psSyncAllocEventOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto SyncAllocEvent_exit; + } + ((IMG_CHAR *) + uiClassNameInt)[(psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR)) - 1] = + '\0'; + } + + psSyncAllocEventOUT->eError = + PVRSRVSyncAllocEventKM(psConnection, OSGetDevNode(psConnection), + psSyncAllocEventIN->bServerSync, + psSyncAllocEventIN->ui32FWAddr, + psSyncAllocEventIN->ui32ClassNameSize, uiClassNameInt); + +SyncAllocEvent_exit: + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +static IMG_INT +PVRSRVBridgeSyncFreeEvent(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psSyncFreeEventIN_UI8, + IMG_UINT8 * psSyncFreeEventOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_SYNCFREEEVENT *psSyncFreeEventIN = + 
(PVRSRV_BRIDGE_IN_SYNCFREEEVENT *) IMG_OFFSET_ADDR(psSyncFreeEventIN_UI8, 0); + PVRSRV_BRIDGE_OUT_SYNCFREEEVENT *psSyncFreeEventOUT = + (PVRSRV_BRIDGE_OUT_SYNCFREEEVENT *) IMG_OFFSET_ADDR(psSyncFreeEventOUT_UI8, 0); + + psSyncFreeEventOUT->eError = + PVRSRVSyncFreeEventKM(psConnection, OSGetDevNode(psConnection), + psSyncFreeEventIN->ui32FWAddr); + + return 0; +} + +#if defined(PDUMP) + +static IMG_INT +PVRSRVBridgeSyncCheckpointSignalledPDumpPol(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psSyncCheckpointSignalledPDumpPolIN_UI8, + IMG_UINT8 * psSyncCheckpointSignalledPDumpPolOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_SYNCCHECKPOINTSIGNALLEDPDUMPPOL *psSyncCheckpointSignalledPDumpPolIN = + (PVRSRV_BRIDGE_IN_SYNCCHECKPOINTSIGNALLEDPDUMPPOL *) + IMG_OFFSET_ADDR(psSyncCheckpointSignalledPDumpPolIN_UI8, 0); + PVRSRV_BRIDGE_OUT_SYNCCHECKPOINTSIGNALLEDPDUMPPOL *psSyncCheckpointSignalledPDumpPolOUT = + (PVRSRV_BRIDGE_OUT_SYNCCHECKPOINTSIGNALLEDPDUMPPOL *) + IMG_OFFSET_ADDR(psSyncCheckpointSignalledPDumpPolOUT_UI8, 0); + + PVR_UNREFERENCED_PARAMETER(psConnection); + + psSyncCheckpointSignalledPDumpPolOUT->eError = + PVRSRVSyncCheckpointSignalledPDumpPolKM(psSyncCheckpointSignalledPDumpPolIN->hFence); + + return 0; +} + +#else +#define PVRSRVBridgeSyncCheckpointSignalledPDumpPol NULL +#endif + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitSYNCBridge(void); +PVRSRV_ERROR DeinitSYNCBridge(void); + +/* + * Register all SYNC functions with services + */ +PVRSRV_ERROR InitSYNCBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK, + PVRSRVBridgeAllocSyncPrimitiveBlock, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK, + PVRSRVBridgeFreeSyncPrimitiveBlock, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMSET, + 
PVRSRVBridgeSyncPrimSet, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP, + PVRSRVBridgeSyncPrimPDump, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE, + PVRSRVBridgeSyncPrimPDumpValue, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL, + PVRSRVBridgeSyncPrimPDumpPol, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP, + PVRSRVBridgeSyncPrimPDumpCBP, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT, + PVRSRVBridgeSyncAllocEvent, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT, + PVRSRVBridgeSyncFreeEvent, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_SYNCCHECKPOINTSIGNALLEDPDUMPPOL, + PVRSRVBridgeSyncCheckpointSignalledPDumpPol, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all sync functions with services + */ +PVRSRV_ERROR DeinitSYNCBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMSET); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, + PVRSRV_BRIDGE_SYNC_SYNCCHECKPOINTSIGNALLEDPDUMPPOL); + + return PVRSRV_OK; +} diff --git 
a/drivers/gpu/drm/phytium/octopus/server_synctracking_bridge.c b/drivers/gpu/drm/phytium/octopus/server_synctracking_bridge.c new file mode 100644 index 000000000000..055627bba723 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/server_synctracking_bridge.c @@ -0,0 +1,317 @@ +/******************************************************************************* +@File +@Title Server bridge for synctracking +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements the server side of the bridge for synctracking +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include + +#include "img_defs.h" + +#include "sync.h" +#include "sync_server.h" + +#include "common_synctracking_bridge.h" + +#include "allocmem.h" +#include "pvr_debug.h" +#include "connection_server.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif +#include "srvcore.h" +#include "handle.h" + +#include + +/* *************************************************************************** + * Server-side bridge entry points + */ + +static IMG_INT +PVRSRVBridgeSyncRecordRemoveByHandle(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psSyncRecordRemoveByHandleIN_UI8, + IMG_UINT8 * psSyncRecordRemoveByHandleOUT_UI8, + CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE *psSyncRecordRemoveByHandleIN = + (PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE *) + IMG_OFFSET_ADDR(psSyncRecordRemoveByHandleIN_UI8, 0); + PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE *psSyncRecordRemoveByHandleOUT = + (PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE *) + IMG_OFFSET_ADDR(psSyncRecordRemoveByHandleOUT_UI8, 0); + + /* Lock over handle destruction. 
*/ + LockHandle(psConnection->psHandleBase); + + psSyncRecordRemoveByHandleOUT->eError = + PVRSRVReleaseHandleStagedUnlock(psConnection->psHandleBase, + (IMG_HANDLE) psSyncRecordRemoveByHandleIN->hhRecord, + PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE); + if (unlikely((psSyncRecordRemoveByHandleOUT->eError != PVRSRV_OK) && + (psSyncRecordRemoveByHandleOUT->eError != PVRSRV_ERROR_RETRY))) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s", + __func__, PVRSRVGetErrorString(psSyncRecordRemoveByHandleOUT->eError))); + UnlockHandle(psConnection->psHandleBase); + goto SyncRecordRemoveByHandle_exit; + } + + /* Release now we have destroyed handles. */ + UnlockHandle(psConnection->psHandleBase); + +SyncRecordRemoveByHandle_exit: + + return 0; +} + +static PVRSRV_ERROR _SyncRecordAddpshRecordIntRelease(void *pvData) +{ + PVRSRV_ERROR eError; + eError = PVRSRVSyncRecordRemoveByHandleKM((SYNC_RECORD_HANDLE) pvData); + return eError; +} + +static IMG_INT +PVRSRVBridgeSyncRecordAdd(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 * psSyncRecordAddIN_UI8, + IMG_UINT8 * psSyncRecordAddOUT_UI8, CONNECTION_DATA * psConnection) +{ + PVRSRV_BRIDGE_IN_SYNCRECORDADD *psSyncRecordAddIN = + (PVRSRV_BRIDGE_IN_SYNCRECORDADD *) IMG_OFFSET_ADDR(psSyncRecordAddIN_UI8, 0); + PVRSRV_BRIDGE_OUT_SYNCRECORDADD *psSyncRecordAddOUT = + (PVRSRV_BRIDGE_OUT_SYNCRECORDADD *) IMG_OFFSET_ADDR(psSyncRecordAddOUT_UI8, 0); + + SYNC_RECORD_HANDLE pshRecordInt = NULL; + IMG_HANDLE hhServerSyncPrimBlock = psSyncRecordAddIN->hhServerSyncPrimBlock; + SYNC_PRIMITIVE_BLOCK *pshServerSyncPrimBlockInt = NULL; + IMG_CHAR *uiClassNameInt = NULL; + + IMG_UINT32 ui32NextOffset = 0; + IMG_BYTE *pArrayArgsBuffer = NULL; +#if !defined(INTEGRITY_OS) + IMG_BOOL bHaveEnoughSpace = IMG_FALSE; +#endif + + IMG_UINT32 ui32BufferSize = (psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR)) + 0; + + if (unlikely(psSyncRecordAddIN->ui32ClassNameSize > PVRSRV_SYNC_NAME_LENGTH)) + { + psSyncRecordAddOUT->eError = 
PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; + goto SyncRecordAdd_exit; + } + + if (ui32BufferSize != 0) + { +#if !defined(INTEGRITY_OS) + /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ + IMG_UINT32 ui32InBufferOffset = + PVR_ALIGN(sizeof(*psSyncRecordAddIN), sizeof(unsigned long)); + IMG_UINT32 ui32InBufferExcessSize = + ui32InBufferOffset >= + PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; + + bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; + if (bHaveEnoughSpace) + { + IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psSyncRecordAddIN; + + pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; + } + else +#endif + { + pArrayArgsBuffer = OSAllocZMemNoStats(ui32BufferSize); + + if (!pArrayArgsBuffer) + { + psSyncRecordAddOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto SyncRecordAdd_exit; + } + } + } + + if (psSyncRecordAddIN->ui32ClassNameSize != 0) + { + uiClassNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); + ui32NextOffset += psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR); + } + + /* Copy the data over */ + if (psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR) > 0) + { + if (OSCopyFromUser + (NULL, uiClassNameInt, (const void __user *)psSyncRecordAddIN->puiClassName, + psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR)) != PVRSRV_OK) + { + psSyncRecordAddOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + + goto SyncRecordAdd_exit; + } + ((IMG_CHAR *) + uiClassNameInt)[(psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR)) - 1] = + '\0'; + } + + /* Lock over handle lookup. 
*/ + LockHandle(psConnection->psHandleBase); + + /* Look up the address from the handle */ + psSyncRecordAddOUT->eError = + PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, + (void **)&pshServerSyncPrimBlockInt, + hhServerSyncPrimBlock, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE); + if (unlikely(psSyncRecordAddOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto SyncRecordAdd_exit; + } + /* Release now we have looked up handles. */ + UnlockHandle(psConnection->psHandleBase); + + psSyncRecordAddOUT->eError = + PVRSRVSyncRecordAddKM(psConnection, OSGetDevNode(psConnection), + &pshRecordInt, + pshServerSyncPrimBlockInt, + psSyncRecordAddIN->ui32ui32FwBlockAddr, + psSyncRecordAddIN->ui32ui32SyncOffset, + psSyncRecordAddIN->bbServerSync, + psSyncRecordAddIN->ui32ClassNameSize, uiClassNameInt); + /* Exit early if bridged call fails */ + if (unlikely(psSyncRecordAddOUT->eError != PVRSRV_OK)) + { + goto SyncRecordAdd_exit; + } + + /* Lock over handle creation. */ + LockHandle(psConnection->psHandleBase); + + psSyncRecordAddOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, + &psSyncRecordAddOUT->hhRecord, + (void *)pshRecordInt, + PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + (PFN_HANDLE_RELEASE) & + _SyncRecordAddpshRecordIntRelease); + if (unlikely(psSyncRecordAddOUT->eError != PVRSRV_OK)) + { + UnlockHandle(psConnection->psHandleBase); + goto SyncRecordAdd_exit; + } + + /* Release now we have created handles. */ + UnlockHandle(psConnection->psHandleBase); + +SyncRecordAdd_exit: + + /* Lock over handle lookup cleanup. */ + LockHandle(psConnection->psHandleBase); + + /* Unreference the previously looked up handle */ + if (pshServerSyncPrimBlockInt) + { + PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, + hhServerSyncPrimBlock, + PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); + } + /* Release now we have cleaned up look up handles. 
*/ + UnlockHandle(psConnection->psHandleBase); + + if (psSyncRecordAddOUT->eError != PVRSRV_OK) + { + if (pshRecordInt) + { + PVRSRVSyncRecordRemoveByHandleKM(pshRecordInt); + } + } + + /* Allocated space should be equal to the last updated offset */ + PVR_ASSERT(ui32BufferSize == ui32NextOffset); + +#if defined(INTEGRITY_OS) + if (pArrayArgsBuffer) +#else + if (!bHaveEnoughSpace && pArrayArgsBuffer) +#endif + OSFreeMemNoStats(pArrayArgsBuffer); + + return 0; +} + +/* *************************************************************************** + * Server bridge dispatch related glue + */ + +PVRSRV_ERROR InitSYNCTRACKINGBridge(void); +PVRSRV_ERROR DeinitSYNCTRACKINGBridge(void); + +/* + * Register all SYNCTRACKING functions with services + */ +PVRSRV_ERROR InitSYNCTRACKINGBridge(void) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING, + PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE, + PVRSRVBridgeSyncRecordRemoveByHandle, NULL); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING, PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD, + PVRSRVBridgeSyncRecordAdd, NULL); + + return PVRSRV_OK; +} + +/* + * Unregister all synctracking functions with services + */ +PVRSRV_ERROR DeinitSYNCTRACKINGBridge(void) +{ + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING, + PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE); + + UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING, + PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD); + + return PVRSRV_OK; +} diff --git a/drivers/gpu/drm/phytium/octopus/services_kernel_client.h b/drivers/gpu/drm/phytium/octopus/services_kernel_client.h new file mode 100644 index 000000000000..8b366d0da0a9 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/services_kernel_client.h @@ -0,0 +1,267 @@ +/*************************************************************************/ /*! +@File services_kernel_client.h +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/* This file contains a partial redefinition of the PhytiumVR Services 5 + * interface for use by components which are checkpatch clean. This + * header is included by the unrefined, non-checkpatch clean headers + * to ensure that prototype/typedef/macro changes break the build. + */ + +#ifndef __SERVICES_KERNEL_CLIENT__ +#define __SERVICES_KERNEL_CLIENT__ + +#include "pvrsrv_error.h" + +#include + +#include "pvrsrv_sync_km.h" +#include "sync_checkpoint_external.h" + +#ifndef __pvrsrv_defined_struct_enum__ + +/* sync_external.h */ + +struct PVRSRV_CLIENT_SYNC_PRIM { + volatile __u32 *pui32LinAddr; +}; + +struct PVRSRV_CLIENT_SYNC_PRIM_OP { + __u32 ui32Flags; + struct pvrsrv_sync_prim *psSync; + __u32 ui32FenceValue; + __u32 ui32UpdateValue; +}; + +#else /* __pvrsrv_defined_struct_enum__ */ + +struct PVRSRV_CLIENT_SYNC_PRIM; +struct PVRSRV_CLIENT_SYNC_PRIM_OP; + +enum tag_img_bool; + +#endif /* __pvrsrv_defined_struct_enum__ */ + +struct _PMR_; +struct _PVRSRV_DEVICE_NODE_; +struct dma_buf; +struct SYNC_PRIM_CONTEXT; + +/* pvr_notifier.h */ + +#ifndef CMDCOMPNOTIFY_PFN +typedef void (*PFN_CMDCOMP_NOTIFY)(void *hCmdCompHandle); +#define CMDCOMPNOTIFY_PFN +#endif +enum PVRSRV_ERROR_TAG PVRSRVRegisterCmdCompleteNotify(void **phNotify, + PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify, void *hPrivData); +enum PVRSRV_ERROR_TAG PVRSRVUnregisterCmdCompleteNotify(void 
*hNotify); +void PVRSRVCheckStatus(void *hCmdCompCallerHandle); + +#define DEBUG_REQUEST_DC 0 +#define DEBUG_REQUEST_SYNCTRACKING 1 +#define DEBUG_REQUEST_SYS 2 +#define DEBUG_REQUEST_ANDROIDSYNC 3 +#define DEBUG_REQUEST_LINUXFENCE 4 +#define DEBUG_REQUEST_SYNCCHECKPOINT 5 +#define DEBUG_REQUEST_HTB 6 +#define DEBUG_REQUEST_APPHINT 7 +#define DEBUG_REQUEST_FALLBACKSYNC 8 + +#define DEBUG_REQUEST_VERBOSITY_LOW 0 +#define DEBUG_REQUEST_VERBOSITY_MEDIUM 1 +#define DEBUG_REQUEST_VERBOSITY_HIGH 2 +#define DEBUG_REQUEST_VERBOSITY_MAX DEBUG_REQUEST_VERBOSITY_HIGH + +#define DD_VERB_LVL_ENABLED(_verbLvl, _verbLvlChk) ((_verbLvl) >= (_verbLvlChk)) + +#ifndef DBGNOTIFY_PFNS +typedef void (DUMPDEBUG_PRINTF_FUNC)(void *pvDumpDebugFile, + const char *fmt, ...) __printf(2, 3); +typedef void (*PFN_DBGREQ_NOTIFY) (void *hDebugRequestHandle, + __u32 ui32VerbLevel, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile); +#define DBGNOTIFY_PFNS +#endif +enum PVRSRV_ERROR_TAG PVRSRVRegisterDbgRequestNotify(void **phNotify, + struct _PVRSRV_DEVICE_NODE_ *psDevNode, + PFN_DBGREQ_NOTIFY pfnDbgRequestNotify, + __u32 ui32RequesterID, + void *hDbgRequestHandle); +enum PVRSRV_ERROR_TAG PVRSRVUnregisterDbgRequestNotify(void *hNotify); + +/* physmem_dmabuf.h */ + +struct dma_buf *PhysmemGetDmaBuf(struct _PMR_ *psPMR); + +/* pvrsrv.h */ + +enum PVRSRV_ERROR_TAG PVRSRVAcquireGlobalEventObjectKM(void **phGlobalEventObject); +enum PVRSRV_ERROR_TAG PVRSRVReleaseGlobalEventObjectKM(void *hGlobalEventObject); + +/* sync.h */ + +enum PVRSRV_ERROR_TAG SyncPrimContextCreate( + struct _PVRSRV_DEVICE_NODE_ *psDevConnection, + struct SYNC_PRIM_CONTEXT **phSyncPrimContext); +void SyncPrimContextDestroy(struct SYNC_PRIM_CONTEXT *hSyncPrimContext); + +enum PVRSRV_ERROR_TAG SyncPrimAlloc(struct SYNC_PRIM_CONTEXT *hSyncPrimContext, + struct PVRSRV_CLIENT_SYNC_PRIM **ppsSync, const char *pszClassName); +enum PVRSRV_ERROR_TAG SyncPrimFree(struct PVRSRV_CLIENT_SYNC_PRIM *psSync); +enum 
PVRSRV_ERROR_TAG SyncPrimGetFirmwareAddr( + struct PVRSRV_CLIENT_SYNC_PRIM *psSync, + __u32 *sync_addr); + +/* osfunc.h */ +enum PVRSRV_ERROR_TAG OSEventObjectWait(void *hOSEventKM); +enum PVRSRV_ERROR_TAG OSEventObjectOpen(void *hEventObject, void **phOSEventKM); +enum PVRSRV_ERROR_TAG OSEventObjectClose(void *hOSEventKM); +__u32 OSGetCurrentClientProcessIDKM(void); +__u32 OSStringUINT32ToStr(char *pszBuf, size_t uSize, __u32 ui32Num); + +/* srvkm.h */ + +enum PVRSRV_ERROR_TAG PVRSRVCommonDeviceCreate(void *pvOSDevice, + int i32OsDeviceID, + struct _PVRSRV_DEVICE_NODE_ **ppsDeviceNode); +enum PVRSRV_ERROR_TAG PVRSRVCommonDeviceDestroy( + struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); +const char *PVRSRVGetErrorString(enum PVRSRV_ERROR_TAG eError); +#if defined(SUPPORT_FWLOAD_ON_PROBE) +enum PVRSRV_ERROR_TAG PVRSRVCommonDeviceInitialise( + struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); +#endif + +#ifndef CHECKPOINT_PFNS +typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN)(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, PVRSRV_FENCE fence, u32 *nr_checkpoints, PSYNC_CHECKPOINT **checkpoint_handles, u64 *fence_uid); + +#ifndef CHECKPOINT_PFNS +typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN)( + const char *fence_name, + PVRSRV_TIMELINE timeline, + PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, + PVRSRV_FENCE *new_fence, + u64 *fence_uid, + void **fence_finalise_data, + PSYNC_CHECKPOINT *new_checkpoint_handle, + void **timeline_update_sync, + __u32 *timeline_update_value); +#endif + +#ifndef CHECKPOINT_PFNS +typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN)(PVRSRV_FENCE fence_to_rollback, void *finalise_data); +#endif + +#ifndef CHECKPOINT_PFNS +typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN)(PVRSRV_FENCE fence_to_finalise, void *finalise_data); +#endif + +#ifndef CHECKPOINT_PFNS +typedef __u32 (*PFN_SYNC_CHECKPOINT_DUMP_INFO_ON_STALLED_UFOS_FN)(__u32 num_ufos, __u32 *vaddrs); +#endif + +#ifndef CHECKPOINT_PFNS 
+typedef enum tag_img_bool (*PFN_SYNC_CHECKPOINT_UFO_HAS_SIGNALLED_FN)( + __u32 ui32FwAddr, __u32 ui32Value); +typedef enum PVRSRV_ERROR_TAG (*PFN_SYNC_CHECKPOINT_SIGNAL_WAITERS_FN)(void); +typedef void(*PFN_SYNC_CHECKPOINT_CHECK_STATE_FN)(void); +#if defined(PDUMP) +typedef PVRSRV_ERROR(*PFN_SYNC_CHECKPOINT_FENCE_GETCHECKPOINTS_FN)(PVRSRV_FENCE iFence, + IMG_UINT32 *puiNumCheckpoints, + PSYNC_CHECKPOINT **papsCheckpoints); +#endif +#endif + +/* This is the function that kick code will call in a NO_HARDWARE build only after + * sync checkpoints have been manually signalled, to allow the OS native sync + * implementation to update its timelines (as the usual callback notification + * of signalled checkpoints is not supported for NO_HARDWARE). + */ +#ifndef CHECKPOINT_PFNS +typedef void (*PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN)(void *private_data); +typedef void (*PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN)(void *mem_ptr); + +#define SYNC_CHECKPOINT_IMPL_MAX_STRLEN 20 + +typedef struct { + PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN pfnFenceResolve; + PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN pfnFenceCreate; + PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN pfnFenceDataRollback; + PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN pfnFenceFinalise; + PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN pfnNoHWUpdateTimelines; + PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN pfnFreeCheckpointListMem; + PFN_SYNC_CHECKPOINT_DUMP_INFO_ON_STALLED_UFOS_FN pfnDumpInfoOnStalledUFOs; + char pszImplName[SYNC_CHECKPOINT_IMPL_MAX_STRLEN]; +#if defined(PDUMP) + PFN_SYNC_CHECKPOINT_FENCE_GETCHECKPOINTS_FN pfnSyncFenceGetCheckpoints; +#endif +} PFN_SYNC_CHECKPOINT_STRUCT; + +enum PVRSRV_ERROR_TAG SyncCheckpointRegisterFunctions(PFN_SYNC_CHECKPOINT_STRUCT *psSyncCheckpointPfns); + +#define CHECKPOINT_PFNS +#endif + +/* sync_checkpoint.h */ +enum PVRSRV_ERROR_TAG SyncCheckpointContextCreate(struct _PVRSRV_DEVICE_NODE_ *psDevConnection, PSYNC_CHECKPOINT_CONTEXT *phSyncCheckpointContext); +enum PVRSRV_ERROR_TAG 
SyncCheckpointContextDestroy(PSYNC_CHECKPOINT_CONTEXT hSyncCheckpointContext); +void SyncCheckpointContextRef(PSYNC_CHECKPOINT_CONTEXT psContext); +void SyncCheckpointContextUnref(PSYNC_CHECKPOINT_CONTEXT psContext); +enum PVRSRV_ERROR_TAG SyncCheckpointAlloc(PSYNC_CHECKPOINT_CONTEXT psSyncContext, PVRSRV_TIMELINE timeline, PVRSRV_FENCE fence, const char *pszCheckpointName, PSYNC_CHECKPOINT *ppsSyncCheckpoint); +void SyncCheckpointSignal(PSYNC_CHECKPOINT psSyncCheckpoint, u32 fence_sync_flags); +void SyncCheckpointError(PSYNC_CHECKPOINT psSyncCheckpoint, u32 fence_sync_flags); +enum tag_img_bool SyncCheckpointIsSignalled(PSYNC_CHECKPOINT psSyncCheckpoint, u32 fence_sync_flags); +enum tag_img_bool SyncCheckpointIsErrored(PSYNC_CHECKPOINT psSyncCheckpoint, u32 fence_sync_flags); +enum PVRSRV_ERROR_TAG SyncCheckpointTakeRef(PSYNC_CHECKPOINT psSyncCheckpoint); +enum PVRSRV_ERROR_TAG SyncCheckpointDropRef(PSYNC_CHECKPOINT psSyncCheckpoint); +void SyncCheckpointFree(PSYNC_CHECKPOINT psSyncCheckpoint); +__u32 SyncCheckpointGetFirmwareAddr(PSYNC_CHECKPOINT psSyncCheckpoint); +void SyncCheckpointCCBEnqueued(PSYNC_CHECKPOINT psSyncCheckpoint); +__u32 SyncCheckpointGetId(PSYNC_CHECKPOINT psSyncCheckpoint); +__u32 SyncCheckpointGetEnqueuedCount(PSYNC_CHECKPOINT psSyncCheckpoint); +__u32 SyncCheckpointGetReferenceCount(PSYNC_CHECKPOINT psSyncCheckpoint); +PVRSRV_TIMELINE SyncCheckpointGetTimeline(PSYNC_CHECKPOINT psSyncCheckpoint); +const char *SyncCheckpointGetStateString(PSYNC_CHECKPOINT psSyncCheckpoint); + +#endif + +#endif /* __SERVICES_KERNEL_CLIENT__ */ diff --git a/drivers/gpu/drm/phytium/octopus/services_km.h b/drivers/gpu/drm/phytium/octopus/services_km.h new file mode 100644 index 000000000000..a8afbdcd5ea6 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/services_km.h @@ -0,0 +1,180 @@ +/*************************************************************************/ /*! 
+@File +@Title Services API Kernel mode Header +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Exported services API details +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef SERVICES_KM_H +#define SERVICES_KM_H + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +#include "virt_validation_defs.h" +#endif + +/*! 4k page size definition */ +#define PVRSRV_4K_PAGE_SIZE 4096UL /*!< Size of a 4K Page */ +#define PVRSRV_4K_PAGE_SIZE_ALIGNSHIFT 12 /*!< Amount to shift an address by so that + it is always page-aligned */ +/*! 16k page size definition */ +#define PVRSRV_16K_PAGE_SIZE 16384UL /*!< Size of a 16K Page */ +#define PVRSRV_16K_PAGE_SIZE_ALIGNSHIFT 14 /*!< Amount to shift an address by so that + it is always page-aligned */ +/*! 64k page size definition */ +#define PVRSRV_64K_PAGE_SIZE 65536UL /*!< Size of a 64K Page */ +#define PVRSRV_64K_PAGE_SIZE_ALIGNSHIFT 16 /*!< Amount to shift an address by so that + it is always page-aligned */ +/*! 256k page size definition */ +#define PVRSRV_256K_PAGE_SIZE 262144UL /*!< Size of a 256K Page */ +#define PVRSRV_256K_PAGE_SIZE_ALIGNSHIFT 18 /*!< Amount to shift an address by so that + it is always page-aligned */ +/*! 1MB page size definition */ +#define PVRSRV_1M_PAGE_SIZE 1048576UL /*!< Size of a 1M Page */ +#define PVRSRV_1M_PAGE_SIZE_ALIGNSHIFT 20 /*!< Amount to shift an address by so that + it is always page-aligned */ +/*! 
2MB page size definition */ +#define PVRSRV_2M_PAGE_SIZE 2097152UL /*!< Size of a 2M Page */ +#define PVRSRV_2M_PAGE_SIZE_ALIGNSHIFT 21 /*!< Amount to shift an address by so that + it is always page-aligned */ + +/*! + * @AddToGroup SRVConnectInterfaces + * @{ + */ + +#ifndef PVRSRV_DEV_CONNECTION_TYPEDEF +#define PVRSRV_DEV_CONNECTION_TYPEDEF +/*! + * Forward declaration (look on connection.h) + */ +typedef struct PVRSRV_DEV_CONNECTION_TAG PVRSRV_DEV_CONNECTION; +#endif + +/*! + * \anchor SRV_FLAGS + * @name SRV_FLAGS: Flags for Services connection + * Allows to define per-client policy for Services + * @{ + */ + +/* + * Use of the 32-bit connection flags mask + * ( X = taken/in use, - = available/unused ) + * + * 31 27 20 6 4 0 + * | | | | | | + * X---XXXXXXXX-------------XXX---- + */ + +#define SRV_NO_HWPERF_CLIENT_STREAM (1U << 4) /*!< Don't create HWPerf for this connection */ +#define SRV_FLAGS_CLIENT_64BIT_COMPAT (1U << 5) /*!< This flags gets set if the client is 64 Bit compatible. */ +#define SRV_FLAGS_CLIENT_SLR_DISABLED (1U << 6) /*!< This flag is set if the client does not want Sync Lockup Recovery (SLR) enabled. */ +#define SRV_FLAGS_PDUMPCTRL (1U << 31) /*!< PDump Ctrl client flag */ + +/*! @} SRV_FLAGS */ + +/*! @} End of SRVConnectInterfaces */ + +/* + * Bits 20 - 27 are used to pass information needed for validation + * of the GPU Virtualisation Validation mechanism. In particular: + * + * Bits: + * [20 - 22]: OSid of the memory region that will be used for allocations + * [23 - 25]: OSid that will be emitted by the Firmware for all memory accesses + * regarding that memory context. 
+ * [26]: If the AXI Protection register will be set to secure for that OSid + * [27]: If the Emulator Wrapper Register checking for protection violation + * will be set to secure for that OSid + */ + +#define VIRTVAL_FLAG_OSID_SHIFT (20) +#define SRV_VIRTVAL_FLAG_OSID_MASK (7U << VIRTVAL_FLAG_OSID_SHIFT) + +#define VIRTVAL_FLAG_OSIDREG_SHIFT (23) +#define SRV_VIRTVAL_FLAG_OSIDREG_MASK (7U << VIRTVAL_FLAG_OSIDREG_SHIFT) + +#define VIRTVAL_FLAG_AXIPREG_SHIFT (26) +#define SRV_VIRTVAL_FLAG_AXIPREG_MASK (1U << VIRTVAL_FLAG_AXIPREG_SHIFT) + +#define VIRTVAL_FLAG_AXIPTD_SHIFT (27) +#define SRV_VIRTVAL_FLAG_AXIPTD_MASK (1U << VIRTVAL_FLAG_AXIPTD_SHIFT) + + +/* Size of pointer on a 64 bit machine */ +#define POINTER_SIZE_64BIT (8) + + +/* + Pdump flags which are accessible to Services clients +*/ +#define PDUMP_NONE 0x00000000U /*pszIOCName, + (psEntry->pfFunction != NULL) ? psEntry->pszFunctionName : "(null)", + psEntry->ui32CallCount, + psEntry->ui32CopyFromUserTotalBytes, + psEntry->ui32CopyToUserTotalBytes, + (unsigned long long) OSDivide64r64(psEntry->ui64TotalTimeNS, 1000, &ui32Remainder), + (unsigned long long) OSDivide64r64(psEntry->ui64MaxTimeNS, 1000, &ui32Remainder)); + + + } +} +#endif + +PVRSRV_ERROR +CopyFromUserWrapper(CONNECTION_DATA *psConnection, + IMG_UINT32 ui32DispatchTableEntry, + void *pvDest, + void __user *pvSrc, + IMG_UINT32 ui32Size) +{ + g_BridgeDispatchTable[ui32DispatchTableEntry].ui32CopyFromUserTotalBytes+=ui32Size; + g_BridgeGlobalStats.ui32TotalCopyFromUserBytes+=ui32Size; + return OSBridgeCopyFromUser(psConnection, pvDest, pvSrc, ui32Size); +} +PVRSRV_ERROR +CopyToUserWrapper(CONNECTION_DATA *psConnection, + IMG_UINT32 ui32DispatchTableEntry, + void __user *pvDest, + void *pvSrc, + IMG_UINT32 ui32Size) +{ + g_BridgeDispatchTable[ui32DispatchTableEntry].ui32CopyToUserTotalBytes+=ui32Size; + g_BridgeGlobalStats.ui32TotalCopyToUserBytes+=ui32Size; + return OSBridgeCopyToUser(psConnection, pvDest, pvSrc, ui32Size); +} +#else +INLINE 
PVRSRV_ERROR +CopyFromUserWrapper(CONNECTION_DATA *psConnection, + IMG_UINT32 ui32DispatchTableEntry, + void *pvDest, + void __user *pvSrc, + IMG_UINT32 ui32Size) +{ + PVR_UNREFERENCED_PARAMETER (ui32DispatchTableEntry); + return OSBridgeCopyFromUser(psConnection, pvDest, pvSrc, ui32Size); +} +INLINE PVRSRV_ERROR +CopyToUserWrapper(CONNECTION_DATA *psConnection, + IMG_UINT32 ui32DispatchTableEntry, + void __user *pvDest, + void *pvSrc, + IMG_UINT32 ui32Size) +{ + PVR_UNREFERENCED_PARAMETER (ui32DispatchTableEntry); + return OSBridgeCopyToUser(psConnection, pvDest, pvSrc, ui32Size); +} +#endif + +PVRSRV_ERROR +PVRSRVConnectKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 ui32Flags, + IMG_UINT32 ui32ClientBuildOptions, + IMG_UINT32 ui32ClientDDKVersion, + IMG_UINT32 ui32ClientDDKBuild, + IMG_UINT8 *pui8KernelArch, + IMG_UINT32 *pui32CapabilityFlags, + IMG_UINT64 *ui64PackedBvnc) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 ui32BuildOptions, ui32BuildOptionsMismatch; + IMG_UINT32 ui32DDKVersion, ui32DDKBuild; + PVRSRV_DATA *psSRVData = NULL; + IMG_UINT64 ui64ProcessVASpaceSize = OSGetCurrentProcessVASpaceSize(); + static IMG_BOOL bIsFirstConnection=IMG_FALSE; + +#if defined(SUPPORT_RGX) + PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + /* Gather BVNC information to output to UM */ + + *ui64PackedBvnc = rgx_bvnc_pack(psDevInfo->sDevFeatureCfg.ui32B, + psDevInfo->sDevFeatureCfg.ui32V, + psDevInfo->sDevFeatureCfg.ui32N, + psDevInfo->sDevFeatureCfg.ui32C); +#else + *ui64PackedBvnc = 0; +#endif /* defined(SUPPORT_RGX)*/ + + /* Clear the flags */ + *pui32CapabilityFlags = 0; + + psSRVData = PVRSRVGetPVRSRVData(); + + psConnection->ui32ClientFlags = ui32Flags; + + /*Set flags to pass back to the client showing which cache coherency is available.*/ + /* Is the system snooping of caches emulated in software? 
*/ + if (PVRSRVSystemSnoopingIsEmulated(psDeviceNode->psDevConfig)) + { + *pui32CapabilityFlags |= PVRSRV_CACHE_COHERENT_EMULATE_FLAG; + } + else + { + /*Set flags to pass back to the client showing which cache coherency is available.*/ + /*Is the system CPU cache coherent?*/ + if (PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig)) + { + *pui32CapabilityFlags |= PVRSRV_CACHE_COHERENT_DEVICE_FLAG; + } + /*Is the system device cache coherent?*/ + if (PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig)) + { + *pui32CapabilityFlags |= PVRSRV_CACHE_COHERENT_CPU_FLAG; + } + } + + /* Has the system device non-mappable local memory?*/ + if (PVRSRVSystemHasNonMappableLocalMemory(psDeviceNode->psDevConfig)) + { + *pui32CapabilityFlags |= PVRSRV_NONMAPPABLE_MEMORY_PRESENT_FLAG; + } + + /* Is system using FBCDC v31? */ + if (psDeviceNode->pfnHasFBCDCVersion31(psDeviceNode)) + { + *pui32CapabilityFlags |= PVRSRV_FBCDC_V3_1_USED; + } + + /* Set flags to indicate shared-virtual-memory (SVM) allocation availability */ + if (! psDeviceNode->ui64GeneralSVMHeapTopVA || ! ui64ProcessVASpaceSize) + { + *pui32CapabilityFlags |= PVRSRV_DEVMEM_SVM_ALLOC_UNSUPPORTED; + } + else + { + if (ui64ProcessVASpaceSize <= psDeviceNode->ui64GeneralSVMHeapTopVA) + { + *pui32CapabilityFlags |= PVRSRV_DEVMEM_SVM_ALLOC_SUPPORTED; + } + else + { + /* This can happen when processor has more virtual address bits + than device (i.e. alloc is not always guaranteed to succeed) */ + *pui32CapabilityFlags |= PVRSRV_DEVMEM_SVM_ALLOC_CANFAIL; + } + } + + /* Is the system DMA capable? 
*/ + if (psDeviceNode->bHasSystemDMA) + { + *pui32CapabilityFlags |= PVRSRV_SYSTEM_DMA_USED; + } + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +{ + IMG_UINT32 ui32OSid = 0, ui32OSidReg = 0; + IMG_BOOL bOSidAxiProtReg = IMG_FALSE; + + ui32OSid = (ui32Flags & SRV_VIRTVAL_FLAG_OSID_MASK) >> (VIRTVAL_FLAG_OSID_SHIFT); + ui32OSidReg = (ui32Flags & SRV_VIRTVAL_FLAG_OSIDREG_MASK) >> (VIRTVAL_FLAG_OSIDREG_SHIFT); + +#if defined(EMULATOR) +{ + /* AXI_ACELITE is only supported on rogue cores - octopus cores all support full ACE + * and don't want to compile the code below (RGX_FEATURE_AXI_ACELITE_BIT_MASK is not + * defined for octopus cores). + */ + + PVRSRV_RGXDEV_INFO *psDevInfo; + psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice; + +#if defined(RGX_FEATURE_AXI_ACELITE_BIT_MASK) + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, AXI_ACELITE)) +#else + if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, AXI_ACE)) +#endif + { + IMG_UINT32 ui32OSidAxiProtReg = 0, ui32OSidAxiProtTD = 0; + + ui32OSidAxiProtReg = (ui32Flags & SRV_VIRTVAL_FLAG_AXIPREG_MASK) >> (VIRTVAL_FLAG_AXIPREG_SHIFT); + ui32OSidAxiProtTD = (ui32Flags & SRV_VIRTVAL_FLAG_AXIPTD_MASK) >> (VIRTVAL_FLAG_AXIPTD_SHIFT); + + PVR_DPF((PVR_DBG_MESSAGE, + "[AxiProt & Virt]: Setting bOSidAxiProt of Emulator's Trusted Device for Catbase %d to %s", + ui32OSidReg, + (ui32OSidAxiProtTD == 1)?"TRUE":"FALSE")); + + bOSidAxiProtReg = ui32OSidAxiProtReg == 1; + PVR_DPF((PVR_DBG_MESSAGE, + "[AxiProt & Virt]: Setting bOSidAxiProt of FW's Register for Catbase %d to %s", + ui32OSidReg, + bOSidAxiProtReg?"TRUE":"FALSE")); + + SetAxiProtOSid(ui32OSidReg, ui32OSidAxiProtTD); + } +} +#endif /* defined(EMULATOR) */ + + /* We now know the OSid, OSidReg and bOSidAxiProtReg setting for this + * connection. We can access these from wherever we have a connection + * reference and do not need to traverse an arbitrary linked-list to + * obtain them. The settings are process-specific. 
+ */ + psConnection->ui32OSid = ui32OSid; + psConnection->ui32OSidReg = ui32OSidReg; + psConnection->bOSidAxiProtReg = bOSidAxiProtReg; + + PVR_DPF((PVR_DBG_MESSAGE, + "[GPU Virtualization Validation]: OSIDs: %d, %d", + ui32OSid, + ui32OSidReg)); +} +#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */ + +#if defined(SUPPORT_WORKLOAD_ESTIMATION) + /* Only enabled if enabled in the UM */ + if (!(ui32ClientBuildOptions & RGX_BUILD_OPTIONS_KM & OPTIONS_WORKLOAD_ESTIMATION_MASK)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Workload Estimation disabled. Not enabled in UM", + __func__)); + } +#endif + +#if defined(SUPPORT_PDVFS) + /* Only enabled if enabled in the UM */ + if (!(ui32ClientBuildOptions & RGX_BUILD_OPTIONS_KM & OPTIONS_PDVFS_MASK)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Proactive DVFS disabled. Not enabled in UM", + __func__)); + } +#endif + + ui32DDKVersion = PVRVERSION_PACK(PVRVERSION_MAJ, PVRVERSION_MIN); + ui32DDKBuild = PVRVERSION_BUILD; + + if (ui32Flags & SRV_FLAGS_CLIENT_64BIT_COMPAT) + { + psSRVData->sDriverInfo.ui8UMSupportedArch |= BUILD_ARCH_64BIT; + } + else + { + psSRVData->sDriverInfo.ui8UMSupportedArch |= BUILD_ARCH_32BIT; + } + + if (IMG_FALSE == bIsFirstConnection) + { + psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildOptions = (RGX_BUILD_OPTIONS_KM); + psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildOptions = ui32ClientBuildOptions; + + psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildVersion = ui32DDKVersion; + psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildVersion = ui32ClientDDKVersion; + + psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildRevision = ui32DDKBuild; + psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildRevision = ui32ClientDDKBuild; + + psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildType = + ((RGX_BUILD_OPTIONS_KM) & OPTIONS_DEBUG_MASK) ? BUILD_TYPE_DEBUG : BUILD_TYPE_RELEASE; + + psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildType = + (ui32ClientBuildOptions & OPTIONS_DEBUG_MASK) ? 
BUILD_TYPE_DEBUG : BUILD_TYPE_RELEASE; + + if (sizeof(void *) == POINTER_SIZE_64BIT) + { + psSRVData->sDriverInfo.ui8KMBitArch |= BUILD_ARCH_64BIT; + } + else + { + psSRVData->sDriverInfo.ui8KMBitArch |= BUILD_ARCH_32BIT; + } + } + + /* Masking out every option that is not kernel specific*/ + ui32ClientBuildOptions &= RGX_BUILD_OPTIONS_MASK_KM; + + /* + * Validate the build options + */ + ui32BuildOptions = (RGX_BUILD_OPTIONS_KM); + if (ui32BuildOptions != ui32ClientBuildOptions) + { + ui32BuildOptionsMismatch = ui32BuildOptions ^ ui32ClientBuildOptions; +#if !defined(PVRSRV_STRICT_COMPAT_CHECK) + /*Mask the debug flag option out as we do support combinations of debug vs release in um & km*/ + ui32BuildOptionsMismatch &= OPTIONS_STRICT; +#endif + if ( (ui32ClientBuildOptions & ui32BuildOptionsMismatch) != 0) + { + PVR_LOG(("(FAIL) %s: Mismatch in client-side and KM driver build options; " + "extra options present in client-side driver: (0x%x). Please check rgx_options.h", + __func__, + ui32ClientBuildOptions & ui32BuildOptionsMismatch )); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH, chk_exit); + } + + if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0) + { + PVR_LOG(("(FAIL) %s: Mismatch in client-side and KM driver build options; " + "extra options present in KM driver: (0x%x). 
Please check rgx_options.h", + __func__, + ui32BuildOptions & ui32BuildOptionsMismatch )); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH, chk_exit); + } + if (IMG_FALSE == bIsFirstConnection) + { + PVR_LOG(("%s: COMPAT_TEST: Client-side (0x%04x) (%s) and KM driver (0x%04x) (%s) build options differ.", + __func__, + ui32ClientBuildOptions, + (psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildType)?"release":"debug", + ui32BuildOptions, + (psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildType)?"release":"debug")); + }else{ + PVR_DPF((PVR_DBG_WARNING, "%s: COMPAT_TEST: Client-side (0x%04x) and KM driver (0x%04x) build options differ.", + __func__, + ui32ClientBuildOptions, + ui32BuildOptions)); + + } + if (!psSRVData->sDriverInfo.bIsNoMatch) + psSRVData->sDriverInfo.bIsNoMatch = IMG_TRUE; + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: Client-side and KM driver build options match. [ OK ]", __func__)); + } + + /* + * Validate DDK version + */ + if (ui32ClientDDKVersion != ui32DDKVersion) + { + if (!psSRVData->sDriverInfo.bIsNoMatch) + psSRVData->sDriverInfo.bIsNoMatch = IMG_TRUE; + PVR_LOG(("(FAIL) %s: Incompatible driver DDK version (%u.%u) / client DDK version (%u.%u).", + __func__, + PVRVERSION_MAJ, PVRVERSION_MIN, + PVRVERSION_UNPACK_MAJ(ui32ClientDDKVersion), + PVRVERSION_UNPACK_MIN(ui32ClientDDKVersion))); + PVR_DBG_BREAK; + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DDK_VERSION_MISMATCH, chk_exit); + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: driver DDK version (%u.%u) and client DDK version (%u.%u) match. [ OK ]", + __func__, + PVRVERSION_MAJ, PVRVERSION_MIN, PVRVERSION_MAJ, PVRVERSION_MIN)); + } + + /* Create stream for every connection except for the special clients + * that don't need it e.g.: recipients of HWPerf data. 
*/ + if (!(psConnection->ui32ClientFlags & SRV_NO_HWPERF_CLIENT_STREAM)) + { + IMG_CHAR acStreamName[PRVSRVTL_MAX_STREAM_NAME_SIZE]; + OSSNPrintf(acStreamName, PRVSRVTL_MAX_STREAM_NAME_SIZE, + PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM_FMTSPEC, + psDeviceNode->sDevId.i32OsDeviceID, + psConnection->pid); + + eError = TLStreamCreate(&psConnection->hClientTLStream, + acStreamName, + PVRSRV_CLIENT_TL_STREAM_SIZE_DEFAULT, + TL_OPMODE_DROP_NEWER | + TL_FLAG_ALLOCATE_ON_FIRST_OPEN, + NULL, NULL, NULL, NULL); + if (eError != PVRSRV_OK && eError != PVRSRV_ERROR_ALREADY_EXISTS) + { + PVR_LOG_ERROR(eError, "TLStreamCreate"); + psConnection->hClientTLStream = NULL; + } + else if (eError == PVRSRV_OK) + { + /* Set "tlctrl" stream as a notification channel. This channel is + * is used to notify recipients about stream open/close (by writer) + * actions (and possibly other actions in the future). */ + eError = TLStreamSetNotifStream(psConnection->hClientTLStream, + psSRVData->hTLCtrlStream); + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "TLStreamSetNotifStream"); + TLStreamClose(psConnection->hClientTLStream); + psConnection->hClientTLStream = NULL; + } + } + + /* Reset error status. We don't want to propagate any errors from here. */ + eError = PVRSRV_OK; + PVR_DPF((PVR_DBG_MESSAGE, "Created stream \"%s\".", acStreamName)); + } + + /* + * Validate DDK build + */ + if (ui32ClientDDKBuild != ui32DDKBuild) + { + if (!psSRVData->sDriverInfo.bIsNoMatch) + psSRVData->sDriverInfo.bIsNoMatch = IMG_TRUE; + PVR_DPF((PVR_DBG_WARNING, "%s: Mismatch in driver DDK revision (%d) / client DDK revision (%d).", + __func__, ui32DDKBuild, ui32ClientDDKBuild)); +#if defined(PVRSRV_STRICT_COMPAT_CHECK) + PVR_DBG_BREAK; + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DDK_BUILD_MISMATCH, chk_exit); +#endif + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: driver DDK revision (%d) and client DDK revision (%d) match. 
[ OK ]", + __func__, ui32DDKBuild, ui32ClientDDKBuild)); + } + + /* Success so far so is it the PDump client that is connecting? */ + if (ui32Flags & SRV_FLAGS_PDUMPCTRL) + { + PDumpConnectionNotify(); + } + + PVR_ASSERT(pui8KernelArch != NULL); + + if (psSRVData->sDriverInfo.ui8KMBitArch & BUILD_ARCH_64BIT) + { + *pui8KernelArch = 64; + } + else + { + *pui8KernelArch = 32; + } + + bIsFirstConnection = IMG_TRUE; + +#if defined(DEBUG_BRIDGE_KM) + { + int ii; + + /* dump dispatch table offset lookup table */ + PVR_DPF((PVR_DBG_MESSAGE, "%s: g_BridgeDispatchTableStartOffsets[0-%lu] entries:", __func__, BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT - 1)); + for (ii=0; ii < BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT; ii++) + { + PVR_DPF((PVR_DBG_MESSAGE, "g_BridgeDispatchTableStartOffsets[%d]: %u", ii, g_BridgeDispatchTableStartOffsets[ii][PVR_DISPATCH_OFFSET_FIRST_FUNC])); + } + } +#endif + +#if defined(PDUMP) + if (!(ui32Flags & SRV_FLAGS_PDUMPCTRL)) + { + IMG_UINT64 ui64PDumpState = 0; + + PDumpGetStateKM(&ui64PDumpState); + if (ui64PDumpState & PDUMP_STATE_CONNECTED) + { + *pui32CapabilityFlags |= PVRSRV_PDUMP_IS_RECORDING; + } + } +#endif + +chk_exit: + return eError; +} + +PVRSRV_ERROR +PVRSRVDisconnectKM(void) +{ +#if defined(INTEGRITY_OS) && defined(DEBUG_BRIDGE_KM) + PVRSRVPrintBridgeStats(); +#endif + /* just return OK, per-process data is cleaned up by resmgr */ + + return PVRSRV_OK; +} + +/**************************************************************************/ /*! +@Function PVRSRVAcquireGlobalEventObjectKM +@Description Acquire the global event object. 
+@Output phGlobalEventObject On success, points to the global event + object handle +@Return PVRSRV_ERROR PVRSRV_OK on success or an error + otherwise +*/ /***************************************************************************/ +PVRSRV_ERROR +PVRSRVAcquireGlobalEventObjectKM(IMG_HANDLE *phGlobalEventObject) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + + *phGlobalEventObject = psPVRSRVData->hGlobalEventObject; + + return PVRSRV_OK; +} + +/**************************************************************************/ /*! +@Function PVRSRVReleaseGlobalEventObjectKM +@Description Release the global event object. +@Output hGlobalEventObject Global event object handle +@Return PVRSRV_ERROR PVRSRV_OK on success or an error otherwise +*/ /***************************************************************************/ +PVRSRV_ERROR +PVRSRVReleaseGlobalEventObjectKM(IMG_HANDLE hGlobalEventObject) +{ + PVR_ASSERT(PVRSRVGetPVRSRVData()->hGlobalEventObject == hGlobalEventObject); + + return PVRSRV_OK; +} + +/* + PVRSRVDumpDebugInfoKM +*/ +PVRSRV_ERROR +PVRSRVDumpDebugInfoKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32VerbLevel) +{ + if (ui32VerbLevel > DEBUG_REQUEST_VERBOSITY_MAX) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + PVR_LOG(("User requested PVR debug info")); + + PVRSRVDebugRequest(psDeviceNode, ui32VerbLevel, NULL, NULL); + + return PVRSRV_OK; +} + +/* + PVRSRVGetDevClockSpeedKM +*/ +PVRSRV_ERROR +PVRSRVGetDevClockSpeedKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_PUINT32 pui32RGXClockSpeed) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVR_ASSERT(psDeviceNode->pfnDeviceClockSpeed != NULL); + + PVR_UNREFERENCED_PARAMETER(psConnection); + + eError = psDeviceNode->pfnDeviceClockSpeed(psDeviceNode, pui32RGXClockSpeed); + PVR_WARN_IF_ERROR(eError, "pfnDeviceClockSpeed"); + + return eError; +} + + +/* + PVRSRVHWOpTimeoutKM +*/ +PVRSRV_ERROR +PVRSRVHWOpTimeoutKM(CONNECTION_DATA *psConnection, + 
PVRSRV_DEVICE_NODE *psDeviceNode) +{ +#if defined(PVRSRV_RESET_ON_HWTIMEOUT) + PVR_LOG(("User requested OS reset")); + OSPanic(); +#endif + PVR_LOG(("HW operation timeout, dump server info")); + PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); + return PVRSRV_OK; +} + + +IMG_INT +DummyBW(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 *psBridgeIn, + IMG_UINT8 *psBridgeOut, + CONNECTION_DATA *psConnection) +{ + PVR_UNREFERENCED_PARAMETER(psBridgeIn); + PVR_UNREFERENCED_PARAMETER(psBridgeOut); + PVR_UNREFERENCED_PARAMETER(psConnection); + +#if defined(DEBUG_BRIDGE_KM) + PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: ui32DispatchTableEntry %u (%s) mapped to " + "Dummy Wrapper (probably not what you want!)", + __func__, ui32DispatchTableEntry, g_BridgeDispatchTable[ui32DispatchTableEntry].pszIOCName)); +#else + PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: ui32DispatchTableEntry %u mapped to " + "Dummy Wrapper (probably not what you want!)", + __func__, ui32DispatchTableEntry)); +#endif + return PVRSRV_ERROR_BRIDGE_ENOTTY; +} + +PVRSRV_ERROR PVRSRVAlignmentCheckKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32AlignChecksSize, + IMG_UINT32 aui32AlignChecks[]) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + +#if !defined(NO_HARDWARE) + + PVR_ASSERT(psDeviceNode->pfnAlignmentCheck != NULL); + return psDeviceNode->pfnAlignmentCheck(psDeviceNode, ui32AlignChecksSize, + aui32AlignChecks); + +#else + + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + PVR_UNREFERENCED_PARAMETER(ui32AlignChecksSize); + PVR_UNREFERENCED_PARAMETER(aui32AlignChecks); + + return PVRSRV_OK; + +#endif /* !defined(NO_HARDWARE) */ + +} + +PVRSRV_ERROR PVRSRVGetDeviceStatusKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 *pui32DeviceStatus) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + + /* First try to update the status. 
*/ + if (psDeviceNode->pfnUpdateHealthStatus != NULL) + { + PVRSRV_ERROR eError = psDeviceNode->pfnUpdateHealthStatus(psDeviceNode, + IMG_FALSE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetDeviceStatusKM: Failed to " + "check for device status (%d)", eError)); + + /* Return unknown status and error because we don't know what + * happened and if the status is valid. */ + *pui32DeviceStatus = PVRSRV_DEVICE_STATUS_UNKNOWN; + return eError; + } + } + + switch (OSAtomicRead(&psDeviceNode->eHealthStatus)) + { + case PVRSRV_DEVICE_HEALTH_STATUS_OK: + *pui32DeviceStatus = PVRSRV_DEVICE_STATUS_OK; + return PVRSRV_OK; + case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING: + *pui32DeviceStatus = PVRSRV_DEVICE_STATUS_NOT_RESPONDING; + return PVRSRV_OK; + case PVRSRV_DEVICE_HEALTH_STATUS_DEAD: + case PVRSRV_DEVICE_HEALTH_STATUS_FAULT: + case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED: + *pui32DeviceStatus = PVRSRV_DEVICE_STATUS_DEVICE_ERROR; + return PVRSRV_OK; + default: + *pui32DeviceStatus = PVRSRV_DEVICE_STATUS_UNKNOWN; + return PVRSRV_ERROR_INTERNAL_ERROR; + } +} + +PVRSRV_ERROR PVRSRVGetMultiCoreInfoKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32CapsSize, + IMG_UINT32 *pui32NumCores, + IMG_UINT64 *pui64Caps) +{ + PVRSRV_ERROR eError = PVRSRV_ERROR_NOT_SUPPORTED; + PVR_UNREFERENCED_PARAMETER(psConnection); + + if (psDeviceNode->pfnGetMultiCoreInfo != NULL) + { + eError = psDeviceNode->pfnGetMultiCoreInfo(psDeviceNode, ui32CapsSize, pui32NumCores, pui64Caps); + } + return eError; +} + + +/*! + * ***************************************************************************** + * @brief A wrapper for removing entries in the g_BridgeDispatchTable array. + * All this does is zero the entry to allow for a full table re-population + * later. 
+ * + * @param ui32BridgeGroup + * @param ui32Index + * + * @return + ********************************************************************************/ +void +UnsetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup, IMG_UINT32 ui32Index) +{ + ui32Index += g_BridgeDispatchTableStartOffsets[ui32BridgeGroup][PVR_DISPATCH_OFFSET_FIRST_FUNC]; + + g_BridgeDispatchTable[ui32Index].pfFunction = NULL; + g_BridgeDispatchTable[ui32Index].hBridgeLock = NULL; +#if defined(DEBUG_BRIDGE_KM) + g_BridgeDispatchTable[ui32Index].pszIOCName = NULL; + g_BridgeDispatchTable[ui32Index].pszFunctionName = NULL; + g_BridgeDispatchTable[ui32Index].pszBridgeLockName = NULL; + g_BridgeDispatchTable[ui32Index].ui32CallCount = 0; + g_BridgeDispatchTable[ui32Index].ui32CopyFromUserTotalBytes = 0; + g_BridgeDispatchTable[ui32Index].ui64TotalTimeNS = 0; + g_BridgeDispatchTable[ui32Index].ui64MaxTimeNS = 0; +#endif +} + +/*! + * ***************************************************************************** + * @brief A wrapper for filling in the g_BridgeDispatchTable array that does + * error checking. 
+ * + * @param ui32Index + * @param pszIOCName + * @param pfFunction + * @param pszFunctionName + * + * @return + ********************************************************************************/ +void +_SetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup, + IMG_UINT32 ui32Index, + const IMG_CHAR *pszIOCName, + BridgeWrapperFunction pfFunction, + const IMG_CHAR *pszFunctionName, + POS_LOCK hBridgeLock, + const IMG_CHAR *pszBridgeLockName) +{ + static IMG_UINT32 ui32PrevIndex = IMG_UINT32_MAX; /* -1 */ + +#if !defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE) && !defined(DEBUG_BRIDGE_KM) + PVR_UNREFERENCED_PARAMETER(pszFunctionName); + PVR_UNREFERENCED_PARAMETER(pszBridgeLockName); +#endif + + ui32Index += g_BridgeDispatchTableStartOffsets[ui32BridgeGroup][PVR_DISPATCH_OFFSET_FIRST_FUNC]; + +#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE) + /* Enable this to dump out the dispatch table entries */ + PVR_DPF((PVR_DBG_WARNING, "%s: g_BridgeDispatchTableStartOffsets[%d]=%d", __func__, ui32BridgeGroup, g_BridgeDispatchTableStartOffsets[ui32BridgeGroup][PVR_DISPATCH_OFFSET_FIRST_FUNC])); + PVR_DPF((PVR_DBG_WARNING, "%s: %d %s %s %s", __func__, ui32Index, pszIOCName, pszFunctionName, pszBridgeLockName)); +#endif + + /* Any gaps are sub-optimal in-terms of memory usage, but we are mainly + * interested in spotting any large gap of wasted memory that could be + * accidentally introduced. + * + * This will currently flag up any gaps > 5 entries. + * + * NOTE: This shouldn't be debug only since switching from debug->release + * etc is likely to modify the available ioctls and thus be a point where + * mistakes are exposed. This isn't run at a performance critical time. 
+ */ + if ((ui32PrevIndex != IMG_UINT32_MAX) && + ((ui32Index >= ui32PrevIndex + DISPATCH_TABLE_GAP_THRESHOLD) || + (ui32Index <= ui32PrevIndex))) + { +#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE) + PVR_DPF((PVR_DBG_WARNING, + "%s: There is a gap in the dispatch table between indices %u (%s) and %u (%s)", + __func__, ui32PrevIndex, g_BridgeDispatchTable[ui32PrevIndex].pszIOCName, + ui32Index, pszIOCName)); +#else + PVR_DPF((PVR_DBG_MESSAGE, + "%s: There is a gap in the dispatch table between indices %u and %u (%s)", + __func__, (IMG_UINT)ui32PrevIndex, (IMG_UINT)ui32Index, pszIOCName)); +#endif + } + + if (ui32Index >= BRIDGE_DISPATCH_TABLE_ENTRY_COUNT) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Index %u (%s) out of range", + __func__, (IMG_UINT)ui32Index, pszIOCName)); + +#if defined(DEBUG_BRIDGE_KM) + PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE_DISPATCH_TABLE_ENTRY_COUNT = %lu", + __func__, BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)); +#if defined(SUPPORT_RGX) + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST = %lu", + __func__, PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST)); + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST = %lu", + __func__, PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST)); + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST = %lu", + __func__, PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST)); + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_LAST = %lu", + __func__, PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_LAST)); + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_LAST = %lu", + __func__, PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_LAST)); + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST = %lu", + __func__, PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST)); + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST = %lu", + __func__, PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST)); + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST = %lu", + __func__, PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST)); + 
PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXTIMERQUERY_DISPATCH_LAST = %lu", + __func__, PVRSRV_BRIDGE_RGXTIMERQUERY_DISPATCH_LAST)); + + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGX_DISPATCH_LAST = %lu", + __func__, PVRSRV_BRIDGE_RGX_DISPATCH_LAST)); + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGX_LAST = %lu", + __func__, PVRSRV_BRIDGE_RGX_LAST)); +#endif + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_LAST = %lu", + __func__, PVRSRV_BRIDGE_LAST)); + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST = %lu", + __func__, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST)); + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST = %lu", + __func__, PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST)); + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST = %lu", + __func__, PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST)); +#endif + + OSPanic(); + } + + /* Panic if the previous entry has been overwritten as this is not allowed! + * NOTE: This shouldn't be debug only since switching from debug->release + * etc is likely to modify the available ioctls and thus be a point where + * mistakes are exposed. This isn't run at a performance critical time. + */ + if (g_BridgeDispatchTable[ui32Index].pfFunction) + { + if (g_BridgeDispatchTable[ui32Index].pfFunction != pfFunction) + { +#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE) + PVR_DPF((PVR_DBG_ERROR, + "%s: Adding dispatch table entry for %s clobbers an existing entry for %s (current pfn=<%p>, new pfn=<%p>)", + __func__, pszIOCName, g_BridgeDispatchTable[ui32Index].pszIOCName, + (void*)g_BridgeDispatchTable[ui32Index].pfFunction, (void*)pfFunction)); +#else + PVR_DPF((PVR_DBG_ERROR, + "%s: Adding dispatch table entry for %s clobbers an existing entry (index=%u). 
(current pfn=<%p>, new pfn=<%p>)", + __func__, pszIOCName, ui32Index, + (void*)g_BridgeDispatchTable[ui32Index].pfFunction, (void*)pfFunction)); + PVR_DPF((PVR_DBG_WARNING, "NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE may help debug this issue.")); +#endif + OSPanic(); + } + } + else + { + g_BridgeDispatchTable[ui32Index].pfFunction = pfFunction; + g_BridgeDispatchTable[ui32Index].hBridgeLock = hBridgeLock; +#if defined(DEBUG_BRIDGE_KM) + g_BridgeDispatchTable[ui32Index].pszIOCName = pszIOCName; + g_BridgeDispatchTable[ui32Index].pszFunctionName = pszFunctionName; + g_BridgeDispatchTable[ui32Index].pszBridgeLockName = pszBridgeLockName; + g_BridgeDispatchTable[ui32Index].ui32CallCount = 0; + g_BridgeDispatchTable[ui32Index].ui32CopyFromUserTotalBytes = 0; + g_BridgeDispatchTable[ui32Index].ui64TotalTimeNS = 0; + g_BridgeDispatchTable[ui32Index].ui64MaxTimeNS = 0; +#endif + } + + ui32PrevIndex = ui32Index; +} + +static PVRSRV_ERROR _BridgeBufferAlloc(void *pvPrivData, void **pvOut) +{ + PVR_UNREFERENCED_PARAMETER(pvPrivData); + + *pvOut = OSAllocZMem(PVRSRV_MAX_BRIDGE_IN_SIZE + + PVRSRV_MAX_BRIDGE_OUT_SIZE); + PVR_RETURN_IF_NOMEM(*pvOut); + + return PVRSRV_OK; +} + +static void _BridgeBufferFree(void *pvPrivData, void *pvFreeData) +{ + PVR_UNREFERENCED_PARAMETER(pvPrivData); + + OSFreeMem(pvFreeData); +} + +PVRSRV_ERROR BridgeDispatcherInit(void) +{ + PVRSRV_ERROR eError; + +#if defined(DEBUG_BRIDGE_KM) + eError = OSLockCreate(&g_hStatsLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", errorLockCreateFailed); +#endif + + eError = PVRSRVPoolCreate(_BridgeBufferAlloc, + _BridgeBufferFree, + PVRSRV_MAX_POOLED_BRIDGE_BUFFERS, + "Bridge buffer pool", + NULL, + &g_psBridgeBufferPool); + PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVPoolCreate", erroPoolCreateFailed); + + return PVRSRV_OK; + +erroPoolCreateFailed: +#if defined(DEBUG_BRIDGE_KM) + OSLockDestroy(g_hStatsLock); + g_hStatsLock = NULL; +errorLockCreateFailed: +#endif + return eError; +} + +void 
BridgeDispatcherDeinit(void) +{ + if (g_psBridgeBufferPool) + { + PVRSRVPoolDestroy(g_psBridgeBufferPool); + g_psBridgeBufferPool = NULL; + } + +#if defined(DEBUG_BRIDGE_KM) + if (g_hStatsLock) + { + OSLockDestroy(g_hStatsLock); + g_hStatsLock = NULL; + } +#endif +} + +PVRSRV_ERROR BridgedDispatchKM(CONNECTION_DATA * psConnection, + PVRSRV_BRIDGE_PACKAGE * psBridgePackageKM) +{ + + void * psBridgeIn=NULL; + void * psBridgeOut=NULL; + BridgeWrapperFunction pfBridgeHandler; + IMG_UINT32 ui32DispatchTableEntry, ui32GroupBoundary; + PVRSRV_ERROR err = PVRSRV_OK; + PVRSRV_POOL_TOKEN hBridgeBufferPoolToken = NULL; + IMG_UINT32 ui32Timestamp = OSClockus(); +#if defined(DEBUG_BRIDGE_KM) + IMG_UINT64 ui64TimeStart; + IMG_UINT64 ui64TimeEnd; + IMG_UINT64 ui64TimeDiff; +#endif + IMG_UINT32 ui32DispatchTableIndex, ui32DispatchTableEntryIndex; + +#if defined(DEBUG_BRIDGE_KM_STOP_AT_DISPATCH) + PVR_DBG_BREAK; +#endif + + if (psBridgePackageKM->ui32BridgeID >= BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Out of range dispatch table group ID: %d", + __func__, psBridgePackageKM->ui32BridgeID)); + PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_EINVAL, return_error); + } + + ui32DispatchTableIndex = OSConfineArrayIndexNoSpeculation(psBridgePackageKM->ui32BridgeID, BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT); + + ui32DispatchTableEntry = g_BridgeDispatchTableStartOffsets[ui32DispatchTableIndex][PVR_DISPATCH_OFFSET_FIRST_FUNC]; + ui32GroupBoundary = g_BridgeDispatchTableStartOffsets[ui32DispatchTableIndex][PVR_DISPATCH_OFFSET_LAST_FUNC]; + + /* bridge function is not implemented in this build */ + if (0 == ui32DispatchTableEntry) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Dispatch table entry=%d, boundary = %d, (bridge module %d, function %d)", + __func__, + ui32DispatchTableEntry, + ui32GroupBoundary, + psBridgePackageKM->ui32BridgeID, + psBridgePackageKM->ui32FunctionID)); + /* this points to DummyBW() which returns PVRSRV_ERROR_ENOTTY */ + err = 
g_BridgeDispatchTable[ui32DispatchTableEntry].pfFunction(ui32DispatchTableEntry, + psBridgeIn, + psBridgeOut, + psConnection); + goto return_error; + } + if ((ui32DispatchTableEntry + psBridgePackageKM->ui32FunctionID) > ui32GroupBoundary) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Dispatch table entry=%d, boundary = %d, (bridge module %d, function %d)", + __func__, + ui32DispatchTableEntry, + ui32GroupBoundary, + psBridgePackageKM->ui32BridgeID, + psBridgePackageKM->ui32FunctionID)); + PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_EINVAL, return_error); + } + ui32DispatchTableEntry += psBridgePackageKM->ui32FunctionID; + ui32DispatchTableEntryIndex = OSConfineArrayIndexNoSpeculation(ui32DispatchTableEntry, ui32GroupBoundary+1); + if (BRIDGE_DISPATCH_TABLE_ENTRY_COUNT <= ui32DispatchTableEntry) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Dispatch table entry=%d, entry count = %lu," + " (bridge module %d, function %d)", __func__, + ui32DispatchTableEntry, BRIDGE_DISPATCH_TABLE_ENTRY_COUNT, + psBridgePackageKM->ui32BridgeID, + psBridgePackageKM->ui32FunctionID)); + PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_EINVAL, return_error); + } +#if defined(DEBUG_BRIDGE_KM) + PVR_DPF((PVR_DBG_MESSAGE, "%s: Dispatch table entry index=%d, (bridge module %d, function %d)", + __func__, + ui32DispatchTableEntryIndex, psBridgePackageKM->ui32BridgeID, psBridgePackageKM->ui32FunctionID)); + PVR_DPF((PVR_DBG_MESSAGE, "%s: %s", + __func__, + g_BridgeDispatchTable[ui32DispatchTableEntryIndex].pszIOCName)); + g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui32CallCount++; + g_BridgeGlobalStats.ui32IOCTLCount++; +#endif + + if (g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock != NULL) + { + OSLockAcquire(g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock); + } +#if !defined(INTEGRITY_OS) + /* try to acquire a bridge buffer from the pool */ + + err = PVRSRVPoolGet(g_psBridgeBufferPool, + &hBridgeBufferPoolToken, + &psBridgeIn); + PVR_LOG_GOTO_IF_ERROR(err, "PVRSRVPoolGet", 
unlock_and_return_error); + + psBridgeOut = ((IMG_BYTE *) psBridgeIn) + PVRSRV_MAX_BRIDGE_IN_SIZE; +#endif + +#if defined(DEBUG_BRIDGE_KM) + ui64TimeStart = OSClockns64(); +#endif + + if (psBridgePackageKM->ui32InBufferSize > PVRSRV_MAX_BRIDGE_IN_SIZE) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Bridge input buffer too small " + "(data size %u, buffer size %u)!", __func__, + psBridgePackageKM->ui32InBufferSize, PVRSRV_MAX_BRIDGE_IN_SIZE)); + PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_ERANGE, unlock_and_return_error); + } + +#if !defined(INTEGRITY_OS) + if (psBridgePackageKM->ui32OutBufferSize > PVRSRV_MAX_BRIDGE_OUT_SIZE) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Bridge output buffer too small " + "(data size %u, buffer size %u)!", __func__, + psBridgePackageKM->ui32OutBufferSize, PVRSRV_MAX_BRIDGE_OUT_SIZE)); + PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_ERANGE, unlock_and_return_error); + } + + if ((CopyFromUserWrapper (psConnection, + ui32DispatchTableEntryIndex, + psBridgeIn, + psBridgePackageKM->pvParamIn, + psBridgePackageKM->ui32InBufferSize) != PVRSRV_OK) +#if defined(__QNXNTO__) +/* For Neutrino, the output bridge buffer acts as an input as well */ + || (CopyFromUserWrapper(psConnection, + ui32DispatchTableEntryIndex, + psBridgeOut, + (void *)((uintptr_t)psBridgePackageKM->pvParamIn + psBridgePackageKM->ui32InBufferSize), + psBridgePackageKM->ui32OutBufferSize) != PVRSRV_OK) +#endif + ) /* end of if-condition */ + { + PVR_LOG_GOTO_WITH_ERROR("CopyFromUserWrapper", err, PVRSRV_ERROR_BRIDGE_EFAULT, unlock_and_return_error); + } +#else + psBridgeIn = psBridgePackageKM->pvParamIn; + psBridgeOut = psBridgePackageKM->pvParamOut; +#endif + + pfBridgeHandler = + (BridgeWrapperFunction)g_BridgeDispatchTable[ui32DispatchTableEntryIndex].pfFunction; + + if (pfBridgeHandler == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: ui32DispatchTableEntry = %d is not a registered function!", + __func__, ui32DispatchTableEntry)); + PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_EFAULT, 
unlock_and_return_error); + } + + /* pfBridgeHandler functions do not fail and return an IMG_INT. + * The value returned is either 0 or PVRSRV_OK (0). + * In the event this changes an error may be +ve or -ve, + * so try to return something consistent here. + */ + if (0 != pfBridgeHandler(ui32DispatchTableEntryIndex, + psBridgeIn, + psBridgeOut, + psConnection) + ) + { + PVR_LOG_GOTO_WITH_ERROR("pfBridgeHandler", err, PVRSRV_ERROR_BRIDGE_EPERM, unlock_and_return_error); + } + + /* + This should always be true as a.t.m. all bridge calls have to + return an error message, but this could change so we do this + check to be safe. + */ +#if !defined(INTEGRITY_OS) + if (psBridgePackageKM->ui32OutBufferSize > 0) + { + if (CopyToUserWrapper (psConnection, + ui32DispatchTableEntryIndex, + psBridgePackageKM->pvParamOut, + psBridgeOut, + psBridgePackageKM->ui32OutBufferSize) != PVRSRV_OK) + { + PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_EFAULT, unlock_and_return_error); + } + } +#endif + +#if defined(DEBUG_BRIDGE_KM) + ui64TimeEnd = OSClockns64(); + + ui64TimeDiff = ui64TimeEnd - ui64TimeStart; + + /* if there is no lock held then acquire the stats lock to + * ensure the calculations are done safely + */ + if (g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock == NULL) + { + BridgeGlobalStatsLock(); + } + + g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui64TotalTimeNS += ui64TimeDiff; + + if (ui64TimeDiff > g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui64MaxTimeNS) + { + g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui64MaxTimeNS = ui64TimeDiff; + } + + if (g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock == NULL) + { + BridgeGlobalStatsUnlock(); + } +#endif + +unlock_and_return_error: + + if (g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock != NULL) + { + OSLockRelease(g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock); + } + +#if !defined(INTEGRITY_OS) + if (hBridgeBufferPoolToken != NULL) + { + err = 
PVRSRVPoolPut(g_psBridgeBufferPool, + hBridgeBufferPoolToken); + PVR_LOG_IF_ERROR(err, "PVRSRVPoolPut"); + } +#endif + +return_error: + if (err) + { + PVR_DPF((PVR_DBG_ERROR, "%s: returning (err = %d)", __func__, err)); + } + /* ignore transport layer bridge to avoid HTB flooding */ + if (psBridgePackageKM->ui32BridgeID != PVRSRV_BRIDGE_PVRTL) + { + if (err) + { + HTBLOGK(HTB_SF_BRG_BRIDGE_CALL_ERR, ui32Timestamp, + psBridgePackageKM->ui32BridgeID, + psBridgePackageKM->ui32FunctionID, err); + } + else + { + HTBLOGK(HTB_SF_BRG_BRIDGE_CALL, ui32Timestamp, + psBridgePackageKM->ui32BridgeID, + psBridgePackageKM->ui32FunctionID); + } + } + + return err; +} + +PVRSRV_ERROR PVRSRVFindProcessMemStatsKM(IMG_PID pid, IMG_UINT32 ui32ArrSize, IMG_BOOL bAllProcessStats, IMG_UINT32 *pui32MemStatArray) +{ +#if !defined(__QNXNTO__) + return PVRSRVFindProcessMemStats(pid, + ui32ArrSize, + bAllProcessStats, + pui32MemStatArray); +#else + PVR_DPF((PVR_DBG_ERROR, "This functionality is not yet implemented for this platform")); + + return PVRSRV_ERROR_NOT_SUPPORTED; +#endif + +} diff --git a/drivers/gpu/drm/phytium/octopus/srvcore.h b/drivers/gpu/drm/phytium/octopus/srvcore.h new file mode 100644 index 000000000000..64e8685b3f98 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/srvcore.h @@ -0,0 +1,216 @@ +/**************************************************************************/ /*! +@File +@Title PVR Bridge Functionality +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Header for the PVR Bridge code +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /***************************************************************************/ + +#ifndef SRVCORE_H +#define SRVCORE_H + +#include "lock_types.h" +#include "connection_server.h" +#include "pvr_debug.h" + +#include "pvr_bridge.h" +#if defined(SUPPORT_RGX) +#include "rgx_bridge.h" +#endif + +PVRSRV_ERROR +CopyFromUserWrapper(CONNECTION_DATA *psConnection, + IMG_UINT32 ui32DispatchTableEntry, + void *pvDest, + void __user *pvSrc, + IMG_UINT32 ui32Size); +PVRSRV_ERROR +CopyToUserWrapper(CONNECTION_DATA *psConnection, + IMG_UINT32 ui32DispatchTableEntry, + void __user *pvDest, + void *pvSrc, + IMG_UINT32 ui32Size); + +IMG_INT +DummyBW(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 *psBridgeIn, + IMG_UINT8 *psBridgeOut, + CONNECTION_DATA *psConnection); + +typedef IMG_INT (*BridgeWrapperFunction)(IMG_UINT32 ui32DispatchTableEntry, + IMG_UINT8 *psBridgeIn, + IMG_UINT8 *psBridgeOut, + CONNECTION_DATA *psConnection); + +typedef struct _PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY +{ + BridgeWrapperFunction pfFunction; /*!< The wrapper function that validates the ioctl + arguments before calling into srvkm proper */ + POS_LOCK hBridgeLock; /*!< The bridge lock which needs to be acquired + before calling the above wrapper */ +#if defined(DEBUG_BRIDGE_KM) + const IMG_CHAR *pszIOCName; /*!< Name of the ioctl: e.g. "PVRSRV_BRIDGE_CONNECT_SERVICES" */ + const IMG_CHAR *pszFunctionName; /*!< Name of the wrapper function: e.g. 
"PVRSRVConnectBW" */ + const IMG_CHAR *pszBridgeLockName; /*!< Name of bridge lock which will be acquired */ + IMG_UINT32 ui32CallCount; /*!< The total number of times the ioctl has been called */ + IMG_UINT32 ui32CopyFromUserTotalBytes; /*!< The total number of bytes copied from + userspace within this ioctl */ + IMG_UINT32 ui32CopyToUserTotalBytes; /*!< The total number of bytes copied from + userspace within this ioctl */ + IMG_UINT64 ui64TotalTimeNS; /*!< The total amount of time spent in this bridge function */ + IMG_UINT64 ui64MaxTimeNS; /*!< The maximum amount of time for a single call to this bridge function */ +#endif +}PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY; + +#if defined(SUPPORT_RGX) + #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_RGX_DISPATCH_LAST+1) + #define BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT (PVRSRV_BRIDGE_RGX_LAST+1) +#else + #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_DISPATCH_LAST+1) + #define BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT (PVRSRV_BRIDGE_LAST+1) +#endif + +extern PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT]; + +void BridgeDispatchTableStartOffsetsInit(void); + +void +_SetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup, + IMG_UINT32 ui32Index, + const IMG_CHAR *pszIOCName, + BridgeWrapperFunction pfFunction, + const IMG_CHAR *pszFunctionName, + POS_LOCK hBridgeLock, + const IMG_CHAR* pszBridgeLockName); +void +UnsetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup, + IMG_UINT32 ui32Index); + + +/* PRQA S 0884,3410 2*/ /* macro relies on the lack of brackets */ +#define SetDispatchTableEntry(ui32BridgeGroup, ui32Index, pfFunction,\ + hBridgeLock) \ + _SetDispatchTableEntry(ui32BridgeGroup, ui32Index, #ui32Index, (BridgeWrapperFunction)pfFunction, #pfFunction,\ + (POS_LOCK)hBridgeLock, #hBridgeLock) + +#define DISPATCH_TABLE_GAP_THRESHOLD 5 + + +#if defined(DEBUG_BRIDGE_KM) +typedef struct _PVRSRV_BRIDGE_GLOBAL_STATS +{ + IMG_UINT32 ui32IOCTLCount; + IMG_UINT32 
ui32TotalCopyFromUserBytes; + IMG_UINT32 ui32TotalCopyToUserBytes; +} PVRSRV_BRIDGE_GLOBAL_STATS; + +void BridgeGlobalStatsLock(void); +void BridgeGlobalStatsUnlock(void); + +/* OS specific code may want to report the stats held here and within the + * BRIDGE_DISPATCH_TABLE_ENTRYs (E.g. on Linux we report these via a + * debugfs entry /sys/kernel/debug/pvr/bridge_stats) */ +extern PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats; +#endif + +PVRSRV_ERROR BridgeDispatcherInit(void); +void BridgeDispatcherDeinit(void); + +PVRSRV_ERROR +BridgedDispatchKM(CONNECTION_DATA * psConnection, + PVRSRV_BRIDGE_PACKAGE * psBridgePackageKM); + +PVRSRV_ERROR +PVRSRVConnectKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 ui32Flags, + IMG_UINT32 ui32ClientBuildOptions, + IMG_UINT32 ui32ClientDDKVersion, + IMG_UINT32 ui32ClientDDKBuild, + IMG_UINT8 *pui8KernelArch, + IMG_UINT32 *ui32CapabilityFlags, + IMG_UINT64 *ui64PackedBvnc); + +PVRSRV_ERROR +PVRSRVDisconnectKM(void); + +PVRSRV_ERROR +PVRSRVAcquireGlobalEventObjectKM(IMG_HANDLE *phGlobalEventObject); + +PVRSRV_ERROR +PVRSRVReleaseGlobalEventObjectKM(IMG_HANDLE hGlobalEventObject); + +PVRSRV_ERROR +PVRSRVDumpDebugInfoKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32VerbLevel); + +PVRSRV_ERROR +PVRSRVGetDevClockSpeedKM(CONNECTION_DATA * psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_PUINT32 pui32RGXClockSpeed); + +PVRSRV_ERROR +PVRSRVHWOpTimeoutKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode); + +PVRSRV_ERROR PVRSRVAlignmentCheckKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 ui32FWAlignChecksSize, + IMG_UINT32 aui32FWAlignChecks[]); + +PVRSRV_ERROR PVRSRVGetDeviceStatusKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 *pui32DeviceStatus); + +PVRSRV_ERROR PVRSRVGetMultiCoreInfoKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 
ui32CapsSize, + IMG_UINT32 *pui32NumCores, + IMG_UINT64 *pui64Caps); + +PVRSRV_ERROR PVRSRVFindProcessMemStatsKM(IMG_PID pid, + IMG_UINT32 ui32ArrSize, + IMG_BOOL bAllProcessStats, + IMG_UINT32 *ui32MemoryStats); + +#endif /* SRVCORE_H */ + +/****************************************************************************** + End of file (srvcore.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/srvinit.h b/drivers/gpu/drm/phytium/octopus/srvinit.h new file mode 100644 index 000000000000..48d09ebbada6 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/srvinit.h @@ -0,0 +1,68 @@ +/*************************************************************************/ /*! +@File +@Title Initialisation server internal header +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Defines the connections between the various parts of the + initialisation server. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef SRVINIT_H +#define SRVINIT_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#include "img_defs.h" +#include "pvrsrv_error.h" +#include "device_connection.h" +#include "device.h" + +#if defined(SUPPORT_RGX) +PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode); +#endif + +#if defined(__cplusplus) +} +#endif +#endif /* SRVINIT_H */ + +/****************************************************************************** + End of file (srvinit.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/srvkm.h b/drivers/gpu/drm/phytium/octopus/srvkm.h new file mode 100644 index 000000000000..59e99cce604e --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/srvkm.h @@ -0,0 +1,145 @@ +/**************************************************************************/ /*! +@File +@Title Services kernel module internal header file +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /***************************************************************************/ + +#ifndef SRVKM_H +#define SRVKM_H + +#include "servicesext.h" + +#if defined(__KERNEL__) && defined(__linux__) && !defined(__GENKSYMS__) +#define __pvrsrv_defined_struct_enum__ +#include +#endif + +struct _PVRSRV_DEVICE_NODE_; + +/*************************************************************************/ /*! +@Function PVRSRVCommonDriverInit +@Description Performs one time driver initialisation of Services Common and + Device layers. +@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise +*/ /**************************************************************************/ +PVRSRV_ERROR PVRSRVCommonDriverInit(void); + +/*************************************************************************/ /*! 
+@Function PVRSRVCommonDriverInit +@Description Performs one time driver de-initialisation of Services. +@Return void +*/ /**************************************************************************/ +void PVRSRVCommonDriverDeInit(void); + +/*************************************************************************/ /*! +@Function PVRSRVCommonDeviceCreate +@Description Creates and initialises a common layer Services device node + for an OS native device. First stage device discovery. +@Input pvOSDevice OS native device +@Input i32OsDeviceID A unique identifier which helps recognise this + Device in the UM space provided by the OS. +@Output ppsDeviceNode Points to the new device node on success +@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise +*/ /**************************************************************************/ +PVRSRV_ERROR +PVRSRVCommonDeviceCreate(void *pvOSDevice, IMG_INT32 i32OsDeviceID, + struct _PVRSRV_DEVICE_NODE_ **ppsDeviceNode); + +/*************************************************************************/ /*! +@Function PVRSRVCommonDeviceInitialise +@Description Initialises the device layer specifics (e.g. boot FW etc) + for the supplied device node, created previously by + PVRSRVCommonDeviceCreate. The device is ready for use when this + second stage device initialisation returns successfully. +@Input psDeviceNode Device node of the device to be initialised +@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise +*/ /**************************************************************************/ +PVRSRV_ERROR PVRSRVCommonDeviceInitialise(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); + +/*************************************************************************/ /*! +@Function PVRSRVCommonDeviceDestroy +@Description Destroys a PVR Services device node. 
+@Input psDeviceNode Device node to destroy +@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise +*/ /**************************************************************************/ +PVRSRV_ERROR +PVRSRVCommonDeviceDestroy(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); + +/****************** +HIGHER LEVEL MACROS +*******************/ + +/*---------------------------------------------------------------------------- +Repeats the body of the loop for a certain minimum time, or until the body +exits by its own means (break, return, goto, etc.) + +Example of usage: + +LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) +{ + if (psQueueInfo->ui32ReadOffset == psQueueInfo->ui32WriteOffset) + { + bTimeout = IMG_FALSE; + break; + } + + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); +} END_LOOP_UNTIL_TIMEOUT(); + +-----------------------------------------------------------------------------*/ + +/* uiNotLastLoop will remain at 1 until the timeout has expired, at which time + * it will be decremented and the loop executed one final time. This is + * necessary when preemption is enabled. + */ +/* PRQA S 3411,3431 12 */ /* critical format, leave alone */ +#define LOOP_UNTIL_TIMEOUT(TIMEOUT) \ +{\ + IMG_UINT32 uiOffset, uiStart, uiCurrent; \ + IMG_INT32 iNotLastLoop; \ + for (uiOffset = 0, uiStart = OSClockus(), uiCurrent = uiStart + 1, iNotLastLoop = 1;\ + ((uiCurrent - uiStart + uiOffset) < (TIMEOUT)) || iNotLastLoop--; \ + uiCurrent = OSClockus(), \ + uiOffset = uiCurrent < uiStart ? IMG_UINT32_MAX - uiStart : uiOffset, \ + uiStart = uiCurrent < uiStart ? 0 : uiStart) + +#define END_LOOP_UNTIL_TIMEOUT() \ +} + +#endif /* SRVKM_H */ diff --git a/drivers/gpu/drm/phytium/octopus/sync.c b/drivers/gpu/drm/phytium/octopus/sync.c new file mode 100644 index 000000000000..8a5b63bb59ad --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/sync.c @@ -0,0 +1,899 @@ +/*************************************************************************/ /*! 
+@File +@Title Services synchronisation interface +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements client side code for services synchronisation + interface +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ /**************************************************************************/ + +#include "img_types.h" +#include "img_defs.h" +#include "client_sync_bridge.h" +#include "client_synctracking_bridge.h" +#include "info_page_client.h" +#include "pvr_bridge.h" +#include "allocmem.h" +#include "osfunc.h" +#include "devicemem.h" +#include "devicemem_pdump.h" +#include "pvr_debug.h" +#include "dllist.h" +#include "sync.h" +#include "sync_internal.h" +#include "lock.h" +#include "log2.h" +#if defined(__KERNEL__) +#include "pvrsrv.h" +#endif + + +#define SYNC_BLOCK_LIST_CHUNCK_SIZE 10 + +/* + This defines the maximum amount of synchronisation memory + that can be allocated per SyncPrim context. + In reality this number is meaningless as we would run out + of synchronisation memory before we reach this limit, but + we need to provide a size to the span RA. 
+ */ +#define MAX_SYNC_MEM (4 * 1024 * 1024) + +/* forward declaration */ +static PVRSRV_ERROR +_SyncPrimSetValue(SYNC_PRIM *psSyncInt, IMG_UINT32 ui32Value); + +/* + Internal interfaces for management of SYNC_PRIM_CONTEXT + */ +static void +_SyncPrimContextUnref(SYNC_PRIM_CONTEXT *psContext) +{ + if (!OSAtomicRead(&psContext->hRefCount)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: context already freed", __func__)); + } + else if (0 == OSAtomicDecrement(&psContext->hRefCount)) + { + /* SyncPrimContextDestroy only when no longer referenced */ + RA_Delete(psContext->psSpanRA); + RA_Delete(psContext->psSubAllocRA); + OSFreeMem(psContext); + } +} + +static void +_SyncPrimContextRef(SYNC_PRIM_CONTEXT *psContext) +{ + if (!OSAtomicRead(&psContext->hRefCount)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: context use after free", __func__)); + } + else + { + OSAtomicIncrement(&psContext->hRefCount); + } +} + +/* + Internal interfaces for management of synchronisation block memory + */ +static PVRSRV_ERROR +AllocSyncPrimitiveBlock(SYNC_PRIM_CONTEXT *psContext, + SYNC_PRIM_BLOCK **ppsSyncBlock) +{ + SYNC_PRIM_BLOCK *psSyncBlk; + IMG_HANDLE hSyncPMR; + IMG_HANDLE hSyncImportHandle; + IMG_DEVMEM_SIZE_T uiImportSize; + PVRSRV_ERROR eError; + + psSyncBlk = OSAllocMem(sizeof(SYNC_PRIM_BLOCK)); + PVR_GOTO_IF_NOMEM(psSyncBlk, eError, fail_alloc); + + psSyncBlk->psContext = psContext; + + /* Allocate sync prim block */ + eError = BridgeAllocSyncPrimitiveBlock(GetBridgeHandle(psContext->hDevConnection), + &psSyncBlk->hServerSyncPrimBlock, + &psSyncBlk->ui32FirmwareAddr, + &psSyncBlk->ui32SyncBlockSize, + &hSyncPMR); + PVR_GOTO_IF_ERROR(eError, fail_blockalloc); + + /* Make it mappable by the client */ + eError = DevmemMakeLocalImportHandle(psContext->hDevConnection, + hSyncPMR, + &hSyncImportHandle); + PVR_GOTO_IF_ERROR(eError, fail_export); + + /* Get CPU mapping of the memory block */ + eError = DevmemLocalImport(psContext->hDevConnection, + hSyncImportHandle, + PVRSRV_MEMALLOCFLAG_CPU_READABLE, + 
&psSyncBlk->hMemDesc, + &uiImportSize, + "SyncPrimitiveBlock"); + + /* + Regardless of success or failure we "undo" the export + */ + DevmemUnmakeLocalImportHandle(psContext->hDevConnection, + hSyncImportHandle); + + PVR_GOTO_IF_ERROR(eError, fail_import); + + eError = DevmemAcquireCpuVirtAddr(psSyncBlk->hMemDesc, + (void **) &psSyncBlk->pui32LinAddr); + PVR_GOTO_IF_ERROR(eError, fail_cpuvaddr); + + *ppsSyncBlock = psSyncBlk; + return PVRSRV_OK; + +fail_cpuvaddr: + DevmemFree(psSyncBlk->hMemDesc); +fail_import: +fail_export: + BridgeFreeSyncPrimitiveBlock(GetBridgeHandle(psContext->hDevConnection), + psSyncBlk->hServerSyncPrimBlock); +fail_blockalloc: + OSFreeMem(psSyncBlk); +fail_alloc: + return eError; +} + +static void +FreeSyncPrimitiveBlock(SYNC_PRIM_BLOCK *psSyncBlk) +{ + SYNC_PRIM_CONTEXT *psContext = psSyncBlk->psContext; + + DevmemReleaseCpuVirtAddr(psSyncBlk->hMemDesc); + DevmemFree(psSyncBlk->hMemDesc); + BridgeFreeSyncPrimitiveBlock(GetBridgeHandle(psContext->hDevConnection), + psSyncBlk->hServerSyncPrimBlock); + OSFreeMem(psSyncBlk); +} + +static PVRSRV_ERROR +SyncPrimBlockImport(RA_PERARENA_HANDLE hArena, + RA_LENGTH_T uSize, + RA_FLAGS_T uFlags, + const IMG_CHAR *pszAnnotation, + RA_BASE_T *puiBase, + RA_LENGTH_T *puiActualSize, + RA_PERISPAN_HANDLE *phImport) +{ + SYNC_PRIM_CONTEXT *psContext = hArena; + SYNC_PRIM_BLOCK *psSyncBlock = NULL; + RA_LENGTH_T uiSpanSize; + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(uFlags); + + /* Check we've not been called with an unexpected size */ + PVR_LOG_GOTO_IF_INVALID_PARAM(hArena, eError, e0); + PVR_LOG_GOTO_IF_INVALID_PARAM(uSize == sizeof(IMG_UINT32), eError, e0); + + /* + Ensure the synprim context doesn't go away while we have sync blocks + attached to it + */ + _SyncPrimContextRef(psContext); + + /* Allocate the block of memory */ + eError = AllocSyncPrimitiveBlock(psContext, &psSyncBlock); + PVR_LOG_GOTO_IF_ERROR(eError, "AllocSyncPrimitiveBlock", fail_syncblockalloc); + + /* Allocate a span for it 
*/ + eError = RA_Alloc(psContext->psSpanRA, + psSyncBlock->ui32SyncBlockSize, + RA_NO_IMPORT_MULTIPLIER, + 0, + psSyncBlock->ui32SyncBlockSize, + pszAnnotation, + &psSyncBlock->uiSpanBase, + &uiSpanSize, + NULL); + PVR_GOTO_IF_ERROR(eError, fail_spanalloc); + + /* + There is no reason the span RA should return an allocation larger + then we request + */ + PVR_ASSERT(uiSpanSize == psSyncBlock->ui32SyncBlockSize); + + *puiBase = psSyncBlock->uiSpanBase; + *puiActualSize = psSyncBlock->ui32SyncBlockSize; + *phImport = psSyncBlock; + return PVRSRV_OK; + +fail_spanalloc: + FreeSyncPrimitiveBlock(psSyncBlock); +fail_syncblockalloc: + _SyncPrimContextUnref(psContext); +e0: + return eError; +} + +static void +SyncPrimBlockUnimport(RA_PERARENA_HANDLE hArena, + RA_BASE_T uiBase, + RA_PERISPAN_HANDLE hImport) +{ + SYNC_PRIM_CONTEXT *psContext = hArena; + SYNC_PRIM_BLOCK *psSyncBlock = hImport; + + if (!psContext || !psSyncBlock || uiBase != psSyncBlock->uiSpanBase) + { + /* Invalid input params */ + return; + } + + /* Free the span this import is using */ + RA_Free(psContext->psSpanRA, uiBase); + + /* Free the syncpim block */ + FreeSyncPrimitiveBlock(psSyncBlock); + + /* Drop our reference to the syncprim context */ + _SyncPrimContextUnref(psContext); +} + +static INLINE IMG_UINT32 SyncPrimGetOffset(SYNC_PRIM *psSyncInt) +{ + IMG_UINT64 ui64Temp; + + PVR_ASSERT(psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL); + + ui64Temp = psSyncInt->u.sLocal.uiSpanAddr - psSyncInt->u.sLocal.psSyncBlock->uiSpanBase; + PVR_ASSERT(ui64Tempu.sLocal.psSyncBlock; + + psSyncInt->sCommon.pui32LinAddr = psSyncBlock->pui32LinAddr + + (SyncPrimGetOffset(psSyncInt)/sizeof(IMG_UINT32)); +} + +static void SyncPrimLocalFree(SYNC_PRIM *psSyncInt, IMG_BOOL bFreeFirstSyncPrim) +{ + SYNC_PRIM_BLOCK *psSyncBlock; + SYNC_PRIM_CONTEXT *psContext; + + psSyncBlock = psSyncInt->u.sLocal.psSyncBlock; + psContext = psSyncBlock->psContext; + +#if !defined(LOCAL_SYNC_BLOCK_RETAIN_FIRST) + 
PVR_UNREFERENCED_PARAMETER(bFreeFirstSyncPrim); +#else + /* Defer freeing the first allocated sync prim in the sync context */ + if (psSyncInt != psContext->hFirstSyncPrim || (psSyncInt == psContext->hFirstSyncPrim && bFreeFirstSyncPrim)) +#endif + { + PVRSRV_ERROR eError; + IMG_HANDLE hBridge = + GetBridgeHandle(psSyncInt->u.sLocal.psSyncBlock->psContext->hDevConnection); + + if (GetInfoPageDebugFlags(psSyncInt->u.sLocal.psSyncBlock->psContext->hDevConnection) & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) + { + if (psSyncInt->u.sLocal.hRecord) + { + /* remove this sync record */ + eError = BridgeSyncRecordRemoveByHandle(hBridge, + psSyncInt->u.sLocal.hRecord); + } + } + else + { + IMG_UINT32 ui32FWAddr = psSyncBlock->ui32FirmwareAddr + + SyncPrimGetOffset(psSyncInt); + + eError = BridgeSyncFreeEvent(hBridge, ui32FWAddr); + PVR_LOG_IF_ERROR(eError, "BridgeSyncFreeEvent"); + } +#if defined(PVRSRV_ENABLE_SYNC_POISONING) + (void) _SyncPrimSetValue(psSyncInt, LOCAL_SYNC_PRIM_POISON_VALUE); +#else + /* reset the sync prim value as it is freed. + * this guarantees the client sync allocated to the client will + * have a value of zero and the client does not need to + * explicitly initialise the sync value to zero. + * the allocation of the backing memory for the sync prim block + * is done with ZERO_ON_ALLOC so the memory is initially all zero. 
+ */ + (void) _SyncPrimSetValue(psSyncInt, LOCAL_SYNC_PRIM_RESET_VALUE); +#endif + + RA_Free(psContext->psSubAllocRA, psSyncInt->u.sLocal.uiSpanAddr); + OSFreeMem(psSyncInt); + _SyncPrimContextUnref(psContext); + } +} + +static void SyncPrimLocalUnref(SYNC_PRIM *psSyncInt) +{ + if (!OSAtomicRead(&psSyncInt->u.sLocal.hRefCount)) + { + PVR_DPF((PVR_DBG_ERROR, "SyncPrimLocalUnref sync already freed")); + } + else if (0 == OSAtomicDecrement(&psSyncInt->u.sLocal.hRefCount)) + { + SyncPrimLocalFree(psSyncInt, IMG_FALSE); + } +} + +static IMG_UINT32 SyncPrimGetFirmwareAddrLocal(SYNC_PRIM *psSyncInt) +{ + SYNC_PRIM_BLOCK *psSyncBlock; + + psSyncBlock = psSyncInt->u.sLocal.psSyncBlock; + return psSyncBlock->ui32FirmwareAddr + SyncPrimGetOffset(psSyncInt); +} + +static INLINE IMG_UINT32 _Log2(IMG_UINT32 ui32Align) +{ + PVR_ASSERT(IsPower2(ui32Align)); + return ExactLog2(ui32Align); +} + +/* + External interfaces + */ + +IMG_INTERNAL PVRSRV_ERROR +SyncPrimContextCreate(SHARED_DEV_CONNECTION hDevConnection, + PSYNC_PRIM_CONTEXT *phSyncPrimContext) +{ + SYNC_PRIM_CONTEXT *psContext; + PVRSRV_ERROR eError; + + psContext = OSAllocMem(sizeof(SYNC_PRIM_CONTEXT)); + PVR_GOTO_IF_NOMEM(psContext, eError, fail_alloc); + + psContext->hDevConnection = hDevConnection; + + OSSNPrintf(psContext->azName, SYNC_PRIM_NAME_SIZE, "Sync Prim RA-%p", psContext); + OSSNPrintf(psContext->azSpanName, SYNC_PRIM_NAME_SIZE, "Sync Prim span RA-%p", psContext); + + /* + Create the RA for sub-allocations of the SynPrim's + + Note: + The import size doesn't matter here as the server will pass + back the blocksize when does the import which overrides + what we specify here. 
+ */ + + psContext->psSubAllocRA = RA_Create(psContext->azName, + /* Params for imports */ + _Log2(sizeof(IMG_UINT32)), + RA_LOCKCLASS_2, + SyncPrimBlockImport, + SyncPrimBlockUnimport, + psContext, + RA_POLICY_DEFAULT); + PVR_GOTO_IF_NOMEM(psContext->psSubAllocRA, eError, fail_suballoc); + + /* + Create the span-management RA + + The RA requires that we work with linear spans. For our use + here we don't require this behaviour as we're always working + within offsets of blocks (imports). However, we need to keep + the RA happy so we create the "span" management RA which + ensures that all are imports are added to the RA in a linear + fashion + */ + psContext->psSpanRA = RA_Create(psContext->azSpanName, + /* Params for imports */ + 0, + RA_LOCKCLASS_1, + NULL, + NULL, + NULL, + RA_POLICY_DEFAULT); + PVR_GOTO_IF_NOMEM(psContext->psSpanRA, eError, fail_span); + + if (!RA_Add(psContext->psSpanRA, 0, MAX_SYNC_MEM, 0, NULL)) + { + RA_Delete(psContext->psSpanRA); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, fail_span); + } + +#if defined(LOCAL_SYNC_BLOCK_RETAIN_FIRST) + psContext->hFirstSyncPrim = NULL; +#endif + + OSAtomicWrite(&psContext->hRefCount, 1); + + *phSyncPrimContext = psContext; + return PVRSRV_OK; +fail_span: + RA_Delete(psContext->psSubAllocRA); +fail_suballoc: + OSFreeMem(psContext); +fail_alloc: + return eError; +} + +IMG_INTERNAL void SyncPrimContextDestroy(PSYNC_PRIM_CONTEXT hSyncPrimContext) +{ + SYNC_PRIM_CONTEXT *psContext = hSyncPrimContext; + +#if defined(LOCAL_SYNC_BLOCK_RETAIN_FIRST) + /* Free the first sync prim that was allocated as part of this context */ + if (psContext->hFirstSyncPrim) + { + SyncPrimLocalFree((SYNC_PRIM *)psContext->hFirstSyncPrim, IMG_TRUE); + psContext->hFirstSyncPrim = NULL; + } +#endif + + if (1 != OSAtomicRead(&psContext->hRefCount)) + { + PVR_DPF((PVR_DBG_ERROR, "%s attempted with active references, may be the result of a race", __func__)); + } +#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) +#if 
defined(__KERNEL__) + if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Forcing context destruction due to bad driver state", __func__)); + OSAtomicWrite(&psContext->hRefCount, 1); + } +#endif +#endif + _SyncPrimContextUnref(psContext); +} + +static PVRSRV_ERROR _SyncPrimAlloc(PSYNC_PRIM_CONTEXT hSyncPrimContext, + PVRSRV_CLIENT_SYNC_PRIM **ppsSync, + const IMG_CHAR *pszClassName, + IMG_BOOL bServerSync) +{ + SYNC_PRIM_CONTEXT *psContext = hSyncPrimContext; + SYNC_PRIM_BLOCK *psSyncBlock; + SYNC_PRIM *psNewSync; + PVRSRV_ERROR eError; + RA_BASE_T uiSpanAddr; + + PVR_LOG_RETURN_IF_INVALID_PARAM(hSyncPrimContext, "hSyncPrimeContext"); + + psNewSync = OSAllocMem(sizeof(SYNC_PRIM)); + PVR_GOTO_IF_NOMEM(psNewSync, eError, fail_alloc); + + eError = RA_Alloc(psContext->psSubAllocRA, + sizeof(IMG_UINT32), + RA_NO_IMPORT_MULTIPLIER, + 0, + sizeof(IMG_UINT32), + "Sync_Prim", + &uiSpanAddr, + NULL, + (RA_PERISPAN_HANDLE *) &psSyncBlock); + PVR_GOTO_IF_ERROR(eError, fail_raalloc); + + psNewSync->eType = SYNC_PRIM_TYPE_LOCAL; + OSAtomicWrite(&psNewSync->u.sLocal.hRefCount, 1); + psNewSync->u.sLocal.uiSpanAddr = uiSpanAddr; + psNewSync->u.sLocal.psSyncBlock = psSyncBlock; + SyncPrimGetCPULinAddr(psNewSync); + *ppsSync = &psNewSync->sCommon; + _SyncPrimContextRef(psContext); +#if defined(PVRSRV_ENABLE_SYNC_POISONING) + (void) _SyncPrimSetValue(psNewSync, LOCAL_SYNC_PRIM_RESET_VALUE); +#endif + +#if defined(LOCAL_SYNC_BLOCK_RETAIN_FIRST) + /* If this is the first sync prim allocated in the context, keep a handle to it */ + if (psSyncBlock->uiSpanBase == 0 && psNewSync->u.sLocal.uiSpanAddr == 0) + { + psContext->hFirstSyncPrim = psNewSync; + } +#endif + + if (GetInfoPageDebugFlags(psSyncBlock->psContext->hDevConnection) & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) + { + IMG_CHAR szClassName[PVRSRV_SYNC_NAME_LENGTH]; + size_t uiSize; + + if (pszClassName) + { + uiSize = OSStringNLength(pszClassName, PVRSRV_SYNC_NAME_LENGTH); + 
/* Copy the class name annotation into a fixed-size array */ + OSCachedMemCopy(szClassName, pszClassName, uiSize); + if (uiSize == PVRSRV_SYNC_NAME_LENGTH) + szClassName[PVRSRV_SYNC_NAME_LENGTH-1] = '\0'; + else + szClassName[uiSize++] = '\0'; + } + else + { + /* No class name annotation */ + uiSize = 0; + szClassName[0] = '\0'; + } + + /* record this sync */ + eError = BridgeSyncRecordAdd( + GetBridgeHandle(psSyncBlock->psContext->hDevConnection), + &psNewSync->u.sLocal.hRecord, + psSyncBlock->hServerSyncPrimBlock, + psSyncBlock->ui32FirmwareAddr, + SyncPrimGetOffset(psNewSync), + bServerSync, + uiSize, + szClassName); + if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_ERROR, "%s: failed to add SyncRecord \"%s\" (%s)", + __func__, + szClassName, + PVRSRVGETERRORSTRING(eError))); + psNewSync->u.sLocal.hRecord = NULL; + } + } + else + { + size_t uiSize; + + uiSize = OSStringNLength(pszClassName, PVRSRV_SYNC_NAME_LENGTH); + + if (uiSize < PVRSRV_SYNC_NAME_LENGTH) + uiSize++; + /* uiSize now reflects size used for pszClassName + NUL byte */ + + eError = BridgeSyncAllocEvent(GetBridgeHandle(hSyncPrimContext->hDevConnection), + bServerSync, + psSyncBlock->ui32FirmwareAddr + SyncPrimGetOffset(psNewSync), + uiSize, + pszClassName); + PVR_LOG_IF_ERROR(eError, "BridgeSyncAllocEvent"); + } + + return PVRSRV_OK; + +fail_raalloc: + OSFreeMem(psNewSync); +fail_alloc: + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR SyncPrimAlloc(PSYNC_PRIM_CONTEXT hSyncPrimContext, + PVRSRV_CLIENT_SYNC_PRIM **ppsSync, + const IMG_CHAR *pszClassName) +{ + return _SyncPrimAlloc(hSyncPrimContext, + ppsSync, + pszClassName, + IMG_FALSE); +} + +static PVRSRV_ERROR +_SyncPrimSetValue(SYNC_PRIM *psSyncInt, IMG_UINT32 ui32Value) +{ + PVRSRV_ERROR eError; + + if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL) + { + SYNC_PRIM_BLOCK *psSyncBlock; + SYNC_PRIM_CONTEXT *psContext; + + psSyncBlock = psSyncInt->u.sLocal.psSyncBlock; + psContext = psSyncBlock->psContext; + + eError = 
BridgeSyncPrimSet(GetBridgeHandle(psContext->hDevConnection), + psSyncBlock->hServerSyncPrimBlock, + SyncPrimGetOffset(psSyncInt)/sizeof(IMG_UINT32), + ui32Value); + } + else + { + /* Server sync not supported, attempted use of server sync */ + return PVRSRV_ERROR_NOT_SUPPORTED; + } + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR SyncPrimFree(PVRSRV_CLIENT_SYNC_PRIM *psSync) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + SYNC_PRIM *psSyncInt; + + PVR_LOG_GOTO_IF_INVALID_PARAM(psSync, eError, err_out); + + psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); + if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL) + { + SyncPrimLocalUnref(psSyncInt); + } + else if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER) + { + /* Server sync not supported, attempted use of server sync */ + return PVRSRV_ERROR_NOT_SUPPORTED; + } + else + { + /* + Either the client has given us a bad pointer or there is an + error in this module + */ + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_SYNC_PRIM, err_out); + } + +err_out: + return eError; +} + +#if defined(NO_HARDWARE) +IMG_INTERNAL PVRSRV_ERROR +SyncPrimNoHwUpdate(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + SYNC_PRIM *psSyncInt; + + PVR_LOG_GOTO_IF_INVALID_PARAM(psSync, eError, err_out); + + psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); + + /* There is no check for the psSyncInt to be LOCAL as this call + substitutes the Firmware updating a sync and that sync could + be a server one */ + + eError = _SyncPrimSetValue(psSyncInt, ui32Value); + +err_out: + return eError; +} +#endif + +IMG_INTERNAL PVRSRV_ERROR +SyncPrimSet(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + SYNC_PRIM *psSyncInt; + + PVR_LOG_GOTO_IF_INVALID_PARAM(psSync, eError, err_out); + + psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); + if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL) + { + /* Invalid sync type */ + PVR_GOTO_WITH_ERROR(eError, 
PVRSRV_ERROR_INVALID_SYNC_PRIM, err_out); + } + + eError = _SyncPrimSetValue(psSyncInt, ui32Value); + +#if defined(PDUMP) + SyncPrimPDump(psSync); +#endif +err_out: + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR SyncPrimLocalGetHandleAndOffset(PVRSRV_CLIENT_SYNC_PRIM *psSync, + IMG_HANDLE *phBlock, + IMG_UINT32 *pui32Offset) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + SYNC_PRIM *psSyncInt; + + PVR_LOG_GOTO_IF_INVALID_PARAM(psSync, eError, err_out); + PVR_LOG_GOTO_IF_INVALID_PARAM(phBlock, eError, err_out); + PVR_LOG_GOTO_IF_INVALID_PARAM(pui32Offset, eError, err_out); + + psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); + + if (likely(psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)) + { + *phBlock = psSyncInt->u.sLocal.psSyncBlock->hServerSyncPrimBlock; + *pui32Offset = psSyncInt->u.sLocal.uiSpanAddr - psSyncInt->u.sLocal.psSyncBlock->uiSpanBase; + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: psSync not a Local sync prim (%d)", + __func__, psSyncInt->eType)); + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, err_out); + } + +err_out: + return eError; +} + +IMG_INTERNAL PVRSRV_ERROR +SyncPrimGetFirmwareAddr(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 *pui32FwAddr) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + SYNC_PRIM *psSyncInt; + + *pui32FwAddr = 0; + PVR_LOG_GOTO_IF_INVALID_PARAM(psSync, eError, err_out); + + psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); + if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL) + { + *pui32FwAddr = SyncPrimGetFirmwareAddrLocal(psSyncInt); + } + else if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER) + { + /* Server sync not supported, attempted use of server sync */ + return PVRSRV_ERROR_NOT_SUPPORTED; + } + else + { + /* Either the client has given us a bad pointer or there is an + * error in this module + */ + PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_SYNC_PRIM, err_out); + } + +err_out: + return eError; +} + +#if defined(PDUMP) +IMG_INTERNAL void SyncPrimPDump(PVRSRV_CLIENT_SYNC_PRIM *psSync) +{ + SYNC_PRIM 
*psSyncInt; + SYNC_PRIM_BLOCK *psSyncBlock; + SYNC_PRIM_CONTEXT *psContext; + PVRSRV_ERROR eError; + + PVR_ASSERT(psSync != NULL); + psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); + + if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL) + { + /* Invalid sync type */ + PVR_ASSERT(IMG_FALSE); + return; + } + + psSyncBlock = psSyncInt->u.sLocal.psSyncBlock; + psContext = psSyncBlock->psContext; + + eError = BridgeSyncPrimPDump(GetBridgeHandle(psContext->hDevConnection), + psSyncBlock->hServerSyncPrimBlock, + SyncPrimGetOffset(psSyncInt)); + PVR_LOG_IF_ERROR(eError, "BridgeSyncPrimPDump"); + PVR_ASSERT(eError == PVRSRV_OK); +} + +IMG_INTERNAL void SyncPrimPDumpValue(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value) +{ + SYNC_PRIM *psSyncInt; + SYNC_PRIM_BLOCK *psSyncBlock; + SYNC_PRIM_CONTEXT *psContext; + PVRSRV_ERROR eError; + + PVR_ASSERT(psSync != NULL); + psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); + + if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL) + { + /* Invalid sync type */ + PVR_ASSERT(IMG_FALSE); + return; + } + + psSyncBlock = psSyncInt->u.sLocal.psSyncBlock; + psContext = psSyncBlock->psContext; + + eError = BridgeSyncPrimPDumpValue(GetBridgeHandle(psContext->hDevConnection), + psSyncBlock->hServerSyncPrimBlock, + SyncPrimGetOffset(psSyncInt), + ui32Value); + PVR_LOG_IF_ERROR(eError, "BridgeSyncPrimPDumpValue"); + PVR_ASSERT(eError == PVRSRV_OK); +} + +IMG_INTERNAL void SyncPrimPDumpPol(PVRSRV_CLIENT_SYNC_PRIM *psSync, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + IMG_UINT32 ui32PDumpFlags) +{ + SYNC_PRIM *psSyncInt; + SYNC_PRIM_BLOCK *psSyncBlock; + SYNC_PRIM_CONTEXT *psContext; + PVRSRV_ERROR eError; + + PVR_ASSERT(psSync != NULL); + psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); + + if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL) + { + /* Invalid sync type */ + PVR_ASSERT(IMG_FALSE); + return; + } + + psSyncBlock = psSyncInt->u.sLocal.psSyncBlock; + psContext = psSyncBlock->psContext; + 
+ eError = BridgeSyncPrimPDumpPol(GetBridgeHandle(psContext->hDevConnection), + psSyncBlock->hServerSyncPrimBlock, + SyncPrimGetOffset(psSyncInt), + ui32Value, + ui32Mask, + eOperator, + ui32PDumpFlags); + PVR_LOG_IF_ERROR(eError, "BridgeSyncPrimPDumpPol"); + PVR_ASSERT(eError == PVRSRV_OK); +} + +IMG_INTERNAL void SyncPrimPDumpCBP(PVRSRV_CLIENT_SYNC_PRIM *psSync, + IMG_UINT64 uiWriteOffset, + IMG_UINT64 uiPacketSize, + IMG_UINT64 uiBufferSize) +{ + SYNC_PRIM *psSyncInt; + SYNC_PRIM_BLOCK *psSyncBlock; + SYNC_PRIM_CONTEXT *psContext; + PVRSRV_ERROR eError; + + PVR_ASSERT(psSync != NULL); + psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); + + if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL) + { + /* Invalid sync type */ + PVR_ASSERT(IMG_FALSE); + return; + } + + psSyncBlock = psSyncInt->u.sLocal.psSyncBlock; + psContext = psSyncBlock->psContext; + +#if defined(__linux__) && defined(__i386__) + PVR_ASSERT(uiWriteOffsethDevConnection), + psSyncBlock->hServerSyncPrimBlock, + SyncPrimGetOffset(psSyncInt), + TRUNCATE_64BITS_TO_32BITS(uiWriteOffset), + TRUNCATE_64BITS_TO_32BITS(uiPacketSize), + TRUNCATE_64BITS_TO_32BITS(uiBufferSize)); + PVR_LOG_IF_ERROR(eError, "BridgeSyncPrimPDumpCBP"); + PVR_ASSERT(eError == PVRSRV_OK); +} + +#endif diff --git a/drivers/gpu/drm/phytium/octopus/sync.h b/drivers/gpu/drm/phytium/octopus/sync.h new file mode 100644 index 000000000000..cbfa2e23f788 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/sync.h @@ -0,0 +1,316 @@ +/*************************************************************************/ /*! +@File +@Title Synchronisation interface header +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Defines the client side interface for synchronisation +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef SYNC_H +#define SYNC_H + +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv_error.h" +#include "sync_prim_internal.h" +#include "pdumpdefs.h" +#include "dllist.h" +#include "pvr_debug.h" + +#include "device_connection.h" + +#if defined(__KERNEL__) && defined(__linux__) && !defined(__GENKSYMS__) +#define __pvrsrv_defined_struct_enum__ +#include +#endif + +/*************************************************************************/ /*! +@Function SyncPrimContextCreate + +@Description Create a new synchronisation context + +@Input hBridge Bridge handle + +@Input hDeviceNode Device node handle + +@Output hSyncPrimContext Handle to the created synchronisation + primitive context + +@Return PVRSRV_OK if the synchronisation primitive context was + successfully created +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncPrimContextCreate(SHARED_DEV_CONNECTION hDevConnection, + PSYNC_PRIM_CONTEXT *hSyncPrimContext); + +/*************************************************************************/ /*! +@Function SyncPrimContextDestroy + +@Description Destroy a synchronisation context + +@Input hSyncPrimContext Handle to the synchronisation + primitive context to destroy + +@Return None +*/ +/*****************************************************************************/ +void +SyncPrimContextDestroy(PSYNC_PRIM_CONTEXT hSyncPrimContext); + +/*************************************************************************/ /*! 
+@Function SyncPrimAlloc + +@Description Allocate a new synchronisation primitive on the specified + synchronisation context + +@Input hSyncPrimContext Handle to the synchronisation + primitive context + +@Output ppsSync Created synchronisation primitive + +@Input pszClassName Sync source annotation + +@Return PVRSRV_OK if the synchronisation primitive was + successfully created +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncPrimAlloc(PSYNC_PRIM_CONTEXT hSyncPrimContext, + PVRSRV_CLIENT_SYNC_PRIM **ppsSync, + const IMG_CHAR *pszClassName); + +#if defined(__KERNEL__) +/*************************************************************************/ /*! +@Function SyncPrimAllocForServerSync + +@Description Allocate a new synchronisation primitive on the specified + synchronisation context for a server sync + +@Input hSyncPrimContext Handle to the synchronisation + primitive context + +@Output ppsSync Created synchronisation primitive + +@Input pszClassName Sync source annotation + +@Return PVRSRV_OK if the synchronisation primitive was + successfully created +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncPrimAllocForServerSync(PSYNC_PRIM_CONTEXT hSyncPrimContext, + PVRSRV_CLIENT_SYNC_PRIM **ppsSync, + const IMG_CHAR *pszClassName); +#endif + +/*************************************************************************/ /*! +@Function SyncPrimFree + +@Description Free a synchronisation primitive + +@Input psSync The synchronisation primitive to free + +@Return PVRSRV_OK if the synchronisation primitive was + successfully freed +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncPrimFree(PVRSRV_CLIENT_SYNC_PRIM *psSync); + +/*************************************************************************/ /*! 
+@Function SyncPrimSet + +@Description Set the synchronisation primitive to a value + +@Input psSync The synchronisation primitive to set + +@Input ui32Value Value to set it to + +@Return PVRSRV_OK on success +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncPrimSet(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value); + +#if defined(NO_HARDWARE) + +/*************************************************************************/ /*! +@Function SyncPrimNoHwUpdate + +@Description Updates the synchronisation primitive value (in NoHardware drivers) + +@Input psSync The synchronisation primitive to update + +@Input ui32Value Value to update it to + +@Return PVRSRV_OK on success +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncPrimNoHwUpdate(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value); +#endif + +#if defined(PDUMP) +/*************************************************************************/ /*! +@Function SyncPrimPDump + +@Description PDump the current value of the synchronisation primitive + +@Input psSync The synchronisation primitive to PDump + +@Return None +*/ +/*****************************************************************************/ +void +SyncPrimPDump(PVRSRV_CLIENT_SYNC_PRIM *psSync); + +/*************************************************************************/ /*! +@Function SyncPrimPDumpValue + +@Description PDump the ui32Value as the value of the synchronisation + primitive (regardless of the current value). + +@Input psSync The synchronisation primitive to PDump +@Input ui32Value Value to give to the sync prim on the pdump + +@Return None +*/ +/*****************************************************************************/ +void +SyncPrimPDumpValue(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value); + +/*************************************************************************/ /*! 
+@Function SyncPrimPDumpPol + +@Description Do a PDump poll of the synchronisation primitive + +@Input psSync The synchronisation primitive to PDump + +@Input ui32Value Value to poll for + +@Input ui32Mask PDump mask operator + +@Input ui32PDumpFlags PDump flags + +@Return None +*/ +/*****************************************************************************/ +void +SyncPrimPDumpPol(PVRSRV_CLIENT_SYNC_PRIM *psSync, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + IMG_UINT32 ui32PDumpFlags); + +/*************************************************************************/ /*! +@Function SyncPrimPDumpCBP + +@Description Do a PDump CB poll using the synchronisation primitive + +@Input psSync The synchronisation primitive to PDump + +@Input uiWriteOffset Current write offset of buffer + +@Input uiPacketSize Size of the packet to write into CB + +@Input uiBufferSize Size of the CB + +@Return None +*/ +/*****************************************************************************/ +void +SyncPrimPDumpCBP(PVRSRV_CLIENT_SYNC_PRIM *psSync, + IMG_UINT64 uiWriteOffset, + IMG_UINT64 uiPacketSize, + IMG_UINT64 uiBufferSize); + +#else + +#ifdef INLINE_IS_PRAGMA +#pragma inline(SyncPrimPDumpValue) +#endif +static INLINE void +SyncPrimPDumpValue(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value) +{ + PVR_UNREFERENCED_PARAMETER(psSync); + PVR_UNREFERENCED_PARAMETER(ui32Value); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(SyncPrimPDump) +#endif +static INLINE void +SyncPrimPDump(PVRSRV_CLIENT_SYNC_PRIM *psSync) +{ + PVR_UNREFERENCED_PARAMETER(psSync); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(SyncPrimPDumpPol) +#endif +static INLINE void +SyncPrimPDumpPol(PVRSRV_CLIENT_SYNC_PRIM *psSync, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + IMG_UINT32 ui32PDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psSync); + PVR_UNREFERENCED_PARAMETER(ui32Value); + PVR_UNREFERENCED_PARAMETER(ui32Mask); + 
PVR_UNREFERENCED_PARAMETER(eOperator); + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(SyncPrimPDumpCBP) +#endif +static INLINE void +SyncPrimPDumpCBP(PVRSRV_CLIENT_SYNC_PRIM *psSync, + IMG_UINT64 uiWriteOffset, + IMG_UINT64 uiPacketSize, + IMG_UINT64 uiBufferSize) +{ + PVR_UNREFERENCED_PARAMETER(psSync); + PVR_UNREFERENCED_PARAMETER(uiWriteOffset); + PVR_UNREFERENCED_PARAMETER(uiPacketSize); + PVR_UNREFERENCED_PARAMETER(uiBufferSize); +} +#endif /* PDUMP */ +#endif /* SYNC_H */ diff --git a/drivers/gpu/drm/phytium/octopus/sync_checkpoint.c b/drivers/gpu/drm/phytium/octopus/sync_checkpoint.c new file mode 100644 index 000000000000..2680435df92d --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/sync_checkpoint.c @@ -0,0 +1,2979 @@ +/*************************************************************************/ /*! +@File +@Title Services synchronisation checkpoint interface +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Server side code for services synchronisation interface +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ /**************************************************************************/ + +#include "img_defs.h" +#include "img_types.h" +#include "allocmem.h" +#include "devicemem.h" +#include "devicemem_pdump.h" +#include "pvr_debug.h" +#include "pvr_notifier.h" +#include "osfunc.h" +#include "dllist.h" +#include "sync.h" +#include "sync_checkpoint_external.h" +#include "sync_checkpoint.h" +#include "sync_checkpoint_internal.h" +#include "sync_checkpoint_init.h" +#include "lock.h" +#include "log2.h" +#include "pvrsrv.h" +#include "pdump_km.h" +#include "info_page.h" + +#include "pvrsrv_sync_km.h" +#include "rgxhwperf.h" + +#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) +#include "validation_soc.h" +#endif + +#if defined(PVRSRV_NEED_PVR_DPF) + +/* Enable this to turn on debug relating to the creation and + resolution of contexts */ +#define ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG 0 + +/* Enable this to turn on debug relating to the creation and + resolution of fences */ +#define ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG 0 + +/* Enable this to turn on debug relating to the sync checkpoint + allocation and freeing */ +#define ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG 0 + +/* Enable this to turn on debug relating to the sync checkpoint + enqueuing and signalling */ +#define ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG 0 + +/* Enable this to turn on debug relating to the sync checkpoint pool */ +#define ENABLE_SYNC_CHECKPOINT_POOL_DEBUG 0 + +/* Enable this to turn on debug relating to sync checkpoint UFO + lookup */ +#define ENABLE_SYNC_CHECKPOINT_UFO_DEBUG 0 + +/* Enable this to turn on sync checkpoint deferred cleanup debug + * (for syncs we have been told to free but which have some + * outstanding FW operations remaining (enqueued in CCBs) + */ +#define ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG 0 + +#else + +#define ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG 0 +#define ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG 0 +#define ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG 0 
+#define ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG 0 +#define ENABLE_SYNC_CHECKPOINT_POOL_DEBUG 0 +#define ENABLE_SYNC_CHECKPOINT_UFO_DEBUG 0 +#define ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG 0 + +#endif + +/* Maximum number of deferred sync checkpoint signal/error received for atomic context */ +#define SYNC_CHECKPOINT_MAX_DEFERRED_SIGNAL 500 + +/* Set the size of the sync checkpoint pool (not used if 0). + * A pool will be maintained for each sync checkpoint context. + */ +#if defined(PDUMP) +#define SYNC_CHECKPOINT_POOL_SIZE 0 +#else +#define SYNC_CHECKPOINT_POOL_SIZE 128 +#define SYNC_CHECKPOINT_POOL_MASK (SYNC_CHECKPOINT_POOL_SIZE - 1) +#endif + +/* The 'sediment' value represents the minimum number of + * sync checkpoints which must be in the pool before one + * will be allocated from the pool rather than from memory. + * This effectively helps avoid re-use of a sync checkpoint + * just after it has been returned to the pool, making + * debugging somewhat easier to understand. + */ +#define SYNC_CHECKPOINT_POOL_SEDIMENT 20 + +#if (SYNC_CHECKPOINT_POOL_SIZE & (SYNC_CHECKPOINT_POOL_SIZE - 1)) != 0 +#error "SYNC_CHECKPOINT_POOL_SIZE must be power of 2." +#endif + +#define SYNC_CHECKPOINT_BLOCK_LIST_CHUNK_SIZE 10 + +/* + This defines the maximum amount of synchronisation memory + that can be allocated per sync checkpoint context. + In reality this number is meaningless as we would run out + of synchronisation memory before we reach this limit, but + we need to provide a size to the span RA. 
+ */ +#define MAX_SYNC_CHECKPOINT_MEM (4 * 1024 * 1024) + + +typedef struct _SYNC_CHECKPOINT_BLOCK_LIST_ +{ + IMG_UINT32 ui32BlockCount; /*!< Number of contexts in the list */ + IMG_UINT32 ui32BlockListSize; /*!< Size of the array contexts */ + SYNC_CHECKPOINT_BLOCK **papsSyncCheckpointBlock; /*!< Array of sync checkpoint blocks */ +} SYNC_CHECKPOINT_BLOCK_LIST; + +typedef struct _SYNC_CHECKPOINT_CONTEXT_CTL_ +{ + SHARED_DEV_CONNECTION psDeviceNode; + PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN pfnFenceResolve; + PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN pfnFenceCreate; + /* + * Used as head of linked-list of sync checkpoints for which + * SyncCheckpointFree() has been called, but have outstanding + * FW operations (enqueued in CCBs) + * This list will be check whenever a SyncCheckpointFree() is + * called, and when SyncCheckpointContextDestroy() is called. + */ + DLLIST_NODE sDeferredCleanupListHead; + /* Lock to protect the deferred cleanup list */ + POS_SPINLOCK hDeferredCleanupListLock; + +#if (SYNC_CHECKPOINT_POOL_SIZE > 0) + _SYNC_CHECKPOINT *psSyncCheckpointPool[SYNC_CHECKPOINT_POOL_SIZE]; + IMG_BOOL bSyncCheckpointPoolFull; + IMG_BOOL bSyncCheckpointPoolValid; + IMG_UINT32 ui32SyncCheckpointPoolCount; + IMG_UINT32 ui32SyncCheckpointPoolWp; + IMG_UINT32 ui32SyncCheckpointPoolRp; + POS_SPINLOCK hSyncCheckpointPoolLock; /*! Protects access to the checkpoint pool control data. */ +#endif +} _SYNC_CHECKPOINT_CONTEXT_CTL; + +/* this is the max number of sync checkpoint records we will search or dump + * at any time. + */ +#define SYNC_CHECKPOINT_RECORD_LIMIT 20000 + +#define DECREMENT_WITH_WRAP(value, sz) ((value) ? 
((value) - 1) : ((sz) - 1)) + +struct SYNC_CHECKPOINT_RECORD +{ + PVRSRV_DEVICE_NODE *psDevNode; + SYNC_CHECKPOINT_BLOCK *psSyncCheckpointBlock; /*!< handle to SYNC_CHECKPOINT_BLOCK */ + IMG_UINT32 ui32SyncOffset; /*!< offset to sync in block */ + IMG_UINT32 ui32FwBlockAddr; + IMG_PID uiPID; + IMG_UINT32 ui32UID; + IMG_UINT64 ui64OSTime; + DLLIST_NODE sNode; + IMG_CHAR szClassName[PVRSRV_SYNC_NAME_LENGTH]; + PSYNC_CHECKPOINT pSyncCheckpt; +}; + +static PFN_SYNC_CHECKPOINT_STRUCT *g_psSyncCheckpointPfnStruct = NULL; + +#if (SYNC_CHECKPOINT_POOL_SIZE > 0) +static _SYNC_CHECKPOINT *_GetCheckpointFromPool(_SYNC_CHECKPOINT_CONTEXT *psContext); +static IMG_BOOL _PutCheckpointInPool(_SYNC_CHECKPOINT *psSyncCheckpoint); +static IMG_UINT32 _CleanCheckpointPool(_SYNC_CHECKPOINT_CONTEXT *psContext); +#endif + +#if (ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG == 1) +static IMG_UINT32 gui32NumSyncCheckpointContexts = 0; +#endif + +/* Defined values to indicate status of sync checkpoint, which is + * stored in the memory of the structure */ +#define SYNC_CHECKPOINT_PATTERN_IN_USE 0x1a1aa +#define SYNC_CHECKPOINT_PATTERN_IN_POOL 0x2b2bb +#define SYNC_CHECKPOINT_PATTERN_FREED 0x3c3cc + +#if defined(SUPPORT_RGX) +static inline void RGXSRVHWPerfSyncCheckpointUFOIsSignalled(PVRSRV_RGXDEV_INFO *psDevInfo, + _SYNC_CHECKPOINT *psSyncCheckpointInt, IMG_UINT32 ui32FenceSyncFlags) +{ + if (RGXHWPerfHostIsEventEnabled(psDevInfo, RGX_HWPERF_HOST_UFO) + && !(ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) + { + RGX_HWPERF_UFO_EV eEv; + RGX_HWPERF_UFO_DATA_ELEMENT sSyncData; + + if (psSyncCheckpointInt) + { + if ((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) || + (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ERRORED)) + { + sSyncData.sCheckSuccess.ui32FWAddr = SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt); + sSyncData.sCheckSuccess.ui32Value = 
psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State; + eEv = RGX_HWPERF_UFO_EV_CHECK_SUCCESS; + } + else + { + sSyncData.sCheckFail.ui32FWAddr = SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt); + sSyncData.sCheckFail.ui32Value = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State; + sSyncData.sCheckFail.ui32Required = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; + eEv = RGX_HWPERF_UFO_EV_CHECK_FAIL; + } + RGXHWPerfHostPostUfoEvent(psDevInfo, eEv, &sSyncData, + (ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_CTX_ATOMIC) ? IMG_FALSE : IMG_TRUE); + } + } +} + +static inline void RGXSRVHWPerfSyncCheckpointUFOUpdate(PVRSRV_RGXDEV_INFO *psDevInfo, + _SYNC_CHECKPOINT *psSyncCheckpointInt, IMG_UINT32 ui32FenceSyncFlags) +{ + if (RGXHWPerfHostIsEventEnabled(psDevInfo, RGX_HWPERF_HOST_UFO) + && !(ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) + { + RGX_HWPERF_UFO_DATA_ELEMENT sSyncData; + + if (psSyncCheckpointInt) + { + sSyncData.sUpdate.ui32FWAddr = SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt); + sSyncData.sUpdate.ui32OldValue = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State; + sSyncData.sUpdate.ui32NewValue = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; + RGXHWPerfHostPostUfoEvent(psDevInfo, RGX_HWPERF_UFO_EV_UPDATE, &sSyncData, + (ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_CTX_ATOMIC) ? 
IMG_FALSE : IMG_TRUE); + } + } +} +#endif + +static PVRSRV_ERROR +_SyncCheckpointRecordAdd(PSYNC_CHECKPOINT_RECORD_HANDLE *phRecord, + SYNC_CHECKPOINT_BLOCK *hSyncCheckpointBlock, + IMG_UINT32 ui32FwBlockAddr, + IMG_UINT32 ui32SyncOffset, + IMG_UINT32 ui32UID, + IMG_UINT32 ui32ClassNameSize, + const IMG_CHAR *pszClassName, PSYNC_CHECKPOINT pSyncCheckpt); +static PVRSRV_ERROR +_SyncCheckpointRecordRemove(PSYNC_CHECKPOINT_RECORD_HANDLE hRecord); +static void _SyncCheckpointState(PDLLIST_NODE psNode, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile); +static void _SyncCheckpointDebugRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, + IMG_UINT32 ui32VerbLevel, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile); +static PVRSRV_ERROR _SyncCheckpointRecordListInit(PVRSRV_DEVICE_NODE *psDevNode); +static void _SyncCheckpointRecordListDeinit(PVRSRV_DEVICE_NODE *psDevNode); + +#if defined(PDUMP) +static void +MISRHandler_PdumpDeferredSyncSignalPoster(void *pvData); +static PVRSRV_ERROR _SyncCheckpointAllocPDump(_SYNC_CHECKPOINT *psSyncCheckpoint); +static PVRSRV_ERROR _SyncCheckpointUpdatePDump(PPVRSRV_DEVICE_NODE psDevNode, _SYNC_CHECKPOINT *psSyncCheckpoint, IMG_UINT32 ui32Status, IMG_UINT32 ui32FenceSyncFlags); +static PVRSRV_ERROR _SyncCheckpointPDumpTransition(void *pvData, PDUMP_TRANSITION_EVENT eEvent); +#endif + +/* Unique incremental ID assigned to sync checkpoints when allocated */ +static IMG_UINT32 g_SyncCheckpointUID; + +static void _CheckDeferredCleanupList(_SYNC_CHECKPOINT_CONTEXT *psContext); + +void SyncCheckpointContextUnref(PSYNC_CHECKPOINT_CONTEXT psContext) +{ + _SYNC_CHECKPOINT_CONTEXT *psContextInt = (_SYNC_CHECKPOINT_CONTEXT *) psContext; + _SYNC_CHECKPOINT_CONTEXT_CTL *const psCtxCtl = psContextInt->psContextCtl; + IMG_UINT32 ui32RefCt = OSAtomicRead(&psContextInt->hRefCount); + + if (ui32RefCt == 0) + { + PVR_LOG_ERROR(PVRSRV_ERROR_INVALID_CONTEXT, + "SyncCheckpointContextUnref context already freed"); + } + 
else if (OSAtomicDecrement(&psContextInt->hRefCount) == 0) + { + /* SyncCheckpointContextDestroy only when no longer referenced */ + OSSpinLockDestroy(psCtxCtl->hDeferredCleanupListLock); + psCtxCtl->hDeferredCleanupListLock = NULL; +#if (SYNC_CHECKPOINT_POOL_SIZE > 0) + if (psCtxCtl->ui32SyncCheckpointPoolCount) + { + PVR_DPF((PVR_DBG_WARNING, + "%s called for context<%p> with %d sync checkpoints still" + " in the pool", + __func__, + (void *) psContext, + psCtxCtl->ui32SyncCheckpointPoolCount)); + } + psCtxCtl->bSyncCheckpointPoolValid = IMG_FALSE; + OSSpinLockDestroy(psCtxCtl->hSyncCheckpointPoolLock); + psCtxCtl->hSyncCheckpointPoolLock = NULL; +#endif + OSFreeMem(psContextInt->psContextCtl); + RA_Delete(psContextInt->psSpanRA); + RA_Delete(psContextInt->psSubAllocRA); + OSLockDestroy(psContextInt->hLock); + psContextInt->hLock = NULL; + OSFreeMem(psContext); + } +} + +void SyncCheckpointContextRef(PSYNC_CHECKPOINT_CONTEXT psContext) +{ + _SYNC_CHECKPOINT_CONTEXT *psContextInt = (_SYNC_CHECKPOINT_CONTEXT *)psContext; + IMG_UINT32 ui32RefCt = OSAtomicRead(&psContextInt->hRefCount); + + if (ui32RefCt == 0) + { + PVR_LOG_ERROR(PVRSRV_ERROR_INVALID_CONTEXT, + "SyncCheckpointContextRef context use after free"); + } + else + { + OSAtomicIncrement(&psContextInt->hRefCount); + } +} + +/* + Internal interfaces for management of synchronisation block memory + */ +static PVRSRV_ERROR +_AllocSyncCheckpointBlock(_SYNC_CHECKPOINT_CONTEXT *psContext, + SYNC_CHECKPOINT_BLOCK **ppsSyncBlock) +{ + PVRSRV_DEVICE_NODE *psDevNode; + SYNC_CHECKPOINT_BLOCK *psSyncBlk; + PVRSRV_ERROR eError; + + psSyncBlk = OSAllocMem(sizeof(*psSyncBlk)); + PVR_LOG_GOTO_IF_NOMEM(psSyncBlk, eError, fail_alloc); + + psSyncBlk->psContext = psContext; + + /* Allocate sync checkpoint block */ + psDevNode = psContext->psDevNode; + PVR_LOG_GOTO_IF_INVALID_PARAM(psDevNode, eError, fail_alloc_ufo_block); + + psSyncBlk->psDevNode = psDevNode; + + eError = psDevNode->pfnAllocUFOBlock(psDevNode, + 
&psSyncBlk->hMemDesc, + &psSyncBlk->ui32FirmwareAddr, + &psSyncBlk->ui32SyncBlockSize); + PVR_LOG_GOTO_IF_ERROR(eError, "pfnAllocUFOBlock", fail_alloc_ufo_block); + + eError = DevmemAcquireCpuVirtAddr(psSyncBlk->hMemDesc, + (void **) &psSyncBlk->pui32LinAddr); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", fail_devmem_acquire); + + OSAtomicWrite(&psSyncBlk->hRefCount, 1); + + OSLockCreate(&psSyncBlk->hLock); + + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "Allocated Sync Checkpoint UFO block (FirmwareVAddr = 0x%08x)", + psSyncBlk->ui32FirmwareAddr); +#if defined(PDUMP) + OSLockAcquire(psContext->hSyncCheckpointBlockListLock); + dllist_add_to_tail(&psContext->sSyncCheckpointBlockListHead, &psSyncBlk->sListNode); + OSLockRelease(psContext->hSyncCheckpointBlockListLock); +#endif + + *ppsSyncBlock = psSyncBlk; + return PVRSRV_OK; + +fail_devmem_acquire: + psDevNode->pfnFreeUFOBlock(psDevNode, psSyncBlk->hMemDesc); +fail_alloc_ufo_block: + OSFreeMem(psSyncBlk); +fail_alloc: + return eError; +} + +static void +_FreeSyncCheckpointBlock(SYNC_CHECKPOINT_BLOCK *psSyncBlk) +{ + OSLockAcquire(psSyncBlk->hLock); + if (0 == OSAtomicDecrement(&psSyncBlk->hRefCount)) + { + PVRSRV_DEVICE_NODE *psDevNode = psSyncBlk->psDevNode; + +#if defined(PDUMP) + OSLockAcquire(psSyncBlk->psContext->hSyncCheckpointBlockListLock); + dllist_remove_node(&psSyncBlk->sListNode); + OSLockRelease(psSyncBlk->psContext->hSyncCheckpointBlockListLock); +#endif + DevmemReleaseCpuVirtAddr(psSyncBlk->hMemDesc); + psDevNode->pfnFreeUFOBlock(psDevNode, psSyncBlk->hMemDesc); + OSLockRelease(psSyncBlk->hLock); + OSLockDestroy(psSyncBlk->hLock); + psSyncBlk->hLock = NULL; + OSFreeMem(psSyncBlk); + } + else + { + OSLockRelease(psSyncBlk->hLock); + } +} + +static PVRSRV_ERROR +_SyncCheckpointBlockImport(RA_PERARENA_HANDLE hArena, + RA_LENGTH_T uSize, + RA_FLAGS_T uFlags, + const IMG_CHAR *pszAnnotation, + RA_BASE_T *puiBase, + RA_LENGTH_T *puiActualSize, + RA_PERISPAN_HANDLE *phImport) +{ + 
_SYNC_CHECKPOINT_CONTEXT *psContext = hArena; + SYNC_CHECKPOINT_BLOCK *psSyncBlock = NULL; + RA_LENGTH_T uiSpanSize; + PVRSRV_ERROR eError; + PVR_UNREFERENCED_PARAMETER(uFlags); + + PVR_LOG_RETURN_IF_INVALID_PARAM((hArena != NULL), "hArena"); + + /* Check we've not be called with an unexpected size */ + PVR_LOG_RETURN_IF_INVALID_PARAM((uSize == sizeof(SYNC_CHECKPOINT_FW_OBJ)), "uSize"); + + /* + Ensure the sync checkpoint context doesn't go away while we have + sync blocks attached to it. + */ + SyncCheckpointContextRef((PSYNC_CHECKPOINT_CONTEXT)psContext); + + /* Allocate the block of memory */ + eError = _AllocSyncCheckpointBlock(psContext, &psSyncBlock); + PVR_GOTO_IF_ERROR(eError, fail_syncblockalloc); + + /* Allocate a span for it */ + eError = RA_Alloc(psContext->psSpanRA, + psSyncBlock->ui32SyncBlockSize, + RA_NO_IMPORT_MULTIPLIER, + 0, + psSyncBlock->ui32SyncBlockSize, + pszAnnotation, + &psSyncBlock->uiSpanBase, + &uiSpanSize, + NULL); + PVR_GOTO_IF_ERROR(eError, fail_spanalloc); + + /* + There is no reason the span RA should return an allocation larger + then we request + */ + PVR_LOG_IF_FALSE((uiSpanSize == psSyncBlock->ui32SyncBlockSize), + "uiSpanSize invalid"); + + *puiBase = psSyncBlock->uiSpanBase; + *puiActualSize = psSyncBlock->ui32SyncBlockSize; + *phImport = psSyncBlock; + return PVRSRV_OK; + +fail_spanalloc: + _FreeSyncCheckpointBlock(psSyncBlock); +fail_syncblockalloc: + SyncCheckpointContextUnref((PSYNC_CHECKPOINT_CONTEXT)psContext); + + return eError; +} + +static void +_SyncCheckpointBlockUnimport(RA_PERARENA_HANDLE hArena, + RA_BASE_T uiBase, + RA_PERISPAN_HANDLE hImport) +{ + _SYNC_CHECKPOINT_CONTEXT *psContext = hArena; + SYNC_CHECKPOINT_BLOCK *psSyncBlock = hImport; + + PVR_LOG_RETURN_VOID_IF_FALSE((psContext != NULL), "hArena invalid"); + PVR_LOG_RETURN_VOID_IF_FALSE((psSyncBlock != NULL), "hImport invalid"); + PVR_LOG_RETURN_VOID_IF_FALSE((uiBase == psSyncBlock->uiSpanBase), "uiBase invalid"); + + /* Free the span this import is using 
*/ + RA_Free(psContext->psSpanRA, uiBase); + + /* Free the sync checkpoint block */ + _FreeSyncCheckpointBlock(psSyncBlock); + + /* Drop our reference to the sync checkpoint context */ + SyncCheckpointContextUnref((PSYNC_CHECKPOINT_CONTEXT)psContext); +} + +static INLINE IMG_UINT32 _SyncCheckpointGetOffset(_SYNC_CHECKPOINT *psSyncInt) +{ + IMG_UINT64 ui64Temp; + + ui64Temp = psSyncInt->uiSpanAddr - psSyncInt->psSyncCheckpointBlock->uiSpanBase; + PVR_ASSERT(ui64TemppfnFenceResolve)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)", + __func__)); + eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED; + PVR_LOG_ERROR(eError, "g_pfnFenceResolve is NULL"); + return eError; + } + + if (papsSyncCheckpoints) + { + eError = g_psSyncCheckpointPfnStruct->pfnFenceResolve( + psSyncCheckpointContext, + hFence, + pui32NumSyncCheckpoints, + papsSyncCheckpoints, + pui64FenceUID); + } + else + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + + PVR_LOG_RETURN_IF_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnFenceResolve"); + +#if defined(PDUMP) + if (*papsSyncCheckpoints) + { + for (i = 0; i < *pui32NumSyncCheckpoints; i++) + { + psSyncCheckpoint = (_SYNC_CHECKPOINT *)(*papsSyncCheckpoints)[i]; + psSyncCheckpoint->ui32PDumpFlags = ui32PDumpFlags; + } + } +#endif + + if (*pui32NumSyncCheckpoints > MAX_SYNC_CHECKPOINTS_PER_FENCE) + { + PVR_DPF((PVR_DBG_ERROR, "%s: g_psSyncCheckpointPfnStruct->pfnFenceResolve() returned too many checkpoints (%u > MAX_SYNC_CHECKPOINTS_PER_FENCE=%u)", + __func__, *pui32NumSyncCheckpoints, MAX_SYNC_CHECKPOINTS_PER_FENCE)); + + /* Free resources after error */ + if (*papsSyncCheckpoints) + { + for (i = 0; i < *pui32NumSyncCheckpoints; i++) + { + SyncCheckpointDropRef((*papsSyncCheckpoints)[i]); + } + + SyncCheckpointFreeCheckpointListMem(*papsSyncCheckpoints); + } + + return PVRSRV_ERROR_INVALID_PARAMS; + } + +#if (ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG == 1) + { + IMG_UINT32 ii; + + PVR_DPF((PVR_DBG_WARNING, + 
"%s: g_psSyncCheckpointPfnStruct->pfnFenceResolve() for fence %d returned the following %d checkpoints:", + __func__, + hFence, + *pui32NumSyncCheckpoints)); + + for (ii=0; ii<*pui32NumSyncCheckpoints; ii++) + { + PSYNC_CHECKPOINT psNextCheckpoint = *(*papsSyncCheckpoints + ii); + PVR_DPF((PVR_DBG_WARNING, + "%s: *papsSyncCheckpoints[%d]:<%p>", + __func__, + ii, + (void*)psNextCheckpoint)); + } + } +#endif + + return eError; +} + +PVRSRV_ERROR +SyncCheckpointCreateFence(PVRSRV_DEVICE_NODE *psDevNode, + const IMG_CHAR *pszFenceName, + PVRSRV_TIMELINE hTimeline, + PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, + PVRSRV_FENCE *phNewFence, + IMG_UINT64 *puiUpdateFenceUID, + void **ppvFenceFinaliseData, + PSYNC_CHECKPOINT *psNewSyncCheckpoint, + void **ppvTimelineUpdateSyncPrim, + IMG_UINT32 *pui32TimelineUpdateValue, + PDUMP_FLAGS_T ui32PDumpFlags) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_UNREFERENCED_PARAMETER(psDevNode); + + if (unlikely(!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnFenceCreate)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)", + __func__)); + eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED; + PVR_LOG_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnFenceCreate is NULL"); + } + else + { + eError = g_psSyncCheckpointPfnStruct->pfnFenceCreate( + pszFenceName, + hTimeline, + psSyncCheckpointContext, + phNewFence, + puiUpdateFenceUID, + ppvFenceFinaliseData, + psNewSyncCheckpoint, + ppvTimelineUpdateSyncPrim, + pui32TimelineUpdateValue); + if (unlikely(eError != PVRSRV_OK)) + { + PVR_DPF((PVR_DBG_ERROR, + "%s failed to create new fence<%p> for timeline<%d> using " + "sync checkpoint context<%p>, psNewSyncCheckpoint=<%p>, eError=%s", + __func__, + (void*)phNewFence, + hTimeline, + (void*)psSyncCheckpointContext, + (void*)psNewSyncCheckpoint, + PVRSRVGetErrorString(eError))); + } +#if (ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG == 1) + else + { + PVR_DPF((PVR_DBG_WARNING, + "%s created 
new fence<%d> for timeline<%d> using " + "sync checkpoint context<%p>, new sync_checkpoint=<%p>", + __func__, + *phNewFence, + hTimeline, + (void*)psSyncCheckpointContext, + (void*)*psNewSyncCheckpoint)); + } +#endif + +#if defined(PDUMP) + if (eError == PVRSRV_OK) + { + _SYNC_CHECKPOINT *psSyncCheckpoint = (_SYNC_CHECKPOINT*)(*psNewSyncCheckpoint); + if (psSyncCheckpoint) + { + psSyncCheckpoint->ui32PDumpFlags = ui32PDumpFlags; + } + } +#endif + } + return eError; +} + +PVRSRV_ERROR +SyncCheckpointRollbackFenceData(PVRSRV_FENCE hFence, void *pvFinaliseData) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + if (!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnFenceDataRollback) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)", + __func__)); + eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED; + PVR_LOG_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnFenceDataRollback is NULL"); + } + else + { +#if (ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s: called to rollback fence data <%p>", + __func__, + pvFinaliseData)); +#endif + eError = g_psSyncCheckpointPfnStruct->pfnFenceDataRollback( + hFence, pvFinaliseData); + PVR_LOG_IF_ERROR(eError, + "g_psSyncCheckpointPfnStruct->pfnFenceDataRollback returned error"); + } + return eError; +} + +PVRSRV_ERROR +SyncCheckpointFinaliseFence(PPVRSRV_DEVICE_NODE psDevNode, + PVRSRV_FENCE hFence, + void *pvFinaliseData, + PSYNC_CHECKPOINT psSyncCheckpoint, + const IMG_CHAR *pszName) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + if (!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnFenceFinalise) + { + PVR_DPF((PVR_DBG_WARNING, + "%s: Warning (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED) (this is permitted)", + __func__)); + eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED; + } + else + { +#if (ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s: called to finalise fence <%d>", + __func__, + hFence)); +#endif 
+ eError = g_psSyncCheckpointPfnStruct->pfnFenceFinalise(hFence, pvFinaliseData); + PVR_LOG_IF_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnFenceFinalise returned error"); + + RGXSRV_HWPERF_ALLOC_FENCE(psDevNode, OSGetCurrentClientProcessIDKM(), hFence, + SyncCheckpointGetFirmwareAddr(psSyncCheckpoint), + pszName, OSStringLength(pszName)); + } + return eError; +} + +void +SyncCheckpointFreeCheckpointListMem(void *pvCheckpointListMem) +{ + if (g_psSyncCheckpointPfnStruct->pfnFreeCheckpointListMem) + { + g_psSyncCheckpointPfnStruct->pfnFreeCheckpointListMem(pvCheckpointListMem); + } +} + +PVRSRV_ERROR +SyncCheckpointNoHWUpdateTimelines(void *pvPrivateData) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + if (!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnNoHWUpdateTimelines) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)", + __func__)); + eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED; + PVR_LOG_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnNoHWUpdateTimelines is NULL"); + } + else + { + g_psSyncCheckpointPfnStruct->pfnNoHWUpdateTimelines(pvPrivateData); + } + return eError; + +} + +PVRSRV_ERROR +SyncCheckpointDumpInfoOnStalledUFOs(IMG_UINT32 ui32NumUFOs, IMG_UINT32 *pui32Vaddrs, IMG_UINT32 *pui32NumSyncOwnedUFOs) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_LOG_RETURN_IF_FALSE((pui32NumSyncOwnedUFOs != NULL), "pui32NumSyncOwnedUFOs invalid", PVRSRV_ERROR_INVALID_PARAMS); + + if (!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnDumpInfoOnStalledUFOs) + { + *pui32NumSyncOwnedUFOs = 0; + PVR_DPF((PVR_DBG_ERROR, + "%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)", + __func__)); + eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED; + PVR_LOG_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnDumpInfoOnStalledUFOs is NULL"); + } + else + { + *pui32NumSyncOwnedUFOs = g_psSyncCheckpointPfnStruct->pfnDumpInfoOnStalledUFOs(ui32NumUFOs, pui32Vaddrs); + PVR_LOG(("%d sync 
checkpoint%s owned by %s in stalled context", + *pui32NumSyncOwnedUFOs, *pui32NumSyncOwnedUFOs==1 ? "" : "s", + g_psSyncCheckpointPfnStruct->pszImplName)); + } + return eError; +} + +PVRSRV_ERROR +SyncCheckpointContextCreate(PPVRSRV_DEVICE_NODE psDevNode, + PSYNC_CHECKPOINT_CONTEXT *ppsSyncCheckpointContext) +{ + _SYNC_CHECKPOINT_CONTEXT *psContext = NULL; + _SYNC_CHECKPOINT_CONTEXT_CTL *psContextCtl = NULL; + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_LOG_RETURN_IF_FALSE((ppsSyncCheckpointContext != NULL), + "ppsSyncCheckpointContext invalid", + PVRSRV_ERROR_INVALID_PARAMS); + + psContext = OSAllocMem(sizeof(*psContext)); + PVR_LOG_GOTO_IF_NOMEM(psContext, eError, fail_alloc); /* Sets OOM error code */ + + psContextCtl = OSAllocMem(sizeof(*psContextCtl)); + PVR_LOG_GOTO_IF_NOMEM(psContextCtl, eError, fail_alloc2); /* Sets OOM error code */ + + eError = OSLockCreate(&psContext->hLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate:1", fail_create_context_lock); + + eError = OSSpinLockCreate(&psContextCtl->hDeferredCleanupListLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSSpinLockCreate:1", fail_create_deferred_cleanup_lock); + +#if (SYNC_CHECKPOINT_POOL_SIZE > 0) + eError = OSSpinLockCreate(&psContextCtl->hSyncCheckpointPoolLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSSpinLockCreate:2", fail_create_pool_lock); +#endif + + dllist_init(&psContextCtl->sDeferredCleanupListHead); +#if (SYNC_CHECKPOINT_POOL_SIZE > 0) + psContextCtl->ui32SyncCheckpointPoolCount = 0; + psContextCtl->ui32SyncCheckpointPoolWp = 0; + psContextCtl->ui32SyncCheckpointPoolRp = 0; + psContextCtl->bSyncCheckpointPoolFull = IMG_FALSE; + psContextCtl->bSyncCheckpointPoolValid = IMG_TRUE; +#endif + psContext->psDevNode = psDevNode; + + OSSNPrintf(psContext->azName, PVRSRV_SYNC_NAME_LENGTH, "Sync Prim RA-%p", psContext); + OSSNPrintf(psContext->azSpanName, PVRSRV_SYNC_NAME_LENGTH, "Sync Prim span RA-%p", psContext); + + /* + Create the RA for sub-allocations of the sync checkpoints + + Note: + The import size 
doesn't matter here as the server will pass + back the blocksize when it does the import which overrides + what we specify here. + */ + psContext->psSubAllocRA = RA_Create(psContext->azName, + /* Params for imports */ + _Log2(sizeof(IMG_UINT32)), + RA_LOCKCLASS_2, + _SyncCheckpointBlockImport, + _SyncCheckpointBlockUnimport, + psContext, + RA_POLICY_DEFAULT); + PVR_LOG_GOTO_IF_NOMEM(psContext->psSubAllocRA, eError, fail_suballoc); + + /* + Create the span-management RA + + The RA requires that we work with linear spans. For our use + here we don't require this behaviour as we're always working + within offsets of blocks (imports). However, we need to keep + the RA happy so we create the "span" management RA which + ensures that all are imports are added to the RA in a linear + fashion + */ + psContext->psSpanRA = RA_Create(psContext->azSpanName, + /* Params for imports */ + 0, + RA_LOCKCLASS_1, + NULL, + NULL, + NULL, + RA_POLICY_DEFAULT); + PVR_LOG_GOTO_IF_NOMEM(psContext->psSpanRA, eError, fail_span); + + if (!RA_Add(psContext->psSpanRA, 0, MAX_SYNC_CHECKPOINT_MEM, 0, NULL)) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + PVR_LOG_ERROR(eError, "SyncCheckpointContextCreate call to RA_Add(span) failed"); + goto fail_span_add; + } + + OSAtomicWrite(&psContext->hRefCount, 1); + OSAtomicWrite(&psContext->hCheckpointCount, 0); + + psContext->psContextCtl = psContextCtl; + + *ppsSyncCheckpointContext = (PSYNC_CHECKPOINT_CONTEXT)psContext; +#if (ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s: created psSyncCheckpointContext=<%p> (%d contexts exist)", + __func__, + (void*)*ppsSyncCheckpointContext, + ++gui32NumSyncCheckpointContexts)); +#endif + +#if defined(PDUMP) + dllist_init(&psContext->sSyncCheckpointBlockListHead); + + eError = OSLockCreate(&psContext->hSyncCheckpointBlockListLock); + PVR_GOTO_IF_ERROR(eError, fail_span_add); + + OSLockAcquire(psDevNode->hSyncCheckpointContextListLock); + 
dllist_add_to_tail(&psDevNode->sSyncCheckpointContextListHead, &psContext->sListNode); + OSLockRelease(psDevNode->hSyncCheckpointContextListLock); + +#endif + + return PVRSRV_OK; + +fail_span_add: + RA_Delete(psContext->psSpanRA); +fail_span: + RA_Delete(psContext->psSubAllocRA); +fail_suballoc: +#if (SYNC_CHECKPOINT_POOL_SIZE > 0) + OSSpinLockDestroy(psContextCtl->hSyncCheckpointPoolLock); + psContextCtl->hSyncCheckpointPoolLock = NULL; +fail_create_pool_lock: +#endif + OSSpinLockDestroy(psContextCtl->hDeferredCleanupListLock); + psContextCtl->hDeferredCleanupListLock = NULL; +fail_create_deferred_cleanup_lock: + OSLockDestroy(psContext->hLock); + psContext->hLock = NULL; +fail_create_context_lock: + OSFreeMem(psContextCtl); +fail_alloc2: + OSFreeMem(psContext); +fail_alloc: + return eError; +} + +/* Poisons and frees the checkpoint + * Decrements context refcount. */ +static void _FreeSyncCheckpoint(_SYNC_CHECKPOINT *psSyncCheckpoint) +{ + _SYNC_CHECKPOINT_CONTEXT *psContext = psSyncCheckpoint->psSyncCheckpointBlock->psContext; + + psSyncCheckpoint->sCheckpointUFOAddr.ui32Addr = 0; + psSyncCheckpoint->psSyncCheckpointFwObj = NULL; + psSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_FREED; + + RA_Free(psSyncCheckpoint->psSyncCheckpointBlock->psContext->psSubAllocRA, + psSyncCheckpoint->uiSpanAddr); + psSyncCheckpoint->psSyncCheckpointBlock = NULL; + + OSFreeMem(psSyncCheckpoint); + + OSAtomicDecrement(&psContext->hCheckpointCount); +} + +PVRSRV_ERROR SyncCheckpointContextDestroy(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + _SYNC_CHECKPOINT_CONTEXT *psContext = (_SYNC_CHECKPOINT_CONTEXT*)psSyncCheckpointContext; + PVRSRV_DEVICE_NODE *psDevNode; + IMG_INT iRf = 0; + + PVR_LOG_RETURN_IF_FALSE((psSyncCheckpointContext != NULL), + "psSyncCheckpointContext invalid", + PVRSRV_ERROR_INVALID_PARAMS); + + psDevNode = (PVRSRV_DEVICE_NODE *)psContext->psDevNode; + +#if (ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG == 1) + 
PVR_DPF((PVR_DBG_WARNING, + "%s: destroying psSyncCheckpointContext=<%p> (now have %d contexts)", + __func__, + (void*)psSyncCheckpointContext, + --gui32NumSyncCheckpointContexts)); +#endif + + _CheckDeferredCleanupList(psContext); + +#if (SYNC_CHECKPOINT_POOL_SIZE > 0) + if (psContext->psContextCtl->ui32SyncCheckpointPoolCount > 0) + { + IMG_UINT32 ui32NumFreedFromPool = _CleanCheckpointPool(psContext); + +#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s freed %d sync checkpoints that were still in the pool for context<%p>", + __func__, + ui32NumFreedFromPool, + (void*)psContext)); +#else + PVR_UNREFERENCED_PARAMETER(ui32NumFreedFromPool); +#endif + } +#endif + + iRf = OSAtomicRead(&psContext->hCheckpointCount); + + if (iRf != 0) + { + OS_SPINLOCK_FLAGS uiFlags; + + /* Note, this is not a permanent error as the caller may retry later */ + PVR_DPF((PVR_DBG_WARNING, + "%s <%p> attempted with active references (iRf=%d), " + "may be the result of a race", + __func__, + (void*)psContext, + iRf)); + + eError = PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT; + + OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); + { + DLLIST_NODE *psNode, *psNext; + + dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext) + { + _SYNC_CHECKPOINT *psSyncCheckpoint = IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT, sListNode); + bool bDeferredFree = dllist_node_is_in_list(&psSyncCheckpoint->sDeferredFreeListNode); + + /* Line below avoids build error in release builds (where PVR_DPF is not defined) */ + PVR_UNREFERENCED_PARAMETER(bDeferredFree); + PVR_DPF((PVR_DBG_WARNING, + "%s syncCheckpoint<%p> ID=%d, %s, refs=%d, state=%s, fwaddr=%#08x, enqCount:%d, FWCount:%d %s", + __func__, + (void*)psSyncCheckpoint, + psSyncCheckpoint->ui32UID, + psSyncCheckpoint->azName, + OSAtomicRead(&psSyncCheckpoint->hRefCount), + psSyncCheckpoint->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_SIGNALLED ? 
+ "PVRSRV_SYNC_CHECKPOINT_SIGNALLED" : + psSyncCheckpoint->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE ? + "PVRSRV_SYNC_CHECKPOINT_ACTIVE" : "PVRSRV_SYNC_CHECKPOINT_ERRORED", + psSyncCheckpoint->ui32FWAddr, + OSAtomicRead(&psSyncCheckpoint->hEnqueuedCCBCount), + psSyncCheckpoint->psSyncCheckpointFwObj->ui32FwRefCount, + bDeferredFree ? "(deferred free)" : "")); + +#if (ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG == 1) + gui32NumSyncCheckpointContexts++; +#endif + } + } + OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); + } + else + { + IMG_INT iRf2 = 0; + + iRf2 = OSAtomicRead(&psContext->hRefCount); + SyncCheckpointContextUnref(psSyncCheckpointContext); + } + +#if defined(PDUMP) + if (dllist_is_empty(&psContext->sSyncCheckpointBlockListHead)) + { + OSLockDestroy(psContext->hSyncCheckpointBlockListLock); + psContext->hSyncCheckpointBlockListLock = NULL; + + OSLockAcquire(psDevNode->hSyncCheckpointContextListLock); + dllist_remove_node(&psContext->sListNode); + OSLockRelease(psDevNode->hSyncCheckpointContextListLock); + } +#endif + + return eError; +} + +PVRSRV_ERROR +SyncCheckpointAlloc(PSYNC_CHECKPOINT_CONTEXT psSyncContext, + PVRSRV_TIMELINE hTimeline, + PVRSRV_FENCE hFence, + const IMG_CHAR *pszCheckpointName, + PSYNC_CHECKPOINT *ppsSyncCheckpoint) +{ + _SYNC_CHECKPOINT *psNewSyncCheckpoint = NULL; + _SYNC_CHECKPOINT_CONTEXT *psSyncContextInt = (_SYNC_CHECKPOINT_CONTEXT*)psSyncContext; + PVRSRV_DEVICE_NODE *psDevNode; + PVRSRV_ERROR eError; + + PVR_LOG_RETURN_IF_FALSE((psSyncContext != NULL), "psSyncContext invalid", PVRSRV_ERROR_INVALID_PARAMS); + PVR_LOG_RETURN_IF_FALSE((ppsSyncCheckpoint != NULL), "ppsSyncCheckpoint invalid", PVRSRV_ERROR_INVALID_PARAMS); + + psDevNode = (PVRSRV_DEVICE_NODE *)psSyncContextInt->psDevNode; + +#if (SYNC_CHECKPOINT_POOL_SIZE > 0) +#if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)) + PVR_DPF((PVR_DBG_WARNING, "%s Entry, Getting checkpoint from pool", + 
__func__)); +#endif + psNewSyncCheckpoint = _GetCheckpointFromPool(psSyncContextInt); + if (!psNewSyncCheckpoint) + { +#if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)) + PVR_DPF((PVR_DBG_WARNING, + "%s checkpoint pool empty - will have to allocate", + __func__)); +#endif + } +#endif + /* If pool is empty (or not defined) alloc the new sync checkpoint */ + if (!psNewSyncCheckpoint) + { + psNewSyncCheckpoint = OSAllocMem(sizeof(*psNewSyncCheckpoint)); + PVR_LOG_GOTO_IF_NOMEM(psNewSyncCheckpoint, eError, fail_alloc); /* Sets OOM error code */ + + eError = RA_Alloc(psSyncContextInt->psSubAllocRA, + sizeof(*psNewSyncCheckpoint->psSyncCheckpointFwObj), + RA_NO_IMPORT_MULTIPLIER, + 0, + sizeof(IMG_UINT32), + (IMG_CHAR*)pszCheckpointName, + &psNewSyncCheckpoint->uiSpanAddr, + NULL, + (RA_PERISPAN_HANDLE *) &psNewSyncCheckpoint->psSyncCheckpointBlock); + PVR_LOG_GOTO_IF_ERROR(eError, "RA_Alloc", fail_raalloc); + +#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s CALLED RA_Alloc(), psSubAllocRA=<%p>, ui32SpanAddr=0x%llx", + __func__, + (void*)psSyncContextInt->psSubAllocRA, + psNewSyncCheckpoint->uiSpanAddr)); +#endif + psNewSyncCheckpoint->psSyncCheckpointFwObj = + (volatile SYNC_CHECKPOINT_FW_OBJ*)(void *)(psNewSyncCheckpoint->psSyncCheckpointBlock->pui32LinAddr + + (_SyncCheckpointGetOffset(psNewSyncCheckpoint)/sizeof(IMG_UINT32))); + psNewSyncCheckpoint->ui32FWAddr = psNewSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr + + _SyncCheckpointGetOffset(psNewSyncCheckpoint) + 1; + OSAtomicIncrement(&psNewSyncCheckpoint->psSyncCheckpointBlock->psContext->hCheckpointCount); + psNewSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_IN_USE; +#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s called to allocate new sync checkpoint<%p> for context<%p>", + __func__, (void*)psNewSyncCheckpoint, (void*)psSyncContext)); + 
PVR_DPF((PVR_DBG_WARNING, + "%s psSyncCheckpointFwObj<%p>", + __func__, (void*)psNewSyncCheckpoint->psSyncCheckpointFwObj)); + PVR_DPF((PVR_DBG_WARNING, + "%s psSyncCheckpoint FwAddr=0x%x", + __func__, SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psNewSyncCheckpoint))); + PVR_DPF((PVR_DBG_WARNING, + "%s pszCheckpointName = %s", + __func__, pszCheckpointName)); + PVR_DPF((PVR_DBG_WARNING, + "%s psSyncCheckpoint Timeline=%d", + __func__, hTimeline)); +#endif + } + + psNewSyncCheckpoint->hTimeline = hTimeline; + OSAtomicWrite(&psNewSyncCheckpoint->hRefCount, 1); + OSAtomicWrite(&psNewSyncCheckpoint->hEnqueuedCCBCount, 0); + psNewSyncCheckpoint->psSyncCheckpointFwObj->ui32FwRefCount = 0; + psNewSyncCheckpoint->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_ACTIVE; + psNewSyncCheckpoint->uiProcess = OSGetCurrentClientProcessIDKM(); + OSCachedMemSet(&psNewSyncCheckpoint->sDeferredFreeListNode, 0, sizeof(psNewSyncCheckpoint->sDeferredFreeListNode)); + + if (pszCheckpointName) + { + /* Copy over the checkpoint name annotation */ + OSStringLCopy(psNewSyncCheckpoint->azName, pszCheckpointName, PVRSRV_SYNC_NAME_LENGTH); + } + else + { + /* No sync checkpoint name annotation */ + psNewSyncCheckpoint->azName[0] = '\0'; + } + + /* Store sync checkpoint FW address in PRGXFWIF_UFO_ADDR struct */ + psNewSyncCheckpoint->sCheckpointUFOAddr.ui32Addr = SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psNewSyncCheckpoint); + + /* Assign unique ID to this sync checkpoint */ + psNewSyncCheckpoint->ui32UID = g_SyncCheckpointUID++; + +#if defined(PDUMP) + /* Flushing deferred fence signals to pdump */ + MISRHandler_PdumpDeferredSyncSignalPoster(psDevNode); + + _SyncCheckpointAllocPDump(psNewSyncCheckpoint); +#endif + + RGXSRV_HWPERF_ALLOC_SYNC_CP(psDevNode, psNewSyncCheckpoint->hTimeline, + OSGetCurrentClientProcessIDKM(), + hFence, + psNewSyncCheckpoint->ui32FWAddr, + psNewSyncCheckpoint->azName, + sizeof(psNewSyncCheckpoint->azName)); + + if (GetInfoPageDebugFlagsKM() & 
DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) + { + IMG_CHAR szChkptName[PVRSRV_SYNC_NAME_LENGTH]; + + if (pszCheckpointName) + { + /* Copy the checkpoint name annotation into a fixed-size array */ + OSStringLCopy(szChkptName, pszCheckpointName, PVRSRV_SYNC_NAME_LENGTH); + } + else + { + /* No checkpoint name annotation */ + szChkptName[0] = 0; + } + /* record this sync */ + eError = _SyncCheckpointRecordAdd(&psNewSyncCheckpoint->hRecord, + psNewSyncCheckpoint->psSyncCheckpointBlock, + psNewSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr, + _SyncCheckpointGetOffset(psNewSyncCheckpoint), + psNewSyncCheckpoint->ui32UID, + OSStringNLength(szChkptName, PVRSRV_SYNC_NAME_LENGTH), + szChkptName, (PSYNC_CHECKPOINT)psNewSyncCheckpoint); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to add sync checkpoint record \"%s\" (%s)", + __func__, + szChkptName, + PVRSRVGetErrorString(eError))); + psNewSyncCheckpoint->hRecord = NULL; + /* note the error but continue without affecting driver operation */ + } + } + + { + OS_SPINLOCK_FLAGS uiFlags; + /* Add the sync checkpoint to the device list */ + OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); + dllist_add_to_head(&psDevNode->sSyncCheckpointSyncsList, + &psNewSyncCheckpoint->sListNode); + OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); + } + + *ppsSyncCheckpoint = (PSYNC_CHECKPOINT)psNewSyncCheckpoint; + +#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s Exit(Ok), psNewSyncCheckpoint->ui32UID=%d <%p>", + __func__, + psNewSyncCheckpoint->ui32UID, + (void*)psNewSyncCheckpoint)); +#endif + return PVRSRV_OK; + +fail_raalloc: + OSFreeMem(psNewSyncCheckpoint); +fail_alloc: + return eError; +} + +static void SyncCheckpointUnref(_SYNC_CHECKPOINT *psSyncCheckpointInt) +{ + _SYNC_CHECKPOINT_CONTEXT *psContext; + PVRSRV_DEVICE_NODE *psDevNode; + + psContext = psSyncCheckpointInt->psSyncCheckpointBlock->psContext; + psDevNode = (PVRSRV_DEVICE_NODE 
*)psContext->psDevNode; + + /* + * Without this reference, the context may be destroyed as soon + * as _FreeSyncCheckpoint is called, but the context is still + * needed when _CheckDeferredCleanupList is called at the end + * of this function. + */ + SyncCheckpointContextRef((PSYNC_CHECKPOINT_CONTEXT)psContext); + + PVR_ASSERT(psSyncCheckpointInt->ui32ValidationCheck == SYNC_CHECKPOINT_PATTERN_IN_USE); + if (!OSAtomicRead(&psSyncCheckpointInt->hRefCount)) + { + PVR_DPF((PVR_DBG_ERROR, "SyncCheckpointUnref sync checkpoint already freed")); + } + else if (0 == OSAtomicDecrement(&psSyncCheckpointInt->hRefCount)) + { + /* If the firmware has serviced all enqueued references to the sync checkpoint, free it */ + if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount == + (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount))) + { +#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s No outstanding FW ops and hRef is zero, deleting SyncCheckpoint..", + __func__)); +#endif + if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) + && psSyncCheckpointInt->hRecord) + { + PVRSRV_ERROR eError; + /* remove this sync record */ + eError = _SyncCheckpointRecordRemove(psSyncCheckpointInt->hRecord); + PVR_LOG_IF_ERROR(eError, "_SyncCheckpointRecordRemove"); + } + + { + OS_SPINLOCK_FLAGS uiFlags; + /* Remove the sync checkpoint from the global list */ + OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); + dllist_remove_node(&psSyncCheckpointInt->sListNode); + OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); + } + + RGXSRV_HWPERF_FREE(psDevNode, SYNC_CP, psSyncCheckpointInt->ui32FWAddr); + +#if (SYNC_CHECKPOINT_POOL_SIZE > 0) +#if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)) + PVR_DPF((PVR_DBG_WARNING, + "%s attempting to return sync checkpoint to the pool", + __func__)); +#endif + if (!_PutCheckpointInPool(psSyncCheckpointInt)) 
+#endif + { +#if (SYNC_CHECKPOINT_POOL_SIZE > 0) +#if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)) + PVR_DPF((PVR_DBG_WARNING, + "%s pool is full, so just free it", + __func__)); +#endif +#endif +#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s CALLING RA_Free(psSyncCheckpoint(ID:%d)<%p>), psSubAllocRA=<%p>, ui32SpanAddr=0x%llx", + __func__, + psSyncCheckpointInt->ui32UID, + (void*)psSyncCheckpointInt, + (void*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext->psSubAllocRA, + psSyncCheckpointInt->uiSpanAddr)); +#endif + _FreeSyncCheckpoint(psSyncCheckpointInt); + } + } + else + { + OS_SPINLOCK_FLAGS uiFlags; +#if ((ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)) + PVR_DPF((PVR_DBG_WARNING, + "%s Outstanding FW ops hEnqueuedCCBCount=%d != FwObj->ui32FwRefCount=%d " + "- DEFERRING CLEANUP psSyncCheckpoint(ID:%d)<%p>", + __func__, + OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount), + psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount, + psSyncCheckpointInt->ui32UID, + (void*)psSyncCheckpointInt)); +#endif + /* Add the sync checkpoint to the deferred free list */ + OSSpinLockAcquire(psContext->psContextCtl->hDeferredCleanupListLock, uiFlags); + dllist_add_to_tail(&psContext->psContextCtl->sDeferredCleanupListHead, + &psSyncCheckpointInt->sDeferredFreeListNode); + OSSpinLockRelease(psContext->psContextCtl->hDeferredCleanupListLock, uiFlags); + } + } + else + { +#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s psSyncCheckpoint(ID:%d)<%p>, hRefCount decremented to %d", + __func__, + psSyncCheckpointInt->ui32UID, + (void*)psSyncCheckpointInt, + (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hRefCount)))); +#endif + } + + /* See if any sync checkpoints in the deferred cleanup list can be freed */ + _CheckDeferredCleanupList(psContext); + + 
SyncCheckpointContextUnref((PSYNC_CHECKPOINT_CONTEXT)psContext); +} + +void SyncCheckpointFree(PSYNC_CHECKPOINT psSyncCheckpoint) +{ + _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint; + + PVR_LOG_RETURN_VOID_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid"); + +#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s Entry, psSyncCheckpoint(ID:%d)<%p>, hRefCount=%d, psSyncCheckpoint->ui32ValidationCheck=0x%x", + __func__, + psSyncCheckpointInt->ui32UID, + (void*)psSyncCheckpoint, + (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hRefCount)), + psSyncCheckpointInt->ui32ValidationCheck)); +#endif + SyncCheckpointUnref(psSyncCheckpointInt); +} + +void +SyncCheckpointSignal(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags) +{ + _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint; + + PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid"); + + if (psSyncCheckpointInt) + { + PVR_LOG_IF_FALSE((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE), + "psSyncCheckpoint already signalled"); + + if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE) + { +#if defined(SUPPORT_RGX) + PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice; + + RGXSRVHWPerfSyncCheckpointUFOUpdate(psDevInfo, psSyncCheckpointInt, ui32FenceSyncFlags); +#endif + psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; + +#if defined(PDUMP) + _SyncCheckpointUpdatePDump(psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode, psSyncCheckpointInt, PVRSRV_SYNC_CHECKPOINT_SIGNALLED, ui32FenceSyncFlags); +#endif + } + else + { + PVR_DPF((PVR_DBG_WARNING, + "%s asked to set PVRSRV_SYNC_CHECKPOINT_SIGNALLED(%d) for (psSyncCheckpointInt->ui32UID=%d), " + "when value is already %d", + __func__, + PVRSRV_SYNC_CHECKPOINT_SIGNALLED, + 
psSyncCheckpointInt->ui32UID, + psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State)); + } + } +} + +void +SyncCheckpointSignalNoHW(PSYNC_CHECKPOINT psSyncCheckpoint) +{ + _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint; + + PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid"); + + if (psSyncCheckpointInt) + { + PVR_LOG_IF_FALSE((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE), + "psSyncCheckpoint already signalled"); + + if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE) + { +#if defined(SUPPORT_RGX) + PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice; + + RGXSRVHWPerfSyncCheckpointUFOUpdate(psDevInfo, psSyncCheckpointInt, PVRSRV_FENCE_FLAG_NONE); +#endif + psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; + } + else + { +#if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s asked to set PVRSRV_SYNC_CHECKPOINT_SIGNALLED(%d) for (psSyncCheckpointInt->ui32UID=%d), " + "when value is already %d", + __func__, + PVRSRV_SYNC_CHECKPOINT_SIGNALLED, + psSyncCheckpointInt->ui32UID, + psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State)); +#endif + } + } +} + +void +SyncCheckpointError(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags) +{ + _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint; + + PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid"); + + if (psSyncCheckpointInt) + { + PVR_LOG_IF_FALSE((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE), + "psSyncCheckpoint already signalled"); + + if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE) + { +#if defined(SUPPORT_RGX) + PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice; + if 
(!(ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) + { + RGX_HWPERF_UFO_DATA_ELEMENT sSyncData; + + sSyncData.sUpdate.ui32FWAddr = SyncCheckpointGetFirmwareAddr(psSyncCheckpoint); + sSyncData.sUpdate.ui32OldValue = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State; + sSyncData.sUpdate.ui32NewValue = PVRSRV_SYNC_CHECKPOINT_ERRORED; + + RGXSRV_HWPERF_UFO(psDevInfo, RGX_HWPERF_UFO_EV_UPDATE, &sSyncData, + (ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_CTX_ATOMIC) ? IMG_FALSE : IMG_TRUE); + } +#endif + + psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_ERRORED; + +#if defined(PDUMP) + _SyncCheckpointUpdatePDump(psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode, psSyncCheckpointInt, PVRSRV_SYNC_CHECKPOINT_ERRORED, ui32FenceSyncFlags); +#endif + } + } +} + +IMG_BOOL SyncCheckpointIsSignalled(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags) +{ + IMG_BOOL bRet = IMG_FALSE; + _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint; + + PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid"); + + if (psSyncCheckpointInt) + { +#if defined(SUPPORT_RGX) + PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice; + + RGXSRVHWPerfSyncCheckpointUFOIsSignalled(psDevInfo, psSyncCheckpointInt, ui32FenceSyncFlags); +#endif + bRet = ((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) || + (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ERRORED)); + +#if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s called for psSyncCheckpoint<%p>, returning %d", + __func__, + (void*)psSyncCheckpoint, + bRet)); +#endif + } + return bRet; +} + +IMG_BOOL +SyncCheckpointIsErrored(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags) +{ + IMG_BOOL bRet = IMG_FALSE; + _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint; + + 
PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid"); + + if (psSyncCheckpointInt) + { +#if defined(SUPPORT_RGX) + PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice; + + RGXSRVHWPerfSyncCheckpointUFOIsSignalled(psDevInfo, psSyncCheckpointInt, ui32FenceSyncFlags); +#endif + bRet = (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ERRORED); + +#if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s called for psSyncCheckpoint<%p>, returning %d", + __func__, + (void*)psSyncCheckpoint, + bRet)); +#endif + } + return bRet; +} + +const IMG_CHAR * +SyncCheckpointGetStateString(PSYNC_CHECKPOINT psSyncCheckpoint) +{ + _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint; + + PVR_LOG_RETURN_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", "Null"); + + switch (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State) + { + case PVRSRV_SYNC_CHECKPOINT_SIGNALLED: + return "Signalled"; + case PVRSRV_SYNC_CHECKPOINT_ACTIVE: + return "Active"; + case PVRSRV_SYNC_CHECKPOINT_ERRORED: + return "Errored"; + case PVRSRV_SYNC_CHECKPOINT_UNDEF: + return "Undefined"; + default: + return "Unknown"; + } +} + +PVRSRV_ERROR +SyncCheckpointTakeRef(PSYNC_CHECKPOINT psSyncCheckpoint) +{ + PVRSRV_ERROR eRet = PVRSRV_OK; + _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint; + + PVR_LOG_RETURN_IF_INVALID_PARAM(psSyncCheckpoint, "psSyncCheckpoint"); + +#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, "%s called for psSyncCheckpoint<%p> %d->%d (FWRef %u)", + __func__, + psSyncCheckpointInt, + OSAtomicRead(&psSyncCheckpointInt->hRefCount), + OSAtomicRead(&psSyncCheckpointInt->hRefCount)+1, + psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount)); +#endif + OSAtomicIncrement(&psSyncCheckpointInt->hRefCount); + + return eRet; +} + +PVRSRV_ERROR 
+SyncCheckpointDropRef(PSYNC_CHECKPOINT psSyncCheckpoint) +{ + PVRSRV_ERROR eRet = PVRSRV_OK; + _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint; + + PVR_LOG_RETURN_IF_INVALID_PARAM(psSyncCheckpoint, "psSyncCheckpoint"); + +#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, "%s called for psSyncCheckpoint<%p> %d->%d (FWRef %u)", + __func__, + psSyncCheckpointInt, + OSAtomicRead(&psSyncCheckpointInt->hRefCount), + OSAtomicRead(&psSyncCheckpointInt->hRefCount)-1, + psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount)); +#endif + SyncCheckpointUnref(psSyncCheckpointInt); + + return eRet; +} + +void +SyncCheckpointCCBEnqueued(PSYNC_CHECKPOINT psSyncCheckpoint) +{ + _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint; + + PVR_LOG_RETURN_VOID_IF_FALSE(psSyncCheckpoint != NULL, "psSyncCheckpoint"); + + if (psSyncCheckpointInt) + { +#if !defined(NO_HARDWARE) +#if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, "%s called for psSyncCheckpoint<%p> %d->%d (FWRef %u)", + __func__, + (void*)psSyncCheckpoint, + OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount), + OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount)+1, + psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount)); +#endif + OSAtomicIncrement(&psSyncCheckpointInt->hEnqueuedCCBCount); +#endif + } +} + +PRGXFWIF_UFO_ADDR* +SyncCheckpointGetRGXFWIFUFOAddr(PSYNC_CHECKPOINT psSyncCheckpoint) +{ + _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint; + + PVR_LOG_GOTO_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", invalid_chkpt); + + if (psSyncCheckpointInt) + { + if (psSyncCheckpointInt->ui32ValidationCheck == SYNC_CHECKPOINT_PATTERN_IN_USE) + { + return &psSyncCheckpointInt->sCheckpointUFOAddr; + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "%s called for psSyncCheckpoint<%p>, but ui32ValidationCheck=0x%x", + __func__, + (void*)psSyncCheckpoint, + 
psSyncCheckpointInt->ui32ValidationCheck)); + } + } + +invalid_chkpt: + return NULL; +} + +IMG_UINT32 +SyncCheckpointGetFirmwareAddr(PSYNC_CHECKPOINT psSyncCheckpoint) +{ + _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint; + IMG_UINT32 ui32Ret = 0; + + PVR_LOG_GOTO_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", invalid_chkpt); + + if (psSyncCheckpointInt) + { + if (psSyncCheckpointInt->ui32ValidationCheck == SYNC_CHECKPOINT_PATTERN_IN_USE) + { + ui32Ret = psSyncCheckpointInt->ui32FWAddr; + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "%s called for psSyncCheckpoint<%p>, but ui32ValidationCheck=0x%x", + __func__, + (void*)psSyncCheckpoint, + psSyncCheckpointInt->ui32ValidationCheck)); + } + } + +invalid_chkpt: + return ui32Ret; +} + +IMG_UINT32 +SyncCheckpointGetId(PSYNC_CHECKPOINT psSyncCheckpoint) +{ + _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint; + IMG_UINT32 ui32Ret = 0; + + PVR_LOG_GOTO_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", invalid_chkpt); + + if (psSyncCheckpointInt) + { +#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s returning ID for sync checkpoint<%p>", + __func__, + (void*)psSyncCheckpointInt)); + PVR_DPF((PVR_DBG_WARNING, + "%s (validationCheck=0x%x)", + __func__, + psSyncCheckpointInt->ui32ValidationCheck)); +#endif + ui32Ret = psSyncCheckpointInt->ui32UID; +#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s (ui32UID=0x%x)", + __func__, + psSyncCheckpointInt->ui32UID)); +#endif + } + return ui32Ret; + +invalid_chkpt: + return 0; +} + +PVRSRV_TIMELINE +SyncCheckpointGetTimeline(PSYNC_CHECKPOINT psSyncCheckpoint) +{ + _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint; + PVRSRV_TIMELINE i32Ret = PVRSRV_NO_TIMELINE; + + PVR_LOG_GOTO_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", invalid_chkpt); + + if (psSyncCheckpointInt) + { + i32Ret = 
psSyncCheckpointInt->hTimeline; + } + return i32Ret; + +invalid_chkpt: + return 0; +} + + +IMG_UINT32 +SyncCheckpointGetEnqueuedCount(PSYNC_CHECKPOINT psSyncCheckpoint) +{ + _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint; + PVR_LOG_RETURN_IF_FALSE(psSyncCheckpoint != NULL, "psSyncCheckpoint invalid", 0); + + return OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount); +} + +IMG_UINT32 +SyncCheckpointGetReferenceCount(PSYNC_CHECKPOINT psSyncCheckpoint) +{ + _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint; + PVR_LOG_RETURN_IF_FALSE(psSyncCheckpoint != NULL, "psSyncCheckpoint invalid", 0); + + return OSAtomicRead(&psSyncCheckpointInt->hRefCount); +} + +IMG_PID +SyncCheckpointGetCreator(PSYNC_CHECKPOINT psSyncCheckpoint) +{ + _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint; + PVR_LOG_RETURN_IF_FALSE(psSyncCheckpoint != NULL, "psSyncCheckpoint invalid", 0); + + return psSyncCheckpointInt->uiProcess; +} + +IMG_UINT32 SyncCheckpointStateFromUFO(PPVRSRV_DEVICE_NODE psDevNode, + IMG_UINT32 ui32FwAddr) +{ + _SYNC_CHECKPOINT *psSyncCheckpointInt; + PDLLIST_NODE psNode, psNext; + IMG_UINT32 ui32State = 0; + OS_SPINLOCK_FLAGS uiFlags; + + OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); + dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext) + { + psSyncCheckpointInt = IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT, sListNode); + if (ui32FwAddr == SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt)) + { + ui32State = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State; + break; + } + } + OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); + return ui32State; +} + +void SyncCheckpointErrorFromUFO(PPVRSRV_DEVICE_NODE psDevNode, + IMG_UINT32 ui32FwAddr) +{ + _SYNC_CHECKPOINT *psSyncCheckpointInt; + PDLLIST_NODE psNode, psNext; + OS_SPINLOCK_FLAGS uiFlags; + +#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s 
called to error UFO with ui32FWAddr=%d", + __func__, + ui32FwAddr)); +#endif + + OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); + dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext) + { + psSyncCheckpointInt = IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT, sListNode); + if (ui32FwAddr == SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt)) + { +#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s calling SyncCheckpointError for sync checkpoint <%p>", + __func__, + (void*)psSyncCheckpointInt)); +#endif + /* Mark as errored */ + SyncCheckpointError((PSYNC_CHECKPOINT)psSyncCheckpointInt, IMG_TRUE); + break; + } + } + OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); +} + +void SyncCheckpointRollbackFromUFO(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui32FwAddr) +{ +#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s called to rollback UFO with ui32FWAddr=0x%x", + __func__, + ui32FwAddr)); +#endif +#if !defined(NO_HARDWARE) + { + _SYNC_CHECKPOINT *psSyncCheckpointInt = NULL; + PDLLIST_NODE psNode = NULL, psNext = NULL; + OS_SPINLOCK_FLAGS uiFlags; + + OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); + dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext) + { + psSyncCheckpointInt = IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT, sListNode); + if (ui32FwAddr == SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt)) + { +#if ((ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1)) || (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s called for psSyncCheckpointInt<%p> %d->%d", + __func__, + (void *) psSyncCheckpointInt, + OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount), + OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount) - 1)); +#endif + OSAtomicDecrement(&psSyncCheckpointInt->hEnqueuedCCBCount); + break; + } + } + OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); + } 
+#endif +} + +static void _SyncCheckpointState(PDLLIST_NODE psNode, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + _SYNC_CHECKPOINT *psSyncCheckpoint = IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT, sListNode); + + if (psSyncCheckpoint->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE) + { + PVR_DUMPDEBUG_LOG("\t- ID = %d, FWAddr = 0x%08x, r%d:e%d:f%d: %s", + psSyncCheckpoint->ui32UID, + psSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr + + _SyncCheckpointGetOffset(psSyncCheckpoint), + OSAtomicRead(&psSyncCheckpoint->hRefCount), + OSAtomicRead(&psSyncCheckpoint->hEnqueuedCCBCount), + psSyncCheckpoint->psSyncCheckpointFwObj->ui32FwRefCount, + psSyncCheckpoint->azName); + } +} + +static void _SyncCheckpointDebugRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, + IMG_UINT32 ui32VerbLevel, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle; + DLLIST_NODE *psNode, *psNext; + OS_SPINLOCK_FLAGS uiFlags; + + if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM)) + { + PVR_DUMPDEBUG_LOG("------[ Active Sync Checkpoints ]------"); + OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); + dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext) + { + _SyncCheckpointState(psNode, pfnDumpDebugPrintf, pvDumpDebugFile); + } + OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); + } +} + +PVRSRV_ERROR +SyncCheckpointInit(PPVRSRV_DEVICE_NODE psDevNode) +{ + PVRSRV_ERROR eError; +#if defined(PDUMP) + PVRSRV_RGXDEV_INFO *psDevInfo; + + psDevInfo = psDevNode->pvDevice; +#endif + + eError = OSSpinLockCreate(&psDevNode->hSyncCheckpointListLock); + PVR_RETURN_IF_ERROR(eError); + + dllist_init(&psDevNode->sSyncCheckpointSyncsList); + + eError = PVRSRVRegisterDbgRequestNotify(&psDevNode->hSyncCheckpointNotify, + psDevNode, + _SyncCheckpointDebugRequest, + DEBUG_REQUEST_SYNCCHECKPOINT, + 
(PVRSRV_DBGREQ_HANDLE)psDevNode); + PVR_GOTO_IF_ERROR(eError, e0); + + if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) + { + _SyncCheckpointRecordListInit(psDevNode); + } + +#if defined(PDUMP) + eError = OSSpinLockCreate(&psDevInfo->hSyncCheckpointSignalSpinLock); + if (eError != PVRSRV_OK) + { + psDevInfo->hSyncCheckpointSignalSpinLock = NULL; + goto e1; + } + + eError = OSLockCreate(&psDevNode->hSyncCheckpointSignalLock); + if (eError != PVRSRV_OK) + { + psDevNode->hSyncCheckpointSignalLock = NULL; + goto e2; + } + + psDevNode->pui8DeferredSyncCPSignal = OSAllocMem(SYNC_CHECKPOINT_MAX_DEFERRED_SIGNAL + * sizeof(_SYNC_CHECKPOINT_DEFERRED_SIGNAL)); + PVR_GOTO_IF_NOMEM(psDevNode->pui8DeferredSyncCPSignal, eError, e3); + + psDevNode->ui16SyncCPWriteIdx = 0; + psDevNode->ui16SyncCPReadIdx = 0; + + eError = OSInstallMISR(&psDevNode->pvSyncCPMISR, + MISRHandler_PdumpDeferredSyncSignalPoster, + psDevNode, + "RGX_PdumpDeferredSyncSignalPoster"); + PVR_GOTO_IF_ERROR(eError, e4); + + eError = OSLockCreate(&psDevNode->hSyncCheckpointContextListLock); + if (eError != PVRSRV_OK) + { + psDevNode->hSyncCheckpointContextListLock = NULL; + goto e5; + } + + + dllist_init(&psDevNode->sSyncCheckpointContextListHead); + + eError = PDumpRegisterTransitionCallbackFenceSync(psDevNode, + _SyncCheckpointPDumpTransition, + &psDevNode->hTransition); + if (eError != PVRSRV_OK) + { + psDevNode->hTransition = NULL; + goto e6; + } +#endif + + return PVRSRV_OK; + +#if defined(PDUMP) +e6: + OSLockDestroy(psDevNode->hSyncCheckpointContextListLock); + psDevNode->hSyncCheckpointContextListLock = NULL; +e5: + (void) OSUninstallMISR(psDevNode->pvSyncCPMISR); + psDevNode->pvSyncCPMISR = NULL; +e4: + if (psDevNode->pui8DeferredSyncCPSignal) + { + OSFreeMem(psDevNode->pui8DeferredSyncCPSignal); + psDevNode->pui8DeferredSyncCPSignal = NULL; + } +e3: + OSLockDestroy(psDevNode->hSyncCheckpointSignalLock); + psDevNode->hSyncCheckpointSignalLock = NULL; +e2: + 
OSSpinLockDestroy(psDevInfo->hSyncCheckpointSignalSpinLock); + psDevInfo->hSyncCheckpointSignalSpinLock = NULL; +e1: + _SyncCheckpointRecordListDeinit(psDevNode); +#endif +e0: + OSSpinLockDestroy(psDevNode->hSyncCheckpointListLock); + psDevNode->hSyncCheckpointListLock = NULL; + + return eError; +} + +void SyncCheckpointDeinit(PPVRSRV_DEVICE_NODE psDevNode) +{ +#if defined(PDUMP) + PVRSRV_RGXDEV_INFO *psDevInfo; + + psDevInfo = psDevNode->pvDevice; + PDumpUnregisterTransitionCallbackFenceSync(psDevNode->hTransition); + psDevNode->hTransition = NULL; + + if (psDevNode->hSyncCheckpointContextListLock) + { + OSLockDestroy(psDevNode->hSyncCheckpointContextListLock); + psDevNode->hSyncCheckpointContextListLock = NULL; + } + + if (psDevNode->pvSyncCPMISR) + { + (void) OSUninstallMISR(psDevNode->pvSyncCPMISR); + psDevNode->pvSyncCPMISR = NULL; + } + + if (psDevNode->pui8DeferredSyncCPSignal) + { + OSFreeMem(psDevNode->pui8DeferredSyncCPSignal); + psDevNode->pui8DeferredSyncCPSignal = NULL; + } + if (psDevNode->hSyncCheckpointSignalLock) + { + OSLockDestroy(psDevNode->hSyncCheckpointSignalLock); + psDevNode->hSyncCheckpointSignalLock = NULL; + } + if (psDevInfo->hSyncCheckpointSignalSpinLock) + { + OSSpinLockDestroy(psDevInfo->hSyncCheckpointSignalSpinLock); + psDevInfo->hSyncCheckpointSignalSpinLock = NULL; + } +#endif + + PVRSRVUnregisterDbgRequestNotify(psDevNode->hSyncCheckpointNotify); + psDevNode->hSyncCheckpointNotify = NULL; + OSSpinLockDestroy(psDevNode->hSyncCheckpointListLock); + psDevNode->hSyncCheckpointListLock = NULL; + if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) + { + _SyncCheckpointRecordListDeinit(psDevNode); + } +} + +void SyncCheckpointRecordLookup(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui32FwAddr, + IMG_CHAR * pszSyncInfo, size_t len) +{ + DLLIST_NODE *psNode, *psNext; + IMG_BOOL bFound = IMG_FALSE; + + if (!pszSyncInfo) + { + return; + } + + pszSyncInfo[0] = '\0'; + + 
OSLockAcquire(psDevNode->hSyncCheckpointRecordLock); + dllist_foreach_node(&psDevNode->sSyncCheckpointRecordList, psNode, psNext) + { + struct SYNC_CHECKPOINT_RECORD *psSyncCheckpointRec = + IMG_CONTAINER_OF(psNode, struct SYNC_CHECKPOINT_RECORD, sNode); + if ((psSyncCheckpointRec->ui32FwBlockAddr + psSyncCheckpointRec->ui32SyncOffset + 1) == ui32FwAddr) + { + SYNC_CHECKPOINT_BLOCK *psSyncCheckpointBlock = psSyncCheckpointRec->psSyncCheckpointBlock; + if (psSyncCheckpointBlock && psSyncCheckpointBlock->pui32LinAddr) + { + void *pSyncCheckpointAddr = IMG_OFFSET_ADDR(psSyncCheckpointBlock->pui32LinAddr, + psSyncCheckpointRec->ui32SyncOffset); + OSSNPrintf(pszSyncInfo, len, "%s Checkpoint:%05u (%s)", + (*(IMG_UINT32*)pSyncCheckpointAddr == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) ? + "SIGNALLED" : + ((*(IMG_UINT32*)pSyncCheckpointAddr == PVRSRV_SYNC_CHECKPOINT_ERRORED) ? + "ERRORED" : "ACTIVE"), + psSyncCheckpointRec->uiPID, + psSyncCheckpointRec->szClassName); + } + else + { + OSSNPrintf(pszSyncInfo, len, "Checkpoint:%05u (%s)", + psSyncCheckpointRec->uiPID, + psSyncCheckpointRec->szClassName); + } + + bFound = IMG_TRUE; + break; + } + } + OSLockRelease(psDevNode->hSyncCheckpointRecordLock); + + if (!bFound && (psDevNode->ui32SyncCheckpointRecordCountHighWatermark == SYNC_CHECKPOINT_RECORD_LIMIT)) + { + OSSNPrintf(pszSyncInfo, len, "(Record may be lost)"); + } +} + +static PVRSRV_ERROR +_SyncCheckpointRecordAdd( + PSYNC_CHECKPOINT_RECORD_HANDLE * phRecord, + SYNC_CHECKPOINT_BLOCK *hSyncCheckpointBlock, + IMG_UINT32 ui32FwBlockAddr, + IMG_UINT32 ui32SyncOffset, + IMG_UINT32 ui32UID, + IMG_UINT32 ui32ClassNameSize, + const IMG_CHAR *pszClassName, PSYNC_CHECKPOINT pSyncCheckpt) +{ + struct SYNC_CHECKPOINT_RECORD * psSyncRec; + _SYNC_CHECKPOINT_CONTEXT *psContext = hSyncCheckpointBlock->psContext; + PVRSRV_DEVICE_NODE *psDevNode = psContext->psDevNode; + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_RETURN_IF_INVALID_PARAM(phRecord); + + *phRecord = NULL; + + psSyncRec = 
OSAllocMem(sizeof(*psSyncRec)); + PVR_LOG_GOTO_IF_NOMEM(psSyncRec, eError, fail_alloc); /* Sets OOM error code */ + + psSyncRec->psDevNode = psDevNode; + psSyncRec->psSyncCheckpointBlock = hSyncCheckpointBlock; + psSyncRec->ui32SyncOffset = ui32SyncOffset; + psSyncRec->ui32FwBlockAddr = ui32FwBlockAddr; + psSyncRec->ui64OSTime = OSClockns64(); + psSyncRec->uiPID = OSGetCurrentProcessID(); + psSyncRec->ui32UID = ui32UID; + psSyncRec->pSyncCheckpt = pSyncCheckpt; + if (pszClassName) + { + if (ui32ClassNameSize >= PVRSRV_SYNC_NAME_LENGTH) + ui32ClassNameSize = PVRSRV_SYNC_NAME_LENGTH; + /* Copy over the class name annotation */ + OSStringLCopy(psSyncRec->szClassName, pszClassName, ui32ClassNameSize); + } + else + { + /* No class name annotation */ + psSyncRec->szClassName[0] = 0; + } + + OSLockAcquire(psDevNode->hSyncCheckpointRecordLock); + if (psDevNode->ui32SyncCheckpointRecordCount < SYNC_CHECKPOINT_RECORD_LIMIT) + { + dllist_add_to_head(&psDevNode->sSyncCheckpointRecordList, &psSyncRec->sNode); + psDevNode->ui32SyncCheckpointRecordCount++; + + if (psDevNode->ui32SyncCheckpointRecordCount > psDevNode->ui32SyncCheckpointRecordCountHighWatermark) + { + psDevNode->ui32SyncCheckpointRecordCountHighWatermark = psDevNode->ui32SyncCheckpointRecordCount; + } + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to add sync checkpoint record \"%s\". 
%u records already exist.", + __func__, + pszClassName, + psDevNode->ui32SyncCheckpointRecordCount)); + OSFreeMem(psSyncRec); + psSyncRec = NULL; + eError = PVRSRV_ERROR_TOOMANYBUFFERS; + } + OSLockRelease(psDevNode->hSyncCheckpointRecordLock); + + *phRecord = (PSYNC_CHECKPOINT_RECORD_HANDLE)psSyncRec; + +fail_alloc: + return eError; +} + +static PVRSRV_ERROR +_SyncCheckpointRecordRemove(PSYNC_CHECKPOINT_RECORD_HANDLE hRecord) +{ + struct SYNC_CHECKPOINT_RECORD **ppFreedSync; + struct SYNC_CHECKPOINT_RECORD *pSync = (struct SYNC_CHECKPOINT_RECORD*)hRecord; + PVRSRV_DEVICE_NODE *psDevNode; + + PVR_RETURN_IF_INVALID_PARAM(hRecord); + + psDevNode = pSync->psDevNode; + + OSLockAcquire(psDevNode->hSyncCheckpointRecordLock); + + dllist_remove_node(&pSync->sNode); + + if (psDevNode->uiSyncCheckpointRecordFreeIdx >= PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: psDevNode->uiSyncCheckpointRecordFreeIdx out of range", + __func__)); + psDevNode->uiSyncCheckpointRecordFreeIdx = 0; + } + ppFreedSync = &psDevNode->apsSyncCheckpointRecordsFreed[psDevNode->uiSyncCheckpointRecordFreeIdx]; + psDevNode->uiSyncCheckpointRecordFreeIdx = + (psDevNode->uiSyncCheckpointRecordFreeIdx + 1) % PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN; + + if (*ppFreedSync) + { + OSFreeMem(*ppFreedSync); + } + pSync->psSyncCheckpointBlock = NULL; + pSync->ui64OSTime = OSClockns64(); + *ppFreedSync = pSync; + + psDevNode->ui32SyncCheckpointRecordCount--; + + OSLockRelease(psDevNode->hSyncCheckpointRecordLock); + + return PVRSRV_OK; +} + +#define NS_IN_S (1000000000UL) +static void _SyncCheckpointRecordPrint(struct SYNC_CHECKPOINT_RECORD *psSyncCheckpointRec, + IMG_UINT64 ui64TimeNow, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + _SYNC_CHECKPOINT *psSyncCheckpoint = (_SYNC_CHECKPOINT *)psSyncCheckpointRec->pSyncCheckpt; + SYNC_CHECKPOINT_BLOCK *psSyncCheckpointBlock = psSyncCheckpointRec->psSyncCheckpointBlock; + IMG_UINT64 ui64DeltaS; + IMG_UINT32 
ui32DeltaF; + IMG_UINT64 ui64Delta = ui64TimeNow - psSyncCheckpointRec->ui64OSTime; + ui64DeltaS = OSDivide64(ui64Delta, NS_IN_S, &ui32DeltaF); + + if (psSyncCheckpointBlock && psSyncCheckpointBlock->pui32LinAddr) + { + void *pSyncCheckpointAddr; + pSyncCheckpointAddr = IMG_OFFSET_ADDR(psSyncCheckpointBlock->pui32LinAddr, + psSyncCheckpointRec->ui32SyncOffset); + + PVR_DUMPDEBUG_LOG("\t%05u %05" IMG_UINT64_FMTSPEC ".%09u %010u FWAddr=0x%08x (r%d:e%d:f%d) State=%s (%s)", + psSyncCheckpointRec->uiPID, + ui64DeltaS, ui32DeltaF, psSyncCheckpointRec->ui32UID, + (psSyncCheckpointRec->ui32FwBlockAddr+psSyncCheckpointRec->ui32SyncOffset), + OSAtomicRead(&psSyncCheckpoint->hRefCount), + OSAtomicRead(&psSyncCheckpoint->hEnqueuedCCBCount), + psSyncCheckpoint->psSyncCheckpointFwObj->ui32FwRefCount, + (*(IMG_UINT32*)pSyncCheckpointAddr == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) ? + "SIGNALLED" : + ((*(IMG_UINT32*)pSyncCheckpointAddr == PVRSRV_SYNC_CHECKPOINT_ERRORED) ? + "ERRORED" : "ACTIVE"), + psSyncCheckpointRec->szClassName); + } + else + { + PVR_DUMPDEBUG_LOG("\t%05u %05" IMG_UINT64_FMTSPEC ".%09u %010u FWAddr=0x%08x State= (%s)", + psSyncCheckpointRec->uiPID, + ui64DeltaS, ui32DeltaF, psSyncCheckpointRec->ui32UID, + (psSyncCheckpointRec->ui32FwBlockAddr+psSyncCheckpointRec->ui32SyncOffset), + psSyncCheckpointRec->szClassName + ); + } +} + +static void _SyncCheckpointRecordRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, + IMG_UINT32 ui32VerbLevel, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle; + IMG_UINT64 ui64TimeNowS; + IMG_UINT32 ui32TimeNowF; + IMG_UINT64 ui64TimeNow = OSClockns64(); + DLLIST_NODE *psNode, *psNext; + + ui64TimeNowS = OSDivide64(ui64TimeNow, NS_IN_S, &ui32TimeNowF); + + if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM)) + { + IMG_UINT32 i; + + OSLockAcquire(psDevNode->hSyncCheckpointRecordLock); + + PVR_DUMPDEBUG_LOG("Dumping allocated 
sync checkpoints. Allocated: %u High watermark: %u (time ref %05" IMG_UINT64_FMTSPEC ".%09u)", + psDevNode->ui32SyncCheckpointRecordCount, + psDevNode->ui32SyncCheckpointRecordCountHighWatermark, + ui64TimeNowS, + ui32TimeNowF); + if (psDevNode->ui32SyncCheckpointRecordCountHighWatermark == SYNC_CHECKPOINT_RECORD_LIMIT) + { + PVR_DUMPDEBUG_LOG("Warning: Record limit (%u) was reached. Some sync checkpoints may not have been recorded in the debug information.", + SYNC_CHECKPOINT_RECORD_LIMIT); + } + PVR_DUMPDEBUG_LOG("\t%-5s %-15s %-10s %-17s %-14s (%s)", + "PID", "Time Delta (s)", "UID", "Address", "State", "Annotation"); + + dllist_foreach_node(&psDevNode->sSyncCheckpointRecordList, psNode, psNext) + { + struct SYNC_CHECKPOINT_RECORD *psSyncCheckpointRec = + IMG_CONTAINER_OF(psNode, struct SYNC_CHECKPOINT_RECORD, sNode); + _SyncCheckpointRecordPrint(psSyncCheckpointRec, ui64TimeNow, + pfnDumpDebugPrintf, pvDumpDebugFile); + } + + PVR_DUMPDEBUG_LOG("Dumping all recently freed sync checkpoints @ %05" IMG_UINT64_FMTSPEC ".%09u", + ui64TimeNowS, + ui32TimeNowF); + PVR_DUMPDEBUG_LOG("\t%-5s %-15s %-10s %-17s %-14s (%s)", + "PID", "Time Delta (s)", "UID", "Address", "State", "Annotation"); + for (i = DECREMENT_WITH_WRAP(psDevNode->uiSyncCheckpointRecordFreeIdx, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN); + i != psDevNode->uiSyncCheckpointRecordFreeIdx; + i = DECREMENT_WITH_WRAP(i, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN)) + { + if (psDevNode->apsSyncCheckpointRecordsFreed[i]) + { + _SyncCheckpointRecordPrint(psDevNode->apsSyncCheckpointRecordsFreed[i], + ui64TimeNow, pfnDumpDebugPrintf, pvDumpDebugFile); + } + else + { + break; + } + } + OSLockRelease(psDevNode->hSyncCheckpointRecordLock); + } +} +#undef NS_IN_S +static PVRSRV_ERROR _SyncCheckpointRecordListInit(PVRSRV_DEVICE_NODE *psDevNode) +{ + PVRSRV_ERROR eError; + + eError = OSLockCreate(&psDevNode->hSyncCheckpointRecordLock); + PVR_GOTO_IF_ERROR(eError, fail_lock_create); + 
dllist_init(&psDevNode->sSyncCheckpointRecordList); + + psDevNode->ui32SyncCheckpointRecordCount = 0; + psDevNode->ui32SyncCheckpointRecordCountHighWatermark = 0; + + eError = PVRSRVRegisterDbgRequestNotify(&psDevNode->hSyncCheckpointRecordNotify, + psDevNode, + _SyncCheckpointRecordRequest, + DEBUG_REQUEST_SYNCCHECKPOINT, + (PVRSRV_DBGREQ_HANDLE)psDevNode); + PVR_GOTO_IF_ERROR(eError, fail_dbg_register); + + return PVRSRV_OK; + +fail_dbg_register: + OSLockDestroy(psDevNode->hSyncCheckpointRecordLock); +fail_lock_create: + return eError; +} + +static void _SyncCheckpointRecordListDeinit(PVRSRV_DEVICE_NODE *psDevNode) +{ + DLLIST_NODE *psNode, *psNext; + int i; + + OSLockAcquire(psDevNode->hSyncCheckpointRecordLock); + dllist_foreach_node(&psDevNode->sSyncCheckpointRecordList, psNode, psNext) + { + struct SYNC_CHECKPOINT_RECORD *pSyncCheckpointRec = + IMG_CONTAINER_OF(psNode, struct SYNC_CHECKPOINT_RECORD, sNode); + + dllist_remove_node(psNode); + OSFreeMem(pSyncCheckpointRec); + } + + for (i = 0; i < PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN; i++) + { + if (psDevNode->apsSyncCheckpointRecordsFreed[i]) + { + OSFreeMem(psDevNode->apsSyncCheckpointRecordsFreed[i]); + psDevNode->apsSyncCheckpointRecordsFreed[i] = NULL; + } + } + OSLockRelease(psDevNode->hSyncCheckpointRecordLock); + + if (psDevNode->hSyncCheckpointRecordNotify) + { + PVRSRVUnregisterDbgRequestNotify(psDevNode->hSyncCheckpointRecordNotify); + } + OSLockDestroy(psDevNode->hSyncCheckpointRecordLock); +} + +#if defined(PDUMP) + +static PVRSRV_ERROR +_SyncCheckpointAllocPDump(_SYNC_CHECKPOINT *psSyncCheckpoint) +{ + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "Allocated Sync Checkpoint %s (ID:%d, TL:%d, FirmwareVAddr = 0x%08x)", + psSyncCheckpoint->azName, + psSyncCheckpoint->ui32UID, psSyncCheckpoint->hTimeline, + psSyncCheckpoint->sCheckpointUFOAddr.ui32Addr); + + DevmemPDumpLoadMemValue32(psSyncCheckpoint->psSyncCheckpointBlock->hMemDesc, + _SyncCheckpointGetOffset(psSyncCheckpoint), + 
PVRSRV_SYNC_CHECKPOINT_ACTIVE, + PDUMP_FLAGS_CONTINUOUS); + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +_SyncCheckpointUpdatePDump(PPVRSRV_DEVICE_NODE psDevNode, _SYNC_CHECKPOINT *psSyncCheckpoint, IMG_UINT32 ui32Status, IMG_UINT32 ui32FenceSyncFlags) +{ + IMG_BOOL bSleepAllowed = (ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_CTX_ATOMIC) ? IMG_FALSE : IMG_TRUE; + PVRSRV_RGXDEV_INFO *psDevInfo; + + psDevInfo = psDevNode->pvDevice; + /* + We might be ask to PDump sync state outside of capture range + (e.g. texture uploads) so make this continuous. + */ + if (bSleepAllowed) + { + if (ui32Status == PVRSRV_SYNC_CHECKPOINT_ERRORED) + { + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "Errored Sync Checkpoint %s (ID:%d, TL:%d, FirmwareVAddr = 0x%08x)", + psSyncCheckpoint->azName, + psSyncCheckpoint->ui32UID, psSyncCheckpoint->hTimeline, + (psSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr + + _SyncCheckpointGetOffset(psSyncCheckpoint))); + } + else + { + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "Signalled Sync Checkpoint %s (ID:%d, TL:%d, FirmwareVAddr = 0x%08x)", + psSyncCheckpoint->azName, + psSyncCheckpoint->ui32UID, psSyncCheckpoint->hTimeline, + (psSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr + + _SyncCheckpointGetOffset(psSyncCheckpoint))); + } + + DevmemPDumpLoadMemValue32(psSyncCheckpoint->psSyncCheckpointBlock->hMemDesc, + _SyncCheckpointGetOffset(psSyncCheckpoint), + ui32Status, + PDUMP_FLAGS_CONTINUOUS); + } + else + { + _SYNC_CHECKPOINT_DEFERRED_SIGNAL *psSyncData; + OS_SPINLOCK_FLAGS uiFlags; + IMG_UINT16 ui16NewWriteIdx; + + OSSpinLockAcquire(psDevInfo->hSyncCheckpointSignalSpinLock, uiFlags); + + ui16NewWriteIdx = GET_CP_CB_NEXT_IDX(psDevNode->ui16SyncCPWriteIdx); + if (ui16NewWriteIdx == psDevNode->ui16SyncCPReadIdx) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: ERROR Deferred SyncCheckpointSignal CB is full)", + __func__)); + } + else + { + psSyncData = GET_CP_CB_BASE(psDevNode->ui16SyncCPWriteIdx); + psSyncData->asSyncCheckpoint = 
*psSyncCheckpoint; + psSyncData->ui32Status = ui32Status; + psDevNode->ui16SyncCPWriteIdx = ui16NewWriteIdx; + } + + OSSpinLockRelease(psDevInfo->hSyncCheckpointSignalSpinLock, uiFlags); + + OSScheduleMISR(psDevNode->pvSyncCPMISR); + } + + return PVRSRV_OK; +} + +static void +MISRHandler_PdumpDeferredSyncSignalPoster(void *pvData) +{ + PPVRSRV_DEVICE_NODE psDevNode = (PPVRSRV_DEVICE_NODE) pvData; + OS_SPINLOCK_FLAGS uiFlags; + IMG_UINT16 ui16ReadIdx, ui16WriteIdx; + _SYNC_CHECKPOINT_DEFERRED_SIGNAL *psSyncData; + PVRSRV_RGXDEV_INFO *psDevInfo; + + psDevInfo = psDevNode->pvDevice; + + OSLockAcquire(psDevNode->hSyncCheckpointSignalLock); + + OSSpinLockAcquire(psDevInfo->hSyncCheckpointSignalSpinLock, uiFlags); + /* Snapshot current write and read offset of CB */ + ui16WriteIdx = psDevNode->ui16SyncCPWriteIdx; + ui16ReadIdx = psDevNode->ui16SyncCPReadIdx; + + OSSpinLockRelease(psDevInfo->hSyncCheckpointSignalSpinLock, uiFlags); + /* CB is empty */ + if (ui16WriteIdx == ui16ReadIdx) + { + OSLockRelease(psDevNode->hSyncCheckpointSignalLock); + return; + } + do + { + /* Read item in the CB and flush it to pdump */ + psSyncData = GET_CP_CB_BASE(ui16ReadIdx); + _SyncCheckpointUpdatePDump(psDevNode, &psSyncData->asSyncCheckpoint, psSyncData->ui32Status, PVRSRV_FENCE_FLAG_NONE); + ui16ReadIdx = GET_CP_CB_NEXT_IDX(psDevNode->ui16SyncCPReadIdx); + /* Increment read offset in CB as one item is flushed to pdump */ + OSSpinLockAcquire(psDevInfo->hSyncCheckpointSignalSpinLock, uiFlags); + psDevNode->ui16SyncCPReadIdx = ui16ReadIdx; + OSSpinLockRelease(psDevInfo->hSyncCheckpointSignalSpinLock, uiFlags); + /* Call to this function will flush all the items present in CB + * when this function is called i.e. 
use snapshot of WriteOffset + * taken at the beginning in this function and iterate till Write != Read */ + } while (ui16WriteIdx != ui16ReadIdx); + + OSLockRelease(psDevNode->hSyncCheckpointSignalLock); +} + +PVRSRV_ERROR PVRSRVSyncCheckpointSignalledPDumpPolKM(PVRSRV_FENCE hFence) +{ + PVRSRV_ERROR eError; + PSYNC_CHECKPOINT *apsCheckpoints = NULL; + _SYNC_CHECKPOINT *psSyncCheckpoint = NULL; + IMG_UINT32 i, uiNumCheckpoints = 0; +#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) && defined(NO_HARDWARE) && defined(PDUMP) + PVRSRV_RGXDEV_INFO *psDevInfo; +#endif + + if (hFence != PVRSRV_NO_FENCE) + { + eError = g_psSyncCheckpointPfnStruct->pfnSyncFenceGetCheckpoints(hFence, &uiNumCheckpoints, &apsCheckpoints); + } + else + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + + PVR_LOG_RETURN_IF_ERROR(eError, "g_pfnFenceGetCheckpoints"); + + if (uiNumCheckpoints) + { + /* Flushing deferred fence signals to pdump */ + psSyncCheckpoint = (_SYNC_CHECKPOINT *)apsCheckpoints[0]; + MISRHandler_PdumpDeferredSyncSignalPoster(psSyncCheckpoint->psSyncCheckpointBlock->psDevNode); + } + + for (i=0; i < uiNumCheckpoints; i++) + { + psSyncCheckpoint = (_SYNC_CHECKPOINT *)apsCheckpoints[i]; + if (psSyncCheckpoint->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) + { + PDUMPCOMMENTWITHFLAGS(psSyncCheckpoint->ui32PDumpFlags, + "Wait for Fence %s (ID:%d)", + psSyncCheckpoint->azName, + psSyncCheckpoint->ui32UID); + + eError = DevmemPDumpDevmemPol32(psSyncCheckpoint->psSyncCheckpointBlock->hMemDesc, + _SyncCheckpointGetOffset(psSyncCheckpoint), + PVRSRV_SYNC_CHECKPOINT_SIGNALLED, + 0xFFFFFFFF, + PDUMP_POLL_OPERATOR_EQUAL, + psSyncCheckpoint->ui32PDumpFlags); + PVR_LOG_IF_ERROR(eError, "DevmemPDumpDevmemPol32"); + } + } + +#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) && defined(NO_HARDWARE) && defined(PDUMP) + /* Sampling of USC timers can only be done after synchronisation for a 3D kick is over */ + if (uiNumCheckpoints) + { + 
psSyncCheckpoint = (_SYNC_CHECKPOINT *)apsCheckpoints[0]; + psDevInfo = psSyncCheckpoint->psSyncCheckpointBlock->psDevNode->pvDevice; + if (psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags & RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER) + { + PVRSRVValidateSOCUSCTimer(psDevInfo, PDUMP_CONT, 0, 0, NULL); + } + } +#endif + + /* Free the memory that was allocated for the sync checkpoint list returned */ + if (apsCheckpoints) + { + SyncCheckpointFreeCheckpointListMem(apsCheckpoints); + } + + return PVRSRV_OK; +} + +static PVRSRV_ERROR +_SyncCheckpointPDumpTransition(void *pvData, PDUMP_TRANSITION_EVENT eEvent) +{ + _SYNC_CHECKPOINT_CONTEXT *psContext; + DLLIST_NODE *psNode, *psNext; + DLLIST_NODE *psNode1, *psNext1; + PPVRSRV_DEVICE_NODE psDevNode = (PPVRSRV_DEVICE_NODE) pvData; + + if ((eEvent == PDUMP_TRANSITION_EVENT_RANGE_ENTERED) || (eEvent == PDUMP_TRANSITION_EVENT_BLOCK_STARTED)) + { + OSLockAcquire(psDevNode->hSyncCheckpointContextListLock); + dllist_foreach_node(&psDevNode->sSyncCheckpointContextListHead, psNode, psNext) + { + psContext = IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT_CONTEXT, sListNode); + + OSLockAcquire(psContext->hSyncCheckpointBlockListLock); + dllist_foreach_node(&psContext->sSyncCheckpointBlockListHead, psNode1, psNext1) + { + SYNC_CHECKPOINT_BLOCK *psSyncBlk = + IMG_CONTAINER_OF(psNode1, SYNC_CHECKPOINT_BLOCK, sListNode); + DevmemPDumpLoadMem(psSyncBlk->hMemDesc, + 0, + psSyncBlk->ui32SyncBlockSize, + PDUMP_FLAGS_CONTINUOUS); + } + OSLockRelease(psContext->hSyncCheckpointBlockListLock); + } + OSLockRelease(psDevNode->hSyncCheckpointContextListLock); + } + + return PVRSRV_OK; +} +#endif + +static void _CheckDeferredCleanupList(_SYNC_CHECKPOINT_CONTEXT *psContext) +{ + _SYNC_CHECKPOINT_CONTEXT_CTL *const psCtxCtl = psContext->psContextCtl; + PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE*)psContext->psDevNode; + DECLARE_DLLIST(sCleanupList); + DLLIST_NODE *psNode, *psNext; + OS_SPINLOCK_FLAGS uiFlags; + +#if 
(ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, "%s called", __func__)); +#endif + + /* Check the deferred cleanup list and free any sync checkpoints we can */ + OSSpinLockAcquire(psCtxCtl->hDeferredCleanupListLock, uiFlags); + + if (dllist_is_empty(&psCtxCtl->sDeferredCleanupListHead)) + { + OSSpinLockRelease(psCtxCtl->hDeferredCleanupListLock, uiFlags); +#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, "%s: Defer free list is empty", __func__)); +#endif + /* if list is empty then we have nothing to do here */ + return; + } + + dllist_foreach_node(&psCtxCtl->sDeferredCleanupListHead, psNode, psNext) + { + _SYNC_CHECKPOINT *psSyncCheckpointInt = + IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT, sDeferredFreeListNode); + + if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount == + (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount))) + { + if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) + && psSyncCheckpointInt->hRecord) + { + PVRSRV_ERROR eError; + /* remove this sync record */ + eError = _SyncCheckpointRecordRemove(psSyncCheckpointInt->hRecord); + PVR_LOG_IF_ERROR(eError, "_SyncCheckpointRecordRemove"); + } + + /* Move the sync checkpoint from the deferred free list to local list */ + dllist_remove_node(&psSyncCheckpointInt->sDeferredFreeListNode); + /* It's not an ideal solution to traverse list of checkpoints-to-free + * twice but it allows us to avoid holding the lock for too long */ + dllist_add_to_tail(&sCleanupList, &psSyncCheckpointInt->sDeferredFreeListNode); + } +#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1) + else + { + PVR_DPF((PVR_DBG_WARNING, "%s psSyncCheckpoint '%s'' (ID:%d)<%p>), " + "still pending (enq=%d,FWRef=%d)", __func__, + psSyncCheckpointInt->azName, psSyncCheckpointInt->ui32UID, + (void*)psSyncCheckpointInt, + (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount)), + 
psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount)); + } +#endif + } + + OSSpinLockRelease(psCtxCtl->hDeferredCleanupListLock, uiFlags); + + dllist_foreach_node(&sCleanupList, psNode, psNext) { + _SYNC_CHECKPOINT *psSyncCheckpointInt = + IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT, sDeferredFreeListNode); + + /* Remove the sync checkpoint from the global list */ + OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); + dllist_remove_node(&psSyncCheckpointInt->sListNode); + OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); + + RGXSRV_HWPERF_FREE(psDevNode, SYNC_CP, psSyncCheckpointInt->ui32FWAddr); + +#if (SYNC_CHECKPOINT_POOL_SIZE > 0) +#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s attempting to return sync(ID:%d),%p> to pool", + __func__, + psSyncCheckpointInt->ui32UID, + (void *) psSyncCheckpointInt)); +#endif + if (!_PutCheckpointInPool(psSyncCheckpointInt)) +#endif + { +#if (SYNC_CHECKPOINT_POOL_SIZE > 0) +#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, "%s pool is full, so just free it", + __func__)); +#endif +#endif +#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1) + else + { + PVR_DPF((PVR_DBG_WARNING, + "%s psSyncCheckpoint '%s'' (ID:%d)<%p>), still pending (enq=%d,FWRef=%d)", + __func__, + psSyncCheckpointInt->azName, + psSyncCheckpointInt->ui32UID, + (void*)psSyncCheckpointInt, + (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount)), + psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount)); +#endif + _FreeSyncCheckpoint(psSyncCheckpointInt); + } + } +} + +#if (SYNC_CHECKPOINT_POOL_SIZE > 0) +static _SYNC_CHECKPOINT *_GetCheckpointFromPool(_SYNC_CHECKPOINT_CONTEXT *psContext) +{ + _SYNC_CHECKPOINT_CONTEXT_CTL *const psCtxCtl = psContext->psContextCtl; + _SYNC_CHECKPOINT *psSyncCheckpoint = NULL; + OS_SPINLOCK_FLAGS uiFlags; + + /* Acquire sync checkpoint pool lock */ + 
OSSpinLockAcquire(psCtxCtl->hSyncCheckpointPoolLock, uiFlags); + + /* Check if we can allocate from the pool */ + if (psCtxCtl->bSyncCheckpointPoolValid && + (psCtxCtl->ui32SyncCheckpointPoolCount > SYNC_CHECKPOINT_POOL_SEDIMENT) && + (psCtxCtl->ui32SyncCheckpointPoolWp != psCtxCtl->ui32SyncCheckpointPoolRp)) + { + /* Get the next sync checkpoint from the pool */ + psSyncCheckpoint = psCtxCtl->psSyncCheckpointPool[psCtxCtl->ui32SyncCheckpointPoolRp]; + psCtxCtl->ui32SyncCheckpointPoolRp = + (psCtxCtl->ui32SyncCheckpointPoolRp + 1) & SYNC_CHECKPOINT_POOL_MASK; + psCtxCtl->ui32SyncCheckpointPoolCount--; + psCtxCtl->bSyncCheckpointPoolFull = IMG_FALSE; + psSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_IN_USE; +#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s checkpoint(old ID:%d)<-POOL(%d/%d), psContext=<%p>, " + "poolRp=%d, poolWp=%d", + __func__, + psSyncCheckpoint->ui32UID, + psCtxCtl->ui32SyncCheckpointPoolCount, + SYNC_CHECKPOINT_POOL_SIZE, + (void *) psContext, + psCtxCtl->ui32SyncCheckpointPoolRp, + psCtxCtl->ui32SyncCheckpointPoolWp)); +#endif + } + /* Release sync checkpoint pool lock */ + OSSpinLockRelease(psCtxCtl->hSyncCheckpointPoolLock, uiFlags); + + return psSyncCheckpoint; +} + +static IMG_BOOL _PutCheckpointInPool(_SYNC_CHECKPOINT *psSyncCheckpoint) +{ + _SYNC_CHECKPOINT_CONTEXT *psContext = psSyncCheckpoint->psSyncCheckpointBlock->psContext; + _SYNC_CHECKPOINT_CONTEXT_CTL *const psCtxCtl = psContext->psContextCtl; + IMG_BOOL bReturnedToPool = IMG_FALSE; + OS_SPINLOCK_FLAGS uiFlags; + + /* Acquire sync checkpoint pool lock */ + OSSpinLockAcquire(psCtxCtl->hSyncCheckpointPoolLock, uiFlags); + + /* Check if pool has space */ + if (psCtxCtl->bSyncCheckpointPoolValid && !psCtxCtl->bSyncCheckpointPoolFull) + { + /* Put the sync checkpoint into the next write slot in the pool */ + psCtxCtl->psSyncCheckpointPool[psCtxCtl->ui32SyncCheckpointPoolWp] = psSyncCheckpoint; + psCtxCtl->ui32SyncCheckpointPoolWp = + 
(psCtxCtl->ui32SyncCheckpointPoolWp + 1) & SYNC_CHECKPOINT_POOL_MASK; + psCtxCtl->ui32SyncCheckpointPoolCount++; + psCtxCtl->bSyncCheckpointPoolFull = + ((psCtxCtl->ui32SyncCheckpointPoolCount > 0) && + (psCtxCtl->ui32SyncCheckpointPoolWp == psCtxCtl->ui32SyncCheckpointPoolRp)); + bReturnedToPool = IMG_TRUE; + psSyncCheckpoint->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_UNDEF; + psSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_IN_POOL; +#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, + "%s checkpoint(ID:%d)->POOL(%d/%d), poolRp=%d, poolWp=%d", + __func__, + psSyncCheckpoint->ui32UID, + psCtxCtl->ui32SyncCheckpointPoolCount, + SYNC_CHECKPOINT_POOL_SIZE, + psCtxCtl->ui32SyncCheckpointPoolRp, + psCtxCtl->ui32SyncCheckpointPoolWp)); +#endif + } + /* Release sync checkpoint pool lock */ + OSSpinLockRelease(psCtxCtl->hSyncCheckpointPoolLock, uiFlags); + + return bReturnedToPool; +} + +static IMG_UINT32 _CleanCheckpointPool(_SYNC_CHECKPOINT_CONTEXT *psContext) +{ + _SYNC_CHECKPOINT_CONTEXT_CTL *const psCtxCtl = psContext->psContextCtl; + _SYNC_CHECKPOINT *psCheckpoint = NULL; + DECLARE_DLLIST(sCleanupList); + DLLIST_NODE *psThis, *psNext; + OS_SPINLOCK_FLAGS uiFlags; + IMG_UINT32 ui32ItemsFreed = 0, ui32NullScpCount = 0, ui32PoolCount; + IMG_BOOL bPoolValid; + + /* Acquire sync checkpoint pool lock */ + OSSpinLockAcquire(psCtxCtl->hSyncCheckpointPoolLock, uiFlags); + + bPoolValid = psCtxCtl->bSyncCheckpointPoolValid; + ui32PoolCount = psCtxCtl->ui32SyncCheckpointPoolCount; + + /* While the pool still contains sync checkpoints, free them */ + while (bPoolValid && psCtxCtl->ui32SyncCheckpointPoolCount > 0) + { + /* Get the sync checkpoint from the next read slot in the pool */ + psCheckpoint = psCtxCtl->psSyncCheckpointPool[psCtxCtl->ui32SyncCheckpointPoolRp]; + psCtxCtl->ui32SyncCheckpointPoolRp = + (psCtxCtl->ui32SyncCheckpointPoolRp + 1) & SYNC_CHECKPOINT_POOL_MASK; + psCtxCtl->ui32SyncCheckpointPoolCount--; + 
psCtxCtl->bSyncCheckpointPoolFull = + ((psCtxCtl->ui32SyncCheckpointPoolCount > 0) && + (psCtxCtl->ui32SyncCheckpointPoolWp == psCtxCtl->ui32SyncCheckpointPoolRp)); + + if (psCheckpoint) + { + PVR_ASSERT(!dllist_node_is_in_list(&psCheckpoint->sListNode)); + /* before checkpoints are added to the pool they are removed + * from the list so it's safe to use sListNode here */ + dllist_add_to_head(&sCleanupList, &psCheckpoint->sListNode); + } + else + { + ui32NullScpCount++; + } + } + + /* Release sync checkpoint pool lock */ + OSSpinLockRelease(psCtxCtl->hSyncCheckpointPoolLock, uiFlags); + + /* go through the local list and free all of the sync checkpoints */ + +#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) + PVR_DPF((PVR_DBG_WARNING, "%s psContext=<%p>, bSyncCheckpointPoolValid=%d, " + "uiSyncCheckpointPoolCount=%d", __func__, (void *) psContext, + bPoolValid, ui32PoolCount)); + + if (ui32NullScpCount > 0) + { + PVR_DPF((PVR_DBG_WARNING, "%s pool contained %u NULL entries", __func__, + ui32NullScpCount)); + } +#endif + + dllist_foreach_node(&sCleanupList, psThis, psNext) + { + psCheckpoint = IMG_CONTAINER_OF(psThis, _SYNC_CHECKPOINT, sListNode); + +#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) + if (psCheckpoint->ui32ValidationCheck != SYNC_CHECKPOINT_PATTERN_IN_POOL) + { + PVR_DPF((PVR_DBG_WARNING, "%s pool contains invalid entry " + "(ui32ValidationCheck=0x%x)", __func__, + psCheckpoint->ui32ValidationCheck)); + } + + PVR_DPF((PVR_DBG_WARNING, + "%s psSyncCheckpoint(ID:%d)", + __func__, psCheckpoint->ui32UID)); + PVR_DPF((PVR_DBG_WARNING, + "%s psSyncCheckpoint->ui32ValidationCheck=0x%x", + __func__, psCheckpoint->ui32ValidationCheck)); + PVR_DPF((PVR_DBG_WARNING, + "%s psSyncCheckpoint->uiSpanAddr=0x%llx", + __func__, psCheckpoint->uiSpanAddr)); + PVR_DPF((PVR_DBG_WARNING, + "%s psSyncCheckpoint->psSyncCheckpointBlock=<%p>", + __func__, (void *) psCheckpoint->psSyncCheckpointBlock)); + PVR_DPF((PVR_DBG_WARNING, + "%s 
psSyncCheckpoint->psSyncCheckpointBlock->psContext=<%p>", + __func__, (void *) psCheckpoint->psSyncCheckpointBlock->psContext)); + PVR_DPF((PVR_DBG_WARNING, + "%s psSyncCheckpoint->psSyncCheckpointBlock->psContext->psSubAllocRA=<%p>", + __func__, (void *) psCheckpoint->psSyncCheckpointBlock->psContext->psSubAllocRA)); + + PVR_DPF((PVR_DBG_WARNING, + "%s CALLING RA_Free(psSyncCheckpoint(ID:%d)<%p>), " + "psSubAllocRA=<%p>, ui32SpanAddr=0x%llx", + __func__, + psCheckpoint->ui32UID, + (void *) psCheckpoint, + (void *) psCheckpoint->psSyncCheckpointBlock->psContext->psSubAllocRA, + psCheckpoint->uiSpanAddr)); +#endif + + dllist_remove_node(psThis); + + _FreeSyncCheckpoint(psCheckpoint); + ui32ItemsFreed++; + } + + return ui32ItemsFreed; +} +#endif /* (SYNC_CHECKPOINT_POOL_SIZE > 0) */ diff --git a/drivers/gpu/drm/phytium/octopus/sync_checkpoint.h b/drivers/gpu/drm/phytium/octopus/sync_checkpoint.h new file mode 100644 index 000000000000..b09f41ad505d --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/sync_checkpoint.h @@ -0,0 +1,665 @@ +/*************************************************************************/ /*! +@File +@Title Synchronisation checkpoint interface header +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Defines the client side interface for synchronisation +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef SYNC_CHECKPOINT_H +#define SYNC_CHECKPOINT_H + +#include "img_types.h" +#include "pvrsrv_error.h" +#include "pvrsrv_sync_km.h" +#include "pdumpdefs.h" +#include "pdump.h" +#include "dllist.h" +#include "pvr_debug.h" +#include "device_connection.h" +#include "opaque_types.h" + +#ifndef CHECKPOINT_TYPES +#define CHECKPOINT_TYPES +typedef struct _SYNC_CHECKPOINT_CONTEXT *PSYNC_CHECKPOINT_CONTEXT; + +typedef struct _SYNC_CHECKPOINT *PSYNC_CHECKPOINT; +#endif + +/* definitions for functions to be implemented by OS-specific sync - the OS-specific sync code + will call SyncCheckpointRegisterFunctions() when initialised, in order to register functions + we can then call */ +#ifndef CHECKPOINT_PFNS +#define CHECKPOINT_PFNS +typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN)(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, + PVRSRV_FENCE fence, + IMG_UINT32 *nr_checkpoints, + PSYNC_CHECKPOINT **checkpoint_handles, + IMG_UINT64 *pui64FenceUID); +typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN)(const IMG_CHAR *fence_name, + PVRSRV_TIMELINE timeline, + PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, + PVRSRV_FENCE *new_fence, + IMG_UINT64 *pui64FenceUID, + void **ppvFenceFinaliseData, + PSYNC_CHECKPOINT *new_checkpoint_handle, + IMG_HANDLE *timeline_update_sync, + IMG_UINT32 *timeline_update_value); +typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN)(PVRSRV_FENCE fence_to_rollback, void *finalise_data); +typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN)(PVRSRV_FENCE fence_to_finalise, void *finalise_data); +typedef void (*PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN)(void *private_data); +typedef void (*PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN)(void *mem_ptr); +typedef IMG_UINT32 (*PFN_SYNC_CHECKPOINT_DUMP_INFO_ON_STALLED_UFOS_FN)(IMG_UINT32 num_ufos, IMG_UINT32 *vaddrs); +#if defined(PDUMP) +typedef PVRSRV_ERROR 
(*PFN_SYNC_CHECKPOINT_FENCE_GETCHECKPOINTS_FN)(PVRSRV_FENCE iFence, + IMG_UINT32 *puiNumCheckpoints, + PSYNC_CHECKPOINT **papsCheckpoints); +#endif + +#define SYNC_CHECKPOINT_IMPL_MAX_STRLEN 20 + +typedef struct +{ + PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN pfnFenceResolve; + PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN pfnFenceCreate; + PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN pfnFenceDataRollback; + PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN pfnFenceFinalise; + PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN pfnNoHWUpdateTimelines; + PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN pfnFreeCheckpointListMem; + PFN_SYNC_CHECKPOINT_DUMP_INFO_ON_STALLED_UFOS_FN pfnDumpInfoOnStalledUFOs; + IMG_CHAR pszImplName[SYNC_CHECKPOINT_IMPL_MAX_STRLEN]; +#if defined(PDUMP) + PFN_SYNC_CHECKPOINT_FENCE_GETCHECKPOINTS_FN pfnSyncFenceGetCheckpoints; +#endif +} PFN_SYNC_CHECKPOINT_STRUCT; + +PVRSRV_ERROR SyncCheckpointRegisterFunctions(PFN_SYNC_CHECKPOINT_STRUCT *psSyncCheckpointPfns); + +#endif /* ifndef CHECKPOINT_PFNS */ + +/*************************************************************************/ /*! +@Function SyncCheckpointContextCreate + +@Description Create a new synchronisation checkpoint context + +@Input psDevNode Device node + +@Output ppsSyncCheckpointContext Handle to the created synchronisation + checkpoint context + +@Return PVRSRV_OK if the synchronisation checkpoint context was + successfully created +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncCheckpointContextCreate(PPVRSRV_DEVICE_NODE psDevNode, + PSYNC_CHECKPOINT_CONTEXT *ppsSyncCheckpointContext); + +/*************************************************************************/ /*! +@Function SyncCheckpointContextDestroy + +@Description Destroy a synchronisation checkpoint context + +@Input psSyncCheckpointContext Handle to the synchronisation + checkpoint context to destroy + +@Return PVRSRV_OK if the synchronisation checkpoint context was + successfully destroyed. 
+ PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT if the context still + has sync checkpoints defined +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncCheckpointContextDestroy(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext); + +/*************************************************************************/ /*! +@Function SyncCheckpointContextRef + +@Description Takes a reference on a synchronisation checkpoint context + +@Input psContext Handle to the synchronisation checkpoint context + on which a ref is to be taken + +@Return None +*/ +/*****************************************************************************/ +void SyncCheckpointContextRef(PSYNC_CHECKPOINT_CONTEXT psContext); + +/*************************************************************************/ /*! +@Function SyncCheckpointContextUnref + +@Description Drops a reference taken on a synchronisation checkpoint + context + +@Input psContext Handle to the synchronisation checkpoint context + on which the ref is to be dropped + +@Return None +*/ +/*****************************************************************************/ +void SyncCheckpointContextUnref(PSYNC_CHECKPOINT_CONTEXT psContext); + +/*************************************************************************/ /*! +@Function SyncCheckpointAlloc + +@Description Allocate a new synchronisation checkpoint on the specified + synchronisation checkpoint context + +@Input hSyncCheckpointContext Handle to the synchronisation + checkpoint context + +@Input hTimeline Timeline on which this sync + checkpoint is being created + +@Input hFence Fence as passed into pfnFenceResolve + API, when the API encounters a non-PVR + fence as part of its input fence. From + all other places this argument must be + PVRSRV_NO_FENCE. 
+ +@Input pszClassName Sync checkpoint source annotation + (will be truncated to at most + PVRSRV_SYNC_NAME_LENGTH chars) + +@Output ppsSyncCheckpoint Created synchronisation checkpoint + +@Return PVRSRV_OK if the synchronisation checkpoint was + successfully created +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncCheckpointAlloc(PSYNC_CHECKPOINT_CONTEXT psSyncContext, + PVRSRV_TIMELINE hTimeline, + PVRSRV_FENCE hFence, + const IMG_CHAR *pszCheckpointName, + PSYNC_CHECKPOINT *ppsSyncCheckpoint); + +/*************************************************************************/ /*! +@Function SyncCheckpointFree + +@Description Free a synchronisation checkpoint + The reference count held for the synchronisation checkpoint + is decremented - if it has becomes zero, it is also freed. + +@Input psSyncCheckpoint The synchronisation checkpoint to free + +@Return None +*/ +/*****************************************************************************/ +void +SyncCheckpointFree(PSYNC_CHECKPOINT psSyncCheckpoint); + +/*************************************************************************/ /*! +@Function SyncCheckpointSignal + +@Description Signal the synchronisation checkpoint + +@Input psSyncCheckpoint The synchronisation checkpoint to signal + +@Input ui32FenceSyncFlags Flags used for controlling HWPerf behavior + +@Return None +*/ +/*****************************************************************************/ +void +SyncCheckpointSignal(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags); + +/*************************************************************************/ /*! 
+@Function SyncCheckpointSignalNoHW + +@Description Signal the synchronisation checkpoint in NO_HARWARE build + +@Input psSyncCheckpoint The synchronisation checkpoint to signal + +@Return None +*/ +/*****************************************************************************/ +void +SyncCheckpointSignalNoHW(PSYNC_CHECKPOINT psSyncCheckpoint); + +/*************************************************************************/ /*! +@Function SyncCheckpointError + +@Description Error the synchronisation checkpoint + +@Input psSyncCheckpoint The synchronisation checkpoint to error + +@Input ui32FenceSyncFlags Flags used for controlling HWPerf behavior + +@Return None +*/ +/*****************************************************************************/ +void +SyncCheckpointError(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags); + +/*************************************************************************/ /*! +@Function SyncCheckpointStateFromUFO + +@Description Returns the current state of the synchronisation checkpoint + which has the given UFO firmware address + +@Input psDevNode The device owning the sync + checkpoint + +@Input ui32FwAddr The firmware address of the sync + checkpoint + +@Return The current state (32-bit value) of the sync checkpoint +*/ +/*****************************************************************************/ +IMG_UINT32 SyncCheckpointStateFromUFO(PPVRSRV_DEVICE_NODE psDevNode, + IMG_UINT32 ui32FwAddr); + +/*************************************************************************/ /*! 
+@Function SyncCheckpointErrorFromUFO + +@Description Error the synchronisation checkpoint which has the + given UFO firmware address + +@Input psDevNode The device owning the sync + checkpoint to be errored + +@Input ui32FwAddr The firmware address of the sync + checkpoint to be errored + +@Return None +*/ +/*****************************************************************************/ +void +SyncCheckpointErrorFromUFO(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui32FwAddr); + +/*************************************************************************/ /*! +@Function SyncCheckpointRollbackFromUFO + +@Description Drop the enqueued count reference taken on the synchronisation + checkpoint on behalf of the firmware. + Called in the event of a DM Kick failing. + +@Input psDevNode The device owning the sync + checkpoint to be rolled back + +@Input ui32FwAddr The firmware address of the sync + checkpoint to be rolled back + +@Return None +*/ +/*****************************************************************************/ +void +SyncCheckpointRollbackFromUFO(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui32FwAddr); + +/*************************************************************************/ /*! +@Function SyncCheckpointIsSignalled + +@Description Returns IMG_TRUE if the synchronisation checkpoint is + signalled or errored + +@Input psSyncCheckpoint The synchronisation checkpoint to test + +@Input ui32FenceSyncFlags Flags used for controlling HWPerf behavior + +@Return None +*/ +/*****************************************************************************/ +IMG_BOOL +SyncCheckpointIsSignalled(PSYNC_CHECKPOINT psSyncCheckpoint, + IMG_UINT32 ui32FenceSyncFlags); + +/*************************************************************************/ /*! 
+@Function SyncCheckpointIsErrored + +@Description Returns IMG_TRUE if the synchronisation checkpoint is + errored + +@Input psSyncCheckpoint The synchronisation checkpoint to test + +@Input ui32FenceSyncFlags Flags used for controlling HWPerf behavior + +@Return None +*/ +/*****************************************************************************/ +IMG_BOOL +SyncCheckpointIsErrored(PSYNC_CHECKPOINT psSyncCheckpoint, + IMG_UINT32 ui32FenceSyncFlags); + +/*************************************************************************/ /*! +@Function SyncCheckpointTakeRef + +@Description Take a reference on a synchronisation checkpoint + +@Input psSyncCheckpoint Synchronisation checkpoint to take a + reference on + +@Return PVRSRV_OK if a reference was taken on the synchronisation + primitive +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncCheckpointTakeRef(PSYNC_CHECKPOINT psSyncCheckpoint); + +/*************************************************************************/ /*! +@Function SyncCheckpointDropRef + +@Description Drop a reference on a synchronisation checkpoint + +@Input psSyncCheckpoint Synchronisation checkpoint to drop a + reference on + +@Return PVRSRV_OK if a reference was dropped on the synchronisation + primitive +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncCheckpointDropRef(PSYNC_CHECKPOINT psSyncCheckpoint); + +/*************************************************************************/ /*! +@Function SyncCheckpointResolveFence + +@Description Resolve a fence, returning a list of the sync checkpoints + that fence contains. + This function in turn calls a function provided by the + OS native sync implementation. 
+ +@Input psSyncCheckpointContext The sync checkpoint context + on which checkpoints should be + created (in the event of the fence + having a native sync pt with no + associated sync checkpoint) + +@Input hFence The fence to be resolved + +@Output pui32NumSyncCheckpoints The number of sync checkpoints the + fence contains. Can return 0 if + passed a null (-1) fence. + +@Output papsSyncCheckpoints List of sync checkpoints the fence + contains + +@Output puiFenceUID Unique ID of the resolved fence + +@Return PVRSRV_OK if a valid fence was provided. + PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native + sync has not registered a callback function. +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncCheckpointResolveFence(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, + PVRSRV_FENCE hFence, + IMG_UINT32 *pui32NumSyncCheckpoints, + PSYNC_CHECKPOINT **papsSyncCheckpoints, + IMG_UINT64 *puiFenceUID, + PDUMP_FLAGS_T ui32PDumpFlags); + +/*************************************************************************/ /*! +@Function SyncCheckpointCreateFence + +@Description Create a fence containing a single sync checkpoint. + Return the fence and a ptr to sync checkpoint it contains. + This function in turn calls a function provided by the + OS native sync implementation. + +@Input pszFenceName String to assign to the new fence + (for debugging purposes) + +@Input hTimeline Timeline on which the new fence is + to be created + +@Input psSyncCheckpointContext Sync checkpoint context to be used + when creating the new fence + +@Output phNewFence The newly created fence + +@Output pui64FenceUID Unique ID of the created fence + +@Output ppvFenceFinaliseData Any data needed to finalise the fence + in a later call to the function + SyncCheckpointFinaliseFence() + +@Output psNewSyncCheckpoint The sync checkpoint contained in + the new fence + +@Return PVRSRV_OK if a valid fence was provided. 
+ PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native + sync has not registered a callback function. +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncCheckpointCreateFence(PPVRSRV_DEVICE_NODE psDeviceNode, + const IMG_CHAR *pszFenceName, + PVRSRV_TIMELINE hTimeline, + PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, + PVRSRV_FENCE *phNewFence, + IMG_UINT64 *pui64FenceUID, + void **ppvFenceFinaliseData, + PSYNC_CHECKPOINT *psNewSyncCheckpoint, + void **ppvTimelineUpdateSyncPrim, + IMG_UINT32 *pui32TimelineUpdateValue, + PDUMP_FLAGS_T ui32PDumpFlags); + +/*************************************************************************/ /*! +@Function SyncCheckpointRollbackFenceData + +@Description 'Rolls back' the fence specified (destroys the fence and + takes any other required actions to undo the fence + creation (eg if the implementation wishes to revert the + incrementing of the fence's timeline, etc). + This function in turn calls a function provided by the + OS native sync implementation. + +@Input hFence Fence to be 'rolled back' + +@Input pvFinaliseData Data needed to finalise the + fence + +@Return PVRSRV_OK if a valid fence was provided. + PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native + sync has not registered a callback function. +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncCheckpointRollbackFenceData(PVRSRV_FENCE hFence, void *pvFinaliseData); + +/*************************************************************************/ /*! +@Function SyncCheckpointFinaliseFence + +@Description 'Finalise' the fence specified (performs any actions the + underlying implementation may need to perform just prior + to the fence being returned to the client. 
+ This function in turn calls a function provided by the
+ OS native sync implementation - if the native sync
+ implementation does not need to perform any actions at
+ this time, this function does not need to be registered.
+
+@Input psDevNode Device node
+
+@Input hFence Fence to be 'finalised'
+
+@Input pvFinaliseData Data needed to finalise the fence
+
+@Input psSyncCheckpoint Base sync checkpoint that this fence
+ is formed of
+
+@Input pszName Fence annotation
+
+@Return PVRSRV_OK if a valid fence and finalise data were provided.
+ PVRSRV_ERROR_INVALID_PARAMS if an invalid fence or finalise
+ data were provided.
+ PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native
+ sync has not registered a callback function (permitted).
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointFinaliseFence(PPVRSRV_DEVICE_NODE psDevNode,
+ PVRSRV_FENCE hFence,
+ void *pvFinaliseData,
+ PSYNC_CHECKPOINT psSyncCheckpoint,
+ const IMG_CHAR *pszName);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointFreeCheckpointListMem
+
+@Description Free the memory which was allocated by the sync
+ implementation and used to return the list of sync
+ checkpoints when resolving a fence.
+ This function in turn calls a free function registered by
+ the sync implementation (if a function has been registered).
+
+@Input pvCheckpointListMem Pointer to the memory to be freed
+
+@Return None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointFreeCheckpointListMem(void *pvCheckpointListMem);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointNoHWUpdateTimelines
+
+@Description Called by the DDK in a NO_HARDWARE build only.
+ After syncs have been manually signalled by the DDK, this
+ function is called to allow the OS native sync implementation
+ to update its timelines (as the usual callback notification
+ of signalled checkpoints is not supported for NO_HARDWARE).
+ This function in turn calls a function provided by the
+ OS native sync implementation.
+
+@Input pvPrivateData Any data the OS native sync
+ implementation might require.
+
+@Return PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native
+ sync has not registered a callback function, otherwise
+ PVRSRV_OK.
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointNoHWUpdateTimelines(void *pvPrivateData);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointDumpInfoOnStalledUFOs
+
+@Description Called by the DDK in the event of the health check watchdog
+ examining the CCBs and determining that one has failed to
+ progress after 10 seconds when the GPU is idle due to waiting
+ on one or more UFO fences.
+ The DDK will pass a list of UFOs on which the CCB is waiting
+ and the sync implementation will check them to see if any
+ relate to sync points it has created. If so, the
+ implementation should dump debug information on those sync
+ points to the kernel log or other suitable output (which will
+ allow the unsignalled syncs to be identified).
+ The function shall return the number of syncs in the provided
+ array that were syncs which it had created.
+
+@Input ui32NumUFOs The number of UFOs in the array passed
+ in the pui32Vaddrs parameter.
+ pui32Vaddrs The array of UFOs the CCB is waiting on.
+
+@Output pui32NumSyncOwnedUFOs The number of UFOs in pui32Vaddrs which
+ relate to syncs created by the sync
+ implementation.
+
+@Return PVRSRV_OK if a valid pointer is provided in pui32NumSyncOwnedUFOs.
+ PVRSRV_ERROR_INVALID_PARAMS if a NULL value is provided in
+ pui32NumSyncOwnedUFOs.
+ PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native
+ sync has not registered a callback function.
+
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointDumpInfoOnStalledUFOs(IMG_UINT32 ui32NumUFOs,
+ IMG_UINT32 *pui32Vaddrs,
+ IMG_UINT32 *pui32NumSyncOwnedUFOs);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointGetStateString
+
+@Description Called to get a string representing the current state of a
+ sync checkpoint.
+
+@Input psSyncCheckpoint Synchronisation checkpoint to get the
+ state for.
+
+@Return The string representing the current state of this checkpoint
+*/
+/*****************************************************************************/
+const IMG_CHAR *
+SyncCheckpointGetStateString(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointRecordLookup
+
+@Description Returns a debug string with information about the
+ sync checkpoint.
+
+@Input psDevNode The device owning the sync
+ checkpoint to lookup
+
+@Input ui32FwAddr The firmware address of the sync
+ checkpoint to lookup
+
+@Input pszSyncInfo Character array to write to
+
+@Input len Length of the character array
+
+@Return None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointRecordLookup(PPVRSRV_DEVICE_NODE psDevNode,
+ IMG_UINT32 ui32FwAddr,
+ IMG_CHAR * pszSyncInfo, size_t len);
+
+#if defined(PDUMP)
+/*************************************************************************/ /*!
+@Function PVRSRVSyncCheckpointSignalledPDumpPolKM
+
+@Description Called to insert a poll into the PDump script on a given
+ Fence being signalled or errored.
+
+@Input hFence Fence for PDump to poll on
+
+@Return PVRSRV_OK if a valid sync checkpoint was provided.
+*/ +/*****************************************************************************/ + +PVRSRV_ERROR PVRSRVSyncCheckpointSignalledPDumpPolKM(PVRSRV_FENCE hFence); + +#endif + +#endif /* SYNC_CHECKPOINT_H */ diff --git a/drivers/gpu/drm/phytium/octopus/sync_checkpoint_external.h b/drivers/gpu/drm/phytium/octopus/sync_checkpoint_external.h new file mode 100644 index 000000000000..d6d0bc4f8319 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/sync_checkpoint_external.h @@ -0,0 +1,83 @@ +/*************************************************************************/ /*! +@File +@Title Services external synchronisation checkpoint interface header +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Defines synchronisation checkpoint structures that are visible + internally and externally +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef SYNC_CHECKPOINT_EXTERNAL_H +#define SYNC_CHECKPOINT_EXTERNAL_H + +#include "img_types.h" + +#ifndef CHECKPOINT_TYPES +#define CHECKPOINT_TYPES +typedef struct _SYNC_CHECKPOINT_CONTEXT *PSYNC_CHECKPOINT_CONTEXT; + +typedef struct _SYNC_CHECKPOINT *PSYNC_CHECKPOINT; +#endif + +/* PVRSRV_SYNC_CHECKPOINT states. + * The OS native sync implementation should call pfnIsSignalled() to determine if a + * PVRSRV_SYNC_CHECKPOINT has signalled (which will return an IMG_BOOL), but can set the + * state for a PVRSRV_SYNC_CHECKPOINT (which is currently in the NOT_SIGNALLED state) + * where that PVRSRV_SYNC_CHECKPOINT is representing a foreign sync. 
+ */ +typedef IMG_UINT32 PVRSRV_SYNC_CHECKPOINT_STATE; + +#define PVRSRV_SYNC_CHECKPOINT_UNDEF 0x000U +#define PVRSRV_SYNC_CHECKPOINT_ACTIVE 0xac1U /*!< checkpoint has not signalled */ +#define PVRSRV_SYNC_CHECKPOINT_SIGNALLED 0x519U /*!< checkpoint has signalled */ +#define PVRSRV_SYNC_CHECKPOINT_ERRORED 0xeffU /*!< checkpoint has been errored */ + + +#define PVRSRV_UFO_IS_SYNC_CHECKPOINT_FWADDR(fwaddr) ((fwaddr) & 0x1U) +#define PVRSRV_UFO_IS_SYNC_CHECKPOINT(ufoptr) (PVRSRV_UFO_IS_SYNC_CHECKPOINT_FWADDR((ufoptr)->puiAddrUFO.ui32Addr)) + +/* Maximum number of sync checkpoints the firmware supports in one fence */ +#define MAX_SYNC_CHECKPOINTS_PER_FENCE 32U + +/*! + * Define to be used with SyncCheckpointAlloc() to indicate a checkpoint which + * represents a foreign sync point or collection of foreign sync points. + */ +#define SYNC_CHECKPOINT_FOREIGN_CHECKPOINT ((PVRSRV_TIMELINE) - 2U) + +#endif /* SYNC_CHECKPOINT_EXTERNAL_H */ diff --git a/drivers/gpu/drm/phytium/octopus/sync_checkpoint_init.h b/drivers/gpu/drm/phytium/octopus/sync_checkpoint_init.h new file mode 100644 index 000000000000..f54ba096670c --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/sync_checkpoint_init.h @@ -0,0 +1,82 @@ +/*************************************************************************/ /*! +@File +@Title Services synchronisation checkpoint initialisation interface + header +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Defines synchronisation checkpoint structures that are visible + internally and externally +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef SYNC_CHECKPOINT_INIT_H +#define SYNC_CHECKPOINT_INIT_H + +#include "device.h" + +/*************************************************************************/ /*! +@Function SyncCheckpointInit + +@Description Initialise the sync checkpoint driver by giving it the + device node (needed to determine the pfnUFOAlloc function + to call in order to allocate sync block memory). + +@Input psDevNode Device for which sync checkpoints + are being initialised + +@Return PVRSRV_OK initialised successfully, + PVRSRV_ERROR_ otherwise +*/ +/*****************************************************************************/ +PVRSRV_ERROR +SyncCheckpointInit(PVRSRV_DEVICE_NODE *psDevNode); + +/*************************************************************************/ /*! +@Function SyncCheckpointDeinit + +@Description Deinitialise the sync checkpoint driver. + Frees resources allocated during initialisation. + +@Input psDevNode Device for which sync checkpoints + are being de-initialised + +@Return None +*/ +/*****************************************************************************/ +void SyncCheckpointDeinit(PVRSRV_DEVICE_NODE *psDevNode); + +#endif /* SYNC_CHECKPOINT_INIT_H */ diff --git a/drivers/gpu/drm/phytium/octopus/sync_checkpoint_internal.h b/drivers/gpu/drm/phytium/octopus/sync_checkpoint_internal.h new file mode 100644 index 000000000000..df8929dd0a83 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/sync_checkpoint_internal.h @@ -0,0 +1,270 @@ +/*************************************************************************/ /*! +@File +@Title Services internal synchronisation checkpoint interface header +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Defines the internal server interface for services + synchronisation checkpoints. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef SYNC_CHECKPOINT_INTERNAL_H +#define SYNC_CHECKPOINT_INTERNAL_H + +#include "img_types.h" +#include "opaque_types.h" +#include "sync_checkpoint_external.h" +#include "sync_checkpoint.h" +#include "ra.h" +#include "dllist.h" +#include "lock.h" +#include "devicemem.h" +#include "rgx_fwif_shared.h" +#include "rgx_fwif_km.h" + +struct SYNC_CHECKPOINT_RECORD; + +/* + Private structures +*/ + +typedef struct _SYNC_CHECKPOINT_CONTEXT_CTL_ _SYNC_CHECKPOINT_CONTEXT_CTL, *_PSYNC_CHECKPOINT_CONTEXT_CTL; + +typedef struct _SYNC_CHECKPOINT_CONTEXT_ +{ + PPVRSRV_DEVICE_NODE psDevNode; + IMG_CHAR azName[PVRSRV_SYNC_NAME_LENGTH]; /*!< Name of the RA */ + RA_ARENA *psSubAllocRA; /*!< RA context */ + IMG_CHAR azSpanName[PVRSRV_SYNC_NAME_LENGTH]; /*!< Name of the span RA */ + RA_ARENA *psSpanRA; /*!< RA used for span management of SubAllocRA */ + ATOMIC_T hRefCount; /*!< Ref count for this context */ + ATOMIC_T hCheckpointCount; /*!< Checkpoint count for this context */ + POS_LOCK hLock; + _PSYNC_CHECKPOINT_CONTEXT_CTL psContextCtl; +#if defined(PDUMP) + DLLIST_NODE sSyncCheckpointBlockListHead; /*!< List head for the sync chkpt blocks in this context*/ + POS_LOCK hSyncCheckpointBlockListLock; /*!< sync chkpt blocks list lock*/ + DLLIST_NODE sListNode; /*!< List node for the sync chkpt context list*/ +#endif +} _SYNC_CHECKPOINT_CONTEXT; + +typedef struct _SYNC_CHECKPOINT_BLOCK_ +{ + ATOMIC_T hRefCount; /*!< Ref count for this sync block */ + POS_LOCK hLock; + _SYNC_CHECKPOINT_CONTEXT *psContext; /*!< Our copy of the services connection */ + PPVRSRV_DEVICE_NODE psDevNode; + IMG_UINT32 ui32SyncBlockSize; /*!< Size of the sync checkpoint block */ + IMG_UINT32 ui32FirmwareAddr; /*!< Firmware address */ + DEVMEM_MEMDESC *hMemDesc; /*!< DevMem allocation for block */ + volatile IMG_UINT32 *pui32LinAddr; /*!< Server-code CPU mapping */ + IMG_UINT64 uiSpanBase; /*!< Base of this import (FW DevMem) in 
the span RA */ +#if defined(PDUMP) + DLLIST_NODE sListNode; /*!< List node for the sync chkpt blocks */ +#endif +} SYNC_CHECKPOINT_BLOCK; + +typedef struct SYNC_CHECKPOINT_RECORD* PSYNC_CHECKPOINT_RECORD_HANDLE; + +typedef struct _SYNC_CHECKPOINT_ +{ + //_SYNC_CHECKPOINT_CONTEXT *psContext; /*!< pointer to the parent context of this checkpoint */ + /* A sync checkpoint is assigned a unique ID, to avoid any confusion should + * the same memory be re-used later for a different checkpoint + */ + IMG_UINT32 ui32UID; /*!< Unique ID assigned to sync checkpoint (to distinguish checkpoints if memory is re-used)*/ + ATOMIC_T hRefCount; /*!< Ref count for this sync */ + ATOMIC_T hEnqueuedCCBCount; /*!< Num times sync has been put in CCBs */ + SYNC_CHECKPOINT_BLOCK *psSyncCheckpointBlock; /*!< Synchronisation block this checkpoint is allocated on */ + IMG_UINT64 uiSpanAddr; /*!< Span address of the sync */ + volatile SYNC_CHECKPOINT_FW_OBJ *psSyncCheckpointFwObj; /*!< CPU view of the data held in the sync block */ + PRGXFWIF_UFO_ADDR sCheckpointUFOAddr; /*!< PRGXFWIF_UFO_ADDR struct used to pass update address to FW */ + IMG_CHAR azName[PVRSRV_SYNC_NAME_LENGTH]; /*!< Name of the checkpoint */ + PVRSRV_TIMELINE hTimeline; /*!< Timeline on which this sync checkpoint was created */ + IMG_UINT32 ui32ValidationCheck; + IMG_PID uiProcess; /*!< The Process ID of the process which created this sync checkpoint */ + PSYNC_CHECKPOINT_RECORD_HANDLE hRecord; /*!< Sync record handle */ + DLLIST_NODE sListNode; /*!< List node for the global sync chkpt list */ + DLLIST_NODE sDeferredFreeListNode; /*!< List node for the deferred free sync chkpt list */ + IMG_UINT32 ui32FWAddr; /*!< FWAddr stored at sync checkpoint alloc time */ + PDUMP_FLAGS_T ui32PDumpFlags; /*!< Pdump Capture mode to be used for POL*/ +} _SYNC_CHECKPOINT; + + +typedef struct _SYNC_CHECKPOINT_SIGNAL_ +{ + _SYNC_CHECKPOINT asSyncCheckpoint; /*!< Store sync checkpt for deferred signal */ + IMG_UINT32 ui32Status; /*!< sync 
checkpt status signal/errored */ +} _SYNC_CHECKPOINT_DEFERRED_SIGNAL; + +#define GET_CP_CB_NEXT_IDX(_curridx) (((_curridx) + 1) % SYNC_CHECKPOINT_MAX_DEFERRED_SIGNAL) +#define GET_CP_CB_BASE(_idx) (IMG_OFFSET_ADDR(psDevNode->pui8DeferredSyncCPSignal, \ + ((_idx) * sizeof(_SYNC_CHECKPOINT_DEFERRED_SIGNAL)))) + + +/*************************************************************************/ /*! +@Function SyncCheckpointGetFirmwareAddr + +@Description . + +@Input psSyncCheckpoint Synchronisation checkpoint to get + the firmware address of + +@Return The firmware address of the sync checkpoint + +*/ +/*****************************************************************************/ +IMG_UINT32 +SyncCheckpointGetFirmwareAddr(PSYNC_CHECKPOINT psSyncCheckpoint); + +/*************************************************************************/ /*! +@Function SyncCheckpointCCBEnqueued + +@Description Increment the CCB enqueued reference count for a + synchronisation checkpoint. This indicates how many FW + operations (checks/update) have been placed into CCBs for the + sync checkpoint. + When the FW services these operation, it increments its own + reference count. When these two values are equal, we know + there are not outstanding FW operating for the checkpoint + in any CCB. + +@Input psSyncCheckpoint Synchronisation checkpoint for which + to increment the enqueued reference + count + +@Return None + +*/ +/*****************************************************************************/ +void +SyncCheckpointCCBEnqueued(PSYNC_CHECKPOINT psSyncCheckpoint); + +/*************************************************************************/ /*! +@Function SyncCheckpointGetEnqueuedCount + +@Description . + +@Input psSyncCheckpoint Synchronisation checkpoint to get + the enqueued count of + +@Return The enqueued count of the sync checkpoint + (i.e. 
the number of FW operations (checks or updates) + currently enqueued in CCBs for the sync checkpoint) + +*/ +/*****************************************************************************/ +IMG_UINT32 +SyncCheckpointGetEnqueuedCount(PSYNC_CHECKPOINT psSyncCheckpoint); + +/*************************************************************************/ /*! +@Function SyncCheckpointGetReferenceCount + +@Description . + +@Input psSyncCheckpoint Synchronisation checkpoint to get + the reference count of + +@Return The host reference count of the sync checkpoint + +*/ +/*****************************************************************************/ +IMG_UINT32 +SyncCheckpointGetReferenceCount(PSYNC_CHECKPOINT psSyncCheckpoint); + +/*************************************************************************/ /*! +@Function SyncCheckpointGetCreator + +@Description . + +@Input psSyncCheckpoint Synchronisation checkpoint to get + the creating process of + +@Return The process id of the process which created this sync checkpoint. + +*/ +/*****************************************************************************/ +IMG_PID +SyncCheckpointGetCreator(PSYNC_CHECKPOINT psSyncCheckpoint); + +/*************************************************************************/ /*! +@Function SyncCheckpointGetId + +@Description . + +@Input psSyncCheckpoint Synchronisation checkpoint to get + the unique Id of + +@Return The unique Id of the sync checkpoint + +*/ +/*****************************************************************************/ +IMG_UINT32 +SyncCheckpointGetId(PSYNC_CHECKPOINT psSyncCheckpoint); + +/*************************************************************************/ /*! +@Function SyncCheckpointGetTimeline + +@Description . 
+ +@Input psSyncCheckpoint Synchronisation checkpoint to get + the parent timeline of + +@Return The parent timeline of the sync checkpoint + +*/ +/*****************************************************************************/ +PVRSRV_TIMELINE +SyncCheckpointGetTimeline(PSYNC_CHECKPOINT psSyncCheckpoint); + +/*************************************************************************/ /*! +@Function SyncCheckpointGetRGXFWIFUFOAddr + +@Description . + +@Input psSyncCheckpoint Synchronisation checkpoint to get + the PRGXFWIF_UFO_ADDR of + +@Return The PRGXFWIF_UFO_ADDR of the sync checkpoint, used when + providing the update in server kick code. + +*/ +/*****************************************************************************/ +PRGXFWIF_UFO_ADDR* +SyncCheckpointGetRGXFWIFUFOAddr(PSYNC_CHECKPOINT psSyncCheckpoint); + +#endif /* SYNC_CHECKPOINT_INTERNAL_H */ diff --git a/drivers/gpu/drm/phytium/octopus/sync_fallback_server.h b/drivers/gpu/drm/phytium/octopus/sync_fallback_server.h new file mode 100644 index 000000000000..c635d628a976 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/sync_fallback_server.h @@ -0,0 +1,200 @@ +/**************************************************************************/ /*! +@File +@Title Fallback sync interface +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /***************************************************************************/ + +#ifndef SYNC_FALLBACK_SERVER_H +#define SYNC_FALLBACK_SERVER_H + +#include "img_types.h" +#include "sync_checkpoint.h" +#include "device.h" +#include "connection_server.h" + + +typedef struct _PVRSRV_TIMELINE_SERVER_ PVRSRV_TIMELINE_SERVER; +typedef struct _PVRSRV_FENCE_SERVER_ PVRSRV_FENCE_SERVER; +typedef struct _PVRSRV_FENCE_EXPORT_ PVRSRV_FENCE_EXPORT; + +typedef struct _PVRSRV_SYNC_PT_ PVRSRV_SYNC_PT; + +#define SYNC_FB_TIMELINE_MAX_LENGTH PVRSRV_SYNC_NAME_LENGTH +#define SYNC_FB_FENCE_MAX_LENGTH PVRSRV_SYNC_NAME_LENGTH + +/*****************************************************************************/ +/* */ +/* SW SPECIFIC FUNCTIONS */ +/* */ +/*****************************************************************************/ + +PVRSRV_ERROR SyncFbTimelineCreateSW(IMG_UINT32 uiTimelineNameSize, + const IMG_CHAR *pszTimelineName, + PVRSRV_TIMELINE_SERVER **ppsTimeline); + +PVRSRV_ERROR SyncFbFenceCreateSW(PVRSRV_TIMELINE_SERVER *psTimeline, + IMG_UINT32 uiFenceNameSize, + const IMG_CHAR *pszFenceName, + PVRSRV_FENCE_SERVER **ppsOutputFence, + IMG_UINT64 *pui64SyncPtIdx); +PVRSRV_ERROR SyncFbSWTimelineFenceCreateKM(PVRSRV_TIMELINE iSWTimeline, + const IMG_CHAR *pszFenceName, + PVRSRV_FENCE *piOutputFence, + IMG_UINT64* pui64SyncPtIdx); + +PVRSRV_ERROR SyncFbTimelineAdvanceSW(PVRSRV_TIMELINE_SERVER *psTimeline, + IMG_UINT64 *pui64SyncPtIdx); +PVRSRV_ERROR SyncFbSWTimelineAdvanceKM(void *pvSWTimelineObj, + IMG_UINT64* pui64SyncPtIdx); + +/*****************************************************************************/ +/* */ +/* PVR SPECIFIC FUNCTIONS */ +/* */ +/*****************************************************************************/ + +PVRSRV_ERROR SyncFbTimelineCreatePVR(IMG_UINT32 uiTimelineNameSize, + const IMG_CHAR *pszTimelineName, + PVRSRV_TIMELINE_SERVER **ppsTimeline); + +PVRSRV_ERROR SyncFbFenceCreatePVR(const IMG_CHAR *pszName, + PVRSRV_TIMELINE iTl, + 
PSYNC_CHECKPOINT_CONTEXT hSyncCheckpointContext, + PVRSRV_FENCE *piOutFence, + IMG_UINT64 *puiFenceUID, + void **ppvFenceFinaliseData, + PSYNC_CHECKPOINT *ppsOutCheckpoint, + void **ppvTimelineUpdateSync, + IMG_UINT32 *puiTimelineUpdateValue); + +PVRSRV_ERROR SyncFbFenceResolvePVR(PSYNC_CHECKPOINT_CONTEXT psContext, + PVRSRV_FENCE iFence, + IMG_UINT32 *puiNumCheckpoints, + PSYNC_CHECKPOINT **papsCheckpoints, + IMG_UINT64 *puiFenceUID); + +/*****************************************************************************/ +/* */ +/* GENERIC FUNCTIONS */ +/* */ +/*****************************************************************************/ + +PVRSRV_ERROR SyncFbGetFenceObj(PVRSRV_FENCE iFence, + void **ppvFenceObj); + +PVRSRV_ERROR SyncFbSWGetTimelineObj(PVRSRV_TIMELINE iSWTimeline, + void **ppvSWTimelineObj); + +PVRSRV_ERROR SyncFbTimelineRelease(PVRSRV_TIMELINE_SERVER *psTl); + +PVRSRV_ERROR SyncFbFenceRelease(PVRSRV_FENCE_SERVER *psFence); +PVRSRV_ERROR SyncFbFenceReleaseKM(void *pvFenceObj); + +PVRSRV_ERROR SyncFbFenceDup(PVRSRV_FENCE_SERVER *psInFence, + PVRSRV_FENCE_SERVER **ppsOutFence); + +PVRSRV_ERROR SyncFbFenceMerge(PVRSRV_FENCE_SERVER *psInFence1, + PVRSRV_FENCE_SERVER *psInFence2, + IMG_UINT32 uiFenceNameSize, + const IMG_CHAR *pszFenceName, + PVRSRV_FENCE_SERVER **ppsOutFence); + +PVRSRV_ERROR SyncFbFenceWait(PVRSRV_FENCE_SERVER *psFence, + IMG_UINT32 uiTimeout); + +PVRSRV_ERROR SyncFbFenceDump(PVRSRV_FENCE_SERVER *psFence, + IMG_UINT32 uiLine, + IMG_UINT32 uiFileNameLength, + const IMG_CHAR *pszFile, + IMG_UINT32 uiModuleLength, + const IMG_CHAR *pszModule, + IMG_UINT32 uiDescLength, + const IMG_CHAR *pszDesc); + +PVRSRV_ERROR SyncFbDumpFenceKM(void *pvSWFenceObj, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile); + +PVRSRV_ERROR SyncFbSWDumpTimelineKM(void *pvSWTimelineObj, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile); + +PVRSRV_ERROR SyncFbRegisterSyncFunctions(void); + +PVRSRV_ERROR 
SyncFbRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode); + +PVRSRV_ERROR SyncFbDeregisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode); + +IMG_UINT32 SyncFbDumpInfoOnStalledUFOs(IMG_UINT32 nr_ufos, IMG_UINT32 *vaddrs); + +IMG_BOOL SyncFbCheckpointHasSignalled(IMG_UINT32 ui32FwAddr, IMG_UINT32 ui32Value); + +/*****************************************************************************/ +/* */ +/* IMPORT/EXPORT FUNCTIONS */ +/* */ +/*****************************************************************************/ + +#if defined(SUPPORT_INSECURE_EXPORT) +PVRSRV_ERROR SyncFbFenceExportInsecure(PVRSRV_FENCE_SERVER *psFence, + PVRSRV_FENCE_EXPORT **ppExport); + +PVRSRV_ERROR SyncFbFenceExportDestroyInsecure(PVRSRV_FENCE_EXPORT *psExport); + +PVRSRV_ERROR SyncFbFenceImportInsecure(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevice, + PVRSRV_FENCE_EXPORT *psImport, + PVRSRV_FENCE_SERVER **psFence); +#endif /* defined(SUPPORT_INSECURE_EXPORT) */ + +PVRSRV_ERROR SyncFbFenceExportSecure(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDevNode, + PVRSRV_FENCE_SERVER *psFence, + IMG_SECURE_TYPE *phSecure, + PVRSRV_FENCE_EXPORT **ppsExport, + CONNECTION_DATA **ppsSecureConnection); + +PVRSRV_ERROR SyncFbFenceExportDestroySecure(PVRSRV_FENCE_EXPORT *psExport); + +PVRSRV_ERROR SyncFbFenceImportSecure(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevice, + IMG_SECURE_TYPE hSecure, + PVRSRV_FENCE_SERVER **psFence); + +#endif /* SYNC_FALLBACK_SERVER_H */ diff --git a/drivers/gpu/drm/phytium/octopus/sync_internal.h b/drivers/gpu/drm/phytium/octopus/sync_internal.h new file mode 100644 index 000000000000..0be8b647ed55 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/sync_internal.h @@ -0,0 +1,127 @@ +/*************************************************************************/ /*! +@File +@Title Services internal synchronisation interface header +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Description Defines the internal client side interface for services + synchronisation +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef SYNC_INTERNAL +#define SYNC_INTERNAL + +#include "img_types.h" +#include "img_defs.h" +#include "ra.h" +#include "dllist.h" +#include "lock.h" +#include "devicemem.h" +#include "sync_prim_internal.h" + +#define LOCAL_SYNC_PRIM_RESET_VALUE 0 +#define LOCAL_SYNC_PRIM_POISON_VALUE 0xa5a5a5a5u + +/* + Debug feature to protect against GP DM page faults when + sync prims are freed by client before work is completed. 
+*/ +#define LOCAL_SYNC_BLOCK_RETAIN_FIRST + +/* + Private structure's +*/ +#define SYNC_PRIM_NAME_SIZE 50 +typedef struct SYNC_PRIM_CONTEXT +{ + SHARED_DEV_CONNECTION hDevConnection; + IMG_CHAR azName[SYNC_PRIM_NAME_SIZE]; /*!< Name of the RA */ + RA_ARENA *psSubAllocRA; /*!< RA context */ + IMG_CHAR azSpanName[SYNC_PRIM_NAME_SIZE];/*!< Name of the span RA */ + RA_ARENA *psSpanRA; /*!< RA used for span management of SubAllocRA */ + ATOMIC_T hRefCount; /*!< Ref count for this context */ +#if defined(LOCAL_SYNC_BLOCK_RETAIN_FIRST) + IMG_HANDLE hFirstSyncPrim; /*!< Handle to the first allocated sync prim */ +#endif +} SYNC_PRIM_CONTEXT; + +typedef struct SYNC_PRIM_BLOCK_TAG +{ + SYNC_PRIM_CONTEXT *psContext; /*!< Our copy of the services connection */ + IMG_HANDLE hServerSyncPrimBlock; /*!< Server handle for this block */ + IMG_UINT32 ui32SyncBlockSize; /*!< Size of the sync prim block */ + IMG_UINT32 ui32FirmwareAddr; /*!< Firmware address */ + DEVMEM_MEMDESC *hMemDesc; /*!< Host mapping handle */ + IMG_UINT32 __iomem *pui32LinAddr; /*!< User CPU mapping */ + IMG_UINT64 uiSpanBase; /*!< Base of this import in the span RA */ + DLLIST_NODE sListNode; /*!< List node for the sync block list */ +} SYNC_PRIM_BLOCK; + +typedef enum SYNC_PRIM_TYPE_TAG +{ + SYNC_PRIM_TYPE_UNKNOWN = 0, + SYNC_PRIM_TYPE_LOCAL, + SYNC_PRIM_TYPE_SERVER, +} SYNC_PRIM_TYPE; + +typedef struct SYNC_PRIM_LOCAL_TAG +{ + ATOMIC_T hRefCount; /*!< Ref count for this sync */ + SYNC_PRIM_BLOCK *psSyncBlock; /*!< Synchronisation block this primitive is allocated on */ + IMG_UINT64 uiSpanAddr; /*!< Span address of the sync */ + IMG_HANDLE hRecord; /*!< Sync record handle */ +} SYNC_PRIM_LOCAL; + +typedef struct SYNC_PRIM_TAG +{ + PVRSRV_CLIENT_SYNC_PRIM sCommon; /*!< Client visible part of the sync prim */ + SYNC_PRIM_TYPE eType; /*!< Sync primitive type */ + union { + SYNC_PRIM_LOCAL sLocal; /*!< Local sync primitive data */ + } u; +} SYNC_PRIM; + + +IMG_INTERNAL PVRSRV_ERROR 
+SyncPrimGetFirmwareAddr(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 *pui32FwAddr); + +IMG_INTERNAL PVRSRV_ERROR SyncPrimLocalGetHandleAndOffset(PVRSRV_CLIENT_SYNC_PRIM *psSync, + IMG_HANDLE *phBlock, + IMG_UINT32 *pui32Offset); + + +#endif /* SYNC_INTERNAL */ diff --git a/drivers/gpu/drm/phytium/octopus/sync_prim_internal.h b/drivers/gpu/drm/phytium/octopus/sync_prim_internal.h new file mode 100644 index 000000000000..c34959355034 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/sync_prim_internal.h @@ -0,0 +1,84 @@ +/*************************************************************************/ /*! +@File +@Title Services internal synchronisation typedef header +@Description Defines synchronisation types that are used internally + only +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef SYNC_INTERNAL_H +#define SYNC_INTERNAL_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#include + +/* These are included here as the typedefs are required + * internally. + */ + +typedef struct SYNC_PRIM_CONTEXT *PSYNC_PRIM_CONTEXT; +typedef struct PVRSRV_CLIENT_SYNC_PRIM +{ + volatile uint32_t __iomem *pui32LinAddr; /*!< User pointer to the primitive */ +} PVRSRV_CLIENT_SYNC_PRIM; + +/*! 
+ * Bundled information for a sync prim operation + * + * Structure: #PVRSRV_CLIENT_SYNC_PRIM_OP + * Typedef: ::PVRSRV_CLIENT_SYNC_PRIM_OP + */ +typedef struct PVRSRV_CLIENT_SYNC_PRIM_OP +{ + #define PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK (1U << 0) + #define PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE (1U << 1) + #define PVRSRV_CLIENT_SYNC_PRIM_OP_UNFENCED_UPDATE (PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE | (1U<<2)) + uint32_t ui32Flags; /*!< Operation flags: PVRSRV_CLIENT_SYNC_PRIM_OP_XXX */ + PVRSRV_CLIENT_SYNC_PRIM *psSync; /*!< Pointer to the client sync primitive */ + uint32_t ui32FenceValue; /*!< The Fence value (only used if PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK is set) */ + uint32_t ui32UpdateValue; /*!< The Update value (only used if PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE is set) */ +} PVRSRV_CLIENT_SYNC_PRIM_OP; + +#if defined(__cplusplus) +} +#endif +#endif /* SYNC_INTERNAL_H */ diff --git a/drivers/gpu/drm/phytium/octopus/sync_server.c b/drivers/gpu/drm/phytium/octopus/sync_server.c new file mode 100644 index 000000000000..7dfbed2f3f40 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/sync_server.c @@ -0,0 +1,1227 @@ +/*************************************************************************/ /*! +@File sync_server.c +@Title Server side synchronisation functions +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements the server side functions that for synchronisation +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#include "img_types.h" +#include "img_defs.h" +#include "sync_server.h" +#include "allocmem.h" +#include "device.h" +#include "devicemem.h" +#include "devicemem_pdump.h" +#include "osfunc.h" +#include "pdump.h" +#include "pvr_debug.h" +#include "pvr_notifier.h" +#include "pdump_km.h" +#include "sync.h" +#include "sync_internal.h" +#include "connection_server.h" +#include "htbuffer.h" +#include "rgxhwperf.h" +#include "info_page.h" + +#include "sync_checkpoint_internal.h" +#include "sync_checkpoint.h" + +/* Include this to obtain MAX_SYNC_CHECKPOINTS_PER_FENCE */ +#include "sync_checkpoint_external.h" + +/* Include this to obtain PVRSRV_MAX_DEV_VARS */ +#include "pvrsrv_devvar.h" + +#if defined(SUPPORT_SECURE_EXPORT) +#include "ossecure_export.h" +#endif + +#if defined(SUPPORT_EXTRA_METASP_DEBUG) +#include "rgxdebug.h" +#endif + +/* Set this to enable debug relating to the construction and maintenance of the sync address list */ +#define SYNC_ADDR_LIST_DEBUG 0 + +/* Set maximum number of FWAddrs that can be accommodated in a SYNC_ADDR_LIST. + * This should allow for PVRSRV_MAX_DEV_VARS dev vars plus + * MAX_SYNC_CHECKPOINTS_PER_FENCE sync checkpoints for check fences. + * The same SYNC_ADDR_LIST is also used to hold UFOs for updates. While this + * may need to accommodate the additional sync prim update returned by Native + * sync implementation (used for timeline debug), the size calculated from + * PVRSRV_MAX_DEV_VARS+MAX_SYNC_CHECKPOINTS_PER_FENCE should be ample. + */ +#define PVRSRV_MAX_SYNC_ADDR_LIST_SIZE (PVRSRV_MAX_DEV_VARS+MAX_SYNC_CHECKPOINTS_PER_FENCE) +/* Check that helper functions will not be preparing longer lists of + * UFOs than the FW can handle. 
+ */ +static_assert(PVRSRV_MAX_SYNC_ADDR_LIST_SIZE <= RGXFWIF_CCB_CMD_MAX_UFOS, + "PVRSRV_MAX_SYNC_ADDR_LIST_SIZE > RGXFWIF_CCB_CMD_MAX_UFOS."); + +/* Max number of syncs allowed in a sync prim op */ +#define SYNC_PRIM_OP_MAX_SYNCS 1024 + +struct _SYNC_PRIMITIVE_BLOCK_ +{ + PVRSRV_DEVICE_NODE *psDevNode; + DEVMEM_MEMDESC *psMemDesc; + IMG_UINT32 *pui32LinAddr; + IMG_UINT32 ui32BlockSize; /*!< Size of the Sync Primitive Block */ + ATOMIC_T sRefCount; + DLLIST_NODE sConnectionNode; + SYNC_CONNECTION_DATA *psSyncConnectionData; /*!< Link back to the sync connection data if there is one */ + PRGXFWIF_UFO_ADDR uiFWAddr; /*!< The firmware address of the sync prim block */ +}; + +struct _SYNC_CONNECTION_DATA_ +{ + DLLIST_NODE sListHead; /*!< list of sync block associated with / created against this connection */ + ATOMIC_T sRefCount; /*!< number of references to this object */ + POS_LOCK hLock; /*!< lock protecting the list of sync blocks */ +}; + +#define DECREMENT_WITH_WRAP(value, sz) ((value) ? ((value) - 1) : ((sz) - 1)) + +/* this is the max number of syncs we will search or dump + * at any time. + */ +#define SYNC_RECORD_LIMIT 20000 + +enum SYNC_RECORD_TYPE +{ + SYNC_RECORD_TYPE_UNKNOWN = 0, + SYNC_RECORD_TYPE_CLIENT, + SYNC_RECORD_TYPE_SERVER, +}; + +struct SYNC_RECORD +{ + PVRSRV_DEVICE_NODE *psDevNode; + SYNC_PRIMITIVE_BLOCK *psServerSyncPrimBlock; /*!< handle to _SYNC_PRIMITIVE_BLOCK_ */ + IMG_UINT32 ui32SyncOffset; /*!< offset to sync in block */ + IMG_UINT32 ui32FwBlockAddr; + IMG_PID uiPID; + IMG_UINT64 ui64OSTime; + enum SYNC_RECORD_TYPE eRecordType; + DLLIST_NODE sNode; + IMG_CHAR szClassName[PVRSRV_SYNC_NAME_LENGTH]; +}; + +#if defined(SYNC_DEBUG) || defined(REFCOUNT_DEBUG) +#define SYNC_REFCOUNT_PRINT(fmt, ...) PVRSRVDebugPrintf(PVR_DBG_WARNING, __FILE__, __LINE__, fmt, __VA_ARGS__) +#else +#define SYNC_REFCOUNT_PRINT(fmt, ...) +#endif + +#if defined(SYNC_DEBUG) +#define SYNC_UPDATES_PRINT(fmt, ...) 
PVRSRVDebugPrintf(PVR_DBG_WARNING, __FILE__, __LINE__, fmt, __VA_ARGS__) +#else +#define SYNC_UPDATES_PRINT(fmt, ...) +#endif + +/*! +***************************************************************************** + @Function : SyncPrimitiveBlockToFWAddr + + @Description : Given a pointer to a sync primitive block and an offset, + returns the firmware address of the sync. + + @Input psSyncPrimBlock : Sync primitive block which contains the sync + @Input ui32Offset : Offset of sync within the sync primitive block + @Output psAddrOut : Absolute FW address of the sync is written out through + this pointer + @Return : PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input + parameters are invalid. +*****************************************************************************/ + +PVRSRV_ERROR +SyncPrimitiveBlockToFWAddr(SYNC_PRIMITIVE_BLOCK *psSyncPrimBlock, + IMG_UINT32 ui32Offset, + PRGXFWIF_UFO_ADDR *psAddrOut) +{ + /* check offset is legal */ + if (unlikely((ui32Offset >= psSyncPrimBlock->ui32BlockSize) || + (ui32Offset % sizeof(IMG_UINT32)))) + { + PVR_DPF((PVR_DBG_ERROR, "SyncPrimitiveBlockToFWAddr: parameters check failed")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psAddrOut->ui32Addr = psSyncPrimBlock->uiFWAddr.ui32Addr + ui32Offset; + return PVRSRV_OK; +} + +/*! +***************************************************************************** + @Function : SyncAddrListGrow + + @Description : Grow the SYNC_ADDR_LIST so it can accommodate the given + number of syncs, up to a maximum of PVRSRV_MAX_SYNC_PRIMS. 
+ + @Input psList : The SYNC_ADDR_LIST to grow + @Input ui32NumSyncs : The number of sync addresses to be able to hold + @Return : PVRSRV_OK on success +*****************************************************************************/ + +static PVRSRV_ERROR SyncAddrListGrow(SYNC_ADDR_LIST *psList, IMG_UINT32 ui32NumSyncs) +{ + if (unlikely(ui32NumSyncs > PVRSRV_MAX_SYNC_ADDR_LIST_SIZE)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: ui32NumSyncs=%u > PVRSRV_MAX_SYNC_ADDR_LIST_SIZE=%u", __func__, ui32NumSyncs, PVRSRV_MAX_SYNC_ADDR_LIST_SIZE)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + +#if (SYNC_ADDR_LIST_DEBUG == 1) + PVR_DPF((PVR_DBG_ERROR, "%s: Entry psList=<%p>, psList->ui32NumSyncs=%d, ui32NumSyncs=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumSyncs)); +#endif + if (ui32NumSyncs > psList->ui32NumSyncs) + { + if (psList->pasFWAddrs == NULL) + { + psList->pasFWAddrs = OSAllocMem(sizeof(PRGXFWIF_UFO_ADDR) * PVRSRV_MAX_SYNC_ADDR_LIST_SIZE); + PVR_RETURN_IF_NOMEM(psList->pasFWAddrs); + } + + psList->ui32NumSyncs = ui32NumSyncs; + } + +#if (SYNC_ADDR_LIST_DEBUG == 1) + PVR_DPF((PVR_DBG_ERROR, "%s: Exit psList=<%p>, psList->ui32NumSyncs=%d, ui32NumSyncs=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumSyncs)); +#endif + return PVRSRV_OK; +} + +/*! +***************************************************************************** + @Function : SyncAddrListInit + + @Description : Initialise a SYNC_ADDR_LIST structure ready for use + + @Input psList : The SYNC_ADDR_LIST structure to initialise + @Return : None +*****************************************************************************/ + +void +SyncAddrListInit(SYNC_ADDR_LIST *psList) +{ + psList->ui32NumSyncs = 0; + psList->pasFWAddrs = NULL; +} + +/*! 
+***************************************************************************** + @Function : SyncAddrListDeinit + + @Description : Frees any resources associated with the given SYNC_ADDR_LIST + + @Input psList : The SYNC_ADDR_LIST structure to deinitialise + @Return : None +*****************************************************************************/ + +void +SyncAddrListDeinit(SYNC_ADDR_LIST *psList) +{ + if (psList->pasFWAddrs != NULL) + { + OSFreeMem(psList->pasFWAddrs); + } +} + +/*! +***************************************************************************** + @Function : SyncAddrListPopulate + + @Description : Populate the given SYNC_ADDR_LIST with the FW addresses + of the syncs given by the SYNC_PRIMITIVE_BLOCKs and sync offsets + + @Input ui32NumSyncs : The number of syncs being passed in + @Input apsSyncPrimBlock: Array of pointers to SYNC_PRIMITIVE_BLOCK structures + in which the syncs are based + @Input paui32SyncOffset: Array of offsets within each of the sync primitive blocks + where the syncs are located + @Return : PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input + parameters are invalid. 
+*****************************************************************************/ + +PVRSRV_ERROR +SyncAddrListPopulate(SYNC_ADDR_LIST *psList, + IMG_UINT32 ui32NumSyncs, + SYNC_PRIMITIVE_BLOCK **apsSyncPrimBlock, + IMG_UINT32 *paui32SyncOffset) +{ + IMG_UINT32 i; + PVRSRV_ERROR eError; + +#if (SYNC_ADDR_LIST_DEBUG == 1) + PVR_DPF((PVR_DBG_ERROR, "%s: Entry psList=<%p>, psList->ui32NumSyncs=%d, ui32NumSyncs=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumSyncs)); +#endif + if (ui32NumSyncs > psList->ui32NumSyncs) + { + eError = SyncAddrListGrow(psList, ui32NumSyncs); + + PVR_RETURN_IF_ERROR(eError); + } + + psList->ui32NumSyncs = ui32NumSyncs; + + for (i = 0; i < ui32NumSyncs; i++) + { + eError = SyncPrimitiveBlockToFWAddr(apsSyncPrimBlock[i], + paui32SyncOffset[i], + &psList->pasFWAddrs[i]); + + PVR_RETURN_IF_ERROR(eError); + } + +#if (SYNC_ADDR_LIST_DEBUG == 1) + PVR_DPF((PVR_DBG_ERROR, "%s: Exit psList=<%p>, psList->ui32NumSyncs=%d, ui32NumSyncs=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumSyncs)); +#endif + return PVRSRV_OK; +} + +PVRSRV_ERROR +SyncAddrListAppendSyncPrim(SYNC_ADDR_LIST *psList, + PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 ui32FwAddr = 0; + +#if (SYNC_ADDR_LIST_DEBUG == 1) + PVR_DPF((PVR_DBG_ERROR, "%s: Entry psList=<%p>, psList->ui32NumSyncs=%d)", __func__, (void*)psList, psList->ui32NumSyncs)); +#endif + /* Ensure there's room in psList for the additional sync prim update */ + eError = SyncAddrListGrow(psList, psList->ui32NumSyncs + 1); + PVR_GOTO_IF_ERROR(eError, e0); + + SyncPrimGetFirmwareAddr(psSyncPrim, &ui32FwAddr); +#if (SYNC_ADDR_LIST_DEBUG == 1) + PVR_DPF((PVR_DBG_ERROR, "%s: Appending sync prim <%p> UFO addr (0x%x) to psList[->pasFWAddrss[%d]", __func__, (void*)psSyncPrim, ui32FwAddr, psList->ui32NumSyncs-1)); +#endif + psList->pasFWAddrs[psList->ui32NumSyncs-1].ui32Addr = ui32FwAddr; + +#if (SYNC_ADDR_LIST_DEBUG == 1) + { + IMG_UINT32 iii; + + 
PVR_DPF((PVR_DBG_ERROR, "%s: psList->ui32NumSyncs=%d", __func__, psList->ui32NumSyncs)); + for (iii=0; iiiui32NumSyncs; iii++) + { + PVR_DPF((PVR_DBG_ERROR, "%s: psList->pasFWAddrs[%d].ui32Addr=0x%x", __func__, iii, psList->pasFWAddrs[iii].ui32Addr)); + } + } +#endif +e0: +#if (SYNC_ADDR_LIST_DEBUG == 1) + PVR_DPF((PVR_DBG_ERROR, "%s: Exit psList=<%p>, psList->ui32NumSyncs=%d", __func__, (void*)psList, psList->ui32NumSyncs)); +#endif + return eError; +} + + +static PVRSRV_ERROR +_AppendCheckpoints(SYNC_ADDR_LIST *psList, + IMG_UINT32 ui32NumCheckpoints, + PSYNC_CHECKPOINT *apsSyncCheckpoint, + IMG_BOOL bDeRefCheckpoints) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 ui32SyncCheckpointIndex; + IMG_UINT32 ui32RollbackSize = psList->ui32NumSyncs; + +#if (SYNC_ADDR_LIST_DEBUG == 1) + PVR_DPF((PVR_DBG_ERROR, "%s: Entry psList=<%p>, psList->ui32NumSyncs=%d, ui32NumCheckpoints=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumCheckpoints)); +#endif + /* Ensure there's room in psList for the sync checkpoints */ + eError = SyncAddrListGrow(psList, psList->ui32NumSyncs + ui32NumCheckpoints); + if (unlikely(eError != PVRSRV_OK)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: * * * * ERROR * * * * Trying to SyncAddrListGrow(psList=<%p>, psList->ui32NumSyncs=%d, ui32NumCheckpoints=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumCheckpoints)); + goto e0; + } + +#if (SYNC_ADDR_LIST_DEBUG == 1) + PVR_DPF((PVR_DBG_ERROR, "%s: (ui32NumCheckpoints=%d) (psList->ui32NumSyncs is now %d) array already contains %d FWAddrs:", __func__, ui32NumCheckpoints, psList->ui32NumSyncs, ui32RollbackSize)); + if (ui32RollbackSize > 0) + { + { + IMG_UINT32 kk; + for (kk=0; kkpsList->pasFWAddrs[%d].ui32Addr = %u(0x%x)", __func__, + (void*)&psList->pasFWAddrs[kk], kk, + psList->pasFWAddrs[kk].ui32Addr, psList->pasFWAddrs[kk].ui32Addr)); + } + } + } + PVR_DPF((PVR_DBG_ERROR, "%s: apsSyncCheckpoint=<%p>, apsSyncCheckpoint[0] = <%p>", __func__, (void*)apsSyncCheckpoint, 
(void*)apsSyncCheckpoint[0])); +#endif + for (ui32SyncCheckpointIndex=0; ui32SyncCheckpointIndexpasFWAddrs[ui32RollbackSize + ui32SyncCheckpointIndex].ui32Addr = SyncCheckpointGetFirmwareAddr(apsSyncCheckpoint[ui32SyncCheckpointIndex]); +#if (SYNC_ADDR_LIST_DEBUG == 1) + PVR_DPF((PVR_DBG_ERROR, "%s: SyncCheckpointCCBEnqueued(<%p>)", __func__, (void*)apsSyncCheckpoint[ui32SyncCheckpointIndex])); + PVR_DPF((PVR_DBG_ERROR, "%s: ID:%d", __func__, SyncCheckpointGetId((PSYNC_CHECKPOINT)apsSyncCheckpoint[ui32SyncCheckpointIndex]))); +#endif + SyncCheckpointCCBEnqueued((PSYNC_CHECKPOINT)apsSyncCheckpoint[ui32SyncCheckpointIndex]); + if (bDeRefCheckpoints) + { + /* Drop the reference that was taken internally by the OS implementation of resolve_fence() */ + SyncCheckpointDropRef((PSYNC_CHECKPOINT)apsSyncCheckpoint[ui32SyncCheckpointIndex]); + } + } +#if (SYNC_ADDR_LIST_DEBUG == 1) + if (psList->ui32NumSyncs > 0) + { + IMG_UINT32 kk; + for (kk=0; kkui32NumSyncs; kk++) + { + PVR_DPF((PVR_DBG_ERROR, "%s: <%p>psList->pasFWAddrs[%d].ui32Addr = %u(0x%x)", __func__, + (void*)&psList->pasFWAddrs[kk], kk, + psList->pasFWAddrs[kk].ui32Addr, psList->pasFWAddrs[kk].ui32Addr)); + } + } +#endif + return eError; + +e0: + for (ui32SyncCheckpointIndex=0; ui32SyncCheckpointIndex, psList->ui32NumSyncs=%d, ui32NumCheckpoints=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumCheckpoints)); +#endif + return eError; +} + +/*! +***************************************************************************** + @Function : SyncAddrListAppendCheckpoints + + @Description : Append the FW addresses of the sync checkpoints given in + the PSYNC_CHECKPOINTs array to the given SYNC_ADDR_LIST + + @Input ui32NumSyncCheckpoints : The number of sync checkpoints + being passed in + @Input apsSyncCheckpoint : Array of PSYNC_CHECKPOINTs whose details + are to be appended to the SYNC_ADDR_LIST + @Return : PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input + parameters are invalid. 
+*****************************************************************************/ +PVRSRV_ERROR +SyncAddrListAppendCheckpoints(SYNC_ADDR_LIST *psList, + IMG_UINT32 ui32NumCheckpoints, + PSYNC_CHECKPOINT *apsSyncCheckpoint) +{ + return _AppendCheckpoints(psList, ui32NumCheckpoints, apsSyncCheckpoint, IMG_FALSE); +} + +/*! +***************************************************************************** + @Function : SyncAddrListAppendAndDeRefCheckpoints + + @Description : Append the FW addresses of the sync checkpoints given in + the PSYNC_CHECKPOINTs array to the given SYNC_ADDR_LIST. + A reference is dropped for each of the checkpoints. + + @Input ui32NumSyncCheckpoints : The number of sync checkpoints + being passed in + @Input apsSyncCheckpoint : Array of PSYNC_CHECKPOINTs whose details + are to be appended to the SYNC_ADDR_LIST + @Return : PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input + parameters are invalid. +*****************************************************************************/ +PVRSRV_ERROR +SyncAddrListAppendAndDeRefCheckpoints(SYNC_ADDR_LIST *psList, + IMG_UINT32 ui32NumCheckpoints, + PSYNC_CHECKPOINT *apsSyncCheckpoint) +{ + return _AppendCheckpoints(psList, ui32NumCheckpoints, apsSyncCheckpoint, IMG_TRUE); +} + +void +SyncAddrListDeRefCheckpoints(IMG_UINT32 ui32NumCheckpoints, + PSYNC_CHECKPOINT *apsSyncCheckpoint) +{ + IMG_UINT32 ui32SyncCheckpointIndex; + + for (ui32SyncCheckpointIndex=0; ui32SyncCheckpointIndex)", __func__, (void*)psList)); +#endif + if (psList) + { +#if (SYNC_ADDR_LIST_DEBUG == 1) + PVR_DPF((PVR_DBG_ERROR, "%s: psList->ui32NumSyncs=%d", __func__, psList->ui32NumSyncs)); +#endif + for (ui32SyncIndex=0; ui32SyncIndexui32NumSyncs; ui32SyncIndex++) + { + if (psList->pasFWAddrs[ui32SyncIndex].ui32Addr & 0x1) + { + SyncCheckpointRollbackFromUFO(psDevNode, psList->pasFWAddrs[ui32SyncIndex].ui32Addr); + } + } + } + return eError; +} + +PVRSRV_ERROR +PVRSRVSyncRecordAddKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE 
*psDevNode, + SYNC_RECORD_HANDLE *phRecord, + SYNC_PRIMITIVE_BLOCK *hServerSyncPrimBlock, + IMG_UINT32 ui32FwBlockAddr, + IMG_UINT32 ui32SyncOffset, + IMG_BOOL bServerSync, + IMG_UINT32 ui32ClassNameSize, + const IMG_CHAR *pszClassName) +{ + struct SYNC_RECORD * psSyncRec; + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_UNREFERENCED_PARAMETER(psConnection); + + RGXSRV_HWPERF_ALLOC(psDevNode, SYNC, + ui32FwBlockAddr + ui32SyncOffset, + pszClassName, + ui32ClassNameSize); + + PVR_RETURN_IF_INVALID_PARAM(phRecord); + + *phRecord = NULL; + + psSyncRec = OSAllocMem(sizeof(*psSyncRec)); + PVR_GOTO_IF_NOMEM(psSyncRec, eError, fail_alloc); + + psSyncRec->psDevNode = psDevNode; + psSyncRec->psServerSyncPrimBlock = hServerSyncPrimBlock; + psSyncRec->ui32SyncOffset = ui32SyncOffset; + psSyncRec->ui32FwBlockAddr = ui32FwBlockAddr; + psSyncRec->ui64OSTime = OSClockns64(); + psSyncRec->uiPID = OSGetCurrentProcessID(); + psSyncRec->eRecordType = bServerSync? SYNC_RECORD_TYPE_SERVER: SYNC_RECORD_TYPE_CLIENT; + + if (pszClassName) + { + if (ui32ClassNameSize >= PVRSRV_SYNC_NAME_LENGTH) + ui32ClassNameSize = PVRSRV_SYNC_NAME_LENGTH; + /* Copy over the class name annotation */ + OSStringLCopy(psSyncRec->szClassName, pszClassName, ui32ClassNameSize); + } + else + { + /* No class name annotation */ + psSyncRec->szClassName[0] = 0; + } + + OSLockAcquire(psDevNode->hSyncServerRecordLock); + if (psDevNode->ui32SyncServerRecordCount < SYNC_RECORD_LIMIT) + { + dllist_add_to_head(&psDevNode->sSyncServerRecordList, &psSyncRec->sNode); + psDevNode->ui32SyncServerRecordCount++; + + if (psDevNode->ui32SyncServerRecordCount > psDevNode->ui32SyncServerRecordCountHighWatermark) + { + psDevNode->ui32SyncServerRecordCountHighWatermark = psDevNode->ui32SyncServerRecordCount; + } + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to add sync record \"%s\". 
%u records already exist.", + __func__, + pszClassName, + psDevNode->ui32SyncServerRecordCount)); + OSFreeMem(psSyncRec); + psSyncRec = NULL; + eError = PVRSRV_ERROR_TOOMANYBUFFERS; + } + OSLockRelease(psDevNode->hSyncServerRecordLock); + + *phRecord = (SYNC_RECORD_HANDLE)psSyncRec; + +fail_alloc: + return eError; +} + +PVRSRV_ERROR +PVRSRVSyncRecordRemoveByHandleKM( + SYNC_RECORD_HANDLE hRecord) +{ + struct SYNC_RECORD **ppFreedSync; + struct SYNC_RECORD *pSync = (struct SYNC_RECORD*)hRecord; + PVRSRV_DEVICE_NODE *psDevNode; + + PVR_RETURN_IF_INVALID_PARAM(hRecord); + + psDevNode = pSync->psDevNode; + + OSLockAcquire(psDevNode->hSyncServerRecordLock); + + RGXSRV_HWPERF_FREE(psDevNode, SYNC, pSync->ui32FwBlockAddr + pSync->ui32SyncOffset); + + dllist_remove_node(&pSync->sNode); + + if (psDevNode->uiSyncServerRecordFreeIdx >= PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN) + { + PVR_DPF((PVR_DBG_ERROR, "%s: freed sync record index out of range", + __func__)); + psDevNode->uiSyncServerRecordFreeIdx = 0; + } + ppFreedSync = &psDevNode->apsSyncServerRecordsFreed[psDevNode->uiSyncServerRecordFreeIdx]; + psDevNode->uiSyncServerRecordFreeIdx = + (psDevNode->uiSyncServerRecordFreeIdx + 1) % PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN; + + if (*ppFreedSync) + { + OSFreeMem(*ppFreedSync); + } + pSync->psServerSyncPrimBlock = NULL; + pSync->ui64OSTime = OSClockns64(); + *ppFreedSync = pSync; + + psDevNode->ui32SyncServerRecordCount--; + + OSLockRelease(psDevNode->hSyncServerRecordLock); + + return PVRSRV_OK; +} + +PVRSRV_ERROR +PVRSRVSyncAllocEventKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_BOOL bServerSync, + IMG_UINT32 ui32FWAddr, + IMG_UINT32 ui32ClassNameSize, + const IMG_CHAR *pszClassName) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + RGXSRV_HWPERF_ALLOC(psDevNode, SYNC, ui32FWAddr, pszClassName, ui32ClassNameSize); + + return PVRSRV_OK; +} + +PVRSRV_ERROR +PVRSRVSyncFreeEventKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + 
IMG_UINT32 ui32FWAddr) +{ + PVR_UNREFERENCED_PARAMETER(psConnection); + RGXSRV_HWPERF_FREE(psDevNode, SYNC, ui32FWAddr); + + return PVRSRV_OK; +} + +static +void _SyncConnectionRef(SYNC_CONNECTION_DATA *psSyncConnectionData) +{ + IMG_INT iRefCount = OSAtomicIncrement(&psSyncConnectionData->sRefCount); + + SYNC_REFCOUNT_PRINT("%s: Sync connection %p, refcount = %d", + __func__, psSyncConnectionData, iRefCount); + PVR_UNREFERENCED_PARAMETER(iRefCount); +} + +static +void _SyncConnectionUnref(SYNC_CONNECTION_DATA *psSyncConnectionData) +{ + IMG_INT iRefCount = OSAtomicDecrement(&psSyncConnectionData->sRefCount); + if (iRefCount == 0) + { + SYNC_REFCOUNT_PRINT("%s: Sync connection %p, refcount = %d", + __func__, psSyncConnectionData, iRefCount); + + PVR_ASSERT(dllist_is_empty(&psSyncConnectionData->sListHead)); + OSLockDestroy(psSyncConnectionData->hLock); + OSFreeMem(psSyncConnectionData); + } + else + { + SYNC_REFCOUNT_PRINT("%s: Sync connection %p, refcount = %d", + __func__, psSyncConnectionData, iRefCount); + PVR_ASSERT(iRefCount > 0); + } +} + +static +void _SyncConnectionAddBlock(CONNECTION_DATA *psConnection, SYNC_PRIMITIVE_BLOCK *psBlock) +{ + if (psConnection) + { + SYNC_CONNECTION_DATA *psSyncConnectionData = psConnection->psSyncConnectionData; + + /* + Make sure the connection doesn't go away. It doesn't matter that we will release + the lock between as the refcount and list don't have to be atomic w.r.t. 
to each other + */ + _SyncConnectionRef(psSyncConnectionData); + + OSLockAcquire(psSyncConnectionData->hLock); + if (psConnection != NULL) + { + dllist_add_to_head(&psSyncConnectionData->sListHead, &psBlock->sConnectionNode); + } + OSLockRelease(psSyncConnectionData->hLock); + psBlock->psSyncConnectionData = psSyncConnectionData; + } + else + { + psBlock->psSyncConnectionData = NULL; + } +} + +static +void _SyncConnectionRemoveBlock(SYNC_PRIMITIVE_BLOCK *psBlock) +{ + SYNC_CONNECTION_DATA *psSyncConnectionData = psBlock->psSyncConnectionData; + + if (psBlock->psSyncConnectionData) + { + OSLockAcquire(psSyncConnectionData->hLock); + dllist_remove_node(&psBlock->sConnectionNode); + OSLockRelease(psSyncConnectionData->hLock); + + _SyncConnectionUnref(psBlock->psSyncConnectionData); + } +} + +static inline +void _DoPrimBlockFree(SYNC_PRIMITIVE_BLOCK *psSyncBlk) +{ + PVRSRV_DEVICE_NODE *psDevNode = psSyncBlk->psDevNode; + + SYNC_REFCOUNT_PRINT("%s: Sync block %p, refcount = %d (remove)", + __func__, psSyncBlk, OSAtomicRead(&psSyncBlk->sRefCount)); + + PVR_ASSERT(OSAtomicRead(&psSyncBlk->sRefCount) == 1); + + _SyncConnectionRemoveBlock(psSyncBlk); + DevmemReleaseCpuVirtAddr(psSyncBlk->psMemDesc); + psDevNode->pfnFreeUFOBlock(psDevNode, psSyncBlk->psMemDesc); + OSFreeMem(psSyncBlk); +} + +PVRSRV_ERROR +PVRSRVAllocSyncPrimitiveBlockKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDevNode, + SYNC_PRIMITIVE_BLOCK **ppsSyncBlk, + IMG_UINT32 *puiSyncPrimVAddr, + IMG_UINT32 *puiSyncPrimBlockSize, + PMR **ppsSyncPMR) +{ + SYNC_PRIMITIVE_BLOCK *psNewSyncBlk; + PVRSRV_ERROR eError; + + psNewSyncBlk = OSAllocMem(sizeof(SYNC_PRIMITIVE_BLOCK)); + PVR_GOTO_IF_NOMEM(psNewSyncBlk, eError, e0); + + psNewSyncBlk->psDevNode = psDevNode; + + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Allocate UFO block"); + + eError = psDevNode->pfnAllocUFOBlock(psDevNode, + &psNewSyncBlk->psMemDesc, + &psNewSyncBlk->uiFWAddr.ui32Addr, + &psNewSyncBlk->ui32BlockSize); + 
PVR_GOTO_IF_ERROR(eError, e1); + + *puiSyncPrimVAddr = psNewSyncBlk->uiFWAddr.ui32Addr; + + eError = DevmemAcquireCpuVirtAddr(psNewSyncBlk->psMemDesc, + (void **) &psNewSyncBlk->pui32LinAddr); + PVR_GOTO_IF_ERROR(eError, e2); + + eError = DevmemLocalGetImportHandle(psNewSyncBlk->psMemDesc, (void **) ppsSyncPMR); + + PVR_GOTO_IF_ERROR(eError, e3); + + OSAtomicWrite(&psNewSyncBlk->sRefCount, 1); + + /* If there is a connection pointer then add the new block onto it's list */ + _SyncConnectionAddBlock(psConnection, psNewSyncBlk); + + *ppsSyncBlk = psNewSyncBlk; + *puiSyncPrimBlockSize = psNewSyncBlk->ui32BlockSize; + + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "Allocated UFO block (FirmwareVAddr = 0x%08x)", + *puiSyncPrimVAddr); + + return PVRSRV_OK; + +e3: + DevmemReleaseCpuVirtAddr(psNewSyncBlk->psMemDesc); +e2: + psDevNode->pfnFreeUFOBlock(psDevNode, psNewSyncBlk->psMemDesc); +e1: + OSFreeMem(psNewSyncBlk); +e0: + return eError; +} + +PVRSRV_ERROR +PVRSRVFreeSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk) +{ + + /* This function is an alternative to the above without reference counting. 
+ * With the removal of sync prim ops for server syncs we no longer have to + * reference count prim blocks as the reference will never be incremented / + * decremented by a prim op */ + _DoPrimBlockFree(psSyncBlk); + return PVRSRV_OK; +} + +static INLINE IMG_BOOL _CheckSyncIndex(SYNC_PRIMITIVE_BLOCK *psSyncBlk, + IMG_UINT32 ui32Index) +{ + return ((ui32Index * sizeof(IMG_UINT32)) < psSyncBlk->ui32BlockSize); +} + +PVRSRV_ERROR +PVRSRVSyncPrimSetKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Index, + IMG_UINT32 ui32Value) +{ + if (_CheckSyncIndex(psSyncBlk, ui32Index)) + { + psSyncBlk->pui32LinAddr[ui32Index] = ui32Value; + return PVRSRV_OK; + } + else + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncPrimSetKM: Index %u out of range for " + "0x%08X byte sync block (value 0x%08X)", + ui32Index, + psSyncBlk->ui32BlockSize, + ui32Value)); + return PVRSRV_ERROR_INVALID_PARAMS; + } +} + +#if defined(PDUMP) +PVRSRV_ERROR +PVRSRVSyncPrimPDumpValueKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value) +{ + /* + We might be ask to PDump sync state outside of capture range + (e.g. texture uploads) so make this continuous. + */ + DevmemPDumpLoadMemValue32(psSyncBlk->psMemDesc, + ui32Offset, + ui32Value, + PDUMP_FLAGS_CONTINUOUS); + + return PVRSRV_OK; +} + +PVRSRV_ERROR +PVRSRVSyncPrimPDumpKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset) +{ + /* + We might be ask to PDump sync state outside of capture range + (e.g. texture uploads) so make this continuous. 
+ */ + DevmemPDumpLoadMem(psSyncBlk->psMemDesc, + ui32Offset, + sizeof(IMG_UINT32), + PDUMP_FLAGS_CONTINUOUS); + + return PVRSRV_OK; +} + +PVRSRV_ERROR +PVRSRVSyncPrimPDumpPolKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset, + IMG_UINT32 ui32Value, IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + PDUMP_FLAGS_T ui32PDumpFlags) +{ + DevmemPDumpDevmemPol32(psSyncBlk->psMemDesc, + ui32Offset, + ui32Value, + ui32Mask, + eOperator, + ui32PDumpFlags); + + return PVRSRV_OK; +} + +PVRSRV_ERROR +PVRSRVSyncPrimPDumpCBPKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT64 ui32Offset, + IMG_UINT64 uiWriteOffset, IMG_UINT64 uiPacketSize, + IMG_UINT64 uiBufferSize) +{ + DevmemPDumpCBP(psSyncBlk->psMemDesc, + ui32Offset, + uiWriteOffset, + uiPacketSize, + uiBufferSize); + return PVRSRV_OK; +} +#endif + +/* SyncRegisterConnection */ +PVRSRV_ERROR SyncRegisterConnection(SYNC_CONNECTION_DATA **ppsSyncConnectionData) +{ + SYNC_CONNECTION_DATA *psSyncConnectionData; + PVRSRV_ERROR eError; + + psSyncConnectionData = OSAllocMem(sizeof(SYNC_CONNECTION_DATA)); + if (psSyncConnectionData == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto fail_alloc; + } + + eError = OSLockCreate(&psSyncConnectionData->hLock); + PVR_GOTO_IF_ERROR(eError, fail_lockcreate); + dllist_init(&psSyncConnectionData->sListHead); + OSAtomicWrite(&psSyncConnectionData->sRefCount, 1); + + *ppsSyncConnectionData = psSyncConnectionData; + return PVRSRV_OK; + +fail_lockcreate: + OSFreeMem(psSyncConnectionData); +fail_alloc: + PVR_ASSERT(eError != PVRSRV_OK); + return eError; +} + +/* SyncUnregisterConnection */ +void SyncUnregisterConnection(SYNC_CONNECTION_DATA *psSyncConnectionData) +{ + _SyncConnectionUnref(psSyncConnectionData); +} + +void SyncConnectionPDumpSyncBlocks(void *hSyncPrivData, PDUMP_TRANSITION_EVENT eEvent) +{ + if ((eEvent == PDUMP_TRANSITION_EVENT_RANGE_ENTERED) || (eEvent == PDUMP_TRANSITION_EVENT_BLOCK_STARTED)) + { + SYNC_CONNECTION_DATA *psSyncConnectionData = hSyncPrivData; + 
DLLIST_NODE *psNode, *psNext; + + OSLockAcquire(psSyncConnectionData->hLock); + + PDUMPCOMMENT("Dump client Sync Prim state"); + dllist_foreach_node(&psSyncConnectionData->sListHead, psNode, psNext) + { + SYNC_PRIMITIVE_BLOCK *psSyncBlock = + IMG_CONTAINER_OF(psNode, SYNC_PRIMITIVE_BLOCK, sConnectionNode); + + DevmemPDumpLoadMem(psSyncBlock->psMemDesc, + 0, + psSyncBlock->ui32BlockSize, + PDUMP_FLAGS_CONTINUOUS); + } + + OSLockRelease(psSyncConnectionData->hLock); + } +} + +void SyncRecordLookup(PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT32 ui32FwAddr, + IMG_CHAR * pszSyncInfo, size_t len) +{ + DLLIST_NODE *psNode, *psNext; + IMG_INT iEnd; + IMG_BOOL bFound = IMG_FALSE; + + if (!pszSyncInfo) + { + return; + } + + OSLockAcquire(psDevNode->hSyncServerRecordLock); + pszSyncInfo[0] = '\0'; + + dllist_foreach_node(&psDevNode->sSyncServerRecordList, psNode, psNext) + { + struct SYNC_RECORD *psSyncRec = + IMG_CONTAINER_OF(psNode, struct SYNC_RECORD, sNode); + if ((psSyncRec->ui32FwBlockAddr+psSyncRec->ui32SyncOffset) == ui32FwAddr + && SYNC_RECORD_TYPE_UNKNOWN != psSyncRec->eRecordType + && psSyncRec->psServerSyncPrimBlock + && psSyncRec->psServerSyncPrimBlock->pui32LinAddr + ) + { + IMG_UINT32 *pui32SyncAddr; + pui32SyncAddr = psSyncRec->psServerSyncPrimBlock->pui32LinAddr + + (psSyncRec->ui32SyncOffset/sizeof(IMG_UINT32)); + iEnd = OSSNPrintf(pszSyncInfo, len, "Cur=0x%08x %s:%05u (%s)", + *pui32SyncAddr, + ((SYNC_RECORD_TYPE_SERVER==psSyncRec->eRecordType)?"Server":"Client"), + psSyncRec->uiPID, + psSyncRec->szClassName + ); + if (iEnd >= 0 && iEnd < len) + { + pszSyncInfo[iEnd] = '\0'; + } + bFound = IMG_TRUE; + break; + } + } + + OSLockRelease(psDevNode->hSyncServerRecordLock); + + if (!bFound && (psDevNode->ui32SyncServerRecordCountHighWatermark == SYNC_RECORD_LIMIT)) + { + OSSNPrintf(pszSyncInfo, len, "(Record may be lost)"); + } +} + +#define NS_IN_S (1000000000UL) +static void _SyncRecordPrint(struct SYNC_RECORD *psSyncRec, + IMG_UINT64 ui64TimeNow, + 
DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + SYNC_PRIMITIVE_BLOCK *psSyncBlock = psSyncRec->psServerSyncPrimBlock; + + if (SYNC_RECORD_TYPE_UNKNOWN != psSyncRec->eRecordType) + { + IMG_UINT64 ui64DeltaS; + IMG_UINT32 ui32DeltaF; + IMG_UINT64 ui64Delta = ui64TimeNow - psSyncRec->ui64OSTime; + ui64DeltaS = OSDivide64(ui64Delta, NS_IN_S, &ui32DeltaF); + + if (psSyncBlock && psSyncBlock->pui32LinAddr) + { + IMG_UINT32 *pui32SyncAddr; + pui32SyncAddr = psSyncBlock->pui32LinAddr + + (psSyncRec->ui32SyncOffset/sizeof(IMG_UINT32)); + + PVR_DUMPDEBUG_LOG("\t%s %05u %05" IMG_UINT64_FMTSPEC ".%09u FWAddr=0x%08x Val=0x%08x (%s)", + ((SYNC_RECORD_TYPE_SERVER==psSyncRec->eRecordType)?"Server":"Client"), + psSyncRec->uiPID, + ui64DeltaS, ui32DeltaF, + (psSyncRec->ui32FwBlockAddr+psSyncRec->ui32SyncOffset), + *pui32SyncAddr, + psSyncRec->szClassName + ); + } + else + { + PVR_DUMPDEBUG_LOG("\t%s %05u %05" IMG_UINT64_FMTSPEC ".%09u FWAddr=0x%08x Val= (%s)", + ((SYNC_RECORD_TYPE_SERVER==psSyncRec->eRecordType)?"Server":"Client"), + psSyncRec->uiPID, + ui64DeltaS, ui32DeltaF, + (psSyncRec->ui32FwBlockAddr+psSyncRec->ui32SyncOffset), + psSyncRec->szClassName + ); + } + } +} + +static void _SyncRecordRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, + IMG_UINT32 ui32VerbLevel, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle; + IMG_UINT64 ui64TimeNowS; + IMG_UINT32 ui32TimeNowF; + IMG_UINT64 ui64TimeNow = OSClockns64(); + DLLIST_NODE *psNode, *psNext; + + ui64TimeNowS = OSDivide64(ui64TimeNow, NS_IN_S, &ui32TimeNowF); + + if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM)) + { + IMG_UINT32 i; + OSLockAcquire(psDevNode->hSyncServerRecordLock); + + PVR_DUMPDEBUG_LOG("Dumping all allocated syncs. 
Allocated: %u High watermark: %u @ %05" IMG_UINT64_FMTSPEC ".%09u", + psDevNode->ui32SyncServerRecordCount, + psDevNode->ui32SyncServerRecordCountHighWatermark, + ui64TimeNowS, + ui32TimeNowF); + if (psDevNode->ui32SyncServerRecordCountHighWatermark == SYNC_RECORD_LIMIT) + { + PVR_DUMPDEBUG_LOG("Warning: Record limit (%u) was reached. Some sync checkpoints may not have been recorded in the debug information.", + SYNC_RECORD_LIMIT); + } + + PVR_DUMPDEBUG_LOG("\t%-6s %-5s %-15s %-17s %-14s (%s)", + "Type", "PID", "Time Delta (s)", "Address", "Value", "Annotation"); + + dllist_foreach_node(&psDevNode->sSyncServerRecordList, psNode, psNext) + { + struct SYNC_RECORD *psSyncRec = + IMG_CONTAINER_OF(psNode, struct SYNC_RECORD, sNode); + _SyncRecordPrint(psSyncRec, ui64TimeNow, pfnDumpDebugPrintf, pvDumpDebugFile); + } + + PVR_DUMPDEBUG_LOG("Dumping all recently freed syncs @ %05" IMG_UINT64_FMTSPEC ".%09u", + ui64TimeNowS, ui32TimeNowF); + PVR_DUMPDEBUG_LOG("\t%-6s %-5s %-15s %-17s %-14s (%s)", + "Type", "PID", "Time Delta (s)", "Address", "Value", "Annotation"); + for (i = DECREMENT_WITH_WRAP(psDevNode->uiSyncServerRecordFreeIdx, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN); + i != psDevNode->uiSyncServerRecordFreeIdx; + i = DECREMENT_WITH_WRAP(i, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN)) + { + if (psDevNode->apsSyncServerRecordsFreed[i]) + { + _SyncRecordPrint(psDevNode->apsSyncServerRecordsFreed[i], + ui64TimeNow, pfnDumpDebugPrintf, pvDumpDebugFile); + } + else + { + break; + } + } + + OSLockRelease(psDevNode->hSyncServerRecordLock); + } +} +#undef NS_IN_S + +static PVRSRV_ERROR SyncRecordListInit(PVRSRV_DEVICE_NODE *psDevNode) +{ + PVRSRV_ERROR eError; + + psDevNode->ui32SyncServerRecordCount = 0; + psDevNode->ui32SyncServerRecordCountHighWatermark = 0; + + eError = OSLockCreate(&psDevNode->hSyncServerRecordLock); + PVR_GOTO_IF_ERROR(eError, fail_lock_create); + dllist_init(&psDevNode->sSyncServerRecordList); + + eError = 
PVRSRVRegisterDbgRequestNotify(&psDevNode->hSyncServerRecordNotify, + psDevNode, + _SyncRecordRequest, + DEBUG_REQUEST_SYNCTRACKING, + psDevNode); + + PVR_GOTO_IF_ERROR(eError, fail_dbg_register); + + return PVRSRV_OK; + +fail_dbg_register: + OSLockDestroy(psDevNode->hSyncServerRecordLock); +fail_lock_create: + return eError; +} + +static void SyncRecordListDeinit(PVRSRV_DEVICE_NODE *psDevNode) +{ + DLLIST_NODE *psNode, *psNext; + int i; + + OSLockAcquire(psDevNode->hSyncServerRecordLock); + dllist_foreach_node(&psDevNode->sSyncServerRecordList, psNode, psNext) + { + struct SYNC_RECORD *pSyncRec = + IMG_CONTAINER_OF(psNode, struct SYNC_RECORD, sNode); + + dllist_remove_node(psNode); + OSFreeMem(pSyncRec); + } + + for (i = 0; i < PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN; i++) + { + if (psDevNode->apsSyncServerRecordsFreed[i]) + { + OSFreeMem(psDevNode->apsSyncServerRecordsFreed[i]); + psDevNode->apsSyncServerRecordsFreed[i] = NULL; + } + } + OSLockRelease(psDevNode->hSyncServerRecordLock); + + if (psDevNode->hSyncServerRecordNotify) + { + PVRSRVUnregisterDbgRequestNotify(psDevNode->hSyncServerRecordNotify); + } + OSLockDestroy(psDevNode->hSyncServerRecordLock); +} + +PVRSRV_ERROR SyncServerInit(PVRSRV_DEVICE_NODE *psDevNode) +{ + PVRSRV_ERROR eError; + + if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) + { + eError = SyncRecordListInit(psDevNode); + PVR_GOTO_IF_ERROR(eError, fail_record_list); + } + + return PVRSRV_OK; + +fail_record_list: + return eError; +} + +void SyncServerDeinit(PVRSRV_DEVICE_NODE *psDevNode) +{ + + if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) + { + SyncRecordListDeinit(psDevNode); + } +} diff --git a/drivers/gpu/drm/phytium/octopus/sync_server.h b/drivers/gpu/drm/phytium/octopus/sync_server.h new file mode 100644 index 000000000000..335b012419bf --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/sync_server.h @@ -0,0 +1,266 @@ 
+/**************************************************************************/ /*! +@File +@Title Server side synchronisation interface +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Describes the server side synchronisation functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /***************************************************************************/ + +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv.h" +#include "device.h" +#include "devicemem.h" +#include "pdump.h" +#include "pvrsrv_error.h" +#include "connection_server.h" +#include "pdump_km.h" + +#ifndef SYNC_SERVER_H +#define SYNC_SERVER_H + +typedef struct _SYNC_PRIMITIVE_BLOCK_ SYNC_PRIMITIVE_BLOCK; +typedef struct _SYNC_CONNECTION_DATA_ SYNC_CONNECTION_DATA; +typedef struct SYNC_RECORD* SYNC_RECORD_HANDLE; + +typedef struct _SYNC_ADDR_LIST_ +{ + IMG_UINT32 ui32NumSyncs; + PRGXFWIF_UFO_ADDR *pasFWAddrs; +} SYNC_ADDR_LIST; + +PVRSRV_ERROR +SyncPrimitiveBlockToFWAddr(SYNC_PRIMITIVE_BLOCK *psSyncPrimBlock, + IMG_UINT32 ui32Offset, + PRGXFWIF_UFO_ADDR *psAddrOut); + +void +SyncAddrListInit(SYNC_ADDR_LIST *psList); + +void +SyncAddrListDeinit(SYNC_ADDR_LIST *psList); + +PVRSRV_ERROR +SyncAddrListPopulate(SYNC_ADDR_LIST *psList, + IMG_UINT32 ui32NumSyncs, + SYNC_PRIMITIVE_BLOCK **apsSyncPrimBlock, + IMG_UINT32 *paui32SyncOffset); + +PVRSRV_ERROR +SyncAddrListAppendSyncPrim(SYNC_ADDR_LIST *psList, + PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim); +PVRSRV_ERROR +SyncAddrListAppendCheckpoints(SYNC_ADDR_LIST *psList, + IMG_UINT32 ui32NumCheckpoints, + PSYNC_CHECKPOINT *apsSyncCheckpoint); + +PVRSRV_ERROR +SyncAddrListAppendAndDeRefCheckpoints(SYNC_ADDR_LIST *psList, + IMG_UINT32 ui32NumCheckpoints, + PSYNC_CHECKPOINT *apsSyncCheckpoint); + +void 
+SyncAddrListDeRefCheckpoints(IMG_UINT32 ui32NumCheckpoints, + PSYNC_CHECKPOINT *apsSyncCheckpoint); + +PVRSRV_ERROR +SyncAddrListRollbackCheckpoints(PVRSRV_DEVICE_NODE *psDevNode, SYNC_ADDR_LIST *psList); + +PVRSRV_ERROR +PVRSRVAllocSyncPrimitiveBlockKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE * psDevNode, + SYNC_PRIMITIVE_BLOCK **ppsSyncBlk, + IMG_UINT32 *puiSyncPrimVAddr, + IMG_UINT32 *puiSyncPrimBlockSize, + PMR **ppsSyncPMR); + +PVRSRV_ERROR +PVRSRVExportSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, + DEVMEM_EXPORTCOOKIE **psExportCookie); + +PVRSRV_ERROR +PVRSRVUnexportSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk); + +PVRSRV_ERROR +PVRSRVFreeSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *ppsSyncBlk); + +PVRSRV_ERROR +PVRSRVSyncPrimSetKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Index, + IMG_UINT32 ui32Value); + +PVRSRV_ERROR +PVRSRVSyncAllocEventKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_BOOL bServerSync, + IMG_UINT32 ui32FWAddr, + IMG_UINT32 ui32ClassNameSize, + const IMG_CHAR *pszClassName); + +PVRSRV_ERROR +PVRSRVSyncFreeEventKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + IMG_UINT32 ui32FWAddr); + +PVRSRV_ERROR +PVRSRVSyncRecordAddKM(CONNECTION_DATA *psConnection, + PVRSRV_DEVICE_NODE *psDevNode, + SYNC_RECORD_HANDLE *phRecord, + SYNC_PRIMITIVE_BLOCK *hServerSyncPrimBlock, + IMG_UINT32 ui32FwBlockAddr, + IMG_UINT32 ui32SyncOffset, + IMG_BOOL bServerSync, + IMG_UINT32 ui32ClassNameSize, + const IMG_CHAR *pszClassName); + +PVRSRV_ERROR +PVRSRVSyncRecordRemoveByHandleKM( + SYNC_RECORD_HANDLE hRecord); +void SyncRecordLookup(PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT32 ui32FwAddr, + IMG_CHAR * pszSyncInfo, size_t len); + +void ServerSyncDumpPending(void); + +PVRSRV_ERROR SyncRegisterConnection(SYNC_CONNECTION_DATA **ppsSyncConnectionData); +void SyncUnregisterConnection(SYNC_CONNECTION_DATA *ppsSyncConnectionData); +void SyncConnectionPDumpSyncBlocks(void *hSyncPrivData, 
PDUMP_TRANSITION_EVENT eEvent); + +/*! +****************************************************************************** +@Function SyncServerInit + +@Description Per-device initialisation for the ServerSync module +******************************************************************************/ +PVRSRV_ERROR SyncServerInit(PVRSRV_DEVICE_NODE *psDevNode); +void SyncServerDeinit(PVRSRV_DEVICE_NODE *psDevNode); + + +/*! +****************************************************************************** +@Function PVRSRVLockServerSync + +@Description Acquire a global lock to maintain server sync consistency +******************************************************************************/ +void PVRSRVLockServerSync(void); +/*! +****************************************************************************** +@Function PVRSRVUnlockServerSync + +@Description Release the global server sync lock +******************************************************************************/ +void PVRSRVUnlockServerSync(void); + +#if defined(PDUMP) +PVRSRV_ERROR +PVRSRVSyncPrimPDumpKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset); + +PVRSRV_ERROR +PVRSRVSyncPrimPDumpValueKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset, + IMG_UINT32 ui32Value); + +PVRSRV_ERROR +PVRSRVSyncPrimPDumpPolKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset, + IMG_UINT32 ui32Value, IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + PDUMP_FLAGS_T uiDumpFlags); + +PVRSRV_ERROR +PVRSRVSyncPrimPDumpCBPKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT64 ui32Offset, + IMG_UINT64 uiWriteOffset, IMG_UINT64 uiPacketSize, + IMG_UINT64 uiBufferSize); + +#else /* PDUMP */ + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVSyncPrimPDumpKM) +#endif +static INLINE PVRSRV_ERROR +PVRSRVSyncPrimPDumpKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset) +{ + PVR_UNREFERENCED_PARAMETER(psSyncBlk); + PVR_UNREFERENCED_PARAMETER(ui32Offset); + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma 
inline(PVRSRVSyncPrimPDumpValueKM) +#endif +static INLINE PVRSRV_ERROR +PVRSRVSyncPrimPDumpValueKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset, + IMG_UINT32 ui32Value) +{ + PVR_UNREFERENCED_PARAMETER(psSyncBlk); + PVR_UNREFERENCED_PARAMETER(ui32Offset); + PVR_UNREFERENCED_PARAMETER(ui32Value); + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVSyncPrimPDumpPolKM) +#endif +static INLINE PVRSRV_ERROR +PVRSRVSyncPrimPDumpPolKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset, + IMG_UINT32 ui32Value, IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + PDUMP_FLAGS_T uiDumpFlags) +{ + PVR_UNREFERENCED_PARAMETER(psSyncBlk); + PVR_UNREFERENCED_PARAMETER(ui32Offset); + PVR_UNREFERENCED_PARAMETER(ui32Value); + PVR_UNREFERENCED_PARAMETER(ui32Mask); + PVR_UNREFERENCED_PARAMETER(eOperator); + PVR_UNREFERENCED_PARAMETER(uiDumpFlags); + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVSyncPrimPDumpCBPKM) +#endif +static INLINE PVRSRV_ERROR +PVRSRVSyncPrimPDumpCBPKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT64 ui32Offset, + IMG_UINT64 uiWriteOffset, IMG_UINT64 uiPacketSize, + IMG_UINT64 uiBufferSize) +{ + PVR_UNREFERENCED_PARAMETER(psSyncBlk); + PVR_UNREFERENCED_PARAMETER(ui32Offset); + PVR_UNREFERENCED_PARAMETER(uiWriteOffset); + PVR_UNREFERENCED_PARAMETER(uiPacketSize); + PVR_UNREFERENCED_PARAMETER(uiBufferSize); + return PVRSRV_OK; +} +#endif /* PDUMP */ +#endif /*SYNC_SERVER_H */ diff --git a/drivers/gpu/drm/phytium/octopus/syscommon.h b/drivers/gpu/drm/phytium/octopus/syscommon.h new file mode 100644 index 000000000000..9c5d4eacc8b6 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/syscommon.h @@ -0,0 +1,146 @@ +/**************************************************************************/ /*! +@File +@Title Common System APIs and structures +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Description This header provides common system-specific declarations and + macros that are supported by all systems +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /***************************************************************************/ + +#if !defined(SYSCOMMON_H) +#define SYSCOMMON_H + +#include "img_types.h" +#include "pvr_notifier.h" +#include "pvrsrv_device.h" +#include "pvrsrv_error.h" + +/*************************************************************************/ /*! +@Description Pointer to a Low-level Interrupt Service Routine (LISR). +@Input pvData Private data provided to the LISR. +@Return True if interrupt handled, false otherwise. +*/ /**************************************************************************/ +typedef IMG_BOOL (*PFN_LISR)(void *pvData); + +/**************************************************************************/ /*! +@Function SysDevInit +@Description System specific device initialisation function. +@Input pvOSDevice pointer to the OS device reference +@Input ppsDevConfig returned device configuration info +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /***************************************************************************/ +PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig); + +/**************************************************************************/ /*! +@Function SysDevDeInit +@Description System specific device deinitialisation function. +@Input psDevConfig device configuration info of the device to be + deinitialised +@Return None. 
+*/ /***************************************************************************/ +void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig); + +/**************************************************************************/ /*! +@Function SysDebugInfo +@Description Dump system specific device debug information. +@Input psDevConfig pointer to device configuration info +@Input pfnDumpDebugPrintf the 'printf' function to be called to + display the debug info +@Input pvDumpDebugFile optional file identifier to be passed to + the 'printf' function if required +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /***************************************************************************/ +PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile); + +/**************************************************************************/ /*! +@Function SysInstallDeviceLISR +@Description Installs the system Low-level Interrupt Service Routine (LISR) + which handles low-level processing of interrupts from the device + (GPU). + The LISR will be invoked when the device raises an interrupt. An + LISR may not be descheduled, so code which needs to do so should + be placed in an MISR. + The installed LISR will schedule any MISRs once it has completed + its interrupt processing, by calling OSScheduleMISR(). +@Input hSysData pointer to the system data of the device +@Input ui32IRQ the IRQ on which the LISR is to be installed +@Input pszName name of the module installing the LISR +@Input pfnLISR pointer to the function to be installed as the + LISR +@Input pvData private data provided to the LISR +@Output phLISRData handle to the installed LISR (to be used for a + subsequent uninstall) +@Return PVRSRV_OK on success, a failure code otherwise. 
+*/ /***************************************************************************/ +PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData, + IMG_UINT32 ui32IRQ, + const IMG_CHAR *pszName, + PFN_LISR pfnLISR, + void *pvData, + IMG_HANDLE *phLISRData); + +/**************************************************************************/ /*! +@Function SysUninstallDeviceLISR +@Description Uninstalls the system Low-level Interrupt Service Routine (LISR) + which handles low-level processing of interrupts from the device + (GPU). +@Input hLISRData handle of the LISR to be uninstalled +@Return PVRSRV_OK on success, a failure code otherwise. +*/ /***************************************************************************/ +PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData); + +/**************************************************************************/ /*! +@Function SysRGXErrorNotify +@Description Error reporting callback function, registered as the + pfnSysDevErrorNotify member of the PVRSRV_DEVICE_CONFIG + struct. System layer will be notified of device errors and + resets via this callback. + NB. implementers should ensure that the minimal amount of + work is done in this callback function, as it will be + executed in the main RGX MISR. (e.g. any blocking or lengthy + work should be performed by a worker queue/thread instead). +@Input hSysData pointer to the system data of the device +@Output psErrorData structure containing details of the reported error +@Return None. 
+*/ /***************************************************************************/ +void SysRGXErrorNotify(IMG_HANDLE hSysData, + PVRSRV_ROBUSTNESS_NOTIFY_DATA *psErrorData); + +#endif /* !defined(SYSCOMMON_H) */ diff --git a/drivers/gpu/drm/phytium/octopus/sysconfig.c b/drivers/gpu/drm/phytium/octopus/sysconfig.c new file mode 100644 index 000000000000..c4804db7b253 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/sysconfig.c @@ -0,0 +1,976 @@ +/*************************************************************************/ /*! +@File + +*/ /**************************************************************************/ + +#include +#include +#include + +#include "pvrsrv.h" +#include "img_defs.h" +#include "pvr_debug.h" +#include "osfunc.h" +#include "allocmem.h" +#include "pvrsrv_device.h" +#include "pvrsrv_memallocflags.h" +#include "syscommon.h" +#include "power.h" +#include "sysinfo.h" +#include "sysconfig.h" +#include "physheap.h" +#include "pci_support.h" +#include "interrupt_support.h" +#include "di_server.h" + +#define HOST_PCI_INIT_FLAGS 0 +static PHYS_HEAP_FUNCTIONS gsLocalPhysHeapFuncs = +{ + FtCpuPAddrToDevPAddr, + FtDevPAddrToCpuPAddr, +}; + +PVRSRV_ERROR FtGetRegisters(SYS_DATA *psDevData, IMG_UINT32 ui32BaseNum, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Size, IMG_UINT64 *pvCpuPAddr) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_CPU_PHYADDR sCpuPBase; + + sCpuPBase.uiAddr = OSPCIAddrRangeStart(psDevData->hRGXPCI, ui32BaseNum) + ui32Offset; + + eError = OSPCIRequestAddrRegion(psDevData->hRGXPCI, ui32BaseNum, ui32Offset, ui32Size); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Failed to request the system address region(BAR %d)", + __func__, ui32BaseNum)); + goto ErrorReturn; + } + PVR_LOG(("-----gpu reg physical addr is %llx",sCpuPBase.uiAddr)); + + *pvCpuPAddr = sCpuPBase.uiAddr; + + return eError; + +ErrorReturn: + return eError; +} + +void FtReleaseRegisters(SYS_DATA * psDevData, IMG_UINT64 PBase, IMG_UINT32 ui32BaseNum, IMG_UINT32 ui32Offset, 
IMG_UINT32 ui32Size) +{ + OSPCIReleaseAddrRegion(psDevData->hRGXPCI, ui32BaseNum, ui32Offset, ui32Size); +} + +/* + * Acquire memory-mapped base address of GPU vram by querying PCI BAR + */ +static PVRSRV_ERROR AcquireLocalMappableMemory(IMG_HANDLE hPCI, + IMG_PUINT64 pui64MemCpuPAddr, + IMG_PUINT64 pui64MemSize) +{ + IMG_UINT16 uiVendorID, uiDevID; + IMG_UINT64 uiBarSize; + + PVRSRV_ERROR eError; + + OSPCIGetVendorDeviceIDs(hPCI, &uiVendorID, &uiDevID); + if (uiDevID != SYS_RGX_DEV_DEVICE_ID) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Unexpected device ID 0x%X", + __func__, uiDevID)); + + return PVRSRV_ERROR_PCI_DEVICE_NOT_FOUND; + } + + uiBarSize = OSPCIAddrRangeLen(hPCI, SYS_DEV_MEM_PCI_BASENUM); + + /* Reserve the address region for whole BAR */ + eError = OSPCIRequestAddrRegion(hPCI, SYS_DEV_MEM_PCI_BASENUM, 0, uiBarSize); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Device memory region not available", + __func__)); + + return eError; + } + + *pui64MemCpuPAddr = OSPCIAddrRangeStart(hPCI, SYS_DEV_MEM_PCI_BASENUM); + *pui64MemSize = uiBarSize; + + return PVRSRV_OK; +} + +static INLINE void ReleaseLocalMappableMemory(IMG_HANDLE hPCI, + IMG_UINT64 ui64MemCpuPAddr, + IMG_UINT64 ui64MemSize) +{ + PVR_UNREFERENCED_PARAMETER(ui64MemCpuPAddr); + OSPCIReleaseAddrRegion(hPCI, SYS_DEV_MEM_PCI_BASENUM, 0, ui64MemSize); + OSPCIReleaseResourceMTRRs(hPCI, SYS_DEV_MEM_PCI_BASENUM); +} + +static PVRSRV_ERROR PhysHeapsCreate(SYS_DATA *psDevData, + PVRSRV_DEVICE_CONFIG *psDevConfig, + PHYS_HEAP_CONFIG **ppasPhysHeapsOut, + IMG_UINT32 *puiPhysHeapCountOut) +{ + PHYS_HEAP_CONFIG *pasPhysHeaps; + IMG_UINT32 ui32NextHeapID = 0; + +#if 1 /* use VRAM */ + IMG_UINT32 uiHeapCount = 2 ;//Sys+LMA heap + + + IMG_UINT64 ui64MappedMemSize =psDevData->ui64MappedMemSize ; + IMG_UINT64 ui64MappedMemCpuPAddr =psDevData->ui64MappedMemCpuPAddr; + IMG_UINT64 ui64MappedMemDevPAddr= ui64MappedMemCpuPAddr; + + pasPhysHeaps = OSAllocZMem(sizeof(*pasPhysHeaps) * uiHeapCount); + if 
(!pasPhysHeaps) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /*-------configure GPU local vram heap --------*/ + pasPhysHeaps[ui32NextHeapID].eType = PHYS_HEAP_TYPE_LMA; + pasPhysHeaps[ui32NextHeapID].pszPDumpMemspaceName = "LMA"; + pasPhysHeaps[ui32NextHeapID].psMemFuncs = &gsLocalPhysHeapFuncs; + pasPhysHeaps[ui32NextHeapID].ui32UsageFlags = PHYS_HEAP_USAGE_GPU_LOCAL; + + pasPhysHeaps[ui32NextHeapID].sStartAddr.uiAddr = ui64MappedMemCpuPAddr; + pasPhysHeaps[ui32NextHeapID].sCardBase.uiAddr = ui64MappedMemDevPAddr; + pasPhysHeaps[ui32NextHeapID].uiSize = ui64MappedMemSize; + ui32NextHeapID++; + PVR_LOG(("-----Set GPU Local heap @%llx------",ui64MappedMemCpuPAddr)); + + /*----------configure host memory heap--------------*/ + pasPhysHeaps[ui32NextHeapID].eType = PHYS_HEAP_TYPE_UMA; + pasPhysHeaps[ui32NextHeapID].pszPDumpMemspaceName = "SYSTEM"; + pasPhysHeaps[ui32NextHeapID].psMemFuncs = &gsLocalPhysHeapFuncs; + pasPhysHeaps[ui32NextHeapID].ui32UsageFlags = PHYS_HEAP_USAGE_CPU_LOCAL; + +#else /* use System RAM */ + IMG_UINT32 uiHeapCount = 1; + + pasPhysHeaps = OSAllocZMem(sizeof(*pasPhysHeaps) * uiHeapCount); + if (!pasPhysHeaps) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + pasPhysHeaps[ui32NextHeapID].eType = PHYS_HEAP_TYPE_UMA; + pasPhysHeaps[ui32NextHeapID].pszPDumpMemspaceName = "SYSTEM"; + pasPhysHeaps[ui32NextHeapID].psMemFuncs = &gsLocalPhysHeapFuncs; + pasPhysHeaps[ui32NextHeapID].ui32UsageFlags = PHYS_HEAP_USAGE_GPU_LOCAL; +#endif + + *ppasPhysHeapsOut = pasPhysHeaps; + *puiPhysHeapCountOut = uiHeapCount; + + return PVRSRV_OK; +} + +static void PhysHeapsDestroy(PHYS_HEAP_CONFIG *pasPhysHeaps) +{ + OSFreeMem(pasPhysHeaps); +} + +static void DeviceConfigDestroy(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + if (psDevConfig->pszVersion) + { + OSFreeMem(psDevConfig->pszVersion); + } + PhysHeapsDestroy(psDevConfig->pasPhysHeaps); + OSFreeMem(psDevConfig); +} + +void __iomem *gpu_reg_base; + +/* sysfs cur_temp */ +#define FT_GPU_SE_GPU_CUR_TEMP 0x10104c +static 
DI_ENTRY *gpsCurTempDIEntry; +static int CurTempDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) +{ + int val = 0; + + if(!gpu_reg_base){ + PVR_LOG(("----gpu reg base error!\n")); + return -1; + } + + val = readl(gpu_reg_base + FT_GPU_SE_GPU_CUR_TEMP); + + DIPrintf(psEntry, "%d\n",val); + + return 0; +} + +#if defined(SUPPORT_LINUX_DVFS) + +#define FT_GPU_STATUS 0x101404 +#define FT_GPU_PLL_REG 0x101408 +#define FT_GPU_SE_GPU_OPP_CONFIG 0x101048 +#define OPP_CONFIG_COUNT 2 +#define LEVEL_COUNT 4 + +static const IMG_OPP asOPPTable[OPP_CONFIG_COUNT][LEVEL_COUNT] = +{ + { + { 800, 100000000}, + { 800, 200000000}, + { 800, 300000000}, + { 800, 600000000}, + }, + { + { 800, 100000000}, + { 800, 200000000}, + { 800, 300000000}, + { 800, 400000000}, + } +}; +static int current_opp_table; + +static void se_set_gpu_freq(int point) +{ + int val = 0; + + val = readl(gpu_reg_base + FT_GPU_STATUS); + val |= 0x10000; + + writel(point,gpu_reg_base + FT_GPU_PLL_REG); + writel(val,gpu_reg_base + FT_GPU_STATUS); + + return; +} + +static int wait_se_gpu_freq_change_complete(void) +{ + int val = 0; + int timeout = 100; + int res=0; + + /* poll bit[17] of GPU status&ctrl register 0x404 to tell that freq change complete + and then clear bit[17] */ + do{ + timeout--; + mdelay(1); + val = readl(gpu_reg_base + FT_GPU_STATUS); + if ((val & 0x20000)) { + break; + } + }while(timeout>0); + + if(timeout == 0){ + pr_err("---freq change timeout----\n"); + res = -1; + } + + /* clear freq change request bit [16]*/ + val=readl(gpu_reg_base + FT_GPU_STATUS); + val |= 0x30000; + writel(val, gpu_reg_base + FT_GPU_STATUS); + + return res; +} + +int query_freq_point(IMG_UINT32 freq) +{ + int i; + + for(i=0;ichan_id == *val) + return true; + else + return false; +} + +static void * FTgetDMAChan(PVRSRV_DEVICE_CONFIG *psDevConfig, char *name) +{ + struct device* dev = (struct device*) psDevConfig->pvOSDevice; + struct dma_chan *chan; + dma_cap_mask_t mask; + u32 chan_id; + + dma_cap_zero(mask); + 
dma_cap_set(DMA_MEMCPY | DMA_INTERRUPT | DMA_TX_TYPE_END , mask); + + if(!(strcmp("tx",name))){ + chan_id = 0; + } + else if(!(strcmp("rx",name))){ + chan_id = 1; + } + else { + dev_err(dev, "Wrong DMA channel name\n"); + return NULL; + } + + chan = dma_request_channel(mask, filter, &chan_id); + + printk("----request dma channel:%p----\n",chan); +// chan = dma_request_chan(dev, name); + if (IS_ERR(chan)) { + dev_err(dev, "request_channel failed for %s (%ld)\n", + name, PTR_ERR(chan)); + return NULL; + } + + printk("----succeed to -request dma channel:%p----\n",chan); + return chan; +} + + +static void FTFreeDMAChan(PVRSRV_DEVICE_CONFIG *psDevConfig, void *channel) +{ + struct dma_chan *chan = (struct dma_chan*) channel; + + if(chan) + dma_release_channel(chan); +} + +static void FTDevPhysAddr2DmaAddr(PVRSRV_DEVICE_CONFIG *psDevConfig, + IMG_DMA_ADDR *psDmaAddr, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_BOOL *pbValid, + IMG_UINT32 ui32NumAddr, + IMG_BOOL bSparseAlloc) +{ + IMG_UINT32 ui32Idx; + + /* Fast path */ + if (!bSparseAlloc) + { + /* In FT X100 GPU, DMA address space is the same as host CPU */ + psDmaAddr->uiAddr = psDevPAddr->uiAddr; + } + else + { + for (ui32Idx = 0; ui32Idx < ui32NumAddr; ui32Idx++) + { + if (pbValid[ui32Idx]) + { + psDmaAddr[ui32Idx].uiAddr = psDevPAddr[ui32Idx].uiAddr; + } + else + { + /* Invalid DMA address marker */ + psDmaAddr[ui32Idx].uiAddr = ~((IMG_UINT64)0x0); + } + } + } +} +#endif + +#if defined(SUPPORT_LMA_SUSPEND_TO_RAM) +/* #define _DBG(...) PVR_LOG((__VA_ARGS__)) */ +#define _DBG(...) 
+ +static PVRSRV_ERROR PrePower(IMG_HANDLE hSysData, + PVRSRV_SYS_POWER_STATE eNewPowerState, + PVRSRV_SYS_POWER_STATE eCurrentPowerState, + IMG_BOOL bForced, + IMG_BOOL bPreserveRam) +{ + SYS_DATA *psSysData = (SYS_DATA *) hSysData; + IMG_DEV_PHYADDR sDevPAddr = {0}; + IMG_UINT64 uiHeapTotalSize, uiHeapUsedSize, uiHeapFreeSize; + IMG_UINT64 uiSize = 0, uiOffset = 0; + PVRSRV_ERROR eError; + + PVR_LOG(("----before change gpu power state----")); + if (eNewPowerState == eCurrentPowerState || + eNewPowerState != PVRSRV_SYS_POWER_STATE_OFF || !bPreserveRam) + { + return PVRSRV_OK; + } + + eError = LMA_HeapIteratorCreate(psSysData->devconfig->psDevNode, + PHYS_HEAP_USAGE_GPU_LOCAL, + &psSysData->psHeapIter); + PVR_LOG_GOTO_IF_ERROR(eError, "LMA_HeapIteratorCreate", return_error); + + eError = LMA_HeapIteratorGetHeapStats(psSysData->psHeapIter, &uiHeapTotalSize, + &uiHeapUsedSize); + PVR_LOG_GOTO_IF_ERROR(eError, "LMA_HeapIteratorGetHeapStats", + return_error); + uiHeapFreeSize = uiHeapTotalSize - uiHeapUsedSize; + + _DBG("(%s()) heap stats: total=0x%" IMG_UINT64_FMTSPECx ", " + "used=0x%" IMG_UINT64_FMTSPECx ", free=0x%" IMG_UINT64_FMTSPECx, + __func__, uiHeapTotalSize, uiHeapUsedSize, uiHeapFreeSize); + + psSysData->pvS3Buffer = OSAllocMem(uiHeapUsedSize); + PVR_LOG_GOTO_IF_NOMEM(psSysData->pvS3Buffer, eError, destroy_iterator); + + while (LMA_HeapIteratorNext(psSysData->psHeapIter, &sDevPAddr, &uiSize)) + { + void *pvCpuVAddr; + IMG_CPU_PHYADDR sCpuPAddr = {0}; + + if (uiOffset + uiSize > uiHeapUsedSize) + { + PVR_DPF((PVR_DBG_ERROR, "uiOffset = %" IMG_UINT64_FMTSPECx ", " + "uiSize = %" IMG_UINT64_FMTSPECx, uiOffset, uiSize)); + + PVR_LOG_GOTO_WITH_ERROR("LMA_HeapIteratorNext", eError, + PVRSRV_ERROR_INVALID_OFFSET, + free_buffer); + } + + FtDevPAddrToCpuPAddr(psSysData, 1, &sCpuPAddr, + &sDevPAddr); + + pvCpuVAddr = OSMapPhysToLin(sCpuPAddr, uiSize, + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC); + if (pvCpuVAddr == NULL) + { + PVR_LOG_GOTO_WITH_ERROR("OSMapPhysToLin", 
eError, + PVRSRV_ERROR_BAD_MAPPING, free_buffer); + } + + _DBG("(%s()) iterator: dev_paddr=%px, cpu_paddr=%px, cpu_vaddr=%px, " + "size=0x%05" IMG_UINT64_FMTSPECx, __func__, + (void *) sCpuPAddr.uiAddr, + pvCpuVAddr, (IMG_BYTE *) psSysData->pvS3Buffer + uiOffset, + uiSize); + + /* copy memory */ + memcpy((IMG_BYTE *) psSysData->pvS3Buffer + uiOffset, pvCpuVAddr, + uiSize); + /* and now poison it */ + memset(pvCpuVAddr, 0x9b, uiSize); + + OSWriteMemoryBarrier(); + + uiOffset += uiSize; + + OSUnMapPhysToLin(pvCpuVAddr, uiSize); + } + + return PVRSRV_OK; + +free_buffer: + OSFreeMem(psSysData->pvS3Buffer); + psSysData->pvS3Buffer = NULL; +destroy_iterator: + LMA_HeapIteratorDestroy(psSysData->psHeapIter); + psSysData->psHeapIter = NULL; +return_error: + return eError; +} + +static PVRSRV_ERROR PostPower(IMG_HANDLE hSysData, + PVRSRV_SYS_POWER_STATE eNewPowerState, + PVRSRV_SYS_POWER_STATE eCurrentPowerState, + IMG_BOOL bForced, + IMG_BOOL bPreserveRam) +{ + SYS_DATA *psSysData = (SYS_DATA *) hSysData; + IMG_DEV_PHYADDR sDevPAddr = {0}; + IMG_UINT64 uiSize = 0, uiOffset = 0; + PVRSRV_ERROR eError; + IMG_UINT32 value; + + PVR_LOG(("---after change gpu power state----")); + if (eNewPowerState == eCurrentPowerState || + eCurrentPowerState != PVRSRV_SYS_POWER_STATE_OFF || + psSysData->pvS3Buffer == NULL || !bPreserveRam) + { + return PVRSRV_OK; + } + + /*Configure Address Translate Region for FT GPU */ + value = (psSysData->ui64MappedMemCpuPAddr) >> 22; + writel(value,psSysData->reg_base + FT_GPU_AT_REGION0_SRC_ADDR); + value = (psSysData->ui64MappedMemSize) >> 22 | 0x80000000; + writel(value,psSysData->reg_base + FT_GPU_AT_REGION0_SIZE); +#if 1 // to be modified after se is stable + value = 0x8000; + writel(value,psSysData->reg_base + FT_GPU_AT_REGION0_SIZE +4); +#endif + + eError = LMA_HeapIteratorReset(psSysData->psHeapIter); + PVR_LOG_GOTO_IF_ERROR(eError, "LMA_HeapIteratorReset", free_buffer); + + while (LMA_HeapIteratorNext(psSysData->psHeapIter, &sDevPAddr, &uiSize)) 
+ { + void *pvCpuVAddr; + IMG_CPU_PHYADDR sCpuPAddr = {0}; + + FtDevPAddrToCpuPAddr(psSysData->psDevConfig, 1, &sCpuPAddr, + &sDevPAddr); + + pvCpuVAddr = OSMapPhysToLin(sCpuPAddr, uiSize, + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC); + + _DBG("(%s()) iterator: dev_paddr=%px, cpu_paddr=%px, cpu_vaddr=%px, " + "size=0x%05" IMG_UINT64_FMTSPECx, __func__, + (void *) sCpuPAddr.uiAddr, + pvCpuVAddr, (IMG_BYTE *) psSysData->pvS3Buffer + uiOffset, + uiSize); + + /* copy memory */ + memcpy(pvCpuVAddr, (IMG_BYTE *) psSysData->pvS3Buffer + uiOffset, + uiSize); + + uiOffset += uiSize; + + OSUnMapPhysToLin(pvCpuVAddr, uiSize); + } + + LMA_HeapIteratorDestroy(psSysData->psHeapIter); + psSysData->psHeapIter = NULL; + + OSFreeMem(psSysData->pvS3Buffer); + psSysData->pvS3Buffer = NULL; + + return PVRSRV_OK; + +free_buffer: + OSFreeMem(psSysData->pvS3Buffer); + psSysData->pvS3Buffer = NULL; + + return eError; +} +#endif /* defined(SUPPORT_LMA_SUSPEND_TO_RAM) */ + +static PVRSRV_ERROR DeviceConfigCreate(void *pvOSDevice,SYS_DATA *psSysData, + PVRSRV_DEVICE_CONFIG **ppsDevConfigOut) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig; + RGX_DATA *psRGXData; + RGX_TIMING_INFORMATION *psRGXTimingInfo; + PHYS_HEAP_CONFIG *pasPhysHeaps; + IMG_UINT32 uiPhysHeapCount; + PVRSRV_ERROR eError; + IMG_UINT32 tmp; + + psDevConfig = OSAllocZMem(sizeof(*psDevConfig) + sizeof(*psRGXData) + sizeof(*psRGXTimingInfo)); + if (!psDevConfig){ + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + psRGXData = (RGX_DATA *)((IMG_CHAR *)psDevConfig + sizeof(*psDevConfig)); + psRGXTimingInfo = (RGX_TIMING_INFORMATION *)((IMG_CHAR *)psRGXData + sizeof(*psRGXData)); + + eError = PhysHeapsCreate(psSysData, psDevConfig, &pasPhysHeaps, &uiPhysHeapCount); + if (eError != PVRSRV_OK){ + goto ErrorFreeDevConfig; + } + + /* Setup RGX specific timing data */ + psRGXTimingInfo->ui32CoreClockSpeed = FT_GPU_CLOCK;//tc_core_clock_speed(&psSysData->pdev->dev) * 6; + psRGXTimingInfo->bEnableActivePM = IMG_FALSE; + psRGXTimingInfo->bEnableRDPowIsland = 
IMG_FALSE; + psRGXTimingInfo->ui32ActivePMLatencyms = SYS_RGX_ACTIVE_POWER_LATENCY_MS; + + /* Set up the RGX data */ + psRGXData->psRGXTimingInfo = psRGXTimingInfo; + + /* Setup the device config */ + psDevConfig->pvOSDevice = pvOSDevice; + psDevConfig->pszName = "ft"; + psDevConfig->pszVersion = NULL; + + psDevConfig->sRegsCpuPBase.uiAddr = psSysData->pvSystemRegCpuPBase; + psDevConfig->ui32RegsSize = psSysData->uiSystemRegSize; + + psDevConfig->ui32IRQ = psSysData->ui32IRQ; + psDevConfig->eCacheSnoopingMode =PVRSRV_DEVICE_SNOOP_NONE; + psDevConfig->pasPhysHeaps = pasPhysHeaps; + psDevConfig->ui32PhysHeapCount = uiPhysHeapCount; + psDevConfig->hDevData = psRGXData; + psDevConfig->hSysData = psSysData; + psSysData->devconfig = psDevConfig; + +// config DVFS +#if defined(SUPPORT_LINUX_DVFS) + /* query se for dvfs opp config*/ + tmp = (readl(gpu_reg_base + FT_GPU_SE_GPU_OPP_CONFIG)>>8) & 0xFF ; + /* ensure that the opp config is within the defined scope */ + if (tmp >= OPP_CONFIG_COUNT) tmp = 0; + current_opp_table = tmp; + psDevConfig->sDVFS.sDVFSDeviceCfg.bIdleReq = IMG_TRUE; + psDevConfig->sDVFS.sDVFSDeviceCfg.pasOPPTable = asOPPTable[current_opp_table]; + psDevConfig->sDVFS.sDVFSDeviceCfg.ui32OPPTableSize = LEVEL_COUNT; + psDevConfig->sDVFS.sDVFSDeviceCfg.pfnSetFrequency = SetFrequency; + psDevConfig->sDVFS.sDVFSDeviceCfg.pfnSetVoltage = SetVoltage; + psDevConfig->sDVFS.sDVFSDeviceCfg.ui32PollMs = FT_DVFS_SWITCH_INTERVAL; + psDevConfig->sDVFS.sDVFSDeviceCfg.bIdleReq = IMG_TRUE; + psDevConfig->sDVFS.sDVFSGovernorCfg.ui32UpThreshold = 90; + psDevConfig->sDVFS.sDVFSGovernorCfg.ui32DownDifferential = 10; +#endif + +#if defined(SUPPORT_DMA_TRANSFER) + /* DMA channel config */ + psDevConfig->pfnSlaveDMAGetChan = FTgetDMAChan; + psDevConfig->pfnSlaveDMAFreeChan = FTFreeDMAChan; + psDevConfig->pfnDevPhysAddr2DmaAddr = FTDevPhysAddr2DmaAddr; + psDevConfig->pszDmaTxChanName = "tx"; + psDevConfig->pszDmaRxChanName = "rx"; + psDevConfig->bHasDma = true; + 
psDevConfig->ui32DmaTransferUnit = 4; + psDevConfig->ui32DmaAlignment = 32; +#endif + psDevConfig->bHasFBCDCVersion31 = IMG_FALSE; + +#if defined(SUPPORT_LMA_SUSPEND_TO_RAM) + psDevConfig->pfnPrePowerState = PrePower; + psDevConfig->pfnPostPowerState = PostPower; +#endif /* defined(SUPPORT_LMA_SUSPEND_TO_RAM) */ + + + *ppsDevConfigOut = psDevConfig; + + + return PVRSRV_OK; + +//ErrorDestroyDevConfig: + DeviceConfigDestroy(psDevConfig); +ErrorFreeDevConfig: + OSFreeMem(psDevConfig); + return eError; +} + +PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig) +{ + PVRSRV_DEVICE_CONFIG *psDevConfig; + SYS_DATA *psDevData; + PVRSRV_ERROR eError; + IMG_UINT32 value; + + PVR_LOG(("-----SysDevInit for pci gpu------pci flag:%x",HOST_PCI_INIT_FLAGS)); + PVR_ASSERT(pvOSDevice); + + dma_set_mask(pvOSDevice, DMA_BIT_MASK(44)); + + psDevData = OSAllocZMem(sizeof(*psDevData)); + if (psDevData == NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + psDevData->hRGXPCI = OSPCISetDev(TO_PCI_COOKIE(pvOSDevice), HOST_PCI_INIT_FLAG_MSI| HOST_PCI_INIT_FLAG_BUS_MASTER); + if (!psDevData->hRGXPCI) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire PCI device", + __func__)); + eError = PVRSRV_ERROR_PCI_DEVICE_NOT_FOUND; + goto ErrorFreeDevData; + } + + psDevData->uiSystemRegSize = SYS_FT_REG_SIZE; + + eError = FtGetRegisters(psDevData, SYS_FT_REG_PCI_BASENUM, SYS_FT_REG_SYS_OFFSET, + psDevData->uiSystemRegSize, &psDevData->pvSystemRegCpuPBase); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to map system registers", + __func__)); + goto ErrorPCIReleaseDevice; + } + + PVR_LOG(( "----- GPU reg base is %llx ",psDevData->pvSystemRegCpuPBase)); + + eError = AcquireLocalMappableMemory(psDevData->hRGXPCI, + &psDevData->ui64MappedMemCpuPAddr, + &psDevData->ui64MappedMemSize); + if (eError != PVRSRV_OK) + { + goto ErrorFtRegMap; + } + + PVR_LOG(("----Detected ft-gpu mappable memory base: 0x%llx", psDevData->ui64MappedMemCpuPAddr)); + 
PVR_LOG(("----Detected ft-gpu mappable memory size: 0x%llx", psDevData->ui64MappedMemSize)); + + /* ----configure pci address tranlate register to map vmem to cpu address specified by pci bar2 -----*/ + psDevData->reg_base = ioremap(psDevData->pvSystemRegCpuPBase,SYS_FT_REG_SIZE); + if(!psDevData->reg_base){ + PVR_LOG(("----failed to ioremap gpu reg space---- ")); + goto ErrorFtRegMap; + } + gpu_reg_base = psDevData->reg_base; + + /* Configure Address Translate Region for FT GPU */ + value = (psDevData->ui64MappedMemCpuPAddr) >> 22; + writel(value,psDevData->reg_base + FT_GPU_AT_REGION0_SRC_ADDR); + value = (psDevData->ui64MappedMemSize) >> 22 | 0x80000000; + writel(value,psDevData->reg_base + FT_GPU_AT_REGION0_SIZE); + +#if 1 // to be modified after se is stable + /*Set GPU vram gbus base addr to 0x20_00000000*/ + value = 0x8000; + writel(value,psDevData->reg_base + FT_GPU_AT_REGION0_SIZE +4); +#endif + + eError = OSPCIIRQ(psDevData->hRGXPCI, &psDevData->ui32IRQ); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Couldn't get IRQ", __func__)); + + goto ErrorReleaseLocalMappableMemory; + } + PVR_LOG(("----Detected ft-gpu irq is : %x", psDevData->ui32IRQ)); + + eError = DeviceConfigCreate(pvOSDevice, psDevData, &psDevConfig); + if (eError != PVRSRV_OK) + { + goto ErrorOSUninstallSystemLISR; + } + + *ppsDevConfig = psDevConfig; + + /* Add sysfs interface cur_temp to get current temperature of GPU */ + { + DI_ITERATOR_CB sIterator = { + .pfnShow = CurTempDIShow + }; + eError = DICreateEntry("cur_temp", NULL, &sIterator, NULL, + DI_ENTRY_TYPE_GENERIC, &gpsCurTempDIEntry); + if (eError !=PVRSRV_OK) { + PVR_LOG(("----failed to create sysfs cur_temp---- ")); + if (gpsCurTempDIEntry != NULL){ + DIDestroyEntry(gpsCurTempDIEntry); + } + } + } + + return PVRSRV_OK; + +ErrorOSUninstallSystemLISR: + OSUninstallSystemLISR(psDevData->hLISR); + +ErrorReleaseLocalMappableMemory: + ReleaseLocalMappableMemory(psDevData->hRGXPCI, + psDevData->ui64MappedMemCpuPAddr, + 
psDevData->ui64MappedMemSize); + +ErrorFtRegMap: + FtReleaseRegisters(psDevData, psDevData->pvSystemRegCpuPBase, SYS_FT_REG_PCI_BASENUM, + SYS_FT_REG_SYS_OFFSET, psDevData->uiSystemRegSize); + +ErrorPCIReleaseDevice: + OSPCIReleaseDev(psDevData->hRGXPCI); + +ErrorFreeDevData: + OSFreeMem(psDevData); + + return eError; +} + +void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + SYS_DATA *psDevData = (SYS_DATA *)psDevConfig->hSysData; + + pr_info("DevDeInit"); + + if(psDevData->reg_base) + iounmap(psDevData->reg_base); + + DeviceConfigDestroy(psDevConfig); + + ReleaseLocalMappableMemory(psDevData->hRGXPCI, + psDevData->ui64MappedMemCpuPAddr, + psDevData->ui64MappedMemSize); + +#if !defined(VIRTUAL_PLATFORM) + FtReleaseRegisters(psDevData, psDevData->pvSystemRegCpuPBase, SYS_FT_REG_PCI_BASENUM,SYS_FT_REG_SYS_OFFSET, psDevData->uiSystemRegSize); +#endif + + OSPCIReleaseDev(psDevData->hRGXPCI); + OSFreeMem(psDevData); +} + +/* + * convert CPU address to device physical address + */ +void FtCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_CPU_PHYADDR *psCpuPAddr) +{ + /* Optimise common case */ + psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr; + + if (ui32NumOfAddr > 1) + { + IMG_UINT32 ui32Idx; + for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx) + { + psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr; + } + } +} + +void FtDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_CPU_PHYADDR *psCpuPAddr, + IMG_DEV_PHYADDR *psDevPAddr) +{ + PVR_UNREFERENCED_PARAMETER(hPrivData); + + /* Optimise common case */ + psCpuPAddr[0].uiAddr = psDevPAddr[0].uiAddr; + if (ui32NumOfAddr > 1) + { + IMG_UINT32 ui32Idx; + for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx) + { + psCpuPAddr[ui32Idx].uiAddr = psDevPAddr[ui32Idx].uiAddr; + } + } +} + +IMG_UINT32 FtLocalGetRegionId(IMG_HANDLE hPrivData, + PVRSRV_MEMALLOCFLAGS_T uiAllocationFlags) +{ + PVR_UNREFERENCED_PARAMETER(hPrivData); + + 
//只有一个region,直接返回0 + return 0; +} + +typedef struct { + void __iomem *regbase; + int ui32IRQ; + void *pvData; + PFN_LISR pfnLISR; +} FT_LISR_DATA; + +static irqreturn_t FTLISR(int iIrq, void *pvData) +{ + FT_LISR_DATA *psdata = pvData; + int handled=0; + + /* ----- Disable GPU device interrupt */ + + if (psdata->pfnLISR(psdata->pvData)) + handled = 1; + /* + * Clear MSI status register + * FT_GPU_AT_REGION0_SRC_ADDR + 0x420 ~ 0x43c: OS0 ~ 7 + */ + writel(0x1,psdata->regbase + FT_GPU_AT_REGION0_SRC_ADDR + 0x420); +#if 0 + writel(0x1,psdata->regbase + FT_GPU_AT_REGION0_SRC_ADDR + 0x424); + writel(0x1,psdata->regbase + FT_GPU_AT_REGION0_SRC_ADDR + 0x428); + writel(0x1,psdata->regbase + FT_GPU_AT_REGION0_SRC_ADDR + 0x42c); + writel(0x1,psdata->regbase + FT_GPU_AT_REGION0_SRC_ADDR + 0x430); + writel(0x1,psdata->regbase + FT_GPU_AT_REGION0_SRC_ADDR + 0x434); + writel(0x1,psdata->regbase + FT_GPU_AT_REGION0_SRC_ADDR + 0x438); + writel(0x1,psdata->regbase + FT_GPU_AT_REGION0_SRC_ADDR + 0x43c); +#endif + /* ------ Enable GPU device interrupt */ + + if(handled){ + return IRQ_HANDLED; + } + else{ + return IRQ_NONE; + } +} + +PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData, + IMG_UINT32 ui32IRQ, + const IMG_CHAR *pszName, + PFN_LISR pfnLISR, + void *pvData, + IMG_HANDLE *phLISRData) +{ + FT_LISR_DATA *ft_lisrdata; + + if (pfnLISR == NULL || pvData == NULL) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + ft_lisrdata = OSAllocMem(sizeof(*ft_lisrdata)); + if (ft_lisrdata == NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + ft_lisrdata->regbase = gpu_reg_base; + ft_lisrdata->ui32IRQ = ui32IRQ; + ft_lisrdata->pfnLISR = pfnLISR; + ft_lisrdata->pvData = pvData; + + + if (request_irq(ui32IRQ, FTLISR, IRQF_TRIGGER_HIGH | IRQF_SHARED, pszName, ft_lisrdata)) + { + kfree(ft_lisrdata); + + return PVRSRV_ERROR_UNABLE_TO_REGISTER_ISR_HANDLER; + } + + *phLISRData = (IMG_HANDLE) ft_lisrdata; + + return PVRSRV_OK; + +/* PVR_UNREFERENCED_PARAMETER(hSysData); + return 
OSInstallSystemLISR(phLISRData, ui32IRQ, pszName, pfnLISR, pvData, + SYS_IRQ_FLAG_TRIGGER_HIGH | SYS_IRQ_FLAG_SHARED); +*/ +} + +PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData) +{ + FT_LISR_DATA *ft_lisrdata = hLISRData; + + free_irq(ft_lisrdata->ui32IRQ, ft_lisrdata); + + OSFreeMem(ft_lisrdata); + + return PVRSRV_OK; +// return OSUninstallSystemLISR(hLISRData); +} + +PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig, + DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, + void *pvDumpDebugFile) +{ + PVR_UNREFERENCED_PARAMETER(psDevConfig); + PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf); + PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile); + return PVRSRV_OK; +} + +/****************************************************************************** + End of file (sysconfig.c) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/sysconfig.h b/drivers/gpu/drm/phytium/octopus/sysconfig.h new file mode 100644 index 000000000000..29ea249c607c --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/sysconfig.h @@ -0,0 +1,66 @@ +/* file: */ + +#include "pvrsrv_device.h" +#include "rgxdevice.h" + +#if !defined(__SYSCCONFIG_H__) +#define __SYSCCONFIG_H__ + +/* ----FT GPU PCI BAR num for register space-- */ +#define SYS_FT_REG_PCI_BASENUM 0 +// FT GPU register space offset +#define SYS_FT_REG_SYS_OFFSET 0 +#define SYS_FT_REG_SIZE 0x200000 + +// ----FT GPU PCI BAR num for memory mapped space +#define SYS_DEV_MEM_PCI_BASENUM 2 + +//----FT GPU Address Translate Register Region0 +#define FT_GPU_AT_REGION0_SRC_ADDR 0x101000 +#define FT_GPU_AT_REGION0_SIZE 0x101004 + +#define FT_GPU_CLOCK (800*1000*1000) +#define SYS_RGX_ACTIVE_POWER_LATENCY_MS (100) + +#define FT_DVFS_SWITCH_INTERVAL (300) + +typedef struct _SYS_DATA_ SYS_DATA; + +struct _SYS_DATA_ +{ + PVRSRV_DEVICE_CONFIG *devconfig; + IMG_HANDLE hRGXPCI; + IMG_UINT64 pvSystemRegCpuPBase; //GPU寄存器空间映射到CPU内核空间的physical地址 + void __iomem *reg_base; //GPU + 
size_t uiSystemRegSize; //GPU寄存器空间的大小 + IMG_UINT64 ui64MappedMemCpuPAddr; //GPU显存映射到CPU空间的物理基址 + IMG_UINT64 ui64MappedMemSize; //GPU显存大小 + IMG_HANDLE hLISR; + IMG_UINT32 ui32IRQ; +#if defined(SUPPORT_LMA_SUSPEND_TO_RAM) + PVRSRV_DEVICE_CONFIG *psDevConfig; + + PHYS_HEAP_ITERATOR *psHeapIter; + void *pvS3Buffer; +#endif /* defined(SUPPORT_LMA_SUSPEND_TO_RAM) && defined(__x86_64__) */ +}; + +/* Helpers for getting DDR/GPU/PLL clock speed */ +IMG_UINT32 SysGetFtCoreClockSpeed(void); + +/* Phys heap funcs*/ +void FtCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_DEV_PHYADDR *psDevPAddr, + IMG_CPU_PHYADDR *psCpuPAddr); +void FtDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, + IMG_UINT32 ui32NumOfAddr, + IMG_CPU_PHYADDR *psCpuPAddr, + IMG_DEV_PHYADDR *psDevPAddr); +IMG_UINT32 FtLocalGetRegionId(IMG_HANDLE hPrivData, + PVRSRV_MEMALLOCFLAGS_T uiAllocationFlags); + +PVRSRV_ERROR FtGetRegisters(SYS_DATA *psDevData, IMG_UINT32 ui32BaseNum, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Size, IMG_UINT64 * pvCpuPAddr); +void FtReleaseRegisters(SYS_DATA * psDevData, IMG_UINT64 PBase, IMG_UINT32 ui32BaseNum, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Size); + +#endif //__SYSCCONFIG_H__ diff --git a/drivers/gpu/drm/phytium/octopus/sysconfig_cmn.c b/drivers/gpu/drm/phytium/octopus/sysconfig_cmn.c new file mode 100644 index 000000000000..a6cf4e6cfd07 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/sysconfig_cmn.c @@ -0,0 +1,132 @@ +/*************************************************************************/ /*! +@File +@Title Sysconfig layer common to all platforms +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implements system layer functions common to all platforms +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv.h" +#include "pvrsrv_device.h" +#include "syscommon.h" +#include "pvr_debug.h" + +void SysRGXErrorNotify(IMG_HANDLE hSysData, + PVRSRV_ROBUSTNESS_NOTIFY_DATA *psErrorData) +{ + PVR_UNREFERENCED_PARAMETER(hSysData); + +#if defined(PVRSRV_NEED_PVR_DPF) + { + IMG_UINT32 ui32DgbLvl; + + switch (psErrorData->eResetReason) + { + case RGX_CONTEXT_RESET_REASON_NONE: + case RGX_CONTEXT_RESET_REASON_GUILTY_LOCKUP: + case RGX_CONTEXT_RESET_REASON_INNOCENT_LOCKUP: + case RGX_CONTEXT_RESET_REASON_GUILTY_OVERRUNING: + case RGX_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING: + case RGX_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH: + case RGX_CONTEXT_RESET_REASON_GPU_ECC_OK: + case RGX_CONTEXT_RESET_REASON_FW_ECC_OK: + { + ui32DgbLvl = PVR_DBG_MESSAGE; + break; + } + case RGX_CONTEXT_RESET_REASON_GPU_ECC_HWR: + case RGX_CONTEXT_RESET_REASON_FW_EXEC_ERR: + { + ui32DgbLvl = PVR_DBG_WARNING; + break; + } + case RGX_CONTEXT_RESET_REASON_WGP_CHECKSUM: + case RGX_CONTEXT_RESET_REASON_TRP_CHECKSUM: + case RGX_CONTEXT_RESET_REASON_FW_ECC_ERR: + case RGX_CONTEXT_RESET_REASON_FW_WATCHDOG: + case RGX_CONTEXT_RESET_REASON_FW_PAGEFAULT: + case RGX_CONTEXT_RESET_REASON_HOST_WDG_FW_ERR: + { + ui32DgbLvl = PVR_DBG_ERROR; + break; + } + default: + { + PVR_ASSERT(false && "Unhandled reset reason"); + ui32DgbLvl = PVR_DBG_ERROR; + break; + } + } + + if (psErrorData->pid > 0) + { + PVRSRVDebugPrintf(ui32DgbLvl, __FILE__, __LINE__, " PID %d experienced error %d", + psErrorData->pid, psErrorData->eResetReason); + } + else + { + PVRSRVDebugPrintf(ui32DgbLvl, __FILE__, __LINE__, " Device experienced error %d", + psErrorData->eResetReason); + } + + switch (psErrorData->eResetReason) + { + case RGX_CONTEXT_RESET_REASON_WGP_CHECKSUM: + case RGX_CONTEXT_RESET_REASON_TRP_CHECKSUM: + { + PVRSRVDebugPrintf(ui32DgbLvl, __FILE__, __LINE__, " ExtJobRef 0x%x, DM %d", + 
psErrorData->uErrData.sChecksumErrData.ui32ExtJobRef, + psErrorData->uErrData.sChecksumErrData.eDM); + break; + } + default: + { + break; + } + } + } +#else + PVR_UNREFERENCED_PARAMETER(psErrorData); +#endif /* PVRSRV_NEED_PVR_DPF */ +} + +/****************************************************************************** + End of file (sysconfig_cmn.c) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/sysinfo.h b/drivers/gpu/drm/phytium/octopus/sysinfo.h new file mode 100644 index 000000000000..5923664bd9d9 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/sysinfo.h @@ -0,0 +1,44 @@ +/*************************************************************************/ /*! +@File +@Title System Description Header +@Copyright +@Description This header provides system-specific declarations and macros +@License Dual MIT/GPLv2 + + +*/ /**************************************************************************/ + +#if !defined(__SYSINFO_H__) +#define __SYSINFO_H__ + +// PCI vendor and device id for ft gpu +#define SYS_RGX_DEV_VENDOR_ID (0x1DB7) +#define SYS_RGX_DEV_DEVICE_ID (0xDC20) + +/*!< System specific poll/timeout details */ +/* +#if defined(VIRTUAL_PLATFORM) +#define MAX_HW_TIME_US (5000000) +#else +#define MAX_HW_TIME_US (50000000) +#endif + +#define FATAL_ERROR_DETECTION_POLL_MS (10000) +#if defined(VIRTUAL_PLATFORM) +#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (10000) +#else +#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (15000)//(10000) +#endif +*/ + +#define MAX_HW_TIME_US (50000000) +#define FATAL_ERROR_DETECTION_POLL_MS (10000) +#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (15000) +#define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT (3600000) +#define WAIT_TRY_COUNT (10000) + +#if defined(__linux__) +#define SYS_RGX_DEV_NAME "ft_gpu" +#endif + +#endif /* !defined(__SYSINFO_H__) */ diff --git a/drivers/gpu/drm/phytium/octopus/sysvalidation.h 
b/drivers/gpu/drm/phytium/octopus/sysvalidation.h new file mode 100644 index 000000000000..cbd9f446ae82 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/sysvalidation.h @@ -0,0 +1,63 @@ +/*************************************************************************/ /*! +@File +@Title Validation System APIs and structures +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description This header provides system-specific declarations and macros + needed for hardware validation +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(SYSVALIDATION_H) +#define SYSVALIDATION_H + +#if defined(SUPPORT_GPUVIRT_VALIDATION) +#include "img_types.h" +#include "rgxdefs_km.h" +#include "virt_validation_defs.h" + +void SysInitVirtInitialization(IMG_UINT64 aui64OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS], + IMG_UINT64 aui64OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]); + +#if defined(SUPPORT_GPUVIRT_VALIDATION) && defined(EMULATOR) +/* functions only used on rogue, but header defining them is common */ +void SysSetAxiProtOSid(IMG_UINT32 ui32OSid, IMG_BOOL bState); +void SysSetTrustedDeviceAceEnabled(void); +#endif +#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */ + +#endif /* !defined(SYSVALIDATION_H) */ diff --git a/drivers/gpu/drm/phytium/octopus/tlclient.c b/drivers/gpu/drm/phytium/octopus/tlclient.c new file mode 100644 index 000000000000..d25e89100fb1 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/tlclient.c @@ -0,0 +1,469 @@ +/*************************************************************************/ /*! +@File tlclient.c +@Title Services Transport Layer shared API +@Copyright Copyright (c) Phytium Information Technologies Ltd. 
All Rights Reserved +@Description Transport layer common API used in both clients and server +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/* DESIGN NOTE + * This transport layer consumer-role API was created as a shared API when a + * client wanted to read the data of a TL stream from within the KM server + * driver. This was in addition to the existing clients supported externally + * by the UM client library component via PVR API layer. + * This shared API is thus used by the PVR TL API in the client library and + * by clients internal to the server driver module. It depends on + * client entry points of the TL and DEVMEM bridge modules. These entry points + * encapsulate from the TL shared API whether a direct bridge or an indirect + * (ioctl) bridge is used. + * One reason for needing this layer centres around the fact that some of the + * API functions make multiple bridge calls and the logic that glues these + * together is common regardless of client location. Further this layer has + * allowed the defensive coding that checks parameters to move into the PVR + * API layer where untrusted clients enter giving a more efficient KM code path. + */ + +#include "img_defs.h" +#include "pvrsrv_error.h" +#include "pvr_debug.h" +#include "osfunc.h" + +#include "allocmem.h" +#include "devicemem.h" + +#include "tlclient.h" +#include "pvrsrv_tlcommon.h" +#include "client_pvrtl_bridge.h" + +/* Defines/Constants + */ + +#define NO_ACQUIRE 0xffffffffU + +/* User-side stream descriptor structure. 
+ */ +typedef struct _TL_STREAM_DESC_ +{ + /* Handle on kernel-side stream descriptor*/ + IMG_HANDLE hServerSD; + + /* Stream data buffer variables */ + DEVMEM_MEMDESC* psUMmemDesc; + IMG_PBYTE pBaseAddr; + + /* Offset in bytes into the circular buffer and valid only after + * an Acquire call and undefined after a release. */ + IMG_UINT32 uiReadOffset; + + /* Always a positive integer when the Acquire call returns and a release + * is outstanding. Undefined at all other times. */ + IMG_UINT32 uiReadLen; + + /* Flag indicating if the RESERVE_TOO_BIG error was already printed. + * It's used to reduce number of errors in kernel log. */ + IMG_BOOL bPrinted; +} TL_STREAM_DESC, *PTL_STREAM_DESC; + + +IMG_INTERNAL +PVRSRV_ERROR TLClientOpenStream(SHARED_DEV_CONNECTION hDevConnection, + const IMG_CHAR* pszName, + IMG_UINT32 ui32Mode, + IMG_HANDLE* phSD) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + TL_STREAM_DESC *psSD = NULL; + IMG_HANDLE hTLPMR; + IMG_HANDLE hTLImportHandle; + IMG_DEVMEM_SIZE_T uiImportSize; + PVRSRV_MEMALLOCFLAGS_T uiMemFlags = PVRSRV_MEMALLOCFLAG_CPU_READABLE; + + PVR_ASSERT(hDevConnection); + PVR_ASSERT(pszName); + PVR_ASSERT(phSD); + *phSD = NULL; + + /* Allocate memory for the stream descriptor object, initialise with + * "no data read" yet. */ + psSD = OSAllocZMem(sizeof(TL_STREAM_DESC)); + PVR_LOG_GOTO_IF_NOMEM(psSD, eError, e0); + psSD->uiReadLen = psSD->uiReadOffset = NO_ACQUIRE; + + /* Send open stream request to kernel server to get stream handle and + * buffer cookie so we can get access to the buffer in this process. 
*/ + eError = BridgeTLOpenStream(GetBridgeHandle(hDevConnection), pszName, + ui32Mode, &psSD->hServerSD, &hTLPMR); + if (eError != PVRSRV_OK) + { + if ((ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WAIT) && + (eError == PVRSRV_ERROR_TIMEOUT)) + { + goto e1; + } + PVR_LOG_GOTO_IF_ERROR(eError, "BridgeTLOpenStream", e1); + } + + /* Convert server export cookie into a cookie for use by this client */ + eError = DevmemMakeLocalImportHandle(hDevConnection, + hTLPMR, &hTLImportHandle); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemMakeLocalImportHandle", e2); + + uiMemFlags |= ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WO ? + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE : 0ULL; + /* Now convert client cookie into a client handle on the buffer's + * physical memory region */ + eError = DevmemLocalImport(hDevConnection, + hTLImportHandle, + uiMemFlags, + &psSD->psUMmemDesc, + &uiImportSize, + "TLBuffer"); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemImport", e3); + + /* Now map the memory into the virtual address space of this process. 
*/ + eError = DevmemAcquireCpuVirtAddr(psSD->psUMmemDesc, (void **) + &psSD->pBaseAddr); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", e4); + + /* Ignore error, not much that can be done */ + (void) DevmemUnmakeLocalImportHandle(hDevConnection, + hTLImportHandle); + + /* Return client descriptor handle to caller */ + *phSD = psSD; + return PVRSRV_OK; + +/* Clean up post buffer setup */ +e4: + DevmemFree(psSD->psUMmemDesc); +e3: + (void) DevmemUnmakeLocalImportHandle(hDevConnection, + &hTLImportHandle); +/* Clean up post stream open */ +e2: + BridgeTLCloseStream(GetBridgeHandle(hDevConnection), psSD->hServerSD); + +/* Clean up post allocation of the descriptor object */ +e1: + OSFreeMem(psSD); + +e0: + return eError; +} + +IMG_INTERNAL +PVRSRV_ERROR TLClientCloseStream(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hSD) +{ + PVRSRV_ERROR eError; + TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD; + + PVR_ASSERT(hDevConnection); + PVR_ASSERT(hSD); + + /* Check the caller provided connection is valid */ + if (!psSD->hServerSD) + { + PVR_DPF((PVR_DBG_ERROR, "%s: descriptor already " + "closed/not open", __func__)); + return PVRSRV_ERROR_HANDLE_NOT_FOUND; + } + + /* Check if acquire is outstanding, perform release if it is, ignore result + * as there is not much we can do if it is an error other than close */ + if (psSD->uiReadLen != NO_ACQUIRE) + { + (void) BridgeTLReleaseData(GetBridgeHandle(hDevConnection), + psSD->hServerSD, psSD->uiReadOffset, psSD->uiReadLen); + psSD->uiReadLen = psSD->uiReadOffset = NO_ACQUIRE; + } + + /* Clean up DevMem resources used for this stream in this client */ + DevmemReleaseCpuVirtAddr(psSD->psUMmemDesc); + + DevmemFree(psSD->psUMmemDesc); + + /* Send close to server to clean up kernel mode resources for this + * handle and release the memory. 
*/ + eError = BridgeTLCloseStream(GetBridgeHandle(hDevConnection), + psSD->hServerSD); + PVR_LOG_IF_ERROR(eError, "BridgeTLCloseStream"); + + OSCachedMemSet(psSD, 0x00, sizeof(TL_STREAM_DESC)); + OSFreeMem(psSD); + + return eError; +} + +IMG_INTERNAL +PVRSRV_ERROR TLClientDiscoverStreams(SHARED_DEV_CONNECTION hDevConnection, + const IMG_CHAR *pszNamePattern, + IMG_CHAR aszStreams[][PRVSRVTL_MAX_STREAM_NAME_SIZE], + IMG_UINT32 *pui32NumFound) +{ + PVR_ASSERT(hDevConnection); + PVR_ASSERT(pszNamePattern); + PVR_ASSERT(pui32NumFound); + + return BridgeTLDiscoverStreams(GetBridgeHandle(hDevConnection), + pszNamePattern, + /* we need to treat this as one dimensional array */ + *pui32NumFound * PRVSRVTL_MAX_STREAM_NAME_SIZE, + (IMG_CHAR *) aszStreams, + pui32NumFound); +} + +IMG_INTERNAL +PVRSRV_ERROR TLClientReserveStream(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hSD, + IMG_UINT8 **ppui8Data, + IMG_UINT32 ui32Size) +{ + PVRSRV_ERROR eError; + TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD; + IMG_UINT32 ui32BufferOffset, ui32Unused; + + PVR_ASSERT(hDevConnection); + PVR_ASSERT(hSD); + PVR_ASSERT(ppui8Data); + PVR_ASSERT(ui32Size); + + eError = BridgeTLReserveStream(GetBridgeHandle(hDevConnection), + psSD->hServerSD, &ui32BufferOffset, ui32Size, ui32Size, &ui32Unused); + PVR_RETURN_IF_ERROR(eError); + + *ppui8Data = psSD->pBaseAddr + ui32BufferOffset; + + return PVRSRV_OK; +} + +IMG_INTERNAL +PVRSRV_ERROR TLClientReserveStream2(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hSD, + IMG_UINT8 **ppui8Data, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32SizeMin, + IMG_UINT32 *pui32Available) +{ + PVRSRV_ERROR eError; + TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD; + IMG_UINT32 ui32BufferOffset; + + PVR_ASSERT(hDevConnection); + PVR_ASSERT(hSD); + PVR_ASSERT(ppui8Data); + PVR_ASSERT(ui32Size); + + eError = BridgeTLReserveStream(GetBridgeHandle(hDevConnection), + psSD->hServerSD, &ui32BufferOffset, ui32Size, ui32SizeMin, + pui32Available); + PVR_RETURN_IF_ERROR(eError); + + 
*ppui8Data = psSD->pBaseAddr + ui32BufferOffset; + + return PVRSRV_OK; +} + +IMG_INTERNAL +PVRSRV_ERROR TLClientCommitStream(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hSD, + IMG_UINT32 ui32Size) +{ + PVRSRV_ERROR eError; + TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD; + + PVR_ASSERT(hDevConnection); + PVR_ASSERT(hSD); + PVR_ASSERT(ui32Size); + + eError = BridgeTLCommitStream(GetBridgeHandle(hDevConnection), + psSD->hServerSD, ui32Size); + PVR_RETURN_IF_ERROR(eError); + + return PVRSRV_OK; +} + +IMG_INTERNAL +PVRSRV_ERROR TLClientAcquireData(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hSD, + IMG_PBYTE* ppPacketBuf, + IMG_UINT32* pui32BufLen) +{ + PVRSRV_ERROR eError; + TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD; + + PVR_ASSERT(hDevConnection); + PVR_ASSERT(hSD); + PVR_ASSERT(ppPacketBuf); + PVR_ASSERT(pui32BufLen); + + /* In case of non-blocking acquires, which can return no data, and + * error paths ensure we clear the output parameters first. */ + *ppPacketBuf = NULL; + *pui32BufLen = 0; + + /* Check Acquire has not been called twice in a row without a release */ + if (psSD->uiReadOffset != NO_ACQUIRE) + { + PVR_DPF((PVR_DBG_ERROR, "%s: acquire already " + "outstanding, ReadOffset(%d), ReadLength(%d)", + __func__, psSD->uiReadOffset, psSD->uiReadLen)); + return PVRSRV_ERROR_RETRY; + } + + /* Ask the kernel server for the next chunk of data to read */ + eError = BridgeTLAcquireData(GetBridgeHandle(hDevConnection), + psSD->hServerSD, &psSD->uiReadOffset, &psSD->uiReadLen); + if (eError != PVRSRV_OK) + { + /* Mask reporting of the errors seen under normal operation */ + if ((eError != PVRSRV_ERROR_RESOURCE_UNAVAILABLE) && + (eError != PVRSRV_ERROR_TIMEOUT) && + (eError != PVRSRV_ERROR_STREAM_READLIMIT_REACHED)) + { + PVR_LOG_ERROR(eError, "BridgeTLAcquireData"); + } + psSD->uiReadOffset = psSD->uiReadLen = NO_ACQUIRE; + return eError; + } + /* else PVRSRV_OK */ + + /* Return the data offset and length to the caller if bytes are available + * to be 
read. Could be zero for non-blocking mode so pass back cleared + * values above */ + if (psSD->uiReadLen) + { + *ppPacketBuf = psSD->pBaseAddr + psSD->uiReadOffset; + *pui32BufLen = psSD->uiReadLen; + } + + return PVRSRV_OK; +} + +static PVRSRV_ERROR _TLClientReleaseDataLen( + SHARED_DEV_CONNECTION hDevConnection, + TL_STREAM_DESC* psSD, + IMG_UINT32 uiReadLen) +{ + PVRSRV_ERROR eError; + + /* the previous acquire did not return any data, this is a no-operation */ + if (psSD->uiReadLen == 0) + { + return PVRSRV_OK; + } + + /* Check release has not been called twice in a row without an acquire */ + if (psSD->uiReadOffset == NO_ACQUIRE) + { + PVR_DPF((PVR_DBG_ERROR, "%s: no acquire to release", __func__)); + return PVRSRV_ERROR_RETRY; + } + + /* Inform the kernel to release the data from the buffer */ + eError = BridgeTLReleaseData(GetBridgeHandle(hDevConnection), + psSD->hServerSD, + psSD->uiReadOffset, uiReadLen); + PVR_LOG_IF_ERROR(eError, "BridgeTLReleaseData"); + + /* Reset state to indicate no outstanding acquire */ + psSD->uiReadLen = psSD->uiReadOffset = NO_ACQUIRE; + + return eError; +} + +IMG_INTERNAL +PVRSRV_ERROR TLClientReleaseData(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hSD) +{ + TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD; + + PVR_ASSERT(hDevConnection); + PVR_ASSERT(hSD); + + return _TLClientReleaseDataLen(hDevConnection, psSD, psSD->uiReadLen); +} + +IMG_INTERNAL +PVRSRV_ERROR TLClientReleaseDataLess(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hSD, IMG_UINT32 uiActualReadLen) +{ + TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD; + + PVR_ASSERT(hDevConnection); + PVR_ASSERT(hSD); + + /* Check the specified size is within the size returned by Acquire */ + if (uiActualReadLen > psSD->uiReadLen) + { + PVR_DPF((PVR_DBG_ERROR, "%s: no acquire to release", __func__)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + return _TLClientReleaseDataLen(hDevConnection, psSD, uiActualReadLen); +} + +IMG_INTERNAL +PVRSRV_ERROR 
TLClientWriteData(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hSD, + IMG_UINT32 ui32Size, + IMG_BYTE *pui8Data) +{ + PVRSRV_ERROR eError; + TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD; + + PVR_ASSERT(hDevConnection); + PVR_ASSERT(hSD); + PVR_ASSERT(ui32Size); + PVR_ASSERT(pui8Data); + + eError = BridgeTLWriteData(GetBridgeHandle(hDevConnection), + psSD->hServerSD, ui32Size, pui8Data); + PVR_LOG_IF_ERROR(eError, "BridgeTLWriteData"); + + if (eError == PVRSRV_ERROR_STREAM_FULL && !psSD->bPrinted) + { + psSD->bPrinted = IMG_TRUE; + } + + return eError; +} + +/****************************************************************************** + End of file (tlclient.c) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/tlclient.h b/drivers/gpu/drm/phytium/octopus/tlclient.h new file mode 100644 index 000000000000..9023c1ea5e8d --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/tlclient.h @@ -0,0 +1,257 @@ +/*************************************************************************/ /*! +@File tlclient.h +@Title Services Transport Layer shared API +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Transport layer common API used in both clients and server +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef TLCLIENT_H +#define TLCLIENT_H + + +#include "img_defs.h" +#include "pvrsrv_tlcommon.h" +#include "pvrsrv_error.h" + + +/* This value is used for the hSrvHandle argument in the client API when + * called directly from the kernel which will lead to a direct bridge access. + */ +#define DIRECT_BRIDGE_HANDLE ((IMG_HANDLE)0xDEADBEEFU) + + +/*************************************************************************/ /*! + @Function TLClientOpenStream + @Description Open a descriptor onto an existing kernel transport stream. 
+ @Input hDevConnection Address of a pointer to a connection object + @Input pszName Address of the stream name string, no longer + than PRVSRVTL_MAX_STREAM_NAME_SIZE. + @Input ui32Mode Unused + @Output phSD Address of a pointer to an stream object + @Return PVRSRV_ERROR_NOT_FOUND when named stream not found + @Return PVRSRV_ERROR_ALREADY_OPEN stream already open by another + @Return PVRSRV_ERROR_STREAM_ERROR internal driver state error + @Return PVRSRV_ERROR_TIMEOUT timed out, stream not found + @Return PVRSRV_ERROR for other system codes +*/ /**************************************************************************/ + +IMG_INTERNAL +PVRSRV_ERROR TLClientOpenStream(SHARED_DEV_CONNECTION hDevConnection, + const IMG_CHAR* pszName, + IMG_UINT32 ui32Mode, + IMG_HANDLE* phSD); + + +/*************************************************************************/ /*! + @Function TLClientCloseStream + @Description Close and release the stream connection to Services kernel + server transport layer. Any outstanding Acquire will be + released. + @Input hDevConnection Address of a pointer to a connection object + @Input hSD Handle of the stream object to close + @Return PVRSRV_ERROR_HANDLE_NOT_FOUND when SD handle is not known + @Return PVRSRV_ERROR_STREAM_ERROR internal driver state error + @Return PVRSRV_ERROR for system codes +*/ /**************************************************************************/ +IMG_INTERNAL +PVRSRV_ERROR TLClientCloseStream(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hSD); + +/*************************************************************************/ /*! + @Function TLClientDiscoverStreams + @Description Finds all streams that's name starts with pszNamePattern and + ends with a number. + @Input hDevConnection Address of a pointer to a connection object + @Input pszNamePattern Name pattern. Must be beginning of a string. + @Output aszStreams Array of numbers from end of the discovered + names. 
+ @inOut pui32NumFound When input, max number that can fit into + pui32Streams. When output, number of + discovered streams. + @Return PVRSRV_ERROR for system codes +*/ /**************************************************************************/ +IMG_INTERNAL +PVRSRV_ERROR TLClientDiscoverStreams(SHARED_DEV_CONNECTION hDevConnection, + const IMG_CHAR *pszNamePattern, + IMG_CHAR aszStreams[][PRVSRVTL_MAX_STREAM_NAME_SIZE], + IMG_UINT32 *pui32NumFound); + +/*************************************************************************/ /*! + @Function TLClientReserveStream + @Description Reserves a region with given size in the stream. If the stream + is already reserved the function will return an error. + @Input hDevConnection Address of a pointer to a connection object + @Input hSD Handle of the stream object to close + @Output ppui8Data pointer to the buffer + @Input ui32Size size of the data + @Return +*/ /**************************************************************************/ +IMG_INTERNAL +PVRSRV_ERROR TLClientReserveStream(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hSD, + IMG_UINT8 **ppui8Data, + IMG_UINT32 ui32Size); + +/*************************************************************************/ /*! + @Function TLClientStreamReserve2 + @Description Reserves a region with given size in the stream. If the stream + is already reserved the function will return an error. 
+ @Input hDevConnection Address of a pointer to a connection object + @Input hSD Handle of the stream object to close + @Output ppui8Data pointer to the buffer + @Input ui32Size size of the data + @Input ui32SizeMin minimum size of the data + @Input ui32Available available space in buffer + @Return +*/ /**************************************************************************/ +IMG_INTERNAL +PVRSRV_ERROR TLClientReserveStream2(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hSD, + IMG_UINT8 **ppui8Data, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32SizeMin, + IMG_UINT32 *pui32Available); + +/*************************************************************************/ /*! + @Function TLClientStreamCommit + @Description Commits previously reserved region in the stream and therefore + allows next reserves. + This function call has to be preceded by the call to + TLClientReserveStream or TLClientReserveStream2. + @Input hDevConnection Address of a pointer to a connection object + @Input hSD Handle of the stream object to close + @Input ui32Size Size of the data + @Return +*/ /**************************************************************************/ +IMG_INTERNAL +PVRSRV_ERROR TLClientCommitStream(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hSD, + IMG_UINT32 ui32Size); + +/*************************************************************************/ /*! + @Function TLClientAcquireData + @Description When there is data available in the stream buffer this call + returns with the address and length of the data buffer the + client can safely read. This buffer may contain one or more + packets of data. + If no data is available then this call blocks until it becomes + available. However if the stream has been destroyed while + waiting then a resource unavailable error will be returned to + the caller. Clients must pair this call with a ReleaseData + call. 
+ @Input hDevConnection Address of a pointer to a connection object + @Input hSD Handle of the stream object to read + @Output ppPacketBuf Address of a pointer to an byte buffer. On exit + pointer contains address of buffer to read from + @Output puiBufLen Pointer to an integer. On exit it is the size + of the data to read from the packet buffer + @Return PVRSRV_ERROR_RESOURCE_UNAVAILABLE when stream no longer exists + @Return PVRSRV_ERROR_HANDLE_NOT_FOUND when SD handle not known + @Return PVRSRV_ERROR_STREAM_ERROR internal driver state error + @Return PVRSRV_ERROR_RETRY release not called beforehand + @Return PVRSRV_ERROR_TIMEOUT block timed out, no data + @Return PVRSRV_ERROR for other system codes +*/ /**************************************************************************/ +IMG_INTERNAL +PVRSRV_ERROR TLClientAcquireData(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hSD, + IMG_PBYTE* ppPacketBuf, + IMG_UINT32* puiBufLen); + + +/*************************************************************************/ /*! + @Function TLClientReleaseData + @Description Called after client has read the stream data out of the buffer + The data is subsequently flushed from the stream buffer to make + room for more data packets from the stream source. + @Input hDevConnection Address of a pointer to a connection object + @Input hSD Handle of the stream object to read + @Return PVRSRV_ERROR_RESOURCE_UNAVAILABLE when stream no longer exists + @Return PVRSRV_ERROR_HANDLE_NOT_FOUND when SD handle not known to TL + @Return PVRSRV_ERROR_STREAM_ERROR internal driver state error + @Return PVRSRV_ERROR_RETRY acquire not called beforehand + @Return PVRSRV_ERROR for system codes +*/ /**************************************************************************/ +IMG_INTERNAL +PVRSRV_ERROR TLClientReleaseData(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hSD); + +/*************************************************************************/ /*! 
+ @Function TLClientReleaseDataLess + @Description Called after client has read only some data out of the buffer + and wishes to complete the read early i.e. does not want to + read the full data that the acquire call returned e.g read just + one packet from the stream. + The data is subsequently flushed from the stream buffer to make + room for more data packets from the stream source. + @Input hDevConnection Address of a pointer to a connection object + @Input hSD Handle of the stream object to read + @Input uiActualReadLen Size of data read, in bytes. Must be on a TL + packet boundary. + @Return PVRSRV_ERROR_INVALID_PARAMS when read length too big + @Return PVRSRV_ERROR_RESOURCE_UNAVAILABLE when stream no longer exists + @Return PVRSRV_ERROR_HANDLE_NOT_FOUND when SD handle not known to TL + @Return PVRSRV_ERROR_STREAM_ERROR internal driver state error + @Return PVRSRV_ERROR_RETRY acquire not called beforehand + @Return PVRSRV_ERROR for system codes +*/ /**************************************************************************/ +IMG_INTERNAL +PVRSRV_ERROR TLClientReleaseDataLess(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hSD, IMG_UINT32 uiActualReadLen); + +/*************************************************************************/ /*! + @Function TLClientWriteData + @Description Writes data to the stream. 
+ @Input hDevConnection Address of a pointer to a connection object + @Input hSD Handle of the stream object to read + @Input ui32Size Size of the data + @Input pui8Data Pointer to data +*/ /**************************************************************************/ +IMG_INTERNAL +PVRSRV_ERROR TLClientWriteData(SHARED_DEV_CONNECTION hDevConnection, + IMG_HANDLE hSD, + IMG_UINT32 ui32Size, + IMG_BYTE *pui8Data); + + +#endif /* TLCLIENT_H */ + +/****************************************************************************** + End of file (tlclient.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/tlintern.c b/drivers/gpu/drm/phytium/octopus/tlintern.c new file mode 100644 index 000000000000..3bffac57e0d1 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/tlintern.c @@ -0,0 +1,473 @@ +/*************************************************************************/ /*! +@File +@Title Transport Layer kernel side API implementation. +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Transport Layer functions available to driver components in + the driver. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +//#define PVR_DPF_FUNCTION_TRACE_ON 1 +#undef PVR_DPF_FUNCTION_TRACE_ON +#include "pvr_debug.h" + +#include "allocmem.h" +#include "pvrsrv_error.h" +#include "osfunc.h" +#include "devicemem.h" + +#include "pvrsrv_tlcommon.h" +#include "tlintern.h" + +/* + * Make functions + */ +PTL_STREAM_DESC +TLMakeStreamDesc(PTL_SNODE f1, IMG_UINT32 f2, IMG_HANDLE f3) +{ + PTL_STREAM_DESC ps = OSAllocZMem(sizeof(TL_STREAM_DESC)); + if (ps == NULL) + { + return NULL; + } + ps->psNode = f1; + ps->ui32Flags = f2; + ps->hReadEvent = f3; + ps->uiRefCount = 1; + + if (f2 & PVRSRV_STREAM_FLAG_READ_LIMIT) + { + ps->ui32ReadLimit = f1->psStream->ui32Write; + } + return ps; +} + +PTL_SNODE +TLMakeSNode(IMG_HANDLE f2, TL_STREAM *f3, TL_STREAM_DESC *f4) +{ + PTL_SNODE ps = OSAllocZMem(sizeof(TL_SNODE)); + if (ps == NULL) + { + return NULL; + } + ps->hReadEventObj = f2; + ps->psStream = f3; + ps->psRDesc = f4; + f3->psNode = ps; + return ps; +} + +/* + * Transport Layer Global top variables and functions + */ +static TL_GLOBAL_DATA sTLGlobalData; + +TL_GLOBAL_DATA *TLGGD(void) /* TLGetGlobalData() */ +{ + return &sTLGlobalData; +} + +/* TLInit must only be called once at driver initialisation. + * An assert is provided to check this condition on debug builds. + */ +PVRSRV_ERROR +TLInit(void) +{ + PVRSRV_ERROR eError; + + PVR_DPF_ENTERED; + + PVR_ASSERT(sTLGlobalData.hTLGDLock == NULL && sTLGlobalData.hTLEventObj == NULL); + + /* Allocate a lock for TL global data, to be used while updating the TL data. 
+ * This is for making TL global data multi-thread safe */ + eError = OSLockCreate(&sTLGlobalData.hTLGDLock); + PVR_GOTO_IF_ERROR(eError, e0); + + /* Allocate the event object used to signal global TL events such as + * a new stream created */ + eError = OSEventObjectCreate("TLGlobalEventObj", &sTLGlobalData.hTLEventObj); + PVR_GOTO_IF_ERROR(eError, e1); + + PVR_DPF_RETURN_OK; + +/* Don't allow the driver to start up on error */ +e1: + OSLockDestroy (sTLGlobalData.hTLGDLock); + sTLGlobalData.hTLGDLock = NULL; +e0: + PVR_DPF_RETURN_RC (eError); +} + +static void RemoveAndFreeStreamNode(PTL_SNODE psRemove) +{ + TL_GLOBAL_DATA* psGD = TLGGD(); + PTL_SNODE* last; + PTL_SNODE psn; + PVRSRV_ERROR eError; + + PVR_DPF_ENTERED; + + /* Unlink the stream node from the master list */ + PVR_ASSERT(psGD->psHead); + last = &psGD->psHead; + for (psn = psGD->psHead; psn; psn=psn->psNext) + { + if (psn == psRemove) + { + /* Other calling code may have freed and zeroed the pointers */ + if (psn->psRDesc) + { + OSFreeMem(psn->psRDesc); + psn->psRDesc = NULL; + } + if (psn->psStream) + { + OSFreeMem(psn->psStream); + psn->psStream = NULL; + } + *last = psn->psNext; + break; + } + last = &psn->psNext; + } + + /* Release the event list object owned by the stream node */ + if (psRemove->hReadEventObj) + { + eError = OSEventObjectDestroy(psRemove->hReadEventObj); + PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy"); + + psRemove->hReadEventObj = NULL; + } + + /* Release the memory of the stream node */ + OSFreeMem(psRemove); + + PVR_DPF_RETURN; +} + +static void FreeGlobalData(void) +{ + PTL_SNODE psCurrent = sTLGlobalData.psHead; + PTL_SNODE psNext; + PVRSRV_ERROR eError; + + PVR_DPF_ENTERED; + + /* Clean up the SNODE list */ + if (psCurrent) + { + while (psCurrent) + { + psNext = psCurrent->psNext; + + /* Other calling code may have freed and zeroed the pointers */ + if (psCurrent->psRDesc) + { + OSFreeMem(psCurrent->psRDesc); + psCurrent->psRDesc = NULL; + } + if (psCurrent->psStream) + { 
+ OSFreeMem(psCurrent->psStream); + psCurrent->psStream = NULL; + } + + /* Release the event list object owned by the stream node */ + if (psCurrent->hReadEventObj) + { + eError = OSEventObjectDestroy(psCurrent->hReadEventObj); + PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy"); + + psCurrent->hReadEventObj = NULL; + } + + OSFreeMem(psCurrent); + psCurrent = psNext; + } + + sTLGlobalData.psHead = NULL; + } + + PVR_DPF_RETURN; +} + +void +TLDeInit(void) +{ + PVR_DPF_ENTERED; + + if (sTLGlobalData.uiClientCnt) + { + PVR_DPF((PVR_DBG_ERROR, "TLDeInit transport layer but %d client streams are still connected", sTLGlobalData.uiClientCnt)); + sTLGlobalData.uiClientCnt = 0; + } + + FreeGlobalData(); + + /* Clean up the TL global event object */ + if (sTLGlobalData.hTLEventObj) + { + OSEventObjectDestroy(sTLGlobalData.hTLEventObj); + sTLGlobalData.hTLEventObj = NULL; + } + + /* Destroy the TL global data lock */ + if (sTLGlobalData.hTLGDLock) + { + OSLockDestroy (sTLGlobalData.hTLGDLock); + sTLGlobalData.hTLGDLock = NULL; + } + + PVR_DPF_RETURN; +} + +void TLAddStreamNode(PTL_SNODE psAdd) +{ + PVR_DPF_ENTERED; + + PVR_ASSERT(psAdd); + psAdd->psNext = TLGGD()->psHead; + TLGGD()->psHead = psAdd; + + PVR_DPF_RETURN; +} + +PTL_SNODE TLFindStreamNodeByName(const IMG_CHAR *pszName) +{ + TL_GLOBAL_DATA* psGD = TLGGD(); + PTL_SNODE psn; + + PVR_DPF_ENTERED; + + PVR_ASSERT(pszName); + + for (psn = psGD->psHead; psn; psn=psn->psNext) + { + if (psn->psStream && OSStringNCompare(psn->psStream->szName, pszName, PRVSRVTL_MAX_STREAM_NAME_SIZE)==0) + { + PVR_DPF_RETURN_VAL(psn); + } + } + + PVR_DPF_RETURN_VAL(NULL); +} + +PTL_SNODE TLFindStreamNodeByDesc(PTL_STREAM_DESC psDesc) +{ + TL_GLOBAL_DATA* psGD = TLGGD(); + PTL_SNODE psn; + + PVR_DPF_ENTERED; + + PVR_ASSERT(psDesc); + + for (psn = psGD->psHead; psn; psn=psn->psNext) + { + if (psn->psRDesc == psDesc || psn->psWDesc == psDesc) + { + PVR_DPF_RETURN_VAL(psn); + } + } + PVR_DPF_RETURN_VAL(NULL); +} + +static inline IMG_BOOL 
IsDigit(IMG_CHAR c) +{ + return c >= '0' && c <= '9'; +} + +static inline IMG_BOOL ReadNumber(const IMG_CHAR *pszBuffer, + IMG_UINT32 *pui32Number) +{ + IMG_CHAR acTmp[11] = {0}; /* max 10 digits */ + IMG_UINT32 ui32Result; + IMG_UINT i; + + for (i = 0; i < sizeof(acTmp) - 1; i++) + { + if (!IsDigit(*pszBuffer)) + break; + acTmp[i] = *pszBuffer++; + } + + /* if there are no digits or there is something after the number */ + if (i == 0 || *pszBuffer != '\0') + return IMG_FALSE; + + if (OSStringToUINT32(acTmp, 10, &ui32Result) != PVRSRV_OK) + return IMG_FALSE; + + *pui32Number = ui32Result; + + return IMG_TRUE; +} + +IMG_UINT32 TLDiscoverStreamNodes(const IMG_CHAR *pszNamePattern, + IMG_CHAR aaszStreams[][PRVSRVTL_MAX_STREAM_NAME_SIZE], + IMG_UINT32 ui32Max) +{ + TL_GLOBAL_DATA *psGD = TLGGD(); + PTL_SNODE psn; + IMG_UINT32 ui32Count = 0; + size_t uiLen; + + PVR_ASSERT(pszNamePattern); + + if ((uiLen = OSStringLength(pszNamePattern)) == 0) + return 0; + + for (psn = psGD->psHead; psn; psn = psn->psNext) + { + if (OSStringNCompare(pszNamePattern, psn->psStream->szName, uiLen) != 0) + continue; + + /* If aaszStreams is NULL we only count how many string match + * the given pattern. If it's a valid pointer we also return + * the names. 
*/ + if (aaszStreams != NULL) + { + if (ui32Count >= ui32Max) + break; + + /* all of names are shorter than MAX and null terminated */ + OSStringLCopy(aaszStreams[ui32Count], psn->psStream->szName, + PRVSRVTL_MAX_STREAM_NAME_SIZE); + } + + ui32Count++; + } + + return ui32Count; +} + +PTL_SNODE TLFindAndGetStreamNodeByDesc(PTL_STREAM_DESC psDesc) +{ + PTL_SNODE psn; + + PVR_DPF_ENTERED; + + psn = TLFindStreamNodeByDesc(psDesc); + if (psn == NULL) + PVR_DPF_RETURN_VAL(NULL); + + PVR_ASSERT(psDesc == psn->psWDesc); + + psn->uiWRefCount++; + psDesc->uiRefCount++; + + PVR_DPF_RETURN_VAL(psn); +} + +void TLReturnStreamNode(PTL_SNODE psNode) +{ + psNode->uiWRefCount--; + psNode->psWDesc->uiRefCount--; + + PVR_ASSERT(psNode->uiWRefCount > 0); + PVR_ASSERT(psNode->psWDesc->uiRefCount > 0); +} + +IMG_BOOL TLTryRemoveStreamAndFreeStreamNode(PTL_SNODE psRemove) +{ + PVR_DPF_ENTERED; + + PVR_ASSERT(psRemove); + + /* If there is a client connected to this stream, defer stream's deletion */ + if (psRemove->psRDesc != NULL || psRemove->psWDesc != NULL) + { + PVR_DPF_RETURN_VAL(IMG_FALSE); + } + + /* Remove stream from TL_GLOBAL_DATA's list and free stream node */ + psRemove->psStream = NULL; + RemoveAndFreeStreamNode(psRemove); + + PVR_DPF_RETURN_VAL(IMG_TRUE); +} + +IMG_BOOL TLUnrefDescAndTryFreeStreamNode(PTL_SNODE psNodeToRemove, + PTL_STREAM_DESC psSD) +{ + PVR_DPF_ENTERED; + + PVR_ASSERT(psNodeToRemove); + PVR_ASSERT(psSD); + + /* Decrement reference count. For descriptor obtained by reader it must + * reach 0 (only single reader allowed) and for descriptors obtained by + * writers it must reach value greater or equal to 0 (multiple writers + * model). */ + psSD->uiRefCount--; + + if (psSD == psNodeToRemove->psRDesc) + { + PVR_ASSERT(0 == psSD->uiRefCount); + /* Remove stream descriptor (i.e. 
stream reader context) */ + psNodeToRemove->psRDesc = NULL; + } + else if (psSD == psNodeToRemove->psWDesc) + { + PVR_ASSERT(0 <= psSD->uiRefCount); + + psNodeToRemove->uiWRefCount--; + + /* Remove stream descriptor if reference == 0 */ + if (0 == psSD->uiRefCount) + { + psNodeToRemove->psWDesc = NULL; + } + } + + /* Do not Free Stream Node if there is a write reference (a producer + * context) to the stream */ + if (NULL != psNodeToRemove->psRDesc || NULL != psNodeToRemove->psWDesc || + 0 != psNodeToRemove->uiWRefCount) + { + PVR_DPF_RETURN_VAL(IMG_FALSE); + } + + /* Make stream pointer NULL to prevent it from being destroyed in + * RemoveAndFreeStreamNode. Cleanup of stream should be done by the + * calling context */ + psNodeToRemove->psStream = NULL; + RemoveAndFreeStreamNode(psNodeToRemove); + + PVR_DPF_RETURN_VAL(IMG_TRUE); +} diff --git a/drivers/gpu/drm/phytium/octopus/tlintern.h b/drivers/gpu/drm/phytium/octopus/tlintern.h new file mode 100644 index 000000000000..02ca3362afb0 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/tlintern.h @@ -0,0 +1,345 @@ +/*************************************************************************/ /*! +@File +@Title Transport Layer internals +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Transport Layer header used by TL internally +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifndef TLINTERN_H +#define TLINTERN_H + + +#include "devicemem_typedefs.h" +#include "pvrsrv_tlcommon.h" +#include "lock.h" +#include "tlstream.h" + +/* Forward declarations */ +typedef struct _TL_SNODE_* PTL_SNODE; + +/* To debug buffer utilisation enable this macro here and define + * PVRSRV_NEED_PVR_TRACE in the server pvr_debug.c and in tutils.c + * before the inclusion of pvr_debug.h. + * Issue pvrtutils 6 on target to see stream buffer utilisation. */ +//#define TL_BUFFER_STATS 1 + +/*! TL stream structure container. + * pbyBuffer holds the circular buffer. + * ui32Read points to the beginning of the buffer, ie to where data to + * Read begin. + * ui32Write points to the end of data that have been committed, ie this is + * where new data will be written. + * ui32Pending number of bytes reserved in last reserve call which have not + * yet been submitted. Therefore these data are not ready to + * be transported. + * hStreamWLock - provides atomic protection for the ui32Pending & ui32Write + * members of the structure for when they are checked and/or + * updated in the context of a stream writer (producer) + * calling DoTLStreamReserve() & TLStreamCommit(). + * - Reader context is not multi-threaded, only one client per + * stream is allowed. Also note the read context may be in an + * ISR which prevents a design where locks can be held in the + * AcquireData/ReleaseData() calls. Thus this lock only + * protects the stream members from simultaneous writers. + * + * ui32Read < ui32Write <= ui32Pending + * where < and <= operators are overloaded to make sense in a circular way. + */ +typedef struct _TL_STREAM_ +{ + IMG_CHAR szName[PRVSRVTL_MAX_STREAM_NAME_SIZE]; /*!< String name identifier */ + TL_OPMODE eOpMode; /*!< Mode of Operation of TL Buffer */ + + IMG_BOOL bWaitForEmptyOnDestroy; /*!< Flag: On destroying a non-empty stream block until + * stream is drained. 
*/ + IMG_BOOL bNoSignalOnCommit; /*!< Flag: Used to avoid the TL signalling waiting consumers + * that new data is available on every commit. Producers + * using this flag will need to manually signal when + * appropriate using the TLStreamSync() API */ + + void (*pfOnReaderOpenCallback)(void *pvArg); /*!< Optional on reader connect callback */ + void *pvOnReaderOpenUserData; /*!< On reader connect user data */ + void (*pfProducerCallback)(void); /*!< Optional producer callback of type TL_STREAM_SOURCECB */ + void *pvProducerUserData; /*!< Producer callback user data */ + + struct _TL_STREAM_ *psNotifStream; /*!< Pointer to the stream to which notification will be sent */ + + volatile IMG_UINT32 ui32Read; /*!< Pointer to the beginning of available data */ + volatile IMG_UINT32 ui32Write; /*!< Pointer to already committed data which are ready to be + * copied to user space */ + IMG_UINT32 ui32Pending; /*!< Count pending bytes reserved in buffer */ + IMG_UINT32 ui32Size; /*!< Buffer size */ + IMG_UINT32 ui32ThresholdUsageForSignal; /*!< Buffer usage threshold at which a TL writer signals a blocked/ + * waiting reader when transitioning from empty->non-empty */ + IMG_UINT32 ui32MaxPacketSize; /*! 
Max TL packet size */ + IMG_BYTE *pbyBuffer; /*!< Actual data buffer */ + + PTL_SNODE psNode; /*!< Ptr to parent stream node */ + DEVMEM_MEMDESC *psStreamMemDesc; /*!< MemDescriptor used to allocate buffer space through PMR */ + + IMG_HANDLE hProducerEvent; /*!< Handle to wait on if there is not enough space */ + IMG_HANDLE hProducerEventObj; /*!< Handle to signal blocked reserve calls */ + IMG_BOOL bSignalPending; /*!< Tracks if a "signal" is pending to be sent to a blocked/ + * waiting reader */ + + POS_LOCK hStreamWLock; /*!< Writers Lock for ui32Pending & ui32Write*/ + POS_LOCK hReadLock; /*!< Readers Lock for bReadPending & ui32Read*/ + IMG_BOOL bReadPending; /*!< Tracks if a read operation is pending or not*/ + IMG_BOOL bNoWrapPermanent; /*!< Flag: Prevents buffer wrap and subsequent data loss + * as well as resetting the read position on close. */ + +#if defined(TL_BUFFER_STATS) + IMG_UINT32 ui32CntReadFails; /*!< Tracks how many times reader failed to acquire read lock */ + IMG_UINT32 ui32CntReadSuccesses; /*!< Tracks how many times reader acquires read lock successfully */ + IMG_UINT32 ui32CntWriteSuccesses; /*!< Tracks how many times writer acquires read lock successfully */ + IMG_UINT32 ui32CntWriteWaits; /*!< Tracks how many times writer had to wait to acquire read lock */ + IMG_UINT32 ui32CntNumWriteSuccess; /*!< Tracks how many write operations were successful*/ + IMG_UINT32 ui32BufferUt; /*!< Buffer utilisation high watermark, see TL_BUFFER_STATS above */ + IMG_UINT32 ui32MaxReserveWatermark; /*!< Max stream reserve size that was ever requested by a writer */ + IMG_UINT32 ui32SignalsSent; /*!< Number of signals that were actually sent by the write API */ + ATOMIC_T bNoReaderSinceFirstReserve; /*!< Tracks if a read has been done since the buffer was last found empty */ + IMG_UINT32 ui32TimeStart; /*!< Time at which a write (Reserve call) was done into an empty buffer. + * Guarded by hStreamWLock. 
*/ + IMG_UINT32 ui32MinTimeToFullInUs; /*!< Minimum time taken to (nearly) fully fill an empty buffer. Guarded + * by hStreamWLock. */ + /* Behaviour counters, protected by hStreamLock in case of + * multi-threaded access */ + IMG_UINT32 ui32NumCommits; /*!< Counters used to analysing stream performance, see ++ loc */ + IMG_UINT32 ui32SignalNotSent; /*!< Counters used to analysing stream performance, see ++ loc */ + IMG_UINT32 ui32ManSyncs; /*!< Counters used to analysing stream performance, see ++ loc */ + IMG_UINT32 ui32ProducerByteCount; /*!< Counters used to analysing stream performance, see ++ loc */ + + /* Not protected by the lock, inc in the reader thread which is currently singular */ + IMG_UINT32 ui32AcquireRead1; /*!< Counters used to analysing stream performance, see ++ loc */ + IMG_UINT32 ui32AcquireRead2; /*!< Counters used to analysing stream performance, see ++ loc */ +#endif + +} TL_STREAM, *PTL_STREAM; + +/* there need to be enough space reserved in the buffer for 2 minimal packets + * and it needs to be aligned the same way the buffer is or there will be a + * compile error.*/ +#define BUFFER_RESERVED_SPACE (2 * PVRSRVTL_PACKET_ALIGNMENT) + +/* ensure the space reserved follows the buffer's alignment */ +static_assert(!(BUFFER_RESERVED_SPACE&(PVRSRVTL_PACKET_ALIGNMENT-1)), + "BUFFER_RESERVED_SPACE must be a multiple of PVRSRVTL_PACKET_ALIGNMENT"); + +/* Define the largest value that a uint that matches the + * PVRSRVTL_PACKET_ALIGNMENT size can hold */ +#define MAX_UINT 0xffffFFFF + +/*! Defines the value used for TL_STREAM.ui32Pending when no reserve is + * outstanding on the stream. 
*/ +#define NOTHING_PENDING IMG_UINT32_MAX + + +/* + * Transport Layer Stream Descriptor types/defs + */ +typedef struct _TL_STREAM_DESC_ +{ + PTL_SNODE psNode; /*!< Ptr to parent stream node */ + IMG_UINT32 ui32Flags; /*!< Flags supplied by client on stream open */ + IMG_HANDLE hReadEvent; /*!< For wait call (only used/set in reader descriptors) */ + IMG_INT uiRefCount; /*!< Reference count to the SD */ + +#if defined(TL_BUFFER_STATS) + /* Behaviour counters, no multi-threading protection need as they are + * incremented in a single thread due to only supporting one reader + * at present */ + IMG_UINT32 ui32AcquireCount; /*!< Counters used to analysing stream performance, see ++ loc */ + IMG_UINT32 ui32NoData; /*!< Counters used to analysing stream performance, see ++ loc */ + IMG_UINT32 ui32NoDataSleep; /*!< Counters used to analysing stream performance, see ++ loc */ + IMG_UINT32 ui32Signalled; /*!< Counters used to analysing stream performance, see ++ loc */ + IMG_UINT32 ui32TimeoutEmpty; /*!< Counters used to analysing stream performance, see ++ loc */ + IMG_UINT32 ui32TimeoutData; /*!< Counters used to analysing stream performance, see ++ loc */ +#endif + IMG_UINT32 ui32ReadLimit; /*!< Limit buffer reads to data present in the + buffer at the time of stream open. 
*/ + IMG_UINT32 ui32ReadLen; /*!< Size of data returned by initial Acquire */ +} TL_STREAM_DESC, *PTL_STREAM_DESC; + +PTL_STREAM_DESC TLMakeStreamDesc(PTL_SNODE f1, IMG_UINT32 f2, IMG_HANDLE f3); + +#define TL_STREAM_KM_FLAG_MASK 0xFFFF0000 +#define TL_STREAM_FLAG_TEST 0x10000000 +#define TL_STREAM_FLAG_WRAPREAD 0x00010000 + +#define TL_STREAM_UM_FLAG_MASK 0x0000FFFF + +#if defined(TL_BUFFER_STATS) +# define TL_COUNTER_INC(a) ((a)++) +# define TL_COUNTER_ADD(a,b) ((a) += (b)) +#else +# define TL_COUNTER_INC(a) (void)(0) +# define TL_COUNTER_ADD(a,b) (void)(0) +#endif +/* + * Transport Layer stream list node + */ +typedef struct _TL_SNODE_ +{ + struct _TL_SNODE_* psNext; /*!< Linked list next element */ + IMG_HANDLE hReadEventObj; /*!< Readers 'wait for data' event */ + PTL_STREAM psStream; /*!< TL Stream object */ + IMG_INT uiWRefCount; /*!< Stream writer reference count */ + PTL_STREAM_DESC psRDesc; /*!< Stream reader 0 or ptr only */ + PTL_STREAM_DESC psWDesc; /*!< Stream writer 0 or ptr only */ +} TL_SNODE; + +PTL_SNODE TLMakeSNode(IMG_HANDLE f2, TL_STREAM *f3, TL_STREAM_DESC *f4); + +/* + * Transport Layer global top types and variables + * Use access function to obtain pointer. + * + * hTLGDLock - provides atomicity over read/check/write operations and + * sequence of operations on uiClientCnt, psHead list of SNODEs and + * the immediate members in a list element SNODE structure. + * - This larger scope of responsibility for this lock helps avoid + * the need for a lock in the SNODE structure. + * - Lock held in the client (reader) context when streams are + * opened/closed and in the server (writer) context when streams + * are created/open/closed. + */ +typedef struct _TL_GDATA_ +{ + IMG_HANDLE hTLEventObj; /* Global TL signal object, new streams, etc */ + + IMG_UINT uiClientCnt; /* Counter to track the number of client stream connections. 
*/ + PTL_SNODE psHead; /* List of TL streams and associated client handle */ + + POS_LOCK hTLGDLock; /* Lock for structure AND psHead SNODE list */ +} TL_GLOBAL_DATA, *PTL_GLOBAL_DATA; + +/* + * Transport Layer Internal Kernel-Mode Server API + */ +TL_GLOBAL_DATA* TLGGD(void); /* TLGetGlobalData() */ + +PVRSRV_ERROR TLInit(void); +void TLDeInit(void); + +void TLAddStreamNode(PTL_SNODE psAdd); +PTL_SNODE TLFindStreamNodeByName(const IMG_CHAR *pszName); +PTL_SNODE TLFindStreamNodeByDesc(PTL_STREAM_DESC psDesc); +IMG_UINT32 TLDiscoverStreamNodes(const IMG_CHAR *pszNamePattern, + IMG_CHAR aaszStreams[][PRVSRVTL_MAX_STREAM_NAME_SIZE], + IMG_UINT32 ui32Max); +PTL_SNODE TLFindAndGetStreamNodeByDesc(PTL_STREAM_DESC psDesc); +void TLReturnStreamNode(PTL_SNODE psNode); + +/****************************************************************************** + Function Name : TLTryRemoveStreamAndFreeStreamNode + + Inputs : PTL_SNODE Pointer to the TL_SNODE whose stream is requested + to be removed from TL_GLOBAL_DATA's list + + Return Value : IMG_TRUE - If the stream was made NULL and this + TL_SNODE was removed from the + TL_GLOBAL_DATA's list + + IMG_FALSE - If the stream wasn't made NULL as there + is a client connected to this stream + + Description : If there is no client currently connected to this stream then, + This function removes this TL_SNODE from the + TL_GLOBAL_DATA's list. 
The caller is responsible for the + cleanup of the TL_STREAM whose TL_SNODE may be removed + + Otherwise, this function does nothing +******************************************************************************/ +IMG_BOOL TLTryRemoveStreamAndFreeStreamNode(PTL_SNODE psRemove); + +/****************************************************************************** + Function Name : TLUnrefDescAndTryFreeStreamNode + + Inputs : PTL_SNODE Pointer to the TL_SNODE whose descriptor is + requested to be removed + : PTL_STREAM_DESC Pointer to the STREAM_DESC + + Return Value : IMG_TRUE - If this TL_SNODE was removed from the + TL_GLOBAL_DATA's list + + IMG_FALSE - Otherwise + + Description : This function removes the stream descriptor from this TL_SNODE + and, if there is no writer (producer context) currently bound to this + stream, this function removes this TL_SNODE from the TL_GLOBAL_DATA's + list. The caller is responsible for the cleanup of the TL_STREAM + whose TL_SNODE may be removed +******************************************************************************/ +IMG_BOOL TLUnrefDescAndTryFreeStreamNode(PTL_SNODE psRemove, PTL_STREAM_DESC psSD); + +/* + * Transport Layer stream interface to server part declared here to avoid + * circular dependency. 
+ */ +IMG_UINT32 TLStreamAcquireReadPos(PTL_STREAM psStream, + IMG_BOOL bDisableCallback, + IMG_UINT32* puiReadOffset); +PVRSRV_ERROR TLStreamAdvanceReadPos(PTL_STREAM psStream, + IMG_UINT32 uiReadLen, + IMG_UINT32 uiOrigReadLen); +void TLStreamResetReadPos(PTL_STREAM psStream); + +DEVMEM_MEMDESC* TLStreamGetBufferPointer(PTL_STREAM psStream); +IMG_BOOL TLStreamOutOfData(IMG_HANDLE psStream); + +/****************************************************************************** + Function Name : TLStreamDestroy + + Inputs : PTL_STREAM Pointer to the TL_STREAM to be destroyed + + Description : This function performs all the clean-up operations required for + destruction of this stream +******************************************************************************/ +void TLStreamDestroy(PTL_STREAM psStream); + +/* + * Test related functions + */ +PVRSRV_ERROR TUtilsInit(PVRSRV_DEVICE_NODE *psDeviceNode); +PVRSRV_ERROR TUtilsDeinit(PVRSRV_DEVICE_NODE *psDeviceNode); + + +#endif /* TLINTERN_H */ +/****************************************************************************** + End of file (tlintern.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/tlserver.c b/drivers/gpu/drm/phytium/octopus/tlserver.c new file mode 100644 index 000000000000..3521d909b982 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/tlserver.c @@ -0,0 +1,747 @@ +/*************************************************************************/ /*! +@File +@Title KM server Transport Layer implementation +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Main bridge APIs for Transport Layer client functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "img_defs.h" + +/*#define PVR_DPF_FUNCTION_TRACE_ON 1*/ +#undef PVR_DPF_FUNCTION_TRACE_ON +#include "pvr_debug.h" + +#include "connection_server.h" +#include "allocmem.h" +#include "devicemem.h" + +#include "tlintern.h" +#include "tlstream.h" +#include "tlserver.h" + +#include "pvrsrv_tlstreams.h" +#define NO_STREAM_WAIT_PERIOD_US 2000000ULL +#define NO_DATA_WAIT_PERIOD_US 500000ULL +#define NO_ACQUIRE 0xffffffffU + + +/* + * Transport Layer Client API Kernel-Mode bridge implementation + */ +PVRSRV_ERROR +TLServerOpenStreamKM(const IMG_CHAR* pszName, + IMG_UINT32 ui32Mode, + PTL_STREAM_DESC* ppsSD, + PMR** ppsTLPMR) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_ERROR eErrorEO = PVRSRV_OK; + PTL_SNODE psNode; + PTL_STREAM psStream; + TL_STREAM_DESC *psNewSD = NULL; + IMG_HANDLE hEvent; + IMG_BOOL bIsWriteOnly = ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WO ? + IMG_TRUE : IMG_FALSE; + IMG_BOOL bResetOnOpen = ui32Mode & PVRSRV_STREAM_FLAG_RESET_ON_OPEN ? + IMG_TRUE : IMG_FALSE; + IMG_BOOL bNoOpenCB = ui32Mode & PVRSRV_STREAM_FLAG_IGNORE_OPEN_CALLBACK ? + IMG_TRUE : IMG_FALSE; + PTL_GLOBAL_DATA psGD = TLGGD(); + +#if defined(PVR_DPF_FUNCTION_TRACE_ON) + PVR_DPF((PVR_DBG_CALLTRACE, "--> %s:%d entered (%s, %x)", __func__, __LINE__, pszName, ui32Mode)); +#endif + + PVR_ASSERT(pszName); + + /* Acquire TL_GLOBAL_DATA lock here, as if the following TLFindStreamNodeByName + * returns NON NULL PTL_SNODE, we try updating the global data client count and + * PTL_SNODE's psRDesc and we want to make sure the TL_SNODE is valid (eg. 
has + * not been deleted) while we are updating it + */ + OSLockAcquire (psGD->hTLGDLock); + + psNode = TLFindStreamNodeByName(pszName); + if ((psNode == NULL) && (ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WAIT)) + { /* Blocking code to wait for stream to be created if it does not exist */ + eError = OSEventObjectOpen(psGD->hTLEventObj, &hEvent); + PVR_LOG_GOTO_IF_ERROR (eError, "OSEventObjectOpen", e0); + + do + { + if ((psNode = TLFindStreamNodeByName(pszName)) == NULL) + { + PVR_DPF((PVR_DBG_MESSAGE, "Stream %s does not exist, waiting...", pszName)); + + /* Release TL_GLOBAL_DATA lock before sleeping */ + OSLockRelease (psGD->hTLGDLock); + + /* Will exit OK or with timeout, both cases safe to ignore */ + eErrorEO = OSEventObjectWaitTimeout(hEvent, NO_STREAM_WAIT_PERIOD_US); + + /* Acquire lock after waking up */ + OSLockAcquire (psGD->hTLGDLock); + } + } + while ((psNode == NULL) && (eErrorEO == PVRSRV_OK)); + + eError = OSEventObjectClose(hEvent); + PVR_LOG_GOTO_IF_ERROR (eError, "OSEventObjectClose", e0); + } + + /* Make sure we have found a stream node after wait/search */ + if (psNode == NULL) + { + /* Did we exit the wait with timeout, inform caller */ + if (eErrorEO == PVRSRV_ERROR_TIMEOUT) + { + eError = eErrorEO; + } + else + { + eError = PVRSRV_ERROR_NOT_FOUND; + PVR_DPF((PVR_DBG_ERROR, "Stream \"%s\" does not exist", pszName)); + } + goto e0; + } + + psStream = psNode->psStream; + + /* Allocate memory for the stream. The memory will be allocated with the + * first call. */ + eError = TLAllocSharedMemIfNull(psStream); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Failed to allocate memory for stream" + " \"%s\"", pszName)); + goto e0; + } + + if (bIsWriteOnly) + { + + /* If psWDesc == NULL it means that this is the first attempt + * to open stream for write. If yes create the descriptor or increment + * reference count otherwise. 
*/ + if (psNode->psWDesc == NULL) + { + psNewSD = TLMakeStreamDesc(psNode, ui32Mode, NULL); + psNode->psWDesc = psNewSD; + } + else + { + psNewSD = psNode->psWDesc; + psNode->psWDesc->uiRefCount++; + } + + PVR_LOG_GOTO_IF_NOMEM(psNewSD, eError, e0); + + psNode->uiWRefCount++; + } + else + { + /* Only one reader per stream supported */ + if (psNode->psRDesc != NULL) + { + PVR_DPF((PVR_DBG_ERROR, "Cannot open \"%s\" stream, stream already" + " opened", pszName)); + eError = PVRSRV_ERROR_ALREADY_OPEN; + goto e0; + } + + /* Create an event handle for this client to wait on when no data in + * stream buffer. */ + eError = OSEventObjectOpen(psNode->hReadEventObj, &hEvent); + if (eError != PVRSRV_OK) + { + PVR_LOG_ERROR(eError, "OSEventObjectOpen"); + eError = PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT; + goto e0; + } + + psNewSD = TLMakeStreamDesc(psNode, ui32Mode, hEvent); + psNode->psRDesc = psNewSD; + + if (!psNewSD) + { + PVR_DPF((PVR_DBG_ERROR, "Not possible to make a new stream descriptor")); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto e1; + } + + PVR_DPF((PVR_DBG_VERBOSE, + "TLServerOpenStreamKM evList=%p, evObj=%p", + psNode->hReadEventObj, + psNode->psRDesc->hReadEvent)); + } + + /* Copy the import handle back to the user mode API to enable access to + * the stream buffer from user-mode process. */ + eError = DevmemLocalGetImportHandle(TLStreamGetBufferPointer(psStream), + (void**) ppsTLPMR); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemLocalGetImportHandle", e2); + + psGD->uiClientCnt++; + + /* Global data updated. Now release global lock */ + OSLockRelease (psGD->hTLGDLock); + + *ppsSD = psNewSD; + + if (bResetOnOpen) + { + TLStreamReset(psStream); + } + + /* This callback is executed only on reader open. There are some actions + * executed on reader open that don't make much sense for writers e.g. + * injection on time synchronisation packet into the stream. 
*/ + if (!bIsWriteOnly && psStream->pfOnReaderOpenCallback != NULL && !bNoOpenCB) + { + psStream->pfOnReaderOpenCallback(psStream->pvOnReaderOpenUserData); + } + + /* psNode->uiWRefCount is set to '1' on stream create so the first open + * is '2'. */ + if (bIsWriteOnly && psStream->psNotifStream != NULL && + psNode->uiWRefCount == 2) + { + TLStreamMarkStreamOpen(psStream); + } + + PVR_DPF((PVR_DBG_MESSAGE, "%s: Stream %s opened for %s", __func__, pszName, + ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WO ? "write" : "read")); + + PVR_DPF_RETURN_OK; + +e2: + OSFreeMem(psNewSD); +e1: + if (!bIsWriteOnly) + OSEventObjectClose(hEvent); +e0: + OSLockRelease (psGD->hTLGDLock); + PVR_DPF_RETURN_RC (eError); +} + +PVRSRV_ERROR +TLServerCloseStreamKM(PTL_STREAM_DESC psSD) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PTL_GLOBAL_DATA psGD = TLGGD(); + PTL_SNODE psNode; + PTL_STREAM psStream; + IMG_BOOL bDestroyStream; + IMG_BOOL bIsWriteOnly = psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO ? + IMG_TRUE : IMG_FALSE; + + PVR_DPF_ENTERED; + + PVR_ASSERT(psSD); + + /* Quick exit if there are no streams */ + if (psGD->psHead == NULL) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND); + } + + /* Check stream still valid */ + psNode = TLFindStreamNodeByDesc(psSD); + if ((psNode == NULL) || (psNode != psSD->psNode)) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND); + } + + /* Since the descriptor is valid, the stream should not have been made NULL */ + PVR_ASSERT (psNode->psStream); + + /* Save the stream's reference in-case its destruction is required after this + * client is removed */ + psStream = psNode->psStream; + + /* Acquire TL_GLOBAL_DATA lock as the following TLRemoveDescAndTryFreeStreamNode + * call will update the TL_SNODE's descriptor value */ + OSLockAcquire (psGD->hTLGDLock); + + /* Close event handle because event object list might be destroyed in + * TLUnrefDescAndTryFreeStreamNode(). 
*/ + if (!bIsWriteOnly) + { + /* Reset the read position on close if the stream requires it. */ + TLStreamResetReadPos(psStream); + + /* Close and free the event handle resource used by this descriptor */ + eError = OSEventObjectClose(psSD->hReadEvent); + if (eError != PVRSRV_OK) + { + /* Log error but continue as it seems best */ + PVR_LOG_ERROR(eError, "OSEventObjectClose"); + eError = PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT; + } + } + else if (psNode->uiWRefCount == 2 && psStream->psNotifStream != NULL) + { + /* psNode->uiWRefCount is set to '1' on stream create so the last close + * before destruction is '2'. */ + TLStreamMarkStreamClose(psStream); + } + + /* Remove descriptor from stream object/list */ + bDestroyStream = TLUnrefDescAndTryFreeStreamNode (psNode, psSD); + + /* Check the counter is sensible after input data validated. */ + PVR_ASSERT(psGD->uiClientCnt > 0); + psGD->uiClientCnt--; + + OSLockRelease (psGD->hTLGDLock); + + /* Destroy the stream if its TL_SNODE was removed from TL_GLOBAL_DATA */ + if (bDestroyStream) + { + TLStreamDestroy (psStream); + psStream = NULL; + } + + PVR_DPF((PVR_DBG_VERBOSE, "%s: Stream closed", __func__)); + + /* Free the descriptor if ref count reaches 0. */ + if (psSD->uiRefCount == 0) + { + /* Free the stream descriptor object */ + OSFreeMem(psSD); + } + + PVR_DPF_RETURN_RC(eError); +} + +PVRSRV_ERROR +TLServerReserveStreamKM(PTL_STREAM_DESC psSD, + IMG_UINT32* ui32BufferOffset, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32SizeMin, + IMG_UINT32* pui32Available) +{ + TL_GLOBAL_DATA* psGD = TLGGD(); + PTL_SNODE psNode; + IMG_UINT8* pui8Buffer = NULL; + PVRSRV_ERROR eError; + + PVR_DPF_ENTERED; + + PVR_ASSERT(psSD); + + if (!(psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO)) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + + /* Quick exit if there are no streams */ + if (psGD->psHead == NULL) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR); + } + + /* Acquire the global lock. 
We have to be sure that no one modifies + * the list while we are looking for our stream. */ + OSLockAcquire(psGD->hTLGDLock); + /* Check stream still valid */ + psNode = TLFindAndGetStreamNodeByDesc(psSD); + OSLockRelease(psGD->hTLGDLock); + + if ((psNode == NULL) || (psNode != psSD->psNode)) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND); + } + + + /* Since we have a valid stream descriptor, the stream should not have been + * made NULL by any producer context. */ + PVR_ASSERT (psNode->psStream); + + /* The TL writers that currently land here are at a very low to none risk + * to breach max TL packet size constraint (even if there is no reader + * connected to the TL stream and hence eventually will cause the TL stream + * to be full). Hence no need to know the status of TL stream reader + * connection. + */ + eError = TLStreamReserve2(psNode->psStream, &pui8Buffer, ui32Size, + ui32SizeMin, pui32Available, NULL); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, "Failed to reserve %u (%u, %u) bytes in the stream, error %s.", + ui32Size, ui32SizeMin, *pui32Available, PVRSRVGETERRORSTRING(eError))); + } + else if (pui8Buffer == NULL) + { + PVR_DPF((PVR_DBG_WARNING, "Not enough space in the stream.")); + eError = PVRSRV_ERROR_STREAM_FULL; + } + else + { + *ui32BufferOffset = pui8Buffer - psNode->psStream->pbyBuffer; + PVR_ASSERT(*ui32BufferOffset < psNode->psStream->ui32Size); + } + + OSLockAcquire(psGD->hTLGDLock); + TLReturnStreamNode(psNode); + OSLockRelease(psGD->hTLGDLock); + + PVR_DPF_RETURN_RC(eError); +} + +PVRSRV_ERROR +TLServerCommitStreamKM(PTL_STREAM_DESC psSD, + IMG_UINT32 ui32Size) +{ + TL_GLOBAL_DATA* psGD = TLGGD(); + PTL_SNODE psNode; + PVRSRV_ERROR eError; + + PVR_DPF_ENTERED; + + PVR_ASSERT(psSD); + + if (!(psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO)) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + + /* Quick exit if there are no streams */ + if (psGD->psHead == NULL) + { + 
PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR); + } + + /* Acquire the global lock. We have to be sure that no one modifies + * the list while we are looking for our stream. */ + OSLockAcquire(psGD->hTLGDLock); + /* Check stream still valid */ + psNode = TLFindAndGetStreamNodeByDesc(psSD); + OSLockRelease(psGD->hTLGDLock); + + if ((psNode == NULL) || (psNode != psSD->psNode)) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND); + } + + /* Since we have a valid stream descriptor, the stream should not have been + * made NULL by any producer context. */ + PVR_ASSERT (psNode->psStream); + + eError = TLStreamCommit(psNode->psStream, ui32Size); + PVR_LOG_IF_ERROR(eError, "TLStreamCommit"); + + OSLockAcquire(psGD->hTLGDLock); + TLReturnStreamNode(psNode); + OSLockRelease(psGD->hTLGDLock); + + PVR_DPF_RETURN_RC(eError); +} + +PVRSRV_ERROR +TLServerDiscoverStreamsKM(const IMG_CHAR *pszNamePattern, + IMG_UINT32 ui32Size, + IMG_CHAR *pszStreams, + IMG_UINT32 *pui32NumFound) +{ + PTL_SNODE psNode = NULL; + IMG_CHAR (*paszStreams)[PRVSRVTL_MAX_STREAM_NAME_SIZE] = + (IMG_CHAR (*)[PRVSRVTL_MAX_STREAM_NAME_SIZE]) (void *)pszStreams; + + if (*pszNamePattern == '\0') + return PVRSRV_ERROR_INVALID_PARAMS; + + if (ui32Size % PRVSRVTL_MAX_STREAM_NAME_SIZE != 0) + return PVRSRV_ERROR_INVALID_PARAMS; + + /* Quick exit if there are no streams */ + if (TLGGD()->psHead == NULL) + { + *pui32NumFound = 0; + return PVRSRV_OK; + } + + OSLockAcquire(TLGGD()->hTLGDLock); + + *pui32NumFound = TLDiscoverStreamNodes(pszNamePattern, paszStreams, + ui32Size / PRVSRVTL_MAX_STREAM_NAME_SIZE); + + /* Find "tlctrl" stream and reset it */ + psNode = TLFindStreamNodeByName(PVRSRV_TL_CTLR_STREAM); + if (psNode != NULL) + TLStreamReset(psNode->psStream); + + OSLockRelease(TLGGD()->hTLGDLock); + + return PVRSRV_OK; +} + +PVRSRV_ERROR +TLServerAcquireDataKM(PTL_STREAM_DESC psSD, + IMG_UINT32* puiReadOffset, + IMG_UINT32* puiReadLen) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + TL_GLOBAL_DATA* psGD = TLGGD(); + 
IMG_UINT32 uiTmpOffset; + IMG_UINT32 uiTmpLen = 0; + PTL_SNODE psNode; + + PVR_DPF_ENTERED; + + PVR_ASSERT(psSD); + + TL_COUNTER_INC(psSD->ui32AcquireCount); + + /* Quick exit if there are no streams */ + if (psGD->psHead == NULL) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR); + } + + /* Check stream still valid */ + psNode = TLFindStreamNodeByDesc(psSD); + if ((psNode == NULL) || (psNode != psSD->psNode)) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND); + } + + /* If we are here, the stream will never be made NULL until this context itself + * calls TLRemoveDescAndTryFreeStreamNode(). This is because the producer will + * fail to make the stream NULL (by calling TLTryRemoveStreamAndFreeStreamNode) + * when a valid stream descriptor is present (i.e. a client is connected). + * Hence, no checks for stream being NON NULL are required after this. */ + PVR_ASSERT (psNode->psStream); + + psSD->ui32ReadLen = 0; /* Handle NULL read returns */ + + do + { + uiTmpLen = TLStreamAcquireReadPos(psNode->psStream, psSD->ui32Flags & PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK, &uiTmpOffset); + + /* Check we have not already exceeded read limit with just offset + * regardless of data length to ensure the client sees the RC */ + if (psSD->ui32Flags & PVRSRV_STREAM_FLAG_READ_LIMIT) + { + /* Check to see if we are reading beyond the read limit */ + if (uiTmpOffset >= psSD->ui32ReadLimit) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_READLIMIT_REACHED); + } + } + + if (uiTmpLen > 0) + { /* Data found */ + + /* Check we have not already exceeded read limit offset+len */ + if (psSD->ui32Flags & PVRSRV_STREAM_FLAG_READ_LIMIT) + { + /* Adjust the read length if it goes beyond the read limit + * limit always guaranteed to be on packet */ + if ((uiTmpOffset + uiTmpLen) >= psSD->ui32ReadLimit) + { + uiTmpLen = psSD->ui32ReadLimit - uiTmpOffset; + } + } + + *puiReadOffset = uiTmpOffset; + *puiReadLen = uiTmpLen; + psSD->ui32ReadLen = uiTmpLen; /* Save the original data length in the 
stream desc */ + PVR_DPF_RETURN_OK; + } + else if (!(psSD->ui32Flags & PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING)) + { /* No data found blocking */ + + /* Instead of doing a complete sleep for `NO_DATA_WAIT_PERIOD_US` us, we sleep in chunks + * of 168 ms. In a "deferred" signal scenario from writer, this gives us a chance to + * wake-up (timeout) early and continue reading in-case some data is available */ + IMG_UINT64 ui64WaitInChunksUs = MIN(NO_DATA_WAIT_PERIOD_US, 168000ULL); + IMG_BOOL bDataFound = IMG_FALSE; + + TL_COUNTER_INC(psSD->ui32NoDataSleep); + + LOOP_UNTIL_TIMEOUT(NO_DATA_WAIT_PERIOD_US) + { + eError = OSEventObjectWaitTimeout(psSD->hReadEvent, ui64WaitInChunksUs); + if (eError == PVRSRV_OK) + { + bDataFound = IMG_TRUE; + TL_COUNTER_INC(psSD->ui32Signalled); + break; + } + else if (eError == PVRSRV_ERROR_TIMEOUT) + { + if (TLStreamOutOfData(psNode->psStream)) + { + /* Return on timeout if stream empty, else let while exit and return data */ + continue; + } + else + { + bDataFound = IMG_TRUE; + TL_COUNTER_INC(psSD->ui32TimeoutData); + PVR_DPF((PVR_DBG_MESSAGE, "%s: Data found at timeout. 
Current BuffUt = %u", + __func__, TLStreamGetUT(psNode->psStream))); + break; + } + } + else + { /* Some other system error with event objects */ + PVR_DPF_RETURN_RC(eError); + } + } END_LOOP_UNTIL_TIMEOUT(); + + if (bDataFound) + { + continue; + } + else + { + TL_COUNTER_INC(psSD->ui32TimeoutEmpty); + return PVRSRV_ERROR_TIMEOUT; + } + } + else + { /* No data non-blocking */ + TL_COUNTER_INC(psSD->ui32NoData); + + /* When no-data in non-blocking mode, uiReadOffset should be set to NO_ACQUIRE + * signifying there's no need of Release call */ + *puiReadOffset = NO_ACQUIRE; + *puiReadLen = 0; + PVR_DPF_RETURN_OK; + } + } + while (1); +} + +PVRSRV_ERROR +TLServerReleaseDataKM(PTL_STREAM_DESC psSD, + IMG_UINT32 uiReadOffset, + IMG_UINT32 uiReadLen) +{ + TL_GLOBAL_DATA* psGD = TLGGD(); + PTL_SNODE psNode; + + PVR_DPF_ENTERED; + + /* Unreferenced in release builds */ + PVR_UNREFERENCED_PARAMETER(uiReadOffset); + + PVR_ASSERT(psSD); + + /* Quick exit if there are no streams */ + if (psGD->psHead == NULL) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR); + } + + if ((uiReadLen % PVRSRVTL_PACKET_ALIGNMENT != 0)) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + + /* Check stream still valid */ + psNode = TLFindStreamNodeByDesc(psSD); + if ((psNode == NULL) || (psNode != psSD->psNode)) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND); + } + + /* Since we have a valid stream descriptor, the stream should not have been + * made NULL by any producer context. 
*/ + PVR_ASSERT (psNode->psStream); + + PVR_DPF((PVR_DBG_VERBOSE, "TLReleaseDataKM uiReadOffset=%d, uiReadLen=%d", uiReadOffset, uiReadLen)); + + /* Move read position on to free up space in stream buffer */ + PVR_DPF_RETURN_RC(TLStreamAdvanceReadPos(psNode->psStream, uiReadLen, psSD->ui32ReadLen)); +} + +PVRSRV_ERROR +TLServerWriteDataKM(PTL_STREAM_DESC psSD, + IMG_UINT32 ui32Size, + IMG_BYTE* pui8Data) +{ + TL_GLOBAL_DATA* psGD = TLGGD(); + PTL_SNODE psNode; + PVRSRV_ERROR eError; + + PVR_DPF_ENTERED; + + PVR_ASSERT(psSD); + + if (!(psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO)) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + + /* Quick exit if there are no streams */ + if (psGD->psHead == NULL) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR); + } + + OSLockAcquire(psGD->hTLGDLock); + /* Check stream still valid */ + psNode = TLFindAndGetStreamNodeByDesc(psSD); + OSLockRelease(psGD->hTLGDLock); + + if ((psNode == NULL) || (psNode != psSD->psNode)) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND); + } + + /* Since we have a valid stream descriptor, the stream should not have been + * made NULL by any producer context. */ + PVR_ASSERT (psNode->psStream); + + eError = TLStreamWrite(psNode->psStream, pui8Data, ui32Size); + PVR_LOG_IF_ERROR(eError, "TLStreamWrite"); + + OSLockAcquire(psGD->hTLGDLock); + TLReturnStreamNode(psNode); + OSLockRelease(psGD->hTLGDLock); + + PVR_DPF_RETURN_RC(eError); +} + +/****************************************************************************** + End of file (tlserver.c) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/tlserver.h b/drivers/gpu/drm/phytium/octopus/tlserver.h new file mode 100644 index 000000000000..b15a6d1a456d --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/tlserver.h @@ -0,0 +1,97 @@ +/*************************************************************************/ /*! 
+@File +@Title KM server Transport Layer implementation +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Main bridge APIs for Transport Layer client functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef TLSERVER_H +#define TLSERVER_H + +#include "img_defs.h" +#include "pvr_debug.h" +#include "connection_server.h" + +#include "tlintern.h" + +/* + * Transport Layer Client API Kernel-Mode bridge implementation + */ + +PVRSRV_ERROR TLServerConnectKM(CONNECTION_DATA *psConnection); +PVRSRV_ERROR TLServerDisconnectKM(CONNECTION_DATA *psConnection); + +PVRSRV_ERROR TLServerOpenStreamKM(const IMG_CHAR* pszName, + IMG_UINT32 ui32Mode, + PTL_STREAM_DESC* ppsSD, + PMR** ppsTLPMR); + +PVRSRV_ERROR TLServerCloseStreamKM(PTL_STREAM_DESC psSD); + +PVRSRV_ERROR TLServerDiscoverStreamsKM(const IMG_CHAR *pszNamePattern, + IMG_UINT32 ui32Max, + IMG_CHAR *pszStreams, + IMG_UINT32 *pui32NumFound); + +PVRSRV_ERROR TLServerReserveStreamKM(PTL_STREAM_DESC psSD, + IMG_UINT32* ui32BufferOffset, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32SizeMin, + IMG_UINT32* pui32Available); + +PVRSRV_ERROR TLServerCommitStreamKM(PTL_STREAM_DESC psSD, + IMG_UINT32 ui32Size); + +PVRSRV_ERROR TLServerAcquireDataKM(PTL_STREAM_DESC psSD, + IMG_UINT32* puiReadOffset, + IMG_UINT32* puiReadLen); + +PVRSRV_ERROR TLServerReleaseDataKM(PTL_STREAM_DESC psSD, + IMG_UINT32 uiReadOffset, + IMG_UINT32 uiReadLen); + +PVRSRV_ERROR TLServerWriteDataKM(PTL_STREAM_DESC psSD, + IMG_UINT32 ui32Size, + IMG_BYTE *pui8Data); + +#endif /* TLSERVER_H */ + 
+/****************************************************************************** + End of file (tlserver.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/tlstream.c b/drivers/gpu/drm/phytium/octopus/tlstream.c new file mode 100644 index 000000000000..9908170aad00 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/tlstream.c @@ -0,0 +1,1622 @@ +/*************************************************************************/ /*! +@File +@Title Transport Layer kernel side API implementation. +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Transport Layer API implementation. + These functions are provided to driver components. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +//#define PVR_DPF_FUNCTION_TRACE_ON 1 +#undef PVR_DPF_FUNCTION_TRACE_ON +#include "pvr_debug.h" + +#include "allocmem.h" +#include "devicemem.h" +#include "pvrsrv_error.h" +#include "osfunc.h" +#include "log2.h" + +#include "tlintern.h" +#include "tlstream.h" + +#include "pvrsrv.h" + +#define EVENT_OBJECT_TIMEOUT_US 1000000ULL +#define READ_PENDING_TIMEOUT_US 100000ULL + +/*! Compute maximum TL packet size for this stream. Max packet size will be + * minimum of PVRSRVTL_MAX_PACKET_SIZE and (BufferSize / 2.5). 
This computation + * is required to avoid a corner case that was observed when TL buffer size is + * smaller than twice of TL max packet size and read, write index are positioned + * in such a way that the TL packet (write packet + padding packet) size is may + * be bigger than the buffer size itself. + */ +#define GET_TL_MAX_PACKET_SIZE( bufSize ) PVRSRVTL_ALIGN( MIN( PVRSRVTL_MAX_PACKET_SIZE, ( 2 * bufSize ) / 5 ) ) + +/* Given the state of the buffer it returns a number of bytes that the client + * can use for a successful allocation. */ +static INLINE IMG_UINT32 suggestAllocSize(IMG_UINT32 ui32LRead, + IMG_UINT32 ui32LWrite, + IMG_UINT32 ui32CBSize, + IMG_UINT32 ui32ReqSizeMin, + IMG_UINT32 ui32MaxPacketSize) +{ + IMG_UINT32 ui32AvSpace = 0; + + /* This could be written in fewer lines using the ? operator but it + would not be kind to potential readers of this source at all. */ + if (ui32LRead > ui32LWrite) /* Buffer WRAPPED */ + { + if ((ui32LRead - ui32LWrite) > (sizeof(PVRSRVTL_PACKETHDR) + ui32ReqSizeMin + (IMG_INT) BUFFER_RESERVED_SPACE)) + { + ui32AvSpace = ui32LRead - ui32LWrite - sizeof(PVRSRVTL_PACKETHDR) - (IMG_INT) BUFFER_RESERVED_SPACE; + } + } + else /* Normal, no wrap */ + { + if ((ui32CBSize - ui32LWrite) > (sizeof(PVRSRVTL_PACKETHDR) + ui32ReqSizeMin + (IMG_INT) BUFFER_RESERVED_SPACE)) + { + ui32AvSpace = ui32CBSize - ui32LWrite - sizeof(PVRSRVTL_PACKETHDR) - (IMG_INT) BUFFER_RESERVED_SPACE; + } + else if ((ui32LRead - 0) > (sizeof(PVRSRVTL_PACKETHDR) + ui32ReqSizeMin + (IMG_INT) BUFFER_RESERVED_SPACE)) + { + ui32AvSpace = ui32LRead - sizeof(PVRSRVTL_PACKETHDR) - (IMG_INT) BUFFER_RESERVED_SPACE; + } + } + /* The max size of a TL packet currently is UINT16. adjust accordingly */ + return MIN(ui32AvSpace, ui32MaxPacketSize); +} + +/* Returns bytes left in the buffer. Negative if there is not any. 
+ * two 8b aligned values are reserved, one for the write failed buffer flag + * and one to be able to distinguish the buffer full state to the buffer + * empty state. + * Always returns free space -8 even when the "write failed" packet may be + * already in the stream before this write. */ +static INLINE IMG_INT +circbufSpaceLeft(IMG_UINT32 ui32Read, IMG_UINT32 ui32Write, IMG_UINT32 ui32size) +{ + /* We need to reserve 8b (one packet) in the buffer to be able to tell empty + * buffers from full buffers and one more for packet write fail packet */ + if (ui32Read > ui32Write) + { + return (IMG_INT)ui32Read - (IMG_INT)ui32Write - (IMG_INT)BUFFER_RESERVED_SPACE; + } + else + { + return (IMG_INT)ui32size - ((IMG_INT)ui32Write - (IMG_INT)ui32Read) - (IMG_INT)BUFFER_RESERVED_SPACE; + } +} + +IMG_UINT32 TLStreamGetUT(IMG_HANDLE hStream) +{ + PTL_STREAM psStream = (PTL_STREAM) hStream; + IMG_UINT32 ui32LRead = psStream->ui32Read, ui32LWrite = psStream->ui32Write; + + if (ui32LWrite >= ui32LRead) + { + return (ui32LWrite-ui32LRead); + } + else + { + return (psStream->ui32Size-ui32LRead+ui32LWrite); + } +} + +PVRSRV_ERROR TLAllocSharedMemIfNull(IMG_HANDLE hStream) +{ + PTL_STREAM psStream = (PTL_STREAM) hStream; + PVRSRV_ERROR eError; + + /* CPU Local memory used as these buffers are not accessed by the device. + * CPU Uncached write combine memory used to improve write performance, + * memory barrier added in TLStreamCommit to ensure data written to memory + * before CB write point is updated before consumption by the reader. 
+ */ + IMG_CHAR pszBufferLabel[PRVSRVTL_MAX_STREAM_NAME_SIZE + 20]; + PVRSRV_MEMALLOCFLAGS_T uiMemFlags = PVRSRV_MEMALLOCFLAG_CPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | + PVRSRV_MEMALLOCFLAG_GPU_READABLE | + PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | + PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | + PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(CPU_LOCAL); /* TL for now is only used by host driver, so cpulocal mem suffices */ + + /* Exit if memory has already been allocated. */ + if (psStream->pbyBuffer != NULL) + return PVRSRV_OK; + + OSSNPrintf(pszBufferLabel, sizeof(pszBufferLabel), "TLStreamBuf-%s", + psStream->szName); + + + /* Use HostMemDeviceNode instead of psStream->psDevNode to benefit from faster + * accesses to CPU local memory. When the framework to access CPU_LOCAL device + * memory from GPU is fixed, we'll switch back to use psStream->psDevNode for + * TL buffers */ + eError = DevmemAllocateExportable((IMG_HANDLE)PVRSRVGetPVRSRVData()->psHostMemDeviceNode, + (IMG_DEVMEM_SIZE_T) psStream->ui32Size, + (IMG_DEVMEM_ALIGN_T) OSGetPageSize(), + ExactLog2(OSGetPageSize()), + uiMemFlags, + pszBufferLabel, + &psStream->psStreamMemDesc); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAllocateExportable", e0); + + eError = DevmemAcquireCpuVirtAddr(psStream->psStreamMemDesc, + (void**) &psStream->pbyBuffer); + PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", e1); + + return PVRSRV_OK; + +e1: + DevmemFree(psStream->psStreamMemDesc); +e0: + return eError; +} + +void TLFreeSharedMem(IMG_HANDLE hStream) +{ + PTL_STREAM psStream = (PTL_STREAM) hStream; + + if (psStream->pbyBuffer != NULL) + { + DevmemReleaseCpuVirtAddr(psStream->psStreamMemDesc); + psStream->pbyBuffer = NULL; + } + if (psStream->psStreamMemDesc != NULL) + { + DevmemFree(psStream->psStreamMemDesc); + psStream->psStreamMemDesc = NULL; + } +} + +/* Special space left routine for TL_FLAG_PERMANENT_NO_WRAP streams */ +static INLINE IMG_UINT +bufSpaceLeft(IMG_UINT32 ui32Read, IMG_UINT32 ui32Write, IMG_UINT32 
ui32size) +{ + /* buffers from full buffers and one more for packet write fail packet */ + PVR_ASSERT(ui32Read<=ui32Write); + return ui32size - ui32Write; +} + +/******************************************************************************* + * TL Server public API implementation. + ******************************************************************************/ +PVRSRV_ERROR +TLStreamCreate(IMG_HANDLE *phStream, + const IMG_CHAR *szStreamName, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32StreamFlags, + TL_STREAM_ONREADEROPENCB pfOnReaderOpenCB, + void *pvOnReaderOpenUD, + TL_STREAM_SOURCECB pfProducerCB, + void *pvProducerUD) +{ + PTL_STREAM psTmp; + PVRSRV_ERROR eError; + IMG_HANDLE hEventList; + PTL_SNODE psn; + TL_OPMODE eOpMode; + + PVR_DPF_ENTERED; + /* Parameter checks: non NULL handler required */ + if (NULL == phStream) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + if (szStreamName == NULL || *szStreamName == '\0' || + OSStringLength(szStreamName) >= PRVSRVTL_MAX_STREAM_NAME_SIZE) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + + eOpMode = ui32StreamFlags & TL_OPMODE_MASK; + if (( eOpMode <= TL_OPMODE_UNDEF ) || ( eOpMode >= TL_OPMODE_LAST )) + { + PVR_DPF((PVR_DBG_ERROR, "OpMode for TL stream is invalid")); + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + + /* Acquire TL_GLOBAL_DATA lock here because, if the following TLFindStreamNodeByName() + * returns NULL, a new TL_SNODE will be added to TL_GLOBAL_DATA's TL_SNODE list */ + OSLockAcquire (TLGGD()->hTLGDLock); + + /* Check if there already exists a stream with this name. 
*/ + psn = TLFindStreamNodeByName( szStreamName ); + if (NULL != psn) + { + eError = PVRSRV_ERROR_ALREADY_EXISTS; + goto e0; + } + + /* Allocate stream structure container (stream struct) for the new stream */ + psTmp = OSAllocZMem(sizeof(TL_STREAM)); + if (NULL == psTmp) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto e0; + } + + OSStringLCopy(psTmp->szName, szStreamName, PRVSRVTL_MAX_STREAM_NAME_SIZE); + + if (ui32StreamFlags & TL_FLAG_FORCE_FLUSH) + { + psTmp->bWaitForEmptyOnDestroy = IMG_TRUE; + } + + psTmp->bNoSignalOnCommit = (ui32StreamFlags&TL_FLAG_NO_SIGNAL_ON_COMMIT) ? IMG_TRUE : IMG_FALSE; + psTmp->bNoWrapPermanent = (ui32StreamFlags&TL_FLAG_PERMANENT_NO_WRAP) ? IMG_TRUE : IMG_FALSE; + + psTmp->eOpMode = eOpMode; + if (psTmp->eOpMode == TL_OPMODE_BLOCK) + { + /* Only allow drop properties to be mixed with no-wrap type streams + * since space does not become available when reads take place hence + * no point blocking. + */ + if (psTmp->bNoWrapPermanent) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto e1; + } + } + + /* Additional synchronisation object required for some streams e.g. 
blocking */ + eError = OSEventObjectCreate(NULL, &psTmp->hProducerEventObj); + PVR_GOTO_IF_ERROR(eError, e1); + /* Create an event handle for this kind of stream */ + eError = OSEventObjectOpen(psTmp->hProducerEventObj, &psTmp->hProducerEvent); + PVR_GOTO_IF_ERROR(eError, e2); + + psTmp->pfOnReaderOpenCallback = pfOnReaderOpenCB; + psTmp->pvOnReaderOpenUserData = pvOnReaderOpenUD; + /* Remember producer supplied CB and data for later */ + psTmp->pfProducerCallback = (void(*)(void))pfProducerCB; + psTmp->pvProducerUserData = pvProducerUD; + + psTmp->psNotifStream = NULL; + + /* Round the requested bytes to a multiple of array elements' size, eg round 3 to 4 */ + psTmp->ui32Size = PVRSRVTL_ALIGN(ui32Size); + + /* Signalling from TLStreamCommit is deferred until buffer is slightly (~12%) filled */ + psTmp->ui32ThresholdUsageForSignal = psTmp->ui32Size >> 3; + psTmp->ui32MaxPacketSize = GET_TL_MAX_PACKET_SIZE(psTmp->ui32Size); + psTmp->ui32Read = 0; + psTmp->ui32Write = 0; + psTmp->ui32Pending = NOTHING_PENDING; + psTmp->bReadPending = IMG_FALSE; + psTmp->bSignalPending = IMG_FALSE; + +#if defined(TL_BUFFER_STATS) + OSAtomicWrite(&psTmp->bNoReaderSinceFirstReserve, 0); + /* Setting MAX possible value for "minimum" time to full, + * helps in the logic which calculates this time */ + psTmp->ui32MinTimeToFullInUs = IMG_UINT32_MAX; +#endif + + /* Memory will be allocated on first connect to the stream */ + if (!(ui32StreamFlags & TL_FLAG_ALLOCATE_ON_FIRST_OPEN)) + { + /* Allocate memory for the circular buffer and export it to user space. */ + eError = TLAllocSharedMemIfNull(psTmp); + PVR_LOG_GOTO_IF_ERROR(eError, "TLAllocSharedMem", e3); + } + + /* Synchronisation object to synchronise with user side data transfers. 
*/ + eError = OSEventObjectCreate(psTmp->szName, &hEventList); + PVR_GOTO_IF_ERROR(eError, e4); + + eError = OSLockCreate (&psTmp->hStreamWLock); + PVR_GOTO_IF_ERROR(eError, e5); + + eError = OSLockCreate (&psTmp->hReadLock); + PVR_GOTO_IF_ERROR(eError, e6); + + /* Now remember the stream in the global TL structures */ + psn = TLMakeSNode(hEventList, (TL_STREAM *)psTmp, NULL); + PVR_GOTO_IF_NOMEM(psn, eError, e7); + + /* Stream node created, now reset the write reference count to 1 + * (i.e. this context's reference) */ + psn->uiWRefCount = 1; + + TLAddStreamNode(psn); + + /* Release TL_GLOBAL_DATA lock as the new TL_SNODE is now added to the list */ + OSLockRelease (TLGGD()->hTLGDLock); + + /* Best effort signal, client wait timeout will ultimately let it find the + * new stream if this fails, acceptable to avoid clean-up as it is tricky + * at this point */ + (void) OSEventObjectSignal(TLGGD()->hTLEventObj); + + /* Pass the newly created stream handle back to caller */ + *phStream = (IMG_HANDLE)psTmp; + PVR_DPF_RETURN_OK; + +e7: + OSLockDestroy(psTmp->hReadLock); +e6: + OSLockDestroy(psTmp->hStreamWLock); +e5: + OSEventObjectDestroy(hEventList); +e4: + TLFreeSharedMem(psTmp); +e3: + OSEventObjectClose(psTmp->hProducerEvent); +e2: + OSEventObjectDestroy(psTmp->hProducerEventObj); +e1: + OSFreeMem(psTmp); +e0: + OSLockRelease (TLGGD()->hTLGDLock); + + PVR_DPF_RETURN_RC(eError); +} + +void TLStreamReset(IMG_HANDLE hStream) +{ + PTL_STREAM psStream = (PTL_STREAM) hStream; + + PVR_ASSERT(psStream != NULL); + + OSLockAcquire(psStream->hStreamWLock); + + while (psStream->ui32Pending != NOTHING_PENDING) + { + PVRSRV_ERROR eError; + + /* We're in the middle of a write so we cannot reset the stream. + * We are going to wait until the data is committed. Release lock while + * we're here. */ + OSLockRelease(psStream->hStreamWLock); + + /* Event when psStream->bNoSignalOnCommit is set we can still use + * the timeout capability of event object API (time in us). 
*/ + eError = OSEventObjectWaitTimeout(psStream->psNode->hReadEventObj, 100); + if (eError != PVRSRV_ERROR_TIMEOUT && eError != PVRSRV_OK) + { + PVR_LOG_RETURN_VOID_IF_ERROR(eError, "OSEventObjectWaitTimeout"); + } + + OSLockAcquire(psStream->hStreamWLock); + + /* Either timeout occurred or the stream has been signalled. + * If former we have to check if the data was committed and if latter + * if the stream hasn't been re-reserved. Either way we have to go + * back to the condition. + * If the stream has been released we'll exit with the lock held so + * we can finally go and reset the stream. */ + } + + psStream->ui32Read = 0; + psStream->ui32Write = 0; + /* we know that ui32Pending already has correct value (no need to set) */ + + OSLockRelease(psStream->hStreamWLock); +} + +PVRSRV_ERROR +TLStreamSetNotifStream(IMG_HANDLE hStream, IMG_HANDLE hNotifStream) +{ + PTL_STREAM psStream = (PTL_STREAM) hStream; + + if (hStream == NULL || hNotifStream == NULL) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + + psStream->psNotifStream = (PTL_STREAM) hNotifStream; + + return PVRSRV_OK; +} + +PVRSRV_ERROR +TLStreamReconfigure( + IMG_HANDLE hStream, + IMG_UINT32 ui32StreamFlags) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PTL_STREAM psTmp; + TL_OPMODE eOpMode; + + PVR_DPF_ENTERED; + + if (NULL == hStream) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + + eOpMode = ui32StreamFlags & TL_OPMODE_MASK; + if (( eOpMode <= TL_OPMODE_UNDEF ) || ( eOpMode >= TL_OPMODE_LAST )) + { + PVR_DPF((PVR_DBG_ERROR, "OpMode for TL stream is invalid")); + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + + psTmp = (PTL_STREAM)hStream; + + /* Prevent the TL Stream buffer from being written to + * while its mode is being reconfigured + */ + OSLockAcquire (psTmp->hStreamWLock); + if (NOTHING_PENDING != psTmp->ui32Pending) + { + OSLockRelease (psTmp->hStreamWLock); + PVR_DPF_RETURN_RC(PVRSRV_ERROR_NOT_READY); + } + psTmp->ui32Pending = 0; + OSLockRelease (psTmp->hStreamWLock); 
+ + psTmp->eOpMode = eOpMode; + if (psTmp->eOpMode == TL_OPMODE_BLOCK) + { + /* Only allow drop properties to be mixed with no-wrap type streams + * since space does not become available when reads take place hence + * no point blocking. + */ + if (psTmp->bNoWrapPermanent) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto e1; + } + } + + OSLockAcquire (psTmp->hStreamWLock); + psTmp->ui32Pending = NOTHING_PENDING; + OSLockRelease (psTmp->hStreamWLock); +e1: + PVR_DPF_RETURN_RC(eError); +} + +PVRSRV_ERROR +TLStreamOpen(IMG_HANDLE *phStream, + const IMG_CHAR *szStreamName) +{ + PTL_SNODE psTmpSNode; + + PVR_DPF_ENTERED; + + if (NULL == phStream || NULL == szStreamName) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + + /* Acquire the TL_GLOBAL_DATA lock first to ensure, + * the TL_STREAM while returned and being modified, + * is not deleted by some other context */ + OSLockAcquire (TLGGD()->hTLGDLock); + + /* Search for a stream node with a matching stream name */ + psTmpSNode = TLFindStreamNodeByName(szStreamName); + + if (NULL == psTmpSNode) + { + OSLockRelease (TLGGD()->hTLGDLock); + PVR_DPF_RETURN_RC(PVRSRV_ERROR_NOT_FOUND); + } + + if (psTmpSNode->psStream->psNotifStream != NULL && + psTmpSNode->uiWRefCount == 1) + { + TLStreamMarkStreamOpen(psTmpSNode->psStream); + } + + /* The TL_SNODE->uiWRefCount governs the presence of this node in the + * TL_GLOBAL_DATA list i.e. when uiWRefCount falls to zero we try removing + * this node from the TL_GLOBAL_DATA list. 
Hence, is protected using the + * TL_GLOBAL_DATA lock and not TL_STREAM lock */ + psTmpSNode->uiWRefCount++; + + OSLockRelease (TLGGD()->hTLGDLock); + + /* Return the stream handle to the caller */ + *phStream = (IMG_HANDLE)psTmpSNode->psStream; + + PVR_DPF_RETURN_VAL(PVRSRV_OK); +} + +void +TLStreamClose(IMG_HANDLE hStream) +{ + PTL_STREAM psTmp; + IMG_BOOL bDestroyStream; + + PVR_DPF_ENTERED; + + if (NULL == hStream) + { + PVR_DPF((PVR_DBG_WARNING, + "TLStreamClose failed as NULL stream handler passed, nothing done.")); + PVR_DPF_RETURN; + } + + psTmp = (PTL_STREAM)hStream; + + /* Acquire TL_GLOBAL_DATA lock for updating the reference count as this will be required + * in-case this TL_STREAM node is to be deleted */ + OSLockAcquire (TLGGD()->hTLGDLock); + + /* Decrement write reference counter of the stream */ + psTmp->psNode->uiWRefCount--; + + if (0 != psTmp->psNode->uiWRefCount) + { + /* The stream is still being used in other context(s) do not destroy + * anything */ + + /* uiWRefCount == 1 means that stream was closed for write. Next + * close is pairing TLStreamCreate(). Send notification to indicate + * that no writer are connected to the stream any more. */ + if (psTmp->psNotifStream != NULL && psTmp->psNode->uiWRefCount == 1) + { + TLStreamMarkStreamClose(psTmp); + } + + OSLockRelease (TLGGD()->hTLGDLock); + PVR_DPF_RETURN; + } + else + { + /* Now we try removing this TL_STREAM from TL_GLOBAL_DATA */ + + if (psTmp->bWaitForEmptyOnDestroy) + { + /* We won't require the TL_STREAM lock to be acquired here for accessing its read + * and write offsets. REASON: We are here because there is no producer context + * referencing this TL_STREAM, hence its ui32Write offset won't be changed now. 
+ * Also, the update of ui32Read offset is not protected by locks */ + while (psTmp->ui32Read != psTmp->ui32Write) + { + /* Release lock before sleeping */ + OSLockRelease (TLGGD()->hTLGDLock); + + OSEventObjectWaitTimeout(psTmp->hProducerEvent, EVENT_OBJECT_TIMEOUT_US); + + OSLockAcquire (TLGGD()->hTLGDLock); + + /* Ensure destruction of stream is still required */ + if (0 != psTmp->psNode->uiWRefCount) + { + OSLockRelease (TLGGD()->hTLGDLock); + PVR_DPF_RETURN; + } + } + } + + /* Try removing the stream from TL_GLOBAL_DATA */ + bDestroyStream = TLTryRemoveStreamAndFreeStreamNode (psTmp->psNode); + + OSLockRelease (TLGGD()->hTLGDLock); + + if (bDestroyStream) + { + /* Destroy the stream if it was removed from TL_GLOBAL_DATA */ + TLStreamDestroy (psTmp); + psTmp = NULL; + } + PVR_DPF_RETURN; + } +} + +/* + * DoTLSetPacketHeader + * + * Ensure that whenever we update a Header we always add the RESERVED field + */ +static inline void DoTLSetPacketHeader(PVRSRVTL_PPACKETHDR, IMG_UINT32); +static inline void +DoTLSetPacketHeader(PVRSRVTL_PPACKETHDR pHdr, + IMG_UINT32 ui32Val) +{ + PVR_ASSERT(((size_t)pHdr & (size_t)(PVRSRVTL_PACKET_ALIGNMENT - 1)) == 0); + + /* Check that this is a correctly aligned packet header. 
*/ + if (((size_t)pHdr & (size_t)(PVRSRVTL_PACKET_ALIGNMENT - 1)) != 0) + { + /* Should return an error because the header is misaligned */ + PVR_DPF((PVR_DBG_ERROR, "%s: Misaligned header @ %p", __func__, pHdr)); + pHdr->uiTypeSize = ui32Val; + } + else + { + pHdr->uiTypeSize = ui32Val; + pHdr->uiReserved = PVRSRVTL_PACKETHDR_RESERVED; + } +} + +static PVRSRV_ERROR +DoTLStreamReserve(IMG_HANDLE hStream, + IMG_UINT8 **ppui8Data, + IMG_UINT32 ui32ReqSize, + IMG_UINT32 ui32ReqSizeMin, + PVRSRVTL_PACKETTYPE ePacketType, + IMG_UINT32* pui32AvSpace, + IMG_UINT32* pui32Flags) +{ + PTL_STREAM psTmp; + IMG_UINT32 *pui32Buf, ui32LRead, ui32LWrite, ui32LPending, lReqSizeAligned, lReqSizeActual, ui32CreateFreeSpace; + IMG_UINT32 ui32InputFlags = 0; + IMG_INT pad, iFreeSpace; + IMG_UINT8 *pui8IncrRead = NULL; + PVRSRVTL_PPACKETHDR pHdr; + + PVR_DPF_ENTERED; + if (pui32AvSpace) *pui32AvSpace = 0; + if (pui32Flags) + { + ui32InputFlags = *pui32Flags; + *pui32Flags = 0; + } + + if (NULL == hStream) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + psTmp = (PTL_STREAM)hStream; + + /* Assert used as the packet type parameter is currently only provided + * by the TL APIs, not the calling client */ + PVR_ASSERT((PVRSRVTL_PACKETTYPE_UNDEF < ePacketType) && (PVRSRVTL_PACKETTYPE_LAST >= ePacketType)); + + /* The buffer is only used in "rounded" (aligned) chunks */ + lReqSizeAligned = PVRSRVTL_ALIGN(ui32ReqSize); + + /* Lock the stream before reading it's pending value, because if pending is set + * to NOTHING_PENDING, we update the pending value such that subsequent calls to + * this function from other context(s) fail with PVRSRV_ERROR_NOT_READY */ + OSLockAcquire (psTmp->hStreamWLock); + +#if defined(TL_BUFFER_STATS) + /* If writing into an empty buffer, start recording time-to-full */ + if (psTmp->ui32Read == psTmp->ui32Write) + { + OSAtomicWrite(&psTmp->bNoReaderSinceFirstReserve, 1); + psTmp->ui32TimeStart = OSClockus(); + } + + if (ui32ReqSize > 
psTmp->ui32MaxReserveWatermark) + { + psTmp->ui32MaxReserveWatermark = ui32ReqSize; + } +#endif + + /* Get a local copy of the stream buffer parameters */ + ui32LRead = psTmp->ui32Read; + ui32LWrite = psTmp->ui32Write; + ui32LPending = psTmp->ui32Pending; + + /* Multiple pending reserves are not supported. */ + if (NOTHING_PENDING != ui32LPending) + { + OSLockRelease (psTmp->hStreamWLock); + PVR_DPF_RETURN_RC(PVRSRV_ERROR_NOT_READY); + } + + if (psTmp->ui32MaxPacketSize < lReqSizeAligned) + { + PVR_DPF((PVR_DBG_ERROR, "Requested Size: %u > TL Max Packet size: %u", lReqSizeAligned, psTmp->ui32MaxPacketSize)); + psTmp->ui32Pending = NOTHING_PENDING; + if (pui32AvSpace) + { + *pui32AvSpace = suggestAllocSize(ui32LRead, ui32LWrite, psTmp->ui32Size, ui32ReqSizeMin, psTmp->ui32MaxPacketSize); + if (*pui32AvSpace == 0 && psTmp->eOpMode == TL_OPMODE_DROP_OLDEST) + { + *pui32AvSpace = psTmp->ui32MaxPacketSize; + PVR_DPF((PVR_DBG_MESSAGE, "Opmode is Drop_Oldest, so Available Space changed to: %u", *pui32AvSpace)); + } + } + OSLockRelease (psTmp->hStreamWLock); + PVR_DPF_RETURN_RC(PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED); + } + + /* Prevent other threads from entering this region before we are done + * updating the pending value and write offset (in case of padding). 
This + * is not exactly a lock but a signal for other contexts that there is a + * TLStreamCommit operation pending on this stream */ + psTmp->ui32Pending = 0; + + OSLockRelease (psTmp->hStreamWLock); + + /* If there is enough contiguous space following the current Write + * position then no padding is required */ + if ( psTmp->ui32Size + < ui32LWrite + lReqSizeAligned + sizeof(PVRSRVTL_PACKETHDR) ) + { + pad = psTmp->ui32Size - ui32LWrite; + } + else + { + pad = 0; + } + + lReqSizeActual = lReqSizeAligned + sizeof(PVRSRVTL_PACKETHDR) + pad; + if (psTmp->bNoWrapPermanent) + { + iFreeSpace = bufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size); + } + else + { + iFreeSpace = circbufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size); + } + + if (iFreeSpace < (IMG_INT) lReqSizeActual) + { + /* If this is a blocking reserve and there is not enough space then wait. */ + if (psTmp->eOpMode == TL_OPMODE_BLOCK) + { + /* Stream create should stop us entering here when + * psTmp->bNoWrapPermanent is true as it does not make sense to + * block on permanent data streams. */ + PVR_ASSERT(psTmp->bNoWrapPermanent == IMG_FALSE); + while ( ( circbufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size) + <(IMG_INT) lReqSizeActual ) ) + { + /* The TL bridge is lockless now, so changing to OSEventObjectWait() */ + OSEventObjectWait(psTmp->hProducerEvent); + // update local copies. 
+ ui32LRead = psTmp->ui32Read; + ui32LWrite = psTmp->ui32Write; + } + } + /* Data overwriting, also insert PACKETS_DROPPED flag into existing packet */ + else if (psTmp->eOpMode == TL_OPMODE_DROP_OLDEST) + { + OSLockAcquire(psTmp->hReadLock); + + while (psTmp->bReadPending) + { + PVR_DPF((PVR_DBG_MESSAGE, "Waiting for the pending read operation to complete.")); + OSLockRelease(psTmp->hReadLock); +#if defined(TL_BUFFER_STATS) + TL_COUNTER_INC(psTmp->ui32CntWriteWaits); +#endif + (void) OSEventObjectWaitTimeout(psTmp->hProducerEvent, READ_PENDING_TIMEOUT_US); + OSLockAcquire(psTmp->hReadLock); + } + +#if defined(TL_BUFFER_STATS) + TL_COUNTER_INC(psTmp->ui32CntWriteSuccesses); +#endif + ui32LRead = psTmp->ui32Read; + + if ( circbufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size) + < (IMG_INT) lReqSizeActual ) + { + ui32CreateFreeSpace = 5 * (psTmp->ui32Size / 100); + if (ui32CreateFreeSpace < lReqSizeActual) + { + ui32CreateFreeSpace = lReqSizeActual; + } + + while (ui32CreateFreeSpace > (IMG_UINT32)circbufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size)) + { + pui8IncrRead = &psTmp->pbyBuffer[ui32LRead]; + ui32LRead += (sizeof(PVRSRVTL_PACKETHDR) + PVRSRVTL_ALIGN( GET_PACKET_DATA_LEN(pui8IncrRead) )); + + /* Check if buffer needs to wrap */ + if (ui32LRead >= psTmp->ui32Size) + { + ui32LRead = 0; + } + } + psTmp->ui32Read = ui32LRead; + pui8IncrRead = &psTmp->pbyBuffer[psTmp->ui32Read]; + + pHdr = GET_PACKET_HDR(pui8IncrRead); + DoTLSetPacketHeader(pHdr, SET_PACKETS_DROPPED(pHdr)); + } + /* else fall through as there is enough space now to write the data */ + + OSLockRelease(psTmp->hReadLock); + /* If we accepted a flag var set the OVERWRITE bit*/ + if (pui32Flags) *pui32Flags |= TL_FLAG_OVERWRITE_DETECTED; + } + /* No data overwriting, insert write_failed flag and return */ + else if (psTmp->eOpMode == TL_OPMODE_DROP_NEWER) + { + /* Caller should not try to use ppui8Data, + * NULLify to give user a chance of avoiding memory corruption */ + *ppui8Data = NULL; + + 
/* This flag should not be inserted two consecutive times, so + * check the last ui32 in case it was a packet drop packet. */ + pui32Buf = ui32LWrite + ? + (void *)&psTmp->pbyBuffer[ui32LWrite - sizeof(PVRSRVTL_PACKETHDR)] + : // Previous four bytes are not guaranteed to be a packet header... + (void *)&psTmp->pbyBuffer[psTmp->ui32Size - PVRSRVTL_PACKET_ALIGNMENT]; + + pHdr = GET_PACKET_HDR(pui32Buf); + if ( PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED + != + GET_PACKET_TYPE( pHdr ) && (ui32InputFlags & TL_FLAG_NO_WRITE_FAILED) == 0) + { + /* Insert size-stamped packet header */ + pui32Buf = (void *)&psTmp->pbyBuffer[ui32LWrite]; + pHdr = GET_PACKET_HDR(pui32Buf); + DoTLSetPacketHeader(pHdr, PVRSRVTL_SET_PACKET_WRITE_FAILED); + ui32LWrite += sizeof(PVRSRVTL_PACKETHDR); + ui32LWrite %= psTmp->ui32Size; + iFreeSpace -= sizeof(PVRSRVTL_PACKETHDR); + } + + OSLockAcquire (psTmp->hStreamWLock); + psTmp->ui32Write = ui32LWrite; + psTmp->ui32Pending = NOTHING_PENDING; + OSLockRelease (psTmp->hStreamWLock); + + if (pui32AvSpace) + { + *pui32AvSpace = suggestAllocSize(ui32LRead, ui32LWrite, psTmp->ui32Size, ui32ReqSizeMin, psTmp->ui32MaxPacketSize); + } + + /* Inform call of permanent stream misuse, no space left, + * the size of the stream will need to be increased. */ + if (psTmp->bNoWrapPermanent) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_NOT_ENOUGH_SPACE); + } + + PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_FULL); + } + } + + /* The easy case: buffer has enough space to hold the requested packet (data + header) */ + + /* Should we treat the buffer as non-circular buffer? */ + if (psTmp->bNoWrapPermanent) + { + iFreeSpace = bufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size); + } + else + { + iFreeSpace = circbufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size); + } + + if (iFreeSpace >= (IMG_INT) lReqSizeActual) + { + if (pad) + { + /* Inserting padding packet. 
*/ + pui32Buf = (void *)&psTmp->pbyBuffer[ui32LWrite]; + pHdr = GET_PACKET_HDR(pui32Buf); + DoTLSetPacketHeader(pHdr, + PVRSRVTL_SET_PACKET_PADDING(pad-sizeof(PVRSRVTL_PACKETHDR))); + + /* CAUTION: the used pad value should always result in a properly + * aligned ui32LWrite pointer, which in this case is 0 */ + ui32LWrite = (ui32LWrite + pad) % psTmp->ui32Size; + /* Detect unaligned pad value */ + PVR_ASSERT(ui32LWrite == 0); + } + /* Insert size-stamped packet header */ + pui32Buf = (void *) &psTmp->pbyBuffer[ui32LWrite]; + + pHdr = GET_PACKET_HDR(pui32Buf); + DoTLSetPacketHeader(pHdr, + PVRSRVTL_SET_PACKET_HDR(ui32ReqSize, ePacketType)); + + /* return the next position in the buffer to the user */ + *ppui8Data = &psTmp->pbyBuffer[ ui32LWrite+sizeof(PVRSRVTL_PACKETHDR) ]; + + /* update pending offset: size stamp + data */ + ui32LPending = lReqSizeAligned + sizeof(PVRSRVTL_PACKETHDR); + } + else + { + OSLockAcquire (psTmp->hStreamWLock); + psTmp->ui32Pending = NOTHING_PENDING; + OSLockRelease (psTmp->hStreamWLock); + PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR); + } + + /* Acquire stream lock for updating stream parameters */ + OSLockAcquire (psTmp->hStreamWLock); + psTmp->ui32Write = ui32LWrite; + psTmp->ui32Pending = ui32LPending; + OSLockRelease (psTmp->hStreamWLock); + +#if defined(TL_BUFFER_STATS) + TL_COUNTER_INC(psTmp->ui32CntNumWriteSuccess); +#endif + + PVR_DPF_RETURN_OK; +} + +PVRSRV_ERROR +TLStreamReserve(IMG_HANDLE hStream, + IMG_UINT8 **ppui8Data, + IMG_UINT32 ui32Size) +{ + return DoTLStreamReserve(hStream, ppui8Data, ui32Size, ui32Size, PVRSRVTL_PACKETTYPE_DATA, NULL, NULL); +} + +PVRSRV_ERROR +TLStreamReserve2(IMG_HANDLE hStream, + IMG_UINT8 **ppui8Data, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32SizeMin, + IMG_UINT32* pui32Available, + IMG_BOOL* pbIsReaderConnected) +{ + PVRSRV_ERROR eError; + + eError = DoTLStreamReserve(hStream, ppui8Data, ui32Size, ui32SizeMin, PVRSRVTL_PACKETTYPE_DATA, pui32Available, NULL); + if (eError != PVRSRV_OK && 
pbIsReaderConnected != NULL) + { + *pbIsReaderConnected = TLStreamIsOpenForReading(hStream); + } + + return eError; +} + +PVRSRV_ERROR +TLStreamReserveReturnFlags(IMG_HANDLE hStream, + IMG_UINT8 **ppui8Data, + IMG_UINT32 ui32Size, + IMG_UINT32* pui32Flags) +{ + return DoTLStreamReserve(hStream, ppui8Data, ui32Size, ui32Size, PVRSRVTL_PACKETTYPE_DATA, NULL, pui32Flags); +} + +PVRSRV_ERROR +TLStreamCommit(IMG_HANDLE hStream, IMG_UINT32 ui32ReqSize) +{ + PTL_STREAM psTmp; + IMG_UINT32 ui32LRead, ui32OldWrite, ui32LWrite, ui32LPending; + PVRSRV_ERROR eError; + +#if defined(TL_BUFFER_STATS) + IMG_UINT32 ui32UnreadBytes; +#endif + + PVR_DPF_ENTERED; + + if (NULL == hStream) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + psTmp = (PTL_STREAM)hStream; + + /* Get a local copy of the stream buffer parameters */ + ui32LRead = psTmp->ui32Read; + ui32LWrite = psTmp->ui32Write; + ui32LPending = psTmp->ui32Pending; + + ui32OldWrite = ui32LWrite; + + // Space in buffer is aligned + ui32ReqSize = PVRSRVTL_ALIGN(ui32ReqSize) + sizeof(PVRSRVTL_PACKETHDR); + + /* Check pending reserver and ReqSize + packet header size. */ + if ((ui32LPending == NOTHING_PENDING) || (ui32ReqSize > ui32LPending)) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_MISUSE); + } + + /* Update pointer to written data. 
*/ + ui32LWrite = (ui32LWrite + ui32ReqSize) % psTmp->ui32Size; + + /* and reset LPending to 0 since data are now submitted */ + ui32LPending = NOTHING_PENDING; + +#if defined(TL_BUFFER_STATS) + /* Calculate new number of bytes unread */ + if (ui32LWrite > ui32LRead) + { + ui32UnreadBytes = (ui32LWrite-ui32LRead); + } + else if (ui32LWrite < ui32LRead) + { + ui32UnreadBytes = (psTmp->ui32Size-ui32LRead+ui32LWrite); + } + else + { /* else equal, ignore */ + ui32UnreadBytes = 0; + } + + /* Calculate high water mark for debug purposes */ + if (ui32UnreadBytes > psTmp->ui32BufferUt) + { + psTmp->ui32BufferUt = ui32UnreadBytes; + } +#endif + + /* Memory barrier required to ensure prior data written by writer is + * flushed from WC buffer to main memory. */ + OSWriteMemoryBarrier(); + + /* Acquire stream lock to ensure other context(s) (if any) + * wait on the lock (in DoTLStreamReserve) for consistent values + * of write offset and pending value */ + OSLockAcquire (psTmp->hStreamWLock); + + /* Update stream buffer parameters to match local copies */ + psTmp->ui32Write = ui32LWrite; + psTmp->ui32Pending = ui32LPending; + + TL_COUNTER_ADD(psTmp->ui32ProducerByteCount, ui32ReqSize); + TL_COUNTER_INC(psTmp->ui32NumCommits); + +#if defined(TL_BUFFER_STATS) + /* IF there has been no-reader since first reserve on an empty-buffer, + * AND current utilisation is considerably high (90%), calculate the + * time taken to fill up the buffer */ + if ((OSAtomicRead(&psTmp->bNoReaderSinceFirstReserve) == 1) && + (TLStreamGetUT(psTmp) >= 90 * psTmp->ui32Size/100)) + { + IMG_UINT32 ui32TimeToFullInUs = OSClockus() - psTmp->ui32TimeStart; + if (psTmp->ui32MinTimeToFullInUs > ui32TimeToFullInUs) + { + psTmp->ui32MinTimeToFullInUs = ui32TimeToFullInUs; + } + /* Following write ensures ui32MinTimeToFullInUs doesn't lose its + * real (expected) value in case there is no reader until next Commit call */ + OSAtomicWrite(&psTmp->bNoReaderSinceFirstReserve, 0); + } +#endif + + if 
(!psTmp->bNoSignalOnCommit) + { + /* If we have transitioned from an empty buffer to a non-empty buffer, we + * must signal possibly waiting consumer. BUT, let the signal be "deferred" + * until buffer is at least 'ui32ThresholdUsageForSignal' bytes full. This + * avoids a race between OSEventObjectSignal and OSEventObjectWaitTimeout + * (in TLServerAcquireDataKM), where a "signal" might happen before "wait", + * resulting into signal being lost and stream-reader waiting even though + * buffer is no-more empty */ + if (ui32OldWrite == ui32LRead) + { + psTmp->bSignalPending = IMG_TRUE; + } + + if (psTmp->bSignalPending && (TLStreamGetUT(psTmp) >= psTmp->ui32ThresholdUsageForSignal)) + { + TL_COUNTER_INC(psTmp->ui32SignalsSent); + psTmp->bSignalPending = IMG_FALSE; + + /* Signal consumers that may be waiting */ + eError = OSEventObjectSignal(psTmp->psNode->hReadEventObj); + if (eError != PVRSRV_OK) + { + OSLockRelease (psTmp->hStreamWLock); + PVR_DPF_RETURN_RC(eError); + } + } + else + { + TL_COUNTER_INC(psTmp->ui32SignalNotSent); + } + } + OSLockRelease (psTmp->hStreamWLock); + + PVR_DPF_RETURN_OK; +} + +PVRSRV_ERROR +TLStreamWrite(IMG_HANDLE hStream, IMG_UINT8 *pui8Src, IMG_UINT32 ui32Size) +{ + IMG_BYTE *pbyDest = NULL; + PVRSRV_ERROR eError; + + PVR_DPF_ENTERED; + + if (NULL == hStream) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + + eError = TLStreamReserve(hStream, &pbyDest, ui32Size); + if (PVRSRV_OK != eError) + { + PVR_DPF_RETURN_RC(eError); + } + else + { + OSDeviceMemCopy((void*)pbyDest, (void*)pui8Src, ui32Size); + eError = TLStreamCommit(hStream, ui32Size); + if (PVRSRV_OK != eError) + { + PVR_DPF_RETURN_RC(eError); + } + } + + PVR_DPF_RETURN_OK; +} + +PVRSRV_ERROR +TLStreamWriteRetFlags(IMG_HANDLE hStream, IMG_UINT8 *pui8Src, IMG_UINT32 ui32Size, IMG_UINT32 *pui32Flags){ + IMG_BYTE *pbyDest = NULL; + PVRSRV_ERROR eError; + + PVR_DPF_ENTERED; + + if (NULL == hStream) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + + eError = 
TLStreamReserveReturnFlags(hStream, &pbyDest, ui32Size, pui32Flags); + if (PVRSRV_OK != eError) + { + PVR_DPF_RETURN_RC(eError); + } + else + { + OSDeviceMemCopy((void*)pbyDest, (void*)pui8Src, ui32Size); + eError = TLStreamCommit(hStream, ui32Size); + if (PVRSRV_OK != eError) + { + PVR_DPF_RETURN_RC(eError); + } + } + + PVR_DPF_RETURN_OK; +} + +void TLStreamInfo(IMG_HANDLE hStream, PTL_STREAM_INFO psInfo) +{ + IMG_DEVMEM_SIZE_T actual_req_size; + IMG_DEVMEM_ALIGN_T align = 4; /* Low fake value so the real value can be obtained */ + + actual_req_size = 2; + /* ignore error as OSGetPageShift() should always return correct value */ + (void) DevmemExportalignAdjustSizeAndAlign(OSGetPageShift(), &actual_req_size, &align); + + psInfo->headerSize = sizeof(PVRSRVTL_PACKETHDR); + psInfo->minReservationSize = sizeof(IMG_UINT32); + psInfo->pageSize = (IMG_UINT32)(actual_req_size); + psInfo->pageAlign = (IMG_UINT32)(align); + psInfo->maxTLpacketSize = ((PTL_STREAM)hStream)->ui32MaxPacketSize; +} + +PVRSRV_ERROR +TLStreamMarkEOS(IMG_HANDLE psStream, IMG_BOOL bRemoveOld) +{ + PTL_STREAM psTmp; + PVRSRV_ERROR eError; + IMG_UINT8* pData; + + PVR_DPF_ENTERED; + + if (NULL == psStream) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + + psTmp = (PTL_STREAM)psStream; + + /* Do not support EOS packets on permanent stream buffers at present, + * EOS is best used with streams where data is consumed. 
*/ + if (psTmp->bNoWrapPermanent) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_MISUSE); + } + + if (bRemoveOld) + { + eError = DoTLStreamReserve(psStream, &pData, 0, 0, PVRSRVTL_PACKETTYPE_MARKER_EOS_REMOVEOLD, NULL, NULL); + } + else + { + eError = DoTLStreamReserve(psStream, &pData, 0, 0, PVRSRVTL_PACKETTYPE_MARKER_EOS, NULL, NULL); + } + + if (PVRSRV_OK != eError) + { + PVR_DPF_RETURN_RC(eError); + } + + PVR_DPF_RETURN_RC(TLStreamCommit(psStream, 0)); +} + + +static PVRSRV_ERROR +_TLStreamMarkOC(IMG_HANDLE hStream, PVRSRVTL_PACKETTYPE ePacketType) +{ + PVRSRV_ERROR eError; + PTL_STREAM psStream = hStream; + IMG_UINT32 ui32Size; + IMG_UINT8 *pData; + + PVR_DPF_ENTERED; + + if (NULL == psStream) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + + if (NULL == psStream->psNotifStream) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_NOTIF_STREAM); + } + + ui32Size = OSStringLength(psStream->szName) + 1; + + eError = DoTLStreamReserve(psStream->psNotifStream, &pData, ui32Size, + ui32Size, ePacketType, NULL, NULL); + if (PVRSRV_OK != eError) + { + PVR_DPF_RETURN_RC(eError); + } + + OSDeviceMemCopy(pData, psStream->szName, ui32Size); + + PVR_DPF_RETURN_RC(TLStreamCommit(psStream->psNotifStream, ui32Size)); +} + +PVRSRV_ERROR +TLStreamMarkStreamOpen(IMG_HANDLE psStream) +{ + return _TLStreamMarkOC(psStream, PVRSRVTL_PACKETTYPE_STREAM_OPEN_FOR_WRITE); +} + +PVRSRV_ERROR +TLStreamMarkStreamClose(IMG_HANDLE psStream) +{ + return _TLStreamMarkOC(psStream, PVRSRVTL_PACKETTYPE_STREAM_CLOSE_FOR_WRITE); +} + +PVRSRV_ERROR +TLStreamSync(IMG_HANDLE psStream) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PTL_STREAM psTmp; + + PVR_DPF_ENTERED; + + if (NULL == psStream) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); + } + psTmp = (PTL_STREAM)psStream; + + /* If read client exists and has opened stream in blocking mode, + * signal when data is available to read. 
*/ + if (psTmp->psNode->psRDesc && + (!(psTmp->psNode->psRDesc->ui32Flags & PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING)) && + psTmp->ui32Read != psTmp->ui32Write) + { + TL_COUNTER_INC(psTmp->ui32ManSyncs); + eError = OSEventObjectSignal(psTmp->psNode->hReadEventObj); + } + + PVR_DPF_RETURN_RC(eError); +} + +IMG_BOOL +TLStreamIsOpenForReading(IMG_HANDLE hStream) +{ + PTL_STREAM psTmp; + + PVR_DPF_ENTERED; + + PVR_ASSERT(hStream); + psTmp = (PTL_STREAM)hStream; + + PVR_DPF_RETURN_VAL(psTmp->psNode->psRDesc != NULL); +} + +IMG_BOOL +TLStreamOutOfData(IMG_HANDLE hStream) +{ + PTL_STREAM psTmp; + + PVR_DPF_ENTERED; + + PVR_ASSERT(hStream); + psTmp = (PTL_STREAM)hStream; + + /* If both pointers are equal then the buffer is empty */ + PVR_DPF_RETURN_VAL(psTmp->ui32Read == psTmp->ui32Write); +} + + +PVRSRV_ERROR +TLStreamResetProducerByteCount(IMG_HANDLE hStream, IMG_UINT32 ui32Value) +{ + PTL_STREAM psTmp; + IMG_UINT32 ui32LRead, ui32LWrite; + PVRSRV_ERROR eErr = PVRSRV_OK; + + PVR_DPF_ENTERED; + + PVR_ASSERT(hStream); + psTmp = (PTL_STREAM)hStream; + ui32LRead = psTmp->ui32Read; + ui32LWrite = psTmp->ui32Write; + + if (ui32LRead != ui32LWrite) + { + eErr = PVRSRV_ERROR_STREAM_MISUSE; + } +#if defined(TL_BUFFER_STATS) + psTmp->ui32ProducerByteCount = ui32Value; +#else + PVR_UNREFERENCED_PARAMETER(ui32Value); +#endif + PVR_DPF_RETURN_RC(eErr); +} +/* + * Internal stream APIs to server part of Transport Layer, declared in + * header tlintern.h. Direct pointers to stream objects are used here as + * these functions are internal. + */ +IMG_UINT32 +TLStreamAcquireReadPos(PTL_STREAM psStream, + IMG_BOOL bDisableCallback, + IMG_UINT32* puiReadOffset) +{ + IMG_UINT32 uiReadLen = 0; + IMG_UINT32 ui32LRead, ui32LWrite; + + PVR_DPF_ENTERED; + + PVR_ASSERT(psStream); + PVR_ASSERT(puiReadOffset); + + if (psStream->eOpMode == TL_OPMODE_DROP_OLDEST) + { + if (!OSTryLockAcquire(psStream->hReadLock)) + { + /* + * This is a normal event when the system is under load. 
+ * An example of how to produce this is to run testrunner / + * regression/ddk_test_seq2_host_fw_mem.conf with HTB / pvrhtbd + * configured as + * + * # pvrdebug -log trace -loggroups main,pow,debug \ + * -hostloggroups main,ctrl,sync,brg -hostlogtype dropoldest + * + * # pvrhtbd -hostloggroups main,ctrl,sync,brg + * + * We will see a small number of these collisions but as this is + * an expected calling path, and an expected return code, we drop + * the severity to just be a debug MESSAGE instead of WARNING + */ + PVR_DPF((PVR_DBG_MESSAGE, + "%s: Read lock on stream '%s' is acquired by some writer, " + "hence reader failed to acquire read lock.", __func__, + psStream->szName)); +#if defined(TL_BUFFER_STATS) + TL_COUNTER_INC(psStream->ui32CntReadFails); +#endif + PVR_DPF_RETURN_VAL(0); + } + } + +#if defined(TL_BUFFER_STATS) + TL_COUNTER_INC(psStream->ui32CntReadSuccesses); +#endif + + /* Grab a local copy */ + ui32LRead = psStream->ui32Read; + ui32LWrite = psStream->ui32Write; + + if (psStream->eOpMode == TL_OPMODE_DROP_OLDEST) + { + psStream->bReadPending = IMG_TRUE; + OSLockRelease(psStream->hReadLock); + } + + /* No data available and CB defined - try and get data */ + if ((ui32LRead == ui32LWrite) && psStream->pfProducerCallback && !bDisableCallback) + { + PVRSRV_ERROR eRc; + IMG_UINT32 ui32Resp = 0; + + eRc = ((TL_STREAM_SOURCECB)psStream->pfProducerCallback)(psStream, TL_SOURCECB_OP_CLIENT_EOS, + &ui32Resp, psStream->pvProducerUserData); + PVR_LOG_IF_ERROR(eRc, "TLStream->pfProducerCallback"); + + ui32LWrite = psStream->ui32Write; + } + + /* No data available... */ + if (ui32LRead == ui32LWrite) + { + if (psStream->eOpMode == TL_OPMODE_DROP_OLDEST) + { + psStream->bReadPending = IMG_FALSE; + } + PVR_DPF_RETURN_VAL(0); + } + +#if defined(TL_BUFFER_STATS) + /* The moment reader knows it will see a non-zero data, it marks its presence in writer's eyes */ + OSAtomicWrite (&psStream->bNoReaderSinceFirstReserve, 0); +#endif + + /* Data is available to read... 
*/ + *puiReadOffset = ui32LRead; + + /*PVR_DPF((PVR_DBG_VERBOSE, + * "TLStreamAcquireReadPos Start before: Write:%d, Read:%d, size:%d", + * ui32LWrite, ui32LRead, psStream->ui32Size)); + */ + + if (ui32LRead > ui32LWrite) + { /* CB has wrapped around. */ + PVR_ASSERT(!psStream->bNoWrapPermanent); + /* Return the first contiguous piece of memory, ie [ReadLen,EndOfBuffer] + * and let a subsequent AcquireReadPos read the rest of the Buffer */ + /*PVR_DPF((PVR_DBG_VERBOSE, "TLStreamAcquireReadPos buffer has wrapped"));*/ + uiReadLen = psStream->ui32Size - ui32LRead; + TL_COUNTER_INC(psStream->ui32AcquireRead2); + } + else + { /* CB has not wrapped */ + uiReadLen = ui32LWrite - ui32LRead; + TL_COUNTER_INC(psStream->ui32AcquireRead1); + } + + PVR_DPF_RETURN_VAL(uiReadLen); +} + +PVRSRV_ERROR +TLStreamAdvanceReadPos(PTL_STREAM psStream, + IMG_UINT32 uiReadLen, + IMG_UINT32 uiOrigReadLen) +{ + IMG_UINT32 uiNewReadPos; + + PVR_DPF_ENTERED; + + PVR_ASSERT(psStream); + + /* + * This API does not use Read lock as 'bReadPending' is sufficient + * to keep Read index safe by preventing a write from updating the + * index and 'bReadPending' itself is safe as it can only be modified + * by readers and there can be only one reader in action at a time. + */ + + /* Update the read offset by the length provided in a circular manner. + * Assuming the update to be atomic hence, avoiding use of locks + */ + uiNewReadPos = (psStream->ui32Read + uiReadLen) % psStream->ui32Size; + + /* Must validate length is on a packet boundary, for + * TLReleaseDataLess calls. 
+ */ + if (uiReadLen != uiOrigReadLen) /* buffer not empty */ + { + PVRSRVTL_PPACKETHDR psHdr = GET_PACKET_HDR(psStream->pbyBuffer+uiNewReadPos); + PVRSRVTL_PACKETTYPE eType = GET_PACKET_TYPE(psHdr); + + if ((psHdr->uiReserved != PVRSRVTL_PACKETHDR_RESERVED) || + (eType == PVRSRVTL_PACKETTYPE_UNDEF) || + (eType >= PVRSRVTL_PACKETTYPE_LAST)) + { + PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_ALIGNMENT); + } + /* else OK, on a packet boundary */ + } + /* else no check needed */ + + psStream->ui32Read = uiNewReadPos; + + if (psStream->eOpMode == TL_OPMODE_DROP_OLDEST) + { + psStream->bReadPending = IMG_FALSE; + } + + /* notify reserves that may be pending */ + /* The producer event object is used to signal the StreamReserve if the TL + * Buffer is in blocking mode and is full. + * Previously this event was only signalled if the buffer was created in + * blocking mode. Since the buffer mode can now change dynamically the event + * is signalled every time to avoid any potential race where the signal is + * required, but not produced. + */ + { + PVRSRV_ERROR eError; + eError = OSEventObjectSignal(psStream->hProducerEventObj); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_WARNING, + "Error in TLStreamAdvanceReadPos: OSEventObjectSignal returned:%u", + eError)); + /* We've failed to notify the producer event. This means there may + * be a delay in generating more data to be consumed until the next + * Write() generating action occurs. + */ + } + } + + PVR_DPF((PVR_DBG_VERBOSE, + "TLStreamAdvanceReadPos Read now at: %d", + psStream->ui32Read)); + PVR_DPF_RETURN_OK; +} + +void +TLStreamResetReadPos(PTL_STREAM psStream) +{ + PVR_DPF_ENTERED; + + PVR_ASSERT(psStream); + + if (psStream->bNoWrapPermanent) + { + + /* Update the read offset by the length provided in a circular manner. 
+ * Assuming the update to be atomic hence, avoiding use of locks */ + psStream->ui32Read = 0; + + PVR_DPF((PVR_DBG_VERBOSE, + "TLStreamResetReadPos Read now at: %d", + psStream->ui32Read)); + } + else + { + /* else for other stream types this is a no-op */ + PVR_DPF((PVR_DBG_VERBOSE, + "No need to reset read position of circular tlstream")); + } + + PVR_DPF_RETURN; +} + +void +TLStreamDestroy (PTL_STREAM psStream) +{ + PVR_ASSERT (psStream); + + OSLockDestroy (psStream->hStreamWLock); + OSLockDestroy (psStream->hReadLock); + + OSEventObjectClose(psStream->hProducerEvent); + OSEventObjectDestroy(psStream->hProducerEventObj); + + TLFreeSharedMem(psStream); + OSFreeMem(psStream); +} + +DEVMEM_MEMDESC* +TLStreamGetBufferPointer(PTL_STREAM psStream) +{ + PVR_DPF_ENTERED; + + PVR_ASSERT(psStream); + + PVR_DPF_RETURN_VAL(psStream->psStreamMemDesc); +} diff --git a/drivers/gpu/drm/phytium/octopus/tlstream.h b/drivers/gpu/drm/phytium/octopus/tlstream.h new file mode 100644 index 000000000000..559d9d73ef56 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/tlstream.h @@ -0,0 +1,600 @@ +/*************************************************************************/ /*! +@File +@Title Transport Layer kernel side API. +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description TL provides driver components with a way to copy data from kernel + space to user space (e.g. screen/file). + + Data can be passed to the Transport Layer through the + TL Stream (kernel space) API interface. + + The buffer provided to every stream is a modified version of a + circular buffer. Which CB version is created is specified by + relevant flags when creating a stream. Currently two types + of buffer are available: + - TL_OPMODE_DROP_NEWER: + When the buffer is full, incoming data are dropped + (instead of overwriting older data) and a marker is set + to let the user know that data have been lost. 
+ - TL_OPMODE_BLOCK: + When the circular buffer is full, reserve/write calls block + until enough space is freed. + - TL_OPMODE_DROP_OLDEST: + When the circular buffer is full, the oldest packets in the + buffer are dropped and a flag is set in header of next packet + to let the user know that data have been lost. + + All size/space requests are in bytes. However, the actual + implementation uses native word sizes (i.e. 4 byte aligned). + + The user does not need to provide space for the stream buffer + as the TL handles memory allocations and usage. + + Inserting data to a stream's buffer can be done either: + - by using TLReserve/TLCommit: User is provided with a buffer + to write data to. + - or by using TLWrite: User provides a buffer with + data to be committed. The TL + copies the data from the + buffer into the stream buffer + and returns. + Users should be aware that there are implementation overheads + associated with every stream buffer. If you find that less + data are captured than expected then try increasing the + stream buffer size or use TLInfo to obtain buffer parameters + and calculate optimum required values at run time. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef TLSTREAM_H +#define TLSTREAM_H + +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv_error.h" +#include "pvrsrv_tlcommon.h" +#include "device.h" + +/*! Extract TL stream opmode from the given stream create flags. + * Last 3 bits of streamFlag is used for storing opmode, hence + * opmode mask is set as following. */ +#define TL_OPMODE_MASK 0x7 + +/* + * NOTE: This enum is used to directly access the HTB_OPMODE_xxx values + * within htbserver.c. + * As such we *MUST* keep the values matching in order of declaration. + */ +/*! 
Opmode specifying circular buffer behaviour */ +typedef enum +{ + /*! Undefined operation mode */ + TL_OPMODE_UNDEF = 0, + + /*! Reject new data if the buffer is full, producer may then decide to + * drop the data or retry after some time. */ + TL_OPMODE_DROP_NEWER, + + /*! When buffer is full, advance the tail/read position to accept the new + * reserve call (size permitting), effectively overwriting the oldest + * data in the circular buffer. Not supported yet. */ + TL_OPMODE_DROP_OLDEST, + + /*! Block Reserve (subsequently Write) calls if there is not enough space + * until some space is freed via a client read operation. */ + TL_OPMODE_BLOCK, + + /*!< For error checking */ + TL_OPMODE_LAST + +} TL_OPMODE; + +typedef enum { + /* Enum to be used in conjunction with new Flags feature */ + + /* Flag set when Drop Oldest is set and packets have been dropped */ + TL_FLAG_OVERWRITE_DETECTED = (1 << 0), + /* Prevents DoTLStreamReserve() from adding from injecting + * PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED */ + TL_FLAG_NO_WRITE_FAILED = (1 << 1), +} TL_Flags; + +static_assert(TL_OPMODE_LAST <= TL_OPMODE_MASK, + "TL_OPMODE_LAST must not exceed TL_OPMODE_MASK"); + +/*! Flags specifying stream behaviour */ +/*! Do not destroy stream if there still are data that have not been + * copied in user space. Block until the stream is emptied. */ +#define TL_FLAG_FORCE_FLUSH (1U<<8) +/*! Do not signal consumers on commit automatically when the stream buffer + * transitions from empty to non-empty. Producer responsible for signal when + * it chooses. */ +#define TL_FLAG_NO_SIGNAL_ON_COMMIT (1U<<9) + +/*! When a stream has this property it never wraps around and + * overwrites existing data, hence it is a fixed size persistent + * buffer, data written is permanent. Producers need to ensure + * the buffer is big enough for their needs. + * When a stream is opened for reading the client will always + * find the read position at the start of the buffer/data. 
*/ +#define TL_FLAG_PERMANENT_NO_WRAP (1U<<10) + +/*! Defer allocation of stream's shared memory until first open. */ +#define TL_FLAG_ALLOCATE_ON_FIRST_OPEN (1U<<11) + +/*! Structure used to pass internal TL stream sizes information to users.*/ +typedef struct _TL_STREAM_INFO_ +{ + IMG_UINT32 headerSize; /*!< Packet header size in bytes */ + IMG_UINT32 minReservationSize; /*!< Minimum data size reserved in bytes */ + IMG_UINT32 pageSize; /*!< Page size in bytes */ + IMG_UINT32 pageAlign; /*!< Page alignment in bytes */ + IMG_UINT32 maxTLpacketSize; /*! Max allowed TL packet size*/ +} TL_STREAM_INFO, *PTL_STREAM_INFO; + +/*! Callback operations or notifications that a stream producer may handle + * when requested by the Transport Layer. + */ +#define TL_SOURCECB_OP_CLIENT_EOS 0x01 /*!< Client has reached end of stream, + * can anymore data be supplied? + * ui32Resp ignored in this operation */ + +/*! Function pointer type for the callback handler into the "producer" code + * that writes data to the TL stream. Producer should handle the notification + * or operation supplied in ui32ReqOp on stream hStream. The + * Operations and notifications are defined above in TL_SOURCECB_OP */ +typedef PVRSRV_ERROR (*TL_STREAM_SOURCECB)(IMG_HANDLE hStream, + IMG_UINT32 ui32ReqOp, IMG_UINT32* ui32Resp, void* pvUser); + +typedef void (*TL_STREAM_ONREADEROPENCB)(void *pvArg); + +/*************************************************************************/ /*! + @Function TLAllocSharedMemIfNull + @Description Allocates shared memory for the stream. + @Input hStream Stream handle. + @Return eError Internal services call returned eError error + number. + @Return PVRSRV_OK +*/ /**************************************************************************/ +PVRSRV_ERROR +TLAllocSharedMemIfNull(IMG_HANDLE hStream); + +/*************************************************************************/ /*! + @Function TLFreeSharedMem + @Description Frees stream's shared memory. 
+ @Input phStream Stream handle. +*/ /**************************************************************************/ +void +TLFreeSharedMem(IMG_HANDLE hStream); + +/*************************************************************************/ /*! + @Function TLStreamCreate + @Description Request the creation of a new stream and open a handle. + If creating a stream which should continue to exist after the + current context is finished, then TLStreamCreate must be + followed by a TLStreamOpen call. On any case, the number of + create/open calls must balance with the number of close calls + used. This ensures the resources of a stream are released when + it is no longer required. + @Output phStream Pointer to handle to store the new stream. + @Input szStreamName Name of stream, maximum length: + PRVSRVTL_MAX_STREAM_NAME_SIZE. + If a longer string is provided,creation fails. + @Input ui32Size Desired buffer size in bytes. + @Input ui32StreamFlags Used to configure buffer behaviour. See above. + @Input pfOnReaderOpenCB Optional callback called when a client + opens this stream, may be null. + @Input pvOnReaderOpenUD Optional user data for pfOnReaderOpenCB, + may be null. + @Input pfProducerCB Optional callback, may be null. + @Input pvProducerUD Optional user data for callback, may be null. + @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handle or string name + exceeded MAX_STREAM_NAME_SIZE + @Return PVRSRV_ERROR_OUT_OF_MEMORY Failed to allocate space for + stream handle. + @Return PVRSRV_ERROR_DUPLICATE_VALUE There already exists a stream with + the same stream name string. + @Return eError Internal services call returned + eError error number. 
+ @Return PVRSRV_OK +*/ /**************************************************************************/ +PVRSRV_ERROR +TLStreamCreate(IMG_HANDLE *phStream, + const IMG_CHAR *szStreamName, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32StreamFlags, + TL_STREAM_ONREADEROPENCB pfOnReaderOpenCB, + void *pvOnReaderOpenUD, + TL_STREAM_SOURCECB pfProducerCB, + void *pvProducerUD); + +/*************************************************************************/ /*! + @Function TLStreamOpen + @Description Attach to existing stream that has already been created by a + TLStreamCreate call. A handle is returned to the stream. + @Output phStream Pointer to handle to store the stream. + @Input szStreamName Name of stream, should match an already + existing stream name + @Return PVRSRV_ERROR_NOT_FOUND None of the streams matched the + requested stream name. + PVRSRV_ERROR_INVALID_PARAMS Non-NULL pointer to stream + handler is required. + @Return PVRSRV_OK Success. +*/ /**************************************************************************/ +PVRSRV_ERROR +TLStreamOpen(IMG_HANDLE *phStream, + const IMG_CHAR *szStreamName); + + +/*************************************************************************/ /*! + @Function TLStreamReset + @Description Resets read and write pointers and pending flag. + @Output phStream Pointer to stream's handle +*/ /**************************************************************************/ +void TLStreamReset(IMG_HANDLE hStream); + +/*************************************************************************/ /*! + @Function TLStreamSetNotifStream + @Description Registers a "notification stream" which will be used to + publish information about state change of the "hStream" + stream. Notification can inform about events such as stream + open/close, etc. + @Input hStream Handle to stream to update. + @Input hNotifStream Handle to the stream which will be used for + publishing notifications. 
+ @Return PVRSRV_ERROR_INVALID_PARAMS If either of the parameters is + NULL + @Return PVRSRV_OK Success. +*/ /**************************************************************************/ +PVRSRV_ERROR +TLStreamSetNotifStream(IMG_HANDLE hStream, IMG_HANDLE hNotifStream); + +/*************************************************************************/ /*! + @Function TLStreamReconfigure + @Description Request the stream flags controlling buffer behaviour to + be updated. + In the case where TL_OPMODE_BLOCK is to be used, + TLStreamCreate should be called without that flag and this + function used to change the stream mode once a consumer process + has been started. This avoids a deadlock scenario where the + TLStreaWrite/TLStreamReserve call will hold the Bridge Lock + while blocking if the TL buffer is full. + The TL_OPMODE_BLOCK should never drop the Bridge Lock + as this leads to another deadlock scenario where the caller to + TLStreamWrite/TLStreamReserve has already acquired another lock + (e.g. gHandleLock) which is not dropped. This then leads to that + thread acquiring locks out of order. + @Input hStream Handle to stream to update. + @Input ui32StreamFlags Flags that configure buffer behaviour. See above. + @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handle or inconsistent + stream flags. + @Return PVRSRV_ERROR_NOT_READY Stream is currently being written to + try again later. + @Return eError Internal services call returned + eError error number. + @Return PVRSRV_OK +*/ /**************************************************************************/ +PVRSRV_ERROR +TLStreamReconfigure(IMG_HANDLE hStream, + IMG_UINT32 ui32StreamFlags); + +/*************************************************************************/ /*! + @Function TLStreamClose + @Description Detach from the stream associated with the given handle. If + the current handle is the last one accessing the stream + (i.e. 
the number of TLStreamCreate+TLStreamOpen calls matches + the number of TLStreamClose calls) then the stream is also + deleted. + On return the handle is no longer valid. + @Input hStream Handle to stream that will be closed. + @Return None. +*/ /**************************************************************************/ +void +TLStreamClose(IMG_HANDLE hStream); + +/*************************************************************************/ /*! + @Function TLStreamReserve + @Description Reserve space in stream buffer. When successful every + TLStreamReserve call must be followed by a matching + TLStreamCommit call. While a TLStreamCommit call is pending + for a stream, subsequent TLStreamReserve calls for this + stream will fail. + @Input hStream Stream handle. + @Output ppui8Data Pointer to a pointer to a location in the + buffer. The caller can then use this address + in writing data into the stream. + @Input ui32Size Number of bytes to reserve in buffer. + @Return PVRSRV_INVALID_PARAMS NULL stream handler. + @Return PVRSRV_ERROR_NOT_READY There are data previously reserved + that are pending to be committed. + @Return PVRSRV_ERROR_STREAM_MISUSE Misusing the stream by trying to + reserve more space than the + buffer size. + @Return PVRSRV_ERROR_STREAM_FULL The reserve size requested + is larger than the free + space. + @Return PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED The reserve size + requested is larger + than max TL packet size + @Return PVRSRV_ERROR_STREAM_NOT_ENOUGH_SPACE Permanent stream buffer + does not have enough space + for the reserve. + @Return PVRSRV_OK Success, output arguments valid. +*/ /**************************************************************************/ +PVRSRV_ERROR +TLStreamReserve(IMG_HANDLE hStream, + IMG_UINT8 **ppui8Data, + IMG_UINT32 ui32Size); + +/*************************************************************************/ /*! + @Function TLStreamReserve2 + @Description Reserve space in stream buffer. 
When successful every + TLStreamReserve call must be followed by a matching + TLStreamCommit call. While a TLStreamCommit call is pending + for a stream, subsequent TLStreamReserve calls for this + stream will fail. + @Input hStream Stream handle. + @Output ppui8Data Pointer to a pointer to a location in the + buffer. The caller can then use this address + in writing data into the stream. + @Input ui32Size Ideal number of bytes to reserve in buffer. + @Input ui32SizeMin Minimum number of bytes to reserve in buffer. + @Input pui32Available Optional, but when present and the + RESERVE_TOO_BIG error is returned, a size + suggestion is returned in this argument which + the caller can attempt to reserve again for a + successful allocation. + @Output pbIsReaderConnected Let writing clients know if reader is + connected or not, in case of error. + @Return PVRSRV_INVALID_PARAMS NULL stream handler. + @Return PVRSRV_ERROR_NOT_READY There are data previously reserved + that are pending to be committed. + @Return PVRSRV_ERROR_STREAM_MISUSE Misusing the stream by trying to + reserve more space than the + buffer size. + @Return PVRSRV_ERROR_STREAM_FULL The reserve size requested + is larger than the free + space. + Check the pui32Available + value for the correct + reserve size to use. + @Return PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED The reserve size + requested is larger + than max TL packet size + @Return PVRSRV_ERROR_STREAM_NOT_ENOUGH_SPACE Permanent stream buffer + does not have enough space + for the reserve. + @Return PVRSRV_OK Success, output arguments valid. +*/ /**************************************************************************/ +PVRSRV_ERROR +TLStreamReserve2(IMG_HANDLE hStream, + IMG_UINT8 **ppui8Data, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32SizeMin, + IMG_UINT32* pui32Available, + IMG_BOOL* pbIsReaderConnected); + +/*************************************************************************/ /*! 
+ @Function TLStreamReserveReturnFlags + @Description Reserve space in stream buffer. When successful every + TLStreamReserve call must be followed by a matching + TLStreamCommit call. While a TLStreamCommit call is pending + for a stream, subsequent TLStreamReserve calls for this + stream will fail. + @Input hStream Stream handle. + @Output ppui8Data Pointer to a pointer to a location in the + buffer. The caller can then use this address + in writing data into the stream. + @Input ui32Size Ideal number of bytes to reserve in buffer. + @Output pui32Flags Output parameter to return flags generated within + the reserve function. +*/ /**************************************************************************/ +PVRSRV_ERROR +TLStreamReserveReturnFlags(IMG_HANDLE hStream, + IMG_UINT8 **ppui8Data, + IMG_UINT32 ui32Size, + IMG_UINT32* pui32Flags); + +/*************************************************************************/ /*! + @Function TLStreamGetUT + @Description Returns the current stream utilisation in bytes + @Input hStream Stream handle. + @Return IMG_UINT32 Stream utilisation +*/ /**************************************************************************/ +IMG_UINT32 TLStreamGetUT(IMG_HANDLE hStream); + +/*************************************************************************/ /*! + @Function TLStreamCommit + @Description Notify TL that data have been written in the stream buffer. + Should always follow and match TLStreamReserve call. + @Input hStream Stream handle. + @Input ui32Size Number of bytes that have been added to the + stream. + @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handle. + @Return PVRSRV_ERROR_STREAM_MISUSE Commit results in more data + committed than the buffer size, + the stream is misused. + @Return eError Commit was successful but + internal services call returned + eError error number. 
+ @Return PVRSRV_OK +*/ /**************************************************************************/ +PVRSRV_ERROR +TLStreamCommit(IMG_HANDLE hStream, + IMG_UINT32 ui32Size); + +/*************************************************************************/ /*! + @Function TLStreamWrite + @Description Combined Reserve/Commit call. This function Reserves space in + the specified stream buffer, copies ui32Size bytes of data + from the array pui8Src points to and Commits in an "atomic" + style operation. + @Input hStream Stream handle. + @Input pui8Src Source to read data from. + @Input ui32Size Number of bytes to copy and commit. + @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handler. + @Return eError Error codes returned by either + Reserve or Commit. + @Return PVRSRV_OK + */ /**************************************************************************/ +PVRSRV_ERROR +TLStreamWrite(IMG_HANDLE hStream, + IMG_UINT8 *pui8Src, + IMG_UINT32 ui32Size); + +/*************************************************************************/ /*! + @Function TLStreamWriteRetFlags + @Description Combined Reserve/Commit call. This function Reserves space in + the specified stream buffer, copies ui32Size bytes of data + from the array pui8Src points to and Commits in an "atomic" + style operation. Also accepts a pointer to a bit flag value + for returning write status flags. + @Input hStream Stream handle. + @Input pui8Src Source to read data from. + @Input ui32Size Number of bytes to copy and commit. + @Output pui32Flags Output parameter for write status info + @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handler. + @Return eError Error codes returned by either + Reserve or Commit. 
+ @Return PVRSRV_OK + */ /**************************************************************************/ +PVRSRV_ERROR +TLStreamWriteRetFlags(IMG_HANDLE hStream, + IMG_UINT8 *pui8Src, + IMG_UINT32 ui32Size, + IMG_UINT32 *pui32Flags); + +/*************************************************************************/ /*! + @Function TLStreamSync + @Description Signal the consumer to start acquiring data from the stream + buffer. Called by producers that use the flag + TL_FLAG_NO_SIGNAL_ON_COMMIT to manually control when + consumers starting reading the stream. + Used when multiple small writes need to be batched. + @Input hStream Stream handle. + @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handle. + @Return eError Error codes returned by either + Reserve or Commit. + @Return PVRSRV_OK + */ /**************************************************************************/ +PVRSRV_ERROR +TLStreamSync(IMG_HANDLE hStream); + + +/*************************************************************************/ /*! + @Function TLStreamMarkEOS + @Description Insert a EOS marker packet in the given stream. + @Input hStream Stream handle. + @Input bRemoveOld if TRUE, remove old stream record file before + splitting to new file. + @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handler. + @Return eError Error codes returned by either + Reserve or Commit. + @Return PVRSRV_OK Success. +*/ /**************************************************************************/ +PVRSRV_ERROR +TLStreamMarkEOS(IMG_HANDLE hStream, IMG_BOOL bRemoveOld); + +/*************************************************************************/ /*! +@Function TLStreamMarkStreamOpen +@Description Puts *open* stream packet into hStream's notification stream, + if set, error otherwise." +@Input hStream Stream handle. 
+@Return PVRSRV_OK on success and error code on failure +*/ /**************************************************************************/ +PVRSRV_ERROR +TLStreamMarkStreamOpen(IMG_HANDLE hStream); + +/*************************************************************************/ /*! +@Function TLStreamMarkStreamClose +@Description Puts *close* stream packet into hStream's notification stream, + if set, error otherwise." +@Input hStream Stream handle. +@Return PVRSRV_OK on success and error code on failure +*/ /**************************************************************************/ +PVRSRV_ERROR +TLStreamMarkStreamClose(IMG_HANDLE hStream); + +/*************************************************************************/ /*! + @Function TLStreamInfo + @Description Run time information about buffer elemental sizes. + It sets psInfo members accordingly. Users can use those values + to calculate the parameters they use in TLStreamCreate and + TLStreamReserve. + @Output psInfo pointer to stream info structure. + @Return None. +*/ /**************************************************************************/ +void +TLStreamInfo(IMG_HANDLE hStream, PTL_STREAM_INFO psInfo); + +/*************************************************************************/ /*! + @Function TLStreamIsOpenForReading + @Description Query if a stream has any readers connected. + @Input hStream Stream handle. + @Return IMG_BOOL True if at least one reader is connected, + false otherwise +*/ /**************************************************************************/ +IMG_BOOL +TLStreamIsOpenForReading(IMG_HANDLE hStream); + +/*************************************************************************/ /*! + @Function TLStreamOutOfData + @Description Query if the stream is empty (no data waiting to be read). + @Input hStream Stream handle. 
+ @Return IMG_BOOL True if read==write, no data waiting, + false otherwise +*/ /**************************************************************************/ +IMG_BOOL TLStreamOutOfData(IMG_HANDLE hStream); + +/*************************************************************************/ /*! + @Function TLStreamResetProducerByteCount + @Description Reset the producer byte counter on the specified stream. + @Input hStream Stream handle. + @Input IMG_UINT32 Value to reset counter to, often 0. + @Return PVRSRV_OK Success. + @Return PVRSRV_ERROR_STREAM_MISUSE Success but the read and write + positions did not match, + stream not empty. +*/ /**************************************************************************/ + +PVRSRV_ERROR +TLStreamResetProducerByteCount(IMG_HANDLE hStream, IMG_UINT32 ui32Value); + +#endif /* TLSTREAM_H */ +/***************************************************************************** + End of file (tlstream.h) +*****************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/trace_events.c b/drivers/gpu/drm/phytium/octopus/trace_events.c new file mode 100644 index 000000000000..b6ccaaee3d94 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/trace_events.c @@ -0,0 +1,265 @@ +/*************************************************************************/ /*! +@Title Linux trace event helper functions +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include +#include + +#if defined(CONFIG_TRACE_GPU_MEM) || defined(PVRSRV_ENABLE_GPU_MEM_TRACEPOINT) +#if !defined(CONFIG_TRACE_GPU_MEM) +#define CREATE_TRACE_POINTS +#include +#undef CREATE_TRACE_POINTS +#else /* !defined(CONFIG_TRACE_GPU_MEM) */ +#include +#endif /* !defined(CONFIG_TRACE_GPU_MEM) */ +#endif /* defined(CONFIG_TRACE_GPU_MEM) || defined(PVRSRV_ENABLE_GPU_MEM_TRACEPOINT) */ + +#include "img_types.h" +#include "trace_events.h" +#include "rogue_trace_events.h" +#include "sync_checkpoint_external.h" + +static bool fence_update_event_enabled, fence_check_event_enabled; + +bool trace_rogue_are_fence_updates_traced(void) +{ + return fence_update_event_enabled; +} + +bool trace_rogue_are_fence_checks_traced(void) +{ + return fence_check_event_enabled; +} + +/* + * Call backs referenced from rogue_trace_events.h. Note that these are not + * thread-safe, however, since running trace code when tracing is not enabled is + * simply a no-op, there is no harm in it. + */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) +int trace_fence_update_enabled_callback(void) +#else +void trace_fence_update_enabled_callback(void) +#endif +{ + fence_update_event_enabled = true; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) + return 0; +#endif +} + +void trace_fence_update_disabled_callback(void) +{ + fence_update_event_enabled = false; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) +int trace_fence_check_enabled_callback(void) +#else +void trace_fence_check_enabled_callback(void) +#endif +{ + fence_check_event_enabled = true; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) + return 0; +#endif +} + +void trace_fence_check_disabled_callback(void) +{ + fence_check_event_enabled = false; +} + +#if defined(SUPPORT_RGX) +/* This is a helper that calls trace_rogue_fence_update for each fence in an + * array. 
+ */ +void trace_rogue_fence_updates(const char *cmd, const char *dm, IMG_UINT32 ui32FWContext, + IMG_UINT32 ui32Offset, + IMG_UINT uCount, + PRGXFWIF_UFO_ADDR *pauiAddresses, + IMG_UINT32 *paui32Values) +{ + IMG_UINT i; + for (i = 0; i < uCount; i++) + { + trace_rogue_fence_update(current->comm, cmd, dm, ui32FWContext, ui32Offset, + pauiAddresses[i].ui32Addr, PVRSRV_SYNC_CHECKPOINT_SIGNALLED); + } +} + +void trace_rogue_fence_checks(const char *cmd, const char *dm, IMG_UINT32 ui32FWContext, + IMG_UINT32 ui32Offset, + IMG_UINT uCount, + PRGXFWIF_UFO_ADDR *pauiAddresses, + IMG_UINT32 *paui32Values) +{ + IMG_UINT i; + for (i = 0; i < uCount; i++) + { + trace_rogue_fence_check(current->comm, cmd, dm, ui32FWContext, ui32Offset, + pauiAddresses[i].ui32Addr, PVRSRV_SYNC_CHECKPOINT_SIGNALLED); + } +} + +void trace_rogue_ufo_updates(IMG_UINT64 ui64OSTimestamp, + IMG_UINT32 ui32FWCtx, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + IMG_UINT32 ui32UFOCount, + const RGX_HWPERF_UFO_DATA_ELEMENT *puData) +{ + IMG_UINT i; + for (i = 0; i < ui32UFOCount; i++) + { + trace_rogue_ufo_update(ui64OSTimestamp, ui32FWCtx, + ui32IntJobRef, + ui32ExtJobRef, + puData->sUpdate.ui32FWAddr, + puData->sUpdate.ui32OldValue, + puData->sUpdate.ui32NewValue); + puData = IMG_OFFSET_ADDR(puData, sizeof(puData->sUpdate)); + } +} + +void trace_rogue_ufo_checks_success(IMG_UINT64 ui64OSTimestamp, + IMG_UINT32 ui32FWCtx, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + IMG_BOOL bPrEvent, + IMG_UINT32 ui32UFOCount, + const RGX_HWPERF_UFO_DATA_ELEMENT *puData) +{ + IMG_UINT i; + for (i = 0; i < ui32UFOCount; i++) + { + if (bPrEvent) + { + trace_rogue_ufo_pr_check_success(ui64OSTimestamp, ui32FWCtx, + ui32IntJobRef, ui32ExtJobRef, + puData->sCheckSuccess.ui32FWAddr, + puData->sCheckSuccess.ui32Value); + } + else + { + trace_rogue_ufo_check_success(ui64OSTimestamp, ui32FWCtx, + ui32IntJobRef, ui32ExtJobRef, + puData->sCheckSuccess.ui32FWAddr, + puData->sCheckSuccess.ui32Value); + } + 
puData = IMG_OFFSET_ADDR(puData, sizeof(puData->sCheckSuccess)); + } +} + +void trace_rogue_ufo_checks_fail(IMG_UINT64 ui64OSTimestamp, + IMG_UINT32 ui32FWCtx, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + IMG_BOOL bPrEvent, + IMG_UINT32 ui32UFOCount, + const RGX_HWPERF_UFO_DATA_ELEMENT *puData) +{ + IMG_UINT i; + for (i = 0; i < ui32UFOCount; i++) + { + if (bPrEvent) + { + trace_rogue_ufo_pr_check_fail(ui64OSTimestamp, ui32FWCtx, + ui32IntJobRef, ui32ExtJobRef, + puData->sCheckFail.ui32FWAddr, + puData->sCheckFail.ui32Value, + puData->sCheckFail.ui32Required); + } + else + { + trace_rogue_ufo_check_fail(ui64OSTimestamp, ui32FWCtx, + ui32IntJobRef, ui32ExtJobRef, + puData->sCheckFail.ui32FWAddr, + puData->sCheckFail.ui32Value, + puData->sCheckFail.ui32Required); + } + puData = IMG_OFFSET_ADDR(puData, sizeof(puData->sCheckFail)); + } +} +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) + +int PVRGpuTraceEnableUfoCallbackWrapper(void) +{ + +#if defined(SUPPORT_RGX) + PVRGpuTraceEnableUfoCallback(); +#endif + + return 0; +} + +int PVRGpuTraceEnableFirmwareActivityCallbackWrapper(void) +{ + +#if defined(SUPPORT_RGX) + PVRGpuTraceEnableFirmwareActivityCallback(); +#endif + + return 0; +} +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) */ + +void TracepointUpdateGPUMemGlobal(IMG_UINT8 ui8GPUId, + IMG_UINT64 ui64Size) +{ +#if defined(CONFIG_TRACE_GPU_MEM) || defined(PVRSRV_ENABLE_GPU_MEM_TRACEPOINT) + trace_gpu_mem_total(ui8GPUId, 0, ui64Size); +#endif /* defined(CONFIG_TRACE_GPU_MEM) || defined(PVRSRV_ENABLE_GPU_MEM_TRACEPOINT) */ +} + +void TracepointUpdateGPUMemPerProcess(IMG_UINT8 ui8GPUId, + IMG_UINT32 ui32Pid, + IMG_UINT64 ui64Size) +{ +#if defined(CONFIG_TRACE_GPU_MEM) || defined(PVRSRV_ENABLE_GPU_MEM_TRACEPOINT) + trace_gpu_mem_total(ui8GPUId, ui32Pid, ui64Size); +#endif /* defined(CONFIG_TRACE_GPU_MEM) || defined(PVRSRV_ENABLE_GPU_MEM_TRACEPOINT) */ +} diff --git a/drivers/gpu/drm/phytium/octopus/trace_events.h 
b/drivers/gpu/drm/phytium/octopus/trace_events.h new file mode 100644 index 000000000000..78a574f91257 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/trace_events.h @@ -0,0 +1,198 @@ +/*************************************************************************/ /*! +@Title Linux trace events and event helper functions +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(TRACE_EVENTS_H) +#define TRACE_EVENTS_H + +#include "rgx_fwif_km.h" +#include "rgx_hwperf.h" + +/* We need to make these functions do nothing if CONFIG_EVENT_TRACING isn't + * enabled, just like the actual trace event functions that the kernel + * defines for us. + */ +#ifdef CONFIG_EVENT_TRACING +bool trace_rogue_are_fence_checks_traced(void); + +bool trace_rogue_are_fence_updates_traced(void); + +void trace_job_enqueue(IMG_UINT32 ui32FWContext, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + const char *pszKickType); + +#if defined(SUPPORT_RGX) +void trace_rogue_fence_updates(const char *cmd, const char *dm, + IMG_UINT32 ui32FWContext, + IMG_UINT32 ui32Offset, + IMG_UINT uCount, + PRGXFWIF_UFO_ADDR *pauiAddresses, + IMG_UINT32 *paui32Values); + +void trace_rogue_fence_checks(const char *cmd, const char *dm, + IMG_UINT32 ui32FWContext, + IMG_UINT32 ui32Offset, + IMG_UINT uCount, + PRGXFWIF_UFO_ADDR *pauiAddresses, + IMG_UINT32 *paui32Values); + +void trace_rogue_ufo_updates(IMG_UINT64 ui64OSTimestamp, + IMG_UINT32 ui32FWCtx, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + IMG_UINT32 ui32UFOCount, + const RGX_HWPERF_UFO_DATA_ELEMENT *puData); + +void trace_rogue_ufo_checks_success(IMG_UINT64 ui64OSTimestamp, + IMG_UINT32 ui32FWCtx, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + IMG_BOOL bPrEvent, + IMG_UINT32 
ui32UFOCount, + const RGX_HWPERF_UFO_DATA_ELEMENT *puData); + +void trace_rogue_ufo_checks_fail(IMG_UINT64 ui64OSTimestamp, + IMG_UINT32 ui32FWCtx, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + IMG_BOOL bPrEvent, + IMG_UINT32 ui32UFOCount, + const RGX_HWPERF_UFO_DATA_ELEMENT *puData); +#endif /* if defined(SUPPORT_RGX) */ + +void TracepointUpdateGPUMemGlobal(IMG_UINT8 ui8GPUId, + IMG_UINT64 ui64Size); + +void TracepointUpdateGPUMemPerProcess(IMG_UINT8 ui8GPUId, + IMG_UINT32 ui32Pid, + IMG_UINT64 ui64Size); + +#else /* CONFIG_TRACE_EVENTS */ +static inline +bool trace_rogue_are_fence_checks_traced(void) +{ + return false; +} + +static inline +bool trace_rogue_are_fence_updates_traced(void) +{ + return false; +} + +static inline +void trace_job_enqueue(IMG_UINT32 ui32FWContext, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + const char *pszKickType) +{ +} + +#if defined(SUPPORT_RGX) +static inline +void trace_rogue_fence_updates(const char *cmd, const char *dm, + IMG_UINT32 ui32FWContext, + IMG_UINT32 ui32Offset, + IMG_UINT uCount, + PRGXFWIF_UFO_ADDR *pauiAddresses, + IMG_UINT32 *paui32Values) +{ +} + +static inline +void trace_rogue_fence_checks(const char *cmd, const char *dm, + IMG_UINT32 ui32FWContext, + IMG_UINT32 ui32Offset, + IMG_UINT uCount, + PRGXFWIF_UFO_ADDR *pauiAddresses, + IMG_UINT32 *paui32Values) +{ +} + +static inline +void trace_rogue_ufo_updates(IMG_UINT64 ui64OSTimestamp, + IMG_UINT32 ui32FWCtx, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + IMG_UINT32 ui32UFOCount, + const RGX_HWPERF_UFO_DATA_ELEMENT *puData) +{ +} + +static inline +void trace_rogue_ufo_checks_success(IMG_UINT64 ui64OSTimestamp, + IMG_UINT32 ui32FWCtx, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 ui32IntJobRef, + IMG_BOOL bPrEvent, + IMG_UINT32 ui32UFOCount, + const RGX_HWPERF_UFO_DATA_ELEMENT *puData) +{ +} + +static inline +void trace_rogue_ufo_checks_fail(IMG_UINT64 ui64OSTimestamp, + IMG_UINT32 ui32FWCtx, + IMG_UINT32 ui32ExtJobRef, + IMG_UINT32 
ui32IntJobRef, + IMG_BOOL bPrEvent, + IMG_UINT32 ui32UFOCount, + const RGX_HWPERF_UFO_DATA_ELEMENT *puData) +{ +} +#endif /* if defined(SUPPORT_RGX)*/ + +static inline +void TracepointUpdateGPUMemGlobal(IMG_UINT8 ui8GPUId, + IMG_UINT64 ui64Size) +{ +} + +static inline +void TracepointUpdateGPUMemPerProcess(IMG_UINT8 ui8GPUId, + IMG_UINT32 ui32Pid, + IMG_UINT64 ui64Size) +{ +} + +#endif /* CONFIG_TRACE_EVENTS */ + +#endif /* TRACE_EVENTS_H */ diff --git a/drivers/gpu/drm/phytium/octopus/uniq_key_splay_tree.c b/drivers/gpu/drm/phytium/octopus/uniq_key_splay_tree.c new file mode 100644 index 000000000000..21c058101448 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/uniq_key_splay_tree.c @@ -0,0 +1,280 @@ +/*************************************************************************/ /*! +@File +@Title Provides splay-trees. +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Implementation of splay-trees. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ /**************************************************************************/ + +#include "allocmem.h" /* for OSMemAlloc / OSMemFree */ +#include "osfunc.h" /* for OSMemFree */ +#include "pvr_debug.h" +#include "uniq_key_splay_tree.h" + +/** + * This function performs a simple top down splay + * + * @param uiFlags the flags that must splayed to the root (if possible). + * @param psTree The tree to splay. + * @return the resulting tree after the splay operation. 
+ */ +IMG_INTERNAL +IMG_PSPLAY_TREE PVRSRVSplay (IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree) +{ + IMG_SPLAY_TREE sTmp1; + IMG_PSPLAY_TREE psLeft; + IMG_PSPLAY_TREE psRight; + IMG_PSPLAY_TREE psTmp2; + + if (psTree == NULL) + { + return NULL; + } + + sTmp1.psLeft = NULL; + sTmp1.psRight = NULL; + + psLeft = &sTmp1; + psRight = &sTmp1; + + for (;;) + { + if (uiFlags < psTree->uiFlags) + { + if (psTree->psLeft == NULL) + { + break; + } + + if (uiFlags < psTree->psLeft->uiFlags) + { + /* if we get to this point, we need to rotate right the tree */ + psTmp2 = psTree->psLeft; + psTree->psLeft = psTmp2->psRight; + psTmp2->psRight = psTree; + psTree = psTmp2; + if (psTree->psLeft == NULL) + { + break; + } + } + + /* if we get to this point, we need to link right */ + psRight->psLeft = psTree; + psRight = psTree; + psTree = psTree->psLeft; + } + else + { + if (uiFlags > psTree->uiFlags) + { + if (psTree->psRight == NULL) + { + break; + } + + if (uiFlags > psTree->psRight->uiFlags) + { + /* if we get to this point, we need to rotate left the tree */ + psTmp2 = psTree->psRight; + psTree->psRight = psTmp2->psLeft; + psTmp2->psLeft = psTree; + psTree = psTmp2; + if (psTree->psRight == NULL) + { + break; + } + } + + /* if we get to this point, we need to link left */ + psLeft->psRight = psTree; + psLeft = psTree; + psTree = psTree->psRight; + } + else + { + break; + } + } + } + + /* at this point re-assemble the tree */ + psLeft->psRight = psTree->psLeft; + psRight->psLeft = psTree->psRight; + psTree->psLeft = sTmp1.psRight; + psTree->psRight = sTmp1.psLeft; + return psTree; +} + + +/** + * This function inserts a node into the Tree (unless it is already present, in + * which case it is equivalent to performing only a splay operation + * + * @param uiFlags the key of the new node + * @param psTree The tree into which one wants to add a new node + * @return The resulting with the node in it + */ +IMG_INTERNAL +IMG_PSPLAY_TREE PVRSRVInsert(IMG_PSPLAY_FLAGS_T uiFlags, 
IMG_PSPLAY_TREE psTree) +{ + IMG_PSPLAY_TREE psNew; + + if (psTree != NULL) + { + psTree = PVRSRVSplay(uiFlags, psTree); + if (psTree->uiFlags == uiFlags) + { + return psTree; + } + } + + psNew = (IMG_PSPLAY_TREE) OSAllocMem(sizeof(IMG_SPLAY_TREE)); + if (psNew == NULL) + { + PVR_DPF ((PVR_DBG_ERROR, "Error: failed to allocate memory to add a node to the splay tree.")); + return NULL; + } + + psNew->uiFlags = uiFlags; + OSCachedMemSet(&(psNew->buckets[0]), 0, sizeof(psNew->buckets)); + +#if defined(PVR_CTZLL) + psNew->bHasEltsMapping = ~(((IMG_ELTS_MAPPINGS) 1 << (sizeof(psNew->buckets) / (sizeof(psNew->buckets[0])))) - 1); +#endif + + if (psTree == NULL) + { + psNew->psLeft = NULL; + psNew->psRight = NULL; + return psNew; + } + + if (uiFlags < psTree->uiFlags) + { + psNew->psLeft = psTree->psLeft; + psNew->psRight = psTree; + psTree->psLeft = NULL; + } + else + { + psNew->psRight = psTree->psRight; + psNew->psLeft = psTree; + psTree->psRight = NULL; + } + + return psNew; +} + + +/** + * Deletes a node from the tree (unless it is not there, in which case it is + * equivalent to a splay operation) + * + * @param uiFlags the value of the node to remove + * @param psTree the tree into which the node must be removed + * @return the resulting tree + */ +IMG_INTERNAL +IMG_PSPLAY_TREE PVRSRVDelete(IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree) +{ + IMG_PSPLAY_TREE psTmp; + if (psTree == NULL) + { + return NULL; + } + + psTree = PVRSRVSplay(uiFlags, psTree); + if (uiFlags == psTree->uiFlags) + { + /* The value was present in the tree */ + if (psTree->psLeft == NULL) + { + psTmp = psTree->psRight; + } + else + { + psTmp = PVRSRVSplay(uiFlags, psTree->psLeft); + psTmp->psRight = psTree->psRight; + } + OSFreeMem(psTree); + return psTmp; + } + + /* The value was not present in the tree, so just return it as is + * (after the splay) */ + return psTree; +} + +/** + * This function picks up the appropriate node for the given flags + * + * @param uiFlags the flags that must 
associated with the node. + * @param psTree current splay tree node. + * @return the resulting tree node after the search operation. + */ +IMG_INTERNAL +IMG_PSPLAY_TREE PVRSRVFindNode(IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree) +{ + if (psTree == NULL) + { + return NULL; + } + + while (psTree) + { + if (uiFlags == psTree->uiFlags) + { + return psTree; + } + + if (uiFlags < psTree->uiFlags) + { + psTree = psTree->psLeft; + continue; + } + + if (uiFlags > psTree->uiFlags) + { + psTree = psTree->psRight; + continue; + } + } + + return NULL; +} diff --git a/drivers/gpu/drm/phytium/octopus/uniq_key_splay_tree.h b/drivers/gpu/drm/phytium/octopus/uniq_key_splay_tree.h new file mode 100644 index 000000000000..5f7334c331b0 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/uniq_key_splay_tree.h @@ -0,0 +1,90 @@ +/*************************************************************************/ /*! +@File +@Title Splay trees interface +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Provides debug functionality +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef UNIQ_KEY_SPLAY_TREE_H_ +#define UNIQ_KEY_SPLAY_TREE_H_ + +#include "img_types.h" +#include "pvr_intrinsics.h" + +#if defined(PVR_CTZLL) + /* map the is_bucket_n_free to an int. + * This way, the driver can find the first non empty without loop + */ + typedef IMG_UINT64 IMG_ELTS_MAPPINGS; +#endif + +typedef IMG_UINT64 IMG_PSPLAY_FLAGS_T; + +/* head of list of free boundary tags for indexed by pvr_log2 of the + boundary tag size */ + +#define FREE_TABLE_LIMIT 40 + +struct _BT_; + +typedef struct img_splay_tree +{ + /* left child/subtree */ + struct img_splay_tree * psLeft; + + /* right child/subtree */ + struct img_splay_tree * psRight; + + /* Flags to match on this span, used as the key. 
*/ + IMG_PSPLAY_FLAGS_T uiFlags; +#if defined(PVR_CTZLL) + /* each bit of this int is a boolean telling if the corresponding + bucket is empty or not */ + IMG_ELTS_MAPPINGS bHasEltsMapping; +#endif + struct _BT_ * buckets[FREE_TABLE_LIMIT]; +} IMG_SPLAY_TREE, *IMG_PSPLAY_TREE; + +IMG_PSPLAY_TREE PVRSRVSplay (IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree); +IMG_PSPLAY_TREE PVRSRVInsert(IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree); +IMG_PSPLAY_TREE PVRSRVDelete(IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree); +IMG_PSPLAY_TREE PVRSRVFindNode(IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree); + + +#endif /* !UNIQ_KEY_SPLAY_TREE_H_ */ diff --git a/drivers/gpu/drm/phytium/octopus/vmm_impl.h b/drivers/gpu/drm/phytium/octopus/vmm_impl.h new file mode 100644 index 000000000000..65e04eff44e3 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/vmm_impl.h @@ -0,0 +1,186 @@ +/*************************************************************************/ /*! +@File vmm_impl.h +@Title Common VM manager API +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description This header provides common VM manager definitions that need to + be shared by system virtualization layer itself and modules that + implement the actual VM manager types. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef VMM_IMPL_H +#define VMM_IMPL_H + +#include "img_types.h" +#include "pvrsrv_error.h" + +typedef enum _VMM_CONF_PARAM_ +{ + VMM_CONF_PRIO_OSID0 = 0, + VMM_CONF_PRIO_OSID1 = 1, + VMM_CONF_PRIO_OSID2 = 2, + VMM_CONF_PRIO_OSID3 = 3, + VMM_CONF_PRIO_OSID4 = 4, + VMM_CONF_PRIO_OSID5 = 5, + VMM_CONF_PRIO_OSID6 = 6, + VMM_CONF_PRIO_OSID7 = 7, + VMM_CONF_HCS_DEADLINE = 8 +} VMM_CONF_PARAM; + +/* + Virtual machine manager (hypervisor) para-virtualization (PVZ) connection: + - Type is implemented by host and guest drivers + - Assumes synchronous function call semantics + - Unidirectional semantics + - For Host (vmm -> host) + - For Guest (guest -> vmm) + - Parameters can be IN/OUT/INOUT + + - Host pvz entries are pre-implemented by IMG + - For host implementation, see vmm_pvz_server.c + - Called by host side hypercall handler or VMM + + - Guest pvz entries are supplied by 3rd-party + - These are specific to hypervisor (VMM) type + - These implement the actual hypercalls mechanism + + Para-virtualization (PVZ) call runtime sequence: + 1 - Guest driver in guest VM calls PVZ function + 1.1 - Guest PVZ connection calls + 1.2 - Guest VM Manager type which + 1.2.1 - Performs any pre-processing like parameter packing, etc. + 1.2.2 - Issues hypercall (blocking synchronous call) + + 2 - VM Manager (hypervisor) receives hypercall + 2.1 - Hypercall handler: + 2.1.1 - Performs any pre-processing + 2.1.2 - If call terminates in VM Manager: perform action and return from hypercall + 2.1.3 - Otherwise forward to host driver (implementation specific call) + + 3 - Host driver receives call from VM Manager + 3.1 - Host VM manager type: + 3.1.1 - Performs any pre-processing like parameter unpacking, etc. 
+ 3.1.2 - Acquires host driver PVZ handler and calls the appropriate entry + 3.2 - Host PVZ connection calls corresponding host system virtualisation layer + 3.3 - Host driver system virtualisation layer: + 3.3.1 - Perform action requested by guest driver + 3.3.2 - Return to host VM Manager type + 3.4 - Host VM Manager type: + 3.4.1 - Prepare to return from hypercall + 3.4.2 - Perform any post-processing like result packing, etc. + 3.4.3 - Issue return from hypercall + + 4 - VM Manager (hypervisor) + 4.1 - Perform any post-processing + 4.2 - Return control to guest driver + + 5 - Guest driver in guest VM + 5.1 - Perform any post-processing like parameter unpacking, etc. + 5.2 - Continue execution in guest VM + */ +typedef struct _VMM_PVZ_CONNECTION_ +{ + struct { + /* + This pair must be implemented if the guest is responsible + for allocating the physical heap that backs its firmware + allocations, this is the default configuration. The physical + heap is allocated within the guest VM IPA space and this + IPA Addr/Size must be re-expressed as PA space Addr/Size + by the VM manager before forwarding request to host. + If not implemented, return PVRSRV_ERROR_NOT_IMPLEMENTED. 
+ */ + PVRSRV_ERROR (*pfnMapDevPhysHeap)(IMG_UINT32 ui32FuncID, + IMG_UINT32 ui32DevID, + IMG_UINT64 ui64Size, + IMG_UINT64 ui64PAddr); + + PVRSRV_ERROR (*pfnUnmapDevPhysHeap)(IMG_UINT32 ui32FuncID, + IMG_UINT32 ui32DevID); + } sClientFuncTab; + + struct { + /* + Corresponding server side entries to handle guest PVZ calls + NOTE: + - Additional PVZ function ui32OSID parameter + - OSID determination is responsibility of VM manager + - Actual OSID value must be supplied by VM manager + - This can be done either in client/VMM/host side + - Must be done before host pvz function(s) are called + - Host pvz function validates incoming OSID values + */ + PVRSRV_ERROR (*pfnMapDevPhysHeap)(IMG_UINT32 ui32OSID, + IMG_UINT32 ui32FuncID, + IMG_UINT32 ui32DevID, + IMG_UINT64 ui64Size, + IMG_UINT64 ui64PAddr); + + PVRSRV_ERROR (*pfnUnmapDevPhysHeap)(IMG_UINT32 ui32OSID, + IMG_UINT32 ui32FuncID, + IMG_UINT32 ui32DevID); + } sServerFuncTab; + + struct { + /* + This is used by the VM manager to report pertinent runtime guest VM + information to the host; these events may in turn be forwarded to + the firmware + */ + PVRSRV_ERROR (*pfnOnVmOnline)(IMG_UINT32 ui32OSID); + + PVRSRV_ERROR (*pfnOnVmOffline)(IMG_UINT32 ui32OSID); + + PVRSRV_ERROR (*pfnVMMConfigure)(VMM_CONF_PARAM eVMMParamType, IMG_UINT32 ui32ParamValue); + + } sVmmFuncTab; +} VMM_PVZ_CONNECTION; + +/*! +******************************************************************************* + @Function VMMCreatePvzConnection() and VMMDestroyPvzConnection() + @Description Both the guest and VM manager call this in order to obtain a + PVZ connection to the VM and host respectively; that is, guest + calls it to obtain connection to VM, VM calls it to obtain a + connection to the host. + @Return PVRSRV_OK on success. 
Otherwise, a PVRSRV error code +******************************************************************************/ +PVRSRV_ERROR VMMCreatePvzConnection(VMM_PVZ_CONNECTION **psPvzConnection); +void VMMDestroyPvzConnection(VMM_PVZ_CONNECTION *psPvzConnection); + +#endif /* VMM_IMPL_H */ diff --git a/drivers/gpu/drm/phytium/octopus/vmm_pvz_client.c b/drivers/gpu/drm/phytium/octopus/vmm_pvz_client.c new file mode 100644 index 000000000000..645ddd9882c9 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/vmm_pvz_client.c @@ -0,0 +1,138 @@ +/*************************************************************************/ /*! +@File vmm_pvz_client.c +@Title VM manager client para-virtualization +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Header provides VMM client para-virtualization APIs +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "pvrsrv.h" +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv_error.h" + +#include "vmm_impl.h" +#include "vz_vmm_pvz.h" +#include "vmm_pvz_client.h" + + +static inline void +PvzClientLockAcquire(void) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + OSLockAcquire(psPVRSRVData->hPvzConnectionLock); +} + +static inline void +PvzClientLockRelease(void) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + OSLockRelease(psPVRSRVData->hPvzConnectionLock); +} + +/* + * =========================================================== + * The following client para-virtualization (pvz) functions + * are exclusively called by guests to initiate a pvz call + * to the host via hypervisor (guest -> vm manager -> host) + * =========================================================== + */ + +PVRSRV_ERROR +PvzClientMapDevPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + PVRSRV_ERROR eError; + IMG_DEV_PHYADDR sDevPAddr; + VMM_PVZ_CONNECTION *psVmmPvz; + IMG_UINT32 uiFuncID = PVZ_BRIDGE_MAPDEVICEPHYSHEAP; + PHYS_HEAP *psFwPhysHeap = psDevConfig->psDevNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CONFIG]; + + eError = PhysHeapGetDevPAddr(psFwPhysHeap, &sDevPAddr); + +#if defined(PVR_PMR_TRANSLATE_UMA_ADDRESSES) +{ + /* Host expects PA rather than IPA address, so on the platforms where + * IPA-PA translation is not done in hw, performs a software translation */ + + IMG_DEV_PHYADDR sDevPAddrTranslated; + + PhysHeapCpuPAddrToDevPAddr(psFwPhysHeap, 1, &sDevPAddrTranslated, (IMG_CPU_PHYADDR *)&sDevPAddr); + sDevPAddr.uiAddr = sDevPAddrTranslated.uiAddr; +} +#endif + + PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapGetDevPAddr"); + PVR_LOG_RETURN_IF_FALSE((sDevPAddr.uiAddr != 0), "PhysHeapGetDevPAddr", PVRSRV_ERROR_INVALID_PARAMS); + + psVmmPvz = PvzConnectionAcquire(); + PvzClientLockAcquire(); + + eError = psVmmPvz->sClientFuncTab.pfnMapDevPhysHeap(uiFuncID, + 0, + 
RGX_FIRMWARE_RAW_HEAP_SIZE, + sDevPAddr.uiAddr); + + PvzClientLockRelease(); + PvzConnectionRelease(psVmmPvz); + + return eError; +} + +PVRSRV_ERROR +PvzClientUnmapDevPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + PVRSRV_ERROR eError; + IMG_UINT32 uiFuncID = PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP; + VMM_PVZ_CONNECTION *psVmmPvz = PvzConnectionAcquire(); + PVR_ASSERT(psVmmPvz); + + PvzClientLockAcquire(); + + PVR_ASSERT(psVmmPvz->sClientFuncTab.pfnUnmapDevPhysHeap); + + eError = psVmmPvz->sClientFuncTab.pfnUnmapDevPhysHeap(uiFuncID, 0); + + PvzClientLockRelease(); + PvzConnectionRelease(psVmmPvz); + + return eError; +} + +/****************************************************************************** + End of file (vmm_pvz_client.c) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/vmm_pvz_client.h b/drivers/gpu/drm/phytium/octopus/vmm_pvz_client.h new file mode 100644 index 000000000000..b90fc57ec1b9 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/vmm_pvz_client.h @@ -0,0 +1,77 @@ +/*************************************************************************/ /*! +@File vmm_pvz_client.h +@Title Guest VM manager client para-virtualization routines +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Header provides guest VMM client para-virtualization APIs +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef VMM_PVZ_CLIENT_H +#define VMM_PVZ_CLIENT_H + +#include "pvrsrv.h" +#include "img_types.h" +#include "pvrsrv_error.h" +#include "vmm_pvz_common.h" +#include "vmm_impl.h" + +/*! +******************************************************************************* + @Function PvzClientMapDevPhysHeap + @Description The guest front-end to initiate a pfnMapDevPhysHeap PVZ call + to the host. + @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code +******************************************************************************/ +PVRSRV_ERROR +PvzClientMapDevPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig); + +/*! +******************************************************************************* + @Function PvzClientUnmapDevPhysHeap + @Description The guest front-end to initiate a pfnUnmapDevPhysHeap PVZ call + to the host. + @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code +******************************************************************************/ +PVRSRV_ERROR +PvzClientUnmapDevPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig); + +#endif /* VMM_PVZ_CLIENT_H */ + +/****************************************************************************** + End of file (vmm_pvz_client.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/vmm_pvz_common.h b/drivers/gpu/drm/phytium/octopus/vmm_pvz_common.h new file mode 100644 index 000000000000..e4719cb2c3ef --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/vmm_pvz_common.h @@ -0,0 +1,65 @@ +/*************************************************************************/ /*! +@File vmm_pvz_common.h +@Title Common VM manager function IDs +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Header provides VM manager para-virtualization function IDs and + definitions of their payload structures, if appropriate. 
+@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef VMM_PVZ_COMMON_H +#define VMM_PVZ_COMMON_H + +#include "img_types.h" + +#define PVZ_BRIDGE_DEFAULT 0UL +#define PVZ_BRIDGE_MAPDEVICEPHYSHEAP (PVZ_BRIDGE_DEFAULT + 1) +#define PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP (PVZ_BRIDGE_MAPDEVICEPHYSHEAP + 1) +#define PVZ_BRIDGE_LAST (PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP + 1) + +typedef struct _PVZ_BRIDGEPARA_MAPDEVICEPHYSHEAP +{ + IMG_UINT64 ui64MemBase; + IMG_UINT32 ui32OSID; +}PVZ_BRIDGEPARA_MAPDEVICEPHYSHEAP; + +#endif /* VMM_PVZ_COMMON_H */ + +/***************************************************************************** + End of file (vmm_pvz_common.h) +*****************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/vmm_pvz_server.c b/drivers/gpu/drm/phytium/octopus/vmm_pvz_server.c new file mode 100644 index 000000000000..a09ec277b02d --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/vmm_pvz_server.c @@ -0,0 +1,245 @@ +/*************************************************************************/ /*! +@File vmm_pvz_server.c +@Title VM manager server para-virtualization handlers +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Header provides VMM server para-virtz handler APIs +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "pvrsrv.h" +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv_error.h" +#include "rgxfwutils.h" + +#include "vz_vm.h" +#include "vmm_impl.h" +#include "vz_vmm_pvz.h" +#include "vmm_pvz_server.h" + +static inline void +PvzServerLockAcquire(void) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + OSLockAcquire(psPVRSRVData->hPvzConnectionLock); +} + +static inline void +PvzServerLockRelease(void) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + OSLockRelease(psPVRSRVData->hPvzConnectionLock); +} + + +/* + * =========================================================== + * The following server para-virtualization (pvz) functions + * are exclusively called by the VM manager (hypervisor) on + * behalf of guests to complete guest pvz calls + * (guest -> vm manager -> host) + * =========================================================== + */ + +PVRSRV_ERROR +PvzServerMapDevPhysHeap(IMG_UINT32 ui32OSID, + IMG_UINT32 ui32FuncID, + IMG_UINT32 ui32DevID, + IMG_UINT64 ui64Size, + IMG_UINT64 ui64PAddr) +{ +#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) + /* + * Reject hypercall if called on a system configured at build time to + * preallocate the Guest's firmware heaps from static carveout memory. 
+ */ + PVR_DPF((PVR_DBG_ERROR, + "%s: Host PVZ config: Does not match with Guest PVZ config\n" + " Host preallocates the Guest's FW physheap from static memory carveouts at startup.\n", __func__)); + return PVRSRV_ERROR_INVALID_PVZ_CONFIG; +#else + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_LOG_RETURN_IF_FALSE((ui32DevID == 0), "Invalid Device ID", PVRSRV_ERROR_INVALID_PARAMS); + + if (ui32FuncID != PVZ_BRIDGE_MAPDEVICEPHYSHEAP) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Host PVZ call: OSID: %d: Invalid function ID: expected %d, got %d", + __func__, + ui32OSID, + (IMG_UINT32)PVZ_BRIDGE_MAPDEVICEPHYSHEAP, + ui32FuncID)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + PvzServerLockAcquire(); + +#if defined(SUPPORT_RGX) + if (IsVmOnline(ui32OSID)) + { + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DEVICE_NODE *psDeviceNode = psPVRSRVData->psDeviceNodeList; + IMG_DEV_PHYADDR sDevPAddr = {ui64PAddr}; + IMG_UINT32 sync; + + eError = RGXFwRawHeapAllocMap(psDeviceNode, ui32OSID, sDevPAddr, ui64Size); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXFwRawHeapAllocMap", e0); + + /* Invalidate MMU cache in preparation for a kick from this Guest */ + eError = psDeviceNode->pfnMMUCacheInvalidateKick(psDeviceNode, &sync); + PVR_LOG_GOTO_IF_ERROR(eError, "MMUCacheInvalidateKick", e0); + + /* Everything is ready for the firmware to start interacting with this OS */ + eError = RGXFWSetFwOsState(psDeviceNode->pvDevice, ui32OSID, RGXFWIF_OS_ONLINE); + } +e0: +#endif /* defined(SUPPORT_RGX) */ + PvzServerLockRelease(); + + return eError; +#endif +} + +PVRSRV_ERROR +PvzServerUnmapDevPhysHeap(IMG_UINT32 ui32OSID, + IMG_UINT32 ui32FuncID, + IMG_UINT32 ui32DevID) +{ +#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) + /* + * Reject hypercall if called on a system configured at built time to + * preallocate the Guest's firmware heaps from static carveout memory. 
+ */ + PVR_DPF((PVR_DBG_ERROR, + "%s: Host PVZ config: Does not match with Guest PVZ config\n" + " Host preallocates the Guest's FW physheap from static memory carveouts at startup.\n", __func__)); + return PVRSRV_ERROR_INVALID_PVZ_CONFIG; +#else + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_LOG_RETURN_IF_FALSE((ui32DevID == 0), "Invalid Device ID", PVRSRV_ERROR_INVALID_PARAMS); + + if (ui32FuncID != PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Host PVZ call: OSID: %d: Invalid function ID: expected %d, got %d", + __func__, + ui32OSID, + (IMG_UINT32)PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP, + ui32FuncID)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + PvzServerLockAcquire(); + +#if defined(SUPPORT_RGX) + if (IsVmOnline(ui32OSID)) + { + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DEVICE_NODE *psDeviceNode = psPVRSRVData->psDeviceNodeList; + + /* Order firmware to offload this OS' data and stop accepting commands from it */ + eError = RGXFWSetFwOsState(psDeviceNode->pvDevice, ui32OSID, RGXFWIF_OS_OFFLINE); + PVR_LOG_GOTO_IF_ERROR(eError, "RGXFWSetFwOsState", e0); + + /* it is now safe to remove the Guest's memory mappings */ + RGXFwRawHeapUnmapFree(psDeviceNode, ui32OSID); + } +e0: +#endif + + PvzServerLockRelease(); + + return eError; +#endif +} + +/* + * ============================================================ + * The following server para-virtualization (pvz) functions + * are exclusively called by the VM manager (hypervisor) to + * pass side band information to the host (vm manager -> host) + * ============================================================ + */ + +PVRSRV_ERROR +PvzServerOnVmOnline(IMG_UINT32 ui32OSID) +{ + PVRSRV_ERROR eError; + + PvzServerLockAcquire(); + + eError = PvzOnVmOnline(ui32OSID); + + PvzServerLockRelease(); + + return eError; +} + +PVRSRV_ERROR +PvzServerOnVmOffline(IMG_UINT32 ui32OSID) +{ + PVRSRV_ERROR eError; + + PvzServerLockAcquire(); + + eError = PvzOnVmOffline(ui32OSID); + + PvzServerLockRelease(); + 
+ return eError; +} + +PVRSRV_ERROR +PvzServerVMMConfigure(VMM_CONF_PARAM eVMMParamType, IMG_UINT32 ui32ParamValue) +{ + PVRSRV_ERROR eError; + + PvzServerLockAcquire(); + + eError = PvzVMMConfigure(eVMMParamType, ui32ParamValue); + + PvzServerLockRelease(); + + return eError; +} + +/****************************************************************************** + End of file (vmm_pvz_server.c) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/vmm_pvz_server.h b/drivers/gpu/drm/phytium/octopus/vmm_pvz_server.h new file mode 100644 index 000000000000..f62a9676fc64 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/vmm_pvz_server.h @@ -0,0 +1,121 @@ +/*************************************************************************/ /*! +@File vmm_pvz_server.h +@Title VM manager para-virtualization interface helper routines +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Header provides API(s) available to VM manager, this must be + called to close the loop during guest para-virtualization calls. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef VMM_PVZ_SERVER_H +#define VMM_PVZ_SERVER_H + +#include "vmm_impl.h" +#include "img_types.h" +#include "pvrsrv_error.h" +#include "vmm_pvz_common.h" + +/*! +******************************************************************************* + @Function PvzServerMapDevPhysHeap + @Description The VM manager calls this in response to guest PVZ interface + call pfnMapDevPhysHeap. + @Return PVRSRV_OK on success. 
Otherwise, a PVRSRV error code +******************************************************************************/ +PVRSRV_ERROR +PvzServerMapDevPhysHeap(IMG_UINT32 ui32OSID, + IMG_UINT32 ui32FuncID, + IMG_UINT32 ui32DevID, + IMG_UINT64 ui64Size, + IMG_UINT64 ui64PAddr); + +/*! +******************************************************************************* + @Function PvzServerUnmapDevPhysHeap + @Description The VM manager calls this in response to guest PVZ interface + call pfnUnmapDevPhysHeap. + @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code +******************************************************************************/ +PVRSRV_ERROR +PvzServerUnmapDevPhysHeap(IMG_UINT32 ui32OSID, + IMG_UINT32 ui32FuncID, + IMG_UINT32 ui32DevID); + +/*! +******************************************************************************* + @Function PvzServerOnVmOnline + @Description The VM manager calls this when guest VM machine comes online. + The host driver will initialize the FW if it has not done so + already. + @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code +******************************************************************************/ +PVRSRV_ERROR +PvzServerOnVmOnline(IMG_UINT32 ui32OSID); + +/*! +******************************************************************************* + @Function PvzServerOnVmOffline + @Description The VM manager calls this when a guest VM machine is about to + go offline. The VM manager might have unmapped the GPU kick + register for such VM but not the GPU memory until the call + returns. Once the function returns, the FW does not hold any + reference for such VM and no workloads from it are running in + the GPU and it is safe to remove the memory for such VM. + @Return PVRSRV_OK on success. PVRSRV_ERROR_TIMEOUT if for some reason + the FW is taking too long to clean-up the resources of the + OSID. Otherwise, a PVRSRV_ERROR code. 
+******************************************************************************/ +PVRSRV_ERROR +PvzServerOnVmOffline(IMG_UINT32 ui32OSID); + +/*! +******************************************************************************* + @Function PvzServerVMMConfigure + @Description The VM manager calls this to configure several parameters like + HCS or isolation. + @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code +******************************************************************************/ +PVRSRV_ERROR +PvzServerVMMConfigure(VMM_CONF_PARAM eVMMParamType, + IMG_UINT32 ui32ParamValue); + +#endif /* VMM_PVZ_SERVER_H */ + +/****************************************************************************** + End of file (vmm_pvz_server.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/vmm_type_stub.c b/drivers/gpu/drm/phytium/octopus/vmm_type_stub.c new file mode 100644 index 000000000000..a7bab15d0669 --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/vmm_type_stub.c @@ -0,0 +1,119 @@ +/*************************************************************************/ /*! +@File vmm_type_stub.c +@Title Stub VM manager type +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description Sample stub (no-operation) VM manager implementation +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#include "pvrsrv.h" +#include "img_types.h" +#include "img_defs.h" +#include "pvrsrv_error.h" +#include "rgxheapconfig.h" + +#include "vmm_impl.h" +#include "vmm_pvz_server.h" + +static PVRSRV_ERROR +StubVMMMapDevPhysHeap(IMG_UINT32 ui32FuncID, + IMG_UINT32 ui32DevID, + IMG_UINT64 ui64Size, + IMG_UINT64 ui64Addr) +{ + PVR_UNREFERENCED_PARAMETER(ui32FuncID); + PVR_UNREFERENCED_PARAMETER(ui32DevID); + PVR_UNREFERENCED_PARAMETER(ui64Size); + PVR_UNREFERENCED_PARAMETER(ui64Addr); + return PVRSRV_ERROR_NOT_IMPLEMENTED; +} + +static PVRSRV_ERROR +StubVMMUnmapDevPhysHeap(IMG_UINT32 ui32FuncID, + IMG_UINT32 ui32DevID) +{ + PVR_UNREFERENCED_PARAMETER(ui32FuncID); + PVR_UNREFERENCED_PARAMETER(ui32DevID); + return PVRSRV_ERROR_NOT_IMPLEMENTED; +} + +static VMM_PVZ_CONNECTION gsStubVmmPvz = +{ + .sClientFuncTab = { + /* pfnMapDevPhysHeap */ + &StubVMMMapDevPhysHeap, + + /* pfnUnmapDevPhysHeap */ + &StubVMMUnmapDevPhysHeap + }, + + .sServerFuncTab = { + /* pfnMapDevPhysHeap */ + &PvzServerMapDevPhysHeap, + + /* pfnUnmapDevPhysHeap */ + &PvzServerUnmapDevPhysHeap + }, + + .sVmmFuncTab = { + /* pfnOnVmOnline */ + &PvzServerOnVmOnline, + + /* pfnOnVmOffline */ + &PvzServerOnVmOffline, + + /* pfnVMMConfigure */ + &PvzServerVMMConfigure + } +}; + +PVRSRV_ERROR VMMCreatePvzConnection(VMM_PVZ_CONNECTION **psPvzConnection) +{ + PVR_LOG_RETURN_IF_FALSE((NULL != psPvzConnection), "VMMCreatePvzConnection", PVRSRV_ERROR_INVALID_PARAMS); + *psPvzConnection = &gsStubVmmPvz; + PVR_DPF((PVR_DBG_ERROR, "Using a stub VM manager type, no runtime VZ support")); + return PVRSRV_OK; +} + +void VMMDestroyPvzConnection(VMM_PVZ_CONNECTION *psPvzConnection) +{ + PVR_LOG_IF_FALSE((NULL != psPvzConnection), "VMMDestroyPvzConnection"); +} + +/****************************************************************************** + End of file (vmm_type_stub.c) 
+******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/vz_vm.h b/drivers/gpu/drm/phytium/octopus/vz_vm.h new file mode 100644 index 000000000000..5c13591bdcbe --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/vz_vm.h @@ -0,0 +1,61 @@ +/*************************************************************************/ /*! +@File vz_vm.h +@Title System virtualization VM support APIs +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description This header provides VM management support APIs +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef VZ_VM_H +#define VZ_VM_H + +#include "vmm_impl.h" + +bool IsVmOnline(IMG_UINT32 ui32OSID); + +PVRSRV_ERROR PvzOnVmOnline(IMG_UINT32 ui32OSid); + +PVRSRV_ERROR PvzOnVmOffline(IMG_UINT32 ui32OSid); + +PVRSRV_ERROR PvzVMMConfigure(VMM_CONF_PARAM eVMMParamType, IMG_UINT32 ui32ParamValue); + +#endif /* VZ_VM_H */ + +/***************************************************************************** + End of file (vz_vm.h) +*****************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/vz_vmm_pvz.c b/drivers/gpu/drm/phytium/octopus/vz_vmm_pvz.c new file mode 100644 index 000000000000..41fbf23563fd --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/vz_vmm_pvz.c @@ -0,0 +1,183 @@ +/*************************************************************************/ /*! +@File vz_vmm_pvz.c +@Title VM manager para-virtualization APIs +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description VM manager para-virtualization management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "pvrsrv.h" +#include "img_defs.h" +#include "img_types.h" +#include "pvrsrv_error.h" +#include "allocmem.h" +#include "pvrsrv.h" +#include "vz_vmm_pvz.h" + +#if (RGX_NUM_OS_SUPPORTED > 1) +static PVRSRV_ERROR +PvzConnectionValidate(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + VMM_PVZ_CONNECTION *psVmmPvz; + PVRSRV_ERROR eError = PVRSRV_OK; + + /* + * Acquire the underlying VM manager PVZ connection & validate it. + */ + psVmmPvz = PvzConnectionAcquire(); + if (psVmmPvz == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: %s PVZ config: Unable to acquire PVZ connection", + __func__, PVRSRV_VZ_MODE_IS(GUEST) ? "Guest" : "Host")); + eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG; + goto e0; + } + + /* Log which PVZ setup type is being used by driver */ +#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) + /* + * Static PVZ bootstrap setup + * + * This setup uses carve-out memory, has no hypercall mechanism & does not support + * out-of-order initialisation of host/guest VMs/drivers. The host driver has all + * the information needed to initialize all OSIDs firmware state when it's loaded + * and its PVZ layer must mark all guest OSIDs as being online as part of its PVZ + * initialisation. Having no out-of-order initialisation support, the guest driver + * can only submit a workload to the device after the host driver has completely + * initialized the firmware, the VZ hypervisor/VM setup must guarantee this. + */ + PVR_LOG(("Using static PVZ bootstrap setup")); +#else + /* + * Dynamic PVZ bootstrap setup + * + * This setup uses guest memory, has PVZ hypercall mechanism & supports out-of-order + * initialisation of host/guest VMs/drivers. 
The host driver initializes only its + * own OSID-0 firmware state when its loaded and each guest driver will use its PVZ + * interface to hypercall to the host driver to both synchronise its initialisation + * so it does not submit any workload to the firmware before the host driver has + * had a chance to initialize the firmware and to also initialize its own OSID-x + * firmware state. + */ + PVR_LOG(("Using dynamic PVZ bootstrap setup")); + + if (!PVRSRV_VZ_MODE_IS(GUEST) && + (psVmmPvz->sServerFuncTab.pfnMapDevPhysHeap == NULL || + psVmmPvz->sServerFuncTab.pfnUnmapDevPhysHeap == NULL)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Host PVZ config: Functions for mapping a Guest's heaps not implemented\n", __func__)); + eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG; + } +#endif + + PvzConnectionRelease(psVmmPvz); +e0: + return eError; +} +#endif /* (RGX_NUM_OS_SUPPORTED > 1) */ + +PVRSRV_ERROR PvzConnectionInit(PVRSRV_DEVICE_CONFIG *psDevConfig) +{ + PVRSRV_ERROR eError; + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + +#if (RGX_NUM_OS_SUPPORTED == 1) +# if !defined(PVRSRV_NEED_PVR_DPF) + PVR_UNREFERENCED_PARAMETER(psPVRSRVData); +# endif + PVR_DPF((PVR_DBG_ERROR, "This kernel driver does not support virtualization. Please rebuild with RGX_NUM_OS_SUPPORTED > 1")); + PVR_DPF((PVR_DBG_ERROR, "Halting initialisation, cannot transition to %s mode", + psPVRSRVData->eDriverMode == DRIVER_MODE_HOST ? 
"host" : "guest")); + eError = PVRSRV_ERROR_NOT_SUPPORTED; + goto e0; +#else + + /* Create para-virtualization connection lock */ + eError = OSLockCreate(&psPVRSRVData->hPvzConnectionLock); + PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e0); + + /* Create VM manager para-virtualization connection */ + eError = VMMCreatePvzConnection((VMM_PVZ_CONNECTION **)&psPVRSRVData->hPvzConnection); + if (eError != PVRSRV_OK) + { + OSLockDestroy(psPVRSRVData->hPvzConnectionLock); + psPVRSRVData->hPvzConnectionLock = NULL; + + PVR_LOG_ERROR(eError, "VMMCreatePvzConnection"); + goto e0; + } + + /* Ensure pvz connection is configured correctly */ + eError = PvzConnectionValidate(psDevConfig); + PVR_LOG_RETURN_IF_ERROR(eError, "PvzConnectionValidate"); + + psPVRSRVData->abVmOnline[RGXFW_HOST_OS] = IMG_TRUE; +#endif +e0: + return eError; +} + +void PvzConnectionDeInit(void) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + + VMMDestroyPvzConnection(psPVRSRVData->hPvzConnection); + psPVRSRVData->hPvzConnection = NULL; + + OSLockDestroy(psPVRSRVData->hPvzConnectionLock); + psPVRSRVData->hPvzConnectionLock = NULL; +} + +VMM_PVZ_CONNECTION* PvzConnectionAcquire(void) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVR_ASSERT(psPVRSRVData->hPvzConnection != NULL); + return psPVRSRVData->hPvzConnection; +} + +void PvzConnectionRelease(VMM_PVZ_CONNECTION *psParaVz) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + /* Nothing to do, just validate the pointer we're passed back */ + PVR_ASSERT(psParaVz == psPVRSRVData->hPvzConnection); +} + +/****************************************************************************** + End of file (vz_vmm_pvz.c) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/vz_vmm_pvz.h b/drivers/gpu/drm/phytium/octopus/vz_vmm_pvz.h new file mode 100644 index 000000000000..d743e45fe8fb --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/vz_vmm_pvz.h @@ -0,0 +1,79 @@ 
+/*************************************************************************/ /*! +@File vz_vmm_pvz.h +@Title System virtualization VM manager management APIs +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description This header provides VM manager para-virtz management APIs +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef VZ_VMM_PVZ_H +#define VZ_VMM_PVZ_H + +#include "img_types.h" +#include "vmm_impl.h" + +/*! +******************************************************************************* + @Function PvzConnectionInit() and PvzConnectionDeInit() + @Description PvzConnectionInit initializes the VM manager para-virt + which is used subsequently for communication between guest and + host; depending on the underlying VM setup, this could either + be a hyper-call or cross-VM call + @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code +******************************************************************************/ +PVRSRV_ERROR PvzConnectionInit(PVRSRV_DEVICE_CONFIG *psDevConfig); +void PvzConnectionDeInit(void); + +/*! +******************************************************************************* + @Function PvzConnectionAcquire() and PvzConnectionRelease() + @Description These are to acquire/release a handle to the VM manager + para-virtz connection to make a pvz call; on the client, use it + to make the actual pvz call and on the server handler / + VM manager, use it to complete the processing for the pvz call + or make a VM manager to host pvzbridge call +@Return VMM_PVZ_CONNECTION* on success. 
Otherwise NULL +******************************************************************************/ +VMM_PVZ_CONNECTION* PvzConnectionAcquire(void); +void PvzConnectionRelease(VMM_PVZ_CONNECTION *psPvzConnection); + +#endif /* VZ_VMM_PVZ_H */ + +/****************************************************************************** + End of file (vz_vmm_pvz.h) +******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/octopus/vz_vmm_vm.c b/drivers/gpu/drm/phytium/octopus/vz_vmm_vm.c new file mode 100644 index 000000000000..b3674a9e788a --- /dev/null +++ b/drivers/gpu/drm/phytium/octopus/vz_vmm_vm.c @@ -0,0 +1,223 @@ +/*************************************************************************/ /*! +@File vz_vmm_vm.c +@Title System virtualization VM support APIs +@Copyright Copyright (c) Phytium Information Technologies Ltd. All Rights Reserved +@Description System virtualization VM support functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#include "osfunc.h" +#include "pvrsrv.h" +#include "img_defs.h" +#include "img_types.h" +#include "pvrsrv.h" +#include "pvrsrv_error.h" +#include "vz_vm.h" +#include "rgxfwutils.h" + +bool IsVmOnline(IMG_UINT32 ui32OSID) +{ + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + + return (ui32OSID >= RGX_NUM_OS_SUPPORTED) ? 
(false) : (psPVRSRVData->abVmOnline[ui32OSID]); +} + +PVRSRV_ERROR PvzOnVmOnline(IMG_UINT32 ui32OSid) +{ +#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED == 1) + PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS; +#else + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DEVICE_NODE *psDevNode; + PVRSRV_RGXDEV_INFO *psDevInfo; + + if (ui32OSid == 0 || ui32OSid >= RGX_NUM_OS_SUPPORTED) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: invalid OSID (%d)", + __func__, ui32OSid)); + + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto e0; + } + + if (psPVRSRVData->abVmOnline[ui32OSid]) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: OSID %d is already enabled.", + __func__, ui32OSid)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto e0; + } + + /* For now, limit support to single device setups */ + psDevNode = psPVRSRVData->psDeviceNodeList; + psDevInfo = psDevNode->pvDevice; + + if (psDevNode->eDevState == PVRSRV_DEVICE_STATE_INIT) + { + + /* Firmware not initialized yet, do it here */ + eError = PVRSRVCommonDeviceInitialise(psDevNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed to initialize firmware (%s)", + __func__, PVRSRVGetErrorString(eError))); + goto e0; + } + } + + eError = RGXFWHealthCheckCmd(psDevNode->pvDevice); + if (eError != PVRSRV_OK) + { + goto e0; + } + + psPVRSRVData->abVmOnline[ui32OSid] = IMG_TRUE; + +#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) + /* Everything is ready for the firmware to start interacting with this OS */ + eError = RGXFWSetFwOsState(psDevNode->pvDevice, ui32OSid, RGXFWIF_OS_ONLINE); +#endif + +e0: +#endif + return eError; +} + +PVRSRV_ERROR PvzOnVmOffline(IMG_UINT32 ui32OSid) +{ +#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED == 1) + PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS; +#else + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DEVICE_NODE *psDevNode; + PVRSRV_RGXDEV_INFO *psDevInfo; + + if (ui32OSid == 0 
|| ui32OSid >= RGX_NUM_OS_SUPPORTED) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: invalid OSID (%d)", + __func__, ui32OSid)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto e0; + } + + if (!psPVRSRVData->abVmOnline[ui32OSid]) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: OSID %d is already disabled.", + __func__, ui32OSid)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto e0; + } + + /* For now, limit support to single device setups */ + psDevNode = psPVRSRVData->psDeviceNodeList; + psDevInfo = psDevNode->pvDevice; + + eError = RGXFWSetFwOsState(psDevInfo, ui32OSid, RGXFWIF_OS_OFFLINE); + if (eError == PVRSRV_OK) + { + psPVRSRVData->abVmOnline[ui32OSid] = IMG_FALSE; + } + +e0: +#endif + return eError; +} + +PVRSRV_ERROR PvzVMMConfigure(VMM_CONF_PARAM eVMMParamType, IMG_UINT32 ui32ParamValue) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); + PVRSRV_DEVICE_NODE *psDeviceNode; + PVRSRV_RGXDEV_INFO *psDevInfo; + + psDeviceNode = psPVRSRVData->psDeviceNodeList; + psDevInfo = psDeviceNode->pvDevice; + + switch (eVMMParamType) + { +#if defined(SUPPORT_RGX) + case VMM_CONF_PRIO_OSID0: + case VMM_CONF_PRIO_OSID1: + case VMM_CONF_PRIO_OSID2: + case VMM_CONF_PRIO_OSID3: + case VMM_CONF_PRIO_OSID4: + case VMM_CONF_PRIO_OSID5: + case VMM_CONF_PRIO_OSID6: + case VMM_CONF_PRIO_OSID7: + { + IMG_UINT32 ui32OSid = eVMMParamType; + IMG_UINT32 ui32Prio = ui32ParamValue; + + if (ui32OSid < RGX_NUM_OS_SUPPORTED) + { + eError = RGXFWChangeOSidPriority(psDevInfo, ui32OSid, ui32Prio); + } + else + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + break; + } + case VMM_CONF_HCS_DEADLINE: + { + IMG_UINT32 ui32HCSDeadline = ui32ParamValue; + eError = RGXFWSetHCSDeadline(psDevInfo, ui32HCSDeadline); + break; + } +#else + PVR_UNREFERENCED_PARAMETER(ui32ParamValue); +#endif + default: + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + } + + return eError; +} + +/****************************************************************************** + End of file (vz_vmm_vm.c) 
+******************************************************************************/ diff --git a/drivers/gpu/drm/phytium/phytium_crtc.c b/drivers/gpu/drm/phytium/phytium_crtc.c index 796c046d0a73..4ce26e4cbea3 100644 --- a/drivers/gpu/drm/phytium/phytium_crtc.c +++ b/drivers/gpu/drm/phytium/phytium_crtc.c @@ -1,7 +1,15 @@ // SPDX-License-Identifier: GPL-2.0 -/* Phytium display drm driver +/* Phytium X100 display drm driver * - * Copyright (C) 2021 Phytium Technology Co., Ltd. + * Copyright (c) 2021 Phytium Limited. + * + * Author: + * Yang Xun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #include @@ -13,360 +21,14 @@ #include "phytium_plane.h" #include "phytium_dp.h" #include "x100_dc.h" -#include "phytium_reg.h" - -#define MAXKERNELSIZE 9 -#define SUBPIXELINDEXBITS 5 -#define SUBPIXELCOUNT (1 << SUBPIXELINDEXBITS) -#define SUBPIXELLOADCOUNT (SUBPIXELCOUNT / 2 + 1) -#define WEIGHTSTATECOUNT (((SUBPIXELLOADCOUNT * MAXKERNELSIZE + 1) & ~1) / 2) -#define KERNELTABLESIZE (SUBPIXELLOADCOUNT * MAXKERNELSIZE * sizeof(uint16_t)) -#define PHYALIGN(n, align) (((n) + ((align) - 1)) & ~((align) - 1)) -#define KERNELSTATES (PHYALIGN(KERNELTABLESIZE + 4, 8)) -#define PHYPI 3.14159265358979323846f - -#define MATH_Add(X, Y) (float)((X) + (Y)) -#define MATH_Multiply(X, Y) (float)((X) * (Y)) -#define MATH_Divide(X, Y) (float)((X) / (Y)) -#define MATH_DivideFromUInteger(X, Y) ((float)(X) / (float)(Y)) -#define MATH_I2Float(X) (float)(X) - -struct filter_blit_array { - uint8_t kernelSize; - uint32_t scaleFactor; - uint32_t *kernelStates; -}; - -static uint32_t dc_scaling_get_factor(uint32_t src_size, uint32_t dst_size) -{ - uint32_t factor = 0; - - factor = ((src_size - 1) << SCALE_FACTOR_SRC_OFFSET) / (dst_size - 1); - - return factor; -} - -static float dc_sint(float x) 
-{ - const float B = 1.2732395477; - const float C = -0.4052847346; - const float P = 0.2310792853; - float y; - - if (x < 0) - y = B*x - C*x*x; - else - y = B*x + C*x*x; - if (y < 0) - y = P * (y * (0 - y) - y) + y; - else - y = P * (y * y - y) + y; - return y; -} -static float dc_sinc_filter(float x, int radius) +static int phytium_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, + u16 *green, u16 *blue, uint32_t size, + struct drm_modeset_acquire_ctx *ctx) { - float pit, pitd, f1, f2, result; - float f_radius = MATH_I2Float(radius); - - if (x == 0.0f) { - result = 1.0f; - } else if ((x < -f_radius) || (x > f_radius)) { - result = 0.0f; - } else { - pit = MATH_Multiply(PHYPI, x); - pitd = MATH_Divide(pit, f_radius); - f1 = MATH_Divide(dc_sint(pit), pit); - f2 = MATH_Divide(dc_sint(pitd), pitd); - result = MATH_Multiply(f1, f2); - } - - return result; -} - -static int dc_calculate_sync_table( - uint8_t kernel_size, - uint32_t src_size, - uint32_t dst_size, - struct filter_blit_array *kernel_info) -{ - uint32_t scale_factor; - float f_scale; - int kernel_half; - float f_subpixel_step; - float f_subpixel_offset; - uint32_t subpixel_pos; - int kernel_pos; - int padding; - uint16_t *kernel_array; - int range = 0; - - do { - /* Compute the scale factor. */ - scale_factor = dc_scaling_get_factor(src_size, dst_size); - - /* Same kernel size and ratio as before? */ - if ((kernel_info->kernelSize == kernel_size) && - (kernel_info->scaleFactor == kernel_size)) { - break; - } - - /* check the array */ - if (kernel_info->kernelStates == NULL) - break; - - /* Store new parameters. */ - kernel_info->kernelSize = kernel_size; - kernel_info->scaleFactor = scale_factor; - - /* Compute the scale factor. */ - f_scale = MATH_DivideFromUInteger(dst_size, src_size); - - /* Adjust the factor for magnification. */ - if (f_scale > 1.0f) - f_scale = 1.0f; - - /* Calculate the kernel half. */ - kernel_half = (int) (kernel_info->kernelSize >> 1); - - /* Calculate the subpixel step. 
*/ - f_subpixel_step = MATH_Divide(1.0f, MATH_I2Float(SUBPIXELCOUNT)); - - /* Init the subpixel offset. */ - f_subpixel_offset = 0.5f; - - /* Determine kernel padding size. */ - padding = (MAXKERNELSIZE - kernel_info->kernelSize) / 2; - - /* Set initial kernel array pointer. */ - kernel_array = (uint16_t *) (kernel_info->kernelStates + 1); - - /* Loop through each subpixel. */ - for (subpixel_pos = 0; subpixel_pos < SUBPIXELLOADCOUNT; subpixel_pos++) { - /* Define a temporary set of weights. */ - float fSubpixelSet[MAXKERNELSIZE]; - - /* Init the sum of all weights for the current subpixel. */ - float fWeightSum = 0.0f; - uint16_t weightSum = 0; - short int adjustCount, adjustFrom; - short int adjustment; - - /* Compute weights. */ - for (kernel_pos = 0; kernel_pos < MAXKERNELSIZE; kernel_pos++) { - /* Determine the current index. */ - int index = kernel_pos - padding; - - /* Pad with zeros. */ - if ((index < 0) || (index >= kernel_info->kernelSize)) { - fSubpixelSet[kernel_pos] = 0.0f; - } else { - if (kernel_info->kernelSize == 1) { - fSubpixelSet[kernel_pos] = 1.0f; - } else { - /* Compute the x position for filter function. */ - float fX = MATH_Add( - MATH_I2Float(index - kernel_half), - f_subpixel_offset); - fX = MATH_Multiply(fX, f_scale); - - /* Compute the weight. */ - fSubpixelSet[kernel_pos] = dc_sinc_filter(fX, - kernel_half); - } - - /* Update the sum of weights. */ - fWeightSum = MATH_Add(fWeightSum, - fSubpixelSet[kernel_pos]); - } - } - - /* Adjust weights so that the sum will be 1.0. */ - for (kernel_pos = 0; kernel_pos < MAXKERNELSIZE; kernel_pos++) { - /* Normalize the current weight. */ - float fWeight = MATH_Divide(fSubpixelSet[kernel_pos], - fWeightSum); - - /* Convert the weight to fixed point and store in the table. 
*/ - if (fWeight == 0.0f) - kernel_array[kernel_pos] = 0x0000; - else if (fWeight >= 1.0f) - kernel_array[kernel_pos] = 0x4000; - else if (fWeight <= -1.0f) - kernel_array[kernel_pos] = 0xC000; - else - kernel_array[kernel_pos] = - (int16_t) MATH_Multiply(fWeight, 16384.0f); - weightSum += kernel_array[kernel_pos]; - } - - /* Adjust the fixed point coefficients. */ - adjustCount = 0x4000 - weightSum; - if (adjustCount < 0) { - adjustCount = -adjustCount; - adjustment = -1; - } else { - adjustment = 1; - } - - adjustFrom = (MAXKERNELSIZE - adjustCount) / 2; - for (kernel_pos = 0; kernel_pos < adjustCount; kernel_pos++) { - range = (MAXKERNELSIZE*subpixel_pos + adjustFrom + kernel_pos) * - sizeof(uint16_t); - if ((range >= 0) && (range < KERNELTABLESIZE)) - kernel_array[adjustFrom + kernel_pos] += adjustment; - else - DRM_ERROR("%s failed\n", __func__); - } - - kernel_array += MAXKERNELSIZE; - - /* Advance to the next subpixel. */ - f_subpixel_offset = MATH_Add(f_subpixel_offset, -f_subpixel_step); - } - } while (0); - return 0; } -static void phytium_dc_scaling_config(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) -{ - struct drm_device *dev = crtc->dev; - struct phytium_display_private *priv = dev->dev_private; - struct drm_display_mode *mode = &crtc->state->adjusted_mode; - struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); - int phys_pipe = phytium_crtc->phys_pipe; - uint32_t group_offset = priv->dc_reg_base[phys_pipe]; - uint32_t scale_factor_x, scale_factor_y, i; - uint32_t kernelStates[128]; - struct filter_blit_array kernel_info_width; - void *tmp = NULL; - - if (mode->hdisplay != mode->crtc_hdisplay || mode->vdisplay != mode->crtc_vdisplay) { - phytium_crtc->src_width = mode->hdisplay; - phytium_crtc->src_height = mode->vdisplay; - phytium_crtc->dst_width = mode->crtc_hdisplay; - phytium_crtc->dst_height = mode->crtc_vdisplay; - - phytium_crtc->dst_x = (mode->crtc_hdisplay - phytium_crtc->dst_width) / 2; - phytium_crtc->dst_y = 
(mode->crtc_vdisplay - phytium_crtc->dst_height) / 2; - - scale_factor_x = dc_scaling_get_factor(phytium_crtc->src_width, - phytium_crtc->dst_width); - scale_factor_y = dc_scaling_get_factor(phytium_crtc->src_height, - phytium_crtc->dst_height); - if (scale_factor_y > (SCALE_FACTOR_Y_MAX << SCALE_FACTOR_SRC_OFFSET)) - scale_factor_y = (SCALE_FACTOR_Y_MAX << SCALE_FACTOR_SRC_OFFSET); - - phytium_writel_reg(priv, scale_factor_x & SCALE_FACTOR_X_MASK, - group_offset, PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_X); - phytium_writel_reg(priv, scale_factor_y & SCALE_FACTOR_Y_MASK, - group_offset, PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_Y); - phytium_writel_reg(priv, FRAMEBUFFER_TAP, - group_offset, PHYTIUM_DC_FRAMEBUFFER_SCALECONFIG); - - tmp = kmalloc(KERNELSTATES, GFP_KERNEL); - if (!tmp) { - DRM_ERROR("malloc %ld failed\n", KERNELSTATES); - return; - } - - memset(&kernel_info_width, 0, sizeof(struct filter_blit_array)); - kernel_info_width.kernelStates = tmp; - memset(kernel_info_width.kernelStates, 0, KERNELSTATES); - kernel_neon_begin(); - dc_calculate_sync_table(FRAMEBUFFER_HORIZONTAL_FILTER_TAP, - phytium_crtc->src_width, - phytium_crtc->dst_width, - &kernel_info_width); - memset(kernelStates, 0, sizeof(kernelStates)); - memcpy(kernelStates, kernel_info_width.kernelStates + 1, KERNELSTATES - 4); - kernel_neon_end(); - phytium_writel_reg(priv, HORI_FILTER_INDEX, - group_offset, PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER_INDEX); - for (i = 0; i < 128; i++) { - phytium_writel_reg(priv, kernelStates[i], - group_offset, PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER); - } - - memset(&kernel_info_width, 0, sizeof(struct filter_blit_array)); - kernel_info_width.kernelStates = tmp; - memset(kernel_info_width.kernelStates, 0, KERNELSTATES); - kernel_neon_begin(); - dc_calculate_sync_table(FRAMEBUFFER_FILTER_TAP, phytium_crtc->src_height, - phytium_crtc->dst_height, &kernel_info_width); - memset(kernelStates, 0, sizeof(kernelStates)); - memcpy(kernelStates, kernel_info_width.kernelStates + 1, KERNELSTATES 
- 4); - kernel_neon_end(); - phytium_writel_reg(priv, VERT_FILTER_INDEX, - group_offset, PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER_INDEX); - for (i = 0; i < 128; i++) - phytium_writel_reg(priv, kernelStates[i], - group_offset, PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER); - phytium_writel_reg(priv, INITIALOFFSET, - group_offset, PHYTIUM_DC_FRAMEBUFFER_INITIALOFFSET); - kfree(tmp); - phytium_crtc->scale_enable = true; - } else { - phytium_crtc->scale_enable = false; - } -} - -static void phytium_crtc_gamma_set(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct phytium_display_private *priv = dev->dev_private; - struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); - int phys_pipe = phytium_crtc->phys_pipe; - uint32_t group_offset = priv->dc_reg_base[phys_pipe]; - uint32_t config = 0; - struct drm_crtc_state *state = crtc->state; - struct drm_color_lut *lut; - int i; - - if (state->gamma_lut) { - if (WARN((state->gamma_lut->length/sizeof(struct drm_color_lut) != GAMMA_INDEX_MAX), - "gamma size is not match\n")) - return; - lut = (struct drm_color_lut *)state->gamma_lut->data; - for (i = 0; i < GAMMA_INDEX_MAX; i++) { - phytium_writel_reg(priv, i, group_offset, PHYTIUM_DC_GAMMA_INDEX); - config = ((lut[i].red >> 6) & GAMMA_RED_MASK) << GAMMA_RED_SHIFT; - config |= (((lut[i].green >> 6) & GAMMA_GREEN_MASK) << GAMMA_GREEN_SHIFT); - config |= (((lut[i].blue >> 6) & GAMMA_BLUE_MASK) << GAMMA_BLUE_SHIFT); - phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_GAMMA_DATA); - } - } -} - -static void phytium_crtc_gamma_init(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct phytium_display_private *priv = dev->dev_private; - struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); - int phys_pipe = phytium_crtc->phys_pipe; - uint32_t group_offset = priv->dc_reg_base[phys_pipe]; - uint32_t config = 0; - uint16_t *red, *green, *blue; - int i; - - if (WARN((crtc->gamma_size != GAMMA_INDEX_MAX), "gamma size is not match\n")) - return; - - 
red = crtc->gamma_store; - green = red + crtc->gamma_size; - blue = green + crtc->gamma_size; - - for (i = 0; i < GAMMA_INDEX_MAX; i++) { - phytium_writel_reg(priv, i, group_offset, PHYTIUM_DC_GAMMA_INDEX); - config = ((*red++ >> 6) & GAMMA_RED_MASK) << GAMMA_RED_SHIFT; - config |= (((*green++ >> 6) & GAMMA_GREEN_MASK) << GAMMA_GREEN_SHIFT); - config |= (((*blue++ >> 6) & GAMMA_BLUE_MASK) << GAMMA_BLUE_SHIFT); - phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_GAMMA_DATA); - } -} - static void phytium_crtc_destroy(struct drm_crtc *crtc) { struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); @@ -403,7 +65,7 @@ phytium_crtc_atomic_destroy_state(struct drm_crtc *crtc, } static const struct drm_crtc_funcs phytium_crtc_funcs = { - .gamma_set = drm_atomic_helper_legacy_gamma_set, + .gamma_set = phytium_crtc_gamma_set, .set_config = drm_atomic_helper_set_config, .destroy = phytium_crtc_destroy, .page_flip = drm_atomic_helper_page_flip, @@ -416,74 +78,9 @@ static void phytium_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { - struct drm_device *dev = crtc->dev; - struct phytium_display_private *priv = dev->dev_private; - struct drm_display_mode *mode = &crtc->state->adjusted_mode; - struct drm_atomic_state *state = old_state->state; - struct drm_connector_state *new_conn_state; - struct drm_connector *conn; struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); - int phys_pipe = phytium_crtc->phys_pipe; - uint32_t group_offset = priv->dc_reg_base[phys_pipe]; - int config = 0, i = 0; - - for_each_new_connector_in_state(state, conn, new_conn_state, i) { - if (new_conn_state->crtc != crtc) - continue; - - switch (conn->display_info.bpc) { - case 10: - phytium_crtc->bpc = DP_RGB101010; - break; - case 6: - phytium_crtc->bpc = DP_RGB666; - break; - default: - phytium_crtc->bpc = DP_RGB888; - break; - } - } - /* config pix clock */ - phytium_crtc->dc_hw_config_pix_clock(crtc, mode->clock); - - phytium_dc_scaling_config(crtc, 
old_state); - config = ((mode->crtc_hdisplay & HDISPLAY_END_MASK) << HDISPLAY_END_SHIFT) - | ((mode->crtc_htotal&HDISPLAY_TOTAL_MASK) << HDISPLAY_TOTAL_SHIFT); - phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_HDISPLAY); - config = ((mode->crtc_hsync_start & HSYNC_START_MASK) << HSYNC_START_SHIFT) - | ((mode->crtc_hsync_end & HSYNC_END_MASK) << HSYNC_END_SHIFT) - | HSYNC_PULSE_ENABLED; - config |= (mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : HSYNC_NEGATIVE; - phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_HSYNC); - config = ((mode->crtc_vdisplay & VDISPLAY_END_MASK) << VDISPLAY_END_SHIFT) - | ((mode->crtc_vtotal & VDISPLAY_TOTAL_MASK) << VDISPLAY_TOTAL_SHIFT); - phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_VDISPLAY); - config = ((mode->crtc_vsync_start & VSYNC_START_MASK) << VSYNC_START_SHIFT) - | ((mode->crtc_vsync_end & VSYNC_END_MASK) << VSYNC_END_SHIFT) - | VSYNC_PULSE_ENABLED; - config |= (mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : VSYNC_NEGATIVE; - phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_VSYNC); - config = PANEL_DATAENABLE_ENABLE | PANEL_DATA_ENABLE | PANEL_CLOCK_ENABLE; - phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_PANEL_CONFIG); - config = phytium_crtc->bpc | OUTPUT_DP; - phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_DP_CONFIG); - - config = phytium_readl_reg(priv, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); - - if (crtc->state->active) - config |= FRAMEBUFFER_OUTPUT | FRAMEBUFFER_RESET; - else - config &= (~(FRAMEBUFFER_OUTPUT | FRAMEBUFFER_RESET)); - - if (phytium_crtc->scale_enable) - config |= FRAMEBUFFER_SCALE_ENABLE; - else - config &= (~FRAMEBUFFER_SCALE_ENABLE); - - config |= FRAMEBUFFER_GAMMA_ENABLE; - - phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + phytium_crtc->dc_hw_crtc_enable(crtc, old_state); drm_crtc_vblank_on(crtc); } @@ -494,7 +91,7 @@ phytium_crtc_atomic_disable(struct drm_crtc *crtc, struct phytium_crtc *phytium_crtc = 
to_phytium_crtc(crtc); drm_crtc_vblank_off(crtc); - phytium_crtc->dc_hw_disable(crtc); + phytium_crtc->dc_hw_crtc_disable(crtc, old_state); } static void phytium_crtc_update_timing_for_drm_display_mode(struct drm_display_mode *drm_mode, @@ -560,14 +157,9 @@ phytium_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct phytium_display_private *priv = dev->dev_private; struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); - int phys_pipe = phytium_crtc->phys_pipe, config; - uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + int phys_pipe = phytium_crtc->phys_pipe; - config = phytium_readl_reg(priv, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); - if (config & FRAMEBUFFER_RESET) { - phytium_writel_reg(priv, config | FRAMEBUFFER_VALID_PENDING, - group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); - } + phytium_crtc->dc_hw_crtc_begin(priv, phys_pipe); } static void phytium_crtc_atomic_flush(struct drm_crtc *crtc, @@ -577,19 +169,13 @@ static void phytium_crtc_atomic_flush(struct drm_crtc *crtc, struct phytium_display_private *priv = dev->dev_private; struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); struct phytium_crtc_state *phytium_crtc_state = NULL; - int phys_pipe = phytium_crtc->phys_pipe, config; - uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + int phys_pipe = phytium_crtc->phys_pipe; DRM_DEBUG_KMS("crtc->state active:%d enable:%d\n", crtc->state->active, crtc->state->enable); phytium_crtc_state = to_phytium_crtc_state(crtc->state); - if (crtc->state->color_mgmt_changed) - phytium_crtc_gamma_set(crtc); - - config = phytium_readl_reg(priv, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); - phytium_writel_reg(priv, config&(~FRAMEBUFFER_VALID_PENDING), - group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + phytium_crtc->dc_hw_crtc_flush(priv, phys_pipe); if (crtc->state->event) { DRM_DEBUG_KMS("vblank->refcount:%d\n", @@ -626,6 +212,7 @@ phytium_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mo } 
static const struct drm_crtc_helper_funcs phytium_crtc_helper_funcs = { + //.mode_fixup = phytium_crtc_mode_fixup, .mode_valid = phytium_crtc_mode_valid, .atomic_check = phytium_crtc_atomic_check, .atomic_begin = phytium_crtc_atomic_begin, @@ -634,15 +221,6 @@ static const struct drm_crtc_helper_funcs phytium_crtc_helper_funcs = { .atomic_disable = phytium_crtc_atomic_disable, }; -void phytium_crtc_resume(struct drm_device *drm_dev) -{ - struct drm_crtc *crtc; - - drm_for_each_crtc(crtc, drm_dev) { - phytium_crtc_gamma_init(crtc); - } -} - int phytium_crtc_init(struct drm_device *dev, int phys_pipe) { struct phytium_crtc *phytium_crtc; @@ -669,11 +247,10 @@ int phytium_crtc_init(struct drm_device *dev, int phys_pipe) phytium_crtc->phys_pipe = phys_pipe; if (IS_X100(priv)) { - phytium_crtc->dc_hw_config_pix_clock = x100_dc_hw_config_pix_clock; - phytium_crtc->dc_hw_disable = x100_dc_hw_disable; - priv->dc_reg_base[phys_pipe] = X100_DC_BASE(phys_pipe); - priv->dcreq_reg_base[phys_pipe] = X100_DCREQ_BASE(phys_pipe); - priv->address_transform_base = X100_ADDRESS_TRANSFORM_BASE; + phytium_crtc->dc_hw_crtc_begin = x100_dc_hw_crtc_begin; + phytium_crtc->dc_hw_crtc_flush = x100_dc_hw_crtc_flush; + phytium_crtc->dc_hw_crtc_enable = x100_dc_hw_crtc_enable; + phytium_crtc->dc_hw_crtc_disable = x100_dc_hw_crtc_disable; } phytium_primary_plane = phytium_primary_plane_create(dev, phys_pipe); @@ -702,9 +279,6 @@ int phytium_crtc_init(struct drm_device *dev, int phys_pipe) } drm_crtc_helper_add(&phytium_crtc->base, &phytium_crtc_helper_funcs); drm_crtc_vblank_reset(&phytium_crtc->base); - drm_mode_crtc_set_gamma_size(&phytium_crtc->base, GAMMA_INDEX_MAX); - drm_crtc_enable_color_mgmt(&phytium_crtc->base, 0, false, GAMMA_INDEX_MAX); - phytium_crtc_gamma_init(&phytium_crtc->base); return 0; diff --git a/drivers/gpu/drm/phytium/phytium_crtc.h b/drivers/gpu/drm/phytium/phytium_crtc.h index 125a99b42660..539664ce3a22 100644 --- a/drivers/gpu/drm/phytium/phytium_crtc.h +++ 
b/drivers/gpu/drm/phytium/phytium_crtc.h @@ -1,7 +1,15 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Phytium display drm driver +/* Phytium X100 display drm driver * - * Copyright (C) 2021 Phytium Technology Co., Ltd. + * Copyright (c) 2021 Phytium Limited. + * + * Author: + * Yang Xun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef __PHYTIUM_CRTC_H__ @@ -22,8 +30,10 @@ struct phytium_crtc { bool scale_enable; bool reserve[3]; - void (*dc_hw_config_pix_clock)(struct drm_crtc *crtc, int clock); - void (*dc_hw_disable)(struct drm_crtc *crtc); + void (*dc_hw_crtc_begin)(struct phytium_display_private *priv, uint32_t phys_pipe); + void (*dc_hw_crtc_flush)(struct phytium_display_private *priv, uint32_t phys_pipe); + void (*dc_hw_crtc_enable)(struct drm_crtc *crtc, struct drm_crtc_state *old_state); + void (*dc_hw_crtc_disable)(struct drm_crtc *crtc, struct drm_crtc_state *old_state); }; struct phytium_crtc_state { @@ -33,6 +43,5 @@ struct phytium_crtc_state { #define to_phytium_crtc(x) container_of(x, struct phytium_crtc, base) #define to_phytium_crtc_state(x) container_of(x, struct phytium_crtc_state, base) -void phytium_crtc_resume(struct drm_device *drm_dev); int phytium_crtc_init(struct drm_device *dev, int pipe); #endif /* __PHYTIUM_CRTC_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_debugfs.c b/drivers/gpu/drm/phytium/phytium_debugfs.c index b38deafcf874..24bb1841502a 100644 --- a/drivers/gpu/drm/phytium/phytium_debugfs.c +++ b/drivers/gpu/drm/phytium/phytium_debugfs.c @@ -1,7 +1,15 @@ // SPDX-License-Identifier: GPL-2.0 -/* Phytium display drm driver +/* Phytium X100 display drm driver * - * Copyright (C) 2021 Phytium Technology Co., Ltd. + * Copyright (c) 2021 Phytium Limited. 
+ * + * Author: + * Yang Xun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #include @@ -9,7 +17,6 @@ #include "phytium_display_drv.h" #include "phytium_dp.h" -#include "phytium_reg.h" static ssize_t phytium_dp_register_write(struct file *filp, @@ -34,76 +41,8 @@ static int phytium_dp_register_show(struct seq_file *m, void *data) { struct drm_connector *connector = m->private; struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); - struct drm_device *dev = phytium_dp->dev; - struct phytium_display_private *priv = dev->dev_private; - int port = phytium_dp->port; - uint32_t group_offset = priv->dp_reg_base[port]; - - seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_M_VID, - phytium_readl_reg(priv, group_offset, PHYTIUM_DP_M_VID)); - seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_N_VID, - phytium_readl_reg(priv, group_offset, PHYTIUM_DP_N_VID)); - seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_TRANSFER_UNIT_SIZE, - phytium_readl_reg(priv, group_offset, PHYTIUM_DP_TRANSFER_UNIT_SIZE)); - seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_DATA_COUNT, - phytium_readl_reg(priv, group_offset, PHYTIUM_DP_DATA_COUNT)); - seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_MAIN_LINK_HTOTAL, - phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HTOTAL)); - seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_MAIN_LINK_HRES, - phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HRES)); - seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_MAIN_LINK_HSWIDTH, - phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HSWIDTH)); - seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_MAIN_LINK_HSTART, - phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HSTART)); - seq_printf(m, "addr:h'0x%08x h'0x%08x\n", 
PHYTIUM_DP_MAIN_LINK_VTOTAL, - phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VTOTAL)); - seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VRES, - phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VRES)); - seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VSWIDTH, - phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VSWIDTH)); - seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VSTART, - phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VSTART)); - seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_POLARITY, - phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_POLARITY)); - seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_MISC0, - phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_MISC0)); - seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_MISC1, - phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_MISC1)); - seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_USER_SYNC_POLARITY, - phytium_readl_reg(priv, group_offset, PHYTIUM_DP_USER_SYNC_POLARITY)); - seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_VIDEO_STREAM_ENABLE, - phytium_readl_reg(priv, group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE)); - seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SECONDARY_STREAM_ENABLE, - phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE)); - seq_puts(m, "audio:\n"); - seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_INPUT_SELECT, - phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_INPUT_SELECT)); - seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_DIRECT_CLKDIV, - phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_DIRECT_CLKDIV)); - seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CHANNEL_COUNT, - phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CHANNEL_COUNT)); - seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CHANNEL_MAP, - phytium_readl_reg(priv, group_offset, 
PHYTIUM_DP_SEC_CHANNEL_MAP)); - seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_DATA_WINDOW, - phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_DATA_WINDOW)); - seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_CATEGORY_CODE, - phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_CATEGORY_CODE)); - seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_MAUD, - phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_MAUD)); - seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_NAUD, - phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_NAUD)); - seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CLOCK_MODE, - phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CLOCK_MODE)); - seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_SOURCE_FORMAT, - phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_SOURCE_FORMAT)); - seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ, - phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ)); - seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY, - phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY)); - seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_AUDIO_ENABLE, - phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE)); - return 0; + return phytium_dp->funcs->dp_hw_register_show(m, data); } static int phytium_dp_register_open(struct inode *inode, struct file *file) @@ -398,3 +337,4 @@ int phytium_debugfs_connector_add(struct drm_connector *connector) return 0; } + diff --git a/drivers/gpu/drm/phytium/phytium_debugfs.h b/drivers/gpu/drm/phytium/phytium_debugfs.h index 37ca93c18821..49e20440656c 100644 --- a/drivers/gpu/drm/phytium/phytium_debugfs.h +++ b/drivers/gpu/drm/phytium/phytium_debugfs.h @@ -1,7 +1,15 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Phytium display drm driver +/* Phytium X100 display drm driver * - * Copyright (C) 2021 Phytium Technology Co., Ltd. 
+ * Copyright (c) 2021 Phytium Limited. + * + * Author: + * Yang Xun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef __PHYTIUM_DEBUGFS_H__ diff --git a/drivers/gpu/drm/phytium/phytium_display_drv.c b/drivers/gpu/drm/phytium/phytium_display_drv.c index 49a66740388f..b66e2385d73e 100644 --- a/drivers/gpu/drm/phytium/phytium_display_drv.c +++ b/drivers/gpu/drm/phytium/phytium_display_drv.c @@ -1,7 +1,15 @@ // SPDX-License-Identifier: GPL-2.0 -/* Phytium display drm driver +/* Phytium X100 display drm driver * - * Copyright (C) 2021 Phytium Technology Co., Ltd. + * Copyright (c) 2021 Phytium Limited. + * + * Author: + * Yang Xun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
*/ #include @@ -18,8 +26,9 @@ #include "phytium_gem.h" #include "phytium_fb.h" #include "phytium_fbdev.h" -#include "phytium_reg.h" +#ifdef CONFIG_PHYTIUM_PCI_DRIVER #include "phytium_pci.h" +#endif int dc_fake_mode_enable; module_param(dc_fake_mode_enable, int, 0644); @@ -41,49 +50,14 @@ int link_dynamic_adjust; module_param(link_dynamic_adjust, int, 0644); MODULE_PARM_DESC(link_dynamic_adjust, "dynamic select the train pamameter according to the display mode (0-disabled; 1-enabled; default-1)"); -int phytium_wait_cmd_done(struct phytium_display_private *priv, - uint32_t register_offset, - uint32_t request_bit, - uint32_t reply_bit) -{ - int timeout = 500, config = 0, ret = 0; - - do { - mdelay(1); - timeout--; - config = phytium_readl_reg(priv, 0, register_offset); - } while ((!(config & reply_bit)) && timeout); - - phytium_writel_reg(priv, config & (~request_bit), 0, register_offset); - - if (timeout == 0) { - DRM_ERROR("wait cmd reply timeout\n"); - ret = -EBUSY; - } else { - timeout = 500; - do { - mdelay(1); - timeout--; - config = phytium_readl_reg(priv, 0, register_offset); - } while ((config & reply_bit) && timeout); - if (timeout == 0) { - DRM_ERROR("clear cmd timeout\n"); - ret = -EBUSY; - } - } - mdelay(5); - - return ret; -} - static void phytium_irq_preinstall(struct drm_device *dev) { struct phytium_display_private *priv = dev->dev_private; int i, status; for_each_pipe_masked(priv, i) { - status = phytium_readl_reg(priv, priv->dc_reg_base[i], PHYTIUM_DC_INT_STATUS); - phytium_writel_reg(priv, INT_DISABLE, priv->dc_reg_base[i], PHYTIUM_DC_INT_ENABLE); + status = priv->dc_hw_get_irq_status(priv, i); + priv->dc_hw_disable_irq(priv, i); } } @@ -93,8 +67,8 @@ static void phytium_irq_uninstall(struct drm_device *dev) int i, status; for_each_pipe_masked(priv, i) { - status = phytium_readl_reg(priv, priv->dc_reg_base[i], PHYTIUM_DC_INT_STATUS); - phytium_writel_reg(priv, INT_DISABLE, priv->dc_reg_base[i], PHYTIUM_DC_INT_ENABLE); + status = 
priv->dc_hw_get_irq_status(priv, i); + priv->dc_hw_disable_irq(priv, i); } } @@ -107,15 +81,15 @@ static irqreturn_t phytium_display_irq_handler(int irq, void *data) irqreturn_t ret = IRQ_NONE, ret1 = IRQ_NONE; for_each_pipe_masked(priv, i) { - enabled = phytium_readl_reg(priv, priv->dc_reg_base[i], PHYTIUM_DC_INT_STATUS); - if (enabled & INT_STATUS) { + enabled = priv->dc_hw_get_irq_status(priv, i); + + if (enabled) { virt_pipe = phytium_get_virt_pipe(priv, i); if (virt_pipe < 0) return IRQ_NONE; drm_handle_vblank(dev, virt_pipe); ret = IRQ_HANDLED; - if (priv->dc_hw_clear_msi_irq) - priv->dc_hw_clear_msi_irq(priv, i); + priv->dc_hw_clear_msi_irq(priv, i); } } @@ -135,7 +109,7 @@ static int phytium_enable_vblank(struct drm_device *dev, unsigned int virt_pipe) if (phys_pipe < 0) return phys_pipe; - phytium_writel_reg(priv, INT_ENABLE, priv->dc_reg_base[phys_pipe], PHYTIUM_DC_INT_ENABLE); + priv->dc_hw_enable_irq(priv, phys_pipe); return 0; } @@ -147,8 +121,7 @@ static void phytium_disable_vblank(struct drm_device *dev, unsigned int virt_pip phys_pipe = phytium_get_phys_pipe(priv, virt_pipe); if (phys_pipe >= 0) - phytium_writel_reg(priv, INT_DISABLE, priv->dc_reg_base[phys_pipe], - PHYTIUM_DC_INT_ENABLE); + priv->dc_hw_disable_irq(priv, phys_pipe); } static const struct drm_mode_config_funcs phytium_mode_funcs = { @@ -271,7 +244,7 @@ static int phytium_display_load(struct drm_device *dev, unsigned long flags) if (priv->vram_support) priv->vram_hw_init(priv); - ret = drm_irq_install(dev, priv->irq); + ret = drm_irq_install(dev, dev->pdev->irq); if (ret) { DRM_ERROR("install irq failed\n"); goto failed_irq_install; @@ -409,7 +382,6 @@ static int phytium_display_pm_resume(struct drm_device *dev) if (ret) return -EIO; - phytium_crtc_resume(dev); phytium_gem_resume(dev); if (priv->vram_support) @@ -443,14 +415,19 @@ static int __init phytium_display_init(void) { int ret = 0; +#ifdef CONFIG_PHYTIUM_PCI_DRIVER ret = pci_register_driver(&phytium_pci_driver); - + if (ret) + 
return ret; +#endif return ret; } static void __exit phytium_display_exit(void) { +#ifdef CONFIG_PHYTIUM_PCI_DRIVER pci_unregister_driver(&phytium_pci_driver); +#endif } module_init(phytium_display_init); diff --git a/drivers/gpu/drm/phytium/phytium_display_drv.h b/drivers/gpu/drm/phytium/phytium_display_drv.h index 9e052b805fcd..847d09fb24df 100644 --- a/drivers/gpu/drm/phytium/phytium_display_drv.h +++ b/drivers/gpu/drm/phytium/phytium_display_drv.h @@ -1,7 +1,15 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Phytium display drm driver +/* Phytium X100 display drm driver * - * Copyright (C) 2021 Phytium Technology Co., Ltd. + * Copyright (c) 2021 Phytium Limited. + * + * Author: + * Yang Xun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef __PHYTIUM_DISPLAY_DRV_H__ @@ -63,15 +71,9 @@ struct phytium_display_private { struct phytium_device_info info; bool vram_support; bool reserve[3]; - uint32_t dc_reg_base[3]; - uint32_t dcreq_reg_base[3]; - uint32_t dp_reg_base[3]; - uint32_t address_transform_base; - uint32_t phy_access_base[3]; /* drm */ struct drm_device *dev; - int irq; /* fb_dev */ struct drm_fb_helper fbdev_helper; @@ -88,41 +90,44 @@ struct phytium_display_private { int (*display_pm_suspend)(struct drm_device *dev); int (*display_pm_resume)(struct drm_device *dev); + void (*dc_hw_enable_irq)(struct phytium_display_private *priv, uint32_t phys_pipe); + void (*dc_hw_disable_irq)(struct phytium_display_private *priv, uint32_t phys_pipe); + bool (*dc_hw_get_irq_status)(struct phytium_display_private *priv, uint32_t phys_pipe); void (*dc_hw_clear_msi_irq)(struct phytium_display_private *priv, uint32_t phys_pipe); int (*dc_hw_fb_format_check)(const struct drm_mode_fb_cmd2 *mode_cmd, int count); }; static inline unsigned int 
-phytium_readl_reg(struct phytium_display_private *priv, uint32_t group_offset, uint32_t reg_offset) +phytium_readl_reg(struct phytium_display_private *priv, unsigned int offset) { unsigned int data; - data = readl(priv->regs + group_offset + reg_offset); + data = readl(priv->regs + offset); #if DEBUG_LOG - pr_info("Read 32'h%08x 32'h%08x\n", group_offset + reg_offset, data); + pr_info("Read 32'h%08x 32'h%08x\n", offset, data); #endif return data; } static inline void -phytium_writel_reg(struct phytium_display_private *priv, uint32_t data, - uint32_t group_offset, uint32_t reg_offset) +phytium_writel_reg(struct phytium_display_private *priv, unsigned int data, + unsigned int offset) { - writel(data, priv->regs + group_offset + reg_offset); + writel(data, priv->regs + offset); #if DEBUG_LOG - pr_info("Write 32'h%08x 32'h%08x\n", group_offset + reg_offset, data); + pr_info("Write 32'h%08x 32'h%08x\n", offset, data); #endif } static inline void -phytium_writeb_reg(struct phytium_display_private *priv, uint8_t data, - uint32_t group_offset, uint32_t reg_offset) +phytium_writeb_reg(struct phytium_display_private *priv, unsigned char data, + unsigned int offset) { - writeb(data, priv->regs + group_offset + reg_offset); + writeb(data, priv->regs + offset); #if DEBUG_LOG - pr_info("Write 32'h%08x 8'h%08x\n", group_offset + reg_offset, data); + pr_info("Write 32'h%08x 8'h%08x\n", offset, data); #endif } @@ -135,10 +140,6 @@ phytium_writeb_reg(struct phytium_display_private *priv, uint8_t data, int phytium_get_virt_pipe(struct phytium_display_private *priv, int phys_pipe); int phytium_get_phys_pipe(struct phytium_display_private *priv, int virt_pipe); -int phytium_wait_cmd_done(struct phytium_display_private *priv, - uint32_t register_offset, - uint32_t request_bit, - uint32_t reply_bit); void phytium_display_private_init(struct phytium_display_private *priv, struct drm_device *dev); extern struct drm_driver phytium_display_drm_driver; diff --git 
a/drivers/gpu/drm/phytium/phytium_dp.c b/drivers/gpu/drm/phytium/phytium_dp.c index 7c7284bac8ee..8d9d285db89c 100644 --- a/drivers/gpu/drm/phytium/phytium_dp.c +++ b/drivers/gpu/drm/phytium/phytium_dp.c @@ -1,7 +1,15 @@ // SPDX-License-Identifier: GPL-2.0 -/* Phytium display drm driver +/* Phytium X100 display drm driver * - * Copyright (C) 2021 Phytium Technology Co., Ltd. + * Copyright (c) 2021 Phytium Limited. + * + * Author: + * Yang Xun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #include @@ -17,208 +25,12 @@ #include "phytium_debugfs.h" #include "x100_dp.h" #include "phytium_panel.h" -#include "phytium_reg.h" static void phytium_dp_aux_init(struct phytium_dp_device *phytium_dp); static void handle_plugged_change(struct phytium_dp_device *phytium_dp, bool plugged); static bool phytium_edp_init_connector(struct phytium_dp_device *phytium_dp); static void phytium_edp_panel_poweroff(struct phytium_dp_device *phytium_dp); -static int phytium_rate[] = {162000, 270000, 540000, 810000}; - -void phytium_phy_writel(struct phytium_dp_device *phytium_dp, uint32_t address, uint32_t data) -{ - struct drm_device *dev = phytium_dp->dev; - struct phytium_display_private *priv = dev->dev_private; - int port = phytium_dp->port; - uint32_t group_offset = priv->phy_access_base[port]; - -#if DEBUG_LOG - pr_info("phy address write: 0x%x data:0x%x\n", address, data); -#endif - phytium_writel_reg(priv, address, group_offset, PHYTIUM_PHY_ACCESS_ADDRESS); - phytium_writel_reg(priv, data, group_offset, PHYTIUM_PHY_WRITE_DATA); - phytium_writel_reg(priv, ACCESS_WRITE, group_offset, PHYTIUM_PHY_ACCESS_CTRL); - udelay(10); -} - -uint32_t phytium_phy_readl(struct phytium_dp_device *phytium_dp, uint32_t address) -{ - struct drm_device *dev = phytium_dp->dev; - struct 
phytium_display_private *priv = dev->dev_private; - int port = phytium_dp->port; - uint32_t group_offset = priv->phy_access_base[port]; - uint32_t data; - - phytium_writel_reg(priv, address, group_offset, PHYTIUM_PHY_ACCESS_ADDRESS); - phytium_writel_reg(priv, ACCESS_READ, group_offset, PHYTIUM_PHY_ACCESS_CTRL); - udelay(10); - data = phytium_readl_reg(priv, group_offset, PHYTIUM_PHY_READ_DATA); -#if DEBUG_LOG - pr_info("phy address read: 0x%x data:0x%x\n", address, data); -#endif - - return data; -} - -static int -phytium_dp_hw_aux_transfer_write(struct phytium_dp_device *phytium_dp, struct drm_dp_aux_msg *msg) -{ - struct drm_device *dev = phytium_dp->dev; - struct phytium_display_private *priv = dev->dev_private; - int port = phytium_dp->port; - uint32_t group_offset = priv->dp_reg_base[port]; - unsigned int i = 0, j = 0; - unsigned int cmd = 0; - unsigned int aux_status = 0, interrupt_status = 0; - unsigned char *data = msg->buffer; - int count_timeout = 0; - long ret = 0; - - for (i = 0; i < 3; i++) { - /* clear X100_DP_INTERRUPT_RAW_STATUS */ - phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS); - phytium_writel_reg(priv, msg->address, group_offset, PHYTIUM_DP_AUX_ADDRESS); - for (j = 0; j < msg->size; j++) - phytium_writeb_reg(priv, data[j], group_offset, PHYTIUM_DP_AUX_WRITE_FIFO); - - cmd = ((msg->request & COMMAND_MASK) << COMMAND_SHIFT); - if (msg->size == 0) - cmd |= ADDRESS_ONLY; - else - cmd |= (msg->size-1) & BYTE_COUNT_MASK; - phytium_writel_reg(priv, cmd, group_offset, PHYTIUM_DP_AUX_COMMAND); - - count_timeout = 0; - do { - mdelay(5); - interrupt_status = phytium_readl_reg(priv, group_offset, - PHYTIUM_DP_INTERRUPT_RAW_STATUS); - aux_status = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_STATUS); - if ((aux_status & REPLY_RECEIVED) || (aux_status & REPLY_ERROR) - || (interrupt_status & REPLY_TIMEOUT)) { - DRM_DEBUG_KMS("aux wait exit\n"); - break; - } - count_timeout++; - } while (count_timeout < 6); - - 
phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS); - if (interrupt_status & REPLY_TIMEOUT) { - DRM_DEBUG_KMS("aux write reply timeout\n"); - continue; - } else if (aux_status & REPLY_ERROR) { - DRM_DEBUG_KMS("aux write reply error\n"); - continue; - } else if (aux_status & REPLY_RECEIVED) { - DRM_DEBUG_KMS("aux write reply received succussful\n"); - break; - } - } - - if (interrupt_status & REPLY_TIMEOUT) { - DRM_NOTE("aux(%d) write reply timeout\n", phytium_dp->port); - ret = -EIO; - goto out; - } else if (aux_status & REPLY_ERROR) { - DRM_ERROR("aux(%d) write reply error\n", phytium_dp->port); - ret = -EIO; - goto out; - } else if ((aux_status & REPLY_RECEIVED) != REPLY_RECEIVED) { - DRM_ERROR("aux(%d) write reply no response\n", phytium_dp->port); - ret = -EIO; - goto out; - } - - msg->reply = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_CODE); - ret = msg->size; -out: - return ret; -} - -static int -phytium_dp_hw_aux_transfer_read(struct phytium_dp_device *phytium_dp, struct drm_dp_aux_msg *msg) -{ - struct drm_device *dev = phytium_dp->dev; - struct phytium_display_private *priv = dev->dev_private; - int port = phytium_dp->port; - uint32_t group_offset = priv->dp_reg_base[port]; - unsigned int i = 0; - unsigned int cmd = 0; - unsigned int aux_status = 0, interrupt_status = 0; - unsigned char *data = msg->buffer; - int count_timeout = 0; - long ret = 0; - - for (i = 0; i < 3; i++) { - phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS); - phytium_writel_reg(priv, msg->address, group_offset, PHYTIUM_DP_AUX_ADDRESS); - cmd = ((msg->request & COMMAND_MASK) << COMMAND_SHIFT); - if (msg->size == 0) - cmd |= ADDRESS_ONLY; - else - cmd |= ((msg->size-1) & BYTE_COUNT_MASK); - phytium_writel_reg(priv, cmd, group_offset, PHYTIUM_DP_AUX_COMMAND); - - count_timeout = 0; - do { - mdelay(5); - interrupt_status = phytium_readl_reg(priv, group_offset, - PHYTIUM_DP_INTERRUPT_RAW_STATUS); - aux_status = phytium_readl_reg(priv, 
group_offset, PHYTIUM_DP_AUX_STATUS); - if ((aux_status & REPLY_RECEIVED) || (aux_status & REPLY_ERROR) - || (interrupt_status & REPLY_TIMEOUT)) { - DRM_DEBUG_KMS("aux wait exit\n"); - break; - } - count_timeout++; - } while (count_timeout < 6); - - phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS); - if (interrupt_status & REPLY_TIMEOUT) { - DRM_DEBUG_KMS("aux read reply timeout\n"); - continue; - } else if (aux_status & REPLY_ERROR) { - DRM_DEBUG_KMS("aux read reply error\n"); - continue; - } else if (aux_status & REPLY_RECEIVED) { - DRM_DEBUG_KMS("aux read reply received succussful\n"); - break; - } - } - - if (interrupt_status & REPLY_TIMEOUT) { - DRM_NOTE("aux(%d) read reply timeout\n", phytium_dp->port); - ret = -EIO; - goto out; - } else if (aux_status & REPLY_ERROR) { - DRM_ERROR("aux(%d) read reply error\n", phytium_dp->port); - ret = -EIO; - goto out; - } else if ((aux_status & REPLY_RECEIVED) != REPLY_RECEIVED) { - DRM_ERROR("aux(%d) read reply no response\n", phytium_dp->port); - ret = -EIO; - goto out; - } - - msg->reply = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_CODE); - ret = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_DATA_COUNT); - - if (ret > msg->size) { - ret = msg->size; - } else if (ret != msg->size) { - DRM_DEBUG_KMS("aux read count error(ret:0x%lx != 0x%lx)\n", ret, msg->size); - ret = -EBUSY; - goto out; - } - - for (i = 0; i < ret; i++) - data[i] = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_DATA); - -out: - return ret; -} - static void phytium_get_native_mode(struct phytium_dp_device *phytium_dp) { struct drm_display_mode *t, *mode; @@ -356,7 +168,7 @@ phytium_connector_mode_valid(struct drm_connector *connector, return MODE_CLOCK_HIGH; } - if ((mode->hdisplay == 1600) && (mode->vdisplay == 900)) + if (mode->hdisplay == 1600) return MODE_BAD_HVALUE; if ((mode->hdisplay == 1024) && (mode->clock > 78000)) @@ -563,542 +375,36 @@ static void phytium_dp_hw_set_lane_setting(struct 
phytium_dp_device *phytium_dp, uint32_t link_rate, uint8_t train_set) { - phytium_dp->funcs->dp_hw_set_phy_lane_setting(phytium_dp, link_rate, train_set); + phytium_dp->funcs->dp_hw_set_lane_setting(phytium_dp, link_rate, train_set); } static void phytium_dp_hw_set_link(struct phytium_dp_device *phytium_dp, uint8_t lane_count, uint32_t link_rate) { - struct drm_device *dev = phytium_dp->dev; - struct phytium_display_private *priv = dev->dev_private; - int port = phytium_dp->port, ret = 0, retry = 3; - uint32_t group_offset = priv->dp_reg_base[port]; - - phytium_writel_reg(priv, lane_count, - group_offset, PHYTIUM_DP_LANE_COUNT_SET); - phytium_writel_reg(priv, - drm_dp_link_rate_to_bw_code(link_rate), - group_offset, PHYTIUM_DP_LINK_BW_SET); - - if (drm_dp_enhanced_frame_cap(phytium_dp->dpcd)) - phytium_writel_reg(priv, ENHANCED_FRAME_ENABLE, - group_offset, PHYTIUM_DP_ENHANCED_FRAME_EN); - else - phytium_writel_reg(priv, ENHANCED_FRAME_DISABLE, - group_offset, PHYTIUM_DP_ENHANCED_FRAME_EN); - -try_again: - ret = phytium_dp->funcs->dp_hw_set_phy_lane_and_rate(phytium_dp, lane_count, link_rate); - if ((ret < 0) && retry) { - retry--; - goto try_again; - } + phytium_dp->funcs->dp_hw_set_link(phytium_dp, lane_count, link_rate); } + static void phytium_dp_hw_set_test_pattern(struct phytium_dp_device *phytium_dp, - uint8_t lane_count, + uint8_t lane_count, uint8_t test_pattern, uint8_t *custom_pattern, uint32_t custom_pattern_size) { - struct drm_device *dev = phytium_dp->dev; - struct phytium_display_private *priv = dev->dev_private; - int port = phytium_dp->port, val = 0, tmp = 0, i; - uint32_t group_offset = priv->dp_reg_base[port]; - - if ((test_pattern == PHYTIUM_PHY_TP_80BIT_CUSTOM) - && custom_pattern && (custom_pattern_size > 0)) { - val = *(int *)custom_pattern; - phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_CUSTOM_80BIT_PATTERN_0); - val = *(int *)(custom_pattern + 4); - phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_CUSTOM_80BIT_PATTERN_1); - 
val = *(short int *)(custom_pattern + 8); - phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_CUSTOM_80BIT_PATTERN_2); - } - - if (test_pattern == PHYTIUM_PHY_TP_D10_2 || test_pattern == PHYTIUM_PHY_TP_PRBS7 - || test_pattern == PHYTIUM_PHY_TP_80BIT_CUSTOM) - phytium_writel_reg(priv, SCRAMBLING_DISABLE, group_offset, - PHYTIUM_DP_SCRAMBLING_DISABLE); - else - phytium_writel_reg(priv, SCRAMBLING_ENABLE, group_offset, - PHYTIUM_DP_SCRAMBLING_DISABLE); - - tmp = test_pattern - PHYTIUM_PHY_TP_NONE + TEST_PATTERN_NONE; - val = 0; - for (i = 0; i < lane_count; i++) - val |= (tmp << (TEST_PATTERN_LANE_SHIFT * i)); - phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_LINK_QUAL_PATTERN_SET); + phytium_dp->funcs->dp_hw_set_test_pattern(phytium_dp, lane_count, test_pattern, + custom_pattern, custom_pattern_size); } static void phytium_dp_hw_set_train_pattern(struct phytium_dp_device *phytium_dp, - uint8_t train_pattern) -{ - struct drm_device *dev = phytium_dp->dev; - struct phytium_display_private *priv = dev->dev_private; - int port = phytium_dp->port, tmp = 0; - uint32_t group_offset = priv->dp_reg_base[port]; - - /* Scrambling is disabled for TPS1/TPS2/3 and enabled for TPS4 */ - if (train_pattern == DP_TRAINING_PATTERN_4 - || train_pattern == DP_TRAINING_PATTERN_DISABLE) { - phytium_writel_reg(priv, SCRAMBLING_ENABLE, group_offset, - PHYTIUM_DP_SCRAMBLING_DISABLE); - phytium_writel_reg(priv, SCRAMBLER_RESET, group_offset, - PHYTIUM_DP_FORCE_SCRAMBLER_RESET); - } else { - phytium_writel_reg(priv, SCRAMBLING_DISABLE, group_offset, - PHYTIUM_DP_SCRAMBLING_DISABLE); - } - switch (train_pattern) { - case DP_TRAINING_PATTERN_DISABLE: - tmp = TRAINING_OFF; - break; - case DP_TRAINING_PATTERN_1: - tmp = TRAINING_PATTERN_1; - break; - case DP_TRAINING_PATTERN_2: - tmp = TRAINING_PATTERN_2; - break; - case DP_TRAINING_PATTERN_3: - tmp = TRAINING_PATTERN_3; - break; - case DP_TRAINING_PATTERN_4: - tmp = TRAINING_PATTERN_4; - break; - default: - tmp = TRAINING_OFF; - break; 
- } - - phytium_writel_reg(priv, tmp, group_offset, PHYTIUM_DP_TRAINING_PATTERN_SET); -} - -void phytium_dp_hw_enable_audio(struct phytium_dp_device *phytium_dp) -{ - struct drm_device *dev = phytium_dp->dev; - struct phytium_display_private *priv = dev->dev_private; - int port = phytium_dp->port; - int config = 0, config1, data_window = 0; - const struct dp_audio_n_m *n_m = NULL; - uint32_t group_offset = priv->dp_reg_base[port]; - - config = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); - phytium_writel_reg(priv, CHANNEL_MUTE_ENABLE, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); - - data_window = 90*(phytium_dp->link_rate)/100 - *(phytium_dp->mode.htotal - phytium_dp->mode.hdisplay) - /phytium_dp->mode.clock/4; - - phytium_writel_reg(priv, data_window, group_offset, PHYTIUM_DP_SEC_DATA_WINDOW); - - n_m = phytium_dp_audio_get_n_m(phytium_dp->link_rate, phytium_dp->audio_info.sample_rate); - if (n_m == NULL) { - DRM_NOTE("can not get n_m for link_rate(%d) and sample_rate(%d)\n", - phytium_dp->link_rate, phytium_dp->audio_info.sample_rate); - phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_MAUD); - phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_NAUD); - } else { - phytium_writel_reg(priv, n_m->m, group_offset, PHYTIUM_DP_SEC_MAUD); - phytium_writel_reg(priv, n_m->n, group_offset, PHYTIUM_DP_SEC_NAUD); - } - - config1 = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); - phytium_writel_reg(priv, SECONDARY_STREAM_DISABLE, - group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); - phytium_writel_reg(priv, config1, group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); - phytium_writel_reg(priv, config, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); -} - -static void phytium_dp_hw_audio_shutdown(struct phytium_dp_device *phytium_dp) + uint8_t test_pattern) { - struct drm_device *dev = phytium_dp->dev; - struct phytium_display_private *priv = dev->dev_private; - int port = phytium_dp->port; - uint32_t group_offset = 
priv->dp_reg_base[port]; - - phytium_writel_reg(priv, SECONDARY_STREAM_DISABLE, - group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); -} - -static void phytium_dp_hw_audio_digital_mute(struct phytium_dp_device *phytium_dp, bool enable) -{ - struct phytium_display_private *priv = phytium_dp->dev->dev_private; - int port = phytium_dp->port; - uint32_t group_offset = priv->dp_reg_base[port]; - - if (enable) - phytium_writel_reg(priv, CHANNEL_MUTE_ENABLE, - group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); - else - phytium_writel_reg(priv, SEC_AUDIO_ENABLE, - group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); -} - -static int -phytium_dp_hw_audio_hw_params(struct phytium_dp_device *phytium_dp, struct audio_info audio_info) -{ - struct phytium_display_private *priv = phytium_dp->dev->dev_private; - int port = phytium_dp->port; - int ret = 0, data_window = 0; - const struct dp_audio_n_m *n_m = NULL; - uint32_t fs, ws, fs_accurac; - uint32_t group_offset = priv->dp_reg_base[port]; - - DRM_DEBUG_KMS("%s:set port%d sample_rate(%d) channels(%d) sample_width(%d)\n", - __func__, phytium_dp->port, audio_info.sample_rate, - audio_info.channels, audio_info.sample_width); - - phytium_writel_reg(priv, INPUT_SELECT_I2S, group_offset, PHYTIUM_DP_SEC_INPUT_SELECT); - phytium_writel_reg(priv, APB_CLOCK/audio_info.sample_rate, - group_offset, PHYTIUM_DP_SEC_DIRECT_CLKDIV); - phytium_writel_reg(priv, audio_info.channels & CHANNEL_MASK, - group_offset, PHYTIUM_DP_SEC_CHANNEL_COUNT); - phytium_writel_reg(priv, CHANNEL_MAP_DEFAULT, group_offset, PHYTIUM_DP_SEC_CHANNEL_MAP); - data_window = 90*(phytium_dp->link_rate)/100 - *(phytium_dp->mode.htotal - phytium_dp->mode.hdisplay) - /phytium_dp->mode.clock/4; - phytium_writel_reg(priv, data_window, group_offset, PHYTIUM_DP_SEC_DATA_WINDOW); - phytium_writel_reg(priv, 0xb5, group_offset, PHYTIUM_DP_SEC_CS_CATEGORY_CODE); - - phytium_writel_reg(priv, CLOCK_MODE_SYNC, group_offset, PHYTIUM_DP_SEC_CLOCK_MODE); - phytium_writel_reg(priv, CS_SOURCE_FORMAT_DEFAULT, 
- group_offset, PHYTIUM_DP_SEC_CS_SOURCE_FORMAT); - - switch (audio_info.sample_rate) { - case 32000: - fs = ORIG_FREQ_32000; - fs_accurac = SAMPLING_FREQ_32000; - break; - case 44100: - fs = ORIG_FREQ_44100; - fs_accurac = SAMPLING_FREQ_44100; - break; - case 48000: - fs = ORIG_FREQ_48000; - fs_accurac = SAMPLING_FREQ_48000; - break; - case 96000: - fs = ORIG_FREQ_96000; - fs_accurac = SAMPLING_FREQ_96000; - break; - case 176400: - fs = ORIG_FREQ_176400; - fs_accurac = SAMPLING_FREQ_176400; - break; - case 192000: - fs = ORIG_FREQ_192000; - fs_accurac = SAMPLING_FREQ_192000; - break; - default: - DRM_ERROR("dp not support sample_rate %d\n", audio_info.sample_rate); - goto out; - } - - switch (audio_info.sample_width) { - case 16: - ws = WORD_LENGTH_16; - break; - case 18: - ws = WORD_LENGTH_18; - break; - case 20: - ws = WORD_LENGTH_20; - break; - case 24: - ws = WORD_LENGTH_24; - break; - default: - DRM_ERROR("dp not support sample_width %d\n", audio_info.sample_width); - goto out; - } - - phytium_writel_reg(priv, ((fs&ORIG_FREQ_MASK)<link_rate, audio_info.sample_rate); - if (n_m == NULL) { - DRM_NOTE("can not get n_m for link_rate(%d) and sample_rate(%d)\n", - phytium_dp->link_rate, audio_info.sample_rate); - phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_MAUD); - phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_NAUD); - - } else { - phytium_writel_reg(priv, n_m->m, group_offset, PHYTIUM_DP_SEC_MAUD); - phytium_writel_reg(priv, n_m->n, group_offset, PHYTIUM_DP_SEC_NAUD); - } - phytium_writel_reg(priv, SECONDARY_STREAM_ENABLE, - group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); - phytium_dp->audio_info = audio_info; - - return 0; - -out: - phytium_writel_reg(priv, SECONDARY_STREAM_DISABLE, - group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); - - return ret; -} - -void phytium_dp_hw_disable_video(struct phytium_dp_device *phytium_dp) -{ - struct drm_device *dev = phytium_dp->dev; - struct phytium_display_private *priv = dev->dev_private; - int 
port = phytium_dp->port; - uint32_t group_offset = priv->dp_reg_base[port]; - - phytium_writel_reg(priv, SST_MST_SOURCE_0_DISABLE, - group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE); -} - -bool phytium_dp_hw_video_is_enable(struct phytium_dp_device *phytium_dp) -{ - struct drm_device *dev = phytium_dp->dev; - struct phytium_display_private *priv = dev->dev_private; - int port = phytium_dp->port, config; - uint32_t group_offset = priv->dp_reg_base[port]; - - config = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE); - return config ? true : false; -} - -void phytium_dp_hw_enable_video(struct phytium_dp_device *phytium_dp) -{ - struct drm_device *dev = phytium_dp->dev; - struct phytium_display_private *priv = dev->dev_private; - int port = phytium_dp->port; - uint32_t group_offset = priv->dp_reg_base[port]; - - phytium_writel_reg(priv, SST_MST_SOURCE_0_ENABLE, - group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE); - phytium_writel_reg(priv, LINK_SOFT_RESET, group_offset, PHYTIUM_DP_SOFT_RESET); -} - -void phytium_dp_hw_config_video(struct phytium_dp_device *phytium_dp) -{ - struct drm_device *dev = phytium_dp->dev; - struct phytium_display_private *priv = dev->dev_private; - int port = phytium_dp->port; - uint32_t group_offset = priv->dp_reg_base[port]; - unsigned long link_bw, date_rate = 0; - struct drm_display_info *display_info = &phytium_dp->connector.display_info; - unsigned char tu_size = 64; - unsigned long data_per_tu = 0; - int symbols_per_tu, frac_symbols_per_tu, symbol_count, udc, value; - - /* cal M/N and tu_size */ - phytium_writel_reg(priv, phytium_dp->mode.crtc_clock/10, group_offset, PHYTIUM_DP_M_VID); - phytium_writel_reg(priv, phytium_dp->link_rate/10, group_offset, PHYTIUM_DP_N_VID); - link_bw = phytium_dp->link_rate * phytium_dp->link_lane_count; - date_rate = (phytium_dp->mode.crtc_clock * display_info->bpc * 3)/8; - - /* mul 10 for register setting */ - data_per_tu = 10*tu_size * date_rate/link_bw; - symbols_per_tu = 
(data_per_tu/10)&0xff; - frac_symbols_per_tu = (data_per_tu%10*16/10) & 0xf; - phytium_writel_reg(priv, frac_symbols_per_tu<<24 | symbols_per_tu<<16 | tu_size, - group_offset, PHYTIUM_DP_TRANSFER_UNIT_SIZE); - - symbol_count = (phytium_dp->mode.crtc_hdisplay*display_info->bpc*3 + 7)/8; - udc = (symbol_count + phytium_dp->link_lane_count - 1)/phytium_dp->link_lane_count; - phytium_writel_reg(priv, udc, group_offset, PHYTIUM_DP_DATA_COUNT); - - /* config main stream attributes */ - phytium_writel_reg(priv, phytium_dp->mode.crtc_htotal, - group_offset, PHYTIUM_DP_MAIN_LINK_HTOTAL); - phytium_writel_reg(priv, phytium_dp->mode.crtc_hdisplay, - group_offset, PHYTIUM_DP_MAIN_LINK_HRES); - phytium_writel_reg(priv, - phytium_dp->mode.crtc_hsync_end - phytium_dp->mode.crtc_hsync_start, - group_offset, PHYTIUM_DP_MAIN_LINK_HSWIDTH); - phytium_writel_reg(priv, phytium_dp->mode.crtc_htotal - phytium_dp->mode.crtc_hsync_start, - group_offset, PHYTIUM_DP_MAIN_LINK_HSTART); - phytium_writel_reg(priv, phytium_dp->mode.crtc_vtotal, - group_offset, PHYTIUM_DP_MAIN_LINK_VTOTAL); - phytium_writel_reg(priv, phytium_dp->mode.crtc_vdisplay, - group_offset, PHYTIUM_DP_MAIN_LINK_VRES); - phytium_writel_reg(priv, - phytium_dp->mode.crtc_vsync_end - phytium_dp->mode.crtc_vsync_start, - group_offset, PHYTIUM_DP_MAIN_LINK_VSWIDTH); - phytium_writel_reg(priv, phytium_dp->mode.crtc_vtotal - phytium_dp->mode.crtc_vsync_start, - group_offset, PHYTIUM_DP_MAIN_LINK_VSTART); - - value = 0; - if (phytium_dp->mode.flags & DRM_MODE_FLAG_PHSYNC) - value = value & (~HSYNC_POLARITY_LOW); - else - value = value | HSYNC_POLARITY_LOW; - - if (phytium_dp->mode.flags & DRM_MODE_FLAG_PVSYNC) - value = value & (~VSYNC_POLARITY_LOW); - else - value = value | VSYNC_POLARITY_LOW; - phytium_writel_reg(priv, value, group_offset, PHYTIUM_DP_MAIN_LINK_POLARITY); - - switch (display_info->bpc) { - case 10: - value = (MISC0_BIT_DEPTH_10BIT << MISC0_BIT_DEPTH_OFFSET); - break; - case 6: - value = (MISC0_BIT_DEPTH_6BIT << 
MISC0_BIT_DEPTH_OFFSET); - break; - default: - value = (MISC0_BIT_DEPTH_8BIT << MISC0_BIT_DEPTH_OFFSET); - break; - } - value |= (MISC0_COMPONENT_FORMAT_RGB << MISC0_COMPONENT_FORMAT_SHIFT) - | MISC0_SYNCHRONOUS_CLOCK; - phytium_writel_reg(priv, value, group_offset, PHYTIUM_DP_MAIN_LINK_MISC0); - phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_MAIN_LINK_MISC1); - - value = USER_ODDEVEN_POLARITY_HIGH | USER_DATA_ENABLE_POLARITY_HIGH; - if (phytium_dp->mode.flags & DRM_MODE_FLAG_PHSYNC) - value = value | USER_HSYNC_POLARITY_HIGH; - else - value = value & (~USER_HSYNC_POLARITY_HIGH); - if (phytium_dp->mode.flags & DRM_MODE_FLAG_PVSYNC) - value = value | USER_VSYNC_POLARITY_HIGH; - else - value = value & (~USER_VSYNC_POLARITY_HIGH); - phytium_writel_reg(priv, value, group_offset, PHYTIUM_DP_USER_SYNC_POLARITY); -} - -void phytium_dp_hw_disable_output(struct phytium_dp_device *phytium_dp) -{ - struct drm_device *dev = phytium_dp->dev; - struct phytium_display_private *priv = dev->dev_private; - int port = phytium_dp->port; - uint32_t group_offset = priv->dp_reg_base[port]; - - phytium_writel_reg(priv, TRANSMITTER_OUTPUT_DISABLE, - group_offset, PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE); - phytium_writel_reg(priv, LINK_SOFT_RESET, group_offset, PHYTIUM_DP_SOFT_RESET); -} - -void phytium_dp_hw_enable_output(struct phytium_dp_device *phytium_dp) -{ - struct drm_device *dev = phytium_dp->dev; - struct phytium_display_private *priv = dev->dev_private; - int port = phytium_dp->port; - uint32_t group_offset = priv->dp_reg_base[port]; - - phytium_writel_reg(priv, LINK_SOFT_RESET, group_offset, PHYTIUM_DP_SOFT_RESET); - phytium_writel_reg(priv, TRANSMITTER_OUTPUT_ENABLE, - group_offset, PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE); -} - -void phytium_dp_hw_enable_input_source(struct phytium_dp_device *phytium_dp) -{ - struct drm_device *dev = phytium_dp->dev; - struct phytium_display_private *priv = dev->dev_private; - int port = phytium_dp->port; - uint32_t group_offset = 
priv->dp_reg_base[port]; - - phytium_writel_reg(priv, VIRTUAL_SOURCE_0_ENABLE, - group_offset, PHYTIUM_INPUT_SOURCE_ENABLE); -} - -void phytium_dp_hw_disable_input_source(struct phytium_dp_device *phytium_dp) -{ - struct drm_device *dev = phytium_dp->dev; - struct phytium_display_private *priv = dev->dev_private; - int port = phytium_dp->port; - - phytium_writel_reg(priv, (~VIRTUAL_SOURCE_0_ENABLE)&VIRTUAL_SOURCE_0_ENABLE_MASK, - priv->dp_reg_base[port], PHYTIUM_INPUT_SOURCE_ENABLE); -} - -bool phytium_dp_hw_output_is_enable(struct phytium_dp_device *phytium_dp) -{ - struct drm_device *dev = phytium_dp->dev; - struct phytium_display_private *priv = dev->dev_private; - int port = phytium_dp->port; - uint32_t group_offset = priv->dp_reg_base[port]; - int config = 0; - - config = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE); - return config ? true : false; + phytium_dp->funcs->dp_hw_set_train_pattern(phytium_dp, test_pattern); } static void phytium_dp_hw_get_hpd_state(struct phytium_dp_device *phytium_dp) { - struct drm_device *dev = phytium_dp->dev; - struct phytium_display_private *priv = dev->dev_private; - int port = phytium_dp->port; - uint32_t val = 0, raw_state = 0; - uint32_t group_offset = priv->dp_reg_base[port]; - - val = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_RAW_STATUS); - - /* maybe miss hpd, so used for clear PHYTIUM_DP_INTERRUPT_RAW_STATUS */ - phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS); - raw_state = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SINK_HPD_STATE); - if (val & HPD_EVENT) - phytium_dp->dp_hpd_state.hpd_event_state = true; - - if (val & HPD_IRQ) - phytium_dp->dp_hpd_state.hpd_irq_state = true; - - if (raw_state & HPD_CONNECT) - phytium_dp->dp_hpd_state.hpd_raw_state = true; - else - phytium_dp->dp_hpd_state.hpd_raw_state = false; -} - -void phytium_dp_hw_hpd_irq_setup(struct phytium_dp_device *phytium_dp, bool enable) -{ - struct drm_device *dev = 
phytium_dp->dev; - struct phytium_display_private *priv = dev->dev_private; - int port = phytium_dp->port; - uint32_t group_offset = priv->dp_reg_base[port]; - - phytium_dp->dp_hpd_state.hpd_irq_enable = enable; - if (enable) - phytium_writel_reg(priv, HPD_OTHER_MASK, group_offset, PHYTIUM_DP_INTERRUPT_MASK); - else - phytium_writel_reg(priv, HPD_IRQ_MASK|HPD_EVENT_MASK|HPD_OTHER_MASK, - group_offset, PHYTIUM_DP_INTERRUPT_MASK); -} - -int phytium_dp_hw_init(struct phytium_dp_device *phytium_dp) -{ - struct drm_device *dev = phytium_dp->dev; - struct phytium_display_private *priv = dev->dev_private; - int port = phytium_dp->port, ret; - uint32_t group_offset = priv->dp_reg_base[port]; - - ret = phytium_dp->funcs->dp_hw_reset(phytium_dp); - if (ret) - goto out; - ret = phytium_dp->funcs->dp_hw_init_phy(phytium_dp); - if (ret) - goto out; - - phytium_writel_reg(priv, AUX_CLK_DIVIDER, group_offset, PHYTIUM_DP_AUX_CLK_DIVIDER); - phytium_dp->fast_train_support = false; - phytium_dp->hw_spread_enable = phytium_dp->funcs->dp_hw_spread_is_enable(phytium_dp); - -out: - return ret; -} - -static void phytium_dp_hw_set_source_rate_and_lane_count(struct phytium_dp_device *phytium_dp) -{ - phytium_dp->source_rates = phytium_rate; - phytium_dp->num_source_rates = num_source_rates; - - if (phytium_dp->port == 0) - phytium_dp->source_max_lane_count = source_max_lane_count; - else if (phytium_dp->port == 1) - phytium_dp->source_max_lane_count = source_max_lane_count; - else if (phytium_dp->port == 2) - phytium_dp->source_max_lane_count = 1; - else - phytium_dp->source_max_lane_count = 1; + phytium_dp->funcs->dp_hw_get_hpd_state(phytium_dp); } static int phytium_dp_dpcd_get_tp_link(struct phytium_dp_device *phytium_dp, @@ -1141,7 +447,7 @@ static int phytium_dp_dpcd_set_link(struct phytium_dp_device *phytium_dp, } ret = drm_dp_dpcd_write(&phytium_dp->aux, DP_LINK_BW_SET, link_config, 2); if (ret < 0) { - DRM_NOTE("write dpcd DP_LINK_BW_SET fail: ret:%d\n", ret); + DRM_ERROR("write 
dpcd DP_LINK_BW_SET fail: ret:%d\n", ret); goto failed; } @@ -1213,7 +519,7 @@ static int phytium_dp_dpcd_set_train_pattern(struct phytium_dp_device *phytium_d ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_TRAINING_PATTERN_SET, value); if (ret < 0) { - DRM_NOTE("write DP_TRAINING_PATTERN_SET fail: ret:%d\n", ret); + DRM_ERROR("write DP_TRAINING_PATTERN_SET fail: ret:%d\n", ret); goto failed; } @@ -1293,7 +599,7 @@ static bool phytium_dp_link_training_clock_recovery(struct phytium_dp_device *ph ret = phytium_dp_dpcd_set_link(phytium_dp, phytium_dp->link_lane_count, phytium_dp->link_rate); if (ret < 0) { - DRM_NOTE("phytium_dp_dpcd_set_link failed(ret=%d)\n", ret); + DRM_ERROR("phytium_dp_dpcd_set_link failed(ret=%d)\n", ret); return false; } @@ -1520,11 +826,11 @@ phytium_dp_stop_link_train(struct phytium_dp_device *phytium_dp) int ret; /* config source and sink's train_pattern x: DP_TRAINING_PATTERN_DISABLE */ - phytium_dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_DISABLE); + phytium_dp->funcs->dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_DISABLE); ret = phytium_dp_dpcd_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_DISABLE); if (ret < 0) { - DRM_NOTE("phytium_dp_dpcd_set_train_pattern fail: ret:%d\n", ret); + DRM_ERROR("phytium_dp_dpcd_set_train_pattern fail: ret:%d\n", ret); return ret; } @@ -1535,11 +841,11 @@ int phytium_dp_start_link_train(struct phytium_dp_device *phytium_dp) { int ret = 0; - phytium_dp_hw_disable_output(phytium_dp); - phytium_dp_hw_disable_input_source(phytium_dp); - phytium_dp_hw_disable_video(phytium_dp); - phytium_dp_hw_enable_input_source(phytium_dp); - phytium_dp_hw_enable_output(phytium_dp); + phytium_dp->funcs->dp_hw_disable_output(phytium_dp); + phytium_dp->funcs->dp_hw_disable_input_source(phytium_dp); + phytium_dp->funcs->dp_hw_disable_video(phytium_dp); + phytium_dp->funcs->dp_hw_enable_input_source(phytium_dp); + phytium_dp->funcs->dp_hw_enable_output(phytium_dp); phytium_dp_dpcd_sink_dpms(phytium_dp, 
DRM_MODE_DPMS_OFF); phytium_dp_dpcd_sink_dpms(phytium_dp, DRM_MODE_DPMS_ON); @@ -1551,7 +857,7 @@ int phytium_dp_start_link_train(struct phytium_dp_device *phytium_dp) ret = phytium_dp_stop_link_train(phytium_dp); if (ret < 0) { - DRM_NOTE("phytium_dp_stop_link_train failed: ret = %d\n", ret); + DRM_ERROR("phytium_dp_stop_link_train failed: ret = %d\n", ret); goto out; } @@ -1576,7 +882,7 @@ int phytium_dp_start_link_train(struct phytium_dp_device *phytium_dp) ret = phytium_dp_stop_link_train(phytium_dp); if (ret < 0) { - DRM_NOTE("phytium_dp_stop_link_train failed: ret = %d\n", ret); + DRM_ERROR("phytium_dp_stop_link_train failed: ret = %d\n", ret); goto out; } @@ -1621,7 +927,7 @@ static bool phytium_dp_needs_link_retrain(struct phytium_dp_device *phytium_dp) return true; } - if (!phytium_dp_hw_output_is_enable(phytium_dp)) { + if (!phytium_dp->funcs->dp_hw_output_is_enable(phytium_dp)) { DRM_DEBUG_KMS("check DP output enable failed\n"); return true; } @@ -1810,7 +1116,7 @@ static int phytium_dp_long_pulse(struct drm_connector *connector, bool hpd_raw_s DRM_DEBUG_KMS("common_max_lane_count: %d, common_max_rate:%d\n", phytium_dp->max_link_lane_count, phytium_dp->max_link_rate); - video_enable = phytium_dp_hw_video_is_enable(phytium_dp); + video_enable = phytium_dp->funcs->dp_hw_video_is_enable(phytium_dp); phytium_dp_start_link_train(phytium_dp); status = phytium_dp_set_edid(connector); @@ -1819,7 +1125,7 @@ static int phytium_dp_long_pulse(struct drm_connector *connector, bool hpd_raw_s if (video_enable) { mdelay(2); - phytium_dp_hw_enable_video(phytium_dp); + phytium_dp->funcs->dp_hw_enable_video(phytium_dp); } } @@ -1850,11 +1156,11 @@ static int phytium_dp_short_pulse(struct drm_connector *connector) goto out; } - video_enable = phytium_dp_hw_video_is_enable(phytium_dp); + video_enable = phytium_dp->funcs->dp_hw_video_is_enable(phytium_dp); phytium_dp_start_link_train(phytium_dp); if (video_enable) { mdelay(2); - phytium_dp_hw_enable_video(phytium_dp); + 
phytium_dp->funcs->dp_hw_enable_video(phytium_dp); } out: @@ -1927,7 +1233,7 @@ void phytium_dp_hpd_irq_setup(struct drm_device *dev, bool enable) drm_for_each_encoder(encoder, dev) { phytium_dp = encoder_to_dp_device(encoder); - phytium_dp_hw_hpd_irq_setup(phytium_dp, enable); + phytium_dp->funcs->dp_hw_hpd_irq_setup(phytium_dp, enable); } } @@ -2191,7 +1497,10 @@ static void phytium_encoder_disable(struct drm_encoder *encoder) if (phytium_dp->is_edp) phytium_edp_backlight_off(phytium_dp); - phytium_dp_hw_disable_video(phytium_dp); + phytium_dp->funcs->dp_hw_disable_video(phytium_dp); + + if (phytium_dp->has_audio) + phytium_dp->funcs->dp_hw_disable_audio(phytium_dp); mdelay(50); @@ -2216,9 +1525,13 @@ void phytium_dp_adjust_link_train_parameter(struct phytium_dp_device *phytium_dp phytium_dp->link_rate, phytium_dp->link_lane_count); DRM_DEBUG_KMS("for crtc_clock(%d) bs_request(%ld) bs_limit(%ld) rate(%d)\n", phytium_dp->mode.crtc_clock, bs_request, bs_limit, rate); - if ((link_dynamic_adjust && (bs_request < bs_limit) && rate < 10) || - ((!link_dynamic_adjust) && (rate < 10))) - break; + if (link_dynamic_adjust) { + if ((bs_request < bs_limit) && rate < 10) + break; + } else { + if (rate < 10) + break; + } phytium_dp_get_link_train_fallback_values(phytium_dp); } @@ -2231,7 +1544,7 @@ static void phytium_encoder_enable(struct drm_encoder *encoder) struct phytium_dp_device *phytium_dp = encoder_to_dp_device(encoder); int ret = 0; - phytium_dp_hw_disable_video(phytium_dp); + phytium_dp->funcs->dp_hw_disable_video(phytium_dp); if (phytium_dp->is_edp) { phytium_edp_panel_poweron(phytium_dp); @@ -2247,11 +1560,11 @@ static void phytium_encoder_enable(struct drm_encoder *encoder) mdelay(2); } - phytium_dp_hw_config_video(phytium_dp); + phytium_dp->funcs->dp_hw_config_video(phytium_dp); if (ret == 0) { - phytium_dp_hw_enable_video(phytium_dp); + phytium_dp->funcs->dp_hw_enable_video(phytium_dp); if (phytium_dp->has_audio) - phytium_dp_hw_enable_audio(phytium_dp); + 
phytium_dp->funcs->dp_hw_enable_audio(phytium_dp); } if (phytium_dp->is_edp) { @@ -2320,10 +1633,10 @@ static int phytium_dp_audio_get_eld(struct device *dev, void *data, u8 *buf, siz static int phytium_dp_audio_digital_mute(struct device *dev, void *data, bool enable) { struct phytium_dp_device *phytium_dp = data; + int ret; - phytium_dp_hw_audio_digital_mute(phytium_dp, enable); - - return 0; + ret = phytium_dp->funcs->dp_hw_audio_digital_mute(phytium_dp, enable); + return ret; } const struct dp_audio_n_m *phytium_dp_audio_get_n_m(int link_rate, int sample_rate) @@ -2357,7 +1670,7 @@ static int phytium_dp_audio_hw_params(struct device *dev, void *data, goto failed; } - ret = phytium_dp_hw_audio_hw_params(phytium_dp, audio_info); + ret = phytium_dp->funcs->dp_hw_audio_hw_params(phytium_dp, audio_info); failed: return ret; @@ -2367,7 +1680,7 @@ static void phytium_dp_audio_shutdown(struct device *dev, void *data) { struct phytium_dp_device *phytium_dp = data; - phytium_dp_hw_audio_shutdown(phytium_dp); + phytium_dp->funcs->dp_hw_audio_shutdown(phytium_dp); } static void handle_plugged_change(struct phytium_dp_device *phytium_dp, bool plugged) @@ -2436,12 +1749,12 @@ static long phytium_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_ms case DP_AUX_NATIVE_WRITE: case DP_AUX_I2C_WRITE: case DP_AUX_I2C_WRITE_STATUS_UPDATE: - ret = phytium_dp_hw_aux_transfer_write(phytium_dp, msg); + ret = phytium_dp->funcs->dp_hw_aux_transfer_write(phytium_dp, msg); DRM_DEBUG_KMS("aux write reply:0x%x ret:0x%lx\n", msg->reply, ret); break; case DP_AUX_NATIVE_READ: case DP_AUX_I2C_READ: - ret = phytium_dp_hw_aux_transfer_read(phytium_dp, msg); + ret = phytium_dp->funcs->dp_hw_aux_transfer_read(phytium_dp, msg); DRM_DEBUG_KMS("aux read ret:0x%lx\n", ret); break; default: @@ -2524,7 +1837,7 @@ int phytium_dp_resume(struct drm_device *drm_dev) phytium_edp_backlight_off(phytium_dp); phytium_edp_panel_poweroff(phytium_dp); } - ret = phytium_dp_hw_init(phytium_dp); + ret = 
phytium_dp->funcs->dp_hw_init(phytium_dp); if (ret) { DRM_ERROR("failed to initialize dp %d\n", phytium_dp->port); return -EIO; @@ -2536,7 +1849,6 @@ int phytium_dp_resume(struct drm_device *drm_dev) int phytium_dp_init(struct drm_device *dev, int port) { - struct phytium_display_private *priv = dev->dev_private; struct phytium_dp_device *phytium_dp = NULL; int ret, type; @@ -2549,13 +1861,7 @@ int phytium_dp_init(struct drm_device *dev, int port) phytium_dp->dev = dev; phytium_dp->port = port; - phytium_dp_hw_set_source_rate_and_lane_count(phytium_dp); - - if (IS_X100(priv)) { - x100_dp_func_register(phytium_dp); - priv->dp_reg_base[port] = X100_DP_BASE(port); - priv->phy_access_base[port] = X100_PHY_ACCESS_BASE(port); - } + x100_dp_func_register(phytium_dp); if (phytium_dp_is_edp(phytium_dp, port)) { phytium_dp->is_edp = true; @@ -2568,7 +1874,7 @@ int phytium_dp_init(struct drm_device *dev, int port) type = DRM_MODE_CONNECTOR_DisplayPort; } - ret = phytium_dp_hw_init(phytium_dp); + ret = phytium_dp->funcs->dp_hw_init(phytium_dp); if (ret) { DRM_ERROR("failed to initialize dp %d\n", phytium_dp->port); goto failed_init_dp; diff --git a/drivers/gpu/drm/phytium/phytium_dp.h b/drivers/gpu/drm/phytium/phytium_dp.h index e1cf6c8483ad..08ae77d3eb48 100644 --- a/drivers/gpu/drm/phytium/phytium_dp.h +++ b/drivers/gpu/drm/phytium/phytium_dp.h @@ -1,7 +1,15 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Phytium display drm driver +/* Phytium X100 display drm driver * - * Copyright (C) 2021 Phytium Technology Co., Ltd. + * Copyright (c) 2021 Phytium Limited. + * + * Author: + * Yang Xun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
*/ #ifndef __PHYTIUM_DP_H__ @@ -37,20 +45,44 @@ struct phytium_dp_compliance { }; struct phytium_dp_func { - int (*dp_hw_reset)(struct phytium_dp_device *phytium_dp); - bool (*dp_hw_spread_is_enable)(struct phytium_dp_device *phytium_dp); - int (*dp_hw_set_backlight)(struct phytium_dp_device *phytium_dp, uint32_t level); - uint32_t (*dp_hw_get_backlight)(struct phytium_dp_device *phytium_dp); - void (*dp_hw_disable_backlight)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_set_test_pattern)(struct phytium_dp_device *phytium_dp, uint8_t test_pattern, + uint8_t lane_count, uint8_t *custom_pattern, + uint32_t custom_pattern_size); + void (*dp_hw_set_link)(struct phytium_dp_device *phytium_dp, uint8_t link_lane_count, + uint32_t link_rate); + void (*dp_hw_set_lane_setting)(struct phytium_dp_device *phytium_dp, + uint32_t link_rate, uint8_t train_set); + void (*dp_hw_set_train_pattern)(struct phytium_dp_device *phytium_dp, + uint8_t train_pattern); + int (*dp_hw_init)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_enable_output)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_disable_output)(struct phytium_dp_device *phytium_dp); + bool (*dp_hw_output_is_enable)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_enable_input_source)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_disable_input_source)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_get_hpd_state)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_hpd_irq_setup)(struct phytium_dp_device *phytium_dp, bool enable); + void (*dp_hw_disable_video)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_enable_video)(struct phytium_dp_device *phytium_dp); + bool (*dp_hw_video_is_enable)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_config_video)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_disable_audio)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_enable_audio)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_audio_shutdown)(struct 
phytium_dp_device *phytium_dp); + int (*dp_hw_audio_digital_mute)(struct phytium_dp_device *phytium_dp, bool enable); + int (*dp_hw_audio_hw_params)(struct phytium_dp_device *phytium_dp, + struct audio_info audio_info); + int (*dp_hw_aux_transfer_write)(struct phytium_dp_device *phytium_dp, + struct drm_dp_aux_msg *msg); + int (*dp_hw_aux_transfer_read)(struct phytium_dp_device *phytium_dp, + struct drm_dp_aux_msg *msg); + int (*dp_hw_register_show)(struct seq_file *m, void *data); void (*dp_hw_enable_backlight)(struct phytium_dp_device *phytium_dp); - void (*dp_hw_poweroff_panel)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_disable_backlight)(struct phytium_dp_device *phytium_dp); + uint32_t (*dp_hw_get_backlight)(struct phytium_dp_device *phytium_dp); + int (*dp_hw_set_backlight)(struct phytium_dp_device *phytium_dp, uint32_t level); void (*dp_hw_poweron_panel)(struct phytium_dp_device *phytium_dp); - int (*dp_hw_init_phy)(struct phytium_dp_device *phytium_dp); - void (*dp_hw_set_phy_lane_setting)(struct phytium_dp_device *phytium_dp, - uint32_t link_rate, uint8_t train_set); - int (*dp_hw_set_phy_lane_and_rate)(struct phytium_dp_device *phytium_dp, - uint8_t link_lane_count, - uint32_t link_rate); + void (*dp_hw_poweroff_panel)(struct phytium_dp_device *phytium_dp); }; struct phytium_dp_hpd_state { @@ -141,11 +173,9 @@ enum phytium_dpcd_phy_tp { #define connector_to_dp_device(x) container_of(x, struct phytium_dp_device, connector) #define panel_to_dp_device(x) container_of(x, struct phytium_dp_device, panel) #define train_retry_to_dp_device(x) container_of(x, struct phytium_dp_device, train_retry_work) -void phytium_phy_writel(struct phytium_dp_device *phytium_dp, uint32_t address, uint32_t data); -uint32_t phytium_phy_readl(struct phytium_dp_device *phytium_dp, uint32_t address); - int phytium_dp_init(struct drm_device *dev, int pipe); int phytium_dp_resume(struct drm_device *drm_dev); +void phytium_dp_hw_init(struct phytium_dp_device *phytium_dp); 
void phytium_dp_hpd_irq_setup(struct drm_device *dev, bool enable); irqreturn_t phytium_dp_hpd_irq_handler(struct phytium_display_private *priv); void phytium_dp_hpd_work_func(struct work_struct *work); diff --git a/drivers/gpu/drm/phytium/phytium_fb.c b/drivers/gpu/drm/phytium/phytium_fb.c index fecca2cd3b8c..0d89e0d0bbbf 100644 --- a/drivers/gpu/drm/phytium/phytium_fb.c +++ b/drivers/gpu/drm/phytium/phytium_fb.c @@ -1,7 +1,15 @@ // SPDX-License-Identifier: GPL-2.0 -/* Phytium display drm driver +/* Phytium X100 display drm driver * - * Copyright (C) 2021 Phytium Technology Co., Ltd. + * Copyright (c) 2021 Phytium Limited. + * + * Author: + * Yang Xun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #include diff --git a/drivers/gpu/drm/phytium/phytium_fb.h b/drivers/gpu/drm/phytium/phytium_fb.h index c11c6c009b13..7913d3364f21 100644 --- a/drivers/gpu/drm/phytium/phytium_fb.h +++ b/drivers/gpu/drm/phytium/phytium_fb.h @@ -1,7 +1,15 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Phytium display drm driver +/* Phytium X100 display drm driver * - * Copyright (C) 2021 Phytium Technology Co., Ltd. + * Copyright (c) 2021 Phytium Limited. + * + * Author: + * Yang Xun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
*/ #ifndef __PHYTIUM_FB_H__ diff --git a/drivers/gpu/drm/phytium/phytium_fbdev.c b/drivers/gpu/drm/phytium/phytium_fbdev.c index 8eb16b3d7c70..71f6ddd64fc8 100644 --- a/drivers/gpu/drm/phytium/phytium_fbdev.c +++ b/drivers/gpu/drm/phytium/phytium_fbdev.c @@ -1,7 +1,15 @@ // SPDX-License-Identifier: GPL-2.0 -/* Phytium display drm driver +/* Phytium X100 display drm driver * - * Copyright (C) 2021 Phytium Technology Co., Ltd. + * Copyright (c) 2021 Phytium Limited. + * + * Author: + * Yang Xun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #include diff --git a/drivers/gpu/drm/phytium/phytium_fbdev.h b/drivers/gpu/drm/phytium/phytium_fbdev.h index d291d82c2706..b61f92c7d162 100644 --- a/drivers/gpu/drm/phytium/phytium_fbdev.h +++ b/drivers/gpu/drm/phytium/phytium_fbdev.h @@ -1,7 +1,15 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Phytium display drm driver +/* Phytium X100 display drm driver * - * Copyright (C) 2021 Phytium Technology Co., Ltd. + * Copyright (c) 2021 Phytium Limited. + * + * Author: + * Yang Xun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef _PHYTIUM_FBDEV_H diff --git a/drivers/gpu/drm/phytium/phytium_gem.c b/drivers/gpu/drm/phytium/phytium_gem.c index bd0b85e64bbc..e068df1fb1cf 100644 --- a/drivers/gpu/drm/phytium/phytium_gem.c +++ b/drivers/gpu/drm/phytium/phytium_gem.c @@ -1,7 +1,15 @@ // SPDX-License-Identifier: GPL-2.0 -/* Phytium display drm driver +/* Phytium X100 display drm driver * - * Copyright (C) 2021 Phytium Technology Co., Ltd. + * Copyright (c) 2021 Phytium Limited. 
+ * + * Author: + * Yang Xun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #include diff --git a/drivers/gpu/drm/phytium/phytium_gem.h b/drivers/gpu/drm/phytium/phytium_gem.h index b1d6b54ebf2f..b4fddd3efbca 100644 --- a/drivers/gpu/drm/phytium/phytium_gem.h +++ b/drivers/gpu/drm/phytium/phytium_gem.h @@ -1,7 +1,15 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Phytium display drm driver +/* Phytium X100 display drm driver * - * Copyright (C) 2021 Phytium Technology Co., Ltd. + * Copyright (c) 2021 Phytium Limited. + * + * Author: + * Yang Xun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef __PHYTIUM_GEM_H__ diff --git a/drivers/gpu/drm/phytium/phytium_panel.c b/drivers/gpu/drm/phytium/phytium_panel.c old mode 100644 new mode 100755 index ed16ed15197d..b2a35ddad2bb --- a/drivers/gpu/drm/phytium/phytium_panel.c +++ b/drivers/gpu/drm/phytium/phytium_panel.c @@ -1,7 +1,15 @@ // SPDX-License-Identifier: GPL-2.0 -/* Phytium display drm driver +/* Phytium X100 display drm driver * - * Copyright (C) 2021 Phytium Technology Co., Ltd. + * Copyright (c) 2021 Phytium Limited. + * + * Author: + * Yang Xun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
*/ #include diff --git a/drivers/gpu/drm/phytium/phytium_panel.h b/drivers/gpu/drm/phytium/phytium_panel.h old mode 100644 new mode 100755 index e2d5f068064a..a55f7bb3dc8a --- a/drivers/gpu/drm/phytium/phytium_panel.h +++ b/drivers/gpu/drm/phytium/phytium_panel.h @@ -1,7 +1,15 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Phytium display drm driver +/* Phytium X100 display drm driver * - * Copyright (C) 2021 Phytium Technology Co., Ltd. + * Copyright (c) 2021 Phytium Limited. + * + * Author: + * Yang Xun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef __PHYTIUM_PANEL_H__ diff --git a/drivers/gpu/drm/phytium/phytium_pci.c b/drivers/gpu/drm/phytium/phytium_pci.c old mode 100644 new mode 100755 index 72fe10b242dd..58639adb2750 --- a/drivers/gpu/drm/phytium/phytium_pci.c +++ b/drivers/gpu/drm/phytium/phytium_pci.c @@ -1,9 +1,16 @@ // SPDX-License-Identifier: GPL-2.0 -/* Phytium display drm driver +/* Phytium X100 display drm driver * - * Copyright (C) 2021 Phytium Technology Co., Ltd. + * Copyright (c) 2021 Phytium Limited. + * + * Author: + * Yang Xun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
*/ - #include "phytium_display_drv.h" #include "phytium_pci.h" #include "phytium_dp.h" @@ -100,9 +107,11 @@ phytium_pci_private_init(struct pci_dev *pdev, const struct pci_device_id *ent) goto failed_ioremap; } - priv->irq = pdev->irq; if (IS_X100(priv)) { pci_priv->dc_hw_vram_init = x100_dc_hw_vram_init; + priv->dc_hw_enable_irq = x100_dc_hw_enable_irq; + priv->dc_hw_disable_irq = x100_dc_hw_disable_irq; + priv->dc_hw_get_irq_status = x100_dc_hw_get_irq_status; priv->dc_hw_clear_msi_irq = x100_dc_hw_clear_msi_irq; priv->dc_hw_fb_format_check = x100_dc_hw_fb_format_check; } @@ -153,8 +162,6 @@ static int phytium_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e DRM_ERROR("pci enbale msi fail\n"); } - dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); - priv = phytium_pci_private_init(pdev, ent); if (priv) dev->dev_private = priv; @@ -269,7 +276,15 @@ static const struct phytium_device_info x100_info = { }; static const struct pci_device_id phytium_display_pci_ids[] = { - { PCI_VDEVICE(PHYTIUM, 0xdc22), (kernel_ulong_t)&x100_info }, + { + .vendor = 0x1db7, + .device = 0xdc22, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = 0x030000, + .class_mask = 0xff0000, + .driver_data = (unsigned long)&x100_info, + }, { /* End: all zeroes */ } }; MODULE_DEVICE_TABLE(pci, phytium_display_pci_ids); diff --git a/drivers/gpu/drm/phytium/phytium_pci.h b/drivers/gpu/drm/phytium/phytium_pci.h old mode 100644 new mode 100755 index 94e3a5e8e95c..6d426e1f0dad --- a/drivers/gpu/drm/phytium/phytium_pci.h +++ b/drivers/gpu/drm/phytium/phytium_pci.h @@ -1,7 +1,15 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Phytium display drm driver +/* Phytium X100 display drm driver * - * Copyright (C) 2021 Phytium Technology Co., Ltd. + * Copyright (c) 2021 Phytium Limited. 
+ * + * Author: + * Yang Xun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef __PHYTIUM_PCI_H__ @@ -20,4 +28,5 @@ struct phytium_pci_private { #define to_pci_priv(priv) container_of(priv, struct phytium_pci_private, base) extern struct pci_driver phytium_pci_driver; + #endif /* __PHYTIUM_PCI_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_plane.c b/drivers/gpu/drm/phytium/phytium_plane.c index 777bcd137293..d9e0ca21baee 100644 --- a/drivers/gpu/drm/phytium/phytium_plane.c +++ b/drivers/gpu/drm/phytium/phytium_plane.c @@ -1,7 +1,15 @@ // SPDX-License-Identifier: GPL-2.0 -/* Phytium display drm driver +/* Phytium X100 display drm driver * - * Copyright (C) 2021 Phytium Technology Co., Ltd. + * Copyright (c) 2021 Phytium Limited. + * + * Author: + * Yang Xun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
*/ #include @@ -17,7 +25,6 @@ #include "phytium_gem.h" #include "phytium_crtc.h" #include "x100_dc.h" -#include "phytium_reg.h" #define PHYTIUM_CURS_W_SIZE 32 #define PHYTIUM_CURS_H_SIZE 32 @@ -99,7 +106,8 @@ phytium_plane_atomic_destroy_state(struct drm_plane *plane, struct drm_plane_sta { struct phytium_plane_state *phytium_state = to_phytium_plane_state(state); - __drm_atomic_helper_plane_destroy_state(state); + if (state->fb) + drm_framebuffer_unreference(state->fb); kfree(phytium_state); } @@ -194,315 +202,25 @@ phytium_plane_atomic_check(struct drm_plane *plane, struct drm_plane_state *stat return 0; } -static void phytium_dc_get_plane_parameter(struct drm_plane *plane) -{ - struct phytium_plane *phytium_plane = to_phytium_plane(plane); - struct drm_framebuffer *fb = plane->state->fb; - struct phytium_framebuffer *phytium_fb = to_phytium_framebuffer(fb); - struct phytium_gem_object *phytium_gem_obj = NULL; - int i, num_planes = 0; - - num_planes = drm_format_num_planes(fb->format->format); - for (i = 0; i < num_planes; i++) { - phytium_gem_obj = phytium_fb->phytium_gem_obj[i]; - phytium_plane->iova[i] = phytium_gem_obj->iova + fb->offsets[i]; - phytium_plane->size[i] = phytium_gem_obj->size - fb->offsets[i]; - - if (fb->modifier == DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC) - phytium_plane->tiling[i] = FRAMEBUFFER_TILE_MODE0; - else if (fb->modifier == DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC) - phytium_plane->tiling[i] = FRAMEBUFFER_TILE_MODE3; - else if (fb->modifier == DRM_FORMAT_MOD_LINEAR) - phytium_plane->tiling[i] = FRAMEBUFFER_LINEAR; - else - phytium_plane->tiling[i] = FRAMEBUFFER_LINEAR; - - if (i == 0) { - switch (fb->format->format) { - case DRM_FORMAT_ARGB2101010: - case DRM_FORMAT_ABGR2101010: - case DRM_FORMAT_RGBA1010102: - case DRM_FORMAT_BGRA1010102: - phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB2101010; - break; - - case DRM_FORMAT_ARGB8888: - case DRM_FORMAT_ABGR8888: - case DRM_FORMAT_RGBA8888: - case DRM_FORMAT_BGRA8888: - 
phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB8888; - break; - - case DRM_FORMAT_XRGB8888: - case DRM_FORMAT_XBGR8888: - case DRM_FORMAT_RGBX8888: - case DRM_FORMAT_BGRX8888: - phytium_plane->format = FRAMEBUFFER_FORMAT_XRGB8888; - break; - - case DRM_FORMAT_ARGB4444: - case DRM_FORMAT_ABGR4444: - case DRM_FORMAT_RGBA4444: - case DRM_FORMAT_BGRA4444: - phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB4444; - break; - - case DRM_FORMAT_XRGB4444: - case DRM_FORMAT_XBGR4444: - case DRM_FORMAT_RGBX4444: - case DRM_FORMAT_BGRX4444: - phytium_plane->format = FRAMEBUFFER_FORMAT_XRGB4444; - break; - - case DRM_FORMAT_ARGB1555: - case DRM_FORMAT_ABGR1555: - case DRM_FORMAT_RGBA5551: - case DRM_FORMAT_BGRA5551: - phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB1555; - break; - - case DRM_FORMAT_XRGB1555: - case DRM_FORMAT_XBGR1555: - case DRM_FORMAT_RGBX5551: - case DRM_FORMAT_BGRX5551: - phytium_plane->format = FRAMEBUFFER_FORMAT_XRGB1555; - break; - - case DRM_FORMAT_RGB565: - case DRM_FORMAT_BGR565: - phytium_plane->format = FRAMEBUFFER_FORMAT_RGB565; - break; - - case DRM_FORMAT_YUYV: - phytium_plane->format = FRAMEBUFFER_FORMAT_YUYV; - break; - - case DRM_FORMAT_UYVY: - phytium_plane->format = FRAMEBUFFER_FORMAT_UYVY; - break; - case DRM_FORMAT_NV16: - phytium_plane->format = FRAMEBUFFER_FORMAT_NV16; - break; - case DRM_FORMAT_NV12: - phytium_plane->format = FRAMEBUFFER_FORMAT_NV12; - break; - case DRM_FORMAT_NV21: - phytium_plane->format = FRAMEBUFFER_FORMAT_NV12; - break; - default: - DRM_ERROR("unsupported pixel format (format = %d)\n", - fb->format->format); - return; - } - - switch (fb->format->format) { - case DRM_FORMAT_ARGB2101010: - case DRM_FORMAT_ARGB8888: - case DRM_FORMAT_XRGB8888: - case DRM_FORMAT_ARGB4444: - case DRM_FORMAT_XRGB4444: - case DRM_FORMAT_ARGB1555: - case DRM_FORMAT_XRGB1555: - case DRM_FORMAT_RGB565: - phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_ARGB; - phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; - break; - - case 
DRM_FORMAT_ABGR2101010: - case DRM_FORMAT_ABGR8888: - case DRM_FORMAT_XBGR8888: - case DRM_FORMAT_ABGR4444: - case DRM_FORMAT_XBGR4444: - case DRM_FORMAT_ABGR1555: - case DRM_FORMAT_XBGR1555: - case DRM_FORMAT_BGR565: - phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_ABGR; - phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; - break; - - case DRM_FORMAT_RGBA1010102: - case DRM_FORMAT_RGBA8888: - case DRM_FORMAT_RGBX8888: - case DRM_FORMAT_RGBA4444: - case DRM_FORMAT_RGBX4444: - case DRM_FORMAT_RGBA5551: - case DRM_FORMAT_RGBX5551: - phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_RGBA; - phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; - break; - - case DRM_FORMAT_BGRA1010102: - case DRM_FORMAT_BGRA8888: - case DRM_FORMAT_BGRX8888: - case DRM_FORMAT_BGRA4444: - case DRM_FORMAT_BGRX4444: - case DRM_FORMAT_BGRA5551: - case DRM_FORMAT_BGRX5551: - phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_BGRA; - phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; - break; - - case DRM_FORMAT_YUYV: - case DRM_FORMAT_UYVY: - case DRM_FORMAT_NV16: - case DRM_FORMAT_NV12: - phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_ARGB; - phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; - break; - - default: - DRM_ERROR("unsupported pixel format (format = %d)\n", - fb->format->format); - return; - } - } - } -} - -static void phytium_dc_primary_plane_update(struct drm_plane *plane) -{ - struct drm_device *dev = plane->dev; - struct phytium_display_private *priv = dev->dev_private; - struct phytium_plane *phytium_plane = to_phytium_plane(plane); - struct drm_framebuffer *fb = plane->state->fb; - int phys_pipe = phytium_plane->phys_pipe; - int src_x, src_y, crtc_x, crtc_y, crtc_w, crtc_h; - unsigned long base_offset; - int config; - - src_x = plane->state->src_x >> 16; - src_y = plane->state->src_y >> 16; - crtc_x = plane->state->crtc_x; - crtc_y = plane->state->crtc_y; - crtc_w = plane->state->crtc_w; - crtc_h = plane->state->crtc_h; - - if 
(phytium_plane->dc_hw_update_dcreq) - phytium_plane->dc_hw_update_dcreq(plane); - phytium_plane->dc_hw_update_primary_hi_addr(plane); - - /* config dc */ - /* Y */ - base_offset = src_x * fb->format->cpp[0] + src_y*fb->pitches[0]; - phytium_writel_reg(priv, (phytium_plane->iova[0] + base_offset) & ADDRESS_MASK, - priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_Y_ADDRESS); - phytium_writel_reg(priv, ALIGN(fb->pitches[0], 128), - priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_Y_STRIDE); - - /* U */ - phytium_writel_reg(priv, phytium_plane->iova[1] & 0xffffffff, - priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_U_ADDRESS); - phytium_writel_reg(priv, ALIGN(fb->pitches[1], 128), - priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_U_STRIDE); - - /* V */ - phytium_writel_reg(priv, phytium_plane->iova[2] & 0xffffffff, - priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_V_ADDRESS); - phytium_writel_reg(priv, ALIGN(fb->pitches[2], 128), - priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_V_STRIDE); - - /* size */ - phytium_writel_reg(priv, (crtc_w & WIDTH_MASK) | ((crtc_h&HEIGHT_MASK) << HEIGHT_SHIFT), - priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_SIZE); - /* config */ - config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], - PHYTIUM_DC_FRAMEBUFFER_CONFIG); - config &= ~(FRAMEBUFFER_FORMAT_MASK << FRAMEBUFFER_FORMAT_SHIFT); - config |= (phytium_plane->format << FRAMEBUFFER_FORMAT_SHIFT); - config &= ~(1 << FRAMEBUFFER_UVSWIZZLE_SHIFT); - config |= (phytium_plane->uv_swizzle << FRAMEBUFFER_UVSWIZZLE_SHIFT); - config &= ~(FRAMEBUFFER_SWIZZLE_MASK << FRAMEBUFFER_SWIZZLE_SHIFT); - config |= (phytium_plane->swizzle << FRAMEBUFFER_SWIZZLE_SHIFT); - config &= ~(FRAMEBUFFER_TILE_MODE_MASK << FRAMEBUFFER_TILE_MODE_SHIFT); - config |= (phytium_plane->tiling[0] << FRAMEBUFFER_TILE_MODE_SHIFT); - config &= (~FRAMEBUFFER_CLEAR); - phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], - PHYTIUM_DC_FRAMEBUFFER_CONFIG); -} - -static void 
phytium_dc_cursor_plane_update(struct drm_plane *plane) -{ - struct drm_device *dev = plane->dev; - struct phytium_display_private *priv = dev->dev_private; - struct phytium_plane *phytium_plane = to_phytium_plane(plane); - struct drm_framebuffer *fb = plane->state->fb; - int phys_pipe = phytium_plane->phys_pipe; - int config; - unsigned long iova; - - phytium_plane->enable = 1; - phytium_plane->cursor_hot_x = fb->hot_x; - phytium_plane->cursor_hot_y = fb->hot_y; - phytium_plane->cursor_x = plane->state->crtc_x + fb->hot_x; - phytium_plane->cursor_y = plane->state->crtc_y + fb->hot_y; - - config = CURSOR_FORMAT_ARGB8888 | - ((phytium_plane->cursor_hot_y & CURSOR_HOT_Y_MASK) << CURSOR_HOT_Y_SHIFT) | - ((phytium_plane->cursor_hot_x & CURSOR_HOT_X_MASK) << CURSOR_HOT_X_SHIFT); - phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], PHYTIUM_DC_CURSOR_CONFIG); - - config = ((phytium_plane->cursor_x & CURSOR_X_MASK) << CURSOR_X_SHIFT) | - ((phytium_plane->cursor_y & CURSOR_Y_MASK) << CURSOR_Y_SHIFT); - phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], - PHYTIUM_DC_CURSOR_LOCATION); - iova = phytium_plane->iova[0]; - phytium_writel_reg(priv, iova & 0xffffffff, priv->dc_reg_base[phys_pipe], - PHYTIUM_DC_CURSOR_ADDRESS); - if (phytium_plane->dc_hw_update_cursor_hi_addr) - phytium_plane->dc_hw_update_cursor_hi_addr(plane, iova); -} - static void phytium_plane_atomic_update(struct drm_plane *plane, struct drm_plane_state *old_state) { - struct drm_framebuffer *fb, *old_fb; - - DRM_DEBUG_KMS("update plane: type=%d\n", plane->type); - if (!plane->state->crtc || !plane->state->fb) - return; - - fb = plane->state->fb; - old_fb = old_state->fb; - - if (fb) - drm_framebuffer_reference(fb); - if (old_fb) - drm_framebuffer_unreference(old_fb); - - phytium_dc_get_plane_parameter(plane); + struct phytium_plane *phytium_plane = to_phytium_plane(plane); - if (plane->type == DRM_PLANE_TYPE_PRIMARY) - phytium_dc_primary_plane_update(plane); - else if (plane->type == 
DRM_PLANE_TYPE_CURSOR) - phytium_dc_cursor_plane_update(plane); + phytium_plane->dc_hw_plane_update(plane, old_state); } static void phytium_plane_atomic_disable(struct drm_plane *plane, struct drm_plane_state *old_state) { - struct drm_device *dev = plane->dev; - struct phytium_display_private *priv = dev->dev_private; struct phytium_plane *phytium_plane = to_phytium_plane(plane); - int phys_pipe = phytium_plane->phys_pipe; - int config; struct drm_framebuffer *old_fb; old_fb = old_state->fb; if (old_fb) drm_framebuffer_unreference(old_fb); - if (plane->type == DRM_PLANE_TYPE_PRIMARY) { - phytium_writel_reg(priv, CLEAR_VALUE_RED, priv->dc_reg_base[phys_pipe], - PHYTIUM_DC_FRAMEBUFFER_CLEARVALUE); - config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], - PHYTIUM_DC_FRAMEBUFFER_CONFIG); - config |= FRAMEBUFFER_CLEAR; - phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], - PHYTIUM_DC_FRAMEBUFFER_CONFIG); - } else if (plane->type == DRM_PLANE_TYPE_CURSOR) { - phytium_writel_reg(priv, CURSOR_FORMAT_DISABLED, - priv->dc_reg_base[phys_pipe], PHYTIUM_DC_CURSOR_CONFIG); - } + phytium_plane->dc_hw_plane_disable(plane, old_state); } const struct drm_plane_helper_funcs phytium_plane_helper_funcs = { @@ -541,9 +259,8 @@ struct phytium_plane *phytium_primary_plane_create(struct drm_device *dev, int p if (IS_X100(priv)) { phytium_plane->dc_hw_plane_get_format = x100_dc_hw_plane_get_primary_format; - phytium_plane->dc_hw_update_dcreq = x100_dc_hw_update_dcreq; - phytium_plane->dc_hw_update_primary_hi_addr = x100_dc_hw_update_primary_hi_addr; - phytium_plane->dc_hw_update_cursor_hi_addr = NULL; + phytium_plane->dc_hw_plane_update = x100_dc_hw_plane_update; + phytium_plane->dc_hw_plane_disable = x100_dc_hw_plane_disable; } phytium_plane->dc_hw_plane_get_format(&format_modifiers, &formats, &format_count); @@ -572,39 +289,38 @@ struct phytium_plane *phytium_primary_plane_create(struct drm_device *dev, int p struct phytium_plane *phytium_cursor_plane_create(struct 
drm_device *dev, int phys_pipe) { struct phytium_display_private *priv = dev->dev_private; - struct phytium_plane *phytium_plane = NULL; - struct phytium_plane_state *phytium_plane_state = NULL; + struct phytium_plane *phytium_cursor_plane = NULL; + struct phytium_plane_state *phytium_cursor_plane_state = NULL; int ret = 0; unsigned int flags = 0; const uint32_t *formats = NULL; uint32_t format_count; const uint64_t *format_modifiers; - phytium_plane = kzalloc(sizeof(*phytium_plane), GFP_KERNEL); - if (!phytium_plane) { + phytium_cursor_plane = kzalloc(sizeof(*phytium_cursor_plane), GFP_KERNEL); + if (!phytium_cursor_plane) { ret = -ENOMEM; goto failed_malloc_plane; } - phytium_plane_state = kzalloc(sizeof(*phytium_plane_state), GFP_KERNEL); - if (!phytium_plane_state) { + phytium_cursor_plane_state = kzalloc(sizeof(*phytium_cursor_plane_state), GFP_KERNEL); + if (!phytium_cursor_plane_state) { ret = -ENOMEM; goto failed_malloc_plane_state; } - phytium_plane_state->base.plane = &phytium_plane->base; - phytium_plane_state->base.rotation = DRM_MODE_ROTATE_0; - phytium_plane->base.state = &phytium_plane_state->base; - phytium_plane->phys_pipe = phys_pipe; + phytium_cursor_plane_state->base.plane = &phytium_cursor_plane->base; + phytium_cursor_plane_state->base.rotation = DRM_MODE_ROTATE_0; + phytium_cursor_plane->base.state = &phytium_cursor_plane_state->base; + phytium_cursor_plane->phys_pipe = phys_pipe; if (IS_X100(priv)) { - phytium_plane->dc_hw_plane_get_format = x100_dc_hw_plane_get_cursor_format; - phytium_plane->dc_hw_update_dcreq = NULL; - phytium_plane->dc_hw_update_primary_hi_addr = NULL; - phytium_plane->dc_hw_update_cursor_hi_addr = NULL; + phytium_cursor_plane->dc_hw_plane_get_format = x100_dc_hw_plane_get_cursor_format; + phytium_cursor_plane->dc_hw_plane_update = x100_dc_hw_plane_update; + phytium_cursor_plane->dc_hw_plane_disable = x100_dc_hw_plane_disable; } - phytium_plane->dc_hw_plane_get_format(&format_modifiers, &formats, &format_count); - ret = 
drm_universal_plane_init(dev, &phytium_plane->base, 0x0, + phytium_cursor_plane->dc_hw_plane_get_format(&format_modifiers, &formats, &format_count); + ret = drm_universal_plane_init(dev, &phytium_cursor_plane->base, 0x0, &phytium_plane_funcs, formats, format_count, format_modifiers, @@ -614,14 +330,14 @@ struct phytium_plane *phytium_cursor_plane_create(struct drm_device *dev, int ph goto failed_plane_init; flags = DRM_MODE_ROTATE_0; - drm_plane_create_rotation_property(&phytium_plane->base, DRM_MODE_ROTATE_0, flags); - drm_plane_helper_add(&phytium_plane->base, &phytium_plane_helper_funcs); + drm_plane_create_rotation_property(&phytium_cursor_plane->base, DRM_MODE_ROTATE_0, flags); + drm_plane_helper_add(&phytium_cursor_plane->base, &phytium_plane_helper_funcs); - return phytium_plane; + return phytium_cursor_plane; failed_plane_init: - kfree(phytium_plane_state); + kfree(phytium_cursor_plane_state); failed_malloc_plane_state: - kfree(phytium_plane); + kfree(phytium_cursor_plane); failed_malloc_plane: return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/phytium/phytium_plane.h b/drivers/gpu/drm/phytium/phytium_plane.h index 41bb607d857e..4b1c97df8726 100644 --- a/drivers/gpu/drm/phytium/phytium_plane.h +++ b/drivers/gpu/drm/phytium/phytium_plane.h @@ -1,7 +1,15 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Phytium display drm driver +/* Phytium X100 display drm driver * - * Copyright (C) 2021 Phytium Technology Co., Ltd. + * Copyright (c) 2021 Phytium Limited. + * + * Author: + * Yang Xun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
*/ #ifndef __PHYTIUM_PLANE_H__ @@ -29,9 +37,8 @@ struct phytium_plane { void (*dc_hw_plane_get_format)(const uint64_t **format_modifiers, const uint32_t **formats, uint32_t *format_count); - void (*dc_hw_update_dcreq)(struct drm_plane *plane); - void (*dc_hw_update_primary_hi_addr)(struct drm_plane *plane); - void (*dc_hw_update_cursor_hi_addr)(struct drm_plane *plane, uint64_t iova); + void (*dc_hw_plane_update)(struct drm_plane *plane, struct drm_plane_state *old_state); + void (*dc_hw_plane_disable)(struct drm_plane *plane, struct drm_plane_state *old_state); }; struct phytium_plane_state { diff --git a/drivers/gpu/drm/phytium/phytium_reg.h b/drivers/gpu/drm/phytium/phytium_reg.h index 7d8e1183f158..a64f6a47b9d9 100644 --- a/drivers/gpu/drm/phytium/phytium_reg.h +++ b/drivers/gpu/drm/phytium/phytium_reg.h @@ -1,64 +1,69 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Phytium display drm driver +/* Phytium X100 display drm driver * - * Copyright (C) 2021 Phytium Technology Co., Ltd. + * Copyright (c) 2021 Phytium Limited. + * + * Author: + * Yang Xun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
*/ #ifndef __PHYTIUM_REG_H__ #define __PHYTIUM_REG_H__ -/******************************register base******************************************/ -#define X100_PIPE_BASE(pipe) (0x8000*pipe) -#define X100_DC_BASE(pipe) (X100_PIPE_BASE(pipe) + 0x0000) -#define X100_DCREQ_BASE(pipe) (X100_PIPE_BASE(pipe) + 0x2000) -#define X100_DP_BASE(pipe) (X100_PIPE_BASE(pipe) + 0x3000) -#define X100_ADDRESS_TRANSFORM_BASE 0x4000 -#define X100_PHY_ACCESS_BASE(pipe) (X100_PIPE_BASE(pipe) + 0x5000) -/******************************register base end******************************************/ +#define PHYTIUM_PIPE_BASE(pipe) (0x8000*(pipe)) +#define PHYTIUM_DC_BASE(pipe) (PHYTIUM_PIPE_BASE(pipe) + 0x0000) +#define PHYTIUM_DCREQ_BASE(pipe) (PHYTIUM_PIPE_BASE(pipe) + 0x2000) +#define PHYTIUM_DP_BASE(pipe) (PHYTIUM_PIPE_BASE(pipe) + 0x3000) +#define PHYTIUM_ADDRESS_TRANSFORM_BASE 0x4000 +#define PHYTIUM_PHY_BASE(pipe) (PHYTIUM_PIPE_BASE(pipe) + 0x5000) /******************************dc register start******************************************/ -#define PHYTIUM_DC_FRAMEBUFFER_Y_ADDRESS 0x1400 +#define PHYTIUM_DC_CLOCK_CONTROL(pipe) (PHYTIUM_DC_BASE(pipe) + 0x0000) + #define SOFT_RESET (1<<12) +#define PHYTIUM_DC_CLOCK_IDLE(pipe) (PHYTIUM_DC_BASE(pipe) + 0x0004) + #define IS_IDLE (1<<16) +#define PHYTIUM_DC_FRAMEBUFFER_Y_ADDRESS(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1400) #define ADDRESS_MASK 0xffffff80 -#define PHYTIUM_DC_FRAMEBUFFER_Y_STRIDE 0x1408 -#define PHYTIUM_DC_PANEL_CONFIG 0x1418 +#define PHYTIUM_DC_FRAMEBUFFER_Y_STRIDE(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1408) +#define PHYTIUM_DC_PANEL_CONFIG(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1418) #define PANEL_DATAENABLE_ENABLE (1<<0) #define PANEL_DATA_ENABLE (1<<4) #define PANEL_CLOCK_ENABLE (1<<8) -#define PHYTIUM_DC_HDISPLAY 0x1430 +#define PHYTIUM_DC_HDISPLAY(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1430) #define HDISPLAY_END_SHIFT 0 #define HDISPLAY_END_MASK 0x7fff + #define HDISPLAY_END_MAX 3840 #define HDISPLAY_TOTAL_SHIFT 16 #define HDISPLAY_TOTAL_MASK
0x7fff -#define PHYTIUM_DC_HSYNC 0x1438 +#define PHYTIUM_DC_HSYNC(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1438) #define HSYNC_START_SHIFT 0 #define HSYNC_START_MASK 0x7fff #define HSYNC_END_SHIFT 15 #define HSYNC_END_MASK 0x7fff #define HSYNC_PULSE_ENABLED (1<<30) #define HSYNC_NEGATIVE (1<<31) -#define PHYTIUM_DC_VDISPLAY 0x1440 +#define PHYTIUM_DC_VDISPLAY(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1440) #define VDISPLAY_END_SHIFT 0 #define VDISPLAY_END_MASK 0x7fff + #define VDISPLAY_END_MAX 2160 #define VDISPLAY_TOTAL_SHIFT 16 #define VDISPLAY_TOTAL_MASK 0x7fff -#define PHYTIUM_DC_VSYNC 0x1448 +#define PHYTIUM_DC_VSYNC(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1448) #define VSYNC_START_SHIFT 0 #define VSYNC_START_MASK 0x7fff #define VSYNC_END_SHIFT 15 #define VSYNC_END_MASK 0x7fff #define VSYNC_PULSE_ENABLED (1<<30) #define VSYNC_NEGATIVE (1<<31) -#define PHYTIUM_DC_DISPLAY_CURRENT_LOCATION 0x1450 -#define PHYTIUM_DC_GAMMA_INDEX 0x1458 - #define GAMMA_INDEX_MAX 256 -#define PHYTIUM_DC_GAMMA_DATA 0x1460 - #define GAMMA_BLUE_SHIFT 0 - #define GAMMA_BLUE_MASK 0x3ff - #define GAMMA_GREEN_SHIFT 10 - #define GAMMA_GREEN_MASK 0x3ff - #define GAMMA_RED_SHIFT 20 - #define GAMMA_RED_MASK 0x3ff -#define PHYTIUM_DC_CURSOR_CONFIG 0x1468 +#define PHYTIUM_DC_DISPLAY_CURRENT_LOCATION(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1450) +#define PHYTIUM_DC_GAMMA_INDEX(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1458) +#define PHYTIUM_DC_GAMMA_DATA(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1460) +#define PHYTIUM_DC_CURSOR_CONFIG(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1468) #define CURSOR_FORMAT_DISABLED 0x0 #define CURSOR_FORMAT_MASKMODE 0x3 #define CURSOR_FORMAT_ARGB8888 0x2 @@ -67,24 +72,23 @@ #define CURSOR_HOT_Y_MASK 0x1f #define CURSOR_HOT_X_SHIFT 16 #define CURSOR_HOT_X_MASK 0x1f -#define PHYTIUM_DC_CURSOR_ADDRESS 0x146c -#define PHYTIUM_DC_CURSOR_LOCATION 0x1470 +#define PHYTIUM_DC_CURSOR_ADDRESS(pipe) (PHYTIUM_DC_BASE(pipe) + 0x146c) +#define PHYTIUM_DC_CURSOR_LOCATION(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1470) #define CURSOR_X_SHIFT 0 
#define CURSOR_X_MASK 0x7fff #define CURSOR_Y_SHIFT 16 #define CURSOR_Y_MASK 0x7fff -#define PHYTIUM_DC_CURSOR_BACKGROUND 0x1474 -#define PHYTIUM_DC_CURSOR_FOREGROUND 0x1478 -#define PHYTIUM_DC_INT_STATUS 0x147c +#define PHYTIUM_DC_CURSOR_BACKGROUND(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1474) +#define PHYTIUM_DC_CURSOR_FOREGROUND(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1478) +#define PHYTIUM_DC_INT_STATUS(pipe) (PHYTIUM_DC_BASE(pipe) + 0x147c) #define INT_STATUS 0x1 -#define PHYTIUM_DC_INT_ENABLE 0x1480 +#define PHYTIUM_DC_INT_ENABLE(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1480) #define INT_ENABLE 0x1 #define INT_DISABLE 0x0 -#define PHYTIUM_DC_FRAMEBUFFER_CONFIG 0x1518 +#define PHYTIUM_DC_FRAMEBUFFER_CONFIG(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1518) #define FRAMEBUFFER_OUTPUT BIT(0) - #define FRAMEBUFFER_GAMMA_ENABLE BIT(2) - #define FRAMEBUFFER_VALID_PENDING BIT(3) + #define FRAMEBUFFER_VALID_PENDING (1<<3) #define FRAMEBUFFER_RESET BIT(4) #define FRAMEBUFFER_PROGRESS BIT(6) #define FRAMEBUFFER_ROT_ANGLE_SHIFT (11) @@ -108,8 +112,6 @@ #define FRAMEBUFFER_FORMAT_ARGB8888 0x6 #define FRAMEBUFFER_FORMAT_YUYV 0x7 #define FRAMEBUFFER_FORMAT_UYVY 0x8 - #define FRAMEBUFFER_FORMAT_NV12 0x11 - #define FRAMEBUFFER_FORMAT_NV16 0x12 #define FRAMEBUFFER_FORMAT_ARGB2101010 0x16 #define FRAMEBUFFER_SWIZZLE_SHIFT 23 #define FRAMEBUFFER_SWIZZLE_MASK 0x3 @@ -122,70 +124,132 @@ #define FRAMEBUFFER_UVSWIZZLE_ENABLE 1 #define FRAMEBUFFER_CLEAR BIT(8) #define FRAMEBUFFER_SCALE_ENABLE BIT(22) -#define PHYTIUM_DC_FRAMEBUFFER_SCALECONFIG 0x1520 +#define PHYTIUM_DC_FRAMEBUFFER_SCALECONFIG(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1520) #define FRAMEBUFFER_FILTER_TAP 3 #define FRAMEBUFFER_HORIZONTAL_FILTER_TAP 3 #define FRAMEBUFFER_TAP 0x33 -#define PHYTIUM_DC_FRAMEBUFFER_U_ADDRESS 0x1530 -#define PHYTIUM_DC_FRAMEBUFFER_V_ADDRESS 0x1538 -#define PHYTIUM_DC_OVERLAY_CONFIG 0x1540 - #define X100_DC_OVERLAY_ENABLE BIT(24) - -#define PHYTIUM_DC_FRAMEBUFFER_U_STRIDE 0x1800 -#define PHYTIUM_DC_FRAMEBUFFER_V_STRIDE 0x1808 
-#define PHYTIUM_DC_FRAMEBUFFER_SIZE 0x1810 +#define PHYTIUM_DC_FRAMEBUFFER_U_ADDRESS(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1530) +#define PHYTIUM_DC_FRAMEBUFFER_V_ADDRESS(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1538) +#define PHYTIUM_DC_OVERLAY_CONFIG(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1540) + #define PHYTIUM_DC_OVERLAY_ENABLE BIT(24) + +#define PHYTIUM_DC_FRAMEBUFFER_U_STRIDE(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1800) +#define PHYTIUM_DC_FRAMEBUFFER_V_STRIDE(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1808) +#define PHYTIUM_DC_FRAMEBUFFER_SIZE(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1810) #define WIDTH_SHIFT 0 #define WIDTH_MASK 0x7fff #define HEIGHT_SHIFT 15 #define HEIGHT_MASK 0x7fff -#define PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_X 0x1828 +#define PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_X(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1828) #define SCALE_FACTOR_X_MASK 0x7fffffff -#define PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_Y 0x1830 +#define PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_Y(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1830) #define SCALE_FACTOR_Y_MASK 0x7fffffff #define SCALE_FACTOR_Y_MAX 0x3 #define SCALE_FACTOR_SRC_OFFSET 16 -#define PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER_INDEX 0x1838 +#define PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER_INDEX(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1838) #define HORI_FILTER_INDEX 0x0 -#define PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER 0x1a00 -#define PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER_INDEX 0x1a08 +#define PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1a00) +#define PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER_INDEX(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1a08) #define VERT_FILTER_INDEX 0x0 -#define PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER 0x1a10 -#define PHYTIUM_DC_FRAMEBUFFER_CLEARVALUE 0x1a18 +#define PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1a10) + +#define PHYTIUM_DC_FRAMEBUFFER_CLEARVALUE(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1a18) #define CLEAR_VALUE_RED 0x00ff0000 #define CLEAR_VALUE_GREEN 0x0000ff00 -#define PHYTIUM_DC_FRAMEBUFFER_INITIALOFFSET 0x1a20 - #define INITIALOFFSET (0x8000 | 
(0X8000 << 16)) -#define PHYTIUM_DC_DP_CONFIG 0x1cd0 + +#define PHYTIUM_DC_FRAMEBUFFER_INITIALOFFSET(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1a20) + #define INITIALOFFSET (0x8000 | (0X8000 << 16)) + +#define PHYTIUM_DC_DP_CONFIG(pipe) (PHYTIUM_DC_BASE(pipe) + 0x1cd0) #define OUTPUT_DP (1<<3) #define DP_RGB666 (0x1) #define DP_RGB888 (0x2) #define DP_RGB101010 (0x3) /******************************dc register end********************************************/ -/******************************phy access register****************************************/ -#define PHYTIUM_PHY_ACCESS_ADDRESS 0x0000 -#define PHYTIUM_PHY_WRITE_DATA 0x0004 -#define PHYTIUM_PHY_READ_DATA 0x0008 -#define PHYTIUM_PHY_ACCESS_CTRL 0x000c - #define ACCESS_WRITE (1<<0) - #define ACCESS_READ (1<<1) -/******************************phy access register end*************************************/ +/******************************dcreq register start**************************************/ +#define PHYTIUM_DCREQ_PLANE0_ADDR_START(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x00) +#define PHYTIUM_DCREQ_PLANE0_ADDR_END(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x04) +#define PHYTIUM_DCREQ_PLANE1_ADDR_START(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x08) +#define PHYTIUM_DCREQ_PLANE1_ADDR_END(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x0c) +#define PHYTIUM_DCREQ_PLANE0_CONFIG(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x10) + #define DCREQ_NO_LOSSY (0 << 0) + #define DCREQ_LOSSY (1 << 0) + #define DCREQ_TILE_TYPE_MASK (0x3 << 1) + #define DCREQ_TILE_TYPE_MODE0 (0x1 << 1) + #define DCREQ_TILE_TYPE_MODE3 (0x2 << 1) + #define DCREQ_COLOURFORMAT_MASK (0x7f << 8) + #define DCREQ_COLOURFORMAT_RGB565 (0x5 << 8) + #define DCREQ_COLOURFORMAT_ARGB1555 (0x4 << 8) + #define DCREQ_COLOURFORMAT_ARGB4444 (0x02 << 8) + #define DCREQ_COLOURFORMAT_BGRA8888 (0x29 << 8) + #define DCREQ_COLOURFORMAT_ARGB2101010 (0xe << 8) + #define DCREQ_COLOURFORMAT_YUYV (0x59 << 8) + #define DCREQ_COLOURFORMAT_UYVY (0x5b << 8) + #define DCREQ_ARGBSWIZZLE_MASK (0xf << 4) + #define 
DCREQ_ARGBSWIZZLE_ARGB (0X0 << 4) + #define DCREQ_ARGBSWIZZLE_BGRA (0XC << 4) + #define DCREQ_MODE_MASK (1 << 16) + #define DCREQ_MODE_LINEAR (0 << 16) + #define DCREQ_MODE_TILE (1 << 16) +#define PHYTIUM_DCREQ_PLANE1_CONFIG(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x14) +#define PHYTIUM_DCREQ_PLANE0_CLEAR_COLOR_L(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x18) +#define PHYTIUM_DCREQ_PLANE0_CLEAR_COLOR_H(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x1C) +#define PHYTIUM_DCREQ_PLANE1_CLEAR_COLOR_L(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x20) +#define PHYTIUM_DCREQ_PLANE1_CLEAR_COLOR_H(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x24) +#define PHYTIUM_DCREQ_PIX_CLOCK_CONFIG(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x38) + #define PIX_CLOCK_VAL_MASK (0x3fffffff) + #define REQUEST_PIX_CLOCK (1<<30) + #define STATE_PIX_CLOCK_STABLE (1<<31) + #define DC_DP_RESET_CMD (0x3fffffff) + #define PIX_CLOCK_MAX (594000) +#define PHYTIUM_DCREQ_FBCD_CLOCK_CONFIG(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x3c) +#define PHYTIUM_DCREQ_PIX_DMA_PREFIX(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x50) + #define PREFIX_MASK 0xff + #define PREFIX_SHIFT (32) +#define PHYTIUM_DCREQ_FRAME_START(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x54) +#define PHYTIUM_DCREQ_FILTER_CONFIG(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x58) +#define PHYTIUM_DCREQ_CONTROL(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x5C) + #define DC_REQ_ENABLE (1<<0) +#define PHYTIUM_DCREQ_MSI_CLEAR(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x60) + #define MSI_CLEAR 0x0 + +#define PHYTIUM_DCREQ_RESET(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x68) + #define DCREQ_RESET (0x3 << 0) + #define DCREQ_RESET_MASK 0x3 +#define PHYTIUM_DCREQ_PLAN(pipe) (PHYTIUM_DCREQ_BASE(pipe) + 0x94) + #define DCREQ_PLAN_A 0x0 + #define DCREQ_PLAN_B 0X5 +/******************************dcreq register end**************************************/ + +/******************************address transform register start**************************/ +#define PHYTIUM_DC_ADDRESS_TRANSFORM_SRC_ADDR (PHYTIUM_ADDRESS_TRANSFORM_BASE + 0x24) + #define SRC_ADDR_OFFSET 22 + 
#define SRC_ADDR_MASK 0xffffffffff +#define PHYTIUM_DC_ADDRESS_TRANSFORM_SIZE (PHYTIUM_ADDRESS_TRANSFORM_BASE + 0x28) + #define ADDRESS_TRANSFORM_ENABLE (0x1 << 31) + #define SIZE_OFFSET 22 +#define PHYTIUM_DC_ADDRESS_TRANSFORM_DST_ADDR (PHYTIUM_ADDRESS_TRANSFORM_BASE + 0x2c) + #define DST_ADDR_OFFSET 22 +#define PHYTIUM_DC_DP_RESET_STATUS (PHYTIUM_ADDRESS_TRANSFORM_BASE + 0x48) + #define DC_DP_RESET_STATUS(pipe) (1 << pipe) +/******************************address transform register end**************************/ /******************************dp register start******************************************/ -#define PHYTIUM_DP_LINK_BW_SET 0x0000 -#define PHYTIUM_DP_LANE_COUNT_SET 0x0004 -#define PHYTIUM_DP_ENHANCED_FRAME_EN 0x0008 +#define PHYTIUM_DP_LINK_BW_SET(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0000) +#define PHYTIUM_DP_LANE_COUNT_SET(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0004) +#define PHYTIUM_DP_ENHANCED_FRAME_EN(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0008) #define ENHANCED_FRAME_ENABLE 0x1 #define ENHANCED_FRAME_DISABLE 0x0 -#define PHYTIUM_DP_TRAINING_PATTERN_SET 0x000c +#define PHYTIUM_DP_TRAINING_PATTERN_SET(pipe) (PHYTIUM_DP_BASE(pipe) + 0x000c) #define TRAINING_OFF 0x0 #define TRAINING_PATTERN_1 0x1 #define TRAINING_PATTERN_2 0x2 #define TRAINING_PATTERN_3 0x3 #define TRAINING_PATTERN_4 0x4 -#define PHYTIUM_DP_LINK_QUAL_PATTERN_SET 0x0010 +#define PHYTIUM_DP_LINK_QUAL_PATTERN_SET(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0010) #define TEST_PATTERN_NONE 0x0 #define TEST_PATTERN_D10_2 0x1 #define TEST_PATTERN_SYMBOL_ERROR 0x2 @@ -195,57 +259,59 @@ #define TEST_PATTERN_CP2520_2 0x6 #define TEST_PATTERN_CP2520_3 0x7 #define TEST_PATTERN_LANE_SHIFT 8 -#define PHYTIUM_DP_SCRAMBLING_DISABLE 0x0014 + +#define PHYTIUM_DP_SCRAMBLING_DISABLE(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0014) #define SCRAMBLING_ENABLE 0x0 #define SCRAMBLING_DISABLE 0x1 -#define PHYTIUM_DP_DOWNSPREAD_CTRL 0x0018 -#define PHYTIUM_DP_ALT_SCRAMBLER_RESET 0x001c -#define PHYTIUM_DP_HBR2_SCRAMBLER_RESET 0x0020 -#define 
PHYTIUM_DP_DISPLAYPORT_VERSION 0x0024 -#define PHYTIUM_DP_CUSTOM_80BIT_PATTERN_0 0x0030 -#define PHYTIUM_DP_CUSTOM_80BIT_PATTERN_1 0x0034 -#define PHYTIUM_DP_CUSTOM_80BIT_PATTERN_2 0x0038 -#define PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE 0x0080 +#define PHYTIUM_DP_DOWNSPREAD_CTRL(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0018) +#define PHYTIUM_DP_ALT_SCRAMBLER_RESET(pipe) (PHYTIUM_DP_BASE(pipe) + 0x001c) +#define PHYTIUM_DP_HBR2_SCRAMBLER_RESET(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0020) +#define PHYTIUM_DP_DISPLAYPORT_VERSION(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0024) +#define PHYTIUM_DP_LANE_REMAP(pipe) (PHYTIUM_DP_BASE(pipe) + 0x002C) +#define PHYTIUM_DP_CUSTOM_80BIT_PATTERN_0(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0030) +#define PHYTIUM_DP_CUSTOM_80BIT_PATTERN_1(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0034) +#define PHYTIUM_DP_CUSTOM_80BIT_PATTERN_2(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0038) +#define PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0080) #define TRANSMITTER_OUTPUT_ENABLE BIT(0) #define TRANSMITTER_OUTPUT_DISABLE 0 -#define PHYTIUM_DP_VIDEO_STREAM_ENABLE 0x0084 +#define PHYTIUM_DP_VIDEO_STREAM_ENABLE(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0084) #define SST_MST_SOURCE_0_ENABLE BIT(0) #define SST_MST_SOURCE_0_ENABLE_MASK 0x1 #define SST_MST_SOURCE_0_DISABLE 0 -#define PHYTIUM_DP_SECONDARY_STREAM_ENABLE 0x0088 +#define PHYTIUM_DP_SECONDARY_STREAM_ENABLE(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0088) #define SECONDARY_STREAM_ENABLE 0x1 #define SECONDARY_STREAM_DISABLE 0x0 -#define PHYTIUM_DP_SEC_DATA_WINDOW 0x008C -#define PHYTIUM_DP_SOFT_RESET 0x0090 +#define PHYTIUM_DP_SEC_DATA_WINDOW(pipe) (PHYTIUM_DP_BASE(pipe) + 0x008C) +#define PHYTIUM_DP_SOFT_RESET(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0090) #define LINK_SOFT_RESET (0x1 << 0) #define VIDEO_SOFT_RESET (0x1 << 1) -#define PHYTIUM_INPUT_SOURCE_ENABLE 0x0094 +#define PHYTIUM_INPUT_SOURCE_ENABLE(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0094) #define VIRTUAL_SOURCE_0_ENABLE BIT(0) #define VIRTUAL_SOURCE_0_ENABLE_MASK 0x1 -#define
PHYTIUM_DP_FORCE_SCRAMBLER_RESET 0x00C0 +#define PHYTIUM_DP_FORCE_SCRAMBLER_RESET(pipe) (PHYTIUM_DP_BASE(pipe) + 0x00C0) #define SCRAMBLER_RESET BIT(0) -#define PHYTIUM_DP_SOURCE_CONTROL_STATUS 0x00C4 -#define PHYTIUM_DP_DATA_CONTROL 0x00C8 -#define PHYTIUM_DP_CORE_CAPABILITY 0x00F8 -#define PHYTIUM_DP_CORE_ID 0x00FC -#define PHYTIUM_DP_AUX_COMMAND 0x0100 +#define PHYTIUM_DP_SOURCE_CONTROL_STATUS(pipe) (PHYTIUM_DP_BASE(pipe) + 0x00C4) +#define PHYTIUM_DP_DATA_CONTROL(pipe) (PHYTIUM_DP_BASE(pipe) + 0x00C8) +#define PHYTIUM_DP_CORE_CAPABILITY(pipe) (PHYTIUM_DP_BASE(pipe) + 0x00F8) +#define PHYTIUM_DP_CORE_ID(pipe) (PHYTIUM_DP_BASE(pipe) + 0x00FC) +#define PHYTIUM_DP_AUX_COMMAND(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0100) #define BYTE_COUNT_MASK 0xf #define COMMAND_SHIFT 8 #define COMMAND_MASK 0xf #define ADDRESS_ONLY (1<<12) -#define PHYTIUM_DP_AUX_WRITE_FIFO 0x0104 -#define PHYTIUM_DP_AUX_ADDRESS 0x0108 -#define PHYTIUM_DP_AUX_CLK_DIVIDER 0x010C +#define PHYTIUM_DP_AUX_WRITE_FIFO(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0104) +#define PHYTIUM_DP_AUX_ADDRESS(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0108) +#define PHYTIUM_DP_AUX_CLK_DIVIDER(pipe) (PHYTIUM_DP_BASE(pipe) + 0x010C) #define AUX_CLK_DIVIDER 48 -#define PHYTIUM_DP_SINK_HPD_STATE 0x0128 +#define PHYTIUM_DP_SINK_HPD_STATE(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0128) #define HPD_CONNECT 0x1 #define HPD_DISCONNECT 0x0 -#define PHYTIUM_DP_INTERRUPT_RAW_STATUS 0x0130 +#define PHYTIUM_DP_INTERRUPT_RAW_STATUS(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0130) #define REPLY_TIMEOUT (1<<3) - #define DP_STATUS_REQUEST_IN_PROGRESS (1<<1) + #define PHYTIUM_DP_STATUS_REQUEST_IN_PROGRESS (1<<1) #define HPD_STATE (0<<1) -#define PHYTIUM_DP_AUX_REPLY_DATA 0x0134 -#define PHYTIUM_DP_AUX_REPLY_CODE 0x0138 +#define PHYTIUM_DP_AUX_REPLY_DATA(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0134) +#define PHYTIUM_DP_AUX_REPLY_CODE(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0138) #define AUX_NATIVE_ACK (0x0<<0) #define AUX_NATIVE_NACK (0x1<<0) #define AUX_NATIVE_DEFER (0x2<<0) @@ -254,32 
+320,32 @@ #define AUX_I2C_NACK (0x1<<2) #define AUX_I2C_DEFER (0x2<<2) #define AUX_I2C_MASK (0x3 << 2) -#define PHYTIUM_DP_INTERRUPT_STATUS 0x0140 - #define HPD_IRQ (1<<1) +#define PHYTIUM_DP_INTERRUPT_STATUS(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0140) + #define HPD_IRQ (1<<1) #define HPD_EVENT (1<<0) -#define PHYTIUM_DP_INTERRUPT_MASK 0x0144 +#define PHYTIUM_DP_INTERRUPT_MASK(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0144) #define HPD_IRQ_MASK (1<<1) #define HPD_EVENT_MASK (1<<0) #define HPD_OTHER_MASK 0x3c -#define PHYTIUM_DP_AUX_REPLY_DATA_COUNT 0x0148 -#define PHYTIUM_DP_AUX_STATUS 0x014C +#define PHYTIUM_DP_AUX_REPLY_DATA_COUNT(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0148) +#define PHYTIUM_DP_AUX_STATUS(pipe) (PHYTIUM_DP_BASE(pipe) + 0x014C) #define REPLY_RECEIVED 0x1 #define REPLY_IN_PROGRESS 0x2 #define REQUEST_IN_PROGRESS 0x4 #define REPLY_ERROR 0x8 -#define PHYTIUM_DP_AUX_TIMER 0x0158 -#define PHYTIUM_DP_MAIN_LINK_HTOTAL 0x0180 -#define PHYTIUM_DP_MAIN_LINK_VTOTAL 0x0184 -#define PHYTIUM_DP_MAIN_LINK_POLARITY 0x0188 +#define PHYTIUM_DP_AUX_TIMER(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0158) +#define PHYTIUM_DP_MAIN_LINK_HTOTAL(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0180) +#define PHYTIUM_DP_MAIN_LINK_VTOTAL(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0184) +#define PHYTIUM_DP_MAIN_LINK_POLARITY(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0188) #define VSYNC_POLARITY_LOW BIT(1) #define HSYNC_POLARITY_LOW BIT(0) -#define PHYTIUM_DP_MAIN_LINK_HSWIDTH 0x018C -#define PHYTIUM_DP_MAIN_LINK_VSWIDTH 0x0190 -#define PHYTIUM_DP_MAIN_LINK_HRES 0x0194 -#define PHYTIUM_DP_MAIN_LINK_VRES 0x0198 -#define PHYTIUM_DP_MAIN_LINK_HSTART 0x019C -#define PHYTIUM_DP_MAIN_LINK_VSTART 0x01A0 -#define PHYTIUM_DP_MAIN_LINK_MISC0 0x01A4 +#define PHYTIUM_DP_MAIN_LINK_HSWIDTH(pipe) (PHYTIUM_DP_BASE(pipe) + 0x018C) +#define PHYTIUM_DP_MAIN_LINK_VSWIDTH(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0190) +#define PHYTIUM_DP_MAIN_LINK_HRES(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0194) +#define PHYTIUM_DP_MAIN_LINK_VRES(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0198) 
+#define PHYTIUM_DP_MAIN_LINK_HSTART(pipe) (PHYTIUM_DP_BASE(pipe) + 0x019C) +#define PHYTIUM_DP_MAIN_LINK_VSTART(pipe) (PHYTIUM_DP_BASE(pipe) + 0x01A0) +#define PHYTIUM_DP_MAIN_LINK_MISC0(pipe) (PHYTIUM_DP_BASE(pipe) + 0x01A4) #define MISC0_SYNCHRONOUS_CLOCK BIT(0) #define MISC0_BIT_DEPTH_OFFSET 5 #define MISC0_BIT_DEPTH_6BIT 0x0 @@ -287,44 +353,47 @@ #define MISC0_BIT_DEPTH_10BIT 0x2 #define MISC0_COMPONENT_FORMAT_SHIFT 1 #define MISC0_COMPONENT_FORMAT_RGB 0x0 -#define PHYTIUM_DP_MAIN_LINK_MISC1 0x01A8 -#define PHYTIUM_DP_M_VID 0x01AC -#define PHYTIUM_DP_TRANSFER_UNIT_SIZE 0x01B0 -#define PHYTIUM_DP_N_VID 0x01B4 -#define PHYTIUM_DP_USER_PIXEL_WIDTH 0x01B8 -#define PHYTIUM_DP_DATA_COUNT 0x01BC -#define PHYTIUM_DP_INTERLACED 0x01C0 -#define PHYTIUM_DP_USER_SYNC_POLARITY 0x01C4 - #define USER_ODDEVEN_POLARITY_HIGH BIT(3) - #define USER_DATA_ENABLE_POLARITY_HIGH BIT(2) - #define USER_VSYNC_POLARITY_HIGH BIT(1) - #define USER_HSYNC_POLARITY_HIGH BIT(0) -#define PHYTIUM_DP_USER_CONTROL 0x01C8 -#define PHYTIUM_EDP_CRC_ENABLE 0x01D0 -#define PHYTIUM_EDP_CRC_RED 0x01D4 -#define PHYTIUM_EDP_CRC_GREEN 0x01D8 -#define PHYTIUM_EDP_CRC_BLUE 0x01DC -#define PHYTIUM_DP_SEC_AUDIO_ENABLE 0x0300 - #define SEC_AUDIO_ENABLE BIT(0) - #define CHANNEL_MUTE_ENABLE BIT(1) -#define PHYTIUM_DP_SEC_INPUT_SELECT 0x0304 + +#define PHYTIUM_DP_MAIN_LINK_MISC1(pipe) (PHYTIUM_DP_BASE(pipe) + 0x01A8) +#define PHYTIUM_DP_M_VID(pipe) (PHYTIUM_DP_BASE(pipe) + 0x01AC) +#define PHYTIUM_DP_TRANSFER_UNIT_SIZE(pipe) (PHYTIUM_DP_BASE(pipe) + 0x01B0) +#define PHYTIUM_DP_N_VID(pipe) (PHYTIUM_DP_BASE(pipe) + 0x01B4) +#define PHYTIUM_DP_USER_PIXEL_WIDTH(pipe) (PHYTIUM_DP_BASE(pipe) + 0x01B8) +#define PHYTIUM_DP_DATA_COUNT(pipe) (PHYTIUM_DP_BASE(pipe) + 0x01BC) +#define PHYTIUM_DP_INTERLACED(pipe) (PHYTIUM_DP_BASE(pipe) + 0x01C0) +#define PHYTIUM_DP_USER_SYNC_POLARITY(pipe) (PHYTIUM_DP_BASE(pipe) + 0x01C4) +#define USER_ODDEVEN_POLARITY_HIGH BIT(3) +#define USER_DATA_ENABLE_POLARITY_HIGH BIT(2) +#define 
USER_VSYNC_POLARITY_HIGH BIT(1) +#define USER_HSYNC_POLARITY_HIGH BIT(0) +#define PHYTIUM_DP_USER_CONTROL(pipe) (PHYTIUM_DP_BASE(pipe) + 0x01C8) +#define PHYTIUM_EDP_CRC_ENABLE(pipe) (PHYTIUM_DP_BASE(pipe) + 0x01D0) +#define PHYTIUM_EDP_CRC_RED(pipe) (PHYTIUM_DP_BASE(pipe) + 0x01D4) +#define PHYTIUM_EDP_CRC_GREEN(pipe) (PHYTIUM_DP_BASE(pipe) + 0x01D8) +#define PHYTIUM_EDP_CRC_BLUE(pipe) (PHYTIUM_DP_BASE(pipe) + 0x01DC) + +#define PHYTIUM_DP_SEC_AUDIO_ENABLE(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0300) + #define SEC_AUDIO_ENABLE 1 + #define SEC_AUDIO_DISABLE 0 +#define PHYTIUM_DP_SEC_INPUT_SELECT(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0304) #define INPUT_SELECT_I2S 0x0 -#define PHYTIUM_DP_SEC_CHANNEL_COUNT 0x0308 +#define PHYTIUM_DP_SEC_CHANNEL_COUNT(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0308) + #define CHANNEL_MUTE 0x0 #define CHANNEL_2 0x2 #define CHANNEL_2_LFE 0x3 #define CHANNEL_5_1 0x6 #define CHANNEL_7_1 0x7 #define CHANNEL_MASK 0xf -#define PHYTIUM_DP_SEC_DIRECT_CLKDIV 0x030c +#define PHYTIUM_DP_SEC_DIRECT_CLKDIV(pipe) (PHYTIUM_DP_BASE(pipe) + 0x030c) #define APB_CLOCK 48000000 -#define PHYTIUM_DP_SEC_MAUD 0x0318 -#define PHYTIUM_DP_SEC_NAUD 0x031c -#define PHYTIUM_DP_SEC_CLOCK_MODE 0x0320 +#define PHYTIUM_DP_SEC_MAUD(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0318) +#define PHYTIUM_DP_SEC_NAUD(pipe) (PHYTIUM_DP_BASE(pipe) + 0x031c) +#define PHYTIUM_DP_SEC_CLOCK_MODE(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0320) #define CLOCK_MODE_SYNC 0x1 -#define PHYTIUM_DP_SEC_CS_SOURCE_FORMAT 0x0340 +#define PHYTIUM_DP_SEC_CS_SOURCE_FORMAT(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0340) #define CS_SOURCE_FORMAT_DEFAULT 0x0 -#define PHYTIUM_DP_SEC_CS_CATEGORY_CODE 0x0344 -#define PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ 0x0348 +#define PHYTIUM_DP_SEC_CS_CATEGORY_CODE(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0344) +#define PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ(pipe) (PHYTIUM_DP_BASE(pipe) + 0x0348) #define ORIG_FREQ_32000 0xc #define ORIG_FREQ_44100 0xf #define ORIG_FREQ_48000 0xd @@ -340,7 +409,7 @@ #define WORD_LENGTH_24 0xd 
#define WORD_LENGTH_MASK 0xf #define WORD_LENGTH_SHIFT 4 -#define PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY 0x034c // not used +#define PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY(pipe) (PHYTIUM_DP_BASE(pipe) + 0x034c) // not used #define SAMPLING_FREQ_32000 0xc #define SAMPLING_FREQ_44100 0x0 #define SAMPLING_FREQ_48000 0x4 @@ -350,8 +419,260 @@ #define SAMPLING_FREQ_192000 0x7 #define SAMPLING_FREQ_MASK 0xf #define SAMPLING_FREQ_SHIFT 4 -#define PHYTIUM_DP_SEC_CHANNEL_MAP 0x035C + +#define PHYTIUM_DP_SEC_CHANNEL_MAP(pipe) (PHYTIUM_DP_BASE(pipe) + 0x035C) #define CHANNEL_MAP_DEFAULT 0x87654321 /******************************dp register end********************************************/ +/******************************phy register start******************************************/ +/* self define */ +#define PHYTIUM_PHY_ACCESS_ADDRESS(pipe) (PHYTIUM_PHY_BASE(pipe) + 0x0000) +#define PHYTIUM_PHY_WRITE_DATA(pipe) (PHYTIUM_PHY_BASE(pipe) + 0x0004) +#define PHYTIUM_PHY_READ_DATA(pipe) (PHYTIUM_PHY_BASE(pipe) + 0x0008) +#define PHYTIUM_PHY_ACCESS_CTRL(pipe) (PHYTIUM_PHY_BASE(pipe) + 0x000c) + #define ACCESS_WRITE (1<<0) + #define ACCESS_READ (1<<1) +#define PHYTIUM_PHY0_PIPE_RESET 0x40104 + #define RESET 0x0 + #define RESET_DEASSERT 0x1 +#define PHYTIUM_PHY1_PIPE_RESET 0x100100 + #define PHY1_PIPE_RESET 0x0 + #define PHY1_PIPE_RESET_DEASSERT 0x4 + +#define PHYTIUM_PHY1_EN_REFCLK 0x100070 + +#define PHYTIUM_PHY0_MODE 0x40088 + #define LANE_BIT (0x3) + #define LANE_BIT_SHIFT 0x2 +#define PHYTIUM_PHY1_SEL 0x100004 + #define PHY1_DP_LANE_BIT 0x1 + #define PHY1_DP_LANE_BIT_SHIFT 2 + +#define PHYTIUM_PHY0_LINK_CFG 0x40044 + #define LANE_MASTER 0x1 + #define LANE_MASTER_SHIFT 1 + +#define PHYTUIM_PHY0_PLL_EN 0x40010 + #define PLL_EN 0x1 + #define PLL_EN_SHIFT 1 +#define PHYTUIM_PHY0_PMA_WIDTH 0x40020 + #define BIT_20 0x5 + #define BIT_20_SHIFT 4 + +#define PHYTIUM_PHY0_PMA0_POWER 0x40014 +#define PHYTIUM_PHY0_PMA1_POWER 0x40018 + #define A0_ACTIVE 0x1 + #define A0_ACTIVE_SHIFT 8 + #define 
A3_POWERDOWN3 0x8 + #define A3_POWERDOWN3_SHIFT 8 + +#define PHYTIUM_PHY1_PMA_MISC 0x1000a0 + #define PHY1_PLL_EN 0x1 + #define PHY1_PLL_EN_MASK 1 + #define PHY1_PLL_EN_SHIFT 8 + #define PHY1_BIT_20 0x5 + #define PHY1_BIT_20_SHIFT 9 + #define PHY1_A0_ACTIVE 0x1 + #define PHY1_A0_ACTIVE_SHIFT 2 + #define PHY1_A0_ACTIVE_MASK 0x3f + #define PHY1_A3_POWERDOWN3 0x8 + #define PHY1_A3_POWERDOWN3_MASK 0x3f + #define PHY1_A3_POWERDOWN3_SHIFT 2 + +#define PHYTIUM_PHY0_LINK_RESET 0x40108 + #define LINK_RESET 0x1 + #define LINK_RESET_MASK 0x1 + #define LINTK_RESET_SHIFT 0x1 + +#define PHYTIUM_PHY0_APB_RESET 0x40100 + #define APB_RESET 0x1 +#define PHYTIUM_PHY1_APB_RESET 0x100104 + #define PHY1_APB_RESET 0x4 + +/* phy origin register */ +#define PHYTIUM_PHY0_PLL_CFG 0x30038 +#define PHYTIUM_PHY1_PLL_CFG 0xb0038 + #define SINGLE_LINK 0x0 + #define DOUBLE_LINK 0x2 + +#define PHYTIUM_PHY0_PMA_CONTROL 0x3800c +#define PHYTIUM_PHY1_PMA_CONTROL 0xb800c + #define CONTROL_ENABLE 0x1 + #define CONTROL_ENABLE_MASK 0x1 + #define CONTROL_ENABLE_SHIFT 0x1 + +#define PHYTIUM_PHY0_PMA_CONTROL2 0x38004 +#define PHYTIUM_PHY1_PMA_CONTROL2 0xb8004 + +#define PHYTIUM_PHY0_PLL0_CLK_SEL 0X684 +#define PHYTIUM_PHY0_PLL1_CLK_SEL 0x704 +#define PHYTIUM_PHY1_PLL_CLK_SEL 0X80684 + #define PLL_LINK_RATE_162000 0xf01 + #define PLL_LINK_RATE_270000 0x701 + #define PLL_LINK_RATE_540000 0x301 + #define PLL_LINK_RATE_810000 0x200 + +#define PHYTIUM_PHY0_HSCLK0_SEL 0x18398 +#define PHYTIUM_PHY0_HSCLK1_SEL 0x1a398 +#define PHYTIUM_PHY1_HSCLK_SEL 0x90398 + #define HSCLK_LINK_0 0x0 + #define HSCLK_LINK_1 0x1 + +#define PHYTIUM_PHY0_HSCLK0_DIV 0x1839c +#define PHYTIUM_PHY0_HSCLK1_DIV 0x1a39c +#define PHYTIUM_PHY1_HSCLK_DIV 0x9039c + #define HSCLK_LINK_RATE_162000 0x2 + #define HSCLK_LINK_RATE_270000 0x1 + #define HSCLK_LINK_RATE_540000 0x0 + #define HSCLK_LINK_RATE_810000 0x0 + +#define PHYTIUM_PHY0_PLLDRC0_CTRL 0x18394 +#define PHYTIUM_PHY0_PLLDRC1_CTRL 0x1a394 +#define PHYTIUM_PHY1_PLLDRC_CTRL 0x90394 + #define 
PLLDRC_LINK0 0x1 + #define PLLDRC_LINK1 0x9 + +#define PHYTIUM_PHY0_PLL0_DSM_M0 0x250 +#define PHYTIUM_PHY1_PLL0_DSM_M0 0x80250 + #define PLL0_DSM_M0 0x4 +#define PHYTIUM_PHY0_PLL0_VCOCAL_START 0x218 +#define PHYTIUM_PHY1_PLL0_VCOCAL_START 0x80218 + #define PLL0_VCOCAL_START 0xc5e +#define PHYTIUM_PHY0_PLL0_VCOCAL_CTRL 0x208 +#define PHYTIUM_PHY1_PLL0_VCOCAL_CTRL 0x80208 + #define PLL0_VCOCAL_CTRL 0x3 + +#define PHYTIUM_PHY0_PLL1_DSM_M0 0x350 + #define PLL1_DSM_M0 0x4 +#define PHYTIUM_PHY0_PLL1_VCOCAL_START 0x318 + #define PLL1_VCOCAL_START 0xc5e +#define PHYTIUM_PHY0_PLL1_VCOCAL_CTRL 0x308 + #define PLL1_VCOCAL_CTRL 0x3 + +#define PHYTIUM_PHY0_PLL0_CP_PADJ 0x690 +#define PHYTIUM_PHY0_PLL0_CP_IADJ 0x694 +#define PHYTIUM_PHY0_PLL0_CP_FILT_PADJ 0x698 +#define PHYTIUM_PHY0_PLL0_INTDIV 0x240 +#define PHYTIUM_PHY0_PLL0_FRACDIVL 0x244 +#define PHYTIUM_PHY0_PLL0_FRACDIVH 0x248 +#define PHYTIUM_PHY0_PLL0_HIGH_THR 0x24c +#define PHYTIUM_PHY0_PLL0_PDIAG_CTRL 0x680 +#define PHYTIUM_PHY0_PLL0_VCOCAL_PLLCNT_START 0x220 +#define PHYTIUM_PHY0_PLL0_LOCK_PEFCNT 0x270 +#define PHYTIUM_PHY0_PLL0_LOCK_PLLCNT_START 0x278 +#define PHYTIUM_PHY0_PLL0_LOCK_PLLCNT_THR 0x27c + +#define PHYTIUM_PHY0_PLL1_CP_PADJ 0x710 +#define PHYTIUM_PHY0_PLL1_CP_IADJ 0x714 +#define PHYTIUM_PHY0_PLL1_CP_FILT_PADJ 0x718 +#define PHYTIUM_PHY0_PLL1_INTDIV 0x340 +#define PHYTIUM_PHY0_PLL1_FRACDIVL 0x344 +#define PHYTIUM_PHY0_PLL1_FRACDIVH 0x348 +#define PHYTIUM_PHY0_PLL1_HIGH_THR 0x34c +#define PHYTIUM_PHY0_PLL1_PDIAG_CTRL 0x700 +#define PHYTIUM_PHY0_PLL1_VCOCAL_PLLCNT_START 0x320 +#define PHYTIUM_PHY0_PLL1_LOCK_PEFCNT 0x370 +#define PHYTIUM_PHY0_PLL1_LOCK_PLLCNT_START 0x378 +#define PHYTIUM_PHY0_PLL1_LOCK_PLLCNT_THR 0x37c + +#define PHYTIUM_PHY1_PLL0_CP_PADJ 0x80690 +#define PHYTIUM_PHY1_PLL0_CP_IADJ 0x80694 +#define PHYTIUM_PHY1_PLL0_CP_FILT_PADJ 0x80698 +#define PHYTIUM_PHY1_PLL0_INTDIV 0x80240 +#define PHYTIUM_PHY1_PLL0_FRACDIVL 0x80244 +#define PHYTIUM_PHY1_PLL0_FRACDIVH 0x80248 +#define 
PHYTIUM_PHY1_PLL0_HIGH_THR 0x8024c +#define PHYTIUM_PHY1_PLL0_PDIAG_CTRL 0x80680 +#define PHYTIUM_PHY1_PLL0_VCOCAL_PLLCNT_START 0x80220 +#define PHYTIUM_PHY1_PLL0_LOCK_PEFCNT 0x80270 +#define PHYTIUM_PHY1_PLL0_LOCK_PLLCNT_START 0x80278 +#define PHYTIUM_PHY1_PLL0_LOCK_PLLCNT_THR 0x8027c + +#define PHYTIUM_PHY0_PLL0_TX_PSC_A0 0x18400 +#define PHYTIUM_PHY1_PLL0_TX_PSC_A0 0x90400 + #define PLL0_TX_PSC_A0 0xfb +#define PHYTIUM_PHY0_PLL0_TX_PSC_A2 0x18408 +#define PHYTIUM_PHY1_PLL0_TX_PSC_A2 0x90408 + #define PLL0_TX_PSC_A2 0x4aa +#define PHYTIUM_PHY0_PLL0_TX_PSC_A3 0x1840c +#define PHYTIUM_PHY1_PLL0_TX_PSC_A3 0x9040c + #define PLL0_TX_PSC_A3 0x4aa +#define PHYTIUM_PHY0_PLL0_RX_PSC_A0 0x28000 +#define PHYTIUM_PHY1_PLL0_RX_PSC_A0 0xa0000 + #define PLL0_RX_PSC_A0 0x0 +#define PHYTIUM_PHY0_PLL0_RX_PSC_A2 0x28008 +#define PHYTIUM_PHY1_PLL0_RX_PSC_A2 0xa0008 + #define PLL0_RX_PSC_A2 0x0 +#define PHYTIUM_PHY0_PLL0_RX_PSC_A3 0x2800C +#define PHYTIUM_PHY1_PLL0_RX_PSC_A3 0xa000C + #define PLL0_RX_PSC_A3 0x0 +#define PHYTIUM_PHY0_PLL0_RX_PSC_CAL 0x28018 +#define PHYTIUM_PHY1_PLL0_RX_PSC_CAL 0xa0018 + #define PLL0_RX_PSC_CAL 0x0 + +#define PHYTIUM_PHY0_PLL1_TX_PSC_A0 0x1a400 + #define PLL1_TX_PSC_A0 0xfb +#define PHYTIUM_PHY0_PLL1_TX_PSC_A2 0x1a408 + #define PLL1_TX_PSC_A2 0x4aa +#define PHYTIUM_PHY0_PLL1_TX_PSC_A3 0x1a40c + #define PLL1_TX_PSC_A3 0x4aa +#define PHYTIUM_PHY0_PLL1_RX_PSC_A0 0x2a000 + #define PLL1_RX_PSC_A0 0x0 +#define PHYTIUM_PHY0_PLL1_RX_PSC_A2 0x2a008 + #define PLL1_RX_PSC_A2 0x0 +#define PHYTIUM_PHY0_PLL1_RX_PSC_A3 0x2a00C + #define PLL1_RX_PSC_A3 0x0 +#define PHYTIUM_PHY0_PLL1_RX_PSC_CAL 0x2a018 + #define PLL1_RX_PSC_CAL 0x0 + +#define PHYTIUM_PHY0_PLL0_XCVR_CTRL 0x183a8 +#define PHYTIUM_PHY1_PLL0_XCVR_CTRL 0x903a8 + #define PLL0_XCVR_CTRL 0xf +#define PHYTIUM_PHY0_PLL1_XCVR_CTRL 0x1a3a8 + #define PLL1_XCVR_CTRL 0xf + +#define PHYTIUM_PHY0_PLL0_RX_GCSM1_CTRL 0x28420 +#define PHYTIUM_PHY1_PLL0_RX_GCSM1_CTRL 0xa0420 + #define PLL0_RX_GCSM1_CTRL 0x0 +#define 
PHYTIUM_PHY0_PLL0_RX_GCSM2_CTRL 0x28440 +#define PHYTIUM_PHY1_PLL0_RX_GCSM2_CTRL 0xa0440 + #define PLL0_RX_GCSM2_CTRL 0x0 +#define PHYTIUM_PHY0_PLL0_RX_PERGCSM_CTRL 0x28460 +#define PHYTIUM_PHY1_PLL0_RX_PERGCSM_CTRL 0xa0460 + #define PLL0_RX_PERGCSM_CTRL 0x0 + +#define PHYTIUM_PHY0_PLL1_RX_GCSM1_CTRL 0x2a420 + #define PLL1_RX_GCSM1_CTRL 0x0 +#define PHYTIUM_PHY0_PLL1_RX_GCSM2_CTRL 0x2a440 + #define PLL1_RX_GCSM2_CTRL 0x0 +#define PHYTIUM_PHY0_PLL1_RX_PERGCSM_CTRL 0x2a460 + #define PLL1_RX_PERGCSM_CTRL 0x0 + +/* swing and emphasis */ +#define PHYTIUM_PHY0_PLL0_TX_DIAG_ACYA 0x1879c +#define PHYTIUM_PHY0_PLL1_TX_DIAG_ACYA 0x1a79c +#define PHYTIUM_PHY1_PLL0_TX_DIAG_ACYA 0x9079c + #define LOCK 1 + #define UNLOCK 0 + +#define PHYTIUM_PHY0_PLL0_TX_TXCC_CTRL 0x18100 +#define PHYTIUM_PHY0_PLL1_TX_TXCC_CTRL 0x1a100 +#define PHYTIUM_PHY1_PLL0_TX_TXCC_CTRL 0x90100 + #define TX_TXCC_CTRL 0x8a4 + +#define PHYTIUM_PHY0_PLL0_TX_DRV 0x18318 +#define PHYTIUM_PHY0_PLL1_TX_DRV 0x1a318 +#define PHYTIUM_PHY1_PLL0_TX_DRV 0x90318 + #define TX_DRV 0x3 + +#define PHYTIUM_PHY0_PLL0_TX_MGNFS 0x18140 +#define PHYTIUM_PHY0_PLL1_TX_MGNFS 0x1a140 +#define PHYTIUM_PHY1_PLL0_TX_MGNFS 0x90140 + +#define PHYTIUM_PHY0_PLL0_TX_CPOST 0x18130 +#define PHYTIUM_PHY0_PLL1_TX_CPOST 0x1a130 +#define PHYTIUM_PHY0_PLL1_TX_CPOST1 0x1a13c +#define PHYTIUM_PHY1_PLL0_TX_CPOST 0x90130 + +/******************************phy register end********************************************/ #endif /* __PHYTIUM_REG_H__ */ diff --git a/drivers/gpu/drm/phytium/x100_dc.c b/drivers/gpu/drm/phytium/x100_dc.c old mode 100644 new mode 100755 index 06394c232dab..beb040198210 --- a/drivers/gpu/drm/phytium/x100_dc.c +++ b/drivers/gpu/drm/phytium/x100_dc.c @@ -1,9 +1,16 @@ // SPDX-License-Identifier: GPL-2.0 -/* Phytium display drm driver +/* Phytium X100 display drm driver * - * Copyright (C) 2021 Phytium Technology Co., Ltd. + * Copyright (c) 2021 Phytium Limited. 
+ * + * Author: + * Yang Xun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ - #include #include #include @@ -61,77 +68,479 @@ static const unsigned int x100_cursor_formats[] = { DRM_FORMAT_ARGB8888, }; +#define MAXKERNELSIZE 9 +#define SUBPIXELINDEXBITS 5 +#define SUBPIXELCOUNT (1 << SUBPIXELINDEXBITS) +#define SUBPIXELLOADCOUNT (SUBPIXELCOUNT / 2 + 1) +#define WEIGHTSTATECOUNT (((SUBPIXELLOADCOUNT * MAXKERNELSIZE + 1) & ~1) / 2) +#define KERNELTABLESIZE (SUBPIXELLOADCOUNT * MAXKERNELSIZE * sizeof(uint16_t)) +#define PHYALIGN(n, align) (((n) + ((align) - 1)) & ~((align) - 1)) +#define KERNELSTATES (PHYALIGN(KERNELTABLESIZE + 4, 8)) +#define PHYPI 3.14159265358979323846f + +#define X100_MATH_Add(X, Y) (float)((X) + (Y)) +#define X100_MATH_Multiply(X, Y) (float)((X) * (Y)) +#define X100_MATH_Divide(X, Y) (float)((X) / (Y)) +#define X100_MATH_DivideFromUInteger(X, Y) ((float)(X) / (float)(Y)) +#define X100_MATH_I2Float(X) (float)(X) + +struct filter_blit_array { + uint8_t kernelSize; + uint32_t scaleFactor; + uint32_t *kernelStates; +}; + +static uint32_t x100_dc_scaling_get_factor(uint32_t src_size, uint32_t dst_size) +{ + uint32_t factor = 0; + + factor = ((src_size - 1) << SCALE_FACTOR_SRC_OFFSET) / (dst_size - 1); + + return factor; +} + +static float x100_dc_sint(float x) +{ + const float B = 1.2732395477; + const float C = -0.4052847346; + const float P = 0.2310792853; + float y; + + if (x < 0) + y = B*x - C*x*x; + else + y = B*x + C*x*x; + if (y < 0) + y = P * (y * (0 - y) - y) + y; + else + y = P * (y * y - y) + y; + return y; +} + +static float x100_dc_sinc_filter(float x, int radius) +{ + float pit, pitd, f1, f2, result; + float f_radius = X100_MATH_I2Float(radius); + + if (x == 0.0f) { + result = 1.0f; + } else if ((x < -f_radius) || (x > 
f_radius)) { + result = 0.0f; + } else { + pit = X100_MATH_Multiply(PHYPI, x); + pitd = X100_MATH_Divide(pit, f_radius); + f1 = X100_MATH_Divide(x100_dc_sint(pit), pit); + f2 = X100_MATH_Divide(x100_dc_sint(pitd), pitd); + result = X100_MATH_Multiply(f1, f2); + } + + return result; +} + +static int x100_dc_calculate_sync_table( + uint8_t kernel_size, + uint32_t src_size, + uint32_t dst_size, + struct filter_blit_array *kernel_info) +{ + uint32_t scale_factor; + float f_scale; + int kernel_half; + float f_subpixel_step; + float f_subpixel_offset; + uint32_t subpixel_pos; + int kernel_pos; + int padding; + uint16_t *kernel_array; + int range = 0; + + do { + /* Compute the scale factor. */ + scale_factor = x100_dc_scaling_get_factor(src_size, dst_size); + + /* Same kernel size and ratio as before? */ + if ((kernel_info->kernelSize == kernel_size) && + (kernel_info->scaleFactor == kernel_size)) { + break; + } + + /* check the array */ + if (kernel_info->kernelStates == NULL) + break; + + /* Store new parameters. */ + kernel_info->kernelSize = kernel_size; + kernel_info->scaleFactor = scale_factor; + + /* Compute the scale factor. */ + f_scale = X100_MATH_DivideFromUInteger(dst_size, src_size); + + /* Adjust the factor for magnification. */ + if (f_scale > 1.0f) + f_scale = 1.0f; + + /* Calculate the kernel half. */ + kernel_half = (int) (kernel_info->kernelSize >> 1); + + /* Calculate the subpixel step. */ + f_subpixel_step = X100_MATH_Divide(1.0f, X100_MATH_I2Float(SUBPIXELCOUNT)); + + /* Init the subpixel offset. */ + f_subpixel_offset = 0.5f; + + /* Determine kernel padding size. */ + padding = (MAXKERNELSIZE - kernel_info->kernelSize) / 2; + + /* Set initial kernel array pointer. */ + kernel_array = (uint16_t *) (kernel_info->kernelStates + 1); + + /* Loop through each subpixel. */ + for (subpixel_pos = 0; subpixel_pos < SUBPIXELLOADCOUNT; subpixel_pos++) { + /* Define a temporary set of weights. 
*/ + float fSubpixelSet[MAXKERNELSIZE]; + + /* Init the sum of all weights for the current subpixel. */ + float fWeightSum = 0.0f; + uint16_t weightSum = 0; + short int adjustCount, adjustFrom; + short int adjustment; + + /* Compute weights. */ + for (kernel_pos = 0; kernel_pos < MAXKERNELSIZE; kernel_pos++) { + /* Determine the current index. */ + int index = kernel_pos - padding; + + /* Pad with zeros. */ + if ((index < 0) || (index >= kernel_info->kernelSize)) { + fSubpixelSet[kernel_pos] = 0.0f; + } else { + if (kernel_info->kernelSize == 1) { + fSubpixelSet[kernel_pos] = 1.0f; + } else { + /* Compute the x position for filter function. */ + float fX = X100_MATH_Add( + X100_MATH_I2Float(index - kernel_half), + f_subpixel_offset); + fX = X100_MATH_Multiply(fX, f_scale); + + /* Compute the weight. */ + fSubpixelSet[kernel_pos] = x100_dc_sinc_filter(fX, + kernel_half); + } + + /* Update the sum of weights. */ + fWeightSum = X100_MATH_Add(fWeightSum, + fSubpixelSet[kernel_pos]); + } + } + + /* Adjust weights so that the sum will be 1.0. */ + for (kernel_pos = 0; kernel_pos < MAXKERNELSIZE; kernel_pos++) { + /* Normalize the current weight. */ + float fWeight = X100_MATH_Divide(fSubpixelSet[kernel_pos], + fWeightSum); + + /* Convert the weight to fixed point and store in the table. */ + if (fWeight == 0.0f) + kernel_array[kernel_pos] = 0x0000; + else if (fWeight >= 1.0f) + kernel_array[kernel_pos] = 0x4000; + else if (fWeight <= -1.0f) + kernel_array[kernel_pos] = 0xC000; + else + kernel_array[kernel_pos] = + (int16_t) X100_MATH_Multiply(fWeight, 16384.0f); + weightSum += kernel_array[kernel_pos]; + } + + /* Adjust the fixed point coefficients. 
*/ + adjustCount = 0x4000 - weightSum; + if (adjustCount < 0) { + adjustCount = -adjustCount; + adjustment = -1; + } else { + adjustment = 1; + } + + adjustFrom = (MAXKERNELSIZE - adjustCount) / 2; + for (kernel_pos = 0; kernel_pos < adjustCount; kernel_pos++) { + range = (MAXKERNELSIZE*subpixel_pos + adjustFrom + kernel_pos) * + sizeof(uint16_t); + if ((range >= 0) && (range < KERNELTABLESIZE)) + kernel_array[adjustFrom + kernel_pos] += adjustment; + else + DRM_ERROR("%s failed\n", __func__); + } + + kernel_array += MAXKERNELSIZE; + + /* Advance to the next subpixel. */ + f_subpixel_offset = X100_MATH_Add(f_subpixel_offset, -f_subpixel_step); + } + } while (0); + + return 0; +} + +static void x100_dc_scaling_config(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct drm_display_mode *mode = &crtc->state->adjusted_mode; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; + uint32_t scale_factor_x, scale_factor_y, i; + uint32_t kernelStates[128]; + struct filter_blit_array kernel_info_width; + void *tmp = NULL; + + if (mode->hdisplay != mode->crtc_hdisplay || mode->vdisplay != mode->crtc_vdisplay) { + phytium_crtc->src_width = mode->hdisplay; + phytium_crtc->src_height = mode->vdisplay; + phytium_crtc->dst_width = mode->crtc_hdisplay; + phytium_crtc->dst_height = mode->crtc_vdisplay; + + phytium_crtc->dst_x = (mode->crtc_hdisplay - phytium_crtc->dst_width) / 2; + phytium_crtc->dst_y = (mode->crtc_vdisplay - phytium_crtc->dst_height) / 2; + + scale_factor_x = x100_dc_scaling_get_factor(phytium_crtc->src_width, + phytium_crtc->dst_width); + scale_factor_y = x100_dc_scaling_get_factor(phytium_crtc->src_height, + phytium_crtc->dst_height); + if (scale_factor_y > (SCALE_FACTOR_Y_MAX << SCALE_FACTOR_SRC_OFFSET)) + scale_factor_y = (SCALE_FACTOR_Y_MAX << SCALE_FACTOR_SRC_OFFSET); + + phytium_writel_reg(priv, 
scale_factor_x & SCALE_FACTOR_X_MASK, + X100_DC_FRAMEBUFFER_SCALE_FACTOR_X(phys_pipe)); + phytium_writel_reg(priv, scale_factor_y & SCALE_FACTOR_Y_MASK, + X100_DC_FRAMEBUFFER_SCALE_FACTOR_Y(phys_pipe)); + phytium_writel_reg(priv, FRAMEBUFFER_TAP, + X100_DC_FRAMEBUFFER_SCALECONFIG(phys_pipe)); + + tmp = kmalloc(KERNELSTATES, GFP_KERNEL); + if (!tmp) { + DRM_ERROR("malloc %ld failed\n", KERNELSTATES); + return; + } + + memset(&kernel_info_width, 0, sizeof(struct filter_blit_array)); + kernel_info_width.kernelStates = tmp; + memset(kernel_info_width.kernelStates, 0, KERNELSTATES); + kernel_neon_begin(); + x100_dc_calculate_sync_table(FRAMEBUFFER_HORIZONTAL_FILTER_TAP, + phytium_crtc->src_width, + phytium_crtc->dst_width, + &kernel_info_width); + memset(kernelStates, 0, sizeof(kernelStates)); + memcpy(kernelStates, kernel_info_width.kernelStates + 1, KERNELSTATES - 4); + kernel_neon_end(); + phytium_writel_reg(priv, HORI_FILTER_INDEX, + X100_DC_FRAMEBUFFER_HORI_FILTER_INDEX(phys_pipe)); + for (i = 0; i < 128; i++) { + phytium_writel_reg(priv, kernelStates[i], + X100_DC_FRAMEBUFFER_HORI_FILTER(phys_pipe)); + } + + memset(&kernel_info_width, 0, sizeof(struct filter_blit_array)); + kernel_info_width.kernelStates = tmp; + memset(kernel_info_width.kernelStates, 0, KERNELSTATES); + kernel_neon_begin(); + x100_dc_calculate_sync_table(FRAMEBUFFER_FILTER_TAP, phytium_crtc->src_height, + phytium_crtc->dst_height, &kernel_info_width); + memset(kernelStates, 0, sizeof(kernelStates)); + memcpy(kernelStates, kernel_info_width.kernelStates + 1, KERNELSTATES - 4); + kernel_neon_end(); + phytium_writel_reg(priv, VERT_FILTER_INDEX, + X100_DC_FRAMEBUFFER_VERT_FILTER_INDEX(phys_pipe)); + for (i = 0; i < 128; i++) + phytium_writel_reg(priv, kernelStates[i], + X100_DC_FRAMEBUFFER_VERT_FILTER(phys_pipe)); + phytium_writel_reg(priv, INITIALOFFSET, + X100_DC_FRAMEBUFFER_INITIALOFFSET(phys_pipe)); + kfree(tmp); + phytium_crtc->scale_enable = true; + } else { + phytium_crtc->scale_enable = 
false; + } +} + +static int x100_dc_wait_se_timeout(struct phytium_display_private *priv, int port) +{ + int timeout = 500, config = 0, ret = 0; + + do { + mdelay(1); + timeout--; + config = phytium_readl_reg(priv, X100_DCREQ_PIX_CLOCK_CONFIG(port)); + } while ((!(config & FLAG_REPLY)) && timeout); + + if (timeout == 0) { + phytium_writel_reg(priv, config & (~FLAG_REQUEST), + X100_DCREQ_PIX_CLOCK_CONFIG(port)); + ret = -1; + } else { + timeout = 500; + phytium_writel_reg(priv, config & (~FLAG_REQUEST), + X100_DCREQ_PIX_CLOCK_CONFIG(port)); + do { + mdelay(1); + timeout--; + config = phytium_readl_reg(priv, X100_DCREQ_PIX_CLOCK_CONFIG(port)); + } while ((config & FLAG_REPLY) && timeout); + if (timeout == 0) + ret = -2; + } + mdelay(5); + + return ret; +} + void x100_dc_hw_vram_init(struct phytium_display_private *priv, resource_size_t vram_addr, resource_size_t vram_size) { uint32_t config; - uint32_t group_offset = priv->address_transform_base; - - config = phytium_readl_reg(priv, group_offset, - X100_GPU_ADDRESS_TRANSFORM_SRC_ADDR); - if (config) - phytium_writel_reg(priv, config, group_offset, - X100_GPU_ADDRESS_TRANSFORM_SRC_ADDR); - - config = phytium_readl_reg(priv, group_offset, - X100_GPU_ADDRESS_TRANSFORM_SIZE); - if (config) - phytium_writel_reg(priv, config, group_offset, - X100_GPU_ADDRESS_TRANSFORM_SIZE); - - config = phytium_readl_reg(priv, group_offset, - X100_GPU_ADDRESS_TRANSFORM_DST_ADDR); - if (config) - phytium_writel_reg(priv, config, group_offset, - X100_GPU_ADDRESS_TRANSFORM_DST_ADDR); phytium_writel_reg(priv, (vram_addr & SRC_ADDR_MASK) >> SRC_ADDR_OFFSET, - group_offset, X100_DC_ADDRESS_TRANSFORM_SRC_ADDR); + X100_DC_ADDRESS_TRANSFORM_SRC_ADDR); phytium_writel_reg(priv, (vram_size >> SIZE_OFFSET) | ADDRESS_TRANSFORM_ENABLE, - group_offset, X100_DC_ADDRESS_TRANSFORM_SIZE); - config = phytium_readl_reg(priv, group_offset, X100_DC_ADDRESS_TRANSFORM_DST_ADDR); - phytium_writel_reg(priv, config, group_offset, X100_DC_ADDRESS_TRANSFORM_DST_ADDR); 
+ X100_DC_ADDRESS_TRANSFORM_SIZE); + config = phytium_readl_reg(priv, X100_DC_ADDRESS_TRANSFORM_DST_ADDR); + phytium_writel_reg(priv, config, X100_DC_ADDRESS_TRANSFORM_DST_ADDR); +} + +void x100_dc_hw_enable_irq(struct phytium_display_private *priv, uint32_t phys_pipe) +{ + phytium_writel_reg(priv, INT_ENABLE, X100_DC_INT_ENABLE(phys_pipe)); +} + +void x100_dc_hw_disable_irq(struct phytium_display_private *priv, uint32_t phys_pipe) +{ + phytium_writel_reg(priv, INT_DISABLE, X100_DC_INT_ENABLE(phys_pipe)); +} + +bool x100_dc_hw_get_irq_status(struct phytium_display_private *priv, uint32_t phys_pipe) +{ + int ret = 0; + + ret = phytium_readl_reg(priv, X100_DC_INT_STATUS(phys_pipe)); + + return (ret & INT_STATUS) ? true : false; } void x100_dc_hw_clear_msi_irq(struct phytium_display_private *priv, uint32_t phys_pipe) { - phytium_writel_reg(priv, MSI_CLEAR, priv->dcreq_reg_base[phys_pipe], X100_DCREQ_MSI_CLEAR); + phytium_writel_reg(priv, MSI_CLEAR, X100_DCREQ_MSI_CLEAR(phys_pipe)); } -void x100_dc_hw_config_pix_clock(struct drm_crtc *crtc, int clock) +void x100_dc_hw_crtc_begin(struct phytium_display_private *priv, uint32_t phys_pipe) +{ + int config = 0; + + config = phytium_readl_reg(priv, X100_DC_FRAMEBUFFER_CONFIG(phys_pipe)); + if (config & FRAMEBUFFER_RESET) { + phytium_writel_reg(priv, config | FRAMEBUFFER_VALID_PENDING, + X100_DC_FRAMEBUFFER_CONFIG(phys_pipe)); + } +} + +void x100_dc_hw_crtc_flush(struct phytium_display_private *priv, uint32_t phys_pipe) +{ + int config = 0; + + config = phytium_readl_reg(priv, X100_DC_FRAMEBUFFER_CONFIG(phys_pipe)); + phytium_writel_reg(priv, config&(~FRAMEBUFFER_VALID_PENDING), + X100_DC_FRAMEBUFFER_CONFIG(phys_pipe)); +} + +void x100_dc_hw_crtc_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { struct drm_device *dev = crtc->dev; struct phytium_display_private *priv = dev->dev_private; + struct drm_display_mode *mode = &crtc->state->adjusted_mode; + struct drm_atomic_state *state = old_state->state; + struct 
drm_connector_state *new_conn_state; + struct drm_connector *conn; struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); int phys_pipe = phytium_crtc->phys_pipe; - uint32_t group_offset = priv->dcreq_reg_base[phys_pipe]; - int ret = 0; + int config = 0, i = 0, ret = 0; + + for_each_new_connector_in_state(state, conn, new_conn_state, i) { + if (new_conn_state->crtc != crtc) + continue; + + switch (conn->display_info.bpc) { + case 10: + phytium_crtc->bpc = DP_RGB101010; + break; + case 6: + phytium_crtc->bpc = DP_RGB666; + break; + default: + phytium_crtc->bpc = DP_RGB888; + break; + } + } /* config pix clock */ - phytium_writel_reg(priv, FLAG_REQUEST | CMD_PIXEL_CLOCK | (clock & PIXEL_CLOCK_MASK), - group_offset, X100_DCREQ_CMD_REGISTER); - ret = phytium_wait_cmd_done(priv, group_offset + X100_DCREQ_CMD_REGISTER, - FLAG_REQUEST, FLAG_REPLY); + phytium_writel_reg(priv, (mode->clock & PIX_CLOCK_MASK) | FLAG_REQUEST, + X100_DCREQ_PIX_CLOCK_CONFIG(phys_pipe)); + ret = x100_dc_wait_se_timeout(priv, phys_pipe); if (ret < 0) - DRM_ERROR("%s: failed to set pixel clock\n", __func__); + DRM_ERROR("%s wait se timeout(ret = %d)\n", __func__, ret); + + x100_dc_scaling_config(crtc, old_state); + config = ((mode->crtc_hdisplay & HDISPLAY_END_MASK) << HDISPLAY_END_SHIFT) + | ((mode->crtc_htotal&HDISPLAY_TOTAL_MASK) << HDISPLAY_TOTAL_SHIFT); + phytium_writel_reg(priv, config, X100_DC_HDISPLAY(phys_pipe)); + config = ((mode->crtc_hsync_start & HSYNC_START_MASK) << HSYNC_START_SHIFT) + | ((mode->crtc_hsync_end & HSYNC_END_MASK) << HSYNC_END_SHIFT) + | HSYNC_PULSE_ENABLED; + config |= (mode->flags & DRM_MODE_FLAG_PHSYNC) ? 
0 : HSYNC_NEGATIVE; + phytium_writel_reg(priv, config, X100_DC_HSYNC(phys_pipe)); + config = ((mode->crtc_vdisplay & VDISPLAY_END_MASK) << VDISPLAY_END_SHIFT) + | ((mode->crtc_vtotal & VDISPLAY_TOTAL_MASK) << VDISPLAY_TOTAL_SHIFT); + phytium_writel_reg(priv, config, X100_DC_VDISPLAY(phys_pipe)); + config = ((mode->crtc_vsync_start & VSYNC_START_MASK) << VSYNC_START_SHIFT) + | ((mode->crtc_vsync_end & VSYNC_END_MASK) << VSYNC_END_SHIFT) + | VSYNC_PULSE_ENABLED; + config |= (mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : VSYNC_NEGATIVE; + phytium_writel_reg(priv, config, X100_DC_VSYNC(phys_pipe)); + config = PANEL_DATAENABLE_ENABLE | PANEL_DATA_ENABLE | PANEL_CLOCK_ENABLE; + phytium_writel_reg(priv, config, X100_DC_PANEL_CONFIG(phys_pipe)); + config = phytium_crtc->bpc | OUTPUT_DP; + phytium_writel_reg(priv, config, X100_DC_DP_CONFIG(phys_pipe)); + + config = phytium_readl_reg(priv, X100_DC_FRAMEBUFFER_CONFIG(phys_pipe)); + + if (crtc->state->active) + config |= FRAMEBUFFER_OUTPUT | FRAMEBUFFER_RESET; + else + config &= (~(FRAMEBUFFER_OUTPUT | FRAMEBUFFER_RESET)); + + if (phytium_crtc->scale_enable) + config |= FRAMEBUFFER_SCALE_ENABLE; + else + config &= (~FRAMEBUFFER_SCALE_ENABLE); + + phytium_writel_reg(priv, config, + X100_DC_FRAMEBUFFER_CONFIG(phys_pipe)); } -void x100_dc_hw_disable(struct drm_crtc *crtc) +void x100_dc_hw_crtc_disable(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { struct drm_device *dev = crtc->dev; struct phytium_display_private *priv = dev->dev_private; struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); int reset_timeout = 100; - int config = 0; + int config = 0, ret = 0; int phys_pipe = phytium_crtc->phys_pipe; - // reset dc - config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], X100_DC_CLOCK_CONTROL); - phytium_writel_reg(priv, config | SOFT_RESET, priv->dc_reg_base[phys_pipe], - X100_DC_CLOCK_CONTROL); - phytium_writel_reg(priv, 0, priv->dc_reg_base[phys_pipe], X100_DC_CLOCK_CONTROL); + config = 
phytium_readl_reg(priv, X100_DC_CLOCK_CONTROL(phys_pipe)); + phytium_writel_reg(priv, config | SOFT_RESET, X100_DC_CLOCK_CONTROL(phys_pipe)); + phytium_writel_reg(priv, 0, X100_DC_CLOCK_CONTROL(phys_pipe)); do { - config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], X100_DC_CLOCK_IDLE); + config = phytium_readl_reg(priv, X100_DC_CLOCK_IDLE(phys_pipe)); if (config | IS_IDLE) break; mdelay(1); @@ -139,16 +548,19 @@ void x100_dc_hw_disable(struct drm_crtc *crtc) } while (reset_timeout); /* reset pix clock */ - x100_dc_hw_config_pix_clock(crtc, 0); - // reset dc + phytium_writel_reg(priv, (0x0 & PIX_CLOCK_MASK) | FLAG_REQUEST, + X100_DCREQ_PIX_CLOCK_CONFIG(phys_pipe)); + ret = x100_dc_wait_se_timeout(priv, phys_pipe); + if (ret < 0) + DRM_ERROR("%s wait se timeout(ret = %d)\n", __func__, ret); + reset_timeout = 100; - config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], X100_DC_CLOCK_CONTROL); - phytium_writel_reg(priv, config | SOFT_RESET, priv->dc_reg_base[phys_pipe], - X100_DC_CLOCK_CONTROL); - phytium_writel_reg(priv, 0, priv->dc_reg_base[phys_pipe], X100_DC_CLOCK_CONTROL); + config = phytium_readl_reg(priv, X100_DC_CLOCK_CONTROL(phys_pipe)); + phytium_writel_reg(priv, config | SOFT_RESET, X100_DC_CLOCK_CONTROL(phys_pipe)); + phytium_writel_reg(priv, 0, X100_DC_CLOCK_CONTROL(phys_pipe)); do { - config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], X100_DC_CLOCK_IDLE); + config = phytium_readl_reg(priv, X100_DC_CLOCK_IDLE(phys_pipe)); if (config | IS_IDLE) break; mdelay(1); @@ -156,12 +568,12 @@ void x100_dc_hw_disable(struct drm_crtc *crtc) } while (reset_timeout); /* reset dcreq */ - phytium_writel_reg(priv, DCREQ_PLAN_A, priv->dcreq_reg_base[phys_pipe], X100_DCREQ_PLAN); - phytium_writel_reg(priv, 0, priv->dcreq_reg_base[phys_pipe], X100_DCREQ_CONTROL); - phytium_writel_reg(priv, DCREQ_RESET, priv->dcreq_reg_base[phys_pipe], X100_DCREQ_RESET); + phytium_writel_reg(priv, DCREQ_PLAN_A, X100_DCREQ_PLAN(phys_pipe)); + 
phytium_writel_reg(priv, 0, X100_DCREQ_CONTROL(phys_pipe)); + phytium_writel_reg(priv, DCREQ_RESET, X100_DCREQ_RESET(phys_pipe)); msleep(20); phytium_writel_reg(priv, (~DCREQ_RESET)&DCREQ_RESET_MASK, - priv->dcreq_reg_base[phys_pipe], X100_DCREQ_RESET); + X100_DCREQ_RESET(phys_pipe)); } int x100_dc_hw_fb_format_check(const struct drm_mode_fb_cmd2 *mode_cmd, int count) @@ -252,18 +664,182 @@ void x100_dc_hw_plane_get_cursor_format(const uint64_t **format_modifiers, *format_count = ARRAY_SIZE(x100_cursor_formats); } -void x100_dc_hw_update_dcreq(struct drm_plane *plane) +static void x100_dc_get_plane_parameter(struct drm_plane *plane) +{ + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + struct drm_framebuffer *fb = plane->state->fb; + struct phytium_framebuffer *phytium_fb = to_phytium_framebuffer(fb); + struct phytium_gem_object *phytium_gem_obj = NULL; + int i, num_planes = 0; + + num_planes = drm_format_num_planes(fb->format->format); + for (i = 0; i < num_planes; i++) { + phytium_gem_obj = phytium_fb->phytium_gem_obj[i]; + phytium_plane->iova[i] = phytium_gem_obj->iova + fb->offsets[i]; + phytium_plane->size[i] = phytium_gem_obj->size - fb->offsets[i]; + + if (fb->modifier == DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC) + phytium_plane->tiling[i] = FRAMEBUFFER_TILE_MODE0; + else if (fb->modifier == DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC) + phytium_plane->tiling[i] = FRAMEBUFFER_TILE_MODE3; + else if (fb->modifier == DRM_FORMAT_MOD_LINEAR) + phytium_plane->tiling[i] = FRAMEBUFFER_LINEAR; + else + phytium_plane->tiling[i] = FRAMEBUFFER_LINEAR; + + if (i == 0) { + switch (fb->format->format) { + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_BGRA1010102: + phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB2101010; + break; + + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_BGRA8888: + phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB8888; + 
break; + + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_BGRX8888: + phytium_plane->format = FRAMEBUFFER_FORMAT_XRGB8888; + break; + + case DRM_FORMAT_ARGB4444: + case DRM_FORMAT_ABGR4444: + case DRM_FORMAT_RGBA4444: + case DRM_FORMAT_BGRA4444: + phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB4444; + break; + + case DRM_FORMAT_XRGB4444: + case DRM_FORMAT_XBGR4444: + case DRM_FORMAT_RGBX4444: + case DRM_FORMAT_BGRX4444: + phytium_plane->format = FRAMEBUFFER_FORMAT_XRGB4444; + break; + + case DRM_FORMAT_ARGB1555: + case DRM_FORMAT_ABGR1555: + case DRM_FORMAT_RGBA5551: + case DRM_FORMAT_BGRA5551: + phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB1555; + break; + + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_XBGR1555: + case DRM_FORMAT_RGBX5551: + case DRM_FORMAT_BGRX5551: + phytium_plane->format = FRAMEBUFFER_FORMAT_XRGB1555; + break; + + case DRM_FORMAT_RGB565: + case DRM_FORMAT_BGR565: + phytium_plane->format = FRAMEBUFFER_FORMAT_RGB565; + break; + + case DRM_FORMAT_YUYV: + phytium_plane->format = FRAMEBUFFER_FORMAT_YUYV; + break; + + case DRM_FORMAT_UYVY: + phytium_plane->format = FRAMEBUFFER_FORMAT_UYVY; + break; + + default: + DRM_ERROR("unsupported pixel format (format = %d)\n", + fb->format->format); + return; + } + + switch (fb->format->format) { + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB4444: + case DRM_FORMAT_XRGB4444: + case DRM_FORMAT_ARGB1555: + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_RGB565: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_ARGB; + phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; + + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ABGR4444: + case DRM_FORMAT_XBGR4444: + case DRM_FORMAT_ABGR1555: + case DRM_FORMAT_XBGR1555: + case DRM_FORMAT_BGR565: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_ABGR; + phytium_plane->uv_swizzle = 
FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; + + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_RGBA4444: + case DRM_FORMAT_RGBX4444: + case DRM_FORMAT_RGBA5551: + case DRM_FORMAT_RGBX5551: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_RGBA; + phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; + + case DRM_FORMAT_BGRA1010102: + case DRM_FORMAT_BGRA8888: + case DRM_FORMAT_BGRX8888: + case DRM_FORMAT_BGRA4444: + case DRM_FORMAT_BGRX4444: + case DRM_FORMAT_BGRA5551: + case DRM_FORMAT_BGRX5551: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_BGRA; + phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; + + case DRM_FORMAT_YUYV: + case DRM_FORMAT_UYVY: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_ARGB; + phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; + + default: + DRM_ERROR("unsupported pixel format (format = %d)\n", + fb->format->format); + return; + } + } + } +} + +static void x100_dc_primary_plane_update(struct drm_plane *plane) { struct drm_device *dev = plane->dev; struct phytium_display_private *priv = dev->dev_private; struct phytium_plane *phytium_plane = to_phytium_plane(plane); + struct drm_framebuffer *fb = plane->state->fb; int phys_pipe = phytium_plane->phys_pipe; - uint32_t group_offset = priv->dcreq_reg_base[phys_pipe]; + int src_x, src_y, crtc_x, crtc_y, crtc_w, crtc_h; + unsigned long base_offset; int config; + src_x = plane->state->src_x >> 16; + src_y = plane->state->src_y >> 16; + crtc_x = plane->state->crtc_x; + crtc_y = plane->state->crtc_y; + crtc_w = plane->state->crtc_w; + crtc_h = plane->state->crtc_h; + if (phytium_plane->tiling[0] == FRAMEBUFFER_LINEAR) { phytium_writel_reg(priv, DCREQ_MODE_LINEAR, - group_offset, X100_DCREQ_PLANE0_CONFIG); + X100_DCREQ_PLANE0_CONFIG(phys_pipe)); } else { config = DCREQ_NO_LOSSY; if (phytium_plane->tiling[0] == FRAMEBUFFER_TILE_MODE0) @@ -302,20 +878,118 @@ void x100_dc_hw_update_dcreq(struct drm_plane 
*plane) config |= DCREQ_ARGBSWIZZLE_ARGB; config |= DCREQ_MODE_TILE; phytium_writel_reg(priv, phytium_plane->iova[0] & 0xffffffff, - group_offset, X100_DCREQ_PLANE0_ADDR_START); + X100_DCREQ_PLANE0_ADDR_START(phys_pipe)); phytium_writel_reg(priv, (phytium_plane->iova[0] + phytium_plane->size[0]) & - 0xffffffff, group_offset, X100_DCREQ_PLANE0_ADDR_END); - phytium_writel_reg(priv, config, group_offset, X100_DCREQ_PLANE0_CONFIG); + 0xffffffff, X100_DCREQ_PLANE0_ADDR_END(phys_pipe)); + phytium_writel_reg(priv, config, X100_DCREQ_PLANE0_CONFIG(phys_pipe)); } + + /* config dc */ + /* Y */ + base_offset = src_x * fb->format->cpp[0] + src_y*fb->pitches[0]; + phytium_writel_reg(priv, (phytium_plane->iova[0] >> PREFIX_SHIFT) & PREFIX_MASK, + X100_DCREQ_PIX_DMA_PREFIX(phys_pipe)); + phytium_writel_reg(priv, (phytium_plane->iova[0] + base_offset) & ADDRESS_MASK, + X100_DC_FRAMEBUFFER_Y_ADDRESS(phys_pipe)); + phytium_writel_reg(priv, ALIGN(fb->pitches[0], 128), + X100_DC_FRAMEBUFFER_Y_STRIDE(phys_pipe)); + + /* U */ + phytium_writel_reg(priv, phytium_plane->iova[1] & 0xffffffff, + X100_DC_FRAMEBUFFER_U_ADDRESS(phys_pipe)); + phytium_writel_reg(priv, ALIGN(fb->pitches[1], 128), + X100_DC_FRAMEBUFFER_U_STRIDE(phys_pipe)); + + /* V */ + phytium_writel_reg(priv, phytium_plane->iova[2] & 0xffffffff, + X100_DC_FRAMEBUFFER_V_ADDRESS(phys_pipe)); + phytium_writel_reg(priv, ALIGN(fb->pitches[2], 128), + X100_DC_FRAMEBUFFER_V_STRIDE(phys_pipe)); + + /* size */ + phytium_writel_reg(priv, (crtc_w & WIDTH_MASK) | ((crtc_h&HEIGHT_MASK) + << HEIGHT_SHIFT), X100_DC_FRAMEBUFFER_SIZE(phys_pipe)); + /* config */ + config = phytium_readl_reg(priv, X100_DC_FRAMEBUFFER_CONFIG(phys_pipe)); + config &= ~(FRAMEBUFFER_FORMAT_MASK << FRAMEBUFFER_FORMAT_SHIFT); + config |= (phytium_plane->format << FRAMEBUFFER_FORMAT_SHIFT); + config &= ~(1 << FRAMEBUFFER_UVSWIZZLE_SHIFT); + config |= (phytium_plane->uv_swizzle << FRAMEBUFFER_UVSWIZZLE_SHIFT); + config &= ~(FRAMEBUFFER_SWIZZLE_MASK << 
FRAMEBUFFER_SWIZZLE_SHIFT); + config |= (phytium_plane->swizzle << FRAMEBUFFER_SWIZZLE_SHIFT); + config &= ~(FRAMEBUFFER_TILE_MODE_MASK << FRAMEBUFFER_TILE_MODE_SHIFT); + config |= (phytium_plane->tiling[0] << FRAMEBUFFER_TILE_MODE_SHIFT); + config &= (~FRAMEBUFFER_CLEAR); + phytium_writel_reg(priv, config, X100_DC_FRAMEBUFFER_CONFIG(phys_pipe)); } -void x100_dc_hw_update_primary_hi_addr(struct drm_plane *plane) +static void x100_dc_cursor_plane_update(struct drm_plane *plane) { struct drm_device *dev = plane->dev; struct phytium_display_private *priv = dev->dev_private; struct phytium_plane *phytium_plane = to_phytium_plane(plane); + struct drm_framebuffer *fb = plane->state->fb; int phys_pipe = phytium_plane->phys_pipe; + int config; - phytium_writel_reg(priv, (phytium_plane->iova[0] >> PREFIX_SHIFT) & PREFIX_MASK, - priv->dcreq_reg_base[phys_pipe], X100_DCREQ_PIX_DMA_PREFIX); + phytium_plane->enable = 1; + phytium_plane->cursor_hot_x = fb->hot_x; + phytium_plane->cursor_hot_y = fb->hot_y; + phytium_plane->cursor_x = plane->state->crtc_x + fb->hot_x; + phytium_plane->cursor_y = plane->state->crtc_y + fb->hot_y; + + config = CURSOR_FORMAT_ARGB8888 | + ((phytium_plane->cursor_hot_y & CURSOR_HOT_Y_MASK) << CURSOR_HOT_Y_SHIFT) | + ((phytium_plane->cursor_hot_x & CURSOR_HOT_X_MASK) << CURSOR_HOT_X_SHIFT); + + phytium_writel_reg(priv, config, X100_DC_CURSOR_CONFIG(phys_pipe)); + phytium_writel_reg(priv, + ((phytium_plane->cursor_x & CURSOR_X_MASK) << CURSOR_X_SHIFT) | + ((phytium_plane->cursor_y & CURSOR_Y_MASK) << CURSOR_Y_SHIFT), + X100_DC_CURSOR_LOCATION(phys_pipe)); + phytium_writel_reg(priv, phytium_plane->iova[0], X100_DC_CURSOR_ADDRESS(phys_pipe)); +} + +void x100_dc_hw_plane_update(struct drm_plane *plane, struct drm_plane_state *old_state) +{ + struct drm_framebuffer *fb, *old_fb; + + DRM_DEBUG_KMS("update plane: type=%d\n", plane->type); + if (!plane->state->crtc || !plane->state->fb) + return; + + fb = plane->state->fb; + old_fb = old_state->fb; + + if (fb) 
+ drm_framebuffer_reference(fb); + if (old_fb) + drm_framebuffer_unreference(old_fb); + + x100_dc_get_plane_parameter(plane); + + if (plane->type == DRM_PLANE_TYPE_PRIMARY) + x100_dc_primary_plane_update(plane); + else if (plane->type == DRM_PLANE_TYPE_CURSOR) + x100_dc_cursor_plane_update(plane); +} + +void x100_dc_hw_plane_disable(struct drm_plane *plane, struct drm_plane_state *old_state) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + int phys_pipe = phytium_plane->phys_pipe; + int config; + + if (plane->type == DRM_PLANE_TYPE_PRIMARY) { + phytium_writel_reg(priv, CLEAR_VALUE_RED, + X100_DC_FRAMEBUFFER_CLEARVALUE(phys_pipe)); + config = phytium_readl_reg(priv, X100_DC_FRAMEBUFFER_CONFIG(phys_pipe)); + config |= FRAMEBUFFER_CLEAR; + phytium_writel_reg(priv, config, X100_DC_FRAMEBUFFER_CONFIG(phys_pipe)); + } else if (plane->type == DRM_PLANE_TYPE_CURSOR) { + phytium_writel_reg(priv, CURSOR_FORMAT_DISABLED, + X100_DC_CURSOR_CONFIG(phys_pipe)); + } } diff --git a/drivers/gpu/drm/phytium/x100_dc.h b/drivers/gpu/drm/phytium/x100_dc.h old mode 100644 new mode 100755 index ae98b4ffe0cf..3a82897128c2 --- a/drivers/gpu/drm/phytium/x100_dc.h +++ b/drivers/gpu/drm/phytium/x100_dc.h @@ -1,7 +1,15 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Phytium display drm driver +/* Phytium X100 display drm driver * - * Copyright (C) 2021 Phytium Technology Co., Ltd. + * Copyright (c) 2021 Phytium Limited. + * + * Author: + * Yang Xun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
*/ #ifndef __X100_DC_H__ @@ -15,9 +23,14 @@ extern void x100_dc_hw_vram_init(struct phytium_display_private *priv, resource_size_t vram_addr, resource_size_t vram_size); +extern void x100_dc_hw_enable_irq(struct phytium_display_private *priv, uint32_t phys_pipe); +extern void x100_dc_hw_disable_irq(struct phytium_display_private *priv, uint32_t phys_pipe); +extern bool x100_dc_hw_get_irq_status(struct phytium_display_private *priv, uint32_t phys_pipe); extern void x100_dc_hw_clear_msi_irq(struct phytium_display_private *priv, uint32_t phys_pipe); -extern void x100_dc_hw_config_pix_clock(struct drm_crtc *crtc, int clock); -extern void x100_dc_hw_disable(struct drm_crtc *crtc); +extern void x100_dc_hw_crtc_begin(struct phytium_display_private *priv, uint32_t phys_pipe); +extern void x100_dc_hw_crtc_flush(struct phytium_display_private *priv, uint32_t phys_pipe); +extern void x100_dc_hw_crtc_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state); +extern void x100_dc_hw_crtc_disable(struct drm_crtc *crtc, struct drm_crtc_state *old_state); extern int x100_dc_hw_fb_format_check(const struct drm_mode_fb_cmd2 *mode_cmd, int count); extern void x100_dc_hw_plane_get_primary_format(const uint64_t **format_modifiers, const uint32_t **formats, @@ -25,6 +38,6 @@ extern void x100_dc_hw_plane_get_primary_format(const uint64_t **format_modifier extern void x100_dc_hw_plane_get_cursor_format(const uint64_t **format_modifiers, const uint32_t **formats, uint32_t *format_count); -void x100_dc_hw_update_dcreq(struct drm_plane *plane); -void x100_dc_hw_update_primary_hi_addr(struct drm_plane *plane); +extern void x100_dc_hw_plane_update(struct drm_plane *plane, struct drm_plane_state *old_state); +extern void x100_dc_hw_plane_disable(struct drm_plane *plane, struct drm_plane_state *old_state); #endif /* __X100_DC_H__ */ diff --git a/drivers/gpu/drm/phytium/x100_dp.c b/drivers/gpu/drm/phytium/x100_dp.c index 4cc390442461..9bee7c782f2d 100644 --- 
a/drivers/gpu/drm/phytium/x100_dp.c +++ b/drivers/gpu/drm/phytium/x100_dp.c @@ -1,7 +1,15 @@ // SPDX-License-Identifier: GPL-2.0 -/* Phytium display drm driver +/* Phytium X100 display drm driver * - * Copyright (C) 2021 Phytium Technology Co., Ltd. + * Copyright (c) 2021 Phytium Limited. + * + * Author: + * Yang Xun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #include "phytium_display_drv.h" @@ -95,7 +103,40 @@ static int cpost_val[4][4][4] = // [link_rate][swing][emphasis] }, }; -static int x100_dp_hw_set_phy_lane_and_rate(struct phytium_dp_device *phytium_dp, +static void x100_phy_writel(struct phytium_dp_device *phytium_dp, u32 address, u32 data) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + +#if DEBUG_LOG + pr_info("phy address write: 0x%x data:0x%x\n", address, data); +#endif + phytium_writel_reg(priv, address, X100_PHY_ACCESS_ADDRESS(port)); + phytium_writel_reg(priv, data, X100_PHY_WRITE_DATA(port)); + phytium_writel_reg(priv, ACCESS_WRITE, X100_PHY_ACCESS_CTRL(port)); + udelay(10); +} + +static u32 x100_phy_readl(struct phytium_dp_device *phytium_dp, u32 address) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + u32 data; + + phytium_writel_reg(priv, address, X100_PHY_ACCESS_ADDRESS(port)); + phytium_writel_reg(priv, ACCESS_READ, X100_PHY_ACCESS_CTRL(port)); + udelay(10); + data = phytium_readl_reg(priv, X100_PHY_READ_DATA(port)); +#if DEBUG_LOG + pr_info("phy address read: 0x%x data:0x%x\n", address, data); +#endif + + return data; +} + +static int x100_dp_phy_set_lane_and_rate(struct phytium_dp_device *phytium_dp, uint8_t link_lane_count, uint32_t link_rate) { 
@@ -112,13 +153,13 @@ static int x100_dp_hw_set_phy_lane_and_rate(struct phytium_dp_device *phytium_dp mask |= (((1<port; int i = 0, data, tmp, mask; int timeout = 500, ret = 0; if (port == 0 || port == 1) { - phytium_phy_writel(phytium_dp, X100_PHY0_APB_RESET, APB_RESET); + x100_phy_writel(phytium_dp, X100_PHY0_APB_RESET, APB_RESET); - phytium_phy_writel(phytium_dp, X100_PHY0_PIPE_RESET, RESET); + x100_phy_writel(phytium_dp, X100_PHY0_PIPE_RESET, RESET); /* config lane to dp mode */ data = 0; @@ -605,9 +646,9 @@ static int x100_dp_hw_init_phy(struct phytium_dp_device *phytium_dp) } mask = (mask << (port*LANE_BIT_SHIFT*4)); data = (data << (port*LANE_BIT_SHIFT*4)); - tmp = phytium_phy_readl(phytium_dp, X100_PHY0_MODE); + tmp = x100_phy_readl(phytium_dp, X100_PHY0_MODE); tmp = (tmp & (~mask)) | data; - phytium_phy_writel(phytium_dp, X100_PHY0_MODE, tmp); + x100_phy_writel(phytium_dp, X100_PHY0_MODE, tmp); /* config lane master or slave */ data = 0; @@ -618,9 +659,9 @@ static int x100_dp_hw_init_phy(struct phytium_dp_device *phytium_dp) } mask = (mask << (port*LANE_MASTER_SHIFT*4)); data = (data << (port*LANE_MASTER_SHIFT*4)); - tmp = phytium_phy_readl(phytium_dp, X100_PHY0_LINK_CFG); + tmp = x100_phy_readl(phytium_dp, X100_PHY0_LINK_CFG); tmp = (tmp & (~mask)) | data; - phytium_phy_writel(phytium_dp, X100_PHY0_LINK_CFG, tmp); + x100_phy_writel(phytium_dp, X100_PHY0_LINK_CFG, tmp); /* pll clock enable */ data = 0; @@ -631,9 +672,9 @@ static int x100_dp_hw_init_phy(struct phytium_dp_device *phytium_dp) } mask = (mask << (port*PLL_EN_SHIFT*4)); data = (data << (port*PLL_EN_SHIFT*4)); - tmp = phytium_phy_readl(phytium_dp, X100_PHY0_PLL_EN); + tmp = x100_phy_readl(phytium_dp, X100_PHY0_PLL_EN); tmp = (tmp & (~mask)) | data; - phytium_phy_writel(phytium_dp, X100_PHY0_PLL_EN, tmp); + x100_phy_writel(phytium_dp, X100_PHY0_PLL_EN, tmp); /* config input 20 bit */ data = 0; @@ -644,9 +685,9 @@ static int x100_dp_hw_init_phy(struct phytium_dp_device *phytium_dp) } mask = (mask 
<< (port*BIT_20_SHIFT*4)); data = (data << (port*BIT_20_SHIFT*4)); - tmp = phytium_phy_readl(phytium_dp, X100_PHY0_PMA_WIDTH); + tmp = x100_phy_readl(phytium_dp, X100_PHY0_PMA_WIDTH); tmp = (tmp & (~mask)) | data; - phytium_phy_writel(phytium_dp, X100_PHY0_PMA_WIDTH, tmp); + x100_phy_writel(phytium_dp, X100_PHY0_PMA_WIDTH, tmp); /* config lane active power state */ data = 0; @@ -656,13 +697,13 @@ static int x100_dp_hw_init_phy(struct phytium_dp_device *phytium_dp) mask |= (((1<dev; struct phytium_display_private *priv = dev->dev_private; int port = phytium_dp->port; - uint32_t group_offset = priv->dcreq_reg_base[port]; int ret = 0; phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | PANEL_POWER_ENABLE, - group_offset, X100_DCREQ_CMD_REGISTER); - ret = phytium_wait_cmd_done(priv, group_offset + X100_DCREQ_CMD_REGISTER, - FLAG_REQUEST, FLAG_REPLY); + X100_DCREQ_PIX_CLOCK_CONFIG(port)); + + ret = x100_dp_wait_se_timeout(priv, port); if (ret < 0) - DRM_ERROR("%s: failed to poweron panel\n", __func__); + DRM_ERROR("%s wait se timeout(ret = %d)\n", __func__, ret); } static void x100_dp_hw_poweroff_panel(struct phytium_dp_device *phytium_dp) @@ -767,15 +838,14 @@ static void x100_dp_hw_poweroff_panel(struct phytium_dp_device *phytium_dp) struct drm_device *dev = phytium_dp->dev; struct phytium_display_private *priv = dev->dev_private; int port = phytium_dp->port; - uint32_t group_offset = priv->dcreq_reg_base[port]; int ret = 0; phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | PANEL_POWER_DISABLE, - group_offset, X100_DCREQ_CMD_REGISTER); - ret = phytium_wait_cmd_done(priv, group_offset + X100_DCREQ_CMD_REGISTER, - FLAG_REQUEST, FLAG_REPLY); + X100_DCREQ_PIX_CLOCK_CONFIG(port)); + + ret = x100_dp_wait_se_timeout(priv, port); if (ret < 0) - DRM_ERROR("%s: failed to poweroff panel\n", __func__); + DRM_ERROR("%s wait se timeout(ret = %d)\n", __func__, ret); } static void x100_dp_hw_enable_backlight(struct phytium_dp_device *phytium_dp) @@ -783,14 +853,13 @@ static 
void x100_dp_hw_enable_backlight(struct phytium_dp_device *phytium_dp) struct drm_device *dev = phytium_dp->dev; struct phytium_display_private *priv = dev->dev_private; int port = phytium_dp->port, ret = 0; - uint32_t group_offset = priv->dcreq_reg_base[port]; phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | BACKLIGHT_ENABLE, - group_offset, X100_DCREQ_CMD_REGISTER); - ret = phytium_wait_cmd_done(priv, group_offset + X100_DCREQ_CMD_REGISTER, - FLAG_REQUEST, FLAG_REPLY); + X100_DCREQ_PIX_CLOCK_CONFIG(port)); + + ret = x100_dp_wait_se_timeout(priv, port); if (ret < 0) - DRM_ERROR("%s: failed to enable backlight\n", __func__); + DRM_ERROR("%s wait se timeout(ret = %d)\n", __func__, ret); } static void x100_dp_hw_disable_backlight(struct phytium_dp_device *phytium_dp) @@ -798,15 +867,14 @@ static void x100_dp_hw_disable_backlight(struct phytium_dp_device *phytium_dp) struct drm_device *dev = phytium_dp->dev; struct phytium_display_private *priv = dev->dev_private; int port = phytium_dp->port; - uint32_t group_offset = priv->dcreq_reg_base[port]; int ret = 0; phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | BACKLIGHT_DISABLE, - group_offset, X100_DCREQ_CMD_REGISTER); - ret = phytium_wait_cmd_done(priv, group_offset + X100_DCREQ_CMD_REGISTER, - FLAG_REQUEST, FLAG_REPLY); + X100_DCREQ_PIX_CLOCK_CONFIG(port)); + + ret = x100_dp_wait_se_timeout(priv, port); if (ret < 0) - DRM_ERROR("%s: failed to disable backlight\n", __func__); + DRM_ERROR("%s wait se timeout(ret = %d)\n", __func__, ret); } static uint32_t x100_dp_hw_get_backlight(struct phytium_dp_device *phytium_dp) @@ -814,9 +882,8 @@ static uint32_t x100_dp_hw_get_backlight(struct phytium_dp_device *phytium_dp) struct drm_device *dev = phytium_dp->dev; struct phytium_display_private *priv = dev->dev_private; int config; - uint32_t group_offset = priv->address_transform_base; - config = phytium_readl_reg(priv, group_offset, X100_DC_ADDRESS_TRANSFORM_BACKLIGHT_VALUE); + config = phytium_readl_reg(priv, 
X100_DP_BACKLIGHT_VALUE); return ((config >> BACKLIGHT_VALUE_SHIFT) & BACKLIGHT_VALUE_MASK); } @@ -825,7 +892,6 @@ static int x100_dp_hw_set_backlight(struct phytium_dp_device *phytium_dp, uint32 struct drm_device *dev = phytium_dp->dev; struct phytium_display_private *priv = dev->dev_private; int port = phytium_dp->port; - uint32_t group_offset = priv->dcreq_reg_base[port]; int config = 0; int ret = 0; @@ -835,26 +901,746 @@ static int x100_dp_hw_set_backlight(struct phytium_dp_device *phytium_dp, uint32 } config = FLAG_REQUEST | CMD_BACKLIGHT | ((level & BACKLIGHT_MASK) << BACKLIGHT_SHIFT); - phytium_writel_reg(priv, config, group_offset, X100_DCREQ_CMD_REGISTER); - ret = phytium_wait_cmd_done(priv, group_offset + X100_DCREQ_CMD_REGISTER, - FLAG_REQUEST, FLAG_REPLY); + phytium_writel_reg(priv, config, X100_DCREQ_PIX_CLOCK_CONFIG(port)); + ret = x100_dp_wait_se_timeout(priv, port); if (ret < 0) - DRM_ERROR("%s: failed to set backlight\n", __func__); + DRM_ERROR("%s wait se timeout(ret = %d)\n", __func__, ret); + +out: + return ret; +} + +static int +X100_dp_aux_transfer_write(struct phytium_dp_device *phytium_dp, struct drm_dp_aux_msg *msg) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + unsigned int i = 0, j = 0; + unsigned int cmd = 0; + unsigned int aux_status = 0, interrupt_status = 0; + unsigned char *data = msg->buffer; + int count_timeout = 0; + long ret = 0; + + for (i = 0; i < 3; i++) { + /* clear X100_DP_INTERRUPT_RAW_STATUS */ + phytium_readl_reg(priv, X100_DP_INTERRUPT_STATUS(port)); + phytium_writel_reg(priv, msg->address, X100_DP_AUX_ADDRESS(port)); + for (j = 0; j < msg->size; j++) + phytium_writeb_reg(priv, data[j], X100_DP_AUX_WRITE_FIFO(port)); + + cmd = ((msg->request & COMMAND_MASK) << COMMAND_SHIFT); + if (msg->size == 0) + cmd |= ADDRESS_ONLY; + else + cmd |= (msg->size-1) & BYTE_COUNT_MASK; + phytium_writel_reg(priv, cmd, X100_DP_AUX_COMMAND(port)); + 
+ count_timeout = 0; + do { + mdelay(5); + interrupt_status = phytium_readl_reg(priv, + X100_DP_INTERRUPT_RAW_STATUS(port)); + aux_status = phytium_readl_reg(priv, X100_DP_AUX_STATUS(port)); + if ((aux_status & REPLY_RECEIVED) || (aux_status & REPLY_ERROR) + || (interrupt_status & REPLY_TIMEOUT)) { + DRM_DEBUG_KMS("aux wait exit\n"); + break; + } + count_timeout++; + } while (count_timeout < 6); + + phytium_readl_reg(priv, X100_DP_INTERRUPT_STATUS(port)); + if (interrupt_status & REPLY_TIMEOUT) { + DRM_DEBUG_KMS("aux write reply timeout\n"); + continue; + } else if (aux_status & REPLY_ERROR) { + DRM_DEBUG_KMS("aux write reply error\n"); + continue; + } else if (aux_status & REPLY_RECEIVED) { + DRM_DEBUG_KMS("aux write reply received succussful\n"); + break; + } + } + + if (interrupt_status & REPLY_TIMEOUT) { + DRM_ERROR("aux(%d) write reply timeout\n", phytium_dp->port); + ret = -EIO; + goto out; + } else if (aux_status & REPLY_ERROR) { + DRM_ERROR("aux(%d) write reply error\n", phytium_dp->port); + ret = -EIO; + goto out; + } else if ((aux_status & REPLY_RECEIVED) != REPLY_RECEIVED) { + DRM_ERROR("aux(%d) write reply no response\n", phytium_dp->port); + ret = -EIO; + goto out; + } + + msg->reply = phytium_readl_reg(priv, X100_DP_AUX_REPLY_CODE(port)); + ret = msg->size; +out: + return ret; +} + +static int +X100_dp_aux_transfer_read(struct phytium_dp_device *phytium_dp, struct drm_dp_aux_msg *msg) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + unsigned int i = 0; + unsigned int cmd = 0; + unsigned int aux_status = 0, interrupt_status = 0; + unsigned char *data = msg->buffer; + int count_timeout = 0; + long ret = 0; + + for (i = 0; i < 3; i++) { + phytium_readl_reg(priv, X100_DP_INTERRUPT_STATUS(port)); + phytium_writel_reg(priv, msg->address, X100_DP_AUX_ADDRESS(port)); + cmd = ((msg->request & COMMAND_MASK) << COMMAND_SHIFT); + if (msg->size == 0) + cmd |= ADDRESS_ONLY; + 
else + cmd |= ((msg->size-1) & BYTE_COUNT_MASK); + phytium_writel_reg(priv, cmd, X100_DP_AUX_COMMAND(port)); + + count_timeout = 0; + do { + mdelay(5); + interrupt_status = phytium_readl_reg(priv, + X100_DP_INTERRUPT_RAW_STATUS(port)); + aux_status = phytium_readl_reg(priv, X100_DP_AUX_STATUS(port)); + if ((aux_status & REPLY_RECEIVED) || (aux_status & REPLY_ERROR) + || (interrupt_status & REPLY_TIMEOUT)) { + DRM_DEBUG_KMS("aux wait exit\n"); + break; + } + count_timeout++; + } while (count_timeout < 6); + + phytium_readl_reg(priv, X100_DP_INTERRUPT_STATUS(port)); + if (interrupt_status & REPLY_TIMEOUT) { + DRM_DEBUG_KMS("aux read reply timeout\n"); + continue; + } else if (aux_status & REPLY_ERROR) { + DRM_DEBUG_KMS("aux read reply error\n"); + continue; + } else if (aux_status & REPLY_RECEIVED) { + DRM_DEBUG_KMS("aux read reply received succussful\n"); + break; + } + } + + if (interrupt_status & REPLY_TIMEOUT) { + DRM_ERROR("aux(%d) read reply timeout\n", phytium_dp->port); + ret = -EIO; + goto out; + } else if (aux_status & REPLY_ERROR) { + DRM_ERROR("aux(%d) read reply error\n", phytium_dp->port); + ret = -EIO; + goto out; + } else if ((aux_status & REPLY_RECEIVED) != REPLY_RECEIVED) { + DRM_ERROR("aux(%d) read reply no response\n", phytium_dp->port); + ret = -EIO; + goto out; + } + + msg->reply = phytium_readl_reg(priv, X100_DP_AUX_REPLY_CODE(port)); + ret = phytium_readl_reg(priv, X100_DP_AUX_REPLY_DATA_COUNT(port)); + + if (ret > msg->size) { + ret = msg->size; + } else if (ret != msg->size) { + DRM_DEBUG_KMS("aux read count error(ret:0x%lx != 0x%lx)\n", ret, msg->size); + ret = -EBUSY; + goto out; + } + + for (i = 0; i < ret; i++) + data[i] = phytium_readl_reg(priv, X100_DP_AUX_REPLY_DATA(port)); + +out: + return ret; +} + +static int x100_dp_hw_register_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + struct drm_device *dev = 
phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + + seq_printf(m, "addr:h0x%08x h0x%08x\n", X100_DP_M_VID(port), + phytium_readl_reg(priv, X100_DP_M_VID(port))); + seq_printf(m, "addr:h0x%08x h0x%08x\n", X100_DP_N_VID(port), + phytium_readl_reg(priv, X100_DP_N_VID(port))); + seq_printf(m, "addr:h0x%08x h0x%08x\n", X100_DP_TRANSFER_UNIT_SIZE(port), + phytium_readl_reg(priv, X100_DP_TRANSFER_UNIT_SIZE(port))); + seq_printf(m, "addr:h0x%08x h0x%08x\n", X100_DP_DATA_COUNT(port), + phytium_readl_reg(priv, X100_DP_DATA_COUNT(port))); + seq_printf(m, "addr:h0x%08x h0x%08x\n", X100_DP_MAIN_LINK_HTOTAL(port), + phytium_readl_reg(priv, X100_DP_MAIN_LINK_HTOTAL(port))); + seq_printf(m, "addr:h0x%08x h0x%08x\n", X100_DP_MAIN_LINK_HRES(port), + phytium_readl_reg(priv, X100_DP_MAIN_LINK_HRES(port))); + seq_printf(m, "addr:h0x%08x h0x%08x\n", X100_DP_MAIN_LINK_HSWIDTH(port), + phytium_readl_reg(priv, X100_DP_MAIN_LINK_HSWIDTH(port))); + seq_printf(m, "addr:h0x%08x h0x%08x\n", X100_DP_MAIN_LINK_HSTART(port), + phytium_readl_reg(priv, X100_DP_MAIN_LINK_HSTART(port))); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", X100_DP_MAIN_LINK_VTOTAL(port), + phytium_readl_reg(priv, X100_DP_MAIN_LINK_VTOTAL(port))); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", X100_DP_MAIN_LINK_VRES(port), + phytium_readl_reg(priv, X100_DP_MAIN_LINK_VRES(port))); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", X100_DP_MAIN_LINK_VSWIDTH(port), + phytium_readl_reg(priv, X100_DP_MAIN_LINK_VSWIDTH(port))); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", X100_DP_MAIN_LINK_VSTART(port), + phytium_readl_reg(priv, X100_DP_MAIN_LINK_VSTART(port))); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", X100_DP_MAIN_LINK_POLARITY(port), + phytium_readl_reg(priv, X100_DP_MAIN_LINK_POLARITY(port))); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", X100_DP_MAIN_LINK_MISC0(port), + phytium_readl_reg(priv, X100_DP_MAIN_LINK_MISC0(port))); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", 
X100_DP_MAIN_LINK_MISC1(port), + phytium_readl_reg(priv, X100_DP_MAIN_LINK_MISC1(port))); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", X100_DP_USER_SYNC_POLARITY(port), + phytium_readl_reg(priv, X100_DP_USER_SYNC_POLARITY(port))); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", X100_DP_VIDEO_STREAM_ENABLE(port), + phytium_readl_reg(priv, X100_DP_VIDEO_STREAM_ENABLE(port))); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", X100_DP_SECONDARY_STREAM_ENABLE(port), + phytium_readl_reg(priv, X100_DP_SECONDARY_STREAM_ENABLE(port))); + seq_puts(m, "audio:\n"); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", X100_DP_SEC_INPUT_SELECT(port), + phytium_readl_reg(priv, X100_DP_SEC_INPUT_SELECT(port))); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", X100_DP_SEC_DIRECT_CLKDIV(port), + phytium_readl_reg(priv, X100_DP_SEC_DIRECT_CLKDIV(port))); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", X100_DP_SEC_CHANNEL_COUNT(port), + phytium_readl_reg(priv, X100_DP_SEC_CHANNEL_COUNT(port))); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", X100_DP_SEC_CHANNEL_MAP(port), + phytium_readl_reg(priv, X100_DP_SEC_CHANNEL_MAP(port))); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", X100_DP_SEC_DATA_WINDOW(port), + phytium_readl_reg(priv, X100_DP_SEC_DATA_WINDOW(port))); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", X100_DP_SEC_CS_CATEGORY_CODE(port), + phytium_readl_reg(priv, X100_DP_SEC_CS_CATEGORY_CODE(port))); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", X100_DP_SEC_MAUD(port), + phytium_readl_reg(priv, X100_DP_SEC_MAUD(port))); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", X100_DP_SEC_NAUD(port), + phytium_readl_reg(priv, X100_DP_SEC_NAUD(port))); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", X100_DP_SEC_CLOCK_MODE(port), + phytium_readl_reg(priv, X100_DP_SEC_CLOCK_MODE(port))); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", X100_DP_SEC_CS_SOURCE_FORMAT(port), + phytium_readl_reg(priv, X100_DP_SEC_CS_SOURCE_FORMAT(port))); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", X100_DP_SEC_CS_LENGTH_ORIG_FREQ(port), + phytium_readl_reg(priv, 
X100_DP_SEC_CS_LENGTH_ORIG_FREQ(port))); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", X100_DP_SEC_CS_FREQ_CLOCK_ACCURACY(port), + phytium_readl_reg(priv, X100_DP_SEC_CS_FREQ_CLOCK_ACCURACY(port))); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", X100_DP_SEC_AUDIO_ENABLE(port), + phytium_readl_reg(priv, X100_DP_SEC_AUDIO_ENABLE(port))); + + return 0; +} + + +static void x100_dp_hw_set_test_pattern(struct phytium_dp_device *phytium_dp, + uint8_t lane_count, + uint8_t test_pattern, + uint8_t *custom_pattern, + uint32_t custom_pattern_size) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, val = 0, tmp = 0, i; + + if ((test_pattern == PHYTIUM_PHY_TP_80BIT_CUSTOM) + && custom_pattern && (custom_pattern_size > 0)) { + val = *(int *)custom_pattern; + phytium_writel_reg(priv, val, X100_DP_CUSTOM_80BIT_PATTERN_0(port)); + val = *(int *)(custom_pattern + 4); + phytium_writel_reg(priv, val, X100_DP_CUSTOM_80BIT_PATTERN_1(port)); + val = *(short int *)(custom_pattern + 8); + phytium_writel_reg(priv, val, X100_DP_CUSTOM_80BIT_PATTERN_2(port)); + } + + if (test_pattern == PHYTIUM_PHY_TP_D10_2 || test_pattern == PHYTIUM_PHY_TP_PRBS7 + || test_pattern == PHYTIUM_PHY_TP_80BIT_CUSTOM) + phytium_writel_reg(priv, SCRAMBLING_DISABLE, X100_DP_SCRAMBLING_DISABLE(port)); + else + phytium_writel_reg(priv, SCRAMBLING_ENABLE, X100_DP_SCRAMBLING_DISABLE(port)); + + tmp = test_pattern - PHYTIUM_PHY_TP_NONE + TEST_PATTERN_NONE; + val = 0; + for (i = 0; i < lane_count; i++) + val |= (tmp << (TEST_PATTERN_LANE_SHIFT * i)); + phytium_writel_reg(priv, val, X100_DP_LINK_QUAL_PATTERN_SET(port)); +} + +static void x100_dp_hw_set_train_pattern(struct phytium_dp_device *phytium_dp, + uint8_t train_pattern) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, tmp = 0; + + /* Scrambling is disabled for TPS1/TPS2/3 and enabled for TPS4 */ + if 
(train_pattern == DP_TRAINING_PATTERN_4 + || train_pattern == DP_TRAINING_PATTERN_DISABLE) { + phytium_writel_reg(priv, SCRAMBLING_ENABLE, X100_DP_SCRAMBLING_DISABLE(port)); + phytium_writel_reg(priv, SCRAMBLER_RESET, X100_DP_FORCE_SCRAMBLER_RESET(port)); + } else { + phytium_writel_reg(priv, SCRAMBLING_DISABLE, X100_DP_SCRAMBLING_DISABLE(port)); + } + switch (train_pattern) { + case DP_TRAINING_PATTERN_DISABLE: + tmp = TRAINING_OFF; + break; + case DP_TRAINING_PATTERN_1: + tmp = TRAINING_PATTERN_1; + break; + case DP_TRAINING_PATTERN_2: + tmp = TRAINING_PATTERN_2; + break; + case DP_TRAINING_PATTERN_3: + tmp = TRAINING_PATTERN_3; + break; + case DP_TRAINING_PATTERN_4: + tmp = TRAINING_PATTERN_4; + break; + default: + tmp = TRAINING_OFF; + break; + } + + phytium_writel_reg(priv, tmp, X100_DP_TRAINING_PATTERN_SET(port)); +} + +static void x100_dp_hw_set_link(struct phytium_dp_device *phytium_dp, + uint8_t link_lane_count, uint32_t link_rate) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, ret = 0, retry = 3; + + phytium_writel_reg(priv, link_lane_count, + X100_DP_LANE_COUNT_SET(port)); + phytium_writel_reg(priv, + drm_dp_link_rate_to_bw_code(link_rate), + X100_DP_LINK_BW_SET(port)); + + if (drm_dp_enhanced_frame_cap(phytium_dp->dpcd)) + phytium_writel_reg(priv, ENHANCED_FRAME_ENABLE, + X100_DP_ENHANCED_FRAME_EN(port)); + else + phytium_writel_reg(priv, ENHANCED_FRAME_DISABLE, + X100_DP_ENHANCED_FRAME_EN(port)); + +try_again: + ret = x100_dp_phy_set_lane_and_rate(phytium_dp, link_lane_count, link_rate); + if ((ret < 0) && retry) { + retry--; + goto try_again; + } +} + +void x100_dp_hw_set_lane_setting(struct phytium_dp_device *phytium_dp, + uint32_t link_rate, uint8_t train_set) +{ + x100_dp_phy_set_lane_setting(phytium_dp, link_rate, train_set); +} + +static int X100_rate[] = {162000, 270000, 540000, 810000}; + +static void x100_dp_set_source_rate_and_lane_count(struct 
phytium_dp_device *phytium_dp) +{ + phytium_dp->source_rates = X100_rate; + phytium_dp->num_source_rates = num_source_rates; + + if (phytium_dp->port == 0) + phytium_dp->source_max_lane_count = source_max_lane_count; + else if (phytium_dp->port == 1) + phytium_dp->source_max_lane_count = source_max_lane_count; + else if (phytium_dp->port == 2) + phytium_dp->source_max_lane_count = 1; + else + phytium_dp->source_max_lane_count = 1; +} + +void x100_dp_hw_get_hpd_state(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t val = 0, raw_state = 0; + + val = phytium_readl_reg(priv, X100_DP_INTERRUPT_RAW_STATUS(port)); + + /* maybe miss hpd, so used for clear X100_DP_INTERRUPT_RAW_STATUS */ + phytium_readl_reg(priv, X100_DP_INTERRUPT_STATUS(port)); + raw_state = phytium_readl_reg(priv, X100_DP_SINK_HPD_STATE(port)); + if (val & HPD_EVENT) + phytium_dp->dp_hpd_state.hpd_event_state = true; + + if (val & HPD_IRQ) + phytium_dp->dp_hpd_state.hpd_irq_state = true; + + if (raw_state & HPD_CONNECT) + phytium_dp->dp_hpd_state.hpd_raw_state = true; + else + phytium_dp->dp_hpd_state.hpd_raw_state = false; +} + +void x100_dp_hw_hpd_irq_setup(struct phytium_dp_device *phytium_dp, bool enable) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + + phytium_dp->dp_hpd_state.hpd_irq_enable = enable; + if (enable) + phytium_writel_reg(priv, HPD_OTHER_MASK, X100_DP_INTERRUPT_MASK(port)); + else + phytium_writel_reg(priv, HPD_IRQ_MASK|HPD_EVENT_MASK|HPD_OTHER_MASK, + X100_DP_INTERRUPT_MASK(port)); +} + +void x100_dp_hw_disable_audio(struct phytium_dp_device *phytium_dp) +{ + +} + +void x100_dp_hw_enable_audio(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = 
phytium_dp->port; + int config = 0, config1, data_window = 0; + const struct dp_audio_n_m *n_m = NULL; + + config = phytium_readl_reg(priv, X100_DP_SEC_AUDIO_ENABLE(port)); + phytium_writel_reg(priv, CHANNEL_MUTE_ENABLE, X100_DP_SEC_AUDIO_ENABLE(port)); + + data_window = 90*(phytium_dp->link_rate)/100 + *(phytium_dp->mode.htotal - phytium_dp->mode.hdisplay) + /phytium_dp->mode.clock/4; + + phytium_writel_reg(priv, data_window, X100_DP_SEC_DATA_WINDOW(port)); + + n_m = phytium_dp_audio_get_n_m(phytium_dp->link_rate, phytium_dp->audio_info.sample_rate); + if (n_m == NULL) { + DRM_NOTE("can not get n_m for link_rate(%d) and sample_rate(%d)\n", + phytium_dp->link_rate, phytium_dp->audio_info.sample_rate); + phytium_writel_reg(priv, 0, X100_DP_SEC_MAUD(port)); + phytium_writel_reg(priv, 0, X100_DP_SEC_NAUD(port)); + + } else { + phytium_writel_reg(priv, n_m->m, X100_DP_SEC_MAUD(port)); + phytium_writel_reg(priv, n_m->n, X100_DP_SEC_NAUD(port)); + } + + config1 = phytium_readl_reg(priv, X100_DP_SECONDARY_STREAM_ENABLE(port)); + phytium_writel_reg(priv, SECONDARY_STREAM_DISABLE, + X100_DP_SECONDARY_STREAM_ENABLE(port)); + phytium_writel_reg(priv, config1, X100_DP_SECONDARY_STREAM_ENABLE(port)); + phytium_writel_reg(priv, config, X100_DP_SEC_AUDIO_ENABLE(port)); +} + +static void x100_dp_hw_audio_shutdown(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + + phytium_writel_reg(priv, SECONDARY_STREAM_DISABLE, + X100_DP_SECONDARY_STREAM_ENABLE(port)); +} + +static int x100_dp_hw_audio_digital_mute(struct phytium_dp_device *phytium_dp, bool enable) +{ + struct phytium_display_private *priv = phytium_dp->dev->dev_private; + int port = phytium_dp->port; + int ret = 0; + + if (enable) + phytium_writel_reg(priv, CHANNEL_MUTE_ENABLE, X100_DP_SEC_AUDIO_ENABLE(port)); + else + phytium_writel_reg(priv, SEC_AUDIO_ENABLE, X100_DP_SEC_AUDIO_ENABLE(port)); + + 
return ret; +} + +int x100_dp_hw_audio_hw_params(struct phytium_dp_device *phytium_dp, struct audio_info audio_info) +{ + struct phytium_display_private *priv = phytium_dp->dev->dev_private; + int port = phytium_dp->port; + int ret = 0, data_window = 0; + const struct dp_audio_n_m *n_m = NULL; + uint32_t fs, ws, fs_accurac; + + DRM_DEBUG_KMS("%s:set port%d sample_rate(%d) channels(%d) sample_width(%d)\n", + __func__, phytium_dp->port, audio_info.sample_rate, + audio_info.channels, audio_info.sample_width); + + phytium_writel_reg(priv, INPUT_SELECT_I2S, X100_DP_SEC_INPUT_SELECT(port)); + phytium_writel_reg(priv, APB_CLOCK/audio_info.sample_rate, + X100_DP_SEC_DIRECT_CLKDIV(port)); + phytium_writel_reg(priv, audio_info.channels & CHANNEL_MASK, + X100_DP_SEC_CHANNEL_COUNT(port)); + phytium_writel_reg(priv, CHANNEL_MAP_DEFAULT, X100_DP_SEC_CHANNEL_MAP(port)); + data_window = 90*(phytium_dp->link_rate)/100 + *(phytium_dp->mode.htotal - phytium_dp->mode.hdisplay) + /phytium_dp->mode.clock/4; + phytium_writel_reg(priv, data_window, X100_DP_SEC_DATA_WINDOW(port)); + phytium_writel_reg(priv, 0xb5, X100_DP_SEC_CS_CATEGORY_CODE(port)); + + phytium_writel_reg(priv, CLOCK_MODE_SYNC, X100_DP_SEC_CLOCK_MODE(port)); + phytium_writel_reg(priv, CS_SOURCE_FORMAT_DEFAULT, + X100_DP_SEC_CS_SOURCE_FORMAT(port)); + + switch (audio_info.sample_rate) { + case 32000: + fs = ORIG_FREQ_32000; + fs_accurac = SAMPLING_FREQ_32000; + break; + case 44100: + fs = ORIG_FREQ_44100; + fs_accurac = SAMPLING_FREQ_44100; + break; + case 48000: + fs = ORIG_FREQ_48000; + fs_accurac = SAMPLING_FREQ_48000; + break; + case 96000: + fs = ORIG_FREQ_96000; + fs_accurac = SAMPLING_FREQ_96000; + break; + case 176400: + fs = ORIG_FREQ_176400; + fs_accurac = SAMPLING_FREQ_176400; + break; + case 192000: + fs = ORIG_FREQ_192000; + fs_accurac = SAMPLING_FREQ_192000; + break; + default: + DRM_ERROR("dp not support sample_rate %d\n", audio_info.sample_rate); + goto out; + } + + switch (audio_info.sample_width) { + case 
16: + ws = WORD_LENGTH_16; + break; + case 18: + ws = WORD_LENGTH_18; + break; + case 20: + ws = WORD_LENGTH_20; + break; + case 24: + ws = WORD_LENGTH_24; + break; + default: + DRM_ERROR("dp not support sample_width %d\n", audio_info.sample_width); + goto out; + } + + phytium_writel_reg(priv, ((fs&ORIG_FREQ_MASK)<link_rate, audio_info.sample_rate); + if (n_m == NULL) { + DRM_NOTE("can not get n_m for link_rate(%d) and sample_rate(%d)\n", + phytium_dp->link_rate, audio_info.sample_rate); + phytium_writel_reg(priv, 0, X100_DP_SEC_MAUD(port)); + phytium_writel_reg(priv, 0, X100_DP_SEC_NAUD(port)); + + } else { + phytium_writel_reg(priv, n_m->m, X100_DP_SEC_MAUD(port)); + phytium_writel_reg(priv, n_m->n, X100_DP_SEC_NAUD(port)); + } + phytium_writel_reg(priv, SECONDARY_STREAM_ENABLE, + X100_DP_SECONDARY_STREAM_ENABLE(port)); + phytium_dp->audio_info = audio_info; + + return 0; out: + phytium_writel_reg(priv, SECONDARY_STREAM_DISABLE, + X100_DP_SECONDARY_STREAM_ENABLE(port)); + return ret; } -bool x100_dp_hw_spread_is_enable(struct phytium_dp_device *phytium_dp) +void x100_dp_hw_disable_video(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + + phytium_writel_reg(priv, SST_MST_SOURCE_0_DISABLE, + X100_DP_VIDEO_STREAM_ENABLE(port)); +} + +void x100_dp_hw_enable_video(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + + phytium_writel_reg(priv, SST_MST_SOURCE_0_ENABLE, + X100_DP_VIDEO_STREAM_ENABLE(port)); + phytium_writel_reg(priv, LINK_SOFT_RESET, X100_DP_SOFT_RESET(port)); +} + +bool x100_dp_hw_video_is_enable(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + + return phytium_readl_reg(priv, 
X100_DP_VIDEO_STREAM_ENABLE(port)) ? true : false; +} + +void x100_dp_hw_config_video(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + unsigned long link_bw, date_rate = 0; + struct drm_display_info *display_info = &phytium_dp->connector.display_info; + unsigned char tu_size = 64; + unsigned long data_per_tu = 0; + int symbols_per_tu, frac_symbols_per_tu, symbol_count, udc, value; + + /* cal M/N and tu_size */ + phytium_writel_reg(priv, phytium_dp->mode.crtc_clock/10, X100_DP_M_VID(port)); + phytium_writel_reg(priv, phytium_dp->link_rate/10, X100_DP_N_VID(port)); + link_bw = phytium_dp->link_rate * phytium_dp->link_lane_count; + date_rate = (phytium_dp->mode.crtc_clock * display_info->bpc * 3)/8; + + /* mul 10 for register setting */ + data_per_tu = 10*tu_size * date_rate/link_bw; + symbols_per_tu = (data_per_tu/10)&0xff; + frac_symbols_per_tu = (data_per_tu%10*16/10) & 0xf; + phytium_writel_reg(priv, frac_symbols_per_tu<<24 | symbols_per_tu<<16 | tu_size, + X100_DP_TRANSFER_UNIT_SIZE(port)); + + symbol_count = (phytium_dp->mode.crtc_hdisplay*display_info->bpc*3 + 7)/8; + udc = (symbol_count + phytium_dp->link_lane_count - 1)/phytium_dp->link_lane_count; + phytium_writel_reg(priv, udc, X100_DP_DATA_COUNT(port)); + + /* config main stream attributes */ + phytium_writel_reg(priv, phytium_dp->mode.crtc_htotal, + X100_DP_MAIN_LINK_HTOTAL(port)); + phytium_writel_reg(priv, phytium_dp->mode.crtc_hdisplay, + X100_DP_MAIN_LINK_HRES(port)); + phytium_writel_reg(priv, + phytium_dp->mode.crtc_hsync_end - phytium_dp->mode.crtc_hsync_start, + X100_DP_MAIN_LINK_HSWIDTH(port)); + phytium_writel_reg(priv, phytium_dp->mode.crtc_htotal - phytium_dp->mode.crtc_hsync_start, + X100_DP_MAIN_LINK_HSTART(port)); + phytium_writel_reg(priv, phytium_dp->mode.crtc_vtotal, + X100_DP_MAIN_LINK_VTOTAL(port)); + phytium_writel_reg(priv, phytium_dp->mode.crtc_vdisplay, + 
X100_DP_MAIN_LINK_VRES(port)); + phytium_writel_reg(priv, + phytium_dp->mode.crtc_vsync_end - phytium_dp->mode.crtc_vsync_start, + X100_DP_MAIN_LINK_VSWIDTH(port)); + phytium_writel_reg(priv, phytium_dp->mode.crtc_vtotal - phytium_dp->mode.crtc_vsync_start, + X100_DP_MAIN_LINK_VSTART(port)); + + value = 0; + if (phytium_dp->mode.flags & DRM_MODE_FLAG_PHSYNC) + value = value & (~HSYNC_POLARITY_LOW); + else + value = value | HSYNC_POLARITY_LOW; + + if (phytium_dp->mode.flags & DRM_MODE_FLAG_PVSYNC) + value = value & (~VSYNC_POLARITY_LOW); + else + value = value | VSYNC_POLARITY_LOW; + phytium_writel_reg(priv, value, X100_DP_MAIN_LINK_POLARITY(port)); + + switch (display_info->bpc) { + case 10: + value = (MISC0_BIT_DEPTH_10BIT << MISC0_BIT_DEPTH_OFFSET); + break; + case 6: + value = (MISC0_BIT_DEPTH_6BIT << MISC0_BIT_DEPTH_OFFSET); + break; + default: + value = (MISC0_BIT_DEPTH_8BIT << MISC0_BIT_DEPTH_OFFSET); + break; + } + value |= (MISC0_COMPONENT_FORMAT_RGB << MISC0_COMPONENT_FORMAT_SHIFT) + | MISC0_SYNCHRONOUS_CLOCK; + phytium_writel_reg(priv, value, X100_DP_MAIN_LINK_MISC0(port)); + phytium_writel_reg(priv, 0, X100_DP_MAIN_LINK_MISC1(port)); + + value = USER_ODDEVEN_POLARITY_HIGH | USER_DATA_ENABLE_POLARITY_HIGH; + if (phytium_dp->mode.flags & DRM_MODE_FLAG_PHSYNC) + value = value | USER_HSYNC_POLARITY_HIGH; + else + value = value & (~USER_HSYNC_POLARITY_HIGH); + if (phytium_dp->mode.flags & DRM_MODE_FLAG_PVSYNC) + value = value | USER_VSYNC_POLARITY_HIGH; + else + value = value & (~USER_VSYNC_POLARITY_HIGH); + phytium_writel_reg(priv, value, X100_DP_USER_SYNC_POLARITY(port)); +} + +void x100_dp_hw_disable_output(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + + phytium_writel_reg(priv, TRANSMITTER_OUTPUT_DISABLE, + X100_DP_TRANSMITTER_OUTPUT_ENABLE(port)); + phytium_writel_reg(priv, LINK_SOFT_RESET, X100_DP_SOFT_RESET(port)); +} + 
+void x100_dp_hw_enable_output(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + + phytium_writel_reg(priv, LINK_SOFT_RESET, X100_DP_SOFT_RESET(port)); + phytium_writel_reg(priv, TRANSMITTER_OUTPUT_ENABLE, + X100_DP_TRANSMITTER_OUTPUT_ENABLE(port)); +} + +void x100_dp_hw_enable_input_source(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + + phytium_writel_reg(priv, VIRTUAL_SOURCE_0_ENABLE, + X100_INPUT_SOURCE_ENABLE(port)); +} + +void x100_dp_hw_disable_input_source(struct phytium_dp_device *phytium_dp) { struct drm_device *dev = phytium_dp->dev; struct phytium_display_private *priv = dev->dev_private; - int port = phytium_dp->port, config; - uint32_t group_offset = priv->address_transform_base; + int port = phytium_dp->port; - config = phytium_readl_reg(priv, group_offset, X100_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); + phytium_writel_reg(priv, (~VIRTUAL_SOURCE_0_ENABLE)&VIRTUAL_SOURCE_0_ENABLE_MASK, + X100_INPUT_SOURCE_ENABLE(port)); +} - return ((config & DP_SPREAD_ENABLE(port)) ? true:false); +bool x100_dp_hw_output_is_enable(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + + return phytium_readl_reg(priv, X100_DP_TRANSMITTER_OUTPUT_ENABLE(port)) ? 
true : false; } int x100_dp_hw_reset(struct phytium_dp_device *phytium_dp) @@ -863,19 +1649,16 @@ int x100_dp_hw_reset(struct phytium_dp_device *phytium_dp) struct phytium_display_private *priv = dev->dev_private; int port = phytium_dp->port; int timeout = 100, config, ret = 0; - uint32_t group_offset = priv->address_transform_base; - config = phytium_readl_reg(priv, group_offset, X100_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); + config = phytium_readl_reg(priv, X100_DC_DP_RESET_STATUS); config &= (~DC_DP_RESET_STATUS(port)); - - phytium_writel_reg(priv, config, group_offset, X100_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); - phytium_writel_reg(priv, FLAG_REQUEST | CMD_DC_DP_RESET, - priv->dcreq_reg_base[port], X100_DCREQ_CMD_REGISTER); + phytium_writel_reg(priv, config, X100_DC_DP_RESET_STATUS); + phytium_writel_reg(priv, CMD_DC_DP_RESET | FLAG_REQUEST, + X100_DCREQ_PIX_CLOCK_CONFIG(port)); do { mdelay(10); timeout--; - config = phytium_readl_reg(priv, group_offset, - X100_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); + config = phytium_readl_reg(priv, X100_DC_DP_RESET_STATUS); if (config & DC_DP_RESET_STATUS(port)) break; } while (timeout); @@ -884,21 +1667,64 @@ int x100_dp_hw_reset(struct phytium_dp_device *phytium_dp) ret = -1; } + return 0; +} + +int x100_dp_hw_init(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, ret, config; + + ret = x100_dp_hw_reset(phytium_dp); + if (ret) + goto out; + x100_dp_set_source_rate_and_lane_count(phytium_dp); + ret = x100_dp_phy_init(phytium_dp); + if (ret) + goto out; + mdelay(10); + phytium_writel_reg(priv, AUX_CLK_DIVIDER, X100_DP_AUX_CLK_DIVIDER(port)); + phytium_dp->fast_train_support = false; + + config = phytium_readl_reg(priv, X100_DC_DP_RESET_STATUS); + phytium_dp->hw_spread_enable = ((config & DP_SPREAD_ENABLE(port)) ? 
true:false); + +out: return ret; } static struct phytium_dp_func x100_dp_funcs = { - .dp_hw_reset = x100_dp_hw_reset, - .dp_hw_spread_is_enable = x100_dp_hw_spread_is_enable, - .dp_hw_set_backlight = x100_dp_hw_set_backlight, - .dp_hw_get_backlight = x100_dp_hw_get_backlight, - .dp_hw_disable_backlight = x100_dp_hw_disable_backlight, + .dp_hw_init = x100_dp_hw_init, + .dp_hw_enable_output = x100_dp_hw_enable_output, + .dp_hw_disable_output = x100_dp_hw_disable_output, + .dp_hw_output_is_enable = x100_dp_hw_output_is_enable, + .dp_hw_enable_input_source = x100_dp_hw_enable_input_source, + .dp_hw_disable_input_source = x100_dp_hw_disable_input_source, + .dp_hw_get_hpd_state = x100_dp_hw_get_hpd_state, + .dp_hw_hpd_irq_setup = x100_dp_hw_hpd_irq_setup, + .dp_hw_set_test_pattern = x100_dp_hw_set_test_pattern, + .dp_hw_set_link = x100_dp_hw_set_link, + .dp_hw_set_lane_setting = x100_dp_hw_set_lane_setting, + .dp_hw_set_train_pattern = x100_dp_hw_set_train_pattern, + .dp_hw_disable_video = x100_dp_hw_disable_video, + .dp_hw_enable_video = x100_dp_hw_enable_video, + .dp_hw_video_is_enable = x100_dp_hw_video_is_enable, + .dp_hw_config_video = x100_dp_hw_config_video, + .dp_hw_enable_audio = x100_dp_hw_enable_audio, + .dp_hw_disable_audio = x100_dp_hw_disable_audio, + .dp_hw_audio_shutdown = x100_dp_hw_audio_shutdown, + .dp_hw_audio_digital_mute = x100_dp_hw_audio_digital_mute, + .dp_hw_audio_hw_params = x100_dp_hw_audio_hw_params, + .dp_hw_aux_transfer_write = X100_dp_aux_transfer_write, + .dp_hw_aux_transfer_read = X100_dp_aux_transfer_read, + .dp_hw_register_show = x100_dp_hw_register_show, .dp_hw_enable_backlight = x100_dp_hw_enable_backlight, - .dp_hw_poweroff_panel = x100_dp_hw_poweroff_panel, + .dp_hw_disable_backlight = x100_dp_hw_disable_backlight, + .dp_hw_get_backlight = x100_dp_hw_get_backlight, + .dp_hw_set_backlight = x100_dp_hw_set_backlight, .dp_hw_poweron_panel = x100_dp_hw_poweron_panel, - .dp_hw_init_phy = x100_dp_hw_init_phy, - 
.dp_hw_set_phy_lane_setting = x100_dp_hw_set_phy_lane_setting, - .dp_hw_set_phy_lane_and_rate = x100_dp_hw_set_phy_lane_and_rate, + .dp_hw_poweroff_panel = x100_dp_hw_poweroff_panel, }; void x100_dp_func_register(struct phytium_dp_device *phytium_dp) diff --git a/drivers/gpu/drm/phytium/x100_dp.h b/drivers/gpu/drm/phytium/x100_dp.h index a7a0fc48a58b..a5882df1a360 100644 --- a/drivers/gpu/drm/phytium/x100_dp.h +++ b/drivers/gpu/drm/phytium/x100_dp.h @@ -1,7 +1,15 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Phytium display drm driver +/* Phytium X100 display drm driver * - * Copyright (C) 2021 Phytium Technology Co., Ltd. + * Copyright (c) 2021 Phytium Limited. + * + * Author: + * Yang Xun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ #ifndef __X100_DP_H__ diff --git a/drivers/gpu/drm/phytium/x100_reg.h b/drivers/gpu/drm/phytium/x100_reg.h old mode 100644 new mode 100755 index 130430e924b5..87940f8c964b --- a/drivers/gpu/drm/phytium/x100_reg.h +++ b/drivers/gpu/drm/phytium/x100_reg.h @@ -1,27 +1,177 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Phytium display drm driver +/* Phytium X100 display drm driver * - * Copyright (C) 2021 Phytium Technology Co., Ltd. + * Copyright (c) 2021 Phytium Limited. + * + * Author: + * Yang Xun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
*/ #ifndef __X100_REG_H__ #define __X100_REG_H__ -#include "phytium_reg.h" +#define X100_PIPE_BASE(pipe) (0x8000*pipe) +#define X100_DC_BASE(pipe) (X100_PIPE_BASE(pipe) + 0x0000) +#define X100_DCREQ_BASE(pipe) (X100_PIPE_BASE(pipe) + 0x2000) +#define X100_DP_BASE(pipe) (X100_PIPE_BASE(pipe) + 0x3000) +#define X100_ADDRESS_TRANSFORM_BASE 0x4000 +#define X100_PHY_BASE(pipe) (X100_PIPE_BASE(pipe) + 0x5000) /******************************dc register start******************************************/ -#define X100_DC_CLOCK_CONTROL 0x0000 +#define X100_DC_CLOCK_CONTROL(pipe) (X100_DC_BASE(pipe) + 0x0000) #define SOFT_RESET (1<<12) -#define X100_DC_CLOCK_IDLE 0x0004 +#define X100_DC_CLOCK_IDLE(pipe) (X100_DC_BASE(pipe) + 0x0004) #define IS_IDLE (1<<16) +#define X100_DC_FRAMEBUFFER_Y_ADDRESS(pipe) (X100_DC_BASE(pipe) + 0x1400) + #define ADDRESS_MASK 0xffffff80 +#define X100_DC_FRAMEBUFFER_Y_STRIDE(pipe) (X100_DC_BASE(pipe) + 0x1408) +#define X100_DC_PANEL_CONFIG(pipe) (X100_DC_BASE(pipe) + 0x1418) + #define PANEL_DATAENABLE_ENABLE (1<<0) + #define PANEL_DATA_ENABLE (1<<4) + #define PANEL_CLOCK_ENABLE (1<<8) +#define X100_DC_HDISPLAY(pipe) (X100_DC_BASE(pipe) + 0x1430) + #define HDISPLAY_END_SHIFT 0 + #define HDISPLAY_END_MASK 0x7fff + #define HDISPLAY_TOTAL_SHIFT 16 + #define HDISPLAY_TOTAL_MASK 0x7fff +#define X100_DC_HSYNC(pipe) (X100_DC_BASE(pipe) + 0x1438) + #define HSYNC_START_SHIFT 0 + #define HSYNC_START_MASK 0x7fff + #define HSYNC_END_SHIFT 15 + #define HSYNC_END_MASK 0x7fff + #define HSYNC_PULSE_ENABLED (1<<30) + #define HSYNC_NEGATIVE (1<<31) +#define X100_DC_VDISPLAY(pipe) (X100_DC_BASE(pipe) + 0x1440) + #define VDISPLAY_END_SHIFT 0 + #define VDISPLAY_END_MASK 0x7fff + #define VDISPLAY_TOTAL_SHIFT 16 + #define VDISPLAY_TOTAL_MASK 0x7fff +#define X100_DC_VSYNC(pipe) (X100_DC_BASE(pipe) + 0x1448) + #define VSYNC_START_SHIFT 0 + #define VSYNC_START_MASK 0x7fff + #define VSYNC_END_SHIFT 15 + #define VSYNC_END_MASK 0x7fff + #define VSYNC_PULSE_ENABLED (1<<30) + #define 
VSYNC_NEGATIVE (1<<31) +#define X100_DC_DISPLAY_CURRENT_LOCATION(pipe) (X100_DC_BASE(pipe) + 0x1450) +#define X100_DC_GAMMA_INDEX(pipe) (X100_DC_BASE(pipe) + 0x1458) +#define X100_DC_GAMMA_DATA(pipe) (X100_DC_BASE(pipe) + 0x1460) +#define X100_DC_CURSOR_CONFIG(pipe) (X100_DC_BASE(pipe) + 0x1468) + #define CURSOR_FORMAT_DISABLED 0x0 + #define CURSOR_FORMAT_MASKMODE 0x3 + #define CURSOR_FORMAT_ARGB8888 0x2 + #define CURSOR_FORMAT_MASK 0x3 + #define CURSOR_HOT_Y_SHIFT 8 + #define CURSOR_HOT_Y_MASK 0x1f + #define CURSOR_HOT_X_SHIFT 16 + #define CURSOR_HOT_X_MASK 0x1f +#define X100_DC_CURSOR_ADDRESS(pipe) (X100_DC_BASE(pipe) + 0x146c) +#define X100_DC_CURSOR_LOCATION(pipe) (X100_DC_BASE(pipe) + 0x1470) + #define CURSOR_X_SHIFT 0 + #define CURSOR_X_MASK 0x7fff + #define CURSOR_Y_SHIFT 16 + #define CURSOR_Y_MASK 0x7fff +#define X100_DC_CURSOR_BACKGROUND(pipe) (X100_DC_BASE(pipe) + 0x1474) +#define X100_DC_CURSOR_FOREGROUND(pipe) (X100_DC_BASE(pipe) + 0x1478) +#define X100_DC_INT_STATUS(pipe) (X100_DC_BASE(pipe) + 0x147c) + #define INT_STATUS 0x1 +#define X100_DC_INT_ENABLE(pipe) (X100_DC_BASE(pipe) + 0x1480) + #define INT_ENABLE 0x1 + #define INT_DISABLE 0x0 + +#define X100_DC_FRAMEBUFFER_CONFIG(pipe) (X100_DC_BASE(pipe) + 0x1518) + #define FRAMEBUFFER_OUTPUT BIT(0) + #define FRAMEBUFFER_VALID_PENDING (1<<3) + #define FRAMEBUFFER_RESET BIT(4) + #define FRAMEBUFFER_PROGRESS BIT(6) + #define FRAMEBUFFER_ROT_ANGLE_SHIFT (11) + #define FRAMEBUFFER_ROT_ANGLE_MASK (0x7) + #define FRAMEBUFFER_ROT_ANGLE_ROT0 (0) + #define FRAMEBUFFER_ROT_ANGLE_FLIP_X (1) + #define FRAMEBUFFER_ROT_ANGLE_FLIP_Y (2) + #define FRAMEBUFFER_TILE_MODE_SHIFT (17) + #define FRAMEBUFFER_TILE_MODE_MASK (0x1f) + #define FRAMEBUFFER_LINEAR 0 + #define FRAMEBUFFER_TILE_MODE0 4 + #define FRAMEBUFFER_TILE_MODE3 7 + #define FRAMEBUFFER_FORMAT_SHIFT 26 + #define FRAMEBUFFER_FORMAT_MASK 0x3f + #define FRAMEBUFFER_FORMAT_XRGB4444 0x0 + #define FRAMEBUFFER_FORMAT_ARGB4444 0x1 + #define FRAMEBUFFER_FORMAT_XRGB1555 0x2 
+ #define FRAMEBUFFER_FORMAT_ARGB1555 0x3 + #define FRAMEBUFFER_FORMAT_RGB565 0x4 + #define FRAMEBUFFER_FORMAT_XRGB8888 0x5 + #define FRAMEBUFFER_FORMAT_ARGB8888 0x6 + #define FRAMEBUFFER_FORMAT_YUYV 0x7 + #define FRAMEBUFFER_FORMAT_UYVY 0x8 + #define FRAMEBUFFER_FORMAT_ARGB2101010 0x16 + #define FRAMEBUFFER_SWIZZLE_SHIFT 23 + #define FRAMEBUFFER_SWIZZLE_MASK 0x3 + #define FRAMEBUFFER_SWIZZLE_ARGB 0 + #define FRAMEBUFFER_SWIZZLE_RGBA 1 + #define FRAMEBUFFER_SWIZZLE_ABGR 2 + #define FRAMEBUFFER_SWIZZLE_BGRA 3 + #define FRAMEBUFFER_UVSWIZZLE_SHIFT 25 + #define FRAMEBUFFER_UVSWIZZLE_DISABLE 0 + #define FRAMEBUFFER_UVSWIZZLE_ENABLE 1 + #define FRAMEBUFFER_CLEAR BIT(8) + #define FRAMEBUFFER_SCALE_ENABLE BIT(22) +#define X100_DC_FRAMEBUFFER_SCALECONFIG(pipe) (X100_DC_BASE(pipe) + 0x1520) + #define FRAMEBUFFER_FILTER_TAP 3 + #define FRAMEBUFFER_HORIZONTAL_FILTER_TAP 3 + #define FRAMEBUFFER_TAP 0x33 +#define X100_DC_FRAMEBUFFER_U_ADDRESS(pipe) (X100_DC_BASE(pipe) + 0x1530) +#define X100_DC_FRAMEBUFFER_V_ADDRESS(pipe) (X100_DC_BASE(pipe) + 0x1538) +#define X100_DC_OVERLAY_CONFIG(pipe) (X100_DC_BASE(pipe) + 0x1540) + #define X100_DC_OVERLAY_ENABLE BIT(24) + +#define X100_DC_FRAMEBUFFER_U_STRIDE(pipe) (X100_DC_BASE(pipe) + 0x1800) +#define X100_DC_FRAMEBUFFER_V_STRIDE(pipe) (X100_DC_BASE(pipe) + 0x1808) +#define X100_DC_FRAMEBUFFER_SIZE(pipe) (X100_DC_BASE(pipe) + 0x1810) + #define WIDTH_SHIFT 0 + #define WIDTH_MASK 0x7fff + #define HEIGHT_SHIFT 15 + #define HEIGHT_MASK 0x7fff + +#define X100_DC_FRAMEBUFFER_SCALE_FACTOR_X(pipe) (X100_DC_BASE(pipe) + 0x1828) + #define SCALE_FACTOR_X_MASK 0x7fffffff +#define X100_DC_FRAMEBUFFER_SCALE_FACTOR_Y(pipe) (X100_DC_BASE(pipe) + 0x1830) + #define SCALE_FACTOR_Y_MASK 0x7fffffff + #define SCALE_FACTOR_Y_MAX 0x3 + #define SCALE_FACTOR_SRC_OFFSET 16 + +#define X100_DC_FRAMEBUFFER_HORI_FILTER_INDEX(pipe) (X100_DC_BASE(pipe) + 0x1838) + #define HORI_FILTER_INDEX 0x0 +#define X100_DC_FRAMEBUFFER_HORI_FILTER(pipe) (X100_DC_BASE(pipe) + 0x1a00) 
+#define X100_DC_FRAMEBUFFER_VERT_FILTER_INDEX(pipe) (X100_DC_BASE(pipe) + 0x1a08) + #define VERT_FILTER_INDEX 0x0 +#define X100_DC_FRAMEBUFFER_VERT_FILTER(pipe) (X100_DC_BASE(pipe) + 0x1a10) +#define X100_DC_FRAMEBUFFER_CLEARVALUE(pipe) (X100_DC_BASE(pipe) + 0x1a18) + #define CLEAR_VALUE_RED 0x00ff0000 + #define CLEAR_VALUE_GREEN 0x0000ff00 + +#define X100_DC_FRAMEBUFFER_INITIALOFFSET(pipe) (X100_DC_BASE(pipe) + 0x1a20) + #define INITIALOFFSET (0x8000 | (0X8000 << 16)) + +#define X100_DC_DP_CONFIG(pipe) (X100_DC_BASE(pipe) + 0x1cd0) + #define OUTPUT_DP (1<<3) + #define DP_RGB666 (0x1) + #define DP_RGB888 (0x2) + #define DP_RGB101010 (0x3) /******************************dc register end********************************************/ /******************************dcreq register start**************************************/ -#define X100_DCREQ_PLANE0_ADDR_START 0x00 -#define X100_DCREQ_PLANE0_ADDR_END 0x04 -#define X100_DCREQ_PLANE1_ADDR_START 0x08 -#define X100_DCREQ_PLANE1_ADDR_END 0x0c -#define X100_DCREQ_PLANE0_CONFIG 0x10 +#define X100_DCREQ_PLANE0_ADDR_START(pipe) (X100_DCREQ_BASE(pipe) + 0x00) +#define X100_DCREQ_PLANE0_ADDR_END(pipe) (X100_DCREQ_BASE(pipe) + 0x04) +#define X100_DCREQ_PLANE1_ADDR_START(pipe) (X100_DCREQ_BASE(pipe) + 0x08) +#define X100_DCREQ_PLANE1_ADDR_END(pipe) (X100_DCREQ_BASE(pipe) + 0x0c) +#define X100_DCREQ_PLANE0_CONFIG(pipe) (X100_DCREQ_BASE(pipe) + 0x10) #define DCREQ_NO_LOSSY (0 << 0) #define DCREQ_LOSSY (1 << 0) #define DCREQ_TILE_TYPE_MASK (0x3 << 1) @@ -41,66 +191,253 @@ #define DCREQ_MODE_MASK (1 << 16) #define DCREQ_MODE_LINEAR (0 << 16) #define DCREQ_MODE_TILE (1 << 16) -#define X100_DCREQ_PLANE1_CONFIG(pipe) 0x14 -#define X100_DCREQ_PLANE0_CLEAR_COLOR_L 0x18 -#define X100_DCREQ_PLANE0_CLEAR_COLOR_H 0x1C -#define X100_DCREQ_PLANE1_CLEAR_COLOR_L 0x20 -#define X100_DCREQ_PLANE1_CLEAR_COLOR_H 0x24 -#define X100_DCREQ_CMD_REGISTER 0x38 +#define X100_DCREQ_PLANE1_CONFIG(pipe) (X100_DCREQ_BASE(pipe) + 0x14) +#define 
X100_DCREQ_PLANE0_CLEAR_COLOR_L(pipe) (X100_DCREQ_BASE(pipe) + 0x18) +#define X100_DCREQ_PLANE0_CLEAR_COLOR_H(pipe) (X100_DCREQ_BASE(pipe) + 0x1C) +#define X100_DCREQ_PLANE1_CLEAR_COLOR_L(pipe) (X100_DCREQ_BASE(pipe) + 0x20) +#define X100_DCREQ_PLANE1_CLEAR_COLOR_H(pipe) (X100_DCREQ_BASE(pipe) + 0x24) +#define X100_DCREQ_PIX_CLOCK_CONFIG(pipe) (X100_DCREQ_BASE(pipe) + 0x38) #define FLAG_REPLY (1<<31) #define FLAG_REQUEST (1<<30) - #define CMD_PIXEL_CLOCK (0x0 << 28) #define CMD_BACKLIGHT (0x1 << 28) #define CMD_DC_DP_RESET (0x3 << 28) #define BACKLIGHT_SHIFT 21 #define BACKLIGHT_MASK 0x7f - #define BACKLIGHT_MAX 100 #define BACKLIGHT_ENABLE (101 << BACKLIGHT_SHIFT) #define BACKLIGHT_DISABLE (102 << BACKLIGHT_SHIFT) #define PANEL_POWER_ENABLE (103 << BACKLIGHT_SHIFT) #define PANEL_POWER_DISABLE (104 << BACKLIGHT_SHIFT) - #define PIXEL_CLOCK_MASK (0x1fffff) -#define X100_DCREQ_FBCD_CLOCK_CONFIG 0x3c -#define X100_DCREQ_PIX_DMA_PREFIX 0x50 + #define PIX_CLOCK_MASK (0x1fffff) +#define X100_DCREQ_FBCD_CLOCK_CONFIG(pipe) (X100_DCREQ_BASE(pipe) + 0x3c) +#define X100_DCREQ_PIX_DMA_PREFIX(pipe) (X100_DCREQ_BASE(pipe) + 0x50) #define PREFIX_MASK 0xff - #define PREFIX_SHIFT 32 -#define X100_DCREQ_FRAME_START 0x54 -#define X100_DCREQ_FILTER_CONFIG 0x58 -#define X100_DCREQ_CONTROL 0x5C + #define PREFIX_SHIFT (32) +#define X100_DCREQ_FRAME_START(pipe) (X100_DCREQ_BASE(pipe) + 0x54) +#define X100_DCREQ_FILTER_CONFIG(pipe) (X100_DCREQ_BASE(pipe) + 0x58) +#define X100_DCREQ_CONTROL(pipe) (X100_DCREQ_BASE(pipe) + 0x5C) #define DC_REQ_ENABLE (1<<0) -#define X100_DCREQ_MSI_CLEAR 0x60 +#define X100_DCREQ_MSI_CLEAR(pipe) (X100_DCREQ_BASE(pipe) + 0x60) #define MSI_CLEAR 0x0 -#define X100_DCREQ_RESET 0x68 + +#define X100_DCREQ_RESET(pipe) (X100_DCREQ_BASE(pipe) + 0x68) #define DCREQ_RESET (0x3 << 0) #define DCREQ_RESET_MASK 0x3 -#define X100_DCREQ_PLAN 0x94 +#define X100_DCREQ_PLAN(pipe) (X100_DCREQ_BASE(pipe) + 0x94) #define DCREQ_PLAN_A 0x0 #define DCREQ_PLAN_B 0X5 
/******************************dcreq register end**************************************/ /******************************address transform register start**************************/ -#define X100_GPU_ADDRESS_TRANSFORM_SRC_ADDR 0x0 -#define X100_GPU_ADDRESS_TRANSFORM_SIZE 0x4 -#define X100_GPU_ADDRESS_TRANSFORM_DST_ADDR 0x8 - -#define X100_DC_ADDRESS_TRANSFORM_SRC_ADDR 0x24 +#define X100_DC_ADDRESS_TRANSFORM_SRC_ADDR (X100_ADDRESS_TRANSFORM_BASE + 0x24) #define SRC_ADDR_OFFSET 22 #define SRC_ADDR_MASK 0xffffffffff -#define X100_DC_ADDRESS_TRANSFORM_SIZE 0x28 +#define X100_DC_ADDRESS_TRANSFORM_SIZE (X100_ADDRESS_TRANSFORM_BASE + 0x28) #define ADDRESS_TRANSFORM_ENABLE (0x1 << 31) #define SIZE_OFFSET 22 -#define X100_DC_ADDRESS_TRANSFORM_DST_ADDR 0x2c +#define X100_DC_ADDRESS_TRANSFORM_DST_ADDR (X100_ADDRESS_TRANSFORM_BASE + 0x2c) #define DST_ADDR_OFFSET 22 -#define X100_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS 0x48 +#define X100_DC_DP_RESET_STATUS (X100_ADDRESS_TRANSFORM_BASE + 0x48) #define DC_DP_RESET_STATUS(pipe) (1 << pipe) #define DP_SPREAD_ENABLE(pipe) (0x8 << pipe) -#define X100_DC_ADDRESS_TRANSFORM_BACKLIGHT_VALUE 0x4c +#define X100_DP_BACKLIGHT_VALUE (X100_ADDRESS_TRANSFORM_BASE + 0x4c) #define BACKLIGHT_VALUE_MASK (0x7f) #define BACKLIGHT_VALUE_SHIFT 16 /******************************address transform register end**************************/ +/******************************dp register start******************************************/ +#define X100_DP_LINK_BW_SET(pipe) (X100_DP_BASE(pipe) + 0x0000) +#define X100_DP_LANE_COUNT_SET(pipe) (X100_DP_BASE(pipe) + 0x0004) +#define X100_DP_ENHANCED_FRAME_EN(pipe) (X100_DP_BASE(pipe) + 0x0008) + #define ENHANCED_FRAME_ENABLE 0x1 + #define ENHANCED_FRAME_DISABLE 0x0 +#define X100_DP_TRAINING_PATTERN_SET(pipe) (X100_DP_BASE(pipe) + 0x000c) + #define TRAINING_OFF 0x0 + #define TRAINING_PATTERN_1 0x1 + #define TRAINING_PATTERN_2 0x2 + #define TRAINING_PATTERN_3 0x3 + #define TRAINING_PATTERN_4 0x4 +#define 
X100_DP_LINK_QUAL_PATTERN_SET(pipe) (X100_DP_BASE(pipe) + 0x0010) + #define TEST_PATTERN_NONE 0x0 + #define TEST_PATTERN_D10_2 0x1 + #define TEST_PATTERN_SYMBOL_ERROR 0x2 + #define TEST_PATTERN_PRBS7 0x3 + #define TEST_PATTERN_80BIT_CUSTOM 0x4 + #define TEST_PATTERN_CP2520_1 0x5 + #define TEST_PATTERN_CP2520_2 0x6 + #define TEST_PATTERN_CP2520_3 0x7 + #define TEST_PATTERN_LANE_SHIFT 8 + +#define X100_DP_SCRAMBLING_DISABLE(pipe) (X100_DP_BASE(pipe) + 0x0014) + #define SCRAMBLING_ENABLE 0x0 + #define SCRAMBLING_DISABLE 0x1 +#define X100_DP_DOWNSPREAD_CTRL(pipe) (X100_DP_BASE(pipe) + 0x0018) +#define X100_DP_ALT_SCRAMBLER_RESET(pipe) (X100_DP_BASE(pipe) + 0x001c) +#define X100_DP_HBR2_SCRAMBLER_RESET(pipe) (X100_DP_BASE(pipe) + 0x0020) +#define X100_DP_DISPLAYPORT_VERSION(pipe) (X100_DP_BASE(pipe) + 0x0024) +#define X100_DP_CUSTOM_80BIT_PATTERN_0(pipe) (X100_DP_BASE(pipe) + 0x0030) +#define X100_DP_CUSTOM_80BIT_PATTERN_1(pipe) (X100_DP_BASE(pipe) + 0x0034) +#define X100_DP_CUSTOM_80BIT_PATTERN_2(pipe) (X100_DP_BASE(pipe) + 0x0038) +#define X100_DP_TRANSMITTER_OUTPUT_ENABLE(pipe) (X100_DP_BASE(pipe) + 0x0080) + #define TRANSMITTER_OUTPUT_ENABLE BIT(0) + #define TRANSMITTER_OUTPUT_DISABLE 0 +#define X100_DP_VIDEO_STREAM_ENABLE(pipe) (X100_DP_BASE(pipe) + 0x0084) + #define SST_MST_SOURCE_0_ENABLE BIT(0) + #define SST_MST_SOURCE_0_ENABLE_MASK 0x1 + #define SST_MST_SOURCE_0_DISABLE 0 +#define X100_DP_SECONDARY_STREAM_ENABLE(pipe) (X100_DP_BASE(pipe) + 0x0088) + #define SECONDARY_STREAM_ENABLE 0x1 + #define SECONDARY_STREAM_DISABLE 0x0 +#define X100_DP_SEC_DATA_WINDOW(pipe) (X100_DP_BASE(pipe) + 0x008C) +#define X100_DP_SOFT_RESET(pipe) (X100_DP_BASE(pipe) + 0x0090) + #define LINK_SOFT_RESET (0x1 << 0) + #define VIDEO_SOFT_RESET (0x1 << 1) +#define X100_INPUT_SOURCE_ENABLE(pipe) (X100_DP_BASE(pipe) + 0x0094) + #define VIRTUAL_SOURCE_0_ENABLE BIT(0) + #define VIRTUAL_SOURCE_0_ENABLE_MASK 0x1 +#define X100_DP_FORCE_SCRAMBLER_RESET(pipe) (X100_DP_BASE(pipe) + 0x00C0) + #define 
SCRAMBLER_RESET BIT(0) +#define X100_DP_SOURCE_CONTROL_STATUS(pipe) (X100_DP_BASE(pipe) + 0x00C4) +#define X100_DP_DATA_CONTROL(pipe) (X100_DP_BASE(pipe) + 0x00C8) +#define X100_DP_CORE_CAPABILITY(pipe) (X100_DP_BASE(pipe) + 0x00F8) +#define X100_DP_CORE_ID(pipe) (X100_DP_BASE(pipe) + 0x00FC) +#define X100_DP_AUX_COMMAND(pipe) (X100_DP_BASE(pipe) + 0x0100) + #define BYTE_COUNT_MASK 0xf + #define COMMAND_SHIFT 8 + #define COMMAND_MASK 0xf + #define ADDRESS_ONLY (1<<12) +#define X100_DP_AUX_WRITE_FIFO(pipe) (X100_DP_BASE(pipe) + 0x0104) +#define X100_DP_AUX_ADDRESS(pipe) (X100_DP_BASE(pipe) + 0x0108) +#define X100_DP_AUX_CLK_DIVIDER(pipe) (X100_DP_BASE(pipe) + 0x010C) + #define AUX_CLK_DIVIDER 48 +#define X100_DP_SINK_HPD_STATE(pipe) (X100_DP_BASE(pipe) + 0x0128) + #define HPD_CONNECT 0x1 + #define HPD_DISCONNECT 0x0 +#define X100_DP_INTERRUPT_RAW_STATUS(pipe) (X100_DP_BASE(pipe) + 0x0130) + #define REPLY_TIMEOUT (1<<3) + #define X100_DP_STATUS_REQUEST_IN_PROGRESS (1<<1) + #define HPD_STATE (0<<1) +#define X100_DP_AUX_REPLY_DATA(pipe) (X100_DP_BASE(pipe) + 0x0134) +#define X100_DP_AUX_REPLY_CODE(pipe) (X100_DP_BASE(pipe) + 0x0138) + #define AUX_NATIVE_ACK (0x0<<0) + #define AUX_NATIVE_NACK (0x1<<0) + #define AUX_NATIVE_DEFER (0x2<<0) + #define AUX_NATIVE_MASK (0x3 << 0) + #define AUX_I2C_ACK (0x0<<2) + #define AUX_I2C_NACK (0x1<<2) + #define AUX_I2C_DEFER (0x2<<2) + #define AUX_I2C_MASK (0x3 << 2) +#define X100_DP_INTERRUPT_STATUS(pipe) (X100_DP_BASE(pipe) + 0x0140) + #define HPD_IRQ (1<<1) + #define HPD_EVENT (1<<0) +#define X100_DP_INTERRUPT_MASK(pipe) (X100_DP_BASE(pipe) + 0x0144) + #define HPD_IRQ_MASK (1<<1) + #define HPD_EVENT_MASK (1<<0) + #define HPD_OTHER_MASK 0x3c +#define X100_DP_AUX_REPLY_DATA_COUNT(pipe) (X100_DP_BASE(pipe) + 0x0148) +#define X100_DP_AUX_STATUS(pipe) (X100_DP_BASE(pipe) + 0x014C) + #define REPLY_RECEIVED 0x1 + #define REPLY_IN_PROGRESS 0x2 + #define REQUEST_IN_PROGRESS 0x4 + #define REPLY_ERROR 0x8 +#define X100_DP_AUX_TIMER(pipe) 
(X100_DP_BASE(pipe) + 0x0158) +#define X100_DP_MAIN_LINK_HTOTAL(pipe) (X100_DP_BASE(pipe) + 0x0180) +#define X100_DP_MAIN_LINK_VTOTAL(pipe) (X100_DP_BASE(pipe) + 0x0184) +#define X100_DP_MAIN_LINK_POLARITY(pipe) (X100_DP_BASE(pipe) + 0x0188) + #define VSYNC_POLARITY_LOW BIT(1) + #define HSYNC_POLARITY_LOW BIT(0) +#define X100_DP_MAIN_LINK_HSWIDTH(pipe) (X100_DP_BASE(pipe) + 0x018C) +#define X100_DP_MAIN_LINK_VSWIDTH(pipe) (X100_DP_BASE(pipe) + 0x0190) +#define X100_DP_MAIN_LINK_HRES(pipe) (X100_DP_BASE(pipe) + 0x0194) +#define X100_DP_MAIN_LINK_VRES(pipe) (X100_DP_BASE(pipe) + 0x0198) +#define X100_DP_MAIN_LINK_HSTART(pipe) (X100_DP_BASE(pipe) + 0x019C) +#define X100_DP_MAIN_LINK_VSTART(pipe) (X100_DP_BASE(pipe) + 0x01A0) +#define X100_DP_MAIN_LINK_MISC0(pipe) (X100_DP_BASE(pipe) + 0x01A4) + #define MISC0_SYNCHRONOUS_CLOCK BIT(0) + #define MISC0_BIT_DEPTH_OFFSET 5 + #define MISC0_BIT_DEPTH_6BIT 0x0 + #define MISC0_BIT_DEPTH_8BIT 0x1 + #define MISC0_BIT_DEPTH_10BIT 0x2 + #define MISC0_COMPONENT_FORMAT_SHIFT 1 + #define MISC0_COMPONENT_FORMAT_RGB 0x0 + +#define X100_DP_MAIN_LINK_MISC1(pipe) (X100_DP_BASE(pipe) + 0x01A8) +#define X100_DP_M_VID(pipe) (X100_DP_BASE(pipe) + 0x01AC) +#define X100_DP_TRANSFER_UNIT_SIZE(pipe) (X100_DP_BASE(pipe) + 0x01B0) +#define X100_DP_N_VID(pipe) (X100_DP_BASE(pipe) + 0x01B4) +#define X100_DP_USER_PIXEL_WIDTH(pipe) (X100_DP_BASE(pipe) + 0x01B8) +#define X100_DP_DATA_COUNT(pipe) (X100_DP_BASE(pipe) + 0x01BC) +#define X100_DP_INTERLACED(pipe) (X100_DP_BASE(pipe) + 0x01C0) +#define X100_DP_USER_SYNC_POLARITY(pipe) (X100_DP_BASE(pipe) + 0x01C4) + #define USER_ODDEVEN_POLARITY_HIGH BIT(3) + #define USER_DATA_ENABLE_POLARITY_HIGH BIT(2) + #define USER_VSYNC_POLARITY_HIGH BIT(1) + #define USER_HSYNC_POLARITY_HIGH BIT(0) +#define X100_DP_USER_CONTROL(pipe) (X100_DP_BASE(pipe) + 0x01C8) +#define X100_EDP_CRC_ENABLE(pipe) (X100_DP_BASE(pipe) + 0x01D0) +#define X100_EDP_CRC_RED(pipe) (X100_DP_BASE(pipe) + 0x01D4) +#define X100_EDP_CRC_GREEN(pipe) 
(X100_DP_BASE(pipe) + 0x01D8) +#define X100_EDP_CRC_BLUE(pipe) (X100_DP_BASE(pipe) + 0x01DC) + +#define X100_DP_SEC_AUDIO_ENABLE(pipe) (X100_DP_BASE(pipe) + 0x0300) + #define SEC_AUDIO_ENABLE 1 + #define CHANNEL_MUTE_ENABLE BIT(1) + +#define X100_DP_SEC_INPUT_SELECT(pipe) (X100_DP_BASE(pipe) + 0x0304) + #define INPUT_SELECT_I2S 0x0 +#define X100_DP_SEC_CHANNEL_COUNT(pipe) (X100_DP_BASE(pipe) + 0x0308) + #define CHANNEL_2 0x2 + #define CHANNEL_2_LFE 0x3 + #define CHANNEL_5_1 0x6 + #define CHANNEL_7_1 0x7 + #define CHANNEL_MASK 0xf +#define X100_DP_SEC_DIRECT_CLKDIV(pipe) (X100_DP_BASE(pipe) + 0x030c) + #define APB_CLOCK 48000000 +#define X100_DP_SEC_MAUD(pipe) (X100_DP_BASE(pipe) + 0x0318) +#define X100_DP_SEC_NAUD(pipe) (X100_DP_BASE(pipe) + 0x031c) +#define X100_DP_SEC_CLOCK_MODE(pipe) (X100_DP_BASE(pipe) + 0x0320) + #define CLOCK_MODE_SYNC 0x1 +#define X100_DP_SEC_CS_SOURCE_FORMAT(pipe) (X100_DP_BASE(pipe) + 0x0340) + #define CS_SOURCE_FORMAT_DEFAULT 0x0 +#define X100_DP_SEC_CS_CATEGORY_CODE(pipe) (X100_DP_BASE(pipe) + 0x0344) +#define X100_DP_SEC_CS_LENGTH_ORIG_FREQ(pipe) (X100_DP_BASE(pipe) + 0x0348) + #define ORIG_FREQ_32000 0xc + #define ORIG_FREQ_44100 0xf + #define ORIG_FREQ_48000 0xd + #define ORIG_FREQ_88200 0x7 + #define ORIG_FREQ_96000 0x5 + #define ORIG_FREQ_176400 0x3 + #define ORIG_FREQ_192000 0x1 + #define ORIG_FREQ_MASK 0xf + #define ORIG_FREQ_SHIFT 0 + #define WORD_LENGTH_16 0x4 + #define WORD_LENGTH_18 0x2 + #define WORD_LENGTH_20 0xc + #define WORD_LENGTH_24 0xd + #define WORD_LENGTH_MASK 0xf + #define WORD_LENGTH_SHIFT 4 +#define X100_DP_SEC_CS_FREQ_CLOCK_ACCURACY(pipe) (X100_DP_BASE(pipe) + 0x034c) // not used + #define SAMPLING_FREQ_32000 0xc + #define SAMPLING_FREQ_44100 0x0 + #define SAMPLING_FREQ_48000 0x4 + #define SAMPLING_FREQ_88200 0x1 + #define SAMPLING_FREQ_96000 0x5 + #define SAMPLING_FREQ_176400 0x3 + #define SAMPLING_FREQ_192000 0x7 + #define SAMPLING_FREQ_MASK 0xf + #define SAMPLING_FREQ_SHIFT 4 + +#define 
X100_DP_SEC_CHANNEL_MAP(pipe) (X100_DP_BASE(pipe) + 0x035C) + #define CHANNEL_MAP_DEFAULT 0x87654321 +/******************************dp register end********************************************/ + /******************************phy register start******************************************/ /* self define */ +#define X100_PHY_ACCESS_ADDRESS(pipe) (X100_PHY_BASE(pipe) + 0x0000) +#define X100_PHY_WRITE_DATA(pipe) (X100_PHY_BASE(pipe) + 0x0004) +#define X100_PHY_READ_DATA(pipe) (X100_PHY_BASE(pipe) + 0x0008) +#define X100_PHY_ACCESS_CTRL(pipe) (X100_PHY_BASE(pipe) + 0x000c) + #define ACCESS_WRITE (1<<0) + #define ACCESS_READ (1<<1) #define X100_PHY0_PIPE_RESET 0x40104 #define RESET 0x0 #define RESET_DEASSERT 0x1 -- Gitee From e1d7886b0dbfccd3c198e65b52e61e7139e77390 Mon Sep 17 00:00:00 2001 From: zhangjianwei Date: Mon, 8 Aug 2022 15:14:37 +0800 Subject: [PATCH 3/5] support uefi boot --- uefi/EFI/BOOT/bootaa64.efi | Bin 0 -> 716800 bytes uefi/EFI/BOOT/grub.cfg | 10 ++++++++++ uefi/initramfs.img | Bin 0 -> 6379520 bytes 3 files changed, 10 insertions(+) create mode 100644 uefi/EFI/BOOT/bootaa64.efi create mode 100755 uefi/EFI/BOOT/grub.cfg create mode 100644 uefi/initramfs.img diff --git a/uefi/EFI/BOOT/bootaa64.efi b/uefi/EFI/BOOT/bootaa64.efi new file mode 100644 index 0000000000000000000000000000000000000000..1a7f4e5294da5ee58b6077d5e4736a7b1494d4f3 GIT binary patch literal 716800 zcmeFadwi7Tng4%3GXq3Hg&ZIRYGwi|OHJ@jEV3Y8Zu@pss3#D|6l%>y>3fB&Tg zw_I}0IcHDxtG<4YF$>bP{>?|~g4W(L=Y~dyxXbVxd$#v@tggLF_y3vq!K=5(@8|PM zTK=0qmkwtSYWT}(_Ei0UIphEA?IlZZS+-O~GW-O8ka8LPMU6>ZvY=sUgZlt{p3_B; zx)0rdHl?BS68C28S>z*6keKF(Zn=Ph6OQWFo=W`LX0oIl%`c+Twymmg5z0}6aV zfe$F~0R=vwzy}ogfC3*--~$SLK!Fb^@c%;!#4U59ZH%+Sm>VlearC$^-FxF&51Fw& z_>TM)`)R2$2d5U>cefZ*S7FRzV>&lx!u5M&5p#nXYdgn>h9*Re?NFqxr7Bq)zRJ4} z* zN88TH6+5;B`P9~M-Zi$92-(gbhlVyJM%vDsO3${P-;*}qW4e^L`E26^b(pob^QE&* z;iV%yJhKPM!QaUhPj&4No3!dZ`ngcoQLcI1cC0DfRZKd8UpQ6i9G-7=9SK|MY`W5^ 
zyee56`d+2eGsSeILiTKGY%}*;H<-yLfBN?;ozgNcFQFD{unbI1GJe4*}YNA%zVf?e)*TV&W6XAAD&;?b@X%bu47GA zT_;yO?b1Q8S&vsbN$U7)iRm~>KRgT!%@0_MZOha(r>wadUs~K)>f~*D@)EO8?l{(v~R}PPGdAw-d~Zdjjy!V zkLS7S2|rco><^E3Y4+@QD?j|qxwf+^-#DV(tRBU!(ZwH@tF8f{O2r+bUkFujQWUKoQjA($=VXX z|HgAI-V~3Tj<_?Cv3kUs^Pz9UH;oU(v0iLx1HBSALIsGVlgc#G3CF;L@ zO;-Dac~y?>($nfX7P8v=!xP;8Rp0*fJY?NS(^Ehh$-F;D^Az;lKhAVGk`ocr{RY>^ zxi(zuo$8F!#(z`a+dQ|!rrW>^S!22_WA|2=l0}l^jMLsyc*#aS7-Un!R8RIW_Ik4L zz#ZH_@$OLfQuM1kr*^k{TlTok=KSk-xn?{Z?Tenp6Wi}8OsG7enY2B>f5*lK4p@b5)VYkZn> zlC|iR+3(lJX8K8Ggx`NiCn7&GW0JL^`)0;q@NOsXqVEmONbpYMDB%0kX+huleUYHv zXvpKMPe60QnF6L7uKT%Gdz+z22KlSJf>%8FAkVVEfIYGJZzOl&yPb?tl?Rs|m*;c( zbO~jbtTOHUfvF$493uZ!AEM8Ao@1nc*ja8S{yvI)7rrmSJJJK(ckd0G83Q~M-@ZH9bi4}h>}Oo6o_#zY2p6X{zO#(~ zl&RQzjB*p<{U_-IuN-+P`zB>Io|i#)c14o^=o zZyoaJ{S)k@Q2JHya&532eKpc}`re~MnDHJ;hu6+=Z2;@M-$6aFuF7uGl#@P3Yb?}zuTK3-?)vjFdn!+8JLwV@@0)ZZpL*R_FTQ|CIl_6hHu z%J+~xE|87GH&mj9%!@9y^YzTU*6hi+Q=@S*WntgW5E zZx+9a6aMqi6`2*;;jBjM(>-{Udcdse?GS_Xs-<0v^TmyY^N{@T8(DRdIo1n*i zI~Kpgm3g9V>?~84WUOu|lgxnL_$?{gEyZt1K<85Y7Aw!x9VXA8*hD$9Dmk9_*t!p$ z9NKUk-7mXG{PzGhz+q%(0vY*l*hAtao4$UKYeR~yio8Uo^8P5g3!AE3_a8& zK))z(1$@y!pJ0=?JZo{ihIVD+Wy6uQQJ>>iO!4(h6FQ~Yga*YwF?39n`xeqbo=LaK zKB%NWe+Es%Du-T)n{caiuxx|pNo6Z-Ck^zAXevG2Bzy6F?IBNoOQ$xHM|2QvMBf09 z&Cng1%r^4h@l!Ph^LQrxusER8$)WBe(6R#>iYC|bJc*3W9m5v5S9S8FYvq@e?WaTbX}w?i#;HEE};($ zdV5J(W~1Pnm}fegLuL?Ob$Mq7k-koFueuaZUBB6fL^-4 zhkR=P0>A9zewh~f#B8x0JL=a-nI8bx8^gF1rZ<2Kw$jXpc=j9&q`w`*a`Ke@CVtP- zUXjZmMZUNLVftnj?_+P;&g@CZ5$at!((WD4yN-3{`Hr{mS+_J)*xDQ_9L$^? zs@r}DpPBr3T?k!}Kcd@5D2L5E^L60a<-_p{KUFNjAWtie)rzk>SOv`#KdCCTXBiV7 zt-gPUa=CV%;Qst**LLgqCaL&{es}F|H(r3v@+^8vu*=rY9hU)ahCK8`kf&!ovarDA zLDMGqt%a5Dcs@bQ!anKYeAhbDK8*K!@jV?UhPt*MgHW8p&8iD&&%I9 z2JEHL@QZzC;H@~$nK2xVo5R>Eg$IaVP~Wjh*mn4(W#-BktFIk9KR(9fC!v+}RV4a! 
ziC{6&9VKgsVHkdELS7tc;1DsN5yZIOtVFg}Iv+2E{^V`oncjtud(YRv$CvVM6>Y78 z50uB8sB}*9Y&GvB8=6w9Hui_ZyPHfw>l5H~xheYk60Ri|mV~UY_oK6ljBF&?ZsXId zC>Pj2$0o^EdwL7+3+MUr_}4xi2L1G)pGNl3I)BA}DjD>3cpTyJSSybYof*q2ku;$BmQyI5b2K zOhE>T$A{@cd`!CV-6eB~Q|xkW*=6D%fiF_+`{JkKYPscB1Gi++s&nIA z)``TPSYCM3RmkYgj8EAw4d5*O@Lt}KvdH4K=!W`vk%I?lt3DDr zXwdQXg^_~^WUOc-SglK|9Ybs@SAPdEFQ9Gv+Q{>z7n_4?-y5nUT|AFeG9#8(?L==( zETjFg(Jj$dL)^Ch<;cM(b=PMj2QB2px6kwAUKdw8Kc#Qg=Bwe!>9T@qe1=Ks@R({R zHab~rPpEcYztTAOo^Lu{zY^bg;jH7S^YOVtrgaPD`YDqo|896taw7NsZSEfkPuiWu z?;vd4VdMX{T!Nm%UPxYI9CLQEZ?_C|K$)~&`ZKe;uJ@mHFO4pPRYpEbGKX(!L0;1#WQ8pir~O&Qg7z3`ec zx9h;dSzGacCp>T=apGSsT_-w>B7NN$7k<%3ba_oaRDQB{@)~nobV*(a9?&j46<&c> zSNU|xhDW8p0-T~5FnM@Pt9HKN;xWp_<4)d-mJQHy0AAHNSslPX131ZB20t|s!)#8* z2RFkj@@jl7bZwf=bY)|44@mw(^kryKnAdw#mjPk-&F$NV(< z0#xGfGCytb)BF6?GMkY^R%`#4>>oUJF~h zAC;^{#&Mm#g=?B=+m8;9jZD@q`C@2(cn#x-XQ8!TYVNOg`iK#<5I2t%j?ABK3d+}q zutykk!TiBUbdX{_TYdQU`sr`{RJ@j*;K5jS;@b7G6Q5eYkFgq$gy+ZRO`4BBEf3*q zhqF)IiM@7q%!JAwU<9iJX$db#rzjMP}vpX`d%}# zwbP6o3=Iv{eGNN%=nKdo@((SD9DIZHBKYWa#^*(93z`lgHb8^KtSDvS_5e<1m{~+I;{CPVz#;WHy zQR-6plPk7wG4tj)4^B0Osy8TqX*qBC8|DW$hGWWMughPd&h=*O$Ah_>>&BVF;99g% zJ;UXEJ~EMH-`}8TT^j|PRW{#;xle?E!;NnixG_lS?!bNtcw^~%rmOi}5BK4EJ$SIC zY^OXZr^LNDOarp~*$=Vt4TW82}bx$JX#U8g&PZXa4wA;*EE?rOK zQFMko_VqsCUF%IR-tlsQDeQQsdCwsIlc<$llB`wVz2)<7Fb^T0Suj_uG+h-dtuB{d zei}m7z>EJhK>u6Bqw0b8tUxzTt8w{5ZCg{lSi?5j4CSdGJ)RWZ3&a)tP4iRL$!(i8 zi?{>h<%g6}zJ&iiu**EZFyzL*-$w5r4M)noI2pD89;X6LQiQ(~a$}b4yTwv!2|E1%Bk^XL=FGRnB{WY4iabseh?N`WZ=~Fh2>}+CW z6K=d%)c=!llUBSlW9-Wlz>`$mpZgd%Sj8R=`LyTa;`8ub=ObT<5xvT90x>ciW4GUe zaXkg!QRBI6)?7zvv#}4wX3T|uTCiWmWAN}*bDe7ZDe;HKxp-}(PlpHn^nVZY!M3iT zoEuN7o9oOxqpbMm;o2KoF4{Jj4rubwHc4hSA5@-X*Z9;UIfQX2o9HN~%VB9<^S?kh_e)RPP^?5S)s9&G;xQE|8 z;3vLS{)$2~D@DvGMfpU?%vOIM_ThfVPX(`R8P#V!=hgA|XVB;m)R8-8)Gw#--zL6q z)7Tf^hi5Qv&wXf?ms(eOsj(}aF2x-+)~9K#Pq@sQW{_K6*>JUAZk}K6TEE=&mCh#0 z4d<)#^WEs@yV=i|sC3$M@@agB7x;Pc6m?bNMHjOIK`A{-q&yI~6+- 
zd!SqWu0D-Ny>Xs`rz9Ug%KIp^KMz~Ui=~)0jdR1ggdf3233PLQq!^a`{R4>F&oVt5NoK|D;RYB9T&eM_;7ss+gxit_D9qs88RKd z3FLEt-+{}3fWO=rFmYjbUb+a_#`DX5E!;J9k#SPzSHUC9i-6w}d`4{9#rT?ahWw}S zd&bqztAV)*Uhlw96%3iOD&~X>%KP`3_5|?SLqjv7l#{F!z3=e*^|=`yJXIYvdlvl5 z1AY4=sY8!8GoHpoHtoB{{$XTnc+={=ow>fqP&S~EM?2&qexv5jMVkiZAQXdh znBP$RZ3XkwtKrjxJC`@1RddeS@Wk|I!8-w8YdwL=s1A*b1?btQp-&V1Mc(rMGLxUV z@UyK+c#Hn5JqvjyTJAuWN>(A~dPILy==np2H3_HDaX7B$tv`~D--LbM!2AIA^S`FR zdk5=kOmzFkdibjh|Em$WvdBu}-PdftaQjBqXQZpK8Ovy&7-os&jNr*2V}(ykXae!q ziRm&QC*>DT!cBNJftTdp7GT~5KkE8Xt_Al6@aRtPJ5pw^D**@j4i>Q0hfQw}^FNz` z3)=3!*@VB|Xv)6cabc*nhxmR^Xi{s>g_(`1vP?-cb-fEM5{yebZ1--XjRtVor)Sj1 zJbC$4;5XFUa*^glJovM$1-WOe2ZwcOvUUOTCtU2!wTC8qsfo^US|^&04)Q4$^cC9D zyn*m;2p76|?=SS;3AaH0?54N$_n&^6^;5}p9((IB(P0JG($2y4efUwW8Z#;SzM+i% zK@Q$bTN*!09y0Bk&#;h5>f6BY&gD~!I%7A_p^s!!8z(fGaRx63@<;E>md$Z0dB2%9 zreGgeNS@IDRpeLRvaDf!RXDA+89_P0+dtK_rM&q7gI@(LsW1GrPpe%%t$seov7wdt zIz=0-)o`dE-RJ4UCTs=45qpmKM#MM?%|X&<3FsaJSLF-v`_Lp)SV*5u;yJ#aqnM5G zee=)I^%T56cGsh}6rY0G)#Caz>3U_Bmn)Y96Eh_Ia%&%+J%R2+y#>|E8 zc5e#4eNJ&Bo++*;_+wLu`Hh+D;_wRo`vKmIo|+5&M|b@F>H4^LKce{%Pc96wu0{6# z$Kd5{pa1wQwOg+73_j|g1nb(=-Xwk{Cu*F}hM5nd@AOQ1-WogCT~oV?cAo>k0eIQ- z!;4&dBE?#qL|DEsaRh9o{^P{xu_2Bj=d{lBE7(e3#-`d1Ed=*u*@yId#&5&Q^XmUR z^=~ErbJ!B1r9BE>v`gDd&G^AM^qK~)pBd%N(+9GoqS!1xix__(zXY=tftJKuK0|$v zP~W@%7rqL{^_X8%n!hXj^jbe{^wU6Jh}Uk6+Ut&?2Q+S`PmQ-)L!;Vb#45cwMmQZi z@rga+P4OeM7cLJ%Z+8wigse)+K8o(x7_zc`((%j$ZY(tggZF`_F$KFde(xLM`SGjh zBWoG`gxrpS(`a}wyw>ij_w&ceuXxo%$albh+*<189Zz2T?-^H->bcfOwXo)M#7cLJ zKORncX=sg?Y907o`0IKM`=#B_*$HUe&G5SMeeMi}jjSc8T>AwW)Und3mub$+# z*H|*CRXT6QDAWEZ^WLw7CJp8>&e!;QeKq{PBR+1vc-)e`MmYxAMw+d{q@;>{{5NyEEpGVYzkT>koVN->XWE! 
zQ`s2cA5%OH8p_U49A?KKLPwvZ0sdh5O<=U z=%Z*9{!WjR;;;I4EPTGa0JvCFP|KY3^7WyDR>i=~N^4SLN3>Pz3X^v;4{M4RUHSOEtl>gW&5RXRj((VZ4F;K8mbbG+ae&G-;Yo)1lF+h-xRF!u7t<;yq#MwEi!VlQC6EWU${IKR z*k)33#-`D_RL@SEr8#?iJ><6RwDHibC{xk?0=OK57rW6Hds)}m0&X%Gxy0IY= zm)XwGk<$tC{RgRddKYO_Jmkk$a`(lQWwf?FoZbiAN#w!$3FrTZM|>3={~jD&c-GXovhSzpLhV}sH`ng6)_O2p01OE~-et9U zd7mOLHkX(0K`-AV^2rWsK3_I$LHb7eix|oTi}nq2Fm`_p#&FgBOVh$SY@x+wm#@VH&JvN!& zTmP_b_F;akE14ZGY3;`s{S2|`#AWym&?}SY(ZM{TIWS}ddV96E4zad{_LJBrlGP8N z%UsuartVw3`&Ou^)zu4OvuGe{>Qby>DP=x6!hCY(L+}j!T|A1tJizVA6lmqw|2^nX ze4w)H0c@2L;HeK658j8pYr^(y`?%H=6sK{7-2Euvj}`U z5uZ4=GBio^Ub53w|AV0j_S|nuV!U5@YpAPV>!`kBdb3|azeB6kABXDtS*wyH?(nSw zd-er(wDqBLP5!3Ph}NEaBJ=CVn#Eg2GhfUYh;6}7o7Xt2k#$IwYh$fZ)*-c=XD_!S zW=4#)v;Ej)l9yUDxDUO`v-TC+ zaeTn%C)sz_&%JT~p)sZdSz2gghp3H#3Ty%583PsQo^y#{qiYne6y2Z47HpwkE%>B` zKAFk;eaH&MKCmlmU*mo99M(ijHu(IWd~ImPAoCZ(MRjD(sN+S}NC$P4QOAqCCr&j} zby&^K{JMtsL3@t~CSa}MStn_APk}PU?{e^X$L({P2S2Xk%e03OYQ#o*Q9xpU@H=Q56hdW)ixK}G2VI$HJ% zZMnAbh2%NShsc>(BCj zI)18e>$>*otMqFyZ>M$cMetPM2c&*D*I9qI2e-;U#M=2pgn16%2Y870R`*yX=P|z@-}BS)za=++<$kLE<#sr3T9^ed>R%5GxqhPHFQfk%uQTN% zDwbIq4ZnD*Ur+f-!0yfmVRJt2+MC3Kin)H_64@}wz!|nvVNV}i=-P2!Y{4R~ao0UR z+GJ*-6t!*;e%N0sqvv{ttv@`EnQLcje(3-(U3v&?_zTVr$n_O+;BsS@6Fj)u zUbyG|;W`I-XRSCLF8rug!Ewj`5H`iOMXxjA(|r2tLmTrCG;hj(1Ko6Xat&+y%=7j5 z_l&PP`8`^1xO1w#tQs5h7Lzx6+lliQuSK`BPHwarYucNpMMgK7vqv|q%%30o!sN@W zOzEtK>TCC~*6%XwIj%3R+0#^L@^#;&XW7zO;kQa>sqOyo$h7I!_kkJ-q^M58Jc2x8$L;jebZ1wT0j~n9Zh;QDRUpf0rqIS?FxJLt(V)gZ@J8# zy=NiIhv&h?dm02vfZnb88e1CXkIrC#Pl&2zM4sIuZ zr#WlU?X;URX16&}xn8`uBX3fEy$LVgPQ9;;DZSJ1{^_wM|03Yl_}ejNa(;a>ytsw; zPqU9=*Z!^j@Vxj>DR$ZOk4!boFi;A=_b#>%&jWa{ z)$5e!-m6T$;9`wQ-GT6k^w(_rhG(b3+j-{s*RTELo@dSI^4HDT<)h}9WsgP7@>jt} zv>HWw8S0a~3;FUcUKeNG7;(r;v0qZe?wY*0W?~2jv)IDk9KUzI$SHYpYMvMURBrOT zc#!smYrU4_o%Ro8FCtbhI|O}jn%){dH{%)CzL1<2e@Q1PMr?l0xdBtGc6W{s*ouJ- z{Zc;SbDI09*t;2i3ihIIhnAAdTV&H*gv=&YeOecx{QJ2M>hg77x#~!ww^eQ<`F{gV zRMwoR+LH|#?d5j)^c23@#p>pZ$rniWrK&J&`?5;JycJKYR1K;*#sP`|^?I&Fp 
z`2xM~(vEt~tSV;{Iu0J|iPA@&&hhrqYd%c)^gqFRBg$eY_ay~CH0=zT#o3P%H)L%z zbM}jfXM6Lj;AID}@J{?L+h1#}HU6&m^@By*v1_aFNAO9&BSu{QE&9-eO5ONhfWPv^ zDIdF;ytZj4o_Jb4T4N?1@8WgDvq=M3g|A6gp|_y##n1`8Cs>37eeSMn4BAmW!e90D zpHYuVRmryM5}Z@NARoe`gYd~M9|2aCU&DU?HMF^gHrL>LslALF3zHn%zQy`^m3x-S zAM(7|XRt;xx4t#Q_Q5H=E_*D9|BBC#i{Ih(FP-bzEtSF--|E-MLD_BK*k)|4g+F~B zBUr2I+9>e7cvEv-+wjes?~X20x&8R!nkz8X)sAAr1JEX*;|TB)|E9o8_L8n&r_E1e zmj?T;&y2-c^Qv8(ANO&V>=C|!-5k*I;bFW_oqr;n)lM+JwTE7K2kQ}#(e9f0A?c{*BzV!4wI=GF$1;9wsJ&oX<#OrJJYc`^-m5LiMag*PaLWz*?W@?|U&j9Y&C`;#UnGuUfGq|c(A(p*2B70=lP}%%a2e+X zu?Gepa`s)=d7WFmSjsI68@E38H{Ymz><{0+bDJ6I`nIRl|Da$0hne@Je$}P-uL7gm z7aUb7FHb{xvi9%DBYF+fl6X!qCIb5H^!ad?pZ?KLkNN4y{{&P1X8LLFdMwdb<5TO4 zG!D<)_j%?zAdfethd6tMcnEtuyuI<*fhp{`3Supd*j&lH>bv`)Z9V&?chL_t-)0f3 zNND{Z>tz+MtsKk#+hwKmV@uDMP9A46vDT(&S*zr)%56c96VvQIK%7nU%$%Fkt@9y{ zvwk@V-CML5SbN`To4Jv?QreEvZUgqp@$qKXD*9UW?}CsKz6>#z@W1xN|n~yM)hgZ@fEH zr*=-G4Q+}xhIdQKX14Z$d*<1=8<))fN8EV_KUQ&(C_aP6+5znO%3sBu=)A+S1gtUca19vt$i|GBvHCgo(;>$&@kwX5>~+TO#ZN4LBg zcXV!5B5u#lu9jeF+az)T-80(`@@Q_8JX`%dSCL2YcIDs1TDV!< zYpkiW>xV~cQ^Yh$N zj_$b7p50VNOg*S~a+Sl}Pse9Z)zfRDRSq$Cx9@IShaXOvTPU;Oe9AI^I9&JCDu)H{ z9rMVWz^}iJ`u|+sEWUq&xx_5KKXbp!v-tjr5>kBs#3lCZEWW?sHq)w{zopLUsxwWU zl)07sw~#l80|@qP6Xj}2KS%m0()pyHB)x{ThO~+_PI@Kj)uhu&uOKBBk+_^x@LWbJ zcrGCoJmsW<=R#5+9+&nPS2@MNbZ$T+(eIm_sd6c6Y?(tE$GcJfy9LmpsfPFPxtRfN z>6f#3KZKlj`E?chDS81A6>gVx|3*YaFWFqxTw*kt|bFy0|>2|peCGA;2p(NCxN>1BRe;iolz`bj_il%Ll6>1X`( zQop_1{QG5o`c*&uuAip;^hrN`&QE{ir;*=zaDCKI@AT7lKYh$kpY+p=pVs;9@AdD` z`!BDan4boI;zj=bOh2vg(-nUD4L|+ge)l+Q_vVc<0&;jH$W~=Iod73v6%sV6uPUZ`Y2@ z!?p+O33-73Q{Yu65!O0oV zyn#|f?4{J7bC8`m4w&w`=|`LT!eK;?sr@08}e&I9h&-r}tUw*pZPlG**mtzxIv=i&DcA~@+KE+y=pR$%= zf5>zgor{ExDBk%3cJZ7r&&YdPoN23I15aW^kt8WNCW#p+w)4cB$oU>>=P!UK;63r1 z{2tZk@gXs=FJndUk)um1>jD+U?@|H?dnf1S_IEmKd$6X`ccYcq5C;Zh&JU|q9d77w%LWu1I? 
z@wL`o6Et<(H1vB|>G44;ia*addX4%O$53oo{jGdnEV&^4Hn6$%5Z4foNgum98@fJ7Jm&?@ z!BZOp#FwmavhRrE%g`wtRUag46O@szh9lb)TZ&DsaSE?BUHF$S?}wnJ+SR&d;bJ%g z;5pgI+#mMGO~A(`uaJYkfS%L*d+GHm+A8BmTx#88l-2zJGI?6R= zoc@q|RzUZ5_`ikgC6wQeP1nHnUAn%^?%f>n)>XQD`CU2U^@AX6l^;Y?FyI275v9m)mCj%Umb2lNkQP zrrly*CA1nlrpmN;D)wiNUrzV2US+P6hfFrC!`;JpY60#F))Y4SHjq1)j!e(x_fuyi zJ-g+Kuqm96U1ZJ^Z!(7-$SCI3k!8JquAF2|09ydpa^Nau9qIk_t=8#XPrX{_(y()` zV{e=5(waJ5dY3IF9?zJUY*D+6m2Q=(Fp~#Ul&=&_!td$1&U4i9xBk7^HP@L0Pisw@ zXm7BSm(n-FUovt%*P&6BUD7cRA@jeYceIn`-IsOEc+7Au9x~HecLNWJ|DL5j$(f)Z zTm13xT|W)%m_WV;{rnUE{qOu#^JI$QN>)k-?`tycS^CW4eu%XomqH`MneZ-d$LG4S z_1{QWDZhUQ3ZM_ZnyYh?;&I9abr7p^pWjQo z{21i}yiTrovMb}m7RXLF=Ef?WIy5U)yC7+QP$N~mD)05;k2 zE9r0UWv`T)34{BHhc4kfI%C%MHLt5Ym@%nCOV%Yh_hNjyImFps(PZC}vLt&vtn?6T zn-35(9b)gtJpZgO^FXEZ@4%5@-EQe*&b-9NOyED3nu&wEi2F34yUnFmYpJO|_zCV~ z=zhr;$#%&T10Q<%O9o}socU6Fcj8>+HuVjt9%yAjEBjKL`ich?3pMOF z%qkD>ZTR*W`wf@yQ@qx~zTj*V?yc5mxaW)z_H=5_M7V?p7ebFxam;=+?m%ECXI|W&b+Cnt7fv*RmJtb>x`rEHix#D zzjU^l$Y4CWdSer9X??~E!1Wv8?}YyM0dGgBVE63lg{?ZTReqr2#8Vh!aen#qZw6j9 z@GdmM_nMeC9hDA#bVtiCm+tvC_93_pCa6byYraeU0blLWI;tGKeM*k~hdU>%@jEq3 z&HP=NbrW-gpWf@If&CW9>rmeA71LfeF>Z81VYzUUoK8t*N+vV^pTZ7x z*N+e5>3!EDd59eRJ&<)1)8nV|O?WcAUTWVy(>}1%v=zu)DLZE?Jd1&eLg6F-PTNT)A+&@eN{qg}{f*>{0uM z7(4SaGX#^aONm9K-qF5&><;GQT{{q*6%TA!#ad>c{@8N8DbB|dPC>aW>$8{8uJn@1 zM~GoQ&-Jb>cl*EXmHL%hdY<-; zeVldnI{Q*SV~|(;uW=L5Bk+T^?6hdsn2-8+e=G-ojrZJn76aYEarQ&Zky5JdA?;zz zRxvGz&62mHB$^COTJloj_;hR~(aGZ%kN@L&q0#7vcHkPVxL1PlE?(I-!nk|0bSAKT zptt;Sa=JY7;uzA8tmE1cMURA#w~|Rk&=;EyomuFP#RR$|fy|8p!(Cj1TATc9_@{Ri z^isUye{n{x_5w(L{(<=$;pNKDoY;ZZ;q2v|+Q_1lmG^qeC6G%g+E+c->Yg?PkJ`bu zsWr)J&KEGN(2H}-Mr%$^_3Z#2;g%s4egBj*NFj zjd69d1x^WIU+=@-?!zAN#y3WDu8XS=JU;oZEbnM=HQLcvQ* zk4wJnVJvFyz4LNx5UrK-bzv|Eo}&Gt47`WWo^j!pzXXrKd!^{Eb10)ci4x;HLH~*u zHK%tJS*)`|MBh_%J#`+7Xd<2A#XPJua{wE%zMo(%@YWQ3O7zpWEX0QkUAR5pA&1x0 zms81CbiIWx@V>ba&>`s8;eJ65soi3)UQb7{7D08WeelEHEJ%;#I!0aAEKmQPDF=f3 zwa+UUFDlNAQJ4B=h^3!%&|66GnB#_Rn!% 
zE6wX#Ut_i+8=Rqf(<_}ga9(xSzYhHSD=@NxriOWX!%C89M4hU`L<{I|N&?da{_p}*j zZnK3tBhg5^;z)u)@P#5)Z_`R^)9QsyTf^1HZNG9%%x%8~pQW1i)kZMZ;JseMy<~IH zUas6Lq;2ti5vh28{X@i3u(fUYDfO7QK0cGKLBCY&MUJ#@B2JLF{2C9x%SZR37dIr> zzmd4?8uX~ogX}|TCtovoo*F0K`ndO_|8@R&{G6ZW!Y96$jH^fyOT_+=e2!uNh|c2C zgg+L_cxD~1L1yp{n8crM+c{&P&=lKxP!V!06d3d*UbclWY^!dTB ztItnV?peBbPS*>_Kk)=}Cyu$aO z{%bhbII@?v^NhCp6ero>!#@^9p}%09TgciN^2+<>i%C}^x75!Gr0$>V`n%G3;;r0%mAx5^cQ5V|oKbC+YkkjJ=py=2*W<-ELqqmUCC_F} z6!H90`Z>ur3y$C)3eQo%pg!02B-;Np->xv{B>VFGK1}49?i4Ue53}c`JE=OJV$A{j zX;r^TAal<~KPaw6--qx8RF@5&*s91e&I;F@CBp5Uu-J@8fqvG4)T?L{6O>>t%x!hFZ) zb=LHJ`daZvH-D~wg&iAwOGYq?=f4Mk+Q){v*=yY{pHTL~uVk-*pK4_|)o6MN*RF5#S?&QNGjF_4aRt2R!YrhUxl z9bhgSMf@8cEOhO>Vs9*;_N@-jN2n*>d=_#c$$GoE8PA&UQSJM~CFxJU^p!pJQ|-YX z+R4JtJ(qQCjD>ZU1Lr{+{C#M%9lw3Te=}cz{iL~~;&4$b^G3V(UO3-{ZFZGu*BTF< zNuhnsz_m_ii=AB2JIvqW0cc!nuz5UQ_x39b=OWtCSu*ZELG}f<(1)CDGHxdQ=*Dm0 zT|-|U4#QXMB~+{^OTH`qv(l4!-{<>c;MPx!Mt0vA_$w8{=3%@T*3heuwC37+nOF_) zFQkvu=Qq&b$`i<5ckP_ce)@=b>Zh*1u6Cfq>{8m0tk8YX&by)!a0#aGV82vSSBmk~ z!uO04I=?O``?&VN+IJ`5|MS2piT~9AjeGDTQ>*HaYu_N>VsX#b5Uf_#gYyKj1g+U_ zDZ@URYFd+J#yyWdX!AVy7oHwxv8U!BdPK4!#rWIG8601!{WwQ3nYOoq&0J(|*vhy* z3eKI>Q9LEHafrR|{YQuD)|c}hn&|wefbX+DpC0qmfKSzLk}={7#U_H7T#E1T2yV59 zBj|n~uyCvQUB30m9NC0P*~-9{_g9%Nhr za{^B=C-Wno+1o8pVIFY6O-#&Br}}Aei}(Bz|32vNOa1#AKMnMr`WT)n-$C3-<2OS} z$b~a>3I>^HIqh7n+%lS*6E8l2%ntAjV!nZXnMyo4sxwKTpZq_3XZL)oII^rVV(`sn z&vs6#K)CL(TKn-!(AqWAIN!HS6zuf8Ma3!pv(pZS9-l zR$HKRMIHNSL~r;~dx>L@P?p%pec&18SpvEnQnhvSa~>T7dpO2(>2X7BKsG`F`~>VC z-=_T7%_CL%MuN+0))rz2Kd5rbwXcvguzP=h!n335{C=Ew*lQ<<3p5RQ_qX^dcWq{? 
z>i2Ampxo(w;jJ4n#0CnW+j{y$@-FC)#!EaO6rDe;vx7dl!;eR(KG!b=N6!Yb(znxp z4cHK}H!IPF(7QGER{gAsN05`y(PAAg{y6!)c>i4?`TOX4|31OJ3qRj8kUTNS2Ku#k zGclXZZp_9z2S&D+<`_ixAWjmFZm`G`dIbLiUui7$_fKJNg?Qy2;&}boa+T11fEZ&Vd@8s?Ykm0em5$J#l0#8= zMLw8fDVZPEI5)DEL1_u~$#3}_{qO|!Bz!x0Gjk#0y%=`IMbKAuWRWKp=X;4qW9&^^ zPx^EJoyuWP%utypc_vx56nKsRUm@pWWJyz$ZxR2a+owvm)5hP@?kxIKd|ZzmTM3^O zU>Bbm3)~6~wRcdo6wTfs)jDv7y|?a~IyZA2V_hh;o?XViT;XcK+1&&4bdB?`&_ZkM z(V>O){!sTabd%(difRbUTM@El&@DX)jTiQ%M>GL z?RtXog&^!mC_bX{3#ZRIOj=ByrSzNfy7s^PkevQ+%wM%}i4S82`n&M|TaEM5JL>oG z>EFS>;w$+td)#M*>6i43e&qbJjxpj7-nYRohH~E|O)+k4>e?1E&eg0xQQL;`E19I{ zAI3&3;926|IsgByp$+efGvu&g)boD1b%@q1DLabW*pa;TbY1uPP^l7~b4>IQJE?C$!i&&1kk9aEA5w3aP z-i&|Vf}T`8Dx-NF;XY+yB1fi*6=BxA>S&UPeR+O+x0KGu42qLlgEr*wNWsv_GxEj z*7TLG!3H&%LovSb-XD&oW5(o1XYjRgV_e%rJ}LFM`#Fi1Zpa|}`94Zt{ae;zWBLzi zPo&Iw0^{qrfX)PWwBwe_Zv-#z_Dh^QjxyV^*GF{OTTA zIg@V_4wn^A$fi^JD8Jjta~t*;-!syl?+u1|_!Q4d&8S6L_PyM~bv8VrTrsja&=Htt zsNP>FFJ~aA9`V$x>}S_L1eHyQ=Dd$`mfTce&HC->{jmK5UrBa@XPddhcFsp8P&abl zw6PZ=9bnasIdjK6C3 z>b{Eq+%zWAx+;%zttsEh_{zdB4GX8b{3|#b*mDnW9PGyq5PYgnbDg&=oO*l>I!F7f zHP)p+8tJE~F}sydI>Ot#bLvcZ=|jqHDBzuBCfEyghO^k^ADSe5tg;t@9cse zoPl1xin0rsvsrz=HL1$ruX3&S*RWQj${c@kjj?~IbsJ6mDj4$%-7#qOfX7iBNR%vi?U5JI+zKPsZKmsw|UX-kRbGwR_r z_kL_kiR`>rrlL#XpA2Uk8TiyX!C8jncbLOPkae#|PpJ+1v+$zgQJd6`c=A@@y_K=B zBVyf9zku(!0oSuhGwxa1eipc1$(Vy5hF6|_&^%vZMh@z1*KGJgoln&FklSaY23PUS zZS#1g8_Rn6^fuvVQy9RYZaN z*|Ui?wrh?O+si%IW7QGxW4~7;VF5eX@0GD`;H>NXdh+}Xy1jVBKWFB6mGcvoO`3zd zSXZ=UmEytIL$+kCV!pD^e#7;T`7y7(=V`{V=^u-Kt z(^=3NcvS0|be3$$?-S8W__h$srX6Cjd>;dTp9D`TFZew#8CLJpU9{A_Vhp;jqMqg) zx_9KaY;2(&*~vP)V@l5Z3g9z*1G)m5Z)g2fC2;CGi|t%j;GG86?&G8GX;`Im95~N}RQK^QW?b3A=+-^d6Fj?u`(NeU&*px2&V2>< z#4k9zl6|nG8t?D=)du?eo$Zej3=XEB*Jf{m_Z-8jc_aEFZqE*zTQQXLkv= z!|>EI^nrMRy#;L*W`z5lhG2XKYn66_OBuK%*uP!Mnu}fZ*VB(wJ4twb0dz?Eyk2J= z|9IjOL#&(q;`r;gg_6BT!V%U^oAhl9%Z`hOv_5Q28FSXgEdB=LQu&ov`c(Sxi5)Xq zr8izMXVzu+%HDXtvaA6dE-PEeEqiq?9JaeJ^0czNSG#AHUF*~DGw9!7j@XOEdgm0p z11=8ZL9~=VDS4`Ol9Ra}m`==iy4D)7W{GRV*hwN!E5ZeXKf8ea-ixG%y}939`R9Ej 
z)SnC1xY2l^%qJwbC?mb|HhZZ~Fh(_J@@>v;KE}P<7HvpQZfDF$KFy$CvpyV?c-OF# z*cxM2`(m4SvKNZAWE%4;!Yw75+nx*$;GdQc*M&T7pk37!)FW9ey{dX-H>sTJI7c!G zd?k0Q(H))0mIh#-Fv{E@T3~<5_pa6a#3b6)9Lj548h44?#3)yWTyUQu zEA*@dILa80?lZ<_C$!njGtuVuW2|3PV;UL%&TjH&S?lon zPOH7b=3ID{r|#Go)&ybO#LTLa*U-{i z?7)UJtP6X^7;K$U>7|N=BX6_BaE6e9Rlt~~jc2hPO?c#>Np^1R#+Kw9bvGu~kKBW9 z>-x!4Ml59pGN#3sOAGT&9ri+76Fy$UjSb6O*|a6rs`DF#hh&LMGxaOq*c4AC;He6} zoqIdatn;v^7DZ>twu*556#2n<_v=5k+8-j0GqBT|O{|ZztwyD>pZW$Kw%YFiew`}` zUyR!hZZ^36j6ANMrcVcWW*sS=rDp*zf6nLSpXWTQ^q*BOTr#Tx`|Zd9=|>OeRx_sd?vvwiH-(BAv|(B<`DpAEc^`DcJ>Zp?ze>6cpBlNR4} z$Z#z=Wlc`jCgCNmr%~Sk$3f;m%9}2o>r{?iy-~Ix@VNAsAN;WPOK}crZolg+{TSm- z{m%LL#0m;_H#0Y>XWg$1%_uM-&V?-)v|kaOLaw~{+#)XqAUef;c^l9xprK&WGxhxx z=w9X1@r~d)b5$B!UEJ3%nmtQ+H=J&z}Z$}1*FQ*glmfafl*HM`7 z&vj!znPd@^b;AiiP88_XAl?z^g#OkM;Hx!4VHfXy))4fMWR1`@oQ+mx+J~+) zz5CXWgf>Uw>yaURF!I=RA=`F~BF7Ha@hUyN8+P>0%xft&CO+ELp< zTN`rP2;w$DTk0p#SbK6L-~6>)y9IbB(3k{wmUkJvl?FTK8$atDbEy0(S8{>8)nQ$Al|+p{*$<0-*5 z9ocwljTvid`2Lsb6+a~+?B`J3^t1Z$8Dx>i1Hl^mqVza;0sP9v3+KZNE%(4aDzTM~d?W>zccI}@~#n0kb z%`epTI_K8@(xtigjq(7W+%e+P9sU>W!5nWePKfhvNW%Y1{5UvvK#$=6qZ?oK*AB7X zAmCFEXYV`?omo4can!jZI@pMxA(|_WthyAR2>Sj1E8k+L4)g6$_^jtkk!!@{-#NL$ zoVNbJVk~RSpIl+P-_5cf$G1lAQl>rjw)BgonCH6w)$%ctwb#RM*ooyC;#>PRFz>%J z;`XiHeZqeSKhE609=jUZxo=XXGf-OTAXf(yv}IwR-mZE1*81~__YcGC#S}}& z+J{oeKi8L58+XK=U(lzo(jS61wlwa>i7Sn9h>bcA8`CqKHyL-obJUQ;-!zuHhC=&4 z7O-Dojr%O(t~YC7eoFc5ERy5uYbSWPv7)Y#X;y-H|DMJ^_jKI`65qbr|$@{ z?uW75c&_4Y(R4jHgs}ssV1tE1sdb92uRX`q6`q*0XZlLw(X5Ml034RQH)qec#&>R< z2&}Tj@wcC!3ttw4+joHb8R~roIt@iwCq}&Fqp#A3yq6As3>s#b+tl8j1pe@L?)MRk zi$bsLc>DJ@eF0zZZlOGAwVC^?oZF#~*38$lpD4+D_c?3un|MAI+BPx=sW@d7w&=as z1V_U~yLVtWe%cg$oikPcX*0afKBmD2(kSgEL|?xzrYHFp_Iu|Yl&zEET?KtZU)McM z9>Em&erKL-#rG!NGQ^f?d({0 zw(4%dP)*+G!fD4FX{UnNeGC3FGBzFOUcR>M-Wt_Mo~T=|X;t1n-n~LUi4N?wAO>%X zer!5ltS@hM^Ens(79_T3*jV^DUWVj!{wZ$g%6?MSuXjyHCaeK+w;ZR$RV z&q@DEmd*FuOS*Aeubz21GCycL%R0jVZdcDYnf9|OyPtiMLI19XJ`M1W`Zde^zvQq* 
zzW}$>Lf=^Q3+*^Toekt4!TYcBPGxkyDKQ`T72A~S5bsv;PJ4;KfzQ1tXtp@!M~RMHT9%bq~aI8(~#EMA<1*e z@qiCPX4N|3c!1cOXv(~C`#$D)g1)H-F4@QZLoMrK@FsY-&ADohbA+=}AEC{C;YfN4 z->(QEC*lWItgB+|+)BP@NyV>Q@YihaM?yc9zZV+Dkatz!V}q~u(bW_i=R#ynjPhEy zSi_!yM>zZXA@XRvOh!L7(wEvhGm>Ak897*qUt?&qfHJa)5+hBQ>><{5cQu}EoJ#83 z%)E>CS+p?bwP(RXPBpN`zmhUq=MW!juWJlhT?xL`q!{<-;UT5De7-bl+MDPT`Ho3s zyl7j+nEfix*YJF=Pt#zW<*tub+&P6jQ+?9&iX&Z3{_Q-wjWuW|HIz<$46D2`QvA1!ViIVfrKBTYi{_Vm?9v8fEtXC|2mNl7wcW}-o@49Irr;wWtLF25pL3p>JC}?fySv}->-)#OFuC`>?{l8>oaa2}Ilu2^*iYE< zGQd5fpr!4w+#(-d*uy2*eGN6oD{ta~+WKC`dEw14sL z8{{qPiyUO-JT=PaDSKqtE=51M?ik{tpU4Ig24xri_F2{s_hKx+7h$PWT@TuO&Lzp* zUJ&_;JuaM1uIHXj)oN-+@~Kzy+FD7y(PYjle&BQRlTOU+Cdwu5$Rz+jkYt0vDT+wj*0~wE`>{aL={59+VdRV5gwr%i9)6YPA@!;^2YS{pu^<5{x$Xw!v?djB+sT@Xy#!o?6`rJ{Z5QK7yY+&KmMZUS+>UpMo{C*n_YPW6Zw` z&8TMkD~I!5$cna0b?#?1%L1HVVVQdsd|wSLV_dZPBcS;$=p*faWhD3!JF=?gjkW-+{;Dj1$k1M+YBAm|o1`(!M9@vc_^&-uJh_ zv)DV7LzvUMEqd{V50CX)?udTpLtPwl z#eN0a`hI_X{z*zZabTEbP2n;hTM@qi#Y25$n9B>+ix1K2ILk3KwAWQU>{>S$`}20W-9U*wt=#W`X28u0ERkAuOn)<{n2C0eH@Zs z?sc=wBRneF)5gC2Fy47MIKB+?NLaTI`^jBW@TKBMcaOunKVdukHW14h^%X_l41B!S zlz)T?!(P*{SJrPL&+2benu0u8YoDs8cIc*e*bXV|7h#$yBkhWF&$K1>le2u62-+!E zqWr23yx$pmcpUw|dW`k=!d`xYaT$io;ynq|@GXzZ<|RuxH+wqLdOzk3W`3;}Yk#q> z4ds>NSdA#t=U^<3*Neduyq;zKX2$WR^j(bn)9)`qob&Mf+xGcO=x-C}dRwkNTnM^D z*b|qAJg6@pKaBk`oWG`i&c!pHi)RF|JU;U z8FU95I;Cyt0OOjW2dsQQwda@f@|v|o7@I-8%km%Ox}*LrnA_-uKNOzQzhfHEhQBWi zf32{$`zFK-|IR)j)^y@sY`|VYznSrJPR{LP@GXX2v+<4)iRb7vek(-v&BnVzW=>eK zhi%D+rv~=SuBhGf@YLX*hdv(K^BCfSzgc`I-hE1cA4MK*LAld=ix3~}SK&G1%LY}U z?V#I{#uw$d?v>>MFfBe0JJfOUP75Z@E0rK2P*o$q05Ph zCyH{C!+Qf*Pm)I#?Tp8fM_db3s+>qW^eFuEk$X5E64>)^DEC?TmBDku{qPB!ex>z{ z_~)7Z^|UonMA*^-n1zi;F zDZxe|pLiX@nSB^u(@#XdUM_nikkzH!aYcQqQf#+TZL0c#)5mC+L`UycLJc{`%1< z&u?S>awX_&dygR;di==)vw4(13*`yluTJ?-sutEV&Vxx>~QGOtvf*W>rm zMm6J?YPxv`Uxn5m)w25MZNzxsd&(;=KP4XW=DG^8~scpWwP&8E;^o zQ-2&Q6Fp!xt=)rV_d2=Ld7;sbq?Ix-k0pMI3*VKw5A)3TZK;M|dB*hXx7?xhJeTBylq>p0%&q7(;^G&} zV0Q?QeraG&2yyV5Z7z(*R0qbxPtG%#S{NScVLuw*O{jdG`&$3#)*R+1{^euYi+Ujs 
z>y8218^_4TqECc(k(Euq`qlxwgPJ^PBWkg!H_Fi$u9%?5L-sy|GzFF-EZ*<*N$fQ~ z4Sjy@*&N2$!I5&z(ZkPS+^3rD{48u4aV!QeJ!Zl7;U6LWS0haB3e>4;;)RP55A&CO zx`E*_7b)z8uLTX;hEdok`)qST&+-+-vw!^=zS;6AJtnbNT?!qKLmfYpX*tPO>)t>Z z>49wA2hIMZ!k*jt;Kw~U1Mo4Vt;YQT+A*w49bnslQQjvIhHXmHL@;KUHFd>awNmS= zA8A>_IJ6E{Gfu3%|26t#C^s>d8?-%GmcM;+c;5hYJPYx#%~{3xAj<(@jd)KA>7t-x z+nZ&25c(yLD(t_dF7|`(1-O1Gh&~nSqp!kN^5Zym&$!e@5~qUC=H70(XFvGE@&(@_ zSi(HOc*N(SXT4VzzBDhR)`^u9y)G^10pthe#2VgwTG{TCI31M3@8f=ztsmCgbHtuy ze`JTf&%Fh0;`q9~7rXzVy=QysDDp$ffN4wG{!lq}6Q~Y#GjH%t{~56T9&@ekN4t*w z4*G$w8yikTb_KoCUK{XT@&?>f*PLgAZNXmcyO59u?fdn3nHgjLcPme+^e~|NYr+bdC= zRW>k5g?m%vxg2enLHjI=_5(O$t{30BKNQ=~HS)`EGHaUyXrpM}w+A*|iF(k@`zKgG z&bAQRvafg2ClV-|fd#>cbNh0;!yd;z7Y)8LCZ?pj`7W8WEoH6a+=jH2EA z1Ndj~Te1y)hIKLapXmBIjD2Z*jbKPqn!3%55KVmXUYfvuIrz@ z`F*bTeL~tNqrCp*mk$nn>w71{umAH2^#a~g4co}>+IFz_ANHQeJ@w*=-j#2k;8+Xg z<@*;E`aDs{6onl)W}L5^ZHHRKrM|A8<2wuCp@)v5KXMksNj+@rVd>*0U*)}>n(raQ zH^;SDj0f*9Ks-ar2@kPN4;|=lX=HIm-Y*G){~&yuyiW=oyQ}a%fhx4W_^fJrSg$YD z_)(03KnKhZ)^FbU|FR~l=J_Us`vXpRGikz?stcS;Ctkodgv7J(ZG^&6q`&Qkf3N)} zh}uzzFMxitv>gJ_8T$$9u39~Y#Wa;8Jq_@q(X+Yl6XON2v#`Q`H25lMLvRfbuSrLH zX8Kv*vTfkit5-h8&3w@Fd$73+_F-%Sw#$4SL|KGPb(l|vzrns$*bL5-qtT@S1%H`nM|3Zibu;Bl2;~(3>y8UqB)pc{z1C3vfL5KG?6er-nNwu8ZN@ zP5l0a9=Ak3Xni6tLTA#xLb~1MPQT6K$kL{}q5Vjw2L^(!04s8aMV+UhE zdYlygcyJxQIf`~7?MZwQ^ZW?$p0LC4C|*tyoK+{u|M(`h%0RO z?+jwSC&rpE_NV*hLluxEx<33uCDsFd3HCRL_AchlH#Z$iztDW<)p!3Aaecl)kINp! 
z_;cNl5gv258^46S!u<85CvR7quqUp%<};1l_0Kv2_CiI*^Y1M4o};LGshfi5f9rFwcp-e$N8Bc?$I}xwhg>EHn0tG z=zg#ovvDlqY{vM*t4PN$kq+kN_O96e*`J-+bvQJtx(Z{fXy@ZwwB!0HZ<#Xt!M35# z@n z$|ByaT&^l7hn2c89aM|LYA(*xaCUJ1L!77MJRRqWIG?o${-xU3yXm8H+=AnAQof4h zaL_-e%|nhC@E#S)p)eK-Uox5Ua4u(X0mgVC7w3w4;TPV-yFDStV#=n1Cog0_D+_(4 z)qcc3cF~)luPi$=MQjkXRciDi@~A93OJg};N{v~R14dq6c~RuM8K0VM-WAAq|6y$d z`lZWX?)?DoW5PSG|Cb)eR@H1{&6lttRy{<8t^rU}9JWAGj62DR(E z|Au}I(mD2b(42qx8t^sXYv8}2foKT<+fVS*3`FEN*%9J`$R(1ma z%1+?aU5(#W z_+c%rx)#4W{MO-jC4Sf8cL{!%;&&N-pTuuDeqsD-@IyaOt-x<3exJc_J$~fg*qThY zv~O_cU^7#qsU_Z$-l+Iz0dDx?_54Pa>`1f=S$i_$#2g`RPde$&#%3p;xVfb?!rQM0nNuQ&dVlAzmse~h}tYC#unktou z#VNu^h^Q%*Y|~K*X9-)(S(9nrbJFwXq*dLzmG$cKniXr+dS0!Z?XbNN>)SWBCvRzYG);k5RXzt|smA8>;tAM8oO!cq z&E-y8Y(q<q`y^Gf@cav* z>TxRG>XtIz3K27@SX)O@C&}_l#o9L{Ok~L3WV^&;aEYM<5nZt8)F|Zn2H3DCerIl8 zlyk?a)mJV$znJx!l@~6KTybrEWX-kbFN$2V!nvg-(@de-VwuKf=5o!-I!&;)=89Er z2~F-pB`i5DZ5^!%Y9kSMHYYO59q3e+-bnK*?80GmIH&P4YpkbgFw&f8?MS4c$W&re zXClo=ZSm4Y2r^}oNfg!;beg<5k!os9-ong_Co(8$iMVP{+=5Jt#R+!6RcMtrBsM48 z<2q|$29nDx3YkQ^6OUzLYEJrM{k$>R))oVeO?|8t5kQDrnn6hK5T~)>VWPb$nQBCY zYJo!FIcZ1Dos&M-QJuyhRlz?%*iLOuB)HhQyrngf-i)NRk6SRoO-_ODN1@Kgf?R=>x}C(u9#>9tfm}>BEhW}U zyWR{djG@+0M*XY=Sbgczq08+!`y_3AllDq0V zOh$*4rPYR1XF~)xk(TzBOyRBxw^Dqa^yapPWUEdSj3Ryx91@DZ@!;Y}g@y%BHMMr8 zwO2LX(;6gmM{-2Vf=Ui$nh`rqo$XprfIwrJ>D(?DAnJvtmaY#eN~VfDkF6iL5+q8s zwIGk-;o-HI__V@5R2TV)1w?kxob2VjZ>!sm5eyE8>J%fY4cW`IULEq>B~pG7TO&%`CI9A>Hb- z&?SzwLR-RVLB@ueIQ&p6xI#C_(oO?Pkgm)frVOP3^&3QJNa*`<$g@mmO1A?gU<=$f z%mL*fFUQ5s%sJ_q$fQ^+9-!5vMPI3#htAqqrWr{>NoRbmiS`Yd=F89``#p$TJJo_! 
z?eSIZnU+*yoW8C{Su$6)`7(D-7wYf@Ygb%}t9iKL^@^Ihm3*KUTzz${LqA}q8>u66P&Vn=x&c9&UVhxbzx>_wPCCBv&WOCX7;AsBi$(4()HDX>JYpj7W z!8jLnU0mvqk>$#@HzStL`SO3BTmZqf{%_R>(hI@=;*tJ?SqZ$DMLRu0Em{LwdP%L# zfCbmr)YUV|MPb&VEs;;_x$0^Z5i~a2H{>;)M@-^ZBZtADz`I0Al#-cO--_aZj0(+t zwScXQxpUh)TU*bClJ%X==(VY2CfS%ouKm6~z$H3jFk;tUdF?t$rpt^h%zQVkpn6mZ zh7RVft5npZ8_>c?q)^T-Us>;jwev%RxwETK*qh=R6n3eU+R%JSZFa23(am0TbaYFc zCBasqhDxo-J0VtdXyDt>x(3H}1L{7yCjC&m^ZCxIF4MibqVD=y=awX!5KXBB8ceON zHaJ|8_HKLxr;=((pgmV@8T`lN;J$&S*+>vbZYg#v%36NzzSy)6|f*K3Z z7te{KWs}#s36Q7L?LKHQspHViDK)O9L3b+VD`zd6KIl(vb~Yw9QzHp@kok~64-Y}| zj${iu%qbKg6;F1uIcppWn}bX|{SpTu*am7yB{!mTsT=aJ$ks0LXnKUe&5B+fjU;Vf zDOM_)X$eMXhUPihvYIz)#42_@Vm8cI9oA0S!Z#xoBL-Gz3Z*+6&dc|K9AvWYV8bTR z5j>aW0Geu|GC%|e15?dYn?jqvh2h$I!;x-y)N}zkEI=>mM0s_NZoze;W<`7Ila6k| z!!4&9I}^AY2bY5ASTqdAu0X<+5tuy-z0O`-p!2C+!e%3gxCfRXb8Q>^?!kx*Orjx) zepn-tgFyq>bVJJq(<{omIpiy=3h&qn$^&eP!*DM5Kn`hCDeW7`Y@S!PmP9L)g2$}8tuy+zc7ol-u zy0gs$Zq#OJugxF`I%aSSnl*egGcSVp7`Xv;S&k&E1J`tB;2^?LS}T-A%vD_s0YNq=J68?FaUEvYuPovEvqk?@u@#9#@I zyPl!~Yq>cQ5eHn5(31?ES%s&1ydZK*BDPU@-He>Wno2{`iXks#uH3M5UvvYFL(q2k z6X~kPTVW4Pc_LhcrW2!;!lSdjhznI<$>EbDQsGD7z9Obp*8z*3q0cF4FTyoF&9T+Q z#)rDk4jFAmvkI_Q#;5Vl4)o+vQ@fuR0&8n!FYrSiV?eGWq8m7QL78#s=12qD0mu;B ziYh!sQRL}3xkT8NMT}v_{0psL|Wi_twqZd<7wF|E~JqmFf1Ht zh)J0cYSTf3H*HMAr0 zd1e|kZU=(YYX`v`u;%GWu=lac3fgauph1Bu-^PG@@G0CtyuytcJ2j$!u(Jb$$_Z^N zY29eBl&KW-r5#H|w>Bm3QmMAIC^nU9Pv*&!Dc5DBBPnLZ=4+$_BTn`)+8^es2^~7?}%YeP~U(vOO9^Gr8+yTdRZg7^saAFsiSdZLKQP?!%P-tw<5Y?rueCJ zyyNEc4e7`a7ZpD&)KI27YoDSS!HLNHU1OUy4EUddFu2JZXywd5jor@y|aJ*XSxH>2|jyXg)SJ zTTj7ju?Aju*aCuU)Wt?vT%*FvU$?shw7aQ_*k&-ak`Ke1!rd*&lx?=axwJ)^F&_Z? 
zY-W|wDlSYvX=(3(N(~nm5P=^y!vMPM=_jVwcnSw}o;@rqG7=AMq@7rYrP&r5gOV8r zH+=wDD;wb!a;x{|Vr_TD((b2S1#!A=E`D^YF%XnPYM3YFh7?g-I`xVPYiY%@PY0=F$(>QFY@rU;S+S;G{1vQ@uZFNnNt=y#MF;j`Loa^uScr zea`X&S8qDjyyLe=Uzqg7OqKodr8~B6yJf*!^8=ID{Q0M^99mKKPSx)7&$zMSUW8w6 z!tZMT%xTNp3jEf7^K;S5b85vmzkS)EiT}29R_xWIJOA;XgOmC`rgHa(K5>`#xl1&C zdC8zbj$xArkJ%Y>F7>zBuHW}Q|LkE8vd!E(SU~t+Iod71jxT<+(A{{cHID#(%&T*F zgM;^F_|4=E4&HOU%xvD^;C&Fkn|OnR*Ti`6OkO+-Kl0+e@ZeRB%H{@d&=bCC(n5=bmYZ*;g8Fr zF+YSmc!tvJ6M*snhwfl5d+VjX%N)6)}>Djbj6r;`$&Hb?-72_TDe&W*xpKkB7 z_CEXrgFk1_#i5N}yx2(hBG2BZik~+B#N6keDaI?U{1a_PQ1?78uz1dcUup4URfbOJ zZ?gEpzbeL?EiU`P%~PdrwYYpQT=1`0ysNvI&qEgPSX_+nv-p1QP{1MbJY(@47Zl_F z+v4|+5=!&@Hx@s6aWVa{#XCIolV3A*y&n2=Ek5AEYc1aG;d6t;?=Gf4XmRI}V*R{o@!o5S>0h__>Q%+~sAJ}S$BJTn zlEs~s#rQ0XFZSSzEdHX0PuSw?TZ{SJWN|bm^dYIpSX^x^#&=o#SW_|nw-(QRt{DGk zi+6bN|FHP|$zu9Zzclx~ZN>O3i-$j7j9+B&wI2R!Ex!83V)`bFPi!v6vlj3DbTR&b z#k)NE`Fo2mwe_j!?HP;jwe_puIg20qY%%}eS^WO>#rT=OGWXLx_3$E#%YFhwrPLJ` z-^<0QI3(UbvUrCFzs2HnJ^XiCy!ZNIJ`Y(u`$xt2_bq_MV*GZCkKJ61f63y{+)|8x)#BW_ z$3xZ zbc>Jm=zo^QJHAoOXTHS;?kL7TX>r-VYo02#*5c7G6w`m+;?9<0e6z*nyX3-Wm&H$d z`1D%*xok0?zqPpRpB6qpu=vrxDyIJ@i?8{2`0yJosLVKXYv{pMSD=bZs&ID~sP>U5vjcxF^5H{@T!ueWaLvs>Qp$T#V1O z_+x)oj4!bG8=m&kB^KZR$HnwlSzO-3YMv^!-s1AzOTiNsf8E39R*N@#+7&x2p0)d? 
zgwJk^pZx1$dA?`y=l-S`f63y`zGD18EZ+UCV!Z4(=3d^@WbTwY)8e_mE2cl+;sYK( zQ!9KPET+H4;!k++8!euBsF;3}#VfvCjNfkYxxK~smo1*{E5^TO@c~ag@>dpb_SpYE zi+7w?%;yD*W5Jd_#IOF^;!#gMGVTpSw{mwe{d9|0e5DwlZSluE>0M~?_dIrcsl~s( ztC-Jfi)TG}y~SVj@Q+zM=b>-6_(G4Iw^@AcpBKlw!{U#6+QVP9_|qOf->|rA|Gj4T zeT&QfK@*Eo|7>yDPbl~?i_3m~!G|p_`x6D9^jmW!`xFJAXYnpixm#}W=REfC8R0Wv zIEX!b-r^m9QHaz}JAU0bc{Y27C?p8t^sXYrxlluK`~J zz6N{^_!{sv;A_CwfUf~x1HJ})4fq=HHQ;N&*MP49Ujx1dd=2;-@HOCTz}JAU0bc{Y z27C?p8t^sXYrxlluK`~Jz6N{^_!{sv;A_CwfUf~x1HJ})4fq=HHQ;N&*MP49Ujx1d zd=2;-@HOCTz}JAU0bc{Y27C?p8t^sXYrxlluK`~Jz6N{^_!{sv;A_CwfUf~x1HJ}) z4fq=HHQ;N&*MP49Ujx1dd=2;-@HOCTz}JAU0bc{Y27C?p8t^sXYrxlluK`~Jz6N{^ z_!{sv;A_CwfUf~x1HJ})4fq=HHQ;N&*MP49Ujx1dd=2;-@HOCTz}JAU0bc{Y27C?p z8t^sXYrxlluK`~Jz6N{^_!{sv;A_CwfUf~x1HJ})4fq=HHQ;N&*MP49Ujx1dd=2;- z@HOCTz}JAU0bc{Y27C?p8t^sXYrxlluK`~Jz6N{^_!{sv;A_CwfUf~x1HJ})4fq=H zHQ;N&*MP49Ujx1dd=2;-@HOCTz}JAU0bc{Y27C?p8u&dm(ALtnfkCF*aBC-=mgkWB z1EsdCT61|Y5XfTz^&M5X%fo8=#|qbBeHm8c)i_>tD-|BXuYVvEd09WgpZ9P3{+{sU+$95nAjR}8y57#?ygo(-tQ`|#{V*hMZ> zUhR%^AQX9-e8{5*H18n%&?)s4g6tsa4#Mu34qh9Z zRXwz^DqzlZ1w9=&MNh@7i|!1M>6%}KJlUM=blMY%xRXgbt;rb9%?W2?BGsN~b(&gI z>5QsPCNoZ~-AQ&NQn5@+`vxbyIh{$g%~uV0qSjowV%0V4R$X?*+BG*{@Ckzn^AhLK zh$U935dMvHgw&{!U{H@f1&4lcn7{6aWfq_42J@7GyuXphLO&pYJq!P5Jh;%Kj29h3 zKhc8=eX|D_`VJ2+^m{$H(C_!)LZ3z9cOQc9@ZiD+Mptx{ibpJL?^T=G-m9qTb6gP&?Av%qJv%T9#PGT?G;ld4ZwS$fHrOYOtY z;XD$3R|&k^@)7$HK5(id@)@x7lD`t}`4aT6l)!&s`AEI`Fz$KKUa1GM&-DgYYIc{J z9akGtoehz;WIWQ+-jXTYHR0CW(Pt!5k(P8sUrC5qV{=P;B612{XL~wf`J@sXTHr@g zk;Y_OTdX}E=}5IC4b#qc@57?VT#q|a33{POb27QniWh5bO*R(MYd@t^l1QbJDf6r; zl}Hp(<)b{6r%fb9m!lkdqm4@58#`0#EDtWv*<=wK@~F3r&Hz5sUh{O5y%zoh_C9XU z!e4MXd->-q{S5G*W3PESYOjUA_^V?U7yg33Rf7Ix33>_VmB)b`JS5&`d%w`0C0@bh z?B$d3@DYEQ_3#l~&R#xGc=(8)e8$5^@IxMY@t?;$^n$-(aq54zNxJ@f(&EhbKXl;| zk5^9NN8IfS#ZQwzeVH8hTO!f3_~(Z$E_xRHQ4f92-tYC$3%<`of6U(R_s|RegvFWO zN)Zz00S_PX|4)1P2rlx99%fozA{TM|XQN3U62H)kpA~+@-R1G5M;@t1-t)*Kc$uwd zsDH;qssGAzFMl;Zy}2zDYrr{^GUsNwN+nuj^D~LAjGCWH#$%ZnY0jU2zM9V)H6Q-B 
zvo#TEZD~xveX98l>9hvhV{Hjn)c>tZ2cfAyZ%r8n#$%$VUL9-T{NFQhMSX_zh`GpG z!lhQ(yk$HRUh1Q>ExyPE%l}<#_OOJ1@C)@hwGh+@KT(Y zR83I3(BABJ-n!%NN>zURsSk$h(I4xt0*|UPb=~p_YSUa`521fpvpTl40mYcEKWc z9%i4Byl{5+hkuT=9SsHha_FaDK2beismhKA$Edt~luhA&`9z$@;#{US1xDjJc&!+v z7PHUEzUFwOThhcZ@TWQq>_vw;fH4b(38CMc3ytb~6zK_}pUU`FAnouM70|`5nJT<& zIk0UQC%6+jROq|H9xK$s2Or4yQ`P9>wRl!4t-rEq)ng$gc(RX8o;+8AU&Dv+qnzKh zJm1F|<1PImj0qfu?#jaId7Y0+?QB$mtLC6j9aVwxaHx;Z!MM#Rj0uF9Ct>6R@;eK< zOleCxpk(o!%7u6ei*i}_2Y)d=xX}Tt--)fO^dOT=xDtZ>oJQM!fKu_dryP+OVC3lY_ z?csZ<;~#-vQ*iXo!O&=oE2#N`~hTM{<K zOP~*uKX@!x!Hw`q_2S2v*2*$%+0a9_&z zP-gm`L6n8Vp(&`RD*DzT4J^Om{~X>oNV`P(nQzROfe(h4WKkvp&|x%K@hp8L%RcRG z5b{u0%%>{k59>waf4`Uamv361z;gJut`~!S^p``k)DYv?GCF+X0P<|iL^X+J;nML?NnY>M=EHaG6) zc?-(}{CyaDM_Xy%J(y#VHUi2?KkJp(k>>Yr`|h5fgBRmw-k!8^v#m!ywBwyl^R6clE)mr&+D(wRQ-2xU$*}?r1NH!p&a6jj#mALL({4UL(}`B;Pon=kq7Lj?jGcK zysK(|721J+i|aFhO~G%!y{7&i$2Dx_ai;5KrfZR!^b+QRLCJI#~)PA;S4Lb7k`U*SMyB{zIQZ`5}k2 zp-r7;(oB6#p}d%z8*urSs`r*ps=9r`IMp7Ju)zZ=)=*NNYf_9n_$HE9{&kvG+@@^Ek6cEp2r9P%SuM;>o3r!A=-d)9QM z;d9cFhRne;mo3OocO56p*q*~Rh9R>DsT+}#woRPr z$GIoKwBXvo9KEzxKVp6u9#|UyJ{9p!B9vUcII@9Q;0^b;VksZ`dTghd}U z48M`+A1Hx8QUd=w;N&m%iE(IsJZ16ctSUHG&*Kn&q@O)X-|C8zS8rQ9j)7Pn_nVVC z4H+|%oK+?8g}})_Z2d{M<#SaD`p=ZW8%yA6%m0v-U+fcOIwQ(?e+j(L@)7?t5BEIw zTl_t%hZha3{@LPjo9|r~&jBAvKgTV7h2^u>(tl8b9^+dh@;MVY_4&HZ$1a5DaefK< zwI%S6x()*~hOLQqv!DSLcEqgFis;JKprB~%*lZU+#4{1BX26n*WJe*)1|V4T)pkoP zg{2#)SoN9-TtqtC*%mPOZHczFN1Qt}Z+eI6|^J_a8#p2^>>{|Xm=nT!9K78m}~&UGv<`~_d^;^VffH7@)j7yqb-zqFT|J^Tgl zaPg^i@#%8mTyr4DUKcKPBcJWJxTH(k<4;&z(k1xQEW~V;pe&V^&bAx&uI4W z7rfO&Fa3`W553@+Q`3jYFa44(553^GTb%NL(j{k)3t#QR_qlL)xy!lmr7rrT78iM> zKlGNxMW2F~{ecNgdFHwJOt84{k^a+}78gE(&o047^37{MqHnPW$urTb;GeQGiT(ta zdPveCc+Ap^T!MF4yvLI+>F3?=NtfVX_s~oKuh&B__<)Cg`a~1)(;j-ke`;~+-(4PG z@$hjhpJN_Ag1_#em;U7&9(uvw^3X4|{7-u51%J=tUb_`NQO|DwAo?KAK}k6z-I6c2 z+Xs>k!SA-X$Se3$78kh$Uu*G8Z92qmyX^gHdltJDJZ^EO%k59(Ir+QOwcW#iz}|Oz z_zT|Sp?}8S-|wLp{Ocb2=j?s2hhFf9El&BbusY!BQ4gO(_Wm&sAHhXl(dUmWFOiEl z{wonKm!bW4(L 
zGf-|-$#s-`fa8AK-eYr+`6c0}^C6D2@gpAOjq6y7**G4y8EkGwxStP<;TPp8%*CZP ze9nWDmWQ0#CgZicZNTSePUhcJ4+$qvR^Z~p!do{b_M!l=qtl~4CiDq*CDLWBHytAH9itJZR0$wO^`4$AH#XBUxWW|z@KwD$1(r^E37&Ak5F0lF#==Cqk!HzY2aI!`g$_5#QjqxE^dm-xG*0i1;FEoL&=BSEiOAzGZ`< zk6|8kd>>TT!}&g0r!%-9-2bUS@LA;7;&2&sI7Kb}<3M;BDyv;{Fvqh3&jA*DIh8Q-Qh4>Hy}mF2b`x%=vD0(Q@DF@1U#0 zje%Wy%@5LvbskG{xM!MS*4> zo7yAs|2$Mt&G<3b+sF88W|BYRw=$#cv5&M0Gzx1)vxrB>E%VuU#=PY7x3MP{=l;U{ z_mB0u6NLXCIYykf#GX?y@4;G<{wU@!2T;ba-f#%>-MgxkdbaAlH81da6nj*+e4x|a zgLR`ry`-CA=`0QD6m;jF>vYdk%6hWEQqJ|8brR0%Kds|h>)tOtmRhcqUSA15`T2c4 zuWk2I1Fwh$!<&1r+*wCj$WSk|3Uk1;AIv3mS7?2!?%kMse+YB+%%3dIpoM&@JO0UV zKk);0Zocr$uHAI)-FC~5upjAo|F-XF`=B3SIys-sa>Df&T$k|<%0Lu)m-S{~s@Gaf z3z;<&?s7ALGLl94B|Yd&J|QoF?Sn4*YfSmkd6ErU3~MU9d4sNdMK97E)Ap}=SdN_c zu@BwBy!zPe!PpF}Ik07>@;&w23Ghh+;4kY63O4x@ZIe>Iaz>Ru9 z0oQTZ37+*td4^pnv8kzwcsHN#w)ETi9AUPDhJ185*qg#$Y|>IUET6<>Ey;I~@ATDj zAN$kVJ^5?!#=5HGa5_3YQTP+`Vg8d>uk}xMy-4?gbKxTpubzvi?2H?6aUC4i=Cb?J zgF%i-X8jJ!1=iAFjbZ>iw*|5XtH6^qjTdt*;*`Dynn|{;r>5_ zZP8acQM}(E)O+IRvjV%qYT(3C_|EVewI_--Tu0#V!dSnM`^|%OS=E~gPuFWtY^|Mhxaw2uHf4J!>G?;XLV!n`{nS1b-xXK-eiLHMOenfh&vdoj>Pc@%gp_#K$7x~_t*hsR=_ zI`(HQzZ7jBoZk+W^|2mb67p(M%dyS~dN^uzKwZH0 z4&M2By(Z{j6uKt1HD%sma9|PEah;`oe17fIHpH`g>=~1>esj|Sr1QxMm8k)&`Pw~E zO=g?z-}rpOnUj}iLz}kZ`R)m7@)f}Uh4{ojoVw5bN^fgUDr0M2#0l4jcaIqwcH z<{B-L6B+zBjy>x_<<9_k(Jb-s?iTDzr1TFT*w0bP+S<)~q8tazvr6RbE|&b!ckB zz`y1ym8K!B_|V+R;U&G``%!3n|8scp_l(X9?HZ&XarVpFG|2EjaqoD_X8T{iujIZc@)YlQxi4yMk83F1GMN65*=Hp60QJXq%u|qw zbsfvYTkt<@LvxSKJE4%SBe8aM7i|~i`##i_u(4^?*iUj_^ag`J%j;X!uAlJyfI?d{ zTlXVgf7d+!0k8j+;VP;Jc>S)yzt8I#fnBgM@D1!j`3BFxF0=u_kNt)z)tT=Gc72QY z4+OLfdvQ%$#vY%YY3vu^9s{=P)1$+8L)Sf6Ux~V)P}Z{8E0wQbTzhyV-?_G2>`>*IQJt}jPJqHhLt#&zZxmgz=aG4z&RhaFy4`2_l%@SQ`5yDEe|ak!s7 zJ$&nK_{RYD(4~e9V8=RS!heKx~ zeP*3)mTgDoZ}aU-qgdyQG!Jood=41yhpNC|%6T-Xh8BmWK05{NNCjDc4moR4f3O~B z`KZBK*C_7s3~jF|s1GZ&9#NhS9$CJlt`mCJ??M92wjTv{)nY#i`rhBfAj422(n+2y zk5T#>@Q6|_&C9Os#dG)x?JM&7=Y9guI{Y^wXL_dE^*2a|yPSa6haXr6X>7Xi{wC^^ 
z<$-zVpsZ9NEmNWE7}{u2=xVvD*!a5XXJB85@dNCqN#Fb4JEne(d7#vko#X=ndKgYM zcb?=m>oWG274~)&Y$lku8L^iD;*|Xe5vBT3_YDnSpe#T5Alv@^aQ&G6;-8}|F&$h7 z&-Tz~(capEHRZ1cD>mK*+qwj{#j>xkXH_wea)H0&n)7YgbCA0LWg2@H6vA@dFXX=+y(c1*kOMSkZ(Gx};!caw&8ktA$> z%UlatqT4xf9lIPKRG24~8WV@)90d=(7KgX>h8_sjbl@jN~j z?VIJ@bt@4ceFDP;C|~pDlNaL|_pC@7b|ddA)r{j&*bil5|2_&G4%o4*r*7@U*dfPK zjO~m3q&bMZyyX4i#Vk*Z+pSOL$7~xv(>@8;%zw7)sIRwC*W8P1#5v-@bxB+>`BEvtGQ#TwhI?8&w!4wOlO0A@7TrT z)Y?@GR$P1e<&Iev=3sI7!lhl~RQ|nWSTIfrFLvg%wjzNRhiH0Xl)xp|!&`c@=DhaW zYwIIdUR`s=s>@VktevUg3NDj2vl1RF*7Ylj9Hmp2@27G|yrPOZm-`_%&RUKi^y%^7+UbGE)mL z!oTy?3NCqL#>ZPyuodJe7XQz~&tn2QN@o7eiBwZ-@)oBhjjaJ0V_xlvTN*D`^>sB@ zu31%ARaK=*YH_6H_~!j@Tsx>E8PTNk;P1aK-#t^CXM?7Qom4463T;{(=C2FR<9j`i z%)CxOIiATA!RMC17nZsW4=((}9$fe@WJb6S!Izf6!ybGV!g@s4+!mC;x$Iyh`p=ZW+kkUFMl;SlZZaqJ zMVv?C^Ht!a7k?=4pL@In{Zl3I7fax|68LX`Gv23h=JC2Ysd31Fk>s3R0$)%9$2h@= ze6A~jHMlk{ZZVkGxDCORIDx15R;uj>>To~ zwQX^9aT1Z@4NLBgQn6HHbA(%VbXO>%-&3pmie}?Z(LO56)b)DYcq{hg$gU!8PD&>d z_7%5wYg4fxhAE@t-Xi@BZ|!CO+&&q3rxpWfsz@{5!Q1NEqExhh>y!;eg_rf3KB$h- z#Pkt2X;DlYwxGjXU0braRjb*)t@lH>YEe6dO<9&FPez_ocWE`cVqvyZTZVSz9a~gV ze!o<#eY1Q2)rhYYhUo3_PP@$vIZ2h+yd*F09xjiim@RRxyrWd7?C~lRN{@S(OeMh{E@T?qfk;|!edG?)4>-d&*&tbszQR{8$& zA&blUYX{|(N6zBpBW+INQeOz4xt9Jd4-df2qYq9^n)A;6lIJgA4z)7N`7aBZB_B-i6=b!dXx7 zAU)=z^2ZK~i~KU4aF@kJPQf2`@oC9(!NMdD9~pnx@8KiqW&OcJ_zV3r9$eDi)O;S;s_TV~H9zu=C=F_$MMNj6I2A#U2f z8dhSDLOaZ2$6*bP$glNdIlM=@NbB z?0uI<4}xbc&iuN~#i!SWyY>8-3*YRbf7*q+>7Q}o(pG0MiJ$Vj^*qtui2O26Rbg?- zN5SV>ocj;le(6#V9~svQd-w=0`&K1h8P{6t;WJ?G>pi%PkKN?qFMOgNT3L!azWwD6Jf!!nBtAHijxvCz-8 z@lN#63qIZAjMr_Sb6vQ*+=X4ZyL?4mxVwCHxbRJ`blv5`H@fisF8m8Fe87c^@8RR8 zEiUN|+w?x;!DW2)ryhK@rGL)iqEF!??~xE(_`Kqw7d}TV?zPXiT=*@nbiHSBkzdAd z$J(JIiC6H69$ey`?!iU=GcE3wXRgI1UKt;r=iwvxVizB`om}d|rO(CWt#RQoS9;gG zaCg3USX|_Z+I;D(cboxh~u`g*3jmbo5!!53Pb>2jCT z)gC@sJ8!bagUh_kS`RMsGV*>4kyH3YExq8vC+@+8PqW3n<+{U#H|m@OrzB7er>Uilw&;qH1_?1gsdE_Y%V#MeuraF+Zh z-sHk1pNX@aw&To0@0a^h5&SKSv;F@$mz*a(d}JO_-lrn+2tL-%6EeLw 
zyZBUC+)MAc=QSAS(t^JE#2+ny7eOP|P%UtqHJ_!BumR|HNxRgiHtKgrqyhKle4_ZEw zZoy@IP0~T!?Pu0nImHh5+WUHY7Ci{Q-b26N-rwY*7d+~rAF%iG-XP&Gc(Z*@`)PLR zt<}Tl8GGMl&%#IWtcU(Nd%xX7FZd1*{ULjQmxo^PZV!FV-uHOu1;5|o-gJpxCBH`5 zM%jK#Pn=o%oHZiRlhBKuNV&--muVrCt93-*o{j#ul(CxeBAxq9v2@s{+Pu@PMOEwZ*j2`!QJmka<}K* z@AYxxciH)WrYqq}m+%w4&9Qou_xgxDvL4`=5}yf@;)2kFSxw7hUs$G z@9y{0xbwHnri1iue5}PK-l)BwV9ye-;F3PcuX#4VL?41*VR1>f;8$B*(joXw7MFO4 zyZ!CMCFK8U3H*=??{MYsnUl;7^^kGlb1g1uqb}T? z-nTq_WSx?v&zryUT=MZ2o4*pj;M*+zd%dsdBYy8Op6K`=|L~)OJc7zjSwS5J#$&a$ zCv#VLUya;j|C?@KgawfETyqh|MsgnP?lK_Zba26R>X+`cY7_RL z*JH2zvM}PqR+ZpFTQ#$AK6Q-<{8dJuAi zN%zk1KJvc{{J#x)$}{-dm_4w-?vq!jp~FW4yQ)Cf8(jV}@@Gj-?~4uhzf4-_<3*go z6W_>L{_=mq9(q0R-*{Zp8J%y1E*aNWrUiQ;d0h)0>u?`^dw9uF+#kVyUbAwv+AzJUN-8Z7g(u$Q`i%T z-$y#U`x&`6m$o;A?-u38jM_8Y2s^t-(U$i?HnWc!`&z-go9Ut~Imm*0{XPX0`3k;Q z3VYMBFB>)+8in_njIQQ8Y%W521{Z)H*VNN#xX1p#YLwq4vTu)jDx1BK1l3Sw8PbX86Y>1P z;FOKIF%QnDRIuwAHMbrxWxc4Z@Vy!4dF`z=usQ01I>6rk58XEkI(^7{ndIFT+}{uP zi1be}zD)Af<-hno6n;nVE$B3gxW1*rc=yO?O`pa4PWb*c?E5@e)*M^IBYP;%d`sq~}kvYSOXLsOpHle-!sDS?(BEsj#Q4>!*`s#=e#r{cU5>OQm-~)EHAI`sNL7DRQB#7+sN-Z6eNbm$xP(=>h;751dd^xL?vbV4s_ z;~n7-w`Il(8yi(7nZ%yOM2FL!Nb5b1*u{!3tYfL|#3{ zQ^93@nirS#X@ZOV(GvV+U69ZVe_0pg#Rm+5z)I+4U64GGcx60QaG~E}=V!fmcM1G{ z56)Yze}B}jCz)s&v0Eh9!kg!F@gu#gx2doXFUNT#`s+*J(GqwoaPpV+Hv6r~Y%4+k z}%#OSxTdagj&xsKpuYSth9Y8#E{3BjvWt;=)Jp)gF5B3-um)!J{5r zW78(`di5{y3;lXaFZ=`-6}i6~ zamtv{z8&J<#tWgVdCLF$hRI}juC3+`SA=+=X*(-S^Y->miU5q!JFg|UiJPT?%oEls_IG`-uK=B5ojAi)EKC_37|-kB1R3BnVbYv zT5aVgoibG?_vQzJ1oDd}g~~JH0yicv5Uv&FBO~!S^?~;r$%6Ii=wtGYJjZGhVm2t11KF|(MzURvG2F$KM zgP*Bi<9qX$7_&MJ|6B$5Zoet{jTPni7L@Uc$WvZtg_*Vv-^X`FhAZi$4uE;dax*t{p?HdtAPBy7&GuzdHu+{6&Z{X_hYa5M@!9=D{=oq=|2}? 
z@A>6@cN9!}0e*G|F&4Wu2l|6P2a$iZ7ah45`(A(PIL4i~K?e8~e6A$^FY-yzlg4-^ zY`>Qm#?HNd_jq#|{N7f-iSRNW){ZgZ$maBY68{$=uXi)X>=5@Lo=Kj`bF(>eU$Mw1 z&l6sMwaN#7|1t*ekD+}P*OK;c2N>Y9kKrfHkI!t1_MX{hcI6_khv8>)!UbOc%ZTUY zNv41NWUqfh2jogk*oU^i`(^k;`~m1NKDAHAr(`@z#!FBJFkT3Mgnt>-qcfYTdck{_ zjMLlo=q1n{2mSb*YkFgt6Ntih@<4jrF8b!5g>=~C_-0vUzl@e+7KX&N##y5v?kmYPS7OZa^;agC>Tw1>HKXRrhyVx}(wzL1*Iz4jGX6wS1ZAl9+g_K+eWIuyX@2QyuYb?Pr;4t4K4)6} zib`A$6vdx6({>k_R@ll^AHT`#e<+o+uWGZI_K;VyQ{qaB45+8EywtOM5WmFjVeD4o z<@g?b&efs5if^|WI7_{b!dIr0H9OzzaaL(hOhqtfBz(o^T>2by;5g2)Ie{;8X=5t! zljrz#`}@)(?u__%Z$mx*#n)F-B>_V9?i=%uA%~rGp71O7+c{tC2EL&DY`V+{HTF6uZnMxp&hd64V z7`H19AL1QfI=2@(Ske9+_=fy7#ug#Nbr=^HInx-k%(iFN*tFE|1o+C_P#R(7TK2Di z++!Z2^Cf~b`sJ)!=JQnNez}vl;Iq;94>%w`$>(G2GHUwMWB4(Z@?pS7d_Ut1-^anc zpMgAYI^}{w4aXvAMuH>0N4&0waE?Cjb#2Eva@gyt!`Zy%byeY9g5S&Vt!;3ATF&@oVu5e=hj6_e zzdFH@mMWY}{A<&biWplLIu9h$sD}%rG7{7Yk{A~k6|3%i=jM? z$Gpq;ye<#EdNXK~;9Irq_@>H*590h1#_+dK_rAIfzHde644*)F37f_~F(yCIgS0Pk z%zY-1M}y!adfECV(|;7air1BvAKg;9@F?2I-Cu^US8^W!C94oZ0k(zYj$6OK5W;kHnLN9LQ7l1^W|k50^=Km$?nWr7ba-GsSg}?8j3x z9doQ`Gm@Y=D)%{to;uB_82lsotOTo)V1MBvEE}_a(dHR z&Qmyl1u|tWHkj`;-DRMOqx{MBQM{|Jfm}0z$^8sDL2vzG4W;XoywqY ztU);*gwK7sE&;u~551rr?Ga!625C-@#BbRVeBamqSm{ETF%PC!`U1Y6X2{9@_j!HFm@S|Fd5`+WQW%ujZ2*< z*f(${n7@s`J6tAyb5q}jUhxd?WB-xB zpVN%k!n~$} zv2(cZ$Tc3!NCtD28zlGwf2K0nDZfj{_`_#r#D^@r!!~TFYwKze@5BD|=_o{JUB!v)2${$@hRXi?kTb_> zbdvRb#rK=fv+AI;BlcMSW`!hX+|eeE*Wm5p90BJDwjCOM1eEa8I2uNiAV{@k0(y{#%L+ z0r@O*aPnE@;H39B{-+TT=lWPTUao^Bp5-6H=ST>iad7fE;oy{~Zz5w1&cp{CoO}*B zIO(|_kKBmgqvMz^&hbqb-y1@|-@z$QpMx`At|M~!W3Y16LHgVfoa>7ii1b`vM4a-( zL--7Z;6~T&xO{RQobeuZaK?MY!KGM=zgFUlGy?J|bOzRm$3t+gBXZ^B{@ui7j9cO* zuG0|N_J{j-lRk(y_)ARA0pe6H;4|>c69VTz=V45IX$b!55WGGFZwbLSh2Vb_f`2^( zmk*h-()FDX{9p(!9on(@ycmMN5`w=Sg4=c&>zm;rFzY5y?pEVg9pMG$695FI`9x>3 z9ghDtbf_=8Q7#PYN1J8X{tJI26OGmFaG2PxzWZ#yf0m7g5Wuu!Jv`W}xNQx3^&YPG z{7@|$+Z&-9mTQ^+lrqMHy661oe0?AvJaTaUmN@5rh0iAf68|rs%Y3H&Dm@nNIZ8_&EPC3~xn6EhHBwlXuxi-t)7~221dU(;{&;G@6hd=SxLip3Z 
z!%f$5ophu8?5CViobnTAIz*pJ5~M==CB)hMF&y!aXqp!(J>zA6XNlsBmw3J6BL8Ab zZ+tHNCF$(1ZPupHO>atZ#>;+_Yac;;D1;B6OT3kqJj=9@NxXKsYO!#;Ty-c;dDt)O zRGfMsPJ1!Zv%hxOp(jrNOd^k6-cC62lK!<2KDK`*n~%|cq42ls1??#)C;N|arDwW` zGkvZ;DHqGn&8lmLBi^p~apli=*$<^3AjV7lElYmS%EtnIWlFm2_GG$++v$xf&Uo3+ zovk?ICC>OoepR_2JDE>{nH?xuITW^%tO)vQgF7KlsDz77sJp8UY z?#M%&erQA<&yt7!W~fhoSLN#W67@hlPjS-oyKTBdPdx6>^LuWlLr;9Z;vy%%MMT_1 z4xjyczr^7~yx!tt>zRIKgujj3{$oC2>8D%yxaE`S6CD2yzvq|^;{2Yb{sgbI_)}ie z^Ly@)#!Ef(JC*)pm@mYSTj}*I{_KE~55H@3v|~y>#0wR7^%D=lXIr?fxA}@QUVb-w ziZfo~OB{NBPcL=oiPLV)Ehn80AAWytcK8tAuDI~A>uH~b|E{Ia0SmXw5A#9t(SF}i zuWr6G-OP8EAEu8u%MaxhT-t$*%CuC+3#ey~AKaig^-R3Oq35_lr$bLXrMRn~ZVR{D z)%_N3>+MCwT{$y~yK9uC;;ud^FZJ_=>ZeS{J4F5}#pKjt;WqyV6{nmWN1>k; z%1NAOS02~T3dd1iQ~u;b{G<~v$5-BR=!p+0F6p}7O4n(JPj);;KUf$qamFw5>nDiM zjqwP+#Ny9zg4^Ys<2ckC$8XA>;G*;!UvW<0!# zKH&Rb@aMNVADkE1G{8PFJNJu_1@^r0RMr^;PVvekz{cDnq|LiF|X)91iRnU}Lv0aUN{s@?~AGIZzqc zY4nJ!LW^Iw%Fu87e=wLAKYi9bc%MJ7fAm9G6Ak;+8HHl+7=g`junvC+>yv4x{SItp zPKzxk)~|2LF_&$H{odu%#YTGkz7m9M|LplI+hLz9cFd1--+k{f)d^%QgDv4#MMs>= zA8n6($hU8K8|jM2VPjP6_2+`#%!GaM%z=Bwwjj4;;NCwqv%gehBI`a5TZb)QiCrRg z$YP_|Q#w$D=Bm2~HpQn;4v3cvwq64r`k`E?l`KWm;d(xo*x3&c(yE@JJy1Q9O&x^k7pPU3bpVaJASyeNn>C_4;dI&w0#R zQyy2laq;o+a%b_%pWvLF;oCG^X*TDql=ws*GuPycy=kJ@w+We&*crPdxqI;?5?=-O zc3zBlBGBW@xn}vj(er+E9QIwiV@0nXg+0Z~@q$m;#+n;%T_=@|n*K=X42=?YhJ0FE@_j%C&Hz5n?-`6n#W#{K8 zM=!x9C6aqhZw_plW3Xw?KySBz57+2R8T3%DC4cR@CHJuN$lBO%MLvtYQfhoqH>B>s zmgRuh6N`P#;z=VmFA%Ut?2ZN>S=^aHnc4%pqw!*|67g-CTc0+&Kl_83X8CZTX*~z| zvO20ZHf(BE448mN26>D7{Mu6E@Ba+D^OaQy zEB+aLe+ICJKDNc{I&Pwq>Oqr;|DY%_5xj zT9Z31Id0#5B{QGCw{JtlH}$;_vUN^;GVvbjnfxXwY zT{*KU@VS6?E+6eg=*FZKs5D?TkyeN0!r9MoG#nj}(}4S-&Cnu3{T$+dUz!QHSe)#h`x5dY!jn z!xz{NM)nmVP6X;6hEEo;9UREYR=BT#cFB@9=rmIoStvIBuuI!Dylv7YVh4ye>)V;d zmA3a8(2TZ04D699 zlf)r9!mt0N&W&VNK{3|zJ=`H-d0@Iekd-Sc?d2?iKjQH!2_jna-hJSL$1=Gw^SM5X+{CE4? 
z=b0&k;=>2ePT#TRZu{B&mz!P5HnZzazn)F^!$>(t+E$#4^ogI18gt&dJ!pRth_@K; ziArgU1n-HpCgA^QOHNT+3C=ZGCn4`RFNS&q|4BcKn$`y=R(=d)LDdnTe`-ZEaluM2 z$D31oYqA{e%`VSi20ay*+nCru}^Q-u#!h+?Ri1 z0{ri!wy%(PHrDn07;Ad|2K^X>gTE%w+j)N~yKd=ezn^dn{gmWPud@nk4Q81MlO8|) zp;oL5udXuVCt;F&SIO+?q!M$^q|(j#E32BOFNh8lmzOTN;kl~B1q-5waXt6i=Sm7p zzTB4xmMJceoGdOEIc0s$i_nS0k&Aldw+*Kwt&Px!)GKMnD^3@;E`_YJeq+|=@hcyy zFIph{gx42ln(|1F*fQqqlR7vPGVO+ZaI8{26SB&+@DkoH$@9&VSDMLY*Y?I~3vwXa zOqFfB%9f+D{VQn2)>GCiNxyGfPT^Hj)@pC9D<6!-=0wqMZ-s4oDb`-hz_0e!+VUE- zuj%nvgMqfa7w3cHQY$14QIww~o?{)-9O%4kMowh;mioXy5896@Rme}Ft3h5K#5yaX z+Y&deDe$NR@8l=y%BAhzgLdBFT_gU)q+HaYEQpLx;+-gEA_g8(9;BQJUJ5@`;EnLL zLHLtFc(D&3L>yI+!)nK-ATD1H#Qpj~^l2oWvffMLJcDx-ZDo!4;X;{}G|D*we_R#7 z_JICrp+$SJ73pT1Anz4`dYtX{k^K*`4Idtk^@?q{k#;Q>k#d;xe@OY z@TEO<0ch|pI`vD~Qm3+QvYv+1w> zcF%9$o{Yl3o6pBji>yHrJ!OW+`~83M4JE$d1miaVnBZII{C#56EPbtXp0@k3SkUf2 zDeZnC+Wo?y-G6ej-|maOd=ARU+c!?qc0aK3Pk#{Uu8Zv&K-xT97oFU;Vp{6!E2g*n z;fnL;e`&?Po=mshcOc!iXi>WDDwA##ze9td?}pA&lg$-T*dU|qt;o!X`nJ(Wzp{AY zH<3?+GpZJPc}t!vF)@7lUx@w!`XV!;3#0EAUv=zfm0fG{O#W~O)=;5d;`_JzmAdGz z%(2R@6UW3i80^wFpR;mFf!BNdYv=S<<>Fm^jyWZC@@xry$1BV!;Qi5aOn>R3$dsCy z(bl!nMnIPN=#!3*U-R5kDC>8kO+-CvU5flHeeJ?2vrb+(r3Bv{U&41r)#m8bng3GX zS#@B-@{-)>^;Jzp(@JxvPrEbs{Am^NNr%2|_0LeQ<$I^foIfdN?XBf#-|}~(-?JF& zOwzDxuHQUq~9P2sD;Hz+T`J6gUqf_fNyqh|XvSf2{KfAlxukNviO>n~y3 zKfUFV%JjAs7ff$_q{93Cb%W@q9(3w0;(1@knN7j>uGC$zVGsH^2IU+!@-rg4WIe>& zs0Y)fu36Bu^hBH&!wudRb!ee;HR$J+I9L6%(K0!L$Q@mcMrczW@L_~-u_BRT&) zD8&9<684`SzdieRceF8xdMx{Qc@oQxCXR?0@h^kFBPi`gv0tE}B*=`$ht zi4gpxgOmS|gOmReXP}yRCImm>;N*YO!N~`Gt9|^%T4xW|M!70x&9j~X7NWaR#MZIFjOxmaE*~mm$@zZm%HPX^9P_UKB@ck_%hWu? 
zsQm2B;oqN`IpVZ7bsw=TFMxjJaP1mq2C$KN^(wuV6YHf*zeH(Ve5vA`&u`KD8}v*& z0^-s!mqYk04lez_tMr3+SIcz@Tz_lDpD7H;e3 zPze5v;*|fS-XGR8=ooP8-J{WBB5AtFE z!c&}blK)Z%C;bf$&Uo#8;-yXFvDt~2{gITzhd92E{X^vU$OLDu6Liy6s7*H0#eU6n z#hEVROo!-SN&HxtZsHee+)N+wQpG7R@sB9Zcm%i0VOk>-`Ky#xP7gZrv!BI%$|*nb z0~R08;`5Y++vOZ%hW_E|tzKVgF3xzEF7jHUm5q94|Bn6vDJSu2r5FBUbHSt0;d4Uo zTl7pm#5Y@f#P)&*#&G>Z_%9}i^ZgcX>p87BOG)VLYW z&3D=}FkbdsUAuVV^R++f>aAXJ@?k%>LvhMa-1di9Zt3TM#oyM?V{FcS;`hgZ;*^ItKfjYE zJNA;qXF7I~{C=A4&=a4o<&^UCyK1K5l%M!q#YKLPnTzu(3%Bzz5rQ{axLqDv6sJ7= zp6gVc@(}M++|76DTX6i()<^0|@Tes}^&xl+_%?mUbQ8ZtamFvW{eFBnFCY;8@SP(_ zpW;jxzdIjOoarLYv&+9xzxT+8-fmm4TOimx{DiNJci2Z< z!*|;tu7y?(o}~?xYbjIqv0uY4vq4-_e!@~W#!A1rIH1_8gpz*g0y8|z?NVT4#cwcK zAcr=a!43XWZUU=tAv|6A^E5o;BfWbs?WD_7Xc=C+R}6j@?dp*5a5U;4cAFxA8<*U> zc`tNv4d1EtN{W&6$0{W>;&j>IYR^eORSiFmLl*c|Rx zC3cjw^-Mr7k`}SAlrVDb+ThSPT2FNPf`Zwme`*}I4WBi=8}Gnesw4MBm`es9$UWjW zO!P-PE7%r5zrj9pZ$nnu3rXTF&QbeX$O?U&5W(%Et~iSlelcX zojvbJ{g2)^4|DPP$h$`?(vj8y*z{C9A88%L8T|Y_F=^xbvqZUry>4V(8uR#qNja)9bFOjmb0+zOYq>9wa$XqVQs#m^ z-28cT*lNJ1v-&^{>V@I(Vi2kI*Dg20KbY_K%eHUh20^>gR&<|&Z6BJ1ZmB03%q!da zJ%zH%ycZkK_;&br?TEC(2Yqz{vfc^%nqp&KH?TRa%1gwLc(! z@4y4U#GQXIB)SwEd0UQuK%Qg}=a#5>^&spL{si_m_H*AR6!|L8_u%=Ts{cmeD>h%G z6<>TkNQd~XG_d`XIO8}=oMNK|y8n^GE=Q7&)vzCta)fqjpX6Brc_wui?}35@(kb_% z7qQLa{(lgxCow#Hg*-Qn9pwhs*?MmI)MZ>V&>jVL5@uf=Yy*O_6Ww_ zGokX^waBA7WB%e=*wRS-t~~kq=Z4>|di5ac5%&U1MUD8+L>qISU(X?nmJ!>(GX3U? 
zZB^j6Tzu*J`5M@r6eOdm{^;b$&QGU`hl)E(hEU&DjAiGUt^ewm@j}?CMi8giLrVHf zkOnh7IFD6cM5m>og-+oo73oJCk%0V1@vazzeW=JUGU}R+Qx`$cvK|5N--0CS?jG^a zh&Dso6%TEOv@7lTcNW#5jEP+!`4mE~_x4x(crW@0-Lf`iJ7lgfIez&>{5?o}K7H?R z2VZ%XMAdAMZf zA4c-7Of=T_&4TUkc;vI${Q0|AH_q&PpfNsF0xXU_-Ow(hKEgf{e8q;h0nbatMg;ex z+uiT@@2Q}y7=1SdzH?8Zy+`BV+nPx}h&}D`^T+oNZo(dqusa&Q-kciTgm>syu-+mX zS$FFPQ4jC<{N1N-y=^Ft_iI|p?QHRp>0Nm=HhtgV<*?aW9Nkq79sUq{PsL!@dO&UG z@LY5RxnEDihA}lEc$S7us_?l7`_BolH0;{HC4B?DXOgfdEP?ON2>7IdbwQ6(*QDI0 z(Z*%!q6?40PR!tWW=(YA@Ve+i#NqcvJil!-J<}maCN_TGtq7a>_3#1I8-L%@NU>LL z;{KkGWggO!fRD~;rZ_*N_EoizDT4eZAAE}!$h%J7)naoKMcw{9(pibNrUdm8J;gZ# zzaB1|20hLPZ5p<)8R!me?G%jnubYFlDiX&};afKq$-U~GTQ?2CzHd&V6zzAODZ6Ph z%CpK~PJif1iAUmS1+QO$cCa0GU|LrP+u_?-e7?iq?f`6=MaNrljkuo`AH5msTJ}ao zyl9`Nh+VJ9Q)~*>VGg8v5bI5@hODxeV%#8~W!(w#8P|ws+8oyR(KcZJ;M;&5{anWo z^BTN&9f5A+XNJqf22A4K1A8{)Lyzp2iq7hRsG0twY0!HW*1t$TZk>d8IPwrQew*`` zny;XL^jFdIcK#IoHEgnQAfe?4JgVCz&$YazU#qxE%G(#Lv_`@E72FSAjeZ{1<=g|F zverk+%OLFEV4HW~dv6UtDn3|6=V`3Z5WS>vFM50K&EQ$F*sY3w#QqSr-N*w!Etwlp z*6~g(Hq%}mK%F)yn<>1LJmf#FQEp9{L3%_kDK`o54f=J{&@Yuf(z*LyhfH>PN&maQ zhr*PLL3`iHVgG&0@XLVN^%%V0*THv%)c5ny-pKpnS9tfp<{ayG&dv7I=o9^Nyfa={ z03XuGN9jYO9T5A?XU*x#=M3`f)1EizfRNE@gD>1ed*(WifC5*$NYUI;kR{taznj% zRrR;1Y-(!0y`esSdt<|fxa?0^-+cRqz~w5e&gvTK&8ORwYZ^XgX0^iv4gV*aOJ~*1 zs*9UT8|UKBM*NB6Fqd8m3$@?DFAma)1`gEPR)e*$s&D9Ms1qNrcrejy+^}wg#DVQE zH?D^(+jv5@%Ur*IGYZ%+aS%QYL2IBc$YcIew1;e9T?wj=8&SJ??S#$)`AHkYhzZ)r+) zfd>*J2uSMTEq86lJhM@>V@4<%;yx0`uAx3T)jblzF|>mvVcq)r_s;d-+`djmLe4(M zvZIUmWWirML@=%pL3WNlNRLnQQ3vTyjzT#e0spcNPI`3CMjhn8-@!>g+ZoIt{rnJo zm4lPdeg`L?J_jd#JT^*9G&OQW%-ryn|332X2#|OmwLg)uVaPC9r z@;Mwrec=0%BZzYl9OY2QsrZQaYzHTw?0B&TAbrN6C(e0b zmp|u$iIYFayNQ!e+8NLz{ayzreWT7#x_nwf@XioC6@u>w!RhCS@lwuvLg-UE&P@71 zey1?wp(HbpMAlh`8^^-qA-Ie?%YL$KXP&~n9M|AC79Z*K2tD=un2MHEoaJFZ?&avf zPw3fhFrJ;j$KsFgd7mCZ{NuskWQ5;X^xq4?UkJgEhTy*r!No6v$j|&b0XjKkzGp0X zE(*aHgy4%q@J|7kc>8dcqbWFszun z9nBk?nu5FF62wYn*Q- zUZ~HdJYK8Ua*FG91i0OLY*t+6TdVY1PWM}QxrMVm5c;JS{)|O$<3~d97Zqo^PU?L| 
z&rBC_o<)9{o8oa=FC@KJ62!S2_j0)N$V(JwyzB>I0hoUIKkVCO{m`2o zoc+Oi2WNkfc8^RK`?>p-p6McP+e6yv8gTft-+IX5L;M-VrCiy3p11JJHLY@@T_xpY z|M!H$pZIGQADchssQg3XwfPqY9j2_^FZ;!D#i0RQ`@7Mck zJu|(1dcVq{Cw{ZyF8>aP&w$={I(&$y9Qs3gzuloH-s9jIy}#eVPv||%5z8~{0MjkF zRzW|Xm_E`k53J_J){Qhfn=!rKe?w0>1#Eua(dC>!|&kZijxoV*DQH#KDlDJf`jywzf*Bne%o)f zov!%AfRE5G4lez_OB82%`Tf0AamGvhE{l)N|1}Gj31uF+`W;R_l)pf6#>?+}*B>+S z@(@1N4j+yO-0aX3XTGs~eo+NwJ`mrnIOP@GuBX(8Tiz%y`5aU}jGy?+iqlR|@}*KS zIW_5r3iZQrix$PHPvV`53m?1u@HyjU`@^V+WAkGF;CB%5s}-NE0f>J_aem7XzeREQ z3w>XHL&b$?;5_s!BggmlAG#sAzJZDOZ=B-@Dedphzg8#4lYshG+az zhefUh_}Pc2cIsbbe(FQs@>~U6uC=?EW${C`PtO8+!EnTv;^Mu+|5xoAxi1vs;n|J< z7LA|Jd1iQheGv9#&EDWb__=u}&oi3-TxB55k?7jhzck~n?f{t+=gME$3_1ckx1n~4 zCp7<82W+x`-SF~Y;M4ElF0ot&DE09CGyR4>?%O*=U`rx)BnJN6is54@0^3Bf%@AKh z1F@PHvh$>3`?VyPC#5}H5_9T%Ft_W^fm?Ps56tmS$=s&cyLsR(bCy}V7WyfLeFnto z*)raoN?@+rzn+L|wV&F8{Z13`7v|^!`Uwrdr{n#ZSAFyt;7=D}y$T;dAa*nze zf2TG*5j@0>jcKF(qh&KQ%Kj5{|F1siXH4pn*iMKIuh{MGnGo0|!d{_U^dmN~ zNQhr=&`$VmOa`{ag2}z)F?>~KX`~IxTt51_k6kqy%fN0WMSk#QCD&R0i-Y^O7Dskv zvFk?SK-@hNzr-!_wZq=Sz;5MIl=tN4y#BTUR8IuHJK3+8+M;&eL?4~eALb_}S#_3x-A7z}tbIgu2Aic!q&1;-y;hm&aqW)iLo^Os6yYs#kiXcS2w(V@ zy+YD|0e)i7Mm=PZzDHmucvS3=fD3QN^-K7;qHUAJf#+zaQY*wiCw1b%_ACMYiLL8{ zMc(I3OHuxL=Daz#eGdLtVIx#L!L$y*_m#A@gRof|OLpN~0vjQb1=l@kwHaa@26j`_ z{qu-ht_!+N*SS5WOKg=8=KsiX)_YCbZO9&h9&4pMp{&3j^%TD0PN6?B>K$dcLBEV5PvDDNY&Iod24Js@#1^1zy_&#~_2u$DgAU?2Uj*B!gy<00lD~iU)N`zn0^@ilf8@NC~m zvS;O^=mS2UN8+El@9!X|lwsI?W%Y^Y*|h>ugn3i@H(nezkvU@1hqVjaV55b+xOTeN z`}5*xZ`I_=-a98pdMmJIU@q!MWpwB&$$Kd)_$@4jzv2@3&0d^)$8#n4-PxGz+uB&y zXUh`g=Tm#{IRn3*u*FNE91s&-ke}E&N<0tad1QF_Dz6|&lcZn5iEg|+@09op9kW~@ zQaQ%b556JG#MU2=HIGKtD6~N?Y3szEmwiauWc&RulqazPk0Z@ymG@k68GQbs+!Vuy zH4KNV;gfZ_Trb5n(-L2rC{JRVIld#l6MM72Pz?X3m0~Z7yiSWO$nONo$3*1GZ72(p z-`66~SPt%l4idm|KP7=OB+vJtu1MJ|S!METQAVU(okUwMc7G;X{K6LSlm6>g^fQG% zjm%uM}G1gh!iu{x?LMt*{^d9j`e7+1?CV5|}_j|x!(uS{= zNwN;0I3D|FKQrWyl+UHCuYv8d=yxh~&i8U{k=HNp=A+O}6?7BWS5Dnmw#4hJEi(O8 zoA0<=@(wbbWygAEQ((h-8~Wh#{uqF-M6?5Q2F%cvTM)JbysO_GF1r(J8h!!U;;0jt 
z3+qqdS?Y6z`Nd7}R|;M1emByZLcZ?4!?a>gjp|e>@)hL{oxJMZSIXK5*xlY4nbH%T zut35KuL7iB{G>v*?o)Mj?h>-_1(Zt(@0fgSYVov@Ka zdUkJs|5(H&*T44Lj&rl}!yfputwq7Jh$;F}#a)rt&w-u$GeLa*yIAZ$CA~tM{4i+? zeA-JuEB3o3c+Xapo7eYDLz!)gv~GPHzQctF@Z$NfCoV+#emByJ`)c?)sownr^nrCJ zsi%>C_({$E71rZGzEhTbL(s3pS@I#qc|lgb2f=&y29$TaM9{Chxe2H;QE4l~oVHvaG7>T2ZuThW4P(w9gT-SD|*UJKGI>%{EzM&LIz zkn8+xzT9NVHTl&bp7Wh}hR2)M*}oWDhl9T+&k?;`3!cUAAb(E`S7U4=n~ur2KP%ls z$e)+do+1rBwPsq+6Ikz4Yg+gIe7O4Uqp*|yY)4-@*IaQKc;XwjzgYD5(r|U>zsCE7 zRxp&8DN={blPD+9$!txhT~4Ho92EbkejWy8M9SqfJN?JbmJjc(XJ@Pnd>t3VZ>I;p z$SlW|2tUaVKR8DCoXGNuJjj$m+^1`%m=hIKra&i5OViJWt2>aEqqSJ0Xr~47#$XeT z_xPN;#@h1T(b&3!(1~|L$xeKu_oI)}FYSk<`xozyl&#lZ976^t4F{1cZe1gEe-2QSgc%#ydLhEsPHjm&F@v`(!Peqfh7teI(`cNyssX_pb1WNqF!fj|BSD z@y5C#4}D=TK5mySPYvWuVhzMG^d<7l-e>a$2HtoO{o`>+$9VkC!7mrTN%&2~Z!UhH zME(5W(d{cfin5fqvTjJa3rw(m-F8No+FRzJjzxQ{IloNi}V4cYvlb2f2o}Vg; zL;qM${52_4QuZTz;lp=7zL${xm5n9k(w~j(l{)Z*UkB{+Q_~5aoBpo%DU4SxNpCOm z`J?|MbvuRfD}5kY3;n0KM&HSg;~3UG%UH}otnra=_XJ5<5T%%km=1WQ!W_-&F(wS?cM#IK)>h7dPVd}5S_nPXzO=^ zyf}n1_cqp%MbD8nE9d*)B|@`li+4BDiZyGg75VsGfc175)9X%{bAKf3a;`_X8kCVZ z>cNUIEv*@|Qug0y?b0 zTGq-bvUc*^edw%puL8fnN2j7B*X$~R&bC5+;Vm+yz6-z4zaD;+GRZpf1j=4<55`B( zKiUeq1oHhTc;h|vHMEHfW&PrD?59+L_v#6hyNdEiE9y*j#bf9{q2FJD@{f!zeL&HS#iX8L>P`6}1s)`u;w2%Wj!G1z`hWkH8nA9^1cWyn3e7-e4B+sPoCg9&c z3HhYoBk%P_q`4-BeUwlqKl{y85#H}}WWA2iOFDjp>pJLB^6rp^7hJ;2eig!FEz&1_ zQQ;wet`nWfp)`E`1>+;EQ^NQ2NsMPf2Mt(r`X<({qmPMwu*|LN1@Mz^%dvP!TW|A_b>*4PHqph8(N2A>4flU0 z`7LXy9*OK4#2UUY1D7>uXa^P+Bkx2%#mL`YoDK4;Sn> zFZk_G8t)Z+!xU5?jPzxWHe<~vWS4yqqLL=o%`T$7RDfy19!n0o^ z9Md;>JwA!HeufZ@RYp^;*E*EllY#Mv4T3p0b7<`4d@f5r99Rff6t;JJm31p zxLxrhv-;kQ|vbc{g|1sy9o18=kLWm)d zKbmP*tlgZN30jN?ea$6^gkEY;$0$Z?mvuYHFyVXMcuv}Jg{ziVZ~|FI*7ai z&nc3JmC&o?m5g6Vdn9f0R>{jjzpu1h@~fgJ(u%d~)hUc|=i**?3$3&%LBbEGL!?H(=AE&fe(q4*eiKx6= z&wKUOe+$}|V(`x#!MbCl`#8QOjJ{`1gnd_l;QbtV(P;I|6D>+rh~zcPefi8FK#dpw~v zW*N>X+vbxvFTweGoUg@M=$GOwVXxQwWjIUt3Yd&Cm($4txG@fR>7oKapaWXm-8zAnV*d(wqaPGnH8xZ6v z^Y`W-P|cMn`e#8T 
z#AY~8$uZBF+d3|rKi^EdY1Z0VEtk!zzj{{7f?4%T0W6(%(F5zA=0E^ThBZ)n4s-~hg^sjHSu1v@TC>*ieVf zI5w;?i#N8l$ui|p@wCf==#Rx`wVxyZ>pN%FPsAo3b$#V5I zD?S;Q<Bh~M-gZm;rc2kvKhwGNmVoFcp}6H1jed2rNw(Lm zU2Cd+v5*R^l=gGs(&i0KUGbah8&)SbHg(*B1=Ag?%SVwlHLt;rCrzU>W}ar)rDu_k zossj*nug}}4IOP=BfJ}vS*=57d_K>7=HpAMmv%0_=Hu5bt;QL8vQg6DF zvLc`I5PHwSssE)8PCh#vob=rePI{9|NrE%+Tn8tgJO|IxpEH_ImfpchUpR3zJ#pH> z^C|J!4n6tLckqDTsa#-2K7EtOIXIKgfP<5MHYPmmgZO+0C!cZ$7q!biHVMV0zbps& z(BE!=j^OmG>*50rfAVqtyAnSfLVv`;DSyVn&xHUYKk+;ji2P4F^a1V^5`-cDp)u*P zK0co*4bG%5cW}nr>ENVKIXLMD9Gvur9Gvutg0bakba2x5IXL+bI5_D&QLJ^4eyM{8 z^f7ZXAjpOwpA-fu?SuG^5WG7Czb6Dwhv0id@cm=rxo7zbm;s?#mTdXu`%%{YPuC)J zLLV*(p}$e-o3tHRs(O;O2f`lABe@5HARWg`>V@(MC z9ZJu3lKT9r;w-mZxAmu+7v>-R;}yl(-clz~43O|zN&V9Wz(qf= zDVWB+9G5H3_Zri6T?n6*A$WZV-W-B=h2UQf!S4&fZ9f*+*0E;ohP54mX9l@jjoaYP z_Hn_FjhZ#h9nGCx0fG3Rs0l8!&wl@bBwP<9xUJut@PVl-W^?(6kvm0q;qit{K@W1`BS7nDAQX?KoeESC53&toO!MDBd zbVKl(#!)Nxa~phhjF}9?7LsQWnnggIe$e%aWU}lGTC-mB1k%&d z$NO~U;VTk6#f1u{bb@0V>g^qREvIh1cJcc|@Vz1Ufe?Hk1b-$3$9#@|a9tn2Z}SxA zx<2BCii`Z4l&74eJ(k0zpJ~yr4M_aIu*>id#>?;L`3@iAWg&bPIehrN?KymiFA3pu zgTsg4=gS;E#ATvR4$*_1F8UK;y7p_=JmK&mULV4zNpbS&Q$8&YAL1P$d^S6LI9`x) z_z>S7!l%dKb6DlM-{C_%ZSk?o3Ffo?gXM(u4_oxMJ`X5PJ+M6VIedse7Q*K#hY!oc zA%_p~XF~Wq@9<%HIO6aj{$dE9;|?E|2h3%T(hu?1LioJp@L_ova`+HG9l|FE?^`*j zXO@Rt#hG8k;XBqp-11-G@HwL8q|o6*JnqnElzz5DPaO8-*?8@8RTe^DZqeK2%2S-_ zJ)!X~arh9&+ zX9%B^!-wN>I~+d5?+W457xH2ri=J~ho0koeNMc@AG6}M%fnM4^oJ~ZJ0A}# z&hl`N%K5y*hxm~YJ{gA($0d(De2AY2;d9dAvsdML%i%+OD1?vEa7-85lN`mVf8x1{ zyY)KHp{M=}EqYu3vlVB&@!&%}>lcaf5-$tkv&iAYaoZ(|lMnHwA$*pF&{tdZc6n$~ zobq#AIOXsmzC&^GxAOU*%KCr16{kM}K4Li{&SBxSLoXBSa_n{R3j^5fckmgC_c{2* ziVrwAW@`N7kcGD^Ca1#|E|*v6vm*{(sd&c0s}(=t;Hwlr>EMlu4>>r$gV}CTZ!JpC zc8GX~;w-1czo0nlAMr0M&hkjSM{$-f;@?u7`A_^W6nFhVah_n6)&rI+j@RGp;2eKX zD9&9X`b8>Ub0Lk@;JuxbWX- z@y8x6{vr5Q3-4B(@^e0>$Kg+Wze7J;(|f?7CvN+(vfHr}O7F@u6eOf&ewQ- zV;-c3IOl=ruZsF1pF(XIN$)A2xZlyKXQJnRO_({gCdL6GdPhHAn-ynziEmfjjd!o&qR+N~rT>@V81MHr-r45_L@xbO#f5*u;?t-& 
z^M&)KEs9f4;@u8C=Tmzedg2EZcjd`Ad^rDl+~Gs~bO;~HC3=u9xvI-Be!)Af^pc<8 z8!Y?=o!@cgd{}Ynhx5Dp6{miP^Gtadh3)5S4MChA)CmFd=X@~tQ6_)l^b19L&J#bQ zd`M6Hh~kp2)mFU6Eqt?u8}<7n^fq1)g3k@Xw};^A5d5(a{P__4LHUTrQ9M9X?0&KJD-!PJOs~X1bX#ztDVP`iOr>#}}9m!G*M3`@58v z^p`0;;}_h1Z)P0%PwM?~M}Fe3DK75|e)EYg_}t|`ah}OXi?^XY2J@G8 z7>m@7g5`}k&n};o(v#1C-fwsK5a-$DQ&;T#!SU25AmgnOZjOCg8F~?+~t3l z!=H8@*r3lp$e%dRE}xSQpRE0d+Ix@>@jS&v&L&IFxP{kPIK#Q|?ofK_hjt-7PQ1kT zhVVZSf*-N)4ojYsiZfo?oh-w`IXQ^azNATUH(yRDPW_Nit_}o|5A9bbD^7aieQH2K zdfKl%ra0+|A8~NXnFy9*`0oo_|BZ^1KkZ;z6eoY;+e7&DS-73=hb-KVw?*x37%%OC zIuvKT#Ceu-BJZ6yv|VEUyXA-a7F~J z#KZoMRzLF*#Z|3AAp9NDcwcZHFgyt_!o^tepIjDD98p4vU&aZ68m~u8!v%XxR5lt@gLFnZ+7Ho z`WgNfT(~mHGltjCagv`!E8jF@h5wnxub+*f;U%miqECB5!_U|6BsodDNcow6!rx81 zFescae=(2XFn48@?$KD`4=wkDo95t@{p*I82f0_GuX@c5*G8rB&?930c1*o^RzdT8 zS`VKtAB5il-Je|jjm5E+IbjCg$iPplhxLx|{nsy;Cm8&-B?P+~K0T_ueh?8P{3YHwYhP4PW&Z#y~eA*E=<{vldf|SVLQiHO}yPl`!T2_^n7>3myow z*z8!5{FsM5jXmt`i?xwL3tw0sXnndW({KAD6JHG_kjL@^v2XBva|&zTzm~X``+}zO zgD`=Q1&QbAZmj$KCe|mz&sYMyGw)(;>G|-TJC<=GcyU8+74-)5!oUsF|Jme))k z{Y=&iBTR2}aM=g)zU;Ah`~p8~Zed1dXIi+ zLT{<~UC62I%}mJc9efhLD8Wa{hKF^v67Fs6olD*?VEuUp`|3t=%#R1*?^FClh+nj$ zh*SJ|{0hGd7Moq?L-r!*M%LVd$JmGXMVsr(ajFXCSbVvOA2;zuXOQO+lvk`V1|IS8 zv;5jYHjyhR7f%fC&po~FlB${Y?$|=e;od`{x{Y?KM&bDkOtvdApBHr@ktWby@$nTid7e~zDLh&I^>tF z0?3y^`h@3Q@RcEcAw*tTA8&H1dNY3%>rIbejB`$J0_{mk_K}7^XQoZ$l(uOgm_D`Z zUC&G4kyrpfcPKXr_~08ArXpz1GC`U0!$}<%ei*;LVt6s0w?QxB8&GtH^d){bc<#Xu zR~)*{tOU&pY2z%KdCCJc!`IP}^_8_nIFV0ZBjsUo*##V zk+AcCA61j=io??!?EC@Et_i6aO%B zEp_tT0=z4(#d&U!jsu^X1YfozZ3*anA4h_f;1@yN5KR0zK)=4MaxeNkn?~AE(xg^M zSxllHY(-kq{yS~zKJiN-ZFU^*mjv2dX?M0NO~R)sP@2GhCu!{V81etN4!$G&_Y2-J z^aESz_nRQsAISUL9H@tHm&9j-YxrLE{mF*CYy9#By#(nGd~Mk88Hr2U>~g#SK?FXOCAl%=GM{) zyApVJN3{-6Ht?46{~I0DpXhBWWJ@C7;)pYie5QWlH~4Z3?;dGa?EFj38tLoX5l8GB zTyJ#ldFyohM~$J9M$Rt3cKe}q?ee?bdS+j`bZz!S>)MUC!+K_4y8LSGht{>r&z@_t z$j(Zy{p^48dYM5h`dc_QZfM`w($d_9ecKn`K}!rv($pk^EKc;@iQPJso|s8{#VZN71b+0y107fJmk@4$s;|Q|7q6uo`3z> 
zDfdn`-4|CJJVX5dk7u{-kuL7{tZZT$B9T#O$xqOts32w}{ea~6HjZn~}K z$U5x-=TR{I7cqxI@WT!s+(yh12PYp0HR>Rq8-nMJi8~-T27J!e59KLzf-qj%(NZYV z4}{S>QIHSDFGd~24~5{g4`U$G(>~0_Ga-CvALi22KFq~wALio58Th0;xgj|1!(2YJ z4|DN&2%p&@IPJs8hxtPLFyfSx_C_wg%&7vTUloEULU7tGx%_Fj>sHKkVS= zA|jEWc$r4bcync=ZLT`z#JRIW7S{7QJi) zG8TSg2woq8Zvrmzv%YgZ8V13}r>ydGMnl z^lyjYGeWmKgCGG2(<^BMt ze2CxU(DVD?eutho+E=Lp3c!^I|T=>}eHPgav{&N*) zy7*mKra1W%Uu5yI`7d+$^l9R%9X`al?oi~ENk1N3M@V`2J=)^%A>L`pW48~_Sa>~| z4%DZh+}@-KU{fI7G7cLlj{YUUVa~Qy&(A$pRDO1J;w?16em6LLd9Kq z<~n>ht}x%>L%ckM&r*jE#~W^N_z=flqhsm^KKuQH>EifAqr-=Ii$l-xi4KRJ_-4gj zJ#;&KIG*9IBP4FGBecsK*9%g9j(_ZT7P)X<%Rh76=(hv|DobT4*!2tocT=pA1ls$BmU2dv)&N@wc^Y-;-?j7 zJ`jIbaq5*g*Fm!02);~d<+Mu2r=&dC@Arh_%twwt)ho{WMf|Q1K52&!$E)@_e25>g z_$;yH>~r{VeCsKP5Aj15pT!oR!ww&ghaGYF5I=75vE_No!V?z%(;+z5HHx0?{#9Xa z5Lj@VPdo&lYvFc&EwXT%&yo=QW(&8|)fs|wJ*CK5XQlTZrDr+eIAU6HmJ{Ok`pYVd z&%+L%79H0);P4?n;LvkC@{mJM{29ev`42mMIDUD=;Y0kmL%&1ie9fUJeo}Ea-W&}_ z{V-j#9h~tlRotz=tT%#77h}Hal=Xn^0M{|ITnnyEmmi|h;vWMd8zjp?xrBaY0QrBZ zSHV|VIOP@mGZs#L2)^9HpVxTZe)0*$S*|!9{F>s_8}R}iR~0>1TKxH3;) z>O*jW2IQff9X>Tm&vdxu;YFoqdEmHvMsb!0;zNo{x;`01>Hj^gxLXbx&W$&&VVSPg zDktkP@pXzXb?7-x!8%C#FDQMBL;rx{DF@%H_&pB(q~d!WoZ}Y*4laIPjh zSUBSn$Sx0_(#UDAUJL&lEu85S{L>cRXVL$zg%4QxH5Puz!mqP%yIfsw;X@X^q_>>U z=CyaN??~2S9Tz;C`sYSowKX&)=XGGj(aiHN+nVc>9ZB5yX9;lGyvy`i@C3`WHa0cX zG_9?}{w)n=UTu4@cI&sihzqjE@q-FSy|Zv@Jo$qo&zTp@4dCqGX}FU9fR{yr3$FJ& zqy<*i;Yd6TU#RpSQCy3=qWE^ucnh5e3{S#~a4}Z=jq?HuwjUC|ltYGR{Feh0xfbAO zAC2m#K<0t$LulpTS;~xDYxR^z{&9i}ut=ylzSjU)~90 z#;4XZ>E^j<7X~|ulrYU3kos^(cX@`M?Lw74obyN7fI;un#Hry3W0B z;g-HwYbEweh4|fK=bZ@FR`s0ORN4FH$|}gZxcBY4C6MyTfS6M`*#f z;5^9iW3hks%r1Gki% zy0AaS9-GHtpFa4`#9fIzZ(+0$dt4Tn)||o{KbL{->mX}|iM?8a@E0QuYoDFDGlTup z@5G**uT&tz@TkRvTJUJW|ze<*o;6!!ht zkK4~%Stl3QH8g1P!OQEv8|f~_dblRhm4?Y6z5X@Qeo*v=G+^K0*ArM*_h#&ZIVDJk z$cQBF?8#5vn?X5h7C!M9hp8auZ|(5Q7`Sx@GMj>sc*cCbp*h|F!{2(@i!GkD5tc3B z)_29%*S49aW>^T3ohZq9yZCiZfe2oJ2?EOA*FpAv(*AVR?8>!@mAi6M~-z 
z!B2+ZoabXID37ZO;+*Gm={e6woO)pYpSZmL#BPgtfj%IAeix9Qc)61`yu>T>A22=fXqmVdRkU`my+L55d0@f~Q09_iO)> zt;PR0d+!5ZS8?6@&N-58VLM=3@*iXu9mx(MN}4*s1}n*}w`532NOM8BEv0FFWEo;G z4Hz3@V;UV9H{r&4_er2M1oEDa3BAwK&3!jb?9^)0A_GlHocp<86Ut5VE=RHxNSwC( z#{!Jr_cwdi(bl#OIBDAVKJW96Kl0vt?U^-e*37I~Yt5S3>llM5=v%_q8VJAGL>xj# z2)9T?X(V!Jysf*I$cyrqD)bSIbsLf~E^ijwFcGLw_P?1Q!N$o7>ZhOi5e&XD2A9uQ zv3`u)_CLd8{TN~BfATM!9qx767w#>+jkf~?rNToOUwp4nP3l`5|bQ-)bmEOjGP3Xs#-uRzO z!7cr=6x`b7yZ{$J(J#tBi<2-un0&c0g~#BJ1i0+yqd@}e41KjvwaLEV%3&a`xjsP}38T`%wxB3YlRCxc!t`C6WnYOy@*ofUKXA&3Rk1k;Fkor%00(p zj{mdw>2ghm3a<9OFXS;A1eZ-;{+*^@;}>>i{A!M&aXAQu6F&H_%FM zlr%CxxNZy+`SS2A`jyV|TYE>qL`%B-d?^2|{0El>oay{Gg#5go^s{;!Q1g8nrgxS< z<$gLPt@0-2l=A9Vu<`?8a+j;l>H1f){|mwK@Lz>2@k`TM$^O4P@c$1Jd3Br4FIa&= zdAj!hs^FJWzDWP^|J(2YjITNKG1=b7)=cj`xx-D`+paz7eqZ~hYV2uz0Qd!Dqw9=~ zOt9693A>K-EQZ)e-F$LKggxGhJ>G^rz6N`ovqZLBJV06A9v^A=@OIhFvXx5-TiNV+ z_ANL66Lwt*nnu>l=*?pLufz8DX}zuMj(1?|QVDOP>8uod4ej_E+G(qvJx!0-AA2a< z+jv`am-f5gK5N3aAl!vl$5$@IrY(^so1s2oUplt*3gSuT_MhW6k8lo!;r}7{kFBZh z-Psl074KbNFjzGw_EqFgjB6BK7eU!6JMynPN#^<0c z7?eGo=M09@jMkyARJmFGRW+MC;N$-34&Onc{i+$*;8m?d4X%FEC9jUJy%+k08-1+D z^Ji9}UHD&N@3m-C9>=v_SM2T6A7;8w-ng}F4Renz43=9uuD^CeP8c?# zjT8Lc&L!7z3Vfy#D_L1T&!N$WPnbJ*@rOQ=yR5ZcedkSoQ`yy`>&U&2jy}^4@aKnFh3O*kO%8oZUBH#ZE&)!t}7@nS0<2iit39&JrrCbbW z42l2$m)hUfFVv%_!vEO{a5-V8N`GGk_@7sR|LfW1jM8=GvG1+FE*OGemMS984>9bTc}V~B08iKJL<*0kuZahS-w)6f(&(}zz^z_3zHO0iI9czbe2trr_omzcIkAf8G(E)B8^hzAcr$ zCp_O4uEu|Z+kO?LT@0V-Px8(337+(C+?^_SZ+QMns$7HLAK)rCmnhfXr^~hZ zQ1B%GEwA87zuf7bXZuTsLi`)~556AwWGpiK(Vi>$4=m2tzPlv*6BeCff13XwRg8N! 
z0Mc`)a3p^Ua}X@;e=vjoKPD50v;rBPws4;uvwGhX0O#o;@k`hL-|GBT*z+3qrt_(QS`Ts5rAf0|z@AxU+o0f*@o#mH*Ao)wD)pNS- zl>X6BzWL|m$Iy69*T0hgVNp=w|60frzjXPP{11-?{$G>GtJ`#b!S=r6^!*Q|Wy%r% zB7cd9KA6clv=ir8#^>o}Ij^(Y`yGfyXg*O@HR#VN#s5&6f1vx(f_FIpN|oX&@QMb^k4=NT z?XBds_gS51%GqkAs(N2v_s00jC&0m(w+pR|0_W9P8KqE$=&2na-=Bjv2R~k@Q0AUD z-nz`f$?=s($^Qc9or;&;l%MBMcwfX1l;bSm@#${!FmV|x@CngZeVoU5wrL^QRd)m8*SddEXNt(K3*+qO-sj_`pW1I~b7=x+E>R@$r@Tg?bNBUJ{Sa0Zif< z2Crxq9?`Xvd&QT$C2fg%tBqAJ(Lv@(JS)96xx8YEXQfxi%RC#y@3nne$Uh_IIRbYQ zz2*G&9-mbs_c_iM&C_>Fw7Ft_s7v1nu^sg7-MMGu{@OvjN2l78^dp?ttF%hz`|MdC z=DEs0=TW)D6P+cQ3vD08%Yl8Q$J~gl|5r?Lw-bW;HlVXckT?s>=y>ZNDV6p!2ZjLSc! zKh;r(edK+Dek8nKV7x!|hWgVjC%S{Y$kTrsNhg?tmwD>nN0}M}5%6+4`lHUL908w| zAD1`Z^IWpZ=y3E=aCtt%yP=0D^MV8q)Gn%jnzjS~0@@1TD<$~Uo$&E42(&>{-<$DF zbS3GO4^}+f8hF^t^Xc$T=X+1ZJ7_AmmE+9asrdMl(S<#PT*Dt~M9)QB1;3i>nsBf8OKIEFCF)J_6X!P-{#m~Oemh^Znz5|2 z-M!x%n&WduW4RBlJv-@B8vER9shqP%3+;lP4=f+fzqP!Q56WZd^Jh9@8CwTV?ugIV zt`Ns{x-9YOjBY!Rp0y9?i0R8SHtoz&5>JX7d;69gTF{2xt6f78?NCr#w|Boe>Dd}#@BM9Q;vbOgHpDvcby%dddJpWj5)WJBNeaj z*s`^Z-?iS-aqac~(82f&j`Eu=?{}@8TRHk9H^qx`CVx!F6nRd8e~c)X+zmHz{NwdR zx#c=Om7AP|vmlKTw;3b;Aou%gFK+*>k9@rKWA9DezwZxnoEPb=_Uk+%cWMDCx)V9# zH>aE@Xc$kY@qcmudnG^n^iztXPxFiaPri(8_>5z1YSPc(wjbDD8r=2+r{T6AI1Nv` z{tb`q2TrHA{lICs*&k_mF155^_-#LMI=$@&PQzEF@EG5s6x{G{s{k)lfZtI8-ctd7 zcM5Lw6jN}c=gA82QU&-k72wZSfS*XgjUHzPWMHV0XGL5y7yq;LxAHU<{?Ar`S6WB;T7~p~R{{QH1^BTF@YgHA z&!l0dqW`=K@D&x{AFcq`e#@!wcUFMkQ~`dv>qcz8BFs8IR*Lh=wyaB^W3{g1=B+ne ze}e^fZn&O|Qd39|v zm!B!a*^FC=yH!q+PLkA7M| z`1SbK|8MJI=1;fz@RE?m;5HwAEWpigZt%+jT=*{t_qtq}fG5k%2e{$4ayM3h-x%QO za!tOb%e_05-tZR#Je^+SS3kpV>9ODZFP&cFS3kiei|ltI+z4*6P@qLT>z9UWoEn_C zo$@cau9n3AiI6@`XFh=^$^YvU@FYF9ewL=k*2x4<((|=|$J)#EpsxqG(PQufiE@+l zKbpd0deGx3xamPJR>1RmfLpnSry9daKhd8o*Y+D*dc$8=A^oxdxB8lXcV2)S9)mBh zfG0}faY2u`D22!18v1$u5w;jwbJRY+e*rMGhLsF42t1Uxz3zh43VqXaxT-k(jtrGwe;L<0U^19DBD zGnWhP8x-8KTs3%2tO%ClEzS$+=LER5ufZ1uxcHo;GkyNt@_=V~3Xj1*mV%qSTARS1 zlpA)=ob_89@A(uSgWLIY>He@aq&Ga}c2B{}?VEyI`Y)v5mj2-YSGigPv|lL!|4jpO 
ztqCS{noc`^&iHTTE~=3Jya2cKMvt98m&Sil0#B08%M$P;of`t&_-6fQW2#((Z%d^& z`fp3Yjn4Z6JWc1;DVnE6x`^%D1j&0|I_E#8J@Kj(%U(9>Hctih4f!Yz?1d5I{{DDYi|Ocr1R?m zZsi)C2P&k0B$eLy@MwkfrVnP|bC|OHXfplR;9$cS{8xu}mxT1z-?oM4 zkEP&+@O)XgT6lh0$yL*%kO>W;FqP+?+wpu zQ|S$UWh(vG!?T@VXmlDpAKs_yYwctG;pc%Dg^=Fh@}uc@M*=QTG4#ux1YGj}H$!^k zhwvo(r`6l=|5iwE^%Gp6DB!nxSo#}7`frDFtsg!Uo*xNUVf7G-N9)^#!;N#Q z?rr0*OXuSd+c8|V?um;x=2tD_@9i6Nt1jXXA=9h4oDAR4?ohlBd6iE2-vFNcUZ?*~ zsmDFlx3m9x(w;%oyHW}p)s8Ti#f z9E1Pv_w4Mw!nc)o#GO61mN>A^ovj03dNK(oHLp%_Rn~o-e1R}h(6f4j*l?jY_7XP0_7Nd==LqLINbdTwDR(UIG3G zz*Vl~q<-hc7q^}3RQUg-0$l5j!m};pxAl&J3hBQY(yJc&Es8HLx&Ee=tcgZjcOzfl z^ovDlc=-z~Iu)h-4VXzE{YjpyG8yZJt69amVS9`@`3;%Q1g=zxpSt?far%jm#-vi{ zOQHU^A=FEHPI9g@{z%VB!tYL`w>>NZ?M=XEhkIQPB;ZLrxzHt5uFXkGYU?`j}0H{@v-F`|xk3faM@cRjZ_beLd@`F8`P0m4J)+(^!u9 zOa9D%_iO-a{Hh$wZ~fWyuk;-LY^q$tEgZ?8bPB;L3=O2095h>s>SbDz$y z*K^K1ef`PKZ&toY|M9=hS*WgwT6SjqzWeR{oO|bHM=c|t>#y&0nM+%H`s*8A^{iu6 zh4#i89Y0<*tMRt##f>!&*6*BMXkX8@(Pb~qzAwISt64J(+_M__D>3Ic)eP51yWC>w zS0{Ho;B^-Y?xQZ8dw6hf6Z(km8@WbL=1=JQtm3t^4d`Q&f3#K4_!{?^;zSiQdX)13 z+c+F8_RclPlgdJnh5%Km1^L@WwXn#a!IS&8YTow@L*xmI+`A~EYf+0?n}DVN=J2I}M&el9s9~G(=H`Te8+#=UPef+r-S<>h{#tmE<)w6bfA-mY^i_0$ow=yH* zu#Zt@CpuZ1tG={rxqGhTwehv0OX=3#=r(_s=lf|d>5Y~2Dt(LI>!K4n;h1|jF|)IY zkG|dYM(A#IxP7!hk78DH;XKPYb~M%*OZU=GmPSJzSvNFtX|x?3aeKaAuAxHv%alF2 z^&I4d)y`~`Hc(<!X&^y{<)hJF?Nx$WmhUsYA!iX#2>eZhHp3 zU3JM(mqHxZ@9Xl9p0+}}u1Wqt%dh0uu%zJ}{`vph6*qrsL&tXaTQ~U2+MBQ5qQ~F8 zdc&vUyR{o{zR80=bkntZbA`@X)J+^H`^l@f=9rMK-g@mHU7fpmi%x9h8*t$zU;Ufv zWlF!a>~C7P%6jEg5=wGw+LZWynR6OUuP}tBpB4=euE+^~nX5 z5+`WfYs)u(>e_AU+3zd&Q4glmubT{^r>7uF^XaRv-lU;Y&J+OayqC;F6LDVT@CTMV zH2Fk^r~#*6(wv^ai(h9`SQgf_}p%dgdkIme1%^ z|J2X+lUun~i9)JegWEoEOK;<pYhz%FO9)27p?}k*unEsaEo=bac=3$Yssf)H@(?+ z02s}D9H7!C0DeVIZPk~qR?(hXlpGYKGd^?cf0gX1q-xZ~T8(fQMur zr~bP#l)I`7iEk{wfzRbeb3tT)a6J%=+r8mgX_f9)1m zyL+9hJ^mp6-7VSdp|*nSeIQ$Ve2uF*^vyf(;*9u3gZs0!hZa@U?jF^-^x3N2kkW@g zeV_KFehWW!314?dHnaN){z_Hu&=Xnr`PFX0@dvTtRQ~_SW@Nhe>3+28Z2bB0UfF!+ z#JBLn-B0sUG)dQvYCO>PJiZI-`|loxz?a{ 
z2CXq@jl;cdVl5*FU-$9XpS|e0OtyHq`_Arr_CardHgmj>`vUyN$NsZ}_}B{p_po38 zs2voqe0^yr?;}sFM`YZt!#ZcN&eaXF&d{s-J##;#U-A4?!BddG$nN7^rzBU$sUq^WV%tZ z+@%%ZivOA_z4V=_;2)^~|Gf(EjlgAxS-n2SvwpYoHx>R}z?Hrx@L`x||68`rEenUQ zPg*g!=2||av<2fQwtNx`2>#8)k?qOv(fe1O*Wa)u*k1lwAEJ+u(A!dwi3J4T6a7c) zJz1u2NRLj|Cl^!g_YKsBdtH`=d(*F@@Vq=+&HgaBU8RSbPOPMl>OX!ecUFAs|F4ca zl7}<>m49^!IBwGN@3sUynf{IhJemIS1Uy-;3+XG3@tj0&X^je;1UPbo*Lc3xCM!lb;t53NBXJ__y{o zcu3}cxwsf_Ui`npjvI7K9>+zom?Gn(C zBK`$*K93-Z=@l%ki*5{n%akbbv+@n3xEe{{$N=GL@nqJ%qF?DOztJB7o2vW+q5R~S zAZeLy-Rmd%gfmUQonNIi$)DBVV86{x8m4!azY_l}zw##gx4|C`1X=q@Etfn<*I#@| z(=Hs31ea-_9umJat(EM6Vkqbj6M1!;&M(-rdo!nP2W6^RoUf z{V0ciFTGuQJo@_P96B9|IS-?icnxA`stfIQuJ*|t|9vm->Pin~dQ0zeoM&CtD;=`L zxlE-snO@%YWjLqW?se_1iuFNs%w{*kt$g`6&`)!myOSwC{~-F8&dWYn7=AvJD?LwN z>&fhLCpzZExbn@>kkSl~I=0Of+J`?H>#>EI0bb7cA-(9Mj-)wOqvLVDT*$-2#N1WX8pUdq~%ef|M1H~Ds9Xh5*+jR!n z>KyBKzCBJoYwF~VA9&my3!;|Qb2vYov$ZUuC;OxUyX+wK z+Vxz^Zicg`mhrpOxP{J-t>n*zGd~dtDYN} z<<_3uF;wm&Hb(WmJUgGxeSJ+!!xu7}M~HhWeT(~=?B;IW|0VYy%WQ7c{ollC#P1I; zar4GFC+mf8RQ0a-devjEf5Y{Dkvb7`)s7hILB{E`#XZsFUEG(Rz9AYCEuu-ZHB`;m z)Jh%l;P^n5Ye|kXJMaJGj=zh?+Yz@pKdbfDgJa`sZ@KQ)Ro{mu*|<7S?N7(fy5Nah z&zbi^)sn#{&T;cR-8F^75BWAi)>Ig;)@Ubp>>uF1$Kv(Wufy1F*4TZ8F(#aM(eBnx zcVQdv=PW?wGr3ww$khq@4|)0eRMu_&0{qqa`dh&BEVPYq&EqQpM_r-PP0go6o==ju9NhGcd%?~5cI`W$QF5U;y8Q^VUg+U|%m=1TZKqEAOd4F$LP4(idmh3^nnyUSaVmj~;idt1X5 z(WhPfo_<+a8MlLc55u9YCATuPL*{J1)u1m&n=21zc+LGWZSz3e1;_6omhkRHZl!p) zkiWCx|BF=%d3WLQM;0-DL!JK@XzLDT5R2NJpU?b7zct^TV8eB8WdVOm7P#sZA(Pae zYRCNS@Giu#cK_Psyrp?=<(J936ZmUuvc0cY>064izbcn2wCAea_F?GD&2lZJqp?iP zo#D5JcsU(aM+2o9XADHlJ9*lW@_R;Kh_(kFFmD1Qu7ma+iuV25<%+>*SeD&fGsCU@ z>G9T<#%b>I9P+8*-EK3nGHWAvUG?nk?bU_r z8y)}9v90C|cd46GwYciW&$ci9An!9(iDovN4NPA z+PL%@_s>Ef_v#Cca~3YHCN89LTJ_>x#D>&xMRT0B?%8uTzmvR$>r zN;krspguW5{XR(DqEAv!XqW#|<1Auat)lD}p558fPvSmYqOK0TRC4CvkH*(t!27$A zPa5N*UpNcYFVA@O_vjZpdCn}4$7^Oe_@I^KHl>l+F3dU{WgQnuo|5~b7^KD z_vj2G(3LsBJvxKNjQe)1GZYf^{we)d{LaEBYnv0uI@O1d*N(kFtj>`d`s56EdBclU 
zo2k^Kx_-{<5El;|U5#$eJpY}O<6qMEe$a3IyeL=(JVj!hz^VDH`F-Rueo1c>JmUP< z((2Y0`h?fRG_U9J@v3}m?7C6j$?vYTO83BZxm5-D0PdF6``qS(GpTEJ)Y3gOmR8i2JbZG; zcm4eP5IT|S(UHpx{#^Z@JV$9?$#v3KCf2q)ZR%Q zMZ9@`oS!QE+y74P_?p+bH4eqUoaEtAHzeCEKSz5kv~Qt`HHP@iXl$zHS2!P9y53mT zj6r?3>c_~s(s4KRA@rrEkUPJ}m9-)1Z!Opm?HxCm&SBT#8OYxux^nSTgN;(mq96jqTx(wm{m&q|Glt zW-~uRPtP@f7`0q|Rn(H<8&q*$t?J$IJ5ft#d&C%FZwhHfvHz-guXeV2=v|39APRUi z_BbPBkU4Hl_={}IEP1;Txem{iJ_|1OuZXnvZ1el3s^ZwkQTTckJ1t3c5I^WY@=-oi$Z&2_cx*= zB9GdeZmX(2TGiOxS>xtqA6dVu?YxKTcXH%ZBl2K1I&6mXe{C+G)ZXOn1ZN4}s1NNy zCt6({J>Q{NS#;M%-k08$8G?_rL1t+DbD|e{v@|nhV_yBLNWDhSx@=(dtTP5=qmQ0- zIrlU9Yv*qUe^>BVGcbz3Kry$zU4ooW(xdUQ8hT{s7UsJBieXN^U&T8w&uZQN(bT*5 z+B;}DN?jfbeW8=Sn{0z7=FKF|rKg#%EtPNnY392R-7s0+jML1Q%+o|%_SkEIFSM1$ zaKlBkzuF19O5@pk2-T*M{KOh#}l_xoxeu-{ED+^XX$gnE?N5<*nsGWNf|z2&pz(wIeqW#N1V~y z{|)!tPnmmufW&r_`cnzyid)HHXX-j(Frc`sp=o&v#itoXT9EE*Rks2>ddTMm~%UsJFjK# z>|)-$hWYb5h5u0ho$tMU6?4EWrR`+y)cdP>r!-&X{X^e7cNO#ObB|EZW6K`Chc&|s z+8D19c^>&*ZWUv>>INSKXRFd7JAFE2TSod6GN|J^ zd~4`bKVZ&4CT=d)x82$bj{J4lF!P8F_>}Av_l^AfoxZy)J1H+^xxI!LK&_wpl8JE&9NNUykCmtdtbR&OQRI{=JnB*0$9%5c zlzkt6F`W}=iscYLZO`OXbd%>- z@_WcNzYEvZBy_e^;TMbIn`+jN>qh+$Zn}&#vml^5Z4#y|^t3hil=L#?J!yC3}yK>%W^R zo$_q%**wzo8nX8X&|QRP?QcEL_uI+3vzNQ?Qa_FDiz4ao{n?Hqd<|fCW>GXG8}ttH zjjI00&elshR;_2bzx9sB-8t9rM6%6tZuJxLDGJBPS2Mke*I|`@H;vQBUyqN&0&ea5 zKd4(Wzv`;zh;pVJ$P_@0h~tx|gcm{>Pc)3VcIFvZl1KzrHL365p0Rx#K&2ZrcH0 z8mjAWLcXnCSGCxD=I#0h%9iQq!gY(T_{uCdx7J|;K6w7_u5A7BEV|h0hdy}x5Pc(0 zIVEJnx1ebl8#XVU1^FD}2Wln||Lx%FHp}sJxZvO~Nbr zB|MI{7Jhu!yNA&oiVNH-8sGB5rxuik?XKegNaU1-aqYsL0SoPU^agnS$LGRDqD zuZ*u~B<&Bvb3Zf{Umlk)t@jA99Ai!Mn|-54?fBcQjcH88c2{Qb0R7SY>f4~#{G*P% zQ@7J2zMU@kLNxIB`&@4qW9M4l+PaOQB!Fg9Ap$?*#sX8+nv zTGJaolYTHedcK>vhmqjbDJ)$ach4t!>uSV?o@Vu25ndC{cbC0+n!^rS7y$yyxvw=7>)hS z8{=PEaN7=+F!-J?euWj_`BHYl?!tBPH$(cbyWm7A_?*U(@5%%2z5;NS>-j>SQao4m zl}MxanLjSH@Bd6@@%(7z?w8I)hXhAneG9xfY`F`L?>O?s1!qqxrx?n4K;?kDkN28y z#OJnWb{vt6DTFlEFTTl`UJq=Yn{m7bz8Wvk6Zc#Dh@KnaWe2jggbjZ?a~1RXVdl=m 
zPqViDG&~TFr}-|j@W{6{LVsROTcMY%?Laqq?cJ_-om+f-Z=#GGevV`rzfBox)2ChI zZmmT;O~2Q-Rz~m{9mJn&@@9m+nn&wg@3CdBm%h1qWRAvIf4%Jc3~j9YtLvgwdvem_ ztB>!YZ@khIZF^<<@Kwit)6M(z(hG^FU3mN<@L5YP?1V4kSvPH zBm;8Jjor?=jlKu{G`i6rP&bWft-tk0;D=7(U!yv_3I6!I)-nDhZs96`{~6(xpV6It z-9czJTl#L=?br*M-djJ&M&+x5`wXQJGg!P61t^1Run!AA^T$ z&zC5t@x6uiW5Clolgd}xZpyYgMI+;7It5c7`eo+N$#K82-&T4@{>2E8#U3;k3-x*A?MlXdBQij=x!|vjX+8DKk$l#T-OTvRquVyoCo*-J-Ru3>=ldyV z0-NmycAnYFH8UXH9GsJW>f=Sm7r>OCJU$~OVm@Fk$@@a{;%~S&ozjrym_r-DrLlS{ zu5ZUW1M5^TFbAm2Zqh~cuhIwH5HYEBPM!GlsS^H~Fm3^~7 zc(DZ(1N}I4DtNuJ_V{<lZeIf%bj(*=rUK(k4Ze#w$a5eU6u@yGHI<~!j}@q676KXt|y=7JK>OTMY^WsjMn@FlOCKuIgV}= z+o7c!D2IC6oOsNq4>oE|yf*HAc!@OlL;be{GiSy)h3mC|FLMe0li(ZrA@x~%K6+qP z)Dkh*xLl#VKkK#^-siS0rf-Qp`D;q|wr=OW))x=2TgaMYuK0Yk)SWUm1KV_*mG3)QWxd(Rvw;q%;s4?|@fYxwX$$+@vz?Au_D^}3(vS07XxYmOPu zVk@9u-c784Xe0i@x1I5|p{nk9hPA3P{lqFMf8p8j{iP%L6VAX+2zANBi;qL2=q-hG zwEzAZaGvqg9Y>lwU9bEUCFZJO#$ zwDrM{PW0nvsIS@fKYdpXVm0RHGkm8{KAilm8&9b2&rtWDzUz{KBK*_%(b&@XVSX7h z+6pJe_m6YlLIM2*JEgZFv*`Fkua2)B`6@np)_&0+ynOu(b!xz7ZKWNRKH|EI^`a+$ zi(c6gdj2qUYaLnl(x;DA&1lxX4t+Z@wp(4_*LeOmG>K<%JGh~@12Y~L$a|7`pXYy* zpKP2QbCmhf#>cZh{!-^wK|pfu^FAp1PN<)+)NaU{NOR%Qm{%pfg#Lb7>)Vd5Bk$OjKWz|4B56Pnnu%z^_dr*GD?z zHAu<(0%h29ye}ti7xKO7&-^mg&@+`^%KDgS0gbIN|-HBhNS-yYG2lDlfE@l1j zQm;ez<*;G?0$Dyn9Lj_6@(Jt_(|;{~vV=cEG!~%w<$BKZSb=XB{V0RaxRJku$VV>y6fcciqOSOw|T|;dt)C@G7eL7t7F`WKUxzW@23vAp7!jbXuJ5a zj&+r8=B><y{-ag2oW)-8{=S)6L=iiz0W z@yGvqxpMN9-RHz+~yI=Irf1}@6p?d+gM}%`Zx{`J+k=> zcy|!JL$Q^kq>KIe@%!@Jm5ht_d<}7$xvC$W+!1tfzpn1Q?@s<6>Q~lR6r0HWJV`H- z9;{eRwZZ&d(Lke{e+d8PbANek2%CLI;HTVTP$rZfzBbF$R z?YF)uJ-Qy=NPpHC7hk0}H@bSyE53EJmN}vv#|6$wJWlJLSInS~!`F2)-1gVxXRWIC zzSSQQw|5jD>nP8ytaU}Yu6LiFbdTqXKLx3#^i@p_4L<&xKxV|rA+ zXhs$f{a5Ifj?Vd6hZ;kA(d+0#1rK+ui^c1zf=g%armqasH`Gu5Bj-@+z51nSR+=1f zvPXEIBknd&-|_v0eYY`BV_bRq%~A4=qRUFIN$y@0+Cn&t_g40wc)n6U;^6y<=ic+u zI5MPeup9YwH)W}8eGk1D`20fE!e+rfO};PjzMKBL8Xwa}$sy{$s8-`58dEIT@4kTT zft*#GKlNMt3Ujvfu@Ymc@bmE%qi5!JuV%mU(x0RAG1g>%OSUy|?ch2>%y{XFDnEzF 
zmiq`iQ7lPZuLbx`;yxyPuYgN-a;XWu8og)_Yi8QpUAoWpnjgv0hHh@mPqkh2{P@01 zc=r7D-{WN;N`mKwm&aGCO*D^<5c`k+)wfG)B26CNDNSom>qKt5Z#VcSJ_&FB#rS=O zwR_Q$xyT(5ZS$vSWA}omv0d%_xzIN5r*YdHd}g9;gj?;BCw;OV2A(F3_&bu|??~XU z`0^(FeNH~HH{tKofu6##_`Aj3$oqStQ?wA5l;|UIEcHL3N9U0h4Igwjbsz)AiBDB} z*?Mum!SL9hDHLHd*Wk8t|fqq^8`5={2eG-yydJ=gPh592Q<*#&O-Rs3Ae z{FDtb5P9bEs~Gn$^A1{ChMyc?vHq;M-*rC8ei-CHvfs7h6YZEax!?Wg3Vg|vSM%;3 z<_F%=?Jvx9p6C+hv9=4k9Ie7785=1_!w(f#|_=;GNo(W`zpYH9q$l}C1dlJz6#&JDZ4 z?kn&+uXD^xU58ssnZcc%l9idk?sdS&-!{(Ze6PYTUj8jMK zSP7qtwbLT#deUr*3u8Ginv?YIY>0;B?~L%}6dES-sxImWQ6iRJ{opIqQ@VybE0!;L zc((}HPt*q#@T{v^c)W-| zJmRc9$u0TDbv{H?728 zRe#cnv$^8o(T3sYWs8{3fqZNqxxx*NtV3=R8zenRV_P~1{q0WaAk2--Q|OBLdMvJ0 z^3vgFG&wDOB}(|lG`FVp6Wi}&vRw1m!GwG+5|3O!?ns93L5BYYK0T#@##S_*P8#WRnxk#L^*lfySjd=;_2}3~2ac7@=fp?HbSK{x;2CGpjK#6{lkut!C2Xr# z*t zwUO&;)}BjC`>%3+{q?eE%IBK+wF&&V!KN24^Jdovm~4P#{EAa~w5{dQ`ij}E8h5W? zD~&wV+M+g;eMJ2&R-*Z(5X1Z`di@x>yzGRhH9obM?;`w7#0oB^9v#f;pLpo>B6TR8@O@Q8pJmCN6&Xsz1TLtq!{XpK;AF=FH!c*8NpC z*?p>yY|K2k1V2g>*M>1UQb=< z_m>sXAvK=%wM9c11}inripUMg50z15EX6u#T&CuO{phw@%N72`$iv&N`E2`Z$V8R% zdPDIh?7=adSNn_EAm>@$9F4IWfL=kMy2g=`j7^cn8myQ&;&Y8qlrt%x!(( zS1gUnwSBJ<{t!!(Vcsa=FW-fqWE$%t+Dlss>kxb1UYWo6qwBP0S@81~{up9Vp861L z!nEfl=t56T$L~X%KSdnco)^Z~#%CtQapz?p$)tFgzuOBB>I(4C&u92^tbb%A&D_ds}cl-&BjU}5hz@=xjH4tn5iOY^CZO1F) zYulFc{J~57^9V5{t?xjebGhT)+|zFk|2KH{)X&)O@sHzcfAY;%@6SHQ{2JS%aqONS zKg6Ti9-0YzH?GU+*7+!SWBdKB`2&wm>!&s3EdBq-&}kqY4^4&)_miJRc$#pt3=v$$P?}JX>;zLk8IK$>aCl73Sraa^5j#9q>F<;yi*b z*2nU-nx{ex7SFNIHMaGmSMf;@^P(8M{1vgj8^2%TWnnZnzVhe}{HpQ?us);tshV>T z#0Tu!6%VStX)mkOF!t|f!KXS6W5*V-2cBH!TArHiwsZ2L-y1kwt^0UgA_wo(4mpRu zuKrb{cZGSwIoBebs>|@~xGqoCv2S#a>N4HaHq5hV>7gY3#^q~giz_s*qqbXwD;>~cVzM1g7mgw2>m0whB7It}e@Dmi#(^T&R{;ccyhg{KJ zuDTUD|5|k`p5t1I=zo;gH^Q~Je!{amkMG6N^EM_&5%_Z<@xQqH5;i=>vQ4lPn{~ydPV$W2sxajCk_WhuxT-_iBEn zZMxqimaFt(^?B`W!$xDSJh`J4zhE1(Zw+UETr@DY>0*3?9~yu@e|C9o4Rgxu&m7s3 zoqwn+TYdbxz-xSVgL{aFtu$}T4%>tORXW1^KN#KF)zs*}#nO?j_r6Ze+XeZQ0zLBG 
ztgEU${vQJVA2TPZjMiW;s$OZ`K(f6U;@lcm#CuhQPdsXbX4O;Wl<@oHsPCRLqoLAW zZs=*`v~1s>UwucWHh1jIuq!eCniA4CQAcqZFHdCRgGH^G$Ah5uBW3=g&6JXPw%k=Ps_2 zkuK;pTDL;J?KizfX&mq6SH6?-I~X^@FTHUOJnP_I<>l;qs>*jF^-<$Y{7j-K`J-#N@&pfRAnuYRvSe~fcCG&is|-=cAG)W!f~LuHPJvCsz1 z{p?ee?q4l=z#2>cSKM>ghq2ndwz3>hTOGsqVKP_!A?9oCU{<{NSu~)2s=6*gPFz)0 zeTbOV;~xQcYt<5b;#YUwMQE48LebME^?i#kU`I(HUK$ zH&49$2>plq&?iEy7<^XDzS0=q)PIa;aev}%;x|<}ypChfrYJ`;nfYg*rIFsJdzF_g z2fiO3OVr1ToT|M4Z`5nzJZ0&BI`_eRsV|`e%GOf<)B5HZu@+}@C8p`I{^{<|i`e-& z{K?Xnj?x!Le#lw?&&Jn|5RYbjSN|;cH`(q>C-$_F$M=a0{dx6P{L%Ee2KuzE7s^gs z4@`Ylv7QtC1pSaR7Bt@`$Cb5P`YQiohO&0j&Kh@(j9HDhxX$q!wb_)ahtjr?CSnYU z#vjaMPb7J@&PZHgdp)ql{CyC)#2Bno8tO1+bQeNC{%#68Lm@nhF$pwSD*L(G8;Z%H-Y=?MXY#i^Mm(LNH+qiX=<9?Ek z$T+xdrsPuDzdI>DQ#uUzeS2_T66wNO%D>-+u1>pcx(HqUV&qvIpQ1I9C9jX~=bOzd zBkn~TYv$##<|f&_$bspJcif}@_%hbR1LYOZkm)TA&$!Lii=taI$gqj_o-8{Y@vwT= z9e5VOGx|-7rPp4y5&DhsW&-XupU@f>c~$0&%eR?r1sM(Pch8x>iffSpJpt?b8SOno=?tqFEKA< z*pI65Cpn{W*NsnAF)))VnsOm?siyK@it+_^_9UVQ%ni zGx4{$l5~O7|0>h>H~948bMYqhj_hD9df_({eKR7K@ErKmEqdhphSnnfY|*NzkWnYa9v^=Hp}jQ3t+^K1BU;9#F)TmDTgaRrQ^r(sW$0PA}cMtBG|3V#8`Ai=^+< zXQ8vK?`Qc&Ame$ci?xx$4_n_~B90w>?7b0wxDw^9&(>YK@ZlA^wGP~Oj+@)2*oJKF zrHb!syRGZ4{KJYdBmMxI+FUJudiHMUo_B(aM_M12othV~pqVk^?S;REXAR%J@%dJ* zi(osue>i_zvQ9c1!0IAD8Ti64?$7CYiI;vZb?Q^}H*eGV_2$WIiyA{&`jNiV(E;Dq zzb_i9qK{Iy%h$h;Z#D8B{m{pDw4np6!A@(q%?)(>eJB~eT@lN=O17eOtE9ZKc}jEB z3ich!z8gKN_1C)cH!H?z-yVD(=qoEq4U#2M3uAF*4qj^B$ulRE$D)^Ov3+HDq<^i4 zzr*q`g#3Bd_#*O3E+3=qwI-}|thwlXhr^QH@XY%cbF7(_mLflcd{dmG_~e34sdj;9 ze*aT;Hs7LV|C91cSLW(}7XK-I66-%4! 
z7vS(e+t0?D=TObEXyCkMZeaS7)`4f0@1 zb2~D$G(Gmmh#wJjXs&?ny6U$@Lwx^WMa>z}(1uL)r8Q>^U$v|8;rg1)ypQ&~n(H#i zreXB%YRXX=ci;nHy!mp9w2jIcLHDUX4?6g3$W(7~kKTTdd|=gQqf5OpzM`Kvv+qOG z!{GnJ#)t1AChG86)1t9|e%tU5|2Z@Fd*hjNes+*GTxb}p0alZl^F8qVOl#ivljj`x zbrJa#TUEV;a`=<)U+XN2gQ$w4p@-DhmQgq40(|rJ)7)GYWqQx0Pgd7mJg}_J4Q!}u z<(e6&ChZ1f2j?@4y$FsCbNRGzXhau zB1=30w)QLZj|b6Xj-LC2dphxH9)zdLx4+F@4z9AU!usUy2in|w_t1`g=rYRN7xLbo z%A3pwp3TauxkLUi>@GFo$~5gd6W5)JRr?!{j{D;LVs?u#*Eqg^MQ$nwT#UpUXD)8=eFmm2v4`R zaZX~;UGd)!HPBufOIm9#am|3E5xn}#5woSTRE}(xw9hNw#`+}tJS`2^KJ8`7x5cqc z2KVg_TsRD-w9-{2m%10UzH^xSM)-P&-E7R!w^fvlY36_E2b-3 zzmkmLz0F_ZNx3aM;6*w;@6}hQ{7J9m#iV{m;+_e+#O#9Dm%@5i@5t?O+vInmZ-jO# zK>tzhS$`WF4e!+tI%G@T;4L2~9&l-A@E!>%yTd?gy=Q}^fCZ#E?@=r%R{N%jxZ zk0kpwSEE~bot6UXc0yOa2|CfGtt@D$lN{EW`P|3t^$XHUzDYmRKV_@R-p-+uNHz}B zF7gqGw+r0o4tKv#a}Z}DY}|5SHFS4CuXK6`eIV>B!Pf$5^sPRVYvwc9GRk>H*#*Z( z(C^^gVa1JGxhhNL#C`?#BQbtrySa*3&g>v_$d}N$pL@7Iy46{kMcEdu0~*^Jn=0p# z*Txq@^ZvYOKrZCp!~8O%aJY#!bGK#g?EroO>(fmebL?H2KRA4!^XKOOC1>>w!)Nc0 zKz@8nXZMk};q|KLN;3-gOe+byU50_~J1{xeU^ zPyyN9l|?=fyXepl^6T9JfBuJ_U*C2_d;;g%gZR7DcGpw4VPr;zeW@MD54SvyA^8zJ z+R2o@<}JqbIHdb zPOJf$kNwU09iNwD{C2r`Jka+yGf%Dk7BPCrk7kXtgU~UIOp4Yx3BxW6ec2TOGbK>o?-B+QZk0n4W^4lj8Lrt-a*n3v&wV;o5sq zcd6!wt>^HZJ6NwVGn{oZ7J^eNq?$kY`@_3Go6nUe*;T-q^GG~-S}tCZ|#ELTJgqQE|+{PfH~fqh8%>BNxHMxow4YO8cRc^`I) z`lP-yS${Qshp`>{A@jkM^d{|wnC7qso1$6!A&P-!?VS?Mw?Jo3{SeyY`b-P;na=lm zeP50Fljv{qqok_gRwI|E{=UPv{QMQEUwOY?7M(5AJaYSlttpvmeX4{V)lbYv5g9Kz zF;W%BBMqbDlxBR0{a)1r>R+W9ALf4Afa1hUGcMsi%il-%s~X7D7t8x{eBb$=pDUr+ z*QE|$uXN@YLq8v7?iCGTZ1l<2P~RkdUmm|!ANbVA%jIis3FRlt()s~;-t=evGVYIl z9~eH)Sbi(sK|G4bh|~E<<@P3SZ)mD{xsqOA#Lk2ezeZn5^jrDr)JJ3&yu`h9X06M7 zc5M7h{nxEr70-XMjMBUb4&;W}BQadEUAQN%v3(Nkc3z$Oo7~S`bJF)jYJGQP5a!W7 zk`oIKY2AD$a;uhg*F5w8FzsOD)A~DU>q@kZ){%&5*e5@=`t0YJgZ2YgycB8t&(>G8 zuZOt5LB&is@>x4f#PoBX0&uMb53%<}?V+_3+4M!}Dzr(FzLCM#*@0~1ysAO@M~ZF! 
zT*;MMKPezDwU;sf8MoL))8|4!U)O&keku5^?n8#Eze#V9WNrDA0OAD z!i;F_DZT-IgztYp!MDHr_~vy6x^k==OIOYYC{s(NbvFSo-K zu^6H)nh}3XYO47;+5clZJv*p+#j?Q-*1>~Do|~v=e>Qtu{@kVLn4Rc_??h+Cr@Ajk z8sviC&!BH|%Rbwg&H9*0_0>N?zoR~7xvsfp8hJFX?g!VOfq$2qcBs)+@0S0<`g|Uq z4YQX{aTL<^)xWK-(&29)?Na)q(rfI94)HBdd#W!vo@c^8d(MbXtrb${RIv{7V>Ka* z8eY!~hzEQVV~ledXs>u}GtZo;^@8tFR*A8gq(wS^lE2#LraId?E$gtp50Ako_3;Jp zty1mwc9-P%Hs4Ft2vd4jNyouZ??^C!VI)d?&}~#J&%G zUjlo~f7eat&r9!<&fG{JHrXGA@4EFtCwt}FA4hKeJ=aB?iPgweYg#(*!f@&A+28N_ z<0D@}=a8?)bkpK>A2?Bbb$s95&C%Fx*~PmLXxZ};j9|M1N@$pZD2VQdHUGy1r= zKqE9^1MB-a@m!rQ<37sB=`3v8={vNI=@GQO_f5*D{#9%w@c12Tm;wXCf3ODLG8H@-R+@F?{%->XZS7EJ2`E|bjVCA|4r$9 z&uON^hX+2jk8dOKZs*%2^Wrr)o~Nvv{|>|j_Aa)ulX)h5g<$^wfB$A2u=vI-@=NoM z+C+1t)&n3Sj6;imi40b5|60=L?R*0EODRvjDO}GDSIysozb9P1-<#*KuFJdfShRa| zCh~A5=nLqT7sUDWPW?^({mFeNKAZ7hBF{8;x4O*kObz8Sm#-#n6UB0ETxLAqa9<~y zq=Rzre<9x?pATLr-+iZ(551T24rQuU&tEtnHqkFgC*MKfoUz><{AG6$LmBG05n9*1 zMO~A8Rexa*?sHy8p^wzEU%ED@z1H1_x4hBNyXElBcXkB-H>vg*K_Cg|_ z)$AO~=yT}W@@Yt(X2gebdqM+klC6%1`XH9ry5PQ|T_w>sV&zb;N$w?D7d4 zPBgDqomVOLW%UzV>Hm+TgGu&vy;04++lg=BNT2>9^M+`W4)kKMF+|&BpMk}lV$X!N zz=iCobkh^-y52U!kC4-zW!JOyhp{i^=6Egx|AakfK7_O#CH~u)A)3p@bJbt^=LM7} zec#I0S%8h$(Vm~gB9^Lh&9$T{;;)dOIfs3sHIp26tN5GGx>*M~qkw+1>00_5_w(2v zKTG)>`F(q7&DgI|2D^#amC~ht5A9It-@Bpi>FjP_lW7s(UjDH5rn0{teM|XE{|9G; zu$G*pN8jo2_DoaQ_ZQypvFFY>J+h8-v54Ia{imBV|6YC|#xFfgZLe?8%7|rV79I3i#$j`W4yrSV7I5aY(Ij6Wy5A}L|7@+<{2e3swHjWZHqfxbjmd}FLB9|^ z`S-vNea{hpsDEC7e>Qoxkk+p({u)2Ll?zQh-HoT}dM=hNeR{9^2Iu@5hRGl0??yjY zonl!s5s$E^X)-?9as~L^fzPXvc1Gq8Wtw-kXJ)&hhGlkE1Mw30VM@e4CTZ(fHW`=f zjHFzp{3(AX4{b~q=COv0oWY0GOoqM)en=)}LYxQBlpEU(yzdz~cXGLci4V$a_r?>+ zDbvG*V=`YRaAwY%jFY(bg?ae$Mas^6e)zEFrf&MQc%r_a;cSU+`m(?0JPDJ>?}kqi zd=ed*cTL9m1UShv!RMLqc@cPsKR$>p-~IU*UnAo@v&3yz9I?jo$Ed$!Po ztSq|C|Al@pUSH2Tp?pK)rBmEE<59BiPZ^JrHIgNJGWiSC_c%kx^M(EBn;dJyk{gl{ zk{OZ{k{Qt1R|IxDIOLzv7}gn~Hhv|~SJF;};Hx>v*<;JF+08atBcH|;{iF{XC&!n^ ze0;0_)91>l4`=R}HZY1DuIiaS;C5+UqiUe=S*>YQ4``jF^iW(6$(mv8;{tWq2`_C8 
zMR4lB(&U3=U#^nu^L;y<@e<^R@)wXprd!Cjse9}9dY@xWwn*Qe)E7S*`XYU>+{fvI zIe5>C-w?duo2il`@GuwGA@1|@2XBbigPS9K5C!%K`L<>bP=B9F9-Q3q&4fQ_n)e53 zKGqq$I;&7~PJCvFzC+9!Uyx(++e`kW`{&6W|1FN6xXYLAzm@V``k-jeK<_Yq72!}E ze7vqi8y%%BW_*-(VQuUo&PQAm&z;CPzCW#gMVkOuKF0G(S0Bq$_%}HG+W5YO`E2hk z^k=2Lh4GO89XBT2`OnS(=SMjMH^X-nup>SWd^fUxX3WFwqtm>v=jOxH2Xc>qn(ub=z}Z46Wij_C%~D%#*Ia+t9+4m z*E8~LZvL~=-zi-n<94-od_F#Nx5ymtK01sJQhJ{Iu?NnL=V{I7x3;-S^LcVEcbel< z^SEe`FX{g#?tS3ns;+zAtv@!xxQU1bh{TNtkQ0R%qa=nXj#I`U2LTQUqa;djq8V$( z9tHgwq>&kdTL$9hs*T^RXw*b){kajXbFb{WeY&?wD_U-&P^V8f^(R-QeyQHA@5?}Z zTc!B6N-;rhP5st7d;Lah7PE72@9X=#0r|}No^^hE?X}n5=ggUX&Ri4SPq~^mze+yv zv6uVruTmK=Q(xxmTj<9HRmX2r-5Tlp_yo<9oue_5pYs)$dx7aUBBEza^0vunA3o>w zv7zJDb>En(`!xD~J9;J+pMUP8=SsOxl7I9Wr=f}NCHxKd zXJ7gIs%NgFW61mHJQvqK>7q^9xf1o)()<|Uqw0bf%tQudb z|2#1Y(0GByiRih;{Jk2@A$GK$q;ph0|K;xrPttSM^s(B}Goo0J&g&j!pQ@?O>{pD= zRouES>~Zx^zn#8Adz^iY(>a#*FGgb+Yuyo@%QRLPJH9=2#~a*sg*|)>X!!NQizlMC z^uM>N$1nX^0>@Q;4j~^~`Py}np5gTr^;^F7&h`8*wM#=S-5){6xgP2tJ#@bOBt7G3 zl#Xw-ot_z9bMww*`8fh}YO|=17U{WPeC@NAKF9aadG=v?#`@Rj87ue7{yX=D_mzAc z<7Xq%_sti7m*V#-ecxFiUmW=oqq!O1pd9As1{T)Rarj2h?})`7*-7)5`TIkDZeN_n z(K-I!OO1c-hSl$~`TL@AswZnC-%0sG+N_bF{rK4b1M2Q3GvtOji@z_b<@v?6}e#iJ4u(vmgsm)zqQLbm-rbM9p8A4 zMU2`xLC3~FcJ;SM?{B8#0p;3OnrHkB=~vTO^Hn;p8F?3dS4VS_zCmLO&n5Xd<;atC z4U=N_9H0BJ?j7`5{GRB0x#*tSJ@m}>uNFvSlFn&nDUP*tt>bC>U4|Z4`=?LQ=l46w zUp`-NwQj%paMev5(`)$AA89P=3$P?n%(xoJqdMKyl^o zEgj|P4fNR;x#38YKELp@e4>7M1J!c_`NLx!|9&d#&=~2@Gf0Ei=^A$f#zM!R-6-ew zQQfLooBU;8`P_~=dE^spqp_6Y%yFxta_E65JZHP?v)$@`LDF? 
zBCQvwzjJ@$<3r-cXfD#@kJ0&2iP1Taqu);Cb0v2L&0YFp9nDkn?WOw{my>v!&Qa+6 zBthpVwEm`ZnmKiz#QDto{9p7Oqu*S2XEf)DbD!(T`TBA4O^@ef`@i&}>toOT2AzxC zNNrHvLZ6pudEX_PdtB4~J>K>-wb3TwyM%MOrRxQBjgikIsc`C>bRA2s9nv~oukv%O z@Y$I9QS_`z%8dtk&ODzB{DO@C`rMYU6-HWgJudnVF}m*L=SqE$+Tq7^9H1CSpF{YX z7teEhnffx%yW)9SbR9G5*K>RyD8-SV&5ik5F=DtsJUaiSV+PwK=z9~^Osu8PIME!m z+T+C!6Tg{`h4jqE#HXp9sm-3GG4e6$8|?4FPgL`HFI^Au=e@lBMu)j}zWu)YKWL}& z>j!?`xS#9b=Z$k*>3i1O_+0nk3pDri<~V(hy!Hg$=NskVWHo(;r!k-AiTi6&Z=u|x zeC6|3{(S1_*HB(E8ZACI0@w$c4)x3v4S^m`!OA31i_vUGkq9(~p(&c7wY z*EE^)_Zh#-&j;pXG+l?{y74jkS^8}s_Md;-Vkf=NQ>=Kb;d3*NA;p94lkoGkCixgn z_jY05Z_(%d-=pW@T}#L39@6+G#fP6K#`WQ2Gsl-TzORnY-!{kRXRi&{<2lL!jyacK zq`t=GbJ@|DLb(*9*zvKK%j34^vgot&&2#iS7CmpHXKK85d1l|WJkI&^P96jP9cl78 z2|vT^pEnL{avye|c6s+zI`KKx_#1(3=EHCvx7sq zbSjoirH0bO!?FFz{@%XFUGKobWM6M8cCarwkV_u8A+|XmyWu1B?>aY-&hg)^$)W7l z9@o}>S4V6xo9i7Mh;1H;HMit9x4b|9p4k4uk%3g~vEE!J)|VbQK#R>ITUU49H#nH% z-3JC^r1jX~&?B*)lqyBOAWj`7fX)h273kv_V*qb8A``C z54%kpH@%boi~ey`-cT$#bYP@EJ&+rYrSsWz57mnch+OO)*gq)hK;CPrGP_!;v93_h zU@BcwSgoYrVK_I`J8*z}rCN>j?@JH4&3%3JBdqjqYMslZ^Az8qbkE>{f!<*Xu-4r_ zIMkoa#fHhp-u=Bj$tZe>&BO05-vyEB9mu7J_9uJNvB+j~YAgPyhREl-htop`srcOf zTy_h^B?_Hq-2=(~wBulR@9*tPle61X>HW!(zMRVp4(Fn+slona?|>WP7D?tZjug`a z!_=O`(P~d$FGaLB<@}qG5qe9Gb&F1S$+<|JTFqt2S*lu9J?i! 
z%cKWlS!%Exzl~9HkEFLc-h9vfoxApYbmv{WV)uXiV|VV_6KmV`&|Q1(>FB)Yf%_v5 z4{&8U8+`&eH#pGuc&s;ja9io!?Jhf%-k%=wboKr16q8u<=Hd6Ob9ddp^JBYWoGht< zVb;Bsmec`yl06wpBTB-d*k-B)CCosItOf_trF34+=38#z!f%NRUwwbOyX6*ND7LJw z-Cjy;bdD&Y?|+Dj;;b2tmHJew6kO_uLxcU)BYQGwayUwm7$*xE_h-p93L}LzH#ivU z8>Bw)VCSbgc5UbW{~%YB-f^d(o=3%S7btb?63bEuX-m5IAoWq||E0RBuFE1FrCegI zzQMsqMzU^aTic#p4?V>45AE9XiCuf5b{ZJQ<`fNQQQ6Wl%I#>R>*hAtlkOj+lqs|H ziQN}%-`zW$Qz}uTb@vTN+b}%dbJxc@4&EN4F!ek#Jkn1UZ`qco6dcZ_ld0I?{%C!5 z)a&i(&)!arF1z*9fSWv!=DpsZ-xB>x(e54Qk&s3VZp7X^ZB5a((jN^l!`p|F=*PFa zo}o1L5lW%qzGSi|+PpdC?vVlf<82)!_qN7%$*@QR`Igv$!CcvZ-pd0rYF1(3&n9~w zNmKu!R^QiqfZCVF&ZyE`wW#M8Cg@!dH8hMe0n?o!`bVzhZ9`}YD!h#CedI?&zPefsJR}AhUFN?qx?`p zy0KLHU~f;_rH6*7gZqr9w&HrwU$CD)J~%?XkB@q(bYD7`ra_C`kYlvNv3j*cKGF;i z_8m;_>!Xa8LpdD^%8o$fPIREf_5>erQt4qz9)AR4YwGeWLNy$rVyQ=E$%ULhmU1Kq zhmu2&(=jf;o&Rw-5YZ9IeSF}Nfx*WHO6BJs&(d)^Mfn<|KT6EiMTC3bCis7UJ=!q5_l+G@q`Gq$o2N8 zDJI(~foR95wSCOfu?+Rs$C7kXBBi+PZhK$vz$4_+_P)WMWS`4rhSIC!xfA$ibDWfj+qS8r%AQmh2+iclR+-rA61Gh=jt?r@r2ljMEhfh9j zh+*`9xc!sz)}KjGpt`vMR*xAxIDO*w(D_G933vrMtlr*@a}U?%dtNdYc1UW_=gTd3 zKk7i*`TKWu#vU%6Z5)bGdf&aXbJr(#ernbKXs0)qELuK%ecj%wkB?9AEkdP3l&qCm zaQc)C{7Tt&4jmnj(j}WHmZZOb*kx(7rrhTP8lOGMm_C#oI6x;+BLhEib9Q7n6W%#H z($^Q>N)GW!O4z^;t37>#!_fh7kPlF+hud5aAJM!G9lNwiCw*$+Q_1AOa6g?i?oZO$ zv5d&2;~Kgk^^So78Ugtu4jqz5=xkmi=~s>=D;x&t7{*&F9S3O>4%2Lcvxv}P8pqp` zUwmv2=u*3Ja^5`XuA_jqL9obg5)6&7r45 za_`_!Z|?E#M@P~lbl|3%|HM}f`(5Ex<7z5UdP2U7>iwo|HEzwD7=s!|Pr#_~s&RF1 zqTZ7?`{PTja(S}}oUYTBy`V$i*Oa|rpEp$lF9i5#fYZe6vKMqFD)MlBCo9@lxn>hm z=(hxTtKo=4-0+glRb}o=%T~G80H^O0%U;lF5Acov?+WllfM)_cYdGqgHynOWy`{|k zQhlcbd^W)60(?He7Xo}Sz>5LC6yVDNz7pW>t(D_Y7vS{)-e@@DA2S^BpTDMZxeEcl z7~sVKUkdQ$0AC4kcWq@q>jJz!z#9WR7U0bR-V)%gh9mxQ!x8^QGt&XQXgGA142L~G z2wuOyCjxvjz^4LyI>2WGd@jJ}1AHOC7X!R#cqu-HBM!0ab#$DJGiHz{6K`MlrXwNr zn*%zg*wQ#QN;l)_7yRLy^z;jSBETmDd@8`F1Dt-xs_cbw=K_4*aKvH3aOlk6P$plB z!$N>B26!>RmjZm*@T$m-Dz{=d{Nyjn^b35#aOg}L4*R0vuwOD9_VYLCQju>}ZXv)I z1H2gEO98%YIQ(2O9Da^AmHP?%F~eaWqi>e=3%ohNTLQc_z~cel9^f4T-WA}90M7(? 
zHo)@%UN9W-95ozqqwjsoUf`Jk&jxtjaFkmx9Qy6=Ei1Ove>(!aE5H*0o(b@5fae3e z5a6Q$J{I5;0X`YvQvp65;IoD!{&R*S{%%X9_^)yAD#xYcRLpSb*BKq~v90AhpxlL9 z%Q@^94fpmnZezK5DNmL@SiS-J%K^R;;I6f@PF;Z42Y6$E#{#@Lz*_>mHNfMBBcAPs zBR-AW%j*mKnBlNbe7LfG#&FmdXd;zd-Wd zd&}(+w?@OE6Eht4dBb5}FdX)E_m!6m`+CD+zi2q@>7Kx{7wlVim*LWQ6*n9@?S?~V zD!``=ht90w(5a^fw&)jlqv6nr84mkGfR7puoiW3qL-$scy}&a8o(=H4;V8FYIP~iu zEGxFuE{y>m3-IOuZwc_$0FMWFdw_QYcvpZY3`cx2h9f>JhQr?Rj4<_ref>iU(p&K8 z$?a+rcr0Mw9N;a6!=F~ep+8|b>?aL}eGAWMRxj{Y!=V#59QJh|S0eNlygtAi13YFp z%563r`eUCcFBkR`hQogOla=jP42S(>S7rMt!(l)Asmk_ahQogL)8+QyG+VIjg?h~! z4*TNXGV@aWmjZm*aOkfX4*ibLR@Uzd@I-)T0z7Lt{K*>*e-^td`%?_?r2t0=yXDONK*#*>LE0C0*GIJYhI=GKRx`F2Lsv zht7iG(8>3dxl`&Fg#aH7@UZ}&FdY6&8V-NPQkDIgFubH=ICNUmm388VL#N$v=p-_g zbus~-H5~eR!=b;>TUmcGz>9`Mf5~v@FMh7Felft842S-*;n1IZq_X~efG-62Vt~`L zEXrQ+XUTB*Q|K!*FXi*7;m{c~96Ig&m32A{hfbH_&`Atb*2x5THo)_SquhewrE&)Y z zfF}Yx6X4kZ&j)xRz()goEWjrMd@{hN0(?5aX9IjLz~=*eA;1>{ycpn10lpmID*^5f z1@b?@>kY^F(`fiLB+KIu_-QEym1?8}L8C>jS*8B46WL%H`#}rOvrR z`3A%xR#B(Qbr=r&F2fOrQNv+BW;pEU42S)^;jmvZ9QN+#%IgLDxZ$vG5AfKNm310J zaWfp{=ASOtLAeFPVL$POa(nQ}0G|r*X~R+Otl`jKK3uunl>m1?U)jDc!0Qc%KaGaN zpN=n9*6#}NM1W@kJR9J7!{KMaaQHd(uPXaFZ8&sh4TsLyFILu>2=K`Op9=8l0G~A+ ze$E*VKQl+l{e*qiaM-v0a=ATtJiyxnyd%K70z47mnE=lkj(X({_x`VOh4N~Z#?^w^ z0G;WII#sUt<#Hj!XDPs!4Tt`U;n0bHrLul|fOiCVSAZuf@-^;od9kJVjG6e{M0WMY zmRE13zqirxcS6Qz*z6>JHO*Sf7w8SW=tvg5_R+tv5fm->w1IURQl8>t#xDuqc8lj- zK9KIWX|bM_J0uI-w~a9InJVv|21I$ z-vfMYb!9)V3Gf>O{MG>faDacDIJZ}G+}D3wjbG9;Tq2I^a-HNizkV^GGa>dEZ;y%n zzbDT5R$qvAFXj1v3h0~>9n3?J-P~V`PR!_BN&d1v&S#L%*AwS@ossdXPW*YVa9m%_ zdAHn0gtsONtnvIW0y^}}k|+*i`@LPeU$|?i{7Qblk2uE-^Olc^{my{>0|EXy(ZT$g z(-QwL3di|UtMGpl&^aOYINwIUeqK1{X^)G}ivgX#A` zZ@(b=zZVKUZU6kAspWqND}5Yr9^NE6jl%JL&8+aP!f`#MMfe@U7x#N`Sn}jy;+4wH1o%jRKNaA=8sPty zIJYmZdmx|xFkn9~_V`@0QR?-V0ejb!cTFjGU}$7tcmH6D9x~ILTV2~vtA351!9!0M z^6%-!Pr5U7fA@a6)xJc!iSk1ay8GyvF@4?Hp+SC#P~Y&XL3XgOuRFRQy1O?;4?c*> zq`kT`kMSMQ-So(gRYnhRIM|)0d!UEd!ry!AxANn4SpERtMBSaC2bHYug*P2m0jLan_@U>Es??m7-gkyZLr!@w5Ei*Y50~_)3aB*~i^* 
zdLUKVT{0Nz$qbc*qsT>fa(k5O>+Ty&N(-8Z zp1?%U0Vyw_r!Sq9GNZe)yZPpDZ(?gw+K^8UN6+oq@9pU61%32P1;5gt?&oK?_!X*B ziXILVIS{o*CKk4AJ;Lu|DX5bhc{6D`N<8@PQwG*1vZT&xG{7N z@P=eIE4jvZ*ZbY`bc4LgI4Zd(!;g#cTX`?~Q(0PigoD~iZs{-8fo|-k2NJ2==VuW3 z=Ma=^B!hE<-4vVE!lJa+ke-I4dKk6Lz~HJiZ}%3Ku3zKVU^FFHWqMKO8;0K(#m*$~k1)m>>S6laq_`&vPi%CCX0D5SyLU z7SS^gxNG{TmeP>BJB3`iNYA>7o*Y&-m__weT`=kx-BEA$b@{y7NNrOZ*Z4gKoPK4) z{f+PA@9tN1=UnE*(<5for`6c!TYL3bfHU23yF+uyO@j~NIzCeiYVCq$w%nO@`&nk>;*~j!?Q}J zUfU89rZAv!V! zsZ3FAC6V*Sa+2X!s0Zfgp?`jbhWApJHJM2jTiu&>@+;&`R4(cBvsyf)#?H}Wl1dg) zXjNdT6nebmfzoUroEOt?dhl6g>A4qMvHqLt7u7=ph_>4!<;?}mtOSS$I%P%!ShA%xeb1Tw~4)t8+>Mdukt@( z^zodMtZ?XqA2s%Pe#tRo4?bn=@m!OW#vXjy*yDL8GsYf#Ryfz|Bl4aDHm`WQ;^!6D zdAp*x&f8jPB-Yn?i|fHQ&TYzG=Ph3^<(IYZQ@kusgxh>TJ20ZA1o1vHi-lF10NO+zrY_8-X_<>5Vw}A{DuzUrTPj#WbE-=oWsT*d{Vd_hv$TI zeBQ6};f&&O#f!q>C!V`=PB{DoUoV3Z?D0IFnBjOH&lcg(0Ut9QI`q40(Tn}BResJX zzFYAH#dUpG6xa3*blr?!w%_eloWC2wD7rPHExCXEiBCp4q%wV!i} z>+$D|;(Gj9Qd}PgmxaTBJP+xDaQF{iOV`u*W#hxw6~T46EsE=M+ZDe|ZHZThaQKsu z{O=SFf50=w9?xgW8v7Eb>stJRKH4QG9Qxqn!fk!e1^BY!Iu6^``Ept3v#Q-Y6u(9B zKEt;D|MhuYi*O#_-m7#l4q@E; zf;ft}GQUpQqkqHxSH&LV0dsx)LOwE=p*q?FdBI%CJG4LazbX2tANbdW7o=Xu6Fjf$ zu!#@&5#e^c9XC37zSmKs1OB|R$8)XbjXn5T;kMj!!nwWlxYx4YR}l5W^TW0YN4tZ! 
z3um3Q3TTJX!SlvCjShI9vBz`Bvc?{K+}Pv!WJirX_%Y!&Kc|fjo@X{=bikiiIy!Er z72l@fu%x&?e_Q`k<#Aqmi(hXLjyQui3TOYDlzvQc-G7^n{<7$AG5X-!l#Z@f#^~TV zZGA=uJgaoHpGS=jp6fPcbik*T&Q+?u&nw=o_$i~0=flk#eeeaPqwBk*xNgV25&D9#@Rev0D3uSLV(B^><*`adIl$=L4_j(mjuJ;GOvJw6X3Uts?Uu}3?D^L!?L zp&h^z!ZBWge_8m%Tm63A?z;cZD851EVViuZhkU?u1UrQzAHY+>SzqUQ#^~U=gE^xE zen{#3SbS&C3ySOW$s?!&^?EkZD!?*?h7sAnx!Iy-i-+=!; z%8+<~zabp?4vttOf5CrBVvT$Rzg9T%3ml)Zkr&`w#UAlvuFrFDT!Q_F#GZeRi(hD8 zJg0V}aI`OYOgQI>9yhiquE(*2;{Sx0QF$4~b>8*~hd+2eZdN$_0f%3x*8@^7Y`1YZ zZ}jmz-4&w`e#zM5xw*A6(F=a!IlJo($8&k>g~NZ?Hy954M#E9=M&TU)8>Ft>elg+j zXIj>qjSlp;7!Lau!{KMEaOls<`VM0c9yj)LvfgIw!P|w~dUY6`d0Fo=I^cVa{er9~ zj6HbD*e}X@#@K`R8T+EFXN^60PPnbtm~iCnC#5YX41Zksq~hF-@Lc&R#rg9~LF}g$ z=S#nGdC<)&&h|VnpI>u^|CVsX6@JbLNBqF6Sd3nX6La=6FV|%d57^%*_UENsDJVnK_>C7GJpBx&Ut!&ew~yJ@nWg9iPKu4}b7n=W*fi2mGk9$Mc?#8II>Y zPZ^HqJx?2s=QPh4j^{c*FC6}$+}gKuG@JuRxpju4+?a6GcS_cqj6HZ%^6PUO4h({swP%NI3EhyzZTTJM5RlzCk$b!5fV|o)^B+*n`K6eNptAj6Ha> zaITmB9=z4);Q8WlqXWKM>FD#8t^iLdu0PKnQe4ORu;RC=_|GV=%bg4GbBgPHIIp;F z_l8ZY?XLCn0p1>4wb%Mn0luiX)^Ct@L!KaiHVQ|cfNv4b?WlFy6xW}3a*AvFL&8xm zo?CxJILZYdGxo^CabpiYVeFBIM~yxBq;Q*`Cl%M%brFxMs`B%E_{*G!2F#y;U*HF2 z+$f4Z_~(VAUxFVNzO3wdoa~}Me%0ORmxw2xyT4vI;t9S(IM;WL%JVj(Ga>8kvV;ye z^w`g`aa-(}>(4<`M!zWQCyhS%jBxvX)4Xu@|G{cscmDT`;`(zm^bpUZ63+`_kGSD? z16G8izToaAUzYW!SvYj?I|EyULkE1DaQ0v4RlDN4UY&~TdZmP;T>OqfMmWj^&ng{V zZo%jjW&N8`g~?t zaee-SA39(^^>KOqyL`JdKTm&nH7Ks9TgaRyg`mM&ff? z*?&OUpH*DzUr=28c`3l%yM4`3ZbHhf6^{G|-zc2prrV{>*u(#XvVWWMKPMdJqTTa` zL+6muhy78duj_kEaee+dqqw#|Egb&jB@PS15eM*Cb9vmdV&5Vh_Tbx$J^I}aV-FrT z_UL!*#vXjPaN8gED!xU`uH88OUk}c<;nV+IjVl%qwbH5!r>==cW9$<_zB)@?D4xsTZ}z; zTsYV38s$&Bv4{U1%D(J6l5oTs^Q}=1bDf9KgZ84ml? 
zMkgye3&tM&jIl3>{i3l4KWpqq#lC3l!Osb|^*wKN#zg0W(LsGz42S(Cqcb5oPL{|M z@LJ)>t4Ud}Gxp%?g#`q?fV{q8rV z-*p&1E4)i_N>Mi|<5xm)9*4gp_8G;?`n%$6{}<@*hA#;(D9+_B-AMn?YgBR8`FpV+ zGyI=~qn)|FtTQP(Xivo9ZEIMa?(;G_?-Y)9fKEZa4?w)3bF0{&5*_3fet&F1IPwsD zQ8>4k{@$b*;7b9%BHVuef^yhT-7Z~Qe7(6`eAmq+r?@`9JsjX?0(>dJF9rCaTUPzi z<&G)N&&a~77~pPeWu1lqZ&&;sNw2{*ewVWE zR9u(aB^>#N-<{hl9Qg*GS2|^JR$TjeQt>O5KeLK!f93)l<#QhD^T0*1=e*VaG`6g^ zqt;I;ev9&_5a6T25g+`X-nekY2YkZV<9GIs8hh|bV~^k8J7(;`r-XC6>+1${itBth zWAyPmev5{~pRwo?Ge0w3z_+7yU;V2h8E}V7rb%qYb zb-Q;Oef&OQm(d5$Djj`YVpMTG?wt(qGm7hcTUK26i%W`Y|7+i0IiH&q*X_PTajnxS z9C60)G42(PID^kC9euy;vf@eAk1i>$+hzR+R{huUj47_;*%sib03QqRImLB+nzya` zqwBR>ac!SaT*q@n-BtGU(Nof3|EfuAuP{e97Jj1Omp+i@TzH};}jv_vKe&T$1yPOZBeZYH#qkX{pg*#~n z@W+MYxWSymAeV(4;>ui~SD@a^>s8#)pO{~*IQju|{(SK(Qf`a!=l6vpA7TF|!jUiF z-xEGAI-H04KFwo_>+yD4c&Qz?bM&2?5ngIX;fqQ~kI&~6*W+{DhrAy9qj__HcLaD| zaeaK8P+T7$rxpKc75_QK_4U=$ifjAxifjE^i3it9kMsCFU~U(kC!NY(w@XTK-7Z<- zXczo$a85Yd1$T*v=(fS(WWx(`=w#}>tPyX;n6$E{0o9XHg&#%)H~@6+w5 zxYn6h{GG~vQ8=If>v628?6uC4;w!WdUVS@OtW$d+=kz?eS<@ab4e2itF>4Gm2~b72&9F>G!2S;^Tw*g0~81fAo1pyW;Os z?bRjR>SqG_b4o`a@0N@Xe(!qO=zw1^_K5$Au?N3o>=A$WGv#pruN7|Fr9p9h9Na6M z^Y#uEpXNKfKJ#6QcPPG7@k5G#LUH6Hjw5H~ID-0tU%?l3=!JTKUnLyv%v>J_5m(qZ zh<#r?k}KV(fZqep3P=8cPaFHhU4H*rV-J2oILBF!t8S;?4|#>(Ew2@ha>2IjwE3V_y5#Xo?=Nn(5m9I@5+LO6H zF2gV6CqIXpU&uS~pAo(&j(ewyZ7zF;Xn9pV-G(&j6Ha#v4@{s#vXjHaO?je#V=9a@H(QnuGdlF zsMq8@evgyFQ7`amV-Nplj6L|Qv4{W98+-6M;kI7q6xaPazRUXy|MTL1n{fCK-X)y% z_57bBMrT@d#*Gg6QKfUI%Ab>p>+_{)qdz73Ge#eL+edx9*iSvrsx81%iq|NArWDuX z`HJHD{IKrs)pGUu+eXE8{Bw$z<(1-k{Fzo<+b=1u$NBd5^7cjl>JX0h1@AQW$ipsU z55CvfBM%eC9y}%7=4VcE9iNkm>;5>WxQ@euaO5-kT~Rpd3x3Ynqh3qK9{jwqN4=Jf zJ@^ITw!RILzsz;}HU)U6aMWv7*7wR1^#V^Ad-$I+_TU*~5C8j&J$P2Qtyf++AHUX8 z>a9HK^RJ+I4I%!01%9D?9Pdi!d&a(WzGrw+bkLsAc|!PH%+`gQmDa?cul@N>dBAGFSS#mnYxiXNA%IrOZb+r%F6T$J^8S#r5)HE!<~ zjyS^}OV-!+*;WWe#T|JPL}Woe7$h?Utf3KsJMRaN0Z_@ z&hU%t#r^9!$sd#h{-1^K79IFGCF`BCgrDGhRk^ypeMVT5q=zyP7I=a3W6xZ#0NpW3Y z_=S3*z9b&CY7w5<4cHEygZ 
z{z1hX#V^z=FY6m+2|vNN3FmV4yz$)uj(Q-T=Olkn4{&#dk0;^*UMn2sGuPwlVJR2> zH$Lcf#*F{qCzYSCkrrOlMyFYHW{eK_^Txhj?B|U=_=2&Ii~SkHW5SC8e$ME_{=9H* zm-nmoy@1R|Aoj)nX^>x%kioZ|Q zYfN!{e4kWY_xCB`s8_4h>!fhR0la89+UuOs*W=82#p{%xE5cE3yOev$=!3fs-!2?a ztzW0Ou5W{I_>X*uDSMsgI~3RPi3^9H$cHwg58keHv_E?l*X`b?xNi5uitBbCS6usf zOgQ}Pka$iDM;yRsl#bSaK43qm?D32oZjn>MQEr!%J8yKr7ma;F?2E=8d`Y!RK*{(b*{W=x1zS zOQtQ}+{KOk2gDxz1LbZP-eK(T5RP_Wdwsk>yrI)3_Qynr zlqKQ}z9gJKkLYm{+u=`__|vw>2L$@{5Bc?W;m`-q1?=;NH;T?7!(+k=hBphJ5YF+@ z*Sn?^{}FA&>!fh&=krFtMf6V@d-yqTIQ%?qIQ(2N9QtR3v;X>hX}y%s?V_Itg}B=M zfnUrosQSV$wBsYvj_?Z{=Q;2T9OpSZ#2>W#gsiv867dAztNg!79d{DKZF^xqE?4){ zDX~Ynt)0FlPnvSUXH>a5pPx56Euu4TbimIEXFv6MORen3T=&QI0S-M|FZhc%AC`7$ z7ai2M?&H3`yM-gp;GM#4|J`eJ>P07Ibinh*J|^~uj6L|6a2ub~itG5C3Gij%sBfc` zdqFtr3yyl*xS<|4&e#w3zb0`;J-}y$w@CRMPknu|Lpax0U;pnC4*$nK;oEDkaQF{C zp>*{5$&BJUZbii(QvIk=%CULaq_{pV>{eXscLsPuajk=TaNP9q>yp?b4$GhPacGq# z`>C%}?GTRmz-bM9e!YqV_Ot%fclq++&!nt3$P(p(Zxhb#exK6EcGlP9QroAz zKG#d%_tzyH<&MgFLY62OJfn2_4Qpg<${krT%IRO zVm~e%^#Y$1ZpYixvLE8JEb9xhgbp~$w|RwfP%r#`2KEErSm*uT@|kk~J3i~zw+TnN z;O)ZM|BLj8SBKH*5}i)r*N`ongXe{Fd+Fy?7Zlf@r=}Ix_Ops>`+4DrXF~j06png< zpBK*c()!CrCnGveme2uj7H)O62!}seS#LEu;JX7lXa}?-e(wZv1>Y|1i~51zAsqgK z-z6O7gFhhrknw-9+qe8-<3ISAu`i1KxZy70b!LnX?B|4Yo;0a(b3$@FC6XODD|xuj{E^{2cpR;Mjw1q>F9izGCK95bJFO5&jxhnjZU-Z zoHjb(3jv));oOh(alfc|jdSjR^sgnw6DrS_6=!?&cjO)V=_8_pb^sp~j`)E;DIDZ$0Q!i-=h3PJYfHuVvjrm|F^>XLDyqF;cI3b)6LDWg-A^^-;i ze8$)>$@;9Z2R|d6$8();@C)^tl6s*W=DHoB2Ydd#d463;M2phA%C4keza$)S0I!wp zh+9I|n`O!IX;AI5O>y1tT7^R&I_=6{*B9lp|2m(sAL{j8w6EyE&pBDo$r9y)A2Rmy zvOXzGTd!kA2lms-Ui*)7tpC^#{x6FEmz0jaKHt92w~zH_w{X-q-{b8%g`>XUT}r36 z+S~HKM-|uKZ%zt_enIr7ghL;ETIp=8Ue%vfJi1{?XN7b7>i!5lu9qGsu^-}78Ye}^`tQ=dUhuzF)@x-6|H0QA z`?##v8+-7L!ma<%v;Jd0`2T$wPmU-ZeZNCtzxSW@^*FQfz$(|*D^3eXeG?gPw;&w# z1wSX88ov9If^++XU2BR&OLZ;&P81HMh^=<|d&#dZFt zghPK+*85}$eej&p(fN<^ZGEvHbDih2N=F|D=Y+$b_I@9iQ^p_gC8eY5yP~+RZ;R+b zzeCoy$rAMi-yxjyT-O)n+xlWZTi=8!H!5uT&wj=z4{hZiONV%w2V$iqmG2w^<_>}TfAE%~`PDXTQj1KsD 
zrK8VJE-9|_zghG+p8E4wi*Wd#mGxFx!hi6%($Sy4+7;KIzd97x{&WR6;)FQk_pniK z=6YO(U(D%$w<6#FjVt}zR6ZOPj`}Xk`eeZVv~cc^dVa!!aKr~XSh7EQ+^fs_yg<3x zNBw$EIJb*_?m@xW=f!?hILZYdQ#!i8oHq6-w^8)CT>ZT*wzL1G-(DkEQ4VwNM}I5% zza%=S*TRsm*LmUa6TCyV+j6m;^>zE66?^CxM!f!#aOi_K4*Tt_A6M<(9N=-qb$+%7 z_-^6wXH@*@5DtI9CzOsJuZoJVQ+{4jTpu4BbF1~zIxULp^ZYi&_2-D)itBMBqqx?| z3P*j%q`o=fs4w^trK3OBj4Q7FIcoGLM1RuggD)wayH(uQ%YL>WH3^6Rld|5b>~*`x z1AMP=)E7E^%3k+7!E4s}JV5#2>x9pXKd5hW-rJuxyhZpK z<-cy1bH=_=?9U5F9KdT2uI8uKX;56}b4)n=jER1eaOi`#8;*K)Dt-NZ?q0=vRGwrM z*X^D)`iM_n+3WAY4=b+o{FHFis~zz-`ru2#?fKpXrLW6vdCbQd`mLhBLpb6F-YJ~> zg^tg-;`(~;F{2+B{b{2QKC5)J{&}O*Avza~4tTw6w{bvzah~hrDe{83K3*U%&@Y;# zU!Xm~Zx@a@fo~U%`howZT#rJ#f#W(A>H&`HR4J(!+H30bzWp+WPYcf)J}W#Y9PJC8 zyy4I}WH@vR!Z{!G`P<=u{iw3n=XFPnJ<1&uj{44t|Kr9U{!bYD_9uLNjv9ODObWOD z9}Cz|8U01^=cLln^_n*J^I|_^^ucG1{esv(Z|uS642M6bgxh+}2kcJ^N1T_X+y$kh z>vhK17sY%@M&u?Md=_Vr@lVC=ygg>xKkP{*T8hhwGZ|q0Ke$Lo~pECAiVn1)}!A}df^*Up8CPe3~(SiR(!(o5U z=uC;ulCcLrZ|tYVe%aWAUl4Bnzhrb~MW}YQMmQL$>=PI&K9Es|62@){WhbsC_1gi9(;$fFN%HK*n_tTxBl-|e3Kf_I~2cG z@lN50^OBU?W%#o2gmAPY>{EurzR&2ah)&kngXfIBJLHd-d1DWLNI1vgepRngqf;k3 zM+~nQK5q13KVdlRr-bu)HU01A@AZC88;<9X%qq^;sqT~W`8mTsD|}vY)}P(&^%o3} zea!Pk#aaJR(ZP8W{4Cz*?U$52>$i*kvf`{$5SeTZMNh&ie7r zNXB(3&iZ$XeZueugl7y-3C}9d`d#vUMqY8&|3$Gc7(O9RvKQHr=rxj=Y7e!~*@S^ZJ#aSnlq<`o&uQ=M^h6=$8o{a$Cq@_RkUIE4Nc6OQqN`QNdSyfA*izE#>~ql`nP zaah)yWmy_;Wqq40dEC>Vms^EHXIa+UWCM2u!ckxFV@l^fRo^M2QztqnjSl#n z(%GtXP8pqg(U~{AQTT$T%xfuREz&PgKjykW z!Y}5!AHgr?^uI$tYL)t-T{>iahb+-9;BCU~^*wB7eI7U7BjZNa=#R>J&gg?53g}}y z^gk;4Q$~MI)=wIJ@R@)ljbG|_l|G2C-%M$Si zZxL?ekL}QZ7V#H*9`_EY{GU`h`uh1PWv`zDc}DS+(mAKN{{FG{NnbB6S3fUzLUC<> zOmXec8O8PYTuZ_cx5k1mZ&^6v29D?Ru|Ikq2)1+FxZf2-CnigdvmSpsg|p6EbsPe` z_9+*=*k0e?+7RGPitBl#3ySOKqMuV-$7fkM;*)>c`*A@y;sf3!+u2Y3J^OCOuT=F- zDX!ZQdK^!EJ%3Kw>%3Y}T<6tU#kGD>IO;X=1z)cv;iwn*d1F5*_RGc|{DQHc68jZn z4}M8F$4x)a!O4E`b6VDGWeGpQW5QWSKi71N;yMm3!l94)wix7O0UtH?&_81A!Lc94u@__ z$Kir-#DDp)&%;Z?(Z1kLw!?l!)@x-6d+<7A?|$CvuQ&GK^}=l)HW-~c(P=a~;H^qW 
z=V6=TIu7kdANB1p`rw_$9`)@q_TWd2eZBa9%-DlZ8GGoQH1^=|3;Bb5!*+1w8{!R) zd_(+fzQHfp|CQuti_`=8(Dg;%zS|5>2;U)`+gFdz?Z&=C>~{-CUV$$w9eo^GFZ;2+ z9_Kd-ho2c)Z;~bS;eWH?u-{^Iva;S{>`~ushQq#9xXtq&!r^CL*5gJ89P!|GSxccW z6ny+q59a#0I`9ka_>a<#s2@1;1pa~}Pf$K{eSDl1Ke=9es=Y7#?}Fl=RlKOU9>>lL zM;xa9mDgGpj`jktm+f4x9`|C3>;B%NxE>$+grnSPSH^@CSU0aE^nX|I?wkZkLqex?Qr0>v=NM!r{+^_%kaU z{(zrRI=X$&DX!0xmlfCTcu8?>U;7I_9$a7Dz72}&_T4BP{!fbkG2!qZyjkh!=iav} zuE((n#r3!}DIDcaNx3J5qg?PArK9uejN*EHD=Mzf^Dil`=W{kn`8IA_6xTYf!r}k4 ztnZK|{0DDSIyw(`E3VtCQ*r%#{*>bHRO3dUaQHJT>seXCAMk?G(dCXRuAl#p{)BPw z+Mn`yHX-)RYY7!RcE}gz^uLSA=Qp&c?eB;a^qWN={(^s4c>R}rLEK)tA8izlIAo4^ zyO?mq0lZD==>FBIxb83G0Y0U;{yaFNxc*&$bBgQF(eR7wtB)f`r5-jvpBIk$w*IoO z@4Rr-7yPubkBj|+u?Ih6?AyhD(b$8Z70!NsMCIotqthWewf~#<7k+}*2}ix)&w67I zUT^H-PlK@sZxn9*j2WFS@w3_JfM=DCJ}w+GIvLR^7#;94N=M)K=l=C-p6l_mQ8?n0 z5d9|Mh$ncnu}9pt7<=#*V~@CPGxp%E!fia8ereTzJczSWJb`l)qV6xa6citB#8S8-kLF~#-uob!rn`}{Mj{^O5#v6>=_y3fy5Aq9qalPlrBXFEoV>|%Q zhz|N6bA8^3e24uP#2)>OxgJ+hZ|3xW={Pl~>ZOnS3&Q#Op^n2P;fQnb%f4PtmWVTW zop6q)J|E~%yzIPG@v`$$#r61eOgQ{mlJ#j>!XNNc#(r7W7mPjl8R6`|J|9E<5dYti z_@f-=e4cPv!UH|#+>T=fzoBv5FIm6r_^7xZ566UayXf=4d1bHj^MY{1C;k;*-X-CP z54igkza94NVqYs9_TY8KzC-NS8+-72;ap#R+-x*DU82)ubins29eo^`R9wex*61fh z|CG@OpEvfX?`dNXzF_Q8-!sM@d{MZK!+PKi;29?xSUH$S*T^ zH({}+oG?aOiCzO7$zS;F$nqVusTay=-U z8$EXK60S}qqAgQW?pCqGg3Hn`-p;!3*O1|_aBx0 zCkn9iOOy}#8G2(ic^me>35K$?w)=@&#-_g|<=ghMw!CiJp7)=Y{qeij+4 zcrPr_XX2C_>)abp{Kw;O)VOC}+u$x<(-6P->O)n>Z>w=LuhzH|4X*IwrUrL0@s{|- zO|IdU`6}00thwd{DUBzp;}_%fv*!H%wGHu$Pgc8&#hMK#s@E1?eD$H7$E#dn&l^wt z@f2-uDE#fAKObM~o|*hyVukI`U%q@D+wgvDGgs67z3M9W4Z2FOhu`zkZik=j-v#pR z)tc%P?DK0iRVQAmsfm2rRO|lzv(9x=8Fcm>zes=2{VkRCGWqoq`CY7O@OEy)E5%Ef zBfAY`m#B_EH%XV-dA}-JpR0zk?3wjs6@$D5=2 zKYZu$H=g)*6rYzq9Di=^;ivZ$CjZ0G9nL)xS8c&%yi8@X?*)n-t&g)N*ZtKy;?KQG zkHUerwtbDZ7w?WgS6EAJcONa+(ef@@zQtYq+KukHr*4ctSKaijUyIu2{GmT*-NLnG zL-m5*JZaoe>#lx^>@U!J)~9TY;(r7AQ%!%rQtd8wyQ){>7cX~G&hz$*J;e9X^4qje zrTCK0PkbT%9COZ9G9G(P+Jf8u^`4sJuTze!P;Okj$x$v)&PF-5+8#R2ulL+_{NhcV 
zj}mXvYY8z|1pQr{ocG@f|5<~qu;YfoH5WwS5r!#Atlb6hU# zBq(OTOZoHz+UF(F9VFe0O84S;YIHE$XU)wE{kLM)1+IZoS{0t$n{^%#Zyr(zV`6l@0q0jOr>z1otXP4@6!*c z{skHrUZ%dbo-_(k-ff8b!ha{HD{RR`WNg7k*Yuya@BN{7T{6q4K>mI9i)V|k6 zF^jKpxAC@XY25h4O^a{meZKc|@h^X``uY=_+_kSql;87{RYc}l5=JIao+rh z+PPA{=KNdDyQ&i$+t;X{{+Rly@3Zd2!~b;ot8~1WDOATlL^0Y%{cUEF-p{Rdw|#Go zo0+5c-2Vz|T-0A)V_&EZen8{<%hVVBSbyD#*GY$BdUK(g$3H*z|CsZ&ieh|~jPdlI z;_@M?JN3J3qy4!bum;DkNNw@oLd~iY3@)st_p?=x-oWqI(fdPHkJdKE zKmV*c4zUjVYsZ^6p78yN%Vz%ma)Gyga+rLcqFHv8gtgecVOY2xFnOE_6{6cgNVGDbcesmo0`aIr}eT7(T zIN{4{I8lhZXFg-gi^i48<)M5o7x~NmmE!#&I<7={$nlHD0*dqBQO?X!Uu0IGn7%^m zaau<#xnBNU$1C4a>mHqyoZ;hmygJIyhBu!0W>j9B<3#@Q*iUgFh~-gx3$kyhpY zw(75qi}!z3i*tb6hWn9^6ZuYY+Cb$NC};e*>*Is>T!%{K`dT?Uzx4h6^>?_7baD_K zqpzW5k&Y9b!(y&T}blwQpShYJy@@O-m|rEE)MpK2l$!wvYOo&P%wJ9ZL5nb0b6P zSb8888{8k;-`kgV{e#q6Y`Q0xPLb?7*Ey6N8175v(!;S_CLN>8AiVr!!RNy_>-*2aRj%1=0&g+guU5HM!(l&aJ~V?K!*Q+DIy9|eZ!f;N5YRVt*MsHY(f1WQCsu%nz z1o&uxj|DjHyF(GspA6Vf1^9G;&j$EhfX@f`g5ikgqTzH76U7reE)_$)8s)mbt#7O% zH-iA}bQAs6V`sJe0KI=3wIlK;CEM<&WhML126%sfKOW$t0siFx|5||aW%){e(nY_D z{N(`uQGnC6s){;q32^>iu#!LT3h?&__?-d%V1Vxp@cjWk65vk;F9H97n^M;C_mrRHH}ZF}6>*Clylb#O{+b=H&L{DM8s zGwX%J9vsIbuGc60uF=2ni@)NyM{(@OT%Q7Vi9YLmT-hgt!ylZ-ri8;E@G+&M+hs1m zPZ@oj51%&r;AfPMt}mX!XY*m%=;OS4#pr`?k)W_nhl;}v#dW>E|M+}y#`q7uB%I^_Q7;kwyKHpudE|o80dJAd8LU$_ZU{$v!M;N{+6BDR*yHm~ zm$3&=2)F)^Dz5z*Gy3@4G->p~r<9J4&%EMVA8|$dep+IR`Z3qz1L^_$wAiD3@L}OQ zq_Mfa4+-N%N^$KEU7L?yh(A8}wFyW3!85{Hf0y4i`gcO{2Ngdf9QyeDxF{U@;7b9W z^>S*-2c)jNxMXxn*A3+JJ=zf*zl&F78d6K>;x^5OsgF8)K$*7teQLA`Lj<&^OUd_no6^KjAV;QGv2 zqXS+H=qwo>T+dlHI^eZ(eZ}TUgK!%k)WgOHet{!TP>zjTyXYfs1zGQqCGr6rORF zuc&f${y4dgg?QpRSgmlx6TD8i?O*jq2iMISjSl!W;T%uhzu+(Gi++Ui!AB(y&||L0 zlRo25Ual|Y49E4iyzvwE^gVs_vh^J`_Ru*JupbwWIOF=>gwX*%8qhgrbZ|ZJq|pJN zQ#v}%r;N^o#An{U9v=Fvj@MLcmmvrahj z797u814q5;#h%+&>o*wvX<6SWOXz^djQy;vHyL~IW@A4m>syRHc#E;0m-TJN9=ugJ z*K4L2fW?zysYn*rHxxhz`j%2-=+NNGWKZqy#f1#v7eCjoGf`f(faTg z;|9hPlmm|OWK?v(aXcDRT#qv-ANDwJnN;@VlAD#|=#=4I!lxDIkE-957|j}fS@@jd 
ztUoH_JK7)qtmTdLLVGf2gzv4+iVpe_uB$HzN1lUUGWNLMUMtt#VGoY|xLh4KY-g^o z8~n58*_Yj}g$(MCb-vbSHF+EMxAxYS*X2?W?^b$^N%^B9z}<<*5$GZQ%&qS%(kK_o ze#sPQR^8Qs{j=|771I}J-R{pe^Edv_&r5eP-owA*(ZbJ5zlr8Swz;YkwXXKW;kx+W ze~aeVt)aO)``wy9{r*PxqdJ+t`uolGYk%F*@@uZ<6}IPj zEKLpZ)}C7TXS-;5*sZy@w!yXjkmlLFE0bQioBsa9UGbf@nawL-kGW@{K4_+y26VzS7}c) z*DRVlM{|+C;OE>vSzYy`UMh#?@%$NuY@Fxryz#`J`1xdq;(SPm=8c`BIh8*kKX|S! z=8a+gAn&K=ZN2ft@0I4YVV>AaUs`*dMvjX=`0^FU|J$$E9slbG9M6Lqrv#g!b|2%p zT0F;_*&NNWq~#df^ZMm(nmhT0>Su~Iwa1HJsCuUOUYa+)w(z#%!^E#>J6^n^n*Mgt zU(NBCs9u<#$@}`bW#7U4IJy)czsU3EcpJ^zzis6w%#$RW=*9lAZ#U9kjOH~}Z*kGw zUfIlfJI32~(%-W*zx1Xp@r!pj_ezrDO8qLby)jYs4BuOz%PZ;9T;W*lPtZLQU-{?g zzVXCwm*OAIfot;fjyJj5AN_*=YN!<33)G%GXEsT5n0el8@sHgzF?YxJdfv`+udns< zUx_7%CErQ0lkLeHSKo6!?0Xx>)z6E?9JM!|_;bWP3YER@g5Vg>Ipulozf1F~U!~Y3 ztG&(#d7dEU!SL4j#ebkSdUlF-C9AZD- zOZ)wp^n95cPF%@lQvbbqP5k2Dkbhh@&+TXXYT7nQU*Fw(gL`Ht=_W5<{s!kmk$sZ+ zqU?VZEc&N3KUDgxqxs{G#unN%9;aB*;WL_Rhxwm1hvO?fjZ_}BqpRLTec#WQ^!GAE z^GoGkhAJwr>UdR*e4ze!&HDJo5n4{Jb$|A3bq&o`uKJsosE&MJOsA{)(&CNonJ23o zUim?kJ90k_&q1X795C;Aj$-BSjk)f`aP>7`dY%4iNWVyPs1pJyJId42TxptXUPb#~r29c$s&z9B6vGR&EKvNOqBwF4 zlQgHE`_@WL?TJrN9ABa(4I}6JteFkd_P>&t|Bb|aDT?_YL}P=llb<)gniE{7B+ZTI zIu*DsJKT?&*SOB7sBSc_{mr+iUYr}K7uV@Ys#6Wssd7F1@x=Q|b-t@A%3<~&_fxUI zK3{wtxSoFQ{WZ}%`nO7*Ic6`>{)pSlQb)cQhVzQ%HB(;scH_3<;}6&8BE_((-j9Q> z+27N{V>Eq6=`6e}esP8B!|m`o*-u_c?RYcar`GsNHQj?zps|g&@mM-n=VtspHP=VS zL5z79DdrPo$NRA@-_zklhu8UDfjHgc;qNK9KDy6>`RcmA&x_;t$8`VKVY;W_Y0`R$ z&Ih{Vu~+IS<~VLWMPoJJOTgpL7igU3II%4=?rZ$Miga*a6_&21@FMDw;GEs-j$d2n zXus9_a%}F=y#_D-DaG_RK9_8DbT7)CuIAp~_}mSxFVOm(uJ+y+=>7L-`?X|u4gIk_ z{A7Q)Jd}@n9@vK4i}&GinfK!{ndV3zFCV}B{e-^H`SA<6>gbrp+H}oxhTAwnxy1c3 zLB|D-OSDeoPiURnBeJ0}6~`1_Pp*q!tf4WqnsS6W$CEkV<5Ere!`n3HGWGr(ciuNa zI*$A+E!jWbMt-wT(Kal3TQpYsV;dg>Z=|xQ?Mr#WK45<5+c^@`}qD-_-0LrJT6Aw((=1Ptb8}Q=fa(@iOKfeU`N7{?bRe zoZ)JBUv!^g!9BPsU-f7+oln!8#;fUms7GFS<8tTBy&q^DUhA%Yh00{@7pgbhH+km= zzQnQSa_BEQkE){M0^W1(@wTrG_%du_263-*|If{n1B2-ssDiM5{%*kitkwa0uBaQIU&AM)YPsNt}0 
z^lu=;pMU5@@i!dy(}u%-)^OP4`XdT}J+423m+Z}TDDa7j++NLcj=xUkd9av2F)0OX z5{@_=5zfy`sF(M8aWAc~4!#fLd71o5&>!<<`QCa~_(9^8be;-uE;0Ybtv_QWkCv8qa!D2DPZMQ%2z^* z=l%WH-YYqYEd$TYbv@7foa;)?*=w(LuY29=Uia6!_u66kGQRb3aVU@Lv%!CVSl{u( zvY?r13d_|_pG$%-vo0)map7-;<)3r)!*%5F+)eq}`u$p1e@-}$$3r;ZK1cn-=P3Uv zax_0!*13y)n-Kbq+Xx8rI)?UI4)L z^@*_D$t6dh+s^@~Gps)r@@IdD&tIRT{@23#r3FDnKdk@lu>9!|j>9K64>{0XO*gc* zG~MuNdl6*ShUGVr{Cn$Xnm)C9J4dW)9~jDwDz6e#6F~eao%4 ze7b4f`sUU!QQ5(3X`MQ#+j7&C!%DWFoE+j1iCaJIvYkzU%Je?Y0t)n$%pBAYjFG0m z@CeZS3}v_9e9LN^eH;ut81Y0`wlFt8I%g!SZ?G2uC?2!B<%V@((sypMbl9Nz*7Y~v zbTi4~liAzMVa}jzPh?yRpDB|N&Ui?MDYG8JZfkZO=aNm<)Xhzn32wjT#`Pg@(CO5{ zO{Qpv(8PIDhNkRyrDx}Ih6USl>nA>OLzo{PHV&~Qd%a1_5Pl>d_0c}#qTr|f@A8&a z{??rGE5rKQ2h1&R49ndST+{fLQJZDKxd{Wck$HSXeS* z`<@PGXd0ZS!|QKNgX7BG_jEXgp`GjI(b>bLVY#D^D=!brr>~cra>}bL#e#pXo%$7O z%fowL3G2Ilon74@mb-pkx%-}ue|0!X4vw?8mxbjHjw^TH)8YIutgms^Aav^e z7ay0$HNAdrzE*Di-2AJm9QZ$;9b8O@^UIv_TsSVDYI=Q_Po?tQ`tJ?(%J#feEn&G^N3MK(SS~okk|#dyzFY6thj)IHgJ*K0HUX*7 zPn|yA>%;O1e8TIGhu;(Ugx9O5)p!1{%a?HVUHNCi`sz1Vuk8rSh5ws#@O16mxIW?T zy^-@G|2NEI=?};2h7&GxcWKU*$MSSpP{rjtsU0Mn@L!$3B%4a*m-CnV$%Os3RoUXs=^!p$;rsJf#u6?L3T=PGu ztG3aJyOTfsu>ZZGev&wh^XK|^c&R*8sr;J!|^uWo=v9j(U|tWvUl}k6X-?cu*xohkt5Hn|AD3VMN!*AK# zJa!*@-%M1I*|J4BCc&9gMP^@=GlG2DV0iBT2By8+OR{+k>`_0+zT^U6L1S_HaP?9bk8!TtJe_Z;?|WyN?!M*b zxyb2k<1aYdOM9^M*)KTGx2JId)1DhP+w9{^zvudkQ%a+>`-MJxSp*}CBAlmZg4~-P z+c#s50>3gAZJ(okK=zGZVb8Vi7t6+|zu(Lb_Vv7&VK2(;D{C|ZrG;jEzGu?wpkWpL zM+;4QtD7SOuP$dV>SM9+ygBl@?(_zpzb?vtb0qn4YaZ)p#(@7$FMWF5%z=LLL89c2 z(3#y26RcM^*HOQovz(TbM{yPPuV&p{GXVT<@yyn6J&8ZHN97lKY385E3%-=>^_{2-@Ayl$dX&pG`$ecZRHdCbf) z-OCwID{~?lqCAq0d(B$$-Zt~O3NyU0OSv?{bLU%3H#*zqY(ev$C^+OD-`?;f`;>jr zX#4wPrct>9e$;GyinAwAz=sE;-hk*RIg)^OeAm=MyAf!|K5SZh>``bv1Nz0FooFf8 zmVQU;YBCxZa2{g(D(4Kvd8;oq8}8rKGUfv-1>VB-hBC8Ed}(v|z`v96HzOCc21dU; zYvB02BAMnrRtECT9%a|@d8-zUe)-w~ox$qUUO9x7XW2)j-Rt_e^Z%mPhdF_E@6^gB zd!`y@&5pVJLf%Q)V3l2C>@(+?=SGwZR$Et7So~CdHPy>rVEsJF92?e5A3B1%4dTNN?%`*w8DVc)aNOQ+4Lpob!{FPocW1SHd4U1pOc6x%o*i?ZeCK 
zps(`se!@NFOt@SGFPp8_etA$9j2pUCOQr#H%E#q@2`ATQaJFBNr=zowl|NK(@=IgZ zTAPA27ZZ7TIe1UnLjm6z`cbYD)uFl0m_0;TbMh=k;Y1!%8hn-rH}$VM&HYyYuC2z= z@%zYtayO_m<>Tf#myXJ-6i&*m>P%f3o8u{@i*LT;;AncbmIn&{N7? z=?`;P4iYMO;rH3kO_;xuPyPzAoTAFH3)k*NMi}ZLkBZ>we&Ko8_aG&yoM`$CO`oI+(A1Ajj^I~?1;6^^1YLLwF;o0~W>w4Ss z;YVKX_$P8IPqsk=GuCm`kJ@?2%`fV?pK*0sSrqV~?38uIJ-d5=C0hmf|1BS6@Y+v- zhZoSO6rh%7uhK^@zw5kJ-OGc{%9SU-&C)a)e&^DddOgyo3GT)7bav*s&tZLyK?J;n zokAVqWk1vPob+Du-P#VPyYlOeS<$@O;*-V=UsP}d4@V0p|R%n$H-2%_TtQ~ zWW_1X0d+F?h;-iLW25%GJIKElKbp{PwfCskA2k!Xa!&sJ$22-pcgjcOQEnWC_6%dK zO|4hEKJk0Yz%4bd!^7naYuy%EcA!)V36V zO}@!T@foFOI$o+=rnAj6*rB>F{WPhoSQDYHlAaQtK4T~W#!qaU0AFGmvco6OBE<*(}_zV=MNJ@cIglYDE;IJAO)0{H67@o`n2SF=#|dptT= zmG9Nm7k=g_xukr=(h6Z$<_Wvmp@@Xdum(Zwu)F=)iT-N z@)@M6-&jZ1Tvf=@i=;@isg=QqC9zJtKyS(dUH1N?Ztk>pa_3Hd@oUlbY2Z7$eo=G11697q$JR>d#qF+?R^2-9x_~^;_p?3??nPv%2t2#eKuTdWHN~-OCT| z$p?|nZDH=4&P3MUN!ut}gW!5ZV{AH8xt2Zu#u)3WmK@$iGfiXNmYLVpGw#Idip%R+ zf6P}+G|$w~r_J#`mfc;v+{6kxXnP~EtYg_eXclXKT5yO12+lLCp}H+|u1mDdx9#iR zcFj=>6L=OT@W{)r0oNna>oH;jwtsAsx-G@mC2m>}w7cs4M-#$<`hNg}_R%>8zdUv1 zUx8tWV<^u(&bRpB24p?wI}JXA4{`z@p0)DsQ>F(sfJoJ>!c4okVz*W<~wC( z9`0kTqRVz;x(Mx0oVOm`_{pJ>*bMB*JS*3k1)9N|-BsP0g6B0{ zZhArBpXT@zl1C?~`y?=A4?Xqjf#PgOY|#VQ4hMMO4*b~o0CXSb`O48&(>=1zbiWxH zOCQyl`Mk5{_t_;Qm0r3N|C4#b#tzQCXD-^Gf#1(q+`l~_ynY?_A^1-*Z_n^d)YH(P zb+Qj#O&p_O@Hd*rhp;Y;%`T2Ddc=HTw{Y&KECbFS?_>F6+Vx*$hMpo;ujf{#(wXOP zz;hxFfn4rnp6zoalF{huVd&)Y@v$k7#NbuKy`A9Pi5|k{>1w8)v)x?Izj!;y`JX($ ze~Lbn|4B@7LLXpvOxFk4iKYK-@*Y|{`RjCe3LN^0wM^(6WP|i*uD-GQ;Iict`am>9 zzD($H$+9=B4}d58!QmtQ>hyu0C4#>HsXlP~GO|N8htWLd4SO{39ZSM@k?{NxJga>f z?Z;@|<`tWLhoaaqtXZ$TY14Vgz4@8Gh6QJyIP%lk^SWNZH{zWy_=%~8_x%3AEoV&j zR0T22EYIS7@d9KHZAT9D6-P{@pcY+`Px~P~cafK0)Xcp|{Jh2RL%OG!ck?h7A0DZ{ z-n-khee7=Y%d6p?D9^xPvjN|)3go)u9cM4*#{MH_enIWGVpg7P(EEkH6&e(f$$IX~u4C?t zo?*@}K__ni%!iKlZ8vN0i+*&|UdD7_4LXv0Ptr#MIVBlZ3v3@g^6@JZ3+_0oa?w=p zcwn9E=eM5+22d(+t*lU zQpPw9V~i{~e{u6nlkTOTJ~Ovndi^W%k)maXQ@^j@+0Qj|H@=KH5!{4X@Fk-;kN3DW-jxohkk5e528Ws 
zkWUR2{K@|vjBIb{;*Z#7@_<=zaxc8_wRiDOBKTSQWaLlw^kS==^s8lKz^BI>?vCa~ z$0WCgH#HBviFZs?Zg|rzapt0O4s(yLc$wJ7zjMC^__701G3+TbyInE$r=>H9k!+0) zA2CmFKHALt;u7$C^P2eBy(OkwxL(VjY@*#FcAHLIq%cF&6aaEp4J2-Cv&c6W1q?WI$WGz24mTmmvuQwmvV-_49Upa`B=Qe{0fDeZ!=m{-%*n#`9J9qOr2W&n!kBQD}muIrOqS%lp{?aDNsxJ01ANOPCa`9s23uf(&(2>1}qI~3gea9U~Pdps!#vd*EMO)R-iHB!( zpJvT{hu@bn#(N7QT{{?OZhJj5^;+#@n`*Dd-&|RY4Kk%q$!oQJChYS)H&zWj6ZUxt zzvZj7(5{|#P2k!_U+<&-Z^HV$H!d3bjjbO|Zs#}lQSzs>X=Uyi)2biyd($h~YR0o_ zlr|o1Ki1|Cjnd{Sz8~Yh#+5~u)G_}p(D~ZP@R52mYp`X_qOq;4+m?Cdi?^~~vFWP& zBy*^riKQx{g~f}-f0uDBo>!eA;AL4Cb>Jyje*+)Y!#lv6{RLpvn>Ss4Exhvu>T2C5 zpnH8v`vsA9^D&(j;Eg{ViTz7Q>Gh|OQP_0s8#0c1c(G+o%UJ!swxg<>`7_hFaLeYS zH>_zLdw{ZfV{ZAuetmzwLNxdd=QKRSU!RGzA7d>%5%Es7fn$DAU&TuoHvCimn+!fn zq`l=L(|7@JkeSKV(F?kM6U!Ui3?Hz+pOjp`lY6bv{I0e&ZDaZ0|MbzyCCmZvE<`^4 z_I+*3ZiA=(?Mv_I%0wz3?<-<23tnntjjSjsUfjm|u>Hntra==ss##PXAp>uO({Fl$HfIlj!jY$VdYmEXVp?B=64ueo{bw?z$i z3l8hF@nUdQUHGx_w}E|^diz@wYu_IDf-1ieeh>|Aga*O*=AZ0W8_|@$M1S#} z@Wy|%uyt-of}6oxcit6`ed@^x;=Bz{p05PxkX& zX=3foc3pgWO!A_su;J8b?DG|CfrSBc%c98VE1JxC?f7d+bQ8GX)AM~7 zPU#SX4qUJe---M2>&;@;KwA1o^;{k4$vSAG`>9#cp)58{W?S93^!Ah3GqO4RXPYkN ztz^+v8GBYs>h27>>o~HZufFDFJ+%Bvto(3!_UfI^9~2&cukj&J7N2mRbJ=6nf>sxye`Iac_QB%zYIHT&+JIoTk_1t{xz3v$Xpx5 z+eZ2JsW);R`48yGCqH7wPoPUZQ~S8=?k7LuagCm=^~N8-A5S4etuDuws!W;Z^2*=$ zN@5{&JY65qRywzE`)-Xfvs}MFw>!hw3nQjfIYGk-WDNM~w~f)t?!zBb`_} zeh~Rw#Ut=dKI@dYP-#bOX8YC1TMypZt2PnSevG=0B9HQ)31u3xzY;n3+wRX-2=1?F zFW67RCrz=7+lQ(D-45xC8?3(2z4N%YpSbsv$dntRGrCsbe?0)b_ikz%dSKHHL;3$# z-O&fGtULNcbK%MdOn$)w%ux~iPNeX8vIuC3>L1A}k( z2JSoG3~W8WZs6DNIDcU8o9noa41DSBX5h{@M+TmL%LM~3LbCzRQd#uk9Y^=Bxp8PV z{S2DfD_7K1F5bIl(a;xPDbXz7Sd4uvK{*7blyb&(kllZlDL_EX@bU8T=h# zu1?w5&HUtVWAl?bqvZzZK9jY&m%m4@_l_bD_CJFSCcYWzD;OX@Zv1_(WOo%HPy7gL zsBh)KUm{nY!QT1+>qY$Z7;)Yiw4Y6#%qKP1~ncz zSE#S=qW=#NkAqhlpFpqMc{CTy{+c;|HhfZ?|Jg{%%o#J2-!(ImGm#5l!#f zEa+iO4-oGL=k$fQe(31U_+5L6CI16)^TMk9!M((i^NT)nG(S=}_yjy~KQiR$NPdBS z*Yle>-T2+pO-qII0s8mJOMAfh|1`sx@t;AHtD%W(kqq~rVhue5Eo5uTCLL)t<2OT( 
zr&vpa*fP&Be%X{yQg@`)8~++(dvm1xp&1)4E+&3sW13Gg2HDY%n99q)%UGUdEb!0DPu{Gplj^#uLm2Q)tOF>n0R83A5T znTsz!0bZBR0Iv-f-!cg~OH#Wa38?9*XM-NCBv0kdrre}I{ zbWsXjbcXh4tCy>f0y@0=>L)y!)iq4*5@A200Z)FGxr2S<^C<@Cu9f ztf?9rdEVO?VIEFw_R=-@Fi%>)v?6&Ob2a=xc8Tm-#T=?%ow`rU9G>+PWn&GQGFVhc{*2T!Ds@#K0P-u(NkcfJ)KOQ{S!t$cuHVjr1a;-0bj zhcgBEjEo_(((1wS7daa$1Dq$(r^ng%3*I|hVtvgsr?a~(9?`Lf>={AuV;+BfX8y@$ zY~pogu`cy*kXzE>xi*4N>_KzVi?1v@!Jio&!ag`6pZ6$hRQMXkrM_R?Tu-jUa`s@a zVh{Feav857#&xaDWmN3cLta!a_`oUIxSYOm_rb{aBlsUDGhE*Q4%nsLUY<7q&kcBa zb$mw#u(!MA8)Mj|J7z+ul5y`-=#5%H!Q9tI)5<8 z4fKn>bc}au`@BcGIe&O}{4UdrpRv)O=@Bo96!A0DB88r(L*gR43ymyi9hWR~!y41Cjq>9MXfNBfD(Dp&AP?3s1gGs=7DXMSz& zAAM+i@`?3MYS}Njc^r~OTj5EU_prfxbC+Uzmh* z(P8|+UAt$ObbXcetC*hhZ=%p=8~3))&gyj561m zjnHm~dk?LC8|css4sqsR-A4hW$w4)F+q;uWImx_bH9|o|CY}LJ=}SN$T7R#;Mqt#pl@VL;dO>T@p7>C%toDo zC3^Ev^DU9t@OEKmg+DvEKXH#X&;c5S_b)JMmzNvNad0l3+e2~Bv$%J;>#Vt#t&SzP zp1I=LmgMQTn(@=@rKMOiah~TGX6}2U^AAU2-uOMl(oKF(m-NhX<|R(f7{2g$0$rFu zXJk)%_FUHtZ6IEc3G&W+_-*Gzb7p{J&eYj<=Rs%fqXc=mW=FGK!_b9#n#)Yoj2{v# z+SDPlYKb9iWgS|6qLa_gs+`-%?>ml5=-?g^)1dtH$gV% za;jP=`?~Vw!h33~b@m;;W3W!4g3Z;Rx$?Di%V*hs4Bo}9y)WhLPUM`ntsir_)DM9RJk#BY4WY4R)>M!6 zQ)Z}xU8nt;DYT!!WxjCujy*?o-Ai{6dnqH99^}co_LXgmha@L~G365juhqsI--cf1 zy~qVA^zrKa;fmBGax(!d{i+U$Cze7a*v|kVGpHaU&Hm}PsOQnsOcM_Wh-KF1=uKrE_7;$a8msS~@ck*d`NS`qqQ??S@ zJDp-o(sOZR3UuAfm|D0`9zuJPe&F$ydw!7U5{>Eu-so}9^9UcugB=Z#MQU&0-C+E| z9IH->9M=}B`{Q zIP8LW>WKDY(J8=#Cy8@uzda7^xJK>e4;+Y^wD|cO^tp~ba+R0xr~KKAD3?A=u;wNI zgs=N;;c-vj(!cY_v*_pk^Y39l$L1H7CL>|YPBseiYkvlRa;{q+Hs?4ee|X}#yu)@a zG*_wb;0#*M=e#Aw94dF+*%s^QOZy+>2^SggRIE*Tfo}cTb1s!n82GTv5qogdIf&0P z3f}S)VqtzkM0=gs2xHXQ58TX}#E|w|U&0@Kk-V|oe8V#O8fFfC?B|i#`vw-F8x1u0 zWj=Br7}^(HP}qM<>QDN6-QKHw9O^gf8H$6U&J`Vnx791Om#;0qHi~V2XpvbW`D(DO zM)8BhLs{ZDdGwXJKiVAu_X2*a{{-?@{cB!x$I6*grE@Pd$lbZvEoJzmVa{ZU;-%$- z<@kJ8B11La??IQuh2%?n{kO=b<9v_C#A^B5k@lPT-HgtQFeY8=d8SIUpnVRqqVf{P zMVw7J+@^}}IM;kf$k(pryunAQa{_!b9dj2whJG1&zOP~Az=2!re!g$kKFZpQ!fWzL z>aK6z(1QI@w>Qxh$Nq>IA0H;Ns`i~`?VZFD&Q%ZHa3qTlikv|YlniPgRrI)zF^%Gj 
z^1Tn8^rKPg$fg%PU)A@v6$4Gs_&#vpez*LX5$cPERq#W3Tgy--`+|ZcA5*Y2N6R); z4GqIrD(7!Pmx}+;l{=~!%LrrE{V~mn^j%Q5CqtYKJy)%Er@=Ls2lQKOP5hwPYVJM7 zTr2TuwWo9Z!uOFI9E_E*H>^eu9eUA|Cc!96lkYGx%g{wEOtBX!o(@(eBZOQO*btaQ_zlDV~Ji(4Ap!98Qj2qPKDY zI@%LMuLWliINE0!VLr8Hs)+RrW6ue9l?lFNsnHszOmj3$9>g-{DMI{V1fSZDli1C% zT`LCAc`J{DYoqwJYsG-ZC;pIa(~Mo?Y#IxjvV-(hyCFU#9K?qY0dI8H`vzEVN%YLB zd91M~i63d)#~71%bQGBC<2%gT@a(!pnW!m<IAmE zZA+X={2zV!j-#U?PpNzqp3*vT^9oPe_4ppfrtuQDNouSbv#axY?#%@~Z7M}qb1+G-tNBG}Ar2lIVjCfoRC*1=9-7-aT8&<;5NEuUOFQ2tX1 zW0Q_me5?$*$@Uv&zHYzR6h}6!8*_F*2HmhLYHH4E<6(Pr)SKtDmx)|2mP~TU%0J<^ z&G{GYoe#JZd6Vl)wEPh{59Gv&W(mgbAv2^8*%^kbh% zPld8p9;pmbygdW?J0*~4L;5wxeiH?BImr$6ThfgepEK{4zY194p z=qcpEyY`^}Xt%?`QXIoRi_Xu_r3^3^O0m!>1bbXvy8|}tB(VtwO{66wMLYt;a`KTJD6U$|^Lo!y6zfE<{IDNRluIQ*2kT*zLJ%QJ6=PZIQ} zx!*=UIKR8eYdS3b>XQ@Be!1O;iIeZk`yNiNK+ib~-Kq}yA` z0q-COd=It+eAKP|dfB+LAzz5iO=>@lvDnWLJSV%826l|qz;$!zVmhLMa-gZh`VH2m>TB$dAK8Q0r~8xR zEg8H9yu_alcRk-wnIoU5Z$Azn#*55Uj;R?W)^r6ot!^@x;pY`%V_N2(T{rUeoDLEWqL=)y7UjryybGbz#@nL@zc=6c2#EYHGow8Cj)xx7wtlUX9_tcmPC7Qx) zrS_C>#H>eKGE2D;@TXh+}H-+uf))2+26{8Tq{ZKOu;+ISmn&{fr6=bqC^FH=`K zqqd(MGknxUXxDf%;Un62>N?~}#$;m{DPp830sG8ohC1@~A4Kje#?W^G^5-|%M(uk& z%vf3}f1LLPNw;V$#5a>#U(#usTlT5QyX6@k;0V6>DMg>dQDf&u^wpfc@Z0RZ6m{a* z*Ae=T=E{AmD*}CgEAX7GeJ{TqT=Bcw)c`MnKhylIor{3)dA?13TR)V&*-iBGAh91u zBZCh0Vy0^(q|GZ@E7WncSNCaIuHD_Ir`dFI(9hjH?8GHsb4?F*Sojf}AG$+Y$5|&`@?pKlsO)dw!>H$KFu9 zMYbfdKlE0>Ge7-Rwi-P!uL(a@bG*Ooo{C6#-N~BmKzD$5V;;{XRq<>~l<$1*pKU$* z;#NCfhjd8+NU&t7lHBE4$(OkPrWJ zKV@=WG(OgH(p(VcIfi2lX_(F(tLP&#oKrLwc=) zCeriNImai}a4^JExYQAQNU$$ZhdlgdPCI88%En>5*jKjS3EtAY(Sha>-Zg7iS9q@s zTYq4ZzeIzDah|7UZmZyh7UrpivD0swI6FD(v4Sf470pEB9DG&@A7?vi&71(Y<($`5 z&tAM_&h)|k?{gkPz*m8t7})L3zw{Y%oIT+6&>_wo%f|AtrApy9-81qHxt`8%- zzGqGAvQge!b4gxgmB-&6_=4vh?K>U&RtN8Nd~?baoxp#s46m=@nmwcLC)i^$_+zeK zf^t`fT$m#HoFmAuR48-3a2*cASJIU|d@GmUO#3kBQ+zY652{SqO>QjqnXiS~Z~Ly@ zV|aDiISX2AqLb(*8=6f`%U|+GQrNI{oUH)dAl7dEAk*`cGlAbzPTP`M-uO{$q>Jdg za+X2=1!opWM|CTMoL|~-Y&y`-R3vFcE2R49WCq8v5(Ou;5Zc5?vm{_pRwdOr| 
zIF7v`dN_S2zFg1wUV=NRuOx>89V_2hdtQ=9lJ$D7-s$AjOynhU zySa5z@Phi1A7vObIF6SxUgbTzF>@as6wHCvpzu|hXiWKduiC!OkLF3bKyxyW_L?K* zG6eZ*ipQD3G3;>7*El@lV#$6EUmz=>R~V}bV-w>!yr4O9?HpZ2qooBCG6o-Tsx9qv z=3RvI(xRMCNQ~l3>=B+!Ab;+Mf3@F@&zNM0@{^HWfo&rHJ;uAQ+<+&8$bgr-c7Ckpr6*ihz@3Q0X+A<68GkVMR)>XW z`?y@CTztGjvroS0i24GS=0PtBHbJnEVYCq7f9eDaWZ1amK ztKiS!qPaB%W}o&i{P0W?>B-@l_;FlU?hX7f&f@G|iafJ*wi6Ry&vW9$!nB`|yT*p~ zj>~CltY3R_VVg&pzc{!oU_4i{AL{la)PDwfya5Rob_|`gAY^)PpT%Q@9J-wH`Aw3f)+MS4% zC9mTC3}_euU)kNlH+n%Jr~0j35bQq*hGeJC$Fw;Zv;#(u@O89yGC{BR8p_w)YZsrT2W;AJv&RxVXRkQ9;=9-!yS@?5 zQEpuWUEH}luVCEU*T!RKsH&a0zKoH06@R(^+g3up7u&d!aaBi9q>Kj9n z{Iftu1v2`!ys?Kevz8CITp`uB?=J=)!HSYIVc55?FHBYF?6W5JVBU(H^O&dlIl~<3 zE`PaXuZ`C-PUmXZcY5Nv1UC*o+1nQZXXk(^^mE@Uix46(=*HT-IFsq5Z4h7jZ8h z-jhyr_w+j@d$FJAQ_**7AMCLO{TuqSyrkS7?v<(!${Z~H)|kadnfoKkEom51JS{JH z)(ahHV{?wrl#8G?f!uR)NIVkQ0&_l7=B1~@x4 zlv~LCVD1^SeAP{z#3SPPPXO=X&&mPmQiq?*3)DTuKwO!gTiMHy z99hh@Tc?`;Ou$RF5BbfDt?d%X67fVCe_Bf^U}YgP$HNkz>hUIIOfu}udRBI#=u9=q0|8UPbS6)T%I}_xb>ztk1&u|_Fe$OG= zHuL9nbFLgZ*Y9=U@_pu5YgPCvF9Ur3OCRY)(Nr*PObUBL^lc{ZJ%Jw6c^B%7PMAl< zU4q|!PF}npwJ~h%tp@%zeuc9s9CEq(PCU~*BiKW9HiO`6y$=WZdp%{}QfxL@8{!2| z@mlaahJC3xM&PeZ@|N1CEFT~*h9YZU2=w}nt<0V5k^V3aSZYd>>)|8KpW00S))*&w zG>CTkgGFKA?W-Yw30F{~z&jcCu__29(R2J>o;kto#*EHDh~`9Qh&3Z8J8hz~%o37m4|pAW2NY;)-^hf^*bWLZtqm|4r(sEOd8 zFXZ{)4z5$g3Y*ZS@=u0iO^+vtF)Ej&gIw)B+&iQ=HDxLfoMCd+UuVfuk{}T z`z3cyn(Z2xT#J+QV#^QV;n)t=5#=3DzV#KVcl zXx;<)d@J$6W_%XW*xIgP3?j^Vumh1?=7*gx!BSqh^}Q(bfxm}8wO1X7GrukWM@F|J zCwcCJYjX1KGkyAi5ykescLoYfZf4J3R{Q4&u~Q%*AEm z_vBoin~nybk~R0-CFS0x_rotC53Mqn1ZTfHJr&R*$hGKaEcA&?4bG3!Z_!h>L9nI+ z+g^CdW=nx@KYZ9vJLGXO<=U$d-fEYVI~UB8>S&KSm;b42Kj6CdS@>Uc(U~?B+PRX> zh~Ud)S`(K3Dj!)M#N_zhuNyTB6 zpQij04!urFcZX-bP0(LwcUpb$oISdu_VM|Y zl>@7{(89F)`hsWqqR2GvVC?e)JoR3kq)&Zv7}8}6%@TaPwf_q5sD9w9l^@nwZQAo(U;ckj*)ipU^vgTSG$pRFHJ7cyN3VYY@_xh{wk2$mQVhfb*y}P zokyn}psPIbUtW7Z{D)2|(mvu3!#di-Ar@Y&@w;d6w64{!Y%b68fa%#ly~mLyKHr*y z&3s3|MZ6|_JnZ%l6BjuKZv}e{bBQGe^O6epZ5?lIrroD#U&egi!nfMP8%fJg&t@Ax 
z9vQoWvMJ-%wWC44c!crC?K6@6x6~5rdq250=50Lx!SBcL;cD-StnG)tZX)(n%X1<= z=Q_Thxu|8$8*-Owc_wB!pXbk+Qxnc&V zb}KS#N4;pY!e7?HGbCR446B|&se{(y3FVCKkD8UNkJ8#6aGFEkKjS`m^C|U>UslO^ zh5H}Br-I2Y&G4*BhG$jcJO{?}M{DyrSL9s%2ygAT|KH@?C&2F>;EC7R6G;9wWeX@f zaH(1Q0c6~RNtKr+dFDBJH}~dpPyO#tlB2*GPVJ0mF1W~VeV+C+pmE8Ju`BR*na99i zm(SGyW4rIzv$Vj<4C|*zHfa4zZ%Zdt#7y^nRe^n`>jXGF-hB)9G;ErLwP6M?B=C8{;+Lqi34b zZw8*v=(!hczzokV*x%TQapaTeFrs?_{yop1$u50}oV7hkm8Fivfw_{t1y{JflX@>d znyvm0urtU4&36X=hfc|_*#0K{;;FHvN9}$y{roTeOpmLwQg~i*^1TAm;gWF~Vp6Fv z-ko}l_|()fe$S23=FdTV8)r?C^Od<7o(cI!c0mUD1tEeM=rzQdIOj{(CXB}_-pO9q zK9z~S!-3qga*(nK`IAEabC#E47d6POCHL6ewjdTL{p|(1SMN30r#Tcn<+!3>VFs zC>sgiHQ{Ke=beS4_8ThcyAHYH0V_@}o4$V+mhYk5pGmBTI-UHM@2|hdxKGYzwZ=m1 z%8s}5a&l4e54N%gBG@TC8+EhvRixdct;+ZdX^%`mFYcs{tsC$N^Ap@pSiKd%>4$dO z??$dLPeIPVV9O5kLbxe-zz4=sXfuqw$uQre%zJ|RF23cy4S2j~A}$>0wF<5?jD2`{-yXIjgpUtC;rL>|^e|p^wh$_#N{r|0eoQn{TwwT@S~!{}|*m1o2_{ zox$Fg%Q?vqt5=RPo!a<|{m$KQ%H4NZ80521r*s43*mcK6y=Ad^f181}!|eBa;^Fq& z#{@&y`c}TPZ}XT-+iybOh&S!M>u()X4w&lr@PqUr&zd!gpQPVT^Q}Fzi^xY&*@T}P zaY5t+z&L0&0bKwbAv=kZ6 z7)m+2K{`)yy!tKGy}pTdwY0U2nfzcKM1I&=;XNAfs(Uo>vzmD>T>U1fTgSUO?Q@2l zYm1FzWBAVRno1|mxCb8Z3rc&O@q#{XpN%~fCue;*f05y;u8bLbsKZ2l{iIzVQ)$u4 zImxZr>_gR@_kgYOP>Q+Kvza>YVGnaDnp#>y^El6BtOpm3SMd++OUQJSIrg-^2V@@*Ghs@hH*x5PV>eoq0S%y*hlCNegvc4BPdq-24P{ z+RQ!WF`aF_X17hg!&2|8)SOmB^Ot#VrSgU4!v*iQ%)^JBAs?1^$j%&ivg^V{$G%|F zKJ&_$+uQLYbe>0*bk~+dZ!@&Lg1+Nz{#f$`skGy_h4a01H2>k!Qp7TeBw<|^k-YfUS#;%B8D7)6QufZmc13LmvFJb3pI*@(Eruy9lo^t&gZlgcR zevQ43+y-MR3ZUy+J$tR3^v7FFH~K88xSMj9RF^!jqNUU=Z{=(~o-6%V?v2DEE91ai z0?Zc~pPs+=_?;*~*Jm$WWS=R*=A?Y3o+0eRUKX9ku$jv{KAM!Qh(ZUA-{e*GVuzg^ zoy~c|$fV4Je{sbB#e=7+V$s9>pZdkA;Rl~RRexsrPUXngZ!x_(x4>r&i8qijyxSmk zWc17VdM=lD<9a*AH=|!(G2pjVjb*lJot3ty@Xs^{c@uqJV*7-qjjoPpWrpfE?a|tq z2aWM>ue!gl;&>N(nHQcnuz-E-c!~5=UcrJZ3m1#dBlxv{4V_1MhFbM6WgedVo82Eh z-sPpgz;hl4XQ$GSn7ox*^9$dK-NAS-p&c^E%9bkn5x#CtehI9P#!P|cQ}Y4Lb^zP<)2;CP-2Ay+=!BEs2d8TIiMYg(a&*CJ z;NAetK2z3y6Zhz6d?x)s!;L-fHN}Zo=OM9ypa&{E3H5UnBZ>NvW4b5W*qLGuW z_+xv}{fR5o 
zuyXY_qNU`;Ur;Wdvb5*-Ip3QLJp)@N zJrJ8Uc?Jo4mF&S!=4{ME?kY|w-(r;ab7K!NE}kKUzIt}^N`BAB_WigSK9#~Q_ON?7 zcQl<@V3t&YM=^Gd_$iVX>t(#%wdH19Hk*9^J@B@k$C?YT`S4$Y`9LqE6I+(_YL2i` zmFI(+N?}JvgIKG}{mjYN&^QD~XWvwDuBP@6)__~W#8$3GMyagTEa^oT9H|F(XI`Wi zS=r?e2ez}%IW6FDq!}Dot-T3!g#3*m<|Ize-Ui@q8_WLq$;o>?EQOW<3pQ{LuO28((t_N-~-UG9~pg7$7hf7JYJD$+h}b>l~uOY zj7hgjzE|bv^+MO~k3)Cml*u=*3v+?w%canD&R@R=dq86)U%BD_XzAfjaN5MUj}d1q z?}+ziIIB(bBH!x6(7<~>yYCn=HpQuQ9?M90AN!CzIptTD4gMoELH;I(?SAw{gW@5< z7==f4K14J0*v4~whoGeg-Bu4Lx^$+F`ix+w)o;0jbF%VmTwHyL-&aFd)osx^AJEs` zcYal1`IttP9#LBG3C5tg_lr$>4>$v3 zykdgxn=IW`R@wIMu@UZXg6~vSzhUG2NHZJ;f9_BO$4L$6&7@mgIc1r1G;b zqMhJjW7u<^Cv3#<9ilEbUpU#bBfyI@KZ#{3E>tquIWui>`vQ0=wm-dpgDtQAB@;9U z>VGb4b_0A^8k^g-mH87MO{|TZLiwS!Q46ik$ENA5^3s3B+7wS0(?^PTzHGxTFzvUF z$p`9W9Ujtk&iQ63U>@X8@;W7+V0;G{pLAL(T(_*HbmyVK??QI3ls#pTE72`gtcxIr z!w1Gy^ugHE3;71GRleXc4#_a_k7#f|_T8J2i?>4~;rY`)Pxm6j;gflT4{mv#iZOC);iDC5{X62@^O!qi;wtu5#(VC#qIY=Bdi(+G>d-ILTpy&Y6`rf# zas|4Gvt#ga48CD2w&iFn(w)fnm{)WaK7o%qc1J6gjx zvbHCl7iLa0A1|W+rOU)8jvnHjGI(an{Md8n9s^Q=ct^h6WA z(8%H`eemk$2PgIcMHhq2o*qAxY)-xInBl(!F+=p0+Sxrfd{$raptl5mtB*cIpV<38 zcDP~({k&I7dm0(e|Ml>f#1|T)%6`e3?YPjSTNZ0Ql9L1M4)%^^Z|L06j(Nl#=0QXH z>sSnI{NYl@osxd$8FTTs=GVu*Rs5rkHa@T-A&x%n`#Edruc(jCZ2*45OFXxreaTX- z3F_K+Uh+;4SEjWr+(u)uq}I3E{S)?|A!R#;DEE!0zek9zN>pJDD9Xm)RHVezfpd;X68Ug_3<2G9OA>&7DSAm2c-c8_<= z$&XXH{J6Jh?x9m9{?l7oliG*Uy#(d9Ej~dVK6soMzI#!A}i3bf;HgN`Mcs{7^>x@u?jHsiI^l*&z zC48lag>#a3QF@^r^#kZ2xh4B5&N*y`bJ$pu?Y}|3nbl^Me&2Xw45f8_w--rLg%cea=_| zzX)qVvD^6d{)T3q1-mym3pN$?SDD`d@8j&Zv7dI%dR5dZr z>yWh)>`OO8C(%oKku`??sp@SmfsV*Yt!Krd6@Sw_6*I?C)`iNOS*N*jPWOKy8NhwP zw{Z(#M+J|%@G|>+k@lqM#~jwhku~(^)0g1YGFOQorYf`_TSwp0NAPXB1iV%Qry0M@ z+p@TK8~onNeeFNbW^PjOw3YvlF6>I6SGCSq7wHz-B&a_nK1e@4aQqUJ*8Z<#O+C0s zr+oms%*rot3gSBR?Y`ah$P4d0AJs-82so#(5v*f>RqSZC4)|>}z}Lxe^u^_5vkq z)7jsf0UJNNFqO{o%)ZvYa2Y0UFFsM;iFj0H@(nV?2vhhK1i@@v`0L=Nd(t_b$aLn- z)*VybaF2)k6Lc6SFYWKxw;Q#w$rAhSPI$!Xkjl;Rp-2H|9&k>j&YaKSpXnWLOX*`S z>#L6M>GYF)OSG-CI7@d#g5S13;ia*UY=&2%#fxo&Y#OU`2C#Y 
zuf0dtwlX&NWQsV634Q*Nf@=q~{&hbO*tI5lQqQDFr)hsOvZIspZNeP)S=aL3oXAN6 zw2cm)7sAoC=3?Z_-~yaqjR!hLF{b;-zmTlGuA{$K^>p9Kt+~jpIPYcBxlw|=T~UQ=J6sk$j4^p90_Mbk-b?5W^7v)p8!t)2ut>fZhAAAJ4 zyz?K$zv7EOr~a+1bJbtRI*8ClzF=mAoH)u4Ajdm{c}#ck+xxZNZ83lFFt+Oq+O~3* zhJSvb50GciT~gw0@~F@68uX?0vp2Bb_TLfj6}%Gsn-=V~k6=?itoSqjwESS8A`om#c!Bbm}N%iCWX}&(fJ3O*~8q6Kn z6SC$%`z^Vw91NcraVFR|pR%tW-y7_!W1Et%8N_b`TbHvx;0x_1sej4Z;jd-)VUHcr z9(x5g_=lj4>b68LA`V>M{(kB=!Hc8FZsD%^P}_-byEX#5%Y$!qk3W0%w9h&o)*d%y zsW-TPtg-pi_KQW+gvBB7rJYYL`0C5=x951doY5Zq!+FwG{vGAAVY`w0UD8X5{1oy9 zJkc>d#Iw@Oi}b$fsOa4mz34FZTQ_UAOZwi`C2rnz5%q6f6JIt&ecJyY{n0rW+Cy~u z!-RIPY+m;aXaxJa85yK_yyEVPu};|+f7>7Ki)Xh*c)x38;ML_i2VDDhdE}PHa$-q& z)R9`>O>bWCf**t zOD2pfP2^P(V+wx5!^uC0eZLj?xh-Pj-dTLn=`d!&f4oD6_h$ub_*u&fsRqTVvJX!m zTa4!#H8%F|*q7A%E3_ARdNF5{kXw4al_$YDz1p93Rdi3!l75czX#aNRM9;MBITb^HR+_Stb?kZKbK3o_XORib z)R#`Gx94%C8a$p?rSG2BOTF~9*fpzlA6j1uO+{C&^Um32#5{v}vba!o6a6(u>lY=+ z*K;_mrtCcqU+PrwyM;2X(auf&kn$1l1c$my4(?|CFWm-zXx%@~{nw(Oc)2nj`w4iy z=EvoayS!=TDpZ;JMfKnq$Igj>6a5wGS~0ac=%@W0;jf(YYH)S58U{}vIVO7blP~4= zi<&7DEu?cn#y;!W8m(K@LG0q|(7I)&_6%couZY-RL35S$f#@^XW6SNcJnT<>X#dg9 z74X5adLKO5@9a!qw#<|r7Qj$Ge?)W77_=|t#vorq&mTm^W6b}L`EI72p7B%6zaQVT z7CgzDs#r$fD2RPMmtxPrSN}#`jZyyBddhu%D>g1T+OMAajAgLjBpN7A2mWc%P_W;R zPb{0L3BU9=*cBT89&(pf|7G1G#aZi3UV-)->+uU)nG4NHGh?Y^9)66^C0eLY18qAl zHufC{diGHBX6>5_Z~XJ3+ccV-jUL0mv*#%gdlP;EeZI4IKej+K`6`dUx3D0Achd$+3;9Es1Zy#ZbjjUIsxvU?nU%8dGgBwR`%G&uG0%DU7F*$$t>kv;JOs%+%^~}zw%$ZcoY+(5 zEdgI;7)$3S?diU$IFYxYxYjdkeeh{Yac-t~pZ7qjuS#N2kK6M z5dV%Xo-51knko%uCsSSefm1&*k96z@SBtg5AWR$jl8fg2U9qUH_tX^wbfbndWXbi z zt0ddw#Ntx4Bkwm&z#Uy0oS&h0K#4CD)2foMjcuEl#W(NM# zy@xsp>PWw9YzgR+TlZ}x@G^MoUL2Z4RhNE5L)Alst`z^jy_mjKhx?h^4drIXXotSh zY1p~M4r?4=%WtK;M(+z%ndE8aSY6HVOk?Z(dsN@Sx|R;e@$V*lW0h0i`mdwpUA5S= zJA!>OMw2>Br2T?bbdJjdlG5v5;rkOBM|OzTjRJuogm_deNzXoF>jXcp@5We=GHDAJ}Mh z^r1Em{yqmkgpmvXU)j6%?+;)*y#JQ^4(BS(gT*z(TY6dcO^Eydpl$|wj6h@MFJg4p zWS~hz^qA#YJ34badtL{g)?SF6Kb?Ofr>himQp*7%mUpB$J;Qu(Wbm{kM 
z+Nkb&>Lm*57D={g-HgN%tRGWgZ6WmF>2+5RBq|MgLV8zT@T^1?us(%6$ig>zHc4>K zux=I0i}!DD(0;e(l)dnICcd=bW6oV8 zizYT#khzr|u67@{_Sb@B@IEo}#AoU~t4?mkl2o3O zOAFsm=F*0CgM1L_4wIicqWX&0q;hg`LOs6YKF-~^^Qd4$s=sI!Y0TpZLvL z;1j>g%y>tOS#(>43Ewd>*T%7=Yo$9Rk#fi$**=yN?!r&zYxqWrC3uZttPyAqUp@=3osyMEJL?v0?kM;AuA zJLy;a!Tp|NVZGzn8hQ_}U>_t#HW8;MUO!=H*}eAf+P&^|(fn@hN3Q$*A0Pdhc$dD8 z(_VI(+J`vWK7RX#`71rpi68CgB%Tm`#9MT?RA07D4!oC- zdK<_7wys8JFOC0wUCk@ssH-{sP+iTLZ`ReQ?o%QCPQ(`QZs++);eVX-sDkG^%kc^3 zQ5N=J#XkI5#^i@%%HT(Qqq&S#abK^B;x2 zwZ=P@I8%3YIXLRNp6?d3b+`HyOnqY){No<%Qhrb9ZO2m^!}V~s{-*QMPpP|{XLswt z0Ugz4bJ(%T6Ri70d{{B03HUZ{z_sH83!j*-_|x)Fh}%4HcX%@YYzgx0tzy57Lv`iT z%HMWt{78te)~s^jsQ>@VPi|ax!fy_q^%rkhI+ifTo#+>>Rrg#=%OA0Z;3b}6$My}@ z&^l=O612$Gi@Cm&)mgw)d`q(R8`Q^V-fxN~^;jnGhit#{fdt#i2;xl6AL?YS`}F}& z8P1qczlyV_CK-VI(>2$(*J7w%Rcm zfB23D-?7pQe8<{U@cg)9;6DDNWUcy>Z7mYvk;TfpieeiD=i2HW6^gTGJMsp^gC)e;4g1vSyNY%1_3oB0%>KoX z#FLK^%aBc#`hn>U<|)uQ)6Z{+vqrKVGj)cp&U~v$K_~TRpoMfrwqw=+zRGh4(Vg|g z6-HXI@xe`gO&#w_A87?g;!p?C`MI*&*={Z5V~uQ^l!1y9CF1O#={Y^-Qscl!E-qz1 zi9ME;8Q^Q|abRZx*sJ#?HJJ`eF)kmU-pxrT&yatGe9hn-g_6PBi1mL7I^a_k zYiym2Ev|eyVu5p*V~yR7S>q*-{JD{FVg}%_2Yl3@!y|VLinZ~6rFYdKyISZ!e!X`z zbFE3&!yipP@1ntfYBuIU@q)^Ew_RF&CwBNpUD?YEBfJ9-x|Bq^xACkivdY?SnJ9h` zF-gVr9_JmrI*YTG_sPeBS&7}FJ^7UKNZW23G9UDgDo!CCr)S~}FeH04M>^wK_(#Gy z60J1`4|->zg>qUfJ+U?1di^$KTAzyjiI=k-k%42arY74F9jH7LS<3@sjSE=^t@&@d z1-q`XBaTn3vi|MF^JDG9_%Cf-Kl!fh#m3Ais3gw%G%?p0YrK{9z zd=1C&4`Q0@<-DTz3}aVEeD#@`zD<8gfqKTG$#`M_X**_dq=qQ$YW zjn95$D?X9x#HpkF8IN}YD3<8auhw0hZ^eaLiLt5P3UEL6uHbhYbj|=*c3Ix#8c$w< z=&u~WbB$-1=Z08!^J4v~A1kPAWBl+f^gQCnhj+)!okzCa*?)?@e!YjjhNFd7RH7&E z$4*mU8iR7=hWY)^`sVE3A-+@PsfGvn_Q7`_-HWGX;Oyl)SN_6(?lZAh9cOOhc|4biOs{93xR&3NSMs|mH<+~HzSdeUoCtGj<(y(i zYG0^&b}z}o7a#Fs;`zc->WDwJ4kYjQAlEy&rjGMhIO~>s$i`~!olVXg{4@0>f2Ola zwq2=}AN|*u4SHAip3%w;f}wqWiv<7T$eCO{COeg4t51Ye&on*Oe2q6GxbMYx{r|{& z6ZopCtN(l7n~(&85FyNHP6CKTKqP&r{ zYwx}G+G~$TVeKO<<%J(!Bkq0wYLE2pTb+N+JSF~YT0@+7U661VsM4x;H!zo{u$i-O z{e-ylusdtN;VgIYCik*s`eK`W8~1~7-xf6N2Q%cKyN*cx9O_G_bG5_RsbQU|{o}yu 
z5PB}_mx0w0WcCYe9-T{FfNW`9gP*1-ysqnVmUfIfYv{+wjoNzLJmkQq+m5?)^Rm?x zp6cTl=rN76Y7SoQw;z6#J)$x4y~D3NKB;!x&$}q=SQFhuQ{1*+?M;X{qokRN+p2i{ zbi-GtF;f2cg%Xo88Du3UeQk~L13)u($9HWD-$42P0KB2DEgJYQsucFCX*vV^})# z4l!ipbM1js9?*AF+b(_zILDN+x$are-gN4T z`i)Pq?`bWvTR+$|_0Qb6(;-caLG4<@7)zq9)X~p%$GfyoG-l;S`)~$tr!REVQ2c6r zXu+^h>;7r9m(YDa+%Ycg8yk|FLE0trUtM%h`}%y@bE@NU%Ev9MZk$7uYRgJxw4d^7 zW=-OI=^3N;+`Sqh*}x6ArCj9Y3<`vabBT0UaWgz!q)qO2vWG)-y5fnVG*wn(u#0gu@?w+5%}-KTQR7_|q_yH_^|jwz z9U$JrS|i4|o#agEinISv&vAIxe0>|?Yww5e|3&I!+mXbDjnjskHqwo4lqp)2waap+ zadyR}Gnd56hCT&fJcXSuJlbdR7q(90ZY}GnupV|zr@Yg7ulln#Xm+YRWecl3RhHP8 z6N{5F_s+%!jF-CUduQM&7e?X5FUQj*k2M9Zo!vQ28yn6;CSXEb^lO zTzM@Sorny6L3@>C2JhU5ZYX8Ga}PR!cFDF8{#ba2UUF=fnHAnWmoP#rwChC!M~8Y(~8oUFj7)`p>or2-^VhTtgXj<$DKm zWcgP9v-ZQv!-7DQ)!km1G;-FO@|JfKeZ6oh`K>jDZe7n@ehqc~Q*>4veoGY}@*L(2 z^k(iMWIUU7bQ6(p?cJyi2Ih#q1zD$0IDW!1r(Slt%8hz=jKjPxajlLo2{f_p)`q)} z_B4WS)INEQ;j65C{iwYnde&Z%=NNm|zO(oojnApQz=l~?n@=B>zI)&CG3t`~T;g>y z_r&~d?vlR`;a2&oGF92%!M+K}U7WM9nmxH2NIvX=H|4wX+>qa&D1PKR`cd0N&JAEa zn9it3+V&Ow_fHlitN%~?A7s5-H}tl2b_(q)-?gE=ZC#$at@W``)Bc*YZC|3Fzjz89 zUH(j(nfR`qw@Tjr6VfO{dJdVF0@_VRm?#kVLk?72C5PyM|qiX2h) ztD9+$n{PrsBvVm+cj$}@3plfFOK`wWH@|fD*wF1;{yF3N-Nf@Q@k~O8`Ek9=I#uO+ z3UNJS`p$XxbiV7Yx^ok9r?c~!$9xC>tQm$k(@LIfZb6U9u1PYYx$78W!F>;F&y?~E z8;|vOGZ`B>f$uTRVhsC4?6i}ZE1JEHyt8ZY(CfROCSN1AZ5+D&G&kRC7!-vWh(J?1L@~CePTn2RAbq zA1G#gpoR5GB_=qh!4`C+mln?5kYwSomI4L3C zn*P@h{wuVoY^4PI#*P!<&(g7PqA9_B`tEMaPPZ%wtLZIlP;Pwux}<+3MfU@r6b z?wS&KSiqi$#wn)tHvHK#8(>=F_?MsfFhiLR$yM3v`!lTBXi_+F^2?Sn;VZmW%3S69 zg`EQ&u>E&l_>9e{|6+gcIV~M^K>ak;1Jwb^KaRGv>8itK3A%=G6vh_zD;~_P#$>R8`ZOya&=qauOB;Fd4q>*DXId-D zb1O1x*EJ?_S3ltkwOOQ3>3oSWvYe#X!0oRsDX{AU>b<-pj2YmiT+u3rBp4NAdUiH~4=qftRoI|I7jYlRD`= zmAT5Behhtz6zX{Dw*51$j@YtZZHd=kRUd>**iuWm9}C|DDfcGIeIotKu4QiZ#7yOR zl1+2}B$a)SZDGmC9qg;RBRKh0TmI<>Eq|^5X)jacr&Iag!PuY5pKugLr}9s5tMYf} z#;F^WIqQhp*XeTh(I(Qu8V5HW>ic8=_L;)-CxrP5X;S(I0qjlc^RVWR2;-a5NqCt? 
z>7*Q0_w)@Hl_Pzo1LVz@wQd=*lQOrTLmA07dp&sb;2PpryQ{DQ@a1rqZf_xN{vNG3 z`klJcX-q`*i}UX0FG@8JeU5%md#dtM`G%nXg}oVa^BQwS4$tptuLj}BjiBF9)5q`E z@5kLm_WKm>7>Q5UH$1gg^od+M*3KA|$zAV#6Vgo+b_F~$*3xP?_pEe(+k|59-@!}# zij(=SO}|dxwRwnm{-Af*0|8`dC~f;iq*wDrN%f3*Vh1NN7Pf~zwjo@}g?Rn7_0w3!u*PFYy08Q zFZILm)_s$l|N7THPT{YmBoDAHkn4^$mxRK)$u8%;>r6Fd_+WEKb__p|F-dc~zAYY^ zMBby1C4--m&M0@n-9lO)isVWkyK@H`14-12?J1zVOmZlfvdA?VfCGd8>_>WL_Kf)U zw7{Fn4)?8bWSRa-(>nGks*moEyO=Zh78Lyu@esk5Q@&3(nf6RP(PYzM@6K15_~8Z# z&;53m^cDLiY3rkuYXIH~IO`}NS-JRm&yB~pXW76Ll=UKN%^ArHqDJ% zH`prlaT^+4e-m$KEv)K-%7Sx$ZCgh<=Gt)FHO1N|Wy6gkPkx!vS4wA{7|)sQ{YZZ< zMSN*o96dfcJH~#_;f!C9Zz@w?b`oVtG8~}3Y91&Xeb*t!$R>Rl_})xdwjP4peoa@J z+7aeK2g~N<-9dkTUGc{(ab$G_vfoO7>%p98Xq5I5IIP!n(5t~yb9Oq^rJOX=8a8Q@ z9KPp^AG@Xiewqr{yP@aWpgYFaUVpvVHP@9tikO!GPWxD+B1^x`fwg{ z31!kkzY5uEDBetZs0YQX0;`)@>yyp-)zjIFxzU*U@6vC&&2+Qln8VpO@(%uyhsp2I zXOj%ejvP+e$=_D=Kn`wNaHH_$PCQ8-BCJE4zAUi%Nc!E=O<=Xo51RfC^G?J&{g>=f zVQfO*91Jl=VtJqs?T1CeC%;P=)4w#Ur5C6FHLyB_Kb_Sx{o=stM#?+;vcSA-+Slo2 zoMR1LywRX#vc|e$EAvKv`>Ot?L9XNQ8>1cXT<_hvWcKLp)c1w#JSOrQ4Qh=$a*9k^ zT>%eq@-4<)@<*9k*|YH@e--qPeE$)~dVK#5XZDK^jaP`jk3Ij&s{&*<(SJexVAgYY zr0IFmR7_f<+%=crho6lOX%C-oS)n5uZsz@EpRga3v#0z#+`t^3%Hu}fA-|KQM|OZG z*7;A8lD3zi+haWaq}8?`+7ILOG@DcpO|*C)Vg6lpLUqm$ z(;d5)ZA3jor^IRZokls$jgHx$g2QxcS~;k_;T{&J0|`y=b_~fryqX@x+-hg@ccww zQF6DLHv^d}e$K2`ANuy;Pu^TYJF0(k9HkUedKW&@ZQU_ZykGw%XT^*ZHHVP{-FXbLx`Jn8hFtQsZKJ>+8+1YZd zKC$BV`!!0x|4!{-7J6m}`58thsD3*5B_C~{RAsGaw_H2gd&Z6EH)}tk#{2`?3uf1o z+WjN6wXA*Edll)4A~%nIe90>h1X?84Zv%khM?A06pPTgT07w5Vn z!V18k<yIC`M98|Mxnqq~!rMe=7dK7In{$0~1r%ySuSZYh3W!!L1n`X#Qr zRXz4?3$@9%--+%gzsaXen@?fly2<8KKk^B?X4wVp`J{LDd0Cy!C*R)z`IKNQX`E5{ z#QKrH2guKumrq+&&RVZjkNtrCqPlCtuh*GR*b(S?-6OcMd4n6-?TP0wsmwLMW7l>b zg^g5$jntO9EtX>*l+0_d{h6#0Xq3$*TaU368+Nvt_yDvT&YFIRH1*?qr|}xbco?e) zSUcLx)0)^2da0Q(+x9b{Vx*V8lXT&3-<@Z( zdA5&h=PI5k`6j*P%eJe(yfvO1-MiM`sJ_b#K--bO`rTBFJ6yA%lfx^h*tE4?H`%N-Ky@AL1 z6^xb0F7Kv1@ahu!e!>LaDX;$Fwa2s_j4QZ0ziWHTSsdG=v=PdSC~a(rIF+Vibo&hE 
zhx~9AkI%F08nq40=!_8V)}upY^9|29)6QwlKo5pE`(JrCgFW$~kFxUBPLS`MuOoRT zpM8FbPkO7B`l34GpcioGwTW(8^&GeT3^xtFUx#$|PE6au!YA*g*W!%h)iB@wKJy*5 z#21BU^^yF1X>SXCzQ{+dQIlKZ+1=Kb#$HMIvFBh!cjwTLX+drS)H%x^^SRtNw;(@^ zd$?)DeKP67&ah=7xs>es@)^Hk_O`bfU%#F2%b5B#`=|p}A8Nc#wtfNmYsgErD{6-n zzS=Oi-nwg#lxKm2c(Zx`zQ$UR#rCw>v8{l{`L?K?Y243Qr`&yh{qVHcsFCszk2k=# zwZUy&QQsLm!OI(076xqJLYXK`$=eGqe_Nj89wE=pkEG?u{HOWE~a zSkl!&#ufFgYtbIW{TB9fTt_*P$F2UF!KljLtE-fooiB{jCaLd_jWIu-N4{{L&w6yG zc=`-opmCee@Hf$Wuk(ItyuBiN=kq|{c%uBn2j{KSSU&VU9Bk#)pV6<99wE&Cm0!D! z?2SFBZ2y`x^aa$&=h4tT&V6Zbavzc?FVUv&&fN`FA@CP_Ar#QcQ)^tHq6#R|h) zIB2j9I5(PkOy#@An;r7R>VBtV9*FSl@4w35654N#A8O4P9y-ZdcgZh$M|l!v&zIgS z-Q2e{TG{Db7K!$Vbfw&irt*;=>8g$}m?Ket{vfg$_sT(Wwz%K;v3%)#?zedN@5-4! zw$m3~r@T}7^&?HH2OVYO_8lKYcGlA_70Bi#zt5nquV=mwxq7GuJ*sitL_XT}1M+Lj z9@(0MAJrYzn{4)=6u93CYR^Oe9euwhwXgoOm%kIk#P9c~X{ZN;8M9Y=?b9??fZK2U zNq&`Xcbt^|tYoccha5|OKZ8%=SDRsZWBqFrZB)Wop)0R?pGm&S4(dyq#E+p33^VW0 zJe9F;_?^!A{@t-X)gK8^cUhP5_Z>X1Lw*~fH4l>?@&lXi9_*w>!Uzz*+O9D98zmnU zP8hi)>|Ivw-FS8>9>1>0=2PEC_Nljbr&B((mk0G#dR4r4Xhi5{^OoA?2b*iysP{umsa+NG%!EZ+RQljjmW6RB%?ue1M@m9 z^zF#AL&K~c3L~R8!SikKOI-RUc&^;sbSZl{=)1{{?1{TLE3MP|BHJf*$Gx)>^ohSe zM7(N`ns{sPSHe%*a`m-Rw`m`3jIkhWlw8eG+w@t!J9HuCK-`_rm}o^V{JkM|Zu<=T z9qL2HJNz~b^W}= zJCM|WP7ZwvooAQC_l&|%PTHikWb#8_$cv}n^1Ep>YJ-`Z3h*l(aNEaO`En!NhEG`B zH}8Wl#k-dBXg}YN^LIE0Y@TP=u&;0sV=kJ{^>wKAyG3<=0)9h1)qfhao%KRZ3OkhU zJ`3N`IJon} z@Og=nPK8wfuYTSs4>&(4SNWhkke`^Be&G?q)m>}8{rL2(IbfAXHgN$>JNV9D0KFaL z8&&K{zglN*HghKL9>zZs{b@ZB{{J8O`TAS+P5I=vS@A5>8qY$87%Oc-rw4ea`KZ;@ z=dGkwG}#R>Wyd(e3ATu>f8IOfCSkm?UC-Nc^6Qr$j@Q@elum!#tobPBuE{&`nMeL< zTvqegE$F9O_=yCK<)vf(#hoMc1M6xB8|)~hRby{TgXG_#ZHOX^>(E`Ig$WayZJ+f_ z{?Ly*a33J=HrFv8FZ~&XmqzJa@Cdm4@hz=mMK^?{d$@-?POkBoEj({Qwq2XB!;kd7 z-ud+|if)Z1*w>!@zeV5p6VIP`{rnr?Lu-hMx6{v0znf-P=PNJ8^Yz4YFdIeu`(wmv zXDH)|VQA~g7XsfBCSKjU<_WaSTGFJsM%lbso%FnCFW9{A;6wQ(Ube92;uBZib~m0M z$#<6J+Bj z6I7Ys_QIrmrH{FAYqyHub^i=(YbNh}9nqOb!p6O|U+CW{KeRSRa}(MFEB`kBloLiF zu6)xhuzVA9o)_Rw>nl#7%xlSm@Db6b0N=SYU#`18!gJQe>z6k&e(ulJYb}ey!uGUz 
z*GS%3TVL}OLps(fD9yef<+&kGN|ZO)NZ}m51;f09!clqY+>+WH(`5G|kw5LWrf>U? zR(|2u<{Nj;5zw=(hv|y*Nv6Y0W>;HJ-=Ax@sl4wY@qS&W?MR zd7fd|L##W@oxwej%UVh9;(06eZx8QVSR?I^?>3@e6h;^s2#@EkwV=4;`N0_Z9D=^k zk$#4(G0B-^PI-R=eStRG)~$W$3s8Ty=F)G&_A!+69i*|;wh>a%3?UN()|?6|_Cyu`OZ7pwMMYlOtV+B&)8{p78#{j(*{)0@)6a^tuE zjN?7n2#<}N!{E>J_D7b(>?!fXYScP8*$Vi3ojCZ;J$+5i(f+77e>!Vg34?x$xj$&< z@!YXUNA0TTM{=vN0Qm)O1EqZVsq>6LDo23aF*E?E^+PYsOp1P*EKe&y64Ns_5Dq2 zomcKU(?Dx3{dW#yqN)qG;YM+3j)A-VCCWVnY!&mM@=ACZ+m~{hC_eQJY?i!BMqC|( zO+wp*o}L#DnkKbvxUoOA8z7tgH7{CYuV>fh@XuZ_n>Qfy(gD8Q1w22Z%g)m~!nS_> zFnqVjhovhHx%b~o?iE+NJM1l=7vD|Lb8~=O>E;9;eD_`5X?=y-QMYY%$8-|wKVke| z{i$4Gpnc2}o4oy7x6rNawvK!axq41It#uvw+5X&%XY#H6nZCZp82o3odx^Twc)xae z!nRcXaE^1w<|I=w>cTs&Y;7)JAF`jfep&eC6@?d-`SrxF{;h4-64pe1Q$8owPsNE+ z*(|-=kGuNDvIkzFp7^>nv5lrrOB>a9yZU$8X^fR{zW^XOY$_ zY=|`WB~mx+m?X~y-gBJ(RAL_4x|SSj&sU9kHNszFnu#G3`D@DTX~&T!@(e$jb{(JW zEb2^?#_inpKC#`Q?NUF7^fPwMo>^Z%x%JzigVb;EC78Gwk>`U1( z4{;`lbc|@KhcVg-cf8r1SC*_SgeE?1p1^yIvCh1ntZ{=^->&T1AJIA^UoO;th-7Mg zvMq!5`YgL6;O*~@BfnXNW?L3^ah^?&vFGQ>PHsYtJ{z zRC9W`wK`R6N)q!<{n}>EpE!`0{@VlHyfF)pmwttTZ9Xr3GhCe}S@3Q7Z^{p!*MEtx z!~ggBQoE!4A)j*ne*ZV-gX7up39^%z5AEsg(vM9n11lH94@@ha)CXU&?LTttjUTD5 zMO|HN>Zt@CM6>k?ouT?PA(_473GJ?59$oKUb&lp2jVA|+7kj=b^F`{> zm#lqG(6x#B)YT(8FHrp_cYTWVsc-K{--pm|hO-2T-`bgBtIIOC_dth9U%0lut0UCk zh}wMc=J!dP`jhf2y>T!dU55>$JXk{;;p-&wL*Iwic=|-fDfq^_nXtywSN`~g{3u+V z-==p_be422eO>0hxYM3>b*piJo>y$>v9dg_@e4O;uOkF+s9 z*+*-j1`Xm{71$)&t)>2(+ zZn-r*cC+5!y2S8Z$Duae+GCtWn3Tst|7<$(R8wMhy%y}ZDaJU;^+|m#ZyUy#h5OL^ zU&1%vfm$8nnY`Ov!kBn}bk-l<+FyU$$iOiUYk}K(uy2tt?L2g8u%RW-Z6{jtm^+|c z+Bn1TCDPg!q;Gfx_sf%4|CHz0DM<~1=l+rJn4x_8D{1v}oa^)S?$m}P`WDgW;`!>2 zXG4okN^cmlY)}3=_K0fVKKo?m?3k9(5X4;zYdf-WYdEvExzseN57bhs^Vi&OTV|h{ zb4yOo(6*M|>Dx3H+fs^q>W#j;^d-6=>r>XE6NmJ%^k3I+js*yhHAwUAo_y|F*Vl5L z``zx6@3LN&H3DZPnK|JSJ2u^?xd<6VK2?_)?{C8fsPkPVU7Q7~~ zEjI1YhNq_WX}DPRW?6Io)?jS1xyJBqQ2KhxW6miRPs{k0Ti0^h(cdh5oizOZ{5|>V zw?#HA*;f)gXP=)&jlD~LR43Y&81*};Yi^xa-j=|#p2=^w?&D?`^eBC0{8?RK?+>U= 
z_jCnicq{exHtdXNlAB$*#izS``21{Fe59|1Fq*x0Erb!GO>d;{yB@xiHlOIQw;)v5`nE~h zJDW5r>@voPnyIf(Gp9DCb{9VH{OZJ|51v``Z`YVa*Y2&) z`fG6Y80JLpe7-zix-2mrLHZAYOxD}fm+ULx+iigfrl|m(J_Fu@*gydX2xZ8oo*amo z$!=e$!&bNbEc}NVt1{l&J+1$UV}FoQcI~b1hVUfYj+y?t^#e`MqKoF%HP@C15{}vk z{B2=;Zuhzkoo%O*-38m$bM8(t-^!cG7`E28t!GWQ+$-)7bM>XzAVJPgw&CevCZ-=hoqnC|xd665=e?i7Sm|QsD``8IGnO45Pd~PdzRp>+jeK93x=THC$G*ek zsein8yfq`5dm(~ZTBkDCmlu`R=ODZAb(1T$Mk$^aOlnK zM{7@W?dq=kqdFyy2j9N-?JU`zvO8NCuk_PE`Q)nJ2GVAaiUb3E$0fLYBje9?@G?XD zOE{D2iC}Q~Lpd7;V|(U`U&pgIrSE+EQvDp>W$Nq;ty^19n9fksI`IV4Bs_<3oDlOJ z)HB-VT_JC*UhmXT(mVDVrsswHw7KI!frYlOIWMpfSq<)+q%tEOcTC2cd(-p3By-~! zOL#N#Xg2A~CXWNBy7NJVbI@Pcd{m#rp{&&h>PLOh*e)L1+t|ij+&6!-B zPnm;j+(-Ns9BG=QzjmlyCf@A&(tXkkij%!#-q_6(+ukG0fF~m($$yQPy^F2d41B;C zkje^OGe0_kv+xP;a)Zr5m|K+=@(Vp{ZG7pYD2JJRK);T&Wt-a1ge3s|I?DxX3+)(@ zcNV;Np8J1CH%fPA4m3?$da%Bqu_B_h<3v%$gv5V1hq-&|lRHk`cRS-Rc8y75osGrWQx8Hma-p;1X;;hU3I(d%}pTdhF+kSZelAPmLvZB6e4troqzzyxD<_<-h zz`kXrYlqouIEKC}Oz1td~?n!rNg_5kjr!;hxGtI-ujwCM*`b(6z34G@S zJ({a?k==Gld$@w_Hh7HvOa$_8*%Uj@FU=1u8Ge}tdw&SO%y&=0;&HPm;>Xjrm4 z&YPDaoPVlJ^ZjS{93BUqDZgqDHsD^mLgykBU<-*~;`wj2(at!xP5#FGoLy*naAs%N zdz^lz;?;boLGRNBG&%5p(0bhWGMg9SplLq?C1Ufn!(ZauOc3|vpPTRGW!9JOIA2z# zY0bJgd0SSf+rF%E;MmVuCuCp`xN8{``EUL<{9)6`zOeJ6--gG8amKDZGGJhBk8i`P zY*6yf{@Ufu(|OXtQOa04FGv}exc2TA&WyG8FP>OO;*PayJ&EY+E=}IHZbr9#>#&j6 z&B$n7ccSt{^Pkk41byf3-F4@B6~5xi^6V11*V+_YmdIH)eXI!Mlb#%Bc9d<$8K=rS z>67-ol#CxrKAhR1rZ{%5!jQl4DcBZ-6~>lMOrP3+>XmKpcCb#C@7#52@9N-hi`U+1 zuejXLFVf?AEue`>&b72Ft#!_O88A%gQ3ZhC(UHWOeZ(DZJb{+h4bR*-&~8` zNFM)NI@YXx-^PD0Yb}R8Uz-04mO8Eeh5(brnTr~GS#SEWm7(%MbCP6m45)2w`@Z=e~tv!1kJ1GR4bxH#XY zkF~#v=^M@dLGdJc5kD_rC(3=8`fGJ2@};qw#P~Jtk@!CBbm;lc>to#WUK8Cm;?|D6 zB!nM@rn$~UcQI&cLlS9mXlh>)X(pR}8C_@&v_EvA#i3~ou#?}M=zKQbb)toE_fi*H z7@GE-CHifI_EZ;I4BF#eXby2Sb)kizJ=ld7hW7I=v?w%p46vgt8lm0U#a#?qV;5Q+ zn!e?o$g>$1oquB&nggw&3oQfbbfGmuyQB*(22EpWiM+(2)j-qQ zH0hQIIw6TMmu%UdlcIcsfjj*g&PLfpe`hc4!a`RMW^OOn+WbGZO$Z?mC0)W*SYb~O zOHcWBr|;#Wo7W`_1MO6gmThRy(5veI?!p%Dlm^@=+z@UaX5WO~ 
zwY#5fnzTPc@1wlenib(;@OwPB1zy;y4|>(X#UK94uadWjd-ssak8@Jt^bcT3)%?oe67v96r{j`_!&RaXS zd4?Ch_VdZ#v%Gr%+)r+re&o z(`b*PF=vx>s?zZ|>)O@6MR3=(jeD0i?j72=_i5v9r)^46`^MPIOzs z_!D0(*PXa61W)qo{0rlvd~0LC@SeXPyq-BSt#>ab43%+GY}ls6ye&lr417qAjp;wf zmt&1-XTjTC>=EU;#**&9zx`@6TCholxfWr19&*^6SutoJCkio~sUA zi+l3)Th;ULyKZs*zyC|?5@q);&&<y10PKfcTJv@XxnyF7=wJQs9% zp3&u*@%2veAupYti@H3Y(d9XS>*P*8+y~JUf5F6u1iY)eq=a{gXjgX!i&O5qx=Rcr z5&yRC68(G|{IH{D&NLbMC)i&v4t?p?Eu*Ykw{E7-;oas=A%tK`Fgb|S^vXO`0?zQJ z`CD6BpuZW&{cmlDalOW3_i}yCVxsT%Fe)Fv$IZKK@3y6K?FIBSci88D+IRoox(p5u z+q-w~xV?w>$TenH#;)Z4-FMmFN4q|_&=i}u0!vr|7cw8VogUc0_1wUOwh!CJnrF$; z58Hlf-2aEr{uJ0jp}!p1)0X&u)jqGnq!M)z*GWLN8E&8XOm3jcJQaAW?X9+_0@s>`wVB)Ja@tnzH>PB-1gCg)F1&Q9*pH+x9G{^`Yo2KNk|e0WwycFw@UvGW%!T-0U) zZL^cx=A1kCwA1rKZ4;L!w@o>N0%&U+G&tOrYy%UX+(y8Cvvcf=wzj^9XC-In3>wVi z;aP1WCuh2zz$_)Rt*89?j~$rYC3pBd7MQ(sn677W}P^2(7^F$ z92P1LpI)BU{hX8M^+?YBt4nlspIkDX~{!UkA*nGq^3?kegbP3r{HWwnp9_2B_m4c1^sbOc zb$R3wqU|;@b;jbEy)(uhKYQBI;|@J_Qr{_=gOBYuq5seULr6@r8Hyeoj!spArkR;$ zHhN{gDPnA3(O44KF^FdqiI*ia?c@5VnqmP8($NT8^UAw-fcrz57**5W)AM-MDybVLQ<~(dw(fbG(j-6TN-9ew{B_Z zgS5`S9>H{5u*k$PYQPv|eWJNLcw6u!$hQY8%x%Gpq+E2|t>%VcL+~1&GLoJ!)#m!( zcSzF_Sdz8Wno;Ilv&LLxGLlMgflMVuxknK;xz9+tkpeYL8jcxgnIX&<>e)FIo5zGkm3ls;!fiODvC*~i4&jSxr zI-dnT4g3b)w;^Y4?25~ey8~k-3%!D7bZ$Jb7wa_;EaJH}P!ZUKcGWcM(tsxY?+rW= zcq=eFa9`kTlg67Kfk(^}f!=81C1~z1Y)XC-$Vhqw&HPm05&Zo;@Ic_rz!!n*0!srW zfk>bT-)9F74gA4`19$SCv<4myykMR)KMUMx9-|kNk<>FdAea*z6U+}51Wyev44xaT z2v!F#30@hzKKP^HwqUNgFZgKiso-ydzYD$-{9EwT;AcUTluWv40RD&mNoXG*x}mcU z1!Nz;2#B7_UoG+B_MP-+3X)Q0^+6f$vU!*OKlbm1e(FU#+6!6yE|Qgn<{1X)-U}_I z|5@IBFf^CKt^l+rM>_)!TJ%4-4mV4{9JJ_A{^R*C0bKX2zC*)K;+?P)pdW9sx!jL8 zC-a|Vv1z=UX668o((0cLR?fS=XzsJ24YL2U?X{e~>G$}Hw$@$;QHzUB*sIG+?fqe% zAHr&o1}gR59A-lNOYx!s#W zbC1P;`#1eJnE$E&RoHM> znY(~HfCtc0KjeQOPY>{XwP`euaMk}dd;KBHNFJd-DN2*QUX3kyySWy66TW`@KlZ^=RD^Y|(SgIndNHANW^Wec)fM_kn-4=Lh~{W)b5Fv4p=`-~<0p z(kdVLe~NbdRC5y7nYIOgn)b8n|JT@A1!(V%|1-4o2mYVMVt5wo>{+z^QCvsb=jYI? 
z2mYVOrWj)SbL|H_kAcJ{V>{yEHtj{T30s?vNnb6+9N2ZBe4>4DYK)nq({@E8ix)& z#@;XBK8F7UEDymVH1aI2x|ec|pu2BGCzk+|z5kT<`_y)93O2j@{y4C&{~vE%O(*0K zPyqY{IGgr>{R`A1+59Kb(>s|qh^{<(f3c;U1ilDY@^>n(&IMb9mM`bJ6x}`zS7(}) zj3-@Zs&K3STy%A{sUbbm<4boU~3 zq4L%L&!$CK=RK`Mo4NFL(AInSH|hHI=z953+OHu_<&T^4due@?dwO#vpp+-3?>3La zJK(-};Sz#XTFi-EyJ(9qridXnQv`->mmtH`56xgzhfjewF1z z{69>1k67Ltp}Fb%5pJf@;w+*~xfM*M?51xKqYZarrThnXpEEafU4P+mlZgKU>ev6} z@_u5y6V9d)Ya`alt>y=`HOjF?SUNMX_z(6UOKrb?;v|0iIF z9r$;xoUT_{9hZ}ng|sz=v{?uHFT%c1EFJ$Mte^w`%Z&bOfaRuw>v{J1d~>k>3bTSx z7jm5jtYC~t*VX)IqK|dgKi^*K7&G$!`R&++R&QUJP*xo!c#^dXWgB+>Z^Rxv@c$ii zu>Td-_l-HJRPk(7_)S|S(;~-PB2R+ znx&J>(qqlie6w^iuf_qqKZa&+MWm+2%vEN%%+{`5RX)}%SUCUW(@rvV1XW{Bt*EJW zs+KzT$%(93QFTFinNwTklvGvKI;@m zH5FxvDX1uen~J3s<<(K2WX>EC#S=i30QyQssO3H2IB&B4n6RBKl)2GZ` zRbK5TTvuo0h?4(wDAyP3JHymYy9!Lst&WnKspw2Decd3EZ^3l@$wOMLVk6RE=)=_rj*rXy;O zj#RIlG+_+#a`e)gu`6n7jVY``L`x9C(yEm!Bb8+}PNcegN(Z^Dd`$%mbfGS-SaARi zkwnfVxLq8#%X=3(WnOun=h&g@I3&L)Qe6pkic_<;vNlrhlvh_*RXa!eRi_3)TQ*9` z4U?e>&MPX{L{?Okk-gRB=T$3@Nze+sevLZ`{XyK7tblGxR@9Z7^Qz0sD^2OzNTsPJ z<5xz`BO@cGq`K;YO0%Nkyk)gx&Z~~BHD!_Ni+=4!J@DH5Ss*s`qA0of$J`S^0`p-0EsziaTzZMs-Y;w1B5v zobvirP&ZK$-Y=wRaS?1Rn%0DtaVmYMaoG1G;+7DWEu*t zR%KaPRlO2^$}87YR996>HJX!7TjZQHf6)xbS%dOGs2mbGe(bnOmsXb^V^&oo!u1Bl zyR_oGVhN*ZvAo<_Ax&CdMh2m_saVuwCA++AtaBpLr0iQ-QH>^3?HuhaESQ0pn%Wv?RhN21 z?yf|$+xjrpDX3gam8~Y|6%@ije7V)kAqu6Yvu6@O?kdN|mM%TdEG>`J)>W4mSC-cn z*OW)9OP3XkT)VV(l{xRiiYha61ysvwO{s);P}Dp+auWLlNtXulp&n~rux-PV=0#>$Q@{o>M9b%tVe zA6Bi1)KYg>wx_OiS)`)Uic58I$*PiedS&g3vg-CH-v;Bn{1;np6AfQB_Mk;t!eWXiH0~YM0So*u>)vrAW9oOeL*?3f{wb44DySwD{JgPx5#i zQ3gkjLEu-S3zya_6O31v#aHJFjRP7qO3#bQlrBTtRMKl!>u${uWT&pOwoKY{Sw$Jm z8QI`;v@`#An;&JrnjbEH$wX8M)gsyot}?Po`NqO<6-wPoXV%I{X^pE<+&cI*I;BGs zbW#^?Br|KPSB$BbPB`(HalWNA+OgzPOCGQG4#UsmlnBvYP^9jVAH=X!#**}z1gOlZ zE=P4#>YX&1Rj?}9vDP*cX#{!_cifP*8ab@NAS^}SEvqP9=2>^>#r9f{X7mIU;VMV% zv>SBgo>wIE^aZ1xaQR5R6fZXN_5v>{SEFsMC<#C8hf7BuBw=IyWO<385m-}ERp(lA 
z|7%Q8V`{Jg%M)|J%a#i&R;+Na2g~crOY3Md9IUUZl_-Q#Y6blP+ORSj9c6fNCqas*NUX+B{YH=ZxUR|SjQ(8#Ho>z@)fSy(F4sKH69}v^HEK(Z8}Gkje#xq;s~44 zeo-K!okfdIK0(-NRnGECs=IB`Jci2XTUevS8RK*$p{AD9sC{68wPm9| zjMQ&UIZD?ncf_trmcOqboRYewO9^#EjbpM2p`9*62WwH?9Z86%+vVNsqj@4N8K%#J z098k1Wtm7VVz!D3U4?3)A#wYm9ZZ$0M*H=_OAU@>oV&U;toEI`mReV)4y85!Xk3M_EMFySVP4zO_Z(_}YWfe4X4bo?O-iUEERk=nl&4Ti;b+BY{ z!KsU8ouh`jqtia$Rl?TNia35|jis9*d+yWFcH-` z+Yg)KjHn@N-K;KKi$GVDdRZ>XSi82ovPPDvD`prH*fB?r@;$VNyowe^a$&3e7;;+X zHtNN?HwUFUbm-gy?Hro_bTDR$F@6z#kDCrY4(gVMvhrVXxwf{vh9FBK^!sh4axte_;zN0afT<>mCk&ph+gxdmr1 zF?=@D5@(-nmQn{CgtnJ7+S6IH0BhrrUP`c&qed+2h_+NYJ_q09$}5&|c!*Z-AG9-vXQ&X>R1* zO~4O;?*n6?ZROqVz>k4DfRmv8nfqUWzXE%K$DwV<{XM|Fz)yi4(C*^?6JQ%~HxPsN zWA1kX{*V1)=7)swBcKtu4fsB^t8ud_!QD(~zvJCoz#o9$1KXf|7dO`d4Zvn#EVMg$ zcNcIs@Dt$I(Biyn2K?Xi&{}xjlR!HG+8=TMCm;^I4a|bp%)JG82iODL3+)yBy$ZYz zyawD4?Pl(`06zq71x|$aF7Dq0J^;HQcWS{NF@q zP5Ao{;FrK-KpwRFx&H!q5O@HX0PP{}4+D<^j{wI)+rj-w;A!9~;Bsge68=TNCBVhN z70@o_z7ALqM1fprH{kw8;QPQ$z$j?fbKe5^zje?`a9;|P17*NCX!qderwQ(kg7yQR zZw77!ZUNRpTZ;ShfC^w4a3QpFxfcTwAPhVU?G5g40{(9&v_J9ub^`6k(9Y+1Ij|B~ z0nCQ>A#wdD;Qxk0`wq{SC(urT_II8?20jHo0d_QBe!Rj{~N8AbbgMjt9ao z0xt1D_&va59tfY(+nA{y2tN)u-UH#caG3h79tfZ8;Ku{uvw#yl5Pk`8sRzQZ1-|Qn z@U$F0nhA6VdICLwLjckAPBgs}P4D^vpON6>xF7F<@R`6%z3$WJ%;eQ1_^g#GWz~4O({xR?c z@CmRV_!RgO_#F5O_y_P$KyKt#Zsb;O%|1TYx63W1L3a%uX!N+P2hJP2!9Ls zy$8aj@U6h@9tgh+_=yL??*_JcAbdM;j|akk3f${~@Sg+sc_91&;6V?BKL-5L z1L3~{9``^v-)S;24}|Xkp7cQYQ^3<62>&(kj0eJB0)FFx@ZSP2dmwxlu-gOSuK=%l zApAApbq|EAO;X#mhr8OOcetyKQk$f<>95>B^g#GOfPEeaZw0>aKzNWXJxLx2PX@Yq zAiO)!!vo`uO|9b>+(2V(~h_JG>cESp)Ila*_M8e#m^h z*C4A5Lx!V6?OS$0;J%4)g7hEc>z6i!H|*Pg*lSZTgqtUWvGvJjx4pg_bin@>bT%X# zlhimPuc%*=6Pj7nKPej8vv{z@hj4vr@zA73DA`G&(1OLo?AwtRAC(j?*t2*7Z_^7W zTl(=yrXa6q7QQwXoyaw>=%l1Deox_=S2P!2dlsJ#eScA*r7W<%imb~sxXvs()4Ew= zDQ9z?Q*@5vDk`>k*y1G?FHLGBuG%D}>(V4rZ7#F;6&C;8UNMgSjTn z8)DrYo)RJiNBFpGOUX%znt*eJrHs_oInv^zES{Uvh|8m`o4l09Kww-*^iV-c9QthQ z=46FAZVs{hzVKA8dkW`r-CKAX*G~#h=eobJkZaPsd0f-y&F9*8-hvdB$Re(L78j*7 
zf}ah2&*F0|z1ZU6l+cc#IX}fAy~|TVw)i+g%PTsD>z>8qQ$yerEI!fV$EJp%PqA*MLfNzUc#BW7Zl?1#uV{wF zXIlCR7N2GD6D@v{#b;amWE2SI$bCmetlxwXWl8EWOUA zx8C9xrpBO0Qz=JtS8BLmf6@JNX=16N9rRaIou|#Xy{U0{``E_&iRIzb)KH9GSZb7# z`hxs4A(Iw+lD0K1ip#-iP|T1thnh0n;v>?Ox+5(Hm7i)?5m;QNc#L*HMt!Mgk<_|(G3ZD>2XN1;D!={xQ9CEm^{e8a}uV*P&HT{>ZZ zch!_Y4~3T0LmVF0!*bQba%Jf!+3Os8oy*%fh4Z-XFIr_ObrxT5@r^cwU)m5Jw;}B4 z(I`&r+vn}uojsJUmuxsKmj3r1mWOnO8BEvPsSGr!X*O#o{H>|Hu(qoi+Yq~i3f@`t~^pwkK zJ;Q`n&@&2tqQy_K*SS3v?;@^u%wNp)_w!47D&G2@ar|!R8N&T9p;IC~9q=c(?kkG* zYy^MC;?G()FWBpgxZGd#64yC}uUSfq4J~+x!kKnRm}|iyQQVwp@l)(|?jdqtbcn() zIYhB5^xs-HyAKINe;vyHqBjpQfsax@wr-B;CCY?eQLg#DLZPV(ra+lfIK7v0 z;3WHYHgA_KIJK8TxZhG9wC*3p{R0ag@8V6)?O!beQNQP z-qfkIX}y(})lkwGR&(vUu!igYqFS!Q7S{D{1i!F%6#Au>zOJ{@6}2wcTgnE)&nvnN zd`{uz7QeQ)YWEL%M^D&a^bGVpi;v1sEMqdFgmz4ZLKvT+kdMt!S>#*FR;4Adbt& z2dM0x!40)(KnVKt102%6+xmLN(tkfdW%oz$yrPc=*mwtqi9LN_7+<{xD)wz$hZQ-4 zL>V?HN;o42g}_G*3WJXuq>#rCQY?9cV&Id(KUqA5YhKY*u6q`T1}QBk3?dcgzClj# zqtxfDuRje6U6yPH43^7*T=y*I7#c!YG}z&~7|Q;lB8#6fI1V0$GN&+NDJAx8>0rfP zK3JM?$a7)WKKF&^T^BgtBMx$CeU1OfH{1Oq8b%i-W&l@tv0b68M6m z-*A0k@oz2tWlu7SeQPy&TxaBm97at#z7 z?Z_4#=QJX>6P+mGPj-kUZK|Ul$_e)EEK51bhFokz4&y$l@O&r6bro)crk2Qq=0YbF z3Yv=vIhh#`g%c&(pt;)8ud(!NExi$wJZN5qo>#P+YkJ|UP89r2Ck+03OaCL3y+zH$ zm{-&SeQ(jb*5&(_@&VU9i~nr#zgYTTaldErN0wr;iQOb+N4ch1JTqHd_0JA-Jv=+c zb%dpiwAWGgI+iPaPwPG;N?L&p;S_r<%r+a6%~jUr=4|J?v`{%w+;q!Pm}xnLnbynV zhvvv{pB(jS`+^5erYMZ$g`f|xZU$Swhg*7<#fMpZxUOj88 zKH1WzST|F1LK~9P=34rE8_r_6G{rWqh;>t9-IrRtEXNSe3cY0(C%!Q`?OKapXZ>Dp z!}-2-{{tJ&?KYerTQ_&*go&%kx_R8vW7f^{qNM%Cx_Q~scUzuc&p{f~{?pPw$|3hS z0EyTcT_;6pJ%>ffPiGjMq~#2YN0C^IA2lqDn~R2pHYS@Jhs8D_^TXq4w$O0V!^6#Y zlFgOFqrss0%ka>n$>!c8!dFukBjeX3GY%gkb!$e3u1Yqskq#m67)khymq)>$6@{^J zC}}TP{6*{LHzOONzhd3IVevnV4CCf)T^So!m>-RVe+HGpo0A#6j**rhj3OL!%cw^1 z`$idfdvuh8uclFP=&?~T@MlJa!JoJE7e+18P`YWO^8XN+D-G=j~rT@{oX}0cL zteZUwA?-cu<^${gFQY=l^{Ea23rqjf(*HRs3cW{e7=8}RRrrVJM)8}IODtx%C}|@s z{RoSXwDeIH&$aZUtox&L8*x8DZ__5_#=wuu4TDe5m2N4r^fPnA#8qbTrSfIYw|*;a 
zI8~OuT31tJ@jB~fZEpBG$!R~fbVgyx)t_1SjHp6?!^Zmu%g-Mz{ZAHuTd|v#|3lXq zz(;n~_5N-*l-V}a0fGz~V5+M|2xVxaFY1C*y66ZC46xFO3z_XE?JUjij@fLRLf}m` zV!*9LT;B_@%AgSfL|)K5iMsfv5ODBCOktG>3kC=pAB#jAO0?VdopbN+m-)@VbM9w% z?%ePE=6C+*fByH}J9j2C{?XLG#G1=-Jl}~uAB;8IvF3ZR=1TnBM`HfybJpMPzRT>n zc^B(7f0-gIkpL*9|AZm7CeuD+q4>(TUv=C~uX$IW zIp-Xua$f(GoOAZq5Z(RmT+Q8oj?;YMwa<08KYVMiyS?Ss=egSlUwcC?pWl0HF6U$T z$@p0>%$3~zcRPMd@x3vr#g9H5-_~DHb$iM)Z}k4f8-2x2-01ZmyK&t`vT|e9-M{-L zfBV&&yymf+vhKG2@@T3fnx0*Mo4cm>fs!MqKHz8F$_MXiL+c{d-9#rLbR zW^pcZ=P=9hbJyqc?)dnlAF8=$J@-Q;H=qCfhx{4lt3Tu&UgK^HQPpvG9vG89{g6M7 zFMi0k^=BWdI{p{&{R@sieCsDZRC2r?KkLUoTzBO8ANI-@d^qp!-xS~fiTM64ANKk~ z@$Jh$?Av;Ce7oStO}GAu)7*6HqPzc%*M2;{|5x$t{qdvU_=xxX=1084Kgahs|YE2xVwK<^qhxl?tcHnHTUR?VomRgZejtK<8J^UvM;SjlOwj_;@Z zZR(oGvW{Q-nD2GxWASyc`2KZ|zH_ou6MWBr0)H+`$SRdW_SNUOnx@? zHFxde*3|WGWTSoex+@aT-sgR?`)+pke|DdLR&dCm3i_DSyc z8?U`CzW;sh_NH6Y*WBxrPrK%*yM4wrN1a1|%zw(=UR(TWcl(Xk{;a!wSuuaj9@k>= z8sBc|nmx;|m22<+jQfPjwO{kSxb}V*)xfphVfy1Sos(3_GgO2 z58UiNLwCmmM_q&`9=JYAKk?)P?|0<%1AFe>TYU2ad)yW5Uw`2Ju6OTv;AR*9hab4t z-H$$hvzv=v{Q0Bq{!2dZGZuZ}YwzfYzWfjFe(Gso^tZ2!Z{Hr@ek#8G+!t@Y+Qq+h zuRAx$ZQXmeBdTqE&5hK@w(fVg_s91ih;O5RKk8~MdhmL;N8}#7-}(Qy2k&<+Rv+}e zt3CLB$3OGn`yF3@@Opp5-~QTfxV&w-|TAOzU`y;xeD37*T?qVd!7DAzI(l! z`HSD(<8B}Ru79rk8ZYO(%Z=zI76mh+5maC=rZy6)`WVzlSFH#~f+8$Gui`h{Cw@`~5I-OMw%iib-?XXi{9pI?|-}gA4MDqA}T+ZCGC(4_U$O{2p4Df*f zF9mqnaK>3NobkAK09qFEOn_$tyeGhO0q#F@rwiN78_xI(hEu;5;Ohat8Q@z1z7ycl z6E%AGzH_{}CrSl)+Hm$OV>o>ZZMhjl*Jh#**JsiAu-%*~b+%g%@P^^^Sv8zK<0d5X zs^RpRGMxI{g!|AgW;B~|4(=ePqg=^c#Q#71D1BTNl|D^Ulg#a%G_&|V{ z0=yjHl>i?P@T%eL@08)}@0{V(FBne!PJp{N{aY65Qvse1@QmS%Cu=zUb8Weq99(3H zexLiEuhxbB{cU~Rd31p91i1Suv6hAYDZ?3m$pl1R4)98Vj|X_QE%%?_Xk8fRR9pR? 
zXyD1Mdd67_@Ur3buNY3B>{HtN_ZUu}oZgcz{<8r~j1U^shdnz5i5zPa97E z8N=zH>2CF>K5KZQ-y3DzsjDvZDdN13kC6`qcqzckhBKau;q=cxk1Zx^>I;TbUk~tx z;q+NGJn`8V^|UsNGtxcL(haQ-u-*I%+Vg_pY`185;NxClNJ%+R0oZ-|L4X1v< zaO$@Vr+&w9>U;b*{>wsr&T#4rFA}l4OI{4{fdDTBcsal;0X`n!)c~Jr%lAe3)^6>N z?{r)Jo@mE#=FR=Nd&|Q76bz@nXgKvb6A*cSfaeXTf5CA2OdC%9jN#O;x8-}I88bi7 zXS1z-PgH$z)Ur@NWjNc--_j!At+zse7Xy4Cz)OZRp0eSLXEVUJ45!bI;q+NQ*uGzz z0lpRBI|1hi%8h_~G9!~jnRc*bz{D{DCY>o09-c6YoQ0lpgGYXQC<;F|%y72rDo z9=)u6{HXv>2Y4pHvjN@{;JEZrPlCw=dpZS{hp|1ILCL+@ZIq>ocf00)UO*( z{ifm6yDy+^S*TALPW@VduLt<1;q>1!oc^mXZwY6&p4S3=-EjJE8czSzD_Z@jPa97C zLVzzBPM^Br^hv$4wO#UbfM)_c8{j>LGoGB`Y&YMQU+sRIpmj;~``mXbw=VQAwDs8& z^%Pt6Y&T~(efkZje(P23eRcxeeT{6(LZ4KCr`z&=7si=stKSo)U(>2*yBWjj zlQo?Bn&H&X8BTru7u&bn2=LVaUkmW{0N*s6ac&vD8|Q0V0=#ZG`_(X<{Tg^f`+k)Iyd2<_03Q$VYJg7#__X2d*Noxp*N)-TyD!6U zS*ULW_-cT!1^Bw*YEAHCBM!rzM-8{eL!Jrntl{+UF`Pc*hErcP zocf&rk8W=tXUcH;rwyn7K!BG5ylgoAD~8iQeMkE^GltVAYdC$%0bVhjKI4YdXWDS; zXADpDdn5Zf2l}j^Y~QcV0N)Dmod9<~N7J&f-Bf_513VMp*#Pee@LYiR2Y5cf3jtmX z@Bza)UM0hspQ+K7wC$cJOb7T(fY$l0W%O95UF@P^^s?^X?`|J1wNw>ur+GXY);@HxX7&w}BMCw0Dk zJm~<>1bEhPw%cPk+g+-)Z?_)cjR0Q_@HN93&${7^XY2h@%R;^r;L#tn$ajxNsQ^#6 zyY&yWkEapfs{y_i;Ohat z8Q@z1z7ycl2iwP=3h;D*X97GM;5`AJ3-JB`&j)xRz>5Js5a6W%F9&#~E#J!z80;oH zsh@q(Qv&rr-qvSNv}!o(bIowpXK8M?$@mU=*>L()3{UiXBlpL1Eem~$f82t1=Yavk z>0dIOJ~M_>Uo)KgCBvz&8%}-xqpfjLUof2dX~U_XF??6Q(CSY<6X3M~pEI28E*MV# z&A)7Im-;Qksn2|(y*_I=^_BZu_2lCLUJdZ60G~E|Pu$#|XvT21oBL$@c>3G&eNiqk z+2KZWluNzMPO~dJ5{7h@R)DIX=egEII>dEtl)2CoK^@Wx8 zK1IXnGhle)vo|`Q+}SmwfBNDsitmtT0z4bwJprBz@csbL2Y4aCivd0m;H3aB2YAJB z=6T$3=B;5k^{a+cU;F#^`JW5$g#cd)@OpqZ4Bw5@aK>5wht@c$uNY2!_KU50@*czK zlQW$9l(_&wo(}L#fM)}|C%|)tGtPd)8PBxg)Xx}Befl5U=O+{3*#Pe`obBceXSQ@b?zVhYPcBvmXocfusw(7}i0X`St3jw|q;Pn7+1o&!zuLbye zfNvVk{A?M{JXHUweSW44r_Z$E^jU4o-H%))Puy*UK5K3Dd!okrt~9>0>u>n3kKxo0 ze67`o`jX+)kAJ;YPhM@y_eIsjaQ8{{pK7b$6Rm%9R~p};|7L)18BYHl!|9X%=T?8} z3x-o4ZMN#kQvse1@QmSXH)}ZkHw~wL%W&%R|JoWS^##Ls_5ZI`PhJS{Vt@|>cqzck z0bU94@c^#|_*8&T2lz~Y*8+Slz!w61DZuN7bG#acb6onr)o#4@MN`QWcORrrzOCN9 
ze;wcp0ls87{p*G^&YsJ=p~QE{a{=COIQ{d6(_ZUw7Qh?V3 zyb<85hO^x@!;^Nejz(I;+KJ4d~#>^LB`+M*+uak^3?!e3-I*--!z>4 z+A^H&PXA|XJk-w^zN`PQRz3MlfY%JC|D56UsXW}?-~UdNF4X(qF4Bem{L# zQeJ-C6V(H}VL1I)4X1zqBdz|_=MAUc|IU>z)cfD7(uMlz$CNhXp9%1q;q;#~oc``x zi(48XPX&0|aQbHqr%$vu;2+@W0M8iCcC&`F-C{@kb_W8yWH|lHhSNX&gjRp*Glo;Y zWH|M8!xR19XeaPJ2IEg|ZI}Ml0G|r*=>VSz@LGV+1^7aMF9mo#z#9R+8sKXIzHayr zyQ)vORDQDYe7Aavo9$Pz{|-Qt2j_L~Z486|*u2;Cb;m*I?*QlftsCHP2Iu=|Rri*N zU*7HhdmsLtCcn?)!{GdT&tA0qmySQ#eg6#mkaydD`8)UD`|!H=e(3$nE^57w%eUPf zuOCO9pTh(G@ZmXMukpF=E4*UP`D7CMjN{Y*#j76ZYrmGzo+l;i0|EW3q0eU$UEXc` zaIOg<5EygmcY*WFw2`Fg$mgh*OvqO zZ$W?2wEGbF%-0hG=H$ALq+k4dy)O7?!8h=J)>-#_zx0Bq|2^48Swt^#oSt|jUcDT8 zzHTbv!E!)<2711Zqo?~qvpE0!KAb9i{wUz%&!_!(aXEna{~G-Ka`MOp2)+q^;8O{x zgFgbkfPC%;zvc<;`}=gqeY^bJ1yB2)3(nUm)9`t5z~?p4ui`rY4D`d`{63#eba4`# z-?QPq)GuD%4?ew-Z0D0zv;@xgd8^?6m;4n8I0*g~a9-c@^R-{T2hQ`BT1?%1c6Iyw zbOpHoJz$PCEaKGx$9+Eey-Za+SPJOxcHAG=`1)(n#pjoI!H3tqS?3>!_4{@&*Kt3+ z06kx?ExK*Lth#l(IKNIk@>Yx0?sxVa_wi)$e2&{wpP2NEpKJ26dJL>weP<{-w(aD1mA0^eeoxCd;(P@-zwc!Rb^b>1`u8Wo zA?V)%&hPs=4}Q+^c5%Kh!2dkJKONu?IPT-;-%YcgzX8tov)Hdk!1=lN4B~vcd!5iO z&gVJq{jXp?tU-TEKtBk5uX~PPYVoc6{lg6+D*CFbG}aaxvV_+(;RP? 
zx93A&`r|~%{QO)%e=GC{Z%FiG@c$KXzOS9d`Qd#5pWlOi=#z;7$LmkQ`F(wi`Lh9^ zHRyTWxF7L+E1>t^Kj`zr>th?xUwvKs{P^z^^m<+wnuh*I1Ns{S{N@0EO@Nmi_wDlU zys~I_415_0^RMUq@{WMd4D{u1CpsQa7r^V66F!%$qGfPi@8|{p0yw{yriSytZ#&L0 zf;3+J7xcVd$NB0B-Xzue=O^&=D5E;GYwq_*8kga z_wRAba)AG%k9{J4AjjZ`Sd4@%$9@LwNn3N4vie z(9b<3X}5y&d4C?@7ymt&KAyGpcr#*sjydl0Gl2b>`^$ac^+b-(F-OrXeE9j6JRbif z_$70`_qUF>>(`e8d@H~ofqwxFvi_gp`rFRG7kSu^>umFg^F`pZCZC7FFW|UPh5n5J z|GNWx59g?p9q{`72^axC559^60?!*iO8vJIVHx^Qf%9`C4e&34FXDWX^H{Wh zJlgNRBz^SEZG$IH4GkVYd3>z7b-UY4woZ?YjtrlS?zv}hXn16JY0_hED`SKA+%r5ndTKQBaNor4Y>uD2N*y-Jv1i9!(hR3Cg2-3mf(ZSK-+nr!^`1IJ(QKsPdi0jeF@Ngw@IWc_l%;50J zvC(@5N6(y0*zKnWkDeJjHFC=3p?vDpoe4d2^7ye+Lvi4P$4;HNZE)=I+9&@=KJXLXHA3x*F*zNs;W2Xks96Q}N zcywgs)Uky75^$MysozaU`zDULC*hH?(PNc+5r$&bdE2mDok*J)ipvc?^?{>uJ z*$vk5(}VFnRIW?Y=&^EZr;_dux^&;}3eXkq>0?JvvTWTZ9@jc{W7(R4W2a8~-wqf& zHgf7TGFfreaXikiE6TX)k{-BG9QPG@Vd#Q7Q;i33HC-7`+O9wu8!iYupFwcT@% zH;eO~Ja+fd<1Uf6yQ~hnvOazE4m`)n;($hn;~lT!iuU+8%#-0iJv`<@JbD8ALcIG8 zR?eI*qY<|k-_i1fr21V_UH!))`?!2KzShrVGctbkbiDiG=@2y2@Au%u z$Hm;N80bz6pK!b0u~VZMO1CqW-9+G5ZcjUY%#Xa&>{bdUjH5$Cae&G0*wUe+qj%gj zc)Bv;Ggxtj%Nj}Y6F&-`q}}aIXHKBJT}vlU-PKI?VEN1mS2d@PzX=RcCkDQ_lBO|p zHTM(fPM440j@>ivnA`P^-RbubcFygni34YWqj!v)Ix#%zx*bm(Ej>$ysyQjgJKgD{ zcO|<_Jb0|w<~)u_cK31j^dVOZoC&q++-@{<29wRu@w?pd;3xZ%0B%3-rdVGT=%}A< zlSkriojEx=e21HsUAeaFTXUarlb7$B>GJ5&+m0U_JY7Co;S`Rb-NbzC4CQy+J|0iN ze)?_Bv{1VFqFIq{#&x4MWOnA}bSAqq(|+6Wq2q&4=5b3ti4Jn-i_`6nIKfG- zS@U<7UBWPSC$V?jbC26M2S@I5zdM5cEIvfUGwQLj+h>ot29k&3mUgH2WV&b0b~Ekz zvs-^0o6y_M>Br%uQOmvw>;OAE|M0X6oW`b1@E~={S`m zr|xk3?ub8B+;#l)89KXp+x7qW(4g;i(xbS5q-x;eCa#lcC?45Szq7?B19CKQyQ|A- z;^V6G&S4x*{1oPs-Ryd6L3yv^n6KiI-tFaG$A|CcDENB6dwBTHp`-W6L=UHU9yxLJ zPK4`zUB6l8=#jnKToK|_DUQnT)A6tLs7MmfJS@Y2=WPDK;m!|EVb+p~2!XpH z7`%O?IS4J0-tP7nDUx`W^SehPiuXeFDL&&$useym)6au4y(D`Zj@0fnXviJ9hFgz2 zCg9z3O!TKYb+9h|xSToJwu(oG$Igu6xFM60KWj?Pa+1^RBwREg$&|^da`g1+yL}2> z9ks~auOscA+aU!llX22xIk}Ig8h>KwPY~tFoFz^i_b&uwns}3lT)ewyxh(^H&pmgK z9>v(kb>|HHsjol&#M5>10wnqACm9fL$n!xrmANxZyx!1$p{3^W)J>1gX$+ojX0j*u 
z$cHm^`%?3Kj=SO@&%M}n|A4E;+uXTIYfZ?QH20Rc2;~TcDC7CY*K0CNh#{Uln;nRs z+uVuq)TFkO=N&)oUI85Q6BEvdo700oI~yFr&YGMc47oG%(NmaI{h2#Y_jijkam87I zMC}eC<=q`=cczM8JGFGc9Ae!WcxCj|ZRjMs?#}gG_a8S0I5z0c0~5cvjbUx7!P9picdrlqVH+=u;t4tafi-y*IPOm*29s!#z>~5WOg3~b zXz5q;WMAg&xQ2I+H*zMhJ7mo>w~^tyhI#rF&nNCxV$#B{fu0umY1mB`@sGXv%NwXG zH>U1X!SUN%LEq^r!SBfWGT?Oi)ZKDih-dthN5_u4GwsvE{uMFCJ(&f4catFE#K$kT z^fa9a^_wkXTV@KW0>t17@JUw#MnI;~Hc6i*KdrEgrWt<$o+wH(^S4SN;51u&p zNz8mV-8q-LxZ=w7Or?3M)T-C33OcHH4UZnb-OUv7Rhz_7F93k}O997DopH77_mtMx ziSe{`(v3-?^})I9CmYUo?9>?yXT1M5OZeDGt4};1;c3YWIbTas*9lCHvP~OgO%J}w z1uuDSi0+d{yH^J_$}e zf1hvK=*fA#+sFSy(r(T8@b~;?jSu-6xcBj&W#h6VykB^y)A@yQmhc#_OFqp#?j_`W zAISUvhWPXaeEPwyPtoZ4eSC+(IWFW!z4Ue|qcH@*`sD9Qj)SvK$gALX|E(Jzey(fT_>gZIeGfhdwr%v} z#g4ceah`ji@niy=@$>gG-H)%v3*#sEpMUg=A1{un-@YXLKJni){yD^X+4z(1$NQ;_ zryu%W!}H)daK=e}AGnWmpTsjQT=Os|T(yE7uE~; z3lhBhc|Y=7z&Xz3w}CUSo@<>mPt?B&diIZe68sSI&2d@7_7SX^fAW&iuVcGx_$IbT z4Bx_b1#9}#XUyn#usv?{^sgFD|0%=ia~9m!&nLBhz?ldBPU?*DVY@ZMsh>5R@yr{4 z{@&_>(UbG@ql|~Y!@6Yj->|)8{q60f4{Z>&VG>}0{8t|6aNw8!{58D7$5SQ_IcZn(uxNbv`@wbNL*4+l=OLHG zU;Dcu{C`V-FAG;a7ZG3irzZsvup$|Df zmt*Up8+xB-?eBi!S`WR#RnK^B9@xIEhbi%Soz&Y5IP=5bnXZAeU*rv=FSyS=`eoJd z0q~24^LMG&j6Z!Yf&2VCBnQrQ;r?&GxTHFhdyJF*X>jjzLi9PKXPo|XzJ9U#l5ox2 z5IFP3-_I@^fAWgamyw?_qbHvL_wn#=x_o?7#-{?Gv&M&fT6{F0=Y{h%l=q)C{^RhU zH~!@7M$bCgG0lJQNW9pQ({7kH`UyBo3Y zxW61gOspgFmxHrj$X^9sLc5;7QtFN4N&R8yIX>jC1!rD8_vddszO&!d-vRwOwCm@Y z7f8D^;LPV7w$EeDe3Elb|1n*Pkzzp`Frha zMo)eT+}Ft)B|q!N$A2E%FB`^(oc&{d{NLxs3!X>*Zg9ryx%(GQ!IyDSKMOtE@m$B9 zpI7HN=Fr}LtT~S4z2LqcMy0=e-1--wr$2uuzG(OswhtS=gY6?&GamX(8cv@Xa6c~o zZ-Tij2-p3$ZhYA8is*k$d@c#s_lpnP@m$X{*+1qpiw>mS=c)a|e3r2T z_kc4$n+p5-y{0j z0ACQUukY%@b-Y#r{IYO8pY)$+_lwVu`ee~>&wp3q>=my195}~u9NYa^GjHStqpxE7 zkkONKJbnH6a6X24fOX;d70xm4agL9zH@5G&)+gKX-2IFCQTOK%kI#di&(E0nOSrIm z-o#HnXY^(0=Z&7cfXA6n|9N%46tQML$+`CNFUWYYedeJjv5DBuw&MR4YG3V-)=7@YYe9}*wk?<&S;8a`vjhrA1qv)wATyRl}w zPk)>^L$mJB};x_j%UWO*22x%;zIAzh!^0$@My3FF5;~=}vSxaQ2t{ 
zp!n!G4hZLaWZr2y!0BiERYv>1U+RD9na%iblsf5sR>B$oz;l!BK5)iQK9x-#Coe&t z`QcU{^6dVE+dLl(@bkj;c$fdN)^^Kiw+zns$w$EXJ}>!Mqc6heFeYmDi#+$7)_&2a z&v5FezMliz2+e3SD!8{_rdpSSaPw1j@!Iw|)i{k7vY zEnLT?^LefL$zwZ>HS#<*6eQ{+g(Oqz;?#y$@x6rU%d{% z$36Gqe;RohGya(yllaDsKlxds&q6Su@4(}n4+pTF#+vm*-UaSo zkLz{F3^@0TJjQ_I#ya^7_ZQLo`uPet^F;lF;LHd46S2>*-{e_v_K%#esfyCBo~N;Y z)V~})12-l0>+4OA<0awxI(-P7`KGJMp;fFXQt4^2e!s5`M(z z+5YC>!*+|cIH=T9abVVphxE!pk@XPo5w!M(rc ze?+*B*P?K(p9{iOAEAAlf3|PqKO*{;c);1O!i$oAjf1magU0IzkVD0x{qj4xW1mcC|vusDO~f`)1UOu`|I(&AY6S0gsab(aDBaYUbyOO;LLLg z`I!Z0p2?TQ=d9%6qHukGYSZ{n!GFv6lkbR+`uG1-Ge48k?m^+&uafXB92305HaPoR zMm#&<>@RuuPbckCUx7XgPCa=6+>fKie@?i@KQCP8y#;W#JC1f2!Pzc(-RP^(FB?7i z3b>DRPV%`XT=RKJxYqwBIOF6vZi6#U^7PLn`6TE5br9Ugqx;c-aIOCl;Tq?(aIOF7 zK+>-FSA8nL4+VHhxL$7=0%smLE+gRVFZsCm=(x`b*ZH>e!uIix2-p0Kf!lb-!EHQ~ z;-m3Q1$@qe^Yt(LH7`CIe}sB`T>b;R0Pc?$Z;|@sJngx@zF-}B?&Yt-b2x9=;|}+4 z&;9gJz#PH-n0x}9{qtPUi?}~|?*2tZoHw)HJ@=u!2Jy_JUmTYmY%gNXaUov@_j%Lz zgZMb(^xuE$myVxJ*7Pr6yAx~rlXLBTG|wZ@`*wLQ?48GjKLdU8d`kFl3O@(Vcyf8K zj-vD6jE8(yd@hL3oN#S-LAdVM>)>p+KOZ+6ZGy91@(w&seIDDLSW{2Vcsahmh*;@I z&bgk?BcF%Po^SUq(VvH&{awfQEY|EV`2x6Y zw*hY3<@0>IFO_z8pl7?)7e~o*bFyN)&)vVM zjP}?L^{<0|0+0KCJ=Ggwsev>8O>EC$&G^aZ!M)E9?WK$R?5J>^f9l}$-@W@Ru{E^=Y&U}!c1ZV%qUk9E;K3KQ) zmnQM&!C7zQ2aUb~{b8dgp8)rDa_8Q}+utr3pH=u=Fh1nV;-mBSMd5n>vnBkOq}|S+ zOJZPu*U)a-@OAJmaOQ#fJh<J|0Qt7!#Ja#Px|HK(Yj3q zcsjtlz?q-y%M#xVINK%fH~LNF?SRpfA2NF8bHM1yOW?j=OOl5P-r2`a(co60ZB>cz{oWv)FBP2!+DYXE^y9s$J2A)?0tx*fcRMtHEbWkn)N__7~H=e*S`yx0QdFwbmG=6?j-=U?~73OM80#P&GWjEDTJ`0%>|e2>lvSO28|uLt-_fHNOH zpC6O@VZS~1cn7X$inqu+sl)#%C3f%`n@>%LjxGm?h|;W}?GfwN!HtCN0R z0B7FF8%EFmUKD@6_u`#4g}+DQ*%7X+y$EjOUo)Kgb>owQ&xX;H zvwgcCv3@=G;Xj0ViS=doNBVg_E&XL&p1Xfh0rSNn#AWA)!{E$M`Zc~f-98S^JdjU{ zkLLfZaGhsnguhJUJP*!xGiY}fob8g&i_enyTok@n__}b7^9nfIWxpb{&vwbV_W2wT zf5zpxzOKx}$L6^xT;mxKu6Y|0u6ZkiGyWR3D_Apr@^SIeyiE$%c=$ZquSMwDZv7XN zI=NulCBG>CIuCCMpOJiS8UF_ScZ@%I$Kj-X#DOFaGkd&g{#jLIOE^M_B7UvpL`*pUkvD3zrOzcFkbhBq|R7(cE8~G 
zd#-=K%>2>k$Kb>KdG7v28O%qF*Yg3XpXjwo`##S_;VI!y7M>RV4&fQ$|8`a4>2G_$ z{rTz-i2kr}t>>!v=zeq-oa0i*_B7TU7xD%1(edTuHV;Y$gE7lc%i+;Xe z{`FcN0r7dv!!5`|4(<5<>Nw`Xy}w=`I2_Oq1^7gOPY3u+fY$gWW8$ONX=jChM*7tN_xrD2 zmtGP55z(&+pAo(({4a#>2-owI=$DgmV83S2uMTjI3wa9Mj!Tztoo~C1e+~Xw<4?X{ ze01D%!qxwv@nM{YM6cJA4+~eHQov^jocXD9+{H()KaUt6_Nx-mj|tb$jn5b#wp$bZ zh>Y)?aNVyLz#0D>;$JlW`d@{f?U4T#IO~i2ec-GY@>%dDiDy9iRTu8#DI=Z@ z>DLQHzaw1tuPfl(FG_Dn=E3Oo$(ng4&wx{3hQ8bA$+JdZfquWyllK|@IP?dMp1ff6 z1JEBbdh)~I_WYz|e5&vnGCt&E;-mFEZhWTTGhsOMSv7nb`blu+pFZcnef&C(Ge%GU zCBx}o7ayHJ8^X0;Yr_9f>Z%*@`gq2L_XyYXsJ;Nt2RO%#`^87FU$B3k>wdwy^IXr< zSYPy+hY#zB{8Qjn#N)aCJqyRdbA6q{yn62bMdNs#a~?k2Uv{uPk2S}aybkX3`Kyx8 z72%J{>sda}#?v*J?7wWcf$eUr*)Dk&+_$U8yI$e?`J#T|`u@d1aJIXO?L%0zUGl@? z^YhZ*0pqiV?IXsAyd*vv|CsSv$M%HrA)geV_yswp!*jxqN}T6~e@ghQaNUpU!d1_4 z@Z;!hc-�$oYNr>^J$iFGjbpALQ=@-$A<^m&VaVAC;0d$A!ED+}Fd?q+cm;`mDmI z)A*2giO-*kPe%Ap2=5lYC_F3tOTzbqGoCfX(*w?ak@p(?I`lcCCqH2Ho6zTtp1cU| z^QPDT4;!B?_>>G!-xkHo5IEzfzHIb6@EI|D96lAptKehCpFZP8A3;B1^yF1=-(USa zz**t^E{(`7)8HJh6#QolFM^*3XCA1p8P2@Tg8O)m?TvPqxqyBiobi;OhOwz!`rT@$?%X@&kr5p1k<$>$ro)zXJb)@gYBCIO9JY@ELCe%5gMOoRKn(!8Au=x4wgPYvy!7axtkX7n@A&l-R7Im78c zFa8?mLO@>!_t)7po(4G2AI{{>Ht(Cfxh{4fHv~9{}$c?tS?F zN?y44;qTTIgnz+5fTbwh<8Am42;U?6l5mgrJTWmU8~$YQis3&5J}&$!XOTf0RpB1p z1pSoZKLI{%_|JgP7=94EW;lQ6W)a-)j~|i#E(!lF;TME|Pr=(^*sld_Ph!n}kxzm9dN{EcopP(Q#%BrJ=Zp{ejQHsH zU7R;Qb!^WWAM$x{Uk@4&R@o&Cj;+$--yH_>f-_AI(oEp6Bz{@_QWM%)`r(2gXJIYH+qg&b;-)hjlfM z?S8CTSLE&oapT43O|L)jaqsW&k2T{YKM3yYO0T;Xj86sIMdL$$SbX$# z2isx%pGN$A9y$9pZrYv3_JnDdoNMoMwajC6u@rlpAx<1=R7#u zWxr~MXHH>1HU8vFMxTZLg3*)L!R>f8j86og72`v`X7oMKUov{~b#ULVUJu_8uJd!p z@#gp{PYKs~vJ;%+#eD7uXP)~JXRr9^@vcv}*2zKRlZQ_spf7?m9_oigukB6>FH3!% z2WR|gv|BU&ruKmUALc%QVp1qFf)qez>@zZ}?^qPkWaK^*_P8uKb zDe=)foE5HlIB)!S>%sVw&xw!L$-L2X-dhOh8{q8MI_A$6<3rwgXKOw5pdQlT%qMx5 z(dVGg7(ICw+}Dr(-F?5&a~{ZxUiXV5!cR#(3<=kHscif?|5Qb<^>z-NdE>a8H$LPw z@zL>`H9iI8XU_PL&j)<^M_R{a06qu6IWFWiqc1`~YxLy%Pc-NAKahOpgnwChKRENl z`pFy4ejPNN`7eMoe)<%_`Ms(183DJ)$8q6$J~Jg;&u7kpvt7n>-f*^C6CWMlIpKPq 
zvm{*W@B%pVmdCi%!5I(vs`%*Voz{e_|0UzkeytmS@*VNf&s}t4p743rdgv1V@Ks68 z_}gyb8c(lq^(hM1zhfE~uIJkm!ZpvV7k{4I;o?XmB%c5BJe$qM&A--J((;okz!8U7$R=S#NB-{)o>dH%Pu z|FW*B@7kC2m-B(=et7vAWzH+)JO`RaJibn}K4*k$J)8$;{ZEZ1?bg6q|K!U?KMj4u z@EPzG!)xHH#-Bcy45!aJxSeMys7Z!#Kx9ulb)6{yE9RdExpxs|LQ}*io|mNEZQtH}%0B8QWe-(`nc^TYaSJe6(F?z;X0rz=O{kYLH z&WV7YR9p*>CE3d|VR!&C+gNxW_*V zpY&y@f_n>h7JFfVckKEJb?-V}wruIB~b9;Vh zqRBOHi^6-wKl_%ZUU^lx*44c5-xQwxDHc273r*YmUy;Tq={INM!CyW`+&m%JuET5r*> zH{1Q?y~%xlTNJMC4hdJE3E?gI5q?noN8XmS#rW3{e+8WJlh1`DJh)&!oh&ZG2Mj*)cxk(L0*sI43?`!p{ot7OuyIUg5fb^$XW}H~?xSrR|3D@(G@!x9tsD2Tg zby7n77rmAE`L_6U%6MHdK2yjKzX+H9)8HLA9@3xsl;PB;!To-x*NY0m z^}0||xLy}JEL^V(m4xeYsw`adHYNNH$-|=XPY7>-+t-_`!u5RtoaQ3-{JH?@OML z5$@yJM!Q+zpO=312>19U@W~l|1f0hQAE(!kg6EC?ZQupN-v?ea{NvyQhOdB^4F5cM z+3<_t6~n&>&iRJ%{~LH!^nWb*nG&wo$EJmQ`M<+wM))hmrzYIvhoH~B%Wb&D?_YYH z$_v-?x+B8%d~8DaKS;Y%!gXF+5w6!yx_+m9JXzry&jH~YPZ``^-x~qvytHsGX|Dp# zd5L`7=$D|c8a?@>(bu7$GJ5i}M&E$`oY9k~@VFl@J#Wkj*L<=cthc9Llk}JIdVZIT z7vu8$rNY_1=YD=jyR~0(zniRWJbB^z{_m`CjsN`rXzz32f41ih;p)E~;Qcf0eI~(e zoh*T~PNv?SJpTeX>x8^v^wZF<7(Mxp(a%7C#pua9@wm_P56QUn3D^BA56<{&*gl9g z<0qdm`Z;V@jh>wS_Vu9iIs50i53>vP#(t3hEjZgJzvkK`&uoYMo#3hWG{;e|n;jCa zc^DJ^h`jDw1!sOz=M&$H;LH#Cy3wbh-!OXeEpR(tSA^^NM)cmKea4f8e+r!Okart> z2KubglaGV@c*doEs=_sID*?U=&N$iMHE_mBz9BwZx6G>_U#%ZLkK_J5j63^5{xCT6 z;rW2{YyJ0H^SM+@wl~2UC;7I~*P-7rdh*ooxB54r?*ykmdB*5hq0bsU`F?OeUTe~? 
zoba~`?=${u@b5SN^?d=JLqDv3D8SDL z_Y4_1WJU#S8OI{!;MN`;*6gzx4HB8l3yr7Ph;vW?hl@ zgIk}1aNVzq;Pl_Y_7SY@gZ-BkN$nlw(;qQPY2fQFL@`pk4Jr& z51-Gge9HWbRJX9-p6l^~{qx-Kk56%Hzc60U_qy#t?00OR{7&#Ah==2~^?}$XDodRD zKIw?juR&h{XMf4ZjeZ^as?n2A8T}^o(?(Bz4&3KcuY=7PJ^M8$`seRW`s#1z!I_85 z2b0GZjSu++qmQ7k8$Ef$=u^XVCm%JC;w>vBC9xy&xw0qF_kRK8s{oMOu<5NdG1ICB^h|%|;-65kVFN53q znHH|s2WE^9>uMI9dCQ^Qd7~#^5Pz-PCEALX0mhrl5{7Z;`%lMOTi_b&xfh4JeE5d8y-+_2p56oK% zob^DSHu?hM=`woqtkI{T-*5EfJ>WJ!hm6kve2T`0{IK|F-5xPM>{rS7kPjJs8SR#h zo_qq_$FJi!72wmxzk%%;teJoEdGXQxWzqPsK9`ISIp<5B4~*h`fbEdq4bJD0zX^N| z{+_>5>S|rM$8Uyy)9|-|ZyElZ;5^Q--TS~fKA!7&Q3~Tj{S5T1FY-SF&xl^nle5Bo z{C^I8kKum>o-_ObaL%J__p9J}qyPRVCHrZ?@GN-I@E3x!-s%5(@RHG2z*(o%zYV-% z^uG^&4xIDDDz;~^W_^-#?fu^(^{{DtN^?nnw~P;Ygvb4Uq^}b@!5L2(+i9#B4|$*X z=y^p!c)!HS_I>~n1E@Mr9a;|MYYsO~@ z+w;bUd=cDVul{Yx4(dQRL0|lW&6i@zVEM__+7idOiX_=3yG!C9IhTa;~k< zdE--CNaCy+AM!<`pM(B_(UV^U_x;lAkQ>5hB%fQt^}K5boc*0ayH~*3U-HiXmGr~6 zt3F);o(=FGaJD;*c6-6uF8P6g&%ppM2KWFt+hzXCqMw(%O$gV&@0$~@ucKFl-zPq6 z;OrOM-4MOzGlhKkJnKBx1#W$^qF0|@;p)>L@Hr@Y^(hJeUFp{lxXtHCz@Ovk_m?M% z59`bGmk4K`JlFY|`+?_P{`Ds%b-65Y>b#x(ShH^RI&Jw+nq2$4`ln6)I*DgvvB}lH z@@Gx1{-uvMx#}-2wdb3Eo^a-K^DmR_ZE%h|`4w65%|3)lU0N4S1&xuf13cl~~h zt^n^AuKxYtjGy_*gWJ4~h>w0gc2>Ba=X4<-ww{j!IP1&SIpgwN&u=+yp6lx=j+^H` z%~vpgavVHwnQ!lRBDeTB^}5gna9>wh8Lwq<<}LThzC8e1KmOuAgh2_*CL&^HvwG`b)y~bF+t* zn?7HZehmo!AHu7`b-bp9KS%V-!u337Gr&7P-Hb`+q1@pd_Lf_XnfYOy<~jI>j59e%euN2b;b6{p9D@n^6vxRG4T{XljQx1 ziHE!wkNbMldN?3l>zU8x-xbm8{@eMvC8EA{CT{*N*)dEpv=S-6g4O}OsY3&OQd>cX`SFA3LnFALXk-v(!$^dk?^ zYO}wpPlL1GsP7iN#+ehY{T&jnagGVsb{B=Ke?z$D?V@n?-w>|zaOVT<`^y&j{CeYQnW17KE#Q5uEWC5&s2n#!tR1J{r$u;Tr!H z;i~Wbd^6Aby1oyb@eClIJUHVaFNu%#YdpZu8UGUeXN*7jywR7TUov{~2DqQkH4hhs zYrSn4{|fvs8-McD7n*rf|BP_WbB}P%PhWr+!I|d_;u#XX?sqf7wcaiW*LE)j_@;36 zPosT*+`r0o{d`B#f5v6c6SAWB(=gBHdkp9KL(XuX=l2`_jO&tic^u$*2+#kyf0O?p z^hM+IOmNn(*SmkwG|tyqcb@BY0M-}v132$tU6A+Ud}{{nazAQddlqYsFZn#UKac&W z9KY(q_57L7^X+>1v(c|D=-F=V?~}Z3n|8^sfcyPL+wI2lJlEstesKEFVY>%w_KQ4k z^b6QNX!PVoqhG@IVWTH68GRkwLq<7Zd*paE?3q 
ztoZAEyI_2J;In9a$k)WDB>CJBuJaq)xAD`@a~+p{w9kH}){=f51ZTgXNye>cYE2=pA6%$JgUmcThKWUJjutc{{`q7=ge0V|2jD1B;OK$J?~9l zN}lK2)p@=foc=ZVXTj-D-XlJG9#Sy+IrtnAyt~t7*dq399CwUIs*VXmn zbHMmaVLNYp$PbB+&I3cjTaHJ-+2eMH_gX&h|am^M~{BVIEeoJ&QH- zKz>2m{S%3Yk9&VTzHf_Ok9Vo{B>z62I)7$_Yrp!y8D|6A2e4+Gri5jF)+<`ayFG{XF++?)bhKN1N-(n(+*LE!o}zXFTLx zc--e%^Uxz)`idLiyCvcJIgknA`!pWmzbAY_xVF0#;09j`Ir8vl&&?@2w+3RnH2@L_4U4$kq~#P%}Q953=!@tG2z zb>TWc^Lfkz^K(V?dcKjQqo=m)@u3@?C>7+wUg7=P-=45xnFaJD-E?(_CW8J8+J^HV~*Q^tqwo;95MX~P-M zjPWnS|Gd$Y*Nnab{jAZG&w<;1Ef}A1_$(P8_UnS-)Ypwq6+X*GPu?*4Dd<;>o_rPD z#=mBKrs1=0e8@M9eg^tYqbI*?^fl~KcpmF*0o$EebAK#g zJ8d}iUEuUt!gj{!$-9ldj_s_`lkW$&@#n!gU;PQ^ZNonaUKD;(_PYV$9yg#b3BTI$ zDYw7+r7Ya*KL>rq@UMW68~)GWRpB{_XG*xod(*yE_ddDd`@m-m-w$3B{)6Jrx?tX( z4?V||{HMSyeiig9;H-!KZ^hw9Yv8Oa@(pl5zN6y59pD}Cv-++8-yh&? z-}g($_ps>o_&x;Ae$8Qf1Z(z-ykhhV*d8-_@^PbI!uEvGlUKog9`yAE+vhkAaD1UB zKgRJj`~>(aIQ!dk8RKYp4*Zhg{oq%?nIHOe;Bj*LbQ(^dE^yo5tZ+S#Vms{bThK4I zL(V*~9dhPjLj1oX^?455zCN85y`KMU3fI@~-QP~~>ieay3v$9$e^9vo{lz#q$1C-3 ziSHyh^GrTv^l9kN89n)o(PyAPZ}j9fa64Y}!nI$Fi}`;W#*6KfbG+CNImau9xR{43 zwhv&x`3}Yw!Qx#lIiVV}F;heGqHx9eQ$(7vm!5c+t=EisW+(K0bauzv}<@q#e)SE&76Ry$(|r{#DVB z3)l03$pBvvo)@1L;dj`1NcZMU|&4*d`~+a(_X_wDL+;No|i`Mg2m z831R$*60sTfASIWQU9v&KNA0G;X862Z$|h%qMsH1Bf=ZP|4R6p@Lv&rS@@HLcRbY0 zgT9ZD5w3Nc75*&o=@YK|-38&AhfU!>EIwV|YsRDb=@zbe$O+dx zo(bW49$pjvCeg16zfSm;@ShWYMY#5N2zAFg+{E?>)~rMFF>qVwQ^Ixs<$mesRbPfZ zPe}64{fvCRBjK!f&d(pme9rxh{IlSUi~JwJcMuQzHT|E7e;3y57dh9AXZo5%zX`qX zug-7V;Jg0+mFRb@Kll}+p8=1sW<2C+aQfGDYvzr-V)S)vj~PArq|rC9eb(s7&x6}MvwfRSw!^$JpY$WY75Us2f1Q`E2-o*L zQ;#HhV}AOv-HA2xL*51MW+92boz3vRE^OoB6>b!?x-n(>g=z=8 ze1BW!E9f~ctRJ>R{_CtC@lQ)Wx8qNlCEwrjZ8^Uh_Xck9acVr1;LK-qRic{$XFkc# z8hr}-X`?5v8GRc1Iin|E0=M~G7yjqcU&iGh)_k&k&%K=aWIN;^Lq79}%jTy5zMG%D zNq!E2@8$=*X!I%Y!$wa&Wb|q1M~t3)0^H_jPWbmpein>>1ph_jPrhXIjPrugldl+k z2L2a~o}BGGZoKe3j?3LlMjPSy7b*Ll7m{938!A#h*+`Z<;j(d%(!2b}pFe{vE> z`iaS!`6QnQr@jjPg3*&N8vPXXOGZ!LF#2ieSB;*09o+60ov9>##yJC@ZgBRCd=A|E z{FCIf5#ZYap4ykRYkj%{JQv^x1AHLBM*_SW;O7E-F2FAY_-cS}1o%#XcV5#zZ`lCv 
z3-CgK9|`bEfKLYaOn}b^cs;-`2KZ)xUkUKT*S62wi16E_o+pLtyfGtO>%T5skB>c_ z&34u20yy`(8uq(oaPD{H7ma=n`b$PnzG3tW&|fxs^6n=kaWbAI==Xy&9`XXX-+%u< zdv604)phQRuRVK!5zwHbBG@p4B!SVUCQ$>4EgRH?h;5D1)~4xUh7ScnMa2Y#hCxk2 zLwX9S=f*ZUM@%$VXxeLA(n3#KM`MjA$-UQTn{v*{IT`#;^z_)~3lhxS-@EsEhs|;( zNl$y9=RW`EzjfGq|Mt7yul2FlT6@2H+3l)S_;CAHt?*&IPNC=atzMyLJfP4ArJijH zJ>!iN#@fYlnf5uKIbY#8o|)NnF*lRH0{mRx0$2S1a@(d0wy3Gu|k1>Mv1o z;CW8YXTt5{JC;K@6)Wz?cG&n3zF78SciC{_KQs=1uyxsRqOX$l`xSh(#1ANVjl}yD zyjJ2t1z#ueV+y`a;>Q*Iml7XP@N*Jp|FIuZC!1zP{xE(_B67M@Fn&VfZn`*pMuNR(nP(2%hy^&K*2A|^9}`248J za5?S9pDT;_M^3YdkU}qqhl2HDzsYW=73_yJjLX|)Wc^{rWwwemlw;Z$&ywdk3Z5&^ z^A!AZ^4w7Hugmjg3cgmJ`xLxEo>wdQ-^=qx1@DpPZ3_MqdETMmY*&|pAD8C`6#S$- z?^AH*b4H8G?Er|ye{0|a8 zrr_^M{J4UTNPIxS6Xf%$kb+N?_yq-@A@R!!o+EL72FL!nMdD^73x6>Fd5LpOG~*=_ z=dmQ?%O##B?O^z?<6q(io}Z*K7$fp zrr;k+oaY>v&v=)mzfYmhmN>5i$@E?0Eqa&q2jfpkJVn8uk$9Sd_ek8W;6Ii)=O^?3 zsl?|f^t=vKo`U~e(i;liC-Gth|E0v2DfsgeU#Z}~mbg#BgA%V+@FNnhSMZl4-l*Wm zB_2@l*CgJi;IB)ZvyJ>l?=yE<*(Flsp8YvX_8;5|K1bqn6#O=cFSFs)B-K}1z)A%_ zCUKvFr>v5|jhnWtZSa?i8#gsjQ}gF>aBxy{iBCb+OA<3~rFK8-+Zv+yHaHh%<}TaH|Sb=7AWvG$yv z(d%&#O@5nfIP+vXLGr0d;Pfp27GNa*X8i2i-^v^KoofG05?NN^Th=(IW7*t9SPT?d z6f?*`+s5)4D#AfD{exl4Bl{&G=}&pY=~@3T0*fa99oeC{MyBT->&@ri!~yYC^>?ka zDEa=+^emsx%WLh>K-R~nY6eu>iN5${ORKkKzn(DKR9)%uN3`_S5^XS3js8*7)0Jau@owhgrFS@}F_W7n zesPs7x)fZ)6kO*F)U6~)GrB8NjsA)hqra%xnc->HGm3=P4)3%F^|IG4>Y^v4Ydu$* z4tD>Cu6r-&PVW_+mi{t^2laUGCEejYr6+h3L}1dIMd{+MeC%I`+B$T!+$)a;G3^I$ao*;pVl7MpKM{fo{M73fhe|knzNqBHFDd%hPR#UDY4me z1MKm@o>{Pm>>_(+!=9zEr3ALzDQ$V#v}GJ@Ni=Qw19U$HTj2AEE$EYes=RsF=}f9c z9xsF~wOT^SYWSiC=egsYB?0Kqc|-XW(i3}{faSjXz+;q0z^e4Pk`{;9@GfxQQ@hifpm#dye%Rffk$U?RW%~5mA0ofsUQyWn?l+3M z->+EMJzTq}`@>nr@Rg>cW|9s!JOflk9iuzxyinT)o;^{vSnlfPwruRu?$y(koWW|A$xF>M)yULk!7 z>e%ESvV-{KLuUb>iiQUs+vyPPAz*y|9L~v}D@{M^raa0)J3_jWpUIB5p@Z-k{iOT5 zun9N0yD4wpN4|b=(&+ziQ(X6u?)1Kg{2WGpW@d{GMPC#1+A>5bTEPwv__eEo3(oil^d;SM7DihR>`B5hJ9rLuG$FD_9t(~PSzN50RX>s?JrvGXBFPG{M z+BNTSvxq5uJaCfi=@|}}Qk&{PS;Rds^+;oRAGK#i 
zPSM%coU$SZ`nEN@R^&kEZ0OwLaMWH>bf&h9d`$i&pO!%`>w)n%%2R63J-~2JS^ffi zUl~6+Nt4RN$J&HMzv|&I6AvN}2hD=U7&InDl!mTuT0km-;>3xT=rZ z*@e*SD0HlbZpF~80=h0V^@}(4+h^YMobBh~d#kNYhhGv*{ceRW^7K2!92r4%y1E%Ne^26Kw``Ao@&~*s$mGqVzm` zeNLb5J*!Xkp3x_JPb1$?!KOj@pZtAQdnL>0*^g|p-KalKv7!4)xRm(+UK6RUkmW{Q zG3!>c*ia1J(Ov4e9=;)8d^pq5S%f-iD0PzZp8fmAmC^hs9}mE9q*K3`Trz-b)HY0m z-$KxreC2DpweS6S(HSlhfAmA123@GEqD(Fq=1=UTvJ&6oSV86ZEbONH7?tHi_@T_4 z+>NrbeFk=^IC%X2A?KME0P~LSy5Zjw|#3?AC+q=!_+1}=ZI_H>CoD# zeqKhNoYsYx`V@9so`t?tR_;fcrLscy{9a6p*s~Ychk&J;_M}RC=JxG`J-eknWCz(p zcG>MY2fkGP?*~3=Q&#&OX}99OjPmt5mHo6Hq&q?= znOGnuS^1_#&$po#Gv6NhU95a-p?qsZzBT^2`Su0X9+W*UgSSSrr}ox7yFFa~?0H80 z7s{t|C9*i)?Sr`QPIteUPQG||#Hx$> zXAAT5GsPrFZehN5U17fP;D>sNcF?;Q?ZNgLh56gk3iDf23-bdh*0r-0)b9!`uMipN z4paG!^Om8^dSA5su?yvub?ky}Bk=!q(3Rwqo)1CCbI>s#X{lX3TPylMwCVaJbk7Lh z-cEYHY}50GnJ=c3p2NtCSh|v)q;EvmTI*gzK0gP&ozQ!Sm{CIa&vWQ^>_I+Ieo{L^ zd7600Dvyb{%4_N$<KvA#iFCEs}aKGrvle+}O( zGJWneeSQW$-+?+re9?C{ea`iXd`|W09zCv|@-}o_06-!*QZ0m+o2daYebw z;7xs8(s|WG@fmba{_w-_<8R^~#eFi;^F3hXyM5@>uY>Q7Vo&4Gmyg}p{$u=hYq0Pwtvwc5`qxum$kmeCmuM5(AI13c9Q^QQ@OFJhOgg2z(1&n( zL*HC>awE$1Alekd;=woc`QnpnfyLq2>T9~tm%GMW4J-lJDe#Fy`(nR${|Yu*{OZ82mu~{2XvGBLzMk?~QlIdyl}Um~)vHFY|pJbVWWbe}2_Foc9tiCon2E8uI&x&N%eTtZ~OI zkyJuy3qV8pX&=+w28{al)Q6?{1+v=@pHV(@8AzJ#d14>>_LS#ub%aZuupjdnCnmuD znOoNMt%m(KziQr&?Z29K=r2h7^S>bNj1Mi} z(EX?Y{n0keA883<9*r6C{6E!`hIN zdepa}zA5zsX9O3R{k<=voYEMP`lV`rkNULSKb^KfOp4V%O~SRavfo#Yx_$}w(cgE{RqPw5(NIa!7}zhr~e&fkY}AIpwfX~)G+VaI1+2hAmXk{z%$ zWpNUTLZ$od;F`%+>}7s_@3_0>7M zF#nvM>fMd{>&N-ItvB>h-k z`?eF~f^(>YH1B)nN_cK)>*_ullTdpVhjwBD+KB*sVBO=K-b27bsB=xg=sr%ghd@L3 z!~1~IJuc22=iTdfn*D||(CweWJAdXY>7*O=+d?>Rg-v8nfv&A#oBn{YG~4vITd!+W zZQ}bgwMkFv$)-)hY1yf#n$qk2ViRw0aG&Gxz}uaD-bGZ$KOFVrV|?g5@> znP!f2X`W@zVQZen93QO=mtr2n9P?(?`phwJ0Bu4M<|HuY?c281-3Q;I3<rG)z^biX2f>7KRpQ|i15<)7~9XHgDlEJyvBDW%ur`4jqY z%fqFun4c$|X&!}j#)Gwp&RMv(E~2><^lP$lAJ4))mTaK8P_m;A<;JpOiaDmEc@(=H zS*qXV{0Z5S5#5HytNqUNBy5B9yBvEhSsKglE#&t`_`UJd`u)8jrJP8A_hSE_xJK>P zm~v8UJ{e(e`F?j428Ml2n 
zI&OQ!2$vdi+!jK+PtR=Z=`Mixo6H;i82cEK`!c2-(1ZFhXP`5+5&0+=)IT8|%{Bvl z80x>=0{lGTu))O9e+i-Q@>#Qt3C#6U8;fy-*>{Qao|)+|$D(h-9^kbH?;sX9)8^ZdEMqlPan@AJc~XHbTQ`% zXkK1J8$t6kKI~_bUd`eOPjm2z)+^!XD)1w{s88d8UZ*j(C%s6go$;wjl_<}o*S>4T zB+{$Z>3-swFBG4A?r)ZzB;Bm~GOgB&vK7+x9-70(yBw)_aNik+_9q_g4$VbRLEE!i zOs*|4b+hI$XudWZ?Lgsh_!oBHyf9?xuybps<(r|FvA#Jn);CMux8~>Yu0ksO&=}1R z?~xzgv&+#gZhzO3^BinzFy^+u0Q(wXE48&P@cAQnP6PYsRq)imTa%TC`i*weQA_?M z|5U&(n(KB1vv4nPw9_1Cp`MyV&qGh^Q!wtyKpmXggTlJA4D%+lkdB@^7U6w<5ancdp4fnQF-v!& z|LQ}#vD+;+1aU4PgP!Z`_K6K4v}L;kVgo&!z_{~qA&h>0p8@j@k2_pOzUDMg2k~yh z0?fU;HLn}X#?!Nef3fJdyo2N4TgUlS&QR}rLi9W*2lKfoN9T0SI|#l4eY?UmE#oOC z`U1(pbmN#0V?eF82z_J^{T6Mz=M;1+6k3VMIfQ5CV$#2XAK7;n<&tc4!A5;N<~*g1 zEwHf~HnqUU5ba^3fj!&M0vm&{u|?YWBj}0Gu9)pyk9E9dANpx=E|E|9wohVoFMrB9 z&ZqtoJx3KD%O(%zdCtQoO*8t>;5oJfcI|_%tuwTYBd`tcpw!|eoJm>9htn;)0{4Vq zmj=6v&_-0a52f$JFHl{MS4U?iR){$7_Uf#>laNFC{z>^w{U(|#Ta;{c?-u&SLge+r z+0aeswM$^DF`MoMT2kR0jA?|husNci-aoLHEBLnq`8rHL$BrerWkPJoPBXR!;Xl`t zOLOU+iZeJTf5vy*l^geHN$z3cNNNS|{lb;>2HvmuC-;NtWq6;ZsG}`?wMa;UKDBM` z?db&~q1My!aC(8}sCC>_xOF@9%4|;P&NRd@>ERKs5=vhr#+5jn*W~UI@g?Kj6LLLs zcBc~$Eg<^+cy>zX+78iAc>{gT?MlDn$Y(C@m2)ze&;TM%?10FQn z!#HnNvyOK1g(voWgm$th&@YA^c^l5@)hYrKBV_w z58pbC{)Qr>-}g~muI~#t-j6=k0(6shkPSZ8A9|C{q%XA-G`}y^5>fR^r1FDjm=6f#3EImym~qsPoUuT>9=>0S@n2OWAXoK@-v9&qSMl&>>U$4FNsOeG)q;X_aJ z1LpH~AAD+{>{<3i>TEyv1Fkf^V3u7!(jDGfVd|N+(%nb%h<3lT{dgC%V+U-e-$Q7Z z-$4DqoIBm)%xAa{BL5FUC&Hkw`D`~qI8fgcP+vvqVIfMchrbTOS9Jcg&`JV3#^na; zjQ(wpCgz4Pf6Vu!^LY1QY#s!btL;!wh+ex+JC@=(7TV}Tz@c-bjS|a@em8K)AB&e7 z{RZ$CRD31MF>sPkGQrE-NH>y4W#zB$hoszXk0$4KD|XxSA*kAWZwz~VG3>33VQ+PG zd#P?@;{GVa?sTpj?z*e}f-z$+(K3w(`H8l3!(am1;3$5ixoSAH!-GA&3m1>_^a6fW zE~CFoWKMcecg_DN)G3!NS1+P2;a!*I1zLP9Jx4x^e#L3@EAT!G-jQ))?kX;$Dpk)o zjXp<(aFkTw_#Dborl!|^2jwh;xfj&Ud6bu@F|NToG7}Ml*LeYb18S$Lw3PW(?!x?o z!dW|rF>PgbVg7EE<5nE|atblU;;0Qx+nrALb|3DSaky`Hp)7lHg6TfoE6EOz8^_tC zzbFX+uY~^eY+SgHz<0vyn`>P&aIHL#uN_V=fAr4WebD(-CZ3z-{3f0JQRIFBeHaJ& z`I?vR?eLYkv5&LB8~5B=Ja;I~g5QeZw=B(d>rD7bpziyrU9R@!Wx*dFvk!*;v@FZ^ 
z@wN@x}Vd#h)(Pu0?#1w;u+zZe}~AK|3aLYOY>BqPdy5MJp_NzeVFDzgzqC9_U(fn``kz2 zyEyNI@Le39=aYXPgzqdL;dlY~0r$bD+YR>V3+ZTEyas$n&x6FZf#rkP+wG?EhBDRv zBFb5UmNTEm=7*p!e45{a-){U4;zv3cVC+oq`6ov(dxZNA3;TFQn_haSywj zXe&fQV#OF*-D}zjp9G#*3$Muf$lOhvpYT3u!)! z@7-2^#?FuCjd8t$`rvP(?k9sE_;}m!uB=!Ko#Vw^(Q5Q3P5)Bv4XDTBLA0;qznoiy zW0Z*vmFUy@@V?1z+|NFXy+=!!bq4RKwV=HJ2>UMKm{o~A=}q^D)#xw230atTUS5u4 zn%AIvCABT!S^8~}P=fMi9#^3axI@Gzna^L)UQNOKI05JtLO<>d<}FSGJ0+5E9nX~D zgU~f%IGVjl-@hx@`JE-XzlATk&ckMN3+vjee}F!3!Df1o_9E)@l_stG3jC(!fM<>{po{e8C+Hp^knA;jK`oHV`Qtn-7Z(Da5{ok4M<=hG!A4ETY$8=+B0MCTpfPbjZ z@5b>#ZCvetxI`)1>sfoT-;H*7E%q%UVHRS|@Z1>p2Q6;akp%QzoEVb{F{={CWm;S< zoK;$cbZ6p0uZdc`qq15_(3_j&sV(3OwUX!g-b!pY&}>gL+Ux z^Z$N~Ew)R#T1`iHKCt{C#unuNXF#)Cv-09$Tq{I+YRAlLBC!N=%{~^LCqM@ETdFaB z+1K$ScU6M=vReDyN3cUzB zIc{kFq@_mx7afyJ=sXeE+kpK6Wp9b*D$&r7Ex?@fx6vkP=(`r+zKrKl^o+fvR-0N9 zmqzcxBziTo%s9KNzD4grP#lETN$>Rj3D15nVV)w>Y5b`*nVw&xyy}z8>lbm2@Iy(W z6k}~~0gnAR-iI;q(dMG>(O7@qNYTmPW3KW)(VjfhY&=$r`hu~@)Iz-gVGEwyV=P2t3hN!$FG`xzc&C?W=$aFD z5*^V{JI7<1_cKP{+aC{~{ZyNr^fT0}30h*(&#~XW#owpPxy~@^{-fr-J-+=>E#92( zq-Sg3e}d-J4r}9*3LQyFMYvayul7qn6#?I)jp9!MN6_V~l_KyM+1ff$yh`oN(Prlp zWH)Snxdk>qf_X{ke**Wk)Wx_4zMToxRed*9U)L8#n^Xr~JR-gw(QM}P6dE%=>k_xx z^W$NX1&s3IP=dJC%7;Rk4}~XxkNHqFAJ$-ucocZFe0b|^Ywi={npF6v5xz0!Kco4E z=Id0SRK)PfH!VG1&1RpBi|&)R@VXUpQ9+3Uw~}z#B-Q& zwQpUr*;nJw)2pp_PpO@!yeFR^A13a>dmr@PH2T~q3tBtPN2<2wfG_c<{KvD19(x(^ z;M$;5yeja+`;#wI{!^a6b-}Wa{OiQLT{hupuQ_$gXFzaQH za6k`7JB>F4(o_Aw^OzC`%6yr;KcUSip}K;4Q{vDZNwKN+WSSBLi{nC=I>CiuQWZcv|(2Jg>M+lLtX2jdZwY>3S$1tjQug|+mZda zmWuBI^t^-ijN(|to6yDn;35R>NUA7MPk_Zi0BVLf;xtLFLVY@_$G;d;Xs?91^?ua$Mtm4??(UE zmnHhkCmH=aA8=$eOcmHq!FyoVyNJFeV%V*V=R&#_&qHAMzt9chb1KaDv~K_2OP2>GC4976s$=>QG* zAH;QP$3DcKe8WEdv2_mU>g`ICecnjx@uV32{LIOXHtJpUg$smOqro>Av#!Co+#I(M zZv#H!Hj?Ijz&*8fsyXkuCIJ0#Zt6hKIqZ1{9#Qj-=Z-SlU@PpLQY?mP9u4nES?xCR zvDIPZ`nOqm>EAY*mwtzpmw|YaKc%+Klpk15zPSMTNY}s3QIuP;1Y@gaEra~j^?=C0 zJ5&>W!kRCo`U#z*ZXwJ^a3B0_^q;8=iZ*kOW3CT8k!UC9J>`XcevrLU z_s%^MvpS{F^T30_l^_2@9EAS#P;>vkLSKC9d6t+reI8z+T+Wz 
z`kZ)IWnMYvXbY*oeOMpzW1hA((dgei0rRfci(yY=#-x@T#c;Vp47VnV{>iW{fa}^M z%1_NpWhDTd^32`b^#pX?>GO(VibD!2WyElbc|OSUffw1v<<5)~G~?=CGV8ouzb?$z z_`Fu$U>JN3{VWevXT-+WH%E{Fc*^U*e-&iPT_DxiDNQSe#_@2`+- z(ogC?3Hm3J?!e482<=_}*C21`y&LAUjZ}8YPNE~dLJKu>EE)P5`eLiEwi#t?OuYyU zh39^pyeDDLeem-yu^$9)&KK2gnjawjJ@A{EH#duZ4R(|6ly5XgApZ^Ue1RZFfDHS6y4*VR(|$; zYpB|2M`{`>sv`+ER;{b7+URZogDOr9-eG)xCoJ}3zkh!itt)=mmtv3e($Z4=Fq`-h z_8)x|{s`wE;rt_E&xZVB$S;QcV#tT6a53Z;Lw+&j7ejtAz+%WQhTLMvEr#4;$SsE4V#qCq++xTr zhTLMvEr#4;$SsE4V#qCq++xTrhTLMvtb@Ed$g6`qungBhULEAsL0%o?)j?hz?5%^$ zI@ntWqB_W}gWNjEt%KY;$gP9iI>@bq+&ajugWNjEt%KY;$gP9iI>^OB0QljYWW!$6 zKK#CdUk`pbhx`u6?|}Rc$nSvs4#@9-{0_+Pfcy@~?|}Rc$nQX+4#@9-{0_+Pfcy@~ z?|}Rc$nSvs4#@9-{0_+Pfcy@~?|}Rc$nS-0BnkIIZZG8aLT)eQ_CjVaWcEU4FJ$&Y zW-ny+LS`>y_CjVaWcEU4FBtSfW-sLRLS8TA^+H}R0OSrp?f~Qtz}^AK9)Rot$R2>~0mvSJ>;cFgfb0Rt9)Rot$R2>~0mvSJy#tUx z0Hyf&3B39)a8u$Q^;) z5y%~Z+!4qff!ql)fMA>wKCzi@)NxWLF#rbNv z7OshuYsWHTJS_(9j)CXIz~{uk^J3se47^yu+1_Oe&h|ErHA{L}6PMj%FEF2>82E*( zvGnYp;+bP{=Ce$}nI5-+F&opn6rAZp3eNNw6rAZBuN%W&mOxDe6rAaU3eNnGDLB)o zTpyi(nu0UEq2SEFSizaTO~IMIL&2Fo50m-!jd4T4nNP8TGyQP|XZisJXL=*YE)~5? z^HdYX3eNOF1!wwW3eNO%a-;LlQ*fqVso>1tr{GNArr=E9q2Np(q>+hjWBOwX&h#m> zY{WQZ`ZNV+dKY!#Y#Y<3C^*v}_^gc>hfLq6;7l)Wj85-T@Cg0qV$dr%(;t`&01P;jOXEr>4nLJa(J417qzS+4kebpCDyXZjokXFc1Z;t6A! 
z|5?jW@o&IW@Kd+907S$82%PNVdOai)j)RmHLoggb#Tv z2JE-nB+mB*_Rm)&&iU(>a_fOdliL&ne^~P2`=cTG?2$O%2lq<+A0^K1F#DOlS4;Zy zeV>ctIf?UqL1wdXI-={D5(A$BocMG3^htddNPMoWUo5vm;(Y&dOZrxc^L?{H;&`BB z^5^z}?fR9((>xYoNYcLuJer?RNcu)uo_0;Q68u5p+;4mm=d>lFiyVzlD)5Pzvxsbx zAFbJL`dQk`{#+q(?r-dn3D-%S`%6s!pv1Xdnj`i3zQp-H!sYXq66f~61Nmxh*^MH* zQmo#%xx9?WDrIZ^^<^7Zm-`iXRn58@1;V^X*~Y5XHS5(*~9!=#JC&309}B2`t;O%)s0W8iLzfFgBu>np_CsRW#RrlAg->53T z>3&#NzIoNEs*Rg0&Q%*Xu3K*%LLyiy0*P6@zpkpGK~=>RAN5MaeF`t+Sv2;3xTS1U zgTG<3)M=~bCDUbVZO3CQtXa2eeVKnK&tsG8Zd=6HrEw>Ofpq?LJv#H*XIlD5-MCF>9`AlG4TeY!Darvf(N~>5(KSflfO!}1e z#wZxXfjp|H1s`m#S1_{(j;TxvLA3ft^?L{EJ#Bb!BFe2p^Y2pG9udCUhQA_lUSEO7 z`Nt*B>nkumq|o#De?*~Ye246EsQNrFahA*T1wo0kT*gaXmeNem^9Yp^XL`mDNSx%> z+wA?V4Zqih<2@#GQ|a-1C4$@OD-%a?JO4HtZl|A{6rFyh4Y$)5Cy&zG@yZzZJ2CJS z1bEU$cG>x)Oo)!pj)5sO0q{KPj84tQGhE(3{{^aY- zzfsC{&9n}fKhIYtOPu*L&U@lx>+K4j&tyJ4k6EhlVVw6WpJeH9<`a_VX>!l{Fy19` zve$lJ>(BTu8~+Dnp3si3tNdS4 z`15@0fWn{gPz?STV&I%kl{-Vuud-b{Z<`}=wu|vO5?B4k*H!*Y75+TWyHerLIPX2UIJ380WppXQ#r4pA&Q`d>H4w%7^E#*e-s4@QQb=KN%PD zIRXWt+53BZUFFZ~Q80gg-jE~taDFkqGzNdZuJT_rdu)B-=MzgM&ionYy~^i#`TT$OFkMNiH>k=pa-?Hc$ zxyT%eGk?Y(ontX2{>?W2d|l=Lx}<0R{2V1DapuqX0{J{e<B^K`OPwkvs}h2B~InP$tJf^;$&}w4d0>g=jT9e z3V+6v8_h<4X&ieE7qy-XZ{TXkw@mXurvrFN__U={qFwT4Or*7lJb`WmA z|FV45|09ar$n!1ve2VpFJXPYV|Fb2|e&FX}ISL=f=Sp1lv!U?e=Vyx)K8)Af_^h`1 z;kXUA`d%zhtc0Y$y*#AF!1E-|{^93*3nb2dW_+na&(Hss zDfEo5l(?EN_bPmtzfa-AIPcYR!gAO@(!GM8KeAo?+;NA(pK;!+{EsVo^7F{o6+VoM z+eiIhX3O_d8*Xn0DkaW(@^j5y%Q)x1T7Ed+2)DO;>~F&HU-0`z%#ZQNdqtnOoW=g<=hIG!vp$UTUe#xT!iS$* z8wwxBizQBev$tanHvF5meB^YhT&zd;sn(^7z9ikpuSWM*TZ9gFxbdU>h{w;q8Sx=tgw!9WWzoVr)IuOf;73O;W6R%xih+ST*_^x6Wd0z>>G6t zDZT6-MJ_GB&vxQ&s)}ncin5i~LpnzYT#Vd>P*xu;M31<4K4w#s1ji>X@%>&$h-pC#NLc{iZ$lie z$VMEN8!>g`@twgO927b?m3(xz@2p$9b?JNz5}~|5xE7AC5Vl zhIm3ww4baY%u$4FNOIfICeqcT#4 z@9?-Oo)2XaeTCW@#ICyWeT__kZ}fq8XmTK(V)qPOe+PTi*NzI55BfbFr7TxuQ#sbT z9FHj=pO`jhG;Orp|vuKoFpsc9v6y-O4tH^^NeOrgi&32@%K-}YKIkD1m zVWE7sr`tj04e4pW18MO+u1G#dlN*Qh^j#P0XT{Um+q8YlD1BVaa`K7kOFuq6?vouZ 
zd}D=v`=jKO>+6)>6;u8vrvK*0r~kxy=A-mh9gS8_kPngjNmms8MJ1)TV*R3}x7Sm& zBg0YaYBlk&>MG|u?*qzrzoM4)I!e#?%dzE$`Fvt|uOH?uUPQvMa_6vv<5GYr2t7$2I+?`C{a!c>gt}*+yJL&uP;A!SHzTZyk5KbDxH{;3Alpn+&{Y9?_d4{o2 zDy60R0vk`zHI7N20s4TBSZV1i=B4tXwI^vUK$6ejWFUUD{tEFV>f8e6ZiHBiU&3vFWDr38Fj6>hgXS$#h^d4@)kIE{2_pJ)w+&GOj zTkw4ee5Z%jHBIw&q3;j9=K7{t>FC=r{!G*ploL0uQU1A6wn9l?&V?_$r8ClQ5!#&j zA$%9zhjgL!$hVKeb8koaAs^89DT0nvvuyj&*Q9UzLw^1}*o(i?wr;)Ke6!NMc70`) z+kCGQ?+;f-Lai4sx@(cUqPnW0cGKpy?&~TA64Y<3!aKDa5vXk?y3b!z=P$3T617!T z_2!@1=HJ_ic+=*Z2D)e$vuge3b(Nx_srmvzX9(!yM5Pf zx<`y_zUnwD?Z6x7e(|_9VEkoA@$2h-1cSKEl2+ADG1F7y zYu^|@UjQbRv`BW=7 z)9;FbcPTjY*{|TtXCMY1QgG&TLBW|%nspE{jd8bvGoKsfaG!!RpK1kXKD%Pz zT?)>8_A5B^8Hj<06rA~7P;lmxrc^P;-3rcpaul5Dmnk^YuT*fRZ&Prl?@(~2Kc?VJ ze_X+tUMLg9jJp(^`J^Z~(;G4HVg+YD%M_gX1Y+QA3eJ2w6rA}4W8lXWocSDAaONWt zB2~gXVBDqP%qK;`nI7{?V>ZT%6`c7jQ*fq7A9&2hc$?e7}M-AAaATZD2n5Ch3@s>2nmE>D$JS zAs38y#K3pOz`J7L`(xk-V&Hu-@L&x5SX5l86flvB0mY^9{qckU&h5W^oe5v;vG{X+WV}V$Wd{5$Bu9*H866bb?@&CpAWqZi@S>P%7 zsoNg`nDK~KhMM05BANu)O*ps1G|-?eL*gmYbqz9jLWq;t#tX^C@xhUq_$INyI6pW?9OQXC^~ zjL!xh&91x{_#(+CMYcc8=U$0h+=W;w57x)v(=6$^9b-Niqgnou<$?3%XEFHvTGC&T z@|MZ{zs8_HE9rfbZjsy%OPt%EdWomOUuL;d%Fld>b3c&v|B}SH-@Ew{xLCeQDX?BN8XLbixR)9L-yHJiq` zy{fVXF@|z%tGXvz>>#cz>O7aW%wZ-SC5IP zygHWEVzaSoQ^Wd=GH+`(A?C7TOh%g;d_r)j%$SHH$HsD!X;H#Y)qqVVv%AssZ)CFI zszUhjv6ZBGtUS2Nrxassb6&&x&2_T=%iE0L_hYzy;kwN4$1u+KS*GW4dXD7B^o(;l z4ed6|B2vCiIO{^VkN#j|{`;-dNX!HCXS`lsXZi#3yg}}no^gH;gx;&;VXL_FZFcf;mizTk=$^2A3 zIi0E}_d8hbF3Gn}k;{0a#F@TJo;NG>jC1_6sweYP_2hJ_o_v31xdT$(vx;2C`y|fv zA$k71LeKaSiK}`tKUGgor|Nk@k<0TxmlU~-4=MC+*?5d7^o%>@byZL1r|QY+R6TPf zANJ2mc|Ke2Bjr<`=PC3)dA>lQXMB;wRXtgbswbyY_2h93>$ywvtyAPO-Y9XV?~>=u z3O(aHB(Cbo{8T+TovJ7I(^>9-wRsKBx$9#C+ zuUg^5IPXLeKamiL3r+eyTp4 zj&S>ZoX6R$ry<8psdCT$XWT7urZ1M~*$O@5vn8(T$^2A3Ii0E}&qJ`>faF`M$YtCo zai(vR=UCHb%zc^hI*F@#GCx&MPN(YGp~wx&b3A(;BbV_mh5nd4->c9w{;b4RJ(-`X zC#O^OJg&&)dD`D9av2XP^gMriMxkf?lEhU#nV+gBr&IOhc?-@*L(XHQ%6+7K%5%5e zGkvi<&sOLepDl4!PnM(V$>~%*d0vX;1|(lBVl*b-8OPd7=En4G^1NE1XS`nGs-DbG 
z)sxeydhS%@9+T(06uFG=Rp^h)^Zg1v;|CdE|6Jvp7KC(kpne|Vn!bww`Y7bMQ~ zJb!*kp=bP^7dEt_EVtNYMUMnLMAZ&@(<);;KKHpQk?HUw5{G6gu;lp^BLeI}Ho>k}>KOk|E zyTc~;w>I1!r!=7O=jR$Bg+JpX5?B3(SN_e7{lm{wToPyhFrFfDRnIhq4?ll#D|{Hw ziNR-)!iW7?tngvH-o}TXh49vB!|7QFZ-E&6`Fm1qFZ17T<74;#WgEVQnPWes@aN}B zBNAu+j60L8#N^M-Ha=-K+-?_t--`2#pHJmUdgjl#VdG=xUuwhev<lfw?jNWAZsy zy~3aIMjIddeAG@G{-BNjlL~+KbC<%O@n;o!mfNS$Gai&U`O~h?fDO0Xd)bED^%Tj` z{os-~`j0Ysn`Nnu8Z~mNk#Tzk^f76XOi5vgliBmi-{Ugm3@^;CmB37K@Y*XbQm-17T^s4`s;kwF`l5l!C42p13Y3bZP9%sS3021{8&Ew zpKw)o;!#Z*i*1{7QI#Jn&d{KY!S;1)7x}BErwhCA*7T>1GfbHuVI>Un&=3>Ec;Uvo zxo*5`OK~z9V&w{4VnYz`{B|Rrs~}lyp!KX_)C%*ut0)(70&80Fez1;r=&^R6w?OFT z8f=akc&AqA-ZHGo){5AzBD}j#?}LZ1M%+xS7v6$&KJY~&HId%y#r|%rw|kEHFTfAkm5Q}!y+lvz8xtMT z1VH;D(ua|j=xNQt#h|ai8lW?!y#kMG9C(kr5HVJV99T?fyLdGo?_|f}=fscK3%--$ zD-lO^y5ZUtrxV2Rc|F0a<2tR+bqeng7vb3D!n?>C#bjb_=O(Ooi#2e`XLN4uAq`tV z#;yp+6^JbZl$z<*e@EA7qj*T{BJq%uLFe0uckk(a=lNKh>OQP9?X=<_TQ=go>zhp; zJ6moZjr~OWRQ$~CO4C7;pJP1i(P@1&diUOX&;3f%q5U9I9}N;d@+b)>rr* z(sBcclYm_FJ^wD`HI8ZRL~jV|nGx<;jQAAt4~{8+Xm9!pbXd#Iip#(|AQgR`)>ICU#aXyc#P#sYn+LRMt}KJqSODF z=)4Pl5L0k|80S9~or~#wI?hkw{DkO4S$$l52Imf~)Bh*Y`FWiGJMs84w<}G*>~^GB z@f~Jttr-g=dMMw(iXKNI{x`6k@|KRLU{7nob$!7+-|0h~jD~XW$69)XwH8sVkd;?K zhZR53y3Hz2h_UTH^<8JTa2s1r>ssfj(^&ucXJSierWIE*I1}YorU6fmWu zRL0aGP6aV8Xk#{DopY}S{fjzUf1shvabDoIntBTF`XgTTkKaZf7a3TG6m(^XU-B5x zu_N#UrVT+ZY=f<68{k8GJp&$fyVCTWZ9PKKL-}p3NvQQqg`X&v=xeC&X%4KLJ5vnj zPYRrvhIKO2@SBb`HG_y<7HBKbmJWO1zXHU|R8zYLK5pplg#F>qiQ&@~@Y6!Aa}eb@ zfS3gz{JjZlK3;~NgP${o9d6L0o_P#oXmE9Ww?NwyYM+oxWj0hI%r!YfU&bEwC-gMR zC-@H{mLs$Y^=-S>`F4xwf4{=j9l$z)6gTC@^)c~G@XeK`mrcLh{c@%0e?;t;>uq|! 
zvY_}R+5JB9gkr1Gv34NEQiWSma>I9u;oDF~LJJ+86)3AAir2w0oVzoC<5QCzu+h=U zSO_uOq3x)nBLy4U;Fr+D)-l#2C>52kWjpF{3HA?T4}Vj=&qRAo$Eo;D#xDby?^a_t z4nE$Cva#bD(Hqj8J({Y`S&HOa^POo)C*YZtB2V4C2Q+yiMUWebQT^`h@ zdkxA@q?v&{Oh-AHk1~>u@-d9OBR(iAC#E6arlP$39nxkY-AwQ{@H+*67D4CcYxGCgO0A?m4oza6w=>*-Py!+e&yj*$Zs&PRrj#;7{eR24#`T9^LDxEN<2vxd!y{ ze?xQ&=+vB~=lYQ#k&Oft0 z&Zs?x{tLwd8Gj6$_n|#`SIZ$8)K&$o?;)6aAyz6zOyH|#J&FVU>6s4HYbx_II8LdT z>d2D`Vj{KUA^5lOmcVicVw}!^CT^FJOS0_wF^Kx%!P=%oi!z1s>!^Ji%72n;>WtsbzIv? zW#ViL`ghPJQ`W02=u#%@)epeis#lA*LQX`l!ugg?#E0V6JVL$Og$1K|)dU*4$7jLsnvC5WLu-uE z)=N8EZCI^^f$jgh8{NmD9r{65|BYf=>E1#8LTZyy7nk3ESaecbjrh18x+jIdX113* z1GuM9EMN%r4ehd7|NY~vJh6D1{*CmluD(B5_vNW{zk?3b@tcCYBsqbt_YF~hpz905 zEF)d>k#}@$h1U5ww8J4K)qd-kBIvVBAoo<_WK{`{-W*A?!Tj79fGa&of8P|p!H+P2N>PbJv5cSe?nu3sCrJB zf#q8_ZLX(8gzRVOwL?n?#Us|NTWztwt}@X= zue0J7H@PdZvPA`yleihocwOafh;nR<5J$j<_3PbrwBW2tU9+xY{aUPbQA0tF=BgN` zJe91ns=lEbtW|tt)f&4Jh?q1*njw;j6hH}sE2<+_SfP_+U1+Wvf~ZaRrfPrX`un8> zT7aYmE9!2dw09J_4fl2RmT9BbE#zFV-nk_*YsnbNBXS*YV}ATa`7WzWw_b=M=u>(W>C_}@6w{?hkOOzasiLc8=?kN$giQOD~u^OpR#54ZmN z@xa|{OeRtO=)jgm!T-Mssll{n{j{nbt(q!p-p5=2=f~CBXM#t0>mqLS#C8;I-Bh}) zM`pc7iE+QhX8j#SdG1*g)ygHNuUBwOLrt_PIMbu^HD+V_0R?Az+~3A*Oz*-C!@emeik1IIqKcL_&_p)`sY6`~rJu|+*xLPC_ zcPT}M<)$cjgkG6&jnFGN(~Bh5#M(3Nih-xZz|&&j?ihGZ417)uJTC@r#K4PV;LBp* zD`Vik7rSbJSPS|CkCDu12V&GLV@cU!n{}2OzJ_i1~ z82C^OT*rMen!Twp@M~k>6t@_S&o^V>jWO`YW8goIf&cFqxINAhJ$ZzR@p6B37G=4@ zs9}KRS5Z?>aq$@==Lb!GJ%*Eg((FpGrN6D_N(--vPkDn6RSUAh{z zOvMhwwJm%}v86~px{9e9Ai1ji|JI6@pAcp#GdjwGmmeR1iFDCw6Yh7kjaa)#d|hOG zh4!8{ejkW&-qZUu_O5q>r04gI+T?ko!iVvI#OXanez%9@@pYB|PD#)Fd7Rdz@Mrvp z#OXd=NsQ(8TZxn0#SCK4=~TIABt6UJ@!n;Lvws*Lk~qobcbrTyxS!BQa#vc1=3h>y z%5_V6mdoSAY>Bg6#v2uS9yd~aKW$9U_zsDap5+#a`4>l{o21@5%7?y26LY!y$za<8IvdXd^y%Scm4{xi;Kh9yp$p z=Cm*WOm zPo77pSNJgAXya3EklD8gy%#oLpTp5+QT-tLk(%Vm7O zLhq9F9M{Br(jRUFVtSaqd;eH*p&5?3B zE`|B9+@PeN0{!t{Fn&bQhxxps;H=Mp#8rL7ILj&Qhm}$vm&8?lk`yvV4D0~>tw(+sIpF0#jb0nWOg%9J8Dmd%eVdHPFch4*Q^CbUQ6h4d}S8(QU 
zkE^lszohUtB>y3W593Z5*TH@(mh`C-=lo)v<760TeK<}=&6iSz|1!xR?>Cwo^I^P8 z;>5?^e|}cs6Oep*6+USa?^AH54=Q}xB>fSEp7B>CPWpV+QrY}Fpz!ICd_oEz*5{0Z zGyNrn&o0U59fh9xaC{E?jp>Cb#)lMoeoio=&@-;a9r5!6rxJ(5IL9NY?M9BIXM5*JduJ;+KW~_$ z@Mro(5-0xl{`^vfp85C`{`}mcTA^p09>y(I0Hp)=n_a?-SH3 zc#3ptqYbZ-$FvK`W9Gx}4Yb*C`~9@ThJQsK({7gyUv0y?Y`9#a%tZFvaGyM;-2r(_ z`rK*5`)qib4G-GzuiNlrHvDTg{J0HYV#E19MsgR~@Q{uEE*pNqhA*|@mu>i$Z1|83 zUt`0$aFg5;8}5?%%=yCeq$xJM)<&OZ!~HhgZNurXUY_UJ@KrYYIX0XQ`F)5y1s{;} zJAc`_iyN!z{GSlii1*z#i5u|{=&!R1qZ>AnDfYGor^28=*n+Y?TW6zjaeSVE1FE+M z19BhAv<_T&fuAOK>7s3O%Jj<= z#zadS?~CLyXP^~YFs3(h+_E@lALi`20-K=}a}c&pfVmR|#W_K)hjSn)&W^2VJN4nom(I z2*n7vM1NQBXYLjY#H7q@#|C^ernK5+CO3ZN7KeRk+LD$BYnvL%{Qydvt|GcU( ze$FF^)9l)~C?!6FV(wbeoyZ&~HsC%`xhj)gf;tJSI& z2A+j@BKTFtXPQor{Gs=^c`eGxh|l!>ypZ0@rnp1OkDd6L{wAMW=ZLGqyW8~Mwmr`C zO4C0^;;7!nyXL8Qhu(@KbM#oTRHA1H>zxhbef{aX@czD2^bW>TT#iWHMQEgLWO-H#J?PLOuhK?$Nt+7 zJctQ8txxR{`z`$iVlJO^Onn(^R<)<;Q`;#%Pbyr+`XfxaCmzf-oHnD?e-and4nG){wV;T_90fR71s-J9d(G;5bQ_m;l-_D zgU_-1Ur6TFe*JZK5ywJcy-@Nkr>Fb~A{K@6*msZKKQ>>iwNOE0ZdaOq6v>~tDARi= ze-Oun_>rDSOafwp-$qR28I%cZOm#!dG5S~JUEBAwZqv)<3hwrEEcY$ zgCo~$XhWV?h=e3h3gQ`aus)keOj_7HX$96xyLo0XGjHLdr6((V({G-69LKZoI9Zg6 zbqsM{L|7;@FY9t<9_e>RkME(_RFaXKD)K(WaoLDw)`u*d51Hqg8*oo6Jp(^3ZjN8E zuQL5+;zzt5%ocfBPLV%|^c1tb2s-VCe!0+Z5p<uayM?#~c)stKM!x?~MSbGHXC@*Ba*OYO8To@x>wSZhwa(CoB0qFmbpDXy zQV>Ukc&7gH`oJXL39LKjbYz6?v|^P9!B-QR;5WHv5IjlN(}@P+YQO_^$%nW;t_N3| zeq-k8m8SkkIij){t&WFa6U9vq!;cgCV;2|>_nyJ= zX^KNdTZ9<6Nc-bPEGX9hg6`9yJK9pc;5GLM)-Ho?Xz$ujLO$8@YwV-Oxj-mw8ooPl z2)1a5ne?P;?LgW^J;fngae?H|E8t-gp2cIs=|K3AlqVL{?{S_EnkE0=^a3S@}%)6ZkOPD-ffO zSk)8$ZKAsi{Co~E9J(JdR*2mXQdxMo2=xK|1AGHQh}QHFVp~JVSLEGuA;`e@8aBZ9 zrJ)y!@&isQo{;<=Ux@pBFkU=Ix>CLIG;1sNLMK1kNuouz>@*PbgmymP zjk-=T%U@CGui6KnuI==n1|Tmi(RnO zeYq?zvkz+&U+@Lw^4MGU8lZV-0esfhad#xL>`uU-U59hlRhHecNAjTc{ z^YVv`ZhRx-2WFA~L2VpPP z((9o-rGA4Ob#M@I%)SIM*N-|yILRd5cf)@Ur*?N)<^O5#ZQ!e_&V1o@P9jENoTw2a za+}SE8Yy5}!6vO<4+t77v=L}0ZFM;L!XQXO5;O`O57zgc%;4M`+T1qRdVe0007>}l 
z9hf0^oZC>n)AZ`hq-fjRJN|ADNZ-jFoSC$uhAQR#KYKs_la8e4;tMLPXK?4jw&>~D3TkHYX+$By?g)-t1? zB>Im)pOWT^3CI`7A^9R{k0eZqoj1=Q-uIB+KZRfM6P#PA=`6%O(wotpec`I_pV?%_ zwH;`mdd>R8QR%-)+kt?xZ$)n^*BrbiC$SvgAgxonBd1S%Wx}>&eqEf>`}!LHyj8!w zMLn92Fs;(J@^oFXqz!uv_|G?@4n(94{=*5|IuuKwZBKpWF2C-fJg=~hmlU+2jLP-j)RB`;C!zb>HY@3NZjzX5{SOYHUO(9>;NyF*03l>54 zQ!gwmIeq8C67++vXnVZi%C;xX6(w1DYf-*de)0*ax51g7(#J~L`=H?cC+By{yF=m? z8Kj)z%*S5Fn~?H^a)!9uP zN*nz(tkcJS6;DpS4E1(l&kWSv_|MQzy%H@+PDTCy{VX{nb;F;ad`cWAkUw$`r<8A5 zN1A#)vMmd3nJv%XK%TS=$g6j*G*=Y7m$l)@KL=&^%%)fm`wj0@-HEcZxCiAcE@xy1KV=no`H zVkJrOZ-?IDi=2|5F|8Zu@62WzSUv}#>!V{m zIu=4-=1_bmu;*hxJ0@+lqsQl><}|hy%ZuN-tmMHfmzAu(d|64&#APMb6PA@!MVFOS z{$goK#m|?PpuS)~p=BkhPr;uD{}%YO;eQnV4e)2ep8+55o%l!KW6z2BRQQwNPl2E1 zuRGW8WAt$mZ_+B4kgH8(JF{sC+D_kfTHTG00EKA$3yt=$h+3`JRoSy_4s1rtBiT zUt;JJ^y4g9Tz@|f9t9?Pav1L+A}gtBq^6Pf7IYkK;^Td}AHOH{#L7=*I0^nbrL+nm0`WS^8W z^xWm<^jBu!`x-iuGpf&Q`k~)eT#vrYbm(PCZOviqrL|S|k6;>DMeom&NlW?Q9Z?w@6y)4`IYRRbX)(m$hNm< zMz$?MT@3U!VZ6SsijL_E{Xf}{C#|n4>MJNGs9S!WmpzUAcMWuR5@jj{AA4b(j0N@0 zZ+Atuq~<5CeY=Y2LG&lOIJ4!nAyKz>tXn>N3(WU;T!1&v=tTE=3B=huiy6+JxaSF{ZL##fqoeCTHaA- zHU;knY0qR2E9s{lhs`PIFJFW2F4NW)y?T;_FBq6#{8P}#x4gWQx!;u3N2#C3to#&x z*!`yj&e=}jjP!UU2R6H)+{B{>GM)SV#Bc6|@prTH%h+3pH-T@+f?Ts<4f>B26Jvc> z=b8H+MLrxt`nSkFD)?3wd8PlhJ!*DWLI)_r->MQ_;C&!_K*TlQByPD*KsS^JGWTIU z)9Kehhebj6$$l6H-ygzL=%o%!hrAUNrTv=%o7~mCTW|(?@Lay{r)>T4Lko&ulJH35 zSidDGV+qvB#FSXc7U;qAFf=kb@5%x%w!t9J3JTTFjcthFK8ASWmzzG=&#pnaUoPLe zark1}c>?%<);Q$7CGkLZKYtT%on5d1bk`z&u@gvN)x$d>0ehf<{nbn^K%B<6JuO5Z z)N}SslYGXv#Ua=huGl`&TmhN4ejQ=OMztjx*%}28ssAg{_vlADkj^V*Pq-A`k^R_b zN5V-MKdu~e#i6K~B>DC>_S89YrP=lg#CZqyg82%*;Uvr!N!Qd^UjpB;5j+##Ga&zM z;P3N%_w4*;O)Jug^VtKM`FJkh%ZR_VQt_YTUU*3OcW`|%^$&b5$nm}Orf-uB+Aom_ zfz$6pUSDNU*DRaydE{aEar^X{%Wt@R&gGY1UIeBE=2Ms((`>$hBKxX&(_9fLihL$g zFsNYlcb*sIGiuDiHm?g{2ik0ThCC1D%^X6d#4D9pa-LvAeCAZ#M zB%6iQG(OhYRP(U8b8%78$IOZ{Q}b|r(_Z 
zizIUQX;|G@TeGhI!eabw-k(9nD)F6|9`~l&B8oQ$7laMz4?8&N`yHJ0-O)4)b=VoQ5&v$UrmpeG+i90yy4F-p7M;zl~;~a6$OXoxK&kv!GIXLM{9i03*e~hMjSH;v*sWXb8@6c$a^Uj;|A^Jlv0%IOFYi29k*ngy4fA z_>hA$-jsur|7b2H34G$_ip+RU2%hWUj5puG$-m#hNzZX@#!mXdD~XJkb0`E)h2X;u z&Ui;0oblqDZk!`N9D~-TD<;ijIAYP~PA|&OBJ73tuYeVq*5WG1APlVtt zA$V&D-X4M{L-1|~r+yANxHKcOp9b;URIAj(h(k|&Gz3piz)?PoHwPU>+g*(tiI=!e zmrUnNzC$0xdx_1$zYcKD-WYp46PHdE^&j3Gf}{VEk$!myE_3lS@n0K)KOTbrVF=z9 zf+s`p{UP{Ygy4S_f*%dRe;k6J48h+C!Lv{xGU+XB|BZqWdhA=7kF?=ra*mEJ5YJXMYEw|`v z6=%Hc@8X-+cZ`>Ki{dgbI&SgdbLxTJus0n3>>rOf{E5>SK3}x>&r#)4PWGFN6sMfT z=PNGhz0cyq=ads;xxRyqz;{BAG2FD{`ILfxDUITcC4DsrUTNX4S-8D^>V)EypZ)k@ z#VJ4W`I>p8XaBy~!P&oGsW|zNKCZZ{2YY?4Eq{~4hx`*Ee7K(1)dSb<3U2G+Ws8qp z4w-H@-j6#TPRyxH2& zxj5G+3jR5ZK4#HNUxu#K!tL_b9D)yo;DaG}u6|d#^29CNj+gqA{KbEBQ2X)B2j=@t zngHsL_(v7zx-f~?UI%(m<7K|^dv(x~$1eXv4n4nnUsIghPyMrOe?^{FFmaJIkm;Pu!?oMdXQF@=UgHTb?`%xAUc4aq59~oM#oM9*9#P zqECJG`LQz{ZoW`n=1ZB%N`A!I7a4WrN!y9^qlfYk&r#gXm)jI)x@cF5IedthT6}DM z9(MT94)wglhj_omXQh=dFI%{sFRxm-oi9U*Q-0dHQi@X##HkNAUzk4X`5~2w@)CbW zaq<(~u3z`7-Awds%NbXka`x+et@@Oc_-2bwm8Bm(cjFy$_|qPD!r@Q+4U4~BPv@(h zjdId{7gL;a5?}1l)2_GFp(nmlanVnim97I8Zs+@93%B+8yyBFTcEf(fDJOBJTl8kv zN2Wt?{5Q0ZQ(oeycK81>te^T#@9D3q0hrV0yOC5URWe(1GD zAepAt!TFt4@8BO*dZy2N8)64>ER6&ghi3&o+i5XJh^iT$Y;RE9uA34vt^_mW#&UMq z6?h*0gzv}Y#&S<;_`kC-xzy}V`->ElvSz#ZwZWa;R~3ZvlQwJ}ed{L!ihd=O^o!}2 z;hFwYU?P(n{{f9(iyJ5cNBMbQjtk-GhVR$#jF0s0eb_pBhF9|^gQxXIU^OmWndI4b zL*jR7<=)ME2|uXexv!g;kEH)a`KdoKZ@Os*Wa2aB7dB5PLg&^%x-39Nl1TL2yo7-kD9oIkyqarY*g&W+KwI ze#{-k{7?VvW4XsT)qL>{<{ol>EzhkLK3gzn6?3y^3ysX76>PhO{mhxKC~||gH5>e7 zo~_L7mU~G<9(+j$x@4{B*0gp&mM!>$E`&yg%=~aK07E)Y%ky~D_~C83IM!1!4xwQj z<7hA+_`EcjLrxlr)0Ic&MaQoU<{eWfp#K+j8P8{fY>2ci?$5!8f4Sc4-01j~C?h&w z6LjO|VahnN?7ZUDG}rs*i;y2J#CKl~g< z={>rpsa7UyR$=S*2jy5X1Zt{jZklHr>ozu2)r7_o8q%u-yQYe@HD2BNb&q)u*R0>@ z)vRx7SX0wD&wz77MOD)(1y-Fel8fPGC3l{2Cs{(7>*b`mb+%T{pZym<-%?Om-b!e$q-NJbMiUhL_wV6&SXctKZJfD1Ro5+ zheGgF2tFKwkA&c(AvlyV&M{p%4ju%Lm|O>!&rH!L@x{uGe99gA0JnSCK>&GujIV^%}E3>L< 
zU0tJ2ju}txJRrRsA`x(ra~&{iyaB8pei%m);%JFAb-^Q&h!l_YPxyVrdZ@!+>HX4)<6Y=G`9AXWTD&s7mhZml7QW8Hr&;)B3+EgM zfu6GPsd_D5w}oG8;m=#R^zG>=uY6D0Mfg=EV0zhJq!j1(FYyx&J==vh9GvaQDF~&T{yt z^uExcr~Gpqob*MCyYd$+PWgxRKIZVDJTk9M9Q8)}Qisoo-Y<3NDQB63lYXV*uAKKP zPB};QzS`kKIcpu9^y?fxEWGs&J>_h2aMCv`?#h{P`13u{;_w;L`==e8^0zuT<&n;? zt$*@wSKO87fa0vDY9<*sPfY6XoPe!iJhYQ2Yz7=YAJYee79PC2f(J|=C8u|BbZ$a>>GZiPK%+#Xz5-_7;LTsx1nw91<1 zPr!DPoDi(Jj%S-gSPzV_!CKzw%23@dkRR{eyr<{tGyus zI}3RyU{8r*E&CSOAR_RQ&{m`s>-5EjV@{(#i*xT~zas2`l5MVt-+(;}Q!hFwd-%f1?t4)Nta zp|%jF>ueNjdGYLR@RI94d;9Esi9h!CSwH>X3c~p^OZfAxvt|ATc{aA*U)K6_efUAh ziucN)YakbdY;C_Ju{=H_)>mMzS&MU?un(WVC*%123$eF~@(oo zoj?b<{k7`-*ds^y#jrQmVeonfYpAgo#WiA&s|xndX_Gzt7GiC)?B|ri9(fWL2D{mI zIPQCO?Jv&GM|+UxL7#D?Thb`9&4paach1fiUC>6!eQH<-z&E3#9hLcrz4&^AeH5d< zEGfu2Kd2wpzB4cQ`yfbtIF3IsZVk$1V0V*!4ygBAeu46gGB4}DvmhtJ%;r85vM&LJ zIPn^>!?49v-jM0Y zZaaA4L95Wm(zfYu_h3K9*<#;~0T&x~Kia5;*sJ3??E7>l_&$~V%G9TF%zbyp{Jkmt zc!bZHO-HosW8J;%XCixJSbCxmXEuF1qa9iHk)=H1YdwWCt(T{hYEYXqpGFdZ?5-9}} z8RGvs?U>!jP}>E4VXP+%!4Erla2rW~=a3iq)N6ml#hXKLw58)5`L{SY`SaX61|ofK zKtR}#{y+$RI0Wx^aPl8;@ZdILiX0O&@nQ!jpO}NwUQB$krht4}9D3rdA$WTTo(#de z9h`C=aB%64`Rh&<&voQP|7M&Eo9<}s;Sr)3Kh+L_m@dD=BjTb65k-Dj8;8heV!RHc$-*4eJSoq5!cuH}~$@kC+#VIH8$%s#!#5+H@^nY)&aN6SJ0qv(k zPn(Jl?V&DCyQhoOo=ExmzHC={C_nLMEjee4AnKS85j-y>2%qT|JZMvchvX-? 
zZ5J3+{*;IB;~~W<5AhL)p6_Ye`^bmyX-^{~AL6qV7rWanmi)F|&(4?omEP57bqKyL z1b^DXr(5yz9Ae>b*NbfJR|syG=gEpQUDQLqMSr8kpY|4)5B2Ki7x@W}|Aze=>W?_1 z=(6ISNf7?aiZfr>A9_`B%1?aA;xo(QgFRP$=jxw@%*Dx%<&9;rP%kJC`&G9oPI-tg zcIeqZD|P6Jw>$LgpCui7;@ygi{6$uJ$xq~;Vd0cZaJ$}-AJaQg!@TUs!~Wl(BMwVmzCtmH)v!7b)&=X&$xajRBDvIedsSebgJ(j} z0nJy+MSAHUieo&2E2)nkaOCXQ`-6_0#GhAO%FjoA4#5wfyZrwp#xs67Crkx$d+T{(gBc%9ECZzq4P`Q@_`EXg}_Vyy5Z$P`$ddH3@Tp<7n2|S$~Xkx zpK=z=(L`-k@eKS;$d25%1b<1)`^1>e>`M4Qt`7FlzZ84u4`aX9Z|i+2?yInuZb9VI zwFYxONoRnQW)ZHT>3hYNN1OStYs{nnea1Wt_Hmtpa|Fbu9(Tq(Qtvb75!W$~=@&8P zkS{2}xQ9Gvur9h~vjYX6A*iPwkV%??ifco&Xy zH)?SYAl8{c@p(TvwL-^H$Ty4)ia?Na0?xczEe zYVnu0lCH^-lkeHhic?PF35(AxGJ$W857_$bw&-n~{3vJ8uEQ?n$iw&epd%0QVTV5b zepka0_0RV^#~GL|;?tB~?0>eMc%|Z!FSh*mhv3yA_)``>-O?M!6NJBAA8`($?*zB& zQMM|I@-w};iVM9xeo|=RHXrKGEkEQZIR2;mW7HGzb()5Djy%Lm z6?exa5|*6yxGwi=WqP}n?rDcVamGVAIWEEHZn|jyCV%z=`yKwoUsRm*>>my|^u%9w z=-F=^bm)n{>d>=4Ipok2e@$`8Uwd34=kjR$0!HQqkCv7GP_M?Y*_;$s?bKt1-$a74n!B^dsL zj!VQeezz~r^fUf4T(~mHGloB3{1R4Qxe_ex$~69z7WRuBmnfPXu!=jvIk%zX67?4` zE-`cw;}W@3(n&Ni|B~w?g7ZTEWycC$OwW0iIog$&LyesB?ck>{E)vJw?IRNs%NLqt z14L`{aNp`ZUX1uZGqiB5}bqADl$ay zwM>n8NfJPbGGiT&R{+bzWYKLQ*7+gKwY>mJ_5Joz}{O1u-Sd-#iswEz2AgddLO za2x=T?%naPcm2bcenai=3;vJn@4ve7oJo)q_3Fd*5C4kRKNMcycoSAS{8E-d$fh#C zvf%(ZKPAI{E@MU_e~TM7cNzU(=XgNEd2t8bf{1Ae!COP{b_Wk`BPQwK5RNwn> zoFLG~#jcCE5*a4r3M~IgRVvOy&Zy_Gae});=yC9DMn3n4;PoN+|7GI}Ve2L2rD5+s z_^9HHm+!;(6lc7| zxv!3-_g1A5Z$9qD32vwBwh)~A;kfi%Z$WwZzLk0;j`9$1viRHcWSMWYg9V{6|6b`` zI~DaRxNXN{JmjNhh4lIi%E|ZqVMk8lgNl=$@Bd*3Prv`wsKN9S=Qxho-^BJqm#g%m zPuqTbn}yr?vN!~{$7O8#br$^$MhgFV3m01!-3be~ZCh_B&U8@^qZWN3nZVE0evG8c z=0m-@`AB|(}MTRobnStY{_Z24=-A{t(AVpIu|rSX9rtNgx|l;xe6iw`hyCADho1OS#U+33@!v9s z5Btk29X`aFKI-k0syC*C_Ki}c@8dd4HTW}W|})sZu)_ic`x#Gh4M z%Fi6-Egqk{{D0fWHM%vuOsZ>_4;$C0*KrNZ?e(2_l1~f|Q?~EI#x;gD-oI2jdcs2- z{jhP39p`>RjSJ`B;3sTcgW=CNu5d4SNSd^nCR=k@<9EN=n0^wL;eu%)CO*TTFMbKD zuS7ez8PNC(RgW^RF#~^$pZXWvm0cL|oVzbO!a290;~EDpVq7D3?S=3AbXfU{j4}Da z@8#xq#@WrU!d@aWzVSBpN|8N-Mzf;5P0M3_r`KlpF#=ey2KyEz7~lD`#imc 
zv5#299D>mP-b9ccaxUtD-{_5}yVN9gs@#&+}p^HlB zP~=$DTrpY+o%|5v9TNA?;dd&{4{#l=1wG{aJ6y*cTIXMO&n1C-x9gRsY- zAM%)Eq^D7s{`c7o!&3;|>w(`e@0$Admmoa|i0)0`bB}b09I)}7^7Q#akK^6y@wcIO{LT#}&<7jON1)Fe;Uizqac=vu{v3+TKrLYV zy-g~YpWy#m=$OV#enVYdlbJiWcFp>xxgq-@ZCsz;>BtN0C0^5G^)+V2Ir}7u#r6{a2OBBLeoWZZi5n|<{xgsXW z{Ng&Osmb$q1;N1qk9h&^RX418q-LIxtT%W0n^!e8Jm&KaY+1fi{P7-NJ2mU88@)B_ zaaI7f-k#%CtZRf22++8(GQIstqY+Y{<=izjH8mgm_$L;G^Y+%P_Nq2EG+_5FoEgxt zas7HZi=b}3ch8a~OC;#UM(&s5$~AS3A0W;}_k4E62aB(wq2}S5CLA&lszga>^_m8p z-{93&G*mx|5MD)9RZSxlihaa3*4NiHG@*P-iM|M3`^l|Yz2^NR&Zyxfcir=WV#Cg8 zA0W1c%kR9{*xaE%>?vRMa5c9-!*U?MHa7ZA&?>GD(%YaW1~-}7JQw@rjT6#ZH$H}} zbZ>bK58gNDzIB3kPQ;WsFFoSrA$U9luMNTL9h~wsJ2=P3h%eP7P#(@#X3B_jzOsuC zIs8eV3c)$Q+2u3h&{O_V2frEtWqgk~zoQs$j`PtH;0_2L0nV=;N)tYwCUytLHAZdB zk`Vk0A^5`~_+ugXQz7`j3&A@>@IxW^g^nXx8(C@9b$7&a>aPEI4D+kEk5yHyTUS?= z-cak@?X1#UYuOQvYYKcm57#^_TWG0EsxS5?&KVZS9TzFr@^nz?X-6YIO{*~NT&#y& z*Fk#X_V~mM~U#K|K#dc?o;!GFum_twbIetKT;`GH{X4hKgkKp)jO*`fT z>D8=Zs8`}@b~2QU_)O)*^&w2xfXdzCq>K2|PPzv5zSW^8-saE`>3zFHPyAVjKBf0b zhn{$sLqDwd-3~qRJ&H^IdTDK9-!O;Ih~6J`_zf{I8Y1(7}JCIP=Hl6LaWq*K)x8A^lRt z*`5=pJhcv==as(M!T+1$2?r;iRtKLQbX?N&#Tf4`ig!EoX|sh^Zo{0NE9&w>bG7QTpU)$62BAyhG+aT1}Rb4W7fD$?;FhVP{G7e9QunC)8ggR z$j^Wn_A@w!W-)W#`h$y_@$EK3~I^YQiQ1llKPYXa0qapTxBzazvO}&W&Hjv0dE>d8vE=!PNy` z`qvFF4~9|JFZA3DPZFXo=KI0AkgEOdN0*#<{Hw2LCx5o6?XFkkJj_{}v%dNIy|Mr7 zpC_7|_06s)uxI=vgEI!ATPt&3d}h(p+272%Byn^R=#YliD%iGHn5?afYO4;fGUntF z>`foXnur1uIawjsI9H&+Tyhd^*-AOnpx>B-(=lheVg}AyfKB`foD2E{u6d@W>}}Nn}6e?t+c&wzcjI2#(ubm{kx5q$e7D%iF3;@ zF5NcWT)j0uC3brHd-uE&yTlxdPsRAdEOVd8YrO9~6OE*f7MSQs)B5By1t$OGO%r5n zB>$y5kj8;akQUH?LdHHY#`10i>sSyrHKYHTRF-+^T9g0BCd-@Hc7vIOIfMr*KpUHR zWNH(p<$OD8PChv?I3E`4W_vx1jX);6xn>mRQd! z$nr<{1X%=GRzb%YGl5*e_(ojhK>ECQ&(1d{apgZ^Tq8d#YpcnNo&K(lgH&Q&j*Lr? 
z_ATI5knaR!X@)+Ym|}1a#I^nz3o`a}=gb$ROWxKDJb|&7RMs?qT;>&oYeu+ZSyOrw zch28hK67YlepcSS-;U;=Jp9%6XO3s(`*czkj)PwX&P_ErvA!y-TM+DZ@DaVtKpffm z$Oq9M&Ptem5?K~e7 zXFtHjIe*N><4$8vKD8luy@ONEW(QB(^AzX%hx}U|dgAR4PX0*;r@f7Mt|p9p4mk8P z@tic&f%rusc$oc68mvrw@czh%uMNS!3|!`OC*h0Zck-X$Ium~>ADQru5d1>zXRua1 zT=$6X;w}|pm0#OdJz7(t`_}uQRK>JujC;-7o2r0d-KuJPi}^8T>44|j)O@0U~0e*B-=h@*QH^-Ck6B@@GAIRdL#Vhz~jRtY0S_ zdg8+lJ?q_wLr?sa;v&EF*XT9}1>c`{oKFybi{g~OUd#E@ic@~#0}egwJNI89J@Hqq zcx}7eh{K2N!>AK4aig77Ntey1(88@Hzoyv2ZM$CF!t*RWu)etY&U}-6*PwnV<_~es z$7DVbr%cob@#z{j(?|TXic{Z$rw?<&vu4d7=2xrUZ+7r@y-%pmd?7vO^AaaLpG&;j z1@|T5bHQ!9WYUTEfZlgG@e-%+^1smU_B#1Gzrb@Qf0g?(AJ+70_NBj*cpvtip06KN z-5OqQq2hFS)>Zk=7To;R$>H_00)A-?ya}00WMuotYU+(StOD3F$J*W@mdsf;7%@#xbhkobk2linyRNsw1 z>^rw#3?pz~2)y*K8(tm^&AssN+_KqeRwm|Oa-QJWx@?)p_a?p_gYQU#@5sn(qh~V4 z^=i}FS=7gY9A@bEn?=MToBX1)h(25bV;aW?1->_LI=_zlx> zzc6>p)NS~-lXQqK7eGc-q(e!}C-n2;DmnWz$YX=}{~2-qmtgIEZwh5Y!puOJ2%bqg zxt9fH{-+H3J+sN)k0UZ$>{^{@hLQ5?hfG8T@?4oJc(rP|0D( z8LNL%H>4h5o-XzXkuoy{g2%YPj8G1A;LN}2@Dt;Bcwwm9mKeB0w3$dyti z-o$)gDIYQy^~|Qj>GK~YKAs^eHlVXlT-sxx?=3jb5%T$EGNyA~8TWYG%YeVH$tc@~1Q;n_;2B>^2G z(!3ic$NH|%iJgA&y-Tk+o^?s@Q=2ba@g(A|irujF817|WYz#DaOTK}oP<+rBbLrO0 zBH3FDBIcy`vAciXW7(HHwJ#-5pO_yh$P}AAR?p>3(nW|@+Lt);ZYt!x4!k#C6FZG@ zPpMa@Wq$It_)C4loNfXp`PTn<(K^kc%!y|Dm{%$APh1L~;QK|` zg#NeOU+$j=ds1Yk?dTi8-g&%WMJnqWza0{rThhs=ml5{K%NO2s0`{gYkS~>WUGH== z^(B!9ZS8_5!Q&X7r9L5L{93r?k<)=q zNz&ejIP#$r$xj3M`;nK}cjjo*TW1f79J8{fYz^edhaA`VawJaSTw2H>{C|kHPwaM5 zro^`QPBgMr+NC_`UE-8B=$k?Oxc%Tx?7;}#3OnF(spHZvcH1J1?4JaBg$7_cZb`in;es6q*fN z(AF7Ti>*-NWS-nnZGKv4)zbv@EpZx@DO)dc9=7C(^aW<1&m--P(8W;3O*D9ap?y7= zx774Vn~G=STslsqQQnu#XTM)DN#2v+=|+IOGn@L-vI(7k4zRYv??{?pmzTEqUC0Ca zwqvN9=+pO3gWMi;;9)(&BcG4;`~ZD{By6%MS0#5mZ8iX3c?a|$KGK$cSB!73zZ;xK z3q3wL$y|}0|LMo4LudDAKRZ?02imiGaxqMV`v{zSyOvgWCd+OBkFh(?}FIcl4A5( z^2TVU`011NCMoP^)&J0A#RaF3k7%zezB7NTtP{DzTz#@b+J=YX#RW*)5v1)&*wK@S zx1fK<$(Kg)>^rw@eJ(m}?ZbH15sd`xL3G-^71@d9QS=RCFL*w-WfIOF$63^(zZlx; 
zc+`}5(9@qGy<3om0_bFZcH2~W-*li4CAydMfTh1BeYfAo`{r%r!+!-Pw4%F(D2o+S zOmS*TtT+yT7|+;$k@xC5n!nOEXusD_v*i0*(OA!;NaHB`y9`t)0 z`5wIg^}KXB2m7H%eH}$aM>yj;@tx^gXCRFQ(9;W$#ePS#kCS+)($`%Q&u8QW>0Fq- zW$I5MEAsvdX%BA!k1gOu{UxJUy(DKd3qR2#$6SOb^pYoGL?_p3zv-x-?%SbPz%L4a)VOYC4~YUVRehy{wGOx$`KyM{q6mM0jRve?iu*JO?_w%g3ebw4Pfo z_52jVB*5e6kjHD~Wbg~H7e*Hk%k@QQuaR>4)mW?qF@P`%fC6sye&-5Je z_s$;tV%AkJ{YRw#TD;5T%?FPLHS;J`t1Bci3{;a+!a!0Cd5jlUdUf$q@XzH3n3Ob8NbcsCOBZDbN@ka>l-lf zy4j=`AIP`~CUdX9u?cG=ntaP@&H6{on)Q!VtXos<;d)I&-Fp9^=}JszTx+gxy#4yd ziE=7+nuoo1!5@F&Y0?cf8#ZG4c(s9LcXeIE!!_0Zngi2_0hdNOd3s%qKgC~WNR9L*2vY%BzbUuzz<`cWaR`_P|R&y zyQY3_b+XVt7bm$3t06Vf=$vMJr1IUUZBYjB0FJCBauzJW^u? z$6|F$}--zQ}V6KW7^fAUc;zbS~+(t~XgOfhx;G`dRaMCB#PQ}QH zw>UWYpu;-Okv`@a;)v7U%|PT+7DC_b;G~B=WSk>^y#K~I;%=4^&vCMncy0)uAA+M_ zGtQAuVF+I2;7nJsgUhYV(gO-WU^bq`Gz(t;V)n^L#;ueO`FAc#zAA+w9!J9+yr$X=x9j~{~nU~GS zuvkZ&f9kxwWQU4e^^;ewWw+5H~uhd_0g4^jD3Sb}qgkFpHnqE^*_M6^NoN^LBrMU37`54@b6a22= z(*Mo1@K0Mf(~a*;^b^M%(@mVOXU0z)-+jKrhof42 zKk-_{Mb2VN&Uy>~q=h$IxXhuVWBelj?F8X79>FE)?6`JNV=9<^aB>3J1smvJGh}d?C-n#uMy||Yr@|y&+8mM{645xoarLYaeU!p>$A<_ z!|w^MD4~3k==q&7?8r$xrV|yYAAYZJ-!bwhZtpK<>!)1#NP6vZ zRcqn49=a{ueqTIq;iXnSzNk3m^|L%AA6K}ToEVB6YTln1;{)UCy=^a&^^((Dw zebKn}o#m1A6&eT2o#6PNb}TQXr?rLUj`;thA(%em|4DJ?C-MD?-==ahUqiLZ0$`Q2Ra&=YS`T*}qGssr(w9X|Zd ze#+rP{Aq{Y({!~u^u*g7dVZI;JM_e#b?EuMo^k_N}O-u+GP5iST4x_cBNLM}H~EifKY)5AJ^P*2nl82*?RsCYKHClA&5BFD+~#W__(_D|t%}RK zW}BY;L>^n8!%9ziXuo+wamqvdlohZ2UYmSbAQ$=2-jlC5`4FF`xTMQ|9~D}-{T^ie z)aOUEFp?i}&U3!s;$y#e)+sLfvGHdudiy)>{)s`lM9vaR|C=q`mVeN~ZTb@y{%#4k-9!!!O{fQel0yzJQJ z0O6K9f-#%vF9MBRYcncAeD_rrC<5v;9N|)2ykB@#AIW{%^bIb`@h53=c4QL!IazlLXjP~?zyh5VQ z*ycp&+=lM|n$q%9C(JD8hL=b6b1r;dcenDDa54G8?o>9_H?V9%ViGPl-?NDw}rkpCQXemu}yALns!PUjKmOJoz?3C!6sBddxF zo;Mq|{5+UzHej{kxcRj5#n- zq*3BcKsK4nS>R>;0DLn#nQts}-TZkj2h9e&Km2)cGOrMNIEp}+yJGI;yki@G*U$Su zx8|codL{pZxk}O5qEnf}a}DG}@o#-6YPRJgE}5hDIOYX?5%YV`o}J*&S4%aTzO!d9 z?Kyk5!CXcub5qv#mS8(5%sILP*RyXjD--^Hyun;vBl9yMTMToJGAmJ 
zbe*RuW%gKSWLpYl_IfBwXi!&L(Kv6L4*dwNg!eG#IqX>_!k`>tZfJCC1bh>ZSNdVn zyixwqEaxo(Pw;{%b&N*BCNhK-nat*b6@{3mB3adX4CC{@GJN;Bsj?RR z1BLz3b5sA<`-DxQT)SycWqGszHY7|1(k%Irc)VitA*2swb++ghjpR1bcMUMiPh0y8 z!h6t}L8qpN?J6P5tuw@L@WpWZ-j!@_9hRAH&>dmg^MSKq-S#hZl`}`IWCcR5|)2 z+O#Do$Fk4#>e`CpF8n=bCakT%oU-Z2kHu#(S2f$*XE2|yqBdSEa!>C!_uVneY^Vi| zl>4VIGb<&YLzq7pLmfPXIfzn^B=1r=LEewG5m)d}E5$w4wb(!gfUCM6p7D~a<`qy_xHzZbsLi4Uxo8=sqc>K9{rk@Sn)cHQwtMK_QOb26p9?7iII zO0PBXKhY-YJ5S0(f0kLW2w?*~RgPW(J)}fWNLLDJL0(=d&pfniXq&b*ohx(KX3~Jm<8l zQYY+iLMw8`ety6NnZ)Q=-GZ)q=WNsW$tUR=gIf{dEA-! z#g*T2tZkF|t7*RIvs{9*AbIl`^l^<<9&LFH!l%{-eMXTxwbq~x2J+n%7Do2U&yY6J ziKNF)ORCQFc_>rTU-$>rNl=#MeI6@W4%tow`b9SV>v#70FZ8=-V-05S*_%z@9Vj<* z(U*(8e9Kmr8IxF*?!QRhG)B$6n5RAa*q=&YHm~j#7W_P;?L{|V||6m42( zZDsK+{H@N8+?T*#C)R@feOA`&YWP2{HXBOu4tg+Qt{BeBo?W2#Ex4EZRS>y!t-*Y4 z(p3Q`%~QC3&Sb6iu*Se3-XJ}}dKS?iWW#V@untH1&T=2D$H7_ASmTH}VXa&6&g;Y< z%9!8p6rD;K!DRhSq%hWp^+X>>9mKWi6CM(V{LdJ4{^v_I5M3W`mjgU=9ATB|i4Nn7a=CdieDM)cbe1kPY*z zL2Tx(Hgg+6Vdg$=ZZd^tx|v&n$L0aE8oQ&{m?fC|y|DsYdj-THZE zNqO1wB`a3k^V!dM5121mx2CBEbG6;AuYIlfPtK#CU*5c59b~iOL%zcWzZPu7<|Fu^ z^N9FR2%d8A;5L%Bvoj9z>7Pg>@QDwE;IyYQ6zPXT=u;v1a0osUf{%vaFfNR9lqV+y z&kez8_a!s(@j~beL-3*yyf_4ph2W(jIOiFEJBS-4344FUO!qywJhPhxT};SBBAKzV5Knqhsbf@@aPL#l#aKxNBb~eQOAP zdkCHk!Mh!t={n%xc<1=(B0k4SSIVIe)5c~@vxU4VA z#ODt}@IMa0cZA@78G?T=1Q!EXCV74mf}aY(Cm_Qz@yQFpvDP~y{Y@cw*ty6baQw|~>G*-+iU8Pk1PB+1H(}srhc3Rf-+Vq@N{}hz<)vGYQ zwl3gh&C}I~0d7yJm1_#-$8Amfb*I$UJhG;0+*!pq<-$M1C19KJ5Mtfq-cJGIYO33~ z4*4l=Y2f+u8twT#=otFW9sjFWT*m+I)@$*a^;+ncSa^$t+w@7rh0kJ(evd`J(832S zyx78D)oYFuv%fKJzh?2V=XGU64sjy??ZKu0%YFIDpZ%7Y(vv@NnU5|`_}J-M zY2mgW;ubC+1$50Jc$=ZhY$OQ*bl;Y)B|yQU$R(0;{QIc z^pY+cf7Rj7ekSL?l0WeiA$&$0KJ1smDCRrzA< z2%nW9IOnsHKl{ntCyo4xKNZ5~*%17o;^fc%_F=`zpE&Gz8TBv}f^%LC`Lo}C%HdD^ zy%0VaW6K!tEXB#6{r^J6DL?U|5I)roAEWh!`;Ln;&xpf^_-Kecb1n(=M*ZX~pCZMXF5<fdb?OtsNo=}|WEmApK6sMfTTP;4eJbNr0_Mf!Nm5x80$Z6x8-zm5)XTqYl zan6IJoH3P?^BO59aeMxv9q&2w5tR?;9gY4N5C{LNvugnS3OFqQ!SKQ^( zqB!}KE1#zwKE&HX_`K-wi7TH0hY#_=5I&p_Cvx5yB*p*b{5HWCSvcpnQT|$u*Sl=I 
z9*ECZob>fdf15*3ob%pXy~Q0q&B~|R;X|DB;aooL4xfbb;XFC!Bk?Y!cliuEd|H&x z8;(50PlfO)RC^5d)2e*tC{B5Z&sW@)hx7T!r(OBP9X`ZsL-;f)PCiNH)9mmeo(SR7 zuQ>a4+MO^14zA6zVgG{k+AJ9M3y5oQ!}mGy$y!GF9wyFtQG9-)Zl?Umhj@TiZfo?{R$mET{&mF0wPT6`jg3fp5J$;7bz1Z;HFFfDNq<6#L ziwBY>H~t}w-?jBJ{p2U_3zw%nV|e}iBKbMA5|(|IG!ZUR8vlrX|46q^>}8Cf`WM`l z9grE60R*^3j&ROx=zQbSn}YCsUj=vPrd4Dw$6s*H=sYprdWExaL~(sZQ|(PR<9y|t z7k&1#D{mH4?7XVg4|;ME`9y4pfOEKI=djxgX9+S!sU0s#lcX`t0oNb84>j7KGZ^?TxbYxfT9t=(6@uX$hhJ~@D=1<~XrbCW?Diju|2STdO`-yYwd z++N#J-_hKW=xFI^?P%{vc64_f=s4Wb-!af3lB7Bkon>9+UGc8ku7R%Z9S3$4?~Lv2 z-7j zPkHY^?_h7??*82aC@qKgP_H9<59~X2gP_9q9Dx!VtP9`5Y#tl#14VQ8oD zE!|bNt9(~I_?LB`FTbQjf`2R462VI)Q4&zTQ@e&ymPU7(?wsz@p7_4neP#P4)g~E? z<9Nx#NvUI|E2k^BE59q%HQY7QHQHr%XnqSmd=1fA?j$5e}Qg-x>VoY+@0*{?m5tN7E=jA@+?_zZYVYpuKG5CYJ%H4u{8BEh zmDf|)Q`{5lDeEckky4-NY3XV2N%~qJ=o#!u^$hon_L$z>-uzx^UqmbAz46}q-e$j* zOZIl7wdqG09_k%NeKEUp{MNN-ck%Ah-DSJuyK8qh?@st_QTOfxemNgP%Q&)o)Ng$~ zw7pWl%J!7+sYP3v*weD7eNWOaInn}2`!kB%ma<%kmPhJGIa(cQbEL&d?(N=tcyIsS z!M#I%J7>_!<)e)&+E?ti=u&>0_a*kV?rTTedthJxzJYy1`%?Qx_Koh#*`K@L+h4f9 zczsRbhUQ1qkTHi)sHvG5bE!U)ZP!>hZ^`$10QPO TLk)bWfe$tCp$2|qHSqrf?IIDA literal 0 HcmV?d00001 diff --git a/uefi/EFI/BOOT/grub.cfg b/uefi/EFI/BOOT/grub.cfg new file mode 100755 index 000000000000..b16df1e29fbd --- /dev/null +++ b/uefi/EFI/BOOT/grub.cfg @@ -0,0 +1,10 @@ +# Automatically created by OE +serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1 +search --no-floppy --set=root -l 'boot' +default=boot +timeout=10 + +menuentry 'boot'{ +linux /Image LABEL=boot root=/dev/ram0 hardware=d2000 selinux=0 rootfstype=ext4 +initrd /initramfs.img +} diff --git a/uefi/initramfs.img b/uefi/initramfs.img new file mode 100644 index 0000000000000000000000000000000000000000..1d149689f5e33352a5ec57d255734b41e1062f69 GIT binary patch literal 6379520 zcmeFae}Gk4b?1Bd(Ji*5MMD%7_1Z#2Evl-!u^W+AimC$2E}*O`y3wXBE_H8FCAaEc z>)zr=i%KFI;-Dc8Cd8maBAFoxW0)CYFhi6W;&dh?LndUxWJre0bmlR6Bw-Sg*O`#a z`>eJ1K4+hO 
_5f4qO*?WWc_-?jGdwfA28=V^bu{qgpP+aKQ0`RHShu4?DE>fsGt zLfaqhUY(-t?cI+((zU8%1M=ygk!x>%EDgtRkGQASh|p|!2&+HSg!jl}k39bPhK@%& zoO?@W3QyA^vb)A6qR0#L(RX!gTvq3&@nJ8GtKRk4<1Xl#!hE+AB+Rz4iLpi$eX_fE z!`wM@h=KXdi+(ARx5FPb*gA{f**f1oMbWxwb@U4UzCL&>yN9P)bqYJFu zJgB%kWBDO{y_Iv+#ZYJx6D#DA!$l+MXtc;M1-*}d4tMEubm8xrh&bxIlQbTC%!GRE zsWd(GTI0W?p}R#}jQpZcr0E^wEL#0W`(WcKv5@fkHX8Z9-D!HvW`v_IKl}frQ4-0S z;#qs03D0^H^;vh9?=wh9TV7@49W}9uq7UFs;`vg4;$Hq;w&fnuRn|jCU3hJkv0ZJI z(#r9vdSzv?I5|A}*s9igwe?}-rCddB>!z&(s=W4HvQ+eJW|9uWV4@Jz< z!v#6`t8(xqIrv+1@OR|k59i?T&cWZ8gKx;et2y{o4*u~R{L?x3(H#5>Irxv~;9t(c zPv_ugbMRlx!G9$O|8@@k?{o0q$-)0~4*thE_^llL&vWp*Ir!h@;B)B*vh@7H9Q?I8 z_#1QZWjXlD9Q@H7{D~aAI|uL0!3T5j9XWV02meS8{^cC}r*m+zyR!2CYdQG&9Q+$O z_&0NK>G!k3{mmR)%(E={|B{3Mw;cSha`6A1gU`c&$O`vmIr!^x@Ins$jvV~q9DH>S zzCH)vl7oLJ2OrA8%Q^T&4!$=B|9B35I0rwPgCEbqznp{rbPj$d2me|Q{`DOEn>qMz z=HS1VgWt@-Z|C5DorBNEAkNa;#X0!W9DG?0-jajAD+hmX4lcu-ta$e2;7{k^Lpit% zpR)Y_Xb!$N2mg2uek2D!mVS#YuCR~^il1<9*mCMFoN4H{uztkwD@9+ zM+}tz|NmZE4$O^`|D4OkzyGJNlssL^zxPn~@{~q>-C8FN#aQ?_Y4uW#@Ki{D&uf3f0?u#86F+4mWqw2GCW${N5JJq z9nos75eY3r8k59S$b`gTYP?wA6HQH2#wPYe`zFU4<*2_rUfo-6j?mM>H8C2}z)BUH z@`_1ZsiKK3Rq6-Eqmk<5o@lHp*(8Y_CGCyLMs-SGh4OGA}zF1b&HG}{b$ zB-&RT)BGPXL5*k#luxk~jhN_;m&Zw^q_j3URwa%HNDgo1iM`SAs1yf*$HgS?{2Re} zN&J%c5%hxpw5Ti*SOO6$T$@8Yj2ji{VTyQjwKP>JZ{9uGGl2|oPl4Me99(Qv$A|<4 z!(Svmr=)k4DUY$;6U7Q8C{d#%okRCZ3N+FL zShh8|ENLC97b_D};}M0vd$LT`b1b zXnblyh@{L`vf=8)M0vQO?S$4?D&AEa;Xhno@`9jbQQ!bh4l8U@7dBbI5aT0ZT-+w-TebSTel2! 
z_V@G+q5`{ynQW$Mj@(BK7ziR!t72w}z{dqCK|H#TO*pbfX->~?S0H3hot_bjlto-T#|G33B1^Ct9H0jzN z;6G*k9}VzdvGV%^{GB#kM*{p=>(7Y*f4Pm%nE?NUmA??+e`LdbKES_f{kb0CZ?gW} z3h@$HeE*oe6Ph%1o$I1+%p0Gi&p+ZfH!Qo&jU{J8*McgNs21N^MT7s*5nKi99Flm!8P>jHf7UmLs@;D;=JD!^}B z{Az$NdC~Y2?M#PTvUq!dSN_(>?+ozhp23d=_yUVx3h)>I!N}hU@YDat;DurkA06?r zt{>nvi*FC`&mPrmi$(*yZrl0&0sh<k=EG=!-}fU%z82tvHr)LI{!JVILjnGQL&l#Y0si)F20s?y&;O{wPXzey zTKrUif8N%kGXefrRu9hw_{-KC|1SjiO)GyXz<2c<`R4=NwR_hC+_ihR0^GHGF9x`4 z_ZH}`sH8t~?cS0AckSMa0C(-%>Hv4`+ok|_?c4SMckSC~fV=i>e}KF8?MQ&T_U%M~ zyY}r&fV=kXLV&yW?fC%jy=uzkYJk5}HX`x68Q|afgu!nG`1MbkcHvHdzw)4we=)$n z)obvmoNiBEJY?P%1o(-M8Tmy4{ww z_@7#T)&;mL_r3sk<+wAzoj%k8e68)j4h8sxwHuEF_>Wn;@mPR=%*vk#@TaZ(sQ`by zl|K{UZU4dK^SJ{P`UGYJea5mgyg_2lz3I-wg0m7QYqXXDogv zz|UDc8cFNnl0P)zE(q{t7GD(LEf!xA;8!eO2=LM$8UNb@e5=)~)dBuKr*8pXu;cMf z0saqlQ(k=mUa|e`_5lB|#diky4_kXG~oRKzRBW;0=&=SM*`g0OUDBI zj+H+Z;FS|rKLfn{8G~O6@NZcBYJfXC=VpMPv+{QWyw}>3k=?gD&!|!KqbE(civs+& zZ9W$Q+}S7X0sa|lhpY?myLNqVQ-Ghg8?SGUru8|p_^klHvewAm3Gf>hkH*seT(Rx^ zf&jm2@kIfCebVT|k^pc23FB`ez)yU_;41?BjK$r)YM0MWKbKg(!*BgtJ1z_Gr(F9V z;Oj0L`St+cxzCj2>Hv50>jM03u096%3G07ffY)7p4DfqaerJIHlpQyX2KY|dNXV}i z;AgDf?ho+0zh>l*1o$8Qfl2SN03Wmckt-jU&+h$HAn$NjZcg64p9|z2{-V{xIjDir z1QVY-R$h)S+;1TL+ivwlu@Hct*3J-syhqrt9at~ka;cxNq zbsqi(58veBul4Xg50||S?zi2;U++NZP7i;`!$&=QrH9u%{OumT-^1VG;fFkYg@+&U z@V9yRF%N&IhoA89@AvRi9^UHVXFR;!!_Rs6BOZRi!&^N3l7~O);m>>cDi6Qv;SYQG zbq{az@S7g~E)T!u;eLB~$HO1@dU(yl zJ3M^9hs#+Z_dDd_e*bmE!|f1VogDM<_d8FZCp_FvaVqzehi~=d&v^J|4?pMOTRi-N zhxdB;B@eel2<1NS;UDniuX?zhDpKxs4}a2=zvFA@$jgU z=>O9mzQDr=J$#XeZ};#e9{xcOFL?NeJbZfv_iuH5S$-tgpadbph3bH7_2 zKIK5@9S`5<;V*jla~>XzC;D%POv+v0;RihVMIOH2!_pnJ-pAu4}17_5C4pZ@AU8^9zN>fpYrgUhkx3`_j~vc zdH5j@|6vb5;^Ck5@M9kSPdxmDhadIuQy%^~4?pAKpZD-{9{wX9e!;_!dH5v{|4|Qr z-or;c{Hlk4!Nae6_&@dVn;!l}55MK%$36UxhoA897d`yPJUp66^#8{_e1V7mgonGw zb@DLB?Flw0uTi`FkssdPnr9xbzATCg7JKNv1^T#7-uZdrx~I5g;MniK__yDR4o1%` zudi8NdUgXFf6Jvpqgs&tk%c;2ZgtD3W%o?%Emp=#O?cvLc(gdNyIhbRr3H38ve~aX 
zdBFL;v5c3+iPEm>ej_2>?@Zi3bJdHO~NZ`W2GmSm)>63H%9QCecRqvFAP^F%Z2)sZXzCG_igM~5|LVQ zvW|@I6E2Jt$H%gS*z8!4t%4J~3vBUqVv?CdyDQaQ#mdm0a)#f-)!KoAaAebB%qJZh z#W9=hjRUoEOQAl>_TUnUDikLtiwC04(em(~fyQKqdDCs8@hg!oj<2bXR_m?x^5ovJ zVM=7O(pnm;*DAPOc_3!BeOHSJQRCO zEJrHE-Ssu?`0*No#);^fX=FQOk8|v4AUU}&$hZM=Z38v4(9N#xrmAZ8T&unnCwK3S*j-S?oUvfR*U>n`1%Qek$|*GV4zcZAUZ$eu_15K6{O`nF*_&c6ET7{4kVmAa$_0)Cq}hpPP1V@gF9}8A z3q~3}NIXg;wM7wG8n?-( ziAEJ+FDTjQ{xlo>2WmyK-U_x9B=fe?Oo>{gf-a1VP1YM*C^i?gjfM+b_A`^&^%mQp zNXEAmq&eV;nA5N|s>9Wa!O`P!gR`&OJh0K*JaE*3!yfV+NlCMTxi-~s!MJiTpfn8Z zt8ObdhTAmVZPJVkY5U+@Y?i(Dg<6#z?uEBWR=llHtVlW!6gUG>kA3DW!>($jAltDE zl9*|Q@kX0tcB8YQT`SRuG}DUu4Uiv#YoXM4d>Ad(X(CaVR6L|=2x~NhmncZRxEDpD zkR=6lT7%U+<%wQ-*``Y7TGt)}HdRQoRiEided zCU^i)6g3ryw1J(I2WpM(iQ=vb%5S*rvX_%a*dD}Z1esxxaWV7CH93jURA%(n<3$XS z1}!*ULG0RO?$_vw_2^4CsX%&Xp(4xI9L;>f~t^;VZe4Jm`} zbSYIQwRIk&eyJVjnrj7&Lg-_yT5qh>Ct8EDc_NkO!Rmr*KS}9qX$#m$g+c?;kFgOLghQcVl>)Q(Ss} z$qIKCC!|dyQ!uo(!aXFBd8k0elrvL><wZ_pD@3g{2 zc5Gs}I!;BS0_wRi?8YseM%`Q19{2tA>crMvA0e$#Crw_XoXu;>Wf^0T1*YwKsO=%? zCE}oy!Vs~ziL=qqmQ-~zRoibCr*~t4P@S>UTZK9jRq@ zoSfP8#G;*qQs}D2U2k@)lNQ&5faWOh4C0&By32`yZIWWYC1bU~hby!^Rz(Ul_Lvv3 zkTeaLF~#u#JF_75+ZxC&`KGJ~%s9K5EnWp`eBWqg1gJP+4QHo8j(o zrAVCG#85J(t(x?0jq%!$HT#B4%Pf{Y-KWGjmmB($??VPBt-PX=Ucio7o!mvYi0YN5 z#%mb@t~z;6?QvG?gA=H7!nGtPW24eaC6;drTI^M(Y;=^<7?W{ATyE)FQVx~j7UIr{-PNC)k-_U z30KJ)dA9>3>61t5!+Tms9tluXpYdcKF4Crx=0Uq?sCLj$Ol29kn@}WVeHk7v#pzAT zL}!mi;J7|qs;o?_6(>Fjs#8x3kXjiqrtn+tq%KDjoS}CgmrlZv6#SIYBl1XGzHm?_v zRAn&JermEhn^lz06IW}j$-HtyVv4{F9GR7jW1F5;EL4iS$`vYxnNM^z$dj)#doK+} z;sd&DySf4uNgib!Lftf}5%ljLX#g56{^;TlO_Zixj zbjUXUv=Fu5^kS9dt*LRQIa!Xv$y6qBP7;tK>Wg$RWng?5nXHZ%=mqJ<84mg~i8oTp zsqoT*c$OHBDKFr7_qt?8F{@8YlCoLaSJS!RvDp3zFNq&EalBNKmT~&_5k@|_&aJXi zZA!-6N&At6W2{L=+Gzn72p8=H9PYmmyg0zK_8?ep{!+HR=MP3MQI&a_fCvHO@! 
zuj_1j+B=sPG$Ke#5?OC~B&#&EA&RF=6Nye8a@t{tRc|_M=G+U*_iME(`^D>GVkBkZ zY4v`Ml2oUZbF2b1=xt{Uya%l{Mj#iiJ7kR944ktzu5wRmmf(qdgQ zjx@#w)<2V#i4VF|EFFBSnSCCj8c1uDNH|Sr>V{U9i8?zX$`G+>ZWc&XKyI1xTqX>b zQittgkuJn#sE@x+<68U-vA7a>;Wz~o9x1Gte$X^U(#6>RBlgY7t1aasrmxl_+6@>Nb(YS*JB& zbr(cI2HFMg_?OklI{hM5BoR#_XNJnM+#0VDyYPCL%-)SAm|}4&$d3CCer94pmSIEg zfG(BlP7JjHH0x!s=bSiif|!_bPbkzam*L4|bxmeHrRs{wU~EI_|5QzLQ%AEd9Zn#$ zvE0zUKv$VfFNU7fLb0xLWj10RCDXIa;9HVf?dr^+(`IlGzjW@0OuGTb6ivWP0@1E~ z2ASA^%k{;o2s4PeBBGEnu!_Mfh)5@QP6RzU3`=b zH#1{fWJNM7y6gsG&W%cD>#e3PkT%`u%JRkNt6`If|i zNkA%-tor%dp$1VPnfOL}Xo`%HEHk}{Z({PK<~Fo@2|OamWNJ}rm+DO*l!P`tS8As2 zDo7%cI8`mM6ZukcT~X%A%A9O$G>S^(32Fh%#z(W>uiK$!Du&X)-EIScy~H?$#<(yT>Ms zz2y35JDh6Bjvv;%Xrbbq$ev;VeXvIayj01IzkXfR_6JD$a zWecF4Cv@VOf~L>fDqC;5bfH6M{n(IK-rFb2D4yz@YOxLYUll1WN(lxZuvn5TV1ov6FpKV<{#M@UD@Cna>#&=@m;cSr?Xfq?i$0i zVi}IMB-JnYaoM0V^aZz!V67=%LS{5%(%o$1a}A#_;mT*+E~}YEw6dzWs{=BzZg(l0 znS#w$SPE5?jIN6eU}n&4KsB?jekc{)1?!t?^+FRr)rjI{N@BBaDxhk!{lT`dtzI?N zE5Zx90+TIZ+{Y`l8D3ZkS-H*S>WXeIS2kr#vKsBNm=gv+*NC!6@}87G*IcFU;~ zb6$F!Z4=x8Ke5j=R!&Y@ZD)My_|)8*Zr8bbbl+-GY|Tn}FKZ<90&Ib{%N_23$I3_A zSqd%{9)5RA+H)J%0Uh7;m#JvOc z9o4S3r#9)tQFTGMt({bV2`Wd}#WZm)_QH@^=SCl7pM!fblY_19RktCUwMhpVi5B-N zDbp_x3{8=JE@DaEv29dJ&YSbH)3&BuB-=9G*#k#*FBaFwXL)W5Mp`+u`z=dIY%jHD znj>f3r?@htHS$?|sZydYV>7`xx3Rp<`J_Ryn_MKcM;=B2;(6@))Go|)cF4Frcdn&J zXVT+-=6^Zr5_{#MvCo`SQR_T|i-DV#1s*y$aii)?os$KCX0DkN@Jc*a z5*?tz>Ba(Y%`016tWYtYvbl`*(@RtNVvPG`@+}>Em*fCWlH8Q2T~i|?N$Q<@i)ugT z?=axLezSculDa3oZL`}^#u&}0q}wd|x}2V>?{b|(s>bh{lC4p)JS;25u6t6zIZe_d zwZd9ZjJPsNk@7=k$hC@?Rg+P&h#;xZXp70>Ivt#ahk8)SP*!@;0$jMnTQFU>gAaK3n2pTPGCtkWHL{*<7qqS_L+gRLVVS_(`lf)!ucJTCunE zQhm|`+WAG(wCHg;a+Py@dJf5%{dPnjJ7(LG7cGJ!-KGtbuk-3&7q+{!J|~|_NHbuM zj+M&0rgjT&vg$H49uQmIl!1)nxiWEMPM?<)gs7CBq43$Uw=xb^h71$z*~%UX!;E!I zL#=?-Ma`VT+m_egW+&?9$Q8D<9KjNkT5`30c{NhohmV}t4zxZx)hNkFPi%J?zq@RZ zP8UTbM-iIXSKeiwq-AZhy5Lo}`b3<6a%{`)YHYKc-B;T8R+&1@6qI^FXfq4WO2`y( z;F)fhwMElbuC%wA(ZZ10qHSi6r#_olCQXUSA-f(a$Fd~*IrO@LH8|NSE|9lF+=n}A 
z75}x*6yFx?IUgRBNuQol3^~)&T4FS?8jN2ua&(E1U3(NSCabmL+D$Vv&@7}K5nY>f zX)@YwmS8E3?rItV&#>Y2cR&zrqM}>tyxq4EeXnfXjo9~_cyC5o zG`qS09gl??9EB?GmM=`1?bUfDt?JiCR}^oai%OH@ra81DC-Y(kMG-seV#9}T>FDug zyM*oXtun9mGDD+r# z7)Tlo3f}JWg9u;d97jTkv(w7{%$3E6$ zuFYc7m=jAWQL+7ELG4{-lSH#*U3*81)UoUS88+rE(%EktU^8qsZlNBIlb- zeA3Def1RBD_?I_+EazNrwM0>TU^14eHb}Z2hUSP~voXA- zfR#}x^~v2Dyi_xSGV|PW0wXp)#JnkU5Mp>wRH+wMRtj2Ub@kfz5VFCV55ut;$i+R( zTs6r|XHl9Qb5cnT&@k}bFBzdsO>nMWFE^o_uq4b3o(iNDzutQ3` zs`52GP)1L*M!TcsmEC-5P){thniC3k4$uW==iGJ6mxSPsc<9k?bBhi35R-;cdb^RmJe#p@!BUD@dx#O;;fyzOPQ#j{gH^UhMav&S*B)Y!6hK!)hFyX?`VjmwO`MqM6@y%bjv;%NhU>RcZ;!~q+-ifyfLzy zBL;HmLaV&kBejO9yC-B1mJ55Z%IOX{mZN96TI}?mN|^Jxv7jBO=WXM{>ZHY(+RuJO znRgcxlraDk(RKEC@exRJDdPB09B`6ot#H~EyRa!eVk_+HLf|;n@rcs#BIE>BVNL!3t=$H#~_1N zWmvxl+tjW!C5rN9Y{jQz_2{|tCDtqHMr6fG?;VKryPteB(+oMeq=aW~RTw$58vf4W&(b?3>+B_MD}Wkx%NTr0tm;(rZSYSYFcNnA0G>x8k2JOvt#vZZEOt z;Z%(64s3glI5Dr(^fFAb>sva1C10#DH6+t`()GypCanf0{CtP5%+hi!r;SZAQXMa2 z3K`ne&vBngLn?A}56HS4LsaFNlj(XgD-xYDPwDu(Sz~JxXhR+7N4iZ>pPT$p1xi+< z10P%<@e+MpM5*wcCc)+GUUop*JY}=AQIvG1oZ7C=l#>>XcIcG6Eh*nK<_|`gm>7v? 
zJJiqgdbmrQi>>e8)+o&l^lFQgkeDc}MeDQLBec%4iOcyjoE%NDeZJRGIUyIAjkEMg z=A+HdR1F1H!WNSm);4FpTSI;i%kyl7XA89UhRNxvNOv;~RyRm+rXI_nv1fulTd_`E zNpNqp_FKyPv|}Hv%B4ab6D1XEQWwT79E&qEtzpbDzhu0g&6H$GhUZHd<;ixPjmD!+ znK|C13rmdZWJjA!AIJ8*SUO^H?Ji@p%khI~6y(yFTR*k#OSt{E~& zSM9BKM&eq{Z@Y6w+PGXVVuMwW8@nK!u4qn}0Tx!p-pyu1L@4F6)CFsPx(9%S+lQv& zGaVWe$+GhAJoM6S)|YfWLU)ihi^%X)rsQ+jnscsxdSSSvWw4Uq#cz$6n%FC4emWu> z%|laKOj)`GM-`TRpOLAHvMg^#5&lw^=9BF>T-qdRba{5tUPk>%=i+3NIC}dE0)G-2 z%>+5pP4gKD*;gHoGNdr(sBE!)hVOMV0(n+Lry~s=4%psK*K1`mNt&kgs(BEUj>qhR zO=_*JNsc{s#@p${TnReY8GUDSZrOpnDhw-2nz(qzq*+`PKqfYtMB>2Zyq$CB3NFNC z=XTsQMEgpSS+y_OhHzuGSm|NtBy;NhnC_D$*HWx;ixO*OkTgUlmkgwjQC*hyT;Q39 zUCfpxbQtSMg|*wLD$52hNCgv2sPhaGu5Q@lqs4U^G#!v#R7|l6Yu`noNRb$_VIYZ=4TQ!+7V7&Xo^y zCo45%%$WUIVy6F?Mj&9-n1bh`gM1G)T+QxKs%f=C!p;^-?wHG#aaYY{bKQ+|S=>(3 z0VVrwWR4Zh!+@PdNw5^?GO@=Fr6>pKorR-%>o0;00ovt3oyc)-E%wdLD#+X!Q*PDW zvG0j4*v6o)lZ9CEE_^E?sv;JG8K}S>VFL2Gax4}{)7T(%e_Y6s%eGxF%PMIxE+#7O zDQ}@`vBn}-LC8Fjj6dr-eSnu!3?s&wB#6Tvz_^ebf>BF%Zr`LPmPk)m_?Cei55~=m zw=qdfJGtH_BP-0#rBJ}uTV2Vq=AO>Y*iGj$&f&gB*1PD*87al1C}t{bNOdfsBtWKA z_l{NBd|^5OR7kH%LI4}0R!aoCdImbyvu572rDt$x@ae5X1A`qKy9ZR`=n4ooPK4=r zT#IFgXRhJ0x$Y7yGf-nFEC=PRUKmqPt|`W1jQv!LpfJ$0aj?68vo$euWjZ(YcIC)) z%B99`z?=#}?A;kI4@pM}U673^FZo92v@a5Da4l3?pr4Huwk-;iW@$uZ7KW85)v|{Q zW~gf!4+Z6C#v7)p*(K0)5dG$YJZm3jida7~Mcj!TSt6DzQd>o_bTf}w{*6|9<(5-B zJ#0)L>5`4nhXYxWbrM=Gr80A|W1OL8se?QkNY6ObrgpJ&&vVmdW)P(B(s2fr$n^J# zw@igd^vDPqDNCcHlU`Dv5t7py*g!50uEiS}D_3;vB$la&Isr50ksZ{V>$}~Z(dF?P zd%I;CO&b9x>pZiojhiUNNcS}Klw_AeKblh=w{yGAciSPPeS$2Gvxlo~dHbU!{lm=i z(DFD!1MJ9QvWGb^s)i0D+)!1I0Zdfw9@*VJU7ZsRE%u4Jj~m!-$Z6doRBAQG_LL^rWE zIv?>@>6!{mmek|!B&z@lv#r*0<)C=%b2(@@h0nUg_O zw$AZVk8V(tNd$W)h_+O>ti@GAqED+{9Dka<*E*jn=7(Gc&p6T58&|92;}IPBa<_+j zVy2-mzR@sr$Us{rWAIQqez}(|@zd-B)zcQ(1G47DDu>6n`by4;$;QIG$BXky8bP(# z6Ms3!?Oz}DU*N78a7ng6mo<5>I4 z-W(@&pIdIdB=S`X`cJR>)W$A>njC1^Y2HiQXf7O-wDUhZB|V=RQ_2nea({@-9_eU2 zEs~MqW8Up!5In z+)Avrl#e|m>6%1MELL6vTQpG_+vUge2X-a5VW@X1({|7>Sre><8JqoXr!uu8iK<$?9jWDSS4pKN 
zi#^dZL9;BMAnB9-Lo^`C5ShSL9ob^*Wy0&K_R{Bfnd_EShEWJtV41bBM9!HfHWz6% zr9q8@*ebW0jEK2O$TD3fcLt5n?@E`GFPSNkxvQ%ioyF@+2UxLkYxYo6Sbb(Dt6S%l znNinL)a7m2{bBlqI0=c0=@gq?s!7E?5fS6ujB1iRNVw8;nN90n7HJE!QLZPVWso@{ zNslZRZ*kZDyCO|IGUGs};$$U8PF8^SQW?pB90@hqWLZE?5rtx=1*cEni>XJ|k`27r zB3RSZ)>E{V3#(ukd@+G2-+w&ZKlrI4QHNJ^II1qNo>Thi?bG4j;15h=IFC~iDm zwz%dS_4J%3nI&_8c9(`zy5zuerVBNSCSI$?d5i>o(a_?Q6aU2wP5WVEHVBY`{o6aOES}JFG30ACTiF4K&k`a^RCjIx(l<4g>lQ zo2KpPWo6@SL+wnL6-FwVx62W76L&dmH*=TCPVzFoqgT2GNkVgc5@9S0vP!S+oxM04 z*=8ebHoWCaYWL=b%eGyWe$nY>YN`Q?$n@m0smkT6?aQ45(=@*{FKkvw%dYbb1>I^R zZDpzilN>WrGFH+dn;0fd(%LV^l}5*@Dc_W@gJ0CkT;ieCt68#ZiYsg6&S2BcPCt7v zlH*AddFkV{BS=NZH!jUeTeEzU zvbfRrcJ#^q05guAK0ByK+PY{w$F)ORuFbBQ7p&ikX)rU zJ4i-Ur<zt-GNR_6Nc+;W)4f8 z(x|<3m$Eam@KF32oQIlI)YKC4X_m3749IoZ>!$Y~8tB>DInc9VLvP2%fedew{Ir7( zu^CM1HizkILE$C2N`y7k#uq>9%!=Fb6S?{3Bs1a%QR(U~cUKeiDd+S=7uQ(`i$AZ;tL zH*Jg6G)a>dMp``aHT9{Rf`pzYDgA%zv=UmaOuL#TXzXqs1KJx_n1YKZ{_K%^Kkdy9 z{F%ZG2j*6ASINu)Zr{Ts*^*f%vnS+spN#C*EdXo_b-N%XYtpiva+o9D`+{lDdM)EK zxthW(9{L`~m+Z*c&u64A>g8Ia)Hgaq;n)|{)AuCCW!BJ3jvOf#Yk~o~hi>5XihkF} z&Dv3+&_~%CFAAg=;>0;3vw-7L`_|}(K_u4@k5$+Ax2xK{U%s8kN9RoTYF^}sYCFG% z^9>B_#m97K=zAM<>YkOK%X5)$~ltklg&q#{tdfX3Q}aS@Y*WnA=?YSq@2(Uu)faH1G|;5&C*%Aieo<27^{dvqRRR>>m+As;7ju% zqwJYB<4oA}^(cz%B^Fjk*+BuqBgY1=kSWp}3RhJ6YC zXkr)@a$U&~Uw&lwk|`7ejqjd)jMS%MsaRu8$@pPkW}T))n{`S|lvy8U?SV!iowvpu z8e-x?E?b((r%vvs(Yhxo6GmrFh`A zy0W6cBGjz2K=nXIB(rwWoaef#K9l?GF4o<=)fV5uHKc+Q#mY<`#;Tk|$g~RDTk0O% z@4^dNlvNM(QVx0hM|h$G%73IEMKMYLk$$`+=Sj<8CPrsUf7rOn>eNSFMVe5l zKQ~^g59!=6edSE?&ukrn(aezS=+e#eGx-*%ojS_r+059Gf><;e zipzFemWb_FXmz0D{7Z!FbgXVu5?}P1Dfju*m9kYK!;S1j&t}PFs%8+oc*$tCQf154 zIHr@snc{?=&vI$^F)#iS7Y=QV4QD#y%AqbV4bHFP6t{$BF{oNZzpN}B>o=#KyV_V8 zH@m8$mBolvg?e3g9}k(0#zWrMj)#UKnKN>}Y}S2+_Cg>QFA2*W8<%+}_GB?3^Q@DV z`Ad6J&19(@t5k`USiC<%)Od;we!d?}G@C7vX_&n@dH^Mp8G9&|GVLyA5!Pg5Gc!mz ze6v?$s;7(Cl;RY{&h>Z_yQsx=20OmlFaVLCki^D z7DrQsz1u!zrcCQwlY@>8L)*6WY>&1u-`C&qbkyJ5vzap{oqgLBTHn95qpP!HU@#i^ 
z;J{Glrv9y4&EtA`z)^3{mM4dv+`9h#-JOF&+q$1(S}_{nBehH(exR>IK2{s8@7~B? zaSzZrs6h;E(QpP_c>M$MV=u1bmyUs`yT5;Hf7H|0IkffZE#3Xm21A&j>_aaj=mw?u zPXcatfxDkWGhW?NB?G% zj;@XU9h;-R4*6>mHrU_Ib4Ooq)UmBg@@ngr!Tznikv$`8c4t!)wruO|jh^h`&xVe{ z4x~2>*$mUvNIG3yH+OH=R}Emx)-CIOCTD$+>>{EB+(pKA^mlHGy1LhI+b9X?-MW!T z_iyRhve9|RNu9p#EnR|0whs<|P$IdZXJfSOsiDD5;!WSyt-V77Js;|JZZ`NYn+G<2 zU|aXLZkxiV`+Ekvqu%b0fmpnAYk#-#yk~O^_74uqn}oS}YuC2kn6YK6e5p7>S+;E- z>e;+`+n^k0iX`*5K1BidbaiV2;}`Lwzk6Wow*F4U*krz8t2{~mpr~8>279({8S3wR zYW=nih9mmBzGsVMPybUrh+0M7BG2Mm{LnYp-yxnpxn*ltq!~Rlux&u{i#&a5TThp$ zl#mTj2;fEoDoMqluVY}Kv%kB`=96S>9B^O%)=r}_oxNMTw|93sP8Zq`t&7$^{@!Tq zWBgmizenHeUp!08^h6|;T<;=!!e>=$_Ep<2uGWjM%X=b1v$ys`I^Hh?l^xvjMAXqK zdG&-OW6QS9Pekjt4JaMhww`Pm>;{6>*D<*1iD*Ml|7KLF+emk8=0SbYC#&AFY_7XI z^waZ9OuHq*`WjaQ^qvrFx5P)9O7QK~-)qc_^qNSIFRqb$0pxb3H4!FG`HA+&-}SDD zPH$Bt-9e#^Cd+U}+_vc*MUaOy|DEiF_QrE&fn4OLe@M za~wJ?o|bc;S!i=9uvencqa~LaRt~f}wo99?wTxn-XknDOi?<>Jj6|@*-qTvB?-}D{ zjC>%XU=7FEvrT$nb21rnMZ|G{@=&8LnqNvK$TAvs>b^xfxDw;AHhh_p-6@!=~JE zZs;)?*=iKt*?o3u+r>3DfF^g#?UmZCnaOV_k(gE(C!8`?E?)t}evOmS$%a;O?bZZk z0o4mlSxeT&nFJ3Wn>bhzvkv0Z>i_$mSqPJ148< z#Dpg201Mu6LhOA-@o~zOD4wN+b@fZU77Nx~ViWt6>~yq=)Wf#*nD1|HoK8aaGiLMc z7RGEQ=YIEev0Y-!9(Im5kH!&5a@a33V`e7N^vd3x4rnP1NX@c|w$@Gk!1yk?FHZKX zg#qYR2~&AiO7S%bNYSX)kWxEms2?*+1X->$%eV4zb&er!L8%L>)oh8Ehd4|mx61bi z;&h9aaULpPKvq9}0WVwl_#PHe-S=VLS5RZ+7zsvdfdeXa@3W169* z+0|zEKDqP$Qxn?amP@T}%p-~<{cj5wmFW=zd5JT#hgP;vRMNGB+>;bS=F8^FFQ#@Zz|Kjez)*g-1l?$Q=}n_VjIv@XrAv{AJB7JI4f zkZE&Bl{q0FGgJLJb2$`2%nSPROs?(6176aH2aGNBEM~%eiY`c6SB~PzrdN&TSlxgk>+(h&f5gn=z&to_J+IIXZ3gHe+*pLF`44PkM`s%O`Tqbo8PJ zGBZi>%a%;h0yAand6dM(4wBSzG+U%I#msdb1#^-{G|rvjm{~eQ(hW6CwyJ$7-(YD9 z-^yV=m~oQ?oGECVd+Ekx$Lm{Ae1?sB8G&*77CqGDMOHS}>=pSOe#%Eg-DOBk9?300 zaSLX$UTR*Bw_VjvIcp)uza|^iDMpt`%1lP#I4*CoDOj3kx9n)Ox~D*QPI0bTUgN|D zm!$H})ilq_o=l_1zEfXSxN5?DfwGJ)=(@Hq86*HpnemMmiJ+1<@#6|*vn;u&uY~0= zOdTb)qoh@?E|jaNhs(sn3m_f?+04+7#>z#rndYNW64}=tXVV6ghx7}5y1{xLxytbNzEctK!KDL%o>s+#ZxXkw7l+a^GV5x5;jlf znhou`77&MsYdjU9*!_dSKg7O4HILBu4l25`DoS 
zHq#r|lVmd4N^EZR>13orS@f?T8hF}G;KtGI(T_t4ztGwmqjq`#RFuVhrv9O1e(|AL zFc}2KrA@WN;MBok3oD!NjEe6xvs6|y)f%*4p?TsRt}%bwUZ;&W%PUj1%Un@^yW4HsE zLN^x6_GG!7T)$N%ciGFGj{4T@z*rV0O3_;#zo$|u@7Ak%WEMaRP88R%Wv9F-wKW2~ z%N?j!qUoyUiL|?jWA-kL|E504c%RmM4@b}Z6AM{EB^g1FU*Z^%WLX=7VCIg>ar6@@ zoiWgP7Crpa7K_+1^Dq@#hJ8}3I<-lXX-~~{8xNUlp<*}Q;T_Yon7&jFlbUybByxXo+e#T2wdB|GiuqGj7zv?JpVadl>_ z9Vfq8ZSYx(C(*X|`^I0q)S;84+g9#$FtaNbQ(SSmpbDv8*R+x&+7Z0PC14<)`7m?V zu2XjWR39%^gWaW&I)|C<8-0EdSW`Q#>KzDm%UPzDf?An%?i!KX2c3DZB`^zltNF%0 z4dk0rWR6rwvlK}7+52(f=m9Glt*n$Ljj+oU>AGXdEoCl8`4F3EcNcW}qTg);p$jUg z{=9{s%kK?wKP`%F505#fn{tpw&S4#Tkn_sb0(v%~~-Bd7X+~xe(ECdP+u2@z;7fWXqhnaK%L>9e0_6V}86I`_+j;Hp+cW zVAVTumr;|l3CiP2Q^{nX%uh)lN^re2b}Qe@L#MgHLHbSS z7dP~5v>$qOm-gsqiF_F~h;{x~T7H>_Yq3)wo=18Bieo;>#VFcMcoN&09gO^AcTHB*#BT=*#=N}k18Zpu5B5q~DIbu(oAm>VH` z^vx-S?``_F6Bi+`9WxfR+>c17r5mr~D5canMw48zjhozt<2-gw;=7<@PU1|Dkw|Vk zN0x;`$=V8y2URR&n(QfPz08)d^ZIJdXLx`ikZ7#hJlaahXx7CeomaFdyLGW}ghs|# z|2hNWq-}JS>v`f@HF4y@BYGm<9zOIL6?@1&karmwubD}eRb}uM46eCw#YM_yrL>0D zX!obmTH~ZzIWonZ)c1zev5bCo2CW#8ZzVH%M(7x`(t_v{lZ8Sbh?duBBOeH2fX}6h z7D?*UtsL3Q7G?FWAR~$J3xz2Mz1$R_I}gk~Vj4F&(aA?CCTtqn>@PsIGta*$#DjM#6*|D==8CYEn%L>v!?TB7h4Cr#MZB5 zU*H3n9;$Q zUB4Hjr;oXU{P+FzAN#Wqck@ji!9JIw0XzwcT<^_qi#5`ZzZ!synliG*fpZ z)g~rC9-l(tx0fAAbJ;dm-cY!f$KxRoggfZrEmW5eJiMmhxR`L3GGUc@WK6y0;&i{w`hnrTX24 z?-c2~bIl&%?=t2ly#6vf6Nk&viS$g&GRKTdQ?eOOuyUW(T_dp#;S8AkLZptHYr#$D zmoKPR-b_L!f>;8osggF^x#j!or-_DECQN4v#q6kwL3 znA5hEn<;4{CYz-}oJmxye-`oh%Fc|U+w^;b&7bF)(V;1vK1!vr4cktvaMopJbjJ5Z zWI1$(l9`o|o$b-N>=}LBJ*0J7zOyE`@Vlv^**VsWZ%sF-z1U3APeydf0D+!@RiRu% z#>}c2k6ZM<7{?b{oKt(9WG;INr!WXYdiC6G5&IlLNh_GvtCSu#4P6otC7P_jP0;` zo4IZtDR!3l*vuN_@FlZE9EZ{2>@&-!TJ-;Cb!AM4vz6kmaz&qHzR(ydGCA_KQ%j0&`N;-cT5RyIWNJa9UNpOTdEQW#nUsXpg7G79pe5jFkIU@;7`z%9F{n!`n zLz?5Y8yko`pFMFu)rlqf6@|wl3Ui9F?2Nv8ve=ahwFD!dU z`(ta?ZRqXa{^8Q-MB}+bpE>%46JI&~bLYN(@!QeMUipTHzJKMr|1P>PPeQFiv;Mnp zjzgcxl$bN8MbjYeTVmM@^Ikc}$iD*Y)rvn8X{MT-eUs+tymQgzd6x-a{$~F-I1TSR 
z@P3PbS7RCD)9>s1AL!pf()O)6-;lrU_taUia`EEkt)6dMg;DH1ut8mTpl8xBl$n};Q!MHjU7>*BdJ)hfTYf!Kc5F8 zHE-U6`}AS%+cy(U))!y$8JpZTT|K(A&_ZuZg0aU%?IZ zEOZC7af&!YFF{X3??Ep^5B?PChF*Zqe{B@4_$pyS_d~m&i%yf@&_mF}(7vC+J#-Ly z5xV4GkeASt(0kAmX9#}@>HJy3hwk_}{y}R$Pgu~aXOV^O{~GS0*P%C{FF@}???4y6 zj_UslqzQTq+6!I%uSo-R@p;@sTYeGu&}Co8E%YFC{_DvL=rZW)Un2g{lh6`$*#+Vc zjs7k1hqix{a)Ta*-iF@#7U8~u@S)3~M=s$Wy87F=hhB#shPGcO-=T|sov@&%{vF{i zjiQ@ZqG%QL_HR?p(4AK)C+KnLdFa~TK^{8)Kax&p1-kf+#2eZIJq_)Go`>#$F8N(} z(4)|k&>Pn%FX+?+fSv^d__cJ@Y>Z2YL^B z8G8Ny!X0$kE!@0?eEOd}Ll6Eb>4e^bvb-K$|8wF5?fVPT4!sDy23>K7I6?>ilC(X9 zd+1u|nZF``plknz^g+9zC!wqU7Pruw&>PTG|C_i#&q5c!m3)AceW8<=$ZC8(Q)XqM~N@A z2E75@@z|Vb{tC)<-JEC@bZ6(BXb1Ej^dNNb1B3%T2fYG~w#|v|LN7xXza71KnsA`| zp(W_$?ZglIJoGg5gWiOepf5rXLN5=KFVMvpt{0(8 z%fu5}8X=zVB)n1b6?&zPJLs_n;XxZy)vJ@h%kfnNF%!ht@2jBuc(FAxrN*-sEZ=r!oFHuB|5=n3@BN#vmmze;+bx1iUc zgFi#QLr2dLuXgnP=SUB9;aS3gF8Lbnhe}X$`>32;>4YWYdLc5^nUp635!$zK zZgc~B7N2L+JIhxZZFJ@ZbSRtGB;ZIKFa$c;tO2>?Sig` zmY_#4QV$D%Iq8J%Xu&;n=}O`Yz1TK4TDlH>fUbq^d>D7o!AA%edJlRQy6|1(5A-JV zE_BIbbECx_@E^w=^!n<#(GKX+ch8LuLK|yv2R-y2@&|fq?cC@d^n54jT#r6<6Hn-l z4TJ~1x{-Jaz6p2GGtev0h3_Xk=zi$pPRj8~!h;@z?tm`pB|PX+=t<}e=tby@&>PST zTgcxo^rDY&p^N&-U+4~K1-f{UbV3KAXQA_-B3yZYn*4>H{UG6X6YmerjaET>cMxCb z(r1Y;^x%ha2R$}K{s_L4_(IP?7jGb5MchHx?V1}6La##`f)CG)jzdqD=SG*IeIw`t z^u}(=b0g(9N_j%Bjo}V@2U>yleT4Wz=kFn0=oRP{XrY38=rQR0O{8m_^g{b4NH6qc zm2jc8I(h)T3%v~8znAzz4?`FB5HIL5Xv;pz2YMD-f?hg6xX@+K5iYbBdKr54W8@RG za1i(JNB=%SyrEs6BwXmJL&O`}@&mYs9{oY|73B3n>0lKsoe?Es^KyQ4W_(He;2;~D^cZ~Q#kNqg&L2p6t zLMvY&zMF~1kK+zn`V#J-gFi)h&=;ZSp@+Uoc+fki2@ksLXNcz(!ueU^32py5+(8fj zJmEnX{!8)~x(<37y83IB3$zP*58Cnz#CI$HLf1ko=g?zl>EGZUdg(mwp;w_7p{u@* zd+1H*{66@^E=+ z9sT#@(}46>=s)z(719a42|Wlc{0GVfx(Rv(dK-Ehy8SBd2GLV!3$*1L?x0=2kN!Y6 zU8h{2NB#hL=)ylFe%lE5kC2C6gZ4r%{ZGPyuJ|+31?_`ggf9DY^ZwAM!ap@*Tjp(mj8pTRwJDfIbA=0&~G+mFtRO3?Z5 zB7Ep#vQmC?p6ie19DLK9H!OU!zAxpk{5ytUB|&-@zH!l4=6-p;Jo8(|Ke|&56O(|7 zx$uqa{^{J~^S^lik1hD|gp zo*`3oaToqdIy2(c1%7InJcb?SQwvZD{w4xHeLpWd7rt?4oKFv&SojkUereH9zWU_i 
zue|2VOJFbK-*E}=Bb1?pN4~Zj`I8I2^1zoD0?AL}h3Ol$RH#c5?^w=A3I7KCD-*Ou zJcsd?a4@x^oA6(>?(@n|=F!f5l=P*-xjz*j$^TyXeGPO|!m~e#M+v+O{H$PolK<$5 zEdk-vwM19p9}&KmPnu8D9<81-`Vpo>!o3XtH2nGE-u^WG;K#rj@?@4fO+&PFA37s^ z;$`yLy%zpS`0ely@tl>8i|~(4lq_=E6oe}Sqd?gKrR_%-0S z9H+kKr0+QV^Ix?6T#&DlZ|C9Pg}+eZZ-0Vc0lyXC65b2oH^8;NX7pe47m#Q0llC{! z1JU7f}}Jcf58m)lX>pk*j@)_*l8Z$#i(f z;g9|#d5kxSUNyI;7m-_ik|>E>Mn2z=aHiqkgMS8oDaZZNms5_@@Ylj`|8jP|Eyd3r z@K3n($@DzT~K1Vw|yC}AbOnY=1xgB3cR^pjaewX1NgD-YQlJBOzh+ez^ z|JrH#0C8`B62E)km%%BrOn;X?K)L@6^+fy)`W11%7XFogj_t#9*uJy??SS9=FDM^z zPx{h&gWaQ=BaTfvk0ReVgH0kpP@dx7S@^4dmb6NKX4+BCilTlYas&C}KhIc*b}lMS zZ+ERiWZ^4lk7yJY=K7!5X^87j>PHvu_Wx_*F6m6!x2{(*>8l`j`QMNRahcH`ABBG% zK3R~-KMQ~7d2458l*bkL3x1KhBknc-5<5!t;5Pg|_=|*Ze}d1aA=w1J!hul1mx6B( z1VRS5xM!V#76lxS!m;*=wrJ6wjg)s*9a3gS^C%uzjWE!g_-SBlHXE( z2a&(`>$Y9bDBqLt+rL9P;_wo^)bfXa8@}pS&|V7v2K*)ej&LMh8UEjc-+12oALO67 zUrIyK{u|`4gdg}X<-QjFLHKt>zi8igrtE_BxK{kDAiwsvXe)U(`Ik=5QTWH90RX7ujc35;#ZLz_zNW8f_7HISp~oAcZjQm6ZUJi z)sePh2Xg0rx4FG(fS&@tBklwLC7s9N*Zvdf+|WN| z_v`6;a~}DwKe2k9k$>0VpZXuR|51M0zPtGYjBu^qWaXojuj~gp^yjqSJZIIb)N&X!LKMNj7y5sh!xnGd|PlsQ$?Rubx;*ac$I{3HnZT|J8+9hKZNIdT$SNS_T zA3=Lv2>(;k$Vl)1Dgrm-JkO zf4QCTML*H|TB@FV_Q!pjWJ^zU8~K}$%`xp{X1OHoxrD#;b@b7Xvu}atFh3<-YvDJT z?8MvnC+-F<{*AvekS_NzV>&ct7>_?IQclvyM{e3+WL(^8R#of+LR^M{^QHh7_qr8XQ z3*szTsC!wE5?$G;5zsLpcgYd7xKLh_r4*xFvMNiT$cuKU-9Q4{DoVseovF$)5yJu+$e6s^ol>1 z;jhDZJ2_2!RR57X&%V4ak;@5Bf{>rYe<^>S?`OY~0D+!LxNG6B9hhV0Idk+-{3-FL zG046p@rQ83d^imMCj4vC?xy@n#$OWuv&bENn))kpGt^&+-wottKVc;4&CxsYcm5lR z--qTz*XQT?o7h|8?<(Y%4%v95;+vkoko~x;ihey#+o7^gcj+!$ZaL!!iQj4Djt$R= zF3qAJmiS#mzVH$Bj^}haC;2II_mDe-99>Ip`z`ImvNs{Ohy7_HmznQL{ve@0iT4iN zmB!gmXXD+MZl6>0GZa;H6#2e7`}o9%jCy?*{=x?P$|N3v9*O%a@V8Ge+4h zz5DTh8hsFd7QdN4&&@IGJ3&9H5kQf1RX1r#?tNok#A)PbK|?mUC*A!L(mD zkh^pk|8SEPpL_6c!q2j=CB94FLOHWvcPG!{KJ3TW!oLN-Ap9Ub;(mwauNHoWJ|Bc1 zea7m$wy#NkNj{&1e-Xasr})87@blo;!51lF{s?{r{AvdN0{HXb3j+Cj;1>d1@^|q= zq#OLU$OrAd^Frj-B3Jn#+kWJG7usNwAheV zYiSKeY83CSDD|aa8QyFvXfqlWm0N;Jn@sWk(l)fD#R>{7t+tJoT5s$B8W3z0yv2%| 
zD474}yU#f@=S(JIo@X+1_S$>xwbx#It+m&FIVpNVbe|A>LwdMS>d*B5DEPV2vEM0p z1fK@H@&U*HN#T>}f8ZHEU|*{crnFmWI!6!&e!95J)8rdp%f4RW=|SHJ-%F}5=I9S! zzNB9$IXOYTfe-rSNA&gsNtpC?{7$7;@NvLz0lqQ;PsZ5TT{#5P;(qHSL zoZ#z#mptUiN1Ac;H1NBCn{k?uGg~jJ{9fRZy7c~<;D>?l27VR4Q^zSF_TJ*>14s4t z=zl1m&S%Vd#F6)eJox=s@u5Qb9`(<6lzt28C8U?|+x-*Xb);WKx{-gMKasR2YR7i+ z9oWQtER9?X{u1zhkFy^?4P5mf1781xzkZd=I0rpL`Ye8@_PgjC4ZIC_K?V<>(eC-= zduLtBic~i!R;G2O@s}H2ze;~ZOfHwp8;*~x*K>E|9 zUlZgZ)sAStm1o^i?aMe9`ko9}JC(nO_9@>u@-5m%|A%<^jDD(6zJ~PnRd{s1rWts> z>hs}Aj#JZq@{Rimc2@nFBKN|hb2&u_&1dNk@#8T0u6ib5wPo<|Irx#I0sJ%QbL1#7 zZ}h~q#Q$mJyX)uO{eTM6SCU>yp49OsdX@v<1-wRlJdHo8dY&eK&MyO&v7*1Al2(z_WiQsc!|}1$^bRUVWVO9|V6L_-nxDtA6Cn zYbVtYCxK`DhVi3*IE`Eg-?(CM?sn{uv3JS(gl|6ZoqLF%eU5%rex2Vsx!2KWr?LCW zzn%P}f18YFq|7I}jcc6)YvTNl_6MenvqS_${ikzc^Z&qk8GgHelA95vSCSt7jBy~o z%pzap#q@eiICbu9H}E=#&z1gpkZ=E|@YRuj^q-yiei}K{xwTt>myp-}Q#~({UPHRs zAD5P&$$6UEr}J(#oI5%$BtCy3X?K)wG{~>|3;q5Xd{Dmm8?xFQ)y#j{$er3wxq> z^!fM)e|PkZu~YtWsPld!m{d;j+7D+wqIz@=u;hQ8d8<#~C;2Qn)H%Vs-f-qCqM1Ln z<7v|CNk1(9r;Izn_X5A+U?8zyDs_MJVc^pa;TMoseDLzy$o~cSxxh07cm9NT1nIj- zAFs4D?VbkwCE!9TFVlI(lMuZ!O?mNUCGcyGF#db<&;wtTPv;lGf8yhgY5$QdR|Uo%6d!{OdM;{r;Eo>m2It z4(z9szq@`+ng5Z0Gv@^hoblkLPy9R}_{Zp%&yXv%Q|DZ30>MOlwjg!=?;_R^L%|-$ zmCnT;2fo~)!&_c-=)CN#OwKFu+pk~!{9Z@CCi2Zvz7&26&(pvU0KY-?`1&oWKTQ9V zZ=lXW3Qx*-6`o_j7XjZXctU>s?1oD!vQu$KGhu|f^$AyDdRPY3ze-b<&- zKMj2RAo!IAu72GMymD~Rp6?|3qkF#|BcIN<8h^&nnbZ#&hy6;hKSP4{xs%iQ<)UjE z`P+sD)B2~PYZ36;T;xb}_n?oJPv^1svsj? 
zcuUhy$AH%Xf4ztD{U(BTIOo>{PZ>XI?>OLxfp6*oJ|Fm^5#I8C{@w!oIB=uq)B0zA zJ-3(ao*zmve*flvDk{wd$+$>bfw_*cGEJ>jfkX#D7$e!;n% zUv&KZs}gdIzE6oei2jx2-&^d^mm;siw;6cRdBEitoMt?VPcM;w<@ubqHSrU8#!z_{(^xA^Vb4dffgq~r$SNz=dU zfZqz7X?o&M_;pV~l=O9uT>AN$)X#g#_xeSSJxMce4+HNvA!xM=PfGo2XU0_Q>&1>7 zq$xid`0*0PPY>m10UtNX(Mu_EsCL`{d=qe24<_}SpHCaf*E-p2C;an_?V0^n3>Y)xKHeE4s|FU!UQhnEN0u z?^zFR1kW+xr_*Q6xX{vqq=aZanCtvd{?7V}|>o=M45BwN# z({APAPdqwI`ZZ<9UnUQy(Ick))9{PuV&^0$J=g=`okl+0<8Zv!>G6tw-OsUeA?LmM zooZiIukP)bRzbgXw0^YI6X-^8B!oL!D%b2H zZxcL)|B{E3!0T>w=5NTkza9R0s_4@FEw5EO`l*L;p?nqOn|8CKH$OvvtR!Cr_k*-i zCY3(n*$h1A7H7O9{J-ve*h@a$D{?#y9`X4w@XG)2%ZbuYs(#W{m;0ymoQR!2&rRgp zwBJdODm}qZKRu@XeJ*~Nxly9MU~ITC;xbVntZw^vqRqPN7F~SqFS4@CJ>q)95MHyO;dq?;w5<;30v}-!H2782RSk>5UI0=O@C` zZzg^U@a@8be~@gae_vAx`9^TxOeNWpa$xgI^j8311RN84TKYxTI`ZGL!twu7?5N;R z178mOX?~~Ld((d49l(uzB>KXee6LW;Gd(t737~*$M_GC-#ZQxt1gU3 zKr6|&@=?FvukQ7VgEf8YVQH^z4jP-Mf|2cUO!3j z6TqiDfj;a3uKT1$Z+7A&Y2;D&O!fP(U}FDhs{O10z7lvHbx9wk!j}U-4*Y4sMW5&V zv*@b>zWj$lYl`SgQ-3q?jaz_cc88B9=>YJjpLXm|iarEv{#4IN^6lE@*t3*;f{(ZY zKlsN<|JBP!je}{xj|-m5!)fF~_--M8!*e~idn531yOZ-9Z+()B?ZCGKFW|TPr*z%V z)l9m6yMOxqI_XWMizn`%(oc}SDo*&{OM0y`r;G!Y8wcFnwlfBN_IGrv>iSoQ7#z6lwRNbFz3I9X3jxjn)c7(EM{x>@j8q2yGS>5f=YkNzliirq=!w4 z{ZDl3p0gU#Pbi6ToYaT@`PF9fRlJ<4*KNHd{JVhH1D`KE&Y$ofApHR82bAW+pVYVF z?@97S|KiM-B>%eKD05C&#e`?XmGBPuW+f!#!EaZDNB6K@^9til`OG*<+Eu}IKigfv z4S!O`v+7v~ycYO4jrShtReN=>Tit=~a=usP)1<#n`o@%W_4^6ZYe?7qHm8dNsNT_Y z>9fBvjzpjHr*=&vJ^EUDzo`WHBH(xZ5BG4X+-c$s4qEed7QJ#dpSF+{Tg>uwVdSh> z{;`$pAhUA1Ua($1wIbMfYONKk1{Pj6tCOwK$E%jl>SP=oUw7B6&bLGP@%pv-9r3l- zb<7P!I^(7JJAcCOrG-}PXm6`?Yw2}65A}|8wuQp+BcZJL?|x?0omw%o5!|ueq3d2f zwPIJ}M}4i%53de3o>&lSJOv#V%cmzoq2doi!Qu}>f%y9&E6x;X#~Zz@WrXM|p6u#s zTHDoC`!wG(E62yG_V$mhtL_`Cw4&vU19j!wLecUsmCGbV2cK9_+W6tsGpQred1}Sp z2GzO$jY@6KYY<%iH* zIoRqf;QF$z%dKU((JhaQmKQUviuTaZ;?M`*eWf778n8HG4QiWd#SVpr#M^pVrw#_K zmh!DuT|LuIhvwD<&6|e%XfEp=EhluG(=A^`z?W}ZX0-hGU0u2D!AMK~3~L!%b1QBd z9O=BdIMNb6%Zi1+eb0k^XIQbZfj%3?26A6q705bzJ(B)WFuWlM{*UOdoS-#nG9lt> 
zcxdgIeX0h2wL`0DzLB=J4Ypct()UoSWhvk7A*;A8lwaIF%xc+A-TEEpyBn=3ZCW3W zmjCNZ;$5Wk>)_c;nXXW8WGb)mT(14tY=C%RWDeKwUr@|T0^03u#Ti?L232(n?#oieeh}}W|9o!IV>F2mV~f@2~|=2U*M7KJBVN&*en2 zI)2&yLS*Wkenpet$&Pf2cXRt&&F$w!T0RWr#>M|P`@vh%Kj1Sb5Z>^1s84)uR;1;+ zOlui3TT$Gf-&xi&;(8TIW6Ms(2aJPwuNgb%&ahs-ZcxU1*MsBgQ!`)bO`QiYuf+k% zETcSh6%UHE+(+M-H0m2g`MLD-T>5z){oF5*MIZMweOyeRXPxZY@gcNK3|q~)e7Jun zwi-EZ3+2R*{nV=akg*i)Z8fj{@Vl?P$)_zeG%k5gr0$X}f=HjTsaz z{|YqD>9XgH&WbXIRtKWxKPPQB+Z`JD7Ei8+ zX7JU*liHcp=&BQE$JSYw7+qCpp{p3r=YUVT>ePxoMnAbY4{6*1pF#h<$(TFL=LrXY ztS9`}`0;0=SI$VmZ|mLPG``Q+kiLyVqw3(0eJl{DzwKUAN5|xY)Q9dZ)^nkeq3q(d z{8rw!PX0K@Z za}NI1)RCZnM25lt0{t!euW;~>bMWgqE&iP$AN@&mY$z-IF#`k!*@c(%tnGX1>Iik6>!6}nk`-#FFa3l>JpcR2Wd;NWZQ37_T1w;p_b zTzuEq_(p#^cMq;P{Q`c-eB z+4vnT*l%3v@ahS%=R)}%U8}4{e9)G*QtM>bs6gYx(73hKdKvkRNq#T|A8c%*J z(D?CfSwG-IduCiwe#Q$f!Lc>R_w?^E7m~U z!szxOGBDcc&D`Ry7c!@|t?lzn+uAHXVLrY2^x~7rCxcJunXUm=Cv*Ll=2Kmdif(L( zq5FiZ3pb55w7*4N@>69?GV`M44=u3j82`0}a%X&FK5O{A5vT!#%{}rF9OUK8Mx7KT) z%8gZptwdiR!529Mj>E`F=NGNIUn6HH8JjDf7!bSnJ1cj(^^L(6Dc_r+;U@3@k#}=% zE7lbpadZ#9wS2sT@Z=Ca+O#qB_kfPc$UtpjeVG2vi?5+w(Z1G1hd;HB?A!iRQ9obX z%Fyqxg8wS`@V9{RS>g(zz}MO;d(7D7st5b{x`ejur`(KWUiE>UH|+1v_4vHck{6jg zUN~n!Z2y`_XLW|vy#L-vXA$39Gp!vT_34=AD8i!)d$YH`Y2b=J<^heDsgO*WCn7gP`B#ldru` z5Ax?994+5yx1p)`&zb1a8Tj(QHU4c{dUt30=v|Kt zRYLFI@w1)2O!iNmE#K}qUf(ZT{$=w1iM--_$Hi9X2ed_f(Y`hi8w^-sDUjY^JqW;L0<%PoEjT$(4Y#*X}) z-@Wlsy9V0x#53T(c!~Tr6&?@J@43ap;b}JM^;X9(ZNGVF{CK`qr@T)&Q$7sYRv24b>H;)iT-L7?|D1%Y0CGWlfi3GoRUyVJHd_zZCiXC7mmJNTYrd^8H*sji9bq$$0fHm(PE zd&ud#b7?cNB-ya8(qKndX{e(e`}p=Ko?i%d9318LRZj5=%DHp;IsEp{=?TH*><)DN zVkYV8L&=qo|GRtp_`kld>`8X~H_&cmJ!QVCxWS19*TDO$k?pTE(ig@Dxd8moMZbg_ zKbU8Ao>7T z`50{;J6HL8cTA>VVv?_s#6i~at-0DnzOMlOX0{Crf?qJr(;{{~$9bt)OmjOuxP~cGq%1Kozs3nLjP~W-+c=`@g4XfeNjSr`YM(aSoJg0ui#ycUMgiS zEcgf6*4-1-w_NA%@Y>)vxL;$ugF7*XkrCcG0p9H5|9eK`Cp!3{aK|K;O>$o4*(+zF z(crjhq7TP5Y{5cs5c|zB`iyeHe%2t#hw%1;-Y9$%Ozjsx`;u0VU;b=QbIb|FrK*#bA?BMSF$vlob&8y*fd}MfA?5!7v+zn!Lc435zCr*-+cN6{E}l{ 
z0A%71LHN1^xP=}zHuts3vFm|LzJE-*_#pVUlg2mJ7;Z|+1anf4%$ALamj8)5&X(_s zPcfx0ZR~4tt(9VnR(uk%=J$~U;<)jwLSm>xBAsKif(tJWWcRJ<>e?ayPjJ;4KvxZ? z-b!q~{N-Ozme?O}l|V*^MmmKH-(YG(SMH?U*n}u~>;FEvBTD{{dRc=$g14)P8y_Xk zHTDVGjn8lpeR>Go_}9gs)~Rhni!U&3%!wcEW8!txJp_5pF5X^ikGpNtvCE;3NeiiW z0C_!QBguzdm-zcv_8&eBq)f1`XQ6~#yn4Zu5tMJCHUEG9F8XL=i$W?<_f%1$DW7R43++j_oGhn z|FdjxALh7y`MmuUv3YFp+!>Q%OP5WG9Wpv8-{_$e*gV-~6R$om(0G)1^`8e5V;pVO zDc=m^L*~c-K-!FNT6NFjGqZS8%f(i))(v)^)#!QR^6@?7p^s{L5vbAeye8Q0vyw|= zuTQq>7(?+&E+8; zM!pjGzm>e`KJ?;+#{LG8f#uLs`8_8+GTx-GA@A4Rv?kkDk0kF`NK0oYdh%HUjJW`D z0QKGV+gbZFF&t-2ZY?$*m}E!r)vR%SmHaj5%xJvtf=J`B&L95emYa9|d;9WN{=G5p z@W0{Bi~aA3mcyT;-#j}qHhj0CN2(lYj*{!jEfHT$8OQ*$!JAKir zF0 zeff3b^K9^$@d7{5vGF&5f}NlrO&a`E+W(Rk#onD*pgGL*W)4$9S^S7%H*NF9vVV5G z`??Hk8Cwl2O7Ww=iJcKW=%wU1IY2DJBO958(eg3Skx_*H1aIi8_IK^wRvo${$0>h2 zp$FG72lnWuBL&g&A(Yp;oNJSZe^c}%WBi4Eu9;-wRe_aL_Y+?cpW2|0vDQ==4aU|O z9g`jZCvBC#X3~kJy6Fc=Z_BjDj!(yK!e{o-vVKCee1b!>E90h5w~zDX+cqA1?yMWU zHP^~P)>{i#o-aQwzJ|Ii>RK)NZ$HbV-6Oh4qhDIByh~&6&!$hHb0%^_J+<(r)zni_ z?62n{)<2-fEq8=+yXoK4l`Eqik`b52XQ9#kEt%_9emrqeZ=X`{e)MOze7pVm%EpR@E57604Qw*^Aj7VoQ>)E$ck7OB=}j%Vfh@kXmo^T@2M19 zFAs_T{0yseY^F6RD{DYYBWuK4$x}Z#(!!Sa9EVPgW4j+-!hS-d>Nb8XwnOtjx31q) zR|LPP9-OA0%mpp+HC9Y*?$gieJWuruv|9RR4rp0NJ@0?qwL|h^#PrhxmS-Q9z?XCC?o_uB4uFK%9g`aU^!A$x|KK2XFTHUk6IRebR?S5;c zO&VLwor^0?Uu4J6qAw~LYdQTQoh5wN;2Y)#a*kTW+&*Mq#P8wZ^uUm%^%+*htH9m} z<{V`_R45Pl@atz%hjfo!r{vO2b7+@tvH3m?`S;L1#;j4i(9j?GO)1l3U+rTarMfK2 zC{{Nf{_Fb|`pl($@h4psZu>^jJ~Q9)(78D7+K>RWJ9ea0{Tql&w@AhcXHAdIGN;aN1Cty2-#G-#7D_{{`uCj4ZPCL(I>4{k;{B-)LiPH$Y?up(pRoDuTstt4yC!i z=&2Pm8oTl8X~oo%a-+GOiT__|I-^3ihuGTmQXES>h;^OiBjN2%3 zVSj;=Ut(JcDJ9D4fs>;0BA>s|_o#L-=KqnACwD1=y`4+9; z?J{%LrgruLdDr{5n)t~~Gh=q!K^LvN|QzI0+i zhP%dO=v916G>TUF(`_N^8RThdR$%yq>8tJi6z%w)|HGI^R;GqC{4m8^{vud!U|~OO z4zL#m>kF)(A65-)uV4d!4fMlmfITZ%Hn1E&tO3|B1j_}M=ZEbF_WuMM4s3)U)(#B* z_TAU{jeHf z>jk?QScxCj0PMShO$Ii_58DsyPQfk(Hq8%f2X?Dq(}6|&urTwSn*=KbHp>qy0(QM% zvw_X=!{z|{nqc#Q&G*BqfmH~$0N7Q2SPif*2(}Pdg&)=c> 
z25gZZ)(%WDD&B)QHC*Y3g|U+t304K{20yF_Sg~L?0;~4J<^UTd*ivA(_+iz+*h84p z{w-j)`e8M|h6r{$u;qSO12Fkc?RNsZ%MaTRO#7AER{&d?gayLd6BjLi8y`k{t)x4q z-&8bZ(J1*QjA?XO4L%8D6L^07dE!}azJ=r|z$cNNJb|85p7sLbxt6u82ECe95xsWo zpn(g=&WH>;HxMqqF=(~i>(~nHe9{J1vIhXYE`4G2UM_t&2OUaY?N{n%2O4af$v%^W zzp{^bM1(S$t1^#jO5Lku>#kYoF4mrzhe|IsHQ4#vS6NfvM=y7+4Vf75$!}$`2GWPM zkiOW$e)tOg-FUFkTe5Q&zJ;r|9%7H#2gL4Y@~wT4q5f8h)|fx!!+Kk>^xTSI?uHI@ ztK#)$O=GR~%xe4@t)G1i9i8B;pE+&A>fnfZXX4XM4-G&1Jif#wp}ZFlk>^d;@XmV_ zpQe{J@$t~GzEfGBD|-LBDK*Ru^4|?C!}qS(%KC%WWZwecZuXLOfHP#B`66rdu>iJE zYka?Y%=nBGYl6e(ZJl|^2Eh(pOur3aZRIP*XP?+UAkvZt{;W{;yy=0VOGgBT^?mAN z*~0u{$+}HXZv4mKl8@ibj#cEx)-{wQ`bKfyw`haM7fkkjT_0aw`9B^X&y$b!F%Qmt z#GhqHHQ(wcA2%RRM;6`Dh)I3}5dy4iX{F?^$5qW4WyVCY=%4a+CJ#Z{T1h9x4xQM;zMDgSN9N z=dCAErqy3&+g#TF3_s;F+Odc1hGd+<_64nY(Z)!}9NK5$E460Wxas>Q@d?~U9;({x zapB6*ku&XmJH4SNjJ@rJ&!kxQTj)E7HdEfnM8=uEwrq3Cc3k22TO;_a;%)T<(8KRz z^j&>W>lVZtoVmXGJxzVB^sD5m$<#ML&sX0O;=FG8e^BQ)@w=hXU*;u0jbqsJARSyu z-f!Z6>RZ0(7HDF9@S=l^&)Z3x&a>76yW#5~#^UWivxg!({_87{OY~w3<+R?>ah+#= zgZ(hN9RF1^Tuq)Rf^l)55TiLYn_Sr=2;ud z)z5k8Ela#$Jesk`9&OFVWRveDoq1l1=4kofx0bDjSKkyIo>O14pSBaH^YCaJ_1zAC zDp?D0aoz6V(*B_YF2PTO>j=0ig$tV_4~~zmd1Fk~n0; zr}JMKdH!7w2ES18^ZlRBdFA=DBR>y>a*Kmszvbs|{0MtSpM8^j--8FK{C38bEvLKA z^0j|G^@!gS;J3G&d+xxi?<6Pn)Ro{r^|?Gd$7yGRUxJ;6U;9Q8^ANv~H5X5g2hSe$ zAuvZ@eI~y$N85VTGVMslJl%QX`>ao#!WJ4?&xmv`2Jb0{Z%g?$Ywl&%vU=6|!-({D ztB-FT!+d;O!oEB7aLeH`;ymoL_s$2sG0a}nC0wPB%&UH*FY-7`;p%97J^2+cgG+tw z>WzP{O&^aCt=ZtsXK#LA#w9qLRmW3~oNH{DIu4Id)UlqmG}R${_e<*0oc^`@^lf50 zEut%*J^wF)FA8t8CXa2g?N2yAHrMX=W!0n)4Qv^U3qx z@QL`rBV|_3z3F7+b=z;Lq#oI?eUyI>oyh)svrj;JK);>%Uh$9tdL4Rgx!N3Hzk%8n zf>!iEaT|KP0Qn{k$hvtB{^QW%b!rE?G=P43cbnbs-TL|f`vpBZLVM%X1~*=FBm7&= zyg)P@0LQn@I`@!b;Y$}Qd2yTH=JeWZ%a_|<37Ob`VWPi|urFSIU>{fV|{ z4s#-R3pZk+8^9y|;_xb|Pp7db!jWU`O;mmGsrYQsO@75!TtA?{so!32@X+3rum#g> zU!uV2`!TGe^mAz6dp={qiJ`ndin08HiTTtj=Ht#mG$zFVr=a~!gXm$_s^iZ- zVtfn}Z^Y)8z_V7>TlEs_$DY2{{-uA-vg#($W|yy3hlvZ52fxJjsmSvdc_Pp`gOB!P 
zJV>4O>=V}B-^C+iX5C-%AYY<=ROD`@uO=P(io*eG0=dvGAo}#a=YoWlriAw#cucx*z+bA-x2U_aPYl+S+Wlc*k_UG!@ca$ zrLAt6_2=k}mFd6ZrjJWld+_k047fX&Vhv-F}CY`lLlOC09NdA-Y_wsS+`B0i_A8zsUbsOu_S2}zM zUTWL>+o5aM)3z@t{iHTYhFqQ+8!)A>ukG8;x9#L4+O0mCLwm4!CDr&1(*H|O5hF(r zFqdvEykTnWCGhV)*)>slH7|*fKf<%iKl#Sf(Qo$JYoL++Q`l?v(U&I2r)Z+DjBIMG zCiLAc<74f_Wu*(e{deR7VmP#QId#rpjA~!FVzoMZBc02>x|R!S%vxG=d+($UTa4|J z&;0)t19R4*mP5b%74@gu;QARuX-5NXp3~dR-9=whLJu9bby_9$nRzqoYK8Qz+Ce|H zCh|42ChC!^Z5KjcDOU(4^BVax{V;yCFK-!ZiCX_UKz?WbE*g^gN2Jien%nQmdyw`@ z$G!SJdo0s_Uwm3VlWgq=u1|ukRUfIGc%ZVsqpV{;6$$H$Az5?dzeV8&u{{nyLyMEwnZ z{g62^TE3S0A7U zf38+WKDV7!&^?2F8ikeT$D-6VB7jfmjK#H%+$ZL=m3CaiP`1!G(HVD_DnO4r%pJHE? zvj@fH2{DD^@=?Ik1WzP)wLYF)Cy6Ef$z#R=U3D5BwfA~o~X`NwZ-VR0(&l_zLLJ&MjOPF|AHrZ@FZnSC-{&U z(*=IHSRaX&KLEWC_415q)}3YVZf74r#|-JO?0D7X=$n9ve>EW&NqTDhbKEgAZU6ml zaH<^l(z*L7{N+kG$KuuJPPwH{xqj@q`-r+L@!8a$zB*U>^~jjgX!+ME=jw>#GmQO6 zwB7KngEe7Kefwy)oBxaOu3H^7?8o)wUq2&SenK&OBXfS+>&l(>Bun08XWOuKW-S{Y z)?g@0DmjRqwa=R87k>#@zBOd2?5gbS+yB~9o*&4%S##ujDKGk5 z+3S5-c%f-i4d)y@G>$og{iossexT-yqUS8WA9yn1w~CLo30a!uA74v|+4rTq$_=0# zGE=;YvQGRfQNO9X*b`kC5u#cLm!zXx>Yf&CZ3Vza-I^H#Ux zTjFO{ET&HEShHxk2^`6UT?ZA$ROLS8BB z>V@@`-9%ZH6Hio5be~%B>&8y@5p`S_WbGlu+5&6j(*4Mn#U8hP>);{&oM=+J1I+2( z+DaPwL44DgJ_p_~uYd2ZptYfsHol0ycm$pB>IGS`4}fRFvya#h^Z|DZJQvJ6dWikZ z0kifrB>pwZR)XKe=&T_}cXQrrO;En3_An2PbGOFSFN*h{c1^U?a^m-(dw2Khm^>AF zjxq-S4n58s&M(jP#4|iR*v6X8)$pJcdj8l`nU^@T>nSsab(^`A(VSFm5&cgxCXIhH zE-U8JItCh<^CoDlfW}JpTswLsDXUkp4&uSTk1@(PiC;t=XHv)M@|W4;Dn5q%;;s>-#{^f}Uv*^!K;>|PZU)kM#roRO%EK#29_&V}Bdc??`8OOJbGW4mh z3$Z7egYe0y>vsC{_Qyw>y>*&bK1*4AhSu;sBV&qH+#alXTsQ^$1+apEc%j&npALIY zy078261%PFuw>osBYnKtM%_mvR^9X9>sp)1+GsXsXL69yA?)MGh`D-J>zmTE_8tz- z$A&l`8{&Lyi1V=_&c}vAd|3Akah8ic9&LRj&-S?2SXj+7I(h0G&eLsWtx!DLL7ke% zD0Vex{){PeSmz8`!58JnuR_*eWZl}tI2c#!A2hbG#NlOf4EAGRn5VC|vCr(6v}ZE? 
z%z07i`uNq;c_0ez*d$ys$#aZdf3G1d& zaEhO4eHgQM=gah81#6lMS<}okbEALN+dj%kbddB;SMSKnU7_I1Zr`@O8Q@&q8m%)B zyRrND0rd@b%Q`VScF=*(NqNii$T8ssIZX$bD zo6w)T@Z&t?GO;OhKBIlBnfE|@4BBHS&3Lo*YkMft`F7t(%iLr4|K*+g|NGzXEPd(U z^Del*ywCZ^{{7;++izp<^B>sv{0H`f$MXZ$du_!0+R?IDa zA)j{bSPQPS_I@1p-ou~u=tYv|w@#dz% zJM=T~M*p#;yx@iggZGnqKi&xR@=`w5Is)+kco!p6YtV6$t-{R%V>4@rPt^`E?Kx$A zKH3j`Mjh?!hoO!pr;et=TZhJG?x&7LLroorKW81kPpLz4f)CJYts5NsGj+_6tkEv< zA3b4s|HmG1OAoH~>aDxjJK^nn)@LOv%jy5re)=(Vr1PmQ)w=3vt(_t4o^%;86CLbtTrW2eB;o%>9^_8P~9@T8hJ@sO<9$n%i+ zpSN%x#*A6n)*+_-16%7h+2xr>s z6-I8@bBixidnIxk{(hg>*k^;rrzr{4JYEW2TC1TCYem-$$hGm4`Vk|9F1HWMpi46C zu2VhB{5aj*MtcBc>k@mM{WcGOYF4NAa=HGAY@XK!e#qTs@^k(OU5wA;UK{v*{JN-Z z14V07g4S_<`CM;&$eg(5bgms-g070#W5i#!W2le+np3Ks>uH~5+xphjHH{gAleW-Z zd$et$eV^VV?DZ&x_Nt8D=o8jv@b|Cha~OZW<9^M1t!4K!hv;H|$h{*MNnYksl%K8;>{a@sp`8c)-txX&DpdTE1O@&v+U^xRq&mh?2lI&Tyr^SNQz`h@?E#U|3X&A5t`|?8fv-WcmcOc*&u^$m%E8%Z)w@ynn^WZk- z1KOL&Ig6$DGl#7%kK8?^wPeFLi4UlM!~Zd2VfeCs+2MOuKGeIqx}$+~_?YxXr?6&G zH#DI$)^kpT{_gx4{1LA2pOLpP{2w{Qj5r%uZr!~c`O@6RyJvHWgA;z&R@D+~o6kI= z)Y)H&Z(D?nXx(!w`IP>nYwSBeL|>IxufC4{f)=+wb!G+ISNww>XhC;&8|QnBPv89v z`N#id%aR>mu;U--rRFQIvIf0HoMrldS<9-CJK7%4;taL=YHVP{gsd|&#=7Ux-0`vw zx{bcQm3gU$-X+X8Zh%(D*Q_;T#?8B#bHARvnaumYN?z$J7r(nV#hk-|PZ8o7;hoNU zR=`^JNBZ~rbGW0>ktNMPYLLguENfzvdaT+>F+JC#Z+}I5o%n;X5eb?%ac15_ht7*k zhK@@5=8qpB$2Uhh1Nb%7e?X>KJNXGdO#$csg2Id4)qb<5!0q_oe%v+0ygaz~fm^Y? zZhV{1c=Y6x-#ygfMGpBFU#~Nqko%epuI(5E9wjc;T^`ZQD?_Qkc;JO_i z-UdF`h9ZN>@lb1z2UiBSnK?ubHZ+^_^;XLs-#3wS?x0^}Kr5CZ6-oz=w#L@zaQy9Ch`jMk}*qO)vHrM!R&D^I^`(-|N zz#Dv@Hx*yVi!;{h+Sor|NW3lN?DtkoWJ*ldc`-l2+;Pbsu_L@eB6)aF0`N zTzf-Tz^}ic%hTzPfLSk{1FvSovn$|T8S*oW{qgo)6z!Y+>lY)Cvl;ks_;a;|k!i7+ zSDhETczV(okZhqBZU1aL`)~(Ni$z>}g}zV>ye~Kwf@AT0vpYxPPyD{8@~;y+@suBf z4w*w+KH;OXhlqpzin1F2j@%^cp6@Tap0b+fXdXIwTFP0fkmM%V_z8Xc@r@bqArpS| zLUwv{H;jysiS(C^syWj`5U zRa78T--a*9*YP{~e2vdz@QOYxu0zIlT%Q|zb?~&3 z=O`MvlUZ^nyMF{bVejYCyZt-6nq*~-qY<|~ldKcw?Rrd?wC+->VV;&m@l9zVNQJ|vfo z#|jsXioMJ^g|zaFUv^@_Z2IpC`mqdoo<+afYo4zEqWI99lRbtahcl3JFC#zv? 
z0Qq!jl|LrjTQ0S7($Q9E##(yXR8F?KpBZnTafhK|b89mcn=|^%$Ye-&G-5lg$W+bG z8?YhR$0qqLyu6${7lOnW{r46AhI%af+=lFS(svvU|2bRZ_UAV46VX1aLr++BIv*t- zxcQmuwHUwPg!bDf%N$|P;y36s%blB~+&6Y`q%${JoAH@MU?)uw}` z(yQFh$DNB_zwG;VKBEg<8V!BB{cY0ye2OtlgvNvO>^bP4X=AtkouhT#WV^~JCz;iK zJ?-d{=Yrv*Gw`9jJTdjsaTs;U|5p6qHOv0J1bSt&e+f>-zKe3K5+kE| zmk}q9mLIt z1()?<*Q9*Duh%Y`RadsE$~&cmbMz& z3XylaUfs25*E<||0e4`T@es|9y}@(+yIqq;!i!OSu(z$Y+%ZS=$gB3FuB7gv$cFMl zug1BZcNX_+c=F00TSne)IIFey(=!IxQ>(G_P4c?(Jlc`xpF&rNw*BpK+h2C-Y)aJG zq`js|Tw~bxEnEx1^)BUpPdV|`Tkd<%Y}$6$6jSz}@W}+UQQ=I2LF2+*r>>+MvAv>X-Y)cg70qsPYM$ zrohARz(4NxjBDNR)iv3%kG?TDmd6;pmNBR`jZ*Tt^ldY?FfU$JorOFMV+@Xn=Rv1% zOWuX2fO}h4E|5MO!8jZmZ-Rcyj6*%=#<%kvWgJ$|<19+x>A&ZnQ} z(cg3NU9aR^x_wq}D&@P@>fCv-@gsb@#9hF)PuJw#J6*-x^L^raC+zoE=x(n#F^nBo zV`o2nV0PzF_KdvBo&@>5Gj|un4vtcv4UKdEN6RNG7{BcM)7p>putmQQR{P-}cEapC z3=hONtF&UjA+O5G4(RtUu6J-+g$bM|z^QxQj|gwT+97h_kpdo_eWZ+p&p@*dg@Tyu?5Gi=JR9INZTE> zI_qiQZrc9<^?dk1@6KmU9}SHk;QVJ7&#LoH(q88|3J&l3k+IvfUGuJY44uil5cWRC z{f_uOsdGzrjOsp2jZ^L4QGbx$YTZA(v(n`Sdtt2t;`{0o_U>!~Klz$}&Zmm~t%^U3 zHqG61AHVpgewb;EFzc;9BERG-b#Kg8>`m(Ya+}&(>c7XprBi;Fbcm}{xKp5WHGaFr zeF$y617mBLA94?9OZ%!o<3i@m*Z(Necu4nQLT4MYR6zV&dD@T{mAjt3Ub=?@SW7kO zMYLD>?6gRW_H$%0{}~By+(+Vpo=(L9zTfA5)VE=Z<+V*P(LwsLu39xt1~BTv3`{spC=m6usZSgo7b_o{So-_ zF#I?|TWXjy+AiSebW|kO`VPWGV&r=A@(P{ z*orkg*t_!!)bkwQmEdlZT+Vfaul;qu9hj+V4>q&nFm_M2I z{ZeAX&O3rma;DNftM@Er+vp>2KN{VmJ6Nc%(No_APJM3wJh{o%L)wdV^2Pu*#lIIT zwNKo>u(02*EItfQ)yFXS|ucVWMF%zoYa&ajaJ9@Evo<#-u0I57z=SHVqlrKUcm) z{)GIo_!8V1@V76@mgUADre4LA@^d){!5*;is>nlU4zXU|A3Sq|^#$qgA@LgW2wurK zi3s=q?w>hzgJ_noBKxoX0Qq0Arq0D*ID|dWI7_RS=u4GL_GfaB(#adur^&ri!=e3O z;3}hT-KTRSedW~$>T~HY!}~{OOSX0h_xr4;MIY&HWKnV`z2wrU@kiTpl_q)9v&N|G zOZeg0+Ix+D2**G9RQG(@@pHySZPo4)cxB%KDO_6nH|fVGChl5Px?|S)d zL+kN`5C1y%T|;LjewrJLs)A1WJ>GU1pIvv}dGfADS6xG1?PaBZh*c-gsk&{u4%&1U z?@h+*V&=V~>2YZCw&is7Xph(B)MNLnz5Yae#rT0IzZJq)OdI3#`Gc!j6CnQIJd^o{ z?E3U3t7UqH)j8AZ9otHrPw~Awhtfaf*-^S>=W~w)md))go?7v(yrZcA{@n8@k?$k+X^A)K#${lj`~`oE>g2cF 
zyBqBomE!!=(~1mBE}BeV-@;ivPd!Umm-r|1lGmy02;;1SxdUyjrL9euA_s2UgexIS zCT5{~P88Q1L0ofKoViHLi{Mbak^Opuh`B6{6SuT&%aG#z+!3sp;@Te=-Cv?P_MQCx z7`Yj4-F-Ia`M;F^jCIlPnfp|Kg+0VSi?j^Cd+g3?zQf#CS52S14Sgzi1LdNu_v!an zg^#{vJ~~zSYlxL~GH2-k@7M4%yBI%;rHqJAr{5#Qq}Lz= zcG}SRcS&O&H<5Xb$uo&(i|6)W-iF~>krpq1T;9A4U22EkTjk+-ZGz{^(HS0|?}O)2 zc=dfx{wi-D%_1i5$v*~utB-D{jK&vzlY8tD`G&t{jC8kw;)m|YlZ;Eg3t4N}OMatw zu{+o_)@6q*?a#idi;U>A=WbLtdLKj{r^?Ss+kjFXZHkiUsQT2F`t5eT+&fZKaTxJb;nrd zsp2ztG8p)!sc`gZn0{o;#j1l=%k&jR9d|&}cPM{dXvk8Zc z{oO>XPIcw8UV9WfmIK~z(-w_M7J=W}!EfdW74_(rDDrfk+Kg-~|5A9GE{P@JC}yv7mP!w)d0PW1~fj&XPN8d&OUQ7n_KVamK;%#5mZ_`9u%DmarzF_2KWp zGwrcyH#pUvA;_rzj>-b!6{%-5(0}$Bd3d)0-ko54cz7M#!r1+Tc=w&<<{s)+@Jl8- z;kP{wl4pN^|FARHPjg;Ja^FV%!F7?Tii0YaqPr%g=Py?5N%8|NjrT^Tim%KSmu{rq zICr<_Gmn)1=r&eXqVGJs7-Q~Y7-IH^x$9*m@MRSIR~*v2SF9UfbuXg_R~h@_wZCxm zM3uF1rj|#JljBzRR(i^7U%krbP`-h?Rm3aJF43~x4ZG$e~#xr;J3SvGqKkV zn}v^C>&Cv^xQhDK#IBH0*}N=t!F2S%TImCH1O8=2{{7sCK)&}w(Z{zU3-}MU?J4JP z3+V^-nL9so@4Jw1J&846{G9l!hbG4k1cxwh$cf)Y8;E0?xwv#qGtbN`N_ISN#hzQ! z|2^~WA?&TrJbw+D|A2E?@})L#7n$sk-b*Pw-%P>tB+u3-T_wB=C-z2g$k96Ry@1{L zEpmA=FguPDX#9vZ^~9MQ4{y{?&GE#a0_NND0T)}=4xK^KI6fVZK7dD+@S=n^i^e+I zEIOwp=+xO7yZ!a}+J?@#)_bDU+vc-r^M|zAyt~$nKP#?xmkQU|6kJPqj-nHcO$`oB zv{_?a^Pl{qU5|=qvRQYagPFJFw9mx{;&-~f)O=g@xVG#g_e^!EbEcYiAEb%f8QEft z7U$2xrZ_x&iLpuy>v;I!2LmKiH+Q**TN4 zm~XWk-=Xsfoh#woiJnKrZ2zM#?@0?668Biy@PyV10)g-TOgeitbyrfxV*WoF8z$fW zTE_?ah&BGH-&`QA|8HUbR{4P~6{#pV*(hq{sz6@6e_7 z&bY!Z6!(P=tsU&-j$g~H6%3C*qkEk{?P^vGR4|Q~+0vUQyL2wRlUQL(enIH3MTL1! 
zjKDW|99pjrSsMZc@=-^`bG5gQeO4|#nP0W((Vc@@R}qeP)IT0PeKLtn8r=8*y8Ai6 zejf5#@KiQUK4rRovt##H&?jZ|%`Ez;l)iHJE!na)7y7(&{0KHwV-T5aR=@p$IibaG zcV4gg`_0VNy>m+SnHQe(4rfJQ(EG8I_vX5?;L3pJeX55#jQsY)k4V=B^7-UBiagtO zx$UrGS3r*|-{Tzlwq-hJ>AT2o7+J3g7Cm7h>neK*ZL(vOImLSJ%^r?)bxZD3_j!HF zJjfgO(w!1s8z!4+=4qqZZ;u_)+&; zHthH81<+a8D05!nwy00`>B{p5%O>$CfTBw$~A7&<;}%?@3%rzo|`&@0-zh zTZWDEcAlGh!l`$5dvKPWXZuaVkYle*>9^x&Bz>-{Y@gg6J4ep7&xD@E`vr7oP&fKF zb0*SL*ZLxzWwm1=11aO$Z0}tnMpwaOmv4RG)5GvlykBV6(CzoltRx-Zx1vBNw0J1M@7_5q6@_FC=XOdtOB<7{8&esbM^YqJl^36||Z#icgJ^3=Zi(@AF zibBK9nTp&%E_;BY=Kj|FQ26M5jN67Y@-~d%yu~zZ({!N#c;SePV)MwuTtRy+nr1SNWj_k1T^1`-dZx~tj)({O zJq$ldwx|H!2zLv%RQlF>jP=5S=KMkJy7LrQ)*g%DaqYAJpVi>_xNFiH+Ow5%nzNpD zr&ae7cEyo{B){6&+w0-OHu&JKZ>GM(@DgOh?WYF$*rD*!KJ>lq8|4+h!27PWcB1~b z*JpLUO=tGBk8Mc&SI{n-p!|>WI}Cl@^X12zoO}n+VGr|LIWXk5ftN?znWFthvW;KV9zpJrVlQTG z<>RwE7lU8rM|owk!q!#lL)nad+?TTI@!p+3a{2=~PxeO>=QTZb9id&XUu5=r8vIv+ zUu9dJveeU*ko`q|{kx4jhn#q(#!@oht^ToegnTbh#|Chw+IH72>8`_(8lRF+__zE| zk;V(4#nFw@Ewu?co@URNrwwCfCv>BES3v`HrP^Mv-F54{7W~=^6_t<5x}UqgAR9CQ znB+ozE?BodJHVY)UK*~j_1pI^N~1r~S-ojjPj#KZSNzPnDv8m1>dK7Rb^RxGb(QM8 z_n^j;4{45+jsKj3|C|vsemrNS)K>XTItwNHzwQqF2z;`_aIJawVQ{?GCaw4z(BO`p z{roQBOta=uvd`YUe|7SmU~S8_;~N;Gtea20o<20|UNV?2!28lMTx+8@e-j z+C?VT{UGnK{zVpbep`4L!+)nPTkpx&8WR7MXU4)W)IUGww`bfYW#g(5K3*N+o)`7u zDC%H56+c6{vDkyuvseMG3oNL>-(3hVB@16-f3Uv(J%lJ^32QcOCe>W5Dq}6M95)Vtmhj-1ma4Nsp9q{#3r_f$@Id z^D*wsN*%{N;yYQGz^A)|g>ScmZ*wkwD0bZVO=0vsapR-d^Fr*^bjF|b=tIn1@RM53 zgMN=LIG)f2voqD?&bHbInhfL#kH*Go?vSL4lPdfMdSDycuGHJ)i2==~u z^$9X8fALy)rPvSe)y9VA79XO#Y}#JF(GkrWlZxs77Tk^vO7_DD_E~#yu0NZYH~0pT z{{Z=a4UV+5+{GH1r(BtNhgeR0GG$fwHp-}um(bhn z(cX;?Ar{gy2R=%_q2rqi=ScpBaMwODPT+cGK3`l<9o(x@9ARJG!B8-E2pTjdpM;lY zt-UwCCu2XzTD#!l%|9rUZ{;mzy{O_Y8?hgg z@4$GIHitCDgG|~88(%4D1#a3^Q%t_gN!!g>G5K1xXCOO1m9$DXt>hw;RzjNPrdf8{ zg{0MUcHEThXSbu6w1}HF(57z;X>Hg+lW)o;244YbQ8%q%f=L@rT7jE(%q}~WwB4*} znzHqF+Xj(F%+lCZe4?b@TInBuM`qc2a0zm8A@W{;%{rU!2;UmRm3(X79OXNY?|Qz6 z^1Yky9KPH59?Z9eF4kIG0pHp?8{u0q=t{oj`$hSdpIy&)FTQv4ZQ>(*OKvP;5VCO^ 
zFZKA_Ev(tuOMYZH2Rz8&+Zq+Vlt%A)0ypgrB#w__{rdbb^m`Z0LDu_(#+4@u(; zjeQS0a;SNb;u)i%rH1)M1N1nt0{`4KWz1%>pIW}*;0syL%CX+lS^4boHIF-ONb0~Q z?us3!kKKq~&1d(ZXKwY#) zaL$C-HQ+SAnb&I4f|u{qiC zZ#rqaNy9%fb2-IR7t&`s52d|_1=IuYMSoL*{*&C}<*_4~qis1pUA{5obLKFgiLZ=& z55fzJxT$;q?|b9A)iKWIr7O$ZD66$A#nQXwb!5iPd&K0;j^9aL+LMvG)+b)dZi$Bh z?h=>(E)9ER_@7r8Ugm;mUU|c{D~7bl-q}^3bslS$`HfSd1|5 z9&yG-(xz=EF5=0*9=bJ#+FSw^&+s5z?H*i-zXz!Q(8S911gZQ$@ zdEs`w=QSrDhHlNBOj@g*79vgdm;2sg8R|E_3$e|a&hLY=*Ze+6y4u42$he-XS#V+qCrjY@dU)&d z&&_A_M2hSmAr8&?pxj5n>Ct^jeotW!fd_XcdjN9IM~=+AE1UV+U_OKR4CFI_&l!CB z^Xc~tyy`3>W(r?)9=y2(o@k#h`v_~%L9N=CroCe3Eb*a>Vy~i$K7rpO3fS*=#^o{V z?(gh;2EIgz_Z@~um7%;1tMy(n<|B%6u2=~lc(=X!?jh*c+(f?E-SmGI&(gz*G0r^L ze8-jcWV*%Q$HqBOhv`3mW$(@P++3nZ4xrE*~{dkf) zJPaMdgdQtnj`<8Uxcy%0)ind{esssB=A%!N_e}Ve=kNQbgnrVwW>4R5gNNd8zt5R} zAAI>fb$t6!M$BR_onls((VhtY`ad`F4iM60XC(iZk}kR}V)U|8k`dV`wLx-GwP|dk zolC?o%Gu}0bBb?u+6+%ytC~h9@@29{<>tGQJfhvHNBlCP0} z)IV6S9ZtOJNql4OM?lB43=#c>cA8m(!EdPkqRz8KTFwAgaF$?Eb1(mk$kR=NWm+#6 z4zgl>oU+1$y(}@$%58mpT!pMQ$QabLnh9r zp64>Hi{Mkmf1fIQNjdpzBlm|!r&yo{IKFf2FVC}7HYLeS9=yDN75-Fo)_oNJs0MBU*B-Vo?bI3& zHfV>^tB^rgkMttXdd^gveGk!LX5T~Wc~;#Ja2OjxU)jFX@OUrqYzbOR!BZhT$Y;|( zX_s&bk95Zv7gwE)>sfGVzwN)^$4ZC3B7Up9%9=8q%~pBE4n>FdJ81vKe^XZc!AFbb zzfg2^X@~YhX2%!)A9j^}tUm&e#+dXlaa-)u_?X7b4vnuHMkU70684-)S85IEF>tC3 zJZP<|Ozz#@XU3MDr&ctB{{ir4IdwSWENMsPPxI-L_3YzaL%G|*-LWANE0Ujp3s1{hBKCkDkixXWs28vxGgliVKJb zH!^P3E?*ssQhYM*qL~kUI&-gj_I%EIj)d(w(PfnVHD!NJIoabI(LGL`O^J53vzF?i zF_Stkq}(%uS1>fAKmyu)WmHdU)p*6UYxNeD~17tM21{0N|{o z9>uSJKwm}BXX452q^+QRPW;;6=Zar@+O{NRo&N*gS7G797Sbo;3-XfWOS9jv-^U({ zx1Id<_{=ZPrrc`ApW4}veQKs(p(lvk2-hsWja?YenGxtHAg{)p-of%JxHRU_jf1Q4 zVKwjd-dj}3e8sz_5H7IiE?!>U?eOZ8i@Nj5yXGc(kh9tZJv&+R^74gv(9eoBU+n&F z!01qWuGpKoleb-x1Jkx9f7{lx2EB#0sa;zg-l%NT>*Cfv92a+gehc@x9^CDNleoutaKFp_ zg+=g>`Aw(rN^f*)^I^ZQzU>0u&jas`9MT@;AB7G~^!qr!4_3x|QcEP$xeD^-)|95a!@^M?&ENHjq zb3t?lHn2o(^va`=#SDACSm}(jO!i})z$-)Uxyd`f6Y0E~ee!A_a|#pJ zFzx#PNqh77sH^M$|2>lgChU-vMKb}S$>4&rSCnB>#2xusx0XOe16UPT5R?H#qtG_* 
zic6_2K}$=f(x!gE8nrb@v1qlQxD@U3sR@b=qL!UR!Tg@D`+mP?-jf9N^ZETTkC}PD z&pr3tbI(2Z+;h)8_xX@%S0{Mi@TB52xLd3b`x(^z?y=TQJIO0H1HaiR*c_rKQ7>^T z{&xQZ8dOH`x@+f~^y6PB_eZC@1Ez9H$VJ3;NR!@u|>~Uf5Cpk=Qjk|4@$#7m(D(1hRM;7?;al{_datDG<2Wh zhn)P-hj-6LOdws~z~4enfj0vc~%xdhhOK_&?|i`lS9!zIR|-9KpYokNu@H zV&qP0<6p{k<5cqFG4Mor-v#;Me&~JIxc(*im6mfZZDudamQ5vp)8h-!!R$SVrz!gJ zJ#w}A^y9WD<1mqY_WT->T}&OnKH&MBnHG+b{8^u4drq~cSmD*0CDj? z_!aD_Ujn?&!+i^Q|9shv7@bEME6$vg8($9{rTECg_MKqrOtwwGMnsXqRa1Pu-&7cKD=N-46TlI_^pI(Y|d$Y+4vxD({2eo6tYyoXr5< z(+}$>vHl01eM9`;laZ@Q$k#;TVJ9GO+H-uxy&odg@7;i%p!rb#TFpZn0#iomoR#L~ zc5Tj2o$J=0&mFeFdG^Kz>X*F{8S3eY+&&{22cFGQ#rr!ZC< z;(q(%DwlWvfEGu`CT%s@M2~&$K4WCS3bz{8$ymwMA(`7&B#J@*>89!g;0M?K>@yrJq|K@yT zPJGNfe17=RC4Uv_fW1@5Y=<9u6ydR2endnbY~ zJN^sK^C@=W<@>?KzOCZvrn+bRhLAI_FYwBfHx}KgJ#^Ni5tHxgm%Z&LhMjH~yYij} zOk#{ZjW36`c=N$*&YG$%-=ZyZU3l7FnSg(eHHr^DgEg|?PXpd!9DkQGW$?

1EO z>jRVHsaGG?tIDgD6CVVu`12(3_N*6sw+8d)u^&9$i4V+=6woG}A$^^god*0hIup8u z-|Rze((~WJAN=0&GCX-1VD`0X zh1;f~{OhOpcRls#{Zl`CWB&!8y}tjo*FV|+t8NYZ{r5*~M>p^N9&<1@ z8fT{Jt%kvNHavm8z1GkHHZrZV`jysb%jAplo@en~I@SVCV{Ss_NwyU|%`W_8fUTSg z7l2dmtfQyedM{YL6P(^pXG$Q{)xuZP#mfBioM7h4Avw{}Q}L;V0$mO4QsAWHh*2{4qe^c%xtcuND%rPQ zY?Im3YFs=flxvMJ{k|^Kwlbm{gPC<_huMd*s?KJ9vh0S=*2bM(V`o2m1A2vi@b~vg z^oh}VtJpUj8?awto$HOU$?T~QrOyqnItSPK{{xN!4@ZN8WA*<5M-LB&b&hG%^8W*l zV<%aw4bK8+%l~f1hgBG`C+rQ|0emFHu;XjBHs%vwZRC)oU# z(|K1vQ2c#7Y)=TEW2aG8d%m-Qt#ja>x^UHB*6%NW`t|*vT=%c5H92eAiP~nerEauV1My1(Y((_JzZomThLxGZTLYqS4Z%Xz>t{I6^qBf1V=NavZcy4)G7 znY7;`S5+aO$OFW?@LkNe-!G!^tLG;Ht8rTObz}JY|J@j-e-dN3WTs_*lW}Z?tvCO2 zzs|8ZdH4>0y9?{3t|s>$@@UQns^6ur)$iW;ZX0{J54%7yF=fzT#ydO=1+10mn0jt7 zH@fjd!LSBx>g?Rptu>tCcj3Kb#wouJ{)iIfD?*u)jL!DdI|kU=lO9C<6M8?mri}U< z&>7l4d7pSb;W`!HT@%j=&JS~z-?A^Z6tisAJp2r~u}q9fL1gVSX?pUZpw zwxQ(!HVpoO&4PWY2fjf1Re38O<{qD6$kzz)Y~DH6&C5e>B(Dr{)8!MQWA4OOKx$n6EJUEJJzX-@9Ft7%u; z_!7yg>UHN{M-M;F`rS94*Bjq$R{Ss2DV}zkV|0wsL!V(INGE|~?}VRcqZbPzvL{Oe z&YV7Bad+l#*-+eR=RI4{$z3nz#IN9)-raMrb4RiChV1@Bj%}Xchv9|nLg@1NAfK|< z1{;cueUsV-*;_tX*+IhnQ_}{WpH0TKgfuQpPQpNS3ulj0{0S40xqNH>RODS}5XU1g zqv#uSaHOD@HU9{5)0J3?7WRFHId+!lknQI1@(FTU_~@9;9;0iIrNS*Hp1}vV0=QeD zRq?svkKguc@y`6ZI_c0~hOhi3Io{j#V->!8a2lO#Vq6O%#O)}irsW!NS5Zd(qsHr5 zH)12@pd%E6^kedbH23JcvUAUg&Fyf$$NJjES;YRQWZ%Pit}7o+D@+{ekF<^mtZ;m9 zYk4NQsDgi67<*NWAMPh--5Q8rbGx%{RbAekW%_$DcCU{oyQoinc#5efC%y)p&pEdC zSC(7hmOGJh+CylfZJC_AT8l2&G{G8^lVP<~b4K{}gRRpo@^Y>lXxY1GTdSW0wxF9e z|9gxl`ACgDv^Djts;2|9-w3aKpiLj>?pw)>X-fxvp?+u~|8~snhb8obbSC3R2ZA~Kh&qTcx62LL(q@imcVtq zv-sl2D&*ZOmqWR>vX|2Cu}OJZPb{?LMeTk8Iz0K?2d|&~L9}@X`nC?akW5}q-&$r5 zHdpcK@zvY7`?u?SeJ#V$7{2ArSY$MK>gdBev8o;%ny6dM}|Xoce~`GFnhz+e78?( zqz_K!-XtRndpRS4T!&qIsfe?wlIuI2zKJLofT`>3OH8bmVj#U(Ov!;qmnV~cxQ}|l#@@sEBguZ(Wwo3=H-Wy{uJ-=RYjspt<@h93&Pro^9~mJEi1w~`NuSuYCfY} z>izL%K12>J`Vpnd^uCC^eNaYD`= z6Zm#F|6(*bzq0K7aYM-YAM5kPf5zBX-!R5j%=CTKYhoao8@#zlJ_^UbE?+=yJPp3w z4V?BkJ(wcnFVOi#?Pur4KOf}m4P7 
zeA>JBaBgF5?f2{N0;h5TJpk+nzy&z(DcsseUx%&H(5-pY4eSS&=Muxo^RO#}$vXn_ z4Q<$3wQqr|ls%hLt9jHl;9E$JO7cwB&Z4Yv1_J%gyol$qBdwz@=X<96JSuuQPxdXI zsr}4{dp&w*yL>9ESDxYAcrA5s9=t^Ou5inp(m^@No7!16T6w&4Q-1Wk_@LX+L5fELz7d}k>(gm_W!vx!`LC&3ZoL^G zW^O0@yNcPs#}j#J$vM?IlyhPBiif`hF3~=05<};GS%%J~>JMPn0i$@QW6%>NulRUl z6ZI#)p&&AycHfKaU(X(apH~^8No{_8q``F)xaI-(;qp|u+J-+!ca~AM#-l?%B2)J9 zE~e}Z$|{!fuWiashF;ZaV5f{Su+xD}V_bN+wz+a!wxGLte+%eG`k{&(f<{MhK1R6&e~T>O1FE0S=Qbx+tA6#xXFwkv{J9>Ls-k>5sAvISIHTAKVuMv73qECe-B#jC&#fLX0iu!@Du~{--6+eFF(v| z59anE2IdvP3`&Jr;lb>6V16%{lT%^VdoZ^+b!`Hs;_~RiOwMhV%8X(87B83d}?-Z{{WZ=UU0^!d@3Fd zMMgIP_eqAshtc`=a?t^-=0xqcE0#=gXDCD`KNn-*!Ln$}$qR39=1w>4IHzwMJ*WMg z{>YQ=3L^f`#kCK)e&8S9p#CSmWtXW8uwPmKZQrc^OI=*5e-Lu%(K_4c7}`DB>DzOG zyZ?v83NudA&?z3=dJk^ZX@);z1@~}!xD2B^z?bd7ohi7-+QZHE;H=XPzGA`sq&?g= z4{r6T2JS@Q{PeG2O=!l!KTFK}T;8u<6m7Yk_3ZsGC+EgpE}S!O=f%6JK4L7q_GK9T zn7rR9UGLFJ-g{@>clPMir$-UM`-$`L-cL@HSB}lR_(9&w-aW`YQujaQ^tT7Q&6CL~ znve40yMe6$W;FcH|L?)J`g;$uC3&m3#L)?o_YC6KIuXaH_I^OlOqKn+DI2RbGD(cX zO5zyr|F6{gCR5Jv2-{C(-cT7}ocaQ$z9Iiq-Y@F=cCx+|9n|*+Q#Q4}x5&qXja);& zXx-xYJSv8Ae_vWcZ|dH|-%wWipskP2$y@Z*xlso!OPxnpf5gR4tuxZmmtn_WS~C$I zCTMtWWC{)2h+~wE{(a~;KwISp@y{_HzR%bXk9s~@>IjhAp<$_$@3D^O5BIQI=5VKB z1UvVB)-vV94wrDZ;T-NYJfAs3{x|VM<43ey&$`sYA8Z8ze1d$^HsZ7FZPpRJxZ?*t zLDIDK_{p`ln(}JT+Ut1m9%I#y$G~sroD%S*uCAF)oaLWgxD@_RVeiogw+pySz_}IJ zx-2Ijq5sTR;(kD6@6WF6`E?y_ojzw4vVpEEMb|wIKXfjlVBzp)okuwg-ctVmLeKvm ztz9z9YFYBUWpvkZ@ewcT;d@-=_;2u0-r(c^kDPjHm)r`zzkdzBkx6{_sT}_ezF96l z{@?4=Q@dm&_-1_#zH^fJZc#b@8+^CA`1n6p&zI5ObHI1&*Wf!hiSJsK$JadUm0nUCi0uyLtAcRl5m&t@wye%ukP^ zceGBM2#nUwJGAB|Mu@fcsH=JQ$Vpc15&G+P;wL*QH^N^I9jJ1u?-A@Wl`BT>Gm&$R z7xI%`mWjO1q|RSx+)x*5TjRs^@BN3Da?X_Xp5GSKx|_4v@z3uOomM<|33j%7j`?Ri zlh2qnQ*G?!wQJay%hp=iKVB7Ac!c<&L=MI@@&_`vS${k4m~fMaoBKwBaqgNqrh3@x zj#q)Vfjo5bV_moqUZXd()>2IMk9lXg?_ME{c3ypiJWCq&T0DN56Ge$CzCq& zY#_I-uPxiICifxydKY-%(07OB+VEi8mCn6n9l~}?twVYJUiyZ3R{*=|c(9+O_QN({ ztATCZ`qjA?L(Hq%uh%$IKWGgkpU^@4IvNY>_`YLjP7qW!k7&Ox{#7)|(@G>2HwI)rm|v1I(E 
z=JBLFzf+PzLs1!fO7KMcRfX^6vX;w^$MHqYoDj5+&=x-)Zw@wX`6GE4d~I0|o;N%^ z+1v*Ko_FQbLYGQE?3cWf8$e_EZk}r_k7CS8-AeLUE9U2K}}#a3+Ev&g{v4_Wi)5|dkZ@4XNIj(wq0 z=9p5>qo_T1((aAoNq^uNk9&@^PFK9{Va|H%J$F?^zC}A4@k>5JKgf5evqjS5p}^XH z#S2G0r*^L`Cz(F_q*DrgIv<7ZiiDB5*Ia#v->-&v+8V{u(!cea zSyLz)Yt79t?MD7l!PUL5&{uvs_MYg}ulAS}?V7?S<=@DTA8_x;kxZ^7rl5h? zZt?qJey7S;U*xNwU%n~}7mqPGcKUGqfO4@c2gkB@IP$^K*N>y7Y!>!uQ?9+t#P4Ls zmj*Z&2YqGB*q3VVlz6_#dA_&oWuC`IC7$2m!3WVl6`6_WH}jl!Ru)bQ7#eG#@iOR% zx?|})?14}^dnorPe*<|ZcjB)^&b5}=a+6zjbi|a6Q#QR?#Gbbv?K!^!xo=wB>GJZ6 zi&tj;G}`jepg*k{7wGcw7T$*%Sw|2rfnII-7#Yww63&<5Q=PNEsT>5~nTIRQSmEw# zzwBg`kpD0#XO+mAbf^Q{i#=s6g)7y4pTW;^oy6?iKRUgWeRyaWAsvj-+%CWE(+GhW3LjaR3g+N<@q z^V-B-Z7S~`Pn~DiefEAl>BEtn%7^?88G_7=zvRg{x&4RfXWf0YZ<`vMcB5OjV3CJp-wY;B8`!Wno`@NaQ>WTFg>hXN?rhpx{-ibL zS=xKC_>3+H5F=Gs+Qptlxrr)!9Xx0K9qMKcD`(AZ_;jIDb_3<2lq>bhg}d6z+;UlN zxy)_@E)RV#zYnoW5tXOEYYK^b5T75XJpU^TZ#>DCEw!5er+|-jiP0?=-wW;^Xnj)< zF)`amS|2m;_Z67(6~Kpm@Dl|8qa^&h4*cH2O9vVF#lUC#;71Dn(IotzJ^TY4{I>uf z0zSN{b(83e2!4$RpB;Y*c+vJ_?nv_bD`k9KO74DNe@&(j&UgFW?XRTHxQ|#3AM7sV zU@3TO(XSn?y*TrLt(nF;E6BPl5T6XK*eHh1wbIXSzVDNHj-HMvR_)Kk^S%_!oA(gk zzayrg;of^5{;}HB3A=-}7`8)84&$Z?+d?^#=XQ#=%ve~`%(|0!qs%=^iRUr>$2_FC z;Af$Yz8`L$G1utXZ+TY7x{_ymH81Je(>!CXI^qa>naU%7SCF+Wzn$@#8wo+P&aog-hwLHu_At!?b@Xd?|1n+UPUjh@p?d~g|qO&j9t zpzH78_4^JQh{to*_^jjUC-txTZalxM82jUAXzrEIAUi%y@69?tJAN*|E$%OpUjg|$ zqakbU)tcwq_0MQ>bNOhQT*TRp1b>qEYn8H=(b+5A9e!+4H}b7_=bT9o&YSckcB>ch zdA*6JPP18`)oXobwiWlkGr@N9aS*(Wn&WX~+_zu_LNxz}0K zbLn?%mzpd2*8Wl+^R4#k<#W_|rZJ;VwezuEu6|*(ePpC`TH3|~Gpt%iu5%*Oz#U^A zQEp(JFTYmx6T8#EGwn%#yf|rdO<~>LKzx#8bM=p3cOx{Chxh>gk|UH~!y3OI|xT@%vF@r37Tw2;b+CIGmUSG|=&fUcAHG*4nK|VT5 zYm87ga&hpjIpZVVmGZ6-zv!;Hnxk^#WnrB^&ySx#KZplvtMGYnMdXVRoO91rKKm~H z;(dVit^r*pUOo)G z%Kw$}ZR=by&!rLHu-f!j>eM)d-<2kZ^xjeS7eg$3txnE>vB;Wm#fCe$x3b368g0?m zwQGz{9%fB{VHoisLlj#^J_O?Xp^f;-p3R&?VeaFut^QW+OO%rwvG3XP8oy^DN7gB< z6$e=@SJL+d_gVX9oq`M%SS^7+TBfgi@Vo(-rj-+`)n2O(n;u>M2IpR4r;_9PWb`OH zNVJ!qY}FP^23DurOY*FiFTh*Q9lpPzuJcq^*x}jP;H)YDC-}yjHusMgzH=J!5}|k# 
zb>G4JD16+BoF?AKhTAvueD@$?ZI}l_tgE#jt^caCm^>4rr4W1@M8n`y?c(lMZ6$RV ztL_t>x=&QyC$y=1@X$ovSMc6K_oUQ~?!Q#!+R`*>m_5~{DIBmyOrhRRE={5#g+6HU z=qtP>Xpie{)y{#ofM_E&M!Y@3oLkk;p-uc$zZG)#u*a9vsjD4dwx4d7@cavxFHOWv zs9yb79CUe>Q!n^x3b`loLhAfbF&KT#ncgwweK{9E8SROW0#9jPg3cQGYNEiu2fXFN zZ_npWgYN)`Jh|RKolY*;c;i`C?J?GQsw)n?F}JR@{Knof{qP9=FdqKz1jh8i z>a*-GxHo$zv|~ca8MLo}Taso$*DQ!>4gp*LhulS^BTm z>BTFeW14zfBO7=RA68e9TV*9>(Peu*{k@Gd{zlK=r@iR>_*4B-V_0tG(6y_?>OaZu81=4~E{~#QTOp(UwULo=UInlgasP=o=fdzs0*j$cy)GoA)j~ z*B;j9-Q=-uxl;z%vNQd(%^vN(s~B#}7V`6Bh4*gpP(#-g=n4ZPoez#gTtp}Od7B5z zy@+bV*}&!i`w{K%!%ilLy=lXX-3;6?;4*>pw`ILY?~4NrOho)Zp0(~$-TfExNw+rZ zw|q33;|mrte;_AjF3TQk-{g+BhZxV%9A|9z;hAK<+#TCZj4h4t3%D1wi7^t&Mh}8h zXCdYy%|qIslg!hemfh%rFTf>T zU?Jb*&<72@9UVlxUu_HPGwFk0kWY;qLPq!egx}1gn@n5zjXp5vG@c~J^C`_Q+{yM* z+KJ6oY3FSGgY;ZBIfw8my^wZo^CI-fhUM2bGbe4@koKMCCTy+^%fHjSLEmZLZ7%0q zco#u$;67)b9nS9<&qKFZcEg~wYQ4Wr?-fhPJg9pi6em{|vPM*ayAWO%zSE0$R?BSQ zs$_QnXP$EpODWGZSLj(IpHgHpmnb3wZ9A>unxvSn+`Q z@q2)iuHM7kqPb!>-+me2HQ5^nj*QO--pKYn(UzYO11j0^WP8aH%kb04_Q4bF1<1~# zr_zX@?jIk#i@1hgwGLmBX0?2qb+;$0Z&06P^>5^gC`Io|RzKvKkyYkV$?8Y+6KzQ9 zkV!60pPX#ka3M7P!^kve8{5YA!Rv#UT*mt4Qr0n-u%5Y?bUh7$X zY->>KwHmG06ldb%uF>3=z<*up9&5&JTB~Nobtd*uK6i64R>O<~?Z-46z$e50TMqPY z=3749kI0D<)BN3*Cy$eB#JBFuxZT0q0M4qpoNIV3TKg&EwG>O6tEs;ECIee`_Pvv7sTGiP$K;0K=q?FVVUVz^yj^Iqehr0=}BPyOcknAgGs$#hwMM&kYY zo09JzRU1Q@@y6gyi%@5r3jP*dY`4pG8Jl|){8P$Abpk*_sOs_^h#B=32+QWD}LY?{4 zIR+lf&!v2G`4^mQkHJUKdEuRj7|dPs#Yg37Q(T$u$;*vQgqG^pVg{dlS{w*?^Zj6# zJr~e-;$6YtN}9Q#Y4wHByNU0%=O?jU6JwwNxeDX6(U{d(XeDn`6u-60`=m~2BmcYZ zaB@O_lRnLZ-mmM+72wl2)fs8!b?Cyq7~XFuKIQml5tM@_z zzxTI3R)>ztrG1a`T}NEI=CUwxQ;m$D1@xDmXW&&lie z9QghRIryqr@HX;c^o`p60<@zGjegge6Vd!E&!q2Jn{Nuk(^A?K3c*wEt)vf9?*2K( zz1l7KC7Ndh8G~u)aOE(tY;<=-^1zr&%GW#O+hfdmeA#x7k%veH^y``Ed5pU2h(S3- z*()eJiatAkU+WU9n)yfPI*zsu2~_WbH|r)^Z)#3b zjvM(=wIArxC4H#7Eh5Gycn3fBi$fFrqqFUbpALgVc8bxz#8bA7A#F70`(%4ehHj1R ziP$R6dGGJvCEfIU&ZDY5K_eS8EY86!(Ah7I-$0}ZITQVvMbX-Ef$VuPbf?-^!I@+2 
zqlngtjvUn+Iu{fhI=%6*g7`}1WIdc~<`w8jo=v^yF76=6v0B=;+2DB^-?+cM=Kg`~ z$cNy0j5cd*{tJF-j{lo-m0>f89uGfs)kBwfSYz->e}d1WYc}|FzP3JBXY7(RN!PV* zO@Fhg%sO^#QFrXS9@u$3nZJ55hI%uG`gne8bKXq6*uc4~y3?#p`S4Bh{Wv@eu~w3O zUTXMX5VBO))Y;URIkiRR1|2#*jJxaP?z3_K`mfwH0rY6O?zJbjzdK;eVRHF0eTR7LS)tqf8i2Mmyt!b9>e-%rG4$zYQcir#ieG5tlyl&~i&nL-(WQ4CeYp%;(|9L&dl{Nu zrtVhgEbVKJ2y~rTqWbh7|Kf;XUbMEf8|Qh#j=g(KaO5Nw-5w3=$;oYE(5E;t=<3TM z(9oZH`wwUyRYA`3@(gQa2p>`?aN?Pam-(VYXSbM(BI(!{b<`C@_R;@)3$N{Ezd${8 z?1LZ12i*@m=*hhy^ik`QwCbjbX-7Y&-&zNyS8t=9b)4PWN&7y)Zkf@aGa%p*eJ!rt zt2t7-#?Xika`@CQ@;Unnl2r>FTC0ZeL;sjODJpCF9h}Pfv6J%=Ws7wtZ9u$XId=B) zz($=jTSWg@InARq2X%B7?KZv7rvJbr8V6F>Z1`RmOg&%rBG2-fAGD{q;G@>f%mvw( z(|*^kOw|E{IdkpP0oxcy^*NkZpv<|)E3^HqlrodK?_n!t7E(q!M)H)L~{Yu`eZOn}|2^y}%_V=}IHs=YK0yhF0j@;p#8C4neXQGV7 zUZ}5(&WLGGanCn|pUl`2y*E&g^5gvv1IAN%3<*2n(qRrVXP9i?x){CDgFRhI1?Xm6pM{3x2szBKltlTYE;L}MqK zezwrX4@z%#8*28t8;NC;{im4Z-Ak;JdpKj+)HS-WU>h<1v~?ghAY)3ppnk&9GwrgY z$nGCcFt)%JbaN4PYHi;@yR1_N+B^Q)`dGU%5!|*&J{#swQ zju8AC{0=A~6R5+(-U^mysT#WWx<;_E=m$w{EK*JZ!73zkyTZ?Ad#xKT?0y;aeJ@y3ymZe{>c{ z^SET-Enw-3eM|94i{_W+jvRdlIO};X{MuhKbxtz*`|63~sV{6g$(}$i%thb~gRdiA zFD1quzS;1N^Xn7g_XPMVz3iM>5l-aX;Ik^uApY6Gc|ADg^G|ty?a;*g@A6*$UG0gB z-d{kc&MJJuf5iyx9@O=F$Bh%(nw%%I(G$8W##?u-UN)(vf>@&s>@Vjq*KIz@@gsi; z-U2IqU-3!YMIL02wa6OYHw~Mbwv0=&_9=fC`v?1WGjEJrZq47GtGq9vE)}`fs9O43 zvUumB+ncY*6F&S%24Amzg4+&0;D$3n5WkbXM}CVubj|_hz5Jfhme!zyzre-c8Vt-Y z&vWn>=UJnE=i<)=|6v#ZTQ0qV6MoOFxw6_cCtTME`1>w;$|AcP?xfxjGqSAW2w~c>DV?2rTb#kCd2UPX9X9G92?R#IH-*M{{ zZR6X*wW)KOS7)@3J)teE59bu%?rHnp(@W44b?cN&ZPUK9pY~{Rg7%TXDPFJ{d#E4y zrT`Zz%Cx0N8ISmV`^5tp)@9doe#gA)+r>Ui|9#H-qzU=+!+ZyrhO%h2_>SJKH2mRy z$rGqUbUXzes!MWxn4FgCgF8OzJTla6VoL?S(PH>hK^&&hXMbzmY->2Ux}7c({$+wvmlL1Ecp=Qy=>UVDBWy*p4Hun*z{Lc%{z0#Ws1CbkTx-`J7C4kZ^LI7`Xj_29s0NkO3Byxr)mne2sCbeW8wzVEyMzsGS8 z^99%qkDyPg=>M7ExrlPuc_WlZf6*Yz-po7bpDtM$PQ5y_kcVG>_0`VVCHb`mt4;rD z9i={623_d7$jR_WdmhM|dH;SN&dBy9cNM<73hD`=FR+Oet5jPO$eB9sBJ0+f)Wd${ z{3*m`Pol27*aMaRJSv*l&kR}Xr){{{8Wt+IO30DqoTt1r+b#^_tD}8~=({IpLJQ?m 
zco}l#qG7E49q8H$ZjJwn9^ysbye;rRXIRCHAEPJ3&==s-MeR}?I{SF}@ItY&;)Oq! zg7YMi?VJZR{pt4mQSOH-#rFklqTi434Er>t-}mbo=a^Gy_4WIi(5kvlhF%{`y|s9x zBO8Oc8yDFqr!D3#V%6u2_nL%VM4O32&YJ>E4tVJ2W3l^#$2KsRTh>V@R`kiJUDqYr zA{mf>t(tudVm?Ncc8b={WPjvQ_@Q~_DW2=xBRU^FloP)NTt-K9vTA2hzKrLOg5xpf zCCNVL4YBX>LsCxi5KF@s$vm5ejq+pWp~u17fUVNV+;te3D&pIk4DMW`<4z3_vx==z z2AwhHp{2}2pSZZfj1@mDd3skDV^;~E^y7*Ax8Q}b4UoxJ;)(i_hl0KwE}GS6<$rG- z(Zt-PT+DBBe&#UoE4i|_p}jY;)LO^r_f_gylu zb8=WqpIkZC-Xa--4?iWIr;&b0f!m8+?85yHxWA>oueT1mwrj(1`xnqCJh#FFeLo03 z$%fhzdZ%@S&dQd;tC{d@8T=XroOFuUU*gMqJS(SP)h_*i$KcA2hiT&v!LM=Y=bPsA z2bs^ana{<8?ZiEo9&R0R2wsgaJjjW^LTs?ZtDJZbmsiqLdHnu`%ddxBexbNl*RdXH zG`!7j%d{#b9RC3Q*@Hn zH1czJeQ4%|p~IX$ycSywJ7u!l-@4zK``64G$6Leb4kK?3)AnBHf568&^tV|*yRtie ziZdU+ehYZG@00Zy^L19diT|QI$X)WRVeHOdq0{jHbNt|`a zjXX!07-gQMOs?C9H=BMQ-5jN#m)(7bxtFiM+ozl<0AJ3Y@y3Vn6@AM6-{Mm}xWsqn z)d+rrujtc1;5!MJJ7~rIG5i}7)JE1P@=vAa&Pd{s zzrf5bqsQ3qp_jBKzpLEH=J3^wb**Es1CQ334?1gnY!d8z>MX?;(A=mPoquxXMtW0y zY-v99+l38h+K;14?U{mZ zSYXCVe*EXiPwCymiEp(w?F3#v`b&`)=|06YXq>6t8aHbhH+7UPWlY78VH>a$Yp%2=o9@2#JQds^?! 
z{O+SJyhSW8bW}G(ZPGE@RE}z76k8mGw)sM z!Ls*aXkff~u#*LA^4{F=v%6flC_Yw$vzN};IylebU5xmxW!U=c4c070@02xkGVk#l za_6))Vi~rt^Df;k)UPyn*p8S8vYuMzTl89lc|5X!xo6B-8b40%5y3S;pX`0n zT*Uu6{#(5qee)Y^Mfzwsb3Hj5^5$u7HZns#bnSte-{=;Nee?Uq9!8dAix?ffxR?DA zy26p6j>mqfZ|we(`m4Td-6Z)@zd2(!+l=*2@Us-&&1C$@#w$feCNO^F_a3PEpLY&z zCFCxauFHw9hsWj_c{sCqHk0w?lufrMFuqhai?T7+AO>z##FnpmCS%Qk>t;`2d1rI!oWp`q>hg%C^ai_v2X< zz0rPLWXEgq5iw?vFZHMRlofwb_=DC6L+3l>Ms)i|{513D^b;NV8hxt0guc-l)xdTu zF+B3)>LGfJygGNf2`+&rc0vNr_rN2c)%oz-lV^+b_a3Z~1?np^a9Qzj>KCI|+K;2w zHv4}~mU9kxJn$j*X0-p;fM3l*#;n07*dMcA(OvT6!xh+6>`etW+zM|aw;;R7v~2Up zZeZ4ums~l;<)^XOE8h*?j`ldETm5@+uTj>^3!f4%Ig!0tmo|?s#euW;Glsn``5Ed9 ztFp`n)?yv7>&D^q#rrEp|i@`r{3{wZ=19Atd+9k0b-Z6 z{&^ib=*#&{>`Tb^aa%AHp$>8ZSo1$+?q~n5a>t2hP%fKt$iw|kxj#^@JXmn1%4P)~ z?k71i<*0KA`hhxYcHA_$UAY=lj<}*W1jOn?2-I8z9?Ktf3X3?(V^QbDrk9tG=i4jeSemgS0gVn}@l&?!8_qaNB^B?r#Uz&?f~>ajWOkmQv(RvZQsF){CXCtUaUu z^i!!TYroci`lb|Fivl0#zihY-d|SuSLo*qd-gxof)gbx&A99s3_tjQV|90?Y<~T7Y zb8@YgEMjIQ$LiCEp=me2*HDj_3tVStKffWJeew)^3!PXOcdkCRJgLVtM@cp%+a@l# z)v7*&ab1P&ZR}b0qct~=p#P<}G*{dS{@ZzPYop;is>jx7>P|W2X}@(~?O(ES<&ZGH1@{86}(%WIbLmr)0F=5exBVXh3vy!BTQa z7Fm~7p-;yVM_I;q`LWjF1>d*khw(AVcaQ%mqO%^&@IOY3QuU?nAm8NY*s9=* zp~ueTIrd1l$1nBuZk~x>2M@Py)_dKf;EtX8#5j28oD@0R*4H_A@oU zyuO_raz5A<PS_eGCMknhzD~rWb!Pm8c%RAm=|W6SAYli{ z58#cR@F2CFe60h`I+S?*U|jaVljPg~fN`Q+`!g<)T##3pxu&WRUQV3x+{{92_&99$ zAqD6F-j#Ra{MWlyHG9S_`Lsp*jmrISp!4`@@n|Ri#k0papBCe6uU~&f-LbQ4SZtb+ z6W!ZNyOrl7pSG@>xb`{u6V?U;d!~{Tpa~yxKk^lS&Y9i-<5G1M7N2Qvq(1)dtx79+ z&OCd)$ll;QtEt+0qVudN$9{@u$bDtia`#zz5Bp~hTxHcn_Zj|xA3Jd1bD!0h*sHy= zTTgKCabEU^PT9Q;#IT!s?<=w&a^cW1*dEZg>Lh!G16Rq0fb;B@K6cD`Rxf|Efy+M0 zUgW}Y*4TO0&9d)w@a-kHi}S3a*q-k`)4Iu&%^zsr=$1wHooAJU?He38b0@5Uvn+cS z&y;&Lxa5{J%c?Xp9OkE-UxJ$JRRs3SsCaWnM+QD^8=X? 
zJlwQS8eey=mXm#$I!-sE@hUiO-TpmpnB zeh=+axduMm+LN)^v(FFLz?)l72^;LeK&pmzx{BGiRaqo&X=-rxNLB^KssB}JNoLs<8Ni8DcVWY4{uAhm{o-n+ZJ<*&y$c}HLzpC)bEdamtwA;4IMBBL6 zB}Mm=C+bip0jtt&_~8`GXmS5%Lt1Cf-FD)W!KeLs+<_M%7p#4?rZ(l?iC||~w&EIP zH<$7rTb;3*8@~uz-es(b7JOz&K3{uA3N1w^C-Q8WGjIu-YxpI#adkQ~Sq#w43qmOdqzhNIxYuhZxrfeq{OX)Ah6zj0*^!4TR z`84F=vT9_&$V0c|_16?)gne`6dU&)N*-e-HxNG$T%wNZ_!92O~)<(2j>tVBw+m0=8 z7iFxp=1~o-iyDb*RQ%~0o;9%+-u<7o)5uRZw+_4HdTaq#ZffX*`p##EE;r+%g1h}1 zPED*yCUcJBdibUC7BD?%|KI1N$X&h4o3fMWLtohp;#$5Bd3_gsD<(yM@&(Ab)w=}VFwUYNzcyDZU z@HdWg_M4jdEuT&ueZd*>n$72qGk!c{Kf5rU^d5Um&;E<{hOnPA=ar8hhwV&k*7SmT z#iR93_gQa6c4Zr%%2;b)9o@uPq=LT0*JCqFUM==l#4GKyYbPB&*`&AMk`mi_(&YaeU-=?!!5WKT3Fei?Mj2QJ&Vd|7nN1}ijDa;N!h zF}xEV*;$28Yd;_-J{kBr_QgxFt!1Ybh6mbFo>eirhbmuiPW&vMHE<@(4>yu$ zQJ&oie$J9qZhqOpsTiX3goC`m8jJD`B*xOU+;8X`OGT_V?h7f$O{~vy`q70qW7mwm zv78a|!OuqL>%MwoiB^*bFT&lT*}89z_=W5Q9kY^j6p0S(hIVwkld2E2)}Q6lkp&%! z0S%4Mv8yHq-`qTCRQ0nJ;A&c24kZ@8-{Y8F`fq{^EFi`)8^Q?mB_<2;lJeww*Sl@GYma!?#aT z>CGre^6hQPi*Hk1zQvB$_Z!HW?3ZWe?mtrYt}CL|3mBWyA63iG*;uvwY(8W8oVD=? 
zkap#Bq3+pz3QD5JUm@6XVAi3d^?N+O zwbm)%TYC1#R`bX|v1i>n3I7Im^oJ8=e<-F|ev5o_HsJjrx!ZtmU~S$HnfijcX8?1f z@LY}0WzoM!*-w0Ms=XaN1*d`so2Aqp3)=>2{^$oD-On|Eb6k1&UUK3O(B_Wn>h023 zMQ#M`OZxl4$Zm{0Hh%tvW7BR2*Pa`k_QJ9t5ptrEIIFQA7{#V<9ar0{i~=8+f}*i6LlE;rL2$r_zN99brJB=Pr@($xaE@i zZ98YkedUU%qlxqU#Bw@wSDP4I$42kVIjSySc^BZX;73P!gTu_<@{cpW>;AmGBbm#I zC&b4Ph-{uU)ZQ_$#6Gg~RC9($zQEP^UN&JXza6kfdVRVJ|A_X`b%sZ0^Rl6}g1M&R z%RGF-+3_)i&^uAKN`_nporsjqwUo_^LDJN0vok&mZ?Dg8YA zbVok6QIE=qHhqgve{LbRk#)b{Hc@T~FXrLv?k7QK8-B@#Yce?!wNG9U`M!9CZ?&Im zSAPy1@xS$w1MTI&uf6iE=0V45__q0^TzdzykiREvkNM>QTkFm9`bVp$Kug#i-;?nV zIctigkU} z8Qt}kYkZk1vl98fmT#3o*G|_OSaxEmM;Gf}(N)$fjWrd%UUbh~>Qfx~w~33qhBo}` zddCkd`@}1k;guWi+ykm||3)Ur#cJ}hUIh;EXFGWG?e(?lPxSS5_!E6&dc_&e`fRFO zR(-9q@>5)D`Z|xk{_1;N%sSzQ0BiEpyesBB+Hy;_Xv&#i);u?H-9Oreo` zPVcL7a?{IZ*~PtU+QV?=wldL{TiEOH!EFOBpLST#k8NGq*0v4|anE`!*IlJP+xiCi zL>=3@LFY#D&myip%j~n4;+w5Q?^>Ki)w3#Smi=kqh&R!*pYRO-zw<0K$UJ-0eb%mR z#1Z*vE*dTVXT`_TKEtP_S#~Zym0WaS9dm9~ORM(WTFUi3Ytk6jn}%QRdcWqCS;Py- zZkob9)bJwg%9!#x{xySnk+JoAbZ}pBtaY>K+Um?Fjt;mG`^Cr0$>;#iSvuv$ zXB(OxKorr^$E&B>Iw`=ou^33>f?}D~pL))X2lU<)?RZJ@k zrP*nLs#~F#xxIk#R1hB_UW2ng->%O^a+2)->$pe2hqs8llahJyEoeQ&8qSL6K^O8i zyvy-$SlmzO!!a8i4?x?^(Dp8T|J|ppo0|Dv!1p`g&0>5ulqK&Z{yBIv80+wIZyC9s z^)6s7o2I*ob%*S+>5gyUue{$pH^&Y#wi|)b_}WYCMHR8?M_AwFF*m3lKW^2v%Ej-; ztGk+n`y+72j}NxWMWiG+t#D6UZq{_`sX0IXW+rVq07@XMd(h+4mmM(=9f#JR$jmJeAM)0Px^Gub~r6%LcWli~!pVpD7b3VM}K9*?9m5d?L>ZftE zOJ6EKl`A4E{v&9MvIn;08}f7Zh}g6{;LQQ}P>O6^1n%4Tj-fMd0*7p0m)A*|9Xlqa zPm0LtDn5UlZmrh+nab(pmZ?eTxue{L)TV!C(`MOXk5hj*!^yW|?k%QX__gWcBF%C9K*md0(i-U?q|rHnLcJM?eU+X z*E}!(!41Gw;kPW}+gl%O!OoRUu#Vim8XF&;4-C5Q0mm*?-jTftel(=ELHk^?d&IXC zoz~3VY|t5Xb>c4Li_D7u5ME0!x;|>@lq9b&%t$Gdfjz1ISw|!yZ+9=Xk!2pNg`?NW{6L?lG&Qxh|Vr3_f0Fm^=Ei;@4B3{C@4)bW1Al zCl85DyGF8eowa&9zUq)=an{DMVRd%G`1#VDn3P8XrK4QgO7ikjM~8YmT2KAwfp2s0 z_Gcam+|nMNoaTO>PChj@?JNU->oc+sH6KcUJ36W${zvGl`l5AsoNtZacCsCIWV_Vu zqYUylljjS&hO-CA_h4|pj%>A!qlwB!Mqd2$buD#R%+3D!dXxLiKVSdG|ID4QMW4#H zgPRU-qo{rENgR)_l$X0&xRvkIq2I%jP(laf(mp_JKF2XE9s}j_@}bs|Lvna 
zWxQO7PuvGz1pFU?4bwONI3?|~M6If6pK`_qy)nx0`{|SHxA-4BF+eVg=20>1l16+G z(n;@K?u>WQ$2hJ}jN=8|ed4Q6ewq*I>vrvUB2{O~CnZ~1>#Up4rQg12-SpCT;W0ef z0{+5=LCX0T|79ulG6$TaJ!E1^X41zoVpghA72;RH2C&x{Fs=ZW1Tjl%H9gY5Wz5+dFF21q9Y3nP{=E?dt%G`LozNjJI$j6&) zVf=SSj)$c{ri3zW$HxFq?tSA!y57L~#z!CTnL9q-VO+$Ri%aSE z<+MX_agSib9AbPuf}Qn9nzcE0;&|oeoPPoS|3?KgD}9XDPA^76bJ1ca&Z7=o!hIv}SdX{VfwKyja{Uilr0{gk&|Sy>zY=$lQWjX1{f-MOTq=8&i%Vtw zd4Q~BzgBu{lY4=!Vm@K))p%=@h3+%@wNkP0(#5y?pSk^dv-^yGsbTKZc&s!sO73{S z%;`=jtxH@u$-V=ZPVP*fY_6XgoAx|oe;BaaIVT)q4WoVG5c=&j?-~84XXWs}#Ct~n z=^14<5B8qXf2J&L&YU@E3}<|XTPHaE)Rn!y@#vlXjLDex+nB@Mc|Wxs7pB?=voB1n zA5-D}nQ9-exX97%56~8iIaGF?m-FcJSqXbC6-Rxl&dhLe+zF2Vn0{=3H!=OTX{qVc zV0;4Kj-8-8kh9`M=dpmbMO{4mezh}Uh`$Mvc6?XLdZ-3;#q$6Qd8=;IK+U|%0^!+(!W!(8CNqQ{f4 zu5K%J=kOKy?6W(OaEHp)D8n>hoJHBYj%j&lHtEfgXaOwi5%9s_s~AibLq^Lto^_OI_K>AorZ(yO+8$5y7sR5(veMGs&~!-m|7V zW!F=89s99Up}iD+soeOwyXq|35M@1~Hk$nT#5#@SSp&KQ{{rz|1LB3mNu9y77|#-L zd&BlAJhRxdEr&jx{SYnkffe&#aR*cQeifZY-hf2B%U|is!!r|WZpli0V%#W~w@-H0 zlXI~fcs~Ay@VkJ&0iK*hnRa9MT4Hv6xF-`+tbK@bcsE7*5?v}eevtVnoBqsA;z-K! 
z0OB!yI24!cjlmQd-ka(d*#;c`eRup#Ce~sS{=SLC;E*rrX8F%vF)@Xz|90e3ap{MR z%%{e`dvbmo@dAn?WKS<4=MTao%hiXT?BB;T$$1^mjSPo%?ky)i6j|6#9E#+84=^cm zeyIaFgraM_)^Y%8|r{xio1>h=lW8MCcN~1XeVsLC{@%-6^j`mjeOZ(qoA!=dr&rd)T@Z`T z--6w<-Ha1wF7;r1bAf|bc`f~TJH@8`0lZp25C4X=%*IFY+vCtuMcc+f&pLeNqT!db z5_62kl|#deti${?bWX~xI~O@{`OI5Bxb?t2+^*g~R3E^XWpaZPvru~Vq=X%qO?!+@ z#d=1(59xoPdF1D%?d0OPJip7Wuk`BlTE7j^L|xWZlWNtMvSYqSJ0%ZVPx}-h@MiuBWS)gh;Qiclu&h$zOV>Hdpr%R7OSeeL{x_q=W3*X7zda~3rJjQXVi$0ASW zc_7yg@q8<~@gts>ox=U>`0N5P^#2mpd5qy*oToSI{L8KC{~~7i0Qj|M5>^Zhu&;u1 zC;Hv>mqtG2f1TC*tl+S5BGvpCUz+akQ7awY*59w#u-4_hQhoi#rc3(z+wyG{IK{UH zaO&)v+Fa4xI!@N-SM+KSH4V|rxJ9`E;dVKsa zFSzleg0zj|q2V8V4K1aO0c+lN#qp=be)Mq~x%u|Kzkg~VXvYVQtZpJcy?QIpmRff8 zpb^#UR^Gi4ooR9ynEm&O!8bo#9!%saY`Y)d*fnG0G0rxKk5$Odlfa8tI!Ct@A5=$t zk{&pC$yKyT_U^aIN7~86&W)$Nez^<{RZeE@abl|sh_>82UT3hd>CwTmIjz9B9g=tH zHqUPE$VcY!M|hY6)HZU9IA`EEPgWw_*lG4@;17kl_uI1eHKtXpiDJL4wt9^$#*Y*V z1kNnR*Hug`lImtZEu#A6|7Q*F*>5!yo+z=)YPcVJak|lqOLdPVxqNje>i6MCxyfsw zGl@A74~KZvHg2xb@MvPS=TlP~X`A8G5o}9xhawl`W0+{!Ui&&~6Pt&t{rJ1M?Q2MT zXiW(`&^;ncsmu87mRkn@`{4J+x_DCoZ+<=eY8PIShyw+Kd*IeGd${Ka*_&htj<-B9#Gou?PX4ai;HR3aZf1Ew3 zwQ{UujVrBZ5TN_Dv>ZJXAO1xe`9aC0-!CZ2rSX@4v-6JtqdRR^J}~4qnp9oSE?WcM$$% z(YGECE8yXj4tO}EJr665%yz&->>rPZ=8R`GYm{%w!wcY{WV!+#>W(eR!H%Gj#rn2- zQ~cDPub-di7#aLMc3*&aYUl0*3mdA5@f-Sd@!RPBxK$W1cR*-NM@q5Dd9N{!y%u?% z|60G6POxe}C67c1U6{!S+q1GOA8aY?baD*!?rfK=3)xdI>uJwh+Q(iM$g|(Z{<~vZ zMoIZeR`reXK=nL)Ir7QTr`dM-bIZ z6-{2*Yxu7mR^?|#Ys(d1Iw4y768}Z-B;Kj+Z_+o)34fIM5Jv`5*F;|!JHuHMwQkki zz?fN%O~aWr@6M=dY%0lz#!xF~5{VtJ4RV%bLNGL{BG^2#-#3ttZN!TI!=*v`ReNu` z3nTbt&yf$hzeAiOw~#;e>#rEw9}Ke0J|FkLMwF{Z?G;^Z^?)-Eab|9-GY_4O+&X8* zgVl#Pb7}6^p-t$#dfAPhu8Ywo&!?sH!D`zk_=-$M8ffDo?z)3mU{9NtBX6XX$+H2H!V|>>qbOEu({_(B3 zO1_=@U7mKtmn}v=PJ#!TbKnp2rsje}oaOt!^5@3?Km38_tN)k$nd$OJens);WoYk^ zKcP1KLEgllbN&zf*=YEq{NG)w4_zbqA@?`7Y+Lz}|5G;17UmDFLxP;YlpjiKffjD8ChrrB1Y?(7hLB^c_=h~pj~oVkvRukP-E4Gejcc{xRZK=tE)GCmpO=eXlQaw%9e2ElV12# zAMci!ch<8$KFqn5Fgkm%3!gHlTuZ#C4}Kf)9|JEvlAnZ6;vdf1*9SkD{{ENXX}_nZ 
zQt$F;F-*f%sXh!5t5XQJf$3`YFAL;B6w zY4167h8a(qyH=R-krTNSI`4eh!dInySvir1;D>yaZRVC_AGeXABJj)xhh(Vzo|J<# z!<9|v>{LPA`JWZPihj@-(b&+t8%$rYcR*j1)0QU2dj;qA6<^@CDM?2gS)1Jj|Fd|G ztm$5{3ii|n(pGPORQ)NOY8UaZz004mYTp5_qxjzP#l0?P{WxMuIJ!{wqRvp8GuGn= ztqd)`^lfKt$+=X?rH8^))cHjaen zS6EfIz8@@}t9AYPVdb;W>@qGheLi|}`Vq<;`oYBJmx6hFqWm90xhsJYd<^(dAiqli zurv8BJ!qYyc|RvUmbN#sh9@`eox zc_*L!0{aSCDfjsXJel$16YOP2S$D0P*ot{#+qr_|AB|Pg9=tQX z9m&N~co^G>e*u{oP1`r1|0W3TIuzxi)U{F zk9;eBoxpzZGth+$C+}QduGqO?^%2$)iM8u;vz9Ptw7s>2-xgFH-OJd9>bnZQ*O6~r zxrs6Zvi0$q2J*>YG~i6e4)f;Uq>M zS;&ir23ReWugwixEuo?0qyg8H{~u}Z9v@Y8?*H$ZAutIS3AqAF5-^&KmwE{TRm%`e zMYJjyt@X4eO0fZKYrNB{1e7+2Z8M5@PYoiPjJ1s|w4}$@AYxlV`=?%9Hld?tCvh+RDx2;-H!dr_68>hxoBnwyu5ak35Fdf@M2k80PAF)xI5O3& zd0~7i?`aP~_~q_%@OrSguI_zN%vEyMi|eicPov3U zMgDG_#UHVgl0&U`4)T2epm%rbq3OHL_&BQBDjz2pe;B$BV7tjal7B1OmTeipmQyWC z`O-rnhkH@D^Afr~fb5Bqx2E$Yszq*m9c)Ky6O}}MfGt?g*}PnQ9L@mk;XT&2EK>bq z5iz#PSkZXuD^)s!xgCLy7HE2MDoyt$`ueh+IkjP;OCxq)Zx+|wW%VF23E zvmYaW0?@mj>$|0OlV=#&BiNvUaJj9gfjzT(vFtWvP(`ZzFn7C~b*Ud~=4f;P{*l)8 z4P@+_{Pl5W{zCfP4qw9q5#?U*;Q5uRV*w96$G*7sxkb-A*e^+}-5zqLeR$hOzq1YA zOpGW)E^Pru#ftYGBj53iMEw0H^q=kiV0bG=op*n>Fg!!us$Y0G=SCzE<5XGYwV_rO8% z^N`c=#zTRQF~ZFh;if2on`&^=Ku+Cq7dJlzH@b`PYGS3r(c0ouV++Ag1N>_00-HNbTqu(Vp`AV9b@OKQOmI~1JQ4ujKk#hr zPx6&_-}OtzY^F~3(|j5_3zWv3oahrl_Lj)|?P@+LVD5J58xeS7dqMN*!)GvG@VVXc zK&#QsSDqYO=pZ{9f@U3OvCi@dPGvn_W~`Tk<6_;xK?~q<;ZDb9-gmX z6KI*RGYkx@Ul{$>&3jH~KDMXTLk;BgY0UMmO;@Wir(-ku#@vQ1l+08+=*rp+YWD}u z^7`6IX8s3j{SIw7Q`1VVgFRPYb#iR&{UbO}lbpj&mljp?ReSH}@{Z_O^V`P!M5o?w zXY!kBSKP$-2IpU>9c*VF=Kp!M=FT2;>7MEfYX=u2v+m4srkd}cs~xP)aW-YZd@=Tc z59V#i;+K%U8SP$8ZC6LT=d-o@E%~Ru{ua?r@%xPa(%P+}-6q;;J=jY#Yf@2qwCea7 zyQABs1@J9-xaST2%-Z~x8Y_(V9Bn2B8#_hS#ypP|8r^JDJ#Njln04ma<8n_T#E z7uR*X_rJW?qPPX?73A!j^Lhx|)j4T0J}vfo1U=k!pJG(mX!EI5+FZoGrRvtuSFb~T zC426zAhP-szOQ}HTX`R!r$+gbtYe0Z516{|s9fQLZG7#HTzn~wz3t2o zsp-%ee!e8OdM3PpY?ZB|`fhz&oey*;(EGq7zU`yVd&XLkcj#Z!I()$m;7R$FWfvn$ z`nV5=zv#u}%<%acKK(HQ`i<-PX|(Ml{u912dIxtE?p=v~+9iF++>{%T=S1(w4dlJB 
zo9}P)_q8{i*gr$L@gMJ9Fuk)fI{kONvovtn$lC*BM=s3?%)6aGYK_!42S$#J;1{Bo zUsxQ>T~p0ob)+adotW3&JST5tly>A-)O#^S&o@ux83A7ruyy}mV55fJCxtD~2iu|G za>|_-pB>5GzwyBWD2C6Bt&&Ev*1_d(>T>^bGAa(_`| z=J%ZgU(dtN!j?EFJe@~p_76M*@5xUtieL13XD#FOY8?9LMOR1%^cvWD$dC5nu<}BS z!#|mK3*pn$y-1r&VtvIZYpS~Fk4!T$tB5P}dR-l3`&a%l{d;)Isc)YDj0@K(BkehL zWN5F0t)u%JUWXPQ6HcBA#*`cD?SUog4Bz7J5u&>r<;O9P)bThE;v27ybM6uFDw>0r zH#VY2+xbNQ^g3Uv9YlZXoX;N7V68jYK(X zaSih~e)mQGcGRtfKILOZ!Pg&t7u}50S#vpUT{sM$46U8dUX^dob)2!E2^_iL$I^U3 zeEn!k>%yzGfeIrpK_`trh*mEi6YW}(t>4$llcrzU9~VuK zW>kg7PBZ^w3l6+Lv=JM*_hszy?dT0l3pW0!G2kUbSMt%GoA1O7eVu04M08MBCpvJ5 z+1Oa-2Ig_?fuX7DUwn@|P4NBE$S=tO@xAC6J&ccC5Pt=pI5k1*sud)=wk-Z6G%7vf zAS)gl7w!5rHh2$yc$H+w5#-4MAGqoiuNMbKj5M&dSs5+Za!gD`{&jKWodr(E&A@hC zHr!?5@2hF+_P-|~OVapz5obge)9x;4S3apHOVaCPKh0gcbFk$*_?w#FGC}(@e%KA% zeXzy-lubuh(*TcM18Zz%_t~-In@}sFmExp`T%lp)8j{J%7yjXBz zU9deodA#M6WX|RQdpXU-K5~!Ho`thldplmt*o8Ssn_2Ky$(X>Aj2VaIBc|2)wy~O4 zfBuP~NBFD#6Y#6*Z;xag2iz=XJ+-&+47FiIqvVsE*f5G5d6)ivc5~?kA?#ztL&*ym zw~+do+tF?8u|@`_I9)rKdjs!^CUg z%U*LvEL}{F*3UkAt%Vq4@BWp;V(*c2-WTjXy>CP_f8D3Q&NxwM10OQ3`jXT)itGe8 z#CsxrBb!fu<3eYx-v8N0L(?`NMczVQblIi!zn=aj|AV3Mns;YAYnyYOsl>XMJ+!21 zx;`7BhxNR7i#uNWIQ}vIn=#ZDKAG0&6wfU?-MLD1vhIVSdhMGWiVo|*b?3Y6e=ra2 zJ--Q#GU(8ujtR7DckBfA_NY}%>r!~SqX-$ufSKy!c>wTLzRLJ@#nxkMuKIChz zdt825@0W|sss7|D&gFia@m@~yR+3K7n4H2}Ir#op((XNKN2KpniB9Bail!ucDq4rf zp205BddvrxZT8I4o(k?+xy*fj4$u40#gb*Y{9o1_*t>&VaTm9(3Hec#>MvCusW`E8 zGUow-rEFMq?`6bGi}2gU?>*L+Ow}K0b6GV-_%4i}$=n-|FUltq?vxjKsPXH}yX>v; zZa3?jd8Q>bzoHwRs60{Gc)u6CHwN|&0I$w#DGpfCQ-ZHi5Pz26Xj`)uIno%6&M510 znzp$3@brn1U%%ySluzc>9c67|)!-`trZ!;mY&uWQXOopVmDKK}y=XK&?_v|Vf~$iC zYpU@TX97pxzlZ9J;8)eV+_W+j%R9much)9n9{0;O&0%liHpdzJ_>#rb=jM)F6AhHT z+!!og6Xspbp&!5WFVM|`n*vS9$a;r&i_lx$?+?{HkKZ>lP`LYHo||`aqBGvZN7Wew zt%uIO+n99_-z`M`=P>;89O7oguC1;8|5+ZcI-z;~uiqKErxad0k9F7jbfa5E%PaWw zWMKn()06oJ$m75#jMq+uc73*Z3;$~ke~wPRh;_7fYEmcv78`Mp{u?+0R#m{>F=tpW zVx4>7)9GWWscr5WFF@WE#;e&gisoB89^N&2R5j4e8b(v?__~vt=c^94OS4JdJ&pWe 
zAB}8au7%m!Wzj_|!HL_S8QYATro(sA-wm|87uv}tr)F^{G5#yMJ++>e@fWsQ|I9z< zx7ja{EYP@znsbg@ujfs0^?6|EGqh3=KS1pTKi>aIJ-lYt5!}TChs~lUeM$Ux#43rO zZqi!4O?$10WYjMHf8G3V*K9j=-xMR1@6gP7Q03OiAAgniwWpEG`32&CvD^bZ_} z40BJe)-pqO9H5?qC*zOx+Zu)ODqz=L_MO0g12X7Gd}iQ%GdU;zb*yflKSA$-cg5o> zpe4=gUf}A(W}FbV`9_-GM&?xJOxWE|9Z1cwlkcyhC$;xGbaS97915Hym^GjA%qL9U z${ut`8GPDKPIOOj!WyU2iS6LraDeedo0Dph<*tv3Y)R5vntWAllRf_$y608iy8$?l z22OWvlka?xs*84lALRpW=RM7DiN!~!OAnof4o~Br3?Dvnj>s2wIbCAdY8 zqKBW*@9R^n4Wm7ckkdI%xoMt#p;+N2)`A#X$rL`b<&PnII>Yz@(hn`B-ZVL);MKbl3pndo+|-bZH!oq`v;R)%## zd!5(^mL{^uuzAeUj}uEf>>c@NM|I<51ALaYx}Qb!^zya*^)Ss^1PWr}`N%BiMdc~! z43mv}+FX%x#U#T~o!RNErfnYWsD*CY`TPLE-I{>=Ue0Ft;Mw`5=J`|1+NApdLCzhA z(0^(E0Q#?;*kzhO&?$dA9CD_0@LRh5xDL7Ow;$jC>KXJsn>crEhw$a{w8iV3)VPdl zOXyCS_xDlgP;wACF z?f>?)*xTot=ilwFuN%Lz?T(~&1$Zilmqg3kX!obRL-*jI)UapS$v#l$TK0kTj`o2T zt_>vG=A3KgmiB>u%R6Q*pUjU9KO$O9&U6>=bS@m5?0c23y?nu^dFQXJ>4Twy*nQM1 zn;ANMH}whXH7@p$&Y7rgYbUwPt%70GN2kYL28R7V2*y+!U@Y_T;9hjLg~ir3EsnR- z_j>M<=?hK7cULXI(s&Vkwtz7PsTUBX9>Av=^Qf&(Vm@_qy*^YIuQ-0{Z0E(Fau3a2 z^i@Tz&vxps46nBH^2S}|zO%61F*P`EbK44EUYp^S(Js|Ck&Iv1In%UjPPXezw5y1A zZS&ey+5WD})-FHVwbN_Y7BKx?k*!@(v@6HO%feFI?y_XNBz)teU6o$D6NZ`oE~cI0 zWun_udZEUvk6u=t*F67oYMX^0Dxl4S@q>4N(l0n;DnGXo9C?kYuOMz9Z+c4ei~L{<>{_lxkY*7NnGxcj8lx*=$-OZJ7*jHss47m51%`%op0sc`BvUF zxbkil?f;Xu>9O|%3*37yXM6Uv?i(zT9*>s-5As*`bi#f~z?0@r=X_1@6vuC64EbHl zu?cs&cf|BT)9ojj`wONuqR$nh4hPCkng$N-+>7Jqvo9+Avv0Lm_Qd{R^Zo6gfR+{S z{VHSD5bsqzigxlp8*RL`GfccSY~!t+VZ~difuge()>bJo`9vEy$WPgGAYJOq%rW=|6qu{|8`!hQAlkjKVmCYuf#X(jW zz9vs#4*r3cOPOX1By}ORKK!=8BKj!@P6r#OAxtka@vmB+HB{e%Aj#scB6yQ@f?Fl(12By66Wy24TR_Bh5E_(rZ^G$O-rx1Gm{dyhz#ZzNTpDd0w-W=#y90_(Toj_e4a7p{l@ZJ%zM(!_lV^XO$u;TZO@5wsV z=(>!tMfd(Q0{;95eXkX4M`<&BN~ z*_xoijqFeQ#;(gF|IZ!UpW8sp%H;fey4{ltEwbi49pl;aY{=THuU@&;8`<7w{i`wRJe#4tA6*hNA77vMp0kM-DsUQFc#Vjt^5 z3y67)k5h18@z;&_AYa4q&_3D@1;b0%5nJrZn=tMQWTEB)ZuTT_^DMrS4>!t>myi4J ze_DCHf_EOJe?Ltbda9>>j_*CyQv0X-&NaN#0bQFt(VL$z>k~|U*_S! 
z*wf?DgA?NaI~*Kv-Z{0`TKno_zf=Bk`tvqnL~oU3w2kD?X#UeMVF5P zqyPPEc&r1S2Wj6kdH4k#pS$Rb!TEf{5cP$Ohrhm%d+xUp&dRd#TAbu{eDt?R3Z>@NcyLURrLo;@-G)x>N-(5f9 z5c+R5eWmeoCHN`HE{PHG^IIK8XLdN)GI0iwllC{qSVwQKT^4o ztv_czUGVk$;81yzo1l;V&v1vtH%^IZze8*7(WUmQloz=Wnw76??}wQX;hxQ|kN&~_ zFMX?@D0gLl5ucQtgN<{z>v0aB6M=0abr`faAbK7^mJO6}o`l$(&N2V{oly0_(VSx^ zpGfcMF5ijEt~>B8>f6cpy$zg5=9Myrf_0G{W;GYJ{p?(0P#lIcoSK#;q;+#Kg;`P@?5wJGkzs^;#l4gEvQCc zkvaRJId5UkU!?8B;8C@R$m=t5sP^r#G3&n{jy#&kdH+BMH>nJI_qScjIVp!4N{7bn z;Rd&lG0uNsYpQPF92XBSL+`=^a3t{XBG2?)c(}obhf_1~aJa$4wJsjM<-^0X^x@$^ z@Cgrf6-gW{Y@ck_Uh`i8T$g<8{3m3ce+Bc8{$prb1`X`CG_a8z6wbty^m7NAa`OE& zZs>a~HkC`?y0b{@^JQSU57@KunDyU32mhP{ADs<9&4sVdBAzve+D`Ut_iq^M<7$Yk zgHLpCpJdH9+%+hdy*PU1{D*jVAJ3uf$TjRGT*#lE%MTH~9RpnYtr^`?#q$zm%mHWy zzU$T9*!#zo#Y(R}IX2^+;@E!$!)q?tF)20zx`YSv{r!xmp9a?4`ZwlY2x76i1E(y$ zJmk!{0{aoVu1WAKbzJxRO>@);?+R>xVo=` z6Wc>RhH{aWBjB!CZ(_}EIM%G0x8{pRt8QoWINgJz*vh?cf1D0heC6PKZ#lp?bc^ne z9mhSQ1@KQH`lpCJ)nOUB*t^F_YtT5#nMT|sf?aR?fa}OhMMie+fZpU|g!4~~Rc|@b z+=p3Pe|T&mdE|Ywk)7ljC|E03$ma0MxUlNJ9Ka_|`sMWGuE`H#(?+{_DACq--o_W<=1sBq1~J#9DM(xkyZIQ=Dm(*oO!v`wtZOzpPaFi53fGi6YFZ=b42+W)wxwx zhQVvCiFG@lGhr=-*8Uy~4N-Ufk;_S*)t)Q;c|E4zTy$s*u&~<{lN+Adf!OZs8B@`5oy2o%Q$5 z?9eFG-(KYIXSF%c0JIauEKtn|C=c-QIUw4_x7@D0@-CrHiq(9N?Ajt2k^q@>~Bq7mG%M zktfel4t-{Q>Rf8f&AHOaFTQrtxT7kfT}M~5kLwhVEDMZSIvG2ET44AZY8FxRBN*A2 z7ce?v`$~sA8|6TY-^17rh4EK8BU6sA@6I_%3wwx-`sTcW+`2gP6-_9gTJ2MC8ZJv| zUrGBPxb2mjGVl{8_VkjP>18)L9cB5JJ{{oB1?F7QDRh}j?49^aelEXzF;@S6mfx4| zL#E*q&Jb^hbq`yu6We$k{6#;+LN-o@z8l~*&b_Ra&FlHayUtF)o}6nv`#HY3Zi5Dh z6L@n>wrfeH1Jy3``dzqNakqolj$U_|vvDQ&naUrQ&DcDa*f+4w;d3=Sx*Gd*HTTM` zE+7t7iLVR2%1@Fl%z1`2(#@Mz7R2N;?f?7fvGGqjvC=h#v41nSEoT35Vmty}$<~$M z|HaV67uf%qicTz)t@_&8)*d}=j8juJl5_vWd{;C6S^3V!2(+_+eSq1c$%g?3_L0|+ zLszK(!?UW_Xe{fW%T_%+5=EXw$pb0p?3m_Ih0jxskNG@wlxOSw3?pxXWV85&e-Y|WH(et^?jU36PA$+*-ta5{wKQnjv@yhvI36B4R zz4qpJhNiNwrn(Y)b|4p@<hjpoDv(yc%Js$ zL5N*4o;}~xGX(w`&l&ywHkF?%=}&hl9;#g(?cACxg0D9*7x|e!__Qvn*XEz|cXakc 
z=Q?huwx_S{B5DoDpU37*!O_9mZy<)?)^14P^I&aP5vOUd=I%wwi+%7k{IIbDe%QtHc6g?Wn)2=9)iLld^5R|gg{lL2Yd+09 zZUGkj#;z7(jA~=))8=?L(5Lo(Uje?&tmmKjd>Og2AGuQc+Uc5&$OAxYa!q;*zbY{9FP* z>;A{_@qF+qThsa}@^251NdM0wULo1mfGk<-&TkI2UNpzQ7#`GKba8wga9s@#_6;6{ z2hZVLj&Ch;u7;zCsgkHG6_qo(7TCA>yyYObsX5fZfnNwV9_4uufrcwwib*cIH&<)IvRL zL~pLfzFX{*;~{K`w_H6?X!L+&wfvDHqX)R#ZfXJVwGhAVfd=Tq#H>FF58VwOu?1Oc z)zz_ks;T-u)9!hO^*8(g{O!Ozz}VUo)fo=?YuXoG!CfWNq0*bh24>kjf|**E+$o{= z!u0bzG2LADmpz@dUUHpxM6c3Kn!{G~Zem^1?Al85R~In{Tp7ZA5(<>h<%WATWQ_eVvF`$$G zzhg|yG6wO4wX!F(uYLBrd{-a`UuIpfi+W$>(_8yNS5A8Nk!&L?Z^kFq{aWy)IrTs@ z)yS-LI`{Ba%=)Ctr=-2HYrx8Vt&8F)S`*IGac<+_@BEc_($}$vcMKf@lWe2a$TDvY zl@qS^1s~JruMHfc>m(dMrOyjpzL>+gGU-=uPF9{;c*#96V-IL-?6zILvC$odeq{40 zztcP8qCI>69m46)4@Li?XWid=-+=Z>lXbY8fmeEY4xg9GP9zttn$O|*1|zVyoBw*W@1ObkVLW_e4HNMZ3zr<}Axy%%Ai-@8zB7tzcpWQE*erT;%H+NguxU5D;oFZq-s!_`jy@F}eIG1?<>^2g)PR)vNeKXyBO5DpDr^Dy(_ z6lIM1BlBvBhj@3Za#ws;$)(P+;ziD~Cpl}Tc*}qDP9r>SearDl-;#VS<69C-QBJ>q zjk2vzJL}WAr50|oIT^#sLw8U!^YPhW_(xa*s>K)(|YO#;2+iJR&t&q-)VXt+_&Ig zmVMW0igNeRUBnKtn?BU8%uIZfsKjy*bE&vXud({Sg-hk`}BrL*KOwM>dmSHHk( z`z&&rR^lg?(Y}KAU!u*^|0^g|QM0!6#u;@wyRH6rBlqajcUhyS*O;D z+mDG&<~}X8{g<8(bEeX7tJ8Xf=~FhGVuyOB_73+s>m5BmmglD5(~gU6cXL1b>7&mZ zuX$|j8Ta{Lc;3MCEgG})h}gsK^FOLRu&fZwM;_>-(Wwdc8VJ1l=%e$ic2!x^`V zxSe9|bNJN$T_c|z#Bi&(l$txKf;rqdOrMOm$Aj6+6TxpR911C>o!{}novLqDz&!=S zV*P8v9XsyK?b!K1UdNjYoWVZ#?vi%;)t<9(?lcph<_?oEdzre&M7b6<&@gg+hC-5k?%Lx%~6b1S%CB9Lcl*k8J2$p;m;mmh!3 zX@N1^EjaC%(?Vm8F`qeOmYUBIV;(o3xnp*l&yiz}A&2~!(?*Y3YCaDev*yDQr>M5m z@+Fb!=KYUGoU$~Sw`O^cGvk=ka>nfDck?}0-|5f$K3d;%$1L@}=j%KDd*6%nee{^! 
zAGq(2*Z0H5jC|kyeuOhaF%jj{2jFY*pJ*aI_h1XSd-uExSbOcc&m-T!#)&1rly@3M z1}$0AIDNt1K+~nj!R4F_9h6SuUR5u*H2plhd?W{bu^gQ^hy1Sh3R)&`#xb&Trg+Qv zL)Z+VNEdySQ>PPJitGy&@XV>vK0}4Oj;@~XH9GPH&ZZLsP1wfl5md5{C)4%;`awsi zc6+a@>kPwW`he#nilcoI`n?Portl}4(=%cK?Aw;b zzrb(!Zm}|Iy37HmH!-gEV>8AzG5+MZ+lWWAXBZjWDY`3++`@YrSM(-a>DkRZ6K*`2 zmwv7rJ+^Tjf875QQEj9fj3^>D_>!)vTJ!Q9z10TN}h1@+zKmF`yW%N@|e9PC5&O^O3MYK%r3^~3JGI!?M z8~$$XU=Oy*6O*AiV!>`dotm$quX9RM`dNhy_Fd*!4oqSArcHe`mByTgLUaMOi_W9Y zB6$q#RSqs9mpA(1P-dU1jqig^=eus8PsJ1QuUfC3f4G^yY7l&83iF2M^#7^+uk-jL zN1Ofo44#D!>iu)np3UFuv^Qz%6uv{9!Vg1%#2V)^7IClE>JzI?Zrt38V`6<%*yEW^ z-Fx=Aw4OuD108QY7VO|W%Ajf%ew{hBp>u>MSDsotO+PMWPKzSs|1l?reI&*yBo}?K zpLfOolH->hZSR#4ji~;0Hu~sC2EF6e2XyW8jP<#ZGn_tLZev`!?|B8y&ya~JPsMZbQd`S0mhcC%%1jOZjH%edf8kqn`e48F#jyi)~E7p z7;kT8J7<52xO(;qkHZ^1VD?&Dq7D?0vg#Pm!As{f9Q~e}lr`B&zv}a8`ZBrcvlmPsKrf2U zJo}HcauJ<*dX+qF@uB*1Y0UI(c=1T$r2e%gk4)$Mm3M)z7<`QWwKkfA_(idhjxr-t zl~b0w_nU9M$KbJ?{GI()(wSuZo_C z45KDxH@1&-k>q;2vu9el%hTVifG6m`otT*)?r&&&r6qdCTM2u zQKk>kjA~hWG;=0>Xx?vzowXa0i!FE84weN9b|WLPC1!P83QVfo_b=ho$Ce*q>Z*G5?C~vUDK}OTdl7AyJrQW)?0dcBN(1LO zTfkS@Xs4+sAA1nks{=W)+c;By%;Ltxnfm?wfo1PHf#cWavWmUmRB*yIoU= z2@u~BT%w&Id%24JDn3)f|9?h4aE^bY@_ErA@uDke-{w@m1}(d|?@i$T{8V|jh#2m! 
z9ahd}qw$Ipl4r8^d$N?gNa==*bv-h*?^Ws>n&@ZF23PEVz74FL;Z|*Ba~DHdPPD20 zFycFIo7efhZ#*@RS)1Rvaxxb=*+H(?OlYBjSWw2eKV&b?N0XJ*gt(k{_pv^YeJ{FM zIMq8_{WMsP4aYl1ZdH0TSR8*8Tr{&bvd>09U(Mj$!?*G%IFGZon|)L9lyF@Jt_#7n z=yoo2`vb=P_9$xp@|jD%eZgHp&S7v)Y(+RW^n3LBLTAPh`cP|i7jO+6<8-ybkKqHH zZ)VLkuJ$#e#Du>?n+&*Bhl)t zGttDifx+-~&+#cZtYgn{A$@+)Z4dt*l!hvgXr4bWTe}a)TlcMt>SdlwpJ!!jn?{#) z^eI2;7u1f)Y75^;euJ;?oxnSdzNKTzpN($TnicWKK91}%g9FvT(wgOhha%QYv1C6k z3?3dK&&$_u9XOV4U!DcO9|XWJwx96p@B0*&9-bQ^KQnQsSE@ao1veuY1Nkwo9eJ+Y zGI#A;6TI6XcbdvrtoT4Wb} zK&!2dE}ib8HvRy#s(JlkoOsvf*X*B?Q#~F1+Ks;u;M|S!iHXUWIjXLz+I^Gt>KPtg zR!p7#EqP9UIenhUvu5~rB{Z~!TuXnvQ!@AZliWBby#2Rw`dipMBbT+a{7`^i%6%X_ z-sXJu(T{lTSo-`a{MC3zt@ijfJGN`)sSo&C z^kwm)yN~EYYi8(#oL}Wb{|z|O?BfG^9#O$tHY4b;=!=y`T zUw`e9+?PEr+SPax`6s%!W)k=46!ZNQr)#W>pRK@>fag`~CpNh7D7WM@%*TN@!nD8C z*Po@);`jodiMG>g=y*1+zcyrxd&bAJ3AE>|_*1jw1@O(`)2p}sEI0^v2l5r`3KGw+ zs466O+C2Z?(7%H%Av#`Xa%DLyi+%e6?VEY7T-65naci*Xg?{9pa#DLwMs5(db@88^ zZzpwPd}~+<4_uH9FXeWf8eF_67tu#w$^KeW?cEKu`)9w!-|8&queab&dvOAPElI)2 zy6j1;OM5D$8f#o>cByf0d9;d?l+ z`u^`MY24h-d7!z+mByAHaiU}@XY2y`jX!XzSM%O2KAgA}4NKFR zXRsUa6||pL!RKsf_(<@*5!+xmHkJ0(6f=81Sh%LAGcd0@-^SzEN2+(}F}cn^q&DBJ zMJAS@`RKkl{5tKybbXPz6uGns-dhv2p&TmTJC(fS(Pql}Y-X>?*RGCsXS%fM(O~+S z>Qbk0P0KJRzu@=7r{|s?y`w~SbdL5Sbj~^ieHF*UBb;RypOA<#h$fQr`3m-_Z_Gv3 zcZpOpu6+KXC8taekBBbQo?Upxq;W05@W^n!liyDMhxXUfW#AU@yPz}B)B^oq)Z8*b za!~epIrJ|3{A!RUx!|w{k^z?g3**;b;#8l=Io;-Ld~hshihMBU5N8VjpXl}*^ylV` zrPBJ1_~5>GRuPYUx6kCV$9?U46KljdS%3RF;)nyZ*BI$?)7$%#e4oX9tMEZ{p`*Ml zff#icb`w*m&qdcq{uW(U{$wC_7PzYhcj#<`yDG=&^5vjO|8*wdXy;BUAKcrRt4Av- z?GDH1^tIbbyQd7i$y~K;c|_gFU#-asBcG`g#^>eS+45QJ_eSaog~>a+ZnE{MU!!jH zC}K7Z@JWAYc>KbkvsP`j|9TuXMv&vgM~BC2S%c8)PQI&eBHE)_h~H}J|6qH_jtb8@ zCRQCg-rVn_adaL+GSJ-dQ|joBANJ|2oqsK3ytiF4t|)#CeFumGOWsLO>;U!+pF&QM zk0w}Ff;;eH=~?#h-@@pl}O)eUGNIX<>u)a4#$3B=9@5!TI z^2{>s+wg3d^t-x}b=?E;VbEQJ&hN61wMMa9ch7r8G=2a3Db8c1-U}E-CUFm9oV;Ib z6!Pt`t4$43zMFg^zPFEw&Jf-jN2n$qcO3%jI{4#)kU8TM5pVpExQTxIGrzfOyhrqu 
zz^Cds`{&HL_Wyp(u2cCZp;gYk$_{YXD!Kk?G`)y+KAKLp>qFN0#$QFdCm7#P)4E?! z_W>%VQG#t5LY~`oErGv_;!E+%qR76m$KQcz3I0~zs%qMWPk^r_Q>yqRSEKkyXE1!M zGpM@(eqKQg4qWy?KloI>wJtSuqqV*PI@g^8E)ES%nmbm~_+$h4y9xNwd65w=pD33j zvj%7?y+>x8CE&-BfM9}{aD%;yvyXk7sTrsw;$Q5y)X6CMy_JxNj8T>ydu8w`u~#X*~psO4nN&T zv43j&l6feHqJ?ebmez>ISW8d$c)3W~?i}#R2KI;p@o%x-4)lLFbgZ)qsi%rRq_ z+?uk;KmI;+&n?It4}O0>)B9hwb!?=sz!|!w?B#5~>o@Vs$DQRAUY+KZ+$k0`c%2Kp z_o8of9`nz9DwbW+z?qMSs$(-Qm>9bRylO7LW-dYAlfG13|A<*OXIsB@^P7jWo!E?` zMgQ5$H$xL6o0#83>IxqNzQfon%57mEt)ASfCWlY0-9%({F}|w$v~_wWM!LbR&W|eB z;X3Hh!;fTu;w;EH{K{nRmvH63Z#|s{WrNH93*b+L_>+!T4ds7vrbj+J_=yC8(E-N8 z`DFcCrQ;rf=9r_s=l6B$KlOboq2m|f!`;OEu&Yd7#w_D=vUeJEmMx%Kd(00b%zg3gN7rMfkk;W z`u|tZT=%Gh%~$mik~E^TWwKBG^Zcf_4&=>KB+PtvMBR-k^LuwZ&$L4kD3jG)9hSvJ9EA`zEf5M)) z#`3Ab3I4g}uo&32*Vcm!QhtlSZ{6qFcT0f3;1hj2zJ1!ejQhMhmgwC-#&_URBQt(@ zQUdSWlC^&#=P{0Q;{5NtL7f8g&V$6NeRRK*T2-@Yui6SeJ0&TXZcmdp$(Tl}{7P%L z0eFt1UwAQcCH81GnY0C8TED-0-dy~`WWAsVsDEHaoNV*momOV6Y4Lh^w&| z%$_oJH;cj9GteSDx#w9v%^sFHt2!bPo6Ui@B){Eut%-JjAU4>NY$qQ7$6u^(lx^tVj+JuK6GA?G0T zh)K3Cop(ws0&a#r73w&K&$;DoSDg)x9tt>D{gB^7hrshTl&<&^p6z6zP8{Gkk7Ua7N*NgU};Y{eYZ4p}5$+~WKO;^Dq_+i2(E zoOVf^x3llSftLOgO!4~Ll zAAZArc=`bJy9>Fvf;$7Nh}V_`N=6O^3zjO!znVDh%s}bxF#nf58ED$dn(KY^u<^&$ zZVLIiUd+!QACWKqfX^2{6fWz~ne&)Cawc)EI9sfC1#u@gejXPs-A!C;KkpGokL<+f zYk;Qgxv(;tl2M16R}L~S&j5=3f|J4qR?L-pHr~T~!eW_rbM+cZTtfhkuP`IxIZi`I^)_RjHwK|1rKv#j!mv5(^`%=li6 zTld0B*4ASGDGs-_{hB})SkicO8?cD?GGIFJq{0F9vI4Bflh$m5f2R8T@nSx>*lOjQ#zK@v7)YYm*w=%e?a*-Q4M~V;DuQ7(U=r zd96WYitfA@Pn6?l1i0(|ez(TaXx+a7AA~D%Vosp@^l+}uqB{}k<&cS!nlt0<2{oUt zTjUjES+sLvDSJ?*J}mjAun zjD6TY@18e|x>t_uDzkQ48*kr6HkQ^;v7TQ+2O4uBe;P9p?|hcrc*d1|yULAsDn8W7 z*i*?FmJjx0-o1K^7T|jqx%m=*>gUhU6aJ9V=ecHF8-rIY z@)gkP4<2LY4<5Ad;)a71BTz5S=H4l@2qc)1K?@g}HL1`+7ys@vE^wDM}HE& zqx~e>Ywt*PW-lU7|4!~f8AN|MQ)I8}T)yHF|5pde z>$uE`{Q|mgfL3l`Pp^@^J#5008iP)kWVDsBZeI0Pd%oT5r$YDX>sF3T)tdEV$B`q| zTJgi-hBqYlHvtRhSf@VBTDbD6*Vq?dBv0<%YLhEdhb~bb`E#$bA1YWkVb7wIMZ>~} z=y)UNug!1Fqbd}hmeJqq+?`B+4ao9Y@V|72`h1c;Ph!2){uXQB*zu)neC@ZPOCP1J 
z>RE^$%vir9zmPfMv&Ft483Am4)cg!QQ9G#bVQ1MS?JI;>58<^TZu50G-*(UPcyN%q z2W#a^Sh$OHZ%tPox}-WV=_H-?xQ&_yW<6?)OzvVu12y`9nfPve&xhE($e#f879{^~ z6n$zEza$@3(QrS{XneZg)&9&KumawPqzreD^v zv%FJT-aP-KSM1nVFs9^|-(JjSb83B_K@PcNNd8;ezhY7f?N_?BxZa?D&Jad)kDd5A z-=`Sgy<7a$oD+Lyf~~t8RW38nQsiVG z`JT+*gIl%67V~}!{9E=*r-@iVz3A{sw-43T(r=AsucRz;3Tu_iJ&!YYao!m@F|*of zYGZ$F=2XY*lePS2>U8QBFeU&CoP()rLPm1t7!7$U`V|?xG??@b2>Q-9Orv*d;Cf^!WGKT`*(Y}Y>-$?qmqAM@%>(yLGKcx67{hSv<=NS3w(nFGt zui(6kj|UfV4nT13e;M0oinaSrV;$SF)M;M_ZLz=IS#b`v*ZTso9pq3-mUZBp=<`v| zF1z#VMOW@YUUV8B)SW24`PDJzUl>m^2z``sHm(AFm|kza-yJK2ywzRuXM#%`PYqBf zI?J7+vyGi$>HMMdGA9068L|Fy&B(|Z=f$ro5hi3^dbJ!xo`E;K|kKQq}z>Jm-Jf1AAG~J8?4RV z&-niM5q3jwV{KiRY)3loU5oA5jO};~IB3Rp+{Jn#Z;b6o43fJy!_%PW-td#PgYDRG zvfcE)?kZ}m&FMlZFIeu3hO}?kT%eF%I#fzc29oVM*{HEvf6~995;CJy!F8XLlc4n{a{>I6I z+n%RsWZmQkgqIyLPImv#3=J9^vo)P|wip{T^PaE^4ZUR6wd*(WCSuiN;aB9XVu16g zZ5riy5odHHi!=|vmLB>tYl)37d<{{9LeG>B)DG_l@%zhJ!|s0%d30*}z1+Lc z=p4>e`TEu#%-w&qGVw)tI^A|ILT<@^P&~udG782oCRenZ`FeJT=B*g0<|?{e@0S6` zvajIMq<3ew_Scs&=K7$6p0Tvx^(C7~eceec&+i9FmNtV!r>!LRk9*i-gVw^(SuTIX zm0Gnw1HZKjmhg+?W7V@NV{>O65nGyH9=pA1a%}Nrr(?qkg!Ar{{p}PHgo?=wvZ};U|a1sw;-Yw9hXevpmpp zf5qm4*dl1gVGca&tO%>dwD`fw{W$at{}_K`A?>_*|CqiXB0u6-V$(U$g7>_G=c={s z$^@;qol_5Y-TLQ5JWKL-0`mgzS?tDS{9XL(^&g}EFZ<`z5sFpkhRmE!WoyrX;qmu8Z1JJ{MjxECQ~1ozv2uypE| zk$#!)#fg(RwejhTz^BHLjMO?jfj&V`nYCZI5xa!3jITsoL-eCNT}7k4jHmC4Te|YT zcVQ#`w+}YO^q)HjTwa|1U(9n1_;cX(s~Pv|pA0kh%QZYl-|lhRLq=bl-zMANzUKWF z4w*gGYUUvQ3BA@?^-JYQFG?4<=y5q8|nh+F#Udw&CtSq@ZvA{ z+`sZTd%s4(arSvOW&PP6Dowv)A)GVj+|3r=J^DW>b;xstr2)I86k_^H6_*-lpg zuljqGJB0jeXXg6>`W8C#=Bqq7&vuHqbeRt>B|k5rAJumE<1MlN4mn`-^IZ9`#QJXl zcjFT@d+^-QgT%OvZ7aQ~xc0-Gm6QEp^@I4$-Y2BNt<*%>2HHUhwk~ zxoI{p#n!dh`;xI)GHi6kZNV9`E3ZzK9?RS4pT=G|EC>4Hf(a7i{jUD zhUzlfu=kG+K~@jEjvemrgL&KsejRkiI735akvq|gcNRN?W0G`~gy9-w zi2r@)XEpDOxA$`{RAXned%Sa4(gPL1TJA{JY)RM&$kc=4TK46YW{l(>yhDD5_rCSP zLgX3@V-GfmSjX@V)x2rJ2Jr8nX5&TZ}>-50w1e(j6ZU=wo&VsIeG*<|^?FwP##TKcSUd95=+E61=ofV+gy&8Q#e5u;DWZZWifG@QM%fWrv 
z#l6~jYn*{&#lC&>+(yleH~+`zXkVU2YV5a$oF?&R5qp`Em4Yu|&a2t`mp*_G8!X>- zg40&kv(xZh8~VmKRwa6VENis<=3qzvSadmX>kgU!EP(%$?`uu$`*W!E;Cp`q?>_~s z;yKUO^=#&Jn;TzfV-`fLg7sKx3LlsIn+cbpTVc{ zf5`b!+_rN)cB*hF7>!KmA)W~Rm_39~d%l8daTUh5y~r7kaZb|#=B)g6|$j?o4?VXzoH&090u9da2gOxK?6#o`*sog^MsboVLT>5^$ zn%`A7`WoUpewsqYn!JL>&xqENdQ)vZ87N#SZYp}x`e~d551eD=u{Y)_#=KZ#UBpkbj5R`IJ;zuN=VTx2jkI~uHrrU)=xE`nB#r2< zcaMhN>K(f0a_*aLq}Gpkbm2KO6Z7BcYd1jd?Q68X;o)d?Stz`;mi?nYK^Ge9kI=@? z_?$5!6cg=kT5@{qWoUN-JS$)74PxHjJQtl|$F}v(i{rncPmNtV!pT>^$mIIP#3Yxn zM?G@n^Ai^L=T7|m7Zx9&{@KQt$G|7tQ#ybhs6IVgunPGV4or9;e4_AS^Qn~YuJhym z90~7iqnG29?Ki)yGIHv0@}B&94*Jvk5B8?U)cW8l*%nU#qx4Io&sN2^u(m3?IACB* z-rZG>Ue3@PPvNin#$5H~=K0OwN%4hl@a#|*(u*?;-4aAb4`cr%hx;~%BY#F9hr-C| zTvPYXlhcx2)my0TLtSLa?8&*^r%P^Qn?JFgylm-c!S8Ud4>nWmZ`eqEHa?$fYuroS z4nJ(?HP8P7uw9wdf4vDgk!B;*(e_;0E~2es&Q0T-wbudH%eS2|$XTe)LSmBK!`6#k z-ih7Zo68?(?R&|!=`G<;dZ6@{8L_psPHe_c1F^YVCdR`3BRGp4j$N`oFZQjiqhkA4 zj*B%QgO*Ir>1Zf+KD=RmsAKsh!#cis)bNgFUmwwNeXTQi0prqtt77Yi^JnzSmZ`B# z<31C6aQ)0!19GL|T)mseyCZpTOvm-#bOswQ8rAX5t44Rsw(oZWU#EGO+MU(&hQ}J< z+lFuJy?ovq$9qK`*Z;&B{N`1K9gRm7bWC-}4KtSJ%UnAfFPhK+ES% zhsDAbg|SU{P{$Uz)9@?j!{wKZ@3>)pX~&IkOzdd<`{5ne{@EF-U^z~Wjtd1O$Gth0e2H-{1T5*WwSMYcJ2~MH@zn0JRc(a~gO~KzC z-dz4Fs2N@6`CpY{k^f z!uGNCS3~6VbB>s})k^k!Yz^rmVn)Lv1=Iyvk1icX9MbB7f{4zBUeEJ3>dUzBb|!do zSgLPbS7XmQxb2d>`(Ublzv1MBzbtu>?C~iHD z%HEXSG{F9v-Gj4!vGVxzTy;8RYq!^m78SoTIr!$c0me`FU5o0izb)Uj2>qhC$;03d z+m^9p_Y}onr6w}IVvV)qbmvsk-z*tU4v+7<{0sS&Vb170&l*hu_M6XGR+|sykX1@;NiiT?{Ub%C65wpG&!`Qu7ZO`n34ia|All(q`*IYge`! 
zUHb?2SbXbTc}jvd)lO?^+ojp)tEMJsvlZHGO|(m+%?-5s(eFIkJOjBl8@ZNx2hh_W z<#qI6FTP@I%OowxchVY@yHI!l+hY)YsdlnA`k@Q0Z_lIALDhcH9q8u!4-QTT{RukI zed$R$kl!VGeAUoHat}%IVZ~dwuznBor#qbG*G+&Py3P7~dw+J%``#R_{j3hzT}hdj z8qbLmYn6O?hPfy<*TUzc!R8s_E7<})PWXY>T)((ayjAGzeVp}%w}j{31fJ7)YY}-> zXIpz9bv>@({FHxvY3Ih^Qrq>U#(Q#T7iHa&_o^jnT(r6!+z3waP~UU7GfuK|7I21; zm*lz>hKPSs(<$=3VK&DK+B5U@ct~s2Y~zrEqcbtD&DI}iaDA&v^0y`dM~lz4Yy3C$ zB*0f{9Ma~i{(&{jz}2ngDLAMj?=F`-z33W?h@65bF@VWWbxYwyVfJ6FvR) zzhzq|+9!FgH+7Bc-1fb+myB|-E3IwpjHAxlJnlSbYGmD%gG|7E?ca#KJqLX~ha5r| zq}b5|wDV$G*4DTA?Xva10}e7^|Ds(#`NbQ^#}Z%t2>c0Of+d{e%oENu56!9M_{@CQ zDsmUp?|N7M7#x1d8E52GgY^x>W5)O99*Vo^U;F8g`{v;wEB*cee0I=zxO-2j@-B-% za3kY*c)|y3C2r6v-%D^RZm{r?83{jb5pa3_-q(3gwZznJ;jzg!t9)%1sm&>_Klq2w zB>LIlYx705If?tWkzI;KczVY4`<3K7+jvK@+4EWdL_Cmn4V$>8O9Q!16FI8&4tmLp z9SY|Q{W3od2RY>jksayrd)l1{te$T3`pD)xn=%aG#2+59=4%;tV2A!5{Z~f+;ge~KK{}@{)sl%iLQ+1d>mq7cOUOvH9 z)U`<3F9q=*@m>69e@@#9)|=`#=8i~YA2v%eX4VGH zE6%6i?Y^9qsmQFFWLvwhrgrerCJ#<;&m{u}9qm^f{O<8`&KZDjvj=@XeJlshw{wqE12r8V3`PeZ3Ki}yr*3287^`F2 z`Cm1sBqy&!$5Yp<^%CM$n~<3=&p$o3ADKCkby7@Nawnnl*-OzL^FsW**Rk~`K`UkS z{Q>*~4D~H1FPMJcEzXOjqn&ZbYu!TyOZ$PN8NBMZH_7$%{7KH%5T}~x#;L{|o+Ljs zE*nMdq(c^6Z1t4J)VXGjsaTi8xc8E?SHu|3zlTiUUcW8aAf{jTd2|Lz_RK5Dg7o;7 z?$XYXt9z)4>RYQ-7m2o>Yk&QPp%QW=pvlyn2Ct8glS$j$K3<}aZ11ao*=-rHq{(>2 zy`Fg?spqtx?&&<)2Ewn0gG}4N;z70n`A45@9;)}^!$l5jAz0TjkAwL`PvU=t+j7ia z;ya<1`*lve5gw~DIiLkb4r@K4?14*uT?=gyD{Yk=HnCFfy;rPsJhUf2Q#MKXWT)%r z?4PI~>(`aU4?qX%X8<{+cY8h>n%Y>b-{n8p92))pUEqm=oA7zbeZzJWA02P@$vt01 z{$?tTE8j$SQWeJ+f%86o&sd)y9+85d4PW5=ui9c)iH0SA{k41jcdKU0C20UB(oNf$ ztNf34K7SVMp7Gikr|CD;@sSVGMjY7a$8)P5kRM!v?Cd`VobPe+L)786{#P+EkHYxf z0c+noIio`J3kB=xjJ~ z&CGUbKF%uI`D}?~e`)s2Hrjpo8^14Nf) zM!$_(d0Ns&&87eK*!Y*T-hRKW&ez7FjcdpD`u(;=zBc=x$M-~rUXR|G1|QuC9O#^0 z!Gq4})qeWb*gI?UJ{=QWFF!Lq*2mu4s=G{0&BPvE&gp`!z~oTNx9g|hH^{y7?CZ+J z@3K#d;(y}zvVR}e)PjG$AKOcNd7uAKc4DTlu9;NWV4;Q^g&uHJ}1orB{zlptsY;C7QwOd3x>ElDSTSYsc4v-wN;|_2p*gr1$ zpW|5r4|YpW_WVXObeg`;%1I1d3!>#hYtnQPI5&*>KQ6o-~^Tuv#o!G 
z{n@u!AMK$Hgod$q7Kn^=MyJRvZV&WW|NTJjn&Wsj#Qn2hZ>*la592q)+>-+B_SYt$pVZ`+>vQ7tA4! zKAg21!M=0YD77wOLrik4XY9^g~FBl`OWIC$uL)UTVuAHJaO@6#QT%@a)Rf5452F%6AK$q66 zzAQSp;*RF7F!as$7yG6-vB^h*Mt~m$`@W-TnO(ZRvS{4!&P87~|-JVIUgsr~dT86$o168XGC z6X9#t!R&QGQ)S%myMoU!`yi(r->`ekZ?jmVe(>q_@eF&5LIdEXe$Z+8+AqQ@lk|GCm2U;F|X7KQ>;goi=hev}$ z@zjyD7ksCMoT=XW#gVf;oZI&1m!LV{`jv||Xrr-GV1nNKvPpC}*^E&Zse(Vma|6&| zjM^m7aF>7Wz5?FK0q9K6Z(C>X1eybHp9znD7GAG|=g%Nsu6uB84uIg8?Z?gZNhxrM zZhL=iYR1Z*PRi0W+$h(&i#5=?cx$3K#-aLOgdX%{`p3bccB=n(D7-6Jl%DfsP6 zUGr7gtKW6uOKF$34(hM<6ZE$eTXPxx1z4Yh-R~oiF6MbW;bmU)U);oBOzzPKFP+-yFTe?n@WM=g`Kp z|H;uQR9p5i1|xyerLy^@bKVN&MSiT@e&2=@#9w4!_jpFZ8jUSGAPSrZrA6&e{YTU>`0Re}{%xEe z9n2$tU_qd0DR~6-Zv)F3#{O+^)EWz0QRH_1{~mhqAHXMF=m7UO0;AV-xVmu6;^PdA zPYX8bK|VvwAp=I`ANXLbWFPKu)>AzEsT5d88(1$T$Ke`aMTTJuQkMaE(M2^i*i$X| z+XKkwF!qhku3!3Pa#hW__`>+BIwOfJxQ4l1&2z=+&Hv;|tRrrrJIB=Df6^a1u15cB z5BNU5vz|4xIM1SIf75u3P5G7=#Pf>H|AA-7T;55>YQ*mYX?2DASu=kO$JCQ?YdQS~ zeGJXcLGK@i3>b?n$VVoOLpBtk_Y2YcCNDU7W+YU0AZlz#Y7g?s9sv2kPP~bJM3c?P zh0j1|ecx~f8~FV!=2-r>Ge7J=MpS&R`*eLyp4EMN-v!Qx&ZzLY+?^S#MvmpR=EefI z_MhwAy8YZfp84&?U5`6m&WhVk3#?du+FMJV!IEOmqJT%%v=_STM1FS~`>~PQLFL{s*b=M~GL4sXeXtkgqQe@ZSGp?_J=l zs;+(iwRZq_65dHj1SAhdlb{F)A@Ri~2nMh|FsE8iYaSR)cs~`iN&xi)qb*xeta?ho z;z_pBLtAK#r>6$g9`Mm(vGnDwAr+g4Snx%HVD9%f*ILQin}F@--v7P-|L1?gXS3Fr zYd*&ubBrhT6XX@qXSgKntb&I8L>a$|{U3$LGN7z0TlEUi0OX-I|DtrFm>o{xu7HyROQvYCZ z9B19o(#N_%~Xg1QA_twB#yKfQbVJX>qGoP~_; zGo2A>26yTowPhRYrOot>_S1Lioqo|+X#tPxSntEvR9-P@ym^OdgXqa^zvyZ;{4biW zvMVxkJDBHd9C_P%2>S|eUl_X>;{Eou&W*}b`j9t%Z_dluXE^P?Q=YFAS9Ox_N>{&Q z40h3ZiQYYQ{t#_ZeJ-LcE={N&T03`Pm7{Xq^P-}Y?Ua*78jbBK{Pf-Mfy*QHv)<=b z&YDVItexyDAFWpXgtGGYY?wWiw;;lhXLjO-g zS5MB!HuQfyCN^6%^8)nm=tGe>Sr!|?8xNgdIjfkw8|eqlUEdms{6Y31Ki1dA6t#Q( z(v`3_@ac7h$b=h_4YQCDHy|snM_;@SeX+N(EtAf>30hZvjo5S7lb__$hduSrz(J99 zxj=S%LpdjskbRj`r^+uncRF_`osq{sK)>j&B+}Pn=jl{M`>x4fe zo%{_%r=_pgca8T8u`TnYEpyV=lJ=DPee<%2xUUiS`v(8Acj>O5$_|V?)plsI+0*c| zR%qJ=D1Ug*k*>O2pKkIuat)nQW57PQofy8Kay~=HL$}{&jlHU4^L@d`w8te|O7C9! 
z<`&_~-Zwv`{!#cngl?_%wE46ZKLPvFe{e;p$#3h7*U*2Blt=iH^oK` zpLy!ut;2Fgq9zqzQo_p*5Ye?Rt%iJ_O)j$j{VDEj!2?198jV{DcPuP^3?WLtVRVL!qTSI`dMt~s3cchi56 z6H@+RzMU{(3E^Au*O{vG*NoTZgkT5t$!4!|$4l7N3YOll{b0%%FJ32JG#UNr0p{4o zo}oA+w4Hw7+rDD=TblFjolA}G>iu@e{brqG>>%EKrE}vCNDn=CrPuu3za;_N5R zQ^Z*Y4R6R#an3Xi&3@wuJS?8O@dn!Sql57CrkLTP?eMqwR!O*g=&ohliodGffkEt3 zprdPym@>Y3mMO#b<1kb{bnAacdFm&Fvv>N1UQ4q!ytMY#_TT_(#~0)?g8pjkul^bq zevWTuew%95&u2YJKBMGkL3;<@cK1dWnDQ>{W6JwJWvn_rX|e1{np1=IOYxhx${(1U z4Xk{bk7a+4yeH9D4am05_;gU;o?!3q58q*L3;ChBkG*f79QHF_D(N@dZa6udjUOY` zIU~lZJ_b(w)CZm%D&P!rENhsPlB~sRb@pMkwK&LA`ve_~M|71V8~!|^isU&$5lMFU*$7SP5jN=f2{F-@8769b4+lv-cA3Si<_Tk zIen25{1xwGB@6vxn2FAM?BTJE~e;3*3JfrMzl^i19|-3my;HM&fIIt`<;c7#oqc) zJ=ZC-IkqdW^U3R@Zhq3OCo(3ya^p30iDAUGTs>%Qlpciq|EjYy4!`a$qn9lY+_pY| z-lcO6_nsT6hcG6xnal5!y_pqE;k;I2_UH6Z8v030ifn)q86&oDf5lTA+0_+Wy34QI zCz^L}0-m=$fSlpEL-^C2a4BI+*QDEiY14EjF(JIF5BG5Vm3a{Re6{EYY48qTuyR%| zbyOZ$Wpt#m_ftUMT5n%{(&S;reuDf|pVXb(H-0P~#@NPW@1foa@Y|(>`-Y@jCM2>J zpnRxvI(awnJvw|c;V*f@!Q*ylM0i|pboy60M}UkM-&0pz_^O#xmmGs)oH%e1IxkDPVm|b-09vN;Yxa# zGbhOoiN^w`^0eqD^}`Ni`cl#yKsVda2mWDny@c=(;xISdskuOOzP3%Ecy5AtN%lQX zzOg)m&>Osc`%dx|e{UdP{D>~DWG*kBVLADVE_@TN>eyvyOLW<7=a*(^vKy8@tNvHJ zL<9G)9~Ua2ZT6l-WcX^r-FkWRoOML|#oc#1Xq+6h$Bdz?Q6Acf?7Qys7T^XwAG`9S z&^q@1qA~h!z4}i!WoKdUXZmmjb%ZW74h+5Zs+^TX|7kzA54H*KA*1u*Lk_)%zw;5} zl)NR^Dp^xq&A$2#$OZd*V)#M6>l<^`oyqiv%7tD{xu!hXOnB&UB;|GNvi%P@aSe@` zxc76`z!SHOxE~SMf@ZyZK=>A}?YTrZ4vL{0;lTZ_wMO~1WQ}r1AZXtWT#HYA@A!KV z8wOMMJ9!a!dEjFF|5LX1ptT<9=7~+DIk(`}=cd0IyF+MXa-BC{ljqHLp2ZHXdt>`7 zzE;0WI*@B*e|EN4)qYmFAzi8aa zKE{hXAN2AFd8dI>^{@K-2;)O^;u1rOEs@}fH9%E$QQWV7iyFGT(H|; z!rvc0LXUn${xtn{9o*Rv;Wuclbd^Zo5RJ6?n8qM-LA=9JlP z8O$HE^TAakw!Au5<_&K`kCMK!|1dlYKgVWo1073kmHt-wFzE=l&*KS4Dd9Tlef?Y= zD<%APy0t<2)mQ0Ov%CA>^o^mxZ*%6uGo}uW?b=g{9H&}e_QdPW8GuA5zbTCAF8T95 z_nvv;jdacwcxl3vi%#v0S4O-q8tkz=y@#{Ao3TOm`XuUb@Aq*05;=6;%`0Rt=c&h5 z^4DCni87>voH7nBkK{wWIrphG80??6{}ZzK33!%m-#Q|6fVq!5W^KK++tyU_ZZBoH z-ya|!d!B3CqDmi?E^6zc$>BeJXzQU4e!A;_%jevQG+Nmudyil`rUy66(kPJ@y>F 
z>^baD6#WQW1J;m>*#Ar88CRZ}o4V3>lzkKVH!Cc{PO>>BF8r)nn~%%p+u{nsWgj3r zfLLTEYXYIzg3J}zrqq{LGe1TCX$|aU+H?tWB9XQl-#g&^CDYC!ZOUVhKcO0XywEAM z`mVhOH5rZ)g&&9hA%*#}piFOa|VI=8Np&s*Uw#nNpOZP}@HOn6bYWT<3gEb_Gi zeAHS4bKG?5rzF~_JUxB2zN@dI^tIP0{}IZzj1Cs5_v(zn@jHC1oBLTmk6}-hyO3h6 z5Mvm5=?m|oy-#EZ>*ecnKDzrx?1%5-+mqP;=JNa=bFw^S>{ESM4+7rv@xA-ipT1p| z(NBM!%Gl@okW8!o>~gCv<(s}b^s$b7#xr`;5$e2MbR8IIb>x?i3C+&L2f(s`Neere zRmOhT2ek8B)L-pg?zb>qW_`j=uQqQt>7*l(t{A#dxFg}vY3M54)h3<re}J&BGoSeU?iKBK(r2ICo!kE0=wSV+?O_~(KlRxI*sQ3J z@yEsd)#_;ch27^m(@eVcv~QoGanVVy3nK776aX?A1})PtcN~` z|8wH?WK-P;-bMSHH7+?HYU9K{UmcB;OTfwb+)4W)I2rQKOWQw()@vCHHa_e$l83(o zANWRazS(CNg{x-vHa3mUEvzHP-&4`vN?kks@on&%!Z>`AN^Zv8lhy96o67BpKZ<@ES#R9W zuN!yvoFE!^7l6Ay&{OTT7gQ&n9qdSi*X%U7vu#Xj{F4sA8^>>;KFgpbtvfn6>867> z*_ZM3tInB7F5Lhf#5?iNNH^HTOe5~;&iRWLqHUA|nYPT*-s()PxgqJ?##A1$o`tUm$g@NXV(|dnrLDOKBeI6+_}g2=3O*!+A&dpAJ)8}Uw=4*C;srloU`B2hdTci{p3CDc(=jF18-hXodnKz^37gvVSWt0 z)4=ym@Vy*-YwhwYX)v*i|7g6OwytHw`|v-FA42#`_t|atJm*)@>uaQcm-IS66ofxo zmQ^SEv*14mfzvFjBf#CpnZ4|%54!d7t~fP(k4+2KW%lOVc|7^%(yrI`+yWP(-(3#= z@C#;iF6mj~XYcuw+951d(k)XyFJixIqQ~sCQ#mf-nCxj_HR$+ z+k7T9oMie99~rbweCTuVrSjp8;Zy5i_Zxc=*{ahX&gh+5&Z+(=)h6i!PdIJO=UHx9 zgZJE0-tPEN45#{raPBHKyO=f?P?rk){U_rWO};Lrr>O1C@UHz|4#?3H9eupZBeeJG z@`#`_hNAuMTZ{qru-h6mPMo$T5k}|kP~I>56WTVzgR~C!7XDiV7Xyoae#@EQrPo^M zE7AH)_ga$1u5^))Nn1(&veC;#Kd8WdFv~Y=T-F1=8zxn7kDBaHSg)6FC(Y3U^d%RD zY|wfEeOsSz_8IN9k4Yo!dq-r;U|E|mg|YR_oziotH$IN*#V>7Lt9#wMGbV<$i`cr) zZWJBFn|s#_SSOdgy6R|rdNHRk{?Vd*Op2eDW5ZX)JcD&3vnF0bJ)r9epE1X-?StRr zvfxqq({RSo`r8Arc+~eX&cZhVqCJk#(iK@%x zq*I%u7 zWWocz=!b8NIl6^5zKlO`ue|DNo9?!m#uUE7jBccLwNhZ4Z{d%c zeTQW7{T=6TZ2rjn5?H+EPU5M&0l?s{I@L;UW6oo2VYRPVsr^Of%g}nE@c9QPo#LI# zJb7?WZA@WK>uF;d_E9&|K6~s%+DeGu4xxYcLZ{8x=1%e@w{3x!Fz&Pd{~LV5h1X&KFbX=p&}#TtI`)F}x1M^>*SFP29j|4MJ%3JiZfi{cu;%mdjZi+e zO{wJThoANH1@m5}J@Df~o=4MpD$mc1tmm8yX=P`Ay19VHrEHww>xNHf!5efxJ>zFb zt--mC`zFH=0@z;17@Lny^yV+Y-JR@-)F6lAsjJ4Q&av#wu(-1!xeZ=eXs1hMd?tq5 z!O2n5)Z9I>T|PP*;jN#XxZwk(X`z%&<3gKWnb;=Xu#sfsuA9L{Y>u8Kd~ 
z{uWZg$;h1_{0DcbQMP#1Ncc&b@i(FQ*xBL9;9?ki^0EQZo_{~eFTVTo_6BI-Ah3JK z*VpkWSkudzP=5Ef*-w|OP#-6Sv5yU~DJlm1~YsveRet9u5 zqFr0lu=%E4JK#^~UB-{6^}dh(j`UCc_}!^DJ|S9x{;aD8g&q^#D*jcz(0bmb4_W7J znAK?2ERoH^cL{TKi~?xR^0nvONgL2R=Zaqjkh`h)PLj<-5T5p4pTxPFo%*gN->!Zb zJ~5)cvZ0Xt(HyO3p>cIOo9ZFtId>1=MlHJ;0PdFt=CblE& zkTtSnF_)mzCvfb_O0>TX2|vQOG-y9y4VjCa3I&mewakrQ@h4ZS3?KPx-}x7@@Kwfl z^SQyi0>+8H$@bzR(&MK%2U(mQ$GgGBRp_YLayz)#*M*C6dzYc4ISJ9?cM|J%J!}UJl}HD z@X$TTm9K&CN1c9f@TD>5(6|*ragi9l zPW2@%_A=OfrDmT}W{~@BprtgIj@X;D$NAaWN9r}UhJlnF--TU(z0Z1e0PCEUvq}k9K9T4BZJ!uH zXdxEb)jq;>Xm=01KxeXI(NXtecbavMHF%OQeXjfq`;VsPO18Rq>7s+5vqwUoXk7Q6 zXzLn}7`$j-!}c$rH0RoK)25x@<5SnmA9Ii^q*33yX*AZn>m|}Ti_qnyV~U}2QQf4N~zQM+`^ezX}mJ04q|qX+W2gF*GDUHch-?zr==jTAGNE@0j*o)VO; zNaavN184Hjl|I~=i*T=j<|bQsYW<|~uV*a`&<^y@qs6=ZC(3iI`r-`hB>;bKjWL5v)<-R75%OCobs5ntB0ZMufiV}4igGr@(*LK zGc^2%v*F(<_I|(LtQQ!Y+i(KyPGe4CuOTQbIozKx?aekp3$|@;N;sCVO2WF+4iEPd z&N%0dz3!0=C(rc4Nue((E9iu44_JAmUw&n%y{GWBD?=X>UKt&~*Oj3U32%rF4_p@d zfN%?&Rh8S7KX&Pj%HIj^s!w22=#R?Z33uC+{^XURcX|q6ab@UNgg1QFIU$%TxmG#& zyI-;gHqEN9gO>3D6Dnuk@|@mR1#`i<>apImao{DPdz6;2$<)n+HK&E{BrFX%+ZFGA z{Jg7d!n)!eNej&*tkE0RYfR`4!h+zvD_+`|P$^->gmvY&XmIFTgsIQE!paASZYE6Q zxS2i(^sX9Oo^92yP`yPf+;PtuYI4B^yyItQ3GV^=P;nOW4(~B-zg}%0ZPm{qOnbkp z_<}u*!Uhc`|ORE_(B=hYFoeJeCHbQuJy8mg!>8CItcz8SReIqFN?Kp+*$To z)7%8sn*7#L?f+{YhK-2E`1%(7BBJAJ-z9tJ_XmZZDH<5cr47S;v2ib<$FO)O`(Me` zBd%Pt&t44=&kl0m+m+Bo_(?7UTf&BfjZXN~NW zGZuF=j|$fR2mG*^_4dXXYzcZ)gXsU1M9C%;h7<`l;BhE2=X*Q!% z)}H7jjrNd|!aqxarnxH?`rW6n^z-sUKPHUxNe)dDR+SyvMwn&FLZ0-tHfT?6DEX@F zG-RaorRI*#@yc_H(sy)DI5t4OrBlPV-@USZBYd4RI);BO5&n;L?nKtHo^W=X4_@9t ze!|5s$@@Qf|10fu^o%YXZeHfP}QjmY?O`{4k2bWq+#c!&6_=F8Jf9;{K5M|59moygRqma|T#FVALP zk{o`OeAI@+mbF2&BHDO}wv<9M=K_l!^1nGSG&_yCE9s2h%o&fLlFsNfQRmXqnTJ5v z=(?x+Gj#nCo(ryfT4!TzBAc;&#KI;=_9j}hV54YbKC&?XVSCNEn0i^;`-NhHwjEI7 z7mP_{uj)&iyX(7x^;GKX>P@<!p1U_pRU$W8Tvh@uX?>EziJn(ThPs#Wcbm_c*$qe1XxPRv5;URR4 zMDBKiwz(%={yfY$Newi_N$$(@=f zuIR!`?_AA$xX*h^oH<8!!da{Q_n&qq_mt=A{#|)$oZ5N5!(NRi&oXrP==`20zw&X| 
z-Nv_1EXQAVb6h+9oG6%&iXoAu%sxqej2{^f4gG76mewCM2Qq{>;EGyes-dT9Vz0nf>2T!Ei`U2Umky~@*; zsh3vHnn&M{Vyu;5JEVN>rq1J;3u}BeB45rWkJ*L8Lp4+UZ_B?z6MG%+)3(1NQ#77f z=PBIGw+u5L62ecAKAHSZDLaL=Cr{bEFRPsOHOiK))a@gtWhuVsx~s0=yeK061H_+9 z{GR$vYgEyFBL4Lwd_H*Bs4V9FlBuG#0_dun))K<66StAIn{?6NykulvLijbpY6v6V z_Vj{06V^aju^sP7I`~w){~#>Qj&~$|g=jY+{8Qxz&mzAg>F>syFx97qFq7YyktXZ~ z!ZImSJ}&~_8W74lXQa7LTX%6t=g~UYV4m>&eHOA2nu10roUCN881CtkNE*;k}9B#qb2J*&H-$ zXNlo-@nWSL&YXZW`ldRa()TAK>8^}5f?w@Cz1@K=qq_HyZ^ZB08D-IwL0x=Bb6n}E zk~@+~$6pDAYB_Itc4n|1+l!nA^qLyvAbnu?%r)w#1beJNf8FDL5w?}kYtZ4#hv3Vb zsk7`#CdIRlgT2P2v+ym(`TCXptqH&1GA-+O<0|W8nWLwy3O?3+u~on4%3yuVv%Rt& z!+yOSJQm>hLOQbA*SMB(y_RvkmU|1=`nHXx9le)%ncimoP zkIgfdq&r;&-hT;SaB$xpw{fhAE7_uR4_fP)M%<x1a^N8r&lUv~cNAaT^L_9Qd6X6+>@Jcu^w z4BaVpx`z80JoSG7yVV)s_LO-0I2YxK*PC^)AwAL87|!H*;z_5?A-`eFiJoSy{FHpZ zd2vMi*Dkcz$FfNC4m__CTm*SOPFpI$=_cmB4bV(TFnn?oYYsu+M}WoWt9jad>DCAz z2%9I1FAX}QuK5o68y}!%jjo?@3>zd&6;=e&;NDvtk zM1};BAwgtF5E&9gh6IrzL1ah}84^T>1d$=p{dO03pcs7!nP)KvE`iRcQ?AyGDznfN zDeqqTwqYE8M5%|)QOLi|vACGfee{9)`g_E${Jb;gQRxiO?PB7uMwdyiObBWGE$4d; zG+#pc^nmVY91>ncm_^uTe1{5-Lw^pAX`guTCswHGSu6CH$1Fof&-$$mtrrCAKfE>N z#+Uupl7MyptXS*eS#4ilv;XmnRy>+HsA}l)uND5JUuVJoq}1R~xc@6#wB3AxRloCF z^L`@z;~?o(PwCFrQm)p_rI%g6JG`zwbt=Ab#^M{EaS(uie^z}}*1?PIG}EA6j||=3 zC6|65$rrX_!>P5^r*=q(GPV`j36W{L$KoQ+7(k~kPZ{aheN>b7(+lkJSbKpr-i$?H ze1m8WZ2LyW**}7wM`DAR%$RJOkqJAJk%!ID(;mv#_;q!r+4F`QJRAalPdd2Rj0`Du&DhXL;S2k8>bxV3?=H?p3uo9iRD!ejX>&w7KOG*rjy9?+(TvLT*>+T2 z`mEcA-ppm(_Nomp(gtS_-K?d%b<&=>iI*AI)t}pl>$c|}{Nv=)rnEEQYbyKJCJzT+ z|4MqC8vWr^IXr?o*!n}j(I1k+oCVlm^|dC5m&Y-V#d9^zwzac110OuZ zJ?rv^uo4-0M~sQ+XXY0QPoNh@Aj(t$rs=+DLZ zAvce{I-{S$TsA3uJ$bwd&eWbwjG1lVLhnQ1KMwsGeknd}aCl9m4j}tl&55rhpIFkk zJZ>#=r;EqQpNa?XW$dGlBfcxwH|c(Pk6eFRut%M;@?l1X0Mxh^XE7D+0%?U z?cCYI1_n81=FYY3VQXGk%2RXE<^lFxw3er6=70Abv;h93`J#8;`3yRRg&YN6`=;3V zT5tL=YTnuV%F0<^hc9IHa9(jT>%&fZ;ml5dbL1R+E56R{`JS_1?bx-}{}^8|&OCO{ z3C{JP$4Q@&ZR~x(iQr3f6m<3-Dg#s75_^`3;S_Wm@wL^=9h|xEK8L>S_D0Qp 
zx8_&QDga-n;5BMaeF(g+psqdPbv0*ed%~IK$+v;CSM9m}zS;AJMa=aN`|P?lz<)K* zxAiG(j=<*!jN{w#gY~y@)}m}$uzn;@`hDN*WfLOuc-P6p$J*TOBMLOvk8JP2$T7Zk zGWP$p<9+h~d+;+gSUKxb;_oKDgQN9b{W=dHv>sm2o4kKUJSNo}oO*QA*@gXxdwJKs zxqO752Tj#LFWUdbr|Ed?F^pcU#))VzASyamT}sE+&hnYYfaUm8}WnfhG(&-dp~=O9}u?~{7VlV$ywr>A9ZeA z?Uyec?&={A^vRrr)8%pE)I7G5$9Ks?{*50TabKOvrOw^uHsc4Ea@{hwa{hWP@wXBG zHJPS!r)v%V~g za3AaBP1MH%S84Exzi^gTYv>uA&AXo`YjW9YA3BT)_mf89D(4vC2Wi(D(hKec4t~*@ zvy-Qz+e+RDhc^Gb>dJ`rl%a==qdiLdbJEEF%X6no`vP+e4=!{D^<2_w|Lk8$WBUyY z)O|7&{Kxq5L(6^YwoP!hXxOqUAC>J~9XRhC#r`L2JB{3{UeLRJqSmE+@RCpBeRVCg z^C8;PHpL2ENISPZYrWkcTpV0$LAwF=Ch%{M53<%#R(%_N{o!R+{o!-1j*n(q^-HM- z^5f|9+K-qStWTY6Eq;YHQ0&h-s#urcE-YgYSYzwv!cBKy??ZR-wC^?S@!Pf=o03P_ zm(A;`zc-WjCo^T|ThMji_bt@HwWDdKT`}nQu_uf6bI-%hEzE%``K_`7Z3Wb^$gaF54LVqrTmGSL!(V zl&(--;HsTIVV<`G+ip|VI+KTd)C9tRrhe1W%d~&nKwY%&uJGRzz84)*`#sV1YIf?S zvNNz7kE3oyK7ZR@*%mqJ_b5H6o znQ{8PtMJL>ryen3ol$RQe^h79q-z*EZTjBu?Wnc}owizTJCUPyK7sHLs85h~Y97)+ zeYD=z8PkjU3^0C}e~_G4_gw0wb=X*JYZ~C&_I|Q|+XtyJ+tkmZ+t!%MN84ho&-ZdXbm=*hGYKuw8otBY))80uckQi$^g#wcom&c6zUsL0irg8iNj_hj=+f`YKfZ@DRkjsc$32?1ZJB7peOI29 zq?zN3g}!}lw8!{sm!52o!PxMFlIMml?exmO06NpyQQquNbmi^hP5EYlyBfy)8pc2| zzh*OL18o8ED%#2Z;f@*Lt&MRaxG(K~!D)BEX}A0^#L;HuIm%pd$KbEpX9sJ$#><)@V)7QVBQQjEMVouS=LJV5++`~6R%$P9Znx;;@M#X zO}zRhJN-)1+u;FgCF$$s%VzqR;EP@1W2_eye!$MR!^yWpYi-lhOulwln#q?dc};oB z*A5?O^6kLpAbI*Cldm1V$mH9BPOo;#cT$?@)NLC%o7W z|Dm0KclUtKkaO8(rm!A1GchnCFJY2H_ioe_!Eg!IfR%Y1T@GKdJB|!56#2 zk614%TzdfEwZ1ESApZ9We^}xBg6&=5`>b|_KdA6VPk5spUTNpw9sh_Oez(H$h0v8x z!0J#woIy_xu5Y+}vfHNhm8kobS1eF?jVHXu4lh^uc2D?rJ6yJW!3IxwgB|`&h41r( z@3X`4qmUeI^n^Fs;nyqth$sAr9WGnG$yRWi@b0s$ZPS3YH~r~#=ncqUugnpTlgzn< zanXoAx0_%2iK6}JNQU=F-e1VK8V8n)LBB{$fUm}umvfGmdnJrK3K)4LKKmEWyUYJd z17q(T6E{$;@N>ySdYRti;KgBh@oe~KH!qG2SNN?r#M=`cIW)(gX!Igi4y}P#Vtddg zo+&x8hcR91&p+NJD?(L6KlS*VR9?lzt@6iKi@*DMM>lt554Hu!V#%IWbH?XtT=yh< zG_Jk)D|ckJ_Vy*WC5I$)nrPpCey;9VPFbrgpOKxC;RUpP1~QxjZ$^f%a%6Z0IC9fe 
zkVgHHPney?XVT1Jecmhgb++^|)|g89RnT88=)y7BX~2K-$YcC;&>ZF`dDZ_g23!wUwhpItqHF@XMVB`S~T^s#))hKVd#)-*065Njj&$bi~u&Mcw&I-?tB5r@MIUGvqpJvW_`vRz*K^ z9_cFPdq=@VZA@x=c_3KV%(-)(9nDjCW=_fKd}Tm;=Kx=Q^S@j5&B^FKM+daO{{?rU zb@s1oS<1a==+2#&_*XXj*&Etvjc>h+IpNxv&_kN{PHng%e9#KDUTk%Qj|M`mlk~Jg zZLj!4trK}aOxPt>$DGN;xf-|ynE7e=kQJIY|9i7q>9_EqKtl=Te1ggYLb_2BNuXMLY{XlMP?{MbJp4Ha#1f6wPJ zgTis=1RtGKK1X*7WzY4k`0an&=>e^(MVDb-q>`R`UCylUX{eqRX)zS3IcQ5C5CGMK@)#p*S-`?Av zQ^NNE=QskZxhI_Y^fw4^{x@ zujtc0k#o%1IehCp%UAzB^7!oT_8j#2#8{mXA+FDf+j5p2S3Jd(JJNSU>TA~GKaeJK zrCsjpr<6M&`%dl4ii#D&gK*G%*5vv{Xw+v z^|8I$bIw-SfXVeOd{_87)+CxY2J3b2(+fYa3hOC*`SSLh5!825x-~X4-5NBHaWK6h zt6FEuTJby30-u_Z4h=!OjOY4~;%$1x&su#8V@mjGjftJ9yC(v^5#ySlPtAH`Gd42h z{rHmhoU{Isyq&So0xtI+@67otPu20C7IF3s|L~Kz>oXmH+iKq=U!S@Cc>hGt2|Bmy^jdQII_V;anns-?phdJM(@p>tB zMkZ#nE?FOeZ(#NLXQk~q4}qr^@Q~>nWbhY8&zSL+bwg2-Z*gXx#%jOtBk+%HKZ*;z z0WH61>?2Eg}EZJ(uUIo_qRTKe6>L-A|Yhj!SQ!D873P z9w~m>*P-3mKC7|)jGa?Le+GO>2FKz@E&XjCbGOs3p5iqVuY6$p#N*KG%fz$$>FmJ> zM)N+Lr{v2FWLPWos_%QP+?<({gLwyM_sVO#qgUQc`lt=MHhe4uA7d|A^enml9n${l z4}s7z@be~p`xbZCJRIAPzD*8i`pzC)slFx8Sl`fb8V57ACdxR{*w7dXSdY#ECv&t` z@*#D7A*MZN6TGPk-XxsQV_)i;h6}5u2d(p`O{|MApIDKaIkE1`f0-ETubNo#<;ID7 zSB#mcv=^>aJNvQj&^POVS$e22!fxF{}_d}9r6hlQ_UonL&Rpdqum1)W4b23nT(%4;EACuv$pGn_Pa zi8g*;IIkO5nc$%5>dAF$>3``A(tTW=Kz6NeAHJDv6$-!V!xw1RljxhWJNYU16nDaN zym@F0i2l8KX&f}gH(cTBBJ#r^UF3AShV%^SBQF2?SZ$(hE%dG0DnBGy^luCOD_Xbc z<2dk_3GP^Xo+JL>NV{3%Efn4=nOCK;25L248dA7q>pJEI8sBj%%3o|9;LODs*ILt4 z{$@NKbkFl6I|M5HC%C~vfzv3 z_i`3cWu;NpEb`im58Wo<;lS4duXErO;2FSK0h{(dOzfsT^uk2LEAS!Wj@|Wy9Ye3w z8PpS;E0LT#%o_al(Ai5@E!uwHpoBA_wZ*5#N<4!dBg(EL-&bB7p#Mn^1KE!_I1J=+rZ+xHU5S}f_xk+zh zOGDpD&K)O?blT@{Vh;gZL$6+&l-Zv16ULv~+V}eIw%$+JKJwFi?N>Y-sY9h7S@0+J zg}B=(8`^EAuh~N~=Y%z~EDO)VYng*<;avFP zT>TO6xc=(#n-We%KT18@(q31G&I&%B+MctJwyU1!{x4Ya;}PMEI`#X<@?41=2`Bp>oplV~A3wY|_$WS+ z=Ir%7^3+F@f*l{hAL`gwSe4G+Kl6xHS)6@BPg})&&Ka_QRiH>|L`d9w}Sm`bNl99#QONCiwgG7AWho^jN6mf>~FoJZ(bYe zr}wpXw2iv9?rZEvnf%CqZpHlEEb^VnIrl955q^XAX)e`<&!wAr&yEc~+D0Ab%w_II 
z9-~f9+22YX+VGo`oit^C8+GXZHf?_^b%@uu^lAG|xT(YRt~z9KW&>H;F^YWW&Yery zQ}(;{aqEyvxvhWo|2X`;?|qzo@9R&`AkNIKy`ENl;bT&kbyRpz8=l4Hrj5AI6L;pl zZ#+GhxO3`yJ#EUSZj(a3qpFkYJ_Q`leewIhiGSevkK1ne{m0oS^Y=eL;?T!4NT)nz z2=?SNmwZf_v|-Zb0Y}@wqso*oyTioaX5zkO%A9}iTTi#pu5FaRZCkIW=g*y=Tl9$U zXtvq=6TgfPUj&X>k11>;-diSHZJ| zu#cdj3)wG?^DWBB(%RAL%R;K-blMiC4Xxrev{7p#?UcWBG(JkHLrcur+y~z;Ty`R&Q*-iUbf37HS@0(0>Ef0IXjD26?U>Pf#NdZ~)`U#zD4y3wo6)^**vEQ7 zDsi=Da1H!U^%EaaUU7t1qYtg)j;B`o)9_?ysfBaSDqnU#;4Gx|8137=;Okrc@ImG# zvDV@@@hb+?EWC#At9*Usm+Xdv^y?mfT=jTg-LT_p#jB>MEu{#TpkL-fTd+{|%R_h- zw6+kbQzeG?;CG3&-^5QT4;c_Tk{axIVg5ajwlU`BGTvV2p6|H_t-(&qAS+PC(?pYt> zLFcSlk3ZwtEAhs^f;;huLp)WdzwkU4yOC&nm8JoKy7!Y~>pn=0t2>Ro%64N<)7!RJ z`7j3CFwGsYr`hJ(tL*C8US*48uQHY~WZDyT)_F7dh_+Wb&e^~o_FPOI;339VO8YLV zLn<~?+IJ}c*O#zI;^$Z5*%#5C%IloRNy8pe^Eb1Xg5K`hz?4(I!n9vNQCT|+5um(2_nPbwV1WzhF(D}FDSiU>H zACZnYZn|inW0ly2sQuWi?i20Q-hFk4&g#kDia8TLh`7^YDEBsEuUdL+UK$MfP5>j?ntvz4|GoPWBb&ru7mxS z_M$%cYdVX4-vIl*KG|>Jv%&?eoVt=RBH_KqngjyTF08@q}BKvB)(*k?yu+E zL&s!ztJbsx`=B*s(%T9b{>$OBdRHDX$S!D(J-wvxm5xp7PuY)X{Wtc+kfF`cC;DjH z7V^+sbd^(<EBXqLb;;X$e=lQq_xe0oE>5#!C8B=Q^9oAAb8bd(}r)GH>sZOstE3^ijKgI7R z%4-A{0qrUJlG>WVPglI!tk4$XTuYov;wYbp?_)AdKGXPaq0goAeamRwX_LZQ0CRwK z=!UGp_JpJKi`{*F-NiheHWtvX~-m{^d zhw%Bt*vL!O_e^L+{#S($%FI9~dL6o!&*1`eF46VNqHDg1u5W@C3aF3jR8F0W@%iMf z^H}QD7Srd)T1(NK>-iJk{rJOk85=QHN57cZ>X#{p^XG;Apvxa_vu=2=g}poa>$>z` zJ=N)pZ-ws_K2l+?wM;r`9W7yuXxzNlk9WSk#XL#&uAh=;BR`FwO{~q-BCAAWo2YAJ zIy&|pbAH@%S@Xx~@JcJKWCZv-iJd}W?B(shal^6%TPsGSkiUzgKw1=zz-38lB#yR# zV;4tz$Rip@!^khz+CEt0hUcsba1@K~T>-vY=wJ4jHOF{EW8p#CBK)dtwc-=tS$&Z~ zzNU=jzQvPy*Z5nfeq-%TYbiaYSpyzTn$PH0bgr&6@WAEx{8E3JzM@ZM|3kgsmfeEN ze1$v}U;8`?uOfaGd|&)iys~kK?FUl4(&dwn!!L%=ufOLUxXmMmgsYGr57EY0%2-Dp zambh|&SQzkh>y*T;$!UBw6!?2C7vl*xKJHbFX`X%6Z1JfVj2cp;~#o1SXV{<&0obE zJ$Xj(M#2qm97q||rJej!f5bRyXRirdZ^kE9730O=0DQxbJDvqsXTlBs=!}od|692E z5o^GWtOu_X-i7aQu&&6#_bBWxUAZxXK9LU8>cNd*FTVHC=kMWLZO;h$-^s^s=KHLL zC@;qAYSu=Tm&#CHyXn_D`r{uh%kA4$@;%00!T+wl)i{XgTjmC-jEP;)RdnCx(YMQ? 
zW67`|om4$ivwPy0u>EtiFqm(gc5{?cS0;xp%2YU%UFwQDbLT)T$f>dS}l z&FD7%hAV0ri;aKc9*T>#_P+M==5x7Am~$5g;Xkq?xAZVL7@b_rP)OrIo0-iW69lzURbgRJwB~*Bzt` zwE?|@z1_IBj0sgwU5qSkCO-JyzG_ZpZYFwSBk`2CY{&I3z0=fzZwi;s>z?}OAbqR4 zeR|#1$X7?MGWQUEk8&PU=N6u$ucd=F53^35{353A3+#K2-_aXc?9oMEF=vU4-eI4a zOf`P~q<`+nk}i#ZcAizGI`@e!uhv6OVuyoFD@W%D+>j7@NOmYIyX{bRAqQ2r-!V=r z@n^92!iQ(oa>voS`9E9|z`iyXJK71zO>EH?uRfoBbjD!teAb+)calRp@_qMTNc|Uy~wwkbGZX;+jnMFJ+f^{D)zns z%1tIc`uxexPW0m`*2+%$Jm9DgC!c^XrMeusvw?B3fpvS$ zbySZv=)N_>tnuZ9?}4UP5Z5B^qu@AT*=efy)+`@2=i9z7$aC?FeI9?xhkjHp{9(_~ z3=I!qeW-#s73`JMf1%`F*lR;uN~gA{ua#EyPU>Ct)MnPJZdUqa^2UFR+EtE>Q69({ z{k{TpgaCOdAEj?*E-1Sa>8R=NpJVhL>zvwa7t`+wgFdsOQ&5{v*=|k@{-hr@-_xD{H^@hM zZ@S$wcRz~Gze7DfWsMi!Z+ORfv?mQeIhsH1V79F#wT)|m}9|hXhqT8wt?BSU_ zHMbSrjG?ZQt>q5gEH`wMRNc$aO?UZjpZ~Vc6HjTZzDt>D)FWASr9TV65qBh*db>2q z8nY?uPtt9e_eGb{M%e+YyNq?z!7Zm5|7LN?o0z*Nejn{up4&-VgYKaCwd_CZ8|&3agUCJ2d9%QMEcQ8b z2p7#F|Cv9=wao@^7Hh1vHeIh&I_XaGE2}wz_(L>rm~3R}nfZeF_FCbdb_4?Um=zrw z8OXZ#9`@W6M`3zuT=E_+N8U_kPsoa;ZL~w_S`hFL8hbTSg6?Sa$C$u3$3zz#POOmqJB6sP#JbVUm<}Z-F3Rr1<%z?D8`|=2D zNhP>jK4(R);&a#KhAQ6O_)DGmDe$Wr(&#M5CVb2`#%z4b^>wMc`&^&)(Q8F64v}T1 z-3MuF5_m*5?btz`$MbaC9Hh-P+JB-=tWlb_il*%H9;du+zh!R!q+)Nb{z4}x+(TZQ z`DvXdmSuIiS|?O^ zonYUJ5wceZIP+4TvjW_Es`Hl3XXT%;@fztv9o=(%?VVQ}-x2OSP&ynmYy7VS*qbQ! 
z`?eMPt!7M|s|2lGU0qQN~@phsE}?cD`l-15D&Cwf!;?fH;RgXQ2y^k>Tog~9uv zMbVtWH8Ml>*g;=Z^3z>)Y4DN&Jg%HMS!^%#3uv%_d_{v*tdk)h+tL_&<;=%TxagGe zo5_B270<+U`<}=EX%%-ebhDH^9wWchZ(DU0^p9k3P2>+Xq zNxRX-CF7+dwlkgs;!DIW9&8n|ryWW`?)Hs|f5XHDAAKv9=Wbf;TYM0nsqcOH{;<#a zP8#V5I)CHMQ)!#{%Z*7P=`^3Q@1k+Q-e#zlebr9uJ{0IqQktwP-1}@@d)J&P`*z@jbtP@fK)M z>wbIqT}&R$L)pg#ACt%{9vdCxw`tR6))bmY@cvuc$9p5b-Pmv45cqc7jj8_w`)1Y{ z0_RzS_nbfElGw??L8^cD$?NxH*LQRe{QV*FkH_bg*23QNr!8*5*Yg(Wb%s-Re2RT; z=e_*k;!@~gjvJ2uE9?($c$oFle)uiBD9O5H+nft?9|pHsC#UT%0oOUc`0Ssu$Fmcg zfMcwOt3RIo#6HigcIljtOFuD3Itx_?@xqr$^9+2f3SL;wcoMHSv;ZAwj#55E`rB~B z5A|KLs@WfSL-Un0`Qc_v=f$$di~Y{xQrg(kyK=Df;Mm@reK^|=+uNsduxKdu0m714 zy9K9v&(ynsSzTu>uN!nGUEeF4EkhU-9NFO2X%TC(EPTEIE z`?8bvEhp_Gq?zjo68nzWsyt%aU`O|u z>LI&GKzT%$^@Xy0TSdWLV|7`5W zrM-3mQ2r!B49wPkWoSjBh=Q z7O^KlKI|Lh$u|>z=ED!uKRaprLEt_8Drfxf0 zKLa;;;3i=I1>NO;0G#20TY>XE@JGO<9{6M6H6HkL;4L2bIPh){{3URc2lh{;{LJq1 z`v7Nn;5guX58NNP)B_I&UgLp>0B`ZYLxFdD;1R%09ykNopVeLd`M?<-I2$ZX&hWr54gz# zuL1U7*j@gEz!@I+A>e!u{2ky@5BwmJ6!2~j{C(gi54;7~e^Gb&&jDw6 z;4pB$2mT>&sR#Zs@EQ;NQ{XKg_$A=o9{3l)O&)kRuzz%S`M(6t@W8JF=X>B^0hfB< z-vF=izz2Z0c;G(*@Akld25$1eO~C$(yUYIoIKuWs}M*uf@;0$2@ z*zWSr2hQ-o*}(Z8crJ-?_!i*Z9{5(^CJ$T+?7yVD{M&&uJn$Ugd=I<;xYPqL z0bb*Q?*!iBf$s+1?SbzDZt}oufc@jU%YP6!!vjAAobQ3Z16=BX9|d0Hfu8{0;(?z6 z-tB?E58UK|w*dPmbeI1eaE1pC1Lu3-9|D(p;2#68@xVU?-r|8@0^aR`e*xU&fp-J@ zbGpm_C2)oZejPa91OE!R)C2zpc#Q`>0KCNm{}Fh%2mUi~lLu}B_D}3C{{!F*58Miz z?}0x8F7?141F!MGp9634z{i1id*CmDn>?`pYRb>;F24_Oh7C(c>*;Ul$_2B_R+P<7 z>**t5J|>FZzca;Eyy*Dz=Pp=yTUnYni->pDXe0jSm&`4@t?bUUh(HhE_j>?dm#&;Y zuVTU6w8+qIC_4VKB^7tvQMTl?@qH}jd;cmHES{g zU%n`!0#@W-bo}`XmoHdWboeWZ zMG?`ufav%Y3o4cs-7#-rY012zxn(QUx*|F+J;tA3Qn8?D`O+STK~(+c&s};~$!Y4M zBBJwG?1c-WYtaoy$0z@i1-F$hT+tmR;w?IU#K-@QKvc7tR!9D^juG+4UP#1W6dSX! 
z|Bm=$9v<<>Ts`8CwTOtnC^F@$w#dJ}ksw#WMgHN-C*p5F1jK@G6wDe@#NXfu$g&cm z;KV36DGKHcZp2@51Y}vPeMJ179RXPuYabDRtbs)Qu@(~XcTNOkStFug_WUFM(jp+s zN{@mwqTq9*;Pay3kq&G|dRo*+FBnB=^XJ}Hae8UQ_=+O5+2v;xM^?0anO(l{uG6qi zQ>&OL!Zz&r4E2we?`10%EnKqf^a-Ntv*fgr(MD4AcwaLAEASJA&%2gXEIWg`Vfmv5 zmgL3hYkw+#Lmri#p}D8^C|6(u3dEUsA2b& z@-jO8|L5Q+0*daPXnaLQ|4Mp`#z!>yqff$j+TrAP+)3~1uPI~g02ePoN6~iil3QV8 z#MO`8a^3kryOYq>=iTyLyuIKk+OC?i+WGL=mv;LWIp1A%jQAH~4CI6#o+roa^L|LAUwtwlfNy|Hr{2ZoU8eX>kAF_^|2vzwz+jc+i0O ze>)xgpJtengPr{Sqvd9jK+AW_`OR~FSv=jp%mvHm%_AUlL4_Wt{~K*qCQFtqDOqWa zURJhZnKk;3h5WZ*;j*&P3z;U4uDGpifgL(}(L7$3l+7!#-;~rEUAlCsHTt%d3zjZh zVvQ~tt3PI~v5)|JwJ zvAIrn;5uHV)41ikdXD?F2H0_>cd72K{^jaFoCOoT;&#sdbM+GU8FU)#mg~lIpT8iP zbQTwO^_Z@wldsBE95){)*xJa;=brSgp5;C-cOts!-1@ueujWNMpj*DH$GJ~ek8|Tk z_umpeC~r5ttEak8S5J*D-^Jg!sPwL$>ONgPRq3@h>(<{*-$ z+v)Ewv7ZHRxHYp=M3=Ar^`>|A<~J|2V@9{v&ehIK|A(T|yL$G!e`orMQR%znnUlUI zR;6_RMc1c{PpXqU4_W_pyWZ<_?C18V1kw0YT1{o#^zOVOcy-V1k52y3?rO+~{5O z?*=qSr5|{oonYX#_Vb7v(RoIfpHH~L-1b|moC2@2p9N7H7SZVsc+)y%n1_@8BX1b* z4m!I+$4unk*fC?LP01NECU*>fw|ae|Eev zW5}Y%Kll3>?a7whA)1RQ*W~yAK)Dm$+ALkUbXnPaH9Df)@%ny-axcjlH)Z0~yu7Jy zKF-r^&*|E0@>Se<6{QhnUXqh%+M0WM9FNTzmp3gZf7~>ZkDWGl?3BN+%(3H~cq;Q^ zev4)=DH}KX?$JvZTC1jBGtK8&5Rb7k4IQ9cS>vhSJBOhPztwxZm1f29EeRJ?8k73w z{(?l&PgJQNuMvMMZnG@?^i507`fkD~L*)b>@R^@kmTitr+lR+xZlNF1x^;5W$$D%RkK~K()bJmuA?sq+Dh8fpaU;9ti_<6&6eX!=4Kl}FlXZ9oO zUzOSt@#o4@1!@n~t0PMU{!i-O1um*G{R4l`%m4!-jyFU^J%gw-2y*d$one4c%7na> z6$S_>Bmyy@W9E8LOw!##?E>Ddc2i5dZ0)9*VB3~%mu1vi-Ll5kM5W6CL8rEj|MxlP z%rKyC?f!oM|L6BV`trQ@%kw_(^E~hKzUR!x)M@3Rbo{2T=b`XFx`gRvQ1?UWN4tdI z?h_t?D^8c$#*vE|+B|WH1c)#0VrKG>!C5-phJP*~M;5K9_`BS@uGrENF{YlS5 z_*miY~i_Kze4`5H`kB_l3cD_3&$xf>O zF#0q3?;JaebjtrV!t*f6MAHAO(>CG_tNgsFZ3(7XOdgdfL8bPRw!aZBJKwD&yPqiLt| ze*`|JAnb1Mdk}n?SGq6oQT7za@|5-)&|cDMjujwG?cuw}o!Z-k^wLi_b_MlQ`oo~l z2DINtX+Iz3j2k)W{GZy#I~(lvCpoqW{h;tZbR^1w9~xHrSBCa4LV0k{-*C|1`@oO8 z|Mc+&@A=L{xnq&waTo{9&qd&C#S!oQCj76!IG==3**%`O z(B3rA8@eNvPeT6;z%Qu_Qu`?1{3PgYtQ`Iv8F!<omUA81F?y&cJ{vU5|W6 
zfFC#h*CCvRFi1#BN7KxJ@$d-KcqXG<0_K~}AE+;0a*AWq75bip z`d{~I?|#g8;8Dm=r9L0z^R90=>PyA^y7>dC#Vfq#yDt`kH-5Jx?7bfFo*H{`6=M^X z`p2PsIm)9OQvcuWl7Bts=jlIi4BtSK()S_kUH^CJU)lGJ?NZ9O0-uMx#?ue^bAK<> z2jOQB>N%IiSQ_#Z{vPf!zE42EPkQy|S=1Me^+)`I%BO;UEWU|2Ki}Ujv^%qF$*y;+KQiLlpeu+b8TQ`1N|!L+Pyu|8`o(dS2XRJ=_mO zz2!B&nHXQqYaAP|wEqC$VDP86mHz&S@CD#IT?y|1y*%IoKT^UDqCYQwrbYg5lEJ@~ z`p%-C|G@ejujDVqIK26BEyCXOy9WKAkMdPYete6s6TL4%|0kgT1|@wU=q>hb$Rnja z9qRL@r_reIJmv$)k?_+O_y@w%`ga2T16rl_Dlsp+7Rd6O=%obh)k7ZSDd~5kz7*7lYNY&GNO%6VWBuZN zW>(pP!Qr9)?MFs${#J|jHlV#>O8IvY_QvNQ(7r$B*NxBqps#J{4~8kxFWHF-5ROp7 zsYr0#M4L)|_#Fy%9`n6T2}h&+4&+C)RQ??LdkOu0O-T<2q_aFTTqvBzSJ1GeT{(HWMnUy05=(qOjTC6yWsAH zyC3cWxQF4g;d0>~fs+n20ZZT>gA?E?;C>B9^G$QR1#TzY=qH?IpS)kRw4kQ;OfUV0 zzkiT;?!-S|yYSn*ZI|a&tN!`dw1+hh>mRzYc=9Ecee`VGA=REOudr8*#Xk(KTyfzO z?qQ0*{NRkHmtSuVJvw{FoQL}t|1EsRU-%zy{^f!RX^xOz^;z})oBL+^7vGz9_odts zx{OIz?H^5zoOpTc%du$x#=M-%n#bFMZ``=AwrTKtF3pXX zx{2>Td^UBW$nAXC`SE~3Sz*&ZE*bFnq_f-JoS4^q_vz8D(u_|(c|UDlyE=Q)%BM!> zys$j1a??voUC%Eav_oGz@OnniogX}*J^D%9pAP;Z?p%1E5jm!3_YOSz_lE-;%yR~e z>O13F?=gQb?pY1squzA+Pq&{^YoEjf|bnsmd9!Q<}{_jo&jvIMte(tc| z2b)UI{t&t#^UQm;pGJN9YTn)nyOyu(^{wmZx2e^Rw8Q_*IAz%o_WqZDDi9h%s{CJ? 
z@H^ll3OjQ5M=bMmU9enTj0OK>;Mh)v9re6B$--gb;|FgIX zpMs@DpMNs-k@Vomx%XdPTz=vu&_Qya zeW$zA$zJ)MRQzO{q-Q(aQ*gZ~fM+=z@u=r0fQNXG_d~p-g8?$O**g(_;yd2YZ@dEY z;NPKy!7HVQ_>!;U2RmnL6u%qnc6uL^D^+ZMlkz91kxMlF?!Oe$z6s{Pq2(AonDI7hIz>R}@ z3@#IHHry1r0=R{68{kI3xt~FbXC2@QxCh}@!!3Zj8;+iQxLml6aL>clz|k`aZWY`t zxN!bPQdwv6%axU90$qKx7qn+>lx`k&u(SFsrPfGy})A1-Vjz+ch8O zt|vRIprEL%G$R*1%gM!^YOgljY0~ZfTT}CL(feFmc2;rjt<7f_Eh)|~$j!hjOmqz1 zkpTzNIT;H|F!Efl*gRWtR(5Vi0lg3A8DqXJqcC?FUUc)S{}H_Sv#=!BD`rXVk|jk; zb3LVSR1Iu()Pitf_LAaTBeKVP$Cc%HA#rI|L4Hm~S?R)#97{X(5RIZvMxb~(0o+Ju z@@!?}%|8sgn5ydc24Y6 zCCDgafvv=YF$%kvp&N@*TpnJA%*J()uGAzYDr*nxt|Tj?sJIk2LTFjbli-~LuNam| z7e+E9hSMoePHr)XReH;!Q#`$P*Rkw7>XhPbx%4(C&dE9z$;-_uzD-Rs9&T-6NtPWi zg_dG;g$m>AR9gp6=xDrX;X)eMt#zS0xfpyQUZ}iHYRC9*E!mNV#ZgqU9E+n6r+-Uu zeWz1*Y-I~#Cg;j>z zi5T3G?nUk42g|Z6}3I*zzCqDo)uc7Vmo!a5qTNZaLr`Ck?W!5U*n9peqt?y^}llnktb6 zB}SLOh#0#(?Hvg?EA-|v9SNPVN$*&DgrZ!9SCo>qw5XtHS#Aj{Eh;O<`^j$n*s@FV zi%T;~$~wyxS*$D|N#L%qE5es#l`QsjnUEuoj&4!8kti3XG>EZmT07a&>&5PYdZ!c@ zNteJn)hLPQY%l~*tXCUdaC2+Q!jhsT9`bWH>6J}Ej6x_yB{}i3Sd@G1rubYCJD98& zuXht~e&NC*Psi}`Z9YURXeS3RH$zUk8|+BPExbcQr^)QdPuM}@3&0AC76NlQveF}W z53--;$Xn8vWkUtavgJZKvSom0bgUS!h3x4Tt?iQh!bNUAv|zc?)~z|{Z7zwvGN2N5 zkf~IwRD~oUPyyZNyS2DGRUXQ%>79q~=@$+Ej&&-DcdAlWD(_re(#G!4u9Qag-`Z0J zLlR*rgH@=9a8GZwm*jY9nG^{UUX&Y$y1X(#6lCCCM~T#m=~g8A zkXyKv*-CT3+gR2DyqC#x3bJyLw6GX>FI~uNg^Fk4!h$kff`lS2u`8tKF7kwmOQ87> zwRVo75qM(U`K4Q)(3G+Yq$DV+9d#i~z4 zJL9z6(ivHWkob#oXBX#Y=P%69PNG|#Ug@)P7oo&#=>>jw^sKCW+ihx_C3oGO>%LjD zGthviG76_i7gtNV#HQhGfA3PWDgAbvm3TzsqN;lXC8JegAh~(8cPTAdE@2A9plEpv zmq@bd`bg)?B-oJAeT{yAzgA$i0$)<#K?NRD;7J9ZRiIOWUn|h1Kn>12sa~A|!xU&x z;AjP!6*xnI^Awn)z+wf``8CxiD3H!mDZEF4biPdCvkLrLfuaIi73fmnj|ybCb3)}b z3e+kvP=PuH_EBJn0>c!@D=Buwco;7X+vNB? 
zFnk>_4HK@sP3Z@P(swAH38n86R0$_f5eyJc{*}^S5rhUgeYyisTOsT^L-Bira}qqL zz72|>gY>{cO&1P@FWu3VAhMZni!s3>ir_|d`~o|Ec*CL9puP7*{v0*A`s z5Wp(rkBy=DUP5Lj;C?2I&H{Xe2_ixCCvyS8gTe;{(a(@|1J?Q+7Pc*+d@7;7lwh#%BMzqzn0g7X;hqEG$+e!K zukZ}c{i#2(xL-l_y+IK1S8(1*{oR3cLJEg$BFKctHUlD`1kv9g5f&=83Z>7We(P2t zZwt~V<3wf~&X)kIx6ARncFXbSUXa7pd*ynb0X&9$=6yKt2mAo=9l&|7aO@=D8-P^* z2Y?hGyI+p41pENuyjqSm0Dg7=_gw&w9pu@&d8xQE~byaad&@Nt|AUjdws z^R=%j9#8}naG#|WP=|A87vL_y9|2u}G+xag<@i20B&B#A&JQU*43P5oq4QpXZ^9CY zd3pm-11JJ&0n?7j`3X||6+nuweoKxgNaMT$NcBY2%k@k?A*WyZ3n$qalRlDRV4#Wx zBa4op2JI4raV&)3DA?`@!YIlUydU)u?2q+C@B!E|3BrIEOAv<1(F9>^9ZyhzU65b} z=7k`Pbx8!xz#G8~#%2)IVq64a5S~NO5A#Ma5cWfY{;=^8gmE>8U;yTe;A+_W2!_E% zNN_XO6~XSf-$yVA^F(kB=7nGq?D7O*Jgp?S9`b_VeDEiN+4$%(!F1r0;8vV{5(IvC z5gZG8AP9rx9)c;1y-IL9)-pjDTwfyygW($lgF#0GVeG6Y2xII!1Yw*!Nl*tmCD;pm zkzh~oLxMTrdjyw*9trjZe;2fW&K=Wr+8 z7IG@J#>dxB>mSf9uzOGsUC&;<`}7U&7t%j;K-j=RgL!@Ukcgqfh8v8LBch^5#>B?O zj~YE@?6~n0CQh0>Wva=XwICaoyG42Vk1Q@&Qdm^{Xo;<~Z0R!l^2Z)ZpZ{=1=AF}5 zJihW*tAz5^Ybw@0v99vT^&5Wuo2Q3opL(+uyys zXYamOUfo}N;NYRdzkluZKm75HBS+sn_SW&b`V(*e>775n``*b@e|i6d4?p^AgX8p> zvmbx*w{z$J{^@5IKL5gb@ymZ)`s#Aym8;j9zW(Oh@5JVo>#c1!Zo0nz;h#VL>!)`1 z|MvJ3EQv|U)@jpI?z(%%Ju_3&?wvJz&fNR%pZCCn|2M`DeZox_ok*uspk4hdgvs6h z!LLdE%=1$_eogFWo;&Ah$4{!Zue(z^;@!`x+cn`%1iWoJsNhH7zy)XTTAs>!#R*>i z&R$|uw?Smrs1VPi6z_G%hcf8Ec<=OdZvX#KXd+ITCPUy)#p#|IZ;$=h-qQXQh3I4=eBIg2c8FEh)ZQk=U-6poU*_%L0!n_F8Q!_@&!ykFuAR>>q*JaO zUa}wYIoP$swx7cC$Lr91j zJwA3!+^A?60~h7y#FQ2l6~Ho?Ul>zRuyjc@Oe~LJWb}fEd>9&IE6I*=*J>M?SNuPy zMz(?^;yk0Mi*-MoAM(=4l?5n=8k+Gn)0r^1n0=0 zV$T>A!;n1FsfD>#dXt&ml`@CVx5?GH*9oXMpDedsVvDnxytLRu6Q@p`KObN+0kV+Y zXH846&0{lCEOTexpE5HkBW+g7%sCkeX(_YWeaW+Cr=-qg_axv)Te{|uv!~3Q#_ml| zW>6kdW+o%kJt=cCrY9$)O;P-_k`w1jwYVdalGEl)cL(lCn3ITrbjC9=s{ojum}Se( z&!4!UAggfkMCr_DqIBRhaZyQDao$AfRAHiIhMXuJs!o*bfD@%thKaP0C&~vz6PaxS zE1AG@CNP6>#Y&Gp$ZYY5go}$EIcfqQ9~&PR9Tyvo2wNNy;B4br$#|AC9ytNvY;k~a zwsEXv9LpJp1OPZ&93Y%+EGrqya>gP70L~T%2xl9^O2)99F-QP_v&D^J^y$ouc^M0F 
zK0($=ngssw$9cWv%aP)8Vl#4((>9uwjAl8bQ5*oy76%At8^uaSv7Avz0D!Z_0m9kh zSxG#IO1dB+(QQ1^5lsLv4(V_?aJD#j;X`EraJG0rI9nX0P!jyH3^y!tVvz{|&K3^{ zXNyA$Tn^mGtiqzg<@ofUZ6uqSIw$#_v^ftLS;9>GaR)OJ=FFaxFl!FQWJo?-#*jmx zV<|Ra?wr(&wA9(NlV{JCB9c<2G8qXo6Q`%*m;4gp<|d{gZSD;C=2(r)GJ6)1A4r@v zLkY~CnUa{Alq@BWH?kruD>_8QPmiaYB1b$v^5KXTD<7JVpdnI564p1FZ24h)tbL*K z?wpaIG6g~R>wWyNVSM;KWwug&K`x)g(?J|gjCsje8ct?Wxu%J{Tbxb1y;<0xSX8GB zEk+Ad(ZbMz0(*dW4V@;=kZaEL+v~a+r|9QVjhPE`NZ_I3J}|=_MtX@sBI#OMFVz9@t%*)$fEN) zNj7-3?@=#?7I&(}Yd$&zhb)<(n{;RuO7Af@5)iaZAocfr-I&H?>q?^b7v)H zq|C%BoG~LKF?Hsgc&diGzGo}Mr>X-4v_3~TD#nYgBYhauet(}dxanRAmf zWPy3dqFt~-J^pt^WppWLSO&U%Qd9y~5k*Sq%(5j@hUP@^xP4WYD=lhcr)ImLnhwvr zYZ$hmY@t*Y;EdeDDMJg&ZdbnJ?9RKe7n8e9n{>|Bk^6SF(jxZm&h7GaIT`%-6+-uu z5B((h>Nybn`9dBN@5ma(52J%kV#%EcH&>RpT@DH*-nZb9J7({E5cspXWz3LNxLuu| z)QA*r>3Z&DW>&LS=$P>_524FW{&FPy5sIb}At;5gQ@AbYM+LnZXiM z@4?22rOld)P1NkWQ_|R+>B)4_TY8KF+2?Rngh%$vZAA)Xrjc!TBP73^NA}Bkq68`> zk9DNvm-EDW`Qs(OwEbf8wJrJ5mVEV2+K@46L&oGwc=8QAX+y@Otr(N%t+dl@q+KLshyUDk}S+h6bHcY67mSAIl$^6&EU^IrZM_+Ny30d5!EZn!OQIL?yZ z<->86^ghN;KzeXoCf(P^@zu{ik+@bLCB2G(yyI}Me}elRgd~;>3YsQYaW8Jm&vw62 zBO4{NIq%0EM|4B(b0E*{za7L89Ntxw^LnQv3XfLH zaXOsB$&VI@*2%MvzqdScrdy)_7b4sp_}_#b-BPR72#_Q29|it*I#gK1OE*+mIu8pb z-bvt#7V^qXLYAI=o5-?4x=^;L5O)f5c)A2B-HPTH7U3fMG8>=HyuE42vA* zSXj4KtmGdp%PK6z?I&0}25Jv z#%*f8!@ONoSg@R@Z7@a#3j^g*N@WB85QV@J?x4|?X}PFmMD$qw+(^wXfTTrM>8^HZ zUM_wWh?V3nggufFSXzYf7DngV$u3<8BV^eEH$^BA1<|=2KFh|##xkR0d=#l%K#r!L znBnES@R;IKgU!g??HhUb(1%cV>Mry2MHw_vMh<>N00XAGSAolqe|Qj@IQ%pp58NyDPG@>p(3k=s^FV_1e! 
zW1r-)M|8{t&<0HxzZ%8kHaDg$FRPF*U52#;`p(9LLeX(I?jc4u&J8lX8BB>rCP`ny zs7$Z!EF;_`={7pZq$v@cM4(-Wo>rRs0x;g|F_0lDPqK8=xQnu}?PMTY8?SsWlLniX zI4e26V|nF~dDCr{Rw(}GiGnx4gp<7(%N*}U8JX8QChN9zd0Rm*dYEnp55ihYF9PgD zI$pD;#1j=4^9gl}$?)Vne_b9B#!)chjp&V7kI)$wD-ushKr=G8Nym?oJM-ohjcc|#=3_7(C&35B_*fFwzJ8juc42zlqx zOIL^_-kY8#MHPEw3V4TnqKB@uh`@-DOm+{Vxl-O*!X%cG zTx99cGQ1@@db(ga=trs!MldJ_1-a1_!fPwJhyYnUT87UE<0uYv8lz||h?Rx%MqVOW90|w@guB_>EMHc%z*a;z$dzSg=*mOsI6_&pV5?p_ z1v=#+-%fNWy@}xFbx^cACDQ71v;JGq2zgA>+&vVJD5T{kQ(6c80TB{UHMj>QPm#hB zzz1^j7fRX%NDqzLB$??=>ktPW{6OwADLy1T+;Y$mNvDenHYE-M3WeN<5fIt~^(qhN z`knhk{Qi!8f_8iRnUQv?l6sdgJ254tqfVB-_~2YwsoESp6>8^P_~ESo_!z(DvYtD!EK1U0T4PB}ptRKR5s> z$->(rch=s$Rjm%P@2oycT0YUUn;&(MI*~c>Cab`pPU*!`;`+p=yOQgj=F2@>ow?pE z3wA*k?QbBy6)6j?S7MVHw#E6ym|9tj!8zHigbbXTrQV-0doGT)rX|ct&Pbk}n2-k5 zMiD~pRS9g7D2^K|Ym9D%tz+Y4lNBAg>KQ3>l4m8%07C%P=HQVO*$$>4Z4jwS<$i|& zb%SS;F6yp1Z+UTEF1AjE?#-E7KpG+?%bxP`7Zx{k!%kvGv7GXFE7Jz)BVCR1w50SQ zieHwOpPeUDz`xh_JCq-M(E!(PXdNuVot9!R-Dm8WF{8#96+5jLNw~$9XCvvosY$>4 zHb^GQ%0lghIjElQdD(Jv>0>KEi0{R{Gp`2~5T4##5~T}-=l zSwW&Z4V`-}KEl2{flxb2>zKC6?EP{S3ZMpoB}zo0e5X+Ln@&= zH&99xKB+7u_acLGgS`ayr8o+LZX#JRyJU5wYfc(lmTavXXKijRwD7bQ00~scL7eum< zE-N9*A`vl>A0HDpHf9v43{xWwnNdIFZ#^hIks_qmTa^ema$)N%z{ZT!aYYgJE)2n2 z#A5>8Np6)@YDJQ%9swl{8{)~c>6CP$Bx?d>j5C88 zo@M9ahw7z?>NLF0iVSWfxf>=fK)+54)2&IG0J{GvYZKB{P}yE4D@iaZVbh)iTESNo z6vfHiA_+dpw&*dR%G+ve+hpAf849Ce#(>$K_VS8CMruA?yqn?Ko1tf9EM~G5*9-Z; zhiB7=3h1ITEOvARjD3ZB9>GWPRg|uMBjrOf3cD3Ch3hZH9V6PRusdWRcB4>phnh%Q zHtn8d4QuY~~8VngzKaQsABLuMtVh9{Ma3(m?sk@HH!(gUD#{kgL3K$lT=B5z@9i zHe|Zedr!hVqcm#IHy>nH#3XCFj5iOJxL3byAH*8=HpYM~bIKGmYUe>p!%@^_>25DB zn%$Q$BPA(=ctqEJ-@3ZJPrq;kOUJOcecbVMj_dt&E#pq}j(3;s3fOb8aN*~UpPNFEu24G09*`Q1{{58j_^hI74X`p^d1YHQ?G?H z@VI#ZN9mae)46px+{17~;fBCPz^#CzCll^Yc~(+}J3K9ix$=Vt59lfRFR~wRcH+!g z_EVMY6o)(q9Wo6%;S~zxc)eM5*k1QhwS0^ez4bDbI;t9Pix+?>fEXtn^I3e{Bx`-$uIDK8sT=TpV@*z}9tn{@%*oIS>?4CCumV`fjcC$YSQ^t|-$ zNqY?6oZi)}=T03q8#U)n9G+v0JwI1J$+@e!5OFFaYpD>Pb?GyI*Sx^bFNU=|F6_=T 
zSZ7(8nDRZRX9oQ>U(RPE6N?4)UT$NGHQYMeicj~75kKIAqGGNa8>-2SESV}D;H@+=KC`5E|gUtD3LPNNA7 z)N1<}?AlmEaj?^1_XX?)=m!`C=nohG=mY5MEZ|knB|LL3=GVH=i2cOj-A2=m%mY1I zbSlQ5zS2{i{5*aa0`3qy`2UlCzhfQ$j^j5gjw|8g|3Uxj|AV}D!pobeqqDlId=i<`uTzI~OAd9hjV^UYy%G;7Hf=8T}VFgK`&bDHFv9u(l5DETG@`8Yv+K_f?u zuS*s6K`%B1X^O;QKKjvff2e5ljrKdo4fFlMr>(Cztw%^*V6&PPiQ&HYNol@Ft2GCU zi9NUm7NK!oKKzH~5t@)XE@Dn*ZSzRY@Td?_o<{65%imL^xRONjlWG->PYj_9?1Gqyt}zV#q^rU+;cuZs9Ji7eS2g!xNEeeYm@AeHyc80)Uh94WuXsPE!H8{_M1<) z`|e-{v$#&!(fFKesZN`8uBxeItYmUy<&!$v)yGN9gt*Npd;V@L>DR zN853p*1F~Z<)?KpF=RrzE3BW@Zft7cq_MAW%*>-ak9zWq4`udWjn^6OImh&0T+xY( zRq{Xjt&hbjE50LpH@GRU@+7BKq4J3|KdtN(HCzRpWxo+{>;70ar$$vK;9hx*2OoGU+pMDn?=!oZrFel zx)Iyz_6_b6X}a;)f$1rIhc8c3|I@GMlj+Q8YW;Jqe+@SjGiY4Z`f=^Ci-Vdu&eVFe z_I{Vbs_oxIZ?FTp?&YkQ3BGhqxxvBf{vVG&F%eBq;SQ6Gkc%7xipsHxxqQ62y@%;WMD013PS?{Fr1SSPwPha|pu|6Xpd)@&+x-W6DDi=w{HOd@ zwcdR|t;AbB`A_+oT1^LTekIr6y5C*TDPL3Dr~^%i{|W7m?8xuCs%`jzFTP^><83S> z_ft#DNY!)6?0Th8+s0;fL$5;)JbqbDwJNC#K9kexS_2Lg(pWi-vAtDu0AuB9n>bE| z(7&Xynp(fFJ@nOimo`>avm@Em`s)6BU7AS`hPfPHU+#$gN3A<{tJ3e!Yt@hhgrUv* zkx!R;?Q_+mp--j0_%P_SXY7=e*q5rE|->TIE zH#>X8bxJ?1q?=mz*Y><@X^BDJFw9W~psCfr_IR5)V^!k!l5pWffa#jX?E{U%vfwf5f6#qq{ghd*>ZioZX7Fk-b* z>h4g2-{Zzbbyc{*aGVl~Ph9 zTGgSed8w_H3w%+hu8HH{b(uB03NB|3vzl84epgPcQs2i{IX!PhA5b?lXzPln>0Z}Z z@wsk8Se+haf4KIH)SGS}JEh)CIK)k${-_;iQPPQ)2=dmquIS9VX|h~gcd*?0`t(8a z9^l}o*aP@hop-Ta>yx{kZm{;XhPb#DjE~KHxj9yKSESGR$<}W#<|54aZ!Yecap9^i z=<%crt{L^Ll?@sFeJ^wRAug)ErCP8=Y8(L(8P`6_oLAR9g7IfAYMy1H$|ndKby9!Q zZ;la{+}y+Qa&xv%Uual=6u1Ztw4{U9F^Br(1of9G0Nn}1`u`}DJrP_@mmYX1?*=U9`#xG_OZW)?esm*u-NRNCsyv=BZ5 z8-g*HNg}7})!akn15>~d{EUG(`JeoeSV(Y9-IHi3kRRAcu!nOIKcF$dP~Y-b zLA7<<2-fmv;WJlhY_-LFyqdL#{$<~|B%H%MqJtn%;u_GWM#{@fA)7uPfYj=Sr zDwBiU>0GQ)RYz$QAJDAUa!$(6_z_1Aw+XAl>z+rujPGOp5N*e1o&uG9dYMJZ{hAw5 zkGAYom-(gUEEbA=CO=uT{iSA}Wfla1Y-}ywO^9OR(}HGuWwIcCAs`n|JIgnO6_IA~b%C&0 zX%WQJ!r$)M-K+{^5nTAnq%HW7>k;YgBbYd|{IzBwLlCXy7?UdTnx@ZsXsQe&H^q6dO)#lG-OcZ@8XL`)J=O`0a__$^r`|7$7{CvY z45^zOX=p%Cf@mC{(d^Y@EUMHS?Y8xncR_K2*i=qb#hM$-b@phC`S0Z;peKG-jy%P? 
zngZ1{qEwL!`sCu1d|3p|j%GXNMTpAutpP8Pc1?bYzIOBq&1PltY{E0YB66~WekY{c{i&9iDgpb2k@G;^2}HRVlP8NJAHn1AOo;YMQ^_~|NW)0MzWh=iB* z>Ftrd;Zw)H4}1r~rzt))zugFpu)G&%xyJaA#DHcWEn%+OSqA*OHx~&9`dzFLt6=QaoS)5g_-hv;50414Q+Djs?sZ@vC=B1iJQTzEH zoHTDVTk=eOO!UcUuHF-9uHF|q<_dF*xq25pTP@-J5YfxE0A@})_=6yt{WGtrSiK){ z0To*bZZW8qC{10>4A~8@wN(zzIqmM#s@84?(3eLeu5sKU^rfgl)u`Tzmh4yo0leMV56)_ra;8bt{PVLX|z+8isMVW86msb^J!~{?~+F zz@2*ynr=oTR<=mD)r6%a<#ymC1ciyK&yb;U3*^hk8ro@4az7$g!V)8u?$s zdLx_=Cs<1}i;n<-X&8P59-?98mgNoDgN;yuN@B6k+JzP$1;yD=vgER~w#4?Sz>-aa z{XPpKpT)Y~5`0=`j~(xC=yi_q>n#IL_qG2PROBBOEc5z!ABopjTg3Cb`6{dVtL<39 zwANBk@5`9OJ!psVmdiu|_n)QR#+-(8zdzp0^_cQ$X3|XRF=oDfD(7i+y``_xW>2Zj zo}M-XZ_}pVSBoOT_?}Yzda3?F4aJA3{@$oR#93*5f@kOp2Um0XSLoxnmwv^6eJL}! z-)FBj!DbyB=*&#!F4*`lFUjHc7VYVt_LW8-2Nw^?a=~n=vNm4Y&Y!+yL+QU>Vs#CB z+uO14PPpI-5rC_}lP*>dE2HzNa zL`x&(&xDpDmXmMgNSA|3?|@{8o$MbKe=ew*3t~f;S^K%Td!7dNUqTK3F}=?DG_Ccc zG)>bqKjr{J$l>Gi>H#Q53Q%E9MuzR@{jrE9F2zhg&z3&kL-3Xu<6&{JqSF89(;YKuJP66 z5B{E~Kh`MgT>INL+_M?B*Dt1bZxd#+VX&o+0B$7h>m%1mAGFRUR*idZFm|An<8zld zltqR$$ZfL7F|RZG)xfUUl2*#O`~z zwhhzwru5GAdu8l%_`#gE4KoJsQMZ|ATU`8Pi%VC49$8r1dNZ(^`GV__T%V=@mD!aR zMe);JZ){th^tP)$mqO=VeT27N2PMDR6;|*DM*D(mVQ#9}NA*Oe*`jQc z%{R=@aqBQA z;gjrdz^G-Gix1Q$f;Iw!orq(jYq-SG8+GK@QK`|@ps`kF9$gh6rN>u|uEyR7VM?VM zY2+7N?^Lz7K6GHPGrH;qxJY{}VR_`Om=B1r>JI(zSUJa@fO}2AZ;`I#*aggm+&}7@ z4{%Mho#x`BdbRQ~u?-dcxGJ231U)(c@ploWuwk?|yEkh=O&gMXX7ZUt8-_fhtx8K+ z-FfV{s;13#^$F~2(43j;3JH{BC>M=0sBLvZN4^o4-@aa-*sGQ4eI2S+RqVkwl_}V9 zfd5mYOEdDUowc5&Hb1;Ny!V(IgOyqi7WuE4VO*6;e5HcVREgV<#aB!Hg0_Fe6~^L0 zX-6gQaKtre*GH~xKKq*Sdyod*x#1h>JVUMP6&e=aoge6|<*OQL#Jey?!sGhJZTz~% z0AA;$`ph@F^R=#5eN=pRCkL(qh*+u*J8H@Q6jC`Qc9+kXYQttwvri2;(Wqy1Vjye_ znla$dY`Nd`y5xbNqNZnK_)UEpG(2{ja9SNhD)zPpl{l_~Fe+Gq)5O^>T_8BJ;0t=u z2I-`@GYHRirAN(nJsuXbN!zp*+NLJxPcDtq1YR@a5vHHAvHp5}!pRn2U*9(KI2Jx= zV;@n=MQl9T(qA>fI?$?dWcm4i?4v)~62ht4g!n60RK36vXE0HtD(7gW4Shz{yljnW zE7MM<;K3@b6t61FZ|PQ>4&a7XV0XY zT0cmix0}}aO-SmoQP6YO(h{_Z`YOVEP?bhhFj!6q8w?i=*B5RGTo|Yy{Ib|$#dx7HgjboiQoqix5%KrKHFI5$7Z|t1KCmE~>m 
zy*2Z~OjU#JQ=i{`C=I)#^cYPYvnLzoH4Lfd469p`*OI>RiDU4Vv_UM=eEkW*V)ea9 zap4Vu{zGV`^(|)Z1?#V^`Wo(9AeWVV0dc;LAu+=m=DG^PPR&2%*WF=^VWM3~k1>lF zY_(a%ny(4z{B)P5+j?hAO{GPLc7q(8{a#pAl8ye3YV>J$^Sz9)N?L1z|F%YbjlXZt zvr}Wfv<`TNUAGA@H%G$yRWF*#4XFJ)!R4e$Cl@p#qK-XHD#kVKV!F(J)pW8LQb*XJSsmU8 z-wK(c^o=Hr?IU3YF!!WD6dO}hZZWs45vUH%>L<}V^(JNq7x$E9X+w9?Dt!Y+ww#HI zHj9oYXg$zuQJzH((pauGerZiQw5M688fJaQqN!`iYsaqRzBMd7$%?h00`2fD{3N(` zd-&9yCtd@`ghM4>e>1_ZpAc?TwS2pVXmtqty7zzn zuR!Hp&aQg7IfBDa5{e}PZVkgsAk%_BC#7Y#c1oGVaCNJO~=fwx=3Tp z_KfiXeBG7)EnpwBD%A!R`Fn73+@1eECFQ$w`khijxVusJ(x0vWPUY8ezvUPYDrBrp z(Dl1WT;TUkQ%y`YX@BLG)nd&WEwnr)K3-vhuD81)X6tltylSNUdX3tSQ)s_9oH>nN zM`uotc8~Xy|84a9!0#*Hct1^dAH?Y7Sp4ebE&q^W@>|yZ15e3?N)7IsI^b%gyy3@& zV{f~3%JblBlbJqdOBH(Z>>8Clb|PtnRp2NwTdTn}_pbFJt^~U#Yx(V3Rnuw}7wgkx zQN>O?Z29KAN__OqU}=nGZ)+|Ou4a{07PEMFMR4^!H~PivxBm6UKNp;+3^faR@B4LY z0AEQQoEN-xYvoRhsScrpZkfR~rz&-jpB&F$yO)JkP4~0Z5ia{P;vUFj4z5XaWd>?P zL+W-n<14@7;A3uFSL!Ga$7eZB)iE`MYyVJRW6w{|r}&01PwFc^_8s-Bf5j&Vo2n_M zSc=hAP=1mMFO+kzh>TSAfDL--8g7UhXR;|RoH`WtiNZB8TdZN52B;$ITk_VZtk=M? 
z`5V?Z?_7z|KmiAq#n7PZ1G!Mjw{qtspR#`qj zN2S#E9j;f0Znm6uaKLwiR8yioX2%ZKevkgX*>cjs#FNM3tub5g3)?z?8)3e_V)gy@ z-qwd=G!5x7z76wZx;Y;Ga*KVv#m}+i%TQ}Cmny?Y{K+Rh`m2VgEnhme*yl&aTfYF$ zpL8^xp9<_S!^nn%&Fv=h^^t4J2M2?T?XDm$-4DMe^6tviOsKK^a9Yj_oj9(4rWQ9LPP#OF zC2V>LR-(eGTUJE+IJ#}&ngg|WWd?3ppNUfjeb^=zK4cRUpFD=y<-<14{}OtSxc1o7 z+uGZg!m(jftLGbh9W)O+Ty~#&KeKp!HS&xk$;`DeUR%et2@*}|tJ$!at|6NTa-RSOTL8%H}#)x>o+37kFIXcnImxL8wFrgKPj>>(EJk9qy0z~k?0LQM5T z#stR&*N^qEax=BT{^>SYb2uJ1YOt0|&Uvron5~N&0->F+v4(`I4B4M%STn6Y$eZb$ z7WU-;E^^=ni(PGfC=wJ92?~e|bo6mn+l!O64xKZ^8naF3p@GW{CCQ}0%m-IFAjE7N zZ@thMZS3VZ(yZ#y;|$XuX;$~ZOkl*jq!HIkBZlVY&%z&YrW=O>L*AUD)QfX)opi0f zV$?UpIno+rzVRMLEc1;bg>Njz2x7J#fsN14F|}qMtRx#P-5qy=- z6;=M2*8%a?Q0ID!s%4uX9AV+}T$(_l7*$(+coisJ=C@2<^?_4ss07UrtrMD&a?!Oe zA(q^_S`7#08-uN@S|bjK7h|@D8YZ7(;a}jUNEp76ME??d4pe+{xZ`hP zPq$z84ZyD_f6%z4Q7f&c=NjczwGnqlxE?BTSo!hhi9Iahgz|aT4_!YV+=@N*!-kQL zKv-W)RcovqZ*-y?`ekf7`4L%TCs?Kg8n~ni%b~;PAOwADITg|Kd>2FbB1KH?&oZb2Q7Bz$Q zxV4_gdWOA5%bBW!Ak}?OtL)K{YKK|s$0~t^S6XRidtpz;hH;{>EM^O>F<8aJrfy7! 
zt~>1zkDZ*qQR7GmtFmaV;u+m#Hd0&nFiy||Yq?>;&MJ%MG_6n0!Nmq)4@kOl`;DLW z*QBT=se8PM;cVi@ep9s?HItnE;LNvQ#Z;TI`cAaumj{5Way_P284aAcpq!*r%#I%R z*{+<)hbs=|!gb(>u(rT%%=tDYaIm=rGX0^ZFq{tUSwnZ=xW)1a z)#qZi?E=29o>@6EtWImEGp3R?*C97y^Sb%FBP8=!I7vP5RF!Gx3vK3j+9@Sk60fOz zEmn(5vsedPToF!uCM4D4w4L-5+P$%+H7v~)W#UGG?lL*Zc#`T>)}0p%w?{*YzPxq>WbFR6lxK6;?xsKvk}8Oad*0fmkBosuL|SjO+LDjU zJSiZ~0*(YqgLL4yu#ISiGu=2}OVU>6P9}*vStRacYW<*=_!BV0vYi7Asp4Mj{kBV3 zu^%Pt)|hC-hS4) z8l%u3kE#B3nCY1ctf`s`-frAtGT@w4W!w}}M;8C`HEw+yG$gGiQ#CiYu96E77*!wh7+wGiIvBoE@+9aFvh>eNBp=xq0jA0n(1>`PBoqkTqvL_KO==Ln8@mHx9sU&jjm$ zjqAUR*-{ByR{?WBVl0y*O$}2po`0>L8kr!btc|I$gavF`k5uj2fEs`7O6Vjbp(U8; zA7>3YQTL{w@=a*|9=m`I2?1O0b+_i9M_s#vS z=fgH)cSmuxu7!PRzTYEhi=8Vt(C{?a-8s6~(X93F3BI+#`~c>FN2|h2+_?4U z#!(yVpp(B^egw1H9sAEKjt6k+6j0rLOCRwH_R~&Dt96%V;>9&AiWBEcb#TM>x>T7T zx!%eBxbdm5nC-6v|L4ntUED+S=YIqI+XkJ6q#zkLrRuts@S&np#^mFz-|g2*XYNyP zT-(q1m~Fe!huSqkBwriQ$bS_2rX}&&_&tI2pdsH8-#VJ)k-F*Ko8XH*< zf~$gSjMe(BTx<`kR`yB!msJ+|Kw8XKD;`qr)g(Q z&C{0drzhASHU>EKTkpAXe;haNuQ%GW)YrH`HpKKi&dG;da8=Y(zH)Kx>dCcJ?3~^h z9a1+jnu*(l<3q10JU(h-NM>6Zf3M5?Y0~=Z(OEn=K)LLX_MU^)D1I*BW9FQzb&?$U4Dpg) z2}?9lPw&v~n;|=5L#%1wXo1)Z6L*BJqy6*}`2TP=%viI|!ZmZ8abvys_cg#9EZ~8U zuS4st)Ek6!>0uYi&du44#*hZ$b^(!n&Rm8@(JyaO9^rlc>VMp&8TVLHuRO>CF{vCl zpQMRrBW^l4@$sYJxRTZeeVCW^iYd@5Vkh_;C!D(%^wrBwYf05)R)MFn!)?{80<2A+ za$?R=aH6F@-TW3ZM<2Jr5^(+*{Xi#CIYx$Eu1x)O4n*&EFua|`L34PqN#)xe_Yw`O zH={3U<^2!!x04NQ@am@|jRKqQ8m?KVJ((GL5#uwn@F|-Z?k}?D*Vlwga%KK&rMPkcs;)3P-ffAeah2GVCpt6;S2Q+L?aKD?@rZNK^Xn}mln7k<+d zw~%qZ$m*q&Qcmnmqf7F!&bg}5t-_C(nb~S)u=WwZ{(6op5NFQXGfK%Y;^z+OG@5uf zViSmSCtTIsPo)BCl-Q(X7q6of3eR!r!YGx)?02u>%Dv;m^V}(xOz(Jitksj7i^Yu# zpT{9b>6DDDe?&Fh@Skp;Io1=mtvPX0`GBo%*(Jn>W}3D>7{)n!Te)A0Xr_by;fU0MI|J|`EF5FjWjV#Jex7!V<-E#Qq1 z!V#=hwAyK_w%FR1X**-IH|#YaD%x8BZ4*#Y+p$*LDz%;QQjK;xz0KGmYCCpXJIBtH z2=sD-7t(e{zTY|lZ9DV5&-?!W-}BwbL(a)Q`|Q2;-fOSD_PXs6Er}_raFXtmN@@|U z6dtSgpfkHHV^5}T6}*MRwK z(kb&fI5DuB}Zb zELK)CZ>@r96Ii!Xv94X`{0g>Yw0`~l&BW^5mU-rl(6pkJC0Uu*MQ-?0T6JQ3bf*$` 
z%8PtO>kaEK^&We3i;#t#!Zs!4Qtuz$#2e6jExG61!cVpQNzS@mOp}_h4sUV}YR8ON za6eYz-jmP6f{HA_0vF%iQ6=>CF2sADSv2Z218un2tFv!!SiPj<0c znXh)MRdG$m_U+K9o~b1Z$eFdUmNJ!>I^^lKWOeldb`_+Pi;M2HqkBEb1Zlsr6}xiC zbR?P?2JC7uJFI5x5;*J<3?ir`w3kL5aoUFUeZ6G|xCyvxENZeJWT4d3)=)3j$ZAhA zumGziIOb9j;P=-vW{|Q1?9+C;YyP4fsBI5nKexQ zM}SHjrj+K9Eo+jisicu>K85V()0x$-M_e%@VSfX=8t!VBS(l#(ja>X1(ASM|zOvGu zpuuMmi4$_6#O`34zo3fCjQMnSk1_8^566Y!g;6YP$i{Qodp+q~y>sd5=zRw69p_&5 z5M>yH`7tgtuB2IzmLz|pr0i5uGL)1Kq&VbT*61=? zrLAQerKS-|O{bJ6bJu7y8l{c1w5aJZSKG)W`A>k(ieb*DLwyBpS9U4d6HFdQ znxlS>4qP1gS@3Zt9rQ%?mAvqthiLCrphEBUj7EJLrDmnKytMS+@hBc6YQl=g9E?Da41i^;$qu6k0YE|}X~ z4E|ev+)$mFr4AG|=tK@u1$yR?|Gt`}scosO8d89sNw(Qta|f9lOCeLV1hw5F&*fki z7+7~Lb5>#XV1x&QVU?@8;F+MxEC@i$IM^W$^TQ@?azP+b9M(|eAJT9v;_@VcV$2Mn z4A{81JPdLUM-{dCQm-SR$^(5GkI{KhB{3^qEs*c}VP6@Gouq#3pi?X@cXOL_!SMku z`}$L=?v*MIJa&o7KKY>Y+fSJ6#jci(Fxd2;rj!d}3?zO@@;jYH8wtyfd#u%CvMSoA zWX)}#n-v?RoV+dOQwzzt5;AN&EXs`{Cvj82hpf-ESgRcv|3Oku1NK+=zth{-Hwo8N z{I}!&b^O;o)7Lkv2ImO~(Y@_MkO*UsD?d=fvPd65R<1{SzJtu280#1dhuWuEa$D{+8=h+gMZRUVp;e)=S)j2~ zrd6xkvD*MA;OOb3o#pgjM3y{q81e>C(hNiE2x+^rqls;&{mi2ZC4Dee13CNQi(kLW z!@uxTtAbeYk8pkO6Tc8C?&gETm8!E7lrGc`ywFemsq zbe;z2Jd@;$XKy$-P=X+w4$)}D767SSec6Fvb?8kO(amAc}5 zm>DEbX27+(tE9)2HOap}&d0;Eq!lz|8{}z6N(x-pE#&eDi&)PFPrb7@80Yt*cc*3* zHIR&ncD2_iSjKv`uVQIan_zu{9$!#5Na4R&&-HbNkk|))sd7EGaHK<&#hTC&<;nm z9iSLR&?DafJ7t|+m%VMk6l!;C;1{Vy&*iod9$lrRV)uU!%^&+Tea`Hz5>@7@qDG`W zGTAjueEx#^-Zc2MVLeJif2&j%>{eDi*JcuRnB|p19OmV|fJxMbqB1L6TXJYN%J{^$@>mjw>izp6dw7+dR=zQe3T)O=D zji6iL=Ch_X^t3mMEc zhX|YD8Kaw%rHN$6!|ISzs_L^XX$LHpei#_Rqkd_0Gv!^V$ZY_LDczsgroG@vR7-PW zV*2g+sFlNc=`3$?m+FsWZWVKtnLehzsy(B1IeKV@^t_PKI@K#m6Z|U#T_y2^EQ5GL zvhl=q#=m-#50WM5X;@t1H`Wv(`(M4|4?ZtYj;UTYhH_;9k2AnI!eS48MnBX5D&yj+ z8+&p6&PilYi#<_c3^W8J>T1 zwUh5)#=m-BIq(^{r@pNCPZaA*d(`@}mMsx|(_avoO;~(wJLUK+Vq$7Y!)tkFVHrzM z-7ge8_SeoIx+OVAyZFyKGv4R&*(AIJ`MXK?n;tyR++3wB-AO!-{svBI5G8&+GM zicCqI^b%|!ZpC}fc@dp)%I;&G{JnAEuZ+h!)$io*I@YPROnYhSPH2w*nGgBnvbEAwWss)yj?_GF?pr3UBJeq*MxhLPdJVc!6G1vf;76lyXkSJ$*$7ucsj$O7a 
z4ZAfqYCG1T@ilqPEbaGqXvLLx9Qn3_d8@RLG9Le$jiopDRBFR+U6xO-9P2Co?iqtN zEO7E|r9Gv>A9_j{rA)Z(ozA$5caWdjU|&*kmwQQ$ACezqc*#V+4zhZV?O8I;XTLq$ z_fF>!)w$c>=^Q>`V25ZmV=fDxP$8 zYA1XcdGjiQDW~zn$iA!M!V?vnhS5=P;~R3ZrnPSw4a*wh*YDWCe$b!aVZI7>q*wWL-TIhhtQYnBXSxo+ziFR1sQqf6 zF3fXT80jR~Q+(A(6d=7Kp`2H+?f*9Pi5#ulBP8%s3{(#}**<^{=Uh>fzL` z@)7G&nPss2mXdZF^;7G~gQ^z3f&_PL*ceN2S4137=0c{uHRQo8C~A7BgP!Iz(=)=J z{9QG_TrHT~_Ir+XP90Q;=rB`W;u{KHVpdhd^%;o~`G+1xu zG~;CV5tID2Dbu{`oSC0IEDVNzX1Ff{5)Q3484^|43}-)nUh^tT-}t2m`^%{fMc$zu zD-?XCc};ssWoGy9|A%*M5y;M(tgMgh`B!iG{(Q{zd+%np;jU%3RR`n3j(*z}mbt7w z$H&Xt{>5yw+D}LJZHWv2Qx~J?XLaeZ(j0MXw}H_pycpqF#LUZ2{VPV?w>KKeL34nO z`x@5wETKY7YA+Wn;02)TeoM8WYLipf{$AjQ$auz$=L&iFg7*Yf5`LRl=0`!BpGH6Z z>-xRi6i(b1h)FMtOk}$5x7q%%E}^utAV9tbM?-gE#Ga7$2-Z-(4>BKNghZ`w%3I@pZVOFrpal-l}pl~SoUp;VTga1vJQD3_7_sxyb| zbklhGcj#&IzGbe7Oa9Z?Nvf$A5_Sl_DVt!MGLellpDb)mm$pJ`r9q@0COt%0tmtU| z)7t~E{T{i#3FY{v0FQaO=F*f+F=C;Yjhl3G&pp4p-65N}wk4f#&1ikR$FT@ZE9DW04>Bq`kK8mnn0_*7v;Ab=J#3uj zWFmB{h?60Ecs@vYp^-L4Bw>*D1_euMW4MznGv#DK^O2agdwOn%lvcHO$)I*mmiojP zpHLDPyp$EIv*+?0qKK*Uv4%5J>I<&Ye zEfdS&g-Cb~J8M|Q+4t8w-wSMy+`<0YTNwBQ+FlU)4Zg1m(Mmf$v<)-(ZpB_MEA(5W zWQOh)(n9NSmmYc+->}Q}zJcq=5XnspzU0tjmiW-qLOON`gQa~)GlibPcU)+h5EH7w zoi4N)-{K@3j^Pe?(D=`e?YMr|aj)f@j;C;UzT*jehdW4aQ`GQv2kC#iI(~^U zDgsy;42%vrN1M!dPqjMxC0DqEH2Ij!PJbHnXSm|Ugy?hUiq3|0@=f$C>p z3_lVVPBLJOVuuoi(!G1JZ^gS8LyyErn&%)5DbhR_&A-mG7u5N}XDV3CFyk~xW0oPd z7n7}N)(m06qBN`Wmv+lI{4KUxZ~JAMAP82gkdc;_Hhz5CcpIME`ajXURx1J^F?Ie2 zp$|gjiS%wrm;ZLC8gfz(iy0ZfFU@n5elS!Zsv58t8pStgyryQgUE{3~ZxknpY&|dE zeTi0$?9(SBFPQYupb-|m9eIHjR_RI#Dm6Z?Dn@K@{=Hr0r?$q3)V9WuG5?v6DgUXE zI{#0&{~)A-wHXKNj@7$=8zMiogws1v^JAf%LcFBLyio@p71Z9vzvA-L0p7p(cI>nv zk4vazg4Ul^MKP+P-qwR2p}1?YURHI7S65~9evb9q z(3n*97GfFQ+Q$9zaAYAQ!<5g~wn`BFY>Wp#E}h4Vc9U)WobNZHiS*8@lA%Xph|7JewQk2QeVA(p*eQKpe8O2T*UgUCgfGe zgU^hE1jAU9D2?4%ZMFCkVEOW=BVU?@lr3(n+afZ9WqqZ4=K3kndyd-(&yw)S=5g!G z^~XAMZ%ZuE1$Vd~wJ<}byUMM)>RMPHU5mam+L22tEBtz7*sewSK*l6MBv=jt$4` zq0)sP<)wbWn--lT8Tp&~%N^#m`oc7eV&TQa<-nK8;~djii>MHJjjC`3KNR14_#yZ< 
z@k!7(PJPaWZyWye8&M)ZS^0J--%g}X!T%)u7vq0o6G`EZbxz~4*AvHL87pi&qb6hwo+OClOmkj%#~(iDs8`@FzZDFQ7aih|SP9!yol&jCyGU zFK6##*6Hitz4)i)+mp+l%LGOobkZ+1v^@oZ$>MJ>*mI|^H-9qwV)ydT#Xn;7B=4yR zs{ox?{(QGyJ$J-5>`~WxL|?VlEPf*LmCRb&xLjny(_)jz!J@0I$*h!8A9!ALHP41E z%CU&84OasLReM@2hkuR^SE|W}D{M=OYr0IuMX>9!*Cq5M@vB~&ecprkW9xKm48`8_ zmc#E4pO00asHXB~R2?c#JrvfqUjA)-GI0H;=4SP6>~8$u#vW6zReRNY)K2yfbx57j z|M?z9wXvE+%}`CA=4#DkjYIROrc`r=Kf^z&`JC_N>3^x_K}|H0$j6L6e^ox8Gx{Jp z#`M6c* zN@yYEGuOZ~*X-FJxdD5gB_>@rAG0u`zK7Ye*Aous!#4NhTG&KsSh^lwn5WJhM$UW1 zk40{x@v1ubl4sUG?TiHHLvLRoju(5d@*2-FTW7GU9=RJBEb)fcT_aNYAB%Mur`Ky) z`gB+<#;Mg8jtEQTrChC({5F2YWp5;Xy{*kJo^j9hUhjR-SG`~K&E0uD_IFI5Cl-hg zh`gNKN9Fz&drc-Etd*)YV-7{i5@}>OyX4j#I25_T!i-xyCUCcshsgzRa${)h;B7bt zBOa@f(CPrS4eflSXfw;5;?o7RV)3R)xN1bFZ>nF1SYuAFqh&HuRN}$z7-NBA8wI^O ztd}^U9CjwSS~v$h@f(1v?&+|aF1k(UD&7DouALbTSe+7TIYgf;x7IikorWNC!;;C- z(^#y7bdIZKqw`uIB~0T+&w!5U&4;e@l7-($w#vr!+y+Zill(px{Vdl1zxDGeB84s1 z(>HzY_%}FLv!8z!>sR9q`v0GN*2lAPy1+rmV;^px;a``d4!l>r9^;>0cMzJ>zlzg0 z=mIg=CD!G<(9LOVfxn7eZgvxs=hQIx$X!=MShZC=09$Ivt}!>(OR5tG@Mf`GQ5zfP zv^T=ecA;n&2g!%O0WYefy)Y-MYO`A`Vg~#|pAX-Kz)3U3=sR<3Pe*)vXv_!`U7$zn zF*9685*j#p;=zM>m&q$?2wGT0=5q4xRRpCjz;GX`iQZHCE49%)l+Nl@;VRToV0h78 z=|&!Va^MD0SjG&JTQiZ9GY-)k>sCD;njr#zi!h(y`D!*{&N7yz%v7{@MGb6jVn{w2 z$AH~$!tUC{b*psQpbBg>Rs}=3S1(;7?!|1RU9~aaFV>cNxFUUSd%z*qSHF4Ag}`-|G$qs$)0l7 zBL+f!#fPEKAls!LBDwjab&ExoHV3o@aH*OhjjpPj-h3J>^8AM9FF2qZW%BBpO7}D$ zi`4aNR)=u#%3w~dIErO*;Y#azVE z%p+P7XHjG4?eq`4%?Zv1aLUSCQyKZ${s!`Qpl${-N%pGJ-5A!e#Th)vl+A>U6JW(6qly{gUB5D%Y;e z@ulVz`?;R7nolA-Ok4>kI2!1!3@<0&cW_J#LM?Y}0sh$9IGH;@HPSt;p`5Jt%=4{C z-SPvMc09q^Rl+;FPAs3hzpR8-L9BizTK(pxkk!K9YD9mv5QX)3&_A&BI%nVcnXiD( z`lEDKQs`=pGkc?7ObPNiM|&DQ`M%|;#bzcTN|&8%G{|bzUO?7PHS)`~+$6Fakr5B# zK2tsXf=<_p{(B+m%=f<^nP);BmU7@!X+v}t7-8S{*nt@vIl1u7{FYbtmV;mAZ)I9qYxUP)e4+m$>$kO}#YCSx~v+YdoIcx;aO7q4g zJ%IY=`y5qNA6>WL+f#LOIWw$qNx0Wn)mB-TH6S`>MZ}nU>oTKjzW1vRs^PPU(M_Yp ztF|F%k?^b3^Tp}PWb&ub;Z}xG@nDm!tbsgA^f3Z zIwY0k<+FYbaZ$>wK{>!E4*ofQ!Uz^q2E0i{-;4yToA9L49S6;{!KT3oxe@1!-TFRV 
zTeq1Bu7hS+c-J^u3D=5S{v_sXn%Xc$bZ!_f7I~MQ0cA*_^)lc4B6x*)N4YVQn`na< zPjm@1;>eL*58K{(M;$$@R@2DmH!3|Wzyc`nStkd150k|;2AfA-xn=`W*953Vv*i@P zI9yoJG0o@E&X(iw%ZRy5vua@M{_|X(-oXN<>b}13AMES9bsBi9)|W;lW=<9Ha1$%V(g5n2E4!a#%=D zeH;pD&Lb8pqJgKh5ps|ka< z+li*A@}MIzeH8*uF~@mN1#pUO7Vbm*$Sc3!{t?||X^F1#rUBZy*sO+3cYgDA4Re8E z^BXyv@F_u4oLUCmC#{yiWcZYzm6JJpP22ott`IqksQ&ql!1uf`K_PJC0foRazX@LF zkCBKcj8fugi*#=68Wk$d$q5HQR~g~=;_9i-Il46J)18ici+oeO<-Rjmw}90w89d!I zU(SZ)fFSGGG#XF*JJu(OlbT}1$1qVKRN=BgRgu^ z8B1nhWAkHbkk4J5DPM z%^@n8$#);VZsWf2iwOzPy5BA7To^Al`@F-w!m`El0(z1Y&bYM`nf!~x`@(ez`JX!d z6+S-CA-`Q$V`i{QR_I-qbnntGhR7*%hRp72cNaCE3Jei#;41p;TSaG+Btjl0k3P&n ze~i(=)_0J4(19nC*DhRDLLRv4)14K(t!YR2 zF;$wZR+&o3r<7_XjJV#99u^kplO4saxbZ&MuaF}xLeSOoLqnwNu@GJ~qc7rZC!YOM=PgEf;7PB$t z#CJNrJ7%CY+40?<4e+X5I=^ANvor{(;;FRht!h|4F11oU)hW@)Y^zIkfnQs*H#%Dc z5gLWD%~76c!h;G2MZD*M{m(d!!SVjPP^R1`X%>H%kR|thF?t4m7vp}8xcIv`!{YD$ zoaMmPffR?I6DBuUCSKvDoJ~`kCShb-4o~yZJ|1!0c!J7wo+;iSedD98)#=I`Q>E)) zLS0!X(Xjkm*>bfXH7Yfa_p@}=nnf+FL#+u}w|*;gfWfd>9BA#6?UjfNf_5f~@OWA0 z%nw$8GyL=#RTkgvjCt5KN!%A;R-y^Iz$Edyh}AuDlL~so`QBXD@lJ01ooE9adnd}i ze)n|r=5*A@!UsHKUF3an8|reL+vd4?)AhUW{O->)OcRy>g4>ZBPiEZu?Rm=<>4gg{ zeY;ad^)8+7RZG8BR^Nl3N--`6Yif=<_&{^w(&F=MapIlDRiq0Z#lx~Vufcguu#ULz zQM3YVMf5uG1Ec19>%=(h1`Y6Qd!X606k4N7)143a`ZAAnW-PV^`YhCzVeM_sAtxU2 zO%y#<{3345lg`;)$5o_3Nr|W`$p69FrQr<`6=Z1~pCo>DqRnYKA?vhqj4Fj_BI28> z&OPmPcBzmHGQTj#wBk1>SUUA}hV+1+YB}1OaYsUXUryYq2mIWawN+~ueW2sy7%nBk z+he}x!oy1pupVdWeej-*QyWf&`^(4df)9r3Tne6ZLY=~qjt*Vv!+AgI!@1}~^tquF zeR^*S%fie$Uly3_G#&bEu*R@Ia1uSyXK`n?JPR~<*Qe)23XOL zHD7iB+uZ%F05wp952qD!?&_toh7r!0zF#dTKrZ_EtF$LhM99U7Y&!b|yX&$SCV68% zggDk;BKXM%9V5VdGI?T+(87e_3qd*KssC<;?;85e#MuSgx!~_Q%t|g>e=f?Ah_*+c zUktw3F-&c1#uSx1VJOW&XC=cO#}7*bZ= zO0rvR6mI^B?xo=`$=I=W*EQDj+M712FA*?zHjN-%Z~$G(JR7^_lOEjAY+IyQ%x@Q`xnaub@I-?u-|{8q#V z-HLkQTak?wxK7bm^^HvxQD4TBu$J!|Ylh`mUwTx70(qYEuP0uHeeGXDPdh(4p+yyUEG+Twk!s%8fekCN{hC89`1PK zNA0-(|JHU0KB9${pgTi-DbkOBjg}@}ewO4j{^Zv%Y1HLsLwtC91}LfGB{!aaWi7`n 
zi~qMgL`|d5iINe0Cu&~tWgd;%(reAHI=^dQZk{4O(3~8&FSRfGtj7_Ib|oe3u} z3K^gUnytW*pn@ZGuEdp|)K)Ds|26n=e_1ol+m*5wDP`TC+BXhmm7=WCzqwMD`lTP0 zb!BwFJrONy&zHk;j>y59Ukf?Xz|NV;IkilY&Fo$|APHge-&Pig2eHm^1tp3H*Bx~# z%Tk$%o*%eDENxlJz3{G!{j@~kPKYxhOL2FJ8;G=EX!}~>*Wq04h>hUorgKII+0F=}QjqqNC9v=0T>FC~Lpo;o_$B1|RT!4yx`tGE>|u z)`;()nd67v#U_c6{D*&C(}oq0ox7`Mu79g|eFGvga){sCVDl~}yGMCvZJ`p43S1NR zrQ5%F@IUUCX^%eNS5swKrbaw^_zWzCdGZbFP0@0LTN z?~sL4xKSFp(uPaDHtdB<5cAq-S}Nbo#(-y8D&NH>|EO1~4rep*^bJuC3eiWG$_r@D zF=r#?U&y&o8uk_=XotXL=^(95w|%i)nDMOOCOO}!etAb!-pAy(4ywYpGTM#I|GS!M zXJc%SUwm-KdVzM->vZ#dM2ERCM)X5p@0tCdKto8aWSMI8<@@+WqfU_hbZ;8n-w;2x z`Cu$CIVzvb@FvQ?{wljAHq2}F@M30`)xu11SA=6EpL`;sGCcu4oMe>K&cFJ~PV_WE zOrw6fbMzOVLu)%*eo-g6`+9ZQ_x9Nh;%MTw^YSjswU~V4%I)C<=pk9p$l4izexjkc zxwzU>JsLfe;WdA81xpF)X+AFPH*S*JEyqrO8Suhp5XM!a?XaLyB!jygDJ9O9_ajdV zs)+-f>(pjvlQOHuKSeq)Zfav4&7=zK%o;}j9TL43a3_%HE#(+A1RsklFY5=+xaX6f zxItJ(*GlD^=H$R!Y&^p-+j?t{eW?mBQO0HM7hA=mX2|rtL&WP_2+xZ*o(iOij@DD) z{5fGtV{y~5&Pmf}ijd#Wp*9hvIMs0GM=kY>&gT1-wl$ceZ6m0E<5q`NYI@^+oJA6C zPdRp8Hpmy8jZ+(^Gz7k&u^zy0%r}7p|9&U-WV#VsAla>4MmEbYe-Yh3Z4t9uSO$%r zCdhC^W`dXAj+J(H$FQ5ADWhK`ox3)e0GV@>I1#ZprPS%03R~#(8q#NN>E^U~)uY?$ zGWpz2Hzsx4tC!06a*tR)6W7_$QnYZXT+QV{3eFiGXn9p!8__U*SgT$OYq7Q9 zTlct^xof2oe?}wGC1#r+oSw?U$34JJgD%4D8j!q?+PhWE9!c=7h8G6pLcm!DzI#`< zR%h|_XCT(89&xMRNlg?T$W^v6QEu$o z0Xp*2*||X?Yk`u}+0E+^Ujkb7Nc3!-Weck_Qv%PRf7V$Z3i08GIL>|g%shWg-qs)3 z&t$DvRvdkmwt&`~JCvO;(E{HdnhOJKPFd@FXSlp;#60&S7S5gE_k%i{13p3L=5syr zjGC9vX!7#h#UZhIt6-|U)VuHy&A?-w6_63F)AjWh?Nf#CP%zeh<~EUyz7CQ`%mU3$ zoHcQJ2dpfGVGH+xi~h@w{YW{vV}8@LmYn9n@N&eeY0YWmthHT1u0+V~PRlk<-&D5#aNCY?^Do z6@kZSAih0z1Q-#W+pU9qWW1ZA653OkO)G|i89tXd!}}LW5~Dh{>CgwSmwt7IV$}BK z5%tLM7B^E%M|%%qf0EE{fHtDIaZMYSn;aybU{*VE0n;~J8`OoDt!@*KMtkRgUw-_co=dN%++>Y_sasV~l(4O6J{Y!L4 z+zKiO*PAN+Rnq4XB!lT)YO>}<~%GQO)u#e1C>Ik7W!tbeY z&z1<952an8rCZzv5ji+m3%C=-O81v$ha~zB2~iqLm_Vyzlmi){BkQH?jc-abS~ys= zf8s(T*341SkdYs>Tx0X_tf`LGt2zg&BEV!HjEtYt5}w}jNshWz6&#-v#9~^ zXhtyVq#bT%h(eJG7|0$WLy@fA*+y-igf`!SHXA#)i?1uI)mOuHQZ_IV>&e}>1V;OI 
zxPR&X$~=7&&Hp8y>o(irB{q_q3oZ9}SkiEE7XIx0f0u9Q_kZ)p{R1x#T<^V?y^4ta z`YPQ*t?RmHUmU^Zj``GHeLqgudkN{fg}OGoJ5?geGYR(1_ldUy{(qjW6Sj+X_X?pN z^74o|y7F?sMx3{HpdR$*I=>E-=Vg%@w}M`fZe@1YOssYog_8Fn5rhw=BK>5x^7i?@ zG;tZl2T`Q`|LWZtuw1=Aw5BXvUF}LJWL12oH?oM~WJ3EZA({;{T6C2MOJ_6{oxf-I zz#O9SAPN-|%f`&uq_UnhX9CV0XSKrWj2)14*_N*m&U*Ahj^}Nq-$^PneN%SJJX&Av zh2R}q_YWCKr4*e%^6H>aDRz910B1itN;0XeR?2lM5F?TlPSu|2KMl*1nUcjv*UU|< zy4q$A#{1Hted7b;d}+9jXooCr9C3r=nwfApOy zZgKWB-#D*D8s|>~KiU6=Fs>obL8}=`!rSRSGxXg<#np(%zf|Tqos(~;G7v!*yg*-` z4=b_QrQs&{NGcM?BT|gjQU6*un>{S}p#pu@b5qaHeDzw`*qEBhQpW8YR7KV{CfB%) ze=tvhg$=B*53?~Pl4Ucsc5F**&^Lk|0Cg6aaXUuS1S)ei=*#(_(YL~y;!aNSx%JJ= zaD$u6+6E51QW#YAKE6+xP0dt-snXz^(9BmZLsTNZa=C9DzVAf@0lxA+A6J=x$mCZf zn%DXVr65+-@oM2;y-y#~xe%?J-QkP%13z)TSb=z_64l%!-}-G`2FJ{p>?d#T>$^?C zV7_`QqE+m~JQ*av32DRsH$J=AmCq}TKGEN={Fm_Wz5MhL?tXju*Z+E(x1o%m{tukD zSW=zrTDQ>TniVRkX43Dg6DIRxX*Qk@?Ey}3bZVYJl3$Z+YKRH$=`3(ldepY9X3sjD zGQ?G@ps{~sh8Z)@=6%RDw8|v)^?nQ6QzmzK4g#a-9J|j{;>Hw+oH9#UV2?G52P^kzMK@{VC)UH>G%l<*3=!EFcs9^!RE+l; zEA7?4EU&h&a6KU!B}6}v4|+gv6n#$~)+HGMbR6;_&(YVkh?eGf?M87yL}2hX7wj(1 zqJp+q{1?UY9LX;O1=yrefOV{iwe)}1v2~a4*W&KVucja6YQ&Q(?+m>A|J-$XKg@Lf z@o9chh$!W;LW`{CC?aRMe#Y=Qh^<-H$3Z?b;v`O9I=KdG!IRD*!5c$HF-E*T#M>Wj z!`Np(eqq*bmL4NsiG5H4N?2p+i z_Y4tlGV_h`nBQh`;SB63V^2rynBRy+w8bQC4UbTEsn2`%_0Mn}W;kpa`e%5H$0MfD z6N>{g{EmNe)$20H2V-#VhicS=nd<8;lu0c_mU*SkldEO&YaPQ9czt22As5i;MxA5&ZK0|v4Xk6-cDyAdZ<>vzz*eZ4dA zo<;ofqn%2gXV&A~qAeH{gNjz}*7WuYYNhcpKHX&*yLzN7}aB6xX&3&HU`H@lJh~2az)L4b#rpNK*Wa-b*n281^NABBn2DhjOTf zEZJd`olchQe}W}@CM?-)l_(wl0>inj7mg(IRT}7pKW!%%O^B_pOMpaXW*u7lb`6)U zZfDsJxyQ2|-b%0=-O$%z6FCc)jtEWP5yT39*!14V&!2ns4`()FMexKd3~Y=nXSZ~D za02hPno-}^MD`)(AVXYHV|P6}a+liMEJbs&aAOVCsP?`IJGRdtW&A9%L|``9czg9Y zDe!JOqwKz|C42)9zk4-2V3?kuTa;{oJh0*jy+s};6a%6L)^FF<>;n!z={z4qtW&cM zG7yuz6Hy1w;kS$NRHV? 
zMq9uV^xmTLov)&8rnOqbNsOy$PQ>O|%vg9lPKP7ApKmJB`xQ29uOg;zelSfS221C_r@WXImC!yU)?b>po-sI@gF|A}A39rOl zdJ5m|c3?VAt3nQqwM(V?&4n#Pb|HRBt<&0W6uT5(g=E45M=|M(j#AL*K4K4soW2`^ZE^8{4$Dy()thj@tJbbX{1UbK_ zs106(Ra;>z*yJHAmPl|PBn0q0V8=RR?$dgWh|{|qhkfX%k70*r!yJF;Bw_UA)7l*| zyJ((9XK53BwHBZJ@f^J=sQuXt)nbOm>|$Q6GQazJSKu9s*zr-Rf>;yp6aU(FfA&zN~w#_DdSonKVQFh1aUTO!95o+a86|R4jXx zxt%2)6fz`uV6r5C)Z2q*b=j{FYcy~)GS8DD-78Q{l&bdq36>6tQj(7#*^2Xon+ z|H+f`8!QbL@iS*%y!6;oIYq6hda`SXI3Z{3DT>Uim1hbmX_@#-p|5e7Dbc^o%)H4; z3O$MbnQ?25ez*x%$%!~XwJcS-u8ZRhlBO#jQJ3liYWooB4UC%kvqXu@1#--=}HohoJdrE!nqpqC`sZ6!~tc9;K`h2APUupfp%efA+=YRBx=GcL!KmU(D`7<*|;sujyc4$V3?n~IElvSUKd`skik zZMTFxhgjgfAsq9(!HYF*OJxg7K-4he9?iO>A;X5G4%6v`w6Qpji@^rVbI6pH4q3c~ zc@8B(>s=AC;*J+mr1Bn5CC<-fLnIFVhTjW`k^y+aNqT(a>~t|f{`I$<-E+u!kyRY& z$wRz%oE%(9R$ZP$Z%*#(J2R!PuV7|hpXm*wIK0hTwH3bqoRlChL?lLCz;tM0+sUt4 z?AEH9$|Ja%4ynpnnrg-h*F!GP*?49;l)xBHac3++UVk`2_8oS=5j!gmHfD+9W3D9l zPiRG>uxGlXOrr23`z)4Yn_A;%@uynJoC$XXz|QzSdpi{I13jg)y-oMw^g;2nWpe z8Te$=q4*grJ^q@9Adh?)FvIxya*01L54u#;a}>3h4t+j|V~YS6{S#~gV9Qou6ZQ&# z%?#MgmrLSd_w)=blFYDsqN@Vq4BXGqS^gcqY`{n75B&8%!gr!W{yZy&}kNr7))+YrF_dYitApnA%nui_T}Cn%BtMHQe3DNyLqOZqB3`ib6o z*o9FV&x>`HPNjszN)yKFa3aSFQD3f7Ctsx&YpRR_#YsrO85;w5k{pO)is_AL4jXcK z+tlzq6D&(GR<}lbh(~^m-7{IdNImqH((5!{1Mg*De$Q;dJ2TL$JiWgJ@8G=|(f6nX z!1kl}lJH(Z+Y-RdiGfUYS|VO8Q;XpQ`Ck zJ*uQ~x6-p><@-pdY7qT8(y39dHA?DHhsba9(4JcH?K0A6424W%sXU&CJVQC_yjr;u`=)o!IZQfl z{*GhV%_Hg*jc23xgL};06W0(c z;p78@OS~C!cJpAC^L#hQ#X-iPu00lUduzlI*w^9&H1BrkHkqvqbZ+L3(83R`;m7<9JK;ra zFh`$fAVYeE-K6{)NQ*xCPv6me|LMtpo%UOJ8V<(6jQ@46|MRCe0m7Tu5Z~Fo{Fvd* z*lRH_B2_J4vo^x)%z6O*O8uA8x+pZfnfir6`gS0!P8i&NAgoDn$Zt}Nt?*9I)-avs z?U8@unJsG!dl3KdR07AAGb2%ox{g8 zpqH;+Q!Xwf-Fs{q&K+PqIcu!d1~;?zS&|!)L&?oqkO;Q`TM&=HTPC&vCVFcG#%6&s zHV1+0Odhkw2MoIzlESr~lvXb+j|^-0X|v^D)sMi|iSj3{8RA=j z)$h(Az4ut=ZKwWT7H>_!Y)+6rP@?HQ=fRFZLL@4NKC+dI+EY~T-&PMmOS3!*@kgs~ zt>&jCH}Vq+?v&P}9n?~1FxkhAv0)AnohUj#V>cUn&B;HOC0OrJYVmbb`OpNSAD{`t znHtr*9}N+7n)XR!)d9f8aE1glJSxKir7zWng<}Cnkl<*!3`Z1(rq$72tLfmZWui+= 
z#rQo6INIBOAsRzBBL(9LsT%dnqpKIeB9ZW#n@00TWvJ>XSKyc$;tJxtzYx!L*s_Yw z=j9e5I<`&eov;->zyX(4=bcAsrAk&fy8QRk=nv?3mfviw$sfR*R0I+ zpx%F`E#A-V&6kKu=p||s?KVzFW^MuP;IZ>woteC~Th9@V;@u?2B>L%)Utg8_C7-MM z6z9>Sq+#s^epk=JL9ay43iXKZx3C{RCM5#Kf%>&pF1N5T-+QEI+NvGE9Yn5-`Z+Yf zGe(Hu7>f6<=5Gv)&h zQ5gywJ+f_Ou{pmX2R0hkzZYHA*Z29=IQs$Dk%$Gk+Nl8~9(nMpS=Cgo`!!}uI{BBf zJTU=rD-&e7lW;|-$U|cKcG>EZT zI3+yZx2J7hsiWs~-C)Eg9}Mox?EUzQ)1CP|T60^W-|Iw+?Dpde(g-sXTCyd|_gK44 zcnU2g>tjUsLGF@^Mg6%u?Te1v`g7kXU4bYHE?zsBTKBcxBfnQO0P*k75FFD2 z6j2$FJe;!W3RJ4Rd~aa`Cv=>lUXsu7Y56;nT^`K%o6S^1p+B*8f=Kysw)XU}ZR5uBC}ELpT5zO~v)<6Q(y8w+ z%OgLr5_H1OS=ZrAEuFYod_KGYk~Q@Sywe^hElAUW!sZVv+_c}R33Hml4Ml<3=KSXt zrW@TzA5ypxbMoProbWGD7p|WcxYnG%A;Dc+DxWAJ%iD}$8*9asP1{QQ&NuD8>F@cw z(1IA6uU=|N=!LW{!N(GHd&jjrbGz|?Vz+FvTYO`I*p z1kqNC1^h6)WN+E+>}}^JP8!$Uj$e}}jr}3m5n_R-C-n9GF{iI@!B};$06oX#1uGY% z6^IGqW=Je&tSl+bgN-?7d8f1>5DyQ)1EgPwuL%(ih29rlQOQRaSch#y3-b#%9qe!U zw7@NBd5U{ZDSsSeH1^w2oIhm~%PbETry4T{pNjR7wM$I#kQB3EP0z69*vB&m)(q|0 zQRrDJo67w~h|<(&5zElZWl+6KL%c5?k@ym1{;WZY@uf;ypIOqt?py6kMtZ#b@wfCQ ze2jf2N#%=|c7$&s`gNY3ox(mEb{WEAJ>?m;QCFh;&WPe%ap>ptaS{4k2TO+2or7OB zl{n;M9iv&rus*M#xv;_O%=fAkdXlM7BeOT2)`rV8db08?$8xoVcK(D8sxQ+)tcd@Y(W@vd zM6JfA6~gi+pPyG+A1DHq>a=I2d7YrNcB}^6qyTysH2B9g6J3}Ce3`lBNoUc;x|iQL zli2EuQlPt$`|h9LPbT_0dHANmBMY1Iz4O}&TCc<1K2TCx4;Ny*^L>Ra1bJrs zP}p7cgeYv>AH7asysEJ2Jta4-p1%*JY?@O#xE<7VJ|Nfrp{KmG&_8*D^P+9^+ZRjj z-viu8Ic^(Gc#*JasFYmoD1A(!e+vHi|8Q|q>7)(QFBX>)CH$)5UgS;KOu3gziJSJI zB}0Hg`2Fzz?tLz`{NaLDJ!t8^pJ2dF#2p`=kve7qjk zO6&f+_v54M*e`v7+|em`FyQ#KL5@zWJDx@tyOV#57j{O>{0{LIM0=kmRC z+X|Zhj63g7p=d2=USX!$rd41g3?kev^cSJesP{@PX5J6(0)3bH-|o8`&76=7PTK5U z-9hVLLCf@u3)4}G!$SDMY6 z-wf4?ymXgUB^?m&vJ!_j*uN9wtI|*Ib^s|4Y4j$eGJu>KzA(7?nxzTSTn9Ya)UmxOUGfqkJLd8@0>eQJgmXo@H-iX|D z1n(BV5xJEOX(0}$IO58FBl2q@$@>N(%NT#)=WY#~jf1^stBNBNA1RKM&3q$L=_!t6 ze^ne2zIyU(nS**nCB7OluEn|RlF3^fDKLI`&S4+i!p0J}Q;gF@_^=!29nLf+`HE9D znOf*J5M4)S7?d<3P8f>$JXRa*FhE~{vWguSG4Y>_ZGg8vYOT;UAZojyZ_uEmXeo$=hj%Jc$%jL73=`WU^Va$zquG%dWBVz{ 
zoKHbsfmnN;<_{^A$(LYdKbj5oktgyXzoUy|BfZ-59yfTS^UQ7Z68(t_7sS^`(joa` z?7dD^#e0#Y=so&4*&nsi(m4z`GcL)bE1{krgcIemSu*03h8;B8W%i+H1IAm7$2HDt zy9cNDocGur$(nW=#VMdD3o`>}%)Tvq?=0OXjz7LnRKZ4`(@CY-$FGkhnXdal?fS>usNa|zRvmAActt6 zl(spd*ZUyxz4R{(A?S?ly7uYVVSQ7R1*HD_LS@A>Jv-ojg`=Y?`TW(@92)6u=nX2z1Ukm# z=RAKPd{ugF-r4|U4Ka-b`x@t*Q+v8~>g0f0ybirlmOb2Oa>44bXQhYQwa!C5S>E=X zQ}|?P3;c{Bm5P_UI^S!1EZTR;&o~F4n3O_gQ_l@T{Qg*_?;Or(ntphJ3sGCE>z%i^ zbNSOtD={`@*^nQ@M+j|dAs835&DlSsCEAiH;y_#O2|CZP%qe13aM~HFb#md zQilyxIn57Io4o*ff&;qFW=V*%NOWNieXVsHBcYA+UlGZQH}U4M&+3i zm@?4U(LB-qrhcRT$_)Z{jCIbjC%7~8oN(>RhzQC|SLXICjDUIISzIR7rN_HTX0DLS zBq~q4K}ed~r-&M9iio{rtN~*@1uKy>3w8WypvDEkHxTjlWt^^356iQ6JtkN#&S_8b z-;tc)f7+SwiR+>xuehgv!jYbtSbeDE0+j4?#z6C~bEuWIhvrJYq-%Y-TdT8+G3%Y; z60wj*>ntbgeXxL3Rzz4z!QarL%$_6#CtgtQm+ja4?IOJsFCRZk@KHZTpHY6AlQB3+ zs7HZ)acM#WwR_p|6knb5*QfSHG8jvXwrZyNT;-jj*lBV(Sn_5>7pcVBqbG{MH(DXn zRH<@y3~$jWGo8t=L`M3xh)+Utv-e9Jdd|6^-HcPym32aV6Z*@6xjd(By>leyGGQU{ z2N`GI6nC`Q`{(eJ&IDx+TXFt5&0rm{(5lRa#YoZXYv6@&r}@s(1cH^8*(;(_4 zT{syhFu+P}#E21tMm!0PH29<8GMwY?Iq@V=ZBF3yZ-p;h_2>y#KLxz#(*1|$awk2!%O8HT<0G~ z#2Py|3JYKG*2SF`-Gt90f}`RGF5L1RW9Idy@Z$-in!wXYsfe=4bqAN_G;lbrLe*WP zpYa(JZlu^KjwkBV)#(RDs7D+~ShUh?(bu+y z>zT%uuAH3?iz7KLA+=p22vjqSnHXwjrj@Xwz1Z&X2RoVihJ)&E7aQfjU94qg7@=+7 zybLZIeXuDN|H9@Gl1g|Q7^M-gvZv1wb6Pe6qE}Yswr~@Vq}GqW_ zl~6sltGxtK>N$P{&R;m)In88g6cF>&?9=5L8(0?3zPT4Cp}2=p98G^p6I!o|sXQ81 zaU*?W#K8^m;WBPa)0S?nDiQo@Du+qVs^aj)KyVLsiU1NhCiNG%?YqQ7EsOvF88gTn^%qX(&=p3&76g0Sl9Eg zInDGc{4iFbq2B+4y*H0=Vy&rQ;1>6R9d(v6!^AxWX6ELu1zOkTh(ttVg zI}AnoD%c8*!-&iZAFE$)++Zp8(2v-&1ajvTcTiIZ6{RsH3tAr7ykoJAL1WR}r^(fLaQINPELt`K9}7MML+PHbzu>;$B4v@X6nv)O6CFzT zbp5}Q=RWy;@(uXBfzOxt*fn%d*MWNe^7jTrroBNCe7-}Y3)O}G!0nd>K2-yc%K{%t zaadC|a8&3rSvI~;g!WwpbJPEIq3iIr|C7M=zmbGJJlS;{QGQzo!RG3X@_M zKI!^yok?2e1^#BL-%xjeY#pjt>Rz!Y{7?@(}(Wqdb>9Q++bRz zsb(Xv(T;pWwZV$Gy6Gzu-f3e46gc}mwTesazWVHL(hQ~*#JRNoFqV2g;5ot$0?%k2 z((aSyGF2L?@&TnJe>6@u-3HqZ4l&3ACCsZtM1LR4~YvT(kVpEF0{3AN#UR6N88e@UajTOmT=-)&=>c-Rpy 
zwjE8{ma92$s!pviZQu+k72Q{xc2S+P@&mnZIj42OCS9Cbgg&n-K$|~y8mi|c9PmDc z*p!S5@ycZwJ)K!BDheNG*kD5ZjGDuNwqV@Mr8a7d8*M52MN5teXv<)!Eq_2;l>4Q& zyo9zW4F{yQd;@BX@%yE=oI_OUKwHi^Lj-ZjN#SiF!zu<^qdpCLo?v?_&zn2ywN9Ks z`Bs6crU`fNN_YRj-D%vNmhM_{=RKc_69&7lX6;I;>Q3c4jK!Ypv(pDx+O1DnwSF}_ zo)hg?Yqe%*k9}izabjE}rJHhf)UH52!>&)W|M?r~d$a`MNJn%@a@8Skc+|j_FCizGn*b~16QIXnA0PI~mwqcVABc9r_Zi;hHU3#F z1!r&nmpjFU_!6s+h|?U#nnT{OD6ZqZVn|?q3=_3J{fbPnwU`qd8W-BQk*42;+}#)P0ivCybaXVEWTF2@gpE*i7n+2vks-91^FGj9JgVmA;u(p z08yS2<<(Mo%^iCS7TTyk%+S&%S5S#uI{6i>?=A6QBVcYyLQKbCZ^ zRv)t5(_*YSaOJDjuv@!we)X;HNuEH-@jWFwJANqnWyuk}B|AIXN&@ZsdC4QBl3(*` zl#0=^6Gc5G59%#>uvGH*Qpw+~=2{G{HK6QkEY6PR5`DE1IQ=dQcT;lig4|bg!^i12 z7&qedE{6e_q3mFwj%Nx2z2oe7;}>=O===xC-QAz!TZ!-M_<}yfd3{^j!#LuUNv?Fj zJ28r^T4r9D9Zga=!bBu*$ToA}YJazb@IPN*tUBP$iZUVM(gANO=D?5woGLmYJi|~r z;5@-h!TX6(ueLMxYI~TRU{mzT0NlF=m@mi)t{iZO&5_`mb{MM{#daGC1`jzG3qryd#yc8NziDd0?_Uoj95(sO$lft>=HEf4&$Z+UO|F3G77y2 zSZOYObrJe8p#*%vc$y7T|GnJZ*>NCH#zbRJ387TN+dmdx4V3U-H<&MkQVF3bVR#-& zFpD1NG{lV=Wt72I=e5#G0V^PvyV>ZvNx?AJZaVlEi=*SO&a^5QN<3ezufqJikNA)y zXJ^|63CT4fICHVK%@cGL@cC&| z49b-9>HM%s(|`3zCSoMN08OIm2x40ya`pN~-j$3!$lNiY2;8!EPSTnHkIa~TaD^(T z`7+oIF|;ADN2gYD#_p?gw$GRL;2A}M{S2O44PV0F<4IU|h8A6rA_gF0v>CC#8U|FF zstna>bf$2}1I56G?J5pqpykFA1rk1vc+?;b((cAyx4yeme*oB`x)3=m#fuXW0|NXU zc2{lqc*+yZVK)qH8Bk@mrom5&6Gs&@U0f_u>B7hF1yr4d)qh{qh{H{)~(1&#cUT#P-8ZV|1X zJBBzdc4KlSS$;Lw?(=h|ctz5|S05ZF!FUinZJ-8?MTSc5CdL~QX_pHhV<-<`c4tva zWv(sU$S!~H-0BVEOdBcg+9jvPh!fJC-JJ#dgAg|T283KdfK`S~jT~ktkKaj7V^uBY zIO8+U26M0ajog_-fS;wN$XLb1um)<441QBjKZ*G$KM^m@>>DzrnVpAI zKR@f*ZX@qB4%FH9+#?+?747t{;y9BIaW`lU5&%&9$MMRkz z5DACV%)<%x!QKEatc}Nt4%;99GNtYA4BekrU5l~P73r(l2+X*_wCX?dZ*x%EkFa*T zI(h{(gvKESO6KrD_xx*HqpA;;&m~! 
z4y_2Z&aCFACcFF3+yK2j+GVWX2F#XnXjFx%l0$^mp=UBHRE8rRe}_y#ZeqHR{Au&4-LrbR`Ki2N?Yx?5Dl#8nMx6)&L8Zxkk1K)xl9 zje{&CnH%^1RA3?5Y-YB2BLikbNL;uu#{1!syWt#3C+HFq@O-V8lXV`FLs^YSX9 z1735aRMdl>Y8FoyZTGGS+OCVI8jMw@iU2mar_fZUub7n(k(-?xo~}n6yQNLWD(uEG z(s10RF147&XreH9bI1GI_Va0o{Nn7GKu;Lc87gIC2@~k!{@NzzVzDAji2M}RWU5RB zKI8PJ@~*3jJw){k>ZJm;Jyc-0Ls7wb>NxSK z1E$TX6<9ZD>83(jeIFG_tM3(BeIFOl>T?%#b-vTsmu_t#r=vA?KA+Z@C*N2jtufu1 zb5fcrg7t?&T1T9YR+0sC4IIK@#$!b`L?BORQIXNKah0#44r8vB87BKS)G?j0+Ejxc zF_}BwiYd!7RH~Z7CmPCUUXV|vSi0DgSo}d%6F1pqF>Q2oY%CyYp%ed?T3$KNS0=kk zEvtNab&+QyUCE%FR{56HVayD@x0^}OQY}P?&un7YX;gn7Of3MYdJC0VY0mzkl0qJ z;!4n`&ASzzB&mP9d>Fh7&RZJ^hdT#R)Xdk2KQyb7K9yvD*=v>SCXizKPz{ zC)c#OYy4)`9Ljl%!HM?uxzD-L;uN3FXZ%8FBTcBoJFt7FzNUPfxFSE+mV{aqTQbGP z`R}y`;4^PU0l_zW{l&y(T*1Cf7-j^9@P6d+n9weO(|&F~8-$xp(*Zx)y3*Z3{qzL<9rp3n@sxgjuO zV|S;x=46d>cXg)W2`~)y(gdumXviqm=Fogh-ks($N@d@QR&GZGb#>2(4U+OkwRClA zd!U8yPN}>VB>oIK2*#XsQm8F=G z=}yM$tnaqTwXx%6H-XCA*wv{z+{cL+tN;JMe1;!(7QY9$)JZE4>?Q-ZV zz)#;r`;-y=_0L1bn!cT?40F7~i5QpqqZE&@is^`N`^vGskH{MVW_+QjJwaPww;%>fPIiL973r?{xZJt9 z6}c`C?VS%5V()xqmG7q=!M*e94(y%K3iXW|jhO~x_U3>J=ub@5H-q{n9)vjnQO@`W z;`P=G3VY zl3HP3y(vh&QVZ%1GH}}4y@8lRq$``QUIYBM6}bwLtDz?s<a90$rrwrZ$axe7Xt-#UO|OvjqN{WKK@(=jn1jhxN!8rgS7E^!Z`a&H zP##bt{?ss%#C#^nLrzQbkO@Ls_3J1tBOnhMLGlpL^&^7v5L1}}b=Dl*4;gJA&XGnY z|FYVD|4t_hpO)zhV^CUR`eI$91#y!?A%PY0oM_+Yo;zmoH61WPvu|^3F_jJP3 z*xxtgBW-|hj(h1GVfC~ML*)zT%W`ndv699(d<>-d<)#f?or@0)sxar=P(kq&IUe|z zA6;RrnLiOcT`B1V!hqu+D>-%FvN_tV|=n!Hy-xpbwyZfY+mp=`lKz z24b>r5_*H?{{6*R!@~fRNn>~S5>Rnvg24rztd#VhpeRZ&eM7p2@H1&u6@Zuth=8Uh zs05E_4tPzuNgB1PkRTbWmg>1CgNx=Cben1>K6W^*QeRE=NiH^Qx;;Uv%?Ly;LTDOr z@HbEPUD+0>PfRgEkod!!!^bU?Y7?kSgur1p+5Qdf2=i@${UBJU&Z1zQW+tLek5Qds z-=uvV&~V1<@>F(EL--BcJnbV}nhaIpw;0w_?TnBY=Ub?Z=8m)ZMDcu?PyCRhxgt#b^!vb!q|`&3^5Q2~L{wrI!?dg@=l^^G!Of8^+B7dNW)9(|SJMM7G zbYwBYwVOFjyO~iG=DXsdr%~xQg+ZEQs#0Wxcw`e)8FGVhgZc7DPDGk|Hq_P`AK^}` zHr5c{`n-@0=cZu(4L4Tr$qgURjr*oGt@>4qvD(q`aUry6W7&xJ+E@u`(oVz2=LJt@ 
z!agDXNzQ6LHeg<30QR3OeofQKA;GnERnZl3W?8hU9W%=0FBPeB`yx@lEknX2^d4&+L*kS~dA6 zdS=|XmO0Xl!wIgHwQIa6co;(T;fGN_(Y^#Oj8dDW!(>EYp-DCTcOZMu8; z$uP0{VgQE_KZ%nAfvbp;coOj-7AJIfo<2nSsp!~smTSH{>W0KYN5zNP*ISll#;}TB17@e+cEJw)PuM}M@M=A@y_~6qG+ylW;dcXaW7dWT@3}SS=x!P zAAQcp>PT|`Za&9%caGn0TCYc>?qz$gb^02DGc;0?bp$0HG(Ty?zCxsoNxrk-AZhRV zcfKBUTaaqgmm4=;>pTGnREzKP^)9mVL9~#&!cF?pYn|^kFde9yrhp{iT4y61o3lcyR6U9MaQH@;$U0@`;h)Zumeep3L9weTdUn z(vHO4G+hmwQPAw5O=m+BD>Fbl6h6UFW~$JatK&>%)KimtYY`@mI@KZ+wU`LWMxYMk z-N&;}{6-B%Kn6@>Ffm^b`R@Vm@1jyY!B$-lFQyk>gu6NW$ZxMvjf^% zf*jEUIX4QYONC+;&x&mt0m=lLRL=BDY?E0ql!Es|OHLmSl$>1oJZwahD`^DCPFaoC zOz{!5^B!ern1T?W?y?>y+MG63J?qF#(eG!vrjOgKFx22qyE zfPMby2=x0wC+48;ukd*};W1XFNpw?zrR(YgyLX+v-&Q3IQy^MB!fk}%%&-q*p=}ro zyk^>p9cmcq8)S%$kg9A6=o#VISuVozb< zM$%1)G$J=)_He|L1lHzoj0@$@lv2q}!F0h?*E$CsPJ$GAjpd=^oPAD$1~P)r(M#)) zXM_`c2x6DCPsO?#vn98?^V|J|lW9%=cu3OzmC>~?fW5FXqI%~f&Z@@BXx$kYMPm(N zu1UUs?cASz;8(|yR)?G927F$^yq@t21IG$H+Hu}Nwt=(3X#raRCmaTsN;u%34r3zI z>8prCMO<0*ibVe*HDOia@Q6ZlgbsIQW5o?e&d(3UN*I%M6gz0Xjlp~~iw*~;f2@TA z5B^14@+07V8d+0HNOyM{zFQUD-MjF;3*Q`kKbCiQpTai)Cx8hE3SojNzJU@ZAj~_( z7rC1(1RjOe#5Lc7og^_N8}>=Q2XSqMEwZ9%2;#;+=erkpW|lSutKj|?N9aAD03$L?)<{L{0g3hQv!h|-a+p`}Tiu64Q&!kdeYtAM35KRZs*G+v4{ z6efQBE!kvtP~;k2Y$;iNCN-IHg09n|ZHd8IBVi}MY>VPlQyokzWFuBfw`rF}kF|Ze zRV(~6Ac+kPJsDF$QDVROTq|nXd3UU9PW=k-{(o;x+LT;9q~Zjuug9|H)(_!Zx65+k z1T?g)(DEJhG&5iy($?|;-cM$G@jDtPf$oz2pA@#?|8TZXcn$wYur00o1oF_Ej1*PU z|C7Qi(to0g%5gQC(bbEsp)$nK!q@SZrG9~4{RMyfLj5z=@Rph4BjJhgR2q#~YlH_v zzKFe@cyuO9o@zMkiOSAB2k z>iRhJbRUdGpJ`WF5y?-hvfj~}iD>jk>`@bB28u?18PWR!^4K!VcP){K$_)KF#io}e zCMxUrR%Vo+toA>LyrWx-g*?I0;C>f;HRv^XRU-D64xB0J14CN4Ww&uTO^EwBk6XAx z9Eo><-#qwwa*ryT8FF8rNC2F1QM!ldB zKf9#%hcIsZTY!JHMcp2vpoo?xy!rT2?Y-Phh`GF4>3-cij}6gZK1I(zxYW_AfL9Bw zqw*x=FnSZ@EzL;yAMvq;e<@?=Dz>}LmZ|z5OHSag@0-^dhWQH z*C2XPCL%O($8&iFt_i;8lml_ol{ndQmNna|!pWAI4@OVTyf;G#ZU1MOM!W}65Qa7> z+QV3>zgA1>C{03B^i&7#-|(v0VEv*~Lvd2m`tpb*6M2f> zQ9LPn2aA4gy47GU^LuzL(s>w_bK(16mcyc{92QOQe?;s&IxplGb&=BW(Z6ebX!PqX 
zG8b1->fgz7#nr5)mATlB{0MfX*8V5788$uM5Xoy1UP%0ObtX>u&FYE2^C$Ex=HR$E z{h!eL8BL0D8oeH#SE_M{q!+4@;s?eyjT0UkrNS>aUghU*=cdqHXlcn70&%@Hm@f)o zUXA!+1ESugYlYZp0l(s{cWmCP5WdQe@Xm=G43Cz`D_h`?RC{NlAhSI$eCe-s(w!Eb ztdDc1ZKhm%0-XuM)Jo4RoD`xFzxr+r;=g02sHb0TzTT@iEF2hfOxVzut@RY4ihPM{ z;XU@+JeC#-&ndcooQuZU8(h;Il)3eeq0+vh5x0If6g-yXIUIJOwYHy^uvvW5v1J)z zcQE2-_BtNAuqKxv9fgffc9V>UA1cS!^UElXv4b5x87O7jY&I&P>D$FxU}LlBa%ga} zPGadsc>BbcMU^)z)$UK*IGt&6=`?yq{+Px1Pd!5Zl3TL*9h(rd@+cy^8U@q*nXoG` z&3no^;+2zrv*)Bgd#|b~SvU@#T{f!R#;?!zYe%Y@B5a?u$%hTCNUZt9pU`~NpIw{% zU1-?RwiKLhJyMH$WsWZW2&Wbu^;_|sk$dbi!<$%sAEYjbB)9k2W%F1D8x(HXQAD^# z`rD8CpJfVfp6n?6Y>&1LX8Zh)n6~(}>H81~dZ#}o`l#Q*kj~8gB-b7F-^w5Lo3;C( zb*lA8Mkk=0U(tR-2P1?X^gy@Hy`pBPCiwMe3{p(xUD#ga9j2H=jG=2m| zE-7pzcE+J$hSm+xU9!+AR~GDnk93$0PgBF9Did2f1jt}Kfm51T#;{I?VW$bdk)VES%{a9OxjfQt-|sNd~o0L+nSfcn>nK?T5uxLOE#jv zWHdb{9Bt#Y_wcO!!;r(?D^W-N;U?{=Q+{R!&N3T>nHN4(XZuw%2EMEj8{3=F@@)7T z8$2QHMa<0V+e=5&edO`C+MT22r?LB^&VT#n1(`wMF>haM=i})89DnS{Ieu*d*#HAt zI1`^JPzXt`fiLg%+q83#V&;V->cN%ax}jCPH5(DZ72bPToHedIzLL98eoI2dZ35?G zf9Td}#D^~?ZMtCFzD#z-kte${>#?J4D*l5|c%_*Vq?veIDEyJ-Y_$Bsxs_N4>C`9C zEhG;pXNUoiQ;O9#FB;cce203L9bs&KGovxR6Q0ZF=R>161an~p$|!Z{$pJX~E2dn# zf%1oX29(Plq${oTG_F$p3O6yb{Pqi>vKz`Zq2%`*Qi^!r_-fz{ox{H$5>igy%c12m zk0U1((wf^YgmN_V6UsC`$D@P|;=!AcHs;ZJ5nglT7i}>(HzjdX^i<4ET^JyJ;kVU> zm&v^&xd_~mC;OI64X&!whm<8kj>&lO_eri&%%NuGn3j0g(CYnf?#OBp#vgBi*C-^@ z>g`#hkALcSXgik8t}n~Fa(swnKaq`5`Vx`yzH{@QPcdhrHU8Y_ z=PmyN?M`0BweXdl$5migc3RZ#ldao?d!<>KR82j0 z*C?EpTtvD|Mme&J1S14kQq^<^u-$mpKSyr?!ntXnl=yHg%9A>yh9jJiO5eqUd^-nc{SJG>0Q&{hm#IEKOF<=t#5RS%w@TZSYJPj5@8GS6+9p+# z%sc|~`L%SFTQBUyYMvu7>`!wL^;G7et2?BWff{HJya<`wc;_1EesfBbz$p)?ysLit zfdQ3AROE{tthK!5#>y4dA=DPwi^iXmED4GH$3eW0j;Y8?)%Wwn6vq^V|3{FN`i_68``-8bCVaJfVGHrY%Ej3GeUB{%v~&sICofbdn-g(@uTe;ZPvKBOfoQQ5 zF=0=bJSvQRiq*CwOMY4ndA(WO=s>J54R}q@-W59paS=2LZw6M<-1?U-Nmxl#S3P_b z*_uDkLPc>;Qu-V z7Jq6@w(vLT^>kQy)(fGq*b8xI90;wTi0c~hR-C;+Gi6RzWT`_u5a{!7xD+an`aeRi zZb2Ivm%&LC(;84r{L~Q4_p9FoW|04zdj&);l03uj723aHDV|o~15W{>#!!n?(yvVV 
zy;nFY&87bW?oWpdKA{O7%evBO)Dzm^&tlgc*>A3dXFy0eY$sWm_7mp}#u}!oVX164Q!=B-$D3IW8!`|_0OW(lU zd8bW-Qy0~2>pXrzROLHuO0K(e|0Bf;y^N z5OtmnhsHr0Q%3rbrc7BG1wlB zRy5WA`53%JlBz0}#4folp>jTaK*VoK<}5h_D{i_zUlOu0sRj{}O`uq~So=DEI(uC& z>v+E;aK6l)+%t+o#Y>7M+!D^E*r{k#v?#t(bSg9TT+z0>R%?Z}`xnDM4wnX-% z>?zqQS%s|c`3ZLYn|9Xs87&_z7eem}y*G4j=)<8@%U}CUl~0wwH~2C6WAY+=o*dL7 zZ;?0QD73)mhtSwTxJNgDYfu}H0r%?T6idY^kl>urAlKG8sChwpHFKL^~; zZ@Xp0=l$SL)30gD?hm)L2X1x4**OWM-x^|{d3Dz!_ig{U+WiOj?e06CwklzT{*3iG z>s@}XIcLtjevaxfgNcteP)#tO)uhjZt(1l^S}`=?&jZ8WUA!to3(JG75>7gk=o6J) zL*E}&KdSzvx+%(nPc(~W^gnQQ5L)mvBqszkC#()HI>I+UA%xWUhJbS9_~rRnANMe| zm~n|;UMC!btgm+2F@HA8#V-O$&rPb4iMj&>fh_gR#b+?*GZSRuvf?p|2gioHX8Ih% zepr`beY2gL2}{1>=4F`)G{40bnV8df#-E`zJ>mqNa7%Gml}1o*Mr7NFcv~gnYKB#4 zaH2$pcA`xyc{M~Rg zLRi%uV7kwdhS!pOU?g_y;2l%l=s{m<>7JR@in1S_|3)8wI8R9S0KeQCBK>kY!n|BK z(-Djuq)llCPo<(fk^P{_RDcD1z|1j<@*ng6P3fQ=(BMh!F#9N+dZaf^IV~>L`sIir zc+5YtbbCEoV#Xnk9U*%Mj(35@lOf~*j`~R#kM(X)c=@;`vW zgf!9!ImD|a)VHwmc9u%NePc8;Yp~FM`Ygt1=R-cu|C|{I0cVS;l z#yI6;odw1Tk>bRw4vZ7^+ALZXm-a_iP%Q>f=@==EG*a-SD(M?34spF{q^R8QrE<^j z3eJtV-jR~Z#YiP#q^MjPDU~!*Nll!IMyVe9D;lYY29|=pH3zU9Y!9UQdn#dw9rG8d za#XQIajMeK%e^{{gtM^Msn~kIPCI`>m_gPt$-yPdAn`JHOj#L?-bpEudy}QvPibae zwJ*9NVK*O6Y5;CiUY_v`aGSFES?lS!_xP0(mSv)$X=TPw(1TRx)_UuOkYUdy$bSk? 
zKI5Nd3h^LK9yf^_HPIUR#Xa`H_Jz!PVcH1U6u`{eY`JtCHYv*0%ek8`xMlXO-ud#- z)O!&553qO!>}+3jRI2~qZ$CPNTHs*MSf91V&xx6H4`h(&JJUm+=pA!#`rZ?QO#JJ= znLhP%&O5I#+Es^jg9voA%dqawsKfpHI2^L|aE23en|2|(?lFIOGqszs&+Ia>OHTe- zgG&0Kz5=M`%Rd#flsbWlYnxaK6N{zaf+pKU8N3iq6+_kwOOpZU1KtEFFu!R=jN(~q z4DM5Sr9A|r4z3W9`+?!cDB4S48Dd{VyUr{c1Ee_vT@v~$Z@(oRJ>m#ivHaNtrF-}C zJ44^MHcseMXGbxrP}_BRN%ys>L1so z+W7NSu(_{Bs>9c%D)>bzvv)00ZEyP2@6xV(gxXG==9FSu;dd0XVUG)Jy}C**ZfJiU z{4g*U<_r__8vD0nPH}+t2=q*M=U)#-ZYD?*z*jiYqD<;Bt;`;7Za+Jn*2sMacl|g?U878+I4+%r6@SEwXm&K7@T5T5!LO-hW@3W`&p_ZVpvHL;C)jB z|Bt4PIRDCl8S!G_Mw|iL9BMonxruth2#h%xvK|+-sPN4w?CaNWFHpa7qN+sgGTJyB zrJfzP20LhXF-~+?Lw){Sp$74BIDK?36aP>MEPtSXjjlSWmT#bzGATEW+|0sYS(5eIX7OH7GlhqP>wy!)aPB15e*MiO-Y} zhLRC1wr;3YgH*CYlXWoEVt{y3eNnB@BxuHIW@r{_D9TTlwo7|RgCm5)28WFbi>xRU z5YbJhwuQx2S~5FsEr;9)c8+_gjaMfPPgs><(*ydk7Z9ubp2&eFnRlFym0~!MiyOk z&>2hP>40aiO7OWkaUsf{!O|CV)^Jw{B7`yZY1|XHe01GRpEV&daw%uvWA#41J0V{H2hNz-edtVEWK87dGSuk}$3*mRJ|aYX z+Lk;lZfZHRcr$6YAUO=j#5VMC{C?<6D#3LirRMn96%>EH0zHwwXo4-^6QlJ+RK~2C zUmsQ#Z>zA#q|q|eYpSrajYD!}9bpcpLr?Q8-sU2{o7(nbUkN?9gX{RLBpDhjnMyWtvQOLtt6Nq| za}s;vWZ$=YIB|AkM1^|Iyn32Xpe9eLVBx2)$6Vq$gEp7zI9MX(9yQt5BIVxJlY7+tpd}&X6)v}{)7A8AYw^kB z!IfzCIvv-q#EYlrdwwlXPEVe0<)W7Bb9{Gt`mg4g+MDA}_s?^fel>?Nm?L!j$)D#K z{Hr;ldvhdw^79-=g-^d`O?PgCPNa8y*Zs=)UMM~)u#_$5XgzW_MsC)4q^t1~^#rV& z!Tx07I$Ze^F}hCiYCi1|nf1ZFNF{E)^tw=5?>MK%{JATx)DnJbqraAygHutqaE2u8 zZd6EL{yGPrG2y*Ko%jA9# zC@(ZNsqJph+3U+m?M($AU&H`u7TwMwl=G3Zubgkb3g!vvDQAc<5)Az{00dd+OUDUXxA}lW-{_B;-&8gL{irR>c4A z!Lam{y#21Yi6)~u{Tvb#Z*3&0%?c6x-g2^vM;X{5GV&_pfL-NN{W@OW-YoA}elTvT zUony}ZyCG<&8WvL;MK%NXoAiXrLh*f=iSBs8RIyMXzxirTM8_EUw$N7Nz$0);*I`4 z;#^_oO$0S+IBSjztDsiWTaxyuAnjqM09Db|`Ah@JwYoZAX`K54c$D8`k9tDx9j+nY zMo))x<6RE3Dj5XFNjKZacS4z$z19M4JemeE4n=`ftr6`r1K!Qoh&p)D@w$+C`3Fa2MUWG^y1!1~yycE)$iEmy zE?n!PU6n;|-qNm=ILFGIt=?7g6hv^kw~*+Q!Qpp@)Av7pZe$A8Xw_8J67`dc-%7Vc zU0}D^i6?xZhYLT5woP=xW|f=1oJ;4#an90Ycvu|{VxX{I3m}5M<0ExKF+1n93q?{QJ$CyD}(!_L^t*B#J&5Z0Gbz2d*^{-VUU#$MsI1wX*iKb6_)~tK 
zi}KizXX1gGSCga~p`6R={9yg4)FJtbj;~E}eUtBQc(hQAvc8nv&cM57ab!VXp4Gm| zfH`PC)po+3d(PAU@p~TVZO2Ho15#p?64z7aYTu23-PS-U`90I690_~A1O``VQNoxI zBO(2US*wxXX!sk%uPQl<#vSuLqqKvIp&5xq+f0D^~lu>N-}ilzdB*?}s|I z-43pJqm#3vj|{7QV%;AY~?%SaT6A$$VYF_&hozkxo6P`Sc#N zcj1XG-%t(zfgFJ+!LbUqU6I%CP zt?T0+9XMy61Gdof&s&nvetTY~w+?qBxurQQ8&y3;{3BT&~V@EZx%c8c%J zZcemI?f!K4XbIZ!Ux9XXcmKY;>In2@Uk#`D{sK4yBshQG9V5Z1`xQ8^0Zvc~kb4~Y zwc&;sXpKqkfCwZGHS93hh9?A-xa$HWe^|NeP*=T{TK>||sS@86H5xr?;yV89l-Vgl ztd^Z)txRM;FgZ6|JybPRwMV^29l;IZZiI;b7ETE+!2vl~tA|E+!R}+yoO)9*h~%tZ!wge=H}4K` zB_YI>q%e5JPVtS}O}q)p0C%$b>Tr~?7&s_^6$>${iGXusLl7%oUJ=Axme}=J)seg9 zb~~hxj@>bK)ZuZvakWpe+YWrev~+)Y1>p;(kR4nTTY&UecZDH+xSIu1qwl`k6#@#I zY5UUAKDsu?4j$a=z)9=qbz1p)EozK5e6nlrd0Jbq3z6tEmg2N@`;P6xPL@LS!4%)S zy8@g%+DPpNFF(b1Xcy7WXd|RQtFP`ue2LEe!QGMTxZ4q=&@;t59Mlt2d|P+XF3D0X z&}2>VRUwCYe{hV3;`A=+Jx5Nna<%WpT}&L^)b|8FEO;`Uo=}QV!h=(;JdI}w*F9T( zB@fSHATJI#j{iI$&&xQWLH{kp^Ux->k#Zs$i*btYfnBxuorC8;HmYIQ#~T?%9Z9K0 z>O1iCQsY)cW??A`eYY=bR}s=$@boL`sREH;{)MzfNP9D$o^C8MD%=scGoe+p_k&A7 zPMLm+Pmi}BfL>KxHx>$MEGTXfFnur*5#(vt3V%6j&*yN6$K`W-q&Q&Fepxeo%^(HnEHT>O>_RrWEqg-`m_MVl?E-#O>mLcZB?R&ZrHG3R)dbTh9!~N3F=uL0- z-rq0%-Tl(v*)RRLp7h6|9kBFEe@nmgH}y-O(3_r##(wFC^-G`BFTEm|9{f~Hzw}}K z(uej-f6)K6r!m|3UR1c z3h{QY6r%n#6Ypt^+*BseSh^fEmP!w_j-*%Oxd~c~`HK#c**|d{fyPt4?ub-POF-U@ zk+!TlJ^)WIw8)abZUqkFWyUAmuX@9jfH3-V@aCVnmwO_s2>RmF1Un9pW*wK=)y8>l z9yqYp_@KM5-|p*4d$*JpUI#sT@7vv!Z=95m@^m1t=r((3R=t*WtCX60a~M(phsqVH zV@@E}Nk7QOp0vY%206)1`INo+I*?Cv5Ae`hc3vm_%kGu_DbQyJ8?=(C5eoWU&qF`t8X>>m#m86$wn z&EgzLqbSb7edOB=4>OrGS8~r0ZYRt>ar^xe*5s0Dj7hIZ`i99Q7q|}B_w2in3d;#& zvumBJ8-g<^qL+(=wI8B?eX=LWM^5(rV^5GfFK`AeLP(nB5O+hbLUAKQHbT}RgLO)~ z85DLlTf4NAbR@IhkB7WNInAG}Rkk-Py~`KJP4g=fz};$}@0EA7XAiO4t;}Z5iAB#* zt;PX=r|^x!26zvq?Is%xCVqkaXdv5Q3A9k5g|?Jy!Koly+T|QHx!NY*zjqN8alLMo ziNZDRM$ZAQi&hz7w{UG>2Csqb2fGpFz+()0`uv}3NueEwY~Y+b5?;fNh;iU|2K(SL zBuOzgwGFzOj_;gAdB{L_G>fgkn1>FN7C1&R_#hjNxH?WyQ>JGVpbO`pSWeL{5sF^0@J+hw1U?4ks<>gE%9=~W<4!M@ru#EO5*#2n) zH0IAW6@lITb@|k;)^WLDVnCWd 
zdXi6#`OzoMpXB3q{T{0ZHa3#PtjqVxp1?jm*;l^j`h9w`Z|xr1r-_DMb+us6-_KLc z98&$BYoZmTv6fmMKTc1dpfl!sYV=F_Gd%MO0{VeTz9ZN>6)dP9=^!zVJM|M%a z{B_CFjOah&cI33DGpv+mq7YbnxMWxI5`0>1JHNu-Gq8*?a~zeSw7+73e!vjlD|5O* zs_kX)C0(7OG&VK~SC;ii;?oIN_SEO*ox{!tG*{O;Z#lFc^ZdlYKpV1p_amWqKVlsb z(8YCizS{`AWSRzSIoWq@8}+rhp5~`flCOtIWALU_{$8S9_?UXiwWBcrIieTx4gDZ{ zpxcG^M9THD^jqH82%B`ARDNNWX>nMGw^u;#Z#yT*dNFpJi9rILH>v zvG&_MbR8%C2A;G^^s*|4s9*)Z@GRqJ`dB(?TVsX9&GE3yl1tW((DO+!KJUedltaef zjvT?%Sl5zYA$P@*&XTbTW5_bdJ)N){*;Xoe?4gkN@#eKTtew-S_)(B{^JY7Lv|lac zyzcBvDZ}{Y4w)iBvbL;W9^m2_c#oeP!BVbz=xhP-StwsotCZ{~e~UWjymF+}@IDWx z3ydq-3(d8!HN5Ih-26ww7B{u}wbnPXD-!A#Nj=BgQDd_fQ7PwlI{%kTEQ^s{F%{HvaI{|&4Ge3_@qec8x(@#aQx4=Upn-^D4CEZbPXJ7sl}Wsu^47AHI6iT6sgTxlmw@Mi6g-}9gY`h* zE*W=6g?QW@`G$0>R-PM^0BkcOZ!n}~W^tVZGcASYG07@8`$xAuZH=+bmgZI%@~(qq zH(khE9M-N3{m8HI?6r(P^^q-7_{ZXp`~w~DUcQ&#Z&962tk^G8ivDk#+e`FWS+Wyp zXJ=l>QjM{hg?d{lysMa~bAH+;*Et2Hc(Bd1c)UjsI*c~4wY4n?P&e^0tg1j0*KlJioQ) zYAC;{b=(V#$?*Mn9b83Q5~fVhV2~Kkc8~IBi9MItWlx7f#f{mjd!l?DdGA zym>E9bYrS5-vE+1IDc>8&wUfXN_k(?{<{wG?S}NDZK*Ynf1^W8E3yQk(;peHjguh; z+5pK9R0;kQr=)RS_#2MnzQ-@ewc~d*V!kUp-{5Mpbal41{^7?_^I2@~Lb6}TTa>KxGg$*ah|=T@z3lPWGYH(=|dikp+0;36A{ZSXnFg0xS`So^y&IbzT$#bsm>0o!K8 zL2A#E$CS$ppR@+*}DoYrH-^5hs z-oeWeQNU4Rx$!ieh`+$Nd?lueFDUDB)|J{w5psp>Vr~TJC1s(fWe=VvB344 z{u$be`VWOcZ$9wIn0SIXQ`l^YIzGW3YX76bjR!b|&` zHn+vUi)FmYFXx7}_+OhPZ=VIP)fw__xwXr%s^x1|*ybHw{_4`$INN;Q1}ag?wN1JX z?2zBiA7+cSKdD!>-2W8L;8~zwup~FiLv;`v7;np^To^Cd%;*>JHjQc90 zrZ!I&rGL`Q}g zmPt)c|BdX!LY1Ix(7pu^P&`8_VI+S~LLg=MucQo|*i3Lsk-J$bcb^0nbz+c!QX!u zEb!z8%s)OhH*Xrf*Wi6o&wJ*Q>8NZ@r}S_WIbFU*e%g!^7*$O&9nLvN4#!)Cg3OTM z=f0D-G*a#sxu?z>)(g|o&1c>ExSE(z z(WF2P`{En8{Ta+%?0fE;o0B{L_e-BZSoYt0nwKuXyjsF?@teyY#IKFr?*Y7d zGnyRB8Gny-4O*LlTjAz3u?q=0#AD6Pn>O>pjCli1?84bRi|513&HNPnI`~^LizXsZ zIPy0m|2TT5O~4{n6_qbgs=>BT9j>q zO<9&#?_63gpy!g&bFwnTP%8CjYu{I@TByeso~7s!^YEL*es>nN0bXKi)QA68knDng z;u!Phh0euFRM^t(TfB)Z1JYu_$@nb@&0>LFU9>p$I&Y+)ztaDi|7UZH|2MRD24GzO zWlget#@=?qwzKvBeg0=Xked@~-TRHQVxos}%@-{uI}cdTOX)*_8~bv6~Y;6s8wlq+6Hd=q9 
z>zT1!NN92i47;9eZ^K2!I%!9IX|2E+63$@t@+@Vh@A=EOK zgZn;M0xhXuzqG&-(Q?AZMvCrg%O&>;F_E1H<&eNW#qStKagLIhyLF5WQCeAmV$O6I z2zzZ0{}83r=$bo5aEJmkpEyBt2WM{XP_Wza=2)9TJGJ9({@sqd*jxdptw)7<6Bt*f z1N|3U8Y(I-9)Jhypfiq3vm9X^Io4yseSXDKURb#_T?iGgw(|nhu^%Fyho5U0mz%d# zVg0^^bA5naKU=`wKdS`w8qEq@S<{kL5^Xb+D1iUUW~oVt7D7G5MpFF~aIP=InLvSl zc^>@&;|rWw^@lSPY^Wcw+g-qJ^IM*>X_CNrk9&+YyN`R4i=Y-!FnlYRg}M&4=l0n*IXZZs~}Abd5!KeE+heR#x#tF4LKP z4t~PC{R^%B)VwhC$%(8F&&(16t)kHiUM2Nj4ehz27$O;{^$KxPzbDL9I*mMlBi%4% z8F7z)1fz7N)2#jxv$&h7Zx3Z%ZdoApasamQ94M~dxItXM-V5%knZ13eYH{}KLq$`w zjiL$BPAk5(#xm@5K`b*)b93#oaIJCtiNdTkP3tV_r;^2}$gPMSlWr{>@@jjed{=v9 z_{Uxr!iK*zWYG^%nm2}MC%5%_!rIr`JLMTi8^9SeHX37!3J1 z_I^PM;CuKGZl z1zE7moJh1S#ZC9M&`#Tz3Kj{Fr_-}c#L3j;aENdEQ%l}7ja@EGtmo3@f*dx=WT(uG zKeWWgG2M{79-J_3_m$A+6v7kOT@eYm_A{)CB1BJFMzL44H!kCTVwT!vh}>89?&aoe ztxvI9=FL+p-Ifl<1!5Or)cG2ZVuzxP7p4o)!1_5o@0LUMTIS#&f7s_=qGh}TJM{|= z`3}W)Md?&Ty=F3Akd;jrZbJ_1zcUj)cRL~PecEv(!3>*mIphx^J~mQ;SR&L)HXQs5 z{IR>ogm_ef>LzZif0RS~gJXzYHJ;&2<(WwnAUiH_yp)tQdN3jhC`IiQ-IOU)M&L_- zqXPeWf5$Qb)D+nOj%rEsm<_z!VK4})CIL~H=xK7xFpmI>FS<@>NyN2Wy1t=hFs>ES zwYDW1*CEoirX>v5q0+UYCB$P8C@1G$ZOHf>`q3+Y+C%Xc#(aJb64r7*)0J8@0`&EJ z44&F$bN@f~-aS03>RjMndv2LrAOi#mF~B552oPXEl%S|XNX7tB1_X^7H6U8FC{eME z^)>+%RM0^Y2Ly>qk9Y^6Eh0*^w56@K0Z~!$I7tWzmugUiR4Ms>@1DJ9wxQ>op7Wi5 z&i8!bS2y$Jfp(Xqonlm_1vt!4>e# zUL4r)-7oG(hO};#_RsmOsEksNJ;C=kGo0LM?R6fd>7%1kv>eybvy~Q}+qOt?(_~Wc zd)`7^=E_gn%bF)QNO{_>=Ppb4QD@S50d@)6we$atyV@!i*mC3q;WT^fUzt{>mGh>> zeYRz`=WR!A=JTz#ONciTTZtIu+uo2^Hth!-K?ut9jTD_C3D6foOYBq(%#ii zUj3A3sx5Q;>}lt=_yS`BSF!W%=&+%IH8sC6W}jm!lKB4XiF*6!yq43SWwp>xf7@u+ z;d&9mMUAtW|2)LGD&OW&RrlIPuV81{-!@iN-8qgM!yaTU=UrO!f$11Ryu0Le6-8@Yj;T?2h*DKM^rOJ#_S>t16 z+IOwe%P3lM8!hGila>~pR%+mBE#0Y?$)_cs(b82PRvL7;-JnAybhsTlWSmy^(s)^u zW>VIqnQgRC?O3yLY{Y$j=Bz6?fg(KPX0D$V@&AMD73^pxJXQZ7wodQ@v!sXz^&}yvc80JrzBjl1aw1 z1l??-Uv0Ln)X;=3H3yJLm!<_2ez>K#0wQpLg?){JXH|{jp zg8!*hh)fKOir#DePq!~w(IOT?MiTTA1L3qCtA?e!xgzO%`?o^fgr$>bVwYx5v#sPd zUR9kd5|PQ-olvoxTaPa~_RCj#{S(P(VZ~VVF2SpR=SJ7q_-8YIH{Tu`E1%l%OxZof 
zd+^M-3%2Tvr_!n&NAFDPc*8l|#;%-dYkwB- zU*MyRQf&v``rx2vWa*)LcqH&*Q7Euu z`!@l6g}de5hkrwhe#zc!$EYg;azedMA2VOm|G!52@xO9TrC+U;Tb^3iOWszoe{zqh z#A5fEzNQ-Vw3+(f%XweDuZC;GHTfN_jn*n{l9qU_{h$B4Ub~*W`?UMC&HS5ddqFL; z)o}(t_U{F5NlpBF*7mGz1wrnZnZmCbvtry&lYjN#*Eu|Af6o4z>ZfHIdyh%%`*|m$+ndjqY}sDH4KR1tf7Ixn z@C!8crf8DU#$BuueAZHW8@e!;tr^0BTa}6uWGqYJYtOpv<6cmIz)o*=mAX>CFV=;7 zx8?2wf3mwMbC)+VhVKj9$#*ZbaBjpF?jGSDxRD(vp5W^jWp%Fwx-8eFDBls#0%>7& z^zqY1%Ctz5?0@S0{Pn<>26VyV&ZC>`4847zy4seLUTEV697oQ>#RH>zCVUT8@~-@I zN7FUAS5~NSYl~p*Z@YTFoZ~N#^_iQ#TqIktN;#~$rqG^0u;PH+`(}?)D~UDy6lSO3 z`^bUq#<+W{nqNGo;@P7!x$vva@J3@}g>#qZn4SAuFRhUot+aCMf!visJI>ZRTw5P4 za_!5F1ltENm+rVoeY>TmJ2!ca)8F78Sp$_kBLhbR&bD)zU8}Z?`3`Pz`<3B44XSYJ zzEcW!&Z)M6HJ+9EfsvH;>SggmLc%5Guy0ko0rqFz6Pw$nCUjrv zSfw1S+z4|HZf=00qzTw zZvkroc)}T-?pqhI<6J%DME%1hsh`d-;Vl?eYPyDn)57&H=*b)Pa<82?Qp9PWuj}Q^ z=HHQ({(+2e#(EVtrR9r*-MHJZi$iy(IA=)E<$5r6vtzc#xxdqYvz z&$tEfaEflM)V^K$cfX}Q@1B2;f%k90QMSdt)Rwp$1 zbzo4l%9|D4O39R?PKz%0X@0Jx@Y9z{&NaAZ*7!@{63(a;q5mGj)?3)1iWC|ggeW>rb8jLh(Y2EyOu98v|d{z?aMsbALuw<{fgT{ zVaHZ!Z~teGAClWBar<+JQ*FdI&Syq#)Hre7Hql6@cVkOT8l+YC;S}~8uz}u=6O+le zInIdS32;W+w%r@(rO%n2ZSS#1Usfmkwwgm?M_NQ*%IaG6JLNvyFmTzjz$aMGuCr&Z z965WcK7x0dol;q`vK#m8r?57fqo%GJG5aA_i!U?Uj2!&p;59{jQ}JT$SZIVdoUy6z z6FHk+3N$b2#!g6im>rmq+`^&-@}a>mxI^$~te*1qa3y=ZPImBQhqqj}62Abx@^A%r zgm#M#+9<1+e`rCcQd6Sc0)3x|wc>26JR~STSvzcO$yu2ZSv<4bGdJohh}XBwh|CNOTp0<>qAwSChWH-apfkq@ z@Xm581CHn`hn+U%)$#@fvciFHI@Di}pR7uI(&z6Ho^V`Mr5v^=*L`DGu0u*zlzd%| z?sfNPtz$S4D(_i@JVHOh00Je>Bb=MN{DXsia?+nTlN*i8*_^f2=R_9V+?{(!uL|U? 
zydGHG`IgY!w)54)d?jE;Nyo8yuRmVm z-u`IO4fP)!>~izmHdSwG@~u{tl=@KxjUc!r#aUb zowGYP5_#*oLp%NVf9HF>uY8HFE-ekHe$IbL_?Twb!1 z)xyi6QQk7ucJac$A9(g~TIy#dJ*BNTTOId*m-@P3%>9?o2sd6noZ^($E{f(xW^e2h z$uI5`&L*C@NpQcWSn@_h)lFP;6&W1JTFco!ZAPT9*x9C9`$mQrXFcUVv#$A;*+yAxnel6{aB)xPvHAQ;pvqrv@)@zmK zyKd!~;ogy8*yFflMZq$4bmW2{lA}^5w zi~m(s&oHAA?@QfQubop6DJ>bqmrS;AdXZB};UdTG3f67MRhJC9soowa&n^gORu2ge zfTyGCheRf~g>3R=*`3=fb7fwXQfxb1Z)5#1_cQI9m(5oI=e3+Sqcc{%q}%ev+{qt`%5ycaAhMukf!=o&r(-nl;6cN= zjX_|UE$M-M!q-IS&{l2e&Ovsgtu?Hn<}UBEO!NKwK~MJ!0@BXX)(2I=`XMh3eR%@1 zWPi%R{enYR_6|?kl#TQW@A|9}4(G1EFq*aE*`q#J^01+MugWfXd}7JnyKQIp33p@n z*r$^7)Aza#KbZ6mJKEaei<4fUJTij1w z$Je{6>(&Q0a{?oGxhpVqy?0p6biSV&+Sa>Zd0B~LcTROx_Oy!quC_fx*Q;D#@dw-& zE;h6zP*fG^ox8dy+r3tB6I;|L(o?6bSTDkee!!vz4p{cv7NNI1g`fJfe#_o$Q3InJ zdq!e&gu?Oe?dJ^&)=wF7F5TevLhJF6;B;HD8PfG(~R=lmw)m>be;Ju4#D@Ii6qi(r$rgO7+k|ncIi( zM!~;@o;^BLE0ePlcx;{n!p9K-+dj_uIkc@gk-;x@ zfg1C^S{IPk>cj^g>&r_n}!at@9NE08C?Upqnvi$I@%+>&zg&l&w35p z*giyT7w>x6QtR3s*j$yL#8-n0@g_3E1z|T|5l!|^KG1t*x1%Fe3OQP!+tJag_p-^m z#P+Hoy~FSCN{&p3wyNxv4xhB|E}znVj;GI>nxgH;x4t@XRkvbKao=!qbjH&$JGBZs z)t7HjZ9n$%tFk{f;Ay_`H>6iRI*o3HlCZr4eSv<(+{fTbI=nmSsdjby3BN1Qf&Id>uU*bIrR9_rC<5y#%cuhL&z1CcSC5gpz!@bs= zOKKZFZFQ~ad)5jWqeIbNYtC9b2+1$s;LO+kt*f$D__?EMR^Y6sdad~a|EK`$2CniI zcPkzYZtf#59d@P+D~`d?YYpDpT4_t#mA7q5x1V(^bf@&7!+f*biyxw<1uie)of+?q z>=cVv@B!YzI(lf8F`YLRyS=QUv+#dB8`D;0tsWZf8yOaO`LF}uCk21S|7OJ;m-y@k={S%-A}EuZDT1r&XRA={RxK z)?RCDg<1J$?489LpK9xt-Zex2%as4mjK%f;`>0hd9Vf1}4XLgrl5U#h>XiJNd~ci|*e2+)>S)y#5UF z*i#2%KSkD^Q8ln<;O6Y)$KNQrWp~f&>oyjZ9N%3r^w_SRa!2{(l7g>yB|oVK{;lQS zq0U|Mos?_pMf;jst~zArT=98Br`J~uO{>ov`q{3clD)fQweBwXkXkY`Y-lNBywacE z|I^`)ePzQlw@)AbQN2BSC$Txh>+zkPeOCtdwflWjviG$YS0%l)v~{uD{qn3p^0E=g zS=xpL(BFHQx$r@^ZF=A2C(GYb17~dPJ*Yg8QlB+Qu<`Etf>zcAw1JZXk(P4CGD%O; zF%2{G7^57}YYdBWd;ucRr7~ruoaU9&KB@Zr&C#~ykkC_Vea%^tqKeph@}dokIWx(d zk%Melsr;k?(R$!{tNqr>QyWv+OSgyLVojtEwA3DVg*Uw6`F_#&uE^v=S2-L%sBlfT zAIRsh=F_!)@Ad)A)^=|HN%qD!w!BeO)XH~WeMyCZB)xyv(=zhYF7LWrrPPzx1nfUJ zzR|{%kLPPC%h5O6@P& 
z-L<9TdB-)G6YJ`lK#*C2d>d9u4>(Mje7-l3+E(w1s2?0bPUqQH6xlwV+jdUUZ(8hY zWmi@tojZnuBY+kd>7yWiY} z+qcMESmPZXAQ_%?d&{7kct3iLR-6yi-yC$dyQ_9^w&q5ChQ4S6Ck_3sWoer@d&QaI zDkm#bp#{S2lz!ho+~)ay;opS|SGrt3xUYCK+tKzC^X)G1V3f`S*85YQeM3st6de#g zxG(2Ch)Mq|mHP+RRBep=2Nl$Qa8GeQzA)Q{ALNWY9B>z}=S>1v#KCu%9Bua||H23zZi_?x4 z*e`3(sS^p~*RzrNWuf>Qm2&dv99j_@xoE>9hF&{FuT@M7(e2u;x^91PQ1hl7t#WTQ zwA;|_m{f^whibCR!SlX@F}eCDx^1+no)pP)J*^I3sc73X-KyC&hGwJHNac#gMp> zQ7JR<(^&4f?6d0cQfh#3&7oU`E?JY?GWeR`Ol(yyEr1<9U?wh>< z@g(f50Ngm8o6-Do8Zb$hO0o6lx@3eL?h+Z~q|AcRf?VX9F-AJ z16<+t*z*V5#gddKCHu*0i)js+7$=lUe;nZ~jzEgg? z{gBO3_w{sdDqsAVsRjlr|Gsp5<=2gF$3-`G94o5)dcXU-U2R`S^PeZ)=swv|Ss=e2 z>T`B<;4Hq*lylX zQAZuFf{mQNeRGgVq&A5Ce771_IV#`^ByD2Ma;t%9(FvS-9agLgGPW1DJFe!ol+mts z?U5@=(m#{;uy-9y)w)2J_jdKP5>YaXW_~F~3l9SAN z%zdm>X;JcQ!}MG1&fK*sX>wbRt(-3obt|?>cf!Ut|}dRu*-(p22~esR;r>YiK`>HF|tYdYMLGVIy@VibFyn2 zv~CNc$}_(?U8zGAN19rcy0*!U3v1_B`D%M;>e?PqG^=7~ON-P?8?hj2tE+9&D6jD= z&y;U|v#P6UrhJ24(dT0HfAY|S*&CKN=G9-)sQk4Jo1JT$?P_X+QfuiW-0W3Ln+I!Z zaIHKK);8F}z*0$FBR_+DrD~fp`DMN65T~pfy`I{pbZ=DPYU-xuN8O&3#+tK24G!s( zyj#=NVK?nK)s+qDv~5>|8nw_vjjHzJmXOM7>Ixp&-ldIFz6<5gb+Nr&Rojp)zbe?o zG25&Zt+v5W*+-?UXH7FaiaJ!Nk@O*)oyL&zH+s9*HY5X^Q%z~&#QDPx?_-Txx5rzw zw)xRarPM4!c?aJX=GPCQ!UU%(Z|G3Kr z{cI*rXtmQfv9ZTRa~q`Y^CN2;a-n^?*Vbt7UejRn)kOQL%bH#UCe1y+L4&87H>C5| z30^;?JvY33mbNrLB`h-J_V}d?JcU-2mvU9mRH-YPx(JP`Vl-L=jTAHyx(Tg>MnbF3 zblTrNMyC(EDm>Jf?9g6{$j(W{1z~C&efb-+X=A>@6Nku_cTKY?hkxOD5Ew)zg+B@& zh&;#ot!;2fTKYD4GtcPr*YtUWdjAn_qVHn_x2oDEy*=bYbmJP~hZo&Iu58ftexD7w z?%venwnYs|o|?w*Iy!!&CvybuP;J9h1Ab&YOW;h>LeZM^kdE6VBd>oa+@9ij7BJYk zRwErAqznClODSZ*6>30iqE^1$3r-^MT?B4b9^*+U#*@YWB2Oy1#dvZnZSA3*f0eHJ zM!ibveLy|YJvVyJu6dJ3$17$#x_WAxx>nXU4G^4zwGC+o?%2?*WFwwqp+h?5r}JFe z+UpV6uz{Xs&0b`{=9v=BuAgLUd+crZiY}tF(_^|CjN$x%;9PmS{E2Lcu2!qH*vYjm z^JrJ>s_3i__*)z?{r>`46y6KJ(vf9dmXKx9b=?NQ3z4frqn%NfcKVuhn8Fzw!pk+M zI-VkYIR8{fuh6NEiwJF#l61$MSs8OCvJC_-}A-_i`SljT7K_kpomXk7>^^+Rg9(lWU zhtL9=IceVh9{%KppnHBh3y^gmFzB>SjFWSx;<+-3^VC=~HYhV`PH^ 
z9txd3!KKaj=rZtF^!M=F+OOD}Hsr39{%7`3=q!4#9>ae8m_MVuz~_(gy1=)#+2(yL zYWGRG+J=p^DZiWe{WW3vM5XrJdaA?C?-@To)zRa&QyuqLo$C1cz3}dm7@s`wN#~It z-c$iIv=P2tZqTQTPU|b02IxGyqG>E?8?v0PrZq9Y(Rp?@&x9U=kHb^ju#&n%po>R4 zotA_Do3sq7rL9TOP-yD(qHoY;n?b`n!BK zeAIh)HfY%k8zs+al@HRoJS=^?RrrD)9V&w_f9@x?G{Kj#A}i=$A@CGh_~Prtmq`X+ zF5x#wD4n3x6Zf3z7<FPopI{V;x26*?4*=}=0o z_)x3xp;DXwW{>IlP@PB9npoFQVb!+jec@51*x4(Zo+F-#-y?b;wy=x8w&{e_qs=dj z`U@=OR}jyuT`GQClNO?ls#@rZzgM-W(L;Ne(#JVTnGN4#TQX%F@FFwFzrz#WFaDbJ zlP5HyFIBs=ITL)vzrv5m>Y^@g^rX*isI^hYzp&vTbghNIqAQ{&J8dy~?qG~F?b*pR z;qkPlox)?as7de=8!$)U?1b05f={Tn;RORP=wZ;Iys47-ovP!2z>)zDXO-4A+!;J} zATxxY3LXw%(B;e%YK*}jW{eIu%h)YAiT@>Il?p9wZUQE0XA*5{evzMP4S(zCI8S7e zww(Bkwmh}e`G5fjECyB?Pt>9IXTc2|G?mdLygZM*yNzARh5?`)&| zEXvRF(x+j2D>~`;RN4^RJB{Bf2zxHZHzq8<>r_W&36{M2R7X9(?toI;ij|r~dK>A( z{GKHJ2*2ewo$6RZd=++j%1=*q*a@bcE|l@KW^J=OB>EpgE>oM!)H0n0={D79(}GQd z;hh%LZDgYjSZw~(Q$_0r}Q%T^tx|; zQshtM3!ADK>(GOyZ10&O$9C~uu#vX5rEj}`BD|}L`4uwO%`#|KX`yinG%@?yj~pC; z-o=!A310gt=ZBs$-iZ$*<5^eA`^dkM`~vc2th463gvR610KyyC&2fZH(C^62r#k$E zCy0OYlY~8;^yMXu+UK$PXq#bYM1Jz1Mb}9N?+fc^I-qK3!AULO~h0RXr zfV?)GLs_q9X`{qH6Z$}JO&>d^>wb+rgpGjCkI>eZscV~^LE+bv&3T5*yDc)`8@g3t z>!hED?w{0{8D84xtzFt^`iL3GJATX|=_`f)-!b~bFkyeDH#K1sg?H#|O13Wsljy9i zptgxSS#=vl{VXp!kp!GF@5sZiRcd||{S*It)v+l8<71+W4ayU35`CKsj^cL-9wVq< z5R^L8oA$xC-y6@xPm^{>(Vm}nOQDg_s?58v*+m;c&qDko=+!a2L2U9Cqn^l|8@n}x z-%ZA|-_x$lJH-YsHvI7WpiLbzVm_b7bEi*i-DA<8llLWgrQU^&Zk{En$6J#aN2I=# zFEHA;g>urD$QwSvA%Qm^cng3xhw|op&JCS2eaw5wZ$;Lmk87!G;{7gkUPFJJVQPAa zw(5gx;Y)dw@U}O2)EIb(-t_6Zw6PTV5Lm=!`A~5iGI1~U#rEjWv5{7veLQ_tk{`0< zTkX^Nk*ynzzHR~z(JyIF;1-xiQ0{uli63u$K7{A^NQagbDK*xw)OPGkHu0YL z94`74jA)4Nj&$i7fq8 zgZAW`&pnKzqYc=XK_`C@Irn4db-t`@_QMyYrZhZ{ENlMaMosl<)WFxq{0|vs+?Xb1 zvpn9F&|dnFHg(X4_$QLqNISi?4Rt(Ytk(7O8`109+J@eGzgIQgM17HIWH+UlG8w*D zInfsf<&ymPf#7qVG46`)$aqo=tQp9R%g1VHp{@P59LMA9JG-_TY1QoVvXWY z@JiY#pnf6ka9gI#EgQ!1M-<8q3Jc0@;a z7&5*YJM|j8_4C|I`{HM);L>LF;h>Zgxt2O%qx^cxe?<9Y)?yf*QX)K;e374L$%8hB 
zJ}~+dzw%E;90sHmdt>wx+TRGB{YF}7Ka=!!qijC)tYxnzub4hO;4Qq`3SaW3oNgOU zU*N-JY&$w8yb>K_O(I3cd%?$q^=H8PHF&J=^t_7aKjHb7PS0=V`BMgb@1xHj8#I$P z6|z|jofcEaZ2wxy%R0a;ek;(+r#t!nWt1)R5_~ax;qfz{x6H*ZrJe&^gZy4b7<&=e z5H6Z~s$)NssKJcwpUytjF^6$#3cB~+^`|->CVuUWr#il!d8%VOV^|950hDuJpj2OE zqpED^LC=Pzjl=$(aSS!z1L2oTLF?6leQ9==-n2gb~tVO0&Qe{uf~C|6joL z2Lq;R;3*?~kDua!w{GYp^!d4^54U$YxJ+$g9?o*J_{Hcz+1st%k%v6+6}&|TvMo5j zPWxuPqsY2v$=j_*IF8{*mdw2Y+G=<86c&ZozsD@hBlHIK}P zR1eMbhi04lgWeWvoA8{u9{5gFJDUC8+NLdz9SvKIx^C9xYt+J4w_4cvXJk6aulWBX zb5zrZq(%P4PW=YC(t>s9#qwt6R1LBQBl8H+0htrcG1~iu0gF;=dID1qd|Z61+NO}U z19*^AkL(8oqrhZ0W$#7$VXo1j>l$s(HO2wfaZ;fBp-N~zZ=zB+%sACCjrddObK7X{ zQegsW^1IXzz5GJYJvu$@Iz9L3G!U8y@A3^8FFB1~JD``q2Mvv~c@va6r!pbqZg{z) z3LaF(<^#e{EC13g{ObY#WbP>bTZBGM9)>Nlm1*#Bo&i(-X)q;tcv$D*3f*^hd&9cz zA1VGI@IqgPlvrNckMb6RPi+%4ZbZLZZ+CTV>PVj)Pj4<&>e=f~b%degI-#erj^zW- zY0QsAe{A3?cv|O5V>`+1X?R!0<}caEuZeetMRq@f*CM;^2HtEg{D-m|!+UugZ;=lZ z@BTX8u{@!($0zFyv9TW>!N)sUuN8Z@yqP(0V;;0g)W;Se<5F&v<+=EOLes8B+cF*t zP0e~DyUJUG>@LTKN1LpDA~%O_zY!YGoy@$2d8K7;ZqDz%PEv>0vzW3ChvCI*@fC?* zXR)LI+cI{)L2FaS9{Vq89h0MsAIZ^=Wh~Q@*QRzLga5IN1p`W5aLuWX0v0&!GKVx| zOz?c)kBn8ZCKp1^kTG3%E0H%te`9hL(_hgiug3Z%e0KR_Iwdx7xkaCf4P2`Yxteww zt_dBF$<~jM8GSjn5@VqOS zH;+=BLps&rV;qpNs5%(aCz&5=`Z__diB7QhAL;VOTrNd?BXbOzX2Gkgfmfk{SKrg{ z`W?3GcX~Ulo$2!K^~7Y>SNe8qH(*}KTF6~jpX!*+0`r&n6Q=(YsRS-)ulZ$c64*Yr zj75AE5?zq7NN6gwa6k*8`y2~=2asve=b!=KaniaSjgLjY2fp9yZN{Zz7*6~Vxdey0pog?{9I&hox4E+oAoCQ24Ju58mZZ+Wb z8}R<=G3FAZ=?8V7Wv-b|L@7SKG%T8A>a}j7dl(# zu1gHKMNVY>X7y=s|8w~^W!Ptt;dy6th1S8Qj(*rG)+HIAZDYwx6Y6FL{1`aiH(<)=DUvnX)0MV|jR zU>w;A#-e`_#zF(esU~wjkAj&9nt)o(9|LvRh!0U6I*p z)+BDZ;#5ZkaP`K&`QLyGheWplCS2di`3dAJ;g6mU*Vs-rD$1Phqsvcq>=?#f()cBv z{W`nFuX7OB+wbJpebT94yJrVu_l_TxEu)|N>Br+4*C;lphq_ICz6CNbH2akC$>yzT z?8bb-%RZaK_gJGX=ZO}+Egi*eQsso||3Q$tvyJ*=@w>&JcQclVZ(7_b?eCOM!!|M} zeOS|Y!=zJQG5WMQ^T%_}QM!(;ZIQXB&x`J&|1$5)_x*UU^Jmb^f&N$XdpF_XsjOSE zU@;iG_6YGUgdfqxCv>??-)CmbZnTeB-ePZCsMBQ{xK;iSg3zUoAoa(3LkXH#;(|Z8 
zhuz-LlhEvOXcmz%=aWu&$L#vrma*VH)`It_PIwm@c%R4b1q8pPug9mbX2-_RNbKMu z;&p_6>fI(S%^%-)6L~RpN90Au(R3-}uWjhoHON{w^USnhldNS;FvfW%MEYE))bmmE z1jg*3dH@-iqWhZu655P;t`&cM9kVj#Q~Xcp zQ2Jlcp{Nrb+A8ZB%sGy%iwcc|4%X*Fhrj+0>F}CChYkF0B3w9`eWpP$7aRI>;t_&L z2RQ?amMc z^>r>8|7AXu@5PsA{QAhD4qYy1l7bsrvxCnP!i1Vuu~VkZ%lvSHk~U&%Wu=}y$TMpP z$g}v+vfeonJM9k2Gksp2vX}hd^IYH;-*l^89g@8mYdJYLb(}eWjaJ_*I%fJ*vX-*Y z&)QjKY`!4#28W-uFMLPY-%*SwXh^)t5O<7sDRO;fWQj770DJ$)JV`q%adEC9w(vz>0_mYHvB=D^k-uq*7W`V=x zf9xFQa^OqMB{fyoB7pr11*{Q3cm;Trnc2qsytbDaiFKCys*2erQ^my|N4ai(2 zXTW^54bN9S7N#GVv;9RbcI(q>f@dwO}p!gn`vd`qQByAR$*yEBp+AFNu`V5?dbU6`{t>hL~} z&3?S6rl$9R99eegVb0^E589lljc_(D@I3%53!;=o9`2MqkI&-IqnzUf7KeXRb6y$0 zoa=Q27c>f7*#AQsztiV5wb+1B))eHdl}GUB7dB+IN13Ap=c6B&H7U-jb&Vp=A{+bo5$-}>HX{`8F~+_yiL-GNS>xmR8xs<` zC3XszB*o!Fj-VCOtrT}1aj)nnevPVV=+Bb$Hg7%lZ>p^Oq`2{|RFr%_YvK4y=*^)t zzlZrQ<^2nzKDD4#BTw^AZS;~abMTCyr@5DHdebp<1-WQC0o~JhrUf5}I_cwE&ZekJ z<|4rd^)zFClNMBGJEM;_$Qe@UOZx1p_(qwRvjpIaPPU3){4MLLqtF}iL#%o;09xdc zfA8?xCNKHkPWgSwUj|+N2tDpcug)kv)$txkPpWAiM1gs~5oafW)Dn{y4uIcmKR z&QdQ%35uPpSn*p+c!j_ovf4plOi=p>tpxN}4p^%+LM9=PAa#BGO8v3_$3rRbPa{+k z<`Ak03khMuYQlQLW@ge8Os zVI5%uVH@H9I{p5C`2TP7a3?ajhY%$kA+!@57)1{un~+Z!Meq~K2<3!Xget;4gvEsA zgj&KYge`;}gnGh00tZLbF#_)esWd_+A&=lAj3w0RXBZxfich!265n8nueZc&E%AsY z9=606TjC*0yxJ14vczXw;+2+oxh0-qi5EL!wpw5=v^@7&;`x@i*AmaP#66a{+Y(on zc)Nan-uv+0R(rI?MAdmiPuse7z-JYl%lJ@vtSn*b)y} z;? 
zEb&%LJZg#WwZ!W!@tu}5xAEeiK&Lb3`>i7db$O^)Bg!2eJ34glyRL3s~A;Q&!UWA)?UPwqH zcnDweY&~HC;aWlu!dC%&8^St5C1E4aq>aghY{Chiy+T+-m`S*RkVzOpo+A8}_-ew> z2?c~CLSxCPj<*Sm2pdU<2rCFT5XKUc3H!-gNtj2NOc+c!0B&Q*a}z!xT}!x|a5*8B zAo=eSRug_jdLiL5f|u|E&o&SqA>2q1I;9fMAkjDndY(4XY~24 zH#K}jWG?Yructfug(w$lE{ujK8$vc~n_}fcMtSqMPEQ-}gPcbhP0l1z{%qnSJb#_r z(2aM$(tP8iz5l{`-MeMc1q7L2WYaGmN>j)`^kCtMj>DnQ+pRU}!3Nui1-Uipey>Jc zBd!sz>B{qwJnzbL;u>*{cug0cpUv|wJSVOZ*NE4o@q84|(|Ar?BhLDnw zm|gdcN;_Fxd9nYSX_olTg2eOH@i<;f{Lc{`+N=)nF6yC28&7`sPV=5lf9m(-{b@<# z{_nQwd1iWVr}X=s(siBEn>(e~cS^7Bln!@FFYJ`A?v$R>DP7qqJ*`u^v{QO)r?jtA zI`(t_@>6DIkO7H2E-q|U=ty6kKr}Vl` z=}4#al1}MRr}Vr|>Dfm5Z=8u?y3L%%y zzQ!bm{bAy)qcI_8-ac3uJ9%y=K9{(IxWN5KBQCW+A@1h6;PxT$G`%I}V@6!CS=r7iFQM}v zufcOf+orxZGXA4-k}YM`&=Fc_MMO!kmSxLVRTyo^PxP;v9$LT9)Ja0dmzv6qYN|j& zgQi9`l27omTem9UW^iydzmBEkH6}m0wc`CocSD{@UnJl2HrS$jRsgAjO z$P`*fSg`$H|2t0S{~rvk*)}zfbG&NC4Krrmq%@9tsPQ9*`Nk+$O42yDmTXJ0r)gdJ zoRmlFX6x?A(0Zy~_Drp}mSxM;254&7xT%*6yP^7~S=DOTyc??LsbN2v8~blq)f^(X z-!LZ@nR&+@vBERv&Aa`knRm{+f%L4qZoOmP?flM|H$yLa``lO+GGe)R-9n4E-Z^ac zj5}tlVYkkmcY~gsdFM@YW(~b*mR@MaEjL|n0y1yLOf~F|*|e(1Zl5bn$2$1$$i%G_ zmL&|4$9b?`f|t1Y7qdk%9TJ(8aw4A+{0ODUCsADdJGGV+r{@u$Ggu7ZOBA%Lwej>E%TKB#8c5+Y>#N@`S4S z{QB}@p_~o`H~BN+^Ab1P6MdB+`YP=So>ETQTT75OS)W&h>@`Z5!+Blv&us5u9!NQ< zziwtM?2!k?pIMGQ6uq3-0fbCd&tfgACQ;BVmuD%b>+1ncg>+*QYfv8(hz5*uk`+p} zsn9`9y^?4yj&BY5(x;RwNVTZ~PIpI$3yZU^OdP;M-7^Us7&?3`In z?3+5Gsr_ale$8?(Sjz23NB2|C%#-3~9J1>DOTVJat&|hFHUCVMhVg(hRpxUkXNAu! 
z_hhV`S&YjM%?l<;v6$E=cbv%QTJQ!ykpML8Mogu?2fs^FjV)Klm#@+ zgk12;*J-eNHFVnuK0HhL}9&CKg-k|Pi8EeqTa186{eu1@5T|B(<4cTLDCl^u+=%qieh>` zXBk!fcBcv%=^7k}J#4sC8|gbTSf75$rTYPT`(EU{Polj|K6tq=Srr=PdBIg3;Y}*tQf^Mmm>TKj5gi>A%28FXq%h z)vGR@9spO0NctXlQ}>$$Juf^HdYzRTr(Yio&g#uE z_@(?_@=rn!z}D+GlD_9~s)B5Kx?l)n-J8jA_+J?fyq~4S;TbiK@u($Ly<(JKQ3U;u zr|Py}+MmMdlxfj4ojy5W?cmUp!=1qA*x9t_Nil3@ntBAmN}J_W_ZaQHi7{QigWCv< z^!L!O&_cg_oWUa;L|SO%Ukv~DCBr}HA^f@ydK}r3tQH&joBYWC7YTlUN&5XSQ{(Ml z1O6NS;ZpOA@^|2aSHF{hZ$Hl~ev+i`i3onbWsI%hu;pf>yqvP$Y>~HDpkL+WWQ7>$ z^h^PsAp9}YuaWj{Ptk3!w6}nPV{G4qzAQt&<{9!O`1HzQ|IwGE?%}zVKaYi=X=Q0~ zeHwEH^xv4!k6RI(>UF6~_AI6R$=>X3e&CAp_i8WWzc)!08}Ma9zp}Fv@^k)R@M}m> zG6w7YgU=R}T=qnzyD5O1T;%`#bgmFG(nIp-{|vX9W2DbO zzYBluj@y^Z&r>SoOpUkK19@rvGzmy!@Q0yC!Bt86{{#$qo9rK){K z`CG@qpVpW?6#kTyF#g#Yi;Vnd(W5Qd&ba+t1O9F43HYna6KQHTKkEc+6UI6*kV&#_t zPZW7F<>f=8NaQs(!vy=(W`ke%J@o{(b;`GO&-u)ED}E zQ;K|_m!>GDw>KJjtY$p$8tDS;hXaobu=V^A0zdq87-_zCtd7AiGrbjf_v4qC@NUH3 z?D%6!9G>$pgulHZO(k)d#mn``G z2LA5l;s>+7%+1rVA0~ZXMs+=ZPT0o)<3*J5A}*i!SzC@L$L;l>CL(X#Yb*GOew4eR z@2zR^@jf4U$?uvJw>R^VhsBohYys^pKbahd=Q;RM8A<5-H5WkNXkt8i?PC0g5efK@ zgGkjG$#H+yqw|*oJn`{cS-y~)T=Q{^x~+P?$)KIe}vVzD%5@|4$| ztm+JSuaOBtZi0XJU{9jh6OYkf$vEI;JU8pV1mi=c*h{1SDB!R9))^lkoo9hhl{+pU zt;p~GKd0#R1)nJR6{F9ledu(SE-&UK@pKYa_=NS{@hrl-&f3xyM_-)|- z6o8i3rO4%>I)A6o-xdqMcA!`bT`_-C;9ZS8Z?nkr2Jl@!GbygWpYTqB_fIiG=ivv6_PxCVi6Nf8AQQehy3EYsG$@ygcE5eaQrRA@^F{tfmyV)3^IA>Rv;Z@|*=?VC>8mf*L)ANJ>rn7tSH+r~j} z{6iC7ITX61&eZ>Obx$((?GFk0_?-H4@Xt;A_A&e^txb$CqeftVzE6pdFQ1ObKRb~c zm+xQFer*PF0l}pIa`e-|WkdxAf6m4}nf|QgkG8-o^qU6$dpU4|dg%CdK+n)M$#MC8 z4Eu6&WvcEUO8vF;-;Vxb)?@k&J^h+1PS0LF7|+MJ;{3at_FJK^oST*YT-fvI2MK=t z9XRjdfQ{(C4KGUz=UdsdK}n4dgahCj>v)s%-7FuLIL)VM#j2m0}6~@20%Jk3Dr@PfhvU1bl5bI@K|weTD{AzboOdoDKhrE&M;ic)!@56qnzx z0A%OvWVPSuZx;EpPPx<$qrE$^7eUMXcQNp_zVD9v-$Sr>TOMOPGV0HR{-Z`A?*@J+ zNH5>#j^{szKM^|air2p~4|*Z*wMKub$m8mv$#Hvhn;-kKFJ^B=K4#O;PRsbs9UjUz zK3N?x>Sv)(^U#0kU&=2Qd0{?SY^1MqG5-8MRn0NrtpUE1*zZas|Ak!eo6mgMNFVdb 
zc!~Wt(*6ASUw^^hH|l>b{m)B`@7Wm^P#8N-pDlM-Lf7g?*kXOnJ*joy@mfC+K{T&8SSO=y!GRl zJraMq0{gChmGHl3(*E-AljHWOC;E5n?9}-D=pWg@Ygu2}j(_3DzwjFEA1H?2taq94 z|JDip-cR`3QRHPdFA$slNzXCN&%RE~PcQLfFLo#F$20K1@RVDEVfTXt|FYR0oX8IGmx`Dq+_!EbrcNO$D@s0SP z-@atE&7fC3={(+`U1y{}N05WRO!)tG*sF@V#CpRcz>~+}ayiE?{kH)By7ydh{r#Hp zV&M<2xPQDCdKE%1e)RGm=U~6_UjbX!-#h5P&6TPO4SM7jBA=}PEjIF3A|J)yrKm0B z3%!2>q7}yajHIvVq#xJ&f#I)1Hv`PvJ+%!jERg zyUiAP>WTj6FG$R9{%tVwd{J_Iet0(i&644%YLC%=74R*72jKVHp-T33{%i{{q&p{d6PznTWjIvy1f% zgMS~>-Z4vi@4)Z3Od700cY$-l|SbH@3}s?unG4EB02zb-^Ra3V~6 zZ$UqntW3=RFK{4FKXIzD2K}lm{($s11^O>xJUe3KKf~`5_EY8<^tlj1Z()7UZ2x)m zv*6uS{k`MQQ5OOEuhY4>f{$nxo5xUXk>v{}pm9ZWn{lAMm zv|W@~KfP@N`fx!)zK`QitjC`K4849e1I1?Mr@}Ak&yT&&V86knhX;Di>gkNHFJ2G4 z=6-;bFJrx^YD-GofBPEx<=4mjDao&;-C36Xgssr8n)R(C2EX&+=a%J(_2&)3-!+N( z>($uPLW_S~i~di0lv_p(c+>opXTPD!NG~Ft&3a>nkv<>&PGi1n+LuGL-)fmJ)}#Rs z^Fy;e5A^k%guVuRZRlIDZ$jTTx$qa3IpghB)3E2;gg^Bd_SegJYU=L>;IQJKmXq*r zgg)nC59&50$K^xbIV*i6;SW4Eg!$Tp1V0CXzj`F$Pl#i*n*C`L{)g=XuQNU$*zH8$ z@we|W=_1$QSv$pY(pxrucH_B+wjeM2j9~2ttF08LqLu9(u})k!1$0rsWJvVSy@f_qpGgN=Im50T&cjWK^*;K`<=49obj8hTXUj{MWF(Ep}n z`1RYA_;uuhvkg-nXGS^^r~mP^JL>s8}j$Sc;v;h zURR$B|FFlVzxpinsp{rdc>tvI3;B1T|A4Kh?a-$de3u((TQ>GFE3v-03H{w-882@g ziv6N~slor1=-+bY)293n^wYn^etn4jbmQL&KGOe^bMWUjCdQ*h^uO=*#QbkL{14VS zDH$s-haK0!pB)Ci)8Jn={4?{fVEpi54~5=B-xs0R4(J6KdVfFUp&tgnWIlNv@E0yh z%r6cB|Nfn^^-!tr2A)yumzeaZL0$`>uZiCv^y`(^m|qz9Uq$*J{FPBge=iZq{2;MD z{TP6FSZ_1yUu0*z;Durn-e%wnvVS0Hq0e#T*Ly``f9}`7>s^qnP8#@kr~M7+tJ&T- z(eJlX^@A!>e+Bwl@qB{bS5PnVTEc$sMPHBH7PEJP??ce@efHZ;`}b?oCs}{2G~h`= z{Wa?*hMfgFCUF4dT@n z`y2+|JPZB)M0$UIf}a1#0DfaVPxv(!L`-`k@_BL`^!+B`FUkAS?YxL>^4kj{?_*!g z^z|0|DfO>_Up~%ngbe)BMaPxQE`kOHp5LKwjyK$DpF#hL z1+>Sak;iDSq?GY^becZi2z}bn136zNe^S4rE8{WyRj^%8zXko;3*GwBK3zU(clB_m z@)_-q&P}Y(Nq!RY)Q-Pu_Lq!3Y-hZp8NI!}CFn~x)`O7)>HjSFUyuJc&&YqRFZR~r z@7#|)H}_Me{JCOp*iSa$TSh&rf3X7kx!E7AH`?nV^PTq-@|MATU|xD^e0+0ZA0qI} zJX-805L& zvj%-wjDCp32)|$UqE7?Z&qt0V-P4CWA>U?xGVt8P`m{;k>sjBa<$Rjoke{vaXAk~{ 
zX`hadM_w)aXID{vmSw+UEB$4#-ec0^6Zo+oeh59K|4jH1`j-6^iV?WIOO~LoJ!NAA-Qsha}w<3Rq#&{=bruyohKPJw1rJ#S& zZ=CV=>>|(1H(;X<&kE$BeH3=okgo^OpXK)^&KJA@V#Nd9@%6J^1MuHJg?a|P4gt>= z{Ev{)-t&yddl-)^jr5t=gI6Xc_OsT(uO$|KJpnzdYhCIUgFaV6zjb@uasRjseOq^P zs;b8h2!2~~=#s4+SkLi!U{9fXG%pYlgG4f-|OEvU3GB#P&QeWUHD1~0w zw{jyryBFh=<^0Ec;A5^2ihP`)|9c)~9%uA_JM;>9TycN#c?8Q`50vt+iQwfY^#2^} zNt%Q85Tm~nV}WlK^Jk;IJG}7skBRd*`ShP%nyl_I`1w2d>-{Lv-i65T7F*)H<8w4z zX0fLXl`4~qhRcomn;=xkGTyDn{+>L}`i_x56M5hJo5cK~W*qHvUdnC2`|tQ$)sfix zpWqik9(UfCI8S#c{H=h$rha&U=bkT^@6x`|dnWBvTIRQ#JosCGO6ap6{)O=G&H9a` zH?Uq8#_mgh=OcgZtfxu7wD$t`^H~3wzar^(Mxwv$pN7bn@;`u&Ip3B1pCB)$|0e0} z_+Q(GB;@1d`Lxe^N43#@SttG7jNP*CS2Y5^x!$O^mxsOayVY0#l>ToSf_(}k?Avqr zpR4ho&Glms<*fSgbL44#Ch~0X<7@og+3e4m^1TRp&av<}2YDKGDn-|S>A#qCG4eIm zs2>9V&^rnJVd+vWX1`nbDe(ER=jQsp)V~Wld)R*q8u>f>B=%Dz{|VYR*T*H@#`w^V z{mnGW{}KFaFLUef5=i@xAfI#aKk|)y7jVsf)uAE=eA9ual>O)RM*dXfwPLH2R1Cfa zDaiBtiS?Z~fhQmPX!bV@{)8FdO?`BXhadRwutmpj29Z+6qj^U9Ug*26Pr_enBwaNk z;jeLrqFVS)V*WJ+c`FSk#*3}!%fj}=d9@LY_ohAzzITA{o*x*mjQ$Rc0{#)P@mc8g zkqi1-)-{+d}E1GzdQK8(j_TwFZZXR4=hs5HuC#Ze@=@#em>+J z>|33dqM}CrYw*|o|FCr~a5X3I&}w5X06ql zrzZT;%kyUw|7pL;WzF^u@$ftAANnsD|1H{|j`?4P?Ds-{Jk=G?S48^Ujs(zODdk&9 zc_J$O52CN+RRPUil& zN<+p&=r5&xuGBI9^{|t&Wcdh!e_2=5ru`R@Ump1d*qYZP&Z2Mh*nbAf`MJ@b#xtHB zB>P9|Y5&+m_{sjU&|`%EN`Af37ukQC^?%0>j9(bvlFX!UD^n25@l6={MYSvWz5PB$ z{zFve_3-f0w71iY_hkB2=wFHcx`mwo10DL~sKfXD(z!w8$9z`dKfk#uxw1d6tXJQy z2ESNO&z95I?t;EecKp8oy#qPSru`sEUlsd7!Osis znpA>b|~YqC^>xwIJ_Lp)%pJDG_B**{YLHs}M zO#2EVzb43&w+Hh-x&8Q1Uq#dxVr7z-IK~@Khnd%RCxKsUG2icne#pn(RH@HKj2|V| zUzGZ+3%~LYv%gEfCGvZR{+1w0@XsWFEzxiE=L-GPXs@}f--XKgUqwGeTI_GG0pCb5 z--a&&pN95xLsv|nz_+ag{o70P{!f2Yc@XD81U(|WBk3&ldqeOU((TY$#rQUO!rvNZ zebbZnqqk^3=kV{k&iGM|zm@iSfc1KBnck-8g9P+}uk8PbzKX}5lwz3ZeXm2_nGaBn zCVJjb-|^JI^Bze0UvZZX?KTt_f682<}-5%@ZQ zFVbSXv;cqXb?jec{@8obKiZl1Z*6Ix39JuLEEB!C^cODrlBRPEBEL%TPg#!?{t(LR zYEj<(jF*BiY9A}(e@FXFwV3~pqrE3#kE!&(JJH9ntPd&qS;6>l+K-S2fnP=YIn8() zGEM$z_={GX&vz^#pY-(N`BrI3BMqScQufb8`ieTrU)ek!SPVb6BOgkBd*J6t_FENy 
zR~pK@7iRgKO?y!6fdxO0L0HHbbNkIAKR5P26!}>SKin+VNAJ``zq^>vv-I$xelMHt zar)s`T`Bop|IW}e|3v>P^6H1Y#M3?;DZj}7KjbL;tV)VRZ;0~Eb*H}y#=e&CBHV@g zot%XJliPDH<Lq- zPuU(-_z$6vGO(W%`bd;_2=q^eeuZE4$o~c7b)!tLUp?|)qB7+t_P4D_pC|7Ji1g<= zKri<5Bjxz#DMN|k(kwSlKLI2j`{X-GH1^Ej4)6DN@@ZZIP z|7#jE-se1nlAnhB^%nc(4QW5h`8rYF>*N<^AzvFPxU$|O!n@KxMEkN{DYv(8)W6nB zZR$@1J?p5iLi&e9$|wA1;QwSHkFtCmq5UcAvm(45@yAlXN_~2OSDfR#A>*g%NCDaQ zQXvsU{$Z3S@u2#42UCA*;!hCm(V~AU<;nQix|m-gJPCeV z>i+`t%`Vo{sa8{d?zFE&>^~I$Ok2h~7W46E2xf9F_Hr_P{C1Gk;-*>Nb`b8%d`qdn zn{`OfcnC2w;rHbwzNXn8X9@h2ZrXhxPu`$D9WClJO|+M1=JPc7;Aa|(be>9mP@Wgq zhmoxbzYpUZH_8KfrutaI{6yLB5anM^{P2xt{c;NW@320v(0iNo0qh?H$oP(;|I;k= zzZ&|A_L=wJ#nN3E=T{EM`7J~rd%2nIA^W@2p4%6%hlu<`(N_WgnbSMcKV~z(q?ji9 zPg9?Y{l2LG1<*5ezj^=cDe*nArwW(jmx6!M`|PAb*}nq+4frX_FVcG=4~jjC@SlTk zI_5vZ3IC54{k5Ra3jJldKTx|o@@jE@vw-p|`#WMk?kw#m1bYFcer@1aH1iLGjK3M} zExM$6Jd=X_>qnUTXSzZuV4Ru%&&e;F@&?Q4?cIqkQJ!lMrtD7&|BVvhE6;igPf`AMdgw=fM0*l`U+~=7VPm>yR`>(e z1YoonDTlY$!9R=r^-AzNlJSH>Pj~#@7wn{Dx&22{u|+q{@|)R){4D0jovDfh*1vXv zU(k04`H8XU&wT}d(T782eEqm769XA%45F*3eCZ0Wx(_Dfb% z|1lQr357Uye&Mg2>Rk7azKA$u}j68c;4qZI8uVhZagGCfc9==aCw z`Q2k$fe`RXo&!K;p z^J|1Z(PBP%p82uc9P@dLNyu~9$6|dh+5^=gsVvsl8pCg+gRPV)<2ws|ihdIOo{zru zwHVI~qP)udS(M+-pYh*)vwThkPb}w;l4bf|Q;&u*Hd3za&(P66z3fc)1`GU$Xb%Cb z??uSrO9P>Y@ebXbsr)Cz{Mur_%!c_`1ojt>a<~=k_eMGFX=Qo&_5=B2zZM{eUq@f~ zv0kj;+v!67@_a?)KNJ3ia6T$jjvv7I*IVoGU0##%J7T{~GdIz*kMf4He-kYGPf)+< zNsdyu?B9bt`Jrz;W&c|(__3dkrvZxmd?DoY-{$?*-|2rw(jO`7c`X>hjhe+qh^ely#DuRvcXvA##JDgW#6HwpeK^T{Um^dGW4 zvA}0UzQZ3Y<7pRZHTA9NBN47cKP0igHAu$S#8dRg=Kez5?-bAetxgWlg+GIs?<(^C zFa87gA;UxuLoz9p{Um`;;AbgNa)Ta)zm=(f?LFilxf1+4L4UKI^#NZw{a5&T;~4TQ z`(2U$)Zrz+&*v*L-V04I@83(vb8=a`@B8^{;7>CA36}A{L0@>$|4f$gO+#KBE#|jX z8J}A2Zx69U-&w3jUZMSlv;L^G->uZ2?i=TAWPER-FL$o_yhsiBwTu2h;m-*C!D;L- z%KFp=eka0jrT%;x&>l+Je_s#lf&U$TMSgD}pLxiqqnzJO5)L}Y{)6o2H&msO7W|#3 zqdjpR&>)A$(_eYEDn36X_`Lyz;dR$Mo+ha@GSfUCXwLZCFH>ip9p$dlg@`91&@&F54F&j zBENmmr}9#p=2wCrf6yL+v4?po<4dBw-eCP8RrbHZ@5=h1Qr{;?9~_{T#J+$?ACEpV 
zvOmdZlivnH-w_|OO@1Hrm|wQi{>n(OR!X4%r`V=&Oi7sko7Z>O&>sh3-i2ZgluQ zURVr&V;Il5$>n*8KFsGlSCkz867q0kmwCNX#r!?#ll}Mo#mSUU*{>4x_JA=5r5e~iWc(?aN( z%=)S*zbNlB9r7Zt&x!m>P~X1TZwY=0|1#S50T1(f<5b4$lP%^afykqp_5Rax{5%q- zS)0#KZlpaKm(mZ);pJ&RW8=*09bIU@Lob-?<2T}2?pKb2-f)>-K~Ez7F#Li(LH}U% zpW9`-@AikMsx$w@ekfAT?+)^#oc|W-dm-=fQCfhvf8i=EgU0?hYA%tfDNd70-+SLu&kIe)6O7c2Vv7|w?YeJ<#EjlMkK z?(m&o&7n7tPGY+p&QwB*w6K>MPI)6~pP_QNC-PCm{8hnkU4!ybf3V4fzXtio{$REb z?|}YLTi9FHgI|Gn%>79S@=%2R5!GZ$ujgqw-w^X!U5?Vfxk%;7KfMI^_sH?jlAl^{ zJ|Fag@tf~DbN`gie9_fnK0nnReDeCI;729o!SX!TE94=Y{S1{X5B=zey;+YuAm`^U z_;K94-p5c}O6NY7csV?P_EOk}{y_GZ^=G_<{a}FXuR}#B>whBuEQ|4}kjKr8|5O&^ zza_K>$85j0jq+r$f2yP(=S6$B7=OG#|4(MTs>t&t`je5&kFw+_n_kGl}prAWD_{L*b7= z-#=Yso{z=T9=5YT8Ygw<76&o8; zKiJa_lJWKTV|+#btKj<#f3=)HMQn=YoB9oVss28G+=;&h^-VHUcqR0Yv<$l^dHgq* z@+4xf7b1rnh-jccRq#zkzl^0n*UI5{NFTuZFUd^!hSRW~GJcDd{oQH*mhPMha10~`rQzLfa$kcTYpN7BjZ4?zFv25QL%x^3TIT+&u@C%Yzr;(XCjfor(t!04 zxjb9Zr-LprzmfU95`D6(BT7d02iK?n`&9C~{X{SPEjZ5{D&re~KCoOLE>AjF?5l6c z;T7Od5d0DHA?o8T?XksQ#ri<_y%=8?F&>7^CVs_qKpuR|`_r4D)1|t3{q79yqlojS ziu|oa-zxSX0^bPgC+{5V#WMZ(@#jkB_1+=$524t*DfwmLPolEJ<^0p&XQGPvo6Mi1 zFm9}c{hdAax1IUBQXgONdosRL{JlV|zGSxdnF~LoI+*V_8_#%Q`&e7k_(aefjeLyM zslV&@1yUgudjlASr*t-^xU+_uVr%dm3%Iih{H&!msVd^&%dkd18 z_&X7L0_a2(d06X9eU-NVK3{!7dz#F8loI|Q{gdT>dt>x_aIU$3?9-9{i~A##_#@%x zAoxqSY058vidXKF6y*)A27MRI^Xu;LVM|bKjR_$)pCBH1izR>#v@mvy~iS-%6MGl zznuC}xtsad1O1-Fd8}YL|1&|9kM$xU-y;1sJKEzj8!1V~H-_<)=T-Ih`#*f4(`%9W zynqw@%6!E7nOuLRX)nHVdlC2^z>gyOONF0S$b&EA=~y|x^YkY=*0Uzd{+#;o6ZqM9^r~S+#p`I!K|-pQQmZWb9+l>UNF{T{q_?4cI?M}Q0ij(d}xme7VF2$ z!JlS~{sfPEejC^t~VXiuy}Me@4mpg?}#il>4THzbyTcAM+`NAHyuc z+XNWpg?>@^<%)h+Gd|IgUX*7(@{+~=fTHit)850{vz{x<%RkHq4eXbt%kfJi|DG22 z9ooU)7OZ!P`~|+1D)if6bN@9H{i*D4i1KXKBOmPF5zQ38wLSICez{Thm!Le#cuL@# zT?6@$@52%4#eM!y*>4>w$8XAbA#lHg^g{M;6Xj$5Q78NFHbI|Qtbcc)KC&#`w|4oBZj8`vmzl_YEU#S>neN@o=Qn9@ihY1cUmt!b<1^t8L$5rAKd2p3{pjF-i~VN(RFnQFVFdjL?Ld^* z*@OC)*EfZ~JN1!$z~Q@n{W=&KR%jm2>%eQt54OG~FXkU%a(=&}&)m?r3O}YpPrjIc 
z%ltk~dvme2`##?6S%>z*dK%5fL{I%L$UpXOia!GWDd*7zeL3(a-(o-PIP$B7|BC$9 zp}cyF^17jq-_buP_H8NXhso@R1j_kcfc`x8%M||SgGkMKyiN{RH;@=hvi}ah1b@Zv zNaQlUc9X;BQLb?8(?ezda&PpBg}vcc^p7&17wN}PF-05A`lK9!`!wA=AA4Jx`mi$Z zucT5xzO&8t=SENRudFuR@h6u0S1yg^HvVRMNE9*HT zeJb=R`^SR5ptk5wi~ZA=&{GIKKJxgqE&Xu_{V{Aa)t3wWZNd7k!vB~~w8!3Nd$7v1 zkI?Jp^_Q0T58zLb)7$AtZ!y2xNO^X3W;~7D2>Ra9KLt`A1%Kbp%y%v9-B!C3-(r5X z1HuYf4+)d;UuV9j*#C?2t))F>q_bZm%cBqdjn2W`-wa@VE#!>Z{?`kApI+76evVTg zlc^7?%|u@cFOJxEw@~~ry6B_X9=9{{DOuQ;rB$Q-7qVV}Jc;s65%T(v+GIB({K1TO zgSp>W;m=*w#ixF=IiHM}nbP;r)1Fx0rkYLu7XE}kG1qr00TI-{ryTwe{*7fm zHd*#pp*{JAo7WHS5i!W(eBf6yGEC&W4!DW}FNyg7Q2)Ue_fbu!{`2S`RdRmUsBh)| zWf8v)^?8H!nov2Mp{?YJ{!rv?4e~kkgIQh!sSoA6r%7HYkCFLcu$=xf@)&HPFKfY% ze9l*R$@I==eylFfeuW&r7T=}o#rBM1Kj|vv@s!6a0{<5DxtEpMzNj|iix9>aqJ0Va zHzE&d>&@kBPW>34o6lp`BtPZ877@P>1yk-15crSNo&qh-13ZPlgBsa>pO3ne|8~}s z6?w@lUl;l#ieZwcAj+R@F(01=ecp@BZPgB zUwt?PeX!j^pY|p{WxOKNe}?`b)nVSZ`96?B>NB&6S^pkI-y2zf5yckivzpMqGyYT3YZ$*xMxQ3i z;oI@ME->H!(uDd8X>T4swnP3L+0;?ePevakTFAq7#smBa;rH>mNHThT2-jw|v5x;LB`D34;mg%!1-Mb={DV>QQ_Oypg^ZdUw z{M8{JN_o2>A2*PXkyMz7zYBi&!jB*sUnKRV>>rBo^#qKy*iT_9^!+{`5x$CoEBC7l z`qPo0rtrvVPd=!}=fl^9DJ;oA`ZSnAd9qso#KX^L~T} z`lW@%{3Raw4`Tl2DW~uAKl=T1E86$o;`8vL{D;~i56s7?MiYNAZDar0{`-DHHR_brKALWtPdqsI$ zqOV52HR~%4@@(LIDQq^;*AD!-8_f4t-Un~Y2<|J9>GwchJuTYbE(`e)<=aL7qhh?1 zD&xzgJtQ(7Q0U1M?T_;;h=(b^VXRNT;QWtLU$4*)G1xPQ$l=Z5cTg!?^Z5t#vyt@% zC43hADYD>?4e2fC+nMyoihZ@9uTE>myZg-Z%j0^=kNzQX)ZsAsn4X#o8rQ_bgxygR^8FSCAep#FRk&GfgkrT=1mmCvT~m8CuA z6`1Yg=TIL37V>69ekJ5rl-I;B@>lM^6aGZ}iv5wGKhq9=GJY2NLHLs>-#hO6QqsrJ zA1mw8BK#oonT>sfVxMF~`IYrvf&UEktE~45e^22H+vNGBAiN_{M)enXib1j^;9)dqcq zK2XA+;U8o%-|IsE;>&)$(mvlZ-@CKLd|q-+ZOY%$tZxRR&sE*b_J8xCKc4mF06D)I z(4F*`>id2|ClA`&VDs-s+$JNhG2G`vyAkzehyGH|Qwx6%{&(mzh29tS(J$8AUm)XO zOMXe%TZPO1^WNxJIxR)N&qLp6(HBa2E>bbd{;gQQJ4gKlvmUI}*DU&zBo3A9WPD-J zdw}t`;$Mv4V6k3&75vHISLj1DrPyrRzfEy@lHp$>{Bx7>{et}7fj{B0{}Ta4c4mE& z&UZcIZP;PbKN=7_G8njlU&u>E#s^RDaX*2qpEK|mRW|PzUGXA6)(iC@5b-D6FU3NC2zsBuk9_!{gioo4ykmc`3po<$C((YC`+0@` 
zEP@u!{kIDLzfgZF=7S2q$J2fa3+%tkBUepJN!Z5^mGfUlf2Q28DDZtke+7Kz{3Lu5 zeh>INi2ZyP9npW-SLo#M8t~5r`$N5q zFOB>p*25Hjw-xe6|D@3Ws22JQ`!J&%|9A`Jf&N92?=U^`aMR)Y{TTBt`Y$2h?Hx#X z(L8>d#`s0~{UK4lQ^<#Mzq9anq`nmUV?qCa&~s-O_kGLqc9imul6O>cA zZ$R+l2#k)lSdU!7cqjF}dH%Ikw2v6{i@{m zn(`+xXjkOJgYrgMoOh^9f9*G$`=zKyLH`QTf5$U_Ru$)GKt98ZxQ|@s$5!}Z#J-1W zFvZ8zMhY2cKL3zoF@6>J{OBKCxnIXmPT#MCs893w;th;Wu8+J|DbBAB5fytSQQp0D zyzv(Ek52GGxj$HhpQ50a`qsA%{I%$R8c>cP4$&6M`EztxQn}bkxw3yY`ct{TPvG+e zU*Q4seEkY^Df(NK=RWf#9qWatK~sJFN%;~uucr8`Q$M>{|EC$5!fU`+R~z&mbc^(R z84o0h^$oebRwBPF@>9|e74l-yp9g^N-FfqR>(73S2O2t<=EH*CC;rI)bMttxJL7|( zZRYu7dr$gfSMz<_Q^+X9!hU6ym@jbuY_v@8Qsh^$M-cRmN8g28tS`Pmew6cbBLAI% z)DQDfp-)A8j;crr7tQvZzeE3I>;YABdE3*zypzo1sXt4hkFOTr=PK|mKwlido^qs2 zPd_35?04qM?a#r9_?&k=Er%!4o|W;5$iEBa&8ECmyNUnb^vwUUpG1sI{u#V@TFBE_ z>O;9NU8J8vd0pwBqviPJsEEPFzjA-O$geT&HHpU7WX@} zq3*U@aC*sFiVPr-Osxej9NEPOR%FRHtxiX7lBy-Z7dsf74sa&0KC+R9i0 zGQ_Pi#ZK!2Hk{}W#!6D{29lMISCi9Hqin3!9Wlm80Ul6NJ5IEmS%x9-dIeGIufClot07Y(Cgx*NXY{Y zMI=#jEh&S=oQBj^GD#|<(NSx~7i-B;k7(i?tGI}VRG5bIq@tIa_>QloMf2O2Y=}k(mjc@I%aJffXHk=qWJDB<)yYLKM8{fcK=Ht@E}`d- z-d`mqSs~fh`t(SFSf@3rRGuY_^*2DH_#oNn$%@@$EAUcb7*!?Gnv6OX^{#_6!BpmV zM>b@_Pa&)4M~dDLkID#E_#^iEAXlf;8`<70sh4m@zA4UVRs5#zrc*vKUU8l!JTBT< zYOM=J!3AigWPg1}qnD&k7gYvNj8c@5BAK1BtkGD~=x!V<*?`vCRJD%o?qpx=60P;c zFXAsW3TH3*TU+ZHmq=E#D(l>jBLhC7%^-K$D7CZo21)G$CAl;`wX+_nmeAjNqgEB{ zsI@d0nWuY)fY1|iqcESw~xQx-BA}r(fDb#()LD^GL@u6#7&4?fR&2Gda^aDt+i5k zOrUkRPGzN!5;*zcni2`pbjeBYDg|n-9A{Qh9Tznn?4dSN9lC|{XX!+=MDYg~rJe;# zOe~YEjXJ#w+*&K0HI1PoEy-ST4mZUUe-c#MUE(QZSAq@b9Z6+`kuay&SsC>r7z-p7 ze*=t0dgDk*>k(xnl2weC#qtS}>6D6SzO_T?!wXUZ4FdA*>uO*pvX+(e=^w!db)fJ3ZYDXPv z4;)S={lp8E6QyN^U1zU~x3!`a$f4HDl$oc!(I~`A&?mJiE{jgEPPkyI5n`i_I-^nW zkJ5_DOh;>UaJSW~V0J)AEvZC$l2Nj;PA90?sjaO|FEJoeNj~U(fzaB^pTFU%*|BuX zWNt0`VhVwH6W^%!QHeqs`DTpd4HQC6({_uI!Vo?g6Vx*q6>7_g#^x;Flu$%`5pR5NtGzsm_F(yqgtgy@PtpRGm?W|XW}<+Fw~k_gBp>SsAH5c z%}98NM|qFIeTv1qq;pX_9v9zfVJ7F*rZ@|umCi%5_YzG~CwQwy|C5%I;sQ$q=&eek 
zFRi%`oh~BW)J#=IP+I8)9jagl5mU#jBVS8elO2TAl*E>tH6c(f>Gj0W`>>&CMI6Oh zk)_I==#n}f#u-7vfKioKf`kG3rKV_tss_%W675`l%BUNqc9()hs#%h4DHR2rZ;GdL zsiiaW*3F7PTJ-`sZ^?%`0EN=F+@-SC6;%N~E~Z5Mp#U-N{xgNw6M$eN#j%Q#bVi-< z#)ujg&Zt8-c}ZII+@Pp7Qhg8$r}a=v-QbO6Jk~&Dh1(M>Nw#gI0MS{iB_);g6nRVd1d zt7o(dG)l3s(&VgtjC#ot{<~P|Meit-r&^^C=S_}b|HxXUf|pVxMHHeg1-uY+`Wq!{ zE2*^Je%WdvZ#cbw33U|fD8i``4G>rh-LQ-3TFJ4N)W5!wdTYm5G0HPJAJ=2Ox~q+} z4y_4?1N+&W6^odH>7xR%+{3mafQDkj; zPY>6NPbTN;uils#X>D}Hjz_11`BnhV3B4q0TsW^eR!KgIlGa83KxU#aDo^p#lWJ>X z)fbYa6D1Sz`>Q1SMv*^d5Nz>i&qJCC>#fu_db>n>wbfZ;q&V0`TL~sek`f!>ixAOq z=pXU$vNm=3sz7II24Ul)Q6+`L13eietT?SRdb;}SK0^+(E^FGjDOE5-G0JLUkWoB` z>bgUoUhvYLWii%vI-Y3I!Y|7I9)qr z57B@aL90;&OQnM)oujt4>774*-i&Uo6HVT>+HR-Q^~9qXV(-sSq!Mw|$LZyeo%SZL zlrBEvOe06hCtRyT!emEnY!uZ_!HDd_odm|YBoHb^7hD`i@57Vokn}p%QFu}^wY|Mg zo9<44Cu3ge7^O05-KbVrrYsDmsEJYH@m&q6 zr4pcD{+kpWRo`0I$-%)|=W4CjSufKWP0_#q(Lt@iHH-Qa^%<m2otyy>+PcMG=HOCBCZ$taaH>O?p*7OSbY_v0OPsy0SRWeoSycem{?0_YDd7LNVD2do-dy*CL)E+TL zc5G)ClP#%3t^MhVOBSaBYv3HoC?y*4n+VD5>kuvglopYsQH3gk+YU-%8-d;ZJ6bVFGle{9dWmUJq%`%>(hcP8^8{L4crJO ziCL^v77%@cqmC7fX+Y{;71@B=mCTReSrbMKwu~^ z78nmq0t~?KfGCXM>mpzYupEd7)&lE*jlgDLJFpk{3-}v245S06fD3>a^IZXM06D;Y z;34n`cnZ7(3V`=O5dbeF8^9iL1hjxS`&$mE0#pNP0Cj-+fS4ot0*wGapaq}@+5??| zAV7%dPe3Tp9~cM>27U!b0KWkdfN)cIP6dp>Two!v2v`cl0;_;^zczfxAE+@C0}cyawI_{{deC8*4FKLLPuJKv|$F zPz|UJGyoa_O@QV=OTZrxDz_uh1qcRy0(t^MZ-xQ=fg!*s;5T3*Fcp{yL;~}G#lSK^ z==4>9_`ReJz-C|@unR~94gzUFI&cg)37i5lfeV0emw09aw}JaW9`F=+0ptS(Kq2rS z@C_(IM`j0<0<=IGpgd3!s0>sEssYu3T0lLZ0niK(ze~{;=m2y9x&qw*anIb(fN*_z z4gp30V}Rd)@xUZN?5{-vbATv7xCJ~H0Wq?FInP)?oH1SntOYgzNx+}L7GMXk6G#RA z0{#XL0~x?6K)CZfF9SD$+rR_hUmzcN4}1nB6>64lpbPLL@H6lW5C-%E1_0um*>GSCFcz2qOaZ0=Gk|$O6tD;2>}mI0^g%WC9m~E5I#4+&lRIcnmxPUITA|Pk^hJm%ruld;&ZLo&m1`%!Z^-fN(`TzXHtsOn0xAdO)-ip>vrAr4k zGnNc5b+=6Ps94(_;}2c=`t9c>J8rn{JYHmcKDEM;%Ksgjx-Q4yU13pOzdWmH35M=V zH=gNvWwUC)_Wp^nUDg^}eSNLz*}w6*H}So8#3jyOsT%zBeLt^^Em`+fx1vg)#HuHB zNlM>F8g*Ka)kTY6PpVyQpXY}7eup}BNUyfVc7isdQ@2^RPa0ME+Gb?ZhcQhP-z^Ap 
z_&q$q;Blj+#=fA(j6p3oX@1hHq;YBCcfbDALi^RlZn&ye;orA@n5ES(sr+Sbey`J8 z2CX#2Oqf;b-^c}SE{`j1dbQ-8W>$5FofGQqO)=y--PY#q^BnZ**vv~6&z3LD{OtR7 zLnBS#e^sjN-T2Y@4;%aCk54w)aN&olpN~{oUHM)BEs5G|b&f4`0mtL)UWe>|NH}FxboJg)rwdyi z4~{b|CC>!%`cUnHZs?hw<*t5PWpL_OV^7deGp9JUDZ4ksm^uFUde%R*b9#C4TI_PS z-u*H+4@#(zQ^)`C^kr4le@85+<~+UERKEcmdM@6)Z*sMq!~G7GYIbe(jTd9LTYoKl zU_4hzlV`Qy#FG;bss*`ccE`0xb_ zrsY>jn=;S2;*o-;?yqkqbeQjXjqr7!?>g!C^{x{*Jj$WgFH_eaxfN&VeRlpa>nyuH zj#c9OX!kl6EVfhc931>5xxm{_I<{Gdbz`Ce$Zs$I{CIlJD3&;I(tafKmeo7;v3cj62`-d<~XU$CUW>su*Gx`nnUiaF0I?<7Io2oWfRmY4_Ke8(yXQKF_1! zQ}T_DI_r#w#!Ay&KHGNhRJwPa4s%k%iq;z@cPY^P-r)0J>d?PBXw$FRIBkt`n3!&G z%7XSI$$b|6H)F4-ZonYBu4i`D89Smzg<%CPzg1j+?QpYg?=McQl@ii^*tPwWYC7!H zPqVizd@yNRz@RvTo8Ro)r8>NNo&x>zzHS-lHy*l6*X^6pQ(8M}UDm{~6TJ=v5^o(m zUs!eIo{H%um$pvrll-cZ(}IbQ;tW+*mHg-5F@Jt+mg8G1EWKo_$=&O1>5w#2yLgqK zRnOcX7cEFz)UL+kSLz?Sw)}AVmu+zen_6ca`t0z;{Up7j$~#$t+Xa$=2n;UyK|!!=Dl)@%p6$KxuHX>%lI`nt{?B# z`|%2$ls7%j@LWH+)8`!x_f7}5YLxohIJ=Z@1AW#Ra=v~1IJ`o);Vs_2Pe@gryMM>! z$ipF0^1=326Evzzy@!qW{O31c!>xYxH&n7o=xMifjmFQac1&8(sN;WCDp%Ngwa4S$ z+Nrj4h74PA_Go%|>vREwId?uWR0ip9;@D8uG_Zz0^0fW)G=GNzq?ME zYlwbY?&4a*(9N@V?XaJ+U~k>>nuK{j?02oV+F%6N*&gR~1*kFb3bU9m87%Ju>4 zwH~Lg>*Bk-%c9J?b0*I-dTBQ<>apDCZGMmA zJE9y0xZ9*x?|ioPj-=q~L;CjYxVLV-Yvm@C+c1%IQ}Qb~)-IuPXgsmXhF|A}Tzkeo6f8vd!mCvsjW%osY)zweE>(_=aJD%Hjpw5W0BbE)^zH!~GQZpv+^PKv$ z=i)uLTqbn?BeO!AUh}>!|2p^eLA$ixUI}xw7dxeVE84I`JJIj4R5;T1?d$AXt#18N z>*Kfx?+Oi@=4foKRiBI%))^iLy^Ps%IRB9IB>&4{`6D&9wa?XP-KqaKL#4O%=2qLk z`vCYISM@v8xWnpNZw_g*!A&yB-)5{I0R*&PDU8Cf_SZ`f_`|cI@A_ z^r^hV%iV4n{r_6*zdO~Y?i$0>=>})@_D@UBq9{z;IYQUY$T@yytx|#6Q)$R9k7iJlp!UHa}`uSFc_#qE} z)5M)=P?U79`_u8H^=>jL-=ov9r?K#(&7C(M`4?6;f6{%~aHr(nH{Yabb{=#r=j=Ko z&ajO9{=M?rNqfrR^tA7aVOC|x6S4} zcT28a-r+)*iB~3WE1TdppzTA?TGMr3e#vn@-Be#yuUU5Q>7Bw4H+r_#T<~kENq{E5 zm8D##opP5v_MNNfdA^VRzV(gMdY{qkyiFW8`>)IErS_j)1zK))+IOiC+V0-_xI6P) z(f$0ALJZgW98%?iPeLr_^tmC~B+x{GL z$;Ns6%lhkvbZck5?Q!>-1*;5S<3AsLJE7ZQXkP_fBgDUg-|r z{3OlDmUh`T+1fi92{D}?hqp-mdz|xajrH$Uf`U80+1<^ls7$nbjCQs6L`|;!=fA(* 
zeI1^DqRZ|Y9%X;rv%Boaz7fq&wv7zik^5(y;o9JkPg(t%YHoI~v&HY*lnOqtpQSnd zd0!LzZuZoW5wmtS=ssuf`Ihlb4~7`>S7$pqUGlSA8r8^EI%2)T(D{1E9nV==pW`0% zY%_WD4wcX8o|W9B@}nB`9~1Is$Nd)v$ZK`@oJz^z9ksQFEL?Wr2d|}@dcTf|3qMuG zHsxx~J3nnZv(?Q?&DlF^VxzxOz;F9Qgr-;2vx|PvF!AHwR^{$pKjiV@ zWX;+4_QpCzR(_m&s&l;r^6ekFazge`qXRxqm=GC2eihe6IcWbZ-}7(7`SQtscE0~d z!02lio$-}yCL9V#JN4)X$u}*^A>aA^wmxN6>}=}(Kr<+_rAC_5 z`bWdZVJ{vJjy#z4yZt}&RP{9(vOENCgj5CcKQ=hGuw^r z`lTG@t5`UFeA!)}Zv_0@Roj5@7O&ry`D;~kO;-PcvI&!Z(e`XOL{s6;r1@#1$F0o1 zbY$hflV?`CyRq|ck@OGaZAQd3Ce4AdQ^&Oj=Y?8By3g`AhIceJDixMcf4+CH*S58j z+&;W`*0zqDYq|X@UGUz$Wmg+A|7xXC&0KFt`8u!4Ch~XyzTuh8<`nE}ci*Pzn2MtZ zHs5O!y4-hs&BEN#{17M%0V;P42pT*^_>MU8`P6Gkjsw zuRosL6m75B_ebIUv#Wdt4$+)E^54<484lIDez@(vziv|I4dPv{ldOp--)7x{4?k#x82YahP?Zl^2C_IG*IbxR>Qhl2CPpw$~sOek3`V&~r0 z#~080R+N$M*7Hh<{r0K0_a=AUelP0BT8AcO-mtlG*zZe@Lypxej~s_iX|;kJTzhpK zO&+NSs(tDHq@dKf35P!YTW*VM)4s{Ndd*KZX#U@N!`TH{TiyD&{TX_7_ww7W4G$hy ztuQ>;U2BYQn}`W<0|Kr*S|?SFt?=7Xw`||CbDJLb59zhA$2;%`gTHUxf-yh!z4dTk z&-!0%ljeQf6O*@k#mUZ<+RScVw&icPOP{*`;jasiyHzahJkF}cj;5vd*sh#C+okQ2 zJu%I%sHZP;tDLlJbVuTb_L);jQpLK=U;m4H*4O;kKSt%2v@O%+fwRqXP3p7WZw|kT zIUBg8iDuBnALbtD{cO{eS{L?gAM3R9#k~o&@17~x{mP-+r$DUXo z<o37w|l0LNTpe6mk zN>@5H{BKbu)!ADi<<8AYh#i_YK@;gU?n+#p8ef;D9Y{QNwVF@+TjLMi`KxcxQ1D%N z-|qTQbB3^ zYE1mx{J3+i2p_(eSn{|;LAl58Z(ORg*4Uu-#}w-WK^bE@w%WLS&#;|W>sK&VDd_cU z$BrJEtsQrkd)Qi2crdYC+^e5255K;2&D_QNE{(X8`|R_Q{4x$RRYV|)67Gxq1R zm6}0A|rf_0w<-b@|fT%~@sVf#D{FH37S9`X3E@0Pdi zU;VNwcEqu|+vh@qRP9qz;`%L`s17R)UoOAa*xSt6+M>}a!~O$*yf&~c6q0E_yXuE9 z?ZFkx-Nub&o-pR&*k#WjuH8_u+@M`IPMdP;rAAfGwz6|vp>smnEt-ArW}p6Wq-d#q z&PV6PGwzKW<@xANWdBKD`qplDJ>qJLYG%r?#IfD0W*n(mwz2DvEj5mvUyW%NIHyuf zDVv}-iw3$RImeb={yOWhM{e`$VIluMU9#B6uzF}{)6Mxy3WJZv8MIgPLm%`~^?B9c zXyK;R<)iw3=>I%(?4yzscBNFFczun*NB30|_OiwjW6O_Eu0*K6IU8(u9>{(6KK1lg z58LuL3agOk&yB`u4)X1S_rP%}?c*fhA0`~}-L&Om&fAn}C0&cw)%M;R)b#IBbE{15 zCB?%R&z_A34f|!HgARXc&dnQb|J7+z*9_}wKiF~SjkWog)7+c79|7N;sqZ_7fjbLb zvOMnOoeQ^?yEV=DZ^+#96ExSHT5J5C251{}di(ph&oe6}fVXf@oZ!Op{Q8rBm;J9UFE9jr2rSc} 
zY3Bt+wI9!k_Wk;Adt*)i>f6n@>U%>KAV1s6Wv(zP+}})5Cj%_x@Gs!H!dvUe%va;l$^J{OFRkv}xp> zP2S(G^{r`G^~33?>K_(mQJxWl`?!pHG~8=Q(g@9hdhw+$c(krqj{N>P@ycz;zfCns z$GmuC_;q?=?}ZE8ey=o9^C79=x9L6Z z{hg&z0$v z6T6~uj?ciTDJT6u=4mgb@BQff;m_MvE3zhOqJMK4V12(sa^m6_^ ztWr_KZFdHp${C+DaD3|z73vy#Y@Pq;$lTXXRYnDr8144(@m2c%c7pCX`u+d|gr)JKYdEYMFjx!XDZr!`iI=e3ctsHl{CfEDu-1*=4ndc)7 zX}vbYKEJs*zvzlyb2`>-l6FOdzM84Mq@3S>33nk*%B%Uwp5*1kw>h<{PY;dT>>PI3 z*Q#+ultXX_x8L2!H|g;BQsE`q7gcj^+)Py=tyRzT4(Gq77P*c5abx9a)t4vhE|&b# zz5Lpedt$Hrn)lv&>PKT2rx^F5!oc+3FVv`Tre*q)MFpf#VBJ=?^B0x<>K1rmbqR-Ay9~QpMP(W5y{X-Bg~6VDGn{@b zl@-`QTA9?V>0L z+f@q}ymi`_7V=`P;d4pH58W5l%=AX^l{(0)af;I_Bl6rSW}Gw zTo?2I+vlsheY`_G&C<_17u*|hxtYtjT))eH8HGR0ir@SCh10XMy_dud=$>6}+M*5* zYNcd!(X0+j+#daRpWN};Ep2hYHfn|D3;KCzH=6(7xc(i7Y#$m2|(vb`_2=`+6dLr3RMF9&FH z`zILQIlmuNd)(DhUOHXLAvc%1g>>4`IeF*l>jQHA^thTgb4JZ4K zt&~zR$6>~W+ZV=Nsk7BD@bkXXN4Gb-&5tpm;95PGrh;w>G0e? z?Q2s0u3HTjPtbS<&pkZ&!m}nRJ=Yl;*FC#BnEW4;zcZU zIcms)q3xuWanm1t`8jZpZt?JgRtq{Fec;h+R9Nj3+wZ1A!!Gcg4O;T=c&Bnz+-x#` zD}2)AqjvV3NtCqzIsGR+6X4D$tS5-Bt=k4(L*Um#7*BDYs zUwYiQr!IBRm5~-kJHMJY`|j5nXC@DbjB@b!;61f4qwe)7L+5?lb@jeS(m(HpMC~b; zG5XWAWtZo9cGs%6vjo9quA@-}w$#T17aXjo?b zd!+|^#^qOSQT9}WxnA=hd!;Ox{X-S61Gz(A^!0xCVKj6YLf=&`n!dB%*lPFAEE&3M zbPLvua-v4(d6(Xt)nH=o)t48ldmKNN78u`jt!CVm)*A0W8fkpzmvj6O)WEQMsrp+< z@04#bzf^FJc;C}^WTU0$_Y4ew(0JwXsn4z6pAYK2rD>IZ1Gijky}~eeap<;!Tka>f zGA3HSLp305V_4vVX{GlrbDQhmc1UM!Sf84)<+>i35H)B1a9dTKt6P^n-#pX(%{Qll zwYLT|+w=FV4NdYghs~_?xT2p&GxDDmy)NR!kDlXKKWn0KPARPEdnRe^g4LM~?@Y2? 
z>o;nj=jU4cJiRY}GXC`Rx2%mb+wa}eX~nQL$I9<7^bS2T_DJJHL7i%^k7*oSD(>;l z8_kHHQ~K`K@f{Kjkq6@pdA%EHVxVo%p5DVIKAQi0QCH9Fr>B0r`ZUw7ZEpBP%>?Hc zf98*CDFx?`?_2)giC?#0JK&KB9eWrPTQ`~bWnR@eMZ>d?Zb>WQy{nUIf<_Pi(96#g z3Tt(4FjITuaPLL=nsF@zvOklPK%F;2ihKYD$p-}aG>%Mw`uiumizawV>uT) zwBHdkBd1Eev@JP%nmru&CHT-K#wBT&GR9qSAAG1mMwG+;zw862h4;NbZJl?Kv9S8y z(^oU1xBORq)aJXJMhq#fIuT$U==PYtG`Ug3MN3okZar@=cROKsIPV&G9u0_cXfyTU zqq&zGw4K_i?22_Q(hbIDHi^#yMosc=p#Ng+vE}iNod;gfXtFl1S?)F${Eu>$yLn7b zsq-YOU8ZwNJx}e%x<6@4t}s02dz0ap-gau);q8+DMr(pIE9CjbRz2Us&oGvNrT{oFSyeU%Pdko|np9za``1h$x3Z);m9tcEO_Z zJp);KpMj zV&#Ay5nk45{mzD6ec$fyMQ@z82d8{#nDNm$b3xIkW;*YE0HvLcQox6)19$ebxvEsxY(i+e7KV%a;r_#Dj7lwGJ*Yc`Y zqQ0A7;ANj|_Vj){Rc8JJ|8e=r69;sxyl+oNJ@u%A8=qBBwF19Wgm0zm(G@RpY)V|s zBpKLq7JKiOPVAA2z0-DLKNh!!aIS&)Kdvw7F7YiI8x7&cVY3iQ{8hw#$9Im6eRpE7 zxrlh>Wqcw)*v(uZPLLdLiTr#HUME28DZi1wmE>Im=&GB$r7dy|6(Ld*#gu-)VuvpRm!CW3-&cB1jUpVeWviV-Wk_ z!u?nL`2IJT@4-qweBDZXOZOAuesbFC2N5pz<%L@;KgIh7-p>O*#1nQ; z^LQ^Hua7`9Wsk;R20R9y;`fQTE>yAsssh5sp*`P9Q9fa(;z%5KK-f{$=PBF)z6)Cf zdu0D4@4_Bq0#D)I5>MD)3D=JA4~ZvSGoAs&DUfl9d(ef;CGJ7_UHDh?F6_m`-hLYy zkN7U!Fv7SR#N<}Ubt^nG`946-=Q4TD<6YRw2={__jvsz^Kk&UYafIE5;y6w(aRl9D zSLVyRutiuU((x2d*v5#{Q`UT&##7kzD2}7%CTAp0t^c7{*nJ5nY(lP)=DbYvuf)A2 z;}*6b%K+gVWLy$qdx;}lcljyaxwc)LWikD>sIXP3i(lAi4d=a$MZER+g$?~?q;1qEZ$)`VI zrHJ!_XK$W&!Ba@QM?8f)1GFTbu-Q^vCUJ!=ldxqg0Jv7vbiPr9kCEd|m!H3ba}J<5 zVb69G{}FIb{K2D!kv@TeiH;-qYpp%Se|%9FnQEyBdNkAw+3&kub6 zmuE6KdRW8}woW%?+J&v)O7gqFH{oK4%XP6P+olo%5739S@x=Qmr@0~1C*m)c(}{QC ztO(P~v~7`};(G$`9W7`O--OMeutTZ}3=zP$&*UL~({&K>Z^-e*ofg7gOL47uvErL> zm-)WdBCPmpv2Ef_`Ma=T43}x%2i}L^y-50(gkvNmT_S9Y1rA}e=p^G8_?&sa!?!PT zc|1t(2+rDkJ59REyn6z|woW)4a^arJF|jf50c4o-iAW_&G8@FT$tEaYVcUyq_iRZ8`54IZY(~34H$` zr`yi=DRO?|PGmP|{3PRP!uMY+=oNS^ov`zam+=dm#b>mqp%;OPS>?yyX^u-6k#*ewbtY~zIOs&F3iQ`l+x5bq2A zraZ+R2*L?l-p=@cmHoM zRMd?^BndAIS>pyt0%nsy+z?Di#DuszyEFR|W@jcdvoAz6n@E%>SBXJIMMZ;(iUt)G zBN}2*F|LYA3@RE_R8&+{u5y(t|IfEh^=$19%f;V)e*fqBKmUg5?>VPVojP@@y1Kf$ 
zdz;_$u=AKNOKBgbykq=6gIztt{8#WT@PFX<6knF~?j=pzXl&{E)<;QO8rYeG?USVI zS>|}V#OEK6Y&rSP25Zo}$;)3w{{O~SxE&@vOZtZq?g7V>Puo7^DRgh}_K$%dA$^-q zPs6_xdwPaZ+nwmn5B!ttKS^r_e;$;n=cT*7&32E{JrCU*`F#w!jJ-=Uhy&J*Ld7Vbe(zEsp@#`SE zdam?-(*EfEyc_-j`9s=S(0cCqm!vO*el~QxT?hS{!0-L!{{cGjv&gT9ezVu-&*4RS z7CDK{D}y{2p`-2P$UX_ZpWim@{KDsZo7a7n_x)}#j}1LzT!-#eV4Jtm=<~eCqwIc! z^ghx*g3Zq(e+l`Q^V$; zvLo1D#&5H?q4zhAqVpDXH~IWklyx~edGFr`y<9f+XT+bv?~nZAP0zb)+l0PedU>kn zTRgf%zdPxDO9pjVjoz*N(t)ft$opgH6tDG{<^}$# zXgw3It<}rxgR=DQz+I$Y>U~>4d3sj)X804xlYy--B9FHtq3ciolnzVCNQiJ-ht|ucv3X5H*(g)jelFO$w{er-O_SoGh3o&SLU9rU%( zU0}TZ5&G%gw{xMd!A{QG{18f=~-UE zJ^}wxU+#;^`=n2}F3_6~-GR;}-u9QU{dS+{5om3CM@8>pXw$ne+TMddKgQ4Zd0Xcr zYeHY|dd1sMe11K@A8t1wKT7`hdYfU|#n}IE^rw;@Zr==aFY{^VVn^>!Y5Nd)-s;Qy z26UCDm3ANeAv3qAwC{ssMKZ~M94&&fW<-}1P~=l^HmS8PAV zjregn>3XM3+ta{5z`u)fHhDWUkne_%w}lY5d;TE$PlLY=I~_yn`eo93vGGrD`+1}{ zV*6ZlwT*+H44nrn&^Zphz?Z4?-Cpm-zO0!NvKgYzs$=&g-zJW5I2~PKRUkQI5^o72>4)7dobd#yo>nQ&!V!PYZeIDZiCc!uQy1W_u7Wwue z`)Vxnz8Af}cVg>&jJ6NpWeyTcC+{8&Cp*( z_7QKp7}%9w0{IeTKk(`2q5Dnv%gBG+x6dN*Q`mkEX=i);%KK$>CV8D&@_*dZO8Ytb z)4gt=1b(~O>wMeeo#=(zeMH2EMFf>8Q3R${!Z{;fp60IHNQ)dFW|Qh zyUR)E&zVLm@O2~h#$fv~WY?m%cSzo=z1v1i5v%RmQ zvHdlE@8zfMuh=?^a$g2N&)b#!)8t?5^B?rOlJ_8627fQVkzRgjkgoThzwPa`d%w>1 z?Wg?OuJLw153N6e^nCQTE5UF7@V>_U{~vJTkV;-Sg#Z64OV|3pvNio1R`nM{&SYDB z21+-dfzJO%<+lGJv;H=Pwp;mq&QXJRwS(E{9RdHAfEM{_ek=JsMGV^-gMVxP!9ZJo zBjalUKOFhWQzSewMAklpKOLJrUE&$0T`@#27}8wI$A_f=6}0AG{okq>}@%fFpxbnphdpM+q~Q3n~;4mDCgVI z=Xw5HL-OkHwP;HjxcyH8IwUXgx<3ttr=b7q6tYO#Bb1zpE2&&+&7*48m)Iw59Mf z13t_%l)mu{^!_{~)8j+Cf$m2=JrR6X zAP;REhY6zMS6ze)r&s+q(S?;(I3z64@2}r zrKgkkPw1TBcW#(4g#Iw;ujRL$pSBU+=9<9vL~MPAv~PNy50Un6pZ+yu*9P^wI-q55 zvCs1Y=r2N#@#!Ch|B_FW-gqz9-_we>MacdW=3!(tUjAUdzu4z{a+rL- z_vv5q>2HI7;Sf8YLN>?i$>tN#uK~XgD%;c0e>e0W`27OG)u6Uc1GoP?I(P89Vi-T9 zbU~o+O+@cRzb9r$|2@BtV&`FR=k4%M_jDcdc)KemJU-%e+OhQ)WOF?~8~#@4N&H^v z%NY%RmS3&c`D9>sTwr%yK%WWuM{oacv0aa!^7hXGAMiS|m-h1KqqidP;SGU~^hO40 z;^XZxZ+|m5ngQ__K4HGs{{;L2uk)6`=6Wxa`bPrW-$4F$&+iTNzvA^jOIpFF|CqdA 
zMdx1l9QZ!3e-ZK*3>lnXPkx_GMe6UJmbPD!@H_a;UZFKed#f+^mw_zKm_fgb^js_e zp9231e(Op5h?idk{}z5nJUNWW*6ZGm-YLiR^i@ z8~7Wi37axev)gDQ9+cTLUzHNvPaoRmYbe~PaztH^z_C5){AN)a}vplf# zn!q2~Yd}}qEbzNVy8TUFZ!GfHlfMss1o$zpFQtuMe=2Fu^=16o=lcZZ|AyZZuYWo9 zndj-LUiW;jJI?bmx77Rn>A-(qOtc5R|MK#g*t!A!r#|mH0{xfx$@MqA{W!mH3i>(H zZw%7j2|e5AX$k80GvxEV{m@28ZzTOL^1c+^8Q^cgFZw(Oysd3M!yTUgH9Fcp;q@MZ zUIG0|>hMork1DVGY;6AAX>)JQ>)%ZNEP74wW4zy9MW2^eer{HbWsrd44@Ot#)Z7ecvPmt$xfxby?f3es3 z?U3|6-VbR_@MVVi-irQG{JtIG%P8|c=mp-E^j-u#5*zR0_Z!kyAiD`W*Fv}XvKGLP zgdgY2nhF0`U)B!vzZ2Bu+wjlvX^`&kbk3uUR|T>+db__4WIF@8k+QTsM0wZvGS9-s zDxa_DQ91>memOV-8!t!iA@a|He!}ZrBSpVW^R~qvA zew<$uP>{LI@it$}@)Cl`~xm){-m{perEPunVfGw^LYw%#4snC#Pk zNZA*Ww%hytIpps{enZ-hBY+7HoR2!8`QTcH1r zd;_?iw9?ktm`Ej>I~qGvoh`YQ(O|ZA2wxZSo!RcxP<~JbDGbWdlFo-2Ub2?>oYdX<}Jl)aU(%70$$wx zE<{ePwJXz7?8s&sv#pJV_C!9Flv3ogW!tlbMx(8m0hN3r?*dmSr1Hha)uuyp4^?bYW#@W3i(%b+IB9)VeX5YE5*di;c;SLM~BkX^*ho z+fl@FOFCOffs~t7N!eT~6SbPOwIkC}Xm9MuP@8nRacB$*5$VO#MJ1h0Bu|$<)FMS; zFm8#+*DP0T&!;fd!oHY_Vm6r~#_ds|qb=bY&QZyv3ra^@sxwzy>F8pzBcsS>yXa5E zsF)z|E?RS%4KD(SY)3wufoesgI1t8Sy3k1E8(kBSpIA0`wGCyJhpy&=+G0J;V%U<+ zw05**^VSzjoGMDRGljTTj_$~`r}A-vS5bpBW(rZ#M}tO5!Z$XrY|JD&Q&FpHF?kp_ zGRm;YRJx-x>L@fOT3S+tLI($kIQ3f{ERky)`iSek>1?L$w$5CXE~HW`Tn~fk>`buB z<+H_9OEK!~%7~|ABGH&EWMeXRS}E=jgCOvZ>9M1zAYI9baw8qEy(GYa!2(w&uboTf zJ3ER+Yz)d%Nad&lVyvb+3a)7sAoX+>;>camVWH z;^1gxuvjrf)Rh?|%=d_G%yZBLs$P0(-Jbj)T|&~5$h4%?Dd|4WUG35xnHAz(I8jp5 z6pHzl1jjqjp7)UHN;`^?Tk=9q9X)S{GDMm~S)drXF%BlvVzx3qF}CLOq7ik+U5#dl zv-w=fF6T%gmnL557W4G6)>O7NYNg8+TxmTmf{dJHjB=+5cxx!DkmsmaRLrKcJ*k27 z0%6d&)Y#b4n`rb*YpNxkVnFU~N#z(aQi;o{Bv7MhqZyuUL#~N4BxhRbV_I zuZ{A7C<E8DBHW(#5NqtK$nbi z1bEvqUrH(j)}1bbfyBYt3M3HI4%y1bNhN_L=DP}swp5U40!viJY-=*HGDt7Uq|)i# zG$N1?Ba^vc(YlGWS1r+Km*|UY(JOJ5Cp;gzk&4p<`)nBtrOUU9!D zj*Mfxur?IdB5ds~#ofqw&>E1JsJLHis*&l+HRiIMoy&x}SkrmdW^Dn9f0EnOt3-Ke zYhh)k#icvSOWkZwsv=6VVpl%nRP!m3Uf#o1LCT38g|xLLQ%pyk5{e0D-O8W~T82Br9T#09#kwws5?5*y+M ziLy+5OAc`WbtVc-xUHr-W}UK#T$bs&2sO5@@>)C6T&NTy(~XPJT+jQFCaRinc4VUg zlAyI@3sI_=YwlEVY{Sa 
zeTFob*@?^?g!wijrI2s4Jxb|pTh!tvl_C+_+|rqrO-gV!g|Z0>Fv>ZatfE>)$|AJ# z2x8VN1I~T(ls)O9foyIi0d>*PrL-(~krJiieGp@I*eizGp~D_QM<F41+})2gdXF*<%46WzJ?l?ARm+fwd&nsZ=2+Y*wECEY-?u(II9p)D?c zxQ?YFE4hAgdnQ(e(wa}cnB_Vym9ca>_KK7ilv;`_bE(E=&CN=j9P4auZ0X9gtkPQ& z`>Uuysu<020qtcSg+w~j)!CS44Zu+`B57=tEZ3g!sZcJpBq_9XbU3Z%bRx6DX*mk5 zSRxyYNO@M)kUAl5(kzA~lvM)khGiqhdr~sXYTBHInqfGDXsB#=%6vNGFfO zN5(j!M#M!7#Iz?4su3jx*GuG3k98PD*vJy$>emwomQ@Q~aibN7tH(t|k~j!GaeaGw zt+l+xQW?(sfhO&V+ti5Ixws9n9Fs2NKzo`q=ohoTOs5bxVC-aUFm5`@gH|-Zt(tNB z^|Uaz)CD5C1MOHMDbM2I6s(qJphPRB#Ka^~D#?o3gg-P&2VlIQ;T$d0$2Xb0+j|R4PcgD zXz$b&R=cINcP1U9^zPEl5v{1UW}}ua?$Ox%IHd_PO1T~gdTC3$dR{Vt$xOsD=NuN+ zS>U7J6i}oy=*K!CJ*p6KUe({&cU!~^!&OGkYjUCv=jcKqDz+r)o=kFDkZ2+g zMHL*y(W~kg+BwQ-P{oe*VHxhoI12-*WEo(X9OxKGbSfBrD$`{%8kyt}Dl&P2=oN)M z6FI_S+%-@{pv1bXY4laUe3S zWu?KUPw}E^e2Lgh+qNWfI4Qt79U!08^O;D zY4V)AN-^5VZsNiSnlf~kdnLv$VOpJ=I-pAw-Jq3K)?!ENO1ly&ah(Z#=WyYk1MVj= z?(g?NkP8_;C$pWRip03XThVQs9(VIEmbfddkeOXl&g*JRHwsrUjdzD*s-WirB?V^L-2v=bWXYboQV5RjIxak99_EegE`+dJrW7c*SYi6*xl)3&e>o<;Np&i zSaB74dsmiAT8!wdm1t)N)hye2dQ#4Mf?Wv|)N`~7soqf{D2+bUT4*EQj?m}=(Z)cW zzmNrtI|bRB2BN*trR?9;+V^}a6H@28O zWjmXFiuukubKvD5j@d(y24^KX<0z(d|4 zP-MOcDBVi*f*4WiQRuld z=|*B!fNd!D&roo)Lr-kDlb1xUiDP1ppXZc@aoQ=@l1^|JyH!`l15E26j?-E?%XyiY zaCkK{`zldV^g>o;Sj=#{R=0203d-!9(QY|qHY*!`AxA(CPPuM}T=}>OR+5{wm6wi%lhk!k2lipk>XBX`e+3Jg2VbNB<7PoVx z@h-(>n_Z)9b0UV`RwCJ>pUGy|xFxSStuDOfdpLf#WulcC_Hg_(*X5xG?HO9#?9~h1fkP*0V_K7D z$30&Irh-PG=;Wsydr5)Ys7%Vsr-L&0Aw!P<2(#>n9C4#e$ zfqG+&>yuQ^F&a7Op$_JVrUkWkf@YCKM|&q=XTI1&MX?XQ@VHQ4m^ul=6bobgTD5~? 
zG;Ut0I+Z9=VxiJ|i-}flMu^Lk$qdI87I}|Y3k(()O@20J6bz*E$U>RMzbTM;lUtg zl~1dT?Af_BMjg~zOk{~vdoI=$x}x&tv0LmTxYX8b&{ZEa6lJK01YePzrYMx7OuoIZ7LGEqZ_lCG;>YP8190F z)uAjmM4VUT)1(V!D1S|7GeMgR7rL5N0_koprn!upjQW{bM^@85R{glpkR8s4D|9Ha zCqGl>PHtFoATZ(c6wjLI+OL%!MZaow4@_a*rROl!?J5qtJX5$ViQ(^_AaU7KCG>~N*XK$oJRC23BvwxUCC(SmnIk94U7 z@$RNNIiTH5m;%+enVLNffdQ#?drbB6K#pG!idN`c)sxM;kyhxQ80z%v7mV~RS!Sp1 znm|72nKEkJsUw?*%iOIio-9+q^@N#FY8HXm3fJ~F4HJQqo{sXsa{)FDgCO>9z6(iZ zV$!aDN;jJPMNH`eCcb+V%iT56P@%hy(=F`?+cqJ*UAMgT9D_tIfS`BEI(*CQinvhWGMrT^0-HHISLu#S-xpYbd*-+ zp(HcSE2SFO#yE;nmALFQ+p%Um@>xD{a|SXx)^ZM{!>iMGuxOXeyKC{7>}ciMog)MH z^r$B5EG&32B1Ed5)6At(Hpb%%i!zRv=*6Lb8O3_xq} z@j;1b9}QgXXnP8`82eh?MUZkiUu7+$c{>RMLeCU=7=-#zlZ#c^#zY1^PcsS3HFIOw zs_6pmD(w1%qtclKXK+V$CRS+qnA;F8CJgmH0hUo@g^Li*`G|2tN)%@c?tC(YbM&}M zYspcqmj>`4Kl%8C4M>NYTPK6V$ z_3btt1CW$#8A}#oJqZoZ)PAOz+dgp~$iwpp0XBx~!qY%@%A7H)%2dp_gw zHjkYi1yRB6wIfREwAOwS&}rnqYfq=<1Su8eSQW_$yW8z?(J=<;oN+O5MdwrP{L8I0 zH8MAD3tWn;{|0hfp0Y>p-812C1)aO(r&n^X8=TIJTYEp9LhBh7;8rhN-$N4qtUk9a?ogveU+B*<$n4m7D=O-35-@ zF>i0>y;k2)9fGG%|>GaSw7X~=0KoJ0_|idg2SXVPb8#?R=2>xdlvL*|F|XY zg<^YrG0^2gQ%^evYD`N^){^ULaVy`YOx#AZ3mNw?$En1unUuujA7HGdOKFZ^ot?Qr zTz;sbph_NStn5golTo{0MfS+x(jFfgF(H{hr+9QjP=PZJ(c}u8S4oHymrhLLjkm}X5G&keL#{2#X>ifhP6P|$I;+PN@AafLt}&xP#7IXiL}A`j zx|!*Qr?!kPGDvfem*%@TDT?7nMf?&_2YsOQr2fi6w_6OPyygOp<%=#smI;BgkV~*I zDpUTkdy$+dc%g{1Wk(xzr}hwexkN*|SFYk~ZOJ(|>M5O5$knp$1p$G(zVPV6!HPiV ziVO>6F~R*16SzHGOS@Y?3jt}`_)d2_-R)h*)(%44xk3Q%9(~u6f=(K(x@Y95?pOqY z%o06#UMiBCMwdGnSr6duCnv=o7De528@wD*qU=sQv!s~NH4?K1Ufy9Hx4Em;tpG!& zS;5lEu8*$Bt~__%AlywEy|UzKMIVwjk|ob+$-%wXM2cu9c5m0o@(^S04v)9S(nD+* zlIvA(n_(epTy$Nd-fkIidN60{npTGzLwM&zkj`kqq=7x_Q(an^H;!T1=1W=EBFGB^ zneY`sPs@PBc$Z_4n1@5$yl#+CdChuHU{rIMOqNw#L14LQ}T;X-G8k;J1djwdDlSIo25_ZSKPpP^5YFoEQ|HPX#Z;>rDQ7ZkXonFI!}-g8~5YnK#CO_@MVHHYnkWWr4-9k zrr^^(=s9OD$%&;_4R{fmYkKW;XG+(X$qp_RTDproVa~2iJ09rmRW_lL(_(m13(81G zGtY4rtL3QH97D?-i{tBDHnB}N^}a@ zpaNX^D${%7J>x{jV>~Sdh08&!0UkTn<#3dHPS1tWu{_r6F(zcGSvOUi^@vr}d`Bvu zdfWWOh&wNU3bQITCZ 
z=RRzVv3pMBU6r!P$=onQ_uw!^tSNU zwCujODnKItozx9lOsg~|FrU(LX+SU}#4^@xeX368ohxERhXWF+Igx2OYb`oo@Q`wf zDUzeJF|9STe1fOvb!IDRK$kS2O}W_OCS(G=vf>^G({pTk8Z47+=H#HqpY%?@Uc%73 zm;*0Hi;Ji3qTLmz2qvgH3PrjJtKPSZ6-7Hm5t=p&>Mxgvlx9i(a4IrKWE0;y`yPMh4lOxj~~WGOXSxe zVv+03dS9pDZXOJnw}heXU{c)B0*xNW(beQ&TZ;IwiT(;|aF$cH_`qQ{sL2P!t{m++I^6F@liPJK~2U}9MomK*S}pn$hY{7DO0E!KImeXzg#@1bvfQo+gQTK7V>+P#DN?s?Lm97b-&*0#DJCQ8X!CqvW83 zarcf}sgUEDjiJGC!a-qhl0l~eCmGtmoY0?O^>i*F;h7Gt%;{FSBzEB^%3b@3P&>I7 zDxt|MO^YwH11;ZaTyP_wqug=mC|fzyaq1TjMaFA>kW`lmBY&%aC$QT^>WS4m^i(zE z9qxj!lvqmg3B@~L(8}{7ZogD{87OJY37IZBv)D2)a5p@Qb&ABHSm^O;f zmppb%?6YIYuXQXQ&2g%_o6M{)&K;AQ48}6AZ-IJ$GPW$REHA943WJ45fv!iYj%NyW-Mn!j0s``ne(Nv;+}K7e+gJq zJ|5rKvq7v3s#G;IWwKnET0topJ$r1coO)=;-y5*b;{JLLIu7;lG&gL!#k;3_N(3oR zpd_jI5}dZbz^A}`r+cx-2ZTm;?_YrO9B?&oNDV-i<1K-kl>?N&kC%@#J6?Z>ga@WO zt*;yE+C}eNyNR!hihB>jC_R4VdblTWoCvx2dL*ix0M)W~U|^0v8m4lrs=69;*D-!+ zmnjH0;`OXrWV2u`_|D`{B-K1TA<}mf>O9W#IN#$!kNZ3x_gJ;d@=f$O$>U^?)gGsK zoaS-5#~P2d9v6CS@VLa|Qjg0$COxJ-7Co-=xZ2}dkLx{d^tjpMR*%~~?((?X;~tND zJvKdJ^*!ow`lH6zc&znU=ds@7Qjg0#F8A2vG3l|_<64hyjwrx>g5|o{VZ#FyA-(u-||HI4QX7cTz{MZRf|JXW{Z&+{km-7dD^=BG; zLHV=%Jtp7(VU;toZ#jP`UHNPIWA5_*;11*0?^60%eajv(zGAodZGHOisQjx0>(O8R zb@8M6s`!J!va^|wPAcET{ht7(sNniMT~*-!p_B6me>ahT*<_`U=vxm; zew;rJukzMbo1H@r&+0pIuBC6gpu)Aop&7=Xm~HwMb3A{M*{i8FKB^OcR^NV5_IF%f z5!F&|-+alVzJ5^hjf=#O?mGZhlYY3t)QmXKWqU{%;5V0`eUx- z+X+g32$bFt@WHFz+gI6c@@?J5xAz%e^&iBM{A)m0Z*VXC5m5OSzO5qahu;WFUbW8j z*Mj@u4}b^2dHliKgWziL5ZDjO-aap{;X}@nuLTc-Rs4bCBVZqRR6Kt)^cc7qJTCqn zRNWA56;TDa9IOO4gA>3bU=>))$9yM(>%d9i6#nqI{ND;nK9N7bD)}mKGI#(~ zoRjaVh^BzM!D(O(AA+6^t^;epqoCSr$;OJP4%`OL1FQH@=X|gitOxgi3&FbgS40iq z25<>@99#-+`9MXq44k;BB3cd>!6tAom;@(%kg*Ef1g63LpxXO{moNR0$q$1$(&s-= z5f#CGU@zFPxguHx?gab5B_FPcR)aghHA??TMYI+?2&!Esf7Ier%LjGE3sqj)^Q%C` zZ#f_Ql%H!r@ySn{|MMARrJn>PPw%k$u71e)_Agl+rtdU<$Cs^~ouJ}Y_Z5rVW>9gf zdf4>aLB(wssJKnqW#!BR<>x|BlpQE7kR{hK52cl6?mii{RN|3z3Qut?>ldlJ5Hy+ z*!YdpN7-@BAC#X+zST2LKJOB553EGKWUk3K^Kse=_<4NrRPxFEA!+%&d$GxL*BQS_ 
zA66#4uF3cv?W5fIxjb$BuG`K2u7cHT5+9+K|9zn9dC>FQyGOb4aMDUkpATxBOoJLH zH-H*fw}KiccY&_|zSit)0j0kal>S~&>3de0yz2GFSA%;%d*`%RDdK2|wvBledG?KME>NeRrF@9~8f5wdqg1$Lvi5cc8x>l$`^h?C0KS zc6PnV{5%e-9#v~hz5|s1tN56-{I7qr_wOwxUkXZo3{-sU)|z}hDEU!P@!9kr7SE=) zT0AS>Hp-oMwt^~8s5tNT@_k;u-^+L2XZow&KFXb^Cfq;DosXu0Iv>@8D!C(mx8yP91-MUFX4tpvv0~N}l_& z$=8GOXES(2dS4mk&X3DLogX)WIzMg$Pk_5Y`E}gO=RIul+FjPqSA#k(4uCo?njRVD z&YQhowd3n3sN-une`H+aOcB&^x)#)UvT*k(H=Y~^bzD#U+9-E^tN~SSJt+AeFQ4>v zi_2k9>Bl_Zw8!eZ4y;1I;+t0PPEc{^2PLn6%;fs0wen8^UBB@BLQuzVZ@;Cl1{H_( zpyZoD)pt9nemvp(CT{{I?*$cyP2dD@AE-*4=fH#K*eD-sPZ>>`9Uu~4k|vC z`z$`ypyE&iP6ro)%C`zsJ^DcDZw9A;`#|X*0;PWp)OcI*xQ(k#KeT-9pyaDSH{OCe zk8AV5 zrok;>4%`J6!G<4=a`TPl;41hc*avO^SA$!@HQ-)wEqD-I2Ob62gVB$<1_P_WjbJsn z39JD(gL}X&V8c&FMO(onxDDJ6ZU=XRJHUP5PVgwW3ycmh-hfr$9x zdDG8Ko&(h`y`c2hc=-k|-wZ0>@+ZxH8kGH2p!C;*vcD1B3vLDXfjhzd;9l?mcnCZQ z9tRJBl?OS#!Aal|a2hB-YC)B^?ibc?j)1b$|4U0h3@Ux|pDexdsQI%Hlt1mD{F(4) z^Jo5Fe7*UQfjhoH`CrLLFvK^E8|~t@29(_cRim9BOP)U3#d+hz(Js!_&m8UI+zYxm zpX2qPZSv`Sq(S+&R9k+Blyewf@-=+;Vk`VUQ0-Vb#q7@mRqtg&vO^e9(Rk=~b7HcKx#fl%L!9 zpoQc|K7BT1v^!4g-e7W}ioO*XC|Ngq{%0N(x39n(e6B+JY@1FQ0MX5Uzz+cs5l=2rN8%Yquu;u-*MyH zpFYOT6DFKJ#-&Hk9^>XYJ0}}|Y{nRuzjCJWyXG6e2b8_O2IJR)>W>>hUH9*}*7)6^ zTQ6E{dPPurTR`QnxYgtlD1LR)_&!j4eW&sB!FuX-7+eS@Gh?C#a6;Df4(3en04Vz> zJb&Et({CRWO+)`UsPw-4m}m<8ZczL_P;uJt`NN(+0#>3I6~{yqzY-Q@lEn|u!_`Q&$) zeBwL39Z>T2cbU9ygUM?_$(Mq%p9YVBhu&>|O#gt%SA&wT{-F2o0h7<)Y2AM{&|b*Zcy>s1@0mLvM-E@`oXQBV$1Q#N6IO0JC_BmhroRW2Kf6Ko zw*#Qc-}MvkC%6{24znu`F1a_KV){c{%TCL4*5=S zJ=hO!0PBA}CfW!d0af4RZ_Lkapz2$5*vhN_t;yGe^7F_M(?0=<-+avMoB(BK!hf3n zWKeddgNoZcQ2G^rGkHBI{s5>rRQ=uTRD-fp11fGyLD}gAWoPI-fMw}a7tjfr-E zRiNT95tQ9s|1kaff13U-P~{v4WvB9l>Fxh-vp?})mVY5Ay~CcLeA41p6IHsn?F1FK znvs>Ry!oKYtE;GVaXvVv(#5&DveMaEH_prB1`oJab0Q0epLntT%|e*HYt-v-Lg-b<~#{h;jE zUFQ7*YvAWyZu#cFyfT^&e*lzx$`vL*QE&cLUTOIzgNjoUoQU3b(D`|FWmE+}X|c)c zK*?9#>gx+Se;fG%HhR07jNh87j80t0JipcWHEpK98$61<{kF>J7+AN$(hqlGgWptSIBcS?$Q2x}st};4+d>(iZ+zlQAm%QH6C){cB 
zUQqe^K;=972D9J4+T{Dd-Q=76#>!}q?1BB@8gMVT{7sfGxyI!CK*?+Ft&H}8yFuCA z{x-9F5R{$G_ZdIo?Ph-$D0$ueW@js?eEsh?z2o36(x+@Pel55Ye%c33z7~|9M?mE} z^ni~CxC8n6&8EL?i>2@Tgry$@rN8vS%4iFEn?dpGwpB)(;rD@xcf+Sme>o`rnCCZr zhT{;uRiCwdJGNIwtKn-uXYzyKTKFlS=XeHl;CgW37dZaGi90y{!6TsB;W#LHSX4}mIY;%=+QQc(KaK*^7K zdG*)K{!Xw@`MzQ4M?lrTX^*9^f2=Y}lYXS%`}37?*iq|UQqg5=Z}r%qgS`U_-V~!qj|CiO1>VH-p&@2_k)rj1na?? z->oGgadM&*jbaB4J@=bcJ z#d#H|cy0t`@37}5tQs3l!%ig- zl%3?AW@j&`a`%Ih@9Q&pbeGw!1y!GAcblDUp!9{Z-|yweK-sT&gW1^*%FikHn0`Gd zJGE~%ejYfP^y8kN^p>&FB=~8dw_cqht z1d88xpV<>ukbl$tW1~v2YW>(~0=Ns5{mt(%JJCDMpY5Rh=?7Kc9UCkTo8N8rgwFnZ zOn>=%O@AjS{dpTrZ#5`8M?fckpV?UrDsKBg)%WQ8O}^^`KAxK_o(n<6bK8eZ?+B>& z+WvsacWoXU)xsYD<Q60D%oTq%?e6aTu);}j~wSIm8Y(T#1lU9y! zA$-lJ%GdqDYH`2~|F zcbI%T*o5AyFWT|D`XL)1)_=+R!!|If^qnT}{j!z6`zt0t0k)IA`(YbTChoHFr0NkX zZ|hh6I0Gu~b&r~UwA=U`DEmvkX8bBpey-YMd>^=k^rmlGy|#n0vkR2`faec^ibK_7 zCZGH*(?1R>4jcO|pHTL9eB1PQe#i8Czia72**OGCZ{80q&YM7If1l|e0yQp8d)(}A z0hhzC|Dnb6FsOR%e8TEmx!>$h1SOvbDsBy68r%owz$rg6z1^VXRX;X)6Ig`b2KIs{ zz*S(wPt4C{KQsBnCr!TeAnix`A#gRA`?=XogRWlZjB{};K5Lxu=Z$mz%zv@*Yicch z*Gr5)T5tTMD~&${)?;_oRVH5zs-IU}ZF~f({~rV;-?h;68y1ao{kGy7)0+-DJD~b= zzn4c1Cf^Lo-$~b+oq3@2r!6-5((6p#2dZDJ16A%m&+i9yJf86UgeBwLak>PQ-KLwz zx#K;$#pD}7>DMjyc0l>xd#mYBe2vMsgOV?6G=GkP^8Y~7IM+{hCC0h_xw(0q>nHPD z#<_l2n>0I9+D$&G!{pmR`QHy}96sRr{h;bwy~6BQrY*f5l>bw1H-FaTE&p~<{x2+; ze09^48Afa=ftK>0DD$K;bi$&Z7}!0El?te=BT@CU#oIOPuO zKU+Zc+l8+k=lb(uQ2loLD)W0IsD8c`l>7+T3s${uoa@ii!9Mr~a5cCFls|jCeEaLI z-qC8)-wsN@_8zbQM$_K`O1=-2o#S49!pkSW$>hsG+1~+5f8H9C=RnC@&S|A zY%=~ZSV?-*2QB}8Q2zCP)Y7+svU?Cbasl&(EyhP58y6jhKL|>m{?eJ{=l6ko;8*pV-m-6x zi+00r2i3mQzB4Y`1-~8?e+ZPl+8>OIwj)2j&*U|ao4o1?bF92tQ2MJqzZR5!?X%4OvPovQ7gRmhy%G zea3hfhwb3;^BJFKj*m`&TR^36pKbE0IadAz@EGayKP+4Q9wC1YJSuyj>`%JH^rwNcUkA$0Dp2jc0hIn0Q2Kj7`7?j6=|}U- zpUq$eel=V=KB@#OE*l?B04IU2KY@~O^z!uO;ctY`#|-FL!kP@QBeJ}X~B5cKXc%G_&%^6Tn{b;kAV$f{maeY zUQqHipyUg$FhA-+SD$*bvjLQyEuiEFy!?olp8yrdnk!9zJ}7=YsN-d`=eL3KbNW># zUjxd|gP`mm1?5k4wfQpvTmrugTncUgmw{Ws<=`Q(Nw{!)lmwSA8XvWTX)q100(0OI 
zP<}7J#^h;GeC`#de-xDd>IU<>50srli%njAo!Mysi^#Wts-IBycYwOjxQ*xO&-wmY z(RLxvw(k(`8yW2s{%J(COStf?=*stwxNFpvf9;aWS`<-{tT!8wqasb?z7o9{qj1N+Yfa@-^%?7^On2>Tl9Zn~do6XbfhdLo0_KmPEo*o7}94_M=LjsPZa4{^`KXZTnH+hHi=gj3{Y=@&yUze~y z7#+567+X8ut-dz4l*iWeLz|{kK8Bl}V7%I9beYY_Y@S{}^sU@x=+BwdvCM~U<@E~f zg!aSuggy_a6WS@)x3(y^S$>WP^M-VJxfY)@kq@U6wrRQlp>JWEmbX!8r@W3~JD%QV z*j~f=T<+(Y`&RDfzneGg!{M=GF&WP1upP^ND7O>pgfR*2l*hR|wx`z*+vQB+P+s?P zAI#2haR}Sa>UAb#njJgkdCi}z+?vmb2esxC`V+=2jA!UuC@swwg(=Ycw^r0;N zeqI-r8|pjx2rIYTemM4)ms_rX<~ruXaPtT2SLJal_p_{xMug|G&>xeBbuXufi+NbC z(P6n^U52x1^3Z7`z8)@N@wisHQnofDUl*cD5 zx10{!CG zH?(7P7^_ex^li9$g?Ym`3@0zQZ+*C2UOwhHn}g;IrW4A;xS0>-{W+|Q>4)d9un(8} z5aunnKV07k`AIyvdZfl%0oM${m|!fdC&)ZY{PQHydhm)Rybys$IbaU z$bJxKFE^XvI3LpG_QSe|^y&Q!k5j9ASl`eGXVb5LhW5j{hw_jfPRHcoc`9hrL2Fat zu^jZ_&>z3f;K~h;i;zBrPUu@0t1u4ba%-=0c^Jb`CzO}d&gZZ$A??>L!dQhqhj9*l z4)sI%nbV>F|1Mo#$FN-U+1kU%w~DrgZ{A1=&l`eB?yy4;^ICgz*zn?Ip`Sgw=%F(iynsBiR{ z%s0yG9@;FAmBl=)S4a=1Z~m0mcQ|=yC#+ZKTUc&69oEa*E6i&)tsjPEmD>r=#nwjQ z+Els!!^M2Kx`)R?dH)Q33wsXvao8>y8pQoq87?#KR^m#)cPEVKn zT&_P{|0$2N`B^^JmD@aX|HHbM+b{Rw^ks$mAszZte%zVO;q=42p-xC!48t}J`$l;k z!!atfV`a7RvF>)h7U({|&EIq9@A+plGx-znGdmNxsIIws?<3#;ZvXzMv1aDXnX@jr z`Hp2T?YQBZ#mPCd3pYkfUR58>;_D&N)Pnv+Q>*bzBR}9JnH!< zc=dVJHNCAhww+&n>7}rBo~=Ibyy_QT%O`QFn^V<^YJD}9j~!R@3G4jI7rKv-N3--r z+FAMr%q+eg8_nb|B~P7sK_Qye+>x2ZXP0Jm74oy3Q@S5{9g%&nfvr+rg=VW3z{C1>UmMZO?EQ{a~9vMn*q;9E?PU<=2mxP_(&gx zWKz||Y?i_)xg$eq)zhXX&mTw2=^IIuM=5;alTzyShw?jGl!vcTRktSidSsH!)qG8g z@E23n{B8U!9`kvpYWHa|1cgGPEfpNib8ZnR2?mIn~$CP0WK(fE?&e;U(+D- z41L?TdTGNA)qGDNo9<2(wzFtLzWh%9 zg8N!UwyQWd(gzIO7uxgDjA-f2QT>9YS6+L=D^$L9`nlDOeCVTx*rsyR_=-v)o4<5w zj-onU+Yw!Twm_keKW4y}5#pZ0=M;*GUf-evU1aXK8JFQRZZVgWsoAWlD*UNvR$-6GGpXH4zXID(TmE`HUb#r2I#u3xlxY2$(=i*De6ZBOw9-a>R^ zrohmY%`<+u=<_{j`Zmo@c(1(v`s=O-Z?!&AY-ye~wZIUSNb(`!=h7FG>B2?PqKrk1 zO1NRLozFmXEKbdxx*fU(6=;J2$Cy6xqk_x(l15vcYhLM(_2}9guDt8-FV48b2 zYRuHZ5Y_02a$oo8JJ~m)`p&!h`kpfGC6k# zNxIknwC^GV!<{>UiEv~7fGC}uq49ABpDD2vIsqr4`SB>x$yXxnliSXMx>*lr0-X+h 
zn8&Dh4xCN%J&9;W&9Vz?meJ#9)bw6h)64mg3h{|x#$+3SX_h(7>Yq!lzp~-F1@+Mt zGjCja)r>lKK1=e^?V`?Sixyv1`uq`d0sk4Yu?%vTYPevi)3{T83&&yM)>F47=C8j_ zqi)e=4+A4*=czOBIVT$){CSAczW&M^u5c$Jh6EbTpES*1HN-krRYDb5>7KA7CWaHA6;<@fb6)DX;D zUNi1i_Tz4i=3M4{$aL}5)q%s)4npP*$y~H}!Q$vb#`2EVD4Vf?Sp>GDeDvs>`&aW@ z`{>8PkN2-t(66J2B}_^f{OVJMmV8H!QGsuE51Njt8LR=^N0P=Zy{`JIMa#&kB1;`N znmX0SXYJ|JQ(LCCaA*%5!KUVh3}CbP3{zrOZC%YvW?wXeZ!Wc^lCzjw@ckh^k25Qs z?(UpHquxfN&LW0(W}3xkIA`7HW@a<4T)5!ni)Oaua-~y`Z&-IO))4ukmqnR#FP+Wa zpH@?eoJ|#~r)H)yKVSkg946fWq)Eignj|%AnaZh#CS^#nk=^xHO(5>*Vp!mauI8gx zoNs2RSEZ}@25g?Wj193$z19`%>!-1Zj-vC|xM)VAKu_z9rp%tANgkf!Gvvi3a-i=lZn(jnB%TsYy>MnN z=O_#6b(z#vOjwwJB>Bp3zOB2)m8KFF>oi(zO&L#u{QPEI0}CNKD5Va=l|qnGjJ@}Xr-@T6(U!~X*yWWx7f{{dPBtND$$kZ z^HO$tiW(XIuD$VwhQ@~Lu5cMfj6CbC3bs+BMvop%{ptJh|K!-I$#Gn0@CDakB411t zR%jB!e7}<;{h~s(c2h5+X=cvRsO@+3BaP>c9NBJ2jYnfQ3)8foiBE?O#rphKX_k@9 z(xo&`75SR$kf}&V?qZuJ(b%o!^EO>n(Mm0RVyZPeFoBz1eVzn1l_OWCt9M2l6LIF5 zGiTeZQ2s=AMPc)SzTP==X7#+{C39|m#nlUP!$^gJzOhU0!KijIwl2KK!L?A2+?% z$${xPS7xuZ!Qpk0hPk=HrQiT7IUL7Y={h)?(IOM6Xl^uRPGU+;&6HD4%DSSZ=f3dS zPoZ}|{S}iZR8@?B-ZP&6;&x5hnyW{Re%jd+pZT21bH+@Y@~lZ?$36AAFPQqGH{AWY z^Dek>-ldmae#O<-T+3YVmgTp;rm-pECVZOjG1Xgn$7@$DjOI;EUPf>kK{Y#fqa?lH z#!RA_i5C}5ZWun*)hS`8&%3GL%xJ0$k2B#W`D&E0nr@V)2<~IYJr{CDEJXg2I|gK$ zYByYW_2LDbb6aNm3G>W+ac1YtWO`;-u_HaRH~3|4BCP!cf6$9jeW;CKY;tML+ipy5s5hq8V9CNbr0DNS@$IuRH{EbU?xnfK_3c+&e@V?tXD@19bbTtiG3uCI|I(UE zx?2;SUFnX_^wll5*W7;lMK^YL-IyqJUztEdt$d1Ygc^0tZI@5^LFEJY%zX2+&#o`z z&U#_p*v)%qf8hKL|Cm{MZ+h{OU!Obe%AT|L)_u2i=~vPdep~y6^SAE$>5^67{M_TK zFMrn9t3UUn$Ip5DbN{*W-j9r*{k^T9dve21^8Y>S3r{|0?;jrg`HpwK{=D0+T(|0> zci%bv&sV?ugH!&VzVEZY_{)gOb56YVp8eH)Gk)=_Zhzok%QnCM()*9St@w_oEc^Uh z^UJ>dmfQb$%`@+Q-sYvR{oJc>{^})<*K9ny=`R<&XxsO`cJ!X6X_HnicC4{y{B4i)JoAYczhd^6e?05K-lpYEH~w|%jlW+1i5oxOykX(1 z=1)A_a@!@3-T3*cPh3#Dynp+54lj86gKu2=!TW!kU-k5ffBftPJF_jfe(hry{%!W8 z;yITs|IYL8f75#}dgzA_F8}V7^j$a1xufTe@0hk>;oDyJ;1^%9;<<+(-hKbM(^~t- z?oZtJv2*{~v~|_OIekZNz4g}BfBJOSJ-;3KqVo={d2Z_WcU^Yxi@vt?oj>?%w)1J1 
zy!_Z3?yes{v!Z(S=4+1b>3Ge1U-15yw}1WI_oRROx5UN?ZAaJq;1l29^vc`XXD!`7 z?W0e7=K~kL@)POtf65$s*&SyuIr_4PHeB$}*FP`y=MQ#|c<_?TUj10>nx|fI_p{&l z$Fw>@%G^T&U%cy!PI!`^oXG_gF7 zUjPLWB`Da1sGtbaL9h}P6%~`aOLEwy3sR*h*oeLN-Vl3N>|L?BA!gx^4dFtzFSM5ICee$se{XM@~K-!M|c`}AgLdY3^rQyxBFk#TW6e5p}-BY%lEC{mLb$w1|Hk-GOop0Wn zHFw(7jr}HXc$S$nBl_Z{$rtnNPHb}N-hA@TnJJUMKKM8#%(2qDO1s96t@WnD)XZKR z+MLVk3)_#(`RAk*vdAkA^G!C=1-d*lZ?P;N^wJYy+9oObi zYPzD^jiud(NRI9P!=S%m{Sj*}%nFIDe$t{<$0XTlr}kSruCW}%QcO*&hKl*Vf#$gn zk1yEQe|eB2mHcSf_V_&}Hcx8?M!%T-&gw=5hdSY*J9d2D{n)vTrzVUWb*!gTmSV}2 z#cSrZ-(%9(cvRfegn^y>8gAY0TrTBSz53OUX043dFy!8<4!eS0EsWUma!S96YhP%p z&X0;=!?^nyn-8xrXk5KvK)DoY<*u16n^t=`f3DIud{OJ}xeupA=lXt{XT0$8k`Jy= zUO)12>U^lD-J8*pI|22dCyYG)ba-g5v(^T_(UIj&z24K(vf7;HOA`7JIzkkEXY89`^R^zy0K@JAck^7wosDqTxmJ zmM2cWXl0$UeBL>q2QC$#HCwV*?e4a>`O%#k^q6xa7guZFWnk5tCdvEe zFWxtCNT;VwUkrTlrS_;>5pBwaUOp0bw0*T&uT5@PxqCNjbEnFl!?tZYFR8>F4R?V5 zUP<41*tBv+!M%PenVUFw!1`ees$2Aa%Xt|^pICOSSv8C1-)yg$D0+Rz{m6=LF$YG~=@M=*cU`3Uiu13U&2qW5VaRKT zkuGuF?RV6%N(eS?n9}~v3$fMWVAs`WE6?5^{M`Dxr{RTA!?ja;ByDf9G$EjTMn;b1 z-L+@-jXwKoRilugbv@QCj2rpqmQB_Z!cM>0UwwB}y-a#4zZ*v4zK4hsdblUA5>;7-*2cLoZwq;`kO?LRzqh@di$bx?yY8Z zW4e0Z9UPSW>ddNlqoi@}1I~|B8?Vfu3#zNGzkcF1=SJ|_RZ(jWmo+<9xX!5C?bGGe z&IA22MtZhAQl?MNw^JF`6|dY%x^-@>N}P7HcOPk+KKbFEM~_|iY1uEMz`lh<6Z`gY zot9P|TVzdfT%7;t(}oeZBD=MC`bj!$X07@GA3YmJ&HN;>YW3-=L4W7y_xmELrC0me znq2MsUgELy>drQsg5lqnd~9OfuC48yWO*G+>DUPeYZuPyylL9@*;nqoPiX6s8S$dr zzJsIY>^Hm^yFT4$)ET#}4W7?#)#RJ%QG2_a^G#2$vrDDz-#0W;+#KD=^lR*nry-WN zBdI9=Ys<4MM!uS~q`}hfzVeA1#ylyTH*x!p-1h?qTc2P1qPO>zdCOAIw|0n@dS!f< z>>l;!jdj7{?NDbA^+Q>%}=~K_1W0m z#;DcuwD!`fabtTvXL`F|%3hE&<P6q5-qItaylu0(yAQmoxOB^?c@_3NnrN1I*Z$K|r?#n;E`FQ2 zb5@ASkgzu&&OVM`)c&`?%6n}$bY~8x%^dVRw^{$K&P}}2x>}r_^Ps-k$<6uwstv6pn>qG9 zel{SbQmwbnsot$C^?l4t-}z|olCpH@*G|>M(KWujJZ*7(zg>${NA5r9TBWt$=*pE$ z<6fBEaCo*ZN7iRkSnt)NZ;pL>@WA&Y55of0`xcGP{`96`P-;-;Aj6<`BfeeAvhn$7 zw6X1gvxSYXc|^A!(Qlpgn=!4_S-pEv%?|`-4zS($WdFR@?Nq_eT@Frp-$l0c+nskl zuan;&-al^4kEq2$qw|qr;^LL|{M7@o)A8~W%?eArq1THxLU`+-&8$QwW7NO64>dm}! 
zSm9aa@L}%NnZ0*+?3=n|Na6GTwY#1#_pnU-Skd54Eld-e*bbgPxOZay=-rDtCvNM0 zs7B$gj4P=s<;(3W3QevIZgKsn`G(HDUeneEkAgSfupG6td1WT%@rj)Hqq6(l`GXFP zy-yy$zS^ zJlrAtRe`x;$lXcpYMUnI9PfNF;MU$I4e54!);y^{YrRo^uLEy#zATx~IQ4xn@pWk3 zSEJ|eIy^4>qqA+BwT9L!mS23ga8Gddh|YbVE}*Ucn3CH)Y|rPV_3iIBZMtgotTmmU zhNxa&dF^uI<|p=3RJ@UISJN*8tkyhKi$-US+3={`f*F6bY}W1D)TeWUrPRXSHAO zWyZ@%N3T3N8`#XgVMI=Wy>sQXX`ujn}>UHhI^au z8E-Lwu`jq`dbkgBc3to38_qISpLCvl+tnwe%OdY-pCvO6PaXK|{GgU!!_L=X+z-eH z49(xMx?PKgN1_yW{nd{uhlx!#F16hfd*_?esHliK#^p@g&EI+_yKUHoyD?oho|M$O z^XWi$>7*_-(?yrh?#M>BPPx7NQF$);?L%Va(JT^Y_cIp1FJTqR&m!9ET%K z4Er>mc6_kg=SgzM&3QY!W_)V8_d^>^^_+JL+umrcf{-@2S+w_|OiszGd>m>C58Q%MH&|jC+yS?AF{W2Quddggu1|kh5$S z-+inO&1>}xgL7xOkYR7u zZ-1WBZBFAVac7e*kDVSe`|iaKyNA`BN(WeS$NTL+ar}{~cvhQa`U7z%1eAef+1Bb5t9MI#ifAxvuzZyL3dqnj4{`1_Zf~!+A z@Ag>fWLM62@gtMN?!mI1-5(4%pMP&k;G@JNO{K+v65+ktrp@q@oggqUG%8cp*rc3k zd9w-?&BYd#;4)v8s@1C3sL3nTs%>o}sZ+O}tzG>F_6;2z8#!rj`x3eK`h^$>M3Mj@ z7EsI3UM|xL#Grtv1ql#{BmqJqpq3#40e-XsxPcBNlm(DNjz|(9qylOgQV`%rD*)$# zgt7o?$Pq~bgjzr?Lk$A_Xa#CeKtvD;iAW&GwL&O}1O~8?g+(NZ1TzFQH4qX5fiOT0 z6bu9gmhgit6d;Ki0-72LiIG6Sf6WFaqyhs=_(2v5ki-lDP2s<^kiY^f0 zqSMN$cJYO%K(+NU;qT5kYS9IIOn;*7Ct;1%b5w-UW&(3v`tCdbh|rbv?G3TY5y+S?W=AYm&dQDH4&TZJ^EgiVTFI z#PLBN@g2M-fTp}yyOlyN1&&lN=M|)s1PBRA)j+7B47mhy9>9QfU9X~IWeKPm0M;d` z0o24#NL}8Xq-ZPF_0@>bLVZ#YVSWC92vR`(GBVV_Q%iX_plkwV6DT8)T7VP-;%Jgq zq>uwr5Jhl<241QOIUogB(8YKay%pquJg5b~)qFSLHJp5eL79>OW`bM-m4AhX1g?udJJp4|MHarq{B|r;^H6RWX zP$Ype;4lFx0DczKkgK62f}FPyqm=>30Vx<0F}|J@?hJ_mDT85;!Eg=O4;!kB@>6nBrR+jR@Ut6PvT0o=`)BaEg66e`KSl9fdSB183<8Cj?lov^BW562%;R2 z*98&Lg)*=a$Vx_C;Nu@@mlUBe91xjA05UbuSOk5nhxhLNwzBJ~ps)W!Bl+r(OE4|S0bAL0|(@w&h!GV~2;!4@d3FT`(X8~r076?AzW ztOqzIl#vLeBL2kD2v6=slo9G1kqsL+Il>^D9UdQmPk2BZ5nFyE7-D-U7knoIlo1`?SHUN~ zVK4_fu~ZF%#v|}?K;feX3W9@z8~_Iy`KU|rJk%mOSjNf0GA|csQm0Utjs_jj)T>KZ zj~)+eBS3gqGx?~22W^25#r;)Fd7rd?U=BKh4X+EPpjF&I;)fa}YMdoNpzGl9A`d{q z^YB|2;ep&pqULq#01YargM&|WkVae3F+w>Ykzp>OctAerBcDPVGEz6{ARu1?Jdv80 zLtQ!^u>~9CA%L#79ju4cR|jpe0J#Jbw4v0X3kYSR2W+f^v>}1j0W%^IqtrwKIr(O# 
zqtQwXe6W$=YQk?_ga-`ZLkfUAph+DR=pv1)PWYBaqJz`{^%EP=0X^iSjZT-hB|cys zq&;j5bx>E!BYt8X=qu)YT|#@v2Gq%jAH08r&9|%b8LAfHH&P?Y0L3^&NQ;c1Nh9Rr zw^z~y(8F)91o&^UIGlnm)WOrB1NsKUme?Up>V$d;AL~It9ladJ@y#`mjCXH}4nkr?yVg2JgF|OMC*Fw2N&c4a3i0c_QveObp1hAy0y>!q7>uS<1kK z$V`9+9iYkNNKgZ1SV9|OZ9t(7AaGEZD3JttMY^P3XrIqf7wP~zM1khpA-)hhV$b_X zVH?B(^syb%Ce}b~v0c(K%Ajua8})f4eF8bkc#u*Q{83UVpFs*FAg0KIoO}oa3rLUz z&u$Y+oO z35Y4OASWNfzycB^0XYE715O2A_ar6Ak;UHxAQVoQz(6hG5-DmU7?3|a+W`s?KFH#4 zVh~DT@@!s$;x1{fZqj-p19iHlg6Xhwk~;VU7JPyoFPy^9SM&-^n9x2xerf%q!lOBl zWWV;EyQRf-bfZeB!OK?oc?bT45o;^D3QK+*Q@YfHi}349C%bxa{_WyCz1z6N_ifj$ zaVM7Unketv*DWR>#NRnAx}8Tzx5n^4oLt7Yp62J>@50yY?JI6=f7<;p*W|6m+pf!p z`%cg4RcqbcwZ_*)-A7b9Fv`vL$PhDCa?b;a-#6b?uPS?cLGs%LhYm$8csRQKf!D_u z9Wp<=#LPCn(Nm|jBa=JcUX#`1=Gf2uS*GRO*yC=iPc|^jI+5P>;GgC5j<=t-Du)R@ zUA9ldhL)pDcQ){;Q_~rq3hi9Ef{#L(WE#HLv~PdsQ-_C*YLqyA$&-UiJ4Mu3oiL%( z;blzBiRw?LU$QMbFL#tz#rFxBeZ^G{Ew648<82)N#LYjlU0)yP0p0H)D5`+U2Z0Y&=c9%1g}ZG_LDi!KY$9bK53%O~uVD>Q`#e+`dKSmX-~vx*P4U z;XI!Ue_apf+ND|N7dYShZjBp;bMCX<|4*Fv>KDuGz`0)_uf7TAzuK_an-~MF4GfoI z92|&GXp6Bhamzy)#>0bcnjDOY_;G{YVO(sUU8NbuhF7H%b{HQs>qlL{7`fc>Z3f0k zzMb3$V`Y2J#0eNL;S1Lfz?exfaO{e4GveXOIEEy9?)8@g!%#@*^I1Da#(J=*xPJ;q-%ui=&$gJFhm127InuC4cB zEKZwKa1rCt^?FA(E%*QLGg7^5dIUfzXqs#w>G!C0O5 zeDXbv*VXR{0paN%^4zFn*Wq+LDVgT)5?RPmJRrm#S?rmJ`># zS%UHWqDfc{jA?u8(0UlxPxlYMi?MB$c6F zSwGJ^mpkOi0f3zWWq@)(8jt~G0TqBs zKouYd7zo$}uq$9Uz|!{r|5p3|+y7w_{s+ZIppu2|nRWhYbqjc0-9r1kx&=J2E*87u zgzhUOpbue1MM7Cvg#W<#xoBY#(MK$fj4GuMeJw1ae>N(CV-XXp3iC-jIlkXahF(N4&NsKg_bg)7!8IEWS zDI4581(1;Z-ahFyjpnP)i zyk7iSqxp6$e{olJ)&-5B_|+IJ#~v9|-Jn;t#+Q0^5PJ9RW&XjMBI<&MvVKMMU*<2a zo(=j2)Qb}>a;u%t963>Y7?rJ|sMq(tfxagHUhN=b&=<P>-066KY8v!VDVHsrO79hui(LBVtqFryNuysv3l`a4(k=lV@8svfCyJ1V4qBuy;E21@@@<>a1oOI$7zt8aA z5sl)Budcb`{2xm~|Mc0q(IZS>jSb{*p@Wpt-%KI7F2QEhutDenEO!L_o{h4DSO5w$ zK2CYcg{->5C!|E;(HM1Xc!MyXiC{hqVKRbghO) z4cH=We4fxGU|G|iq)1s)8oYc6<*!g$EJYe2 zeLpNIeitY9{P7l#pTqJ>AAgNzud!wX+GE`aTTAH^FZE3aRdiXapKc-O3v%!rr9}Q1 zjwi(jl^1E{Xi{v~PTTjDa320yzXQ0M`tBLMWG~ua|3|%ba>Mi_<*Ae 
z_#NHnAsvTLlfLZ?$Pc1!`XHU}TAk8zP}a7Cm1v<@U$sn<>w8&DK(^2lo6*{pCHDd%yT~q^J57)^{CVnF) z>r3dGG4zMjWTW;i?)A@lKXQm2oO6-9gv3TL z(2Ay*6ps#VeK-!Yzkf0v`E2&u*`Gf3(tOpJzXH_C`5ct6C4$8zbhU)bZ~ ziB>{Cu^3+MDTdd$!)(Hv+y@ON{NiEtb4eS>rR8Yx9lijkYNulK&fNhnZ= zB|~#KXyk1ws`VZrCDN>7>e6B*EEEM8g2^ej<@_nyHsJkU?7yG zLJ{?mG|a;kKq!CXOPES9(HM&{Ly%yi)(B-mA9)0-QJHvOO>eU?LcXA_rqtQ@$URhE z8=rj?b}BMjaO~C!gf3;n-l{hu>atW(U$eEkkmG$?E#$$#L#=u~fJ8SUTCHE@K}5fr zPQ?c)>_dW%#S^X&bm=bG7MYnJq$Lz9;?3G0;3$S1@Updh5tX+X>2%*8YsH65$ypBc z4*>5*14t>eH$0^-;PaxoG?rQg%MO-Ikr zmQO|0Aj6A?HnpbAZ1xFrSP`Yg0Zbs0cneoD=@qk|mO9S*#V{Q#%QQ@IHy?U+nVoE>$1Qos|tQC)@kfkGC8tBzq zjMVBEW0C?d!7&BMhB+z%wfPT$icyeG9&pepmDN#%kYXxa#DR-@golYZ8+|t=B##%OiYe|q6U#mP5)|cK`>Jtt$c#Z-_Q#hIWq)Gqb^apg+ddHRmA!>D1q^DD^{Q2<|A-eK#w$1rIk$L*jL}!Zx zXpk0vXjaWDl`t}bgqQ!As=)_DZA#=YbJ0+NXdDo7W^ z6s|L@8_}ASw31)vMY}c{V=(>c0cz;0zF1umN;-FbTskIt zM>l<|A{xzqhCe*ti@On_>9VDP1TC0ME9vJYJFFMSH|0V5vNTH-I{>JoyOHzTV< zWB5a5rHy~_3kjt*oc*IjJM9{cE1Vye-n$|@@T-L3DU$0`zVIoX{j%+ z3B1ByGQj9*1=^nIH;3DYR*p*x=u*d9-wtV50(k5Klt2ckh71wch9Cq=uYRE=bE*LL z#2+C2=N;w;jS`D*e%hCGYU&@hiUi{y7mZF;kI)XJjgBX9N*X?$PU(bjBg4FCl*yRG zgyDx=T(m(M)POr(EzF@PG@lKma48XPwHWeO)_!OVtxE=o4V$7{HoT7he8Y|nfu_4n zgXf|MIAeOf#S6(rFo@whaz;l3dz|(+d055<|FyvPNF9E#_ z@*K#AL!JwG6YvJ$E#ScsglrR=q3*?iCp8)advF3k=Z6QhSs!FW0UaGEQBTP29pK6- z&@F&&3%c#Wzy^4=0IdO?!Nwi1F`yTqd<8sM1Ly{{J>-6XEY=;BG)WKr7%?S^;ClACyEAXdA{+=Px{*BZ3DsMfCyg0HuH!A~(VI z7T|rrY`{Zc19L(h1{y+yx&(L{5WWIXa2Jls0>sU6BIv=F7b+9-&oDkU5_DrDl7JlB zrlAeo2j7AofC%LT#<=90=TPl%$g*Xn`Bp!O6JYa;9i0z9N7!v1swIt)+^m;g8w zupG#CfE`@nqi*GEL<@nQ3aADgj=rr2CD0K=@&;bfOPb1oMPeVNfz4DuN zzG(YS?8@b8L^a-OMAdUOR89_D9LUj#9s(Ky|32go01p8lAv2#JJ7LTW%=Qtsb__-P zx@kn2hLorABY0S^Mc0?Y+`4v1sFAM%qaeBG{*8Y-xeuU`Vy!kl5{ zB6~=#3$zsIY=M>lt<}+|>jM8S9A3@bDx_9SH&=^?S&XhUuJZWGlP#xMO|3Gc^31BU zs?DxGr^fu63#%`xl~H@K^-7yHHXCg=OSVXM);U=BSiO_=&eXeO`>x(cyHE8cPEDO# zxU_Wjb(6Zaarg6R>q&btjahGnkFrVErrn$MXdc`mv}L$&gfy~MRO`Od=r%Ea@vZx{ zO>CFaeu)3@4r4lw>oh)KLcqk%(`Ad~8FbsgLA3|h9#T8K%jm9SyN&BMzT3p^lY30* 
zvA*YqUK@LD>UFsH*`Ra5=YubVTnxPwmJ^;Eajnmr$hT4N`u-XHKITL0=eTc9-<>p0 zbQBZCMk%6{QK~2|Dln=`RM)6(QQf0@MD>j771cXxI5mPA2|MCwY7D%eYaErGa-qL^ z+_3S(CybgndQ!roevA5N49G}boVIvi=Ag{MONK1jx^&O7z03A3Kd|D+(zDBNuDrGK zkG{`VJzw=A?$4zKt3R*)vgYfW!sYd|95y>{k!>Am60#?BZ`i)@{SgOZ55{H3A4)hp z^ho-VAr~fHoOx;8<@HxKT+PbaoV(@P(d)-<9KXrGofH1mlmHdVmaV9z@r|8jD{D(- z%UW6$(f?hlN>ymD+W&_(t7Dxtgo6+ z1Sp~XYQPf|jU64k3yK{%u8bR9{9{X}5r)qXe3_eptU?8up^UPPqhxON;~1GOEo)*q ztY+=n3bd%e*HcqrOS>8L7^MAJrV+}?v4f$%VQP;-meg!hk)wUtvXnzv2M0&vz1tnF zDMxV1QKaokX?bZ14#f=4wL1s@Qp(X-R92)S{e;xOb{K*cApWXj%O(CpQRmKzsA~u# zhyqF0>wrH(>WvSa1L>875t1Q`BziJ~F)NFIia80hp_?uAv4k1>7g@!cGNa6jx#DQ- zfJeE%55;RKHU%6PGt5dGZ^a}f7mQ;JTRA!kK`CU7xDs{$vae<_q)SB?8h+WsvUY#> z!wt-+{~H^KS2@Qua!lvACXS8}hDGIJ-YMM&^&n+~^+s?et}lm?aIZWpkcuBR^whPX zPQ!*(9NHv3K9CwVY^W7A)KGt1j2&$q=ja&xl#l^bV<-#sKYu6Nq+FylLRYt!NWBy$Vznq7EApBtk;`b%^b(hZ5-`A|? zE7`N;x;;$0mqG62=$K}Sd749y20VW`Wx5S1xnT*T{i}HO*ZEX}q1GFnPHBWz(U|hr zt|0uX_Md>ZDw6Y?r%+|dSHX(f&>e=r)R@xxfq9Y6?G_~Hu6m>T+jjM8D3L7NfPw#%t%JdE|4LgJF?sqNYOS_JQQM9d(f?iQ z_$g=)zu*7ca2G4QFVy-_ulC1!^uI?w`}eB)yEVNm*#Pe17rp-0cuE zcgzkQL8H^}wEm7fpd{taGJz{6InA&nS13zTCNDxNIWPq7kCz#LUH)ZNMELuw_;_I! 
ze+j_@Tt*dcV&aty{R3h4!Nn~=M~%lRupqlUZxSUjE zX8FC&cXCx!IO>V5!4j^LN`+HD@#rQfsYhwc2kH=cp`bt2S^~H8K?6?12#6$nZoy~S zhgt;b*FRMXS9?&OT&;vVU_yJmHl{{BifHPcv0Aq)Bm13=-gRE6B2#eVd-}aRa%I~_ zs%K5*eT#tN5fR+`3zJY1eUToWr$_6N`27VDy+uUG@|Awau?0vasV)gZO7KR@93wLN zVeGXM%++ws4ndm!hfTp^o-Vq3fEOX*(qmOJt_9{~yqY)TwWQ00(R+iJh<}7nns0T) zg2In7-Hnm2^jG15t0NMn*PRsfGKm{4zM{fqV%j2ty+%6TIT7d!lF2Au>g-Qo<}P zkwAy`rHDl+A=I@2D?~(UeeK|*L+aNyf|UuyM@)7)EX5*FUy&9Sbduua znX!U2H8qrvg~D{9%lZ|j>p2AXzzSpag+LG8R-~}(2idQRcts20cMF}@`Z?9kYx>Wk zA317)p(Y*vQL0}KDs8yc@GSUa6c*o4h;1$X8uVz~gW%>ef3UNW;DczzZUCqM=1FJ1 zs=v@by*72^FBewvI^UEsEI*yKRQiciSq%Tb3b<9pV*DpowMzN1DzIypF8U`DYjg=t zoc~50c#i#Ru)yb}>(u?V(!VaPS4tmjEiCN*QezhN8~i1`(v-CSa7^q9r{@5=SSp1e z;r$nrQmQa3^@waOoSKwk2<#x;Hp)K>rp8;924+-{s}x)}YGf9^K;w@W_J}^hjXAO+kPnsV1E;j4>5z=Q((e z{Wn0{iJwetcuw)v&ufFkMr)_df3gQg@%<4`d{C>z2LSP51057Q#z6xm`JinVb8JUv zTr%++xiQLxX8yBZ%9OeQ)aYReAmxv!S_$z0hU`}lu!!;wYI}E2H|t|?_ARvhbo_x# z39wk;`BJGsk*_~Nc#P2kf7~HTJWJ5m_=+3Ax1Q2=eL)u|AWv(7`Ogo)XdB}5pC5qf z5EB#LCpbnDl9n7UNeb_m5*`;CPWi>h21mt7!V?qY6Dc-4DJ3RZ5*ME=NeoX&3{MJ= zOAZc+38!Sh36>-$r%`lxVr*1WQdE4LBrH5GDm;vmCxj;kCnE#xpb#6CoD6(fyd)_l zG*S`~6%#IrPn3j3C5DG4$0w%gBngS}q2WnMR6EoNPmM}SPNKNrn3Qlya(uibCOENA zxFiBCgOj20v;_1+LVkK9i3yKLmJlEMM?o*-DNu@##D>RW{m_m0l*G_*Nk~dk8r2~# z8G4tJ06s~Q(vp(HVA2z)6Tt3{H%SNt2|+1@{k*iXolu5Z6CACJKzg!=tG- ziSbEEPGRBLoFpa+eka6+2ZuSu$AJ;)AzwA+r|l&f4X7E$tWA7e9CRIr2*)%TrU1q% zJW+4xlA~h5z4(;m;>r;zNidTB@kz;&s3b{P{D3%JAtf#`JUBFxc-kr~3@T2Nz(j%p z)jBagIy_F25CtRF!9^}iu690zKESvJ^JY}*nE23Wm`>8SM2HONL(%;4d?5siCb2^j zL_|?h8J?QV4+I(XwyEKvICrtZ$wKqm(S%t|iG{eLS_uSD5+4&4 zN(NaH6Ca;I;v+6NRtO7JQbK&h2FImI;uD~wIR2#b!W=+P$&`eJCxs?PB|yx9uexb% z9US(f7~?85GAbraq76w@(hd&^PhCV7wT)4bgr+!r`Yq_L>T_~lq3nvaX2oPVk;{&7}jbGu-f6z&$MeTDmEb|9LJKM ztylZ1>%5Pakqkvu&G1~;kkzR(! 
z!7_<}{Z+z`up}umC7EnfMMs6;7znShG@Rsw#P~kAQR!`b`dnBUa3CN{H!?Ub3|0jT z@d}HjXcJ>n;^9~jo*Eh+9tO@qS76_aN{NM8PKgVn0?D)sEONx?3+kqhbMjMut@G4(!N*~j%O|}S%Ab8rK{E{S37l); zfje`wh?2H8r_{6O!tIj}WgdjP)J=CB*xa~>rAPgh!5RtvR0J;`=QT zqv1Gy9?oGA|2DuAq{4qM7EFKsC&t=;U(74~PX4z&_;(GUKSsueWy?@*&hE~xluJld zoJ&%qDak{DfbqIp$!Hd2#m+RbToMGd>D@-zm2QhNpk=DgZP^ZO2(9eUnXr-HMjjB* zN(n`vJIiHlm95yWgl?-)v})g0L7A8uo0czcm*isS?2v@$f^?E|Y&^UtG&#bDfbTMI`r)N}`f5 zp)P^ByQ{l9$}aZ_ar1HYaD|5?`-DVO)QvtN?jCMnd^04fj~4|=L{$H9Dm0;qmn)XM zBBPT+p^iMLJ35hqf>&c#H^@`nTp14>{f<7zLk)GeggJB%@%pJzv(*;?}_|Hp$i-&x=KWFjKuWzp{PS=R4WvUG;b;%rNTz78f@bd20Ge?-WuC`=k zWwy(bQB_0cE*V`j@%oZ6_0vs4#@3zeA2P0f#@LYY4jEhOP7o%dF}i>Yb2%miLU#Oc zr-aIe{gk5SQ8S|+&5K;Q=F$9^9Zw%ENI2f`@xtWXsK<*^AFX+uG5F(C*n<($4Qi~; z98sy;>Lp|9&R@NBg8R+Y%ciuh(U6xGrGg9I=t4liTYK9%F{c}@UOxT0!}@Io(xX{N z>z-*e-s$*tr%6pdUU#1Qq0+pt84Ncsd`|G}c@Ycx<<9G~cvzMBk;|r)l}4>xG`d^g zwHuCii{7}uZ1tW&r=Y8)V;O`Xv3aF2^uGk@L zT084;VbeQ0p9-5HYgsOQCM)X@K8x!$E_`;kxKnoUhUFKoS{Go&de&--QB6WB45K>B zlSDNiQhHHc`xr(w38{w|)!Y1Xe1)F4!7hSfZQ*Y}&*(}+uU5(&E^+%i6`f`2$@n*1 zWO>D@aoaGfz@)Zjvr7o;vmO`UdtOB!ItL12oqw)`us+}HO@SXQ?yI3w0{O6hpFbr) z>w_DW9F^4Aow$O{6Wrpp03^Ny!Yjvug?44o^Ix{pD1cwAsbDPMliEM5M0-nZ@G#yJKl}V zY?W&;vjq*ch?W}rv^@?X36|C|;f4sjMcstwf zns=EEmKR$+oSAgVufmEyxwdtV&79tud;9Q8bH5x|<&jT~thy8)^{RSI>jT+ou13GM zH+4#W-Oz9q>w2Kv&IcX4e}4A-Qv0v(qB|Wl`WpS_psCG7*KFAA$WC)sZ?lUM!lHBb zbrKfITXS)<`zb8?3SluNyMTnn`y;u!u<+uK0uW^lZRfB#4Y?IdPwT~aVh)T%(jQCD zIJd8plj~BF7Z=HvTNIqh5EvzS@rG=q=lQ0Q^`GMosfsy&FM}B z1Z+N5HNMdGD77#q=L8+)b*+15!3Q{8ioVkGX^GJ;Z98D$q7yg!?OZfKA}exw@PiZfYvV)N>lP8a=0lkw&+5P75PVN&nGE|C1vX zaoS7IX*dlk=Jd}aJ$#XFq%l0|&iTzz`izUq;cCk!dR*?ZY?Ai_6Y1opuTF+eY5CnG zY--PepY7%b>&|fg>QS0F-B{0Q*vcff@@{V0m~OP@M0;jn)QWk!g*Z1O=IIH4r?Xir zGsia6#q^e^dfToL)9#PRsl~j7vwv}d%zeWPU!iO9tB@(npqqhalQHb>+rRPKg{NNz4MKZkMcZ{c~V zZOtK`mpeG__q@`%*|+3my1mnftIELG4>?`>Y)Ema{oOcdlwl>GC2C9Su|dO4olgdh zs5r1dGSZ@Bmmqb`zL9yOYK24wkN!5qxYv;UY2J%d-!B@|Yv8jDCwdKf_4$+a;J2p& 
zd!>GT+UM?&!ux%C4@Eyy{;r>OO*@B-n}XX{6tZ}uvyO@P~}xuIq)KAzkvZ>i~WWRm@|nU&mo-rwdirR7_{W#gR> zW}ofovTx*;)6OvNjn6gpt>AU3P5W<2mpVBv+IG(S&wWYfJwBaHzT#hgh*ys6>-v-& zu2YK-m)aTDPR{L;-y`W-k3ru)(@+nK@U zXRlr5U-7jnpUrZS|QXl~fjQ}r#CdE?4T)O9O1$s6e*Yx4MLz47InXJ#4L)W<6Y zPw+~?i@}u*$~i308d240pTx{9+sGrp?Sx;bX?eTB_m7wt;FX7yc;(@&ddU$J>B5U6 zOsdj?q#_oDswpPEBOU64t$_p`TxV^y5xOU3* ze#X{mPb^NmT#UIt$#MNe_O#QQg(2l#_7_~P;IJvV)fulrMs<#g{VaozTKL;uGKC2D zkhd6OzuBmAXP>u5mW=GdC0tE-m>O0 zsjQW(waib}PS#Pz$W*dGSvOgCS%@rD7A6aq^^rx&`pTkZF|t@$oGf0JC`*>5$ok6$ z$Wmo#vVpQevca+;vUHhRHcU2LHbOR1HcB=|HeNPWHd{7FHcz%twn&yCTP9m8+a$}9 zZI*45?U3!1?UL=5?UU`79gv-rot0gXU6fsx-IU#u-InFa9>^Za9?PD}Udmp{Ud!If z-pdMPpJiWUg|hE5N^T%GlpD#*$ji!&(RD!1KN>xpzQT9Y}YfyVBk1o^&s|HyuO=(;;*y9Y#mcedtI!itbBC)3J0M z9Zx6F{pdtGiB6_d=u|q59!L+ShtR|5;q(Z4Bt4oQLyx1!(-Y{4^kjMpJ(ZqDPp4j!E`5*AqwmuX=zRJi{RjPoeo8;1pVP1DH}qTj9sQpENPnUW=+E>Q zx{&@xf2TDx#fTUK#*i^$$}lEOImVPRV=6Ee8FNO=STL0tOU8<+!c=9dG1ZwGOiiX1 zQ=747Y#0esm#N3tGImUTrU7HmG-MnYN2U?u#5gl9j4R{DxHBG%Cj&oLXS^97rU}!O zX~r~XS}-k{)=Yb*BNM>L7&!w^(=!Stkm}K{b2bhD*A?7f1ggM5XWKJ{ZnDfj<<}!1Ixys})xy&`@I&*`$$=qh{ zGWVDVOg{4m^N4xOJYk+P&zKj?E9Ncpj(N{~V!ks*Y#Fv3Tb?yz%~>&9nYCoC*jlVL zYs1!I>$3G&JGMUCfOTLSu`a9|>&|+zUTkx=CF{qwW!tgsS%0;vb>|k~% zt7b>ABiT{xXm$)cjvdcVU?;MZ*vae^b{adKoypE(XR~wIx$Hc4KD&@z&aPlrva8sQ z>?Ssg-NtTbcd$FzUF>dl54)G$$L?pd*+cAM_9%OdJ_Bs24eaXIJU$bx7ckG|+d-enS zk^RILuwU6i_8Tiw7$^)CMhas^IfbdByuwUTQDLE|q^PX0R8&*cRMb*fD{K^X6}AdH zMFWMsqM^c3(MaK>a8bA_+!XE#4~3_~OVL;XFF{Z=SF}(_6|EJ1igt=liU37t1+7pj zx+uCTx+{7r!WEHDyAu> zD`qHWDrPI@DCQ{^C>AO*6q$-8isgz`inWS$iuH;OicN|v#b(78#a6{O#dgI`#V*Be z#U8~z#eT&BMYiII;;7=7;<)03;=JO5;-ccR;))_iaZ_Wm9DfrLVG;vbEAr*;d(J z*+Chg?5vb2yIOTZdB;_>a4CNft5 zt=yyBr#zrMqCBQNtvsX5RbE%#Qsyb|D<3HHl@FEAl`oX9m2Z@9mG6}wlpmF!lm*Jq z%CE}rN=jv*GE^C<%BW0KrmFHPGgSpuMO7tLWtEkximIxrnyR|0hN_mTw#r&%qpG8_ zQ`J}5s~V~tRZc2*m8Z%})mY`DYNBeYYNl$TlB!y(+N#>C{8gP)0jka_nM$r=RIEy& zQmO(~T~u9F-BjIGJypF_y;VV~5LKuuQWd3&R>i9lRQ*(msw7piDor&|HAI!J8l@Vo zny8wjnyi|tny#9mnx&epnxmSpTA<2MWvZ5_ma3MimaA5%R;pI3)~MF1)~VL3HmEkL 
zHmR~yTU6UrJ5;+=dsO>W2UXdsL#o57BdX)76RMM{)2cJ7^QsG~i>e&eHPv<14b@H6 zE!ACBp6Y?>vFfSnrRtUHjq0uHz3QW?K=oPmMfFuxsM4qmI77~eE5nuLj5$-TJXe7e za~51Bt}7=O%Cy zxk=nqZW=e8o5{`MW^;46x!gQ%KDU5d$SvYBxW!y1w}e~DE$3EnE4fwNYHkg;mRrZI z=QeN~xh!row}sovZR56cJGfokZf*~^m)p-B;0|)x+#&8TcZ5629pjF3C%BW`Deeq+ zmOICt=Pqy;xl7z-?h2Q~<#N}#8{BO!pZkM*#69Moa8J2s+zaj%_nLdh{mFggK5+%y zSFVtw0!4uafn@@X1Iq`R{Xd%S!o5w0jrVZ7X729Nu9*v+ySsY`mXH8J5Y3)5?wR44>6zo1>zU`7?^)nk=vm}h;#ulh=2`Ap>)GJhDles>)Geo?>Xo>m);yLO$<~i;;;W_C!<2ma&=Q;1W;JN6zbc>$>ACH> z=eh5B?0MpO<}p3#o);d=V|yIWOV2CMYtI|cThD)<51x;nPoB@7ubyw7@1EbDKb}la z+>;0~qA*dH7KdO$A-fiQ@GDCh(IAPxq=AV`1|7y@aK0a-8t@}K~U zpajaG0;*sX)Ic4Kfdj!o;9zhFI20TXjsnMkl49q=Xi3VaQ|1K)!m!B5~9@GJNo`~m&~e}jL( zzhEX92NS%B-Xw2UZ#HiZZ%%J6Z*FfMZ$57UZ$WP%Z((n7ZwYT{Z&`0SZv}6%H^p1g zTgh9+Tf+-@>v-#W>w6n`n|hmjTX;iL%hSh!@VQCW4#l-lf6^C z)4bEYGrTjsbG&oC3%rZGi@i&{OTEjzE4(YctGw&H>%CjNTfIBHJH5NTd%SzS`@H+T z2fT;8N4!V9$Gs=KC%vbg1uL5-m%P*bQm)BI?OQ20$JN zguD<8ArJ~-kPq@hI242kh=eGJh8T#0!cYX_ApsI036dcNia`UR!O#$BC^QTj0gZ%4 zL8GBD&{$|3G#>hYUejb~3N#g(22F=%Kr^A)&>UzkG!L2&Er1q6OQ5CDGH5xp0$K^J zf>uLoptaCCXg#z6+6Zliwm@5L7$;7&{yai^d0&E{e*r&zo9?SUnmobLkVyqoCQvTU2s-78=M`^0q25q!+GGm za6ULcTmUWz7lI4JMc|@v3AiL&3N8(ofy=_>;PP+VO>@IMp_ZU#4pTfi;h)^HoRE!+-n4|jk&!kysGa2L2M z+zsvy_kerCec%Bw2K!+g4#70c!4a5;1z3hvSc7#q1`mV>!$aU<@Njq(JPw`&Plcz! zGvJxy_C-+}MK_u%{R1Nb5Q2!0GdfeknvegWIC1HXb_!*AfX@H_ZD{1N^He}=!nU*T`? 
zcla-y2`3;~kR&84k{!u`5ky2}L_t&}ifD+A{DXp!A;?f<7%~DGjf_FYBIA(p$V6lkG8vinPZ67i z%tq!SOOa*Ba%2Uv5?O<+Mb;tfkqyWuWE-*_*@5gr_8|L^gUBJ|FmePrhMYi7A*YeE z$T{RZasj!BTtcoOSCMPTb>s$e6S14&0-AP({h zd4s$~{zKj)ACXVUXXFd=75RpIM}8nbkzdFkNqIvO2=jz!0zM^B(9(NpMY^bC3qJ&#^MFQS*w%jgyK8hRbQf!;)Kp|{aH=w0+4dLMm& zK13g(PtfP6fxbX9(AVf&^d0&E{fd4=f1p3nU+8c24;n`kFc+2`%ZcT}a$|Y1d{`l@ zFjgEZhn2@FV98htRuN0Z+*lQ?Dpn1vj@7_wV*pkMtBcjc>SGPC##j@q1=bR4g|)`o zVjZxKSSPGA)&=W|b;o*OJ+WR`Z>$g27aM?iFc9-%5Qbp?x)GQk3t|LDViXp_XpF&F zjKjiM1miIQ6EO*kVmdYu8-|U@;=;JByve&SMv_%h(m{Ds~OK zj@`iSV0W?m*aPe#_6U1|J;k13&oKitv2^SOmVsHAjXBsW><#u7dx!mpy~jRaAF)r^ zXY4EX1N({n#{OV`v8=vqzU;o7zC6CXzI?s{zCyktzGA+TzEZw2zOue@zGPpDuc9y2 zSH)M|SHoA!2l(pv>iX*W>iZh_8u}Xfn);giTKHP}TKn4g+WOl0+WR{AI{G^KI{Ui# zy862Ly8C+gdir|%`uV&*$OronALjG<{Jx-%^o4x1kMr?9!6*AvU(~1hV!lDXp}t|h z;l9znalWa(X};;cnZDV+Ilg(mMZU$prM_jp<-QfZRle1}HNLgJb-wk!jlRvkExzr( z9lo8uUB2DEJ-&Uu1HOa4L%zelW4`0Q6TXwaQ@-=Q3%*Of%f4&A+rB%#yS_)h$G#`N zXTImYbl(e~<+FV+eXo3PeeZoAe4l)ueP4XveBXUPeZPEveA)du{5k!({dxQa{004m z{l)zy{H6Tm{N?>A{)+xoe+_?4e=R@YPxIID*Y(%)H}E&~H}W_3H}N<3xA3>}xAV96 zckp-gck*}kcky@g_we`h_xAVo_wx_%d;Fju_9K4G@ALcpxS#Y>e%>$nWxwi=`gQ*> z{|Ns`|0w@h|2Y46{{;Ue|78C(|8)Nh|4jdE|6Kok{{sI){}TT)|4RQV{~G^V|9byM z|0e%7{|^6d|6cz-|9<~L{~`ZT|1tk@{|Wy||0(}z{~7;z{{{a=|39J8f6ag0f5U&v zf7^e@f6ss4|Iq)~|IBat)BP{}8Gg(E+W*o2$^X^=-T%Y?+yBS^*PrQ+`?KIlxC_sU zXUB8lx$!)B0lXky2rrBm!HeO=@e+7Ryfj`0FN>GMlkpVXjaR{|;??jPcul+(UKWxygS|#?}hil z`{MoZ0k{VTaW4+xFpl6D?!)~!jt6l9CvggAa2DtAF#d0ifeW~V%eaE8|31oN_&|IR zJ{TW@55*xEz8qhHuf$j3tMN7XT6`V83Ezxw#kb+x@g4Y1d>6hO--GYP_u&WdgZLr* zFn$C-iXX>M;3x6Z_!;~xehxp6U&OEA|5wJuZ{WA_yZAl)KK=lIh(E#~<4^FX_%r-D zZr~=Kj=#V&a0_?vm-s9EHU0*Fi@(GF!{6f{@Q?T>{4@Rq|Av3Zf8amyU-)nQ5B?X= z{1**M3}gu;1zdrwf$V{tfx>|zfs%nTfwF;efeL|&fl7hOfhvKjf$D*pf!YBekQS&D zs2^w$XcTB1XcA}|XclM_XcuT7=n&`<=o#o0=pPsm@C3X8D1Zde02c5C`~f@=2oM1> zKnJ9N98d$%fF6hi1_cHOh6Y9kMg>L(#s5Ywg$EZwg+|ub_I3^_5}6?_6H6G4h4<{P6SQ{P6sXrt_N-g?gpL) zo&}768OR8{4!j9`2z&~B4g3iF4EzfG4kQGVg05iJV76ecVD4a^VBTQnFm 
z5w!_`NF(YHb&2{!1EL|(h-geSC7KZ}h*m@!qAk&x=t^`Wx)VK!oiP6M3VgfOdm_$q_rVvw!X~cA51~HqML(C@@5Q~V##1djDv7A^ztRz+utBJM5 zI$}Msf!IWBCbknhiCx5QVh^#8I7l2OjuOX+Ak<3CSkuEYTnT^a&<{)#Cxyd|aUNRq7$=l=|@-F#+d`LbbACphWr{pv8IcbtF$PDr|`G)*NekQ+= zU&$ZjPx3eUhx|)sl5sMTa#2~S>{JdaCzYGZOBJ9BQiZ5uRB@^VRf;N2m7&T}$y5qe zkxHd1QB|pGRCTHbRf__sI#gY%A=QX#Of{jJQq8GA3)P+K zLG_|~Qy}G~U<#p73Zwi~fC^G1^>21Su@pyzsR$)eGNn*aDn<>Y22(?*q113{1T~Tx zO^v0-Q4^?%)Ff&$HI`6SbMzLT#nCQ#+`g)Glf_wU^pQ?WYbKt{R zxV}ME7Vo$8g-qzLEWToQMai()LrVIvrIjp9#PMz=afO2R66y7%AhRDpK*kT^`81jeWkupKd4{SZ|X0V7|Ie#3b{gAL)k*vLwQ2^Lis}lLxnsvN2ssuco4bwYJR4MGh=jY5q>O+(E>EkZ3rtwZfX z?L!?yT|!+$-9vps{X+dio)8%FhM*7}LPLR2Fhqo?P$8E>BmWQ|O9xD($8#(Us{cbalE0U6Zaw*QNnFjjl)6 zryI~s=%#c_x+C3{?oRil`_Kbu4-L{@8lqtup-~#6eKbpl=?Kl!0xi-KEz|!7Y;=qs zNDrb1(?jT?^e}ojJ%S!ZkD(^c(su{f>T5f1*FrU+8c2clrnYoBm5@(s4S0$-*QtE+!k3oyo!EWO6ZinEXsZ zrZ7{4DasUMiZdmcl1wS4G*gBt%ami1nG~iX<7O%`m6<9`Ri+wKovFdpVrnyKOdX~! zQ=e(TG-MhvO_-)kGo}U8ifPTXW!f?AnGQ@xrW4bd>B4knx-s3E9!yW>|7+v_v&R1? zjsMRX_c1sVVEzqUnGi!W48t;ECc+4e$jFSss7#d6m_f{7W+*d^8P1GkMl)lXNz7zs z3NwwF&dgwDGP9W3%sgg3vyfTDEM=B4%b69-N@f+anpwlFW!5npnN7?VW*f7e*}?2$ zb~AgKear#oAajU0!W?CeF~^w`%qiwHbA~y~oMX;27nqC8W#%e#ow>=}Vs0~cnS0EA z<^l7NdBi+no-)rElSyY@Fd2-+*o?!xWL`0^nK#T^=0D~=^MU!ud}h8dUzu;rcjgE4 zlljH`X8tglOahz8W?_?97n_yM#%5=8usPY>Y#uf*n~%-U7GMjqh1kMu5w<8>j4jEQ zVoS4S*s^RnwgQ{NR%BCIH(QCV%vNQqvo+Y7Y%LaG>#%j%dTax>A=`*;%r;@0vd!3* zY%8`k+lFn+wr4xAo!HK77q%2KQDy+&zS&h}%7(0j^%noITvBTMs>?n3LJC+^Cj%O#Z z6WK}ZWOfQWm7UJcVrR2+*tzUHb^*JPUBoVCm#|COW$bcxCA*4U&8}hBvg_FO>;`rd zyOrI>ZfAF}JK0_AZgvm5m)*ziXAiIk*+cAM_6U2FJ;okqPp~K1Q|xK>411P6$DU^| zuou}&>}B=}dzHP$UT1HxH`!b4ZT1d(m%Yc{XCJT+*+=YS_6hrxea1d#4c27S*%xdE zYq2)#urJwH>}&Q7`<8vj{>Q#&Kd>L!e>y(7{mW*uaW;WV zKac+z4(gH-VeNP2;9>Gq{=D9BwW*pIg8!{hS;9$SS2#yFXE;|lcQ{WtU${`Xc(_csY`9#w zLbzhs9j+d(5w00d3)c-d2saEj4mSxm4L1w72)7Kk4Yv!o4|fW84R;H75BCW74EGNA z3HJ>T2!mm77zzi%!7v%7!l5u14u>OQJ}iW#uo8}j^>8daC_FekBs@GkB0MHMK0GNr zIXpEyEj%+kD?B$mFFZfIAiOBNB)l}dEW9GTD!d`QCA>YnBfKZPH@q*rKYS>BB78D@ 
zDttP8E_^Xh|Je(1>!%p~B_-*)I_*3{x_-8mC zPKacQBt^1DvPE)4az=7R@<#GU3PcJ<3P*}Wibje>N<_*;%0|jZDnybaDUphi)JUaB zl}Ob{jY!Q%?FbO57ikb_7-n~9~ltwM8F6fK_k8h9tlK< ze?mqmLPxkrI1-775h)@^lt?t9MFvI&MFvNPMutU(M@B?OMn*@*M8-wNM%CMz%z@Ms`GYMs`Q`MD|7YM-D~~MGi-fM2<#|MNURe zMNUU9L@q`yMXpA!MXpC~L~ccHNA5)KM(#%*MIJ|kD39@e9_IsmkSBPOr}z+0^9;}OVLrkOyvR$u z%qzUgM|q9c`4~TtAH)ylhwwxB5&TGg6hDR^&rjed@ss%}{8WA#Ka-!u&*tawbNPAv ze10Lnh+oVv<(Kg*_?7%>eht5l-@tF=H}hNgt^78A2fv%&%OBtm@<;e%{BiySf0{qT zpXJZ-7x;_(CH@M3mA}T{;BWEw_y_z${xScAf6Bk$9sV`{hJVYy=1SeyM#T$USYp*KsY2E7LEuVsWvASW+w_mKDp1<;4nOvX~;Kif*xzSXrzhRu!v>)x{cOO|h0(TLi>3 zv5r_*tS2@Q8;Xs@CSp^unb=%xA+{7-iLJ#pVq3AD*k0@)b`(2_oy9IHO+C=wzmQesG?|9La4$cvIFi;Ad{1RXx0FZ9E9H~&O9iDuQW2@RR9Y${m6gg#<)sQzvXmlKlu{+P zR9UJbRh6nq)ukFzO{tbtN2)8;lj=(irAAU?sfpB7Y9=+8T1YLWR#I!Jjnr0ZCv}iI zN}Z(6Qdgv@}*4CykdTNE4;W(iCZ`G)z(mrXwbU->J z9hQzrN2O!Zap{C~QaUA_k7sN=x-4Chu1eRX8`4ebmULUXC*7AGNDrk) z(qrj~^i+B#J(o-=U3wv9NVfD+dL_M<-binychY<5gY;4QBz=~?NMEII(s${H^h^3J z{gM7knNnOzkQ3!Ba+2(lv&z}!9CA)Mmz-P9Bj=U#$@%31azVL}Tv#q57nO_2#pM!m zNx76%(u zd|tjNUy?7&SLEyRP5HKbN4_iHlOM>B%X~wgR=NU$ZnUS9HA|oTi%CIw>jF%a&GG1rA z$#|RbF5|z9_Zc5DK4yH%_?+=2<7>vZjPDsgGJaspJhWr5|9-rQUQw>t5B&zi3%kvl&Mg*LJ^=SkORmG zv%bO2JdQrF2SZo>Df&pTegIDMSjHQZA)_O1+dem0DHWkx-{X-3m#`uK1dN z=$aV^Dy&Z^mYl6p)=HP%*WK6L=iTSrE8Q#HyWP9oO)Jbwo{>B=d3JJPppsSDs$x~O zs#(>o8dgoKmQ~vVtTd~RRoAL#)wdd04Xs92W2=eP)M{omw^~>&tyWfRtBuvxYG<{# zI#?a8PF82Di`CWYW_7oESUs&?R&T41)z|80^|uCC9t*U*7G%K|Vxbmh`7FPMTLCL* z5f*7tR>-0)#$qkb3R@A2w**VHBulmwOSPhwX6aVU8fXo&23td{q1G^KxHZBWX^pZ* zTVt%TmY6Ih%gIWznjB5mlJ(?R^1x)u2|2XGIIP1t6H+IpPD-7eIwf^#>a^79sWVb% zrp`*8ojNCVZtA?$`Kb$17p5*sU7V_>MpLy^JvEj(Fm+Js;M5_hLsN&P4o@ACIx=-s z>gd!lsbf>erH)Var{bxB)L<%+N~Th&p;S7RN!?N*;#8=UTqmVY#X6~V+;u9|sa&T@ zovL+sM{xYfcyb^)m`o&-$y9PEnNDVs*<>zRbR=h-HQt(FO|&LildUP%RBM_w-I`&| zv}ReetvS|QYo0aVT3{`-7Fmm}CDu}FnYG+nVXd@QS*xuz)>>I$#~N4q1n-Bi2#tn04GbVV$&2S*NWt)>-SEb>6yQ zU9>J)m#r(-RqL8{-MV4jv~F3qtvl9T>z;MrdSE@Y9$AmAC)QKznf2TF`7;w9sy z;-%wd;$`FI;^pHN;>q!pc*S^X+#RnJuNCY4JMoy77AP 
z`tb(whVe%6#_=ZcrtxO+=J6K)LglUEt>bOtZR73Y?c*Kd9pjzio#S2NUE|&2-Qzvt zJ>$LNz2kl2edGP&{o@1To;Voy#-TVIN8)JwMclR=>!tO|dTqV2-dgXh|E%}c2kWEt z$@*-4vA$a0tnbzj>!b_F}xPO&T6skYm$WLLJU*j4Rn zc6GakUDK{**R}yW&8}nDwd>jS?FM#3yOG`4ZelmJo7v6n7IsU!mEGEIW4E>2+3oGt zKpUVf&<S&Jc#GHZ7 zAZM^M#OYvnv^&|I?Jjm#yPMtJ?qT<|d)dA1K6YQbpWWXcV0&!P_S%pQ+lYu?Q^0B93~&}W2b>2k02hHvz-8bHa22=) zTnBCdH-THgZQu@Y7q|!92Oa)cm_NN48R1^ffqmqU;#GZ055@8z-!=~7wn7n zCHt~{#lC7^v#;AX?3?y2`?h_@zH8sJ@7oXThxQ};vHiq;YCp4|+lFo0>Glgd!?tYO zcI=n-EBm$m#(rzRv;VW-+aK(Y_9y$Z{l)%jf3v^aKkT3OFZ;Ls$Np<)+HpIdZ` z+sWhPb@Dm+odQllr;t82{lvCO%{M~8I@O%&P7SA~Q_HFC08W}y$EoYobLu+{oQ6&#r?Jz-Y3ej{nma9=mQE|DwbRCF z>$G#)I~|;kPA8|c)5YoPbaT2pJ)E9SFQ>QD$LZ_zbNV|29FGG!UI%hu2XRmbb9|29 z!JU8;bO?uZ3b+co3b_iqinxlp5?#ey#a$&_C0(UlrCnuQWnJZ5Z<0d?yBLc>8jXeogL0jXP2|v+2ibW_Bs2V1I|I`kaO6%l;v`khgk|&C|bc?p?-y| z$vKnfCC^V@ki0OtXiE8%iYa|ksFYC3@swWf-tIo`zV3eR{_X*8j~jG*-H;o0BW~1< zxqWWG8+QlXK{w$h-IP1zrrnI2b#v~pJL2Zuf?IS;ZrQE4N4iJ3N4v+k$GXS4$Ga!E zC%PxOC%dP(r@E)Pr@Lpn=eXy(kGPMz)7>xJ8E(s+v(ns@LO@xd9B{-r>Kt>9J13lz z&MD`#bH+LAoO2Qr&4g=-Mnd7lR|%yPUnW#fe3x(|@ohr)#Lo%05g=5q5 zrR7g6kXA6QP+H-%B56g_ilr4#E0I<*tyEg+v@&UB)5@ilPpgoYoR*SSF)cOComMHW za$1$Ns%cl9YtD7&hI7-o<=l4eICq_U&VA>B^U!(ZJa(QqPn~DZbH{K@CmBcqDgvp1 z8>j?S2C4v6fz@@U)tz2CJ>P z;Y=hG&BQW&nf^>XGmsg~Br?fNDwED+GTBTnvjy;>_Q%?vYJaZ%rS{j_-)eua{iF8J z+P`Z5uALqD;(T?!Ip3Wh&QIr;^V|93{B<&&xRdZQ!JX*N;!bkA+*$vX0<*hwxO2L5 zxpTYoxbwR6x%0aVxC^=qxeL3CxQn`rxr@6?xJ$ZAxl6mtxXZfBxy!pNxRc!}?uzbI zx7%IGUD;j5UDaL9UEN*7UDI95UE2+~)7*93b=~#c_1z8J4c(30jonS$P2J7h&D|~B zE#0l$t=(3W&q3^JT7=xnJga znfGPBm-$~lk)O)X<#hRloFQAXEj#jS`HlQl{wDvF|H_$iTuxB3C@v+dl1<63=gl#)s*rMyx>si>qXm6Xa#6{V(9TLF|hN?oPC(m-jVG*y}@ z&6QS4Yo(3SR%xfSS2`#il}<`$rHj&4>8A8hdMdq@zDhr(zcN4p6|Vv*u!1P4f+@a# zTwvgz(?=+zLMb7IRv3jKn83e25QKPqNqw#(G*>Y{d0Z? 
zDT9?E%1~vvGC~=tj8aA`W0i5r1Z9#kMVYEhS7s=)l-bH0Wv((`S)eRb7AZ@VrOGm8 zg|bpvt*lYjD(jT>$_8bVvPIdd>{RwBdzF340p*}_L^-M)Q;sVqmD9=@<*af}Ij>w+ zt|-@(Tgq+aj&fJIr`%T_D36rK$`j?O@?0^MbmfJTq1ehx<+bugd8_=VyjMObAC=F_ z7v-z+P5G{1hR^UDxDhad24Ro}WrPgcU<}sajIa?gctbEmLo#GTF;pXJXohaYjDf}= zW3VyA7-|eNh8rV{k;W)vv@ymQYm76-8xxF)#w261F~yi_Of#k%GmM$WEMvAY$Czu( zGv*r$jD^M`W3jQsSZXXYmK!UKmBuP#wXw!nYpgTY8yk#`#wKI4vBlVGY%{hSJB*#i zE@QW`$JlG^Gxi$?jDyA@*ubNLS zs1{NSt3}kJYB9CAT0$+UmQl;9<<#>3Xy;_LqME8!R;#Mj)tYK;HBGIfHdGs_jnyV< zQ?Hrl~QPuxX z3J$12l~75QQbQ`OGAgTbYFOn}L6uZlRa8}tsxft-I!HZY95s#^$Bh%lN#m4p+BjpJ zHO?95jSI#_rgYvyfTXEMgWliNs_RI#HdZPEn_-)76>kEOoXzN1dzAQ|GG-)P?FIb+NisU9PTB zSE{Sj)#@5`t-4O#q;6KXs9V)->UMR9x>Mby?pF7xd)0mFe)WKQP(7?3QID#})Z^+2 z^`v@AJ*}Qm&#LFt^XdilqIyZatX@&Cs@K%(>J9a#dRx7t-c|3Z_tgjLL-mpRSbeHK zS54JY9rdO9N`0-qQQxZX)DP-M^^;o4ENzxC%bMlP@@54y*-SAjnyIGStYlU;tC&^I zYG!q_hFR0BW!5$UGtI1H)-~&y_00xmL$i_D*lc1pHJh2u%@$@$vz6J}Y-6@H+nMdn z4rWKQliAtqVs2%1+1u=6_BH#N{mlWU#{^BU37N2on5c=FKGSdFX21-Z zgh`r|88T^;FAVz zMSW3!6psd?!6*?8Md>IL<)YzeB+5sHs2G)^N;DePqIz^tbZ~S?bZB%~bVPJybX0Uq zbZm5dG-eJo2bqJ-A?8qXm^s`WVU9FMnWN1y=2&x_Io_OLPBbT(lg%mSRCAg+-JD_0 zG-sK!%{k^=bDlZhTwpFV7nzIACFW9dnYr9tVXib+nXAn;=2~-|x!&AhZZtQUo6Rle zR&$%V-P~dBGwS`ICzmRrlK z<|8(aLJ&v}CQKR#~f})zE5bwY4;@j@CeHs5RD_Xic@|T5GM1 z)=s-;-ZvkZ56wsBWAlmm)O=<>Hx1J?)6ExVhH06$>6kCgSLSQ;jrrDmXZ~ltH$Rvk z%}?fM^Nab_{APYPf0#ebU*>P~kNMZkG~;GMdSZH(^rUoGde-!8>DkkBq~}b}m7Y62 zPkP?;eCheq3#1oJFO*(5y-0e|^kV77(@Uh6OfQvQI=xJK+4OSh<RS4>Y$ zcc)iMubf^by=r>3^y=v~(rc#IO0S&`q^G6VNw1sUUhANB)H-RMwJus$t%ufA>!tP9 z`e=Q%{+dSvHLnI~u!d?r&9C8FKqE9#qqLAlYmCNfVJ)KZny5*dtSOqRMKw*+wSn3o zZHP8p8=;NVMr&iWiP|J>iZ)f7uFcS9X>+u>+I($+wn$s7Ezy>0%e3X%3T>sfN?Wb1 z)z)e2wGG-vZIiZH+oEmNwre}I-P&GlpSE8+pdHi>X@|8V+EMM8c0xO;oz}YaZ#nO@ zr%LczznuL~^qZ-5)0&O_PaigFYAj7&FmLMoD&dLnubw%B^Qi;KuMwT-Z6mT!*R-F# z2asE&kHg1I*wu^lUcz?^TSs*Z_6fY}Te)Ah!Cdb((J$yfcyX#We^Q|22-MSPeqeYb zd8l?hG)(J2h&|m?p9G2znn}J>Y7k1G#;B1ChKD-IeBer;Ue6b5m%hvTTpG4+MlEa{ zy@VMt^5(Ex^x4>yalPkG?Ug#^*{EkDvi2=Bk?eCuJFi{TE^Ak`+uB|2q4r37qCM4~ 
zX@+KM>Dmh|L$ftUd#SzBUTbf(x7s`HgZ5ebs(sVGYrnML+FvcMCFoi7Bt5I1P0yw0 z(evr~^#Xc9y^vm5FQOOKi|NJn5_)O9j9ykRrzh(b^;F%hSJtcP)%6;BEgjI)^g4Q7 zy`J7!Z>l%fTj(wIHhMd~z1~6Zq<7Z4=w0=0dUw5t-c#?R_tpF9{q+I5M+bGUj_6~? z{+#!;XEG=)Tng_DtPSj;22Po}B-=mueP(Qzzjxo_eJ4!6GNSC%m2>j;x*%uwzVVlo zHc#m1eLt#3sLJ4~==WhYqhG_+v>(%(&1ky(4p(w!k-tpxEG#`7Hd&u4660z~v z&*;0kD4EN5*0+56N$}Lj;WOZQTjw8~UTk=csjUOYdp_uk_1!Riz(fdrOWRHsjhb-JJqB^GgbiYpMA)V1#J)-lv zpo_Ys%etbgdQ{hRU61L5^uhWNeWX50AFYqm$LkaHiTWgcvOZm(q0iRm=yUaX`h0zX zzEEGJFV>gnOZ8>?N`1AyMqj6I&^PN_^=8JHG`dR&)eqO(zU(_$@m-Q?9HT}ANL%*rt(r@c`^t<{!{ek{af22RvpXtwaLpSvd z{cKdj4lBm!t;EZ~-Z|-EsPE}M`H53=<_xagvozLVRI!nhMyB$Y!pfrlL8R}me&hth zleb3_Msh8Oyqrj_{j^4PpL5c)137^3d~!xAP-ecvGtewYyG|cLI0?K(!c0m z_3!#m{g?h*|Dz|xT(PXNY_aUI9I;%n+_8MI{ILSD!m*;UlCjdUa=5{5NjT58EYMD6Kfl57wZt~6zd%866+f49_tzF9qSwG7XxG7 z7!-qJNDPblV*VH&3&eskB1XojSSUuvn3xdzKMb6Mdz%dxhiBWi_s+Jx#ci6VP12-o z+BK<>$F|a~O|h+=ZQHhO+vfNE59hkhIX~Q~fwVw+AR~|&$O>c!0s$ZZ2A}{EKm%9+ z4-f$|Km~?cTSg6bVELF~K2Dk)aTBuogtav=3hge*Yw7J9o}BV9dVpnCYP^=>UL3o~ z13CGW&%sB;daX%RLi`7(;F}yG6d#`(`yM!Jwau*Ko@|NGRR>?V=#-lh=57^~iXQ{z zEuXW>dYYPDzI$48XKL{<94qDZL!)AqBiNF-J*g9n8;wb9<>>kLJ#JraW>yWg4Olh? 
zfR5?DqfAa+-5a8%>8CMm=5jNqR>(gWI*=S2icF*fOn?pW0Wly2v_LR0E07n+4}=0m zfq8+F!2H0%z~aEt!1BO~z{*T!2Q63z{9|!z~jJ^ zz|+99z>C1kz^lNUz}vvP!27_5z^A~Mz}LX{z>mPM!0*7Hz~8{XKpCJMP#&lNR01jk zRe-8MHK00B0~j4xZn&0HC-$jO!S={=CmI7sB(~8HAq&I$>eglzfNqEAc$eSYG{hpN zW_vp1lh%&$qY}B?%3+`U>(F+5KlgidOI8=^K;U$!M4KG*E%S^Nn>5s9P}(NV#M9^x z;g!s1bYmL2mIQ9A2WI*?BDID0&8RI$FB5r z_cYD#5xZBjKwY!V?#{M}79eSnp})J6gH3v*8>C1)0`Wj1 zFcU}ud_Xdg3Zwz)Kn9QrWC7U#06+i?AOH$r00~e43vd7r2!I60fC8vM5Xc2)0c~6( z@FVUax-j$i;C9bZ{ApZI-$eae*8~4uX^8U_Yj(EvL>gl#PeK`!$1_>2XP6U}1N^r4 zHVp%JyUf9B+1s22PCDDCrldmIQ=pYOpNzAE9U^r4&&e;f>2b}W!|99t_pk?%Tip4M z-d2+S4{GUX?W`AUfXy!?J;#y@13OX!Mti|3>wv6r`oUQRj;Ah{ZNGnCk`VZ9z7jfR zENc=x)hx5~%FCyfA?c-oZ0?O#Q@(GW4dem&KnN%R3V|YE4loy(2b2J%z@*aBTn4TJ*MJ+qP2d)A8@L191?~e6fQP^n;5qOTcn!P-J^&wqPrzs33-A^A z2K)ei0>6R3z(1fYSOKgERtBqpRl#at4X`#?2doR$1M7ng!A4*cuqoIaYypIntWY_Lv~&IcjNU8)}}3U&j#ga3g&z@A_) zus7HT>2Y>^?!QfDEBsdxz1C9m9f#bmm;3RM|I0c*vP6MZdGe8}v2MwSJG=ml} z3=9V&KpPkd+QBF=8gzh8&;`bTZqNh9f^lFx=mitNL~tgU1o}Wfm<*^ z2!jZSf+Wa*94LSyD1kDlf*KeEXM=fQ5m*e)0ZYII;6kvsa={#l>7hD$y{=5?itl{J z3#O7|X4WsP+RVQ>v+~BE4~$P!&zLW3<*Ba9x^#chE#Fk@CUQsIN_>>NZ)mygpKhPD z6CD}%k#ZEogxxJ%f_F3jk=j^x;-!x5z9GtLGBUTTrMa(?X&dUaHcLZ&0zA$3NB7U! 
zA!a4qTURz}mA4%CKB{{D*}`>Me^T28*BM)4d!#A0$40xplcl>YCvGvGWzTjWbO*Hw zj@G_TP;LEneNFEoa51Mn%1Go{~3~m9pg4@9D;0|ynxC`73?g96L z`@sX?LGTcG7(4y|-Ls0_jP_1{ zJ9k-JFTzO7br^_DCB?)F)0CPv#&BKNA~Y}^AX+P@?Te)GA+)sVh#DDz^Rremgz}lW2?AFlzhUAveHe71$KIP!eh2g)*Q_C>shuAOu4Qgh2#ELKH+pEW|-P zBtRl0K{BL58k7sof@VV@s1PcGilI4B2~-NrhZaGLp(W5-XdSd3+6Zlewm@53%2^DkIg69<;8>K1^9$GwD4#6 zu~1Lb?M#E|fxVokoDi-%@9yUMnUG;Dw9hvl6c#{ikpX+>nx5KAnPS8o&4a0#hlL*2 z2?3L{wDf0wf;N2411XR_HsQASr7}PnVG^~Q$!8KO=D!L437_lz!5`C(TaG}-p%c(a z=oEAYIt!hH&O;ZW%g`0*Ds&CH4&8!oLwBKj(0%A3^ay$kJ%gS@FQ8Y@Yv>L14tfuL zgg!x^p)b%^=sWZS`U(Al{y_hrGH_YA99$l*09S-7!Bya@a5cClTnnxP*M;lB_2C9^ zL%1>A1a1m9gPX%G;FfSJxDDJEZU?uAJHQ>`E^t@4JNzHq1MUg;f_uY#;J$D_xIa7q z9taPDhrmPOVeoKx1UwQR3vVw@wCDIc`IhK5*|}m*?(&#*!50PHEnEB-?SG77SQz#m z+Z{W@^C50;jN+=6X7x?+^|JZB-N-8uYWPg~RnnaxVrz_lGbj49Ec3#0sB_fdguQ@l z3DZ}xT+9~aa-mJW?fxXCju2ZiR2db1q2x*y@4TO$;QDP@uIn~4hBvuVwZoR{sQ^2w zq?=!}H*qdRg|Kz{{u#q9#pb4lS{B%%)BSNqW`2!Gq95Ry-1+pAaCq`Kcsx7-o(NBZ zC&N?Vsqi#-I;?~BumLv0CfE#H;4nBGw!#sx4UUBEa1KBZSZz@C%g;p z?9{RUg)Mf^(yjCja&I87S#;)+85fd=Wz{n^GT)KTCyLH8uD5=@wO!J>40FPDtv=@V zEOK=Sdzs~y8>TJtFSSDs6E!{NaLLbrmFo<1+3$^CUDMOX#ZPz4aMhAiQ#*PDZI9FK zy3F1UKaiB?^2SUt-*?5wk5c{s&Gau)C;2u>AEoPPELA%`GIH!3qyL+Kj6BTWRG*Z( zjcVk+;Hp{BSqT8Yj3cc-twX(mtP$WlARpQX?}rbJ75)bQ zfPccj;Xm+S_#a#bDT|at$|Dt!iby4-GExPpid08xAT^O%NNuDJQWvR*)JGa14UtAj zW26bv6lsPuM_M2)kyc1+q%G1O>40=ZIw4(OAq5tJ12x@+n%8-(W>FwyhCgm`mOf)rbtRG9I5op z+M74YFdY1*+v-J&4@AOA7qTO*jiQD^pYwHTuyF;RN(O8du+G%}gxB5;ft&z z-soqV&KV8Vt;Eb(Uz9$|0ikF98tAKT5Hc7Uf(%85A;Xao$Vg-qG8!3!j77#F zMdFZn#ET>#iO5VO3GpF*Bn3%D(vWl{1Ia|PkZdG?00@Xc2#g>IieLzi5D1A-2#qiZ zi*N{!2#APCh>R$RifBj>$wg)%vynU`9|<7^NFfqu=uH|;DMfXXndD{upN=nv+qOzk z6a7vfn$8y*ZwwoHt-u|0VR1 zaNbtWeS%p+PS0v>IHg;X^-7xhe^YuIv^efU?nzxozfmuLMG76@}6Tk)S zb!xz0gcKulkh#b_qy#BN<|7M`g~%dgF|q_%hAc-`AS;no$ZBK_vJP2~Y(O?5n~=@O z7Gx{34cU(DKz1U#kln~0WG}K0*^eAR4kCw;!^jcjC~^!rj+{VFBBzkk$Qk4;at=9< zTtF@&mypZI733;%4Y`ioKyD(pklV-|@(y{Ad_X=TpODYU7vwAQ4f&4zKz<@4qBfc)Qf^}{%AS)QzkTj%-AUJ8coTdM 
z&DX_CDe-)vt$>Q~#YN@Q&J$VT1^2+a;C|z})I#v3CmWoPhBDq1Owa{Vqjjd-6=q}H zv$)?xMN+l&i7L*TUvS-hBYjWSgeai+BsDq;x4g(1saxxd#4rB8@bWx5$9P9+rS{7I zpnqb1=31TVH5okPN?J#DG7QSeF+W6^*zR$HyU^3XFd+Seu*tYzx`R%#o~D-N%`rB& z{EGU8{6_vDf02Jk8MG`~4lR#XKr5n^(8_2Pv?^K+t&Y|}YofK#+GriLE?N(*k2XLX zqK(kTXcM$4+6--uwm@5=tz9vFJE-JURiLh)zbQpi|Ll=yY@jszddt z0X3o~)QnotFf<&sq7kSKjYREe6dH}5)IS0v{SSg#F&-AO#F*Dv$~a$mauVMe6Jn~F zx8v#a)pNEva$@eoc6Wh$p)NnQJpRr7IMr2Jey4J^s=~5|HL*k z_eSbG|3#BQ_buelN12m>UPxQxEbGBo35Vd9z7PKgO>Hw4nwKXK>1RtmK;P4M0rgWV zW$L`Aa~IDY4hWQms%>eUFhVDoH<(5SSaVksZ;@M~YdxSmAIt9*x zm-x1Z=9s9=7nT=#3%-%MtEJ|jaI|oh`kOfHaf6A!dDC3GQxBnQ;S)w8VFR2(cF?if zxm+}Ut*)ZHB6cc%(@>CgHtmUhj~1W8>d076{#8d&Tw~Kg{&?ylQ>~1CnFZc&fkp-W zb-RLh=brN4GLBJNTF$yRW_+N&hUn5mc?0#u*lFYcZO+h|D7Nsa`LS+pa@EYuiAe#i z$d=W_)FFF}|C85mEGnIb7g>fw`@qN8GweC`5_^Tc#@=A>u#ebh>pj90;{;??k4cx}8cULS9UH^*Dzt?<@(Tf9Bq0q>6ghxfpH;=S}!2>vegE)l4ID)ew+WOEvDFi20h%6U- z396kkEwq$$o0-C(LrC4^i8bq4V(#n`IncM@oibLzN*pOP5ayg1>gmobZfLoaw@7~^ zE-or2sv%}%d-)_Mm+?6HANn+^xo4*RX>yi1O*h{Ym$ks1taZ{~&@J_CMR!Uq>7ASv z8y0MzzXDoN{GA^`H}|u}w^81U;f*b)tf{)CCE@1dT2*3J&a9%h z?1SX`%cHq$9Len@FVy!{5XCBKZ&2h&*A6s3;0F+5`G!Kf?vn);rH=}_#^x&{ucj&f5U&^ zzwp2KKfDZ4mMBkDASx1-i7G^Oq6X2BXiPLEniDOEmPBi!4bhfpN3-zS&_hswj*17)V*NQLY+Lye^ zt7$HEHRE4PfQ>t5mSg6gpSY?CL)}$5)p)*h$5m12jL`Q2shy&Vu^Sn zftX1o5k4Y?NGCFgEFzo8Ap!(JPy|gd1WRy)KuCm4D1=G`iCM&KB9F)?LPP;kL=+Px zL@6v4z-5Y$LW4JBZ!H9%3)Ck2p*m zA&wEpi4(*r;tX+?I7gf(E)W-qO9T*Gmu)+9F7Z5&7;Blc!`X z5K$?(Gr2o++@0yz8`u&HdA>NCL}IzEOGZWMJ!5gio*Xtce?~szU+%i^y_aCufj4(m)zX6KN(bWEdGvT1gveC!@${ z(m}e&7}8C8$XGItj3>QhBKgdH*Jn@nyVvIJLF*G{|6F4QXQpkXsh%st7K%5prCHrP zPh;M}Kc%OZ&Pg3hHYZ(S!YtW_#lD$go0b348#)@g246Y27D{&l%>xN&&=eSu*7t`8Qg}V{v*p!)u`%JO{xx6kE%~KqMA@msg_hLsx{SyYD=}F+EX2VOlhOS%@2HNYV(1st zM*6a~zqhRsN4^tZH?_5QXi}quX6{|K^18#uN+}n;nlIaUDq)h%n-rf_E4mJ0 zD7_Xp85jfP7Uw8rltWim-^*gO)d(bnH}D7lHOodV5l$DC0Y*aMmrc$XiDxJ!pGN~*on*u11LMe>GDS{#?ilQlw;wh1m zD49|yjS5n^R34R2g{T6mh$^P$QKi%ZY9Y0VT1+jWmQu^8)zlhlEwzr?LhYjVQv0Zb 
z)FJ9Hb(A_zouE!pXQ;E(dFldnk-9`(p{`LksGHO+>Mr$=dPF^@o>I@K=hQ3eHT8x{ zb&c?6rN_I~yd!zz%m*|Sx~JAv@x6UVTsei`B-SxJGY32cG|_Dc zmd_mPyBR($_lQqYhMJGEALG}llcUm#+a$lG-cg^Z&(s&{EA@@~N&TXJQ-7&{R2jM) zU6HO#SD~xY)#y5OJ-R;KfNn@Pq8rmq>1K3ux)t4;ZbP@F+tKamu5>rLJN+NsgYHT9 zqI=VQ>3(#7dH_9;9z+kJhtk97k@P5fG(CnMOOK<+(-Y`P^kjMpJ(ZqDPp4x~y=VcTVmJUxM*u)+hJs^ueZirlD9~>fr25^VE`UiOGTep&R^SY+%}g$LDx}CZ+&SwR`bc?0(CegMEwYbmdp2XF152jAg z9!lZz(zYh=uwO(Aj3+#oy#hXjYRK0!f_BojLCerZ`{Ilpg%qu4Xe#5#tdsvff2sco z*~d96ww}KWolG2>>xi%`Rdol<2#wMhjnf28(KOA_EG^Ostrt|20Iz$)J z#q=C{9$iA0((~zs^dfpOy@Xy$FQb>!E9jN=q>bCdK06w#4k+Hd1`{sQWf&2>1#pVsOOd~!854a zHdQ&s)*x$=@yhtv3Q@)w!9Nn%D-8T z-$%60>X?BjV6dNRG3-b^2+FVm0d&kSG&G9#H$%xGo|GnN_8 zOkgH5lbC7DbY=#lWAu!HF)}8`%vhK(#>zx6k&K;*Vxk!*<6>eMH{)UAn0Us^Br-FZ zB*w@1nPet~NoCTQ3?_@oW^$MS12YJNF*rjoWpi$2Cz@{K?X6WDeQa;j_rzhz*QCKw zY3vyDr<}j;uW|cz_X=^g#C}xzXnvT3>2*=r#iwkwokM|F5sh^JypryXd$!mzxX;up z{&tuzyj<{#PSKytO)aYJE9V&JT5LZmT#*LGO>>8ec>6y)@97zK&D<#)G5*Ia^lnNJ zlV-VRYnRRS$P0cgD_*BlQthQ_>zLg58}aE_8SYF;n^H$qFYkM;Zi3TW(|r|WBDOkG z|#%mQX1vxr&3 zEM=B4E0|TxYGw_yf!WAxVm32dn61oqW+$_Y+0E=>_A>jJ{men;5ObJ0!W?CeF~^yc z%qiwHbA~y~TwpFTmzk@~HRd{VgSpAvV(u_^nS0EA<^l7NdBi+no-)sv=gdpy74w>T z!@OnQG4GiV%tz)E^O^a=d}Y2d-H@DP5Ni6=;{wVBL_)z_DeR|QSXw*9x zFXti)%LXO;Zt08EA~u4&8~Z2h92u8;P2UD-9V~@E8H^F@lK0t9NE9B(ol6%$2rGrO z&=d4hu^J$vtl-$-vXB$7*`1#1&;@!;LZz^-Se?vvNZI_+?&%R%b(9^jSt5QgKbham zALcJphAqpMW6QG@*h*|=whCL7t;SYoYp^xhT5N5$4qK0{&o*EivW?j$Y*V%w+njB| zwq#qet=TqgTecnBp6$SPWIM54*luih_CK}<+mr3Z_GbIAec66&e|7*nkR8MhVTZEA z*b(eVb`(3B9m9@g$Fbwt3G7665<8il!cJwUvC~--Yi7gPaMsF3ur}7tMzPVXlXbB% ztcQ(d`d05(aJMTt*xHLFO#d?@6GRhj@)jxvaVF$4C~a`Zc6v;7s+-j zKJ$H+Kc$CUMUJ<=OPa-R3M};eFt16C^+iN#Zv(F*jqgz~}b{_N0l$!Y`AA2TSiJ4+PTC0I=D)VlfSJ%xqiRXEJ7{}-=?UvcI8 z$C;)?b+!HSCq?wmYLVr0H6SMvbv-fOhuYAbPfCB~KwcvUWB$)_DAmuVu&Hc1o55zX zS!_0&!vCAW*rn_$ zb`86h-N=E`jdzwANUSuz^SJ}ea^mQU$L**ckCDTEBlT8$^K%0vwzrsY+0@XSCOm4Rp)AOwYb_` z9j-3dfNR8!_I-0i8P@35IEH#Ak{Vk{^aI_slNDh=yD@p-ixqB_(PRje406zpPAIiHeK%JM3Yx!Accc<-%@3JL12U@ 
zGCq`j$~DJ6C9<+EjF{<@G8X6ruMxwYKjWHvuRCd5*~q3`bFL-Vnrp*#;5u=gxh`Bc zt~>W1*MsZD_2&9={kQ?#KyENMgd55Y=SFa&xiQ>WZX7qBo4`%vrgGD`8JwOoa7NC= zS-3DRoQvQhIXf4{MRN|$$;EJPE}rvp30xvKlS|@!oS#eK(ztXkgUjM_xBv%mAct`{ zM{qR9avUdcA}4byr*T0pmz%}qars<`E98o}Vr~vMmz&3xaPzqZ+(K>W)>_|9KQd}H+RfXH$*}0N4&h{&e$EVikL3Dt z)u_~%By;QRuE`?H#r?|Pfey3u1FIo#^B;sAz}{xI^fcxc_~(Ij-3#Kp<_8|i_bNKd zvOI6PF2?s-nih3GZZ5ejVWg$MU72j~;pz0uHGB_;OSdU7ZEnLFC7L)!)f^I&x%uVrYFv&y0O$ zkYI`yU@Pr8Awy^%`aOH0IXl0oXah7}h)9mnUB=>or_v|a8OtgiQ1~r6X689VT}zDI znT`%rD41$`YgrUtCZG3ju^;uD<2&#j`A&Riz6;-#@5XoM|Koe`J^5aIZ@v%TkMGY9 z;0N-9_`&>8ei%Q3AIXp6$M9qMar{Jn5~;*AI?Yck-VLc z;-h&7@8n&4Jn!Xa@_s&rPvbNAOg@Xx<^w#ygFM8;JkC=*%X7TIi@eOMyv7IlTz)p6 z$LI3}d?8=N7xQ!Yd3*_9%FpK)@Qe5*{8D}yznoveujFe-McdmZgz!LEU1)NgC2@{! zQj(673iRgIo}sZTgLC5!;C1980Bc^P4!$5tS9|N=C5Am*^WbQCk+a z$iCWjDvr)r&9CLx@$31G{3d=gzlGn*Z|8ULJNaGw9)2&skKfN9;t%u3_~ZOZ{uFulYCpTmBvYk^jPf<-hSi z`Ct4W{vTgPC@Yi~DhL&YN!fPIBbsF1?e0|4qhz$DyKWC@OP|X&inj6V{6|6; zEXBI5Qq{CQma3t9sXzSZvpeb<1nO}sqP{xHh;s{Th1Ar2zVWsdp5Zw&_y#E%WbcS8 zX|lOl;6SKsSiG}$+Gcw*SuUoMdfIxGJOdKEK6i?KL6(SB_xB5R_Py6#%z})A5=N%r z!Biq-%g!7u3=xJ3!-V0&2w|i!N*FDS6~+k@go(l=VX`nqm?}&YrVBF!ouC&Cf>AID zX2Bwa3E_fOh!AW-q+l1KglNGf#0YM|Bg6`Ef>%fo5`~#UlHe2kLb8w|qzY+5x{x7c z3Ryz7kRt>HKmY|;Km=641yY~|M&JZq5Cl<>1X)l7RnUZ>kSoj*W(#>jz7P@$ghHW6 zC>G`jbA@?AiBKxc7ZwPUVmn901#`jn@=e=nOPT-U=?I|=-aAnjJ`f+2el~Zj(W%QS z40)RAzM4P9Pa|5`Hu}rxenbp2U-f**o2Rd)_wd2OK{jjf7w}T|9?#7peRJ*4ZPCt( zi3Hos@G;t^dzAA;2a>t&`ti9zAZniua2_QF2Wfm&)>7SNzIM!!q@&I$x|7)plOIWI zrE2m8shsmSb{ad24xo~#`{CP?J{9y2{j#;Te9e31KVnZV8WPpe84N8H772@mCBjl+ znXp_~A*>Wu39E%Q!dhXSuwK|8Y!o&Ln}sdHR$-g4UDzS)6m|)_g+0PvVV|&HI3%1A zE(w=~E5bG5x^P3dCEOA23HOBu!b9P)@Jx6vybxXruZ1_lTj9O%LHHS-l z;fL^3_$B-n{s@1Ce?nQYoLFA0AXXGBiB-g^Vl}b4SVOES))H%rb;P=2J+Z#nKx`;B z5*v$6#HM01vANhnY$>LiKahQ9W=5u#PUO3N!(e@tsKuzbA}3ifj05? 
zEKl4X{UPFb)^^94P&M@jt64tzWdC3v<~nBTl7%>$=;ynhIXgu4$yyuSpSL4@hi`E5 zl-#P`Q|3=*qy9wh2kE#Ob{y0m`&He+Y&OxsHjAl5+VpqzBZKhVsxd8-8zeI!Ue_n` zd8n2Si`oHj_BYZVOvt!rmB<6ZvX)7~U12Yy7C5WK^iBGZz7ZG1R$^dx$;7USe;tkJwl2C-xTyhy%qz;$U%zI8+=a4i`s=qr}nT z7;&sPP8=^z5GRV0#3|xbahf-roH^FXe7sydDU0m+a z`}n1(s2oz3NIQ#k#>#@Bb3ELm8*kCVaF%cn|(({KYKtzKC0pvX~{*nc;1MWOjDyleG5A z3wKk~AK!4Lma8N}jrkkzQgrT3VeON9c)c1Sk|HJ2A|tXQC-R~s%Az8wq9z8#Tyd6| zC+3SGu|O;oi^O7ajyPAGCzgn%;(T#|xKLaqE*6)FOT}g4a&d*YQd}jj7T1Vt#dYF( zaf7%~+$3%mw}@NCZQ^!uhqzPRCGHmYh4-v>wr6QG?kk2`+OJ_YKgV z{0D74^FEmB?3dEUDSMXt!@X5>bsU|PYr6e-|LnP;Tkan5sC&0BOYx>9ViS!V{@R=z zEOdXgyl0EFig@Gv#>@@P=0B5B8rM3fLRRO%FQS#g=PlLedv|1=lqwhW@=VGf5ZX?+ z_l(IQobS^GT?1EU+&i}?X;j=}@rn3Ud?r2@Ux+WoSK@2&jrdl4C%zXyh#$pI;%D)T z_*MKSeiwg;KgD0-Z}E@#SNtcIk;+Qtr1DY)siIU#sv=dDs!7$Q8d6QEmQ-7+Bh{7a zN%f@$QbVbc)L3dFHIMC`Ux=a5_J*1ve zFR8cGN9rr}lln^oq=C{PX|Oaz8Y&HwhD#%)kv@}KIDf(N8| z@MzcsXK>Z<_8HIvxL(E8EV${u&pUgf3vdE9pNnzNZ_ zo97epnMlT3IBrA@G=cFifM#G9r6~1s*4^Y?zFA-o-I473&eFUb{y2ID*oU9V{itiK zZ%rmx?nd|ZpO%)unOY&<%rVQdA-l*vJbrQU{fur2x7a@3ZW-UDTT-3i5AfFiA+?9y zD{}gT57Sxc58f4~{cKO=d!7-RC{29EK-;hE?K1r z$tFcgb}34VmK>5(a!E0gTk=T%A27))B}j?VOesn7OUY7-lq#i3=~9N2DP>97QjQdm z011>J36>BEl`sjH2#J&^iIy0Nl{kr)1WA-6NtP5zl{6_R6mn0dMQ23%O;yGqGw>S-~#xPooX`yWU7cl-^)n=BV?$?(=%j>pySs ze6T!39x4x$hsz`6k@6^cv^-WGCy$pW$dl!%@-%sdtdsS!K{m=J*(_V+aM>zH$Trz7 zN6FE$Lw3q8*(1lwadN!ul@sJd`Tr|SPL@;TR5?vfmow!oIa|(=AsLnt8I>^^mkF7Y zX_=KdnU^J5kyTlfgL1AsOP(#~%OSZyE|QDoIr3b2o?IfA%Jby~@em#CYq<4L>S7gjs`uud`0GcPhPF|Rep6+|n|%wNo% z%stH`%~49c*<&tes$!~T>S5|<8f6-7nr?EKVoXt{bu?lkO@t|Enr$MD7HpU9knV); zpzgWuf$o*=uCBTMldhdUR{urUQQui#Ro`9z|93$@I4)8jtzRpzlh?}|&oANFBwtPpvE8mmv%Mav-@+0}N{6u~#Ka-!!FXWf5li$l9-zw5girW-!#|1;D!^fdG^{MENG)G%~3bTOcYafUF%WP{G&GeCwVhGm8ohMk71 zhDydahS!FthPQ^w#_Gm9x)HtzeVqQRetzjhUnZPv{1bhmq_BuEHn9CL)ihTY>zH2| zo7%#R^L63IO}b=$F~3FEfVjxxK!1CAeml0xxWTx`xZ7Ausjbve>MHe=`bq<(q0&fc ztTa)YD$SJUN(-fpVpL3uRdFhDN`jK8%v5|zvXY{tDd|c;0TfVy6j(tNRKXNnAr(qt z6;9z5L6H?jQ58)IDzlW?O1=_O3Y0>nNGVq4D07v0N{KRGS)eRb7As4XrOGm8xw1l8 
zsjN~~D{GXs$~tAevQgQpY*Th9yOcf3e&v92P&uL;Q;sVql#|LC<-BrHxujfHju}J7 zg~lbu`O;EpnY2>cENzjtNe87v(kbbjbXK}3U6L+KSEcLH4e7RYPr5HXmYz#5q_@(0 z>67$T`XT+6{z+xza&iT^vRp&1E!UOn$qnR&awEB^++1!Vx0GAUZRECcN4c}yP3|uD zlzYj2<$m%2d7zwV2F)!)?LuuseL~$rBSOPNV?$FymXI^#4f#Upq0A5;QbGlx^`U~` zoZ!4*QE+u|L2z?$Q*di=XYh)0UAdv$R_-egl!wX_<*D*ad7->f-YV~v56Va7v+`B> zt^8H~DP`2MYB{yMT0yO(R#vO1)zzA6EwzqXSFNYkR~xF0)W&KPwW-=nZLYRZ+o)~T z4r)iWliFGBqIOq%sJ+#`YCpBVI#3;?4pxV$!_|@MD0Q4VL7k{hS7)d?Rj(RUvuaVp z)Ns|RMyQdhU5!$sRfph|FJ;JM(D;NIZ*;Njr0 z;HBWx;KShU;I-hb;H%)f;NM_{+;+LGa@*(j$?cUpBzIiy#N3*uv9^V#)uuJ3y{3Pb z^QNn&_ojR1w`pf`K0Ce`e;a$v*=jE9jLdo$J>9y(y5Ro-(A^P7A}&O9wf<-AX6Kt{RTB^=h7pM!>Me1U8iMmu>rY=`ks4LYq>RNTZxP7XE zdRe`qURAHD*VP;9P4$*~TfL*+Rqv_y)d%WB^^y8mtsYS`;*a%>wM;~-h^7(aBicm7 zMff6!2qB_40*_c;)U@&fHfN%9d{NNyw7 zlBdYi5Fq#!*gcG}V}LQ9Y=kR6i=7ild&W zPt|AYbM=M#QhlYqR^O;^)pzQ9^@I9R{iJ?Yzo=i;Z|ZmThx$|frT$j`sDIUeY8kDp zR!%FgRnRJGm9)xQ6|JgPO{=ce&}wS6wAxx7t*%y2tFJZC8fuNS##$4tsn$$uuC>rw zYOS=^S{tpc)=q1$bi)=%rN4bTQ^gS5fg z5N)V7OdGC^&_-&bw9(oaZLBs<8?Pl$&8bAnN6n=CR5G=m3Q(J<&D4BqB{iFxOEDBo zZKYOF2(_Jps6wiQT2Aexc2ftavh)+`I#oHM30DzeOX>)(0v(`-(-Y}LI-8bhmF`3P=>zl``aJ!V z-a}uaPtyD7o%DTr0lkCXOc&5k=tp!7re@Y)<~==xY0V5}CTJ72N!ny>iZ)f7rcKvo zXgW==88oA2(#)Dg3)8|is}`Zzv`EdaMQPERLvv~_Ek<)|9xYaj(`IV8CTp{_dD?t! 
zfwoXvtS!-&YRj~h+G=f$wpLrGZPYeto3$<4Hf_7MQ`@EO*7j-pwL{ub?SytxJEfi0 z&S>Yfi`rG~ns!sWuRYKnY0tFR+B@xo_EGz+ebK&ZKeb=lZ|$%4PpcfP608xd6|5g@ z7;F*@XNEEV=-;%Bnam_Gv5bQOnT0vym~;kW7BkBkj?w2P9 zMG6!vRw&R?5+t}=2o@j^Ab~=Hv+h!NcXxMp-Q9ZMy6egJ{0nF1JU{JZW@l$N&+fB( z-`DlI>dWgl)-(0>^?T}f)o-r9R)3`ae*M4t3C7OGNyb6O*~Y&K-4bWV_{6vv6fw$} z=oqVrF>$tWc5#kzAt^a2e9FF*vnh8|Zl%0U8I$@o#Wi(as&DFs)UT<9sTHYhsoPWY zQrD+8rDmsYOx>UQG4*@ub~5!~s(+eKTFfB+ak02IIg9KZvMf#tvoU=^?$ zSOcsBHUL|JUBG_e0B{I63{<4;O52@ArtM8ToOU_wR+^M1q;*fXO`n$DGx2JoO-bLx z#E1_Oe zI(ex)L(a-C%Qwi6%D2m}%2&x(%CE_<%Zud820P6o2Fc6rbc{6k`=uioS{oiph#hMZ02$B1n;^@KGofUWx^Z*@~mUG2kR{ z8aM--1I_~%fUCfD;0ACLxDDI^?g9^iN5Er11SH@o@Dg|hyaC<Cf2YY}$!Cqi*&=Rx)`+$AHeqeua05}jF1P%tR z!6D#Ka2Plo90`sBM}y-)TW~x$0h|a<0w;r0Ks#_MI1QWuI)F1lN6-m$23^2epd088 z&IUa|PtXhW27SOepdaX>h*Xp)N)=^_nTi>TI7PGKs$!91i(-$WK~baN71fHH3Q(a_ zJW?D`98@3*Qt?XhO>sssGV+Szwc?H9y5gDQf#SI0o#LtDqvDg|yyAl5h2o1sQXuUk zBD+QQinNFv7dbZauR<0%A#!SDbfjx!RHR*GXk!>S zkAx#PMP84*75O0YN#vKv*O5OW|3!9+>JimFYFLz2f=$AJguV&(3GNBA6a2wIa4r}Q zMu2iq0Y-w+U~fLmp&ye05*YbU^@td z2#A6hh=T-3f(*!l9LR%gk@H%({yb0a{Z-aNhyWl*BV= zEsHx6w=3>=+|@WK?p)l{xDN3h<0r(AkDnGlE`CIOm-wOa(ea+~j`1Gxs`%LW{P_6z z-1vF%!SUJgn)upyGM_^#0`l%689&jG$bc( zDB4`Ky=Yg_?xJHwM~W^MT`0O*^rGlp(aWNbMW2hl6(pTRHSSMVG79sB|Q1b>0Q!9U<%@E<6HET9fhN2n9j8R`Oc zg}OoAp&n3As29{5vV^RlK2Tq%AJiWj01bo&L4zS{$Oak$4TXk5!=Vw-NN5x^8X5zQ zg~ma)(0FJ9G!dEvO@^jGcF2A{Yq*2M^lLsb`P5zuDOSVj&k{p~| zlDs|{NnVw_EBSizm*fYD+pw4DYxD#93H^e8MSr7z&1maRSQo4t)&uK_^}>2%R#-o5 zAT|gajM-pAFuO3XqJb$+D!D376|7pOs#gIjMAf3oQe~?&s#29vwMKPWbzQYV^-c9% z^+9z@wN>?4byM|I^;-2-wMBJJ^-lFywHjIjt%cS>>!A(MCTKIX1=x=nixjx(D5d9zYMFN6=&F2_!%wBtcK1XV7!#1@sbn1-*veK>tB+p?A=G z=mYc-`UHK3zCd50Z_sz>2lNyA1^tHpK!2fskPNnfJHQ>`PH<eo7Xk3Q=UzpW!{v$(7b7RPI*J}=I1TUljqIM1M~9pxV*)A33+fHk{6eklvkOT znm0s!D(^(z!#qp%p}fa=ebvwM9^~E08>C*B*Gs)IZ;D#24pZlMsC7F!2EIf1M`2Vr{_E8 z&&${3PtNb2KPkU4zcs%lUz=Z)UzuN#eTYuq`|uo&ZmTC&829DX<+p6`lr9hiAa{ume03 zc7&Z^XV?Xv1-rs-usb{(_JF-$Z`cQ(1N*{$us<9C2f}mVAUGJF2Zz9+a2PxvUI2%~ 
z3*iV@4lCeDI0}x2V_+p53&+9nZ~~kNC&9^Z3Y-e3!Rc@YoC#;a*>Em=CVy4_p8TEp zNAsWLzs-M`|2Y3${*Z#P1w#v53!Dn(7bFxIV(VhjSU7e??C#j3LCdIyuozeI{ zHn6R0^Un5b?Kj$=w!dwc+F!OmYyZ^#qP;WF3Frxo2l@iNfkD7%zzJ{$rUKIeKOh`f z2rK}yfM_5Ehz0g!Xn-o97H9w#0nNaI#Hhr`#OOr#xOMUcaWQd)<_PYLxc&OciIWni zCTePDC%V(>xSE(nF|9FcV%Ekii&4ROuo}*X3*bVy2rh;-a0y%rm%&=N9Ik+Ma3x#? zSHpVP0N22^@FKVlu7{1V32uNJ;U>5lZh>3jHW+|G7=mFKfl(NPahQNfn1X4TfmxV? zd3Z6r1YQa+gO|fA;Fa(ycs0BRUJI{-*TWm&jqoOTGrR@f3U7nA!#m)e@Gf{aya(P3 z?}PWl2jGM7A^0$S1U?EMgO9@};FItv_%wV5J_nzNFTi-r*_d@PT+H&At1(++HpSeE zIT+Jf*+uyvrkhfXxgGN(rkAq2(n0B{?5iBB?634zW-5b~3gv8Nin2uc#;{gNE6bD> z%7e<+%B{+)%InJA%EwAk`9XP8`Am6Rd0*K*c5v*VSch1bSnt?@al_)2vFWkJvEi{1 zu_>`7R2ikEDkwcwMH#3Xs-9|~nkayRD44=1f}$ygVkw?lOf99BQ7fob6kolhdRg^~ z>ebb2sxQKq;LGq8_$qu2z7F4jZ^5_WJMdlj9(*5u06&Bu!H?l5umFp&1V4qJ!O!6r z@Jsj={2G1({|CQ?-@)(U5Aa9$6Z{$e0)K_S!QbH@@K5*`{2TrQ|Aqg-GQ*f2%F@9rPXbUG?4cJ@vixR{FmBe)_?B z8~rf-Q2j{#X#E)dSp7J?t$w_If_}1oir!8?RX<%nLvOEl(9hI6=_?ZSCHLYzOHiP# z7%bjcytJ4vzEpg)_)77v;%&u;i_a9FE`C~kr?`^np?O*Sy!c`9-r|qNR+`D0Sj~8i zt!AQThGv?^PBR^uf!HGs$V|i$aYCFC7i1RVint-}$ZW&|@j|>2A7l>Vi})e_NB|Ou z%teBbU}PQ=f`lSr$b4i05{@iHA`m&EKq8SSBpQi9lt?TRhr}ZZNFtJiBqJ$EDw2kz zBN<30l7(a=Ifx3$L)1tlyHLvj3_!~SVGa@q~b8cpK=88;HW=>{tCXu-+vm|qMCYQM(vpKUY zvoiC2X7{YynLV;zW^T^xm31d`N9NtkZJ7@;Wm(5EcV`NjmRUzK|71F3&CE*5O3n() zO3CueO3PZ5H9Sj^m67F>m6_$8<(U(KS+26Q933EhltLARpY(Cz3BbSJtC-Hq-+_oDmI{pbPoAbJQrj2=OcqQ}tV z=n3>BdI~*_o1=UC^Y z=X}ie%kj)voUaj$-#5h<+v0tEUw9|%{ArLlT*a&PCHWnL)*<$0d3D`tz5;g_1!=_@>u^E^PHVbpb+%Qkf2lK^(u`p~t7LF~% zBCtp-3X8^KFeMg;#bXIr5|)akV;NX3mWQdae5?Q~#ELO3R*qF*I;;|_!m2SnX25E& zT5J(khncWOtO;vvZ)78JRLBWps*d%D5D}l;<_?G@mv9G*%^j zOZt~MroT;pm;OGzM}|#CQD(=Cff=%l`hss6!!sN+>@%D*oHDu?5;No(J{j{fA~TXR zqB6=d>M~j~R%Ngm2Q%(v+|PKH@iN1qb!Mw`t9z?Qt9Pq^Ye4JV)}YqF*3ec(YfS5@ z##IV=J(g*lKJIwia85t;aTC8?jB;7HliF9ovEJ!ggbO zv3=Mf>?n2&JB^*i&S96aE7&#cI(8Ggh26&PV0W>5*nR8)_7Ho7J;4M_#3bw~_6&QD zy~f^P|6y;j_t*#QBla2lf_=w+V!yE8*dOd4)&cK`cgB0*J@H<6AG|N#4!LZ%1%kbRr!tm1Y%5cwc 
z)NtHz$8f=L%g`tDf#J2`gWt%bJgd--dyi{c4mg&NU-y>}srQ9BSs)gw#aT zXli0=lr@DlNj2Ful{Hm0c{Q0eq0u{(4K=kjV9gEnvYNFu=WE7io~_weGa4U*kHyE~ zw)l8_0zMI+gipck@M-vT+#a8aJK|2bGwy=B;qLfs+ynQ*y>TCW4(^Nl;r@649*EDy zgYaN{9v*^+;$irFd;uPg%W(xBg-7E`JPwb?6Y*3$4bQ+c@hm(C&&5@E9cK?@vFOemwnH z`uX&W=~vTlrW*>F0<3^7*jI44U{}Gqf-MCv3eFbvDePZ3sBn6rePMB7TwzwBe_>H! zaG|!aqL3)uR=A^Zb>ZT|$A#AmpBMfp99}fBNL656m{GW=&@;s&#W!VcN>GY>xp#R$ zc~E(5d0cr!xw1T^JgPjYJik1%yrR6mys4ZjUsb-X{7T~GL=NZiCHPW&Ilcm4iLb&} z<7@D>_?7r%$!#~jYwu{!Xy0f*YCD&I*7h$SS3aWLv7F|AaKE_U+=tRm zWxdOOly)uaRyL$;OqosD)UxqqPGz&o7L`5$?on!h`T6ya;c?hnPe75`Kg~5kLeIbBQ1#n3zX|5TQgEF`rmK zgcA#i2trOMh)5!eh$dnPB@s)+5%ELC0Nns>%#yEoERCU$(exbJ>ov17&U{-X&or^GfEH#FiA5l$R_jiPk6U6ZLbO zvh*5#p}t&Sqp#LqD1BP$&`^&U5fjpkG$AcWE7Fbt2!h}UickoRFvv1wIkFB}i)=vF zBb$)T$W~-KvJ=^b>_+w=dy)Of0pt*J9661gLCzxQkn_j|UBqr;53!fnN9-pK5C@4v#9`tHag;bl94Af?Cy7(UY2plV zmN-Y8CoT||h|9!v;wEv6xJTS49uSX-Cxk$VghV_go)IsJSHx@L4e=lGmUu_JCq57# ziBH65;tTPW_(psueh@#2U&L?X5Am1yN61JEvIE(X>_m1ZyO3SUm&j}6E%FZegnUN6 zAm5N5$ZzBi^3NPL)&cE;c163RJ<#5$C2EECL;Irx(1GY6)EXU%4o63zBhfMFWONET z3AID*Q3uo!bw*v#S?Fxk6ZJyppiZbC>W>DXL1+jXhR#P9q7kSZjYOl+Xfy_mMHA3O zG#*VxQ_wUt9nC;9(JV9@%|UZf6`GG0qD5#i>V`fqdsB7>yHWmMxmflQ!fKaws{B98QiPN0Ot+(c~C% zEIE#}CC8H!$cf}6axyuEv?HgI)5#g6J?TKsBppd7(wTH2XOXU?8|hBYCOt?`(u?#a zeaJbaFX>16lL2HPIhPC~gUNYh2pLL-k@Lv~WH`Byj36V)C^DLiA(dn-8AryOp%95= z5}8b3o05)+e+b5vUGXrL(OB2UCAhJ z3^$RR#7*X=aCV#nXU{ovvp5&dm2=}fIUjBg=f?$bf!tg!h?~cSaA90Hr{JPEB^S%Z zaS2>1m%(LnSzI=k!{u@+u8_;)iaDqRDZxq>m#`)4OO};vE7@MMyX1JumXggS$4ZWt z+$_0JB9^=;c~&Boye@fO(y{bI$?uYXC4EX|rJYNAl@2cLQEFRiT{^JTrnH4@BPo(5 z8ImPAk|!6FOUR|*Njc zCV7jzP2M5zlK05_J|P8CBqj1G`HXx{z93(cugKTr8}dK$E%}aoPktaj zlAp-WxQHauJGsn5 zW&I2T41)~A45JKV47P^xh6#pAhKUAyLr;T~!PgLA2sJD;WE%<%iyF0rfmlTRB!7{= z$v@;@@*gRqET|4tN2(Lmnd(AyrMgkwsUB2Msu$IpvZSo2K2%?-AJv~4KnJL}NNN-{ni@lmrN&XV)OczFHIbS`O{S($cGOgA8a17oLD^Fd)J)2e za-y6m7it#eO1V+))NIOw@}#^dZ_0<7L-|sEls^?f1yXaVAS#%eM}<(KR2Vg%T0n(U z3#kZ7PARBJDvFAxVyH%y4~&m2sJItr0e^G43=TH|{WAG;TKTHEuC(GM+JBGHy4%G=4CCH2!D&WV~$b 
zZ+d9_YhCm^@8$O^zme(vmyR%#oyo!UX| zq4rS+sDsoY>KJvLI!T?PPE%*7v(yFZB6W$nOkJg}Q#Yua)Gg``b(gwF-KQQ<52?qL zK#7z@J*A#gFQ}K)E9wpPmU>TppgvNcsL#|F>MQk)`cD0%ep7#_ztlfUMqAJw=uUKJ zx(j{M)L^PNZ7|`cMW%J8r>6C$9j0xjbEa#i1Ewpc!=|&Qd!|>W$ELTYK@I&I{+fC; z3~d|!C5~?@2B_I_o@9^@u#AjuA{D#ZlJD*Zi>!I7pR-1o2;|ZP1D)yM(J#I z<8(81p1KjbXq{5$uPf1o=;C!rI<-!xOVJhS3Uz>PrEZmOw{E?T)veam>(=TR-BR5; z-FDp--4We+-2>fS-BaB&-78(M%D=kbI;+Y-l|3uF(%tDEbWge$-J7uRC-tbt+A->P}`~YO^si}=-M&0V{50@POqIWA zPgS0;yij?%@6i`^xrGXs#aEQtO~5At9NtT zx#HHkR-_ed-O`%bn%5fG64Vk5E&#*8`CuAYNmtR;v>CEY*U+`}BD#*Qr;W6UZlD|K zCc2q!p+}u!CVh*(P2ZvK()Z{TFc~ZWv%!3@ z3akc;Kn++9wt)2@0Jef4xD;Flt_3%M8^KNB4sbiT7gV%VbGut#w{~ueY0GZg(R!$L zf9u}XwP{;w zTbf$xTZk5CX&c1~P+~!HhLy!wg}DGQ*hR%m`*AGm06_jA6zy;}~0J zJTrlr$V_4;GgBBlW-2p{na<2$>=_4UCgaFBG0uz&GmCL$+!%M}Y2)k0ca5G+IHp>>_p* zJ5=@|qWvhnU065#}g! znmNN$`GY^ZrMMZNvslC*G>Iik1Izb(!&Qa&7E0jO&O9#?H^jtcW4yRuby~$~$ zf|Qews3+7j>K*l(`bGUm{h+$hR&;-QBt4csz#Zh=t7|zE*T6M%5O))1Ii9=6-RB-~ zkGV&jz`f+2b5FTf+&gXzZ^uvPU3nMYgZJe9c#)Bqr_3|vIrD;f$-H7-GjEvxn77P3 z<~{R)`N(`?J~LmKugo{*JM)A2$^2q|Gk=)B%s)oPTCg41j%+8kGuwsj%64PBvpv|J zY%jJqYsp%%eb~NiKej(RfE~yVVh6L%=;&bet-mDKhhjkR3 z1ZTlTm?gLhZi2foTksG(1uwx{@Db(+zJj0NF9ZmI!dxLp2o~lEAwsASCd?NW2;ss) zAwrM~3L#R65~76|K`F!vaYDS1AS4P&Lb8w|qzY+5x{x7c3Ryz7kR#*@Dj`o$3;9BU zP$(1$#ezmC5lV$JK`WFC6@pHv6sm-3K`$7D8lhHLB-9D@f>AID4ML;PBs2>xLaWdw zv(2(Tf$Us1hz(}vu_0_I8^+FO7qH>%LN>i8{5tTEXYDE%pxqxVl2)QEXh(V z%`z;@axBj-W|y!_SxA5dL_h^hzy(4e1xla=MqmX_;DyD)5@D&ZOjs_g5LODSgw?_t zVXd%ESTAf4HVT`B&B7L8tFTSjF6~eMmyOLeSu4dP;YuR<|dUgZ5k=?{@ zX1B0g*=_80b_ctY-No)^_pp1}ee8br0DF)<#2#jkut(Wr>~Z!4dy+lHo@URmXW4V? 
zdG-Q(k-fxTX0NbU*=y`|_6B>Cy~W;U@342-d+dGo0sD}B#6D)9umUTx68n^W#y)3X zurJwH>}&Q7`ycz3eaF6MKd>L!PwZ#*3;UJ*#(rmius_*f>~HoD`^8!dKy&@Ll*J{1ko(zlA@7IqX=Fi56l9v7^{Y>@0Q>yNcb!?qUzIr`SvE zEn13JVjr=u*iY;)4iE>5gT%q2wP+&_5r>My#Npxyailm(94(F!$BN@bTXDQNL7XT~ z5+{pOL_2Y+I8B@`&JgWI2XUt8C_0JGqKi07bQRr1cX77pA$p2lqPOTH&Jle@Kha+d z5Cg@zVvrat&J#n#P_ZM|iR;XD;kt6&xb9pJt|!-v>&;nmR$L#hFV~Oj&kf)Pa)Y?R zoHb{|4dI4z!?@wx2yPrVft$w7;2b%3ZZ_x5g>nnHg9|U+iZgIE+#;@ytLKbdBiF>WaIIV$*UkYP$RQlcVI0m8 z9LZ4}%`qIuE#{VROSxs-a&85;id)UC;ns3t;(T#|7%napBSg8V5F^DXF$vsY25uv_iQCL=;kI(yxEI^kiMPc&;$88ccwc-VJ`^8`kHsgV zAc~?SJ{6yd&&3zwOYxQXT6`n^C%zTmiSNY^;z#k5_*wiSeigro-^Cx|Pw|)dTl^#b z75|Ac$wKNNb(A_uouw{PSE-xSU2@=O@{YU{@66BQ-T2wO7w^sc@N;-y-k%TP1Npgp z5FgCX<3spRK8&BwFXSV5Ij`U&`6xb`kKvVkEFZ_m^9g(+pTsBgDSRrQ#;5Zcd?ug8 zXY)CHF0bPAcr~BT7x0CA5ns$}_!7R9FXOd*IbXr+_)5NtujXs`TE3NUd$Vj~-G_sh8ASvXrc(K2l$)pVVI( zAPtlTNrNS8$wnF?4V8vT!=(|@NNJQbS{fsbmBvZ7(s*ftG*Ox)O_ruecG6U7nlxRS zA=yg~(oD%wa*~`S7ipH{D!EDS(rn2?@|3(JZ^=iRBl${xlD`xn1xj3#AB2E-9o)DN2f#VkD&$E5%9iQi7ByB}vIrij*p)N$FCClqvQ4+54yE zPphAOfA;@5;HUM^kv~WM9Q||b&v8E|{G9l6($AV{gI)}NVf~_RTCroGW5cxOX(L{6 zPD7k6om-~i)6}lkuH#=^a((1l>~`68$_u*}VeabLk7uu%X8*$B#oB3+-ibaNr@8oU zo3?Y>u4(Qsw)n31-8b!l?@hntfRF&+7h?k_1y%;O2A-W37*r8d6;vCvD5yTj7{mwB zLA!!>2FYJ2UMvgYLl%cD2`LL%8qz;>KH-@IY$da<994S{)NqLf5 z%9jeHLa9h9mNZg{R4SE8TB%&BkaSX|R3%kQddVQwNVU=;sZOewjFL%ekQ${Xsaa}~ zTBSCrT>>Off+Sc%BvisATp}b=q9j^kBv#@iURo?Ik(Nr!q~+2IX{EGES}m=S)=KN7 z_0k4uqqIrdENzjtO53FE(hg~-v`gA8?UD9M`=tHS0qLN0NIEPXk&a5oq~p>F>AkN* zNXLMc{#yUcqO#C!{#z=AkQ0@tyK|slU|@)M$e}rdofj52tFIPBg@iAgA9&ZlK15a7 zHFSCKl7O+fwncJf>X0s;$zGuuzanKav}$z1W1|%ayjyo4G5>U9rF8 z)R}O-KbfpyH6^nmhI7uNliqkfjQ-;Gy8`EveR?;(vTy2<;LyXjt`6hP%9Wu0Ml3oQ zw}vj4J5(fPOv+&!LNq6IwV8Gm^A`pH&m4~z?};~OKl9t7rU!+{?S0yut)q)7(sk|4 z8v`2DH9lngM&15#r}-n}&Wt-h?hN0lexK8J@3Z_9zkeH!q^Jwd@v{G?aoGNd{Zad4 z_Q&meS)8!HBD)|vX@AQ8ob1KO)Ana&XY9|~pR+%2Z)tJC{-V9i;)v{m#bH@Li!-tz z7N=z)7T07~EUwCA_YaQfbpP;(voZ^dC$ftck7eN<`dhq|4Yhb7n_}^w?6SohS*XP) 
z*;R{=vRsRwvg;N3?81h+fHR*+K~AZ=dYe08LkOu!lm#f3s*06j|hvn5^+01hykTYyA&NJjIYgy2Po4MqOlT1dbJ!@@BWU{_NDyp3#c~iNn?(Fc(ou5q}7giHy z2s4EpGP`?S=7%^Qm_OIEfz3#ZKy z5l%-V#>(@YikxbkVY9nqyL^w?_wi0{ZT4?WR1i*HiWN?WoNg&jIMr3zDrB-by$1H4 z5)LR%+P!i0=@`(qJM<|M;eF6hQ8vrY(d|ZHe9gz~SD_EmDtm9Yy3y-%pVqYMHq5eT zuQaPQc9FH${A99#-l5b>!{@+NVMpz@Qkia_(f_=vYoH3d0v$p07!SpUq{p(sX;c)Ac$jUTdPnpbOWp~R@i6>{z3M%QYcf)Y-RS{P|>wbgDEzTeS9D?eV>oT9pS~5Co z;Sgt!sI9uJSlL{=c@y~OK_i(~S`+JG|4Z@FsnP6L?+|ljsP=m}Zj{A_?F;jC8XFg$ zcs%jvtdJC^^om?3m77XcFr~;YY(vq@xeN8~&+8e@r$_l>3jhc5MJjQ88{Fr2ird91>bTctx zHSZ{Iu7aX3NADaCC6Eb66Z=hQPX95}CfheBb@o4%c@h+L_HQY^=h;7aVC){vqUPms zXe3$50%vj0&3#!y{IY3M8e z(_U8dlARy=9q{|l$1{Fzd}RD@535Ph$y_MvV z{4}o7byxD9k=$3gC91WmbF(|-b;%o_SCjW{_VT~qf>Pc#& z`f%euwV=MM*7GmaFV*An9rFV`e7tJ%59a51lz8kaB;%Sq>147|@f$qS7BOcv!m zDM_m8@A0MT$Ka<`d5dJS(aFkWwK-pAimwWpQ2oEGm_WUA`Q?-+B(4unU8YYSlqHMv z&v5DJ@3!ctzA;TE>t@dHkjbVSOpA}H+|5}X$%X+F8iU4Xh#oI8Y%^CG<~DV$8B()3 z`s;YTib&f%|9M)|0PETW>xi86U^KYGZ&GdVo(prTJ$}spGeD?IS=1wURnQEl;=$iN z*47TqMtjpd%lVQ1KYG3ukI++Yy zwAc?e)11LL?>UF#eEl8kKPpM}1pngtLH>s_2br0z?#3ha4Fw&HwF55{Z2ghrKhZeX zd8zfc2&%Z+SZMmb_^oMz-4-)z(W)UiQe}+HvI_7w^8g>0?D3FF5*nC>*nsV3n%$Aq z6AfkNJb06tZT7#rc2(p4M%$+6jpND>R4`3&*K`~uS6SH0Ow5@f zPgi*TUrNlHrix&|Ol8^B^k~q)fcXt{i`<+VcKwH~OGMSp7NY7vRX;PgqL0nJmbikx zt!F|mn^&VXtsM*l`ZNVqy6>;)RrtJOgW0KmtnE5vWA;L)FX~sfqP-)`FI~`MTl?go zhm9Wf`#ioUq&WO&AFz18(W2gVtikn`+69jswq(8v*l-0^hr> z0gr?0D`z$~jAX>*n@?V@knH97+%JgWfc| zw)|@@hGUWaZS`h-?|5V}`Za&goQmpmMWiR*bVjF%#ncbULf(P34m}!sCF*@T`!5;qg{x5?|3VdRR;^wCs7=;JNz6KL< zTjcVBT{HI2Xf!L?w%`ZN+O(@?eVI!B79XaWX>my`(VQ zV@8)apDs~n&5EAWX4THW;0bxx25nY6G3#>P5L(!koD^I%FN)}FRvz>+cg_2l``@-v z$DOwTH|YQMaXZtWDa()n=DwuN=(xa#t0;ZlmYT>A?J>dofwu zb#@mIEm^nZ$>Om~9xbUhS4bU~SaJa+J^SjAr;3KZDOEG_| z->+!n2NfNhR}!=@sI2gN&|Kf}V6OOX^G|EOyt>i3E!*ry@N0Sl)tLPms>VHjgNt5! 
z7M1x{TrMc~`|ev^_-^pBf`iti(<#q->*@0EgU7i18suvI#91@=iuJt2u`Y)P#YWi% z|LwoJ-^uX4xHLVM3R#V^l=1)u%iIJB?F3lWVO0yZ_AT#|7HgM9yTT1W|p(x{;>2;`i}GFH_Z=^ z%u=qiIMhKtZd0!gMA(G|y~CHz?>OHwxXE+Q{Ap&oO3i}v z^PkR-UhsRt|L(f~jZ!u0jZTIJ5{tYO_p6_7A{{{ z-0g|^UAiKmrLe_!RIl|xQlBf?!_Dtu4?RiE08ONZC~lj#Ke*NVazvezRnZym?F#>n z7vv{<4v&h5K4Tg1gQ(UheUzv3xoA=u(DZXq{@^}??UcJ?+M#OYC1pQlK+GqbWNUQJ z7w67-zM9d>MF};^Th0ADJ%(E}9s74KEDGv|{AxS^FLoIb$HgTi%uT>-DsA4nT}YT8 z+tH?zjfKl%>j%z%2VHZXToc%|cX*NG>c(D)mWdN=)(y^8x)1h?9bo)ZlGIS0lr(Y5 z_l}J39y{Ez_ChXZG&*dvf9wYM z)G5X^w<~@RidERk-)cMBPqub+NOQDusI@;cr!8Y#UTy|FZ$?Ie_Vw`8jH?b$97bdm zMPHfaHfz5N>AuwE@65{@%)LExa=^~45n0FG59XxKy5VB-o0+pP=S)uL+ zp4ZIQm|zzT@AoP%+<#F?s@I*6E}_w0ijv~elPhf8vozC!RUq0!?k4&4wmL;zOD=psBuYfdp~!UhSnVN+#7r<__Q|`yfL{e zT%#DFZPhktuWBzB$CUe&iIJ8*=Jly=Mfud6pPp_NpUc%g&jva8I6)uELwdIaksbDW zs0ZUtxrzd(wa(WqdiN44Vw7LTZ&EVK-pbex@3oVaCu9Fs{P2md%&Y7>XIt$4gp~;w zDt`oR9^6)0S-G)tYtH?uuGQD7!t}{0amjPd!sgn9oH-@AgSy|$43k}QIv&+8?R#oU z&;$Jhon5z=O5gZiI?IG-q`xth8kXbbD7bzL+K@hyTAXgEn^1T~^~%qsri0%}Lw9o$ z()P?_#GITIbH+`0PDI_RygtUEMMkCG$J=^J>=J89dE0uiasQ8YkEsn~G#!-c2LJgc zGuic7L%NwQx+qfKSYK9G_QlNbVVZw5Ztz_8f7rXru(sB)PxL8C6-sGofjZDay4wYSZcXxMp*IjqbJbTZXIp>`@Gv_)V=G&Vuxk8eaYb}x` z&vW1ZpXc+;b2C3ArGT8t-1@o9F}0q|%luL5!>XJpdHZ~j?R4ALLOEo7M^R*5P8<&O3i3o z@1G?aoL}d^2hO@hb!BDo6%)$$a(CADri#mx^S)~bX10R*@u|)6y$;ZilyA%*+H|x2 zOYOCJPippOqsxW)yz<*kIX+n0=-RlvzMdJN=CDiQ@08YNZ^6C&qQmZ~eUR&w`ltwQ~>|Q61~>Tv7_A1R{!8Yw}&E@5@XD|Nc85e0*7 ziOlnbu+J6*7wpa~%^sUnIQv)GuDb1YSEB~^(s=gE!u$Tnj30X-tDdqgB}2H}MrAK% z?Z_+7`%(6!_M!Gg>Si6ce16n})J^%U$*Z_pQlWPuUmcj`HXh7CcO(rmRn7)|v~PK2DUb?o9IL9u4`vX`j&IlsPGF z;d=GY%qUkc#bf`gY?2&h7pm6Dt|q@u8xk{o#~ zu}bZ)YY{BTR1}OUsLLE9`JS3+q-LMY9IU2fJ}6s{8Yw?WJDWY%z9%b0Ld-;jFI7!e zzsOpWa!(fb4VBNgzoJZ_e@vk<>G}5}7u(`JUi^>gu*rRc?4&E*`geJf)-iPJBDjiL zqoP-9FIg$R==$TRXGLc;VcJiYsi1%V3o%2Rt6h;ARVed+R)5HSp#9=JnHplekh)0r zo7i7BL^o9TF!f#Pr_>**mvo_NxU@_?fPwUtdTZKo{cZjJw7G_BX*$F0G>d_TDpe07 zI?BZ4!RcAXY3XcXPWr7WO-4z&GMz!&YdmHAK#nr)Pz^FIPoD;A^D!oYDc3WFFxdoZ?l>#Fyw&V`%%o80#rb2Pv^*jW6wIH#e! 
zq|406nQK9s^>C2WTvH}Aj{@nm^&t5a0}}E+R1B(Yn#Cj^s(d}`4RU(bbpoEGuKG+^ zUzL=57UWAk28m1`s?6(Xvy)qTwYn{0P=$5}R6BiY zmCuQ8Qzji8nAbkN?N0}qSO;dp_k;Q9FZEli3&DKihK@a8w(xfSLofsQt>d^QdwxA^ zThIdd0P&&v)s$<9bn=1v9Z6R@_v>QlvNM82(y9;kIMZWaumnHj+o|9V{LkPSA-E8J zh&ALHt1`qw+!Asp!l5irtR((%%Fv#!VFs^)W3LVSF>Le*^+?pH-!9>(x=~j~FDZLIdO>yQ znC*>eV`eq&X_*5?5lhEz8~dEvZCsym+s4fsmpNWH;q-*bkN_Gw`R?Qm%v`}ff!QLQ z@^K0bwiBH93`mSf0wbaDE#y#Plz7>6lHnn{WIb8WEc!ivChgZTU$^@?LzYpUP!{IVXz=}A1l$nk==sgln3$hc~tsW zuoMq(Q2&>{FL|ZBw`qO&`)BXtZxIX}!LNTnn#UedaON8fwu2ax^GhH>Cgc$ts^xoR zQHjQ~W}!mVo5v8rV8=w6h5P-zd&G;uHIX>S0)zb!$G41=FcZbR7?|6fAfZaf())vL z*DgIjbFplOo?p6FzLh*EsWNW1>Wmtn?9g~M&RR=Mxdy6xsyUxGO*X$Ct@{=_0%Xa@ zkJ5TJL~d7%&X0~-JZ%TK-hsi=a<&TAv@LGRDm!f?o30Y?rT2(fIBi1GbH|bJHEMzT zj(Itl71%asooZFtZ*vbzaG}6v$;c|So4Y%JmltMx}F@V~hB8 z_=HKh4lf<5a69^uhBf7tt)iFa?d4w)U&JlRcc%4DP89k<*WGyM!l5W&N3L@YaCHHx zY(DqNDI(9x;vczBxx}(=CPMA@vJY9$%I;-7FPjOn62DQpSHNILs{aOOMPVR^=xYk2 z=4Af)>S|3n@mc)~&}F-Q-lWD0O?e<++-wj6xoT&4U(70#|dN0+fCDx995qqhdX2-SvJ+O zPVg#)VAz&Cg-E2dAe{^{XCF}>8d@c%mZo;hxyyyYzC;#ALHu#VeA)nfZ`{$8rgqz$ zSmF?3UeR@io|2xTDj9@)*(0FYXTRAFh~?HAd66X?zRjUc$1T%P@2CLkQsnfni*gPqab5)qyB2 zE;&}Z)j7y>AnfmKgtp5pKQ-80Cn_X16*hRrGMQE2FqoY^!`g-WkuS-V)&?MJ@-;TANXr0!2mn4hC z(Jt}M*H7W{)N6`I_KcN3%vbk>!Tw-+4XvWX8!rrYcFge)EevYCK>Mpu#F@ekw45s( z-D(G;lbP0HYrnQ%h22|Wuz_uFL$>IJNjL_}|+^kE!tT-8pi z7UMTJrZ?|w9E*e4W5qXdJ=%iWXUxfHaEOXhQs)##G zep1q#SXE=udpW=0D6PsGL;IZ!gZ0n5YG`jAjl3MP*PU4kgXPoqgZ+htG!*-peik`V z_My`>R2#@7Yr(oI|A6)VXJ3nzT5FcfZ)_b8dcStIEbs7iSk!&N80-&eRH>;5jLQ=9 zml~^ZU@YcxC8}yrN2e+m$L1ahvM4WEjQOdS>!z17p|7iHYW(u3aM$BpGu(4qH%!=m*PhR!^$}^J3_kgS(g~;ifjs=*3S5|1%>DcF;P2dDA-1A^?{1Tw7=T2VH&H z09S~R<0#fq9q)8-=Wt_L*=Zq8e?hUL5zlao!^!h4*_c`M5n1Z|Rb4qt_OJ-i!wl-y&1C2q(M2iwpNU(9sCXTfhN zh9bbM9mbLnijg&CB}I4^=anE>n9Y+Vm_YEa=qEu?Tvg}ElidAXR9nfW&dtdiG21Z- z?I$qjFc&bllP_VG=5NA0#AM=LW6ro!#p99+nZek2Y%+EWmV~Xx?!X?w_OoVs?_+xw zhl693kF!+=#K60SwiZ-9MGWGP-=Mi_*i?Zc79d1>z9X#LA zFFfL@u@2YjTYy^ylA4!kH6%(}oAvRRM(SEs0K-TOK0_ryc4=PWp2;iWliz32(RZ%z(?we2`dON*tMwX>DZhp 
z*_#BL!;*++1ind&1#1Lzi74WG(|ck`;~yN1^p^OVC@3G6i0U(wv?N0zD_3QbHYD0* zPe~Dd;beSYZdN?`^K2SfSN@@FEnLds#`c^C}pYWP$ET<8Zo>8 zN2rbH>^(t2_H@cd%Wp^Cp!|&77`argmXD0O67ijqqgYkIEqI!^A`i?iBsCQ9b(abj zD)DjNl-~U(C2fyzGtOfT)Kb2c8frnL({m9)SEwv@!hqA1C7nM~+o@r5JJYfUhSSb! z`_h&S8Y#`A-O2URUeMms%IV(+Ck+XsN6}}}7Z2G++?u<_im*+d-JQNRcY64A zW~u1>Fz{_2&SmatR46pe^twdfGUiq~GX;&64E?4O#b2%I2pZ*^$npw|QQT?AQl=&P zNt;Kg&FQHf;CHZz<;tth!&w&V9-tQ#@YusWne3(CE-IYtPZ(mC681{!wlwC*OYHV? zdh>bqq-l;(2SyHK(bz-y<=m)FM>wj;E1aI=W!!stk<+Jwv74&!Uy(4FPnkHrZ&OA3 zVA2EL6JBAxVES|(Y5H7#gmPs5uzW9H&S&!N&=!7rUefFf{OjqXqVDq_@$WHV2~mQt z6TeN&7A&5)Wa4aq(+UMoFH)?C*WMSAN^fSNgN%MBY`KiOFqunAUrNdC{Dgm{z~2evb3xUgt92lo32qz_Ue?!Fkoj28}Eq$d!-4fIGro) zv>BzSDVPm?Q}#@1S3;>j6pejT(g`{5>avwIB{D^)RMpH#nXk;wxTLH}CuB@Zf0wp7 zEiL07hL}OhNYA*#o~^R0mZ_Yo)2b_~tEzh{M8@-!&FTiVF>?&)-Y+f42j{+%g@Mq|?2x8!K_?aG+I)nPJYaE}uz-S9C-j z*Om4U)r0XJ{f(;G1Flh5S9$B^>htwNy+ZFoA=1Lo>f0eoN{t4izsnEHP{|oHoo?ay;#58)nduA)xa;-MEeyjU^jbPg-Z>0Ws)6#3yj{ zF@*QYKPg&gI%c|LTGBAye4lX5lwjUq3NdW2_LPn(ce4h?7n@;aL1KrQL~b`vD@-=8 zFppNmtCy!9HTSH!Y>w{C%Cr2YU@s4YB?Nc+=kEV`3rYWPj}T-SI4psi zgSO`O$6Y6~-n?GiEB-*M;ltazKYx7fH7R@DpSQpN^k%IG)1>{K(P5>_Sux__u@*5@Zi zKPs{%F){?E*x7vo3lAG&62+>B9dRG4wnYZNeQJ!P4m=RnRM{tL>$y%7H3O()x9*RA zc!=g2c>m^E@744b_~7%saYwg8AweIPQTJu&#F+(+RyW;&A#0Meo(o z(+=|V)IV?5sD3BKT z4gG=Ip$-TR-9dJSx@Qg706fqo&?68XSO)77-#PwN+_|{3amV70#x0Cn5Vt9A zW89v&-ElLb`#`lZ{+ODWx|nbXY}EgkTesNY*y7ldSXZn&c698h*#Ge*BVtF!u7+KV zxf}zFh5t{_{?}JW$1aKOGb|{!Yb-k!8!L&G#tw*0i5(m}C3Z~g*#F@@V`89aWxPCI z5+5BM6TS7nf81Me_V&NKg#~&C`UJuQeFOah5rN1+RG@$0KfXRD5E~d47#@fV#0N$M zMg~R&MhC_O#s4rLSRZDF_09P8kiQC{vVc*5y%W=1+oJ zf!sh|AU`lWFeflKfDE7l=l~{w4d4R!03kpOkOJfYB~b9+-=6_xLRnBYlmpF#WZikOeA)tdI?|Lq(7SazZZ14S66hR1B3s zrBE5LCc{P&`M|(v>I9it%cS> z>!A(MMraeX8QKDEg|pg|0!@p&QUm=oWMvx&z&X?m_pV2hc<45%d^(0zHME zLC>KV&`anQ^f&YxdIP!;ip^!jHj^!%zHA@Bbf{_y6Z{7@{|#4eL;k%7oWWFfKl-&HZfxA}xP-XmxK45KxL$GLaRcKbo(&pt!iWsd1VAAHVhg^*a6MGyk~-{&Nfb=N9Ex+ zsflCTYpZIF;LNvfD=njLVRYv<3UWoq=2bSLginw?P;*i3s66xsViM&tu*kd9?y-8Y 
zRqSvbmy0scPmC#4_rL(?nyW5+lz2V_?+X!zL}jQ@x$p`KIiDLPpFyg`Yh zwePg_M%I~rIpY;kLUo*O8su#MhX#~8r1=4ED*>XanbCre z0)%L)?4xXyoT@u+Kw1)--n*!!EZ^(ejq+9clZGSBOIe-DeU%&MRf*T&iZOk_>0LHu zGxZB&Bx?ot6L+7iud+(jS4~l0QKxA;*$u7>p#GcXr`8T^7n67M3uJNn@0jrvKE=Vj z&D_oT!5=LdA>Ax3*B&S$mW>5H@P|7V(i&uiZUkmFZ7chz(;#AND99fu6Hd$;r9fFe zHU=XjaBq+VV3;jLX;BNXZ*kp;y(k9?wo)HZFPKX}h{0Cj9O)*RRCQ2WY2Is2wv-fZ zaNTkt+(!3X_a(QWWTS5sae2klYNUTiqp$cd>N%}9hoYIGf8yNZ`cu5W;z&a{NOU@d z9DsUMc9)n>K1u$`c*Pves%LRn=h=NY=Q-2(8La}rdXZ7CQq9p98}zm%uJ0w~0A6aZ zc#qkOji7A8Z=y!fMQoRNoxIX?%0nvE_?FdeZ;{EDH{U4!ixd!^Ee@@9V@rq#{3XT% z&JJF(XsT$JRHNN#l3H5IE|y6v;nlDGf6c3J5H|{_n`k!iT##wk%8f1@BPo(>74>Xm zDmB=4f{OJwXNJ71;(~(e2`kI4?u(q(_60XkJYC{x94(?@4p}yOZr4=QQHdklV?~|a z6Fp0;-LOYoPjz;r8?^v)k=g@$9s3OXliorYO@0cPz$WS~s)k`^{KXo|=Cb#)Z*e*N z3qqXehbUEwlagey%0^|5a=k7JwBZID1D1t_G1eEh#G-egNnYUUaBnIDRb>?(Kc>D8 zq~tD|$8WgQII=m)H5EAqznylPzCt_4kZfE*dsP%%d7z;;NarIrbwY)r)VMZ+i1>pv zi_)EXnChp6Fzk%e%s1TW!fX78{Gq~AqQfGl1Y#VKM#^lmB7LrzONCirwmr=1vMAr? z3S;Hzs=?I{n?hTXTOCLn8iuK(v{5H;9Gt&-)1?bEHTqfR6-7TwcKcN2tGOrL3mUuQ z;rLYqyofAO`9_%Z!ZRH@+#>1#C%U+cKMI+HJcjIzI*FT#UrP;V#<4K`6k(=JBYPw> zso*+;Wi3b>8escl71*BfshVB59NKgGMb3Rre{Qs*pm?J1PUW}SPRga`g4X58Rmj!I zR@4I21@tV;7mFIW<9J*Qt_b%Ex0>K4KcFQs^O+M^XqKEcluO~ymK=~B)V$WbHGXq+ zc6M_iyH>|V=7N@Y$UB&EG^ao=PN#hk=*4APw8&{af_{vhMfyXJuUpI?CQXqh z=$z&~=GlcOZ9812-9@F$vT5ZPsxQnkgUB*`m zGDYDEfj-6Z(>ll2Y@2PD7KOU=-Dirs0ZUj={<~^GQ+4}x*V*dlttHr@95-*jvbiBc zRD+I|T4Z>o&Vsh3mk#rF6MjKXL&u}}Xe(wamWy3R=tb&GK3(vGv620a{gI>L4ib-& z*fgs(x4wbdCf`d=F)|Z2ab1cWO0e_ zd1X@Vu?AsN`22cgEwU#n7ng@m#ZM@xU@inr$jbmqSDhq(3wkxW5(5*Sz%$6ZDC?S&@ht^l|*Ex>c?F3|K*TWE1)z zPEC8xs^xL{Owm*IN$oIwW1-7Fuh?CFtHM+h*1Vd(N>DDng1n5Jjw(V;$9}_oz&#?| zBEBM@qMV=vC}`?jS`cFl;~H}?t1EW`pGIBB-@|{-Zx?c;h0vs!Hms1~~FP?ki~`b#@FcKUt;lA1hwzABz5g zole6Et*R-SIlwC#Q}@JmkUEk1oizh_6<11~&6z0lNndF0YsXup&c7yYx4xPV7INN%H;XQ|3(DT>HhM*Uq6Xzh`Etrp#Zi ztC~C*HR zGZ>rBsS(@|3{Y%kT(oJ5z6sWOzk0vv~K=6B}%mfnTStkY}_j;jDc&hyesKb8&!dOIGt z>TICKF0K*O+Uug~+Zvi0dmy_3Q( 
zJBSG1UK3q6k@$)9r|C84A8Y!x8gDl`G_I#*_nHVL8>(l}rlKC8wxGHaq6_X&Ur-M* zk?aZF_59g_Hy~WjB!4fTtlyy@YGjz7nkU;HIoG*XdRLZB_pJhvYm@wUYPvS=L>@rG zv3l~~Ms9e=1mC-@+W5OJlQ6&lS@{Vl^@E79?e{JXTMY=N&1sOf_N zH-v4ZG?wv0EnS?u8pbhhvj%XM@cRnxt4krzOxmdD@{PdTLbhX_q?^ObFm)6RAr)Cgz=gQ z_D#;S);UBBc{yz*_mAMWpg?>>Q?0Mkhnk7zd*()Kv@P4#wK%~yqk-*@p8pK3$DYPL z#C1onB)lP`C@Gq9Dvvf8_lWh8MdP};Y201hGrSl4?}A74GveRke#$56GUN;WH$$D} zp7orq&Bk`*xyf#sXSe64_eiO#vXfs9;=u7p%}E273cUZHY~28)POm}B~zu;9<-i3jF9HPiX1>0O;O1(zEna# z@)F8B7LId3IK@Q}r#Bo1by_%77xEu!m}3KN0>jIUX3b_NZ~}sNvb&12jw0t}S9$67 z3YdSq-`8-f<5ue<bZ{j){^LcP>^mMhx~wSrru}n;Uo** z3!lrR#m3SZK4i_u+Vjnkl6ci)_miQYuvB(Nd#GY< zowgxP-bM}uTEy?_mbzQ|0saDVkZ3dar#K5WR(V~xt2*Ah*@mx66RVUG(pU0W>R8)o zO%ME|n$7KNkVq5_tw(2JJ1TPV!^thw!Hju~HH@!}p#T@(!kNxJ&s`@;{d+N)s3iogKj{|yL==>)#G7uT{{7(vxVPK)yf1z%t;sbsMb1`pj_3P^FS`$V} z9z`M08W}(3)%HR9-4ze3s7*UmgHijW8 znI9Kkl>V(*%;Vx8^In2cY z#uL64$;Y@TX|y+uKaoDl8D^#LfpR^_%zp+tO2=q7sCA6b%t8xeqii?f8w^P^d9&FD!B77Cx5%*U*8{ zv-DC7r?x_2F*Me^YzafDirP4+!X@)wv{{H0^5f#&4GE~>*nHd>+$~%e##mkrzet!W zL&|bB>ol*mczuR_tm}n$MaAL5qcvi6ACORuHOgyMhE z^+aAr-Y1Qr2ntS9mt)|JHbw-?0OISH@f8AvJW&&0h<3I(n_aZ>1wfS7RSs&%Zdi+2 z#S=(}xY!=c6Kc*F8eHxWD+>rW#^D$O>Juz z>6q$D=1~wyVilWcMT`Wxmd&F!Xsw-^ zexeVMiH2pSIP4N*Z*OrS3f-MDOLMI7xov0lSnP!}}{z%Cm<)O5 z2R~SNMfO1UT{X!x#(vka$ra-H+Y?mMr);z@zhbHXV(ssyk8QGc6mlW55>3SBla^Cb z7}uFMm}#8Nf`P*G!i%C>OpI-%W4RMnUQw~H!d^S6AsRonDH3-DO~WlGZ{#Hig&Kiz zrsZkPA?!j~u@pw0!@yI<3V#+oz#KJ*dCyJw)?FQ^NINJa1TDg|(iiv}VgjShIZwE< z^^XwSMi(swk!Z(pJ2+#6t7M0(*P!I~eH~qq9JWI-Mj7ta;ztW}T0hd~6|SZor4A~| zS6uLbWO-*ZrPA747HzzNjitn>nVMV15smEjU~y4FRKXHVPrMBuD!6C(THRFpsqH0& zt~sKZi#Z|MXZzd6Y24Xi;>AdU+#U*(dP_P``@yi$774J)Ntlts8y#;99AN<4xp_Ek z2FGq)Ux6ZQWd++D^E%;}$gP+`lpgBp!FhCPK_LbWj#uw3k`-0r|~5-KX>iP|LXL_OMkrL@VH@2>%3 z?cs*Xl75s@Bn1_UEc2yRhc&H1TNw?M9n|fNo7^AV1H9XUeDQU0uzahcUOiI3&f+VA zIr2OwJkGM1@*Y)uzpbH1vz=YTnIr|pKl0N>zl!$Dy+u1qzLoNY8{ElFUmFI>*|rpq zq(+U@m&TrTRObAwpQbEOYTPuDyI5>Pp_M0?7D^eo`= z^<~%$5Qf@aN6|}7wbtT>+m_1tTTu%35k9TB7`YaC6CHy*jd??D<~v2Z!Q!y_6iyFU{-1P7b?c 
zcF9@498=ffF_$|g)m`LOQ~?S?QBiah2{j94K$W7ZQB9~qlo+K#)u5E9VpIbv z3Ue8C4|NuG9d!$J0d*5~9`zXXRS!UqME6CHM#rH$qX(hBpkmOY&|}emP;XJa(OuA4 z=vipEkbp*`bI}E8BIw38p&jTJv=_Y@y%Bu^eGeA1PR35dqOjT6+1NQ)1{RN%V0BnAR*Nmd+OUAW#Uc=tSKEgi5zHYjY{fPaH{f7OH?Skuuo2u`L3&Zuo^}`Lo4Z}^q zO~g&cO~s|-X5w;ik;pvUT-;R*11H8Qa8Z;(oDt{3HQ*NDmg1IxO5gRkt++$DKGquz z_i(=iZ`5yqw*4CS1J?P70|aATtZw++)CU`JV@M4JU~1|+)q4CJWISlyaeRwH^lEm80jB%x;v>GDVWrg6i$jF zMUajW29e@PNu;SHBxwdIorEHZN&88qBsS=~FC*2F7LYcP7L!(z){_pAj*>2uE|M;h zdXTS@9+85{{mFyL8gdMoL!M1mk@rin?Q9Y&nK@U?v}uO%NLUm{;1 zUnAcjUnf5Sb#|Qc@`wEd&ah zg8WyV=TKM_Aw@mrJce+twQ}p*+@A`c}}?vJkJ}Ho0PbMp}@Hu1jNe$K)RF_ zH!vX|c4Cw5hadv<%uz8j^;ip=l_flFDcb8V6~hnQ0Y3#IC2c zQdZG+(YDfd0}1;GZ7=N@?FOi`{!RNqdrhmLzoZ4z`_UumH2P4WVfUu@rN`0-(EEex z>p65R9SJmS8C^>^&`anZAYv~9BKB(fTKXQkpT3R0n|^|RhJJ~Dk$#nai~frKf&Q5u z%oqT?@!pJqj6}viR(BR7oq=SKfZ5Gta2PzsEQX$8V1U}~jv^pEA7C709A}(ooMT*N zJYw8o+yvV5L&gIjKEGi6&3MoF$an{;y1z00FyPGYOgfXs3}*IbM*J(TPGx2=bD49P zNah44hDiiED}~8s%9;OI-EN?D7cqUzQf3WMyE}l`-Nan*ui3pD7}7t19sP~@o(Vi; zR)879VzK(O!hjkb%9;oS>3%?ujt7qPbl_5Dup}%ai^`ZnxTgx`G$)H1|fGuU~*h)5^ zt!6WUjfDfMb2GbvUGlH(+`{&;YuKyUYuTsSC)p?1U)jgmhuBBiJAt2NE;_@0%6`QD z!v4Yj#1623vwyMwuwk5TobH^SoDfc5Ap8vhzTarhcn-vw#F@;=;2=2+4wpmaus9-) zmE+=+aqOI0P93L#bD87j)N`(JE^u~n&T#^qqnyjUgPdcWR?Z1d2=@)= zJ!b?rh}(_(mh+i|=B9GzatHBH+y-tH*TG%IHE{ppR&&d^^SN8NQm&Z0m-~XdpL>-1 zha=M;C11J^Wu4lyutiP-bh|Ao{*>H8F)zEX#PB29d8M51+SF1n|G6U zk#~f5jJFHec?Ws#dCz#=UH5qpc&j;&cyD=Q`CoXScprE_crgBNUKD>2zcYV0e;9uZ zKY>4mpTtk(PvryQicbWd-dsMJ&*0Pf8or6|;=B32@>c$Q{wn@9{vrNR{sI1BAiiDX z-{Rlk-{(KzzvpX&Jp^#UPrg<-1z3j(f3r7g2HjWpj z2@{1G!l}X};Y?tQvV;^NSI85V301-sKogY+D}XDyRJc&+6>by06>b*(4K&dg!tKJx z!n?vdz!QBV+#x(GJSu!9Y?BNT%>-&_x@fLw5|BkViN=U1A};Vli$wE*B)UdaFKQRH z0&8@wWCw6Xw}=*qc8Yq5D@AruC-F&9H}Q2*tT zekSfL*&rSQWY4~mSK=O$K9U2HVUlo3wj@NNkt9k?l35atgd!10awStGu|W5%1n#Ft zvPkk5P(CAzcS#OO4obF5Hc3`X?n*95?nzckZc1KCeuDgxh~giTKGF#3#MYtGslWuh zZI~u~(=-dnfO%3humSN>s+1{ZOHERi;Tl=Z_vQ@&vFrbo;Q5Y0+6+=M+*L0wi4_5>fgB4R1F2zbrud^sQ+`lPRK8VAQl3_%D#Mju6~(|L 
zHz}P;hf=AeDnBaaN*8d-$;v8ajq;#UpxmVVOSw?FSh+)aRJl@lPkBZ8xAL;`y7E1+ zzi%nKtAbTOl)Y7*RJ~Mxlmk@*R66BgRg`L|3R0!2CaEwguBus8t6HaeuUf6zuG*wp z3wk+DsqUz5sJg3vs|Kn2tNChzx)f-O9Q7PEQ7uzftADCJ>UqFqT&=#Q-l5*BzNkI{ zY{s)dX#5)pji1%OfYKOH_tb=G1_G;boMtRgHz#RkXp%K)pwnZfX10c`(E*dxrE!AX znp#bRW|3x@W`|~@=8)#7=CJ0h=A!1I=Dy~E=DX&rrWZO=+e14@8=)PnP0&uzj?%_y zQ?*DfN_)M7q?KqzT9wwM)oTm27N9L}(w^1s((VW9@*AKnFVc0^DS@;6O*=_99oWQa zIwtUm|B;HtI+c#AljsaOyY3&Kc$w~`?wszj?!ETD?vd`9?uG7+?t|`~?!E4t?yC-= zhwD4*L-e8gp89^kavh?N*N@dF>$CO$^oryG^KYiUKrhzwK;MWIq&=AQ27R%a{*&+QF{T}^x{chlyozb7vU)0|Kn%Q0bA0U|h(swev)Bn(a z(0|ki8+sbL8-^N20R1Zl7+}K<2|xgwVVGseHOvI^7umoza10`Y*dQ?o4KjnupaH!p zCWF=BFt`n+hH@aaHT-L}wHVd|ziYc;hheATpy7<+s^OyHuHgaD2%i~V8YUF~G;}j| zH6o0ojKhsXjB%iUWuP$$*p0LPRU4&7>c8%x-Doye8OwotxWL!}+{3NL9maje-Nrq} zUB+X^GsbJio5tJ5N5(hCU&atqA5*+J+BDjnW2!RgOvNTE5G5;sCb`A5(zMQW!F1Gg z3Ye06O}kA;OecXW`OI|F^xO2w)YaU_+}qsWJk~r0n4T=)crt;VT@=s>r z1It6pQ_E}1J4+A{q zQ8FM&wHG!O&M&M4Zd6NQd0}l~H4vtn3s3#4N1X$X)KOqay(sK$?Q4AsBq^2^X6l|yA6>miWmkDhpTbWj#m1yN#WmY}NNc3A5 zTh{o3tUs(>Y(cg` zwtluAwjA4R+j!d~TbgZ(ZKh3YBifKQjZI`z*ibgEt2{%=ZYSCqc8a~gZnjJ8V!PE|Xm{JM*lhN-)kXGFdzl>! 
zxY_;oI(xmn!CnOvs5bl5qUpf+no~5l2w8+K!W0pT@I_=GZnBC*MdBi1k*Ua4R9plK zU5ZMIDvIihI*QthT8jPx7S+b09Yt%4wiazKQkNeoI$CrRXj7N}FKy~$(f6X;OV;M64=WJ(r!%Sx?aDy?EqC4F^&5d^FfMkX-?nrl(+v{#|uXN9MFLF1yhqfYkky7le_ z?lSjs_a}F_=au`c`?nkJx$X}39B_YiclF$Ne{lEo#CoDVd)!k!UA#Zta?fZ_vZui7 z@fbZ)&l-=yC_Z;vX^<4EV^u%}r zo==`gZ+bF?E!0a(sk?jQAuhxrLWqzANC^pq5JC(HTKd-By1To(ySux$^gb|m?rJu3 zXEoiIB_IC3?>Xmj&UMlWG^fBRcP3)RxgYCa{SpPeU?Y|Sw9LjnxGrWjceQb~c6G->UX*JXR`PneM!TlCCb=jsmW%Haxs>g;vd8?$$2@5$bmy}xQr>`wNx?9bWXvumPt)y&<<-O=6I9pN74p6KSd zDeh_R8SYfK+O2Yn+?j5>JHx%)z0e(SFLnRxE^#k*A9kN{A9UYzzjA+aKXX5Je{i?T zx$l1H{^q{xzU6L|Q#YqW&I5OwoMt&~bGqe3=d3F1nbR+4Tu$$ti71m2QHYtD6O+Tq zVdp4w1Uaglgd9VTHOH3Y$?@g{bMkURsD6~>jPgwMjQ7m&%<&LBBoEyq@~Av!Pcqig zvaycl_T+lJSV&97PFiQt8qZ449?wS4CeK>WR?l(I0ncO4BhO9GAJ0$EFHf!9+PMvK z>*hAiZJgUIm$0B^?io+3+%{DUcZs?B-1OYy+=ID$b9Z9T?IzaUPGi;WRPIYGzum*C z+wa_Z-um9Zxt+aTyv?xcHVI2^gSgi& zwXcP*y|1OOldr3KjW@G*Q`pU5ZnNql9#1-`|; zHNJJeZN8hnC%%Wim%ewtEdOU;ZT}D7PhWF?Yk!!(vA>JIjlZG4mw%XlsDGsYyN~E6 z`N@8YzlndQf1E$Y&-C;C3V(t>$-g|L^V|Jaf4bl1cl)#bKL2w6I{ya$7XL&4WB&}% zEq{$bi$MFp;J~23tUyeF84w4gSk=>GQ74GZ-mkX{I+%9-f@TA~jL0G7FC^9rKG$}MEL=4RinL^wUKg0_O zLb8x9qzdJRib4gU!qCdlve1Um#?boE=FryA_RtP2-JJ@Z2%QPd&AuIKTzDn)BUG!f zPGP;mme|T`Uf8s-O<{X1-1o(1-bifNkHdETH0;(-!g4*gP*}B1uPD4oQKBbk!M0l} z_UDtaJ)cvxIA4ZU_J6VRwh1e5hp@MP982rB3*QtH7kn(dm;ANxOJP&2p^v~yco;Uo z$71Q5h|;aHD4}ZO8WB@0TCc;7^;WD@AHquY3+ztU#0GNie~X}zr2|U`mrlW^9T9tW1T5MyvDul2 zb<052c4g&S<#H@h?#II94y;X{tJ;=)gbl{W*q3}zI;!j=8f5QFTVpY?K6VouVO6mn z78Dzl)xsWPN30}wK&>tI{1C)T}s z%hduA(Cqm|H=pm|ZcaLQo;Bh^=V&xAEUfBYZ=(4pl~Y=PD;$2rHZd(^^y^u8>qnD`XY? 
zzkT3tFYVr~d)My!x?haG5Z$kPB<$-U-6!;%)U%*_ara6C+#ePU3xySi6^9jtMRf1c zz3JaEe_MA~RMh(WwJ!W)qr;fctJa7Sdr2^~^p zhjxm{kEjM`w4llwZ3S;s9ifTPMkGX#BKQ&U5wQ^uBOXOOiFh257@>>MNBmc|w=gU= zOcW*#lY~jbWMM$M!{Wli26Z3QV^H{@h(SFE^%~TBP@h2)sZ(%;`~Pvl|9|}N|E_=h z-}Q}aN7ad{8&xl=epG{~hEa{88b>vWY8urns(Dn4sFqQ!qFP6_iE10wE~1qIyU5iMmnaM2(X*j@39-qgr_N@EYMY!)t}t z4zCklH@seW{qP3i4Z|CSHx6$S-ZZ>fc=PZU;Vr{kg|`lG6W%twU3mNO4&fccJB4=+ z?-JfMJS=>5xBtO!2m*^xolui7nZP4B2_C{u!g#Wjti=7c8l^g=l+v0yk;J@PIs(FoPf<@CjbA{YZIaJPfPE*{t zMsO3snLgotzgpq{VgpphqAwVdAk&ur&`T@dz z!XaGHkKvMjl5h&w^vUFjWI0)ld$fvN2tTC`r7oo&Pc`_ufgZ_1I0N2WKgr+84JiLm7Eo$a>rmTLJ5%owW>IHS zW5MDk;5NOB`i}aT`U!OQTd<$cslTb;sb650i`aF*jq{up(Je zECVZvMaRW)46L4KuoG(VYl3P@=UZfsOqi$3!HIZ7M)|A$W)_~TG zR-M+6R*Tl0)|gfg4pb9b9a;;Rg?D4_#6&X&F?urwF#0nFGWsyuvpTSb;~t!W%ddlF zWm#BuT!0xICMS#I;$(8NVI8!=E%h$14(^r)ep5k5a8y^p;y1z#ty$Hrj42u>eF3xK zwe*ejBOJr}^0D$E@+kRGd6-fH*P)Gyr(&pBDuGIe`x1*@mk3UoZ(yrdt= z`qb~K!_wSod(x`ITxo>2nrOViWZQCV9*jP|gd6cYy^+16y{lbg*Wq73x2;}V1K z4uiwAoG_P21QWs_@`*!0h|s}_kcea$grkTfh?9vl;uzvIqL|1f3W)=W1mZ9v3lzx= z@Fa7{v&nPGM)FqjHgW_doYDt`MQ^w;D<~@|t0_IGda9Ay85Bbo_&q&AF!Z8LqfMqo z(`L{n(K>=^m_-{&8%-Ml#$hNZhi;%9CeWg2{b*gmKD>;1AM-foRm>~~8F&A=jOic% z#xY1B07f(3F~{M`9%Lx*7LUWNx{iA|Iq-q{b0^@wJ)S#` z>#w?Id$_N7jrcx(7F>gGV1K#_BmxmGz81lC!D+!Y!5Q3jU*Wp@QqV%w93Da;E|)e@ z67HC4ksBAyc_1D2xM(JbibNVw0WO;XkrTe-Xj}t7NI%O2a)O*FC&@u^!zJL$XX5rI zlvCvlxm0;hISDtLVq9ums%%vz?ltkMWZY`BDi7{9A>3`ODhK?zc`Ad-sY=3CCtp>B z>rRq3RcqCnwIW@tPKb-ZL3nBXjs1+njU$Zqq<5J5JYd;x*=I?DDMn8V;mvKntq5;# zCAN*W1iWMA;591;#$+}3aL+V+mCpyKug%?*yUKeK9>RLxLEll|5#M3o175(t$iLG6 zuYa+BX5eVxbYL*PhN41!L;XWN!5rxcIfPP>MwPB!0_@#f_@!w?6|odlmXqiJM`Q$5 z6bGg#M4Sh@C_qdm(m?aj$#G;8If-lrpYsP+)CkIO$|%YpusFjg6DfNsyC@VYjY3Cc9u6?_jNET>z7Do^^(Gigk%~lC_StiFJ{6 z1iYVwBjyxxO5k>uaf&&moDOjMrh<#u#$CwW&RxM>#ofT&#BB#Up$)$^=!E8cGYqpr zkfa6t5dSNjxL%+NY=U%f1F0Yb?hF11z6kybnhIM92MUJ@+X(xD3T9lt3F% zYteYoIMD=A8t&ewMTbTEa0TBXx(M@S6{!5nqEn*dqUEAJqTTRmZ{R+DMzlqAQ?vmt 
z+f`iAkBU}{&Wgr?VBIbK2@j(sOqn{e8nPy`YO*93#am~7+yQVv@TcKaBU!Y&5U#cIATTn4BLK(Qld`$YB^u+YXG!D0p ziMVAPw+O5(+w26qNp_|vT&KKe!P8#wZ2@b)!N1jiEN~%UDzFuF z3QY)&4~+)-I|<+O&qI_#e&GVb8t{vihR-_UKH_~aap#D4h=+(1~r9R0_J)(Z9gcj zv$PYSw*IAU0>5>TwuiQocA$#iS_gh>18o;=1#KB^AMJ09osk35X)!#+z_x!-u5 z`4RlCe2}NujQGi~Cm0}*f^^9Q?*gh*;1OgCS_nG}y9pyewDbnIq7^D&c25JnQc14F zgJGEnZljI3zWBH3ji@@9jX$Eg;t!&4qIaU#q6*PxQF9O+u)42$qB;VX>nL>>xI?3H&jxp|?x_Bv8maDu+jbvy zB(B>{aOqy9-HChdYTS0$;Ig|`yF@!kr_&|i@(Sw?x7BU>lem;_)Stn%bd$b=aR5N= zIYxqUwsDnl32v2(aIG{aIg_$*Nj#o(H0e;%k)-OTA4%Vn>YEyxDw2LD)y1{1hPjow zk-4#Xnq{VChGlxyMec&-A}l>fOv!(f|0G*eYglVrt6A$?>so7B>sUvmdDHf#)v`6S zHMTXk4Z~&R3GO3@a3whnU-p*mtnIGtF|H<`)4SWB*=ss#J096@*`LDJe`2rUsPDLF zzh?hnuZ~N^WwZqD+ArZ!@eY>>lGEZ$aV9(GR=#CtU(T-QuHmkUY3hGKx9f8^=brJN z_P+N$t$KTW0;A-W@3HT>@1*~%|A7Ax+`(CaGXZ-632$I?LUTi+!Z^G+y(#=uSiNX@ z(XFC;MWN#2VkTJAMx@519;A1~o}fd&6Z^o>twS0^>PqTBYC&pF`bn$?a_x8&wy`O_CZiVPAQ-??U;*nho3a=z zCg{B8Aiq0-z3a$s$$ri1#_qxH!;WA#0*9yMFuChFn>brJn>lMaTR0%9IkmWCE*&J? z8xU^4xG%ZSx$n7Oxz%}}xo>#`V56>r#j}hbB?y8H^9xvDx}>nu>G0Cygb5(I7J%o< zMG3(p%mmXlUGzy*PyAOz0LL^?OczfQPZJZxGsUCCB=J~rUvWQiZ}3fX#51HbrN!Wc zjz}kg0wTgwrpV^Vrh*6h4gBURbVMGH{>1kz6OGc}jL_ zn6<67A?~x?tlh1xtj%z%?SPAIV{3bBN9!ouJM+_yq#a1BgIi^5Tr1n!#^P4_0e0|v zbOPR>pk62ayX}*$Uiu$f16&&4+1{kTPOpZ`U|-w?d)xct-q+7TanRt|kH95wrlaz< zN5pM!q+=K^dJ}Nt>+P8ApgG;RY~?wZ;KJp>ovX~Lb(wH8vg2N4akayJrU@=It=$dW zZQU(#uW9OT?w)}=h{dzevj`oA)!vKV%ieR|4ZiCBI{t6IYW|m}cp;Qvg z?WyCzEsq0{{987S9*?U36gr+TCx>|ZpdJ62{St(D(2G^&M z$w2u1kX@s6l&34!DHbX6z{;g5lHmL=LzQ4X2)e_d>70tpDxz*V=(>dpyJE0%CkU|D z%6=-Zdbj!$xT}ro2jH+CtAByQs;xPuKBZo#zMx(UI_n2Gt#9ft;Il5OJAmA}i3;v@ zFj^l#X*~zQby&R;6xTlRTt7fHH2~N2PCHg-1%1>>-w|99=~8Vy5tpmh?s zizw?@FbXAUr_;`+T}uEZv$GOADDqb>EY=$>_hFt?ZaROdmZJDB8R!^%D%_3 z$`QnsJrCFQe^9hZ!M)w-SmvNR;|0f@`*Ddr?A(G&^eN{C+@w!9skrjGT&r;14dJqz zN@z_ z`g{1h`aAoZ;da->-wp0#cU`(%36~~SXn*Ks=tby$^1b95MKz1qrBBL?<0GzCsW8WflW_i2ic|UWo$hsk#muA znsW(Uc>``kZWMP8kHj0oo6h^go5~*xPJ1$XG$*T=?8E#qAgNafR)U$n0HS%La4VSR 
zPO&{=e+pa2z8C%yeikZXKM0$EX|5kzEw)8$SJ6z-VzEbj4m9rzu)IgbXTbA5f+zbx zd{z8L{1)WzXYot%EipxMQ~X2Bm&&9fsaP72`lah&@oC|(C&?0I0a>zap3Eexko}c) zm3Nl^1ZjF-ab0muQ9JIQ;)~)f*wY`1i;9onPERS$C>n!8Z4!4vaZ{m*`-OhbD70aU zlp(N}@05|MfvWzhfuI&?nlQ~^ja)NJ(@hfry0N2%g9<^oCI-~w08LL#f@TO<$5ER8 zpdCAde>|nh1`A2hh&59{M8<)MY@gTx&4NLRVTr>MHHn=P=jbwYHr+7LdqcqRjWYB# zL>sz;%^PZfH)I>&djy(`n#x$mxXik?ECe&K%G=hV&NpPWBgIRlb8y zx$ihqMXbDYd;rJ7$Y_x9-q9%IG+368j+5Y4nq<_h)^9{(4Th7PM z^WZ-it^=-vpeP*Kq3qFap_}gJqt`GI#RiU>1fF4tdlcw~7}Oi=o^_tx-~{%75!mI~ z@7e6x24Y~JXS>Ipn+X!&Z0;T0{%_`9sG_GHV!rm*2OhDzra_JQxGV~D+m^N3)~@ZC?{kMWrm!%`RYO$ zp#z~Kp~Io0p<|(Up^u@{g|&<76xAzwR`e2P?2Dq0MQ@7M;y!w){7m_UaywxSX$#z^ zO{62Fy{NNnBkdxsBON29QA}`*?!h5?i~_?W%0TKp`g8gv`cwK@I5o%U7wC^**1Usf z^8lvJQTh}533>~7FnlJt(#U4AnKY)7Iftoc&VtiIfz={M*XsoPH0+T*a7Z@6Alb-{ z<0bIspiXrHW<(1(4XXuf1s!6W#?FZC8v9AuG4_YBI$T6T>>M}(bKwpQfjiI`cENBQ zRJw~=O2$dLNcu_!NXAO~gPiXqX(ee4lD>_kEvR}GsP^^JLh$HW;L<}?RC=zgraV7R z9mfW-Ed;G?1fy+_n-!-3yUmZI##!Q=;I+kZoVe*qopQf&ABg5Z%0*y*FKM=d`Sqi~ zb4+st{BMzFw`Qki6-r$pO&Q4G%V33eMAa=^AGfj_MVinMytN8<;uq<@W{L6hE1x|hT; zDNQmH-y{af8E;aVxZpY!CJ~5DuX(+>&|CyUGaIC4z?^4JGgtDK>6ZQ(dn}!jJAkQd z1*)=3@`B`ll1(Z5Q-oj}2^EluCWSWAI_E_rs{&3s&M{t)&MzGC#yjg%XJSt(hb*bFiCe{YMyjmab0wkWk1SRx^?KF z$WX`8y0gFuiQWIW6Tu0oK?`ZX3|ZVM?s0B~TLeZY+jG)$3_Q+ZusMf32R%nT55Uws z&wYnh!cP!3k8_{qZuHjl)%R7>FAIDdeY5?u{8T?`B>pjeIvADdexje?p9@xHoPVOf zMW8K8G{Eselne(mG9oxOI3YML$N@1T36g_Dz>v%ZNiq@)2|HL+P!!4sV{xX6r#Kn< z7WxI2;%g|T&`_9E*sQ2|QOlwRMJ>QP{4V-j^rxtzXjAcq;?2bmN?w%N%bP4{xuCM_ z@fNP(RoI3%NmoecNEb*SNM}heNFwqlvV&r$L{ZmJE9li?I>xk(`2&xwPE7rn#xYH* ze73GJ^<&R zUs0BXDWOT0NX!y}Bw0d+w=zVcmZV5@(uLA%(yP)%vgNQq%Ho#8Yd8S=;ZoeGxYD?7 za2{5{b2uM&CTy2|W^8C-h4gnBYq2oA6pw9n}44%{REj#ptlOLAM7J$5nCZQ}iUgq>2;Q z8q}b`1IAupvKxZRZV7t&QBsBp%D*Yrbilm8yv)4GyaIf)*W$C}S|XF9l2?K)ev|wT zHM|0lzs8h8u)z7?f6*;S$w;Z>a`&e00yA3%j&`s0AL~i$X6r_@xOQ3>S@(gj-ELiD zy$H&7wN;yTAAIV)G(OnVEE@@YsIQ6>O#>%d$$i$fx3YJ!&$gcc4|z1>a7J-PdB(tCGKVJcra}qbOtVXE_#$;(h9(ywE_>;8a&uypAd`| 
z*Dv<7Kx&EnJU_?ZKF}`EC14Dg1Ezo>U=G@XnZY7ZQz_u4%7V*-sVLhl1g*59U~$3H zf+YpTp|VhE=yd2}s2ZrC%)+mQZHwAf(L6PaYZo^H#dDx|9cY@TB@;_0m3}V!TK1`I zcll^g9<|8T$iHCP{vg#L*CW>_*CyX4XTfaEhUMy}yr*oWM#hYe83|u=e9VxTJ~7l7 zLJT9u0E2U2%pm-tTEHx4Zeo_fk6gk0m#Ic`@E5xw=L?*~dYreM*Qg!6LN7n~J`tGIn0vQ@Ip zvJJ8x@m=Ge$5oH-65l@lPh1!*o(A!sVEfdD-P1gtq$I$qIRcMnJUo$%1Tq{ET7nba z2nQaC2Hr?=f;K^dUL!N%iY6gJm>@`40Fz?_e2y4hsg9s$=;L5SIN(Lt3~9!0Nqt}- zgeUc^G7w7O4?IrtnzBqDSO7VuQ(*UZg5KY2z5?-c^ztqYf?6( z9Ij&AkAZjpC*^d?$<+JS�XK0xA9)toU>5BkN7@;xDY_)*E2Pe^`xaCNSDB(t6rT z)63H5gM%(fFG_ElIX1IH=G%<^nFBIgW;W04kr@W6_cg$azo3QfU}Td(%*sH_D!|Dy zz|A6l>f*VYW!KGa0W!5U_|)C*i|%9KLJQD$`tIK7J^?;-pL@OgDoD|-?j4{)D=AKu z=RWFLPr+us_Pp^t1fhA`^V0Lcqt0EByEwNW=*rIC?%t-}KAGg4_;^9PvzRa-LP-IvDf9Ij05!{?1Nh6X*!D)GtRB5OzFfBDbhgDMP zmi%j(n@mWamOLkUcJj{T8}Kboq&!Z!mGS`Y#nY54saI1Q%xgZc-n@?R6}rsp3|FDq zJPS;L_h}!}-lp}oZ32V89DIE}F!U_@tV|Y2az$ob=H$$opvU7g`I!zd+2vV_vU0Ox zU463$gM;pu9hSWkWb;$>B42?&u9kBfB=TE#g}ZuAjht`pHz1S0x_`Ts99k@J%SUl2V;-Hu00hySTsa* zR&o*!>V3(36ct~?M13fEBdPRAZ@}@i#phSKhh_1%;2+L|f#`>g$W^XY-c;68Rl05W z67Iupdz|np;T_Dl*9i|2Uch{NoA5s2S>nCKw~0>^pCrD4jrJK%+E5s0TD?xc!m!@( zuVJ}iTGFJXIk28KnO4GKT5VcpT4Q==`eyzK_vjtkddn;;Eex1Ibz$FBz`|*idOzhp zyqUW&XYQrmNWGI9Id2>+mP&VJ^1OcY#=>oxI&b2Nv!fi z65xp(O+N%@;~?CPeK0sqz~p$IULWSg%FGPd24%1i@-x@LK*)lN;Lemg_h(%I&3_Rz z|0?kOJF||1^sl7pr+}v)3sSxbnDzEK9djChT<@Gy3k-WDg{}uj{s+a*@1V(-=g#!b z2J=13I|W4cB#_zTeQDsOGeAt+!Ay?~EDn?h$^vuq?0LQOjCr&2qnX1^N)N^F zf?Ikper^1^cmX`ob;|ni7Aq3!RJn^ywZC8#RwRB;G{N#)X4q$FZH$5YMoHQO`)n8d zvnFuCD(x;I46Y-|{i=MaE~!0I!&0B6(&o`&7P05W&ZEp@&6@$c=U18u&d!dMU1d)=;Uav_?wvC-r$4L!lBaeq3*5X61ib+C{C@9D z-(2wP4!_I4DzF;Vcz&KcFO;_^Z&Th{@ZIb4it+>M4ABJv+ZiH@y?uNoa zHUC?<#C$ zv$i#y&pNus@GckW% zfQWxw#m3(**c^Hk>RX7S5%}+4g?x0OIYqmRrh?aQQ_{Yqaq08YXQfw5Z3X9H&|*fkQ^E8nxVa7ARWMFj9hDqW6jSQeRHyLU6}f_5PL|KvR=7*u$;hyvDp zd2zpzL7>Nnmb5HuTxKcX0+x9oIA!sI*agGMBgh9(3^^FnilyS7;Z>FhI>X5?kiL-p zim#>o8NX3EN81tR>{WQHaY>g4?)k@?Z^3r6LS$R7;T;9fM-8fLIZC;0F3y$ z`J2IQZ=XLOymk8p+y#{+b!EZkc+AO|6ESI=%jjAV1b<`0r48VFleJ8(P5;>7HfrFe 
zZn6krE6#%{cQ0KB2k19Ytv0!Ba|7^BVtq3LQ}W4h94bA5Lxl$ll|^`@1xJ6eq(kY< zvM3PgqsuPMzcznJ6}g?i;B?GYUNuo=nV_%qwXB))rr~6=BvlL(G7o-W{=A3jBj9F5 zx!gH^m?2_#6Ow#IVL}lXwd_hZzmmYeP(mwXl*N=inZE$^_=T7k%>GioHW3c(H4_Qe zV5KD`a^=DG3FVmV9dq{w;`1lLV3-0QL0cp!=?(^8Q1)Z~&-tlf*6UXB;AB#OqRp;j!VOaB;XKTpBJ5mxn9Dk*g!uMsAJV9l1C1XynPr(~)N)&qZF1 zycT&k@=@e}{CGzVQVmWGS`A|jYW45cW>^b2! z;RWFV;SS*@p%JAMbq=*VqX(lcYalC~lfh}h9nCdx_1s!~8{f`9A*e4RiCU|MsV1w& zt46EV>ek}{a=XQC>k?{0Y)9-*{6c6&{6MHi{6?rp>`bgdtWE4rY)Py|{7h&;+(2$g zsZVJ}`Ipk2+7#H-ekWP3ER> z=W*}xjQk9K4M7KiL2yNIS#VCkMUh4#iWSL3A`uH+8n#F(dMAA=9VZ`&Hp*!ED0EWB z$fM=$RMS)x6<0-6%~H`+vsH6ciCVsHvu=ydtM6+}OEM;t)6Di%yUlL3r`hK^=VcfB z3;jj@Qh&a`dni2AEz~0vLmW*M5oZ#qLqOZ1UF0*2K*-c9DX+6!*}s>`LzX|1!h5#;G*Dy;Jo0C;JM(f;FI71+D4BAKLzgu z-_SX#Cj2ZIg>H{Wc2Dp8f7ibJzRt;$jrsLE6_R7<3) z6qQ$H&?alex_!DmRrQMlx}CZsx&nQnK2M*o59&u5N1`54U`$UMYZ+oIuoc_1X?UjNGqa=X>yu` z_6?o01cr{GW+)kn3@syxVPhB>DGWWszz{JM3@f7%Ybt9CYbWa*>nsXlr&$MBTUonV zhgerx8(4c-`&q|Xn_0(LBGkX~I0c+3-09r4+$G$9xLdgExXZb#xeK_9xofzKxGTBK zxbwM7QSMsL{g>O8-wIu=-h%Fe9)fTI4SlP50;}Mz;E7-WYE|upgM|%+Lxc^4ZH3Xo z4#FlkUw_B& z+%Voa!8p$7F%}uuC4EZzkn}$3OVW3gJ${&enSPqSnI>7rTaH-F$tlUdl8vcZS-%3)4R#P-aj%lHZ&$QDl|EChIoW{iMWw? 
zf_RE}fOwgBn|On`i?|8(#$&{*#GSL;0l(E!QY9Z=_#nd9|PTE4+ z8uS9s(GJlXJGRl*(yr3B(vHxM(*8kPa5rj#duck_UD|ru7DhQ^0UCQAMj>N9qm+@) zC}Q{+#f+tl5=H^zGqW*^%6h=M%X-VI#{R&%!}`hk!}`to%6iQD$oj^*#d^W|#QMUz z&#GX3X1!r8=gi@r=U(TY=3e8T;@;xk<=*6;KvV7{_bT@i_cHe!_c-?`_bm4w_dE9o zFO1)v--F+cAI@LTU&3F+FXb=hFXh)2M55Q`6bu)R6w-t`AzwIENEJ>O&J=QlbA%I6 zSQ7}tg;Rt~p-iX|P7+N+Jnq4^l*lH#}Apa;oF5fA?E#D>o zF263{kABt>`73m@?#Z{v&!D39P5u@2tVi;LRRyhQ^7rzM^4}omHPy2rZbx>ve;x|h1Ax<|T9{b~I{)Pj!acj&k3x9B(PU+MSh zkLma759yEUx9d;oU!(sMX`F7HVO(ZhX|yCAMj@vr3OTh+4NTvXekJ`(dS-fVs$*`B z&P{D|GjmgO6LWoYO>;~0EXy3rY|B*36m)6MSk76lTFzQ*$@7vgBv(tRo|2quPPL?_ zrP@-{Q)^o5S*u%_X@Rudw7qF{ZB1-VZLMs>ZP#tbZ1-&!Y{zW}ZRc%AY>!Y8xotaX zyJ9FQdulsl`?Wtdnd;O#C%OnOxhvik=em?#%U##4%YEc~;Ct)4@B852cw>p$i{sC7v#N!>|Kx;t%Mf*nkPPOJqLK{kj z>B9MF7Z#vgSSZXAmI=MW_M&N`kD_Mc7;%5`6!BE?F!5yZWARAwZ1F7d7;zu*MDY+Y zQ%n=H!~?|B#nEE6m@FP4W{4+r=gQ~2%Wrx(pf0qO_z~nE#xibT@@V^Gf|w2 zR*Y86RuHjK5vAy)7>fGb0L4Ib=%!#dqO+ofqLrejqPHRvb-GcC2t_wVf8{{s0A-YN zh*GVz+g!@0%E!v*%16qF%4f<5RWeFv5_OWgu{u^AR5R5gwOXB^4ybe0@6>5(r<$*p ztA*-9wN{;~7O3s&BK1JELYM>rA>NT{Znr-Dh2e?yK&T?w78*{*wL%%3N3V zyYS1bbYHez7>R{?_>SgL-?r-jH9%>$F?rn}V_cMo?`A#yo|>f8Pic{oks3&Kr+QLdsky0{sa>p{t<9}XtnI9AtSzlwtu3rgt&ObQ zwAeI0`a`@lPFg4}n06@bVA}q)HnwrLk+z?FDI>?wI5tI66A|I_f#bIOd|W6Yl8d7>>eDAIEf*cIG&!jxLV= zj@gc0ju{TBlj2odDFmN-#MMGkD z=uy$Vl7XdZ1T86^6iechgd{3yHi<{lp`ARJG?Bz1%_50Ntw~c!0#Y1FNfMJJq-mrH zBn?SVVvxwB^JFDOO;J%al&h3WlmXN-s*#>VA48u@C((z{`_jpD5q%haD18u}g062P z`e3@0PN0vZ&!7*Wk4E!%HoXC(F5?%Y74sqEE8{lfGou>w4dV}^9`ieTxt|#47uMw{|uQ9JDFM`*UH;C7Q*8@%2NZvU95dH@KX8sQTM*b%LFu@2xsURd+ zAaD!)LZ@)AaJ_IRYN?ln=Y)@iH-vYD$Aue&w}jh-Cx!cjhlFQ@`-OwWQn6cX7hA=9 z#O2~O;`!o2ltjJa1aX$wFV=|HiH+hiv0A)VoGM--9*wrBS)3_O7YD?-Vy&1eJtLLM zVr3i|OU6Z$vz^?c$Wr(fWeT-Iqgbn0rHF}3P%KugR;*AM(Z1Y>9;QhVRHQ1D3Y}u5 z!lp<>1+!EUQskhDnXOo&Fe_3N0fk55R#+9ol+ma}j!*`aK6D_xN{_OyYQK7wdb4_y z+MrpjexZJ--lqPlzO1gMIj+91zNfyb-m8A1zOBBazNP-GKB?ZLexrV`-iZ$5LG?NH z3H5pPNA)H3AN4i$8TBLeF7;1s;wuzs$jS}B$YbActHcR}i{j05$*fOzN;&<&= 
zZNtPGiA@sc=~8s5x-?y9eOrB9eRF*~bO!6`|LU6RyXal|uln!$FZx=B>V`l1x`t|o zpZeeWzxqaohKA4jdIpJ6Y&>hcXgp^;Wjt-XU_5X1C1odFN*ZYzXc}OOG7UBjF-4ok zqNO+5G}JW1OfkzmR!B`T#~%7B!CDT7l+q;yE>k&>TUlp0E1oI2Dx);hvE z%sL7UxBk{X)``~X)^X^#&9aWR_O^=C3ew8ZJ}X7%>{QyBwDV~f(vGE_OFNO))z-z< z(Kg*S**3%0EeR{|A0qI@S`=@tF4@+;I-YdOxdiV6^_7V1I`zZSm`(V4l?sqIk z!z11!fe&#~08&f#(tI@UYV9RH#P zw$_n^J{Zdx%$Cc;uy6o9kvu|Wy!RUK0cYk*m_ejrZ4?@iz zt7oalnwyeq$=!`|(vIAHxd(Eud+&PhcyD{JdGC84c*9W}YVPmkZ-n~LJO4}nd;e$u zNB<}PW&bPxYyS&>WjBZ&pa$rH*MZN04}njC?}5jGAAy&FSAlPV=Yh9@;qEC%kM9-tU=sx-udOCeEJxE_dx6(cIf9YxT0{TL_m)?lchdCUb z?=H+<%pT0%%rA@?%m`*DW=Cc;b3AhxGm<%yIf6NW*`GNYDdctReD(r%GJ6rboNZ?R z%T8r4Wv^xzu-CGOvoqLkb|Krr&S9@$r?9PT54()*W9PBwvtM$~a89A7eSve5bC%PH zJBT}+H<_p8O+?#zJZ~tEjLJ2g7tI@m-t|~s6pzK5#2d+*$s55N!yC*S#+$&K%A3NU z$Y%=v6)Y935}Xk35S|u36rL3x7PgB0C~OehCbo9$7h%iT@50~03gH1^&DeIaonxED z{uS1WZ5TUClrP>OUN1f?ejT<>3nIqbglF}io~fhl`KVOlO@V5 zvUr(JR$bmG?vkQb+-*hQxYvr8iVtY`UR8Wkyizg^uod*w&8Z3n4-tEXwEqZd0<)fuGg?y$H8k2^uv1lk5 zcp0i0reSNkC3Z<1nD|Q@nHZJWBQYYed*a~4u8F-9MN0gXx)%C| z`a$~c`Y3(0zL$Qmet^EGzK4FOzL7rC(BCk~(9h7u(7_O97+{Dpgd2JoIve^Jx)}x= zei;TD6O1b3GviI;1LITU9pgRYW8*brK~ga3X3{kDL#LSLnx>j&nFyxYra7j&rq?E+ zSz?x&xn{LF(Hw8qn6>6Ov%<_bi_8X#-lDT;Ej5xGB-c!Clw2#h7~Rds$+J>Mr%Xwi zfx0FkWkL!Qjm<@=nGQj<1ep8Alz*9j_dZ9Qz#AGwwK^IIcVXIvzMq zp=x;5@x^h%Q9q+;#$8AAj5-+y9iJRm96ue$92XofQ9eB6c)Ptt z?b_zrn7Cb|Wl zbdSxm)3evJ-m}BA#gmb1%gxTco_i_xOztrh@E2r-=7n6L?2sd53mps{4;>192)z%f3blor!n#E((DQm)_M_ZE$iVi; z9@0M2QPNt{VJvTKCLPE2#(L5L(oWI_(kZNQY$a_cT_Yz`%#;*LD&-LU7X38+JpB~? 
zB>fJ3KUN(M(_hf{(J#_->F4NI=nv^v>Bs3G>1XIS=m+Vo7*wW$DPa%=y@9=jy`H^~eVo0EeU!b8y`O!Oy_vn0eTKb* z?Pp)*T<6^2bmc~K6M16ZES{97CqCQ<}?h$2Mcq9fuD;#cBkk`|Ks zl5Uc4Nwj3Lq^qR0q@84>WVoceq=%#t`tXC$hMy=IDj6tgAn72PAc>Ilk~EY&6L*wE zO2Q;fBni_0!`^)drJeqb-q%#KX{IL|lQjlRFGzaK#&pw+Er+^mIv4hy$XkK)2bYV0%IxkupT@;-f?HUt}i@@!``QV~(B-~!yAUR8*}sa~nuMN&UlRI2QM);zJ>gEmSb{V`n7~iSPq>tDKA|O{D#4b} zl^{;2NVu4AgcL~fC!HZhlJ=2Mq$pA%DV2mLp-KBmNYVk)aT1XfLGmR9kPebgl0rzQ z67v&ziGsw!M1G=Y(*C5~NxPD~l8z-EO*%+EMm|pVC;O3u$X;Xs9g=;>`^ksNC&_{2 zhvaecck(y#vSeyu_ z4mE>HrOx?R?Rak5qO=8RZfWzR+jcMEjk^OeioF7ap{SmNSp*}#Kd%T zdTKf){Y<)!R!yszAy{Sy!@dXWhwKk&U58(o^VRbR0dD9z{>2uZRh!Q|K{t0{tKzO<$ICH0K~_&F;@R zl=GIcmw{kVK&`cak0H3Gn}!Vv5T>Vk;XW{ zNM(314l&j+Zk;>M*u>b+*uaQj5E(leK8zC#f6#t4GWd+$j3bO~3^*f}k;yp7SjUKD z1cE-SFJn4u66<5`4A$q|k=*yWCt06zXMwt^BkM!%Ox9%92)moz!|r9@W8Y@~%C@p? zY$iv}xzBmRdCYmpdBFLNbBFVkL&?j{%gRIN?afR1S5-7Q?_3@|kDQm5=gB+HJI2HE zFuZ79E{_5Fo>pFLJ}zI7&&%iM7v$&Xi}H)|3-g8fZ}K1Hzsi4`|3|(p{{^UX{+9nN z|1VJLe3w6^V4Uw*-~bw$Qwy95CKOCB@Gsn3co6h0_Y^t{W(sBt<_H!F<_o3^W(g7m z1cA5EU$|MgOBgML3$dU;c}}=dcuaU$cvcuMJSB`2#s~?*Z9=q=D1-?Eg=avKGEwLw z3=p0WB7`17WpQb7S+TrWSRyJBfQn;p$>)-9C7(*(mn4Z(#Hr#$&{3QznI%z4K1l46 zN~un&k=97nvKz8c`R}E#LH#cq)byVE4*8DyKKK3G_qOlvzIS~e`wsfP@_pm`C$Nnk z_>TEy`_uhj_`meu9H0tVg;;=?ftZWuO8hnRr)e0mvT5@P(c{WJ$jLlz<> z$Xp~HnS~@HxyUpm3n@VIkrZSOl8sD979k6e-;r^_cS8OMc@i=l@_WdyA+JMThWs7! 
zTgZcu+aa$)?uFb9xfSv}=!4Klp=ZK!P}!&qR4ytFMMtHe@=zQU8U>VK3o+}gu9MAk1NC7!ijN(xHeo3 z?lR7VI~I2`?l`a%lyT*8?QtD(-{X1{-Y2|Gm`H#UJ|%1?d`Z|ym`3m+EFt_zSVdS& z*iD#Ac%ASAVH+WdU{ClXVFSU7u$kaV*h+YrFqJTiaDXtG@Gap(!cKw<;cLP!!cT;m zgyjS$!U_V5R7m2GvPn!*4p0EnNx39CNlZGJ7?3DVEKU?99!NTw?;P=27y2Q_+n3N3}1l9Q5Qr^=)zr+?8>wZfms87ARjC(JFQrOA`JGEGp%zn%s3NME%BBjbh17Lv$I>>ZZAx36 z=AL#n?M9j^?T<7zD6QwE3(^bIOF(s1hqLD;c#J zZJ=X*J;RXEnqkaPWb|fqWSBC#GpwLzUY}8tp$A3tUqI{pTE_W|&Wxn2-?AQMJ;}P4 z1<6Kbhh=l^(V~8=pxXsWo1PpX8oVAj*nia?L zVr^tCXRTl@VJ&4HV6A8EWBtrJ%yMOUu-39Rusm6FS-V&;)>hVjmOE<|YX=Jg>d~87 zb66kQ&)9E4o%u8SJNpmzSM~??7xo+WEA~sa0yLMEoHEV}&P&d7&W${29yd>zSCChd zw}%(Y!|{fBeY{a#N&Ync1ke54K#MCptO5gcu7bBZQL|rxv)rhU6?QYMJN;A5K@I=VV3Zs z@DCwhm;*|>7lau?w(z_#Pnav@38lg-!b~AcculAfa)ld&WhLiJ#!Ham4A6(o6rU4k zi*v;3Vg~5NW{G>nJ>rEDwWLBaLFyo#Dpg2JrR~ysX`{43+9qw1nx$IVBiS?A&+@(U zNI6;_A%9!?SLyrGD`n%p6a6;(ee#{+H`VWh?|0wnelz^O`TpR?_2>D&_5ah~8?hg8 z0FKy%P?v9lwt}nTudH@hq;JRVq!5`j08i$reUerbgT|rg{_I!L|=>6Mb}1O zkFJb%z)!(1!au=%!Y#nR#XZKk0xjkh?l;_0{4)G^+$#KExIcgv^AXrFUU(<`Q`{Ha zO#D~eOWZX4B>ZAL6h9mP0q2gNihqY&i1&-T9CtD9LfkKLj`0rh<8c$>_YvF)nFJP4 zNrZ%a0*AmOL=jFAn1m2Q34ub02a3sQLL$MR;7bT16cI!O6v2UjCd3gk2r-00LI8nF zz!9voAxo|cE;}+&oV}Vi7+wq zbH>w*4;gPW-emldaX*8SrOMJ|sk2^Zz0CSO>s8j%tmj#n?3nC}bS1qE7yuXOQo5MF zFy~ZGK#qToZw}bZmjhw$X3l0hFxN0=F{d$Sg2w$1%vDSm<`>2~=4XZza|ZJrV=eP9 z#scO?#!JTEjIWHj%-%-8LOGgm>ZcBnBN%7nI9N1raSW)<2mCE<4=a2v6$(~ z+{~QLT*BPMjLJo@&ag176xKsl5(~)+WQDT)S!h-W%a0Y#I?KYdd_kK&f`wucSn;eh z(5DY$VOghG>8xZ{6f1^>V}-CMgD(7h&UDTq&SK6I&J@l<&K%Ax&NR*i&Lz%8&TGya z&MQt|o++;`Z!oVvZzyj#uQ6{luQ{(Z&zLus*Phpq*OYfF?`PggUIFh3?>_Gq?>XqD z-{n2ymFJh{pU;=)yYg4^p?nB`BYy#ZDSsh<4u1*Xoxh4dpFfwsm_Lv2!e7Myk-xqG zQn0w-#{$=al?4k678N8F5lVF`-jbJV4gu4qi3YdaiL7spP>fj7Pw!kKA z7j_6+g|~z^LEZbU@V;;i^u0%gcZF8rpTaibb77azBJ34D74`^+g`L6&!h6DJ!hYeP zutxY$XcATnX~nAI8^smHn&L|(7fJ?8CWAiq1aXKsPs|sy#Wr!Dc)4Vm#9gvRvRdLQ zSt?m2(Mg=7pCxlZr+T4uk#vFdj9tLdVF-a*{k+PLU_eiSk%EMjkK!TKc*4Tj_Y|$I__^2Zg=#OR1A$vSO0rT-jjR 
zY^Af(#c!V9BEKK~=J?(7+Y<0FfQX1hBp{Ly(TFpMvxr1QI3fk%60|hPf^0%|B3pnT z*oM?2O~^%|OG1~1t_YnUIxo~2Xnm_ge++dET^{;ps0yV;T|-?#)u1%Myt|INiqfIf zs7h2dDkeNOJUW~Jl)7i(Pk~nVB>Z@UK0<-M3S2fLx*GUv2H>$Zpj*&&XdZeLGlH>U z`Y@fCCJYDDj=7Eb8*>W-$7W$^*b7(-@W5K5yP~_J`=Xno8>1VdtLyCr?k#PToY>K!H*AP~a3#%2tX8 z5P%vfPbsn#Maub<(iC~hg_Mga#VOJhamwA)7pYHDe^0%a`Y82w>ciCEQeUUuPkoSj ziFz>2JM9QCZtBu%(zI!{Y4_3}r9T7i%rFpVeocR#ZciUd|2_S;^gHP<)5p^9rw`DE zXrnY6t)Dg*=q-yfX8^s$HFI9(tW1~81(`nr#btWt?97FkGc)IBLNg(m2eQ($Xjy4l z+N?jb)@HBCj?dQ8f1y{<>w#Kw1K1@hdNo}~uccq1|3yEY1LAHu2<9PX9P=bIfQbew zhZplO69Z(9DCR+?AM*&1I(}w`GD%DyW&$%ASRFCUNG6XN%|tN+nOJ5#^Gq%_7n3Vz z6|lrC5lhHov*@f6mV`xRu~-*bOjaRFz*4XntRhw^OUB}`mUG-Wn>iae8#${uFpe8% z4QC~16K6f=C(c{WKPJRqdGGSx=iSYl&wZSCKkrSR1sD+z^PcC8=lz-YB~QQ;@(OwH zd4KTU^4{?N=EdhH|6VK2XlzpY?n!PWwgf}I81 z3$_$c3aN$Zg}H^F3*myTf^7nkpim$XP}t3=C1PNJVgQ$({xX`&q>Ptj7~1JA|H6E48%D^CL#xsg`guwkVD8mzCkV|Uni@{TJmLbCD}Q7PV(I3*~yIL13>FKN;yKgM0r7}Nx6}tOQ{8NR&|Ot z$$FX*8_ z2@1pfFeSjzQ8Ht5n_1UcYL=0uVX0X4tQwY{Rl(|HwX@n-R#q)gY&uw7tZLRx zRtu{RcsBP~I@VSWoa4#)87MOTob8NX;ofB1xFhJ-y0;CQk5IZQMqat6CACNoFiV8$0M0gQZ6eKz= z@)w;Fkwht?bP-wfM8P6+acgmBaeJ|`xVE?gNExaUSMg8c#o}c^!jOs2 zi%Z32Vug5EJS2WA-X}RIIVL$OIV3qCflAi_iDIo3F7=RZl5UW~q?>_0aZ!3+IzjeX z`cC>$Y6srL2cS-TlYWyJFVmD+%6f1ux@e@DMT zzeJCqU!cb^lYkrg3FC;Ji2aHw0{Z8#(YK?=qK)`^d=vg5-h^+!-^ADA&G>4(8Xq3l z88j~>V>u=U$))4CsYmoJtHO6|xy2Kh~-C{Yi-?5&v?y>?n z$2q5fm2{Zn%kkqJ<*end;;!VvxtqCLxktF0xSl{Jf^nhT?Ob>6Htt3)gu9!&hr5fr zpO?&&0$oVV`wj%53Hc8BVf-kd`~>k4{BS;&AHxsjhw#yS6d%VAR*K!kP_ar}0sNL5;!EP|;tS%-;!*J} z@rZa#{8{WP@ss#V>LmN5p3+^?*|O=fX|kEJNwPVzAAqGYRW=0(DR*Q}@=5Z^@-_0U z@?-K7^5b$d5KY?UEpmgrMqVc$l;4zh0Oh1f9;OIToKhT9ger~$a|EF{p$Jm=D*_b} z3Zx=L!7t;L-7j-gIw%(^p~`j2waPWh)yfmf)5Kl)b(+(4)izaXw4t|L^4 zUy*l_w~=#$_W*TzZ)iJ;7~T@`1w9q(jGc*{hMj?(g=JvxVeew^ML)woz*~WWdJEqT zB-A^28~!D}58nf1)W|qZ+>H1o#8pIB;zHs&;tHZ0aV2pLaS;(hbRn)K&PewAeH_ z&6Bp1ww|_|_KNm9jgpz1nVLz=OvxlQ;)1px)s1zv)^0*yc|xF<4;`b2G_Uqv>NUGz$172OhD6Fn7miH1afh#rZC zMIEAEQLAV`^h}gd{A=;;;)f+`#OuWC#jC~cKn#he?7ZC`pi{Mbac` 
zlr&4crAMR(q`ye7NPmnVZa2wo0~Awp`{Sn=fmVO_%>5pCz9m-!A`M z{!;!xeh)Z0Pl2ZM2535?@>lXZ@>}vJ@?jwB{3=gT&=q+KhJvjiDY6x*id+R(!Bb=? z;uK^>vVy3Y!h#Eu{ z;t}#ts0n38{Sp3Wcx!|q!j4{qU5H(Pork@TeIETN`f>E%_%}e`eU7)|-{Swk|B3e` zZY6FaZYORb!if8cJBWLUyNEl9Uc}u*IB_3wUgG@3tBDy&w4?^|4a#*&GvzhKn$nxn z2h7gDQlQk8)K%2gR0wqqRYy%o1Di$DKBgU|dDD*3j?fO#{-C92W@geevojmA#@MD#3K%V0{s} z7R?4C)_2hY;9;#UT2eH#=)K6H=%Z+A(X66{MRSWji>4KQ68%s#F8V5(QS?DHr)Wyi zz2XPO_lq}*H;L=TjpA?O@8U>Fl;n&gLSm4#NqnU~(qQQcU_Av$J!D&CFxh(9TG=+) z8reqK4%r5oS=J>p$(-eL<#XiIOPxwT$|nOU>66^6bZY4=;3h5lm!YIoNP%mlPzV(g zMWI5jC|49IN)^`?V#RqN9$ipeRg@_%Dq4ViRHFDraYZ35lb6ZLo|HWHXt5#FIkLc39YsJG$Y&`Yrs;=bZ1#(lyp|W zBpxGr6OR%P5f2lWCH|PWBr!8-Q8J!lOW8n$Q8!ToX#upqXnC0|U}kbNb2IZZPi8e{ zZO*<+U!1!(*FE>A+?Ba5xl3|aTeKTEWowG|6nPeH zF4|eNwrG3N`Xa9)SkaT>$Hk9|w~BX(Tf`5=7)d8^xLPGhDMGqawnuhYb`XeJ`((Rh zdu2Vc1@c9}y;>;WCHI%FD_vIVQo6o$W$EhDHUDy{Y85vXdZ0&{6V6fhMI=SQJ`?T47aGDSCi2l~z_-_Ok4cve#uxm5Y@>Dq%_=C02=13YGOfCWH~u zjUWXd4?Ph&jOs`I6}|%Nigm}jVW-DA#Z8Nw8Go9HAO;criOUmzO1zesl{7{kCf_2D zlFw4ql&#ciDlv_e7DNl91=B)lA+&e2w=_ZKDIkpAqsQfJ$=#X@&t0GEk-IH-J$p6V zoxPR~Ws`ts*v$FD$>cJ)Iow<>lbgj|kY5a}JsDrfFXWf-#r#O%1R zRMC;5Lq*<2M~e;@?G--~<0UvrwF zV~P>Qd&RKguHt6do3gD+xY9$pUl|8ftP9FLep3JE$Y;pJ;FFrs5spz4qb5bI z!9uYm*q70B;^xN9j_ZmGC58~)5_6L1N#~M?l%bRuS~RUVGd^cmu2=5PT+dus?(W>5 zb2qa+*c;h!HU;R9Nu2K-9#9myTsHR*Z%Mv{j{!=cUJzAux(HPi1cbW|u~FBx+r%jw=1_P4=7J5NlKzJK}k>!B6<)$p=ZN3U|&c7 z5j`(1f`}%fh>^sViMPp5$ipc+sI}CaR6I?b$;mpKb0BvIdpmn8`#8IZTg)xvEBWPo z1;1I)Bxn=F6~z|eik_4l79S8F5u3$N#No1GS+M*JaMYqpqe{bps5YVOyJBkD#Ih-6 zlgk{+9LuJajRW~?QdxP~a^+zqS(ywxvLVDqY#nxS+>*FOaX-dI5#1B7Cyu4uO4&)p z(c*yEE6bDs{Z^X!fc}tvIQK}dcP^Ea#-VZ2IpZ7&5GgP7FYzz(6N=6j5sFAfNkvD+ ziINCuY-vL2+0yA{Gs~RIW|z$^n^Sha?0wlZ77GF_Rbq$-yOMH2}$Pqr@q7k;~d zT$EZw0m5LUj9i*jN-9k*onJN&2x~tmFDYY*SmH+-KKEz#9`;^#8Ml;ojo-;XE>4n= zB~}SqdP9nlMayDj5cz_#MP=T~sZq=0RwbUuy~@ui$_7#)P8KWMC|^>xCh>T#l6$U* zT3TC{Oo@}Fl`bp85%I)JWtqyON{N!5S}*KlHzNHbuh2e+1VW>sLC{EOJoF4S1d4;A zp#D${^z?t%vyjjjXecxQiiIMe5zshj6f_tb3k`$%0blDsq%9HDVY4G}(rO)`uI{c^ 
zflH=#Sl|kE^Bnhd`wzYB9|6q?XlMV(Xa8tt|43)fo9ArSI{f31MY>_!us|Q%;-+!% zaD}^0T(NG&KW^BQ@%`>z?yf)}o4ay?+f-na`MX_l*zNj{cJ_~b)(ia)^Xwl5?H~6{ z2sE;POtPXC^ETt%;@p08+vBzjh-K^C*1I{mO?S(6W4YzHodc@b8n;z$tKEJZA6PN8 z!oh8#TeU;@xOu|Zgu4@dop5VHox|-3=N(!cY!f!R&2gLUcFAGdip{PDXe+c9dJ~Fq zk90@7W8Gui&$vt7EYOQk8?+xPgkFMPhF*nUf&K!$290%}?LN!h#eKQ^O$S%k6|Tkq z64FY5k#^P{e7xfx@1Eu10;aXiaJLn0@G>a#e}AJRnE0MJJ_-10Q^551 zKkD2M3C@m=G) z$De{?!OuXJ^BVvEBAbPdgK_ZjN8>P<2Mi9|0^17P2HOtX0ow`N1=|htgzbU-4BHFy zg6)IthaG?&gdKt%h8=-M-M`R;#Jcl*z0J2?EWYH$DPe4ypcfI0&yZ$8xJUxwZmpy@e6CqidK=Ruc37eQA- z;m~!^-O%GuSLi0_YUp<8Ug$FDW+)W81G*nN7di*}BXkLLA#?$B7IY?bIrJweVE>@* z&{fbi(6!JF(2YJ2>#JqG|QaD&f{qLfn5?&5W!3*#%I9NgkvS1&a4d=iu@GJ1E za1C4w*TJjc)$khlO?WN54qgw>hST9-SP0I5H^3X=dUzAO8Qua1%gJyM0fQI8-B+~2 z4e&fT7tVu^z>RPKq2X2D@KVO8&!RBz?!QIip z!2^8lb=>DT=HTzx;Be0&+VL2;CDH@ryFAW#U_7uM(H=1#u^u=NyhofzyvJFO1P_7- z(SziX=#k_>_DJ@ic%*owdQd&mJkmXEkbcMjWDqh08HS8NMj>O6TaaHNw;^{RcOmy6 z_aP4;4Lofz)u z&Ep4;=^isYW_rx>nC;>0F~?)B$2^bu9t%7cdMxr-?6Jh-M~|f*r4R)KER8|RA?G0% zAQvH*AeSM(K(0WpLasrsLvBD+5H+L%QVG#Ov=AMn3Q`TJ`L~6ZObFOI0--~4AoUJ9 zhbjlMYnChBHOKXwE5r4FkFv_YfA0M6)LZ$Vui(GlU-_T!@joB`*Xu0*`TqXT(f@e_ z{!5R5%272*HC5%Lnx^_e<*b^cnyZ?pny*@*TBus2TB7<QB`h)mzoys`si7s;{bVDhKr>^%V6C^-T3_^&ItLwTs$Sy+*xWy+OT2y;Z$K z?WsPfKBPXPKC1Rr2da^3wEB!XUVTSQ%Vovx;-Gt^mXx;jVAR&&%mb)mXc ztx#W5*Q#6926czpqPD5~)r0Cg>Idq_>Q8FB`m1_e?NH%Z;Z!lLVtU2Qiuo0rD_|8{ zDz;bbsMuBERk5$)K*gbo!xhIWPE?$%@Tu^v2&_OrB)TPw|#mdeh`?#iCZ-pZlMJC)BXUsS%Sd|Uah@>8XQ#!)j# zGehI7nXhrtEYqyk9Ml}rcx#SnPH22I0h&O~X-$wOScB5gG#Q#~4MW4z!js5KRuN{v=irK#4`XzDb2O^3#)F>5TEPK`~|uNl&e zX&z{vXq>duw9~aSw9eYO+IiZ=+9lehS{Lmy?Mf{~3)QaHuGenSZq;tr?$GYkdTIA* z4{E)&zFI$Rp!T#DsSVRcYE!jTElr!P&C#;894%LyuN7)V+9It)TdKXPwQ6nJA?+RQ zUG07CbL|W5EA1PtUHe7*O*=_9S?8piuA8BouUnv7q+6nM)osyv>X5oHU8F8nhtrXD z$vT>jp=0Vex6Y*v+F+VzUaomTfbw~q$;N>=c+kX z^Qz`oEvRy-3amO^g{VSSg;s@Cg;zyXVXClIF;%##xT=IIVil>1T$Ne{BD7WKsu)$w zDqdA#RZ&%W)%mKcRjR6vDr1$Us=LZs)mvq&8mbzu8m+ojb-U_r)q|?XRnM!wRykKM 
zu6C(*tzKQdrg~lVmTKQ>zv{qhWOZaUy828twmQB#yP93CsJ>KvwOUnOQ(aqKU)@^W zQEjQVRS#F+slHqNp!!kujlQO-rnRP{##+-;(^q4w>8}~9c~#?lbI#5AHy7W8-dulk%T4c_ zn47UTOK&P}mfyT|^TtipP3_I0n{RHuyZPy+L#<=&YiifmZmQi}yQ6kz?e5xx zwcfQSY6ELe*GAT2YGZ4OwWQjl+T>bVZANW&Eu)rKn_pW{TUaZo71oy2ifbjc(%RD6 z^4hDl*J^Lns%q7>)wMOXb+zW&&e~VCuWR4bzN`IO`>l4o)}hX^Zfc!V-L$$Hbz33lty@+Hse{(7u3KNXrEY88_PQN)JL|ma4%YeA1=NMrq3R;*Fm>^DXX^-cq`K5P zT3vqK)jCaGbzNOuYn`F4z0O!?t+Uk))!nVTU-z)?dEJY;S9NddKGoUlzSMoIn^eEJ z-nHJdeowtu{lWSZ^{49n>XG%q^UvFmb-lj6slKJ&P;aU4thd(p*7w!h>ig>l>c{GD)!(kaTYsv${$u^8dVBrndWQzb2B(Gv4LcfkHh4C8HSBNjZaCU-yy0YnPXoy7G@Ncg zHiR``8n6v94Y3Wl24VxLf!vVVaIS&Tz-(YO@EWc*Tx+<|plZ-G=o;!8>Khsxni@J9 zEDd7~w;H}SI5bXboY^?LaZcmnMwdp{#x;%W8n-lVZQRzlqj7hmXXBp6pBoQ09%?+& zc(n0k&(Y709+3`W}6s-lp%@59`PDcl7V{@AV(_pY&h#lbR+sO>LUi1fl5Cf_DxQ*aZeDYhxQiQdF$Vl}awI8Av?g-wDcQB!e~v`N-f+H|Q& z(^S_q)HK{Q)^w-oMbn$6cTJy~9GWLKPi~&kJhR!kd497?^Rnijnh!P~YW8kE(d^S4 z*nGM%e0ooElXOKwz#w`Yw>E?*K)AMyT!M~ zujO=0QH!R<*kWzzX|c5owcKgB-}11K^*&5aw*&5r5Yb|UQv=+5WTFYC{w_a?$ z+^T9-w^p`lTB};CTWeZxw(47(T3cFMTMezoR!eJVtF_hEI@Wrt^>*u>*1N5bTc5N( zZ++GJuJwKE$5wl*gJF`v$>3~QU~n-kGb}g!WNIhRp_;VVhxx zVW+{<;AQYO95oy_oG_d;1R72okcKb=#(*`%7-9`L1JOV>q#Bq8o+01BHxwF*3?+tA zgTkORoHtxHTs2%Xs0lhEc(F}(k5?Hv|Vbu+;+81)uwK%ZmVglZL4p)({{J*LEEFY$89g$ zUbVe$`?KwB+lRJKZT7Y=ZC~3Q+MU{`wa;ju**>p*e*1#<#qBQbkalSM>h?A5huUM? zaqaQ##P-DY)OKn+tv#ci(avt?wYRp9wcl#L(|*7GLHlp*&)Z+Lzixlm{=WTFyHkgA z2c%~<3Zygqqp&>@tE<1@ubnm7+?%Eo;C&< zk;Y&n${22pG@^|dW2}*ABpH*8WMi_CX3Q{V8yQBHk!Q>|78nJ_Vx!n7F_s$3jn|CT z#u{UtvEJBd)Ek?O&BhL+(P%Q7jTU34vCC*R_8M)*e&djF%=p0g(D=ys#Q4hi+W3d@ zjq#oFgYl#BtMQxhyK&s;U~)7~Hcc^2H947Pn4C?EO-oEmO)e&g32ItxT5sB5+GyHr zf}6IOwwkt^cA9pZyiEH{2Tk54Uz49Hz!YdYZ3;7?Op&H|6Tw6>rJATFnkn1FHgQZ` zQ@*LlRBV!%N==tcmrYkp*Gw9d)>Lh(Gu4|KOsyt^soi8WSxr4An`y{&$8^{9!1U1c z-1Nfq%Jjzc$z(TuGfgs2HanSTnCF`pm=~Fsm|e{)%nnOn@QW`o&c?lfD? 
zy=I$v%zW4U*!%qJ?B3TT(4)mTU{%a?ZlEDL+9 ziR_H*M0cL)jPE3Nk~?Xg>`qQ6ud}dI(kbnfcPcttI}M#3o#swUXIH1Kv%hn&bGUP) z^H%4top(C#c0TC*t@Cl`v(8tYuRH(jeAD^1^HZn2^J}L=ms8iYuIXJfx@LCG?^@io zrt3u4$u8fnz^IS4dz^Zv^~~sT?pfTkq-SZ*vK~m!>YnvIJA1-(o1=cYg1ZUf13gy^!A3 zy_y-}%0) zeVV?Gz6X5|`yTf_?|aktw(sx0_kCac9Bh+pb8L%k8*E!_TWvdRp0qx3$^~who)c)@ifZ`fY=@A=|L+ zj_t1Pf$g#FmF>0dPum;YTiYkw%>K>&u>LLmJNkF^d-d<@KhS@$|4{#l{*(Q_{ek^q z{iyzkesq6qe|$f!pV80mFYiC!f2sd!zouW?U)5jJ-_Wn`Z|xuJf7k!1f9k-r0q24B z0~-c54{RCm9N06kcVORu_dwVHW*~cjKEN1Y4-^gv28stt2g(O74X6gR19b!S1NwoM zf!2Yx0n0$=K=;7Vz}Uc@fyV<+2Hp(39e6kJX~1F7ad6_`>nQUFlV@M`0}u2xO3P#Y#SaJ9vi+je0%ur@Z;ep z!_S9b4ZjKc*2sgA$0N^2-i*8-`8x7##9?&O=$z5Tqf16zMqNi&kFFVA zH@abT%jnk89iyJ32S<;L9vubAgweoJ#Awhcax{1}auhv!W)wS$8;u_&j*>@dqZy<0 zQT8Zjls8&9DjAiI%10HW|D|=Q|DR9Zb1Lc75gxQXnwm(fYW)q#x-GoB__o;&Z2d z@NY(@0-HV&$b@Ee>zTvAea`|BdLPR4l=rDlz(!v|Q)DLq1>|L(s5s6?ar$HQmV}F` zuhT*q&Kw`sRq>d=PtaQA3Di%(+*uJ7h3$_yh+mqp7?{(KD4LXA8I^^8pqXE+Tp6GW z^GLp(?FB5kP=6=no#69fi%_-UyCRN7q@W|>(}45vfR+SgF&>EauE-HE_i>hTzY0&5 z94YPg`RFs<_pRS4E}5`C6kdEiK@g&$<&Oqnb)$pz%e*e=%?UBrNsvle1U(xGLPnW46R6C!VMDgL@J-b zQ_F!sImOQa+LBoT#GrFXS-1co7b z_OIxISg*A0yhz0x1SV(>YD%;gXs+KAR+G!vkL9uPlk8Vz@>B7CBtNBpVCWN+FA&<_ zMqN3x8+$7$E$w3N1zv&J!{=eBQ&efp9dbW)@Ei*mSkLo)3Xh8pO9sQAd}5jrn|3T!iej*9Q@yTkvJe@cK;(BDysXMTulKf68U zazId0ER(_?F5VT>jwce%=2`j2rBm!F(W!COJWu&l`^$h0K|aBzGhcAOCml*{0A|g1 zS~8=6la#-;@M`fF|06+1k|Wct=avhd>`44^zvKQa|DU49Vv}*w5~J@WV5zpFFJX7a zj^i@P$J6U`UY^r&c5?d*j!LK5yL`mK1DH9{eEb$No!UgbmXiq@j}e?7?CzrJr@8{5 zk))`^_#c6PEAff)yOUCx?RaX|DT-gV-<{L3h}}V-gT4mM4?Yqs3hoFk44Z{|i^>eQ zMr22gVjp3b$NUr%9@7oH?g;#@xRvo4@j3D4vyvotVEN_%i7PcZE13=qrG3;RRBvh; zwK&}YScYt1({IS;WiQS<#q|af{0cC`^qa&FNH?>AfgvpG%AlN@7WhY0U~IqO#_3&t z)9o|tE+J3RGwpp*<59Ehv+d6IIrh2sdG?Ei^X&`l3+;>Si|ya@me_x^FSWbaOQM(A z5v1kzpX`rHTvc7#31{w*7654L-T zJ&E5N(2($B@V0Y1LF4G~sR|%;nvsZzTI^G7HohkAWWwu|Khu`e?qzi63fUflP*EeM znUI!LmmFA#IDImx0X>8OKz7j zLu(_dBIo)#_;2=K7_=2wy|aL2{~?Tv`XyW#p^uyc4EaTv13;c`!;hU^kua3-nDCu+ 
zheA!!Qg^3KVqD|A=KY=T&7TOo@BpBNpA=C_?uZBCrV#%ziei(_q_j{Ur^_?5xz~{9 z$Y|_0Y;!a(emRifLU{8eXMN~tcYRcTJ$}RfGXk%mNin{$_hXlzr4jVWRVh!>-esR* zFoj`$Vr*E9C}u3#oR)Mh#D18!sBnXX5Y|eUtd3^uvKXNoV*uhWknI|pAu(*m`MkTa0Cw4hIL^VD|WtnCkc6`p|pIm(Fr z75|;EmU@S|lC3U|lHp}y;QYz`P0=ZF%ZTf!Q3d=%;#HyH8#6EH)@PAhLttLh#cBQucH|E~5JF@fo+pMD^ zVKFl^Gou)aOR7?pn3<)P)MBYyt(IC+t6R{RICjizCuX~iF(zhah#`j9j_qp4_H+DS zGiyEbV%E&Oo6m6WweS1vy?@sQ=D5FK|8e(7;Em8%saNU`+MXNojo&y?KL6k4!&~CB z8y8h!Z5&O4I-H(E zisSYBePLDeq21k}JVdSgyzj0V+2-+v!X|QOQ|8F-3z6gVZ-t);*LN|d9+>&}(%9}B zL1|<^iU!HjZRx8U10C0N_Vz6d|1^vM{g=&|55{^APlDE~V@rPr-NMg+mah+XpFGQq z%?|uyXn*wb%5^9Hv|DrT;)T7Ar~CAypDvL06o;Ok;!K}B+|}AW`t8_#hdw=Y!$Iuq zy%*j|#p^i(=N7JCTHQhhs-n``-%Ux)P0p6C_W!p3)-NAu8N4u|SjMl|SMi%g2ljJ} z_}$I&j_Z4Kmb#Bk?fo&ZoP4d}^#*#!Z8NdClZ*1D`ib2ocY&G^WL2o9_z=hToo>e&BRckaaY`RvH!vG?FsVI!^;P^9u z;ddjCC!O{2CJ_Pp8CFx)ik9koonIYFMkZxOx(IW9TAzAow5>g^c* zYP@}-cn!SXz9wbI`!(JWastB7j-v*hQQqB zCz`KseXlLq)!y@T4|nLtk-D+sS=M6KQuoTA)*f63ePHLWNxXIe8$w0wv0(gY+jwv8 zOx{fW{MwQE(_ioY7CGJ9H9QhKQ|n4s)K!8$a9PJ+I$OFNpq)NvV&w3NxuYwn<3DaT z?R|gYuj#)u7JzovVB61a|82wee%*U<Rq;0$Hk*=?L69Zq4#Kw4_ zG4;RW|2XvXA;o0zjAHSaVy^U1Hy_|Nh~8=>-= zYm$<5N&Q5FyV=!tciSbj~(L9TsJ?zAYZ(F@sG>vM`UZ{;wcaODZytDq5&sN^r#{`4ZgL7A%em{(;zq8ZT8|Zzr_vYb8r=d$P z_u58(4y{HmC0=XY>%Mxldi?4`Z3rz+VN{Zv)I~}FmlyI*}3eC)Ie5f zESySD)U!aNz@r_Hb>2P*j6F8?u z>95n@q&r$(==^2yiXr)^eD+_)-5l zWKuzL!(79|&CTOtWyS@A7o^;>82Z)1p!#9px4f>kx``bT(K}{;p15f6%~e>})yLc5~Yg9Y1&6*g4&E z(EDmHzdvWNXe8^9Ytl2>IC*LEvuX0o@XV3R{w|khwHy< z40kqlC;FYkp5cdve;RL_ymk7ex$joLKQ^+~1R8xAd-aL?M}Jv=BHo*6n&Qn2FAfj0 z$An{j8S3;(;^)MDb({5nso!ddH=b;sYpH76ZhyV~r*=~3x1ICD^`rgM{WGu349|T$ zk6XNXW$xI{8guKTlh0n@TqGv@lFItG8VT(q?OPq8-pBeH2ELoP_R!913pBVy13rCNRJ~-Mte%<67Gx6DL=B5^IU$8FHj{NKB*xH91 zhqks)3Qtvme$CEkI34SHv{%|E@6TU6yJA141by6}?{-{lirn3PThH$7uSW|umQL60 zGTV;zeKpjdd9LQg^^8Gc%C6W&K}Kb#*v#X9}Ihra!3jG%vLfT9xg$ciz{#+IMq5d+^G^7luS*n`0-& zsN>k__on5CIWyK71?av-9x<#HY&C7&x%JnRKkS*$yc+CHH*{?GT|B%#O9CB#?5&>E zFLed24=ty+-rqkuay)gWakk@~KK9z5*MP13eRE_m6CONqXm@9D_0{dKg0Dt@iN8_3 
zSo2Hb;o8;I_34Mw!g`>opt+)XwT01Y0!`S~?zJ96FSSn#+Fm~ceU?ZteJ>hsns^BG zwthZ!_u);@nBFvdVNSZBUD{fHWBK!y-PKz_-K1sH2O2wjw|7t84NCofcjUeTI>xj2 zcMo0!J%cYrXA{)qVCuot^L@3T6Pr7F_vp&}FH7&&9c{R-!Poe&HeBaGSG4$>V1M$a#F z9({S>*X9@IOvi?gu>y&}KSOUvN}_Yk$o8&T_-g&`MvxjcRX?4)veuFgq^r_d_1Fe~ zxp;wf8Do$Zm0Ky(=kr%T`5P?Ep46c{~gkg)lceY0t+85>@Qshrk9~d zbJwmvadvYk^Vp^~d1dO8)Un3TQhDh|8WwtoGym?(8>Efhy!gY~LjiAeB=h<%y7xlh z(O@K89~ahsn#MLKhwop!Zt2$}$yMf>d-t(j{6P+AE6q*Zp1vRCK)>HeY|@SNTs)S#E1lC4Y%lF}OkCRC*o*C7Jp0Es;P}%g?mKOaUz>iV?#}wX_R8_^ zm!3HNV&cB$=Y}QczX_<4=tg_{QqMmpMdx>-55*p-A=S~FUTMF%Z{yJH;{I;o-g5MC z^KQ#+eYvw03sw6+cUR8ce4cipAz%;vCCo@>8h>cfblHYY^P`#PwzUmonehy4=HtZy zkjeJ(%EpO_%s}u^W-{|p!kucL@pURYX@w@Z-fWh9O7sd)eljY#lbY^Yt zaHjCsOlIf!L)$aEvzgojSj`_B?rQ@EAme|Yys&oTLG9UI&`AH6rtRj9?#CDWAx)ya zA!i5zx}XoINJlOQ9pO8w3zOHjWcseF>+3oaUJWFJbD8og*n6UHr|&;~gn^TpcLz;F$)TOhBcNCNR3><6 z;Lz#J*z|`pyBYQDUZ!gG-4pwngUp#s>g3tX59iKh&Sx%UKoCpc#muckf5`kXbL;6F zqgQ19l({nVV#igP6N8!|&FJ{z3yXiw+oa%N z4K>`5;k2JwnGMe)6sieh;&MDo_CWqHYV7MqM#|?2ULcNHXe3(rZ5gBbaw*o@%OY`>n0D z{p!xz-rEQM4lXuEMrNk7k6yW^JBPejkzVR8962^(J9&M7+bDayYl=9Vd&CodEODvz zE6_wdIDTpF^wzVxu>(>6C!^O+T{rdd!s|y49i^|8)vl+`rvBIo>1ye_b_5S{wD%5u zHuJ?CWg)(BVX1OmwTs>VrhUHQa@(>uNv%7eN%?`yTkQ{KO1mG*{5I4&dMUFyb@NmSXgh|? 
z4=y~M`TFR>G0ySi)+3qut;oS_ldfm9k3B0qcI)Qz!5c$+zmI}zjwS9+-Bs^vI^DJ2 z*U|r%{!>HEqmnVj*vPzP>B;5X6Q_3G*?BZ`A`lLahh*WuMW2p-7d@JKF(pXTniiYB zZ%YmDjd(^M9uG}!Pu@L=n7M!M_~Oee^+z7wEZO?c*5}(RyPxiUw);w?qv1<1;G*tK z51FPeZRPH72Oi5rx-ax>4ICSy%-^*bU8&eQz5DI%&wI2L?b)rs+R&rB{_w@<`!ReC zuI5iQXF`C+o(TT z6)X$A1Hya@YF zp2}GIpU%8LU>Y%xJd;U-F`Z{Kk@4p;f6v@F^Xr0T`Q_F9wKqY}y9Ttnmv6a1e|y>X zliU3}|HwR_$w|DBd9ml%{B83uW?ss8!#zoB`sK_knbv`;M?RWIZoHZypWHnCTITi4 z8<{sVbMvTX#$e~n#?iMjf2iqdENS{{&jV}oCvV+N2H3GL`jjh!yWj17wf{=XhrQcd zZ-<|3x@RGC{(O)Yf`red&`rNJ{i_w;^;pmL*oPC>&b2OXg6`&))%(^4k6*vN9{4zr z8+<$SPR5-4H2H4k+d6tfs)5`1UgrJG2bm8uuHlbIKFSP^Jv;Vs=I6;zGWqlR`Q`cd z7sE^c%zT=;fBp9Lqg$V4KF@r=pMUU0=3g1};8WvI%{>De&}ql-30w+5BZJW|Gxx{d ztNuCpSc=&2Y(szlSDCLf-806eZ!$YaHrD=~=>vJR-)6qcKmxVF*cPdA0=CFCu_u$i zB}1v2R65ntSlaY-|G{AY(2)ccMTec@1NONtlWNRcli8J>YttaJyQekjBlr}Z%+-}F)cf6T77FbJMe0N6E!66 zO?;i`O(N5J(7kDFSZ}$ly}E-9+UoOr+ylkKmouS}6QU48h_!n+IqTv%J`UFL(1 z_rI^ay^7zsVSD2Ax!nzL5kXROORB0l)Zy&h>2&tL*grGKoxQaB$Mu}e3NW>-9NwIM zc8#`E5)1@&;rZA~K)JyAseONYf|JYsA#+iSD7e>wUk1MIQo7az? 
zpl;o@^&BXS`7n|jd%yPKRHAjP?fd>mhkwp2k5^66r>hRTX5XEgUXmYwYQ1&i{f&kb zpKoS1ySAR+@du}Zw}(2zhr(5n?no0z>upSCr{n22>(4f7n(>_v_GI_oIuID*jB3U# zW8tyk@yhAr(>G2(KD{~HzvkL3+jO7YImz3RpZa;XcE4wzb%2b>n)jQNeXZk9O+7R{ zKGPU|AiY;_8Mu3J|1fs%#MyT)T-7qwI(`%goDSR=NQcHkrLjknXHuT}8|#0pUubx& z(bnQ_8*O{0Z5-r~|Ju>nb*#Ik$J6^z-=}?S(22kr{Nvz>kzYVhW(s6xpP0;>nwfgw z@QIm{#jLf@*DkCpx68L*+inMmeViS{UiDsjFKZul(G=N^9F7YT%zAdidgDJ@Zti?{ zpl?z!{p`X$E5EG%v{raxW4m?t?5=aKa=-lIbHU>9)8T6)x@tm0Ps4RB&$fQn=54>+ zo@r-xobPt9;(8oiv;p;|i6V;QolkZM)rcO`o9e($4;mqLd(fOZOt~?T3 zdvNVwW8nm6t8eSWZTs%ayC0wXdmHlb?HiBmBvQHQNc8n)=7>17m%7k!OJh&d%bl(zL|4i+^=UrcEWgKc878*6b#jTQg?N|yrp~29tZ|EgSDZzlaJ1K$2w!b#)HX& z)H`)=)YpN5!hd(jIzI)YS>^p31AiTSZ{+A$<>4&Q5p%^1d-2U9_O+F@!HqRAf~MUH zY=he0Q~x-~OkH*OsUQ%3Jp8wsuTwv#lkLv#+P>ZXe+}dh4UBy_#arcWjcq->``ZDq zqzrr;C|-SbdnWp3cTbnMcWU`U;zZN0?O!*)J%}B9Vk|Oy*K!%?f3FC99=$%Msqxgj z+5og5TLxOyZJ)MHwVwyQfz3UQpcPdy@bQH3P}yY7Bz5MB`K*QS7S1pJb&+y(eeJ&) z*v11J!&~2vQ>Vv=6j&y>=ywup;WABf% zCXP>hGMO{Woc(!0y|TAzTYG-}ixanO>b7p&Y1tjwMeZ#J{~i52`D87%KG69%xc2zj zK z-k&{lJa=Q}6b7VESR?PIK512UsAj$lJrbQvfU*|QOMgS#>r>#b$?=>JH#8Q}#vQfq z)eklpn(Eu~+fD6Pv_ISRC>XLMjc~_zrmPFe#aEW@TfQ^&P0d58Pg7kDe{2yQx^w2O zCGN@-+yB{K3;(C1disX-N5l6f?{ADBVT69m#3HuzOcSm7#pZ8YuWEaypEI7j(0HO@ z_uk!i1ODiKT$+5l?y=SQ{gQB6jL%<|z@`cHQsonWQi!U#hEtRb-uCHt~Zy(>jY5S4j^VMGty*dFo z`qQzWQm-_GJFeOKAX=OFPwL9Xg{H<<=j`CTaAoP_O|3VC2z8K-bBX(#uW3Pb9qM9r z_w?U0{`BO%i_+!4uU)h8%O-BSV*A`SW{0wKX6Maa)0u*E*g$%$Zu0U}c>2j{)$F6o z$rGa|ema5K61Ka${G*q{s*blV6oubw*zNuHXx-|>>K%=DE*KJ@nX;hjg1eguYuzi4=@HbIelG9kxbfjmn_nThU^mE{<&Ep%lY~8vK zyZl7!l>^%#8!QmU#Mm{cwT7ngPWa_F$0E~1Gqf#4;+d|w1=%skmUaJ7s3v(s@^mwL zOLn%dX>$0L(@izSwcppRHQ(5JxpkuBgI-skV%RilI5aVL*nf22f96t*Jr(Wxajks) zn*Ey&4mVuieB0PfTKL-{C8VTFAt=d(gx#m-^^{F zK6yH{`{Lekq;T_>usn8GtR=n{zqk9&zSm~j7tfuX*qywP9s67Ch4GJ$TzUGX)9KwS z0y)92lAqN6SQlyBYwqjtb{^?f_gm)9&dV0FFXvo_?_U*&hi;GU)YK-sQn#o6)&wdo z`uKyHqaPj{Ui;_T;?|c3cb$14V(BD>4%G)cN|q{DuMc3s(9|a*#O>eqat>+^4i2K# z7Za%?+O^x7e(0VadErD!&vm;sXW~i7aL-29-b=yn)1ro-no{jw^j<%>JaYHQuOm{> 
zG31|Vp5f1$7bB~=mp@o9J2|i)PK{1^R)PIzYTpaSqORmGX;;f+UuGD-fLyp@>F*~s z`_}{t!&neg)!guK?{od|!O-B%gC~bR8|fM?U%KLG(`xqC6)wWIHi?u@4=pE}H%eSiMXi#^K^E(eY@92q@2alCu|h8@U$+y3*7q~?k3+FEq` zkA1Fw_o(meFAZ-rqNg8?Q5x!dU+k6jy*3D+nLLueWYgb};7mM}{Bvr${!fi>wlwv$wMRRt{cS_HjAqX) zZ*`p!p0oEo-|rgnu0M79n{Z|QH4RmbKQwBZDx3W6h3!{$UFvNBEi26fl|$!7kB(oO z79Wl*)f^3PZR|dBK^=Xk`sez!hUUgojXyTEw?jM5b$``&)4qXy~f;p-~u*iYkhjKeXQe1Yh&Q}Pv=X*KZLWRroNx%pT9tC=lARnzB~qv zO{S4Eq%}^Dv!}Sn5*uuE_Psopz4%J#o$$GM4@iaiS28D+Uw_cF-=uA>Z~vtIVCe3l zdxowazIqZeb=%zF8Uz4BZUBsc2{3~hb}L{5?0^Gs z0xmF#?*sfmAy5Pq10_Hyn8vRFDuF5>1cZSI5CvjD9H<6BaU}r$@&YL!4b%bkKm*VS zGy%;(3jksn0Z?}abOK#KH_!w00)0R~FaQh!Ljb5|1xA4}0BjQj;8hDu0n@-?UG3 z8gw0c0=fy^g8qMHNL&ZK9(n`xM(9n@o1wQr|DOpG4?`b;J_>yd`Z)9n=-;4ELZ5;@ z4SfdsEc7|>(`3P*Fc=IDL%@)*Y*-E~7nTRhhoN9-*#DmTCc(%s3XBS)!RRmsj0t1G z*f0)^3**7~Fab;m6T!qV2}}x;!Q?OnObJuL)Uf|O)op^AVHTJbW`o&b4ww_>g1KQH zm>1@Q`C)~yB3Lo31XcVC5ummg#tA&9B7g#;4 z0k#F(hTQ_c6@DB1cK99eJK=wU{}p~0{BHQY@cZER!ykY@2!9BE3H~tr5%@pg&%)%Md%QEgaKhh zm=I=!1z|=dt zBlZ#3B5p?9gLnXO3Go5~KxLtzC>RQkLZFbSY*Y>^7nO&~M-`w@C^QO#!lG~}Jc@uK zqDUw*ih`n|Xec_0fnuUqC^m|N;-YvcK1zTRqC_Y$N`jK2WGFdGfl{JWC^brh(xP-I zJ<5PGqD&|=%7U_@Y$!L%gYu$$C_kzYRfH-=m7q#dWvFsg1*#HNg$kg8s1PcQ0(I0V za2WtqjRHlms3fWul|p5)psZY09*f0dvpg&>tD05AYGgIBx>-G}dDa4Jk+sAEQTnWJ zS>LhpIRzXP2hG8A2pl4Z%=w)wMB^|xOb(0lJ6(qdl7}=LE5`Dr2UkF^q}7h!X}V_)B^I6+CXYj2S`Hd5_W@Bq+a2?a9Ox2To-N$w?Pij zj_?mcHUw-CL24ik@h0EY_xB{+(tKe$52Cjwc;Ci?L zZiJiQW;g-?K>&y>1QY>7z!6A9HX;X+i^xOdBMJ~G1R8-sU=cV39zj455hMf|K|xRv zH1NDK5KIIMJhhzv%gOvd&!rvVKsXUDg!})+2{j>@5C((`5kf=|F+>9Sy><#9e2{ih z0jY!_fhtJ&e=2$oSko1N60FWHus*y0rk7a&y3%UJCL>N+2a*b@l>Y zhzBTy6hVHk*S}Z%-z)#`H5`J>LjuTrWEQdj2}Poia3lr^L!yxgBppdYVv*TMCX#~0 zBauill7Yk_bC6Ud3rRrcB56oAl8DR#?;IF-=ODm42LjYVY9VPz3Q`9oAqhwsq!bbd z?|>-CXaOKu5E6s}p+aa7Ekp;=Lktij!~`)zED$Ti2FV8RC^`@lAvTL1xkg|pmZn$%7n6@Y$ylHh4R4K zFMtZ6BB&TDfl8q=s2r+*DxoT<8mfV6p*pA@YJeJ{Ca4)|fm)$9s2%EnI-xG88|s02 zp+2Y|S_mzI7DG#*rO+~HIkW;=39W(#pg|}oX@N$dxsW^v0YZd8VE=bDz2N^BRXy~- 
z)pa<84#7e2kP38?N1~@>MNBlUkV(htB`gJ?Mq#ROS!yVot2Gj+(qbl4$(2IYMLd`~ zNrbB%cr>q=j!+}j*=mBwL?DTLLLM0>la%JDOPIN80=AGCB$vwJ#-JiCgy9K39h4HW73v~&K&|EQ$##**U8QyieCjwkM)s(!^ip*|s^Ar>LnN=dOzl@|l;vuU zs)!gQ1=S&SOszz#rBQW6U9FC*{roVgMlGPjxrO{VF`*_BlIoP2AS)taXw~9cwSdY~ z)Ty&HX*HjkW>82_jf(g?i4G-9ak7XC8BD`a!a?dBLZjp$LH1l8NS_l+Nrd0&b0}=S zCQ8SG3_7&NsK#mP7%j2 zQaqN@Ef8pJ8dy|7v}>HQ5=M&a)cA!ijayTI^Jox6xiE=QGazEG#v@R%e40F|l?~vt z@qSH_hDs{bM3kirhFC5w*3>9UG^Lt4L766-T}O?Q$~6*NfKtS-)F8M4YLzCSL31iJ zAq`RHrvx=Il9O5~ifY1|T1gqRR^!LTG!c!8Q>}?>_&7HyrO8*;XwsUbCZSO&6J#DX zs*Ewrf>VCAwDm7=w90A@CksIY_F?1y!$<+`-3YnIKm1=W{a&1Vi&}x+_GLfv*s@-P}snk*m}6Kz@uua28bgfi_Q>yD2t;*#ky z8XObE6;Y(L-??`Zf)H0s5ffZ|7zQI`$|015LL{goqe#VCA5kiEh>NsN{O@GE-??}; zf`^qa^?|fJAK9&is>0F|zLy-NlxQ8o3T+)dP7i>zyexSc$jPe$d3i!IOCAP^d2wx! z%2kzvM75?Eb52CBpGIga?3@)LC>EdXR<%O0?09^{C>!r0~DqL5MM*R=l z3&t#!<%4W5l%N1)eBm)fDw>t8^9piw02;5$QXrUAkne@o@wgaWfJ@ZDnK+$aPAC1& z{UU(WFRYHE6Y97i`%t9Aa3nf_!qTOAJdg&KkFUZP;`lnYE>4o_#5x^EO2{J$bWtK7 zqtwa4$FfEz)2Va{ot&oD>2zkDQD@L$c>=6fr`I8wIE+c>#Cmlprc-CvIdm=^14H3h zbXHvw=g|f5ew|zA(-lY!LbA9>SEv)EgO-U5!qlNzoOw2(|{z=OuJdstWC7C3PZlh?3IP>fF*a_zfw5r z9;vqqvh@VCi^?FWcscr9eZIax@26^rBw3ywrRPwyC^$WgisPa6Rz6V-5Q4n8FwaBo-8j_03 z(ew3uu0Su;i}j`a0z4$@!9=7LOtGMds?;;24rLT8)l)eNy-y$!_yrWPf-j*;^l=PP zAk&NVYL-f`WaJao`VwiHBjjuKDw$4i)}y2*y+*Is8})FiL~79scrk1x)26rTODJ}| zQ}58b^#;9H@6o&TN}7hH=lJw=b|u;`vhfS`etnU?SYM(q)oY|>dLg!4PgkIE60MlQ z;#TO(<#>6OzL-lAu(>8lKp)iCVyN_xUX2dxBYH0;gm(+0(rUed6W6mzHd#zxC`jmQ z^tJk=K7~o?vzc{z7%#2INvjz)0mJ~K00xN2Db~ngqFe)+AW)#>dB!XQ+#ukRWl{mc zP$-j9bC^hjnx>*r&|-2fTZ7Is8OVlQCf<;zl8cB21eIbiOQ;5dfhKo~Xa=l=ZeSZ21`(EJpwXFz1W9fX z7`O&Mk7M8&_y(atWC-IW2C+eEsA4J%cD?{3GbjxzgVvxnXbf7K&H$(A4M`fAYcLoM zRvLn$l_LofvdLh^nGF=K#gJvR8f*r;!D(Hg z@yTckAtvS0wS;muosLv^40eKyipBXbc}y|ZYY-|V3_smxh~TxtY`Nc1Xs}3%4M==7 zvB-eMV_D?}2u*@5Gn5!A45fx#k;0B+RT|jJDnrmv#|s#;*tIk@x0)W4RInBFkRhKL zHbe|MG?lAmMGZJ%0TacH87kRmX53J1aLa29LL$IO7?K7%J&zR?>RB~nKqMj78Ulor 
zp^m5$qOm$!8f?=*jA>$>fh<6CWdbqX%tTm9m$8bS#qcwHoNNl2nnMW^+(rsjO7-g+{-z*jUc@frm!Q)^Py>z%MbD8UsvJX3+mkE!CLOi5B4OBA6^wL+T|V=7Q#O>8N_1W{wuXnIUYG&u-R zeiBVG!KG-5nit>)Ek)Xd=VJG_g%_0msBO@l1FV z&cruyNC=6XN|M0&GKs*1XM)&LlgJd7yTwvd8k5JCn8YS7NzYZ9gXa1r=1X;c|aR)?{fN_l2e zH8$UDHK9colgX5%I7~iMsmWuq3yMq*iQ80YDps0k2CmcOB$Sx)*@YOdsRmnY!VvwY zGE;>qZi<>hrl6@t88%^w7)p|nFqNAECYLFRjhJF44G&2rV=GNHrfL(8n9VK1@kmvs zq$v&lLNe$GbC6#PHoX$qEHl(x2R15;$vFxF-hzRfsiJ(eM{GCcnbGD#dA7M!P+$%+ zQD&1+q{=m8%(WC5TaV5$Bh6M$%7mv0xeT+7LpBr4L~{|HVy2r(W*CEN#+y^}S~=6q zG8@o1bC?K66Y(@N&&)N8%wn_9EHQJ;0<+XCGqcSp8sBW>VkuZNfn_$AN|k1nS#36% zjpnqBgx8w|Bs@-ICM$JjBidk=o3&<(nNL!f9g19q-Rw5kaS}?8*=e?!t!9@QEyW9k zB(K?F_L*x*7;L$jj4w5pnX{=S<|1>Uxx(x>SD9xdJ9BwX6YtdWO7LSy{w{vQxO1j(PvedBZNEQo)St7)U zOcn>HhHkfznMRAvq7xY`5RwDuu;eP0R07Xxv9k#jKdT5s=a?-5f{3KzJ1s7b$Ktb) zxTO}qrO3jOMEJ#)JcU?6!55$lEnZ6vz8F(tktixGG;YLFX(_XWEILb-C2Zk}%LV0@ zfJICSQ621R3tSMjxMeX*+@h2NY%eKoA<&YRgas=i2r+_MOUe?E2t--55L+#i;9%A| zi&TNIW?7+DHwUn?EO0BtnrqFoA_N6iC@tGcC!nlIE6$3wVyt*8(HdeBtQ0HBO11_q zG%M9wW1(BoRtt?`Wm?%*mX%|z!xAYeDT=Km^Q=OvR^lQEtbD6pB(f5S606us#S>{} zp@bA=Nv&u$Qs9!vta7WuN|rhdLbhCDrWT5MVugUCR9V3vb}JFBCo0%(iN?yMiFh^| ziKVrsrPUZa&0y78jaG_MZ>?pUtXQtun#5bJ7OTx_x7OlQLL#q%;GlUpb_N>lumV_* zl|*q{bE#CO%SvOi#fl(w2Q|U|uyU^;h`mN)UdjWl&`3h)=Bbg5h{zHZ$qh=vRrJ;J2flmdz^mrJ#UnGQMK#;}zrxHj+!C1Tks z1Wv2hsv?L)92=9#7mI93G1JDk@oXw~rA((RmdUYBnph>Y#aIFx+omGRZBm;8E4C?Z zdYhQbks57)#9#}GSTY*En$2eD_yr1$O>NWKR5p`MVUyW(Hi@l*S0fkFqf{z2AmiYa ze5p`^3A6F+9FoOGqoi;)TNLN9Ic*WP43|fh;#5?-t&k)mRSK*&A=+Vc*%ZV)5*;ri zcx?%4NJgUhY-mZqhQs=8Fb}5%>XK@I2uAO5K2-)^BA>U52i-}abgW(p@?J^?XPPF6fXq6R5vGeQ%Trt&w z(c3X9lby;k+6{KCU1;~&1$MJtZI{`_c8OhM2ck+lgCgepIdZ$mF10HZWiplBV)xjs z_F{XX-EDW-)%-$^!)~*C?Ire@xXNB?udo-{qdc<2Zui^SOqIOGUTz1-Sy&fNXXh}3 z_G){A9>x{$D(zu=+)gCQ2~j(QAQOVoG&{I@Zg=A(yp%m@FC{1J5Qo`TYfszj?0F8P zqtb?Oz#UnRGH`SRb>ukUs(eQ+G24;rK!^(*SO>}xQJ@`8JI;Z35HLhXF$qF95TRI? 
zg6tqUs163fNMzy}4!VQsP-00umV<@oIM@!ZgXiEoTr>h3LJdm1Qobq%w)4VDfkWtU z(?kxjL+X$?%rYmVg3hPO912GkOYTrQR1UR6m{Jg|tK_5?UQa;##pzZgV&sc8AO1ba)&d8b-DSnOa@bi7hW$Wi9taJ_`Eql5$_1RYh5a)(>N=39gnj;JHz$Rg!q zY1B$bz+sX_xnQ)(k#?xCNNHLQac0rE%o<0XqXa`D#vDmUlpPWki>n@CS_P z&#iDuL1Ia-z^!4EWG-=5bX{jgv$1Ivq}f)9%cb zTb)j)nk-;aczP#FVREXSVS&zRacWdddY#PYED#5rG_spf=tL>Gl(4f39d!DgWzGtx zN>S`A5R^JY&T^+nUgQMmb7 zih^V5QcMviA>!bZN{C#`uXU!KSPsOMa7LUwd?^X#%5l}OeK-#T=gM>C@v~hPzT5?O z!CVC{qzmE7cR|=t7utn!aq&bK!9{XWT|5EaMRxf}2`<(}6H#1D7v05jaa}Z583ie# zIHL-Ni{pxM*{)nyX#l(&T_P9H#dnEaLYGpabje&&m%^nNs$4N%B^Sn4yR ziiLWY8XFbBaDIZ5uB9SWMXWOLXUyQTxTrk4%jB}TtS+{|f{rPjE+oe7a=9EXvnxyH zb$MLn7{3cE^|=Ug5fjG|GHaD(t^~WtRqm?5l)5~kLRX1P%+!c0TpD_k9dH%95Tr_% zS`l=?qbf$o6?PSfzyN{^CyEKGT?v;#8F$sVDlrk4O_y{PbI7!mtJVc&S+r?z09WOz za{=z8D9a6X!`yJUlaF*G+}Z9Nw~OwRRLFDPd2S;IO3!x}xKVDcR3t{bO`?20#$Al9 zBnah1IUb8wX=OQ55*zF03A4yJx1LIHbAJ;ZkwA09+W3R!Fn(r0ba+V3veDXn@USy@E(oA zCnb3J0-Sp?J7fIF9VGqI1MjF-FPo&^&e?oSiGqm#{o} zN(G7SVS0>Wj)&{9q2o-EhwrHr@;q>Y)Fbc+Jra-D1CwyUX)%i+^T<64kIJL;7%?VK ziCE*&c?=#V%jnU2*mSi=>)}cyOpC|tv3lTi6t!Al^T_xP4@7DAlybZtzo*cn=lMKt zkJIDwcswPZB2TfW)Kl)M@Kk!LJY}AsC*TQr1j?u+r_KWbhniVllvpi*dcz{T1m=Z%a|8$+pMvlrz1iMuTCO+8o9E5EWS|b)yZUDxmV*= zdsSYYSL@Y#jb4LSAq872N|To)GkZ0%UL~(gR8FMv z{9Yb&J> zs4vSWXTp3u4#EfbA$=v@GH;GA+lQo=Dz)@nABn^!AP7DhUV-s}V>3>^59O0F*sKCy zo-aqFVA3TfYK;Kv!}%mWGm)VpaPU4Ywwe!-5`0Ml(MR@CeAyzZkLIKMEJPhvAVf0! 
zG=?vmA;t0}OkcTJz~T6~zETl~#qxQ0Y@d>8M`sgwz8XH?Ct~Nzvm^qa&}ZO@lwzOA z7obahLA1gr^XaillH4bxgegT*rB5$Y`!sA7hKkKYt9%-t)>qBc`BXTAuZ&bgF#2Ld zlP|?q2;J-$)$H@~$~jh_-DmOHd;rPegOiF0T7}E!_7z}!zBtb7^Y{vVPG6BPTUqQY z@kRAzzEWSgufj)AR{8=#6|>40@R=B7VbF)Vr{C@O_=Qrh-z!%Vvq?Tb zURnY+8T@{nGAa^s@FE3+jV|>U`wRULyoOokFYy=otNfMz3V*pj;1Bw#?2td=5Bs%b zGAHIw38MbEpCGN^h$z*5u`=OL`fL3ueAzXZDvfGrL(i=ggUt zO+i|StHjyb8R2U=EcZgVR4?Fo1yb3m-Xwm67u#LnDRy=VGT{f(8Npc|LQmA+f}hNb z^RVPIXx992AF_uP-`d;9jYT)(+wjf#p|19PN50h6j_<+`a+hz{#a2ef?AIJ}OkLE=Cd-DVM9(*4^ z8sD27#PQ)LxX4&?dMHont>Clxg}(lLHb0o3MS`a6Q6~qfXeG>$Ua48TKDCtRpWI-V>MSux_fb5?tFsG#n z_yV^Ofj}q_3q%4lRu0oA#EK;qMEF>H;_p(Csm$OSz25O*8@MaD3vGpVLVIDDyNl3KC}7KiE!h;IlaMO(4|Wy030>*#LQf%0=q2k-V)CkzaU|-kV4@Ixk8>WL>MaccCn5R z6Y_n-g~47CLUL%NFexa}$&?=Jt!CSMM+w8-qlFw+nlM(#W@H4?++u`g^f+Oze}XVx zhzb*hNkSk@7KS)s!W3bukmAAS@r4#N6-6Kv3PnNCyQ{dXy`j zVhNWg)pQS$rzp#a?PkRe7STj3QMkxYA z%}x}>h@yNPyluT=Ie~1UNF|CFWl^(z_#%l&OBaabqGUgXD4Lrpa-)hxGLbDuDH4gK zBCB8|L@UY_X+(ZrVIp3T2`2}>s4ZQTA#$N)iPWMf|4dOK*@WR2lqW*OajZg7v=7^h zB=&JJ73aX$xkQWdMIj+3OfB0+Oc6VZ&BU334q^+jm6$5F6x)h56h6~VOctAq&FR)+ z3(o)=*)^S$!_j!Vik-#05GQd4*G23mb{BhzJ;WrYrBGv3}`dYmP>&6)VIlv0AJY3w+%}vc)-KL}E?L z6Bmjz#ThVhF3~YxOp+9cbH!BuEU_riLgL9amzYWHC6*GhM8Ks=tR*%Q2Z^JEBC(R# zO1vd55+{kX#6#jKv8FPd-6f_HH;I?TRpKM@mCz&%30)Eo|Go__ElL6;ffAW3Tf&l% zy(8T%l7b{*-tN?3NjN3Oha-t{=SncLH=8Gs(4r*a5~W{+Bvg{*5+aF~xVR=U5+$(` zXGV-9UJ@orkf0JJD^8Lm0g?!}NC};sEJ^Ul<`R*Mr29 zN@S8)ABhAi0*P4S?OR9_N#qhzkW!+M$f!2KDoMUXBhgCIB^i<&Nvp(NR*Kmt=TrKVDn)J#h8HkVpREu}ubFnKA+>dr zNTc1Oyp+;du11<6)k>Y&Dk*SDmu5A{-2ovi`GJj{f zAjij4rf@Tpnag}UdGQu9OPQm$mCRaZBeRv+$!z@WWn`Iy%$!M)*#$ews4^#+vn-y8 z`nbrX?)IUsGB+8G?JlFsJY-%nPnoxj8SEpY$$VuDnH@ATVs?s0jC%+>*^$QzkVQB} z`TEP)vK+ruPN0k>vxm-DYG5)$&GHUl$^wIBjvSd~2*W=hMC}mh^09C_6+( z^VE1I`S1hn+;}psOv0vng$IPm!ez1Up|S{Bq%2xSVnoRVPO-8WS)43EhRTv;(Y_&G z$+B=NkY&;_S&B@`c5qIWx%ekg(qw9{Oqv}-C=%H`A;d7?Z)9xrzcK;=pDWVy`U%oWHn zd5WA(OO@MsrOEknB)l+8AXo8)a#KGWns1;;9u*YnU+8Y-XHCftj-e&FVlL4^V!2c< 
zkpss}rc9m{NDYw7!@ZSq2B%Q2mTTl}7)HsWTQXJhDCZn`Cj1eA6%+Vr<%Rwc0qOE= zIiLvxIsOdaEV(_yl#wB~BAbNd$}xtscMvUKo+nq53*-)L3q=A`N{jQ$b}?7Da%q&9 zAj}a_m?%gJQ-vp!D~-$g_gphJGjMjZ4|Zv+4LAMTZMyy?`@~Bp*@}C9~YULlntA;fhd2m?BaUp~&@#Qba3a6rrpL56ml8 z5vMTa1$Zks9A3O4#U(*O3P@69Q-f)V3RD67qk{8YfC5t_D^eB7Oh2knk){wRQWPOd zzCy~2cT*~y0wfB#LaI1@K95X9cIZFulE)cJy?GLZMaoaoqef z6dHvsJx7tP$Wk~2=XvKUl7k{COt!`^+c{r>y4!l@DdaQ-4N;Pmc|ImeQ)Prp06WEt z6jG?j_MnBBE6tP^@YOwHW>i3ozpc{G&r)fnv~#sllHIJ8m`@IstfYD5(1ZLvsrE_- zrJui@lA?4~dMMqMb^(q`s?tU2ru233R5~l2lr*KUlCJbodMi_WteIZQ0HrZGfuYQ1 zFqMHywld0zrOf3-Dua|axf!XDQD-w_ZcO-=PDU)0YL>yL{+FXQ6ZFA zTE2Ij6Gl2j~mvI?lud@xm@bDAnum7-Eh6NC6Ffl887bok-KFG%5yH`6}dBnKDz*r}5__Uc3~gF^Ro zP%9kCYKoewc2c{j9o5e21SeNDOej=)s6Ex*YA-bn{!ZOT?W?A%{nQK>t(U(#K#jRG z)jrgaz(6%i9q%V)vegBF97Zff;i7cW`tj5%7o{Ut9jxZ4gVeF=5Ot_JOdYO{R7a@e zyra}HYN~UzI*(3ui&JwM@oGC}f*Pok)TlaHov2PxW9l@upG&HmuNJ69YN1-JmZ~M{ ze0P~zuJ)oT)JnCQrc$fb8nsrP;-0R~P-m*M)Y)nZD_3pK&Qs^83)K1QLinQ>6AejY zsxi};Yb-RD8Y_*p#zteSvD4UVoKXi&cAAqjLUr_@Xvi8f4prl%anv}&(4LDX&)F=* zRpYLKp$&~qkekL+C z*hXuiwbfc_EwzcRc3OL_gVtJ`t|4nNecr*(4k*9K@EyqMZRElbPRa91@Oaw7z!3QU;BalI zR_@N_qy}e212iw##xYVG>Kvtw*2ZWv++($oAc2$0h|?x$5n?%c{OSLjB!j@|lTG!NgnOvpR!dI1Pwc2#8il^3QXfw6h+AM9Z zHcy+Qb>Zb}3$&4`g<5MQ9Z?}#Bm)UTf{{!l5iv!~5E?=zQAmy?D#?lDOmZQ)lH5q{ zBn!k6NkuRu4Y5M95ITuL@+0|^0!U0!Ac;j{lY&UWBo2v7;*ml~F!7%Rlb1=6B$y6K ziXp|4;z;o%9uk6(5l6%x!AL2jR8ks=PZE%XBoPTtgh)~nbeKpAl9Hq%sYx1=mXuD) zAZ3!WNZF(uQZ6YJ@k7E83ZjIgAn9X=A&TL{kc_bL^x_7lHPlaQP!{b zM5EYP!4{*9HEcEP&lYSu>i0*mgHZzej2&g%koF#i2uu1Pir%A;lo6v~4L^fBl8zmM zq>L>^(o04ls=i|o@=_8nmCI&jF}&Lzg)6OG-+j)sR; zGGa{0&^{%jjN=S9)^MypAHV_sY@{dqyL})yVT5K1i?L zHHej08!~fM39{a~2w8{?K+5hK5Y}h~Vqp=ET*-1lE^i-=Onz333|^r7dp@xrjBoHK ziwMHwk4c7A*-GJ86xPL>h6#yAuJG!Nhc!lLPBLDIGQaZYF|bB_J`Rj_&1(N=pJ@xa zhO=STno5hL@^O*@XPIb{;~UGYm0NheR;x%XtsgQR7*17F#OI3_17 z2z$0@q99qk4`1DNXHxRQ_lMVixlotOB3tXg&a_wgb~=f8CTW;`AQ$;EzGSk<3;&bdGJps&B{vdWMYqr8dQ_jg@+ zXT|Tj{SapG8Gy>Y4MP*`Ix*M@C`bv5lgejCI?gJb_& 
zI>DHkE;s7#=>d7}w;?%)^+1zPZ8BM+2a`2Z(PN|ZVCV4x;j=UJAg<5YVUyzY;OS%E z?sYUha5)-o^UFdHuI@;P-S(ja_(f6WTOW4--rU~BR*fBi6&!QxKG6ZnRqL`ihdMw| zhWL@?t`0CWGofzU<_>TzaeDuM)^~t0MekmXt?dA_P2P@n-p~P@8s)3zZs`CI)(5A) z+}#0Q)?cfOJ=y`PJ2#crobLdI%;W>6w>rR{8+og0Uv&UcuOD6G3>{#tcIKls9(pja zTK|w9s|T|;|9I%0qX$K6CP%KAq6hb@H*egxSr0J5u>5gN`hTz6tJJ0bHL|aRc7u=S zzpERGMsQneXGjfc`&S;9_N$yz0)wI0pLTfipLQYtwp$MV$V;(5Co~S~n(x0-L&gya ziT~iMzu28R)nkC=2|zP4VW|f-9JlM4nOlHc)8p%slOi-?Ay_5NG`~UCy4I}1wsuI_r(UCuPBz@L{!iRU)Iu(z^ zdzXz*Uj0rFn)}bDOzKsQ&zwH&mCJKIaQa*tt!-I={=M$s7k%?DYn+ci{`@FtFg!24 zFSYenekn#Ttr_bsxTpuKZbat4*i?;He7tZa`LrGkom^44b@dAL@BQ*q-?P~B@FMh! zXcTAhZn(d-p(Bz`C!-siZ$!M?qzAUcs^>k zLlcs}yjzX_y|35B&AM9NUWNPI`{}1v=)uBsTlp!&=b^V3^qG?=)Pt#JFY3%Em7&WY zPOXsuJy^PZ`}3Yf%hA8j!&LE@F85xuQLy!Zsxe#-y4rnLazth5qbbiy1ROo+Wi=8^ zKDHd6(YO2QlK?#k-(@}Y>6dc!?{oB>81A`-k%U*@yWce3T@Mb~*o`WWosZg@zSugG zss~k98|N-|9)@OxChyp1rw2!N)hWg*D$u{rpM1>jXC|q$(D1wRpCe54K>GDYVf>>i z)Zp6hfYr|qV3xlqv24>U{6p>HqWaGrV1QcI@LswY|NGo7;%U6TTCT$T-!&h)_gx2Q ziCLQbs(B>-rS@gigZ2*K;?;Q2!($cxa=X(x-t!JHR35xdTssr}`@GK^$evV8U4na; z?KTtK?*RF8-NaSfa`51gFASU89l&d~q;w5;3cg~y`d)W)2XNedw)lPDH1w~Y(D$=Q zwC=wg%|?9Qr(Ed(6HPbVf7&n%Z9>l+-guz{gi*<0kzon?_{N>i;By_IZr`Mnjn(7O zzxri-YsTACK1)#hM?I?!;2mJmrjbb0(kgr#cip%H$2)*z(~^e`k7whl{jYDlf20Ex z#gR{4csv3BtCv0(OnxV%&cexy3$JY65BuNgaW{`%g5GBxjuGwa0Oo7vg&tiw8SS~a z;wE)Z2e8iYe)hC)HTqYdy}96_o_R3}Ju1wd7r3JXMC5c7;B%K5_y6R}b!e&6ztPy%MLNYlsqV>Hs-8SrZ;LF2R|8ndNaC zJ3v2=X=OVp+34sU@|&4ZKeGLL5&DBm@W1-A{CL#tJ=8%sao=_<73$ND9Yc@lOZ(!X zo+ao3s8=;}S~HR!mf@94HQ%N}zMh;nt)K4%{IA}vn>t@7?U;p+nXyH@8p`LjWzr%% zX5p7hQ?uSezH!sK*VVPvC@6fa8VJYt*`hf9Fk&+PS6>g-m1gw3TZ%@U2{U;Qk1un` z?OjjbmY{u<*JD1y<4?VQFtmQd9GvQzoWX*6KPooy!;yhY@V|PVX7EU;>{W*Ex#%zS zgy(Bm_Ncq+n@jQci}`ErZSMfcJcs(>?^ zDZv-;54%Rd>%}knm|O9AG8!}9Wz}AIJu8xE+kCB;;1>)vw)sXoL>n^y*p5Q{uU*MK zUOnZT{}7z4y>yj-q66&w_Q@|TZ85G^KCUf4)d6IS`rjFHWhQ>Y^2&%0I%wA#YdAmm zjYR+2qm!d;o<_F}Mg9Gsd-T230s8v&@iVKRjhDPEoZP>m1KbLK_I#H2d^D0LeYd6w zK2Im{8_g$c@V|DfCA?#8ctbUsb1=;5pL-qPVBY9ql>;kLqGw{}U}#Sdty{F>+LLm8 
z(#IRbVEFvaj=Z{fyypV+ul@UeuUT3(xf<_(WMPnHM+Z2u_hHt(bD8)L7uB(iA3Ff{ zd25ox=4w14_vOM--{ARQvM27-?E&~-yBR$%XYjL*8eD3zA*{|^53W5bvbkdzgSMQJ zRnwrp*}Zb{_aCQfaBJF~{IgIW9kkmQws+WQ^sl{L+EZH)@q7_FsmV~Ziw@i8S~pj< zR-&@z?2r#EJt*~5#_hY-7d_l=v2+s zm^#M)*~+~O@eieoc;bC}U`5}Vf}dN2vhsR&c%9ILh_^e=E#*|AD^3iK<<#pz$IRyN z9sTB^fBlg6&l6vqh+Tv`pI*DC=YbxqyH=w}oI4)9Tm8?BGwpgXx1l)YjCnfxVPqBO z?8m=;3xY)a_AB-i6St`jH3iXieP4M@COMAK{k)>MHf+!)T~fb>7~7eLbehr~mXUsi zSlqx|_0*4hu%fyn!%nt5$4>32ts>9(i9M8XRpV3Iv4&+YMEl)$oVxI1b(_JkpCAU? zeH*cfqN{M5niu+3t?S#6T6*x{7~P%uo7noTV|9<~SK7@eR$~3w(+WsKtFdCA(^X&d zj$(1&$9+H6a2D(L%J!m9!$xe@&ZomopE6I&p6R}PhU1PY+r0OHz*Og~JT8}hiPi}` zZBqSg6gu+V#oKp}0=gShEh`tlPr)3fNQLn$$6>d7Q-%)Px)__b=G?KLwKdp`>e!#l z50qd-NvzkdH^!e%rS+2Zf#ara511Dp^M&}hZQuKk^JqHLz5(@ZeFAml_Nsj|9$V{j zbLUr=#KmA$vtK#L1WBDd^R{IFd@cl@FDcTpac8}*DRP+Az;}C@V3%YH@0dPC& zV7_)P3uC&6x%3@60~`9uJ$aSl4A$SJ+TkYk8HUH_$K0CG59@0_Sedhtz`%kV8QT6P zDQTQ7756LHDM_}E*@L$RrPPNl;|}d{K67tDLr_)Hp(J_TsAl8%HHl&v8~T9q`!8n#nN1Q{W!Qk18dYV?ypZ=j}4{ePHrh}!AAMFu6Ta< zEw(;mlCOKsQ_T9pgVWh>4(ZCTJ|1LxEh0J9YN1019KU<5bMK!z8|=|u-tv_y0p{D& z>)C_c60DW+ea4hK{jjZR&k4WYV(g$YV^#%k7S{Kt;)SepH^v@x@y-zRIHudLy0>%t zCXD>aoBnoNAKin&UosNID`D2{oaFk`2f*6#to9XO4dCVQZ##(|E+&2WBWTf1fE|*} z7`Yz{!`@HQeOX=z^A}~!FKTX$!2F1VeuDntQ3dvpCt$JeQE*9;dtSVj{~v3emoVVfYDS1;aUFjSqI z<26$^Id}HiHS4yVn;5(2oR!(=RHp$I{PTBC2riYl3OkBj1zuL6`;Wo4%XpqT?TLM7 zsG0$1OP2jOvvdeK^~#w;X`>^T@!Jdw_{Wy=(vGe?eUSR2>Qux2<3w%tCmmdUKV$z% zspFm7pVAp+l@@`{PV;Q_NLxK~(C_&hf-Fy3B1Ka?2HWzl46zZeUrrJ2yx^a9mf9m& z5@9R8N#jd$BNdW?Z?|2V_-+HwU|5zk>(mLX+S@YiT4@YgA8g3d^wgP+f&z^0JpFzo!n;H4taNK!&dcDHOk+GoWt?R^9DOjvK zS26Qk7WOiE`=Tn57+Y%*wLwynf)#E1U>RvQ7W2OqtDcfyjlB*J*);X`d@TKa$dsX7 zx!5Ou=I5KQrzBU{k2{?T$JOnv`gXL7g!S7oa(e6(Huh}MbhqvM!?9my#%z(uoUw=R zZp}M2JQ};j{pP$xTZG+vu<_PitD)GAO85Jd1{Y$BHOf!>PtgC|FX=}Ug<+c3BTea;3@rZZ@+8C%hOV&~B8 zSC}kj%5^i@1I%o_;b+M313J~EsvA45N1x7kzgP1Xj&Ha}iaa}ls=M!6cy{DFxvuDz zyXeO3A-d0A1x3gE57xaXz0u%*ON1qNgzMfq%*0ktS;SWk*oke;@Y#A|*I`Wd3 zd>!T+ZWA=3tytF*c!3}IVfJZCow#OE@(JR;^5nouGYj4H>H%k-?TFOvlu{mV3KHnF 
z!>-526h-R{{q|0nHzx@@;a&b_RknX0K=`|a>c{dsDHLWi;_?1`t ztlLii-rt=`Z*HciY_e!fd(RmqxSjq*z#a4=^<_vuL39B{)PKWi(fsY5LJ&CsKAZX^ z|GInFm34Phj}9>5U*MVXGku4q?!Pyl?=RgZn8oZCl$qds`=_e#U9fHY;mMd_k{!(J zJ(4;FO-)q<+orDUy*=&ugL3}+gT44emRwKWu56EV{jN{aEpx;0r)eoE3m2v+@-|}c zrtV5vwe)-HMd5(7JNa)@t8bbAt4~@^E##J;sc1_&pTLU0P!W@KF}8$}<~IM6;5JSY zuRRbdS$f(}BEDL6$)n}yo?nLMQ!(wxnZdha&qZ$Cb>3U#U%ztU#?-2BU-@4r%n=Sf znJNm*xi6eBaY?;u>fZhNuswCw65XP{rDskK8T@xCq90SzLW-04!S~|$U0q&j zv4{SF=bH}WC0_utQ6KWtygezt!WchkCr+CsJhZ;}82ktw(NE-ZhY-t6)kDU&0DSQq z_Q8vdM94R{r_4TA{*9-!6JJdKvduJ;wNW?MfT zxLhQpUY!}umFEE($KCc~?pdOLM;<%$pbmW9y?-z}03J`>wT>d|r^JSami}L!eg;U> zxKm3jTZwr$!yJ+}K>qq_Z<^>$#Jk>*G258>Uyq5eD;<}8_lMki)bn1?BVq{Y z`sV!hZh%x@wA*WVNE}l!^2~QbUf<&wXZDbw4Ra;G9|^fNA)?gw0TKPxHrF~Havd)( z)a5?$Xix9=Dig@XU-PCj?h#Fs#fy$z`2@!sS*HlQOLV9r55+Ho+`8l2+>|@Sz+3yC zTrD=rJq~j+ZxiXqUnu+YAXCd~D77c`qUBN2uH4_KgoyuP%KyJknW`1uX_Me*R;b9Ls ztUY3_PZM!x*tdS|(5;8o(K zVrgH$UXWW=q25z26Z>6eC1piHhA~Of?TbX*zU1dZTch0Eb6eyE;?bsSJsru|i{HyZY_^ z>xhku=x1uej65MIa8M1wn0Am!p&0qq-LLTP`37&h@@<;V=(nr)r~jG-DnER_QNo8@ zMt<7hy#Y+8RMuL;c_LCaf5A`1ZtyB^#j1pC$XGTlV&_57mUOD~_RIH1dDD9L+tZv=6CS?JvQAn?mW<3z5QwtfJ}cj zKF#tXklT?|oJVhA{~hxmFS-n_xD+Eprb2FQ8Fb9P0aQhX42z)~xpALOWh01J2L+C( ze*^blddOyC6L=KP!cB%k)|m`2-`x!6e*pLmSIDiuZaO|_0egOxN}A8UhVAQ>LW`S# zv}($y7b75dCRm^5-2z>BdHfGL zI&_!hg&=Av7pvdxgk zTlpnaWPLjr#|fCyW;Frb`Z_lPX>A8b{XPf2ugK)3LYw;P_k1|;DE(Y@)5 z(0x52cMQI7z_h*B6q$B|@MQ+G$A+ReVDN$03wBJMg>n}5x?-q(10Flc4_~e<$KROl zd1QF<2B0CuZ!S@DP(_mc2ZQfhuyz7#&!g{)@H|&WqoL?6h^1ceaJ!n0j%ichGt|QS zrEzJ>y)UEDecRVMA+2wLZcQQMyv)!!i`Kxi5?IgU=T7J*) zyt6hMir#~d%Uag3%_ie^^ZnKtYTtvHjpQf|X*TLQ4KG1j-vhh5n_X!pWAPf%XA(m1 z0IhxW)}du{(YsskWFy5L;QJT5oUyjka68?576Rj1jw=P$6_pF{DObCPAgvwX(>Rx9 ztg0?ISn^A?^2R2cJ8O ze9Q1e4~8wOx$60CHd^W*WQNc?!3<$U$;aty&;hrfKQ|P0g2~JFKC=2g3(ej((*{}J z32J`aD0}=Y8*czbo=9saaPF)(k1VgjyHD)-CF_(7Qm|kG1lqo;i5U z-CB73U7);q>BQh)3()&*TZ54GU7&t`uaTokBk=g-pjN|^F7R#Zf&1}O)}ZB`55E|E zKY|SH!(ovhrlQ63^EwShAHm!3Yw|49fAEIA4k5_;k6`@f8?yG!v1rVJL^GuIBZ!={ 
z&iU%pwWwisGd%yF06U3yeV^4dbpMn%dj#6Et*c6A##)u5b8EBAkoBKH3$Cgz_&yi^ zm=+EDe*!`2Ztt0n6L9i$QiH*_8$8SyUwOA;B|7}z>>fi=H?U+kg|}PJ#!r0l=`z%I z1JdMC18Tp_MrTC*d~0~p4U%7ng;bZ-;HN)+ooKY@9%0M#FF<>qG-sdGXrDL#ycsb7 z+UGh-XQ$ELPCn{uGauU9nWo3;jP`T#^`V|d`?;mx96O^u3_sHKaSgPG>lfKhHrlt& zNr_%Q(7u5vPpQ#f)uQ{BJcst`#LIb|M*CB+Z~}1^+MjL6suTe2Nn?@d$U10Gkl)2< zAO4ixqSZYM;Qg4?9Z-8E?jTq{h~#?^>hBXZaTz^E{* zb!Oph;)ET1(^w5;#C=>7={Dh?>gQfN5^{C*tSj4Z5%*|YR;jd*)uvf#s$0bQfU2EY zk&v;$=VCwKB%B|v8+_2&DBs?*ZQV^mUT{I%?=$q@w$F`UEVxOixHX4M??OIS7ys$~ z4dQWi{#E=0m6uAeRaC&tA6>HwL#iuILN7 zH7w3+X$!Gr`nG3G5#%}d7t|-W5ETN8R6{W2YF5zG*UiL}QwJXSks*H-bA~Q!Chk0r z5>m8-Zv4KHcikapECN358St`ZX)I}ThC432>DoF zZJVfxh^o99U_J+O=PZHW+v|ia&%>*6DC9b~u~N}>BEvoY#=UgNVU#5P=0+l~@(`bu zV3fb++mjlJA75Qk4#Vdg=^P)lvG^LX|5Nh)a!bh7%gju44Mf$i?82%J=ucASW(Cn3 zh}Ux4_4jTWdB#Gw8CMC;8H%p>6y&BG4VRx?A&%W2%L&*5xtbb+39b;NrrK+M3n6#< z3>d%tGT}X`vOa$-~&vXEA#ujfIePg01wCmk8{pV&z1kQSN-G3i_J?R}GB? zp^!)PL64yq35orNvE$t#7sswyz3Bo`9iJR~$prHHqZ#407l@Rv%T6laLjN=@VBFNv z^+fXy50cF-BcH9B{O~*x+;epMx6?*{wnf7go+oyB*-i4^VdM&YUfnq&?67n3s3k_0 z3|{4Rj_BTyo&9|(WNdSF#_Y2Mk?6DL-~h<$Cw@Kt@eFaQyQ*Scx{JY2>}kSQ ztdM+q2mN2{Tk=Y`Q^dfMDsA^&$m{PaOTV2YrcBFr^sk3p97><~@C5NX@amW;`;GoG zyZxN*IB~?2YiIHgWZkHP!mY=MQzMQ{uA2uL*?rJt;ZfoUu4m30Z{#?^hcQQp`{DBF zh5d~EXAgDgb(nZm#Hv27f!w;`ae?>{A#aJ;>;NFIAEdY!d62mOjWTLJ7jkj^*NOB4 zMEK!%>@ZKr^gb(v4*Q9HQI+?9SVBfx1`YYXk2pA3N$%)|`Vt#2U3h;F(O4T?Xz{|R zKWDyphwma7TeyAsEs%9oZ0?2~gy+g)- zPKr)>I2(klno&oA>)J@!+4Gwkmw;<~I?&HdqkOLOnc6j=-Koa((rY+>KpY$I)@=a0 z_FtV)wimL_q$z*i7O=){-A2zbkjw636$LxM;Z-&jH3>$})(>Ls0a>eyty@iu?N`2B zIkpb0JURZQ$rU)ipxZz1PCo!lYo^$CEryK5&3_bf2s9S_=)b!FvMzgeuEi0s?s2c3 zT$WKjD00gd(J}+ zyK-$<;90eM!1+w4d%(3p7r@4(;&7jRkgNN@`-)uz+j4GzwsND~vs>qK z3CtgDt;ezVP?bI8YB!yA=5?^{j$x6u4bGR2`BH~CG=V8dLv14gxoL^Rv5`%{ z;oRx_x>b-z%=%~W%_i`}&3`9rj8QI&oE_5)tX3b~ji?}t{Y&jvHiOLMl2?*o$Yl+! 
zhOf<_nDl9xi3Q{VeeV?Iwt%2bt$mr#;CySlvvt^UIR88RaQwm3kc-nOqug(Rv&XI6{0)$8;nrn25pvkRAF5k7fSuFr2UoHnw}w9Nh`0#`A5obgls16or`8N1 zO+%L*QkWuPZ6Ku(5x*f8q7!Z|wMW!#;OLgyv$nzc&7R>(OJqbFkTWv|zxml4-AjJ* z%TUz@dK_})tZ7qlyuPm`Qr8Ad?mrX{I5Q9be5QbgG_?W8C6aaCpaM<)Z2s1uZv*MP zXP#zd)9|ttvm6jgJJ3$F9@*b88Ku51{b>ko2LZEB1wKO-po1J|JTa)+L5s8zcKldE%*V|pcE2@Ly#m=+lQs@R z=HgGxa6hE(6|j-D#yvr%;`6-hUK*~y0_L>_;Yef-p0=pwr$PS;#EqELGv` z4WYpKI9AmmlTyLcM4WN!YTS+1o;j&~^FjDsh6afQj z)-w=qrtQ6Bxc&wl8c|JcPcFwNm}xo;`Zr+HV~5sci$Q3-O@ECM-X}>e`(dXIUKNv#af%g}}7V3ZX!vkIr4-Kk!V65mJJ^)nW zUyf(IHw=FV-nFK8Zkkq!-l|#!um3ynVez+x78Rq>)eI&Bse1>^9NhaY>KTBq6)t^b zxc&}|dUMhH^t46jxbr=44ElHA*PPlt@uwG|oUf~05z2coGVl2InyF(^pD$#4BN9iE^W=$d3@SLk{*_Z7>yU%Loc%Ba8Sx(Q$!8R|%ckQy%O5h3>i3|apl4~- zlreY(Y4BaczV~2SZ$`b(^aW_VXh;y!^d6+|HyrrKWDeS0&3|dozlZDG+6yhnFm&nX zUDpg`n1?iwN5?9rRPUPFWl+JqmR(Ehwi`yFj=ko+G7Rqk zxu^B}M}N-7?G67pBGnzh?Tef~?}N zBsG2>W_BQ*aQ@#GUC75~;-rO>x(#GKFt5?ahyPIF7w5gCAz^xu={UM;_l$Yy(x~!o z1I$B6yLjhj3Ydoumhbp!7_J9nC#>}TMCya5od5RJPzCc2+Q)xto;n_#_PiBdf0*~R zY07F3li~P3FUET#FwbuP?nYV1jFD*gxz;ZRy&mMv9$^w|J`5jLT=L04?gaO3yd`_4 z&BJph&H88v?F6j{ex4gReFg4j|Fg%S>IC^quSWeMR-omIYo^GEPH;8rg{}fAM_*m- zg7;r1@EjX^`iWsEUUJ{g1gYx;Zv)%*P6su3+>Om9NK+^HQY)VLeQFun({um3LEi~< zgKv*8Hye*H?>S+MP(FY@wnHm#mMuaHKfU~J2!-*6n4W&$iFN3`)wiJj{Q&l=sUyZC ztI^e&45A@XPP18C+J2nx@T!`o9ZA z618v7!aT6b&$z*$>H?!eLZ-Ewj6?4+g1QaEyFhQd>EllRoQ|RiOPrDFF2G44p;t(w zaHqbR&PZJsAo+GhmX|HY6ScqI8m@PNu7|3)X@)uY@;%3Fkxm%@NC{JG3^Vb4&nEpa zkUxTT>|S$5A~W#mMZ4ki{}JZ99tDMl8a%6Xs~w{L2u_vH_bZ>i3}t-z;fRd*2pVqL zpFe?=p~pjW;ragvG}{jF)`6vXYRe-tr0yel8aHml;OR5b?8^tb4cB2jfl5#kNRv?a zLv`@^`v~f^`}+@_z7qYkFTw?(d;$S8J&Iz#mZCxPMNt2L0%@9uquNX=(7uP3wHs8Q zK;GN~#|9eaqOD`i;QjvzWOSWfvtPFiZ-;*bXsG%GMg=EySz~KZvu&qe8uonxf~oez zQNvRFF^l4cG<^a|gC36Tr(1|_pcxO;`gF73{%}0^u;yW^m^oHTxfZFe}LmROMrBd45km_zw zt=~7Je%c&#aq?wTq^=w2>(-SDgb#cOiL>ugln z7ygUaAT!opJ**%T>q}@pTiP9MT^FQTp zyrwd@t8nL*o&Uva4u!<*Oz(yN7q78;Kp+1yu>QYz%?Od7{j9TB|BKf=owokceDmi2 z;x*PCN3z4rZi2sgz)=;)#G4$Nh(Wfkw)xq&fPacj*Du2p0{Oqrlhj~d@ 
z{QjU7C^K_m9OgBcv^fY$UJ|bb;x+k_F5(;roXhf6m9Be%*SPBq^m=wDjzT5a(@^;=&2Kk^)b+xT&}h@t8uw>J0j|0Bo#NB)&J zdynTW;#O(hKQAiV@ZbIE|0AC;ep%7!n?!4J(2{{(W`Fu4|06Hg3VM{+03a^QAyE;Fxhm%97EA44~T^ZGL@7RW4)ty zWK{Id(iGiMP|37V(aaxtws`^>6_yp%YTw86`mxu zdR^CbpXznL{7K<6`+v0jnn?Y5zx>CepO$b$Yi!dqkKF$7I9or6d24`O)%;V-Mo5`r+xk4 zkCv}>W&dy8H+s>RUCS=X?fA-XLR&u-$xr^g<><3Ec2&(gaoXbr2Y%92`>=f0HGK0N zp9%Y~+0b<|ABFwWymu^pl9BwKhwM7UV6O|HWUWy57BH+v2;L^DX=zmLEpyr(Zkn_8nbae>~xvQzy@L{1Zg- zgQJm;$GWP5o#V?Z#@qHkET46vLHwjHSvL8y^<5($``na^3O;7Xmx|=4KXdZccRbv+ z>HbT$o%ji-A3rR=Gg80vxR*b*;i0ak3n!d))O@GEKP*2Pso#*f^@W|p8|R$xvyb2H z^jAY9KO9+|JNCz27hU$$v`ey_{`;`}nn?Zh*t>o;@kd>6?)SH6U%jl{(dR?*!$^Me zo4-G)@rPZbzPaq$%jOq4{73W0q!Rf*=(_#3lWzL@XOH=4`K$vD(kFS)HE*2y-LAUU zmoqmkEV1=JEI$>gpE~|8Q`*1X_4{x9vEuap@<*-wd`Nz0H2+^e{qonp*>(TFj`;I~ zS$}r=?L+dDk^ErZ-uEtC)%Dh>FU~(Ad6ez{!}1#<^^-58G z6Uh(y*R4q{>st9=FaBlz!Eajo^kMm7w0``m@#!18-l}VrRaY1PPP2Y*c@Keavo#o)o}<9~PGca|Q!!}4zs$*=kBrI(%X)9P10 z{piV4#~kMHL)_W_Yxv&MO;#JW}d+}4xpYHv` zNPd`q(&u4=nh3%YE6leln8Z zvH!aM#tqfCw4Hg_nh8%?{M1D9*PU?FCvV?aJ?|&~oP7OFPXC9|{BOLKIr7Qsr@zpg z*>-{(e_C;G_bKXVtR@I;WKVG1rbi zh~%elt@_=a&r~;$yRiPWHm6_G#3}v1&6ixn{IdG~?zLwxJ<<8csYrhMl*%t}{(1FJ z_MQCMjb}RgvMaiyz$oe{``yTF}FR^xc|6kZ2LhZf8CE-O1}2X z>cpD`&+Pd5SUdg{aeV)8%2^GS*KDr7{p{Zz@Y+gq{yGRcBl!)}W*)O~bM@QXs}A@| zk@KIEk^F7q&*!U4c7AL6-s7D7HAM1vt^N7btDdi3aKN|z`rJL<|BU1}T|E73UC&oP z_~@t4{oQ{#{tYAf5AA*RrwU)F-v03E5C7)N&VOQ`l?OlQczr(cf$Dd2>Ob@MmCpZ3 zMe-Y7?0%>4#p;Ku#=Uq$kE1X9wg&6RAG>zuRWDX=y6&BI&&?X+Z2onGWnDuo8J)0-}cgB4;-|m`o46>j+5TXv-vfV{7u(9vGkfP)pK8-IqDZjPI%Px z8%FXI2Yx;Mvn|!DU&$YPOL(l!4tn~-_SvU42><3y=Myif z{zJ{BzyIC$od1)Gwd#}A8 zae&$1o&3R>=DsHHCUfs!vR?PplH>oXd)X;<6`b2o&b=;oj=TSe`zt$s|M7RZ&v|CY zV<(*!bTlP?n)Uh9f{Q0U_M`69Y2i~pe*2WNk&{|>esK&1s)|?RE36 zzX3Rck+;N?BX0?UysRKA$WclVIL$YvkPI5}5TsSeA3@5fETe4z#Ie7)~ zipr{*(uv7b>BN>)OZHG&cu^u1UR0VY&5mW|zBo47c2*u+ICe&5(8ME>UNAuY^~ zji8I7l1RBc+Ve1vx^PNr)P=RF+Uy`!=*sLv+CIb5#*j8p-p|u7kLWxyEvV^VG+<4(2+93vhk})i%)o2KwJX z{~PFk1O0EH{|)rNf&Mqp{|5TsK>r))|3&)WK>r)qSN%vVD4>1;^$Vz9@Y)%(uASD> 
zJR`iKW!lU+!P(QUZVYG4Zk!X&n%y3@HO`sca&2QUx$XLJ+RSOqv-lFttYg|5r}I6U zwrQ8QG=`0BZL`~g3uaw4YxZ@s2H#X-=B(x$8fS#lX3U_KIpG!4u5NC*KAhbaHoFd6 z8fVRHZ=!Cb^qkq#uWD=$+plkJ9G218c5U-?B}RKXXKs5q`-<@D#;a%Zg`M-SpTl=^ z91!OK;T0{juM37%fe8i%ZqyHfyMOt-`{&4gFyE$8rgrw6c7v5GX7lZ!55>tuH*n!` z+O^Y~TZFIoKojY4&yNu}c7t=b7gII1&%p7KHf({>1Bc~=uuJq`Y~bLmFw(B_vIjTumL})Uh^gO;nj0S1{xb4+iic-*5`}eR8;t>t?PX1o2#1T-jVX zTqC$fa^-Q2;>zbL<2s7#b6kAR$^J_Qd>4v;^K6@vW%U6;O@E>&{2wH_kOcz|a-l8* zWQ1=*(I1y-gZsbzuemjdHBY~~HOOk7$=~dmGh2h4=GMkp)2FotBlvF9$oA>2L0%)t z1+8;igE4bwv<8Ju6g^vW{wrMj18fj|{j%}NBZEG-LW?N%$eB+$f;Dl*S!6)Wi z1$-x)q6WxQ%;T)tv*xu2RpxniYx|twjJ7$gR|Mx?tFAsfThEs@w^H+RJB*pF%~uBl zZPO5&kN!{eM|)m7t!+Zv+*uQ5&6z%9f)(Tdec|MhhpVTxPj3pmzG-Wm**LFtP&%j2 zZfTj;-aKn&*x1r|b>pn|p`4~^ZPTW=H@1b-Tc)8&W}%{xJ|s$|FP}F3syU<&HLMgg z*AY{#tZ6MlEjkACaR#cWt!-{=dvNaD=Jw!>mX^ku(^~MJ=CrrXoi24F zB{;ot!sTs^)2?cr-8>7wsSQ&!@$%Vi?R@R<)VXt3j6z1MT`_HL3+i|e1&*y3MoZnxeB? zTY;m@0&)~}O@hCNW$o?PgM!w%?SpwX4NGYfFKC+y+~FA$&IwWGGmKgePOHBlY;VS~ zZE3!`x!vGE8ag-~b;_7;cu&TK&28;-LAENjQNFdgjpFrfvr)jZCA+6z)I4j(?CZkm zO)?HM0`=WG8+|W-uYUG*v~UH+YA)vb)M?<@da2W=LHYA0pK!eXoS;7y`g5ZGoTNV= z*PlxL`2>F|68dwT{-{QUYE-C3g=$o&Mulqdd$IgcjT2SlMAbM^HBMBG6IJ6x)i_Z# zPE?H(RpUg}I7u~5QjL>T<0REMslrQN@;7Bca?@Es@a=2)Oy1XnFtCd|Q#Hcq%<_N>O{Sy#-S5WF{` zv3+_Z-*?R2xtwzK%)i*!>{Bpk<~>mwGcID1dp&91WW;VbFNn?GJ2tjpuRwEGmEU!k zEfY-IJH9CA=Ik$JeL2>OT?@&pm2rO<32R6?AqVvh&72U&2+ASehv!zbLpbQxc5k8k+&IUV885V~hDoSC6k1 zW>o&b;LOZ{!6g~S*>eET2bkx0FXNSw>Qx)#gJ65z`}9|k7@dhzCQd(j-18#+F;6sY zsO=bSV;HyF{Ir2b`Q-6z^Yh7nC9vbl3pz4|v{y)biGc~-DziUr?C)Szdoc$qd4sUx z%52xAaTkDx0`TC+ukpl!&i&LD=XQJPFW=Wey zfR>_{=$hl`3Z1r#?l=4>=<{^hT$mTE-9Imw$2Ey-=UKBg5{}g}o_kch6{3GG-(;pFkSA7KhP5w9WH?Zh~@%LBq-`Y1XsNp*Q zxBUaJztTT&3ix^$yp4aYe_$-nE4up!_&EOCdGKFt&8kdJ4fF}Nz%TW^(wSL2i;v*r z{o+d=-BxDkzASzEO%^|~c&EvW#W!ZMf{nYCtvfs~So22zKmvQB_UF|@fB1Doz4{PO z8ofv#FPVDqYW`H(7mvx_Ook^sZ|wzcn7nMSbp0Rkp8lVt|0hj5zZ)1pM=#HW@ebs% zvpZ4y=(aKF*lneGK^gFrj>!vNhnHFzcO1A@G|&$+6$7@B4T9}u0~_;5V0+oLi9N}m 
zN8clWEhk~&Yl*;@xzoZ|@8J8_!r}ON3izHfZS64deKnJV+?9qMnGwKvJn-ejmy_Sw z8z75UjodqXV4ricTfuo9a4&}67sc~}ccGofXA$}$_S#uJwU4jL6r|7@sg>MEXG+mQ zVH~|3Vw=(BvPIrDaPUJqwR#U%;s2D&P5+^S4g$k@l+v2`!T7{nK5c7-;r99{GG zUv~D&CP=rTd(d^A-6woK)>o>!gX#BW&i{DOr}k2&?y9Y^*ut%{NkMF3TD0_flo(a< znK9wW+T^2y=Zwvk?p66%kn9o6&+}f&dl7AxgtA9LpLCh(pE9gIe#Cb8RQ%YZ_KptT z$W%e2SRCBMZ_gB<*Rq0hyQQPW+p?J|o45JQo&@%C;lE;iYxhAPb5(|U!6V2=9nV|# z91gWbfP)~kwn4f+0JdX*N3f}FKQ8$Wvgfi9Di@D0=uz2A-=}PJ3`2fL(Bo}~ zx8b3IV>EQY9|8{bCz~#xCkr1et0?Fj?voIpD9($c+kyR09YLRTw{)?$S$V*h18o1i z7hO*qIkZh3W4}~S_=_j*%Um`vFgQ-xwl`@@ zs;!G1-^!;eq_3iIG~-xl?4Eq8Li!6CQ#L%O_CxwU!T4vE{*M?PZRtGcC1fl>wJG$ohs_cpNzwA*umaX;3%VB8EK`A+4#A@Pz282GP1pDD;B9ozOeiv z@uhT{Z1Sc^{_;blBhKyJIqtdcoyfb|iS~m`(TA@4GUc?Pd_zCd7rA!Shwv-9_&!=( z-eCLTbKQTWec4&rR<&~mdS5y~awFXhZx|oZ%Z6|@%K5gQ_iOh;H*#$$$O{S(uKC=j zaussz8lA^cX{*CECny~gkty&c-&uM~{=3I{iNl@d2ZfC3-N=|kPvNaFf&78HY;gCA zlhftZ-vDpt8(mWnF3ot{2@m20`Oc-Z7lz!apB*lw{CVg%J4TDUW$AeQOe3e#SC*HN z4eI1jC;wOQx5LW_NB^8xhtj50c1uqLsim3wSIR~# z?UC*le`itVnG=IPBR9bUGk1*Ig{ba43>_ZThk8E@oxUd;NGGeV^elXCbh`YNy`aMe zZ0Euk`v;aV&fx#SvCiJZSeO03IMx^XV|~BEuRDgZcIF|wT%O-#FT`(I$bR5-_cT^7zr* zOZZV-V#Y_nkJks%Eq?so<|}p}{MdQ(d-0=u;m7O5D1Hub`0-Om5*N(G3XaTlmlBdf#>+s=*;!uV4_zvV389OdpbO(HC{k?d&u;+*W9O(0L zBTp}2DJBNCeTNyxvUCCVDhQd!(yq$9KppvVEP&+8Zp$|)!PX78Z(a|sjo^K_U$ETe z4ccsc0@-ZoeA+ku$8ERChm!wcefJQ#C`D%)zksy7aB0TQEo>ZaxL;uBMXR$XRdA8L-^OcLb4$L+sA?A(;HYwlmF=Z+mnwH4j=Cx zU}>el$y{Fkphur#R-Ok&ym3}fXz0BxT{6tySb=Sig`M3Yu*jYS*tmf(tEU9GvY3}F z;XU#9SYL=OSOH&@0DDfbpy$eku|Cz!!3URLB3d5~t@kIrl)J{OG?h`CF;m|9$fY6i zmIQWawlRFkDwmN{r6bg11`uzKVO5;eV^=PNe?=Q*T*XIynpf<5<$fFLjjd&$51KJ=bDpkD7(+L&Ul`!Z7vlxi zmESA*@$JqV-fl$x;JfW#_}4%m@v`l9oK{~iLhd!DgTY@_q;2t=kAIhgrv&3nFwRo$ zr4c+u{g{u@hIFdUf3H1IzU;vor{APgD`>BR_6~OT!1Jc$(A!JZo9F6$%$2(+{v0yf z0q&(QzyZ7^`vjZh>)p?V7Lr{)zJr_F!w zIF+w)`gxMZ>DLYN9sH6n$)NmvKTg$4Ii2F?UQs$sN-WLPP_KsZ)p4&gOOf-fq0 zujIYT@r$2hwMX>za9yexlzeH=hdU$sOE~lLSHw=7w+3>91XnpRwp1)Hcz~;r=SuD+ 
zT-zw~EY}jQ)szdk*8M9tSkC2jo#s^Hwf(T``^Dcdvbu7d_2Gl~6XsofxWvh}&(S?fpCLXJU%Zz-o(~IMKcbKLPVpGehd$O5_1jeMV{SgVFJovyp0+p~ za*%$|_w&4nx-x4a;>CR<{P%vo%l|h2MSYjy{O5IZ)bA4id75|pGdHN?TGyW&6hgxs zu2SBEb;Rw`#K?@!UY6bq9^NOF>Mhu;I6%<1`X58|mTi+i6UUz@kj?-{(qnqgVU8xd zCK_tKCf`bStPnjKizSWD^>~y_`#GvF`zQD_T3+qRM*#-xo960@snqvkdB^K{xoll* zL0WAF@ubO*w&m-|r`Zh#+EBc9c)HqVKic@g^fBwU%9mdrVzV_LWzOee&T`}13e3Kr z;qX<+9)1M)q;vKJ-|%z~-(b3fuLAr2LGaC^eIKt07$bXCi<6{D?iQ(U?u)rge@PBK zu8o|;&VIFaofK|6v)`YsQjH5SyQ)eq@rZBb>V^Px#vZNCk!gg?cN z=DG9)>GOFn=U#5wuI~-$C&9CD&&IW$1L+Hcwp-fNpw+}teYr44ylUe`inXf`W1kIN z$+UE*(zMPMgj$zcXy~c>(#wKF{S)!%5iGAcIO`*DP9{GJ=eS`o{kw27ruV=p9pz!S zI8CNa3=Q5BtOXHR3mmMkI=ouluqjROO^Y?#*)cmNoVkXm^g|CqB@UY3BV*_nyW3<-C8I_euHIM{A&u z2FB(4&>Dx{kEM^TT`}fA$sYM!f+L&f2F5>?@irtBcRMw`;PJtjSuu9T@a0tMtKDtj zGun0;ZEqfB`xWnt&qVu_OIv@C^|KpjKNdeV9RnZxj_K^4f(=*sDTarFwCZWoTZifq zyI$hzFFwl76Q=-Mkh+8T)spTYv5?p}IyIsGnA@d-UikwB=;q3l$}P>DzBs0jlbUq{ z;atx(uFW_)SMnp@RX#}tFf?ar&BCmQnRtil%wWt)V-I_Nt*@&wf=BsY$b4~yOHa6Q zN`4B0g+`9zVUqg9!sxFM9jv=#NqtKGMjcL{uKptEa1}7@$g{W<53opOe69)fcRh6T z{j#3}T{AYbGbWwHeawBCFCS^wD6)V_K6L?kqFsH_DnsY?8(B9XZ8~YHD_9N+iP1&oNtnRto%r8~VG)^X%`*^L-00;n=sAOM9^$ zOMCtj-iFNB{&S>%60zRT)4pJnjSw6K^czEmsa@$5U@4BmrgU()pVv2<&ogM-e*Cr` zm5qh5-j`_C(e(b9!-?{id;nbAz~vGL*98tP@s8@XQ%~@{%)5sXzqeRp)n1bi8mrM) zjJ3wKX>bwT)*~9KEy2l}#r+2vf6J_CkGEYk~8OL)Kf<^5MT8^HpExLb3JWIK7Q}`30xP$=Z0rwhwDRN4T-}fpaJvb zhw%xA^A2zt-Vwhv1E=sO7^Qav?+d(po|#6z#@~uR@VBJZJNQ5ME3N4r3!B!$E~1>_ zS@6?}O_wdzT%inj#KUK2zn6y(a&T;n;1Ih~ypy&yM&Svc#=t6G6+V{0*Md`guo2l6 z9H#?^-WL&HD5YP?Z<6@EctT@L0Nak7-F2XFpfSqllN_p@F$XX0xe_>)CSPucqy4e8 zzk~LJko7&*F{5-oh_+=5WG0ro(63-zFngKu<^yfqv{`&8}tdESE8SGnmEL=^ef;` zIBa2@f!XI$-&;z5t10K<40+#3ykVoOvv%BlnRUmRb)WRE6YM%-cH*X=uXU&wdJ4m-QUlY_oi+HMEN zZH#MbcoVknrp#v2wz#wzq&1Kxyl-Rt?X1nK%`J>k?>X*0245@+WkFv>2I}>-JAEx(YhgqysU2f`By0R6ntjaA<-I?i3-1Bp`t>}6H@oJ%EUoc&KojHNV8`K$ z?L}rTJ(MT=Zuh|rrD@(}*JNy(+An~n0sB|fZ`{#W<)o|CPa!(TKgSLW`qrEIw)IzK zBj9bLuVUz`S@Be6^|1WW>nuNxqxe<);o!NOk$m-i8%ixE@bJ(GWJtUCH}maxN|Gz 
zlEg0Sxpor2EaSe7xJmx{{(=8|tbbrJZM1TkHB{~iuIC@=A2{~W{(*q@=BW*?dits5 zO7L99wU%}Yh^uHEInX~(bi)TWdo|+EBZKwblGT9x5y-F47w(iUzd?Bc_2f6mFOeQl zIq59Zu6xd*oc6l;Hir7BNf&Ik>#RP`t#x0O*L*^>X**zOeb<+X?(N_>XU|@WKY(+s zQGAW^1=Lr}%IiqcTJPF-F#(?2%lQW5P5SP>!QrH7e1c!_s?DfB!2X)?qObf3`2(6a zd4E7~3l2NJ+cVeh0T$s&W7B?-t@y3NT@*g@jh&C;@pW(~+zPJYdq+G@$_y=umtf=L zA#XqH}@`3w8d?0mewo66%0O}k?=|25yz#mU~oo*pOB?l*6c zUC?~Lrsq)jOSnHW2yV>_mh~9?j_cT(wKJJs0s=Zd;`KpgKPc~`w6FT$hB=YN4f0{= zBixyFGIXeL`*mm*-RC3O-iF-n;`&V&@`*fp`Xz?ZPka>huVq7(wirA`_`2Bh#LK|o z^GlJN>h(kZD7I^`uJ~>Vy3yC$L_Iw#9^>oF1}dhhc8;MP@rCbG>wRW@Lwb=ixna=z zH2fpF5);XPS2oY(jUq4Q^2Fo5&+pQwfv?Cu+?RfQ$EB> z+R%F;@ABCskKzF@ADo6TJfPT?_ld3iSlDNgFFBO&!B|~9F@>J8d*juX@Gt+_>o57y z4}(81m!1ZK$MTwy)wISVTiW7msp`uYQ~Few=J6vx#^^?L{e)ro`jmq~JhlnCTR--` z%iiYpp6-i@~tklyc^}MyB^t{w?bf(-ZQ!-WNhK7 z=}z`b#=%)3?^*GQY3)&p#TOE=rJGtS4o*4G+^YMv2 zV-A_!{f&J)yBFYVs!i>~`W1BbF#H(!gmdll+g*-{RgcIE4){U;z$d=X{$T9s6F==A zctSSUoelXQ8&$IZ2ivFwxbrsZXhW~vZPXE@iO1j1MiFQE$TsS4h3|z${4d^@Ke!P; z@sp(=%}yzv`F?iF@1K&L(w^LQ=AJ(0r?Ip`KgEJICz-*s=C{PQO)RvGXW1Oh3Dz7I z^lsQ0>uYD88-T-W+st$5qlNT0_A&9ed-VMBpJIK2x0Povla}{w-`4dS~UsaT?;lef=SmlG* za>=yTb7ixctKTDB2|pG1+8@;aF?;C$s6F&Qjz!k{9hhYRTC{4JMeg3cRJl6L|g>c&;(eHnyLQzRh3!^`DF%2m341*^xIcx{G*dxXQ=d-5Hg4DXq>+$6NK zJS3id82MH^lBWy7Te-oP8B=j1v7&cJSlX2`$D9$$_AKCB&cvP>;6iCZd|HpvWAQ9t z8A{U}S!vQ^mRA~jYUxk>62;{V`m?yNNh@ElGyqEz=?%PB(oQ@vHdC8o%|4AEQq$R8 zkY1F@OD%#|7iE}t_x^7GSYHKoDyZYvtD#A8CGaav{8uu;0O6K-O zr%@*CThc$$f5J(ezD2`RoP0~~WI4UVoH4(``Sl6PYFv4a?t1n-pqQp& znbPlGeuwLNom)_uBI=c-Uw}h3 zI?@fj1Y;IHf%^ZAvps@CvLc%#nO0rjuhR2e`byH(#|YZ=<;ooVDz}_+@_BA#z9!gg ztRmRH0(kPAPExw(BkYf9CyVt2)w_mz!i|q{C?2Q%?y0}$29lp;*vo+3r~CPy z(ko3m{OR~uWB283%*56&cjdB1Sv%{?Rno6y!}qh;m&1>4Nc$$ahg~Y59>=l6rv_D9D1~ea)UnE&=LZ5 z+Uq0!vHJ`1pyyS8(0o+%H2#;NO)EIi{2Se4WKGYK&p37ElNy}RhIG|5N6&=#gZ7Q@ zL%Qs&V$mUR=7iuifvrhlPtx?2vT?KXfm?D!Sz`+Y1AUsf%>wd#d`7aNHtu!ht)8&` z!^FkxcQ?A@Uv6InF_~h;7}OqeTdeX0%>D7LdvrENbxI@hoCEACJ95uu^O;`^%D!*M z<8-FuFSV()PTyBemgd(M?;{HZYminSDhF?ya^ipK4B?Z#$efk2Yo#hjnfx-~&WR^` 
z%Fx%+DT;B3CMsX$-WAu7jD8LJc>eNy6^$!z(ik1h#?v-%0E^mHANOT8VSD~NF8|Q@ z8{>0GSDhSmT|j;Tyy9abk`X@^jroJh3U1%N_E-n-^qpC(p|;@MwJ&&B#CyR`ao)>n*a?1_co+t&Nt^Rd2iS4Q(xA8+zJdMY@V&Jk^r8k5C4 z^1M{|hTb;a+8VoV13ddFU%a9D*$B$L?(k{P5-v;M`gE*M`{h*s*Dj_inLHG_;XA<_ z;Ftbn>yQ)lC~ak1TO*%h3GZR7lR351gOpta?PO1kJ;Mi)zO;Ha&748$mXE-wiD72T z4-6?63KvP}WY0Cd16+PCD>zc5i&r0t&}QYf++Y#c16&Vrt>dcVD&*2RWbtqw{pHX$ zwyE&XQdF@KbvHL=Oao=@em+GpVIq2KuC-PgwK*{@{=M+92??Ab5- zF1S!+#Wjw`_FOl4@RQ;T*}z=x@-&X4uyvt-_v7Y#Ng+Pa+TZ2|OSqC;EnHK%qWmv9 z{CW&~tRgZXI!gvlh2Moo(OGo!e4suhTb`$90*~k``Aa&yikJPn^-Jo}!Ngx_o4n#h zE|%@-Ae!!p&+T_xmZiNcZhk7($M-gN>kE7s+uDpRbn=w{AMic%0pgOHPY5o_=#UIF zbFMoF7$j$AJ`c_X^Sgt16iwk(Vn^~x#7kc0zWWP1e?Z5>SLZ4gl)s366zljzv5mp1 zFO6-!_z78%ez5#vWykB_5e84z9*VzJM|;J!w^=xCb@06TY^=|pVHaFMBIr#@@9-Ss zxjFF!buHbNZy??eJoL*t%+ky<+KIb%<^#L89nWTRgB?T*+PMc|kduBZH(0|}%T>WO zne=ilot?F3ap42U$9~V}IwPBQf4BUtoOs7ul0Vtk!F|DwCuGkwCe0Ci{$F@6@asNq zKZIg-Iqa(_4Dk8XHjl7bw#t)$d>S=GSVR{c~>9WYuw4R^oc2(Z*2C-@5I>$ z<>-c%gg z^R3z*9*5T0qH$>dE?oi7ZLczL+j)@u)r9w5fI&RyFOMVaBDOckKY+SQll*x7PJ81A z(`es6d$^wCT<&IGl)}Z??Nn|4ZmqW z9CZ>MgYmgi;Pde=&l{q(pDU{m#Z{%7ZCZ`-cNAYWIu!oD68(D9A)ddF zba1Z)Z)$%XFljvSD6%8nXVcf3bQ||j8O?3i8l6mB7FspMZ7fRh#)I01Vo}Vsh+SK{ zs(tB?(oq55*V=u}MfPJ-Bu(?s|C$h(J?(-+@6uIzE)Y!MU;8$^y?f|&>=|^*dm}ek zcvt^G8G6LiOS)uJXmzIQ<)PnY!=lv=C2G`G`{9~XmI!5`OUY`m##`e2*1zQ09 zq)+`Aix}HtZ1;>;_?ceriPt#8#dY7EtZg$!=|T7{&5%=y^RjeMg`S&FRS8tiu!5fB z=?&0Ba-cCQT|WC%(x(hdhi{3=N=}UKG=8qiYi(uM$1FUw0ZtcyTlx6b|KGNk<^LmE zgT5`U+~#8}k8&Q#@YW&Z33n-1b`gEc_GzAQ4fHG!4$zm~tZP&4%n#uQC*$&wu~FN7d9BaOH`lv#+aktH zjD0X)>%x1HKhL!%Us?86FzQ+B1}hAl)~>B|&swAI0GD*fFJ-eK?7g1kyKttqjab-4_4Drq)S605Lc!XGWkIGAi z)~Y;wCqH*2eAo7Xm3`^26|OB4`=c&t+mkM>Lo&qJ)MmR&dlFl4IqP-ef5~b)V`=C4 z#UBx$;JKA&;Tb%hoEmNWReoz|;`!;UFpV51)7Soaz~~>*W2MT$6VjKWnc9xZnbir_ zW?|gSIU?H6_b_O0=`r-_^E99Hc{+>b^U!_STJx=>{TOkJ+aqlNQ%?(drf*__9nf6y zDZ$f1d4EoeK8kMwM`dno${PVrfBS<$oEFE2jb|?9gGl)T%D?K$hqR|Q6z}{bV}J*W zUw3E3TEPEaisbue!J8ew!x?`4AWgh|*ZgsbYfH4uVSiFbsB!5G$`HTS 
zscn^mN7P3y@cp-guf)NZ>);c;R7N^ia*pjZaQ*6i;F6y9wD53=?{>p8e5AI#{9xa= zr*-Da!6W?MHVmE-!{8YSJS(A}hv%jLAv|i!!-L(iV{o|k>HdC!k$3Gs$0mf5*@@{2 z>dV$@4t^GUbt@!G!Nj!6WJ?B+YuTsaxWT{`cW4g@V_%|sxGcTc?I~GQY3+M*ll?YS zdzv_v>tgDT5DN7P9YvHh(eO`bs;vmvDAX-?7VrMj>^x7;_nQ^nI|< z^;_rqEph!8(Qit7Rj8-FZu`qXUlVyvJbNCI?do)8qA+M&m99M>8_l6Tweu*|i}|*MnoFCQR&NgM0DUL3(Z^~Pcm4v4F`rr<|Tiid=)~hf$TD3J%W?@oXV0mI}=B7i5GaFmQ zxdx+Y!P3tV`Q;fThpIBW&NP zlPo?uQVV(>`t7RB-sxrO2Ts@b#jwfj|0rbtSS*De23KoR*#88vg~S5jy*-o5`m*BR zohkh7R9#O&B0IgBXU==3Rc1A1R`RYgia`ekRy*hH1h299Y3O*?y}3*OG&mR^*@chk zo@*SuRYzEOD;>P@r{rTuW-@={n?kO956`>Yewugpw`1LTxUJh7U&!~f<&O-_d9;ss zhbg~m>k4S5Sfb)vns@m)W1Qy-&H!$9<&>XtZOiwRTx(u;Gi6r0zD4&LN~4Vx-?yM*#9RG;T5JWqCQub{66;57G8YnFDunBq#> z57I!s_7sM~DbGI6)IeM7*{vq_#YK@`I*=i&wx!(w$qIUufw!&z+5HuduwDlSpPZ5_5}ztEMo; zImBeM5YI6C4lT$eQUe-NRT$;*B*Z;@cvA!wbd5Y-B zyXFV@3dI#Fhh9mz@*U7!`mhMyA|3nYyjb6jl-ImU{Qlh@t84uChw>auIfq$UGv`f1 z#VtBA-OL&7w}zIbf5Y=;7f+o6Z)zR@&*clA(c3uJv~_46iqh)SWLv4Tcq;CYyA~)~YMLYGhukk_C{LR#{-yT|)o=kcXJk#RZ^>jst=HEyg?TpRu z<&WlxH?AhH#@CAzKX?7tGC$VZ%htaR^p&{$IyXLir2M!`6AdH}Ilc`yj{b3W9J`LQ zJpS(E)~3hOlus|o1nDHYS8?V$kVnOoIa5bFqG18|PyT(NZ)Jq0(2TgQ^+Ak|yc*ti zeYe1KQNJTboHLF*{0shjfcRyKYYSJ+Q><}uFJx|Bz`Q-#(Ld17y#2oA{R5j`=0|?G zKYZvv2PTsL+ogQJ`o-L!^rhTjCgtY8kQ;2=Ld=Q#JnqfpuYEo@*u`}L`TO6(*%ZqD zmgkmVv!=%N{FnO&8t>>IC?_5ezM2~paCQDFH%OEB%59uKSel;VJ;86fS|-NH60mq22apa)bG#mEOv?iMh7|&m{6%luzAV&(St%D;eiQT&-L! z)SL7yYld8F$=?FZn}41gyu~$z_c9hq*OIrB=T=~vB)BN^3ReTLS8^q|maBiRI=$9?H2ZCh{lRUtwak_%-m-ViTubwN*Z#z8xialZmcMh>{vJ^+t%b^Itd*u>q(7j=fj#}yTy^f>bKT<~w! 
z#+0Y|wP@sV#$FK@XWg7>Csw6A`J=+w2>8v{IoZ{LC$l|1JN zQO`Dh%(t8)efTv{t&>zm+BNpw@B}`+N!M8IcNB)!<>@c%;LJTfwc$N%l#7qf`x&~2 z%a5T9UMX_+-mJ6ZJBjA9ITrV$GmkJ1#iw6)YYE!NVEM(ytu=1p%=5AzD}{*hjx~50 zo_`hkN4mz!(dZ1{?lRHD1qSz7P5Ql5@QeZ&T^Qt?KxFA%6d_ zfrt2u+BIwLlym-z+4q(M4$xb-<~bO|3#Ul z#Af&G{SNWx#z>m{hue?xx?*%@n!&T;W5jWYQ|p|Qa3uXGJesww5PFBFvR;k8b-t(a z1jBrM2R*A@;X?Yzj1&H-aO-873&n^PslKO|$LV}SM}7aw>Pz-Lju#GFj`T8z|`X6I(#2DyDXYU4KjV;)^r?!0G9)6Ya@c-KIgmg5zI%Hglg+1}a z=)rM6MZVSJj`tY1{4uS~N#^}|N_b?vEl1nl_jfpo>I(6Q%1Pd(r@XwEK8cOTe_GGI zo$G;(tcrcUe_+egLvp0`4y$v~1!?I6)=z+2-$j>wM28OUh48er2GxUXr+_u`NrJXpW$b_`K$N@ecXNLCVM{etOw-- z@U4CJ)yiJ$oBbzZzhuXwHkLSfv23d6W9B^e`*Wf>c#?jB_XpENGs!}`nJ-$~6d(wR?J!9h;;iqgoqvQg9 zGo*f5x~OqgM!zi*G~AV`Y5;%rU(M9ibLV^NlWMr9zn-a0b0?Nok>XA~C?P!KdzXjc zK18l`u7UXtdqhnZuaR5~b zuR4;y2V9+X&aSL=^T;(PSemZhU};*DhR2x?O-gYmCRLl@PHd?nVIg-tNjB z|D-MViYs@Rkw@9apht3p9yauzVSJ*Xx6X}6G_Rxms7{ltBG0D$@O-})U3?8mT2$>il*_yn)|E3$T~SMh@HP;OVmkE#(K!5uP@e+hV)O5ED- z!4uDygNxWttUP+Tm!5pfwZUGr%m7G=ddI;Wtuy*TGk^qWO_8`xL*L~gKcZEldmeIvGL(c{d^ zxt8-R+ce|R+@Oi~sqT8WD>t~Ooq7B<{R25X?|6)QTt$@I!F;)B6MLRWf1T?Au3fad z<`L!>Ja=;4J-2^gJ>_=syo1a8j>G+5j~8%QEZ{oYcmxHOG~`(4Is2C2zpjsxy`Hy34nFhG)Ac&eqj_3&9~AX-=;3 zm^lDEH-~YRI$pdCSTr`jXWZkk6uR&H7QPO#lLS`-&nvm!<#|u|)&A=!j>XgRSv}sO zIMQ`7~5Cyxote%`l?HN^1`needE9P zrn!jr@M!NJahcv-ixfk(b+X_EAFr7@hjVf+J*IT*uJ(w5@9ly~WBVLu=YZe-Ru;ec z=D)o(S?4QA))XgOW9r%RZ@cm)rta*m z{3}n#ZkN9Zd#F7FuQRVx96>w|&1KVH^<`gnW&N3mQrZzN1&`!71i!!illI%eFOl;Q z+C!o95Ss59S)#1=2VjHEzE*4B?YAuaxf;8d$?h4Ep7rM;wAV`YWV56P{Jht+i;Ofg zFVu4t`CgXfFFfRY5!KTiLpDccgfsEBd_DOAK3!$B*5%vM9+N8kSMAAZaBZos@fWdm z+P7Nwf3uGwS5JKe;IIlFHP6^q!4&MFKk0{^ZmumjYM{G(N&PN}V6KUbAAc}E6`7a& z@vF?9#;^3~_|3ceVElq-ex%<({V;yP|tM&vBda~I2E=t$_r{TUfewx8m-1V#PKgMZO_>)fa^$J|tovh0!Z<6|e z*PKqw_i6XA-qP;mU3FX4C$LD~R7U>j?Ud2;!G9bucUEGM@%QS;zt7?~O3^P(?YqpaXkd_n8pX6_Z#@Ex=0yYZ`w#ZMa3;JDl#I=faN+$T*u z=N0{h2B7dyl7;`qx;rKU98^*2(l<=iYTT)VC*_;C&RWgX>vy z%b4Rc*=`@pw`&JU_L577+cxwpp7-UWw)?IBhTn+C%LgrT``a}>*=4`Z;br3!2LFsp 
zFdF!=13U}=>c5m{@tdBl+*-PL8{p3sm~X4OGO|hFB%eKWmUoODfgT=rq7Bd5yYJ+4 zycsL~NH>Z$!nO5j7&qx!%N8Fhc35ZX)!2ROkv2pJ(Ey*uw4pYoBQ$2w)8{MB6uMY+ z{STRYfG6*3ms8&3M)pSZQkrP0c903tL73J$ihifj)5P^tKOj%CD;R}aT`F4! z9Ti8iaa?N$Vu6EN+~J%1;l062ip z$cUajy%o2TZt*y4F#I-_Z+86q-wELRVBpZsORUbfd+s%+>&TP;plA72np@xQ%9ls_ z2Y)X+KfWEyj)A{FuL-xnl ze?R-Ca`2fcCmk$YN=K=#Y?a^`0Zz+!mOT1cwd6qbPT&VO$z*%7JS=+iU)~EWwX4bkT zAHIxyd*vl)*Uyn!kjb6jV=l&hcEix`j!!{8=ea#7hL06X3W<3q;AzHI${0$%!*4v& zp7gTTycqY%dM@Qz_^Wle61?nv?NR?)pZy&9*ckI0E6TGr+2*y8C%mW~eVbM7t3OkY z`jQvlAAO8h3}uUO$etxrE>yi_I$L{bx%a$n=XsL(bq?2(LD6V+jMZamrT7ypqZuQ&X@f!3{4bLg)nqHc@=tj;ohf90>nODAn+Y{sLqGVAzw19oK zZ?P_H?69Ld`XgWc$X~SbDjyp83EBHcOv-;Zrn{+sVCIbefzMsmKQQTv{(%>#@;m8k zhID8xH1fX@zPsL$9%YXk?H7PMuLD@e!xs|o+V6|#+1p1w%a;(Z>pA3keeAjJ2ON%6 zZmrSvtG1R>CIlZ^XOzAyb9L5GM*OAv%XzNgS#luz0sf2)l#M6@4|(8NzZ)$)6+mle z!}75y+pAok2k~1b<<7q7d~E^iE(M|Vnzc3S$=5ob>U(}J`wsD^Z?mU?Yw~y555(nd zPBbPs9@{0@ulcT=Po6tdU*qGAL`ez|)7h?IE6UoRN$0vr2U&_FDiacc6=NFBULEv5HBR|kg7rl=F6GOoEOhkr=gKP?Ut#LrOgw#WCa>n+Ovkx) zZBerAe_Kd8vm}MCPK#Dw<*H|nX!W0Lra2RyfEN=t_2$u5Q5wI#hBF@Z=#QG4(K9z^ zg4E5Ky|Z*)-um>C?bD%y)+$10+n@`6uhk=>=j1o+yi@bjQz)Ak&~`}M37sDywu_z$ zQVTMhU)AqsT%$b;&scnD?m9YiEOktKrcZ10j-(H@BY$4{>I<*g^=XX3MsR%+Q{db@_18iE2Zz9wk`GSN{gjv=3d=8n51E!}Lui_&S~1=Rect8)`1k z#B(@9AwOh`sV_gm>OQ~ksT{rOWzgDVY~+X-cFOXj-`|A(0bb5zG4B-*2Ijl;N%o~% zczk)!ABxMWyq8lipPIuJK+C)|mtcVgigyap+oKpSnH6>yC)j)t)8!_*WXe zw)-B2;doQu9uFRG_(cY9fx}yjeIn)?C?0=|Rd@;uI=goY7K2a8?ozjZM>bqIj-@>= z;e~xXKDF0q2#?kWQ9Q)Tw&LB#W(w_}_M zk*<`z6VDqPh|aR#oL-QoUXSMi!KQCSdz#BudAsBBlNg2{{XV1Yu<`#4E(9O*Mfgg* z=f^7@FP$Jh@%Z^R_+j0;Tk-!O#yA+W*MGx#NZ&7!97{e;+uX&=_N)NssWqRaZQ%!* zAs!>R23Gvu(~uyJZBuDxNq47yXD~E?t(lkDeiLn-^-1#`sQgh@|Xb66`ua2ZE zt(5VWQCEAHHLu>=E95D1<=xlzs zj_;J)Z@I^?ks-MFeBVG{g=?eIwNXiZum5XYpN~16FI^smOEmsCxDY&(#G`JmH_p*B zN?Xxl|95b)1KbOj`oWx! ziqmbw=+MFYYOWL)=QK@QYOjh5qiA$wE&Qdk+tQOY@QB{U=XOuzg3Md4oc4#+hWbX! 
zf^-6(hYR00==~DB^%`a5w+yfQ66w+}UXL)G{5|y{dE8U|CnJ4_^sTwYmGsv|`tWvQ ztm}A~A0$62%Zdl+U9u#<>RIH8xLmsC(9Z4@^E3H{pv1_ho+Y31aeaH=Bv0e;aZH_o zW-P^m!IYs*&hMs~>zQ^2(|M1=0U3-=*^Ew*Kk56_v-*@zsrIBBzeJzXKZ-|rI2MpL zi8PPrfY_qXyTRpYjH?-+Xc!>xzN|0TN;%ncje-8h%T7v1iT-{c!El{(GCBvn)B9WI z*yX@l4xFB*7f2zP>ltS`W%D@aQBFO@?bq|{;a8r{$4;WG=Uv5^m9F15 z^f=LY^entH&h7m!cIWej^AEzS>Un>ojB$9J`ub&&_7xu(j7O?ZGX{QNiSe#*{aRgb zZJ6IL=5g>TaDXJc7=z|R;GFfpJm@F72F#t!d=Z&e{NSp84xQTx@5>FUzRn)UdpVoO z^VF5>=jW=uCpT!T?;mI)Z3XY8E7(79HlEn&{R3<6&JB({yMLga_eFQ+1}B}_Kd|~P z&ht?&=dAvLJnFp5v!BB$rnfkZ?kkY5?qYkuDjpa8vox=V4#)gT`yH&`FTV&Gd02A* zzb?r;?58y|zCq3J)zOpj;|Avf*f;H`B!93>%=}JWVE1eBZO&IG4efgnd}$X0mXELV ze8QdaV`wXce;<4$*7uNK6NIO%taf%^&2NpG7<;e`*&6bXfQfH@*gemN9^`3Eo}a%+ zJ-_cGT90uR-%G4w>ae=PkLoIRa|d-jZ2HX)j#HX<*?yh%Ha-^5ckvscem=cxIdnXS zbCh4m4bnX8+Q{W;q&S41-$-@^b18f%`O$pe>ch@%;YM>*jbHF6Zpk+edI(1w7!+$4 z9P*K)<5fK$yYhMhU1R)8@#dwjei*4A9kc1z^D{JYb4ycSbK_Q5U-1&(ztwq=NeZ} zWwb7)-$pwUzAlfXYpz+wRcfxG-=0U;8vEeuA?s#uyUNRMsE+J}o;4oT*LTpixOH#M ziEO_U)0&HyxpICzI>t9UX>XUeJTq^^-m-_LXRolcyMl820WYV|d-Wa!r>6D&%}U~@MF|_* zPNeL*oZ>7~X{Uj_DK%q>@q=&b%%}&xdgz-%H>Q_nX7SAeeb?BZ*O(fP<(o2^>vncO z-xBN7Jl)tJCojZQ^EbJDfi;|y5?yO(yNdpE+4Jh=pwFyxWu%w>h~D#OHai&fB`hAarOYJ?#IAvp=y>w`t(p~yKsLr&goW{4ry+`%A_-LLl$5^zV z@Fdz&UmnimXsanwcRl~G-#`YtBULUlwHhy z3s(-;F3P+`+tGE-5FV8-lYSimZRC@V!ml1dx_AlvoS;0#8!6Z0*FyJXH&k9WK)SmX z94eMYTK*I8fd96L(d$N6n%^!nzHC%a#eUwQGV+ImcL(@QXe)0eQ$Af;R>w7e9+IX|2m)1S(8C(3*!L!8{@2abHvX6pyzR_TOmEklI z-o+2-fbE86%$XEB)>^~hw{(o(z`6>!(4K=>0v_aCe)2ckH!mCy`3vk4t{$TWxVg~ z&!e=2_MBN$!tVJC!<+bSuT3lAd5U|UZ1Pua%^G)8I?1<+YTf%y3GMv@HwnJ)#a-)_ z(s_Y!?Y`rsGjduRScKkaSjhDy^bgOIc&_2OiYw%K__|`0K8ic&UGZVbfzPX=ZRr!y zQ@@iCcY4yyeaZ7{hikxt{Da}T0Uj#Wnw4}yA#D%mYsrfETKf>ZoT-lZj{E!WUpUJ6 zs+v#ReGS@ctT~$hHi&#b#e6jm$+(woU*5*b&AFiQzJJMpeDWAEMzcps`bppXiL{|} zj=sFkda=d9enSvkNT*2Ve4oPEFNHIFgXr3g<}_2>dD|THmp=zI!pY(r?mUciz1o1c z^UJ+o2wz6@wRC>Amo@2p;a`01zyAOZ^M7h&L;P&_<_fQQP8UnRX-+k$i;)50j!6Ri zVD(=#U-G8!6=>ceys90258|V=dGq_UDV|XMIBiLvn>h${@ZUB-?;08j)~Ma^-!{PK 
zF>9BCM{Rifpm((gQ4Igd&Ip%UKh@lf{Itp_4sr3zR{tXhgL*;pFMZ2EaYV)SV`DnI zr?_(B2|tJOd@K9le?LI_L-t`FXCu6yW%azZ5B_|K*I|Dz3i{Y%J_x7e(d!}6+kT&c zZo@H^51H7#cUQyezFz9UwA7?zwEuo*<@Hm#Q;orrF zdX}%Da?x+HR1u7*M=x()z&>bn-x5spnYo_`E6YGX0^i;*lO_+ziEj6LFU9)%-uMrAYEhMh0|lf=~U9P7_(xG{AQ>5 z{;1>?x#c$pgWePL`*Nnu1!?WMl%7(a^i(-{(ns>Ig;UROI+uF?OLnjLBM;1@F_lg`5mpd4f!dO<4fVWGM|PnlijT$)+ic?M&9=QgWqn;xhXfu2ZsT7kHd$+ zVLfX&ue*8AD~H-xT7{A4J2K1Mp3rjhZsNQ>ihVu+JlcO=!L#w9c-Q`-O6Vc|rSZ?Y zIM%oPdy>hZHzZAKg$eEzpcutU}wxqXIp8ETFJEML1#C`M2D5G^p#VE}0 zdclhc*T4DQIre5{k6V)d0yrr#<5{&;-%kw_)JgFhUIp3d>@+kRw-6p)Xy6b{1e^L7 z4+_4Q;pHvf$0Mzjv{Lfd@ND*MSQu|T1>R8lL!>MIBe=Cjr}a6(sjbozG`JU3H%S z^M9ZJd6dyraT}7Mt=W#MN1f=~ta*6P<=kP!BbGlVnsT}$TW1`QP3BCpjUQobDywm6 zPg>u&7?bXay(t$?a<8P=7PK{qMKhK!$`83r_t^qPGY;a^3 zHD2LKIQcGhkR5aZcf0;wq|fv%Ptj3pHfrC}Su$}TIx8=-&l&7TL}$rSAD!`^+?dw# z=M2sHO~Rj~pYD1c2j9(Pzwj^5?LX&!<4Wdx{HOW9fmr2(1WK(O$nSYJDM!Rdte&&F zO7xrze0tA=1M#Px7w}xx`Uf^Bvg47i(;Bk!C5!V#N9a(KbHXohmx8Tl?b*^~-veYA zD2)!5ez^A+(V1SsHa~7Z&#&yIbiZ`y3p`u7f0VJ#=KmM`b>xqEHC;!R#ea32one`NE~33M?X)&?55TllN3=HQvo6mr&o0lc!$?1l z^kJm)?DFjL+(FU7lN;NS{G^6X`s=Ji9!%4ki6~(ub1Hv&*x~b892% zCy?GqI?pc8F3+t)NdF+|LrCY@<=N%AwSn}Rq&JYxv&*x~b89{6A0oY;be>(FU7lO( zNS{S|9qBx~Ji9!%){=fA>9wTu?DFjL+?pnRHtA{7d3Jerd2X#K1mzs*HKg;L=h@}C zHB|_*N>7o_Gy7bRaCvTZS?f|dM09yhE1l<7c$0619MS5K&NK6|BZwJhYtX~;TlFuR zo^IARyc$c_fi&IMc}qp~6fH%&KDxy;>!Vjpt3Eo#H0q;IOq)Ks#5C!nM@)-8I>a>S z!+(tXKD@^`@56VD>pnckIPSx5jN4XlDZIM;3a1vI7MB)}7KeTK19!ri_UnHWd9r#! 
zu^bnjmqq>_`?+M1b9~(QB>O+w*zx`6LGmdym-r54WgjB_(7*RS*{;o#Y)3NZjL{wJ zsxLq4KGUCMyrcTn_*s697e7lc-^rO^mDfD&w)k1=Cct6Rw^7#WhQHJ|b1yR|u=-hJ zv~w=`^XfhK6dFePjlYrjJ3&qBExmF58B_ zE2w#`^{qAL2tL{W=z9dx-2-W2_e07~B*&=pTE?c(cUe9r{3pFHJ0jbuJmFrxr{+5E zGq~-K1z5hq2aoK9<~T9m%1-Ib?O^bT?~AdG(zoW**c>N|?PqT70*B&nf8e{CGvkp`iKse|9NIVQsl>4p10$_;RfzM;lIHDU*b<1$Gq`gV2N}_B+qu-O1u03u>9fn2oK^lt4nSg0E^C> zzZ+O|j$3U>m)fygSd=!eQRh+NIo2>m3*lX~(D`)R&p#cS9nW~IepnZmjd(ngX=2Y)Y+P16p*&Zy2#ueAwL2kh7hdg~ zI}a?YzNGu4<1{9h`H%3RGxt&dQLMs_^U)9N>z#2e@nQb=Uehnv+Sj4B?AdSC9ct*@ zudijJ6eF>GM42(4jp?R%9M63;uw`_{6Mi7p;DmScyHw9G`i^`9 zTNZlu$@4bRlJP1}x?6D$TTVQo_ahAcqPf_28v1#{+>HWHs7!Or?~;GGS4&k zjh@9LrvF4)*#hCjroYuz$8{5SfVhm|n*+gmVhrnvz?!t9|LfTOztLuFb}TF66H6P( zsPH9zQXJ(-Lu=;x34G$OfHG_^`J#b%UpTP78M=Ln+E<-Or|vH+Mt0~&a&n+@>YIEc zC+b6TqW<)KQjN>PC!QD>8@GDP>L2TKyj$Ppx6cjE|L@j!$r=4bOqw~pC-?*qeHBH5l`yADNE<9 z(IHV>UigXZZAaHsGlpoKwvOnXT({GH+6UvYqAv|y%^Wc?)`7A|9PuqSS>Lm#WZdI^FPUbtNdTfe-akqOt@J7E^x8%9pK`Ox535Uj~?^t=$#wc zoAc4iqcQ9n(~PVWKK;+`9(z~h4cTaJd99gO@}2w6=&auress6?<@xAt?TZW1`S)Tm zx?6ii>8@&h=@$Q-@?F)2GIw;8xm&B$S{+>^=3Qv5X7jDpi9YvdcXDSl-)72X!-X~Zy168+y;7%0hJp}MyhR_rFUx+jPD9B&I|09%3e*3`EhT^Qr*txaz zf3!B<5~pP{(cPA24*FcQHvW5rc)^AB&c*y&8cj~vUg<{Le&pl$@Y+y#NNc8tRok}1 zfAKn${iaWvZxZQ_7U^(X7n&oxveVJI{%fiCi@tsRx`%8>^+^29$K$lYVi7Lq>(%## z^!=rHt>-t?8ED@HXDTkcjIyB2_+@sTMr+VkXJUk>ERF4o?7Z}?Y+owY5qb7Z51c2B zXDn*d%8+!wZ4aAh^i7`}#qDTqw|iS;OQ_rDD@b0Y`_&fCr;*=ed$2*|e+Vx6vARpP zBJve>RKEy}vLP1Nw2wZCbhFl})xO651KLlP6P{{fJn2k>^45b-dpD`($F@zCO~+-S zrNOCaA%D@z`=|-~dRJV|eP#Sl<^Rc1`+6UkxUYBXrTx66IhYsoq3TQ5X-!o7)<@N{ z{t)MBe9ANZlQ)dKDAu)qdIRYeUMo+wj!WL;KAzvd-k*s7Y7d2Y@({||F^n6#uXjhw zzTW5gZyVolquvf~f~W_Cn@RbgaKA|Uf#Ci*c?W{~S@JC0ukvi+o(jCxk^6cN<#%_s zA8zegw|hKQFNyE(Qva6+!1u%I2c8x0C+YAY>HYNEKkr`hEd6AUll8t#p4}tyJ)W&B z?lsT-yo>L_dFDumiQf;4WoDp#h9CE{#RF~c%i!V|{x9Tz1o-&qq5I?H(F5?{wZL;* zoF^EQWB5Dr#DfEQ^D@$lah-wqDw3D)>udk^&Lz*%@_L?I;&K*G;}d-OGO|>A2=pDn zSonYQ@c!|<9dG7o*V1Gl+#e!uFqxW8`hj?JDtQOu%}M0h@tn-F<<0K_`)R}X^?siJ z4im&( 
zJKllx+e@Au??CP?|LRaL5BaVd;|V}S!CZO|IhI6@xPOQ>odr&vi8uzCp)L}{IZqSKR5BOcy29Q zDf<`ULbk1=>qTHuS~TzJw|C>fb;_gY2c5voQvsa`~M#yU*&)u`JGA(m6cgZH? zhdh@&o!i|{u1No%-KrDyq4_!Ju(|3ZAn0ljWzdPNuy^8@DFTgEH3;PiWub zRK~J!f>`%{I(7&jK@2EW;xD|~~x zatCx2e}N~nA5G^_tnZe}T<~M?1`o=&w|7cyKX_m7#Ru){ zZC}P2&|&*}i)Yt6AH^0uv%Fva?RtV_N_t>})i2P&$hY(sd+nj^erp2ClTL7BKUuPO z_?C$7OilJ%6Oc|wmX*!WTvT|I&ai&;OT0&L?C+lzR@6H)_;28UFuj$D@Q%M7jR6{P zR=^3rflOHaW##Y)=%YThhI+5^%{ZeqEv0#Jn(?727wNc2-!m@Q%j&o&4gbcpr*;&l z#}8r*QN3-Z|ET=t__@RUMtW7_S6Qp??AR1TeRNkef0bR1?&c!SnT!11CV2ASHtYKW z(uL954se(`*5A;-eB)^ETshu98tp&c!5kr32Vcz8Da3Uo$LG@?ck-FLKTI2mdollp zJY>Mcm}HNoD|9zgiaqw9Wv-j@!Np3po=IAHvD*m$qzfy)I?oQI6wAvrNIRA5%D z@GSCGzArWk3_5G9awe9j_!;jia~3eS0Egs5`uGgqYvVC!P9=UDIH!6QU(eEW;QCcV zF0x^2Pkx-_YbfJQ&b#4sV&VPvLwuHuu}L3)jJ_o=md8z7v5h9~IYo24ejCkrkVD~f z2Dlc^kU{2#(cC%OR}z&maM8BqPsykHVB%`z)zS#O8s4{ORA-~tv?trFMHrdax>*#D z2S=u_hbc2y{?Wjz{Xf4l`B4nwT;Mcgvimyt&R!(D;EXiIP*WyNae*jKX!VhW5j$>R zwC|E((|=-}Np0FOK!^Bj%v;<13bs^bLHd5{O$VZL)K8=*M9(CBMb9LCMbA&f^pvca zc&T_J#$V)fMf8-ewc`{$J2yr2R2$I5&`tG4H?<|7YhZiG@f6d)a4bGd@LFSxBkAp^ zua2&Z;<1Y!!n|b_PHpbA6dX^7ls|FiKaz2npM{T_2%s{ELLA8=Q^7cb#95DH}d=cijO{P`gr$zbOrVPT6{Fq`nT9h#R;Wbt*)8O zvyGeB{(erH#m9r`eZ5Z(?YE&;zO=`umi6OWa}S7FS8iO8m;=jZ+dXG1&>uPLQ;!#K zoJ_nzIKeKVztpenxoG$dWo_TjE#e*({yX`%x?TN09=9c(+T>{cF4FDzQDzT;K=0GrZzp^36{Y=@D zeVqwje6zlpM_N{A=H~hXzgL%}NjB2OePfgUc{gX-C&zfV`!WXO-4DmS%lT6i=M`?= zxCIy)lku}-ey{^fJ0*@eTNnanL7++s7S)coA z=Kk`{6>qU~2gO`2pbyOp)Q;teixNDcexmQ9A6C6E(T0_EwQ*()=P~A6!kV}3Ki?co zCI*{hYAv+Ij5XpZi&K0n?7D0&Jg$7v7<+5yg|?jVsB+4eJ?7om>iQUef?51H*c>s* z8@4@gK4w%*L(AtzR$?ELx#<-3H*ij{hwpJlG7!= zXUD6%j`f1O59f|(d`u_P4i87Lnfh7g0GUNT@upO6-4hLSz`+~SI7d$*dk@j$ndfMK zk@?QAo6{crXrAV9e$er~uq7{^^VDCD)48=;>w|~fyh=9T&OWgv+Q0K+Q=z5WQf#TZ zh1LEcsbwMORvTT%{El%-+@Dlb{pA7v{VbO1YX zRU10ns=B8{JSQAC6^?mT@TK7~&AW8Ja5|T%y1TY?v68PlTl}VuC%RL^cNyBG+)nem z_pD=H9lLX5Wi~iobb3!$sslehduO$#$eDriomIC4?h3;5(olCUvG25Ta)J9nOT>uz zUG_i^?Vo4jr;|Il=by5RcPnj0IDxVaPIQk8g3#mS9LDMe{`C1_eTFu(URY)gV=_8S z_b5(Uu& 
z96k1v8g#z1BtTw?Ing(=GFJO4Zh4}6BJ!s(4#|2!UCs+WLLUu0XLE;DAKZ|5he$FFx~E9+F?OnsI47SJbO>YCiKqq^4j zg0bX(ZKX%7lX~k@#D{lOzdp_jR%cp+o9NdqdZC+rGAtL?2HVHH)a5!q4Y!~7QrG57 z*7}=@U-LJOTJ3Mnaz;LLtM3%Q%!SRf3)Q`O2(CMw<=hKzrFZ6;BP4gBV z`n5HTX?exzbe4RmgEIGdizI9M4vf>`EQ$6|EDzQ08Sv2@=hNXw=}&Erx zuCIRbxwX*kvP!meYvlxF{m_xE!NJqL;NU}OGv_SHmWmZOvn4no2Ocuc$U{H4D#(@! zl@EEq@g#Bo*!*}{cpjmkmh$8zp-_I30F(~P-lZ7}wF z4{x?3NSEZ(Ik*zb;HR~m9UWRdsn!cdxaDBhNZPIwZk7U*vjjgbQ0)IC`W*3UIhdSb z9p`0l(Qy;hE@iUL682UhYhF0IlfAtjdQR^i^zTG}knVJNVHW<-{tMw(`V0MSbej4$ zdM4|I>c{9h>gwFI>ImNx^Vo%A)=!;Zhd+FFAU}UH{5~wV#ZMK#4!?ZK@SoH9nD2Ca ztK#JDs|co=(#L$YJtY66KRJt!+5!D@Pgk1!#s1VR-vsM?0lspkOZMwKJU5;5T-UiK zETKnh%h(s^_Tbad1N4Or;GMFKBUfIj-=0I8uY{)M%3Sy$PnCdG`P@PG`_UFa_A}Gb`p5YJ4@>F?ZHX#rpg|Enee^b;Ib~eIdHSi$Q1b> z8ZR6#4@XXF3l2R>Wfnm%=v-mb{=j<%QEm`e8cj1@)AjEEsr}4w}Ss7ypqyhaV2cmc-?5*Zl*uQR~o7 z(F=a0Hpz3T$9MwuKZ?3mZz*luDKB-2=f*id?sD_a$n>N#c(^*o!xD65gol@C>r`me zykf?t#CgipQz4!rQb)mh)^e2q1!+k>^}XqB61=)594o!_y?UK#M)(_1h6 zv;>mLuQ@vdcYK+7O}cV1u(>()eGPqApuyS1)IAqH?{;b&1#A%gXk7oydoAMiVCq`B94!@8|tzv{ME?rS3k8TSbG)x>W}wRm#hiB zFRTq#O+!yoW)6KYcUqq>TvW-HudZbCmsFcb zGFw<&Ie6x(;DqD7MJHTDySY_?Q*@T3Can$2E@hlA=zFj)I?{X2eZHH{glEy02XB5n zOFs8wxt$v-HMzFnH29agp_?sK_Fj2vSeN}d@f>GXYHh3ZaUh>Yys`I-oT<;ATdB+b z%D>j+uQUFR&C4JQ(m`3~;D;Q0q>b{)mFch01~eXZ&dtHdW!|EZGw}tRg6Rv`hfgkDRB0%ljDNAXQrEfApZ;mq z&+FC(BUAKu0`SsT>O!~DdkDC~*TObE)?2s6*sS`t@_x7wKj35UcKs_VbMhOo|DUQ% z{{ZbB)fOBMFE*8y`_q?uK@MF#F7u^IX6fo6C!BP8;mAew*TOS4wKlWK_yxi%_pBM5 zeRa4Ol)u~>Jm>yrSAC(TB0s!oeOYwelS20r|HqFngwr%$em}@qUFUZ6&;8i@vjuM} z^3+N`=|d_*Iy!u(e1GLw_@FL3!B1s7bk1Xpo0|;Y-tVWTO{mm#Y!*B<)E)lDxKnE+ zk4e5DJ#@<0&jhd3Wv9~C?ZKy*SJcqvVZhVG`$YM=@YTRJ4jw1|IXkSS4cBo-er1Gs z#Pz{*PcmKdSM}#~3~}6YgOf~;&br=ZzGd>phwEX* zXkMr}hhhNuBf*lMo~{*l^mMtI3BFr8tx{8j=XQF*pHFqdH1nd=aJMoJKX=??_^kOe zH^2F5hbwBAZbGNclD$k_^{mlDdv7^4Y{>tsf4*eqkPV^kLzEvbf2Yye*zMl8CcGQJ zdiuwk!|W|yu>QKWLHd$Y!}{V!{Gr3w1@5kF4^Y+{Y_hPFUlAbq?Q?M&`F% z*6usSi~nL;+a;3ud6g}fsyuDYyU%y>_yxsRDqH8R<=G3jUvrP|psP~&mQMcZ%9by- 
z`dhDG?W@17hn?crVbK5EVV5I@QlE&{!7a#J6WlrYARQ@X0M?YIekd3Wf$LzjkA@GE+mpSWY2LvG0Sd&ixHJS`25M_$CQIzPAp z8m`SQ_P^F#hVS4l=S5#S6(5&wT%mbrDc z_HZ!An(FiIMf7Cr@CXK{Msi}yZa{yU@r-LlkD#kE@py9Bm@%v2ca7;QjA=<|#>2QY z4mZ=*2WvxaD*bH^ehr`3(=P(h*u*>g3NXuFcvRzg;A&%4`wTK?NtVtJHQzt%Ec9sU z;>yt6e7?POaV3XdJM3F)gQH7JE1B=0i}BZ`i{*1SF$O2=gl`;ssu`=B^@iiC^uxfM zEr|osW-9%PW+S*$MY74AAC-gWsLzjAhUVv0j-$VWFIIm`D%|P8oxk%cBk*HKOr+l| zHUOAEj1Dhx=L&p|4M%o@U7?e|v9kAqQ{fFS9G3qVAH>c|&3!6#idR?Cg@5tsb5@hn z77Q!42X)L9{6JtmV69L0a(rwPW*6K;TJx0>{+Qr5iGU&Ul20~mb_qD9n=-Kq8ovwIMV0W z!17Mv5DfnX3~$J$aYvZr(?9-=lfAmK_w`f5CVUsgNt`TvEyo2EPHqy{LS+R(s9dQzLMZVkYAbUyl3z9w-Ljc@O?`HFA#z2~Mc4BPZ=X7bZn zY$tJ&FYul*wA31fR|7wqdts;#Zxv5A@WPjevp=t}pe5?QVb1yuw44<0r^<)_=~bL^`Bi0|9rOMX0XNhyUgpRpgStshvSykR&2fBVIO+y zTKjB+#j{(ytTG2(_}{&~>$BNQE3@z)v*l%#IUQ>&Eyuz8S?(B{2`=ByTy^KI=zeEO zjgN1b^Wo$3!}{Dje++p_pLGo}xX-K&#(a*LjGgm)HwQTrS2;f%_5QYC2JL5#7B8QJ zUZ5>>kWFh)&-^%54vue#^GnEMVr?^$t^$mg)>!$F~K;Q)XY=tlEJ4 zB3&1KaC2bd!ss+&YoV+8#?MX-%SNyL+#d$7d*~Z?(#@4tw;YTc`chZQF9*rGE^WD~ z95!KCYM5Vkl6k$yHixetH6MB|2{_}ue!?v9=X>GLo0o*Y>FvFvbsl|oN@o(U$!KDYwKQ zF=RjhRqm2R8Qb2qv}4hf0RK+$+)7wQ?0)A6-Rk8B0-GM9IUl!)0;M&tNsqF+l7bAHgY3y&0->l%Ey;A@d= z`54s|zSMp#?SBT??DMbxCovA@A&l`w@!h1xj|H-slg2Q2MQ^0sAClJ*Y$45ct_!jA zy9c(*+^CQC|ISz~eq8)4&2i*cz#E}-9RAQ4^UQo(dM#Ct-UIeO;U7Yy9#e<1Y4S#~ z-?aU7;t;iqP5xJjE7*3pOSn;E$}<1edijjytAdu3;UDTsw~jy`%67V$v6Xv{UG1j| zt26Lo*g`xb^${;z4Nkr`t}R$g8)~zrsCY!1&pz(p9`II^nd_yqtGwlN{I0aO-fP7V zxX1Yz^A~u#M4h}Bphw3%Sf)Lfadz_G!GE5)uxt$SEV#G}85bWrU-H7bJU&dxndds! zqWgL8xXE`5&QrCedcQu~#yn}B-%8(K9aj$S<98K(-N&3A9n~lq(YzxCd|l8$?f)mT zs`n6FXq~pk*%Tb1vUw*wcS3VG?xebK?^*N0H;z~s-a9Q1Ex>DrZx2K~YH)m2%x~D8 z)0r#x*BiHgJ&mgX&$!NK6eDOvUq}AVoOxl9HZ*sKZ}3?^6Q=SvRa|d9BINW8$z#VD z%lfm*K|`Y#>}K8U&AM`>X0gsoM*P{wFW&kJW9l*UdvK*OSzf;f-4n@>6P691rwyOW zcThVTSD6@|eos!U2PvLvc$=}`!##!A%*K0$!dr&7eQ?qeFgb&_uN9vf-c}nrlZsy! z{3OwqOB<=$7UnvNLBP-KJ8q|pOPX||cvxdreevN+@;4v$QdgEW0GGQiQjN%h`|ntyg%v%2{uhJ8Dfhkv7_(b(?dc?_SdS zLYw35nwsSs5J#@5l(Azu_E48I7m>H-=lU*5IYT}2^xeZ&@>`vY{M4muUYJcDJhm%! 
zR8%i->ybC#)+290vfgEDURY@BO^)gnY(4Up+Ir-jldO07nip2sdhd_w6`3~~I8C0O zfs?#*SR3hs^Al=Q@4z|5_7~S9Zy|Z69(fCr^**UF#PyE0!K< zed+Gh)L7M96qL*0ne@q&E0=+>X-#DvXJpnDihk;fH7`t5xcE9MCi)&ucU9VZv+kJ1>&7|MlS@iRyYke_8`puo*t_`G5&JC?@$QAu8 zdD<7#K;DL&w`)drV|5kztFlEuL!S1)r^#ED^>($9uC?B_Own&3FI8GoX#=m?tJ8{~ z*y>#7r%BfsCwJIAG;1oUOFa00Bj!j;hLvi50sl><2v@w&k z89a->*3w^|cYQAgo-*$Xc%QHQ_`SgULgil;<)6d*Qsu|*MQr5?<--SR?;P_^>>z$G z@V=B7Mhbc>e*y0cd3WRYJn!?B4_%c%llR%mkKc2=&rm)zRQ_b%wT3I2>V2Z}pqFT> zbvV&ZbkyCpqL*l=GX|oO=%sx|qKRmv{Y|2Q=%GDM!ntUmJuMNQ^<6yegQE}fTFFB% zJmvE2WU*O{TfFY&y#RaISR9cK->hYh`@VeGiN1Mrlo!t8S9U{l@nk+W(&QgUzI3kZ z)ID5REbpYByW7!g|Ac;>Ge6R;RXN>^>kO@TEel&-zcQ#R;L{a8l``cV#d@>;b4Pb|IXTLm zsB()#txcvnDA#crwuQCbBK(UUZ780}JL@u;Q&Fv_`7IwouZCXOej#!9xw4yQ>l?O> z@-4>kOw3??Hg`!yHu;26k*+?tW_fVJsJ7v%<2pCUhQC;s+sYfr_$@33(lO!nw-^UeWABmF~M`C@AWN~7i%)Ce_#jX1Kal4|GV4GE%W8auB^Ex zbB@p6;OkxUE%CVrKRN|JEETJ- z{F%N|&UjyRPvyMeD}U>4dj2O}UDp?@+f&3f#0#SH3wxk>f%t>7Y|By1txR0)!Y9Kr zF|}Ipso~4EP;;`>_+6w~-fT5#DkI)hdaYAt$WFwYlvmn`z^*Z{cE#`Fa52BB+GU|= z;fS9K;-|UC4JllBocQDJq00lUqo~am$~0u>R%$uVG^8-M(t^L48r2?7$Ja_vBIY;o zWZz{CZ1cryeTTX9_9NE#ce7_|O}4Est*LaTU(enpwY~YsME?9WzLUSSvUy^=snbp! 
z+6r}^YE`~H*gU_@KcZ>5Sp(XXs>g3UrBXBdGQSLch(A5hdu4ODGQYaAvY3YtuOmjU zxb#xd_NEty zz3C-esJBY-wRc+Has0UaPTJoY=-%srZM`l3btd=^UhCt(pTzukMnC^e75^2{n~UJT zD_LV?Z^l6Ws~f<7DpLcm2siccme#opekoUH(vhb`I{X=;I}HsaYt$QglHtj-Bc6m$ z3{RF~o`gs59=a?T`N4=M$HqMAW=?@8TPtIqWPHQnn@Ov}rk2)VSgzf#C4Rl>h*p0k zYbmMP;queR+|AymR^mi$&`EJr^wtB?*{q?x*t-&5D&^Tz=K8A+Yc=x9+1{YeHGA+d zO=a^+Z@0@RZI!XyWg0-_L`IejZHp^BVo|zl;Cp`49O2 z-D{?wUrwrbJ{^zo8}YB}42QR!F~op+oPc{H=az{vc)^RBYiC4zbZfvFfVmyj_nqSQ zp#Qa&aEIkP#llkAO}@*zu#w2=bn3h zzMFr3qT9)Tu9N3Zp7o#gYn-tm`hJYxypc6V_PbG+@iZzfHa3@PtNH29V6XGgWT)n# z1$!NjUvEL$85yR%O)Y7!+@kigj1@gSYj5*I-Fn{J>~#a*6s_%#b4G|C8&iX{lLEeU zZqvkv^UvqgxiY_VKU2W1UQNcA^l~8Fi-Od+ovTtb3&M<(E~CSj&`i zD66rY&hKc}=ch4-27V9WH{H4E0TeX@3Llx6}TYXn!m1KS=xc#O;GW!JqB?R7G?b9^t`R z>9@f%YiCo2eQa==`OQ1J+m0nK3-9KhpXhWxKgrF)yU^9??3^@~^$*J%4eXn4cFMlP z9^wqV^602~=X@*A@vnHO+3YR$0_l`;C;PDR>6;4?f9d}J$C~SZt8(Y>UHM=7l@H81 zE%S*b#JAgOZJNs6)y&!_>6@C{T-V9fCXsaLG3GU=ab9qwKS{&R`t&rl3LEB*VZ}h4bh%;owe!Tv%dLz?w&#o+yZP4 zelly)J(-2A-l2q6#kx*10J45 zU&hztWWDe-?uB+T)XDbunc46}!*%rebc#EU<9*{>>1Sap{d8dC@{Fz2j_*!?&TUo2 zk%;kiKkMAuJ-V>9I+i{1KVvUOmNKIYeD|euWAzsDX0Y#lCi%0;S3ClrE0|p(EzX8Ub3Tma_5>#UELksIe!iNl-MI%*il{Qw0CE6 zjH}qrdwaF6ZdG@-qaEAx{lbIeODE$0uyS33W&^Z}iJ>E{{t zGt_dv`G7oAOWSWa`?sxo1x365vlZ;~r|spY?Px86^AyZqu%VfUWV0HlSi-Ym&E04K+D0GHLXZxN+UbT)Wc@<7P;baiG_$KXoy>L?~4a$@9$`@gZ5nL)CumM zoZ|(zKH&CPntRe0&^!gb4Xv}H5&TB^AJ1dV@Pc$7I&h5eEx++{2qKo;;SS-slEHu2Yn~|pe@tKX9n~^-+g_2kUpOIbAKPwuh%jT!E5?( zfGx)Lg~CnV57h@g{pm^Bk#0YKPQUEHzdf82kgps*>ijx*#Q7n9F?Z-IO>H7m8gCMo z7GSv;Sgd}K92Jpa`MgGkvz(6tr;4+TMgQvl0ey2#yevL`nqS?ODSU{xGkM~+Infzd zkm-Bpi}%zGUTJmpM=QPHsX54hYD0IL^q=yaCf)IoI|~;) zu*$%t_9M8s!!$+wrr|l@S}_2w_73z4earq0>5xvjox9KV{d1==?{4na*he$}Y~T!5 zCa-%{u!o(k=4w?;aaK1ci<*nvm=GUs63 zc}*pgS;PG}YoL>SDETh7@74Gat6$AHt1DT`%!Dp~uk~U_?M1R_d6ju_!zX~{kw5kG zs^q35UN!wKjri4p*PNmFqE4f9m3XjOG6X#E@R+3h$o72kq<;B%r?%%*;RHCW-rrBp zy^Y>%Jb<2?PJ8$N_W{~_8@>6buS>RMXZF{_<7n@k1GV=y@a;Q5dqZij;Xv)Z4ScU1 zpuImn>h5{;jeb71dVDY)`?I&vv0RG@UHkR8@GEN 
zJ*Ti0A7^W%>+{@6UD#Ot;RWoggR9VWaX~krpf=AOFzH=ul{jKKi?4R=cMmo=Wv?4 zX?`NVRC71$Uu~1z6cpzx3E#Ufg08~7^p*6d@l74(bNJ`@$f);z8A$pv?*pC+(tizzhXZZ9H5^s&``!ssE0iE13Ko_q+QTThS9JSEzQuxe)7BRYTYVEpjj`1@bKG&n!_^i!fA%MT0CMIO3n;2d#kJeSd& zMZURoo#Izb{Qi*N4EW0yh<#t<7@Qk$3s~!ofAtP3UEmvWc+`^s}??!q0vf zxEluGXGLD~FuwaP8)f{qp;bfsI0h*DVPhJd&5QxqwC~0Ey=E?`*a`b-3!=$e<8RyO zv+(+0Ga7^I)i(a*1b+fS3X^Y>QHrH#D!sej&%O*3=qd(58Hep{FH_b#H3 zYw5$&X|E*#^Lw@Bx9?y)30r>UiO}Lb*zzgB)A=9$BDb`rtnX+<{bG=im6UU|K@;gW^qULOrwYCBQf9A9P>F~ zD+1d)8T-HUy>BC*?_%ta(*84h{@;zgJBI1KjC~WZT?}kicgkP99NjrsTx$A17&$d@ zDe1pD#dN2!hMIl2qPWzzXZ7=;;<@72$o{Q{A0@+YjYVBW|IhaH<3@Z}gfGmxu6QC3 zz3^9u(taty7xGt={2*SCZL<7eY!k7C&q%i@RxyC)OXzo@`c1^5{(sLEw;Mh5)|k&! zV0-S>ez_M-&M|gjf4gbyLSpV>?V8qzgd4?jO^i;oz$QE=J{d$;XXtMx{q0S}d)j3` z(P0Obq?gYQ&0HocUt*}SiS?X0yk77d`90h0yOGX`;y-JN|Ewkcv-U5y8$9!rNHA{x<2ab3*paaSr=7?v`3taoOL}AMM(Pj`C3mxxI$$edBB^Y#1{;u9VGw{{OJ;z#8qEeYzv2{q z_H!-@hi1Hytn-YzxVWCSI#^pytdY)L-D7QQ5}xf{rsP!jobq-{##bZ8UAn!T-%>SziNB zI#crct?r&{c*cLN2nP8~U&`K9)wvvfoB27-2N|pQMY!0){lMDu_d)i?e}OSEj!Y_l-s%AJzn$RzlCkkBu^h5;;}_hTpnJD`0w^n#3F{zgE!iXthdo` zbr$#2#dy}b=r+cCoOqyU*1kh?{=->QWsk*+zh4x6gRG&n+R$h%G*1)172Y{V+Bk76 z{qju>y=xEa#L-@GRp#|qY#Kb-II(5_H0>$AR%Pa@-cqvS-VAO#OO(_r#dAh&c;Z6%;4^?(;xfU#@yPxdcXd<)t|GYdKKSA;Lebqr(eduVvWt} zIdfK*Sl3i|q9}eVK=+bjpnNYhzmB*V@EG2>tvZQ)FOw$n&%PbOKE&-{J5XWdk@CSf z4|;^b2YK0z)h|;=W0Aa!z#mb0=z6-^dn#^EX|>cPEi6;+qL?0ObfDG{h71=UMfdiZ zXM9M>vC<||?=o=sS!j*kF!C*0OAhS*bCvy=$|U%fbB2vqC*Ggq{W`s0sJ@nk1={_I za7Nu^TY}LY>xCZ@jL23RIUF*)+@*7)7A9n|@w27ZU#TCy$-rG7k4^QCPSzXW(N#aZ z&D7PnZ29TbzuJ5KmBvBpA2zW5_|097v;hse>iKWvU-%RalH(Z*3~td2ih>_J4WXSh zap)oRVfuyEqFtlAsjKcP#>2QJ$1T|Bdg_T6rxw<{Jc|2Lg%7=-44+1Pej9vOu5^rB zM;nVm_5h7h9p$yK_B4q);4RR1xRSVRVU5qu6;)O`ZnPu&gP+M5s56{?Mt<+j-osR1 zb0?2A5X-X*@x#QYkC<=cs7^;!dsLkIZQU)Un<`g8o68xaY;s1nm0#%-v!4&Clpixk5M* zzmESA+Sk&AU7d)nU*X(DpSM>hm!mS^at!{1#)MqJr#b%7dynJ8yN+-0<|%$aJ;8zg zGBQ#Fp68>x3$=b%rf?JIac-(UgN??os7yr1%TBLct2T5`ne^v*p=3$pydGVPO!hPY 
zhv3w{UY$YFv-YM=E!>38RDQHS3R~GY7C*s-hSF1#TiNN$*X#Q`QN2HiwxnM`+u{MY z@jUHQdO-aSrTz@;iD1#XHQ!*szl-_?jz-;YrSZ!q{bHxP$L#To^R>RLvn@xtP!01!N|`NZDzw0lCv?;#=)NI zE=g>GkMC1s?asjuyL)!NROk2*q5SJQC;OW^=MjGM{9E5&GNF66<^Mt7$6U_zH976@ za(iX(f+xEFyvgmcw)IcQnOBOwA=*$}YkYxE;y;Rx^9`SG6%4xDlQ;qEN38MAor+I@ z&R8_LtIPaO?CO$z*R%N49bfK>WL&l^kBrNXMKUf~R^Ftzj~8P(7hM1SkoE*euo{_< z%P8+TU@o#2Wc{M)@cSQr#63(yS+CCAwK~;QZiBDO?9W~;AL;jsQ^f0Z8zl4KWHvJ2 z!X7TsAkr6+?57K?iQ!jd@y}bz+p43zZQOCVt$H#0FS5`<{1lC6OR(u!?i64@f&4$M z^C%r2-d)4E_LjV`m9|skecE?kKaza#&)RjAC)(P)(ezbhP2aRTZcEUSX!ja$6}Q`H z>%P-=RZlc9-xkFuziV7auz5}yn3u%oU81%n%QNuZ_lNlwcrAjv7Pv>p`TcN5`O?o% zaG!AlV~}2G%4{7a9mX%Qa`sOE|BCDyIJAv@duu9JPY?FLGCd9&PX>%NwgFYQ9@UzIk3?{i91Wj-hGvJ-XNq^!a(wxqIE$Z2ST4-W$%|&<{SuK8Am1C4hCM zrTov}|5yK3=iJY~<+Z8s+KtylzEG+IAI6LKS|B&FGrjP%^d7qFbj|HFhpT0L@MqXq z;I|BqNN*I^aDU&~75OsqV})n*Yop8lq-%J7RPvbLUgf-$Ie&cW3H#3z$nQ~GYFD^M z-!>8xXP$_!Y+~;%CPI$+O^RO$60_H9sp>lF{BxT=%+C_v7BfOn-XIXPp&}53@(sW#b0GR z-7L6)H@;JbS7=k~#@sR3_$lT^*vH1t^Q$}jW>H_bc2B^ZGu# z)KOP$SiCf<9mP|SKL>o)#$#NW7$1CP#)rN%<9mVe36B~dc<&jS_rjZx$5!Uc(8&)c z8=HmP5*Hk}clM+B2@8l+J&?(BwSNQn#m9o}Kd==; zz`Kd*Xs(|3(MQm+Pmg=9-5>Z2WszlkS9mb%g?~dIy0_te_+lMAvH)PM4%fUkUErSg zyzb{cTl1pF91qyak}3W>h(%qv-fwXJ(cn`$`cCK}eZ$;(jP7z54TKB9ul`i;y7-=! z*P$Z>g2u!p%4@1_hc{Ap9jP93ZfJO}&hj?CFhk#Iqi!7g>6@DZllUar{>S3B83y&w z4mlsfFYR;JZht*5w9odt^y?VwBV}YEuX(%dAbTs314GBs*6Me$*?QhA{9(i7Q|cU# z+Llk4&OBc7-mH?CYLci@5-D<#hcw&8+DshL$F-|DDJ@)>&;g%ft z%`=Ys{-bx#wQcZsLGavJb%ASqBlqHG?yUY1-_u)J_J5Sm`)}4eVTQZK8YnaTv;BAe z{>y8~Y1s*Xl*OmJWnP&2>^fict@bQ?Wm7?AM0;xZ+UmH zOJ(Bv-;bYF_j^3Q;q2~sI$y# z{gE2exU#>?V@!@0cKq#2UDE5j3Y?K(T#~s9sPpD2+%NOFc3<}AeBRfe(e6u6ox`(m zr8*}+9+h_r>#B=MSJ?u;DtD%SiM?3qc#>-E@Jz-7Na9y{bOd z-)Q<0@2(PFNSAM>Z{Vm;yZ81R^ttnA{e7B!Rq?o+ig%iMq+r$edXr;L7dKY1>w9#^ zvutg%vxT*}35%J#>1^rA4fNNPQJ>U}$7%cQpwF3!J~hq-8Yi?%4Kmip=ttkjP^^`< z7cAH@9`ttK8jtpVO1I3$ zf2-l_vh^8~^GeyYAMb&ld1Su?kJFy?#M7jS=jv!%{-yZI=6xZdGnAjqd*7=@R|GC? 
z$o^@~<00D8yY4{}ynEqW^?OC6ONawRagnG_6dU1uRppya&K|9+tB;o z)K5V%gh)Tu({5^bmV5pg1AYBZcLWFeyrOoCoabt(IbZwaHoE-N5=x6YDEu z9r(n==iWLmt1zcq@y~V6{Ac_1cni2~k`G%3|G<$d;#YgJO?Q|%p{2>)dx4__ju?aV z*zbPJJHOKNQ^-5y0Jxb1ZjKawI-xZvWfAlff(P`MViKm1^@Q=jr zg7uI{&qVR&B5AOEvhk$+RTG(XjRRkEycN#7kxIg9JFX{Vu#PR&9S^rdhs z_@ocM4Gu&z(Q9BE|DZhTsQml{EVk}_aHf^=fE@1 z#dMrTTk?sl-?eBK-%9A@evApCmEtK&bQkcnlrzuILOW|%d5?PZ-}<#1<1a1wX)tFGe|x$`$VOP%yh4Q(BCgvVI( zvA*lz`>wUnKH~+)zYZQs5q=cA>rCkE55##ccN0oiewRGq^+9;p;df5j3m+j({+Zg4 zPc|eo-}q$@3#XKk?jIVpX?S6qo}+P}E*_iH;)R#t!=>O=`<|iS)8W&L`4``*jdigN zR2k8;UGF*ig(nOT)4#^|d?YLT$>B!9GGnM>FQ)EU$cx(4m{kAiLF$yLbNl2W;W0DP zef2&vd2ZMa|J%C%I7q##sdtU~10M05?&11XH2&Zd@vnUMDU4ZTvt^FdJMa_}WyX^x z7{ABwzdjh@{7=x$DXpt6qg>Lzcd~w{`1f-saxYfad5X5emBg5dIm}wgz2jLEOM}OnR}^S%5J0VrlH(B%Y7@`xhp#7f-7jUow^yGJ77{PYv5HE`~|69 z4*NCWzZ~ns+$D2F(-OrB$28>Ta#sv#)3bi-(0Tq@=`yyiZ2HvQ*A32vmC=4HcXcBd zbG`7m+ys9F_u!^n?j*~tY8x?ZDKOMjoZKoW!`&GJ+juXwS95;(^ZkC&+uPnRGe5rK zsdsOC-}eq}?;6_w{geIe|Fv!Jr)b}MkL^!M;OaoOeHrcN-(mX${UF_OZfS9#_D`aH ze^)<#RK3z7AINmJoObce=FxF=~ zf|Klazvcnmz`?>2h7pu3gt7?R!2-G|PYakIEboc8PUs|+ml^x2SO z>~UXq>}Tnn`R+A}U(uFeweZ_IPgjvw{N|?Ioz?Ra_>hk!_}4O~PXL3)PfUJ_@c&`K zLVgnOO)=h+q(rUg3uj)(?Z z_-2&TXvb#TY!FS8ZL&({wd8g7sAN&K>pe?J611(Vhmv?gWewoXSB!>n?5poG!{{?Z>@* zMakqNab3|^IISfuRY0~AbP`?VJAIw9JG1NqX8hSy|D0TR^Js59eC*pfW-sR#2jcm5 z^+nqeuPh5{q4hu-E`o+yA8IHfH+jwu(zj&gsRZnnwyP8E*>c4M+;fPrj7CpB{RIDy z_v=gD5qc7^XC-z9jA((`_4$a}IFFg_J{s-C-u9Y`hdHf15U(|RC>#J9bro`w!Bz?#| zn$0eA8v3Dw^{OYGsW#O9zpGAMzgBILt}_4j9ny0s9^2D*9QjOgxtF@M)-@FzT|5^VXAIh7 z{Sop6qowVk@mS8F9gSb@92(<5xbbK+P5FtmVfSb&ev_1o4Vl`{TiH8PdL6HQSUU1e|S+e zKZiWw=TYi*e~*1f-=zlsNniM##({qTPX3br^_%-S$MfiWYhQP_{H6B)iS`}ZPx@hx zY>NE5rZV3=1}D;E=DrZKCM=)pnXr!UxoACA-{eS@wwUh;>dwj6SEAGL1=!~d-|-DY zXD#M%iHn|T6mCVMx-7prbO^dn{vm6FJt^{D_?)w6p4;AiD(|-J_2QKhdaMx9M)gNO zqBP=Z-1lMQcIcX(@4E+uch71FKhu1M*+ZJeQACvUL8uk zf#Z1PTTx6yzOl-ytQ}V`_FQoc;!5aN=A6V16+blNHE~3(>+Ua?OQ7Y}??B6sMt9N3 zSD$}XM9WkGeCzBHYmD-BQm*Vo!K{hv4qnkp`v-O3Z!hWSnOR1Ua<8e&zEtu9b|dWe(BrqO%9KG2Qk!_Y5#^t 
z>Kw_NWI=H#jZMGFG%KfC+tqwX<2G`dXI`Ybg{c0JY$Rj8&ia(#MK8jG{1BVBQs+>q z=Tb+uNAahtJ!j8!XHB<_pY4s$PpKc36HF<}rKq<%Zd)`_yVu9<8Qn(W%omC?MLxY`FdKD&9RT;PGFqsoxxh{ovc6JS?x&Q(OsVfk2x>MmH8$u zGCUd@xka6~rXKifAU{pMY;PU;l6#kT;ZVNp<=D`v>WB2n=oFQ;?PtsEIm>UXzVR}? zcd@@swPpML9eD%W{6?Z5$%EU$yi?_nb9CdFsl-v$pT;$W{wDLTvF2FAxq%o&j(E+) z(hklMXuWc6cgNeTUmXWc)LtX-O$OifdGvLGvD1FmL#AkdENxv9_x}=mKgNQ;OcosG zgd_aa^u|~7^xafq-obcVio(0ICtv1!0UbN43us?BTFCEYet(KB_8w**;zN-g)1LMk z`j;$}v(cX9_ME;o5SCSo5wgQ!dSWJurid>Qv}4 zF~6~z1+F)P-aRUBRyxsuGt&NNcL zmj1@l$LKQOG*8m5NZB$zpz1-p>apJ^NK7^qLeLX9g%ip0OklbQ7*fN396CEAJJpw0WM8%5%L~Wm8)gQ# zYD2iS?QrY5!7Yew98!>eh;XGkmysqsX_yjw*$@l3%!J#&;FtX;D*wEf@LYI8RH+3cUMNxi1HquSv8?{kWj(A>u zeo5_7UiNQ3aI3F;2Hi&9Hu5BUD)-;M={J=Aza_4dcW>%$$lruNaZ`2Sa~sWf7Ns90 zGoqLJ3f(VUi;mhCEE&*wL4Dg~VEKupJ*W9r%qRNJ$-pxD4$6<>GDB!5 zJxCet@A*DuBxh<%`%J|jvy13LC&Im@{Tq~%PS^OwC*p;f`3P^nBTqU~yefQKd$fmi z>1xqI{A+PlC0%8vmZE-jpFwI-Ej+Y5R3DLCE&(_02+7)Y&^E}3e9;$KT?{sEZC()YVxXk#<4DLBkb+TVO`|(Ok z=Bi5W2xMx!x5z#Ch)RZU)SB;e(t&&k>nE6ab%*XRW4}v5@@jnr>BNndoy@&t=V}@E zXmBYW{W0^MhB7uTAK7~0_i4p{$P<26%Lnzq6VJl=6Uxg+{HXfhO_V)KaYE9R|De(t zA2BD+3UQ|d>5W-nV{f(SD_^#TJJXi#XSbnSBGxtkUUyF;{fc(C#qIIElScGewC9xe zBwtQRv}A7!bZ_x?K*rvlYagg{p8a}cdxaPAk93=4*`>{z4s3CWab#FWRNDpkgLK*0 zcHYGoS$O0WjlcMRsCySUsfsi2|D3)K!!XmppfI4Ef#D*eHi(Ldp;1&s)aIfRgR#9u z5>0H7C|>EgFas>;XizZ$X`_kR#NCi={z*31(V%fn%nOOPTy#(O;m`!!NOrUPH)1mH z_jfMPjzePHci+$d|M^T$)v0>wsi&TL>Zz)yZqN|K-6hzJ5397d=^IzP(l|(W+PFcO z;65Qgd{~35Cx8(TuM5HogyoElzcq4(_YOqFeBM#K`pP-LM85W8Z=F$LlIvMYms7n4 zZ{K0FRCrOkUn}ZIBnRsW`!ZoB`6yg-H~J^~mw#~ztM#6Wum6S$R}nnS z%_S=h`p%2~tbeZ$^b1z>*97@ek8~lIdVA>Wwr?!=)O>C)`evW{*A%j=KB6h;BUBF6 z*;F-#=yRNY{`g*ep?%TozjP^83$I6}^;`mw+$E>8*)5%qF zUk>7UN1Jx)w}(y?|g3=Tn>C_D@(uHPzx(Ru1Z&F?Z;xPT zvw80t^S$674*qxb*+ur?#8@+7-Bx>@qB-u+p#7`=Hq+dF%v@D{Riyb}oP;L>Iuj{- z*|UypiM)X{g*B`hCiXC`b0#z0dXEbEDLMxk|M~l;JbjJ4AvczLHR0DIk)UVVj!gEZd-_HFcYJaxa;THm$yoS}V(IFJ3BWQcq+qz!Pi zNUkhDZ|Q7}5%y@jnj@_HFg@=GE<;#WdYE=fTlwBU9!{HK&Fp1pVC+z8p#1U7{a*VK 
z&e646ckV7-18w5Rx5SS$`@1c4`6t1F4Lz2V9o^pt`y6=0Q`U_piJzx|Px*y>6QAy` z;?Du=`9pmB7HPxsmbV-G$3oXP2&-NnKCg-=ampC`P4E8GSzp#T8D4m6#9x5MB=psg zSID!UL#u3R8mDN^BHgUI`i05>tmqPr(#;QuhuL?-<*WErWZ!SdZ~F3mS`QTOB9v8c zwI|U_AAw)Or#ca@V&KoH+&S=O4%bac=fA#2-xk1qn*4PxBIHE~a}(hq%vj}1T^`~; zYj*5M=1wwy{WnfN;Tkag{~CuVpoTDU9 z={e=M8Xf29w}QVm9BF>qN#@kYafgGa-+p{ZzcJ>Sq45ni1MK|^OB3Le+H>x|-~l*Q zcJbY_U&`hCe$B7(tNd~CgZ?Xodxtp2gZs3`AbXT(Y8dSAnc;iA(tnsX9O7Q6^Z~9r zg*Wf#ak_L<&d>jvulg`rFF8BFJqBe0N-zA=grkbrj|Oxr{sjE~ zdIoiz^5=Wudd72)vhD{DGl@RVm4G-*v& zX%5PklJo2RCqZ6H_n!*SAIANAzHbTIrpI&A3T}T@`j@1el@H@Rjk7f7`vz%#y7lll znGv$7HTXU=lm`Bx4aMhmwCS0N!+aQ9d{atu+lct+-KpXA_iq(; zl)eLu_GBMG2E=38IsG^7c{en*8QtNJwFN8QuL$6G10NauW5={EWiRV48-z1LeO&{c z8b2M#?I_)jP7LXuLO-ZA(wDrvnr)5o2ZFzEx=)hEG;Vc%z`A9K>z6ke`+I21J4hRr z|D9jr{t8t0wk&Ubz_>pX|Y`$(JD9QyeX@4xp0mlAKX@Jn+?&H~8JF1$Oh zWWO@}6ED>ttrJ|%@2f(5VO*Qy3dD8u^Kh{)sW8fKk~XCCKPk8JxhyzyApBvuKBYZy zec*qge!ObTvkaWCi;sC^16eSkXFg#`(j~|zSCwZ-ffc*{!)My!##bO1Fyd5tReiD+gif z2Ufv%6Ti|(x2pV++~NA32A$)PW!24Q>FCni8b1u|3cH$%HEcJhrl%77ve+OAQ>lx1$fW5h7A`j~G6&D%& zEbV)NH@;9^sLbj^b|?@0Ut`SvAjse(R;Gm%P$+L_K`^zJlh=ektlT($xwHUIax=ZAzdOHLJS+i z)xntki+$un^)f_Z^nbFk#)5j3TuT3ZEv)}6c4yW+YI3Z-apzmOpCxYDBw*GIi0Js#i<+hvG%sboa7=6zWf9bugESxo#}#=jgocc${58tkiHNZCR zi%oWg!9FtW$>p9~^xloo9it3tTVMY;az|dIanxq6Q<=!uEFRALvM(QQ+v1yS+$;Kl z`=W4Jqy=vXmtsxwdSLYyXk!+=OuTq^bwGpsSK_ZOPsDu2iMDo?O~bN{VyD-(W+^}a z?j~&AexIVUh&LyRzWlpsZtsVdbA%%YT$Xh*`gyHGhJ4KfFIsh9pVm*qIt=TkscPP= z{FV0<@Tsigzsen+|GuBTFRJyQqQ_US?Js^ZpfTX{z$#kAE9tZ(IAj|dsW|+ho>Cd_ z0_O(mUHzHrQT^B3$WRSD^<*XIuVsZelx6^U!tX>CPRWP*_GRj$Gd|zM57mu$qBZ4D zqXShZ!lSiZz3HkuApSs}AHTcdPoj=7&f-f^Q?x>fcK+Deo*nBZG9-KQ{ccsi4QRsCNw6}OZ0nFDEW zA-vKH{=WhnN%*aFzb7ozx2nT*U?-SH8&W>Q)PAu0eop&&3!=B^%z!TOuCk17YeC z_@nOM$ot{q#fi!nS7m;aesZhySHXwz`TRj^}$l`Gpe`TCqKs<6pR1v-TmbI-D^GJec*2g{~xgAgm+>{Ua@7= z=F;qi@9Dudveb#D@73J2cIgg-`+PF?7{9-m@s%;v_s!9tuK^1uy^)H5B7$64-3wVKJYHQwuiUQvj^U5xAIQ)0b^%L zIJV~w_AWvWl8o~g<1)|QcZp;c_flNOvHJ|{xO*5&hkD?c*DoyZ<*rfbg10ye60X_l 
zz9ag_^*g;ikoVdhVLp^q`?(^_huNs{e2g@vv3Mx=WHpUW{%b||@~>pSy!VEJ&Qzxt z*t}a$pHZ9=N$LFRa^Q=mXu7g*xp+&c?>mvB_2x**cJisxo?7_9eP8SAtb5BJ=ibsN z=R3^7_gBmVZ8m#Fdt(%^j$b3ccQ8C)H}if)T>V;t@Gaceg^WW-M|rhHJ>`nd7eA-E zC_4Y1FFIR{3tZ{C0x&V^iKK4|USSdFHdC$T0<^GCrq;AxtjMP6-9wl@i*gpq5i4r< z^6tFu?=^EfD*q0@S^F%1iF-}8?@o7&NUpTsg*>Az=yvv!#V>)EkyLx?T8sKfv(~Tu z&=z-z@n#78_4=NO$J2$jk=j*`z|*gRTVY?7TT^uYLGFN}^A%kG4q(!A_U8${MgJFY zx35bWE0C3U9#0rESv!@y)i6dgdMIO$`Z3n6$)2Qr#+nO9i^%VG_)!o~7I=4# znceb#M0dW?BrbQFaYtYmYGU2lFlO5p<;0N8-zCn0!ZU;Cc`|spGs%O$#5q@PcTsV| z>+K2%5z&eado;oa#+ z8H>92zoa{X1asSS(s{mL;c4ZYrcK*L$6`N{&SF~BrF5obs3yl+fyaYO)J|(vW|be> z-59_3 z`03vhD%wTcd6YwCl3(M9H5!vRIvbP3#HSO;3|_A7_+D76i+Ia3eft+ zhC-<|)mB0cmrTnp4a{t^Ypu`QPhUCEe){5CJI?;5Sn4VKb@n}0SLq*D7fYSFF2-<+ z%CS81Y-1ayol3qxPh1<{YJ1|7nNOB|S^1gMX;J5eLMdW&m7?8+_sB<#HmkO}s?ONA zMC&kto#JZlRovW)?$5EG47}cba(cH??|8%K>u76t4&LS#xC@Z7myWe4A9qp54aS>Z zf4bPYBqhD`@~~rbYsz;U7Yv&PjK)E?>D`5H^gMW4x-am?%(}nU^yt6Wz6jjfZy=tb ztDBUE(j7y3rCFLIJ@uz}>Zj>$<0I}E;{K3QUuAfAVjGb{slYv^XK=T77I@F*yMlYC z+2iqC>Vk5&#r=Wg7ti`MUmroor8buJZjH(iBkpR#y3u>G!I(8xxm)9OWbk_<;?9~_{5FB3tiDF!lR~=gFP1XUu5*au zA-a`z%H8K)A5}D~-wO@zK|5hyMxe|IR^t+8VdvR^{2A<}RY0Tm;DznhfHgRU^6=d3n{SDu%LYN-G zEM>~uyMA&Tx@F4RQhkxOOkFqH+Bf?2Zmln(<0eJY;7FfMev2I53|K$?X!uP%Mhm;Z zvBY_nu!&O`KMiRwnW?9D3y0FO=W$ZBDu3=)TW^H?0rqdSE5P?KL< z&Jq{1j&>KPx!XO}%-xyNZ|pO%#@b0>YrM9kbY10G&qJZ~z#FL|f9Cv$*t zS&8>5W;Aj>Z(^Er6ST46_=gv@$4d7UlUMfPy)8xUXVbo2?$NU**>(AcUGBdJmu1%* z&9-N6H^$m&U{VFnm)Yyevb}5Vu*R@#?fIR~_e9c|d{5_lCf_OI=j2r%maTjKGlXf* zIujfU%MjME_>sMaF~Qau42^?i?^T@k(a-054*ZDlJ)iF^-`85&vufE{z8CXdi+?fx zCHRLIbMw9~lLD7RM@(rEo- zy1wyG$FIHiQT#LUXYfbxXYhNx!aoQ9+yH(q{`mp?eEit}K8t@be$liT|B?WH3I3&m zPfxIyrYUlsfWxt8Co5jn^dHTRnMb7xp2F^M$;Verf zovX&3>#P_($Ls%POW@5R*C=juuO^aMZo%bNa!(a!kU*{ppj=b+X`|Ga~6M2KS1g z@5MdsFW}bOQ`L0WP}if77tvkE9dzoeQ`|#`PA#3nJbG?=owqMBjeeNY@WVY*r7zGM z^9e_vuzuf;-Jdrz=m4fTloYt~+_`@%Zv%`%zegEk=$)wM!8H>FGE>a#dR_j7$Ig#60F;hD$R&EFDCOh2oQ@?bZuV%jhamf{( zC3A3F$*^8^lX)NR*!&K6G;m|dqZv1vik8Pr1>TC3O;4BZtc)T2Lh#H=o8@N4XpzCK 
zrD)nu-;yRR@fRtd=AD%&`HZu6xEDj4@Cb*83!26Q7fZd`FP@3unLzx4Dm=%5XJJ2h zCIPqT2=q(_&+)|9kWWa@3E;V;A3P@lcj*!Eq``9%@t0N6GX*?#@UCyUrULijBjA|^ zo|B2cyb8}L;Q2^D^qdM@>InRq4xZD9*ZA`aaLwd9EZ^zi`zZJt(#XLW@w}}RokIN> zJ1b`ZH^bnTmOu6G+t8LU-ulGB-WU0G^IObs3uBv2{8nFduy^xyhsQPAr=L=r&iQsr zwb`QP`$hC3vhST|$alN#jJ4iQi}2>Qq7zV-u`|w z`^s+bUVX;n!sVs0{4~*)wvys@cE3`J5En!L#pbN@)1^o!yh=m=@10u=S1!;_5(}ag-5)MIg}|nhC=IQofQ@HQ zv=wjW&MgmNZLg8AXo|Gy0gRy;t5|I%_LndR-Rj<+>fJMGcx$OGX4tabn!~Run;2z` z)ctPW_+NXZqYd7Ousr)taGoTke)aI3B){T~oyTuW7~2;d>}^?fc%4Oa__<3BXYMk7 zbJ%-ODA=*?f*sG|!rMZu+w1eykJRP34|KBz$C7ExRS_q*bC=W zDzVv{ff;YFsD=mkqE(g3*c-7oUd3BU%O9>J^PTphYb&JPTuDIF&AB)CB(8b@f0u3K z)R&F7lgfv5WBR09Se0%?Rk{@Ail#~TH|Ez!x(|}y@DG^Zs1KN*^#Sv1`GEON{DApQ z{($+VKVW{-K45;+KVW_{tMXgH8)a$-$)_HuNG`qhzzM2_w4)4gCbB+<_W&f95ytsR ztHZ8Gu50q_t+v{*@r|W@>fYhU_9nTTawy{_lW|74))me}wT>#CHPhA{V<+R;oo=MK z(~WlTbeo^5xAjiknC5!y9gOFTjNiLAx?h@#-8u_Qn)!KQC*zo%?zpS!ZSL!!y|&v) z13Xtf=;`uArmIv(*u>@ab~4{?XL1j^ndJ|;wD(ey-z0@++U?^Wt+#8t+wDSTlbgtN zl$yaOTm|sxyC=t7Bhy*36rOIkncJ3orhEpS*ugp$`WKoEbed>t$e3m28L>WtK4`aN zv%y2YhDrY5*LnUJ`m-SuFB{NeWU^EFRP!K>@#6H=+@olZ!nQRff?X_HDD8aSsq7q8 zuGG!`Y{eXm-lX5io?k|W*4h^3=kB{w_B@C0^#MOxOFPf;yU&SA8hGH9O() zJeFB&8|HdDM|h8c90F?)uQkQ^;9}+UA=n2sC(f=Zr|4gIO>!!`=9epx=10iyp-RN+ zC@t`4>GEjtzCBv*@o9l?yF6ODyl=(t@@ToolMU5PvuI(w7|V3o=HPpLT2l9+Yde^W zw_&d-xc`sl9y>4a&c%M|ii5q4tc5&x^}*h6^PtC**BtB}j{i>lVLQ;c#TyfFZb$c; zjY@BH+ps6iUAfI|{wQOWpV1cKeSAunUDy0j#h8lhK}&X~Q{laBH%fRW{TXOzFD)Rf zF$@dgW;pcgK3pM$BTRP?D{Uc!o9sk3C)HdSF)ix@*xbE@LddKOKAvS32t9icTT?7I2;R8Bh25 zJRHLql=H^4MeWZsQwz#^yXu>KXD2(Ax3H@&z0+ZQj9vA{)^dhBW0TA8tHd{QcWm>6 zl|pxGIdMg6xhKOnI(F$Qr;8(xdaHXZ_Ea@xhlC0<7?1zt6_``m<=YPNppJ^cj z%OkdBM(E=FL%(drU@Mf3UX(-SgJ6gS|U$LjMH!;Arpm_8hiJ zc<;TJkyW!iF*leZ$lNN6Sz(4qG-sz3+@3qGW78^YF z_u3mRA9fjOw3Z;)2(V{fAv-(@5fF**t=mW>&=;iy?2~>uy^~Zhjr{XC}U$M{a7A; zQ%*C-oQ!iLIxC|6;}Z?uqV6tW=Xu3#rQBw!XSenMnAWb{YIA13I9Tuaa<1UB>Egz+ z;I*#wlRvSpge_I=^K|BAQPz+(KVxlo=qaXA`vh%w2H#Wh%kKOb?Rl5=a;dk+*1DUc 
zGv9ijYDlK&tjNB(XCQhpZebI)+IHp0H=Z(nl#w1t6?e%_yLj9-cb07;llR*7sh?Nk ztSgK(ji#TcUM^`ZdO0|>7E+gLCH(>?o>qG3$`qfXzqDudXk6i^8$`Nakgk_A5Xyfb zvQ&+OIo~ePQ;mo9k6p|GZP`_R+8EfgQ@cDm{z%zg>_dm2nkF@Ba4xZ|6 zQF>^iKlS7z^Ye<{tBY{|-yU$)jlHK79gjXdzpbP-5B}A@Bzi4)*Hd&B?r@?32l>27qX7LUj z^`w8t+2(rbJ^n9pOsn`*6#JF(nPow5N;npjz6Xw1ph^9==LRgL=sN{6j{l&o+;iQCWBv z;MoiwjfXTwt3$So=!TMPB4(n%I7#-6SNLU}Z^FxonT=)O_nDUDccHV6uv*+1#=@bU z_?H`B-LoS7RjOhKe7>1B@UlBFhm7R0&0s4P zlAlP9cfJEzgvZ1qL+Hd9JQ`uJ=5zB;DqY9JuT-+U!jj!!qubilR*G54a%3Q51=axC zMzqo|Cd;%bTle&|VRuOJZDfj0+tIBh3xC^Dt)-0IVc09=Viz$d<1#Lc$Q{D>@wnIn zBXZHXtQTOjBz$Did1A2kbQDSv-ei7ai0?P$AL{!H)aw(CzCR=XFyC*bR3Y{`A^Q?O(V{PrF@a(B4FxIx7 zbr-a_^e5OX<)7gDBl1u3{U(0duw_@1J*_tVth;b3eLc3%0rH>X`)lN%?)#JSpFy8J zk$WwGOYps5id!*x&{t9bHV>h+I{9o6MP_#xfFnZBmEN<&{V7Fw0&D@*7bfuH8V z>kWo`ZFtIBFZl)h4&Q^SgWpT9;|;2hAMCxG-)~Mj*n1i7leppBq63;2UqZh}dChL# z(HNF=B6)NQ?;LAwRefK=D3;z{YVJ=HXB41=eokv@r<=z5QNy-*dnlp-ecu62`h)(g zN&8xiqV{9`9viUgrU<=R$D({KxQb&Pv^Hj7|Htoz@vb84$CUq<%=LYE!iRF%4SOzs z=|-!<>Iq8{7OM(_XSFqi#jC<%gbg4pQ57~A`fiEEotpec*05(gjd|AYDAS+o200T8 z8_VlHQtOy!N1X4}obOC0{Dmtoa?UyLT;y{jXLYdW!V}3$k~-9xmvn}6JFKZ_POZ3F z!lQgw!}U_v*fV%TiTw*&n=#ne01j^r5_rk7Xc9fb8PX#fgkN+h?Z1I1YOVM1h{v*h zsJ&2O+&_33de=LTIAgGJURgT4q0l26G&XGY6-9I+??V8a%AdzP_%i1$){0gB8sj`{ zv>9*gB06K;8{MA+XLPF`E^~%=zo0WUwSt`pK5X9Da*uX`wqvUOqQeG1Lz*YZ>k01T z)p&|MY)uo3z{A&C!nNOYH!#8@-OOH?eIYODcf31!1oLbBW~QV3cEe(48u^MhX11{` z{!E6pYW_^*Tl`r!u-18@;UeJa)1ndQoaDKV)^?R|1RP;LCV46k@j`cUeWkY6S()$M z^X1xk&ZWcR&O`Jemkc|{ll3V3xm2&hQD<#+Hv<2yK?!Hvh?w*1#0Ad%jc0lADv#QR z-~}7z)f$vZV_MeVYr}khgkI6Qn`}M?cvZKTzsz|=bWV%cR@U0lRFC^T_R9>nUSlkD zuRS9DntN>SHFwN{U6rPu*W6^b#~q7*AZ|c@zB)(j_68wvx>b!)7na!!(GOFOfbr$p7@dEF#-CTp?$*aU6rBG zCt4GWeVRmba#d$3MR-WZVCWDY;TQsrQQ$a+^kcvy97lUN66N@{oh7AjLt`40)u-*M3`+O7vSZa*@Q*Sy@jd2b_FX`GB(Tl$HelbL&5$=XFQ zr()hxlFr>}^pqkQ>cr~CuJ&koRw`9KU_HzHT6_OGSv%*w682ce*zbT2T(9=4_Pz5Z z&N8Gr%L!!WdgLpaU1KLRbL9-YCNs`NANqo?vNxLoWDm*5r&UnH0CLUXaSP=4Zy zywlQZY8|GN1ScXjP>hXOMYw-K1Sz!l6O{NDto9`|Tq 
z8gO+VY}ns!p}#eIC`*&(HuSO9eeM!;t!1&d%G$!XW{X>s-{RKgwzwO3k9S&Xi`U<5 zNTJ{4&unog12doRZhjgIT$^KmS?)g4b-TK&v%qg@W-I$_Ho6`B62%U?&cH^_`d13O zXqI>*Gu~c;-ic;nc4HoxOp84#M<15yuve|5@5BAXtrt4_j^?>f2zkh5x4OIMx6?;# zag+I&z03j+>sXEYPOrBwy>+1T<5d?qNo>A}e2c9+L9dz7VH-tcGat2oolQDg_qvgE z!#y19ZENvaPM+_0whKG)7I!87Sf2ej_}$Ce?U{Th^KpBv7e2w(9+^}QdUI{I<}29g z?cd#9=X`(pd`JE`X|?y|W7nU%B$Dmwye)?jsd^ZBW z5?<`SlXbq#IBaf7JDO%4vDjiyI=7Cy4cqOm^X51)+<11KZISOa^RO$XCjb|-EqjZb zz$TT*arbt5jXm_zn3G;IkGXZb-BQ@%I+t~@KG#BA++JlvvoY45jBC)o&Fcm@=YH~H z2OFgw&$ZZh7TDJkUt?!*ja-6u1I@Yd(6rW8`CeT$z^TVA+;*;4UL!xwuCW_~dTWC-=Z?8vd5!#ddnV<*c3!7Fu_*YMJrkKyU9uKy zOP5@X3?#GXc=Z@XUS;ETDMwAWUmvQ+BsBlv)GXRAl&XO<_O&3F0rB094HoubjFk2M_VyXWSZbK5QRpmB{y zqm|z3(I;66X=`3(IN!KUZ~hhny2g=T47%2NdCu&{4hwx(e>&pPckC_X3!T$N+b0X; zHy*eoJZ$swo+IAE%goK(8<9ue@bA6=nW5Y&vv{Yx=L-(H7Tg|pK6=Yp9*x(1n74GP zE6JmHBbvT^Yn}7W+h{MMFCo5#^sV*i)BY>bcSW8(jL>z`Ewo46chk1Pqpg-Y5+=P=T5qX$9$f`4{Qc4O+#LC4)?>v7#7ntr*$aBjL4xjh0+>+g&?lkW26Kdi%8 zQL<0Da_j9D_@cTy>-4De_No|_;cWfnxsLo%^u={cI_x;{(jRrpqK+Hr5<7-G<;H=h6?yaZ zht@r`$8L`51l=^}_Il?BE8!7-jYEB!zy%F|e^Z_F`VDgq(PJ3*c=U)C<(<1B=6v^K z=&gR}$lo~7`QfK7@N`;8hfjm_W0(Ex-ASj8JU_Pz*}C&aUT7| zMOFE)eOLZ#a#82aYcK3K|M(|OXYI9_e)3=E<$rp9kZgeVi?`7Ra0giEx9-E`myQZ}%e*Lj(8rHvyhPQ4^I-kF3cE2=S_p!M1BWxxNnn!s4YnPp|I8Lj z31HNZ{piDSk53=DPC+s+sfe9(0N(mJQ^%DImGY~3k|7SShr z4zgT|3J!YiTFiSHSCVf)TVJ|%T@iOWE&)$KS6`Yg{%F$U+n269Oj9zo&bHBMqG|Ho z;D)Y5jH8ID$W}cG(t^}{59vu7v z#)Otf2b)gKvAQd$VB5nw(cfMf;T!eJ9@{A4Z6CsRg1hLni?MkW7#CqX8pN3<#uA!0 z)}-4QC)g#%7SOleHW@RhJ)E+l!I^Q_1)ePG{@iQMP`}vDxU3EPmFfmQ(`PUx#ouXv z_v%5;E1x*alLg(gTUjMN(FIQEhL%iPV?XCwA9yDr!yf#s^E$R8<{`{aJ)4*F{foKZ5*gz@ zBjjbF^D_1M_Qgf#?FR48q#1?r0~~BVrOj4`wvsLJj=?0&DQlpW@q9UE!87XQrW3Ih zGq)iM{_)&)nx7|nG>zNRaAR@L~CnVhy51boLuo&M~tQs zgX)~_+{Us!@Epg`vHTT_J-NsdrI5+-cXZp;seijWXF4w@70mGQ*WN*Y|605 zi0>Kk2Iu1VJg1xQ$N63nZE!wSceYc+zox#{xqjdUoU7dEUZ1FU2GpMGY{swo`1~j~ z!7Mr(KlS3x&(9-F-?9rSzq9$y!&8MV!!LRjCwdiMO|R&wrdRY-(_2mJ-$Lh@fTqyi zpm!;bA*`A6FQQ=(bN}Y-yfXQf^YF4Z@5`ucUKa8;1Xv53gB92tMjOu{bMtJ=%IG{) 
zS>=;`u?9Gud6a!u_b#hm^5Dd7=oIgVAK4;va^F^`eBa@W$QR(t7qExOKA|=sKAFZa zot3SX%qagqAU9@r(FyZ9ihRUJ;q~gBGgI&4BjtM+ANl_8@)22V5`R<%wU4lVe?l3| z>?LK+&^D>O*`Tifjc~87Gw5jQTJJ0VPtz#giDz*Oo|1pwz|P2;NeY`2Heaom%n%RL z(D)4TjkG)Ij(>sg7WMW?&S5okMzuS?u{_%7ak2$p9#q$ATNZjEh3unf_DTPQ^06LV z;rCYqyt4JOj$8UVxD5lH)!kU$q_pr48eH*4e5vM1b)Efp>x)cTPw?wWGISMmS=n=t z;Y&Olx%MxI@sU2|xfEF0kYziT4OzD1O?231U=LRRFCHrW1;F;TABV65X;&(5n2+d~ z^%v-vPMpsFao)M9Z@MXc(uFuxjz?%8UR$Hy4B83nCi^6Z(!XO#C%Mr6hPn7v=Vtdh z>HE?JVcir)cy;rCqh7`hh&lJi&caUL3O~=ci^W!Zl36Sj78Fb3x%jQPN1?fpDKH-M z^@7TyvcAb$%cKS8*aMI)^abzZB8!wwyxm(B7o}`rJqZU&#k=!=fN@EQxF&^ZPdt0u z(K$Z96ega%3~U(Bo#eG4ew|BKU&Z=AvQ1yl8NXP8wfD5fo%ecY;Jz{0+~1LY&8;JD zmKF8(%tWwx>&%^zFIV!|y>G|ny*#j;$2mK&qUSYicTXR(XLGL5T?3A>*u|5C-;sIE zO~g7%*{EAFj9rzu@w7Y3h(QBu1Nh_EDw8#ZQa0hoM+i$077fBW$y2Zi!tX>USW{SI zAdloUXsT&Hl&+TeWNd?%&)v{>SCDQ1;j@go@=E9wo*KSy79Q3r8nREjYbnn-Zyn%i zw=2UsMLJfNUA~SyW~CFJKjr1uQ95k1f7-2^)_LfAa6k0alTRJxhzI3Z4NbQMG>s?k zA;cd|{uA(jI{lhE6924hw0tYHY3-wt@Ac3YuA>akJngQ7KAqPH=@>@71=2=YyNJS% zCZ_FMp zOABS$pt8iM_dqV1NuP+VIrKe_?^qxk>MKL}7~{eD@==p3di8%VG>MM!#2uf9h84Ri z_dvrW{Mx$_B_FNbOyoPv?>Jy5F5U*Ow^c6Y{Epg2q|Z2x^=IdSKlOU!xN~utb9iGw zh2P7!#&j86rH%GU8wI8_NPAjUT7?&av{Qn#u|8>8GjVPW(oU>OtMKJP+A%>|t-Hp9 z^z`%3r;1zQ1^d#v2?zeV0MAjl(iMu=I5v#GBEVN4;7c3K3hF2FVD2)G6T`$B;0?tXA7{+R&REdj3jBjBnB_B#Qt z8~VYe_^$`Jt_g58906AYuulZImh^*5@!JAiS=Hp370t{i)? 
z_c-R$-)}nDySMRR@APv&*t+S&e%4Lfex~=F{B_g6`z!0F?MGNQo!DpH^pih(@4D&4 z_qlE=8jV!Zo0mMb?(n+lwN>k;A6@SIbZQRdR{f`l-g@*BJg9YL4)^)mo|6Otp?G7_FPi zuXWRrz@%`$1I%FD;Q`E$z&{jsG%$_0KLBPJ?x+CfsK9?T?pR=k<9U?l*xs2Dc@EIX3W*#;sn*y$~CZ(NipK;A}H%dFAjeEZKDo<384LwH|w) zo92x4$vO|s`fQr<-)Z=#;hu^+9oI+~ofPnsE%p!*radi{>Aub2L5IETLOO;zZLlBgI&M#vXT+?Eo5_6Iodqa76ehaj- zzDQZE>=yR6Y;iNQAKBZO+2UF~*Z|<8_Ee2AHgfMF>(tD>QyFOE*J#6of^Qp3qAL>l z4eTwHywzbN<-Ao!WAd)j$CmT9$1M}cpZ0La1pDLHc;U90A7>9>&F#0>X)SMpt$S~N zd*gV!u1G(f>$K|&jNh=SK9^%XJ_{X|)|R=0%!U{{~3bytj za)*LjXOjXG-k!srtZ|BAt;61mo9?o;_t40%wY|A?c0KFZ zd}lgr?_DKm(tgG|yHKvl6|hO%vv+v9z}fr)=i&=)OAl_9%{ILIB)s>eVZFJ3*{p=I z;Vky?&t&i4{1bkg+P<;;KWRr@$vjGPnU0Izza7;@m-RqjH*`M2FO$TUikrd>?WlLq z#$H_=>`kO@BE@1UlVM+RUiRvTkX^Mk#;Atw)6g9o+%v&X?dAaY=*_Ei+hlDjnSWZdbv8T4_BV89VGI_=r?bsE>jm>bGI(Zl{4_RBRH1<%&F zZ`o+ZvebPv;KA(+;Q?_APx z+hs$WtYNXgNOoDyxO8Keg`edJ{8t?hIF3Gw_D6beOs+PqeV7_gv7e1SOYG$&OtiwY z=R-b-&hHZc*}x_%`>e*p4`PpIlFq%|YzJX`7ahFWLuqBq>XfMbY~Yj=s(-sDmTvYF=eN2Cbl((kzdtJK z!O4EBwVz`~pX1qab^cB9yN1+w@v`^sIcA<`^PLQwky`JL9+e<&y=NQ#(8#kq`>@j7 zaLhn2jcmxXM_%mNkqz$VxNUHR@O7Tu>h7VleY<0-%ji7<{1`KI-r+#ug7T|*f3K}IuY3)b+#Ax z`|-#-aWjS?@5CK*67oK5o)@=k46;t#>LGFN)nkqL1mu12IbPi8$;di!8-^#G$;5r= zc;tQL`Ci-&Cm`>{ePzTTNB!XJ-7YF#v;(GGBoO@D@TJ6y|BU$S_ADin;v$18TeVH|k4es$Z4bGqn}_lc-;Ov5?OWZI!&w7a_^HO`(v^PHBPzPsGfbqVJ~1J81-jK0^nHw+r+ z%pQo&NB-Fp=iX*UoV%m5y|$qHb*0x-_mXwVP`JJpBkyqCEk?fKnp})L!}U7p*>Ek- z%r})a_X*ef=-&^mDa6P}V~J7FB%LgM9Lut&nX-#7VHs6OZ^HX5^t`CH;(r==xnx>w=K+``Hr@i~_^!I(! 
zeZ5b*5U0v?JGx4;6Rr(-?a*S4h%|zWQKskdOYXw8flvLPl9NmyImz`WCmns{r1OxR zn8gw0|E{dOTTVKv27Dm z*QJrkJY)3Se^fYMjlSVbZ1xv!B>f9)-mh0?Ww=8rgWWpAeIe+eM2_>3+1D%MSp(2| zMl|!|sFDOym{+qOUFX-ZlN;$uMeR0X{Ma26-BmGh z-{Abe<_M#)FGm=UM*56LuMX^kN8`dzZyX!iV~2QQoHOc^=8_;yZB?2qevNzMxH`iW z>yu_~kmgU=y25dPB1n??B8%{T&lvd3!d{08tEb9gr7s`!%v ze7n8)pL=tJzIYPg85iLBRX=zXe@uYq=fo$C{_!M%8yetwt{* ~j{2A-?7acxr$% z13cgC2an?aNVs_WkGKPlfM)=3ZxS#5g!b8L{wTf@;E{c{_6T@tfs+kec<#sT%V)*Q zhAn#5;|@Fmo`JyqB*0VX2an=^6yRBfTXzIJv?b@C13aJV2an>v7U209?w}*!83bHt zk6qdi9>s_D*u})x9|2E2a1RhK{>>8E$j zNEp?1CYaj;Tqoh`4&kspti(oqf1LA|gAVrImpIsKMGp4HPQyO@&fC4Op7hsk#H;(9 zttoJ}W-hv@@7bC^ayRtcz()MC?6B-{)!CXtKWA%xMtp&NE(OM|tAo8Rt1~h1zWZ#= z?&s)Fxm$eH_KoF6_OzipwJ)qR*SLST_K4NwJIkqJXL)sS&SrRbXW1%rmQCa7itNp1 zw$;A3rM>+wn7Uu^sQgm0ek9ah{f+xK^`JdP;ucm50Vv zKQf;6_BizrzD7Ls&id{k>=lMkJKQ*~?8-4**_ETXvMWb$Wmh(FWmDGvG})As#;-h^ zaspR2~byK1a32K9Cr+E40kMU6t@L8f;$e^#2t^z8Y|=8t=Oo2920Sc<2YR5n1m}F$Kwje zWL)7m0arLq#1#(R0d*37(UHa#jw!gpF%?%hPR13EX}H313a)URiYpwlO`L{bIHuzY z$LYAjp*tCc;|yHkn29SKXW|OShj4{s7H)X|QP?K_7q&j#T`|2_C}#?V^7cG7Crjs} z?sr=(*+>fIrO0>+cP{R5-1)c`ZWebU?qb|DuEqmP@SAF5>91E};LzR1y62dCRlwP0 z*BFIz0euxqx4F#Ao3`83Q+mD6V6_gC%BU)aUpRZT~ms&_qg+xE^N!=?70i?bL-B! z&z%phc=i!HVgr|c4Eyy1_UExvk72(+$Bmp}T)Kw0G9I%NxpDU6+3lsP8B=+8sBsry z3uhdo>4)sL)oV(RUo)Px1O*rU|3qSeK2*R+Ij+~flGENe4zv)P?k z#6P=x?_$DhQlH(MHM;px-Yj-~_Wf#IV!pnM{OPLExt(RP z=aQe!tJM@Xx_9!cvp2drFQv7BC;92Fkj?yx2EQ!!KkD~!oAD`S&y%mtz^o#Vde#V* z@f}Y^Y@H`#O`@bXD&n)SVdAeJQY?)-wzYI3cNS?6_?nsAiOgL_I*SxdH*;@Foz31T z_ry=h=Gj%M=brp=%ezYPtJx0@-|wM5zHm(^`$EyDtSNjcqcQSB_LJlpNj+v8ry+O9 zLd)sxrIuT}O4jwN#|d`F9q7%KU8UH^!NcBf3z!)3Um~4XpEpn*>UjfY5Z*hx!JB`` zUdwk!ajSb{4>FD16bjs3keXyK>!FT#W2BZc)a52J4u8mw6j! 
z9k*E8#CM$fQ=45$SSNC9=HVm$M1g*?M>sxXE1h)V{50R)e{V~tnMSuQdD8slNxlu^ zaa(fft+QnA^VV6uSqo9*cePqot~p0GWe zn&9*IbJzD)w+KwENL=Cbc82dP--hvsz1Wt!0laPQk{tfbJ)GCv>a`ss)yaI1uvCY4 z{>I3&Mwb18cYY(62Va42>Q8S>e#uZD+Q$AQ`lNyB0jLEcqCZ+Oh5q z!n^2Kn2#C$KK^jMAzTw$-)`=2oQOr~_=CN>$1qO1@PywEXB?t;@Va?3JA9*|4&G)` zjJMLPd05=FnBgA9lTACyeTr>k+e;>Uv<4H_KBf(SyqwZKi}+)}56SIxP43&W^29f{ zmEwfyJBmLM_}giF$uJy$%zD8cKt45uMaG(T``9+#@bc3skAWd9{<<(6e1pPp{Pkft z_!{uX!Rh78xBM~in(QZA1MXv>QFw;}cWjn?a*48FM^h%1qcOw<-eG}X^dA+5gY)Px z9Df4*BcOjcVIloZAuRYthOqcYh2c}ggBRQcy2s4LH4b!iSDKlNl{p9G#We#v0Nl0w z#Q%Y~UFfGVd1&VT?5?TprLloOiN7Tb2j{pj9RK(*9J~|o*O0H$bLNTn-Cp3{eE4xQ zwypdN?s3E?Z>A1T?kY{fKOlfrUdLB`E5FHA-(BQ$Le+PY?-Q%OyU6RLs&C~j-08HR zcP)8OAwJA|D*oE4yiczBR^HR9zLobWRo}|{)T(dgeOma0oZ(5@>YO#S@l3H~W&M4+ zT6?R$qfGsA)Lc{1*&HMPbGPdz`kkxRRLtDZJy}oi-L;x=#PT(jND$U}H)pf>j)pM7 zcPxy*%a4zT@xgZ@jK9;5PloZqcTE_7haW#6j1RtR!}#0%_<>=3@LdL1PC$P{7$1D6!g%^u#SaeSgYO|>JYzJ)4-Mmk@5V6x(|-K0Fh2M`DvYN;SNfyF z_~3hZ7=N=LKO&3|zMI1M6@L84Fh2Mm6~;5}5dLGr_~854Fh1|ckLKH=&z4SOkAjzm zagl=^w^sKd&Btz77gZl|FMSMS=pL<&ce4i0SPNIp2EF~^a_%v&yo3}{M zm{WI3Tfk|2d2PPi&2@LXb8W_w*cml`D-b@1G>d^*LK=--=I3}XqR4oL{SoZh@b0o* zoMOyL-&h#DW$&gQ-i_dUbAkT8&F|xe6StHw-FukB)m&yvp0OnUx42_P_c3IQZu(dM zOpl3u;>BkFZbGePiJvW`(K_H-_Nv6vYgjYX__A2~3~f1SK0w>M%qcc7Zefn2?`(rz z(R^3_o%MD_b6zhTzvjJO_#nHYxvv+FU-Mrtyv}C-wfvm*O`%t~2QQi8d+-C5T_f#E zP2gXEe?S=CWLIj#aQp+q@DX;UE)2&%C=4HNSL*Rc0{jhO_|bMH6~f~m9EKldSB8Y) z_=kq!!|Y097><8f7~W`Cj>2zBFQN~ba0jJ-1r$NX-Tk#dcc<~&75?Fjr)#aF|w z1h)En8Qs$}%g20I0AJ02$pd%2IpUu$6Ys5qBf-oL%2O=|{e`9d@=(9x`9g;} zni7^tW#C*T;}B%TFovpJs>8pQhV_j1q=9E8)$5}*S@wMzO}e9Q*OcApMBaO8*;z@V zzuLyMm)P%LF)4T2jCgJB&nGoF5$`;x_x8Le55*I?r}h6JzAX4MtZ~iaC;1gmSEp%{ zw8LhNm(rT|7$@s);5TN~du_It`x=UzdmlRMrqW|mnKv_j{NtSIFJl_N>HKE$%kW#wZwbGp{Br!R<+qIAjr@?seGl^E zzTg%HO{Ikh7Fy1hX7d<%IP*Wnc2D^2!bZ!uTQxJbOc#_21++`^_sbdeG;ARGJ9|_>D z3E;jLzzqrDrUr0}0=R1fxXl5a_WTLo!~pJs0Pgw#Ze;*>Yyfv`0C#o(cVhsT4d7Y= zxW)kPi~ueVocJ+$!C`((qRh1c+$mLX;zv5*N2E{u%|U*9gTCn0AiuZH_4zt3!1L(< 
zZbtw&Gl2VD0H=Eu#s5{nv7Xgb4B*ZR;C2Ua+T)})o92!xbb;qa0~fw|-|@i3g5Z58 z12+=5tS@`vwShY@aO(nhK;XW^9n9b6*UfJkzYM?O{N9*xu=h*+3jCJvQ#-x-2r}BA z+@{#$T7x}9ve_lQ6zC20*QpKPAFld`ABXf6VSB67+*-&;eq}Xd6zL24&Gvz`wfae!@*ih$M-qKb)L5v!WiuvP5!SS`rAby~ z?8~PO9E*Omh-)UlNz{|q&;EIIgJW8pF--zqv&u;qs3{u66{ zl;Z=XeM)KJJ!RH7&J6oO@g)_sk&m-iRxqkt?@jCT{(73FebT&fE_<1)`3yG z=@TazKhoGSA-dk@*zx}stmIiTps|O>BGB&j;eEfa3gWLcjH!a}`>i^yzWc+V-itL0 z>2HkMJZ*MVX`B0%=WDqy?#(@4exALG_PyLY(_vqlMSiEfUTKeCQ+hnh-NLNb#4$od z!^JOEbni`c%9hI6bBd*#xwqS7tPoRMPK~vV6mMCjV#s`}E&5gxFWbv6n0FLx?q(*9 zXnZW&VXtAoOTus4wU6cKFEhpFMDA?MFfQf(AODWjXV7_Ttb1(J`nelv-cni7Pkv`L zs*U;i8QdrC%}FdHY8Y)E4HGXo*jqg3VDF4ectZsD4|5OpO0WDqxaE59V&m#v?C@-W zaA*&=3GWPOLyv;@L04@6T~vE!pgZx;)4c_;40jaeI?KA(Rk~GWr#(4xLz$mWdzL;{ zdp`dhzdg~WYkm8dAAUCB-ut?YeGA}8k)Ad#oxS(1U%cIW?u&2tMxZSs+HjGX!Mu~Y zZzaF}@;T`U`5gBb^64g@*ZK9$NAHYZUX_n@CgrEz(c$=4GnPe-4Gs0CXqGP~BX&r*y*LuAoxgQl9Qqn!x(s3Rx>GQOK zjM^+Ma`y+`I=ckM&t@Sk2!ruY@I1GB7 z;lmB*3i4Gt?klO4Y%8tGWx|tp*f*ZYu=gU{>Bi}YZ)M*r`#7cd_pQX=sPePlRA)HC z_a!Kc8NM$u4cw#HEwnv2W79l>`(>ahq!oFer!s}K%m`?SiWVHK7$Rez5GkkM2`2V@hi3|9 z%fbei<;Odn-yX9Q9<(ynZaL^JjK$~`Ci>Kwg)EL;nHsR8~IJ}(tFPtt`$pVU=$zj1yCQXG8n}U zS3KI@yo<6HOT&74_YcUzAN((Qc-ON3;BdNpz<<35*qEh#X8d{{3~Y@-o;mnKeQP|V z_y7KF6}06#sB>f!w~k**Ki*`@&;S13@69*gwO1!f8NKp?mwmLPEtcM3jHkLq2T*61 zI?Gk3oEg-fe7&pCYHa-NcrT1Ri~PFzQCBI0wNdirPE@UNda^kukca8C36-BaS8BD7 z#^gK!<3qiT=Cq~?rKGvsIURi>8>=~(bsO+}?M`35PZo}D@TQO_^TpC*L3q+!SD8%Q z_U0E$G2)*7Vefw3@1i^KMUO{IevNxgK#LX7ViW?pff4Ps(1P4~w5%_RmfM}9prtmT zN1?&Xx3GrwsIKxHpNmN~ziG&falBp)+>S@|Go#gk=TzP1y}RZUv!sMu{a&u9O)WJ_>K_z^=c zzH_?i-MMXa7v3dTp?>n@Du-NUk$LD8UAJE#*@7=X zrJvsV!mctNxsu+L?(3^RnHTR1*Jbnf9`n0Z#&F~o9gA$~Zlw9>X)}v%qCaiqzO4A{ z?uwCr2w8gQx6&8$;b~3kA@)Q(gxqx4rgav52X8yw4sWt+$_e%so79)^a^Kp%0~;An zvBnhkqcaWkFTC*9kkK^;d+jwY;4CXs#O%v~b6H!u1^J;$hrC%smA25Z&|!_G=x3x-0cH?Gim+gQF(rvKj; zOFcw>3v~9bR_!(czqiftWuJGKfj^o(2Rkcg0;BuobRVXP-V<+xXO3vQz+d;C-il4X zoBD!2(dDh7O*u5X=PX zQsp}Eci-QuHu11Wm+j~~W#(VL$)FG4y@>LK*VvJVU*l 
zrMugxb7Q9`JJg?7pDA?fCsdc@BU#h;!^mQzWE;7zVLTxolFv|$^==?;&S>oLfqvhV z0}lM!&k_sjM?BU0w89rnZ>vaF*yB^yTldT>Cq))~rlRvQYD3~NvVzMSJG0x9)M*r1 z*{5&<*~3lyg17$L8rv$D2X#bVl%sQ)IlF*ataS+cFoxynv%yhOe=+|KbKl-k+~C2l zgJ0N=CDW35(JbCX@{~Kro#*f;k)5s+4uCAXug!PiI)qg4$gl8!rVFU%=m$wR~7Z(}U=El(CSZj-#I43DXt@Ko(vW3qVA-gSSZ^b+#|P^r{>KEj_;Qi;FQr-*tCHD}j&ZYi4t%EQ+|g_758rY7&iUqk-v7{8#%aW@NwEft`;XxB-j%Y@ zs|ru6-i=4E^r$6Tj2UCa5{eiO

UYxMrjt0cse8l8Ax=zr9qnb_ zqjqq1r08EfQyxy*GWaiwpXr+-a=#2M7MJjF)`3}AJqDg>a_*wv3i4|t|4Ts=e0n5h zsJx?j>mNO&Z6f6*imDqO(L`l(KJdYuzm<7PWxN4hu(r&(na%Wz=5|iu&%ndFtjcD* zXeRB^yydT`zD#wAu1~F{@;BC0k11@YjQOe?n#OKJUi>V%Lv4=6luX81*XRSKH^QAHuc*yHl87lO05Ur6W!f-_7?O@8*PfQGM)mt*NC7l);|mW_zAc zL36P-yY|j`74U1%F61wHpn*-@@F?w{@1IZJ;j)FO4q&Pb6^t+KBAq9=+4s$P!t3Cb z3;cP&BRy2|YD5?9F0tP~vBonlv|f6ZJq(?}iP#4kNw;RSVB}0}H-t}jrP)41W~pw@ zL-wyfR(63+-D%P%MnTu^GV;uoA8#h1z1P|0%#VcgIld_@8Ls=)sVldUy43%#{3q*_MRiH%BFZ%c>hxY zI}Y_;0xW8u+Kc{HZ3K=V14r1e;Mpv2^eYk_$4b*JS<#%GQ3(`pS@-PN6<9La=KfyW#VKBrONQgov^ z)iiO~0k_H~*mTv6$cD44()$I6&Rb|eUdo2qICt`u>Qz^<2NOId>{yf&h9dF?jVS(cy^>Hc}06V!Dm=}YZ_aaYjR8`-n*O!b#TM9=NP z)Aqe0_=SCkdxX-$vFaqh=DD#AWjgRq`XTVf^Au#6=6upOnE&}lf!{?+2GF8Y5xkG-e3O+K*d=P&$F=~sG6AIDpL06bXUc2;=Uewb&6 zG7)~wT%bI8WP-E4+w!T4Pn<0$9P?ArSb|K?oJf8DV)>J^$0Oaak$N>p(cB>Zj@K|& zL(c_tM(u&on4@!)nqAIt0@sEyl{1xUOO6fOn1miR=M$~|U0F1&@yUZulqa+ou;!Ft zZKCNN>fUyr}96h?*FTlJEXy2vzlSke~tg!P2;}*A5hq-$P@lE25nt$%LxBJPnlCwDl`1QlQP#(W;QA_eSW$2P0Bp54(?A< zh8wj}?U3DiP+j~g%2-GlnW&8C3+37uDdSMuR*vdn%-h;p2PgBt%(;|NisG-MjAqI> zFe>8|+O~)?&abP7PS1?BWrp~?oIHCN(`1(wJ_^)(7I_+IKYX&UjT!jnH1amq;pIH? 
z-br33%G*lbJb8!I)ps0uTI&8~AHSA7%~76r$U~YQx2~N>lIJ<{xKSTdX4@q442klb zPP@=)du9;U5QSY&IrLkP#`0lN*oCBH{jz5qVJ%VEe8RMEG(}iC3R_Ixp@eB(mx#i~ z5hmSFVFyHE)5wc%jC|b`435IyAYGC$**cO@*c8H=urVP!t0|YUC0DWI%6R5Kjm|1h zywX6paoW8UI#xYjNX8&LX2teM&U&J$X?sUPYi((G zbt1gF?%H?zUc2VqKJV&x`wG0T=e>^igS_hwC=W8P21)K`xrY8~(svnH^R%}pKDT>YGn)SoCN5Rh7+UbL^=F#R zD(u1w_1%X2KsNDIG%kjEc>;Mi%;Q&2f9<@$^dU-wPi|nrpwySOC3CUNkYb$rgCo zFOvKAWR7E;_#@>c84E1EFJQ;^jC9`Np2o-pvKwYDqiof~njB?#$Vl2!x7R;j zdpH8zi~}$6Sgd+4t;+rgEUjrv10P4HMW=%|R1Wj>HBXFCy!5;SejlcF_|8P%YHK6k zN$gnk*JDaseN(VO6X?DF`W21Y;Tn0YOCNSE@iW7HXx2s>-EFdnJ`w+JW_)nUf+3u% zX+7MQXCAP3FEn*Z&-z|42|65^E!QTZ$Bp2fpDBfA#<9Ijo9?QF_Qf4k(u4(+T z^C4_j^jRiRL>>Thk~TNGp?!EY>k86aCJ0VoCtX`3{Ug~X{Z{F2h|-LU(i~~iaMpG7 ze+2(ZkIlgU*YAhrFjivA)g9ql*Wg}Vx1%#XlgQX2HJG5m9uT}ErPsXXUj1EXMFkDXd)X{bF~Gkfov5EkaN z8S(sL#+ZG0j%Wsa?6bMu&00^6G;zCyyE0|Eb!B!(W#Xp+o;V{c`j<^5#=+(Y2eNbh z-{7DU*$GERICymc4p{Tb=#C4~tI0msJ;Q!K*F8IW&x_tO-Hp%>GNrWA)48>}_dFpcKyOn3oRlR-3T-)0>?!KgHcENW}_FCjpc4=)Y`QCUYX<~mY&G(!_$ZrXi zo###j>Jv{o@RWEGKFh?uR!vbI8X&2hf zu|EubLhZM6QOfSNY41V?^q*rAuUoxSV?FfV{(0XYX^-~Sn+oRIqj(l?QM6^WF%{VhowpKzx%q z1R60(jTP@)8kgM}i0`SdO)x3%`UnP{s|U|(tYAF%{1o2@kTy+yTM1K~*5T6l6{uZF z)ser4@(%H50^jU{=zrcY?1pW~&6?6?b4zMXr6sio${T|R#UJHm$bn_SD6?vh{A?%) zJF>>@a%~2N;ac3-9+d=hg*H*vyl;!=QiHGiBDvIICSg zAQj?Oo+(uPJoKx6(foES-!mw)kupL)P#=Z5f^0#Y>4;4g`=RJEJFAukFS28r+=~Ze zgT;^S>|ydPbL-#%Pi-S=2!64FG(RrnwNcuW*>xSRCi2xyw#==xlL071REHtn&_{#U>E5Y4Eq>ay9 z*M8`sR&Oo%zo#7GTyWe(yPhwGyvm%SCnx%seZtc9#vrC^$rz=-f%KhGdfBhkHpvvn zY{h=4Z`SK*ll+G3`}+j?T6HR(H)s&=h)<9Yk&Hne?HdbUMOQiiSgf2w#;NWib&sHK z!KS{VKYON7RAnP}MV{h@r19>-{>VF?t7vcHlv~JC0q4}uA=iEq+DqiIvqp9a(W=@Y zy`YuxSL1GvWDKxMM#v_jK5|HN?=xoa1oSuUJ5S+Ht^VN30-IK{BDXJYrj6r@XB)>p zXWMsQ{+;A3GDEmR7Ijdb#RXxqk+`B2>}<4mA1>B`3$0Hu&eX2gm1V<+(C{jy>!|&DuX%kNcdoImd?(W({bFkOXXJ@`X0n($|f}l zxTM=#n-}&g`Y+N^Is)~b`t2;WQE||)Y=p6G>yzO6@w=d($lpfgeZ3gkG!wK*xL4VB z&JGRI501&+ZtGCrr?RpEVdDf3v?F6UbKQ;;^q=6K=rW(8-Rie{%HWId(X=6r4TyZl z*|H*-V*i|0U|IxBKfbuPFV-OzU~iKSF%Q~y${}A_dd3#ua-WCh)l8Cn*)+yy^A@Hj 
z<(I1d05;~Vvp!V_eO)!*741wS-uYu_r#POozx$3czVwLQc8+1!O=pRAO!k-E@)r{A zpZR(Clr+*;{^JW_|Bu$VDjy-*yrCo=Z;Y34kMML)h{n8P79Szv7Sqo62E@ZBZGu7T zyedDm=a{##(PWO#-fZ{tOnt!Y)mWW|Hmr}0+JuZ?%<56w5|r`2WebFnX_Y;aW7-trgjOqzJj;H%(K{i?NuFJ9fhmT*yB_@#uGqO*aeTP`j@ z+wirW^SYc-z&O%kI{qNPL*Z;@MEfIt34a$QI~O1q&{;xXl*hJdjL=%P+GYFggU>lw z1MCy;NZR>uOt-54PLKM#m~VR0!@gzbJ56Q-ZZ)pH;ot0rKx-Y+mDR?>LtU!v z5OeDuM3H|wZA=#nUbBl2Rqo{)v@%L_s-HC$A*uH(j1NKjAhy$d>`)oF5?c*GknK1AlY#lG@!nY?PS6mc=Ct*!t9l8 z-pWqeRHV)sMUBJAQpUu+zZEZOoLFh)MvIlz;h15_E4xGNlNH+>GPEg2o2~~2*&gCC zPyc$yWx7t;bZVkV@^lEV@D#Mv`yA0w<{=v1NrT^bUxJ9 z1p8cslP1Y2x8k2SPW5V@utNIMEbaB6UwzH5{xXO)oTv|80KRw}{U>0zZHeeCwoyI% zmf5>uUFe%K`+j^=E_k;u|I={oqJGTK+J)9FCfYs$FRYb7*U-;RHQAxiX((S~Nl3?b zyqMi@XZ=U&S$W87{4w}P#y%-UA5;!oRYxc9N6wve)UYK!1Aja?^i$2Vy7reI>}^ja2})46aw z6OB|~Hhaec$NNKCne!}nLU>-Gud{YMoG$&R$lk?czEm~G%13WS?LF4c$I(vl@wZ0{ zSLigf1OBcha?2PyR(R51Pc}>TEG~kNk8poePV++U*BJ$EMfLv@TZ!mfdqAC=E@iGE znR+AmmHilccR645F169l2WR!2WT4SPwJ~4LN%l2hhebxAH|tFK3TMSXcQEI}4^^GT ztEh+ki-1+*Wn(FXQMi8p@NoXg7=_;QYT(jVbis`5O;cLve|H9J@~qiq@uLO5)hm%N zk)HVxXjnEH@k@sOZJiMMWLR2>^x~M7heBf-zb<3XSR?unywUEsoV5<@L!Dvhr-pe~ zc>l8CQonA7cU13Twr}lxc9!B63*WRy?f!KHKYInX3jPG}zrY-UaTNUI{XXV}S42EF zR{9uqJcDj28K5@KqmKGErs)L{U<+tJ2^WN<{{leb9Z=BQHchSP$zONIW;#BB(JpNj# ze>}di)PEMq8Z*01$kXiRrp znlk%i2u~I7K+RY%Olwpr{+Tbfxh3>P(^iA;urGx32Ge=R@RYe@xKnAXX&ppkcvJC? z;LyZ%{#eJf59-uE8zJPCYM*JD>!fBlAj6HE=%q5t)wRbUfur?TJ_n)>YtA-zL?Tfg&J(L?A<%(;x2 z5vNE7X-=@In~0uNCwcOBfZutRCtT=7YXs&h)?>^ayS_rbPJUiZ{w3n)Yxvh3NOEXU zp1k<}r0mn?;~U0MM0>J4xv*Wp&|^wVs>#wF)rV-i^)oBI()|taM49cWBIm|lb$fT? 
zJ1e~z@Avhw&htM$ul)^a{+4n+EKM)OptzYZnBJ|>JHZ=Y#UAnY=U>&>@R8s$-b13WOZXNq;v?q2dhvhd%|Cyl zpEsYlcz@o!e!BE9^jTz>9V4krybe#nMyt{(o_H*}v`caTc^QpC$bxpS8H`ZT}r^q?ri0i&8s+;wC zTd&rzB+FDcw9=+Ed*&-XcewQoDkInU*E_VYhTlrxS^(Z+nD<86e)HjXf$vs)HYFD; z++9;I8*i<{|M>&(FZrf9-pFEj{}Jqf@9%2#o{E1oZ4C95dA_y%nq|TENEf}!%02bd zf?>+jU}mBpE(>l0ekaSl6D9C>Rk^#DbSZp)8(ijL`1vKSTGH*f)6pO2T0c_khdt;w z=+&wh`lDXqZ3{Bo0S6BMZ=>@~(Rfb0^oG$+#~!Vd9mV*6l*^L~$4c2Rq^JFKg6NIC zujJdjHfjEqvHZl(_V!(SesABm&g$*k$$Lwox9@7gA81dS{LM+zyE@rF?*gZ>Ka^~` zvdM@)v6-l^nG@if#y$bZ>FoZa?DqOEux<}+>bz9!y_0uad%kEHpEw@bnPNTR4RB+k zdHW4l_RrgYR~P=(y71fU!mp_dzq2mE1d(|DjOR#mM!52UgWH|hH!3_J?Jg? zn>BCR^7=*nYtow6S^t2w-1XsHr})V)5wEpq*)i(lMK9PnGg?1LR?xG{)MJLJO_?Fs z4s#}B{S{pL^$6-spI2sI6m-_*`CY(_zd(5YhUVkkOES0%zryTY_^el)Pq7auh9kz; zMEds|=ML(P+uc(aepg-iF2XlQwjd`@`32ym9g-`bAssw#akm+J(r-zt@gN1C zidN#jh-ug4iZ$7tlH}DIer#XRo_v+||HNzewn*G+rmuw4#DuCphS z@yE_(kZH*GqS{&UpZdAkJ9_X6Kjo6|;4(TT`(Pz!DI>N8JW3k+r^jG>vwQhIU=8z0 zbnoo)uyj!Qp_%^y^W%@UKl1`CP@eSb_A^hl1A3W|=bc0P<$ZqcHMhUND>YXszZ2bMKx% znogc2d*JaN_1i(%1=WwD_hY^Yo{ReKUl#`d`}Bg()Bc%^#T$OWyTCjhS`j@y&VL?T z0et2spcBgQHD)F%#Q%`Jr;ID`ou!w?9N~;Dzuh8Shdwx3r;c&Cx`_W)OoO!RcZ_Ly z?AWSjC}ruro%*DY$bQgV?g-R|*k&Z&R=d=mcdyX6SMs;odGE$xCv)-oy6~y!NztF3 zB;Ip6{%?`ru>6^>g?8&{b_8n_tp(;a`CEeK@-4yJz#zI6JT7gXHidc9N7Wah4)K>A zjQ8;281rTt{!7YUP*1D$douR3ZUwJ`c@n(O9II#1lF)wDS$D3CY*(g>abG^Txr)CK zS{_Qf?HY*eR+VUMgQ1(6y@$H zBO@x(XBhuV&_*YDi~N^mPs4Wre|*j9S-Z@PGPmq$0{@AYPH62B`Eask-Vr>5KgL+z z;w9E#t({49a3A}5Wy{I9$gdoBBXm_aPde-XFR{idJGj;(5{~C{r>A@itfpazrG0{Mf9rib{Ku`xG#pZ$axOPcFU9E$IjJHJa6*F+5vf!Tyvph zd#Do;zNeIZdv`h14=0xn{pg=c?nAr#esh37GTQ>(-+~>@gC@@A&Xk^S zJ+4E?(LN{XIOqVV9_ZHIZRiyEasM-Zr0nBizLhy)@FK<+({`Ztgu*W;e1yF-?lNdV zeSd+yZ^+UBy7MZu1bYGUc21Q_hqHMw@5s zy{z-;ADR@Nly)KiBZ1lJ(m%XQxL{YD;K$Bay#ko7U>XlKpW zshsr-=iBZrZ@2dHChONQ`2-gof%u`vzt!D!!~y=O;Y+cLY9EXEi8`~-w#ct?c2;&_ z#t+h~Jk3M!3$$gvg-yra4?>xoA@HGPBKw0Kl(qTm%r(Hv_n3>+W1g?c&>V`JhhYxPlHhgdxljdU+)j#v7?8fybyJ9`K>p7|pX2DAuyZ$r~PxpeS 
z&cDZ3ZBx?x$C>Cub?eN~ZWn#i71nKX8|34ZNqOi%)M-5N3iJgXxP&Xc-iadZ zD7+Y)bOv`Ou@~h2Y0JI)uUh4$@~fDi&J#ViJ9+jwXHTh>3n$l-$E~n*aSW4UwQ)RU zIE5X%v_Gt?n68x>o8*5Y8c-c}Zf0p`&{^KA~47Cx`!2 z{$^yO^zA4OWyIg53qSg&k%i&VsC1F}Ru_3XH=}ke`F;9%9(3vy=hiH(PGCQ*U}Rt7 zGxV#;tqJ0CEN$j)rVRFbo9S9(wD9x|IRHE*;gg%K%{LC;@U5gN%)qxDe8P80?3csi zJ7Vj7WVNSXYdDD#ItA+t&BcydvMhSR$EAWlm%FLEp#XiZp^Sq1YoX74hq3k6V5~dX z>H=Epg}#P0F=l8C#>Rs@n5R2o=(~k}-oDTAr>SklnYHVegy#cDZ_pf}(GBNs*P@3e zu)`Fz80MehBKeh;#4172VnwCrzy)`iR9@L3EE6>cD zIV-Y_texmiFSRWNz8Xt;NCte+_NSqv52{o3VUHRA82x#DIc&G|&QkO}m~Yi5ezUZ1 z>92h_vLP*=GoA238MvwUd!mWXXbvDg$!;XwM)a8_Ja_K%F=F1hPhb5xHuObbO_~R< zXC0cy@TB;UeJo6Y@dR32_lr$I*<=4p=WWBCGV_hU8)A9XfZwt=(|`Z(&Z3>?4g=n{ z(hz9cWXXpv2pr88_63sD)pouUdC|{2@mpj|T;KyB?UQ_67ARdoZG-g zG5lZg`=%b?Z_f{}no^Pfd0Ib(NAtC0J_Vi6sWwhq*)1P9Q#jdMNI5ERC2#fNJEuRf z=jjS%Yn*Cn=x!uT_Z#aTUCG&R0xy1p?UIS9(#l=3%gfesis~TiBI&$ zzpn8##LY<-|Br%$gz#7h@mQa~A!t<^MPDf0;HpyaCy#}6|jT*Y|e9*=!^8bx{acseZ_H-x8( z!? z&ADM7+9(^i%5I79A=;1d0c&`Ec+WFotjTg724O3QCz10Ce|*}zec5QfZRZ)1w>j+b zj2(9Fqj?Nr^1*1*Tz~XC43zm}Rz~@Xv&vr`HZlmPrqc zX3c!)@@(LkgD$#|b6|opw&1Chg`V4Pl1ulcV|?D1E}11AI>1!g53Sw(;G+G<`nX+v zhaRBugZkC>{`Q*W63SfzU-ySyabo}Zt&S@>SMZMm4>wQ;GVbnyII^~1(%qY$i=7f0 zlxd2#}}b@J>g^FPi>=t7o&FROdc5zZ!4U-eM;h3*40 z_FmnLGY%b|{G&5x!#n$zouuq{RQ4yAu`b>fj5@AT9d!`;D)kN>_EvX-Fo!zSHq~)S zj2qXp^cvS)53llgD)SS|s-uQc7UigHl>^>wIk7EBHd9jpekYutz)#>xda3pVLf6<8 zLVaSP9c#3n!JggWrd%GbJ&60#vVMx*?jiH{jde+AJXsPgo#&@ISrgAQrk5%tqYm~; zr{KYia0jj#BPZMnuW0PWPb_1KVLoI~CP_NcXanc);72@Jd0308Hn`Zm+qfgObgI`- zX@@TE1fFoM0in@DyMQ$e6R)QWtcf$eq{^#nl+(uffjxliOFn2!|0VC@6?o@oInQ2jyi$9H>JCSbRO7LKi>{xkCT0FSBgE@+C!y1<>R0~ zoz2A=tTmm%i_SY)`tWI#)nGz=oE721>TLNLp3VTPFXK;)Q;ZeJyu0Zijr((a`Cq1r z?1Rc9Gr)0@_*C}xUFm{&c8;GXUVKyLn{}JX(3Hjf#HcRDROG4WOJ7qPG}en}Oy}J} zsx042>f?J~o1_b>O`Yw*5lV+2=)g9qFJ<3SAGg)_uceE5+FLzY`QmoBkfxFLHmbdK zG)T9BpscJQ`f=AC|D`J!n8{Zzp?hq~nPutS&o%Ue94Y>9sH6 zuXSZ|pN;l^lJC-yKS;jMpsY8(h+UTcMZceLL$=E=A9H}8$d2+##~`2Gp@DRE`KQW$ zfQG~4SwxvH$WOma{!l-DUHyu^u%@#Br0X?iL!Pu@oMrYR{PfAs 
zkOx_!vndYZEEaSS^tb-BoCj*l6yLJHY*wO?^D`rwd9xI*7dqg8K72bdxYgG_64k3X z<|=;T$S|%Rmd()`NSkbYIpXs?g)p7gKJEr5e=9!mlkxv}e?LBN54K>fIaiSky*to8q(RX|0+0HA9!Xf%<_SJ6fwLrEwT03*lPiXu1 z8y#r52)t2P(lL+yCvwNZuSfFdaPZTRUFDqtPloNP`1tZfa;G5wD#mB?eZ7;e>cH+q zp70#z1@;_fyLRQC}=T=tPBK8OF0N&Q6eiuQopv@uAZtd_NQH&_8jS-#x(h)!y5?vHMd7X9i}$L$2aq zo%Iho!^Or)csje(b9v8XJQ?Y6FLrjCr#&n$SR3lJ+9c(ryeYgVk!D8pZ_8xOsmpb0 zU9=ZjWzV>3No}Ja7#r&1v}P^+h6W&B=lEm5O@_5aXAt{g!P`u4WAFm--0Uv(bav8A z;Mkmf8a~|)+)KO}cC2Gwj9nnCzrSA5r|d~nNF&f>Mz|XUiPrz1drf<7{fDzwu!D(FZ1p;>wCdB{V^;FKD3@Sk-YMCz&Ec) z?*#lOKLy#MEhqrtv{`<@YL_eE|(Sg;U^*Q>-l`JiCZ3df#}NEQ+T@mp=7DW$)*Ghtx}C12wZmJNv?Pkp;O2QY<()IkWS{94ZClwAtueBu zvQOvwE^9mqXq)-?WAf)zKQpdFYj^F^H|w?R;XN1GUpX$7A9Osvs>gG_YR{H0Bu&@n zljglk`*k0C->j8Eoc~8V-yzm%$MUP~N)?m0d#KQOSB6u!7yG!+?%IYuXwdJsWoq?nJ8jI7I zXD(npXXCT!;wfExKgD^{ugh-4+8c1a8rmQzNB)8EbC2@Xw^RKY>cP)K2d8>Yr93^; zr8D{ZaS@L%hH%X%IX6r5it)`J|L*VU7dt1)NLE20;}6VD^AeH|$ch84+~I6Cb3s*h z4(a{P=1faB`gYu#iJPlA$2f(H78M?kOID``=c>0E`Gy_#F^&02*4~G(_6A-oO-g6l zl6?gp;@(sD6=)Y6k7G~Op-=8UcGHu)9rs2r4}7G*JMQD=;)+_*w#m+QGL7yEuYoz9 z^tW`bu$%cidV~4CV^+dHCtXPmaDi^$j9_FawO9{Ky2Q_R<%f~ND9e|Dz1w>haf zFMa~mPZ1`1r%XSMG1c-Q`k-XBe5o`R{hGdNG&;*uKH2%2bQKFjb^r|SG+?mfUwhgG zhOA(iTL;4&A9)22hcLj;=ogY>;oa4$GXWgnfUa5TJ(}Ha@ibxTle@LfkgSNG&(plF zow9?BTgH|I?hCAa=Gi+^<~~QQ1HpT=WtP)uf{IzVn-@1_ZAo9^B2~7 z8b@9w4jZJGAYT8{|6Yp1l67I*qOeq5*z<&?RjyfO%X)@?)v2|PXL7xLPaf63ej~U9 zQ#p^V82GeiA=s4uC*+IayKDe_4@LFX!}t9ttRBAmqp*7TDumU;w}Jl{zS?s9?Sb)7 zU=%%v^t=B%_Gd6fWGJwYWNzSKcNKqVpZ7bdSE{C5tTiQyp1Ep;HzlvL%hPE(azgEapc#yIBO?b3zQ+#fV7 z(^5T_d`-Dq1J{i3n>%mC2M5`RpH0Suem2S@Ju;i`2y*CH&nlj{7WnyykA!m#>|C?X zIJBdNEC5#(f9)fjPszO;!k747wDLIj8jYhI=`0hSjloLNDE>0e;2vjrr{LdD*}ps| zoX1^+ojn80S>&_$@1r_9hrABw=d>h;!XF`=3E-1I&?d{fo^RY}&vkb0zO(WE{w3fn zLkH*x(sfNmen+QUU22dE{>sb)bT+ibWkGU!))_-ObRJACO}y5Jl&0x`u)P8E$Iix# zr@htab+ydWcJIEs;e$@bNBXLXb1H?0FR5PWO|~WJX~N;i66I9nucC9Cz`axURFB}u z5q8sX?KKO}%9Pz`5IC@QBg#{}^kQTS_P>qy^S)1GV0adk-w! 
z_rCv(a_s)~GHrKxirA1D%j7>IJTR_vzNFpT(Ib7@*5NLNSLRlkV`Lf^Jb?uvEPa^Qq=z#VY8V( z6lm)V&Z~aHnLNE%x6TzY89D+;j zb)cb1giQm+qbi|)c9gE?g$t3T$enfPCyhIW@&RNl|F6UUFM=%#>UIqQQ z@D2aztn}MZ$I?0&cDuc!DMLCl_^F2bisjcKyGKQH$)2xJW)j@lbrnM&T3Jkd_7=|F z0uOcPWL^(U`OmNi3!j}gcuG7Q&g-9x=uV~QvBRppS^TmR-~gV%{~38$-qf9e=0=uG zG7CQ^X%GK^5c+9_`a0(WlTUM4@xfRg$qDVJ628P|n=jfXyDj61&d6SnlpII`_d%V) z?ULYh+C=`E=DZ2o+=LD#+C=8<3s1qbiW5J5vsLHC-4l!^e{&9BvTPRIqR+Y~d##nF z*n`ijBKOBTjAxQ(I?GCDuBLcrdAFANFPDK?zM&!QUrw3&7fq(!)gEbsE#+_>TmI?B z)wu^+e<~x(@DZo)GP%4bxYe)fOWEmI%;IdflGYoMfp$%aww5FlISWMR#;}$WwOca7 zmTmuKYhDga_XJJF<(v<(JTUl52?xq2ddS^)oaDlL5w%~R{w!QW#)=S=!{#<@uY zzA#<%+tuU)Vd&?_q21;(J~{N~)FL!)>kro^)Q^>l=Id*{4H1ny(9p`_jr3uq`oAeZ zr15t4zthGN_enR1#!uml@Kv=0dknB2RvW>46J@2z6Vo(yuMef^rzzvZXnNq;ufG6> zMGMg_@dtVj|CZO-2vG?;9++V;pW)_j=PXS1v^pfitT&32Hp z#7h;izmyl&bT`Nd;At?^JbTte9$u1cvNXrq2jw;byVg~9-XU1;4!#S__H2WK@J3zq z^OH6%JfA5=U79;Oz^#3LicgVFdqSQiUHrX{a**@z_C5HAuVy^9ep!>*?HJt`=dt&?Jt{Sgpi(la5jLCNf zAGiF2Uk$jGUe22Jo_PK#zxCBc%{S-=;TYJb9}~?(nP=&KAm#hcug%`YuErm=0Xa#% z^0UrnMQ6gfWZFFWK01 zT-;vWT-wgqxgfMRX)UkR=rx)$Hm>QO)+d=eNTERn-W5zWPR%&0y(aJ&^6NbRA@;Ve zakYPFu53J+k@@gUDQtjk4`A;X3=VjDp0-5tn09;qy~I_rI;)g@2vMB1Usp80{cUw* zS^mNH%plU=#Ta)NGPuhhfsW=9z9!qw7>?croo0%{`A>q`3)CmrBfyDl$QKGW=zl6c z7RFi1zIFEYD_^Bdy4!8quW;|gd|=^J*FR)jWRorH+kwttOL4yc2y^=`TQB8aM_tX7 zr#hxl_KS0V3m;0iXUr|?OhoBtU-mTyYrRC{u=@H2eHt5AdnX}V`&1G7N zdxA1ZV{ujDEu9b;Yr=DiZ*+dnAJN>PAHTwNC*c}r2ZP^7Ke4rXf?zMSv##>rNwObK8Q;tmJiR<)9b+WRg}W{8i(WSOTV&L9^fl4qT>mWi4jZd~8Se~l{Ce9= z>BT@gTpv7KCj5Qg%OukY?={eB2O=L^z4qXuZJeD9Y=UV^{zYuIx|cA71Dx)IL$C{$(;`@A4}fKo zX-}UDEcJc;18^dLMD@35)oTgq!Z6-IlXh*RzTVMMy_-Iu-s7WsCk?2#OuhI5^ayVD z3S0cGt;;W%KN;nftf|MJV8%D0M?5K+pDN9OF6UNn0PcxVxacbmzm7h?9=Mg>hLiRx z!mf$JY#D@oiTq>9ueyfNR|#N%4BDuV{_ZdD64D_Y|4TaB1n=~xTSy(}QOCgY_NAjv z{4(yI^8so0)k&JusBiPJTdUusA4DhOPv(U^uNA^`jU{s>uVUHi0&|9bn}du~d)(}s z!EE?NX+qiz?;Ls`{eywBZ4~icoINc+jubdglSg4g2$PNi8{n5z@fRV(Sxc?W$hX(F zEZ{!q#BV3B%SPIWwMfvoyD9$n6Lk-5tTmLqa>>^|E_@w$PvAL*=Xjpucs|K<0#7S- 
z4X4h7iaUaX3p;`bn>Ga}60Wm6KE-<)&tX?@@1Dy0sH?UGhx2_h-#Na=@I54#tewRF zME*zX-%Zq#x$WJP_#aYA*N&aAEl9ZAyBkZ}yIZJdybW)t4WmAVePYzM;Nyf1E$s*n zpG*E?19Q+F!IU}Mf+O?~A2bxVcjx&Zqkrm~d=c#;?-0tELHKBeQ}3rKM|B@aSu+Wb z%bPwSgy8_{pG|mN?kUvWOkD-S;<8olJpSV{&%7!ub1-EJj=2-UvNDu43pkFrID~Nl zWeuXNMTCt}7;v7}vMo53=M0|HdFJq(#WSDhY@Ty@&fz(q=QBJP@m$Dr3D0MFF6X(F zrzyK5C{pfdl}kGX<6{0t=^q|Zn}uV?Z0}w;XIpUIRoeo|>B~qX8vC5H&C=He6Sf6; z<_H&$qTMCV{fW*!d7SkJn1ypZLz)~;ef2bX5N$n}w%r+}fB6c|&Ma~tQ-%2*Yggzo z)|W24lNr8(_M3~^2e)9@BbImJjRAa?WBd>gqTb-gy)Br@hxSulz&vYW!=~WoWAK}w zXeeu~clmiCZI38*1Yd=|B^Pz4vvl>=PMvXU+S=FRM}SSCS!V&j7iVYt_P)a%WKGB+ z@ktZy6`!0zc>54+kt1bq;B1`>!m=-*+-1x?+t;36)7jfPms@@tvK2_@$Yets^X<2U z>9dKAX>=mz81ku(xV-zL@@7ThW8lS)f?vrVouNQE!!~ETTE-X)yLE*=Uh9WAFrBSA0Wjrl zxP_H<(llu-tnNF&8s2j**s!1P)*9ST?Y>aPD(bHf|B>!Ya?7>x%**!-kuADXJ?~u3 zZ(APPep)*>LbGpL8q>MA!qvai&!Xodu#e{)Lu??7ryazpKG~!ye*1;&k%dlDJWV`0 zS9$^SGtm=cNLBSD*he~MSlB1yOWT6)Vqe%k2ANL$&ER%?4&P4ZbsV>_S1^o#)$qUt)7m5O8eA~FM!_*E(D&= zO0^7q$R;ni4vJ`J5%HX8DP8+Rq%C6qC_}TxK~Wy*f(jSCEx9eMCta#k zHXPK5?FcmCNzvv-I5W9d`LGvrvH@m+MFzC#zujK@x(d6oEZ+sI~g zm~!ofk+c1S=WYwujatUJfAjqNMnMk~mU_!_GrUBeHJ98npSkxg{8-wZyzDfD2_|T6 zFgk&>cd`}$yZeOnT-M*Qp zCe4ndG0$gv`wnaF?YpkEx9`yz=yr8`9pq1fUA#^FE8DiixRA_U*@47;+Hw`e9ml(_Up>ewSb=5l!J$5yK%#_Omzmb{8V*L zqRtJ`p2?>CvKoAb4ypcB-Qk&~z{qu(;d;N$OUQAr^98KmTsED(?4Rq`Q)O>njE&JL zg*G#2xtb_b9=t1?W3s4m8+%$UyrWO+?V1^!CpJU#O zUA*GdM&~sqwFBQA+5Ovrk8kEI)T8yJJ=y*{2yTn?Q)n!+qb|II@Pll=f`1tOpmi;k z!O;LTj3r-3l$+V*S%yRMq=2>!}he6kH=uY5g&AIoy=ciKOsyeaN`)fu3sAUi&O z`OF*TLoo^3)xE75_f7PI)z&{kYYN;4>ob>in%T!?%4O_Lre`obt@_0S&3Vog7QW!O z82|Ae^!K-N_u{l9JLGy9gnRquw)FO$!uus?ZeZWL|6SkbWq-t$m}9-_Z|wWGsQZdM zcmFN!?|Yrqt^U^ij@n>D`pg>=Xwn{Di{ri!qe~}lNMRnoB>-u*({X58n_McY{lv;Q{-R$SS+Kn+~~DZR&!owHHP||LwUN3RcB_3&O{HD1nnzA z%e=AS(zmi9cGNoY1sIHs!EcFvoa2v1rxHJX3^*jq1c!KH`AzT{c>1#G+xH5OLN(HZPehWzQ~l4>Q}>F9qn_8_hZLr zYn%w2)6Mi+vP(LU9Wy3Jd!#a-VZ6%2UvHx)i~-ka;gPe9hma2WhK{O=iVm9ydh8S7TlYo_rLy%GjgK)QpO<{zSBt^ zW%>yJ0^9%5`8M%7lw{mU>-SUfXAKR<9x9#1*i_3N!rt8i^Q~e>@S-m|00wgv{R#b- 
zLR{0guWKC9d`Nbliey4w{9=0co~b;Y+`)nECpv@P*7pJD3VrWa>|6O%zf3!ob;Xg; z>;cqSZ)*V${>=}f`xO{Nv&?lq4jg&WSf%>O=aI{<_zT`AE)6}YPwtP-aK$b)OLjM{ zLzkT;oHbCc-Alcq7uf?-CC0r@@xnP8V=}9p8$DBjNAj#?lW$Ogc1dpKSFjGavKHE% zRv^O&cXtLqZq}JiJCS88Y9sMCk>81_?CRz$+}gj=htOt^$`E{mz>(I2Qr54JdVzN# z?NU8uVA=rgE+;JXJLunsOy`!0m!wbJ61b%$ z3Hkmvx_QQX>$5w#wRlVLaQ4;e=E|#8-7_XRmdv)-lG&#;XYd-?Q=o9I4GG7wf9G)g zM_AWte~vR^Sm&>~2dwnc?kx5{b^)_^vKf$hi~ynO(nP z-kGm8m)8a5TZ-NwNB#hv@HqSOv)j|GH93C)ciQVTi2h1|zlWKtXdbWp_))j1jrFjx zzOx7ZfmZQVX5T7(skx}Xq+G6^mKX#sXlL0Z#}3Lv-yZ)9tIUbpSGQco*=*cx3=KG( zpEx?A4c0j9T6!}+13L3VbGKA^c~G8H^ipN+a)Fkhi@+&Dt8SNtQ*Av4dQf_u#iuiY z9N^a-aZZ+XHGCuFgOF&stvlVavRiFPILo^0+Y$R2ijUWnkuTz5+8>`$Ws0Hvmh6ss z_}rF|o>^0vl`6s`dFGkfkT)eaAH|j-dOZ%_M#rlDt`_pO!eX8leynY`Xil3?vEI_ci8H))Gu4M%8mD#u`Zabzdz!8lvh5XpZ`V2&rd^kRmfWe z-^du_qC@0mcLk@>IB@xmv=^L+ud&TYPob~22X;@AnKMjl4+h@MVYe6r&Cit13LN+k zXB_czR4!w@9gF6xyw3je>>lR2@>n0(ue{@<@<7xFmN!l1mHW#>57`gC!v>T$Dk^Wn zhm?0T<;iDT{9*lG*moSamGNmm*qE={wl-4MV0hrLk*r0gp7kB@<=%-dhwkGp*ZXbk zHth8%Y+dg(pL-kW*#laEFGRbc{);ZQPwsr0zUpJ0(&^k9pr^EJo~iVS@>b11ZcB2_ zD`P+Yg%#NodNelZ?b5&T8h=S+>dYthd?ACsEB)#;an71+j&qt>2j-nkEUBJaw`cI8 z40UGN2jLWa{AZ8TJfN@D`vpAiY2QFxE{mz$+ZCo`jozjkku|@_InWX5;;~(GGZ%c6 za&PaJOe&1%?uHhELTh)o1OOR)m(U0Q_p$_z@^km@<9pa13+a9y)kOhBpZYOrXoq_g3O!&={yB{Sy zISHP}-`|Ga@Z|2F5N5wGd~)}e`1`CUcRw6|ANu5O!Fd4i{k*udnn;za4|8u@!)02V z>9yvr zy>ah>U*UE!c@CqUM@2T5|3lr`$H`GudH+_w%uG51^kfDILzD^`*boP3FuE~D>4ZSS z5Tz#xgqNU=%PP8{&5DY^@nsP;U}P}@4)}&y&>-?+c6F<}(oC4R z8hFqk0`q)-x2k43LtyvOzn;%$`cpMkx9&an+;h%7=iGD8B~CW8vq;?)>MlTwdBtX+ zUzHhK>UQvNIqZSf9Dt1B?`(WGSNStT{}%g$kZ0iW$x4X3mb~{6a^fNUVMB_IgT}X} zvXq~e#7^j|eaA(HvkxTU&i9pP_(J9w!TG~45~IT3#A{Tk}i9-igS+ zhO_QER)L$!C=FRX2whxz3_l*x(01`W^h4i*9DA|HfsdcV_l}Fq!-wlYA6h4F1!m)v z{j%X)0B9>mP6;!)?Cae!vpkoeToS$$FMpqW9yw&>0~W78Wxjrj{g;K{yR_A0mVuu= z#KO*@|GmK$>Sa$%Jy#-!Mh?A3>i}xMt|2h1+SXH~5nJ1*~yf<9K$X>isL)x+-dGwQ$$a*7E(d#aj7a)fVFe z4i^R&(^gV#u}&i!)?4URKLWjozh{DbXa^baM>~GdZcyZZY)y1q-_m@7vQ0(sko#j@ 
zdZhGk@VialLiC-@*%WvWzwfjA6Bh-W3lCM#q+R8&O2GrreCO||C+D8bm@O3T&vf18 z(j|f={C_h4Ul95G^YnWObEQ)fjm)FG%}F)WEv*hTzDpQGcys0jg?Y6!d^AbrDVLyp z;>hLx*l#q}`i8k_R8LBgOmXQp)PKtI3tK+5Jfs`8lYy zh;K#xmh76T|IyvFcUZVm&K>59wVMC2ZF?*}3#_Stn}O3qxn=c1%fI5YaPC7V3-zAQ zKhSdJk2U{FpRxOFd-`bKap(Fg%sknAfpU+qH`K=eFG40a(I#zeYc>}$rpN~PO!DQP z>`#2)t+GFF>lXY}iVKnrx7qx(dN2R${vPh-!_xg0?zcww_i}%KbblZB4@UP}x!>%B z%fM#KD(c`F8eFnx-e?%L$q}eZM}%jBfkAG z|BKg`vaj>+n6t#I=WunnF5%k6ToL<#=3N=mmHc9wCso$q2T`7Y%97ETWSw2ShrZn=SGqC!A@!-QCi6pJp^iN3 z2JY#aW33FE!I{s3X|nc#0+ZH*r6UNYTo&IvFg-yXDj%*-ud(_6#?e2=>}5{}Iji{o zaQI(71?dO!1EyCAMr;%JSIpT9t(}@P=h?NB!>C_tC&;1B<9Rj^t(!RfkM3>9#NsWo zkLdTvJpZZa%-H)hc7jQ`s`QVv!#*GAwl~jV~ekFN4ZU3EoAQ|cKPJ3=lZZ&HVtFh%))T&x@__$UpllG>?-!x_73#dU*xxk-*5Tt;^#QG58iF=B^G{p?QU{0JCr@t z{GhJ*vd!k9DsriQAh1sySdNT(h&&!Wj)j58m??dd`U4r^s2IOl!*Y17gOpqa+A*~!joY7rR z%bw!B=;g!2k{Yu#I3Qtm%y98{mHC!>v`0P56+OLE-`X+BRiF#ufp7U$Y^Q0PcL!RoWkaPQA31Ao|hlp|?v{K%< zg}~*}iJGo_lL+iOxQ3zrJd!qp!+Y@yKJw zi2XIU&=-BbcJ{}hDR}7`VvdhEBY3648sg;5gD>ey+oI`gVm$40wiGZZZm+;~64w_g zvxnbr`R(FoDy+lsYvI?*uj^W9a>Drfg}f{BTgvZjehc{ix5S4@mVlr7bhn?}5n=p| zV3OQT(We%0Bs>2f89(8+i?WmGyKsnab?C0kwf`X;W1Rt}{W~9_tm+Ugv=+Fk9_T1d zIq6s*KGN zTvJ1s4;?Y#JZxDnh~paZqi5QA)^X%{`23btCva7+q$A&Zdg4H{u7$C8I9J+XKFsLb z1Nrv*$l|nXmaClu4K#-Ag~o^4qPn$qBY)@w?B4{mfQ<_66shZBAKW;hv+>W*rB7Kc9zC`t8 z3&_yzv=!u;32QV9;}yd*-XmI9G9W3&WT-Z?=8T zL^u*(2cOM!%*!J%Fi!N{w|F|9`;@18iK7QssHn(P=mwlP=#H<_hHPEE7cIe;!Ew8# zlZN{byUNJaGO-KrPPbiCplM*p&{Q zQN33{i;{VtJ0si|pB_+u@%s*VPOhl2SQ_kkb-43n;bp*2PT{I^I499pm%1gZZGLas zen*+OJ^q&taVNROQq)E4(GYdf-r4vF%?>9!-%oh7!87CrvCQZe+1}dFtE{KE;e4ez z`|8OT`_lECfvanY$;*B0n5u8Sy~{VZ?b>73@msb_{$%N@mnFiv$m+G(!|TuoYc`{L z_YIr{`Pe++lyfEM^K!;4tv8&uhoC#b9{6%aA$iZ zPq9tRH2J%@GKHYy5*KU#r>{-*^oXT-4QuRxiQKSTrKUF30fU3z%; zanylchD|j%xC=QvqpWc)C^ti9*t-z?uTbs*+FxCps;kC~_s5CmxQ@#zMzFUXdRr^M z6#v)s96v>dxGePn6R!{)NqONw@4(#%?C7obu3gMR8I1zNqsl+pq1=gz_4RN7PQZxBe&f zanX0GOWz_ttZuCFobf7q>G>ue+v$w$YU+9Mn54P?#l6F4zWDO+<;3+|P2AWCFT6bb z|@S9)5|Dm~=#vqrs))DZlk>U+eDiK7mbZjHz 
zzzuO&tn{3)F6x=YcZ#85oe=vgtgD6l z)zr~#b>~vR+UB6vtR#EwoP$lM_Q~~D>jviOIdo{+N*m4!fj?9q|5*8r-q%wh{(u;6 z_TfrbzJ8}Y|8v008Tl2{Ph38Ixy-?iuM*f__8@sWi#pYYas%=JnBKZx9T%KO_4 zefFB$2d76d6k2DNjFCLooR-lQU-{S=^e=LL=|z*0rb3~McfCAvHq9Ky?=r>?eW+^k z&{SErbGWXmbAohE6#TbM`!SGh8S%UL!4+<}&Vw#f*irg+uAQ&YhqEh@kJk}vP_({5 zDO2{+c=IsSNE@M0@(6`1!e&~|MlFKH_n3dJwk1p;zwI9x6Sdz49Msp|MjX%cw!JfL{!ist7mi?* z41K7Hf8>w)P9NtwAqQiab|81{*aDL?J{$Pdp|Wq)x^kX#Z;3fa4%BXj7h|2V^Zge4TIf zO#0-`)nPrcKB?zgyL?BaXJW%smNhe+WMlP7aJn{yJ}JDTQ?^TQMAyXrw&(tBbeKa` z*O5_Oit_*$LwDdOyQ4&(z@5ulpPhTq&(i##v~$m1i&wpq4@0d9NAjq_$GDfrg|H`LcVL`y9t(;IctLN+nOBH zevf&m%>mEPlw_xG8f?V(W4)4DXUU(qZJZsyLa+!};rk)+s&rTw+46ES$2x@j(|XLG z$m*d=Tqp27xIp8I54z|-fp2PP&MBk1BJLTV1h8tO{kDprdc{eb^>)D3WD7 zAMyL{{XYAu_zvBEsOzw#`RQ->4xe4yJ3OtvclZzgv3GdZ!R+zpH}l!O!@D|=dC}Qu z%F*nUmEX_=^2v+0W!Gn+RmtXL(W`!_C8SC8>>kOW2|Q2bC|lrH3g>b9Yvc4p6U~;7 zu?Hx72xYB46qN&awp^M%HI*bQ&F>xV<6OvEe{B3emd%RGN$hxEZR$0$vynMv>IXJQ zFw>6W3OfWZFm-sUH_Z8=dB68T#xZHoUp&^b9<#DYWlau0DSK#If$hHHW7U+`Usv88 z^*%{w|_QCwaRpp9Q`OR$)SGPv@_i&G%l2I&U2fjl~L*VR%p-=~4 zzfDg%tN1_pk_*+RS>v*?kcE}L?A&qYO#eyBJeYrmHWvqf6Y*}ksQfi+>YHbW&$R}S zDs8AY(at1d#MO?*=9#FShip6ZM%qDEZ{u7k>?3mNGd|yCZM6wqYfJX=s&UD2s(2Xs zg3eOCM=;rQTFO4L$k=~p`bXhxW$n_AqwO282tMGqa9H>X>@$eqxHp1>J?K46*-#f#ZfZfuj<`!FURev^mRvaCrFK{s<1+ci^}u!U;YI=Ijmibjho_z@oWLI@`Jk z7Hs^U<_6qfZedvsET>wWuwL1K6LJJM;N+4R7S^;wSe6j`Y0g#tG10DV6Ig7_zFAVe z0C+>1M^A^3r)KdZyK9K=3D0zi^G{m&gkENVd8&9Vuek$Vf&Zl=A1O&{ij9sGhgmgKRmQeysd9UqiJYJ`6{=b zJch4$7Ry5IxlwHF9Oezl>n?0V$-M;qaw`0wea*$fQCDR}eJb&uB!GA6Acrr>7D_qQNmX*q`0Rk9ND^`;+js$Zj*) zCB9^__CID_eBPoJPi4*tb>AYoPB{-9pburCn+O+6X}c-gGr~t_TU?8Ndo0Z?-Urt` z`_Rn$gllNmDQ^(2XKK&HCE%>RK^Da^!&$Xo7;XPq>(0twY06N+_IYLP0(&rY28lU|1ICOT(+{l#r=iNM||S($Xs^--|%eeUx?q_}ZEssJ3*htY`Tpz+=KJ_p{SiT3VS3{F|}Av`1GoAX<i{x!d_sXpITj)^GdGP|;t2EJC= z(&mN1Eb;Qno8DsXsQ-GU+3i~UpM2+rT-(eyVN5-9Dtvk&Z58;f_|?n9U0h>%bVbp& z3!h;V6@i&^vo5weBI{DnfMl-PQZAA#@6=vvb``eJm+J$xIk%xr+4o_)I-AR@KD8p$ zTj1G@tNulOgEuxJ2LsJvk}JE8BhLhIZRWa$UkuyD`+;rg{}Q$vfi3ihg!ss2uWx{@ 
z7hT6>glFuBePCHY`^DqXm zncwG++b&u;6W@T=xmOV1?*+?`tPCb6Up1(+D%eA>{9iL|->66PopH4u#QJpZ@OonM z2K;`&=YAXCw;$a8#bwz%aW8rLU}1GfVZEdg)>40cFKZKmRr9NKVPM!Q9VL~$(fZk# z2Y0*pp`GGcc5Pg=cQyM;=5If|-eqY@{uDE`+V0cAo^gSvA)nABw>@q3I^JV10^dl^+;38^bXV-rnettv%INnV zdsT|y2OMiHTm8}ZEsXsX?&XVm3s>fWB>iGey@B<18xJ%yi)@sB-hrG?GA^vE`mL^F z@MbfIEmTg#Y05>ddG=Z6_73`-S6f_9;!5m^wJ)H9;Njud*dM{VwtWWe^K5`UF0J%Y zHptfK{Qxw#$82NlLpXkoPg{1S?B))`T$4S;Kb$@$k!ch8pS?z$Ke3{|6wP&+2wvSb4xNyZcX;!w7Pxh7z-KS)j6%hIHGBne45Mcc+enpm@K3JRASx@eRU4s%lCK!@=5g{ zr12|?X7e?Z_ujBcLEGvpd|chURrVlqIO|mpW36SaQ)X@AvHAgKb*+gq zbHL+d)=dN_Fv9s^xf5nzL?reCd4}UK@ z5q?FTMu{WK%M+Oh6&(f1rcQf;r;?X(^?AxtyzFB#|(!`vvvyTBfl)WR^-4^0p zu>-d_k5$w3VV+L|ibANZPzn({S zDktpah5p*&=5XG!YiwqGeZKwwi~iM|cYihURgZGky#@VXQ_6mGxr zCFD~1CSp)F)X&CNDSF}gWmCbY{28gzn%YFx6Y|iY=m~m32ej)n_`pv=KCiBczW?jB z*LJ^y_dj_ay5y`e(LeT`@|*QEz0f>&@R&Nk2*{8GqU`7Qd<+WO}8r&nK5 z4hHa;L0_b;PQ|=A<}%Gy+wQb}@k;#y+WsW| zA3*`nyToNMHjHQWv-Qv~|3Sy>c$v9%YO%k*5$A*GLorA5O@GZJ|Bd2vBJE%h6p6&hRmX z*}m2w9q?GD9+z*KZ-ypPpYm_0eYJf)ZC;eUrM~<)>YOq+DDq9)3%!H0kMXLv9k`Do<6Z5y01CR;T}O$Lvx z6~1T8X5@jvB`0B#I8R`*WubHWSk*izT`OO{rJnPs->D#%3W|TQG-`M#EI4+~?$D{ZT^7{3FgR1(Z z{vrFTz1X_&(2&|zeX3XRsNOfL^BmPlU9w~Fsq7G)r=W9m-L0l9*}X(@{+Y|Y{+js7 z;)L-&;>uBdUGW-|h{J2a<`xc_)A1XeUB#c38Ohz=MjlMgU(i^Dd@h?~ah0=lGc$6) ze8RdxQ+fjZaG~~%GdDa@S1vyCt5(VTXvan6S$oJ>opAECjM+lz4V`bJPPGR<(4Wp~ zBfhyaPydAnaAmM-(#5~p82nT3H$H!&h{VLiuj)~3KG+El3@IMcIM)u2V|+gF>TA38t;&f$h?nog>163c@tt^V!eYcKPEw2xnK zt(9*dIRJipWRs;`<}Kw>&gr|*o@g`u&oxu_Wy_1`5x_3r^`yuKuz4QX@A3m+?f}LZ zo_6vkUxB`n%mWMcNoFFitJ240uXR9Q(uuVn`Bdg88@B+?;y45OspOxsGCbX{&x!EVV`x0mA>**BKz0s9E}5z6|w#$dGWnhWE-sYZLUR^wz2~| z6}S8yp0~JREVlX~oQtOP&1S`AV~3BWg-hteo|h9Nxh7o3S^N63$R_#j*dK}SVLY~T ze|>IVYb~?V`@r%viP%`Mrp78w+-nSabAK`ALoioUB zZu9ece@9*L_01>i+!10)^2A4q4}tX#p6P$prCd&`cOa_s+tjJ&(ivxz!*)?1wO>aOLXFlSm1G||PmGqw$Mt1CoDyqC@+dTKH+j_fgk z@76Mwz#pzV4}4ZR`!yehJ{YYX9b~*IOTpbQS#yr-{76(M^PjDATtl6iF?D{up-$$a zOolquN5+2g+0TLlw>4Jr*Zq_|A8s8T4l8qr_U(5vd#kAkWT9#lIUudm-T~xo$ 
z`S~*6%%XnbdKPdOv14zwbE8+C$y&JPT-HD=42tKwnRn__3cRYW&qa7W2E2BT!7Db} z+640yd$zVIUyGA}9)1UA?5uIQxiuF(m^vB5Mb=*+9&vKeCG(CqTlB>_NQ}Ma5|?}V zVaBn4QE{N)WhOGE9_LL7=i@xIi97->!yb{VoR2aGtnt6|LTLMv$N%ArfnRjbx<@A8 z7LHF7<%P$ds4r?~>X>&YNAK9P)%n&j{agZm#h-#@9D8R4>z{4E!k9WIpSSe#HE1-- zIaQsnJ#+XmG7&Cs`txhMPmb{3dWzY-8Mz=IwRjSR!JaMrh4)C_RCw+ar)19wUz^Gi z{|NlqTt1lKv}4RE63h_pV{sG zDPcbI`KxOezI-nITQVIKbJ z-r?AfIM&zbBLDvbRRaYW?)+1aUs5n@% zcOx^9R}FO~sp|s`b$x=m?*2FY8ToZ})zz9^XJgca`-HO|e!RP;x)Tm(1)NdN&Ve`B zAIKaS=6n(i2XikNkY^T#`+*__CjKhFCgTAKyEAK*ea znp2@^`GeEsL)q}Sa0`8iPuY8E-!ZSlOHVZBD)|@s^2--Sbt3Hes#E5>B0eR-F$RX? zHD-B@a|!tzuCeU-`~$mgpT%>0o#!)VI#*ipN~XmS%af;%6RX71Wrmv%3RLIbgU$e+ z7Qe}a@snfQI3#LAa}qwzo<#FR=~WrcL9ai9dSbvBj=V!okDNiR@`5eat#p2>^vi?? z{Pa_OobWIE;&|`;(f%c({>5uf9~;w0>G-34JWOYU+CHkzu#e+xADK6*R$pxx%U?(B zsE_QQq>qP5$A$Jb)W`NQ{$Fd=@Vup&#Q4MPT7F!-tn8k;owJaL~p)9}(xU=ZEIk58oYlGTF83vQtf>pPF+54d(>S@soR z+x&a@i#Vy64i^I(dSzXFbpMw(5ql+n6h6}uv{^>a@T4Dydzd9_buO6TT~0d{_SIDI zi%A#Ny!NR`H=H8cDM)XYZFEx|9gtjB)S2ktrT$2_|1jeue5haD$Tj)p?048(Jg1#P zfb7j^&K12p^|=rSlX+(Ar4G$Aw(h7MC(-|$>OVtu^F8a@)*r5Qi7&kyUMg1VhvEnP zpT0jS`kuWJz%>d_i>Vxp9eglyy-j{+wRNf0!6`#~20vs6zV(kX2cQT`XAH86HEU|C zUI^Sti$C`Fy7K#&cIk1)yX>vvH=o~Je)7G};-}d4Zhlkwx%}`+bxvh$CgyIbpXfeT zf2_NeJq5%&58yBEUJ$HXxB^{mQE*2ubTfOUzaoFKpU!#s2UY}5es-8Em-4N7uS$J2 zL!GoieTvx(@x{7a#!#Q#8$f>)=WhKZ-2aICxzLpCpoh74T%ULd^`lVzKK;dxhKBn5 z1)@WqcMOQG?Y{7>#v>-)z+cd(WQKH$!)&}@At+Io@-?PiWP-k>{*D2BQwM$jkaX7P zS-NIhDI*SM;#6z^28>N1?OBe>q9GWhi6tIhkI$L`)zKTzu?{EWMfzEUo? 
zu8uDtqyN*4xB8p__9Xpo%`bzOp(|iJL1pra;a;qLV4d2l;IM9%p1gce{xZWEq^g5G zoI@SJvlrc2FevWm;h7c2R;r!Pa_Z76db|BKI`jp|4PlW2P?bqSZ> zq^$nexFIKoEY2v?5!FY`$&fwMJd5lgRziK3zZjj(j;Y32zTI@&a$+6Aall3*=JtmA z{1P}K=0*7yQrO!qC3JFd)S4szLT+I=#_*Q(`?>HRu`RyiE(9+N?OK*#l`i#hU{4nn zuN2Z0GBtP?bfvX7_&xJE`p`sQ3_0zrOl^qSXFg2q;q0#WFxIM1{geM#_+(r%*I$9H z%QOA&(5GlEke>oPR^zisB$GsI@~c}u zKVg~QMLYoe6GXoo)%KC#U9l&`X$_g|M>%83tLkhAhdpA7cX3!7l*~yL!#v!Fzx3Mf zw7G6qm^U`h*v|2Fo_E3}_o7+3OY&NKzD-W{pXy#@X}p!Z(@yqe_+=He!#P)~ll}9e z3)u+5V{7g@@b2oNw)<6@$6UQkjmZf|^(pYRc;f$;WyGz-vcbat#evg zYjPx>OYIkF|8;HeM@QbLZSr^qxJ4L-zwq+FKD^?+Y&rc=(_GlV6FW<%qmTTAmrD;qNz@f-T2XljV{ z3X@;SIXjB4V~<6|pV`t)_3wfc$$yhy4cwd^I78owPp?CMg+78NGa6Iyfa(I5;rTC* z@C~qoC+W<5Pk6&sjF9%@0l#!{=4$!uxpX)5SgWQgD&_5tX}1A*>mhNUO8YuUjxt851pnlECk=9 zOt)x(d-QI43cNpR>c3URfCd-YX5#4}H34#of9 zfzLdaiHFXkJz!Ql*#fa9v?YFo-*{%@A3D+ZhBU4!!@g?Htmub+=s#Eai*j72!`s*! zwMocK@wD1|h51)`R`i{z#5C9&oT>ifBVABwm9X|%0gzv&N`A7L4 z|My7Wj{bk>+mGI$Z|IV?Z^Xuj>%>v}_(n!xJ@J15D}CDs)@S%W)Y~{)5?keZ&Un$e zz>2NmoXnkCBf5rfglp`S%=3SXtf;`3(A*UI*nxZz9f*IBsYB1{J#_DQ)(&I-DYU;6 zm^J3CwT;jlw6`@|Vho|-i3RjmzO{0L@nFpEbWT{$n)9M+xE^qB)p0o^Gq-vRWA>c{ zG=yGa^?T!$ZSEV{S-|vz4OUigwlDDDFS7f_L^FSw!&wUU9X<=*(O&0+z>43(>gFnQ zJ7vTxH{HY98Do7TSLw|+aFuM_UyjdD^UhVh;6wGY20wHWvSLj&VJ`cxgn8sQ3G?=^ z?H&H}U3-U1-0$QU%a8@kq2h<>$dJ}DHd9`DZ#b7}9+R(eRECI8(EZ_;)2;(fzDcn! 
zHs-f?N9%6vbIr)VF%J3DjLMgIC?e znvlveaJIfQ&qnH`zVE6&-fOR;>Ls3g=wllDQl)+;_kv;UH+MzfC^n+8UHLGGXKz#9 z9ep#Kaj@^H{|3gXgLn>jdtd)Wb6d3lY?2kCp)Qv`6ujWuo6YV{a&M}CtdUg@MfP}{ z8^Kz2U2+H-?39fV`zqw8NDUw(b6&NReQEzMJ`1(iifodtApb>Ij{j-n&o-XQ;yGjd z$o8Ty(|}iWCA=Z;GGPvb-r!vB!~7@(S zee(m&1=N8Y$mD2CWtDHI1zn*vdlhY8k57plC^^MS_4&d@vbYl1(JOMR{8{{;DsYdD zn=M|2pYGC8!oQvoYcmw$_FQXMi~qiVd03AF{!`!z-ND+`D&LGdnNYDaIT9((k~2mED}g4sJGc>@)2ZX*MT^@427IIO!XepUD4U1v(zJ!)32dX3S#R ze{r4g9onrkG&Ygls`icw zpSH(%=uFULW106yzS(*IgPqy;nQ#op+L`|xJ-2oyJg0s;xfkd^b|yK%<%>tRTFd-k z=S=18mW|oRJ_O|tS8hmaXS#WO6)S-C7uFtrl6g5nU&8wKP~XF2>Ra~a^%48us`|3q zsjoL+FK=gTA7`<%-WS3BJ!ED4oc%TRdC_xhG_9EgPH|wT_EjM7iJkPS z4?Umu4tO`RfsYZb=5_8$IKTb*TH#H0uhxr9=DoJ8Wcr5v+P%ZyGP^H>241(WAMN88 z-BrracmbX-%kL1Lt29;hq6Yz6^)gp$bejfCMe!gpEBKHY_fDsk7(?ne8Yjz-tFQy% zNBBc~(PRgSHWY`&`cTNTC33T>FD2pf_2)D_q59w{@#bZ}#CJs8+$4Unzsmt0?JbvI zV$Y{3Tkx&k_bTlQf7Yh~j^(r1rT4(~N4+DM z+|mp3JhO{2I+piH$;{hy%~?CGGm#Vhum+@@^uQ{b#I{KOtOuUJnyF4H{|ghm>ax;0<7U9(ssi0yiTtiGQpwMlg{FADH)nDSJVp zv5l__7Y(@DAD%?-F`hQPDNKJko*LU&^}oQA-JcZk=0CkDOn*6^8r!(wP4EO?T6)5% z^af+~m*YjUBgTFHP4Hr2fPUT>hQAySlN#ipV!kcC@im^~K1p2YD+`fT zMb_T(;d;AM(z?Io+jVutg5n#j{z>wog52|Ns{i)w$UD)!c-Wq0|CEh4WUlEvq{Oq5 z>;k@x{M^Xy5xw~<-;|uWM*afq6XKPr`*`^7kx!9Z#nyR_&1EcE#M*i1h3tRER+Qgy z?bjn*!gEli%bc5>`@Ug&J9Uq~CV4`8P3X6jWxw3e!PMs)PrI5&jh)aBCaq_>1| zMK7Eb!t{2*1n%gs{AsqoEd{Syj^=v`w(Hza(#r{p{^-m7U zhd!SF(`F6ySO9Lj-vwHi|BUab>m1nwv?W;6;2d8By3iV5@9Z~~Ld~fVk3d^7%s-Cc zeTu%9u-QbDe9QQ6WZfpj3w=o19QyP{eRs0hR=i)8Rfg);)JfC%1JRL(K6X!un4UzfrIBWQT9kIj{O}_>)B!-Ij)U#~vj8 zKa>A$>^_dhdILJN)PJz2i?1?wZZDyOg(}8+@L?Rk%Kpt7Mn$J2(+OrmFvZ zYx_*w>UT>0E(=!$>8aRQ@D%M~1LCYE0`t*x$`*f}T3;k1{rW^5j zxV9DhA$dO{M;9ZT4xkQv2^r!)hZaAYFw>yfpKW}3IJ<4shEa^4#*BVj`+f>Kv~nJ{ zVr$2=u>pHgV|Rk}!`a+YJ-iS5l&c^D7Ac1tzEx<8KD|ffO7xF9Zy?^_C+wk)W9bSN z>MDA{0&i1&L3vYMxgj$iF>+{xNh{EuUXXVN}n(L!6cWp#ljL_d_0kM@EGB%_4%pF zb1BzzvK7kb{q_!DYmvRa)^?an9CYP&7ar_JHo?R7J4|_3i}`?W{P0~%_8i}-aG#}4 z_gp{Waqp`Bv;0(%d+IFUSCLP!#ib8k$nzbQFs?!8Y;{8`-SjJ-7tDa_CMQqMb6nPj 
z*ppe^8ud3Gr|Y1V!l~cTIgu{_*O)S9_>syoT zUBA^olJmy4kk3TmHqpeBCrCxo}PQa^H@%do6yHa{bt@gM!|v zrv%rWIN47@uh+IF`~>5aLO$Kf*l6B*g}9qN{N(Sy2x+Zx9_tMvNud*$<q1zdf_Pq!@_R7_afe8BF>Oe2p&*{L)A#FOFq->TCX zV&2HrU<`3G(U_EoAs_x{GjtKS$ubrd?EhW-w(K4wLo3pg76vD2---OCMdo>ApyNSv z$TrOx)9L#wjF*evp>b?M&rMbE$9H+vUA8^e<#VjRv@EOQudk-eG0ab98uJEg!*>E7 z=eB!&d~f|7L7sY8E1ps|OQv*D4q3-sM=aKWa90S9z3iYMMa-f8cRKoZ9eYHxcs{sp z5;(gYoaL#P__6CJuk^29a+JT$N%~Il+FCOAWj}So+CiJg8a?@snv=2{>#e|P>5wxr zOm=MpoEa-)L~~o{qx`k8961TtomP8Z&)ebEp^L#=7W}>PWWuZ%8udk7G=@%|W9`!B z2%=8O?^Llu3@UP?1H9rtNlawz!zK1eXJ%xN#=pFNC$X0G-mbyH7HIxb_xI0ECcl|z zSMw&m%z8htNl%rXlc()n_(XomFQ(P?#c+R_%KVh}*9@=@H=x|bH`mLwt#;+JkzXd2 zyT0C%zqy_y_i(D0eZgj>pCS&593HhquGfd2+mvrR;r9Ao9y)Nnwp5NhLYzO{WOagT z_TjV26xUD89$-xRMriK>;MF`Rcn6+Kn2UgQDZd%q$1qQi@Rk$ouGU?Vi^Y)Fq(k-p z?ls$I&C7a!75@t#s8vLdQwkOF0_*iv%KJq6={?q%&*(mv?+>K4lRm7*D zgUPoV`orVc@^_NEV)H}9@i9j>=C^EdKVOGFtF5`u6L0L_@-5d==nQTVT|2j^mMR@W{j0dH*ol9$Q#748p>Och__@>6BVXD+dtu-A zKugQ^{seN(d5t^(3*!Do`55K*Q~$(I>fc@T&GyUPz&hY~#{BcFRWA#a`@0L+QbomC zwb#X8nunWmQQv^$fD*Kd?S4Q->pDLO(z$tl5}4B2=0VoqgD!Am<>jTz{gwiI3X0e6 z)H=Chh*QPY^{&ErvW?OwhiN}nzp`2TavHvBKf z>>B#F;rsN@jd<9~dd^CJFfp?KdVEPbf%$zIc|T{?XpX8b#&*}GkvuQM#~PQCCp-yHeuvIKo^Hzvv>Hcm3xN=?oXJP_%+hbXDG9U zXK~E2Vju6S&~|~bDI$l;*BAkw#5`kC^sLnBG(D{bqHWU%@b@7=d^jNQ{8uGfK&PMhEN z^apmWeK+UH{VVY=^dVi@JG?o{8MTIfc!enT4f{^9uhbdz*mf4@j(E%Ox?HDTG)_I9zYX*86`W5ymFJi7yu>eWLcSqa7dlRV?IG&!&>5-d z{l`V~UQv4>h_CE`UbN<@SlG?1`P~BS_kE8TQ1&ogj9wr6Dp%sGO#jFGYHO~c?lQ3+ zAIINQVmuDT?@|cIB4ObU^Aa0qDfThG_~no%;iJs`@Ny4)B>#lyO+L*MIkLomugvbR zzjUd5CiwRX{WZzrgOAhN9eJ7X)d^o_q5LwdYH@72;!Ctfo^}-%gNeO@Ek*v_zY)ud|OZh2$Z_i2wb_CCmF&}3B(KW<_%5+9$#6PXp|ApV8 zxF(&TjA&E!Jq0ep^;q`ha1Rct2VHtU`HxRztb71?gofgAn#VYaSJvn|@OL$J{uFdr*DKz2^}V;zPASaco;@D_Zys9(o4duV-{}@PkCF&X#vv2)$p>@c7>XvF z>AQI6+~^rN%Mh_FIk{G2+DyOKxa?sQj)37c=x+`E#dv2dC6~Qm4R{2`5j_5ZGTIZN z_|nPL#a@QaGBBw8oQVF#6Pg!T%jr>Bv@5k`>r0_SziUjn>%>2*lX8ludn9`Q)R=Og zr`%7XXHSlKw}N+}A4+~^`C|(0R7$Sp*YE#alVuMQ_*R=Ow5f6Btct`1jAPc>ST`qz 
zGBlZEjzfoCgB)1Tm~24Temi#EAC8sY7o96fUoWDakBf$iqE~2uXY6gK`LhCaL}=)G zbg@EJ@j`;R@ElD*UP`d`F+?w$ZhaQpTqG6rk=2^g;m97g4gOvQJ>;6Af-poV^~wg>+8z@ zs=8apl!b=zcWEEs_4Sow-r;NOf#!w;_t-X|?;h3rCF%{~`w%{3^v*1K#HdH|Lio1u zhhx=qB{cA>Tj4d=52T}NuT<*3Q5(bLnZNkPDSFi_OJ?_5&>I$7Sr_H=gO?@u&;^jE zE5Q|UBiQodzgs^cS+O3vSZQgarGQVhP=-c2WX6BexU*{NP#V_PY%H$gvC_vEN<#l2|kTZ^x+J4<`} ztN(w3JJSF+a`P{OJGP$$cY;18OI(55DRWM5G!A=^o1!oF=w%H3(jJ+K#N0s}$bwat zZfU>TQCd}REw8G-XJkB9h2v3VJc{vntg7$n5AmjJY!@O^Y+1=#ldsgDwrz60I536& zYSva5KaGLLMq}H!79zQzeD?8k>vyp_SyV3a3py_F+dYTqN3IwAM!q}hYRo&UGHV*j zJlRkt9_O=|UltURV|lN70QeGInxo_V3es!Rz}{`=&2ZgH?;o~xT|ct_hPIRO{+lv= z_U}oUqocmP(bWK-e4aYrUTcHYv5tLL*9jk@PxZ&*CXUI2m( zW6kbEk0UlZ`u`^WR~>EC{T1$Sw(CEk&T=>N_B_ra{XNfLLC4V?Fa1W(#P=I{F5Rc` zd6MVrc`hAK^?n5X`(|XQ?yu*59%uLJ{%Y=ZZKeE`+)vC~|9h=Nb|HOC=R!P6|4T)- zqI5vff%L-{!MR<>LFW;lNNK87wZ|}+@LR_egrNDXUTSHvv z|FijDuqJ3j|6fY~o4Zv{S$k@RSnH@sx0CIr_a|aQJJ=x5#I{%8rZxnJ%_X=`J9Eag z^WHJ-ETNs266;bfjr<0?YB9g1n-k_^pJ5GEa4_bAg?Vo% zrhoZW5(Ch-YiWiu^d|>?z^U*A{n|WZTUh^$^S&&a!Mqpomi?Zx`+Uzg@wfj-eP3?) zb}rw>{H54F_d|?LIpQymdd!3o8KSmZOrl0?6VrM)4`vFIznnjoJH`5Yb&iVGv$W6d zpeSbRQpPSk%koKku9$LQ=zIC0WE+osLkx(%bNCM5!H~+ljke&yTBam?J#D||dAsDY zLmt$|XOhb;MRbj*&T+tz0vG68oC}Fg53JZT?cxKgI~G^@+C!l_cwT)kZMATgL$|W+p}Xv6*0iCW`TvNoDc?wYCNha!jF)Bk$-MQdD4X;k)${KX$B(Ab~ zlJJmnsh-FGin&|LReJYQzE7BToBvv|bn-a{(4FLi(@<|I_pN6g^U2^p$;Wj7W8VC; zaIJm;Yc5Uri!6-D4Lh#1W${!{-Q?`S|KtGM2WWFLG?^%P$pmx+ZfELBKgmm^a}Cav z&M+-)9v@Wh8+ak1XT%QJ^$W)zM}6%5Va-O*p{Z%<;^Tv%96c*wPf_N4Yzue+KEl^d z8R%fzc|6m4u*%BzP%P|m=G2<Z?v$>fs;Vscs zxVKRHpX_4r4M@MRaaW>m`ODO%@~G(i&?0$2RF+(LOppd{bEI$NPDtKS(wx=H$J0E-VGugBep%UrWJt7LK!mzvsH$ z{1RAgj9QOiampb)rR?vaJA2mSo%DIAi}_D;Qxo-m_%Y2#HkPEP3qP0SvG^g}KiR9; z>8;8Gnlhgapvz2>>vqwB33>0ok)L|6Iv(Zukvapg7{;NvBoZBo?FOn$E4 z!5o%B{-~eoullO}JgQUuRbP`)U*FNt*K=)O7YEa9U!Owf)*jCiK3koKi;YvJpI+E! 
z`C+g-wjB}mS2|6~WsZ;fdl+_{@kaaFRaPI)mY;Bk@MW2FB;lgL%}yK}GSqN_<+`US2z`$&7nwsEeH z#kb-7VA0jT5*L;Rhs3ZTuWSx2JJ-SARz`~szG3SP+fvY2fS^H zjHhD0fF1s7m%ewD=%*swE(w|_KZUUq-RZt`&|7}@c=l8D@lq>qC*`uN!;_x``RNqq zOu)xY{5Afh(h{pjt4{g7C*WLBeQD}pKb+0YcPTU;pA9cLmB_L#1%Eki?@q>V?d%cZMchU*RkwaO0%6!nNIs|jfm!coR^wtKrn;PKOm~J;uRi!rxZgdjM zm%zTGRFdqG|2C96;=|J-xRL)oU8RsV7}Fi<8~SYZ`J+PK6J1e<^)+4}JyV}p=eB)% zoIXgm5-$Ed(mymeh_}RZ;4715pY(|Qj&x>nY3SS@n`@SpG$L?y&WA}_h?RW+Wh5mbLA`+hKEfEf zNqO?jP;WEz@elMZqqd-tmf~7pZL40(Tj2lUF?Fk6#*x_3P-kv(&-8!H|H8fGZxVbq zdGM9!r5yTTMJvL&p6~wmkY1qA5m_%9(|(T>uq&1=$@OV)C?A&S$}k^P<0FI2B| zXqDI8{4@ASJQVh6F?z_tHe_~Z+!ystb&aDxlKK6w>(f=fa-fbY(g32>j!Rqv!fLfdv8X(?--T2gnsanKCBf&FtVxb38zbhvS} z<&Tz&vkSMnn5%2VPz*Vha!vcLej@teybWN0 zPL>2eK<`&ynqAh8>!I(h**rpSF|YcgBjHWPu!;Lkx(7~l7fX{5M(@(R+Yr6m3>?~D z1MGG#5Pts8wvq38;=ZR$m@i&^8!s3mJ62=xUEsSNc#mbg)^YFTyK3NbsOX04_>ASy zh1~0T7@H${uznn3yGCPSuVEc2M>*&tP&w>{p{Wi3BMXN*8tTDb80xq+VLCRk#`vX# zxu4&o{8IcM>)_g}?{F6C<>bmXWzD;+F;={4J_o&Nz@_jhPc4 zq%#KxbF+`tkJK}0v^htNa5OfY8&uPJymX?zk0LYJ)1!EB(G_FaP7d=9&1srn^iI#G zQjeSMXI+kYoaAns1PA`D zT2Zm7z%9LTF8}Y4>?%TI(8?){i^fYh)4Jx-)R`A7)Oj&=ri8zIrT%WpNoGoZ$WL$u z^|#o0^Nap5(s?O=8|9tc_MLMF`fINpZ-VwCK2g1O4&T#%$8-dDQn!@><_%Tjr0m{VD|7Z_zX$`FK?wTYD%;*{nG@xRB@K zJq9BJ>A*j=LKBsciGp9{?Xq|HDqi_S0N1{Mwu* ztO2j~k^k(o>+^pT)g>6{0I=Khl)`#}C(%|OwC5j@S3~Q2dFoO>Cjr;&2!7czqETej zkj7E>f)AZ-WGo-C?Tn5k#J*42ziBA@z?ib>yq)s{(V52O68Kbj6YmKg!IC+l?2FzM zKbE*)xrJH#EOaJ^-Xs5Jwt}Zjc3D^sJUf%igBqh|92 zA%3_*Que-l3Hfps-7xZJ=w`8f-4D^qX@( z8%#jof(?5n(DP~V=1J@^pdRQl+>?Jk&y+LcT&_uUmGiiY=azDny>L9&@fMasa6Ptt zv%6C9=Dq`6IK<7Fk{`l=uE_U8(iI%8k^{e>9`R5jx6+@|v{YvpGj|H^#ewXSW%v-R z4M@53dG;Qz=UMrr_C?zvoB-STknJ1ib?d4=QNNRaYLK?kiIGGWAQf?aUoMaZaOE<(PGUT|c zYR6MnbeDi0lp7>%em3|8-izMli*s_n*eRSMD>KXare%P5=CbIL+!^pl+Ek&Tr-ciY-UV0O5bqYOY2La zIrx^c^Lz(?mHe4bY2Jk6@sFuq=8YlN&#QJ11a<0+E9|TGX6l{69H!hJo8D%2cl#k9 zb$}1qUq$r~pT;IHl%wpc&S-ARLd&AhCXe>ZD{En17WQqpw80v2D!+DThC1&Jq$8b4 zTT9ySpe_3y?=QL|%q7YhR72|5QgZWzZPN3J=hFBeC0tb02hrJ8 zbIhI8`6EfvaYGYwY9kKMpO$ 
zW5mXA+sA*S?eiksDAw&O{NHVmgTN8Pkf5#+IPzZdJHV-V;Gpr_Lqv#IWJv;jw39t7 z=-=lAXLGMUC`Mg=Ypwq*viu~yY}=rCG|lz>lyAhHXduR&c=P3&Vm!&?1is!3SHi1& zQo>75j4$u1`ZW#s!bg~C=PW>IwK^o3(0~*0HBGospVaPQQJ>Ju8*uTo#l=%Ri*X?v zImU&)8y|g#kEv5~PIxx?a4#O?Pt3@`!5Yg$f$N8!cyw`~xlp)5cA)#*_=YrxY=hs0 zQ}!4_i|B)#6@{O?!Wt%fejWDwrYa^W`LcRbNY{g}$%YBfVbI*Bcjf4v&pYC_hlB^g z^ulD~V}WU8ZjST~JQ^DdX*;$a)kznrD4s%cR1@SMcmZ~1(Tj3_fzOPnh%Jxoq3LhRXnWordQMz zTNB$uSEFB)J_rsg*yf((CUZhTdIfpXuzxzBu~YaS-OI{=H3`}Vmc)23ILX$lGoV-0 zj{+v-Nbqte<#Onq-RN?xfk=-C^F8p~U4c$S*&{-{9-@Afh>^vDiZ~XT(6`kMOE_%BP_1P0{lwcy470Ff8C3yQjcg<6lUdv?qPBwg8zgykyGC z8CeMOz%rL-Mqj`-KdP7HRv zmeLOVGXj&~(776-LCIk=KBTFCpzmtG=-mPh(D$-W8$0ULC{I7zn?$>``*p5w^Zbxmq~vOAcgdnw=B{vrO?bMni>_=}O{hP(*S0yd)ZAd<&1 z&AM#zTHDqR)?0km0@xoGwF7Q5v>W^l-)YUo6#T$3FVu6{JUN1TrAMz#C~maCJTZBt zzl=`g3VC#Z$9E2+|Ip~R9D5U$uY$R6CgHT2{mhw_2}qh!e8y0}6g+25Acm@-}(d--zXv0nq>) zHpJs@->a+}(#PBFxG&+`Li@>#JMacsw_IE1ly|k3HSSN>Uqjz%&HjYA_0_v{-n8;) zYy9naHjE`cs?6t+;U|H!joix)P8X4J;6UqJwoTW3+cBTEvdzhv9|(U79J6*9|3{Bw zwy{PLB;5)AEacO#nG-ag8gGs7uQY#H8{+Z$=lK6g{ts~=yW*^Xb8(mp_m7J|z2(jE z@VwrmGYSum=pX!u_E%^>GPckt59kbm)%6GH=UixPX;E`vA#kA~@u2L+{qf+hRDWS~ z9LDnBjWaQ9Rh^SzwLm@rxD@p&SxSNxfrSCPeh8`Kg=! 
zQ&)AFSrOsjh7la7@6Eis(xkDI?D>ir_4`V`I|f>ljG#Z*hCA4k5k6nQI11-9lu5u( z^IfgeU5l?!`(eq|#eP_wnb{iUF*p^U^x}Mjyk*S0qd7)%rsk=*Kj*1G@IcIe_FY4N z7uo)jgNt!fye@rh*ZlioV9`7(zE+*WX;%T+?s~x-XSIzrb#kJO)yULU^~D9|MXoLU zM1N^um^G$tJ+pFxI`r-IsO{5jTa0y{_R7}g;Vg_B=>Lt_O3H~Q7?=xO<^mU)z)$p6 zPOJj|H`@Cy+PC8ZZY5(hUu4m@ly6cztMZ~d$qo76kn!7O6P`ia>YMtk{;IDU5A^il zD6RKVCWdPYW%X{p>SSD`W3-Y>I?I__4)T|LdGJ8Kmf)PzRbKe_`4BX{~Jg1@H7M9s`ZfQpVZBnh$X$UG?!v z=tl5~^aSZjKj%DDjdMEBxi9u?ROk-a_3RP;m)uw{U6DRYN1Ra<{h7=|yc0iY9G`44 zyJy+wqj`3DH=_IAOxX)(N zXWtyZnFm4Z(6P#WA{FK%5$>ipJpU-qh0|O`;~b8;+7+GrU+UgJPOhrH|37zr?9NUWGMj8*-K=sG7F{8hL1T?HEjJJ$gji?u zgI`h{lq#*L11eIhGP~K$FS9U&1Y#g$P*jXoKu|PFyWxi-#WsjqN|oN3$?b+smGp!4 zS26iMU+3PtJ6Qs#-+uo1{4tL^_uTX2ectc)dB5N1ectCBU`MuvymJfkxZtX9(Knuo z(a7YhweO{X3z$SV(LwxFA%7onnwGZERQ!gV+?C>+afbX0!IjF)C4ZmxjtVz(hVYO2 z;>X%gwi)Kplm|xDCpld<+KXLnoE4Srr3`+rtnjP&*oCyC=V{S*;>@z5hhWT+SG=S? z2<~bGbE>GleItR^(%Ca;+u|G+_;OVPXJl4qf_73SX$JRcY-kKJHhj(RIaA)({8fHm z`d1fDiC@evt1yOWcVSidy0Gy$zBb`OXPU^~%-cE&wMP(=IrhGUV~aCGv0tEX!;Y`~ zaF9IZ`Z3nn8q(Fmll-d0Z9v}}fXCibwjsmbl_dCz_pkJzbkK3;{OFk~Yz{PUOwrEH zqeGl?E^@ch1k-1uw9i`@x7fVZDDBhAM?aKL{J4#D;p!3aleBVsSa*`PEDjZKpn1a=bcc2Y`jU4Zdr7yDzn61}jLTjj>QuUX z{fWF}3%KK(d?tN7+w~nAM|52RPEXP}px#@7+baS$^(G2xd%GPA`SNV)yr3}$T>+%D=##?C4M}JN-%2LSjuEe(E_q&A6G+ww7Rhno;qDCX zjEn3E@iz1MnqaqfF|b`lT`5m`@h!}a3pF1Zqw|gw_oML~;%1Y@$0F9ggb#3VPcu%0 zkA2We^K0P)KTY-me1)28%O9wCBgW~YDV{6gC!K$=%vD^d&T}q=-UFjg!dF(zv>qcjfKwQ0h^(NP+HO87tIt+ps_T9i+`m5S0@NaJod zjV0#dTR6d?@C5o~m-1LWQ3>b0+M}*3FdS$P(jb^GG@ zd$r-@tI&5Hw#7Qy26lehOB9bs)y*Q|VPkM=dq0WWbHnyNW83o^@&U-6Occ;fJd?n% z2-$68M}(&wctWqpwx8!tqx}ItM?T3V@xboZ;$(52>btT^&j3y&|lr} zly>Pq&qd5*+I2<%`?C%j18%-e-$4|TrY*$6F z#q#m=2(}~2NAWFtKQ#}MjEwn~SiA8y()2V{jaTb`uU#(Mc#L!K*;R&@ICEa|YGGA2 zd7(x0FtGp)t@bEd#Hbv(Qx zhAkR@tRpi1&`FFjjWq{2H4fEYhhbhpT!Hm9SpL^IL_d_#MSNErZ+o>8+}f@&9oluR z^feyOf?fG;Z2GRbwD2Gtbbtfd8%c9LEDDu`MSE(!I^Cb%8X-d9~4`K z-^N3S4#_txJBju34b6Lem2Uxp@gjah{8AqLSLCi<{3aBd=w}AFJn)v`o5rWLN042T 
z|M$Nl#7)xjHs6@LbxQa1y^E*z;vG6}y1o0dhxwfJs%Yx+zn5PhKU-%}_lbn@1eZz*F3CVuN)9>KCXUA52wxMeVM%QzXJI{OcnF)W?D%1 zi5{YFmp!{AZ2!tgUJ1Snfgwpf%G<-4f<00G7}obhc|}WnRNa5B_AB6(F|Bi`#7B%5 z=4g6XTlP#5#nQ2EU2XhLq`!spIKSpl*4~Nu&Ekvp`y)F~W9}MsueFb;LvbdHxsz44 zn;nMSt}@)ztSF>&vT}`huVCu)cI0p)Y3~p)YDjeNms{ zzGR|$-_X>Tm~Nqc7xv}XN9ml&urI&np2nW2FW5`nee|V=zUccg&__7Z+-Ym%dlqgV zr_V}HfPWPKM4s!K@;nmdNs>qFSj*Oi_N3|>Zp!!fQ9kj*HBISXCtY@-?m$voS2umT zJ1V2Gb!Aif7o#$=cU9)SP2X;BD)X03>G(sl+r^K_fjrK=Sl-2QAXN$FK&Hf+RX&sl z8mGIuLRnzqaa!VBPK|eDhQ_M&at}|Hae4Og>$By&erN`&LMrr)(}5$0Ke$+P=}L#L6ok34^fZu z%l1c3?22*kIGhuQf9rBrG3xz_&)bELw6>6)GXbY;_~V(+$K~;BhUH0D-!)sA*S|e9`@(s_G&l(sCdB#JN@1u)+ z(yuDB9Neo9va==Q;{URVq$8y(m8QA9@-O}$`5Z#K1eG-LUvezAEmY=I@+rFeJ)=*xF2zFAfzrosJOu$c6e zX$b~q3^Jc=!R`s$8JA5$|66mc0~6Dzd8puR1(!dH;FWyVxhF9moBN;jh!OZrXkSV8 zevdN3UyQe>u{-1|K>jznO2C#V`>DcO#u8%$o4-)N4g_w+R2pZ?*nHL*`gmW!Q*@FZ4IwLp!n+@8$^u*Y4yg`|Iy`DlRouDhKnTbB)FYAG8tQiLSD5 zJ{EjEvpTg29e zbtH<_n(BFfl!qOI*`B=Gp^w-+eb9V>_(qLU`9z`TwjBMIk6L!5_Jg!%1w)7v%`xB%Y55wU$M<5|xwnp`n#S%xalrZ(wLe~CO7qkf&LYg}aU_&JQt=x=;Q3YO)QEz*GeUQ8jo1HZK z|B6jxY5r>sjdKu)xmX{Z2Mj6irXYFArmzYW+GoxBzVg-GIwx^1WpuW+=sA%OS~3C18v1VU zT%_+QXrR7#@|2HEx?Q%;1Rv1ORJuFRZXh>U(-=wRpuJmWkC5mf-nhczDPwtpxC!7& z7c~}$RgCz*rN9`e%8o76mV?uinNKDoTBU&3;#2(sPVwJ7=nTJfbRa+Gxg$aKO34a% zjsAyqv+l@!F5KV9`9(JcEhT6`jG4Z3eo>=!(WbH1D6JJ-Wz+_=7w(9M-X&fU4bXAj z?FHJR-4=K|Rn^*AxE?mthW%LOTNym(7;p5)#jnyrY_eN>0QJUo5ol#nXG@Y5ziLWQkdWGt~Bl`9~&_n66x&8whcXAe# z;8x#6Bgd2en3SitG&|H2l%@Rczm2{x;kzwE`c+ZdcS#Fv z8NXINPJ76zwGTw!9^jh`&nfM%qqO@;%S35^8Kv1dMwC{J((Z}cfSfD%%5I zS4HV}M0th#LX`HIC@n$SIZ@iJq-hKYo-?DgPc@ZY6s2t@&Grd?60TWSn&7p62j}7^ z7;d~7Pp^;4Vi#mJ29-y=*9z^1po#o|>>(ybrdoSVl=q{w6XR!Yl=k5W9?|rbQCdmx za88BFc13CLjq<6_9MHdCaH2yj+-_dBe5r93{9yf$^uyuYfHU}RV?MUa@{?UfZ{8VzO<+kz|cK^~|-qCgFqYj?L;sb+h zOvMpPXSEyW8J*WD8(KDtwG(J7NxzJ{c>)%#wWZ(*O5y}Saf9Wgfew_u}&pb~&C>bU?wNmbzQ9Gi$aMxNC&T`NnUanRHuyX6T!6@?w3ob-|nBN%5of z_td_rt_gm0C;5@MTFZ}N-~5J+0jHk{odLqx*S*MI^*vMo z!K<2;NbHOB5P1rAtVWnF>q)di>fD2U%(1nn0X_lm{nlQyZRKby 
zDyzEKb7ae7$98j0IrAA{!v4rud!Pi})h2dg1Y_*47hJbTa7nf(kM5Th-R=3moa+~# zlV{@qu0Arp1v4)DFlpi0km31R@fw71(^n|hsdwBpTlOk{nzSx6 zi}vhUS=p~qR`K#Whf#HWg?Ggs>APZ1dfae4+I0{5lPQLMQh(ZuYpC0vNpxZhz7F+4 z`-{{EPV2MvsQh!(e+%z!RIgy(%6n(i`)1zlyguqvg?I6q?u{6^fiuRVGkaEWUf-|o zB<9J3$KbPxxPIo_Cii&I>uKG0QT=@Cc^7uCi(S_4URSre7+c=SwbspnL4OECcRmQ3 z`n!~L{o?dakQe{|L!Zs{{h@XIKIKM%`BE5LWi}!JexI`c8+p~=KHBE&EBLeD&dY|A zoNueOLf%J+H~Q=7zs8>vok1?%RIKH@m>VWYQ(Pr6r8D2byVk8bZGEA?!?@U4dF}l` zFIKS2W}@@q2{+1P({r>R<(sDOj8XV{=9yZ{B3-sdCqB*u|4#*;t}?u?GPGsOnRT2? zCwUsSC7-gbx5{~CGhVvAXZ4--Qph8nXQS~%oi)eR9xsP|^~W}9^s84g8~BBn*UK-} z1eg4mT1$@C*R)?F;S4?VnxycE&m4Hlz_#1ozlu#odB^*q>Z?gk$sWs-=VG(6o@2h8 zC=MqyPp{%{lZ=&Z(`QEN|Gw)GXX6ncPu~-qm!)}c6E=@zhQ6Ifo@O#;OxMlLuN?`e)N9h|~v z!kOCm5utaHbrZ_Hllg>e_SpF*!?~{efLQ0PT?7u?g8YQ?S+EC@KI$CpH04)OMtb?D zGeVsw`OTRH^-3!AT9Jl0bkf!_;_w?Z4ivJrkrvNzn}o6^sU($|ol zB0UkMFN)I3q^C(wM(OMUvGshE^cK=nQTiLB^be8VN_sjLKTQZMy~1k0O0q zln#Au`cl%5CcQmMmo5-~7Lz`m^rND54?XgR;yLwJXdpXJ^7wN<2+vqan{VUeuGTK^ z2zm64loe058rHu!LqKvXPx(Z?T#x51G?L9T-o$tI{72*gzVo{H$l#C0H&o9Q2Z+aH z4IVq@(DdnyTa9hc`r2L6l{e;F$)A5ecxcxF=8I|Cc1piMkDb?en!PF34q@DyQO3wA zo5qp{u;p8gx$OUu-_IG8|9@faLqVvHjXay{I}oL>L_z$aZCo^^?4?uQU##BC$@ifs z?_)fF!0*S=`-_#?JLUU-^8Ryv;@Y^(^St*qy{o^?cF~E*^3(8bN%tei*$0ac5n0Tb zds%zuUve*PfBdt)NA?aN9A;NIE_S?h4L>K39;(3WdG_RCUuaLBYT_IezBlw)2D`~}t$;PtLDbDJDI&p44zOdG~T9-kO; zTz1cAr}RVLZGQ49-}a~<75ZRpC1k?2Q~DG4MfbOtv5(ZBVf4nNNJ7-9Q0ucx>RFaX>XBUWvq8WrykO*p5ZwaX7%d>FVnC0 z#PlgkkB72Oas`$-j8>$Db^)}K|5AIE|Pn*GQ>YULLXu{AC~;HaBeva&eRm# zo+%g#qFYFxso($1!njK7<{|Ed6Y%y&7GBFY%%6wArOx0S3KXksD)3QZC>S6fvxj%Z zW6V|l!VUb7_#nsA4e4uf2fmJ1TfiK{G8Lz@EiC8Ry=*ZbSiE6J#qpL&>K#-(dKtfv zx0dxn#R^RnI@M z&+`;>7BS0(#y=iBv_a$8(xDvA_cVS&o(SiBqMvMD$sy7CqryvJ5^ws}OvfuN-c~j8 zobk#ynMr4}MrDzgeRiJ&{DiHqeQ!z9j5{1?E|4NlBE$c%PrC2u25{4_y6^5G9`sK7 zsP*Ak-ZZzVak5hFdXsQX9Xrb`T+DqdVLb10!IcwkeSAFY*?U)QFtA^;4I4A~5^cO; zN*iyU(uPYLkGxD9b6=((-Ba3dr?gR~jXP;$+9-2Io^3qkZ*Ap?P8v_~L>9edKb@C> z;nz0)a}t-=N!L}_k4n8+>WAjzX`aZE@gz^?KI0Bgbj^5zCwgAzxPW{1*z!CuE4;sK 
zqcgx6V!AH?zw`g-?mPTzjOjN)`-J)LAmLJm{+VKxz3R-*3&V+w%Ur$m!?Aq+EcT4E zHpCf;Zhmd!JpQ9cPQ9dbMm>$~N&1HK@@Ldr-nkk(c%a^qIk$l+vQu$poU3~(XY=@~ zQ~J>4p48e`nF!@*H@$TljeiKKVk7KgI{+I+V8> zr^puhVkUHXaVQuyL(e$HC3O#6Y5WrR^pbYbrYUVtm9rc4owlsZ!8YQokjSREprM#Z z+egNP&Mr%N>lyp&nP(F_fNs+{9ny7=Vb?omHGbqlYZFKxYA)n*MtBO_=QI4*I;7^X z1K?PGh))9lG;sPo%Bt;@g&~w@cHD`s(OC5OAGhmR84;b8K-;l=_#*YdCyj6L)ZPJ8 z7ER7;6crCx4S7U7*;STpqS(lCUFTlHJGUv$O=kfA3vjkA+A^_zV`-67Y&iaR_~O5q z^O~a8^oY0P+yK^Lf?S#RJoVbRYGSKh*^%gd!Owc1;c0y+JWp<-GcZ`(PCW-g^46}? zpx0hY9-XgeZG<9qt4_XKe#^(m5Bky;ki)AIaDx=QPrO7koxs zet8x)!z}#oGe4m`RcKq%`aFF^kAXw>uWhpHHL@K9qhcAg_t=0>w~CGd<}WfnEH44m z`$$hhla2*wn?mJ`U|(Ffn>L%5v$P+hPI-n<8GN}-*l`N}-< zT3i9Aa3?-bb(DR(e=83k6sd#yZER!2bDN{Kv+NQy{b7AzESuv2`?>#55E zTI;z&c+Q-|f1TAU9Z3Dw=c=`mS4egfKVao#37&w)nmZcpi`27-%w*iPGyZipOME6p zyY|iyU!8z=yO19$)5-aLE^`gebfLUm%SGl6JgoiFKeT#bGk)ws?Nqz>o$=^;oGUS8 z+tj{yt^KAM58C&Ru4TQ8?~F^s9q6n(VDm-qx~~y=pfzso@5->2=@O5kxJc)~pxq0G z-Z%fr?@7lPw}Q;9u&!RvS-I>%0(QrIxUreFffUb8v>C<+u#bm*97)m)v2(INbY4*y z+u&Q7h@WLVRjveW)oQJr*t$>Wr`%3?`5mmD7LFMo8e>jA^zED?et}*w52a&zMtNRq z`H6E%u>X#-`$%XDd#b)Ak47@h~ho=qFSOdt8SR zR$p}&-j60fe-NE>9iHR}LPx3Mzdy+{SkFNq>(%rfrOE?xD)i2>#_!4ea*4jms zNtw@E{g=W&bX>$o?0uQwqwXl}+$c?U)2t}1KS~p?=AyJ^r0Fg{@!_;6?X0HyT9j5q zS3>(=$Yv*>;1rDF1+9GvR?1{k;8^ggu3zblfAYM(sjlauwAU$(Iim9YEJ|a}Yx^MB zo{7?qC(Yi6MZUdJ8nHAJef@W(6}bloxFiciOVRK;aGrsl31os`kZs3v=6dv_&h=Cq z?chE^46|@6`8o(4sH?Gsy2Ad2^b-HXJb$h1W7^voJ%y9?Q}Q>DkwLzR=1RLJ`v37M z`9*hq7Y?uDTdaq5-+{)DQ-!yHQ87{QTq7;M2Y%-Y_CVO!$&c9BNvE`kJr>)>T=+NM zKj0#VrJuB}lPeL^2!32*Aj3T2J|sK-sL!4+>V~pVc=%ZqKXilI8x;)Og3eK7CipxP zzqw)G#j63cegSwHZoSZKagj}y_xgMHwFoJt+kzVq)p&p4tt6%4#K_& z2WgM<%!M!dB-xpA%D(z&`BXA-9GDdrN3%ciKsq~O3guayx ziH*MCll)aa&4Jpg!~Sq$lf_dWJaI-&ZFq3d-;lWNMQ}9`;cCF*Du=z_SsVao0|B-= zd$@etf6FWR%2>ifz7juY<8AG}t`92S>Cu?(@f_GhIC_5*j%GwS3h^G|D3u34-pYC^ zAL2)J2Pb12pu1?U^Ji>7;feLOzrN%ja5WbL1 zUiROL#v<~n?1rc;^FsUsTbL6SYK-yl3?ysU4F`5Fp%EUg$F^H;_xWpIR$|T#V~GQ+ ztzEay7;^`|+u7rL<(I;SG 
z4{ayqb9tRZ!@Zm3HoJe;CBOFGcDdV-WjgojdF-!YCD0~E+IM%bfPM%FBsPVfHBI_yl^X*X@bFyB?biOz*c@veAiVu1E) z&!Fm18LRh@v9Jw(o;GwUWs0;>QajN+6dC3mL3ehVPTQ}vU^!Br;v#I4SJ*t%pZ$@A zPqsrFa^glGTQ8fEd~^otJN->ppiLp{FLFrzrLN`j@h?r~2eh}Avt`R; zY2=7>qU5jW);TJ;F5)h2%H(h1{|ey?nX#(Dyck_aIiGnreO3%q>nXR4X-~v7@kufA z6)|>^Rl4UTnBHk7}r)VK^7-eGxrW8L_y z)lueXo%=>PwIdj<9}QY+o*vTlT*154!_Kw5TNLkJrSlh_G9~fud6suuWXAz>%N47~ zTvzGKb!XX9#|4(swxIPAz5{P7_n5e{HQ0A0oy0!v@pw$hpBMj+@1IS2%n3ENEdQ`? zzewD12|sl4I$w7*C>eYE&u|I7A|NEHps!hRWfNh%iAG|2-r{ou$Z64>k(@)ttf}yX#eLm_R zeN(vs%KdijWS>q3F2!ohw`;1=*d@=;<2;qH{x_13o2lya$?Ncxk7^z;WoS?9DDsU^ ze|-t*@|7$sZx7~=f;al-MQICoI>03SEUpkAQViz|eYA})SFsD=UT~i;I%G}%#xc{s zZcOwRjmT4DJnf>LaeZq}XMF5ZA5>2|u4m4An`eGhR(W*hiFj!-FzD>S6!4ARIbq8@ zD)<(*nx&TJ-jKhLd`?qT;@OcY~-`D>o)Mj z;)>SoIg8nyi_Mqtv+mjTCFq11+@E3bQ&NBX8=syl{wMa?cYIR?XWq16p3m7d^fgyv zKW23+`%bkRKM;t#ebHwwvXCwg%A7O z*_=-w&ac#NYiW~R7ZHzS-P7xWPcS8N9lnn|C z-hds^Z}~I_u0$*N8W|xyWaC7Zs^6W+;RWChg@na4kgY~?I z=Q}uy?&IK5{*k|K33+yQM2EOf@1PC!XEXUOA&>s6UG-Oau7wvakLuDGQ(fmBA>X?w zuRg23BLDw9YENrhaa*eU0_qBJr~OH@Su-rmQ4t1ah;kQ+dH_*?&pbj^JTd+FZ9~0xjm=lO^V*dpBR*+A8 z-P|w1BsiDw{T#j@9k(6fQgtc6F>kDOk>@Ssk=_X7c=4Gc4Q1E zX9$~)jnMl(;0fuCoieU5j}DrUn@Ocb@3s!rM;#%qFSIgLyy_vJq$ds_cceFTPBY`J zwug85eFQ_se8a*p%RI%goVl66*xgqU?5i68PCoIYSCW1S+J0{f{9 z?MiEX$xg2{4V_^)6ZrJ_1*+10rRrSK#aK@?2eDiJLcxMfeIgRY!@J#x}7H3)$=vRDGcPD3}EOxgCd1S6r zT3pw;D6QTw=g$m|**?%N^`ZM_2c;?fSWiDB$JGbo7HE5|KM#E=JPN;g@TES;1{1Hy zW(oBsJx*>8a}CGW8BJux|%kYS= z^IPrG%I;;WxXdpWM2xho^xeuN0RgZ#<-K5m%p|XKda>YUx-(P588w$v-8wm zBta5g2nkOJ} zj#z}ySD-Q%v>w{8I&P+{^2B8X6Y`~7b$>3(v*`$VK1;vUR)#k4Ph;T7Mn!%vTp_x;on%TMOcS>dZeKH+97WyObT zS8XEG$1QBtYVAVsm0~`5W(4DHQNIr&=alY6={Kw2X7k=Gb;R;fb?c5KV=itiqfN=5 zo~rP9VPlT^3a`103#-rJQNvi2eomQcE$@#6C1io>xLsqZI1(7($rr`P%=1`d)R^iq z8v>QH`kk`Px?T0WBdVj5d_&aJQ(ZG5zwXXt0xZ;Q{{Nl zldEifG2eU;`o%P}a>NUDrs(W=W~O-OwcuIu@n_WGQumSSx#V}KC)AfK8$TxYa~k!u zKnwBx@`#7TH_n8v>-EL|;(f`!f2Pbq))OQv1aHhAvAlSW?^@4^>-FR@>NAJx41YvIEi*V10PYp^eBC=N&qj$6h|#|!5zqQB-s$lh_mqBc}-tl#Z?&tpHhFBn%gutD;L-l} 
z_FQF_ak-Oo&iZ2|zofSm6Y25P9T1$;%}>1C-*9E0lGa+LF2+&fitEP`S1@P7pVL>m z0sDA)G?xnLEqGWb8x;7M z3Wa)3;}74fc*~43CVj1ggnsdMbFt+e@d|eEqtCL}OgL{&#}3|wo_tC;7hF|%-;Oo- zM)VNx>6>hVPw{P``d+~%C;%H{Ry?G0#rrBZ%o``G=JwgLPecAnj>IWcjg^|V=6 zyCzu@pKKGY*ReNM&taab=X#zR_XyejO8*4!ZMU&5<81J^r5#^qQ)^vKbLDlcT?|-z zi8Tx6#H~Z296b|UWc{+w7{EXEs`|Q9uzyT(hD$k=KYQMd7rk^UVoiSX}KC(4_dwz9&EwHZNjjSl~y;@DPp@72glmU)6RX38OmqmgU5fyxfR?2EIsR^>j&(5kbJ1U)!Bgy-5CECkFmc) zIFvsF0XyDjxN{GDF8I40mS)Yos=g|3Z$t-`*Ly4F#ItRr|D63X4m4^mC*DxKUrw6+ z^WhKifdT(tTCFovB@@E;J9yXG;nNs5ZPa%=?-}Yu*AaJ0yE+>j9HiW8<2zqOUke|U zMIJvYyKY7jzdBE?DLLLc-y5YZpK)vw^~Lnh;Tij2;BD)7nNRznUF$a_XC#Z{b3p)( z$KMva)I+`6ucO#PooUgR?V3=%l;YOCX({L5`)0jA8RNixWDR4Q=L&!B>;bT!?MvrFHdxs=X}RyQPeu zpg2@#9?jYPBf%FFp?wqH!C_`D^x@l4!7#_p=~e@WYi``zS01X%U$1&cX}`xD!~UmR z#*Rkjd<_}i$-BPcqp-HLwQa2}Jv?Dc%g&J<8{5)4UuAJ#bPjFlWd6vuG%2`Uiz;- z$2_CHt6tgmvJF@_!Zzq{AWM<6vP&;)?Bc)5UKE!lpUT~!>46w>Y3aJN_vXkKf(c(UMD&&5}#E?VOZL zP9&FhIruP;55m3bWvtJdXRu4~`+!gGh{U%>-{mKkuY>b5t?V)MN%{4?6P!v$%;$Tr zTd{i9jOt#6k7m?wRQAEBYS+uCL7>t|$pmZ1TDVwJ@{>;_Ka^pIy$e7V>tSyQYc zCb8L|n_?q>Zs-0tBEJjTZ@}DN{=`CK3;5I6$-C;We7%FU`TURP|HNkeK{D$}V9@*@ zdxG&A`gxH@N0eE=RG!Gk`*`$UbND$CZ5MDh!{q#5eW=#H=!9cc>#r&++K5gnr?pA( zL|hJEnUz-;CN=lJF}Q`YR_4)vl|vSGtIj*3@0+K559jyX{*RJh@-5CUdHQ#v z=@ht#E$RLQJkTspe`yZy+f|f{<>?3c|194{_e*F)bcx%DR7OQFlBqSxTFNQ}(H~%Za@RGlL`^@&q%8cXzV9uuySL)j!}H_^XG8HYQSU+;|bU96`(WqtIcA^&RL zZ0q(m!efdtvwSAsUAN$4Z)`25S;{3=0J}EjmR}&(f6aVTx}Q0-rN7`{4xM#MQ}ln# zFUS$)QQhJNr_}5lIIppnviPPgE})IYm0)=(7&5GhFD^_AegrI2+kNZHv>WQ@^BUh~ z9{)?qFN*k9OKNnaXz`Q3# zOim_u4|Vl72IwPtmH1)$1&wTMNI~&CS2V;|@bpvC*ZAmmn!+aPxgkhjvUMyCom-%% ze0NU&)-k7J^H`dE2P5^ut{Q~?`@a%%^ew?e1%qFaw` zFYvx-V9dB-oJFGl)?*c?l`807n-9+fkKJpp9~etjcxR5udQ1(TJW=PcX&)E!{Zow# z?twiuDjF=e`bzJN_i@EqWbqv^22TOMCtmAb>nG-H^fO-QTVOn+qa|z3web_zvKQgn zIqXZ#12glCG==N1k@au^7TNWwN)ZpzCBbz9j zpl;FP6v{X^hw&|$!sbA6E^AoNb*8bOmvs2NvF7^f!1pXn_Kr+cTevUnktmI=yaE39 ztk#M(&y?R+dp1+mA<=`3lGRiDJC$wO;@h;adC1n!lAJ8vU2Wk$JYv*?o&T4voj@ 
zJ>W0?W=yeOhGvx|AzlAZR7P-5W1K-V#=bq5>O_NY#<+`o!KL&hFiel=URiSgzCPLz zjYT`X_wpXst2UeKh5=r-?q7XeJaK8gqJ40{X`iDZ-8)3X{NwO_q3*cJdSY~=|Agz0 z>A7M~_D;fqvIM-jU!a>PHSgE1WvO{d7)t7GoG$t9Z^7vLcb|_cMFW zNzlj4E1>J*4mcaVqc#1zpl6OTCthnFcRjRoCpZ$X+zf28qtp)PM)-F@^M`O(H-19QsSZK_M~16wD5z z;ESPL*azfQHpcC?ruJ`%;M>Mi&@l(IG z7i9*x8Ov;XNM_KRe22KWx(&)&-~u1{k#v|mHHssCFo44Z=+n_yA-xQzHh z__F+vhtAAt=jC?<+PiG{&wh-0hJGo&bt)fFPv+g!`ymblxf(cPId0=+;bp_zP4R6` zQU8fILmz8@hxCI(tf8q^8yn{SwQs6}=<~<-Dvxv$yg06JT4yt5Y&QCVJn_rU*TGSE zUaDdOKKB;U(8haZ9r#7|fy!_ujUSi8mYXQY|E$VwrCg}j3wF)b`gANE!nif9Vd#DV z-Ek;c7wevsa6o(binB%CLCKy!#ir|iF$a4Rzj83Q<8;}jHRR5=GG%2azfOFiy^WN` zkB5!MJg0Ph)TjGwpOh}xt^hXMHhJaygZC^RvehAUeKRh?a&e#Dr9MSIN^6U%+!Xkw zQ_0Wyf@)viW$$xf`W(|#y4k6MUwl#W=O&!$6Q&a0WifEYdUP8+T&&gx=mUCQ z_qLp>SWKlqD>+Zxr24NhI-kBeW%cU<g2yrK_l;OfM94vw!ZYuv}Fo3p=FZ=N_P+8mg1i67pD{={B=o8T+H=^Px+A`eUv zUr{0HO9v14e+4?MQhdLSIf6fJy5l8%#rF;^()X2$XYKx1Y%IaIn|@(SOz?+MY(M=v zER8WOxMJUv^%pAUnYoC@JAI!e-DGtx=_!8Ud-UC{%;HSxSsDWtf1IbF@6B{E5|@(U2EyN($<*-20P|U(4X-y+ePiZIcnGP ztlDKyj@lKz4t$b5@n!Ny|Kt8WCiyggoGhwe{p?SZoYei^(*oxIS&bv@!?CfpJQeSe zs*KbVRf`L2$7o-k;662KU$%wtop#o<&SW06@!K8j#VXxRUC@U3D{Sdd4lyoldqQB#E?&ELdZde_EH^u9NUfbq+KT*(q&N=WRcvopJbXKrn`|fx6Ci<#A{O;qAwNVrA z@QoAeG{!r2T)3wJ8on8)5re_S?AKlrF7RaYstTYyWp zy=<8T^;#apza)N}AK_ejQaGoqVuv1dI@b8?TeIbuvvilxN&2RzXr*V&19q(BHExk# z&|CCr&)+mF6aRN_nw2P>;Y+4757K;hLxMAN6Zz`h?XKjN&XHk16?35j*WJHQ-+-|} z%-}uXp^?6x^Cvy?8KXAJwBdgvXdgwkRj=i2i)~}+OSZvt%V~2HHb3`tmv5Yvp$-W7 zl;)Z3dF+n-wL$8N8^;WH;G}t`uGlb^>R3Z8C-v9XjHRX1T<(W*fyEt=55Jb`*f5sh zyK&i*ZCL+ge|&ocW8C*2NbZ*4pBapSD!8S8p{)QfFa}D(Yk_s^3iZ$z#Se>qTG!@0 zjC!I>J4NF32o}*qJJ&rv&7}z;E{F@rpIWzL1Ew4Fl^=`+2E3L^J?q1D#;CwNB z+TZw73&+aFpYTjwPt1cWSo!7Augq^Py`ga?{-3*v_(SPg#aeOK47yfrsGQ?M(-P~O zwbk&0`HYQYECO%!?*lU`c(HfO8|tn2Y7)*?=(>u%ep{d`dsjv)v)a*JmagohYvmqT z*1g%cRT*nl90oA8vY#SxTY0R#h)=P&AxMj^1@iDMTVM`%isCiaz;E=?z~iIDGveDf z#0)F`x4GZ!LFzUY-AhCr6|Kp#hF_`HT;N(?l9HHf1YFZPus&c;QQoJllzS4Q+96|+*BD4IebOM5Y|M6cdHj9n?l`y9>Gr2 
z+Stvkvs*t`b`JLkXFT@ybySeMU#&}5Xy5Wf)7j_GlYF|r1;1bj%XY^!q_?ub+LlM} z6-J0nVSF*h9t6f$V+SGgx)Tm(UQaKxmymr3)X}LtqA4)+QJ?ZB&B!xvn^NCbsAI)Z znqL94tGm+DA2almD5u^n!e5tHKx`Zwup(xY*Q| zeZ)&Z^N$e!W}Xs$yt%1HE=rk!EOCdzRf{5MRMpT`fH$8BOmJ!*{L3w?-h0Y{pr zTDt)|;uvV!lCQ9b;Wj_faRapYO1^maT8!~W@t;0fMgGxG zwbkd1%#bfMZ$^;AoKt{pBl-#7E>C`SVpV2|cHCVyD+B!$SHybVtQ53KdhiSRL|@TW z^m!Y!Nbz4Zm3;zlj9*5Z%|2J#KK`%;=(CwIRMyo2vU-zQ=VhpzeQd z7oG8|PNDNL(D{)>!aU?8%tidHewNHH02ggNJl(|~6TZx6b#*eyVHlbIwP1c%0!dI>^0A+kcp7WcH_hJG0C6-gWzxj%E?N6r4{51~aOf7%JaV{x$T{(bLj*Lp`uy74Lb zrnKG~%P{}Y!TrbN#PhPf{DuLynrmA*9Qn322Nn(g=?t^~b`N})fC+!ZGHfd8nk4?Q ziV;s==eKIlRwOUPJ4-5?gLT!-!MABg^H|Gw@TlZnk-53X?nUm7;3DX#dAjzqiH68k zC+$`{K4{Nxls^}}Q(m)5?X9a0QLoOU;;xdY-FQ6C&2TUCo^$H5jed+z=MjEK|I;rH zz3=JX?gq`|R@b|V@Fq5$_APXcuC6PV z?4cd-W)a>j3_|yfK`ya+-?vYL{@95RTb>*Yz7z506VpQZ-xJ9N%bTCPKa&Fwdw%Pf zc4&-j3CAe#zUBUd<3B$hdPH+C5B#JC!4J3l5eA(wcID7wSV}V`6Tz;kw0ZN26_sH7VKx}gZc=4EwUY-Z8 zQL@^&lXBJgj|DNl6+87g%HRIHV=C}R@fFyI$0W?k=YDhOe16Z*LMw1LY7#oq&r==# zd*XY1DerBPZC^Ne1lbnZZjQUCej4*2m}!H{S(UmM3Y={FG<#Bx;|wNecV*1Ub4SeF zjWvgTY=U31aq|)CjAgF);3)Ve5&dx-M#_r~&8I!bru ziC(*e6X`8vwqacV&6Kv3*D#ke_>SN6T1(O;H%;i0<=Mh1@GidBG*?G-wz;w+D3`G_ zz-^*9$t!J@9b;wiD__6-fXo4E!Bwi?Z)`3S-wtle;57dsvtN8x;axKSY|h}CP1;7@ z)vvDruWS_LIs9RsJ>S9@xUGlgS3*zpQ) zS>C=^^nZ!*+D@B&wCRywe0zNKjPW%WKKxJkaL5bsC5uij*Jb-8sMi^VXL6jwPV9*EW~}SA7HoAwlRpCZpuhMjv>&8%rGGL!?;-t> zvnKr!?`fO#N6cePi8tG^U7V`wSWX?1VVo%l?)tzd{)M`D*HoyltUAm1hQe5S=Hu)? 
z842#D4bdd)k}o;3PyT~UWhB^1ySgKKE_!aj+=%bu#>jVZUtMvYT9egWhxun+C`#xCD3KwbMWf_Ied&PLH<}9)unFQv^qv|MRBk25iyO?*x0@b z-aLbS^vqpeI8TuNT0$F!`VDn$qYrl8eAh^QkM|&c4@>)6I?tG+|MFe`8T$$1JQ?Vw zJn|v!@t8w^ucY#sk$tBoSkqxH)I49F4*y6#$i^7UO#0ef>ZwX+QuboNfU_^si3jpSAFl5ox-{&m_p(>|KVNmj@nlfAF~1cK+S z#61X)8WXlp&@7&>yoEe=UiH@d_f=`@DDtVyS(I@WM)_5ivHS+|>U_hCUB59sYwFo- zbjQh@OhaY*@J*er7z^P|aDkiS!Ofe>=UBNQIiCR!YU5qVdbNSRnS?892jE}O4)P_d zGU~^ha_7|5C)*D1YP$to>z%R|pRv3F7uI(b_whaSv5)f`FE02==}*!0oe^zfoLD`9 ze_;oB{}3DM)emforyTO+AH5IMHQ{^EZK?SDR08w=L(1R+w9rkx`sit9?of@waHLR&nq1 zUGysT--^6;vMU<5fFI9izmq2#KTSU3)k$kJ)mniu{cwa=!7#6^d&)}<_RQGt&Er7j zenp+O9Qs7G^>V~af-6fW%4`4rF_ixt-UGnjs8`Zh2`XTQ|!+dkeb6Hh)vnhR}aMn05 z{cVjd(wr9KUy*zEKW275F_x=<1I`SKzbn@IO1>q?)5&+;yUhA_Z5gn2mf6Qy()&i% z_O}O1sC!vuI4Bf{gO%ms;QSJOscP>G@c`skd~=EK@|!44dqYJ3GI>oYoF}P|6*CNe z%(!^&Qu8-<4k-C0{zDdaBTE^-PX}?@$@nF-_Q+Fq3G;dGYv#R~&yJQ1flpNSXz(Jw zeJJ8v%G$O5Ld27C-egl=@tVq8e)Cn=#aheiE|x~>1z=uDKj8nyW9FuPNpE-`=fBk3 z{o&v-&R|wdp$E(hU5sy#}UZ#dge ze74T0{>|_JyvBczJjP#R+e{GyGI~w5wv2pLcY82EJ@fhh0BLu4+k+wA7x4ZN@3-WI z-!=PgFK-XNGz!m=-Upluio?MY@V+b${oIIt-f(cy=x|UXO*E?TU${GtaoF2r8)%{tO?BhNis z`Dp<}%w~v{-BWz9cG~Mxjp=!>(c-OO?c$g088D~*=AWyLTw>F{uF_!r#`YFJml)ZX zDT3qTcWNzp^5uV7NA{kmHHqgqrvSbf&rzT5b7C)6(8;*Hu|4VMJjNXVkIIXt++~h` zH7QtubBTlf!CK2$t)*x}Wuc7_ZQ2!oFAsOph{u>%;r=hr;ppBfxRsz7URS zzat#)297&`qdEp0k-z`P;N!AIJA$hj^QHnm3X}M-v2|tPcC%fxZ>7&~1`fA4QrFrl zbF7B&;g&~&?*WTsQ;d_A2q$Mp=MTLZ9hqTHra4NEr{)h`Jhi6h@|4}*$5VTk(F^sf zu^%*#$xG(&l-$=?yGV2@4%c(o5?$3{{MN(uPKz7Px&|hF-w7-!vmMzqT03CAU6bt= z{{L>R?1eFdp+6MegX{>uJqit1aL!`<{+*ikFofS8sAh>nzZzxEHO2f6+-a5YDI4;di0I82qABC%?7VnV5qw zwBtUUE8UWwoGY(nd`KoGeCDIeCDShsWxBhC`6K5|<(Ox}pVHNt(OWsI{nnt>`Iuc_ zH${AD&PSx{vywyNU#sUymtKw2Oqu@`exv+=178C&Y@_|&>Rf5dnf$G@Qn_1aIquqq z@+;jbRvUW;{;7kqNzMhvroOG7xNR%?;0xH>n{~f+*0|J(k7PRkS!bw!hnRt!H^}-A z>yxZan(|PPE>~-hy@TfrTY|e~y8huXw0AGYd z`cb55{;NJHX3fA;oKq5X;OmNIy<$gh9|(00G>c+Bv`-E@I`??65kHL9Q~&Bz`iQ;v zI`FVHWg08gXWkWKD&DDxmLBt~D|D7`oh9cFwY$d@}$WIR^9T+{V}l3>`X`XD=w&- 
zESRD6WoAck88+HwZip|R4|z_eEp z88YFgX71}F$3~gKZ@=>whwg0{hG+gPN>l#CdBZbBGmrF-sGf_4lcI(5_Ti*(BA-s| zFHMx;wPI+ui2q`~NLKg{9Q_e*q)WuLbcHm!EIP|*AvnRGvs5&X=`R|xrig8(`|HlG zF9f%l+`9Ta@R~vf2)BKF%Tu;D!l%oB`K-YuYaWyM7OaYgT?W3Ez%VXGP;9>;+iIob zUiq_S=37BA*AX~#M!^~U$Jjg>{?#}+W%n~qc6{W=@ierp%p;HwO6tV{+cl52bU z9)A;_<8Q>bkE_lfM^EU`kZ(5b>#{ulT=Iu--cY;#c^F+KAUhWH;Yl1ovKOv&DX}(7xInBy-<`Mg=SPJJ|=Lz8Bohvp#H|B}8g@ zhhhPG@gF3aGm0*N3qNcpF!j)12b}elJzUPKCeJ-~F5mlc{d-S<=#Z12(Q!_35rJ zWaLvR)+f^7m$;EyrnqL%u(`ArGj{i|bc=Mu9DYhc>zE4%7L@Gx?@mKcR+e0atazSs{2zAiGMZt69w zf$53B!~TGNoJpX)$1k)qkb5%=X7wz|Uir&(2HO_Kz|{R?71l3)1U#1l%jScJ4#jrf zO7=N97jdS72P|c9Q{;#I*1S3GR%>(K7}^1vJGU0RobZzZ=V`BcN{a8#-=q2bT7RWk zgDwvDaB=?^u^wvz;{GP~zKO?l=C$P95;v4}PX2Uk{?)Z5$l({0dj`JkL1$}UZYMwC zN%2X@CFr-A@wFM8>z-k30e*!fVC-n9UFgX;Q`=?eS%#MW zOxAik=H+b+1Lw4`$AqKQKZp*V4{sMN zye{Kax=Y_2c;3c+^DXK9B&~I&RMYs2_0V5Ivs-@T80nXIeP91G?k9<{`6TDQMe-Ls z$p1|`bE=VGV+V8tKe1lZe%Rcy`!mF+Cg`WLf_i(YH?C9kSq6OsEBtSD)3&Ne7-=Lao?wcy&yB0CpjJrsX>wf271%X}dIu03W-|Cd&? z{~&iD&A0s1-&lYhw}5`|1aHe5^K*leN$QV0XE{6Ky^!28#k0C_H29Vxs2~gv&Gtr6ZTsPTLHNs`%UF!zuCQs6?nBW z9N?P_I{BseW%$Yease>PKWf)wnIi#D<8nLi58+MHPBD7_Ch)dZ%C!OLXwRo09`tm+ zJLSj*F346{W%nN~ll?=V)aOKmquY*vqt{Y?nX7Vkzp2ILP09yvHsb?0I4=o2m1HXv zv^MpU_#u{N0zYZN2u}LE2%qK9p0<1GJaFTI8-AVqQv5Rf?7lF{36Ea}p0>&mWA)s| z!`S9Z`vPe?D=(D&=QY^pXU`Bj1Kb>rPw9qp?U~XlceCnh!uJ6<#pjp9KO-N_vPfEu`b%<0(OD7rI{7yZ~lhI@)C&*(q%n;9urdhKz^j*jK5Cn#qAt|$g^na6yu zKr9CPX~MbYX8O>tc+>96aq7?HTr=h+#$486Z?No4lV`s!FLo8~e2e1);(g?5bM z*6jXtqqBcQ9Tw}g7GTwQjOSG1m#|+$&~{G%e(jixzsEAop+Bh{_sbCTpNZt!QIyrX zqR#Tj0^bi`AK6PvFUigUU+BhGm_hgZir!iqrQCS2Fhm@EwQ&@6rra<#!r%iuuZ`G6 z#s>EReU>u(&V{c-xi&+vW3$G#0(M$xs}sk5@%uTu;GZXTx&tnrOA8+9DgTTE=AnLc z$_nY}|Bty>ESL^RvdU{@Z`k zpGSG&;FN#Oqb;UVIKH~EZv{R4U;mrPn zZDrO<&_VCrjgFOG&ntGrJhn~e70c$87g+?@95ZT9K9?l-aA}NlW*9$lNb@U1^9b&w z=)~C{$DABEC7oGRektzT0%oVAwd-Q^1N1KWQ(9c`zE}G$8yhXK5R|K zQlXm|eSGA$yHMnNv$R`U-%MYk%Z<>px!ss=x(9T{@=)H(x4!M?vF#~ea2N78**}1G zt$uw%<40rXv=+1dDm~-wE%M?+6OD_7Rp}DxK-R0ZjvGThXx`BKs)r0GCKk1aft`&k 
z9}7(<7THfLvENnhM+V?4?Q8v_eFfSZY93Fm<~x=)T9aw~w&u~_8;}#;Nx*Yx-066u z+jW^9^3fO{&Ls$ZrYL^r2XZ{KIn|=!vky>Y+?G3 z-)S{JX%ys7wYacHgAFelPWIct^G0G*!nH88AK3?6d$HN>KIxd}pP+|Gk4oQYzE_{% zZEcdD{}$Xzp)GZt=5fpC(J?$F;%S+2DBhG8eEp@X;C-yi)TYKPV*`H?_2IFG1b($4A143n8=!?#KEQL9nTn> zpuBhDqhSqaI9K8#uk=OPXByp-gOcyWa*9qK3fm2HnW@c}frqh$eE1!LWyH<`_Lx2z z2&(%9>Yl)SD|~3w>WJqlvw6WH8p>8v9hH$!;)!#>t4%!CdI~-ZXVqz5zbvx4&aSnr z4R|$e>U`8n^pxsr&2mzj_yg&`=uD5*cLmWg+?z<0IY&Pex$vM{?pf|BkbQPdaFoh` zHyjuCoC

AJ^LaiQ|B+9oWESXePf$&#U=$(c-jy)-Xo8>zh+%!Z>6|?J%c|?vIp+ zoBoK(gL0YK_p=S3_d^_>wRz$r%I>bqq>P$ zD#_pCD)|qXJNzE<2nQ|P37*0JN#s^hXI(>c2>!OlgS@NQY0V$Ta2}sFcOaj!pGELE zUg!f6j9LeDmuSCS43Hr^23l!rB5gCbNjFwC9$V4R2NP$LLnme28!gY2mot^SG0lCW zd~Vzi{mhxflNfv0ec-1!P2qC`FeUMgr7TRyP`K{cn32C5n(IIF&h&Qv8|1~fTLzI0 z_$Yhf2J)cf#?u!zWDnC9cNpMHw0PmyiS?iSS(Os|FNIqg(w|A>R{nJPG?A(4-PUHM zZRQ1?37^8*?6(-;A^S8{XVdp2Qx+1@5PgYL}5|z%@hACKwsQl`bG12 z3TT@TbZF0$-`+hvi2Wo7tzb{f;@JaF{kqPOe)gLP3p8wA8$%x}8`{JCY$S8DjtR%) z0q{Ziz3VmTUqUBL3il4vWxxK3V5k@WiK#(<|kU;s=Wov!+4L%5k0A& z7OwE)u2*T3@x$27N-n1Fh8KE*GS!Xu7u3GWestYvtg6imRG@E<5AY{}V+S}$XGur3 zM0gxcAE)iZd`$knjDJhx=T^5w@xRcScc&uC=u<_?yUX1rNL4-+laR{s`)R zX?_k~7lnLSS4 zzo)#umgk-B5^aIAn_u6U>aHjLa(!b4&*QVQ|J`DSf*sBLXFaA^YtGJgKo_NCD8b?q zgkH_>c&v4o&;!s!K0MhPMl@s1kt*KUkpEqLLpHOH=*V8aerwLxxJkOIA+f5VvJ+u=ESy-$59c3=KU`6b&JgR!rVj>umwyNf(1 z>_Mem?P0Fw|5J^$d3;cLYU1|QrtJe*a@rfmhM568jQ48VthVx!;eElf8}3Ey>O5l_ z+!(xB;V)cc|EqM4YzxVHsB>1!zS>Rapa*7$ZG?15yHTDB>;_2vocbJr)8Yefz$5gN zN4{EcZ}tI8SF9{V{z~4Fj=*MoN%tk_zG&K=eO|T<2Rqc8C*@&69JgPLbRtjlZ z#%A91_`pH_Q^@SlV%|Gf{J=JltcH3*Io3iwVf~++W!R6gaQPN)%AoCLA09daNeW{i z-mDMv@7vKI;&tTzAM0y;_=XFZiIpU66X$|LnseuC&w)%viQ zzqV0!UwKjgk(q3+z?o^vQslxBEu6wCa$lh9%r}^Ov2CzvWZxhp*uUs5=%oL*zB+n_ zVnKm?T_t1z`53$I*SH_M?uQ>k;<485$D_ZB%!i-Izw#pd;I6uF0{X%Zy{9@!`dcj> zmk(`H_MwQAThlP5Emj}@IoAKukMTU@6d23k^@8*mI0_~Qd9d>{Fx`|?{7ErDHYUjK zj_p73dTLL!j=B;)&_?DC_J_{)?Cf(_L4zxq<0hL{*-Cw)Ns4(irpFNx zJ*4vsq6cu1U&GQ-@C)R`XD;l267Tjg43X}lf~;*8&xGsxJ{*cs?{+9?F}a2_*lPbg&2=}L%7km_z`T*-r1@n+c)L< z)h)D_hWE(9?6^JUK$RT~&8>d@0x-<9b_Q$9iuwiKD8>@JN7k0Fd>FZn*0;L*6&rA9 z#@Ffjq0yL2>#He?=E~E=T_zRATE*{37WpoKqp$fa%y%K0E{OQR^l1HA8?u?8%hi7L z4a_;%o+JBjj8kmKBY|1x(Bvn{M1GP?5Rwja7FUFv}*0=lR(8rDtdGEM>anXRGFK zzzzfd=DL}J@b9h@6g?nwN!^n%8erBT_hKDZMYOQNIZ;n3G588M=l7s^X3Ye(4$ z($6st7i)ij`kd7Qe)g8p`Pvu9Rv_+HV~hJfvHi*WFho1pgnsqcmL7}hl8e|*Jb`}4 z+KDIbyRVIJ`VD76jN1~Z4r2!yH?JJ}-&$K(dt|<3i(nYL2GiV{IJ~7+`cP=U;$L$UR$|+6&eYC^&y$R zkmve;zUmc1-1O|t&Xuz!{r;rh1D8ZwGcxmsN?hO4`R^IXX 
z$u26e)>|A5ocm4mB;(wc#rpx!6Zo|@FmAkGGQ$7tH0r`r%QMXx={z!6W>1sPL_Wsr zZ`ge?#vOVTKG2??v-g?Nb8H{c$?lav7gG+XH~PPMz7Y=ci6~!V(gQDKN&Hp2g7F;5 zC-|yNdv&@Ofq9*CG_jp|rtG0T<4;bhfomRrQ}`>^!s1^F@s~}=8Q$vT68Q0%$j4Mh z@gEoCujj&F&y7A|E<@pP5jeE)J+gOH3XtVRieJm(YZ~!&iN7&D&}CJp6ymM$xBBEIvna%HqvJyXKAQOnpmd zUK*Orm=GNN&W7=Oa~O{|1_M`qF=(bi75F3@nimvbB|Gj9;q;mRM{$DYobO(Yjq+>_ z`MXR~d>Zqj{Xo@S9sTymVe7Ha?nQO6g|e%$U(}c03HxB*Ci*X3=qV>A-}Crh;a=NV zt(X|rCo4D`=$T#{drI=r4JGtQj zzn%+L!I(4clIlswHU)0%3; z)-8lOTk8Yy0H1-?Pyh8RwXsMrXRp#6!uXIsp{u0di|g_!sXqAHdG+8(_GR~~Z@nvf zFRWjb-=I_aQE|tUi8wv+O<1$0j!? z(0wh~RMX=aSA6=_w2PgAk9Pw65+AOY&Zj)2k9?8%%+QrN=-b>5Hr4E+Xzq!Q3uPD0 zZ!7U$S$@++RqfrSx!ckyk$+2fkmeRPveeUlZmT&t5Dbo|{aNft`pW=^d5D$r|JSMZn%iLg2@T~dTBsPy`L>UU~wJ zb{bc6TCv`R&UM{h>`Bf=k<&|e7bdHHUK+_fLo;F(s6>lTGO&R#$v$U`fKaY+`y6q$=+kwJY)H!*caW&Yu`r})W#>HHeTR;Ge$mi&QkOdO=bTG_e7C74;%&4xzJ#% zd9@#7msp#aI`!7vyg_K#nJP&h>^)z^Vj{DgdCZk^HOeug$dhT~ zG8Sp?1lvG1;x)l~;8UMkSLOc=eZgntgU-nQgXX|_1?oZr`IsE&5T8NNSd(L4V%(+q9kfBcU408M~_J0Q_5_Fe+um{~x-6}$#Ev-L-*o!Is?9{a4s zMK~qDdM|K^Z|{~I(iS?Qeos_(l*-EBZom)xqy5nOQTWj+ipir)_pqgTKHd7(kXd3e zHk871pcQX*<1WsEb{Bz*dtLm>>c*XXZszmZGG&#=@C)i(=jw)X2rCDp_CTlQ@e#2H zI+Z%{8B)pWyj-|nD!-lhp*VZpSEM~u<H&wG?k$A4?GEw17Xyaq8HBldO+cIBbyzzkz=BEH0w zOKbspLbjal6zE}mIVG>&&hr-JNBf4%Pen$L_R&S_Y(mf?eGKQc1pzd&? 
z-O@UIlxkb@)f32{{?}jJWuN6#PF?P&vHFbrT=6lj10?0D31!;A%jUfw?JI65?KuWP zHgaONO<63S|j-Atpz1i`{~ZvX55e&cR4d-=6qH9pUM-^4dXu&4`|^=!tAu zd38g1Rh>lclpx_C^HW0IFnx^7PIA}PO#YNWvGm5_>>cNMB*!QBh4ZV{x*5qZV=foA z%NV9z=2gjbEX$y4eV5v6(|pI=9i-V0iqGAB1UL(K>`se&poQI=)ERK$Pfk7jFxsP- zE}BTj(+YM+%-}o7g$?h2_%kYwh?&M-R=qeN^^iyIk|n*+tpp#Ave@ zrSp-6LZ$J3>_+YHf2>SfYA??iKz^XrE6UBtJy%)Ht+e=DI4_x@7k4i&rxh3rN3gF8f1rikM|6NcqaofW9#Z2!y|d+H{a)& z&+=Ky=T-S;A{wlO?lVsUMvs0<;8g@CpFRqTl~+9NIB!2Hp(``7m-oZrKuTCYt`S}OQK~+(I#rh#?r{ktUm8YrycTLZ*lRGDS{mg-kZ(=~F zy8VoMy?0HZ80%j}^w$|*1D{;J?cTKZq>*RIm~1J|z}B`#I`YR4#PVeK9JA{89{NO{ zhrR$^OoezC?5(S@--JU9|E=~JW3j0I;nN=WkIzLi2 z-#M(i7h;=Dzs&%;+I*_E5Bb(~77=^9I&$YBuq!@X=M<+!^!svBvQN1ij^8+Trad~R z_zv*$HaX^^&7-#A|IPTB@4Yg*KKP%+&wLm<{-OAp`!$C0T1zR0=l@RpOw2pImhH-xj@B_|Iz>R>Hli{a8vojwy*p@ zj2Rp2q_v%0&lT2=_<+Fa4KYuQ59{mkBl-OApNnrdHa26xVZgoO>vS-qH9_`>m zu{!n_v^8+9+Uk}s0evrfDd1VRotsXG`ur~SG#8YS#c<55mCx7OBE~>}>MLelcKBv+ z`79rgPm#|wJ~7?wUMjlDVf@)M)xcrr;%IJ^F4wv^{abL1*21zk9IV!OExfBluJW55 z^Uph3pG7+VtNgy|AL$bs>ioavr)*g`npgJ2mSL=SYAj8Q%9K~+OTXMTlB@8rrt&)| z&qsL#EWFS<<^w`GKSeu#B_0$-H}$dmKJJ&S9U;4A5%bjm&vi$3=(7WdnsPUP^5@~1 zCd1q?@_wGY&(XW_d$v9@I9C5>2BiD1ere=fx^%m>RX@eK!T!eK2mXG2|4-2H>YE(X zanvPM=RKVHKA3#7uI3c(XM{H@kIRyhiCoAh&CT+mn*sS6Pp=mTIJXaN^*oOqi=Bo} zJza50ebPzNXWT1DuJXADbN|Z0GjiUc`Mh_i>d-A&(P^+SAnk0l!{|&RZ+T zj@EOsD`OcFEi%n#^7G)H$4<|KJKsN#_R_}Ab*X5s`zW|qZhZ55bjLlqDpNjm2>nOke4rFkJNB;hjS4-!t{}H+7+|eDo zu=TGBj-qa>i*248*e1LGP|D9`9Jk?{ZXq_@lsGF&-z}w+w`B@_weu=|{n2u`=6~{? zVJyGoy~NmEc@FO_3qlOkJ_wUfQ$u>xMJ!KHBCYr`u)JbK>ZYH*U?pH(@n z_9$?l=Yxyr=!JAQvh}g^wQjmA!fS=u{L%_@n{wdRGT0jJ#r3t#?pAENt&PpZ^CXy) zgllOGzjE+PIdQaB%J8lEayNc)Z*a^n?4lvff7;)a?IAtoaPDqa?zGyIhrvTT_ehrC zcwfLGaJ}a`=D!cUHhjev=0ASN_7biD+Oc`yi38Q#LTqnQaM+lM`GWgjMBB1_r;10m zv7F?qt6#bxMS&O;D)%3VhEF57P-=ZTmT$XQe>gyV`f6;GzTM95@T zZTyh_K2dn(Me$KKkH(+;0DAU4p2g*{4_LY)-@`a9U=!>io?Efpah$NroT+wtg;R+! 
z9^r(nJ)v@kvGfnjdy3aqymyR?<`q3ZHNsD^)zD%Bb8x3<*c`L^1={|7G1b3-?;6)} z-%Mgp{FHf2vMn88T))0*T`14wgqSZHQfBF|yf-mDS)a{TTspky8=-5VfGy9w2~hTo zIdsk4uJJTVXTi!j=FT6`XX;vHp~?4_aj}OA_?h=iNLx?7>a%yu-dD8QZfT>`T0R|24oB#h2U+ ze`9#wf*d9a@ToWoSM1AId&-Bb{*V8F`VW9pGTq-eq14~VSCk)=^NIXdo$!1jJ_g{L zQIy`~ou=H#)-Oj4{2Jhy3_LR!obUM=Ys1L5vJJT8n=JaL)x1kX`EyDkT+`s8bVh4g z`r;-Vub!pP3BaZGK6I{?^5_d-5k2kL_i0NoKsS6a^W#`$WY5}pKiG~;c#>J{xw7>| z)m`)0_p#}IkXKnaX0yt7XDZUAVN5n_ENe&OPaZ1L*Nlqt_pG-4Hs@KA-$K4%jjyl# zjJ_hiqI)`Hc=arX^-#eIAKX%FO};Aa<;O8Tk{QXdbcEt-6iYLsqCUwhROXxe9hzfp z=ap}5gtU@RDhEBhDs>3e*InzQ`^?`H4Mfj9_(KkaPb)u-rs*G@qkhd@4@{(-t<9}6#+J^>(v(Ueugs}1+_Dy zzc$Tywfo%OS{rxyjU&JLJo-7CcM?YjwCmTqbDVX`_14ID-?)f!<>WmUTDF_H=FwqTV}8_VA6OH#`5F(!`m}a(DgdK?Yup?feLLngOFPa*+x#r8@KW=JllxKQQ09Up z?Iv=sp+miz{wo%y-J2U^iv|3$i>mwDz82NdGo6~x&Km#+_ZN-CmY|;JOh6lUG5ZjG z3HL|z6V#_-6FcxP>6#>Vb_eBy$=v$HMaZs0yr^>R3Mc$;Lt0yCPd~=%SL477xJL0> zSYIZ*fH$_o1*7nDay#g6p}qSMnZ=Jvdwl<~c>PFyaUX$XMrQ!jhsLw+|86h%)kVZY ztq6!68bEKrm+x!60DgI3&zM^p>u*lfUGR61Q>Rj>Gl$dbf-~JzJgHP=J1Lyw7 z06Wh_vs7UfGJjI-Af7?@zWo=s1^d5SxGk#dO#O6dmqoTtC~)r>e1VQtC;v$2XG5oMSGlPiKxY`480+ z^H`GCHqs@0xcND?EVywtX?i%e7gu$5 zv87{e&{J9v^b}WNbL#H);2S!NB3=s~$>NLS6z_0OotThpiSiQ)l(RnK&P{xz9{bAL zvkYMttjsUZ2}bA$y+e3rf{)rm&klv{E~?KgYQ1WfT{teR7mgFo;ZWD)pKc^BWh~|V z`jNoRnaj=uvXL#&UjDVlY-l{4a-G9ad4jT-#z#YoWa(*3<0E-4UZ*O(+^x#lz&YMv zg0@#3y*BWi33j~x$Cc7Q*90e8cv(-<-YmgDUjsao?5GX(sd~p#PkmY3s1uH#{>H)L z!GiXB0=*zUJh>p^LH_A&{D%i~Ee|*^0y~+XICEopuzYQ>ws>l=)~pKB@Id|W{2SU& z5g*k5Gr%EUsQ=!GAIuZk5;$pJGKCH@`L)y`7L)q2neS|ZUGI&fOn5n!lWJE-&m7>_ zGi2HF%D92W{r11o8vK@q1Kb+VbFkx%XRLl`oawvq;|B*$Wqr-bJ&o+nVIDhOIge&* zET1*bWDn$9^F|)nWnV7VIJG#PQ-5gJa27clVtydES}0%aq0Kk=?3ZHSdd|JXrARKW zZCoxMMDKl>_pE+E4&P26%BxPjT5%jX4hKRSX{>Wrps{bx5NENleuC`fvS0maxiw*J zw53}#Cpn31M}3YtRco@2i3 z7x0-z+n;S}zXYRH@1f}1{^t2C6Y>sy|Ac&M|ByAEmx~^*UoYEdm)5Uv@#~kZ?eyFU zE<6}F-sY2XO5#OZ^SvIy$G(hm-fK51o4}@o3{~uk)_r{9c7WdSCl`3D)mwQ~wjJ*Cl6f z`AoQ`)IB(9%96#kwL~^opU)jkGgZd=2{$Qv|j)fY|J?1~rZC-sT 
zUL|Ikxv&X`v|*kHhxhCK!iJH0(jV$ya0_O|RlO593n}z_$29aNxYJLo`rODKR`mOh z8^PO$UOiK4C#KBM<-lGpN_Ia{zXYC&=LvGd-<6QxHCGd^JNA_>)g58p_>XcYP77^o zuNFKkO)~|*>Os$0l6UE{oS7@#t2?fKh0cY(%ulACir$&*6>L28=A-A;uke`ng>MP` zY3Dy9z3P?wYpdy7W5?s0(O56TZ|UPCbu`|}W%UPd(k8pIgzvb>IH8U3Y~RBAQ%PcY z3~Q)0!`o-MVcjQ^tZ8#6VKWq~k0niCx=4&*VRbD{3=i{1-=j%0R3`V)z2sDK;D!2I z;B%)4xSddB50mGs0?AILahtw<>_g!B1hIj(f8hD6-wpSra@W+1bM2tnq4+yv^f{Z! zcYx26d}iWskIRIk!5=$^e4+X+y>T{uI&Q`0o?75;YP1ymx_`7^-QfImkT||=J^QSx z-gdNCy^WX+o&Q(NzI3hQqZ5Ezd=oE~pZ{I(;B0=2KUr*bc!bU9Jq>v3K)9NrQO#fc{@De1=HwX<45?+ z;NC>hMZACcpfDb18Zve`pBx9>^cTob?-XBO>i97rEa_7WYz+!qdE9hU2Z|CiSv zCQd>;)%-7<|AY56C+dCmt9kNBwOv%1U)NaqM=PIp4S{UQ1~@}@W9KLRS6-BUk`9!A zu9sMiu&vKEWN)XbGZh-P@?7@zC&xTj8`4)vbfKPqEP5_l%`9(fkc0{yT&!P7y)=2# z{vO!Vg;N9RU#GoWe5Pu@sXW%g(sRUt$+MV#@*fR_3{6(?(~iSIJd6*K{Vz>?`Kcfx>$O6 z-8S)|MLO44-{?^I_VWGwcafJo&nVM7KcJ5O=b@FJS^m+!+J1}P2M5+zvfHh`_~1>H zA-6imGou8~z|>6+5y2|{LH8l)pHvy0wa8|(xi2~|$FFDk3SGp5 zpAq+tYnPXpr@%*JCmvIh>*LVj&tFZ> z@+R3gtf@49LLQ%958DmL!Nrcr&ZlkR5aZ;SE9zSmXH@pBU5@=cU3crXpkE3O^TDMa zdM4b%ga@-36Ln__IbAqs)C)en=wH_7(x*&qjn+o{)7`RVO^`(-G%iTMajS% zayOX?cf3nu!wJ&)c|l%yxhDm4m;2RK`L#hC@9AvjXNb>gdrxoe_#-p*`n0+A3BNpj z@R5>Vz5S@Q!RwcpdSc<4pm$1dkjT#sTD+5kcJSBQvz0Ln{7=jR7L!|EQ<TmQb!=?BU^gX|6O=j1-W-O!P77W=aV|RITC3M7 zPwFnF9VgCBeqQ;&^5&S}#JL=mD%KCAAK4qRouRe5=4AET?$KYqzp;;SqfhM#2nNOZ zh$gZX-Y&SPE4XZ&H18PE8=CCTvz7tjj{XcZ&O)7#qd&#RO!7jx%F(WHRG-Qfx{vXz zmw?wq+G*jPb7k8AYcKTHyRodMiGOs=eD;>Fvb0=ELQBOIYV1z+x#zM3?K|gSlPs*V zMrUn!*s+Flo(}8Ml<{yN$|qn;@F&J`|qDN@hzdP5zRn-|P+$Rj_;+bn%G zb$EE0K_|5_?%HzZt6B?lzHd6~&-!fag7KFImWQIZDc;yP7CBG3Yqzz!Y24ISg6s{g`4<$VGZ_oW5_;Rt9}^xz zR;GAC8+9a?z3$0@=BhmBb{u!!wqIfkI{xZfEJJ!Gd*wSb@EPH|dUD1B=3JA#A<;t{ zi|V(b^Gt^q^yH#;*avrVf8D^x)hF4I4FZv~lF#3?nE7_CP1)rxJfE~>f?ZSdtQ^W^ za#p@SoycG7*zwIhr)QToH98Kuq?*m265NI@CH*8D*5v#xva^L-0=fFy!kxpiRUM!I z)~>+@;yk3S>w+J!IIe%MF>}#xr&ph8F;A#$CbEzyU<;#TQbqq>`9Z(KGua4dx^mjtp`qR(>-8xHocdB%9WQ*wB4YZDIk%O1Sw#Z`W7TO|;L)QEP9M&Fb zDTlD8f!~d7l9i{{LYriXViNXflf2^>;e04O!};oS%F`A3%Qy>6Ic?fJ2$!C*b6EG4 
zO3tw{J3mnh=T^~8bL(4aOKs>K@jUjEV&ipA183EdP0~K5p0znLOY9x1qK)W*Z}{$1 zIi|-FOOICQk!KEkVR1|k_Q@l9U`y{tkJeI15BUHV0r$y#Zt31RJVG1R%CWtMeD9Gq zY=&A}VJvM9rJXL?Td`p0a7eR|PATYwa1IHdm_~n74r!$Ir||y{Z8p;=ls ziweh{0(^Dwg;TY%0P^Bx>*LA@MPZGBc6Bd1yr2f=viWlgve($y6yHkaj|GV4f=Zw%|)Ha8Qv*WRlg7b`Y4jGjj_=FW zmO>NZEnm_Gh%|6TZ>E);_M3_dB=A1U}hML4@Wx9gG67xb6HnfX_~pb?z+;0rpg z31{0TzMvz7FYU#?AnCh@qIp1bz!q}3i(j%k(#UOVp1eGqagKjKB%8}ehkAVQn_XrKGChMu`( z=dkW!PGpxVhj?cidx5T-^5$-vX%G5<9impk*8LU{|KG}>xdkyJ^As$ssFBg zGPIw^Mx(6`$tq>~FIvgBa2oY&|3$z0u=#Y&DM(hku;&FYGK-8Jsk>3&g=4;zPV-*E zTbsD)dfMW7rsP-I+v{YnXGrZl?0>qJ^_t)>;A0c~wEJWe{HZbUkB{I_(Y`~Q`v@-J zoFUq}VSCjGrx6|7k8B zBDO{{9?Q3Mzic054xe49qi$k%?z_k@JkGu|;<_Im=nn`sfR%O-i78b1T?Jv|$wym_9C?H}Wo;AjLO0P)Mk7R zctRO|qs#i~n z^`wgOWp$%t%*HhQPMMjEztDfm{Kr}fn~LwrW>aG_^Y4W`dl$5wVnoC+u(2?-uGYWVoAg_Bk2WYPW8=&UWuI7i74;+R4 z_LD!dt)%w5C)~<2;hF`;$+R`9E9L}hTQn`;D^MP2&+&o_uy5MPrzJYI8ZRgRsr*p` z2Mt6pF4g$CS2n!#y7q9;<M))#U#|1}3W(flr6x*oQ7p}#gho2zNx zOKTe8DZEaHR$A*6HBOo5We+>;i}8Jr`Z@+rkH_ige2>3xx3zssc@Mg($=88RHewsA zZP~`soj)f|ws{_l=le1+KB@eT>>qN@yOHNSjrj4I1KKQ4405>vA^yw(bLxTG1Bb=% z0Gn5PDD||nCh&?HPt4!YV}DP6+O^O+yo2L3=2rEwd0?P=0B!3xhtL~u<#(tbLjOT> zjGoloX=y{7g8yOQZ`Oa!dQx+z^or`;Kc=2!KJWVVLYZ-#;oK#2k>;V<@FQM-GN+EL zKkYo~#dg?Iox47|XM``he`C)+DA~l4N5;gc9o96DBI_f3X`V-~+6!NrX(RZxhGG3a z)I67}oUz<_g%{4J@jQ81L<2pKdEO2kG-lsT{k$Lgigiz`@%7`3o1?%upC53N!9q&D2*XoWrB@o_XIXyP2T)&0?einD+EAkn&(!K*6!g&|5xZZ^}fZbUoDgytg6 zE!yXH;OANR4jgD2kEeTxt6yy47r%1WN7@YkclXk^%^L_^v0omH$Mg)~+uE`_P7m+l z{ex|PH`^GNZpL>Q#}c0X7XSV?c<0c^{Ui(?<=@h(?jaPZ}ox>BsHP#6+zS2dx^zM8s?gdw^ z(emIRdrNu`VS9nIjaZ{Bu%dr0tQrfmJlQtOs!!V(O!n@)Fh*Cg z1K2*SLh{4PDO`9pl!QH=RAI)^a+cbnT&CDuBHJ>N^~y;ygM_iE=L`_4k^4-@^Q-*}f;bn;G|74HX}PTW@b z44QRb%2{dgP_+Kem^LLhifxv@5KoHmUNMJ>TsW8Loq;j$B(mLg_KQa5Ze(a=?slQG z*1%SFBj53LBXl1f4}Z$q$F>=bhg%|A$K#=ywrS|Wo?Ark-Pb;9L%s~*)i(yOncyz_ z5MRj_&H@kp_J?5}`%?&koB=l-s+ADTj91Nm2PS%9a0B^%;vejmctu)kTX3^5Pe-{=gj1?+FxO6lmgWF6bWmLso2@WK9O91LMM+TYUi zZP9Pjv0+f~^SqO_8@?~)>+q}We{W1u?|OJMatGZ2Yd(86F*@54ot5GI^2ps2MRfAp 
z4%<2WrD;2de{eW^BFh}phJDmQADnUSN#(DpIUUyzIy+VcKE}doJ{R$UmLtC(;P*3p zcJOHjSUrpDG}qH_eK>f?8L*yOEBA_L?74y7hVq^7a0kL1&O8s#6ZAF|s~|t#-k#|$ z<2^o8?ljg6I;XAv;`Vm9PgVE!&SswNt*@=W=RL(2HNO|13p_gc|qi2nDwreKqSB;x!-EPl#o&Cb_cE#H2T#%mq9dRG!-0GV4($C8Ea}I7bvBf>L zoZh9)6uIkcJASzKk&km<MByL~pqxUDVU(Dmgo>ho-xGhMzoX5(O|J^+) zNcffDkxOo44H=#>I)^@8`fke=#$z8$VExvoHJ(#ay#EikwV65Cc?+X6nKn21Z1rbc zV1Q>+=r>V0r6yTc`7!*bty-FQus^0Nm*qu_FTusyIV*YQZa(ncO1|Z=9(Fph!1nIJ ztkz40n2W7upvw9mIG+x5p3yOd`#3lHwXH6A<*o}_-8%w{6Sj)+uMGw+xjpzcw!xva zlV;p7matuSG^PW`I~dnGgLDMt&9MIxt5&qvJJ2!P4C|N1z#7w*+?m;(#8j%CS9w3i z;ZMO;XDZ@$xkoh10%k~jPMGyI`8%X%@{Cj22*B5;^IAHq#eBz^Tzpe5Yf5}D(iM*5 zJI3A}-&5;G9(4|l{l!7q%Sqak-mpGi_u6f}#E} zF6d=XAzz+@4D7%jpMwozb3)I)n7%UfZ~apCJQs6}7s`V}Zfp4)BpZ|Xj?a)hkRXcr{g+wO>giLYGNzOvH4rS~|K@seTPjdf%uBU)9HKZ?1#-o_fPIc|Fe zeP6+65+LX^nMK}e{@XTuukkzfR+`7dk;pTTZP}dvp=|GI9NT@h-Ny13JC^gvaboOv z-uL!EaZrz;`vqgG%R14*H>yvtud}kbb)H=Et$GuvBl;Ye2>1G>OSES?MxT5w)+ck! zDeP$!>pSr^NJevvzsVka;v7(X?&onP?f4Tw$M_SM{i)x15QyJhz?>TYZvIwZ`mg7V z@R7db@~KU2G`~lzx!F_uY9p?v=c1YXn;NrIo`wI+719Usd|~n*t37hmW5J9va_5ov zuuQ(s{Inq99?s|b>bHokl`fF4_VfI<_A|f9voe90ORFb#m`AFl6C1(?tY`fG#%0jE zov|SKK8rPqa&0=?zxmkv47~H}mm&LSaem=Kp0DKl!+g$17UVZ2Ms`SYFPVRz;w0eL z6v`aN*kR~@!Lf>SJ|FMjwH>>2sIA02K--SLvvCeFl(~H`srtm9so#H$`a1`Ijr!9# zgCxmBX+2**8q|3d{HEDlu&MC@eiJ9sdCtJHn*58-fSK^G1K;5sXQ;Dbll<9rI8+xc z4e+*jlHD*z&tJdr`)Rwi^jK9iP38}?bS*d0l`)wuv$g{$yKYpj)cMderu(Te-PsG~ z97ugGvMie}G3ENf1U8+6gv^K(!J5Ht3pE$!k>k-%LNz zbv-mhyu)&6D*e;qhWv3#y)|$R9Q?H=_d{YD!gIu$?+yXy5AdzrPpnDpJMV=4j`^k4 zkHnVpyjUkbBtzY5O{)SZiL&O=`3@~kV*bL8ux%ei+3|CjFbk{|3R zg2xTV;XKZxv^c%{Bc~j@SX|XPaLIO`@8L5d2BU)(=j?%f*>U)=)^<7h|uZ8UCL@LMQXcnpvH=GwC+-%YgqV`zB& z9{Qg{|9hh0N5;_bOVo?!S7aiq@c}IxEl!Vx{RQMBevZz`3btmLkYnQP7z@1DLcVq3 z+x&cQG`nCAFzY@!(LlZ(;w!CuoE_K0m+fFbOXlz;WSjdSW@|UuA`aNXv)OE^{)J!z zAMLqyAy0ka4`0z$Hvd59WuiBI+A?rcy?7pA0?R-$al7 zOMjESA5&vPu)-Urk)`(RiJnW&REPQ&7yZ`R1+}YrL%7P&4L+O@U=&zT33YI`Dm z3P-&o{!8B1GVe^HY&GBEnS?MOXNqUze>}YXqCKqVX*(U~Gdeq(w_?1+C(h-Jz#({4 
z_FPmp85kJn;d8Iw@?T~64~NQ29g}A=iSw^`QJ#|mIgwsgUYG*&XA%1VJvh}xHcITB zcv{LvrY+41Eg*?Z=r)KWng1KjOa|$#a69pD%oGc@7yYQ4jh! zCFy^y+x+^?4a&>P8ydE$w#a`1UBjFuYImaAl^g<}Yg%jCr(wRRvX6{gMthj6Wfw9& zI76&{#4GlgE$mG>MdI8qEzottdq?w$oDD5p>~hEVt9FeKT(iI}K^~3!+o4TGV<%ka zhq-mb*nM22jkZcpAe{a=fv>X=;-`6BYfXKp%>7m-n1Ae=6x}f-IOS8*S{1n2+rGb% z!oJmiowHJYkG9-`Kt6zzorTqF66V2KzWXxR ziP}%ndNhSxw39o8{Pu~2i|;BR=0JHvtQz5|#PJz8H z_I#aUp+51lbf8=o!iRpc6P1gDchrBzTUB+-%EiH%58j=4VLF(2oL~)cQ7#VF`kjso zy!rb^@&uj{1yt{B$P_p}Z>g>yINQhb}7_tbm2hz) zmd%4p8!(|u`b-I3BDf_7!0y1md|CM;EMKu(pdE9$%^%T04v#!JJaX%TB>O--TctDQ zdS89Q@1YQ8YcDNJ%dY^OmLJHlAt$S1K{TdKDX{rA*bC@P_@di`jMpgt2LChskMnFC z0i5mS>nvWP<>8XiGPDGC^_TN|Ysuy71~atpLYq}buM5286B@Jge*LPp@C^1$U{gQn zl{))uHm`>GFZ`8PgEsp_rwq2N)**3T6P-1ZUnrD1Aou9ffu!^Tzn!`G=<>japS&+6A5P@Ec@$;Z z>bX>qh&g8_z?Tfk}Jh3=jiJ$=bP1g-g;}}9_lKGnaSgqgLi6MzKS2R zk0ajh4}2}iTBf7DUda{e`=vUnm*)H=-;ROjv&i+>k|*~q^PcWD&^lc(LsCx!)jrvx?M5zbb}Q=g-r~$Nl@8?bDzeV;DX839UxbyE*6+^_N(dVU33^Rv?aEgS@Z#vKhAD{%-iW zJAv)ct@2G(8gunsRL-Gsjr}P9xUu&#)pKL~cP{d-PYME0<1>%XEPQxzncy@P&+745 zZhuN&>qq#uh;iFro7a77TDnvA3i(satm3$|cuKydxoOM8CCY*IU^vfX-(9Ah^&y_7 zu-fMSQ9I04TB@w}|{z``o~+meNv<{ZVtwDL@} z+&tmd#^z&gZM3<>qe1`HNFHv4h9`K6**(+RG;Jm3A)ck7Ys&TO!N-sn;MUqiHqGhq zC{NSN`KQ1qhGQ9UNUr0)H2%1Y{5cjj z=fd0)XQFcab?g&|<(E?49+hJ=WOWaT>VJgt zTvQGpvTjs@M|J}W9egK{@2Ka}!9RQ^87 zr$*%mQ{EMopGvvIxcZvbSA2g}c9_AI<2MWVaG#jp;^l@yxMmQI*k2k-M)k1&Wc%B9 z7>7gY=y#Ui=q6h(ULrI0w`@elgZ(YO!4v!2bVvv9dEdC-!WZanx1p=L$fG~$wPAn% z*M?W{>DK22$K3T7jyZh)ox_(-+&Mf*faKA9{|o`?LF!FL>O};R)?KhyR^A+a|s?{FAnw!#SQ^L7V^oUh^CGy#JKFX5H_iy*uNh z4>->f+M2R$N7tYWSbNSx&uYxc=0`r}XVEoVemJ~+2D(?jW#ardbhF%tTmP? 
z^WoV7zu&eYn-^KM{<*hfXS5ZyKKYt`=Q`f`*c0Ji%i)X%^e?`y8@62>l@asYnW>yy z(>fF#Ii!9s-bSCap*Ee;&9&A7c$B|bD@E@h@7bi;_@6DF=9glXHBEMFZY#QVVV$!< z^R;)J%Js3{-`c=G#yLWAV_~PVmg>tWP83}5X|T3t{Y^g(ap|P7YCI*`2Uo4VnL9hX zJmw(!Nb`Qm{0r+Hdv|VD&yUjcB5UB1jY}FT2xgzQiZ`-vA^*@g`?(=7)R(milZ<8p7^t@I)yqJ z@;0ge$q_v9GjvSW;aR4v4&UL< zH*8I>c7i*uQNSK)ryZwq-QYOdX@!QOaopFSa%y=u*OX66eEU1_kp53qD%<22vG+N; z$WVzhT&`?O!C_T zROEUNxE)V@VCicoM^&o0&h9D4xvE~mUb-6Ko4@rt{;y~8XKH%%cU%^K@2P%sS@ZAs z9eu~;`X2k4L4PXS5y3bw>c6>O+?L8NLecBHxlF%RZt~=Pfj_2jL*uAis^*&2b>%?R zeloU^a~8HqA>ZG?zGP3iTX%?A`aZ&c?HMc9I1L?-g^n5M*yX{~DBl-3#LSa?SbvV} z8$ZK;+gkYJ<`BUo+3%uX#h8C6 zwCU~sl!GplAKd}I4aB%7O8!$?o3{ds1HBGH|2fd*GTu==2-I1op6bNyT7Ln4Ekj#v zuJ0&cj%X>IWJ{}U?Gxay`l{A5!dG&B2y!l14x+AO*0TS|d*J<$#xZ-2m!2B>)M+s* z`iq95nf!PU1E2A}-IxGQo4s#WJNVYvO_kR-_Tjg9y@T%*xwXT+Ui5E~Jx=y|hoW-f zw*oj+uD#xEdQbF@`uRNYO{{PhY(VS!^)=m}tel`4`-)8wzNWxh5?E#i;k^f%J492> z9hT>%Qy$UxOmLW-1Mdp{0QfgE(0a01?V?TQ#F612Lk>vEufrL`+p)bI z!~Ed;)t<74?IIftIu+=Xz2Biz%yzAt@qt%E{*gD|SG(+uA%E)sdfG7fza*>Lk4u2B z_^3Xv1{V1w-;EzCWj>wTr{Y&V{3UvSCp=P}R6)6T=D_FfDzw@tczSZ4g+uUs8yM8L zWY@|+@2anl(=UJw%|w>N z@bfd+@S;yF^NXRq?h+S#=%}pZUVPWwFWHeiOTJlSc21=Yl?hLkNzQG2z38`VnN@%t;#Rx-$UcBZ?D+`yWT z*mp9$x!NV6+?@^Xk~zI2n%qfy;`P()=Spsw8i%H$uewUICx{KN^lKk^WZfYBpmjrR zSMcA?2U?Hoebv{y$g#CGo_jH*z5G~`6WK(kjKI9OK9}Fphq6ap-V?l%1@Tq5Fs`x3 z*5Xg3ecI4(!7jh7WavZGKMsGW9h0<=&mL3GpO7N1L*qhgBH94vErahwH)|{$!S@2{ zUP(L?{0BGUqk#)^k$UV)`I-}DfA=&FreDd>UEskQLv78M4p6K?s0);bA+n0A+cmC(}!8h_=YPMs3HT>%E%Y3}dYs3HH>>OU3B2UT@xV>Fu5B-v|?T`{1IczKlQ8Op&69NG2M5v&VGQsZwEO&xPYyM3|AN{$qIf*kF~ZB{kc2Dr&0D6;Fk`VQ5e_2s9}FQs82y=6%*ueUR&$%vw^9|f7uG6+YFv} z^NoBwr}w4TuotSvf3)!v@u;LRgAa+d=V{U(T1&DAAJYXt(MY_1H*~pqI;VjU`gmX8 z+Dx8b?Pk4@0FR0Iq2>QdvB&O|3hZxj_PyjEEj=U|mCvt*{#RbLYkSgcc&UYXs)hAi zk6Hbap2-)CeK|!oq3X+ji9J{?NA+7dBiTy*r0QFE2f$JH?5ttmevNFOs_r$%7O9FK z>@Czk$C!=pr_ntyE8v0f-s?RuO?SXNFc&(Tpf0*CfQ~8JoB@4jK<940MQ6p@x6<#v zzNt_S=G8VZ zZiQ=we#FD;1yeELj3PdjqG0H@d$2d-OP36+lnmJNPzHF<%7FB#^82#?{M=hS=n78q 
ze~TX*bWdL~{BP-sT%vP{+g((puR|mw<93q~!58{&)5u61>+tW$<<-;3f+`t_?gVm5 zI`hvR@`XwxLBHxpXEW2lGZEf7ZXuwJD*jz-=i_TYPIR7ZLxZ>l^wYu#=uNx-DEZV~ zMtSkv#{2gKJJ`z-9y|KK{*vtD7VwhHyu3pEpg(jK4bD#^Z%%nhK#o*&m8W=1%%{^? zm#lsXpHL@u5jrNQs~z=ipuuF9yN~!7SL5P_#zcO*?9GLDO@)2NcwWGnjX!wDP?Ky; z86#Wbr}ok}M0~Wewg)-OamVwX(1n~m-onYJ^#_<|_Pd%LX zSOd?V;`8`^%n?IsL-M2VRAEgc!^grwTiD~+bCLWoR^9^+Gr?i3E`6DE^;gV#WBGT; zsqr`)s&Do_Nq7wn_kMry0l;y>p>Muj;-P+TyGh#JHq~KtX#Ogl6rM5S92mJ$iILsb z#+eA0IT$}@Rx!8Of3uI^e-hfq>jr4UyUr!x@IOW)87>WB2< zzs|u(&gD-Oz0i#zjVQ|o)TiwFzg!(8ru0IqGuY$Xx~%xUl+UAy(P+Ky|!{!%%WaM0=Mqf+J}oDO#UYBcE(p@ z`d>=Z-kscqrv1jBvQBO{KfzCZ3+vTF7!S{yM=|9l_k+Rnp%XZEUJ=#DMjk4omz+HQ zWoWFseiONG4IW87@&}GLx$h53R`njfT<)a;SF;AXYu$(pyIUIXvhxMM?LB0`70pMJ z&8?8=8d%LejaPj=a~OK|)hUiy^qXG~=N3BV^`c|$;S=-b@6g-MkKs=lcpYG>KqF|D z0eA68`yfeA-xciBagE#YlbCXU<16qZ#oT6#upXHr|<{|2z*slFb?R75Jy6OqXAbUQvBcJnIn3F`eeXya7IqzMnulRND zO0OX+LrWu?F3-wVDY5|}$OoRulZOks&9Lt<9}rhI)jhDGJBi~s z(C1Dv+h2DbbLy{tJ$(Pqem%T?@Ylo7?)>%eRLbJ9r+Gy(fUH-O1;3h$VE6!dqtC0l z=U4Lov!DNZ*tliJSQS1=2K+h6oIgjpWn_*j18>Q%wtI{V+TwXNQRG8At!@bOZI2Ca zuN=NAsLVPfNVtoFYmYl5Nai06_C(%y-@D{p-QyUs1K&MWjcUZt6@wy#c2txWV=ALWENqzBc2_#J(F& z34DBOpu6yY??I>4+VCSy0!OufCAP;TZ&TyF`<_-49c3=Cp zyNYR(o@&X{N4dZ8dBs5>Gw){3%OPV2p+lzRx_Vm*evfOG*G$Q*LI!z`9M18oZ$(DW zf@i|d70!FW4+qH`@w*57{189E@yu%QLtaDtSkKbF%7x!|c$T&Jr9=GKqmJ>*f?tC6 zoAJ|_(OA-W(O8n-X0k_o3+)TH$Dzv?_Vuf)3tn|C{TT3>uC)l`(<$-czs9M(_rZj; z5{)dqkhwJP#Pm}9iS_$1ezXTb`^h61dxs+zhaJUOpVNzO?W)a!E|Tjzp>vy0pB4B9 zKT;mQ^Q?2>gWBvRMo7M+>vJ!#b~rUi7N)Rn!ZxCR-N}%mPJ5-l!5LD{e1!8%DbfF* ztGVMwb@Z;<(S4Mef?qv4f-kLp%H$x5V1#C{rJMP|biN%}MHlwmnFmj)<@X(aQGV2D zjC9Uy`p*I`>!QC7LT<5Zt8Q*iZ>r??n1Wg5y31=B&kID4i3fAf?DF2~O!QWBzd9Gh z9k{4Q#w&8DcxUVf#(~yK%=fWP5#9MbVUF2;CkL`J2RI}4!@r@|<=fR9CizX4k;N2t z1bmaNmn&*MR(ogG+w1WGZ|UREJuHG7v5N29pA(& zt?S3~s;vyKDj~0uc@MkBuOIIes|UcZ<1Xz5mnBK9Pldzr*M_+R9r6iTIu+kYr!??6 z@X_i?*RMW^49P}{`3t{?L_6&XZ$Q@cOzlaJ>)!KrWPcob{TRJdR6C36ebhrPhlG2s z#JM8yZ>~R$d^h-3Ytok)&U9sR(xVIO2LpG8F|TJS=1-mTo6P%8JMjnw;9TGzB7UTR 
z^MO^nwogJIpo5Ul8yYPpESJ8ymuD%n7T%z5yrsk@pl?c{K22553^Md19w=9F7P*nn zTe8>;BkR)XDd2ARfvrdW!e#Yt{6HD?NnOc?G3VEE$dUsOCh;x)YTPD#?g%NY4a5Vr zZ%X76G{_&YNiL#s$=*6JM`aH4F?@T%;?Y#EmHAloQ+_SSe{U!^mj2eae0LT)$Im6p z8SdE(`Lj%GV0*8O>DkmEZ`7ubwH|7obC#XQd5|GywFGUmJ%?y$Xy=2UjTnkdLuj&CEt|JK#8WxrJdrl zP8^XzMx=tEhzx|AA~NHy+SzpIFn}{M2hS*dKHs(X?yhcv_j8`-_dLHp>Zz)| z*X3RBdT;Ap?|N6dxE(#PzFY+$(PPY(}l24+BO|Ft*Hj!sCA| zK3BEi(@AYPkl^#fe*m0QV>lCk{m1Qvea@BnHQYvL+H*B} zmR|HZ?3R@Q_FjHAb*lXKyxK8o?_QfDqey!h@?_u>Sdi|z%Af7iE`H{frK;MUnD(xs z&1&elS53%yeAkIlaI3h_|BcVy$sO=Zanqd#mj3jm!9>RT(4HUDUaA*e<3)Cn_|578 zc+T!^Bmc3Znv3E&DZoqOIqj2+*H{~e<(Su$Pew5inexfaO)>q=)PHkZc;9!2`AsrC zvD;QrN3!_^D@S}Ad#t@S>AplB{`!GoT9mGqo|e9ro^H3i$KE8inP|~%wRzChmoh)| z$1p!xXmQz_pP9=1>}TxzTgP5I^w{OU*4R_OW$YImV(gnojs42sF?QqCf<261`d_|^ zWZYK&d+2{V{`U;oDu*6_j~)Mvc>IZf`Z&hFFz=Wu4zt$rkC^-4G5)*X5y_rI&HbUq zKYH#@`#r{AX_^1=_{tM~{i)WmV^_$gkZmEGB3;<#J6)Q4#*R%s+Bg57%-h1l7<>AU zAN$%raqRiuW9)NV=Dv08@A?zQek)`5a*kOiee*}s{Yih|*hka-kGRuT>s{&CL-D-m z{y+ZZ)#?7_AF2KyNB654|4HqR*(4nl+m(N)%**_i@&7(D?=!z+{NH5kvSGyYqh;P* zcI?CJlSA|Tp=I9NG9jc_(yQoO=~J6C)UD&QTaFJ-F+JKt>~69@ z-6W3#epKSHiD5~X$2Z&P({T-9kBSdJYa(ky-Y?e~LR>8?0{pR^>>UquP>wT71CD>9 z)=)v#eYijC*8A1R>nWRc$(QAEhKKwK>GFi3cC+Ta_?grecRrNRdGw#*9%1ZMAKwf9 zhUI~LtmyjSL3F)C&a`SZM_x8B7<})zV0S9zr~P!ZT3r&n{N82Q#oO51dcB|ZHu~+A z`_LOlHtJEE%}lwvU;dmlZDBX`j0L~n2KFp{W!0B*sTQ<{zZ@SRImW=f@R00n3zigh zbQVGRn~9&zr%Q#V#c5G-0V^8%?G%?cElw%txXYFAzf-o4@O7-l8kd!iHA9>DSQj{R z4dY+jbmn>iHcqfDo%XdipQ*2FR=r%1_Onfmx9TP1>pOjTlpUPJ)(g+@65!p zN!;74z5X8MRF5F=4E2u+$O}HfsyJblW54@ALR+Fg(SpVeZRJcI{P>STH}3_n z#6?(|AzvA7Sh}HZhdIL+9VYViBhTV` z;jB>Obq@E1lMhWcm~@`}o7XEYrb;>J1eX%@a!!D|A)hns?bWNq4y|giY+Asbyn(AW z`PQC~_Ug_A);;E@jXi}Y8YV{@Q?UKU-Sza`EAD#Q6i)SZ|LfpLcR}uR`U&W}e_i64bkA^n?o91NF#)jgPlfFblNk19- zc}%=UUR~vhRsL0tDOG$TG#%Z@gD1jNrV8Kk&Hd`m)HRLtB%ZhQpQe91*0}%0^q+3& zzrE7kXrBcBS9?P+w2n0m4BrHXDcG$`yyzT`1MkdcPK$h^+#h4Ghokd*I-jGnImZ2I zzvxYI3Mpq}|0BmW`=`07X1Z%f|M;WVH`3GB5iLNx2J=ST_X6Yl&|l>9gGOvF40vq7 
zacRS6Z1kz;DFgi=W3-PAKh1JVSNW;pPy4f_pZ2HOyB57F|AXY0WQ*!aZjtkC*H~|{ z{jal|qG_HDq?yA{(ocs+yTIU7-~+`>Qg1rX^?L{|!B8aE7IvwHjq{+;GIE0Fz@<9I zU29=O-p)#U>-|*ar~Mi4r~U1mL$JIQ$&OBNL!8hMY_qZjVB^2WXW=71pw4N^-!9l3 z-BakU2cOIB8HTK1>ju_t);;97zS8e-xz`(5I2V9kjpFaK#s?2o(JV59vdqUM))#g> zjIG_A;!k3|FJG~I`kBHeU{EgQxQxplV#&q?4~(S^+D8o_^Q)UUU!-{4vm0+??$Y%q z`ZY(Qk2YXa&W`?RUPrT-nSd;?`(>P=-_q>RzCh_Le?WB3*v2y6LWzAhPrr!) z22R~q0Dfng3hgql?dqHP6#ROp*ag*b%=+fo=se4=ta+k83$C5g$>7N5(HO%j$y9W)KBzH zk$V`>V;#&Z@pQXPQF-P{jdU?BzMCaE$U-zP{7AmC_(g9#ykD}^DL%Mg_M!Hcic9W1 zu;^1SBTH69dvZmUUlC+g-hF>FRqX0d6}^5dqi9EYyR@!NlV6LN`Ml)5^cnI!-{C*p z4}6UdKj?Sro9SmQx^SY+x7Fb;_B;G4d1AVWiBaB7#Yj)Iv7JtRQPc4~(OA$=J1Os} ze$kXuBzKk=;6EJnWOk1M~c`J#czaqht9^1nPlpF!uEd-;^KPnCDryNlY3bguTgRW@ZL3(UlHar^}2W~b;-b+q>xwUOjx zTpkcJv7qYbg2A!lf^XJx`=HkS}ab4 zM!26>{g&7>gBIx@UW&#--nY&s{ib=ReJS#X4kWf2IzVuMN6x30qVgE6d8ZN zvR~(%1PA;`9P(q})aHU`?xP&=cih3+qpUU_3;#*K(NWl=;QVaLO6IfQ$N2W>ot?AC z!mrtKC;PIS{+zH}+XBmu-vY~)7Fc=?0gK>tfZ1VOZ*y29R5ZTFL*qp_(%!Ge)K);( zyS2tLl>tYVZq199?vQ!t%*Zy(dg9mUcfwm1_w<9^MqSMtGM_jNwJTYa#Qdoo?Xp+6 zFI??D|FmMD;N^*m^Gn(SuEr(g9g1PF{kGe7*ZFEobC@=hf^5qiGLPdG<0-gQM*hVi z-i6LPICm@>PZ##0FUB{Jzn#u_LkzPQ^g0f-cMN(G9xgrz4^Kp1-J0dUNtYgUQf4wI zcc20CJUNA5TAERRlFy<=<3evm@#u15fJNWL(W!lOQpBhG_Dinoy%WEeEKZxnzG4^? 
z+-Tg7z8u9q3LgnA&BT6IJgwe4W#))DTIDmCBwH%Z#p{?Kqe@B>Q<6#G;H&6*ota~Q zmzd&O;JWB$o==89jrp;iQ^D#KHZ&dH)sCw*C{K>Oo&6Kc2I4!`_d7-~+q>#GH}M$$ zO3_WsFVxZh&#PX+X{P7y?0>5>0o-X#@PfsWm7(MxoD}aDWsElzPrH1+Ik@M)%#iNX z*sg*uoKm$ZILo|O-|%f-w3Kz3*ClrzxZ;!2gS9|&oZv?~S#-5SeuXmUf-K#Y8rXTW zWFrbr(RSsNysvQfl>aYK|7d>8W=FTc-zN{1Q(qrYS^CMEd$2L0@=1RV{iSFxRR$lG zTEi7>dh{XP&iDmC@i@d=RINN~MXRFqZJoSt^pWt9bg1+IN)H(iZ$h&=2hDmO+>Vdr z8oF3`m#swZTx>-%P^|;QbK{7|Uev7Cf#JDjfpU9}Ew1rX-UH0-B<1AJ4`M?==XuBd z1#K@6NPC7WWRWkPHO7m-hi#O^7b6-S(ofY$_G&JqpB&cx8DOwF3R+C`6m}THI1~F# zV;QNh$S2B7n!SfCDGGkp<~OTuk0D6|2J^%zkL|a zhJL>aoKNaq4Cha5xd_h0PO`jS7L5~EXxB6ApgWs`1N=y?5&K`LPvTtG1gCW71oEqmHh0cVnN+@4);rpHo>6YYue}jAAYozo<1b`JBTHALSmIuKATO5Lo1+Q@$9T zXLI~o@P))#fGnX9f#Ycc^MZ9i+mR!x-3SzB9C-lqzp?Zt9_0LNm zg)IILMDU7ZIV*xM z^E>5YGlGwg>vvVp&k1hece<|UtAeV2`+EM~U^l-rvN36UGrx`MR_TivrP)Ij-Js1lNzlPo+i{e%Bf}U4Fd3 z^?6=nWG&zBpXjSj8k~)>avK?5p)Hbo6xV{}%5<0uKH*&S5d9U0REEJARdRm>>FR^JP0Of&UdZyWYmn>KuhrU(!6m-4vK!Y;8NX zC{BB7J(5*|Pr6<2O_@CaqnO%~^2f}Q>=liVg(g$ATI0cA4Z}0Ft{85QvaG#4WP~5Z zsv>`8Euc-k&wK0q~!PJDJl<59j<+URcQ{2F-ByE+XcQ^R$_?r|%-j3aDDv$8pWpm*; zg-7>Wo&)IJR5^70{Odhs8EpWA>(xKdr= z>mT(W9?7w`lAOGl|LbtxuX<*9T_PDO97&GB_no~Sc~U*Czm)$-G7jE?_RB3gRO3pX zZ}~NMg}TV&0_Q*7tv>glA^Rd3hTO<=)|oNsnPkJ>A$>R-JxISiqYkkvYEODHDz|3D ze?7mEc;NYbF65Kg6R8&1kp+xnCn9NOTK*ulr!~=WEpW@mOrD0W2M4<=dyw}H-eI4-I4D+5jq9DQy~$lxFYu0jOwtdsBY1)NPVpSy!8cOU zCGdq)FNG;UsvrGUvIg7{R`few17@m6QWjlX zjn=9k);s7&`7=If%SCIz$C#^RJ|x?mGG)L8egtTDoj<$|yvFuTPSZ)5SF8ggS!!uF zj-N1f^`$X_2kmkD%yV%!ZM>IfD-`d59$CS9Mso-~Y2SoB8t91io@~4m?RqbnpHz|d z1V3~oT2g)MS4SRL+VG+n1jSld+rLl?R^vlT_#^F7muDBZJWJZPyv4hW%FxG~R0dtH z`B}86HeR$en#caa;^)?Wl>uL`y1x6E$`xqW;=-=GPItzy_jLy@dpYuLL0^lSI+u&x zG)w(Baev9YM0^6xc(hYiThaPWyrJ<8T79E4@3*n`>6$3}tCbg$n1QKU%haeZo}&%U z*|LV9-EuTf*eUsE?Vf*zm}rf+sCueX!SCl>XK{dyns0Y{e5bOp znK6XE)vo5Ho$0qxM%vFM<*v#Y^N70FV@%~+8)KI5oDL)wR? 
zJaRMI_pJppfZNcvXj%9eTAz5t<}BLPpB>t}KFi`FSzE-om}OelCKea=-Z5}N&fygB zILr;W!;X^-#s&oLe}T5rUAy|*TKY(EahN&P=gbTAs80{Ph`r5kavC&G@HSdoS@_X8 zk|`eR`nvY0mmp8imdxB`?PdBB|E!8}ApVp++)*b#E9IE0U2kFBD(mpA=NV@c`Q^`S z9Oi*Puh!kc+-2s97FJk0R(5XI=h<}5*mZSt{{?lR3*&M3Gh<@0!Cv9c{n9UyzbNt* zs2#hHihn`$v<|)ROuwezp>6pkHIK4mWy9)Bpx}`WYwMi9!57Wz+*vZlhpCT0)!MbK zG?MUVssxWP2F*RV=l3gZ!Z9uKqSYpR2mEGjLhg5w??!DHBYs?;C2!mBWfQguwz~RP zJ!``_lC`qev2h&qk(0<*$wsT65;b20^4cTc%5JUpJ8Yyti*YmIRYdpx4;TJZkq%I$G|cx;M0FQ!0OX1eb} z$L?&rkPCzUl)JXE2-?cJ_k^`KI(})v@Uslh+&56}?ZeN~DmR02Z;PI}6C7XlQ#{{y zbO7zGX)Hq5IfZ-3SzyTDVw&Bi+E`?;EzLc^1ZGgnq`&7@zkHDVjssWT z_~PJe_&6?f1uh49_Cma7NtNOKcs>rWc5W+_n&`dh%=N4%X+OD(LbCA%^q|I6wf&H< zj&W&x$=Kc$k4pLyu!ft)VxNc`$PcnwDec{EyMSL_S$!+2)tc{DEG!z%A^Mm(Q?HaR@tBTg5 z^P$4)EgEk;UtfNe`688z)gQ?D{52!+A-)!U&|d7bq|GR|8)slvLLVCc;ow)}erLUALBJ1a<_q#fxnA!kpfQcMz#B}~#V%1!&?ubk>@d@XG- zU$JZ?mlU?>3lGueL(ACziQ0U}A=})}nRB(dmNwUnfT_SdLkDr2!4}9hBmj}cT`AON}SEt*&zKt>b{x5zfDpmd<`LA(~{xSV4hdeTOK?281tPNWEP%xzCWdoh9yr1&b7#_*y2U3zdTUoD1_8zh*-^RI; zzkK0}7yG8|20zi=B)!w{AM;ywsoqtQJ8k0^E!OY0>D^6Re%x-WFPP)b?c-FwG@`uz z+wzVV*Q++;K13It+Yix)`m}Xg`s(}t`}u!^xADx+9MQmXJ3rgzXijb3ydfIU+zidV zTWu!uE-$CIMl2;Qq3ZR5S>wmECUIG@Kz0(P{r@xD_uffPopzp%2{IXkHy`; zBB;J!_lhZA`|5D;+>T(^U!EAGJ@WZiw9YCuPQzD5ek8jtc*C@4eR2|bYA5Grp-9dQ z>|DjXrj_r}TkPAj;)T8V)K&+w^(P@?kPr7`%QZ7D>(;{FreYXWPVZDke0Bo)BT~~# zO=9$TXBNzKky+qxqFHm`9&+f+1O}bgv$C0b%0VNUiC)Jp+!B7tF~ObaEzKjo)XtRS z2g*g|dRv-0M=M$|9=I&wt20ra8(Wt?ID>nDoN(dS;`+ebnUs;76C6pI8!7WC;k{7C z=DM_T5$okhH)^hjbEmeWUz2@~%jbYM@JXDrVk0_-^GL+=a7nG9eU6JLL!7|CB!b9q z{1tnM@mXW(iwm&}oHBh>7%#Zc9HmP9j_m~Hig6fM*92@*=2$k1&glS0BMXi)jN#+J z4i3N0Cv<)_A|xri@j7CR32lCd!v@3ZDxVTXsD7bxg07+p;IJnw$9DoR)XUdu)wW zHuiFe&#`qAeIUNU9<+REjM^?qR^)MF3}~K{XIkg2JrR8Ad0Y-T(5Zb0$pO25Dyn=W z2Q){?yH+`%Ig&2}`p6H%V300_>lS5frreEe0K6#hdn!NzrhFTWUltV#T=~cGe$Du9rTyz;!kIYjpo@! 
z@Oy^b^%38*pC{XfKJk?eB)P7=@n+z{LGBJTkxu>q@ULq5-@*H({MT3#zn9Bc&Z$F} z#G2VXeb$T}0f7D zdmds|pg+-&W9iA}FCNfcmcn%gx!-rfc4*zkUUzOgVU7K5eR%ML$_QqC3nr~668yYK zZpK%|PqMy^`-aE!myU&B;Hwnv79x1Sso<({fDK!#9>avMr*GQTz!Bxf+#Sn#!6Z1k zQhvY6Uo>{ue_Z#5_poMboXlA#GVzuY>!gzI_l$BH8TNBJJo$0$J_d4Izf1PM=OW{T zpOACIWyBAzX@=Ls)40CMSsHr*-7Jl5hP?=}F*= z{OpV=pY&^e+AkaP>~HWA^QrQw@JY@znzH;t_>`*1ir!jdDf271Cehx?O0@22uc-fx z&DHdOl3*-}FZ=qv)Vc$?>N;|z00((NXIuFD`ZM0f{c5Abt4@}zn}$C`{G(jq*sp>8 z28L&U%?=+Q=?~FEWOH^ip6Gw%a~%F(ur}%N|0$#XD<8Vefg0CWJFWk*l*Qk^aXYF;s7KJNlpK=i&ZkN2Hwkh(7eJhHh-_cQhO-vo8(3DPL<#pC#v& z&(io4Mwe^-pU5!9@l>kOd?hk$hWknxcHV4r@C;;_)~{!n+JOg=V-K3@fsi?x7tfz^ zybjGJznj*7d}uj$Gcxg(9~sl}bL12sWEeJo7y6#F6ooZA8Q;~KTRlH2b%}kR8TC9V zUub=fKYL2N?o<6qEx*;a>YT^@k1X9s5zTjU`RGiB~XK_*C!_Kn-o*cszOzCZDeo@Zkm z$Odv;=8t)L6nUb3#g4MtJ1fAjK!#m|%_QI12JEC#<6>m2#@qoNT740JC-QSl+^%ef z?!rjhT=H3X*E^@>9lF8pr>nh;`GRdXgRL!C|AzJwdtU9OrZbPR4RQhPCGXT;^6tIJ z;?%YuQ10A-^gel~d}Xx$owEa)SRVW>^{yEw`WRmSuC;bgEjXOH?kFNdg;zewT>iPn z+EHHH`~fmobD!wG#Lkl~r+)gzT?5Znv6(iEpkc+Buto^j$Hs>n;jxY%}fe zCGSP!nqL#Ym+h?i`*V$l+80sDB-Rneq;KtM< zo;nmSsjXyu$vXT{ykuPQ>vH+5?AR4zr~;7Q_RR7P`xth@i46V0p6*I}kI zV!Xo1={RHv!3hUEKdy=0tq;D*9K7I% zKxiTx$?B9=-RO$ut>9>IwZXn2FuGf0}ZMo=bE}W!&qYD-}vsHe`W)S^S&%=eWL? 
zy_|c0u#1^qKlm%wE{WgY`b#UdM&)Aao3m`*)l^;krsQ08S$QSQBHhhVy&_7V&&61JCMzvfwqB_c=a9l zI3%CDf>PnE$&A~^wVM`U{Kj{#FE~Vu`s~ffzHL@{3(q+#YgnM@RICN( z>HGxmW%J6eeH^`(*pu_1|J7Qb^m2Bij15?e>b3e=&(0VXrXW=G<4;JI#^iRk+o>ew#S61Rt}(#{(N8e58br%CO&1cu-yWNXa!EDnGFHw`F_loJ^ti z*>D_Zx1x2$wP9u2>z;GGT2L;Gx9d@4ZDS|-IAl``UpZ{*!PnqNe(j0D5tP&WEsp7# zB;SehZXkEik$J^zB@Ps?%QQoF8-wK-SJd2SjTH%1c*CcD9yJ0i(*p)3$sPi;{AI9Xh+0ee#H| z)t|kmrE|wP^%vRKNq@?VL=M;HB;xthXNR}3StyQYyqqJfI2O0UbDH~OERKudx6*CL zud(#bQJc{jbd^h$_yDi^*7(N&w_tm-xW9D7Dw*ZR}UvIt6;xQ<>*aK^GJNcILhu?J0b!Y|S?GB+ z?-TU9g62wSGhu@Ge2PE@3q#KtmhqtFNRfQk$(R*?cn=s%5D))tYxkZbT6J2 z?Tbb}LYd?}d^h6#`(vCWbJ_#_>db>+B!*@(KI+NI+-~As0zY>0WNceLv0T9ik?2DC zAQbx`_{6Wm+pFFixBE0Z}~G(v92;~z+H-%Z_C-W6QQyH>aoIBK?^vzow&>uXrgg0rDLwVP%F z>vxTr^qpb=)=i$&#GG?b(3;z2h*<;ar*<+$349I*(alK+)Vpy(* zS0Xt-npYA!Jru1Z?^W9YQe1%KTdh_ndlxA z`Na=~YxZ|7@5!d^yFz<7X9v$Qo^4kM?z8QkWg44PKBY-Nj&MvI?7)Dja%#Bq5r@mU#n;i|K}p7xECF-J%a6%QyfCUZie31fHu2F&^Utj#Ou&!k8$z; zPrp0pp0=i;ep=Vpqwh#Gb?DxFhV%2)13C9IO|5_Dd&&M1cO$|BI#W9{UgLEAkhx=B z62-EXqIjRBcjh|wZAk&A^S~Rh6K>@+^7n^h2({)amkZ%#wv9e}J`?)vTpj{eOmiV}Y>+_+r#D}H$!x_&2*8yW@fW;+z z7V=5fRO@0`G|%8LZ||tnIkbZM@3!kYwW)b3+p;`ozVvKm{6C3B*ZCmrM~#%FYA4b) z*xplw&qKYJUH+XW>sLOjfK?vn*?f2K>1Cf$@mcEgLYI5vT*YeG+=KbDhb$}Bf>pFD zoMj5@sJYd5!9n6j6I|Y_^eY|Miw{9-*+ap_y8~qGGQWhfZ|8dlpVfSleIEG{#BfPf|yI> zaPyQ~1`n1iM}#-p_^-Zx`QBM*b=KnlyC;mKoepzEcsc$V&Et`jI~KaQaCu*NA?39< zYH7)-jzGA8&16=Z0pKIN!{;IoB4L zifmTmFOOxfSiH+~t(okzujy{YBf@NFE%+HRbRCtpu)=@&KmLYxR)ZtO6Y9GIdb^K1 z2yMI@GF)-wid9_gQHRea)<I$=X3ao~6K~@oL>A{QvB$g9m5E&(b_oo69dT`0XW|%=2CJsX9x5 z`@xUs&aYRuW0H3xWpgbkLo1r|e`d}daHu}iw$7wEUHq=I76Q&_k1HB4=d!p9tiYN7 zLO4UTR_x`zlQv=sydfXR&R7md`#sC;9M7n?N&m54tOVq#yT59jrs%!9;5MZ@95?D) z{6KD?L)t`7$$!9Jneg)!@FUna+Zf?m@Yb&poBTBV%jXHGtpjWuJ~y%m(Rz5EJ06&; z;XLDn+SgKSt9>5-PuAheIJrHWH)G!dj@c(Yy79aasvprW1+hX zDX;gM!?!VClD&$X(tQCd?K|z6NBV3I?;TI~9<2;Mh##v5`giz!QhVP{+PIcnk2@E} zzdiQtPU634XKIzZ`+B#8obM0Mqm3n%*M{TDd(ria{F(IAp?)lVZ1wG4_#0wb<*xB> 
z$aXchg8!SwGalmgHq-AfkDz=_dRTXgoPu0s4oWBa8q-f_$j-!HP-d)E>`Llxf)-wH zYQeSmEmrT9K8nVJPQhLli3is3#__NlcNFopA* z?Uglt8r)+q_*L$mbV^71(j6&cvmDQmpJ$zaoNx!6@^ee)3s$>ti%(T{u-+XF?8lrY z8mR=wv!9slt@_NHm7!_M%P-bmW&fTsqCeqx@a8C%|MmQTJ-(mWtRLo=w=|Dqp33-P z+o_~gTXehIMFM2nT6-J`GGT4NBnFQEPT)d)`Y zBRZC^t7QA_>zB_DI{|n!9qU&EvJ5d};hna;=t2NHKS@MzdT?Oa~ zoOgeSxwHP!I(u?GVc9z(oMgvFTn;+09lkEa|2ro3*}D;2Z9*0}0|(yM7al;5>)u4& z6>RyW66tc$k>)qiC| zuWQkh?`}@vtk^u}Zg)krX`WYkl}(dR(b^x=cH!gev2aIgpJ)v~1{~!XXQZ!?m*7?P zrK_adlXoNa>&ifOirz^!ivES8UH1IPO6`xnhc*WCo*#U9d^DbD?4BJv?+1V_n9cg> za`LqwbXc#>P+SS^e>Uvk_skOXW^I;8_sD)ItGyN*dOrH{U%xVVkSm3eA*+?M z^GW5EFw~cPIYxc)EgUG$Lf>v|2PHa-efxZYXNR&;g$K@CcK-5JY*=z#42Tb6TDAFd zE))NA_E7eqa3neVQJ(z>7{>t1En1tPmkxDD@-}#}@^xb_{1GkknNcS?ktc{Y`2oA6Kk{iQDs?*0LO3VnWV%v1k{ z{qR#`=JT1!XWsXLlxMvu+U8Td*O<$G!ZSWI`0S(4J$%efX6TM)(bbJQCmHW2&Bf*z z&;I|=K28a`piItu%i;xpO+FRRL0@b)vR8`FW7uxY#H=YD8%KeQy7C7CgmF=grNF`i3C?sKDe(u0$=@9))uI`0x& z4*8hB)vl9=*Q9EDs@X9aJ(pH4ILkNic(1Lub!dGWt!;r#nc~#H0}cC+8nc=23;9&ym~h1g zKU-p~WoU)$dQX?k=Od`Iu8Jo~-T`t%+Bqdt|$aXJ_+X zdcBi(lPDkIrLRBRSw@$w^QFW2*gH8T8&5=M$zJa8jJ02-o2TFh(wbWHrv7E0$Tm6F z-hlyZ$Z`3Vv=5DsA(yi=K0)#8)|v&w&xK9RG&?#HdAqIkh%9ijcaTsQFdj@;jz0gh}O zc||$|zbQDD4*d=1RF7hhQ?l?V?96wuzxgig%o)ge#u+#j^SzErPw4R<{f_RB_URBu z?C14_3(KDk;kAM5dg-%>P1w5?JbyNv!E>F1aNJX{(^9@usgh^B2Rrq%^m{7$)APIC zTI1nUqB)bly7J{DkHk)NjXi(bsq$*Kt*kupr`kHI`@OiH@>G~|5A!wb3x;Nf_FQLx z_n9|<_wOQ+pe3!rrefPUt{+sLqnYDw>PF8KnR9_xZ9XkHLde*}s%7 zK|bdH9z0s#Q>@SR%#_N=8|MB$<2Ym8$5B#zBsALcOg5_8x}G)w3VUFarQ`oaACk4= zZLJZD?4v4AlVBE&+w-pR9XRsqb!-FYHQ+2qa3*JYWY6E#Myv(v66}=V(X8SwB0VVH z$a}TMjKZyqr7Msubbv=b0bse~1@i*-S;SLzPfs>8a7mw-3ePEbkvV&!L%k#LdFqU; zZM#qlzD+-RruPV`JUFrXU3B}8yld@OXF!vBU)4MMTx|0mo@4zgCz)L|v1xJ<8#2+~ z^9sAdP0YLYiW7fidyC)yD@St=Uk%;pYK^BHt#QssaJNTm`1q`_^odihHN^Wxd~5LmdgqPyR?8$-?gw_D!d`XF+ab2h7) zn00(#&N&XzLNbm`&_Hh)cs%xB%aWZse{SP&o2zdcJQ(?3BVVf8evq~`=F2Ny^J@0# zMEA0xmnvU~v!-AAQ8~P%7jNbLLXDZZvvxOk#jA~N7QQXbzoz}@OnD>^X)B$?=A6nr zEd3lj_b~R*L--71n#};O{Do%_1Ld$+Hyb?N8uzgW`LPGQ+rHpG$&o1DADSy{3GXA$ 
zNA*mt7Q9R>`Wg5cQ<8(+GhZb(g8=$`QGP4n&3AvcKi}1h@9>;P9=O?q%*!uNzO#us zUoQNg$2c^e`QTIk_t3t4_4<7d?-ZLXo=D?gxA5w0y(u)G1UJNUG+)?CzD>qAlks)H zn8}pOg2IzO8c zeLEBEdOMBZU+bMrapDAcQgIPBeq8dSOpJj;`Gf|gJJ=6mzk6H5q2Bfry5Z+2ZcupB z`=ov%6SXH{X)Zo*Cf#jZ;)Q+Y#YHEBBkdO`7pHP^DmSOS7v1Il+IpPNle0fli(A5% zoX;J2$yq~u(Y9!xl(Rik!SjfH8qS>m6FTYEzlZ3_1w4-YzmheAk2(IM&-T#9z|dG? zdX%n27B}Xz#&@cW$NJHFt!~tP+~drVHg~4zU`}W@x?ni)YgXoDl)HpK*#er%w2J2;FIjWzk(G-l#hKSz##MdRCU(zL^esBOhqagJ`ZU^I ztnr!mabAzO7(d#tWnJGXUrfe}V6?Uv_0+HJ!>qPBwgt0%a>AM9ocL8|3uhEM@SBd=pbj;Dj+a z8CJJPz5q9lKTTr!u5!&mwU2HY$RVqKc@26lu`g(|^1Svyk3u)&uLL)l(l^7jb1(3h z8sivuj{6YcLvn%;gNi?T5x$XhA%l*w`Spk)+8&On>+DTM`3HOCo0=5oos^zf%e!{o zwZ%9Ue`RPhV{yv8Ry;3IrUQJbe3f^!lb>mTi7|p~?tSs?-p))jmN_~@d%<>2bM(y_ zsOF5qSX(xA{)UN;lePOS_-PtCzo_`GS+vnREs8mnzg=SzFHNE!wV{64w9Z#e#_qXn zjQ3nbH-caAO3xJPCOrcf5?GzCEOf-(OpG^a2R~4!_#?%5W*6QX8tQY-GgORjrf9IA z9r*7|Y>KhrjQZG+dupJysEsvzTiwdH;EVdo;O{C0-?DH=u!6620z;yEfHhBy&wy}g z<%IVB=V$zZ`M;Xrc4{V_R`l}2%l+a(C8xmb`hO#-sv_rRB!VfDB$i)`o zeFtlR%j>5SySO8~Z=CUGlmAlrzf7?kpGJDJ`bNHh_p{`KJbeg;6Si^o%;sq7Okfed zEG#8xt|q!$Zez|RYa$q<{RHv0{^#Nta-Bcrjw~xLt^8@J5ADg#)ciTw!Vk9<;EUu1#f31nN6 z!;&(DE2@5VM19#fs(;e=@gFjfB;G;sPbKUzjfJysR`v-m_G~tG7<-!*2KhV=r!V0^ z@-NzFkuRy6-;|5(HJ!sIb|JTvybqJM8&kj6m?OYlYfMU&JUrbK*F?Gk+=zEuaU-4( zysCqo=n-Bd|8=%_;i$2H_7G#wFm}hvf5py^#A69~pzopa>zv3*#Xi=J=wWwDa|L=* z>o>!ByJ+7FoE&n@ydO2@ag2G&HqH#AJT+#`F|=pnaMCvS&@0EB_!b1mZ2U7?A87ne z86IX%k~98-Mf9%oi{ke*bkcfe4_SckCyFB!y{GvNL1AN+>{|TIG5@H(%1K7G#(S!5 z(X{kmTJr@BT>bpuLDu7z?+!1ct6cWC!G9a&M0*e7yLcNkt^ca`0RL0Wn}w4&x!yS5 zUwaqI`J?B|;}GwN&r;KUi{E7LqLr_bcksvIP_%?O^ zS$4(YKJXzwMbn2`AL%Xwd#{08Yj`c|q)FsRxXgU_z-8tJKA+>W#;%vTClWiY*iv{f z@j(hli68PK!qHVAh_SThMHAmU9J@@KsleTzW@?&JY{I4%CM~Pk5ii@ro_P76mxHvJc_r_7UcxU|lm>3u0U9Elo zZ{p!ozXu+E(t-z_H%U$3YtM6!fp(xGqW#qPNJl4e^w86YIQrJI$$Crml0B(h9s7uRPBpBIi1dHqY6~lVJZzvv z6UKVWedHxa*{+V{<9Rwubf)qYckVb(Ht3o5{G;|OGzMTrEY|GVB=I2sPuHAar{*OK zs^pFl|4ZjfK1lXUHW5EKA_hp$huWdu-^61V{GuoEoAf?@6?h$bW&J0ARc_ywrpiFW 
zz!~KsAZ}qNWo#Y->xaXRQ<)?OMv3;Gsz&}i?GerFW&K!{j*4>qDc+(r9z=aBCPXq@ zvUvvl0-p!Dspg={>WsUd%`Oqo$>+rea*Jouxx&d2qSX?*)7ufIvaA`-tR2%0O-1`3 z?3=Jwp}lU!Wjbvuse>(YO6?d^s)ioVI((@;>#whFCXf2OBnfM=^x!pu70c6qx~YCy;x`G z!bM(;KDQ+OD*rh4WOFL9HB;I1SV(NmLhyLHg-DSD;)-rQ-6**us-bW-+$2?1ISNDa2m4-_aWqo zbYwyLi0A0jiOkoDmR@3CToHQ7Xs@RNjd{q)7Fuys7y1ap!GlLb6XR%Oj%<`NzEAX5 zJM)H8%TFy{8|GOX*WJGGXv$})r#;CP-yxm@TZwi8V(+Z4RC+RfV-)Xjr1C95OKmQ? zh5k={Ep|5jX#WjfyZ<+z*IrI9{^VNY+9S+C)}A)+(m2{vJ>dtQhh90qWphJD=VPCb z>tWjrJUjGTb4>iS)w{#_KY(>8Pvy}i`dC%&3oqboPvVQ0O|5$O z**>Lfqgdm^>2DYPCjId}ko56!`jEdbv9mSC#IJq{W31z}60H^M6GAh0cfaJc8Nd0+ zGt6?%wpJ&eVHRT}L^erc`-5j>xwQosB)p&4KiZ2=V$>uX5+6?FSFm<1@^JeB?qOt~ zqK?eO9-)0}C&m3}Zd%tqqC057%5lkJ*-_H#vJb6(iO*Zf&(eC{O6}^sXuRMi_GK?H zMSj$gx+wBhS{X}!wx3uACvAM~g_d}G(;L>u)oH66SR(u+cAjDfbNF9$r?_xsbRK{* zpPwL_SsP+|5ht?No~J$)njD?OQZ$h4acucH`>*b2LZ?c+LGd%;{Ij%%lMiVpd!J*% zN8zb|KY=+eYyL~YMa0!j`FJGrwcjCrSqQz5tKZT~78xy_sobY&=0BpLjUh{}yy^A9 zJMFm0n&Ybe6XowU4g-Ftu+i5VJXNeQ4d({hi%R#_7C9ZumDdZlKQ}-bo-SK$3GEz~ zsuDZklas3&UUz1+=7jDVj{?Viu?)DI_}9A0HS559A~1=r)pvPv1k=Z8$1_ucsos`w z7GqjPJE z_yGK)|GA3pf><8t|2W5-KF@Hs4SipQJh+YAex}w=Oe1Zm9&O}2(Q&l5<#1l8UR1vA zTEnAG*2Q1)&(VI1Tv>)Ldc&Ns2>?HbSP1+SahQ07#(DDy?K1H|&M->|r z$XR&sqj~%d?^9pSSs9V5d4ci|ep_t&6)f(Nve_b4Rcot*&A^+9a8|mwx z_}#(1vLv zmUByZJNOrFb2aW8%||pL8ouS|zEE@_op;S0g9oR8&pLYndY57TP6bB6cmLqvlk zlU&j`Kl7T%Z~j4G)!Zs3^ds@yp8vb%wz?(!i+FC&isx4K|AqWZ+7A=%g{yH#M!aw~=oNS75vKpTM5q5-l=PkSs`=A}K7d!CX&6lU>IrsRq;N#P@ zl>1lI<$g)IG|zsly5KU>g`2`;Orph2jO%tUK2t;8pFSg5yd#v1%$1|NNVb57 z>+n$}@k+0_zib(CLGk&hb)D>mQ&zG-^6%H=(i1EX1B++d*y`t5Q^?<^_t5RYjQDqk z-+R^>^P$^~IRJxv=Qg~dFys?UUL4%?kHiJM`^CXk`sVW?KIT?q9)Llg;`b9Q_7flE zW$NFH`j_mu(T)jwgn8s_Len{JJ9fNX@8vb;PDgZJL^{VgnmfL1o{SaE4)SDxzfRGR z1D#~hlRDcl)!OA-{Zqk<=v4H06!;gv$rqUeo?E_QhR!qPeCz?Ymvu*PsP(6G+$GG( zyQydDzLl}^!}rR+Qmi#@7`MikPRG7skMnAL1zp%PBljL?BU(4QSJUp*wq3=h5Zg9j z>Z1ARJcwiIaHZ~3x9dRN8Iz{3WF6=*HZW<|g8xc=y&LWSO<+rOzH*c;Tm+6wv{OTF zcv`dVP^?+_uh5flz5>%!w2My`pMSDnJmi15KiNA@F)pWNzxY5ew21yBbl=8S!5daK 
zA6AHLZ{g)>)-CdpNQWmr63tN_+!^LXb1#`9o%yeJjkD7Fp9L5GLJJpb3+tG{tM9{dvflfvPp{8pX@hu^D+9sQ-&fa;rea#eDsbH+z}cnY*P zwX`YnQSeUly@+Q+XU&Z4@{;l^=i6)OWA>pGGpl$@OH1WV7Ea>+f-3bhy{u+Ex2B$fY;T_l9te7=9l^JKr_LzO;z z-b^`Jw7$sMbwwAsv$3(D*{Q_1Nj{irZzz~{0((b!BY5W?V&K&}WM)CM*w?SIs?27} z=>4g@*Z)oYH`R5a;uZ_HurKn}C{8U|U!~v~$E6SC(_&%?Q_#a0<~uz)?{Lh|A7K26 z?$X*y`$3K;{S~dF3IkawuUWRf7e*t`b=x?FGxP1qBm_z1` zxdfKC-!mUgdKcAIYUe7mv=L(JD7r zQZ^Zv>U_vZHgEN99h*<03T{T`lPE}r#_|GRPJTXhM((R@^~K}zp~|-)I1;@X?e9eI zIehQayPT$^t$~(E6hjz2CSxa0VzL!GnVcN3$pM|9_4JP_P zalF;zGJ*MId+>{*7rgG%%Yrnq&V%!EL31u+uAGfe)eqW=kNSVFyENP_VfE;IP+gP_ zPJF}CAXV=59glM4&d<1#WzZq?Vyd;^;oXA=doTE~|Bd&ggA{9mEbsO1hs%2XjJGBH z;k3{D?1x$()%R8Q1*ZUtWjtTzf@r!~Z{&PkD z_g1j>o3`Hn8uhcKQ~Y*(_wWA1xL`_w@>eViw*B=R{BL}CT(J55?+(PDM}GXQ{!>=g z8eh72y?^VOhB~MEe>sj^X=QV@@O;hcWBntpxES6r6K`tP8WZ?^)1~;iiW?haixd5H zalOC&jByj+QCZuZJJ$q*%a%<{)8_?@KU3LQ-=0qS!i6)Q``n-l#h1V4DDoct19s>6 z1FVmt`@7QEVy5u>(M|soOdq5xE@dR>Gc%Mdl(%q@j$y#6e zPIXh$aUWrAd#3K;yZM5m;r%sDy^Sp+wZBk|5IE%o+ zT1y*Ae98RNUm7fU+z|`SN*-%3U3(p-aA}k6_oigVJ>y&-K3jCpv>E{lojrURUh zg&)4!nGbI6TpUdP1>=IgHLh-Op6WW;zjs7}1g>^vgZiACeC73U;G=G{XUqKg<0=GfMd83Xl37Ov6o+@zR)3lI7Q9P|SZ zxET<>)z{bQ>+6Lx{E6J9Zc5iev%C8>*5bIko%JK6np& z0UvF!{_PPSWm``2IS&j@9QGWAH*Af@{`YepR``eBPD5AjkI&;`QCK``e$P2Jhit=d zIo*rweflfZ@%{N*!vFF2g9lG>k&ngCH&gVNqFlPb*uh2G!$(ORz2lzA`+JZL=lTxg z$Nzjsw#qz|b|T|$LIT! 
z3NG>1z%!-Hcq+_6?DIxugPYI$Q#n_xXR=*&o)aE-+PxkA56=6dZz?PObmeiMSh<@s zm2-XJCtG>a7mgCT(mR*=*vGkF8yk*a7(W)a!1Cbj;KpIRxym(Rs`x@bI1kqr*80_Q z724nEZ>VhaGlhr4P4CnC{U_YZ5aHP|XE!t_@1y=lY3tSc#@@n8Iv08Jq>sIIhv-JS zSZf<&&Ih0CLod~b&AXNB{(AW?Lpqx$7ZX^SL-}EWv8CSoY`uc5*LNiIJXFub^&%VZ zs_?G_TLB$d6s;(SOIJg5yb8E8;8MJ!vvQ)lEYDo;wUi|Xb7f_?k+E+qUfusz@l_GN zx&po`tnf8%^2@S5y`FWHxmmVKu6zw}LT}8YXrT?om*tGY7-WF>kFi@BuXmF3gSnwbnD>3?g|qECUHcqu_=uEeg4l}%@-L*|v2N4tR}J@kntk+Nva#9E%GRU} z(Y|~tj?ta3CHbyrIUclJd>HE(r$*>WcY>?$t+vclJX&zl3jLCS!@@$|crz(wN9u(kZ{<12-~aKC5nk zLi$)6*687LM5DVg`AIdiu4a&6g#&Uy_xtv z?cN12-Ol+LQ`KI=jss4)*7ytXoQEzj(gSo4X9h$owO}D@W$m*`25NuJ;f_jEIn|%3 zIq2oQu4t}?zm+yS(DgI%SD5N`esxwls9*dg-?U|dtnq@wIJb{YVCA1Fe5v^a<=Q7_ zg11aqO78nozlW@s?Ssz8)~qhh688x_osV~#U>vkG_cfANX9wF0KkwHa1eqHCio%-I zRQW@h*KEyva|!Ftp79?+ZdLtu&P-{{RVN!L{uMbIuUFX;5dyiU z7|P49E1$7!Zx}g$p&6D_4$tKKi1_&|(KWxL7~(d5cksInSvsk}Gxrqmw1|(rGkU?y zxLw1Xs7$h^e29Ltz(_pAfm?y^sS{ortTCo2=Mm-8`p#j3 zjXi|xUOss1Y5L7xxVM$H4LaVFo$**$drkCQwzK2aKrv@{d1mD|@*r);K;E0Dpb6?v zNRdbBE2<;^*;aog=TXn$Gmp=FJ`4GHd`{xCgwIkwB|aJaPpS_M=BIF`@T2o1Kg^SA zm+_{I18tqzn0i{@vFTFn7~ph)<=Dc9iHU@_GVr%zk21&#W5*@?)W&FZBCn7o>zm!Y z?>4o$DSNMF2|BM$=gxre8hgja2HSs%{vDUSrb4apt+TbReJor+J2RkDr?if zEPR>sZg1iF@jTy=i}0vCLh3V18|$V(2h5A`_Jf~AxOp38rxbdcdVcJ$dFH{tl!wmj zm}i5}c4X^H>Wc0%@Pz6=`$SZ~9GBC(H-Vq&j7z`gAd~+#?n8ZNTo1Xbn0)rj_U`=eXnxw!QBI`_>?;F&p7|Zf?~K3P$WDsZd;L{%CRsUBp+C+mNVmTU zI>QLcznO3DmQS(go^{+>+7y2h7Rx8HHa{x>*FCy-Uhrs7-hfx9_>=vzCsZa+8QB_I z3p$*K=xsCp0%%M0)?TbN;KKo1-}XD)?lau`tu|E_o4C^{(nnGI>69U#-C@uGHegRm zeV3xXvvb$`yx+gJw9Z$VJZ02(cKHW@>C|S5{_mxs# z${Sba+PT0o}SK+*qd1%A@;>rBmteOxvGRJP~~G9(>Ox50b(5jQp2t zu*V*S-_H&viVxkj%mHKKna1!go?AZTE*LwfrD#syRJk{FO2nwx@&33!3y-L*<-<59 zr))IW-Kceqo$C>_QSR|oU-a>&%JWX^B?HyA)+CkgaGB^sq+Zfh1i|B8{yk6;o1s)79ZK)%4_W3L4Ujl_oBT5a>&lx`s03tzeD9T5ia8+rd4URj<}4Dh7f%QocN` z1q;YI?e!_{cS@r#9P4Y1c~w~08=rYDY!0D?JM6q_{EW$X;xl6B3#=W=YkfmJf#Lk0 z##@U&_y4f>E^tv+ z`TzKNh5?2Fb+}k)s1FDRUI#CE$#OuP)J*YKZdnWh1{=c2VB%)mfM#Y|gIQr|vEA10 
z*w)sz8Q50a&0Q>Ot*zaGkx;BeR}I@Xe((2ro(Bf9w%zaR|J~pJ|KHKW=Q;P!`JB)B zoX`23^PC4|W#c_I_vRMb97J|Ovn_j5OB?18u7S~JYZ%&LLH%-%68TJgW_yqq&jXj} zIwt+x(?T0VQEupqDC5Ozg(yhJ9WT)JEYSO+us>S^gLK8k9i85v;T$jA+I3iGOgYv0 z$<&U{MR3&=j?VRY@GrwwOfK|Am*_g9Am5QwKLGz%wp<1tCcd)) zK4-VNKSzWRc zHrZy{SeTA=3B4Sp0cC-&B2edOjyslx@2#+7WgS=%o$h*_}9xi*cR}Kw8I%;kX-{Y2S{Eq>GiPB$txHGnCBR$^9tJ{c?E3= z;+4LB@DXh;{*oA*zp{mAS%U4;CX6SOnhm<{ZQiKD2Cn|CnL2eA^p_A36UfA!mqSQr z)R{zx`ry~b9QqOd5bVR4u%-^p&qg`kz0CtgYxb}K&?ji;qTI;28+$XC=K`1cfxXy= z88Emqlh4$t-_+nsm-d?<7z%lF>5z=(5aeO}K+pj7w#j=EfrEnd3UJ`PBM&8sK($Hn zdxwSunqb?*9b^8=5a@#E^$lE?a0$|S{j`t6cHPW1jBhOL+(!yyHRwv=cRPj|=Db?^?LQUz0b~pX-7h z9Xix4nCncy0gFIC_gob0wfN=-{30|WP-2;mI`8re+e6I;erD{waQz}sXZse&7X1vk z{8nK$+JW`bS{Kr=&n(_%uH&%h(iHmw)?IF_yRmmnKkmhDW_%xn$KlLw@%bWPf-XA; z-=~bqzN;k{--9*_p6$i`jdre|x3ml-Z7^4x(5?w3z> z{(c1RfdhWL+nM^z(|NnpNTh%s`a z8RO<9$E)T*zXnI3`tx=Ety7cyrgVSc{e5QKC$P}JtRVwBQ**NgHjbEY7PMyKEc1h7 zus;HO+^ev2_Z3yOyxS*A-`6z0ulu15_f;(sUHinub4~;3r`9%Q-lV@bitjf^VgDc$ zd#&E|L_gaQ0@|S6#{}DNuL29yhG^e;cEtqOfqh`h9@O#46+#&A0dZd`8@315LEVz| z7<>NwRxsa3VA)c3p?s^@8L@X>tr8=uEEd$Yoaaf=XHjUY3404XH(!T(9zs3ksK=+% zob~XVx{#$!cj7(JZo1mhjmQc>hSEX(3q7oIC=fW0q5KWlHX zL9dp5q?OA)!r3^>3bf0Hb{TdT?k6d}+9n6?!-LL-zR+#ou;22&FP?XtPMqCaTSC0h zX~fneCgq-HQTrVE$oe|I7t6aNlA$-D-!Mn;E(_`mTUzeHIqL|}Al{9=CkJ>v8)>{- zTCVkL-&G%uw({L-u5lq&0uK-I+HlszABS{%=e-7d?RM_f^IJ8b8O96h2`)cEFQ0nm z0+gwU0aA~pT<0D?^H87T{KtIk590M8!kv10%#pxh3GEtIVGHt` zGPX7g$FDF?;ob$bb1!5kWdrv}c{ff0#|+x0^G@Iz+~Eg4hK*f2%R*f@Oy{lV_}&Zn zIQW}qkC5YrCZEo`&2-M)x$c^@G9kUBZ? 
zb-nNUG;n!xA$VWkFLtBOMU|*C8+@Vf7bk${v#7U=JN&!vguR7)lEk1-pj);3-E3oY(k3TZY1FfMbm*c{W{0i@uLfKezfmO8q2%b}gouh4sKtD;@@IWidZzDdRKA_dN8F8VE z1K#2C+!XoND{;OJX_&uu8~OqB9PhOV!B|*|JQ%+iH`*CYzQfCk<&LiX52j_(|_$|2asrMNB1f0`2HY7iQ z{`Y~`-oMC{$_uey1?HY?ZESX zKjhgI)GOEf$*?J^c06P3Ll=Z?5AQdFPbk;e_a}in?RJuH#NF7Je#GC-JM%F&Va$F^ z+^JW%foGzDdsn}9#~JfO7V)ix?jpwo_y@egdBKWuYEc)*4)4cQVS`MDtuxh!`;Z-5 znnMw<06*$Gl=+Ga=}cRWyo)@lZcB&$H`|VJ3SE$TpLIO=N`C9tmf=^j7yI{?zJc+J z_XWsrC_{J``x_XBa%oRx*_>-w?yphqc$CGnu?qHh!qwWQy+a=L@0NG&%XjEz4-qdM z9wHt;JWbpKw+T9#X!B!?%l`BF(I;@|M}FTz?%lGjIxnF<1$A|OFX%pe(;y)mJUkB= z>atZ~TT!o+72t^!DH~nQl(TbczwJ8D%d_8{OH=Hb(r&RBx&oHQn>YuUvav5V?12{5 zaYr-8Y(V%hC#P>~H9?n6#2Fsyigp*;UwdCmNc(#KE(K%if6={(x;}wB3Fig@S+@$t zS^_u{a6DWvKKZ@;j4MW11)LXdGu)GK4RE#)A#lNhmt{IA)&%9p9eo1#0Lt*gI{XIEjQk03Q}s5?0d&DV4EG6K9L^3EPZWU!oXH8nc{#3Y zgac<5pksc4_l~Zkj_>y2oD{-qf6+V-_n7?+{A5GENXXJv=ug_!!?89{aMnbgT^F?h zgtdrOn%zGfd5v>Y@yLgJo&wh*AJfY=J#C!Sg^1!+AFPcEsg?mQkPw>G^xKs}^Ai zI*mgRra(3_{9c5UKMcRo2iq7f^%adjUMu@9e^}I-mLX_cB=`^e9j_K6PcrC?84mwBLe*gKk+19kYxevi?t=84xZV+{8!Ku zRIV>-o2akw4m44_rKPXlpW3(Xu7J(b+k*4)(ZRfJ^u^iu^(&r-cDpghG&R#E0N$7n z-kbjd?1x65q@YidLATdqaaJ*U7WV3KZZD-)p4)4J?V}oJ4{uGyS;f?u0h=kiwLw3V z$nj8Z8tUJisP!{Mb=#Hw76i^mc`Gn}%(WOh4vZb~3-CPO@2_pb8J44A;7_v)a~}E_ z{1Sj)L0W}$F@rAG!nOt5p>>!}V=3=3uGN*_?KdM2_8{A6BOoowJ0{W-^^i`pJ^&4% zR|L~c;sE@N{H)Uo{fuc?2HQq@{!-FU8}H;lh`%+t$0QQQwQ=K=(pXv;S z{DOs|AVl-CBdGz9xf>{3w@XgSLt6F4Lb++6e)YEuh}5e zhh=B@AIauf9)~|A7vmFoHvp#0clx1o1wx^3Bq?0`&JQFh>+KUjGxQh8l#R2qw4bbN z_6>%-u{r#|z~WlZ z&V!y>y1_pH{CfA;JpXmqia?lsoj*jxN}C+@F%REM7VG>!RM!O5!TNY7Eo}bm(}8iY zsoa^`FVH#W66kpIn`nQ!ad5_~v_Xa7E>!FbSQX@B9cjg(0gt`&vpbf}38XF!4cr+P z=D%ZsBd{Fw;F%CieVgu#-r#=}HlOOTxuEI%0Pces$agTW9*zj z_0S}LD*Shhbp)1yCftX32WJR};0)l=#2JC;6KK1xqYt>aHn25TD--y{vOGi2To0WH z>-!eYTZi!;6L=PUbtU?eI;UofxF78>k>9+07liT(=YGU&;CKaXo|SlK_ht+BjHvsS z%)_~1?=61Cy&mi9B~6gw!}*=^6!a;_0sD3{phdjV%Cpwt4vZl^E%sKY+=}wa(~7UX zWfa~Q!Z@(z0NTY*VlzQ=lkF?)?cttC=xo@lHDTS}jI-F7^B~CH#d{YR}NG&t3$5Sj5hg 
z{e54{46z{AW`}H2Plm>$Ud6Npcam&rUe0q*XczXStine6E#_T?C~H63kPW+iHqNu- zjEA6Hb2RfEXGI)uu_G=9;fl)oPQdSg{a$|GYg5m(`&&#Xm+3(xE1!XD8{aFWEQ78Z z_zmQ?9lXN6vFm&C+#d;}UZYk*##3&+*Q#KA+F_HoLifdcCR2EiKF{~xIjLVDJi9i% zbK1N>oC=#0_z5(4dld3)kT#ld`waB|I;;uaYnhJlQ1n5FDnejq056Iu zBalz_G4?pv&*X)bot?v3S1I;06)^*Na6Z%cfbm6$$GC0|M;RvcC*GUclrQ(hFe@9)}-ZpB>#yJiUC2U8jA2%cl-eX$7* z2`!>wOUnej<#}X9oI-wim(@V-C5C{G$8es5`(t>AwvFS{;vnyx zkY|EJHFKx@8bQp@_VJ5*dH8c5g231u)krh1I^!) zxQkkUAO-2!2aAv%6>N&~a(sd=XlDY}RH@!en=feK9^JN9*jECkRPdu8_qO_QMjh)Z z6}1j zf@?cEBR%orvmMNBgk|CRpdOAp&t*6l(u_C4`8)~rzYCXufu8`{_EiOBCFrd>0!nQr z&bp!BY~+FLK%s(nZXN8mP5wT+p%-}}SKfy#b8$`ruh_IRP)Q6!dNaak|KhtBe6FAl zith}CW#8<#ICi$20Uz@`_7L>5g1-G2{wV(zl>b4C8MtsvBs9?Ofcf2pIRtscjQbBe zdhznqUc5XByvuSF@xh_sa#DJgGaBWL!^wr*FjB?0!+o~~OtoS`cnSo5?!8TZ{f+hC*kF?F8#wsRoP-L+k$mr1W!ap+WZ#1>dw*nkjVSK}l*fH5DU(TWUu~c`9OXA)-zKQ8fvBtOEOkBl zBkOt|b-jf0O7;pVyGZvq;5`jtvLA%j7YPWP!g{?TU;*!$ybkD;8=DuRZu0l+_rTK* zt*x|WnbF=thJmNBX15MN+L?OVX=h1$wVw7K&g-t7EylY|ybsD2{C*S9)+r8u6xzKm zhz^6`x1WU$`9G2l9?*en|8SizlF?pa!+8em;qsi*N(^eYyRTz2Y!sl03ZAv$eFQtw z=;!5d-q45h7e34nTPtnrd{ua-4f(es-CdY#s$uuOx38K3+!t_tHar6K(#{@lY~gq$ z|8ksgTyq?g4wqqH!{Dp)(8jNx`7U3L_>pa#f;O^WIi6E7mgO8T$1~dE`asspbrnRY zbp#;SWoU~DVzP~M9`!t?BVXI`J{Zp}jo|$0LORF?;i{sY`z88?19ZW80-ia5_}mnh zi?d9%S2nYp4X*Wmrp0?M135oOkq)lqF$n7~gcEvD73Nco7KXft1RZ`9UyzQFpBim9 zNO?$l8niv5e}~#eIzSJGeZiV4z|Yuov} zpDWN-2XNHhmo?rGKbuaJ6@k6iOd%#loa%h0->J@fY^OTkoeb>-?-9y-Ezs_UF{*#> zv_yYHzf9P;r~4;Qz;`kEeqcX`--mYsgZCErE;z>+_Ys2k7TAv)VCTW!>0$0WU{49} zj$!}6eqIaD;v0JhD)3o?`i(mmxj(ketF2Lj_l?2?-aEp3=zZMi7a!!g3Vk{o>wSdL zwiF4w!(iB;nQk!d5F89U^kCc}$Z;`Kz~*ZM-Lc1z448^@-=yKB+jaZ2&yR8A#hd^L zTS$U#m&V#%358wSxGNEtULUmpSBI_pePMnl_AvIjOu$Lha_r#jg}&d#bN1jptb_2b zlZ&=QxfU2soM;oFO*#bMcM8Gx4CP)J`yX*uw1MwID*FE8wtFGZkoQ7)R|@6tt#Gxx zH?|GDGO!r?2gTfDM*BG5jl~@!wg&Be7wD4Ksp93s{O;jY@r%P3i5+m2heMIKV@vnl zY02=<6gXS*apzG&1$ac`jJL~yagS${it*#-`U&@KARPP6&2wDZcW?J>!5sn$ba6Mn zv59sOH?Ey{mz_X9*t0-aaf~>a?fAw4>>b#5l=k+d=5Af&*C>bke_TWE2X4ZJJ`v~> 
z*e1bN)*3g1)ubv#-|XFr0att;q>D*9y{pby5-D#G!tWm#t^=w(tGWPb(k)fne6LEo-#d8MNdvSFNg4K!Fk0lKVyp2LSR z!FP4~IB++G+P&AzJxA7Qmo&P=zZAH#-^^$m`6TvswyS%O(U1}6q8hSNdcVWZ*P;oe`67R2SS+^a0fxSzv>x0kYU#H&u5vbG+khQOID|3x_uWE^CsBL=d0r(s69=vZ|%iGgH542I{^WM_39dsE98uk^qtJi@& zThQ8|ALQ`BC?D`d84uS&UMpJt#9O`>F19v>0tcJ};h1Zi2zq&uj`8QfAJ?FjYs;pL zxjYc%gH0Rwa;?oXIwrkODDRDa!P%VV+c6LAorn7n#0J?X7Ij05&AtKsvY{p3y`g20 z>=)3jmwdYtbYBX7e|Rj$7M#IvBfzf)4`R%qebjU22XOyVv|;0582Rc^xj*a2*$t$_ zJ}K{LGR9~s`mqpWl>Ep!D|qhaoWU|EPbts)<4g_ZS18WfNx2FcW<$GyM;pg&2+iu3Zm#P0Nb55a@>(C%cet%FVPO01h`m%I{o$tz=*fqyKv7rX3|X75H5=Q!F+ zm}k#hkm;OH&_6t9M44~cCPB z){=oZ75P9v&|B)&19w68^9(=lzsGw*&9n*AZUnn#Ean(|$4&cgff?Z}pYJDdPl_@p z4lF8zz4 z5Ae*-PtabB72gaK<0bFHA8bGDtz)dgHyh)?v$dmbp_ASVf}68 zIyDh>bImgs_=SSTW)R}h?VX#eq zr)<;I@0N{sm#i1bvDUqI!ZK-Kf6NI+oq4E}`zWS!arZgyP+^}cXahc56i7v06?{k? z9rkkS!!7b&6xWuP`!;nReHdZT3!0Vlyv^GL<_bx_JN$cQfv?a9qyzJZfxe_Gbeh?m z6S!{_f-ulThQo&5RVQUB(-?Uu%gd0KePT*PdYy*E#|IoJhsWMSK1Lh$xlOJMapzPi z?yKn-fi*NLH~5fpVw)ehHq&BD0)~p)^O}^14oW2uD_%BJs_kp+ACNW<|Od5A7fA6zs7Uk(}C}#;XTjM)=(D! z@7#?1hAzNwmm1@RbdCV8#{y4=k(U*lhClUb$li|?2l|&hi1@<0HQn2g7XkRsc7|4y z(cRx`4ng_s149S>;O=kR&IP`{H0TaN-@M%A0=~eBW8{y(CCo+r6Ze6E7R)c{s%khx zf0Jc$EcM2_AMy5rPk=Y)@hQNQkn*f+JfV+%CGqEdW19YK^u2~Z??uz-O#De7gYO>! 
z{;fD;`U#lar>~DJ4(v1P`XbMw??9ShdHF-KavS*IOXxg?eBAOcqw{~DC{=g4dD z>#Jxl=NQtBGHEs940@B78@!-5$IlcgznUz#=PSfF<^46{29!0qv8*OjkS>6H9vS`N%vf+ecE;0J*X7oe;`Q2kwj!U+8bN71A7=yuK zZ|e94hwU@MK&z3Y)up43#MfOd?sh@P0<>k4Y*p0PkzaoI$f`g;Gvgcr-*>~9mia@G zo?)T3^&o3)h$1o&r0GJ!hI)hL6~6f~9dvLE!#O?5QrL{pw* zwWOQ|{jF@v^`yTI>r2!dhWbMIP9*w;<7DSMOpA3mXo)_S(BQ50LA1h!mR&r@^j&d5 zeGq4)4+?XfCG1Wk41MrUP`ST1!jaz?bJRmz4)lwcft0Iu(Q1b@;UBZc`~`1$KEh-0gev_;TRDV zz60R~o|Fx9h%;;mQkR09ma+=zHlqwFtL?O_Vh*qqS8;43+o9qax{PLjUBG!Q z8u@e^tacBY@jgyHa6U8?XGDYSD(Az_JQuV;A8%s&$%}ym>}NT={#fuR!(tt^55z!U$Gp?@_au#HV2;N61#%pH&_lNAWt~kA9E7+Run%d-w#$R)H9Ux3*+DcRpK=UA z2g4Y^w|sTlDcPEC2ALvd2j1h+bpnA09si9#ewK_~@UMLEN==$ay|6A}H_M$ZN=)#h^L+hI83#LE#Hw zb2GwU`4{S{Mq6z`bv+YQ<}97(Sl44g;i*A5JjA-tukWE8>Vh1bSX#=k#-Ol~41>)1 zkYOsEy*6+@!i;uJ4l46@mMQbr@;&r6gKyqt81m7M70gep8*^}vFxCwns4MR~dR{Pc zSDl$bb*^Nc!0!X5Q?+{deLHuMom1MC9DYqUxuDy*G+9`}u;D!e{n}#-=OehBR$$-;7Gk~Dsb!rzBBOT{zE^^cMBlP4ZQfhW8%bnx1f`}Z@gnh zye0v!mj`vv4J^}cv|D8!ZLfv41slqpD$CW%3NB|D$|*)UM*H>>UmN>X8>4@|^V>GV zHaQaItKcoZ#|60wyNH_$Z4=LMaDBsd0_l|m9-;1o^$vyz^fU2# zYYoSgga_UdG{RqyJ~3uL^_uDoTx%YiHVvXa|~v@yIqz zMIVlbv%}#$O-nMI3P-+Kj`2Vnk+f~1zm5Idh*2RS7<)KXRw-!XC9gb%HZWg#ODs0OF>CJFM5U!QmyR3CMC%LW}w&hJci)rY?0yJ9d@E(en zHYxe-?b%Cc|KmDmEo~5Y_;EHp6#hUm&OfALeKb{Qa|>ll0>WH)pD-Km_`$Updz$#B zw&Z<_V|_~$ocy*4)?*T4Pjl*A?Cnl~OwreM8zE2lEe922H(teh*N*o=T(}P*+usyg zy}^Hj%O4OfoO#E&+c-DgwXuizL!c}39T3{;6r5*Y>spVqCfujE2eh~c_4C~P&4=i> zsG9#=@Plyk`3iZb4C|vDsqg_`+pR5T!1mkr9Nq@pxSwOJ(d#gFxTmauUcx2$KR}sO zqt*rrq5DP|_mE;v#ofh+D3iW{OzD7~k?peKE=(gl8u)VG1oti9C8BS>$70)nH7D-a zW4Q?DUH5O|yg?1*0_nd5GKt~5zva2;2l^unOW4Z(kk@THa5h1c^XMn6&F#p;HEuHQ zP2C1LwlNWNDApzGnGW+K-@#K<+?#8w!d#8HRrR*;e1q|f`5O8M?kYh)E9!iCpNjUq z3hg_!*sp4ijrsQITGZplypaVs6ZO!h8w$BR2mWl`UoLhcC_4lTX=~J8%#)y(P-~lZ zL2gAg+~NP3W5?m2IjZojJNRqnozA&+oBXy$p-|-e!@N^qMPGB^ z4J8Y5;(^v`C^z8C9%(T6#LIA`Eyt5FM}@j^FR9xh`_7j*ILAFxQ0_oNF@5ynMb1%}rOxs@P!Fy@%WuUdyd`n>KX+JT1pP2O-=>Ir?|}BT 
zSrK1sRYW<;-m+a0C4kl2@C8Z0F94SV_C+1JfPg@BtuY5%&K|*|V7MgmgPs0@{s%ZONVxSldPj5exT{-!Ra~4d5UM<$5-^e@s1@}<# zXJ_w+wMVR?9Byq}&%Prc2LBAf7##MmK2xsohxxVZcQ(Adhc#S@*LvdBvx~d(@_~5u z2>3TVl(qc*7~Lx@T(~Kn1MsOBDWxS zS&_i<3v=Sw(e}wA%o?6yQ6fy%kSL|E=%?6}Xj6_vG%acI)V7+p}9v%D;KML|wsK79GP<+(CrS*cb4l4)tztU&oCt40^*dJ4O9$++AV zMR^7`<+;nmXip)EmQQ7+EKw^LZT+#o>yePMl6*|LNyqQ7kd^Q#At#WZ!@;gu%5;>U zbZf_HX%N)KdR2HPe=r^8D_sNB5SGOs%cYwQ$MMH>l+SdO&&JP4zXAaq2TaF#fzE-9 zMm)=+jG??nn49%Cj8UYUIYyJ`20ln}`WfYIM2L}YJkrtOJ=dW;GVmuPww(Ws@#{liG78i@dnbT0;_d-MG$@g~yot`*~FPzXzQSwExvnA`o2Mv2c8ja1$A!+Ml$ z=pq@@P3=x5aiSM&F57BECHyi2u1N*uk@aJLQO=N!WylL#FTDq9iP9~u_>lW~S~AUhptPuE=0p@_Dz9f%|;KBiVO;vV4RxogpI$Rru)`u9xBd1_xXW zHwlbGXT%e7ypiWFnY&n=`O`T!4W9QvZF=j^M%Fk@_aeB*Z+eiAvT0E-e?gF+x-ZvJ z#*gW5GCY9v*Yxr~7363B=Ysr9f1sED<6i!zUjE~~{ASR|_^~`|FMq#Y{&RZy(al{y z=1=M6$5hbuWBi0({tJ8gGkWE*vM$WMCUz3{Fd%RjG|U+v|`(!1-&^do!u$M*7H z*vp^U%Rj4^zqps*)62iMmw$aP|CV0;{2Jkp!i3>9@&m6MGrG$=iSkxI ztI0d!GZ*nQg5o{EcV%67`T=O?p$}mXLU~C@|1|Q?zek9R!A#8mA<}>FLU;NlNN@WH z>sLMf$MF02Y4U>UHvqpk_aY6#>F1qaSI-w>neJbR@}7DIcZ=!xKMa5A^V&N=O#d|e z<9@BpP4w?azB$0JMz8N~ly7f^9)uX(r-a|PoY2Ey8k@*V;{#|0=j<+ z@Q?pJwxSTu{7vw0Y)2%1Y~Ovr_pw=^36{x>k3qTu@E=f={#5vX!t(U|O93a>b+>mG z@XKn%y>L3c07ND5tI+dbhk$6%%fNpG(oa38$zj%i1?xw7Mm+Y;gp!H*QqTVp{DUyQ zCh7hIpyx!i*P;8EZqzj_zOoSuIp^6y7^F5T}2JvYCI`4N!x+Xnm+ZxP~4eZ0Pd^gDyb zM+nl@{GogN?nQh-P<#RKS%OKa2IZ4JG;u8bfc9g}KOgxIVLUyj*MB|e^Lz%zfbQQ6 ze>LcLj^3X*hb649QHN^NFdK_Tn0D~j_rro7FWXMz%7Hj4z3ig436V*E!@;Yri!TC+kgMu zk(r@OAKWx-{_(BD80KxfJuqR$3qwA>{>}G?+j1_v7-1LQc>DNQ{`zv=!mLNa-&~cW zzPqFEeV=})T$3E#miuDOD~-h)Gb`<(gQhDNpFVxT_$z;N|K;J;Z@=}6=vU$&kN*0( zPanBy*6Gum)a6gVG-URbbK_q1z0qMled%v*dGO%x)*hN*vE^?5%hy}hS|_wN)PEJz z82|IQPfO3=b=AXTMz2lo358QEb z(dvEw`0DLX$JeE0Z5uFZ|NLvr*Zk$(J{tE>+mt~MJ^10z ze|zGs*Uvw8s{Wa8Ph5DW?W%L1|HH9$9nZ|0`_3o3f4O`8W2>J1$Km6XmV7xePQB{% zb)V!f+;rr+%DuDiy`}T+`71wc_C>GR^wyVuf3^SN_dVyFK0S17Ro;!DBV92Ze=9T~ z`(zEC&w*n@kA1WfZWTTHk9u+m9Qkl2TsGXLaOH61 z1^&S90^+U5kNgk;$Nr{V2W!ju7eLCbLck^T=s)sCBHTpX9|;%@M;;=7k%z8;!;tIx 
z&DWp$0A8*?_toKSKnyA2gd3-a5s-)Z%YuuA8$%C%7?Se0P=^E@2juA+=n48=f@jJC zZuz!7kEIF6qoa6f}{!{x!92e%xKzy2EdU;P{Z zZ`793E6@48{PYg(6=s7UvP%923swM+u2RHu zl4;0AG*^UUZn4;)p%G~kT<=!Ah#^5z*C;NYB_p1Z&rzWRrx}Y zF|T}$yC4Tx<*ZnileelYXGLy>SYA-NTsAci?J6j^PAo4kC@vO7o{IdUYm3Uoie=@c zt5FdQzNPtMRSB}^vDDyNuIR4j3RxC16nR#A+_`xLS{4MX&hwO)u2|;510l0qZgkJA z74Bkmmd-9RX0_&#F-EeS^0HMWV84QV(51U?gG2Q)WPCR_7}UAmV?3cfCvw8q)(!TRsRYnzga=Occ$5V7|Np7*W=^ztOVif1F@*IOBR_CCw zuENU-JgfA2Jh@>0>zBLHpqwQ#H<`V=eEJMLJmm#$k+WD6Q`1YqLTG4LKvn_0Gb-{5 z+?u%36JAnKJ}0*X^6A=wh3k*7MAB?$Q!{6a*I4?DQ$s< zx{>Pg1q*XfKvypMXO*BcKBuQ^FUi8Ar#4jgqXw+5JJiE6JpZ-gb>Q{Awo>< z?-D{uUXKYna=qd`30PNG=j{OWo&&uNN`$gY@zwy=MXvXLLTCYm;$cDh!8N{3c)uZhLU@-#;b-~zP%P`}p7zdx0^9+GF#(GM;x!VAJkuW{MEXTgst+T6 z>P;H{<$#Caf6~WxhIktR1@N9!t;H7t63@O{3G2LbB;4iQc$XGF_HIDnTT6)gy!SAC zm)BgQl`|fYbjk;;+ZFEJyj_cLCPeyy_iFx`gilz!b%Y2%O!#=XchP--$oC{6%FW*a zi26Pz1YURV1VsAr_iOPl5+YuGK%;*)pm+U!-ZufCao*>B`4QlS_AdP;cn$FLU0V3b zC$;dAPiy|qpJRD--ot=VNVn~I*m3~XUt>K5_#|LFVAKm*`6?j8*T1NRe*%~Q|I5EY z9zgqVp*I3@A0rj818^kZ#yyAwob(FTDS(ZDlK^+rqmLNgAcO<(0ALni{$8xf08awW z0i5%yhUXGMh8O-$3(tQ|3wHy`@Yl8Q!Z)ylrA;YJ>sfB+I z$Z$f^aq3%IITi0{<@ny!;wOEGeg+A}ry4wo@pH~%w385vs&5EG5#I;U1lkiKgN4un zdgckY@UP{!R|{q6Y`O89^@Ti2KbNgT<{y=Igpox zmq0!erb1p4Le?E3#KQJ4;Y5rhLdckQLM&`g5@N9(JjZgj%v;8z2cs_b6y!loUsd8+ z<#t0!FUVJ+AQVA4C{y!_p%;m4=r#o&4>aPuvLbhRX_-f0VN+0{7L{Nng!DMcP+Xwq zLtV&Yb{wEk-DT=>sG8IqP~6H@MbJ@H%&cX(>gv+6e2=&atB#9O@~^%s#Uq`X;t|DV zVsXiJC8euNR2)S?XRdZ3(JwxT;FlpLxcYyy?Hu4jZhS!P!NB6|jS z++qx=GG_cF`-JpyqgE7@U0aYpro6PYIIl3bsANoW@%1Z4LD~B$n#w&|ZIL&|Q@#Q&@$4X05vW@cp08jS^Q4_*q^#58p- zHBE);em?uId{tQq`(PGSV`o|E3Rwi!Cp}b3UF*{{J?OsZgM#bFT*n3W^4wx9Qu9|~ z9el0o#@U*Jbk$t|+8|j``I>Gf^W+xgt3~DDu2sePDppEvwXmS9V5H z@{zctw7ffA7k330URJuQ91PyoT3uPy`g~qVfs^NgggZ|kN`lJw<<9I3&l0h4#++Gm z7cUWWW@Rp(d&R7|SvlDYX3brclaW1Zp_s8?!Mp`x)?DX2aYe?0xwGb8@||>ACdOo3 zK3#Ywi?Ycgf3on51%UIU1HySGiLyx|e-a`9;5_MoaGr^xY@*1YhzI~UPdXr+=K@i7 zfyloA5dd(WbU--I1W`6Y!n zY4gA$ue2;RO{5Oj@Bo)_F{2Js?G?-Q^BHOCMHj(qoX${(4O5dYTjeQNmldeFY97{2 
zxq0QPTt*~ufuNN%MK$Ct)-z{YFs15ijf+y;sNrJNkWyS+VGS;!2W?UdDsoDHx{S%E zfN!z3$|-VRgj^LP9t_fgIalG|V?xl&PzW$_v%5rs*yEu}BAr2FSiu#pP-WrGZ8*HKjtulFzNCxK%8t ziztM0ixJwR^b`-~qAAEW3eYO1OG!qfo2s3~QFBZ3(cKsmc@#=3h%=`+7>o+?r>HVh z5E}${VowU^!HOouS`gA*lV*y$2U02~ z2BY4y=W-S=n4U9h?!rYGbLQkspEq~Wj3tYHY*wzsr3IzSW}?#kiDqCF$by|2C^sj2 zX2!x9V{@di{NA#AQZ(mZDm(pOE1OLY!tCG3Ju_o&)|?p&a-8!P&&~SYCY^=Sq%fwF z89Cat@V!}kQG*!&P*#mx(28ghN`*laDhK4tNNQ4ZSFM10Hd4Jlw|G^7lmux#s_jK; z+M7`ANHa#&+Yc8A z1xB->$$78KzV^>Eb+LGN(vj*g+6gJgdp2&dHWT);yGen$?5E$Gyr*65pQhF*!>QzK zWp)i8X)p}NIfx^MX)N&_{JS!LKQnz#*6))uXQdC7MW~!X;!E?J{A|0R4^XOr5b?*Eg7$J^)6DwTE0Qz5Lj51yY9m9WYQaoe+|N<5liuX~36Q8I3w zj2jd5JmWs(ud83rG}`lDfAsTonfLcHz4m;^cb+q3n!P_dJQ#=IGK0glGS>x_so@YD z9-RKWcz!p1aNb~?fA~2VhaYZxaQWYd+xLaDZ;W5v^Hp{6Get^8mbp+ZRD42>Y(IUp z|ICUkrI61ZM+=?euE_Rpjw((?#i_vUN?Q2G%J!tf`2)@MV23?SZ@uistjPASkBY)k zg^}&JEoGL;nOe98VdC?7?Va||TBq*Au!b|k-5KcYRA#NovbfxrEc*-Iv8@|*q<#8R zrxW^^omn&cIJ0I|WbNyil)46gr%nnl3>j%DoLsoCBR<~TSet43m?bXuPo4YoEPG+y z@y=;WKkajBU16WolSHU*-brsRK3w5+oah`o!Wr5A=268yxiMs9|HiR1$D-V^m#xXl z?nrjrm~|^+8;P*Aaua6Rk&`<1W`HM%1o82YUP(S^3y8lz)J_Qt0>ieLT_^Q}+Q=R*g$o3^i zBWEg!4JRzhypxS(6`)t`QIV<~u)FSRwHJ!itsiV`74=WU4(R>sW9|TWvlsdu{E;YdsOQw1wDm3fE-$_gO7^( z>ruw{=PN#IJ`?g?jC^&^o1H>Ew7ZW}NgMFzzBALD6A^Q^-G3@mFs`9X8LO;^-opba-_>DZF2SuVUGyJ`ftUHlkOA&VT*NGOVch5OU_g#FVB|Z%& zR#a*jIof}UC(&M6spbBxO~dPG54;XZyqXSXj0k;S!|mfX;+7F^sn2Q4Tq)Y~Dt|I~ zTH48je?1)CK2@2dj92W+NF`NCR#YWHiC5zAMqrd;QSkPH@{Q;aCvl$ibDWVqB>dtN zaZnr(jpAMLCcd=LAYR5<@8|Gd)01MCcv#em9XN}+4d-z;i;ZHv@Z!{51y0zxak{Ti z(x)eq*N&nC=V)+ zU<>#O!b(?v`80o!135zNQChh4h1o-;Z@=_~+jEhE zsYq3nZ^g4!d+vkhKGqqn%kY;^(Hj*nWBMg7JEI^(v4)@|J+4r}9OB*6C(-2Zn<)HR z8kBcF)3L@rR?Wxs{my6Fh(xR3nrQJyCWiT=63zba9{R~wZ~NC zo#{7Rz{m2Z<9TIp?8e@)H~NPFtDyXay;C&%2LXq{z`tioV0{VdF)=;s%9IdcXu*KQ zDE~mT;ykn>0j>Dn5{M~Fn2p@CSqf+{oF$+=%Di@aRIC&IFx+%pRr1b6S@Aqu;CWz0 zhJ3OqfT%snYLeXlPjYSiPA=JR^V-Km{lq`7eN0XWsvN zG>@~*h}QO#U;OZlXl+0A#eZ@}T%&AL?osYlb}BC^dz1#{cgh>eTgto2AOAZe{$DdA 
zZuUK<{m_w2b>2N(9Y_3m$?m)vr$35zviwI;K4X&P-*e>O7jY&~E0b&0fmK{#i{s~1 z<;@T$VyhBIM7)3hcgj!mXM~Sv{K?=UX~}X``Uk8p--c%X1~l|np{c)ub>>T0Y5p3T z`?FYWJ`ElIap>}oLgRlBTK)Z4cixMY=iOL)ZWSsH=qV#g{A?ew`^n(W;lmrna7X(* z?~t^=$r=vf5?>W-+Xuu$;+H6`PCSnq8pP{jpZEaB{rsX;e2JCsKZIF{RAQ9#upT%c z>ws~}WM!H%LlKFUZpHhD)*)%{$nsQ?jQEkt7^Ik}OhuaM$|cGiWuCH7xk6d0sOjO*DqO@tJAAaVqhn4orE}eDBjOiKEF1m2? z#0lfl$BY`0c7Dpx#36&u8*py_*l639R<`j!;Y{uYts+rbskr6=R(ciJrGQx9*zhGS zJY~2PSEde?vWtwT%1QbY*EeJJ=StV8uFwdqd_rw0U4AS4VYWeC{z$np>fhRMEL#-% z`Il=6VB(IHVY<(2mcD+8)XqbK)49I-YzcUiIYnZ0QbR`{TMT%F{)UdewwR>IE?+-e zY?4{`DYmCuRtIZ{5KC!eF_bAS2VM?UwH&wb@{gnaIe zA@pSHBcIJX-MP{Py}L7={I&BlKCO~|W#vjO+`B?TQoK+;hX~_r+muP;>$1AVAT{ozMpM^d`j2CSB;XevMNoFkF_PsCsm8TK0$-) z;`R7*ZCq1yruWmrS4T=%xsI!r&h&=3DQ_AA?WBFtJb^EaL97CdFP+r+|DbG;&c zJ3D5H#@}YP+e0$jRpA*CUw=dD*!?%8_TPWQ@X?J{r!o{)_CLOS{F;82dehK%mRjmJ zC0sqiQm>4St~kE?>NKTM8GEKxjEb-Kq{i-7o#$-J?l2GO?^6p!+P))Zg?l8{M$Wi9 z&&*Vn;k)V;m&u=6AyN-Fo?DY%Xu-Z*e4TqwMEmeBMg4NngS%_tg|kn`6(~vb+C|j7 z_K*n3{=W0tO_qiem-IikLz(u_al1KUr1H_l&RBocbr(CI%$%Jk{`R_eT!s1Qdb9n) ze6i^3btj|p&e76_TgQCy{_qR4M~D-@@lJI9^>A$a70?U+oqzLXuB+tV1^BD^|Fx2C zMLy>B$x@h?zF$Kl<=-0pc~k4&js)LCr*+xQt(6@a>O^N{tEXbzg)`8KMc%(24rzBR zSa8XLfA$+1lt4af0bRjE_jafa6P*iN-zh>39S*;8;XAo=S|>V5=i(7z`}ZCR-X=6uBJ2~MQtDiYc)5ebo-EUnsvK2jM$Ijt8kz`#K2CE#O4;(<> z&a12&^@aJir%v^~(d%rSd7IOFEZbX=r4F?mH%EjPrWS7TC0AEHrX0sBDrf$<$>JRL zm^kyhO=74mJ;XilmBii%75Tl1V_Z>-H9A7`$ zd6Dx)tX?KI)&Mha__z-~BK%ckjVLVKeXs+qN;=pPZ=2{8!y?+Vzi{tHGQ zQR}03DC4hoijmX4`q?v|cKGT@U6DHNq~{qGwbmYwP|RtA4?3KaAzhTtjD!msul(A3 zJT&dp`+J{N(@r$T-g~eE7gbG(-5TS2J3M>8HC!Bj!#gWHs~)YhMYJFOe5gMqHG12~ z{r$Ep$pf}^_p+KO&g|WUHhcTlh%@^(DX!#QF^_eA)$xziUjZvs8E^AF+ZtO#Eik59 znH1}L;y6A-)1i!c5Ll_h&oqXnef55_ueMdg?%8i+$Q!nenU41BHbf$AvM+kaw$>pv zHM<9+wYE>#)>~Wq)PRaMQMfVl!;d2GRVMYhFS;<^H8a8H`a|mz&Yc|_Me@#>ze%m= zOx)73(dLk{(^8LTM{0!w5;HG#-w{=q8%sdH<=z`o!}n|TmwlU&5aa8+f3c3wPvO6+ z7qw!1iQDrg$ES%CgDS)B=8n`?xGkhW{u`(yh=Gds&xz0^`TPySv~cb3H$v1GY=u{5+5u&9U3X&;j=jywfy1zPjPW 
zW~}W*``0gbWAt|T#Qo7b$O+E{aYFOa27jNOS2;m_m6)X!R%ebQMw8QAgPiYmh=!=+ ziW2P_?(egmX(AAB{V4Lj;m$rgmC5fTKF!tN$ut98)_Yk(G-jNHdS!C7%ci#yxT9Yk zz^P}?uJaFaj=;=1D%H_`e&u=FhSY$c(elAH%A~{CJQ`PosWshhTr6Myej=LtV9+h|`SqZC`!XjmZ!9@~21e za&Xf5TcdX?sSPJ~RcURG`^Tu%7+-w-2+-#*USbK_Y*!`^t|rCqs`~T@tQtwyMd#Nj z_Vazic8T^2J4NbEC!3zy(EMX-26r{k$}_xCg-jJ{+*XtChBQa}xXP->QY#clfE!X3 z$p2|4pMPp!M_j{`&V&x7pGex$k(zW(s#4#-!_<#sGBxH&=heXKJui46N>#5OwxuK1 zWNHfEZgD~rxiIcdF~W4`K=!0}W}zj$zfYuwS5G^+>8YJ(HrjX==3u>&)_J<>sk-AL z$Mn9ER)2c!Q|k7-G|~QrSDDlwSUum^8q@+pYH%rJ3tXQEzuCo2xYbceIzvQp&2j>-+l1TJ-+a9MMvq-gG+o zsh;ERycRKRfd^3l$YK1t9-O{$Z&y)|YAiI`ovYEi~*8i~hD>3=47M5Q?9$>al~CkoNEp>r(UA zA%7b<-MR?n+Y6imTJ6;%UfrL3S87Gr{hZHkd2(k*swo6>zT$(HdRMsL?pD?MZ`Y`h_@PopO-XFq4gbJJoD4oNb9tsHMS z;Wb5PMm8?=H~j52rRwqdOGECR?-1?bru~k$!e=&u_iZ9AV}E~)toe=P!?^pEv^{^G z27VrCN`ZcYIXG-f$EcWy11Y<1dn_FD{(>VSb;u{v)D#~rCL$u@xDpBd;mB`~hxYs1 z!F4l^f2?76!QXFt43ovth>#fH5_ih3d)>@E{)m$Lk@L`>Q)-@Y+FfS#(+-KDsm~pKk7W2w+o2&rgWo8W)XvkNKOz0D=pCJ3wf;61awpa| zCTN7D^^#GEL2;{k#jOa6`{9wL)R1nr--xuEPCoHO9P~1r=)1QnbK1#=pHT0O-jO%l z(m44y+Dc=iO-XyW9?ve$Vz~dUUb*0zAo={mHFbXV``>oUF~^DM%AR~|`OziCJT~lc z&L?xfp?sYG#E=@_uP6b=d^`w$Lou#Js;iM9SAR^Y3T=52V{STm;S(C>j`l^B;WdzC z*MgTW>Xsj@D_DMCCGk_9Fz`M7cj9~I@h4i1vHxhJHuC4lnJ*L4W2fW~ zOZ&ISYIGf^X&At;dJd#Oo5#SE*tA;hFBaXJ%#Zx#z~p@6J8{ zkhnhdcu7#N+;n>0Gd=sv+0~M1CucunNU^_pDGNI>kKc4^8bWbnVEf(2CUaDbxA?Ah zhTY%!RmDr)yrA>V{n3SXsUyZiV}i~xDc&^*TEs@#mZCrR@n7}zhT{=wBMz3Pn)go{ zJ?i6UaY8lL&%ZXm*3M;T{ttWa0v|Y`|aQi+OftX6kJfT*CG0Ge=Jv9(~W0^U%G2`yFHO5cruFEyaMSwoTyl?fn5 z+Pc5*Gc!w;MXl}oem?)t@ALZ;&d#}?=RD^*=RD7M&dkI}VnfoicG<*L<)3Jkcbnv@ zQ|?{AMxNBh@;C6|ihb&wt4!0kj?w(+5!(m7gfk90 zY&VWj#>OU@**EY9&%Tw_8g7Rp|Ae%_RvOM zKKBWiuWwChcjFO1d@2}^w6XDrVtjva&r9T|M%U zh#jZGvZ=NtBZ~KfFXjCOxiX_H)flOfhv#PAd77JrJ5F5K-LE_Q!gf`n<5~x7G#iJ2 zp2xpzL>$TU$S*=)t=%(VBkjv6(1#yCw|YBqAjM&+AUh}YRn4B13QDalF(@>KQ(y;Z z@@+z{`}eSB`}25DBHS{#Egr(Ic*#`3*-Q?xVxKXl)n&9m`mQ)k_CjTo9~yZgS)aCT z6+n^VxbZ2?`@3~kN@;!DQ2{I5n)h1vVpAz+#rfqsOfvi8FeGE=m(HbuiiwyRR!2j# 
zz7%D$Kb}jqedOI%!HvIjG1W#j-JG^?VP%uJxT&ksZj#^MD=s$4U;ND=c@{zjz4p^Q zV!wdJ>~Or{owUneaiEj8ii@odHUBGnvlnw#eU?ey4@=)sq0g_yo}th8J6=Det2$M{ zHU*Y@?CNDRYtdW0 zSP$`ZUB0k#GM7I}%Qq4Ec0u1f2HTuXbS-q<^ZdJ&lhrzsTdNm)`|?`mlvvwsADwMhP!e272cS5&+qQFytcRitxTt0jcWCF^VtX`xbB%~C1+6_lrTGCxyILfXf05ME&F2VBDm~WLiAsMzRr-te#vs+|x+nHE zq9xUb`je>s_j);g`8`W|=^6a{dUQHgnAU8?onqup5}H(c1z4bL;zGD^Jp#@?d;3y+ zu>9iR>vi6q)68X6-=VA6)@$Pq?KbCMFN^vYdT#=?3(i-c(f1@+Tnn8|!W(F{eYcuF zd-)wnoW6o;C<$gfK$zlRcbkhQLhpXQ_a=@?F7LIVWJxd6Y1E)c>_S_*o_#p3PyD04 zVB{zTwSL6(6}GDf_UzM_1ZUkpv6pcuJ-Tx8lye&_)r@9lD-s9ak}% ztxd09XlQ-D*QSptRO_&msx>>`K&gRL{~yma4xDnXG40Q(s3Glvo_M{Q)0|DUJpXt7 zKrgJ~mwRpT`koTqQrV!NX1_D2OCZYMgDmkl86K0U)!q}&g?r^}BE;3Tb3poFDN*7j z@WV7y=3=Yv6?PTh%jK1|(USwAuVTNM53HxyVa^}&(ZlzWO*sBuZv6N=KjJ&rLEjnMhkeS6v9fW%yXEEin6>$iiD_A0YsH_8My#2MiiK;%eEqa%i0^PW zht+4`U;Pz0T0R;xn;W3Txtjaq9E;W6*0B(JyHc=fm=d-Rx=&>H#F4`%pm8T!51|L?v zlxFP5J)q?7T*pV9--*#zs2Dx}zk|_N*xx2}9()@+hji9#;Po~_=#NGr^kae04+cU% z3J5))J-d(B5PDPgpXZ|-5KqGB`7xFX6{F|>6O0~QxEP~1$@7D(`FD696(nXdPZ<5( zkQN=J#fMhU{YX7ctf`Z!5$9azbd0Kl%ukBN-b2DLVIu;gnk<}e+{Abz6lagbfY9q<`I_PAj=%t1Aya1HjC75SuaBuStk3bq;l2Q2#sBr>d?}0=$7_f&Ck}CE zZx?_XzxoZ!d%(+yu##g3xZlR=$IcvEdAx76lk{Z5x{s_h$#VBmJpD5o>(wJ6Bed4x z-eRl+CtzuxX|*)s`LgkrI^nr}#sX;7S3s{xJ7Qf-IgWJ?0a}kQYvjGEW8krWW;k0~lEO6S7I$?|V*lG|zSI@g;y)6P&RXG^({FX$r>= zz;CIb(Ma~a`5eCU)JUl`TIz`Or(+xxJ(({yISNMsR~}`T3tdCkr#FC`z?IX0kfgaq zaCY)H0g$=Ow7~`n~B5DR0B>%1v{pyZiZLf{fn|{@<_h|DLWTzJB?|1#zZ&Q%}9zp3s=MF}6arYE3|!dBv)wImb*5TrZu6LPY|uQX4Uq^3B^8^b%l>1W}x)suFLWhTR-h%m1gPIQKFainiAdx0^NWU zg=7QN;yTc0#t8%ABToGNnGZQ&%LZ<2N*^a~En&c*OM(WS2_dFAM#G)J$sf#%YsDQ;HY9J@(M+r1Jd=SobEcAH?<^ll@C`U?v8yL( zrU~ab%38f9ePaq%tv~KAvjacppcNah0sib_dw|>0KKP*%a>|mna(S_Bll%BW6wq3Nx z(5}Psp7k~YnVMYc=`k^iIjkVfPjz0r;v{3mnSvEZK>0CWlt!#L*G8>4S73&O$C^-z z{*k_TKg^1lN$Q=y$n|i!vZD~c2}7xPnTY~WZ6{D|pv59#A-CDQNSImoqj*V$IG7tl0(K!qjZ;<;=hQ9QQ0C^JYkV=&SeMO=wI}cKYw5J@nO! 
z@6y;1ZK*FqrBpC?qHY_gW7zFOUnT7AUt{g3i-1f=_AeXzMR+&({&hsyk@FJmNKgJf zWf##I`fAC$>5zzWFG=ByPln;wfK`Fz{X&WPIq>ORtk|^jQrhRODy4RLiHmQH+>0M^ z;pBjm7g-PP7PyV_NZ>CAy~7)g(#&8jv&(we3(Qn3t-^EaSRMYxHP)6m+vt0uhI(Bb@CBS|GD?Px>DYiKNan% z$o6-&H&JO}-mBPWwHdwIrBYmWzJG7K!43I_9)=m zhSn=bvD53FZNRGs`#w?dZMBz2zBWo34Z?FOvh8mTjT&;4b_?@KmiMT)6**HBd|OcP zZD;`szMbY~`NP&92fp2QY>ZExgH>slc?nk}-Go+(;fiizl+#XHBW5{a+L62%7?Jk< zbD+5_MZ>g-mSNaE!*yP@6KVt{5Jf19*URnBoXU{^{bVcE$^G}j5u?!x+} z>~YBZ!P`SLEZfvNxRG*jj-#*!XkB@>bMOUJn^Hdf!6S0{3F6|&o)IVd=lJ92#sSTa z?0w-0!pXS?Cy?@D$*|cd5ALCMLSNnbPJg-Y=S%E~@Z1=w;5Bq2AsKj$Zfj{X&5H#+ zw8|!K)NNJwUUdWrHCDCd@6*Dd$wODpXjZkr@T{EtHLZE?{`N!c+GO>au{REY!#yuC z#*LFHz+;T^&Y+3TXDw9DXWhw07-dh8lSW`|PHBEes@#!r?vP@MI@s68d8BxGbZ*2<7RNmSnIM9B*`fKO#C! zLqr{+P+qZPQ#0YB=KPdzMMX>ctn6Z3wCxAsqDJ{8pbY)EC|}H9f{TXZR9uuV{(fAv zj~UYYanV7*MUAqfyB4@;GgFaK3il(UMp^D6WYj31?IOLd9~m{uC%d+r2pbh0-p<1j z|H3$t3FkXU{$h)ymtHJOkY1dkp79*eTV)yq)Rl1Rhf9w^Fa51@mJ>G9*Uvm@7OnLo z>a*NpJx)?);Z#)8Z~L~31yy8QJ-N!%YXi1qIz1XEHBG0-U}Z4%j$;GdgSR(If(@rZ zPmdMj-KH-GR3^Hg_Ux9#uTQ-MooNKl`-psS?XJ?Ps0K9x1(TK}^s4F~ds- z_QmXcx7V1+PFJm3lJ;3woXjVtUo*A*<_+)As($(>t77rI6n7<7q&OybHbG+s7RAMZ zTGWkmf0q2ZLGBTCO#W-rQaKa2tuC(oe6=h974fbhRK&vmV)%<8jicZGKM~;^B`%N4 zZm`QbHppK98+3~kc2OK>9bT8?rjtqR%ns)U%#fKfAD0W8;l(D(^|qF4L|v}?4cz}I zL%T6byQ9(WI;GvaxOzjqVGN_+Z~r^?J!wrS{Rg-|Dz)_2bVLdNRrx)}K>1lud2wK= zT*9c=m!tJLE5_kHN69$LD9AqU2E!HjI@D2igW=o$x{$8^?f%vrGIxXSLU~;{?+pf> zPR$>KxZz6tZkF5df)8hN^RM;hG!W3~0`~e)Ke|)mJhTOWz=WHD2lAyEZ*Oi`jbKLdWbs1}gtQ2XPbVAxM(Y*YHKUc8_ z=T$Q+^)n4vZK~y1a|})#ZSGqmOfkkDo__A$(czw7Q)54UyT#<2c5ZlAb^K zOO-XxE=k(-69Ya9CqL+7g-l-N%H4PnDe<68IICaEH$x&F*>00ByV0VAC!S>sv+D*! 
z7O`3l6J?VwyPnGu&T>n|dL~bB4QR-8b7Eng)jchc-N?yKy~Us1NGqEKd928v>CQ&Z zkuJ_^trOn<02V=}J9h(gtpXq(%W=lUMwApR=SDwS@>Q!v%0WH(KEA=0li>yxs4>R7 zrf<7lmlN}7{<<}NI~+^pJNdcitIr8pmXX(;TlY%8Tz}RTBgb0sPx-MNBCllt%_}=r z26I+&Lsor;+j3TznY7O2P3Sf1!Ks8#`*_D~eWqIm`u-dffA}e$_5RKIR1E9=Sv9`4 z+M8MPl%6|0P2YJKy}HE^oOtJ_L#D;xxu3htN8ZHTF&_I#3HA*A#Iw%(Uz?a+$9!X@ z#4VPy^*Nc(rZlHX^MX7#-b1^vCjPJ z?_5%=sU(BzadDaTzv`LD-Mj$L@TOImSkHRKdISLu*0x~ZXF?v`uWlvMsV^nvn~}N@sacfzQzg}eR6A04={{&_Ii;2r<=R{^r!!AfG)yo|ucMNwoUKXtGgm&TWv8r4(P6fXH zHy6vd%Y81L^iMy@)+Sf(29k2Dp?`1mXJ8bk#h8xVyZbea-lxqpZrOEz_|o|h%f_f) z&ayK*lQzAMC)zTG)9E<4MGcVI`dsx6zgpvzg_K?!?2{8*k$mG-t~uefC&xQHkHLB9XVhy9zK9Gf3?Q=Bb=D;7R4AJT`v=L%C!))VLO`_ z)B|-}>9m}6Vuz}B9x1~bjJ3~T#d|BeGr1~X4(@`+$ar6(nC9LuC2gW!Q6H!Ju2p76 z;qJ2LbV-kO?#YfhCvBKN`tv}q@* zcNJFk09*77vbO8tpx!!s6m^B`W=kazAFy*(s!Wj@f*p>5TZhJJz^iC4C`eIv)Pc>Hm+@I8dqFD=}fEl`s&iy9uD#px^hz5MBfbQI$vU*{%9<4iy6{LpQ(p& zv0H4TpO`eEITrq>ORR@PIZCUk(z?pmAb~g&l-amL(KnLomPxS{gJ6dp?-uTg zk#7vr-KQQWjq1d-!P{SysJ@odA2{)>Rv|ZDv8eLT9vi&Fo@y+u!rJ_fGr4h413#K8 zK6>ouzT@4Q$=fESe!>-%v=Kk?REo1K4bKe3qI{9A2<3~4+o;bt;fGjm0_BoFS?Aj! 
z-QtTYN;$?A&1n;g3J;lvt|*z%oHx`}l6xozT&zto^yp&gJ!izhNeTzom87;_gViQW z8uVPRB15-z@0A8^{fi<+h8{k4Btcyr%s zW;f!!qGHmfLGD*ioorO+5xvqE3Q_*f<0LzcxVQ5J+2akc$_08(uowu`NIWNK zy6~J}j6A=qP?@b%E^zEmu>8O4ovbnPJ>8eS%QZ$W?qba^J%n#+Y5yYe0^2WK(o1E7 zq?e}RY0#GmF~E6}cw+@t--Mr+KFx|NUd7pxJQfeEfK~ysc(8A^?_l3AeL3!feYHN= z<0BL53o2LvC#RyU4bO=3zzE{p{OQX@J$|ncb%-fuCU?qq)EXxM+2v1P2`r(`!+)8u zOvMaw#EhLu3Eh)uom`z2_ZpmOCvt z4ZXbSvo5pDTQVk0o3&sgf9<~02It@gJMO3rcH`;8IJAeo{#Ck5#Nd*oFV)GwY~5jW}${3 zJmVaG5q|Il@Pp4T!Viur_<^q1tKkPD2tPnA&i9C;`0!sl$pzsDbS3?@tdr2DwaehILTZ9gHzpJu9K@JRd`94>bHMfD}MgS65G)reK)R2SRI&p1^^q3#0 zdmfWNbsg-h@)2HeusiJwLJLL!Em$uM+7gWy*j8SO7YtuXc)@Vs1(_ONkg4GXIW6`x z&pHjV0IUE{KFD_hE3krUmaqo8!*B&Hp#IZXsAvJ=WcD82`&99Q4+t+Xt9Zfh4Zkr`YDAl zgB&HrS)S`Q^qNf6D<(hHq~x-t_?1$QWWNcxfz>^)k5_Pm?L7u|P2S9)C&moyz#uOH za*z%Vr7cfWxM=XD?BMX^a$1H-G6FA{*Jl+8wck~e*3gHeS~0$4=s~%6{65{<0DIVw zw{@8@tucS&5e+l=sbSC-=udKoldP!|&JhY`z~tq7bHk_s?6q8=2DxF>04FMP>xQ6r z$s2GdVOVlKAqTlY4xBMr^{ltSX=<}hT zK{p=$x6zGqm-Pa=F-|IU+lJ5ZhueL}Nf^gjv<`l_jE&X{7{^;t7{_G_ilN(JRWOb> zqcDyPB~_19vZLRK!Z;EYj6=-6J;VAOtZ5^Q{pedTj=%ekgDWD@$Hw)(-5)S8X zyj?>Ja@Y1_1tWkJ=-Vv73RI-PN=U(yEFcA03Q}MNQeY*dV9D9eTm>nxMj!=h?%YN- zKfj?Y#u9-Pl;Q;N!R|tX5ePw{h7=UK3&kS0p+UHQWGkTrh2Db-Mv$vv1XeD~$BLLd z9A`8Q@)tqk7WIZ!kIeR5X%S+pgTKUG2bNGIh3)PkRhdzCcb0o#gK5{GErsn_8w#Oa z*xcA-U~2+QJnS)DKZC?wPV^NP^YPvd2h6CB7iJZSclh%ZK&q^vPbXn7@iN z>bP`Wp8QFI&%9;uW-c%JlO2*}lWq82zQLQJOISY#Y?LtH+s``3a9VyVzpHwxW$Ji?=39aj>&>d6W9GY*)2Cl~(M%8NMQE(B|>! z+!+2EybEBx{<2&tcx(`z>%O|s=VJOi<59d9!$&n!+zu(nC%$Caqyrvh8?zqgnm%?K z;NOk6P_#PPh#k@xsbv`UzpJ_8Dfs=mbkMi%rRp=Y!TE8{KAc{$CcoBGlQ}FSxmar05pIk!hyNZ2X#`)b}OW! 
z56_)%Z{7pzsO9v7u9QaINc4Ib;jh=F@}=ChY!t~%;Mn`UEN+G*!McP@pdOkn?TIl^OM9iqfx2X9zmNFxQ_D)J&4f%mzncD2pD62%;!C=oRXRh@s#x2Y ztAV$yG4ntR)&OtWgJ;c0yqeLOym73yEA}H}p|1w*WzDxe!WbcBp|2k3!`-b0Rz$64 zt-IoIP02X$85$f*c8_a|>3ND-al&n!^MHiYsIlOoamXtN`xbzEcX#U~8=kl_$n(KH zE2NdAE4>X*&W_P3{2D4vL@$?kb5aeZ+;yyIaGf7-v-to#9JFPCv`@MV?^rM~vWb)N z&YQuTZ-*3M#&!?7Yc6{KLg|)a;4fApLI+syF6ZXHb!_l@yD{TPL4%mf<)yZjq=6zV zdt)V%i4D%dTXlKAC};9|CwCReEXW4D!w{>PLFR;Pn)GNeRUOhs|kGQNe+3>QUxlXwG7}G)Y1A#O*_TIXH9JAaOm_ zEVifGNC*&(=@ey5*Ogp@)2ol4{p^(o@Z<}Nn<~Zm2vNa_A=vg=Kur>dP_2VEWE|#j zCk}aVoY(5rk1Ve=Dj|1es@GeY>Eo^);L~3l=ToKF;HylVg-~?%}HjG!^&@>;&i|r!hg_k#-%E+XeKIt?-a+EsL4+DSM z`9{r7)AUD6b1RA}^y^G7>(;S#y!^{k`o;p~i$^@Zc*M((o*KD`F_-p!I(j$VVVuA) ztTp`RV4talTWgRvbzS^^Iu7rr<7LMw_AkoServhXQwQ$+?(g*&+OpPf|OoFY{=xl7``Jf5z6s>JjWk9U<=q)I%E$}8{xN9$RuC+SdXn^LN> ziOb*H45}up^u}CF@8p41tPoEQBb^ZFwB{sWuMF!GJv)oHS5$7x5~Rw>hYt&HDDPX& zi*M!f$7y{Zb5UIaT9Vsvcf>nTnzc*U!^J->4V2QVa?ndWL7VV(>z&dIJ-T?j1qYP3 zUHH0B48A(#1jb6=9b%{7bXp5jlrQ}7a`1}}eOaQ=L^Lw_1Lt>BC^VG>88^kqK249m>TA364_>!cYs-@a(c13!QCv6b%nbA@FT@U@Tu z=CgQ4_xslky0>dQE~T*C(px9&;x6BTwS)K37;VF&xBKXe~GICQPHdenT|>QNG2J>t9uW%V$AWWhdOfR$q$-q2S$@6xq{-pc;% zE1mjPFrql^xc;?ztk(24fK2ZTnCaY@S1`w6>BT=d$@meMZ}8GQ+kz)d7D~N1?Lj$p zUK73$ru5Ak_9<7q>7CAea)krh7U3NR=C-N3o+?TH$eRKTXu)q^<3>LPu8i?<`AKc8 z2+#Nz2(7G0S%F0nEAY;y@)VYRWI_6C>e?$}1=-m|bY>i9g?6t{#q#XlTQkbj?#-B* z@f&=(jOfFil40qf&m!S{O(MM=VP})kSDvyW)#&r$cTI^N@gl}yXVW&^vne0yDM>oC zj?PeUe2;eH%9|VY z)C;aPMZ$c&^igLmmwLA6F;4)UIeksui)GuTM&H2VwW25>tMAJ?I zy1{)t1ArUhOm;;>NrIP7H5%B#ZoU3sH^+1$b60@3ub;FBeUb|(|a`0NzU8dB}OEb@XQ~ANUP-S&V(ors7Q02cc-g_K6Qe8>H zUuUJILe}mC2TD)6(&Ad_t4ASQJZ4esCqh#=?TV9xh978#BtQ}=hkzM30Pj`c@)NZ= z73ZZlD)6$X<=xq&vl&|}GSm~ytmwR#y9_9-R-Po8O5^(t%8WFkG%xEew4>O$wj$7> zzfeYKI(%4u}yc@E;yPS9HknN6L>@*XM8 zs2GcPCnJ6=Nvx^WsJtgw2kp#+HwE)@a!{WMEOjO;`g`lJROB1CRCY|_dtTsQF5wUH z@@k@LY@n0nLN4Y(nlFbmmvfpl-$;FU@NlQ&W>tRwXL2=C;mE~uwZHqse8yxrfq3#D zt<~VlgPSJRKv$%7i1-qrD>|N~*c^Q6(>4q{&+{c;x5h)4HF|x|YW-UHt4k~&>EK^pG5}hw-hnjc#_EZ82`ifIYJAIsRv|ZL 
zczh2J{m}doZ{<*ea?+PQu%2`)t5??Q5*E0@xbedSx|r|*HW_}P8&pp+XQ z-^<7MwI=zV*yS!6>aE6_uEQHgT%gYlKZC!n#CQn%ljBY0+asKVzF34(dX<_@pvKu$ zNv$uNK@zp>S0&tE74839+OBM3xo`B$d0gi;pdN&XljXFBtDI!R$L^ZlH=JUwcn!SG zh~VJL2oA3N7aSzpAi+C!=u;(e#DD%Zg?BU?OF?gG-*ASuFXCV&=05uH5HFQcFCN;i z^kQd|I+Gy*;k^|SK>H}WU4IDk9=+JvRQMXpeX1n&quG7AECu??Z$Npvw5H?>@4Av4 zFYS}Mo)yefl8l{lLNgCPs8dSQzDT85UZIQb_!b~gP66pPAu2fK*kTn1W@ z`}_7voYudvS7O#(vR49k`Odu(`6Kqq1zOJjy>dFxpy<6)k-5jTIrG?AWzIO8p2Ac2 z^LIT7dzqJOx=31{U>jLem(#A7KgO)0Z=r(I-&?LCs$7dw?z`XH&dX)p7w^sV-gZ5n z4*z^E(2=ssx(Dp^_*sj)gRunxBV$uJn!GmvAq4UA94M=$ zCnKFPPap-?c(i3?=5|r=1QJCb(G{?;YM)6kx4UwkexBMy+D~!T)P#Br>}G!=>ejRT zcFy@kJM(

13&a1oR_KT+>eZ2O$3yG5BOorN^H}S5_YQ1=7u&$8Seo9A`{T6E=5n z%<1EqvmN0e4oS2-nY~?1^7w(B2Mlahz|65HdGVQc&QjmW$<@9TupY1kn~yX4M{%`- zQEoT`oA1Y={Q|v^*KQu}@joDjPH@7)4`aC<(2cC!fLH)a)`hxaSK!eWxM7zi0 z@}~%EIz9-6f{B&pcAc;)kcjq!C>QRNQ2-CAFW^C^LG9B*q%(GN08e`bI5!)Z#x{36 zfqW+7ZzUYs;zjTnju$&;*)fA*uac@Hn#1xCIpa24i{v?YxdO;?)7$)b@eb zE%j5`7shz}S>S!5sB70Jx%@ht%a_UK1a^QX&Q$H^Fs7X7RL0kUe%*`uOc=}OQ5PpL z%mH(|Nw8BMj4619{FJW}JZ0>rzyRV=rOKmK;1TX$;)y>G<^%CYXbBIp`4SJT>o`^9>B>sag z#vltZPCvxP7ddV#z(Oua1dR9h2H0I3^1$>X_^Xe-5LbAIjG}t=unB?jXw1 z+%sXGbIcMDlzypeM*?`6;P3>7Q=iK{{&=nLSVL1|HNQ224~fV>5AjsjQGsZ~8Yt9t zh>!uDFw2+bou-qYX)^sB?J_n`ovY<)pI1ul?Yi@WC^lNb6r^jFcu_N#acb%BI>zA<3k$u6CSSczEEfg+?=JM z&~r;dp=LPm4QTz|P^cEyw{T6F3EgtqW!))Q`%|#?o17av^bV}=m6&gK^u>)U=69U3 zF3k;G4?gM2v94qOrD7g4b|cn&@R4~Si=Yv7eu%z+|3RLu3{YMJ)@tH4hrBeGs4VJx zlFEFI^$#)?%tTxatqWJRTnL#QtBuK@f?iW%rf88Z_bI(VT0`x*;cw0Oh~!kVEsfyc3Elh#=5k$xUyNPHH` z*K|Cp$iOo`Gx!_XF0C~(gLm_#FI!*${Nf{$M#-EZVF65SO-~7WODFIx<@Fa(zZ-g0CFW4MI)}_2(xEm$ zhl=a?8>}SKT`p6`Xl~#NZC-syW8|Q9_NKr#_*0zxdBOcT>1)(Z4)F+W zG0&QgH1wBfgdUMPfK6@}5|{ctI+U^3_`AVZ55||~3eA%PI+dUMvB!niGrKaza{~ux zjM=IH^^4YkB}8XbzxGjooF4zLwSIvg8Xx8d9PmHHLMMopWb`4o%;SHk{OpMoC-hYG zg9iTETS$0bVgkc zTVq(E(NU(*O8W^5_C1fjp${BO4$xR$g|tVseBdp{YO~TFjnxNQys~CFJpNHA|2oQ3 zxgz*!k2DF83xN$JIi-HM5N38&X9(+d#NY6qQA2YQGV8>C7v^ziJoa7CMD+~@4X0;r% zDoi`c7|s0;wK;bHb1qE(TPXV?(Tws$=l7L=R4ZQO{Qryc9p7LL3`aIjc1Ms9^ZgG;kG`9 z9Gn2ZOOfv-jI|y4?BFNuccjD6ewG9t+K_%X(ut9c&eKw<=Yg6q}SzBjFesaLajT%2_?MuIHjj|t1H?y8H2i8IKIiA&Y z7{CX}tN(K36@@kJTRRNpG+ygFax|GYMacX= z<5*86);enEiN~k4r?}U&i=H*@VI46U@*abEPHmad{(D+`SeUT>%nJmtCeqkp&YII~ zDor$Jbw!>)thRrne5!~!VFaCIcjQ2?1$ytp{3HE)!`WFx<1;iD{Y>x$Xud5)A4%_` zevC%>qH-!TFK`Uw_P!QRdK%R`4)xhlZy9(*yqYf7bi|;Ja-jzL5%?7v<)<}yk5-Ok z&V+TQ5ZBkVxc5;n*`2fo*J^tB@4%ZD$VfPTF5(R~TI-(iy^FNZkX9zvw3`rTWc6J} z>?2g3@)v1!>_R?ji{uSDL6GRp1HDC{*NXgMd(H$tr`TZkBE1W;Mr~|E*>J!2fzO-K zUM%W*0(E)Iw_;r82WV`Eq8~1;A2e^$6V|ksK|Y8UTC;3;4ps-5_yx+-+M~o{jf~XU zSD`Juu_Ne=NRM2f9!EOV>mbS2-CA2vAIvnrs6L{bXnqL!pymZhkB^LBig@TqLH8ua 
z(ynLhI_QA^D8;M26`V$@T{o*>)OKV5u6n=4Or#0nn&Sj-h8=*yt~ zinM1dx=L!dMi2I7vw(VNj_%iFybtTtD;Qfl;ziV-gZf$dnhwl|Zpue;O=a9#{*%am z1o>lu#bB#3Hz1zUNq#zz2Hpe@YVDC;`6tbf0Wg<}D!+*LcZ1J%Elj+>1L0;ZZyw4; z=Dh`JrD#JyziF)A!MNnix^!&}>jH0qj>DK^G*&dnfJvBXzo-6$X?+KIPN5&$qvBsj z{Erd8Co29G#6PFe_aWx<#~RP5P6pX51)pkBCS3nE9 z>UtsAVb4d{V)ZD`qpx#O{w6p(=9CSt8FQ)YI>uhY{cg+fQ*m*#{* zuK*Cb4(am{ei7GrJHAB(*{Ga;S+}rrP5a1Cv5!H{YCxAVzo3%@vCa^`Fh|UlppDw5 z`9R~k6toP!Z<;bEp2f5ISnm|Sl8!aX7z*{+QP)RF>>~Tdrqxq~e$xI+b%p!*BdvdF z=vyY7_gcmVU>zw1?SGpa3dNv5f0~JP16VZCO!~_*2k5n{`bms~){AGc=Ef~2Hj!Z(1vu969M$K zT*KIk**}~nEJ6xR@ewq+e*jI!e?e26M$@C9rwH!n;Fo~0HGxmWpPxmvVG`G&FS7%% z!vQywUW_>~y{Rh+@{oi6Qol(C(jxkNCKh9?mFtD93#*&DK7`@k0$B>*8$ezD8uSl( z0Bl3p4}{xmV@24_goWx@kv`c!R%<%)?%&#Bti+nf{+o1o(s_Uf{I@b@NuZHz0HTTZ zYNCtB`Vt%O9KG0Zm0@*`sASZm#p<^E?bphMNn8VgXf2Xqj9d^+Z z*u(Gl@6pPdfXlmBO_zz)w0EN3a$HIOA7f>K&k!d0r*#U?F7$BaEtnVUI$%@z34VTGT1g`bBN9uQAx z?D8}kXI;Xt{oohT2Oes9J-`v#;`{j!)F!U*(AD^wSNrhgP<4u#g?L`#b1_k34u`J%nM-~`gb|{R|!7mfS+Oe$cBE-g?&WxM~A*rKO=3W2~l!;NxvPc{gMXx z4fi`aLUuE>e)nnp{sQ6e@AtY3{U-Sc_d8qZx0*(L7HotwsCz%g2;=w=@LF2$)^)%J zZqEU4`paW2fQ(bVaS`#P{}C+-THUlC6HmkCNOqa%g6yut8tw~3wh6fjK5#d9ymV$j zu`45N^RRt)2z$oM<6&z}W9*vgp->h4pGB;r|JyS5d$dREL|Dew{!3n~a+G|L99=A9 zDG_Nh+YcH1ZW;S<3S+HPvF{c$wjBF$SjMQIAKD>fmB8ejkTb}bGIt%2H*Nl^a;45+ znolAJ{1W3FV^il8t%>U*=2NZK*D6h}=3mm+{yDDBr{_9i1nB(ej}dYcmMicJ^GQuJ zpgppS((G!Uamt(nHbQd>m}YoRg~Okh0dtrV3W--Twheoc4f_D?MOEeMd?NdhQ*eUv z02;x1e?*Zt*m7pljlz4-+=zb7*7{Yf^(*U=e*FP!*B_L6fSoDwE()s5+RB=`EJQQt z8UVV=0OZ{cy&|mtG&n#Pc+c5sZz9@0j@XMnR_dg^hoFgp(}V8@jqkibdo$VlvjX?VB=B{l=)y3A;w`UoN5dQhwqk#L5~l8oXSlt)n)qsXHacnkI)r zed8D#O*)!(FM#BGr~TiPZ^hOCkDZ{4WSsaMX}fwgx=BuG&-YwH_qWS;ScYv8GQ4bX z0(e~>2xVfe0xpUD`2tSQqfZ9($&Nk+Xn)XjEilNI87q^!)3;Qdyu4->RLEWM-; z-!3aON5is0a#1`9cZ8)-=uzA-34-tcn>7A3ibm-N(pap~SUFwQDc1oP*gOg67sfF5 zGtd~8-T%p2FjJ!~ycT3$LffUXTNEL?B(rC)XN&_~6F^rZ_Mra_y7Hsw`WoSj*MLju znh>={EdX7MOL6}OK-oyp6|L9lBlJ2Rex?2>y{;{)UA?d$^LGD5c@MyL9Ex@XVPZS2 
zIRn}Kr1LE&yD;1)&0n49YPY~%02cKmZ+oU)XIFhzDr}Dm$75}TP5LCqIenzFUtD%;$LlE<+nnQ+IkvTvY)_krxr_Nvc4waL z;(ML%;9ee&`Tr<*_c+`nJ8&!9AbAM$OM?GFxQqB=BfiX0?lWW0Zr6`q*Qu9l6u!(w z-yHveBfhl2QTYj?v!5pse(FE^?ly_e_25}Oc-BCB&VNVsTV2<;c1}RQCq(pnTvWdw z)%txc`aBWN9?{l}>p~$OJop9XNeTRqz-6+10WRKt;l3NmOL*>(ywEJe0%3bU9`>=5Q*}MMFIdHq z4g|XyxF6*+!B*tqUPJw`OZ@R{b2sgG6gE|sDyMea@`-1LB_jefflXwyL@nDyc zE>HX=`{Hhmr$kr0jW7w)Gv^R4VWzVNgrgERLU`Qm$V<2-W0QUON)&WqGtm(qfAt*ZI?&f|OLD9wKsbt_+>SFRNYk(s!c_=Q znafZY^t=M1AN&jBDI^O-S2UhNSPFC_(^a4?5=(&%tds|iVm=MNE-PTHPX`?U{r&>$ zO&G^26KE`FDfcg@MEFVO%7m5s5*{~N z8~Dol4rcQNe$CAR)&~2^S>N$%KV;58um(N#S0Hgma0gv(kt4^hSTpzFj!J#mrRi z%KK68LSy^N%2j?{k?qi9?r$^r*V{Tz8Yq){Hw8{C61<)!evvd&J< zk=>qr`%?d}W|K@k>nEF;pI8lgYTCJp@$C(^eVt22J3EM`8pmpXl@)$rrEk8)*|Fe$ zXNO~!vtyCp*%8SHvNawXg#80@NNtjCzd{Ss`5ih_rRwtk`s|JeYcs8*cR)_=g3HOn ziE6lOAusE|i~-nB?!ZCI*~QRJDecPsxkc*~wjMKYgB*cZP+iTY7WhS)pU`XA0)IAR zSM3$OSpQ}d?qfDVw_-k|+ku5cU&6cz#@hwhyU1^^@!8n&E)Hp&IJ;d$I@#dK&!@klLa)6BzH~ zM4Tl+e=#S!NMAn#JaruA4e23~^QM4!f%IRD@&rVri&5!WNG}9mYQT@bf>$kSvHgJ}@7L%aV0K^Jci~)NY4)>7RemF1OF}N>2!M{{5vE3+avs*2!BI_-yPwvjqp1o{8bVD$_Rf& zgx?Y2pC92*j_{ZA>RL@S7e~a~BK&y~elfzI65$sj{H6#$i}3d;XQ@>FsOPFwe^*4j zFT#H`!tagnt7kvevMmwudm{WhBmCPV{GJFuK9$u$dkFR29pSHy@H->?RT2Km2!BO{ z-x1-TAK@>L@RvpS?GgUs2)`}DpBLd5Bm5~5ej&nditw`te;@8wh5IW<_`4$fz6k%( z2){SNe;~r&65-zy;olkI-yY%jMEDyb{O$;UZG_($;jfDDS4Q|NBK(dB|NIDld4#_# z!f%i8?}uLYkqy|;=uqfY=pc{5O@_;Y`{%e&XgAy%xC*%I;WFT6Ax(rk1%Dsh2XJn< zd*LLwzmE-tK8D)`w;JxpaE*wkIaFJTn*gqaLI6| z#^AgJ+^^uAaE*wofO`yX23!tYFXBIhbHOcuyAm!5?r-Ru73nhk&2Y7Fcfwr@cLr&H zf!hIB1GgD*OW>}CGrza&(lySzrDK9`i(=o3 zaqg2e$NH~cB^=6hBA>IP*yluECuGwTQ1dyp{NZbh5+*xN!C_jUgSOy|HS9i<7d9~L zHac_0!w$>C`3t=Vw(d^YNlrMjiJ|v$Zt~xvoabijM*%D1h5}DW&0LE3i9QkfC2kYr z|Ga?g{DWmG*(b;cQxdk!ynsYkH=et2{(mKCTG~^vdXSS8Rz$wUf7i9Ja z3kRBUcGG;dR#p@o?L#ddf3ef$ACEp=N(XF5g$_?3j_6}6U~7B)XB8V+^;0~~8p3vS zwv#R|!rq|!$f6BrL6Mhm5|hK@$8iMkU>-da3VRkjNB=>42M>2iyGREP7$lw!(|Aht zSkVU6pBGs_+NAn%%PW{EI@-q%#JM}1^G~@0{n9&?v+LAuxF2+$6{m5Ov+ln`Kd4Vc 
zC&f>temRj2+M;QS9N$cg@2XI!fZ7Z9dnld}EY^dMXrJ1KeT1*ru>w)NQ=y4?L$+8X zZ5WqSU@nTyg|l}-JMxN^*SAxi;vYNx9X=M;Rrun+!t-_znM|0|EfrgyCJbGA9a79h_)kDu=8 zl6-GNS~bEdHZTv*A{yIOS@}y~rHC0bf01;U6vh z*HXU)&rinNuJ;Z73o!3}(|ya~sEsttU1<3Mr~R_-AD;{LIGs&huEcV`ZuIgjSE60y z;OF4y;CCe;ehlIh5Dz~GKL@{S0OH3YegNX(=iukycf})q9OB~<4?hP#Flf;ghxp48 zABTAOIrusFU1r2zfp|0G;pgDz;CIC${z}BhA|8GYehz+@3Gu~#KX_Q&%y69ApUB^8xRja2R{eDi%0wf#Pf)UpM#%+-=#12 zJ8g*9BOZP`{2cr)UAbSRcpc*57bqTn7gz2#Q9Oru_<4$l--R*KQ9MIDox`Sh_zAM;{*3=<)nV%QNjXmNbquZWqQ(8?OsvrH#{tG1A88!q{lza$!ug@whM+ z+BjSo1C9R|c(3vO0?##mU*NUI=LBhhRVX$u}XK0pF}(%YwGA*iRSZy^SpXAEl1T_R}v%_{$^w z%O-^Lz&Zdja#;sEfoHvgPq){6`Cdo(I|1P6ycoTzY0R5(5=%%Z|)&t6PlS5){|RM;C8-X9g-6BT|rD(s00uZs%TMui`Z3Rgyj z7e$5VM}?bbh0CMDrBUJHsBlqKSd0oMM}P8el`= zu7i6J?q8+T8btqm2UXYA27C`L`8Y6(Pl4@``wUo!59x3sR>WF3H=GTcXcwFe$Dt0w z2I!(^c}=uK=L?m7X}$A^Y-U;$T3KaccWT!+AogJ!Fvc>L&b z?TW`!jkr=-RL$0*TpX?f9GYZRiHZ`-@XihP zKj`5MqQxG9TcWvqT*LqJ?t5s-5=7+Px0o)tYGL@lznq$XQ3l5*UX9EP?^<}rV#bYz z_xdp-ZR1&t**MX}#p=v@e2a=Dasp@3CGpAJAT~sw!VTq8by?hSj*Xl+d&bDSsunM* zVk4K`Rke(b{P9xtXJq9PcpkWGiR!uI!3WjM3zsc>VDTLfExQZhMGxQi;IapBUASza zlJ$Y5Y7r!;sSp1IHQx8o$clvzRv_{1(m?_>$%MLTZ;Eck63@Di8_EMQy zVaGbysilQd+pKfK>atb~eID?r!&>xXzpM~5^A$}!f-iSTO=jcs}s!O;+>bTlW( z(fK3I33!z`Ky#!34)2ih`5pY28{`r}5KWI(o&$c$OW`zk$aygV_30?>4RACsDV*jW zIhuRgLv_1fWAqdBiHkLEt5hbvtR{1q1h$c;q>+Vs`hCn8=ek8}h%(h*c1 z^^?+3c~8SpnRqY8CSbElZVo9A{!kn}8{x=L`K$3ljodd{B*L_MaVDUoqb|xf!lmPk z_e~J|p}Ofwgrn?~PSGD{p(C|I5T>P*csk-?bvJ=kpTqkR__asNvIYTE7Ntvu4ovQV zH3~n;h#pEu{Uu#W^-{VcNOuJ3MEJEwE01)mmX364E7F-MfInKgmq2Ujd;OTc@OFJxfrL}|h{2=8QyA(X=LOHnDdU01dml&Ge$~-CIdU|HXz)?~ zbNRA7{lg`oS?*+OFj_M~l(>xjVguq4}P~8#G#jT?eVW=pY)GJ^T;F<06g!AsUAV{)gz~AUY$69vej87(`1! 
z^rM<0L)ldRUxVlug6LO*=)1Im!}G5VqBjK54+ha^g6Lm@Xah|M{!sg(g6JMWbZihk zFo+%;L|+?3$73vO57k$tqiH5C9@DnWs(SHh} zUk;)_38KwWTIp(eBZKH0g6J7R^tvGWfgt*kAo}qj`k5g5qLYH?v>;L+yzUqW=^`Z$X;k$FU~>_JkD|ZNRz9NkOqUdayn$ z7y5SV2de!+@wqUV+C~V94MOyTeHmLeU0|6!|DDLcN0 z?KPtJLOcbYBS4LgOGKNtNO_IK5eqxc6Q?6W&cd{L6rAz7$LMQ5KZS* z6Zc^6%IT{hkJn#Q?JkO+g#ILDmQck1MOH|Fbrd_nm(hx)St_54&D=oRJ&ZR-yu6u6Ux5wSJWj8J zz0&)g06&I6-@L9H-;Y2(ZIF+~uLQJnude)`Kwn1vBwuRJ^{Bu1GZ;UdPQduu{E=$I zQ~c}T{~a(|M{)i%VSL;?G0?t&m|ysvcGkl4zlQ$b{8^xV9;CkrBeapn-;DU)pQ?Lk zD*s0e1SjN)DWJ5c0QDWkxo0Y;KS2JckYDS61?1lyMy{F1FUEL^m=TyCxLeQgPwlc+ z9`8cJakB#P$B=(E=9iVn{|x?Z*5&VufoNML;KxhV_Uu9Z!!h4y@bq&{DF3}c|K5lE z--l5y^Z0QXUlnMd#(&zdjn&PkTR@-C$-@o)ABB7qc>bvv&tE+mkk6N>Pp{t&Kt9u& z)w5RO-w)vDq!WSh|38rbaF^g8|Fgi4Ht+)?Rrq^9%6lDybpfXzME*MqhLe)op_pTJpn6pzmXKl_dg=!^Hzu5ZB~na2-C{>z60`g1%Q(gWpb@lQa% zoyYlk9#8*0`uEi^=tEwg0sVXGhJZf%6z%J)({~$C-%g#p>QP^&PM-N_=%Xjqvm@fy zv(UHA>9|e8>uUi&QZU}M{+~d7H|xgR52)`BU40=KuPGSs;Kx1S zZx*L3F0-`?minyt{?5Kd(581Kl>oYAFqFi3F8y;wD9tt0X|r*9=A~bmk|Hddstt1dKv9}YIi_Cc1K4y z|2e>Kx;Oe6^rcpx1NpOb^26_ZuyNJU&yX{fe+Km}d?GOarX#-l`+@QGChGkPo!`RS z_bavcQ}C42Ch-3<^p}s*wUGBSQv>6D8S)=}9{l9>wWIxSw*=}t1bJQud2ZtQzXZK- z4{R1rzYPA5>m#sK^po0~34S{Bjdc^mjJ&%OM`$1NDc(I6)JY`p!e2^w^IxQ%-+}_FTr`(b}8Xi?OML1MQiJ{=a`y zp#O_O-;otq5552&uG^uW(@=k=V_@E;)7N(B<4_z#VaO@<8_-_+Q|ehLrC){qJ^C5$ z5A*gkfVSx5n}PBMWC!YRKzrvy9vXkPLqEI+eN)Qw*J8Z9{BFQLb7Ow4(Y3!1>VNdv zfWCSY>Ejy$@;U>$``fU~d3*nb_2YHuzgAxUI`lt6r%yiwKWFILTZ#E{@OZ%f>5cY1 z>*l1L*Gv=E!<+$iI}Q{{{TW*YV>|kau6q z&rBY_9_>G_TYvryzD!&qu#b58X{djqPW~rBuYtbt=SO=^#|Z2pUfy9d#xD*%t(?YG zVe@tRHXHoC2kZMH9={*sA=E9fSO|dRkp-;N@xLDF2h;`Z#kt_uGtf_3`3b1+L7n}v z0R4F##;i6Tr$bQ79u2HtS7H1d#Q1UW`ac2+o3K8j9;zR|YsIoZW(->jg~qpMtP0~f zil=`9=`LbDO67C|_;Ii$pdUgpKB}-@VpuEr&p;lJVt;Um_wP=W`}Q;VE`rC?cTkg{ z{7N}p1AcA>KWA_{9`bqt{M^FnT;y-m@#h`C6=$&g306Z)9P4+8(%R$%=^JBdFJqW^cI{|P)k2K-6_KQ(#I zfPD1!?cX4ud$9Rx66i(Vwb=~&Y!;E$Ly1V@mmgh>ffYY3`dNd1wdnX|z`#q|i~e(YCW1b=FTjsF6#UtY!2JIW z9WNdisBa|XUH?cxKYs{*ep?#g-%+&hlCHixG2Vv59qAYs6Z%6`YJ<=8y=39LD4X7PW?o 
zXKb?acYz#X0&AZSF(pV$5FVN&G7ARyAtjRh7A6{5Z4QfT0te`kkeok8W;*hON$y+C zqbz8(Y+w-(?Oy+xLd8MqW; z>A`v##5iU&G9g(==#{zQ4$~oIF!jwe8;nZ-1p_l%1tuiUkR#F59`_>!ZI=4c>DWBSeP{vDaC|IvI&7E6ANJ`S$WfI5}BNX!VSWDIoZN?PNpKwhP7s9 zWgEysQ|}AV?6zbUz|g?jp2-$WvN97j)0AaiUo9|;Y&J}>hz6Pz$?K431Kh)KV3KlY z0Olxr2JQv8m*IB7)xo_6_a+>+ABui^5BLGxhj1UkVc4@T;Gk~UX}GW8zJWUj_dVQY zI3HXmoPgGbz=gs^!bQQE;jk4@_Co!E@o>bX6u6;qsc=v&4AYlk31rw-vMF#_-Wg0n zHWzLo+#)y|Tmjq?xTSDViL4B6IULk4li}9Et%ti04kiYB7;ZBh)B*bo+*5G1a4*39 z99sH)T+A(x?R?mhx$D>ax5fNyUAAQG4f4m%`qZ?^i%xG$^euWZukN?B7n6V3xBFe+ zuF%kB`}?mO`uSaF-dr2K?Jr-fT-Sd~U7x2%9eMZTQ@8Ej)Ua;vvwQbF*L-UB^(TJ& z@WFMD-8br;+YMy{Kg!=Ocp~$5ng90G?fa%|wzO6@A1}lkUq(vOWIL z82PuR8}I2~ao6-=$$76vhs9JR&zt=G_a6@&HumVz*unSiK62HE!S|G#^_yCY-h zpO$Y}w{i0ukF{1e|M_39zGKaK>DQRv1D~k8^yLV5aaXOo;lm%^U4QOS|H0k97$pyy zmsarjf;C6K7qZ}HiF$PO*$J<;4*s9S>z~i*uf4N}3_rCWJ+<$bjIP=)k zd7XP^7fc&G$Tjs($ESaMbHlsS<-tjd*Zj8Wj{TR9{ru%E=?4xs&8!#^KlpH3SnlP~ zsU6RqS@Q9U2(dJ8!px6nmfU*G)&0o&U-O>r%zyZ$+m39$sqKxA9)C7#%o8`hbGH1& z&iB9m>9)uFNFi^&w{hOVlFwIm5BdDqep|OSJ$}RUZLi<>%tL=Ub(8IbyXW5G zy#3wVKl&lZ@mWUB_A!MU=02G5#+qM`U7zAS_wa)yGY&p&ZI8U>P|ENn7jrg#F49V9#-{&@a*%u?>+udTF0-h19OWn$T?G9y!WZneOvz4v}D8A?Ab{hUB9?u z4(_arKPycb(<;B(eD&T#m2bX!qRRPrMo-(1PtS^uue(^cV(^=3A-^8py*+<^TGR(K z*A2b?ievOhKf5mbi7n;cxoN-F z9iH{c{GErT5@U16$MS(Kqn0dKkY>L5^E0bg|6|-+wbfy-teO4&!9|(h_xsC>htCgP z^vt-0PYz#L+&JRG>T8xC8MN_meDn;{`fzvMCQofY(=CsDedH?Br=Jg7{?$wM%YM#S zc`E$gyn@~bG7e;U4lgkoHuSn){<(4G);{0dalBjRg8Mo@d#G~Z3g3IvC%?Dhkr8bp zPhP$AxvSU5&!6tO{NlbjZxlQ`D79qA-N;4S*N7tbK=tzM)^v6 zr~dY@8~$CCbgRLbT(NPc@rEBy#_esnFn;U!ai!hP{o|_l`~STq=Ywnh=eqOrzi2zN z=cQY3x#i>=*>~+Jii{e!@saYDAwR8sGxFM|kLFZKp~qi)l@rm`z3Z^Vi+|g~x{q_;#tLn6_xjW?y5Q6 z&qSXbv2liBta$9}Z)@kCow#RH=J9_|%VVQXep>$N?Y*9LjlA~0#OeJWIsN?j_`G}n z+W(_lZhP+KhW1U94qo%ip^zQWQRM#UxdRamaQE;uWIxaYQ>4pN2^!`edlsQXfb-$j z!i|KZXSNFLG2kHX*TZC^EAWwiHuA7i9y10wp8H4h zxNOj*m!E~}!NYhGc0W9%yS0bb9$GJ+fFrxV4D|EBfjj}sVdWvc|1=y-PWCUjB>10$ zqh|w<)<0TnXr&E>I|Lf$rn0B52hwvJ!eKJA=M@TjN6_@x;b`4toF=dS*#Tl3;(q3V 
z{ose0%_QI^;9r5Xwr+%@HEuMJ)<1d(Xq~2&XDX?jwZeo|)@=%^(v|-e=;3f4xOTX^5ElZZ=aLG_Uu-FrGc{chdmLe8 zr`!t6hSQ!3_{m10hisn&9tX3ZjextL#9)60|D$lrK^MVI=i!eb><#$IR?!|DiTfY2 zeF}9nMI3}%1eb{L)o`~`ARe-1=vfIQo8&(|QxQ)#5J0D{+yGtt9XdbRRrCyl%YeUx z(?>wBhI<;0p6&>@0Uv;~@-)|Qge|K)&GQHjJaZ8D3vertY+ZWj{bDlQ-{6;l z8{s}xV$?tPf?fbe&v=g5#`&My;U^pTNjM3i_wl$B@Xz3WN_;!q{h)EAtUPXxu|TqE zx5KT1i-enu6lC8Hh0~rU?zw@7J;QMl$G*T@;NC z=Ii3j+<%`gOz*7*5y}Hr=_2;Pe=nSqhjjx!1(yT21uhSco@e0Zz**q_3g?2G2$ur4 z0&XxIJ%?4e;@{$+&_4yyMD~aKh=(uX*vN4^M;udic``ZG(k1lw#E%I2M^NzFpljj2 zhC2`UC7kvQ)jYs!;C_NT4!0RjdyZPI>JJ@|6TaE z!DYc|&sFfWY6S3c?!TMkR9##${0rd@=)zuw--hz9=5+!VNCxCh|qi2!~G zbO6(N*fL-|!e+q@f`28Do?~zWd73^PDf~h1zX*RHxT`=vtcw>BraZa1$`@t16|NOd zS3z!mX=!e0Sq96^wU?E-N^*-z@@?hL5*Gpr-Q~9O+;V5Jee@`e)CV9se`#*MV`*-o z%~^yLfq=yowz2}2Z!1D^EWfD4ZCC#mILjDZVF4@5cMwrnX1BA#ayN>>cVlICmr5u( zD0s2GJU73{X?K;E;&U`D1Rs&vZN&;%Qp)on1f?R2+g`4rE^D#cmSTHxerXj06qhWw z6H;=e5c)$+TIMb(D`!+3Q9z{=#EU{VL(N5~5KxxyC}Xze`MF4!OFhj4H`D;IwAki! zftMwvYDh(avt0EpCoYw{6(Zl3@38CXBD>91p=QZ1DK5pQc)9s#K)GE_&V$hCJiA(V zeu;~|?aM7M$#vtKJ8DkXpxiRs3MII}jy9K6DSo@Fu%s;C=~}E%g%vJzf;vgUSG%Bw z`CF}!YD;p<9SWzZDkWHvU>7Q=7!qOEf{RLYHFLrEOTc%DcAMKCl*L_MQmU_-3U#`i zvllumm9R294Fw7;t8lqA?jTHM zmns8rStl@}z(&MEHA4N z@G*zHg+;c-XgJ1GAX}F~dgVBEw04ctOHUiGT?$45jqCi|GPU!lTKUopqk~$n`jL~G zUJm|3kEk=kA4#*V0>O$#(Q@!WWfTO3Rg~hpRC|FxzgjAar&|1lI}6Z423cc{65x|o zEmGkk<{}T(<{c-M5y<@jK9=0BQplsckn~Cc6ac_QYU+x*dbl#qT=sJGerc{Pe_4gI z%$^ICN<&u*hGKzkcI9;qRG6So=GRbKKL5nf0<`HER6cY`MP89QW=qRT^6eNtr6onq z{Hmf7=wWqgslmD#l!p}un%`Ak#E`fM$~Av^VOdGBpWB5+6>bNFMWa#Cj{X>}J-SLX z$!Sr#iri3Ss(?r`PM6ybl@6}v7el)il|ng?j<=PUIMEz+u~B*sse-F^Mf)>%zD-#W z^uxW_S)Q9$QbLLvi-9(?Xgu>!2&CMJmF0?YE?Y4^uD)WFviRtyB=J!zRF{YJ0Iw=g zkS;K{$mvE``P9%F<*vvJ4ueXm$b-;q-N@Fl2l-8)amGlGG#55umjSao)=LU^^l&y7Nna7+fN+*hKrMU(6 ze5ad?v_Lw4i?0->>Tc$y5naeCTvUcK$+%xxHJxr-k*k8{qE>eo?fysMxiqN%V zP^sLC(YmIX5gOsLuV9)VS{(w0B!faQ+5#b%kX@5dfIbB{?N41^RoXSR5};3A0e#V> zJSBiqxXVjPF_S7G&{$Of=8-zgBLzdIhkqQDxC)(%nQr727rAq_(N~CW6*`OT=quCt 
z6^0RANk7TJ3Q@5?Rlu*N0vh9h$tdzXMVvKQI%>_q{) zK&7Y|Jf?uw4Va65+PN4ew4Wq)c1-}TT#& zC@&==fTvf<+&t8dZ3W8u;}}I;LHSe#rdC~SFL2trwpz1+l>T>VvcDRv?@$mhAiMAb zQ)Ia_KZpnF-bQP|@3>v57NK%U**LjN?fJw?> zu0hHkscVor`qBNxD9DB#r)Hn4Av9}LAN`g}uA)L!8v8r&hqm-+j~b-d6G}H!TIHYW z$07;Q+jFTHo_x8zjIS{2DyNM1F5HEb71no&QEUom!}6|%#qZOob_b@uV(;>z)bV-6 zS}MDWwg|J03PoC`^D7g8XjLcX<`xvWtBSQVjDjN7)+;Dd8Bl&#*9EV`K(Bvc$lYI09&UvsgQ9>|_yTp(p?^2>73w|=tB?3QnqR!`v{BqqyUuv(?&S|vyOF;!V$$;Uj1%Z3i zJ!c_)6Nyfhlo_CTa7Z{~ z1Og)+7B@-ecQMKy|M$U8m!itAn1$;?h@PTV(98yVr9GdAa<3Ls=vI%Dc&v`pQh$OF3Nvlnk_gB|}RuT5yI+RzXPxuhyM_SBufLpYrHP zErq`MltfQ!X}UCx5_JhsQ^7idalvePw1F*DH*2Nn;0P##0vrS2JT-SL(?W_%F{yMk zsS7_zvsF=qx!9GuX&a?wR0e$zr7STjCs9jDF=@4OdztG>$-1;cOXtL?qdzgM2OQne zjt_eoYPeQ)Ai|Czdc}w?r3cbecd&oK(P(Z-p~4L}N>vv)UE_I(!ns12l{k$kqmu~r z7{zTTQ(rOGl*LWk>vz@R`6WdqWkGvq3>P|z4Gyg+!=W}-L*;CwYbktwc3s%CiL6B` zdlN-fXfbry?zR>B_sU%h?V>dtCA6ZJ{f87VI&gy8rF`&FZ7o%eQK%{wttj5ju33sI z{pZd>{m0o1T7+?OS{Adjze3u5DHp zJ9|0ITHQ85%Zs_EiL6VRZWoq6dl?S=uh=H+UCQmne-x`6B^D`fSAw{tY%i}|q%s`4 zwpcXzU%bjKA6%b}YiSqJ+7kA|}&SIMGU6ar@3cD^{g0)aF#5C>&g0wXVHoXg0mrgcXCRpyU8H&ntiO*eRsw;)k*emmkDxeR# zoNooCaohj69<9~dc3C@yDlaIZGhF;EVaZYgvh9@Bw#rS1%oXbK2F#X%+$yKND6rM! zhsakv6XJ20O2L*Pk5LCKS$3r7Y0^R%IakBy-DXOd}$H zKz_EByBzH#i-;d%YbkNi1(uS%sQgMCdaeFO94KXp@>BWr%W+B?WmKu*#sh+Kx6Z_| z7f1Sz^lH4!r0=~6>APV<`d*WezDp#e-^(Cez;VM{0=})rH?kaSIM#C9#&HkFLmWTi z*uwD?$8R`xaAa?*<(oK~ImU7nIVN+=;P@;0)jJIBL^-_&;jSDZUer4Xhv7FQ38&-! z79n0huOgg*`&fi{QU4asF)r8~ljTYsbfN$xD#>;Xu(bMp48_~Gn7)SJU zftiTLC9+haZ^pNHMBj>dLcEC2BwUVfHVJW=&q(+ef&EHYBd`KOTu7Zq_yWEiBYYVj zmRt{B?hcdZ-2mJ*M1Bhwz3S?=%N7gaPIa!`Ao$EAq6-I22CHo|Dxg0Ygj z-XMQOV+Q5MV$7J#OEmrATLiB5!g4<*dA==@F1 z1Z)944|A{E`fzyzux^J@Zsl~`3d~W&pQt20;~!}TQv8A{m9_$RBEAxrJ}BHrxSPp? 
zR{_PgBhlOC+a4gSlUEX=UtcTqc6szBHT(ublvk$Em`6N( z52r6FxKkd53prFyCLzj^flm&G9<8SE?eZZ7cgl1ri};xV+<^F|Eo%I43htC=KsQo3 zt4K#8{uC|{QF}X}D`|W^{{#@@$U}(v_12Sw1LdNp2#s>}U)AuIX9&aO^)CRyuP0xG zu0?r8+n`ISUR;K%+nphg+6zQ`%L&2fCI#zc<3222yF=uR{RrQFwOme!biWcJU*bC| zT|kKTH4>uTmlWJ3?`)uWgKTWX_)Nk&2s{DYbBNL#5>-9Gv z+RIvis4tli<@+@HGotI{WI~jCLZNrcJ3m*$w*lkO562gvF|Nj*1frZQLbQ7m(1iHL zzf=EGD_|&h}{G$kwzmn6dfRz6&P9Ng&rhlsGqkt4Y zgJ`sG6(RB`f1}bhK#D&_G{RSX3&i|1o*~>Re?z!4RCb&NLjSf9BAwW(#-9M57z7>P z1{!>p+kxHI%O?nT8|B#Zln(2ig4<;A0%)2igy4@?!EN#cFQsSlB|_BS@*{@zjj=L) z0kR(Dn?eM(0oVvzp>A8KY{CUOYL8XHA@UJIl)DF)!KwblZUXWfWKVa{G{0ih_#873 z<7HlTEmcHx}VLfi(Uz@Not4rxZy1d!7*CZ(Xg@QMimw^n^H- z-bT11PA(b%+}T?eaWSuMXE(W(aCbj>0WR*U?a5JqDB5ca})P8b25OjszeTEhDU_BUFW~p$2;KNi48pni?F7QU(0zosJb8jJ48L(k z*dMHVh%gPzT`#{BATM=IABDRQ&cgVJPfL!dJogcYyKGErbIwjtK|C z9wNL3`i4-1JxmB)6-)R5bQj^Xuu%vxrVWgR|BT-rAxwq*330)A0ihAU0YNwn@+ZU`Dj-b6Z*36P!tN%- zoGT@K8oz->h)b@Og#84zig1g-WWq|!VM2`24TPPLKjCWpmI7fKc+zLKkf#d=$TFMVJ9yK{y=i72ycTpKv7PPq+j6f^ZbRXCo}ZZ@m*Z-o2_4?_NglOTUW3*=9D0P-h%7xE{}h5QN2@mpDhk70j9I2pDA zVFG^hkMJ<$PdEhoD8jcOf5PdIKVcT+PnZe$6F!FdPq+^I9l~26f5P`L{|Pf8f5PuE z{|RSc{u6G({3pB_^PdnlL^mccVg3`&gZv3+WBwB+ zWBwCv!u%(k5BU?~^7RSATOohKM#!J=SID2R7v?|VEs#IqUm<_O1&}}C0L*{FS71*Q z-iG<#2waHyPq+y3C%hf{ki@#F$9+?)AM>AZG2~B}2l*2^Ab-LF=zqdu$e(Zt=0D+5=zl^R!j+i+gq4{8gexF_!ZPT8!d;mEgm*yy6IMh1gcAk!7GW0TPuK$a6F!6aPk24# zPq-QLpRfw@C#--iL+FP531Jg>2=9da2|vR8C)^7CPxuVvPxvI{Pq+&6pYU$TpYSfo z{~&Na=0D*Y$e$2vdKBSm$e&Qg{3oo&Z_^Oo1^E+70uu?BVEz-X#e8DoT`cWSi8l_t zcGd04cQWx#x;BZ|%KSnGqpRo2xu$Y2qKe@pQ!FVIac7~VtcoSM)74`}mX3qz^g?$! 
zKcK}aq53*IO$$h0ZYxX2wP&PI&JWWOqj-i@xNtpMd5J>hDJOsF@Tl)maqFT4Zx7UW z8%ybB5S^al9dRW~${10}#Ei-$c-f!gNg<hX6!L*}Yx)W99#$O!E{>Q6CyyqyE{=-F8l_L|uW_oQ&@1fvlAgH0_MO$&2 zjf=2o_3wMY0}-XNNWBy}45Onko!*R6yc(kfmD}7)(~}Bo75|nzk9ENj5uYlD5eAYz?jI@4S1)E;mQYIAK2;s zq%Mu}I*x`8n#x4_H=x+9B^5;lBJNewl`EQxs;A3E17$2J_nqiYgYuG&bx$fPs=zA> z{z5M3)kn}P6#x8IE>o#5A?O%B9q+X@HIR;>Lf1zE6O5IYRoI#T)iiD%sxN$5;6?Nm zx~HYl^_w_`w-&hl05Nc1p$(T47-HyUEWKeOKg~#cK}DHALV4+^jK8 z8a8R}laMH%!~>b-U(qK<PgrUD}w?WfmJh)YXS z?)R$-f!<9i-$&8i2oh*sQAr-58*j{tZMY_+y~xgG8N)}69Ch93F=NL~nmi?Qs>CKt zz&#ZE4L30V_?*b*PMwiGbN+&fY|hl#^RwqnojY~@+^KWs`XlBo%$Yio4ILkNq2w+} z8=>-}q;#V0u9R5hE}qC|iwNGkO0a%l-nl0Z_b1295&!0$2pQUz zi?9di<|t34O&zCORE!JPVzH3k?ke%3GBwbyyee=&&7oz`&owS}f0`>!$lyf(u>frr z2WP_cc2^YQ<`|hZ+`dyJO1cm1#!8}euu2J}Hs7i;TZBU5Gms~vrC03~Kw&jud5IFp zS0za43Xw`RJc_4BP6(N%2*d_0q+QKcd1osqcDl5pkrd3P)U=#IhoMiRe$`SM@P~S> zB#*8ninKiZ=Y$GJ{K@25E#hW^&N^|6E1X3|VxC$H6{g)u z)Mke^BK*=;6@Ed59bKg<6UnC1y<2#T!5Q3`Af72^x|opUPSA!o(#@fZy&_qgusmoW zDg!T7v{z#72aBjQ5eoyZb*Kdg-jGB6|E?kWp{3YRNTN@Ny9`(kNg#^Oh7d&MLqjnS zW6NF^NuSN~=B!te z`G>ElD-&H2u`uIXJC$XskWuKBQschvf1j9E&;Kp;eE%SFuW;g0sXD*80RFJibeF43 z$rC1GaKe<}{80L!>2r1aFt7?HRL2yWTS>;?-0x1LIpRe1v ze<;q)&J-0hF?juOqS|7ZotPzB!-~~=l1N#httY9p;;HKemJ(ZmT`fivx~lcKTIV%r zrDs{1mpcL#pbY9ZUlRWx3xT$Q@D+_hvkRIAt-yw=7{$85mjCNk`RxNuLu-uUY!9?w zi%?iY^+qyD(LG&s8%!Lm;IKsKOoVJP&R%~#S~0MBv4UX{#0iM9G=YB@u1OU)>gQAr zKZj7}CoNvuew8aF+}>6fHFa*L>GDpYZ8+(2Rek+FrT%1s%%hUB0(GrWRXS({LSHDx zJxZqOax?CBJ25^*H$K3FDWzU5SFaaxGXN4N62m(4Z9iAF6=8U~{R%8#%6|ybAE0 zCAK~V_AaU;3GH#)>2nxm)qp*NVoHked8xLPqPQS^NeoLWMmS$|=oEs71X7~S1%YsX zg8~V#_Y(adq^dknwK`cq{1zlHRau4tiz(-hnun|aOps!6nCM*WDxv#vXxR{D77Y>U zR~6_?0@tI7fs>YZvK+5CN<>e8x?lpJC?qD;0^=F#9jVdRN;oXZ z%SoGGDaLQG#*5<262C$v?y3b)mE5NN!z4-_Dub|MaBD$Xd9<0X?i3)R0u)<}B8izn zY5iM9D76YVB(|6=3w;KR0qkyxJs1yV_VS7{n)#FjVu8V(RDhDZgAv0sGSV`VlEADh zSr3|hBEf2+iDqm593+&|c1)L1 z+mUM}>7taY#n@wNQR&o1{bh4qlva#-V&l&dbgw$FA=YJ8%xaw8;cG-)=*BPH zg>s*pYDAB<#%g!=wZrQ)w53p&7s5w{FdY7vTU~gW=06bD@d3=n5xSn`Y3i|DAdUYM 
zHM9*}pl<|%n{wql+Pb!C3A$X+*2U{@+UtU8kn8Tz2gT?w(d(jqcbQ!u7j%zl}ME6|=kX+fBz<3w|LpOc*3w zD~u8*3%SC*LLGh|ut7L3To5)JP8s$Xq76S9ZZs@6+-68J;I~U(71rQ>i-0#YOji6m z6#qKKzbl-=c^aMn_u=;{>31t>^#1j_@cR`1P9@Dff8byH2kG~!;Y!<%jzpVxGrUE@ zZ^P=+zw)YK4O??mzf&cXZ)u-_-u7ZB^6g+p70>Lta#Ul*&AM ztX{a4F*U2Rji)}~=?-dXq6)nArZXM7sw2$fu~MM;8K8(%G%z0ZD^^V&Me z=ru6okj!=qUUq)i7Aa>S=E?5_sdeEqlQ%R(NyafO`Es*0uI{p&8+$T+086A4o4&i% zXIxr~6h?`qU2ZmhFPOV|%-#*(;WmPGmX~!~Acfbx-RgaK)};+OANoXQ_OAVoC4cC{ zr4(=EnlF8+)oZ0?;&ERuxv$q`G&NYhk1(6PrAWiPKgzO{xX*O4R4`p$Uuy8Mc=*|o zwpcS8lI9364H1vD;jL%dEN^GVkv7vyM|>HjM|_UDBW=bNM|`pH<5jfxH92{Jp}NHT z96R0?vNx=niD*|OoJpw8kd`H(-fl|0z0Kal?~GD%rrDeET~2+dbl@V3iEIu<-Jxj1 z`=~eE!;((>qU!p25W)~rI$TPSD8%d?_?^QQ5+6RF`t&9G_kLTSW$&Xk@1r%lZK?V1 zx0%9?H4$FH;IO7h4i{QIvemXgO1I-0i!U_U)F6lsduUB;{rf(XyiE%C?6&Sj3;Hz( zTOYJ;lY~bNTO%(Dp|gEMt)uJ1YPL&MlW}ZV^%H0J)*FWP{@a(nM7C}6Ri8#)Oc6fx zC7Q#we(5u@?8#>Di3=aXXW7TZX%b@CV9zuu(L=EopYeecsE=y$HIKh31*MvpG2u>d z;m`&0??+6yW1JM(WXxa*+50Shd%u7TK!n*Er;3r*9$Clh~#UwU>wz;rr6pX_#U~i0Dr45 zc+TJlhQam5w%B-<5Lp@$A2G;~-w1v-wiy%F+8Wz>k2q*YF7tg>Dz&lA)YBS%)V#rg89+Y3GyV!Z-hrT#=PC2l(Zmw zpj4IF>|@m@eat*$^TN1U-U7kY5EtLLWbp!1Ly0#(ObB;M-K7N$Nvn7u(dIT}V6DkUx!B&GfWGnb6&hBSyNG1(%AI8)9Cfv~~ zeI2^OQC%+-vy^Tv_c+)vQ^Qn@hsVw*7n8J-rAw!nCsax%{c_&wNUpyJvG<;5!;+<+ znrj^_Ikv&O3$npQHtX{X^1py>Dv_g z$axC;(|NqdejBo%WOO|FTBua*8^+}Dv=&csNc>lJAvv|lkUXNf4qXlXN#SqTZ4r_5=_EqrLh&*fvG?BWsm zpH9k;mW*KD^q(l^6WQ$jxZ{~(=CI~X^5WKxUCr^H+|(xYWJ8ZS58BL_Y5P63j?((U z68H}(PiyiN8CxW)UHTzONnWOGm}?#yJ~bG|AMW&}8x*-*R0?Dcdw9AoS6h1|Axu(S-wjfoCx zm}upEpgRBBL2`J!1KPfWB^s^ocvNm0TUsHdH#!(^+s2NBrC;aST*8QL2;bjP=t_^L z{O@$g^>vet4a+Jqj+&;$jYU{y$K*I!qFH11zSwcdmmzO+f|Hm5;n2s%o|`;|=+7o|CUe=aJeI2FiYqAs)&%FD5 z4r^0A6N?-U>ja*+UkhZrq5y>u>23U{dV7;Ebi%OzIYnx~GEN0kwo)xZ+fu|ugw zbx_Y)hjmGKJj03+7H_jBV7+ZRN3stWXPz~Ck8!`%9wK_|l)m+x)v?1VENNUadEotv z(o;+gW+^r^J)Zf5{s~Kovu1B}N2*n==Z7ykJJUYz>-uFCB0NO26_ zT+Qd(X6aRo@oLF*Dps0x$q0RG_P%va#uysc;*?0I|8P#M$M_f>=V61BJg3{308KJz zp(V#+niAD~x-AxTG^dTqD)1J#5sKb8&KcqinkajJnzef8V~>)@Ic 
zhpC^TAwwEMnq?i2`6fVD>(APRJP9L^--$td9E9Hx@qCxoLw^n8$j@`OPVSocyb)Fa!#Cw!@*Wl=gtSd^3#9`%^; zC~T)Oh88AxN#iBsRoQecUI)jj9d9I%oF&)=r~`d0S=E#j7H+5VEL!E!jA1 zO<4Dqtyq)OQX-mbe7(#paWqT&O}xV&UAylh~I;M{MS5|INn}qt?~6_(aDB} zy;wVpY_C0{#!}Bxq8qx2m3fh3r9G^s(HZBtzaIajQu07>p{HWi7?VxSs0pi~*ixxj zmpPy${ z9^x6lbnFtJO&C$-3ax?0X{Y$a_7IG#aag-Le=PVVdTXU!NICgsXz~{)gapIS%xm@z zj={(s_sLmGJ+NJwYe}O##1RVV)o!R+@YJT|n4^nG(m@o9`6meQvsMY?h-Q^RyekK%f0haS@TQwG$6(e@;9Amj;4c<5&s z&G-iv$U=;q_uOkjjR3oWY}w(>7^|$Ze&$UJpdE~ub+m?c{&@etOZSN$*k5fjw2jlF z^787IFBJ{qYOAilu0C1X2|aIVW2s44Ps1^eNrMoNNpq83S>T~M`ZevpD8$4yeCbPu zv3FY&S>`!Y`@k|!7RaV1>->Ibo~Smm%dME*WR*)EmKxU(rf4OWbR%SEV%OEfLcjeL zSV=!a|CpF~(`-q#&VOhHEutRQ+mRxLHyKByNLM+8u@6}1_>BE&#ob^Xfd0JOO8ueL z@tfALr>PIS(1(Atvb5{ahbTxltBrb632Qpg4_K*|WGP13yUqMz?y?P&Z@l#4D~G>j z4fq^@-PqLGDZd^%H1uz**%6k!*)Iv2P7+_AD_B zv7k&(9{$PkA<4z^fmSE0OoyeW+JMO|efiqW29xc-yp<&N+EULw`07&e#aUtsBb77efzQ; zE=s9?OKRx~Y>7nCKGD;iVO5E$pnVT_I;~^Ch{h{4JR4eg{*11xYU5YY47==6Y zKtU{>Mm*~t#Iq=jfdMVOvxFzrlCRSBp+{Ccc5IcIG2SNZ`eSr;tNe=bwtUAtMk$zQ zdEi+T>il^izTZIo65J{H&9ASm>pdMhFKT7xRgU+3tZ`p`&*|*kL1r9_l{$K5W>1IU zF^#p==lF~>gmFTuuNkR4Mh8o!^}d(Ydp10acCKttWhPe3M&^z18S2dF>tiT?JzUx` zb-z07yzzl(NsyjKt6}NLCumPuuULv5#s_G(^*DMOC-s(AXAXo!ViID!4}CYY{?V+* zvMVz`Z)1b1FiImF25UF#BQx*LoLK)z)^JBzNwA% z?(b-7Gl=7ma)focG{E{1+G(rbFl}{qf}<+aWbNUY4JyJTVtlcb@0%;LnMmQx!^9QS z8^6n8D{fkmy)yI8%&N>OKOK)WTTR#{TYiW#O!?`P$QbXI3(LTP7;o}-mU^R<;V?Ls z)E8%sLrK@s%^V>PCe}=|pN;X3y-NoTXN zve71lq32qwC?@vo)T}|$R9##+_TGD^O+v3=wK=l(#d!BzFiNyX>3`aygc_|(jPcg- z(2&!Da@z2(Gt{0?$D{Qz-u>qdj?ugY+oqjSVhoPQ>xcPUb?QvIQdX!V#{2wv;!PFx@0Jx9gcEWuM zw;t~4=FZMk#IFX%0bK}l;hUv@AMfm}!8cfKz)KkatKfeXIGVo8f-Y|FkMBMZmk9dL z*teK~`M}N}m%I}Sx!QhuFw*wZCqpej%TEsvEuF?1$}&jDuBjWSjF1$^fSChl+?}~9 zb9lW7?UjPDsOsjvvoLz{`ev=ptjrt-enefkn&;3`^ip_ba3s|C5(60DK!rdM!n%MQ|kR z^w;2-ED?6Jcz0%qN65gL&+6!=TVR8-lpmWgenuQ!B8*@uzcdS@wol&HFh8#2Y=2&g zrln&QI^40MzE3vJ%-o;*jLB7*y|UP!2T2j=f$-?wX|zuC$m%g2TEk}?yE4<9%^W>3 zf5I`ZSn2ULW-*8co{lw)N1Lnh{m@=GUtfH80s0J3{G_w<2jCG3gTHiIOAFZv>*{Vm 
z8;szrtAi!UQm9pQu;ikqalEBtXRwr2Sl>u0qq9gKjGq2Ht+4ScGvfxaA)y|NZ-`kK z^Co)VuPMhj#KKZ9H}!EON#;?RKRq1TvE)Ktnk4Tct5>?F&j;yGi#)f&DS#n$h^m28m)P9yFQ6AV-)i@VWtO%@M;Kg*f+BB?V za3ABF(^L31Ck_4r_`#n^V>lm1>-bPBVa4hgIg1S*sf`*2^gE@>!Y*G2TZy`T#?5HZZQ9*3A@0-}UbNrh0eiO&sv==-?TbYo>7IBXZ7?|nPVNX4e?Tc z2TL;nS7x%!&j{Vp>#dyQcEuTIzw(+uTTbUUPI=Vv`EO?f)Z|kp{{7cgU zX*j~7I-rLw;jl;{pLRhKGY=yDn0n{*>rrM5PJ>>b^6lBe>DMaMg>#hp7A5tG_PptX zbftTgI4b8@`=Tx}pZa6oX}>i)f%4?j$c}MbQy)^ZP}(~!%rhrzAm2bW`(~DM7v^6wB&()-q}{l6NNhJp=++3wl<91<+T*b(;jweH25DTS zkrl{Q**<<|sASAsfbkdOtv>fY`W=FvjX=JY=ZNR#Ch9GY$h}S5Zn0ggf6JL+s z(>$DInx?*=Vh!zm@Les=rx(HXf!_Tb^c}!)hdMi34|H}u_D*N#{I{_Fz`ygo&dvnH z4~2Uee(_p?MZb-8syEg@qiPWy^#w?YcNrD%zE zjA&yubC|0uVNpeq{?0h&i(=SOVb6CA`l!Wciapk5G%w3qhV}O}P9{_fj;uJ-6jK9@ zD0*LIXkenTy8A_QH%C^jB4xp`SU18ATQa4B>?3&7VR8r=CM%shSnCar{gcC*Cp%un zd8*<5?UR{zt9%6UN38d?-^(^S9F}fQVZ=sj$-i>N+u|Z^^Kf#}T8~}7rxB}6V||F@ z0CI-hzhm+b%~Kq&P5xJNh*L;hpBK_@3Yjw1)ZE+_I%a&PF!Eqq=m>gSQ|>w94BZ|z zVv2-1Y{MO69qf^zknxsw8FpNZ*U=_8`lL)~9-n=>&G4wEnTSgj zMy_LCr;pMYCF=p|3-L0diCv5mn$Cw`YlF_8j&=Xod3NnV2aPjBQ*ygt*j}GBo$T?R zuotU+qWEceM01QcwXHjyJB|{pVK}ceJR0Je)NT?ET3KR0Wqgs18b^CYtPW;=1}REC zls$5c_m(z;)6i}*OrE;>l5O`*4k2M}-feNytyB`VYESdqXRW>P4QqR2Acf8b7VDG@u+PJ3fe(9F%zxm=C>8+Hh_0rVtpUzdA&0Ti@(6F8FQ9V5=+cAlA8&p6slADH|(zHY{oI%)_vL_jDLxP1a6} z@xI#HO?g+_IdZxOZ@-SP!FDN3@d?*K-bwbUlbdGyl1f?n4^3|^VX3z?O^`a5yqL$* zJDT<{VMBjxKImf$eqQo*9<9$I7o)<$FPcMyq_^`DlO{-0AOSY$`n=4a?umQ}?->ub zu?V^SekQk@*dVLvaT-yxaenykyh%Q>H2h)|D}=m0hrD2`MIMF>ueQeWoj3G(ba31H z_}jLB$@F?^*&$!DM{C;;&2?v~b;7xO!hT*d!ukqU#__FD@%Nr(4WHwzcn)|y8m>oU zXXhxO3GQ@5XD8V&G*=(MImXplF;cIsu;8W5${RiC6c$-++=i9jOy>hJI1dWL-)QV$ zq5bNNFZZrNT%U20p<0xtOH&Xxw(WhyMAndQQcteC@li9r^$oLH*TdLxOIaHw3G=%* z24tpwfW`2M@xOkxZHp32vGv?WteVsq<{LLse?d$$@<&xNXdaKD2ukuhX zVMm68X7eHB$=TN@YqBJOe^@O+3pLoSwLS@JS+N6@8AbDU!?frc>L+mrtAN6tXz$MN zXlGlQ*9>`02cO72lmt5&?q0Zm;5QM9Vif*OT?hURfUE}S`6oXP{-F%B0sIrdzpHip z8|TnkS_#=@?dy>>M`>xa_sDmlXlXP~0Q6@PWQ7`Ps5ELNx8&}L_AdA?5^W$i6YUuV 
zcPLujUxZ6NaN;pg(ereM5+~jdTd1Z!>Za3eVexlnhFcA5{|{sD0^dZH_K%-QCikR- zTA)BFleW;bloFsoL4{D-X;Q0HMHdk@psPbc?RtBQ*Ax^hx*8BQMbLU#x3Hp(RbNoC zb=P%qS9ufY?xKK3ch@#eFH__a!Por1&rFKNUEklopHAk??VK~`Jmo@5ZNh8MEm!U5^9UMD^tpsH|;N-`CA$Tawll!X7Gt}B_IrNd zpCA}U;d9pr8MPm?PpzwB-UePw&)vWry^f;xr;`H{YA$j z!BuoNJ$guTgZ3Qq9j^VbYw0Q~Yobe)YfD!gu6?H~sGJL~_^b=O4>!%T%3V*`)lrjh zAR$FZEpsjibJ8C0JKQ5XfKL}nA@Dn#qhaNW?1=@V(MwLAV0YPFKmO(XSX4^+A4m>` zvlth37iFM#qazKg=*-CI2a?Kj*rE$pQEA}{BT95_;RCF+Hpc=$G1fdt33{BHya0rvZ zA*kGePSOc2mAOhgOmc@*4x59lpg}8j&|*yRup+|YyiW$ENRz)&ydHXvl)%4iNhnR; z&^y{&C~CoLjPj5+0lWq@*UA%s^t*b~A?Kv0+GBxq=$V=;ov5AQIRkw@Z32#eJDE&J z`5Tn$Q2sOat=P6;yApgkeSG*el<&ee56ASLI+6OSm*_on(SzJ!@9FSN5gm9>$JzJf zC0=P4{Z;yJKAI*s_GKykgudnM+s?weyaI1EuJ=5Q9lce$@>VT)E0vb%@K)AddsVJz zKj*Dlutw>viqOY&Y`4CZOcrAQgOOubq5MAfe#j7%&*u=z&Dd_haUIq$FJ^n-QwFTu zam>i+BqyWnH*|`OJU(SKmU2e_MY_dCcfj+Gg)8yI8`;0u|lFZOz z+oHS!8k?1A@{B%?sCvOa!qi{lcT0TQv)n9?o~FrioVAytA1k4Uv%jT-OhS5@rR{0* zF|xF~1fP9x0(v&!gN3_JG~7&Z0=l8-p+8OVJVrCF6IuQ(Sl=#@$=82}U&k~blidl@ zR~|nyMPgGj+xaQS!j>sqgym(TkE-6lAT0=u^o|4NRN-@Xr8zpnW5=uwjcYn2R|ZY! zQM5EfjRnz$evnon4f5sF2ING-whY^~kjJR(!B&Mb`dqoO)i18`q={?5D;RdIEuTZ2 zTYwdBByBB&9w@L_*1})VU|%d-nKqC~){Qw%k#o$$$Od~=tt)>dO-_gJ0Ph?OFX_=U z0n{+}DDj8d9sEv~i#c&Dgm;7=qrL&p!0oG3`>b_}5Yat*ux*(FV|bHkmxh;muTtJN zE1D+v^bkLOSyWW6>!h~;bf1!oi6QGbl9e0oST_&(8Fo+{vKzl-WNmp-u$!mW=5xYicn8cs-ASn z<{b3!pLemm&%3fc6LD?gPp+lOzMhLQCi}X+&~t1Y;O<83tFWzxoK5?5Y%@{zVDG~A zCgvFq$*fZcJc05Cl-FRB-h@6C=RU;tep@p63igj<`#s8!VeiAX8RZ|a{{~y*JIQ1V z_V;4TLb>&=^^GH^WQOfi#)UOgawBY&)HcamUPj(ASWP{=Z;JG6TMjTpPU?+=G(xV{ zy=+yQ{8(HU#iyBJhvu+`ACJ>Y*avyNb+0O4@=|$dEFgbu>RIR!ac#6`48~;4*)h>! 
zyjqkiW?*eIu9=5jW4NZa_#TfuPT0Oizn5e0!gdGBcVq9vb}!2H*jHn_5oP(UUB~!~ zXJXd$l1qY|h*kV@3{abYGw61%vBh4`J-5t2mPLrg%z0to1$W!!kZd>GeVi(%(L)g zj{bB!R6EX5^p|X#e7g;AK^*ksG1mSGavHT_dm7u;-Izb@AH=o+#UujFZ~+mL9sPs(?M6X8+$*te*u0qXZ5Ca zmG|PD9X^ypk_qYIRShfTm2SbeG=$F``#2#-DuXqEbNQX*3*ZCBCb+QgosZ|q_5nR4 zFrX@h!G}}P(WXHgpcCucT)=YI#{BS^N|U#CH-~P^Vvh6#cfpqN+>8gy*7r>R)v1G= z`~WjXhxllxxCu3+N0|VrDhCq1{f{xWa))oO1i8MZ9=fD@-;iw6+)X_px#l#7cg99^ zL=Q+aiUIg+jq#!N?#57Us?}eh)rWs<)xd+M6rg9{$NYO-(va3)fEPrXd^(mHdKnhg zQ<2^-M%FJk(FCHUTSSfYIDDd*gXL+WT#3IZEqqxbuZB%ckiKp9E{U7Xrovt_{W;i6 zZqMrLGFJH~xg|sO-Q~aT&SiDbu4tcJA?9Kpg&5IE@ZH3dFgQfOQ>v~~<&Gi2NMpQ^ zVT^3@+VT;S5nf`*1eTcW=Vtqhshkc=#pIf)dyaR3pXerElD@E0dKL9<>z)gq1C~%| z*pGG1O=-^4WMemjSI7{jS6A7LHi?eyI^nLndLF&6nvwO z`k76BWuUo@W=Ipnc(e0dX|Vr`C`HMxHRs{7V~2} z0}jlPCir;S$-_4J{(=92PQZ?F@L;?6x5?y@SCdI6%I#1@d~k1Zfj+|BH$6mtV($Bx zlM9oN0K-P(Mx51!9yXacSWy0Eg=PmdPyZ4;QBQX%G{um>yP5N7#CV+0nHquALv&1S_++C^v_5arG|@nl!q?#6bbJ7gmtjo?Flz z77FxX(_CGF_TU(JUcxF9S(}|!=fLyLXzNe}el2rK>zfBnoUY)zgM#x^m^nWU&lLqH zo-5TkwFgHgxDm{G66@JmSBjB(Yyfit|4L2JCZFtgqNW$Sv-f}(!uaS#?J#}Q(5Pz$ zzW9N$A6isinXHrKVZv=%1ukt|LiI2)N7;5MD-GT- z`3CrEQyU-k?;g~SnC~;=-Dt(yn5KwO3SpiLI~M>{lt z?X00bm1IXb2l3=g&c}P5=)^qaO&5NhFu2K|-6hxt_1v$)X}LsAj!l-KPa?=(($7uZ zt$504f^&y?;@ITb{ri#UFi#!N#p_$aZHG6C20ZgQ{Ml*cl3oV-6WB;>a=6b~&IT=d zs`K+c=JbY(t8`+qXF4oLxxne*JPi%X-2EAPqzgNp(Ae4J-aeIYopZz{zugC&5Io!o zidqKh+Nh!$ns+3VW3eY#+65f3 zt~E!&Xbx}#Yr&5g0i#-Je}cDreZ4IK0~?`x*x()B;^tm*7nUmgZzlt=Dn!e zZysu8n+>9>iRYkI;{)w&H6Ghwn%OD>KbY#v@oS{n;^X_cCGZ5k7~Xq@mj;%kVy*^wRt6hUcO8FmRNadSk%(4&}UQkTdP8xFEbFF}LPYz((>^&JW6Z znPZJ=o&#pABWT))6-oR3y`;h6@(CW8^GH}TO^bD~t#`EJvo6lzR7N~`YR3=Ed7u-L z9`&A07uYlv_R|3SX$;acM*lY8CG@xV{VrO)Tob?5Bs~H?O-CdAgbg13to>!=p8Y2_ zx`TWJu(GkbZ5~@SyaP7F$4JV9$4ic+dy?A>$!fDrzP6uh%E4@L?KD$%H!xBqW*Bun z_Z{X68{v8eta13=&Q9Lh2>SeGH?*XBlXV0mEt)w>zlJFJamVUAH_&JXU7QJF)o~s!3Xl^>SPl9Bip~gXaX`=4>t0J zBFo+o9beblAkOzRit_>U1dr#!J)2S$o_h}De}P#Yf_#Q(iG`Sl=}P~JqSau`KZ%Fi z$io@DqWpNTO@1?8mgR&;9acP6%Ct@aGWlho>A+9%or9c4y|!7=8<>D2UWRNt+Q~-* 
zP`S(S)EY!=*yPK46f(d{$cRb4<#wHcFOmo4hlw%zr$!#u#zw zTsttq&?#f+so^vRWYwlH7%*pr=jwMaK40`oo{DM>Jcy9pr>jn2q7TADd)u~L`jdhzU zE@skw<*rb z>(uU`RdfN$OFC_`(Cb!?>O)>pkCMBuBb2sP3m*n8Vi|!?Z=0c9pC2V19S^AerU%dt z+PF0|*|U&(Sve+}pSpIaCs#tGg!H>muE#5u;cBpQY;*$bEM)um(Tfc9s;@az?4wmR zGCI~%1@Fj3xSQ@!r7xRo?#m3#Qtlw1J6#hvAth!@PpcC%)6?_CN#wJ|xb23mlJc_8 z0lV9beI3rP##~*3{Q~TlU|)!R5%#CIlYg*HezjNc<(%YOm)TDK!OXF@+wZo?W8Puy zwXk*3C7?=-EDHf4Ly zeAENHvUnHP*u$;sMbIa)2Cy*^c13zWUg7d&6dzBRToTRp!7=bfF@!vgOY3AcM~IX`IC@PkGzKbWE82kp?wu^G7& z;KQSH!KWJBPh965`8gnbRwhQd8ly~Q&Nqf|(5wvQHx1E*rYO7z1gDQp%Z6=-aNtLA zHi>$*ELMZx4KZr<3&bUyE3K&BA>c-1$G&Au#JJwR6}npNpKMAdeb`#xYHYOvJLE}| zB*RnOF7^~Sjvm*ZU&4-UZ7P%&VsG{t6l%q{8pc@f^QN zy94kUQ)hZi6cQPF*BIl#&5u<4zb#Qs6ZAe?$u4VyyvySX`285k0QbiDvwWpafp~Ll z3EKHjeY_<8fAgLRtO(%#GI6-IJ8$G z=dA2(5SL6pAxTUDzCF zIziy_oONEphJn!u?jEY8zsD1LslpfL08yo)wVX|BSt3VfM2n^wkN$RWD$_Uz4&D!r z=DKO|unGPFkjAFzA}>``hNlSh%)h#R7w4JQx!@qb)gX^xlN<$C%r)7UU%I?-?4@-t zW@#OrUBIW^3VgvsWG+Eg$!z7<6(tDLcWbZ$h|f-7W#n|~w$%W>*(8EeclXL{_1f3C zmZ3m+jYtp<`Uss3=?@5Ai$p`zDt8a$rFdxaKc~8l-L%R|x_=6zSch2QLok}ry%Y2G z1Gde;AN#R?^qFMxLIoGEYh_QeTpPTrG_Xpla5v%K;BHAO#vnHZch%hlcaD$^(muT} z;IWAV{{^cG`g1{#3VUwE71-qY-Q|0@?dB%JkDip&RnmPfJX+=74;&dFK5D2xIZA(q zaMeTLffm3AVgX<*_#=$98YSYJ30K*HtGwY1v9yXGhL4wGUQ~Q+mG4IGfCa$EuCp*^ zl|2LdpuHfh^+s%p3SrGIK-fh%Gq2kvTII?MF9m;r`uip`# z&_l1M4O->e0S$hofjjqb3cqBPZ^L*&QUE^9YMMuWk;za!a^e_9TC(a%_*x$Rj>5xlY-1NV7VfD*( z{iM->eA+%*>Elw2z)3(bLp}<{O8SJRRO{?LzVVfkw13PQ60qx!G>sSb4tXYSmo?~2{A^{nkh%4`&*=2Qe}I8H?F*vIx_b=SM{gL zJY*@W%=#%(x6W;q*Y0gatwS{)??a7wMLs*`|NXx5tQIK~rFZ&-^75>1rTnH+cCMoH zuPJ2#_3iF+`l6v#;5yfI?~n>Y^OVTo9XP(BJ9hRshU4Yk1*?Xx7p#IlquVJ#e+?@I zK20EzyyaGY*(xggm9kmXK>I^{UIX}f$clwbp6|DVSFnPA=Ui5~9$Ye$U4A_Bb%gM>k#{WgD&H``HF1C@t7rti>4XHR;u}t(Ln>y;QY0+$qJl;A8q#7| zWn&*}5B4zDkL}yd$>f-)f8={N{EV(8!+sugExKWE@N(afuEi>U1c`IBqHDQXgvA-M zzEyq@T%Z8WJZ3cY4o7;s!TnlgzoLOaUtMY+Glpqe6iEYpM8tplff#B+AFH<{lLn7b z8CS9dDI4aBLzE4E9Y%LuWh?G|8k;&7{%bZMN`h*4o--T!5ih)`Ew5?~dZzLu1Pwzo 
zaRc$t0xOzK6!{98kpgic=E8`%Kr|(0!>nWwF&)8r?Ila^u`taIG8PEdD3xk^(@0{d z(d5wl9O-?FUNj6VCYWd(rWf!oplNZ`o#Z9| ztR&*Ktcdo}DbYT*qZfz|wNVa^nXta4Y#a~M=#tE4g}**@CCir}K%cl0SqjPi-9j zbu#%pw%f6(a{8#VayoN3d>kSc0j>R<({gx-&dgWja;sd}%cg4oY5-3Txvf)>2wO?< zimY>`b>JuH1h#bHTP?EwU5pW8MfC87pm;>^jqqnPqCeH~@rd47l*Kl$Eq6f^5Biyk zeVTw7E2n$}(sj-;kkz>z`3!24M=nG+AE?~z~Z^u=h-4JC>1Q{ox6J?;aZ^X})&j5e% z=5K!FtJk+SiZi@3s|aRD`)q~wIY+X9*D(q&wG~%sb``_EyOy-BcG9K$F9pTHJdjr1 zO42VRCY`88bU=be%i$h7H19K@Jzfmz26_>{4g3pXwwJML$8^$6-Vz_qzg(j5FRw!4 z0~8_ef}+Qnt?)0iwNb*$Sn)ly#*g)=ux(@xMUMmN1RSWU&jm*6VeK_|w^y+J2C$L) zB>d%3R$(OVEQ}Cz%!h}F`z#kyj3es%xU*xgYHh6YPSjcVuc39`aEmzw(rC ziT$|7e~1lVBW}MNanqmxJ=$Tu%gXzJ>Ynaij=q#(j{U%GzrY^81teKZ zf@GCHf!&#Wy#(Cjf6FQ_iD|Hg;UVku@8#8ZSlrfnoB9r9onkWr*@9JWjo;$EMX{xs zq|vyWIb87VWlpyo@~^7S+68|yg-JllLNX ziaxT^!|y;0I{amr^QQ3C!=COu`*h9EK7B>%>09vh;xMEg?A_ri&qWHCfcKB>YtuN^ ziQLmfFT_i1gI3sfji&p{$+mkL#_d?lfGCwq!rGt!e7g+Kd%_i20_07M2aXno;nnV% zE}FqD(DBzqZKerzvkqsyrcxNc($8^6EEI3Ayk&lrWU;L7g6MYSqUbP8ijdw{mpaOh zknfZg-T*FNX0-8~@i0TnOfBot65mcwU)R%7Hy*vS$j1k2aOK6=IK^AUDrdwqy~K|o zdeKC4HY2FYoP7IC#r8t|p}M4QuF2>-3VV9+E7&Hm9m4j%8xVtv&Hr#RnfWOEtR7D$ zKgDtEA;dyq|26i%0ld^kRa(~2dc`GvCB85ba--HSfN`1pYd@{DD9Sg6r$eJ{lsuud z2?Vv1AxrxZvC3P>qD>H**MpH$dIs*Krwot=ZUJ}IhS}5Wp%);i%!kg%0*Rc#<~dhf z25q`k?m+EgnB_Y_{g&Z)ch7yM$nceJzL-dA=N+@e5e3tbN@V z%A6pENblqiTI3GcuQ}gEu*j4M6me#e-v`$2ML^;RPsk#F-H*sj$b$@dveY}B(sZ=& z1@$T7lKQ%+g$=0|K&_P)^kPq~y4p7mYk?ouf=jZ1ga2vWWvdv*jC@nS>!c%#}g5!6vaF0YhMYy6GCRk4s|Sb;`RUb zt{E}u>}Woos+T+fR1T?bFTL};e}CuN^w34MTg$kxR(Ws#Tj7LDd2@RhvDY1l*y_k| zjP!ADE4LQ-$9ngPRsbk$;Y# zY=QJ&u&VJYel~L^B1XL7%04H4eSKdkYldKO8KgndrdZ?y?{TY)(9_reH}E`kWZ=&9 zK7YvUHCI`b>=vynKY}*$U zVv||^t7C;#z%YF+S6Sq2WHz+GK1Wx%IT^DbE*m(=rXpq*ZQA?XBxNnM$dA0&*{SvL z3ax^_2K*uImGz$k#=%z;MEGkfrRPJi7mAZt5r+8?61WQIY0#=8he3Uz(-`I!z> zMJ)1rki+3E4(XQWT#Xglb?l!PH;8Wolce|%Wa^6YJKH#{_0JN{8ord}VQpt|^R!qI zyksn8g(o9LZ9BaJq(zKhe$w{Z6#MGsUvjD{@5=pn}A2@B*GSWI`b zX-Dx~ETRvxe4;UJe$J)A{vUol>Xdw?E$(B<>B~?PGPpP!h?WLw;N!=c(~u=9BOqn@ 
zhTZe~0o!)UgvFfm!;)ldYAl;~0PpQ)!koaY9kT;-em7at0J3sbIImR}$lHgSU)5VmwtQNFp1>E>L1XHh3+SQsWo?4A1igz*IuxGmh2(kD+Va14LAwavhwyVF zxDTgq!Exv77_k7&nSMYYJip5tG;immS-~PHO$vtcyezP`d}P!byj;xnqDK;0RiQ1c zq%nFfZf+V4|6X0!_*8!{2YxB?{qHx(Lc?X`jJxO_K-W6Np}UaBYuvMzCbsD+w4@m@ zK*#*R@t)V{3LUfje|$r2z_(IBlp9(wdf8^n*0trWm7jEVe#Y3})*}`h`x_dQ$s0gR z)4{)-2LE#5reyM#Cy*)x>-F4qKRW*ZTKCCQzW0aL{jBek$tkGgcI;{0`)ezZZH1Q_ zMJqBZj{V^GBLfeYhZV469DEz|zvAHgn#F2x z$Adl7gCE02>k2x3E(y3`2j_D6hMv#rg$CZ~{`ROJ((#>p%hDo$1aD$=aPba~;Ccz( zkIDb^8>CfCFD~>IZ_fo6Z;|Wzra}LY_ta#zYw8~Mx>}ILq=hst({etIP0yLJbNcVN z0Nwd=uhw~MfpEg-MWnonSrRMZ)1hL7_Rr$D5KJf zqc!2XQb#Q~x+#2n>gYurtqlJvb+i>n*Mx6L9c{tU((szp(PKEeJiID(v;jw#gl|e6 z-H#)0_=eO`J&wx5D^o{xI9d?CE_GBJKh$;&d_~AlkZiD=aJ84@-?in4sKJO@qMXxo zxDt0%dg&h3TZA$X&A_VteQY{@J7O;Uz89N@-*;m3yEMYoF0C-7ix&#IbV7cYUYOKn z5OTYWf}_hMOzbiX6S^$I_%5q37P$c}a&v4Re7M2?#b~TNaf^IQjPTI3XfbLG#Asz- zDyBcna?mrsh4k0(yjRz|xsRSHj*mk=rN)qc(8{w(QP9%6V$kSYLH}a{j&6y$ByLt# z$wOuGL2BtnrKOBGbpP8wPB?v($ph=@{F#dH1lPV)ENR_~+*w?!v0bG#w?Iz+{;!OU zd5LPr~_|vHu;m zrhAjg0_>45nw|Hc65CaasM8R<8+2@PH~Kp|2CfagPmd8NW5+viv$JMxK<|gRll8j^ zvpif|Dz%0Tg91NFY!umK%Keg6vZ2g+0B@hQ0}+o)5=QGn=zXzTfMrUh{w~ZH?4ukw zLBcPn*;Ss%G*P{BLuaSIRwG@899M#seB+lVvMk*6O=a5h?+${)&*jf@A6WMt8o^h2 zI>OjgQC8jY@{>s0^ z_)*sFe~IZA{Uv7XB~87bd>on4R~a_OKZ3g+Wm<#ZeLf#0Ol1JCau;P=%i`v13ivQz z@NI;!*SB4KD(6~%*R5@zSJ?*V=oca{D`jCJSaHP;CtP;jV3r0s|Kvpp`1U#%MI!%p zUsmcqLAh@l?(@Gpk+_)I)1XPFo>-8Lcc?B-&c+>yJr?Na)pLS|lQVFx1?N7B6IEk! 
z&`*@28Mz%@_GQQ^TEt34wuw2ADMRkTy@AkDcxvDIovMXxWT4uL6`Ay~52p06fFC>$ zumuN5G}|pd?Po=65VKA6vNnFJR4vpmGRyCQJ7RJ{1N79>nCxtDO1}l4@Dj9P8O?04 zoTE&w)sw5mjiULPaf#|jbz=G4>KwxQh#wZ&8GWl6Uk`ALbjI4wT>u$)fwTNlu{*LH z(ZLzbM-xS@o(li5|Ml-a-FVF0R1Lq}%(Tl)%_290JHr-gkXyU3{K_oN8CIEgqG8Dk zgVj39s8F3_hW)w}95$71gO0+Xl-3N?W$i!@tLIc-cu9p`>^@vB?gxda@1i$bCf*M3 zHgIUbxSYHsK^cF|av3x(8gL6YysqA}de}W>3E;)!cf3yb6{qg2d3|+PA*`>+6tvOw zjApRfI8xMaW)1^5N5O9Z4^|Bx%r8IeS9q|PA=pqcct9Vl&f(5z7;?jz8S(*x*R_v| zR(fVqCOE({MF*98Ovr-3o>AhnW?Q$bT$xf|Mfd>C;~|AF+luufM-EX2VPtnTBOV<& zmm+4!pM)VTq{H(WpplphIC-#pDtIT#TP3uyf{!9aG=nazy8|3Z)gqd{N}IlfnJKSZRxiL*<{}L@;B%IS2Ag< zPbLHV89RpU6m&~f*gu5*eC!4AWWApRw zd$}1Bt`qgw_fsy@W60~8b@uu#%JntMb?NMNp4GIukQae+dJ!hjAvT9++??uJ50^~< z*U#KHj;%wzHT}M;%yM;qE95mz55YI`vJN8GGmf54RA6Slj<{BtlF!*O{)qsi49($r^si#S2IQ7(fd(c zt|pL*&JEQ7H*J<5>{BCA)O%FUw+6(*DvZVCyUUSVlj4)S$+dK@|R)^z`u1DK{ zlznkf^QxhR?tQKM2E3;Sn;?-cj@I3Kz1-yNVKGJ%6v4YbPDT~Kk)F4Wo znMi}|ha5HhNz>^-h@Jzz=q24MdKNH*n-b;dV09Ro)j`3upvVkaOp-1wvW*-4ceWV^ zf$kLg%m?&QKIl#qy3K0-M0`%KUmA?I6B*;AU9mQNA7fYDnue?{idr4>u+UN*1 zuhBMnDjM4tirP($?TGwC46_#2LcSB4EB{jD6gNf9@J=VYDl>rti@pHLX!N@=dfB}m z_*N0GIRwX#*0TI{wFeWt|4JoSm78Kl1Xetq_JC7R*=Lyin4i9Q!weRb8~w<2kv9?9 z>(c_~Nnxov$A&j1x@`@ZpZqjtyvZH~RW7jvDk|^{BT4biGL$%!92FI%?QfR-@__D) zW-Wh4Gmpk?Yogq43OV=uqbn2e&dst0zUulM5*Mvc;3qpn4t8L%R_G9XS79xI!uB~s zpW!WE8PI~8IO+cklxrI3UTN$4ieE)OPq^)$CETnkYh1W2i^-4px%fh@t%=3mjEk@5 zs-(0gXf8Ofw6SbqB&~@eM{S5P%w=Zz)qZE|c z(Q1czYcB5Ra+XwID4L$vo#B3UZ#nL7o=mx$Ix<%M4OzEFth&T|NfoFuG>PzE{td-Z z@pT$uX_Po4@Bfb$7kU>eEuxOAQGeq8_l37odIY>nOX~h*VGKdgoHR z_bBb&ptNhzKjl`Qn^Z+lp|5_O@)Y$qU3zpi-ZAQ5ovlQ~;o0lyDa}J@u~j3Gy)?5! zu5>6hj8tk+pGG}(>5^sDr-e zTB%Kq`++|)b_L|8^H+m5fSLad%BOFJ4GQJUAxm8gT=*EuV^F>gwJy>~Ww4-o@n;B7qJ!mUf$?`7#$}IP$Vgz7~ z%B%Q_?xp%Zk8#fNVdoyRd;~Hw>51liRi2%h;O)~2MnovHoDtk1G#;>+EQC&a9iD+! 
zSqO_}yb*XNXfkyH?NjA@z#qAxN!;|$J?o-N?ddmQO-&b3BrnK*VC( zH0MC%DeueB)(NHrKu+%1vxH`zAUZx z8smDct|bSvVV1XI_3r7^A!h`7<9s`e%v9(fquSf3c1@7_^O{n-Cg{IrUx;eI{cP>< z09X2xhMMcnsTt8g&|Bgw3yySLy9rqsG(kYySY3;mzW3wQ!zVj0qzRg3U(A8HSxplw z&}_X%25RgqoUSuJo)SSO>)J%;0TUjBLT*a2O% zY7ci|^?dT?ntjx@W?@!ldvn?G2vM`F_SV(Vv*HUq6hC__q?OXnR_GGvVSIk*^@J8? z&l4#}rq?vrt%+!$i=G%|_-=%}nsdq5L;ukP>8YnBWJk>1$FPW8ln{Izaz1|4)ll(a zkzen7k33_QM5Ek}R&kl{N8jPe&&)S$(W;6-4@*90gxm=3Zexb;26 z-M^#w)#_6*C_~?SGt2uCKTvQR@@Jr)PUu7oDCc9{ep#DLK2rzX2=*tie+c`>Z&B85 zDlS{)VS!7SHXuCE{_WLIbQl&!N)_J_@VUe(f)@uj*gJH!AkqmRUJrUxMt;s%t(SBM z5z~6DFdrO_9v-=c3gxKo&J(zub(iylnea)b`PPA>Fw0B3Y5en!PC{k}YR~as6b(25 z{POVjmc#2$dp1H!^$0P`%esj&Yr~WUf;gj**r!G5oGCowVy?eAcWt`R?_>UMXXc;;q&UjgxJ zqo8piZ@8PG!F!V@J{KHx419<{@Zg!jCh;X0vW1Ko7+?^-GQYhDn zhzh4wiBmTnRDGaNMx^YDEN!$@yf2GIrEI>gwBER0 zT7Qg>wTeb6d@pvHWDyemL)l6s*sN0G0gYPNTflGTK(9EN2NpO2eXy=m6J(Qg&x7~(=R|eS z8=+6oJ+F&0*tcDh?s=13HQ++s;sDL)h%n?GLCNwgWJP#TWlRSHXO>Gee zNJl@5#{7%!qnHu${895BJahAK?IJy$jd>LT`!uh+P8hCfXth5{`jO1dX(#xr&qEH^ z%(Pxl+KKg7*-o%)$?p-?F3F6DdQx>j?y&dp!*gdZ@$eWf8RfHE8y|c^PWu)QXW>1cu}Hxxk=tVK;L!rAMMY(x`{L1z!V6eb&@*)3iiiLq3lucY@uq}4*J(pIhFD~$Q=U-&a^>6ErKN?XZ? 
zOmSVREeC2H7fVyxdJeI1KkUJm$o9S;qWb?96LP;u1Ua>>Z6p2Teihd;=(jk+n0P1F z0xrO7YAEBwYX44lTb_G+b#}{U(x}M&if(M*L&ys$@(wO%3fj5@@h${$f!Ib5D;kRt z9v%2JmT?o4AMz8Av-sr3l-JU|k<1`*92tOHNbul`^VjRdv4FXCv5e;!vUO=jayfuS zVY;Zv#fbGR1aF+XzC|n&kvB!}6+H>#i1LK>R`A@+xdJwi8Uda|9kO;kAh1q5k)JYG zyfnd2$@KxJ*gND?>xNq2Smu`Hb!(yf6nqBwVCV`6D~;L2j{R$$8Q&+O-bB}t&I|rF zD1W|goj4|_LEY8s^Cf;-ha6c)5dT~sS`s*sUx4q)Sn_=hW!BB8NpQ;_tn*}N zOrUu9%GtVSpb`6Iok@PP?~SiBx6xYrG$BE|f|8jy6t~>GZd}WF-21jP2D`$)p?m{orv8 zzGSi!u$H}(35$@qONqUim$_~1_Fr~EkEpAjT$EWV@n`gdCOM&4)h=#WFV;)CGx}rT z2cUg5$r*irg3f|^{%K-)>w7*ai?|qMi-D}HzJd9(_lCVe3An_oB5#7fxMBPi;lG1s zev0o}?Lo$X;=>6Evt?m}yjd3o1+bV0t>(`vawso1B5RS85uA<_{G~~bVr}$A2tQTl z5q>&~I;KZo@?`H^L|6(oB__OpH``rFtHK0p=Sxx2;hK^Eh@PVR>A7ZTX#Wn0Vrhk2 z{?Kn1So|VgLej-6xJU5R{mMH>{R!WFE8Zf0>ip`wG26{&_$x;@Wo|P`A0^zaZz%cN zsuK1Q4p@Q&#_;wBlr{+K@mEj(uUR6^aYdqZVnrg)Iw4q*D0y4xNFETQMnqh>YH+cbLErfm2i?yj64MQzorh{><{1&oBXR|79Rp?<1! zZuo(-@aPTSm$I*M(!HJ$3Ow$v^hS|08qYu@4XyJ(;Tvc|{&?!0NzUw@fG@*l^zvW* z%9Hi1`ZW-B9B(1(dv5kl{Q`-y|eU{3345_II(`7<>!L zeT(I4J|Vdi4qrl|Gk_D0yRCE;q6t{(iUp@;y>5rDn^s7cGWM9OdHB9x1_3K?q`(Z)P%xuD~E>`(7z>}#~S5&Pd@trufo zh2saWN3OXpZsyBfnd6N3fJJy~sehe#2II!e zrd8@X>P7{p zPKVdtz*od)b@Yr0>YOD9iCVmKum)!udq69E`~vvUjnDTrmVr+A+_H9^HNd&(Ds#RP zwr+iB)Q_LDqbn|0>0-rmMZ?n!wi=eZSnR-e zOM?wW(~J`dLP6EuMYgnx6jeL+SOc&grzz-=A8xR2b&t~r1YeEPxAcSs`Y*Sy*!FNuDJ zY{-knnu@dtVY@1QLej@Y1>5NrTI^{^`0ii9q6x_yui~s*P0uRxjWySi;9He7DB$fh z>N?ciX4pEk3fW*$gL;;8aJL~gBghxH6S>Va7RU##CC%THWe*&`_jMLs3`?V1PN%V8 z{9KIV6dFfX=>J$RoblFwwdRbZ2!w?DgnlGn4<@PqU5qDbyG8|2MeL&;6G| zgRvfGm&eR;J%{`3`nZLAXMBzi`LeQDfm{BQ|2{ALaGzUS4$oWA&2Ef#c6+Achh+$7 z25ANCSHAY@{&tUFkd7kjK5<)q>V;i15uBDK$46eL%zkHBOxP#FgSf2jAVHI996=Lb zpec6{FDBe{aHf=54hhBz#w}ufmY+-flX@+9J-yu$- zuiXg!r-NnQK=;n4d+mHF@p^)9OptoOJJ%=5193S;MEO8D2YjS>HTGwpP3~}}_9}fl}55l_P?*I8$iGn>!C_&{wi_M4@+{v7lM$$8jZ6(V$H;6?hxsA1Lq3?#d4!$F1znb7-QdPti-@ znG%(!e~%;E=uiZ;;TqL*+=TBDvi5OrfCmH4%>o^GJj7Tf;PTCDR9P#8Y$E~BXvH&% 
zqPJWDqX790JQI^+e!sZU^Qf2QK3esHxG%Ro`&AA?=Y=l{+S6;=m)uX#c5ml=T*&n2h*&ebH!WfcwxdNKwku3DnYCkk)I(| z#~tYXUycF-+3vDG9he$?tyUu<59D}A?^>bYyRdG`se_8|q>|z8A7QWf02^sU6FrYZSjwhX~B_ET@zQZ19+6q zvjXBThI;fCt{deC2a56QA9$?`eX9Fyn;~AwK4LD>C|`+qe4a^4+?!Eu9Pps^_j+`} zTe|=;vt23eluMkh==hDuk7txikQI@xa=!kqsi=3!fO9oz2cQ?+d6V{jXhLw=(vX27W=<=3v{s48A^qu{U0aE*h{${<#mm-_BjO9LOxUePIU9Q`_TvWNk25Dz_5Y2I!WR#XvV9=s4e!RAybt|ArN5S}T%^+( z7cQ5K@cc2#`};>Org%;^`GN9D&w@vYeL-Y%#uqg`VSi6@qg9_ z=waU~&gEO7cmO@T;cCQq)M}(Wkd>4elyz+&8Eu_HxB~G;yYXw761Ai zxgsZMHtiYzSjCrQNsPi<+^yzPWI4J5P`@1d#z6C+plv(7{@bffm-T2hnlO4Xq8H!H z{&K{Tu&&_qgDlVe21%LYj+Tz)O5F8yB@0iPIKE);P$^c$2VxY~4<7jF=^l!j354>? zxmh2=^0!ZXr^^Uw-{2jcqamC=Yb`vPnEe@i{p%@$!daWU9>bXJ`UUxa1%ZVqcPiyP z8WVUdbh9#hy|GSwv0dTcVh-d*G$H!P2dFXjQ>?`pCI2Gdo{8*>fFOLA!1pRcELuZr zw2c@2QkpnH{2-IZT<`0<$iewa@FmqT;tLr4Lw$8^KHuXZ!pk$L4X#l1j-M$S%Q=Th zHukiXWkbtslJjDQ=$z0C_;PpzMu4Jyyoj{?3>-y$nN7d_ z9iW&US;;N?qfdDV`L%Wf$I{nOF&jih8R1!@ybkn;$t4XsG30r}`$iRJT?>uJOv*aC zwtVl(KLN{@0?SUB`FMy2mc@0;a6F6JTH%3Sd?KQJ?I>bG1lM};1yFZnnUXUUt&Ofh z#kR|#u?7b2=64v8?SOD?!fzbrgZhIX+Rp^C zyrl;HmHk?rar%l{rXUIr_am}-K*6mGqVRH2Rwk4C{2#|ZW*ZcIl!o>sACv#(r$}5Q z=q>f-j&5!6uU(t~tdRae`~yxCZ{R#Da0EQu5$}LS)DOH&$5WMK)~$T$vjC z`Tfzhid4C@zoTtYsyw5=xou&pT-5JxTaYSG?)SA_m?{hX?zZ#6jS!7c`!JV$Qd`^5 zr{>BBy9D?^5j7Be4;;V$b?Vvuy+jG_?IpZj-+MbeP?x|1^)h&%*1!Yx5_q6i!vnQS zS$D2sagS&5e=qPqViagya=yN-M`w&Lw6Yv-QQ)kb_FmhcSSE8xB5 zK4hNvlg|GB-!b+Q{6f#ib^WXA?;Zo#>+R4}g@Ws}}kUuYjzj;kEnLycr@}DkA zCVA{1-vM8btA377r1h$F!i+u934JfoiL7(z#P{GP6t2f@lPJHrP_|`91?K?wx4lsYi@*EhUftZB+18SZF z$bYcm5ElNwW1ul#|CZ;#D4zxf7#;&{M8sxGNX>Hqco~-Gz$pI{^Gh+%M%i^vo&%#? 
zlge}OZ8dywusw?H=VG9NZK2T|ih(x5}QQhCdU<0+W{~eQVQ*42YEdh}jQ@##Y^e`WHise>X7R=a?avl3mLve-Ei9 z4c|-8LHrWhM{W!1$GA9UpacE?N#r`S&pVs#bZEYhBX+$Eec6KTn@fjm6WVK@XqDmN z%a9k3M_lm!M1m2=r5EEK4KksFJ`5n!g7dyGMth)3i3Bjp*CI~C2>k=enzar8N}auI zpaEw+%Gv80qN%f<0eirRuiwzUOB#-(&R#IE4)@Mb&UzdEo;o{YUat z@<1BSW+`W9HtbKG&B6>5KSlLA8~&U+Yag)TUcGX5Lcfl5$Z6M5i}CyohQ z8a8Bh6=1m$kY@-Q|MaIz#GPK-V8WdF`U}8tewm|hP9WCjHy2w2_hCHu^=py$i{@DQ z${l*}O%EM22RIBRo?^w(xsZ8{$UaCb#r^F!{ygYYU{`>Ah41p^j@d-`6Z~esUK|ZA z3_O|q>F`wrJ!OZW3xG~&kz4-8Z*R(Iq1aKk+~ZGc8ig_^W&}xQ{@i~wHadO8v+(VU zW!P<71Ik?y|EOSklVz*^sYzm>(zRz))99VXZDW4N@D-KTwzb~Nkd=`r@h$xsL32}M z<*1+uzj;jriEH|ZY8;3aiR8tr-@Xz2(rx&58orC{ydiQPdbi>zT>*Ub-%vF)0`NQm zg*D1Io*jWkNG1R5yuWLB3!}GR4Q@*Le)5zx;W3I{Y-v^6iHDb{D~0fOIaYv>S}Qlk zXxy$oN*SDZT+7C)rF+)JMs6cp@ZzIv(&%7oS!O$W6I1(dRr-Hhj4XlUBNL(;ajK^{ z#TPShX~#)go|T5R*4=l$moir7DqKl--$>X*lC$VJP1ucAy8BcaHj&Drft+b4wAZNq zN5Dq0NsxZIDE%6O^LG+IN6(Mk_C7}Jb43mwxy|97zf&FS-OAP9cFhv+?J_s<(c$Bw zKGy_V)%qLA#GpHOUX)CJ{~}|F%ah3((4q@LmrS5fGf>Vz`3r3C;P^${cQeZGz06n< zjJJb5Y@Km-t#mExelM$tUbjB`;R^#64pWK`16_k5j5fS&& zorzw86+SXQg1t`zzDDT@^1iJ^(&!$x@U5JVRor8_ms@mPGr zh1LYY=1K57-}k0P&vVi>xySP^)uOh7y#ERo|4{Ko(2$puk^@%#Qn3N~2%VmSi|We@ zs_5(P%p5aXEgFb*Er93%;Wf!w=1<^+U##XhIshk?^{OM8(A);TnBM;4K81 zQ=l*4!j|MBnL;C!0G~EjQk3n-k*erakCEarBr`ZGjW7k*g1AP#;+*G)Ibn&295i@q z9+4l;Rw^wKsU5V!_-)X~2bLtJGYL}a2b^(}q3IALqrAWf>DDd-uVd0CvqN4nJ z4aoRM{iA4ld_x2F=pWvJ@6lTs^iABxb!Fo|ORu8m$cIz|skBV`J>tWRY4i<7Xk2ow zr?tneW!df5CUR?DSma#9&3v-#_(9|VEHsK|!UlXlO|89k?;+It5jNz4MxR2^9Z`M* z>Vp|=ohPxJvFN8M%*|+=xw0a05Z1f1Jqx_y-+M2fm9W*lH73=!$)Alnr*C2CV(9CJ z8TjU}QJd;v&gF?5xAnB<1UDn2htngfe$fkyZmanAAldEf@cyN#R-i`_>o5ivpp8Oo zovA<4xiyGi683ELRJf0A_aCip#P?qeh(_wfTbSaekygyRSDGFoIO7ZS2Pl?dV^r0_ zE{q<2iQ?U^|8AfV_ny}=aH-!K|AV$;U`exf&}eYTTJ6jI0n01>fhpPIP2c_c&pF9% zo}a&F;AHojf#V;1mt6C~%7GrIob3PCwFBROP&07v2fl&txmZLVh&r3fd;`b0?~}eF z+S~gB7hMv8=GRSq;nA1nN?)Fj8kIRCy(r6J4oDAzQJh#1Y@aB&$8j#LGygM|A9|^| zKX4EF<@^75d-u4g>imEFea@V@a1lmD1w|Z?%b=*^H8M9x9LIr7(QejSJDSz#x-FR( 
zGP4n{nH8ET8X~oAi{=(1YHBLvTI=@7c2KuZTUrCQ0%y1!5OKUL^L@U~3<#(E{=R?x z^zr6>UT4nx^}fB{x7Yi1p|TnR!L8&oT-}y7XaMC&4fHvqC?)|$K@+cZEMeWhvR9#C0-R#VG z9~NK+XZL}8fqHUVWlPN)|G&0dk4m|XaQ7mZC$=v71oruhh17oMSTakT)Fw^PW1Ihx_i*i`{wztoEGk1*wk7wuc_8Jt?A}_3!5aa zS#Htylr8#uy_?FwB@r&&J&*YkP3oIM3pb{>7#bN}-gokQkmNwW+NtNmO{!G&cj)QF z$34{Fx|;FtQ>o?!O4W->1)GB2Q+!2J&s?HNhTeU+I2v9aP`>+gqt-W;zMO2G*M%Be zYl#xu{As7GO~*ecYMkc(w2~;Z%|E<)dee!@X-y|Tcl&=My8RQr@;9yekH2Z<7gbHG zJXK9AE3%tbRb~5E$&`Ek*K0UbRXHg2)wZ&a>#oQ(O%76tI>d7Z_vwcbG&hCo<0o@t z-pig8r5n%!5iua*BZ#yA9=ei&FWYz_QJtSMa!A6XIbAfbT=3H27} z;O$GqUMSw7eVgAh*UD|x!ahG=5U^uMdmX)51uqg>DaSi!2lmI?p;?-VTV?EKbkJ1| z!QO|Y*)a$5p|EGKvciJ{yj2Xxw-o;35Abp#twS>7WP9FWjsftzOjcnuo@C5_VUFK| zH$M_5jslz>Ns|e)`Xa&KDa{m?~=MdRI2zZp# zO2thi)vAU^N!-J5g2ZdkayS0DN!@JVDAvN`l*S%qNVRea+>MG&r>{DhAaSZNJ0Jf= z9j++mlI$j}6Pu97N(5RT_JPMe%l6QN0B@}~EHKVZn!DeT z>8W+_duc~Y_N*_}_2*tN=bCks2UU`GpjPS&`PlvNo;F0E?Mt+qkamtYx6EM6uzJzz zoFgtExs=Ja6SdSjG?3xaLjS)f@P2{Zwb1{k5^R!7upK2ZxTplP>i0XsJR}9pJM^}m znZ1GSB})eAlc+tli?7HbwBy-;-ALWqZLoQb@NpSqX2cHc=jTtvIrVM$Ny&iTJf2Bk zko+W~mE{#b4HjKkIA%KBS%rCMW#>lQr|=f&<#K1r*IHfIoD;*{&^BTb*uw}580@@y z-H8jp3rV|Gd8I~2{?EoxO|>@oz4u_AAX%$vFIl9iq=9I~9rB~d|0woV>Is4K#OhM= zkEn&lDZC8HPm3GlF{f~;Q9lfy#!`kqxYF_p*|=g_=ocE$8uP$^p>}(!5ntUi((zVC z(+^tsk0aScE&M?xWug|kMC2O(Aju%FwCF%fTFKf-kP-K(i5M+r8A>c;DOwNRig5RQ zirQ5`$0o{+J5UY1Q!ik{B=^7SRDu>al$2+^>|5>{X-}DL*6JE6PjzhyAzQy-nB8D|4 z*l?s4H{?wbGlo;UTXk>R?%EFTDice}((sVMu=vF^ktoe=Waof$r$2rdr5j2=IzYPmeZq zP|hGw&Y&)oqsP4!C;)my)Qj*Z(}i;Q+CFmXaSeSlS#odCP45Ra_718^YtY~ZUkeM< z0@fWZ_!S!a1Q)$|w1M;vv{HY(p?f-3-WO#R5PbR2F+lhh!HrOga0+1`0_M3%8)35> z>0!~|2}~n!Vq~?&8G5(YN}LFccxYEp4#OJ?j*jzCEjb6(a4h!1&&6&87s!nDFWyF8 zY$*Q}Yjl}LdLGskI(4eVXZo)BL7+KVieJJMP_Ij8tU%syd*9rJWfX>#!;xlpJzwtp2;V9-0)O{bMQiop{nxK+;4@)t!5QR^C?-|jV{+%13mT>q(XWFvsRj2e=p{YRLqv6 zV1)(iQ@z&H@%(3|hD1SgomN26ZmDH%UJB166EfAVF!5Vjz^cgyn7v>UjU zzG+#BJH8pBO^yDpSKDWZXHX}NbO4<2iEiBWqui~qS&@xcl6j|?Z@iR~(Q&`sJjL^U 
zFAsU>z>MRa(YoRVScGd`Vb@4`m^m%-dvH|J1beod6=)$&fvj6GG4vrww-_p&s&Yd#R3#nhBpz-CH2a9jO0CaA*Vd z&w~e(JM|wV*WZYIuE>_!r@#u1M)EXppur!(lP3>AdUi)^$N8kJp}j@}YiX2s1NNF@ z(EhOqXI`g%*oVDtJp85xtQM!@eusm-0j=~r^ihaDl0CFf$NJx;1xOxNy3k904{Om5 z&AVjV7u-NG-KCL4jLx+Iy`?c)4+mS~TKE%!jaig&b3p7v=qV@+CSc{?apn2L3mx zRW*_%Ye}+UF&FvX0JWt79-JD0CL|@P@WRC60;4Y-7F8b(iJ9i-l`vY=st*m3Yxkn$B zzc#ua>D-az*9GRZMRi;}2c5Oz=BGJ*YQ%TUKr;D*!b76M@vk5K85ADgpG^RUNVH6= ze7}yiG0|!@4sQhy?Jzk{V0USO?dnOfd}`7;vg(NYKUjNs7q&7|Vey^5CcY;1$ah}Y ze~AYT;W@OPn?Mp^xCNH>V-G>M0?`*W(n-uAKcwBO_&7#xxuC$l~+4k`)&J^(eo()6%v^ z>LtsYYNU3B<#>!1@AwUvbB6`au1w;OEWtBq0`_DCt0U3q3;vt3Wy<_BqFTW$2wd7U(2w9mvBIKS2sO#~>&2nwQ|EA3L(*JjDT&#NVx zu-V4u($`PaI<0?hewyixJ!C(UvEkq(;-5?)i2JcgNEMDgx+34#FYpkp2%hoy%X#jR zF{T3txQ_Y>E?X>b)WAZR2J$faqtiSrHOtK>Fa!Ld^3a@+*1@ud4Zdj)V&|s8eOWZ* z#OSFMe(J~$iU#@zTA4y(Kq31v7YW8jcW>Db0*hw}=-=S)A^5#e{+;~ffC^wg!+#Hc z$4wiZduea*v-l}!?F&F*vdk#yGHYP5oOqL%7wNROhH@{4U1ajaLNF_LppR?_^AOtC zV_$=~Kf)-4X$YDRp-Z90nFRNjd4PXL{Gx`jbBJdHK8%=r<&k%R4qtg1=syJJU)Yb$ zWVn&`Usw6*ZUytJ!e5?7$`yO9!&*EPdqSh;k`_15I%^DUf%&ihbi#Q^1O0yDXH%JJ zkDr0>0} z7X|FXYou|Iy(nCZ`U8Jp9)r&rUXHNS@2-VOItLEKrt!2Mm_xiyTP1TNwc`L9nuxgO8I?32V_^vS*L zfj%+(qE9}qTLqkRfTyFpF(|JZ$EnzTX`Q1 zmN(-U<#pGdTFux?z!jhe>xoHt{$=~d{-V4igXN9>MR`B0eI7Xb0nabb;u^?D{PHYb zi|@0F8T&WG84xk`LlkcDlZi7qq+`_f{9!P0DN2T7gigbGZR7>6zydiN^k^-tKx4kh zf|dSJ*q6oCc1VlId-`Q^CB#|PK-+;ls&u58fHbt;R%5o*)DmCw61<1td{gT&Xr$8( zp^yrB$?|uSN`xo-fL{ocmH3?)|Ik~w;>3B*==aY>n1bLu6pa~Q0}WQ%2heQF*G9>2 z;161UqlUy=Px;O8&NosM@kcZ`{das*+Y~CliIv~P!osD~td^bt_kxwDp`Cgm_>lLH z2&WOKuX?ONLu;xnw1fwA7=m zrsF$0^}t&DA7M`Kws+taHYBEa+VfX z`Ci6@=dc&Z`2?0e12sQTpBsESSWg~<1Erxh)<_>f5=ISwb%9&TbWa@eJOHb^qbmb- zFN7bzm|)#IdS!?g>vvASvVHKpz{Rt{Sih0tG%OUNE#h%{!ETavBQ`sggYrJ&<_xBs zI>&n%E|fWBfWboL4OkyV%gcLRRz7PL&dCT=&-L(uG$^wV?3o6efktBqq_?Q2 zA+P75FJS3Grj6P4>P|1I0qM?t#~E4oJZA46QWuKk}O=EUmC`J__(<&xO&GvzFav>I|wSdgC{Auv9A z*|>dV zs?;yOt3~hA87NGr=7-A`BD(&?Nj`^4r2#i#TdLpxPBQE~AtsujbL{`Se?2wMbAfZL zTJ|S+H~)HS@XzQJs|Np;XrilsJ+*YO^AxM|s4;*3zxmfw!(uusPk))QHxd3c7&61C 
z2kHQxoYp$pHby=_sHM~3on^WMHwUz5245ccE^0`7eOERT=QVJxaX!e0l_|_vTI2Lo z$e!g|;29S*wZ=+g*$GZ6QbDYYPIs1Ck9?n~!#=$)=%KlitmB0M(s?UzdV3r`X|Q5= zkKlQdmA)bg;UCFHi7DXQtA~eLVq+uTjO*dy3~VP$*4cUZm&P26IbAKSfCWvpbfzAb zaB05|K3Bg)zkff1djAKRDL21EY z8$xy#d|m^epXm~Uw@X5Kz_OlN+El;T_F}LcLgNb$LP{C@f!(q`9`}wK)+qQA)n2r{ z)Q)mMizv0CWup`F{iU#=sDn2k=vPI1$l8$__IqgdJcsip*-oM73Gijdq!OnY?M^r@ z$~ai+vzNGgGob4vlv^LhtkWE!?dOXK=k6g*B7@NfU8A=1gE1G8&8|l>|I!?(4u}6h zIzHi>0=|AliaFgG+#!QQuzS6SFGTzsZjieK-VQ@52SHAph{3qN!!dSzI zrpf^|d;!L%oa6Y0DDd2pK?_dw|BeLsmj|s`@GKy%$G!W1;~+l>di)O0XU6*dherAR zKj7V8@$LViWX#E~rHAWtLy>+fPOa2xmP z62dtI{h`3RVRD7qLW9(*Y0!Ffa@y#l{_A<|JW4?Ol8OcT6BWzABSX4A|MmN~B7XBO zIDW40io@QC1M;mWRszQpNLP8=@1Ks?ih%J2-pGh}HF||CKH1!<)0~idGB`j1B1?YH znrvr@Ni$xxKo$jUbzGc^yJK#QAq^g!tyEhXEGbDJiezO? zEsgS;?5wOW`1E~cYACh#ncnBYRk$}z2VJDfUK~b86Sb{v`zh?*ORHgvi$;O}HhIp%Yz!Y#l*@Q4!yG<4 zm_uQ`9$q!7F0#a+UesIlFM{WRGWLjpJrFz}d>d_uvVV*?4ey5x`gNMHb!&suw>xOU zi8inRPXf~Kya736#Jv!Htv{2y;ds9jM+I;&;P?@D&5r=bImF)}{2C6S8xChD4m0}m z_+rRRAYD)3coXrayWpr6yVAr{oj59hBNsRxMY^*-$o(L;-UUZRHyjf>ahOq;F~HFa z>0Sd4SW0L7U2qh2!_of^4$!ys0@?s~(Nloq6ylQzzt$cryW#NN>gX4Sy6gvzO-RQ9 z$121t?t_cf)b86Nec%5`hC!)9fYSXv6(; z3&O9}rEfPJTRU+$fy0x7HbAs0>=uZYi@&&8^jCm zg5z8_9Md~-RG=KQfnyxf{S`QdAWpsuj?!*8(mHW4n*V^K1^3hofa40{YJ^{Fk2kyF z2<^mS1`hWh(FRDD1RU=pF1ibjMcr`J-t3@{3gFlb91D@I>Kf)h#82D>M^-l+Uv}a! 
zqfKqVL3i%Efuld-zIVZ4?1tlmP8?3)5P+i^_q~q;$G3>jA^ciRZ8)(jk_!0{&1sevO8 z@v6Jvn9~i%cbzyY(58!kBMa$1zY5uU#N+RRV|+IphwtD(-x`5KgLJO|$IZ=*wITdk zU81|;*w%@|jB@NLIKYocHDZ}=J!GsF!9 zv(WrEV2%!7>U-d+6;IB7Z}_g}AZBm)B>L9;hVLfh3~%^u5OnVwJ|AHQe4V~V+=kc; z8|82KuF)5XVOuz{Pfvzln$5lQ|Diowr^n4c<#!1zHlWrxNfUDx@qxzvZxtKPr2loA zp#5)f(*9y!rWSlz0sh-JmX*lbWrFl=ksX#LuMIHQO_Lmtq05j;?Lg`3H4ec# z6xR9{lN4guVp*cMU}`e)N5|gqkiC)FZUOv0;$%Nm8Brier|Ns-Z$&*z8dFtrfQw%( zx=m|gQC(+_-br!;4=0kxzxS>=GhC35I&d?uWrjGb7ZZ6+MhR;+^BFh(8OoYX!PpRt z^}PAU=}`I&<=}byqgxXzFZssg&+t&IY^tWu%bKJPYkUaeylBfOLuwAC5ci{_In&d8aBrOo6@%0|`Q7O*ug0vqR z@M-YNVw0^j{!}Bto`BnW2{;2XsM+{tmzU_Nz)RLV!epz`Klq}}0#5%l-Zn!<>`&IK zm9x88aCltiLkaL7NK~QT{k_k??%RiU)iY_tEzM+jg#^!^=8_u><7~JlKeilJub|;< zUQMUc_v`Y&wIELg)N>)QZ)s=RUDO*h%{_KyLqggMuLTdoMM8+tjXQe16wdl^VQdPF zV}`Ov@HdP-!~R`!swSi04&`VoL*NO+pof-$lnd_3Z{2^gjfEEkgOjM18~*qY#qwAc z2J)WpG2Hz#-6;;Um3($V{}>)*nDHuXsiUidV^v)^6I>cPKRr=59GYbYXOeXfTJU>O zYux}pJc;^T%LK>0b_Tx$27?MV)?rE1BS>H2_cg!r`>Xgp0>2-_?=Stz?@MJ`D403{ zCwIzOEydM6S0fY*M_Wy7&H5yyVA9@+`^+D~1~D`#Cc#FPdHKX0;5XN)cS9Gz3O}_e zZ1`aJMD%8sS36r}m!G#r`xMGd#( zGwZD@;A+7_FH4zfvtgyMVJ^Glait73A-P31TquXNE#r9EddxlUNz*>JPO`C-Nst)K zwLAsMYg!X|&*24Z_)PaGNJxxv_?z8h}keVGHSnIN|f%80yRn+rP@vquHdR8>m;%b0J&kuWrlA zOf57Zc-%;-cSLwX;aBxO2l>Ow26beiMAaf^4K3hCGHH)v+8c_FWX5_}Y(jg~!lIFbOSer4cf)R(*UX1FIBy<5({iqXg>7|g zvVV?P2TN7cG9w`eBS7MseBi!s4ckB4WkU@G$u7=zO+_i``3bp<@Hq#M1HM~{p2d5) z=4{pjzAgW`iEB2qKx_)ehG49hW1~S0?HL+}N4%*H*rwYh6uj@n-k@A(5%N3QD@$%K z*uz}LjeNEB*-um++6_f{ge`)!trpg{C9^0L{L~ORfHmqveT@}i_Qvk*WM%n%~~{QlC72i7#TqpK_~UH${;0v5#lc zZ&vU038)>D1_hs{SF+*Lz#~=d7T}$Rewn$Nam*c<91>^P&w8)-$nnv=7FRetPQ+jdlP;5X1Dd3EDZbII=@ zUyn8JGpv&~eEWgU?>8bY0NfYf);a{75Vm2IJqAs=SZ=i##*waEcIC&8NoW_9^bP9z zr2Drhr`Hj%X(#Uja%Okx$El<-xPeChV=q^0e;r=PB2fFu?rhAgbl3HgC4cIs8WrTZ z1xQe1cD(QyT4kq?V|GXqLvrCBq6ZD=;UUsPyN7JYluq60!)y=F_OOxpg**}G%`~!pL4^Z39PrPw>Vrptm46wG-OS1zV$+06t*I<8^O#QnxmHW zgN1SpGy+waF(EJX1bEf2gCB`N@*=%#3!pd3k^>q5s(j9Xvz&o^JJ5;^AA>Gg8Y@TZ 
z39*F)>&?qMAyuagA<2V)ej2s`_s0l3K$r7n{3WHFl{5&TmE=gt*ExKyh=Gf&TKI4j z!a1zb$v!-V6}#f8b!Bcr^F!G=1tapdl@M{{;l?pj((X$0pQ{-HQa-*E*&H8lY zHy*)+9)Zg}_)@R*rMtIoZU^QQhUDI*_92Q%7!*j;AK9bq(8 z4w*)FtiuylTF9cI0kl|DUjjQm2An2Tu&<)E@<#Rf+8DCblN&c8wGA35_tL4*EJ!W2 zg4e=XeeulW(s)S7{XeSZ}oFzzYOs z`1&KRd2v1Gj7aTMIXMm#&GmdZ+S*nb>G@@zGA+YLJ9bYVPS!!>>{y3SeEAygduGE{ zEb_e0`Ta`()86`TkPj(@zGNZF-q%`)($hXd2MascOXyEutx)yO$=E`9!b?w2lWUvp z9k~ruQd@)8^L9|C!hd}zII2c_zI?)@8TV_aW-pDmG}+44uuE7Q8T6jPgNVXOjN3k{ zG7Z0j;xG%4G{jn&!nk1-EuRH>N@UkOd>r>1CLM5&!8!{%g11nWRE_zhr7}IpQ+mZ+ zU!#Kl6s_>f-7jEmf1yhsZSV*sQIH}Ru=D;AXX53MQ^0;1Pmh5^vJiRl`Sh3N_x3z) zB@O_jtKwC=;7NT5o}laSPag-%Wt(6VS&+WO+3346`yV*t^-9CSw7n|ng4bBa7bFF3 z<8qM~e}LSI10F1N0&HJ|>)_WSpJqp?5_5@s&h|oL1|ugykUp&Y z8a5#B@OI+L?{J99-TzZVQu!RLP(p(Wdub}qhjp-I?Md5-?|5*MW--GS@RER`7q~;c zwq%64AF3H^6N0=JW*9AxKFoEdj7}d?lVx6p3K9f?+QYU$m}(bE?sT-kbYJ?>K}Z?u z=o19XWa(Je=q_YMLYhAxEoc$-Z|w< z+!(2{9B0-?L5s%`jG#k3!gJu83y9&en|=KlV_~6w|Mz%4hhW+OT{gt?5%%Lb6yf-z z3N$whd} zB>UANu=cBhMliU=9;{FD&Hj3inZZ5@Alr0iTxEII&268WnUIT9SU%1#b?O4%*y}u< zm^fX=^tG|8ECMXJ>wP){t*uYsck&j)k8w_EIZxg;Uz0dZUoi|^I^v`DagWPXB^#~8 z{dvg(`vu7YEWBl-b%$C_nIp5vrWL$C%KK*lexg*8O{6kfMZG}<|2eavL%|Fld1m`s zgya%@uhL5UrD(SwaRv04KeVH)u*)FW%~m!Pc{GP};dT{y+yTu{kHp~&AMVu1d!lH$ zkNS#S5#i}2njF0{duOTSas<~$lO|{zK^vr)IC1+U(3w~deM7|KZ?*dq5T0po_y1E3 z8(WCqUxzyYguy&yVQ#bs=l0>?oR4wXW7Y2YM^&)DPc!ILk3inqjFT?}+!9 zmkaPGYj==8w9mC|mGKX}Y>9Dvr5#(jI%AA`r*(`29?K1Xc3eO`0$(5O^7V#{#>&1l zwjIn-0{djN4n$#p)$>A6S*FE2MFUHCDoCd>qxQmSa0)Qj=V0xm`mG^q4Ln7I;sZ}Q z)TMDPPFM)}4?))wG3iyuVirt8jgs9HP@}AXeg!no26)oo$6J!?;i!F9u7`wrj6lyG zcLYj2r#82wtM@kn>GW>(R!Q|u-RnJJ0o7Xt z4?cIVx7^xhmGm@p=|6wPuiZj5DaD67+HQh0zY-<7`NucGxaeNh|BK%@1A@cYCul`3zn0} z631Y+9lL3)!?gcCTsy*%i$0!Yqkh0#Y@Y}^3iK~_7)`6X^smoh0M!I)y0BYK@v#A@*Hw_fNxf7Q*-0xSN-cI*@I_ zbIEGx?IT`cOixCT%Y8h`Bax93|p_Ou`hU0zTyZk0OaqobjKh#$qwRGOo zpWsZrDoK-_#v_ z$-29N-#r6*e+Z}XycvP^p+s}=<7ypjOUw+=TvtoD(fZM4gR(S#odGGJ6_A`%L0^N9 zH(H_j#_^yBX6%0QZHZvkBOIB8U^5T9+I^ua&usJ2Ys5G4Z9a|f204MCjyp0 
zRFY6n`Xjd?!|y|IAYE-ho*(T24LI9tC-f5x@cwsbnld@)D1Yl&@1a3{O9 zb}+^Q?ew>Lav)il?)dKLA7xE9TAwhdG|v!c_8gg$bM*Ie8M0jqZ3}~GzxGBSb#!WE zrrS-qM_k$XN+o^hWw~ddtx2WMb$TnasdBje4oje{;GTGy-1xgVYXHhY2$<8P=Wl7Pwxirfn8)I?0H40a?V z-qUEs30r=DzA{3F^swJRdEs^&v)~!*4N`!9_<{%tHID()@UmUh2{_s`;aiiWX=5uXA)3bFH0K-W+uov!VXiT-VQ$wGI;p*hZ@ zGSxZkpt^t*tP1yC)Kain80=-|iP+|L|JIezA4dLb5kD`r&t;vKrn67SI6Hk`DHLUauQx|;xhBmugE{8=a6 z-K_7Ry9+l0bjJtjZq&^Vy5nWK+jz4p-9)4BfR8?JQm_SsokH8_l++@rcni6AmS{*#}IeW$1<6>5!}7tX8sx1i?$bo z^OD`Q*tS@nTXT8A#VN?HXA0?#Ps zrU&KmHXBw1&K&)noFdotRF}DFQQg$w++?i_(8v@Iojg_2{dGhmX8Ae2?hcLQ!6Gry zNSpUQ`L`jkr%k%zDk-_{4vo}$C)hFX_yY7H1nDE;MhATe@fnzxa&L5{k9KbjS+Fog z2Ir&pTI#D=R!feO0KM{X~^=$wzzp)nDli;J}Zx}aaN-r{|LF5L3_4!Sra z+gIl4_CGiuEzX3UDun3>PaxFW0`LSp7vnh}VFf}hLI&Q2ufY6(@Hph-Nsr{I#&-Y1 zc>fii_hODZf;buQcErv@fwH|BsGpa1HY#ZEQ*Ebt1Lbqdki{FAFLPz+elO{;;Ql~{ z#(8O_RFSo6*-{HHeRPbK|2!3Z9|RA=R|ppnP9jK$c9)Jozte7oC15w3Li^>|e9k~E z9M~lclkIT)!K!Y4iX~+(O86_zN9G$dLet z*0JXkRPYEy6cBx}mxq@QW1ROj>ML>P0*{85`hph}asfM_1OfCE2k4|5TAk8O;vI~M zm3`0fgJXAKa;j&pEd+Oaqq16pMqh9;UbT>2B%;zK zBL7#C5>n$lJj6Qb##^Df_>3E$hlcI#X`d8}JwI+)doMm)*K@8A&$ceK>fzZqLIWw3 zUfjO3b>Y22_l9jz!-GB52RpwE>@H;vmliv!=|SrRM6)W7b3ni{OrOrRvM-%1SMyRH z^z8Ge;68x`^vpJ6uL2I|FyfvV9AWuc7gQzMe4#}B2|NxO@?kk76#NXGN|R4KOBGr% zSI$$7Jfq}1Tihm3xN&%;u#@t3WpL%HJuk(vgah806O+JU!VE6@ptm|6y8#~d|6oz) zIqnm}#sS-`PHgqC_8}Ns+!~K+=f(_GrAhW8=fEz6hxQKP4%?X^TSyEK)+HAF1dK;} z3)bulmcUu<-BNfTNc`TmMfPw;bk!f_j|6Qd)`jL<4Kdqz$pgK~em}rck94fA09?+P z5|-a{Z&*Qsbtv}5(RIjcZ1m9>@MogSzkoy;5AK_+Lpc`wiMrF^M`4A)sScim;cLf< z-#@I6EYa`Hz|#}BiHBENXGP2FtGN!=T&t?R&}10K)CQB_7*NaCf%ux7);=ypeAb>cpr zc2FMd!XI&K;00H}H?#5PDd-l!rhIZg&%2oWwzx`RabDvNxmTU4IuEY)0Pf@BJ{kq!GdU>m_% zXSw#q8ipW;@#-O}^YCLhlnrUCq=aZ(8_gwcHVl*`k+-$xgtlc=TBCa z#CuNR@1GzwWO5Q#wTHeUh+2)>!c$9ix>!qX{5@<1^3u1pppV*3nPdz60o?ka2O6HO zq15#KncA?O^zAhHUeV?30m=}{cVQM!GQvhJpjTVtKUtZXEvHP!SFg6ld~(*8=sXKq zgBWE*2{6MqJ%Gy#F{Ku)a*65O+dl^R9c%0ODSe#FQ{e8-L zQ|)|4^!IEn^|2Q+j27#6@Y-_PebzSy*rW5sD=gmmS#*H~b;d8o-?+mHVm^ZXY|Lq2 
zH_?2TbP<%Cgs;j3^!JcjR>oVy3b>syAH5CCkk?3J#+VB+unXSzhiIgG!|^zLYbTg? zXY_?->U?<2VM)C_0sIjj!W8BKhd)UNuC>O)Fz$YAX!n0JnK3R&2aG1%b@9@0tZDlz zbD--l%XR34BHZ4+q$})$T?M@(nkU+)F`yi7;%^LVV5~nm>Z8-iOmi~4>(=Eb{k}8KvCgM3qfD%lHbZyvYqCS<{i#w zGu4nw!TAaM6{^qHCg`$TdGr*tJxVlt$5V^Km*J8hz^j4B6>gXIV*#G+qjisiClTq< z7B2BXFI-yIq(#a3S^>6bCV`GHJ7W|ZF2R>)emwl)(X6r-lv(R$!x}KoIGp$MV8<>M zp7VHV8Gfm*jKOW|cxcEHzUjcnN#8dK`9dLj?n9O~3+o}x7_-5<74i$pro@g1$AO1@ znuA8Q!D1C)3z0Bafd62!PsaD?pGK1_8goWyDrDs1b$Ft;A2(6U)|OEo(M?1zU)EBt zyWpYibDJ z75U9pITuf(s_0^kst7(GrSBW**Kg%t*W%Z2u$F9^$e901yZ_C)cK>>;@zYzRUYhsj z-e?MC@n5%`8&y#!#78HXZ{&q?vh81P@Ra9X`{-;_`^94g%N`!3r@i*-(gBa`ak|Q+ctmVF?!Z|OIh5G84Hd0{jhooawtPeDr1s4M~1ZC1ANks zA(l55QmN2>T9g&FIm{0`#+#{x^qlfef6M>ii|g}x{2t$gpv?H@AV1*Pc(d_smG>~d zI>`683`ebR%+sJ;_)0ZH{1ji6@(=!=7en~??ezT4&2={uETJeD@4Y(j4Z?;NK8o)? zrQdG#H~-`J^c?2>RI9gy$Yo94f_^|7Qa>Dpf4!re!F<-|EGDRbQ99~&P`g1agB){< zy&vjD&&}3*@%{kbAJlSQX|C4%0^T1CPuz3Xx3YAjs8z@9dBvQ(XOpN7J?payvb1FHYAhT)s*OX*qdJ*XaPN!hLPwm;o(hsqlg ze#y5ne}?E%y(a2}HAu0@oVr^jMyS?`Y8}C6eVd&#L?3cE!1q9F4%p3mZ==1#&`Kxy zb)rTv7UhX9eu_`XtO?s@n+=7pm^EG(7rI?%ncnu^$Y;!7w*7nVI??wswUQ2&uF;;{ z4Dr^|)#7pPk}ub(duJy4`=okMVX9@L7^;58oLopPch zr6u}VFAC~@g&U9xSbqP6<#))jb?M;3b>htxXVHFvJ}qVG7X3R^Z`4^k8`>9ZOE&<+ z>tcxdF*ybDxObg+duhr$z_TE^5cw@lLd{O9N8oFxN9&cI)1sFH6jcVQV(K?V4dT}2 z2}K)3U+!7o66)V20@_^ImTDOU9HnZ`dmC+jK)n`plnH8TTfR;VM~mEC9>3!&)V8BN zKC@C=4A}Dka{C}(y&XNOeoc(fT=LmH>+w~}9vbHln(@5cD7WKUv3+^}LTZyC)Ebx* zj;poXsXR}bQJ&@0IY1qb>w!`ecT4|+Hz{2 z4QO+;z_Y3EnDG5IQ0sB+REx_>^^bT*>(zvHv_K4M&D1^ioUv{|NvLK0! 
z(10r1m#r`lrc|g|Dbu~9w@@ZX)_8GHIys-l9 zG9O;dWSR;EZG5H1JR>AwJrvJ9FmY71dxA_@JROLTunIgJ0 zYEZ96EY;jUIcX2|c_PMuZ#nhF?U&E`wv}RBFaOaRvjelc_I_%6Q0f6*zb6Z{(Yibp zU$>&qZZ1zCT0#q!sru8H9L-}bMY*5uZ_#Ya5Dy6&tV9pAU9_HQ8?bD&MG##AFCNlZ zL``^Xp-@C+9aKnvCyNJ!_2P|}1*F0E@cWrhWsoT^db>(Ips`yhRZ1bXwz>%TUb0yz zr9n;)x?Ym9jfrI%%qq*xVws?&aRjXWc8r-kaL-(ewuss)6KSq5A54^n^0zM^D367c zRt?r#{dRcqMVn~7&Cf*^VzjC!q25}I_wbEF0*0;7|hIm`1 zp7EB*ZB+A!!Y$OgJdbsXT6p|qq*|U_7>E2?m#4_BCXc>gi$x&Sjd>yBAr43)kt~69)h*-R7@szCJbqHlcRU87F%D4E2O`x#oI3=P+rS6gFa7k(O8Ky#G}Zm z#eUM7j$SF_(3{$%qI)Oz!zgZB&M;0e17o#xV12r^JT*8vk7(~j?;ijyAJz8TmRNYt zWW78ZB`oR zCswazk=xPxntLoN%rRQ;RjhfT;gc~Ev=~YEOpcI8{9t*+rpkXi=3il(%Nk!e-V$C! zD=h77XnhUL53{iDg=?`s<`X=v<=ZICZ>zp;o0oXjS4bt5=i+orxV%CeF&_(|Db_H| zkH;||mob`+Loj#WTAGd7-v{arC+bEj7Z>)9&Jw$ub|$ooVcwhe)nX`@ZlN^ENYlC$ zGxw52c?Xq*UMN$=6~J?Rq(8tBJjsla@Dg8m)>oWAL%gvn z!h2j5W5EdaaNf4LC$Xyx-Glj56_`iYh{sef%5&-f%nr9!p7j}t9+%Uuy1%8zyKy^c z+$HTH%-HGbVoNIA$dV-L{`jhac=X&=|n zyOF3tDV}bvq*lV|6YJUSrJ0teu(LkEkFuofi6|n9;JqPXJ$Dgy!yX>pN--VnO_t}#tw$tppSnF-UTuAdS&7n4n$v#BpUUb~iO_zO$ z+>sslSDiXucZ}uhkMXdK_Q$!snst#}{;8{9u*7n1i zJEUa(xB#+a3E&VU+?Q#z#F}2R^t9x`dlQ3yS#9g>oWEcFf-k-1d9{CK$%1~(=16U` znOiUy7{Q-NAA&%93GJz7v*t+j0{CiRQX04t_X4-u&k}}sQl{hgkX6vDEQxN8jy0{3 zarbZhM=U_YVi( zCg)IuEG4$zWpFU*=Z0cP#AUgSq^!TQ0+q3zG`vlCDF!U}*r)&$=JW zB%MK6PuubcZb2ZG-M5Da5+AS>$k|tTvhm%#26Yjo-Hz|4yL+1gbHDm2cMqsw>x6z`|#nonwtcDfe{7oK{wFJU_msT z89%BFhb9Pnr@L8j z2#-4tL4Ph-0(h2`-|CMC;_zUsJ1$EyNy2fm2L|6S$8*kOd?3%2h*u!Ifv^DKMFf&7 z?E$~g4DvU<(a$09*0KqnF+<${fDHHzjG4Z{{74@3EsUziXjI|uE){8xG(@{-Y_(R3 zHJpLn)HIV^C&s6lNnd@Yd4leoAr;g}`WPLUg$~T{$q7pqkgMANID;KtNJvO_NNdr! 
zc({?gX#O$<^bOK(xtSsHCTlsTRaJ5(gV6_Himy3l`V0)emahOV8p|Y?^{DN2D@(H& zle9PVEcJ?;+C>#C{ZBP)#3hs@rCUiZ1WUq<|8z5Hv6tVev%p7j{+Svzw8UT^QOfn^ z!PYWKXT$4Nah98;;itJXT3*K81Jmup+%el4>f_&H$%2O)XV)F^J{)Os3n<@pH)kj{ zca-nzhCul$8am2X)2)0Sl<&u1lO}V- zfv2f0&(CCrFWVMP+T;`1S7?dA`%!4;r!I8JTC>!NXv4&SKP>3O4}OqK8Ekt6n&!}} z@x02mm8E3df)w<=|CHu7VSULN$sPc;w$3e_0qc-GA=uKLsS>#H`r zP+TQ=;ywH`>n|6t+EBG-WN}q5d@~VwK=f^sIeT8U!MV8FV1A~0w57PJ@07Jwy&oyA z>TN5Y$+Fo)g87}Q(W6ikp9eNEN4vBfjT%>!Q3nkr^z5AjJGE;~7V!&Y6! zb;|e5^tOj*A9^@Je;MMVp~bAU>S?qgG##qBip|w3Q(hHBQbeS^%o(z)r9V!+tTCMT zpuNZQ8&+RtXg}1i3iYp&UO!RG86L0J7%qC2$iHvA-1Rs8h(Dt4k6ktx@~Zod_+!tZ zs85(DquTUEan(?ix8JJGRpAx$s`X2YXX+!UolvJP?NtVGs@^h=M`Rp*2!|awoEg=l zuK)-7xX@f*&wsCuG#bG0Go`RdWbfiESmDh{+5 zp7WSz^kbmql;W9#^o7;>hu2k^s7)%0FZZ5_{G#xzXAi0MTdFuc>rL&-p|7aT{?xO$ z%2~9!%FSJ^(PpcvHOyHRzR5}b=;c}Vg@P_=VNczoWM7yPW_>qIFRry<~JSwE^=^Hdvh4kI% zg4%#Ni@UXf#*hYFJFA|DnJyJ@4nTRr0mD3Me|%r1#L;&U(a$57)u1l#owQx? zw|%hk4xW+tl&5LhVw>F>rPtZ+tXT-k5Z(nV$zSw+R{;dOcE8x=# z_=p1DuYijc@HPd!Q30=1z&Q$dz5<@DfTt;7vjQHYfQFK>_>z+r2&O6|hGEUr@kj6!38cT&jRSP{0KWc(Vd_D&Qpw*r9;u zDBx@bY*E1J3OG#x8x(Mq0@f+u+qb*7r=);?R>0pW;I9>Mg#tdPfJ+o`p#siVz-tw7 zt^!`9fb9zSVFhecz!Ma(NdYG+;AjOLu7H^WZoSpLeQOo)MFs3uz^4`P5e2+o0T(Oa zZ3=j!0$!FK>_=2c5lym1?*A47ZmUr1$?ppz{eGEsRI5$0T(FX%?jA5fR`v>hXS6XfU^~_MFFS(ANJk{ zyo&1D1Dz8BM2v9IsF9|1fPeu5957GwrNdUT2oD1s!*d%ZCbHSEn2Qdn?IyPq&6ygQ%lYJty#YflTll5d%y3!`@XyL z?d+K~Yt~+St+m%)d(WOZp7V6ix$QYm@|-7l&JVpmTAo9m^Sz$)J)ZMjp7VCkd7J0F z#dF^1Ij{4a*LcpWJ?CYf^CHiAp65KrbDqU>ybk!0^!Zn?299jfrqa`c_Yftp-e!=PjZwC4i+9Qe6Pg9I-*q_heZ>VaveW6QI5qQ>qO)j)`xM) z>Kt!j0SCV!w9{u{ZOXZK9LhEjIvS-P8EY+8qb?j-+fd(oc%T_ZiS}0p~z*XrlQ4WJ)GiIV^Ko=CRCYDMiS2EQbX=$J&yUM81+lChjEi^QLM7 zidcqOma;5k8DWWa)ln(BXABcSCu!#5ujZYJFLF&ib%}RDAXNwboo^Ct7^DHMg=9RGp)!PzK3zEuH^v z>HXv5T3p63=!}Q;5VU05b&K)oUj|IZ#5iObA0@Y9u#@o-hmP+Te}6MFejcV=@MN5n zyayG?xgjiQJ5`9Rr(_aY5Q9N$@0PA{%-VFG!YnLFh@WVCZ$=Sa?`~oKws!l`xt5 zTW~ITa$W~vN{$GC$<&*Qa{)^nVu^A#f|p505Y3pHg-Zs{@rD24vs&?x$|QrglXzoL 
z!O)XFY^Bgx$KaWI7Xu*j5j=@s;xWz^8zf$nQ>jy8FVTs_J%VRw0Ce~ms>Eg=#S?ni zu{kOpGWUdmBQ%9x3h+{ZXJQk84%6F;W08sAWdJV&c%3*F8u~M7_t%KOa^Zk&JAAL?|fF;*HO1YLTT_;;MkEL$|B$u0B=ZwLMFK; zDHj>lM4S`+6FkwSNru%Y0C2HmH308LgM`xH1OHW(4G$1K+{_B0!E%A0zGSg;8UGY75Ljpcqs#ecnDesrm(N44D5;E=SCp(R-!S*f zb1Tb*t*UojV>2up4`I`+<6#Y=Pq}7t5VuPfnMyuF z4JSx*(C;NXGKYc7^#IPL4#`7jYIbab*KK?w(Y{u54sMX_^etX5y%gU}x?Qu-6+d9E zOJ6O;{iV2Haud!a%XPtBJrE6h>G!~C7~U_!b)bDDIrIS)yeRlF*~YnKfs;n00h1P! zW5AVcu1kq#ICipP)y3BKzJ-&QFPk;@y+dmrcznSNKgFd{>2%kb%#U(B=Wua{(_|c7 zb@)u@Qx%@`>pkZWxX$JNCfB*t*Wx+{3p7Wo3&VTPYf5meS(OC0g2O{5Np~+s9 z&1mEx%4s+k^DN87em9<qRZJcL;evH?T z101`P4!6gT>p6&fa^NTg|6ZKuXTy&Y|0hB(j1wejzcW`pOZ%AM{{`@*^MS+bCxdreVaBfgY?l|6|z+@$r7!I&YJZap;|Z^0UZOk!K*! zLPoV#+E3QhbS2-E6uK<_GW(0~Ulkbp*`_aC&q_N^~8 ze>XbYUwC%K3Gcsu_Kjc7c=F`)zVOP+&xg+Y>FwVbbJN&AXYGxv?ta;K*`fdX^=p40 zd^=Ol%Qi3B)Lni`%WL5^cfR%B(~sS>^8NSE1$WIGl)l3IWn?>05J=QwfvtRuW9mi3>#T zMoy>VSZq;bcRaG#pU6@KEAsoa3F`i*N#YGjM{6rj>XNlp6_q7cVWBe}DXiLX-TDpT!nKh|VWc8pRhE>k z-%wRucw`I(W@WT^?NxM#6yS=g;)<|!b;;FhBiBn&SrWA>H$*DdZ-}n@ z_&VlWmC&mJy!2ny8l_eEXttnhqxIS7^Q5-EPjFg5etSd^Q zB^$1_!ezx_oPqU1Kng*qY+Ur^ z%uP(U)cdUsPL&65Ux_Eu%$54Zb2DnG)z z+_grof1GtK02nh#V{$6iFygduNnuH8w6J7DwBmZBBSde`oEGsMZBm0doUfIj?&IUU zG{iyDXVO^)SZ1)yWSPY>$TFK{4$EAYA(nY8^H~tFTSj|JRBsjvymOm6rfYaOy$I=3)Hi$A4xB?Htp$fb@ zpy!!q==~WehXC(Ec@Sk;CSn1U!zd4-%s*4yxWgOp)G6`i(5WJ&IS&WQw5Tz~fCGfc@?WLfJvK3`I%3wBPUz80frJcUwy;jM@l1ik~M17D%=wj9LWz{4+RSeYp6P-dYVLMixC3Vbu-7lCJo;JK1wIzY%1Z1oocmh29ZPa{q{3cTwPCIJd62uNYCT32 z8*6(N#rAdO72!&2Mx|9&VO_l8stx7WZLqQAX0IzNFOD9r8w}|s!^|iSiz}`^f{77G z#s}e8M%Kf}fG-!ChXJ@a8c-Rr=7|vIWn>0UpMTn%t4k`bC<)JtmY0{UEiGQZVP09; zwO7xqIn=a92K0ZAlJYcX zSGLjE_8-r z1SZzH;xa^X;VML1SJ)9O(3Z@%BPEasF0PMWf5eh1i`R$k^--8mRaw}^>PW;cEvYC; zcT5h31dG)B7YJ4u$XMq+f_K=a+{7!&tD>;t!);{zpqjpLL&=hyK>3Jt1ex+D!VYWdgZFT!o{JLYd!*0jFqcaSe0j56=z!EGp))6D3B}XqeQOE zv??;Ka3(IGK(3sR61nmWtKtkRdcAc)``9y-7Ua%fTch=AJ>t?$d&VPh$9d~fn2!& zC359_xgwWv9Iz0+hY`V_h61^A0ZQb``M81{MxI-|p?t&jSC>~+&b7*+%wjmDv{_c# zY~=%HBmty7(+*UxEBk 
z*uHLkwA4P_G`8r|GVSX7aA{R#QG14Tca@esquNH89kFLbY)oO+i*G3|19TMi8I|^o z%Cm6W9F(K3xb-nwSeQRyK!Xc%)qi*tn-LjBK7QCf`P|vXbDw+2!OurcRmlAHn`Os?v$w z?0g)KXPx376GVuiOAyWwc6fQ>pWk%Y#E+n++cRaEMO^ae#$Bv~nrGR<@qL$H`HASG zmnHrwwn|1ug^#0oxcfM3LvEg37BN#z;v@VYzWI}l=@V}KBtP@9@{w>3L7~fiIri$z z9UO}dua}9#)$2FNFzrApO0fD;iBY3ioHsxo$vd2zRjiL~^~c?SW_qpuu#`ObR4pUX zPi7tW8-|_oaOW#4%a>L<-hB~9r%$Gjsn)+n-$T-oiMWP;WD2wdaEH5*g803m&+o{= z|4VjQIdk4PrWNltw$}|{Cf|?cob%+}RV%P*ARcABzja`03f_U7d`pkzzj**_x%&$a z^zXx4-u+lgm@oz<-aE$Qk;{(562g>%&VBpjzQp;r^u*!)Q7k{~_n{P+6}Wl8YTq;f z{N6Em|Iylvccw3#^F~VRmcG=k3;N_;>$b652gX>dd*yvdtc(oU)|P?UK5KT9!0~>r zyhH6P!o9qngzI=p(VvVGOA`CXTNCy4K>aqYun ztTz)tXRP)4UOe8PJ;s{V=ZoFZXEnXNFJ?{Y#}dYr6nhKaPi^Ww2KWo{P_}<8TcjML!K0w&==>wy?+ww-;euDo5uSg zLum_Suy3rksn}U!|9rn?LuYl?aVgfr19t!~LF5}jMqTkXWQ-hdy?4BSQ{Q`dciPyL@Kh#z$C`RqlYiLw6ybBc zFV?eZQp{=F`MfsW)3*Bop8_7RccE_*bTCQe55Dm0JNvA0v0j)>k6UlYpiN@$_~~2d zBwuwBQ=M%7lsYMxtUCEH(RyXO4G;9m^5(Awh{-rxiMcMsUGfjIv~Jt>^ilg0+mOCm&3DYBpeue=`fNMs?2Fg_{~ByjbT9gv1Y34&3AQYK z-SGj?h1gXl>14R*{D5*uKM(8a$hB{dj8r4H!RUtH7#XqmkBoF@qx0jq9%VAhv}IWD z#`iLk;ro-}`x9ah4U8#*e~+NQ1<{r|l<41BhMs^G=l-RTQ(Ops9sLi_Iq#JB?!~_3 zJ$mtH3E*=V+5-9SkGF2?7u>P%)go&jaHTKFy`Xoe-1ArXKiJg%lL5n0^_{+y+xrAA zbkE?@x4yEFiP7pl`;b6NMJ!HyiRgcHD8jxMu=> z!Ezwp-+*z=+CQ0az>bIh#w0+-N1;yfu^1zJBy$`|e za!~)}$64sJ==auL@E@b~lVO9upz1n3uyr8LzN=?UK=7J+7lWphD^Q+?9D{&Yy)rU# zaEWF8SI@}EmhO>}zrH#$lD`kXBYlDD$qzj_ItoHJ5%3Jzh~6%teDJ6Mvc9x`rlYe< z`|;RiN*ru?67+|y8;;IS15D&0?TPm{^*(@mPla6kzK_as_J32BA#2Ovc*sy>I@U&i zgIsn{hGWr2;cX9mR=gkf0ls8h9&I0aj(rGk*BrKwJjXt|?9N_aN==_!8|$_YoICcg zW#^#i1^oy=3$}IO`IE($9??sd*a~8nOz@NG(o4XjmsgKgz4W5oi<|*{ME^80@?$Vw zu-LK+|A^mC#&I!pWn!mv=%wwOpkIf$DuO-~(mphyNn)r62W;!;fj^JYem=+1QC|N- z_N!f@qayLMm-c@H$H|C$q#uYcJSN!G|6g(+Xuih#Z*k$@hvU?yEfTl&`(mIG!8cE! 
zOJC&nK0oNKLLH}%PwvJ09}|*g9I#W{}-Cl{;q@Glf5$dv zYek=bQh9Gv9PSC$^ulHAZ-RcMuSh@Xj8l1bV2m^VtQPnCU~8SCW9#;QX_xqbGU3zH zu4kk@v8LWf*)GU~a>(y5!f_S8{x7^>zs7_aIZeG)p@V-)jv-D(JL18EV`u(YpYmUa zF}iL*#%^ho#9uO2S+Okx*Rf6KgQhPi_LJB9=aG@K#1=toEMmsERBOxFTge9=4T4u0 zPptjJUq{=}2JGJK7hU49=9C+V2iZbb4?FdVu6{|lZI>2@bvLM z&=B7{i1JP3!62STBUkm0jO60kbfj-&WG~7TJeO|4GwXDmx8Qsi%0Zl$p?nbmOEr#f zfRBFu<&lvak&U0ul<{=a)`5hW^#2Cf<T_CdFBOalTOhZ0ODxa(rYTd|_`~ zu&H;x_;J`Q9l-tV&j#Liq3a;< zf{>?-cM`+Mc=iq82XK8IuBYR=jCJnoLT?a*Q8x0u@SW3;=e#sBvJNtP66G<-7ybT- zKRy4C7xejF)%oZU{fyX8D&+VOnt`#91MIc;bl}?l zE%+uli?<4T(ElXN$!sA`i7ke##Prvjxyln8M#9h*k`M5tl zB==n0kMLo_bA(HrCUi4EF9^ChkdeqL*RB~D2O1%N4dNrn3*S5vpS+#-iOnUzZ=H(r z3BsNOU72@^58li;d?IAi4jVD>Jix~W#Mj-`GalD4WbV(gYx)uZ8*kk+I3DAO+%Nd) zr1AHoB<{}d7kfiYuwVF11Hb9ucPjABd@ccUP7Yw+i|d21HL2q*+-utX4CMSU>N^Uw z?f|W>&{pUrzh88VeQr}Wu^rNHK09$sUk>a;coCl!kZ)$eCT_(2;(Hu8e55;O|2^s| z!u6Qzy4ycJkLy?Ay4cn_*0mmdNdFRg!ng4J4Di+hPvUqt{3*cEJ@z+Y?@Su@(=@@q zB;fd7^p(xXC*k7te?TmPYcJtA0p$%SGlBCp#3Ya5*u*9GNA`V1*&f>T$eMiQVZ19={n~{0FyYFN={Tn`u?fNY6 zrJu!v#sttxg-zjeYW=^4u7plH?#~3B9P195TlVJKcd)PAF>sW>srNMWmGR*3bJAX0 z#%w26neqnt!MHrZ4?5CEJ80wC(CrVP+aTaJ=u4bog|-Z!KfED$V%Ks{9q}`P57!7= zUfsU}^)vu3*TsJ3;~HeMAHOl`^nHmdf6ZekU_!R7@)FsvgPemr7ujEq^QVcMhWp&O zU&OU+)Zs_FMORNkFR2%O)Hj+~0E>=q?nWOIT}dB%5ABxmUfN@L9S5AhpdBq9I5wQV zc?ICxJ@5|_tT*o`f4k71VfK#9gr)`C%!Zsc;~qo*E#S*Kz%a@J^vj1lv41Xbb8X~+ z`U`&$^Lf`?>~-81kGAebxeqy%i9K1!_J3mjjEU3HP{O4cpDsXNfWDXY+mVs0aNONB zGII5AMn;+t3%vQ_$jEr$r67=%Z^uP)=e*%>-O@Mrb&O-Mvl{T_^e@CD`{8FqUg#qe zBH&Hxmi{1meG2uTu;F~CPuz!N_&ev=xegyS9#sWF>&0WOQS+Ms>7=8bGX4rJ)5dRu zj?}RYv()_}rr%UdKyc zMPE(|NuLSzqaRKF5!)pF(81*xIu>K7Ut*8VI}Gqpiu5M-uopvEfEs7dx@R z-!4p4KLh!GTAtv99G((gJk@;)p4R>oo-QR%wcsZc`PJWzjQFA31jtF`gRORsE~VWG zc0Jm%x*zj!EH_JBj6RUNbMRQ$Ln_)U?G_tIakcqe&^PzJ4O{ou?i}2M_aLUimdvvO z(CV#0`w$0UZixAVtlLvpvUZbIth$nT@`$ds45VGNbzpo1KF|7t#PAaHU_Iauj?S2^w=-tkH6RC6nXA; zKJ_^knT8XeZTY^w!4hz_IVX~#!+I-)}?7drh`WRqamIlg~^#Dm}$^2LyH zpA%Onj=ENZ_B8cF#(n7DgB9Z@_K!@u@bGy8+Si8xbtB|>E97bBM**}m5A!4GKXGWQ 
zw9`FT%JSIlN7`Ma>!K;JU(@a+m+fB3b`P`NH~rIg*E?;Kc8h(Oc29TOt=B}({(xSW zu^&2u9;>ikEB>y20CV8J)KT}t7r@2^FT(|w_)layk+fwz7MYrQ#da-w6KuC0KD?*j zT_@~jf5ke;xcoxQTQIM5&CSjHZXVjY1B(y-Adr7GGE#%%pv#Z`Putie%G%i2^nWUA zwWB1oS>g*LYqyOhyRHpc9k9Vqwy|c+%WuMhQ}SHwU6*!ptyJ2%D+n8lU`;LtJA;im zeYXhq#{R2zrTtg>lkLO$CGX@IoI^{;qOYMpBBqDj$5QU`ei@r(zJF2wRpTa(fu9-^ zfxpfI-&4SM+o|yX;M>SE_9^5U20uog1ulNiBfmlNd)Y_${ir1;*h?zWYDm+bMN z#%V(q+c09D6&#LRKHgTOj~ZJMyYPd@w|v#Z|a+j`2yCW?u-xM``uIT-4{1>OHYo4rv=E> z|3a2~OL>15;%iM;Tth0z0>E5^+?P<`rfSrZONnOoEc6Z+qzHY zo&g(u7yV!6ooRv5&pO9JX7T9%^89_09RCdKRx>fLh7TBv;|0j0$D+OR!^KgUs%M%o=3O`Gcq3>esjK z%R;-eTnB8@5j6*=Ufu74zlCAjXE!%$IsubJb> zx~Nhu{8-aySPxMGo00S0HbH63>~YVPOG_>Tl!tSdk0mAbSAni*%gC{Hfo} z_Y*Ncj`=j!ljnlfK8b-aZ^pWx;3Z(L7>|5D=wn{en`k}!hKzS|o)8IP&zQ_(zJ~s* z^A$4|8k+E5Q%;oU!Am4=xDWGkonwRV!|OZvHpH7X;M?@&*wilg#nsL_myG{1A4;Lgy#_MRk$JVO z$I0`2S+}&T6+L*v6tOUHg}$wOjLbZ4C-73I;-Sy>Bk_^&*C@IV>YQ2lF#6YhnDyZA z$XwEAwfD<9%Ggj4drokTYbmm>BI_x67HA>Pdsg^|{LFfa*n;roT~Cp<6vRde_W|F% zmV!C3b3fKmCcFc=*4TItI8NS$ZR&js{>H3h<@iOH7dg)_SGtbH=5qY)1G27_44yuh zX7$UuU=sYP`#Iuf)cYd(!~UO)jGTzl=uLb>eB_=1E6~*csBaC{+AzP0-81lLFLY9b zJzxR+=1%0co&>w8_x8n)@AF+0-*;=M2KuJkskQdOL}Xb`60G%SrdfrZP$Qq@%X;mBDeR(MQ-n@nYy_r z-o6Ju`<~-{*2Tv`98<^X+LQKA>uy;ujEvKP2MToWAdyMH^*1y{~5&e^xmS2VpZsj$5FTdT#U?(3B~ zbNu3)!B0(}Z*gK@L*SWxW%FYL!c$FAZEy8d9Q$wU$)6k>D7YpzP;^mjAlw@paLYmF z8aEw}@dI`!bxLf1E6-*Bj_j$@SpJ{y-8q2uL-C{XEa^SuG(4OsLVg$a(u6_soA9Gk zaXk6QSmOgc{YOXomiQ@5sVBq-U`LSE0vso_pl|_ z5L;P2xE{n>IN~MroBfGFKjyo@57zVqtXl?sxaPxe2-&zUbMT~)f8fNpi~A3vufQ() z{|j?wJnt^^JafrL$IL~l!{l*z7WT8 z*kki`)KLfjum*dm^AfPHf%~YPI(PABc~&0B4;CqkPDDYk)y~lTW+vWFv{QvLPVJB=5Yot~W@&V*w z^kZFj-9yN+93$1SeoCf9L;>$uK!yxDaea~)T^jw7z)QrB^j>p0(a9C97!xQ>Ia z<4o6az;&GFI<{TM$*yC+>o~!6Y`Kny@w`gLOlQvFIv#W#_qdMtxQ@GA$DOX@cGvNC z*Kw=sxW#qcN+lR9p}4_L$2c-*KyEwd_7{2yRkv$ zsmDe}mLUd7LVoS>k&*4l_af&YPe=anha)5ZiQI;~59dEdz7_ea$X`O9iR?q}#`U|A z%aB(fPecBD>&VDFToW2EAU}v)g*+R1EOHO7KZSfB@?$u^3Hbu#Dah{u_Fd##k&BSe zMm_;~9yhUoABDUZa(Wv1LF6ZJelzknag5locjV+p1~a-I8OZ8+#F^jP 
zN7o$?n>$i=Ip-rM$AA|b$n1#$HwN3(-#xUCjuGFK?aq1do%F-MT?j9HIGNuj^x?M& zli&IH-zJpbg0tQk{o4=+UBAUA*9Sf4?%y($>+at&lymoQ8Opi)w+!Xn{ac1|?*1)9 zId}h-p`5#a%TUhUzhx-r?%y($bN6o<%DMZu4CVarZy8cj@_kLo$sxpd$k`WRf5B?R zc9Pd%EX3lZ|6;^@$lHln73Z($#e5_3rlp{aEc zKujvFw{esF4nz&&b_wUBa`Drq5+ME?x$O3T_gm+m_M7Ja?|jYtU;K=@wE<$4ut@Y7 zg#k#macq+4Nm2o_=v46Pfg^bcC!#_eI{FlShmo;9=6uIo^elM@Fu6~Xp;wAyx!0)| z2Vx5`5OU)Q?m5U}V*(dDk}P&)lGH5;UBLMUrCbvGl`QsaP=yx4Jcn1@6S*134KPZ{ zQm?d4lHt$BF-SP~i@i&Z@m$(z=-r89;X&$Y0FGpNPu3(uZz})-#@c|Dh0Z7WZ7d|4 zB-bS4cBhnncW$6 z6v}s{%{4(c$Fp!O^$UN}m&<@Bc59NQ#21hUT}3)W!aqK z;)=DU3p4OZ$+`3OJK=FY!o=w2^m(6L3G#YS7A)kPWz&m}x$q0yU}GU8_{~YbC(WIIE>?cXrreI#RrcwcjqGUP3@uxCS(DUIyqo9Jx1BK({GZOH9djLLb~@FvOh;RzVT z1zs9e_yN}QTdaXj1AO>Zl|T_YUlTfRGc1$NM}EJo6N# zlTY|l)09qEnRs36urK>PY+vXk#3>!DZ#(HGoC(If94S33br=l{VjppGu0mSKl)K=Hv0Jm;DXRqE?<8ErCHNU%()OBh>rC2VQI*0a{*Y8p`E38S!oi-C-osu+p_7Cc z^CTI5E~0!gu~E$puSH-h_`4gmy%4XHCZfLzJpM_gZ|ICaLE+J#DcmOh3ffiCDOzt4 z;pf87h&^^a8ggs=jgw-+rpXYciEjBj*E@Tm3$T$B4h1d>$B7KAQ>8L-VCw);C7W6YpI( z_2&La;qk=(1MRklcIyl%xOOZ0yYQLty4Gv-_zvM25#>KjI-wPcKSX^QfAuHA!{1Rk zgoyuB>bKxL<-djf{izd`PTen6j{)M>PE~l<8m-specT*{ht5(yyNQ42C-{dwltb)4 zltMe}eFW@@+`M)kI!WoL{Z{!gb}|M2R^*fTh|)3bz33C{Djs5Y?16s3@P9kV3Ot|v zI)nT-fuO)MWuX)aYI0Hu>|gjCoT?Ozy**30wNq|JU)3{|&Y=fXZU;$c6~-~4-*`yn zZ|tEB?n3D27izr*ejNLISB1ijpZ`7NH`zMI3hh+=ns(tsFAn|sIEAON-d~`f3jOAG zrPDz953yh5diuqZSxTqwIMuJQxAAC>@E`Kn^C^Tk$wL#QDC>P3d;SE!>|JfI;j;w| z6nKg(I3o>`pKZ4K&$hReeN+=vt=KRKr=Je~f**vVGHqaMG}4SflH`zuO6o^)2wZvA+1 zNK&Ko-?G-T-oR&7KFx&3PuF@I*xzl!uS!+8cig!O6DX-S2QPz5Dj@!~lz;U*3Rmgj z?e=UPueWoYGIG0{_8IxR;%_9KrbSw>pLS&v-b=k~yh|yY{!)fh(N`kJFJrfJmMZ+< zxr%T6%w*_6e#4{bMTHwXxs-a`K|LN~z4aWAVo#}@4gK#!4pMLWIOQMBbkZ58DSStw z!pjK%;X;M?G$}kx_%WF(&+4ZXzK!rw`iJ2kYrW}&k0n2ceyVW8e-#)Le(F8r@~zkz zB=CHnV)n!TQjjyMBqb|55(f6 zchC_eev{o?l9XG#55fN1xLdE6Y;Fj$*E936IQSl!|l=^z4qV8IJ0x6(lPD5 znDWWJM&XI1{~f}6?o#+}!Y5$7lX^4fslE*Uvr7~nS)}}Dlb^2wP4IVgE551s3i`vI zt5jc&#IL74BzKIqhaSd1_CAHj6MrfCuh4I}UFA?q_|a)9hg?~BLNb2-Lh7sOR^>Au 
z_e*+ux#HJzo@?xL@mQrFFAwOEk|>AE*seT${78~Y37bZ6ApC4h9bNB-l>eSt3O9OR zK{+?mK0~DcqnS#l<)@0D@6=*hBaCzWY0AIxpWmRJ^l(0Cn4Z9Wk_LE9~8{zGM3!RK#DIDr{(tW5%;He%z zHlB78Ia)2s=&|%<#m`Msz0^|vLzwS7eD0xsY0oEdJZ*eW>ox6~z(M;c%AZ-ME(~+FVf!DFDvC{_#;WT6CYnZan5_3|CaR=_&vc-C{=kXHSkV& zGyP#2@e?TjSf}C}xxLGArvS!YLOq)Kr-#U@K}{kPfY0;J81-5)(wUS6w}c8 z1LObPU#lEC$xk}t)s7*>G;!l^?>r{iLpZ<>e3h!2YV}}== zp>&Gy;U-B5l>c0e?;^J$j_<}EGR-*tj<(DArD?Dq!S~wF<7WOy{|WIrsf6~wgX<6N zg#R9;*j4)Xl)kCAh5EH=59!3ei~MAcQ~4PD@8HzoKd5px@Y7P2epij!bAWWdbc({0 zUsre=?HS)@bNFfYjI*mK{|M)aMWl1xa>Z}tdP^PQmm2+WDWRD}MfU%4Y-VKg)I#G9ehmaA@QsW!joeO5Q+W7Qweu$8zk;||>>>E=(d|7mTk&fzP&+a9@DS`u z@V$2RbMyFkSG!s6HaZA=#mD_F=hgCj{@;P4V8-5zN z{?q!r!eN$9`Z^3o`_1XM`b#P?DDgJV^mjf5XVd2Z&s zj9}}#l#k^Z-+xQJ9Hd^_c*Cz~pUE7Llv~UC8{2gdA8(WtBL36#Z;kYCLBj9$EC0!l zDSkfsv!t^qhrs(vS$_LVQZwOgV^j|5)JrbqlSla!IoSB71Lc$WSH({w{`r(o8rP{x ziSK88Qp}+Z!Q$c}HE~)r*u*!_RocnZkeDL6v_l z@mG?*cU|ag##aT5tBv2dgnG%4g=?fH=Rcl5v))pfP$1<|{RcFGLp{}A;{bZhNt3eP!K>6rEKpTciTdows+NGCrxfIWf7e?ut*2>&X_)4J7t8m+?#j<|RcrRgsQV0=# z8~7CdcW+ldTL}L=`+GB=vt+Ve^|a?M*{Fe3O8o1|f8-$*Ob6kjR^i{~datp=IF2tn zT9nTW;-AcMs{S`>KZgEg9H)lgQa+7dj%K}kUQ>9t;b*qiYt2(ShM&Hr3NKoz{2Mx# zk~W;&a2927wJFGxFzi@wTEQWUfO?(1l~vugx^g$r+ehQXNmID#&sA& zXTu2!Z+&{SeEt|veH}WW^l|E>$ z_fkH&b5%ZJ!b=w^{RE%dRTANIso&Vy3ODU4CcK_-oA|FTQ#uyc$ukLGit0rESzNy| zc5)>S1Ri8OZ|IcLKh*wyv_1S94qWh)|E`?Wkp3#%DDd5jRWF85ANza5J<5M0@v{kU zmW2@{{cl-+p*?JTR_!*8__uIg*1_juMm{~qXuZvx@5B>-CFcuCe2#43W7)2@=as&x zH`$E89LEj(UHC)k7pYuNFmijJayUf0N@cyBCO+i4oT1Z)hKPKc7+>LbC;e=N!fUy2 zrke0g5TM|!p8>{-{${`M* z(C-A^$#Kf)_YCUMyB@NFdK{)6ZPKZx{H-4=CngT>;JA>@br^%+jR95a?fHzh%dE%d zlh1Iu%0G|vzlQmd;BQtfS z@HuQQ>BNyvccaoV@ObD;_}QJS{m$6oFc=Vi+URFg3+QL$e;4~pHtD~L8--4g>loVz z@8^7E_n(x{cEYD34ix-dTyHEOT$UvTKDb}ymQMHwGZo&xceH=#T&{d}avmBWevER6 zN+69?M)=ATl}^pi6>jW#HOJHLla+om0|{Jzjl`L(@MC!2Pk|dY*l^;woW6T z+4A5UX)iB)mwqXi^NV=G7jXTvp8m?TcMQj=RK__*&W(&KHvUxkY$lzbo~iuo`IEwf 
zgm*E%ZQyhKF8Yl-W>U`El~2>IFE3U2cCH7)%$zg~zbA6b;C@NNzl0@@zW$SFo?!adIn;aRXSLom;`axYe$oP!a~9zjLC-?J?JG+EAmPoFPg~KFoead7pRqF)UPO4#Vx^zLxXaj&&A8OQOzSmzISKKM z&`I>{Z`{fL74!73B#2k+Esy)4j67GcU5#v)(aQ;J*CE=2O+GhpJnDE``EMiq>Uia+ z+B3hH05wUyWqclC>b(JWCH(vUs_imy>G>QNYCP+ZcT+Eiau=eVb2%E0 zWS)1wO@EcleP2QL(;2qXKX9(vf0=WaWzCtcbawE0Yn1RUU|8hY$aT>q!n>&7-CPG7 zB0Piqd+jF~5wbHlQ-dghCATNv~CBRQf>N-X74{&^} zQLuH8_7Fd)c2!GwbcN#Y;&Yq=!hcM?*i|YA6AxU?aj-L6=^P~fafqXY|18E)HsML+ zzma|e;&syB*)P0y^*G1JAwCbvCjJKa0ntkq{eZEDcWDnDf72dq=$BJ3-hTJgNsoZ}8{RF0VgdNIEGOj<%CW3AZ_aH2rSIGNm8#?B6(@OIY$W_j_vEQ8o|1A7__t*WI^%m5rg`4>252TZQ zq0;FfKhM!$wH>4UYIn?m|7qIOl-wcJv-=p?ZM|jFCh40|{ zPded~7b$!6AJN@L}&Qg5ubD2HB7rWX;|6t^wkfr#E ztQW(IlfDUhQg0)lpB50Ffd&fv;8iLI^W5!kgg0_sB9-_(Sg(+J!~KdM*221p{UW+T z^|+b+;ywD|)E0$@N#{AXE6V*^dkO!_5~bh9xW&lr%V#K`hkl@R4E$KybDd{@)CC;B zTDcBcP5M`&l>I_Ye9l7ltqZsgvyJ9$A1#HrPZnCzwj`Mxfu18q!_E)rCGf#Pz_SR#oodiken%PRHJ6+*sU;0Cf zm7nBHrEJ>!0pOy?&1WjSi*y#De~3L~vfpJBej^M?;2X=-|2GnTb(+Fs+;3b<_(IaL zPShIBe&7#Cry!zqa*6*m`&Z(RRGxbXe>0u@^F0ycSAPQg5`OBqA5vR^ehm83F7JBd z`zI)VyC;4=H>~sDpDMMxH&Cr+M#{{hwo8lXj(Y-%gD9m$Tkx&-)s8F@DbDdZ6iFW9V<|-cWm^Te7}M|C#oo!qMHF zR7!gbd){;W3)mL<=Xl=VnZfnR0>;&*y*sF{)%jukS2VIfVIMYBl-%6XTGO=e>z1Xm1BS>zwuEv+Q@uXFL`2maX)gJaNcf?7!XY zzsAnRU5H*vFIBtcYtYt0I0}JhdG>Q=rfGWv@2Fl{$p191dwbstok*@5&mV$xzqFB))LBjm*@S9 z(=)VPQCsQkAe|WbPvHI{<3HPAsKQV3uN2?FE3)-@vG@I^=yb)8uT;62=RVz#r_kw~ zqI680d@1Zr;KARjy_tSd$$E3z6+e-7*h#;U=6N1b%yt!=qWGp?q@Jtv_wxBT*l^N1 z_(Q2TOFq_yR7U!@lAkW}6Lzj*zZ~Qv_V3;AF)OI>2%l>}eNK9X^QqFaR1PT^7bQ&u z|3b&x-@k-7NBGI*JS*m4TUI~&SIo1{eDfm3PvLWOqnDq60ihq11xBP!(%DY_z56Ke zTb)k5EsQse++=zs{L~zy7Q2~rzQ8!4hU@?7gm2_H-ofXj27fX7i_i)4eImo>R{GB} z?q9V@r;YO2#ph7%gm{0 zOE_M%d)8s_gMkhmKj)z~@#jn-w5q}oPyF+hjdx7SppVMwL&s6*& z!cU`K3b^i3K=?PZl%E>UeCpD8mGdBM_ zf$!#X4Yfx2MTl46mM5Ov$^M(d=iO9v3?8eOK+o$n##xq4gG(gPC_<0D}?yuxI>z zfqE?EzC=IUn?^fnXS`QKcq;9r+OsdQf%A*phgI)j!%6eF{?o*G3Ej;}_oCs#PwG$9 zZc7P&8|@Q#=M-(%E{sExzRLF2yr%G61zR^kL4sfQfz}JNangw#AJe#=79_j{?G-(G 
z-(&h5>Jd84d|nwN{^R5`{-E}EQ}2Vc^8=@Ay}8uOo6wig_r7=bttm=B=_u7pGwDA< zeP#ZK!i_yYWBgu%_OBrEKZIQgpWgkPdm&hxr<@}mIj^PNCem)T6_&LY^Dv>`{#)fI0RJfI0OuWprKw2!bE^mA2R5$H5;8e5gR}2jX>7 z;%tTI$_r&kMvu=>K6TvBZ#!4%E=*qGO)Y?;xG$rz#yk z=UD}Wf02CV-J}*`?Bq^dm-oR#FDfL5_bA879y_API zuC-Px|2?Gh68ljqpFpB+KP zZ)Up!wA&&+4@x0?J^k3W%e7wP&yVG}md5AHWyH^+|Bv!Lltb*Nx%7JlT=&Z${@bkA z`<Pf z5O|F5VeKLQQ`CDW*GUZj%Q=7d?k^oCKf{kHpY^0u#PPbrvo3lk$B`O-x1pQ-yb8k> zeztk+;UN8a9ra#H{FfP@d*8QxFkb1TvtOe*PHH|y;k!NiUS5O)k$O`qKr{p7%#ipuLrHo@?sePCmWwzu!lH;(gyoUf&b?-tSZ_pQ`;mjn56uddp4d zN8$(a8E=?4whRU!@QCL*_-EiZgnrWV+AoZq$mi+=Ugvp^vxoBz?>gR}=vSkBPob4^ zyOngpp7*&f@GJf1D^;Fd#E*i$@Y(VUg~L3YG=uale%HeItL&NjykPh*Dq)ztlfDQA z3I7{+YP+@(jvvx+;9kG_A^l!H-;>HCeiP&@biDhuzQX?N{qD-O?7z9}zeexrn72y1 zhG_q$U0>z8Wu2w=-^_Y1qkY=nQh1LR)}x=`-==4iPrlb_lTN`hg%@yrCP?_P)Jxmz zDj)e?kE9bIPigNVzLzw_{`l}Lg{K{(_^8fFXTbiYUAy=lxdy^Nq@9Er{~LX+!-3H6 ztWr-BApQHf&K7w|;oXEkVr!hx&NzW=TCY<+1?<1+toM6}tA!u$^X{kV_xyiSLDms} zknnAU8$WMD&cdg6oL!6i1a7|PSW79*#R;+eur{ou%G<&w6zh^`7C=cA4?|r&w>4db7W- z^uwg{6#Jd`dsDTj*J&^J!z2-ZnEt$*{=Ag%QjW`A9GBsaoOBt?Pxx$?2@cXO=f7p0 z%YIrm34xBY-_!Yk^lSDh{dVG4 z&rvDU!E@$ksnDNrVS5%KJ#2@B7ZWrfq#ty&1akl7m#W(Y{Z5+ot83*Q*&Qh)q zwDhU{ml1v~;!u%i?HS6aiJyN+zZB#5+>G9rQNKmpN0&-E$1PSmi5c1=)2>X|yU-u% zhCq;7iGS@>g%|KW4r7ON(-iJq&;J=5n$QWpr0q>7omHqv;N6^G8$0n+K55?>ubAw2 zFQ6i^fA9X-n<<}k?jJMleJ@+-w{sn@p7kcPUCFO%yYfi?Cd#dw&2UyS*b(DCk{k#D>U z9q)7DE(lQI?cAql=zp2<{~pG%2A+&@MDTN`XumV|IUjz>;j>xwWu6O!fhKUjY_vu) zbSC0J;8D+d#q*p`C2$|0Y1bk8)fTR|8#>99XDPp{R!cd⁣tYzNhwJ`e`QnY5oJs zk9qEVIm9k<^V(Gh#v@s;_Krt2lv_9Fi8kvk;kXliTl<~#2T2`x-X!!R6IH*?$B6;| z2IUjnsr;ln*p{`D^S-nqm2(c^Ei<%TrCbj*_$9Qr%%@fUX549@Uv2X|7oJKzmU{G9 zMmlLf8%?Jb{EIvj$-mLdj9CglEKd3|1TOdo>XiTQApC=9z z{w21nhvP^*;kQyg8NbkW89#h9$AuKn_#&T8ka~;$tok+I0hpGh@D#?w#-1gd6a28} z`wyo>umaEKdY5TeY^uULLux1W=;xB&K1Ja@jNh6SY+cXwmNvf6n+-dV^d0y^$38v# zX>Ybke~HrHP5NojNb zG3xgg+HK1`ddK*bc)Gc%x|Du8{ApD!`U*6}H6~_P1QTY!O zzmD^QEPgj2O87X&liq!FPg0&~l&6u~57>`7n-o$_I{P^;Wc@+?Oqg&9KZO4}#shE{ 
zPU_(ElpU|A|2O?>Ip@o{+)r-CsViuQtsL(Rot@P0cJ4DVcC`r2mwJb2w+4SB=QW}4 zs@*oSzZ5Y}*um$LG4i>V@zusdil0pQLdwniT>A@*bB6hy$vWcSOL&rJ-$Ib%h5vrl z*CFDk(m(8aP2pNQ`Vkr^{iVgT?`|{On=_<#V&*%tEH84%@w}ID4(AuSmyMSHR*pvv zp7H31Y_In@_(alaray#xcTyVXYkB-0xUrvXD8jMlA8EY>^cy#_-YDxe_|>%M?71q3 zRMNN8X;&WqP(r`w-Jkyi7?XO_|E%_Do?G2Qd1mu_2q5dEyJ!!)JaJV2GF6!fbfXd#`|^oL%(TuHml;ByUB=cHNm1Jz$r3o&|s zfOcCOr*bpq(_FT{X z%t694srNeS-RSoi>LtbV9r!K~lzJ09-_L&meoWxr{V?ZbDF5-PD$i!tJ0@GK7-jnG64WCcb-?r`7egrfpy-t6%i~cHy@IR+f?|Akj$@quw(H|yqKUhBTzk+dA z?68^ZiU$Ah(-a=$b3KrC(hlmYfbXGfBm5558|8D!1j2s|`3QaQ_hEic`Q&onDNda< zz__cL0idy~1N5thMmSCp|2c?7=w~OY1~UoY0R{x_{qEU3+DQx7)nNusx{B>>tWi3p zgx^PdXk~n8cEY{PZ8%$hE5;tyvXxi-#GfQ-5x*Y<9d|$ zdlA1T{c6%Te(z?+MMLeX5%XTsZq|F?DeaF&&cEk6mG`@vkB}elch~PEKi>C?r&A8z z_XPK|A9=s$f8uPFPc@&n=1~5bgtrsEi|~icyvb5~$S2(9xKqu2n`wmql>MmH(~q7b ze7gs}AN|YmGoJpnh5TeO?ltsp2rB=z>1sbJZOgiscH-?{Kf{e;4|#uA`InJT8CC?| zME)}ge*@!^z{^fjKQKgi1QiLqu^$aV@)Q0m(#iP6X#8t{Ano$*Z-1ToYUg^IY1dlX zNdot)8~^hwjB7%tnfn-0LsPBHewaW<3nu0K`ri@5%* zTv^tQ;wKRcM2D6CF5-96e-83_t4;V5@c+VR`grBvtVeBw00o}H@g<4)Kg0MU&tF>J z(i)9_SVX<-rCyqdKY{kp#{D_QKFb+bG;#gG@Lx|qobUN=@Q-Zer*YY6J*Lpl9HcxA zogOG!_|L6YjdxLxr{i`S93m zav#3gx0%5ChdW%@A9+1f@atcue(BF|qP|Kw{|(Yk4lYvsTz)se@cA(37vArvzd<@_T!)O2 zpRvUEeqa43w4Xtb{T$8tMecO9hwY^E++vkOE7v9V5Pm=Xa5ldmQc8FR1TOaM-RBU+ zI<~+wPg4Dw^_eGepZLAf@lZ69T9IWH!cawS-rtG%PC)5+zxUh9aW-^=_V-kW8orZ6 z{WfwuN+$fYWlBFTTGg`YIs&1Pf*Mr3WLYB5Si|ZQ@yOUNUjurlspBydEH8T~zjr-h!#IL2Cvp=i-$msER%N2i^ z>m=w_PWqOudhhg%cQ3PDDQs6N@wXuE5`HomcNGzSHsj}3&w6+~I*#aZ&zjM4PNzSK z=lYoGU*oBlw1*Yn_>(K?#|}QAdNJ*qPrtg`vyOKq;TyT1Et`A}&_2E24_r(+d%sVZ zL^_GjX}v~nacSDF?7`9eyhM90wSyz-^~3FhR$>JS9==O&WUbaLq5IxE0zFF z3V4~7Vr#g=({m|zXet`JL(9WZ@^BTfmC!gN$$#he01)leyzt8z&knaf^e*R28 ztGO;`>}@~et3!{f9HOlEA?RK7o5yk4Cj3vS+OCE=m46H2!)c1|{eHsi6^bAA_~B_| z6dvQg5wni^9onb&J5KK;D}E;52LT&SnofBZeL>}!47!r`P@ehxzJY4TvfAi3>K|7A zTNT~<8|^J{tisLjO58(x>!iJzdRLK7VuRuj(mwyj^(F7`O8gwx#SU%H{_3M3c7c0; zx9&LdKTLZ_BA+R2Zx-8Y_}MsJ=>)j%MWta`C(luM`q|nq(m1aDihPDW?|l?y>+^l@ 
zbHiQKqtO@Hv7Uk4#7?~5X?gGzt#=nUBo6V0tLBlPy~+8g?>SYI)FV6HT*8$Y=yV;I}cE+le$k=xOYEWV5-7% zRw@16#P>muLcg8!TqBZ^(G_1nak&kG2CR-*O@Bj;TD=j)qp-H_2ydg?_W=dF3xrzrZsY$C`R?<2v2} zKDRRUhN+j>_q9chtoL2^-$=g7%{>2l2+b7w={}|0O#GkFuJ-Ubrm1&1{cR@qt)&tF z23zTLc-Cz?(T{|FV1rTsnv-s#Ud-=l=MgTSVHErh%E8!YF$5@Z?|$rY)Jrwr4>fv! zigvQ=^V(jp;iR9@-ij7#zx#iPdlR^tw*P;4pW~FH(m4{Ma7w6zWXgOZB%$ku5X~u? zR5Bf$5<&>coXllNk~xI9hRpMnkhx1jGUa)%vp@U(&OOh)-{13k{=fhK|9SOpt@T-- zX$^brea>EcpAh2DqxqKCogc>{;f#clh zIzjh+$aV0!hS}F$Cy#dE?I`Gc?M?RO2%GgoRUB^_&DRjyHm~U)`rR?%_&tmcVf^G$ zzFoq;zBV*ZEV6k!ugFeqI*y`rzAs|8m6@HZG*1*H?@9fi`I)zGP5imUPp0P$cM-oA z&8sqco+y_1YbYM~JA6E0bp9Go<5k{@M zsB7pWj(67oy6-m#pzF+=^nJ1u$tTl1QTO26>r4ETbX}zTe%k&t&+~Q8`=c!Q{`89G z+nY~z77<@PpXVQ<^|O%pc}sb|FhB3mc}MqmoZBkMzLKB+TgZM2`0Vp+0e!CFt#Mp5 zK6kKw(0$(7iN@>ZE55zL@n}!{DLUggpZqFj@qPm-{w(-s^T&M$4k!@+AHVO6dgiyv zoafMM#JQ6SUfz|?BSM@Fg!3l-?u;;wjtGz0_tx3>^4)~zugb&(1_|?X5uNvS-#7A} z<_Smh*;}KTj>7{^!ZwMKjH5)SAsA!^h}TEXXu>w2hw@jw+TNVRMbB#VaF+V zho3pb=I$zZJ05ggbl}Z%d+7SMke*ldBz|*RC#{8j4rFL;Pu@-mUH@z$`C1f@QA0i+ zYm&Fa#9-}GCGvd1{(j;s=srsr$xCRw%;h-nD7C^U9_b>5FO8rL(dpLM^_X>Q5O z-yOvJjUqc6YWA1*{BdDV@^Q3Y6|CcrJK?x! z+=%yEPQU9X%o8#7f7Eo|uQwg1t6H#rsL7@3{LZ5%tutjh>&!!2-o6uk4=ME94V>3l zzv=!C;ISUOyaRm?ET{O{bLXsoOzC?bzW4C_2b#n9l0AI8Txgzrp?=7qeh}_c*r7k! 
z@u+*h$xFeH?;$#WNyzU?4ZjXosPo*@11sL|stCS+gn77B$@5d`^9~;s$GxWGG*jm| zRZ^T&?*DzfCeY_)4|$6OKIMEQ~Z1B{NhRb2?L4m)sL6Ap?&^n zriNak;jBL&-s9KdDB>s3{7G!Yw>O9QbI49Atyc&`L&dai@7ltTl+Zu(&=6+dEWD;h z&V}Sp)#dqNdw9NJKZoMceIAFsK8V>-&~>SYV28$~Je-eHSSM}lczLfD=x~r=pFNhy z>`3WzfiSO{P&^^D&qtxr!tWu|=af|1pCk0M6Rnd!=(%`nI)2&Dlra0QI>+M`vTw7O zZ?AA)xC>n;+@PJAWVPo?V_De(=cpMTKrK^GAJKKg;# z*L}a-cQ|70{Y3AX7wkO4c~di9^q$fpvNN+i&)0q4d<@M)6@5+@=Hc&jeXe_dEvx}= zNB6ncaA7^ydCuY%T@QGk=Z}{vil;k`Zz;_mVZH5Z#@kP#ef3@>f4vpYPt|$;s~ydU zOqvf4B%dU-i>`|V`w29T1y1}p=8^munuj)zc)rk|iL}m?(>jwu@_q__Jn1|b!I=$fRE6K`X`*?52Mc` zpNM~pK36Jc@qQ)5cgF(Du3IwcIR&9zXQ+RQ=>E0vz2!#)#{7QLeZLkumW_+<_xxqJ zPG<52+xht)NbT)N@$0_7C6MN!gU&o`EaUCu>%7OZF7?|kdLCR!_Gi&L?4)yFBh8Yx zlS%i5B_tn(`NQHr=g-d{;d|^6a$Y`m0^e_8B%g+G9dN%45o`B8X3ZTi3TzdKzoebU*d&C&R}=I|qa zlj0dfeoe_QhFe3;>3Hl;&kG3r8^kZA=QQm}eiZeC1%1vEaOz~>ns=EPSkkbf?&s9!jHR$BhSgB zcy7@3Sdz~5m?zC=-QR1T2)}Ip=srKIr2f=>Ke-Ye!p7ZW4&UB#YS&ap-j43~1GQs)^- zXddeBkG)IByOKU13GsN*y0?qgJ>fhOOy{*J^d3uLKHTfa$D_MHK}GAcdoe#xR1{}2 z6j_{Jw9cTJ8dCJ+`LF1`ru^*XI6E504zyng&DRjy9>DCVXkH29D5w4`tjG6fCdog7 zB9nKa_oAq9JTvM=@!0F^-|$!R@=o;mOPJ?vTzGyGeSbn`G~XMs^JB&nz9)tFRV{e= z(*1lq-D%wr)B2ejdd%_MRUjl=gbxh~F`XSjbq5T`eyt>tk zx06Eeol}#Y)tEnQT%72=or3+*xDIFbb>E-4p88Ef_Y0E9&RN96>^!9RR141!nln^jODCXD0LbCHGjqhE0egR=o2L-JULjR=V z`jhpi?)$0UARcC4_c^TJ&3JwZeZEkT-=oBL*V%ttALn0YU-x&GC((JijLyqgel@g) zKHrtm=SLid8rs~4_xnT1&m5sYH`8@{>;%5wFzq$;ojxy1={;r&;=gpDcyyj?vqJzZ zPS5c*HIjn;$+S-9_vOd;6WJLo<>S%)UBv@5F1qgtUrOU*RLsXI%>SD-A3|t8bRhd? 
zIFGab_oe$;aHpXr5MXiYzHdrN$5CK2-m>s{XEmLdJ?Q-yLVsSObvXYGKdFg)D+??l^2*u0- zykB?fANH|@**`?%#rG=5wWN8zh2Dp+WJW*<^!fA<{ceTO|GV)$IkRt*4}%~lvLA29 z^OGF->#GvtJJ5BesuSNo9u((1EQ~B36*cfz8m#_cj|v(o{L)X@pqu#tq39c8E7VJuXQSKM>ww8 zvdQ@6I`^X+(dW=ux-TgqJ0Gy1G5IKV0S`j+HDpioEt%$_p8uoL67*lzk_y_uAe{A`pX7Gm+Mxbvo;hGhHVu=sH(x!1%`jO6(_#81G4(Tuy!`rnqmx72+v+z*;RDRlqch3ptxQvB|`{SxATq4g?> z?k@}b@xI|WW$n$O_dW7Q6UPmvz^=sjyQ?pT!{{3@((BQkwg(ck4&O1 z;CN#cMg3-4%=eohA4&1}()Y$f|6ig0RO)VB>+jk^PlyD7;JqvJh;u8T1JG_;5Mvx7N5zB$Ctu;z~!DcxTd>}S$-S`>ZW z5#sy;J=Q-NkNESiFfPOFc)q7CfBc@K`D5n7^S$ZwrqDk!I$nI~J zZwkeEmCnoSj`8yZVQZ)f9WUOrUqy)1mF!FA@E+HZycMm_y6?R-qVG49k9hm#n3s&s z+3@if{N(#BkY{s4sJ%I~uToBY5e9o+HE` zNBv+y^G%q~S#5}Ejp%t6;XJ#D;#ARkBJ|IE`rfIO=9@jmv(JpT<3QKL!u&i*>#z;o-*KUM z+EPE?Yz{}DMB?v*J&j4Ma~L{Kq2SD-;gjarUVDl_oNb4^R+I3u+(g;Pq;aUARWLJp2xu)hgTkyC@k zDdZeol_{0k>I4>q*hs=D*e%E;Ccw!>iA)yd`4Xud?j>O2Rq05?oP-}!#^N70=c_rf zRLV&kDIp=^dcz&3;1qb>fch^vC1<8asB+GRgQx-?)CdWuJ&>sq!^GLBp{?m4Bua^b zGpMDmp@ticGbSgeFVwA(kHXhf%$WkN`0>Vn2`AE5Kmp?=c9AQwd~2dayCi7O#;dGR za4u4!scNwjwLFQWYR;MCaJVYKkSaN4qC}CzkDFYbXoiqQFe#PVv3F{SG(mIDNysyC zbqWg{&lD7>(O`+3+oe`#a2fg;hHUr^l&qE=h-wjQI>KC|(PCqdcoKPQV4*dfLYc7& zqpgsrIWhaWTyzfpVS=tgJ;+v6wmFFmE_zf)OWy@!qC-axR|Z;>AjdQX?ylm$Lbh0gF;g z1;Tb=_u*j)e{kfi5#p?&o8eFZ9_Q++DBvW{2)vIeIz`Q~f#Idpngl`Xf$z&n&Co+u zA~Xbij130BS;D!BIFD3LS*R3q(1i)SO-0>K&8gWg3Qn!3R3nk1OB8Y`8(q%~PGY83 zaF}8WJ?NrVApYx3xx*I3p2#`GGd&3!;eJ!F z{~9vI*q}?*rt$7NnqQ_)}wQ;7H|l*#9Ipgee=*WIbG0i!qL% zb|MK&7^%fd7y@Y=O0jY=a%RrBWThyfv85#ps4H+JaTqOmCZrAj;)3Q`Y|QM`fV6la=(l?dyHRIXT< zl%U`cvw|4}lECKFtWz~}TPcySHYvn77;0gCcjHVHs8O=xM{OjLJ1{wo0Xb`e8h;Xu zriM@uL6VWg(nY};Nz`g9IqM~>B(++^>B((4vjLj-|B5X+30JZpLHQfjM>Qw0mJi^Z zH3yBRw-BERhl9E~OtFQ-TCL_Z6>x|(7mi_iqyz!^q8D-yxk)2Qyn^+PrHE6zVp8(r z5lxug+c{}dQ?HI5gUMwko0vFAMO5;=()6$%u=m%1o$rbkIG$MY@VC=kA; zlhL7KB^xBQS_}!yL1%;`LM-5dl9z01z>J*XYaD_5X;Sfh6O2R>lvKVs$Wu~uINS*O?tAiD%a5>joBh3CeTVd(J z3dE^TvSR_loTCSuUYv`Gl6e(-KwTbKTcVcdNl=R*B?>iLuTm7!6a~skj0QaLl}a!) 
zqr+G@5|w7+4AkhOAaam0k7Bii<|~u5Q_oQ*V$7K|2n$XDXOG^JSSm4HU`fd=`zrWG zpg~eUPGTf6P;iMHr#9o%SP09Qh%=N$>bfol%T}T7tQ$FH4b5p1)?i0rz+5;epv6{F zkt9p*+eK|A=K89^QixFraxil~*V{~8N9iC>(J&M?YB+*EJfknp!74)Gqc!8KnQak9 zo0Dg7cz2)mDy0LC1{M_>uldt>gS;3;R-wdXV)nQUGiPZ+W};Xv$D*J>tr{^eR^w2> z{FTEhBeOZ8L`4SY00SZk{z(`cmjpPLlNDGjMC{5SL8Jgzfe>*(adi|D=3kwt1e3FJ zafWZTLv4;775WJV$)XUf>F5Ckr}SWgk_<=4`KpziCsW`s+Muz!?K@O4E z;go8)uXk6Www5|8zF`Kd(SGn05R!V}WW-ER2XX2U&Lky4p=2f%BGy%ym=J);`5foR zF4;LH{8*Z))$k%xO0Au_#8nW8hYZeUzD!tw7Soim>8nsfOs-~agi#IE@xVmGA!fu` z*?=#%DL@-CSlC!%e@iOx;FRomNrgO?2+cq^NfXp;AYA$hMtD`Kv4cvFheW$3rRC<8?>)I39hp21vLK2a=BQgFT(3=PBz zs4K+F6ih(P{%J^|fOR&96Sz7WhLTexnmTZZM!`wc9^iSY#R^SCKkN zs??XDj&YH}=5(N&XG^FN~>?&Wb)Z@%V zwH0bG4V0{A0VgV8DaQ>=!2ocQ*{&MZM1854g{_b$a8~%pCv{Q72cueE;sLlC5eW`N z$~m~8$xy>ph644ank!IF&s8l~LJFNwO9{4y&`nyz42ro0B2Dv7vI=YTn%Xsq*-ny6 zvHplSv){4)qpCTpSF`>+iPo`WQYSUk$@i%2m}M+KrApBm<7xR766NZWk4#h8dwKx05${Lft|o! zfKeWB05}4i0{#Fl0PJezDsTsQ1Uv3KRIC=qn0asuk-~o67!-3I&KM)9n0O3FsFcF9YW&jDmY+x>c zvEvp2OM#WZS|A?gICLVxSaw1^fwA09C*b;1?jNf$;|nfZBi@XaF<Atkh%lxqH`E967U6rfeAo3FcF9b;()1u3Qz-yz&v0PupC$cFj|9j z1F((CJCJ4rdw?8ZFOUly0FD6pz$u^*xBy%OZUc{iV&EC@3Md0AfUm$e;3x14;IIhQ z1n^mdGX!J+7I4l2umqX|tpG>B3Fr!R1Ns2{0e9dxU>Gn87z_9V6M#q{3Wx@#0@HvQ zz$_phmfIUDiZ~!<090dx1v%opv3UCX!2RsByfakzl z;5|?QQ~^H#w(!*eBtT8T0FVK7fO2jgnk2P1a z4fVIo`}Jbr{%gHQ&iggKsOy}ik+&=thaYIM>A2PRZ+jLzmhUL&b>WNWzCZUx?r=D& znkb(=$2_}wUX$cI=c6*++GH(QQ`CNTbHhDqBZWA%{tDI9X*c!!6O;#Ue~hc^KP=W} zhOfP-MZ@zwN=J2+^@LCP{M9N8*UM{`Eu3TCc5si%)H4Ikqb9UyC_X+U@TzP3Wkb%^ zogW>QWa#br=9R>@>hSnw0lU8U*l94lW3()!bdv0{Rj)?b>MN6zHjK7RsjcU}Id6Hm zLB+6H!%X`}1P}9*i>C(D{Gs1x@{cz z`>TmpJ&b?UEy|e`zB^@BvAdhyh7I;(Hs!4i9ef})~jxx`fXH=8BN?J#U}OgUXS|F+;*m-!=FC#%hxXN z@ZH0GJ&?AjV$0*9ukM~`d}+1%2g!EWcTL##X!C?1Rgz(9!-*%d-_6gA zv(5ID*IJ<}otdWcb-CPA^yAiJgCL_lc2}QRRn`wUdU(+872fOG&G)XmVvpDLx9hI0 zUuQDvyll^x^{- zSzu8QnRr;s>f~*Q@)N58e^K=q593st9^@h$+`?4U%B}}W}N?k&~3AG z9QT!HZHb=peiUrHb39Qs_HN>htUc%Mq@#b*vWt5*?OyNfc-Z`XQOnZHdHt0Rw-#^t 
zVfSKAukTl;Rh!lvf7w1DS{<8EJG)V_VY2U2S*PsbS6{gQnXsk`n7$RfeT_6G&7NOYA*k6RX5r3`)R5Ghji81=WRKMfMD6n zPJ?Pj_kCUao#Ag^JE-paylXe9p<|15RarAdpG!SfbtyA`ec@Tz<hA|FKAW> z{rj<%CX0_JPVRiVV%oVe^+vras@X?XYW!f7Pti6v$t2lajQMi$4D%^l`uTmlZ+YfI z+H#A_UmnZcCa;kt^-P;R!?x?rCLT#s1s6+wZ^CC`gkwRazx9)Fhp?UC`fb9MG)uQJz*YiYPg zuk~7$Bz*XTko6s9aRn;lA2au~nKfm<_2v7^E!@ISZZ`ky)#KX|b6KAGgrvaeB*U_# zpqdUF&Gk-yTUWlVd`VT*(58h!9zQd~P8~E~@Gwoa8vT(L-#hc}g~DbgdT(X^KRu4# z3aYi&Y*v~o0zNjqZYX)u(*IM*_B7R2mxzn&AU`T9?|nksCN|?ww%!#K+C_FKYFhNP z;$^><}UWynf=1CUg0bAwU%ygYj0XK zYN%i>>Oz8pAc;QBnBYr3&wxvD;FW#9i(roLz{>t6Gw-h;tD zd+k#4-E&h)*O*C9#ko|v)@?FnBzIF~-|&|AsRH>7v#D;S9-guBJ4_YeulO{3+PLA{ z>m}|_Qxz@v@%74!-x`g--qkd_Um6$oe7wz=Tc7*h9o|c}pjJ)!!lq%l@8->Zlpg$B z-)V03R&01QZp?ku#a73ztbb45e(_=l)!fDVuhla9k?vGj)pp(F(egJp-Wz}1v~2IL zDC0fTnlI0sKg9NEF=AQlyf?4tdXoL&MIVfdVM7wXLA4-%QQvv(>QoxHYd_zqL4$!6 z<)wG;kGB0XbZb#VHTyLYT4wZSOZ8LG{X|$(PY?9&SzN`C=7jf_^x#IW{=lfhR?Q67Wg;R&*@3ZzsfAe`6(0I*tB;eu3K;o!uJ>7yXy{}s;85u=an2>k`Xg(P zr~D5~^$lISs%za}DDM^#&~2YO`|;)%H%A_wbSqmD_`J2u(rWc*`#RgMM*EJGZ;J1? zFDBN$;mbx&RzqP|HEw3r)?+cXFZWuZD)Ehzbt-lUsQ+fvhxzzXB9DCs&6WE{waN-y zsVbjpA{!CqY<{C@{_+asRgT{dE~p$KKTg!=q)^ zLps;1gl*Rkk7WrK52`NP1`kac*Tm$l=<}tSrr-1jCzaGPFQ~ON;_LKf9Xb0B-A8`R z=>Qw+;A6p}2E(mPI(5Bo*8a%Qmld^F=lz!K*riw&o0Hu*viPC*lJQ4Gjo>3&e8}Bt z*|VrMGAGf*W{=NT*C@K@SeRrOHe^%d4r>bHU)-rosxkDe{-`zGWI4B1+DO4Zh@rGW5a=6^; z{khRQ)4cRr%xfOmIr;d(Pi3hMLn`xOufy^|i>?fcyJ0q@hb#lOa;#RI&KutDXYjq{ z7A;g~f8JeRe*MOrspZGTD^%hSXD2@0Ya1z>Hj>MqW!UUOaFXFuS!z>blszXT>}e7A zty{El{U)~S=E#i(zKi^^af#d;Yk}PBK~-4)ri^S<=)q z&S&Zc94*f6`s{-Hk~cHjh}*iA+sX^RraN7As=4&A>QaN;CZ~TcIy|Pt>6MFoaQ8Xp zk3VKl_g-lu|J}Lf(rn*(zfS*9zujbevD*937E3j>* z=kPv>-n-vSi8o)G_48BV)kmf?uQs@~4mOwXmiRBODcv&|b}YtNY_~r%c9Nsby<@Gf ze^rmac_QnTOZa57^ShTecysV%qYbx@d6dVDO}0#t=NwB^OwfOs++K?3&PK>zUaFt8c;Gfs9W+(@(SJfYUeA2L! 
z#gBb#3bU$jyc}tvS2WS1zr~Eoy{*SPMBM7w#r50%=EFM$s@%I(#NTW-^V^d0y7IFs zW7U_?vP_por(YI?)LY}7vCHbXY){ktsq>Ue8vk71f4;T#q=ee8S)FAO+n(0wGud|O z)#~#1+`?oNxxwI6dFg&@$N1njj~;*78+T`|%Gi3p^{H-CEsl3>eE7+RtirAe`~43$ z4V-9FUzBAZ)vEtcrG#s^Wn}vB2g@zucGg=LJ1%Fqq1ng*4j;>h4TcYQ99NEoUrcVa z&)saF=QQ-k;`ft7Dti1HhIV$}-KDnJcfBfMW9QO*kGS+L%--h-4+Zu@wB=u(bUoH;|!nH6_Kt^9meII+hMf@_7?SjYFoLbV&b&in|}tq zI~jju#d3>^yjJc-q4DNL7k?ZWYuESATKhPeo&65?`ZcfBSXVDgR(*SH%MSbMNly4q zE&p*tDXW+B?QTxfi`B<_w<}3|I=r`AzODPcZLimylB`hmyB~8j+T?+(Qgm-t7t8ne zW~N^?Gr0OW#dGAsVWs}gpN}_L(;%wL!`)vTb~&o6n(sTFIcAYd^D~ROgzvYmYWM21 z2W)jk{?E2MUmPCwdULZW*{$YH8+vXYWV<`*du@tLGQ8^3Mdl0{3$ywM`i0&;phW)b zgf_`)N4qa47h0S((3`WvzQER|Y{13+u6fSCUj1fliM@~M{MO^qmz9xO{rAe}` zJ3n7>`ZcxT_&ZD2+K!&KVJ`edj}DgAZyhQxHu?7R;bi^wb0zI$FHb*hI{o$p`HK$u zS;jssa&Ojrd91>Cf4=Pmr=~OEb2xk^*RqvMIIktoi_n%sR#HhZB1^f;@FyzDJ=kwq8 z@M>DvBt(5*fAzuzEgKHDDfgXSee{<4)Su8ZvzSreB4AVZ%&w`b*ap4R=a!E=R|bB} zir_hyow~>xKAb$?qT|Atw|S8}de81wuj?xBL!RoQn0b9m%UW2~8 zsZ-KbSs_Jn$rDZeJ7f;tyUTxryiRoVtDD!3ue3R_t0Wuc4*5xj?q`c*Vyyp|7;;kN zmTK9>_=QdO0$0S+D7@5c%Fs4hw{Skb+2P6OqJuqUhY(MCzy3`uoMu1BYM)y(ee>mD z!#w@RcTnG`Dsk$!Ns|&y2EO?`YDt>E_(}gvW0l$A;c>+|*EYYtTK0##yi;70jb)}= z=Qqf$9$*SN)t_}_MLYd3IVh@S4=!{(ekju8(|y?L`fIAbe%n~tm?V9}ch?uFo~}++ zRW@v`8aE(v;Z4MGziom%;nYmO3X`=#f4mOOFoUf(_ZF8&bWc?+3CW3ARz3H9X`_KQ zh0oOHUu_3E9%?nA-rZwPKV3!)mb*1to*Dfu-u%?w_R>zd$ybbf*W33zV(`y=%Py9$ z#;ypBhK*-8+wNR}wKF+9RljETt(MBuN%2w26{;UsYUa8fS)=lD{9+Xgx%W41d(Lq1 zm4B*OQW1^%_=f2!>8`I!_6$!~Wtf~gqSqy|QP(+dEu3PK3^VN-80LWQ{(Jn?$H#n* zim%qpje|}<*e<%1sHzqfn(Y5H``m?I3(^~)?fPQL*HMegWowp7e>IKUf8RuO!NVl!u8RB4i3)w1!HoCvpPSLSS|!aNw&sJ$ z=bcU!3oVQj8^2ipwVT|i$*M8(TjgHmTV!E(k{ewyDE?wnXgs8L?lhN&EB#$=OzfPh zGJ0HN=YrsH*~~Hb9BeLIFZsN{+t4Ys>7}?Y`+AxxryBNt_`xc-bpzR*&;8_w<_>jg z?Wp`YrM#h0PEKZOb?>k(x0~bovXN-)o5TecTW4?juVqF@$H^>_)uG$r8}3s-RAWz zO||&*vwg(}de8bXx_bZ5(ZwbyIB#*&*Qwlk^#Aw}c19)aj-BnS_BDW?5csj{KPUTZ zVe30p_Fl*5b~)ud$8Yja7udHRh>F=HpQ#N1FJF*Mt|{S)uanQ|D~i<>pqW zXYZ3bynmfva&};w>#@>bw_N+$ESp+)$Qs`{=6>)I-&HTeerrcr;RgF3+xJgtma!pA 
zwrP28sI#TZ(V_9(zH+hy*7ltzmuIZ*jH1;ZxP>w&#lQqv3VX z&Y=y~*D7pte!Yc%pJ%>_D*@I3F?)LNtvPOe2b#imdA@AV!j;7)ajiPZ z-2U{JUBH@r{hEiYXJ(V~eVe|Ra5voAe;Bae`fit7O_z4^sC}-BvT4cc>qj#E9}cor zC~9TzwAfg-RAn6UdZ?Lak-_XR>*o)zHoDMY-okMCeYNw!($DtO3_LssIu4z)uT^@@ zpDOtAfS;@IlYOgQ!+A~T{7~L9bNUu=FVFSV&Bhh``#vdfihNgvxV|{<+K`qpe`VL= z4z{`V)~Mc_8!rgnGC{Ut#0B%-)1EzV@nzl$$%93m^G=^N@3Q~BgTuMu=C`k(X<7Z~ zy>GK(j|UYyI>|~;kEnCEENYiujeb9N>^Bhg^Xe>1nlpd=yn%CGd_w)!KRyM=9(Bog zIN3`U2HOd79S_b=s=K*E@7PHTaGW=)ni*r)eDJBa&iU2H=e{>y)n&cR5waH&)}}m2!QQ(ipJXi~OU;4@46r)ad!+nZQt*O5o>U$j|N9En zm>(Nd@#p=Hrad2@BJpqt(o@xHoZ`TMKK5^-*6F~LuskenV{u-o_Ogoh=s zU(HmRBzK-%d7#x?U#HHytWVCpT4vXNg{o@)h1FY3QtJhCnMc~xXlL=ZUe^4IjU8pT zjeE&5^(V<2uJk#&V0xW>u}K@`$Kf;RO53?Z-)}xVe((yL zxQd8gqk=BK?tJ%JtM$v^E3?zAn)`5Fxkb-l`P}NPfH7lQrcRE078QM?)sFi{_2;L+ z?yPQV*{1=i<*wxpbNx-4w7WO7RaTR5ml_xDd)A+;N`BSg%6e5;$EBCX#&$c?xn8}X zmKCd26=$9|%sf_p^x_$l-aB^OnX$=qg{pi}fa&iy>rWrLbkq4H#UYDc#&zmg!%kTG z)wtw4`ZGFi&bo3g*r{)(?*RF?-R<|ew(j{Lcvju&Ce|^&vWfb~)lDi!hE-cG3|iN) z_;P~E*lJ|B_su60Wp5(4f2o4M2#aSgI?G;V54{yV)9cpW<+i(f-WjS+nqBF)J}Tbw z!>^t$D^wQya|=6y+*z=8x_j zdH>QE^&sP_mz}>fN>4I8mK7nJ+w8#JxT3pyO??8gZa&oKl3P2K7G@6f>GMV*e-V^; zWc~1(OP$7MDw3L4E;?{%>-%9l=08_lsr7T_MdGG&$Sj;dkK56>I#z~@K5b*lW48&k(G z>{2Zgr{BxJRv2yC`eNNqwS#}U559kQ1=>EhJWXYi-NyRV{KuvR&sV4ptnMcB%|F_8 zuG&W4W8#B4yKWz{@;=rQ73w%>&d7mD6^acDZ9o^8R zI`4;t>)Qv?kn_Xqw`#WEI=OlG;&)cXhTFt@_GTY_QTlB6S@Y#DeGb|*PPSNkvcZ8D zXF6|8SbMFc!~FV2CY$4`r)=9TSvEX4;KHzrir=QzJYBG4QH4e8<*MQBgJu4fdIiO2 zTD%I2nRBKD*OuRH@!4s;>yP0D{Vz=onfL1x{4B$F;qOMTPA#tUXvf7a9g(i@%jIfZwwnhAeh;d|uGT9N6XK`r)k$7 zvJj)SDvuLss=?Dreu)hea?FZb$@M(um``hXym2~?$8Wj+-qt8S+*x3Id~L$(Ees%M>_QhT+}(Yk3>CYN#FjDg*EWW??> zvU~Siac|U$XK{6qug~regV_DnK75uN3|?!}bA=6UszzN`>^%r2el9B0P+m{+xs<9Y zWx7LPn_lbjm#L}HZvef1#95E@Fydp+)d&jgfDMI(A=vCfR8WxfDQ5*BvVRA4Y|nxo z%A*;d;&3E+W_LbpUMBu#a>VvLFk*WR*uEKdKcDSG8UVko!FvK8qgfPh8p&r9|Av-* zCg(=+EP@?&pFIU~PrzgQOBlUCdJa5x@0}6bH^68N{4uIUDa*&e=Uvpd0gtxia*e3o zh~ha+{*EyP5Zk-q4HR#v!7OD`(_y3)bh!5`2XIh7oMRH 
zN1ko0Yy#cMq!&Z!bV^5)auSu<-XIR>P3_RO&GKw-2qS$;GGSeX*r>-G#O+i%47Wj#Wf(Iw=yiS5r~`%~H8DMmdh9ZzvH9^3D64fzT-xDYd_46SYg!B{mFOau_ zt#{Op(*w3H-I>la(FVKhRm?S-}_EPBtqNUI34xz3l#U4O@+Jff;xiw6H$Ob${Uf>Udt<@y^2>H(x?+!{A zLWb>wVtdfo9#*#ZN(moJ$;Nn!VL!!GPGx344>Fp4d618#`cq_=9@p3Gr(?2HptlY3 zY#-nt$gn+vjC@ItFY$Zsf>0krF*E*1$eh!XWBU}Rz!uwUWv9h2K;E77oMFQRa=lSL zi9FjI`Wm+8Lx=5&Wcvph1*04eIG}zO_+bE}z2Gs*hVEhH^MPET8f8WoDV<4jZ11c# zv3;u7iD!b8?TLLrI!x{w{Ik7%#lQ*pWcwi*btQX?pwCD}adsj8Z;<~%vb$h&2IO{; zi~;05m>hWPAj9^-3bc~+HxTbE#gdP-C)F|8!=$i{(ywIu47G*Wc*01?@q0Qiquv*` zW)gb^{ER1^ixe-bVteHnvHib{{3%T(nV~vNv|?qI!^!@3_-=?i+ea7+8Mc2@mlD8ZdnK;|_sQQk@~47cfAHC! zQufG+GudT((;2NIxi{qhIb<2JeW-D$V|ynLBL=n~lhGNZGbzvZQ^u2CFxg=;A0XeF z{4`?>qEMu2;K+Ee^Pbqx1Lof)tY zvW!Hq#b_wx3&9UTc{covB7QRJ@I>fe^aFK_iXbnB%sHeYvc>qP!DIWp1)7F>bJWj4 z%IGPjt0~QZPqu$nj&fh5N8qbS%ia*m-yr#H$k!y^cJTHRzZ23$;IqB>jMyGuMjH`J zGoTCN{)Dt0PzBi?NEwZWJw{LXg!@V5wOVa1KwbePL#H-$y{HdJMYDHW0lh--_mTcf z$gKrLC@TfQe@NevJgcgNemg2Zfv!O7;rklU4|NuhiKPltv@sTZM%7w&Sa~*;$7=Cg zLRSJEL+TqQ$M&~>p}t_{H}Lyj%XfRBACXRjuS6!KMYpwNOHj}DH8a`;n~o?C)AF?+ zylBYx06zg>j>{3+ts)$)-Jo(;9x4*U_2Ux@T~w5t*M zWzVoopgx_Bm?BZ{1HH*wcDg_|8D$yd*fTllfI!KR-9)wyf;oud5bA$JW-xpl2Y-i_ z-ed5kuo+15X~;hplqfx~CGV{@cC30j^pa4&UrQ$%Jhpd!DdId&^6XgwMja3%duHH0 z#TtP$jN080{PEB;M4CYLycV8uLZ&(VD9Mh%?}l<5_}!tG4m<1_EJjnnw+4SQz^Etr zIG|(qHpRD0t6k$EKak`-i2nt=dq6ArVH6Cz^ZBgi|3%_2VexC(@czGnt${Cb*(&FV%U$AdW^SQt1Ybjk^KX81{wk`s6PswwSWk=T9SSn(qqqc zFbbmb9i+V>$9|`aQNEUcR$fN2Hi6z6t^U2C)n1Jr{;}r<1yaCX0oiN_n>9$DJxjzW z8g(a-mLT0ryiSly75a_hXiqjYO8Cc~MboANYR_}xOCismQ#uGc<~q7eZz9Rfh0HGG z`$69W{GCX51N*@{33R78Mj~}Y{S5LmocJfn_BQkhd&Wti8YFiNX%=K{NKS_Q0>F#> z3-X;&{}}os!T*kQ1NE_qAfS`hB>#6IyH#sE8lvm~TQ`xaNJd*VhvHzoGUQW{u7pqa zJSd}0(D{zE6Z9Fm^2uNSLm@j|EB5ASpBi@lgv2Z8T9VH=@Leh28TlO8drmf)F=yDA zg7WVadkc~knlc;xiz4PZ1FsSo6A zp)(!%HNa`)FN61*FTV6FWBpKOo+M(Oy! 
z)Yl}w&a%O^CVnR^J-kBm|3z!TvD4vRM7eo z%^>ffWjju*PB%MBi?LNJ&$87z<^QfYQOhTj9Y*CT$d5ukg$ztWIu5u_WtP~Weu!wplrVW0Uqe^bc=O$8B`vvWxs^--da2+ z*8N{}cl`_hzhj?*{cs35Y56h#hwC9`XN#5$D_f#m!7^Ip173eBucWl8jvixo_y-@G z{y~mR{FSI7GZCf5B-=&DPLsbR|0`n&nP3#n{)?r_x#^U(i-%yp?Z3p6K>R356{I+e zQg^MG9)j1HTJjRAi>0)YmTUm&v*bSI3y}J2$q-xfE*K&Iw^U0uh5XJS z0avZM<63!EyWL`#;!Vx)0&k5HjbG&qb=I6%(uP!v2A}|AU>L|G?v=aemOUvr1qh zy#k%1z$4V_YsoN?!Qcn|gUvUPbJ4P4PI(jXGm&ou{6aoS%iaLwJ*iwn=d(y1EjcZ- zoId1~@Nc0d|BD1N|3Rl5yvIQPKg1jX{(E2_@K1C~i@94zW;vA)AiYTWJT3Wc#2Z7Y zR@J{|&;CozAsrhI0X_ zuoL*d1Myn6-lD!2${)4#S0L{M-gKR|M4|i{w!V?QbC7AIWosJvGU#SPZm*6msc4d# zka5$}tJcbE%eBzrkAuBPs$}nk(x#oTmki_q0(nEeBMPe_I|{g^Rd)$>-@!8lCIDrm z^nhec1tUo7LbeVt9}uWN@`Ipv5AcA_JEU)^J{9FtTC%^Pj?n_*6==orkn$UN5w17V zBFYO%YtmncG=bH_4tsA@Jan^=wxN1KzBc)}Auy1(B$>M8?>p7R*ys+Mx^#nVZ}|s(ChJS}FOVMq{ZPcY5MX3U_5!tZkD<<*_)|#kDC*OB z70vt4ShhCm?2)b~-ABj|A^CxnGC4+j_=Iauc9&30#OB%~^PKGTBBndaR)7KUMN3|f z9@C(UK1S!;qo$1JLL0YBN9)GI0g^e?) z)268?w<6h2u-6mirbsvI$O$^(#BUFst2#1UNN51+S|EQH_4VMh5A1!QSeeKVU?0#; zM}IbCo`5$BI`xnSu(FmO)6J&xW3BexVilk^#QP2UjK-4vMB*=|@=}&UCX34RwQTJm zU5x?!TTcGQL)HOxw~#*6is>VGFG*Oe$7eok*<&0#lHUVa7ob?Hei|!N9Od9oLiq%+ z6nZ0wf1lDh;0rW}Y&sHeKXku=XHI1`batX_NOFQ6O9j?eWR?Jlkj*6@f}AJ#k+4+- z{sA!Wqud7NrAY6?*Cz1alCQQ%FKg*XXthtU$5J=+&v)p51_U~U{2u5kVWTVRbCGrf zZy53q$Yz+9y`98+iS#yPgCWTjVe5PK(HAY|fDUjtxtQmdYodqK}uD;CD{A=wR( zy`&}I7jnjscSD*6ep}MpKxsJABF0BRcA#eqDmqCuv zFr#56&Z2yr zY%`G_;C%zuk=;b3){tQ|4RYVK{2hecN@fGR3$XbH`31DJV)661$>6icAyXWT81>L8hKCD2{Z}$mf&}X?X##m3VlZB!5awP zMk;$E9RnVt#iW0j%8XYZ_05r5lRV=w`bp)x$QMzZjBf@0aoE?UZ{VE+); zqTB*{A5iXq@-6t3!~Zq%{gSoK$0sB*a)M7#gr9#{5T6eY^BV_#Sg3!1PjGZlkdP1e z3k!=3Kq-7uOc-B1E-cdDFD!=Bhy_oI2#5`hjPQvJ_K6Aciw+88LL8|@hD65r@Oj?N ze~Co^6Q97KV82OWu|9#JF;RZ80U@Z4iS-MZ;1dus!6(=+H0*DTag+R_13ACg zpedoTTwqv0SY%8P(xB+*$Y`XIQ9%(%Vy1?3!F*kCXhdjCh)-w)jE05z=*AYqp+5FM zMH?3B7xk>d&|k&CAhbAyiwPa) zr|DNsE-;YU1urN(Dt4+SA3Wk6&^aOk+mXnwBzRbnkg zr~8D)21P^DCn^Ni1H&{<0>dI=gw|^Ep%EcL(HePFkR}^7pNJSPkeY`v3q;w+f2vP} 
zUw9A~tQpo|78l3D7p4qUPz($T3k_!(w37>s@$m}?2#Sdb#Uy7XHuulVvUDsaL-gR~DWnFs-_TJ0db=9@6ZS4hquQPLR60*9V|MPu6{|k4{nRCvRJN3+& zxi`6FQ6y@WUag@9ieHn-5-`2pZ?<@c_{o6x@bs1t!nM`Ey9Z=46aaoCAt1beBNQ_L9aF@d8vB^+gh z*#!d%3d0hQ(O%99HFPPzI*3zujyDK)lmZ&ef+ih(6b%}KVQbeX$ zE%%U=DHg8I5G$xzS0EHJyhpG&C^1BZ#5m;Qa7l(@Knj?|5CLK6nk)jsDI@_3Nl+5j zkqB`1i2P~z6A}(tgE501iVBWY4~j-25tBru?2w1Y1DA@M9LT^0AwCH@By&L`4`>E` z0&^%LeF6wZLmG2H07?Xt0V3p5;cAU)oeFcdHiH85d`Ut8G$U{q!P)eHLZKStnT89x zlve@}27rsgU=E%PD1>+^5!vqNa&V#C&~%`sNtZlK)lj7x<1);ch+{QO*@%P+W>7n# zkv3TeNppsoksag0goJ4wPmqBjpQa3&rbkjkvKWEeG( z(KJ8XeCRQeNzsw&6cZHGbQ@WkWO9ijQX}*Vi572G>GUZ{kveEd43i-!k*#;GJO zFvhh}%LVKS7su_l!LfeG-ZjmLN)k&lqF6LK9phL`sTz}+RC0naJFtKb zI08jFEOX`ct_-=V4dvO06%Ig(9!d!zW(|f;4JjVn#{eQSne@;%446%k7@|fYFf|b+ z>wreKA{&G%nu?6+>X5`FrZHjFL1qiBR!3^6mbG5e1R|4D;-TlTbu|X`bX6o)KNJV| zVAZSAks23CHVddS;5dX}CIdGAS@676>=MAjgM2C?Lj{XZNAfYLSUcqQIdExNLJKch zTJl2zQ!z|2OlQY(f-OtfQiBQn+|sGv|(i3dPD04yZnk_w8(EtunyXxSOZh2oN8n1@7D&9WJW z7OYx>piV)hb8TKj>O6#uX+Y50)p68cu@dR266-XNDR?pBBdO%nW zv<%?}6bl(h*g64OCp(EMHCBKM!=8?whO{4QnG83Z1&cZcEgo8w8WdV>0K+pe48sDI zhdB=27Zp$C3O$D+j@Ppo>|6w~d!Tht=TapJIw@;XR;e1$w7B+^!T}|M-fDrK1SFj> zq`*lG;nk30Pt-vuU1p}UETOz5Ifii}WsN1k5S9QMb(#f(kOb3(6!=*4B_y3;cwF>h zDAlRlh@px}U~3u)rb|m5nH-I_pjH`?V**GD4aQ`kB@33t8m0igLnPP6B0OKG=_ehLCDP#|1wqA}W!HQuK zQXD~`K$VE8I5G}-N3o;nQR^i`q@99pYBtbNZOIUf@u>zHFHvPisvL}5lue`5S|y0` zpj88`l98!NtmH8l*$J%lK|zUOrJbB&z_J~Y(447?g99v@QZS)$8F_o)>k%`xA2YfD zwGkVF)XdaE>g<>;jLsfh z=NOz!7d*Hl)kcgaBQ@r4Sl^Q{7GfEQ)<`o6G$s`^3yc()WSQP1M4MqNLo)^e86b;; zg^h}OERgVsY@DU|?9mA^G_;Z*82Y}1+7KHFO`j{am|bXEM?|DQ<0-MoA*dlkF=fb(p@1YB)2c|d zg=z-XZ^5o8FVG%9+SpxJ;AV?7`7rG2jyGuvfjvnG*P70XDVq0mZ< zh=b*cN`l*IzD=vFg_ zq1?|nELcEY3Kl3}z{YMFlsVj-7EqU&_X4$y$Cwf+pe_p<2t^N}F#1vu1{j?=NdaVp zCF?yLn84wJK=T1+I|e``!#U|JFgKyIkaa5_&l!zYj((`pVtb2m)5#?R6REY#L0FRK zu9k=5u2x8>D`eD#s|A2BK*$Jy3m@(Ty^x`Y3m9q#eP5 zQJ({-m>hv1qiaFIhAzW(HZFRmox*JTm|kGH2c28<;U%L!GZ~_|sdkZB1{f`na1=ec zj1Wy|G$WvVq$uUM^k_i1wd7FYN&5s2_|U$t9d#tLs?-WG7>3y#h=CIG 
zKYW-f85P?Om>LeePv#i8*$S`#?6c>Z>+IU)^I^vLw z*HZ`NK=x8NvOTgsM16&9jOZ}r!k|GVVPTaZ0dRPo@U0N^zJ~Qtk`3#Vktp358ySsR zgw4`61ZxRK&|-GQjRpvZnxO@28n&{)Mln?krXve58rq3sj5lfm*|aHbI*`fG1eFb& z?k2u=!a%A{V@nrRN;;HRrqr>Cjis2vTnHhM2?knXlNJHZA`&x*1uIF8nAjYM^&DG> zLJCYl%k@jimlVyW8ZgLO4nRvC0Ms7={1f02@gP`>>tiWN&>0?S(CeV}Vb#J`7Fb~$ zu<^ojg>a`8splSq?KZ1hAIudIVGCX0>(5W(gK)*DCtS@22#Rc0mMpxV#K_a zNbMaiVuBi*q?cD95{4~1TEA)0kkFj8Y-<^@OI#aB`5F;9@hczMXNV9m~z8bTmM zo5~VMN{-}84G`1^(;k81iDZ@v)88N^A@wkUMq?m? z1qo2kbUQPlc8vOQjx*16D5zuz-8j~4e4PcReH!tI218sH1@9fW3mEg56s#G!*qHT=6HrC zsS?Tdj6DoiCYU882D}(z5?Ki$b5e<6H;XV#G}unXn2+AW9-8cqREru^9E(o&utK2Y zNMr=1eSA9A115SKN6_1VP;bKl9hhC=p)eSGsE=S8V#j_M4>KLOuru+%KzLy4I^dzV zz%F1hP|raM47XRK>`h6EhW;b-Nfv#_7LEoUtqFz~LXt=^USOH$TdwOT!1iX`D!EbNd*xT3!Og_!y8bHqHJcxi5O5>+6+P3@KdXk9!y3l zL5XUPOTk=~hRUEp-NMXEe0njeU_zzUek`dh3^S(57#$~IT^nW+CW5J&H}Y8O3_y`G z4`_=)mg4~+MGtP1F{atV@nj(GfRH$&NLCfjqQ_%70gEH_Xm*Me3a5@(o^6cBjkfbi6S@VH@x)yLEi!g7j39cJ!W_;r$BFTn%}CP|Qh zU}i2kC?#sWy#9!Q)gIwSYV0Lv?h%V%k9$WbL@ z``3)+FEwNbI=3S6Gmz}S9kZWf&;7rJ*dfsWiiTO<|4Tso|5HNDRPxD8HF8YRT$2O$;46O!{VQ zjB9{hkVX;Ocd|bfwVv+>wsF53_}h<;aNl`!it+o8&biKyPRE8~F>I>2Dw~0+AW2AN z`!h^~BL{lF7Uw_71WgB^nM3Oc;7oIs5ztJUSf&yIG8Q188wo*YvFVg3Se2I99B{l9 zEn7Mgj7&O`S@AI?Rg%T}6t%&0Rha^d5TmJ$DQafTwgtl08ebBzgCXA{WB0~JMtHl2 z*SC$w5ZKOXhnCVenJ813?XYa4bt0!y&9HW2^E!vih}Y<1ggEKovjj2l#L3-8Mu;ZO zHxgvSlNqxS$`}iI9OXsnVL>KI(E(tnm;xXsa_k0J7#lMSv$Lk=X2c1YRi7OSU~40x=h zOewV1qe0Jpm^&Vg$9DEH!<@>@dq_Fjj9Qj5VFLyz7Ayv%3bss&N8Q{*=+q?GK}!N`k;3``JdTi+5^R*|Y=>1w&;eu= znmGrKtg$S_%Z7{d520(d$u z%huFjB(dWK%w>^l<5BNPi3H%M$CBeAI}7=>w57$cRG9<>G|rTOqKw9JeQR0oS2fLz~%rkG&O0K#^Q=$yNR zQ+oi4RzopKKC;8G!|>n`&dBVT0>$hU8QXf2VuL+KjuL)eKMRqIj59M# zQO!l9usrrOhe$~U(;HwTCg2dW#gst>%XKzV45)<=RO2O^cxXMOq}k<>HxP2A$NWsr zk^C(wDko0afpgmM1 zMQ3~egRO$oND4by%#{MA)>u*{2mq@jQOHn2#8i3su~nC3cubX`bP$jv<(jsON)?HtfxK182vjFp+6XuZ z!Fq%Q(VX}h3EqcMEw-rS21ynuvS2wdkeh z(v)%{?$R9p;bIY5q?mxW*$EWjj&Vq_4foh2wE6jL>% zl8zwY&@lIM;}LqTn!U4yH*cW5<7O}TV1|OJ4EZw>6M$KvSjk9x^SnI&<-xPjQ{inM 
zzJt$qVnAW4JQUH~s16(XMAA_*EP6R4kKf6MWhhA-*|81imm22fr`6ER@JP@<*wi#%;Ow>rDiXsDGEUY&lL~Je(9C9&0B(~2&V?;%rjQY!gPQCx8y(A|vuyg9 zXiN@tI*VQ!r8i*Ixn83qpS<26oGqaVfwoM2j!?S_O+KbWCt1>M!3bx;2q9fUAcO_7 z`?!|DI9w;Rtrww{l5taBKIS$iNNY+)WK$%)UrI`;{q-7|_np^=sC@aWnrtPPI~_0? zRk95@rN&v(+WucWOsoHnj9ygwHkGNq@ zITU;4Ne;%$s5l;Ci!(RZDZyM!lOa0A;RVZFJUW(di;}-8X-m;5PDZD;``yx~2JB#5 zaV%jQ>l>xdc>gMWqUI`n$1;{%__xZR8;zvI9Rndo=OSijndL~>9-!>OdfefyXnIxE zIZdiMxmU@d7|q_j9LcQsz7+>!DY^78?}}&6YtT z$~{P~TeA`U#JLeOVy$?sXfR|KrGbczzzky@pJ6!H2o02>+&W8Rgk+#4RJN-EML9sf#fe}X zZjU3Baxjq2r~@6y)~7b6jU_QG11U7#jUt8&q3ZG>m$_`hwvTEy8g><>)5$nVh699@ zUmgt#7N%H=koggvjRHI-Uds=pa2I7t@1u}SfFSMcGd-Mz2azB@@61oUK;dL8d!o>I zSyWK1i4%*JE`Z5aTFfJt7xN%mGWp?McS%%ANAjUaSQ6yms5H4~XKaLKW=oVS^l3Rw z86`Darr|sWy{e_cd02HEZJesH+)KioK$<@8di1y&=`0ihzQ;v?q&dLyMa$mDr{b|; z(7h2>ItK-e&L^-s297BY6NikTtbrP(RIoW?WPa(G87b3vt%HuD!30aGQsYFLFimR` zk&2$qlyi$Vol%pvk$8h=U$KA;gLGaVoBXNdXNSpb2sn&DEE17YVA7K51PPjw*qdZh zfuQi&i#mXma{%@P4B`m@v1h{>Xn}T2+VVBYftjB*N_=pJ_g4;SRY;!_U`q;CD(w9j zj?w22Sih7IWE>^L%2$FYRp<#+v%EVaI(&zR8b5zjG!CqEX`L0RqCF1806$wZN``%? 
zbU4mIXCei@#vpUQA3k1!??>(|1lRsMJW7Jhs5dH zl{@~X`|~sMY$tkze6wc9lGK>bf53g z>wbqG^c}kLJM>;obYJNs)lp9LW?KHdaZdF4YECyg(a+IGG;w7*(OVceeToykL?Wjz zcA{G(`bH=E%_uIP7xv_?_h9KG&z?^7Mk=1J z!inBWdfvKD^fywvZJp@p7kGN%PV~5ooZicc-s=*lM>)~63Uc}fC;I&koIYhQhk2y$ zekp$9{;^rP=_@Zw@=kQ4M4#YUoX)gInlRC^p{TbzEVH^;6(pH`go?hR1b1{n0*T$ zb;IT3ME9yfzjB3d>N(Nt7U6WG6J3^{?L_yH%6YL9-Cb%w8=dG^#`1bGO8rC5=OamO ziW9x%N2y+%=w-fe`cWskr$j&NL=S(%>orR1e{#BVKh1Qa%jvFkqPu_Q>27zT%jsr0 z(R;m-+MCqB<#gq7!01Gm(_QRD@AZ|ZyV8j+r@PUK9`%8ztCaeyoUT01^m3vf&C7pT zBhHELmBiyQO8vzJ<4h6Oa=5cy>W@Tk>p+ilkdJbpr#sN;H(%tdtpi>D-ACe;aiG&T z&*jVAfll8Dm#_(&wSL+E1&eo&+R=q!FvwBqc(^Jh+85hCmQ z`T6-b?lZ7gqxf#^JH-SBm?LmzoP9bWyk|?n-=a1O-e&p}ykq?2f1&SyzQOF1DZ%|C z`wO+b@v#R|^6B++!G|s8Y6{`H6q8El+uC4CG6xI(SmFEgqi=>JX?g$0jM4Qo%ByMw)B1pgRKDsq(Mk6lt<_+h}n2Nr`B@p^0vBN4quVKEqx zI8u(+BQ=GOcT7zWJQrEs=~IB%Mq}S^X-Nx+cr}H={)d7-K%t32U0_bnAZa}Rjs+Dut214UFY$b(#R@AVGzp=Fpb(^pcqvo$BePV{ ztYlHTWK%(WzI236(LJo20xL3uE>&Zuhl`P(8fPAvoI%fvNob+c<4q0){~)L$Ee@rj zK=YuFN+{yZ3bKYE5kDbkS-tu>Ipj7FLc6tS*G>p)5g8g5CWLqH)~*-UStv}s2JhS> zU-mDQq4x2)4ZO|4I*8vuT3Cg$FMptmNM(<9LA6EggKnxdy^%F%b2Fi5XxC2dI<-NG zvlf76;g8zK348ju39|;^i=M&kZFF`%+Js+2>)uCb8QQ&7$8H_u@2viNttYo1EIuu& zbg7>04JX*cHw4-75WE_C3Et5PZ;iscyJM&Hi`V^vc$*AJ}jnc;|9etN|VGn`r z7g6K&PC};rz0tX->*DB%MhLTY3o2!*0To`UR>fqn{)6V)Eh8z~pko{@y>LjSgI{lx znxa_l zz3Qe5-gRqHroP*%uwKuV51|h2*K1(i!C=+5Z?MA%fG!aqqmYe%7KKmUnhGO*=Y@4I zDoy?w*07e5VeOiC4ei=HGCZ_1_GK3Q{Obk;qFpv@6x2Ahd5e~8Fj-5J(cj?-#dqj_(y?XaiMXS;H zW8>lz5_L&>gE84;wxpz{rDqJ}Z*-cQd8c?G6M}bdU+*LuW8{4$Awo~zvBGN~coHgZ zoSsBGMLnZDTXpT)xhr6Q-Uuw}Xn$`rMk*DK=nLi1FYt-kdO|xrFC2L^2oO4k7SksB9plc7wMp87>uOerj&^dC-)m5`63Ss$I7i8nSQQ^xAdP71W+< z(Ob%VSe3u;iv}0owN+T>(3V0AzliRweS_HK#lF2olUKV=t!;K}Oy<&-RRIi3&1e+B z;AF(0L<2KTXKICyG(6Yz=)pg;1R)Q3UI~{5&oHVIlSc&29MpLgeWg*WzoM_NZ-4X* zOB|W9s9z}hqUW0Xf4eat=UqF~m`q>U%{6Q$eLDb5{(L}@<}=I%En9VK!6q~eB6tL8 z^5sRr23G242Aqjxql0&P@VEX2>1CF-ojbOo7nZ43V<|eC;CCwHjqIJ7okf5k- zEnM*S<|8+`X#C!4Z#BjM=TXhu=rpLwQ;AgUwDmykL8#V5&{t^|09Tp;H7X;Y`4!%J z?^*Q3KKusebI!Ocwm}&zLU$UnU8A 
zyX1osS^CWadmtZbxHWl61$%eHmvq=3ojk1wkr?GVMsy2{4C~y21#ofAlgABL-n{wp z<->EMY0CdG1-xTCnEvRfrQ?gZiKIf{wTUG3x_V{>Io|b95q^O*1WOKD4Gqqb=w-Ro zNg8L^L=Ckio{zY+)nL;RVhm_NG-g+l^jd><=D(q7O9gX;y?O(3`Zw)ZHmnqT|g44M;Y#t|kV_^Im}eS)u=qR)r*t8QJdZ>l6I zLiZ|F%N9l(Db}GvAy2o06^mD@5l0G3v?6c5q9sa}D(7B0zmL}sWeRu{DPP&U+Q<<@ zs@JOBv{{JQqD}jb&~*Ct>pvheO2yP2(srQeWDFcMxUJCCJ0=99AVwY1P}#_W3W#7o zb3ls(HgtV!-G-iymcu?r?}wjdV8NM^%P|k30L{&|`IH?zORJ4B-YnG3eSt>mW?1Jo zokB5-sQn~$(a&V@OY)1+`C(5_=a-ImOa0{KwG;fxe@g03UYFnp^;)^rNEWBBx5PI$ zX9soaKCDN~`oFBF#xJvG_tqWea)?X3MMZmCqv;rb#5ss9eSR_)sE)pa2deL@D+*utSADg%8uYY=7yqHpQeH-We8I;*lX=F+Pa|0KlnttWt zYL?coUmP8IHPro+c(vHq63;8D}npn{AtzLx!zC zFJ2CdFTZn%{Qt=*|JlRj^m1}M8LDp6c-7UlUo%?t@U5Kw@}qUT%MXVp{1iXxT=SM& zDo@K1$1M3!wt|=cEM>=gu5}JRvQ`=r*0Yi8mZ_a$`_7xyGe`U~TzEUXXxF*plHW`T z@P8;8sx(_1w|@NK&&96oE4lNdwc5Q6`}ZYRIKuxwTMJ4VkkhB%!z_!Negl?%OPj2V zWJx4Bde&x}4(2R!DYuyazu?cKY+pZrqdQAy2kexdFHXv@puBgxT}0@Jc4--btruMI z{2=}_r|O954)O=p!G(TWaAU#j$3u2iTq0ymJ@r$?Zny3Ip@62@r$SkWW}O`qtnyOb z60;Y^RDb_vNE=JTOs`%I-iVC`-5oXQ@5s+Ld;WR$!sT~jbc>8BUhSqy|6fGU6?aq0 zPAJx7QwjS2Y{etkugsibTc<3@yYKT->-!h=eBW>UTgtypM0ui*V+Z&mPMmo8R;^jH zZVYGjd)ZodWZtkZKQ0{mC%c^6*JSbS#N8bZPF}YESpEs^XHCCmo!!q`TDMRBe`~#Z zqsFV^QE`>}w^W?lJvv)_UaVo)&8Bu^ucvxNUHvs%)J^Pi<973_MarDmP-f`NS7M>}|14bg!vB7Bw%Fv?py(=%~ozQl37_7QhP#N-g$|8LO-v$IS>shyKvt za;_De&*HQ>&zo>aNu#ZQeF`|kfE#rI6vKcD|x z;_2+pA8q+{Keqdy*arm?$P`NET)J)M&rExN@vQp!MDm#TTh`xdnx)I z+y8Lk>B!mxwsnnJHsr0ic;4y(6YHHB@}b7Xl+1Tm8$iAFIuu*T$QEu?!M@>UiEI}{qpageokNfQEW85lmFPD zzxPfYU+8)Eg^%K2kxhS7Z+(-RuKQ5DK#SL6@A~U%rv`8SP_Vsm;>N?D#OGD3rM~_B z?a^ww0S)w3KZsp&B`D30$Ope((9dA;tD)u~4c*4{gKw+-K5oHGk34C0W}FtQRB62R zTDfZz1DFxhoOs>DteYmpArVDX~JN;g`D8s)F%vpH9{LzS)jeScLl*TVPP9aEX zSB<%1ZDTPcM|5#|Q&;HS6B( zo}R{=1&fraU9(TEQ;)@u7mo)mKigB9)jo?2XFbc8_-f3PUUfd){`~Zdn7jT2<$@~# zZ~J>LR)=(JX)NA$s8}5ND2AJ63;JF5T5IQz`T2$4k+(mkeZF7gvDIZuuah^ltADh7 zXrHg{(%07gCEu0acG^R)Iafbz$lKSp{zLOHT~p?z$9J$4>TPh{)-^oeS8K?UhYy|= zZ{g*BLw*JFsFZ3U2 zK2@^V@mpNdB^;mIgh&ZbmTl-H#}-wy#6Tb(;2gRH>|ako+oOb$5V0EC*7t; 
zVH2$7wvHPfxJ1FWHfT;;@qvnZxVU0kqs3!~mK61hutTR>Z4>=<33XcjzSMfYO1)u& zuNGHE`zANPYiyXMs?%onqQX7J-iu0fo_zJ97(VJr%hYx+#9Mhz%#3M$gwo55-!nh7 zjBm^CpQ{v5X5ZL3JGFZt9zUv_>i4tFOpE3OeJ#FEoZR2?X19CEkfeFd!~ZCJSM2zg z=ikp>*AWw^Z992oU98wM>}`tiOEt=;XX}DhLZ*~DfpPj%h?=$kOJb)t&%UhdA1B_A z(^i|hG|gIKM}_zyr!HDw9oYU^dHxUU8`grBSmS1v-g)@Y%e~`vH!Z0xbIhtbbGGNM zJ_%yNv#C25RUU2aFB*#dJg%GQcBPm5+8?b-eO9Xgb#y}0Mwj!AdJ{8KjJsZP#7=#d zSTym%!K!C1Vu2giPGzKB6@6Z(loEVy8MA8Y8tq7zPJwy+-EIOJaJ!Ob!h2X&vewrYtPV%{3>Do-!$ro5!uhjOoF zJvA))?i5v|$-w9e|)d{2ysf6iq*&W2vJUF_4${$@Rxlpx;!b> zTpow{t}uRc5jrc)kMcHF{t2GrYIm>Hbne$%U43t@vsONPV$S9Jzgg4W?=~Di>V(*+ z*Dq5hjm|F?=$ksE_=oIZ%fsV$KMJ+1A3L2K81VC7;_LJGduqCN&#L}#W2jpt@2qo| zu7&N4?I4PKd#JBOzZbm6CbB-Q43rKksVxbbjq|5B}=2xBHC!v%gF$7qazf ze1jLy%I=lgcb3BI<+M9K+18z>8%BgJ-zb*MW9(Vw>_Fx0QDVIh*3BU=S9KH%Ykn3> zY-@0P<*~)qr9sb+)_#8r{X|h}(248RUpB7zx+D~E%GT$j3EoLgjt`}Bps~Eb`>%!n zZ00H^m#&t5IC;1AYL|z5PqchwJ-S(pZ+8FCo~s3qxBj#`A!KXNnc0h6+KU^~CsjO@ zd{TV0WclWO!`_9GPD1+s@BxLs=H%SAn!80^Kauj!-qZCaO!=qc95JHh*dh`8j)}jm zIr+3-iP)^^?b}Xz(WabI%sv8bjm5`_@k5SHxp(Fo%0-FsX% z>n0O^Jasr^(Xlf1J4Rd*6Vk86pE(?`eD_*G4D?8 z=n?57#-w@cewt8AOu6W1dGf<_@$pL4i$b@<#5WiAKMMKUfa=MZTxL#a;EOMFGDch* zM!FU0eIt(^OR5`rUu?(P@gb{==Jo30nR z#kHk{;+LO%*jwdcLf1kKRLc8`E}6$8FN$AXUk~$*nHnO+XKljb+ait~-0gD2`kmsl zJLdVGD?+Wwm(HO#MJj7W?if^P&AQNon||N3@OhPxbzSFYb{;xyZ}Z^1e|C#lE)LA5 z0dSu<_BWT5J1^RaK`HO|ZttzOR;|=H;cZw*h-%LCp%vQB6AK(VJh8_8{Z=7rQFw`# z7sRkPncu2NYS?~+x`_1Y0dw6$n3G< z6~vOCa&l-+iw}Q(r}^}cA=!P#R6N#wjd*li@A-e#5|kZ$$5p5ioF}W>P<8Qw3#N-X z{e|(Bl=sD73hyj?Z_sol??+5;^&3}qSrdh7Y4aBI&+ff7dcSe!^vBlZa^o%)t~p3~ z8CzfTSJt?H!pv`^H#_+VUs$2#ERWR zvN!d-SMf+lbl1hMM-KajXsh~++7uR(RnPEr+ROHTilZa4F|PFyYo#5();RpPX7g2h z_W!XwkMjMlBD$mv^~Gl&J!U=BP7`OJ?Ef+K!;nzY&C8y;)0dCi!<&{3th#B*&UPU~ z3{UeEzkJb};58v`Th2;loX@i=WowU8R=C+EasS`r#Tz#+OfOUJp(y@y;cXeqrBDSL z5{-%KXiSHmT|&~!Nd; zgL6g-SoG}|A2hjry6o#^%C}`||KgqBMVTGC8H?{VVo=7cX({6dgv7J??xk3!%Yrgb zU1uw!=USe>ezD$KCuhj9v1=QIEIOQiw!qv2NH23otFon)ig9C3DNaw>b_Jl#WyVJ^G5-vhCq z-$pFYBuI!?SZ$C$D&W39>=OCC5aaYS}n#}pF=#$Rj7Kb 
ze-*KE1NFnG>RIB8#-9-180EI>s~46%UDA45ylj4Wsoq}8nuF25wt6Q9O*rt(Ygm2j z(;t7V+bop=lZpIFK!4AzyEo`BXM2P6$itXT@+h(4Y^(X z)qb%?rJ|31%<5s~&-Fk&_;fbL(PP$$%TC<)sM|T~%FrEW&eU4dY|PvheJfgnL;fiF zcyo@Q-r9WJf!#aGTCH6_O=xa0ehO(=`&F@jvU@j^=M#scF}%U#b~AZC3rPL4Kvw5n zs=TplZ;Em~%zHQdd_nUI$^VhrcVZC7BSh?31FGm47eUz^`smEr__6Z0dGBics}8F@ zu&LZ{_MCV*aBO~~keCr3$(0O>D> zenL0AYxl6yrGtWch6%1Y`7&b!g^)ipCnq05;SV8P@JAOx$3LQZ&W1Sk$PNmBTfO0I%pW~Ec>iV`zmK0nR7!xuPmz*|*b;-$>lRw8jrvQs+x9e_K z7oQ9wIwyRI4` zLWmD&*{E*gRIMtBeoaHyMx9(YIk{d$YD$F4oZ3nS30XIFdJZ}wD2z=Iy)L*f8;wD$ zL`w`_Rf7urayDR5&Do#)+<($_diJ1-Pph;NXM@gYyYJN%dR%{7&9zxK!v`PRa%q(K zLxDD1u3joVt^DVVpH>tMIJb7o?b+8&U;Ve;E_e3t4S#N*Ikb90t0{wb&KXwcRhv0W zyx!}kZod24#l7^GNu#eQ@b-VFp2^Fyd;K!B+4MJ)Ei(%D+B(V9>(uz<=j}_4sQ63w zL0fwF+28n5-T5V=Ue~I&@!X-8qoaJvWP~1n5p;d~#v2)vij)bC8ee3gE@WJ#jr-C{ z{ZXSsz@BUV8`7itMMeDM9r57jH4&?$XSeOCEcr;C(D-=7R?juH>i0YO`{_ra#WsxT zzGV7i)8JwypKq?U+o0}$XjScZ0c9+uL;9VrH2ueU^>$v~(C>_wZg{uAfoWrA_{?rQ zxygnd9TLkwvL2jX#YcOxz!lZhRaHJmtsUGpF!Nde{{2V2*pxE*v1_&JH^!FNJRKhL zQ?)~DXI}WnkW{pB^EV?$v@Gc7rWo~0`K#oihViu!kY7o zyClTUCM8EBHcxzsbN7;V+x)oL%el(2AN@OHy4nG!E%~TsyW%ixFkV zJfHCUB15YK%SR2_J79d1D=CIMX2rhIi&~abu1MI|BYO3PPWjTrhNj!u<(s+AYwq>O z*+y-Pdz`)Sr0Df|S+Ck1Gj+>9sQjGFD;XEP-`{%lu3UKby(VE9Tld`2&DxnS%WZ4t zRr?M`EmH1Z*l&Q^E1~QL!{@jy-E<*Oe`{RA@T_Z-%3Z!B)tOOuUdvX)PZroSV1wJ) zdwajOuXiiIPf_jW4t`~Ot~`-d)N-^{Pe%NeFR%53jN zu_u>R{`}$b@<|`62425iI;Cl;MAg@$mBRhkbj`c&)0Sp|Q9oAu+ICPtw-L96kX=3d zEnGaoaNMuQkL9k%|8V==%I2323+H?@?suE3Y4u_2?sk*TPnq?(!?o8p|ExFk#4n*GK1V+~eE;;d zysy~d$Rb_^u)90CY`qc{hTuC z^WAroV`>+FU3~l4u@znxp0aqry3Q32OpcrS>dd)y`&Mo)Q`O%;?S~?Jvwc&ZEUD}| zzUaxte?77owp{*aZJ9ZDe>#1q^UB}1-6`>Pc$0&JudZBoUb{51bl$WXnI+99r>B)H zS4E>(_h8JYEopC?cRzBb!Gh0)Hk^98_@|IT7s`iTZa1{jdfy`6!hp@mid{ln)eU#H z_ng--#;ekf0xv60RY$B|al7|VE%v1k>(=M$(!N6!hj#qsnwqEj$W>=%M8}mqS}e4^ zxzlmqHk;e8DmkdD(4~B-n%(A#J&Rtxe|X;R)L)`%3zi4Nw-|Q1SAJZ+XX3M&uS;Dm zT&s#ky>;uy9S{BT*3XIQG5Sz{-;LdtOkT8VPMe+X3He9s9~%dDXjyaf7QX^1*Q-@8 zdvN0l{kkEye{Hut>iK-_Z+}ltp0N5^&JT0rbzNh6{JC(`{^hO>%hn7lkfJQndvQo` 
znfr5RcNaAan)khYe{$kw@%^0q^Up7NTldk62SL6a_x1OFIYx0atol>qsKbv(s0W;^ z9_vYuxBlYOxpgs zbk9cDf;Sc}6#mk!a)p;Un+4Z$Zwsye&9bFwuYn`I_f#ox<70`b=id3R8frD4;%n zAm(73G8JC9Uo92bs7Z^PrFZW4YSD2?@h%57wOoat!*AXXF0t^#?c|6~0iKuB)()Rn zwpiq=9u3?Qk1YG6Ntt3z3oQ5Q{r7{(uRnc$HLH$w=R$qcQk9IY?v2@Gc>Kg;K#?(< zYlZZw@UU{)r+m#i?YN<-S+wHAIhTJ5$@D&|?V5fp%r9ZH=lq>TN*zvra`)$MO}{LE z+K-VbSW1%SL7FFYEqmwXR!>mw2zwUiVa6`M{GIt#;oVd34Z{aVX{GG?dCsWwte(=(yruC{V_3n33on(twfe;FF(;q@S|>VcO}{ns z^`rjzZ9}E;F~?u-DZ3-S+T!qCeKdi6;*^_zuhsO@L?3SF2{>B^CgqsJt) z>2Nfps*GZ_X)jInLWfSD`082YUuZ2H8{%h{KT)X zM=SM#X{SeJ=3lWe{6pD@D=!{3m~~aP`q%hX`|}k#^kt1(pBu*uX8H|mxo}kdRtNIN zUix}$VWlD$uA8r)8XMu6aV;`d*&_CyrvAZ0SAs&47k=<*rpQTp^{`4vsrJn_POiP^ z-h=n+MqZEW)9mqk?R1bSszh?Z;?-iv&-(Pf1^-FxSJ5E@7G1<%gV!|6my=@n_ zwb-B%!u{SguGG4f*DOotDkYU;$6G6YnbC2>)GaeF+GbMnk&Q0QS(bjfd96fcgN50O9i#uby5^6@ z&&PKv-sJtKV-d+x4UtZ=GTf^_UOay@;Y_p{l6D4vM}&Z*K?h< z8%hm7cl^$bYKebN3#k`f(5p$+9ebY_S^C@PIfZvVm{7=c%jf-4-&X0x&wl-R+l*-U zAu%uCo_uIn)~@@fwnKib@pZw0N)to=301f8u;lp7S)(@uH~RYFP1KFzD?i+Px+Lt+ z@^cC&>aTl73_9GXPHa+>i<%m4kIvpWQn==;WjVshX+;}vDfoBM_|J<5Y3C2Qe!Ae9 zsf(8lD0OwLe~DdQ>-u)FX8b(p>E$M=oBbL$%IIC}IV&%3=#H*YznL~#%O zvqD#EJy~<9Q|yMA$dzNRjeTt0n|fFe)4e?xr+KY^v}aE9))6YdUe?KPdUaa*_2%oK z7nV2s_x$wZfsfj5mPwnBM!Zh>eaF==tyR^Z4jgs6;`odr38U`Lh@3j9x4yNp=kJQC z5tk-}-Euwl=h}ih&gLDlt8MhkVhdMUQnr>IwynU8Rd<#Y@YU999eB{Sa-T;N_P#9O zy?9Q0&uo3gip_SEyZy`Zf<<4CSrY#$&S&JcZ8x&>HSRg@^xY+0hn?!|GN4%ar;+)7 zJ-EenagDPN3fGwMVQ%OcF)d$M)PXJeR;qjk{_yh7n73K0dqoM+LvGalW8s5h>L)?f ze!jWCTm91e_xE^yV%M#$yQeG}^5to2#onh2+|O$m>oT}QGY?Z^uffv>N1E=9*|DIb z>G!_-%6-|s@Irb-_rJF+|KfgOaI-51i>~W9;6-?)4-ZtEu9h6Vv}uVhx`#(D84h;( zv&U&`t%i+`9==&WaPA+)N2sSw`cx$Lju546bh>fynYW)VG)mjQro8W=J_}UvHXN>t)?ugvUijIx8L;*6a3e-?U|wf zdH4C$WrvUMZ>M?wp=h@uw$@7)3~#-2)uZY&*1Fvru=nMqPfO-@ z@lCip;f1>D^D%R`@Bb<5ou5~W)p;r{|K;rK`8!ovBReKMo)=!~ugRDD#_asKw7Soq z!NI?dnX#(Fd+CFZUx|#VE|KXGQ7M17EHd-`gaH@sT};Yb zYG=}$vEHgBdG@@F$y;jS(*sL;PHFnAT#Z?^@&~V)mn4>dTSZ;zvv&U5vj5zu?sK(X 
z%G<9In?}^{k!M%ao#Tq7b@BOd)nk8bmy>HEr>#5L<%dTdC*7zU6y0k-IdzT2&~nGZ_Dc)G7&>h)XF&9A3Xd{s;F@D{ocOHiM`4Xb2)!<*S+5phbAn&*(#@Za7&K{A(u}5zI)~A zC#GGaVm_@euASJ%(A28FKkxj){{G#|r|n;FRCjtev2YPf&k}tfCYLsS=s%>}2KN(6 z<;lChcS>k|zW%5UrCfa6HvU;Lc=n9n7j4hicCr5Sh$f<8#2Q6|4i(F}e5hfnu_>yQ z>V`P|VeO3IU3E1Cb_n%w2v-p7g`?eNJ$a_x{e%`bBFL_QB z(LcM>%%SF!jL1 zMGbEI`Olfx#!&D<#aH)=cE1+(*VHT5DxJxQK5ZBpa!R|YOzFH8>wGJuHEu9rO&K}rS&Jx=f_Tqo_XtRyB)*JPYDkz+2e5X zo+F1Jc$6F%nD6YLt>zATec)@i)yIUQE2HmPy?4Dkzo$Xo_~OY`UEO{geCEW~+VfLS zWrY;$cHy3~bkv?TK`+;}@t&^;+L?N~_4`9hHl#&Gr6@XOpBQ~*;-hgJKdv4)boIxu ze*4>&oiOgR>-~fSE+7AVdO7~X#VLz#^;_ZVT|iv)z zmv{9VJ~eCAuH!4eGQs$=?Sl5B*`Bn7cmmSRsW!tICxYQA-Ew#Z5W1NIL?PWWK>-H+ z$qc-L0u+AfQUcP=93V>RmI+EQ=uc)K^FZO3u1r9>nFAzKx@AHp81yGIG9f@WT`rR2 zA|cFf2#G#2}OaT_oTpIc^f-Mih8( zN|Lc-YU8Y(b1KNvA171$XSW=hSA&}kP#b6M90I^_OMje9?VsKD@KlV{ zzB!;a&f2-`1%v%@GPQqpv+%CcV^ZEJya)X5xTRAE)QIT2K?^uyTeZxiv_4BZi=0yq6EY(poX!LA_$>?xCL0ZJ;}ly3W%FunNq$p`D3`k z?+O%;45%11`&Q&_yJu3*##2CxI2Ld$ zAiw@EK84{o%dP^rbW;p;50`Ei1^l>&OSgcF*Fq*>CVVb{JcK8OLOHNc_@FF2sXVZ2 zWk!FTq9jf(}4Aw1OgiO8T#m!V9T;r|#K(NZC?i$OTldWgs%NB@J*LdjBc zC>hFh8On4SvUsI(V--OTwLTlGKvWL3o(v zC&T}f|G$N%^cCdbQb>-15ZnxM6z=!<3dZ{$SE7G6GzC_YgG(tnNUqfa~mM0c_GzaGT#*+SxBBC%6HlTfS_531Ksm zOa!#~Spb>w{}!4;Q7@KzaxTBbkn|awpv_Oh|6^#1fqJprlXLkc28{{wxIlt7KMDVj zp(zH1bQ4aOLb_!{0Rr9s@`Gd}GCY@G!I}RZnnEb0n{c|6(k&xO5a|AwA0!)*;ko=u z&iwDt6e3f)38yPlx@9C21iJs_2gycccrJe?XMQ*I2ok`RiH-%_6@soKcqd~ixq?S@ zk^@dU9m!AtVzC(-G!P*N0^P*N&1J!2vhagU7wgF+2Ri+;beR{P5x9xS7~o|BCokkF z9+n4qS=>Z}Jc}1{j0PE7{KRu5VG4&E7XvJ9!ZV@cx+?|#M>r(hrMB#@gg&f9LkA=o z+`!qNgAC}9$4wl5F-(dJ;aPl~N3w*mxLEuw1j#}U9625-J(49l{#YkOI4%n^<5AuS zm%;**^TP6t_$YtuQcAL1hT;LmU2t;&*M%#NZlv712yP_r#s+K}K1}(jhKeJPtI9ja`gJF|)r+(2y4LkUPJbN}eY<9|R#Ih2gRY3aQ-rWBJGg zCzEOkH}EQe`~d|)!D+PWVl?pG6p~C8-~ul2P%CWZMsgIF02w#p<7Pa93V~llhdj{* z!U0&^BtsX`6`;9faz5g+(O6i7AsStfmD8bkSbS9|EO`JGa43wB30d6mGdYkc4wi-j zy#qC(@FcFg0+6k&Y&eDT1RY@%)SGD4#b_Xa4i^{EfU`IR$P!MyPFkh0*1_UH{45M) zAV+ixW0U1!Sso}3mL4TWG9)Y0SUxEZ$}2%0my{lnaocz-A3T4I%+s~y8Bx2?iquVT 
zB{mfi;Vy1S(v9e}dS#CQIa@yi2kyPrzBals4fger^?F*-^N=vT7rm zf>K4%DJWUN)t&L&fkOs3s~iPM5RZHmhJ|(&C=6~;NR|mPhiGw=( zkpS5rBanv>=75h2l|l6cosa>zTs@Q(M#f;f1|%sV*h3&`x=M+$F6>`$k?oa$MB)Z|hCzggoXio4P2n@yzmSrLApt#8;5+M6y1o9BV9Pn|W9;hnFCdqt&MB)Z|hEa7%1d@`>mn2P>EBhyH z2N-Vr5s7awFiawHGUpJxS4Q0cWi=Pq%wai&J%$%hRH4tg(6=e*#ZG+8#lh3Ki#j%_ zWkzbeCb367OUpJL`()_b2MFJYp@El)K3isPMxW-Q*z9kKvQNc)pIfhowq-)NrEY^B zZCmT>H)`Q;NNC-sVTZ2ay-l4H5(0E#(QW-=5?j}c?$b~ZX8Vt8KDA||%fpg5tjaYP1&1nW&WD)#yrcb zdHeRo&$~aS&E6M>7wju~a!DaCL!HOIt4CSd-&nP=-?gzHQ@eHvnVobvVCB&oc{Uyy z*4z3|!8?cBO#Ssz7xnRcu{CR!9PP2KMo^XVemJq&u|(mZZr#lunq3|Vsd(pk_~<&O zX-ghimv+#WTWK8MVgIr&x+7&DO*`k6Z_ee>4T`)mE>7?)z3-Q@?z%>m!qVE-*s^F#tzy3-Y5aKnWTHodpd6M72}5;`GN(|ZdwK){rQKnR@x z!Ju#UB-t{Ul=sQ^{_{So-|WuL&hF06&hDP}?)JJgiH~qLYaY?1SEt_1>i2KjV?gJA z^*fpS^zGY(93VC56W7QZ-n(OX?}m+gTO;wV{Td_~Qk%f(@DcEGbe+;+es%rJ1eC2; zu6|&HeC6|32(K6s)UZIMPbxPGZCo(2kgjl*CRLjjX;!pavFgohv_${a)@&zOAV5zc7?rhx)(l z@VG0;2HN=tEJJp%J0Y|+}iBAfNUaR{NSg^E;h_A-4fYGWbwl_kbTUo z8FL!h$oJizEJSv4yN00|vXw0tCQU^4qRe00AK6T*f0z~7&B!||@7F0ekiD+z^2JSLvl4UkAhO%z6WZQJwwrug>W}Pq*|v=r zkqu{Ue8eL=78;akk8C;VtH(=_JwIqJmq#{TtFlxL+4a3$Bd#OcE;aDbMr7Y%rli}* z#^;MQ@1JV(>9L@F8C4E%8*{euubC$=4!v@Jz}fhRX@;RMwkMQHTH}12f92dGOAii} z3Vu`M_3psAQ6UvNS1b`(sBeW)Lj$Y5=zq1=jOvG@2Y+>};m+MhOBLL`uvL#D!SfF| zE4AO+uFdh?=MVO%Gfc^vUpvkE;E#(fzDhW5FGF|kHmO{PSZ#LwKkT{hL;3mvI)Cra zy{7!B?4Q6u9W~J5=Cr{k-|nV)|eAUk&`<)_@;0 zAB_Zfr4I`P$On)g0G&yNEZ&x zDd^l3bY2oVHwm4egw9by=eMA9T+n$g=vGajQ|=0L;}F&Bi0n489;M@769nLjaC4y0onjW0Yn3c z01f~NKn73%Q~)PHFMt?;-T-|7(1H;M5D$<5kO5pdUbgfB^sl0R{oE z02}}hU<0rN2mt?E^$&)+h5!r&7zU6A@L%^|4g6OF|F<>Z2hIP#JpZ?a_O}CQ56}Ui zBS0sB&H!Bix&m|q=nl{WpeKMHzyN>)7y$?X6Mz}O0zd*#05pIVfC2c}{QtjI|9|Iy z=!E~eTpB1IlKnin`$_T;c$7TE`z(0~JWL)O9F8_}!NG1{aO22e?W8T=Q%JM%CfV4( zg)~J4H4AY+68{ehjdDx;Cw@qD+mLqN?+Ju>J}3~J{Y3#dXEzuQ+C^=J{mqRRK35ku%rp9Muxe@gJ11|->gP@`P`lDb# zaPUBHiaX3@40%Mf!xa9+^JalFfr1cd@jzXm%asPX5ajLly8^IKGspNU|R4f^xaL@BJWui 
zSf+1UfcqSapm1o1E6WwCart>#2WdB=Fzm7GRX}#0N3I(;U8mt>q(_ThyIgM%-d;flf@?t#|5{q6Ri3VzsW7gqV9#{b*xMddPp-yeH$xb?-d zhh6&*R~n9GxG?O|4LHW@t?TxUNwzl`H1wuH zTDskp;VSGte=yAz6r}@&gS+_BI)%Q26HnY2N<+@w!nD^Y z!Q-M(xJReOpfrzboz_~zG#^FMya}Grtvz&ph?n4UMMa^W``d1jCY-lFU9Q7$oXL%1 zP@V^F6!DaO8V*iL4|ccMM{;z?3Ub*cL?Qk(S234g2BJrWy7LzUJiwD13P(xc(%yj~ zAbPr{8Mo(7dgOXkESuludgi9?0g@X_!(6vDEXpInT`l+Z1Y}=%I+UJ{>H{4i2JqLQ z0O2mz4g0mw)5Tn&*FxP=czyqL&~7f;sH_HU@LjHFnQ(EVW!1)qm;Ym7hQ^9oq#7PzYh8LyM(7o~nh}9;zlCCI}w~!yP&FKO3d( zK@q@E;I>mY)<7F|S-)s8%^rggPpCWfr3)2Li&ix!*k}AU_Jxin-7Zk+)n#$#z;#c%}un{yc4eudzP)q5nX9mqy9$MZxqA zv*~m#9mWns(!3llS5tRBn2mC1nj9J4c*5gHpgv7CC~qhNl;#2|LNsnvw+6_@vWwD+ zaHoUsnHSt~%@zUmgqp2t>>qf*6q>G;{y`E@wf1O5A_>uY=CKZ_4ak08`#kJQL~&PV z%|}>e%!RfOA^rwpi=q*ZT-q)eQtnw?%kN%q(fT<&^W*w=xpoA>iFn{p*~o4BNSoZL zQE9-6ME3Hhg?RWq=^!~u)za_6dXihI?j&zI%93l=b(aBt0G95gK%* z@?_`IAx2Nk53UWsrKQ0cSRez^E&xq$7(+%iRk=fgW_HqrGZZ0L)mKo)zhX zqCDle-cRt-U1L31dQJ&wKRi1(9s+pN-61fcnl#9rg2A_30zGeMYu_(}&(|gP5-~@t7~cnz;!~1hG40za7<_dx zu@~n4PNnv3Nh~onUP{5VZ}(vENjN158{lRw9{k>)^5EdA)q=fmcn8BP-XS3k-2WuK zPu_&&{#)l0YvzOhi)iy;_iO6^lOm%(lBsQQaJyjmPaX^(`3vqE9ISokFWC1(e{3-N z{te15yjOo-{4^QfKM=d zst|e;zE(JRh(>?U#6F06jrwCD%J2^U48p4O3IN@M!d!1|ynzc+z!2o^TEPO6xh|*F z8yc#MUZ0CSI#&!uvT`j6xstQA*w1h^;=-cQMk=}`2>4|UMgKorc`z*q&h)E{3EpEs z;rio4_v-X*AO>JM?LrCd!}_=aSdhC$M1!Y4E5i-K?6ueZ056u6;K|^LM|+}(AE5!6 z$Oq?%zjeouuS1?IC>RCE>5!N1`BaUUAJH^d7F&$&vH46qfr#$OosOkv+|cf)y1CK4 zSI?}aX$#hIZ-S>u88FZKXe2#9mebY@FE51Xo~_qqg5sf69~mGeTO(esKMMtozic*3 zGlZcwBxaM`NH`c>S_4#<)-&&=<`FH5Zrr}-4j_{QT85YIhh;88cv{r^rE1TjqAMO~ zA2@Vp8eBA1_^}&Hf8`Ey%5tFvw#u}RS}UqM4aCSVm3?f=*Zw>#?wU}kXk~>U z4dEYiKIP^@z5Ka|TEv}#bP6s5`Y6Yx7kkf8krkwA4?AQt<L$YY^3i2B<3B*s zHC}W_FPF>8Jqh@Xv0QBMOa*Aub7A$;mG<7H5b&n^tSvcTSn?E-mWGA9ua&2ItSb}s zb}@IGw0=TH^ic;KT8-~jo9@n#hh{t}c^8-tG9d8!8W;3j##BVoVmThP+dcR^q_J=@ zjVjFafYvd>t29qChQ=8x8$o3)=1uwYFlW|AnUOJm=rR#81Nz~^q#Q~{R2KS=<2A5% z0lJ(tw9dM92nOYGH>0p$&?cta36RytdF>@%H&OENH+c4a*8+qhPhbUjo@=#As 
zx-X_(;`kEW_~81B{3tXg&~AI)4z%Y@@V3OaIy8NF)6viZ{GmtU@fBPX}Hmv|vvq47&?92oDqbR|h)qpjLqoTM*!Z=KY#Oyex#U{$l6B zAooMs_W-8=P68MJjsYA8I1PD32+skW1$-HV7a$w~;YENe06zd+1sbd%+BC5N%3cI; z#O3m@1ww$!mZK5WSrgKR0fg1T{CEg!)q!19z*_^}5qLX8hESkY1gH#9AM!K=XaW!k zU|0bU*#JZUUJJqw01W`I2k^2shP8$GTL{A-UI*YV;O&7{2Ve)pKLz*>pawuGpcP*M zx^gc{embZd^q4I#JgDOb4{Q3>1gHTJ4S-DK3go>Ca1$T{U@zo>KEd_@4km)Z=XkJV z0Pq(8gFABAdH^&zP69so^MWmg@HObCd}6k4{B)3x>c*iCG!H%jK5%|8|6*r=?$ei8 zMj^~kKw}2d&&5Z(gV3v}cb-*)THqci`)J2kRbhkiR5m*3(5%r7g|<#(BJ zp>gH60ccwQ_5eHtxCrno0Mh*~2#=(?%MS15!h|e$`8p^T`V1BBR|~w$9y}WG3=ggY z+{@#OR|WcYSY8W96d6!7typ^S@Q^XZ$CsE;VoJ%WrKXjhQDSD9S!HLJn^XSt3iHb? zsJO7wqRJ~nSBHKRxQtf!PAFDsD_Ir(&HFfn`)NkFOO?Xs9bVU1x z9U66Pj7J)q5KT?Z%+0MW*p^%?v9;7jiBh9`wT)@lJGxK%*bWKp`gTm}l-hY{ml0jZ zb{pS)LXU|(CiVPGzre5%???@o{gSGl7}JPT{6;S7&eBB;bUwu_LvdaNNg0$h-0v^a8}oNEF<-Fzx46LCybaldeWH5i3|EJ=(n){ z!U2m0E*i9W@Zup$hA!E(bo;U$%XTi`y<-2;6U(oxyt?wI-uJ)!^~(qG&z8Pg^?KDG ztKY27T3&N~oeg0d^_xZ&aBP=$$UBu?>h8Eb@fitw6ZZ|B(nip8e|kwdcM* zzy898iyJQ;xP0)3Ls#6VXu@aNJ+4Up{6)PuI$1M+32!Wa{*t9~@c&(`bQ!3x?Ei;4 z%b_yMdkU`LQ$a-^{NF<>{d-0I(~>IZodHJW|E8-R=5cMsj44`WbR80%r| zMw0m=@Z7)Z_&|-&>^-_Z!e3vch(17%RgcH?5jEqD`s%p8dCB1wDpfL}97WvaR4`S? 
zBmB9+-e1Xq8dfi?Ye1KP0o>q{*zAISVYTw-$Li#-QztBF$Cj|lSQrQu=I3q7$HI>> z)XA0MWT%tx;rOs1zx;j{t)1ZeS09dCDFF0_$HuenABLSg;fGy9ZluPbd26$sdxiAT z4_pKJ#5@|(Ba-I%r1_6snfsSZNa&61-a<0w(PRH2DOXj6vBJ4T2@9%&R=I!fa+i{; z3rI3{cqy;FQIHZ06zl%Ak2nJ4exmbGxK)})+`hCgj#{u=-EM-Ez8^Dpv% zy&6ceZde*gHV+E}Gt7w(Z<@*u=Cru~+CaDx_e}>)G%PWk!V-oLJ9=qYz2U=)4r`v4 zKw-m&4=aTY3-HxTP+0Y_IoSkvmuL!1N2c6-$Dedkb}kL^(@a0dJDNQR+{l?CFzEd^ z@7;Up5{&P^_+7eW+S~a&jH-OTl?1uZRru3>_zx1_8-eKm61?e^E$LsE>?5l-XBo2R zFz;LjVZE@hfhAEmunzaBo4ydlxLHj_hR!vIb#ZIZ&9x^VgVfDVhx0hLCS*8vfS>eT>6L&pD<3 zRok%tSuP(LeDvz0)xWIQr-VGw{GS>V(CdF5Hw5lqdGV1`KAVHB@y0N0^MM@ve-}G+ z6zW6&@BeLg9Tj-fQ|cX`((n4<{~mh(-z(~$mh@ZR8Q?B{&N=<)y^7#q9nd_-^alsm z(r)v+z1nqxgTs8{|9fcNVDznkf4RPh;9%`FktXhPn+69r3-*4|pe?*`5!|s;=RB+I zk|*`Ah+Vq@NB6(t`X}i<@?!3(_fQR5h#LtlG@0{a&OY@hZ{q;!``e6v9{*@D*$IEW zi;p&D(OVI22ivHgo0w=P!*?Of*t4iL@T8-43UX+(ygN^ZcOU&#F&%7yym!YIIA>D!-p!wOJ zfH03VUgB84!1r|TO+jHg?Aaj~o%U1ct3_@+=k5X8goLffGMZjr71Q+9s->GNdYMOi zkI@$K`|hRdZIxgT!~1F3cKt@XISz{+ayMM=(Cs7Z@U=JJG!LHsLGkInT&M&s-pZHi zHKANIcO?B~6d)NnDFIjq*wGbo2-z5z4_JLbIwE91u5{1+SNAYD(g@LY$U|_X$x`S5;#e%L!1KC^(E zJ^ry4j{P7r5Cjj=&5{>K4lR35$suW&$JD(B>#>LdHNDD50;N9+_2epmxh*Cm4TfTl zOFtK#6UoNXUqNP%g_T8Z{gWHqE-qI;y&CQQ=aOC4dq}JGP#!WNk9+lHE?w5z<4r(e z=1%t@^rhv-)8X~5f9xI?Ms>c^dhFG}bZnU07P8VjP4+D?%|{@(2bLA*n+W)@%{dIq z-b?$lDB96N?ym;Ao9jd0+plRKCcRHd_Yl&x2EU)`8y7yN?sRxA{4Pq&J)dZKEBcn; zgQGbJZZ5kQyHFi^94^-l;IzMZ>e*e?-)Nsto!RK04y@c|KK_VWKDcTr{sE;#FnnhU zsAYnK(Z{ALe&oefAXoZ$(tnat8HofJ&i|kcbdCMzE(AR%U8U-uGyUtS)jraO>LDRD z{#IonH9!4Zd>>=-ei$q`94^nnW!ut65Y4>*ruUH|3V(D(uI#xq`A9|}3)$1C`&qDb zbgR<8FeZdYgXn>EtLxAu6PoDoZQvo73E-Cwp4WLMp(@_^pN7!XH#yDC^*})mS;L~B zob1O7;E@Nf^Fe`>sgZM1U1=I22>G7U-4t|<{STbC5g%j;byISWem6IygnILN!w>QU zA@}@P@w{UEx@4tqsuC|L zScil-F(zK8Bqb#zVT6*L8k?evPe{=vDTzr+vJ#&nI${+}4-`?Ck}?p(m87_sZMa-VijFNl1`3EQlylGq=DHfiAf2P zlAMfnLLAC~nBX)SaYjdk$0 zQqu;M3_5F{5FZasM=C-(O@S@|J;AF(IjT#Ei39NxQd4p>tEtJL$u0@WDY}?sot)4= zK0A^cpQMOVFHO?6vJ3?$>!2ec1J*7np^p-;ON;@Hb!}kqbgs8QJZ%8IitcP!yVwM& 
z4|J#2wj{6&XhTl_x#=Dg$mztc$zTyVQL{22#jOZU=^Y0s66(7+F~w88SDVnQsc~Re zSX&PQ+Jxq5J)RKn#7qO@90{>8lBQ%`Y(he!W*_lloX0E>lNszIPK+O@OGt!QP z_w)g@RO=F1NtTjg62ay`*6eO=C(7?9BfFA%#l*@wuSp^%uX;4|%(lp!x{(zmqpV1M zzBOdqpA06V>9&7DDiod|NvTPgDMf?<=pJKILO&QhXoTTOp#Fr^WF7Ref(A>jktK;R zR%3zG3B7*i9cwXhiLnaOrMtJ{zyw4XGB6OJ<){zcgQhDC)RY)C28vCB0n!h+JGlEA z2_fo|Q^1nqP(!qVmZK>T7qU%cnVv!BW=}?@tc|i{tOqDU*Go*+ITYxPWChv;b>K+} za8hzYtf&8xMm-X`2YgQNAiH%yax&74$Iy~Nr%DP4nw%07@19;f{Q(twEVQSm!8v`C zU1$F!FcYo$+6>|;Ejw9jc(z64A}7OJeF;fOr6x@TcOfW4PKrKPEIYd@#-TYRH?MbW z=9Q>T1RgbEUdG(A>6MiJ2}ymjhrc)OJN)ySU*g^Qe5Vu9;cgdnv?Enj!5rLkX-ymq=rfAbt&eSHxf_cdUQ70!RB{%@)QPYt!EEf#q%C(Kk$Z-M@uV%9=&A_qV!&lownJA<=vh;=>LX&b zCq)M#x+X3NfOZ)Et!GKmB``V*voAm&sPhzD<9@aWkbmm5mt3{45PNcd_%UMdXNcVl zf0h1E1^Acwp*?{?0r~S`5%n9^562ofV&WSl_bRA`5+I%{|SL6GZ86vh9sYf`jH z9rf+M~U_z1fO)L=^z8xAg2tbZxKU=B_<8GNQLP^ytyn3{k*)8uT5_Z3(M} zcQl#WcJ64x3KR?~Sg25qP{8LlZ zW*A&xz$vLm`lAq@ygigEB{y^^7BaJ%)5gnB;^ejhkTZAT=4o4r^s$*fEsRoejf|qdGC{gpw-tPK2HC_Px^)(JZ`CI6MOCbo9ZnADoVcjUpiP6-SIS5(7_->8D^) zV)Mvw6pQTDCs~3rGNJ4~Nf<;Tn}kO|I3OY%Lf~r*4|o3pH8g7$**pS^g%v420%ijb z1xl{9IQTiGUw-ZR`P!?@krpz3OVu*m`jRE*>NjTw&a!W#JI*2#iZr3PuKB7>iau~c zJ`+*6hIF=J*~r_I+6Vvk{9NPnC6$wrJtwc|X}y+`e4%N#(bY~x-#L?Vrg@PS$!A-X z+j9F<`;aGr=UY85tDJ7FpT1(MHS77vQi4YW*ypabc-r$zwU%SL?`yAG-?&ug`dj=l zZvD*a$LnrB-|+i-+ctNwbZ+p%Kbp%3tX3F1P;1ngh$GdPooINX@$D0hunyHuHaDCo~T6Vfk z*0>tkBEYLPpoQw^h~;z5=RgZJLb|Y9s$L2MEtEBFo-fY;Ekqi-aQPzV;8?zDC&CYu zlqPl4v`{kgcHwTpFB+HZao#MQjJ!O51&EOH;zCnD&_c62Go@4AG%b|ca$Dam8T0eJUMpAs{CVuwdq01Xc&PT>`6(A;?k*Vc^Xj_` zhx~pI{u&~e=3jo*;*rIfRZGTJ{e0EZi4Cu;S~j&^`P%N(9N!~CQzSx<9#3jjpA&nm z_NwKdU9Pisvw!q~^#`gRuREdMq099qH~;-|{b?_X&y!~u$$82garQj*i@q1AmLbLrkA&}ww?k~g?zj25#C=6NbA1n0 z8p|aet9O`7KGiy?5*oy4j5O1w|Rl7~f$EqtGL{_bt)E#8in>$>H^0XuZW)Wo8AzfHaHhgqL$`S?`_ z7~JQjsg&ESpWmL^!z)7smJ*ZPq@gCtfIn)lS~8(Uoz*KoA5>?}>NVTzeD(GAKkBUA zaI8hx*ITX(3j5}}``g3T?fT=7u=RTjwyeA1VEMsyHy*9Cqwc1YE#B1Ke70N5dRs0q zgX?Kp-}*!E9reEbanPH3+ip&1S^vAAJ|A3v`@JhJh%`^qNoc4G6u)q&I6!YH5Kp>U1cr90~Ojv{(m 
zemj4zaeE7la_3La&r{bjYFuiX@9UCh+TNL&e6~Z870<|yRSwSltS9;8&bd|{F2E@7 zbuX|Kopm6x%t5aV+Dvo4&!bue>!m!Z9q=U)zWbAJZ*^l|-~aV&=QmILbl(&BrqAO& z1w$u=XTW5q%`_)`Cc7AqS@g`fteHj1ri*B@`@k%Ed(2{L#w*P%p6|byZ5EO4RRFB4 zcJ(>Lh1%qbrN?}1Jm~^xBKfDK$Ln{la^YfwyrejzO}RP7nF5B9mlTh+DL2PBpWi;6 zErmAQ1ea_@=Hht5>5NC@|2Z*H4*$
M9 z@|YBD+NU>P(qxFfQ&B$UGeAO~REGglRw-sywP~L_a4AYkjA#h!XaAUZxa3dPa7bk^#Yd5U>Qvg4KETOtK=hOr506Mw+q^3vPvZx@t}$24zQXyttF$I`6Ca^rD$`XrFcDsx zuDkkhXXBul74xzO;@pL?_YQZdcVhj@#p7yc+w{hJKGUwprW^jOU0TeWKl@KEup$RP z{o%_++O<_?;W|Z^j4nUu7x$Dm4nAwUYo4O4-&uBgl_61 zG+b(Fn)Xh4NYjgNm8qX&9&Vf7Ssa}#gRX}#;e%CE6Z-$#swrqfH}er1F3#RJkIvdH zAs?Ft^XTID&ukhNmy(iaaTyf}|0#kyCSPuR{yhAxaWAvil*^vQ1up2_njQ^*Yg&dJ z9C^9b^ULtJ#=XpDMm~+|lMvG^3FK^aj@xvy>FxQOPPKe??b$`H|8FlY31jQ^J0UHb z-|%?-J%uNnSbrqxcI=W89CaZaKeQ_3z6mB{pt^jP>y28B?0wNjIHpet+%AQ!O8^Z{H*0=v=9A>)(%T zGPH5g`zP00HnVs`{^sUJQ`L=}lV+rIPLLO0u8Q0Gd0d@p{jb8GhNl~kM;sqg?D+b{L%JW+9dFpAmXz2Gt||V8oJHC!6OxPg z%DHV{E;)m$6%JqB0m(U$nBbMO)v0i=oRPI3N5jA0Bb(1pYIHgi$@#^;6Bo}7X+OE~ z3$tlr)b(DQdNz5R@~mLJuMbq+&|vHQq+<2gFHY)Sf721SoK4V-^`9*D|A{{H`d4~( zXqN@!F3r09;{;8gHD`dFF^;&1@WxGH4?_zuzxmaLBcXE|-#X%7xN&AiKyU}$-oOPV zM`TnfE9;I{tC*6xHz+-@>JilE|02msH{9wJXEgN@(su8KYo7O zPR4f!j?ZF`cxV&5Z6Ce;5q(^0tgmRAu$l~OPF4XQlwmylx(&cxoF9JY26ajf3z z`Hn&jcD?$(NS$>lZI4F|4y*nSQ)Y z7F^dyan~BQkI*;LN9vpETj`_qZT0Q+9rT^_-SkGiMNjD& zJ*#)f%-xE!TKTkq53p^x_-ER zgnp!clzy~+tbT%intrx^j((ngzJ7szp?;Zujeebey?%p!vwo}oTm3fuclw?BUHaYn zBl;8i)A}>|@AX&oSM}HQnfhD$pY?b3_w>K%AL<|JpXi_KU+G`#|IlaY-{~=fzahX7 zXvk;CZwN9JFcdNrHWW9MFqAZuGL$w{G*mKFF;p|uGSoKIHPkcIH$)g386pi$4b2TL z4Q&ijhG;`uLpuX}nbOeN(8bWz(9O`@(8E9&%m&gR8XN}I(96)<(8myGh&S{#BpFf- z{S5;Q0}X==Lk;PM;f4{0(S|XGv4#nTNrowgX@<`XGYqo~vkh|%^9-LG<{K6o78#Zp zmKv5DzA}7mSZ~;1*l5^f*lgHp_|~w^@SS11VTWO-VV7aIA;YlOu+MPFaM*CfaMW@)1hR24dhUbPChTjb@4X+Jv3|WS^ z2A2WD{cwLg01w3T;ra0(ya4_QUJx&Y7sZ3|5WECl3NMY9!7JdE@XELjuZma0tK*;I zVR!>P9NvyGx1sYYsA z#~0uW@kRJzd+tpX27D8~8Q+3$#lOY3;ospq@m=_Cd=H+1 z@5T4w`|$(#LHrPY7(ap^#gE~~@e}w-{1ko$KZ}2lpTp1N7w}8?W&8*HDt-v{{_E?-^YK&AK{PjC-_tRIsQBT5`Tri#{a;x@VEFo+=XLCKcl}f zz!+%EXDnd+#8}W+*jU6^)L6_IYz#4$FqSlyGL|-$F_tx!GnO}2Fjh2HGFCQ*8g<61 z#%jju#u~<&#!rp4jJ1t*jA6#Q#(Kv3#sWu~?yenWd87U)ev>I6>Z?qZhM!_f=C8KOqjZR~Xv9~e7m}DGe z9A->2jx>%kjxmlkPB2b1PBBh3PBTt7&M?k2&NY5+Twq*iTx48gTxMKp{L;9}xZ1eJ 
z__gsH<7VSF<9EjG#@)s}#=XXU#{I^F#v{gK#*@ZV#xut6jpvN#jTej;jhBp2$tXpJ0TDbLLy{BC3+Ekh*%zC<#SLZlM?i2lR?VjwYy z7)%Tyh7sw+NMaN*nixZjCB_pIh>64`Vlpv>m`Y41J|kulvxwQm9AYjpkNBLJPb?=^ z5G#ogv4z-5d`oO2z9Y61JBXdcE+T{2OY9>K5C@4v#1Y~cae_EWoFYyW zXNhyfdEx?biTHuILR=-T5kC^wiA>@qaf`T3{6zdr+$DY??h*HiUx^3AZ^T355%Gk0 zN<1T;6EBG0iI>DH;ti2Sye0fh{-yv^pee}oiK(Eekg2e#s42u$+*HC;(p1(|!Bo*y z*%WH3YN~FkVfxfm%T(JGW~ytdXKG*yH$|8lni`oJn<7n3OmM`5sg)YF8U%qH4oHL)h%q?me{dYfWRai%0wKhr?dVABxOP}49|x@ov+q-m^aoN0n- zifO88nrXV}Gt&&yOw(-B9Me407pD29g{H-(C8p)3FHLJqUzygLzBa8htv78jZ8U8% zZ8mK&eQVlg`p&f7w9~Z9wA++n+HX2wI%qm%I&3;+I&C^*`rdTTbis7Rbj|dm>ALBr z>6YoP>AC3-(>s&Pgqi)#`OHD)Lgtd@vgUH;^5%->s^;qE8s<9YaC0Mb6LV8@3v+98 zl)0_Bow&i<{9QW=DFti=7r`(=EdeE=B4Hp=5NgF z%^S^I%v;Uhn!ht|H}5p>Ht#ndG#@h`H(xYgHeWSons1tKnQxo#n13}tFh4RsHa{^x zH@`6dZhmQgWqxgbV}57GEdG`NOQ0p6rGTZNrI4ktrHG}drMRVprIe+#rHrMlrJSX_ zrJ|*hrLrZ|QpHlkQqxk)Qri+{sb^_uX>5tKG_f?ZG`F;{w6wIgL|fWfI$Angx>&kf zdRTf|^cI7~Xdx^ni`hb1Xp7atSXc{hv03aE!Q!w;mR^<^OCL*uCDGE?l4MD?q*w-8 z23dw$(k!DbV=R*_lPyy$(=4A^W>{ufW?SZ1KDT^fS!h{oSz=jgS!P*oSz%deS!G#m zS!4Oivexpo z0U1sNJWH+)qX&`aZND`!pM6Yl% zBunz7jkJ>jDUuSYkSggUHP(awIv5 z97~QP$CDGtiR2`5GC7T$PJTwtBxjMc$vNa)avu3P`2{(jTtF@)7m!d56jhokLzSh< zQRS%$R3)l1rK74)RjKM!4XP&fDHTRVPz|ZZR3z1eYDzVuT2Za3D5@>hp6Wn#q&iVu zsIF9Zil9uCnX*tMMN?MFK}nQCIjLAGo=T+pQb|+_l}Zhu22z8lq0}%cjY_A6Q)8)d z)OczFHJO@1O{Jz$)2W%%ENV72hnh>xqduqRQwykt)FNsHwTfCpt)!}UYCTa_{ zmHL+Yj@nM`pmtKbsNK{aDudcf9iR?Uhp5BU5$Y&)j5JoL8x<*~6 zZc?|X+tg3g&(vM&7wR5$pZb-0Ks}@$Q%|U;)HCV@^*i;FdP8MV?bUwNO z{Rv%=E=(7ti_yV!ak>Ouk}ge`q07?c=?Zj3x)L2qSEZ}bHRzi3r*tj4HXTOSrR&l4 z=>~KJ-H>iXH>R7=P3dNIbGilHl5Rz}rrXd_bTr+TZb!GLJJOx#&U6>LE8UImPU~p{ zjnf2eqAfH@Q?!+4XqM(^p0?8hEz%BJqGej8opdichVD(r(eZQwok;hkljvkRg-)gW z(f#QG^gwzLJ(wOs52e%S5%ef}3_X?}M^B(9(v#@P^b~q3J&m4D&!lJ3v*|hXT>1-o zKD~fmOfR9A(#z=O^a^?<{UyDI{)%2pe@%ZwucOz~8|aPnCVDfyh2BbkOK+pUqqoyL z=w0+~dJmmJ@1ytA2k1leVfqMtls-lur%%u)=~MJ+`V9R&eU3g)U!*V5m+2qqEA&36h? 
z#;ktU0Bb&Lero~iC)R@2Le|38qSjz*h_$%2gtd&dthIu*lC`o`XRTtbYOQ9iX{}|g zYYn$Hv^KIvTANr~T3cJ&Sfi}b*7nwp)=t)L)*jZLR@_QhO;)RwwQ^S8YO~s{qE)iW zR@LgX_OixUdt3WhW36%4cx!?+(c0IVWKFg9w+^xnwhpllv!+?ot)s2utrM-2tW&I0 zt<$X2tuw8&taGiOTfeZ*w=S?QvM#nRu`ac)w0>z_YyH}~-nzlM$-3FP#k$q{t#zAq zyLG2^mvxUd!@AeH&${1wz{gF7~{_rWC}4w zn4(NECYUMClw?XVrI|8JIi@^QfvLn)W~wk%nd(eU=2NB?Q-=v->N541`b+~RoN2^F zGEJFgOmn6M(~@b;v|*x{woE&wJ=1~d$aH49Fx{CROixD77#N%}G6Z8{%nZp;49!>> zmf;wlu`zZ=U_?e@WJY0BrWX^#^kHI|I3}J+U=o?WOcImK3}6N_gP9@BP-YmD&J1Tp zFe8~!%xGo|GnR3mMm3R{#7t(UFjJXn%yec3Gn1Lc%x2~=bD4R}d}aZ&kXghmW|lBZ znPtp!W(Bj7`I1@1tY*Gq)-qo+-!SW#^~?rlGqaWXmidm^&g@`zGP{^POa`-`Ilvrb z4l#$BBg`@8ICGLY#hhl&FlU)_%mwBW^8<5*xyt;=TxV`Dx0pN3Pt0BB7v?_mfccGi z$UI^mGf$ak%yZ@i^E>mBdBwbD{$SoPSIVhgYZ*+Oh# zwg_94Eyf13A#8EB1Y43V#g=8uvE|tcY(=&bTbT`Ib!-*3DqD@M&emXSvY)cG*xGCz zHjJ&y)?@3l4cKrtf^Eb`vQ5}#Y;zWl{9;?Nt=Tqg6x)_<$F^rXupQY>Y-hF$+m-Fc zc4vFAJy|_#UKY$VXv~+*i7~odz<~4z02NX@3X(M57^(>N9+^!Df^6l z&c0xOXJ4{!*>^0)`EvnWJ}!tWz!l;Ob49qKTrgLhE5ViF%5fFA%3LU?^|=OIIM&*4wdU6D3;>;Y$Q5?-N9LsSW&)GQ# zCv#4&7Z=0z=K654TpX9kC2`4I3YW_D!}ia~rwM+*a;8ZacSw+sWLJHwsj&T;3t3*1HS5_g%q!d>OAaX)g`xf@(2 zcZ<8t-R16azj6<_hukCX3HOwH#y#g=aKCdexmVn4?hh`Dd&|A!;59|w4<5G4#~0u~ z;S2JG_`-Y%bAU}v7 z%n#v*^27KvKAj)VkK{-3qxmuXSbiKoo}a)^KVei}cW|BRo(&*W$Ev-vsv zT>f+Z3w}PokYB_v=9lnG`DOfaeg(gh|B_$DujbeAU-59B9sdo#j$hAj;5YJ{_|5zl zek;F?|Bm0z@8EaxyZGJw9zKKL%kSg&^9T4t{9*nGf0RGQALmc-r})$S8U8H)J%5fr z&tKp#@|XC_{15yU{u+OS&*X3Ocli7K1O7MuG5>=9oqxr@=KtW|@L9Zz$83SNAloOl zg0@1oBDSKo;N(?XhLp z_S*K@_S+8F4%!ac4%?2{j@wSyPTEe{PTS7d&f6~7F4->Iez0A!U9!?Sbt#+e6zU+hf}k+f&;!+Y8$(+iTk!TbAvuEuTHVJ;?rv zy|BHAy{J9dUff>NUfN#HUfy2OUddkBUd3M3Ud>+JUdtY4uWPSw54Sh8H?lXjN7|d% zo7$V%TiRRO+t{P*ZSC#s?d=`x9qpa$o$X!hUG3fM-R(W>J?(nC!EUsh?N&QuXYHKb zX1Ch~yJT1FPJ1tVAA7t#!JcGKv8USm*$3DM*@xPP+0*PJ?PKhd?NjVi?bGcu>@)4N z?VsDfurIJLv@fzRwlB3WvoE)=u&=a#X|I1 zN}vT+-~^js7X(2R6hRf@g}y?PkRqfC{e@vdx-eWAA&eHr2xEnD!USQWFh!UuOcSOH zGlW^f9AU07Pna((6qX1}h2_Eu;Y(q)utxYsSTAf6wg_8=Z-wuK?ZPf$x3EXZ5cUfD 
zg#E$+;jnN-I4Z#Tlfo(Cv~WiFUN|S57cL5ygdc>f!gb-Ma7(x?+!1~f?h8+Zr@{;2 zrSM94BV-A0g?EBW@D~HbKrx>fBo-74iG{^rF+?mbmJmydrNuI0S+Sg0L98fN5-W>U z#HwNqv6fg{tRvPH>xuQn24c7vAvP2niH*fbv5DAJY$i4rTZk=1IABw3BSwpD#dczQ zv4hxA>?C#;yNcb#o}xj-MM5-*W|0&rkru5YBeEhV+C;l3h@vQovZ#ov7$f!;`-rh3 zylWyRh<(K*F zEKU`tiPOc;#2Ml&ake-|oGZ=~KNr6c=Zg!)MdD&{iMUi;CN39Oh%3c4;#cC=;y2QNEJ8`?XL)b!y!8qM=wWTN0KAOk?QF07~mM>80;A8815M180i@87~>f0 z80VPinB4o%C zdM*7Sy^*q{cT#{HDCd`hTvQH`i_4|t(sCKOtXxhmFISR7<*ITuxw>3K z{#33d*OtTN`f>v~T#k?%%8lemxry9VZYDRETgt8EHgdGwPHr!EmAlJ5FS)neM~;=@Av8Hr?kgwB$#ROED)*E7 z%Y)@1@=$r0oF=Es!{w3k7XXSJ9dHI5TQT{=`B43rS$v?_BL_)U zdP=wwp)^z)DNU6YN-L$c60NjV+9~ao4oWAbv(ioJuJly&3a$`}Nii#wLMx2IDRxCr zM0o2)QJhLIB}VCuUOiJ1l)g%`lB)Dm1}FoSLCRodh%!_erlcw9%5Y_bGEy0(j8?`d zshm;HD(95*$_3@J@`G|kxvE@KepIe2Hl>!@LBUA4X%t~OK~sZG^pYIC)P+EQ(;!tu>&JGFz_ zQSGF5R=cQO)oyA}Rj=ZzQKeO@%Bq~ot2R|o9jc@%D!g`}_E!6-v1**!S4~or)l_wW zI#3;~4pE1yX==JUTpg*7R>!F0)bZ*Bb)q_1ouW=vr>UQ*Gt`;tEOo9rPyJl|LY=QJ zR2Qj>)g|gub(y+cU7@a2SF3B(uhg~b*XlZTy}Ci&sBTiXs9V)<)oto_b%(lB-KFkP zGt|B6K6Sr(Ks~4)QV**~)T8P#^|*RMJ*l2jPpfCtv+DQiIrY4HL4{MK)ywJ?^}2dP z%~Wryx76F}9rY*muKJ65U;R~mp#G*lR3E8N)EDaS>MQjR^^N*g^>g|=1Dt`*e9rvN zAZKA`QD-q{h_kq}gtN4>th1c6ytAUSvQy`*;;ibd=B)0l;r!HD+gaZk?ri96 z>TKq0?riC7SUa(lXp6tl2dl7PN%bv zGu9dJ?CVT%_Hzz!4s;H34t5T8raMPCM>faK3S7Io~<`{vS=m)yMQeZ`}3Rn%S0oDZTfOWxoV12Lw*a&P4HUR^` zreGlG2AhG+!4_a^unpK2YzMXn0Wb*c1a=0yfIYyTU|(=3I1(HUhJfMVSTG742S$VA z!5A2b>Ge2lK#uZ~?dwTm&u#mw-#bW#Dpf1-KGi4Xy##f*Zk2;AU_OxDDJ1 z?gICK`@sF+0q`Jr2s{iP0gr;mz~kUa@HBV^JPV!!&x04hOW+mo8h9PN0p0{}gLlAt z;C=8R_!xWwJ_VnF&p{I`1dBimv_S`a3BCefgKxmM;5+a=_zCO%FP`cMO?A=C)+hXSCc zkQ-_SHHTV2EumIWYp4y>4r&htK^>ruP-mzM)D`Lm^?-Upy`Vl&Kd3)65E=vxhK4{x zp<&Q)XaqD88U>Apf}s%T|1I_ZN$US|)c>cb$3qjLMCjjq7fORb2!de91ECNO5fBB@ z5CgFg2T70&sgMTg&?G1mngY#)vY=VeY$ylHh2}tWp?Od~Q~)i67C}p)WzceH1+)@c z1+9kGKx?6O&_-wzv<2D*ZHIP1yP)0BUT7b506GXAf{s8(p<~c-=mc~MIt`tH&O+y) z^Uww8B6JzL3SEb8LbssX&|T;rbRT*EJ%k=XPoSsJGsuJrp%+jQWI;CMKrf+J&}--o z^cMOLdJlboK0=?NFVI)$8}uFe0sVx2LBF9tP%-2K`@$t)KiCDAgiFDt;WBVpxIA0| 
zt_W9xE5lXbs&F;9I$Q&;3D<(_!u8<#a09p@+z4(02f$6?K-dj8gPX%G;nr{)xGmfc z2H*~GN4OK*1?~!WgS*2$;GS?VxG&rf?hg-u2f~BlA@DGGI6ML#36FwD!@+O}90rHO z5pX0t1|ADX!QMo@48jl$!wBqwQ5b`9n1Csmh8dWJIhcn9 zSb}9(g*8}*C&3wTCOie63QvQl!!zKSa27lZ&W3Z~x$r!AKAZ>V!wcYr@FI9IyaZkf zFN2rEE8$h}YIqI27G4Lhhd02R;H~gBcsslU-U;u5cf)(&z3@JGKYRc_2p@tE!$;tw z@GWLtIEnq!dybDT9E3P?qy5>gqdf>cGS zA=Qx@NKK>`QX8p*)J5tc^^pchL!=SX7;z)bkQPWwq!rQ{X@j&y+9B-`00}}mBAt-V zNEf6V(i7>0^hWw3{gD310Avs{7#W6)Kt>{?kzgbQ2}Qz?a3lhWM8+Uvktk#wG9HOR zVvz|*91@QtAQKS~fe-}oASi+%I6@#KLLoH5AS}WmJR%?xq9PiiBk9N#WI8ennT_Ni zxyT%3K9YwNAPbO%$P#2JvJ6>{tUy*FtC2OxT4Wuv9@&6wL^dN^kgdozWIM70*@^5z z_8|w5gUBJ|FmePriX1~uAg7Tt$T{RZasj!BTtcoO*N_{?P2>Uc2zi1$MV=wg5d$ei zULZw?h1iILyhL6hZ;^M%f5->qBk~FPf_y{1BR`N|$ZzBiQjB;JACIr6gvZb0@|5wE z^_26J_f+sy@>KKG_B8M`^fdA`@ig_gJ*_=$JZ(Keo{pX_p01wmo*tf_o?f0lp1z)e zo89Bo?uUiC(IM>iSUf|jPpc$#(NSyNuCr>swd5Zcsw4|gL!Zd>7hNW zhxZ5`$s>CdkM2qLO!iFk%=Bb=vOTjsb3AiBd7gYvfoFkdk!Oi#sb`sIg=dv#gJ+9p zyJv@Ik7uuEpJ%`4kmrQwr0109wC9}Xyyt@FvgfMjn&-ObrstODf#;#;k>`o$spq+; z$YXmP&nwSc&pXd2&lk^6kJt0Rv8f+g5-o+6LCd1$(28hfvOoPIKuMHBX_Q4dR753IMpaZpbu=BFgl3|X(W&S( zbS9dGW}`W1E;<*@M+?ve=t6W6x&&Q?u0^+?ThSfpPINcA2i=G6M-QTh(8K5v^eB1^ zJ&B$|Poo#mi|8fvDtZmQj^03Tp|{aH=w0+a`UriDK0%+Oh3E^^LLKxa`U-uGzC+)m zAJ9+e7xXLo4gG=sLjR)0s2BCYe6bRkALhbJVWqJ$SXrz*RvD{?RmW;#b+EcveXIf2 z2y2W5V9l`RSPQHb)&^^b0ay^$5$lX~!MbAIv7T5jtT)yN>x=cn`eOsI!PpRNC^ifm zj*Y-ZVxzDSEEEgF!m%+}6c&w*$6~PwSR9ssO~ewhBrF+A!BVj_48$M|#yl8`;TVCD z7=_UogRvNg@tA;#n1sogf~iDWvx3!8;yW3#b2*j#KLHXqBw^05MJ zA+`uxj4j2MVJone*lKJIwhr5XZNxTXTd=LzHf#sB8{3N=zz$+Zuw&S9>;!fiJA<9Y z&S4j@i`XUX3U(E{hTXt!VfU~H*hB0w_5^#1y}%soHTDL3i@n4C!`@>bu#ea$>@)Ti z`-XkTeqg__-`F4QFIJ3sF(2F)_rqOyNxU>(1}}$~$1C6!@k)3VyeeJ|uZ7pf>)>_q zdU$=j0p1XAgg3_h@c_Ij?#7$p&G8m^8@wG3;2rQzcxSvT-VN`L_rQDNz3|?6U%VgQ zA0L1Z#0TMn@nQIId;~rU55`0AP&^C|$0P7ad<;GokHVwz@pueA0guNM@QMF0jAT3o zPs2eR!eJc2|KTP$fs^<@M+4_@9v5*LS8)y3@pOC=o`GlLQ}C(yG<+tWh3DY8_#AvL zJ`bOd=i&Ky0lolVh%dqy<4f^n_;P#&z6xKBuff;i>+lWuMtl>#8Q+3$#kb+x@g4Y1 
zd>6h4-;3|Z58#LJ!}t;WD1Hn-j-SL&<7e=*_&NMMegVIPU&gQCSMh83b^Hc?8^43! z#qZ(w@dx-r{1N^be}X^7pW}u23%m%oa2t2F7g3TZMU*DW5M_ySM0uhDQIV)bR3@qr)rjgu z4WcGdi>OW1A?gwhh=xQXqA}5g@FxO@K*CKlBbpN}h?YbvqBYTmXiKys+7kc~M06lJ z5}k-HL|394(Szto^dfo_eTcq9KcYV|fEY*&A_fyfh@r$VVmL8^7)gvKf{9Qfj0h(p zh)7}#F_wrT#u3rPcp`>~B_={#2jKCQ9vvp77>eyWyEq~1+kJ?MXV;)5NnBb z#Cl=_v60wB>>zd#dx*WnKH?B@j5to5Ak)_EpWO=dzS&^(nRwk>G)yNuTZL&VufNV%MA{&!UNPjYbY)S@_Zn8Pq zf^12)B3qMf$hKrVvIE(X>_m1ZyOQ0=?qm&cDeCUP^mmE1;dCwGuL$z9|gav!;$JU|{I50gj8 zqvSF2IC+9RNuDClkY~wr8LBK*jw(-8pej<8sLE6osw!2Ds!r9QYEreR+Eg8?E>(}J zPc@(#QH}pmKK@hy)szaP+*C8FIn|PCO|_-kQSGS?R7a{4)r0Cq^`ZJv{iy-eKxz>6 zFDyk3r$$gCsbDIE3Z=rR2r7~qLq$>JsPR-R6-OmdNz^|^iUKK^LMRV~QUpa(G{sOH z#Zv+$QwpV08B``Ug_=rDqoz|cs9Dr(Dwmo=&7{qtr3#ICX+LOP!<6Qx~aA z)Me@lb)C9N-KOqPcd2{S1L_g=m^xQ!LSBZ;Rd) z{a5t9=tI%RqEAJii@p?nE&5jUz34~L&!S&Nzl;79{Vggk@)r46zE%m#&vIEMtx{HL ztBh6FDrc3qDp(b*N>*j7idEIBW>vRpST(I$R&A?}RoAL#)wdd04Xs92W2=efZv|LQ ztw76d`2fB^31Gj^#S-UBZ1C{|N&+>S#CdBqY1X7ple$eBG-=qR22c|y1C#~I0p)=T zKt-SuP#LHN)CQ^mRe_y8_kF(mbO~r4&?2B}K;?i|0WAZ{1au9k7qB+K9nd78TGNFA ztpnNxObi$mFf5>VK*NB904x9xNDfE|XcW*npi{trX8oG&@afQ`V-r7rmv_xSQqJ@O zP1gI=@-Nk_WV6fe>+Wmr^X_x*mF^Yp-R@oPo=xWZ=laj_pXXm4Xl6CHT39WuR#t1P zjn&p_XSKHgE6D0#b+kHJovkibSF4-V-RfcWw0c>+tv*&?tDn{18ek2y23doxA=Xf9 zm^IuQVU4s#S);9BE5r)5!mMyB!iu!USYxdyYn&BrjkjW~SZjh6XT@6y)vz;b=G=ogSFAxWNo&#SX-@a)^=-$ zwbR;V?Y8zx6aEI%S=<&RA!ybJlt5f_2flWL>td zSXZrU)^+QKbmhqPLmh+bPR`6EzR{94rR{5tZR`XW(*6`N!*7DZ&*74T$*7Mf)Ht;s| zHu5(1Hu3s<1H4VWfnK+_nYX#Og}0@*mAAFGjkm40owvOg@CJE1csqJKc{_W%c)NPL zdAoaiczb$#d3$^N{L8TS^Y-@+@DB72@(%V6@ecJ4^A7ip@Q(D3@{aZfdqcdT-Y{>t zH^Lj~9pfGAjq;B3MtjG5W4y863Emf8+j6Xz)+_6^^~QQ@y|eza-di86kJcyav-QRL zYJIc5TR*Iy)-UU~^~d^a6 zyPe(M2J9fagWb{YWOuf^*j?>zc6Ymn-P7)6_qO}keeHgBe|vyE&>mzDw)+DEfPugu zU@$NQ7zzvnh65vjk-#WmG!P7g0HHt_5Dr8Dk-!*WED!~Z1EPWPKnxHIOaS75cpw3o z2qXeYKr)a5qylLG2tWV~AbYzTm;uZLvVd7YHZU8=0dj#kz+7M+KsmI-IIP1tydyZGBRR68I795A z_Aq<6J;EMokFrPG!FGrpYKPh3c7z>ikFm$vQT8}H+8%Gm*s=BmJI;=`6YPn0qMc+X 
z+bMRcoo0hJWWzRMdu-IkY}_Vn(xz;iTJdw{*bK43p^ z05}L70uBR5fTO@M;5cvsI0>8rP6KCvv%opvJa7TH2wVa#16P2nz%}4Ha09pr+yZU` zcYwRVJ>Wj@0C)&I0v-cTfTzGS;5lFbCQt~x0Ez$$umJ~n3A_Sc18;!0z&qeS;63mG z_y~LgJ_BEXufR7zbu>qJ(w#|8hLh<`cGlVJ?G5%udy~D{-ePaHx7pk69rjLpm%ZEG zWAC;1+57DS_CfoQeb_!?AGMF!$L$mLN&A$2+CF2Swa?k-?F;rr`;vXxzG7dsui4k_ z8}?26mVMj4W8by!+4t=S_Cx!T{n&nDKeeCP&uzmt?Lzy7U1VFfZ9Dc$`<4CLeq+D2 z-`W4!@9huvNBfig+5TdGwZGZl?H~3}`W$!tbQtX}>dmXZ_Ck zo%g%ochN7~neF5_xy~GCt~1Z+T->F&dvTBAzQz5Ddl&a9?q58hcwq6M;`vUV)3dl& zao6H*#X-gSPRHU-#Rbj+XQ8vmS?nxvmO9Ivy&fKI~AOYP9>+ZQ^l$3 zRCB63HJqAGEvL3q$EoYobLu+{oQ6&#r?Jz-@pl58rcR*acA7cOofb|@rNvb*Xifk$oiHcdiEtvFG0s>g${FWGJL8=gC)SzZ#5wU!f-})cbdsE8C&j7a zs_Lrds_v@cs_F7|)pFH#)p6Bz)pON%HE=a_HF7m}HF5d70$fd9fiAbJnX9?0g{!5j zm8-R@jjOGzovXcTg|pIG<*atrIBT7C&U$Bqv(ee)Y<9LdTb*qV;0kheaCLNba&>lv zxw^Qzy1Kc#yLz~Kx_Y^KyZX5Ly85~Ly9T%hx(2xhyN0-ix`w%iyGFQ1x<|82wxx~W~)tl68;%?HpNlE{*{`vj|{tNsU z`qvC-9MCi%JRmh7E#P=Ss5{IZ?v8Lry2rT3x})6V+|lmw?ihEhdxAU89q&$XPjn}` zlibPf6nCmS%?-LCH|$2-9yjX7+_;-?lWxjQyC=Cb+?npl?kVo6?rHAn?iucx?kx8# zceZ=BJI6iGJ>Q+@KH@&=E_AhtJ>Lhuk9?o_ z{_%P36ISB9Gst(C?@!-}B`!LboXgG?r({s6pwdBQg31P!3o0M(47}O}JaZr;W|Db@Nra^&0?x1Et&4XG5wG6uI zTyw5FH=LW!E$6m#$GPj=bM8A2oQKXM=dts|dFniKo;!wPI{rWa&=d#++(0v+InV-V z39RlgyJJqrB^{S`T-I@A$1@$Tb-dZ}R>!*?f#O1Mk+;zC_Ydmu!l}}v$gv#T`2#El zmH>Z&VxSbT6j%lfFCI}mvUpVS=;GkwkmAtdu;TFI$l@`@V~eATql?EE#}vmFPbiKn zjxSCqo>-h%oK&1#oKl=x3>HJha4}Nc2l&wbWBX6-KezwV{%iYh?Z3DG(f()qU+sUl zFAaQgzB=EW@6HeBr}NAC?fh~6I>nCH@pf6<=qwB z72TEGmEBd`Ro&Ix)!jARHQlw`wcT~xb=~#c_1z8J4c(30jonS${_X&GQ+J@-?QZ67 z?r!03>2Bq2?QY|4>u%?6?*`mK?hfvb?oRH`?k?`G?r!ex?jG)*?q2TR?mq6m?tbq6 z?g8$B?m_Os?ji1>?qTlX?h)>hZr_(BUi!Usy)5~%)XUN@%e*Z6vfRt^FDtyP__ET= z$}gW#PpRirA@zbPqAbd$9O^aohWbi3bU(U3 zJ%Aoa526RtL+GLOFnTyWf*whaqJ!xWI*g8>Bk3{pSb7{CO^>Hz=vaCJ9Y@Fi;{p@^ zIep1=3Y|))(I5@cFpba{P0%FmWB3{+3_rtVlr%~irHwL1S)-g$-l$+yG%6XDjVeY} zqnc6OsA1GJY8kbSI!0Zio>AXuU^Fxu8I6r5hQASDG&KSZx6#aKZnQ938m)}hMjNB8 z(avaZ07j6}!RTmoGCCVwjIKsEqr1_==xOvadK-O=zD7TzzcIiVXbdt28$*nt#xP^J 
zF~S&Wj50vW?kBj*)B3G3FZcjQK{Mk#7_j3yg)v zB4e?!#8_%9GnN}GjFrYJW3{oySZk~^)*Bm)jm9Qpv$4h4YHTyM8#|1h#x7&GvB%hJ z>@)To2aJQpA>**|gZ@eXqJPtW=)ZI^?WKJfU#0}(VoEb*nDR^orXo{`smfGisxvj1 znoKRGHdBYG%QRpbGL4wV|KMtWrYRH1G-p~et(mq=dnSnKz;tE0G2NLSOi!ja(}(HH z^kW7v1DQe0U}gw2lo`ehXGSn1nNiGWCYT9fLYXiooQYr}nX$|`W&)G&&p}RPl9*&B zg-K=77?6P&m_Zm1gE2TmG898I48t-4BQg?m#5igkGmaZ4jFZMG>`lY1T4pn{~{(rpzde%IHi6 zGntvfOl77sGnkpoEGC=DVdgM%nR(27CXdNy3YZ1VLS_-Om|4m!XI3yPnN`ecW(~8J zS;uT*HZxn8t;{xNJF|n?$?RfwGkci5%sys6bAUO>9A=I%N10>HapnYbk~zhkX3j8Y znRCo}<^pq(xx`#%t}s`bYs_`#26L0S&D>$`GWVGK%md~j^N4xOJY}9UCSx%U^OAYR zyk_1oZ<%+@2j(O5iK%DSHyfA@%|>Qpvx(_%2AECFK+|nDGn<<&%$8;=v$fgAY-_eN z+nazHWOgt+nw`wfW*4)o+0E>3_Aq;zz0BTbAG5F7&+KmwFbA50%)#ambErAY9Bz&< zN1CI|(Ppq2VuqSwX1Ez)Mw(;Hv1XJx&Wtw4n=xjrIl+uG!~A8686Vb{Ey4P+CD~GJ zX|@bomMzDYXDhIk*eYx_wgy|1t;N=6>$45nMr;$-pABG}vVm+fwmI91ZO3+EyRhBa zUTkl+FWa9Tzz$>wu|wFQ>~MAjJDLq;!`N^(f*r$-Wuw?}>;yKBjb{_siEJX9#3r+8 zEXYDE!g^Sg#aNsrSdyh#mgQKUl~|coSe4b-baoP(!A@qUu+vz<6ivyLO~q7A&D70w zbCQ{1W}1`DDdtpjnmOH^Va_zO%vol(Ior%JbIm#CTyvf|-^?@f%>r|QxzJo>E;g5# zOU-5Ga&v{b(p+V(HrJSI&2{E_bA!3j++=Pxx0qYaZRU1!hq=?-W$rfjn0w8A=6>^l zdC)v$9yX7dN6lmAar1^gQmyMf)vZeq8x``G>LA@(qPoISyw zWY4je*(>Zd_Bwlmy~*BY@3RltN9+^!Df^r?Sd+EbSL|E%9s3{qp8dprX1}uE*zfEQ z_80q`{mT}!B{&yXiYv{P;mUI5xr$sRt}<7ZtHxF5YH+oOZ_Rh+f98AhgZa_?WPUckm|x9r=6Cal`P2Mm{x<)Zf6ZdkYx)%W z7M3XVD|8i>EG$)6y0A=P*}`&#QPGQ}` zdWH218x%GyY*g5|ut}kRVL)Ni!oWgzVY9;Kg)ItO7PcyEUD&3uZDG5@_Ju%UP+^C{ zj)jA{A>2@I7&n|7!HwjCxezXt3**AM2yP4)#f{^lx$#^KH-U@i61a(6BA3jiaH(7x z2XYVxa~=-mFpl6zj^b#J;aHC2cuwRbPT_PeotwmEa#Oe&+)Qp3m(AsHx!hcCK9|Q8 za0|Fa++uDCx0GAPE$3EnE4fwNYHlsJj$6-d;5KrbxXs)aZY#H)+rjPT_Hz5U{oDcW zAa{s6%pKv5a>uw6+)3^7F?vXqR3 ztzo}R_y;d$a|JUK>!weL>Q<1bccWD9dYZ-!NhU(v*-sK{OLJ1*(QT6H#5Ng|7U*fi zC@k?xVyBQ7%!r6(;g_^^x$WYnf=i(ENjJ4y;8|hT)Udql(7;*GGM=TEjHosvCHxF` zp1a6h=B{wJxx3s$?h*Hdd&)iI49?^Vxffg!XLAnsl6%Fy=H75`xp&+L?lbq5`^J6e zesRCKznquz;Y;v-d`Z3(UyiT9SK=%4RrsoWHNHAugRjZg;%oDD`1*VUz9HX;_vf4P zfxMe<&bQ)Q^KJNcJirI>9r%uXC%!x1lkd&<;rsFf_(A+&eh5E|AI^{9NAjci(R?r; 
z!iVz_d?Y`HAInGadc*9M^DBm4ph`!-NvKP1o*o(fKBG-q3%TWl?^;{-t0#5#kDOk)J(u4>>dvVl zEAi`vdy{)lxr;nR?jgQ$)8c-z@A4+3l#4$bzdYyUxKoq#+?f2W1qX9#>20$6Cms)Z z5D^!#A!lRG=A6;Fr9n7;!6b0{y(L)_TZgyeJn^$qZs9MnV==>$*{~g9XVU5hUlf&v zyNL;W93Rgo@X34{5AiUM@)(cv1W)o5PxB1V@*L0e0x$70uke%j3_g>e%1`5`^E3FF zd={U>=koLT`FtLq&lm6u_=Wr;elfp%n{2qQE zzn?$AALI}5NBCp>3H~I1ia*Vt;m`8t`1AY){vv;gzsz6ZukqLU8~jcF7Jr++!{6oa z@elZi{3HG;|BQdm8@$OE@n=~s?l5g+ZcTofxOaY`XMDuz@XE=j=FgYght!Yjl2L2Y z%t?XRB@ex5Oj2ybugH|?MpVUMzqk~nuktdtR)U?}fIc}rG;MTZK(>c(Is5z^DE>*N z|CA~VPetTS%|hNKyiCxEs~JHVw}aPvu1~p|{80G|{Y)L6nHMouULP_mY$Q4$7M*=7 z>0(5i%r2Q$+8&{I^7EMavHwkQvd$%bNt0&zBp*r|J}qlu?O9FqPxIFYSIJ+qph9Xb z+U8&Kule`<2mT}fiT}cX<-hYk`Ct5R{txdbxP+2IDWSAbMkptg7b*#rg(^aIp{7t* zs4p}Unh5?vfDkCO5LyYXg?2)FAxP*TbQC%ZU4-63U!lJ+Ko}?t5{3xFgyF&nVWcox z2ob`B2q97!CqxV5g%}}Lh!f(41Yx3(C?pBVLW+5C;5YTEh-wWk~l1KTyS~m zRl;QaQI?+AC^u_Sk?0rsh4oFU8M=bSkyCJUp_sgM{IKTS$%EFQDSh|t}tY3 z=?N=hjwX8MFl>d{O(frmqI^F1hc<<;8vJfj!|-2WB)lPF+H6@IZJZJQkh^Plac~ zbHNZyp-^}s6bY7K3y$zgcrCmU-UoEup0*`Em!9x2U0qrHzaE z5p!ACmRvn~Q&Q`sx_NDrkL5(lExCxq=FvkUMPzkor^IG)Q;`?JU9+>MPl<1rTT!ee zRu-#>RmEyz4Y8(JORO!{5$lTe#QI_bv7y*lY$E!L0b*0pEjAOIi!H>KVk@z=*hXwC zwiDZnfEXlp5Ic&U#V%r3v76Xk>@D^c2Z#g3LE>O>s5n9#DUK3Hi@{=u7%GN|;bNpX zMjR(bixb5pF24v5N_+(vE~*EqgfZVi3Kf{di|*aG1oA%w*gY>w*84h-qW z7R1>J3DGCeq>TaYPrBodzVvjmJu;eoG<2y`C@^% zKwKy;7MF-i#bx4hafP^2TqUj+*NAJyb>aqbqqs@jEN&6Eird8P;tp}AxJ%q4?iKfm z`^5v|5%IWqN<1T;70-(o#7p88@tSyDydmBe?}+!r`{G0Kk@#4AB0d$LiH2y3g`y?C z6km&P#s9>Q;wSO5_(l9GeiOfoKgD0-AF){UioTLdDk+tg%1C9Ua#97UvQ$N?DpixJ zOEslhQXQ$TR9|W!`3Aq|{)?C~pIcCAdE2~2F{`oyf*S;H2}_!MYT}0Q@?b#-l5jYz z5gHqopIDGGF@MOc+PM$dzqv{LEo3J3minF5d7Q|P%8pIzm$xW=U7SBQH~PKwj_;Yb z7ulLpNiQ`nEUa<<&zOzlT0~N_Q&aQ>MPrxFFXsB^PD?r$SueUr^x~*pvpc8#%I=)e zKm0>N&2hz3U&anzb|z%$f`eJ^@m#{>2qiHjx5ONMA(T)dy>D2@^cu13!XR{3a6_rF zMsqD21DaBcT!|;SwoP5-qWkEGd#IX;OwXMVcnfkY-9*(kyAVlq2Oz^Q8IG zLTQn-SXw5nlvYVV_RWa#dWc*lBqZ1;|E}i-{c1|7IGFvdmxGJ>x%2yLW|&P}jkuHJ 
zm)SCVJB2QM>=_ZYJL6!=ru4pv9r8CsdxMW>9n$)we9DX~m^kLyg0<6*7uYO->=0-)gv{}3}s~W-(YWi;KWS|5bPVfIjzLB53%L*t}WQk z{v`r(w}&;2FEq_G?OcbJP4_nwpv;vt(P`R zo21RsHfg)GL)t0rlJ-b@rG3(V>40=lIwT#Ijz~wPW6}xfq;yI;EuE3hO6R2W(go?F zbV<4*U6rm$*QFcMP3e|&Te>6NmF`Oqq({524IGNnSPNOGi?(ktn;^hSCs{U?2p zK1!dY&(as^tMpC!F8z>xOMj$d$w&5;OUNbV(sEh3f?QFqBv+BE$<^gra&5VeTu*Kw zHnX)-87GAtvqM@D5_CS+P>WKI@jNmgW4*5yfZhMXx+mZ!?o4HqI{& zU+#IHai3ZhH94(qcH`(JIp;I;BC};6V^vIWwbe z)L zsj1Xf>L_)UdP;qzfznWEq%=|dl>nuw5~#S97D`K{wbDjutF%+vD}WNDbWl1fos`Z> z7p1GxUFo6pRC+1Bl|IS<<-j5+hDV!u)&}p6nYJit&W4HGb6*v-j@*mhiuoHhbIch1 zgMK7=bkfI^V-vFz%8^l?;hy%xKqwozbPS()J+m8l zOq>-tCZtT{EpEoF68XD52T&xt%8cY?J+k|by}9fzJ3Zc@z=S`M8-iQG6Q_qK5IJWe z9}wcyzROynIWcwOS7|fGY!B%~_ljH;Q8%a0KQ-&)el#$9PC0GejLX|KjT!~O3l`+a#B}y5mL@VQ!7$sJjpu{QhN`f*` zNmP=QWF5aa$I=ksEKEm{Su<4HCLu_AHu#Rj3Ni7 zj*1(dP-zxHxRYk&9F0#*xHI+H*ptZogtUpnBaDR9)V|q&#d;yHh@qa{S)Z~VXvvwC zQ^$`Vus96;j`p9`6RjJ95c@N0CEiS^SkOFM5PygDiTV}QBTZm?OYg-z`M7dIIi;Ld z&M4=Ui^^r?nsQyaq1;q%DYun7$^+$*@>qGQJX1`iPZ_Je{ZyA)QZ1#HR?Dbm)$(cuwW3-{ zt*lm2tE$!1>S_(Ordms_t=3WNs`b?RY6G>Q+DL7z`m0UVK-H}_Q(LGl)mCa7wXNDt z1=Jw5gW6H;q;^)jsI$^j7XHjRfv?T-U-mZol~z_S8#yWM0XQdTv~n*0QiMC<7)yER zh>PRbYk^5dSh-0bCp@2=o|qn%6jLb#6$gYLNC-yH#GHy*nYAOUFy%`8;QV>fjp~U(AZ|@tHFh^vUkP9-BWjw3qZfcwd^f=;U|>xyg->sukNq{*o6=Dq$Pb zi3}myr8mz!0lrP!De%IJ#ED7i^1Bc?{CZeu=3|JN`!zc#`{ayvd0XXg!ClpEYIn7V z+EeYN_E!6-ebs(ye|3O5P#vTWR)?rV)nV#zb%Z)n9i@&|gVhiY zzFMFzP*XyIjIeNgVHIo&wjTWz_cioMbg9@OXuM}Cn>85($42kUY7#-t8J$0TN;Sw= zY({=dUlV*OcPKh}QR(qV#+FR}F#ip|Hl*Gple#rSTG&Z{rs?6)DeN3QCo|_wM85Pc zX>HO}?uQsPy-R+p#LeLu8Id57#wQ{CJINQlt-Y9WBf47R#VKnuMzM`UF9)w>-(-#W z_mmzfuSvN%=Tfj6ji1q6ZI*X7xW%l;Ids9u)JfvaoVSF4E>st(i`6CSQgxZSTwS5A zR9C61)ivr`b-lVl-KcI-H>+FJt?G7lhq_bUrS4YusC(6Y>VEZrdQd&29#)U2N7ZBM zarK0HQaz=fR?nzs)pP23^@4g)y`)}Nuc%kmYwC6NhI&)IrQTNWsCU(S>V5Tr`cQqO zK31QoPt|AYbJb8ywNQPb7O9qMtB(3oeWkuu->7fZcj|xYd-a3*QT?QTR==oU)oqOASMKd&)FCemg1!RnN%`kc4ScOB6dZ=gT#l_ zQFd^wxad-5zu5H1SA3t~ZJzPzxBd-YUQXb{o@O_i-9GPQh!x=^Y$4LZLz4zBYdpSL 
zXjh((DAcATw@#UnxFD%UXlLr;jNM@;vYu)~qpoDGpT9V)e&p}i-|8RruUf2nRUge) zE1~&mF0G_iN-M3E(aLJ&wDMX7t)f;*tE^Sgs%q7=>RJu0rdCU3Pwwbt5bZMAk!Nklx@q0D z9$GK0x7J7NtM$_cXaluD+F)&nHcT6?jnGDFqqNaluoj|)YGGQq7NJFIW3;halr~O_ z*2ZfwTC6rfyA<+58jqT}Ig8TA5dVl!`4lw-^zvQ!?6R)3tPMl9s7W)~0Av zwQ1UPZH6{e%hG0P+1hL^N6XdbXmhoB+I%ff%hw9D1=>Pwk+wuzsx8yDX?wK8+7a!Z zR;0bvero0PDtaTmtKMDjsrS-*>wWaTdOy9tK2RT`57me1!}SsRNPUz(S`XGk^l&{w zpT4m9#7rzXs#NNbqz_}?CPIm~<7(t*MqNytHpYM2h)Jt+QpfDfOi3G?{d6)szA|Vh z{wG(>C&HHCz^uPX7iQd=SV})dSXfDBKJDd)23V!+N=IybE8frXQj)^YtP4j(A5601rZy-Nz}DvALSp^VX-=1iV04; zGH1<_UgC_*$jr)-wZY!OGa`0|_YtN>v<#mfniX;*WUL;o$LO*81U*hq(3A9JJw;E| z({x1l=%|kCgihr3<%`YL_3zE)qSZ_qdDoAk~47JaL}P2Z{S*7xXp^?mw&{g8fG zKc=71PwA)iGx|CGynaEys9)Bv>euxf`c3_oep|n%-`5}NkMzg-6aA_FOfS@5=!NWM zX|A%?voC*fcqa2I@>NJ=`mW4pIYi#YxCRN!(K>N4DcvR&&mWm^ggC8jQ!a*00(UBS zMqu#Noa=M6)NR2fXKhR#k^U&OfW1aqF-LP!@u|TX$w_(l;ufaV4nH;h0u>b8`aAu-{z?C$f7QR~ z-}N8*&;MiS9K73pxHvr9wr$&H(l$xcG-;BiNRg!Gy7X(CZMStcezR@cwr%fy|A%|d z#d+@YDg0UZr|@s#zru1rWuOXB6{rT(0%`+wf%-r*pgGVIXa%$e+5+u?4nTLH2hbDf z1@s2`0R4agz))ZqFaj6_i~+_16M%`pWMB#~6)*rsAOr{nOn?Ol2dsb%umcVt3UC50 zzzszIfBc96Vu3gy9`FJQKr-M1{6GLm0aAfZ8BoJ<;7$f&d6W016NQ1+V}I@PGh_fCMOj3g|#FPzo#tmI2Fw z6~G!`J+KMb0&E4g13Q3Sz!BgWa2z-RoCHn*=YaFT1>hoZ3AhYg0j>l0fcwBh;1Tc? 
zcnf?1z5zdgU%+4BA5aD?3zi2ffEB^YU=^@BSOaVbHU^u5&A}F6ORzQA25bwq1KWcg zz>Z)iurt^N>^0co7e$+nB zc_N`r&X=GXIy3Y{Sl7skg&zGHbS?(TIgeh<8x|;b7TfBg{?xj@eV%>U#<>67f7Dv> z^YQkjZ0+T@>m;$DQ8DJKe4d#HkAPB-B0-_)W;vfmqAOo@>2MVAB%Af+Opa$w- zF<1hYg7d)z;6iW_xCC4ZE(ceFtHCwkT5uisAGjXe0B!^~gImC@;5KkOxC7h`?g96L z`@qBC5%3sz96SM@0?&YF!E@kw@B(-dyaeXO*2UT;%>|$5CdQiQ?C^B=u7+1cTrXYi zYm;)*6iWdtsL7F5b7<@j2e~N%{XgFU=w51QF7HHdGg?4>g1uL5-m%P*bQG)Dmg~b$~iT zouJN8SEv`%4;luIfJQ>2p)t@{XdE;ing~sTra;r68PH6~0EIxIP#9!_%#Z~Nhpdnd zvO^9i3UWejC>ruWF;FZN2gO5PC=q(*x$Cp1`#tNFJ;C~*$v-!=f-BRuDy*J6!=}ZD zVDq!O#XOCESMZa5YVMrev2=6t71UzR4q4($vTSC1q&IXnbPvir|W~~6LdyS>unDx0S)_S=^^*qmN4-qyn^ICEe zlnnVGKa>V#LD^6alnc#*3LpRiAs9j-48kE2VjuyMAq7$)4Jv`=K&8+EXc4p+S`KZ5 zwm@5m}EV%B4KaOvuAy zm*)KTh(+Dm1Lg=I%#dXL0o6n@oNq$EW*(-Ej%e#|A6_&mazO5cIj?xNAUrJ8^HVu$ zz~E@)1-uctY#rb&ZwQO27N@WEO444s&u;d1E)Ws@>9%#ji!O!N<6>PF0nYLh%Q zd6n00HiJxIal~MBm}`+|a?(&WJq~flB`nV|8&(*fMHT65Vy_pzahx-<`IW;ezzqz& z-EjUgWs7m6;c4E0%*SCnpo_74^6uC{Uzk3NZ|55oQfR6eI2)T8-^X{vom2FU#+)NE zbMjB+H8E@u%4d%A-3*^D9`VuaFw;@&WBh;I6h~S~o4{N69sCLY41a;Y!r$PZ@Gtl` z{1^TQmqE%Q6_LtF6{IRs4XK0FL+T?9kcLPjq%qPIX@)dMS|P2GHb`5f9nv1@igZJ| zBR!CwNH3%}(g*2>^hX9D1Cc?l7$LPjHFkg>=(WIQqfnTSkArXW+1X~=YB z1~L;dAVwqv2}4YX8L=SYNCXmvI1v}(Mm$I?5{JYiUL*lYLIOw%l8U4wnMe+jhs;6> zkgKLak@Wn<>@HIk>++)U-Z|n4Uqa}~tWTab=|jTig$*lIQio(`nx>U*OAO@h*KQC? z3J0YfN+Uxrc^_v!j%X6MEqo=NU7YH<68zzQllnU1fMKWMjO&^H%dmtlA4b_$2hE-{ zh9_~gJVW3UNDR#4O4`=I4*SKR9(p3?vX=yg!VQUfq4{>mwt>$uMET-EJCZ4UFKrs? 
z%&eonSC{#pKz&_OY(0M$Bmf?o>x{6oRSgGBvymWDhyVzPzzBk%2!>Dyi|~kmh=_#9 zh=Qnyh7=(s$Q)!IQi{w+79fj|#mEw5DY6V%j;ugdBCC+q$QooVvJP2~Y(O?4Tac~D zHe?sF8`+EOM-C!~kR!-Zk+KgeIC3|bZ~hqf}t#DykY zvUD~yh%G5e_8g3KWu`+b{cjVVg>Z1W+%evvVa2`;!CfJ77H67$R#^PWj91W9ZJZ@n?S^3@lE#hDwHBR7$;eb?2PzohGvdW#G(X4OwIgfsVwv}?5kea z@!Y&cI1}{Prm@Gc8cPe z$Yxo40Yh}-!25{SSsk;|(ZsN3;{#8SR30MZ2Lr(Ozh8v=7=B?T7Y92cQGdLFg!S zG&%+yi;hDlpcBza=wx&{Is=`F8c-t|f`+1Ds0lTr7SxJHppmE@b)ZqG3w5K>s0WQf z zhBqFWYANgs>m7gF;tMY)Trsf5lVWOdZC^R(c=r_kj2%QaJ{ z?Af6`&_&)&2~@J=k@?G}de8+wpA~N~u&MU>Y5$>O{EhhZ!ZP@o(l+y*j^5t)eBA_> zx2ET6J{qyrnHtwO1wvsILkW~bDU?PTltnpIKt)tSWmG{`R6}*N2rWiS(0OPnIv-tt zE<_ihi_xX%GITk*5?zh1LD!-i(2eLObThgI-HL8UccQz{-RK^4FS-xij~+x1p@-2U z=uz|-dK^88oN9YsuDf$e3 zj=n@+p|8<5=v(w1`X2p&endZ^pV2SqSM(eD9ksXzht&kkv8hDYtcuS1371CM#zUZ7<`Mu0*a`KB>K%Z$#ZUgUu)c@kt*g@ef-3Ri%n_3!XC1b!(`yKMy7on`eOaC{@4I) zAT|gaj19$xVZ*VJ*eGl?HU=Aujl;%c6R?TcBy2J^1)GXZ!=__1urSPoS+H=-ibY^H z%#Jy*D9nYqv1lv?i^bxwc+86>V2M}~=FeyqGo7o=odqsKYdr5w?|n|Oo2{%n)i={R zEw&rmJ^Mw#ZUvIwXZcflGF6y(>$_x$*p#~{=7(u*YOF6JqFZdai2Dh1z&e(z1x-SB za;N?C*;lE2%pCvaz|&v_WPCz-;krLtTR~fGdFhYQAhhiY9Oc z7L)N`JRg3=t@_7@O?7m&{qiSA^vPpqpieVUz5g3Va7>@~ec*fwlCwgcOR?Z)kTIaBs$xt&PBq1Pf%Bf+#t;D*XzK`7)YF~`g|uo3?9kxw({5=qI8Y%`eNu3%tg#_XaY zhHt5iUC$jE6B(~%pK{N!PmQc>uz*QEI%A=M^oAA!uAgzuz1Llct!!jdygA+yZ;iLX zJK&x0&UhER8{Qr7f%n9F<9+accz=8#J_sLz558XB)pP2ADytn0^dzfKn%IDxQs1|bB*4o(3ILfgm*v;Dv%`h9Y4grwG zIA^A@XP`b))sY&VY-*j|H9%o_+%I)UaJadDeznN%k#x%wk`sd}>^(>6@ znjXZ!zE@EW^9p5#A=>wvp6<9GHy2u-Fv>i@d@4CUSuu=@nPO@M)ika%I<3__f3?{7 z#Pok*3&ZBS-_Z@?i_-*eAsLqYCnq(2c=!-=TQxs@DZUI}j<3X5;p_1A_y&9@ zHbi@(On!>HhLd#90~^|IyOFeasjC7ZJ|G=QT1)hFx(%Bm=UN^qjs3q0inZFFqd_hw zh_yGh(ls8Bf8bbPe3M??u*_83R^JT7bWx{--Z0FayCk^X+<`R{gQ1Tq=kMe?9XTpK zQCpwUD`TH2A4H267Ot}AXd&9Z;P>oBrfjvic*Cp-WJDm^aJeup?uruYs-N1_wandm}vCAtyai5^5xq8HJd z=tJ}+`V#|)fy5wUFfoJ}MhquL5~GOG#8_e+F`k%2OeUrfQ;BKBbizOwi4Y=`2qR2{ zg$O4ih)BXtIEX01Nw^3%5l?uDB*IUm5NSjPkx66`*+edpN8}T;hyns2U;-m>f+Q${ zAvl631VSWaLLpQ_CyIz-qJ)@3%p*#P`NRTZA+eZPN-QIm6Dx?7#44h;Bg)=3K?8Cv 
zb!Sb9Gbhe5Oini7v~D!Dju{raN|+mW0I0(hhptb^HT}SkM<4e6^0i|^9jnm)pc-oX zYAt_Q}6MCdON!>*(QX34gC&OG3EIV zYgAfY<6C2C;YjZhOBHp0o<=+h&X1_!DWgYXO~Pjt39jw*VZ@HeJT#gSVOEM{v*~C8;MQCW?~DmmDo<~Aa)YFh&{w!Vjr=eI7A#K zjuFR+lf)_FG;xMFN1P`v5SNIn#5Lk3af`T3+#&7~_lXC@Q{ox%f_O>1B3=`3h_}Q$ z;v?~e_)2^ueiFZkKg2(x3|W>ePgWo+l9kBHWEHY1S)Hsw)+B3@waL0<1F|96h-^$Y zAzP3w$yQ`*vJKgeY)^I|JCU8qE@W4-2icSCMfN89lKsg3Q%8Bm{yD`||WS?&R?tR>#nCvCI68N-yo$-Qj7OP}Q>czvD}>eU7&)9AL5?Ixk)z2mlLh2#GDrd>L?R?g;v_+mBt_CBL$V}C@}xkDq(sW3 zLaL-j>SPgFOqP&y$hqV^vXq=pE+7|@lVdwO;si0jJ#*8x#$4wAdODCS1N2EWgbxA+ zr=JzKhPn({MOsWV!&lR%_~~E^+eUvG!;gsJrmHa@lzGN##u!2<8f>!)zw%xh-UDJ5 z|6nI3hqpL9&=yI;#g{KS81_#2)@cr;@$)EHA+Amvc^H=4S|A;+MJk-(9C1{Jt z#pDulDY=YXPOczVlB>wom&nWH74jN+oxDNbBJYs*$ou32@*(+{d`3PeUyv`!*W?@WE%~1OKz<}Y zlV8ZMGe$H?(8%uOz&7vzoHsf96D4}3()##Ri z1_4wf41FV?Yqbo8jvaZp{SCdRkj%JerJ(~tS@UFJm*u5np{q)CzvK_;8v&APMYX2d zP;IGpRC}re)sgB%b*8#dU8!zVcd7@~lj=qFrutBQseV*{Y5+Bm8bl4IhEPMPVbpMH z1T~TxO^u<(Qsb!c)C6iGHHn%`O{Jz$)2SKMOv*qRsSqlZ3ZqPvnX*vfl$DC0Y*Zv= zryNui<)mDcn~J79R16hM#ZmE;mr9@#sU#|y@=<;&K&4QrR2r2|Wl)(^7L`rqP`Ok- zHH#{sW>Z0`kOHVo|0Vks0x3#n{3ZQMs|A*Xjt^aI1&R5l&oL_hCOpDi$vQei2sA0Y z&0l~;3Q?WOin`>6xeLFy27m^wlorH)a@ zsT0&m>J)XFIzyeM&Qa&73)DsG5_OrnLS3b4nA$*GvIB9S)FG;B{jPDjMjai|^19~Sc5zANWJavoJwu-A7kdu5!?_l&YQhG10^ z+@>a;8~OIYpJ3~x`}wJ^{wZx-Ow0;@xVMU-j?t8^z8O!vEH=A z!bzbx@Y)m*iaa09@3CUOBG6cEjNagF{xcc#<67rb$m*Q?3v9&_$}*$s-H~;YuB`Wt znXC@fwj=Fh#^!*o_vxgefjcwqohK%FblhX=3H6kEMm?uqP%o)h)NASu^_F@^y{A4< zAE{5&XX*>}mHI|~r+!dBsbAD@>JRmo`bU+a%hKiO@^l5dB3+5DLRY1$(bef1bWOSz zU7M~$*QM*x_2~w5L%I>&m~KKhrJK>s=@xWLx)t4;ZbP@F+tKam4s=JlGu?&mN_V5X z(>>^(bT7I$-G}Z=_oMsM1L%SDAbK!8gdR!{qleQY=#lg&dNe(T9!rm-$I}z&iF9Y@ zv8)Ti0eT)Z&)Yk$uek~SBxey&4jk*7Za5a40esG`nW!auJtIO8;}~n@7*FzE_ND20 z-1gu#SF@OHF`vNCV4$#t^M+$kSbqGAyk_}b*y7a7S$6}wd{TZ-!;$R!uK7w1aXfe? 
zZy#_*{Ag%wYz-xt??(0WpQe`va5x=D5enz*1TUcLjw~X)fExL~I zBmdU_CAEh=D|7mW4>wqm58jnw`>|f^cO`VzBziJEg`P@Jqo>m|=$W*EHqs$=(d zXfti0!)YrWLEGp^+D<#@DB4N8Xg3{Ad*~SY{{=>S=>$5FPNI`(KOLY`=u|q5PNy^I zOgf9srgP|AI*-n$XVC@pY&u96(f|$85Dn7^jnWv6(*#Y@6iw3%&C(pr(*iBh5-rmT ztH=hE}&QhGkUhTcH$qL0z%>6dg>rWIo}KKDiC%_@;S-O~Sf>Khv6 z`uO#+k?he~i3y1F2!Yv3W8X$>O|O;MQT@Y5ux@La!b;51(5W#uJSNkOgm#%_;Thrt z%b1*k)M@^_&=uwh>_6M7z+u0`-hK9E!|Z}ffe`ykux7w&FF)r>LN`vg{|*j!Zwc-Y zQdy(zOw7H!O>CIyTwHkG5qh|GIJirJf&;=yv5YsVu$m)F=o0@lD#Uo#8yVs-hFF(7 z>pZXfyx#Nr&l@}+!VG1GF~gY=%t&SwGnyI0jAO<#6PSt26lNMToteoP7$XzHgfd}_ zi7_+bjFpLCY>b_8Fj0(?aWQTthKXh3n0Us^Bru80|63RnU{aV=CXGpFGMOwUo5^8j zF$K(QCdd>r00S~GgD@C_GXz63EWnB~k0W+k(VS-u%>yY#ipgE zb*4Byifv~4V(Mh-Wg2C2u<@oCQ@OAzVYR|~hV>5{9X2LxMwl}!I?NIFA2K@(3IoH0 zFgXkgH5cwO95S3R95g&PJTSa6+%+^eeloN(#u~pEIvP70s~Wo-|GzF6hr~r1qm1jA z|Csg624*9(iP_9-VYV{cnC;9CW+$_Y+0E=>_A>jJ{mcR8AajU0%p75kGRK(X%n9Zs zbBa05oMFx~=a}=%1?D1iiMh;NVXiXQnCr|9<|cECxy{^R?lSk7`^*F8A@hiN%sgS9 zGS8Uj%nRlv^NM-RykXul@0j<@2j(O5iTTWYVZJionD5LF<|p%u`OW-c{xbiVGHhA4 z99y2Pz*b}{v6a~>Y*n@zTb-@J)?{5qtFfPPq%qr=X%vm55jOJ1pmCOQm9fCM#;6#d z8lM|q8jl!n8Xp;N8!s5|8NVC5hRg{0Wb6@AJET`g&yc^y79ll4I)-!!35JXhv4l(s zF@*R+W`!&bSst=7WM|0LkV>I%LSBbF4S5?lz6OR@~hvK-5^0xPjHtFjuavqfw%Tf)v^=d$zI zQg#8mkX^(sVVAPY*yZdBb|t%tUCpjx*Rt!_|Je2HMs_Q^jorcSV)wB7*#qoB_6U27 zJ zDt(>4LEonD(f8@c^mF#Y)JACIw5gg| zb7@}9r=@F|8lkb8uC3Q}VU93QC>GWT3x&t%c#c^IPflK0&xd4~O zZ5P%H=Y%7|Ug5lOSU4tJ5}pbVh10mw~n(WT2X7V6|)vtXIT@hervWh!@AbG#L8GVTF+UR zTQ^%}>jLW)>vii*>kaEg>vrpB>pSar>tpL3YX+Ca<#4%N9+%I};%0L}4&WdT<`53! 
z2#(|^j^-GShK=xKcs%Ta$H0wY zH{25*2KR^K;W+pS_mq3aJ?CCX!vT0boC|M) zH^U3yRj>@tg;BTw-U_dTXT#g!S#S|t3a@~7!n@%Ea9QLDd>yVF(FCcE+=2hVSK$`O z5Tq_r3Aqg4gKHz@k?-&i_$7P<-Vb*|-op=I2NH@zA~qxg!4U$C)y&Af#V=dFAMZ{s6*JMZA5cqi}T-F!6f;bZt% zK8{b~0iNL{ejdMoU&t@wm+(vZW&Cn}6~BgG%dg}A<2Uk~_|5zlejC4?-^uUdck}!B z{rn;RD1U-K$)Dm+^Jn;T{6+pMe~rJ%-{&9jkN9W&YyKVof&a*V=D+Y?`JenR{x|=Z z|HoGrst7fNT0(uHq0mGKM~9>Tkl%<6oq{Hyv8WTxM;GOcN7K<+=n`}VilaQ5hpt2C zqA0op-G;75FQEs}+vq>EGgcAniZ#MUV1J!eTqA5_ZG!=eEz%ZYt8HrM=;-L-7~q)U zN%GA1?DCxS-1J=Yy!Et;`RW-OGd*T}%-Wc*F=;XRFJn8!-SE-Xkm;nUYICM5~c`Kg=xZcVTNE3%z{M- z7i>bL;1Hq&r{EIYLbMPo#0y>_QAiS!1)q>2qzY+5x{x7c3pqlrkSF8|vxEX6D1ZVY zpaLe~0x3`eBd`J|@PZ&nf+A=_kx(Mc6-tHq!UAE5uvAzstPoZTtA(|~Mq!(!#jNKo5KK5GdRpUCw^@^+Ry69?}RoCS*eK7qoy)ZpD{W8@v zH!@c;PdA5|O=hz>+&tdg)@(J~%tRhwwtBKXc8e&bcmRMV?Bi0q`iS@+>VneZ!*jQ{LHWQnREyR{$Yq5>k zR_q{l6g!EX#V%r3v76Xk>>>6Pdx?F-eqw)dfH+VbBn}pbh{MF;;s|l1I7%EXjuFR- zsJm^3L+a^3n3ia@umn^4#*p^4OyAt-`B>*9b2g-XXkw z_+Lwz@Xp~q!z03nhFimXgwG83gxkU$;WNU`;aGTVcv^UJ_^j}Pa5P*F|1bP<__gpm z;Sa*Ugue>^5&kc{g0-5ps6Cr)rMOyLBd!-Wh#SRC;#P5+xLw>K?h<#4d&IrsK5@TzKs+cO z5)X?<#G~SI@sxO8yeM80FN;^itKv2Bx_CppDc%zAh~IN;dkIPAFScAhAYPfxo{WhVqB`L*frO+!nM}5#kJezA);Mt)7PhOPT!WkJ^fJnf%LQK zXVNdGKTm&`{v!Qj`seg->18uMrI*k6nf_FKCO#Kmh%d!g;%o7Z_*Q%;z861;AH`4N zXYq^pRs1G?7k`L9#b4rY@sIdd{3n)?%1Y&=@=^tFC!%*n9&^SXnR)F!B)-| zQ|v8HC{8R+Do!r;6=xRb6wfXOijiWxm?~zAg<`Q-E>?=wV!$anX{Y8~>RjO5;oRcf z>)hltu@{{eoVT3MoiCj`!NaZ-uB)zduJZ1;t_to4uE(ywu8QtT?w77NuJ5jCy~oreu(eQiv2Pg-IsK zELo&*$tp!iHYrlFOAaYYa!M}AEk#TJ2Y01dDNc%)yi$UcC?!eBl27tW0VzdFmC~ei zDMQMVvj30!=Sq1}zBEfJkY-CksZatWP=X{_LL^keBwQjSQlcbUVkB1LB|#D;Ns=W+ zQYB5&r6Q?VDv{<$bESDwsWe|&AT5*@NsFZ=(o$)ev|LJZ&vt9>68Bp7JU8r?+>CpU z`?Y(QdxQIq`?~w6`=)!dyG?Yb=!Vhlqd&XLMAwS$9z88OD|$_|61_ZnTlD4VFVS~g zo3t0|EA@l=N&TXJRe!60|F5>LpjFbUXw|gpS`DqHR$Hs5HPjkujkTs)Gp&bVbb3S2 zK%d#?@J;hA_Mtw(r}!9Of-ljR>C5(EzLmc7zRSL~zHh$oz7M`@zKy>7zN@~UzE{4t zz74)hzIVRAz7^6+X_d5ES|hEM{*%^A8>EfWCTX*@McOKDleSAcq@B_(X}7dT+AHmo z_Dct(gVG`CuyjN^DjkzfNGGLJ(rM|8bXGbiotG|17o|(mW$B7^Rk|i!mu^TmrCZW% 
z>5g<)x+mS29!L+RN77^IiS$%@COwy4NH3*V(rf9B^j3N&y_Y^nAEi&yXX%UdRr)4< zmwre;rC-u->5ueR`X`l<%gW{C@^S^aqFhO?ELV}M%0vB~{WbjK{Zssv{T2L8{k8nv z{WJZ&`~&^X{6>G6-|X-27yT*zQvU+K(=YoKzr*kL7x-iR%>u{#NBnpFwF3M6_x*JP zPyKiNH~ftPtNk?s>-^mVfq)^99M~9e1+aiAU=H*PbPP-g+zV_76b0@FdZs)KEDbCV z>fz_C@0G&QkJLeNZFclFy%qY+myR0_fy`bG)rxt z+B|h=>cCWEsxuX^BX-p;+n3t6+k0n~mJEn}Y=3L-L%z39<|@-$`Ahs2{t5qf>2%PB-9mZ3XO!e!a!k=&{OCwOb|kaFd;-p5F!MRU>A18X9}|gNFW4Q zpoI{Z)fMiFa1D2?HitNDj2ijgDQC&qa*muU=gIl=!v8 zvTkJi$oi3EBE6B*A}x_4B0Z5=k*|SOk#iz*BJ(5nM!t&N7VN@@^)lZdt-Yedq4YN`xtveM+--!J=K{^EI~_% z5~@TfkxJwet)!@APRZPoc_s5p7L+V1SzNNTWO>Q_;KJbI;L_lV;L6}x`J8-Sz93(e zFUgnXEAln@x_m>vDc_QB%Xj3v@;&*!{6KyvKawBIPvocaGx@pvLVhW~l3&Yj{wRNvKg(a_ukttfyZl4`DgTmx%YWp*@;|wZQdTLalvgS!6_rX#Wu=NzRjH;_ zS86CVm0C(|rH)cpsi)Le8Ym5wMoMF)iPBVQrZiVtC@qy%N^7N!(pG7wv{yPP9hFW> zXQhkMRq3X5S9&Ntm0rr);QHX+;Qru&;KAUD;Hlv0;Q8R?;I-hb;Qio(;Pc?C;M?H4 z;D_MHV9mm>!Jolj!N0+>h2;v%7gjE;QdqsPMq%y3x`p)$8y7Y$Y*E;}uytYE!ghu2 z3p*5cEbLU+xv*Pd_re~9Jqvpm_9^UJ*srjE;lRRtXJOW@s8Ly}z-5RT>oOK)%+ENN zaWLaT#p`QN}9cl<~?0Wuh`knXF7vrYh5v z>Bs#=$}gpb+D2`j*(`HxrX$muY00!@=4ARagPC(PxlAAv%)~S4%+kyinJY7e z%;lLEGk0db)L-eZ^%-81*Xf<&P4q7H;@%{0hPTMO+?(ZH;Vt#9_0rxPZ-Mu{w`#(5 zZ?%LM-u2!Z2{*i3yf?j@ym!215)OH{dmnmhB^>bn@%BsTpWsf2PB0{R5+)?XCcp_T z6D$ew31bty31bpQB}_}0n$R_2O9G#enUIqpCnP5v{D17??}XNgZ4!SZbXNPTBh~Th z6m`04Q6p8i8c;LUEHy{XRrAz*b(UJ7&Q^mepn@u-!YZPoDyHHpp^_@4(ki2}DyQO8enov$uX7pjZY#p)7usk%&EuC7p5s;ku1>Kb*e zx=#I1U9WCXH>#V|&FU6)tGZ3yuI^BGs=L(P>K=8kx=-D&9#9Xeht$LB5%s8gOg*li zP*19-)DDU76RIbcO|&LHN~n{VmN+!goj5KrDiKLcP6QH*5_1y=B%V&h6N$vLiDY7Y zVoYLQ;+(|giMtY4C$3GroVX$JK;p^7dx>h|{X{nLUt;g1H;Ijtnk2;~eN3E?G%9I9 zQb>|LXc`^1|d* z$;rH*Pvg`13_gp`=5zTRK9A4m3-}-p@(_>kI8XDZ)idf@^_+TMy`WxHFR7Q+E9zDC zntENmq25$)skhZT>Rt7odS88@K2#s6kJTsYQ}vnpTK%V1*6M3bwB}kXt&P@R>!5Yi zI%%D?E?QTuyVgVNsrAA`?zZHP2MR1mx)JHzs0wV9}wR+eo*|t_)35)-W)$R-WVSqA02Ov&xuFknfT@L^Wyi$--^E- z|1|zZd_T57JBS_5j%3HM6WK}Z6m}{*nVrd6SQ~qcJV~A+Pm^cKbL4sQ!v8f?^V6f! 
z(w2j(kZHu-|+pg`@c4_;xgW4hOxOP%IrJdIb>bdh3Ca%pkB8*aK_=_5-JYGe8rV3EIF&Fb#BriC_UZ8}x%-aAw4oNCJdF z5xf#u46XuCgPpu5!Chcmy`A1(@1S?oJL#SEE_zqJyWT_ZrT5nR>izWr`ape>jARy{(G)E#=1 z?$TrQSUq0%>Ir(1o~-+HzaG$2^i(}fPuH{b96eXh)ARLN`fNR@7wUix>W~iWh>q#F zPUxgg>5R_lye{anuIieu>&1GBK1ZLc&(jZp%fJrFgOi6Qk58VIY)igSbg}4C(Uqbb zMK_B!C693(O#Va)aiR0&xZQC_;||9iiaQ;5Htu5F)i@w^ZmO0#FLhVy{?u)$t5Y|m zK2JTFS|_c3TBEeyX?@c&(i~|CX%o}Z)25~6rsbyAUqk`d)pXzF$9}AJh-&hxH@+QT>>HTtA_o)KBTB^)vce{hWSYzo1{#FX@-{EBaOa zntomXA8%(F)W#ov`$g(lp+zc`8a1Fmkz&Cm!QI_mBP78BM37(!S>L+5ySuyV?$+H~ zD%}13XYSnl{?6QelMn_nyUBj#dCvKqyPA8N`oQ&5-7|=8xvD=AUL*1AtnfHmC#Yf+N6@;3#l3I0hUGjsx{T zeb4~(%u3Hn$tuk9%}UP7&03!|GFv}8J$r37mVG|^YWB#S=H{m6fwJ#qf6Bk}fARsr z7=f9=mpO+z{LqoE5wkd=`wT|0*!4pH**J?^KTvzwqDqAN&(fm#@$N z#T&&R&7Z`d!MEVs@~!#K{Kfnzel$Or@6S)+7x4{2BXB%u44Qxwz=@zKXa<^t7T_ds zGB^dC3R;5GKr3)MI0Kvs&H`tH*5Dj)E;tXI4=w<0KwHobTnO5O4xl5r2y_CSK^M>! zTnxH_?w|+g33`Dn&>QpteL+9a9}EBk!5}ag3<2372Mh(nz;G}Ej0B^=XfOth1>?YY zFab;ilfYy!1xy9gz;rMJ%mlN*Y>aeN#(Q zW79?6GoGWoT_sh?RGq30RhOz;)uZZFsZ<)3S_P{R6{=dUTA^C6TBq8m+MwF3+M?Q~ z+M(K|+O687+N;{9I-okJI;=XOI-@$PI;T3Xx}dtKx}>_Ix~jUSx~aOQx~;max}&_yl|kk{|`r;4|@F(~S90GrXf55-sKX4cXAT3B6(t&iL5zt6z6!c2SSC=c^Z} z=c#Sf4r)iWliEe?s$Q)2RI}9HYCpBJIzSz$4pOt#Vd@BVq&h|&tBzAAs1wym>ST4Q zIzyeQPFH8EbJTh2e071kP+g=hR+p$t)n)2Rb+x)iU8{Cizu>>+pB=ne|E`{@e^WnF z7@V#n)D~I_CkRJFW1z9nI7kmNfDEDWkTGNeO@JmsrjQwA4p~5xpvlk_Xewk0O@pkU z>Cg;lCNvA04Ov5Tpt;aIXg;(6vVm-&g^)eu069X7AScKfa)Df-#gH514tYSHkQc;) zydfXR7xIJrp#Ufl3W9>65Qq(Npin3b3Wp+~NGJ-5hGL*tC;>`@lAvTL1xkg|pmYWb zkqKo%*-#FY2NgiIkN~QOL{K9nfm$I2$6crYG(#S8F4ya+GGOYl;>46nw^@mjo}tKtrFmvAxe2JUk1cJ2=D9_|V5R_+$=aqcnh zE$&4w#eKG8~XV|lZA=DdkK3tktbgkT7PPzZx? 
zh=7(rOQB`Za%cs#3R(@Vh1Nmqp$*VRXcM#<+5&Bbwn5vW9nel_7qlDN1MP+OLHnTt z&_U=BbQn4U9fgiT$DtF@N$3=G8ae}=h0a0ep$pJO=n`}px&mE=u0hwK8_-SY7IYiB z1Kox0LHD5t&_n1E^cZ>qJ%vb!f@tU&^c;Ety@Xytuc0^4Tj(A19{K=%gg!x^p)b%^ z=o|DM`T_lfenFFYp1c`6E1nO}mS@lN<~j5Hc`V){UN|p<7spHECGbjlmArFJ&xHbE zy|7s*70QKb;VR)0;RfLv;YQ(hVV=BLUL$XiH_0XPW%5(<-SPwSvvN{?NlwY1$v?>_ zDgMY`%Kynf%BL$V6efzvim?hO#R7$k!dFqC5Ggvj&ziUKcJLnXjhiPln>J5rwrZZy zJiFPtc~0}Z=DE!d&EuM#oBf-En!}r8nv0vOn_CqEuo-LxhoIlkALuXi4;qF5SPRyM zbzoh11UwQR1&@Zuz+>TYupX=r8^DIJ5j-9?hE3oJ@I=@YHiOM!3wRPd8J+@9g)QM} zuoXNVo&nEthZn#$uq|u{FNE!32iOr_1UtdbunX)8FNWP<#gOXC;cM5A~lgcl75vwmfn$GmFmbw z$$m;l%f`y|Wn*MBWgaq1*=(7eY?6#63z0d=9Axukt}<_#L^ea#AZwI~WLdH*S(vOs z)+vjX<;ujeJXx|VUp64yEL$VHCQE@+;WRiM&VV!FEI1p^fpg(JI3F&63*jQT7%qWJ z;TpIVR>0k`5>~-#7=l;9YvFb9dUyl85#9uEhPT1n;T`Z!crUykJ_sLz55vdd6Ywec zG<*g=3!j58!k6I7@D=zPd;`7---2($cj0^RefRt{Ah5y0BFo0+w+K4VP0vU;%lF4Q5vW+s0tW~yN z_Dr@xwo|rUc3yT}c2IUzc0_hgc3<{d_C)qxW-2$4|C5cCPnJ)YyU9J}E=_(-0ZoBT zMv@`nU*Twxj!0KDQ8ZSxKx8Ni7A+Rd7g>nxL=GY=(QMHy(IOE`WGPA#rHBGWTv3=P zU6duN5Q#)Nq8d@Ps8_T~v|6-Bv_XW4)`;3g>qMw%nP|Oehv=&4sOW;|q3E9IndrIb zwMb9=PxMD*C^i+36OTg1AY+knh#sPk7$8Q-c*Gc)fJ{V85i`Ubu|OsvlaVRNRKyao zLZ%}#kXguV#2T4{%thuQ^N|IJ4PuMfA@+y^vIucPoDmnq6BWLv5)v)i8%#|#VES7jk7D=2WEJ=WbEeVxGN>U`5k|If?yj?Dr-;qC-XDF~HqG@&0 z=B6!8+nU5;c7C526Qg2WyiB}Jd_a6qd{lf|d_jCsd`5gyd|UidTp?LgaJcDG)6J%5 zO;4MiH@#|l-}I&FrZfEkxj^EWDBwt*@kRKb|5>E zUC3@^53(28hwMiVAP12{$YJCNauhj+97j$dCy`UgY2*xY7CDEUM=l^2kxR&B8FfV#wa<;DkV=TR#qzOl8L#XZFX#RKFa z@(6j1JVBl!BtjuH@(g*7yg*(euaMWs8{{qW4tbA!Kt3X$kk7~$k+ zA>=pm2lM2B;xwgpNmzQ4@3mIuSKR z%}{gH0-c0TMyH@tQA>0hYK2ZmXP`6DS?Fxk8l8jAMdzXO(FLdtYKz*T3sHO20d+(d zp-!kX>VmqWi%~b!9rZw;Dc&ePC|DiVow1r%gI@+e435-jX+~=dH3pjb8e5IMhNWpH zT8VZ-O2`N~p&&YlF5(|Pj?gFe4jvdhICyFB+TdaSlR;(wrv5Yitbvk&a|1mCELEwh z36-Gjs1%i$l1iA!WiY`N!qbt!>=xTHgx)xoJZbUbs zThOiOHgr3>1Ko-4LU*J4(EaEE^dNcyJ&B$|PowA1^Qb54g|bj@)CcuN{ZM~201ZTg z&|owKWuqK46b(be(FimWjY6Z*7&I1*L*vl|G!acglhG736-`6a(F`;b%|f%$95ffr 
zL-WxBv=A*qi_sFa6fHx`(F(K@twO8O8nhOzL%Apq<)Z?$9u=Y@v;l2Io6%OZ4OO5W zXcyXp4xodm1_e=#Y1t>WB4ij9R2{mfq%pA;9u}) z#1Jt-rXe$tgZLrbqfvs(a5>(I_v5z)Fr2{e;}7tM_!ImwPU5fd7x**$HU0sgLD&!r z2{*!(@FG}5070QN`V4)JzCd51uh7@%8}u#u4t^eg%e{f_=Xf1X7#pYr2u?3h7W{cTj3o(1l0dvF_ zVNRGc=7PCmi!nFM9rM6EF)xgTd1F49FXo3ikx89_#pQDih3L&lPEWIUNbCXz{HGMPfAl4)c*nL%cf zS!6buL*|lsWIkCy7Lr9|F@i^8I@7%Ucx!{V_7ED=k>lCcyl6-&d?u?#E|%fhm;94r^h!}75LtPm^0im?)` z6f48Zu?nmbtHP?W8mtzp!?+j^<6{D>9us0BOpG;PjaU=bjI}UEomQ+3YsaLR43lFD ztOM)By0C6ciS=N;SRdAp4PYuvjSXTN48$M|#t;m}Fbu~CYzej$TZZ+M1Eh*nlY^v& z1WAa5NrXg6jKoQTTtY4-myyfK734~C6}g&RL#`#)k?Y9~|+^^@)7x%d_q1YNs=OI@)`M@d_le>Uy-lLH{@IL z9r>R8KrY8tU@Ngz*lKJIwia85t;aTC8?jB;W^4<#72Aew$97;lv0d11Y!9{<+lTGP z4qykdL)c;L2zC@Zh8@RFU?;It*lFwxb{0E_oyRU<7qLs&W$X%e6}yIA$8KOZv0K<} z><)GpyNBJ!9$*i#N7!TR3HB5tF$$xxXV`P>1@;nqg}uh!U~jQ^*n8{)_7VGpea60E zU$Jl4ckBoD6Z?e?VZX6I*k9})HjDwd7OstdBtMa#$uHzr@*DY`{6YRCf00AvZ}JcM zmt?|@Nr2L#v?(1*ml{Ehq()JrsWH@8Y8<6U=~D)jA!S63r;I5RY63NpGNsHYbIO96 zL`|lqP*W*OY8qulO{Zp1GpSkBY|5INL(Qe;QS+$EOEf2z(?y3LlM+ z!N=m`a6McfH^2>XBYZq=jGN#S@QJu7ZibuV7WgE5GCl>Lid*8d@Hx00ZjU?R9=Ip& zgNNf$cnltk$KeThGMnA_DxWH#3aKKhm@1)4 zsWPgZs-P;VDyo{Qp=zl*ic9e*J|&>)DIq1I#8d;-NHtN-R0}1cTB$awosv>AN=_-L z4yu#tqPi(1)kF1CeN;a+K&dD-HArbFkb)?fLMW8ND4ZgwCDc-C8MT~RL9L`#QLCvn z)LLpCwVqm!Z@@R=oAAx}7JMtd4d0IM#CPGl@xAyy`~ZF!KY|~{kKxDhllUq8G=2s@ zho8qU;8*Z#_;vgyeha^i-^K6YkMO5Bh12+R{3ZSde~Z7v-{T+gPxxp23;q@VhJVL@ z;6L#p{4YL?1B4c#P3RE1#0X*}F^U*Xj3LGndW0b{o-igR5EBVg!km~yOeUrfQwd9A z8Zn)iNz5W<6V}8WVlFX{m`^MqYzaHUo^T*GP#dXD)Mjc6wUydNZKrlnJE>jNZfXy; zm)b||rw&jDsYBFZ>Iij|Iz}C*PEaSQQ`Bkd40V<|N1dlGP#397)Me@lb(OkCU8inP zH>q3HZR!qnm%2yYryfubsYldf>IwCfA}NZZsb|!4>IL(ILN2zn$viXKgmp&f}ugcIRR zxDbm8cfynKCVU8A!jJGL0*N3ZmqKqgfDu_y=il`=Ph+3kK;1WE7PY8&5 zLP&@RG0{LY5-o&;=q8jz57A5X5&gsfp(518AfX{Z0wpj4CzcY+h~>lzVkKeotNxez z*Nb1DevPHa(R#E#Z9p5+M)Y{vm^Ps&&=YA>+Ke`*E$B(~WO@ocmA0g((N^?ydImj{ zo<+~5t?4=RTzVcopI$)Q(6+Q4y^yx29cV{-5$!}f(=N0ty_j~R-DwZnllG!nv^VWT z`_g{2KOH~^(m`}E9YV8d4joE|(cyFi9Z5&g(R2(QOUKdibON17C(+4t3Y|))(dl#s 
zok?fW*>nz_OXtz~bOBvR>ka7-84MW?jUO@@nlNNOG;PRgX!_91p;<$7hUN~<8)~sL zeQEa6{AHV6ty8d*+^*Bk@+Iy($;H5>%T8lg;b!h;{qnNgW4BuOD{c#3+PsYLsPKH^ zx!TU*rQ^$Wb_qV2zMJe^{kPlgvfFLv@p7yG2LJtb5B+ZiWCw)>`M;bQJTF)r+#P(* zE|@K3H?Sq_R(3mE$|l$ddpCO*JMLxt%jIE2*pjfNVf?UVVJ6`d!Y78ChMR?(hg*bi z3eS64L>JQ~bSYg%m(vw=C0#{V(=~K0T}N|i9?ho(bUiJkMYNc1pd0BXx|wdFC3Gv@ zMz_;aT1Lxh1>He+(p_{nt)zSCUb>I&rw3>it)>TQ4Gq!|4bupX(in}?1ige_N-v|A z(<|te^eTEay@p;(ucOz~8|aPnCVDfyh2BbUqqoyL=$-T~dN;j?-b?SJ_tOXHgY+T# zFnxqRN*|+-(xgJzb_u8B*@oivh_?Hyk5I{^UH4O5f%c7FA8(tHrz zPtBORB4xJA7W`_;?$keN6@>%sflziGR>xf&I~8}Cp7oaXDCwK~AEAcG_SILscIX(J z;W*a6t!0lB^W8QzI-cguUFs}rYht-`FNeXR*kugEN&z(-x?oDU1pNZ{> z3)x|D4!%kk^Q0PKzNn{jQ;@u(#TQE7Bswtb3~_YU*;yB6oh5YJ_dD!ia~;39C!;k3hf;N`S44(EWg4(A-sJ6v!u(7Nbw z$pO$h3S87W0vKzZ1tw{o0m8Jd16Q@K0lN>o%wxvlOpFvu8B;Du8T&a>FA{~YhpZN zBVw<{-iaj>J|^iVk4kn-c1kWt#Zw8U5Gl>@$WrA0E7UE@FN!EXQ02^B(YT>;U*qY< z%Z-m4M=~^jq2w~dH#wy^)Jb+0^c40~^o+0jBfi)WRD4!AC+5F*{qN}N+at*6JJ1XO zD<;Z zJ01Z1^d{;ri0+L)W%Jg}S0_lhC;XWz*5^>OkiXc*$^B+dtFa4jPQp%QIYSlOVYs002Sy;qa^GufeM$j@fL33*En}-+7O8J{@yIb4CzgTY*gk z0pPCx%B&rER9u+txdnWq-)@yg_XTd z0pLvFA6-pWnBky)ef)l(ef|KjgHtdcGg{a34Mg?pbN{(<=N#!?njp^eX90lLsxbza z=byD<{?6!_Tff^#cSIs^W@ntXib-%V+-75?IpE|~<`MDVr%}i4v>QDmb=Ac#p@Ea$ zl%j#A%SJkL-RAvvrlPwT!|pnR0dIT5VExGPVFm!O-eB#x5d9-YUgLjx4_Z6sZ8ZG4 z$c=Yt0VC4WkG6@oF?Rn~}B|8m82941=uDckR|DBkF%kHt=&H27!36xY6 zGs(p(ahs?p6$r5jokP4}PeZ$rx>PTR-|?TF6^vh9JNf8j!Ov)o6iAKOAHi~-nHHUS zB6DbQSdMePu++KCy{xQiL5)qs#+p|lF-;!zzr?XkB{@f(03bA1yScg9xFEVXJvL8% zRIyF4q;qO_yGo#LQomFiDxYh(;3e=8m^)1aR^kQ_fvwixEY{!n(J>UEKsx1ol)wVc}TH5K^Y+)2i3$*Jt zMH|Y-_~e%CTQ<|rP+B>5zAzYi-4U+|ubLIxX!UjAE{{^ICLZ#aM|_Mt5VepYMyw&6 z;yN4plddG~n%bWMWgN>ip3|BCYmr59$q^>VM{0SS(Sb6|g$KmfRz zHp$~xhak<^E81C*zKuw%P6~YMGt-hECUE08t3P({uL7B*p>2 z(dp$1ZQuPt^}VCKw-Y;;{`9%!`XeZ)OzgW&I$T>Gcw;dfmmN4cP!TQ)23b-H+r=OrM@W5E^%B{z3QQ{Mdre z?)?QbJWLDyJXSEe$0QF=wG!ZW=URBJhbgB&U^7U@1JXBfY#r4`<4W)H>*;Ms;MODRBRaP}sDXM1c9QFU? 
ze8=E6>rmqL3Rd-##0ZyV#jiT+QrW_%Nq1HGOz!1|kE~ z!VkXN;?4z_U!h;JLVtNe=ra27toGseu6%=?%9ky6Y4T>l>V_w>JPlqR+f6y1~Tj zTf;B2XAR}805Cl}CA)&jms#Lnrkd0EUsg7==Ce|?*y_fr*&-UYPP*|t=Uhnb&|=&s$chg zL)))NCbgOx>@Yj((%_KdERv8C^VUw*^70X-n$k&1Pm}V=e{%_ zzawe>fllq8Q=p1Dfwk?XfrkrC8D^`8^k}=hN?R(Kc(H2RuadyI(h!$r=09TL+D2)$ z?B|mAvN<+e8P=kqJUgLGnowjI6v*%ZpK$kj(cBC3^B((E*eKzb0GaIe#)^46) z)WH^e9B9$2ej(h*IMt6UZ}eL*Ug-Q*SexBeIMP|_MPb4AYPA%_LR0raz*LgqQLEnaPSv>7{)7evXJcVS4L;380{}=@=cwrVMevSL=7kPO<$&Fw(3gMZuiHB;&qG4sGT%~X}J{wl|b(Mg3eDkcLDZgsSm{m3-mk%{LRMb8;V zwKL3_Q-0laOZ-ztm-7}B47fq_I5nY(;0Q)}pvQF0jhO!T?8Fl;TYGOJ|MhW4AYWmF zunA0G5|BDYIU#F`k1uYpk77EJEG#f3*IbE&`e~!Xh#p#m-@tYg{Y%#`eY#}k(#K01 znF^`XQUhFGbOKAAKCskknTN%qWp3EsB|Db&GPNR{sSG)VuVZQ{dHzT7ERSbyQ@tD` z54kSr(q^h0gQ2rj%gW7|9e^7_By=PM5__P|-p`t*wj5@!{y?ZCOluB>a@qUY{OX_V z5dUZnUVE=|$egHeRJbUM88<>e$J_oE#-CB9*c)J0^M+N!_ZMENstx$*-&p;@Y=%x*V$=a%P-Q7p{uk+U5I=ZnH2RW>c6}0zk^qyc6MVOX4RCp znJZXpqK`*E8SyY+xo*F1pRTYQ?JyXX9J3;(cJxzbEiDY{s_yc)(%ZnMjjk3?W!7Pj zSWw-Bx`aBgRvCJL)9rI5w$0hF=B&?-_&}YDai_*jO-%3qI#@99FtIzaDUs!JJ_$+* z>KHPuG&3@@N!gR!)8CkKImI|7DEW&;wz=Bxo6CrD|GMcZtr;yTw>wRApA2->>6nbD zu3?W>{Z2;U+Ex{f8rq(#E zQRrnFWX`czZ&sS(VHS`&LHd`QC2!2in!CXIe&!z5=Glj4%X@1pk7ToeAYigJlnrKc zvk;q6wl{*p`>ONugGv_6bDyyw%IP&Iuyf5#U9l_AA@^!7#V)j6k+amsBd;{SrueIe z(te|(g1z2RKmLj1U2|{Mr2>WHc84eKy}oVnGdg?X|Cpx6&yIU9&~cb=?&O%~Wauby zIP0e@m{nd{Fc4~AP$hUXHMiiJ<5Ne=f|{hOi`^F=aD_aUx&B*prEbupr*MAIt|H5# z6CQ_3au?rpl?5y+i77c-GNRPjVthVW6jpNELs*hndc+&{Tr%*v|8?j24n@CZ^8-bp zYGwJj%C~F)X!7py(yj3H@k$w65u7xu+)v<9J;|Tv_o*tXMpmP$=?R`4TpzuH^Javw z?k6kLg~KspKg}HJy_NNtGqcOtdv(?q)`dFHI@y409TD)lJUXzIo9lfyY-D(ncRaV2 zcWR}DM^W8{j`eIM`?vc=uL#aC>m>V2$WWhSqTc}3k!SumYI@9zXaV2R(SRBAMSM+A zi))7a$B@N!NZn!9KF(>*8J|JUrtDDzE%BCuZh>5IO>m_)x!#vgB^dZJ^HbZ(dfSpA zmb>t4eTDCHQ%7It{!jH``dw^Dd!JW@nZ~&^zRG!>%MC4kJyMvQ^38g43YwyylB)et zFhAu~>ObKx-*j=gc)Z{C)B_o-GA@dLvA38h#bWU$@wSo&4Wk;bH$*gL=cHwaFv4a@ zMu{J{)O5_P!U*89^NB>`yq~!_?1xQrL^h*erTC}oi3~EHLxIva4RG>t{>S7B{~Ipz89K|DFnfl0@zpP> 
zY=7nSnMg=|WoN}bvscJYmxb*8%&b)%Ka#z_PgfP%k>m5HV~%Pn!vcHOwx9!Id?p&k zjpEcDSa*yy9OB1$?f-%`+p%Zxg|dL*;ap(7V+%O%y^ZTi>;ABOHCuXvi>7i0dLJ{Z z?xF#O!n$i==b_wv{o_-MJ3ZOIYA5?4jDCDEWNmmf{bc7h&NT3z>U;lf^|QVs0a2ZW zoV?BlU|f8dqSHBqd6QY`s;;2m7(jPG&sNWOj1wDbogJ&LiiKd8yW%~fDVRg zLK_BZlC(``B<*3poDd+GHfN95$)X0YB-I?ZsO*+GrSnE*M>73t5!0(TLYo-nge^`W z-m0l-HXc?=J|u63xy!__AS_ zn#g;VcPjzFvT8!yPT&2BUy?c!O|jdFPSu;8Hu%1&_9@du0>Dl)c86Z}rIc|g2mIs# zHm;)3KZ^YWdj@WqPc^J*J5pvcVaU(g=BU3adAFBu;hKij>=jvi*`4em#j}30_7!VK zU2Z2~{*<+ybEo7>?eE%KEEY$_xocLyS*W9q!H5@aI;>4KS1!}3rx*qA+$lj8C z*u2%et!`-w>5}TT#U-=9KBc82$GmRfqUVmWgPabxuTjNbVP2>SxB9oAxx}!7?tK%G zSRUC}ShczAmghTfTb6c(US*}%H=Sti0hcpQ%|Z8A)gdR6EBaY%-<~O5M@o&8&LrM) ziJ9=PD#yQ?LoM85d~oL8qQYX(mT==O0=lrHtC;L5BY>Av(ek|=$KzQRpf4!0fKV6emRg0=$`LFbPTsHe> zBqzV|P4dF@FJ5Vx>72)AD;liZ+T(`YfkhLv8xwDiwix-$8=tmonNC$|UsZZf?R9Z- zhn4C~nhCyX{@ad*ChPrA5lCXC^c&ij$`L z`19R-vI_%!?mL705}!(+jI=}i^Zd_A=7POdQv_>#+!$?sw4gu`(&ik$s9vYYv0le_ zjBj5(RKLGxr0_sbp>LmXo9_W(z2t=NW#N6{W#8!$IZUFvNMy|9glG8){X~rNs>^SW zSmpOlJkWEb>ayNtCSUdnld8I@X-5-Lqw%+5RGn@jJ4VqN*L=ZiN8*yI-HapVoxfho z{91z+@w%73H|M;P(;k$~*CKiPg$) z@mrLxA(t4w6wMHsJ}YABD|Ra5CS|Eg*m0_Ql`Z4gTh0EeUNrcOy`*p!$4BGL*~a)l z;u2`i%dEYiMd&9mAjyDHaPNqo!l<$R7$tQMR26y>!a_Y^L!?z$%&5>FGHR!NZ`q%rQ+rMB|QcU_Gr8p z1cL|RFs6zqx7ls;GIg}AvF&bKwXL6B-NFkC7ugrsPh0e8(bmk6g2M&%!b0aS&H%8F zS@%qGF?D4sp*lO03<}MQSGy8i9*%`C-0|K|-`~T>$$!v4kaKRt zwdnC9iR3kwhdhQobpvh%$P?>=5VwWFFWb*TIj+a2XE1b905A^u9Ws$KrsfDopqHP# zGC~?zKkA;+CaNdKJF&VNim`~}c(3Lhh!2XZh>Ix&fLbT-ct>7Fym@d=d_nx_L~nL_ zQbE*jrwfY!Kqpxo9HQ8iqDUQ?);o1+VsteCXlEOX6MDC0!;v|i+IgJ3)bt$T>J?DdlRMy{wXMlaAGG6_LLtiGk4)D zpu&ox@p&0V0C2jDX!hKL#r-U$9Xo_wAZxs3#WJ1*bh9fxx#iUBUDj!KMm$EEP3mB zwQXC?_EgO0SedVI+so{C0KhwNSHVVf1?aE1z)up~N_gxu)^w@cLf4ngCrvh17qmXC zU&C|-c86`QTCez9KUO$SRL~&w^%phPk7?dn`FNR5iFxa?*ijXS2V9EMHI*$hn|Bt! 
zGg;^u(%hbI-__bYF>xlyR;*8#vk&FpEWQ@IlGEZn-qWS9mvPdGD_wP}B_J5&x?Eg{`#IO>7CXeX?fRjCMnN^Vy!y)?N zmC<*ObET)JrZeG%>J{U%-J^EYTm{(-U*$M77dAjr7)1`z_!FN=tOyHM4=h_&J-nAt4!#Xvs**Ih;^#V4y5dm9y}$N4Ws--ArU|pKqJMNt+ZmKK7Ish&6=G zNU%&`H{HqLCi{3*$)-fSHe~DXb1`elz|6Znc!6jHZ!bS^z%eeQ<%eW;f@6z$+rj47 z@j3PW38rzF!6oMr37_IW6_65J7bot=_2J7*ZhDY&S zBP^#}ql&xO^}U+c_a}F6?!VmPc>e`8=yp3e`mxh${ngRoahCBW@dZ3vWk=Cg?yhEC zxm0#qzN=+Q+fk#p0lKv#gL_lQwGI@C624k5Y*_AF+i;~~S4~yl-SMOQ|9GxlwZY(* zK|#&3DG|ZPB7Zf#F&y$qN2ld4vHcSABv_2SH5|+AGnaGU83I5^mTdg;YGq>-HYq|k zg72)Kx~;4*c7w|Uw}#l2F6KdSQbnk(Bs6YaqB^C13;@Kq=g+FE-GGk?e~0|@v?o%- z)<)v$M?_lNF0XAh+Y7B+?Q&-~{9xIz=DUyXy%1B!61uz!RO(;nR`c!C5YXSM~P)- zv1!-yuBe=I^Qt#W=jzWWCpp#n0Pru`aGEk*M{{**3m(=xO{9%nOv9zAz zxE&fFXrIO=);`;&_+IQ-R2%aM^Brt1UKJI@EjPOd)iQhc=Sa^bN=W=)>et9r~UN=01Ngl9)`@1xS@Bb)lUupoU|j%cp*%j7V|CrQiMa< zXDzo#lwp#=F_P|o%=!K|*2a{I`&JSf#E$WP?St3gZMd*{0#kz>Dm9l2nwYw50%sLp z8OzjSZgfW~r{G#jNlruST!uw?UC8IKgm(n5%L+S2@lrahiA!a-WXHMNmcfUti=PZ=&%UmMg&2+as%turJKx4<`*#Pi2tB`ja4!|k~dXR6j|Hhpk zT{gATHCvEd&f986g++@wP;z#1wr}F-f9V znA0q+OKtvGr_(ZvuTWeljOAXfSS!0EJJZr7V|Q%q^6y;0|5GGqp@jfr=?tAg7S1_{KtIT!hXED36CR=89d6m@|Y87MddD`=KMgq?vKEUFiu@3a0 zd}Gzow2j$@9ekJfKv0oEWKQ6;)UezrVijJR6FlO6|KkxSy~Emf`hBR~8O?U_W7CR& zf%_KkC1*T61p$SRa~84JDVAt&FCAgBKJwkH(NXit+k}A2R*ngC4I6b)fxH zrpt8gqJ>Td(Pf~&t7+Q`woErGddH%Y=;3jHte$Dx&sL6Dz;Rh;e7WVB!GyXcFptYTYNQRcW9>ln|NoiT|qs+heoCu1gx{o0?z z=u1tQIqgvgJN|A(vv=H>x893l0e~ELzRcQea?#CNPS!fsg~cyTA6735D37g+T`z59 zem^+T#Pm6TAQtIZ61$!uHLtF%Nlf-eYmdgBjAfUxBmO1cja~2FGww<38}H?|-(&fD zCUNyu3*r+CQp?NZVoU4v)8l&>lHRIdeY1xKJG{d64=6zE;P`i83**e{r15Lx0pPZ| zyH8A@bHMh39Y(GR7YjNZR}^e42v3Mi_$2t0Ak+Md1rk3byh|wPoaqv29F(}yx1y}G z$}e%NOJmvdMAHd6NpTZ${jHO}g{CE~OUibBlJp?S)A)DNaH@Y|)KY=p^?{|%(~?&e z9d}8tV42QJiH+|!9npR^IYPgsY<~Fzv%AT^&9<4XDz7e|Ykt%8S8`y*`jp(1=Pqm6 zOn1R`FeSh4ddkwuxY_Mq<1HLr_n5Y3T#4bO%Japk20~q*^blR`o2gmZPLnPquN?6u z6-zY=ACVR?*(B{!?S!@qq^n7cpYK)`jqRk6Dx0ANjQwb_`AuNjiIUm3qMmMs30!77@p z($74qrTY2EeN%|LiF>BJ=*mpy+dL~b^Pr}pq9)U4z@=k#=C1Tiuc(-kX+NuqtZ((< 
z+VeUbvf2yy=DBXCz1&=S6L-w1uJ>W#%zdyuOTzA9#|l@}J&jtDl9xTJ%`bb^&ubMe z*r98PWIMVw&pSGIW>#9ZWqxO_xz@>?Dzlq8`gUcxkJ)DK zi<#Pu(&UdB0CZHk*i8Vte5NKo&3l$7QWdzn=Owy_=bKi}<;>)?=a=Va<~Q2!%=clt zhF;CT<1^3vNj{bTI1_L(FZki`(;=W>g~LjRP$rzVu;6)gZ2_ZfuykNO_j(L>jTv}Ipan8<)`|dk+k}P~&r^lsuJT3jhdQj?G);a4_NOWmR znYE<8?10zFfP-EueD&A>`>f|Pmp7h{`mM+>hrU@oO|C_=%a@kFDIa85TH*@b$|iQZ zPff+5_PWZ`(ac~9*tJb(Mx}+U*gA>#h5E>fzLZe=pOyNqSfxE{s6sQr%O^hY<3K=V zT4jV+U1e3DhZj*9;CsEY&nMp3&F7={4sUPY$I%JCiM~F*53@t78mm@UwNzcGx>T#8>?@O;8)q~Z1zXgnYU!KBY*1u?4zJFwO&cIPWo9dK+Ih+}sebp~J4^|n}_^Bq= zj8FDg&8qPo7?EIC<5N?|K3EiAlUNha3b5BN@~V{70KkIUnIQ}0??QCtbD2b}%eCuj z-SzBB&epQ)NZ%Kxvp6#gPP*)7ce8)9f7DK_(>O0)K%Fs!-Q#UBv2g11= zZXvgVD~Z(gHVId9(-{fu4z4!Oh8N9?WW1I<-nZOd-iMZ-uH%ahikI?M@^;og|5kzyU~=~hQ7Oj>u||MraE8)*i+nCSr(`^GHHiUTYKMKh`r z@Y!FKyUoUZc+NLVoqW^?>^3&vby>@yW>Tunt|M0D@ifWgC-*M3-||@-H|~mINPv&g zm>WKuJn}397MP8eN?gpBC|;ncquX?}@AbaDFKa)%Y$GGh2d!oY0YO9)b@Oc7*IVHu zsa@vAW0><|uBH|90Q{eG{huHId+EPpGyo_C6`9_R550c;LH=yIHr!MC>BZrFn<~sc zJXp=zwUd9g|MtpVoC;)bOL*bGxKBvVzsi`~F8>bC3vBsyFn^Vw8?z|P>H$60&WwHjIjbu&v-HK|KF#mHFn$RX68{>ZZ*M(S)=L#O9PAmUoBv?&&S7+E=ixh z?QH&5*z5yDMfjd+15&c?Xf4o+T55X2=X1sQ4OVU^j*K3qHwGL$ff!8z%cS#HX$6fh ze|G-Fk$$7;rp8#!*J1;+`O8|qih$88#`PN6TyW!PeKZ|6)<(OLYpdB-=2jL~Q>><1jUFB|Ji&gVy~nWEaO!aS@M!xn_G9hG+3VTs z+Z)&$+8f!Aw;wlbJYy?x8~6cC9nP9DW!Q4KU`FBpV(&hKo65s9(E}Da=ZpgundX>i za?Wt1qhLu^KFZ0GB}=ko$yT%_S+*nx*~aA1P0rBd9GcwZoO6y%#=0-e?o`dp&fc26 zwI6QP@U062g^TXS|L1w$UsAWE!ATQ(jO($!*1&{832})JV}#h^T1#rxjm5?4#TvyD zVt>@C*Q0Ka6T~yb)5N31Bg93-g~Uz7jl|u=UBr=EfA_6LtYxfKtaYqS ztZl4atbMFQtYhroetcpqDb_uf93#eh#Cpbh#d^p3#QMhi#rns7iw%e&v6R@r*r3?p z*pS%J*sxe?YBxEu&1(}LWL#87$keSFVWHy2#B!rA$2nC@cG=z?% zAq<3xu#j|wjc^by!bA9o01+ZMB0|K71m1vTh#XNMN<@XI5e<@oXb~NfiRcjnl7$!% z6JkcP5es5Pau6G0M{w7ve@dh!@F63J@RSM*>J85=4rSVkCrwkqAR4w z0u~{QktN7dWEt{1@&~dUS%IuXRw1jAHON|I9kL$TfNVrIA)Apc$X4V}WE-*_*@5hY z^nEw72ic44L-r#Fkb}q}Ki_}lpIZ=G5?T>j6WS2k5^59L5!w?v5IPb%5jqpP5V{h& z5#k96ghWCTp*ta&KqT}a^d$5m^d|Hn^dEb3^R|@Wmh#4;>QqS+Mf2vF6JsFE6nRqq(Z&ptJhf3VO$6j3B4qssV 
z)8}Vz;WZRhNTne8WNJgFU zn`Mptwf&%@nMdTSQ%DE>VkEU3U54IfY;^1ozs|0OzGW`XNDU9P_Hq}x2Fx9g>8QJD zcWHOhzOmZ!-&H=>JaF{);&8iiVf0%?W7>Oa0sD+@ciw@}lc<);nYY8*5-rXC?(C7# zA&o%qs_Nv0iwCR~RH^1hR;YYJS_GXLeitRVn^fJUZlxpaOn!p&y_6tNRlisF*08hw zGNIPNm2X{apCItEWTR%4@wn-5)$fAZg@K}t<;99M)O@TJ{Pk`UYcu;Jua{tj=!0ml zx{bD2*Cvydc`h$FXBYiIc@?rle1NL#A%WX^(^!&DVwSg^v)nz-&Zt7+fiSkbTG^o zkqo4Tl-JaTY0X*t)3>tkvd>xy;E2Ii*(}v2wMuuuP-NL-8D%ZV+2FeFBDl@&*Y5Lf zX~D)o@3iH?MWt{~I4J8DA}xu9urNI^8T|zR74`=K4}l2=LOp;9G1Dr5$S%Z4s3-o&vhX2VUpYEeQ?UuB+htGwx4zE)3}N7o5n;3GBl zGtOqPJuUo`LT%9Db3al$D@G_i6@BC!?4Wg{=SC!0%1-MwFG*h4o#I($Yd|^ddX$xm zx=9PMbL>WxE0o8SZ`?WbKFmkJ1Xi-Iv-La+Z;2pYC=%`wUKin#vofmut9*=#s$!^< zv=!RP+Vxo-V20b+9J4OUNwht+56yc6BYCN7zI&4&T4g~`7%MArud>;k zQPmFAPOgQOvgWb};12wSc!X-9K4P3|S&{dxU{^p_xLS1Fy|AJoEsnN|PLng0xY zlkPkp-#@(YZ0Kwxp5`dwqC?3KsUxVD^-;r9^CQ=~;v;a|>|B)@>m=mmucuqBCh~dq z)UY=?5WTK^9Kkr#IF+IW)>3s=g)p?z8p!lTUAFdeT`ne9m%gKPrmga}4lK<(ReU!r zrTTp1EeA+@$So<8Fe<%2^gfh5Yxe*k1wL~kOAa9)^HIBA^v(2(s z*=Ode@|wA4xKHIb083a}_@lT}WoX_u*Xhuc>HA~{>C&^D+4I5vH5Y~>JoH%gQq)FTN9ss!N1l$Uu?3L0 z)WC7F`_z`SW;8z?V=QLaSu{469b!M^KIVSnX7k*g=)BJGQ#^4tAb>gT1XI&an<|;nJKTXCD{E zJeAF_Kv_Hv48Sx|LN`SLTeI$EK{Je4;j>U1Wa3?`V}lerWsr znb=6m4BC&haaMCgFz?*_Nrr}6owKF!2mhL2`JC(eT_Le5uJSngCQZ!P!%RSU7x(bC(#pXzB}(H8P* zauHTbc8tbj?qscJEBLeVMWWGigQ`J>U3WC%(6@zKrW#AJ1hL<6@AowfRY%2?UJNog zs*8e~u?dVm(GkpmVzXl-tr_<_qdfcTJPv6FrI0p8+)LU<_K(bCP0LBL4=Clx*HjPB z=|~M=!zj1eAMv%~N8Oe-%;<=uctj{o?sng#aLwa zY)&0sU*2VYS3!Nz011b^PO@9_L^4l?t8!G$)sqd=E&DB>Z5{1>?TbMLe(#L>c_3>` zL709OI0{{e=t*Q_mMJghZF$Vi9dz!GeXo56Rbpiz7G5XzE7mWL}M_8IT205NN z%RKW#WOd&B=6q&h-+5oHpNosw&!chZTk0o93hP+YIV=vsKZI3s*Ce`PqH{FO3-LzUTdL-e!2 zE9zVN&~<>F!v7)|iC&`m*fa4ISwQtvf5-5fRpne#M2SSJT49%|N?MHV6Af3cw%4k- zmUfydr&X|1IJda3NVSDE_(07*%L&T_`|R9vc`u#uuCQl{Pwx*GW)%+zrG#fj)R806 zxsiP+i8L7-Pc7v)!!O9js~cyC3_WsBMcgG9s2y2**=IFe%Wt{Wwp`|d%&!>L_a=0l z^o>K5t~TGdc5%h3j$kYKFCI>=<+7x;Uy?vUTwTCq- zeG7DinsZumI&nO_0PhY@$Irt(_;oxeGK=afgX}0POMv-+JoCx0*NhnQ8`@pEF 
ze2M?Gre9{#a?K9C>yiInA}Fr)j=`nr~Kj-+4U(`@nxb3;!3=Kz-`@^vpVoBYO*(R zO)t2LPQxZr1=J<%I-GW#o7^7wRD8X-wS2Jrn0m6Nz2>|O61Ply<_SrjbxqDS`;6S> zc~)m<*KpVG-i*NM;6Fv#py90n{m5Ouwt6KX&6Hza3q5y}^R#VN8pd4CSt+WK z{*b0CuIfX^Vq-H)n&q~o!j@p4WUrq;I509Q3@0pjOg2(ZQtwe4l2_7SF-fe^`a-st z)0KK(@Ls?XxkY0|J4L6&PbFWZ_qnGOKNM}X4>SGfQ{!h-sr9z)jD4y3bl@0Sg6(G}yO zhoGlLCDmiruv<7ba0c+a`~<;F;b1%_eWSjWaoUmRyx=PIZ41^4{}v8Jug||;eII>8 z?aAtwQxut1`DNbCjGj6wbrRjnX5#bl^Y~)*Yi&|i`#@^her-)4)%_Uzoc|2mzbPbI zczsC8N^$<+KC|u(U6T(Be%4Q$J6gFuFdSu4>!nE~g9#Bn0OJiso!pLUcvhQ9_V0f%ynYMAk8AesEoj4(R$B?7Bdp#EU&WX;I?;OtN^ zIlQUrSPsYd#fs$|^0Wv)wUk(gy6>isGxtZ|p;hcV{N4B{>08+owJP828yP?&?@Ru! zYOm~}>tlHAVtCdCYok-J6IdbrA9g$5AfA)ghChU_;&0}^6SfpBmUfk8Xes&;#u4T$ z$8cx1(^RlGa3kO?dS5iB#9Usd^5NXW6o%-7sjbYXK4mxta z-PkFd&a5NfEc&LHNb0A(BHJ12;oWSfm5x>Dv`WS&W5hzdwC((*;@P2>p-Cm#n2On(MdwuTzG*_aU5vYe z_lnt-+jU(@dsQ2>i1Aw##ahsYFd59%ECHv4Um@Yh5;Jm4x7{y;ZNfjovrCC^`Fthi zcV0Xri`7cdikyzE#!g|iC@ttTCX>m4L(d2$pY15KvtR-PDHXD70Re75g zGIdtb%u-yjxr&f$HZrIJ#qHwzQLZ|Oj^ec7KMmEPo(=jPmi!p+p~QenFc)hq=T*fw zG{8E=FACh%u7{iXk6}^T*RUZoi}w#d2hPPF%&fFcM8ET9qBj_e(^d!$YMa`BI0iZQ zIyZ(Y{7dsca0B=nae?0~yQdzb8JaaXdrG=7Z;fkp)qB!G-c7tj#E@uZ2Til|CmlN+ zy+gAjP03#vCFx$dK_Slkq5m`Zj(I1jjCKs?6+Ey!Huv@nAW10c)KmBw*>SbqR0jCU zP?yL1Jaki$r`F^36GtR@vN38@Jx#w(|I$D+j?eAq zdg@&fJd|@JqR4Co7s^MA6jCJ|hWR^tqW4vPA1X%eL36VTcsBkR0#=Ny+c19Hy?Ly_Y4o$bIyH zC3u^6Ms(PC&T`q+3sz!tJumb7&)rW`^MewOrmgM|Fyu#8k|ZCqnxa?GJB+?8Y5Ga_ zax9KFm)A~Ug7fu%NHU}unxXn0Ib`P?XO)XnxDZ77tfDT_Nzt{WRbr{Cn@i|XdY^=A zbEt~tl~xbK+nl7Otl&@PHI;=d7H55bzW*Ohdp;fCraprz^d;5xC?jcEX`7f!g&BCK z%nX*_cFWe;{w7)=;W;{@Mv@U5#}x4lyhi*(vL5JYufLofHP~ubek1Q^rkVc8CQ_D} zTYB?zNaTjBsrsWiPwYEF7ececdr8MAz0+&+*HqjjwRb%$iDS@33RPd@4PTqk1nX@| zDU&X2gs+vJQjN7eb$2I!Ce@`*p`|dVv*xkiKrgB{z%CnbnP`k;yQHz~qWZ4-i*8_c z-`rb{O|B-M7oIu=t^9ogGlIW|&z1bBd_Pw`kAyBli^yq|8I0wu(Y!1CtNgL}W@%^H z-?DS^5-ica(y`oGt1uYc8_X>k7)_w{t87obNaj$NGdGF{%Vc_~d5ZN>9Wvx-$@ zYpG5!U!^3m5;OVw>*k&n!g-AqdFdU}mtjq5c3Ly(ZPTYvWyy!R&sbdj;f&eXG5KEm 
z3p-x1V}7V^TE%N2*Sw@}~`UJ^9~%So0I)C@HYGo%>FmW#g1z>IJN#M(pAqJp+8AIc&% zL;ZoVp%#^E$TnV-wVl0cR_>RBj~82yy-a16C!7R#v0 zF0tiDZ&-^KY$as~4@)@t`RH2o8aWX?iM?W1Nu2Usns~zm(;9OZ>tmbNbuM5FpNSZw z9V)9SXYGB-mDCE^3C-V`gR-7D`-GN68FLrR7i5iRR%r&3Y3@#)8T1b%y&O;M&$YMN_t{dfJ#oZ60D$IOD$VGUL~-(x9s z3@p7A=uZyPG6PE0pRO82jEZc!^Z(8}d=q{Pv@O~P zO+kmEqtLPF1T+n0qVVX2s)7Num8&Vg7@dzUKo_Em(e>yibSruqJ%t`ZkDv$8%jhli zC3+kEgf=9llMoV{#3eCEQ%NS0j}#(Rl5$82l8zK1X-WB{D5(Q>fpnX6nskM9ophFT zjr2F^0qm+fk$aKbko%B{rRTa&K}!at-MEtw6 zV`q~cHE5*vOD7F;)9b1g8z*b_b zuua$oY$LWF+k)-J4r9l#>)0KPp8phkiM_%8!D=u9r7@)qr5U9sr4Qvd${@;c3W+j_ zGLtfk!lTeAN=g<*K`~JBD0WH_w0TP?nG_!-LRk(D`fAD=$}VX6?x&ohT&CQj+^0OE zysW%Kc~AL=@|p65QjgkznrduHZ9#2LZA`+i1&Zt7(U5 zf6?N_vyw6C;#wBht|bP;_P{Ws93Cxb@Kpr?XG z%>`-NL0?A;fI*!D3UwuYK7B3S1~&BydI5bo{Tlr+nAC^pH$a-cKyQ=wiQW#3=^ymY zX+z!Ic)L4M)6S-y2YLEc+LyFijGyXsLq-EeV@6X(Yeok~JH}CZ7e)`pFh(i^WsGEu zV~`jM#y*CRA%xw%pHa$K$k@bK%2>%*&p609!nnXV$2iYu#Jt3~&uGl-$n470GZUFO zb0$;A+^3>2M>8!c{F2m%LJ0V_4^`bQYO~{!-_0mVhN=saXnEHp|JH z$KsJzk(RJFvW~Exux^0od6jjIMNE$e=dufkmz_YmRHsi*ACf*4q)TP`ob+8_T%Jfj zkiHvq%gbO}J_X_OYx=kJChW$)e9OPn`>=QIa4?&hsq&yNT8CcIT;)rHE}GQAc)vyoNCr8&Q8u&&MuI!4|Dc#j&iO- z&-w@FE9WIA$bH6X%x%kU&*gCALBnpzZNp9CcH(w~_Vp|-g^PlQt>zlICT;=O10wcf z5V2Qt*K&7r!`wf)yST@=r?}_2=eU=+*SXKR@3{YP8}mAWH{O!hnKzX8)9Rkc8^=R= z3^2R-Je()yP30MRCLVOR=jVa+yq|Y~_ZRPP-WlE{-hJLp-Zjvk@A2+}`23Xjg7=p9 zp7#b?-Jf|iyf}VCK9|qoH|DqGxBDfort-)0r}1a;QT_lv#!myCmBknGHT<7ew;QzX zJbr-h<3~X4o)2PoC4b>BvwIg9(%-<2{>*>N2M<{g<0lFPf{ua~phh{vh}&h!Z9XzX{$7nu60jRX9f2OV|^% z=CQ(2!hXWR!qLJB!bw7d&@5!aLL^2knq2X!PX#Zo`mhm{~7?w4+t9s~bym-LWyoAi?On)Hrz z>@NxNjr1SsCuwt8J6Shb4_QxHYQ=A|v9h7E@v>CeFxeEaMFldJOe7P_{4$+v1!$s5 zSrA;&-(`zrUfG|r*RsvB7odqgm2Hzfklm8q1W)vpY`g5V?1=2KY_77KdCDHvHYTZsG^mEptvl*FDHUKdPn|R-b>L((Mi!=k*GK#wh5BTRByUD_KgZa+)$#nFP9L5xAcoUYMs%(`-m8Wum z6Y2&fv{F^Bny*?4PUtGtI@Lzi2Gu6jpQ>G|BdQas^Qzmb7pmtVJHA%!ncqO&THQw7 z87#-{YMz>|#??x-OI@U%qjsvR)wv+68`LGBtRDhR{k(cNi0X&cH`QywQs1NAp}r2D 
z`c?Hab-ZS_Zzw3_qcy4Ekq-fXycG!LBCyBBnskjt!_pLM$~6zPUd*xH0LyjHD5JXH4iiwHP1A!H2uLM|1G0qh8`^PF&W)KCGVSI%9x!I z4;Q#bfKr~E5zFYBk(%MkSP3@yG4ROGgG;_H;}GcO7cwq_OTO%vOg>)QQaelcBI9#L zO~zR5yNnd=>x_ZglNn>Qt+k&r^1&p});hHgtyas{zSnBBE^x}3+G1@)dq6AIZqhE% zF48X5Zr2{suGHSvUevzOUeI3Az6JaHy0)RNvF@w3rLMNFxvoarS=ULIrR}Qgpo`Zb zx-q(eI!q_hRq0A}>vV5*t99FSn{;bo%W*JRNhUqh2ihW@IV&?Q zQ=J*g{Fdp-EC-Wub>`*F?U{Qr&t)D1oAERVjW0lG{3r7}D2=hqrur88&R{k6*Y^W; zbD(~tew2PJEIp>^XX=^yEHFu3dMDhiDbYvui}io#x9d0R59*KT59v?q&*|^!@96L9 zzvw^do0Hoc8X3A6+8MeU1{($#dK-v_F$UB?GF+L@FenXjgU*m`FdA|UR?wC=8BQB^ z8uo#@{0g+?#aVT;wBRg%HVn)f0XFg2EI#Yc zLe^Wuoviy=kF%a;y~=u*^(O0W*5|BGSp;L8v97U+v6-={u`O7x-Hbhq{fwiGlZ-#N zBGbYAn_^5iDvV;-ji}(-L$=Xm%r_Povy1_w-w1mM;~bF8mVsn;04%eE#x2I(#%;!3 z;F+B=9ygvdUIoqUmazr|v+u^*rZ>i~#&^c|#>S?mriP|?Q%}&p62SmVHVp;=Y@}(b zX_{#Y$X`s8(1e@hCWT39l9|*dokpX(|pu?%6!>;&3war-~7t_-P|O*Rdx?cLUtd^ z1gR^=?kW(031&~c%F1nJ*}2JkUZys#SfiNdSFI%07I%<&XAm5 zIYV>EIaUy+X6G~pJ?f_+CCSOkk>%(>k5YpqH7}IfK8Pjgz@+Snd}Bqgxbvemb>w!P2!l+)hU)b=r_yR9c^O!2m1w!XH}wpq4` zHkyqDE)&_twDE0XTbfN`Q`?MiBQb1SYFh)c(kj~`+j83`5SBK;U8L=xEbRkb>45Eu z?VRm~?VfF9^=sQZ+Xvf6+gDpXdmVchds}-W`(*n}`)~Gv_ObRM_9=FQJ(>E z!%nh`?4@?Uoeh`y?DlH=4v?6Z*tgh^!gan?_R+bE?d$9}?Az=M?Cb4U>^JQX?6>US z?C4TQtp&oY3|6}A-ThIC*}^z9hf^JSC-4oP0Quw zvU1aNExF2EMXoJ3C)b^O(QeOO8_LV|<@$5s!7VqOTbf&z8_g{S1!`_?YTgJizGmgk z&O`Iac~~AjkCw*-aZ`{d&r{^d^0M<>dHH!zbjd5o3+9#O&Ci>cHz#iiSX3MHw&$(Q z+nTp6FSGD)-jTfHpiN!)AKKLWyf1k-D(g82VA{2EbZ~S8?Jg0FyRnXujv0d>|9n4jaBj4e5RD&x#2Rz|0wi+DahoA`G1WWju;}YDFdJT?n zXJ>C`cjo}-OlM(qigOIO!I<;ABh#7TlsKhe2)ms=XTa$IGkBr1+_}`b#L0J-I#)Q? 
zJGVIZIJY@>JNG#cICnUYIZrvyI2jG4BrV8Si%Q-`za^mltpo=!2=?DP(EpBr z{dX+CyKhzgh5TC}|Gfa~?>gMLX;o0Kpg}>cg1CZj`7J^FYg+InzY!RJV?p>E3F=>O zQ2zu4{DN%o{m_D}0!D!xoIlnt=TBKs*SEM}0XTr`K*QTzu&?0nf=kg=1&2V#J6dq0 zU}wSWf_LEJHTN|I74Kugr-Ej_a|JDZ?R;&0Ng(8<_=bX#*V8xBH_11_NAaEK@gYTg4knaM>ZdZLbeP4Waz>WL> za^yQ7!QTYT$kzUe{%(GvzYo}vU|PTy&;-;0Jm3wu0-nI~z@k7Tuq^Oc<`j+yjt`CvP7BToqCs+y9>jy{ zV0O?7I+`DJv_P;ZSPVj%4V<*D_?qC#;O^kY;HKc(;MU+@!TrGp!TZ5$!5_h|!EZrA zQQe|OMfHoC7d0(vQG_mNU34ngrl{R7;V!etP~2h&Xt zs++U808}?mabav`8-B8<5t5C;K>rj_ae5iA%NvKC?P-tXmcxZBHLTF5AR%m)?N(c=xLYxpD5{1N} zxuFH2C80H;b)i2)*Fq0N_d?G?uR{gl523o@FQKoYmf^PH#BkGaw{W{~<8aUL(D0D( z@bKplDNGJyVM@4JczSqDI4#T!^TP75Hf#znkLts&us!SyJHmmmKO71#53dVv2yY4B z3qJ@?!>@Q{D4AR`y+m9xt)#qUY{|?La>?A1 zXh~^FbxC^3nUd`#f0jHgXAeN$JDVE2RfZZkuRJl@EBae>Ky*TMR+JQ- z88t^aQC^fA@qno2!quZj}LApB;Jr+F`o$bF7 zZCY_L`Xx%Js8`XTqBU4~Ei0N=w5#X{!hIhw^M-?AKL+gjDd5&m0J)x1A^2s}%PY=N zRB%tw0=AnC{CO+b^M${}`MIF7{|?IACQ#lEg17z`Nb5H$URID6ysNlv{aEo&MRU;5 zQ$Yz&1Ot3DNZ%y5bgQb={xYuNh$)EH>%g(z3QF}sP^zDTI~@lGb1P7nJAkj;6{O{4 zke2&_3is1Zo&ZYnWU!HE{?d_WffQF!Rav#{mkPWK+}`Wp_g(?p_a!L3Z$aq&1~PAL zFl+11xmVQ@4BBB}%1!`bb|Of#bns-^;K%YoT$O;hssLwI37+bDFzWV!mU?#1dC*dC z&AI(cNBtLz?iFaLZ^1(SHm4r=pq;>@O9GLO_^%@9Z`A{;2Ubr4(~bn59SWiy6U@$1 z&@Cgs?8={7<#LcH_kl3E9kj_azii3-U@$%aU-EACh`I0J2HBhHwjd@p1UIn>sEQ3h zP;4}p03KpzP!f|tOhiBn90+FM81MiEApe=b_InJH-J7`$!KHf+v%-2{%k===tNXlo zP}kbdYXy4TSa9T~fW7w9R1<@^Mh0a~4-#4dXkc3K(&m5>wgN1$Lm+4)DIt zfc&LvGK2vc~rp)i?eQERdBc9|7YJ|tNnAa-7MBKp>wR$ue~->^K*Yq zuEA<3HGkHk)zE9wY8W+~SnGt8nl=f8YO;ufYe+ReciBT~hSvPtWDl!Jt@*jp9$qt| z=I0)JWX-6WpBwGbHDhXi?zP9(jH~&%*B)Opq2}jidt%L`8dgnu4ZDU@!>!@f{Lig- zSEL&Nuo3n zD&2{S#6%5e2LM5G7O6X}KYM#fVoK??VOvIYM?*?<36_s9R$y>Z=?dMWi&8l*H# zX_V49rAbQDlx8W-Q(C07Olg(UI;BlY+mv=G?Nd6WbWG`#(mAC|O4pQbDe)-@DTygb zDcw_&Q-~=&QhKKJO6i?)wa&3R$Lk!ebD~bId$0sKyuTJ>?@Lvd& zh1N#n(1|D)%}0ai4s zd1x{8Q^sL&l&_SD)MKy)h-W-!wq^BX8K66kv8n9O?B9gfaKhjV;r1TBX}LK)=f`_X;qK}gV#LXv(QJppO@L~J}J z!!(dbtFa2$r_`g=r!=5^gC#H^7JDGa)Y-ASsYkNR~n?=b_|{w1KRbEJ20C>+lKH1=V%v 
zxetb=Rv-OPcrl!+XX)9n(2K8QY1h))v3_Hvu(DVt79A4DQLyTH4C{nC zyf|nz*?6`29r*2`!PN+-;xwFsW6%?RB>7F&Pu54)SJq!9Qk_z@RJTy4s>iFxsmG|- z={*L&;YQXY*h|G5dl`Eh5o3yRpmC6~&}cWUHSIOMGF>;{Gru>FfVEnd^@Oz>8jn&? zDyl%M(52|_unCMoyP+eg5veVyDX9giC8;^-9r_XdhQ2}Hqd(Bvq{gJ_*i>u^HVu?uQd&VLpdIu@8o<&hfvTfsQXA5m)0)s4(OS@I(;CwVw3f7{v<9$&YDTL^ zYX!@~TWL4b1~CvuFGhbxKgIw?Z$?K}C)O~?gFTS^x>ot>*~Hb^74OP$>EU%ll|Rlm%O`?wE46TM4wBfXo zv{dLg41tzI0<;~*(NbuAY4Ol~c$W4i?Lpe}v>6Nxa{t+ksn7rz!yrQgU?k%;a}1>H zC9LJF`K%?ZDpnLS_NA;+R)DpP#btATrQ<<%A?$(ramGQuJ(e?u6aJNE2RYBVO?V+* z0c;IELH8$~FXrQr_*(c^_$T?7`KKV)eGcjFGkz<)CF~F?Ah~qlCde^0cmNX3TxdEP zAkoy~m3RhT4#{Q&&xgI^NJs zSWMOm3=2+yugd2^PhVHGsc2R4ao8cO4;=^{2^|g{3f<*K!i&Qz!@q}@gr`T2L{3Hq z!dF8n(LT|B(H_t_GN6TMH8hQWZuPXV-YtTCX&y;Us)klpKFJL|A|tel6woP(l5(M4 z6d_qjG-&hCF$HGEOjtJbbAG@oDwQ&fGJ=9Y7iTDCJY^4MCxt?#QK`@lEt*>pbf?YaMG7>m2Ja^!>za z5xast2ezGa*;VXnb|={MO@>~?pPWUUZJZUHRh$i+O`HzUPH4w#3+;rKylhyQRX~%p zoEPPNgiTyeXcaj4PUsETpc!z7|AYSzKgMq^Xd@UP7$RsV=mX7wL_tsJ3G{`QKs&rG zJ{BK?kHhmI_dbaq!uLW7z8yaY%a>Kq%D;f0!2iORY(3w{mX09&?8 zkfI;KSL3JgG0WHZ7yvqZ6vKPZ7ORl(?G^sF3Xh7l^JAa zS%GYxtU{)R1UL_J;9OZ)W|Ym5mHtYD^|J0NsY(W^WNXM5@2j7{&Uu6Sr23(H6Xb|{ z)VtL8)T`7NAywQ4>Ed?ven=I6ri*XYd(}IkO?q8@T)i6d#T)8N>O1OkT`?qS0m#$( zLFSdJpQe}T75aUUW?j`^*8i`+enLd~vntzzbK-MuH zvW&khR>&v5T5D`Hc(pFfs|&s9Wsb$L%v{tmqy@&A5+tjtmHP2P$TI1Rc zueRG=8{pk`nQNVEn`?<{y=#x_PZtVrl3gis--+VW(5F2c+5%ns4dJcfqmi=_bGf6u zOLSaxY;+_ve<#4#^G~9b3SPwmbPe0;zpucs1wwtz+w*OauYaR5rHqds`R?z;S?WM)iT#Q0!IxT^n<7viP zXg$$c7>fkmBQms$uE19AKJ<*@(jT&(LC@$Btm(h86znC?1zO5p%wEQ>4ecHhXEtXh zG$h>%>TXu836Xe8v>(7V@k z*8HO$uIUNcc5lsZkZw1Fq$Q-)!s-sP)jti#At~Kx zI0b3xCPODAR^uqOXAXyIt;Xz3UV$;d;JtB$*&`<&~t>#eIcBoP7A4O^4?<5u$HS+?`@;ueJ2)e9D&i_9!yDMF=`GFnwt#e}YD6LM2>ck*je4`_#eCiRAeZawlSay+>cxfQu3 z`75awG)H@p-$G}!F}WkT1NlAa6{$aTNoz=b$qiwVycAmoJvXk@Y} zTuL9{O zgTOcd9l#UN1#HM{&SJ2b(9UZK&F?PIz3ZIbI{gJJA-#Kg?{s2%6X@Zo*i6oP_9pgL z_Gb24_7*lYs@Vh%#-T$K?jVi$;jZ zqS2y0qQ0VD&~KV0nkJbpse+!+VaWt&0g+&*Op(r#PKG|vXIS92m9>X2a8u>epo2q| 
z50_7n)1Zs9N#0&QOODA$%Rk7-(99VnpD6DmpDb@J?;sy952*amAnC5|sqUrj0d0{t zY9%x}e3}A{RFkbSL&Kv~qt^H}8PNC0f*o{F6M)`_S0mAsX^!iz=pI1l;V<18-Eh4H zI?-SBKlGoWeUNW>X=s@B%5cZ<4Eh8#BiG1*q@QKnV?1m;YTO8E{%*+ew?nSK%~%L2 z`*BFuPn&w1|P;<+RN4+Y;^V?TsOyO|U1~+t^z`R@(^@+otx8_RjVZ zkaw2l9nRaIR}Zqvwvbk~cZ`Ot@-3``-@u)KmvBM7p7XQgy`zEihoccBjjtUqoiCiV zAQ|ifxnM6>Kgj#~x+!iN?Cet^$(!!}neCAv+Z*m43W?r0$oP7>C%S3*0Z6t=^Or)x z6@;8?ZobZEhRnzXd6C7}0rHt~8R49; zGd>MpDEdRBho<3T(K6T`%S4r;m7+}13TPX;Mb*$YTrPrBG@>feBGGD53A7L$q8w-- z%0*TY7g~Urge74}_;eC<0qGK&WUi!IvRZORass-4rzKozI`sPFQYke2zDO@qI?J5$ zb@IjXQs~O%$xX2N{{yZPtcM2OA!yU(%e}uEb<3e$w@B`i4^-`d2G|Q#Uv;r&m*ze6 zt~P4!LJ#YK<{PxI>Si3(oY1V(oYky_cGef@X?@cC1O2RXnoiK%x&~KpuRurZEwr?r zK!fX$W+k+^_ClZQ3$&UVL9gkxZnWMG?V~P+&d@7rZRltyFx1Uzlogj%FRRATH0w^* zM`-;5N?HvDqs5_@N#~Ntewr!i${JN`c+m#ez+gY(wF)L10 zP;A?_ZQHiZGrP~T@6O%6IzJ%mGEcJR7~=(EC#1Ou{LBr_Fzs0FXzftoWJYR5x=Fe$ z9Y@dCOY}d$=tr?wz{s{MOv&hrPEATCG(bI52dmMcbeG+{YeHQf<2@~ps z5=_!Mb$6wHbQIt6w` zCEmMi1(*wJa2Iap+{(EDp5UTfBk=o6fY|qA%7EUtV@fg0F@?bH7XZV*0a*SD%vj8N zOfhB$W+i4F5dJ>Q3Sj>0lE<3 z55SYX$GXgV#d;0o??={i)(ut;`#S3zi^}10m>d=-%878+fW;>Rhh4)(xKXZwo6ps9 ze{=tEJM!A|z5{7`mw%Oigt7D)<5Y zo{`XoNr@7`UcM1!iu;NCi2DJxm?!Nl9Vq2VXG%Lsy8+$UR!V{jL536y)MH<1cPSzr z46NfwX&;~++XMf2Lh1t+GE2&mP6i@U08C_Sc^haJ^p|&*50y*h?c}o+PK8x56zILd z!0?S!^;Qj1bpbYShzh(>RXzWN3*ao5yWfs4DVHUiW3mt@EUqJ&w4BeLchFXTYhU$h&z;Pvk?UU9OEn_%9w4eFfB4s z&1hhmFi_5r0@cJYbInY%3@9fj<|5EPb%E<~SuwUe;BvBUS+)_jziiHIpmS8VCiZsr zXTYj_1wQ4j8L21Gmz^S9=s|nZa!lH0dK~c0QUKA?|75NjPL_xxw za3$Pu^Hn6AiG7JfiGzv5i6e<+a~Sgoa}3i2JQylI`>&CWC*t$)BK&N;1V0m;mK?BJc+mAaMm!1j z$S!b5Hh@90o+zLol-W?HItFG$6L1<<(pJ;jFd8zZGdeOp(AzS;(JOhCc6}h}Td-RKRWAmreJv*mJh}(C z^u#|ZJ;<%fD-=isL?E{5Kx?ak(Y6a_3i!ZoQw6yKy}%8;HcLPfOcN1JSlBT(QuB0U8BZ%VpTx?Q>gN?i$QDUiVzffZgN-6+k1UYbD8md}9Z2N{@K zF0i*8IaNMY&Ibl}q#{Q#OgTk44memuMFg&urlJDXIu6)Y60ofr;7=<7MOs<&Ui}VO z(m(2tK$G6q+|iJt+Ts*CF^y1%@o zP2b14OW)4W2AIlbKvi}yR2b$Ow8p(gIZ*=lbQq2}yt!k^(cN_Z$6V{d_+Y7#&~Wc;E=|I0u2v zIS|+%I25=COwH5a8)zkb2g2rI@NsZ`sA{-&_%HoZ5ndmj6`2{yjX;egGCG0*MrB$A 
z70HUs0aj&fWJ07#v?Y{i0LKTSWEe0b!((G&<6~oEBp^oEvFzAjU`XZwNiqT$5@IY> zlu8r=V{z&qPjNi)CGi7Tiq8pbQkB#s8>Jejnx^WcngH+cGxag`EA>0IA-yiWF@3N2 zS*g9eK}FMwzh#ft;2K^A+weO268a4K9QqylH2N8unf)Q#kz>ypkh?1PH>MJ{Ew&~0 z7kF$nu(h%Eu?_zDY#p(+u#K>vF@N1NH9jBjz{m0af7X~8Ot5Z*E`$t%5InDY|17O5 z;Ave2XG;x^)f4FJJp$kADHvA|Nq0!~$xN~nyeATsMaBFxm@a^&REIW|CZlbmO#@qJ z78p8AD9F*k(ZPe2)0^pG4P*xsvupEKhihQG8%vkM+PEYk><#MNI%4j^hRDvD+6_ZQu+m4;xu$v{6M$I64XcCHo&IE>lT>0*|VD0Fzx8sO+XdPe0H&wV?cK8QOii3f&Uj2Hi5?n?w4rKB&($ z3@|JQw)mCd4bGfe#J;<3ugMiT>q2YucOHJJ@I0j{y&P z*m=;Ic9uKWJ4>A>oLilHofn)DD7KsfVsf0P9q@%+fG`~FY30EHZP?560XV*oK=Xa| z{sNY-jqiYg&mikk`))o7g_=Ui<1)wupY(ba~j21b`K``z_k5_u996VyLNWvhFk5|Kx%zY7=B&@n#E!y_ z0AF()b}+UVHW!1o;g1)DSkN0EU*HUCv$)W>$o~796cOy9hk37<)B)6?+A{n7xvnX3uAD zfF_C-EUVSr72J*7b=4yc%EAu9*1}(c&S3G>5q<#Mr#jd@jfH4Y7Fachz@r%l zo`@6421g_haf3HP0*^!r-iQH_A#CV1;*m>I1fe4|qykKib>MSg6(x!+B~B>-Bf>3oyLFd<<39^UxzUF4hB3h0 zk20)*8saMBI^)5AjQbJb-RBxl8jqXqnm)YaJ4)zulO-j@Jg`~h0n4vee@ zh*>TWvwYxWalp+&{M1YFHuBZRFG0&3qYn6}S(C=FPzKz`cMZSP@(h{152L_MtAJhM`_SS#}26 zayt|z+X4sK3n<9mKty&3cMA(5Qs5LN&~54;?Hlb89T4>Zb65~{L=VMw044Z5wlelG zwlB6Jwlj788N0wE_z5>^~aq zT>28!8Ly?c6u&NhQ{1Cu3b0|?5~QR?S?#i_Wy{KU10}T-pHtB!yA{~xjlem#f;WCE zJDL**lbZmxvK}h{V_An4V)3jroCV^ZAKO& zbCHe6Zt#oFf@O3R8nWBKGrA_fDL)`TDCa9nRSQ%pRR#Dv_f_@4%^9p2t{DkV%OlNS zL#0ByNc$A5lD}@rV*MOLmSL)4wqcfGyWtx67RQVajW>+viULeB3}T1T&dtDkjSt7zx|a9suXe$SHZ=Y3->-QGOcVpVXU_K{Akrk@@7ElZ>5N+vuV3%#~Aw= zN5HNuSa?f98asT@Sl5FD!c*yVLlj$5wH=- zqSd17qFUm=ZrdH?F4%1kkr&7tFyme#_mF2`zP(1?B2VOZY45u?}9gT8_bzIrfa5Krp)}YV6pslS0?5Emp=yFmMQrY@&|*_G9A2@aryo7 zCtE&1b432n4?(~aIcz%s&c=RlH}-(RaSTk3r?%Q)UMzPx!8Rxb3!%`p1`GrbxCnk1 z&%M`k4ru=KK=ZEvo`1XND3JbtY5K{)(~kjCz5y`ntqa-~)B$q6eL*!~*#A=KO5n(U zL9z2I(Bw;lGeWa~`JNe?3`F)sAhXAXEx=1Vfta=fGd&`@AX*+Rjn0YN<2~c*_{=yb zJ`2iv`EhZa6qm(`@x1u)q7%RiA1yjuv<`^h%ZYZ$4#~O63(2X#)eZsv^-p?r$%>NY zC96tC0R>tBT;^?{GH;dNC?8uftKxoc5!4U<76yisD6|KRLCn8y@o#nw&Uf|?_80aR z&P{H2-e05ifN%%6rTc}eg=>T~@I=>$YJ<1<8>#WnU2G`(0Y>3(`B%9XEWahHJ*wtv zEVyqunq6R@?EwF*0k~j)?JhbPT!#$*{qv0tN# 
zuxP>AIb}Ozt7AXoTIMj?ATJSx#xfZ+LcwYXqC*0s7eDw7!7*Ws%tN?VNdXNCz zJQoOh8qo85Lo>p2fM0h+ypa{rl|YRb#{Ka`e13dGd^PahYvZYSaXcCSOIn`=;`)B# zX5w1nM&foN1E}W3$yLd3$vMC*&xU?;ZfbjKYJjq>PJ3df(z)^&sE1DzP*7|jN5!i8KP2)_% z!7>{Lewip=0ydTad@PqmnZFsFsjZf)wr60&JO?M{AviH#TtC1?sqg*f`R$?mXg(}h z9uo@41$Zzgh~Q743Z@ni3TlDXU;^8~1I~dsWC*E3dEf@D3i~4kk#*74(R=Z8@jF1k zAC2Dx3jT8Ze*6>=@elv8@i&V$CLSbuC!uHr{P&L}6}r%*)Xvlt;I&&6w=S+<^0eeh z$>ow8CB8BSP{+bDaapW9SspLnSpGFP|L;QFsNtj8WNyjK(!R>FwX~t>I zdCcv>dnp_XF6=;ABZX0UM#a={G~M;R|G9@p44(~uZM!+9H>S6y)nPMOY4-ftP#U^! zqWC=ZNZ$q4Bo0hb3uzXg~5~CX=&;13wBc1f3{CYuh1t3 zr$$wv1$(7w&>sqd?Lh+*V|z5Sa1i(fBMXNY4g}NSe$j9s^#>;ZBzY-L3YVS&H2Zkq z+0PW`0dL+H81Xe_8-d&2T2=8;)6()KHBZHa2%3x=3GPoJM41R_nLzSV< z&}3*cbQ$^#LxwTKlwr=0b(VKlbXIm&byj!Qbk=s(b=G$tnl%oZNprL8&<|RX{ViuW zRCzY%)*)C)Hz)?G1xg`kD1Z!xMng=k(QFEDPAsi-xXS;md;O(c|7YFn?{?Ri9{+#r zJpKQ)==8r|LHghC96jm0wez;lX9k?CM9lJ6s*)3(ytcxdy{@9B=6}IC|35!^|Ifoe zHUCukGhkqg&Ye5|U6r$DIy2pwo=k71KQoXS%q+?*%`D4Yl({5xdFIN@)tQ?!cV_O+ zJe+wv^JM0!%rlvnGp}Uc&U}#he;@Ctg04cUl2=7tCAad|O0z2as|c%1%pzxf%5DSI zshU~!vZiGPv#wz4eV3Azw1!!p>;_qxS#hX@9m_hJmCF8^BhKBH+X+_{Do2gTe<=4j z8$>5X&BWsr1E4$hH0x&8v#fhrx3aEh)yrv@J3F@vt}Cu3p&!9Ua*~>mN0C)zCAk{a zO0`pu(P}f%%;w^u;z{Ch;!)z&inZ_pa;x5N?T~1IYK7{9`jpiS^)9Ot>PuEFRC`nv zRCQDrR8v$n)W@tQsCC&*b86=_%2}M#CATB=v;4U=^X_5qU^8*Oaf5NA308uM;360a z`2;P&Lco%mle>{kWCPhq&L`iYsHskB6egh9V7{$zXDsOeO)kG(;wc z`G)hFGnO|3+9;!VBcYQrnm35oN<3AZBPNSc;+f(+@htIdv0O$~Y*cJggp|G27LD4F zZPD3HcB|cNx7g>n^L^<^GLnjvL<%Ea5*dk3iLMDOY7~lznt{qiu~B1DJX8*fhMIuF zqxzx-p~j&YsBreS>~=XFaymm-Y;{iUTvo0kH5#|#jgfL+VVICnw zijjJfdy|9YFxg8kAp6KMvY+fG$H_(HyOcWA2Gjzoj~bwQsX=OWT6>y~rlFmuoui$l zy`nv(y{3Jj-GjE#1KM}m8`>A>995!!q>Y4bPkhN0f$Val0CrhzGA+L&IZmT6`x zm@4Kl&Rfn$&IitW&L_?k-ZbdUV0g26(|Hu$93GK3i+5JkTHHpg6Z6C(u~S?i4vXbt zL@a=2iA3xX7l}*7T&R|C#71#QtdbdIEX5whu7CB5eTwahLy970QW;kkDr3r#>Jd#`kyDo4nyeC`{nHO0UDUU3P%#HL1 zx36EKZ(>kZ3S~mMP-c`B<$(^K3gtsNQCie&R1zgb6{GYh8%l?Yph{7NsB%;cWk(gE 
zL?|h$JbP#M?(88s1EC-{HD>^H;p*hp%^jT^%Pq|H=gk$fM__@>qGiJa*m}=$s)q1x|t!;p8|OPJ^@J)HowfiBsX2I6lsdt4EkZ z*hJV)I72uM1+kNaeT2<~orD8~%Y=1=U4*@aqlArwBLpVYzv84K(q!^9@@n!z@?7#J z@*46|@=9_Ac>#G9c|LhLc?r3Uya>u&YsrhrEve0*tJRa%h1QjpLCb@_RX)v3yG?sU z>kGB2*7Sk&y7a;HI`o$GLG(8C4)i|sF7*0PwaTP7r8lP6qt~Xlqc>xYVXk6sgo4v< z=5pp{=r*lpZeXr~p3^esTIN>f7|v484o+?EPfj&%6K*Z&8&&1j<<{p`=l^#syL>&36+(LitCD#igSvE z%6ZCF%Eik0%DKvw$};6G)l=0t^?3DIbwHg`uhD$aywkkZeA0Y{vd1^=5AApD7wts- zIQ=2L&R{hBFsMzxOhc?;>uT!)>sIR~=pS^l&$aJ`o~p-!Oop)R6sqOPHKpf*6g@d)ZNYCCE#Y8&b$YI4quoV_{w zbH?PFa+6RWOy{O@x982vTLrzqGkFK{>N~dNtj@n-r>Bt&=%YYHNo9^ioDx- zYx6eY%5fFY*bCs2xH4P`t`L{Pg>h-zB3v=92=@_RpO8zqN4QORO{heCN4Q1!PWVOm zN%%~7NO({9LbySAM)*MZM7T@%P54N7MOaFjO+HJ$NRReT>U?Smbpdq|wI(eSdTnmnF!~63 z9$i7F(x=dK>C@;l=p_1V`gka;(dZfU$#guOOPA6oGABWAtUjwQ>l3p9t19aS^Bc1v zs~+ns^E0y&t0}7)>pQbK>j(2Ka{^})XE|pNXDGKbcOZ8NRJpoyyKo0_|KpD2wuegB z2yP}6wi@yF@!s=}^0xDC@^B-T~fo-U(h$ksUfvn?;vJS4DS4oy48RJtc!B zeI*?wqb1{@%v48mT|7uq4N6TTC2b^)C6y%YB@-o;C9NdGBvmC1CF3PEC7mRFB()@6 zB()`tBwJ(^vfZ+EvbC~#vW>D$vSqRfiW!QT3b8__P%5N~o@ODqkq~D32(2D-S3SE4M07C|^SVCsRF5Jzc#-yfto4xf0aQe8T8~>VS&v!ISg%+gTTfa4*gD$_?bqzTq03Rz@zh?)amhZ? 
z@xp!?njJ5p-|^o5$zBht9iQ!Q?a%EG?RTN$@xlJeu63*2Cb!Z(!JFmfd4*nq_kypQ zzouUid=S1DejUCWeizvl*%P@GITYC)ITAS$IU2bby%5aecFMjPao=XS^3&ufWkf_aknA@5;c8_b`)YM5G>7kP~_)iLk#>R^84 zeaZWpcQ>yArYfc`rZTPy?j&v?{t}cgw?hGQ7j7qRD{eDx5AF!=EN&ZaAFcsmCZRR4 z1+gzNlh~Ztkl2z~huDEQh$tp4A*~{!phGtqb|c#J zoA8_QyYqYUGoem5lHZNriQh-mPt;d5Kr~n+5!tO?(PPm=(Noa_(S6YqQ8%#xN@r|| zMp9qGki;Z-2~#4GAd;vgD0w5XNZb;tgeRd(k`kH3B%w*{l9Z&MgfA(O$20bd$NfNokFY7 zC@LwxD?TcID?TeeD1Im^D=#RoL7D5aa;Nf%@`3W1@}}~<@`m!F@|NW=E8>J$8lC8=rZwd#H99qNPXz3M&c&FbxHzvh^xJ+!GBX`5=BXuE0~Yny9ZYujkM zXnSb8>iX!q=!WR}>3Zrib^qx)>w4)1>3Zu1=<@U=JxWi8f>e$^TmJ%DQFryP^{@2L z^*8mm^|$nHgU{eH6c_>qzrka;YG?#Cr>4g0(0Zx`wWkKg+QueErzvXkn*t`UDQI$; zI+)v=8=D)LTbWy!o0>bCo0uD#>zT*(#MTHDD3odOoP(SY)7u6gQJgQmZOJbx+B+}Q1KZh?z?nbUfu16k3?nE9%u10P| zu0(D^cLN>GilU;IqW7cMqhx4E>`XjJ-6`%@V#$)Bg=hwvil(D;(X-GLv;x}6bI=pe zB=k%)6Wts=1x-T>&>}Po%|=f}k4H<Fo2 zm^m0UW-z8VCL6=V48;t=^vC2t*S8*KAcljO7$MnUFg68imOdVWJ+z(tc{C(VK z+)dm^TqXP~+%H@${8#AZe!!i<)x^JscJ6!JBiu{eJzN9)Ph2DX6I^9{J^VY|Gu&HT zeS9@MnLsDvi5Mb}NF@%3imil*Ceny7UP#O(vWYn2BqD{FL!3+`5vLPF#IeLV#1X{V zL>W;)oJPzhVu=%pWTJv3CG8{aB@xISD0M0Ap&8qWQjgM;QlHYD(v8xP(x1|V(iNJr znUt~A!PIrsjnr+__0$d2p|s(&5?X>*LG#libT@rBeJy=E)KV|f&(I&zuhDPOkJ8uC zZ_u~UkJIHyvA1grk z3-~Mf%lK+&U#^E9rj{S$oA@HWg1?+^<;$UhS;9~73!sYW<1ggv_(pz|AK?4>X8uso zAgDwR7ez&3=s<=<0a0)9Udal{M#%<=O1e_=OmbhcMeB^M;WBv&M-Bo8DzB;RHAsr;j?r>d*^sH~-8t6A#P>htO|>J#dd>T~L| z>afPAxu6-L?WgUl9iSbk9jqOs9Rn@BQQ9He>AD=<4BZ^vOx)3jpUZ5B1 zS$dJ4q2HnZuK%k4tpB198ww2(!w};%<6vVqV}D~8W5&PQTyJB0;{fA-#=gdW#(~D+ z#x};T#zIrdlrSwY4Ka@~4>u1rkA#L>A9FAB1oJfWSm?OTG>WchL{guhECmZ_($` z7tt@#r_t9@Ly^14TVzT|5~74Wfh6`Mgvn~DN2z4G1YLwKM3-VHRLwn1vWKCV*Lt zv0#cY^DrSyJzOvRFz9@D!1us+#rMR2!cE6_!?(k?#Sg-d!wX@#4_S@ z(kapjsA-=g9VeY8)g$*O52H+?h$s`FZ9R@Mgpv)FYYb%&WhC^j$4~}P2$YGG5tJE} z;gr#ofs~g`9H2omG8RI>@4x zEC%}ybdYbcZbJR|D(eDkKkEqVB#c7s+6{q4wBB2j+bUjM@g$fX_hMGOUY7(bbz#{lrEJ+Z+3*Vi?qKqSIUw0 zkm{stsaA@S>ZLg_@G?X?R7#X~l6R2zlmC!q$_L21%Dc(C$OpsKx3h>g(!z>c{F^>O1O(>MQCZO-yrLGZp%wleKfSQ?xU+ 
zS=w3J+1lIMms+}xt>fs(I*CrM6Y8WonNFbN>!>=WPNi4s6?&PzilL67s-d2tnjsC{ z&4-4W#!<$}#_3Sk%rcHQ;-RrQ-&78T%Xuce8G$xsF0?9nW+8McC1$c&WX>_?nFZ!- zGYtxsV=dz>91GVn-!j)S&vM0*ZADqp*5B5P)=9RRwj3MEHqAE1Hpe#7_Sp8?_Qm$q z_RUt!KH5IUKF+STk8+%Gd~~U0f-f}#0 zTy^|$+;g0Os^MkFC&w{IZD&L0ZAW8g4d;Hx2gfDHcgGROImdG-A0BYLbi8&Pbo_8! zaol&bb>49Nc3gGecRzMta6fQgcRzREa=&ojgi_!$_Z9b3_c`};FV>6kZuegCZu4&S z?)C2WZuai7VUK`Dp=Lz#7;d*d16K*cR9na0abGU+`-1Lhw}Z2o&XRLRs!o@LKR_@JjGd@J;AT z=zHi}=yB+E=uzlX=t<~l=ra`H-i2Opv4*jFu}ZNfP*wXCtr4ph>k+FGYZuFi zwT`unRgblZ8H<8Nfucx}Cy}4o{~u!tV<=-FBa<bYk+OU76jO8O%ehcdQqz zM(if++U!p34E7-QBz8x3b9O8C2=*{`7j{>6J?O*thc^5K_7HYIb{%#b_IP$Tb`N%4 z_7hfHb|$+sy8#>F#5fl?Zf<~E#EozZxPES$TgcUMJMp*znqasfOTZM!1>*!$phZ4H zFj0VmKDj|KT`)s1L?9Lnf?hdC@RqL=3>F9k;{}LdvOpui3noCxe70beV3uGQ6wXnC zY{5W5j-a2wC>Sl6Dw+bN@QI@7q6wlBQT^{fL_bB}L|;T-MJ3Y9(xXtx-Ycz;#-)YQ zxzcsgZPLTi^H9;=CoTV1)Ltt+C0#5%14ZrK(gV_)(yP+6G$sv6gVKf4dD1=74brpH z6%j>9F{v zH!-v{G&giHG&Qs}G%_qO%r}%9UKqGWwow8lVu4WsMPen?h-F5GQD@W`S;qCIjixmw zui0(3nuBJG*=07ELuRAdVK$iq=0dX*I=?Zq#3F)@F5e=yEVeANEV10M+_ZGIvTbym z)<(7oY$O}grm%J9QEeI<&xY70+ZeX?_8Ioc(3+iSpJIRK9OuM3HBf5}JA+P+Gut`d z>2~s*{hf21W+%}}aH^b2C&gLQMRg8yj&u%o8l01yI_E&=6lX8zHODMxKj%beUnk8e zbB=IkIcGa_p#6Ht8FG$tPIC@*4s!CH`OeAC|D1GZ9`s?+&bppzo-gitp6~9fm&J?*{iz2Uv?J?TB`b@@tscYKe04}JH1cYXJLH+_$N z8o%3b@iYA6{Mvt2QMKRUxA|3mgMV~jR$yj;6JQ6p0e8R&eb18teozpM1_QxRFdPg9 zQ6xG&mJ{nA8yVxq2E{l~pmfACV>4q@W0IIS zhKkW+yciN28e_y{u>rBX7z>J&ida@GH#R$ljSY-d6qOd07nKyn67fV7DvqZU-xI$Q zUlX4a%A_`_ODdqFSU=SuwKDZN^)0m~y*|A*y(zt_cttU(sDX*Qn>I7paqJlV}KSGp!G7 ziA-Qn8FJVU$$_m91w+iR!hT2|!@wZHhR7&}kilSJ8NHZ2nEx^VW6xoe*c7%3)-@Dt zCELtav$^cWu(V-kQ`uzpEOsuN&PKB>>~U-{Tf`=?IczOk$(hTU$DPZq;8y2V<8|S! 
z6Kob7gSCd!f`nkLV5MNEASy@;La^cx6fA>fhc&SAa8R&IuuyPOuuX7WP%Jnj*dT}t zmJ8+y$_3X1NkK%gU$9BANN`v%Q#4033$_$0L~}(4L~or0=9vk#5LW=`iGn zG!v5q&?h9EDbHIW9$B&0g>OZr(l z0%?c*l#WC?BK46DNG+r@;*m$>KDkxylG|Ygz$kaiZStgivLaWJR1_)Vib={j%53Fq zB~ztT$yI8VLd8=_Rh86Lpq_75N7OAewKVlK%{7fRbu`VOMsL%av@We(E7e-Hh&EsA z(5{Cz{6^g>-BMk?ZiTK~w@5cvw^BD>w@{aY@_T_kp)b-G>f`#P-m8!4Bl`agGY$O> z{R}+~-3?0(D-0_QuMJ)(t^18pW5k$%>UzLfVhkBw&|r7wq>T%WcH<7y9@9?KcGD8` z7W01Sm@hZ)Gw(GYGMAZ8n-7`~n@^ZeLeG4gIbq%jMf1hbI$vg?CNcIRp5e&btNWXuv< z5PKa9#q3bhogXvDys>#Pf6N^V#L}@PvHX}Pwk%c_D~R=tl_%yU{v_~8GxTBela8b{ zX-^uHPUyv2lBbiWk}XoJQma!{(v{LR(`D(>^nvuY^p5oQ^#1g&^pW(s;s?b~ipQ3W zE1{P#N@ykTO5c}$DqT|k2VFJ0fA&{&jqIA)pV7b3b+hYb|3cTvF31Vwyvuo$GaWk- zI~zL$I{`ZrI|Dlvi^meNU#48IOP7ry|%2EP)&2tN=1 z2mgook@$=Fh4`6RmGqTZh4hVBnN*Whoz#%TBc~}zN(CiGnNNvRiYRj_rIdM;VoEth zK&4RWR5JC0`3ChitU1k~t)~?*91J~U9b+wHKEuZ-VZ<407^@f+j2L4%V;&>O;4(Ha z`ZA?#89TwA%XYDo>{51^UBLFU1MCIt3O1j;j-6s_I0lZMW8|#oY~XC-uH`P{uIFyy zF6XY{Rua|_wiG@Rd=)eoz7sqYbb_^*mxB9(w!-$p-+~^(_k!237V`zRW5x?>2_Fl7 z2wNDhKVa7ZB%M<_@ogn@_1TB1DYHdiff8iF~bmuDn>jOuk88CSN2kmoJphlP{N-%9qGD$Ww}91zwq>q$+7j zx{{(KDH%$llAvTNJu08duX3wwDznO^@~TWKy{eYFraE8UP18ZsTGL+BPSaJ>M$=Q% zR#U1e)0AsUG(l}x>(hF*r*u1XXLKiFui`YURUFkF&>hqr(-rII>F4T8_4D;5dW~p= zVZC9aVS`}{tUByAt~8!Dt}$*i9ye|>9x@#@oiv>=eKFrKKQ})yUx7`8s`=l|kIkRW z@62z_ug!PN8p}${TFWZSE6WSZbIVK1W6M(u+sd;puvOT~VFO^kEp1EMTG&zcTzifk zZ3mdop5YqhYUrxu>g8(Ss_m)|jr%&T9|3!O6dqXF@do(lZin^ozs154i&ZsqdHg+I(Ft#^#Ew&lz-tS^} zVppN>eI<4)b~5%Rwm?F5lkAXcpX#3KmFk)5lxmymky@XsmHwV; z44vv0>6Yo{>6__O=`-mI>5J*p>0{~3P@z7NzLGwf-cbCq_(k!D;`h*vo(f%PUWuwi zUcxQWl&DK&CHxY0iMZrv>G#rKrGHAll-4Y(RQ9d(M`^9H>Sfi+9OW0v8&))`XqVkI zyJdEp?8e!*vj^wi%avj2SSePCQrKlX#MlV?Yx`i2Z98KRV=E)TxWc~7KFdDCKE~d~ z_OTDJZ?HeGudxSlEF2SOKIbUxf$ilU=bqr6;qKz@;BM!hZ+~kewx0T0h%$IL7LH;A)0}(0JKB% zSW~Pm)6UbDYD={9wF|UG+O#&Qy`_7md!&1=yRCbmyP>cjgi-&RjP>HGVT*G(I=pH{LY9FkUs@F`hSFG+i;BHJvjx zf%TS_`SoDErBi;>{08~$@|)+kffbj!`3>`1`Xfas~mUWglmfqH0 zR!EOm@^kz8_DmP_hV!&V2+MRx^UTo=Wa=i<1;E|#0)X1hx~VNcQ%_ryG2 zkIj?tq&#|$$FsoW@%hCf+rWo_ 
zI4BJ+3N8r_3XKg74~-8^2@MU64vh&-fW?JzpLsI=x15+94j_DrhUg>fu+P_RcNo9m<-O^(t#$)~c*aS);Pn(1-VyyURW0tD)L{sr+*J_wptcV=B65cgyaQ zosr!syI1zdoH#as4Z>ck6YId{W9?WA)`q=|zlcABKacNA>P8w!>PqTE>OtyG>PN~X z8ARMk}3^ws#wSi|(z_|DkKRLAtx)Hi>0{>1!2`O{Ow^Lyvd$RD3SEx%X( z?EL<)yfid_GHfnQu~;nm7PIA}<-Mhkb%*Vs?WFCv?U3z=ZKv&-jRY$}WV-`4a>A}9 zt|Hf5SAlDR%Lt1(#jbK#$T7Kmu6ZtlOXtG5Lav0%?Fzcmt}>U+HQ!~2RUMa0@6xzZ zu%lDq;=6ZymU~ut4tdskR(iI1HhFe>R(p`;v3`}?Hda#GC96szKOn7zBRtpzW2Tlz77Sg3R)C2E~s8mx1dWw!-6IS&0)!< zenG8*8U^hNn!=_FDj*9e0`dS7D1zmcZviBj6`B*ug$0z^u!S-yJU;A-4S7Qc|+GWM0Xll7%JilKHR( zv8H4YY%}o6hL&ZP^(z}tHoUBV*_5)0Ws_j7VOZJtvN2_!EPc!T<-YP@`P%ZM<=0_3 z;ZJ!+Mc0Zhu$3^kVnBA^?9A*GwiKJh7Guk>X{?Menj|0#$$Kdhs+4+_afk7W@tX0D z@tpCN@q%%g@r?0@T@7|azp^WHs&am^3t|29I`;*2NO##!pI}t@7mzn^kqtaBp>OaqV^q+)q6JhrRm_N<04_eXpseZA@<_(L~Z4nqs=yG}92UfuJIaqN1XL zih>;lQ3OFi1W7i@rh#IMpjc38ii+LzWRvVBo4!dl`J{P1`@82iXTE2?_s%`{_s5+x zbN+C~4-O0;bYysc-mmB5_k!OF?i9Qz__N??!GnVL1y2fw3+@&S6}&1KFL+#VsbH+& zR)Ib1L&39xdj&r1Gi-0rN;<)Yvk~l5>`k2YoV6Tx&JNBl&PmR8&H+#+f^s07J)Cu% z-JES4SI&OULC!wT&qdUta!?l%7ySSQp-IJd#aJ#LRDJ@u{@h?Lfg8!ia09ua+#qfg z*Oz;uGQR_<~x&CBaoez2KtYvS6@msBECDPBbREE$Rq*AQ~6l6Rj1m5ktgEaTVyd z+z?+9Ul;!(zAPRS-x80C$Hia7a0x)S+TZ#ng(86}u` zA?tl^_^bSX^}phO-CyZ{8+8wL2Q?pk5LBlRV>*M#!EGU5L#GoQ33CWD2(t-45%LK4 z3HJ#1BmRhf7;Oa=)LYR#poDri+7|sHx<9%Xlu>ap>X_N2W#si_XYyk5X0i);4S6kj zBY6qgmFz^`M4p{EGjV$2q{OL-mlJmamkjH?v$>ScPT4r zD`+k>C)yI)Dw>)WnMO==ryrp2rEj6{r@y5CNl(t8W~642Gg30*Gtx2=GbkCvj5C?1 zLG3O-voZ5~=8w$Otg-Bo?C_k>oUoi1%y-OJ%v;Py%xBEo%rWLm=3mUu%yH%e=3~$` z8(_X={?2^EywCiNX~+6fFq!o?Xok&T&1TJFO=Ep4a9~Xo%w$bs;n{&~3_E}w%Eq!$ z>>xH8^t&3_H`#x);T$i{X^tny2lTRhIcGS290ccQ4w4hVIa-ublw2e&ng+U7Gm4eP z*NaufH;QAqsoW%PG&i1`&h6kLOAsaACGZlTlKK)){$akFuj1bqm<0WT4#91KO#ld9 z3ao-#f@^}`1>J&S!E3=|!HA$s&?jgY3<~}bFv@P1-6?w{+9=*E-Xh*0UI(f{jpCc) zd*b`z2V$%QEeVnYNZKSVl4eP(#H;*d`O)%U%deC#moAb*q${Pa(iPG*QfKLU>00S3 zsgrb}v_m?pVrIop6|*b$RQy@-qT*r2ebCYQ9n^H*f||})#mkDj6}Kv$R*ZnM&h3g6 zdA7Vzo+oF?6XaR)RC&IfBQKIOX znym^?#Sz6hg})*|fl}=8sr7I0uk(M5I*u_0nSx#izX@&+(T4z`O9+bziwFw{4+zg9 
z9!EThcpv>1)OVjn1JUoIUq`=*K0w|@-cH^_-bsd%e5ZP!EjCS(87(v11#&&Z#e zKRf?Ba~aE#HIKD~wE#5d!a(;;%l^d1aiTdv93m%-L*PVmVmVdC`CJw#kD*KaK_|?w z1XU7PQp`KbKf-SWMXp+Y9e)<+V0{%h3+I9&)(^oV(8JmwTqc|&{3x&!eiBR<{v=#1 zoG<($m?8Wum?@kPd=tzT{wHaF6Jot82L2Wi+qZFoP2`3GI4q0vc!y}CDiC-Tgp}% zl(wDbOZTDwMK8=K0L@HJMt(+d#<|Sq%pF%3qhiI)81xQ~t92_4(WKUGkmt zH|DR(hvaW$tz@lWIkQ|@POQzW)2vu_6g!R`!~VjiaZ)+QIcc19j$`q{;_6~9w}=~D zf+-0qIl({1KhD?k4-5B$PT5A`LE!=64&h$mCgC387NMsQDtub@r0j9oF7ZBboA{A9 zTw(zou67Bk+`oLU^q};F^cW~+9g*&n9+vh>7ga0)-K)hF`znwXn`J9yPO>eswXzMe zjsM|NHOg-c3u;nIxmjK#SIMpNI(aYXOr=%IDqmE- zu6$LwLa|h_Tmeg+e%%pM32;~-Kj1rryO5R1Qr;*bV(gNsMI+~852hu;#-_iLQ z-k>mgKRYI8Xa25y_xvsSZuz_Ox3D&_*0DCRAglz?Gi+slWoK~mI60hr4wI9~SyWsG zT6Vew;ev?NN>Bk?WAl*6Px zQeWv=skhWidRlr)ij=~oHtDj8%@zAA{44w_PRP7uhh@iPdu8sjBeDasJ+hsk=yg!G zTXsi2D8DbiE$@-r}w8`KbJ(d_;avezWpz>unRP*LP?Se)ZU%Zer3 za8L=<@$o`GVUREY6z;mj2C<#QUy7El0xdM>3Zx7rgUe3K&dJWm{A6g^Ss6_BRsIQ- z!ye0DfQr}?`QP$K^4Fjy_Luyv{5Sba`3L!X`7_WK>j15=zbfBXPE$B3JQRBryA?+j z-iibTSrMm*R}A_0`ol1>*sX+D5w9Z_#DtJT$w6csd2Qky%2UcniU+Ncc9RxO7iX|D zV{?w?d$9JfcCpT|gq$)?C0D^c&y{mq`7QhoevFVPj1oQ-oe&=tpA?(KPsPE~5h=PN z40O~YWO!LHD5_1W{2`xSIk|FL<iB{9om@Z@!g*AvH6Zl&y{MbTqGu~(WQ0rlJRjEC8evQOln%=gNtvD4Ueb~<~4 zEdfQ!%iK%ci`+P2tT0}fAWRaT5+_PR%89Z#S*&bU<(x{#%DI*EE9X^SsQg$tLvd1( zsz_I)DQJpS0TJYQ`TUxiRrfzJil0q{$j9smU?YG}+3^C~`FUQe}qXltQA&PSuw7vzk#z z+!gwlKwn4%BmjbgkRV}@Ku8oM2!e!!L;U``o`r%$LNE{?2m#^`34z2w@DMbF2*E-S zps)3x(v|>Xx5FNE(i-g`&g-17f-afLZjm!&&4GynYyML&`$s@?0JXD!U-J2z){=gBUcUH);yo=*I{&U2kJD3HxxJ88{y&?ZB! 
zxnj59`5*1kxu9ycan1TQ8`k_bG3YYvVz*}UntHq73DczUN%tn*o^)%H#_rCf3wCXG zwn^L8%v&>e%_X}TE<2p{kakETNfhbz$qu*O(y}AvVYWq!e-qav5?J zas~1$ST?by?Sw~uz>E;AZYn$BHxOBLW zySun3ovZ%sZ1?~DjrQQg_vDEwpszL!oc{htotrr^YhpI2yZtmVcfxUE-o*Tg1rrM= z7ELUkSTeD6Vi~B|t(b6{SUIr@)a;xmTqf2`tetS3fK05LSU<60V&lZ7iOmyRCbmv& zo7g_FV*)ziHsL<8b7I%T?uk7U9us>f_D$@c_#G@390FxI&x!vpvKeLq90#9xJOPEe zLEWJ{p}U~Fp?jbn(7n)o(EZQ@(1XxJ(8Ewq=n?48(4){}(Bse((34Ov=qc!F=o#o) z=s741>J5cM5l}F`2=#^fLH(ftP!tpm4TNH#SZELwOzJ~%&`>BI8U_uA5}*;#NGK5+ z1&xNrKuOS8XdE;iN`@vt6QM~^3X}>>hNeJMp&)7wO^4E<3}^;46Pg9hhUP$Xq2Og4 zXg;(6%7U_?g-{N(2wDu~LjPS6;(z>>{+%cHKYsepcmJ!p+rQr1&hCFzd;6FBgIeBf zh$ATFErdAzhoQF<)b#8jlOb~<3m_{XOCW0@?vTxp{g5*dXUKNQ2FM=BVaQ6z4hRI| z0r?p+A2JWJ9I_0u7_tcR6J!o#6=XF8Wd9)RAnPF;A)6puA=@BOh#O=VWH)3lWFO=p zob~n3Ualh)WcCT@-b+2=;cW-dN>E7tBao4(M zxo5kB!$R(P?oIB^?mG7t_g42dcW^n`9gKjv3*Fbbw7cuw3*9;HMed{S26vTvm3y`O z9rtVQ*WGWpE8Xw8+uVoTJKZhrcioNd|Ia63w6ob=w_9g#XXgff9kxGWKW>M#Z?d~@ z7h!)Iyane5=DXa&+``=mZV_&gZbY{zw`jK*Hjp9aiOLj|f zOLe2UrMacM*<1%)2VI9;hh0ZpM_tEU$6ar^-gdp?de`-y>wVV;t`A)wx&G$**!79) zQ`g^J|8RZg`rP&3l~1BwV_ZqDv958hEp|RGzAieuRy&Lf)&(4#b@|m}FAuD`lo zalPt#&GowL4OgYB%C*Y1+EwjZ<67%l=UVUD@NWw(8Lr^g5!YJ9qKcB&Wy}$B5KjOb0|JUm*|N40U=ji{u0{^8~Kxwa> zqMWXDP|i@!R5~i>Dd#H}C>JUhDHki3D3>XhD_1C;lq;3aN>}9;7m@K zJfQSco>87t`YKUMtdguuP*RjB%2Z{VGE14Q%vI(onaX@6TUn^&D2tRlrBEqT%9RS` zdF2J=RpmA1b)`~StyC*DO0BY4sZ+Kn^~x@#S!q>{D94l!m5-E9lz%9nDgRWyRK8KZ zRlZZcSAJCft^B6^uC!B4QB70LR?Sh(Rn1c^RXM4gRU1`XR9jU$Rl8IksspNHs^hAY zs#7Ys%2$O_g{s0-Bvq`6tV&W*R8&>6DqTfaF;tnVY*mhmrDCg!R6LbTC0AWiHLBWG zdR3Rotg@*FR70w}s)wp4s?RDw^-VRQva7PMa;Tb7HLGe))xxSBRnV%PReP#Ds`gcR zRvoE2T6MhYMAhl4vsLG+U{&xc-zxtqY*kQINENOsw2Dwgtct24RgtSwt7ui}RrD%G zRbCads;Ej=)l$`3)n3(AWv%M1>aVg@4OER+-Kx4<^{nbe)vKxxRiCTARZXvUsGd{p zSiPlsYxRz5x9XkM2dWQNAFe)9{d4uvYOm^3)$nR;bx?J9HKCeTomHJz&8p^A^Q+6M zW!0K$ZMCktz1mc5uC`S7RQFc*RS#F+t$tShy!vhRyXp_spR4WE_Ub9>*=k4iLba26 zrFw(Rt;18sD0Ib>Hsxb9i*nK8R{%`p1Md~tS(XW)e?2NTB??*73%Zq z3+i9gSJl_lH`GeCN?oO{R@bQO)b;8HwMMN|cc~3(liIAdsBP*2^{{$e{ZRc>?NBqL 
zW>(GY8poRXH4AE%)-0=8QR7szvSw|KYYn7kL(P_&oi)2^_SAUP?5**vIZ|`1#;XQi zgQ)SX@vA}AU~6zSsWr43dQDbMP7SMuUBjs$S>Sb!~mExz1o+l(UpJ-B zq0X^xUfqJag>{SSoa%h*{ObJcP<5C(Y+Z0&NL_dxp)Rs6sxGE3u8v%nP)Dgttpg*p zb-8tUbt(syH$6m?q1!)x+itd z>b})E)-SDhs&}s6P`|N$bN$YGcs-)tw;olGs}HRYt0&Zx>a*%u_44{l^;hea^$qon z_1gOO`mTC&y{&$v{%-xf`iJ$8>z~yBUjMTGRsEa#clDp^7d9+vSlY0n!KuNu0n)I( zVMD{lhD{AS8lVk38$2338;&#_Z8+9&yx~m4*@kls@CM%oY(r2(NCU1RwBdZig@#KF zR~xQ3+-OiY)HKvJ)HgIVG&krPS{m9Lx*Dtvy$$^hwuXU*@rIWTjyLDsTzGToO~}nH zH+SCjx*2|xcvE&$e)IgzOE+)aRNkz)Iehc&%?~#}-?VGAZ=Bk=p>bp5md5RkI~qM2 z_crctJl5#dc(&2E(XSEL7~V*1BsV5BCN)wU>5YuWtj4^?qQ>IJl15%5zp=DY)F^J0 zG?q8Y8qYUgZM@caqfyzYYOHT;Xw)>C8ZC`48(%fPZT!&qt?_%~M5CR?UNc?epqZhW zt#Q;W)hyGj(5%$BY9N{onk|~0nq8Vb8V}80ji=_A2BGoMU^PJ+oF-gD(!^@wH3^zj z4P8^LxvEiX>NOfoyGF0+)EG2YjZHJGxud8_%X0W@DV-!)UTOSR70 z1KNXHPwg@7S*^Dgp+#xYTC5hQC2FIzG1^!yMN8GDYU$cMEmK>dWorwy#agabsa0v! z+Ip={+oEmL>a}LAMQhdeY5TP{?SOVrJFdN@y`#OSy{~+|<(4)nslOZ@Sg=t;w!= zO7ooNxy|#Mmo_^!J2!7^-rT&ic~|r9W{>9m%?FwfHXmv})_lDAWb>)!bIsn(@Mhm; zRC8!^STm`a)m+rfYrfQcwOQF*-`v>T-rUt}ZniZKHGggfn!hzqG~4MW=^S)3bhC7` zb#rucb@Ox!b&GV1bxU+hbt`mEx|Opc~PR>+b44=sxQH)_vA}(@kla+A_UmMvG(1!WP$-EiIlcUM;6u z&bGi?P%Y?|@D^fAR!eqEUQ0m>tA*WC*urb!w+LFwTFP6bEwYwNE$S9c%W%s`%XrJ( zmgg;RTRyaWZn0~f+&Z;&cI%v0$JT|dPOU3jSGOK(J>KfodbSnT>f7qq8qkVv4Q$1< z;#xyn!&`~1(XHgxgw~{1N-MQBy_Md|Xw7QPYb|OmZY^o$wenjft>vv(TWzfat;4Nj zt>di^TOYSRX?@!IvemxLp>0On(za!7E83jeR-P&$zA8)_aey9Cz`@Qxj?N8gEwZClt(EhRgQ#;UZ zr=OyC&^ziE>7Dc|^{e!&_0IaWdRP5A{d)Zt{Z{>U{SG};zgzF2->W~M_tbmoPwCI- z&+5UnyhUZj`l<$8twg8s7ps{Wc@ zsaNY;^j&(h-l`wgkLkzt5A{#<&-8Eg@AU8W-}K-0b{$hXW_Qf#nAKP(9i)!f4sr*jgWkdD$n41OV0ExN zcpZ|CiVk_lrH;!TS38s)s*d`OhK|M#ZO7e?dmRru9(O$Hc+v5)<5kC-j&~h@cYN*u zI=*&%>#*x|=$z3xyK_$Gg3g7Vi#nHfI(52sLOM5eZtOhXN$ia3Bz2NI6FXBoX`S>= zMrU3ptFx%Hy>q>Y{W}yJ%fmUBWI|m!?bC^}OqC*SoF{U7x$YbWL>G8SD+S4UUF|1}DP? 
z!$!jv!w!RoVXtAo;h^D|;kd!eaLRDnaMp0n05kX)d<}kv00YW^HUt@h4LC!nA>2ST zkPQijBm>1jHP8(VLzW@WP+%xB6dOtmd_$Q*Y>*gahVzDNhI&JTL1WMwnhiQbi=oxf zWiS|w29v>Tuo$`xRzsh`W*9IG8^#R}4UY_u4Nna(4X+HZ4Q~w}41XIw8NM058-5rj z40c9)<5c4`<8-5gakkOXxYW4JxWedUbTvYZ8;o0wTaDX{JB;qeoyJ|pJ;uGp{YFpY z5#uqVml1A67=4VsMn5Cg7-Ym5Nyd0%f-%)dGt!M&MwXFnL16Vl{k z@-_LH{7nHSlnHCXnTVzs6U9U|rJCp_hAG38Ysxb*O$8>FiES!0aZNlE-y}4NOk$J7 zBsHBkT`*lVT{2xZ{c2L0R3^2l-lQ|NnA%M3CcVjQvY4!de zm{ZMZ<}7ozIoHfI=bH=69CMMm*eo>5%?k4c^HsCX++yxBo6T19uzAEhX1-;9XntgV zV*Y6UX8vxrvrMr}wM?_jw#>22wal|DwQRBMw7@M0i?0P`!C68rVHT2wY@t}_7M6u= zDYEb^5=*(I!Xme{TlAJLi^*cPbX#ne0n3nO#4>8RWw~v+Yq@86X!*_Z#PWybrRA07 zjpeQ7o#nFyuza)Fbvty==$_R*yL(Re!tSNr8@tbTpX-Kq`*vfygStbyaowTa#BN4+ zUbm!M*WJ?H-rd!0?e6XF?;hwL>K^aD)%~#hW%q|}haShCg*{7qoO@h)j`Vo-oa#B- z1MflgpnHOP!h2GCXg#tXP0w)8lb%0%p7+eQ&apaL7h0XHE3GcpHP-dk4c3j;E!J(; z9aazPUh97A0qa5QA?q>gajTd0tku`*XAQ8TtY|CF8g7lS60Kw_)he-;TV>Ys)~nWQ z)*Du(Rb{QWHdr-Qowdtqu$rtEtIaxWePVrSeQUMvb?BYZJGx9evO~_r6_ys6KQbwlAa)*B9DH?2GD) z>5J{7^ilg#`{;dneayasK6W3kkKZTklk}bMyU=&B?^568K4qV(Pv7^X?`hw&z88Hj z`#$u2?EBRBxew^G>$mTp((lmk*gvm-LI0wDr+(*t*ZwX2Tl=^7@92m2d-VJE`}L#x zvHjuwg#O6>sD5&PLO-QHwLhz$*Xy1%RcVgIB4C;iX*-}b-jf8YPH|69ME zZHjH4ZK-XmZKrLQ&BJ!UcFcC%cG7mr2DkazP&S;6WQ(IuG*Bg zdRv37(WbSv+w`_Bo7rZu*=z&0A=|KR#CF$q&-T#v#P-tm%J#Hi=my#c#s@wOd>)uSIAhRpaLeG^~4e|%e24#ci2QLjO2Wtj3gW5seVB28(V8@_&&@$LFI6OE$cz5v0;M2jkgYO1E z41ON88?qmoJT!G^_RyT6pN1TV77i^QavEAWw0g*S$YlsJv|(uD(3YVcLmoqWhxQL0 z7&@aRPd^mPEemHfQHcTJR z9L^ff8D$k~x|BimBes#jk@1mRBX>sbjXW87I`VAf<;aJTk0YN(fDyY<`_U<*4x^5v^F|kp zE*xDn>NM&+>N>i0)MIq-=z&qs(c`0Dqo+pCjGh}sjQWoHjiN@eqmiRgqvX+qQOaoU zD08%Ew0M*^Djb!MDn>7iUL94Asz&7)nThEemVb#!2Kcyw&^*6730C!^0s-;RD9 z{Wki2)NX9b*u1f&W6Q>z#+=7CjBOm-JhpXg=h&_>kFf(|$Hq>Mof-p^31hxv{$l}S zs4?^yZY*>xY>Y4#HAWgEk5R_xV~nxvG1eG+tZ0lkCK)Rqs~D4y{g>9I{(rvnLGL8* zlb{@R9{I+H>ig7phQB`WCZ-_bCRnh3~wQ9r0I{$4Ut zz8^{Pnc@o?@u9ky{-ovEP3%GT#j;%QnaEaDDrnOuf-<2gv_0$u=)Pxy5qenitdx(b z4xo+hLYHU7feOfrLP1#!Oz!t7VrSgN)K_VkJV!RH;Hr2W2@BYSIvcbaGr`q9sWnSbDWZlq1`K9 
z#hnB&c*jdWmuA5Jh7sUDg1!V#3k`@$$o9=Y$)yT5OM_v2I62ajI0I3PEJgkjtxIt% z9j?5A+>ZOJ#0!2c^i^1Q_yJ;GbbCB2`vO}inu=m1DifzrX^hy6Ygru7F$m)!4?*z^7Lh#fYKs?5;o+$3KS@(A@pEdGSi0~kc*NA^Fic78QKS0j`85M z(1!>yu^^U7en(b-JN3IMx2Pmg2zmf&562k-#v)M9UXd-zjo>UQS;TuJm<(z;fzoFc z9o{~E`%nmU80G|4gPV{48I+5k5wFLcrKTqLr5;TC3VIpz36 zo8bG!e*hhh*^j*u{0Xc^A15Rd&PP;7>Z4jn->AIgjFiJ^!pzB87qUvSf6w`z^EOw{ zgtF>cmkTEYpynwsi6SNA(gliFz7>SOB1(v!X?u!r^0)rs0rP^UMbvyNk%luFi< z3LN=uCfD7W_2oK57$geW(p@ zi#k$Kf?3L_$qFgC1RH>Z%gBDSG3SH+2--w|5nPC8q8}x4li#ESGl0zV*)ZmV{K=pa z(#@L;IuJK1mcow1f?y`tJMR?452UBhW1pu!T;GNM)PP?COqhZoTkyBgaXf;ciF_W} z17-w`pd9@(Ne;$^#u7zIlPPVW#Gag%$(WJ#4OAy*<~+#3v)nn~irTsJcs0CmaK3L4 z^e&#u#^f@^tN>4JckX#`|8jlh3SUXurOaEbUhX5F9m)$U2~(t1VHSkYK--L)qvWoG zdmuj}Q+yl(-s2O)X2x{J?n$}q6Ocq?CUZy1_COK^7oa z^lv3FJmPLjb(X#NPu|IhEW}+uqW}JYF9F{I z7NSq01?Vme5BpQlyP%9KXke69 zb~BQ_XZXIx`w|EEH~jV?W&yJSr@-Gs=K%fq3H(pMT)+{S2h0Z+02g@+fknV#Uf#6=~=tpWB>*8;8p1en8E2doDY zS>66i12+I0fla_>U<+^*wiSp(c%k1$I%aJHlF*c3X?Qp7V+aXn#;1qVGM{Wlw z=#3%saXSDg;0B;6?!Zo97qA=H19$)f<$HmBz<%HWa1b~I90ojrBf!tVQJ^#a7;qeD z%dRbXhdTkB1R}D%fK$L>>}lW(a27ZRzyNOm4j_Q%fsn%VNJj<|@Bw@QKfoUd0KR9T z05os_`;>IprzvhZdUx(#uyJ(4y9yLKO(_46M#AretmuZAb8)Xy-lVOf-)HpXm$KaW z7(sJ*YkXRghU&}n_d6HR6gnJkig*-@iHij-wYSux;6l<7W-j=lE|9ZYARvKnbLn%r zK|DvlNuafh51x*%41Y*WrjF9<3Z7LyhOq+T0@kDFU~90ugP9>a@sGmmlD=efSfD{H zx>L!-G=|jS=ELofJCKV5c7ay!PoQQ0HrL@tWq*r8Q zajv0ExCp{`LTf}3X%#5J1r{xk#KN-E?!lCZUc?A;w(phDgh)8?0dZ9FDw&yiIKQdjGpmG$1y;n~BHvF6WgN^40;2iv%7cL%7}aMAxC8PK zDDZ5}UsZV)d9ef@^fY`6VR6ih_@i`LMlUlQbayQH@$A*C?%*Yn(XrnWr&8i6IR*E5 z`QCGa&WFs*Yebw6BT%bg)8J7E3nH61FS?3e1FBW=IfI2OO8WUXz4w65+Rwf(gX2OE z;SGe_(Ldrh(e5(WvQ%YwX|z-fI)4>NV?;{KO7dnJzT{^yFZ3T5XKnnc1UBUZBR=bO z-nx>tywidf#1iC13+1^xLG zS*v(EOaB&miI+-36&&v?@l~{Xexv9<5(ltA?QbYLIDRQ;CjF6YpEV8idEXRIEpZda zSA3CED~nPyY3CW1+$n-g&|w-ae1Mt~vOaP>xu1dq{T~;^1~NXeJGBFxy2m6WOjEIdK zB)pHTk2(@7i>If(-gMYGpN)b4NJ;q6gJDq--y$aBHphpND^s@Q zEnx0vEhsr&zF+3#r^RoMyg{0g8_iA=Ux4MI$3t#~tca{-_-2;oY|d@TT~$)f>j}`J 
zO9O3~_27m}Cb2&zJb{|o2#S^2yyF!P*@Z=CMV`{jl_8h~q!mejf&TOCyz2-7`y8SS zVL@7mLei8J=ae?aO9o!tP-%#p$GnV~RS~vGm+0P@J24@Np@~N*9w~QHmSpL2rsmplHgJlHHbnAB zk%_O9BT`PJU6W_Vl#&{&{b4)7;pcsEo0G=U#&R|YZXsU;Re?g_ zOuPwyBm5x&9TgGN6XTNbDJh70B4bs~q}*uc#lp5C&;yfSMU7X^ga3rw@9XVXKu9Ld zWXxg&XBFmWmAw&7Aj?QmiAlcFz(d$GL1#e^d{g92Vp!AxP^X`fl0@fb7BGL}d@2bQ zf2eH4-o(uTbyjb}SHd^KJklGIBFU2Uh#@R9Vw?&bIYG!kpI-c2&}rQn=@H#cYEP=9 z2-C~xD>4eRcjx3WUz7kMefduB(>}dEcYLh)E#Yzx+MR%vb|yg5bR?gQpmXq%3_5)2=a2uDVc*}8Eg`qiojq~f>(i}Kv$%JSVy*} z8mJc9y|ncu&gBl$d)_W6DC%!aTm&IzUTj(%BmPO!X6p0YyQ~FlEN5ZKT<;OMqYu_c zfVmbjC$fY_%if;5kU5>5&)!?IzvOcHeA$-BWY}l#Q)qka8|+(bWY{3_JNcJ{&6Ld< zFY_i9tlAN%G*$B{`^~@>Hxt2S*@I2_jel4ODHx)nQ-Q_!pj)`uS{~=rAGYz@XmjYh4 z^+m^_CqgcT#^D_!GDyo4jLB^JefkQ9GDlzZRuljni`J zJ&Z@0m$MFmn&e-(%L|^dw1p&2bI~$>u=tK-ze4H12}24gBGg4!$6#aM#V;ZUC90CH zfL`-{2|siX36*6pJqP2!Dqth9eclJWGrb>pPr(23`Rp^z@3G$<{}(7cdS2iQ%oofQ z?E9e2!Q~+%4E02j%k0)5s`bVPrHQfp0=4V8aN0r0%4(R5?|g zF3U^h?+~ZJO2eCCf)lPWXTV~BQ<1NUOX3tcOPMjOPhr{d^GmBGGhvOOZoV3E!tc7@ z5`Rz3GVDXFM{r^A`QVP=_c#PGG#V9mFwrxyJ@IogfmV|~ocW&hft^$s3W~l?;$Tz@ z<$ICIj|7CKtzjhQBqpw=La4F8vUDcuD@qqs5_~h57vhI2#b<_{Bk&_`MSPA}O#F+O zl^9HkOOIoWG7>YNX3fv3W@fSlg)4baN_#7v6^qcZ=&iwbaSJ1oB6yK-k|8!E{(Z`| zRAXKbf3x5gtQ+oy$n%?pxrjN5&mi;=+~ODs_LO6k0!kZ&lzg3bo8gynH8U-{E_-jz z^8A+t)WXNbsXTruM0gJLHHkh5tZ!5oX=Ci>xY;=s%>C@GpvV2EC{huM)JD|Ch%$Z@ z%qh+-3lgm)uw!2*!~uN)Lue&tJ?;z65x*Lg1vVxDNq3U%Q%LFlphrr`K3`bMUkv*V znHaMqc52$*^c{?NKwb3GXS+WF)qt6cornD`$P=F%wlI8aL>*B_V#QX+ttMY04<@Xm zmQcCWWy$l>AEs}nJ2Un&wt)8S^!%NLb9kY=23}+7-{L*eF>ew!B$6MipciE<293Yf zyco>)pgG~~d05^fNkI|^BgJJz-j7{f_-mm(Z?Wz(3E!2 zZT&M%OD_S%^w5mT%#GR5TwdPoyg!*DmKx+HJxYEC#ZD4mRH_A0zkiI}QP3E7kci5M zKxc5+Cj+$%O~$lg24Vw26WfKNr7*L;=ROK52)Q2eGwvl}J~2MZCz>6zEB1ZtYI_Ty|x?saf_d2+$b!W-O*k_5m| z0!3fK{ElJco?~WVJ43QbiNL$qS>)x^>YTp|+hIq1l7L&Hxui;1mp2j-><0;ahMk8; zC${8V&;5~)X00qdEHa1|l+OUQ(wV3_Yzx?deuP^T`Y3EtG$d{k-&X*WILcBKY>Wmw zBg{L(i+CVKFDe%MN)#0r2=<&FuC8pC-!<%LP(!dd;%xGVTvOQ)N{8=D+#r7o+lHQt 
z+aHk|^9OC6T;S90+ZnJpXhrB~M0IR&YDSJkbWEJ*L&uB4YGY?+oXI{X`5JvTvs%8q zG6c3CeiPw@25^6e?TFf!xI2pi^zoq~R3I6c!g!h!4>sAJGK;w>K)iP?s(8gM#lZS<^Idon3ukU9sHEXirM2xj;xv5GfC6pe&{_f$3mPb8qfpaPBT2SXy2HumCpjE36QB z5I_rA5WxY8fE!d=Mm(n&n8w3MFZ-`YeMYYitjBPHzk^Ev9sma$|9s$JQ~{}t#Elii zevDlhUkW@R?@mA`2!Kw|)fNKYsqv|005$zFLj-Kk5Ci8}54aMb9FPK-(hA^jnGBEv zl>iuGiB$kK3Fm&T@hCReZr}{ ztH3qj4C*>?1JDE|geU>$2yO%|sw&EZ^q#ak?nwOY_!Z#vDjbxX$w?{zL#YCWDduEY znjoVZPy_FBaD{y(HGr3(7FZ^(1L}dFVVmF$fCKW8&-;j2Qg{5!q|6k2T2;j%psY@gN?VjMGv5lOKyuZ#PgaxLw8+SBY?`Kbjf3Qq+VVJa{eh;~t7vDcCofE{gd>T|}6 z%%$0W*_F9xi?)g8N!~_eg?u5fBcn;2q^C1LA^ncByVaH&&5;TTi94-JQu50-40A7gj4hYE3G>1B&Z$RWs$QxfHwu% zY-bLd*9m0tkmVVnZZVYD)fo_WP04__3NHTVQg8qprNwB1e-15+DvFJayBQ};z*9C* zw@{O^JaR30Gr2;+eL)w%g(19?;hPXrDWO5(S z52zSFvOV%fSdzj!p!2>1RNYVW4uO9ADZYgtC$IqnzzozNFcibis?8b#hJho981!E3 z2rvqS$6rZ$k~O_}3|LZHQZ^3U0&W9$fXu9)@J?hRqqyKMa6TXkcQUjwrnNAuv_^!6 zt@eEpyOl{6y%)chjD|fX@p<D4;LrG{N++ z$w;!#E1<>qq5oI30kbHiHzY3ZHSh+AX6(v+3kdRy3*P~;V2}1M;5}dm3-r$L22f84 zzr=a))B$;c~%A zZnJQzPS|mPA*PmdD*n+?3Qp)uiuY`~kjU1mt~UVhXE^3b@O78eSg= z#ym#O^nDcAfkA~+2_NFR62Ag@$>-8mq@SZ7%6O2Op1YaTT@+sYs5pfCX9-Xe#TyVD z^-lAygA);{h;ztjWGGnc#i1Rre%L#~6}TPv1;n-(2U1NuEWw$wgX%#=P!p5SrgPF& z>BjVujJU!>C8tUbmkLVP2sR79iUK7ul2zr?k(=-mJUTW!xjU^rJ(+>?X~l|zJ>s=w z34NZJTXDbga#&h;a)CXp45or%;Z(SnZzo!c@eEc4e+teHG2r%v9VSo+Rzfn^A^#Cc zjABQJ#dwn1W1q#Y22F%jaH~)npF(KE(dc&U+jzoNk03udK%86sd?@^25ugpvNzJ+at<;B@tXI?DtF@L{kMD$eFPnb@x zEAAAaFf*}8pIi8qN$cTaOl61~7ZW-{ycN5RvMudrh726{i^<-^-NnriEEU4N;Q_w~ zT?yVC7M-~t=IveL9SFaN?#hbxCHnsG^G26r?g!lt4gv=X-$iaBJ_ARyPRA9;H=&<(`L;6N31OZ?LmNa`tC z0BsrLm#j(I@3ZAOjX6sTiVD90Q;J)Q6L}w!m!<6inbDN2>0q;;irOBwCj3;ynTS|o zMO0o4B(^55H;D)q^DtB`^-=Pw6i&+Tv>6#IGrneTXNp<-3I~c_aMdMlJe43!lq8xi z&hvif^C#L8xGWe(G=ke6pVLayE@r>V|Fg(i{HjDjSQPmk_!JNje~+=f2Z5Lz;qZR7#>iFLi zu2ODOy3*#;b24h_k_;(lW-&wP2-YX|A|GIW58oQOjqwWJ=|e+-SPSUVD+#yKz>g-J z8E_Xk6$$k_5cn{d9I`Vsm@qqHcf>Cdy-{7@klo@Wmt;ZO-fVQvXs#}=0sc0i9rFwm z6>=eLU1|g4ZmtW{#Q(@IM0||&PgfRoA)3)GIKO-+_ypjK+=pd^F2@hy{|diMu*Nwj z&&dT;VJyY2;4}L( 
zW;6s5d6oB@Pax_e<`OPD6c>IlgPa9n=9X54E8z=+>>_2T7W~z)pQ2KuRz=6eH6{1Z znsYYh^%q_({$4Vle}*sPI|`Nxq=Gx5-O@R-c`z(Bi1sB7k#0%fmeG}m=2EzyxsE)@ ze`D^=qaC^KdfzIQs&qz4rJ*#}AXTX(mFA&TnpK*Ink5|_ouMbY+e5qUcDp-?<2WAU zI2{{o2RpVizzq-r0g{m9GUO5nB!rop0J(5O@Jn9K!UwHX$yx88cgUvK}R z+NY{^No$>2zrFYG_x*i-mpXU#ufL{e@A#ef5B0B|ed(URc>C$oZ+!X_=RWxu^d$W3 zmlm~U`G7wvoZP(nwMUK~MIQT;$G-d7cRYXg)xUV(b?vjtcP;+@T>9SIPo7q3?e|x| zeRB59#dF_r=cn%Z?3GX7{~I^Hb>sXSf^VL_`STC|{loV@@~y|d|Ech||H4Zjf5rID zvGFrbo;O_yU1#6)mFM5`uFt;x_st~XKR){<<$IRx z)ej$j@8+B578kyBfxlyQSLN~UOCNjXE3c%r#bU5}%Nl&`@Pg&Fuit+8(pT>I%zb}$ zU-Y_g-FH*?4tHdPIKihu$4{^*=vz_xgb|AH8G$$<_#M?1wM>$c-OwBUm%`|v*^~g6LdH%&0UesRt^><%02H*Y< zI=B3>Q%b#C<@eRNK)c3sj8!w)`^Z{jC{nO1K z9R1Cyaqit0uG}Hs`P4nRdxb~d^Jwbvt=G0+gJ1q$Wu<+5`L6ZR=C0$9AOHM$v!un9 z*ww##`|rKu+RguY^YQ2Y1lpZ}+KZ@;pB z<&{?!2j6*k{8ZuQkDUMOyB~Vsi{CcB`~xqoUtMj0YnPwB>)pShe(NND_HWN0U;5*F zK794b2Y&p4e|#Wm))?wPeBt5PTdR*7Pw%|;^=GCp-1oA6eErScr@@zh`RHrva?#%W z!%6$L2e16yHTW^xV;^|p*IvlK{2^taqb8>6!r7m>=U4BAuWDC6a`lD#|LB3cuFszM zz}wG1z4hD&p8M{1edv{Y_4V1Wox9`w$1iQ(Hv6`ZJpayjE6NMXTb93h_}byan@^sw zXZZ`03p=*V&DZ?3fO?OgA@=O51g z_j8ViKD+dvJ#){m-;=uXTUX(SAAD->X5)R>;+Z9UT)+GB)f+dSeelx{e(do-d*c3^ z;yXX|%=LHgzV@x#xr;wxQmZe&Sx{bGX`2roKXm51&wcr}^OrAPTHG19_u~B@y}tGE zlh57tD)ZjPl`q`een5QY7hn9QE}i+%S^3;wpUa<@&i5}3E`9G^pSkC(S!LzIebW8! 
zz5ez$eC8qcEyfe;x9iV6`_j+8Kezbm!QY>`dG^A&?>P52=PzA??s)Ire|qJI@4I^S z?g#FE>!%+pKk*w+edy+ApLxQ#_P|TWm+!WI$Db~rIygO^TzuQbU%2$0cRqXN=dLua zed5s@&&#iVL=9-KF6_(Z@*S(stvY9(IorD9(*0j~0DkVv#$Ot@AO7m0)vP!0_xFGF zZSI>-U;5D7zHjy9rP=+3C#Q>7R-d`NIs21m|J1}xjqm^Sn~`t-^!rh@e)}(7y>#P_ zr*1!|yz_70H`f1J-&*ij{`S!?yx)5%cKPM2zj*^%K6qk#7`sVcZe1Q-7M54fwXXc; zBU_JuN&Bk)-qmH33iH2jwvYGDymJ2K^Mwm%F8#r!SMLAu`+ws8@4NPW4+3xgkw>oH z1V9k9fDmW}VGsdr;1;+I?tr`C9=H$MK?mprQP2gtK@9YPelP$A!4Mb*BOneEAPG`n z6pVp1$bfN>1ruNrOo1Gj1#@8D%w8{p6_5w3W)6D|tb+}(3AVttnTg*6```c^f+KJY zPRwllIVgcLsDV0YfD3R5uD}D(EL;hiTVC)OJONL^GvHb99C#kQ0Nw_gSVqt+cLv@G z-UZ$bUIy<0uYmW0_kmZz`$4lbD|j8e0h(@N(41<)H-is>Zvh_$-wM6}ei!(kz>k0* z1^t$QC1?p*e$H}>Kl0m_FI#@cq>um*2tm*`6ocLjJqf)HdJ1|PdIxk9dItJ7=-Z*^ zp#QfFi64MI4E-SVL(mUHAAvp!{ofNLeg^tk=(Eu0pr3<25B)s!3(zk@zXbg<^efP> zn*SP$6|!2buobb|tXtM?>yCBTx@Wap9oB!J^^IA5R=+i14O&Ciur*@Et%Q}dQr4(7 zW~Hr+HEw0C32V}tvU1k6HDk?MbJl;K)y-S0R>4}c)~yX|)7rAOtsQIEDq4HizI9+7 zT1VEgbz+@bXV$q@vdUJ)s#LFTh`fe;xh}_&4D% z!T$~ZE%>+L{|#2Z@}M#{~Z1c z_%Gpa!FxyuArK0QA~A$U7$lCcNCHVBDTG7PNCwFwIV6u1kRnn-%18y_kt!k}HKdMo z5fSMjePnZ)- z@9=q3$*?o4uJzx*oL-w#e zV#n=-owQT-s6A$Rl8uX+3WU> zy=xckJ$v6iun+Ac``A9QPwg}N+%DN=yJA=Enq9Y><*4nZ7{GpDHw(eqH})g@vE710 z_zu2{<2ZqLaS=bj*Z4X7JbpKR8GjUi41XMd0yk0m_+Q|EiSLp7q@8q-Zqh?~NgsKO zD-5c(tai;J?Iu?$QdJJ zR1EkR3W~?%poAIPbh=4U<2RLvBw17R} z5Nrb-0DwW*0zz!W0~ZV=xUf za2#gg1e}CZFbAjM44j2?a2_teMYsf);R?*dRTx141Vk(dgjf+6u_0T?HnM~4B74X_ zVn-Z^6G0Id;zm4(7r_u8;zt5V(0pIRNCd&nx0d`5Z|1+gmkp$ew2(H^`G4?+@<;>- z0~EjjaexI9z%AQJ12L0!G6y8U2Fif`x<%hMZF(BanAW*%+UL&ew!8@pOe;JB8n0V! z(zNLllP@v`#-?=^K@sSJ17HZ;ve&mP{w>RY%Z3BCT^nfIvsrBWHpphT!8WJOYIE2S zTgZmlP}`O*V)NVFHk-|73)@__ZCk*G+dQ@%ThK<>yf%wD=UB}-2QlXy06YbbfD_;t zI0ZMrAut6dz{;Eh7AB(w1S|js_<;Zr1PVYAC;?@l0`Nc;5P%v`2e!;PiUZOB3EZ;Y z7Sn!PAs9j+8?O%u)2#ugIG=Zkj44Oj{Btr_MLS~T`$bfc$UBCl)0l@lyWz+Zne__?Ne`nWWAOyGo zH!yc>qFCrSHHk)? 
z&X(KCdE&eWi}#qQ&y`9{wsYgiPR@f4ysB@)!4)-q!dTrN&u(rn7tDmyW73{m1q}wx zMVxJ4%QtkSvdOF~061JZcg~Y@r@T4cH*)ByW6z1_)FZ}187$|^`EzMJ5YFI{TqqaL zRXvt~iUxCm+*Xna(!mIY=gNr@;RA>ql^amqOwvK-B52GziYiz%2gPUZST5!0_~!0S z0%Zk{hK_P1J&!ZF5G!W(@ObbbNTB%`7)|ClM=ur(y8}=HV53^Ez zv=|n0tz09==j>E1*UZ&(&15yVj~l6Wu9JhpQZ5(wCdN56=|$(cVNS^v$e6DYk9Xu; zlkVkKzNN35tA!>xC7F&6avCP)rn!DDpPA*z?9i)XYEH{7a~Vf2xyTv0gWM|DkLlPt zM~7f)5Lx z8)q&IuwuSTXYpQsH(4V<_m;b#ALawtAa7(Q;V{c3NBMPnoS)=R>1lq8I1MbYSw0a| z{KJ@(M<^vA=asyJoaeQ?H`Vv6`6bp0NbyBp&mSeGk)wRywagp&EP0S$J~d%;m~7V4oRUyB=a2aJ|P3$B8vfEBz2Um=WgspEjZ zAdp~W%Nr=v*DtrDAF)lcQJHG z`9i56>{MMte=>NBdzbJqu92Vhv}0DMlZkME|1ce=PknZ5ROoq=sU|xtwA{DIc(=HC zbx#-HOZH4!UeDJlKv_LGj)^|iKQ1(xdEqp)3MnQn&%#YjPM&P?@)#e^=_WC6RZs&| zc4iXwmL^AUU9c24CNmEx9u*?RNx0rD}~YySE9g%acW+gac)j7jVO#JC_bT-)Y6eN z5YkFHhh8#DqEmBsm__oSR3=v?0;{K%r2&0dT9=MWo6@oKxU>~HEm@-{C0FtwT&IDu z)en||c#F+**7#1@=b_VfZnt77!)2QCrIIvK9;A|i?TD?M3uc3U2kYA*@{Zl|UU|Fh zbK2coWtwv)Y>q=mK3H&A9BA254sousvurOr@%?fw+4o^(UwJ3uF7IZ!xVMZ1{N-vQ zQ1+CATq_Yol<#<_lC(3L&S(YQ|awA4N zQ{_xKTP~Dy<$SpiES6z^sk{mLsB*bdt_2Z)fwOrMKE7OaRm*-#C|fGEa=qLrx618u zr>s(la}uH)Jn3+ILN@=92EtDFI695_9DdI-NreiY84(KE(%o{ylM0}&o^v(@g_ZPQ0gOAMK!0G#jdtn>19hRw|4uh*WHq zt;)i^UD>JZqCv9n-mBQS0wuWY6=$XG-mf?+t_oUlS3H$4v+o_C-b%3IuN)*-$r0_V zl)_j=$3vBAfT~0*;fl)@sl+Qxg|3hlq7tjDlc@?@Nmlf9qJmcpoU34gbR|6Lq3vBdWL&Pba@ zBkS-yc9`_j`~F7aup-b@Amx){QHLD1`{U_N<*1U6A6E`Cd)d>i zEMMScqE-F?-K*AkM_k}} ze&cWQJ$}M>`361Yn~4rT;71ugSf*Nh%QNQph=Eh&*XW3Mdi(s8pYtnz!E3zAuQNK2 zdY%4F_>iCR3g6~8sKGD!eAE{3p%TC54|tb%iyFG3n9OhZ6LSk04k1-Fc4Rue4pB=L zs-BvT%8_q7?QsiExEhG>Il63v->o{T18%E2q4%q5#9rl@cy_1itRDGOM9HyTwN-26 zG4BpCRJd9veN|7@TOEe{)le0yTEl^=yL!wWagi!sEjwIQ-3vRs?qD@qrK<5NTV<+= zDp{qg$!e-fRF8wPYK21mXw`#PtCM7=nyu!le6>4;^Pszhfx7w=KtF>yo>PWg71{15zYOi{PIni0w=bluj)vds|I;;+= z^J>2;SC3G&s#I+;qpDR$)niPruByxGL3L4GR}ZV3>NI&=J*wj2ePW0nqbF6|!4ap` z=o}Eb7$_)&MSz5W$11=ABJfn&*X2T~Y1k&HL|7KtX(%1o65gO$df-ks49MYh>*d_4<#Kb|T}8)|gr$(e}`_SgjO~*F4@tjjaXT-e8qUU<*81 
za}YMVok-QVTDs;-Hp>jbC8~h|8)efpnaS474SUV&D0$OFCy}pF!FaSD#PCAxBzfR$ z1k1Hzty1%6O0^?`uc1`6wsF^LLaknF)Q;T8j5j*>G=p8T5q3D5H4yFAFn_1E69`1w zwP1wcwgbE)Lkw$9BJM1QhbcV}iXg2=&%N9wlvRz2xuoqqpzeW$L4MY39mnBLVY6%Xh+8QV*r zCQlQ)bt8bs_UiU*B()RUuOGYZbw|C%LEI|UNLhnueHO~6+;yz(tslBO9#`E{_tg*a zqjbX2352L-$cO~%8QNdpp#pV1(&R$*aDAMn>gEzEj@RdOt0va6o_L(BMk*-WE$t0XQ;U=~*p&s-fyXy6Yt6Oi? z4I<^*4J2LJK%+juQkX>7>WrgVZ`ae_T`c5IdBpl*Kucl4Ufq#U>MpcjU)Be8Dl@OQ zx^2G(G`fkrdj ziH8~~ue;%GxEhXZ&E;=I8y?pv&~%m>&Me;u;FU(XQD`uYUW0B_8@WcR!8Q_&d;?r$ z8eu;h>yum~-bgmm>1it45E|V^tubm08l6VFk&6w;W~1H^8{@{3l^c`ByfJJnqP|3< z(QgouEVpjV8lcaDwq3;r8BrStjl+=c+K);Ny|HR|y_{#!06ZziOigQ;((^{gm53fU zHjRnzumLoy^`pi~xVO+e3o^si`#i)Pgcoo0=!hENV-ha6rovq9W_&X>;tvyvydMxgfOoh&^ZmsYBDf z47QG%V2X|RJXTtYT3eQuK&_f7XD01v*;^}Ow>3|D8MsBqI4ADkY9ZnM)?Ulj+G&kl z+pR==KeLD|32)2YN++BxS1aHo{Af1p5)<7t8HG6qdCUULH$ zX_2jHrk`e8R10sFGIT2$j<>j$MkiabmM@xW5iO>bZXG+KtxPL~x5L)tejwONv;wr3 zn7HzRSR~w{!WTN`Qb zwE+TZJKD}R<@UBcZLA$=M`?H4*Y0756xt5P{q0CQ)W+LXJJ_E3ZE=5Vkq)=X_L3sn zJMD>LPLAz(JKB!5**4S8Br@$(JK0XROH8)Cj7pT1$h8aYe0$j{wnt2-ahmkG~V>Kt7fYb(WD}$L9@q@J^&dbn-s3Gw~ASi9gzjb*PRMVi~`e z>Fh@7jwdU|6P;9t?UcA=hwH>LxlXpTitfd&zDy_Gai-C9z9TzVelb|+6g%ZkG|YD@ zooXlTs&#};qf_sMe6|edh0&?E>mFxXold9Q$)nAVKS;*foqk6SiJb_RV0)d@&ZHCe z4LrlnN!pQFQRB|EQ_1k&V>TI|b)=5knRk@V)R%T@9Tk^5jm)A0E%eT^GwK+fP+-+L z=$xk4ox{$cv+2a#*3ePsxO378I)QF50d^s`r3-bfU8HO4!rfwGySvrh?e285DHli@G!|o;U?rtWH5#30)!jfI8TX(D?@ouauG0`sUNp|Tj(@k{Q zt~EiK(PG?_>T=z5H`~p0D^9*UX7k-*x7>~3m2Rm^gmT?Nmr5ieLbuwjb>Waba6s3) zsaUfMWE$NGDR%qaLAMm`bvxZwx83b_$K7Ff)SYx^-Fa8)%H3&K?J8ZZOJ^2cy=!zu z_bHc5uDZ+aL3iCf?jCkG-J|YF_p}R`K~0NjXLB?p>T!3%D#GG6jkscdM6`)p;#P1+ z+!lAmJ#oVBi*|8hI7Fw2iu*yASc$h&87J>_i(b(qs!wb{nFH!sdR)ZX zi;_qW?%8@{aVl>2wtBYEBvT0O^e~L@Af8^(op$!juo=16v-eVA0^jfL_O|2cNGQPv z)->94^%A|RH=Om5?p^^shyh7YZ{zXyd_8|}D<0?td!e4-Euu7I3-^QJ-c~q?MiY_V zjHOAkNA)Ig632VpDACJA8jdYbw6~7MdU0Zpvn1#q(niuA*w9nyExmm2m`F1nVi~CR#OREy z^%^~)SMPyXvj<}%Pa)mzb$a{GUT@_Rd)?lk*Xj*>TbWUB+*_2Uy-9D@oA*2!si&yf 
zh}=_peAvgRJzG@kg`#@T>c#WTs4K*}4KwV%>Ww{1GqAkq3i7IUMqS;Z|GIP*SGbz`+NQU{!V|lztuD_kWO9?*lpAQHs~VMmgS4C>xFE0MX3 z5|#(-;FO3B5`)y>FqRo42c=}no#I-K^x!1J4HV~`$qp)m+<+ex2S@Q_s5~ePs)PI> z7Ph9iaB0vPGzM*R5!)Tq2jrkgq}bL#7yx7e+X-87FWDaq2cto6Fc{PZ;$S+M3^u79 zY&@6`W&>rQ4MIU_pbw0}Vz3zm>D6F4I2f!4hl8WR@qlxm3{Fjn2spG1p`mpM4{bwa zxHa4!?hN;byTiRWL4U1{d@Raoq{X^dn8wQ3b z29l&FG7JtwLwraK!^168Ktm0q!`M(v?}nKnJ)Ge2Av;VA2kx{xIZO?ksb0F2#?4u_-RWH=sL*+V9h-C}3MVq`lpA5MoEcgZ&n%Ivne5LSlL zFyqhqCysGIh_@Y!ArY;{>(pwf50^uAXbj<0*>~d5hU?+s(9ZEIvJHP%XoLZ zH{Nl$$C_hjygjy#q46!DQ!jc%A!B&FHCD+oMmlX~cv2$+(6&ZG7NbM+W2R*dT{vY21#= z<4U?W_PHkG`FNL8#^dp9yp;m7#&|Q<$I(!JoJoS*p$TeRjF)4t^I)uv%dyq?)B#e_ z3DUR&TX2V*9>yk~Nha%_EYseJZ{nYXCV`2C2~N`N{=_h)1JqB)u(IC^MdQkBGT|rHkT4;m6^wEnJ6elOtd@`QQ z(BWh|HJbD%gUNIvO=gqHq&KNX7n5yHpBNK)qE57lG8q#7?0RxI0jAsd(d2Zpnk-G> zTmwCxSf(eF&BPTun2h7dG?<2`*6IG#Hg!ygY1ednx-+#;(W!H~HQk+tr{1Z1>X`r1`H@y~nh*P3scFH< zN4RO(pPqId;Y4OS#mm#eREZX+xoOLromQq^tVK1Z)v1T#r?qK*TAwzj3M))o)6TT$ zD@{p9ds+{yGTo^-U6Np?H}$0Z)4{Z#98QUVB{Q4Or`5=GY8ry+WEv(;@$poeT9V3C zp3Yr6DRp{0)u+aEF_|x|(X!gXz%}Q*G$!wCz2anv%>JG_%aCGiMl{Av4=7 zLh$LW8I)w=+q0e7j&IkqJDUZ`_}(n%+MhXQ2Wk6^!JRX7=9=Z*?wM!iotdH;mv82u z1!g*G!-KQXEIcE8N3qC^$0|5EgBW7w#PHbxwdbK`(U}mDu-I%j)h1S+C6^@WSsYiR zwFomy%#yQ6fHGa`rt32ipUs0sXJ%H#P+&LXO&rDCK3t89&Ms38}+zj^_ei^q9jpp*JgD*nIW=` z*=Xj84QIXCY}TK3X2-GitT_YHYOp(N%?7hYnDlKZI#qLu4l6U6jb}qfnu)XdOrGuf zrn8E_98zZ0v_4zTjF~5)&N|-3Y&F}=4rYh5_3UWo44ur5XX6Ag7qC-PSZ|rfJml~& z0?lQgbq>!X!AuRA+vaF^YrZ|-neWc`<~z~-xnpjh!;EvjmqO>RxqI%J*O+F+JD>UY zbJ*NB#}ocJISe#(MEKrLkd}na5Lel$+bKRE*AQ!PMMIHRd*Q&EyidaSUcP`Fn3A?8^_r~hZ z{#*;K@I7iYAI=AJX+E2e=hOLQKA#_W)HzRRbHOLimHAc{i*1qmd^tDfi@A_SnDu-$ zKbRlRx1{6w$^2-(nV-Y}$tr=;>D(eg(vAd6Rl+81ONg{5xx%~BzGRnB$tjs~CdnnO zIIrZ9Fv%wcB)`PsK}qt4q=Dufk@;!Y(^(xrsyMmZpDj4G8qhtkSS0@z|YQA0Y773qbvmOA(- z&c!I=K-#AuYAJ0w!0blqQl4-UKbDRp#c?9p6Np@AX1EYtdtn(%<4!)=MFAO{xt3_(YQ>=qzuwgnUaf4 zOg5cIp?X}4>Y+JK%Z%K_<1#BJWF?J-Q*u(S`3*l2%*exVTIS>lrTC(RgY}2AaywFx 
zb8=oT$whe+Da#d^mvd|}&<#~(L54DPSmwA)O%}a%*+RGEwJU`;1$qVOL-ec!-CsOybslJy9hnPKb zA$_?DeLksc_^>t1KFNB3UB0gvgBkEdUWkMmYe?F;E^ohG7cyf~vi+e}EIm0RKjN?B8k5?2yR+07~& zuB0TDw31RdC8Ok&yi!ogN>QmORfSjlu@NUI`(#atCF{zLE9B(e6?a2vDlMh0bd;*Q z>}vVFtf+LAp3-m*m4VV%M#@x)JI2afnJKa&DT=bssEVc-imuEP3uUPc0xRV}Nu*LF zff z18E8Mh3h_#>Q;@QS9PhjU_jlQRtg8WPqk(=X2;nhydf+}IHk-i9aL%hC=^n|Du+i@ zIpbFel~PI7oZ8fwx*@g2t% z)S_Bac~?aha9)M5s@hO%YF#y4f=N|tsjQ=|cGP~PtHwQjwWo@zHftmY>QEi2V|AiV z)tY;z&Q(iXR)eUfsw(6pz42rq1_nC*QMiE{s;*M$vAe)Fm}5MXsbed3siso9OwM_r zF4TNtnqI4i>XAC7>aK=sqfXqYZ|cVBPNv~IR!`JZ70@gis2wCA&8oo~qHSq5ZChKg z+;9s=Jv-X2wp$1`i|mmj=vV|Dsnf`w*5>xLMw)VxLAxfS4$Y~#G`Hr}P|c&&-I!(y z3blY1)WTXw!_7SPh(>6n7S$-P5sGQF*7Y-5#FdT5HCC%dXG}sniE~udDSH*K5zT0- zS3%QSO5?PoR@JgvPRnZrt)vw-AzapY&E=_RN2JRqXlbgZ?J;$&qqVfA*47%DsCBiz z7WMYDfi~2}+DMydGi|CJ`{$aZg&{e+%Q331=@CZOC4Cz`)Ytmbxzd-qp&z+) z9Sv=vJ^!(Oq;K>C{X{=ClhGgpHujum>Wi`P`#Fn&rgjX(*fq8co6*4bjD5pyY#R&R zVRU0|!(+G%)bJTj!)sth!0;O(BWy&BL^f#5C@Fv&gh3jV5jEVtm_Zx%5M#s*)<_tf zku*Ztl%ZtOMm>`;PUBgVA6zhUhU8Ckz0^Ua(>zM;pd};cDH|1oHvW9wiuc zqh>UWrqMFmM#tzHqR}(@#=saFBV%kpbkjFA(&WsT8$d!bWCQEf=8~!yCaTOZjD?|Q zG-GM3jJ0uKY>Xr0(C}uCjT58PJ2kd}1)u^3umqAo3RnRR00OK44md1M3ud@YOU;r2vVa3XfqkHB5iLDS-!iZaEhEd=Vs3;iGmE)kvd9+2qFOYIZZRwi z%hIy499Y(tL(9gJ1EN44Z~}^%rM16ffBhp10E-*<0`h(L0?Qj0f%V1Pf#sbyfYG(P zfW`Uix31lO{=r+9Zyn4#9=LTh@4EM&d*;2@UO#0HxOK%GbK}~r&g}fX!06%yKt6vB z5U*Y{kLEe^i)J_O0mK`30E>%P0rj@)fO6j(fWgJvZry+5{Iv&fjk)%E#~gUuowtVG zbswhtbaaXKJcGAq6}y(U3nX)q zWR@he0hlW}=Jp)(AizxOjJY^k0L=8CJ0x?}EGO}A*Yl?Ex?JI_Lhb+bh<_b<=J>(Q zDRAcaUgZ>c@Ztl&z4rl9|7?G`(T2AQQ(ZvE>2&_FY}(ro4dG}A5rV*^Mk8l!23iN{$sVYZ~%QfA}Kmj0KP`Inac zmzMjN_Rrt)W1#PI&%EQVC%@1ee3E$KpE|dH<5Oh!^Kbd!(_f>XbN$LQohSb81HeGJ z7x-UEFYtvm3w)*gSHOiY|1{8ii5#X7v&x>MZN}X ze$B};ok{=5Lf7t#$;IAr=1A|Xxf%pE=lfWn9R{`;@XMrz0Ujr6T)PVPX zs|LLD?2|y^?|u^aTcQDc^`G7iI9`SUH1}t~&e1!8E5sGx*WStl+rPvDBlSmt_h0G& znQxp0euBLL{Et8WJkWaR&w;}o50^+{susN>@2W#`Ye!#4uFUL*9iFLhc5%< zfB7|l|JJL(?|pv+ICK7aU@Pe`GXMP$8e0B`{${jxoZ2#f!0GlULz&qdYS>S`=mw>@Xz5)c+U-&;5d-Hg% 
zny~TzB9)!ANs3aovX@kc`Pf1wZK5O!DI!!$%?D3Yj@QX-M)a6YnBNS0&?X|waO zW+~2Z+}+(z&-2gkdtR@Mw`*q3oH=u@xvp#GoZzm;Y?wWl29*m4UMhD&tWhhx@KnZ| z#fxCMJqr@Y55an|P2e?D27>|1z$0P>*sbz_I>$*+lyifHo3$Z(K`9h1Nr1HCm$24Q z6Tj1G4*s57;H$G7#Ft(Nl_LOIH6Dmun#tWXoM6$HCQE44j+Qu;J{u90I^&cmq3LEQiI+oiMyC1%%toKz{x@@D(}&zWQu18fFG*8D6lwYY6nWYJ;F{Eu^+Iz+($7 zusd7{+|tQV8Jw@ad(i^sCjsr)LOTeKB4&wYWBPeITbeGR^}`$3%2H3;mp2MY^TcrnEf`q#6- zHu(fNyJ*2>b7e?e_#DXZTyR-wHH@9!2#T{sfsdCBws9VVwf&}`ml^@5JJ=!r!zvi_ zya$f@E|9p01!2+mAS?0*RPWgZ@3@CRJXQrpw_SittgKr!EP!D?3oZqDf$?q`*kH{B zhXz5wFQI_h_A{`<`90|Fbp#%+LXaIj4hxRffz;dtFz46_S(`sXw`&OmOe_Widrw#< zl?PXk%Rp(xBS>4Y7b3pcL11GLtaaH59LWXXHa8lE6|uVhB?aBi_u=SE1F&vWhifgq za0guj?dU1s^A80j%~rUwF#%efLSb=E2XKDc3=21`frH*jaKJkX1m|4=fzAdv;hhd2 zX}r-kLICy)yTXyX3*m723wRZj3xbS%s2Sn_gELal^XvzFT=@dH7ow$(KyMKEXq8MK~Jz131bT!sN<1(D-c` zs6R~xlcnLH`j!iJXm!E-f@DyU$cBMb5iqj34>g5%fd>hI+1a-sG2sgCAsi62*ab@O z?SYeyUx3vE5Wm$FikosE^=KZrzIzAN?91TEpd5t86+%&(8~E!t!I*gxlzNZAqX7kI z>Aeo{NTR-7Az8y zh2|VNnEXBkK_90fs9*}dxt@SI4}O7$w-a1)TnJMEG)QaAg~PTgAS>(-hWXw2c}j z*a`?g{tcvPH{hM+jrF=57`;sb@w;*G^mZ*Mm~#XE5-H4UDhJPkDv1AB2oD$ML)q<% zz-f077=4A*dXoaRJS+HOI4BUg(!R+B_$fa(=F*_sJ zDtjM{iVuL?I0^L!VqxS}4DcCV2JubzAlbwM7N95Ktri0n;%wmD;ty|Hp8;h#3WhfG zfNiclgl0T~OlB4AGS-LXKlvfXr3miK*$hm^dYIGf4sE&>utR1NK9s$N&S6dPux^ET z>jr2nP=e%ZMc`)o9*%!4fq|y^khh6|Ey7#C_>2YA@aB+vyd3G@N z?g7Z0`UKf;@*oF)ec($bADqrx1>Zd_;dDbV@GV>m7lnD@!qI48lUW3b=UriHssQ%s zvY-*q-Mx+TAtWysim&|wrTbDa=vV~NC92SHe;;Js&xUsu{2&x02PaL|!PiWl*v|7@{ZFKy6_O2$Z~oj|U!uUWq?k(mnz!{5QkebI~ApH~?g~yMy;y5zwZD zAW$*_&JsazzxW$`|L6|In_hzCNH>hx;xTZA0-HN{n`hJ$$oJrZA*&uGQ?M(pRvQeSNeQa z#VITsKE56p`rAQtwbVvi{IL!BP5;z0BRg}pG3529;2&8__c@g|O`-p7Pfw$0w8=3A2HZlY6Ey_*k^B<7WD-8X|tpE&q7GIqE*H2FG5mCe-J3%Uz7S=ByrLeeSc@ z)Lr!#`Sp#TJU*(&e30zqk)NlrDOB-G*g0Jd{dDsBQ2lw0%YE5NW2G4ygU7xGW{-c< zkkc0J`JyUm61x^oJ6-iR%%r!YMI3O-_x1D`k<1(!GHau5~&S~bE?%&3i)p0u9qO*R#d3tTZ!8xj_Z7l^PkrN zhUga9S)>Q6P8)b%72byH{_p6$#_qUDEOYtyZDfb4%*cY1yr{%UMHqP*IP89dD)_Y z?RK6IKe{JOz0GMsvV-#$RvnSwcHF_IpiSsAI^(cU#P8=sLJ_A#f2_$jRKwXTZgc+Z 
zrIE6SGG1vN$oEpzIi%FZp?b^T97EB(6VaI=)d>rpPPk$jQ@MT{<&%R$It)9@$U|x|2H$(8NdJj z@6U+Y|C>iR{}+3!@%}GP@)!7Dtlug0zgSLb(cjEC>$&7_-uax8!7YwbsJL5D3lEBld@tZH2o)sK9P685dh>PIqr^;_mX>_?Z3H)UK%?niGodmX)* z)Q?VE^md*}>_-7?J*O7j>PPb{x5owD?MEMzWVOCO>PO#8-ezpb=|>R*cYGp?`;qx- zjm%kf{V1*WKwM&HKhit&bI6tMM>lp~YKdRMLdPRm&EgwaC?NS~v#==(S;V`mT=QU| zrikR*>B%g_&~-F(tz!LqzdN;}B_g+t+dV?V#gjWLsY>)=H!rOv-~O*$O?%pBjSb!! z!uXw!)cBoG{@;98{~I^mpN`J|naTNv9JX!evYSS&I|RJr?CSd;=I@kZJ28#kGnW>u zr^7UgU9MrY^ujoyza-|<*vV<6|71X3U_*`iSV`ESG3RM?IwQC}`AyyC z#iqf@6Q`!p{h(x()UF>P9+KAC(`Tkp@|8>8?2niHsQo?_J?HE+5-C4{*mrq42xn;M z%{!0Je>O^YT~x8ueBSw0%RQ#izH278)>O@2(LQoQMd89U(o(x_ZQE+O^wwwXGg{u$ zNZR1(WbpVS>CY9>ai%`gXuD47HG75c73YZ~$Nfc{_We8OvANlq;qylXuRN^c=sgU#RAvy^-7D)wJ(0f#)V{s4Q@=M zBgNKXDcgONl$CEP`QMyIk6x5Lzn>QCYvV5xn|I!wM$3mypJaX-@?X!R@;NSL8r|MQ=WAzbjC@Qq$!>ozjRKDg-8q`e zcVh#ZdZGGg8hMMJT5Nc#vu;yb%tX-RX|&^K>8D3pocoV{jCEMaR29w+7ac9SZhJ$5ka~Dq|Nm+p!ulcvL7I=Oa@+!sWE1DfxTrRT9-I3AwQ0f4YyV(3{^e9b~fp~OVu>uzk4%Q;&YmCVuQES zc+E5#|8%tHK$5wOe%`yw7who-og2uJVD#L5W9$hf{)TD9tM6bY!sIlOeyVPHvk8wM z#W_T4%rlNHOBKbGTk!Qt_icO>dbKAzcf+YSpYZr^K7UKhRVY~KfYD*=FZg<|Y+R^V zed-AP^lU%-cRc>jO892#XRmvGniRX+F^#gCMuj(xna1)N$WCtUo<=(pqJIj_`pD_k z-rzIRi^tDHxd$tzoZW6OY%jRQnntU_t%^<_?>znBh3nJZL(}NqVQJTG9dXxJDcAe&=(LqB~ z-Z{&6uTwkusP`)up1-t&bFPHQ{D_Es=WonIqt=g6&u8b%J?*tBXXP7S8byx<8_Ay* z7Z*{NE7IVn(alA{Es^39=}LXH-7^6ii7Gcf9(=KbJ9{bY`M~%Dzkt_kB`);oABuieK!pbeXx*5N1pJPydHa-wz%$<@0!Il zx*z32#N7PQ4jQ;0VZ>9a8TZz+1oQ%FwqhNgG zP0QlAkrgyz?k{|jCU(MKVqeF-014cln47IRvZljUvOm9xuBOp3#bwf`?wQoQl{&O9 zOOi%OiNwIc!4q6+`^2UVrD?Qs=k3vj<}Zc%r*?hnlA+P6vhY1yx?AYjG`^{TH8j#V zRi#=N30aB^#{<%GG)msM+k7NR^zi7HsL3*U++HiiEe9^Q?si^!r|qO7jTEDxXz<3K z`Mz(91r{sg>pgar@9^t4BPTT_LzpTw`nGw4>$)S+ytK@O@rm^`lGW8!*s1HBdL#AJ zDUl7hd|g3f#*G7}{^nKfWok5G;bzKyiC7Ncx#zv^K{p zham14b1`jvy>mZtji@SBT@jw+-=;$&?!=Y*M}$_p&916mp1g%dk_vNOLg;ys+L5Vazh%0 z@}wRQQLnWtt2r(eZ$zV`mWHo)E6fwM{;@XIVHb@?>>CV-`vc3y-yhG`+CwAWz1r+Y zdUyAVH{RVYVvL`Mc56K-cf9Ah+y2zZJ{s+<(>}F^++E}4VIWs+N~8N-k-ONULSwWp 
zz4y#GK%@5;j$d55P1MOg`tGqS2Wcdfuu*(_L6`r6ngfHbhw%BG^kw&-oEyS)xVH3= zC5^fpYfdH!h;1{yJF7?U2p$igcWieacm1VQvE6E&HH~h|=gQpl6f#)3dv&11QT+V; z$dz&_xu|%pOvilTG5q{V%U=fX$HAcgiWib(LZS0AFMvb{i06mE>uz3nZHMKiyuh3j z0^aL+!P_VV;0P~7IN{&hLLg#CD8PFzfP38FT^0=9>v(Wb6$kPqd7&zZ7c%5RL7YDn zn0=UEkG%{R0{>CdzcNl!BQ6Y z8z25CAMa({Hhhk%%aDQlkWr1#E5Z#CF*qN|1)a8haP}JR&-r1%Y~+Dec)`;&BNW=J z_&~fi1a@-Z`$6%7cgDtd(c-FP+j;XY{yTrX)?#g(Dc6ox75mWn*dEOu;jQPi ze}pP_ZmD^g>5Kd}?TBxyH~v5K%>Z(w0!55A#}n&Sem+qD!9wQE4L9anI+KUJ-88QE zvQYKW0KxNzBFIZ#j-3m?ve5h=w(ED-gc5)3|H-fk3Gd1fvh_!cj6JrG55I0dykpFg zcpZO6SoakR#nq~qeY+Du_zstrX%w)KgS)T!z3bPAKYqh6*3=b|>@Z?X&t=V(NBDd< z6r9uxG>F^PwaUGBScu0lB52(#J9265bo$YF7UIuWklnp6hWz7i7pm=$ZhWxoYppl2E9~y;*z+u;A9c5T;Zkq%kDsEh+-f`^ z7EX3w4pp15V)?DO8l)n?B~1H?kn_wRq5 z3cE`D@ryk5PY($n3LwzEOruIA78>dhi(RAVO|*D?vC&<_LWek=k^A$j2tZ(-&39(Rkd1cU*1&+i% z1&s&kyew?bWEh??^ddOP0iu-|x>alKa|^9Jb4*=qFwx{`h^t$ED9(3Pq8k-j6tRoBEMi&|>}Y z6jM@GeryHzhkms5x`A!{S`YHt{hf`Y)%|GRqnDO_R@%fLf01Q9XEWE)tAq(G>(eaj zN9ShUYWiH^NK_Gp*|$skk)n_Q3ZtWl*4p|3*`j`wk$(O~WrQp7$IoL zTZH^cfh*=^$xm?k`YGYr>!XM!sccof^nNruK3E|q)}5Gob?v=ST0i32EBfWLRRrCI9%tkGs|co!#R{iWgO^+j6HLnVRl9+fo@tu3l;EqjtL=SueTZohoQToO-bR zy)pKarB@!JSWlzKKYp}N?z(_9Av=<4;yELP{p$V)4$oP(R-}T64Uvic>s^6ud)1r0 zNx$e_6P}pgX&ksgmR%GTOV$p@re%)Klj)WM|QkZ9meg~ zdiPYJSQlc#`jCdVtBvr z4|hVcq%OZS5=M&K<3ZKKcD|+(cXRin75%7A>C0Dt(GWsKZfkFR6@H#xkhRq>BFR7Tj2fl> z8%h-sgy~bo`8ON;(bEH`9Q}^_5!BQ!)^CE$E%7H_QtVCh=o$-s^7bG=QQyr&LgmrA$~K`hm7@ov*@cOKL3@%x}68Yh)c>J zXSE+^p>6hTkM?i!BP_GnhF6?oAu)}txhv{?$UpI+H+*`Vb7g}_nOma^w)nG9ZQv$L zj%TNdA1I_lK7xguT@rTnmiQ9B+h6_Mb)AI@dPq5ezNN)N1qi9hkEzOS3V94G}jl7Iv^uP=%#t;p{!cpmX?|m&1SJR{+ZY*cDHxb16ny zRWjmCtIj3v?DL$Mzsd~XI@;Y~s#{m6@?6Sd?y`L_S7oI+L+$wfou0#K42z6sjxTDy zGV&iJh6`N$#b`EIYviWc!KjG&ruRhnL4N7a>+LlCgre+uC!FpGGJO|&9#H7n$+W7_ zvVHpWG_yYBjx_7u8D?uqEbm3j9Sm#f3kTU9A{ds-3c|+^McykuB+^1NDB zahu_v`uXIn&#MczePNFM!1bQ?PJW!JJBztsNAC8mW9yl^BHUUlxeb{D4Oi|zazV^m z&$)hA`ZO8*9$U9;jCEyvI4tPkc<&11LVVHlUx|^7ixC@sUCp#%IIv4~Evj`Z&{}@T 
z;0Uf)`~IrnjjChRa7udLaPV^GEUQ%|?MEb;0v+M$7hAcQ`}c=L*r=&8!UH<_Z_YW) zh@ac7<=^4NpeLVgnnhSKxP&+AJl**}@zS-^*leRR(T8!T(<93xnKfrdoH_nqf8DeuwF8ih)9F_wGK*rm4N=u5XjCw5B>wtF-++ z8O;*Kn6(a53kn-UD`djev(&d|xKz*7U#lLIn_;q;(G?iLedVhq<6`magbCgV#=Wnl zd%6}UGPD;S`uX(9UPdKzMN`t|B!e`Qjm#mxJh+f8>i9tlM|`b)LO>Y=ASZQsfdxa?V|G&h9aZyJag=-Iex2u zTlKfdIw!_T>gh_|qrr>?FAv`ExE8{&nGgJXV(b`7GEc|tG!`=d@h2c6-+u>N;etT# zBG)}jOAbgUm&DHN(&~vnsrxZzxt<5-H@!TbO?ts4`e`&Bo*%gM64QNuz{~iAyG7?V zq!n>;oYIJzPFf+~$PT7l(G$&qb)4GDM1ZzP z+Y;^Tm+byozX73oQ+9%TOVU+Az0^_(?UzDRx>3qJ`tO(P80=RuG&tUq z^7>rwEjgNwQTNZ!V?>C~)qZR1t+S09*Gam3S8Hwyn{Kp5)RtxG=6Z#Jym|$@Q%aT| zyQ<`b^M9SYk5S?wtM#c;R{PrvN$o{k+FGI~lyoe7Y;|qFzQOOFF4NtQ78ZRBTCq;{ zM~pgEOrxnjTTSP^Q<|Z(eKeo<4rzuz^4FG8-lOyV#X+6e)V%LZ-B~QGk!`w zs`T<&pKxB55ph@E<~{QqqeA)a9Z%iIj20iMqVwGaj7KMI!gMbmWEe>qUTwbgh~f33 zeh_v=C{M-J7cj6QZr-Q4+R z41*85s}{+5Gjez@ym)@eo1t!%e!u&UA%o3!&QjH%VT_;8=9uVoWib}MxcfXKxqz|j z(8*bf-q#o{H`7v9{F2dduU=s`it7zdJJdw2$#!<9=AU zo<8HooOQPhY&01bDT8xWc+W5->Nf23FpFSxDaqgQ{1C#}(XKe?V!w{~Jd z)NTt#o>zQ`Wod?W9MvdN9rmvU9-RKsUicI z3r~+TI##`6+TEzKsk+y}{QlOg_1;=*#uC4Vsw0sFjMzgt8P`ofbIA>VmpwmLYaUEn z;#Om_N>e!XF8}d~d4;h_w?wojpXIZ>_LtApzb$Q6S-Yq>Gb1Zde3*BOkjv9amzThTQX}T zOLZg%0}2E)^drMG@~EaAFOK_h%wc*(94q|tK!usQRj~Drj4pGxGP5k_;+mKMG*TFyHI1UK+r7-=IMpx?shy?_Ap9|6%^W<9k58`~4ox zJ98?v`_{PVe%LdnyVmZT)^~Yp-Sr0r^^V>u&q(=Vj>Og!UH{diy54N04&P@ZrH43gN47gd_dGBD+Ql<14?;`JP?kq4 z_V9h}uOEGM`ko%rIT7_v>;8@ol|_>*b!NaRA~tk7N@GNSu|WY@ousbCOYu+l)k zJpA>Nnw+$0x;kIA0}AaQZYWZ@_pn%0PogCD@@=i~iE*9rvw>S2UTEn_nl^1YdoHTP z$n)_NGn`+@KZ+SYt6xk>A|XzkIhGf&g4`r4;+QDttCauCWo zGg6%|u}YD-$S9#u%uwcK=0s}YUY%Y|y`LW1@|Nm4vW;pwLqkioH)P#p!lR5FSSKUL z$32$={`v7+qvQ2xyq-3O;5JMC*X$1UjlMJMYaLVp$EIekSFPx$f|Ir0!g@-7`>dTO z+-eYcrCa*xt4d0KV)5tIN6OIeI7qpiea@KMHYwZke5BTo^X%u^ zh&e;6tM6Nf8ShnK|L}@ZOV{|ig=a=STO>uLlxn&2cB=o& zAeNhSjYbz!iW_-Mx4yuf)VAu|#7hcRdim{%#>|{Lu>VLQMby_Z=Qv{a&OdYg21UL6 zmZN8(KO>(oGjS)W>PwJjDlsD;5h%6HrKDEw5g3@B*}w5j@5}aQRQRSUH}Q{{8Elzn z(Nn6?d;f>_9L!8D5l^)Y>N3N9S|esAe>Ufplc|*N=9042r)JpADCT@Jb#N))w`;p* 
zn78Dhwpte@~R}^ zT=Z0pzWB@yGlQe4%cTHmcNw~5^J2F2+F8?Fh^)@=H)kgI;{I!$_0Y8#?JED!8HO;6 ze{s{E`wDI6Wj9*W(u2!C2x$#_gUS|K!U<2zZ8dh!`6^JjioBz$_zYL3bNf}IjXPx| zol3g#`E9ef&sCuoB`K240W*{B*z8BuC}a?kwTm#fO~0S_u?D40+Zt5A?85m=c5IpR z9TRsj$l%EG4+KO>q3M$KV#jb z8(E!ru!6SeLgvSlW_#1Fq^(}wW=QHnvo7=&7mqs;cja?b&bqe8}!uX)W}pZbZmi zcE1)fB@EU125GS#bmOd4TFYb@d0^3sO4_0aZ4fG5vbcN?ak_nHBb|uHOJ%fXn@Mv3mbOI_k$h8`%&8w7MFr|5Yce2 z-UKZB(c~Df=^362Brmg13UIt@UaT&cuU{zXQ9gPC+WOIm>%tf*?{KohbIn=6@s994 zr(GH^hLha7ypn+956!fc6gV?TmAcSQI+2Ak?oWMgUTR7P`3uz1pIFE-EV5i=B7m@! zkl_IF0d#STvdyqpJaO#9*RQn20CJCc+`>8OPne`!;)bLF6#28(yY-6+S%EA>pltvx z7$}*o;uA@Z=B1C*VuR>qp7+rglObeUq~UkkVi2)9HY^tMN+3V0KAHzfgNPnoG@Bc4 z5c*oLmq6PfN-}Ty_R}YVbUZXI0OCVP`{#}ARa1dvWJ4lu{}A%2jy@+l9ZEE{-;;r) zAyg7_$oUkz6S+}Crj7nIgeLA~Hf{8XCwvB)$7r!(w0C#2qspKsVHt8@fVLP$JxXu4 z@0)d#ta!{X4@twwExC4E$G{mvHFGluv<)Mbz=Q?mo;L_Opc?o82$EKpdza36fq3Gf z#s_#lyBBA3X#=MZ5tO)(1CmBi4QUi{a59J-)?Saxk06;ni~Cm3J4*_9u~*Pyqo{eW zn_q)(EOGK_z!YsUisnjJDRpoKka=UvhUmmm#P05LEO9J=xVY|D5B+HrX>=*dM|ek) z1;gX#X6ExHidW4_@qDfxn7(ypeop@NUilcFpECpp24?14_Z%_q5Io;rn)N(mX8v@4 z=O8jOf8MnYMNeF^1cTT z%*;OrFQ28#@%)nlaheFvCzTd@&l2!_^4m(9nIC>1kMyojh2rt3KDsKgOzkO3vSSx} ziuHG1q}tvGSbw+wvNM^{$2H%IxAI_pESGxAYDT|`+LR9HV*T2ud3IJi)|canOV`fm z%Om=GMrZV2h5M5oGy2c)0{7T6p?a!K`IrG!){0ucPA1OSW6xi&xA@0SQ~SLhrD*#q z%4Aqxo$a4ywNYk@yYMhm{v`}RqWTpL_NkkvZ(C&%Lr7l&a zkNI58L2q@d&&=PQO(nLr6%DMz_F6>yIj@R(N~~+G-P!q=f$wGi$$Ba&dy>;~XvQAf zHUI1Uddf3&zSzBL%x9(5%$|Rss#uQ$six{%*Xe~1V|#Es_qwfSHN}u_eW%@kIhGY& z)mKHmzH^qTkw0TEeiXiYy^0E6-B#pv8}oCk-R*i+)H=WSt7Zpc9`M&)+4GL#kz2B~ z(gAbE;xk+I-cfsnH`X@p!K^5#u9I9z9q`N2k=isP@6zF8ucUsCFVxJ&&o>OX$=tSl zOFbFUX!4nhIUt9ohPZWZiX*jUVO2fT2m;CBo{cQHo@$uo4zDE6wvw<|AW9$0qF?fx4|^t@k**%{1=`o|r3-%wvv z(+n!jF*9}Vi95fh81D^Z&uy8JFUShV_U5W`y7Hg`W~U>>bK(_cz<2A6n=od}4e{6S zlu{8JH8#9v!<>|}SBa;T(j1R@v7-mur;4jwJx`TT)en}ibJxxA%W(JRVoG)@=l(>& zj6GYkYwea|>fut}^P&%C*q02>D54az7g)MP&9H%e+|nXy^p=Uqq$g%Za>U+%mlU;m zS^U#un3K+p=MEQA`J>^!343OE?dsQ-h16!wH>o)qn8n9TyegU0Gsc58Y_c=*X@h2h 
zNhxbqTs^lCv-dVK--M##f7L7#XT!{3T;<$QK=J4s8jSQ}`E}Idrsv$ zJ#)_p#te_1vR%%ho{_B8!EQ6GraO508P%k;ef?$YneqV+{D-os77M9}++CR4ZnYlN z&!V>1C@1qH%t>~JjVe#6rU^lpkhPdCOUBQMXHrVpz0!&zn8lC8ZsC7IrLXgA`Z*Uf z)YzSvOsAgO?-1x8#rl%rw)JvT8daGnYd+`OjQ+gzt(SaAt+>0^N~Z=hQ-~3C>j5Pa z`^6)LIm7HKrhzF`wQ8xaLON#e54MM#?@}A*C&hkDm|+`(;DdLlPaV6{ABJLPEVTY= zaEtmWy+a|6H!8|i=K=IfXEQTnI4rB(^gaPP}mLT9O%Cky6q zaL(|mb6gyjl*pvym{{A4zYyU!%sPoqcOQN!dTWN&*K0NhAo;k98G?Ad8@yi@->Hm3 zZ`1mTAFF5NK?@2K<59=_$XTzuu>Sz_DjPCxp@&b(&w4+`%w(%F3%-luc@u7noW|_k z!0VH1{}R2tfM-IA+~Z6<&0_t-NTDE5WhceXO@Ux@R49o>_OW<`4N z3|+wtY9TH1S*Y^h&!dkHVrH5I?4R=tCA1z&T`M&sw_ESpg!TGbdkx1S?0;B_?$Cai zi`dVl+;%9KVZIly&b~knoEsb@LuUMw4(0jU`G~{RJU`lWM&9bI#7&{l#1|^rl9(Bh zYZlrv5$fpe_Z`LdX2untOL~Q<@ao4iQ^lAS%icOlzC_Yn{dt$iVBT4BuYOh$TC2e- z`C@}vP=zS^RD?nT-`MJJ!0dhAUNx^6ZB<;hVjCCcfokcEQ6=aCYv@wvN9@lG2(Nl; zSBh?{TPiI}#~g9AcbxGGrI^;DcApu!$S8B+YZP*di^VX(?EPb}OxJ59we4Yq>{`r> zady=gZ&3A6)chni%&=CpFQg1DB|ff7te^3J*beS8D@PGeUd>+p7&DWsu$8Gm3YFJ} zs%p_Qn8x2=K^ALfX~OfKVh zD50JX+ue@+rRQQoC-|$72UH|hQkbiv_@6shA^xI*CT1LFC;yxF@2k+y#S*Dfr)T8b zR03415$E-%kH84CzJx7bY&A00u<10A#q3?tMvqq`OZJf%wmF!OS=C$YuR$_*+N@T8 z!T#3$1zd``*#FCJc6(ZY*-~53MYtBdTz$w1X1KmX#AMnL$lh^9=R_f9Nrg5?`!`2AlQL+pW|{qO1l$6UADhy zg$}eTAYbwegc5f1E`FkoI#3PUpkpMvBN=hSRsftj(CHg{-#Y)YCD*^cFiD5w_Zv7y zZx$9@B-&q!Pt)lg$i})tX7L3l65gOb`dtSS4RPmm@%AQNMI^guRtI`Kk>DWByi9c7 zxxoQ~ok(T-_gyt!K}7i4sKubzi7pN+KI@teCU2ZlnFBjJ5xL&Ro}z+?(jYByaOy;7 z+~*D3P$4A8xzQdvyc3y}tKW8nAo3Fjxe_uu5%;z>wNKzl1~29PPQU9!vlHnp&Jajy zheiIPS)EAD>3o&@MPK5T#pH4j#QtQ~C)Y@gAR^a&%Y0DmLayww+je++5hv&Nx6?*l zNd5@jIB+4HtQI0#>62Y3PfhY_s*e-N7pyx%hj$^?ee1}j-T|bBd~6Gy-i0;D-gF#JOeH1Yu`4QapFIrEEHo(3c-w22S1R*;shD9b5As6)etJj=1GpJsKVJFWa}8guLjGd`P- zv_KEaM$q0@U%@tUE2DcXwEJ*Zhjzw1FOx;OXd&?m@G zTvp>Ws|OwAk-s)e^CG#&HTyFy(2JJXSkn@4u`;ne!h-G5} ze4yBenqD~_k0=ZxiUrPf(?;08o;EGnz;8-^4``MLr#_@3P-ytU+lx%~X)*ReYrv6+ z{_*fFEr9nY9hVbl_74ZHT&ZT9A;7+g>X${dvc)!cT=#2Zc3o-9da3_7T zAMGz-JvsHmgyf@d&Vz`4#P*u!&}DBwQfKkqPjq@e$`;qIn}gr(yX7S$0(gJMm4o{% 
z$Juw1#dyaD3}FAieZ9F3;}Xe!`TQs?z(TVlSsRsp8j-Jpzb^+x7BZf9YUt6$U?O^* z&nS)eCuqN_f3Jyx3H$92e$gjc=*-zz(Gm6|q-OENXF443N9b@HsrGauE_`jn_aE;E zyW??v3EN5X<~KJ{!29o>JgVH*f6_cY)8`3(q@Df7(h+jq6TRf zf=Sc!{=>Aw0BXzpRdn3z8o8M7*A#6ufXt%H*G*H`2p_|@v%qNpmGAq;^aUTHvwR4T zzX2q2W<$Xz+JUrb;$?%30n{Ve{@4pel4`ZdY*00T#uD| zHl*U?eTCe-d%A~yH-v_ojnppCf#lV+=Xeh+j(=z>?%YLRBGbQ||49oBqXg+gfzEJ| z^s;z_pZ{U}K2#1erz6RIws_C!&S8}A6SC6BD~4Dx_H!OM4Wo)WzT!OaCUO<_Pa$Rnx)&8N=wanyZt&*CoQ_P39>54#yLOD8Wta=LzAg4E+2Jqm128jyib7 z5+mu#3qf!Mt-7?tV#ByCArqp9_5TRc-qq~V&gM&4WyN&RMkDAzQ0DXFbP&;Yh69iP z5wv&cW&9In4B1h5l0}D)AQ#z9LtKn_f+MBiJDom)bUpc~96Fk8l@eSGRU=5JrVHKKZ3vk(Z5BGu%h&NdvD#CfUNi$HM{aVbkwR#I2V?M@$i zX`@k8e{1$^7xp0X!Cng%eR32DS-;_Lr{W1A!S=-vF^VEs=@&~b1QJ&?-pqoGQN-k> zxH&ZwNpgw7GN>9wGJ30j%3h2mgU&zipjo4c<9MHU&V}gz;x(a2vCn@Wf8sTM`-jX^@2372ugQ{EP2F>d z{4ZX^`BB{MyJX3K@fs(+m3;m$%m0hle7=zMI%IbBfAJcw{%0nN9PiPe{lV*epX=Y@ zucGXD+IY-N>X3vc_s}%`iGu$Vule1oHp=p!ILvoy&Od)>LpzN3<2XzgI-e|qCEuwo zUHy17gCVL2NrtR}f3GJA$2I;OuXBfd)_$P8_id0~l{Go@{xDv$`Ag1eO66DAQQ~85 z4V-sH^Z$`s{v|inUF_ysM>*_#R+oH4;sED*U$VD>Tf<@4n)r4aY(L9QXbGasN@9czpVX zwVHpopZS+u|G4q?d$p8h^e+F>C*r@$C;cV&j=kQZS4&;-E_rXbfAw#<_g`}H&v{=u zYp8}bIdRL~C4b8;|B?r`&OZ=TL!pd!&lZ#|{C|@({wja=-A_%K8p^e4kMyboi}L@u zEb+hOvBwKzUsqEcdpnMi>0-a-@R$7l!-ofLtEqweJ}RYRV!zLiF5vRVax>H5*&lR2?`~Q=B z|5cuG$U!*w9Th;tJ(M-!``tdvzvPz9T|=So#lg80=C zBL)0niqLi^?$?x*azvN8s9dCxqsG|{Yk=2g8-{UXoFL}lnF;b$8>Mq>!|FQSR z;f+;Q-}Y{sb_#^C0|ANz3=*M0fC2#v1Y00NfdB;p6bRU1(wP=AGo_P(0#T|&EgG~) zz(E5RjangUl{{K8FjXQH30NR#)u>g2RG+BTCrZ^Q@4b@KHqD&O@VwXc{qbGjcV?11 zS--vZUVH6*_BlC8lZ-d?u2TGa(ud3ZyYi81E_-h0Z`0p1{Pd+`O8z}%`i!M_?9S<( zd;Rn0ckX<0DSu`9TvP4ck3GAy`+~~{M}PhnbxWE5%)zg}?Z?mT-1wVse&NP!Q>lGb zrjLGkp!Llk>^!Zj>2v2ieArvl<$dL)ZQ_yNvR>}|_y3) zj-gZ5f6sW(nMJof>AbJ8R6T!IZz6SA8 zxu2hSZ*J$#zn=UBk9)f|e^i;Sj%ELNYNtaj9AmQ|Q>uSYI`0!L=V$8LPr9#9>}+`I zece|X4=wd?s!X4|;PlO(dSd7Fqu0)#a+o^>0slN16ZJArJiI ztjBl$YX5(ves;Z08=pPsmNI?#3x7Jb_ZvI+`obq~xjtah@?Wm6n`+(r>pSoL#Hn{Z zeD!f}P3L{U|z>il!01>2LQ 
z{bvvQSh@b6?|k&(FYJ8uKS%!U+qHkw*0(+A!)3Y}TKm}#eQsxVuMY%{96q|#{+{%n zGXLRM*BtVlPwzbKOaHvjeNRitzoSf7g^BU0PwtF->6O0+4*ON<__Qb8Qtsb$(=l^* z?#y=lY~S|Z99*h@Pr54iUmmHl2Yb+`BcX>NL{k4tJ_LhG?xqaJ#+WZ(U)2BXXdNe%V{{ADcnYsTqEk7OQ`Y&GC_I!-@ zWoOU(Yv0?o{FUnsQz!o6YwdTO@!TcvS+};N^FIA@^F8OjbjGK@(f-+o_r3DEla`K; zW%}GpU;f*ZkGKEi;-7!<`e|+d>?za7K7G+OzyGiH!w$X9(a^8O-%+Md{bBD{)VJGD z`r|`ix#zH4Y5uA*z2oZZu0Q!Z?LRy3$7xfs6ndX-zkBe)BgRj7rj(zKGJWFYyUx5f z**^5Gf4gtHOI!af<@zJp*N*x@`?K%we{KF`t^ad!-CNt5LQh?M)idow$2)8v|IyO< zQ<*-n{p0&BJk$Q`!y~WVvu*auD_AF=9=f)x4t>n-SXEprT(ijeePq9Ki>Dl_Wr}K>^f&qTVLk5PPw$r zTzeI-FWVpeb?oAMPSMVfr^@uXGul2n^L+ca_H$pIyigloK9{rPu9_B}`PbG|`^(2Z z@PonJUrOs!PnkZNo;dRxFSMUBaljkLys7QK9c6mc>u;{T{KfXx{8>Fm?f3oC_*lwx z^^5FR|Mp`0x=(z!_khEHSgK!@=@XCp%n$ykz4cedALgDvq}2Z@uH!Fl-52$=-8|EN z@5O&S@aK`@_eUu;R;Kr?-*jAZrv3MSa2)vYX6^iWxJ;jy`T0`2dEtv2)*i0ye?4XT zV(fX(O)s^7@W3y=@xo`8&OgiazH2r-xbvmd zed3oN)y|*zoYhkJYBUqz`apZ$&~?E-Bii}TRGHrM%CG-odZm5Rari5@|3({MKDV{( z-}KZin{IlgefrkFOuVpVowk3J>8&TGtUC-iO?6IliQ;GJULV z&DY-fYWvmCdOtYu)bXYI`JCEv{4>*+bG@Yf&mGtO>5pI4&VQ!L^q#AK_2jFswr6^y zjmPY>_mlr$|6I&ITh4#(z*}B!zx|o*WBmvJWalrQ__A+rKA$}O_oq7L^D)Ox9#i@p z^V`K*G2R`1wnZ7JVN$Mq%hSMbHHS!0{V>q{K|NQo1VYy54AM;=)G9Qg3p zFD!Dr@vb5Zp@eWMbkZBbmNQ)Tv$+_v{njJRh z9CgiEPd)zYsdFsP{MYZ#XlZcz7Oq}@*{v5lk3V7MTc^)Do8O8%_q@*YFSyWk(Z!c^ zU3%H&?kldms{86|*59zf+q-d7U;m9a`8IFaI&kygHvjfpZXLSqcBS^NU8`!;3EtjY zPVjBrL|g|fZR?gERPfLPS`S$F&SQ?OFduNhx^)Ls^C^`*6CA&b+p=NZx^=X_+pU)T z)p0X$tLMgbv8grO8oBW$3{!mJ!4zK(FvTDGPwmT%ft`9AH@-!8ipr<<BpdPPL>S@}4n)aWj{ikXFY1)69_MfKxr)mFb z+JBn%pQin%Y5x`4f1386=5xj0tx-n&jrbe!H~#$mEw`-q^?NNFed{-EQx~t_+-vb} z?cHYCvej=H?A^B2cT2Bw58h^3ziEB{7QU@~%W;Ff8~D!h!Sy%zdM&+!gIfpH67Y}%_9sJrcL=)^X6f-ma=+9t9!G)zEECB z9{;C~NmRMDi0824oBi2_6kpWA#)M?`l1d9V<}KgqUvw$e z!Q=n+)<2+X`ZsJIP__M=xUbu^X+RnJ2YR<`SU;fZ`M&)I|Aqn8*o)aXuzf(S+wL7u zrap=uIJ9Nkas50yWRSZ<`?qa9&OcyR)*=6ZdY2c|k%)uGZP?87qx%PLIaM7i_s0!x z-R|#Iw<BgX=dbDj6J5#}971Wk8*4!?lQm2K zq$PR$t}@$_wwk4Kt8H)PS*Q94^@o>Rd@DZXW%!$mnuBVN{D0t|QhTcPB3iwH`7h;e 
zLJ@$pkN&lRauW36g>&(+Fwh~@8#bV*;u(5ZLIohi(>fg3a3LbUcQ7ey+E_?j` zqn2aZuyu={rnkw$eaHJ(%*zoBuHUk$SFK*!FY6uX_4oVxw{9sey8|3Ddc7Q7Zn(`- zI(pGfIm}Q%Z(Zd>#!K6s@+YcFy+v|0Q zXk}khe^CEa`xMI`!~L1XM@==at>I@4Yk$Ayk1T@nNUh@tua2;tBHyUXc3#)PteXcP z%(1_;FoNRxSzJ`bc(7WxH+tRPYgEl4d+o!C!d?xC`=|!-(LAN#(#9dbv{CcEqO*k8 zD!$4|YUX}fUn;juP8yasPEwRA-tAQ~b6>767c4%I<$P(A!-AoBY}l)0=Du8CDpw*W zM9W+8?5lXUSINvh|0QE@f4ki4Z>#iT^7P9sTRHh!-Z=T<2M^?qAAgpO`*OW#TfXCT zZF%GLjgwWm+pA>ezWCrcj)8KM?KplNNB*5sv-C)~VlO>fd0y@-{_ke3sil_p;4jaY z%D=e$=(yS!Rc)=IwxRY{*yMI^jr`-m^5c?4@BXo3X_Mwt^1HXjq^>WyV;?Wm9#t#l zuP@pkQ~QoTs(#ABef4(w>Zc9e%g;C8A`d_O#sd%h>rM5*1H0}JEtIqPRAKc6Vo!3;#-8;Csf6ID_<8r?oyi1jw z)7S6wS_THUZjje#+h}0>Hgyh1=>9F+i$%8%3=l_4DUhS_EgF_+Y4Q7SBLxH7{mWX( z##wUFc*Wo*;O`%cw&nWHQ14M^DJT2irD`L)7?!YQ%FNfEBps3%aJ_q^vdnH~)A6t4>oLn)f-XQ+veE!)o8#R1&scRim;n4ht{?&79-TcGoBev|Ur9JaFW?8Wj*;U!x*o@2eqJr9%BRDg%=@ z;SZ;L__y=TrknAHeOvK|)&cSWcfc?l7$gtS4^uD*voP(iQHBoUyOnz2^bqyH$=j$0 zT5qQwxG+LJ(Ef4ifq@YDgflP?t@n_Rvp6`gJK10qx?uMMlhEX({8yPDF#2`sf&K~d0Y_m7j=?CjKS4b(1+#F1 ztB8j8GF}Ph2kd^5`2j88V}8IM7=WH6;|nt|4qJafUZDFK>Ve7UsOMbf-w&w=&c8rC zaOy?sfuY%a6$iEc}Fg!0B1?3SB=XA8;YV_`=Cw&=1)1OU4(DLF4)4`&W!FG|Vx+ z&|C@fmEX=@( z9ODXoe`0=I$apL;KVayupv?6b!;F48Z~nL(?UU1GK>?bix?)z&P~7 z1Ps9>jKUO5z%)$549r2pn~Yx<{efm^s;N~@Xoeo>fdS}+Vd#f37_X~UX*gY9s|wK2 zP^-+BGM~7>Pc$_sNa45KD~ z^9PQ@42;1%oPfs5=?ApI0(3$90kj8wFa*tq5T`tcDHwoR7=i^Df&9#!DnJJuFw;Ja zz#yE4QP_MaalpwV@Q0&T+P{MN{jOSNhcW1ev(OJ)jwF6KesZl!zywUg8JL3$r;w*B znOE;7ZkRrexS=|oxMA}d5He1&3f3 zjzdE?`(FqCFm^U^!`V*ahK}>`hhZ3pMlSWFVH3MH~>8`1pROa48bUj!fBX*Ihckj zLHp1I4eQA_G{ZJ%hh5MG`=A#N!vGwGVK@e3Z~`Xb6wJUGn1^%Fcmw%`R;Zq&f3OvL zU=Iwy01U$&Fb2nA5>CPloQDP2{1owRpuf-#C!h<)p%+fV09=G&*tV1U;22Co^?mjO zXn+OS2u)tsjg0iU?+^iCPSS{!VZ{)-D~QUc`Nlm2h2kcY;LSm0qBAe7=#JoUUe!1 zldu4@&^*BS?2SKchHlsad6Q3hU>J_TI1IrQoPt?s-iO~~znS=qb;<^F&;{N5;t#Vh z0;ks!HynB!e$d>6-yrJ_w89{C3fI*sFC5sfj@L$v1B}Af{qcvLFax__9;Tpa8}rCi zr|d8XJ#Z27hNW^JNc(UYCg42G!02BG;D=DV4AU>dsNBJ@M^ zp~M57Fb@4N4aZ>~&Op5}a1r{TxrIEyF&Kj>n1WfDg^r`?$NSlTpcM{5CyYQZOu!(Vg;BTw6VP-F 
z^8vQO0_=w758w|SFa+Ij9Qt7bhF}^-q2XBa0h?e3nqeN=pz%)Xg;wZ<4(Nt%=!bq7 zhG94f<1h`=Z~^9Fla0I%6DPDmD|Eu(@yu^H1tTyE6EJ-O`G9#?fQ6Hommg$Y?TiOh zrxQPHf>GEE6R-uQ;Q-9TC^UVDIM2WzMjZIVP#6Ai9L8Y$Qu+_4FQfm^a5?S5#%|)h zi@2c^`k)6!u4NoxiwA!ghAG&39qmHb`^Z<2dVBDL;a>6tcfb&gz$mnDq+K}BhaWWc z6YqyPK0^nLLob|x0XTdkd4eGrhhs1WQ!oo>U;(C~=_AbNo2UmSpchWT01R!WeK@&| zd_m7G%)5^=Uv4FC*mE1>3k!G9PdGeGdvFq_pz(vW2Q9Dw?a(yB{D3y-`4Ika3i@I9 zUGy7Hz^K?iOuyj*%tGr&X#Z~fpc#gt9p<15w%*PD16?o(`(On6VH^&_6b!*E+yM(P z3QZrQU1)KAJ7ex(Ed1n(C{t%?qNQD8$TGE zBwo1VJH!hUFbdNN+JkB*aYOg_=U(^H51?stv`2cM&2%T^SdZ7E?!~^?a z7zSVr?)VS!KwrH<6<}k7L7Btk1v;Ru(V*NgwwFQqV5iZbA}|9JaA;qH%D|qr_`~Si z49a>x_9laJ!O;E&<%f$f1idDMibBr;29<>I0}Uz*M-Db9(*xwwY*0=(bf`gjq03@W zVdy@B_TaFU_Mq=5J|X#O;^%7??Qr23;)ILG8B`EXw;EI&rtAilgX5=C{|?rv(}@c@ z__{(r?0_NYgi+WD6VMIQFn+c{86IRFLNl~pNIS3#`e6WuVUvsYU<*u(elhJrOBep1 zVIEwHKg>V}j9*SYu)FC`@z{2b_Z$*mgDfg`?2?S?1|A)B`QoQV(=O zKb(bOxB%nO?=h$pT!dK|yPmv7s2^J4G<3iP=z*^H8B_q~U>J_}7*q_-ucsc^w85bA z(BUPHhsYna!&E=xCG_DBn>I5~pa&-4ILyF2EWqSe#^Yi7eX~K?pk+Jl!)X|R&RfX? z>>Hw87`lyi;W#vY4u5EcbI<`tZzm71;|}r(eF5?b!!QODFeQ9H;{x06r2R3*vkm zF#fRfbHoed&=e)!G5lcbBeV1wFazyhCEhRL53O+MYxEZep-1?2+J%M*@&p@U99m%-PJR>rFEfvz8QLBv zZfJ*YXpQ49`nMS$*zz6Pg+6E)XZ}DdoPiG5@&xU|6b!)D1pS3Bn1p_qh3ZNCzruO~ ztuVNgxM2!aju zbp3?9!|7S#fLR!ash{BwTYky>{5thRJDmF!?Zd@6+K0y9(0@4lTk-~NS@H%w&^ST6 z&<11B1tYKF2U9Q%4Zov3IPeGBgDOY7-ym*ihTcEXo;-&>H~@oi2u5H4#^L;*X&>fc z4n`J;`ze=uIK8wQ~3pOlB= z1;z;`VF4~cbDa6`Z`y+aL%s4r&)Rwwfs-%+voHhQZ>v`YINwySZ2v|5>*|#YcJ5!V z0?-d5FbWfJ0;XY;2|w5cP2a)~+MpLYVITCsjsxmd5C#scS25^ru2)%TIJjOJzfD}w z2JMH`D>v+fei(#dn1FGZhiT|A*Q>nfht(_VB<;M5cwq$k;J{JDEB2G?RSFKlENnZ4 z_P#@&PNh9)Iip^AVG4#|%bD~Kw!$=QgE`m<4NuTNXoh{z4*k#tTRMmrHlIbiaP}P9 zg=Q!I&<^cdUpNl^Fb+d-=Bj#?gc+EDecj~gDeAqNcHlI0!xZ$v z85o4KFanL&5HIY285n^DIC?GPvy=JaAx>z%4u3cdL$Kv~;uZS_@&xVBFh&2pj1O$? 
zWgbB%^ujq9gzk;?DhiV@2@8Gn6UKeC_dUjGfOz2m^ugH8I3=YyR%t7P# znaA5`7q)F@-a-fT!wwjNoiGaXFa@)>;SVDL^7}O70_|`dy5I!#!ht)<6LbvY2Lmt% z-5;dgBF1pP+r{fq7{8B>jJud_V_G z-b>!0{eI$x37CY|2k18(g{Eox2_10c)0Bq^7=qI<3TI#f&cQTXfH~N*gL(2C>n1eA z9CW}ubi)GlLBoU07ifeL*b3v&0aMTqvoHn=aO5-O?}y|eLOn1I{V)qd(DD%dhuts* z1D~V+F!Kogd7gT{Lf)Y%hM#bPen2x!!1-?xKg>fziuv?y+Jml_nQw6NSF{6@bHoXo ze?z~8dCI>)KA;uOLkBGUjq!y|Z{P=`|Dqk(UDKd)&~0c?<`)^yH4VxIn;ILGAG(if zP*K=%bc0I3VVHp_Sb(`>8hEdQydB%1oX`S2&<_J}3PzywxCRx6UYLeKn1>6{G(#N6 zHz)@TKo5+-08GOOY&wDVp$%r>C@jDQXnu+DYQ-P+K@a5j;#B~4oz$RWaEPzDOv5pl z6CD~}CLVi(vcqZUfv!^^Y0Pze<0g6B^EL z;M|Gz0|wy;jKCaBz(tsbjpxvR(V_9jihvrF4URxJ+yQ-X3I<^cMxpz?!~uOU1Lt4? zs&nbbPsl%Xzy;`q1LxrnhhPK_!#Eu2#2?0C9?nD4EMc6FKb(XfI0gMM1w(KSMqv&n z;37;z;|25wTA|^mj5oAG!-eDpcDv{g9DyO2zKHn%voHxm7n4^Qg$0;^rk@cPv_s=1 zj2(l6L?IeCFSFbHEX3ga*Vhpr$FXuOhf z$?Ra0=#Oa}WLhC3%2$n1n8vf?k+|0cc#$Jcni&gEp9i4w!*Xn1^0y{1thER=5K? z;1qPj8R&y^FbJbJkQb;n&@bqLc{tKbKIe!7+TqAX{NdCl;(@A{9x`z{Gj_L z`txi0^O3bsbvaBy_;&&GcV5h@a@&@Pplc{NMsC zK;w4W`z<=O!=_uvAIw7^?6{S@!5u@4J2c*gKeXOXp0ecc4%&mE0PVrn_mdZxgHbs9 z0s04%chWyN1C6gyewh6jc7Bld-~jZ(aTtIp7=}F`V!Xr-Q}X;S>VbwJasQ4ye32HT z0d_+3JmUrJFbv&r?34J*^HJJ|_Iqg`I$#F6KE-(bp5ecb@q)H6;|%T40~0U+XYZ$d z*s=pZn1Kc8c#ydNK)*jjyU+t&a0Yr|^0VY0Hbt0UFawj&@euLD4p@LLX!>uCOVADz z&<)!jrhS-!5x4;3(C|6(08KCp1F!%sW8@)6yU+pq9-$x5_<8Dq6EFg;QT$=^7stKj`=!?LgP-i=GcM2t4K-_&59VMYWkYg7T| z_Qn6Nlv_*u&<4HG0RylThG931K`%@~Kg__?{`BL2i0dHQhpo-{!&w-FfrIgbEpMk? 
zn1%&7e8?JQ&C{NlcHu1a!{DK7R0M_(qa7H1#~PJ`6BhdWH{v>+cwzTD$rtn=L7Z^Z zx<*A|>|JYA3QiwMo?!Nn@Yg87F z!2&d&K>KenA5Nsda0q%}0Q#ZfB=QaCVNCRssRuUNi3j#U^CII7op2g@;pi#kOP<3B z%)tcAoJ#x9@^13{58{A!sM=^3ww*@&a2$qV*Xj7dMVOW6XW;iw{Gb&kp%XScn5VD{ zhT$-b!wHyy1!(ve>&BVH3!C1vMmgZ*+4K|koI{*&9wwprz4$@Hx%3m-p}9c5p%Vt6 z2M$9&jKc_QJdb!`Crra0n1eZJ`Zx2blXhVzbi*7Bzy%l<`}z38AWXw)n1_Z7X#YRN z0Ua;^y>JGGVCo{`hVF}L4|YSto0NlQ7=w1$d?^(=Y|+U=}7W zBj1WYJAqa>1067aIsPyS127H4u*uClfgYHGlQ0K!&{$KW7NHdyuAm;+0zEJY18@|E z;WUgxe>e5OS(t?xSb+9xsHe6@%|RDjgkG4tmU`d<42vDcU;!p!pND$j04%@=G}rN2 zU}%RE&;=9F3#VZKreGL$UPoLo2vcwZ=Ah|%`e&$7&Cm*`p%Zq!k9i5ZVG#Dg2pocO zI092}2h758Sb%Y8s;7U@1{a_cHujJ&Xoh~+3PaEVqp%Ampckg$5X`|5XlUTG(d&s1 zx}XF4pc{su4@O`Rj>8B{!Z=)jDcF1ial#NZtf^6L8>kOD`^Xz~K`-ov0T_f~n1FGZ z>?bbRbR+W`ns1_?jr1Sd;23nmS?GffAMwCB7=?YC8Aq6c8R0haz89Y@-_H2K0(3#c zEyMwvU;uW&FzkadXug&Hz%iJEO+)l&Z$85gt+4eD>VZz^g>D#t>O=TLD@?&Dn1iiB z`mqoF|1fdD0Cd3-=!L0|FitQHqc8^((DzaD2?MYIgV1E8{0QR%=b;=!XFqf`%RB6B=O>W?>G- zAH=_jarzAN0y?1+rlALBpdV&o2rj@VZ2BzmKr>83Lxgz-$DnB)d4)FE^bq}mEzk=+ z4^t0J!Z_^u9Q}dAFb}7paX;2SXoVT*fTl6}1>2wxx?m7?!3gYzap;37I0>`d#3S&sprV&LG0(y3WGnyAGSSD`*0kF;53ZE;S~9U>IM9v51N`;hoBwC zpc^*5Nc+$XLvRAdpk;>n0Anx@C!z6R#_uKO1GGRVoP<6&^D^;6^$PKeK1=-2_%rUHuC7ohPF=KVbF!C~lxlh6kjVGvq>&$z)Zn1sVH z3q!B~r=ZzPp8i0)(ECUH;Q$Q60F1&IOu}iHg&lulzdn@lg;p4bPB;s_Fb@OJ^k>E! 
z4#NbTgc<1g3;u^u4qD;pU-5_T|Dk<23_~yuV{jg(U}K*74)f6T4)XCg`VWoJ37eq@ zw!whte41(=1+i_~*C>n1eA zFm%9a=z;Sv02BYfA5OspT!0y9{U_r9-O&6_+JO!@2|X|k{V)qdu=!u)13F*|dSMO@ zLBkP@D>TC~XopGYhBMFy=V1`40^*R8dVOCLgSIF7tjV94UNhP zZO{wd^^Gb3=V1hnHZ-av48shZYHU;m*tAz8pE;#p&6JoDnZdTDV*k{qFtLO#n!?lcwjxKp&_f|X5U7|bJ zHVMq!?ATM-XT_k?FX?ZvuDb3=cNmyc;;+;1FuJjxH3jPFaqOK9jIEBp#GOW;N4JVz z7IzMN9(!@jJ9iyVV{OULA?@n)%Z6^?+)$_&mkWCn=WJf_*XfrJeSmYZW|>4f^@q^= zI7cb$h3;m)+*i9iV;_q5tN44-i)*d)OQ+vK^uD8IpV!x;=;OyQPw9tV`$_aETlM~B z(Idw(?p5M5Fdn1Fvu5bDFL}438(LYrdCYvW2xLESU?0cs7lTgTJ?I7W*(&u0(Bmg@ zE-C)nChdhWjA1YCaAotCk2I+r*t;YTI`e~%>~NigJc{mI+rfP?&Mfw(ckB00`eCSN ze{Iv>AB+7*cb~?&kyKdTf9&JfQ){v7jH^fdIyuK)ig$T7m*NVb4_&}(-zww6TcT?0 zLe_X`w<5n1R{}k~q?hF>jlGFQxs!3P^ppPb)~0H^M1S2A-PAyL(BqWT>z5rp%g&+~ zm-xHUo3G&5%wxUw{pfadhDuitqjz3O`&Ino=o$2(DtcP#zp6%Abmm>L|Dt#E8lsB7 zX$|iSp-)NvbjH<=Ze($8(b*qmT-@lU>p9nw_O*E^b|3b}_t9?2-ioO>KY59(I@$Sq z#85HL5^ofJ1ihomcqY-uH*k(o#h+7V6<|?z>*x{}rxI!c-6Fa+kHl`qK8d{;XxaR- zW1qo3F6ArY6+bt6cQ5D4qC3|v_tS?xhkZ!;uj3a&cW&bNQS!40iv7nvihWKDI(|v? 
zu0GDyMK9YwGuX$mx9GGd^%l@4`Z>p}qMKQ-J8$BgQR>xc*MYu>E_IdX&5gZ)y-Doa zCS`os^Vq{sZ(fD4_ig8zg!ojnC;l%I!dJetkR@SR3x^ZvjHG{)P z{C8Fl>T^mveSXz|z4jr(w%{HS)`rjvggx_d-_Ka%!c z=p*RW)+HbM9C~rT(5XL!?z>xF14w1No}b0g?H{XtzMMkefnMBybo_JZ8T61&{gQX1 zk@@~{)-BPsP5NcU;K1G|Hl2E%=t1;q{q~|~cB2Q;?IF&ubn2J5qUZzYcG1h`T>^Uu zd$s)~gKoH|`u5oo6`DI;|MK7*rI(i)4et-3Rr_n>`)%^458T7nP`_gX{=Q9%z?7E-Zu+L&wI{SHX z{-aOss6M}a=*|bL>ml?R^ny-YQhy9R_L=JIR|>uRv(@X*p=Z&nt)IrXalDA=?;p~> z4c+vR{&|Vy%_aWmBND%J?Sf1z@$+JDewg!Q$(x+FR-6Y0(S2k3^@@KKT|H90-%0cs zdO~L&i+>iq^9x+dsiGS=P?^5S{#9jMt>`UZ<~T`Rdh^wZK82pDqI=PO zc%$Pd;}SquU#&jhBIrK!V3mF)&}Yz-RrCzH|7+TDw_+WUxC`jxUnf5C*S6yRzaRP5 zvD>hZ=-8duhji>7?0q_RKlW}NdkA}{jy;OKO~;hWTq(y<4yPw3PW#vawN$FT3vsV9kjM8}@NKBQyM zWAD?k8|hrPj@^pAQ^)SW-lk)BV{g&1`>;3b*n`*`b?g!Bi_GuJafxG}*RiLhd__H0 z#w#o3#V+exxAD-9`)b36`x_o;j2ItY`;aU?tbg2$2XHcGPb-3_SjnbYS-NcKgR&l7fjwtyRO88)duj$g-<+VN^P`O8qwwNpFZSIM8$XE=!YH>JJ4q+Dw;KaCIX{m{yuFL|_4 zZi;eKJl6X4u;HPG`p(As^Y^a5z*v9b+Ip8Z*izm@`L6G2$FONjEbls8EI z+9viW_N0zIfqkOFF5{EN9>Fess~jKE^XOyfEmd?AQ(+vvO>~YM<@2dH{^(P?(cS3M zj}h^g*I8xrQT+Yrea~_oPjsE*QW$*)dij1Md5B>j)v+hBk5t&D?-}gF*n4-G$z?8{LO)M%U}N_=nJ4=mn|2 zA}A$fWL$%k%l<-pU!u$&!9G`Em)ys(XRupjeW~o<-t`x1sVLSY z<+GGe{fhnSEtHqMm=0n8__g+WbS3WMq!HbY-t${6Z#v`TM)#pNYyH;CzvL%CIZu}3 z&2Gv`dr`_cUek|9@|Q#(LSLEHrQ=x!d!LRykG;FXF7X&?tP{J7$GhZJORAK!Q_i=C zauTE_GKs+e}nw)VgEAlmbc*_ z)#I_EccAO#N8)v&yQ=8o<3;aA-=PdhY~x_nSk7G|& z*o*r=_5}94mN(8@%dWJj;wJ6oDL3+;)vxQs-*`CtB=)5EFP{fCM9R+uWjyVaYgT-( z9`CEj>w#U)r_0V-r9Cg@U2AGpm(*MK{z?FQkQXJRa{jAr5=R(&VIRJiuecwr?O1iX z-c%|maU>`=wYFCE8F=DZJ65x*pq$sQoIgvwIm$2YS6lkr!b<-hsO+EVot*#e&-V=S zKG`m>PvrGN@f8XhTXFtV-u8CBS5OSg^_FV2WAdU;qR&a3ob{b=uRmXEEbYG$ z{CeKO_brNndRG#$^y;V7ev)!|%JtPyPS&Tg^LNQp7Ts|;-;c<3PyN>!E3YzSyi7+h zzQ^;ujXYl3Bq|$rKX$!-I?!F{>IA+g(g^#EhYnXAkWxqMFF36JLJMWtWPC#Sd(YT& zf8&%}q}*ba{-)7q&a73Fl5f4&Rb}&2`emT;?z8p#CHb?WXVJ5~f1+Do#XNIUe#b?7 z?ni)fwu@^OUxlVSZV_}Z-{Z@nOTXQUexxXGy`1mKl=>=;r_#@y#C2t@8Z+>uVxG#l z8{fsaU&}n<@zN%CD|RDxsbgsqy90ZZPI)(WgO1&YeeoK;pL3}aDD?!j@|E_8R=<>4 
z+T?K@yTnszPhoG=X*Y|#K)c2DSF1>T3svlHU0a%pElB+y?0MR)wEMB= zbnGGQSsi;6dq&5ez@FBzr?IDW>^bbo3VU(>AH`aVy;UM-UQ~Y0&5ADd>FLr>C%VK} zIX^tu=XC6T?0WqSY4w!nOX`ebmwp!8=XytFd`Yc7Jze6m zYGLfK%Wkr?NqHOgP93`wyB&MSQei0NJz71Lc0cwY<@?IYOFu){)7X1zdBXl%e%(Oo zi=msY-!-lzcBNy_V9#B*>-@`O&sNx_edE!LBX%c`E87>{hA!DSOFw+*0d%XhuWe!vVjt15N3e%=>~ZX4I`$Oy81}}c%AwSs#U8;fkC!%iT)@6V z$8I{d)NbW^X{%y)YWB)@J=lZPZ|;#w%JL!XqdN8|_TdV<DK0J~S} zt~l=zlWa2XS<1yI*DVH}{mQ_E$f8TxrA;22vCr?u&w*}PzuHgw=f-ZuzPMBw6uS?* z;fB@YApHnpSJ>^K6yM7AW--Yo{xQlKDOa91@lRqe(B6y`BoCbTFJG?~lWfKLPq`@m zJ~33}L3HDBypM+-(a~{LHgw+xO<#3Bx^g{UTxFBEJd~fMyi*Lt_I8a+`W&QOo^r+O z{H*W0yq+jKzmlqClll|*&GhoRLJU>LU-Fiv+#==5^CtEJ_Id2`cxjV*O~>PdeOQdE z`nmeN+d(j1oQ4ya7x-l;Tdltmr;T#m{k$%da?AbUeWbE?3M39U<;*wk zdLHS+-h|yF{@NzxgV^2Jy-P+Y_6YVd?3OaS^e2ve9D7*o#reMU?o4T(r_s$fkx$We zUeD*z?dW}?SIkT4r->7Z0D5shS?*_fFH7^!LAfc)75BR;^F-?NQm)U(>pvc^&hN^8 zBjrMrvuxHYw|d^iDK|^GS*fogKFM1eeRRvN=LtFNfeO3i({Lj3VV4;6)&(oN4_(Su z>e8N5^EZmFZBo{Q-Gg88^UCD8^1Lm89vaY&4?5!*K_9!Be#m}N;V;Jub5Y2l1<|CQ!ZH% zcX9kR`|kWDwm9Wx@pm=zgxA@-xt?H_db5`ByGj!&efyzhFTPRyOuHtV+A4MEU z=kV#(>t~*Frq8Y%m&-qoCiR$3<-8cZczmc>Uy+NOlygvSmU5$FDbA~1ueVA$FXcM; z@Ih9}Rm3eO*`$9V$`vR#A%;?Y%ja>${-cK;DvP`9JWuo#I%{kxZ#wflhi=`CZhSYN zXF)H{L)MAnbwuszuheftZ~olM@t^a|os@WTtj8NK3V0TkKAa;`~^BJ`+VZd_lWDsiP;+N71D%z5OtY z9zq`xy&_N255s93AJChmA64eDK3mdlfx_9@|a*8T2^1rHX$+ z(>F9gCA~Obb>@Q&J&eE9rRVQLmv${xbf2bI^ABn5 zSM!hUK~HIVwY=xF_N(c}_TBf-hA#c97QYK!@;fa3*U6s`J%{d-^;Jg?p|?G%KW}6# zW9Sa_CaGWB#Gb@%udquWGuUm|2c*3!$2IATp@ZYdW7XTWq7R{$w<~@Q>;u@VtzRDW z;VSite*is*uD5?nzar?P=#}$0jy;6kLK(eznBIe)*Ys-RXgZ5^nfj~gc65om+B|Y= z{y~XPXPo`$IsB{jJB+@FUOb*k9c&UyOtW{3L8smndTXqDUUHgVtzX8onJ@TP>z56E zclEn8U9Wx_L!VZEWxs-ZXg8|WTP?06x_vkO&hAE+xD4l%{43{~8N2kiT7MkqlCN?< zdF;mCg1xezKFzMTzldK5y$!$eT052Y1a=GM%lAtupT^#-Q%?@NQO9m@;#5(O z^x2GE=Anhhdi$AU54uOwtyTO3=mqN6%dglXntwGtfu64tpX4E<=>}JmoA> zPTM5zB=!aD#lLIS`#h4)bs=fDfZr6q^UFt2i|wv_zGmgRtgKPib6Kk&r`>v<>>)3b zUpM7_ldJP9b{}>x_R7~oLG12|^2PDT?!w-QudH|F*TY0lp!?A!2YPx2eFS}GSzmeo 
zrPzP;w(oF$zDxgCu9p=1e;(zYV0`wcqCNCq;&M}d@da&OSFF>L2S0iq-6-v9o7h9x z=U-$!DYHv`QS4cM$JQZbEAk-mCDGLkuLIxW_~a>{PVc(T7)kUr_TuqH+G3MDS+O_r zJGaHZ)2K2(rJRd$O|$%7t~eIsT*-oSe}7n-B<P~QLBvg={XSEEYfmO)=c?~?Ya z==6cVV9p<8nKt>pgSHZiQc;Yi2I{KDA|6A{pHkLMd9Ht)cK6T4~7o_)j7AcpY zoYTnfk{WnY{(euz-?e5b*R-x~m;4v78+7a@4j79~{C=qfrOgX@Y{Q<%J}!1`6T1_; z{BC;XeD`2aVt2_rD&}qV_2dBh!v1xo?*}WZFN}TS0Dfmyvk9cXG3@38X-C%2D*csm zY0A0G{I05um+ZGXe|IMPTY+-&d*ZzGDK96ZS8UIGDSdkW_=$s<#lCNU_(L0ZAX(&?`W)-(eAPQzV2?yN&j4wGxB@fQg>zl z#O6bn-|sHIe_dril5%0nDSofpwlpqF{lvVqNqY&(8BXMPkeAD?%rDV1=xrz0sXiTD z+9{w9pTh6_$~@W4d9C$w^3+D&_i(&$QO$<&tiX-D$}a z=NZed10#DsYCO`=E7TN$59J&W!>zfMg`g*w;04Oftx-RM^I;05~g zTHmoJFD=IA(`;Xpqb@lvs z(M?r!6IBM$o6)DFer=O_BiN^|sVjY6M*2}&f0z52K<~SjeLPyr&3;Z=e?V9b2n(~syMSre|*?u z8}#!e?S{}N(37$sYMa=j*gYG`f0$^5{gLL6U*4EwPxUAm)vgld#G0N(lhZwGAT^*!<74yK1eP&yo8ZNWT{$t19 zww?C0^|DKjPx5^t^8FSMuGPN5LmDfMyO;9OTR9&QgY2&r$Autz^HBBUMif1aJ}>?i z{gZqq(LJ|m>rZXp>K#RV*rZ&JaznS1PchW8(3kH}Vq=qXrfc9GjOQNaxr1^|AE+x` z$0>T3=9e40x56&t&=JhPnGs<=*Ew}we}^C9?DPL&3U}! 
zu{3{It?O8e?FK0~8sfOO-0sSCGwDYZJ#r7{U(%1|>s-a-etqZK`tzIWFW9gC!qVO+ z?WQT;J*u5QF<(}lw`zZHEd4D|&hx4Ae6McT>|x&Br^UaAb|p?1<$Vum=cjuZA3Nm( zls7(Drwj(3lwaSF&r_H8tlo8C{YA|bD$UP0ezTFf)N9sz~g(ZJW=bvTg*^+ICl>0pK7h5ddiQSDorepVE-=Sj_BmenKq zj9?$a-m``$%lVgerg(k2zH`6&^QAr6gel68e`R$&cI;W~5$s+$a+cRqnP=ps&CCPy z_3Uro(9Xwn)-4Bmx=gP;@A9C}p@*f?igios51_lh$?E~pE6&TLKM`~ndW+0^Z4-MO zyC<%l-;~)?*iGN!_*}MLOFLQY;cr(zP8#0FxS-FL`AIuw?A_ntytvFR_1UrKpQv7+ z8+{SIcpbMaJ|A}TlXa!%VpiiTR#Ybm+J?M_9>f;+gx1$HeU)v;}a1Ymc zD(sS*7HkCA`@qLlmHGd7nn^qPQ)o+o0x3yput|tgmmuvD zWJ=@Omarg3!6iT|QCf&fwXU#)CD^zY8nuJjwMvi{P}77e@2cjHoQT#IJif?372 zN>ZXu(xB}ccaz<8`+a}TJ$L5L&7Fqb*YD4LQRdF)^PF>@^PJ~A=g&QNV%K{XSAJW; zkAi#SoWI|wp6&2O-?j0FIGSz!-Y2<#wBtq=Iq5Ni+~V(L#wo>@Y49jGe{$N6r)J(1 zGMc*arRx2TvtfW>s(5NI)u$GG?LRaB5GMNs=IM>_P4IQ%1OBJ=9 ze)tpcW__+QxP%`C?|O;#L&QMlf2#Kc_^m&(`$J*-)PeWP%kE+x`eVEA70_SpRSRGH zlN|oe8eg*brR18B+w}_lNpgy(>HQ=C_cHB|T>DSYET{VQA-D5i?fl-R7e1);A^5n; z55c32#9HwEHV=I$pJMRppJn1|uWIlCa5G;C^at_v@UI+A_otvgY=s{>1K$pxgb!P{ zm63lB{NbNldo!N7&pGCPIre0J?poWqC3h6L;#cW^bNGT>YG1=0_sr~1a?uZ9m*E`# z{vBW3c^dQi%zYKrs|vZM*O-Uf_6zpgI{4~evVVuH{9uJU&-cw5-1*B^p-sSSJ~rlAAC1_TgvY;*F8t!7bml)pN21nKPWu{ey#G0KZuXPoAETj%WgIB)nj%( z@A`depFvsbl3XKl(c@>9Q+aL34I-yBVfu8#zXBf*sc#KQ2H+>hiF0%L;?4Kc^Fnuh zkXyGX_lBa}KRz$_Mt4T3!l*0%3h$=A|7QDB<}hpcD)FYxDQ<^6D0r>6kW*k?1@$u@<$v{Xj@qjo8 z{OqOlwctJAtHv1}{0p!Z(MYI0y`8fRj@Po=H5YJ?%!h31sSainxunfEu z+>A#d<6SMhlNWVb_z3n(<=Y6q0iGOEm-KB0uLC#s3+SVE?totl->&iN4YczB^4$f| zjQ*;;N5H$lH*oLjm!3CEzi1P23EuB#YQJLeSHMj?!CzB;?R&l`J!_HMT^Mz`-atH% z{8r=_FNnJD4f)4q-|xdBW-jT`iG1zC=o#OmH|>wy2ILx)GA#bVcfkAWsdUi~G4_C) z^;Mu9#FxSM!*?p*K>JI-TKKh#qVC_lO3z0!BAfDQLaz0k@ctsc4Za1wocpl;)(yWM zzD;~U52YV~pMc*bKBOMw@J&V0tiJ8$KN7Y$n?D8S=l#g9yeQnR%5Ma|8s2#W z@ly3D`UpNzn)V0ApHw}e&82$OAXklCsgOY265n8X-G}kb@GD54jqiZ3%96kI?6dqq z<)1O`q|a|E?-Ats$){Fw0X>yZ^gjB(%E^e+ndhXGPZe@QOQTue=2HyZ)%6+gt;)_4(@+-3NHSo%JWfr~x zemlGuSA+A>X842fHA-(UmD>(J0^Thw#Lhh`cX{S`YU&T(0KUNHr4Lho@Op6hf_LeD z8hit|nV$sWnI|WG%07lWl-sy!Q0e|=8r1w$e3NMd_{@pL})9`WlM)muE-r|e5GG48)_6f9u 
z;%W_i^IMs3DE%z+S;;pczu^y}?m5XB^8R0ybo+G&^3zv@>t95Eeef%)qwf1heAgwt z2f^2Z>prYr6Y#b0)#3y7l3x^kob?d=3Go?mHFF%RL9X;lTYexPmE8cp0zRyNsO)C= zweU?*zRWVeL&97|{J)2E&8rxPWp{fC9{`{D!)V6us;Qh2@Y<`R?*0aL-w<4Pgsj(; zU-6$3Z?Boz&#J*E_~VcMd6eYqz(+#xM({y!RnxmAH;$kk9@TyplR=CcjF5ge1eVSiA+>L=Z%HMFntn$fQ^f44;COdwxf`#+02(!1(@ z>iv#Pd#L zVDoOJ<7nN;I9bu{3ozKEV(-5dXTG;-7?lc`Kc9iHPlw*4&O?NSI6ISN%Eu{fHe; zv&@TB-+JU$ev%0 zR?_X}KHEG;at$rSedKmZE{lCk`y;pVu}rGrgp9)UGy`IL-Y7DOTSY2&BzzEXVzPGYX^^k@8>=< zPG$OEA96d9>ylicp9|8Z`ivmA;4g_k0%p-mBD!S%;!o0kq-z!s7)PXk4SdyKW$Lf; z>cLCFi=%vbgXJl|Hqu4El$oFG+6g}SMZ52rr9UEOF4=Do`FipxkMPB_OW?VW>NhR9 zC&I@c=~KLoH7mTT;a$S3!EX)0>%eQl{rytqG=f)y%Qj(t+zP+;416bi1AK$h2iigP z?T6n5KOsI#Ka<=za*ZA7eZJIwzVA7e^et+oyeC)?xo|d>Rko@3Vet7Ks*s&2VeiyXzK5X`S<638eyQ<7K_(}McbNDi2 zK9jkQlYAfYPFK`9ki%!F{nF!#Y|8ZzwzV!0&|L#C_0?;s@bhfo}}qRptbI@v}C*XYmsy z)unoreVRPKIWrE_fFBIO>%qIh{c%A4*#zDJ-mQ$&aVg{Zzw~cMuJl{XE7VT-iNF5R zzX$%r-_q{#lhAfg?;A@05#;ahovHsccq6!9PstZ;N4)@E4PFIa3vT8yvy_XtxukC+ z=?4CueLo@CJ99pmIZrqBM}AYEUC#yjiD`fM?eMQCzW^`&2jS!2&Rh>F-MI1vU!im% z>y^Ssu@k)N8swF46?_}KKkg{sT8o!*A515{(emD4%-dI-a-l z$CRA=yhHp}c;}#<--MLY4!;xrR>acfWS+-RK7Gg?L~euR0)8O=sLFZ4?u&%P>uLC{ z-{HBDt&hK-DShz{{`H5ammO>1cYZh1?n+k=z8zfm!S+^LY=v)yFM5Ohw-fnI-^(mV z?a>Fm0lZ)3nEK5eXQjgka?@Glq))UBe>lv3mh3>k&g`F=@3Tn08u^>P&p4KC9PvG$ zm7a~r@BF7|>hFR2pU**?OLA?m#`yz$ zGz9MfpGM!6fqVvR{(juFKX?uKU=G($e9tvie)O|UBSQ2l2HzEeSA%a4!Rx^955XJ3 zH-W27yi4UagKq^7+Ns^j`|Bb3Zg7?B$JKuQ;L`u7GS3{(Q~PeBbV-K^HK2ZNl zkHa(0YZQMD`emzkV7!n%HSo8>``b&Z{BhJdGLJ8_jMtJcqyudD1^q?&y@B~>4f4B&qBs!p zv;6Lq?A3()rjbnlSGld=w}KDM<;yI3DxV(YC&n`UMs^zjPlDHTpDli*KrojUatIM0Qth@x$b+;fw(BUjl*}sFXrC6q;K?b7dOvwv*;_iD&%&S=eo~( zv&c#BdgPibavioNX0?~(Tah1nbFR}vRcEuW
~Bydu}(r&5fco@qZ#@*~Kvcw4Sh zrtu2Do++Q6FRPwKpQrs+<+|snQg+OIzNma_kZVV-wuCRU_=WL*n!gdKc#O!{9*V~<(tti>HQ*=GmhNB+mPcv7?-49VY|!g z-n*2p47`tY^#TL&N_;K+33#JlfLC6P@Wl%gY!Sx zq|Hmv+k z__gqj;sf&{rRj%ngWr&aAAui$kEi%d|BC(aWUh0hVrdP{=i-7{#qJxeo%7F$Q?zl zk^5}x9M!iIxy6mS8Sf1$eINJ)_~Nkf+G?frBgl<>FgIh~sdk+P9|YgeeXxA-#k;8Y z-MP+g@%Tk>KdT0Q1b%xKz5%{&6G2^e3#6A`&G1#5Guu&kJ9u>n-VI&@F5SFK_x<2; zPd=poj#yqc3+rFeFXF?bUm+ep@YhrNl)=}-`{(WQiyE6QsdTgWoAhi#ewut*Bp(vz z+u$4S36C?0JbE$C0aUvT=ZUkH4Pc3%^8i_(tx- z>{JE63%)6Ymwt8dBk+sG+e`U2T0AUYrE7&R{}A&fFA<-@JHU&<{qW&)E!>CM zaS(nh{QV)k>@@*D0B`)(w8PAGrphmV0{{GQX8FRa!OOtQmA}2DR~>jYxT!~oA2z|4 z!l%j&ti#&ix5EeR*J<&sDl5cq`r(g~u2g(Txg(Z0{vC+#vPZN7-yr=#rBD0MjP+0z za;+Z;x4Z082R{NIQ$F?*-UvPzz*TNDc)xJP2d~_WQe8jjM6UC`Tz9|N=3C5XANT?A zYGHwTEB~YL1Mm&vv73M1BuJO?i9X5yw&bRMC(^$UB>l_a>*0g-ssXPB5BhCA_)2i4 z^)B5vfmesfw}O|4;2q$_A^G%xF9r|#$$-VHRA|5+s^>U-8R>e&2mDC(D*Q6`g!k4X zVdIwasYY((qc&bL?+wP0diV+Wu=n6(hpq6XTQmEW@@)fO4Bo|k*m%(mzZ>3PZ{^n? zf-AqH7SCpnY4~pPQ{991QohAc@g5p{uzjn+I|I1Ns{`K!ULBI3_$K%>mDgs=^Vd)5 zIxXI=d;{aC_4#h4i{LvV2#3Vu zcKDqS1>$1B9@4W9x%>WnracG2H-N{Lf5!e%dR{9%r;)q=;j}&d{aE!aewvs8-^#t> zb3Ao!I5VGGY2cH`sjq^B2i)wemr{ z&E_A(JHc-a(Wei5ZAiI;;5UK$}NgV(>=rpgz^$4FOzv z*4cdel@IOZA8*Tmn&2l%*C753{kI*ts%^8&^&z(bIrBS_sq$v_^AY44KgB+c%C(pD zod(|t-X|=ePZ?10SJC4y0&z32PE|g&$klv0e0{C4rxCsZzQy*(n^OIV{TciBZ4on< z%56u!dpq-N$$RlQq})E_PCRP&H&XRa?;jq8U-=kuRXJz$-}JtaC|xSI@UNLKA>Sk* zU=KmMRNrdk7VNP04D>tYQx9Jazu)SgjvMK6CD)8xUz;88Q~GDNpX54`tNDyw|7O%9 zGY$XPNpV zw_qpph8guw>uc(d+yruFzH^3pbxOZP`1r1R^}~t-nS05|@y_=?v{9J*$u_-4$+6@pbT> z@O{!G#BNRS@h=AbY^J}rBNzSBndQ`eeaIa~u2ALKOL`1~_koA$A$|h>)+fSwz3br*!`E`} zT}sykJ^|h*Fc4prt_^xV`)2!(j~pOB0qtAqkxb&-44I< zuXD{~MTzKA`Q6}U;DrWp|EK)=!54#92@3d=(v4(EH*M3!L(<8AiofpaF_tYi7Teo=wOlk=t#{3yx21@SD13j!&K7w}LM?gC8sZ0p#L))Bf&{qvFTmYxbq* z1Abn1D*OiXarl_@vzOA9fgb_iE{yv5=l8OCE&R%7Y`HV`K{L;HNq#HxW!=PE`CrER zB-~#*k+1!RZ{NbV4v^mc@UOsc;NH8W_fhcDzX>15lx`Y+1ir>g#HaA0zhNG+-;PI? 
zQ+`$8jo=f)Lj0@_e&w?>*Nu(fP2dBmbTjuGm46#@3%;3tjuZ5cZupzvqsqr#rvBhH z;DsK@r>Q^q3UCvL0`WooG<-e0+9Vi1jQ{T^Jis@L55y}$x|B~Xay`g(3Xp#U+NTkI z<+o<;hc$y&hv4ntE5H{hKYOX3-QeZmn>>(DmERA3KX_RA;z!`8&%j5YWj@oBJ$)H` z^ttSOE&TQ@ylmSDU--B9llm|5Hgi6bdA>?^Z%6*7ze~?|{rwc$T$1ZUuJ}MuE;H^( zZUng`a{Wq~icd4=w}sziF4jvwm!28@GCf~Ye$~j0{2x0$6YpL9GRu|S8<5-j_u+O^ zdp5&Q!biFHF6G~@{K30s=#if9$*z6KHGZ4=SMp_+^C!}09QpDj`Y7%M`foK*;kW4H z$FY+-(i=x)I+{!;H%+x!Y7qupufu=_3*bYn(OW}XUqrEex!1mksCp7 z1NUkVf4npGhc7#4t~*`^$|)jAAABdg-+z>D5PT=NH*OpMw9gf!-voSf(Oh@EcLx26 zpJO~dcW&zaaerJ>IW_PD@Ppi^*XeG*65jyda$a^jH^cXzf$xBiolpBK|3Lkfe;<5X z7QP7nDExxO*g@&-C42%r3hp20g%|!U9Qp0GvicBS2HuSPZUOcZUIQMzAQP9L*Mql# zoB6^l^8w}4iu?%jy7w-%QwR7Ec*gUgz&@zT>qqY3g>&8W1i|>)an_d?Jh>=$t<79^ zn?U}+MRVPK`am3zSkd3nuJA#-Re>J_4~|Q<;L0y3-vHhnBEJ>9Geo`(d{;nT_3H%R z34UuXUuKC*)rbrtf1q@()0Z>5JQC)TT?$!n)s)Rm?ML|gjrc0~{w%!Os}8>IQtPkY zxDa@rC%LW29Y(I3`*gp_{5>Ai{>tx7v&;1(cjOJojU!jTeD?f`dYMnY0l6CF8p>zS zuMxQ+qQ7U|03Ph;Rp5)k{rz11q87XmT=&6wwDN0&kA;-e3_gKglS*eV$+v?K zB7fKe`4rv_-g?DcXN3<}eCP-70B?@+B`iMp&f_be3FMPkk&os<_7Yy$$N&CtuDc%a z;bq`O;L6^+Bwu6YO`NxcY)}t=AMzu@0`XP+R`}|x=ceB8a{GfjZp!c4;XAYN%2Jo) z`j9KEvGa#B%xg!Gt3|F_35`DC>#xFZGt43XQ~+9gmg*`p3Vdd=L7^XbLl zjo@YA(m9x~uvYkT_!Hv2xEUDdB-f4H3FK-em!%$(8$_<~?Q@+L$(_M2)5tXaFrR>4u#LNj)bolpCx~#twydBxs{U3(he%G0l6OJOn(j7 zP5Cs#_h;}k{ZewB$PFM@$9=Z)RPO=gPI!7rmP>k!fG@aiuKRujdidwr)$q}Sq=n~I z((w2Z^7~NItA=zn@3ifk#SW5dM6M4x6DP9RRrr-;+^S8>s6}}ceF5X_kJHVU37keO|D!0etZNjtIQ*uXZpk=u0rO#3u|*Mra2Ut5uDMXpBW*h_Zn0Pg}%>z}rp>emN9;HAq_ zKjZ(%4I!7UA4U(+Z{XLe>@4}Hyei}t)SWp#R3lf9Tn%#O{e-abI7@$OBb{^O?ERz% zxedrg5%(^&!vJ_4xODX{;UnO;f}8qVLijZJec(-=FrUJU`iV>62LpH&c+*X`JuRoQ zYQej~PYA;wvz^OnLT>B3?7l!2zg79|$T>G>&a+fbH~0kUk8+>(`8p@i`|g?Z!dmcn2;Km`5y>{Qkpf27GJ`NtWm_DR)Klm%) z<~eh!{WASRa^uLwZnf(*|GLNb`xh#|=zI7Ba{jn0yvpJe+E46$=Jd~+Z_rRxW80%BPdoU2@UZ%b?}6`wZ&aE<|4_XK;hjIp zY!9Uy2OlThe(r;DUj9+|BK9TScJcU8*m(uzUya;RlicYtpX!F#}4!Tokt`woDs{W_(j8HdvI1?e~r@7y-mDdoev zghzisTnNF7!54(!)!>W4{pBm4Ix8P+heq%y^3u_}B;O3Ka)a&D4qgmi?g{g$wB6uU 
z;9G^8c}lu|rvC6P@ZFa8$6FKs;0qh@lUxL{#7XH@G{Agl!%Y9K0v`e|RQ~pod@cAO zc$Ek8DSaBili())2l}b_W_V{K7GC_Q`~_rZQHz7M|W{pe%U`{y0f>!|X- z%f=sWbqSvUZv^-EU(^5p3A2D3eT{u*t|yef8abtJREGAFK6T*x!Hxfgv`-U!b)$_h zA@Q&cz60K$Zx#7=f+xWp{tJ2kEaUHns2+pJAGinqnZuVewDYv&nlkO6`W5|<`BMm9 z1-=Ws5E<{1d@Xnz_zDl?Q+NY-CwS10w}SV8_efs-)c3qtc5H`V`=Pn+x#NIeDBm9V zZut9@-d;*K03Q9YO&74g>@p6&3BE&iIn((6&-l)LGvitn_$%Q4ey{ea1wR7bt@fE^ zzArtuB4783@O=nVfB2i=JCF?)XL-J&e2yYl^nl$TP5FU4uazCA;TQbb z%yug}!a5k--_EjM6?idttt~g(_*jqJ$_HoqRg>}s_m3~ix7Fe;+^6F}=KM|ib|cq9 z`UR4+m+*e@`vc`E-=pB0z}L>AZ}cU`ndZ#+tn|g;ZQ%P=R>}`&>Q#$e(L*!m8x7zN zc#lo*k5BTOX83maqvB_@LrR~S>y$y{T9B((zV?!x#=-l*{c%rt z^kr-tz~w*1;2q%iE02(PRRcc(9~W;g8k@ zekr~Vz6HL?^8WEx`8L51z)M%}Qo2^~A@CM~CT^w29r2y;@xQR+LP$IG!?(iw^ELht zz6;#^PFt$~&-CBuAnpI@nfs;1;3MGG(kn22t3EaGlkk3jlD_reN$~AT7ozW0_?xz8 zrjx#H;BoL*Y&w5`5u{6UJ;dyB@Xu#h@-e0iFc+k4us-{1@s2 zzSGL;LwFhZeUD}8E4&7LLkM0EeiOLge|(31+NRp%Z?r3aqy&sHBS28 zBNyKXAKPKa3B#x7gUa_Pd@H=)FO+Tqd>42v_hIc?^kedd560;#i<|w~ls{$0AGJ?C za);Z3aVpdAB-e~w>@)VcT%dkZO_%D?iChzMD-7cPPk0}AEBM*~J_z0e9t+^(;GN*X zc8UH3dw{PD$QOej0GF=brE;sm`@wbZUAnIWKL{T6Kt6>xf_H-l^=St02;`&oX$O}+ zb;>8u@6?t(@G5^m1e^vT6_#XI`A?anG zZukNCLh<%ex_?WS}imiO03_%wJo>HPgl`4#;ud4kt+?~X^AaitnQ2`^di zQhs&d zo61i{kZbLrAKUfQhK%>Gm}fna`THbAKf_O-w)5*5zxUyuXI9;7kURJlYdRN2T9t`2#k+ ze_WHE?eN{CZwcWA=~6y@$VH#C^R0m2i$4l~6MQN6!G0lrdKSL;=WhB&r4RI1*|`S3 zo&1CSu^xOoczZ}X)o&~OG`v~AW$6#nuLHT-zw?ieY5y|q55FHiSpFb*CwPO(5BR0h zO~4NY(n+tvU*J37yHe@W^H4#$Bv*}G?E&ToGsc0;^PLUIRrlKd5ojOf)2w{pOSK<0 zWBudyGwIujT+KglUQ^}SOZ~bJe8EBbm(~+u@!$8HP5F!?-+Cz1Ue(~ySLyHIHE&@3 zj<~thJ~gCkela|rt9}je{qP-1k!5}(xmM&>exLD1{oB7T@|_=+TsLw>12*3I<-U+U z7cIR8kvoc98~4HfY1$ut?LTFXZ_2-Li1q?EvK)jN!(P8=o zyjhpN0Y9on{=|`)deniBcz8g6>Cps#1U{^O;@jYd;B_Bnzi#-W@L}~(ngRIgm$IiH zhhG72+BZbM!ebN*UmH@s>|X`ndx&lIT)C)yNGYm(9=X z;m6^PKZe+2E4=eB+4*+(BKT6og7%c2J(eFApCzuSy^bRH3i%XD&R)VNzz0I`!V%^( z0bKPh1MfM5KDCxVtTMduEaP-5^?_{0A&=CPI?o5x`EgkN;S1qaH}6upBj7P`v+gi)Jw1;QAN>_>1YfO0fqcc6 z!8gK}hVW%zwU!?gZ}JZxH=2-J@Ji;mqx@UJr^!ECy*uFxNtaE(e)v-OZ0SefSDYbz 
z^w-oOOM2O|489&-x_X!J8t_}e7kD6_lBoy3=?vvQtH?ZWKc4!;TBtb+pWp>liR z?}OL9cjIH@CM3R%)NIBugiirX2F|7aOu@< zabxeW`oqia!TJn*eHOkSeh^-=!E&YR2>cQFTAMy>T$5bk z7~>XlfvOGGnFifPj%%ine58BeR6ajINJ|K{)YHe-l~fsvkgepb z!J@9;m8_&bMZ)u(md8sXdB^wQ@#Rj-%^pu3i<0vwJ1;)9H|OHXy*YPGmPW4qaV0YO zPJaHr-z{0W?{~MaTu*-c$gk^ncdp#z(?|74ost!Xr zd6lu|_~-GZ9?8TavPXQ%p^TxkNq2U9DtVDJbvAkkul$VDK2!i+VDmdaOMVOwl2O{B zi{ewK9(?Cs+8}W-=guFW-WH)Pa`&EE6VevsZQ8;s+v+}=^FM0~)%pLGw#fP4wT0IP zNqkD}d8W2FEZZRyTS!}AL$yVY?9ctD6Hfc72j8(58yZ`ldhj3iPGhrEIZonq&cc_B z@4d%u$F6N9lS9j#sbT#3813*Hetvqn)0en( ziN|uBo$Dj>5^E!|9Z`HCNgEC2#P&rVYI*rPr%vy`uI7rwsM>}ym%T62o?M2G@WbeK z?+S)Y@*2fwl-E7C@*AQN}R7cMM%GK>i^9%@Qgj9kHs&>o>|LdF|>|o7KOuMFnN;+ji&VwUPYe$+Ml_ zVfq+;-HT3(uvuan-A_IEoi6$21K*0YpFZT^pL6z}+U6wW>%+9!u^jvuxnbJn7(T$G ztM=V)KTGxHvuqow?IMpj3CTqwGv)lcDh8;}N|#d)UbmOB-9EBLeI&Z~R8AzJ*s_lP zzMB}bhj?;KeS$XFP23o!|DGzMPul)+IO06FM>u*v{4tI&(q^$!S0{$3gV#>S(C^jv z(sqhj_{e;s(F6DbFJ$=FJLw@-UK6!k@#M0Pm zC*kR8eBd3#t))(4?Wv~@E7lqw`zSt@gEa7_8!MK?qzx*ee_R$C4F?i zo3`@zzZtR2*Y|eQR?lODE@HOyF?B(gyi<1`ri`hAQ~8HS(PRDT(^r~UiG4<~&t78j z?lsO!viUH5YYzHdw;(_98uvAyi?qklO+KXd|Jy{Qy_D~}vFV{5=#Os<(SD{ZtIJ;6 zN@I(BU=O}OMEm>e(DzMzew%zgLQKYX^vAA4_~oHHR~q}>VC)ooC%$|m|4u!)n)u`S zsq@ll@-;E-0_Wt@5&9PSd-24ir~S+6^P|)!PjLV~mP=3toy5 zb6&)ME3UaB(SPdn&eo>06U3Aw>#5gz)2B-&$LKGr^Dy>3hAm&CO^0&wjopsrd3uDt{l<_vG_&#gij5;>kQ4Pv+TpvTZ>| zJox~&%~G$Ose1J-{9o1UP@rA~{~y-ta`{Y-VpI-uDkl-6j~M&Bi`bzyqRqQrD_L2= zyh`JR#)Q4Zuf6!J+GMZK-s)TD;?HNA2PsAwznUMPQlA;6o%g+V=gP|{H%{z4PCJwm zN5*J}0_H`}kf)K)jZX=uKOBkOt~QTPowYX7ZoY4H+g^T==V2d6V^OeE53XU}Vq@GP z)psuALL^Ilm$>z{{dklyca$+}41F`}yTsHt2^T)^Te8yBmpbODzWDbj{=1L4W~#o7 zW2U~uD!0B%7)O|wVvkrPFF*OPvx*pd#HnTcQ;d8#zRJugX%o{vw9|UpW*;$XAF*Q} zZL^QI+4sL`7ptc?Kh0>D0;^|%rzh<~9FWaVJ@`kAchM zH6G+E9o#pz*l`bkl`r{als>~ZY`pg7cSiOd?)Z|9Z_hKHI|=4;_|*$$zHxfnTzqQr z-svA`4mURuJE!I4Jhlld-nBTfFWPeR4*rd!H~z5m>yf3257VYUB>w#nKmG=G`o^|7 zFa36z=4fZ_J3-tY&RI}-4BIA`;WNzp#;$UDk6+>Rj^mTB<43=tFaDM~pTK`!{Tt$< zS?`=Tss35Y82Q+>VBF_%&)Q($@Ky2NKL8uMBHl|}tsI6s#u&9ka@bAnIED`n!yRLu 
z_I>4vuQGQteN=gV+{&i%Brl~OBX5%@`6$mAm}z(F7o&dOxTP^``Ue`bZe`q=YsQMR zpVgS9HC-NK)|`YHqYqugn01@RtRodSxMP;rW+o0{k5TG7D%+rIUXJT0@{hd8b-fjN z&X&j=^RwgbykluBq4kUUsmaH-C$a8vZ!Y8IFTc?o{Xg(8`Nl^6X>QCo_w4QhH&1t5 zUE=D8Z%e1W+lnXmZfhVeF5%zplZq2cZ)929ox{D>aLPw^5a-TEmaNp;5Z#Z&)=>^| zqARBu|0l0Qx39#<7ipe^FD=?jK1!$fko>&kj$@>$Scom~2ethuYYtN`_e#gS%FM?M z_IVpSm=k#Qb#-SwO6>6BdZz9rR(IxGGjuNr(cPUV?Pbo9sk=MxGWEp%@*&ff^U#|< z>5T=WIegE7ryZoT{+)Vo9sV+N%_-X^uW(k4UJ+kqVj;e=?$lGUu0u;QV&fRL!MCTz z=r?1Ezaed7*32{NH?wWLICI9fS>~+z4gOYR zT;jC6d@=uqh;bWpoc&98*Ca~Dn;&=E!oN<b${V|lLrqk*za-Dc<-dBeJDoD1se$Najc@C;oeXV&#cwBKd4*=4Ntqc(mK zuTAXy9-oXo$wPe7j}%M&ZIGAioK)NLxBguJrk~|S;%+SV+6MhK|Eb86?edPRta-MN ztN&@u1inN0AIEmGr*s*8(BfB6?!%OSxie+B%h4ff%ew*@waEkV$7AysoZRiTnIGR0 zdG-Yxdz0YI$$B^2^`Y6HQ(52@__5K0xM9jCc2t6CzoR^2%T~Ut@ki-|9^9XlUO!Wt zE1!4UJjCCUsq^9ghjcb`%b?B$R%g}2weybA0vT?j#`?&tjc#P!Nz6DsFaFZ$LxnGOecE{`Z$-?SaYBgKO8Pje1y;6E?k!2@4uc{aLKX+6RVwXowGR6$oJOx zvcw1Y{OX2fi4XGmk3*LvzL&^J{O(Y0!oNS!>P=5X|d`xkOsZhq{6NP8?wyWH(C{$u0$PGT4?c{jeq=N$UWTOKrf z23KlLD_nWKE`PK6F~(Gz_Isf?Vfr55#f@HTeDS#Un%6Epd|~o?%q6d8?oIz3`yTN* zlAnJ#;#`^JeiR&E@5R?E@tNMI(dGD6@u|D$TaEOAQu26#x-4xdP1Mvz+ROQ##Acdj zCy|RFH_hGx^NuS$F8Q-~FR`^+eBMIhEqhyvrRd1Kmh$F1=6>BmXDS~*)!z8JbG0va z2m50A`zlM>4riny$-~7J2$P?^+7TZHv$u(}fT*92|->JjUgORD^gR7@BKIxMgf8tl* zx|a@pR)_2UEYe;wopbV0Vv9SrluR=DEAq-!eJZd^p7w*#QTs@;)3K`y6U&@WzI-9| zk}et_k}C5ft9w~bF3QKY?DyO|cDd8LM|o4Ox8FPpM%?LLOZ~Jy8iE@pze~vPHu5Xs zFAydF?x_nm!P5?={z^}LB(}MF=zE0ko*oRAMh_$R0sISHelGvA`bd|J#8v4ZGqJ^2 zAHI9}$VRG95`EMjN{`+~pV&N}u~<8=LpS-d$!|$~YP-8;HG7#Kz<#Iiq_5nGzsP>@ z75Izxzdp&IWcAWWk5Odu+4o-eP~H=Y+a_+^X7^R`k8j-{(O&a&L$|qmqy6|roU*mg ztM=FPfBJakGTMKD`J31N=w;epZO${7BI%?#N)r8Ek$ud3qGWO<+z@pg{z^{b zQ*Xy7Y#SV=Ea7K=jWv~(eKWFtpBo~L{O*u^!TMb+$1OMZEVfaf!KbzFA{#iggWAc# zkJJY~h7ZPwAERL9&pJ~o?{|Mqd1`a~R_i+!0_MBwF`x2@6-Q!`WhPeOyVcT7W3T#& zsRuFkRp|q!@j3Z$#N_*P<@@mJjP{x$O%i#Pqdn3;_}o1_&l!hn`wY)0B8A7-M+zSkR!N%CHVDp@*BdY5> z6Q_4nu+|u(?bb!|9y5CHK^NpJ_Qa=5TT{0t^gy4)`iBai7%zxVJ%)c&;2%c*tH>{N 
zb!Q%7;zw*ahc%Iltwj&@Gt)N0@uzae!PIx;%8}7`?RCgjYvIfJRQ_uhx}0Pb3k`Oz z9W&u?zZk!vZ3}4I9qQZCoj#a1HD{IS_ud>K;#6yle~tE*jGh&0j5mFfF@DW;*aTcL zLwj({?GnuG64N)X-s?SU(=!J>x6-pJv**OTNP9~7)v5yW;(6V@$Io_pHTU*-;^>x9 zo?WwV(F}wvDair#wu5Rr@H;!r@P;yi3V9_51=IyyqTEDO1l#^nB#^=LDZ~ zq@Txm^T)O9t#p0t;+5W5u+Tn-(|!tlxJTux{#EGL^|3ovB7fC#-P6Zn8n>9wpH|(` zSL-wXSoL%cW0jRNW0kQ>V5~w{Gk-$g2=+gT&farC+P6pb&@;XBmb&AXng3axOkDBV z=~dr&^hTfU>3OoAJ9QCPW6!=Np=VXv=P00W8{H$$4z2Yhqvyj{e1bT24)s09w9iS_ zXW!JAw9Sooi+JXA;BDB)%-_zB8@W!>ew8{rO&QniUa4nbm7jW7q)1U%`BTZ2 z&XyK$4XE$$a<*v9G;0}sPvC!?d8mjuH#1f+G&)80S&`9yF5gu@wWpqWss4Y5ZFe8K z^MHS^gnjSSvjqCpiCAQD#=1n~bp>-Q?I+_)&uNYt`#5uN?w_X~hbYI)_rV@vk9hl^ zVq2e_(w9HrOiA_tvd?eRnHDz>t;cniL+SoZ>1dOK8dK5R^xu}t62}-?z3P>w-Rhn#_paUI5%2F7?JgAbT?$Cu~_&v|vd!selQz4~mJZ~X4#uKq2=DRcJ0 z_Q_E`wXVE+FEL(q;=AhP?ia*%C_lwC^Q-`yX>PYdvfO9LkT$l2xhJ2Mtby-*J+ftt zzO|mY+4b0%dDBH&ldGT0?(*B8;!~b3=i$5Z7q@Tuo+YiBJysuCaH`y>hJt7V)cGozS@^D-Nb6$N1kP$xn%Zu{fjfi>&)lPi_AF!^_e;MGZ!V+ ztS8nSUzU^Dy@vivd`jNMoZ~jm2HnEHcXEHZIVUuAdKvM3&7B8^a|(FI!=4E3I?5d0 z^h4S=c`e^*Q^w?zSzl4_rIn*>Vs+PJJf^ z$N63*R~rdx-n5>_ZoHPu}JFwm07`BQK*j z_%Y;^U(P~gJl&06oP|T@b?AJS)m7_3^?T_vR~YG&SCRkK&XmrgD4!VlNKXE!x=TI} zeNp{}{%N!L(9x?Lp$~EJ(S1z2!6pu6Z!cIxf0C{H@4=*Cl zTy0`T@3q*L_*}2=nnNU)J5x8R9v;8M*|L-vn&N41bAN#yzdmEXQ)e@teae16*M5K0 znY!AS-}-$WH$V2-?vHBTrr7M|t3FmqAM)nxtc4!aGv~EmwE3ai7&@&dU#*iQt9g7% z?n!5Asjr+$UpYKysAv4#A7dYoIN3Wohy5tkm%KKES1i3i>jn1U@+;L=*x1zzz9h2Y z5&+uXkcE7B9jZ0o0%%^&lugbVK*Y#P>{T3zPNm;~Ov$j*Y)MfNG*DtO^ zH_rx1`j@%SrJObHJ^OyWj}hmvOQrTVuZ>*%(MK2VxKMJ1a5sQG6)E{>H&{iau+qby z;{4`%gXLF_-tFpT$|aqi85!Ta#npi^B&EZsk5Ar$4kbqSlg4-8M)9*s`k$W3>Y0m) zx%fV3!Ol-ECr&Yb#O0&3XC7_D`t0Nw<*?S-F-m^I^!$YH$B%<=W)0-w2D{B-*g~?x z#@3k6$40TqC~3x!VJ&2Et%*k84GuQ^Zu;`w@!n4Q*)K_>HRc-Z^ZHMT+MR z1*@s^1=17D`0Wkwq&N5P;+}fu6N~O77HyC}kiX{3lK0wfH+igVV4i6AaC2>b;=MW= z`&_0u&mEKbOWe?UlIJI>ICHgZ?6%`F z=C>*QPTR(oALDa9eN^_6j~`awyNtSs#~+P9pi2cZMwUEfW8uao=8@Cu zQ&4GSE=7j?jGvu7vp@6vF2$QNj32svS$a?;!M2^DD=FeZyade5{1}Yu<0{ 
zbfccBGQYSUn{gk(hJIgLgAe?6fz}MnJDu2Z`PU;h9x2WkJ7ZhfzJfh_*-3r7B2uzw zujaSGwotnuZ~W(c>~KDLSeyE7<&T%zUz41kLm%gpJ&EI&(TDEEuGg`Dp*hw#wi?bU ztlX^qLgYp%mokozQZMOto$71xF);ay@>_~6RA1$>W^T*PQSJNEkAKPcX9M+Kqk5N+ zFXhe_H)@F+r%TXV^(TH9KU7*hyJfu*hL^n0Ir#{Ea_Dl~=V|9M`Z#lj>iXO4y6uZ@ z{4GTnvlpT~d3Gv&eyLbRdB$fWZl8=r(&ZaG&*G29%^tn!dy-k^>S$yhVIF^1eCo4Z z*b-f|j@Q2L71AN%KGzZM#heZJgx0w_&#ZJUN{00j!rvAh?*)OD!%0B3U!jIk$iJ>TzT{iS|+yny-3((iw1-?oc5 z|64No{5G}e?FYI{omo$8#P8<0a}D{3H`hpBrn=uT$pJLl4!O6+S(g=0t~JkHZa=Vx zvpqUH{S|cUJk)UDtJ`jyjHBDoa`s&C&8{`KAK1?RwRHcebhh=_1Fm$BT3K%_e~bDu z{gUU|4^;3Ruj|n52M&|oU<>1`D&{+juH`)e#k*xzHb%V0x1I~i#F4qs%g?6OUh%r6 zFJ3rS*m5(mCZDTeoI=;%zM#o?;7W&E^H`!l;^tdp~#l# zVB%^X`>mtcO?^C>W7p!y#;$R{Cv&hN?Yob-pf#?1OmSj#8U33+HM~rIvmU?k&IzUW z1GN`u`qOjlyYOcpPm{m$8^vEMz*w_B;LR(KX|4wz1CL23pWpk6_Vp=4b58U*s(42lW@b}&5%&vaU*&&;ozv+4 zUFKbj9wP^?&j{EW^2#{NouhTLd!BcbdB($@6uxWDSF>+m=7Nj` z`o4B9ev6I$b7ZsMDNN&GZi(?H`N~rC)IOuxcc5;g^cjuO-k29*jUa9g9A|r{9+yA+ z-vK9Y(y>Pyi<?`1#iO8Ei(N@weLu3!45g!a5!=tshpu91BPjV01wdS&$MZSvuwz0=Qt z=NKQ}8QWN9`nJmZ?b+5hm*bnHANvc=wBW;PSMBZj+fp`DKiO1Z`#L^{&sM818UJHE z)fsj9AbxM&wQ}ousac<=p5ORoC=jHh*erfJqAFW=Ae3Ej^ z9``Wk1}VFgI5B+4G5&f#?=ERxl<%~@oZqth-xzxyL&qMY2mMoa)LHj+Uw4RUidXbk z%Kb0;q5Luyx%=_$nxB5!=jXJebWr_AAC?U31^TwyInTyF#w7F3W1iwaoMh$GI*+LQ z+p(+iC_!g^Dt6@k(mYq&8C!j+c?SN0KSu{_jxU-x@{~uq(-<32Km8>l#^9?mJid9&WkuPncm_F#R#kv_12e6Kup`U{)*R7~19$Bjv+d3G;MZF%npwU#N^vH4$&ao9#UW?r}%yp(V=98bb)je}*YwE7GpZDCd6d&ERNNYqV@jQBDJfme!pmpzN z#`j0Z8= zPIKuB+SQENVC8vsPtnTH*i!^Idl1B1|9nAvg=3^Qdx}=Z#AD_Nnge94|0VaN-BYyX zgzPEq%-B;@p5%occ82aLT0XU>Xum7IhU_VtJjxmCQ+Ba?LFX&BDJ?cp4AVIDNyT}K znX%O4`AqHi5i=w^>XS9LHT8F&ohT2D%@vo*CSbJNU*W4}-a$Kh`&!2-ceD2HXlKR$ z0yy=r3gVpZwZ>6Q9mNMsezc?3Q}QXb!MYpVbQU{quy?t#+UKkqowF+aPF3bQ*jxXa z@rAgOH*Dr5)$h)jm$-5MzwvJ~Cq&Np(p~O+@EG+>-hw@rus>;^C#}zO7QV*5=3VS- zzWppam-NzIZ`0k#oS1R`Z9#sS<(D(ZhJOpZ*C$o3@d@Iq?0-;a%#`mJwFg0rf6{Fi z$HS2~IK1|pj4U!9roAVfdHVIBVxLoe{w?V}k0JVvMS@unsU|pZrOET=AiNAu_bp_1NUH zbCIP_5Am$2{40_6IsDV}``39!a|6$4{%O8v^I|hjnpk(=1+23a>v;a5em1Rlz%DlL 
zNNZli+{qnhu$Rv8$X9e8B2K)^cowhuBsvb02kSWV?9%vCj@x!|#Vd3ZwuvA4{-}35^I~zPFC08KvGhHzk2n|1 zbNfeYNun5R2)mj*p0|Cqca7rT@!e=63q zVFSL8{*BG^Z&+Ki?lt*;+8yI6*fY@>=ao4MZpXOeiWTKw%;l^UHt^(m4)KClcQD0% z>2sc6#y+d_$}uw1SMm$J7_`Fb3`X47yvy(x82yb6l-~Q7M0e(yCzI4ga--DGj0@EH z`|g4fmpF`%@ZMtOwvU)Ix7DVe@Fwk(x#p}y68~4f@G!}kb;H4`#E@oBbnJ<&5~ne04Bn)D zFy32!j{AP9SC^Rfwm(4~NS9}0L7rJVgMGrpxmEXAogYK*Auv;){W*y)YuDf3W9&Mk zUd?{_$+E&YD_4S}`%E!}5pH+UnqHjsr`H7%j=Z5HY0eTVVQ+gruzt)R<{Cb(^A=JgQ zyPxy!Z`N4F_@VJ+>y+>1W-U~C?5a3&@u`u^<5OSt%Dlxb^FO{s|0n;|)L|)cV%2|eKL2C%n~yR7;cUF> z=r7acU;GwRhsV9V-tFc!V)MF8^+3;c{*e>`%u)ZVe{tan0jjtAl(Z-5S_a00%t0pXR1lQm!FHR zC`W7O%NWP>Jd-_v$1Y?1+Jvr#=l(Kd$ATT#V~-F~JGt$+y!4UUaBR|bqT&;2S%t?6+UlgDEF0!)I6L2ZL z&bR#e@u{lYtMkE=u6UxbWlGwx&9L~`0+(tNI*`JQ6;FnfI0Gai-} zT0i>=a}@ULOgirM{#TOE(R=Yv?4~i$ZMQ4j`NQ4taAvBUG-c>wGpYfNI zu0Q16UORRC9OfSjohjX4HlOEi_|9^*HMoN>JNT;BKRY_M5l zSgR%PVn0Z8yM@eyU&uX~v}2~>=~J;wos&5>{z%S2PH|knHT9j{=`(kSZTwXHn?t;l zY!0#}UUXHO7`pmx?B$+Ktkyb+^q>D?#Ms4@^?_P9hAwscVT}0Y%*T%koT>l(FXD7T zd}@-<~@*g6t(R7d8ZrhL^ww$FPc*6WpP zzJE8?tFmJ91@@76&*8-ScNHe`>7QmDNLl&#h`E1q%~U=4^`AHE*#o~(sCQR?!dL>geyMZv&WMvJjgnW!$&VyiSM~vht|0x{nv-yUIro>-{`6tZpEMQ>lZWA$PmJ9X@5KgN#@Lf! 
zo_$qvo||sHY>2NKx#hIya$*4aY*IN_*t4lt_FBoRj0>zhV@q-ww%7S$#tn@bqkONR zKWRVd5M&+-ieBJBa>$?}a_dn&+?7eEdr@Rs3 z#SrDab`iYQdkg8z{_gpdf&QK@-=W{BKBjCqJwx!`oiuYg%6z}-3}@yO=ksid{5?CF zcYt!{WS)cZJ#+T+*S9mKcPM`%5M$P zZ>|(B*=tAL^Zvrgi@jE}-)f$RYMv}P>8X9NBzgwRHD&Rf>QTya+c&=IM#_>eje@Hz zPe)Uh_A7kPwai?cIv86}ANr`f*NSd?4_$n~q_zGkztsCwniFdLP<@uM-?8$<=`ZX( zbcZX8Un`CD?Io{_=hW6mv;4 zY4XhcZ8sBRhF6;34^+IYxCUAB#wO3nFR?T4z`5_>3=y-G-zd2DkJet}>QEj@KcDjd zUY^PO1)fXsJdu1%%vAkVzkan3_Q*SV-Kx#JkN!N*oy_w$>Z9{ne&35x)?w;(CSK1< z3SQUVC(o@+n_oORZrVKkeGz4<9JkJIA`VkmVv^#+Ci2&(i6eo0gqyh-vU;}cFGD)5 zBTiTF{HB8Z)Tbl#BgvF3bWRet>P>k|-(>a?bw+8nGByDE=h+jutAm|&hsYFaOlM^Q6sR{Xy{|W6%1K z`n&rZr6z9jF2{e-Y`rxCaGyPjX!wq!DSne`#YCAIr;bRMSibV}7>|}|AN!8UfAgJN zODgUxbK{u#JeRyR_6y@)K2Dv@yA@_U=DYkextveMmYgN(-$!Dc*U2O1?8UcQ^j`Qx zL4xxh?RnZm#Fydl@e9rG&M21c%eiopxzv&MtCuC#>-)9~-0!@PY`*8%?~y;W{6#KL z>~#6!N$*+5Guw(eYk0?jw#dbcjIATf68!C~-s5*gWa}eeryoB<-`h<&6BqI>PyxU1 z$=ScB7+brxHB2hjc)wA-mUC(5_rVNy5iyeY@Afk1F!9j6*LZEt>A*Q~KYfyUF7pii z-dOow_t}NgIlb|zm*Gqv54gPiLjAmjIAvtOU$ipLM$V{mzniq5K{tJ_VESyZTxAe0&MaJRhR&<<#A@t8EKCo2&%y%0Y%c zVq(*__odoot>Nr<+QfY4e57eJ+Qj|dVA??UYsp_{%?c>@8Fb`b=OV44jc?$ynoAv9 zPQ0<>@Oyuwdbz&~-h~gkKIFc0vR&`LF3n9m#^?H%7IHR+^U2y5BflH)sjVjO+b4&~ z?>znH0QPwdd$H~~qBCS?apps|&EtFv>jutm-ah#@{f@+C{L-P7sejnY@I3TU_C34s zpBVE4=8gEq?UOe%mOsC3vD^N%%ie9bX2hfz_0f9Mv~3o?fV?l$IZ*ObOb+J3Tx*IS zro;ztwPWbwNzEUP?$V3C}!$nZFPZ`O*3X@LYdmmcLhwO_08=9hCKgRs{zjIh)&>wksv-fGnoYzSIF?`WKh6LttCWf!m7{Z#4 z{^c1|lHSF>N;=ZNf8C<`-)Z9hC)lUZGhCe)l6}Sh zJ$$)MQ%M@Nmtwn5?m5Xli`;geUnuWV)93Cm&wIb4=lSHPHq?AdX=95SFY>Sr?XLQk zSeb*KbCB61}CvPIO4WlbZgWc77+D@!I`8Z{stJ zd!CJsVH=&v(%6MR_h@do-DfAAt5#b+N#35F-mLPlr{;>4x~E>M-zL&)j)Co-Q<^5y z9ALrAr9!2HD zqY{4Kv}MuWH6;f|=kl3Ta$sx?zrPWglhE1!vAgCZ)(<<0efZtJZG3K9V#bYi-~GSb zy=#0`)wTD(*4~8doeLxoL_u~EB9a}vl!8JklpVmD1Vy=cZ7l(94Mfy6-w&NNmZw&ruQdMyMqyq5R#Q4g;rS;!S z)9iIi(FK{8o>PH+ir>reeLXnVv3aCZ!UY3}-*sZ3d$w6gn?IbiahJVs9Ouu}?`K3!uXJTmi~{xEnW?GOwC2sn)prmaLoAs zW?03;lt)$#s@avk{dKRbBbNs;2gZwse(R>|{w$laDcFQn*q>St>kQ^%m)Cyn)tCN6 
zAF5xAx!ttci4!`09Df&cvnzi2VaakQe%Zt-QeHgWvP5M#hk~r|+vl>yC6B_ak#xr~ zdUu;-P>UD8OnzvoFMe5gYmABQ6f8Q6P>k+1^z5>Su_emUv*c;+YwlFZ#&Xkie}dNI zUi}){kz@KuXxl0Hgijv0 zlddv20?jxpEgKxW^`wg*>7T=ov>y1WjPT2I`$SHBuFpF9B)vzUq?tZR58Gz&~0l|hV;XfQ5)BN^6ts%&&9># zfAzj`!#rGL#?q%}%>CZ*uUVH4lnx6MFK%_rG4|&-jjXs=vI06vPW(mb&3I2%-hU1?y@cn7a>aP$C*awE*>I)i) z4(5EJ=9k_tbKm38r_c>vdrtb_%*n9IAa~#+?Vr}}O5dI=c#vi43x2Kl_PI=(lvf+( z*;mKWufFf^;B!S*Y}MLa`;#GPsWDW$&BQVpSvi7d=wY4}*NdDt&!b3>cb`=+$TRel zY?Lktx#uaEk6q{F4c+lWw*(zJ8($A}63xx?5J!fYI9I^{t^EESGrqv0xN385gfda+ zX6oyCmajhaR(+j!G4LkS9=PpMT{lnXzPjU2iTxW44$v)hE@fpuxzFJ^e8%WV)h|$c zN*~KFk9a(uqdc=eX~salA??ergm5|HQ&(Mg{KP9GbvBndCtggr zcOh#7&i2Do%G)D8>Xm24hO!~~ilDpkEmOW)^jBFAKDQ4)JZkq4`Ky5c5pX<2U)(Ry z)&;M|lrpAm;BIEz!l^pbCf}af1G&8wUw0vQAia7reuB!1MzVjqzFz~6d1uM;%{mHu z7aK))KC{j+_Os|R{sG5M(OOw?yRuipk|W?G8T2di#a~MA;qRUnr2QoBjSW_q_QlKR zWABZ@?i&rQFJbTMeEHcN-?ZpeBs!9YYATHDWuq zX4iSsZTJdw&a^goy&3;7=T`maOxxJoznL>vEAU|w=g>ArJeIkqvgV7oT_=%7!|IRtnp9?npVMg~&>po}tN2O15&dYk|Opmx_<5rk(nnX?5`H?cq$x z{Ne2(ct(6HAERg}`%UkfbDukBifnB`7CC2%tmm2-f9&5kt+|6LeoL&f$0vsWkdJ;m zH_BJM%i`Wi`0V@eKkE|dEz_3Lh>z8r8@?Eu9$pIpPgi;IHSKljM)DiSX$^5H-)jmw zXXt#kf;WG>`N`g2z3>!1hEJrog{#`V0XUdT<{db)#u&M%^#j0bI;O z!KyJ(-czLSN8Tfk>U(~7uGbz}JnJ&*43xYHI?u>D;?ZLD#Nj7$R@b?U6-u-URxc(!1-dep*jnlBQ#JP}6S9X7g2v-jSLf}S-^5~ZU*Qi}ZE31ZjP9LYBbqD5RO$Ins7?qV=72o?|{01-@JB@tdW$u|0@8}&^&Aapm z?~eT%YSWz4{qKUYn@4>9qjF?w)w0?0Q?TYz=58o)?RW$UQ!Q9n7Z5|yB{U{T9;Pg465&hC& z&Xab}72}`8-y;8{v3;Ws44!>#=*653<&rKw1aJ9fTKS*BJc1^}pviFPTN35`Rrp%j zwb;`xKH2CM)7Ax|clWb+CE?zddrYj&FE$KY6tE^}PIf+gmXHmn%>D=55i* zIU|*i9oC!o44=?PD?F&@DEzmD|3&be;$VyU4yJ5!{hPVv(xeu?(48cgg`nly8NuYELv#tS|d$hQ}|Bla5I$QGOwKdifeV z`2eHrgUBx`x)>XubZqAOR`iu%ftTw`D8s$fCG&NLEr`Blys{ZD?^%7R{qVi5_h|2{ zg)s{UtqGFBrVRX{b$Jy2kv$U5RweZqd zxV~lNtu-hg+rY%x&~8ZI424dwe{S4-et7hjf8X5fduZOnGu|0I=^0{gjQ_x-{n*hx zTA!MjA>NHFfo7kp(mo#Dt{%1SO0k3rl> zc;%2gd+z7UzQ}*`qCa@D%i0(`=GhX+{n_#(Fjl$F+?HQ`aLs3%xd(uIaLsTDNUN-fu>>)|Jm|uLIt?@)6E6V^xPg#H2IU 
zb?k5N*mc_W&h=J3GqPUkJM2(f%lTGE>loq$MiVEH#kpi?EZt%H-`E`=GatHSYu!YB z{a4eL^k4SZ;JIB_ZO7*~UiCt8(91o;UF5dqr7d*NaCiAERhBc{@XK?9xZBZRzk1iO z?bW-k+fLpEiOK_B*-p~gQT{b1)n;#%n3pg|_ z?lsSLN~v(#LD>rC45ufdQv%9+~fVsZ%1J>ggVRKHQe7aM1%;XXx818rO@NSJ+Wr+p>?$Gh>%&lTX`W+Ky5_Twrl8yu*9iC)ou4tQEIfDQOjx zoHGku=h5LmbN7)259oaF*NOSOp1Q^!mc7Cl*6gymH|Y$<(xI>DWMnq!W{yy|hP19V z;&7jzfM=~UtAmOsxVF6-pP2qtPj+{gKY?fY*s9Bi(%*I4r<{>eTIJHSC82B(GOlDN zeywV+&+ofDZRd9FTVMm4aq)2W$_DAn;J=;k1+ll^JVyI&4!-H&t1&A`a{6E`Ssx2K z@zT2!%LYjwPO(EJMU1(vx#>$QOLF6?eQ93ZYGC&2#@uqteQ8DBc~ROg##gPjn|V%; z>sML^?{vL0M~aAx^WMYomiDU_x-s>pPI5|VgTGEH@4oiahoqER1H0Quhqguj_Yov?#oJNUNswD=&tXzZSJxz|9rg{di42?_L0a;#kW35Ut3L^J=>zsHn+`s z_>MlWZEu#X0S|@YiD7+u5oec^0#n~ zs!4n+xC_>6W<{G*YjZn|hFcy@~h9#8+8Ig#j_6FfV%1T;sk-L1a7vhcqLgJLqP z%aIQ*okOhOyu3wT9`!UoqZN)0q6|K$p5rpl;DTP_469?GC^koP7rF22*XR=8yj#)V znrlI~*;+%617`d~yNNNCywF_KSmU$Mo#ET6kwwixN5&KDv^8`L+0A=1Hj0^}hen!v zdsxfXruLevLOK_A&+r!UD(#h&?LH2I=&FS?j?Xel0V$&$`=+Nk9{pLDHT zP2OlHPxWiTy%0R~9S_-!qFt?_UBXJQUW%#b;Jazpb=wUt$Iqx}{QtZ^0|0?R~_V@F4Du=U&c@CUn~Fa1a~cLmLF~oemLh2AN=KA_Nbxl%Y1VO z9un>OuD0buHe;56$Y%4j|- zrab|BP(D+1*`Qj=^!feyV7LE1TjxyfV*F?~z*?8shOL>gv5vjCbE2^i70-Uqg}W8r zaN=`V&qAM#yS_;tcWW@`r8k9(d>H?2zER`&=wF4;RMya)HWin1n0)iT(eWYXW4k?| zb+T8Ebd?M6tsbQbzD#f$$GiHG?#>_L?7=E_<80dN;lrBdTkEo}(B6dB-$u8Oa&3Fs zH+vEIB~APfc(5L?7Y^)K)aUUHkfCmWp%G4<4C)yCv6p4HY7UvWT%P|67)<+_Jii9b>&%CS=N9-U^sD~&-daRKhLjg`i-8VEuBAM{0y(qp6p(&QI>f= zyfEVr?&9?VH-=jJIFWwgNwfB^Hfuucr?K6Ah%Yl;dNc;LPdpE~Li|yXd!4AO{z9(5 zvB~l6bi{~D@{iMr`<(A4cLsy|wDvW!413w|;h}RBcez#Pk!N5>m%tNRBW5%A<)7Ly zEwVj>=NjZ;ZTV^3^LbkPup;yQifv|XL4GxILAnLGpF5B5EI8|2C(V4Hm$i)-TM*XS z4RiK{_(PucU51TDcU;d{%Xa3U^3Gnb&X$a5*EjARoBHbZ9Qr{Xp`#@yv>&W8&e;at z7iDC)3y=JTlAVk*a``Hrxl5FKiuu~fxuzdn&wS!p=hd8V|C&2q_N&d%xsrFp3M2cN zkHj<$Zx1tW(LU~*lAW&DfS=OW7V7+*Hsyzxo?ZrRr>+{ocUKZ}-eWCLT|NYvH>CZn ziT1jU!7C|e*P3xS^R$Xv&LLKTJtFO=EGHIZW4XO=2XWJzd9Mmyg${B2opoP!{hj}{ zVOX=ycabMvdRFoQJZpH*M#fiBZ*#W3zh$Led9mx$#FjTUyRTgJv~O;&CPrGkQB7R7 
z_*}SD0pG^*E8D?g_0&*;`F6}FoL~0Ay*d-PV}cu7S@v*&;C=_VS?_J;y=t>9o-=;h zB|f;TfxD{w+V=0cFst7x%HIvVvcsc3c#Fgz!0W(vmH4Iuxy_hf+lA}7(kHwZn-~d) z9$)#U=m8AnX+~yVXx`^i-&%=34O`%J`n;xnJ}^{agIA&VHt|lJ3V!E=ocGZejxN2b zU2{+}tHAKg)uuhhZFOctZpEQ*IOR6dm&!EjoiPxc;)Baf``5P5Fu2NQamL3hBV5p3 zt3xdlfyvFoX22(H?i1_!);Yck$xitkqv)UXrTB_Co7EtlB_HJ|t2BKGaTcTD3(}Dd z_4xA!dHxK)uh-}#d={Q;)43M27MmY4_E-Tj*Vvcvf#jBKx^(8jQP<$LGq<8S-u*rdc@}Ni$lZ4;jQ)50thAFoUp5hN31<^G;@C3SF{`cZ7gqf8 zIv6Ut=rVL=rq_e{$Khq{#uXDUvxIc_;ua7^4a%A z!)o^~Q$Jlr!x(hCs74$@xGv2XnCZBk4QZFTLhQpRuph^}2Q&{rGzC8D3%2XfBOsa>xRoz3^!lWdJr zR%34b*B&h3rdXwJ8u|7a82b?S#7gE-zkUz=DIN}gJk{9q#Mjr;o=Fpr8JZ(Ygs-_{ z$d$bz<_i1q^=1vm`AXT7D9XEqU;Pae`v>B<9K$T@ZRmozf}$sjgf-MZ4lD$ty>mII(+CU>1%hhDzh5y2y&&W2Li%D~*4!mwXm{$Y;UF zxv2Vl?Mvd<(j3*AN$`i^p$vQr-_sf_=*ECTd&Ow>RW_5djRCC?e_1%IFf#^e2n{h8DGg@Gsg3w8Td&S8Gd!? z^^)emKcUwQc*E=)e?d8Kto?2IdBu~HhoI@B(DDQLOLx+AoiPP2!y~|`JvWc<*#GG< z-@0XFH%~2g%gde!9}DCfI${@yM`bUYwp?BftBg6j>*zJ*OTYW&EbH8APqUT+mrtbW# z3L|IGO;y3bKE?U83(&uecz;`3^^CI?! zE|Co72RsD0nK%yJBUy5$z3vEiYjuvJJ<2ps<9{47jxlAwp?pn|+sy$fu6JdBV5F)=MueWKCqg{X$*&y_&~3@>=sfK;E)PZF48I=%er2v{7HN zlO^zP!M$VJ1%r6ES#Z&Yv6p~Ncg`64xV%X0u(_u~v0N?mAsB&c?qrQU;~n}LeUC;r zo=>^+9XM6yr8ws3X!5e0`hrVm(ppK=I#Rq+_$WVgiS%`AmgBor`9_}AwoQ5c2fj2X@JzBL5yxOZUFs`6JX;JzUFSY=K*}DdA!EERN{BMH0 zDKA{zahC0kuVk*#b+lpV1Wo=w%37S+(i%&#GEZ^GmuR4IY?6t|G1to`^QdkPj|s<;^#jM=icqL^4a;nEz<3?^X89aQ-ZBI zB;8EgL3ciy*g51)Ud+e|BaaG*57AtQzoer=@S^M|<)s%mJ`z3K7vm?1=WY(xU==#w zNLq-xjN3Np6!B#5IU9xsDyQ=WZ=(nFY~<#}PW$0JU_&lR_Wcn&dz}Bl9`o*y;GFh; z#o@g|8}jeGz}cH-V6o1iKL4_bs*z)gBYK%KT6+rzz4wTZ^R?;ui5RQrhG?wR9ymOw z`g)Fwbv)xehhrU2dC$q50puPYQ)Wo4W4rf^zm0td;EQ$ql(Ieg-PJutlGmbx`W;EX zo}bI;CHmJqJLs)<=vO*YgiB8PA84t?e zN1ps&_ZnFJ=iR#LE*doj9U2!)rZLWfTQRi4Uvyv2`r$l?wq+3w4Z53;e=U z>r+EV+Bp;2d2RMp*4R_@-_-^<2;V~L7Sgulr)O7Z+ueL2zKnvu=E1)*CbBOKO@UE5 zFdG|2v=bfK>)+Hcq@274nwT_)#>yMU`6hp#S}S^T#Lv&-Pn{Q2 z{SY{Jwa@c5=_J|*{%z{#U9X=ww;#$d4(qfZr}{>wk@s)+3;nwK<@pD_`hCUgmoYbZ zQsy9cZ1?Eb@RaW?^%vl4&g%$YWLQ_fJUL1C)?Fi_9@0mb10vB%z0<7Ax8y|nOi*HK>vC+bR7cj 
ziO$?>J?M9_) zhIr?>OQu!h&oeq08TH<(p0aL%#*p&jg>2h0XTw@)TeQ%;YWC(W&l(e@d-Fy95y@1Y z15ebrLU-|DwD-7@?~Uu-ZW~sdQ+5yVuwIehV8RrSpU}nd({t~0_QUYN2#4oFr00W+ z2b*|K&la$~Lc8Lh=V;IIe`DWJUb4sNW}dbG?ddJ2-B`yp&`HmyDmztv8`|w^13r+Q zR7iV$=`F7<$zo4$g~6*Yn1ATPyd0SQdsN0h1}|&hR&%G=#ZxpfI%A-@13yi9h9=r? zgZAP*vnCg=^e?`@BWMjC&WpD+FJPs%sbzY`zi@-cmDzs-Y9{Ltld z^37Af?ess<(^Gr#?!T<3r|B$fi1S^6u2_BXe0VAQZ}6S=?n1~t?Q#Ffoe%OG7GrN~ zf2@i9v0uU8-W?Iuiusa1hdC@CKi~PoCRDr%duG)0XfK;bcSWc#wI}-PoUot%wrGD9 zvIBV}Iqcu-YlEhJwcC4N{z(sJ_`6tsaQf4n@a9})y!Nz5w3l%nbBEbiR7@V{U0oho z#=6k#9~$_+(E~o|WPE`gwaB-|EUhPm4||PnS;bF#Hmh(sm=)_71+Dnr#0ADj#5?y3 zPt;!MPP1Qv--tA=p^yRQ?!}wIzY*Tn9WTEJR{wb1WoW|Qqi7B6`=E)+y-GPlSK#Qv znP(5@>wP$TcOjk%XPtlbkBPxK(dt+%oQLtHZ8<@qIqoJi-H?t-*(L7W>0- z(t(^&1J=$*>~+f5`oNR}?rP?QbectdZ>^yB9(JmaZrSeoK`&y$Td9H);DGisNl@T)+2Um z1+iMIQ$9?q;7s1?u_I5fu=uy$9A^V}(CH{R z9)mwduCTEkzG>ne@>6y97w3SmcjcSD6yL@N%#q8mT@E87XJ)EytmE=gs?WKvsM?5i z%sQ9*y%~$CW5o-6r_r}hr+9pip1WNeAga5Ob|#p$nZ^lP@J*eB3T%wkQT(S`CwJNI z#7ubSN=!Lu=G_ydJ&k5?SiyLQ+eVd3p9|NSqL1G zE!x-7nT$uhvh=HW+g)?|*SXA@LAsjjYd`VPqs@(z`94 zsS1Ffr<={ZluV{A-5aKNe|saP-vk@;t2b;R`jp+IHPCRyD!~)XwQXkY1s)-_3y=Lq z?UE;&6uUHmzs+4smw2>IwmQnlU+(4y$oKB?=)pg}^+Pdq34J`i!i=ZguVNAJg}-*r z(TnG`?}F{enJ&xRoBRj9kt{frr*FV1uMPj`??{)8Yto6cQ2ML>vToYTN&{DaIbxI< zb0?1Pk39FVdH)%#Pser5&)%@s`uq36`cu+-!@9v=)`hiFX}w_mK4~7Tbv*Zgby|0< zU!Qa>jW^%TQ+?sDyFi!vXe&BC&<7m%kfyoir`a8Sl)FV~z2IF)S{3q2^OyZ-qt~^s zvsGpJ&&L;{I6yQ1{y3uI(z&14f5tBX{X)PhTew@l`CuP}3Pds>h2Dxu9L z_`8X?FOSD23AR3X>_XCekK0&(S$EvdQCcrLj3&(+w<&sdX(HSmp7Y`I&v@?j(|X|G zpC8G7cziq;PkL|I1OBou>?h=N?FD2*ebjgVBGgdu#58BI) z*Llvrs&D9_{6oMl-sG;;`ahEH!F~uhm3}~F-7)gg-cUK0PrbA~{x;pZzfqctqgRe^ zLUxUb?q%%}XX>*lJbfj<&G_3%AlJ} znK)k=o@@PO;(GbYe0G(8-Fg)dU3HD{XV;D5T@>Nq%qd3g!C>2Bx*KQzCuxVo}I&2RJ19e2(D zSN97LE5#jmEhUUaR-hu<#D3pRJnsr>AG@RRea=zytoWWD_X6uK|8jeukqPZ*SD10d zUJ;*nPO$2$;3Mh14>N^RgzxF$N>8`fhhauya(oENo9SwGGd{5imHv3~Z`f$tL48(O^C-=PKg3jeE&50m>P zM|9(?djZ__V~_Upp6MI6ffeu2x5irS)xiIv$8*9Bd^Mip z`_A}S1w7UBUQ^$E^<;;oA-)=PQ%tya}H-mEW?Jk|o+TQ2e 
zjWP!{cl~9fl|pVf!ksTJrH$@|7pMTX|aJF7nc_7nLX3gl^wvp24A7wmfAei?We_CKl1Ofe*i{ zE;fVdJJ#_{rD=Tz9I|73;KCgD)*8kpx<1;q*6{97&|DY1)1jyQ4zgjfam~G*UuV3f zQ)D-5o$m3a>U@>|GfE}66LYFF@T&;B@Nw61X5 z?V^q5g!G$%opU&9w@_;YS2k$9Q0v04J;rR8cFrAc-SME@U&h!4YfkK94Z`^P_Zi+1 zPT(LuX?E9s;v2zNtbcIGXB=;$ea{DNeC1}2vBuK4P*=Ry+xM*Xn&?miJsrOi<>1TS z<;>a%7|dFUbt!ZOPIQ>&7WnT54*5m6XShT1`x0b~;y+ElW?tf#jdRv~^O;+kAA2d& zSN_ZS^g0DEymbm*IEb%7V?Xy_7<<7k8?V4C3lDho>S^bF zF&{L$<1d`O`u?#MuZZ7!^N9Asz47R)p2nm#2>PX1Ak7gg?E%Ji_yeY(|e z3y<>pWN!72wb+f1R`AGnm5d0Zs}yUokG4IZmiWut!xt>sVTU%das73EqdLIe44kUt z?LC<~$)wZ1d5-JlqwE9jblZ0HEAdEv{klPYNY>F-U;6b4@|EY;uWP(C7lwaFzpm`6 z%iK48$2z{Jv{UKVK5!8&{x|E9Mf7KQ26{`k^yE{%zvRMb<}I}GuSdS_*40>D!?R@S zRs8p`2~Gj?|AcR*(e}T~Hy5hT|B`R8xwl%&&zDbQYbHJo>CljT8O(LX7-BPlyJMGT z@hl#aZ6y2aS4Q^w_6B_P$X5J)=49hvh!T%cb4U8bnmguAYzE!}YykOULi8nFn2m04 z>u;&#V|Pj zTHP7nW8b8=%@a&FcaTf}$&PdN;Bz~m&xMk6q)Qiw?l&LbkAxt4)WS@r!T#yhMr z<6L!v&Qnm{*r&S3h!|p}Q?H(RJ3+RL@MVu$)-{{ox@aIZ-73Yz0A>*UYf>TbGD~#@V`7$H?7+* zs`dRw*%iRB+{bGd2nW}`g05!1h_3g+KW2P^zZm@{`oibW2_I~IW6OM1wz7|BRmO{7 zm#=<;Ydf$;HTJ2-f-%aKAM_#C$&%ml1Io5zy21x259z-d-cMjm6(g>_ch&n& zuX@-L2A1K1h4#AX=<_S~!L$B#op{LG8?@Fxqpl4u zJ^vPYvN?ax{};i~1f1%_j5F{G9_a+rFK`%m;=1j1wV9#u5?sCV53;@!&TTWHm-2zb z!^dwoS+>)sKi{)?)TT#Uzunyf2cPWr+dzuFG&&S`3wp(yYOl918)&tmlLJq)#uPsC z>=)tS*+PDKZcV@F4_3c~K@IrVY@Rdk3=PV(tQ>p#@IP4axzeo^Yfz@jS$2Z|J=4la z$n2w>>ig5x?nr)i$U-GKZQ9@z;D`U`X>9qqb|@BApCf(k9SlZlpJ*W(p+{Wc}; zsLA6VF}+)%Sj|++-kVuAuRZggBK#SFl}(8|s||b&H%6TQv)un8_dmn=kA2JiALaaq z^3qNDknQ{z&v(*`GoAl?-1mFj_t;Gl%ev<$rcP`$&l8qhtgEI038EZ1;T#DRCT z^H2L;+s^;D-S@NIaxOd#E<6n`J`D~&gRh9y47nz+W@ypWnlp;0)eOIFdd-M?F0KiT zk9EwmtcuWCmn=>=Cx3C$_=3d)CKWE$`S?8MieyJaF#WB;S6I}ysUM(zkox_oA6Ih_ zUu|H#)e$-?p86r`hpC^im@%c^ytj%|vBfRx_x8!oZINsE#zzr)Bxu!gX6*^)W=>;p zaBbw880nm69b~694|B>_Cp!O`hwfjKk+h>Gc7N=^!C=4IcY<-X?*{wVE+6jX!zbI+ zPg8Jbb%r(NEw7%%ch_Uak7l#J4sa(xkni1e*MBb|HSDv zpY7!g=+(n&j$b~USg!sRA)Rx(GG<^u9<*~Z;aM}6(QWGMOyUQdv#g5a!>#(XSysiz z;P_FwRb#zt-P#yTqko;nPtR%GwRBSBu4R+HT5PX-PIGoZtp5207QWuoYD4&Os|Q&1 
z58r67D?X3=necZuA#;i&PHc+he!nFUZkTM&F`)N01%EPWWAMj(d*w%ybXF+$0*iYG zsLPr3BIZyrXH-Ib4_A78;a9DW;@hoy#%9n(l+C~{1n1RO%&y4XnqBe6x3I0TRU*Vx z319pAJp2*%zB^4kn!OJkYV&!Pe@E$QZn|jk6`mV|!HO*@_Bv=?aWE+Vb$ZU8n{2k< zt<}W?%4Sck8b+E`v5~s(Kr6xWJa+?Ra4(+l|8#-c7*g90Us61H2zFzC0Dlwh#pnkb z)~cRxd4{%y3-~d9-T6x9z-=$o*W^yMjW)6HEO&4yoX#xj(HuExf5?)Uib@r*s=y@&fu z1rK&Wy?1wjHOY}vJ1z?TA#@w}0^DmEx$1l?D7EIZwS2#H-eBNJgV%=CoS4>6y!NJe z4L+-7uCPby=>Dbf`tkr{YY#5T!*|q}6{xse^ydE{^c1}pgZp;i5&z|jN1@%?xb_Jb zGS+V`vnv`#S{2`AoOcJ%>p^SV!Uwrul{;j2!;1^QnPQ%c_59V4d6r&T7_kNorp&YW z3+^(!I;8gT1&kYQT2*$1{cXNw{T03o5^rsrL!aYA*0$53!&sNcRHmIis|7oJDYyow zZn!Q?{-eOU8@dV3!R}aQ>swd>=e-HMym7w}-1-{JrT9*eBRR(}zY;pmt!bY(2fAuZ zIr~)Z+*c&}7h}6Og7@xg#gk=&vVcQ;C%!(R*b>dTkL}Wrf@_EUJt5Zdg#2XvWgDcm zE$!@_`^hG&V)w~;%Q@>`y7WZn+&`WSEng0Q$$k^w;@$ta!CrTO_tR*9Ao%(Ft?kjT zkqNbL_tEcvo#@Ae~i2eLH}lOUk*;8YroT;&%Za`&G3~s-plpOc&9Pm#}lpRE=aP*d*l2t zZD?MI)=x7(8v^YU)c()hwnNy^7Gr0pl+FG&FltUjfhS`uwqn@Y7N%d;8r!UUzte7U zp12VDBs2aR@8gVtc%XnZJ>yqRGi_y*&7MPB@ak&a&tu(tVS5>Tb{D)FzUD60JZ_(q zEAW*I(?`a=7qsg?dhcEBEpgViyW#2RwRg8ieRU@Fs-vM!d6CKX=)jbZb{yNsZ6WfcJJ0By5$`%lN3Qo`V@3(#Pd`HIV{m<}& z!E3%H7Bp;JJVtv6K;?k8e01>uv>GiilfZg zY-rUyje9=PLCs^Vj%w)F+!l)J{@2h(?nXLd*L2<#tjWG8X3p>z!-MSeR)mwSj_kp) zdOb&hE1!MJk+kLEF_Gu1xidfX*^&eB-t26%7H)rGzFQ_FU6`FPM>G@++1$-Caj>QD z(XYahMlxl~$n>{)`V<@c$h7Hd6Jt%O%9OfXtqtZEc%~o{|2OZJ*71 zm~Vegkvu>?GzL#cmJF?}`8vGMzDRaw=W}XnEp5%~oKnhqGUpKbyb?L2{&ph6)&6qs zWOQ&Ef{l_^d*_3UF?3=+KQEtKHtTrgaE~(YAh(u*yUG>yYkyw%e9w!Il@{UKo)E{&&1~u z!oHASROR;3zIA_Wt>DP$?0i8uHU_N<&5!0wtonV}1%1hE?GGQnd?)--7GYmucTu-=K6ENZFG)^CkuM1koIW`Er$y}v*I#i!_q8NU z&OZy!q<&@m05v)94raFi#zra?0#9uZ&XT(x@QyT z)EN_R&L*%<8j`}=^~_ku)$=co&Pk1R#Li${JlX0v{vhLszaSe~H0DIo^4E|<)pq-Z zidV=_LgpdY1J>3-%%9(<|A+PTPT=awW4yh*-RRMRopwbI?PQ-wT)vlf=Dlv0Rx##X z<^RRD486zN6}lTrGBUw!uZ1~e==mkPqA%E*fUQTq556jRV;(%P8yzoMvu6N)E@X}V zYhSV}sseV!m^@40!#!4DudHOAcbseN+aa~rvu;UGTP?UEf$9tZOU} z@U!_@b$>_BG_w|Xi}`^qmeZJp4a;-mXsiAsVo0i>^}%UYMSg18>?U|pH0^7iWWt}v 
zFTafOy_9jD0e=@_(>QZO@>6m?edmzv`tP&8q4{8QZ((2UWLW8ME#kY1o#i^W(y!*T zTjQ9E{pl;7K11+Sn0ePN|36|a^$GUGKYV&1h)%i+Ti|(Q)W^)-zcW_t!P9a&g2QX& zTRuJBI+mNod<2#Z{3m6do!chy&7t(y*fUSI=3YSB-}_ssx^pM|jBUt)y%^e6L4z7F_J-rZoo7td$NuiytyR=N)9!3&cnuD{|6PtWpbG`t28)tJ<3embX74 z-l4tI+-s%vg<=y#uZy4;!j#rHUHdHCK*UuiDB(!0X9k#Xq8AK;n*|S=|4W<0) z%#|}(zr}UQUr*+~WVcT^j{o8B!9Wgkr&jWy4E|v8x@|dnNb3n38hs?a2hR{!F*g%< zzXq&Y8-0W4=HSqr(~muHz?x=Ntfl`dV!yEKYKMmKH}tcKbvX^&;q=;v(A8>V3~l_K zI`ZRR1`jlbtP7Z9$KHZ(WAxSF@ohh+|HhM@@M-4-|M`#3CmK$6j+cMn0_O8z`Y|w~ zZ}X5vJe%_|y?qfitm&)qM~n=YP0oxfo19^nO_og}9XJ(#D)zDQ^%Mg;bl=*0J#L#d zTv`s!K;v6DK^$91K8O+1j1OXtVu`Qc7=f$xOODSW)Xg8CgHybG{PJrV?`zI#LrWU?mo3wT4W_IHz$Tz>{+5qMf-Oiz&D_I0FPPFAV28nRs2bH`0n_jQ-fCBvk7JO z(zVUl@$i0q=RLN=SLoV=d!hAEtG-%%E!`T_nrm>a_N}C2rC$YqH9EhFGP>usFFusr zAiMPAIU3i39=2!|a2^_X=4WkNti>&x?Zqel5vciy7)GtP&^4>G&)hiEe)*CUT5nks z=SQ0ElCAvhpM+1`d0=56oH*t$!CBEi1!pZ`k1qQ3%3%qg9pT%H_TYrqW6y?8{$h5C z9i9=M6;2GzN=%GCP;?-~&l*uZv(UcjCu8^}@!N5mwayAHiddnNNH}zJMEO%HyVGpa zGDg(Q{D|1g6aSc6)3P~laod*I;zRM%Ka=b_75vNZ{?Y9G8>??OIN12FCuT;3$5r1M{ro=S=h?|Dm)XV5m&$$bUia+B z)UY+dlxJVM(;m=vc=U#00|Ji?n;Eh$+`*plVd^j8JDzR)@3mL*Y?UUauBgmTU0!)< z>Z6slsgG2KJ8nC03%}%Fvla_m<8S5v4eri8`tfUP6Ya!T>pp&U?c?aj^Xvg7NrB6r zL0-&#`-ELujD4BHS|bHGj|Nlr9t#fGYlX^YA072(Wz(oPDlKck-lM_Ql}9r7RaWpr z)*VX;q}CqYNZRJdE05SIwVkX3jsW}H{FhPZP21kLivPpGipq3&S4s2PYBtwo@v$CFpdqB-62>gx5TC6Vf(RRw+!N3Ipsdc zvKhmM$-KRCTonQz>}+3^JPnKBBzt!sqh1_wX0ZfB*RaK z=(o-e?`;W&_uf3}waP=mRh2vF?=rq~y2?(j)%Q-%*|@5*W%CO4S$jAb+Dn_qW@f&@ z_trKHOXvCWQM)UXZF^q{|AqXTgO8auifzu$z>E1Cmscj*;o8<i-g(WhQu@#~#Bk?ZjG@{bgh4bH*nQAL!dS#-7)+<`%L`bbz)A9*kFE z!_fY^kLL8NX&T%x*|W<};wO^K^z891v_A%48?ZhC%-kgxsFD0T4o`*HV-8(^)d74x z`>-#LRRjM*)`iig#EOi2i?>S_%9hc8Hf8l3vhRN;n>}YECwFzn&EF3!M&C#t-^Kb9 z`6+o^hpnV^czm+twB}$nX~MaYdF$U>50ECk!3L-7Rx z^1QxVvR=Ab_(r<2`pk%C z&}U{OtEviSZKLlTR^Lox2oc)Zg)cpL~CR6X%<>#;Ib@wJJF8vk9~%9b6Us>mc%9ms!@nN~rKzt$>H|2Mg0Z?A zp|`<>JdYPs-FyQB{U&=bP-Y)@I+*gnun`=D`$_y3f=lJ)dj)4>A0%Q6Ekvh>PIm5R 
zPOS``xa&ZA$G~I7$VYtba}&$n-=C8_{lLQ1f|cagwr1_9X^h`FNj#N8B^N=s+WxiT@E^<4{e<89^a$D)EmOecD zb#zfH^0UUiV*g4zaYl4!BHxJX**AW|&!uTi(p28B?$E%(n!|$&YPKNjwKiGUe%pcN zR`AtjzlyE-5`N^x{NGyk+FESA1CNkbG5w9T$i6kP-@LIlJ&;(k@>jDC6en17v4Lj2 zOdIKexRQhTkB%ZUGOgR5!T&jX?1R`Pd8u>i9*C_u!fzEn{NuVmG;zVbv1hX$wAYV% zFnRsB2M4bI{R0Enk9{y{eZTvY*T2Gj*75fbT>m9IC9#0_%XmINkhbMmF#W}E2l{VG zxZhrH-Ji4`AHj>*0bBMyV6Xo+a(s)GxaB?4O6>S8Z*aHgrIh)ZrT3&QPx4Q{CjZr? z1GkJ__Pe!5D4S}fZb8nyxN7O%wPThgZb_wFo1M7waVu!jKd_Ux3?!}2?!V=To&Mr4 z%l53je3HGsW@&iK2lfEkq;1lpcIuYbJ0Bjjis$4&a^j`IvKLmJ*t0fzV(;3L6R)l< z>{xQ(Fm&31P1cG(Vh1{OF@B{dCtvaG9q5YN+GA@rFt+bu`}_@k06p8@$M$)}dU)8` zd^@w)n)M9d+hYwT**}B*dB^Wrhu!40%Q@7!A$Y4ZE3T$-XwP!xwEs5ktL-ILT-$rJ z{j&AlVeesAEvN0n*h`WF{GgSDL7+vc<%sA-f=@xgH(w!rIl z@{AqWnS9f9Zf92in#SQh%iK8eiv7P)dkNNaE3hROvJdLC7p!SyPWsy$AUhRX;z%%g z?;CdV-jqP{-ptJ3RUX7*)T&qBGA&UeJGX+w1n@!}8d0z$80)%&^47Qf%4jl1WOBQ~J`vPWf zEF3C|B%q(QhR?(v2&5E6rsXb-%*SSkk@w2Ct#zVl;JN81>=gV2qy z6&vY>P?pKd1kaJtk>IWO`H>JX6amBdM}=<=_-6EpZ(sJ%ihsi%+V(HlLmGF_K9${J z;S)ZUJ=B4(u`k<|`;+!DPvJN2H^WYl4dK`{Ws^r^XQ&UoTYr$R;>VL@r~Hk!Jex>) zBRqQqx%)PG1}?#~4;u#gI)r@L&xP)o+AFZ9v3mx3?H}8T&!cSeTGAPK_*i;FZN5f7^8I=5J9!UM9v$PfvC*-uK5=a!>4fb**z8B?lXedK^0)f) zKX&r}=F6`p-)|fKly~9(mo9tjFOz<*GT`Fbi<{gszxS2-F=?U)Yb$(1Kba&rDu7#S z0oD7{DuctDhVCu9CcjMG737CC4$Q6Z<3mzC`PSC(ta6U6m(QHLWO4VMSlCFxxVpA+ zGiq8cDXghQZ~y%UtK)HeF4=*kSNW#dtVdb@f9ccP4#*~Y+fFFS`OnyzPp(d8-(x_{ z?qSy40sXC64b*$vwo0b*&Nn@B@Sn_By?WLDH!s@p;idK!&)oLyzkay7>hOn~PJHvg zcVFJKwytCE+Nsz_DS^1XKgU+O%(~^7P4~aK{&+C3cj{*;>sLSc=K7SAFRcwc_;OcT z%7efEx6^*{SE}JS-4xnWyD9yxeSwoj2U4)xF1~;8`m_h**MBz4SI|J%TjEvXNts?JKKO<5MdrDEA{ z*S^p5tm)SJV^-?Q(m?8#D(tCa!PK_Kp!I1yu*fDHMZdBahuj~a-FR%m!1`_Xr(hGN ztWV*;Dsb8s_Sjxr8yLLBX?rkjA6P3J@VR9TYoDGTSf8@Y+H%R<`1SO$QZ}7s#jnJF zWo){X1nZcM|L%s5zj@%syCQQe7f<{#F9xtNi+`DzJ(2XHf8VTKZ~+sAEa#An`_mtaAfaky#<~RS#ieZe8`I5B3rZ^ zyYi>}{|tOYmoEiUwj3*a+0ZC`S#Zly)hXMxc9%7D%iqXfUssjk)a2CRrB+~lQ2XFWeXYa!- z&Ib2i@NX~6JFtwjj0ddY2kwtse_Z2zGVeep&qwUQ@G{1F8s*;OeP_RZ(4yaP_Lp8P 
znS8~8rhdWoIbTf}emS%_7L02<-}2F7SvM_2hqk}>w98r7rk(R?Y|RvK5gr@BqY*qZ ztg>M-Z2j!OtY@%gj6JTnz%E)n?9yt2L#sDgJBU^ZtbOvaEn8f>Y%+EB;d?5zQ)`c5 zTRy@wwqSh?z387~3o;wO1b}GI{tCmf*GzS;Q22S8zK0?t-Hm3C{v1Qnq(qRsq;hM(LvPEaI z4|!S5-*4a!)LqWnNHTX7uwMFm>`&^Cb$z6=KhGJKlvsiNxyl}pc-ZyTW?*M#VtW?K zPs=ZgU%Mr^qH+qjG;S`#=1-{2*|ehau{>*53$U)j?o4FAV=&a&;zH>N$=|<~a`xoip!5uvFiu*Y~-jKfLLH${OE5ud?_C#^W2f zZqdc|KlKfqbLO=jyL0XE*Z54 zzwRDrYVVVOSm_0f^Fr2AYA@qwEB-A13GMB(;63qE!rZby&%j5rCw}?toAHxG?L>IS z@o`^9n~%}frTiC;+FRKge8j8|Yw>Fr;v)%Jd20%GhF7+2UPTNOcdJQGxIP-@#G4kj zlLvcKa7Uf?f<8dbFehhguSf7{t+;`;oc3l4*_+8h=CqB8)y#C|lCv+8-1Y(aAJ~r$ zJGY-TVKF}H*TCyZeyfO)oM(qxw=m{Y?8LT1%z2&l{NNWGhOMG}Qoy=Ud&=xb89K}V z>OGtCv}?-$ms5U~)w4YGz(1Yvs_+vH8-drW@5f1f{Qm`~=FxdIt>?c~eTcD&G7iGK znEy6#XAh&T1)43iR}7oyj$b-y^8M%A%ZFvyp≈c)leoR?{}Heg8Q}|Gqzg`ZMgr zm6zH{E2W!sU!lf7asD+266VL&BxO|1{$QxHzSnwo9%Y_{9}-Vod*G1G`W~JMTh+HO zw8M#4Ag5Hf4fwP_Gmmj^W87C#ekEnY_17MVW+r_3Ym`f}s`;L8@qXr3?Ls>#F$X=) zL7YJawDVQ+#~l6p8r5IOoC_a)YIZJZg~+!t2OpkIdI@_o$Py71hv&rWWXKPq8H zp`Dzlc__c4=AmeDxoz*0Z_(Qex)mQ|9X^fo`Oo23h)mX=Un{U^A4zo&|bK(0H z(9QLWq)M)f28$$X+Z9X3-aqmbp3bq@JNM?O_PC87+}%HrKP3}CxIb+LX>DVEy|{;8 z0GnSvfvElMpXAVX68|eZiVj?MZ`zz}{&#ewmo9#QXZh%OZkFHHPRdz9TyjCj3xkNM zE7{F{lGYG&7ILSOySDgIv9+!+Tx9m`=dH8WokQO{PNbK9Dy(&x(QYY-IoC^Z2B1q9TLDH zkDtDKqq~+ZoTh@H*(;MFm#!cmr`2n^2I;s)8mV)O#c~%FH+%) zDC3lvvUN~1{Gd5o1Fi|V*^Ko_=$I3cy@Ou4Af3Dx+M9B%b$d=YYVxbFr!%8bcp*D- z6rX^f7yjz@nHkPDywEcBvDwk9dXB|%#^PtUiWkm-7cvcf&ZFL}w6}4b2upcMF>7`&_D1r#jH&E#5=K=15M2Ls57jYH<9R{6g9!_Ijx5 ziH3TY4@!KYdzd)Se&U4gVnX+7FKiG#2Kg1_Ct;t~&|3U-2;Yl%_db0WjdDu6hi8q2 z%IaJ{ckY~^O-F}&x@8HvrR?*%WdXQwuj>As4`OQ)t)>s5Wn1{zp0)U7=H`%|U^Rb; z-MLchfGNn$Q|XZe>V(lT1$$!8Vy~tpTFv;DFUIaqT)8OT@h_uCR!$^uOqoxQoIJ9B z9BD?6+}=Zv)SgTFIc4aP+x|U0qJ1mr5a|=?5ycq1iabB!>JjWg8q=*CVe7Z zatMF>+u$iZB3p0_bdfF)jXv1?Xr)J^8t}b=xq~m=oIw$;tsGOaB zUPzajL(O6Bv)l8j`*bW3Be|K6;}{eUzC%-K0sfdJo*s7cLDNH zecnkw`8+H3pe1yn52 zZcDTa_y}7>eq(ulcjC7rWf?a{M&;ZV zc^DmCmGP~}CH#kwt%DniB7=}cMI&#Ie3|D)`c!%vX|a)aMv8cEX8sO^N0mO5^yAFK 
zA?#h9#(A2ek#|R)9dKtP#{Y+;zx8S3Ewj&j{+7Y~s+69wAaW8`V*aREc_~siTP4G-#d^oxX9})u;#@1qg&SL-VtZ){5$Ua&ZACBt5 zhd+nEGBPr|@=wKwKQeqc5El7-p)#`I!K|JN)nICwC_;X%^J8@R?B9$YRSByH?V4!jw_TPA)Z zErqmH%334GMjFL$q`gl&DWo-Kd?^xh(_SGhp0tLH(UGd&JV+hoy$cR;2Dj0X{@}JX z=`3&?4G)fvbo1X?&X{%aA9g%rv-IXnAOB^*f0+@%QiMI3$Mdp@qu{^Mk&NsN_>b}C z*MtA)?{D;@z8v1mfcHj69Nu&K5PS~rv9Ak#9o}NOEx^`CmCj(o3@D;wF1!zWkTj6Xnoj&Nrx{) zmyzyxN_Ppq#xc}`?oGn0d=PL@cS(U$cS$(6=?UdIbqD+FQoiy0GWH_a3sAq{+4wMY zU6Sw6bV(?;@nO;g8|^o)ByWeCw}ZTuqyf`9XkrcMrt50n%aWmM8t2!EgEH;jt#&VQ z+l|j{yxVOTer~)^?Yen8$h(g;@zX&5myp-#=5>;{gf#e0d(VyKcE3m{u{U*-=$E=nzUKDZ zKey?o?z)?L*Y(EV)J>+}OX%0F+ezJ91(O5wTDPu+UedhN_-nsPd?b2glJD`q$Ma{p zbrdIUcphGHcs@*D(5G{HH_vzW%Wb-xwtz{ppz+}lxJ(C^>D@fPq+f31B@RsL-_X0f zKjo%TZkmg8Ft=#}Wn5ZooK8*ac;KLqPPc9+b*DJ(Yn-m~*A>ptx~Y76-H)-CjVyZj z;<`$nRaP|l2H$xXExzyNgNNpMh290P<_hEdDDT4YYPb9fUmM8G3hz9<>}%Qc>x!{Pr^%NRBJQUVTS&U(vKt&*Sg;0IUo^*0 z{Y-MMyAQJl93*{6nrAoQsGgR-y=vOLU!CFlXmpn(cP@T5K=g`ln0z*U8Cp}3r-UtzCZoSofOu_=0Z#^v}Rw{b-GP2Qf3*td$B_Gk$}4D4(_VB;~XI03W4% z7LL!lIPwNmS8G}M_?~k67R+ih3e56Pc|Pe7>lgWV_t1_d-v{~XXSDk9 z`K7Zdr+%is)JD>(XEqZ>TnZO`wW%{o@N$gisUOKDGhE*fZlCNMu+$vZVK_3g_R4|%@q zVxC|1`LdfCSHX9qkB2tXul%M;pX%eG8e;mgNf!?lgTMUS_+*T)wgo)o?{WOw*^#NV zAzZ#mxmI{ae(oB^M1Jm!M9t@shHoYcN6O8moaxWy8||n0ecK-2>}%_M;xPLm@@*$z zgL`~)w%ecRb_dT<;B@%L@n4H?ff{%^ZG`|nqUW~2LQQ}VTvyzVx;?{hYMzS-i; zv0bD4Lrw&_AIZSEAd-<;6ww}IHtTuGQP~VrSf4C~58=)6Ey$%iMQi-GA@rVnAox<- z6eB4*X+DLSPbHMeri^^>(wPY-k9>#^)7(EJ`4T$4D8hbg8tMC$-b8(+?+&!T@c*bg z_wcBytKpwBlfYbZzd$a)WI!}CTok!w6l4-mVGxuFS}wH&s5T*jCbn-y#3X=i!XPpj z#mid>qBR+bVg*gS5kxKCp{caizOMnTCSXNGGyyT+Z=ExT3?b0^z3=lq&-cf9&OZCH z*4k^YeK~9Gy|(n}m7HG{-pwYD@caVgM(S>JUU@PhYtev%lAnA|;^n*bdHp|1=3s^W zz|yxZJR`+k28_Psi`4ZMhv7?~^ODX!ID85$7;q8wH(*tS`>@cn0K1QN28wZ2VVoK( z^n`73VuzD+)M|RcfLobIBwprb6Y+!4sZ2s|R+Y05-WCn;LepS$UWWd{f*wr;kK!ym z6<0x@`h@b*2S>Bsm$s2H)Q~-Q?)(9lL66skjxJ13GW1A3;mO|*>a?lgcQS_B(&nF% z+hNpOWa8-Y&e2kLX=4NKQT$AuJb^xMv-j-#rztCAOqMe%EGJoKk&}S~ra-er*J+8;W2V;-ZP 
zk!^aSK$jV*mzy#|=S9$Iw5P0ZTswiuW7PSJC8xLRPGG9+br(H$7Hy}{rb2`8-&btQt=bq*HWTjZ`={H~{-uw7cS~c4bHXS$n131T3;zHhJedNMU z@4_C0?Ot2!$hf0$9?FV+c{;q7^;?_S6`J6>>T8w!7U`E^T}du7lFeBJ7tciUTCgL% z*{c>?k+patYd7IZCj3`eXWn~*_`cF@=F(54FW(gAeI@b2f4ju5R;r8XoASMs^a8@t zr>#69OIFUItwOs))|AjD{1e&~(hKP88PqA4y$Ujh$bJTeXFg{#Ey$KD9X&l| zWvyq(lCkfg=XR0*jK0|1%vzhs`8o${h!(~oqkp%s*VM{9J56hk^9Rr#?g{;^r55_@ zCfdNoxryEEuMm6BBxwh1c#dfaB}N;()Fy4<9Pn4xqg`!4-+|7AGumL_fL{cp4fHv7 zmevMY)X_~FxCac?+MvPjX3X$`-_Bekwk%nDSQt+Zn)%0qCgJb&T~0mEP4M1uk|(nm-K7eA&Yjf(N~3^-vFztEe1jlwtiREfJW+;%U%I5^vZq$ zg=aozRV`^w+5%nmS=yqfetji2Eu&vg3fs4&{LpZ}-pV{~^y~W5=JeD1^|{sw8xEg+ zov@CyuCbzPonVX;_>384xvhWFAojkBu5#hlQNbGNleZ+!a8xNJ6A2G@bos5to*J@7 z_^p+43%z55*}N?kHvH&_-&+$thAg4xM11o^DTxZ_-;IAd1Mh^Hinjl!FR%^2uB;u` z8~H@vW66yVDzi7;X2cm~3vA_n*>})AzMOd0d8guQ_>M*vns|#HSJtMir8M2?_hCPQ zof9p4Sto0LXB;i&5=+=;zA*@nY}wv3FpeYC6)ym|-tk zloROe996)0sLaZWQ*tgE9>d;V_958UaK>^mbn%|C5&7Yay=@KlL3@!qNA8bknaG|I zx&LCB7oR-(-R7;!l*YCp6YCCL;O+RrqUiUW-3vLk@04o}9=wepW)x-JW30W_9nY=5qgb-p}GU z`d0DHW%GJ!H>c-pyIl!xLe>s&ADj3{ny?6;l9W|$;|vt}#gDM~?c6cZk;A_l=a5nk ziEp$Dt?;TVK@HZ550GtsA&ovF11asvCb_Ds%BpWWNUZ0?%YE_unVG@8 z(7k^UKI4IXXSiFV`YSS>9p&{%J3HP~N;c7dr4222sWTimC?(l^Yx`6$swvIJzeX*4kMU=w zmCdECDwD{&lfH<&?}q-WK3OE`lz zbJ*0Ia(0}Ae!@3Jccty-Ou`kF=;;kG9d8mI7<__ik{KA}v~I90`$r&@fHzlwajdwCn%fN>BQyYQd)DZcz7^OOs$ zJSF(CDfQ4w&ZF0eZ_0qT{*xizlLpSLepyT8-@m38Y4jz|)VM!|ey_yaiML6a++KH-M{U$ZIe?G{j!Ej zT_bVr4=DAY+>0+Y;N$~m4nBxC(GHV$-|bgp*ZT8#XK&e3s?j`%dh+CcSywucIQ*zJ zUO^e=LiY!~%aaapCu?Lr_UTn`@1=~8^g+xM+zHgUpSCRWt@T&@^KSnF-gRqEhppCc zDSec?{->uIlG!8|cDoYtB)bxN={aFs{qdk_FFATb(?^tgwD#|G;RXm2Q973)% zIff5y$d?Z}F6KE%eGfi+P2(ZxKlFS!4>XFbFA`Y???G_2nY5 z=sy(C!Cs6@iN28F^Kq`v7klUWqT9hs-|(eR3As|y-B3o-7u~6NidK-uICloD-zXlN zxd&LQ$F0d_bp0#(qZt~Og8r($_7<$+n2e85W3O$@ zD6h+sr3UR;i9uVIGq^@oef6rzS8;tp{tC{=#f{5#1;<x9S$wkiZYF;sFs`A@@Yzcm zR7;+V^Qd;tSo^+I<`(iLQf4hMuAY6fR)({;_Mu9QFWwZdl@VN9 zDf0|vo{lIZcq7Y9Q({k*`FHWCmWi$(izkw{e-}?AZU3r1s%4T(cqe=i+63Q^;oXnJ 
zb@~58Tvz-*#C7HWI<8`5b!%7#Erdn>e8{Y7ne1}WUKZ7E=bWF`UZO+FMIKks59eDd zvdfgHL!W#i_b;g0*|v`Oe(miMccUaGcs;flug7HZdZMGeo&?3~iRtI{nA5zTUMXJB zRMw1)e_MzDkcVH!e!+V6ykI=xggE}izRzoZjps_9TX|+OAK%BbCKf+W%)36`hk0+c zCIpn2JJ+w``JCt0xI5QZ^K9pN-WtQYJTCB@)fQM(baTT~Jn?zn##^}m@8@b%;5M~) z;8ry`@GCVnuvAS61k~Kd5#WA2#=HIvo;;p~JkMAY1CLvy1HZTS3Os5x1vXpF0dzWn zCn&#-a>Gn{jqj-OZv5-G?ZA&K_cn@8o4dWajdGsgZO#{LY4SF5HZJt8H?J{nrMFT1 zP~Nm6mv?OFE%N+R$!!$BeOFQDmh!yDdVKX(|I4}jCF#^6q-SpIYskV=&R@-46vSBqh17$FAlz^jtVB2%%Ksc zoS+w7hf7uL3smdW7U!E8=7Dc!e2R}F)A_GHRIVtYJjEO`-!!{<5>IqNYW@{Gy{}2i z&*xdWbYa8mvu|j4Z0XM%&YyE*!*8ct5O{XV$iU}W+)Xmj+qh7Pc1KT&%U`ARa$mvw z^(lh`pD1Q`?@{*rpPQoG`Ml5fogaAI>ur>>Z}8U2^S%{6YUQ(g>*aZCWz(Ycvb+VJ zlsE7`UhZvtgGY%`lJa;yF96O0Z)4MxtiV9uz<|=^ZOr2lxI&Y_6FLT4@Uw<~M(%G! zN8nzenA|mr()g&CJ4^V7`X=JemyX_(+C_hKF2DWRz&H?Zt}9H$zC}9>xuah`|4w^s zye;auAbOr)?{P8t#mDY!j=3o50+oL01` z!Oder=8Dz4#?hqJlU7Yyh8hzX%5%W1beNE5k2$ZgPrs=A?WAR@v4LSc+;dG@N?=Vc z^7+%0{0n&!u1d%s!84Y;!ZRn&KFSUF@q;Hr{<9{bmh){5^GIqohlple~Jq+&al~vq?`| zlS{tbiH&#X4`GHsv;2c1~<`@klxPH#lM$ zAL2Q`V|Tk*AKj!{+>=!+a;S_JdjLLI-IK5nh@7|>3q^m(d2{y{=FebVmrYP9Vs)-3qOV4alcd;jv7{4HvX@7?7|`Fn=-%D-^Rg@IwS7qebtyqR(l z{!D29R3#}$`tT{k1FVJI_fTfUlo0{pVLi^3)`iQBFimP)LtZI&SiQV)=#-(9zXE<= zNxnEY_;})Z65Kx4e&BV>nf^7Z-FJvO?q~hCihWkOywS_;@zGE0%lqZWEl$h7LX8UE zbA3vFa9TqC+p~%r-kY_g;R;m^-c4Ep@e1+p&$_u`@2nEyqk|RWWAaxHv*hoYbxT9j ztODXp!D-aXM0^zSJ7>*r*fnb*@x6jo;JB-QRQ{S+CF#{!3mRUV^>gCQ!PjTq(D2Tz zn;LqNW+83L?4kyr+B>C><&n|2TQ736fbRX_TioB!TBB!iT-3K$}zB{|SjrK(c zh;Mx>@!U(|ka&~3j&GOR%WWmj@U42bI}bW1%DER4Ix92!xG3$+;BIuU;@929*y391 zL7(2GUme37%fInf%gcj}UC^yUty7 zb)tLy=ITk%J`r6cwi0xkGIkE--a7ndP%j%gID1|DycqK6{*2@v9ix72`~!(jasNVe zRI1zoX$o~rRD$BWGn@Yl2YwB0OC9+ZrRt%B6ZuEOM&c%3^ds{~n<{D0cj(HlG>Mb6 zLsJ!W2~`hS`Im7K*LHPR9!WnfE&m*8KRHL*+@90MfbZLRr>FIWmaX`Zs)WyuHuRwA zG)2ehE-(L+%eng@TY1aZ*!&M-U($4)J@VYG>pBhpB<=IAg1_OrYj){=N*w42EBc^w z?bYP@!cm@7gg!L{o>G5wX1!qeJKYcd=aYROb|_gC$#!Vd)fbZ0<(3ffcAacJ_T z_6^1q{VHdp8e1%IA;u<3~7hAHr<^u4;c4QR>q5I&~$j`-5Cc}o81R3@%(3Z0_RK0 
z=Q3B{lhow-it=AlzLWBCkFL%5nDPP2e@yve$}-;DizypZR1EzI#TCaoXO3pfyq59w zSB#^n$2vy^`kc<6#lXA?m}S7+60X_=(qZDBtU0 z|5f7{bPw0{PZ;?h{VgLEbZG}hqVpZ31V5wi>UZAsapN!5eVo4e404Xo6PM_hMNijy zfypI!TCvYeK`$ovW@Jb6uiqEFwK@He3%#cFX&DDT$A6yWtxZx!eLK(V`ETqORp{j= z&}S$w=a~IN=rcb2e%t9Y*t`!`e1=ZEzmh!k`m~Fs{am!)Vr1js6pzTe$e)cemQAa*{_qy*4$|)dpO19LdV3ZBL}|=>zqBbq;nQf=**Eu6Jh4OU z`Sq$TDW=SnA8WRbw3nIl?^f+2ZDmpUi`AHs*0SjQX=?09OIfe{n7MuPhpBNRZ41-$ zW7YVPYtVIAiZf+yW?o55ioLaezRWEaH7O}(nIm83n0hri$yTN^zQwxNs3}PnEpIAs zS!?u3iY@C+8oqwizR+MLj9&;fjWYIdne-%kSuA0}k&$FAizh5)Gbx*x&l&_+c6Ur! zGWFtSiWcXrhW7#Zx5lP;?v7JDasBg}4{|@=@S-I- z8;2|WGhWlYHc9bJNB?RcpD^;O#n&}YyY`yq$>V*^zQL24<=?RQ@~fH+c-j2l2o5Rl z0?(QZ;0{ncl>-&e>hl%PTDRh<>Z5qJpradzOo**Wbk6G*$Q>uip^c9yId>ytW!xLr zX_hlk(RIg0${j1cxno5=HCGunv4P>amQ(qAu!-zZl&GL{a+GExJT^!PviGujBy;Gk zp`;*pFKu%usfSc-IFi?SM^vzdJPWWt-ZxaYKi+G)_Z;)9+;ev&^oR}Xa%_G2JxMb) zTBGn8X~k!xjeC^r%mXpZv$4#xajYZap}lLKl{wb=`?Y?VU!@+$n0IB4{qKv^YVF=h z(i~<>vGgx&w_BYt!-5OwJ2Jm-CXW|8rP$wknv*sSH0GqM!95)sra{Y9(4@~xZG)yP z?lebf>w<5AxBD`0j@&)g%Kl)nE!JRL6#L*<@X2=;-xks;(Ubp#xLVFkNqm9W9MO;e znm9{1u8Fy_65aR);tHWrD=%>ay`JZZ+s(R0;>zKF<@sLEUx>>N$F)$`N_64h5m(8$ zB>B9=^+iUQ|Mnoydo1%WKj~_mbTYlnyS`S=EZ-Zt@U`hBdTwx z`P8@E@t7WZ-}qjs1Wy=Y?f%Cu7;g}NAWlit;iQD*KMq@Q^@J;xBPW0%`@`g|r31&r zIMzt|vWeIOCt`n;u~TRx?SMHGc~R@0Ass&jo_78{V)^%wJH2E*_$2XZj45eyhKMq= zI(v_ia+3a7IBgd95*hH`6Fk@6=l^?Xwg32Ycl)=USLJV~?dD#)z4WC}sPy)ao-3WX z{;^WoQ!4PK&g&^-lz&BF_Er)do8wDbXg`TJc=rJQri?PO*F@-$x~-s0@&xub-*bjxrx07CYX9j605otPZ_7-Tk8&2r;5_o^7Z|`Sc$N~J2N#9>TpF1i| zeFIhWLGC8>99+p9_pI0R#l!mgciQ-b{3XoscQfAwco<)UKJ-$>!>(wq*T$VC+IS;l znv7>LiP$dsD;|3)b3!`sHUjT?;QiL?nRb0z7*@i_slZwj0msdFC1aQY$L3T#F~fi} z7&tEAYzEGYzzO_FI3s{F9yp_c;{nbSz}W$udwwLGT;NOt&gHTEP;$DWO8*_u<3u;~%(}kh?3Tuie@Q-6e2z zm@+==_XU5a?K60-_IQEZH~MnjF=+qhYTbXdHs1aww?Y2nzJYF=+;Kwvv)KzYVZTxm zTI9+R+?CKRG!INw=&ug()pU7Z-(2OF_YKWe*U5Wvv)n~cHAnSS{!I0_ zz{j3|#$(irvDG7C%W_M8A#*ol^U^}*?@DZjh0NjX^=K?)&X%{9hxwLqd1)bYaV7RX zc4vAR;~NW^ljW`DVQywjURucfT#3D|khveYjfEHMZ!OOl!o$gP3Gbhg=Q73f&DDAQ 
zYwptab9C;~=j#*9*T;X+o4Lor9Astwvl(-@cHeu}XUfg-_f-3@{^tv&GXL#4c)$Ov zw3kauGS~WL9&{?4m0k5%>8}5J(7$c7X^tj>UlH&%B%ZwX5E;eq|xYW4Ox|+Y@UaU2 zZ=Qao^vc&C@Jk(}E|IiLoecV=Zblt)sDp<(TtXc#r4A$BlQsaqezqbNIjep^>A`FM@jhtU_L-9)qB1F!i{LI!Ilj zh%@Tpq8`J;^>~bW8TA-U9-|(whwD*5okXsr9!glQRw7plGF2ew8`v{r&&qXKv-4bE z+H+lB^v!ei&Aiv2t7h9qsTNy>Np(9FyWOco+3{OzKYYVI$%pxGa;g{FQq*`YeH;Js zylM$s|l*p_Km9A)|nFAf94;Hw;j8*J>jx-$%*9sN*!Svr6g+UGZjlX z-E2oAnQSKCMam%CD3e)B$KGf^%)j|zwU>tfGu3Rr&~(1-mr5_&J=n-MVFMQ0zRI8I zI;{4#AD(ef^5^8eS-HTrQ|WC>1Rm+`3pF^&_QT`vNj^%x_mz>hBTBN(35=B{GgFjG zJyY$@?260g9c2i$_T4_CblgXQA-+tqL$ov(mS0^$P!Al{EXqlWWdR-=6>Ue+zw=ro4Dg`ZAYp^gpPk z+m{`G>74X^^wNVGeV2}V4Eo~FO5aDv%l!@T@5GJg=HH2}75?j>@5P5+IVXKD{%(ce zLmSO~vi6+xy_oeojlN~$wnLx!tn_7;Rr~*{Lf`LpoST1h%l-Ewdnf+6<(%}*4OY=t zpzrZ#dY~`eH`koz`!%vNH~Y8#xt%+@+O|}ey-W8z;?IF!bIg5xmwvgmi>9AEkbJ_F z?i0D2bKV+F)|LV5g=g!bC)^+M?oZC3ob-u@)!+IXp<|A@kMG5oc68Blfgx*Mv^+fd zcm9tyY=f3?KQL%H_Mra(cq6oYw5+a2S~6~`()c2@-2NA7yEAFYT)V;lAu=ztocPQ0 z=cMJMzux0tmi%~En}una+qugB0CKMJW#-F0((*#WZ!{SaT8{tjg&t{nv1X+|JMiK$*bV&cI5JYWsLj8 z*Bkx&kde7J)RywUVXvkws^?TESCjv7@;%7KC+)vn<@cDQd=Hdv50B%F%~#x)yxo*y zZ!n$b9^1RxpJq<+{d)b5(#PTV# zl<&mJ9i^`;(csIpKR!wDnd~nr=ea#E1pLd)Cf|cwx0cqcnUqVn*AKfpS;~J-o=4Sz z?mQ*h*Z$@`eu0~ItiE&x^Wq42H)2sRc?2>&&@|ZH_wWBndaQ55>pMyhP%e*p>9iy= z=f&EmDZ|{=3o5y1G#VU_%^|J++tT7V6cai%)e)16bVtn7udAwBimc6I;vA=iu+GK%& zP1yd9I@EoB=|lbyYo9k+->_fA_r@(Rms+z;t~Rleu>Nojt4elZtH@@(WY1DvZ<>@n z-R*4bhh~3*kN=t}{NGGQw{~US2}2*aOV^j`92U;ljG*)cjC(OIrI+SymiY@*DlXncl-%bU>G z{Ha}hor;a`^gF3{QwOnK%72#pTSe$gx5^%J#S$HKUK8D2U%IXbed$)`?r2S4I*>CB zQ!Y47UwQ_gbbF-yk9XZME$0EWvq12t)7)$Cp&<$QZ%#yKnuLBb8D6Hq(^PodhkwSt zoP~%IeSt>*G0~Z3m7mg?ekVFpv!lJUOs(r2q%__$M6LdoxQ|Uy=(Zf_RW;q|I_Fo> zVcltJXeN0?cPi;x+nVRKyTq;wZzgaSVE%Mz+n<2{I_S6-dS*gbKL59V{0Poid}zou z=uY+f&$q=XL3Fe0oCPC-2hi8azy3JjquU&nTC&2=|G!7@h<@%EIz-3a*yqrF>=r$% zi~CBy@0=;;c59#V=4fXzSVnNqJkBUZK5hA6_o+J@)?IlOJ-gMh&!yo<{VFoG0qYh$>Iou`;c__5#o1(1n*Q z-o{xRJ&)*j(n4ELed}em7~h-K;9re!7+xftM^B~ca310N-6qRaQ-w0a$Quw??2SFjS)4w|!U^Dre{rK; 
zD@k95PIfN*k}`8eCmY6bhmnR(R{ETr-#6esMp|SYlJWn3)Sxk3$D7D=E9dRzqqow^ z4N`;m6DQ~UBg<-OnvN_jG>`Pw&AA@RC0e*ct;}R8HXG$PE5YB8ehk~($JD!xb9cL0 z?^}wNt#31zOWohT9ewdf=!?HcUyMyD@GLsx4)D!EXZ#{M<2%q9_vIdxeDub0PRpo| z@RYt%EqpZMiuv}SKb~MtE%wGM$YP(<=uYRHkx?%xe-Zg3+r&r{y|Ih5>@v|CXBH2n zO}pcEMx=|*S*JmC>S>`2Ev+AAlZ!V~_LMBZPr;jX4!rpDt?p>=J+^z9=yQ~L1)cSC z@a@;|QrhY|c=Z$br|H?jDROy?a}@@kgm!7K9j4@B@uyjM=W`E>ulq&FV$q$?KWvHi z*6B=Z`Fxv6n^&Bs%*~%xTyf zQ$nN6YW2alb3KQS=6b$BMy=%Qu45N{{kin)g|B7>Y$`hGSaj0S=%lTpBM0XD!2B+k zF*G^U9p<^<+0g&9ucAx;e*#!f0;@Uqf2RNc3vfDt^Yo8|(_R1nEO0&ojydng(bHZ3 z-#ZVx25>(9k@R%e|9=8K_B_uQFaAh4*au=3!)I&+vDgm|?f}l`&=db7>s11r<-qwB zaFzlmnR@L4&Y>SkPX%!92F_i;sRGVn=;=$n{`e!|1cCEA;M@nC2Z56bJ&rt&qh9e` zg^ggb-1Cd=$cO!D2==0(*qk(b$tKFxVKaH0eMTqo9sB-zzcsn0bce}$sPd)GnOYx; zN(;)qiUa0UcMAJoe{N#ULS8xRCu@aS5%YEBu1;;fZhE0}rkuN$dAj9Q=4H$XOvpS5X z(3vo5;Dxq+s{;l$AAxaO4=@U!(P7loo(ZEIJ_wAaCxCIW4 yU`&2mhf)6fGhr0K z2Z2%ZI55WOFdBM*kzKFDD0u8l7+&}wFv>Rr;}RXlUweRIc~Xbree_HiS@1|;6l?+p z_ATK@dk-*LYjqe|HD|(5;E}-aZtR?yhMjsG{qp3pm+JnuK z_E52Tid|D|o?_<|dnYzf&GsoaPqBw$^Q>G#o;==T$a4knjBp;G9xj~D`%d!sd0$4J z>v#_c=b5923xCFYEqNC3zJfeA@*Wt@vq%pYF5&$Mc}jUtC(nQIK0lo2c0F8pXLlQH zG&+F9y6^858*DQFu_^q|rt&}C2OF&RKfP7-0b+wSbOK^;6`P(5+oW!1jE)D@w#zpgeo)#7xUV5C^x?I@* zOxU@yv6Btr?ZPG|c1Xh>X4t|86X(R{rQ299(rjT1v59FmR_tS9V|Dg>?39i5|4Z7# zcGdqcX%E{{E$r`bYxZn4ZX)vGMqUQ;zFZ4ik*DamJmkPFc6;I`YT?+pN!mL;?sDy& zNFEz`lH(?7?^M#|yD#bTPA5H%cP8ob?oYbB9dX^|rp@eay!;%pQy!L`EbOTGrwocM zb*gF!EYAH|!%Y?Q8Z4ZXaHvt-PZMijocq&;wbRxlzvsHj_s-?-_+%gTmMM29uZf-J zThsqt?YoHc1a0P6_kMgK#OLI?VlT>b*|R6QtXY#>=!U-Hj`Wc8t6r@4*hkBmi*L^{g+wjxp$3#-f?%)bbgd zWdG!hy3SqEnm#a!x#KLl%XK~JF2yH}@K^cD7Ify=Pt#o{qp#|&yOg}nt#(a!xgxII zpE2+>-Q^iHpG$Z7A#=CpyX9t;y7b_?1%FKFEH%1iZ`8NwEk#!;V=cbmLyarVdowKM zNwq5nw0Cw!*L4n{6*{?;GerP+Tn%7v z%glcq`yBE49tksx{XZ@7P2;l|=f$6V7Gq`${;9JVa|C|2QC{1pnHD;a?~T3G5`n#v z|C*!b6t@Ns_$J2Lex%=OQfIJ6)b^ORnlEu%ZpcV7VRtd|B@2wOZZicLVSjIgZZjn$ zy03kAs`=N6Zj<_7C%Vlr{3s(WQnx8QB7G=nQon3?x*Hy5!$YHf7I@iH9c!uMNKFUo 
z(CeY;KvP2Kt2NzNnJJ}sqUbR-9HyYqel{GUb2}Fv4Mv!ce*%8%u8C*rOEo-du!Ho6 zQB>o2lgMV`vCKAP@T^rzkNRh$DKeYX1h?|P5zUWR^oRvNjx>FoMb zY-S?!Kc~D=k9CAae=0i9tll~wBXy{!WDh;6=uky24H*>us>rD5SB*X=@@nW=MUDiI z^h1$RP0t#pVIp`1=R=Wt)^PtA7m=pvStIy4I-GW#_OA%@Iv+i%jNf{l^g0e9pM2li z-qr73?b5OCaZX0Zx=@YKv8IMTI*Ff7bfiP{@okTsF)yx&4SoS#E_6sf^s9?m!#stJ z=xOm|6^*SiRtc_UKl43BH|N}qe#&uDIkJa+)JM=?ZAEwWG|zkNeLjp1E8&W(nx_?& z!ICmX}YgHPSJf)H}=H4Q=$8D>UAS|YkSgt?bZL&l{dR|QZ0MJ z|MHa9TZbb$yV@RfUj~fqw{#da=;=gWB4K!`zvv{Int(ANtk;Op zeHkz;yLA}l=n*d;+jWQ06xLsQ(O=f~p!+glREG7KqOZyfKj+Z zuZ!sHy2B`k4+29Wt+of#5@d=MBd=qzh{(0v&&vR~9;h|aD%4B>;o zXhLUM+k@`QfMIFWVTjJII}G82z^FlAS$kPnMta~!>vK8`(b;u}A$$-R<@LJmYsb?$Yx-Zii`~5%FbYCX+`>X8tH|hKRGq6Jg z=WM#KA7-9S_ciHs-B-F--|xRuJ$=7FG!IPI_xqP=wCcLAosqgP{ElXk&(MASihO72 zzD#EHTIjxd$s665P2T9f;^ZCHeI-daLig3jj4sLSsTyGRJTAH~+Vhm|OY~Zz`|8DB zj+2GW+J1l0YsDFHvzj&imzI7_v!(;n!jqe|egA|t9oWy%Cy5SB^jo3>vxVV_4ovi0 zq65?7fG2t|Eet%-g=t~ni7t$=rVG0hc%lmvJ(uXh;==Gm7lzJD(}ihqz!QC#76zW^ z#I!K*L?=dA(}_J2)`^L}OLSsMVR)hwL-(cW#I!iziC#<#15b2gS{Qhu8zU^bvC3C~ zw}SW2fcG2TeZufo6CMt{wMHE9)*E5q-EV|}_mCbge1!LZ1MgAZOMv%#-Wg$dA;M#T z_mmL_ygwLW;5}!Af%k$QE_{Xe55Rks_np9dgZF?iytfEn2E6x-IN<%s2m^1Q5eDAh z^l;&S%RARoc!2j>;C;q>U>M#(!dC$AOCt_=UmIcIeQSh)cU%t_{&2c(@ti(OC))Og z@Jk>*Q)P|fijlR6wA1Mwj9W*d35 za`=vryo)P-+J2KdFPzVE1Yab?&1zqJ?45hxOTX%^cc#7bmQRfdY;nGO@0$M8-dYp= zuJ*l1RXPruV?+BzCsX6Cv)1O;*|+A^#WqZ=8^OP{=wwE$hzsrq-)H7H{?B9iKab)3 zlAW_awonJQq~rKCKqnItzm@1@LZ6#6uzAaWU!TzSOFp<4oy=-2>&DKWCClH zn80EsHZWa@3&htb)LEs@ChBaitLwp6gp)OY=S15%eY0+4OpCl*xi;(rV<$doo#j!Q ze~4Ce1D$64Iw-6;=cnhl61GO+4@1ousf8^P&eX#(67Hvm6C~VU52r|YfF4ehutN{` z^Lp^d=W(g{jpi)0z_Vn9@epr4Px69C!r2lAkA#Ox7(5cbNW$Qe@F)p`N5VN029Jct zfyW+!hd7HjjE8vZB*_aN2~UYa|RF3D1-;cqBYa!r+ncPr(x(frmIt zK^PD5)}KpW@JP5&!r+ncO%eu=gl~~BcqIHw34=$%w@Da0621dG$q{&nvy_MN5N};2 zdBG#$N(qBU!fPZ99tp3LFnA~C$!6V^X z34=$%TOVm4v|~ z;cp}i9tpQg7(5dGUe;&|KB>ZTP25`UtCD^y`h&>*i1IIK++QyHCSRCIpPZ=saXB_u 
z&NIf~pT-n4`lk5!KE(JYeq1b!y|Ul4m2vJMW1INCauf||7|MSlejD>+2oH))})ca;8}XnaTO->JrTFa0~+`0lNL_t(GI%#U?z@SWlB>%-rJ!{4*R z->&fYwc+n!;qMvY?-AkeY2okD;qNKoZ%_F9^6>Zg@ON(bJ2(72KKy-o_}dfyo)Z2Z z9sZsc{vHwjo)P{Y7XH3A{Ot;V&klbN4u4-C{&t4H=gW6GJ{`i}H^_HJ*LRV8XLfxr zmhXOD-zDuFHaeCDyq7&Vzc*yGz2uu8 z0pqiY*javSuIElxm-u7Yh8^MnZ}Cy`33?y#QBsBcqrctb3^t9I_rQ{!+Ivuk{%*w| z#d$$Jys)8WNw3(b>8p6}ne^C_!7j?+qsz&69?u^qZC;Y#o*!3aQaoQ}&!7IsNt>3u zG3n7IZ)DGp`;6~5vYl~RpDI`PayzFBE{i8F{|)Ry7u?r0Ue>QSCY6u-{afB?j|1y- z4aeh@e?lwY%RPVkMV~710^^N-7scH;Q2oAjkk?~Pjn20uM&%bRQarLg)#75!`4+1w zU;HG?+EnUbjTZkQN|N|6mbGasembNsuR`Bz)J5vC6MA=1hgHy<$0I(Ce-7OPd2+#9 zk?ou=G-pAxtQQNCR*Wl$UUd@pTtlZqnJn@uFOLo~b2?WTe7V@$$~y}`EZY9@ zACX63v`{v(Tx6U>F`Y_xl!K3b!H)C5$6I0DA?3~)4{XQx>Y4d_h_^_csLLsOx__@C zeW}`&e#STc{UhtMs@r!t$Qsj)zcp-0lbd}NDZwe?SGxbi=Is8_v(x)(+dAVk*mAH{UX|1Z0+2bWW^pUK8g;) z_X)A=eeXutY;i|sU`c7SKWr2sOiQ_ zu`Slvbsu?JI&z_<@7B{7Nji5#7h=bBR8ihYZ_(2ike+1=r{5is-ma(5k@DPgs+A8$ zq$eb4`2AAeNdH|#dbXbKlk)hM)XF~-k?z&g^Q63yj{fOX{buRumvm1z=)Xlzzc>>A zyAkj!^>nx3?-j=XK}33uo<2~@8|nMi5*g1m{x#_78B$)m`^*tKs`=4)RrBv4eJ}F6 zpxbwOx9^&6-%Z`VTe^LFyL~I@F(dJLyL}gQ`!4VHoz?AoI&%4o9%Qq-tVYTqGU%4Q zdDwi!=I)N92V01HNW^#&nbsW!Yx%Rn5Fb^>7LvSDLJ3wu*!JhS9@XSh07`sIF{Y}!KXv(`Jz`4dtTNGA8^xxH-#bJ5|4)#^w)_{^dAtj8B&TVRX0?4XPFz#?qwi)B5)JrX`_>+#D&55L1*S%UQi_hDl2P0ByH+@4vCKjB4Eo_^yyxuDrsACu|hY?d~+ z9!Uzimo93!3jdcIqw%S_+>$JPrjolBxLa&$A@a=n!!9)XxW^!LymJ)($k!F)3uw&| zXK?y*HUC}CyFT4f)-3j4hZkZh#f^Sq zX<>sEoY;1?K8inRUz^!FHQQNv3f~0Qi83x&qATYZG&uSG190TMmACLx)&bppE^Shk zZHAvH+0Rj9@-`OGAEqFKirPMwy-$0SIUl{9|HDz8o!ez!gXA-5Z^bQRt@s)edx+t0 zC{pfKc!0kTx$7>pD3iOqfUU(n&|90cZy-*_SXtjI@FsGc&4TvG?+F&=NI5*45TzWM z&-;(m=hr;K^RwZc)&tJT{{x(I|J$4}P8m04%`W4n)IrAXHuJ_gGJa>>7al(w;*;va zV{bO&rL1Lra+XupvLB`gr-s+E__q$-#CrBBe?nKQiFxn!BEB@Wr!vu1WwunPFHPOfB~Ecc^%qgQMFQ{=_kvJu~Z zHKnc}c4%v53+q)|g*j2?hI#z&9Apk~G@47YRhzAiurXh-2JCS0DAPD=%k%B|3ckEv zVK1}F*HYLfC=q=%lpY|`Jnk)SOxLU&>Qn+6+ zmonnNPs$CX+`TsLXF-N#@1KQtfPGHctV_hlUM1m5&KiW%8u`~g;~e!)_%HkK<;?lX 
zWmDKQsh=@_9-ODiqEhKs;qik)EB#68BXTYDy-eOL`d_j|X^fO}+WNHhozR?(FEhr% zKX*@y#AU0=b%~CV`_qbxk(n%Od1Ec*ob&M+s8nd>k-rfC{3Ym~X5w4jJz#`U4+t_`0-A9S^Srro^*n=d^mrIgQAB zFm&0_{nE*q@ZpqAQ@o8I_?4jS#T0*XH?s${1$$1XNojnOf7)*ukM7sjc&VXe&RxgQ zKFrafcG6k7T`KKx<21Ee#w2Mofny=>FFCgWEM!jiJ=Z=@WbZX?9*>mQlxuXE z6*yOezkss~jF;Gz9PW>OYIWHUs;f+cCGD-t?^grGp3Pl>XW4K14R^G4U@sD1c6U$@ zso&SE|9%~S^|$hq_<$e5ydJG3#L zcjTBmtiQC1OV$iS($(q|CHYV}{yLa< zJTlJInv~HzTP@(TfFqlFs=Vj(P6e09$|mse|GrJ?Df<{-%s>X&LtDFIUc0sEmpK!T zagQedNftFFps?>PlJ8PC!P7nNWJywm4K;bDfrq5sGprEFl=EGlf@wkW%OKWR?dRoc?H z52wY8YGdJ3F^=fA;qLr9;|ThG#P3yV8dVw!=3SYi8n>d4_gqFaY6#QWq(Eo%tlUXw_gvYyC zlUNi5x5nj3J@v=yuo(%zZl}*wnv{UdIf_*YJ^{@IKXh)>{RpS9w?YX$G!^+Xr37lh zYe`XZ4$$YNEvt!_`Pc9T+a1pYtxZxxDU8iE-*;}4c9*$A`tP?tQCS1(zFduVzErDA z@~OIHQ?$ zs57!Ox{17&!e8b%=-(CaPsW|4(E1HD%KfNW^o`x*sr`-8F84@F9PK$%>ahd4mp&g! zi!Rrv$BF$!dB(I?WUuY}&abrnF}arn-K??asY-E;v>P(vn6Dm@^CGe?k$R(7*s9Fx zAGD@;bJ`e-$WyY3bZ3gUV^^w@Vx-IW4!#XL%|URqQ4b$D)=+IYd=mc1-Scwq%29lIpIoT~ zo0vx~`=dw?9OGMjum@-a#dcHsF46)(G{I?*MJHpYd@w`wz{`rvhK&oqAt` z6@MU;iQh>+iLc`hr7Xgm7P8+cBERSgS$lb$f|LDyf2J)BUCpPgvHn4y`Go$%S*{Rt z>|sCjS4vB4(D_nqu>NzM&Iz9=)$eKLy8cm~h2~oa>i?jBa^UNlJ9ZuL>TMa1mV57| zZT7OSk9~ZWKni_aji4-6+FadJQsVtPAyd@pQk&+rhORT*f-!Im*`d zDsPnk#QDfdGW}V6-N=8n6<^1-;H(2Tz6d%r-e&a<&IC7j8DqQuYoECW(CU`Qxhm*< zSf{h2kbWd|)a}b_)Yb%kdr)x7{3&hmChMNvc}jaW{Y+%0Zl9^~EokE~RA1)v65?h^ z+}+gG_O2`7i_Y!vFq9aXfY4Ia#kRy*i(r|KvF82v)uguc4Xx$?X)ldBgo!6*>e9|;%MY<)Lop_ zqCTzgN5-!{$f!}5XEZ)^{a2m7COlpCySLqU1L#BHI*B~i{);+A=83G+m&{Y%e^Do^ zqO9Y8(Cu;#3);eUn?l|4shhM_ZMbeJ5p@$;rESmXXNuz3r}ynHTbTAQ&$#mXW1Sl+ zK9;d2+0FeKj8CWMIY6AGJUTfIu~*0(d^G-&y2tunTK9X$xVlsK zWOG*S9}k=AKX;Umr|q`N9;AcO*WArv5ez0fjOZc zd*|`5G~qman6a)h=9ou!DSIWa$~s2!Huc8$*(_y*YLWTZ8j>_8W(gxxf+?e2btP{ zOl>?}rVLqP-6{JwLn31$OPZV^JF0roSSxxftEHxNk;!VChux?Ed{W=O&?z$b1u|H_ zP~9u?_vA|MK_YGq^2f%c;1*=>$>+z{S&=Sb z;orNHb6+Qy#o~j(9Ne6IY2D+QaFpB%POl| zGui(Gtc5%xcdXy03V%L9e6?`kI5jF6G0z z>~9In9MRST%+~ny?@umM(F>Y5hi*py8O8rZG&J_o>d|TU?iHEiA4fZjJ|RZ#*yDf1 
z>}X$rzenL6_JMWTciefe>&T@+pL1L=r>L-DPEl>cG1j`WhP;$ENzYP({aQ>x`5%|H zNQFhI=D*Ip`+W8ivBzft^MAJ4+fl{Cczvjab(@v{1(#Xr=)+!9BcIrh#YbU{yr08Y z54`yJDgK=pzm4(|Z-X~KiC#TNU@45(r@=B|lYCq)na@~jl#zTjoDn@EU!z*GiTKET z1wG|^5P044m0#2aPoWO7Cr!gkd?dW4OSCG z22GW$_vlMUYM?70d>_B6lpLGa+tq%^RJSws^ty6i?I|9gqLsZP`#D1`vfdf3RA&LZ zh4b#;6E^tN(zkrP)|Nw0FaNV}{r=fosn+;Qy>rh$9cR|y)5~c2gpb4<@C9}O^c(Fj zv^i?hOou``=UI~wNmYeHjq51OQH=4^@B zl^4qU4))VwS4MZN#qHvptm84IWM?M-ckNI6TYs%Iw*1=r^5Q>rZs;AZ^H$&qjS4;! zDi?S=DwX!&;%X9>x*LCpZPa*wX}-{Kp!yJ~skh6Kx>r;-6J$ z-2JM)C$Y0x!48O@+`&po6>qUEbS_jH<^D5tn{NL9OXiv%en|R9D}D9@_ISNXzngF$ zV}uoXxYXMyw4=XrKZ~8sqI~ry_Wm8JU>vxWyQifsEtlwR3GYKjd%i=v3w*(2;FLND ztYHS-5p>GfD0?qfARl$y%XO<=87*}=f!##bhMiwKO2kJ9{$yviE%Y`DZw|7qn37zT z(MEmR?)2V^4tCV!r9}->S?5}2b1lP8Wya*Ct_F+T|3&@3>$4%Fz4rqd!cXC$z-R(* z>#l-Ei>h4Qx+|-(@pkO>}~wz2xZi~3zQ>QA!}vk4DCL?)n* ze!ME9wRh#zvR&DS?q+VtInlXcGCBq5Z)}<1ZTu;GHu_T}-jVoR-qDM3ZVYyYF?##B zsJB~9eS04GOrzigT3oa|?mcG}e|JWetHv~aGxkn$-ey$xS7(ioe( zANAZXW1-d1tLif32yLRA^wF)z5^KM8wcY$tJ6Cc}%kB-z8tR*?uV8+^oOyl{^Zi8T zeYtljx30DKmEX%+BD>1Gx6EugM2DF9_3_jBGAKOmtOpy&`}ueEn};ZPx_*xK`C)|8!rm*K^&QX1~_Ew5gp!*f{%9kCG~{|?v~8*@oNdI za<5Hh)A(!N?c5-~c7;E`fF^C7A@d%5?T)7=s|%0Jy;0Kk)L{$n2Jx{yNGZ9B|Fu^7 zh4sTa@guI8T8Vyklg0O6ypw=%k{N-hb6*QeN^I*d$|ADDC^t|0MhGCol%dock&=l|+7N zZ~4zYW>H2O(Wr~Al-63d%Zt}uiWL^IY2pb6a4Fw z(8rzy{LWJ4h?I>12f7DzNzj&DKAwL^T}JvgjZb*LbAu)?_*U7^e75VI@He>Dyw$lO z4mb~?pD#d1-9Az2Xqo6e`CIbrr))z+-U{+wLLEfsBy@@`Quq>CpBnrTMaptyU3QTs z{3yVOy6rA}v#=Jn5`Gc9+WLxnM5TT)%rkB9{upih-6wflnl;$_-OBvRC^LihgWh~;8~8ti_YVAl2tAofW%YUYCED7_WZzLb~p2>#-*6oID z%lJ8m`tAnzPkBpyEHbY$URGkSw=S=>In=&~Mn1D4qwG~(&bA=e+2pq-@lQ|tWU0wh zYZlPwuU1Yz34hREYwOJp_TFf9rT^{l;N$d8<{thL7^i0Mw>8}t%kocZnXS1ul_|Np7CN8@%u?% z#=8r5`S=s?O>m3OVo-#KzZf2bwpjB%~- zPa7+!^Y(i?M;U7-H)ASu$zC^WCP$)jg!y}BDt)poyq=Qwm;W}Ag<51o)>f9Up7ytx z1`d#W4i$@Yzzphi2tOWW%!6CNC3DOD!aMqv@KWw(Fl4Y49$D0;KUpd=-xfQg<>b%q zb>+qev(N`f-pYu)TbZLj0B#%WvqF33ROzSacGD_xIm(uj7 zL(IYM3QM5y&9Q-Fv?qQR_qLT4{K5YEyaxWsX2{-G#==qAe+>Ey`#n)wSeE^;^<)`7 
zVdvk=*>Afg)?zQhrX5-oVwqVf8L}ym@d5LFEc5$eXg>_y^*e{W z{ZIDZe!*DsC36}!P4{7BCn5ITj3dxe&RJOJqJoBvxvoP4nX}h0CJ(Z_o3TWRvkzaC z)zHEo(T`Q7+SyU1*xI`{t&_wMmgR@dV9erC8lGf4!JKp=rk0-`e$(4rtgiJ1_r zxp*ldXhquu)HWAAZLnTYk%XWo3|dBGt)y*&^pcFmqQa5%oE{RXo*=acq-xc+CV1(D zh&5glCFcFE=a~l{(qMbu_xJm}f6QlQ_I2&G*Is+=wbxo3SYv?Qzx95x_45}ph8Ktp zE@kJUhm^gz;TE;*tTAfA`Uh3jW6)tIW4xa+yosItYJST&n!of+l5xl$cb0qq)`QQ* zH2xa=WpiHnyHCYyj)#&x4p*g$zx;MZ`mf__jQ^pvU)wF`(}|2Bbk+CK?@aK|+jQ-r zc}va;W;={Q%`#Y4li}a&>+Q8oruey2nmhu~S z^VXmLhvjp3-+z^F_lwl;3qM#iVdDKok6rnTHFKDgIP^L1GtSG8Y+LiSss9oGBzS5D zwt0RPuYhO$GpxJknvzAoBxCR3oVDIDxsvx?V|p3uHe|T-=#v=Ve(umKMTXgT)h)Zl z|DcxN+xV2zrU^6ojAh?d<>bdy*795Cr`&DJ=WJ-^PpGWr_clJ|y!U*(K4&g7e_~}V zzXc|VyO6C-tjyv2YsiC#h<+FFc11&YdW8UWb7U=13jQu8X@Vs)k_oy)yZ^Sny_d-`P_TSyyS)aobzF-~88m~eg8la52`j*|oJ6mYW;1BT4 z^SaP)($*B_fw(i1BwhMRqFv#cdQ1}Uu_XDWZQ+gIrY)ed< z`X+2sj5l4ZHZDC5&#eYL8*S~x(~dPxsZZRSNis$~#vGr(kY&NZ*lxD!lJ2B#H!)Ua z%v^pP=YjJmkA3eT`(0tubK0a~M@nVnu4mj+pKMJ2^jGZb|E*QM5Z&{9(`9dzJ#T8E zw4-LUT-H+xZ@pFxy(fDN{fx;(2S%(>!KI>{l=+jC`K}szi!ugoDfj$RYky?F;{PAf zJNJO|*W{Z$Q+R`X_t1`vQ)do5+T_Ydp4TP`?mq(NPZ9#{7f`O3`SV_d9e?xYqSp59 zYkF!OZRk>r_f>Rk*@y{rynYSq+NvveEvxu%&ks0}uV`PJctfh3Ipn`Ku_9IAF@N8f zYSmBEOH-e)#ssWkV7JES*`rl$X8#d6NaXcTu*osYDYsqfd`V_m6|9Y=hyt6ztru%BFWuOK9Ic?byp#$DI7v0bm_+eC8V`kpA@f%rj zlJwh)U$?+q_Zw}Jz|+Q<;yybeJIh_%6!vp#>~FBATC$JGkInEjOUBp^%?y6Q`ev>E zk}-(fA$@dMc%<(Q^xgI4n=eI%hd*Qw5018nKTP>Ej+uM{13P05zC>1>y>#@qz4bBJ zYg{9?ZRS0Pp_5}AA~V8o2Spd)d7N`AY3AJk_F%{fk|uNL*~ytzXo@@d@^1$Q<^Lz} zK4~XFy1gBpv*vuxY!kqX=nCY&++T`a$@R1;{}aKN=*0HYc6n5{gsq36TM~Fgr{$9V z$m?~!M#@wG+d?goSeWjh#Q zoOMMvT%=9%X)%R;&e;7;`2Sy;{`;->Yudp}_%4{ef9nzFrme_X{o!q!wiamLcu7}U ze14}Z*tdQ2R&R|S?wh4ryT^Af7X6m26~$SBzKr`s>9ZS~Y<*(KN*8<KAj(bp{fqgdRZ5?IjN7XE32H$ ziKp7+2eN*Qb;bJXVNQ?NC;hVoe1Sw~9;G3xolJb$_uQVcX6o|Ap$_uC#vFr~pi;tVxk^itm01J6iXLWgyviK%~;wpmZ@lW0%!FO_+b`#s=KLw|iUZ>jC^!`F~QC$l#8oS~{(Sj*?1 z`yh6+9{*MJd;YoG*Os!fzLM`$(0-Uw&Oy0x*ZqxI`!mz%9&RB zYGoat&V0~USpIv#{ovoUr}nw01g|>x!4D(Ib3Xjn7M=Unhnv7p5E>%pVo>x0GM9PG 
zi{L8&f3{!}-XOeH#>==Htn@qs)@glm*Cp*+Z69%Z!^qctF8G=3_X0;+IiD4tug{E zMtQ!4E;cc~S7uI55qPS>lQ%lPG{$GOFZIeT9MV?{yM*!q;d zeCR@a>tAUcCSGkP`fE47oBQsX));a6!GqrCB~9ki#I-x<0oNVi#y+1GEYhBwr5Y9!{NL16fZuP`7IcrB{o2+N=afDt{Tl7E zmMpx}?p7UK+@k3QJ#>RtJN}{bzI~)wYvFcal{CS>1+%nS2R`ioTTZcF-*hDOIXUZc zU$ZcYvv9}9CuAnkU(31>8io%Z$BDFG4=(I$ts5MiYHqs73)Aj;N8rOebX=b~nvy?d zPZyqG@S`t=|NXC=rv#Q(#?fShVGR9?PGg@#f8ktB-!HO&-525W?}}}?>5E|T9xt#8 z&-sKl*~50Izg#p_|KjGN-sc`)Q}^E8*gmOteX3fklp6A6Vt>p&+KQag0`1g4(!S;h z`%2-u*2WU#ii)9Y59ypO>(KeQ`Id50#*^pIETDYGovn?Pl)2$4n8^3xJA+-w1MKbl%fd0O<=;X6MCP~KLQA=d zZ>zxy>WJN!l$CO>{*p|Oa#afdRm7;a*YO<7elnN-M%W1z&ScILpa-!LqMrvWJD%aa zVxxWvn;Ya3=z5Fp$~F8!WbfCpIdj#N4AjS{o!NJD|0wv(iKd~yy@E078Zwt#?Q~ZK z))w#`hd*TOje)husYWcfHh6m*?Mwy^k+WMV?_zwWZ;88)H;{u%7{e1aCA;5raj(K0 zzd4qB8or;%3bg)#`Y&q}_KRPF=+^VctDW41KB>@y6ANze@p1J1Wm8cQ-(lVb4P&os z_`V5$kh>rs@E#d%ThqtKpnu|J&Ka9C@E&tFm$5ZXR{gxk9GnXbP16{oe8Y!2z_-k& z=x2E6BrLWRP3SU(he(|sc*19-i_S@6aQ5*%z&HL12AoN>|0zBzf8ygBA85~Zs{V+x z4tb4xZNq2L6P}KpmfYF&v5tRVU0uZe@}Qh$Wt^wFFJ%r5`Oy`}84X^dmzVGELC5`< z7_C9tkhZ$zj)gwuj$PU+;a+tE@aA1Ty-_Cyo-0npm(SXloUO%F$T;f&q@ zTM^*DQ7$g9HZn9kKX3i?#;ywVsH_3$XRp1VW880yXFk{7ep`fCqHf~vh0%TUy%wEl zLCGSurURJ-AKCUzUkuOJHFr?TxOunX3-Bbi1b<9cp+(@}3*i1U@Qb{89PV6SU&Cq@ zdX&AV?r=$_8@Tt&Cqd$i;@ybl8}{C2*~f!Lhu z;5_eYoi|Kb8_fGJNwfT_<&I=1*AYS?z3kWv3#I2iA6YkP&F7fmH1&sh=-Y@`Ug6Z2>v6n)Z5SNv2Jq*wAX&6p9_8+wj4# z?DouZc_YfPKCquYxl9DPU2GA>4pD3#@8(lYy|=Ls72Df05~bhHmJc~MTm46D<*%cB zGv(j1*^^GA&bx-Kw%OOeVh1X=pB4@|%O99^V_>l48>ao9*v<+rMJDw!@B6fk*h@c) z4l-&l4ZrQh?$fZ{&eaaOu$v9TnnyUl#{yOA0;naseT!Qw4dJG^m<;j?De*?1KWp_ z#6QWEenn=v(iaMkG4|q`Zw>g?rpCL{r(_~4Y|-1thf2o6tF=vACEvd98veH%zBRz! 
z9`bNUxB*<-_cnZUw%l}hn&F$XGaf${%9gW73LkQ5=(ac`Smm!w&=QAp9qesmTR%Hj z4Sss%c_IO#(;-4b%JcJg1Li^3qHhaF5)A1p0S>hc0RW|8M zatEu~>zYYd-P1yg6SRgJN1*LW#*a;ZTQ_id#;7L)z~4xD;zYK+z=C^RTzM7<>rtxri8tX559V=xcbd*uR@^?8%#YJwd(o ztp{DL?=-qw|5l=i&7|56Pte-lPI9!p>v6V8UxrV`ICEXTH_Ji%*QifLEpm|fQ}iG| zY*z>EJ{4+gplus#xcT0!Hi^48+8WyitS8>n8e{BjUjxRGe$F48F#-&~&krBPrxJbJ zdN;ZR>O+72hoU}-_V_B@(4g8lRs1#md+Fblxkmbh{a5R;XgrQ#XZk^lZ&`FA^l^BR z)*$=9;kQ)iZRW>Gj0pV^?(o@HL^c+=<}LQY!}PbL+8wG9yAS5?rngnYXk+d)U``0r zU+9=E&c}xr%D7M1Y0Li8jl6IZdZPShbdzJ$JcWGHF;*?8zW`sQu>%Wpqv=9_igasD z$a*uzua5~iGNEIqTJXNeCW4b6%njwhd;jYr?h=R-0-a*tr2KQG95QY?dBE@Y@js3K zQda)YmV9jErwE|-SP*rM)6T(&EcciJvAirV3fnh%t2l?{*>sXj5WjJ_@Lo%obgVb zd2T^gVck6XF8XF^qiZ%akQhEn9pP2q>KT1rgcjvIG;~KU>&29_?qg0yfp>?*U9rK- zTEzF(@~J!l?+!f*Z!vu*MB!z82V`9MVhU{5pi;)fyg3NGNIy3r4|VB8-i_<#K2@WcOVosMXopwy|A_c;?EeJL$IbUHZu|02CLJ{2duJPTa6srl{HH8BXukKtHr@(t=#aUI(m`dE4&LY<9mldh zKfZhgV-()G93K$#ojz#=eKK3r)$`Et-=<8gK z#KIr+>bwzwAGSBvjJ;%}C#b>u{xCW{)j&@MPjQ+&Zdj(uj0;F z&eyHP6>uZVsHv+`V~F=Kr@YdX<2X0hXxIKs!=`OjMaHoV&hR3)bQ9}~eefUX&Rl0J zf$=J6@0-9s&fNcjADn!k7Ja>4_Oa34*0gXYdJ0+FIcX-|RL8j?jW|*=hvVQ^rP#g+ z{}VpgPi&eVCw!6n&0c4q{V?wu_Bb7F-Fu&1T%lFRb3S$Zs*?Ejoa57`o+JF(+WQW3 zXW6~?p2gyiz&dExCW`-Sf`1=v3QtOB-JaUXSzSl%8D;AzYcC^nVeshN7KYNNPYKnaH!Gzb&g;9*V=buXLw z_BKZLvzmUE5W{6;+ZD9U9EGL*fTl)m|9aZD%5<=%MYd3Z=^+iju;RZUlM^Ga5}69T zeUMh4JLoPWHU;ysiZ$yg^JYq#9X634?EkGZRXS_sl@E|H@AM>C=W;uLk$Djr+AaJ9 zm=CkgM20r_6SOOGvk!jL3$O9PYkce6jlJ+1AH1d)UgHy91OGD8;WaP+i@fGr%<*up z$;XdfKGERi+hP{AZcjKLAI=N#;ha8T$@|utlC`q!T6HjuF$piS&H=KfesLXiA#yi- zqc?gUc$_`KIH%WgPAAPe4?Om(EY9hRNBCB!Mt!TlW%I4RChA)~-SDkG-uG*_9+G!v z-z+m_ZRAN@02IF1-yU!cAMcbI>;C@Yv|f|p&=+DSh(2Fp19HCJd4zqXyz!ow z_I>)B5AzSa@nQM3kq`6NKmJqA!$_avXE?u@`11It(}soTQmy)d0{lNL+*;{n(O4_FqvYj9^A?Yi zSH@j|hOT45D0$tS-$d7umu{AsV=IHNZMbZ*nSO>X-LC(#e8#QyW33Zwu6v!;4~UE_ zYeVMyv02(dk#Qf-HD%n0vpT-A2AK+d7G>if|8-Vtnd9U*1n89Vf)Z>nEVZ`b3+x^L<{3&A}bb9|G z_Q5#H-@RM-9Px0lH6CrgYoJTJ-fS^*{f=|2-_k3!%rfzMVDN+2PWZCt 
zw9=j-U;XKP+4oJoas+>M!GonUtQ-YC8HdEL=whD|9IYOujO0r@g3Cy>+&4!lr=#b$ z_jmbdWsq^DObNb;OTJdOwIv$oVd*Gk<%}zQ1iiPvW2VUYI`5vHh&NGwnvAuxWdXEi z##CCd+s@a8?((=3u;xBvls+V1;IYcg9Hq=*`j;|g(YB_IQl>82j)m)qqm{uHNZPUV zyW?$TIGZ8U$0IK%Ak%aHtQ{jV{T9(hAd~v#t~E{K3^DIHe9L}aHp?8F$oOuTT9Azo zS0}a&Pjg4qJj-!#?tiO>Cnp=R6%VND>F6WGF4*Ij7*%DmuW55aYHV(sBOPC;d}8#$ z>$}H0+YZmdF4UFNCh~1>Av#0e2X|^q&>L1~F&Ay<+tEKryq$GVq0?;pGc+o+8esk< zZryhJ?La4$j~>;pEgzV_&3UjyOY94v{|_vxZkL!>Ig}Ng7sSaqC$TSw^TYzm?r|g< zwj#)&2X5LhU1Z{LZ@Sj7i#Hf0uZD7Rw_xuNSO)K*Ptn&IXJy9P;n5l%WbA^6kDf=y z;BNe}0fVP)87WJ2!qYzHjC}0M<;W97$Ir=QSxZ)Xo^;|+vepIXRtyL`t;rgRK%+9I z5dB!|d?q?RYds1b)kWv5&Q@k|#fd%A8)vprZOganOCNdB{M$y!*IDnV*xOE= zweRLkXOB;`E#`b=>l5)&M#=^je0|8tvOXTKHRBV-AR|b8q9fy~<4;3Is6!7XF!%ER ziC+_=XnR(x+c|JvKKp)&6{Co?Liq>W$OSnvM)rNbEso43i-6CrPII2 zi1zqNLL(xNL=sI}`~$Ydr^fY(*gbx5ZP9rPdyKSW(c^Dv!&*;~D2)l7Kl9meQkU)j z)BOL3^H7lU(1V=dItu_pLdrV;`43 zthN6Q+nlu|n|ab-0dr*4aZtYb<;J44Kkr$SKDV~0YZCi{;}>hBT)BQEymbsF<>%V`B z?;PvX#W_=S6FPc_o>{^l@jKhDS}*h&vgJSJgSobO&ISK6&w21wkyix|^8b(YW#QL; z9;l<7l==OqVvBK-Ueu6nWG}VGq623)@I{7ItK8|Gns1!o(bH|P>G-}__}gKF$HK9f ze1WBPl)Sf`PD944zLDp9S{@ zTV1P6*N8G}M=2ADmJ!&iM#=Zao>+tBw)_aPqv)x`u0(WDuXde~Me?Gu`L}H4t?`sa z$8rT@kv7COMRWz0E*>}%E%?{l><->?)^4qfo*`u1U$?cbN9ietHoei`7xFug-%?h_ zc0T{hqInDXT^Id5pWiLEa$n=W)qe}}qQx`(w*IXAE?fT0QSu|?OaE3I(?%)N6@?>y zl)OkZFJ_cH9huS^o70y6gzH4?TZvPPL2Tkbv+2%B=UD#8T2Fk2Q;%uc>)Bm~&1d<_vfVGz?|t<9rX#M> zPk&x*6SilGeGcrLD_2%Z{4V1A$5sBAw_wt-A3p{!`rJ9yzsZ4JXHG`nKJ1zC%ZMp2 zD9^+(!`!)x`R7$VkNF0kIw=2@6epT+?ibH!58-Gr|Xkj_Ll7a$k6ozY}Fx&wQTY=%WC=6a;a2dEM-(3M50)xzj zz>&}Q5n#y$mTABexy!L!%1b#t3im$nS#RLeRVn53wfbZ?-+C0UVqag9t-3nz zndrc$EmMaUifV@9}EV)DcQ*`z=pWd;ufX~Km zEfhf?!XDQ?<&QbO?gSO8Wj#6QL+)LUoT+?+pBnL(cVa8-d%@kYa+3pH8f{4#!L>dX zA8=?q2fe1W#~yC9(MLV>ggXW1R_fI|TtR^y`)Nb2y_m7)aV8P{YdPoTNK^-7>0Wxk zx13Kcza!&4e3V z{ap{R$XQ?TgKlsjnK?;g|L*~h0>^ha^Oh3lSNiSsgo=8zpA>nZ65E4f8DkGN!vc%W z++?GF>4A2zB~GkEza;VG>m+u#*n$6KZs;$6Lk|cY$a@v@vHK1T?}=YZ%r>D@X(yMt z9gct>c%$G4-v5B$Xk<(RZ4z;U#!H+ad0*hvZ$=pXri}~jpA>E2L(!*<*(ImB) 
z7LQZ)J&KM5z7v*yJP(;ZOD<0!(>ss&FWZQf0bW>36DumqGJP(0@O5BW`L|i2cE*~f zd5pNnhr#_6`iF0X#l{sUv|ZxEludi$A8*vzv)HhI{ha$NA9*rIf27_ZaD2}1^GQc% zpU9os4zX!Subvmea5b8r7~aZUwm7Em&&I}gCU-I&j_E;ppX4z1xP9kp4O6iJ_BfOJ z7CWa0w-fVaB5n0>zLvIrMmzAQuxoN4Q`%2|Z$?P?L9(jbijQ&l&1gT#yelGSWxFoD1r^ft_3G_dU{Wy2DI~p7_SQm-mMje|aYyf{uTH+YBtILrTRQ0^u z#Jv5K{@GV{>c7fmjj3=FJ`bu|XlqK)hyCKS(Ki9A&(K;~CxiEa_iA)e=34=i!}a`^ zy2z?KeCabn{-Kht5>4;Zpn>^A?ygP5pFacqHaG%<#k>_zjemjIaFl`nQu3rNd6VEe z{&SDIAceBB9ut3&JGfJ+UBYA2qV;zYYuu{;&1n6)(};0Fd`{?3bF@V#F0lC|Xho$!8%oqU-8EzTJOUpSKYf5!hmQ=T~o zj^qJdCGD4btxb2fzC(Lg;*-$gyk?-@=?cEDe>n|ABFKbf<=JO7L__}V(6wI_>$E0{Sx0u;8x6m@Pe)L;RVFgZ>ha%U}~mn$DUz_%n`w(fTd($ z$&UG<=^OF;rM($L*r~#w|6}jU-NNht$VX^zWr8!*P~i)GllC5k2D;(Zg2O*E_aZyy zMAI^c*-OW$P&0Sn9oSF&#(DKX7C4Z8r2R_lgIb&`2jIIQ$&-Fc$+vz>e^x(dlfLL& z74ou&No;(5wp!K$U3O z63T8FPz~+e$BPUgWmu#0Jx7thk$V=g_edK4UJJw@>MQ#$o)eN-;ev~;^c~=S!h&Dc zmYy>s#M^T_24<@Ek55yCWx}r${!z}F>xJjK4W8=iTg6zCqCC~YP4_*4R(Z##{TT8J z^C7S%FvpRZoPmhF({1qAH*G2fm5f9N1N6+)u{)!L!%|*RsVlDRQsQZpJvE9*lmeE+L8x5X;<3W!#esZSx4IV z0NTSTJ=DRs?B#EBHz2laa(}W5yZ@i_`)R%Xaf@6kdu>KEtpge{c-=JK2P(fQGkcY@N@7K=dH2NmApx1ul%LJ_(IVyav7I;ecIhSEqUnRbgKJeg8?@I1XBOWC2_tQv|JDDAfUu0EeS;Lmp z!yZ1e-O>6Q?HvjiZw<|d=QJdb3$>sp6`5%={NYEA>aADOzq}tO^jXHcRWBef^+9{Y z*a?$3U*1K%k65OA<`T1j@o&~r&e?powyfz!)*=5ZX7C;jdwc=u1(Yq(yp^lKiKL6H zfdXZ|7vJG7-e8kHq+R)sJW-j33@>9^3g6%@(qLI?AT#pC@Pe{&fkmyvg^?JKJ{NJW zG)GlrL;cp9IRBLa-z(6U$mVW=ng3nUIX=hMj;!G_N8)}34Ax%%nek?m`A(o|@8Tw| zXAE%$Vu>>l#~JQ4&T!a|);dD)1>=nJk#h`w7ZP7UGtVgO+12=r^+Ov9-JXo8A3Dab zc#FO`=aB2zHx3m*AB8zS?ke=)b;zoB@Mg~*c;YLp<$dfQ^?ZMmy5HoRHFrSAzucF8 zb|`YTqqXh_smlwH2_l8OQRAqNFRZy{Airk%z+LP0lrHK{`*Qfe0^S=EndkajQ3LFn_TeNq2Xjesi=;5>RtIJU6`%{cPOUgbu23ZQ<81QApID6|sksq+J7$11R zf9KTY8Fx-x?k@Lb`nAAIG3EYD|DF2s3~l{OzOjLq+~paW8hJ)}>Ja-OzZ40pxw7>&#e);Cq;vKSN($jvrh-?WAkE z-^;kgpMER4#&X(SMO!}J@ynmB=F6V_Hup(g(3@|%N+>^l;JjZl-@a*{`0Po}s?tr^ z+|lo~eCxc&U&kO)cKG)Uf}Si4V*V$;Nl!mx!>OX2jC&@_WVep$UIu5m-Q^aj}YU^ ztMo#bOOMHSVUGnJfd57MSkH2T%opa(?n?IY2Ci?miZ|v!jj76om!w}^c3aLgwP47p 
z5>ICiewU&a82DX}4v6~Vi>b#27G+F_Hkrdh@La|mdTIjmT+ns}pQY%Mdf#j^@OHXe zwU^Sa=1E!ZWDT; zPk#3jqeo~@p8>7R)~YV0O^;8jYF^H_<}|-u&ZOg+Z6mv1-Y3bOvzms6jp8H*2I_dCUZOjvzPjwK}S{H8rJO)FXJ*Y*Q#UhmU;f7RkeJd^3~2|0=v%p*kSIMu=`6XFWpd7L3{?^ zq`a7d`U^w(SC!wk5;%1HXuZ%3GN)hDCY~m7Z^zh45EwK;?vH=ruF=w{-57}dJ|(+vNyv=x9wF2 zAAL(5+`e5M{JB#*82;7bRyoJqL|wTTE{ETR=_^@t?=K;SMP0ecOXZv*=Lb2@h+XBP z@sDi%_p`MIi*L&M>W{9scaj3Y7-*L@C^Dq*O&@0>S#x8!)3JD~%z?EAe~GLl^Ikq- zVBz_szYopHTwnVglZJnfPkBA_y@_?;oj&zk{G%%~_+2oWH^|2Zle6bJtCpf8mvmVh z>|@2&S`qq?zIZds@N*9GZO{%fj?DF{YoQn9fc3;h(D^Ova{Fpk90MI!j&&?sKd4nj zRsjd=GnR5qlq=)^rQp-&i%Y3k7nc(Jsr-&f@oBL!M{fOTJhu7%bj=;(hbM^t{ZT$D zZp>*peEQHIII%l@k2DA0+h^#P-H7k~6Ix*HukqKudxC!1X72cZNBqqv#s=25;s5}9{?vvFhdSH`mRF`}&yvms0E6p|MWj%SSv??84kRLuJ zZ+Sn8UZ1$5JL>V>tqjG4^tr8#+Eo>|b;ha%kHII5`YTwI8`ZMxOEqIIb^Z&_)9)re z(#={`EB6!Iqh+1NW}d-M)&Vng;ftLfpIzgu>M0UF<6P!u-AmsRcUyE+9{7nO5BL-E zX4J*x2wl41F=vqnPpetR`T!@wyKWSI!Z<8gTE0O&#v*W(GZyr^iT*K+jd7M;4zHT5 z8FFTJKJ;~&D$Wo-RgNsmn<^Q1^ZR<}QGU$AL>BI7gU zAq|-ko~BQ#NzTUSI`={o#tNs5QD1%uPS72!#Fb>yebN}1D$0Gwh{tE z-UVt`5Ae%)W|DqEG`;S8wd;*&`m>~89Zm0Aq;|bby3ED>d<)!%zoQytELMNQ&qZDm z8A^D%C0B_)%BQ957rnOdbc>&-Ogq8P&jCi)drk2|Pa<gY@PTY% zsi4a(&Tep2p))Q<_6^mb1F~RmnXh&|4s6HU8B$eCkYi=M9^#&F=dA^S^#!9Xcclv+ zCi7a3Z6~_aMUrQwxzNXrOiLqHll0xmcRu&yBlGKlyY@a(Q%aW+bWmC9j%mmh;lNfPTl~u#-yL6cHL?p6S&Fat~K&%Q@qGpR=KV@YM1a6 zS+hs3t}W_ot0_7P9PjZhGC&DDwi&n!v{?AJR=KHWQ?d{F<`V8{h`(&CL1=Qj z&!kB=^jM%xD?(RMlmU*>{S5lSudJyQ=*dsoCepm5$=pDvq8}c7k{FSr;dLMQ5I=37 z4d*_kEptcP895LC>U`#N?w7g-3^Pm^t_Hpz0>@(bv&4PRi}GqY+h&utl>FOCyN5Ez z=Z`%@=I?uyNuO!*HIY;HAvXw~T-4b^-dD|yn*4upZirj3ce*(@H$dx`GHz=Q=0xYk z`o9S~QfscPw7JnTGOr`&(<&o#Xs%*4sK)b7!5yfU!af+P5KpC z;r}H4zRg<&BWbucK@|s~Ng3-UW7VXRby}4hyPr68MPcM~7jj9#*h^w=fcA(jJ6Iv( z93NQd9n>ZTnBxqQ|2TU@I8$US(3a&>cQZ0$T-S!8{B^DreL-wWNez1)`=Z2&bX7lO zoU6Qdx|e&}&?Egf@xw^-D_3C}vR^Oq46>mSk4a=fA99VzfJaDMjU1Svxq~+#2Ua5Y z)glKLppV#y>~~#M<`a2OWWCzyvo0&?cl3)qw+g+Q$aFemC`6`1-st!BJ3=D+`3%`D z2HDM3Df{!Y@TsII9p^*Gq6;ot@la9Tid}2G>slK1q3;~x-XTU`rzXkz7Jj5NmqPb~ 
zUmyR4r{ysJ736Osep2=^XH|srafTXODR%FBMMvg}iK);Fg6w093XsPtIUgTYF@?d| z`;!aOUra9Gd|ZG$QK*^cWB=8a@FI8RHsqBr&`Ss%wQ%=02ly(G0ZQQgdDvZSMt|tW zHei3O9uvD3+-qYik7EBSeI9kj`ft=P6*+oQ8-0FntS9~hHKFhiv;QZ#E&V`p8-2EE zr}>|V_Br?Je>3=BH+_~2Ijahw4<2Hfw0W|clnd^jXWRly5A6#)tO@RxPV6a~Meq1v z3BS9U*t(yCW8rNr+*$NyP?q0v#t?qdyU>B5Yx)PDxpc<#@#UAJ zySoe>-lgdBEai++|kaOS%e3Y%p7J;Qu_ z*ohQ87mKc3^#}0RGxWH3PE7Dr=Tx~Ct9aHx@eyPbb#ujsArAwO8#<7@CEBeuq?}mHmC%~F;p&ND) zgDM^PvHvrCPIPpR7EW~RGQJ8Yt|(62QJnOFlM*e)z={4^`6+P%Pc?9|-iDJojCu3X zaiROb$^Gcj+FYjvo56|f<0ElW@)bC7M{!c}6*villNj`}#6epCO&f2i3+@FcZgAoT zCmQfCH*D@szE@!V?MxdZcQgs%Z(!dhbSpYmIsccUQ@7tQ3_HcI9UqwQB|w{r(CB1n zbqadsB*PzuHL=4Mo5L^o7CRU(FrY7;FK_C3iIM90E$?{1U+dA0FJS#-^S_k;`MiTx z?+z?lL0a!bHDA)3(dX2k7FcvMY5CDKC28@2MGukII>9VgDt2;H0*iK$=8C3C4B7f* z-c}^7oc^V~G)YShEc%4BeQ{=5HTzQiOk$qO`97M~jf_@*2Jn$qA8VEiNSYt`NW(wa zgb$gl{w&}lEkBy3B<&pFBaQQ@QBK~rsy`3-NOMKg%d1^J$83eE+Ap=FdUkQAqp-(o_(n$M+|8jq9tj|BkhJN$c;fJfC zBb|8`8;!0Hh8GYEZi}8D8>(j zhgiWU=%nX;?oHc)7yeLRadYNAa4BV_Tpi`gz{NKHr|<9>wk4l_hOZOjHqr)SjkJE! 
z(nwp&y~ue_=VG3wc6Kfi8!hbB$P1A73U}hS5PRwt%Dl2#>{O~} zS3mwN#2@eiQ`ZzVc;3^_!8eI9Q0flcKE?T?t);ZBN@s*_=k8PXnUAmqk}(TDYQP7u zZh2+(Ss_`&5`)p$W4N!j-+}f(Uow`DFTawtdj)HF1#21Ks_#X8tB6%0{MHkvRdr%l zB=18Q^R#7ND8M|G4jQ&Zi{##29~_?F<*6?E4{UIbVQ-MEP2Mj);$7Thv1e4hI_^hX zBXXz3TNI|9S5mkXdCsNcxmE^f79% z+vsC*xY~wynJbwm?&p(ws3T>*|1H&UAOFi~k2xSN(%A-%bk14AC-odH1g^Kpy_bwd z?yw`qx2uoz4}6_7LV3jeH}hP5zD{-0f642md>Y?^d()2C{wA*`cX$SG6}ua`+r_rV z#DkN5|2Uf$zKHSz-FRa<%JL;~=$J&qYhUMYENtvuo*ltN>B9Mg@wYVy`t zc}Z$ec-bCDijmh$UZs_Xu0^9wvH2AkWsa`1@)!?mxcFt_-IcQb4A_BBWEJkX6Vqr% z%74=;PhJ3i_OhC|khNo$2WBaMI^_e=@|RfU$?M>(|FW98uoQa)qkYC9<)>1Txm9%bP?5=%waZhdM;={Eai&t$>ZL8r+e}eWCD0kBmY;##_4%U{~$i4az ze3V#YH$nS1%{=?0yaVI~;1A?|aMPnRLYh)r($zfpXhL`hzu=pQV`FdYca-mdPZ;Gl zq@I-bTk^U;C8inu{xn0wkCZ!-&xd!(J2o5nZ;#vdH1vKG^ls8nLiiANKtBM6hG%Dl zz{y~mfxCq8i=W~Zoczbur8@e*Q~t+uHyQo^TmH-0p!!jU)gE#CSNJf+gUwCx$1LdZLY zJKg--Gvy9W@Vqr%wWsUES)yFY1mtz@>Ns~4Cv#Rb^1z={+N5pyZ}8aX=Y^gvxIME( zB~;FZ$2|hRo`Dv*qucTs^f>j6(~Nh}r>&dTxHZ1I{Sk2aY=v3A4c;cQ$6Me8ntr(C zTvLB4^o4z)zv9c`i7}Ps1`Y&IGJbS(vUm0mvTwY{I>^TESN0G|lf7pVd&#>-y52X) zz3f}oZ}PgW-wT_P|6={Vw<-B`>o+#=uUfy)Y)bwkzo%)_f-mvC$f@r?z;}c_#~0JJ zbDbDcvbhBMO+OCK5TRH8Y$=rJGMy5ipL4GLqY(oa84|z+rll4``9whw$ z_u#485E_&<_8jwB#ylGH!aPVF^F4)x@I-6v{eqa=^mD4Vgx0Kah@25YF0spW+n`NZ ze};dyDs>J+lbF<;oURU#Jkw=A~P6!(lm6@ z@M3trCnU0ljK#1iBOdCWWOzlYD&9GX{o_mC{F^<2SOXnds|;KI#PBftZ#O>P?|iJV z!B5=(=C^sPmGOx^u<$`shMPi+aBRH2BJ-Y?*)oN>Ko)FpJhWBjQuz0E*bEwTi0-qS zJ=$vjf2?*_tA5d4jMi?Ij0fF z0vktHbrxr?1B;x~#5Q$sH*0h158WYGRL3;gS&w`=IegP6#(S6HAH)kC$eZbt*{i06 ze?-5&S%FqReLm!v61*LHgJ<_i{j0GHkTo{NXk$wFH&ayRD%z2=fV64G^-T)jO&eZp z=X~_P3qJ!XXSH!hQsYT&EE}c%%~GFxvVWz1&M5V-m-^Uj{44e6jZ%Mw)W;@iwEAmD zz@I)!{fnqCYk5z_LgGZLhU^qob%=Ap40uc;zDnTAkiGAY&Z)?)<{VB9>6b1Ds&wpq zDGPr+paP!ItF$39tf5yn=GrR9y%6Uu^lio&Ny>{pQRg{|V?5-ro(*1e zc>%mCAD)%R`Sjw}FRWO~d(BwNTZjzL}-4Q^9D zBi5O7jKP2Z3jaBk3?%!S(B-NLYNxD$WO%69Ws2{m*k;N*K~t}u9$I+4hY@%7jRRMTEQZDmC5W%AJf$UTp=mgOEJ=;bqqUO(O-^%R8BmleW=2 zEoGCWN9%|lSzxgGV{bItx7w1k&{D=AdU@&BN>`+R&bXxAN^MNcrqt?o?tvy{V?(6& 
z1#X{(T@f+X&dbZ;-Y7@St4mewo54>VXGkOd7wy%tm%Gx_hC)mFT+2GcM#Y|M8@~1DSaDg!{oyW zVUL{W;OqZ*X87v%j#6*5_F_)ap24>kEES#q0pFAH(R++K5`WRX6X851L-hAFt&X8)CN~|1w+BmKx_Qdq3>8>|@yQel(mUzWio905k0`q)99%FLxak^uyhKs2X`( z_T)l*lrp$)@^FS!lbT)^*>J*VD)BTvf&=^cQQ#9fUFJ`G`z}XUCpeM$dzSfI4UE=) z{5O10px4At=|^~yl`i{qH+fCBpNB8C=r2w_FaEPW&-sczU-*hX_kKm6tH;V7VZOg( z_ibkGjB|(JEQVN?*4ZPEc#0SvT)crmdoOCG{YTHek2eov``&kA^TK@W(Nte9bjtsb?Y3>+00|eV(;*M&y}WciJXswZhp=hOKYhw+F0!ibyS4cn6b=^Gi~_c zqr(qF|ASq$ufuP<*68hW1{YikzZrR+=>a~IKjcE6(Q{4IZ(C>03_Imq<3MNaL^c?M zj4+mR<2cuh=N{_BdD0CpJTmJ4(9MZYn0(V-anT3@63k8m#bG2dcu!#(E*o}uymzCrXC zLSJ!tLA|pild_X!UFyssdtX@2(xUh9&WPq;q$b_YT<2ZB6dJF7;DYr1Taky@-%^es zGszvofmdeE49WXQ!m~1=2dOXRrHtrdTTlCX`!ucN^+gFCuP?->GO}`UW8y00Ct|5Z zR!(nBs3LB_mnC&S{?b*4K43wF`^lNKBfe55pGXKF`5f7teaK1S=F~*^h9eUi?+q=y7A9maXKIT)tj4}Pd zHl|gaPfm<0A-pR(Cb^S%8{KU6c(qg7a^)`_jBw^K_b%Srgue9};!o3`oMoiXP3Rs! z#$S~6u!DFJL0|gJ5H$N#2Km_N4Cc+vZk0A3ghnL2ZUgtKTA;m-`(eRBHonC2?Hvo= zfE#&xhf13n8Wj)pe^R~;7s9ijM#gRCp0@6c^{x2xJV309{n+0R7Vw+68$lQFY}!Cf zbpDrD_y;~hk690m7Vs? 
zv2!ZU9>x|K{f_8pT)@Y>&ik`jyDf~_CotO5{j4qS%MW7D)1S|}Dq%f!Q7#6*q)OIR zMO;C!K-LuXVybJ!7NAe~?h($NB9k94H^VGfAaWyc^Z-W%_Xw4a$w9$iMVuPE0U7cJ zml`bOQ{m$6G)Lg6LP?*kTFWcE12>?Ho<*DGGjm$yJh}~dyv&Q9{u(~Uys0@>&GQbG z9unGNzuHl-YG$Z#sHBef@7#q$v2~BL&ai`+C;37{zN(qx>s|++bNh#4>bS>SPnba{3lH%PYzTI1@c3@W?u|XiL_a4nL5&jI8ASF4kEKG<78%z|fI9%`@UUWT~(6YoE>@|DQ2xAv|wK3Ue;&b1LH${)e7o zhv<~=_+o;g>-`CG|21y4ux|LGLrKug@I9*kSrRo{aSf|84vWUEuV-H1wNg;5d(W-UP4fPvT`$4IGydQ$yC{Xlu9zyRPGTHXMsT z;qaH^`@P_&Y{HenB96v>voDOT%)I<4&C4B0B{Y8nG`*R(0&WE>NnGvy6_TZ4QRg;Cx@ zu+AVNi^+Y>%2`@s*@lu#+U4CaQ#QAsF{F*j$VN?$$$f7b_lb$YR%H8`$T&)K?3Z@( zXK4+IDq+8r*~_{6W?&Ti1<@~3 zT#ujql7PfvCXPG$gnruSMkgg}_zU*P&%f(ntvgxsW03X68h2hIZ)=o6rrD9_;SGVP z%;!Vqvt+z_=zB4E6dVdJ1&`MNubg%2PuKWP2nP*Zdcs$bUe4a;NA9}E z{#Hs~pAQ>-ePZ-4y3z#BhLgjEGfh3;3S(S};WXMh!F#9l)g#NgwJG}*{S+@X8w-z>62wn zJD(n1<}~?qG|$Ep{u!SZ82=SME#>|hpEky82cQtr$=R2+3%a+SLN`bV^JCQc)1zZ z7<>G5)~f6Sa{e*oNM!i>Gm{(7gU%xCEk+(`b+eKili2gSprt=qv~aG4F@G|Ms%b732xU6y!&VzFIuL#z z059@eaC1UNp7Is#M)Pd#3ZGn#ZHf9|7+iDz56uYQCSJt;A059?-Z3|D zcInbVQzrZKR?a0y*lUR!QhKPDxgK#2@muE*=4rk$mnY63e(MZ!2ejH~%_;LyH_Chn zopCmTs`KlKd>j6RcFH(Q1N#;iYfR>(ZkDT2pBxx0!=C3O`Y+>bmViw7h)K^| zbYvdR;s-a-P6fCSy-vY8#an>-CVV3{A&>2Uyht_3oOb~)W7~3k&gnzW>c+gEX+InN z@8;dI^BO2w%2_6tvy640Z`yoSi>+35$$$#T`!8ytOPO?`lxsh67=o2j4Q zdfu1t`_R!R>6$9kCj}nxO(dQK>pMd!-b{A|*P}}m*~qQjL5VM_lpgeg183I?XiwU$X6*_rI%gXNZAzNFtD|bPHdUkAlvbT}JWW$=*i~R(ck&yw ziO6Rjm3SnL{$Kv%XnOV&HdPU48utleE2{P7QeNg(N7jYT7OdZ(`ddg78!Xet_sEag#Gmn6%<{bMdN_|VuNSkM)X1>>)YSl z%1%@LkHVijm^Zuay^LSR$D3*4C)gLdnFEoZWse3QC%uzZ!`PJy7;n&9@6S8*($p4W zQjMEM}qvj6@izBu&P?EJ}M)qnBOmgxWPB;M2L zxoBv!`Tu~tr=eN{Lv>dEc>aHK(a=WzOFPZZKQG=H{oOsjBWvByPozI#rSrbst+wBzd)me*j$=kY|FdNo)_1V6|m)1k*6Q~+e?DC7N=*R*;#UI5oz1NQ-$PxNgg;B z{d;q5$+2@t>j4jPe|cuKoo;a4ja~dK{!3mu-%>|#-2*PaNgbKD?t63ER`A=Jha{^$ zbyBR~%*j-K8-7!JOs(2smZ#>=g{^(RGn z*R`zC@X1q|>8Wp|P$%$feE#^aryC!uj*c&$Yw!n$vWGlX9jsv&`V-G!;KMw4sv29D zEcEVqG4NqGc3|?mnsoWr)fuU-wMomf@ku*gZ>?H$QoXCxnvwOoCp0aNWCU9KhJRR; 
zKluGa+*MT;{5HUwZ1_gdH+JQ6*6g?1k(hwepOr4{S!n``_1k=Nm-m6Wb1`UhEpx@( ze)bC+t#)>1?b&Y5LpSsDfjw_kXVz|8UaviGUPsoGw!Fjkytrqx!nV9O?0H8!vbNju z{%FtpOJ~+kY^x)N@UF1;2+}&H8oH&ofO$tq${lm-u8!`_ca${Fn9}v|UPlO|4Fy$sYWJ2?PHw zyv|8~%DXx>z{1v|x20xZgm1M&4aG2CqwU`=9TJ?*dO0wZLtA4EJeu-`;B*yZ zskOI7JDz=J8~fppGB)+1DR*dbImj@jsnzr^ZEc|L5`L!xo4_OeiY~5t`p+*D7^Qu5 zd_#Gpx#mP(s{cda>2${D15Wo@k(XxjR&Rj+Qa_z`C4ClY>C_ck)YD2+B~})?>#D;O zv>Uq<)DZR^hVJ_aJUXzp{FsmPy_6GsXMTso55<~~YTJDL(K+zU@p(4-yEfGUd_q$K z=W6B>AAm&Vh2FroL2rBuy(#7{iM)K;b6hoX*(~mkP2VdOI-Rt1bb#H?z)&K6Kodg| z&Z44gkTrO`PueP_opk2VK_5oHd`rKwmPX#&tTJTvlizt)tW{~hd%W0FsY3bR!ud0f zcln@75pt-WbkDW6=YTLk~P2J+N-*froS52Yb*F=h3Iwl0$1d zZotpIpfe#<7^{|Dvf>wOILi!P_S$v3E7r|vEF4;Ys9J(3VOs8rOPNSqjn8;@WB@t)P0f`NiP3y{#O#>3VN&RQ>}B6(EJr-@_LHmdG2R?4 z(2`=r<}8-C462#$(!13$?ic%+ze${#it~qu&+CQ{iVd1trItmgJM2(Z(|9lLMb@I| zl+pM4-Q3Gdd+)(ZrTkI+;NIsPak+L{%(lA&$5y}gy<>l%-E^J#1lKvxNcCFZv20?Y z3a!v*s1Un7p&9RR`z8;uJ<8}`&WEC>caeXDw!Qs#Jy5`TP|j|Bv>&L+ZmT4IPC*SZ zW@$4UeF5i;;$F&fUxNMAw7%@M-hqhI9S)K&_anX0^JTbJI~X}zwR<^hMlw`;o@-k0 z3+lxbEC>>l=io<-s}Ec2m_PjOrC8R8dtzB?%*5+bb@qq9!%a7$S2TOF5 zCf%fin|?R(fJ(vHX7D3xTI$xEuNqt;f9n;g7dIkzUL0sIb(N;3EhQrp0 zs$8t6$AsU~r|wD)x;jgC_nl2lEZ_d*W!j{e0*M2txeHyLp54f0?K6GEc{?*OSR#Ho z>yO3UrWCsH3J*$C6t%GUf>)n)xFK`-%+c&pCuf3C!M)8hqVwR3kgX###bvNrwDn$X+dYbJ$`g0DWx zUZPD7zPI*<-8Zt{Wt~^QG546P)Av}XN6`)Zn-&*S!#dsY#=jkF8?jDH@9-Zh;eKD% zXeFP=xMyRYla68&-sjL_h$Fgt{T==RtBl!x#xcrH$^hmp%8GxN9e-(`60Rm;-hbFGYXCO8#-n>M5MGV$%$vxu_|Yg5ic6Vq0uMxHU} zN9aUn-AcNwF&Aa}GID%g*2YZIG}}5W)!Z>c*E)GQ#GuYwd;V@Im$x>3x6r!Kg5(Je zi64lUe0k$u`fsBDe$qa0Ciuk;ec9N7g%QRgWrbe}A9xQwH5c5=dT-)v^C*2g;Dbxn z&N;T3JyXtbvEZr7Ij~UbU?;W3w{3B#AWjeV?RAEvzQ7@MrF;Q2Q@Zx7|A)JGkB_Q4 z_y70IBrubJ+!a(NGYQ~KkX9-xSRylA6Y$1>TH9I(cu9!#n5aF{iV*M;K-vVcR;?0L zB*AjTDuS(6BE1ByEtlTxX-^ZaHGozWFHke)_x`NC_he6`J?HoO{&T+HKjt-Sul1~F z-Jjcf)>@tK&{w*%?<8NJS2UJSzPI#1wDR9NC-ZxeIpfmWnXI`N@v&oE8{Q!NcgBM` z^jwTPAfU76ezSMj*J{=|eeP_~TRf9`;g=20HOPW$PyDu}_rx_bY?Mg15-HXBX7K%-7M;dkZvED=%Rd?|&H@}7apU6dS z1h3lAu`F?%_leR?xz=$rA3bFWwh 
z*z(N5qRER{7mMHSUDhdmKj_f2b{^o!!>nzQr^ zYwU=3)~>>E2sv8cX1y`TB?AYpnURB#_&Hyk^d|@0qeLwJO)vu_UJ% z9Xs&A#AjE2`Pgs&al?VupUgkdbbkM{rBl^$X5H=^bnbr3OSpVWHne)*(w36;LCZ_- z-+&Ei(5{kt{@eK8XX~@|zGO>D8}Wm0u?GH>JVuZ0x2xn%;FMpue9hQ8fO+1N&N~Gq zOM#isS?IRYmX~z=C9xzp!5Y}az1{`;tm=GpCsY1IcHXUSUh=hp|LZP(YzMxlY?#M@ zk=;P)?dZ?f03$osLjFy?(68saVbWEHukvl`fL*NPMNb{tebsB=`*oZ*^}Vy()ccIZ=B5#;Q zStF|Z7l6UHS9;{uwWPcK%sJx0dKX}izhnQNpCq1cI%7g}W8k%ksssAwdJEmPF)+Nd z*sVL|7l3Op{N{sM)^StUS&4L^Cq3n~0UgxNzjAozsh;$H?5Ov954+_KCGVFyKlG%HB=2(U zCG7#^ckx+A4r&Y(bVgbC6|6nI399r6blK2R;G_!@Ht#B#K4si~PdOU1-||1d*Yc9F z^l_c?`|Aci;t%BqkQ*AowXw*z?`qO* zxlG-Kcay%U{|AA=vra@;e1=yIE_UxN>3l-IBXjZfv+pZeaT@+EvWND@R?~G?DrD@T zc~z1jMekhO=F44oVXMi@*Bx%VNt1mvXl+YbPt|?dg*ntadF<`Jb*a%-+x}J94f0<| zJpDF4{*!>cKKjcCsz&a4U?#t~PjAxRjek7;qrN^{DmSiTkCeNzHm&-y=)dg27J8-~ z9u>Bh7R*uDt*6r5GPO1X9~yf^ea`*%9!&1dX0N5b*RbhnXaA;N`z}9=T@&uLYN}`7 zWkGnVv)7_Mk=^WxT+N<{Y#Q*hal)O8PStD7jP%vOZGwM1HeP!&SBI0b5d^O_JVdqu z)lcv~V3jWZQVzWMs+;D2&Fs(A2aZp}j$`(f>T`Ui?%QO0T0ovaSt31x{3FRLnNfQ{ zBdMG01cpZgr~mBzno#r6Ih=Qz;%{gdEryn0b1RnJ|4W*y>;*Ii9-4R=Vd}U|^VZs$ zNV70(+Qz)H?wyf7)%}LvS`ST2pX|azZ>=SX^gzCayD{+RiIbgh8}v5cnZJsk=*F|g zay{|qy7A20u7|quoJG9pH|dctw3Hn4q{Y2yuR$9;X@%aj?Yl}2dD2cJtxhyuIN#e{ z@`fjEus7fAmXbet()xPSa$8D%>q+b7O*>~-$*-KW8@b;}dpLS8sWI@iPSN^S=%3c> zanV8gQhZUg=0m>Cnx}7ecyPb#!2ggq^@YaDcdyNBI-ES}+bx+qf10?N@Xa|nT!f{P|!u)G}UVBb5J%f3z^f#F2+AqC&>uA znU>`q?M*`mH*S&HKsnI57)I%O;2;y;-kPUq;1-leLQ%bZHtXv z8&*HbPN=>zejmBktdZzgMOw$;Dj$kYLciqn$4*cF<=|_iuZ+CS`F+{j<;w%_$8K@$ z!vXdHf=lPGQ@#K?o8smJueb#Ne&TY_|E5q6)i(jY`7vxS{3rMq%@J?XzvhEKIW4XK zz?)Oef6g7#&A$)$+^1%kaIY3SEkld`Jl)WuQP8=^3>|W5(zW4H^pj}PBMv>PrR^=W zRc%AI8Rw(^k)fV_+br?~xok2>at;?o!BU5S?ld1hy_c*;=pqwG}~Y($mr( zJG7&%2p-3NiS{HuLa+V{y3Bgu+Oa2zKV9%UXH6CCD!CT8<%RG|!h-*BGk11??@!-n z9U}V)SNmzZO8&q%eoWN~@i^pC!7Hr&Q`Nz$Z7KORFq!&{1t z=je?h$J zqVlZTT_xY~r0w#iz1dPy=Sh3cn|4i0Nv$XChu*Y3EhXRdq&?wH`*2sujh?i}ylKzv zD!Ja1_K-L21#|;X+IQ`=y`~Kx?k@S7C+$1lwBx%=t{}~fmBwamB0ZOM^R2OYDv`d_ 
zDM#{{6`h{G(EZlfc#;-gg8?M@&84<`blfn8Uilz^!23w&do;|?ZE8k`&{mKlug;n z9W+a27W`IsKy^4i8n(W&2fix#O0-60NuDwFq#& zmw%IYixY2X9OXaGx9C_DKYEpS9<=8GcBkM4*gOKmj;W4~fh#*RGJrz^H;_+t`L6yc zPxZaW!QDpR)DsSV`tg3zx2BcD(8I3myoY+2KBCUbtA1PS;JMwvdatoD@a4|C z)NZFd=_${8+OZ6HrM0h|vUQs$ehKkzf4H)a+I1uGA6zmQ+tD2Ohh!eiuv2o4PsYKb zpRm8>GkaOaj|3mf_>#f+g!HQp9iM+dXL$1 zz4&DD(jjx^9q(P;M4sT?n^Ti~!Kw~y6q+a2o*C)j4UH-JuSBu;h|hWRhTz4e{f_O3 zV5j$?GX%v8vE`4wS99drhsC#L2No{>l{W7UEV22nWc{2MY~7$2cylLQ6W%oR+HkJ{ z#;>d1A98eq2}zrGhpt9GxjNiyyPZFizRJd%qVLLcp}|#)cT>m1=Dowt@ovK z2$y6GH(qfg;#Y_DJ$dNWMz$!`yho1lBhy5XFC=ex+uv~#x+{9OH||7jmAv7$HP>lh zM&5ARC;dnAMy}JoOuEy)OuEy)Ogimz)6`DIyKM|GK1!25mE(+q(pS*doN2afB->wf zDEqODwU05Wl^n}Y@|L$=?ftPGQNJNoq>?!8|bt% zlkT)LlkT)LlkSY8z6GN;3a{iq$%X27H%)C*T2Xvq*o{~F)Xpel5k;T1$5wJC`|q@M zevh$LKFxo@h0GejZpu1{0GB+$TIlE(?(ANZu8)?lI<&+ZSfurML~DyX%#oE#`yM;5 zEl_Csx1;xDWB)B)I+S(D+d-rUFMZssL(T`zEyM1sH5fm>xkh%^sOk`lWaVuKmxZl1 zg8z%g%*830JnAb9m*yWkFG9Mn|Cy}Os9B>k8NbTK4{%OwRJgfzN6pNCUy*uc`}b49 ziv|xov}s4pn!oL+QG2!ar|`R)pFdO>Zo2s1R4%?a3u1-H`BB!?h;Zr8HrG5B8xhWp zMZf^VYe7IyF(BGU}tUTdC7Q>QhI3 z0>G+HIs8`fPNy_z8r>lb_!jq|o(v?I&`p8=s^}WX5?)yU1_ZkcN@(JDozZYKP;_c)2?%|xir#&w_ZCOoQlwWO8->NOX zPsghvsfQdenDuw6{?_|3d09>(l!g4Alv=HqU7R3y{B|O z7vnCx^8VF9M;}!ElqXJp?fY5dE5pU)Z+7xVhIQvpfL}Picn1C_@Q?{-a83i=!FOdc z?Uo__BN*|m6SqO@Ec4)1|X-6!)#K936){Q4YIjZBDV!IuyirFWf^6ivcPV8@+&Dd-1xNYfL zse3@-=lJvFKGJpH_qw=;6#lQdMVjIHwvTeR&8so6VdB&5ryRt7S;3x%_Bx8FFFsZ& z$u`^fJ8}hb$e*!+NH=tC$4SXf=#f7Frl&n+5BP+u9UlZ2*CW8)2(Ob&GyQ&eyZm=4 zLvLd@ViPWNWf;;W&qzn;b1r-N$gGWlhbK1i?(mC6CD`x(JkiKHSg0;YpIYkdP+O4JsuannBhdm2wmyfD}L83UK{ zHQ~q5Sde%j-_1ikE$Y>?pVj{ts!ik-UbXq%?>TKJ&keM_XCAljBGj=^eL%k9giYOt zw3PgX{EGXl2hSe~_rxQZ|0F&$j=b?FJOA$==$F;_0XM0C!3; zMwU~LPsr=m;{~T4FS+$#%yznA+I9rko^?7{^n#IT-TP?=AwO2&GZ{p#6-~Mc-%9b( zAaX6|!1IH?LB+hQmL=D64rOrNZA0c1AkW5tt@BTv9b9_yoPsw4)sG<4>Ws72)WM?N zM)nnr2tt=;uY;CBe?mrvtu*{`IdRAUdwsTiv)7St_Da5yY#{kY{~7tjmT&N(%#v^R zX3}l>raK+^CM!Skjhk0;obtIcj?OSBZ8EZsWLBkBTzhA#9D3N1hMw@fL3?UL7lc=` 
zZbf%j$)A(9jj-jsz5XCReaJHwvY%uf3mNDV^_SX6yymVSo?-zf`DToh=E&`P9l3q4 zE6b|xl4bRukwu+8aOF`)mff34clscc?({(>-I4M1Ef`mhm0YZLXW$&UR&zn~wG@UQC(f;_WWN7X8A)^BmXGZ5RGo7eZ|Se@9MSw=&iJWM<>$>9R!5$X ze5OrC#(30Poc_eueDoA|9HC49Ku)>_IcL}W@Eh1rC9m`yY1?uouLKz{`AZe}`d7aJ z9g-ZbdS%H%F->2EaS>BLFPdbWokZ$mKwQhTz6YN@>0x}m&TZS$EbINwnm_5-EB28*Q-8H zdtY9@ zS*$O%`je2y0u7DGFHa-CNY0Y|#+9`e!tW!D=~wnTJQMrSN_e*9tc3at*<-FJ?N%qv zmBGs4N3P5vyWVK(AsXCq9q--ChIC!xg@0f>e31TiWirtTohxzWuEX$D$tlXesK2$| zmJg8kzKb7(WHj(R6BrI}zpAs%<-fr8r!3`r^lW4_m4!TT(oxposaLE!_~M`ULz517 zaF=&*2zv?0Cr!E5d8(_0%;U-e!)cr3JY$oDr+?zX|5M60b%F1HNW3c-TuNJ-kJ&O4 z_gEn>J(+6SJ$z^5?yH`E(Y2FOzh}rVdz)lj<&mu9Q+ZBXh5IqL4C+1-S>=OEE{3;X z#J*s_$jho%W8gt#Ro{#MLM_cx!&U@Xul6M_=T)f(^yw`)bpYWcCFA4Hz+j)uf zcigtn4=)mS`{Iwkw(Bc-={e#ho9=L7k;~M_?et@NAhGo+;!WQGbE8`pb-h9L&{*7+ z*xKa6P`^bUnCsjy@mCWzJcGVaKKV?{BToG$o-`A>q;j+$*0XOEueLP2-#vHNxMh-m zE_iz8@xGgoC7b0AFl)BrABlCLPv13jrxf0%x}C^=xR~*jELV5iz&Qo4O{y+st<>Fy zjMg5gZdJb92G8NF#5q1_v-jL=$9LF|@a-HZIj5C8r2#wKK^`Cbi;|Iwpz907n`86C z@?}m8ogbE8dyus#-!}OkRdCiuIHcqH`v05Br*j(-@+QbPpFE<~I=7(nNalP5^jx@N z#Ea&*`+OCAOU^1>JT0yNqWfh-n;%wq#Jy8Z+$dy-k#E}RFBy4-{n1VK_fL^AB-@NE z>yh4I=gp*FHC?!lWb!XfnD6fNWOn*RiL}xLFZ{A?N>+M_xC;C)SA#2vAL-1Nz74`V zrLv+k-qMHr)v<9A7N0cdPvDWVjb+O@-DjBvZ+I&G=N&8W8Ra-~eC#-9$<5rzJKVv% zluX85b^G&(&;Rt@_jOm=qr_|6yyuwD#I7wn_G;|dGx5VN=Ix}>>DvzUWnVrIJ9m|@ zVBp2rw{P_IE&dPokncu!#|Ni4#GXay(3RCs{cpUl7~ELWu!?sg@+xl} z9}nfNiTDb%SG>*IJ~hpL=wP?}src$H!+&-q_tPvp$*SJP-m44e!iawg_j2Y^XnA9b zvh!u*6Tjr1#Bo95I6s-McgGh{57~-!&kudRjWO63896qmAXmEAM1uiDs}xB0>0+Lr%27|GaP81I}K zjVxU$JI#?N?fnWfr$&X(q%L{H_c`)p%{=Rzm_#L}v+xsAMbC-d6yTgsYl)E#l zkY6;Pjt26J2Q4i)do}Y3JmBwBl|a5OV?6aOylv2&K8+hwXZrf&Z(zI^@*U*c9_vum zOUyOy8yqL!WXYK4&}SNh-OgOHz*m8-E*5GsHeI(Knoo;}ui~dUQ5@P?Ga8r(a!?W( zeF48HHr@n3=7=q`dfKMCl{5FuJb&u5^Slzg8sj)PH4in8L0^vM)brjt*mc|anYvk= zyR7?m*L^uYYyFKq=9PRCEM#dfp6>F5OLjPHtkm5LqdDcLE}Bb%SKU?<=fca@nk28v z_U3EdbYH6C-=D2<^GMH;9oLkB9ru3mdBwzd|=L+~Fschsl9Rcd^fI9E7WynYFG_?~H+2ePe5?vV_n z{Lfq zq0fc?2{$k09q5s_)|pGPiyvd;~l%u0)<)c~$3`$S6a| 
zWAL_=G*lk%%A)BnI=0C|;F_o#<23LF+A4Up=Mv%{A-;WOV(Y!gJ`eP@)?drKmTYtn z{%z{B<;-bAGj08Rn`EqBw0$@ETs&^SSj4Me)gPC;eM-OF25$6>Q-WP@?eE|mU|T^V zoo0Vjcjg4UKHlF^L|joKeT=x*6lWb_AKRAMj}o_pIQ98v{)J!R3q0+RYkMmF&6Wx)#!O{?I$`SyELtDf9kBN}wDbVX{* z#)ng7KeqR+ieK7XGlKZgocmMPU{{$;c-5XAH5*0W(4!0aj&laJ?6>TtIr~_GS9&e! z<@`SqEOh0!ZNU$3J210$nB=#5!!Je%?lftg`FZuon(tJwgldz z$kdVX?5-pGp{9=Xi{UHG57M*iNI!~S5wAIYzP}BHea%=S+;{kKS36raOu;{qc3RQ82GP=>em?cE*RFYx&j%k zgL8)BHSOq&htbp9(G$O*IV*o#Y>AEi5^1Hq?V&g4|FY{o=vzu_|2)FV^C#@rX3Z16 z>b~yvKLDCC%*E%>QsMi92j3_MM=xN7OLzl>O&QReu#>*qEdzQp)l9lyJLt8QIHB z|3N2r{~i3$zqV1GWx;vCwQJDoVg>!q4)pp^pkl1_4JLQ<-LTJDHlF{c z*F&bvM`R0z*SNmf3oho+VIZ?F7X=as-18wdDRzdAP$Au04DqB*_oiKj zjm(oa$(uIzrIOyBwDI1wAup9!p0u&vwBz{8W7{{*#GmS4e_Qu)l%|jH;pkl$w|{Z^OdO|ca?;^@Rcnk`&@X^{NA)-FO|IPN&5s@ zz>J6b|BhWHyFF- zkpA%DEYVF{w{Z1`1ui~t-{aI#^5A8}A4P7<_8ly!UI=fu;KRx<`$`#is+8eN5nN=S z^OmhAhV4dai_mW?2Yx4Y@bjB$0<2*Zp0lZ@3K?eBW!r0JE`8#`ndx%+A-Rq-4)vU) z&o3`A=M}h*FSKPre0o|o5c#DU8D=|YE3i-9i7c=Q-}_PJ=Iq3_jg%>0t_4GqX`MsZ z%^v9E_~HMYpVD>{r}QZCwS4P*ma#ecf@^|zELbQ1!$RVVfRTMg_PqJDQT9B^84;(w z-g~v|eu$WUkpEUO{vb2yhi097xuF>u#`YnKVTYJ`!OE0@dHFHZ5Be^i+1(Fu`XO%m z!N=L%Joc?a#oF7Oto<2&Q?SeVsKY}3N6?1J^g#~)@@t!nznapOruJFcucUq_XdPJhz3x%1z3)}SBz7yX_6iw)?E^NwkL-h=#clyeHQgD!(#>8!ry&mZc$ z?z>lX`0wIXHa*wb*aI%z!rmNU{xqV0Hlu$Qb6zfQ0B6MlUpqFZ-8Wm~uQSjGIgkGe zXXf69|NM5#>eQK>h1W8nzwz$t~SnYY+HQ_7xophnjJ@&f$h%&YJwVb$*ow$>& zI&g6QhcTev7X~{&M(1nz1eq zPh||Fp0Sp1qS6=g(>}k(STrn5UAbN|D24qfotSopDv2nvo4;<7k0i>09@qroS}}yNZ2=-1JQuKJ+?U zsy5J;0@aWDpUl5G%kRVwbmC9ZKXE7V-^%!_{3gne85!lse#x8ig%jbkjWiGthLwZlVqSc~nq2r0CBb2W` z)EtcSgZ`Wyfu`8~%2{CSRUKzcZay?Av1BxBbtdbHyNim;n9JOS!+VFjC%Ul42M`~# zxO3Nl%F#V#L1?_WYXJWe*>iM{S%u`{LF-d{S{29HdcL)XCwl1mhwFZhh-is>SD1Lp z`1xVUi|v60;e&^|?$e%3TM_RnzRldtu?BLNM0K0=O73@wfRlbd2iw@<{{mM2RtoKWyQ^m7aU)61zx@i$ke7WHc)PoCMQT7B#N zsh7EbP2ZyT0rZ1luVd$#IqKTO$)~hd+Bl8ymh;ajjB z$Qt0-<}0!9r+kjQ=aRRHyG#sjd>$h7`&q=9I&)v-8)hsb*vYYdxN!GRP0JSC7O)!T zzuP??3F@iwSk8ZIfbj?<(v6g%J2x_QhZYTRzbQ|&N$2@8_2T;;^7MCMC`&eC)nEGn 
zW-P(G8W_dfcWl`Ah8P+`+Pg>W@zhyu?>ol!cy{|o_GLWXdpk>8??A7(_Tkpf%{6i6 z@g!`AvR$k&_z7+$Efh0$=^0SC61q{)|l)X zT6e~#;n>66dfRoz_JFOz#i{)9%bolGsP~V0x%I}Lfek}*RX)lQ>>6&K$Fk}zn-F#n za3-*K%qJ|Hy}~ZM{&!Pv@A|m{p2(d8_x&9DndMJ0CEFIld^UG@97G0j@9q%aI#e{J zc@VTJpFKg|^{P7bd4F>`^HscN9P>wVtLByXlEpdPA5f0rL(tce*d~n48``QlTP_-c zZ>D6NCi=keeeQc}%TJ^qCZG7ggY09Fhk1{Fu-n@A2DXC6yKdVmc|$ZQKIF49$43Xd z?vr1RX#Agl(Ov!#;&o@u-}%=Z7HztMd8_-~6rM}itQp!OIsRtKJ`s)DNPAuThim(| zBHy?Ev%X|r-^haMM`>>bdjgsdLG}f-4;S+VXK4<)bFQaP)_nHhoBTTm7I2T2XiV@{ z*2OA&U6}RTkVpr)4_i1UYwq&X-1Tu6o5DTu89sUIpgEI5k!tx=mSQviBKKX$cBQua zlg8h%K5)>Sx2@0FXTG=PzL_w4=7K3i-dPaCKkp*5Usyi^`AGZMxx?-6O!_-Ej!gQP z>GDJD&OfY2{^+!{o3{v@j~dwuUrKmYIqNvdo~17+xw`x_?F#za_UkTD3;$5>K6uae zzuoWj&cjbwiw{1EU;QWk=A+p9<@cwvzR%r-Z;kvN-kVe{-!Ao41-2o{6RNlGUc3JO zo9%W5?l<4ta?VMlb17Fa8>yf49DlQE+cxP!(h+hFa`&`1z0B_CO!`*4FEaVxGi7w= z=Uv*Y{IhNNO#V0Rx@Gb&?SX&E)U?tBZ|X;#CVgy={N=`9 zwZ1#ONB-RD+4*0y+nT{YJei$7tw;KmHogr0o2RAC9V;5+VRqU6=Ny=a*iWbKIW2aX znKDM&{hz^6YxhMaeYX9bNgvar4e=iN@9mNP+a7)NvmWJF+x5!e|C#RXC=btM?)m6< zofD}gth0R<@$!-B!}=HvZCH-4p%uJ7?7Q@OGxj(0CR>R4zI;MOxHitY5#W=r0SX0dzkL)v5$iA6#j@T;16Y&M{(mXG{6AyCF zLGgO&fo4PNLRFICLeRQN*xQja@c}|!MNaaj9OZhuL~_$=>o_6Lzi4d1#gJg0jM3mv}2o{S@N^rW-%wZ8+O zDQGUJE;W1$d3=@6?E9_y5wa6JtMZ7>*k}(2JB#EOewOyS`r~^_xF=4VcbyFH%7J%r z#*J~G4)4+)dF*$qQys^i1s{IYQ@)>yH>^n2?R~bU`QLZcnm*5ZaS~e~hhYRz(0qG5lOZNeL*=DTB!-G+sP-F__n-pW+P^E+x*YTpz;boEKc{5w-I`bqtr>AROVq)LC2 z-FF>dCGQWj;eyF^sn*}`$m-*!Bdb%T?b&cm7I%98F&kg(+J{rIA7{h0?z}%$x-c7G zMa$|`@URW1{x@e5H7^)1Yy1fKqyD4e$20sf7T(iK{ISsCkL)qicdhR(Pc;?otf{|5Dz z9Vf3(#W-(}Rc@bqQ?c@FxQc-DC8I&P{{J?D9*mTAvE`>>ut}?`X<~ zs|c+~wT{hhckJ+rRPcM*aCJYrCslVayFIc0;N6Iuvf)Zk(C*lp@1{yGu;CUlmoyL5 ze?i}ZO-*Nv|IGw!j2M>UI4w8?`P;%?w1_qGkf0V zPXFUug(HbCB7Ki zyFlsC-g32tc8HFM?uy>JXMzR$GBo#P=&j&tfm7TfX!Og_=LPiR3i{B*L2tJ}Z@qA< zq05V)H(t1xp}C8oxq`bLe5$9?H5LnqQ~L@DuVAg0;&(ggJsX&Pli7^ zePDPobXfYla20|>G(x_9*7JqNzNhgPFLU#FWh9#q7l!YH4~qv|&{dardv*2evE^5F 
zcHQ@%clXfMJ@&ALviGOE?%Ur3Ms}4L=Y;b{_M1H~ZB-Yt-+YL@teMI48YsPy#gyiQ>{P|{BeulDvW>{U)J}ggD}81fe;w?ND~{>i zU*2KG4*tcr@E_xc{c|5@FE~@p+3zV<1G;~`?!cG*&prp9IL;b8{t9o`bnu3$d7~!O z|Gf^ab?O_TjnWr=Es3=HY&~)kw(T@_llnOK!|VR$*sHNMz7p(sjEnBlwd$`9`#2-@ z@^EXtY$DR91+TlEO&;Ja;IWC+cl5CH$tEJZZ3ng+*LFLwH@4e<){a5h?f8+}(L3>6 zXbETKd_HrgI~a+C8znnl0xiyip3=9H3xB7!^iI5B+7iv7E#r~xXv^@#3!;-JQFpc7 zqV3wxQTyhSNBeMgyYTxm?HV7BD?fQH^8QcT^`~BHm+k;&Ph=%`?mJ`C$hyqf%Ge8e zoHpr9^_H%W*U6`_nSHq6giF^k=7$e*Cw>r{Z9981(l0sVyw#V>n@`xiWFxakQ@k|{ zUrzQK`m(-x%x%4Vk=nqm{lE!_o&D)#ZbNWigt6RZ_JaJtnm)*3zv&e0X{RL8FR#I# z2A*#e*kdbuTD(Voi~QRV0Sj=JRxB6efI!%(-Vm*qUh!*@adeZ9Zj=*RQhM@1kPrD3_ z&{i@0u#SeX{)E*l&rs!{7r{pbv zZGYatWABST1&-UaNAV$KhS(D9i!IPp?I)x;f8y)g;0s;4SbG7IAKdX=#CQe<%r@hx zzANJ`!r+q1W3oZs&QIes8Jb_qIH`YwFXS1zFM6r<=(@1+?tf0N1`gLf@ zP}Y9O?`?W5+M>FD{3dT~z=t(HtqXP<`LTZ_8t%BuhO1?tPH;zo>sqPvfqmGw?2FA_ z=L7NWV}E^%57`>~wdk(&+7R!c#M3t%c$s+mqiQAehIO~sSGlX?EY{2imt4f2<%Qhu zV4qiW?-FR^ELR0`zwE|SUt(=>9_vWmXW?Z(*ZzRuqm*aK56pot$2Ybo{zmr5&m!H} z%)$93_87f3^A`B{uD*2x&+x!p;=qg%t^6js8A<&Jgp|H97S?u?TKOwV=&_c?pt?0awS zQLjg+kM0$13S6~Q^$YrZ148A)1{RbLt0^94eWC2BRi~c4wD+LF!>obzz`A|2mayjN z9?jF_z=nA83k~Y$$dxxe@4NDYiSmtT+#*ayd6sAL(ZF>qSvH{CdBuXS)baHnMK4Z(4d+NvS7oi8rme zr6lP|tMR72(^7K2C#}kxhJXIqZW?^-YSK*G>DMvD^Cl*DJ9n?2o1)xX@6wp9&{q$Q z@#m(xY0Q8*i$#Zh(0QHXmn`Mmh@3*W&_in$0u$vYS|d4Ba_4-)|9@zWd`LUki;J-r zC;7tAlOA;EwZC@Low{=$PIX+lvm4ftDO(&m`Ws-qGW@sT=fjcD53S0U;kAE1v)@|o zw8lp(Xzv*ZSikrUm@{~y%`x~-1^2mBA_qj@Nu(vi9;9xQw0B0|1v|UnITTG?#D40_ z|Ij|TP1}kuU!ThR)c!VdA$Pz@rttsFu{B4NY1!2L^s9MSNWK8b;4yF;U;c#s?xxBw zx+;;Dtxk=ih`+VC%7i`kWJhdkxTed{65%L7g=J)XzR(bvK!08GYyU|0eV1>TriM7ozl^bcy-2*Vq4sj7$-P_G>Rm zc|z%a1L9*YJfM3Z3gL5i;(30~%W8}*^`qgN3o>O2R_UsjtwzR*2~^EgMMIoQPf4W4xDywF+pUU$@4U+x@k(Hw5;es@!47IOzv@$c+8 z`v!F^{vqcxuoDZP?8r^v3xZE~8^r(daO&Xe+_ME_8P+2+X{DQ4T zzgL+nu-ipAyAs7VP{hAGw?qCrQwOiyT;m7-NZ><#8&cE39oi^+^r-L|#L3qDCU$1o z9~2%*xOabSjimo={3zz^DCX=a;)?9=p`#3(J!gxUvm=?aMb4ZZ(PPdQF*hR2mqL3i 
zfsJC%l$}C-kkkKs=In^foK2cJi=BgZ`1+q^^en+?4#^g*a3~s@qxEA8aqP~94{itq{m@Ug?kEdtl&zE zGw7jN-Sf7Hd28F$`Ji%TtM#UP>GL3Dn1`^#tbkw17DJlt_l7Pf zJ&QZjnuE~fgGKVkddc`>iFS)l2RSP}$rn5<9Yc1LIC1)}WbH~nQ(cVR!Qm&8b6uLz z7O>ZtZKvx#BWvTrUkrO3xT>;oA?s%0vd6M5V7FVkw(BR>*4x^;_;q+N^sNm!V-aDU zgKX=a*m?~8(8!byt`_RA_zCO_czv&=BW8VrC+iy;e#eX1c}(A!eu*7wL$0*v5VlA| z6Jm?pxUQAbpL3AUbCJ(^6AxKS_n!PSzrrfYF|q}9xW}n~i{wqdKe%KXGCJ>+qKm6M zBP*3<`xS<0oBT;E{*C_<`Q&fdd50aZJrU{0$M|;r3WM~^{7;wNBi&&Z<@YQ{x`zB* zrn+?l|3>Q8?)nhYzlXsmAHsHz58-$hp7Q=gdQZGv0p~vQD+ba(gRuYPG`!y1zcuK$ zj>rZSbl%cwLuUP;ov{pB)m5xR$@oX~%{y><*G%&5+fMo`qjVnbZ&CX0SAv$*>lnqQ~6q)?Z~)w?f*WI?2CQ z>+2}v-+>M({BB&w<_)Lb=nu|(op@KTlmAl-_%)xN`bpP)FTuYGRt2hmpgv$u@!tHb zO<`-je5aY)+pO7|yPQM#5Zs!B%!9)_>~mim$uAguO@#pew{Elgh%3AE^Q@<1UIprh&*=|1JCqAZp>JRk;`R0+&9V_+0SM;qi z)F-50;o=28;>88-)7@{UP6PN>o|9FE>f!Y3bQcGC`YFvVpE1c-9&oEIKEkFioV@AV zoo~|rL0EIf%sKFU=|gCS!ACv5?vAH3f7?Y<1A~k2 zU|h_+MZew1y^89Wzo{&>iT;p|^f|`X;2~`c^Zc0*Z!I!6ueq)3z7LtV3SUcD_^O4E z^XR{I>qpu9P2C3-pAH_ib0I#E&@kgOc0K-G%AX>?#aR%|ZTXt=M$PfIDcnEew!xXh z)tax=!=101!{@neWA0WkcO@eyCEG#MJC<<|(z3)0gN5^p;QV4D{bUcEUp)cNLJv+e z|LLzF_VbQ^1K+%pA}^eNs<9Xilnc?{xcz zwW0Q`aKA}krvB0#;LP5wp7`a8ch=LdUvS2jIrK4gF>4imrnZ~01!p5~neq13&I+G( zp8BF`_b~2Ay7KvN057|4quUR_hG>I3?>~V)1+78(%^D+YL%atHADg^&KIO{~PvNTw zi?(WQ&vEPMjL{|TH|ZA=HtWRC0DDTz8(^d3!U&3n%>uj+VMAsW?>yGo;&sCr4ZPvV3 zZ#u9Vd+|(g@&4j$-cNm&_jgjO$)Dcg_sR&s?~bF8p4Yt&HX8x_X~`h&N#0{$3l;`)2C2SI&CZ z+O@}^d#~M$0dR~#l^KI2r_3>9(7!R2ccpc<83V#=3EN|U-ZIqIgA(2`xbl=ajKO&` z!R3s>xt=jNH8TdlF$U*t1wFH?ems23%-MMdMCI0X1ATTyrg6F^k%(#7w31FHyX=0@0aHra`;SN{wwdCvEkIa zS%+&`r_1~&S8ZjTvc~4F&9TlogK>|u#`=(Uh&OFZ33GSWmY#WU>}AdN=3h+yi$3jr zZk;c0OKXnx%-!TG1IH2CW4GZ{c*vKZ|AAY7>MME1+ynT=S8f?Zy)XYq*Ev50{%GLj zV&>zxHQ+kP`qw+<%)4hsvuC5Ti0p}OkEdJj2~vjQR2SYce+HgWt@BiiH3$89&Y7y* zX2}A#gIDw8cR#k}g=Z;y8F5vw`vyQS9N3&@$rk3W$4ST};=Pj+FD&!axruz$XZZVWf3s9ZPOr7L+V$Vm}IaMd^^Lh8^ zRJV8jRo$-g4Gazmej$F`N8Qc{|4-`HHAS*Op!sMKvcPW3?9EAc)SX1`8;@;HJ>#5d zGWtxeWQye_>*;^PU!8jUB!}uetaIl^e^>rtO(Y1b{(p7r&%82yTcp07(o8*!oRH7F 
z`2n)nmcYDYaqbGMV&147m17IO5HGp(K#28n=nNnBA7qCfdBw{!{of-^b3t|?*^i3( z6~LFv#@0QMH+J;{rSkn?jwHmBxC=VUf3eScCQiKO&Mx?1G4|SdxxVTr2)j03*RJIC zCz^EplzH%3@zfn^`l84&%;Cp$Q< zW53RMYkz$NaHWdpe-yTE-Mjo#_E>sh1K0W2mw=HC{7&}qALI9PY{$0~Z(tl-`6lvC z)(_kJCgP%!H~627ZN4_fzHkhiI{y}L)X6_+?YC?joQ1!+>klRSn9fe>{BAKn`ml;V zJT*9W^{KKm>s#|Kfj+PE-}8g*`@sqBRot?cN&-)+q-tn9FLW7q+ zf=}XB-q^4PXKa$>DG1?*qCO-o1dME%QQ0#2#RzxzA@*O^jnnD; z^u~5*yuu0M#?d!}#;!V5ylMk>1<_RTnnizRE%1JlY(q(48w3A2(YrS%UM!l}@FSZq zDqZJcGIA<;d&1ZG2g#OFoezHD-bkL$*M4^IS9|2&0&lQmjj5C50@ifBZ##Eckgpy6 z7iFfl(?~!-1n-~1;=uhecTQ6a6n7Vw4 zx)|J!jl@r#js7hfPkp{Xe)BCIo$sT|ZC&RW|0W(CTk-GkE!|3C#gB06A-D&??W;5I^hVPCW_w{w8h1`8&g&bSDGNE8vc~P| zXUaM+h+ONGb*>i=`w%ssEz9>$%d&Z0jAoj$6w}dsynUhvzKiLwL-VS||Of(64V9qbR*}FP`eT(*O^M7dewCe*)rs=#^ zea_FOrp3R+x6E3n982#9EVa|MH@yHJ7IkcK8=)b_%Hm<+t!NPPMcFOocn7<%lj_^hW_B^ z_K(i+c;OF^etxn0CeR0$1ejMkyXY&HPpE9%-yCN9jq9%a_upkMaTZ^)u(7PZ;-A($rr53N-H1UP!$a~$e;0U&L_Hge#sfdSIPS;h@5e$^6g z&|R=qwW}X^xpwUX@-OsrC%^mx3WqKTUpF9;o~kh#mRzi{nVL*4o`H{me3gG5yDl6k zUKqYEnOHm>UjfB!LN?L)&8fo@i>I;gpIdy5iC1_x@oyr_2u|OE8^QO&p;w3XUSAR4 zYVX3}qOjV8pVD6ZmG;JKqYc4iVOD!HX}+R{U`vr{?;6@Gc++Ou`%A)VbFgJp!_4H! 
zhP!>E8n)JspuPE3rL}brG}qq!z=$D}7x$kwdGV;>NmJk3sZVZMa`Eh7by)50UtAsj zEA75Cc!T-AwD<;FW-Ci9R@@oW5{oy*ZZPecMq8CfZ5vU1wehde_hi0>L;g0kL-FV6 ze@i&YypD24P;++?b5?84+A}pBqV1ZCC$pX-yq)mDB~#F^?YEo`kN&B7-{lZza(m8c z+sFCZhX*m2Lb-#A&W^~~dT-Rn#Jyk#D8 z&MbPk{NNxsvnd3`2X|bpKLwrqjSHpZe6_J|peI zk9gIlRT=n$(=us-kuIruF1cFw5QS*F+BSIn&Veg#;cUDAmI1Q0AN;?JX6DfkvKgu^ z!@0-L^m*LwJ3oD(Ij1(?$#{u|YF@Z>R6M4b`KY`ndGaREF_bUD+Z(rhMu+d#m%DkD zxg6b?m>> zoL6iLXP6TG|tz*7#>DH@l%=dy@-(C3;*BouzC*rAhlfQS9yS^t7J*+;-xj~&VRte9{tu~E>PBJr;=)0jKC#Uo9;!n=u=|B>MIEw~YU zFXRo#Jp8AM_*Q!t28*-#SF*SvxUo2^z1{q4bVIOwv}x}vw3qm8@ULyJ&|di0Hrl)` z-hO7o%;=d7ch{cTu(f40?G^u8*I0PZ2>91o z=KIp(v*2GN!oA88V-yGf8nY>OmTAv4+NwNi+lb;3@Gtv>*`xWv*#tU+2w9)(5mh?JcG}3MspBnxq*}4mGHK9a9;K9?DLaNH>^tqSKDy4 zz^R-j-$SXopW`P7%~5`x?KAPe<2_JdI)2C-oWu*K=!wqniKYl%bqMv}Hc)5xv=($< z#Uj?o9{M`YI>*`G@Qi^c!86X{%<*40*EECopm2guG)lH2*?czBU+T*reIuKeOXv8_ zr}(!pmh3ZTX!*+jM`(G7{?why{~ZmFvnJF(FLM97+m||n>h`1FiMrh3qaR#yqBpOR z*-oJMdGE89-dCx62AJ1_*)O<^v5@ZfI(`p=bM-&%k-K`6YzhHvf2H^d`zz7i_zxVn z<*1x%%sn9WxewWQ*C;Kc(``gQ5xFI4E%6jx^3um+h!)mG*tOMm!;zvKA^OXpKGWp z|GZbHGkF}HZW}t?wwci{pc9_eu(f<_!>@d2{cqOkwxOeKi?@8CAy|8MR(r+!Oj>eW zL(q4QSEsXiqOH>zo*49vZU3HU(&>`+TXogRzT6Fm z&Y}7E@XfA%H?{dg#}5PF*~N!P|IFxkheyAVnd_S4u6}13{mv>5vF1zB@67#H=y$sv zTA{h@z73^tPu;Ftm$css4Z#%QdO0*FqnkOj$JO0(pg}4x7v1@1G0rAz zwCRZ-TA}@xoP+ka^0z=YdO7qdlOOt&o!|b>OeusW#@;me^Q-0c}f0_If zYLsA@8^^M-S7TQu%s+nlZ@Z&C_i` zYrX33LuZ?h-c>k^H1?5<-bVhOeJ|T#HxK#Ox|o-qN*BxU4}2Ydanp(Uhwh`OK$kFY zaKgt+|FJsNw8%b3(E97usXafxBNcmMQ_Y?u&vxr%+(Th}(a*7UvMn3drd*jQxNV2g z%kr+^ZWPI@$P}f=HrRUD(>A;-7pWhmU%7f%>*dHLv1~momj94}sk>)$jr6g9rkhD8 z)A<6`L$ZvwFWkEWy>b6jesdQ5#_R(-w7%HkIpM=y_vN$4)ug^d2P>OU6)w%#5p5l8 z2rx;n9ns}EXgodhp6TEtu&2wx$%;q^`zla_2{D!Uj&Tsft^!)!7dYE)D>0%4%KU){8 zbagSCx72>IA=v)KtTuP^or&ma6aTd?7QLWhW_d}&-Mr1S)mQSrKo=Wr=CgD$?Wu}J zRLY*{=wlIw_ZYdeFSG`jI546$T{p51p*+UUn9YZ@7u(H?K5Ikw>S~tesYh2^nBhf* z9$xfWJ#8R!SbCZ4imH!SPweJFKfONe>SmlzDh0PKPh-P@Zb%pO=5uYNs{(6qPje8 z&)RUJO(yOa_oVhT+q#(Olfk#@?$oN{Zrx6C@oT@A3h^e6bTAjLtnVGE%JtcBZhAiF 
zn@l*4y$XFzbW3)~yihdUjLk?iQ97M;HukZh&EYxpi|NC`w-(H~jB%A7Ao*Xqz^@#A z>^u)0pW>n8qTjEGjzhmkL&r5&S{?d*HQ^>`xOB>%G+g?mO~XeR9n?$1y}GUH>$Xj{ zG?#`8rk1{x{wf+S7}4+*^rhnTt+3$aLogq`Q}#qds|icCv7qN6Y+0H=SLfNfmh`Oh zVX?6ESNlAZt-DI+x|zB3&e=ag&hjB&`FWc?fG?YSuEuIO8UK@xz{#_I%RQpp-(K8w zTWPW0Tr|Fm!|?;)u6*ehLH1R(*Eo3N@-OSo?B5XI%DZL3rHPq?d{*B3HSFJ$PwV7G z#;y(9dgH11Solvat~k{iSL@flQANh)wvoNcCfVw@53YU#{S2B^U)PKKT5B)(%pLJ+ z>x_fw*wkYOeW3F&A6+q(eS#^x0Y90xB^SSQ_Rp_Be5~t~f0nkOv+ER7UTweX4t#<( ztgxzoFa+OsWRne?zwse=RB@I=zB;=5x{&i(A?~pc=2`=E_wNSUr28Z$BY!3FVNAfE z9@2PFw^xtZ`oF@+G4~1IE7%@|%cnD%;P5+om*|z`$prrfCpuOcei`lf46XuSK4q6O zf8wz!_@QK=O7^9Yhxj%B;I;h$WUS^19q;E*myyVDx5F#zBiERI)pEC(2Xs? zE4&5xT*_Za-aAi!?Bc7@M{gg0O?Vpll~?uq+3Am8oI|_bX54g!T5rxT0#yrWr#71`G{BeXn)OogC_FmKn`HZXDW6=j|hyUQ>70dz2 ziwi=WIUTqA$Jp74^*8@9ie7V7?vzf{}TIUAIYx?hRhxe=hghRedg1?A9TR`5v zl(iDPs^=7^u2tg~Q~&E8z9G7>`#d!LJ9%hT`0im<;e2r6Pj`y&Y0R$jjNczVVIGqn z8Qy$x3GK@xESU2NUmC^lXosyM*Fu+;qdQyR)Sc+dk@c;iiw*r3{DU3#eAS)i`o=lb z*oYJ!Hjw+d(8*=f(|5fmZJ^SSdFj)=K6ELUcU?^R;#=;XsntjORwrHu(oftuZ1&D+ z>%pQISo=QYX6_4e^egSf9twnJ#jvXsK?dbGVNy0tWKIpt~{wBbwgtpC`#@&ow``E}myFaG&sm-9c!jKhiN;JD-e zoEFzZi@!UVv8PVL85zu4@)troV%_|K`JjF&Jl?%-Rybq$M25CTJhWA^V1zXogSJK- z+8WQ$R_PB$pJ?_~}g7t!lbY_zdEM}l$aHF)?1;N?N9E|o*Ry4_7Rw~rrfbhY}($V@v$ zr$fN3BHgCZg$|7#0gdL&H*$gCy);^Kfav<$jE(N;n;fG|3t<`ubso zhNkvqEU!ezHs`3h$HKMC>MjeHR&zE0TFpr_&R*NJ8u?W?gQFc<9rMuYsKJXoU}*Ik zL#sart!6y7J(2onv|4`OD;eh^`ePxu7gFD<@nfLXXM}Hv7WJUjqoLKKGqhS`AzA2T z%5>@WgP!&MiHC0QyP=zI|BCRX(MS)v9cO$-8~@DZ$i>#sG0;_eFVE_uHxy4ir$tv! zOh5q+q6Y9$H9;8 z`*i++gWnuHNA;GUGj(`wA-pI8uVt+az85+5S>#6fsQ=~lld93B=B~GnJsU^w3*z6w z+I~*F`!votidQ^Mo||Mhg?VpDEKzghU$Jn$4(+xx1I8bQocLM=glLZ zMZF6qTuPVk;G4hChAD?9M;u}9J`}nw%#s_Xc_>$-! 
z{(9tS^2lFMbFw>3S|*xGt#rP*5mz_z!D4BNiPNmGBYF~~1os3>nM4cAU?v2zj$(nnL zwrUQZLLX>6{_=YN>eiEtZGW4;)-ERyvdbTZeKF?_{I1mCR>Q&Czjht`|ILpnshDOF@4fTrsa($}lxHAUOB9-Us zJ3Xy&jv{M{{+e<6)P|{qM!7J~IPLZ0uc)}dR=a-D3FYCOv>lf_WJdZ7d#tt{;lI}~ zyN>=mr%hMc-#K~C9_MizN3XnV(fzm&X|b6D+XVjzZO(nl?kB;k&wG{F@PWLA<}Hrg zA$GZfe+l?rxpx0%@F(s3g5S+r=(Wm}zc+(FZkMNY<#`mC+zHdtDvLJm)t+fBeHo}u zrhR$k;n(Tkjf|DXS#Kt3+~Uw}BmdKfap;57PuqMS+GYC(ICbzjb-U8;gDCqDNpSm{ z?fG65a(wX0>~)YypJsn&((ka_oJnu7+muP~+an!X;ihHsP*=rGro92~&QknB=&<%l zGXh!^Ug#TJ9F6-r}{p#B<@b8~JbhwfHRhkM58>#n$KL z!z_K~HPXECyU6doYf|#M%YXG&ka*en-(}~K>=c<0hyH)g*zd%rRbB<}k;OtyHIlo0 z$h?wgZ>0YWpLP1p<+JK5!)LiqN#R<`Js2p|JUGug54!ip+<6c;^T6fDt-riG)%vzg zKVr~5<-J^GPFl1kB2!+J@_wvUv(4qv5vPlq?)YFHLISz6B{gk zKd0BF;$PcbQ~KcM8rhEHzVSwWZ~FLwRMUSwjvo{9CF3W1X6qjxN>!BEHec;Q$v>#| z`wym?ctbr0Tk7PPZAYy;bYCin9aMHt*_x#flz!)a-dpIlH7l<59p0SB7f5if9kq1L z?Wwv~Y&h4B+BEcmRFJlL@l|ZTBUSoQHolJQZchoO_30fo@*DD_lPrkI|M3jtJEZ=Q z-;m;Sn5%_KLwAW8{>_{8*r{#)&As49yXH1?&t!c2iTHOB{2Tn>OH!V{9^DFDIxFVy z?+ufdiAR?uELr^!vTPh%ZY}FOxEr4Fi9K$+`-yL3A7;#?FO=R?pQ`xB<2CB%rf=Vo zI{5awR9)}QH3!e+&3ek!K4DW`eX5B6($jmqhp>Kis&3Banmh1)Tj=aj*8TjusiuSt z_Xu@S{?<$I1-ostdBfq+7f}4cB_% z%2bo{{)1cYO8VWD$2cflNZ(DS{Ef6L$(om+(ggZP^ZQ}?%jo*;{^~m9$>2lZO9klr zrZ4ZPDf`h~scVNq<7P2GsJreP+l?=z=5pOD%TwjxtDCZ;M)Os3xM@SJi3`qT50*If zZR~nGuJzh(xC-77Ro>F=-Ed8Be8FFMZIT=P18JC*PP#$0Qm zsXz5f?x^`IYxXhLgUQ#1fAB4vYu<3j1-RBO?%n(P4wK&3b#&V@0~GFd)n!PrswWYy+qqLGM|DkGPeAieg{Tlw2`|?G#279ad;E=!kr>} zQ5<@wzHeP~SE_<{PeMMgNJry;<^7&@eAQw+M83l`r|6v?>)yQSz!j}S#_qm7xsbe z_*Z!Sy1n=6l?cv}Rb;!_0&Ld(dgiV=U-MDy1iIf|_xk93ckFuM=gcL3Z}ZvzI>0$V z%Q~`7X~%BqJ{vcK^Ca4@x*ons`u_JkY5AlHhthT5mShFl^Hs)R2Y(CjM;#f9d-KdW zaQA)3>nZm+(%p9y^Q`^*Lm$1hKkuWr-j|M{`#Pua)7`Yv`DAY{<5$Hm&lh^7lHWXE z=&;VFg*aPYoU~@oT$(#)Ub#^ZvX)XI{3h zbn~giho$V#e`(okWn)vCg2};UH_pGPf9cry%f4|rYXtq@eo$*R$+W3;CE18u*i&tw zHy4rH^rERn%PwX=QxW=e@g!$|ldZp_qsxvuOdl^rmp1HguFE{kv$wIUirDvLpnEp( z-pIYdu2=Wkk1b!Ak8_x_7>9vQM;zN)AN#|lgIf)!g|s%6OdZ477iIbimwDM|HHJEp 
z&$}UW3~NtM^X_c_H!|O4Ptln9#mh|koP_=Mcy5Iq-=tW{VPh{^rhW`(Ow#qWpXdLD_IT*;Ui!Y0{^~y|x6an7b&lQh8XxznV-Ia0=Ka+h%K`-a2z{7p`Q^1MZ2KjgJr|e@-E4g_W z>0DOzEt~e*Qt)HN&u}*wgYZgqt-7y%oEsmWxMx!1QaxqwHIb8Z({j!ZPAS8VsvuV~ z|Jy3-F46xh*DWlx@0V`9$o{|nflKY@H{4#>zn^o0_v`%Cg{3K_m$Qy=cW|BRDY>=c znPK4iIOcgcHtrbw2bY|8BlCG9G59xTo((?g`jv?{XP)A}KJ!%n4VeeP#c}?dGjlyR zKDyAqJd?K9X8SAbd8ll)eKBjN`5oJI`n-$#OB3D~?z_0(eW!oTqrSy1SX<}^7Ocmp z{IrYl3o)f4AGT$KVZlqOCps>14iziqto`e|HufCv|J{R&QvUSw1_qVMwk@6Z9Hx9iy< zWM%2)Y2);+hW{f4c4y z<;-L7`f2Px9!HEwqI|Jw722s*e*XpRaX_brm(_NP{*>n$x@;p(AQw7T?u$b3Lb*Ch zp`~2D3qVhP;95r66$i%DRk5F(ypl^+ke}JRhO;8pu$CyiLI#-Njch<4WefVh?3{Us z9c|NUcIAK#%KicJzn+L|KK8~IgofAUk^>|wZv=DUGe@MY0UGIS~$bl;s09cxeQFzD5#`?vUBJVJ4;diJhG+wcR`uWQvm zj2z)ACm&uR_2xjYqQy$+Q?#gg4gYohqWPcx+McH(>@CGt2oCJ`pI`=^e7U|m?pdVu zk5%Z~nos4;j_OPgvS$<)>Pl} z-SNC$9qwcJhXlq*LEIiXj?g`6?0U? z*px%M3SRLI=-7ka)(F4ofMza*Zl&{`D zWiwBw*#Fv3rM1K&>l8Z=b|H1Dz8vyVi>@mv@6zV{y^k&mzZ##jWA2d_1`dQ{<5Nxh0BIRPj8`1+&kxHa@pLFDZOhYddBI{RS`NzaeDDG z(Z;2PMa!h4HNr0>m%c~YwdgHN!GGy!4OV65ySX>>%!$KM?wfjkNzk3@6Ta`EY!dp> zACO^1xzcaY$7rJ-n(+9g%joCOg{z0v!y9b3U4r$ne3+PO%yhR&S=E__(QeCFr) z#+Bx<58q5F%fBXdaoNKBrt5$mxq?zp;l1$vadbrKsfD2{mu&%t7e5f}7r!lP2=?bN zPL2Dg{0m+>_eSHt2bgB-ndUFfvt0eaM^2GW)`#77pb zjRd*0hj;RM4%IDPd*~7N)f$G*nBCK&?*(t_4T!fn^*VGe{+7ra2kYfIyg>d?eec%$ zs9o>0fqGBOUtT|N-hOO6@)f;g>sVQVnwGfzM~<0w>^HIPyEq@Nq>RBBDk;;Bt=LW( zll9F4V4{E7gWt1tgd)zda_l{~%q$yjCq7?C->#r8`Cr}giFj5&1kZE^o$yRJX4`eb zFZKl6_ytbofn)O*<~0>^meTaG!DYfP;h}KMuqx4a#9tOJ6Q0)RR%Bk!JLT6}%DYqe ziML30ie~jM-VmhR|DQPaN*u=?*iZZpzeF6H4~|`k&A*fN@E( zHy?fOFnF)rL55-U%l;}rx9i}UaI~B}DN)e{xkOo~>$UNoYXhFEYk$tZ4dwcl@04eB zex98#bhUW0!=n?vp?>!};gFx4pyk(i=!5C>ft*70;W?)do`gP_@jj4ak2ndZ59T-n z+==%gqg>yr59Cg!56YS8cl@6CxDvmie)qdrv4{M7!->tX!u@+qJOAEx^6zzyY+NKC zPy6Wld5z??6hEC#-4&ktd0%;fxgd8_#V}*$u>~c>3|<+>`+R* zoli5Ja>|`KLjUA8^7C)!P~v^X&*M16^G4=Bkxy)qI(Lxt6_IYe&zJb7?3|8FByIMQ+(4 zG-vxNdY#0hnWztb{hPvsNaHR8ixET@09PX8RcQ~00l^lye@X5;-UrGK*(L#6(ai*!b= z34NNzIr082^hxKZUy}0$&&-bZG3IsXQZ(64e(byV 
zkvC59WXQ5RxNcW2EOKDK#=HurP2knRyNjq-&#we;Y`XM1bSb?3$F?v;xwebe z97RLQJE%6oE3fSl?MFFNSbY-j%O&@vVkNb9sy4P$hs%?X;9K3~ee{Cx^zVjK*sonn z?${NKceCa`*?4*{|C&d|NY5uvtLBd92b7acbfDaZz;uRtMqVK@Tr@!d<_Y8IfG_Cz zW#3ZUj%?SsOSpGs#aO=4Gv!!Q-xJ}W9Bk3aoTd61IY&=-a*k?EHr2fG(W*}e^Jyzj zZyPYt_nE-Eg1%|p`m;E%xpfHqHd9ux(7dW#cf8y`@G;+w>^gcnYxtiPhoh4@6E&Wr zO**TgIPQr@gwyM7gK*l3O(%THcl#9|<4`y~6)*Qw;Phk4x@TBNQ-PD$$;m36 z*-%{PkUCBqfRlL+aLN#^DTdCaH3wF?1F(89-qtX;Pbb1^HgFlMI!xB|R{HEH%8kTpXPlL<@KFxo-Nq4xA{!u=N{|2Or3LGJgI1zYp*OvQ8mZTx$h7%X?|BR;Ey{DR_BH{1|gKPJ0+AQoP< zIT7AxaR!0%5$n7>W6XCmi)mjm?Z%Yq%J=|j4DMs}OY3dg7btxnxnt9u&giPmGsX!{hlqLYIvktn zW1UC7RIL#R2UW(;dJn$2rU}#&_VoNJ7d~2kd{iBBUOwYjt$)7zk>Fau9+AD`&-6|C z$H>wDs-Jh3@;I}$)OkAVk7jmV)c(gm0sKY#LvdnWTWrFz`*e1KzwArM+m|>$LAY?x zw2thi&7tMvzI=I zZ5JPvA71;7^T08k^IMSY@3QdKB)-%0T|6(Lem`dvs-E5N$Cl*v-};(`E>O@vx?5uh zUbby;2zy}nd&Esym6;dNwg+4S-+m=Ktl)rASH&2s8Gqa-T2_sH6=3}6gR9FK$0@$x zr$KPGXaqSZkHnY{H}~IlnYq*_{Z2mK6nsCT?OJ>)E7+rc89X=_de|S^xTx#kuH_ri z<1FRZXATnmLbBOqkK2BshBH6s7a9XhA2AKh7$zbO4m@<`sCd1VNn zJGl7=aOhrsV|ZW0zj8{-=ce^nFTC@4{!@%sUcDFJoNRfmLw2P^KN3&70UQ)B+X8GP zL#*C!y*8i!tmo!;U5D=vU*B4t$Ltw-<+4*KzlwMFu>O#Q-ymS0zg*L!xYFx@aUOh0 zdAc|AlYcQAepLv+vYvC!%XV~S^yHx9S5L{_1WlyWPLnJZZv<}AdAGnnqQiP-3tm2a z=hYv_X39t90Up}JD!5zd&pv!r7QT-sn2QwK7nf42cqpy8mZHZfpKeO~E^r~Gwgp=3 znr@w=`ir>sb5>v$dZPgjShg<=yBFN)^yUS!?w((Jh}erl%85_O-pM6?*;9FGzkF_X zuF}6aYy5@iKi~YL&awWhy)Sb9h@OwMwj=o>zG9cX-zmHKIA>y0HXvIZKjC!xDVy9! 
zA2e2bOpLelAI>=huD^T#xcWt`x0MM`YjR%5C<9I_OkYhsZO^B^Z1j5d5gVgsw_@<2 zJ-4n^@5N?5ZR0+A4SPj$Y1@wvp!`|~|6ibP)mvg(WmP$C8SFn;2u%H|E6EqUBS;)y z(FoQEK8ek8@E;uszrSq*5>F9LL3g{;!+CK=NIKeoq5DYBm45j*uB9uBFTcupw3ncJ z$+s0vLFd{#c;=z@5bFD=X{^;*bM2|fi$thXckT4j6s?XwE~T`kPNcz*!> zqa2+(oBIOFwX=?pBAo;r9)Wy0gR^tGxE{szD6WOW+B0}L_TVnAE8#8ggFolUCV=*g ziH^_p2+z(SpYfKO-Syttt@ey+~Qe{rtPDCC0LZ0d_GdB7aC z{}=GuU7XANmgvluCpmZE z`^c-Ou+hHHc{*&z$RHfcUZcbZSm z;W>kIc4GH zdYI3Qg!wG{z1rOuLAgN$5rtv{f7jCfC&$Bb-v4DhZqIRn?3N6xyn=P~DB z%;PlXF`IeJiO*xs(0MG4&tskTl*Q+6SsCUlV=-o;&Aj;ZM7;)_M0m zd<6I;(v%DRhxj*7{k?Oh&q3m^cY}YM{u!I0x~_EV!k=(kysk=M(82vL`Th|9zvlao zfwlY?@)!Jtv&#;8Ce*%woa&*DgXC&^A6z?>r88tN4Yyr*^%u0{|3|p} zbk34o5bQl8eqNBy3c873J~n9$yhP=%0N=DG1D}hC4nE*)pizt|um4XEzV96!`31b^ zedL0^`3G{m)*Q9}4Q%RfiWh;$szc|2jRdd1$vHvQ{VzROivR2*WTWa+yd1$81&Fcpn}g>7>0a%vhJVQ?_wlIuanaMQ z@S*9PufV+AL~OyK37VIS!!K~=Psu#Z&5-kcfSu0!S?R(L_&y2z1bg(4S2SPpTgx9+ z%UZ;)xW8JyUX5=L_@Z;BuIHWRa^i4f>;(GKhQA@poKUOvsvtaY$7aWG^-#R-v2lDm zL;R5UTjTE^h`)cG{s^~a_xwA2G71g|FD2u{p0pRNJ9S>4(QQ56_>~U&sC^v8^g(43 z{m#PKV+;Nr8oL@=`?v6?-?$3e!(VmhV*L8UY`;Ff82g-_e)I$P?1C3YMql;Ky>gxO z{U~xzXmukrZlTMnFV1>{{D_?Yi$7cI4J#7X z8{GaTt~c~ju7!0D)l&lAsy~9s#p)B^pGu#G`s~%$xA;ytbBKT0$?~tZtendEx7YT) zT^%1c-#_TitEMu)d>>)-iKYYX}=;KGUWFY`PN*jjy`yxQWWKJT@C2NLQlr9OQx zI3D}s#*BTGSKf^dVCmL3KB2xnyjL57y~gt^?)C4&MR0U*=ESw1&gs9k_xrjzCoEHT z%b-s+4?JsQEV(?FZH(@<4xQ^G+wSDKyFQ)6^&I&{4}(3y(knZsKX2#cQ|$j; zmGDFP6oxx`d?nv$pR4q&6Uu6zFZ)~*pIMv!@@jNy^dtEQ&Pr%Yv}!!JagoYOKURBn zf^EFKa!$!!N<7QkrtfDrj!XPT`&_fx$C~)8oM)ehQQT%iSJ4{7AdJvgm}~SI?KNKx zjA}g&jO?{`2S(TqC%`E0gfR+6Gi?~<4S|vN#AlyamOWYq`HWA1k@1s7Eg!Il3H$CW z`CsF0oeV}SxTjh1^4ZiSU+EZhfW&XwhrsAk8%A?ZfKd_8qAM$ZnPpgA*6zj)KF)Fh zE?vgct1@)<&lkO$@h{$K4_cNnrfxs*v+z%7{<8YA*q>dgz0dZutYcrU3$JJ(PV*a& zTH97-*!p^J)?DxPys|tRHuCFrk5f$=%R(96eD*K}9j=#?ee(CHZbq&N& z43@KYuXgI3pE*`*AV%Nlg_1|cDWk_)ef-Y6>@}5FU9#b_43E=a>|wz&tItb+bDaLl zMn0BY7yhzxWt~nQ2KATu>-EO_YdZbCIlRb+E~_%r_!UwntJzC`(aqG~`1`Ef+~mJHK5l}4F(j?M;Kg8nnK+@DWU?mLuo)^f4+R~n&x 
zYj|Z=PcHMH6&h{(+I;AoIn1T}Pf_+*m=lS~ zWXxIEzK6vR@nvZ)I`G>`*1hyc;`X?2pKgS5_ae)Zu*=wY8C0Ff=z9L)^E1(TmPx+s zp}cH9Gq)0d7vII7|0mD2ZmRn$_r%tVE_8i4*V=2Tu?#&&zJdNE#^LlG!oQ$3z4%(y zan`ErJQInq0LGFvCs=7N1+NCispsyqbjEjz#tQsY<}c49ZyC4p zI}2X7QXbuWnQ%jThmZ1M)`eP>GmTuxs|{mJes$HHQXl(6%&a#WsoOpi17E)IQ}^h4 zz1p*CO7;QE2A&D8b*;YezF+XxwLzH*{!a$)4&beQ$j4a+?*9xQk=Es)Z_)fuT-e8b zO#}OmiwE&A9h$(;EG>`h>=_}e`C zns>+QMb)EQz++!6V_Z^w*CUJeah8zoE#{`t;Sbx4eV6vn^s#<1hdf=t_u%f>de)rN zR7UrgI`i?IuK&urg}e)&ci*DQ?U!|CS_M6KO zFwnK2oW4qA_rzxDSu^iU=_>RWoPO?*%EtQ++@I6hmR%QR z{?7HW4)F&svdJGl{#EgY5%5V*2>$ene42_kaQT3In=SBxP2L}KHrv;F(uqmf#W@1f zcO+YmUGK!@)DtiCb>>U5A?*EHk96G5>CSnwl25WFjK2l@KfH0!mZ`>ex9ympJG$x~ za7;2~muv|91T*b3eK#$NUWyOn(%^GZYzSALjs1q?3~?FA6VbQYkHY8Lvx5D9p$*B< z*Xi3FyHCVD;QyTher&q`yIWnJvI$sj;a+_fPdN*i58z1QIiW}bPf@$#D{4<|Rq~sr zy5VIb9v$qL?pIT<{#7nH;oU^)mrY{RjBI0MwEFoN{Zv`WW61*d{ZHKIl#xu(^9IV< z_8HF}io;$wHdH2PevU1;jQN$EEyZT_+WhHO&HYM)xp(~7FYs-tEgMhEEQ2RWw=QMQ z^WjMa`1+2(i*lh$dv6cCs6}%GzaOA+_|VtrTL3;(10Rx3E*Vh+A9?{kqm(Y32``f0^S8TV>+xl#>HX=zjCi)R2t210o}=#?Y3m3)=LkG!pdOwz!aMAG;5(}4 zJiZ&~4?IV69`(k{kS{@H0z9+F0H0CJo&lecOlot=y<*NQw96&a53Oa2pIq^~K{=jH z|0TyO_;2CNUGbSu!6)ee17kv#%ZFj#t6lEJGY0y~eFI~bT;iG2CeNbOW9tCaDOvOu z_mWF$<5tSsIsn(pxfWfYjHfs{Kw+4cGe=|IS=hUq>M+rP%y+=jt?2uUw1$^U-Z%MH^YE!UW6ZY4xZWP4_QtRuMPq!){C|vb8e`1M z++mNAbDK^&M)t>mhmz4+7dvr`lroI<{AFalY`;#j-O_E($;goZKqh!#xtD9E?SI!8J_*+;B$AnJ{SBZH!qE?mrdHvwfON) z^kng4@mvdiId9?T<+60YS94MsBnuohUbiFXHuyvg1_UC&&)r z)lj*i9KCJqsT<(o$QSh^o#&DpyJfqGmjmB8ueW8M+H>W*iF^=`w{a9Xrg!4=Zk?)6 z{IQ<-`QPyS70iom8-Hemk8%#J{EeyDyVZBEBPR;>E_;||6BxUR{{p_^x2wqQuV=-~ ziFmX2y@&_9Ja;<%=}d20uDX;I2<3_Q1G(rwko8IcVU?+J6(w&Cb-g^M3L0qWP~39XGZTJlg+v zjazv$a}vhwyz}zT88>IrQK$Te+3dWP=qxS|*%0xopsR)lp(`UD+U-7q9HEjIEcA z**RVQf#5pHiO%WXu=WQCPhI)&S)8{@K71SB&KJrD@aJ6R^OSrz9e#Rx@VTSt@oGyl zAxeAMwCBo)v!El%2hox36M-hq{?TB0(a7J>(e(Yvc$Z0gN6W<5OB}ww(&6hiKc7zf z(}T~wme775<8b-8?5)WE7|kD?{~L0vo8(yckU!pwFMlw1ypMLzHHncXXQy>LdXoGe zz)pF6-Mt8kC9GBsPH3=Ex)f*Yv{1&z5z!Gj_bPcWzw`}XW&HBHPl($?(COl&o?l1V 
zTVR+=9iROD&btokKyGeRUdX)4b3(Z<*>REV6zu@Dv!#Cs$QC|n%Uv9s*$0i=-x#7|i-%qvg z2fv>;*u?ifxo&OayC!5Cj6r=txw z@GTBM#T%;4TIkK5D{{A+lvCY$r}|HrE8g34<@QhW6mO@QufSXIQ8glC>nS6- zn}ghC-C%7lxqQDUub}AKDSK0<>}c7*iUY2fJ&b>Qlm5vo*ucN~^4NdMMy<5Z1h72s z+I&YmPBxhOat8e{snfEM;pBPD<=#l(d9q8yL!@JA|C#uf{OYQE4RuS0RsFgb|Nj6# zu;6E&dWnPM*0(ZifzOxG9bV=?n0HfV1>-e%t}`S$^T==F<4gwmIgjc*T=Wv)pq$tJ zcV5I=YB9XN2%cZK?1S_FV6UsX`*G!`xa*%9-{4-~M15cDE#Kz-?CXZe;JqiV18rhY zkk;S##n<1fSbw+ux6GTnS5SN2N|0ZYl`UCD>!z&WbGbLhWgotusTGcoNoy@PJu>*b zjJAiat=Ra54J2QaWcF~rJ;b%UcCEM>8;0|D+kUW)CSz+dWh4vx%K4}4r~C`v(gCh? z+vELK`ee&{cg+>KAzk*;gfd-xD;X`FMs>=E_I~`Cd~ebl4#nGX`=s)+#fFZh|IX)# z$rwDx!8C$%_gKb^?a0FBqM2G_i?n;wBDekAXihiM%II77$KHoBHVrqnR}Kr#Z5TdS zR^`4)Ig4`E4dlF_Y}DJBk#3F;1cy=HADnAq&$@N1{B>^m%aP>+<dIG)P!)K5&+GmP~X#;QDH7uw}h%dBM2 z3FQUr^*8{L(}*mV*M)y96tDvz(p>Moy?MVVaU*mb5%1|O~&v(@xqscPr*OmH`LRuGEK|9h1>BoT-!)jW={yVjK)awUdQX`3Nr1JeO zo+KN-iO&b>rH)cFDdg7EP8_q^P~B=r^$wKv86ou{UbefQxKv`Znrb`B2g+wtUi~eo z&Yd%~FPqrYXx$Zj&@y+wX^q|era_FT=0dn<8P2oz;f)zz^vv-MC5QXIHH;Xh;o+Wg z3taFKH|Qsh*_6+Jn|%Jg%%ctOD<|9W%tyV+tOF)Rj(fe4k2!Dk6V?T_4sh24iqnaG z=;Nm`rtllVuZQogJ!zr5u}PuQ=Q!K+xEWgWxE1PHY=p!QqCYpoM}FZ6zZdg{*X=f% z+lW7t-1v-Gwnp#;{e5j7XPKV=x2m2;jFQ=X;O;fa!P(d%#07u{ebYlaBW&wh_IzT; zyM3^po)LdD4uuTAK(JF9zM zgoc{T;M{+Btw0mBxQ6HNgO9pDIMrCLn9G-^8siR{`L&f?H>C&XJjE{`+WaPXaj=uv zFL2MAwlAY_I5AM(iIGX>q(Bet%=wwotbTmJvx!N>r+pUN;ic_sh|y;f_oKp2<}1pE4W8*9)$akfkpd-U~3ESvdjx= zIO8nxVCu%NnBWjNvu)_QMIIfnx)n`*-=VlCh1Q znA&3t((O1}mmb~%?+<%Dkwf6U#d?S>S8ie70P`w3V1=#q3GydY;49ix>^X2twb8tp z|3fPc_(oPBME^R99o&Com(jeb*f?OCn-;zOhQ5JcP7bzS4lc1?)tZJMq*T|b!B&m$ z1zTZc*4~eIPI~fA5fcT(axsg@||DO zzqfp+=<^=hIsGp-%)5yfUD9!`#*oHYa#?{t78uRPfPWaeXYI=FtG>&O_uW3Liul+H z)_0X3L*tncuRC33XmbJiXrH0Y-Qb?)W;D-_QTOjCf8b4{c^!3kz(3Rvz5mT$`EE4% z17Bl~_9ih$W5az5#=)9Wx2{Ne#dC)Q4WApGcZ{kcT<;eus zBu_e~6QjYide#@Cm$Kf;ehu`mE|alG@fUQaIQQ~{h#wz++-P>zV6q~bp9}tca@^ZY z`Mw20KW2T+=vEoYU&D7_tDAR&`&TSqajy?N+lOql3XM>KX>|4M!UZ<{y9)-%4ypN*bbpMo3c39ZUi 
z-3}j>FR2B+h}fmBW{=fW$TR6m?ZX}VJ3@VuN6{hscsgu&q(f)L^j&@>eEMD|?jwtF z0iTFyuK&(w!=I$+d>0RM!1#dkC-kKqxF3bD9D}bswFMYT_9kD{`Z4d?h#^4V+Gezm z3|+O%Xx19FtHbT#c@EFpy%Qq;eyh>E8{c!gcYNd>=7cz#ZK9o}yq{*{#`wsaw?W&? zr{3wgPye02Yx(*zorN>_ zPR}&f*h<+pCOp;yuT3~xF=0;s3M>Urjoaa+CVGVt&>T_sn}5io;sqo9a?DB_jwNmdvk>gUZYjdR3<_W@ai|Kda6eg=Z-8b-(l2gPf8#; zIQNB7#thw8GS+}EIM>hp->7Hqdg`IgD%w$fxzv}lC%35`d=d}p7-cjwCxLo!r-gly zvv@{dTm9bp)?9wochvNx?3vY+&v*INwR4)Nrxd-+uYU1c?Fr7EjgF_bD`>lXD>@Fv`OUvO_#pbnSRZ|; zy`#3Llza7=J$p@lz9|5vXHvIa-%oaT&3QQ3TRNkE^~EzFWcjO4_i%AM$R_EH|gvXEEe{FWB>A_Gv`_8QY;b zIJV$Y;Bg6Xxfu9d1e}WD^{f?o9UV*Uu%fY~1lo&E)!BN%xlPP*7Gvm?kDK@Mb(51R zaGU0Bh;d`JH#Z)>Kp5QwE&e za+iZkyN=_hLVn)Fcy^JuM*4Ft<=JNvQe7j_q4n-E>JnZZf*uc}D<{_3)F0cS_>Bvg zmqZw+9gpq!PoArPo!%*d^<0aOsE*&`Ul9G`XIKI)>~GL>WWm$q9BN0m{a%XEd<3~t zz9+k@kT@{aE4Vzpw|uAO@DO~r1e>9p_=N{}-|j^p0T$5dI z3&!vYznD3ub`Nug4be^BgAjWPLpj)$S~uTFT~>Ny2JzZ6G`<(%8-m4)?57;Zd-Wj? zyiM$b;>z`GB>y(u@vk}arx-I{fRCv!W!NIx)4U&8s;tIc$R3h>;3?i%irp$$H;(r= zjqn9qH7A?tbMGhh?tYsRn(E|Bk#^6 z=xc%Qpqja9spK?ADyyq~m&3WtGka(9{pXw>)dwn;bIy8nL^{VannCA;=d!O2)-8|ar zq>aS7`yNZE8$RR6;`GSJ>@!yXRL_&p`x(?DIF+G^b|TOxOrfsIl1d8WW!D1retP0IihRtIKO9eTrWF< zy{GSZS7$$`oMmRDs9$f9Z{O|P8Te7qG>rCoXoHd5sj76*C`spLk0eo5b*fHG*Ri!y)r%|@u z)AQ@j$AaMk{8rV|jIaSt0QaX0fwgR@s$oX2XtoL*&~t39B~`@I#Q)Gv9p%`ME<9W4x8^`F)&EBiCBpUd zx^cP@?3GO-J-8uRx@tPI(-SF&7Yy4XeV4c&*+uv<+SIOL%&ljf@PHkPzf<|?;B^Z= zG`-imcI<(VfxGOL#!=&&jO1W1dmcm3$@YnqZM@?TO0kuWeDwhH*j~l)jzS;thPM3Ryzo!g zpCA4i+b`t9Km4G%RsR0Bf8^QvF8ge49tDmbNv>blh2Ht(Qj`24!CwD|2S3_cOilxy z*P5QtnmtBHaYs))abbAtGxNgiU!3L3VP9sBadz07wQ~*q2z$TIehI5ddRfcLYkPRl zH&c5w-yZ7B;lIV};IE$5fdgv)|AM_Q!oO#=E5_@XpN7W{&=Yoe)~>r zYYSP3Y-(BglbwyMfn+^^uFd`Vv|S1Qw0kEb57HwCzJt#N-A-*TQ2WRMWM(7`yoS<9 zCAx}Yxj(+T4EUA;=lQ_f*=H)<-i_^^!Z?4-IAvQTw)Lxowq|I2?1@&pC0x_Dfw4Zp zSliLPK0nqepBrl=KGyw1#v0W9-k2j5Exxy`0qpPp}Yo{uzoe*-9=U6|(R^jb9y62l0zVi?I(z zrj5(W3)Vr?OR~lW>-O;9VDr7SNZ@7gpY@|%o~g^*cqV`99J4VaIyu;TYmzlvd)WSQ 
ze_S8)Vb1^uwd3~TdfE}q^zfa&`LoBGt@wnjndokuwf1oQc{yqExU3~yzCCcn$7&C-8OhX>k(j)7xzo&;N$9KuURj7EIRR-UW0Eh7r**^Y;oCU z(bJRjqN9!0Tzv23IBGranK(;&Q3>y3)06W!gSs_?Z=&$3Rq=J%&A^s#yBp8}rPE-` zMpglP^zTRuZRfHUthLz^bav^9N2oX5oH$!>I6U2%k3-&xfeKH-8=rwsMv5`>5w&r8 zaaik2bKz~1xDUwIRSqQT2!K1~_}O!~zKAkcV=G(U%yY6PvF{$Ai1_M+@8QR1?Op4U zJ?KbZ(mj3w%eZIJkMWm%f}didIez~J>_^8ZKPhr5W!mxm(~n41LE)UNR%ai-b!Bj_ z6^qT#eWk{`1Y6Y3VKO;_(8LcqvFjdiBWmlM8OV^ykw&jEcl$=8`7LnX;q$V4CO|{! zfs2u~^?k7!8gDziKYVX&z5LO-{u$Rj_>ZM;e%-|%a6&$*jWx680J~XV1iNhf8mxob z`uPIBb?II8uBG0|(7jEg-tr#Bic4oJ0H(rC*k=o!dMT4c*<|Q*81fnae_TGx7BS$jqD#@I(K9tvm|ed}eyvaNwe%okC7aQG zS}#LKFpgggYJ%y%Uww?Dmn~YHM4d4j$1gt~WW+UV)#3rLZ?0M^|gn#%hmT%pm`3`8l9{+eQ zeG<&TjqRdA;nRJO8qM+nTnJu7>Hj_(=bg1e>4f#r;%wmz{7!h1SPwXIQr%NL#?}E? z&r}>yUqT#_`u`MdDlbvF4P(a#Qxf-E90Mjp{VA@0rU;v`G47)|l2FcB8zjFnzLox! zgZ#+{KXB)${d7;BVHj&Wz#(JptWd{vBQ*R!jpn1QQy&F~R*=VqxYi|o$OU9@tHHP8 zDWZdu@e~_pkmZi=+TkPl19yOCa6F3#LB5Xc7J#-AEU~r#Gk7lM|j>vc|B*$p$O&e z_WM*%$_e$TUHL>3VI!Z2`c=ZXT%U+rPcr32kK$vR?=OMx%*!i7?L23$2Yn%{Up-N_ zNQUgjhho%U5NcxWS2&rvuc&niIG?u9)9SwaGVAw}0g3VjUWojU$4wU7boGkt@{UwT zev7um`}2RzStPV^%RLv^Jf)6yw$o1i%SEl5_Z!WYWHr2|@nuhIy4s1anNQ_AjcKU7 z*bH36H|(|Yy$(NK!nhp2j>$RhS||6Sk0jY~0F24*7d#ky2)&#Bsc+fn#YTM+zJH-`TcbWrt<0Vh4WO4(07jyo7R_GpN@9D%* zxcz#Wez~xD<5wq#O@|Ge6VJ1E{Xi*#k!@Rz3HLR@3%yC9L!+(mTN8ZYce4B^_3sJ( zHm%3~d+&KYAh)@t3snmH_#mp&<^@BcvA;pvSVa?@9B zK;LOC`sw+hKWqY5AM=EMO1*DCNWJxvA?Y^0XK>xiyU+0BdiRVo7dW( z?X{W?drjL9fW04SAMV+@=m+#2ox(EGQw|nocXh5n<~OX$@Uh+>Fef#Y5_?;oNBlnX zYGFejq5d*(xU4#FPCKwnqg)5iRG)k(-KW^~{aRzpHFIZ0zfs@X$$R`FYjb8Kh3@fK zWz+Eo2bs&B8wclVgy^8a(e2aXbTAaR8nFGq74(aX(djQjr+3!w9bdyAY+p>3C)n!S zY%VOAYSgvSXBTGz=n&{hHI+QeYG8d)HWB?#q{~9)xLtmBo+pm!S;kb~8~c^URMIA{Yv^5Z`kWi@5rAM6=P|5yj_-i*y4{^*m>`nyK>+vb?r z$1H2sonH@z(|pEk;#ngr{6_Dll}_y9ufXY`uV)tVIccnozapQ3;#uC~Cm&qxs#*4# zG!K}*z*g{Qq&YHj`AF9ElUa|hshP9uxLMaioB!yGjrmsfvN$CthPaczQtZ-{(HsXWxf(-^u-Z_+(z8Ur+LXnVH_SMr)^= zotSUM6g)}$y6>21EKz?g#bxvDn|wRsvzX#{>@sWcN$9=qW#75iifvS^@+bV3d@_92 
z?Z7iPcIRtR%h++gcXE>pqh*2-xoX;xu{GBk&7YiYG|Rr!{dc&Z$R6fR#b4h!G2NK) z(mv_^U*BodP|szFV<~>->8v;KFS&CVKNxmdFaD(75dKHWS@=qK`!kIj z9zm{YefK%=(4haiUd6TaV#jvP3S2dlx`}6+y)tey5({TA7dyat`LC|xUu)j#&(GB_ z`uI5ivM2Ox3)eZePkdrvG2=+A`@(qL*+c5)K)u<*tC`g8%EDCeR&uIkh%cj(T(FK^ ze<}BOez~v!qq8KsBeZzmW;ST&n<=!b> zUid=e@UJyZ$-~w?diQI>`vt&F>w#Jaj1mWvezy^B`2x73>-V0kHpow{Ha^3bHk0@e z*%hC9C!^bqk6a7P#p`u_TyZT!=HS>2XAa)D=fpX{_pl>KKTY79NuSi8JH8G*ems2E z1jfRmBfdbRJ9bB2Pd)ZV4YXZ=?=FfyS;4sSdsnQxY~-Y-Tr;?CO{H<*ePTm4-o2}3 ztIz1(!yJ>xrE53*aej_*z|VIDx99et8%IR{CEiKws~WWMwt z;otelS?NtV+!ufg!ddZ2$ulp%oR--kqa-O*v}tZ==_^*~E#&vx=w0ssi+7=!8D9q0 zeEUaaYNEeH^4dtUV-498LhK7Xi?!;A_?6ZNjN7VuI_MAY!it~UNX+JR`u$%#6K*By zxb7NN?o{k`>`N0}d-3-N`L6rk;=SHoc9M6EQ=E4fpX6N|?`$6i-_ASX{j=aJYd{I{ zp1aNneuTZfpN|^gMK>^SdWh!LLXSk@2C7$4VyBqZL%|#DzkoFL4(#D zHonQcK3(Aterk+q+%SSM9A>^z4k>F4}DV}D5; zMt#KQy7qm{E15PD7>r_0Mx)DnL+)C1qD|_`HXpDkH+)^PQKjFv;CtB%yhaa!iTv>& zGj8dC(g|e~EQPOVJwt1@Q-MLJw|?CbFE-wE^MDWg_Q}1hW%H~Q`?j_LTkhuxAO$1k6 z7_%lV`#(|)d@F$CA^hzc3o(b#N>=1=#5?>f0j}jk(vGvnb!-8?5#X-+(~?-X zAybdm40c1G9kx$BEwcF$dmmnx_!PcMbVp<6F2B*eM*Nt#H~Hoj!)SfTUNEkuzkR>K zp&9u~MH^$8=Utwp?)|{`e%>|W|8w8TS7^)6n99_{hoW- ztg=Io+v``(KDb!1_J54Sr(M2@s{x|(fF?67lyM<@}2?5 zmQnc7SYvBo{wsl{0iSg3?8a%#p>!p0=1$5HAB1hlH-@=;o|(LRp2tVtw@LUcij7|7 zknfnxK2ma)%gq4$aL(ZA%FV*WPKKX)xyI?#wrvA}t`;h#uamCqLF8|32xQHsG%kPdI*yU_Hna)b4lU?VrJa zBl|F|hB={QS9|l+f7bDOJBpcC#-X-0GlmLu57Fg2@ND#{-YGw`>3N_(iW%u=J;jO1 zPl(0eYY4CHdC^>G9;~!rZ(4S+b-a(fA3R%3AB58$Xv>@3h%Y5A5SwD}LqNAl3rzc? 
z(Y)&(^a=1Lx)Hi1KCjZj*T`;9!5d$M&#q0UH5&RJabvMxQ$P0(`Z2#KcFFCx8^h%G zKMD={h(9fW-x$OQHyCHlMo*Eg8abW0j>df;mgK=w$JZGFo}J0~I`<$e9=kA1nQ;;P zoYnXXa`7=1#C?qKF)qb79ia}LaWxHJP#a~mZzPO=Fc9|->bd-YKEBNw8XJ}12jsmS z8zq0D;-VVIJ9Bylb9yqkHPC+lor8YBMqniwkwW|Nb^NpeIt4#mI-N*;t~_?%zwJ#z zz6|bVGikq__9bgBWZhlm{c8qw0Ik{R8_~JS@@-%@@N?tkt&sM|Ndb9@M zpTg#q*pJ}1*gxd4OETu)QDUa}t+5-wqId>JJ~_5Gu>CMEXC5&_>`yfB*$`m9HlbfC zE-dE)W1N*fejEW>Gg9h*#NNV`+795JYxq-+toYG5)AXlcgSL(X=Bp{A>*xw(kAF~K zb79~&^u2SAX>RkbMOL1fB3#eq8@G&bQ`e7qj5Ns&_xVQd9iHl}9Tah|eoIftJKO2| zbowrxL3K)BAaPgPN&LAsecNL$oEKW5{lUQKTyy0H`An)C#I)e8JeLH11xFQf37ME%B8ywwn2Zae;2#kQ8MlCUq<@ja-eBaJ~ z-^4P)&&2yO=(ZNRtpyK>4`*+OcVYQH_WKhfhW}_=tvNSTb4|@o;{ChPS(fB5j^8tn ze(E0DzWQ5oS8$!$HO!R#iS3k3HByh*Ji+g0-t?_~Rsk@#*MB#G=a%8D*R^Api$|!8 z!z=J#f9oXs*WhQd;xPx9E1Rw~Hy^_niC>C{u3Vj(cmO*t;yl*>tnB(lw_{gDrxSa? zKl{k-Se^64bN*RGj*m%QowGM)#723#ql{PgTliNU9rzO}lZ+k3%#ZFrqYik^@@ee3 zhW6#hLAH<p_x^hGU(Yd@Hn>Vl-uWCwI;FPG~yft=}+_HFeHg-d{j}=X(A7KjA*z zNN>8GJ_y%`_Ty9dOdIo|zUcZot_#7vx%?*Rf4JfuC&G6oIPIffS}45eZQa$P5&A%x z?j7jbrfD?M*Iwm``^01Hp9oFOX0OmF@fY=(@JqX=>n|V|H&|t6l(NKMv@t`lK6aP5o^SqeL(_CGMzIPfHL;rs02d6~0=4&q0bE^hM@;Qu7~Ql1)Tgnm#&{ghoZ zjA!ITVIOVx8b8ZJ7p^{+yxA!M?85&aHn9F_`HI- zPK3vN;N`-j1=*VjkF5jze$4>C&t<%V!y2>AfdhPf7ks@^V*&;*8V{fFT(U@GbzveO zw#K)HI1r6xBRM7vzWrswc!Ae;!Oa6rDwa(&h>p@K+$%Dy{VrZA=Gx6CGeox8{8DkdgDjts3MJe6r35 z-#>{w3MAHBiaaVs9+lFs)zEYvxL5!#mH^k)@PSh3e*QJpJIhBH-O%-t7myL9cljrm zPBIn-ILp#cTY;qD+%o3oCUCX3dR0o3nG#q5uKK8>A_0yDx@uzEd2tzqJ>gehhsr2? 
zRSV}&@&)#R$9f+eBEte!aIVUfgTvz2iDfH>mK`xzwhEm|WmRYWkaDh#fS+=EK_6=~ zjM+4dxlZ5q*z0bIysnXXDBz43;ePI2!xsC-pbIBwoRgPkoNMu&FUgqe^xOG1_pagO zL>yLE0iMhL&b=#Tan>@UZY8*$Tar31Yc4Umx()^xDsQbtj)vU3#@Y4ysXImWkDPK& z;Gz-d=ASX_T(=!R?W9<=H-ddY_L!XZj1%FH{^Isy#6X>vZ}K^TA$73^J+~sE9>;z@ ziB3>#+tF*~b8&rQjKRA1Uvm(9Tl^Z@q7Mk8*YeA~f9X$7; z4YCI|K@XCP^6AUxf2-g-8oxgCP~e@=br?8n?a%QkO^Br8JKw=xq`BS+`z`8)c#Wa^{9EKVj>E*Ic>@^8XDfM*PqF(ja_Wcp_qC@3TuIaQ=JCcH;&~6efsBcb zGP;SY?A=HF&mQx?sUgCg)%!l=f`M-#j9{lF>`wEYPe;=dMLR*2Q%!T$@bl_XNo?kFizW4Fy zwCMxB_fLOpH2;x0Wg{Ph*URtauE{7LhPx)??1yE&#n#hK%!zRTdikF_=!|d8L#y zOmlu5pOf}_2-g*_aLYri|H0oZ)(Q4$4+6ga$;j{+v6=?5@&(}ZJ8)ZT?C!G^*6LS$ z;_Ysmz&cvGqe~NK5crd4a#QDCGgNL)u2sxl1iULF-ry#0%Kn!;DNTCz6guBQ+5pdF zBahX-c_v}EGnS0tU4Z;+E2y(Zdn&vmBI2hnrV@)Z+vwdf z*4Vxiy8kQxpEB^ieH8sMl_*=bf3{*@nGk<2Kj_GW@x|dKj>p;IeXX9 zNk81{4Rz-8j`3md&n-iDTVZTl<4}6-#KooQx2>a0#3BwTv5di36&sO9-iL65uPL{`{J|+TEw?>iebXGP z_O3bDAGPqPB^GPX9j0f$VNPl4O!0)f&ox@#+vVl#YxK6$jouAw9pCqRyNY=iY*lVH zja%c>xWtq6Y@;`=b}R5+vG;fz?$Ib<|DZtTp#}o9wj{aBM{jWq0bexW!>75qoCcoSf z_PUs+*~HpQZ)cMkR{srRRjV085dEV27o6AhOkvn<<7{N7f1WX}arTchrkEpA)Cbw< z;7oUve~oWGHegBL@YxIJ85}Fb8LaG8P&u~_mC-wm4L#Y`pR{Hr9Qc&^mfUf!({0;U z*NyCBsm8XxZSRcz1$%P4to09XP)thJp4nZ}B}EhBy+h+9_Smsf*1xoOE3gr?Cyf0$ zkzs!@ns3ARBD}*E4Pf7f%Iv%iDJj?mW3#+gS8Yw+9OY#!5x(F?AYd>Y%{Ihz~{{A*1oA6u^ldWrVNwwDld6eWjMx!2cq zU2km3;#fg=AFy(9-%DP;?;RUa%nv@3Bqq;69r^9HEr%ZOm2jrpXw9i=_)zn2lUq+wUWqP~1 zX%D@5$&=*DRNdX!oT~RsWAy%A)S(66%!51DiFg4AuJzl!@%oSI=Ir)ue^@T_~_SpjT8?d4Lghj@$t@HV@ouW3!z zBHw~=L$cmS-wSwm5970+15-U0j{m=Wa0O=>bv1mavYJ~9+eSJixg(b&Pmrlu$mkRG z*uDhcPm4czdw5jvc5B|4#Q}ak-|nwm%`fWlrO@BH5_mj%0lo_MH=V;7ZTMV|us-mF z$KO;t3I7OkS8Yr0V~wCo{%Cx!%O6Nyv%xY)HqD2(js!NN%=G>EPr7R86EqWg3fmNT z+dh~U`mgsRnFEy}f1}DAP?>|t-a2^vLJPW(ycq>gKgbzH72K=6Ft%PBbM=>=*o-;I z?N-mkrt>Bn2hOKoniD_umYWj?+6_==DfN}(qfonguXQq&aobgya@y5ix{~9xOP~F; zt8tdoc0JFs%#5y8d{6GV3A(n95l6(htBI+CXE!yn@7n}SYfUmJa>o4%yQZ%@5w*$^N)1zj3?PVU+u$4*9Sd z-)hdy^FohtKQzBG`&#DOvDlxpHJ%!FP_-@D4lSR-QZ0KGZYpg~`v}(=&1)CqL`aGx8ia^R4Q7 
zlmBW@cBkYV#;5D&7RJ%EVp<4a%}ITIRds$wU;n6gefs*6p7rVLZ}qHCU)%L8sjn~c z|8#wI=v?35)8z@2?O*zbfq!0hM{ERJzyDNyt(jIod)KGt+SS;KKe*5ZtcOQZiLHtL zly&IJn3X$?ea1gEYCFIaTem-1FNr)M-krI}OFhQ5-nD)88uNGLhT%L-^Ma`%>^K+b zTxH3U$mJ7Fa5ohi!Wl=Cpj8)?*m6R|Z_yQU4-s>G-=o5v&(Rf1viCK}?}eR!_%cgJ zkY3PrrtAdDF>IeZI)*&m*I{>$K3qSHa{#NiV<(>1FWB}gXj9y8ENU1fnSiyQ|5|pq zUO8dC)g3v;+%HuW`(Ei^aFNP1XWDU#LCU#^=N`klKRi`&U7JcCB_uBg1jV|`2ecN?yZZS4F zbQB-$73A;A4x`8UjOIJkH-qQ#RJ%{}@OeK9A7PHk(VL5Woa-l^*sr|=33<(b&@)mr z)BmR-Mw9w#WT1!A-_C=wPbd~Y%X^W_t76|n@9071Zh5F;ziz}@No_8&=evNKq zZndj5d?$M!UPo6Ge}fsPXEPs5_+@{-AKys&#lYf0du$Pvt08Aac?vQdzg?8+;!Gmd zkNk)|EH|<5%?5Fihp z>OqE%+&#t!6^`|-eF69>PI;6ky|H1TQCr~&*1nlSZ2v7rZT|8{(_$vwbEU@E+gh+Ou`-GU2I9rUl9Cp@Hu!Ie10)(G%X?TO+I^4ms;j?63t+9M~7dZ=uS^TF@EcX(N|782)CE1^= zr_5ta=;K*?e{;gVc*;Ja681_a=!RnP6qlF4>)6M{Vf_l49|ezt+b)m2Mm-l%=SN5C zHY*1EkNgjAOR?}>_SF%{$A4LB?3=)RyvhH6cx>F({#{}_S$kR7PZUc#QnGndZiBJt zW9rF6E-R&ed=JDLty?3Ug-$g^siTVaB)>3UZGwg7@uZjuV9{4TO}4X!H1-nZjPl@z z?nlgfe`1LT!0S?oNi?lJ`p)>v3v66m!Q8U0@j<7+D+aKh`bUmpcahB{k*giO(}>*Q zTAp1L#J+)y6ptHLZX|L+#14?Z=ye0#p1FLTxm+*VVyqD|h*Pp~Q+PMbmYrS)|FEIj z?_Cc)x0Lb9SN+W$#AK%#!|KwEJG|hfj~p7GK${vo-aXpGZ27j4k*fHwc&TyojnEXH zM+O@)@@B`T^v5<$T%YPX;29MOc+6ROjOj+|RlMH<-@`b2EAAN?=ZLd?5O8?9Jb zKm5shMog0R{;-?*`l4}x0lb@)yKA6vfpD~x{#GLERM1E1Q9%I2Cf^CkHB&DsV#x$c{?wUok?AjsOuFA&od$!{LkB&Lf$ib??@JS z-U^=A4ltU)aqwAqo{nrb96XoJ)5dcL$3KF;2f^_I>Q2V-D!%{k;CNq~^M68jKVLQ& zd0aLi2Ekh2z2LLP{ZT?U>+rw>8ONoJTXxrj(61P{uX73{M{EP1znEt2o$xC+Mfo^n z1S&W ziJ9QP?3$T_avB?00~qq*i&^ZO>KBr)WgcrJaOlDPnc#H;_`fuhoN&yOQFU+ct8RB= z({`h2InPsB6CdA!KJ7{EDR7NwH2F?>Pm9sJ3*gz<+G7pivS1||sQTnrse2TDh&|>@ zjIV)T(L=?@BHu)+c^5M#>)Lt`D2SR2xj zRYvPoztZ)96FD}Gn6+;D@CmKYc*)4mi?1pHpFVU& z^qGG8=?5?(_X2W(<8${=$G^%qoV-$6PwI1lNzOWCRu?!OwdQ$xtN*Ui!^)RH^LggN zFCg1gOD20RGlDC35B4@r@CIXxFTlseyQ*jKW0JQMoSD0LbTGDxaeRw2;liBXqW5Y2 zgSF@mwepLL9+wR=hL!~$S}B>*@4{|02-_<7s<9Uj^;oia2XwyzADZkn7TseWtUjVo zs~-)|7;P=t{0`$6dCR1~+VAN?R_q0qCN}1efR8zzm`3_%((hgDcl6Tl<)e)l`x%vI 
zJhtl`W9vMfUx(~+1HYBvslJZ^2lFwbw{jTYlIy78;o?@SjrqtroM)#s*>IMz}9hG#_r1ARXwS&4*nY)Ys^*O>)^FZ^2zV z)fkszl4A|if!T%b(ES_GBajtSfmcBvxFCNd;4;#J%kuZ2rw&{O*>J%gHl(`d1YA1l zLnC<60xtM~djz(K|sVMnRMwwA}<_qv#8(Gq^g5ld2I`GJVd z9Bjp^hUp0NV#jMJ>= zqpUMSoadE?$T61mTxEALk7p;pH3%6N*cC%-SKMgjPKrndcIw>qPJHf| z)0KTn@kYT|a@oqaZQJ)L^(K|I+J7rP7ksT<2YfY`%3XcRH%WcbHx1o+7! z*YJ%+?|@AuKC)f#^iuYb{uy4f3!26~{D$=h-u#(0awcPlF2r63ovDA$70LxKM4NNL zi~0OUZy{G2xTu)>c3@%Y<$4Bugj<5ox`&K5jkgoJ_q~;@ua}J4Z_uCb4YO%$lrx4T zXWj+PQ@il|y?t$y|B9@91>b+JytwRa@#Vd;CG~m+ATRbu7PRF>a+O(o1s%V0wh^qA zEGS-8p2j%%l}_a0cgN^pU}ttP@EdG+zs0tP-SF{U*c$SGJS5byEF~1Z9oq7Bqe;2h zhUXev$=i@#^i5-tn{S(K-FI(a|6Kk|WSV7Xf38d;n^QZsi~N_-)8Pjlv{SsCy-3;Q z$iF!l*nJ~*x9fv}J+p&>m#+#21`G=`n|Mz9&(Z4z^T)_PwF5`Zl5i8~XkI(_~ zoy97--+PJ5K=ZiYcQN;~(37}tzli(G!42-WU&wt4_7v`Se1ZES=I_%>_eZA~u>yW~ z`gt~)XP@v4e|EvZ;L6{e!?W{gkNfC2etGxXF+4k$cJ*v5zba2Y*9QH^Uemz;bLqQm zI4wNux(Pc6_NNw=JA?Ltw>jyYkhiL;a1Q0}g$Dr^z3rtcB*bE9A34! zFum%|!ohl05{zY6-B*}Xb${Wgss{?s(6gdoY;@H_g&9?M6^^L7w{VD_O$o-bs_rQq zSyfxuy*4E|E}FMgF}LL;qNo66mLAV}r3bSEU4cHl_p)Z%zsH z*E4cJzV$*%;O#vrfus9V0)pE+e@h8`axf+EZcj>}YXfk@&h+UADS`KnrUVY9nO2zt z6U@ND5;O3|EHluxmNMAe{yxWk*R_%V*q^#LBI{9Kca0h72KR$5Viz{^4!d0UX5D zz0UO}*SlOFbH%w*sP7D}EUsLxbGgpvn#@(ibvf77T=Tffxo+XQo$DU1Ix{e%(P;V` za^foD%sF=>eFy7lC+q1qtf$|yo-8;g*7x48*=2`klLKI+F)_Z(6S#MnF`1lzj~ne@ z40Y}_LkGqV3VD`bdz%AIBbSBnQ*`9W?ZMb5=md%l6aDPu|B-_OL%|8Qy|Dn>nq^yN zjd$=|>wGNFI!ewAxq^}tM@3rZkxw8MUcYc)XgGSDNsi+vxGQ^v;&i-<<3Hv(giaSY z58j>p?Gt?4fsCPN^N|r=#7|TK%`c@L)4+yU@|8d(ddFejoA~=AD~sNnXCvoj7>hPQ z&$VB-0Gqz#aFy$zAD#RXkL4%cEbu1!KkE@*2S}UuE*jP*cp*shDW>&2z<@D2&@RbrjJ528@kWYNPE7Cjey=MjseT+de3NgaaZBJ-7?RNm*DYMB5hwP-a z7hP5oh9#r=$?7l=E&!4sSu z&HVGB9y;Q0FlZ|-C+qqOhB)~|e^6HT_3jSa&YA`cHBT zj}kitPRwlqrwaHlc#Z(K^jpT-$_-BL*)bG+=f8C5D!l`KZT}J9^1c6vPi!9$tUWr& zXv$)Lo#6C?L8iT)%%L`JCDzle%uOEqBgCgWSZ6L`ePx>>w#t9tdg^3de8}?~`sYpl zfc1C-{a%b*FS#VS{A-l0_p%Nst95V$J{n{lj9?8M=2?T=&iN|*SN@24U}dcV@}Fp& zovh9277nCG*1exwc<(aJaaNCP_#Nwtv4^Gi)N}9P`(wZWJO8AmZv5`wG7d%1XMFVO 
zcpvXO=afD^Lmw-U(VRZ+`@iht)&HcAUr+8M@gPsrN6nw;ncG?$nUOoM)!yE0zi_n@ zAKwSOmt8a&UnV5sOZP(weEHutzQoVL$Mb1!;l~Ml5iaZi7jlD>66eN^wAOtmV-WxhBJ(_YqS0g9Za^%D8{209FHQyVJ~41j2Y@@trrtJ-|)zhkL)vs6S~vuW~?0 zhh#lANxqGy*!F(Wg$nn&H9pEJH}87&6~1cO`ga=EKD6&MZaWXJI~+qkIw=?LG9yIW z&q}7<&3;J5qv*_4we90<&d#xhg{QK2$~O8Dc2W63WY3Z9vx`3MUTWK%L{m&?e+;{) zNuLYSf}3T-4AZ9|Yvdy4ISB2OKK4Ai=63DB{LggOb5`UQ;U?c&&!XpA&lYG7!4cWY zwYOUQL3_1!P=|QO7@iYzVc86UwP>mTDKFpPZ>8;(yYN|mk26a$&_^23O^wtf(!O+) z2G*}}!SZb8@fX+ zUmqO6|MaipK*qm_1Da=hzoEy%0pSDl?bnz!=i~ve?Z`4}mD@Xxt!#;G2C2~8RNEgD zz}9VAej@A{uFM|RNh}O+(frGXePYg`x9DuPPaqRJ>hQ>a5}Vvi%>4-*IJL|lDKnLN zl+WfwT_Yl|iO-*eKO-W4;#oxkf6mw3F<;o%_jCy_nC~AjuglRuFas4*Z^E)BH@?RU+rGZY(|o@3{iM`s-uzdgg1b0BaBBg@eMo{|d+U|y zLwl=`i!2|f_|>z}HOboV-yUD>1s+9LV$WjUp+%A1w8OkxeqGTx-Jfp$RbKwx57F0@ zObiNZ^LF3W5G?tgwLo-Y0-&pxrbNw61X)GCY zdi|)-L7oc^!o>y9)@J34CYB3*ugxURGVj`g#^JOn{wX{9N4$F#pN?WCs)l7YPGl`f zSIIcr2(>&{-g=nzX8Qy)BGCb^)~DVtFFeXw5H0+&pJ>rmd`vnUbsc+jmS7VWz1O}I z{i>e08@i)95_#KysJ_$IM=u&pKLVZwgDgEn{?gOZWWoRN)zUu}4)lp2BoH zC+KeE(%0O<+9GVZ!riXH#!w&j@K>>gTjdiz39a||_0~CWa|P>U-f`rX68huhvbFh# z8Hc`j*jM_CA7qSYgSoSF*``Ofehb}^^ZTIH zS(WE-W~j?sBN|shebN;Qz!lvqhB+0wS5TWf;W~Ii1ASPJEcZAt7M@h%8>k@1<6Z3S z?ShYShDB&8u|bpYk1nTdrYpR*{Tw6opS=I`7$a~8I8vD!Y^&nC$VU&6M=#eOb(?8E zaL^n|Ognf%d|_KbO?B%o?tL}>)}PZy1Knj4zs`A1R=(ezCuAPsws6;356Nr%+r&aC zc1tv%!(*>s>6Gp80j>M_=Y&`n>HcYdch>xg96eeif&qCFvlHuBcF9w4a=o>V9iP2D zw|?PaQr-qfE)WgTnswyB71S4H@0Z|LhWsYpWRQ=fj56iKGcBR+#V3|u1GPZOk_-RQUxbe^*9=+9@fw-vjA zmzj3@??rFTI$lqfRnPe2^gcTzpO@Edqp4}I&&PO7IFAA#y|>hN(MQ%34?MF!*=i0PuO8XW)9E zfqTUa-<@g%x+tSMHIL35I&(LlcRK&>qj@Fds)XAH`Sce|I0(>OuvTQzmoe|hp$vX`}s}wrQ5lk zQp2&Eq~{C-euLnDI;YNkoc}4Gp}j}c)^l=+ZxSEu0OmSJ%Z1Nq1M~i4Vt0R%8VvlD zm|XQm@mgwMZT^zB(a~!b5GVR|Z~>d!-u%68+pdl;wgz3f1-c)B{%^#$OwNMZO~Zn< z&t&2k#SX500cBh@T#s;V;`#%Zhdlz9a(#{KJ6u2G+QmiRCPuKOZyX)0RbQrGdREAP z=7r{=kNtu@3DTJqC!%=K>%q4e<+dJ~6heke&-09E6kq6k7}=Gx=Q5xD zeslqZv6Xz1mOZcw_`+v|x33fHRiiaPoqOC_X~y=+jRmf> zS!I_$%>Hg;s4I9;W9nJPy>58Iboh??_#fYqu0PSrU&0w6)?EDK+eVXOrhb$dcLVrk 
z8dhA~DfBj<_G}s8DsbaU;-;Uw_gGNlqTp#<%P!F<=~Q!|3yA@0^T=*S68q$%(DmPTnSvO zc$db#-dD0N!)JdtTlP7{>^|~Ed`R1&v;3|ot17I7rj$c><>!3>-_UZ}(w+m+d)ZcV zx!?W;eEWQx1^Syf7>*ro| zui$xOS#{ydj8WtAerf!@M+^ZEDVm|MX8zMb>8ScDu7?xn{67YIEoKesj?tlYLBi0p^r-@c?w@ z^=xC&%Zw#TJEjZU3(v7}97_A`x;1sh#-VOcmVYTcQv2s*gHio?#8bR>X7A)T8FwAH zRD32pDihic>^>gVJ6X7*SU=_V)BL5)tS+pAf2nLIaqnia{{#%+jio%(ULFSq1;iQr zJ21F)qz!{5tPSWzI)u08R3QE z5QP`1tPP81fENwOio|6FYP8Pze^Q(0*=wUwYlC~y0j-TaXFs~eSsPD-*BYxKMj$P*D{rt_u={xdWe5Yc@Q{bB>{L>3xOvSd~l5F+3Y{=SA zBYmo~#MV_L_mdC7vYFu9P~MS6jr4UJ@n?C=X$A2JDl0!njB^@fKQi{sI3|6p!fhP8 zf-$ehcBp#RL+hk-RA6&l?atg2|I|1%P~%=G{Z#rzR-~8vH>u|%esgOk7e2)vjUaSb z@6(WR)&53cx=r!r{L;>_3A7=7@&bONIY)M!0 zt9?IR_=5eeU|YEt-UU4|;SaBq&)vsazU%{;%Uq@Zmn)C+mU1KFl`21u@}fr-Q|*Pfju>lt&tFbonSR37Epi|{F-=XmW z*V*OB@@4F^2zrcT&BRsy9vPxy3VHW=7q80*$2S|XnaK49IgBL(IN!}IarFjE>ZcmS z((UCNU{ce}csnP!Ec@W^p0xK~_0aaqv>jcT5qhN99qKM|h0^m`-^2~nt4{hEC9bBF zeHuL*h<(H_P{2Fc?VdmupF~;N5Gr|(ei7?PtDklRxtd_u^drzI?Mc&oA7u|3@iA>t zSMzLrS6w}Fty%1yK- zW3tz}=(A{mY?Zx?`%j!7F`E89zvz7nZ^g(TE@7j}7JfS1Aj5gT{wU_Kk zaBg)8@xb&^^@;xdz7D%IaS08aCBOXgU|>bfJ%z;mrjnDOWR}pM$ublSaFGhQ5W|6-T2zUV>v3dTAaj*&iluTj!3mM$5X;w@l8}iXJYS%wGSY zakqQXc5rV4u>P+%P{wPK!FPX}0sih1&sSuu%QV!ZEQ?b&fl zBO@PhUwaky;~brxlIy?U9ei#MFp7geM_GF+J0DqL^%Cs1)}9M|m7W|c_lW`D5zl!W z8Nwj`!^-==#8?FG9FNP;-T<@m-a^G}BtKVfujs<-thgri!Lnme2D~~cuBj5)z=>;8 zIoTmPsteY3a*wYQT4e;DJ*M+I*(Zh^vYoXh_@IM?3a~pKMZZC&n5g!vnAZyUMQTz! 
z(gbohIBg>X9dBE@w$rxkAO+I-fr(_-4;a7AlXD_!|9r;p@Z|<%QOO)Se^_$g(><1a z^ow@-CHgE}lgud`tExFtn1?N(V5@QM@_2l*)em^uSo(Nu0b@d5sjRuLa2L2;>CV~X zhR51`J}0tl3wsf??pT+aujD!H&wR%Jm$3*rZ!WnSh~pg@=_zIXfs4-%G$t*i{mG^= z={@2%;_xTVmYJLOU~r{iD;%h*GS@5X>vK*H4}As-K2WGE4O=r@k`!)>R!|E54C?$Y}MuY@R6_t#loI=l=?J*9%;& zdBycpi{LwdphKo3dJVpR ziEp9hk8j8CvG9vSLW#Yzk+0kXAGq2+yX^04w6`qNUqws+^5jY9esmHKrQBZeoo9Y- z+^_m4dE(W|M1HVqv00%DN@vu*@#V(1unpV$#)IhJDnD;)e3BLO#C$xg`QX{d*q>K# ziBHn}b+T67dG)iGnykk(>=cYm;}dU^{w*0R0zD{4FRW({R=|^bo}bYc!MCR~;B+>S z7kpm8ysJzxznnQ?*|mrvSW`=ERZ-k?NOEKJS!6U%Mx)7hxsE&57V^Iv?LVg>tk81CW^Ig01o!)b=??eygd0ahtthKm1 zr{^ifcUk-^EAnglTtNQhH-M*XlC}<`eUj<^*Q*N(M;Xv6^7lyAJ;M7L;EU{eCH%6d zf7s-f_@o~)uY%DfJc}~Ulk3uc#dN>oIQ2ce5nM9-6DS*k#}$kq_Lhr0RkLhA{)_lT zPd-nGz0~_E6Z@b)UN|(=wa^thk!#Hj>~sBf3Hy0;LBG()8~TNjH$q(-JmHQMm$mm7 zJq}x>$8V0UpZz9vi~;Y)a!KbBP3{{5*-fk>^znj@39LWn<#puT36z%}r8;i8xn-^D zSjL+BtUCTe9nqcWfYf7rX+mo|@JDVO+KF6nfaeDf8lg8fklQGzI))Q}nI5^UKfX=; zeHpCDk9c>Ov2ML1E3}=wjq-y_&eA@s_ur1MmVA$&I+PC{bc_wQ<-zN*3$>A>cu^Vs z3^FIeFX@+}q3EE)>@|Gy**(RSwfC#rc{4sqI)f>CdvU;vOeWtkbagIgI=*KP3a;{9 zaB(QFwzBmt=DvdZyZJqn`Lxez85;g%mEswOa~4v+P&IQW-(VRw>IgP!@|(7m|Jqn| z8}%7{TSniM|5-Y__817pMd{>?0A5A#sC@LLx*_Pw@W3kedhCH;SD=G50{h_EX=|^* zPH%k6_(XEkt;E#GFHjfcJM4?c%fDimS6O|d@+vF&S~0jspEH^i-__;m7f}q}!Lh8( zJk5(2c$<+Cz+3!skN6eu#Lpgt$2)w?F-T)7)ARb6{{t-D!ICi?Z+YSs5Up)tS%YSg~@$+nc1;6`Tow_9R_?kke zd)7W<%l1WS9u#-i;vv@ResY>4yVl`T2ttSIfWZ=S6aEcYPv?302wMhui}4-B#vSdVOIdF5ylqfOXo5j|bN`*d1 zh4EhZFOIibX zpbRpW{~$KyH=qfM&m5E}qvr*Qv#7LQyo@I~uM>2eI0E||!DIKo5dP#A7j{QC_Q%9| z+a7Wfqy_h(w;iX0%GpXxC$>d9-cEFWDSc2pnqpEVb4Z4epY;izyWtrH6}(TB1Ge;) z+(!7a@!8h);@8F=Q;Quq(7}F^cJdXetQ%R^V@?;&79{C2m z+5ldH+h_5WeBnV_kaIwgK@u?_z2)q~zeQ(D{MO2ko(~@I{Ym-jOang@@c;(?C&_dC zZ!;gP*l&<4&-zdNqkLcm&{^j{_ic{s*Y_;R^!EHl#mEP*F~Vi^>33g?uP!AAlF2t3 zgEQyJ=SHdz@{MPb3&>3P!AkRtTC?8?A6e;H{3ni1eH}rW8w&V8(0-pq|K^S7{}B7X z`dj)%{--DXpJ>#!k`MDk?4@_J2UP1y=k48r{@QT_-E;^sVTvPTuY~-7v#^(Q?(nrb z?}9jlNiFyeOv>H!g<$Qg(+e6`_k%96ANHOLgSA=shwp^NYr!xo_Py 
z>;-C}-sQv%qYobH!T!qL|5|gTfi272<0Dt05BMt{D~)&Pf7fP^$8a7obn3^h595>7 zhq2&89b{BYKeK(_ozngxyM2v)UT=I-9b>-dqF^n0 z!ij#6$H3{wNcz$K?y3D4N3N)3IBaD9@@Ms95Brt-_M_o#yB}H?b+lK>9LPpLpFPX? zgq}6pW*pOcsXNDrRg5u?q1TgR7d|NGJV^4^AkT~?UQ97QcK(Uskui+L(HFbA z=|8l{I>Z z@IIw}){njsJ&-Kj?jAm?Z*IYNzS?(v_02u_tME~5>!;+Z^5E|t$i5=^HiE%6Cm)k? zR!N>D-)PNx_}S|Nj0>RebFU``Bi@~VlA=FniQ61eKU^qIRaBZikV5bwKe zVq@vm#rMiDfNf=NCC_!=O2w*lCIn|{s)N9P#sPmbXmpR^A8zB^qke*^K8^ugj|$@j_o67(EA zZ_91-EZR*U<(m@UPCkQnCi~-k`6=XMuPkh$eA9I7vYe;*0lZ~7eqjT<(TXwT^cw8y zUC!Q-vmP{>8pP`jGfgo$4Q@M+Lvo#`lhv-Loz>K@wW)RUJ?>u}lM?!v_O_uH?=Gt> z+_j^i@i2JQooKg@LE|jxSdY!|s zKepC@BcGj2L$6aV6T5HN_N`chXSdR~SLqw}u(mLLYq9#48F?wux7~@py+q$$r~O^@ zZTFZHa4B*Puh~T(rI)EsT_^SFWEz)rj==HwRoN$;TuaXRkY7bM>mY7H=R-z_4OD!> z-M5P;*fFol%cQ-`)1m#d(3`uEFF98_VnDlVn8#ZmtXnzL7&t3+eu-O zZ@_mdtTP_8htNyEb0bUX-_`6*I37c>OflH~!XJ>&;F!l1s!ZgRQ0%pth$WnlFTFB5 z7@NN|xM_jAeVUbr5<1_x%-D3mQ$H^J)JB zbZlauVx7x^)X_ez5?()lsX_hq)6hru_KhjgTK-_1=;fuy_m?cgXQs6-nK526fN>0G zZ$dhIB{GolGvUWs@Z%BIeiLV0+E0kD$1l5$#;5&$${`4!BG;T98{!40D;Tp$%s>}* z<09<5BRQv`$b;OR^2C}V&SRjDmd)x)iw@^TE{4t*aE0CJe)%yalX@vLgZJ4p@Hu0f zO@0@JzULMZgT!xmA@KknZ_O3lXCf%C&n zM@FLLLhd3b^Ocl|-a{-eI2OU)F_U_CdvGfI zF3Mx{1N(vBmD4!RWu}!L$}Oy>OwFF^!ac}_*ifxJ`I_Gr>aN^bK&-80?Q_<%;%Bt>)pxVF6k24<`O&dP?OF6u`@c^&ztS@fUws2}HjlZxo;hSs%dIlJ6@8)JZJH41Z752Hk)s9nCDTI9|@AZ`VCNOWiu)9wki{F#Ezlr(TLcQ@KBPLzG9-8Flx%}aO-vlirc3|oI z@zu+BmbaDy#~)y~youlC+0Y~O^@iWM?(HfH#tP;I+n!u(gjmy7>|hu59RNNJ%gU|T z4as@++M_~ys{0R#x@)MrlxJ_l-_!^7NBt@`mRS0?{MORJU+|2&_mIbEQal1SB>#Hq zegZjc5xz6ifL4FowP^l$Y0$Ax;A=lakvs4Lh{!FY_iIgQt2LL{g0l{{vCXMPfl31W+HLl zJCO+m2jX`Z)#zTh$vlXM%f3tb7(rstOdp}n0a;Ps}mhWID44(KVeS97dke855U_b*2e7`m3Gw>+ZJ;8YNKsW zFLDIGpA2XG@Px{TDpo;XV!Qq@q47@U09#A!z=q4hKL$U4vdV4YsH^tQUfE zBKA&sOCI4#=Q6Le`Cz_uZw=5Yt?iR zo95t8!%=di01x#M-7aFm1i9SLxfkS7yrUf+R*t<(ysRAi*Lrf9gwM()Xm|laDZO-GT$YdiL#w%7AY-a**^+*FgRO{v2KjVLbUiM#w-5R+9{>fgFll|{D|NJt%@FsZT zjqt`V!6R?LMmH}E&$RgGWYMsMt<;j4cAh?d7QSiiJJ#8Kj=Utk+2OI|5^y^gnW;bW 
zlAUi^cF+yjV}sZl?0nKA@TE#Vk^fjeRm*1UAIiFu3~^!%C;J~ena|IP4K?X|d|6^I z@do)6GaR`tu}8We{jtt?xAqDmo6`p)@@9z<;49cl4izi~ z-vm7U%3rN`B=!62_t|-Q?YNn&$O8Un`)aj6Fi3rU&d;F6puDCa+LdB z8BonRtva1K8t!i)mi2VwwQw*1DjT`pnF);@Fe6}7#Dwy||TZgd#d|Ihb# z4&3L!AHU4wdDmTbOnwUWPc}N~!D0H*LO;C~*p|l|$BNNawh^z?ta$JrQ!ZhBbAa&*H=d{kFYuv7 zb4vq?G0DSbW)f#2o62r%ZYnFEtz@Q0owDQXvIem_PT4SfGcu0Gt^Vw=>Ly2}U4G(v z#;khuJU?%A=mDNL$DsE)c0Ho$PCZfhd<=XpNW{G8xnf>&6EQE)$H+hM4=O*=Qf%LD zE1p~S_mL4VIQ~52bK1)vUl8Yx@?0LTGdei*CUqKz z;&wkDBi_f++t{0;y}t$UyX5{3OsYGG?K}59a>OcUy864oXuAtIC|}#H*ulF}g5hza zf`w7+0Y3DLYG7K&Tvq_sa^R{MIv=tJ=RZX{S)*O>GG8JVsiG$-?-=)rttK~ep%-~? zu7yjP5p>$7zjDT$WYVvc+xmp)Q|`6w%YqMEdTgIKFRf?EUc%Ee_+GHc?{`r`kIeOd z=Y#la&RMYN_kGu6=N11ut-o^M=lUz}frmTv{~NR?dstsTQ@Jr@ky&3q`}!|7WM71R z6MrRq#f5#qAm%I=+d-xu{{y^5@i)W$&|S`S?0sG`Uj4<`LEIyHn#j8=Kb`GowetjN zFTm^U4`?5@?ncM>SDV|Ua4v(o1ZX#wXmT$CAJ0Lp@ z@jkN4bSB$l&`rzx4YBq=9=VzGyoGOu6WK-Qr+mmYB?9hK*N4c~ z$|rGp0TG3J}qgj^Naf*gCA>`)zbM$@M1)AO6{)WP_fOZ<%U1u}r;r*!Ih!_&_?JJiWBW75y$JTu>- zkB+U(`KB}}FO6W>caPFNN#7*rPH_0nf#K}y$qUUE&ykL}_s7e2^kCuFdt$VSuKy`h6+Z8=Q2yo!UI zBaz3si6ypt9IrFNccY7lwrbx;U;C?Qw4F1|ma)zP|30&=`qYr*IpKFPZM;FdqFc(* zRZm=_)dsrQKWigRZCEm;8~Yw|<+2lF67F^)S4w77J5K#CaxMco8y;W0AdR?S#}=6$ z`Qe8fj@QOv63{e#$5>~ni^u=A%| zd*R5@+$6tN5PtkBxfdJo%V4kE;sRdcwVUpu4tT_g{cNY!bI3)RPvh`G<(nhyDI;rQ$Oc^ z*Ucv88M}<^Zj#B({QcQpCpH}aGW_&VDbJ0JhHS+vZ>zf`ybZZMzSz?mhZn}j8Eu(v z@_+6G|D61vKEulYsl9da9wRX8!Irf(H%>=JX!LQ4~z01HoJ3oX|=ckVv zO^U%492;meQc~V3yZsT?TsAaWd!Rl>-`ay6PqrtWfpm~{-mwZE4GhO{T}3>YVgsW; zyVQqXIZ?Lz=($E)5BtZW*jl?hJ@4d*a(gPQOW7sAy*h3zDgXj629g{;{_(r!F)T8n$TgCmmjG=%y9Y5v5 z=Ra6Akl!+V%k0-^T;bw81pfB`mor!&SGveC%~`gxaX!NL&$!NLT<02W#mzs8z3m&w zyWizGzPq7U{$*URzSTD$N(QFi6To5C+{7P#+0LmpgnESA=keSCXIArE_@F&}O^JJr zS1|*1*uu)GGmo*!hakMyyAOzCla6oFRxbSwFZ)qzKKDMu+qnK5BaA%U+Je7UebM<+ ze)cCs(B&dKf7CjYar*f0!``)-`qY;l$k`6==vxwq~^iFV{_$0}& zUC>45y(S-?_dRj|YX8QM!$woz^JZi#htHxkP&M!JkXZopRX#zG_X_Y(NlSOfh)`&5Tx!$^<3NuSqY zU)lvd5v?i!pQ7-X1I)V#Ud%MiM&Ek*Q?jfxU7;PZImG*(ukm>@X8FjeuCrU@OJ9#} 
z!QQdP3~a5EY3{}bCw*hNKN$A8%spjXDjRVl=lYFs7xUv|99_ueG66W3qy{o+^<+!+sE)qUTEUqZIA9hThg@bC|y$q62A;eoz$c(=Zn z3}RrnfYH zET75$e!b7jCzlG2E{6_zp_9r3q`Yd$W!VVcRPX71UhA}2HVW2dF8*fyS4>L->#}Sy z^Q}00Y?dlhKfs91;92{z_?9SrR89}pk{|i)c#fA!_)HqleZK6RYV0KCK4Z}`Y#IMs zG#UBm50}s`Jgty5kp=D5S`r^@r`;96NIosm1ZNHT8RMP&zlU$eS>M>}NHit+Y{Tu? z$h7ax$#v0NUIL#^!gUm()8V4LX@6X<2+W`IB2F%6_Z%QQ7ri71{D9+CdjYx%7q#@G| z#KvpMwAOkN%})3Vto7p9uC2Hb&ViELf1kl#A7bcdu?}Cm%xIO(_YGv=4#uSX^TMTl z{e)NWd3c%Rd**s#Gdx`K$w>MGU60gb=M-O;Y#>=&wrkgi5ADxI9_|(e{F#RGwAeSJm1xi&zm?T1_#1zUMw)tG@Y|`HGGU#$3>iPVBDQ zpD*4>&WENB@K^6l&hm4?hXV8o|JK%565p6rwY?2T;#=|){f~P-zyEJT>rUxEa~4C# zaQd*Fb?Wq?>2vy^_;ago^nvxerOuPxL(F4n1vwzdf8DkVJW*U<3vx@@jq%kZ;bod1 zCTpwwmNR%}vX(Um^7U7MTe35Dat6{W;ZXtyHnK(qPwoBb_T*T0Pr;Nt|B-5Jk~P4! z4xL47Lw%^G58cQ*duU&E9HtMNyI+F~^~hA`(rycFnu-26^_Fu+{Pp|R-3X&@yVH`R z(j%38BRpLsKOT0r+x3e~avQ&=<5MpB1HFWvT)|qK#hNRFp3Gzoil1JNFVB|yHIC%= zwtN!b`Vabf3H91}ar%dk5({(WmOlA$N=~d>;$N_7B8Pz!;yJc1+8;h((|bn;-3ER5 zM`O}Dm^#|hLEl84tr+g;pd(m|Bhf)^-Sa!>cap;l@M2y<_bffGduBxjgBR_r?-M%W ziSO^>+xjG(GYc3tK%bzITi~&ljI)FO%o$=#TE?8(I%j4L@&T}Obj}m?!OxqT6S}5k zDo59JCFsF zmD5vlTp!Dcy!2CWl(wZqP2m!)mCr}I*l&@=nvtztd>&#s3(7_k9nT?rGy@xz=2|LU@`Qq#ClFlFmhX8o6>spaMmB|=|s+o+Ezo9Ksp^rmsnXmZQf%m;qbjU-n% zI#w0soq98=cRy#pyv7>64)}hi?qtboZq~hr^?xGQW2t22e?xcDS;w&zM$c-H3fy=`F0ePGoB7K@)#$`^BUOH9l7y{%)r9 z9$Ozm=il>)^d2rpH`?eBlMe26(#J8G|QTT7Vd zglv081E*t*C`d+?HAEFZkZlag!$AHa{;Z)e#B)Mg3z zB7SV=D9DNwT_#%D`@GJ_5id@DSIE2Ol-}pXKcq`I`iA}+$X4KY>I?z-|+G5 z^YjhYOG4imagx3fPSSn0>&zKs|5W^~mcD}gUnboI8>7BAdG7O#$VpAoSHhVFG5q9# zKzG_s+?JyQH6xezqfdFr{KucMM=O3iWABX{DW5~qoI`)C_zTVLUfR7I+}`nOF)_4?r!pctkgtur`q`tX7oTFJ z{Z&`2g7r0@a{29pt##$$z2vMx@OsJCE$;g42IR2MT5H>ZzvKI{`tGs&{th%Q*|woL z_&kHN3BjEo1M7}a#&P&pfG6vC7SuCz(I|b>SzM_`{mLTr&pd3c(t$gW>58`6@oGhz zj798`@rz%ij+Y1BdO%YyTr~VC^|@SdUHoe*_Nfqj2D$e5C&)aA41*1BdyF8}5S-%?X^W z(C=w*vM)aDVqN?*d=T9?&<&IAjwj*A7z2419A!;(c-GyRT<&w@#XKqd9RBabJF-9A zqF35KVA(qGIsFDYAp2SK_3_oCpj)D=CN>w*HpTB$Km$67!;>7g@*9+eezhY%6s+PL 
zTFx=3VlCc?>@WM-Q^*t3M~w`1EOv+HkC6{#L}VIvv)n8CEmAJ(zY^1bAGA%pU3TeY zdsH#Fj}GLIds03(7RF_8UIg{K@Z~>&+?B`PK4inW^*9m}4oLFvPC^Mh7CI>|u!UmSt#k>k(12!4t@DZPaa=INd2HPS7wYY^Yg z@$Y?-UwjI(RbRui`-m<1Ju;ep%jej7yyBHy@IP!XwtRJ~t#g*4v#DNq^cEMnaq^8x zp$_^>4!+3I`&GZriPl+7*^!~}zY26t$}4{uw(G3Ojm%R!wgJW$0fv)ox!B%I^B29- z*s5!-yyfthxs~KZvT{9vH^^GRR`q8_?m_NRor)XHVT{S;j$qR{fXwCe<=uwE%%U<*s~^EaQ*J-#*ZCC-+YI6!VTrA|2}x7xV=j^7){p(B3PU9qG1 zVgGuaErW=ki%&~_w9biV?9Z(?ngkQ!i11PIj`#C?D*rG4IKDbKsCRNJc<`lT@yD4< z|A?dKgaVCep^wm$h3m2%E}LZMi7_lcA+|qPzeP_&gWdRT?J^@H#$V&BE7(gc{B~RW z5wiVHaju?`+WWj?Pn5s|bE zBY&xKam&7_v9$f}^6sG1Dn3^7%~_OFze=&Cw0t^!zX@({n{FIhz&E0Q(p|l8KeV!%y+}RK;o!IH zZmZ#3Aj8c2LOxgU+eNpP72COIWeYXHd3;}cs#3j;*rIi=(jkqYf1vohi+$;k}XCgP2hQV1&ZhY)1$zCsn z$(zA>87JrHu<=Imxvt1m*7efC!SJ`(R|?&EJiqr`=+5xS4bTRigQfH1>DLwycn!VW z0`5HC#e$ckZDvo*%5S=P)C3IdvGsv8xVG?LEDU>m!zL zYY@Cw+=%RX#BV@LhDT85n%W=A9iA))$Ixw^yco%Pn|oj|+%J8)Z@`nqVe&^n8(a~q z@829{#r*oHS8}NFK<;q9v*2O-Z8+-zxf)q9JaA%hILsK7M`rNU>0x4m4~=oHSX0OE zkV{L#V>$b2U4P>jZ@}OF?s;U*Htvl)`I6S%_@?K(YhLGn7x&$sN7ux;@8SL}&tq$H z2N=J2oBMY>kFP1>{$1|h^MuwAFZ>JbTkY`FuTftQ@x0?)_DAd*1-#A3A#7mpvDfBZ za-+SI#eSr*oZ}&%Klx6&k>$HQM%xGI*Rr$N8AUNQVHzcMX^y=Jxf)&u*^v#)s081jwL zF4rwT$}6lG-8XJ8NO|L$v=DM~h_eJf_Zy4nUz--%Va>Z0+Z{Q)?!-AZPTTOU)y5~+ z?*F{De)Ab+oH}0A`xmcS^?7w{?o$Sud~Fr6S_Q{kG3i;A+`#%E;rAoo z{y*>gQ+wX;@dTgqotL^c@7VnP-b7i!Km7aUvv(J^+~H68&TOyGn`7=>F}vX>oA2UFGzE&UmcIu#8if5NS&Z%R#%V?8K==?_} zCg-hjp_k{9{hYqaM}5{^n%9%}o|os@G*mvJ%cuw2&$El&L&!I1uYLKuWUHuVtYwU; zZ2h3nO!7U~3<$Q(=}+E?rR0PE%7{4)#pH=5|8C_Ua$65TGqDe_-mQ54wwd_*w-8fX zK>e?~T^7xb0w-Y1f4BV~{F86$b$7o*QFPEQ_?dNX5-{BaoNlH3CScVM*sAVt(9cc4 z?GfPHLtc#YfSKBP9T=BhVEDR$-#hSucct_B2Ce-jaC{dUzc)eOMe`L0^9X%sTo?L* zS3|LB&8^0v`7PL<+xi;g77gqhH!3)O2>o{QpxjA+nt+?+V95=m6S!dluhg@HzDD4c z0lf6iN1MK4J6~}%V+jK@_Cds|;5mMBPK=vo&QUCS>Uo9_n{nGOtv+N}efZF#yLOy# zGG03KY$UZSTrg>KIex-f34FZmwquuogOkfMmN>{xR*40a_t8uI=)?LnoT4R`S(dozX z1l&&6KTd_&b@Z!g%#hI5*=ZK+b}-L^k>ry<{m5vt|7)+q@W>zduXmb@WH==AAJ)NM z*^hV6|IX}G^2eF*M%i0BQbbpk_rs{=%%+QACHCAD!>euH^yB7} 
z?Ie;rpL*=#@Q+td3IBB6)bRSoz@`nWE@r><%BeqDJ7v?)JWosopZ01$Pf!2)l`WJ{ zd++u4wWsIlg|2;r$!(yuzKQx9wv(?F8T6T-UlOM5uuU7Lg|#LWlecl%S9)*7XZ{x}@%Tm6`Nq(?eB*+MC%5%!e0XWw$2BIGdrL7j#NL=+QLYkfnr}a_ z65K>C)}C0N(YKBCO?^tGf9g+>S7$@HLyyp}jT~a~gozSd^=jPsLJmsz9 z*sn8?eo23O+cVhz4E_8g|9?(D|3E*96+86kw~gL+JcIlO^3XfvaQQubeVx8;qOXt8 z*Q>UFp;0kp%0ro3>+5|3TXCIR@rtL%)C*x7ke`8L-|GhQL$y_WoML-GX0Y?SwB(?-vcDikJnsn^ z?pNqHF7%u}zC6~f=(0nPwHE;!$K%FZU$p!y(!ZsfO9wx2jdiw2ByF~x*8v{W*211X z`GTz&a?75S5&5(VT@^j&babMay?FB7CH5;tL^G0oXo^+(5ST&vgorbc%4euox<=MC5*E@u3epC-P9G9x0JJ=hE=Gm1TUQFJEd)i7P7 z8co)67BqHS$>`Q7ZHv}syR!CR`v}RVu5bTB|8tBHdt^VW^Ni}5ZrNGn$JbugoJi&m zji#sA?_BE1X)N-PdzEn;-)mSSUg*WAGFb1igP}WH<6Hv|+m+B$JLs=sLM$Cxx-)eb zKo45zPXYG127Dcb#CwR&>Ks}9D!#jt=Pk%yyU?*u2iGOg2m^cL=fP8Qx$0BA?0>?e zlkw+Xp4q;zL>|hWmY>n$(Ot-x4jzb4=i#Hbbj4@M#S2~(jMbhVomW1b7@?&W?&bc# zxWI}X!PeFZ9etm;;tuQH+NTz|4w%$lo7mF>KbsF+3K*XWPH7w-=F?#ROer{gy78{3 zy!2da-))Jl$8BItcC1O_?4=URzGRI-b*k^`?^a^>I>s8ITgIWEV_$4wy~y^bH6yuj zGd8!|-p>B4aC*HYTb2MuTgu8UOX zVd637Q!nz5f34yM+53}akAXe%C)*+HF_bZenaBP9cQ6Lo98Q}TG=;pNo{?Is&q{XE zIq#MnJ9dDzRKTq#@wxcYPTSQ3{zQ)}mJW~Ucr)d8d!LK! 
zX&kRPFPB{B_0yiC4YPjf?5=}m$dH|sI1Hm%dw>Rn#*As0y(lNXI{H{K`T3;JnEsm8 zMicw{7NsH6bTk&Xb#Auv%Pe3VxkI1@JeM68dO$nY{(&|ZF)AwiWxid-SOzoBD6!O0 z#;@ns^L#(PJ8&#rFwwJXRTkT@6{klm75>JyAnVblPq~qA(eDo8T!bIHmvT=(z(@2- zVqN;oXFh8%o7o&^Ly@OIE%Td{aKOhVKMh|Kh11roE{I zA`NXZhOlqVlF|J?7>^yvvM*~rX84=`@^FQRaRvhAh`pgd zoT)LOzOA%RndL)mduG&STx7ELwC>DdC)ZO&eATWD`&nt9cgxQ(Cbg!KBa8TvN`8Bg zS+(AG5nI^99+L5#LHkHoTBtHZdj~RWL@!@rZFUfI7lj6>J*U4u?7Mni06)<4U*=+G zB|n}?&J686S1!rrD4r|Gh3E68Hs-lUMNvM0354E*0&YWPLLdp^LbZ9p2;QF zo^^X4q3p}5yBYW>m#+2~x?^=K3(%*F6Ype8t^96@uVP^RN)wtpKZKpwb^CK>q8+Ed z;s1xVH;<31zW4vnnE{ze!V(g)q7$U*%%E0lLcpaNK-&;pYE89bHNoEYJ4w`A+!2)l zY8x1<25>3%UV^qYGs>-A6HRJ+3DWk0w6@fxYVR!x+IEt-QMMVD&iDB`XAT)6*l!=d z-yidsvwY5Hd4Jyf=lywq#w4q)HVkh|2zdg3Uge=4&5_ea;|a-X@olG%6R5k9`uya8 zQW?#?#!#ENmyNGJW!K}h7hr9s1$*`Bz3HxAVCeK&K5NybxgI##)g~XWXvt@uc=IfC z#ai{}S!gJ+(uy~Ew($$(aLcKS1!CYBK10s(S!?N=Nk#o-erqYR7{up>_<~k$(o?Cpk3L&a9aDqqCQx z)oSPHXU7_^-IgoJn1dhU&yUbvE$tP)?h26Wv***=6wK{*Y_vqe;^qy}A0EcJ`?RmUkFWb*{q2K&1$e7BYFw(;F7eD@o^do}C3-}2qBKk=O{n~GwG zULw|v95RlsA-$jt?cwL$(04`T zg1N|T$<2{6a}#pq&`VmQxDvcZj=SyLdS?lL(Camp8P-0OOZF>Axgd7%wPWcm3O6Xt9%Bg=rz((|X}%?6GUVDw#J@*SSt2kgFW z!RF}iqM7ejQf5WwyH>t?knbMg+4uPF{v+R|mrKqA%Z6g)dkM5(ihLi2sgt`s`^-q$ zFoLxOUv#iObg)n80VCGISr573N$^j)faa$k-S30%!)Mc+iL&sVws(2%tP^V;(8*b! 
zK*#t7dv}gCFa5xD&t+HB@2}GTtI$!e#Mfre&DO_lo_c6Reh3Fw_HNFxbeKxfJ~CKz zs_}Tx!P}R$p5MOgyZpYx?>>It=C_>RGJZ?>Mb6)IjmbVWqvxwjiNj=1LqGNH{JO`I zC65KjgQR=#q>=j9aNmiXnZo=_*J3XApXvI=1Zto+rl7vUHgUW79Lq zS$&__Z0k7A^94MAfaiUijkfc#Q}n-{I`7R~_w((Ind_zSU-l_Jihb~G9kw{R!aiQX zZz;cv7`rD{-4<9svu#T4s5aR@?^4%$a}*yyOyp_AbS`nlMllD#D5!Q0u#UePy{tbo zUX{CszVv6ttNW|CSAQg1b-$P~o_eVrtEw2;sa%V%NLLYkS+NaMtu})*@G}^kQI=i* zoDuaCD|S@(#5f-hPh{&HD?dh!g(tR8Vx*n*dt#Oy-voRmZh#w%B`Ceb@na~r ztz^A&XZ?$GB=YNHOJZkw$fx16^2h#-*p-c0bq#-O=i#vOzE%iM9`N4Iy`X#_@A2Jp zS>Jt19H?b$VgEXLS1i3Ufv?+)566Yo#g=`)-pV zwv0BrUFA0`2wq7Y@R6YJXSSc=QNBwM|DpWq^5^P%<@wu0Jk}=80dTtuw`KSFPkgVK zMR;kjkJynH*{9q}47}^u=Rl^T`(a9_Zs({l+Les^9ymYYR4AcE1(xUIE<*mlN0u*|ADC&l|SevSUc^M}DO@ z+i}mJN575 zt3B*lsm*HJ;>VMXg4)CB>3s!8+x@>KN2>D78-=;DFVx3AV&HcIqX>QJ{5Om zKe-%zi!w1E_u1z|&!g@orfUe-eq@xHM(n?}T2~-Dq0pY@Hr%nsEc;TDKQF zjOSyg{Ce$1GiQ+4==vt|#jCz2X#Wh#wTv~|HnOH0APzy#)ld1|)oz19Yzb{lquda9 zvV=BgK~K}zzpgzY0?d(CSF@7VKLtnc+)@l<<|*XiZTe>e*N=V<)2C*T(tgZ%ZNH+Zob z%q)Df^j$0dTsS8`ko>QbvF~B?w~EgYQCMS4YU!T}Kf#B& zhI0Y;yGpl-HrO z(5TlLkD7nyyBm2Y-s7}k`;Ex4{|)if(xI>8lI>Q}4Fh$e`on+6S)0@FeE ziPYjR-HYEO_V*n#yu>WghE;x}t8mE67zUA5GqH=5M%YwSt-qw_S*8BtKmStd z(KiE`y7b-2X30>5d@x>j@63JdHB6yntbiV-&`0e76|LEHP!c~ogG=S$QXl>rhX$g^ z;50PQi0>>7uJmWoz{kil)?%tEsYiP>+4B~JS1I>M{7>@t6FCtU6iK$th3D16`@RSd`~tl2B4nL%?7J*kw=S2nJ;2|6 zPlaODW>FTu(zzl9!Qu=IgzsCR3HfBDmkeZV@qNtE9@cRbTj&PIq_=3_^FHLZ%Dpza zqWS=OfsGHPEhg(#%9&yF#WJf6(=6Lo;x39FLT?vN=zFzS3r_Shk552{edLNf$okI# z+VcSWjtrj^yzxC&X-~U@8}Z-R{82EEP)@k90KG-$kD%90tSKosqY5oCXMX-M z4}lk&7Y}*noq4FG4(+e@n(SeNKgRx^t{#?$HhwJ`0XN~2g=gfl{6CCYy4zg-jmM-f zYHyhfJRXsIX8Ex0D4(j%d64{;ESG;ya@@~emu!7&7X96FW@yGajN8W^1@&M06@0#a z~;1AQn(heOxxQG9`i*uOVl_D7G{IJ9lhrVpLXfcL1M#GC63;ewbaY3ZKw z(<*L#=+~Sd#C~A(+#cd8sV5MB?UpcfvYC0EajkilKKV`7fIfA6J&yS65+WGLNS~0Sz zSw_k9yK*ca^s}CR&ZjhtT}9@ywYM7n>Lr6lOgd}_I%e3))ix=<>Uw;2oLTBIR&oY{ z!5%(Ob9$;dvmH1kq>H9(JS7=OTfY?}KOTRB4fz`lbzwSKsBs%h;L zb{os^>y*ad`WPO`d3%k;Mty{JAlXH=6YzPFyFz-K_7N%vgVwE*?&5eK-~YvJ`#iK? 
z1)of$Xaw zrw8%U_dTkTu_&0;AgBG-ZY-ykN!@Vh)_=HOZP?Q^2c4zEP<*3#j{c@HjCQ zThYzFBy2_HaTV<=wp-6S(2?%IcmBHDKRxUU#md0DT5I3OJ+GOpv7SI3?dV*M_^2!B z->pVPwdz!iKJiib`X(t?Noq4N7JLoA(YA|q5PhTh0Vh+wZyQM$_@_0Foog(AK}xac zYYu(3I)QGj990Hwy~}rM2YT-ODL%%A@y4z#(1iBeS?y)UaSm; ztLj~Q#z+49tap-!$I=C6p;w{z2d(wgIh?DVwa+>`Mnv|n#x7c@-GA>YFZ+V!Kh=3g z6-#S2)Umf9uoV9tbD%tLy~u;P*g2j&@^7)O=_3YiljbKgj{S^Naep~^Wzk&5KaaRK z%?)L-EsKW)S8O6;70RN%JN-%KhkabdQQ2{?r;4l5p&v5%_UmVz#9EzrTygx_tBf`O zP5)G{Vzq?liA1_;OB;T@5x9O!2G9Ew>3YHDO3H5{rX>Ncm4jc`45TY9{NnnHy!)mF zOWVI~pK(17ooPHWX97A@KDyLI>_^VKUhZ!56T=`L@O{HKF>uF8S3uVbt+AOWemP(+ zD+_|R9d2JT3|$_|@*p_DU1$m?2 zYYwm%G!Grh7pw#Rc|2big9osty6!N0N%?PqcdUb7_ku6$n#_e8Xj8s|Ui!0cp}A0c z+&bD_%0270Rdr?wIRLy-wd=+Art|I}=BGWfl4bH?NG?2s?u0L*Z3FoNHZs;GLXz=g!5&EiErzwqpWr1w&;-Q`Rw02$=>XB|}?+YxAGq#yu zIl%r5^2a2IOGq3TQ|)KoEPoI49lin?DtU^}$JG5*p_yCAXTKBwy;H|U`X0Ms3pkNI z#zw}d@nnxNdu$18t3G6}Gqz_kW2+!Hi_`a0oxU@kk$pdnb}Pu?sD9TT8t%7j-t2zM z#!m3H&eUxI)k>g_fCCj1>a8Q z-hstQf`xboww7qB#sfdbWzVG^QBaU*cQo zOwKt_ZfNZVo-Mi8Kn^OdG?N2z`KaxQYmCE(7w6IbSo$%J{*0$z6DXG-JG|L-L|%$7 z8N|~Z!cLRCOyO^pocw|MN$w$wzTrcUao-64!0zsZzx~drdies`gVGfnS)*MrneyZj zlHBYAUtPrU@GiKUJ!_M|J&$EC%YOI8_mG=%4}Rd(dBlI*G9lm@Z>%{8k8a05uKk(J zH@TT@xhWlZ4}Q0+sed;-s}YziqA!|@JNU;Q@6=giw{pGP7fLO*?#tuf%9K(3N_ma} z$cO=gzcpLW%j04G*Mbknn#%>NHJ4?~<>kQaOTg|jV0bC8yaX6u9OIm^!)apKq@TeD zZYOP|UfzjQ0XMC~Pjkq3ArfJFq`*`t0ie)+GRzVt%N&b9DV z@=s&`s~1{-KV6mLT)0EU$W6%|^sxy2Rox-}e>RReB%ep0?EeqUh3)!B4{GH9Cfce6 zu7Zu=LfaNx!m~p&+54WlW4;+EZe)G4)Hg9a+n8Bf4)1i?@N(MPM4!Ru=~jFAYV|KZ z9RPO9Wg?xbhISkEPrFml5mjbp@%PtGHjQd>l%*JdTMf2=#_X5v&$?D0^KP|49r6#T z&(ha425WA}m+2)ZNey{X67XJN9UQo~usmS;b9Ej}HSf}YcQ3r3I<0cZdGQ|KbF9Im z1FFoC@9C55)va0IPv`p`e1AIcvcJCsm=%FXvdu)FYFBoFeD%84JK?MR`3KI);4Qo= z8!x|Eetev5;N2CUF*-xL(_Izlg~imTKFeRLGeRU^_E9e~Ce?v`e1tZLx5%cAOFpHI zFYr#d`5HXCou7Dt=3aF=xFGrn&o^S8QfRVFbj+DJ_ZZW6{0%%q z*B2g#(Q|hEn0aFE9)XTO%)n}gtGH$0sdZ&LSYHvJ9DH$IndW9be5dv#qh4^%<9`U> zWWdbHIpi*f_RHg2-xyPUh&@6>hEaVGJ}SJK!kUp_E8LJ>U_IY!E?iJ<^9jAX5<or_o8(b#no{J 
zku`4v%$qZ3Tgr@yg5OeT)9NREtC(QVo4>@UoS91wSl}r>?3C?AXVe(TEmg0170rgn zM|zr&Nzm-}FgAev67ix?PjyqcdnJCreNzqe<4N%&csYV+KlLf&Z0DWuV=Z{GnzIWu z76%vhmlFs6OU{hS@bTx7k=QCd?c@w}`1pQDP8^{HR<>;_w#PX~*cavw; zX|Hcn+M)}G9vt3y8#K`YTtySYz5Vb+^>Y}`*auVL>ClPFNd^L|{2KJd7oTti_9APr zW$O27Er53#ZyDnyrhDNKe938l-_O&Ps{c?ibcH@Cn6uaQa*x-zycxI$-)>F{{{FYa z$-yVdrD(Tvx#Wn$oBMy8uK&Jpk2ah+T}sZ72zV@-7yZ=oFWDr1Ao=Xd;HtrYwoa3# z-=@VUJzy;oQK$Vq%=CjGi|~l}zs#WmKimsnpL~frlfHH?oiD zM|qY#97IlJ^b%j37@sxL%lh0#Rg$@uf7bJtF`wOknjsoVrO83S84x{tz*q6q?6GP- z9vm^wcj)iRtY<$5hUk*uk=A~c6C{VVyqq}a1FY$V4<4-(mc|$Jt)Ds4Jhx28Zvajh zoE!TQKBvYR@Zfoo9upq32iabM{^?<#v5URy;=4CLWUSG*KSmx%&p&sDQ6-#^Z~De+ z;xaP0A-t%&=bqKM;7E{jJX^S*_I69Mfi@$^`yjSn*yOvh}3yRF6ZU89L{Z9 zLHvAkR{VU&2_fZ)N;Q=wU#Q79o~tM`>V=PLKSDn|V~xviA5CmOZFp$Y5B(3ZX4k>l zeSgeh9;X>SOW4bR-=uycd0ym4+A@x|y~G(HFP4z&S7Bzqa3opk2@;tOMXF5Gng zvbF4orr8nglY>nYo{{MzLIz!xo^Z^a!B(|5FV zBwrnV_if$@r(gS9x)UG&G3ov8D~(tyG#-PN?KX#LUG#H^xt~1E=+u~5&$`@ambVO| zGiXfxz^j{cKi0X*TdXm49X%%bpZ|@H+v0?FB~J zX>EZqJ2auO3g09T zf5n)NwwZ=$Vk7UwW6!~_Xz&x>enJL**_MGT7-P0xD1YsXv@N>Wk5A)8#$L_(j9~BR zV|G8}-|@wN_=$cD!~NfQC)(WoM%vN?+WA*Kj{JAVCEtlNkH^vp;4dS_I$ZwSyjzW5 zQ}%y||L`&9_BM1WOO7*Ea*yXr&O7reex`ZKWqn>_)?8_>#fzx3bFVY*Bl?KqP<`=l zexgsqHfuv>+~>TS?lkxv3;uTlfAJ*2UwwCUk3+zsQ@#$?;s*HnM}wno>450=;ME}C z5HHi?!554SzT8XBXYz0H+~^_Tw^Dd0y4#H{FMhq7vbvX?)Vt>yTN0kKXKJXY?APf^ z>}c6MamjLgU*|04zZzLpfnTH-yP|J#sK;dNpW|7Gd(~Be&sw}*xxgLSIokyf_*J?p zCrjth+z(P_!@+d1uFv3|`XD%q7bKQ29{Lx9j>tumD_S~dQ)}7oB}UKuU)b~xJ?1-j zmyIV0%8Uli(%Y=}mfv6)FaBOUtcO+sPr=yMZ;;IkZ5?zFAIZb;fOQ!jun~V&!W~LI zk3XRnT}Ee#I5=wKe1SFhf2HoD+jVlC!RxZx6%Xh`S9t?E9>kZDK;G5fnC=|p-@%Dm z%`0sZua&|EJKOHB zZajb=)e9~f-rft0+{gvl^?qw+i;A(EJt5;z`C0UR zvT?XP#D50I^?es|cw`;lq>jGN8lBF!s%sM8Zv;N-OUJ39SPpQhL!K*kRcpD$JnyHE z{t4`Jg;o>PYv;Ac{{ieg^jYu1_}L}9e~27Ze|4_o8Dr5EcBQL2-2dJ!y%No{?Q-Uj zy^fvaChXa`{KRhIr0UnVPa<3FeJQ^9Nqj5)VBq9X?1#&*?Y3!`a~$(Ie~VvC@>=6X z4+>GI2OAW>ch_I&v({9i=rcja(!d-K(ofdqdz^3Wx`_RUt{=_R6Dntq8spD?XHKFm zeh;V&bXW0Fy6bn;&v;XeJJk;?tg$QpVZ_)!L@xFX96k1l^s^U!f7IBiW8~O(NOw4q 
z*arG@)YvoqY0mUVV>dN+Xrgv7-Sw3EMh-U*eZ5q60)0}hg2njjB7aU-%>piCDdRl9 zjpx1ZDK>@um%N*h_3kF#^`XB^;kn*7bKl6l=t%b_I`}vA{Z9LPcujl(_aW|&l!4a1 z##RC5zIoxaaQ$KG z2~DznHHHD*QI|t^Bg?O(Y@^PlXXF2l&>5?~?VxA@IDu?=)Ly-HC5sboW>Gze`T(JMEj-ciL~D zvu18>33RV)Y3g3bzf;djS57Wxm*lHH;&AJwPl@l|!N0zXAkR$Ta2@>$jiaC7TT*lj zZmCVfgbwM$yYBLMq1Wj7Z`v0R7q1o{e@Kj&s!at6MV%DHb& zxBT?4kei1+gF&?$qFwY|t6l1g8(zEoB+7TVIFE&W$jb3pOxaxOUC+7)XCTMVyx&+O z9m-eXeq(EM@#Bi6@=rkreqc^_n0$;)*K~C2-P^pYdC*Mym;0>yz=Gn(1FY+GDE5iC zWckr^nwqSfZtB-L_uRWmYbqzvFZ^OD{O4V#)2_xUnw-q@%QeS*C%s|Iz2pj{Z=NRa zQlco>*FvJ)ChIddxcPK;Gbb0c|t3+4XN8KLJPr_s4~ zC0Iva!v6sOiUHxwQ0qKT@^S`K$}zFo*Nu#*_Rv?a{Kr>VewQ9kb*RU~8YMoYeBu1> z=xfDwiVtF4L~@Mx(sjUT>-`qqH(smv_B+W4>m4$86YqMzW^{V_M(eo1I7rOKME3bx zwqFZnz~h!znx}O)i8io3v~JOj{xQ=d*jYA5d^*?AT@bnsdib7L%Wkc;h2|r3(63X{ zX*V~Zce;Y)`Di%+%%0eMV)t*ZHtJLGhYs?<<&#T5cBtx*pC>^)^#tlV`^>yH?I%#Z z8~H|czl08(VE*Kn3ZhF4j>Bg{Uvu!6G?Ws5Lw;oJ+6kvOH_vGC=0zJ#-oxzNjgAXK|$Gv2tP_nZv;1uytnCR`Ah_PMx#ZcSZ1;K=)bK_`CO%u)d)(^da6% zIpi)hb$Oh!$_rKp-PGlmL>s`96~#s+{-9MYg6n)_An!$o9^SJ?Y0;OzC|Ny#k6v*0 zTxfLrFA8-NGcf%ba63pJHI`2Hw&kGz1jZWMb%x+PG{TXlX{0N7_=nc=RbGR4awjn`#lG= zPrre-E}_k4bkMO)H+Da8b|@yf|Ir`TmAy*)_w)OWIX3qhGcWi4(c^P(pna{UIDOYV zsNb3k^)&`WUk`ekKbn_^U6=0dk%yehK>{^ns$^xjMR!Bax9I@$^7hh0F7 zD>`yn@CQC(^!ZNZbU&H<3$p5Q+7gdB-RS*&a4~!a-Y!0)7$k7z@?313Y1agq+dy|> zal?OVO)khBa86EC+^GBvZ3Htgbnq+yES1O1o{tu5eDUQC z-Jzovh8NPm_VO{?T~~h07XmAH1ieDb}rX>VGqaeY@bCXi@mudJ?>2(K+3HvI!_> z+iE4j&6Lx7l~Wnfrsz-f=3O+SyF2&ZfatCy*T`3FwEF%MzL_5s5XXdXO1PT?KUn}z z6%Qc5Y)$)YqiPdnb^kf;pO`aTr|RmPg}>9$-|_q4v;1Al+`LctEW=~)1GyWR4{QS? 
z`2zcGpX=9JAK!M6*ST(A|ygt*PBs_Z$F$8%$ zSKa6wl^WAe>0=?^@5X<83w69s9#ggBVK0ezmGre4nfhn&Tx}Fi3B`Qu-I(>_+A_s8 z5nsB-;5*gV-Ewb0Fcmy!YmR8k!PNuQ*95!*#A=hb+=Az1VrsqzJjwrJ`E}IhyVTdq zSxE9nnPzBJNv40#(^s{tKI|F8Srt#NE_+LDPqf=U;4TQNJ!D&tGoR{Ht z{pe(>TWy)9F;VsXih3%DfpcI;e3s5Yw)Ius|Cio#zd*JH_dnHp;S|@>RbSxV0H$hB za5#YP!Rd>GSHb~n{K<5+a7pXuPv6ztjSr)38+11h+KkPg*8S}KAh-Y{3UbEOnpg(!-*=Y>iStjDzkLp}qM^H%J!bwX_)=&u zXVEY}5HBdWI@m3morbK;xjN8o;q}$ix;r*s+wJgF$;uAo&Ou~Y5FBt|>%eu8Jz3)O z0rq~1*J}@EPJV9miqWj^z*h#@H!+VqqRr_s)y*H=4Sff@^}LxepXDY`tvfHMf5j5i z@)L}_`_t3!V4nK1!vy2&7@yj$7&i>_t2GB>v1?d2P#F)rl07uJSMa9TkXD&Fce9Hc}XB@%n+T-AL-amoY8msU+n@*31(-$8Hr}I96)4t%m z49vd@PHUf52YBTn50zl5Tu#I=2b0kGPv_V?@?4&Eay<->!r<$`OSCn>_yr5_H|2f1 z(i(p&bZxzZu06mZo3@{!9?`ex+JVJCqHn<)D?IncZ4xh_M` zdY79Sf1UIP@WT&1JO9M%bki|;&EcMTU0A#6v9fbFtu4C>Sp5L|;XK;Y ze|6?xYw%}t?dZ?anZ>u)pbyBd-T-Vq!+Z5z*Z;A+q5Bo_BlwWU;P9f^$C)=**1V}c zr_Ud%j^p5#1yjc80>{R|GxIY1Qsa|-Cf?c6?+tX%HF`FzR1URCartfI)^(9}{S?>Q z!}gEPz1N&;48l=eznp=S^b_s%JG}U3vHi&4eOI}NFLY;eK-p)ADW;y7MvMQ=Vx3fS zv}hEupIY}~?WvbIq+aBiQ-*btdaY~rDP95}hdyrMU3NXD zIXm_N-?bk|R|>w0eRR&W+7F*Oy!aO#iU%B-pQ?cVoP!TL%$i>#b$ZZIw6>u*0^zlu zYyZHSo$Tjl@1BeCld~Wy{X3B6%;B;40IirtPy9UQ%c_I-_@-0j2ji~|*`~Ov*6++o`niuRyEfTQ{ijm)Am5G5=cVs`s%K)zii>ZW zj?II8puSInHiQqXv$n099T@7G&3-fH))T%h8KzG4X#(Hs8-2fz`twv5IiPs-pKM>dxt1RwyDxuVp%~du$?xj+Zf88!{x8|?T8op;+Tr#k1MKzix3P}Az_ixr zZ>K-r!|7)OSLe0Gu}h~d3g21h&5bI*U_p`nVIi5p&67f8Z*af~H5l_vt+n8U=Up696=jZwM8OGMT5pk6-@Ge2075}j2 zbKnU-_G5fpao?s3T50>vGebSMX4UI~k7uvHgwHg3encHduY(M)zx+GzEI$YSVq$!( zJ?rvIu%?l7<*W(S#JE(lpYUVj#@-A1@#(2#^;`b#}83LvHE9F#RwputU&$ z*Q`LM&clBNTn4}c*6Ee6pIqAJ!Yj##Cp%9zSl<=eix^p@J&(7MFC($U9r)3Iv2VTk zwi)cTU-)3!TR``zHC;>N9;eYaY~xi-{HKaCw3q$gM9zzgFBV z&+jM3ko^uj#Kuo=^Z=R8URC~YWi`pwcl-rZ5+>&L;mpUtkw=CF1nGXD@0p8V^ zMvG$H@NX6B`_0tFJ`Ss{37=BeAaGTz^_XMS<#$iY#qXCd{UjS+ubh4~ykL-PDo#H> zy#9BB4KIV(k{EF%eqbh_mEa^x}npkVZy{fZ4mmp_8H#qre7!B=oY^BNpt{|~gF zeVCrlf*arzaq{)G4d}b$D8qd(F|=E*<9?!jkFUADR`*5reMs-G<-W|mH}qcjCuP3J 
z&ffAh?oZBq&wZ`#PtAO<_t$VAwC^kQUiW9(_xQN#x6J4MbM}27GPG9r)9ri7+$~pg zf3AJs&b`L$*R#*t&o+J4sDBY0u3$b{mkyr9zv6V|`?!jK?d!UOf64Pl`PcsJ7x|YR z{|Ejj@~=AgbFH;XT^CbdUgo-*YsEF>l-YD}`neat=6!-i-u~&m% zCq5IrsvYZz4&b{e9c`q(jf|4tbKl{O6P1I+qVIRp^%ath%=Lq$>P_tk0oyl83Vd(kIC9~1fIPDo~=E%9|6l3&Nr$xhCaq^^Wwtz zh2XTo8iV)*vI9EzRNn!APnJh|79n#So@3jTc5F{BeUX29xSzwZE~B#g`6g>y9sIOL zRFvr#_UPfat~X+D(=YY+m-Opu;Pq?%(Rq7boX%XzM&mljwe&DuFQQME!Amr!w2i&z z3C`nwF?}RnCOF(r#X#8oWIvs?H+Z<8!|&eV9qS44JAor|#Ofn!8LJ2QX&t6y6mZ0+ zhJAe4bD9w=XMIflxSf9diGHacYFGVuhkNydb)&3)XwQ}1kBzh?931Y)@Vjo_36_%k zR}*hMUHiR(SAu?c;ZJX$z#cB*A$pO)e)@49{W!on)t&Uis_#e~jMl4&|DitmmDN_s zk$(Ie?;IT3PtL(D{F;Eh+AC&$yx@)I=K%4za~a<&tS`+ap2b_r+93GvL1(pe1n&1t*omk_N*`-{VsMj@Vb+8RAr|~ce$AI(kZ3qWYdSO z=M2a4s4Ov_I~PK@}fTWCH@k9cnUGAJJR)nx$xSd zAsgnyII4Xgg8SGEY>$P5}2kg?f~u&-2`) zW$3#-S})1&hv>)7mF0_nIBukFJKVp&^3K8Q+gMAj5dtR{l@t z|9Ing>QmZepTQB>Jj|SA(Gp#H!FB2ub>PdW zdw_f|^C+Lkx00RO*Q9+zgGG<7jgr&u65^+y0sqzojD_!_BW^IuKljYF=0fG`+i;(G z`Nm1a>hS!L{G!|)voBoe!GGP%y^nKcrlNWqtfFo-EA*+z=`PjjP3u0*L(^1 z9Zx3D&4O)x7S2l#7tTAfSUA7pR3r8naNTo)v0ZS@W$bTm9#fso>uh|t;~78qDeX?< zop`{W9qG;4PWengb6<*8ANPrf2KYL1JdJZI4sY zu_6V>*QImlgKUpRVo`-3(w&7v##j$-?8pUP^yu4>U+8cTef@WntUM96y;>AM<%{f9 z&9-~kGt-LQV-oYOGGl1FoxS9O`?2hvp|P}m*YsorIp}w_qdy+q?p|~}a?RyeA+H+Y zYqCv7QMV(H72o@g>OlS_(1lELU_9}+dtQ^=e2cRXq$8&9@+TEH-wd8fZi)XY?qLyQ zy_Em&gR>X&uUJi$Q*24XUCvsIkK8QQzSUqU>w${-l#W?rPHy?Zi>+nzr)pnb*-%LP zyO@thn(s=k~U&q%PJ;FJGClPbTyIE8vgh!XG$u+u^@km|u<08P|^( zll)2hkQcrDgcp*9MOk?97_#mlI-qpCqxA%q`_oiizXt7^XRrNM7}!9wzygvYUdC^q+ni(IJ(d(qs312mjia88rVrLy$ zoxKZYn@Z``Doa~(QNBd3xIhZ7f zC;T*+u=aLbn7qxqe+njy;aD*FZ|V`e{y)Klx{n8wv|Bk-UFS; z>|@AzL;mhO*&-%9N_wvej_N$CG4P=i@Gp2Sx=nGWn3rlt_+#oM;!6(SulF! zI50XuobT~qw3~N7MK()5hmQfHH`t^7X>i(${=XDBIk38!vGtLQ%7M*I+&6M>B8&B` zS?~Xvi3jHXn|javQsAWb*X#XXhQGg#`zydZ(aiuj!TLx3By&>kIL0l!(X$1}A>op> zH-s~R2EgBesi9bq@mvEwrJ)%QdliFC{_bI3J`8K^**YG4p61HU@TpFzId_lM|>x?ydgRC? 
zBR;Px?6p$HDLJkFbZOll_Esptp#FA6$6PssZ9O2Z?9QnPaq_M;`|9 ztK<;F>cvMmptvOZqkF%*D0l9XoX2zNbM9CD-5unTLawF!1BGdS6n)Jeqh#|CxD=X0^%8+lR1*b&dSEr+(zH+MC29`SnON@ph1l{G%>D(Y*Zz9H5% z6hE@AQqzHJIau4Dw5&qiKIrya`;matY@}F{fLu^gJFQAH4MT67CPM)+8TH z?Nud@v)*jyC%m`h@g`zR54eh=dLAQpCvdf5ty7nlJnmU|) z8aFHH2hOq|yo>Gxz9q^9(-s4+3HFBCG-Tx*QarPzPtOOo73k7Gfrfn8Z+*b66@H&X zKDW8>n3@xfUG%?RaLfaKJ|k~PaLxsuOQ0nO&Z4!ba0`0V95fvR&NGQmP(S8gRrEM~ z?Q$!w4SrGqoPQ3i>b#*<-=mIy7S4lwD|=5iz@E!$;lE#m4}SrEd=Y%vIXBYQ+bWET z*R_6r5dLQMS)d>L&s%M04d zm}kim$p}uftm;6IW1XYrdj*`kjvlA+67z56RB&{&4s8oVk6m6JOa%==c@ZD#&XX_st@O3F})qi(9&zkQF*#p_%z!Sr=fgjKXrh zQ@ksl-Y5!9#`^T+%sDrrh zVh{W5w$cxc7u_tl1^6fqMl?5=!CgNzy>`C8TV-z9k9|ye;E*r<`bEknf!`y*uVh+5 zo8+Nr!)ae_hPB^}zF$b2jl}OcFeUEfxG+tCn}dhb6LOCQQ^69Ls*MEul8EE7U^$ot z%L-um%3Ys?WkqIg)2zezxh~7Vbrx`)tp0)9vw-XCBVg#|90>=89pvT{EOr3P;!J%@ zxYin>p8L4Y&s=Xr_DGK!3D56m;JF%{%!X&h{_HrMi=crEp@BKjz-(|`@i@QA#N!-4 zcEoE8`w({G^IE~4fW4N@4Kfc5uEB{Cwg8=6t|i9-RL! zIgnWkT7v&(DZa*QzB{M;CGc@Ka0$C2Jso#-Sp0SJwIz?YP7S4gKzZ<~Yu!}#xWngK z(Zf34d8zwx^fk#A&()M0!@DzQ$C}`Mb?)oB@t^NP-UbJW;aI{o=Q3K?fw4Kk6;q9# zX82(pvC@9C}VzB1aL^(e+ z(E)r_#zXF0|9zpP`UpLxIDN zpS;i9$%ownFPXcjrQ6H958Qe258;EqrED6X5Np8o8_;E5rC-uD#xf4?TiTb9IiK9A z|8Tm?LtURX21`EjZLJ&L@L_s-6aCvtykqOjc6?S7xHgx2@jP^<>HxMvbGS*mRDN?f z(7obZ`cLk5*d#9th|1cqO2#hP3O!f(Td8eEN9ckz2wHN zA->WuA6`3dZu-&dIe+q9;5s{!W973Xr$&6|Dfrp(&+WTvR?IUB{J@4eyx2Z}F}-;d zw#Haw+BoQXJoG(*a{1UA6Jv+xxZB>l%0M?Tey6;@oRNraY|?K#-*g9hzZ*Y8Q{$|e z;o4BolglS3YdEX(YGd76#bOUZt8as+2EN(_5o67h%fFVi{LbJ=_-mn_J=I2P3w~tf z;s}4$=KI+4><26FK9x8?0=Z97mljTsl11#gW>u} z&kSItXWl>G^;+Pbw;pHxZ)gA>kw9;qyUeI&Uh)fe-}71_@$N6&d-v+_e9z-euUvaS`p+Tg<$kj$`aRyYm}9r;{g9h;iGcI{=uqs%Pd(#4F*)zstlhY;P4=^Q z;2C_7<7w+Z%);DV*aE$L^DUPdZTsNv=RY!yZR0ri?krb9beH?Vwc~kymMb?p!Icvo z`_bL6O(@EX=DWsnUl=``pFz2g;9X}K`BCC@$yMu(Pa$uni=V-HE)yu*-{eoe%M%DAw0sX}_Pkof>fZvGs{VMalaHWD=y!x*1v`}h2dtw$d z*UHhdi86icdlVo2FRpK~o)^Sl%6xm}twz=3)UWmvS1B&S$F``*s-5@(JmdoISYFm0 z#>Q3d$yQ=B6UdWF<{Eup{;X9U?(*b;bBq-GZBj3}Cnr-o%-8|(Qt-^84dRgo7{^w8 
z42r=G$QI<@3;s4^_bqYf49!NbXu@t@g003mL6+@-p1v(e8GVbr5N#$8^CjdUhwtw? zgMZnW(kpa!-}TU+C7ZY(DyHt+?p-y=yj{P-&TB5aQ04LM4>h9WsP3lmUagH=b!`$K z*0<;&?`M6#jPF&){d|uc*`<2KvsI6&+`IMm7;_nu`1F#y{M}8=5%nI~d;G%Y>$>~T zA*VYzla>LuMVn!u&Su?Z<@O62R^I)3@4TEzrZ-12 zr4+sEu5qDMc-)1_rjw2BYGXI>Y9-Dld@^=tF0rZ9qkXL87Z0xBC;F4$l-Rv$|EP-U zCdzFc?Pm|bb=_h1JjLDflk6Al^m@;lzT;kUK{D^vjMsOMxiF-2DUglc)9tZqtO@ZJ zm#0oRqVcW=p4vkYqrUj^`Q4nYxG-qu1l5O?htidC#u)I{cX=ZgN%f{(as#&l=GuH)p=>1@`*(y{z}-N3B#H`WBtA{(n_3wCa4X zGbr@UA9${Dqth(h&p0$jGnKA)%3Za0Otsdu1taH~$|v@b=W7{xr*mdhr*@}1A0<9N z!J1Yvc^M8ux7jj4v@1CP9M?2%GCC(`WPsLY>>O+Ox{w8qEjkKXAueT2A8UJ!UpA;4 zdK`auOt9!+ZqQLu32=yYT;(rMv4)-^o~$Q9E+ngOC!yzMXf_Is3@FwN+LE6jhq_y! zzZx@l=w#@Vd9mnLwygX$*sa^FJ)#*porlgR+M1T3(`Iljh#ad$2THJSM0Gz+pQM{j zyV{?uMdw3DWFJX!{2E~V0W_Ec4Nrr&z7Icd$G5Wq+PfbbY{XCF(4qWM%26wP&=^|> z(w!PhExG@NAM5G6gM;u#3kSoQ`PSSyIB3D>uVbno&&+SbG3Iv)xbMv6Reu>XU1$5e zgY937?zIOV)lK^^uwSE=eHM3K!ijUnHhC>BG9kPH_u5KZP{bFD1E=FQBN_OD)S z;$Lv*4L!k_Tim&pzil?x(ot{VJ$sC|wYc-5b@=WsX08Iv)eV`sl07wW#TR1Ec$_}I zjg0yOWB!nSPGfE^W^O(pPhEt$5r5pk-1IUxuW~IODPE4fU*CbO4(HK7^2mnH=4?II zF~aPf5KO|zB0pmdpJr6Q$h^Hc!>C*joZ2(I^33OLelaP-D_mF?z;feTYES{Lo@Wd{hIO3*UGlCc&6;Dtr?!V9{hyQReRu> z0m{ZQJhK*_IU~a_(;0qw?g)PQ^Wf3^verG!FZAx zQ`CDberdpi+j-}}HUhttZ%NENcPdJ1@`x?iVm@=H|ez4NW#WtU5^ zW}-67kvl5CHLLFiJ{!H;mU;Jb_PdYm`a0Yt)6a!R%TF;2yHah}()RqR)|yYSi*MS= zu|4a%p(FS>3tLw@oa*kRZhiYS#q2=}Ech3AqP4jno&bm^}l1LD^{&?TBJud;9hj{_mJ_#LA1FRO=kb;Byvuc#7`qn zX~#tNY_TW396dDw{_Uh3@gDHG>yjq9F;1L|&4@43Ul1YBotlI)&)D<*I4p_{Io;O*aoj-J2<%30{%F9iDg4zH>`8z44q6Y6nZLh&pY%o{vp`| zvLB@DufW!m{V)>0y0Kk;2CYf<*8U>h6=2-zqhyo%>AVYXPIsNmyVv0X>YMtt0GxLE zl*@I~n%M|PeegSY6zQ`Fiq1281&O{GdXO10ufc4zd;mL92h+@39 zaW2~>TwCx*cT+zEYo{Onjp;76-+FWM1xwFiPuU*kQ2T;XJ7&e$yG<@l^bm9e*>Fz( za@M5l-#C;m9%-=&NQy^o&JI zR=sMDkIG&j4_rQWkHsJ7Oa6AWD>)&@*tH&+z7c(;ojJl6!5XY5I!GNJzW3yhW$mkg zHO{fy^qh5>;boytAGdoO+rN2jXE=u|pmM(4X-icU2io$4nqok!k` zg7__NvrTb5vj0~2ZC<0X{^X^_0qNiDAv z501^*OTD2H?<>&3SOc)?=(B9rvS9UyGU#4M>{bIk?6V`^lb^W>7>H;1X-}~U+9!7k zbI|^v8jnN&a%-p6VTcta?8_ 
zy52WF9y7hwt^Mv_OYVioX#F$~JnjHbr@>ESqj|>JLfQ)~yG1&c%8?%=9{z4a(*Hra z>nff-y&S&-^e`JgO$&UiZmI3-(eou{(UA1kS6sQ<0%LQdiXk{1J7*L0-prommFO1o z;m^fplU^a8^IUAvt>_xWBedW%uYa0;EV??7)VVzY?ASQ;BmP*I|NXUVim?6ZcN{q) zJ50|vA+I_zeP2e~??b=RcZ#T6*WR&3(RJ{emw7jU4&dWj{wtM{4+`H$atN94WlR;= zR*Fd&$)8oGfw5Hpn|a8%t*)GHD@}WiJ~v~}&CA$x^40awKhaPe8j4N9R-^7(V(Lpz zF=89w6%I^WciJ(ug0pyoc!y*P^pz5SSbHwHg0Nd1p7*Pt*z{~-pZ$h02ybS(ZU0pv zyk9h;XG_oJtY^;5bKZ68e7H-|CHZdx2l{|Zf_e2K=ck=)UrUxYF*h;hqM1JH{SeQz z#uqYsFYp3mWAxJV=Z&3}oV(PQB>n;YLou!eviDNjV|W9yb{ysMs+M-#G}=hrK-u@% z=PsYZhvd%Fd_QnssQXpcKz1Avf%&1CslqbWt}p% zT?%i;p3u9=S?_wuTW0wWu}kkRBmX?KBfC^}$}atZWtYN#`9^YODR!w-@3383792<2 z`gSzsLY7@h&KCNjX9myO8DA@S=6ox=RD4c;!46^~#h2w5+{%839PE-b`uA+~?*a60 z@w1)CQSmc}Hx07hqWGbayr~I(+5`{wL+9l2sg|BBnpFNg<(EOvws>%AzCU>od@a&#uAloW=f-n0WVSJC58iLS zF-cy%e?4g9BM``hS&!*>$Vl_0;RgLCHSJJVyt5@03sSfc5B5 z4(?CxA442LalDDO67k_?)=R{PcOydvUQ2g9hQ0dF^tGQkRbPGuyrqj59bWR9?5V!u zQ0mP)ZQDt<)gzCltGck+;3aWv3CsSfSd;F;r&>In?-diG|NZzxa)FnwKjN9z2GGl_ z^6jhAT@O=UWmKoir0}Wic3&8Kj56guVjYsh{ZP&qr=0A*2Pt###B|rklzD`{G0(xB z-jeEHdXq0|9TPcb^m#~!@%Tgl&o-)&~3=VPANWS&2C=fY$fTKyezyODCf6O89(@!xAkF4%~k zCH+bE+(y$FBA%k{ey*pY^AD11TC}QI<{PmWg63HCK4=F%`6c+w$HYUNJ|Q>yAhdjj zF*dqkiqUo^eUyDB+mJYh3%+#qP2I1u-u)0f>0|eVVE%`9zjk(*{+Rhz{@=4EOo%?m z_Y>fSHLeLm?BS}{d^Gb-gXw8e-x}zb_{6)&FBf$Sb_rrOf!UA?U2z^|mvHSpE4o(x zcm#eMKKxyCpe^D9hlb9Hu6^Gf46ZYCTb_i*ZaCcd8a$;7cuw44=FHp3^LOFFzs4_5 zeBr9oCwQV^?9)0kH~0Rf_!}6%<)1feHbl&2&&WS7I*>mO8B;%(_Eo0h&W`grpVsOZ zeaK%<8SN{=7OvEJlEl%c4!d<0s2O)zdwH}sgfj8)5%_I6`1G)~51}w#!nJZrk*m#$W60k7lyBAbGwgj6 zyMH(*l&1py5&wzSt)+7idx+0w6h2UQP_Y+j*C-1|`mHjq_)g-EiX)LPmr`~$wykna zY(ai)z4;!_L7K43!#jihQq@aa*M5A5#)fazT5Btgzt6q;uKgyBJcm!FTIqW&K8q6Y zvz761-Srr2Y>%E{v^-To7fX^^3xG|pS$%e5_dr&ps9GnhJILkb{ zUfNQA1*e_Ei|snqp6XS5jh|BI$#$JV=X>?dx$XtFz6r*zozRkec?o`B04DqgCCh}P z-aYrO@_X2O$vNf~qm1!d^LKHb+utxF&kW5RyQ^Ua`v1(Yo2Q-hZGKOln0M(f_pU*u1{D|ilWS(Cz;`#ZR=jV@jeqQGJ 
zc_W^mn|Xfjk>_6@HS@FI=BIwCPm*_2COpaY56|?#KWE`941K%*QlopUg+q-5h__Yg~*7rkNhEc~DRduQAKW9Qia`4`&%lP4|nIq@@maO+b&U&9Fx60wgCh=9o0%r1R^^wC4e6sRPc;KOm zC-F?OaU}w+3V$D_4)M%B*4gj{K9m@Pjze5lfbz1jmx2?A7te}Ozvc7$2t3L@`<8sd z?EAIycLlR~)^~1W-Gf*m?Z5PA@Y6$k(uew9 z&5Zl}quw2TZm`A2*|!x3uJG%RBRQ^==#l<@JS8-Nc)No5&(IAtU)k`AZ_WVeeh(|l^Jj6GR@Xq#(4_L8$KRibLjqus;W$GuU{1f#%vZ{bMzA%01pgsef ziKu?=Et~@P?&f|8{mYhJN6R{RJ@Sh25i%D_eaq@!D0P#*quuQ9vT;0N7#}Nsa1`x- z8#oc42;SQ`j=s5OfcJgq@&5>)&!e4Bi?jETi*66HPO+8y+3Tw~`+dMvdl%?KC3&gZ za)uVaR%e!M`>N@W`r!+$#8kEdKM#4hIqPa=X}&jFL0>AM6|Fs$%pylIvPf%R2a!7} zdyTt1YL_oRQeJbU^4foqt8X~F&QCqH;H2hrku@*JkD-1M{j7?;oGYk5xIPr~p!d{y z>$dr@z5MKD7=Zs6sU@$8MyCQZ^#hq(IkMmFhtgI*21CZ05O%h7Ta|rF`I| zbH0)}7tZT_lr~!eq1eTycjXQFdC~7t#~JK*k$xUCy;0}+13Z^LeG|G^KjRjyKTDql zFDJIviK&Iopg}v{)_EqLCfImJ8L6=zVVDbVpe_&eeS9#1tzZsN$g((WZ1i$5&ic@VL&()>#;PF1wI+=Fr}3+Ldng%Z_i}V(VbX zdT00Xll#oCqJNLlAJHeeC^Tw9n~K>|u9<>(gg!w-mE;cDbqi~%>XY=nY~AkYz3eZ= z4jlsy={b4Z6B&LZ9>W<5$$n_LlJ@(@7_l{6Ywx6ZgZ8Oi$^4D_Gv6)0K`(35cHTZwjNAWN6Zzr>5v1$1Q3*z*Ld_lu7 zJd}Z<;FS3LwW|kcTWdGk-%>z-Yw^(}fK%}5=}E<(2l36~yXn+fAVuKcAm!&Snx6Di zR{ctYyhW@BqvD_j1Ziz`mN&{H^2$d}Nz|^_lp|K5RNG>2E$OIj#AOWFGC@ zz)$v#?4DOBvywg}7M+#M;dw332Z=FjHSIIga^NRU-i!C)*J^hmdj-kg3x9@Aus28Y zi(}ErQu-~JO9z2gQoD)MPfRu1_Chb#IXU!w26ST2Lt(sK^8g=l=tT1%IvMzmk<@&= ziOhK(8hPXIjjIiEk2ZocvKM=?*TsL2OFQ`Sgd?Az9cwOsY1wYto2NSGId#Ib1+OK4 zH)6Mf?*sHZObn-g(QQfbV?TK9q0PC>%MvrMCHh>nO!GYtcs7~dd0W|o)rCCtQa%Ws z%%lJFn7bCrUksjH%=P_T-)QE|Lk{F`1W#D2-o664tol9zAHHmrT>5hkBkd-duNh zTCk!|u@x7vUqNi;imKc`e=xW2(%|KN*8$^?`S|1XFotJ-4h;S~bNjC4dMVdm!^iu) znUcDZ|Ccb{rO;~wd&63xfj6KDt8ZxH2>*L-HMWo+i-27A=Su zbYokrrk^a{N%TW#Jq+%lPV;YP&ryX@^kN-;fNl8HZe!myHJ<&yhX-uuzxG4ud7a^5 zJqM4_b9zQ~U-V3I!ZV3|RTw!h4(ylhgzsbnbOzrQ&y4|RH*m@h;%u0{$M8>8{zJox zrDGmjyY%kJ)|7Re?^F_6k$?Jc4)>ZZYyAx=*`@1JvyVa}Jx>@B(IUBHQT7+F7>SN@ z1YXG*Z_(fLsiK}z!R&#DhvV_}mc6x@91Gjt+6-O?+@`g6|1{SI;d7H?hOY%U1!D?* z2+miYFeB*myXuVgMX$K=O}Xnh;>AeUIk1Wy{(cR#0ZcCgPYG~jye}L}BJ^$9eKj8t 
z4#DsP)_j965Lsq=qn+-)XREh<=a-#t|HB`b9sHj^RgR_xj^}*FS~p?;IH$?psS9%|_1Zrtd8J zDB(Oz=3uAiskvcl2_4TrFe6;df9Y!LCxx1(PYgBX1twaxJzhp1_=)N!(a&#O);_UK z9&jq(3_FNRRyZDDjd(-N?}|H9rIURRhBtIk6Hy>k?Pk(djgX zuIaY@DBbP>ct**=D_>y$+n&|mcVqZTq%;iX36;07Yx&7vkaIt}C8)wP5;19-OP z*BBkX>1x}aYuH%V_B`jRcJg_0J7=Mq3&YiX>Q*Bg&A)lN$h>fE>obcYL33kM{WR6M zuUpZ&;ljR8e$Ch-`fKH#7Us2Q4gSoD%}w|jjvsM1G<77bXVIo$$paSo(FGeg!N&pZ z14|dM2)VK zqi;0m_qtIl#Ur$@Bo&?g66U2L??4lLj(Q^AsJGJJ=kScD=%)i-;xVXK13z)DJAJfE z)_aH-5F_`puPXoavcpBtBlB7I81II_N%FhkwICERH%Y4dyM>C0zG-)8TC>DfqI z*dIBTkJ^FZ&l_j{#>8=c^E`c{m!9n}cH#?9C-y%Ke~GWvq5Or|;ltt~I-C9xez(Kx zv}QC8$%QAei^NlfUwF(pw6YCcCi9nu8tDmc_*+Wk#Ao4e8!Y~2_21)GObI^>>)qiM zzo1{ONo10Ij8?zUK>t7^bzQl4-hMwTzE+A};I`KdC zocs;V<*6rx(=vSQBQ2axW-JLbPQ8~wntMMQI})cM!6?7Ka7_$ui$iM*fJeXKHS&*s zAi6pM-*WKdnTQ;W>pS;eKDbU3{kfn2H1Tj`hv@A`(+3_-i}Q&geq6V3JTY?W#!2{^ z(ui4%IE&}FT$7*Gl51VlU11lzL2)sS=WAcpJl9Qn3;Ga_CCjsKVT{eh>=;|~>4As6 z`oGm^6OXUwbA&cZ1UoYPGr;Z0@JD%eI6QP)^uihM*RKS|70}so=-r;1pMa0|n*+h% z(8x*EA>+T+c{9JAbuJkx8h@Jk_^|8zyYz2(>Q;zuE3j2<`u!)%9(Uru>?c1IJF6#% z_qaIYgY$f#3*qoTEx243TzBxhg}Nfm)G-d?ne&$8K_-)f&8wXLGsc;AIN`WB1zY zVkvDIoIw)D$%Dhq4YXUv`0#%%X3SbQwjX9Gc=2?(!bPmF0PXa^dnDVBq=lNKi#WQr z&hbrL^WDh37Vb+{%U+X>CR<5%nsJq};HmKk^KTTnWf@jf-Gk?DWXD!H#SZbh6I7P>3`>=F$nsuZ$JJBKF zS$@@^#BNM?ejmY~del9_|Ac)ls{5B)xqNoL%A5He!=Aqxo@>{z^!Tr_uDSgi>}yt= z*uDPixbE-}WKpzY413<`vz7dXZhZGzm$xS1d0>fc?81VJ8OE$`^cvmw5nGdgK=%T~$GhfeJ$n5oF2$D!9NqjE9P&%+8R4;l zKCTCz;plH0^mk&q=nmU(iESH4!R5db$=+1>*%<2SCCWgTzt$Lm|Ic1~$ec1a`=><* z@>i&D(VhBErEf3SRR`h7cE*jJ?#{Jg@51iUJAzNP?ZxMTub7;&x30eyp0y14Y#AN9 zlA0xB(g@<|=u8}Qk1%rGuW#sMHcuMz267b}}1QyaSP0|x!i1^3jWnQ{X-6~B~! 
z16y%Z-p;w?E3UVEmkuo%X5E9Y0H32TI&*Mhjp$~`9Iy|>#6K`-D{1}Ijqk*AQ@*(hU2 z6?^MOy70wz)1N2B#!Jpe zjamE1Z_@Qdnyzp`(_Bl0S$GmIb8a$b<+0D9%gO^t@y~bWlK;;0&RqUGI0v7TUf}!f zzZGw=a*wQij1{ckL?4v19vzxqkI#N7KKj@-p%pQFIf}($-<6fZ2U79nho`M#_ox#inCF!M}meA25g4SL$iUT8D0X z9d-bB40?jjCTZP>$8gqUR5dY^uERGx(XVsep(W9w!zCX4wk{Hi{d^RD_i23+dP7w^aRHFIx4HJMh12UcEEFswfd#tWnQYJ6b5nBGj$O^% zoDU}9@`Jz?N`kBF-o-nPe7AT<9&qIX*L2{T7KiI4_WphlxULjjR=uJLkyDMtv{a+W zZRw*H&ZwtkSM+C2#7#WA3Q`j%!Ev^twg$-!XR7eW!TG0QP`z*X91!!u9JH z@91vS{q{v&?tA$k*wKw$z&z~`-kt@%3iK|`NuA5Jqt1ohKwEXAXwOx=G@p#ZhoR@PJD%O8%VHa&5TT<7ydj0x^^`qJs-Ms$W3oF*wE$rH` zZ{d**&n*m&GWuheOHN|PU*wJq+)A{6KDBPw*DtKt2(OwpB65r0uyQrxw&pqWxQ{ia zeSoJL#iKrjKg-_x_xe}F-Fl*ZAd((UEJ;edQvRz-PV2o=M96-^SdOz*C+7N8Ib1glEYxPpaLH{!dM4 z%jEf7#XPK-f*CoPCYtB$&n1?gWzoE1{j$|_IbTpTZ~Xc~s}@lnxiFV3KrWL%Vf=mF zih5wD&i*dfM{N5{c9jR)YqhloGNO4r+eNNsEA_`3(7|#JnCLmSAe79}G531Ps z6t$_Ch4cXQHN|SC@iAE^dN=3Woa2XW(!hP`MtWv-{C=15sRHGd>AK7O#?!i2AHNrL zq4$^<%vN6>c(5yX&_@PcMCA)7?D7=8_5L!vDV% zFel)$3>-)XF}6bC{Vj6FP4dwc1N}#y3ArwqjW6Q3JqKygEv``3Zmw7Ie8@G{x)f)xe4z^jP)SCPx>WEf#LSMopLs=Aa8bRP>un6F%YHo>L3ol3E$G86XW@+VdvP9lF#z}`~Rb4kH@@WPw5@zq;m4iSF_f{ zOEgCL#PONW^PNr!v2SsnY+zlN4?D$l%~n3K|FkE>-m$qpo+Vy7SNBr6*I~R_a0%C% zX)_O+mcRWz+7SKTjQ&}|nlQ0P9lb2)`^Hmto~Spfd$OyYdp*c`wfidH-YZ<>aZuxw zw&knGuAA3tzBym*>b*SLbk>yG6kVOY2mH=G+ZMHRNiBau0$;#~;J4N|JRv6ESDI1i zW$yttz^)+sr^J(5Xt(mgKwI5~#w2)Ywd}CqMaWHbPW2)BGt8nFIg|0Y1w&e(khPPp z>l@JX(6#4E7KwJrRSH*5Hu~|u7fO~VuZ=v~mK|-ai~V-Iv~t?wh~Vf(S5{nl3H-?6 zpT_NhwrhINJnVzsL{}#KYnCFdZ5M{{81oc#&Hw$2l^B7V6~mMbGGYiI3AAMxQ! zTg-Sw3oD=n@x=SNC!N4+c&s|!>Xq~mAUVB0sF;FqFd z|Ch1H_S^V)2YkGJ<4F7tqmpo9z;|6|eMTN?W@ycN(`_7BV>hW0Ngj{zG1+uAmL%X} zJn+BdGJ12F`y0Uzcn^0`i@22>j0pK)FS?A*9Pl%*{-LIbpPbD4<|dEf>6HH~pS>|9 zv<*Lw#2#+Ws=KqU?H!kA*%9QZ`Vg+)VPB-$Uk!djKe{?1+7gaK7aIKy)3B|F!Amjo z^49gAAb0TU$U6>h_S?7#fn(uxAjPO@rVS70jx}SWhrn?QeF*Lfm#0&7^>OexuwgJ? 
zo;7IcJ(43m;0*iB!kc_a_aVoHHxIhcn2(b4V_s2EaffQboud!zvgCPK@*LY8-_sFz z@Ap^>!||?q<~T90{sLec?mYI%*K*FPF@pHb*?2Gci_TekQ=+MJ4E*7fnG56iJFZaE zfGe%{#O%$)?o^w8GBBh?$5$F#I;gdIz%`|@6&j9QC%t%Zf6LGu$Uz?H2|>5eOA53j zzMo2MaN>G??cMZ*$Xh!fzhf?HnS)yJPwc*+lJ&!hUTLbOSV{kh>ss@qbs(Ofc->3i z;JnRGpG#RUUiy>_SdSZ*c6SWLdP0XHGae8oWt;|DZ~ArdqhRZ7I9*$aqiWuDVT;Y7=ZOWT3{c*de3n1@>KU9@BNBukgvl+0HI<5@dygKfv%1&oRD z8_#`qZSA0Z^w7x9h>?hYd2Y*^WX%(PNJkDx1{~q6uf+R_bNFYBebZk$_Ph@<_JxdH zJmYao|A^BM>%7&NP~btYv0{nH56z{ckX#pJ<+iBe{B5! zra)WF?eiP!a#rfTft<3QqrX;Ui-+~*WxbJm*{SP-&w379ItQ93_Or;qCv%qGfg>cD zgk6~YPK-5caNR?$p?Gy2^Y5WXuMb~OV%r`)L;VEL2>ZHVcGi)eOWt4T(1>z@L<@7E z5%^Dy>Y8YckVlDpON)}Hx+TayYT1(dFI^}d)%Z5zgZjVHrY-CZ!)oVwYU#ekIS;}4 z*wc)u65YaEh(DF@rTq8Jv-mK0PwOpaEMCTOg6AtfO|Bn2PHlEs-|58ehUV!e<`fHl zU?SI5@TW)Jz!8GR(F1jzpDEplB5%`H0{gSgUcNu(h)`#Mse(O5A zVFP$I_*O1unvv$;0zT8xdv{^qKFaS^w6O*JQ;&652pvzg1BA1m(x+myA@pC)*tYcF zZsz3j`1~hc50Hzrk$3&^cOMr{;_qP>vLDv=WhiE#n#lTZpxcKolFn(Jv2s?I)_Xqm z@KAzI9XhL1r&}~8zms%;I(#;D_;)zp3VKVAK1;va)2MnM&b6nxCb^wvrukpsECW5a zgKNkH&JQQHMt_ceFPLP@2Z|5B{dgGv%SBl96Y;p$N96@UMBQpk1}q zfIjS{E&20hlM0WQ0=M&AH?dD$n@UU=8{02iHAtIPXA*7IwD+-9z5H6}?DjrL;VLfU+`wtApZJ(s;$y00Ou~<1jL(mU zFOgRqVh{Kf#%R$-{P}Ix*wUh}u&(v2&JWkK%)_pVQRE)uuW-S4TdqKV=h`>82Cof! 
z&B>h+7d{F4*#&MDC-}x=ipwh}$JX5j+(!SWyu^>OmzSacw($ImF6s@&>Bj}^=nUuV z+bm`bYQx#Xoaj4J-u+zmWBn1DxhptS{>xWmsJ5}u_pIcHmGw< zk%KL~r}@ylT*;hhUsDTwu%3B|xy()v^ei7vJ305!9fE!5YI`>YwgEARlcj%t{pE=*t>Jnz6?I#=(>tqNT+{j zy=u`>AinPVKf= z&q{yS`Qf5n@w%2J<$E4ttXezCdh$N-;^6x9UF6~~v2DzqU)EX~ngimhN!>Q(!tWBzl`_m=wH`> z8~I#=(0Ctf>>c3(JT0(fN_zBGVAFg;_hnt5F!tp5j2g~@S#GjNE(AReV3UOmUqJ_W z%vo;MNZ(Q$4o$vHf0nK3maRExOAf7-FzBBao+M8ko$E)uzxhti@QusngZw{X>ytxt zEvM6gyeYM}+KOq2m${<7vy3hKz>)X{F_MFdkNlo%?X26w$cJ5K-%nq?0NMmzt>>5d z-4V~3b@;LHB;DSrD-pUmo&BhsVYVIHfqZy;JX_#XXJaeqTG6MzWNg{YRqHz{)S z2rFK(2V1@sUwJEWhzK!@!{}|#(3ZxFj^aN8U);`nE(`ul>smAacl}~~gt?_YzS&0J z{x1DioTZ)hqj-VVUH331#c$Ae+MIXl<7-v>@;2a)XjwllqH*5{ zKE&%htTA}B;v+lJ@pgJ@7E-6RC|ug@3Rf>58Ez#$62&+F#5hCok%Nixk=6$^*E%n4 zXf2Mu(e^&0#8`uTCAObLa*MqS7is^sr}yNXhNitPSMOuca0B#w!tzl~o!!6~4q%_i zN0Nun>R-PoKM8dQU1^=FL)wAdtHU={4j-Razp=^7nK^a%FYI@SZ}HBa_&dV4>Z9u& z*&c~{+;Dy!`BmhTn*z-8Uy8=&3s&F`FbDShd5|@eqru5WzjB6ijLE$>i#I_x)4*?R z8f%j|tLIwGl3jKzH^vxx{Kb*DFRru5Oz)jLYfDoActsD5z#IcN9_kWHM#X^n0Dbl# zvtsaTU0Y9^J*t6K>06-vf(Mw(Ah-(|6XwUbj+}1HiTgdBxk>aRCGM%}i0_>(AESMc z=tan(=jhMF*rnf~4~7ks`0j?fh54VknZ1tQ&T`Z2eG(X~{gl|k`u{xdOiWly@bU+N z>&;~T-OYOz-`;QgVREr;kz4)R7gL1Xc5LTlUM}4y5BpwxigmqH>-tUfgD!M?&BsUk zZQkxN#?MyV%0;}!W5ur0qxe8-i;Ft#K0vz_oL^-<6ZbufuOzpTyr*6@3^Z2p6=$qkJG&Tfi{iWRAuqVnZzsO1 zaczQjY+g7y8WC>bH>{a+_(O=b6@%6??jP5;5nh#A$6n1*fgPdiWWyVqHvV{I1YQ7N zfS>1$g%3;&H^9dl;A38{hc2V0Bj5SQ)54zRlfw=0hq~{K-VqunUmta&6V}*C_WU%0 z$3Y%zuRoi=+WDcwtwF6l@pHurwElWnL*?<9ftR_`TI|4Q?9u$QrtI8KPc%0kzv{#{ ze)DYUfJUcq^Ah{ai7A(9ZEB5r@cCPN8~Lq!AwAFDxYd^h`d72gx~2u#yJW}08hI}$ zy~;EiD^swg(4Ay|uwDwWt-`HUa~HgYKUp@i+D7*}uJ*mi73IuVvNo$|SNd}h+*d^n zD%krQfDJgFX@^hN?tlI|+|Meh&d%J%sP zx7ZUu4OzT5Co0=qzIo|pq7$vH;owa4(RU>A5G(!&@7Tav9}}N@>C|4v;TsDdgJ0#s zYh+6a$A)ElP=}Fc46QlxBRq!XC$@M_6Ios#S)*ajWu5-oX_(hC;S4`%bpW2z5RZu?e{5S6e5HY} zeDLMKNm9g?#5TO>bRpN`2(gv0ayF092Xf3m6`M_Y8S=wa%s_W`h=FOeYNs4IX3#Iz>{or z&2h|)k6dzTx^PRbEb>)(A-PTXGx05n2DJ}#jqqeWBV4CNzZb{#bgrun5499r%#r*m 
z>Qzid1m=k9%lJzZnE;9U#lYJSsyg8LnGw;Y6&BgFOq9EhOuS0Dg zH#KEclf%vxIzcX*W7}raKem>I59+J<8}Jp$H*-O4##SdjD1Su*exqD`J@VP8UTx;A zN6z|dezAI}zh;3uvonNz-4mB-&%rO+;Yp!s^6xsa(@fi+%rlv8qZc{fC|_Se+=iYG zT&j!m&A1Jl!+#&`rg>k8+r)$WkQ4niebmjlow^CgPm5mO!FP7nXUSa3t`t9&JeHpJ zowtd>;XfCh2IF-5BG*JCny+`zOAAB`Zq?$M94*CX(n7r>+5gHHSd9%$t-_E?o>m}Cvuj+l()G)q3zk*o3k36?x>^;ZN8hf>QMeNMpwYt~p@}x-b)id=xW8#M$ zG0oZ&^ei{NI{eTvx?Kl0 zh4Qt&1T1soblAxlg1h2wTd_R)kiN{M9(moi? zi*y#%Hu2n%JqcdYmdMM*oxd!CJtSMlCTY(LSOctylmFkk0VR}l}CzPNbA`Yf9U z?L0W?N9f_tine|5RMEElwvs)G?ev^3Do|`9NIM~|0on|)2GUsrjxY6J>@f|IzoOY> znV$@M=>GxOA36{0@=bcoFZ9J2v)5>~Ya`rmShW#6UjYZyqS6>W0e*u&&n@swJsZSs zti)#IyORI;tKC-o!qV;QjYHttiue1NBToSTPSEK0eG%Dzi(%J`F3qAPgmle&8kN*9zJkdNN&r7g$q*@!){5jiNHAsBP+!zYivzl?m;Cy=Wp z3V;us6c_I3corfX)-)Sq+x2v&>7CqEnjq8BXpP{qWE*31dkJs|;o`in5 zHq^g6ltr_&ds7a_&DaMU zE^_oOTR(MVx)VpJFmkL~PIV z#ZGORO3bVros#jilUpZ$v23Aa_-&r_50UrK@!tKii<*aSe8VHi*CdA(T`w5de@~+S z$`>WuK>FWX=)WP0-)2NV`<5+Vbe?1DB>YYHq;2&io4#Rd2PQG1+6gAo`mx7+YGc@n6t*f%}5SR&d{RTKO9p(M8}(Z5IQ( z@;!wo!HrGXI0d-+-B(7Xd;edot8219_`2Fsfv*Yrkgui{o|NpX5&!#BIUVir!M|os z$DR}C=5#y`PyFloT88FyIOA)P{>a>UM-1kyG%&Uf`2|m2W#I^ZeGZORgCqFwoB8`D zhJDnIRcxax-q!P6Us~uon*$?1Ry)^>mWkoD7T)Z7O;#U=(!$zjm4ZA&&du}2_kPGu zmaKB#`x^L3?vHyG>`s4Pe7btidFRGB{+xa|Q^Bu(QrR2wb7bZ*_HkIx9!v|D46~P` zoA!s?Pq5u=pVdAqPvB3%g)h^}1#;k;1rF>ybCa|GT-0iH!@owrzvB7kOKkq-)JB%A zD!+r{$G8++JVbop^Ndw;{kBwN*Qfck;!AJYpR=cgd*nGFBXX?zyTMVC7nxD!`H+=+ zLww1}Md)H4m5X5e8K?OF<6_Q6q_14~;A-CUxy{Zj=%a(SZ*mjY;tXUr@5qLZJ`6Yo=Zsw_Lz2z!ja2Wx3d&*okkcsDqpI`7R*Q{g4uoG;jc45&co zdXL|lQ$5%1&W=9Eb%T0bf(<#;&sl%{+$R2(6@B9K_;I-IF|(|^iAOklwi2CKwAm7; z6>I!)I+0%U3&{ZD_402B&U9+)HmiOy=clytf54SnZB#B;P|ttGY=h_&mDqUrN0;a0 zODj=a0G*-|I}do_ljur+9C&!qq`|lew$Fne-qjqa&78`)3tIP=?+M1Af0gHLK5ggV z_my7-O)P;noIN;pp2YXTM-aO}?R1>JxIk^RB3snwi?pGB#D6>JBf>{G$}rPAbBRaF zU%DoapGwA1#TXP@(rUfT9Do)9r=*3A_4qq+myU%i-X zpXB#&dP+x}hgdcaytX~g1J==Aq95*8Tt_K=Q#!yq&rJ;Per#g+*ZhwWOHJe}-YadM zqWO}YtGI~d(&yuI7=ZS2$hVL$y92$XqH6vE@?N4&JKFE{7O?7>TJBlT(T``_@^Q|%(#chU 
zUme8n+SWsCBwoAdxG}MLl~cQ@p0OpyNMgw31@8<@abQ6Swq%b}EB*v}@U2Jen)a`d zFHymMy+r-Ii~B~`gmA-b#r6mHA!uHp7xFZ*cRprdhuC$l)5r%-GmL(0MQhB7Z4DRi zv+Pgen(|Zp5?Jf-k*V!m#{3KZw-f7>&&=MZXy;FqL5trc=fXvu4%@$$Me&1)#(Nb1WUuH_dcl?b+Ot zWIu+JdoY+UVN%E0w(C?EL2J;HZsS#c`J>dR=zu>RK#xj6&n<^ftwFbL$7fdp&-IM- zMl;O7)6$E-1soN`54E=28HaLEpkK?sr}uTX@?$P<=NbHiYV!s3Hsw<&cSQc_T=ELm z#P1zI*OgwawbemPbP4pCo%tEv225Dv75<_AGnzP zB*gQ2(9xBPkeGuohBcak-CG*ZL1+X9E1$q?$30%3Fvvf#R~1X#nQSxJ>twK|>xJOR zv2%yxM;4vVu<6vtv-0JNPV1n#@AH|?wc%(r0^TfIB@e)%)gN#TdK=s)3*A+&hvxRB zhO1{yV4rMS_)pMp+wJ19&}ZeDkrB~o)sLwgz}zy{HxubI57rNf`C`IA3fJj#jvrZaybXy_s6%4@nBi_DDP{miS+qR9-4CR@*} zI~+p3)%tV^KJ5ceBIpI*-XhCW>T$}xU<`5Pa5~Ph(pzZmqLL5>Y|*RjFP-ur;tW1M;r z8Ee_(sr;{FjH-vFwVMtOTCi{H7>o3qSs#GjCG!-QJIUFqiTRTijKQIMdwp_Vb9`Tx zGsn*SrrllFb~E~;@X?KY3gy~0)11A}s;8BUj#h!Z?m|W>-%K(3?rcl{J_onkfGK%@ z)2*o{t%ROc-y4uyQSU|0U| z+BezfkWeo-F-~RIQ}Uwor=au4Yg;XGDK2f%oMO`QRcOu>%k+^?q#VH(aGgV~0NGv( z7?*H^{qApBVjMEfmKD$Nz2hb;7v9S2zLEP|!L{;t#Lqp3(YAtX6{~LFBe`(HA1oY` zLxNsO9L&xkO^arLd*;dS<=GZ@^B!M3|EXok;O_wU6m!UM8!KAr*Yla%_T-ou)!Tq= zU!8EC;g{cg>_lQ__*36b9H|*_rJurra(g({6?SJSz3uq!x!k~WAeEg+<77e;rwld;k*)R?9HJz z6aBorggj|*5xbbYfLd21_n++;Xs+~V?vDfxt&K;}_XLA+$L_PRme8m0I0ZZoCy%z_ zf3f+@wan`>=C_nR^wcU|V%G~(`>!*O*!V!aMn`%yc#qL`DK-~l+imxE`<`OvR=E3T z)<-f7pXQtx_3<0#?j`vCYWRpK=T}4NTKr;ZpFG%3jDHZ{Qq4_CoIHHo}qZ#-S+m_vBAL z9{KUJiz2z~g@n&Vf%TwKkr953eUZ{@v^R3e2+RL+j*J$6IJH^)idv8H8aH!+*%$Y} zlnmor5Don$xiUZc!@04Rza&>?Ik^6Fa%H~7d!qNh&Yqleb7c~7lnc(Bd?My&*nA=< zm*@-N$Kt22Ty6PxI)<48<(OE{^jzI$lCwk}Ky?d!K=%jpyEDT1dA4na4(j+|4ldm% z`%L*urd7`)?)R~1rP!L2LEA%ZqkH*1;VFO0|6{%X<{NQY0X&uQoTdu;hL#VO0#B(0 zPfD-GuH244Hw+HDfLT5`jU^d}4&2Az80v4d+GYNR$W7MdkHC}g>c~xWhJ$%rmwby@vDv-? 
z2PXOK+lPTCH(^ef!EY7o6z z#eg~YWuDHVPqLXUxu-KMWG@Ssl6}%g;azRAqersWU^v}Q{6)4hy61A*wDyNj{VY1b z+XGXavHuspomwT5@5(U_BKuWGM01u?X3SDM&UIJ7o=fRH+b>A0V`BR)1*g+a2kqQU zdur$VL!KSFM?-do6AN1eZkRVKMm!Nzyci8*Ex5RTb9Q=KO`L}d7V3ut7 z*QDp2=M)$Du<}57wc=XH18Z+Q@*p4l57`@!JQ!|o{I>Ys_;q$%>jUxsU8{^O%1w$< zPug`y&PsP3H9Vjx=bHRVx-Pv~v>+VC_0x3O_vIeqd7F&3ZssiK3gWZq0r}KZ@*j4W^%si8gTe$ z#93?@|AHC_8$JY#ANxOoaqWkIapEvA{{J!89|p$QXZ~;I`rkhU9`^#HY`$U7c^e%# zq#BhStP%M}(a{g)FP$AGkLkGhdEn`BvU^sV^-cHBFt*5M6z|)} zZ*-N$DS1YCU{v7vQP#zpG4?)>2KcaibL2m_T>{@wyhQc6ZU5KgC}$jp`}zOajqG1@ zPi~B{Zwp`C0_B>@FZU?dmAgL2J*BZ{x)I(&JjhqQU_tGVau&>^KlSgMiEYJxbK(v= zTBs}8LVd{=YSHe+cdEUak{?b@?!+3y?8Thb0<2qe#uX{4Eo+~E-%0P^Y^GXxK9(lF zS-0pR#;kF^;m6m=SQclB_u_}d{#$IctaV>u%f;VKvE)~Di_tfqbG0m9Dqp-cex45| z$X0BzgLyYClK$au>%|tt&Pm==Bi@@*O|UaYinqZ zO^)^q<3E-iC4Eo!jpErF!;IJL-0P6-8?(+DR4$nQw}O8?^V`?4Y4{(+f6{On{<1uB zIPeGe*t*{#_Qv$55hn~;I!X4V5WdRW(C_r_m9$@h4xXID=-i*feaX?}`|p#FZ_V9s z`S?x_cZZe#m>zut8e(m;mtA@IM$P@etNeQ9ZfiU-a$%GsUyS{7p~VLncl_C(BlB%v zYI^iAx^UuqK`U<~J^H;kUetzkV|0jJ(m$1>FMToX0Xx@TI8%Qvbi{moqz?XWre6mS z$AMKj_woGhP04=AxW51$+0JK9&b8OR1K<11#|`M>s#R#1uFh^?B<|8*#W@j)b2iWN z7og~cCc4SV*kICFj9$`$BpKS&0D=uq*w|ZA#hu(MUW$F1bJTKcs^l|=me+{sG z0^iBi)aA6#>$zBPTlIto{TvR=TCajt@OdeK~GnrG_5kctt8>~}nJwSiK*{lcFbr9dQ^0q>5t$FJ3L)-FmY0trnuIx$p z7SG=(~HJnnhVMW)>xLlsmlv=Ze16B93gJ-2>C_7g{O+gjAM?M)f>w% zW1cT(9JT9=np4a{3_5&mN^x6n-t$?vm}63I!LHYJ;o@#{s{dNvY3A8h;M6lepD}Hs zo*RRoq&$Ldg-^Mk{i%NDp%lE8gSS>M^=-X@!dh@v%y0QXH^qZ8cWSFoaIK&$N0Ya$J{B)9(AY9KK5OsO}Hy+BVPhNq7ifP&wyjO&ex)}xH6OdY(dAu6&B4@iGqrl=q2^Wjz~&ch_>)|WS7)q7khQA$ zoOo{u`0u6P)XVz|oWASHyGf)Cjd`DQ?b5)4pPU-l`G=)h;ZLE1I`DO9{NwHMujcns z#vft)`Owla#=n$v;qw{uJjR@lT>LDs_drh#^UQe!(B;FGqFJpeS9xCBC zj3Hb_+}_W4#hZ@3IZ%)|zpH`!ap?3IIHjv!HKBa7%)fBwR@JL~4Ps?*I3|F6r7gPB(WQVO+>~bp3JutEunw26~(C zMEQcvca5JkupM8~SbW)>`Imy9?Te{_J3l!wloh+zvO#w?yVKT2fVK2L(!!6rH&d6J zd8FQG$2z0zN#^xFGc&x=FeWtKQF(i*H?=Wf=Dc|7t47-m*g=1rY0m3f)4u5SHAdS& zv)R_QvTo7#kLgS%bmlAD7j>^}U*!GoGY_BoDn34)+1Ybp_+!kW_H}h}PuCxH-Bf^W 
zyfgpx3&Xw)^3dGIT5^Xcu@Ajz0?)lz?^3V* z{C9c&lkiW1gX!u=eRxh%XZ~{ZA7Gj=&x)(pT*|spzg@SE3jfQe2ObXaobDHr=VeZ9 zxlZ>o|L4rZ-OG4}>tA^3%){^9MeZ7Vk5v0t=M3M(x~qq8#hQ!T;Qz~GjGHr8+Jdg^ z*{YwhdJHk}5wTyQTlIJSByjrBZ_Yg2!?_8yuI#nPH7_%au*SBxiZ+3-rOGW9NPv_!AEFKZmjRFfM28!g-OIz1Etmk-6d0i6g^H zCpI+cdMcmyz8h#Oz5k+c^$Q;jS54g5B)#k=?ynqgOnfvmSa6#=t@@Lh*cQl=9CO5R zZ!WQ1)xg?3I&5M)k1fL&5!ZQo@R9Cyji}k4VU)cK{l1G%qj<(D_lV^=z@Yo(1MZrh zbm}|}jGk|`L3@%<$BFZ+9o6U1STvWhYuNX~`G6hZ*7MXuS+SW$|Es?{W9=ymT)=$I zXTJ6_UwZ?mzZA!{-hGJQn&*rfWBEzud0(6!`!lGc$@Ec(T6?( zJi#3H-G4;;0CW5u&kmG5!Fmqn1Rkv3cR_gRvjY>V?g-wV?@ejUWlgJ}12)`#uW9r@ zO)XZdZ~BViPkxtgFL`TTuQA7SddcCX1K7#zA^Mq-IbZOQf7+R|Z(i@Qn~XAa!kvGh z&wq!W_AbV64b494{>bdz>qA*%IX6JqeyIGxkgO`5LbW7dz-nFgktHzJ)uBUd>ry>D3?8 zx`PhtpMHqFWLeSbJY!3V+h8A9-+V7?rRTUY&%v{BE8SiAeInkL+7=%YJUh599Yj27 z9%mYfUasc5)$q;tfctm1V~F_(6K^pPfCtAP}4mYdz)Fp@t);quVJ)3x9cYX_)q#HDFov!^^{IxYQy1Gj_ER2XOCR{EyT;gK1Rhxt;H>SV#PS)3 zU;OMezb!sP8{hg>oaexYeujK-_^pdIsXoMWR%3_l#jm9GzYN{%=)G4(j=+yzz9!K2 zCUY>6KHg=YOYunK`klBo}*tmpN1};xfUj_4}34M%mtP7&RNyj6Sue0gC;{3)2@1dqko|>3_?Z4*i(R0(_t4p!Jmi}kX zo)hF}H@mZXdpM6weKh=M`JPJlc^-x5gqVNzKiu3uPku!$^LFe0%~>nA4$NmCP`YU9 zG<4(8R?O|`lrEh}UorU1-_ho9I1nt!cn}UUM)ws=MTaVPr}e5nQVD#h(#Yz}KfU>ZKC{%kzP$s^e1j)p(bQXI;yA(XUOX4Z%mdMHA7pR~TiIzgt<4 zYR|XJu-fyii){6rUVT`wB9kX-+}(_=(&M3ohLL`ZHMZ>Pz@A!U(kip5sTbO)a%FX1 zn@Zg&@5bKEPi=0x&GgO`jeMJ&_q`Vxv%I?=S|!{~gC93eKaiz-hF`Yo`mCM1L0Du{TA0Er`Fu;`!@- zzBM0y@86c1T{?2=?3@|Kaq;CCx~`9LUJvc+xlhJ<;FcF{{i2cYK4Bi_;tHvWs)c3JD)LkbR1N?HXp4TrQ!OV3Bw^q~VSALk8{d4r@Quc7{pzk93 z?x7z3{g)brF>rB!dmqD3fE~B{dSFu9zK=r_#5tmT-ouyB>z?Xgi2r5jQw>=qf5=&I z0vMK#HVPZ5|1Q2D`{z1nMEah1kHw3AJIIILV|@qUv%3FW0lXM_ zli*Oh!izH>chk4vaCA}W;j5{InuxbwfwvxX_2@wJiem#mSt~trAv#IV9Q^#|)JE)E zOW*4K60#G0tfySp)5!Dn^d3V8YfUjGZrtx{6wOH26rZ^ln%4i2JG)c4>OHb!;V`bDQN%tSv9ow>X~y0mnox5?R5t?G9f>)w<=%{In*1M*d4KA)X& z;!{I$FFQkHKA&E+beJ*!ntmM{BXP{hy528%4jEFt5_$i-cP$>fNxCC@fG0}6!*}N1 z@b{d(i(cD+t^N}HYvII4v;Kh2^8))cTH$Nrt@1I*$0xn=82{mO$CpCeKI-L4=idD^ 
z_B=NGAKYW3$Iv?+UAzb07lTfx?t6%HYf_dqyFJxEZ?b9QA$Llua#x?>-C9>>^%$-@ zH1wW(d~^Y{-G`kozm#ZcFaFaie&6oOSav?Tel(8TK6tugr-`;XqqgiAYow2}NJZbt zx@YAuIIc~=ao<<9Z`C(n{y6>inZ5$8IefH}V)Uioj($yUrvGd9yM?Xj(%s;r3!X1} zsp5Lhl0c1_FeVPyEW3*_1&3QB$fz$T(MxT7jr6&*haJms^m1*&Sb)j04>XPr;JeFO zC)sD$NwJ>2GSI)3xjKcv_I}_yA8diY0&Fur1Z-LMvld=uKWWdR@S3jc&`3^dpr-yR zbQWl(l<(WX>-pB<#3b1NoOSs29|qT9N8s`T@skkW$?Nbno;%ZzT)nt`k^i;}Cwbu2 zIv2MZozi%8+jaZUW6Xf@xB6|=wOmiVk?Kt2q6@o_3+R)}y{Vb0(krExE|4#n=ihUW zi3YC;?3wEQbrw2(Z^QP?Y{dl{Shw;SNe7*R{E$B)xv) zn$u=oTT{66Kb;jON{S}N#mgdz!oY*-$APaX4!Z?+ z!dffTbnd>d(J>1Yzq)dJ70`ya|$`O$&- zSK^oQ3@~T^$ejIS;PeA8p!>p)Mem|xA9}xhm5!fs8}x6;b{k!{=)cCqX`LLMF7)Q7 zc~|SD+a1{P6Mj2w#AbdXU-z@klx0_%#7Ga@_QxOaDXcP_yp57yiaB7DOf2VFKmGwPbgcDTG}363pP(Ec zfoitsy~MuMw_|h3x2^lKvoFEc9nL;K3O_lYeIDW+Vw$~a^_!deuu*DVS-l5%{sis#l^_Rrb8!ZR|I=AW~DZ4P`h|2p&q z`codO+DSD(GD&0gp--o}-JQ4KKPWY_XE%Bq8}rfe${#Uns_+@erVRjp>Arc5CB?o@ z&*|2U%FW5!7s{Grr21RY>ucqALbvy1gz!Ijs_(;}qibr-d9xz$E)G^67_GCEtS&&y_8D3ccZ4;PzvWmST@C z`{9`E>bM=+h`rf^&LtaIIM%b3?x~&7!13242dHCGrP|ZfO}y-6&Vna+jvDb{^|>B9 zv@&j^mSUIcy++_I+BdUzBR1gAdB2x+;$tocnJ()F@NsZTp|97Mh{a-kN{|+1u4DiJH zW5Bt_TY@f_EbreW=finNFzY?wD3so4`CP%RWZdtEdk(xx=52#+67hE_|A)mNZU63* z=i+a=CHHK+4TrOyVRYW-fJgWeyxOzxjKLYV$MnrtOwhTm@oRj;;ov93jcrY!?e(u> zKd?^npoem|yZ5%brY7Pm(a%BWTC=*ZIE3z%Oo#utsi(_&kblPU%iQwAso5p?WbTWv zg(oZhtcBF*G4xgWWwajP0nwgXYGAuFdbJPRfJSnRZ0ih6FEOwp)`fhpz4YhruYJ%k zQaZC*d3u$6RO z^Bn8HydXy3nq!mtP`3<&i+=;ZJ@}GK7;6!BmnSXM6vOT+&c`0RQuQ&jsGpP-4W+{G zn3FEr^S}e;EB++7N{RD?YqyQfu67f9kK;rBG1+3b!|AomQ$i|^QiZ!dgWdrgFk z9&i#{L%RlkNccu8|M4kBFB1OfGn3k@np@?k<-yA}p29edXiX`P*P-u_+gD(`H?YZh z*5I5+J%1_BCqJv29_mN5pAF6Az<{u%O#J*dsaV<&J@?P#?pWH%t5(ltwq+~?ny>C&q#wuQxBN9ME6?*^Htj= z_*cZWWTJy>;_ZXDhOWrhvT*IQaa|}}`xuAv(Hva=j@&LAcNu5#T*N*K)oRz+h4Zd= z2i|nvad3WcnD_pgIG>k*bKy@o-n?ZQ%SQI*9xy8rKtP z!iniGGDg%S>b~SHL``_o53aTO+%aUGW8asX_)jKC@33wATU~=O+S)VtuDF*h8$!Sb z{@T=z_}pgri|{F07e0l52bb%H8H-?@iR>?TXZ9X}FH3jQ`V{SkXiLAntWVo-J@u^L zIz{{;?zh(ZtAs~;pnKKisYK8BvJS+Tmask@o-`aUDn~D=1AjIjve$z}55QmaQS>YL 
zgKuXjOKZ_YZ`3+mc9tdv*K83rS>}AySl&xpHqXuMRnF~(ew&B#zF%`AyQGovqUZT# zb0p4<=H=t@d2!}M&#Zw)Px8EE&+ddd*~^?97{>0J9-kA%&85GUGKNxbN@^+g?o99V zS=B#G$(C+ajb8Zxv0;a9q}zp%$LZGE^%dNXP2kuusCOx=_i`0J@wr{e?bc8eUpFWbO4lZF1{JmU*n?qR zBZjkO^<;7jbZxPH?Gw6|ML)W>-o93>YZu~MB?q?UIs4i|U7JQNXI*o-to90Y?V>=N zscVbvYqPmFpS6e`W$ky)}>Xg06+DNSb<72($Tum+rYk;#553+|Y93{VL zW@T`J@|%>StQ@D3%sTu6Y?lqIeFZ~m zgsP30XJ{L@pm#ZKEKwUg(~$pNlb*RG;Th)GdM1?g%$S5{u&u0Tf=SQ3 z248XD!8Wv>DM@Xj!w*EPO*VQjQ~)#RhQ`=0gof9>$GfqPfT7kLy~eSYBHNq+p6=(I}}Ud3^?xPFg_1s ztD`OTp}bcI_A_3mhCSzl{e^G@Zzd=Gt%08k_uG+os)-s)VUGjjw(^>100%iis`FZU z@deq!=_>M_XY#2fcVrE4747CMKX6P2?foH8)F0AKwq54=0Z`y09THe(bX{{b92_%FVO z`Gejnp#jkxdo7fI(#Y7RRA$$;dEx(l+9*=4bhgd^t5~DTl@|Z6WZiK;T;(?U7r_4$ zakrE_65&obGTs>|Q|@&BNOO77E6AQtkZX?YxvwEl^$*VG)~cRtC9+WR2RqaMj_he@ z@EE?L6f-3yl^TWR%C^SnZi3-DBbW_mdC^3c4%$&4*GjyvsJ z`FsKmewp76jsH1oK{Obnw!Uc4S^v@{V$84PY7g_930`C4hSFta?*N-x2Bzq};rK-{ zu{>+d5ObLYjEQShJ_Py-zYAXyzmd+NXXo;qt{Ds1Zv&L*p7X!x_H)P; z7p@w>6?5n0P@3p0%AoC0iqWRL|1MxO;r-Kcp_8S?mfHU`YX+Wi z71lGZfiG4@OCr+xNwKGl8+ydjOe4RTCl-wd=hzynL}n7c>klQp}h z_SS3fF!z3G#d3Rtpdjivoktx^GWJKM?6DGThpMkH5FV8;oeRIuxy`H*4!7~Uhc&NvWAL@u z%n^kJ(9c@>GKfuN(D$pvBbcw#llXnvWN?HX^GRULy_g&-OYUVwyQbUc*b1kQfYTW8 zc=p+Nh0o57O%L=pF?Qt@8q`XCi}AH2;WIZ0pF`T9PP}xpF4~#Pc(;Hn?E{}<;VM1a z_1?g4<*90ag7k0`e7VVCc4+mfQNz<|ZjMp5hq!cf4BAXEgV~Gc4D6;&+wI}51F%Sw>4*)Px0+{66W)* zDS%_mAZZv=?X^bNBjq$%;VXEwt5IIdefTG2>?R49U*l(?*YbjQ?f6--W(!qqTl-zS?jr;JMhM;GoTD0tmCbp%$;_~~5_FhbQCy4JR@i_xL4Lf#d-CkGl zE7q0fzLj%w>uD2OyWNUzklGT#f*O-u6vjKU|^o~ z?dh~@t&Jpk*5dA)FP(&%x9OrktquLww|Kwccw^<_{8WAmE$r z&g&xJz}oBkec@w?YDSD^zpsb=zDczg*{giC(TogSPW|gReoneaMCI#s>M&~0ruNm2 zWlyc}6U5K@J@@2P-zTU8>CUD$yD?XGl=x%ly1*XdG4pgzs~248y8)k&Pt-@f^UHX_}=bE4mQpPE^Lzq?nl^1)=*Ws=j-Oirel|D&Z$w2u6+zVX^ncA z>$iyS`0I`3>R0x-^!!%Zdp_?#)-v+2m%+z$tpS{@GPC?Mc~*O4L*T)AW+u3jjk*mz zy$3k`!0GJu48mg?flYf|P5E$uF|x|PNBBI3Op#rc|N7mBKU-P3N4gC)uET=w%5m(; z14hU0^6{?rw32V>clNVZrDj_`-6zPGV2@2|k(pZk*>Ll!jljB;c8jp>T#TjGZSQBz 
zX1s~_mN0k!GpDbhoISL8*wqf~<&32s+s^ube_+pdwTJcfi-zoB4We^4;@h;w#qYD@ zVyubd`W)kGz(0zTyQ*BQ z#CyV-YL~)G(i8WPIYb`h2S(NS5MKNj8}r`L zzE%e>N&8xFVICXf`&z}vg%{x?u@8ei2Cu?D6XE(CFi1xfE{?eu`ftLn7EIC|8{k!r z9qXG;oP@otic=IXtz58$_)>m+U+aE!p}cA6Ozdl|hweM_`sTmGdG?YS;-fv#@wQd= zp4Jxk-z_9pZnydy2Q1I#8@rA$rWSbSmrP@e)>-Je@;%=8KGrX$1n%_0M}6#L^^V1+ z-e=c6!=~PO-|89me$~oK%bzP-y2br@i*JlSI`FXCTF6|yg+BQ?@P7M8jCq=yH{KmM zm@@v(lwW*lRCvFc(s+#jsh8iGQu=ong|9MGt^Z>R_+L0aJQjX+wK=A7BmF&N-u1Nh zJ@&Xi=C5+4H1_gdK4%k_9{W5Ooe}k_;*St&aOsW3g?C=qOf0=77YpC^#&A2$% z7p9gnu`r{pg}6uXJJ^|@$0ld5q&M&TD?HOXvb>X8$gS~K)v@342ky`JZR1_l-4`rVn|WVc6kfA)Zn&E^SH3W?>((#m*o(bk#F13LkiVsQu9j zUPN2k-{>_tBMX>?E49V>61)2tf2Mm@cn|Fhf4jk-_MY`J7Uik*gUgn~#-a7lNjEhe z-o6X@PYe=1UxqB2xYw0dUAQE-QMSPea%E&oIPYm4wWRbFXbybr3y*m<2Wi!JKfN_; z2%g+&L-1b@o}NLDD1J5AcV?9RS<7Ff|JMDV+mnf1E*$FYpQ~H1 zSX^4PSl8n5In_4THO14m#p82zR(vikTA*w3_*|V8pG%AWf9#!me3aFl|DR_Da4zAR z3nZwMfNduOUXWW*X(pgH7cUi1TiY%{f4e(Lv@KArXekM_yMdrGh^3A0l4!fljE&V6 zThhG{WG_LwR;1c(ySpUVu9L(YC@@hVzxU^P=1C?YP}{YyU;p_1F|V03wN`vCGmaj=tvMB+tF_{DnQ`QR z6`%Vjc&79;&aRsrghv0j@wpiBIq$LlGvagKrhV77=ZvRqDZJ*R|399ViV^<*c-pz* zb)5By|8;n*%!}W*%&_7wr`A9JhsV?Gx>;jc_v5gA##5Jk0(R0wY{E&gQ;XeEbn&-k z6ZWOzStp0Zvs$dWa+&dm{=?c&Z~4rm(>wNEExJT4_7EPiIp|c>u?mE4K~LupD9b+9 zzGvlU)gsetZw&A1yp>qkJB)|e{9~EMfm(mK?YI2rceDvxK(bfpa2%mpU5=Ipa0k7f^3zzWgIq)Ih^ezCh2v=sb@s2=tpPJJ=@|;oOG$ z_xpFsw<14#gy(B_qEIWbEGJy>cK##9PpVoEh^XYJ%p9a^DoPItaTyf`l z;5v!SQf|n=c+d~6@qqL6@yNCoK2HDE#)kzj_FZ$S1y?lA800hCKHZ2G@vK+&3%YzM zc_Q`TH-P=r#n=i`@uubt3tL+@e0JCHc+;j)gYl-=hVyr2@P2H=XOfCH<>Td|%6)La@@`;vb` z)6p51{>Ra>f{sT2q|T$*%spZ99TMo1reJLH`& z&fb(aCYSa5gmGXGdQ8h5=7B|(q3GA<1fs98UcWV|zVY$y{_V}&e}a30{V+P_ZfY)f zc3OJotWIJ@EgNo2c3f`buMc0xRL+c3f6tDgo(XzV`Z~QlHTF(-Zs?~+ZT``IqW_%y zBiG^|xuN#d^G&SNXTZP4?5k~hNqufI+K+BBG=5)Cr{)!(XEQhtg0Dn?oO)`*NpJQP zToHZtUx}6An^R0ebG}=;9M64;HUmF1SDLJ`^!Gf`E24TH9Vv?#%#kK^`YL=l(7}SI zd6(}sPubsX-{ZTKwpP$q0==z!8gcVx>PBKCUu8bl=pmMHTlzg>2@#&R@9oILZ$y0P z{J0mNiw~Y%yvAIqnljytK|Y`|`G9Jwc5Q;MloKl|mEQ{gzViLg$1fy4R2szh#Wnf9 
z#KU#19zK2*Fn2SKe*B>It{sgD)?cC+p9FZ>$baGI*~jtm!8e6J#paCcNAh&P6MJL@ zv2NqtuO9rz#rPdRV(je3?s<=T@OO{L=ZwB8ADUuTYBRz$&La#Vm#n>^@8M_PO}`2D zb4X@9%RamXj3<98ei?Ycx%jQg{5!GG$MLPfH$(YTu+!-?h(ADaF~uS@X5~<7{))e( zm>70EJXY(dcqe`HzkvUR^qUCa|6`9#6LxDizTh&>L@Na@)nj-MJEUMuXjg!JGbQ+A z1m~xTSrri9<1B&r*BO89RD1lQ3H9lvhC#7u)+mPI1E%G`)DHd?E00rOVL7nL20P6B zlK*1))T)Wi=z2caJ$shZpReX|-k)K3DiY)kl(NTS9WbhW57%GfU286&Z~6X%#DmBW zKN4eo=0gi_VYeES!3*%;hRqk8XdEC9uoqfg*2(i4$AUh^-g4v17=!Y~!_=(lgbqk{ z>HDSyTOj@ozOUxKt~X_CoYV_qo{zn4$2PRyG{-Gtjk$_7I`gb~^zmPKELd~JuCvW~ z6LHHAI^Q2n&3BV0ZN9;oHQ(S^xX~Ot^IPJ~cWVCkfUETR&X*sM`R*n*D_oM7J|Zq3 z1DBl{$@#+L&%j6e9M_I8$3E&HI&-YEtpCtCep&AVpZHSoNxR;xWJF?^Y>+;tXJo?* z?>gf{I-S3VGfsG4>$ehnvyE7nGhehZM`Ol!$sQyF5d`f*7xnyBTY5)r>Hd41HT4j@ z41ITJF>`4>XT^lBDAK)p?&(`H{OkNyp9B3+U-EUvAsHT|j{x$GzAas^W!N~R!#Mir ze5-kUpiYOfJhKgtubnp()!Qy^2^y0X1)Rj!zf$d2eDq zEx3loSmd`nb^qcR?OHLF#lNc$&iXxDA3;x>SOU};x?JFlTUEA5$6YOVk|l{ zbH*t6&|>3&H-BSeIb*3Q*wLt%*36U355^f2d|^v@i7y(v^y9H7XUj+EZta;beo;HA zm%I+u1Xms4{tcpKr!MfQfx5sw-^UM0J>cHuM&D1Np?&WT`dgH5=w;sFg$q`Iy99RA zV9j6~kNwcrNMmdUk0+l{d}=BFm(dn3-!73K;U8ir;EBg?bzzq=Z_LeP&YJUOA?MBy zvq#Jo@5W}K-fz5|yczAY&_1Afvo{P`{$sOFE_^1)n z{bK6i%$xOaBYvly_F3yW@&8UeYgjpkIr}@O88PB{3+jmhi%w6#+v};tORceiGuThv z|Ib!F@yIl0a#n{yUe42#H2A6eATbm=S-bs z-rl{@WzAXt2G0Stq4gGGKZR)0!Gjl?EWC-cc%GaP>$!t7$)L$@XwsXnJs!2hLPxds zeD}Oq_jKJGJ_fs9b*Es5F1vx>BsW3(Dhi?beb8wBFd7v~d!S#&NIZH`>?E{t>i)eJEfk-HhDE!gW95>IW_32=yU?=a zX%+RJMPs7dY0xzFOXN#_CLft9dYwZ&2c6cc>3*EJphL5tzNdeCm|EC9)55WC#_)=Z z90hFoF#4eA_a1QZtjpVg%y<@BZj1cgH8C%?2tM+@s%fs6;y=Wxw=y4D3Hr%mzZUty zZ4u%*PCaYlv6n}PNvqzybEc-&fh z;(a_{2jU@Y_^P-O>v5CYcZ1RQ2s|djCx~4t zA4?FOIv;%y*`Ly_Q)giC?z+=POyiNhtaYRHBKuafCfpA{v-8^nWBUE&+fPo6jJrDe zN|$d9zpIaP$hV8mGC#xnSfxJpjzYc-*0ld!XZHzD!couv;;bfY*|XBm@U!~L{vW~E z{CmKd4~)t|F_zWd*6v>Ziul<-Qg4Ctn2VGnA0&qyzoL~xE`LyQD|wCZpJMcR`C_n@ zmv}j2GbEo8KC=Y0Am54nI_cNFJS+Rk%4g!+fWJHMl+v#5`PncUk*0s9zeFu6`U_^c(P{U!E>GShPIrQJo*JIwO^? 
z;#czf`{%c#pC{Z4;*;H34X;p(_7!A^_5)xuAP4MoI-K)fb^p zkoaR<*ZMc;TpoAq*mUUzqb)t$;kjP&0wlMehfX}=bqn#$zSqA+a&b}J#=L^>Er@nb zx6h{G{pc8cSc&QEzmWZjZw4OP7IS&xGvKK)o!xeT^JN~fb5h^HJ{5hcM&tKf;UDP# zljx~;u`i^7Jk;Ife2jx9myh0*r*|$UzMpW9i_2Hgu{ON5g)tl4XHU$6%UFXR&h?NS z6yG@`zQ}seezRlPHsYO4z+${OE6Ug{z4i*}mgA?MJoUa2JINlG;q%nLck+PT40L9% z^nPFG)Y^M#axUAL=io%OXA2&iW1r#5UN;Z*Ql$G)TPwO7d!3qB7H&>}8v{SAN9W~| z2T_8IQX7T`|Gr*A2|dtnrR_kLr6 z5tXi>HVW87KG~=IJ)=Tx5SLwMT+niZY+&t6{MuRgRRK6+oI}rWBCdMs{ctRC-Zdvh zD{p^%XdF>EP@mXmp@QFBg#M5j|2pH;_dAvjV#{`T-Vu17wN|0QMzhX@o^AU3L+_n) z@6+PIfNop*6_Yyd==bhGvXtEJ?pJlblAAT>ienVVlWy1k!V)9e%l{reha23n>&&c% z9%>^rkVAr>sdvWLjimT-(5<~qQyLDra(l7iPn#EzpgwD^AU@Uy2E9lXtNOkBXaEq3ShPBz0oI7~h3gnND{Fgkp+ z06&cO7xe8g`}%s#n4kGOj_-}wIyGZ+;~nU|S?uvoFb>a}FD0*ZPiauD<{I{n$$usO z&}7BIRO@T1_20-y&cm-f@V}G$%DsDqyoB5}S0xYmjFmp-ALTwv*U@3gpO_k-n!4A3 zFaNzAC4GUc3Xp^!A(q z`0x2(@cT!GpL5DA_;;k>FHgb$E^E($U-ufwsY{3dHE?I+Ba1bWi5)mO{u}t~Z`u3t zKk%d7hOte1J1W`hRZ47OT|fTux<~W;*e)b@r!jJ54oNM~x z+V8Ll{7`>lV!%sY&D;H!kG1|h`~Q<~A$y?}_RvQ|pAIJFkMzFPzf*efaq>Z^c^4I& zJKUqJI#%Cef4g04IyZjdRn%%`ZWvp9D!QxYTrmgD@nr6mx^o-A#}N=bC!52q=_y{<2;H$ttJRH6ldiDo_@A#Exhwm-!oeREN;7Es0 z>qYD4{m+DBnuo4;?0s3!p8mzdiTBnWoW&Tl&V=vZHJ8Vk>k8(xh;?#4V-@Y@L4O_h z-Iwg9t!Ll+#ls6mhhO?Oa@JrCslAWVu6-V#Xd^d%CGV@h`S`k>`;xPR&kWMfWjrgN z;1qa8g87iGqcN=D`w{Y%a)IF*{{IyJs>f{qm1ko6uUb?$(q;cn5M%I|=0vYaT&W9uyoIg2fp`gXp*S&i zTc31^x$su`IU~QmC)q`OgF2b<&+?4g$Ty2$f%bJyqvAF-=%vI=P`x^bz_cu=Adku4yW9Imief<_rnyDG#VN9Ag z?X3{(QzzKx7T1%brhE0oT{RxXd6WmL>-zueaqPv1$82ED_JFVMC&op~#8Z$n_!@S; z!#8_4LOKJhlY80VuMfOYW5MDr_g*gEk_!)+5)()|$ zBlYl>$EMQ`*Tq{_yQcJ7YnMI@WRZBwA^hkkeqr|gyXN~BVe~1%fvjZSiF@o%0LxTh z(b$f=GohW__?r9YSTd*=edok4O8PXH2Ytp(Ex%fw%ys86N0Z}4yes`zb7jl7$*jLz z>wi1(dV4+E|HG&H=if`4l5di2)x;%)#~yGZob`e;;ZwMDu35Zq@A=Uaz}J0SF!?+< zOo}ISt&dn`UViP8W57o3lq1V#H8x6@lAq@!@x>osVQfS}!`0Kpy-eeE7D*T;w?Yaxjxpp@q1MIyN<6|c`NS9|H z#a=6C##*ah{7BN*c z~Be*_b&Lz0Db;1U=0m{_3f7g>lZ#`Sby{(!P-ymtFxXQyk7kw!`kp6!TR$d 
zu-bUd{g7d;{E%RcjR5PLCq7(yo%1nPFc`&*`)2b7zlGe&i;RXu0d4UJD3t^KQ!4+CeBD(+~u_OtJyzu$-@ z9wPrtecUuUnwU;b1o>)z(|vk-J2^F?d&wAL)_n`paKN?Dci-mhE6EjPN;fj42l?VR zt^J#-<*3g!p=V_>S)i+AU{Pv&E#k+5goxF7!yoNaC20KpqugF|}+ws`{$N2d4 z2>9q@T?s#q{JP`!#7B`WbGM8vbI-)b`m^I>ixa0z*9|^o`q+|AAK4?|}B+jAGxM#*9`2j4B|`c zOxMbqS@n4hRfR=*w-DbWIfcEd6XWMTe(ZR;*W2K`&gi?77@q%WBX(Hvcs>!H^^78y z3qNR(n6>nL$N$y8cQ9|@_^sKj-5h7_4$O^Yw&QP<{+9FMuHP-5f@#+JZD;+qCWfxx zy41N1CH9OGmr~pExx8g!Dw#@Lqcjzq~*(r#~&+{KW{k z88ZYow%n}vBje`5KSA6CM!?N(;?Zeh@&fJ-2WSzQyw-sM?uDv=>e$72Ciea6kb{jIdJm3uQJ{&v^a;;-cR(XKvcD$2=;3;vM>N`V?9#{?AQx6@-;cI3;&uk^fBToo{;cs`Lyv{zxS6e@>JdE8>7&(h;^fb_wUU= zP@k%$aXjT4lWnv(pM4vaKb!nT!`*v3eW)HHwJWm7$6KKIA@xjwnf=hjRCnRuQ|?$L zX9F+2(Ae3*S+SGN$qoIS5xbai?w({E(f*U&ALX}G2kSr&zHiYPzS4Mnsxkkk)Z|0=Jb-UAdgldGX7AuRJwu(MkeAvT5!d7f z>NFjgE&nDpI5v(6FR0=BPQEws?p=I57kHKiFPJd9itmklm(os*54lV=J>#e;PJ7j< zKK)aS`TNI(7nG;^EHwS0!p}`vs%Q0_&*^h4HJQ<&bL@WG7*pGju}m5|mhx=IGAV5= zpF7J~l&i0HC+=v}d78{?+{#r%7me>#{d4x|Z1{NV=ITh1NBIEM$$o%yA6iGR-FidS zUCB!ee%csWGc)<7IWB%f)x}BqU4?scKbNUKz6XETOxh~sGlLI2fIfV2#v9dtUB8nx zw!_TsbrIhj3q3zjeOJ+g)pr){Hpf}Cw~pAS;^)-nXwWE0*Po&e!Xd`9i1YexG{-hP zRQ-jbE!Ced>Nh7?z5d?gR|8w~2T|&@iY^<=WqUp391rcU!p~$RX~FZ;}hN zte%*Iw5KX(654!=HP}v@Z%s6g&~CJeHr2M;r1o5I z0kZqwY0H*vt_JUsVa$8f3vGDM zH;VS*kJEe7ON_MVz{4CzUy+`oK9H$>31oQ!8LnK}aJ#H>E2Llcf}ayxQIx>X_H&N~t6 zo2S1c{iF*%UQhfr^~{{*q9^!SH?WuTez)LfEbMV@s4+vE)-i@X=u^@^s{hJJiXIA5 zI*4$rvq+U&Zs&QkFMN)2sD|b-ox1-YW2!&mkdGOZBeHpr%TuRde6YgI6tiJNIlTU78_e(l*Uo zz?@z44!AUo9gQPB8$91wCHRrK%hvhqb))&*2Q6wn*1}^n&$_2|Q%!!pXis#PPIJxB ze=qvGXRZHH_QO1{d)3gR)|B+t>IcKg^yfRF1KK?xTtvW0fWDe(e>g4b|J#gZHDe!s zPvetr4BzH`Z;c5I@aCwWPXwM&#>Z~kTT?5#G`$tJp8QX-cc5?S#Tq*`L9sQ5u7Q#A zMQQ78`H?lxde1|ASvqt9FtE<4DM<|-@TOc**C^%I1VYLeT|1k79N;tpoT?!kLa*?z zbuXgS<3<7fy+KBMHU~1u=ZradqAMcd&PN=FnsaokBhB2T0 z92PCPS(AEZBl=U-2(@QqlNd(k{`CA%Cogm-JV5j_NOMN~VQ`)W9z|z+xZjS9b#g1; zz0sz-A6`za#nf*buS5BU_AH2g@5R2CKliJ&88Fbtm?xLjwq0wm`Ev0fazlo#`(yM; zO`!uCmz{%@9sdT;uB)C~6b8S;pBJAveyhni?7ctsJ_(n#cS1G~`>e=cFyext4t!{A 
zp9Uu7NVK~$8y?4&Q9tY_+y<^1)c)~XHCBfY4bshFi*MWZbs0K96JuiTQ+dbgE6zEd0pKCW zVZZnk`C;4Ovkl@44o~QI&)|K3>V1EM#t`r8pF@4|BX=OPW%q{EAM1W5@<#SN~ z#Y21IF}??e@(rzH^rBUjdQUZ!c@J5?U-W07d+HneW#@2OP`{!@^?ft*l|%oU_l}et z6P+bkYcDge>GI4F-#}I!IXqOJy}^IUGXp->k&GTBz_ZtmfmHp#^6~oc)9X3!6$l|QM3a5Y5h8D zRdndAr$x}a=A|CIYYq9B6Z}03)}cp;U+tsKCuy$~`j8$ae}ISi7SER-$>HY?K8MOc zBmT&#Gx_@Pv1kkvSP$Y)(hdI#UM;!ZPT%%kgiK_2R{Y*s#^<@rS^LS^J5ouUcPPIcB;GPQXTmzzlJ9zJ3HCq925<}=L$!m zM_S*ujFitK1DUrOnRi#!otB(zK~6@hW)8?nFYk)CltDv1Jg?s~QnIz8swP?Wk$sJf zZ=QHXS&CP97^iFt?Hjl3IqogT&(jXA=EF;P{_Wv0XUU87{b;Wtht8Qpy13wgGu|Qk zf;*)zWZV1Bw?dQLOLZ9Z_%eVYkzOlxf8onDcjX9g~Pu)q4=)3qH%l`-Q zD_t_dm?M13H?)cMSI&EqA<{GC?`g$H@#Y32`i1J6q8kIooEtB&eGyONi@526@YYQQ z&o_Pu3khJT9<8k14-SqE& zgLCBdp5PEn^6}`u+7ldyuvInZt&C6C1&4etKf{mIpMv8$t_kPzJIc3MmGUiy$;qu| z&ZIZTu>*CU_YCf>gT@$hr~o?q81HW8y}S50K2fKQ`U^bxDLu30dt`kqt7g8{R=L)O z=^OA%N}tH4od;>-3nR4SF*8Di=GagNZFQ&G6Ar4O8@2a5>(_z}pJY00<>da#HensS zI6S|)j&mTy8~PWYJ5TdOie5CYnk(To0{$-M6EMg2-iz<=8+Dr+vn~H$4!XT4A z%=@cdISutS_ZNBbe-y%t%4({Th3Ma9r_P(F9PZ#6a;@>l^8T`bncEv=KZE*wjNEwD z(BI8InkUzPi`vaKobfj$9uu#cUP3(pKYj;)LxbDja3?%}hgn!tf<7U;MdQ`l5YN&( zUVazTHhgZ`Mts1VYAiqS0cs9d{=L5G;h6gCpg-+Fd51jPE&TpR&g4c}c^8s%U1gB_7x!UV2Ij8;PL;Ve7ktxzKcFU$@{mFkW`c1gUhU8bKuInn*kr~TA z(sAe^p2(d=g?tX};f!{ZbE~OyRz7QUW6fNhW$mpI|EgqO$407)_dwAh4f2D zFVb^olV;O$c!m;RcobM_h z>~hoWEo7X9{6^>L-3M$5^h;!Ow4A+yGnm7f_zBC2_Y`tA2j>hO@iUJZqYdkxyZru7`o#`A^nU`dX4baeYd66n%e<-?ct;;WWKzB%0P5 ze1T_e|Bz@lGyZG-H(Bovr9~IK^Gndl=@I(Yy7@o${SVbKSFmOy#HdtX z4E~Of$F%$|j;@(*6KYI1(q<)arC(=lkPpduIBJWJXuN2Purjg%l7;X|7D-%L%;3RC%-jyA6#1Z zm-7EywyFHYXU@l<-M3G6pXe35#(*WpTv&D=aC)yXcC7%WbiBfM+Z=q$_l?d^9Vcql zF`hy`(sh_mJH8DK74VVo+u>oH({1rI%}cuP?lAqUtrq%UB!4^ad9Jqmmu!bdw+W7P z`W-sAL+!N-cwY9Jhq?M7HXHT&EIc4*+CHewb|3F>ZpV+sE3}RRW@b$jV>h6e0P5)N z7VbEc@Em+ZYs2JStqt*!^tIroeb&Ox^tCV&-S_X+o_Ld5RxWBR3X zv_>?2{S{!Zh~_%5FNDt?WUu0o871=$oEkY}tKYQa&O3+;ioevDUdtx1YzOcaGd%gZk~%UN3Xx<=su3bD}zzJV#!H*Wz*Sv*#>9tot#4 
zZo|)+GmYgjzo+2|Br~SLGxdxmd%?#Z=D>MQJWn(1isdVY~&>%4z>V?jzs@^47{Z%2!=4Fzv)6nA;oAg3&r94we%vZXf=+DD%)uk%rx8Szp z;`nuJx;avj)^`QtsZNbYV<;rH?&EVYzX#&mjHQ(^rH_X^ckBE<<##+TScc&Wd2iuL zc*@Thb%WRAYw*+N>%vhTadO%eo`i$tv{egkpw%M)dvgBOuwhZ<0tQ{+{-!VzO|#4`qz4wW~?2z zbnM#9rQ_C4Sz5@MP33%Q_-y9W%I5_>-m%7PU%}|v{<*%{8D-;VkNsTM>~Sk|X9q`{ zsvWYRRNrHa1rdGws4b`Oam3U4-lyM_j0IIWe1LbjA53&-%k{;qt3?C#2d7^mJ9B9K zN8>cH=L1v7@nbIOyCEyrIMDoF|JwzR`J&IkZ?%rDrT$=;HTCmO`#c4$Wsjj+UxR*Q zM|Ze8Lv`7*;xX2vS)}(f<7K-!w}V=$=tr|22fpWM$68O<*yq{~?{D->#ewh%Xzf{b zGXo)$`5vy5X+HuoFr;z(4F3S!}{oEnq%@{BaD35+YBas6#-TpE+? zC&}XUctHJYJC-Eg-@U4A3N@Hr9$&WU4c(I(=NA~K>oR+s+5`9uzIx+y|2*>Itg-%e z+E{DRmxhn^`5DGG?U}dvc_8#})|dmKf8o2|tuwg1v13aW>-Qt`T|;P8v?;pW!q|V; z9KKAgzI^o7JJQCAeLW%#mB1_PxvijY#h}WdBezwf#+hGgA8fbRNM_t*eht%@tNESI z+ETk^^b@3JwQ#C4g06A!z`o}a@*|Y1@zsmb<2 z+oY~u6E&Tu)oofbowfHH*1&K1Pzxm9kDUK$@9FJMn_3qoJU821&0Yuh~Nw3ci<&%8WzYI*%8{GP#6zIyUp=QhI+Jk-qGeWm;5 zimYgmB44#+Iw2nCso$}W=@Xofhch>Pv#CoUUobUYgAMg$}YX@Na zp#9wS2fS1H2fVZN4|wO=Kj57Of51C4|A2R($t=-8kom^fIWcs~Q{Tc|`=Jf#cDhI2 zX{g7l&ESnMqIOAXM(q-FLhX`QSy#xFp9j8aJhR7=o8GwY!HcQ4V&B96>n}CU%V*L) zv5y*^DI7eNQBPggxwdYFjZ7UQ{T8i?CPk|i(AEr|`yS6FpvmRXVy9&<+kF(!|1{RP z)4!R1kL%7owM%`dtp3Ha1=H%bvF4n$yMuM99C67)$pp#6D6MxK#4BeQfO#$=Clz_S$$gV<)J)KTs2I@gQ!GgHzR#>4HBBxB1KswerwA+{#-C ziC1o#ARmV6faDNY&JNkIW`-UHM)6PPBP5zq&cQmK6`r|1q;{vtc#*jw2iGU4kz(=I z6uy55tfenr`Lf4clJo|tRlKGu8741M^s$clCkL0a3Qcl|u_cs?f_{Rm+IcZH&l||* zyZGJ>{T48vS`*mfmaLZD>f9^AXYSm)136yC`=WcEojCK<*!oszS@A_;A#GXIGP^ULEnhZ-4jd^EX1{I};(^$g8{b)mScaIJaMT7XtsJN>l_JE7aI^V%04_O~x| zd1@D$qiPo-j}{KsrJEVwsr#>{CJE=)rsm-$=Ai%_{0IFya*ceIZKA(Yu5aYS8R2=7 zmE;EGIWku?to-#_XwnE@9^EwF$Z~!kWGv3_O0J)4JcS1F3}-wW*cU&W+Qys#NKK z|34W+eZ}YiTOO{yw(p17!0psTZlWDOa6eoDQaNonwd_-p+_4n^IsOR(^bLQOp zA%1`eyfpoue1O*6<@QY~vT#Q}1*t^sXK7Q*WYO0Ix*!9Yt`Snb(_2MA3rTO-Z3e%2t zc0ix?|NLS1FB!J1>I8Ok-=vw!HRrP)L)1TG8WRum8F|1$cM$bF^ zeE`1HbrjCleGsEah@uPt~ z^ped3>o>)Z`l-{H&XZL85L>n{oi`n0z9QmfW!I|qW}o%@dVVvPN4Uq@AF=kpJ2Nz& 
zDIT`LmfiW#PBD2vytB-~t?;k$o4NKJ7oJ>uko&9=8}|eEN8599*7q;4-w%RM@gB9Q zxij+zf9u^2+U@q+yb@Zlc%_~zHS!O5PknX7zO@Gho2<`e+Asvq@htVV`jDv>{iMev zWN+$xHpK!2o0DIuxz^mYz=tBIkvHgkk<;FK{1|iP)74r|>4l+-!1wJg&XBp+SXF|Y zY?*|=Vpw~_@~W))FC#~*=gWc1i;;&84QLL83l?FPYMEFyO~{iVm!fS2_$SZPIzxmQ zwCqx3Jp3NrLB2#U>!X{qgS^;Ax-qTx5pyR@tsLzOv%K zgdeFE;FsX7#GiK7gEwUP#DJMyQ|~g`>ZonGZk*94zK!3YP2cd?HqC?DkUlEygHXH_2({DrfK!Az;0)H6BUCRPHiX@m>McCmCT3s?4i(Qo>F>O-~#Mq|@sdt@U6cG8ym z{NK()EwoWbtvbhb3Zs=&ioFy)1-$v z_jHyG@qG(^(Wc~@#^TT+I0?-I4$b2t@aD|pyU6$gPXZF=N< zRvV@)n;z5XWDuq^?+K^MBR?xX)8HR$Cq?sV_tuNos5T`!^xcbOx7m3#8k5PGto!iL z>!1_i;gN~PwlnZ;=FAYjt@%wGSHj96IumxmWAeQEvesbQn0<^%eZq(5+%o`Aj&%Ca zf05gxd&EfR$`|8G%|jK}w614B1LA+Z?uNWOnG0fvH70P^oVIOhy`%f+RLhE?5!nqx zctILo&V{qjfCG5z88WfUS`XO+c2>Ggds|R;oGq`=-v?yZa9JmL)s2ni$V7aFOXbUw z{Bm?!*=k<%mLjebZ@Q&uAM3dg*lWjgj)s|2m(t4`od*@cy0h>NJ$M8F+x9fgBIK(b~Mq?NM zZDG#Uw%+ODol0PE;?IsBAKw5mDakxH{f(mE(eytCIL@~?T zBVLa0e1qjXw`AIv@Gqw8VfDAKUJ!8`^GoG-teYD(MlWu46|^t(mWBIdV|JeBZaq95 z8_`?R>Z1O6cLn+ycBI=})OvV)?Ly|H)oq43*N9pT=Gq^yr@J-sQDd8YEcxt%k?nWs zv;EsO{`%WzTXmPT4kOpv{3ShXVJL_EY`&;sYhUkd&7sPKS8i^nqI>_rCt`q(~FsuM? 
zBf&RZuY8cb2)DC79li1lozn6F4){0*YzmL#4@lV)>9$0=4bg=@8APA-Vi#f0W8)|1 zd7uvudgR$?YQT0#`9CH?Q@`(gI5K*q`Ec$z^C6jD$9(iJcCmIxks~mgT!AszrsqK) z9>>39josP5CfipykiSQM9>%yJXF|A*vzl51V~tgv&}jtOQwHr9nm6x~U5FpFrWxLs zjqTZjeVpze1sBT>qLY6TIKIl-tao9b^V|t!Ve-N7u6lG9FM3!Z^yeiWAzz4UfQMPH zv-w`cd*56Sjq=;uzr21U>(|q7+Y+ivp|xL{x+fV|M}NA%k#-z;c`Nsw-;4Nd>(r|C z9f*(8o)y{+^8Dm}TOU<>iH}ha5xb#Y<5|o(cG!kJz^uIMiU$@Y=ikIWQPVh5N_{!x z61D^OF70?pEP0-`u$ingSyjXM?0R?V9{o9Ur?ag;#eeawP4KNQ&Y1}F|7v_diSf3M zsWIkrP4Iu5>+qzNebnu+{iaj!o92XGB{%ptoI86BG3f;BAYe|ZSs~oqd%+~?$IOv$ z_qe+@uaWh5=s4%caM6T%b!x8leipcw{NKbgap=Lv z|FTqEO1V0s39BEjJN>MKHk#obey*vnI~j{~FNc2ApG7~^Z&9D3pYqiG6ZCmA{Uw(S z>cWF`B>gcEUj?2WI!f?d_9?p!&Evi?*4$Y!zvo@Gc^&w#BGc08-j1ntWB2ZZH%FK+ z!Bu>{Y1v%~SJ8ao`u|L#ZUZ*l3S@%hpy=3iY%eR$Z1d(7Tu;BB$vD#byUClv)mW|% z%;B&Y3Uf8${B}Zv%a55W-*IosBVWqG-^lArXWNiV4qjTot=89%Q}&*-rW8MEXHHZ@ zJ_0}Ormi@)1LytOYfkrTb)W0v8N!d&nwNg9YpluiXZK)xsV$ussP(9RW7tST*JJiM z)??}(_;cu4b^jyGNs33U6pxBC7U|MDmo|NE3zyD(oE3l2!!HJpr9Db|&>HPK<-anJy z+CHBa@+&0&MGyBl^uTtvQ=RL%W#3Sy6U)%}5s7mpQL)N+5 z#!WfqHyS0gocHe*Pf5{cFEOJUWJ@pO@!_*gf9_^ZzWW9=whz7` z-;`{4@!S1eACx1%pA9d1N#NT`Khv&@HSPGO6%Ui1;P|fIyvW!lo^g!5l#c$8Znq_% z5&16*%;fySdjpe-k#Fs^=Orek=ds(!B_YovWxEZx^Ys3}vxYg_dG7Z;gAShjw)}9t zoI&@xn>@bhF3tw0_Uqe+jy|=)h;BAr(bg``mVLrFptD%N_x+E@p8D~^SpI0srifRf z@9PZO$I<^(XRhh57q;|(uNN(8Kfdh#L!6I!hHquqz8$CS zjnFRb-{hrz>GxcJTkQq@{5;#=LSLS)>)I07j*R_nW6K4W43Tc=(4{4_nai`?SG(yt zz1mklF2?F&tum$;_3moMqxQ8wL*r;=91&#M|E6#0?T%l$e}ipsbG1 z$)7*&KG1L)XL`THJwM`9ENv9?zS`rX~KH`?nuje_^u2 zwKN$pvwEBGwL)`Ghsf>0hj~+I?3lKF>xpN3IlqeUAiVD9FI^t{Ejo7(b*)MtxGhQE zL~lO1=pO7bztOT}CNh6q-Jk6mYyR1;j`dTMtWVy}OL|ut2gHMBoEl&MGhjc&y&I4N z9pruNHgi`=&ee_H(J0=RPD5vtd8M?gzQfR1{mK4k{|%UavZsIhWNOa*{BhTTdT<2K zwRBMV;_HzoRrISIUW=CDHIla->u*U)571m~eY$^phwC#syJaW*CmsP76I_Z1Z>RnC zzq%)Ov`KX}Q$Dv5^_xL|oHGus!$Yp@U7r(8XdZ~GsLfOO-O0zZ=wt(Z>$zq2Goj93 zoZfEvSn58VB!19m)a52ETbp>C?4)4ozg@q40(sgKk<*iq*O|!eRBd3;n^b#ua-H@3^egM9MBjVO zj8*y2M<*@r+f=x?Z>Re+$)Y#DvUL%;63b5pj?6c+_G1q%+jx5MLDd>tuirc;dd_rv 
z_up@w2Q)RFEZ`ie@zh>iZCK~2V&iajOd#IO{I=uUxCI-p=ATtN09|X0@$71T3r0_D zk3slu6Z<;wfb49p7c3t6`mB+jZ+|H<*|2g=B;UH8AT9tcxbZz{UTN<|PxI`k zY|WrMFFAu+<_E`&X>)M5_;~-aCh~Kd$Fwhe+}FOaC6D|E=3%|Bc46I^+J%~nF6cnG zEj!k~>?!EPvw)aVU%v%+tiRua`+cr=5Vwhc?Ect(Y~an{i9HtwbS6@ay&iGX*KmdD z=_xZ+mn5TMf$7;aN9W{_N3s|kjgs$7@I%jxBcxo$6-&w6;XK^he9rYoZ#&<}Ye+%^ zw=))UpLK8L{i-RG#p+SZCC;B=@@S@%Q?lUR25X)q9Po8`)JdhVyXTMqgzI z`K>J*m;8q7?bY#Bnopf6^eT1rD#4HTBIGf5;O=S91&jOM=Uy3k?8qT&Jad3u`6_x& zdX(r%FbyAv#&B6`43&AtGQlny5qJy%U{}@m$pxRz-Y@RU7uuMQ3_xdN z%rAaLxb0iMwUl}NA!D`micCu$)gQW!J%Plv(QE4uWwcEI$NJCueeug$yU3&yLu678 zv@qF{$sOcrKbMk82U9YMceeD5BS(~PueHAX(ZZ-Nx_gZA%)D2P=wHyTtFvxNKE94p z`uxZf?&!ZF=N}*61Z_;E-KqE*a^i_`W7@v8gSyzGlJhmzpvTIEYiog@HM{?2mn+KJ z8)vUO=q$e5Mcybl^-SE6SBCGTH03*yZrN<<(0L>3*L&b$IxA1|CtZ(unLWcc?dzOG zEB7h1nek?G4w&fhX0vuuLgPeU3RdB7a1)WJSY3-s4u~#`4qkMJk`Jb1z>-~ zI`=UUT0@?K@`t`@$vgX;x-qOBYP3{HhqTYi%8Hv@Yv;X6=&202TQ>YzGTjD{&G_z( zHzRkJA%B)4hn66ZZo+2#C_?4B%V~|_@H^j<@`k;AcT*)py zo4~IuU1lS6CL2+GN~X(3T*P(T|7QD5E&^7^R;))?^q{9oW_u*h;m^ZmaG3X3+-~!5 z@pFfdGe-mP-DANv^7*`4BcDP(=DKg*)$tdfu<{hNW~gOQ(+piD{+s!L#uNW->+90h z(es`@m!9_y`vDw1?^~%gwHiIe^Mv~V`yG3m7=JgmM&Z~nb#6SpozP4%*XxMYY3`cH z@nkQ<7Gp|wv>+=#`uD{B1V;yWv}hGr;GsJ!udwqM3}k_m+c^9@V6_Eb++ut&7+pkL z(#5|^d)6Kh+N?j-Z^gn7{l$QNV9~ngRR1~Y++?4@+19}?k!|GkSvj@G$iK19Lf&Bc z7+#c*LHeu1|2^n}jw}=ZmybD%`%m3u@3E9T!!CcjleuWV;aY6VOiSLq!hP<|BJXfn zi*?;Ti>9A^y((&FaCTWmH?m80FqXhqrH^ISU6?!!UwoCd_HA$?nfA49-T|4`O070~ zjVvFsMpiIC!tXJ1htgx3BRy+cd`f$F2A|h>mE(BflK#n4SW8+9UckJMrgAhwOLTzWWn1Wb=%{<~h&u+m}L@!mac~?adcn)AKS%%7Y4$ z2Q>}3R6veqJ~^76uSz5-y1NG_C2iONRE z&IYHz-Gh$zB>S|hP2Z+@=r8y1yve-|SN49xH1|(St~$-5U)!tkqyCMDE>eRbU`1S6F|D6s0@80+c_}R6w6Eo8Cv)s1-YZo%r!7<~r zd=;LR7cBLx2`mX3PMmq@{`AaFS5!HIqCLe$h_A(;dI!FGMQs~&IVXsJT(vh$_)`=A zk!2MgaCA5Gto5wC0X^&CIXzQLE`-`Deb?gG`0Ix5i4v`P%-fR2rzEh@j7`CT%9w6QILe@V8&3Ht9EclEAjeiwVF-638_Kjer;-f${XQ zhEnjnk4-N;zfAvgurm+Cmx`Ewht5|p1_K|K)|GJF!u{d<;4C|ixe!h_;-{~I_ZMOx zc+EW7M=vtZTV8qG9rYXuM#)_`(vBZL+pKG>qVIj+EaCQsp1&d3P+q_pSd3Y+#FJ~s 
z$dupqCFHX5#E}P~2y_9DZ|fN6ZJVIYke1&^N>re)c_UkG$0`ez|R*1V8WKH%=gDz1JX<*33?NkWKO{NG4Tr zKccZVyQAK*nGHq(^>~0SLom_)UfR?A4fmJr0l)9wSdK1HX8RWR+H3F^0qGNekFH^& zbEM_QxrwU)Ys<7-ih`q!dB`~FuP>4ZzeO~%`Z;&>1h}5^PLMpEOwNWiw&_{b8}j4^ zCbdkO+x_{?S_^7R4 z(w6j$FlUeI|26!76ME3^m-u~%Ge+wfkNRW(;Y82L^X8|^-1j)o_|*OX(xUko?hh3I zkbbMS6r1016dMeF(9T+_W6y?S`X+n`40RGgEp0tT8Jt&N{(nKa)n9 z!KrG?sb9zUOszJ>!51jLEIy+(F5TG!OdljyUVD=U;J5UUVRW{5a4d>-IPy~batinh zj3=&5ZldVnLetKhugf-;eabUsKJ^Ylj}UjuF>@zH*as40jqhU~Hxet|h)+y*18aQN zV%Ck`ALqQEX)K$3o-yg0858D9-?Z}+sJH6epCno(kBfTU)QGTrUtV-lAM?7LczXT> zd@1l2?GsRrqOz& zYG*6=Qh46T7}eLN(L?tnRC9kbaF#QMgnMSlZ?4&j?3i1>9-P)qP3pZBqu2ewi6e@y zz6uSCzKJo<3WFEJ(&>!~?dw?1JJkMJ7TAp3;CyJ#ke>Tf*M#{&;s_Dr?fLcKwN!d9 zI<9bA?=vhvw%T9eH7=LUr?%D36~Lxvz6pH7{r9l}wC5pww13%E8kglW$%+%dQOy+2 z`!=%D=i-Uf^V-Lwx$xdijwI7xps8U4`VKN!3WMY7m5$mlM5ff{#`-; zmH2&pT$Udc}8zWT_71|{H1(H>hh9mi#=g;!nCh<)vjn#Yb7xH!d1H8EPMfD1in$pb9%@B zkLNSYeFQr9e%Y90@f7Af2;V}tTe*VYksNzJ5qP>uutr!*<P%9J!Y7p!>o} z0k><_sNC><+P8TRw(ooF+1jS~F}xzOVQTU@bkhhv&?@%oRNrA9P#zp{z&?#R2tNCa zDf6kv5S_w2=aD~w?{x0X~{J;s{uPmzwU+y~7YI-8|qmw=0i#p}p5T66xS zh|6euH?3|Gd6Y}i|1+kiw>_Q}HPNRE+9#nyWyOC5&#PtL!_+%fTORX*%ZIlkS(@HX z`X2XE+LYZXo@DcSbTM?zJ3NILpF%Q8PIEvBUaM*>vF4m`oi|10f$e(QCVtWb~wF9`g@C;lG;`@R# z@ZG;xd;h7wzQNKjY`h@iGg6)xs7PnxfL_!eB!os_@tQO*T=1vb$f z@h`3acKSspTUiMmN+wCp6!N^D(|y6K9FBU{pyR`npU3v^VQ+$qnMeA!_rT9A8vNvi z;9Yc+8NUIaS*=UDx8=tPUB`d-KER;`2^dujRaQc1rZAH#@bxy~u9^OYSM}@U zUT;%8G3~#8>s@;u?c7vny$#pFl`}n(kH<4?uN!k@>*FhOmwMRSG!kwE+wk1fYH(uN z+RS(QI+N}!9s18xx>FspID%fh5nd9*|Gp94f-O8mFNViu)pNhw9Sjw(F;=aRJ_T9p#wp#lZkTRck8UU)=IB79%-kh+yp2wkk_%QIM4Eo9ap`&VL`yI1MBh#4Ya@*d z{8-Ow|F$zO#$%1EaM-xCA4TJeuy#JI@m%@=#*?3dF(037`gjuPV_syhY{Clia@T>! 
zPS(#l*%Qe73Ur-}ld0MZ;KqN_-Y4Q=9HPzi_f}vpJ35_1myXUN`R=8i7&6my(w5WC zGiqnxIdl)LE7gy1_`^(id1!RFZT*_?*5&Z*2hc07o=|b$8}0}4F0u5xj2h+fU7XVI z?m{*@ZC{66CijjSo31ltvMGCm+|y?Hr_k3*#V_C|y6(I$dS?F@Hom>ipvykzw`6P; zIFbBt_^j@kseC5c*9QI+Ykh8z|HXs<#fvZ2rMSJt`^8_iPP^b~;;}ByUKfuwJ>gcj zr+p!Nf9cPRDwkUI9khNER~mhXA9A&JW%7UBsMtTfYWZsB2####CGt`7S0*$q37C~f zGG}edD>kn_;&C-CnZvo&VQ7WEU-S~kM<;u+f>=Irg_^>;hx68%SvB6WF>R5$ZF!~F zk7>J-XEh$^8+kO8H)@@{dt({?j8bAiw$G+|DtUt&+wdz(H^jZb5nmV$wHUv%;b zgaeB=F;4QRW1Y?(di?2!@f+Mu%si53^sOLAzsHmjG$q(<+CrynOkQcQGhw#n3`jU2WwvSp7y^K@}4mGK|JIET-`_6OF z3N>!fanNb^$&PrY-uBO1IC961O;_B5^C$NX>H^}G@&lPXFMLUk31{Db4ZH0s)d=QX zNo>4}nP>6Tzf>I#&TDC6tv*4lXFavU7Jc^esC2Cop1ZFdzbv$6`EDdTxGoxh?PsUA zhuMFxSdY$cpwCcZ3j0-D$sZHLK0L#e49sXa4>4_Dpz&pXZ)R z{g%Jt$<%Lr3Za55;luX(Hq-Y=^HpHWYD@1uHeI^zV(7`9Gr{o=YwzcIcK%#?J=k=A zd+NN6&`WO&`rDiFnJI3pv6-xAa8)6G@+HPDoUVFv&|j8bYs}WqQfK5W9-J5TCw%K& z&5QKbF7(!C>9aQFr;`u8BNeM2-rp_sr|~C-`LFjdpU4}_Z)fSX(1Q5IcE;@Zl|%UW zRRdbO74~ZZ3%t0L z{29@#=g6OJWv%w9Z)Xjp+ZnPg9e(U(-uI!uNe{GmIka!__LLr|{l-ok>Q6Sb0iUj= zKPMMj{fO^7-|EZJb#(2|jt;gVeaM1wZHrIqRQH|rc{?^~S6Z|9d7SI#Jg35)ap3Sn-q>%p`(ppI!yoJNo2{>Ax>`HWBcBQz zNp=luHq@0lD*D^0?pD#pYwT5PaR-u%D&Wfn#!l>rE!z%frTn{D@t|miJtCd>H4NpS z-s^uc!FuSv!xb%J4ZX5;Of-wU*(>%K zRz7O$PX}uX%0}**Y%Eh-Vb*tv5yiLCv`TI6d*Sr<+VRPG&Cc`axpg_5p_X&mydu?# z9nF3y@^asuK0ek0A4t@Ui*=7C2ZK))xrD(2VwykpqRV zn`abH>s}r13lW3pS#9*`x`*pN-VN|>5ASZ)TH@W(J;4U~(-S<;ed~D_&#(6;=XcB_ zw-S3z{BpkO@!bIr>zeMy9tnizlVAVp!EjsCUh{zI5ueG42X;%gM&a8{t4z~9QTI3( zIJB8D8z=hbH`A62p4`V8eqb{4sFr$|=#0&K-3L0^cXMqVpFeppCeIAoL;-70j^f@& z%dB|zGqvc#%6rl~9hx8dQEYKSHZ@7mJIleX{5QJ3h<FUId;C?oJ;$B+Q;R=g ztkhh!`F}D0owXI_zxtle zv+}KId>ZpO#@9LK?Bn}MYJBbB#2Mcf`X$D*Wi{(Zy!X?u6K7)W#5UZN)ENcJQSeoL z3I52w5ax8X@)upBj%>Rf`Nx`2Ou!lYCm6fdn5TJBUk`lP%UIthrd3wpebi@WH59WZ zJ~i5I+ov@%J@lw=Vt4=--W5hnes$kOqwO$obRM#0TMxM-ljuiyV!zEc=lXBBo(_8` z1-sL?##2h)@TtBRxW`&T!d(fv0eb8I;5I*0NPpUXEl z+!mO|+BZ}7fFmz#9e3Cn;u|bo!I1}+y(?KD-R*k#s_EQ@$9${KgrjyZ_QN9!m)90W-i#%9@4YoT`r#c 
zF@8?LkPi$^__kW`naGdbg1s!Csq(4G=b8J>R4&s0fWIQF0MH^e*~X4sLqLdxW=F_-Jc6 z_g8Swd(w_ag&3R8gSPI;{--veb5FX7+E1i(5ohk)qBHbb@wjvy;Mp>(2CU9&BQI$T zHcV#hqSjX*He08S4Yx{wH%}@U%o|aIdN()ZeKty>j`4vYSUz0F6Vu4 zZ{-GM(XOW;JG$+qSm>-d}9`4a-LV)7C#1$?^=Ci#(za0Z1lIzWIKT#Yu#t8dgm+j3+&cC z!SPw}E4XgcH+vf88>nG!FjBT_J~uIEBdwDHX!hM3KLril3N77&U;k!kOSaBZ_S}uJ z^!-PXl_S+1lMPTMJP~UvEW4&nx}9=fZhHRo_Ahfihjnve5#xEzKC4x0{5y>4*do;_ z$&ObrhD0g{Q}_PX;zil@(}c zIlNxBM?TLwHq|@i0Gxu}pwR(7>G(ZlPyLL%PVm1o;kNcq%I~;kBzx%{@&^X?qV2Wq zr3wpww*T~1*41H4=2^Kvp*;3P6IWgdeM}k&-uhI|Rr+|(BR^mEdjkGrP+#2PAD5iy zpZxpUW%3z1XL4vwx|*+Pn>Ij;Info!1IPb;SFc@zK>PdB_c7@`PhE}^lb@JwM-98T z(z#axomlt2bB(i~YKu{uWAEh}<`)+)w`93E`p6Wd(0*EJoVm(RX`H}{+5Q-QuRtTi5pE4JMg zfUg3J@bepd3ReFAC!lGrOE-v+E2g%BseaJmp6PV!45c$`bnLO@)tNQ5{{cQ2JV_l5 z^UA2_mzBG`mu*@io3hYoS~Y4)$$eha>(Ad~`)<6I55461WXDUfy}OSFFYG+pz2n8B zs>fe;Rp(LGW-n`XndFLa))V^rOWogZ*cy7i?PyhE*HK^AFOSAOuO9Vsj#FrD`J;aN z37@+D;TiPP`nk<7`A(KMeD9)-FO3bf9WDEgf9iExzkT$9U$h_1UmM7rS@zP=yDon1 z=nT`3y+^*np8ST*7j+-4dS+vuFN3|VU%CF#>(*ZP=v}97eE7*z6%7xZT6b_W=RIvU z$NDe1<*%1)9-C2d1O1en8UE`oFeXa2R@RN1x`~?2j zdfmbC=#mpIjQSt^QSGEz^L8wGYu1*$=*l0hdVSWosSmE5_2>)q7d?=3(+j{|{G-t; z53KcMUh>r&{H?Pd%zF@j?dR8yo4V!V+DR{X$NP6Prti&~F!kRV^PBhs`uYDB>vtXB zz5M@zIX?6Z&vI@`^^BZo-j076wX$ZmB%5Mz4LA_$p;4IGaBQ)g30uvHH_Z9Ls__$_S>^Gf>#n`z(O}qB z7zFPP%(?L3;ZyU5G4TcHk(&Dc&`fvX^K1Q?{A#uXU^WF*!Fkt{qFtl?{~xRaL(*`ty!;Gvu4fgR9m*~v)-rC&!?^K_lEs@gz__{bh27V{ACm!c1lU3uT*6ZkWaPw$ykFs*N};23MOjrdhI zqDx-)#5PVwkF`NoHta|)xdP9xl(ws7?vk@UGAAS@hrb(0X}9{w z#mmq|F(tcNUO&m*_9Z56Y&&b@e--<1GjI&q*||*a9A-U-9UsXWGXno;c-1$=^)}(_ zscvX_!WX7iq+;8owozNqgU7wwyOjA;f|cnn*!M@U4k1p#zx&$AWvfzC-8OuIj=&gldtq zLzL@zCTsr)-s3SV`R!G=70jv@e(TG7{-&afmK>jyc_HseecD@l%b(Ze@BhR6yv==h z-{wHxlPUOFGE}YpB5~&C2Z$f}L*l&6_rc45R`FG3-r$fWvB4oL>>EMeSu%b(-f~V_5z<3_W*o7f$w_Y zo8^fw83A0qfo}@1^#QiNz_WyY3`|po#;smLpX=%KHsE>#xPA#-OMq(@a7_WOrz!t5 zW&T8&KEU;Y;yy0mQp&S?p^9is-c$%&i&o*YKf(DJ-aFE!k$1n0w|IFYF^8-X>sWtP zJh%3p>{qv)to!*JC$qy(oNTx^d$qiu$ai+aqk*&8C0$cIWzfYsQ~OuqSCdrd#v##a 
zANPNZ7oHHi?-S>F*>s-&O!&sHuD@r0=d!#(9&GZiXd4&v zPv!B46x(ekde7Ncs>^(} zw;bCsaJNNc*18s*I3wZh6Rb@0cz5S?#1C_kIsxDD{x5IU-@o>}IOCdN<{dXacJi7V z|8{cpf@@Zb|7P^r(IvpgJ2=_@U0uv4hcS8iNG#VObZujJY?bUIN;&y0zO540eDjE#mv>2ttP2bh zi`9s&wTrg$@ncCF`K+OiktaO55L+zyFuoXK;^bXs0z)ce4Zvq_QfC=Hq+IIkfd_LL z%Qca~A=mnXL#}HBZhLrdD5cL0;B^mpE%mgk+5jJ>!jGfDzxaEX3SUB7(?7B?B!)Kp zv_Wk8kW|{hkM(E{Z4}T3cN$yvqT6R-VyuF|rfNHvi7&13OMC!}?K!cTIgwy+)@~83 z|Jkc+H8*x*UP^{1MRsIvyOjA!)-vxB51#*vV5+S3lLmY2c$iertRi@eHBC*+N%kzB zk@ugn#=tfKwzO|KkCYDHWNw`I%dFJXQ4&AbG3{p7o2kz(ddFK^JZ{$&8&9ra9;~3B zU57t8ne){bCppJd;#-ijI*mCv_3ZV$8!5&gGxmXTY1rU_burz&ZQkn^2=|U5Hi&y) zSqHHXnbg^ilff5R^Xx4}qn_DAO)+#vwo^4k}N-cd3dJcut={4_`I?bLngj#S?F>BO`(>v@{@6_%KN zzK=WDeoM1Px`8(5?2ve?4ppCg(egxp^rrka>!HQa$GxWNz#h4w-#*Is6g?~NVLx#N zzF^)v3{M;VC;D??n6EL&GcUX(`SrkR$zddCh2bab_him3F`rKh^X+D=yYW*sK81bw zDCdXgyqIC#Z~8UzY5M&I`?GEsZM(|xBQ;Ix2oH6FhdNt6BnuvFoI=`D!FJZ(zpR%A z{ON}$ih1T;aA_Gf`3bM5aFy_r@HRSW`MUlinll+&m#?qDU#n(y2rpjsHTgy8Z|Q&W zr<@NSr82BBq;%mvMEks#D!Ol|Dq1`=sepa4992`O=Lk>75zR#-ZP3h8uJ2p6i17*D?)V5C_-T_eLqrC->s_9o{DeJaU=33pb{FYq11PJw z;{aHWCq;%g7XbUP6U1I3+rOu-)boSOq%+|syud7W?si@GQ`*amP+J?&BMJE0pJc4o zT9`T)f`_%!24@b2zCWeytzkpU-g~l3Sq1IaS+U9;t4=dcu}vQhQ@f=f+4o_Osj3Tg z?R2NAj*Pn@tYb+>^nWsESH%DC9)Hg!v12+^g+q54gV0vSA#^rlDdIim`M@T;R{$M` zHRFG?*GJYl3z^?Ur%#LF{+#Wd%Zfezl7;c=gfB(jn}+|BvN_B*GKWcB*`J(4S>e@b z;3!|yyPWx%SisH#M+acY=iTc`yP!35TK*q~r%rorM5>p0HkFT;IXIOMy0RpPd2||| zR5_Q2eLvb4!1o$V}#vRdJ> zjQ3F&q-GwYe{9s|rRb$$8|`?{VOyC~=$rSIlu3RnxLrp6HM}ePb=JHA`W0IO+ab-A zapxD156o$k&;z2gd2dkFa9!SAt^pXOoV0N*>rLrv8SmwuKDcdnt1s}!UhVictP`g% zXw#i?`=m|TA6Q16qm+M;u|E%7Ex?scU;m)JW#BcJ_9Xur+I#!T*zQ&t-JYyBbD_uE z3)*!bO`EUN=5CMQkAAS;!y;{%XGOFr?OA$I>oDP4XNA{L_x^N$t zzhrGezf8ebEHWZ|_aI~QvToam?&}9H`Pm=H=2;T_Ci8a^{3f{fgSUl(TgD~NiueY9 zR$l??vClbV24&V#MtCfR{;`8XYw9)Y7 z7(Lc&>8o_Gzx!KKM)w7r7EemqrSu`TWNPHnTaH3w^!Hxh=}4PCFL36UzL<;mW`n;J z)wQf1U$5w~aoA=J_;qohGHFBe0QtOwTP5HhuW`2@fTyK>>HA;S zhlga}HZU)(+JgU&$N;uR8uel{Od`Vycty^OG+eSCmG=7qANot;4x0n;uz^2c&W=g{ 
znumW3{(kf?vMG8Yjkcsuw`^9xw{E$&^8W+5heyU=pkZ{kSLu@V?IM4SQ(zwh?(fj@ z=f!TcI2l81zt9}rbv^xU5ql?< z{$ub7_-5WeE{!#M>d+Z|@-ME4WpP!8#>X$uq#$)aM;!jBv9LgLgakn$d zJhR3yK9LjrFPxuMgDt(J(0H+B`G6A8=cKat)K{8*ax8+k4IS z`6`(UWDnyIb59W5d07iru*S|7e*!pcU>@4_i;{PIzj^*-LF{&G|LzX(uGoWwj{h|G ztV4dS7?+(k-HvcJ?`ZSy-b_yLen9ewS4C_hQxYN4yLC_+_rf zR>QYo@lWZ)Z>GSY7o@wn87 zjuI>83&o1r>)52KKu3FTIG>ocm<6@(h+amPRr*Fws}$%Tb~dJNnVe5^jaB-RShk=_dQakt zxS!K{pImu_#1ZeX&Sx6DN*uP}y_Wb$gH!YF5c*E5gKl2N-;+oXU|r}t%B;=kCJQJ!0W@9Z&#ResI+m9vYPn?n7a`>uE4 z?>zPcMK%jo-~Z08Z(cc>v-hKu`S^?)khg*<4JQ{qsj9u`Z=bd2IBvh_Q_lI`eLmQ> z3vBvMCfWNm`+%_4fzB9Deh}#&9QgR@NQPjv^#&GPk4mHpVOE6v%UDT42C@u{D~1DixY8(ynt{1$$D+4%hwerxDZSTpUFrQ=4&J~{gc zJR$OL?=}Aty@<~T{aDabRh|!?AESK`Sh9dWCD`4X-`u`rgQ;p?y935i=3+R|4aoExzTxTOD6()5yuU z-}Mn$`RyRNFHFDZV}vCO7JY;!hDN99LwuJVi@4upohs|RU6mb%UMcX1@VU&}f5RUs z^3zw~>lv(RosVVe`^eOT%JzbN^?F?FW@uMyH*Qv>~xR z64zGCfDQ|Lq{* z*Q-Kz;J@6)nI~Jvlie?Ti7pl!?rP%RjZQtweMQa09Jao%D0>AlA2)g=*1oE^FR`ug zl*sAm4~CXC^WI>wk#5D7D)Fee+`IGVP4~r?oDPpn`-ZrKZ;3n5b6M5!CGlm+_i?Wf zca`ROlHO~$AXwAXiG5_Bn$>A@kFq`8WlWu``f^{4_j&Hr1{V$3VM!OE8(3pud-xT5 zKz{e_nU7xW^7_y+)|v?o-6-1?e;Drs#1{02%QI7-I0dWzH7for?H-}uY1q}% zu&b-EtB+z+r@qjmOyZiLYf0MKQI86oKgH&GoANTg;X}KWv8RymO$_&(kyj_}@O#$< zDuY;&eZ$a4R-NrjS0TFz-$dEvUNiOQrkBl;=VA8qYV$mCVA&n~KH7Ig>a)F(nGv>2S%)63b!LtVHR8eO%b^KjbeExzeu`l+|@OZ~~ zBXiL&VK>C$V=g7l-tHJ#BS@Q4KaH{yVS)ostE83N>$Z&N z$z>|O8{Y!QSz<7cpi`tRiMi{f*!ybTUb`#VH=$2pJxUo9M95jmm(MVc@nc+o%UhEfNcGACK%<6pfk^k&fk1nJAd}2a`ZoSyA9)9p5YfcxqdvVt~ zdy@%CPe;yTjb_Q-)r`USa*whG#`6t(S8{&zy|Cg{!`6Cc9b&ESUW>{)bl1JI4(-G` zbd=Rs0kBwO_#kHW2=MkgcZqmCk9A90;IYn9iEm^weOY`|5g&p@^lr-wEl-H;ZRKsb zvP^tP*6&jtLgfU{rzt1)XFjsu#5lwzkUkgF-(hS-;hTk`7dB^zpD4jz*LAA81iIPh za=KLAaFBg0%E*}_b4Nn!dqt&98N=hO1K*}yZ~BPTN@OhuS&K#1Vvwie1(mC>`BP-P z$lP=Mei%8kV%;LI^FQxtJ4WR1ZGN{y1{*{sJ+Wh;jo*t5`YFGMe2Gbv_(m`7ZSchU z^CPn@xn0`^-1T9v`(6C9_>t;j)L`j9o4y4GAN5nI8{_efNsS!sPo=HYHa^IlAB@5dw*)-*h|d|dvORkL#zcvstggWDhNhd%J(JD67K>pps8kN8Qf#ScNB 
ze)pA(8-1l}-*|oX>@f@c-D62l_my-wc76G5VoIi0;Pc5|P=T(weZQBwNmr`bif6^( z%-pw@{Z?fBzk`vzMC-nd?ZD_w;2wqcYsVdFd46_GR?_vNce06F^|7`d23)d!4FE?> z%oF1pfa}eth!05hc8}qi*H_%(D6q-hOclT&{+gqVcN*hOTadPT47T>@v$sErpWg3h zyb_<9!#YOvjDc6`ncpot<-H3&?g@?|-rbL{HWr_48}`53TK5kg#upqATh?X&P3|i1 zZwV*H&b~iW><(i4tGt|nOr&k*+-Bj&ys*wCpA(1mk&pYi!aJ9=ge5d~iB@GhI7jFW z$0zz2x()cfom9pV_Av9OU6%PRvS`Zv6`%XFw9^3ZcJC3Cy8+V*Ugo`+yO{f^Cw`Zl zlwGURJcaKpQ}ri_e>hr*ZQC5)J+1r2!Az;Q@Od?>TgT#6VwVhuzvXV})BnvGF4idA zr*5|Qy;5m2kW$wz75|3MD|q%zV_jzL4b%3PNkcuBEiUI9lES++o+fTp zo=KTW3HWvS4oeDV9>$KUf0TXmDXPr2wYsK#PJD>su6))dRorL2CBS+?ec`!IbcNIx zKk7QZ8;DoLSGBL99k@>nW+rX7-;p4`g#dW);)hGZSKI)foKuzlpo2r7dpD@=FpzheG{M1>tS6R1A`u0 zHEC;)fqQ_rl0EYRp&c|7xZ6-)@GkoAD0udJy6(nLSTY^lj>gwLsf}kAev@(2Jj+%| zY|;*7UTD6<@{x9JT*~*6@FCpUavAsGuxISy%pc`a2QFJB{^vcp+|dxZH&bmFma zV)KsN^9Xun_Yw9>ONkNuJ9{04MZ-33q--fRu;l95 zX{njanfPkEHcEbgdc|R#cMl8qHZDEV0&jV(_r&F(Ckq(&4!##McHsvCh_@7TXRfs4 z<$n5w(*p3byyxhj*I-%TM&|KT*IkReU4z`^B7b9$L;H?RYj;AbeCTu@ptJ zNyf5a@z0wCz9Z-%q0Iw46P$mCkK|SIS{xMwh^6k#}ax>j^Mfc{N zXc_-8^<`gTH0_DsZeUo_C8LQutC*tpts!qR=gCu8&xqY=@Z|%hV~htGw4N;mKLU?D z`?;&!QuaZPp@S!wc32n4ykOd?wDLPtjWzkK55!&*pUNrdEN7oi;KQ6rejR;eLj!i4 z;%_4@FfXM~;p;TuUHW&KcM_}aa+fVo@wbpZ2F`X}#5#cT0%r!#1ZI;y23;@anb7ti z&nHMfQ&har_j|@9ypXJE+Vbu{)3g&bMXzV5+qq#xmpgr)a4pi|o+~fI`itkI>;7``3$AC@vBBbM4KgwRvMYPkO z{I&Rl9wz^|$Wvc?-W*GxZ~NN%y;sI{f17w0k#VUnF}1~%Uy2+XUh!kAEkqX#=NvkI zoH3FwI6U(M?>wO$izaud`0t?C`yc%vcbLdo0gHz3vi?31-Yw0vCB6o!TmDJQR#|7* z{H89cn4|Uy|CIpeLgvC+bjm_>!XdszJ_SC}N%AfAMb_ncsY@P8fnVYVyv!xocC(I= z50BJLM}Is>xsBw@9(n_9%HEx{CHr=xDH}_Box6)p5A&_XH#! 
zk$!~!ru}R1O~NbeNvW#Gxa%~Cujo*CaHHHg?Bk4ADlwr&VQORXyJ#Pyo0^}^H2EL4qUeHOusL2vAyS%Zow9bz1zO>%xdwW>Gv}&jtIf{wPn+)Gy^9# z%$5M}QAo_Yjd($Vh3YOYg2zl~n?)NzaN^;kRFAThu|3OPddjnJ8t1_)xI?p!K9#5TN>60%m0>>a zmg>s0{;GM$q9sGOFIqgbcG04tVsE?xF6#8%4k@I|!#(?+{S@CK-{R-nC%;J0C-fgd%w?8KDWZE>YfjLolQr=V3z*$2%Uu^E6LC%&BU(UULPhmgN=2Nsa z>TjG`WiLtEsiuvN>?y4T4)*m1Heg=70Ij^1bZRNgmd=KBKVJ$oN_sxa@fKUDTPX?fpM_#tS*d{?gTOBA{(o*e~NX{|9B|3{0Ia|BS1A zpo0ygz{J@s>pdA}pU|E^iQ5u?dU$f0THu^%He<5&xP6bi;9cGU)PNpMg-0u)-63># zs^UJjUc_g&u;&uNzI2PHQszyOZ_fX=m-l1Jdp>Sl^{;5h9bf2gf%B7Sk(*bTS3Jxw zt?tx-XIgf8_Z>!l4S!kk6Q0zxuH(P)72sd+0#{y4=aR?|7&H36GwV0Y2f!X%7vNbn_$&Y_7QoP4*QbL<=AE7qdi34tHjSYg!imcQJ(9>h8P~f zok*#t$hSxUaQ+wtP?h}eCDcggym@oz_Vr{NQ4-8~(j{digH zN^Hw8*8CetSIU}SXn-Ac4`=R#UQeRu7lwy57Fg@|p4R$3)ziHs*IK{FvVJdJB|Zo4 zg*wrTIj|A?B0t>AIXZ7+@xbC$X(tvuB5+RkbuPJwGjHE=#>~sJJl64E>TscqwT>5G zKoDNYr{8nd?!QT`UIc91A$iW)eUkDvhX1_>utMV~do^5BzStrNFlKUBHJ56{Y>19_`C z0$*2gXUuf;$S{vOp(2Zklb1V(Vj7FUr?uavZK#j9L)(lY0NkQ$XS!fAc{0AEoWYeb zawkPvogSm?hknjo?o#j8^VHjY-w#iCz)3P|L(x@&6L);Y10UKxaqfv2SkIZ7>s@V# ztXb>T+CgPC`&v5Zv36DbcI*BI>?IRRGu&0zz>q1{iMX^QKIWV5b zZrRy>ANupS*w^@E)3{?XP4*#$_F|_A-JfEu@mZL6me`%!*mIOShoJL{mM}H_5Mz|z z^6WDGEQ@F5*kOzK;JZw-`O}UMx&$2{aX|z4=C0E+kEeWGU(Q(PNsN=H@R2m0H?Zar zo00x`&mJ)wy~0v^Jc@3Ainj#9a~@AznJSFNFDmy%C#5Z342~`=lo+zK0CbnT{sj*{ z!F8{~8qvcIJYS?~0PRoE-}nZ3=O1>rl#%m;QdjB;Jr2!LWyQ(X-5BGHTnS8m;Hd9j zShMquAD$2!sI@(Hh<00cwm+{el?d;5cnI=q3(A8 zuCwG-;;o3GS!~C@{eyG-Zkxdccbc3}sK-|-unVtbYnc|FB_4U;I!gxp=g0v2`Y+cH zk~3kxv^L;Tej8mNHipE)P5%-67FqB)cEw)=whQIAbs}{W&m--2W9>lymaZhqQYk?X}kFqUZ9sSLJ+q?iy^N zZa-2d_#<_K)De2L?x&8q+UUHpq3E=Jat@I_Ai(p7@* z0PhEeCS}Bc>|PtDIJ0QS0eOhI?_6!KMNNE77x3s&i-@;LK=*a13ec9|;cwty?yhD2 z-ja`BCW*N6L*R5F_QPb0*Al88-fO=vQ08Y3?Pen*rd^?r{63%TZl@jT>vdps$JO+Q zK1L>mUPcz>w^z%mdyc5XJ~#NP0bYa8mhfo!v}@&={St@Poq0DZ#H-@RkaeBh#cyQk z^dKu%40mH;`}4uOj=jKB*Zqw4WKDcCwsy3|Pivz*Cj!WM@g?}diA6&e%Vh6mbfl+j z4Er|I*`p{y-&(d5wv(Kp5_vyzrezCf!yDIer-G!toKs6Wk(7A|8${Bd@!iOM2+WI{ z)SfHL7QXaD=hpAH5JcN)9ZyvM2* 
zTD5)_WS!RfnUR-#7p{Nd!ZmGINuKbG(56`HPx)Qp`fb`NcD0l1YNyUs_I$cq>?5J| z#(%iZ;<2PO;Z4!qVrMs4wC79%^~7IzKD}H- zJuCisv8|JrLI(r8%pd2ol_HWwH`sGwXV;iaJ&AXc^`hvr3UGWY-(o8X?EdqVmohR| zu{T6FNjZO^fdyfO?K^B=+Rc>1ub*~mfE@#w7q<46uPwegHzqfk*f;T2olhrviDh_q zha-;$5Bb0?bp+>zn(oA9u_t4-=j9V_Np^EUfZ!uJ_&{UPN|`me~4 z;Ak`bm}kP1b3H3B%_H7P=AA6~q9-3|JCE3?*3ag8CWOisQBLmRa+h^Kvt`VQ0lf6A zvz{=q%Admr)?9+lkbAej;Pc1#J`>bTplhbC@&zeJb6iSnkL;rTjvMesJ3 zJoznjk#w%BZ8t2*8V+~+b9DQ|Ty^SPaw6?zp2Jf%E)5L%lno2oaP)VTL$9Qj+qPc& zaPT0*~zF8+a{|bXTfkEpW)MQq|Z?u_c+~I&fZ$d-b?Cu!b13jD_6k$ezr2^^)9S zZ~K>J{+F?zcRr-~s}iOqKV8@+M(lGr=MSH#@w$d%{^=INTr%Yp^@#P12dnfu1% z0Y^Tul@)x?ZQtshOvu@n4|7+qvF-6arm(jpbz7HppB*vy5c{^i7j`rsgBS7F$@_AM z4?EuA=e;Ye%SW@egPyZk?{*sv{w8t$p$Pch=S5Dl1~YULeNYTvPG&w3Sc+ZrGG!#+ z@KvEJKcMgV6P?T>0$i^shrZA|?Y6CQ zb|9$=KRT39D>*HoS5(^5Ca@LwES0SFpzT zm5e{>kj1xGRmO1s>L};0RG*mc<}Auc7Y)$|mR*w%PQ(@vdcFt#jNUPPEctGm%n$tD)i$mWr!RORiwH~D1udidRF)64{>;&GviNVU!jD?j^iK=o##;{7^?H#-evM+lE4-`&n zisH=YGlfY__VYM-UQ=8-q zQ_3W9hn@5l=$%!$Ed5@q&q>O2q5&ABZ*Ol;WmWo5LNG}A{zHdX4j4J2a#Eq^L?z`G z#m=uRid#_mWbDw&b#Yf(FiX9*L&GZLMuz`W{kEKC>d!jpGkm~L;fGGo2v)X@4L%Zn zZ2luDs_pLKs%=T%#H`BL3$ZuavU)CoZtc7WcllQpzq%UsyRKBck%Tp+hVGka%TfMem;gqm5%3 zpWyvD;8|kBvittTh1U3`{<8E1;OI={(^j4h&mR(p(Z@oo9Vt_lex+4zL&i3%>~IZF z&)5<4Hw@T^TV)03QZ_9;Nc%%8!(zk1U6_UQm~>^)+-=*-zV|F_U!^*fxb5%4Hoyu9;809bVp1${yYlz`a>Z4BNuy(54o>aY<7!K}7Ig#_UO09hQ zC#m72b3OMyQX0A7MdFHgC%1{Xwk>ge!R>`Z%d>tzG~C*6@TGlLLMIOUgYz*;wuw z#z*n-YK^=<;WTdp2n^cJaoIek=RP9IMtcNCl$&3;5aYj5H8b8)GvIxK11 z)B%5LFuduqLiBe)_UW+oeV(YA0N=^zNVU92S^jWU<7H1r^qZ9Ndb*b7-$IPp@UXJH z%h~tl&T;Gc>EY;K-kTkxn&&*};htjROI>F%#kOkw?0olBKojcgb7* zD_?lCFCwcc2OmnqPG#-aOL`Y;dg2&QR6x5J^j*x|sZHN8?;ZIWwg>SEf}@-v;mviF zOXYXqbXc?O5z3g7cz%>KNR3Yfo3V}e3Y=1A7(TO&eBNb6iBD)*$h){kFdy^28`CbkB#VpZ6~L*$v)6S$Q9ljY)1NSS+4nU}TL=Zo82BD`6Soziem za7e?yD1%@4CDWem7tw7~FaL*&F9Wwjvj0VW;yOb01=p6m_@Zjgyej^spW+|0?+$70 zvywG^7IRnjo(#@oVaKOPTv1rnbo3c?TRvrPU+fgQk3S*JiY;{ctnxS~EbY19S(Uvf zrNoW{@}x<7ooH{{&Co`lr10Z&4cIj3xJDf%V2xBI)SPsYh_sh_{q 
zwgHY~JH*ju{F~gLGCZm*IV{PVcV(`TkCpc}dAHhmR{9;%i(KjVNI&UHe@J?VEBz_y z&+YV&m>vHcA2ZZ{{==@f{zmk_ar~%@h{pO~`qy&K-@gZPZmS}9wo1=#mHuU`G`i)S zzuV49DfR1C>6==m=d?=S&?-H(Rhlyf|DXON7UI18^}k?H|105Yjjy&EQ9oR5|KNTW z>yP==`uo16g-~v8jJA^s-D&-G`KkTHl&e6Y%AfvH>^oV1yx+vCf1^&D1yk)LyFPv- z>u;X^W}dMA3b!*W@@Mpf>6f*Q^_QtDvc|RkOn=OG)?Zv(yM25L)?ZY%{eAie8=m5) z?KJqX{#L$kf8&?4{_0<``|qyPW}Nm@oD|R4zpy>5zg&WE{{O#!kAaz0Hs1c-_rGuH zUzad^^{-3)zn^RPDg;Tg{Y$u4J}vU$&nb7u7so!0Boxj5yXe&Q6aKsOh1yZyY~7tY zF#InOKbtvV%g@gy&s$is>fp_(WAn3@KKsm1!)LU-{8sU>1HazV@9C#zo_OVO#hI+d zXFs^>(6ZlLe9_#;qm$bBei8m(=`+6#KArx|T{mS_RL%XYe8SZ5(SLbk=nc<3KdDPZ z#~1#3N8tWvW7oev;`ouVMMG}tH10Bi0Dq`5GAn5#fAahP$-nc_+f3d6lW-V)_dnK$ zQBeOu2QuFOpxFPVBLCy`{gLt7+WcJ5WxPhepPTc4X9~_rroC2h$)j`r`WM_;Fn7}A z*|R6#t@_WsZQfkf|Mt7=|N76I!H?Ou&6sTex^>PR)qm>Ucg|r!-v74zNxz&u`HtHr zPR^H@03Z|(3bMk3$?&Mpkc$c(e zw{{MY|K}5&C$n^pkux*@n(whs*=-A&gjPmhnR)p(o_&KYVCtLs*L=raV%IlxHudeN zN)_-!c+b=~^R@Y&$0nZqnfiuY=6e8nqNhxKV;`7rV;@Kx@@LwyQ%Z>)Ep3_l#{Mzi z#{Mz&4E$#NDXr=o`^S76`$y`F;n=!$08~Dvrv6G~}vEPCRZA}uav%k~dF*$aP zp#2s4zc0Vs^+WchK8{bk_Y+ZvTW8c;5`mNz@ogPJ9X~WcS(Dq@2t{-fL zu-5hOrml?8)DOIF!yQ;_f9FZ#{F!DK`psO+^9r_+HVBn>f z3>kIl@JmMVbVSzh%QCEds_5@a^>^#95$3^+f?F@S_*=d@!unWb0C|#cJ$sht ze9FVU*2j`Ioj(BH8vq~5X9X4IV@aXTuX`$1)$O-GT|j)ZTE|Mdl;z#t z*CSP-E?|-U#rZ%fAA?j)5=G zXu&geF!+BVMorY|NBYwLTVBOpzLh^8+SI=6;PWReteg7yL-^}~V_?k|8-9WJAo*v& zN3w43bJFu0{R*^M?WLno=CbQ<;NQS%r2NB}5Ih-KN(H|3s|~vR1+3Qq9-x$xB&Ryb-C!>hyQuFVls; zKs@+48L5`&{Hvkg$3Ex>J_P@hz{jy_pDNb*n}hKGCl0+oBfaM{f2jSRGyY9yqtsko zeh&7|yw@D~4)c8C?a?X#p9;J?nQ$j|XM;kQZ(+P!T>Q2H{N-KeQ?P+mJ_dNkLLZZU zj&$0vJU)&ykyTFl`GWct*<}Vpf=M<$54v1Fsc`p55J{tVy#)jl+NGI_Bf+HVy z!Z`ER`<0jhQvM8%mnCmSh3NaUWTonpqt!^=e;4qVJILXmE7%;GcqoRzNgJP?8BaAr zUZLCDm!Z_k-Z82Ix=H(6D7WNQGIe|T;Ag{uNHvwTz&{3g&Mk|z^sSU%Nc*K%`arZf}ioMGrj@fW800skUaefe3t%&gU`DEOLCMtbw#Yzzx4Mm z`YGblSmo8@=@0)@pN>=$NlX4V;2rQ@q$S^yo(?`Heqr-(JiQ!-e`Dhv{{0Q(PPi@7 zkyqN^``?cKdtdm2_VP45-;>@(dW}wB3jgnE7ZZ|~e!c0xlUL2q`JIsO%$K|&{qobx 
zl`4$%huZ4^zchRh4Wu^w%fUzbEzxR;rcWvII&YuDzt_NMGgJNQV_p8vEa=l<>qDW> zRaY_pg) zuf^~OY-+*Z2E{h=ZHM2+fsfoYE)mrEuLKz%`oZYmx4=&V`f()rf}fMw@b~4hiegrK zmnATM=7VINPDg)4;JpQGD}SKC4?QDvx(oW@6!bFbSAh30c8P)a1@z4uyEx>i%U|&m z==+pE#E<`-tki`5=pUWm*q`xk@`d>4AqcgE`6N;2M_hqEiTA4sI{h;AJ=iujRDU7- z7>oWe_#t*c#qD{{}?N?BomC8+SwB zGhDz08fks;vPC5Eu1pI641<^O}^kTkhfqyhV zuNuRAzQ*B?1EdH1J~pK9evZCL2*j!hFqE`E2mL-{R}{bO^abE2+3Qoa8s48{Zz+OY zAd?0EeDp~@`Xo;GcU2bfGM}6JRS>=~8-1zEX8`}quOmbAV`Lx3Gt(R5j|TYn@a`C^ zzKo}y@r^{DjehD(|4oTL6+wRj?_v7?IM#2;i=?yB*As?f>+1fvuUW16C^94;!|DI9 ztN;Ftcjg;WA$fc3LiE{LZ%BSSK>y?Aj{I!PR;pr%O%H)j4mRXfLT^3(CjfLpZHzju z)7R5qwToVFAXrPHY*kbhkoOb+g#n=N7SEzecnZ<--ABc<<9()F%bRpT}((`KNx|1_N_m}-%rzic>;U^ z!leHR$Y;cySe35nGYI`;>{-db%muIDZ#?5a*u~MmE#R~8=e`hp52Ihs{MK*Thf;qn z{Wl@M2^!u8@EP<(h49(A1M~Tis1W_`q5TH%+XS4_UljVh{w;@I&A>UoF3OUB$-mBJ zpGkk8!N1wqW68R|U(w!S+KbTXn}#xe<|mW>9s0Wh{cY%fmho>RQqjP>i1yO=M5&d! z{^nlbvp!nQ47cGsd=caM($FhG^}t?Qxy7fdHT)0I{z}?M?5**9#g8WDtEn2_`RKFs zp3eH@pn@;9C(98?yKNftV)d`*i9HKEGhRMt@40U`{m>`zcYwKQD-WV}3XM zz6X68fj%|-xf%G5|1wgY((o`fsJ;6fdu0&xAL*k1G3NW?@aPc#{sTbj@_p*C?r$pj zQ-6q3Z|L@Zg}xZ;TK^UUU&9`6$o}q!zN>zi`AFBF3;r`Q;CDT~Z%MECm|I|U{*%}f zh2KPl>fhWOe8KPKx<5bsxaJ}*4AtpbLG;VVw!Rg9%%`0?*Ze&V`5G|7r;h3J-H@lb z$iMV2<%@-1SPzcW=}A${KY#YC85*7<;5&o<&eQqN^kjUCSP$#;sR1%yqW^U|DTw{` zHuk=*|B>`R*O}krS?45fu=Tsp^JVDUa14Dy0|HNP^kFsgje&1}CiKMq0S#7u0Q}dt zIP=5p%s=@TIQ?Uqs?tXyRd4i*w3kGBi%TBY(Epx|T!5cjZfGryVf z>;OJ>B3cEtJZf1u_1eMdzNEgO8?==*P3MfCNv}CeH8mF?_I|p zNu>W7(J`u4!&3l1CA;RIyHbGXy6BMnaxhC(0N)JVUK#Yu{G%`AKlpF-Z|zz5RkwE~ z_*0*`>>cP?dR1&lUYEM`i@+C!KA#IeChPjoG2hO8k@X>I$q!;*ZQE#%SH?FM`X*yv ztOFXt=H`lpQ;-0*@`}r_2xwMyW&Dh z)8DPMzv4SzNI!K%{!R_@ht@}D6M@&|UwIw-B8YvFtlR%$B>2YPW!l>r3I6st_I5q| zlE22WPdW}^efE{JKD{Q0zIeycAHRqGnLl{d9^HN_@-+kbGX2#8o~EW@NLGsasc@I*r&E>{KyOAdN=w) z{P7V38T+(j|JS0gCe}Lsh7#cEy`K3Kx=R0F0{{9QQ6c&Liuq#6_faAH_#pVo1Ydkv z<=;y|e`CJ_wk5xF=>K?>Uu9~1^vr}m@&6U;{NKVKBTvVuYVrl&FDC$x_RmQArdIMX zB?vqp+V+;f_f-z_<4(8T=+XrkM9lgE1IH0 
z^Y`Swz*CLAtKl7+h5lzfBm61w#Ku9NCWqfYfF6Y|di;&~Zi|bbIwJpp#m@TX4{7l8 zFkfhWI0*Zu^kTm{pxd7be8sN;9Dg#NQ}Fw7*5|Q0eH-h?nQuG(m|VvDF&8*a(D}e3@)BhI4%)5$van~mJm#zi-e9!o)=pTQ`Ct*rig=J59*`akoKgU{d6e>(o_Angf#M#FFO_uq*JGJRX{+)8@+txmdMF7UYK+fSiKY*_>}7yXK#;0pXB366dGJ@}gV5$kV_uOHAK z^ZNTj_Ty=xA4ZDd|K?H1&xOu<*OWacAshvGuANFqH zC()tx=l9G<4ftKNbBoR;Ryaew(n%@%MZK`~!MnZ)z&#y7co`neYcjgcE8L{FgGH9n<;0=ev~slo=X7KLOFz`1efvPa&V_+x@D6c7^`EvyjifIqSDi z8E-!8+vU1`FX(0L8R_rdWb}86qmP>z?*`u3c2Jk^2Y=VO_&pMQciA7Qs&)Pby?wx39ooV6SaY zNq#x)PIc`kyaN8_;omx@=^cQc)hitT^CqG1@0|7P&(NotF8jC~`5*s~FC>q#LCUk= zVDR+-=|udE6X{p_zXJJ=WxZ?k%Td~IaIF`MVu6SC;RIbi4*bTQ0lyl)wavXZ5GJaL!*b`zHtzm!Kz`rb9;EfEe2i}Q9-m$mm>+yY!JRgk0zR}}L4KTmr zUo`khLSB}$pKI{BBLn&F5Tk;+{&?s;wk9UD-uor|vmXA5BVYJ$A?d@U4gcf;-<~I( z`6h>chyE!_z_#e)g}<_~4>EPS4gQsRAenBh{OR!1o^5`0MyKCGzsz*)AC0Eq0sLW* z(JH?m{@we6ZI25)iFB0Unm^Wnk9o7$`Gw5i6TY?WUD+SK82jOPRivYDke4@>%8QTe{`|;ahReRc26_~85HSO| z1fE&oKl37IKI_bQ64&`d>xFBWZxY%$_G|)-R_=A?|8JR}R{q`}qUR#`xeHEFL+=Nm z_jtV@Ch&endU;2ms@L%K8vuW@zk>R+;PFE51DoynPw+WN6<%=caJZy0>-Wxk6`#Gcmtci#y3#pSQt+Y|bskBz;$4*blF_p06iWYG)$ zk3jwb+e(LnpK`{#LZ`zL(U096|K?`ox7syd-gy!Fi~8JCWYzyI^0$KZwBi2?gY@sx zU;ELYUhG>LkMv(U7<=voXFhs>{y%=+S^uqo{$pz+DQTC-NKxyd&l`HY@{;pJ=x6e8 zV*VI_J`{Wlek;M(8{i8ttp2|5jeKZ&$$D}U@Mk{YtS^oN|KU2@A1d{|z>~p#iNQw^ z{F)Aa&G=H0uMN+!zR=^ph4g&vl?>fq6+gPX<@l!`29P-XZKnRPaOR6s5g~a027F`L zKM?o@Ki|N=$u~Lsb1wmJ@?xJlqsQN#_BSD~roAkY?^pd+po-Lg9C@Ail!Nb^sJHSt zM}HqgUXRVP^_`4&DfryOe!J0sFOfcj|1nR)69fNOV^14>lmxsh*pD>u4n$ucXMIe! 
z7XG)wpDVjN>+1vvwYMkp6L3g>v3;2T8E-B9NcwB!<2e4SLS6q#^!Fz8cZMGCCdy4r zjtbcmT&|)L4@mTqjjy}fKyO|$W8m)weEBgBd|w6OXY3Ee(!y_7@YNgrwn3*;NY`z4 z_-`2eKLL9{#wYNdqP_7h`fLaOOMgoJo1oVK;x`I4eC-CpumA9c z;Jt_R_%qIY_#XAw6cQIfg96W=khh2zz3O9)|Iz8Rhtnudw|7-G^D!q$Gf4}6jw1&W zUnYN2zojknG5b}J-Aca-{+cqqDpt$Ow|`kr$3G|aqv59}>{ZjB4}I9g zd_^->dly`Vyu{-Vh7Y9wKG1(J_TOBc|J()WTbI4F5Pfd;Q>FZ+qHowwHt>~E&uw2k z4*tCC5AN0Nb&&PW9*5r&SP#r?=MT+qQN!VP=w;}U90@=4XWxqPNq>83ui~gb z{;A*W@MkaRvFRI!o~xJ->W~*{PwHQQXqUe2jBg`?cm{tAY-HtUG2X*mKx^c46!LTm zdvcr{yBMZGUp&JABxmrD4wg z%Z>2I0qk+J9yv)p@B21?2tT%=scoze$MRgp^E>3B82J#65qdwHj6C&aKVS1l#{uvY z{B81mz%w8Jw88Hb{5$2urv)|tyaIg=U~ic5e=`Dpb?u+sLiwq#{fbxUF9Cm#!N>d1 z<1q9PdL3~c0u-jM%nUtjF^55S(r*HPf9#{MYO?LEbOe1Q2lPp7-14>pW(_OsSQuTmGi z9tEHC%A?c4m+#!-}U0zR+(IbUME?^OGz7V+Z3g{z2i7Z|Q&jL#*R;|FgkY zVO&(mUVIA1GX6j*|C}&hz>)vK=#$t8{2{u(Z-)ZkD%Q`sy*bIycb5~76QKXZY@eF1 z>G>zM$qX@ktuab?H;4O4a2tx9$#Va=CeO%`1=9y zIm1QY6!wnN5Wr2b^SudU-+6MKe#ScG5g&@Pk}FpJ~#e-sed)j~u>+*+uO#gf7e?_3!tK;eRmrUwIMsZxQza=zfzK}7{sP&p z_SezTO8nO|b$UGQjla|J*F2iV`ss*6&s)J${%;~f{+|oMS5U8?E&L-q7g)`lBJ)Uav^~`W!Ntugv(KU_47)^Wg~S-P`3)EJQGkKT+zZ zq5oDkIO7`w{)VzYW5(Nw@$S|BTH%LZUjlyGI`i=bgRqCtUxq$UBxC>fj8=O!f8>I{ zz5Bh^{)_Z?nEpmaIsS+@(KkWjKeP4t5}8j|V$V+1=@~)fpLmE&ot_Cih2Y=xcMJOB zK#imSbFRjI!M>%MrEhyM5Vw6Zk^YLW@`dF03;2J0gyRp-i$~t-u=h0l5$IpVd_7e6 z|9LX{<8^;%zx4iG`eQw7@P8+EMHlwxjlcSnUf>IV`T<=(?HcrLMQmvQdn0^!0Qp^` z^B<=Dk@%MzbovYA_aO4SN2l*Vznb_#!ROm4^gqRk$1RLT9+)qUf29kE&Bq=q*7e`O zzBz{f)xf(?;D6dlxACLTh<87u%YW08^3OzD`wG(EW$;saPu720eg;6VGtdjRvgG4& z?2U#3$G`h)+K+Rs_isi&94Fq?&~q^Mqr(4U=y?(NI{ruYcbT`Ozf+#=B(EY!Qepuq#-&{?5Gi1FDUZlNt$j`xK8$N08*>L8!zdQRslTqcl zoCgtjBtM6GZv8%p_U6|{qqDR==na1RI{N0P$WPEEKf6h9dLR2ym#;!ztMS*TYj}sC z57wX$GIaV|^wmn@Ng0L(@9AXto%H}@wBR`by;nl-4LbktF8&aBKZW0oKUBthdlL9^ z?JvCryxBj)U&Xj3zu#2?zY~A|YsNo;_-TPp+6yq=(wB%Y)A+j=d0zjX*NV@V`fC}V z*&meje(15M(us$7AI5kC`(OA)+Pj+embmPtWu#ZUPyCCWyv-Y%~3zJ$G$OHlhvUH%mExy7~qpN+h)Cmz$xzt5wO zSKuEq{dquq9`YmnAnivWKXusCfN7=gA$^9kkB0vrrk~ncd%YFUOBiou{xbV#Qoma= 
z`{zBufAGrqIv5BGlUJm|3ndYJrSz>~Yz ziH|GI0>9Yzhc*7kqHpH1zi#NE=w~GJy{VrGVOB&&hvv@<$UnR_A|$_mMDUKW|6%Cg z2;x@i{Y@Fq1IU-z?~(KuL!qaRN0s!e=%Y7?pA>y0PV0sjdu{I9+gd!O?RroVRdA9U@P4?=#-`8pZz`}8-_#b4_fxbgQ$ z{A0}NCg}viij~{_CE9!}t)9`iifzJ;d ze%nZX2J0GwY?GM1dNn|{LXVK$f)+c7aL&pCo<@26&^vj=t|6}~~2HuaTKa%}} zk-ELt(f`|A`riwDXSO=~@8Y_f$oZ9O-Cqg%IFRVXLrzXXo=4gK5b1vc`fB9&PW@Qs z#{;Y{8Kwn)9rQHu`$GR^z?1)yvwzk=`QF4+<>~Tmz+dUsXw{_CkC8qIK4tt;zc>6~ z;!z~M8@wg6{*gaPU+0>y1wIe@%e_C4)){_vo!>mk_|5)~?8m){{ER1Fz|c1me3i2P zn6KNv9C<7CIrhwR@PBZ+Ge0vGs*$rD{5R6y0meI4*N;hrzc6-cbvh6HO$2{td{?L8 zZybcbJ=z{mcjRUMmKe+akny|+U}k?((g(w7uQUD>zGeJZ1%V&_5qXky2JL*jF;a~L zMUu|;l0neo5}iLP8T`5S*Smx7CD;=No^hnp-;P!lTK<~s8MsBSfqc%HGk{#c>E!amk%5#-+3OQOF8A8nBLxp(6~(eQj3 zM8ALGtS?W&-|7#Jy;2E3&m+Eeyl$@m{T*nD3hf^Z?twgDuQLp5{Byu(!hVN-9WEsO zDf7GTZ!qa{*Lw0T*2jtWIp-~Y4L?u(*4F1D50FDexcqCEg6~3ql-i@)dlUGKeiD2y zMc-z)?3t;I*Q}pq{Lxw1e_Z09t>OI*?X2MZQH4%_1s&(#6{+fV`i*4dGZ3wC*;wOy z6?qtme{ZhNFQcJC>>Z{#D}Se~uU-3Pk*vq&5`Ph^^F7G#!A``}YJO?CnEr`h8>#c( zM_**(FE;HxpFsb5y&~f)0)OKp0pcetIPWDE(ysNL`Jyzq)|=X9n|;@#hW2zE!UM)qLQ~ zL!XrE_7*Z;vmYh+|2y?3vOip}+y680HnceL-;bfM*W=$K+v@*)@Vg%Tn)T#mG0Y!2 zo>f#-2`4tDv{hN0Oh6ept``)>PfzWxZuScIJx@q325WZ-p2Qqa1AHe6q?eMQo zcY*)6O!I}-^WCubCaiJx?-l%9(UBK4>hi0>PX+iHtJ^<;z6db?%+vYzz%Q|`^=%UN zse6C>m(l1ump^hp@|%bM(a3KV^h@RzHN!t&17H39&Uul(;A<1}fx*vo(qp%=zo_+T z0{AWm--dnzE%%cRcnP&06%js3-8V zm=*Yiz7?X+>K%Rdi{9vmuFiS9$7wIKDKcaqXJJp6`x0dT@Sb$$KlY#3>-NuL-zU7_ zQyVnC`hxge>^IoV>OXxX^vC~<7+UF9MnTWsvDW%T;2B1}0h=S#Tn+yOm%fz#wgO*7 zfD6yHet8;s7)v}%gKlp<@_G>e!4{o9LAne6K|}AI)E_&N7qe>kR-%sz*`MUG1y3Y^ zo}xTpTj>GxlX<|%Uk3$h)f(o1hHd2&O2Ycz@$ar?KF%e+agEO32>zxvyUshx{KEVN z8(a9FO2>!6cZ4p#hK@^#$2a_Q3;g4b506G3%J)0*Qh!4}_qg^azw-skwe$7Q+dGQC1)ctQpUl(E@m~<8R&BPxtQRgROAIClE57`S3f#2oWbBTI9 ze@7o4=R8-jF24_cIJn92S4OaYU*8-P+FzW<_{@Hlz-s@d_W}WVJuX`Q$+H2oy1t#1htiO#IzkD-1 zkJ5GeQP&`d_&o0dlrNIkttZML&poWSR4n~j)Ss~bNAjBn6+-daW2;DvqkSG?&yzl?Dt<{^aV(mCe@vvjKg^1*WfwG;%j02_($pXJ9=aM 
z`ep0dV>;->-z%p;USB3J$rB5|5Bww_$v?I}o^Lnx|L7lHXn^-m=nsXk{2rnI2jmV#8>C-S@WQyLO4O*f7R$~7i^DfcP5{-BApG5w`_gwNzG5(yt>%RB9xM2RY z(~Y-JqW(kj(go_?xbMQ3$+ryiaR~Ou{K=2;@EH5ap?IFESp0XCPlfl-2^+Encu*LN!$NVxG`)h$LzAEMuIld1;Of7w(4eejyyaw~PM1L&B z`GXMjZ<03-^=B)``UHOBe;oA}gZ+0;Mn44g7ZInMZ?{0O+^IT!=_@E-da3UH&Is)- zyl)b}6pXiI>}Lq_%X^?buz#y${Phj-{DgjvAM;N}eJDli_bjxx*Fl}W*bAgDbJN`q zu@v)Jj;-!}={BCnW;lN;j28)>_YpWBM)Ntz^AYpqF)PDA_0=Boy5k|SkHzCvf(sGp z%S=RhLohzQS-cJUM;YE<1^&{8C?DDn)uiR$5cx+o*Xf6M$NV7|>035KeYroo=#Zy600m-WSb8 z@AGr5z>n>JQhk`CKZws`eL#Q6#rF&u>klLF9Ja@PR^=0o`>WQ#C-pHrS5%fBgVr% zd>{2@@|a-#Y>)M{P@Z4Zk=gq<$=4VC?@^>qpUE8SC5r~~Kl>GNEzv&EryaxijdaHP z3eR7G-w)KU66a6RZ7TXV+Slj3{LlLF0Q@d!ZzR*kn`8dr)s9Cj@UjYj2<8(rtXF**zgoy|qe;bn zGu7uc@OgAe_x>`<2=VA&0{t@dhaB9Gq+;ohLHIDRAl@IYg>+`nufE6P zYomTVP(Rc^s69SoJh~jMm=DNri}iIG*2Ab~ZGA;_M}Kh8eV^V1IZYer_U|rZe3aq5 zsnGv6V7?XfL5SZZv`@iR>=!fnpTeKd>Gpfa;rZYVeK#S$9QfnW*uE_PG}LFT4DWBO ze$Jrc{6+eA257&1cz+k#;}`rkSl{O*~rdp-{9f#(;9;Z$yh%bvG!Vs`Ab-j6a7Vd%$K72 zeTH#Viu2zgjQ1Zj!1LWtwB8G_hCK6i^L=H|XFkXGYL?%3sxQ1mZbZML z@g9l(DXhmS|8-~|nU${o24H@V$9XJImj7iBln?tw)W0eHUVV(mH{_)QD0dvuz$wduNuaS6B{qY?-}Z&49`oUJ|*Z6PFPPzviz>$d7{F8 z)@q&AiN}2JjC`s6QZavqGXCUW3VyH$wUcXh$X1CnyGv0#)ap zpgzMf-wEj#p#NXGt^Y^gHU<@Ci}^*UFEh;Va;#5ONKfTij{cH^?*qbocMap+ryKTj zS%0~R_dyN5m!&iMYUuwqqWcc@QNJ$O@1p#P-v$}xx3RkC*CNcH!uJg-&n`9k2fp8d zOiSO%0PT(MFj z*Qg&% zJKFY9q5fTt=;o(Jcpk+}!t(>;fXZuPjrL~yo8<40_Q*YE_(#8fD=N~bRJWd2fv>ne z@adbg#rua3%Wpj9GYibOLVYZNJVo^W&FcFS#+#{B|Id1JU=xfN?5APaXys|q8~q>p zZh}7$^(UN1C;9SFKSiSNVHxPZO4Ps5f19AZYEgMDFdu)x^Fh#WOTzpx8{b3RS$^4& zzX0FMg!(T6BRTftRV-fa$YCvs@9(HDs^2l_82MOVTd??LD3>qv)4iF0ojv9gk-p&` z%s;~WJf)w8hAE5F%_lX{abKtF-p4*yLwiVc-&a!6K2D2u`sch2@~zYRqyE(Y zRQa)5cJPvGq z+6B*JFFcP?ZQAxSMg6*9e^;pgh@KdaLv;FJbum7?@9OqnI>3Jn{uq{CUxoCd_p3OR zC%G5a)95!O-xoZe+)*BZ|L|UT-xcY*MO%Sh^nSG;f|X)F#E0>}i}yW2|DVdY1>-3* z9p7tM{W;)yqcYSzZ$@E%&FivG|JN4teR@6J_{l(f%tm{l*|hQ{!C?gbZWqCiiZ1)6 z)5qh2-%hKv&{Fh?C0R4%|H;4M`MY&dQg#4aZ?|R~XW1)T?qrVCI)mnPAZ!XR! 
zqnl~d4^U$~VSgLVto6INBL0=Gy;Bhpi1xQ(@y}6z{&*jo&HR=aPfotN{eveU^bnm7 zEJsF~Ae{F>7o+@#p?;iDKPr~KzXkf6Xn%7v#=i=iQZcN2E73o4@xCj}2W#L^?bY3H zw-odB7|hqg_&WoC0sKOHf5dz$tZ%6OPKutFBu{e);w;h^INS*3#rvy0>#xq3?~11A z=9ekx@51>3;x``kqelG*`S-^2XCI!g2-cR@)eil|5dE9=m+NRB;XYzYpMv+LeN3N= zlcm3j{^%*1FYQntML1t!%j8{-_hWf2 ze6L{ib_nP0R_HU*_oUk>j}2R|5dTEX=e81^eoA*3wGjO<6im23fcR%%Jh_X`1H4B4j%lU$=Y7-)`R~Jivd~|0(SJ9NmvaTI z|3#z!Z9J-b|5}5F7Vc{!{vA=COW6Mr=Cit1sE=kkec7ex|7v`%i(&OKzBA^pFS_;5 zOpG7R>dJrg1?ORW2;VEIJVt0=Q#_AQ3~hh%K>2e;@8iLc&wiC|eb)`^4?mH9Z(}ML zzPDdu>Dz&SA?`~U!~9p!p3b87yo>T{ut~ZP9g51=0{vBv?~6kIXWRbA`EVTOgMFg; zbO`bj)+?0$2jus_{-`a>uO%GbFNOC3Q*H;wM=JIcHX@$N>x=bpI9m@>`b4y60lv=( z<3C!B^5^O911Uv&X0_4Hzh^Mt^Vol(VpIB@Hh8{c{U@YXVEr~5^Jy%L-v_^0nC||U zHfTSuZo2hjSM)z4eAE%r&>ZE9wt-u^v$2eaM6H%fRz1AN%*A=)aWz6Zj|3M?2zq zMCHpw|0%=#gzDA$*JHd1-*t(JE~5R4Qy8CSO#hMUcRA`yj_D>t zZwJg*lRxX`D+T(q2Iosr&06`of`9&Y-TjqM!8c+O?ki#PTcf|)h{j*CsQ*y;lJWeJ zVZD>e_~m0f#9}=l$WuV$59e9XJ+%2v!2Wb0&i@GQ^#Suk1oWA`SbTfbw?|by-T4R1 z&ph@Ug!o|8PnoEGDkHu4eLD-!V?n=~)pe;_sa}5%8&U6$+YtHNBfwfeFS-@ zqJDg#Z${&no_BI<%;$JNB6_ONJMjC)==NJpke{HBP5EV@Ux(v-i6HMl1^O@3x2E(x zOm+LgL_fwB?T5c7A)NQ94}LDqboxAwkbiQj?);E_chsk?ZhkRD`#Hqw(za086l~-F|$X~esj{LFk3;IVSf0jP#6YFQ1AIKk% z@_oU5Uqbo_JdcI_Xo^39{+SE?2thxoGRiOP_Y(ihXkTH!m;3`!AHx1T`8$EXpr1+n zT+u%gjCIcoOSDHQ_9KMy{=)N3(3ha}^4@qK#rG36D_?!&KL-0}uqxUg`3v{q6aS^? zn3-7bC$RXs7|+KFbo$h9(LXNXc_ZvcD#70e{Du8hXZU6K{v-6KKA5lKMbE=@%tyj~ z9h6@$v~SJ{-FY-?wAX7xmrfmkaH& z2JP#O_XBrU9=pz%uP`47@o(TCBYNNKjpvIKzSj%m^E2M}9wq9|OD<`Q@^{e9H)AoM z%lhf`|CU4kjo4pyWBDzF-0??cf4(R5w8nTFtNZ&A50R1WG~DNdaYOB;kNHbDPfh+j z_`hI26XY#yf%!#>`wJNVEyyn(`c}Tof6X5AD;`?H{JsqHjS};PP@d~(7~%UZ?cZHR z`*>nMSZJ?cJWt|rs9eSP`9R)dSbq!tRq$&>`^C4xKLPv&`Or1F$Xty7$`$2FK>fv{ z{wx^3q3FMlP(QxR{~Zx!`nvff9pP%Mw^1G1`9}f9Mpz8ogP;0K9jp&tKf(P3Y<|v! zzpSqAd(kahU5y?N!73)o$$1!60C5h$_lJ^zr zqX_jO#LuaZ{to?vWb`9SKMUhWxSyB&SJ2UXasRDQ|G&_FGQ1B8^*saQr?lAMPk+Q! 
z(_B3CtfV`Cd=cx@_?y^Y zXY`Lye&KsRm8V`0%zx0YP_g)is6SKaAF3I@G~~x&KTN3aF4W)fd=lh;VTbt(`e8hy z&u~G1!1G1u-#%*ehx>+q?#EazdVW#=?q-OD*LCZsd04*)e}9O|cLDuFxZj!lJ#IHFTssR1j z7xh8@yBH6$7P|Z4yJ0?4VSOdkhXn0!o2t97-U|HOpkLs`>LVTVr{^W?e=vSfjpAO5 z_6PMbu!Z~dDZj~e(Vlo87V5V%=Cd^HrwHT!9Lg!&H$e4q8WkNb+K*g~^-k(n-TT)b z8XpnRH$}cg{{sEN59=kNeN6AJ!;usOY>yT|BRy z7vp{@v?IyCo}S+u@%}8U$WMd*>05^T$XR{tL4EMh??E$Y>7i=Fc?IasKjewluf)$8 z&j&Nyuj9XQgdNr$zc@J_^`a@U`#MAx;8?WZbF9-Pv z>HVm`h@R(e;P>U4ZohSqGu8tw4YlvXB=0L%^#6Cd^8Ry&g5N#{wwGski0W6-}#F67YosUg!6Nh{{eTj58g*-0J2L;l&&2b8h$cEk9xkJqiIc2~uGe7oX4SK=3j`QjM#DJL^|Mo|C9_s)Da{tS&l zkMpjVSbQwTv#>s){ClIkxhOB1U0eUZ)p-Ah{v^7Q*1r%A8&QAqM|%kOrBnJjD6biw z&*6-|CfXwwo0E^2Ki?kpGrr27{?rNW?}`4cV)1pmVg0Sry^k%ze3yy)6nt3xOO#)@ zzn$`Hjqw|a@hi0d5%f<7?Ejz`T6s^Re~A0H9wLPEfK*@G(EjnF`yD!&& z|(Ql&y}oEeV?G195!a$Y4x4wT-SO9wZ#e614nTqO$9n$9S$T*6m^7<8*lg;AP< z8w@=U{5h_%BPUV8sWnP`UxkQDq?2I6SL>;jTx}*Lj~K|eJx9B8KpUu~p}6SClO;s} z3hRnL&4xNa64bamDxBv-h(O7QKtq78wn9u}n#)l<65Lf=U1pq|cfd1}*TcLWLe+&e z50Apd>ctwO|3Ec1R+w+NR6%uV`OPc7S{sbgHf7%&a3fSwYFp`ZLTu4wgeO>`%|Hm zK&$xFukc%s53*?P*Os zkAgdZ38!}N8Oy6VS2gF>1%Ejb6{8ZhB**dS3VZ|PqNc7R~5`tGWko%gNKJ$xtUe7s{hZI>XoC`6|2>@6S~RTdB5bjjXJYeT9jYsw*7y3mM56 zaIR9R8tW2H5?ohhm4QCsKw}2|P8ljUk#6VY4v-`tLr-p^Mz7{Df2(<=%+pLMmCA5E zzAaZ_pf|5nY2{XO)#O~Yeo8)=OO!)QPY{HvLaJe;VxkkoaW#X8>QGaO%9~mS9m8lM z*uzv&v(7{wPR8ShtF(st0IzkcQLhpeil82)tz(W1#(~yYqf@~w=0ajQUOkeFu=NCQ zcM4PDpB(Cb=owrH`PF~~NnNGrF0m37cQzK1L@3n|!-T665-bH3nTm2*!6oa8#4uO| zm{i&{A5$;l7 zl}w@zC7$@hHI-tx1)QrwLL$zXgu!_iH=?5lM9*io{Rv9Pd zRcaY{DUGS4&4fF*0Q`+0=JAR0HN6PR64mQ8^Vx!mBA78b~tyy74@o z#vz>2I+O>IB*K=n=piw*($Y$~kzpa~8!E1{QYy3ICtw(2X;Bdc_9haUnzMlyVoTzY zk(A>p5|cJ&G33qnv*C>}vv5)e^y{^CWQ$a&EzU~9@jbAHB7;XkWrqqUeq~B>(l|Du zxfG?oi62Og8LeV|LP^kuTFKqzN;54zDjKt)s}jki)zAfygIdaMkFL#2%%rZPb&X6zGRaTWw}G20TyQi{#@vSCmCXRY%4X%?M$`c@@u7{h_qf znCX}`x?Aa~WvFa7FFUSMdOXinmZl@B!pNmkts?@AWSj%$ePSrJb;Yl*Y;hzW%g9_x z&ten;-5d10+CfHz;t|G&zy%gGO=Ijwvk!m3bNlLnhTC#f794i@K;N@rr_ 
zU9}#$Jd9VW_Hk;15Kb1$Rn`WoIbJSPq2rNHsp64?TBWUTxKN=|v>K$Lyr_*a31b+M z2XrX!S=2s?1vjTMl^bPHI7XP(*mkbSf|sbQIRjf7k}9fOIp%+)MM+`YE4is9RWM&l zaUVJ!5x&}CD&xUYq9!?Ho`yuJg3}10Riwxcf;8gjlbjSWkeXAgL7{fQ2So{J1S3I~ zGAodC@{OoC9!#)cloeD$LO1mqEg4DWXaZI=&gB<))fBlE=Siu8IlZbf6mYqgPGxGR z;^FEi!H-fN#`5MI&<0>3j4dm!hO~~%&B0Wg2tO!5gq7IoA($!|`VwDJz&1Pma^;%(>CSTE=@+QU_km8KM47C2D#)(&QTfV=*GlqG?4yv8qicYHl;(zHkSjSOJ8onP9_`i@p|r}=8V9qpgZs;Qj1Hxi z2WR7qMU@J|OHnGR(w$SPc|jI4?Vt0KY5Oax*|u6NUIQIM)72IAh*bEGyk$IPt#zmS zswqfo%v|Mhu_03440=2&6)Il>z!+m*q82A(yF?Q25X&h|SpYJl!pLmsPfyCFplU%q zNkt_i`jIjY&qm50WuR)Y?goongsPXwE35Tm4djw5dP7oVat~BE$YMH$&7fRGH%Iz|VFF8~wF`MF4UD5Z@ zH6RSh%2PB~&686ZDI06u_;I$!>Q)-7^>$JAyOgd59g0EU{+uZl(a1CKltbU?ruA~^ z6bPeTIdTrZN)>t-GiZ%CYIYP1#HP5DfM1FPG^MCKD`?aXu+bcxT7`WS*eIFYz(A!; zw?c#s*1X(Vl!{kcNH8TKNK!HC;nJB1q?1*lWu}CSpeGzmh?>SG>kL}b*if?mOha<6 z64f<{|NN*RE%6FQ`%!y(s#PHsuShD+jVCg1UaB%u z8^NVka=2Tto0_w><~g3L!mB7AZ!D6zdQUeRsbrmbPR9G%s{8AWqzF6@&ujaOQjYSG z!QZQ2WLx3XvXi^JhTtI-tcDM-M!bwihX5JsR3=gOgXPQNUBa3p13Ae^s?p@hr;}T) z<~V)1nnx4xYLo&aMvW%|maj5xeQ=9-nRYUefmo&D8w1GMJ51>6JjfTzH7 z;05p+cn=fOc*k9#9`> z3^)R)ub%mo$! zA;1zK3|IlI0wRGZU?Z>M8e@1D0ddf1TFwjE7O`B*mr@) zz*81h0Q(j28h8VI1fVv=eFw;t!7c~z=BK@TwF+!}rsHrFo0Ikj_vH^e?0bGnH7_Oi z{WC>my@ffoZaW?-O7wi*A}M0LrAfIY(`5L)kXw?n7Ec;}TeiNFakXmh?f5FbRUcOm zpAxCJf5ypM<-Z55-ha>RKt>t=Zf@<I(HG3Tl5L1?w}(CYDCoix0aLY_7t zx@^_QS&i!-w%NXM#L1rB)9WYd%~S^V>=&%}s+DDV=gILU)7r*<3G*?W?;E4BzSluv zP&{Dam<~G>|EOhLK$`F4@{2CYa#Q_@GP}~_51I!n)vN3NT3R&tQsS5mnuwXfRo{k$ zS(v^w-|=Df7e#Ob!viy$9ZJ#^7(Y}N9JU$r;q0Otb*|JZ&HCZ=d3!5``%g>DLvi0s zc2+i6_wsz3?b*%i{WxtIUF*s>uS5FQ{>{S1O;I?U)J%V3^5a)jQeMZ{uwf%Z$95Q? 
zL>)vV{`%7P@JEMn?c-M5yOLQy<%m(lxq~g`y^8FVo_AL~>;G}barNVui6MuhG!x$R zH_O$3b*kolWy{ROSmm82XIGel$IINoCk>j%T#%IAe&T1Go%nKWlx7X`j6q%{^|Mue zmj~9o{d<$fctpb#kAD`;G45RBkQbjdV}3Jf^RCA4uiuGWXE9_%)~+!zwey;|o?5Wh zQhq!ztiH*D!E>EQZ6CO5*Wuaq^G=O8S+(7rsrL%~_esl3pYd1C6$O&8bFa=ltLIT# zT(#Pem$MT5vV!`^fBbUd?42&YnGjocT|Z@y?TLl~L&qs>cAMwlS~91|KP<|vyi(Tv z5_POvdb;nJkEoL~?dl#M-qJ;q>-6+%VD(;x^=xMNhK0>5vP_$^%%slg;F@5gjs zZgU6mTYo$@Rv#YH#C>9@q218A+fF}-(hRw>{H!!bKgGy$<1pnRqvBQi@&jW%evI>jE&Cn%>51a`rbx}bQ`@gTPRZQ)0DMdb1}OR;>HBH*>5y6d7q$QHUAf`1UZV!j z8aH61GU?>Yg43N_^f`uZi+`Zu7lsd1pWS ztshO|riB_hE*jS?e*fmMPCu{Dy)rJ;Ff(Dmh^uX#!t*YSoS~3zy>Hy23OE=+gy zT(7zFIjH*k=6heRO^G-5*uXME=r@kT0X=5yYBf$0yu{ynP=Y0`a3 zl26$-&FtRAiusN|j>^4{c2}m~scgI_)G#PrW1IuoPbUmp`E%hR8`Y>W`h6}ZH}Rj; zu=a%F4!`ScyK}1D-mlk#?2^2?O}KMpRwKg$>UjowrO#%~a~l(-v2b4euxj@Y?~)+@ zvhu{y&NCo)wWh-}2Xb4cY|RPsIXC#EJLtBe&R5i%oKh#f%9>88!xBE28;1qGh|*YY zs&et|wB6s@cAGAZcC%sc(&wKayq$fLSZy0g1^T%tK#x1N~-Us=`BR}7Voa`T0{Wb48 zY|#|fS9KYgcCy{|VEr|F4jV{Zeor0NBO|x2iN~Ntr7d26jkCL2z9=rg{gtS77PGf^ zj2T{RvhlrF)dR9O$4&b4p^HOR$1F_Ma}0YQ77&2jo&Bf_Xng=6VY&WkQ zGf;ocW`(n)aYUNOl#HY1HA_21TfZEloU6BF+=TU4&ZPTBF65G09(rI@bNP_q{egMs z8@{%(-t_fm(=PqPFwWnlPTQlLZ#HMCCj52H>svH_yA~(!H<%N4sA(-l%(CW3%$h}O zc<{P1;Hs+FWb@cbdauIkl!naNH)@;G`ogV4zTkQDhNq(a?>Tx&2)opIu(IdZn|6B^ zja_wPON`0v_--Sw?HzT^U%g)Ay!N?&zIVOM#Fi=gOM5MODSNoG%~|JZ9|91Une*7x zX=kU2`K*s)1!%LT^on3tLCN0`-sF-MJG*Wx!&|C znyk=ke6?Yxo+E#2%s)3v*z^zjfs5U_tjT0Pe!D; zJPVm5o3-oA3Y(Sj7jM68-CN%x)4ujU9#d;2_w3qh!PXozS)NIagQr4N**CKHJnQ7K zJnc(DtCP3+j0?v39o$YN508q}|7A0+P58?rzpAg9cH8QM@m7t+`3~13Yx|WXw2Rba zw4Yc|JO5+*1pZGBI}-KUq_-oIzRekU6j3{{j2_K zCmJUVx&J9mao~heO%t<)QJS^L@9nLR#>xvChj#iZh`98&^(h%ZPoSEinUK( zKPoM`H?Wf;+qtbG1~NHssA@Lvf)!_d_-Y-SYr_lbbqd)kf4Ci~vE*gCFXSLxoD{Tmf;(v(L{bqlfHtjTrnYU}t@WjUqp zj5~Yw+@0TZ^XOas!M7-0F}Z_&ZslC%qs*9yUN3!JQjZ6iJXA>MTY7l*`gE|Laar|n zs|aPZeUKvG;K%Xbk3agRpX+_Fp>>VEDF7)G%i_6SlY$R8_Fr=7T8E8oPk3pHqH~RmZ#e6(32kM@otCcG z^t$Wy$R;@FN7S={ooDabFLSsw(AUh_c3;LtbMogqWe zw?iFkKDm3+y5xML#ZL}J8i&+&`uYQH 
zt)3~yWOYz*OFH${e49}Ca%{+nocRV9m&sZvG6O>2ZR_bXw&A3a%J2p*dF?m)9MgFC z1TIDUzo^s8c4<+RCjH~P-%*P?CwS*pxt9BMy5dgUxy@U?w^y#6Q@!s*@O3!bN#T_} zwX6DE=%TLveSXzM`RbG|m{BA7$33?}eUy%fclr3a`q5486*(h|YsAbNsvOvIoTB!l zSXg6G4G%9Em6Bh4}YxdGk4Guv)l|6o_%w+G*gtCJBnbTAjCs zl?+l|yBTUfJg($d;o2ERTf23)>^#eEO_jTMYI&B}DIIRFiuXd>&zYNB+0u`H;NK=v zQ+)nK?a{&Nqy29OEJ)(7&w2!Yai;4v9TO~z9tM_}3{5yZ<;bkMRkJLsnzYG({(bU{ zCFlLNv{Ec=&nsTuEeS0NsM})O1;Z+73@pHEVy~vHj0&nk!*Bdn|@o?DoEWaNR?*mM1b~ z>ov~~+D&un95^#-l-sQrTe*6XwWpu4$aSi*v~7l~*WeWczJR|c_z!PdJnf(151t<$ z*y5L7{IcIE5e3of&-XI#ytsXh4$~i2yYRH+X!gr~b*h;JNL==}t(u~@Vew+qE~`@_ z+TW5dSZh%?K6z>n(0UJBV$R7TO_y&QYL!!7^s#Sfeigmyy`Py>ey2!%Gvw2$4-r?~ z6Wb`pTyMVg*pN3n=GbMY?DIE1Q21nKa`ySxFA3HYkQy=hrmF`z#%<7Tl+^wE1 zf37r_U3uVD^J;KRq+h{IMTl*{t*9mq%h#kGi@k8WzC*VMGfqA_I^4q#{Ib7xz3az6 zY;V=#Rj=fFSIcX3H9Fw*euUELPz~9*Jh|^_@NX7juDCLJ#`Ci)EH22qys>B)^rL-- ziCv%r!Yi$QS*f_@%dhutG}*#CHvX0*J?4=)tw+bWbtw}L+-^~uw=5n!zDE!1tWHJ; zYCi9zC_NEdGwQ>jn-lM@*}Qbs;Tw}4<-hr{x~O_!=r*Oz{m6k%;2AmSig`Q8(7HyA zu|o$BHn}pWor$8g>C&Y`hMyt6U!F$CmgoXysxH+J8k`Qc;e@7ABJvY7BAn#9ydR2=@Y_)^K~vNZ;I-%M65d=fCl z=Eb9sk+XgcZ`}26;O!*YqNEA2{{8D^o^Dj5wOQW|3Zq^hrnPflVjfYovd5>DqfO&Y zB5SPsm~+ZHzx`bwueYyPuX4~t`+2wBRkXU)^GuXRdArE_*vE8SNObdxSH?q?bWS7`EL`A-hpHJZ@#8p+GDL(>t3lP@*LDE zKydqSKaBoY|%`~gFR;R)5(cG`Se84*gbIH1>^N+G0tg~pF z`E%UTYcmyhj5{fuU%M$Cdw!lD@MDpA4EUC&L}?ygX>~fLj%2uPw$18+{;uY=Km7Kp zyyj`*>=ny?w@wMa-fv@jg zK2l>hduX#4Q~Uf<+8^&eZs6`31?L7;yV){j$L7tJ9vfavjL1*vZQHZJ(<^hEeOdZ` z-;=k;Rynw8;poRMaj#5`HD<@pbUyh#+2>Z$jj`ENf;ZpDPi{J}Zu*P$n)ipM?Fr3x zzG;+J$oby2sD31BWpVbmCyE5OI_D^U-HIjVZ_3;|o<91eFzKaZjiqz@U-F!ze{RL_ zjG#;7oV!LiAD&xN|3)RHtXR=!ZvO8^nhU?e!Ee;oebF7e&Hla><2%|XXML_uM*7se zM?QDIc2iouK6S|R&{6Ye`!AS(Xfd<)xgRk_;Z^LEX~;VldH=pMypd*8^Gl%(N>=8e zJd?%_GoA8cqV2f&Ns6#$8>?npcdAnp`CUBs!D8Iowu*S<@!nZ#+_7JkI+@2~)P80c zH)uTC7kAIQ)*vII@P%glg3=)?!Yt;S2PsP8i>EIb@Ob)>Tsvi}>acUEf zqK*X7kT)ZK^o&j=wVP@N z>{YKD~C##*-abSA)YvrkB7L)tN)t%R1U4rU*m0$g9ZK;wHd24*ZSNpl&_}<15R%NB` z>GQK2*1p^!ef7#>GvsC3u7ko7vVHlq_e8M8!_o!A?mP6F6|Hbjb^Uz<^ND-Yjt`4g 
z)+o1d&yKES7@Vw0?iiZGH~ZAM<$8?)^362vTQ$eM<=TOxU$z=h`$|BXXVBJd_QT@p zfN$fRr`8vd&%{0%Ckjn>F7F4vbFJ%k(_0y8sJCfF*k|LzX9R zsbg&GhFe6I%^X|v?T}AHPKGob-(%W}`gxuY%Wu6J%o!M!%*}ge;e$D12>9eT8ug{= z`QbCWdACvcuRDLjKGnJZvK3R51*2TQuKN1?(2ov#4Tk!se%uvx;dW~N(bYjsS}PJq zJ?cHlQEicNW1d~Zo$Y6hnez2v^qn?Y)prDyyA1xE-1*S>4aa-$Uw_)vdhQp7cSH zb7pDt;Ejhq78<{~GGukssQ$S%=dJAi%q}Ujw<6jnc3=4MVfiz1JLnCGZm&3EWx9QF z#)6UY0kc15A&)81XYK}p#{*^b)+LR@7r8q-jrZw3r)bQJrizOr&NS#zcX)v)bsu!f!#! z)crO3r7z7XJoeL(3k|Kl<5^*L!H7XKif%-{<0EF@i=J_#w0)neUt>#=uFi`kuLgqm zIplG9SdAe!I}e-D>A8_f&-bGg`6FXAUrfG^X&i97s;x>@W!(LB7G6EK_ewZ$>F($} zo61W+&1x)tdtXt#rX;Z=_Hc3^>2XPQ@(dSdKgINU=QktLXf$EPMYZ-QT&UmB$w)KX6<%Q(@z|^wikwH*JyzZq>AIdL`Nu`M*T| zCiqzPk9v$;js5PzH5%iB<*`veLdS*qb>%ulEqL*3kb8=1)x;B$upVcgSr4A#)A-!J z$ElDZ8GNpItbRMgxTd8=<*ezYuiAW5E?#2pfibqMT9??*EzN9t zcaGn(-^sV1TE}1fGA=ZwX6DrI^VZ&c+~#5*mnG)&eeEUX$aA8kRfA!ndWL>eHmzMy z)aHoyxX+Q%*OLq*Aj4YeeDh~10Y&v(YFuz!YPlN`V4CnS+mdSnW0OTPt=n& zxxHuYyIqT{KK(W>-tu5nyOiU>+uIamO;}|9vW~NLJLDf6zBTY%Uz-`xZ`vqKl1dvn zU5?)p7M<1d(JZ|!&QlKC{IEN0V}J8I|Ih2`IdO}+9ZKoBe!}LnwT_h9d!P0{-TI_Q z&&Jy#T6((TOf#GehO?0URo(zt4}b8b?1i&vnjum3iVLdyk$*Y{MStd!%Y?N_L|4~+1uX%~^HWP1r?%ARH=hZ)_Dm=4l7dS`OyXNAo z=gounQpjgeuhX#jTkQvYbKJLW1on9?TCJE-)Z?&CtVza+63P9Ah2ZhE)<5-19QHZ- zot5+?f4!i?4~piPC>l;HF#FtSf~D8lyeCZ>^vMX@fA!GBf7WYkQ+`)3M!a{Y4VpHJ z*GYv9uPoM-4)G4Zd)dF*@=Y4eiWv&e!wE|7A5j`Fm!k(&J>ON$-jE#boY+_Z6EDlu_4c7E6SF+ zymG{}=)y-cy1uEp_``zP)~6K*vdugcN`pR;B_p@^>s|h;cvmBFxyQ1xHS%0{o_z3) z8y;95xM9?QKwD|rh$}v~zji&o@{{pC&!k^1GryT+g_V78r?Njh!*{UK`AUnjO{x9D zV@|HKNIiGK{Ass5Mda+Q8a}uFg5A;s2g?kf-AJ}xe=Y@S`O70uR`y(CzI8|TIQw)v z+h&zoSU9`ibjZawy}lQ!FMsA5P?Qifsz=?!DVfdWQ%=OasV(aW{>Fh$=6A#ET*t8~ zx|)g8;F~jjdlwt}MoQmKyVCbqn9YdCH4y*73`e>f2n&af2AKfpEO>){6KKC69LL7~ z82Z*+2D(~|A4QPfOg3md7~N`SzXM-mfW9ezVqsO_RsbsarvUMAj|NR2xZA=GN8V&i z89jZICDRyjT__!7BK;crCQBv?;TvFAf=y-^XviFZpT39x0zN_Bhm4-S7pF6~0^!Dp zC*2bIUOWBfDe7!CbO`@(*BZuh|6Df-4tP-pluDC%zC)Df~GU`2O^L7 ziYV=`!C&c{A$|tZ)MfIWWM$%zo^%lCTX}oL9}w|yMch=BV=7A{isDE&%nBeq2KxR@ 
z=4XZV=hqYAojhOnwAAL*$WfyN3TJ*pP4$s9vC=^_}Q z@1KX8^f6|_Ci59|q<=-GE5e_Hj!Zk)ZlEbHeL(!nM}x-oQmN{X;~ut}#!FgR+iH9TnS{*ssUzQ~YH$Q`7)#$+B3+6Rm`>3*yQ z$QUwS9O4dvhD?8ElN;Bz(^(en-xeiZm8S5Mj@CrDJB#SH!B09!Wb8#U{YLmGk)KCe zx-XJUbD##&kbVxXgXQ#C`pux}#oR9Q3AxK>nqzLDamF}elJo)4Z&0Kt$x+kN;?gJ%$I z(oqV9`wn=L{-9uT!S@-$=yM=XpMNm(EY+>_A#5>p^WDOxYJqqTBM6X9v@&2XL%7VnF$rvpWi2luL%?& zE$PaUnGFAH=57Ib{J@6{>5d8JJ{+V^DVT?%I12lQIMVYhLHJwP3E(k6L_@kx_nGXZ zE4TsqWh0DC1ZZ(xtX8+F67d0sA?-%cePe0vG5Lso9ZN@UG7`k8nQV#7rtlcJdx&J9 zFwz+$J*0ZTI0C|cAP@SRu46!dkI~be7NoBwn2vBr5Ju)E!ncUxDncuC6YsNd(qZ&v zG9L!t=iqxC>E9zBNFUC51CVYV z8PY{<1^0O%1)#qne2ek`9}n0#a;^P4)q@$or3fcoQNd&*&3E{l!4@0RtrMGhNPiLh z$fyxlSwx@5_}LH31LOYcpqgPFDJD1R>yaV7MKYutN4l$IteH)Enhv1*1%F%EbO!<%(&g<1|9Ixl*M{nTRE)PS zq(UE${Erc5ggn!My5Mt#3{}1$;?|`7`Vl@V^JDvb>0HIpWCZffwnwos(9|3*CNG7F|HOK*rUAEc`e+W~X~S@!R^BVEf5Wf?FmtY$r9hqE&j|J^N zjQ?`6%!G0>rlk}y77*9WD6KzA#4geD2c1GTU zIU`C-VWb1u3HCQ8M=EGZS3MfxOJS34reLff&q0KfA)V1=(0)c(f8Yh98vy$-qZxs` zNY~ySa^-`T^h(zu?gNuE82%k358^7pO?uDI5$_InZMX%~7w#01-1m_F7H9-rWO4AZFWI7{kKiv0#Wbk>$(yeB+gP82Wumiw@bc~%r+YR=3#$yCa z6U}Vmy9@C=;&*}P5rnrwdLLjP+*#mV%=mX@{I4@Vof#m#{hvrr#+K5-f2G39HDGpY zmiGB2jpO#K_vO+1g0<^P9;wMpT9n&=(&E^j%mw7nYdLX^@!%+8;=7g7~|@LWJ*v-%BKm8S)Se z4*qD(WKsNj=EjLbtN@4L(ERC*AGEjE;1-1=9p^aR~nk zx~(j}3}K6q2kH9{66J-ff3!N~l=iYHk5`cQ9{laV!wc@Iu+PIj3;PsoGISP&44vPh z^B!czvalfLKhEq%NE3#<<5)b=dc*&O#rFY^E-e54ER1MEfblF%70@pRpOx^xg}XQ0 zGhqwn1Kib^Y*ujh1RsCKGY&*kknSSl=L4fy_&Vf!iRBlL@T&+P%ox%ctKaayX0$>+ zd*R>B@@<5)Pgq1>5uF?|1Z!tK)l#o7SVQOaaQ0%=cmZTAJFS!t)R&Hp%d(i_(mR+l!S|pi@AGEbtMGGjppEP=L76NMi{;-30<> zFA?RH1otqOZZPOiBi%lP-GSW(wmfP=UgCf8SsPA z_dvv8MlaCNd7W)6{swrQgZ~vGm%}!JT;*_oV!X|noEEH(uQIy_OaD_Om%v}JV<1N- z#M7BFG78x5;opFKVi}*N2+xFHFz#?nXa0Q9E8w39J`?^b>nX&|29KYNcU{Df0B-Jx~DU1xA`4Y@qq%8oJiNbi1-0|Si61af`LEwEHG6<$8!jHi} zo8=h^K5O7!$$0HU++0A$KnO_1jxq;3VfIip<)-a*XGGh4{O4E_Kx_>l?;%1o$my`9&bE3BvIiR(rGaC>-fXOtU@fnD6RAp@ukFe&DYY%KXd-x0K9)izL@IHyKGr&BACm>CA z##4^8xO!E4PL_g9Kz|x^9{~$iMmjS}rY*DYBTj7cnXE4mp3U4g%U 
z2K@BF`wGH(gD&TtB_6p^8bI;i>~$mi1Ga&aHShV{vFfj-yj@)w zZ=4zbNg|K`B>aCGT)-0Sfc^Tf#Pi^9B?{XHw=b)o=FD~z#s5#70so?&>xjbA8MAi( zf@zj0%miuZ4i&MXyKDZp=EPq-{}sQ4e-$VSt08ieb2#G}%An^+V=FA}??aJW>}<A5)4e)kQ-`hTrXsMXD9#K1W+J~h z&EM_;{|4QszY^X4%RiLySn}5zlgQ?a__ty1!Hn(<;$Mp51-5Kys=Yf&2>Y*WGelYf z-N0)n<45-ik=YD?J>VbaZ^i6JjE-oIvalWqo5#uZGD^(DfBfJfqjsBv~5z((@?r28y2zF<{6<7|rmWgOTG8(c@i`?XV1I!kM z`68_|($*HG7osO1&KMpUlT!$9FF0Ta0Cu1wbM!ABwUExCBI-Z<27}%Zc>fo@n5Y`k zegsV!U?qF3{yak%NdDi|82( zE(-Ls1P|ih0lUHH4CAvF{%XwajBvp$5**Av%V_+-s~BPJn7;-5!{9ap`mucUU>^dU z7|nhWUjq@}2$B2WaeiR@O9Z~mPGbCP!rsAXh~H!uZV9?cA{iEoXoyZKiX*>Zt}y=b zu=Ow@K4TGTM!)y}(DpU(aZcI)PusLj(=I_uQ4A6rv}xJ~QEjE9ni6fcFJwt2lX=NZ zGjE-lyj2h@Y^^RuQFN(l&T(DuK3p?BmyzS?PLYN8iqr|UeAYCl%lO8dbE=-#aB z;x^Kk+T|eaL;Q_}t&Q5=p<1sA{O6#T7(Sk)bqrJe$Vas966k%T%e57DLv|%(KeNk5 z!bYvrxQ{OLT(2`WXf&Z1Hl`cC%thjG@L_HHY1q3)XBcGYjnnjnhO7@h|H0txa`r-Y z4DA~J+G)gNP{w^m+Ov=iHS8?c$}6?Zt7B6L`F)_fbl&H6-5=KJI-B(X_m+EpSYaegZNK*m)W$W~(K)2&> z74ifP9S_pGA%9it9fG=aYTBU@$?D%|Y!GkS#|Jy@+I@77M8fybeGv8@0=*6RnxRu+ z*tyK`hxSUL%YPGquL`L?FVcD=Aiom%Tfh$j-mmqkv|8)uAnj;f#+N$ZgDC$W_$$-; zXQMt-HJzh%Pu98_ny0x1+V4LY{_A3_PU!td%kyAsKKOTa-rEfQ)AZ*0C2il%Pt1mX z1nCQnwA(>X)OlP+{oaGTNZU7UnDlC-UyZzHKzBUwAHXMco*r$hO=noD`H!K)e-CQC zCqdVO{t4>vovufw);$t7KTz7Zx2E+kLH;oGs=yD|erpxJUS9ns;tRF_=!9;M&ik;| z|62QSD)=Lib{Wch7qV-i|972tn${`Mc_^;c{%Ms2oTqitwcmEaW?in$kF^Pn*BQ36 zA&bHOdR=Zg=ucqpr<#8Ry6d&QziHXykj(|%3%o^_ed|7DvZ!ggy#2u#lUo5=I1p)XST zJYDO2vQK)a_JdlZbeU$p8==1delLOWER=aO=o#7;^-csm7&h*}-#?I63)w}mb0O#& zUDgcngTZI$vhu+HOP947`mY#u`3v}?bQ(zYJDpQe#>Iy0I&Jr3L)LE4l_-noSMHMzhWr*GQe1PCpws2sX}v-jm2b0rZ<%?|drif4SN=**@es7J3!HNm};`jVEfo zbC4bbT?%{#e<#EK(fE57vM<0_>+)%D9(V_EgpSA0U~3CxXKVh?$nz@ri?r>Fbl$m0 zFM+I1^M?Z;z+V;0;lH<#K1jP8_hY0T348oES?j$CojY~j3m|(#mwz|P`4G0(LVqjLUj`k~dBp_pld z&@TZ$A3Ezne*^g{;7X(={#8~wJWzZ9JMVhSsY{5Q!p~(ls^`3qCei^9HFNZ4- zH8X(irw_iLddw3y)xq{t2TyfhI9yxl2|23+9-+MfXAR^5f3>U98}-mfMF*XMK-dK? 
zSQiV3s3s7ub_QaWQuEe@TycLmR2lYG#(d7G$4w8h=C`D#-IHFeIY+j3TiqjAgY4b;VamNydhhvHr<7I%B1QOg~u^v69> zXWUa6@wv%6-GCZT+!Ly|bW;Hsp;$=gt*mqhVokxyxIgGQoslwXUFr6CopphDrQ08i zIO8s#1>24OI4rvY;g|=Aa>Fbs9PxxKuSmWAkU!?D^oLNJK%jEp7{n~3#}5>hK-lR% zQ2M?WQ4|c?En(@JMdH4w2ZmhujC(Abb$bwFpB3}hI90BcZRI@l3u>vuVr&jea zFu|Y`ACYJ{?s3JfU|onjN=76q-LbGu+DHc*aqWyFc5|=aN@~~5)`zUw$5%7m< z$*FKciA@uWM_o>gcYrUdAygMo6lQM9V-a@rs0?Kg0SsjVW2ELdFd>)N3h6}Kn$8Oi ztKRNvXa+l*&gHICj>IAX#0$FdDEgS!6ZTqObh(%+t-(bI(OJwlx~_zqLoZ>*6jI5~Oic2XQA?!&F^~JbUae-6kg7>MIgM-9(uim%N+LPb9?*>q-}bWMcnpF(e?Se%gxKNeO`@M(bkE8jq1E0!d`Xu-ENu zGSU+=ssy#0LKqUnNMtHlXkBMOt0rh@C+O1)pqJnzkGUSInF`AU<|`GpfK=UfOS7;T ztkuN`LCF%O<&Jt>9)EqJru&Eu#hB62Z7s^E5^oe^aJAMhyq^$CiSB4CBm_u|M8eq8 zXde|rVayZ2T9*(D%LIE1!Zn1M`gPm=ryAL0MN#=ohwXg(BpVs^Vxn@&I+O5Cdz7j% zKz3Pr4wPLlPRZJ;Vi$+~*ncEOy6v->h7!@J^}%4Q#N+N64Z>;G_OUbC> zuThmZCoWS)6=MfU8VjNxFEM7t$WaJQw4%GR-&C5EFj=$-nyX3d@F}bbYH`%-W_%5O zLt^M8CuFKpCpjf6$-Y)>-nL=uqYk3qKR;F{muhIk6I%uodQweJoe6=Rn3!;7VzI?2 zP6@y>?vw4vC`JnlOkn|p-o`}Sg|sKFq&z{{{hC{iP+g=l62|JBBILyy*IC)M86^Cp zT(({bN=v=5rjSdeD@se%ZjY)Y60&$*G^A9c9+Fz#fU5$J5-SQ)uPYP}SXdI`PGw!n zKo^Mk-Q51y0c`i3@px2STJ@Kp<>VTQQA%D{DQ%>w>(rQkX=)1eUA6sEbUf?=)mB^W z!)=L7as5PLYJV{rW~_MnM5Rv?K%lIHm~x${s1mi;+E*zMR2dD{Rkkc z+;IBCR+2=}T;Z7IX~Y&5SA?LwWU#CAhrE`H8$e%EBpg?yS7?z3AYxWrH!ve*nG;4<48ngn6AMo20|YvdD3fpCrGQkzPW5X;_DnWjyYpllLp6G(tjtkK9y z%1e?KK{F4*X0&l<_v`Df|UWB0Vv8wNGdBSi};*66_iR%NMbI( zUujhboS|B!r6_3G657Cw6vfFJq>8dpm*Do-V584eMcJrRYogAGPo*jfskVfc)it6@ zm8-?1gsCc!DpgU)YfDrr&-Emg6jQZ?sW=XEc~q*RkZMbqsv`)gqHKgdQyY>Y6%y(F zWsIcOw1Wu+A(h+?A2Qkz6~Zne8Pf(ks6r$ZR4<`J4bov4VIdQQs$YX0Se#n$iW^#X zxEfSMD6xakVAr>yQCdq|Oq8L#Pc~_T-KIj=&e?4U%QmSpCfie-fqp!!%Tx+>1KLj7 z2JNP!+-ODdTdHZdUxN#KOI{#CH`$H}66Hx8l!DY!3?yi#Bp91SloGOd*r|s`VgT9) z8p`3s5QJ0$nv`H~r#;ZBDV{-Y2L&Xm)8KcHXo5zdGrILZUmg)pm< zhqao}VaB<8#05A;?)K(6WHOR`DJeyKSO<2w=m(=-j@oAh_;b_(w z;D)J^C+FB)K-*AK=htdEf4z$!+nL7Vcw?(`jUpV0IcT8p`)rq?HKj_^d)8@60^xJM)VV-GjTP>M8w zKreOq*z?>@m<(As=A49sbsX@a-^8GZ&cHvM6QW1OEUc@%TLRj}9*QGVl~`}z$Ej~{ 
zVi&JI8!biiqtWunS-i!W9F%Z=`dO^yo^Zsb9s3W~z$W%Xk|j50JRV~6u$8uo{jg=8wSBpGZDI0l?S z219RU>SRYllNdsAY+gV#l7c-EDMDggmn_0i!nvzxH!24ZOs%IV5z-M=fFe`@=%fvC z%nTYEaa|aUsJ=*|QBkNI1|qh#IBAgWlYOXaUm{kcZC%a?oFu?HIzTkc%|A|bJ*xj> z5aXUA=F}RGHt%C$9mWvG5~g+; zR&@;bchK1&fCIeHj;qNjgv$w3OHFsQmo#U&Im3|$g%PUB1d$M-gUU7#b2=AIG+L6v z5fPpc&|-u1;}WjkQlFK|kzh40(r{x7o2PKFTBnHbIA=Dz?8jmEV5EVQl9X{MdU*ek z4kalBNXyh7gc~v0f+Q&{#pg=W$M8&#o@=^{Y!R-zWz2&D#w9(N|yOpFs?nTqu@6u8;JCpOgLMUiS^o0z2c zIf-c;?G$kZoVbha<&|-g@%H1etOdeYmk|>TuWIbR5)>7+5T`OY%upX*_37gY%EXA! z>NsU0PB!#G4goo^`})&kmHnLl1TWhg*uW^ATsQ((V^4_&L%)Y z;K6ev{D1(TSWOxuL98zPgh)Q;AwMF&{H1)szLeuFiE->4>VEcX{!Fo1^%IfYUh^D+LfT>KeGUz}XExuvx-~ zx}Vxe0V2o`3V4jgEv5Q^0QYty@~N4C9KYdO7EOck#!s(6z*?MdHRa>e3-DTtCt6Jt z@d*xpi$hjZ2%q5a$Gt8dYTzfPR<(Q8LN##ENougI3E%_wd5B>_IYW2ujTqP>cN1;s%(AUBcaQ*%(_&12%MYa#RaJx`l7&{53DIAR9&&v;Rn2oEOSf>Pq6kEvj#<5B8{xuI)HhSZ5}|qGyyf;=tRHQ&udX7(=;^wAx;khHp_fGWyB{w)EOfOH4hVU zM9NQGtTS|R=}Q7gkxpX8qd2XQPYB6iA~?MOEp>VUkBAy0HI>y^71a(QLEwr+3A|YP z@rN;k500ETE{D|h37&CcyJLBcgW^&}NyH!VOu}`C-@X~Z1H&jbinv)Gl`AeR46X*X z|8~|jVwbG8l4_w=CDy4=C?Qi62f|F|4hFlLO00-+T!#vQCke2o<0?>Hh(ZgAmREMQ znhRE|r4tD}1_$th34n$r;D3S`k}S9q57n?p$Q4}~4h68+#EPLVSa3QN#tjkW74mV@ z1!YkBnof3tvKNlkRkH-7tGyWaGHNrbcV_-Dw|zME!-WRzU_GqmP{JqjW6K=G4ND9J zZ1^;VXH9tR=S7b~zw)XFreIyCCMe7lK4%P4mJaJWJto=o#UC&9xKWMAB;MG#$DuC4 zl-9HpVNV_@LOOP-0iG|@uI#QC(Y9VB}*?@g}$V!sv zw8Bt7nWV1wB?05ZsdY#uGJs(o^=K|eA*Og7-_$t$iIX|{lSuHvHYX(Cz9)H5qM7O@ zs@k;?4n?s_xa<_$wx&JvNj`Fuk{J~%Ig{z&+3Ec_v~x$*wYW|Cy|{MA$bfr%R1@bc zIPk)ZV6l2iGve{c91mY`I8$HUWS5`hq&(pxJg!aXl0z!?`#6ABVvL`J0nnU$bWkj` z4<{}kv^52{F!y=YMG$i-UuiAGeLE78Le0eSFbL{{nw-wcRysq_(=;}Lk!su+mTIbi zs|u?=p{PK}i4|OtL1!(Gk8vA9#ROBmP5_Hpq*g@;&gqEZh7>8R7P#|?DIB9mRhmbR zY(K&%&DK#G#i*IqCiStN+*P}Kqw_OP4QVBXkmU-hkkfilWQ9{6p8?X zXJ^y(u}K!Fr7>R})}02l0jKM@I*^hzBjUs%Do-A2xZO}xq`{{6IOk|CDErblEmsMPmm-8tCp1LTzDW>`4n)~ zMjEh7ud@kt^rlXKco^)m@VL6#FYr7F0&xz6lm`iom> zY-HTHjp0(9{nwDo@svDzubv54C+N6Keqbf_b%O($QD^T5Qt~FK4r(};)DCO 
zrc%Q=YUC`!J50%1vG)v|IgvVG=KWV4`39&uEe>3DN!_>Vd}OBvv!zS+r%hM^g6aZC zePG}2#g`L!T+Ofas1NS`L95-UO+Xg))TliWP$dCnF(iRulGrCAq)x9o;K27R(5Lm| zmT=D$%j1iNE-p0rw41!uE6nC62u9YPFjoK zJi+eK0%DEEJ%YGO#_FNYc;k2iqE=ltL~5)ECl_w*NZ>%6tGZ3$dp!ZpYuHghgqaSt zAO-W9;>}DoJ=KJGk%2Vzcxki_iy|4^sIb2TUO&NR2jW~gg97B0Iwc>=aB*z4KME* zDXQKUfq+aBHE5nHl#5248jLs(z}-(QiVZjvRnKkU%Ml4m?!;r4WD~qb!ma^d-oZI; zb)8q80D}zR1dAuTI=Uq5qPX)0Lfw?%SC%x*=z~NX(URtPtXc%}STYlQg`mNelnAeK^b_OZ5Vf!CCuCl+-e4Hz9wrpVDK5dV9FI07HZ(M*1@W!Tq9iJV>w~hnP;L0 zs3RD3#^g~9s>2hRFkMCmQ9%`?gb$d=C-lfDJ$Vj_U+J+aIlxLfku0)7m!%O|M8KY) zn$jkcnb}fC!=-Y#f5&aA3z*EMzAz%+;X|6bR#eXf;3gFK1}Z0rWG;q^-{r+L${Y5I z<$V~<<0cBf?TKfbaREh75qtD^O(e~*O$?V)IhIaoKbIlE76`4WKCqOsS7=Q|Bn!a8 z!LJYFB7xV%Q4gOGN;a&7R6gr9gDbo`oW^?M^%?;F?^3(udM8-8eKX_+~*OzWTE)`t$aXev;j~ag9 zf!;=DBQiNa22W~%5>nCF=O~NzO-YoIBm?+1wJy;nv_H)a4GN#z+=U#9EwuU>8UU(7LWC6 zvZ_Vqp?Clji$|CUP-itN zGWxc<#509K-B>GpFH>&5%S{JJ)czNHy8dJU_uwfCuoz|D&MEfy;c(SP?TC6h0(&@* zV)-~1qyXA)aPllv`HiU1E;!{rSQ~Bklt{ZuZ6Q&%V({!gWcie#0 z3dEL9-IB&bp-5Mr0##6f+7`SOExGT_3Lp{xE2)~au&v^pfc+GYOAP{3f-S?jtxn}) z9;~$$6(%KAQzFq))?8dK@Q|_xTO>t?ZJK9hQ74|B=gOAQ0G-ePEy{71+K>_Wl@;|k z7@uR~(_kTYH5Lax{>1O}^Gg`~E@twJ(d6u{J85;rNdg;GjzW@Z!^-d5*@~o7h6h*Xbku*6{HRDEfvO4zD%z!eEOGzL?zXi z%lnijN&0d(B@Ihwsxftu(yuBkBIR2$`W}N_t%&@FM8X^ngm6-ca|UF_LX8;OCe4(6 zbE$9E*xA&9BA(tz}N703w39z_th}DC0s4jEbn@$)=tj6<%hL=9R|8e5z@$}95+rx*jX`)s2j{YUDCmz7zZBe3N86fG)VW;_LSm#gS-WA#qq!% z_=FvP{WQVg%0z!1H6g-xW$ecf?V>5tXArhX^=5uwC#G&5B+X00zU?4VxWNM&K90ky z$^Nz&@c|R{6;%H$`)%>$FzeUkNwF#icNg|QsEu~-=bk+Z2OQMMxSv6t(tGu{i~IRz zf8#jXwEtheK;`%OnG0ja*f-Kf(tZlDt6>jF)g$%!N+P&uCLLP&jT@tHqLv3p!ag5A zz0ofW$gsc83^6ZM_BjaLPfv!TNYOPC4pL}$Z@ZOZ5j?Z8Z!nawUl^36-%_9?`}Qv- z)C(-1&P7P@Ob1Wqc&nTex$q;UuKh@`ozxd9K_f3UCBE1l@c54Nf|~ghrN*J6a3NRTq4T#6*%#h%aSse5-~DHe&B6)1LZN zt7CFzK~?#s1zrmic>`E&M3n*T=F}4m*fx@nCOlS*?XzN#pXXS3G)JlOZZdX#c5a*G zX3&=5-a5ZI#vILqq!BfOHo1>p9R-$cLwK!?vEhdb%e;AVmq7L=|0v($*LFC@>mSU+mEVK_jd0 zUjXSjK-EAYY5>G>yd>aeWs=hO@uGHS#q)Pa@W6CX`nn*mUHF}AweeL^QQw0QijQBZ 
z9qj)Re3e0u4DAkF18@J5zlAUEZGI~z&8&+k(B?; zD2wk+DAYJrW0A%Zjolh|Ys~DBd|4XDXdJ82p)p%yuEud1^EDP|EYVo1u}tFvjTIW* z8Uq^R8k;q?YFw^yrN-47*J<3Kag)XljaxK!YTT-^>RGApPL1Q97d~HOfyP3O#TplA zEZ10}u}Y&`W24698arPT`&}BhYV6jyO=FM7?HYSE_G#RsF|%9jIW!h%EYw(}u~=h? z#!`)YG-kancE)MU*I1x&mBtMkJ2ZA`?9!P3hS(|ASfQ~>qg!LA#%&s#5ued``O|7* zi)ED(JMcavB3?)`mAD7LB}~MNFCxU^!PZJ5er&Lac<~Ucg*YnBT1z}F&00akOV!GV z52snJ#I7{UN6a2-l@Rd*o2!Ur!>l+FFUa0{Ic&Aw-BNhf5X(IQzjufq`Sk(UUp>SM z04@BuWCXYZ7zY;Ng_4cH4Xu)|_gay!{<+9|mWezIKRn*nX0^D1v{Uj6u`})lN#FWQ zEx$?Rn}GDA9Z3CMD@49(rP!~)5A^b98XJN1r{XS=Z~G0)8QfBVA4+Hb0{oae{olS> z_>~<@AJS6(tng`D$hWod#iR5u9as$g))&YRZOOzB2Gh|&2hvME6Mj8@ymc)2(l3NxfFIj!Ya8AY|3=DLj~AiR&gMP% z@kr<`_+I$!dzn6{Ww*k?E%|tp2K^;cf1BddTGr!-n7i|kZwS)|x3mG7-kc`^`Bi{ahFC*X9vKGkxupYQ)Jl-F`A9F>%b|B@wKfXr8NQ<~KUel?Kt%oU=)9Jmd953mO~6+d{p9oP!&1$F^xuUpIW@gir+ zmjibIGw}n(eZUsrPV)HC&|ScFz}@6;lX|TLvR+xYr&(#h3Sc^L9dHz|510uoz{`BI zfGdDwfZ6!rar(ajNO=~1fR*xQ;8?*a<8F7OqLNN`b3@Wx(CQ1;F+9q*>*_thH%Y1uzb*0&WGm zfn)B)Tm@VU3;?$Q+1`7!e8I0pz5^IRdeMDpRvg$3Yy_6BOS77R?Z6gb*>BRUR^Vpf zGN%7F%~}rJ4rIHG{hh?8058-fPh@!k%{K!XzY4tYlYTA(l6U_>{4c^BOMN$x^1x=P zZ|jr7`<{|GjB6Ku^B<+0b|B+c__V}r9guO$d`9$rK*p^D$heK^kaDI1>1PR$<#jwK zcCw!rJIz4a=>{_XUyy#D{i5)hFA2W~$o8#zS=x6qknNj^9}i~x`hfJa1IYHx z#9G2~w|0v>_jQrC04XniQ{=0*iF_xJ{-(br=@B69ZvfJM+S`&|1Y~}zNBk=PQ24+{ z!dKwOqQ^kL8#oqNu|xXp24FVre4@(%I=}~jly3pj?z+!K-t(o%3%-(mz8uJO_ctQ% z`H#p8zLoyJ3ds2E1X8d0dy)58LzTP$KLX8hu^h;LFm>=yRo@C=F632dL)ARA8b8j? zIIIKG|Edu~l|A2aLsh+s9m2PqG*pe#aX%J*^|+xjuJMENS;*I#C-SLhYJ0$R$jhdP zd>vj+n+AR=UU*9RSp1MQ{oXQ1c5wZo%KNKw*#ra70C3? 
zW|3!JDZB&NiSz)F`kS{0r^h z4I*CvqA z=39oUb!ybDL)Che3*>rK3}pF@TE0T_>wsL>x_~UF7s&N))JlodiYFvr{*&To3y|#| ze@gsW38el`Ang?52iUm|mH=7a79i!3KZ<-Mkp8R#_EGQYp=y0B2XcK}3*`FP2HXSO z0;FHNwS4L`A}{EWe%=b?xaa|LTvRyz>kb`o{0lFPL~5YpOkDF zs^*j3K#uFIKMhsuV?L1O76U2o)bcSeNL+RRnZ8T&Rh?4b6~IjBr@bWQwgVZ5E+FN_ zFN>TnwPyZopz0TzF9CA=Hg-vRE0A$m38Z`-koDaJWIrDDH<4EXDQ^Ta4r_s(Ez8lE+q<2ev96-h)A2<$J0%X2sAnVZrr2aZ!Hn1B= z{azsTcL6!yroArnYSkN(&j+Nu8K~x4AlH%gz^TB^z#?ELuo&0_ECKEWavikZ9IDpS zEZ_q0*}!sO0k8sC46Fi<-8NM1Lt217@GF1;;Cf&L*a3_KOWzu*_8S$zX7F)f3vd;% z6}SPo47e4z9Jn300=N^n5@@}RYcOCYa5c~YTno$xt^;-g*8@x68ES0+x`A!LO~6gS zEx^sdZeTlbC$Ix(^0Oamy(jVrknPe4r2aB3U!~>ifXr9% zzSs`{X}=jr{pCQ~Uk%&}+yLwbwga~Tw*q^By}<3j-N0U8`gV+O;22;ZFc(Na3Vw`hc|4^^v6S05W~u7m}X7Q~W6b(jOm?{*3xk{3-fM*BdV~P~!_o|I_gj4DzKJ z!&Kas0cp1}E4^we&1gbb6uJw-;`8d3!f%(@vB)>wGvjaTk z%kbih4dAhpBn2^hwbpQm^OfVQL-O{fy}M zbx8USAk!l+3LgNbqx{^LgwF;t-LZ9;n$M?pi=4>yt`o?4e%$LKF9%Y->mOptAFp$a8_Jem%p~I=k~-k@o?){w{w{0@*P0Nxfe+NtzQpQ`;+e7!ut*xuJ#F|4jZo0ts{r4eNOvW;dhN6uJWhn z3Exp9d?%3hT1tgq4rG5^1>|-A<_m@20#xTkb3`u=q~3ZU^QSEmxdkNO>K48QNWM5I zd=ao1_1Xa}0lGuOty18qu;^`%h+Yqn_V;Lhx8}z!8E)l5e>afnEz#jtHux<-^4&nj zX`AMEXuc1a4m~SA+!_V!22zhmc|pDK`9S(xalP>6z+HHW?@HiqV3(G60;xB4h3Jg| z_9DF(xC2;zv&a_!Det;P!(ccB6y`6s%KJ%5~)>`Q00@ncxf$M=Kzzx8bS4BT}tH`at zio6QgM!nZ1z2Z$N*9W8>_cqb*1k#@^K=!vDAj|J~NBap}4*kL&;fc$@TkmQ=KOAng zg3tX(rK<;A^XXT#rzTPq-M2d)Hm0apQwKOSza2KE72U-v)6&o&_Io4-TK zEB-{}D}nU0uTS*%0Lic0C3f}zX=l{GMSm=icE$l2x2Zttr+qE*Vj%e*Amfnvjo5Jj zX(u1ZxGezEP9uAW5$PHp5==!Qg>aM3lc0NV{#Lg&%vk@I6OJ{yty@^ox!Zz64kf zz6VHo<541C2Bdta=JSshJFO>3I+5jU&XIEZfCbPWbyB)j2wV?j`mQrXUQj6VGT>CC zZvrxX>J*W$1(IJmRrK3{w6k@Zl(!8?`-RiBf53e3Q_q%sMdzej(6 zzx0`sZ!C~;as#uVw+X2HoRw~6f*&(SW6LR0;bZZx|uvXG{1Vx@361^&5AJVr1sqYSpelL)I7DpuCs#vVbSJa651duoqZ%rKFF#O5}||=4$~m-_C2qepjo=yMbGfZ|rsHRwwNN zyMW7pTY(inmwfJJBJT!Lo_~G1)eYPNq}@$7iQVl$+F5tA@S|=K`yD{a3vU%W8-UE$ zwMO)I13QqOy;k_;z;^Jt_lkTukbd?7nXmUg9S`7U$XBit{kHXz-u>!?{()JWG5&#lK(@nfAm!;#h`r3GL_QWsJ3Tz0Zog^f{5Q2Gag6 
zAng=CFZSDkYr*#dSx(j#smB5!_1l1y@6>X~pTvGUu!Z?vl=MCz>tEF==__AOw*pA- z>(c(dD|(qg#?kt_@M*v@=#>L0A9d&ms}%emAmvqABb2;gyvRF%^k*xO`WuQySVhn) zoFRN}^$2S!?ExuY38Y@TOXOWZ%C`fHf%)zcRtc~XNV{D?%6qiD4@iHCJ>t(Y-~#Zg zfaSoQzzSfdcZ5|1%m=!GML-{L1CVjp1B?KRYeracUQ5 z=JSJMKLVt@8_0Y+Ba)9uf0iv7q1vGx$n>@G5o$lY3&?tnsgwFn1+re{K*qTb$a?Jo zvK<}uVwb4O0Wxmo4PvLfQPO=t73ZarZ_MQq=Vl<|xf)1&J2XG4d4!bM zV96DtzZ^*Y6~JuB)2|eHF3jo8@)q@UT>ihePWb_#wW{8Zprr0>@Jm>Wh|W5DMEDK7-F zoFbqaC%{a|BfuW{R@@}|Yk}muZx(yRG~{1<>j*0yn7MLJTlCw3)StRq^jd+m(+5=YUx}So zAmi2zWPNw85qZZwI-YAKo+UuWv+dWS*9T;KZMsk79qUF|1>k#t^e1b*$g6;qXWu`< zDg?Fyr!pU~2-x_b^v_Wnq@VWyOCfK5NXj9WfY1NE*e%;A`o(P`-v*?<`wybu2&8@| zkn+NZWxR}fM8?ZhAje%~lgtC1K>C~hxX9g`MZO7G1-<4cWc;>1Df7e1r=&l$0o_b* z7kT3!rTi^Vi+m5zhx9Ga$b6F3A@fP*vr^uM=kz=SWZVm%7kz7s@DU*Gm;FijW+44+ z?i9WSScdefm!w{sfV9&Aq`XJ-y+Fnx^JS5b{j=!r1~Lw-yCffx_Ba1U^xIz%{l-@% zok%;qKP2#*3sO)!(elL*oQtsz|wcb&+_*~p7p-S7i>rSA-xya3XFUp zb^}0FufsD`9OFl12!B$B@~7y>!Y?b3^p4Yn-&rjDn3=-&0*hg{`8<)g0@=^gW(jWr z+5fi#Deou|{nFVPs^6xaFM8vE$_|kIxl7BfQjxC%(%&%`ik+!I>gUc8`GUD3ZvnDj ztN^myZq084ay;(Q{HU@FHBQTbv|Dvah8pkIr6OMqq<&$AwgaU9jf+G->oSpV0#aUH zDgNvN(*K^S4AoCMoEfTruB*;a{iMj1q55HgTkK@}L_WqZ@=ZYc-v#76+@twzK-Sk$ zEB4a^l3om?|Jh5#pJh?WzX?eHOJX8#jfd)hVE#OOmt-xhK`qQc9o3504TdktM2}u2dYqkD$qQ4nPc{h-Dc5C?_Eg$=H zk(UE$e>0H!Qc=;V zd=-%Pw*YC!x=HxuK*pzPg~TWAW|5}@Deuz!HX!4?=@v=v0@6Rg7E|EKc>{kVM3qKWD0DhI`iL|>(^E-eJq?fH0 z{Z&AwkNK7GS-??9-wDhF#@C2^{XHVjUn~3$U^>#P?v?!8fb_5Ncaq))q}}bnzEiMo zSTDSFe}=UK{B|Jafd@pNNcl=2<(Us=SUZ7Rfwa3|gUGi6$+tZs`uUGZ{yO6$3^WDHs@XcMKSN@j_YYX^IK(=r0D;ZV?_?1BNy+GP4crC-)1o`f6 zk>|fI^2|4dcL3KSeJyYuFm0R2vw@T^15&pz80t>-!22$SmFOjbW(x0-gGt_m} zdSEWnTfY%`x+Xke5$-yGu0UXDCnIo-y z;5uLdFn);G={i*83l1Bp>JtG{e~ae3fh@NuOXMA+MStAkQeFX&`mLH@4y1m;5n{i5 zjM!}ivYsoB8);=Bz4>_ITe2m6HBhCGAF1N73Ap=Y%+Glvtv$f?K&Ee+DDuonQvN95 zE~HNdQojXA{k1>~qdumrdZSPCpYNBnIBQoamGdC9rrM=?;?X0qN&sKz#ZDqn&h%|PB~Y{T>PhyQJewTXyl+cy)t2V3pLF9um1#F8P_%)19& z-PZPgE3)*XXDIozN-p#@iauA#UrdpkI;pg2$E3w(s{a2*K9K!XxmlN1RjX^Qz`eWuzrOmtp(PmlF2c9wNC3I@uA5xc8e@y>Pn`XJD 
z+@uG}Yx+NszM1!j(5ZHQNS~MbrshqJ!$3L%$y4i?8h^Zi;Pss8*==Z(}cWtR2-n?7(IQ`^Gy$INTS!PFUuHuD}>N5<(uI;LD~9$4Sh zF=?}Jm~}~&AGjZ;>YM#U@*cSEsbfm&_`~I?{+oUBhuAUa1hc+>Hr7Xz{vmaga?SXd z@i*&e`ftX{%q#kx#u|OS;hX7CDt%y^=6FnvmGWVbQFl!%e=IX@hEAK%W*vn#%Q9&x zccA!~c~f;HCQ5FZW5JA-&;!MDAe&OIX-9k(ePf;+oZ8lASwg3_i&-zB2Z;Y*qfZVN zJF>SKr1mx@ZI(NbO;ettKiI6}f#WH5Qe!JN%|2kpFqJm-P1>}d>T|071LsXGH`V@u zb$)n1Ij|imSH|ywefS|dQkLnPio@Uo$H$D7$~)NT&uymv1NktJ+|2vGq0O-%c1)km z{xDDsQ}utiKmVJ&1NrmAG{QuU=RQf;QL5oTVKPAymB z^F!nV>6mSr>c8om*`}#&WZFrsquGuJwrRH4Kt8AX`NMrn_49w4*X+aQ*pZkFznQJL*kHH_f#Lm&OmW6+fM5BL*_IYJE?iapYzn2&!CMw z^D+G~<7URw^v#r~>YMdVt(WMh`fvJ>5`Vp~Gs`vgm3)wtn`+;jdsE9z)&Jo-;=@4u z2kBR-aZB|xrHuxeYnkbf$j!Q^(gVfZELUi=T(d3%*%Y~HKXt7%^Ga-`EYrTo&2};8 zMv15Cn`y_4=Ri6G>6<>7c~j|UjB7@zuW8ft-;7BrZO&2Bj;a1jS*HC|J7ym<+s@2u z#?y?s*x8X)}i6|3LOl-$cjMABavVYtZ|uja0dVq+UXcT=Y|8 zF7_p!W?7F%{rR?n08DZk(>R;jIH=-<~8+AdZ4n*I;PU*9AeVud}7k(++y~( zRGrkaOdXR>?N?@92BIZ~(x#%5+AgW_G0RP*&2};UH|?0^nsloEK>hZIj5X7@)Uho7 zi2WbpgBhPxTFOeTqx3zo|Alc@X8LwuTJoB9gf?Sk>X^O_R4+5H8Ha)7srIE0r^-|3 z9A&fLoxfwU{A+nw^gucyH`ghnP5YfqnPb`L!=^v_oI#aqjti6CkB;e^87ngmsd8zr zRJj>LQ^%C2(#mJEE+(zdF3eb&KAUkieKz$?`46W}|Np0SY8}mT#b;?pk*C^E)e)Pi zbvOD$o6u(6P5psrvn(^71IbN0rj98eh&Jt;F)?Y$n>sF1t)gd>5D(9^&LoV+A-^8 z`ev4!N}KhP_A>K|P3ebbS*do+wOHE7Jex}Of1sETRCjYMr1nqKH`C|Tx`=&K->gfj zp9A%`fv&T}zFDqWR;vBfdBeWdv|BU3JPYJ5aKkpCi2wPVUn`oQ+hvIe44C=?!Q_0RQstu9Jnk~ z-=s}{QpcUx97x~HYwDP^#L#S0vu~u<(VU}9J5p8+Ue@iy+XB_=xAA-K_&xt{C=WjY zpBHpStio!?)@T3rYS%WaGCwabZ^D_EEG<97KmYtW?nx743$3z?i>(QGdx(`2<9}9; zJJ0^#y7aP3r#J(ly5N*Wm5Z!Xa{>XpTm-M*3_IOcE?$)244iD01?r;C!1(jR(O_(f zH36@5m>@6V9ffC=>q30j9NyDhrzq+qpckOou{7d-)3C34CMa(+a za#-c+pN<{-0bF4CRJ;%~zD)y}$MfC3js>Oj9eAHWI8g72(Pg);3tq#l^ui&jymhWK z6bi>3?f_qw;b`!ojt;bS4c@No@W&i{D+UV5v-aLMxnS>Jex7E{n1Al<*;eVfl`~39 zt+KiEXO~-EC*HGwu2LO>Q}rG-BJ!5lW7IkONt)r@G{31~vR`5pOV>TMO_y7&}}FECJVXpdUstp%4@#WNPnym0;nEMGeP z6h|dq^wEIWdLp@ai%KjUot6_pQ9;#qh%R4Sz|h+-Gr*e>?4E+xDa4(PxP z>F^ofCOJLZ%BgnbcpNzk%n_UC4~=)a-O)T}G~$%ONGI{$2@Gu2lo^(KhZnnPP7pPl 
zkke$TagA4gdaTmK0AS?VD$W4kEzzV#3OeTereJkApi;bey|SAjv`)br8dRr?SyLPm zDYX2OIzO7<;S4#P(VDs-hB9KBNQZf6u-ac!7p_AaSW|NHCY4*`PjxsP`Q`bIR!;sY zoNQ#2?x&~W4HW(Sz|X004!iWyDaI&A*Y~>}_Df*mj@`| zX3je&Ibn)>GHc{PO2Fk%hj}=AXL2=-HaM;E`Q@kPm!rpz&u=_6zY*&pDuh=AV@{R{7|k-Nn*FnE z-ptauGm5Qq^A;{RZ+xLz&)j(FcAV?k>^bKpUVntWfPM|x2uyO*I9*_>VAvse9_n!f;bCT&nuS1yai)YS1S1m-C63}RR(G-8xT*|Q%4tNk$ zjP;zxDf{~$KVGEX>{2rK!kL(4Lu`Q>nq!X~&viKAk1HHii?`c@b;FGpCi^kna3Mtb zEsdCJ5X?(2%UFcZj78R@>B@&t9p1W{9G)@=v3GDstT{90Sf^qx_j|2yNG4_yux&-H z=YP@FiofO0KLC87YdM2H7e$m|Q-aB_*b{R_{SnLxcxQLN?HHRu8bG}yDPzH0$9c2M zk&{IxI@D#A zA-o=ELLg8d9FIm_j7FV+7|O~t0k7eluu$#H#?LI7an9^KS0s{HdUV68wU|@niDy}% z<_Sk83^5K0^%kjFW-%txpJ`7ph+<$$;_^LD>o2J>o6@~ zL_6@(E37x;*{cE$yaP6hy^Kt;*>mU3o;7=pZhmY-v037b9B?je!i1NYf20R6kA<&o zt7Uzfl~Xb$=R#Gmt{>+jbQG>%8P<4b3_Y#U%AT0bO&&aj&md1vkjcI?r*ytrBz|P& zoSIjFHA;dyH{>}F8y0Lp+<41xw5C2^mBte0a2a(-Q`(y#y}!w@)cd-y@=wXo6+7UT z)R7y)n@VL4&550CVXeoa6N&}!qCG4-|1XP8axBPf#kXI@ELFu^4wjQ8c2jcZTUf4~ zbpgCyN|q<964T#>3+Iq0~Z>2gR8NT zk2_QGlU&ojc2}o=^d}Maxv4!J&kdC$u%Iq z5#z2ATTiTD4w!K{aeOk|&pvn7`4?iZcWK3<%POm!YQx9<9=3W-OD}IOv8Lv@rz5zSLAg6uvm|=K!jQ8X z8!ucmscCqBSEmR&`n=lu%xzv zxU3yR2BUl>0NPd*zgIby{76fZNlj~z!@?U>V9PF>c?azsws-j6^t~gLq4vS;gIOE; zx))Z(I;sBuvih?r94**+r8H5I`lhI(Hby!~eveq9L^&y9(qgWap%`6CB86K2zyC?( z&HB+5@#mt2(XwE9@##w@gf5ytKXOK7PO$tc_B@ZZ(DF|#J|q9kdapBB z7w`uIvs_E^mn@mQu)c1gGgd#-2@SULe*O(&qb^%~cJ^!O_g$O!iz5#!jzxx?P&i`U z)`|C=yz1M$^y>q2`aV8BcV@$3TMJ+HE_gaH>XU-UPu|e+Zdvn7kG|e|_7NjyJ^I$` zhu?DacTLy-cI3prZg}+lRqsUqJK^#7kJ|drjUQ~j{mPRT&s@>`d?2|8m2U&(9zIvt!mR zxct$dUh>?Tujj8mtm>;%PHg+@pLSkbl{=-z6zO%Wlr|HIn#!RWY;h;5v>AyI( z?b(LWZ~pj#iGO^1!p6p`imHYG%31jF$_E!dP`#?;;-ah_uEl4*yzsHi2Tv)e=-Twk zjv0q+yl%n0w|)|BJ|ye&M^0%EyB7WF{!_o6I3|Ah^om!Gz4hmJPk!=^jTNtE2dx3xc6?3=J)Tkh`;zWu(*KY1`P@{3UKSxXNq+j-WLt4{gu%40lV z-djIt2|zUiLu%;$g8dd1V1-Eh{ny72ax> zsQQ7|<_v2%`}FAh-yc0~@ZINRzq$2{l0!4LzV_+C@7&$-UKdPyhG)$?px%J=pum1$jqay#CJ}2gf^S zo>@8aZ|{8f+7o~5Xj+&tsp_Z~tb5;RijFJ#KIfjAuKSMrZucj@xpDUiliqp%@Vcp^ 
zYn}h?Jhp7YgY$+y^zEb5CRP3X#Q&CDK5_of-nXVdb#cX-waddV=Uw#kqu%i!@&4EA z&w2g%y;tVGFsWukY~kGHZ9l%$-&y|hQ;Y7~`@z~nc5YrZ?c2=F*Bt-fz@vN4zx1`g zj&JInf0t+G?#J3^-}tu`cYJrj+h4u=!Q?Cdyl%!}-&OB;q4$-yUv_*ksI=kq@7G+p zb<=TQo_gWi@7%WP<`utRa^pjvu6_O1+O2=RaqIS+7dKA0bn1&nB2Hr&>d73;dSA?v8) zJ&uPyzV6#c8~%OHf)`&s?N{H8+W42x)?PpT^4E@@@y6^ckNy4lgL16P9$Dge%JQAC zar@?r?>^m~ee4q>b{=!HYvBXGegCJ|pZjd%Rr4=->(`fF;rP=Ne;-^wG_Sk=(_($H*g1zpGuKm-Z@on?(z3HC&@0|5y`r_f&hCYcb zy`cEyM;^@^QTNV?Cmp$^?YE(au6X})dT(TLZR=@Q`L6qL?YyD$ zS3iEn@UjQSwho_O@%#6#>MOqD#?4%r4s?ChIyY~8v$|5Etjgo8dk>)xkYCQW=~`k2BS7R*V1A~<)*kIo4{la}%I zibFS#YihnDJKENC(Z=gHwjbAU*L#`ix4w1g?{9b{@9bB5j*A|;FndXD-pB7ARrd40 zwB9pjR{7E+-cFCV-*r#>@+&U*WJ=%CzHg7Y_8srJBV2!b-o0hkn6Y1^zcqT&8E2jQ z?%_{9mwoPq_a6F_EuK^G_R6w%d#4<>X7~F`7S5TN`9{NrtM5MYkjgJFIxVgC#rxho zYs?{2NBk!Hr~mx;rmw#J?#nyIJ@e$6(A3e#MrM9=-5t&6rP92`k7TZX@}SW#G=BQw%K20G{ASnqjAMT{ z=bNvloqEja2cLB2kyTC49hv^S6X!j4=waW4zJKU5?{Uw6dfUwQkFM#w{I`ESt!PhWd{xo7aI2Ymd@@_4QBIO#Ei^@f}BwTyXcD zYb$?#<(=Q$w*HD~)pspBV|T;&kM5|v^`D1aeW>s7lb4-#&>P2n{NlauzjyncPnI|S zruE+!yP_u?z4!TR?!NuJ=Z^X;%o~k3O~di(}53 z>lu3dUuLXnI(FNO7hZea;#n8;mQ`Op?W=pMUp?=nUtYTQnq_#8ZBGPWh+*UN~y{u7{3sM$SI*mYvbb zGp@Y#mtXc(zVXgkCj@?a#(T|G@z4MKyRWV-3QcNwrL|@FZ`YLVK62r`A3x z7o6;0`Hf@rjBmOJ*XPylYWG>c?p~6e-o1F2WAfwOkDt5IiPx8Wede*}&CI?dKKJ;n zqGdNcbIhLGF5GzYqqo2I?yktp32VK5Bif(2_KrUe*&5t%RobgOWs`9agguxU*7xUUw?o0+~p7b ze8;fu%O8F0ja^HdkA3BVzRELRyYs%rSI#-5w&=7q-#ebT_Fr#3_~x0P-!SLUv%dNE zrDHA{6TD!e_r8-JzT=}$(JL9`M9{GOThKq`3{`j_% zC#OuAzwqb3yvFhP{k7j;aNF%$+iTV@EWc~?y{?n*@0$G12X|g{{wcrxPtL@%$6mSX zvgU~=uNb^z_kSZ3ew7X7`Rm zL6cAN`~36$yyMNhd1dC!%$wQ0-MwYwxCKSF-<#r*aKroca^LoYi=TNvd+VGqw-FIf zU!Axgx1zK0ZKn|vYQ0~)zw*>pFItCpbWgJSE%-h*u33}!SuaDb6lE^dOsrR8NA>lhe$SBE!ya91KJX{MrcH*JJx|QLUDM+0@AvY%^=~N*xx;V zsq@?S8QMNSM)X=e{>sFMdv<@=e>Z}&>|8uP{q@tVVS_`uhd76H81w#YnpfbTEyj$_P^JPD%Dz*H%{LPiC3Kx4mep%GBzTRW5Y`ylOU{h}4soTq#l#@DDPfz;on1bIO*zA;A z>&(3(wWef!(|UZ+pn}~(_HQn@I@Eh;nWvfKU!||*L#VJ3*Xmzda?dmTx4>$%ukSN7 
zD!p$X`^WJeH@57Yv1~;4qk)yorwZLI5I0dZylYGMgr?Pp&l=t<;m-J<7k5wCBclP0xkvO*VJYxMcu z3wJJ9Jk2wsRAR9`BP;q|9Cm;@+`qzACx9#@l7z=zaU$p7JET z>L263*|u*|`k#K)+pcx4ylUl{=Zm(7rjO~~@8Lqa)UVSoiV@r2EU)Q(t68(}#?M*P z^OyVPjOFI%71 z)qmyV)NEbG+!5c7P+e?w`mDF_h~cMeEcIHr?fLqIO&5i3uO%}IV#rKYufe8}`J2| zAC71$UVpt?44%qYNKu_TvE$CB{v-P>zuw+zZr08{pjF1nO*>bg`YmC{*oe136z@H? zV_b_p;dd9FThgPRvBIEzKg5S?|D0N+XcAXKyg#6HLROCvh9BLI2M3?Hy-C}z!?{Ld ze=Mc)c1gQcsoDHFo0e`X*eNyk)ad3xaihQ21az%bUX@iVq1J|wQlZy^zP|4>q}h&L zHQG12)pTNN*CD6$TaPxVcXYc~>c}G%Gygn$c;|jk)#<})A1v!UVMh6HcIhr0zj5VE z;1&0bI{TYC_h~%y&~X1ZQ*}NYGPjzSyl%GRRajBWZx?_AbV!9 z8l`404E_4ow~rQWIc1Jmz3=Sm?Ztb#?%m?iufUy1`kcDWO6TK6V}H+VeszB7-Kq1t zMLdKHkaN72-kfzl)2C7D`O41+OlqCvUSUe36{Q&yRwsLJC|kNfrMmAAo|`^)LDhGEaxbdAsy48}xBdaE zzjiu#f(;w>blv7h$-=zGrDIPdo|`x;Z0?OSoqrxxVFul;BztJUuEU4!xtAQ;px~KX z?Y|lJeE)mH+M`sl)nT{y)Y$Roxm^JTqKXfw>g2L<`03+Yd=?Enncm9NaQ;qk>5yID z2R>cjvBn}z;P!#1I=nu(?8iYNA;}u;hvVZePkk^s?akVuBiFv^ChhA~Zp!3$PIvq5 zSG~FQ=wejXg&C&3b_~dC{nbT+Os%Z<||#MY`O9kWQB^ADtl?FRIOIMM$KB@wd?rQ^|jsh zC34&K3pEgkBmqLLpp9X>TxJufK>^VQ8Xyo!0)$3E8$$yE{MZC=106^xD-aAhB1wP< zR?x->1_6F-0&pHkC@Wxr9FZhISQNA|EFi#-O<(~9Lfma;(D^C${*p$BA8{M36nUa^i;!0_gpxIkrww z8X1+0(f|QyF9JnAG8;rt0z@E#_z!YY2^9<(ppsF+NC)Q>fg&H74I(H3B9KY=2RW&P zvOor?WRwMIOHL6e@{!pff)bzusDK7kG6-;t^_(J5{9!>?iX9Ddf~(ZPwu3WJvI`Vd5a_5T>-_sxsoHQ;v62n@R$1OK=hmcr zV^gIfOWVno$rU*%Ly6;qKH@ug-2mNXv281bJQz5^4!NulOlg47pwt3{1695+0^uz^F4Or5R(g0fnat+8eAlE=oH6Yi3Tmy0fP#viR=LNx`91O)^ zC8{d}yLu;rO2Z;d5A;IWDCdp;;Gjd{O)3RCF zavR^4+xWJe)T@jenFvfU`s}$(=IDJQ|7X@%u`-Wx4h3@Tx8+}y|1)cBUyfiUhXOfy zwB=uv|1)cBUxNvNMoBbC0FqoLA69c=p zVs0-hgkAgNvaLDF2`EGx*vkqbA5lR1ahVv{wH0%F`D5*m%eLky7oZSrU@t2KIU?DZ zklV||z$W<5tkDWrv27*iDk}ozoXM_eFQf85w8k26728&FuCgMqf9w3XjLQGe8f(xf zh%|ymK^vg~2I41WVA+|3&Q;dP!p~cyMX-WMBLpjGBZ7f}_(>U9b|#^7m4jvB=dIDg zq9D=;76omD1sI5*l!0Yu5;|AeA`A1i9*qhB7Fbw7JAtCpXnB#5mYhI_eAENl$q8tz z41_2mM`+;T`3(hj1W^ts>w<{rLK)ZyWF@06@bQndONvk!4Twx40GS17EP_7PLwtZD zsT+BqPwE9d!UG+9{U~!nWwZkXAQ@>JX$zd=x&>462WilN|Gx^UnsN(<^Wk8iAx9lR 
zpvlKU2l%K9wjhy#P)c23PwJCpsEZV-i_}jnP#4=HZ4+yyJ=8@$e27nAC+h;6$j~>W z6ou%avLU_HQbqg+%VRmmrgE_iaUqFhklg>2Zk$q@$G?C|&ie8L0T zh1kj)p&rXfgsH2NXUQpddIX$N_MWk&n8R%tI}rgJql? zEX#6*CUq)x*=f)LO^3Sd^*HdbHUfl)HIt78c+eL3klSBNu@-@bfe$tsTuu0`i|~K}d`JP12Q;aJ0$rqW)hXZ7NOX`o zpnhTlI-rMqw6W{Tw!{algS3Z@p$_WWc*IYv1AWC@u1jeT*?@K#@k92Hu;q5`K10AEYkZH)QfH8GTc9NWF?rSRb}W{FeQa{j}9d+K_!$bcs(ulXkIfq+$5wD^J87 zjhhphHsoorRTw%AHcL6V5t$p%paV3S91Uuq3`=N3teq&d0R#@}5+#x#FGrWu3+>A} z>Ovi0hp5nUJH!`aN9<)EDQtsSfIhZE+Qb@&Ew)QqMj6zNextsOq)#A6nGC@c1%Fg< zu$(~(Bp{~9f}DH^0}Duy1mplP88}aP>$^w|A03 z3M3$=L}g(zo`0ttvIvLGiP!oUI&Bmp@9%mYpZUiTy=$dM)A1RxYnm%>0T zZ5I)F~Z(dwTSzuLYgZ00~UOO9WR{1 z&sQ7@P4(e@0^1E47!}!{ZJ5-qV|QUlY!`njj~W81D?jhRpDqjTm4`4fWh;7uQZN0dD9fZbR8QPqn>(|dex?5N$zli=F8iomt;eR;Qo7`q*yCwrp zzf0e|>gwi);y$+NGtXz{m7{}ZW%R80{rt7AmsR4J;=9NBSKmLv!<5uxcfyAaH!R;3 zytXju*}}bhqZZyB-*NYoLyPwoJF(28dR*OyzH7%Ob-A`CO}aAi%|J%qYJSWi|J6ro zIj0>?G4FX%IP*})ncrpT!;cm0Q@eJ_aqe4d1y-rx2Tz4|FHt1WU`%w6+~MADpx@Db zqsG-un6>P|p5`ma<0p>B^W1nYUlznRyJo$`3mDDa?!d$7&FOEK4y%YF?YX> z#n`#7F&N?etQ*Fms(zcD7>hIKWu3uztiRh>5M#0|^W-ANrFu=I31hR4 zW&nrrx$W$Z1{kA9&YatZaccO!wH{-2!J}z6F>Hx)-l(8=~WJI8-KR)qs)^RM_l=B@Y&cuQgkC;ZI3IHu(r?B zg5S@UZWXHQ2fPXesyI zOIma*qF!{MPo=h7+O#^p`@+F)bw);JFRGnl{{8L6X5Ysh?^TBH*kww&cF|;a{Xd+@ z_n{n9XN{BXBOg;fo2o)w(m(fcyZq@Vx4v73!{?yeU-~%K;C9jBbI9#4eQfr4F#R9= zZv_5tkAMogk1N4;ue|`!1p!?F@dUIGpgSP^XH&`pun1sLz+!;-b-)v_IA94tyv11x zury#9z_Nhl0Lueb0IUdD2@rm_M|lBi0IL921*`^G9k2#qO~6`!-hj0M>j1*PnTB_G zQ1CJp_;;~@^#JPw!dVE_0I(roBftPaIDMp=00sg!1#AY`9IypoOTbouL4d)4p@3n4 z;eZi-lRb_dh~>Huj#Js<;U05k%c09il|kOwpa3V`|T|NpJ_|F{3c zB>WF@jX-fv`!nnE)9Rk^xVoq9d38^CU|p@Qj~8+Q;A)?`QGi4->_V_cYXK-uJHn#V!c`7enQ`_IX25;j~!|OP(uW)@crylenBBw|wPtVAY zIG+|hJ$v_2tNTXf(}#YZp8Y>H%7fz>9b@Ol+Nsj&$TrEjd(_XqDRHCxh7TbkBO7aG?ZTw_3eEz*|*h3S|g#4^!TtK!G64_h?87YEz(P zN=rhK6bmUffs)Ik=lH%9t2HQy_-E7jQofWpXPZx}^)S4cr^ai zxyKHk<4aZ$>@@_Teu}l2Rh5qXATQaz7|_m&HB^owfRqmq2*|gPk&zQw^u+q4m|fp$ zeIaxA086G)DC-?U1t}ifC)PLOFuw8xlv}Vp7$a1GbS-8`Tdg;HUGq9!%<6RwBGVC^ 
zEHGoacw@8cwW4=fUu43`4KXF_!_g)$I-RIuj|h?CkjfI0dJXmrB#;11E*}B=0k_cSIw15@c^KH~-_3(mzMGee?)3 z$m#_-T<9RBl&8~4Zcnfo3v3V$0LvW#zh|Rt4;FyJOpa5Tsz+8`_5)HP@#u;=Ub6JH z6-;r(rO0K7pCByb+9r?!*JuK*))v&rkt1Qe2@i~9p*?jab=ZqG0#{2)7Ae_cTNVY` zG)PYNixes=)A~N8*VRu%as>(-2Hd8gxJdesBj*S=ZA!KrM%(HPvrCR0={7EQG&{Q@ zT3VlwLD&lYhc)Z0xszfsS-WJ zFB+w)i}3jmNUJBT12}Oj+^rA(Y=iX=yOh2r0W@Ny)o7v6qjxc>Eldng94( ztvg(;W6&P!M))b8KFQLc6i`K%ZTcymioPNT&rwR`_u+Vwdr)POO^znHc3rs+NS!H* zgQ}fWHk538V8dOwDDmXU)NJ)V=ZGjIsx_}?heHl`4_^c_Lj)Qy;>ZS~{ z`) z)mCTNu`-n-QtnC|+WA;uYu`FSp)5Hy;Pr63Ja6I)aT?N)+?I?lFa-<8U%9C#&bWwn~(vfK!%9bpzt*0 z04Y}Mo2&L++R-zaCQ4qGpxj=6s`oL6*ugm$$@56O6a$-Rx?Aq(u+@j-F!#f2_p!gt zT|4*n>z>wk)?$AE+T?N$O4t&?+!FG#W_Y!CeBV$iEIKr{KNSxDk06>NZ&jq=HJR~! zDfvD9PRaizV6d!OT>a4(akaW?HMQ-R<7)WnxTj}5`P-xIX&cYn zU*#N1fe!S)NH$f=KY#yEstnG@sI^+%Mh)M?)$jszb!WAjyaQeB_!4wcjc>+8^N^3~ z2>q!1*NXUcKH|&Y*#BB(M5}imy7F9TJvNSL#Sc)c;nkjMc#S*ECcMdg z*l@zn9af)~Y=N9#jus!_3m_b6vHpO^BtUo0)?12w19&~=8rkG-Xx?ZI4zfY zaIO*-vU42Huz(tK1^;;EB)5a8uBJyp5`m5TS(95x#lT$8MWI=~6oE ztqvn%DM;n?HQlBQIo_wWK_(15)au{^NbDnG)B0QnEk$$(!8 z^8@tJTC6gNRoQzp9qO&@5L1rvr>Mv+VXb&Hg)9Z>{J^2!T%=7u7n2lZ363d1I?PcH zSc-iVs2Bw)P<1$3Z!U5Tztob6(ApU6CYY+|=X0yIdAA2n-{mGT%FMIy^T zPA%XAqAewIn7Q=n$Reej1o|y6K7};_PN1>DOa;lrgk+^CVhYz8){R)1lx&jE=Q+DJ z8e=ei3Yo}Ehk5u^l*366%Es?CwgCI!tjUq#aaJBf2q*<^1yK+1!leu$VCn$2c*$7{ zWho8#;APWy&{fEGS>%}*y8v&9eg-ViS4Xj>D3t8(e7tnr9FA^|Ryj19e+t>KseT6F zVwnPE<-m|jH}kNvz-1q};Gh2#-~&c7SZu^!$SrvUv1~?Ght>I`%JLik+!qq^Z8-g7 ziEY}g*7|UMn1Anb?7*))h9^hvNRd+5DWC2zA8F)SeEP}0 zWLL9%v{l5re!OVxst$y0AieB7g_GCt*>&eqIZPOS$i+o3ltB%+(^bSA zic0h8Kq{9K;Z}>Yd}ZyU#?U%%fY|WLxn(2kILe3yIG_;`JXq104s5k)r~10(1s+0W1Jm5YP>< z5THAtH(+hRUV!A`MaVGtSO{2rP7y(+fg}@pu%{A5-G>;22MC?210Qs-SCCRF3{Mz& zRZ}RJg&dB*)CCC0{UAFBcna_&pbqdD;BmmyU=s@YdBC$kuYf!Q^3jlA1iS+H3*c4Y z!4ZUP6C0rJrGQ7QRwr+80>JJ^Luj)m$VLMC)S*;8Aos2VS5AR$33PkV?Fa^5z^e#Y z8PE@G8UQv13;@)vf(L5={ekv|yd7XYpnU;f*QTh}P<{`&50vWw-UqrZ@ah2WfO0Ls zp8;zCmI7Y!RWMfaK}n^7wqYFY{=&mKDtJIsRTHoVU@#zt$Q7`?3U~`J9dIw$z?@L~ 
zfQAsE&H^3-gs%V;+=Zjk0C97i0($V}g-V6|4UA9uc>CC>G$6;eX=nrY!DpZcpo0CE zIs<%%xuntyQ>qYLGax@Rk0NbDTTTTg%e>&1R%*$8E46PDMO}q_7U0)_ivSk`t^`B~ z1br94dqwSj5dJ}Euk~%KQ>E!7)>POJ1b9eEh5hLQbOfLUFdlFuU?Grg0Xw+DM_s*R zRV@N~2A~CSH2SsMo!&@NYqW8*nf15mN8SW2cOnlgCcNwvC}`r(jj3I#a6bWUK13 zV8wN%+5o)GfO`P{0K5qJ2oT497vx8h<+|(lwNfG3a{U^p7Um2qS9wEnS)qf0PFH9R z&^8@Mx+?H*z~R-yzsTUCDa9=6QJ&+APbx9F#I%yrOU)?#b&1(!=9HaVZeICsDl96u zxZ;vZODlitwZ`iQuML`wnypp#R6SVjNVVhDu2+9v?avyoYifL(`L(Rqs(z4vuz%YI z?HaakL4BC>qb863YTT6ZQ{xv8 zSUhmape2Ks4p}-hby(`~Wh0jTw0!%D9V>RO+`Vf5@)Ikse0%lVU;F*`-J|b*kA1N` zYxSGeZ`ZtAlfANLTAd9(8?`@;bqm`bz9V91ZH*$Z<{t>6Ao|%32 z`*Z8gufLF%vEkyzO9w6={N>OU`R$zWucj0zQm|lA8;x)5ELg%;Dp;^&sT}&hOO-AI z?Unui&}KQTv%FGq1&0PII?(?fS?S-a>hIQ6Id214mH$ns?xTQ;GO`Nf(SBLrDT=N> zKBA)7g=Gu))44y@=^=y>_z_?0@1!kKMC+`js>f1V|C+ISZFO4PwB)D?l`0v~qKI5i z1w(b(-$@!~`;;6)C|{q>&YheGOT$W1bKO-w-USO%bqdz0WV<>RU^Ls2KGl8Z*%|VbkO8z{C^y~lnMtx+5kf^mKPNeAUj#C7rU)#0r%ikJ zUb+P9`wO2-mkfJ5UVv3qz_Afm`CSU1orixQ{AdN@_a*psm)+AZ*X-ac*|T)^Jsk<{{(DR zkzC$9l`8YT3YOD`{V)Wku9VFW%yV?UOgYv|Ku9BU)=mn+Io!(p6rYW-5*sQ=8$ z!3T$Nbr{tzw(C$so@BvVdiYP-b{G!#uWW@e(`L=1*4j!Gwdp_({okby9fkJr`~ANS zH?YE6O09PtYQOJ5|9j+b|6Wypx2DH=8^B%socI6ZZz?35?>OU%w)K%?uI zwEm8~TVBfDwF*~9a$3)j++fU0nY;)o@4!&FA75tteEG9g5#g7w;^T!`{3V18a2Zv( ziHTP-91n!)dzQ8Y9Sa_(z=G`ZvQ4_}I(k+y4O{|w_XO^a0DQDn!sVnKvyvaGd>~gv zm7|`zGFZY@(qQG(Pd&aVN~%$|^6n}`o~`K5sH}n8`Je%(VFXkfIro&a^t~-Z9P6JE z3|D(lpIohkJ77wCvNonJQVvZ$ceU7eWn@41a=6Y5Rir9zd`P*ONv>>r1v^+%neTf; zalZ=g{Y7Xfjibnc&UBz1Nc{eSh~6S1qy+_kz_A4wOj3Iif|TNojX6Q(^3mAaB$!*^ znjJ!L%CBBoOJzFe?g3tegiDWQ$hc+|BjeSgxvUkuLK(fMc!~I9cx`=OMXe}&EVJJj zc}H%J!$A+>rd7`66r;m2-W+8LZTX~ni=!6SK*}u~xwa6>#ghw`*Fiv}=SZBXc5p>E zB!^+AAbcR|oh`^n2P=zm|5aWFM;anug9*aO-jo|&@w7kDvv@C=qWZyvnO~xT7zYR@ zX+8<|Q0Zk}8kW464!qCA1<}$t=zcnOyMU}7`;_DpJ~0MHh{BXr^3urR+4qzj z7NL|-w*{;c5rb>m1|J==Jn>Sj+$cF>(os1HL(fvoRxjivVF~!=$8t!1!)<|Q!SACm_kKcbD>~NTK;s?+H<#suotFk5M9XypIOPjZI?Gl4h5k9T zX(vCsuyWV=G@oJl(XlZ^r5<^XN|wqm}kvee@QPtCEGt7Q`d*na{zlR=64lDnAQm!CRG19#lyEU~t`~ZgmZwGT}x?Z-WFO6HsM=#0JVH 
zRK-^QYz|$0m($%`4;19E2^9qO*dH%|M;>h8KmrSH3IgOvtto`zim6h&%)xW)zX7&R zd}87yb8@eKSsNr?HalDXi9IlK?~i!mgIak$0EiD8*df<34jRbI2V1+CV>@=^yooQ! zT~V$y^Pl}vCf@~MK@anQVEKq@lMoMJ$UgS~iz;)Et#>!=vpxZ5-_yoV!5_%v0ZSEL z5H%Pm^7SVKk1;lok2^$3W-0pCAayPH)|20^ujt|gWZEn+|MUSETSIdG=>ssGqoX7H zghp$^h9pI55+esBN5+OnQtjemLZf0ekqHTL2^15Vm>iv?iH%FrBt*t1L?%YYCWVGY zM^akggldwKhEQ~5LQGU*VpLqLCL%I6Dl&r7#YZNDCLsgupb!(4lmvWjoF*|jysxHr zRCJ^!EE{RWw3y(}pq&lERt!AbH!kU?)hI5HgPE+#ZdY2G%PFssQi5LZ-dg@Dn7?vWXfi|s=hhQx%$MMs5` zLDoda#l@5Ohz*TV!UC0y5Fat2u|qU*@z7Bmf6{qn4xp!GN+Kc?!xN(7A?CnW`?R(R zjrdrMaTVS-Dmp@A3rSS64G#%Vdqn26jZu(@rZ{|#E!g8P0z!n0?Vz}1s5~w_JUM|f zB!$8Pl-F2z+(1}7xWecJ82-5AL=DVwBrcX*D=Rz{)@n4cI^fUGY-=qlCO$e6$5NiH z7zlw-STe8>pyxP;@}lVs3pFXKcNA2c01IRwXt$H+8l4E$Bql*5#b8IsLdzK`MGIpS zBU4#qvU(zhGFfGbR5ut1P2bQ&O;{w%Mq(uN3EH3&;^6JbanZ{B*NIGUIg6)SHq%6ve>N`!V-I-D~%_BID4K$wv3lO05<%`PS#w?{-S zb|U<$FD?Ox)IeO2D?x)Ci8fcsZr(dI2KSKMy0*2Mmym1($}mwj#oWGWBa;TjCG@v1 ze_Ppc`RBF2#L9M#+ev7c+%K5uu;kvoAspnrG-tw<9>Z1>E^k>mqkvxJFx!U4l3qr} z!ZPU%`>RGCVNGJ+@Mn-^h&=uG>qmpA_mXl*6D2`0K!XihE?r=bh>KhlY35A0a z?8dSYIoyFq@(u;-$$s$E?FcQ-f{56J6t#^2`=^ii_{J zMC=d8@l$XPi}<$zo*-5Jd$D5r=|3^H{rh5B;Y;$r?ZLlm0R3@sbuL(d^7m`tSD&gE z78P4Bv9CMH!-0VDx>4L^4rIB`G_jl)bZgtItia>YQY1n( zB9({=V|XqJHTU)}Wz37+(pC>Jvag(mQHkBo!Xt zG@w3~0{Zq(42L>0q3-?(6chp)*Y}5fuz!8XL9Y=!{`>%12n-Bp>Q6<(o*fIn6%1B5 zP_uWc`X#4bdF|ze+N+HLCN_3+)iP3A$&&N6n=)PI^xDR^pTou#Y0OET3sjpDeBeaH z8Gnx&;b$9^4Y)Iu$Qx;GFNaP3=q_>%DRcmeO6Sc~A$u$DZ*&QhmjV1}7TbInj`6SM6k@3xgw0 z29%FnakBBXv86JaWR9&?l)77Y_L-&+R<1bJ?DtJ~PBo_vR6E_`iO06sn(RR?at`hf%2E*-6}7^^y;aLRrJ6MG;@aC88HJJocRaeNUo|S(?Se7MC$yaDmdcQGkudJGEV{>q!?(eXb{8j{NTWz-yb&Egp1pRGB4%@660;FV#e?syxxZxi zpATUVMo4igzdChHabfkc301#Yz5J^NS5~i>-llwQS(=k7E;K^I|8@dXiI>)8d7^Frq?=)G{j#RYwqjw=04 z-xV_p21k9nc)ZYW?fOGP{~vZ06r(qu>>$Q$xjtTu-TwHH7`OXmwPjB6y2jDqHv_sZE}=t9CtaN+hbAL7EM3t`87x7|%R?YHygZTy|W zUjfU3Hbpx}%;t#tBhP7uW?*%UBYX`J8X0QpH zpPT5kl8yA8YA-H%-nVi4h*Df)dylz!g!PGrXI?hCfIf8Rl(4>YGLNu6)%|v2g?9%HX%PS1+5~tj?NM-wds@cFo%Db-rJ}{cW9f8;&*e 
zS-<(pP@f-u{%yO@kGtN!^-0_7-n{OHgXM?S-FURlj=DdcZ1%41rn6m|`)E%`m_157+MS+wpk&JHMUJjy12hizwP$AsdZU7iKmD zg6s0ijTdlcC;O9`Eqw`QHtX)b3plf1HFa6zGNU!D@|o>CYTPN^UbA;B)7|v=!udvR zO|Z)Q{QB|&cP+ifr6vW|mps$@?(D>~?TV~=!M3k*aQ3Y3?6bS)TeQo7Ro?fZODR74 zKtP#;HXq1NbD{qeZ+G9MC$*iwW9siNwDoou@y&0K&USqFynoj{F7Nt3-Q(^xrG7eW zc4VhH;jr07DPhq){W1xQq@OP0X7@>0^i#rOdU_TKiZD&GYJACFE5^wEjK4PU*0)w_k!#;Y9p3~fS>Xz(FmfZLN$u)moQ4w) zGSWSW(<72$q&uoK@C1c@q(?7?k?y3b&xxsHtJ=5WTp_A#!w{mdSdahfAxfNX=-@Q$ z91c!XNgkHN^2Tp>np)(MF7s0B9=dEhb~vTuNOw^VU%0OR{~f-F(~TUQhONfIX=S9* zZM)OTNK?W8Xr%wik%~AS;NUcz2IX@4(~%y%*gn!29#!Xk;V6B)UcG%~S4?Smj$bjg z$!)jbY0dsP55%D z-Y(;p6CQF*?<|h*UcvJrD&&)+CUN@z+fg%wI34KVG@P7$93CCXO%sQhhVbYFU;h72 zOvAyYWVmv085;oKDb(!~FE_ey0lwDAhuLe9mz9GHoY2c7Jr2IsH1|9>=5mXdm*H!T ze3;D)cplV0E-Ekq{A_q$`srkoJBxlg)%?Y^7Z;^Lk6&F1nczF{MEHtD4UYTm@tAxf z?MTA$maVKFFDJAv+_KTx_7z4nI@j4}SEKXYo4-#=qC5J&x?tpDUS;ro)+g7g^uaY` zoOAKOWtNhaCx(o6_d60Yrs&Wt%~;Pae2Aq&zrLB{Du(q99shoWYtIpPW;R(m_~qgW zJ%|3b{&3G>f4q5JdHA!VT+hMp9`?C0BKuaKUL(=ZGgtADtiW(elqDKj~Uo`TbM9 zSIjQnpvSFE4X3wy)^5dQzdh+Ey42e_cH=QW8282}n*|jKINP@4`^2+deHL#z+2qB} z#8VAlpGZ32sqlz^4DGvh$r)_dmaoosaITbek-sA)UXq5re|7Pv5#bBsSC!(v8n8NM z%QrD~stvjdUmBioJnnydc(LPYsl&S-(;RQm*gHHv5Y8$7f}cfNtq4gh;^^nLeYyN} zt>#gGO*{1SD`v9I&lacZ+x!fu{WKW9{T|tLVM@c(nds-jeJ3uSAKrFqqgO`5S3x)W z{?xtk`=l4{zUvQE-B54KqJ(08X{iZa{eC(k``H+GtbcN-|0m;_*SFHMBRVafcxle% z>yycN*8Cd$j0%hKuivN%T*Dw4rCL-y~&dOa&&r?vJsl2 z)hZ@s?sc`eR6U{@>m9$h;Kb_7k5rvfG9+C!qg~kE>a&eug;et+mLIKI(wI4^pvF?Q zXw%HG4Yf`0AE-9DaEsJ5C$E}#rQiWxDfoSO38zAJR;G<9P?pLJ81W4>z^g%z&1E9+d5 zIzJ}0J{~a4rOE+yyON;?JUdlC>kbj#P}g#V_Xd{|-2r>&>;*EZA!Xq#wTXoIz_wQaQRv>mivw0f;c%V`Czs14JGYa_Ig z+CJL8+J4&p+GuTzHdY&_P0%K3leGi2gS3OSL$pJ+!?eS-BeW@6i*}TDw04YktahAs zf_AcYhIX!Yo_2wDk#@0miFSo{t@cN4ns$SBlXi=Ct9G0AXYEeyF70mZ5$y@>Y3&*9 zIqengRqZuxruMe>p7y@>q4u%%5A74}Gwn-lmiCSItu|ZxK}+eJbj~^#T>)J|ovY4G zS6JtvE3PY{E2%4`E3K=jtE8)erI#w5|3)A)1_0{#$_1DGdVs!&_3A$w6Al+cy5ZzGS z2%SYYN;g_JPB&gRK{r`9MK?`1LpMwJwQi1Xu5P|=f$kgKBHa?*Qr$A$a@|VZ_qz4E 
zG~EW>M%_=kO}Z_*t-5WxpLN@HJ9ImByL7vC>AJnTeY!)s!@487qq^g|6S|YSbGq}o z3%U&5McpsDtGZ0xZQWhnue$rXN4np2Pj%0AFLkeUf9hWA-ss-xvUTruRvksFXeZj4 zcA*Q<1!-5>jV?sH(}n4xw3_y$OVFk0(sUWR0$qu&Ol#<>bTzs(TY$_d^Zn zhBW*bnr=)tp#$lrbaT2T9YhDyt?4#&JGwpHf$mIqp}W%EX`0s43~iu|G)r?dPn&6x z?m_pYd(k0uC>=(J(-CxUx)0r#j-va~{plDwmX4$2=>c>Cok%Cq$@E})2tAY@PLH5R z(WB`x^jLa4J%OG?Po}@3r_j^r>GTYGCOwP(nx0M1q36=`==t;l`Wt#7y@*~+FQJ#x zsq`{>IlYqpmR?JLPp_wcpns&(=neEw^d@>Uy@lRNZ=-*vchbA)-Si$ho!(3DqxaJX z=!5hj`Y?TjK1v^>kJBgUlk_S241Jb9N1vxJ&>8e4`ZE0seU-jOU#D--H|b3J7JZw( zL*J!;r614_>EGx_^b`6i{fvH2zoh@9U(;Fi8~QDsO~0o<&{mq#tMpEKXT6KQfZk1C zNbj!q&==7c)fdyN^`80?`jYxm`qKI``m*|R`tte;`ilBW`pSAQy+&VEUrk?KUqfG0 zUrX<;udT16_tDqY`|ADl_4M`i{`v;`hWbW&__4aai9S%@RNqYBT;D?9Qr}A7M&D82 zMc+-Y)$8=|G`-%S=k&bZtQYk?^gZ>x^db6CeYieC-&@~DAEoc7kJBgUhw4Y_Q}ko> zWA)?p6ZDhyU+Jgmr|W0vXX?M!&(_b^f1_WlU!q^CU#4H7|5pE3`S%p?{`-u79b2t^c5RVG1yXn8J()Q;bnFC76;-DW)P* znek$(FjbjqObwCALxw2Y2n7z1Ns zIEH7;jKGLY52hCr!h|v5Oa#-L>C5zIqL~;bmWgKuFo{eOlgtcc1~G$~A#rW$rQenFq{6<~QaM^E>mH`Ga}FJY$|SFPN9i zE9Ot;HIv1>W3rj|jLP6-a5lIYTn&W`?uNn!4?|Idr=hr^grTINtf7LTqM@?E%TU!& z-B80&%iwLOZSXPFHTW9p8R{GS4Gjzp4UG%|hQFXwaKLcTaL91jaLRDnaK>=XaNdw%xMH|wxNf*%xMjF)xNmrA zcx(7zuo@_%ld*u&)mYeA(pc75&RE`9(OA`3-B`m|$5`Lk(Ae16#MsQ((imiHZERy~ zXKZimXzXn4X6$a%8g)j!Q84x}_B8e~h8RPQ;l@Z~A7hlUpRvC&+8ASuHO3j^jY-BK z#-YZM#uVcy;~3*O;{@YG<0Ru`<5c5J5mQl9aZ?FXDN|`v8B+vH>NH8n6bG6k3#n*vQuP0dWrO)X8qrZ%SbrjDjgrmm)LrtT)K zNoUfV7?Z(dG;t>0WHt#V(bU7#)6~lpVhS^boBEofO#Mx9rg+l;Q-Ueclw=xW8fqG0 zN->QyjW! zX`N}k=?Bw~rZm$=(Bv~(@xVKQ@UxdX`gAo>5%EL>4@o=>A2~X>9pyLDZ_Nh zblLQa>5A#9>4quOblY^_^w9L!^oQxG>6z)J=}%LZ>5b{F>76OtWHmXl&a4YtfGxR>M|dtFkrNnyfclo2|q8uzqYkwm#dC z4PcwFfowCj1>2Hs#Rjvj**0ujwjJA^?Z9?qJF%VFE^Jp;$I`5xWmp4?KXMdUk?q0u zWP7n8Y$zMfMzX!xK5Psd%f_+sYyz9eCb7xvAa*c2gdNI`U`Mhktc4xLj%LTOW7!Gp zM0OH8nf;2L!cJvpurt|N>}+-pJC~iu&Sw{}->?hWMeJgB3A>a{WtXwb*_G@n_FMKl zb~U?(UCVyYu4C7;Kd@=+26iL+6T6As%x+<~u|Koh*&XaIb~n3+O=tJA``G>L0rnt! 
zh&{|6VUM!M*%RzZ_7r=XJ;R=5&#~v(4E7>>nf-;m#@=CnW$&@~*$3=H_BZx-_7C<6 z`<#8j{>i>(v)FfRHcN3T&WS6)xpIX$53UGTlq<%mIZv)QSAr|amEuZsWw^3jIj%fc zfvdz-<}_Rtt}0iZtHIUeYH>cCKi7b3#07ATxh7m7*Me)w1#zvpwp=@|J=cNj#C7Jn zatvqSjGT#MIi53fVO%&D$@Sr)xmYfq8^9%SNnA2Fm>a?k$!QJ9+ zb9cC3xqIAw?g96Z`;B|V{m%WtJ>{Np&$$=eEACJ3HTRCo=00#%PQ^R(E_?yrjW5Kz z^B#Orz8J6Oi}NM;l6+~t3}2Qn&sX3p@|AcmzA9ghuff;kYw_NEZQh5k%lq*WU(>%i) zcoWa^9B<|YUgRad2j7bi;Y0Z_KAex>d-HwxzI+tlkB{MF`8YnFAHXN@iF^{D%n#%T z@q_sx{7`-vKb#-Ir|_fsvHW;`0zZ+T%zwpC;ivM``04x%ekMPgpTp1P=kfFTh5RCZ zF`vpWpiCVn%&h2P3=!m z{BC{^pU&^&_wxt%L;PX>2!E76#vkWT@F)3G{AvCSe~v%TU*IqDm-x&4FZ>n$Du0c? z&fnl~@|pZC{x*My|CPVT-{&9j5BcBt$NUri8ULJr$-m-X^I7~`{vDsqzvn;jR-Q7e z%+BTl=7MH7b0M?4xv<&8T-2;Kdzy=zOPI@;%bF{gE14^sHRdYjs^)6unr3fvU2}bN z19L-jfVr``xw)mel{v^9Y;J3AZ|-33V(w<{Zl=wQ*&_ePtDKF&&@B)FU_yaf0|#Lv&?VJ+2;4=4`xbm65NHtLJ^^; zP)tw@#f6eWDWSAbMkptg7b*yqgvvq{p{h__s43JEyoEY~k5E_e75s#HLVcm35Fj)W z0)?hRGoiWAQfMUv39W@TLR+Do&|c^$bP~D>-GuIfR?rEwpcfdyAQ%N!-~?VU3!)$i zJ%pY@FCjz-6~cuGAyViq^cA9n{z9}6Bg6`ELcB0QNDvZ*!NL$>m@r%zA&e9(!YE<1 zFh&?Fj1$HS69oBvO-KZVyqmheV+E4&l3h4;b-!75OqN^}yPMHjJvSWt8o-9&e>u;?Kc z5sQk&M78KC78gs1CB;%=S+Sg0UaTNi6f239MK4h!RuQX;)x_#z4Y8(JOY|0Ni*-aF zv99PV`ib?#`l7$sPz(?oi-BTO5nhTVwh&v2t;8U)wb({%E4CBciyg#{Vkfb)*hTCr zb`!gcT2Uu5A}jV3L&Q+AkJwl2FUE+mVw{*DCW!;ZLE>O>h&W6fE{+sa#L?n-ak4m7 zoF>i?XNt4L+2UMrsra3^T3jQp6W5DBh(C%O#Es%sal5!f+$pAud&T|Y5%H*aOgt{0 z6iMuo0F;cvgASFskQnEBq8YIDsf~4Wn2x+8bk;X`4rE$`DX@WFKnk;=K zO@p_H&62*B=17aBCDKwURaz#klvYXKO5aJVrM1!z(vMP_v{Bk5ZIOPKwo5ytoziY8 zUD_w@mkvsYq{Gq?>8Nx}Iw75uPD!VwGtybq|4G3>8f;1x-Q+2Zc3Tb zZRw75UwSA#l75%|ke*1-r03EL>812a`crx>Wl3+Ow^FwBUiu)x5BYkidbso`(8H}q zp&sr%3it5n@&9PL3-6{K_wC~wEAH;FjTM&+cX!zs!y#$YNF#0B2MqV&?(XjH?(UL| zB|$&9!}{?3o%8$~_qngvyC7H?ECLnz_wsJuszrT>Ho!3p3*a1uBfoB~b-Gr(!!bZ`bZ6PyLk z2Iqit!Fk|(Z~?doTnsJ&mx0T{72ry66}TE)1Fi+vf$PBy;6`v0xEcH>zJc4o?cfe@ zC%6mT4ekN=g8RS&;6d;Zco;ka9tDqq$H5ceN$?bS8axA@1J8pOz>DA|@G^J>yarwe zZ-6(!Ti|W*4tN*52i^xCfDge(;1lpUXo4@mEbuk>7JLVO0KbCYz#rgG@E7KiJqUp6Lk*yY 
zP$Q@@)C_74wT9Y2ZJ~Bh2dFdD1?mcQgStaKpk7cClnnKTQlLIiD%2ku01bo&L4%>8 z(7$d3gWf|QppVce=ri;c`T_lfenWqtzfewJE?;h6US9!UL0=(X5nnN1312B+ zSzkF{1z$y9C10E`-dDw!;H&Pd<*V(h>jQlCeGPmKeT{sLeNB8#eJy;ge64+LeC>Sg zeI0xqeVu%reO-KAecgQBeLZ|VeZ721zGPpDudi>AZ?JEOZ>SIQ`FwsK;=_DFAK{~Y zjF0sRKFKHh6rbiZe8YVseCfV1zVW^c-!$KJ-%Q_Z-yGjO-y+{)-%{T)-*Vpy-zwi~ z-x}Xq-#XuV-$vhN-xl9?-wxkS-!9*7-yYvS-vQr2-yz>&-!b2D-wEGI-zndD-v!?# z-(}x5-)-L=-(BA$-(%ks-!tEHU#9Pc&-OXKm%dlNx4!qj557;n&%Q6dZ@%xopT1wd zKfc`lJpR1?{Qd&|BL1TO;{MY9GX8S@O8(0Jcz+dtg1@%Ej=!!S@F)7~`y2Qh`WyS3 z_?!Bh`J4M&`CI$j`aAkN`8)f&_`CYM`Mdjj`jh<0{uFIpN%JJ~%&I04@R-g^R((;Sz8uxHMb_E(@23E5H@uN^l$; z4}0P2a1FR7TpO+f*M;lB0GtTdha12R;YM&{xCz`8ZU#4pTfqMaO1L%L7H$W(hdaO> z;ZAU8xC`73?g96Lli}WQA2=263-^Zyzysky@L+fdJQRjtAMA%=7=ck3gK?OIDVT;s z@V_+%%)&e@z#=UD`zY679X8-Jco;k!9s#GrBjHi-Xm|`f79IzWhbO=j;Ysjhcq*I$ zPlso~GvQhAYjyYNf+75o~01HXme!T-VU;Scaf_!ImY z{sMo4zr#P^pYSjEH~a_w3upg}21N&Q1Y!c7K+Zt!K;A&{K#4%vK!rfXK&3!zph}=> zpjx1MphloppiZD(00<-o>IWJH8V8yNngyB%S_E1K+6OuYIt4lhx(AX2y#xIM{R0C6 zg93vCLjzy{3ity402~Me&;S-70(?LSNC7#Z2DE??NDB-Pj0%hnj0ubjj1No*Obkp3 zObuiNrUj-4W(4L076z6CmIjsuRtMGv)(18QHU+i@wgt8ab_8|>b_ezZ_67C_4g?Mb zjs;ExP6kc~E(fj$ZU*iKo(7%;%zzch3cL=y3492A3VaRx2>cBE3j7X4Au)&t$%*7b z@*(+=0!TrmFj535h7?CiASIE~NExIoQUR%oR6}YYwUIhVT?9Z9kp@UZq!H2>X@WFG zS|BZv)<_$qEz%C@h;%`^BHfV`qz{sc^hNq31CW8pAY?Ey1R08eh#x@_48aisp%5Ag zAuPfn0wN+3A|o22Bg2s4$Ot4I8HJ2N#vVauhj^oIp+@r;yXgS>!x&0l9=+My?BSMX};o z39KYm3M-A3#mZsju?kp4tP)oF-|8?9i^r;930PIE8de>vfz`xnV|6e9OT_A94X`Fy zbF3xS3TunC!`fpVu#Q+~tP9p1>w)#clCa)bA1oE?iw(dAVuP`vm=E(~@V{LOh6OPK zBQffK<1CEDcud4(Ou;lv#|$hD8-@+XMqugKNNf}~8XJR+#l~Udv5D9uY%-RCO~a;R zv#>eXTx=e;09%AD#g<_!uvOSV27~7*fH!l zb^<$voyRU<7qLs&RqO_K6T6Mw!R}%Yu!q@oHPdx|~7o?{mF0?Wc)V{foe*k|ku z_7(eq{ltD_f3Uw;HWtC6aSxso&yDB7^WypOf_M?UC|(RNg_p+5;N|f0cm=!?9*4){ zRqzD7DqaJxiPyqw<8^TWua7sto8V3HW_WYF1>Oq(2OQ(=@s4;WyffYf?}~TByW_p^ zB)m7Cf)B(8;Y0ADIEX{I9}nONj^Y1i2RMb(cnA;UJTBlOF5?=m<7xOXd^nztkHkme zWAJhKczhy037?El!87n__;h>*J`10X&%x*73-E>bB78Bv1Yd?P$5-Gh@m2V0d=0)9 
zUx#nRH{qM{E%;V^JH7+oiSNR9<9qRa_=f)0>>lh9>=o=2>>KPC91t8B926WJ91;YBfgln@gLp6)q=TU#6Xb(ZPzh>5J!k~e zg2RI&gQJ6Eg5!eYgA;?3gHwYU!D+$i!I{BX!P&t%!Fj=j!NtMl!4<)k!8O5k!HvN! z!EM2v!Ck>U!Gpm=!Nb8L!K1-r!Q;Ub!BfGr!E?d$!3)8Q!Arr*!7IV5!E3?0!F$2` z!NLv$f}5WR?Gq7TuZ7(fgp1`&gaA;eGuBp|{^P(+9b6AZx;9KjO;@o&IJ zXoOA}L>e)S7*32J(utA8XksieftW~4A|?}4h^a&dF^!l(%p_(JbBMXbJYoT{kXS@4 zCYBIOiDkrcVg<31SWT=U))E_uO~htm3$cyZN$eu_5c`Pz!~x+CxIx?`ZWDKid&GU>A@P`aLOdm&5zh&W$Rx4|n{Wu1 zcuBk>UK4MKx5PW*J@JY7Onf1}5#Na)#Bbs+kxfL1C^82bLwd+uWNtDKnU~B*79b0g zMakl139=+viY!f*A(GQlQqbiWG%8bS(mIwCX)5Z z24o|$G1-J{N;W53kS)p9WLvTw*@5gxb|O2IUC6FvH?lj~gX~H6B9q8u^8ah&|Fg#b zCyoEl8TXMe86f`+T*)9wkR(ZwAu>#|Bu5IQNJ^wkDx^WCk;BOmWI8#D97B#HCzDgi zspK?rIyr-!NzNi?lk>>=0xs+T+E+8i@Z(VCGU~< z$p_>^@)7xjd`dneEi#jQL1vLQ>5wk@l6*zJCf|^6$^XdrO^&>x>DV!?orqQVqIu@pxMlt@XGOevH~X_P^wQNyVbR5~?^8cmI%#!=&`3DiVt z5;d8cLQSPIsOi)!YBn{8noG^27ElYRMbu(y3AL13MlGjSQmd%d)Ea6nwT@a(ZJ;($ zTd8f-c4`N;liEe?ruI;KseROb>Hu|+Iz%0&j!;LbW7Ki#1a*=+MV+S3P-m%g)OqRx zb&H+nTdPF^@o={JzXVi1bq%10vdO>AT zHsw$*^^$r;y{6t!Z>e|Gf7E;G1ND*mr{hy!sISyF>O1v=`bqtwep7#_zf?9Ap`z$$ zItLv?d+3~WE;=`zht5mqqw~`R=z?@1x-eaYE=m`pi_<0Ol5{D$G+l-+OP8a|(-r87 zbS1hn?WL>I)#(~^O}Z9co32CGrR&iEok%yJ8`6#F#&lD<1>KTvMYo~b((UN>bVs@q z-IeZ6_n>>xNpv#Zn@*wo(5ZA^x*y%29zYMI2h&67p)^QCw2$`FFip@TP17MdOfxh~ zb2Lv2v`9;|Oe?fXYqUWRr$^A~^hkOfJ&~SDPot;PGw7N09C|K2pI$&Oq?gc3>1FhC zdIi0TUQMr|*V60g_4EdMBfXj4LT{zF(c9@A^iFygy^lUXAEXb_hv_5qQTiBtf<8^3 zq0iCh=?nBl`VxJGzDD1mZ_*FwNAwf=DgBIoPMdTl{esS-ZQ7w-`X&8}eoMcj|D!+9 zAL&o@7y29ho&G`pqJPtW=xjPdM}?w8IYKcZPbg0)Zzx|Vf2csHP^eg_bf`k8VyIFm zHdH0#4b=+O4%G=Ih8lz#hnj?%g_?(2gj$ALhuVZXggS;gg}R1%gnEX0g_1(ap_EXc zP->`uXkchiXmBVHLPA&w4+TSXC=?2Zm=GJ{Lt;n{sUa<7gwjI8Lg}HAp|PO}p~;~s zp^VV9(9F=R(A?0x(EQMX(4x?i(9+Pd(2CHi(1y^K(Du-d(4NrV(7w?A(4o+Y(8

5l5@Z~P}xu3tMR|^ zzo~ooFu96q|GRcyGM&zlbdm{#NhCWYKny`T7&L|moe&^Gkj{;p2+c7nB1$8m=t1d8 zCbxl*Nk}jRFpZ!h2R%XH97WIxf^s;B;UXY{?(R)9VS*49Zy`AE=UaR4nVm5?{(kTC zoRjaDtars~*20RbgitP@bdTY|@(j2^|VLZXv(in=oj76_B z=he61jLM8*L0=xuIy-rXW9Oz}?En32ci;Ox^9uNG3-&#o+EV(*90wlO?s#BYG4KmG z$c{ZLb|LRVL$|5;ecp#G>HC#o9myy$rH@NTj0V@MNB8Tnxxj&zfxnzO7uh=45hc%e zsmEB)P6BWAgq`cnthR24F|G4REs=C0EdV61V#R}1{`BD?+7xZy4^NeDEdCo` zvwdz`+USY(Lpt&v`r(E=j&xVSfd-7H9^AlwBrB^e z4Femfh>UDKkO=mJ*q;u^c_*@h9Z-wp{t=6#)@eomDvP7O3&-|jH z7RPqkAH}La&1GE8uQc?AUkbZ2z*TYwu0_uu9m>V|;3}Hu(bac@V>7sFk829wbMQkt zJF+!2hGsK{uV3A}^Nc3Zm9q-072^3B%?G4+k?y$F+N=@A{}OFVXKW2^3hB9wdu>C! zYR+HVNWq6L_HYt&+mohCKH>S?CoO-(M#i7+?7fz;SOMoX8ZRBvQ)75XHRp`5`kOtE z+RJ5ZFWLk3H`$Zx84Qdg;p0#~u?LvD(2q;!Pc8~f~$H=;90@PChUWco!AGTacJvC?J?Q+NH0fi4&6taN`p2%TKBfF zR+B%@m{KCGy0q!wOTkvKIE-S=y*`UQ%D9NXdA4=lLz&F>H3{Uz`8n&b>;9HDabAxO zdF>PeUTY4#g|S*NEioQzP7{w(K6LU6rT|S-9`ipjlQ)A7@{_+A*~Oll)DAxIj(_nk zP1@s(AJI3_)Pn6H`_B4{_etX4`q;5r;Au= z=^FY`FeCc<(2ShgKIG{GI2G~*KUqk4NzRF8OxOoQAL!p;;+SSThWVJ-_32A}#nj(K zA9%pZ(ziK$Q!WEu`MD2eQXze>1g}^AobK~|<#IPShF%7o0&fi$MIXsZsV?7EG}e@N z;8&1+0mx|_>`|T!JduwV>IUUiJaQc_?RcJ_KJnkZh4fW z{RnoD&OK{3Kd2kgR60;|ie$r>ZM9$6GI}G(cu$7qrI#d&vfG6lcv(C4iLF{gc=Auz z`ruhORezi60^8~*z9S>G2V(FdpV~Ukzv$crZ+-fAbLUOu1JxZj8(7B>$(&{?TNzaITwJs3WM%V0ByG`k)_mD~jVH_5iq+icAF z_%^tx484a<=}!)0A0AM;Us9%&YhWKgt<_WDhKlY${VD0qT}F4_y0YubI?YyK(P zWnE46{&o)=ST@(r7_a)DrT@zN1o-Q>;K$);O*5$^U(T7nQ!*nz3+%5AfGvMlU!U+d zupdSIRnh8T<`YYo6GOVhVLu1#ZsvE1e<*ke=PQt3$tQGPjGel` znAMB9FELnCy2!h12*$k1{w@=&=ZD;neBv{jm><(fx(oW=>U<#?;PW??Lw@3Ydu#v? 
z)FyjG*(9GYVNb>R%>1`kzb8>As?=19&dJ^vOpkhl;7?}?Ik{JZhKaX-Lba6U{tj(E5p+K8UqF}z>?qhtN9VW;?C z4&`e(btGTH10V2y>**|ve4APPX!2Q0m+O3~=9900r|u9*fv5Vec@95}#pyGlywD%) zk42%q)6eID`X(x;^!HGH+yEN7aT+ql1e3H@;buy*oq~4H2-{UYjO6UlS$*a* z{VtBompI#2)SYCeLvfbIt@^xS+t8=tEwL}N5OKINRU9}nY4E3Z z4f~6C9Qx?=w|jRUN*U?%(bV63Z|~0DGZcU0`EQjnM&j$DTCoLV_0M5DaT(r9ic#nj7=F_CPu{myba_{-@ zL-e&zHzGrGp{-zkRQ1An?L&X>ZT&QFXkKh)Bd3e#|MT8sp>0I@yM_#)!`WfqocC4z zOJd(Y#rKndBlsr?*Mi1sU|SORVAD?^U2~Gs2hO{iOZsp%#Ic9v0-Y7aFUH5md*gkh zJo5tUT=A>wpT_sm;Bl1jD+YFxZRZWq6p3*Z-WU&B{cJB~8tke&wxc-wgxThiJ| zuW@%?yxk+?^8_@V%sdh4f)(UtO1i7au0bE4X?VL=?YXn-&BHspo#EWAkKZ%H9NR|Q z_%k199$x82Pt>mp=bJwdVlIY0tnXSyL+M|QW6||Z@|*&?4_&J=Sv%(Xb;e$L-_o$Z ztaNa530((`npFc0)-y)6BqI0)G`ishN;N{<*{?;>h!dgD&? zzqJoogF{&0>I+^AaGmPrx_Ds9-|Cw~HGk^P#`rjh)8{|Y>7wz4`Pn1VX5q-M^Uk{T zVhgy4=8DhADs}?+I;Y#jci9u7rF6IG$=v_Iw36nY&GkY5f=uHE3CC7u0Eu=kfQ-rAUO^itjr?z>=mJYCpEonU`b=a-DRQ1iHF z%v*JEx$CjN?L8-qg4YA*gd;5fgE@f?TRk)+kO?~<$o9F|;P1_ygRXtnI~=&*8vwUA zkX2tgc4Syr-*fi+<_7l^S>7Py7Una|!=s_k@56eUcha`(h6aYTE$dMRePVSRy3ekO zxSMP*-S)${PDQsp0Is6P1kto&d~fZEzWO9Nel9kikTbid^=;bZ4N~UFVC+TdANqIF zJI~SGj@T@ZaL)?)=JT=xUyRDAFYH&;76ErfDVVE?Q#iK?SZUICvhEoDr?hE4i@R%i z3oKs@&XT*>%<$VYxjX8a0ar#vov?*i^3cS+q`1085z^l~cP z1rX~;+Tb6|Rcmh?Z+4EOu6$00@ya^$ap?r%Dm+EsCe=gcFqqg&KalmhXc?@3C3DcB zzJ@Z94uQ`-nj55FWE&!>iQW@ojB@3Wq&7MLe;;5e85 zN!M3viW!pa^nsxn-_M-rn^T+7-E-@kPlGSJirrn{ULDrvRh$YmSb*GW9lD0|ve*IU zb2c_+aIWoXhxnN@g>Qv*26*XQq3Cg;`c%+fA^fRSYo~CJJN>nKl=;xo=2FU}7(+@o zRcHurL>Jw$^^5P)U+@!sb$6jt3g%wX)WW8Y%8SmLdw)WC@t3=wYOSH1i?+f+b-!zU zIF?qeul==u>`lsQyzhMuKTUsxqu>q(rj1KEgILLHOwa9?@dgb8Q*r4y&t@C}DyVux&nEiU0vGLomE_AT+@CU`DQ8x#DMj@+OUuMuBs}2tK z-OccI-NH}$-`NtZL8X@_6_K$b^KA*bfnVCWse1}#>|BS;sjSu?P3~dnDc*w|zPj7V zt>^3zdAXJKYsyYy8u^1uCnpqx(mA5!$sXx+9B&eROCw{iVOQ$A=B;TZ$#lmk&57{e zkqzyz&K_K?)I}HB)#%qA-EAwmdl$Bg-bbDUJ(LffACTXr2|O>zHTkO*6GUO5y)U9(y&iK{cgSmfG@muN}K9{@L`|RocFXJEm3-xO* z_e~jWEqG>D)isv&Tf7-Vc?bS!Tn^5epG+@`)~06!G?O0$UT|m3nYB#0T4R20Z7Nl3 
z*KjXD2E1gaHG#(kdFUsoH{Lplx1ln&^To4Wdm`Wh~TgBF6u4@A&!4y+QL=cc)n&Ic8;jaP`#0HD!}% zHbcG6M4ELIZI%peo>>d_^cVZdF7Bg}zCk{k;4ii!=V_1?2l}ygaG;saIc0!5t#WJQ zUcQS5j9vc>jaxSu>*_CSu2$(i(c+q_sb{6*3Wm3ev~RP>M|N7Je`cv(fj9}iYmPYL z=lyUrb}pd2#_Lf*{HLAE&^`Vak!x(hy?mC(yp7)le9P`S-W?BPUh?>(@+U4Z`PGZj zy^7ed~}dBV6zODY#+#RmPp- zzlN{oIp#R7O(S+_k$({Lg~}YD{*?Or^T#Lp`=hae?AOLnMtwbd6grKv8MB1;)_TTS z*ZmGKBRWRqJJ6XsclY+BoH^c1WRAECZw&LrA>hZH^7yg9)wsQwb)9f{T`($v&XrDm zI)(#rPF7#;RvGeMNZI%|>a&N)95RXd=cRq%@CIp*gM;|}Y%D)h<$Vn9Bv$&*htKgP zm$dEIRi?l^A=#m?_r!1zUqy$)cf!6(7I|Aq^F$UMAb1J0A|ZIlP9k6B4kBl@NmUFi z;x51tw)Cb`#*Q2STX8t=7HrxVJ=FfUVmL?b{~%1$-L%tH4p^CrVj2tHvGU#17VA3V zn2~E^?%j65wtM79ITjnYg>usO(&ZD88;7z@&>~Uc?F!(2iaX$NDDBL+&>+t{W;4xB z=@6B3z-t+0j-|}6C=>BpybxfddCjSWQxE=#-&8R{C z&)+r6Z-M?Qmqp)8&hQbnCV?S%f}=d-e=ivC0fy#X`H)mL-H9H6$KuDFknZvUiRT(q ziX{`hcZ@r6Bx|bVg0PJ82N#36MRf&p8hL&7+-hw*YcK>OSpN^m1D=)5<74ndQ^}wD zyal?c&&X`8r4-0Y7MPkl-%DTFleM;_+C6X|FrGQx^3U1>*Dy{+AI(7~Z;&1015N-p z+JcvUyLd!9l1H_XAYc5JjL6=-n*0>s2l4$G^`lg+$qtZScZ`R=DtL+E+vv}H^rd-G z^N?(xNvDBB*}}83^}6t&eYLrD=8687qHzXXbZ1R#uP=nSvgSM0`lKm8iMBlObzSIy ze_D4yO@}teN*^*G8Rk9~n)gQMtxTCavO*lxRs)!J|3LVb?3n3M{fTA4S;Jj;?RDJ^ z5z5?)m;JflzSG)H{xa;4Y{VbIlpXQz5LfmgT4kd|ZQjV(am&G;-tD}T{A_pHT+RPn z{?9`q<`mxUedGk*J(}}&@0?$y`|fd|hdnvd5e#JmA zE;rl!;n2%q?}p{HCZ}vez9`D}-3|D}CFpBxh9@p5`03%xy*BPoR!mkz2eliggK)}o zzrWgoKby(7ebW8u@jP^7hvK2JJs)_KyWLbCUD46<-K_uJl_?+n;-7L`!LN0k;x`Rn z>9rw;8wcS7E*7C5aW3U;^#}*a!9W}Y4_frKKPhZq>CA&28F#Ulg+ACy4@710H&|TX zrLyo(esbph$?^V1e#nULQ8{e38QYs4VeER)HQ2YJ-u>7I--F-x8~LSk-OLl`fEP9` z_G}azAMHOkBWn}Ek2#{lsc1~h_s7$>L=hP&F&4p3^ZcJU_n`X=bZ0?;-;MC&Mk|;7 z_^m+3*h>z}2|t(q1bfL^hemR$7?hwsy2CeBFDdz!|00wu2q-m}il@8$)`oxLj86UD!Z@Oug_Sif08=15) zI)Jg9vxh3DGTbk|4*i8p-PlNDcSn2^U*hz}&h9Z?0bIkJ%KEAUUFJ8Bzs}ppI&bqA zdBeL}^>6wj??HGTG_3kdUB+gS`vk+jWxKu(-HNQ&_ohRH3wUgnzecpwzR?BXDp;v5 z)}bN3bh@J{x7Zt;d&y%gPu@6ssn;&y}ppR-QRe;bu;;TU0wKYc9 zPF7jT5_FH`2wl4Qr?Z;;v8NO?$J#y~5#o_T@2DK|0ejBY`JT-iM<3QlH_=S|j`KtC 
zqk8aWvv{Mr^Qqh0>1o_p|H;Nl%RHyx)dm;ZYqw0F;3W!+yky};?-crbnc-||alLKF zblr~r3dU@50Dt)1$CKT}bc-L)jtKn(oEeMycnocCI&7JD%cOQM;ojgGSH9v!%K`}zqES>&G;#}{tVAmPXp-qPJmRk9ndeYEw7_@(Q0#q~H7X5#t)ZzQp@tI-hkg zWzJ2a3$kf*4r$MNjKRqT+Oq8cd!XLT<__~7A2!9ycFy-6KcmYVm_IeFYyG5(A;C{N zuuR2S^O|A+?0Bwl?zvp8T?~DiJnTmLRB>*7EuA}`Gl#$OZf8EYI`@wH!%s!yU9RTMX z`*v@Uh4>xQ-tO&s@7uk%@E^sA**ek_ZdYT;(Fy-Sx2tx^;e-664?o%G9^%HwNW1*7 zll>Imk9M2<&mDcTZ}>h6n*aQXX7o#?{=^gWe22b&Z3<(O--FHh{_Xfc5~SfP?NA$z zTXFY}#z;T7i84#m3Vk&z$ex@!6!#k-|E?p@>DQKqZ2 zF_ilju7WpZ@Yr^S&d08=U6HRet}axxx1SxFn}=>#;B6cCjE8L9mB?L>{;xC?A9qn# z@EM(tGMe)X+@+hpAOFOt#yodaBUReiNR-)!HlrF%oi}(tTl-BfU4pmy_hAp+Py=Q* znLmf|^MZ%|yGwRr_dBc)i4(>4e9`H;sh-N;h@S=f$-A>|N;fu=xf^SmibH}d?K$pEwCDNTPPmiL6|9F> zGTCF9_JoE2mEE zx!K8_|nQ!py@Bcd$P?H&q}N)H02&-;zM=DLjJrF$2&WRY^TgzV&DG>jHr%a2;Nb^ zNyO^kMt!YuqW;PTRa~oZ=`7)M%45rv@TI%Rjmz48$`gJIdvo-?V`wpJ0j&S<$uj6;&Vqi&0~FM%60x}*93H<4(i$N`9rs5^#W(7=eo=4`G{tfhihLZ zUR>w?GOXPci#wW_S7_K5^ZNbII6HGsFk3+f#UMKwYv3q?b2fd#dEUi1&G#=rOLG9Hl zM!Ol`4)MEn?VW!r?C;@>J8Z1hgyZ?&7*_Fk>jA$vd*zZp=&E`j4$E>UmhE>umYVZY znXBoq``>93^>1pd567qv`;gPqLzrrT7gI4_aJ0?;8u$jfV{o&AvacV9@_BjV+s>3yoUtG7lH;UCQU+}Is zTF`6K(~8R>&UgC6LjPHyYi4qPGJ4Ri{diMPx=m{^;-Pm;{h-tH)~k%!*qzFpg<_7x zXITmFRyN%63a_3(Uj+68b0aXLvmi(mHY9fpqL)4Z&7*XkKf49oh4(=p)Ollk#@D>- z4UMN3&bvyt!q4m!)yd1>imWqFq+ISaIYMxxgQ_<;CEnWs=R+tLj)APsM9U|mveNfo zq`Z}lqUV3uoYiP4HerKQ_y1u50T<&2>Ab zSvoR?(ZixkCdc^+KEEfKV=W!r-(B<+UyhE=G>7r!F9UzxW6%{HrT0q{CG-<`%766E zD(Pj9zS}*{toVVv?qH9G@rO?fy^JzAHk&Q8a;Zp%m z*{3-5(O8)y!!k#O^f-cXzcrMl4}XF>A^paa&$~)Jo5;iNuw&#NJMRVb#V>lQWJ)sf zS=9qC?DA9LJnIccd2jZG7bB}yx4jZ*fVQuUc+z!z)+_UB30Z->1)!Fa!|f2Ob4 zK+#BN+B81~pH=_$k2^d6xMp|n37h)qBV5h~Uk7^Fy;irA_Y1Z(bVlBearP)qaD?k( z)`P@nlmh-s)^&DRW8vWgoShS?qcNa5U$f<_ewn#(!eFOo>`&9??&s6y*!8=6ujBLY z$V#Ly)V|t_w_o6QA=du5uzmWRokE`EPwlI%+i6>86SIt0?WahO{Eaqmc{FXN(ANFv zre;3(JeTe}TOIHo`w=(~hBExaKkR+p$)R7$k7WE_@--GC8P|8g(HSOe)E$}Pwe|LL z|5*dgE2$!P_JVIj*Gu?*NvvP5+4G<_t~Yyj2^~p$Y5En>^%D9vhdL2m=Z18BG9!L` 
zwQs!JJ5)GN5{>uTr@8a`5pKTae-ZtDxpQ~#G(Nw11GE6|a(G^AUwya=Jfe9s>cd6g z5cQ$N_fo76-w*pB-iVJ;AH|1ws#5-R0_R)DZj~CcC|~4g09Ap3Aq@M;F@aU9tmm>l?dy|93mKVy?8N zu?|C_rEK*Zd+;0VZ{6{H0PWg&fpIEV7^)j6v|VT-M+7%b^| zok?m!-oM?>ojc~%9kNSIem*w&)A;PGQ`KhN9%fyVIS;r+;6^^m`J9n)s@2-}#ml1b z(muD~uVZg(NW*2Fktzl6g$^cuFG;>3HYinCQBPK@x67Zmi#Q=C!ye$B%puSfIszw% z3pn>%q?PdpbEk+?BF5v<{xaZh_b{HLp6>V1n}-=<8nWh`8fQWu(3#x;7woI5hZ`Q8 z!h44q=km8EcCCFVa|C_bwJmLy@Sm|Vy4IVBZ(laayx;WqwH+GHF2S6K*Z|%b!KEv6m&jN9tubX>zyl$%eN1yBXUfGeN z-+Eu88GAQ3^0DrOS&F{wB29Ls&b3*6;%g7+7~tvm+uW7%Abr*M!K6!n-MicDyt^2r zRb1jkioso=zxbraPB35CIizHLlUdfitp9+W|7CX;n>SIaIu*+4b5QP4@PLLz*{FO{ zPHz%Mxa7cNPT0QM-ay;R`^FNr#r*uX=f(*{A59PhHYj^kEEx3t# zdM9avg}uPS#%^fU-3zN??U!laz^|Zh?f8I9t8}~Cz6cn?Q*B?YwqxbA-e3`Q6$82=BV$1k7{&yIh)_dz}* zn#+c0{eADwF5Q_9ZKJtL`s|`Ic;(p(jNJz|%B^DU#d}KVvfZ4S-e3}* z-W$p>f33rSNFw_Vx>D=L1L%*=kx74k(&{ko^9s*3+Ia{aHVfHk{!w7ZM)UkwaL{<_ zn=i5bHWg*VOGa3~GY%d4dBrl$8sPgx*S3b|gP{$&l=X9^h5C%Au5r~`8{ex#+Vq=s zT(HM})oSTf@x?>NLK`w1legguOK@M?(jgV>zRP@cX7V5W2C+FbM<1?!C~c%m^e-O# zPoFj~oS&w@*}L-s$+=`)xWEhH71vR%7dt(7YJW0Z_ichc?uGEE2(6)$#(1Y2uGyJ0 z$3mY2Otp3`_Oom;lwx)}x^wr@Wo+r+*VB@t)@J_&d^VfMpqCXFn0uz4B)tWEqPtKg z$!^SLtX>-YuB6||CbLPh2LKHPpM0xl@YkNIKFbfyeqL5V?mx{9$GM_?3;| zc3a)yt`EZ=rKxatn%)P>{a3gTi>?yg@E?o~Wy|^xu6-zT7dHBN57Jx9XX0k*&k)L9 zNK8(&w*Em`cpK}R`A97N`>}McbnGkt!C3kkq{rvTnTz2`7d)v}YZq14)bn0jUHpmU zR&(Vfv-uULIp@Cwos7GgzHG+FupS@7J@w~FE9O_#3whp9;4^shVO3pkFqHVMIKfDLwwYJzv)~?|APD_p3=Fyi}bO; zpY8?e(lMzVZK{0f`i4uM;7zOsaP^&pP*ayp41cev#s$n``OWW(thgRj9VKo0LSg*GyMo} z74c3+iF5e0akhJF<0schKdo+99nD>dtIX0Jj~*?Us~dL^dzf-p;Fm?$4yRA(fVIr~ zle##|gD*(&mhv@8wl()mXN-zZC178agNFI_4aEow=e z=MBFO)4n6eJf!>jQpL6%GpRqe!dPS;;Cw2yWS_*{R#S}WE zxqP3lx-ReX@!jOtHH!LVURk71=9shf*|m;0L-_R8A&2et>rC5@d3??V2NxVNQ^|90 zYW(%EM{19;Poy!XeUK%cw{kYL?0-K@n^;wBHNFY>IoOk~+}zOklW#u7_)AP(hhF&} z{wI??AKh^7?%tj3@AXKInmdWrLYCKZkBVe{Y;i-wh4+_`cXgRDRc^2I{#c_@;{812 zYE>8V=JEELx29gE-W9IKC1cA7##eXD14a=Ti-56~_fy0>z0-F$b+>k{u`+NUWf#HQ 
z?^9nqZMC-CIkGc3=Z0Qk4ikSPzD|Iz7nRr4ucpnbO1Cz)cCM-GJsG$7wH?>Pw-#uo zI~GN=PRd*dPgg-#@$KOg@YA@D*AkRnQszw-XjKJ<-aHG&>dHm}KkXew-bSO|-FbM| z)!vxtwKob?-ctka++sWX6zz>-z8(IxH~zZ(k=kHrI5V2BS+`6K^>Sp3&tYDIzV)j^ zT`Kt`#`av|U3(sRAZ<$jnKtwIf8u`TywDG&wapyt5^Iwbuplk_-_np@5V zk4#bg*IfYSP4Ka>h4a5rUSobw2t#S#jKPTNV_StVY<=6Gf%P`UaFTr_+w`1J?xHr; zpX(`?pv?(uWtHg36hZ$9?ja{+DC|4+pFacK<466tZ;9u(b=5!O39R&Q;L zCktZfgPhO#_8!>r9>XxOAxbu?F$zu;dyT}HK-&->10N(L@fB(MDan#j4T_s{{ zWh2Jr@7#Xc`^Zx38R2iy`=}UBK|AyPSouELn?SjEy>YR6AB@rF;8^0mWLYF!Wx78 z9{0PzM#U~|Eg$`^H_G`DjjM~Z>ztLdv3YOMk3(r^thoXCGH*8LZ(wb+Q0FjP%jFS# zKg-6_Tw&!ixTEa)(^<1{2dMH?&Y``)U$ds6w$-jF@K4(X^#lBCr#d^A&@c7zUG!0F z=O1%^WnXsd9(=Q1_oU5Lzu^Bn=)fn_=KK7oLRj~3pXnw1j^smU97ShKFUjF`5ABHZ z+G6~>{jbO-Yep<$bWw2M+=^KK&E&fsrjN&!7>_rQ7u9KtrC(2aCpubp*GZ3#3iCSg zt*(sKnWpp{|KT{P49pADzcc{e=R*7h@A6ok%VT(dCYHV`hTA7&=`Y68FC;yZ*E~At zu?qJAmKdj<$PDr&8bv-I$*ay0tTXF(B!;sWn7gm87!Q1O_0jn?^>x@Kvg@$7{E5un znk!`gO8(>%{Th3(^4FNsn#OJKb9QR}z)rFIgLkk$m;;9Ve>q#1wM@mFfCo^!cNmCuxW zA9L%?^uxS)kZnWx{qrX}wa*8;fVQT5w_gq~i}54_jidG@7hfbVKEIj)`RAk0;`x95 z3v(HGNBetyeZY6FJ~*WU>(w*;CGf6Qyk)#up1GTBf0|w1y`ewyK_v3ksVVn<+o$?p zH0H!(%_n+~syZtH96sB0uTIogyM{cyzpn?;*Yg;Q(HK~!Hba^HIk?8hKuvkMAYXF( zDEU!6bVKbsLl5vjb+F^thMwV%7(LrRI|l240kGZ)^91MKI2>#y*{oX6euqA3%u8;L zieSZbz?)GTbH&$VFkXnI-x5oIK9+uCEd9w?dS@*C)L42)Ed2{Hd1xm+7wR0@a;pl` zi}K0g)3I~Dd=@5K7?Hv+(N!CU-!<|}V873>zUD9G>!FB^d(6d-QkwVF;|5!5<~XROOcld?~zw|&F3GWKk>1% zc)-}0I)Ik)`N!!f*^><=|5rqh1Awh`V!ifBgA1TR>8^enoK^0Z?@_cD$?xT{^kFeP zbr0EM(SxxbAIF1Yb+3%cgNdbI5ret=&;4zk87u!*EdBgg`WvK2KA3sP)g`R^Oo=$G zLciZdetT10-*6J^U(2!XZSmGOMt7~}%=c!_A8*BHh&~>F1M9%=Vx!8gUXYv!|0cQk zzonbY+|N*1S|80l4ap*O%&%+=MHWog(nhm~-@gl=e6?EpFMI{2x~h@tT-C^SB0r=J zB`-z!q*tvy!Fp>7vY5gjGRWXRs}kc&8#mLRl;9y_k}s`S6VAqZnzSlLUZd$7{FZFzvGZK|hm+LG6^NEGs(j}Q+W6{|m&bmpB}1VcVby|EL?s_qiJletTB%KF^O-RFR7=dk=H zXfHWo?<-%$+(7pfMYBb|VjZ2pr~81$bTUVMbW5h%ULW1rUcZyQv^MtnG>?gH@_$>O z6uu(PI5u{fM{1ED>S61LTG0snRI9OPIdg4kN&6u!qc!_k&}!2+`)SpP(cpj>4PK3< 
zH^tJsW9d#TeMc<)?LYO8{nujYe~P8|#M0j+JwET&!#vHEI|k(Wu{;jj?p5!VSl)}V zwsywadN!8+dMy3PSo-f`>5q~AjjXXTQe%mQ(w)?|@jK$b_Bb-t&c<)*j&DlDAT>VZ z&sh88SDVYcH_}O5sm=+*|My?>;{!ji<;MqR^HsKhBm?XRI}4fP%;e-O#f5Tzw>f93 z!yFl%$7m|7CcnkL5ir_izr+2Bf5f(>UJKvT$h2TLZ~I=i?tV$(-oT|3qHW-uKimZ?}l#b&u@-_=fu0Gj^!N3aQHwQ zDxYF*nmX>s2fp`&A0L=>&-9oFHtP(Tcuvg1WQUkseUJLWox~>MxzhW-hkHxvFEE(L z6o&^-*=NKSsZE$1&>nr5-L#{Bzeh0SpBZ&o|30DKoCc>?TR1QOo|Rfl2W6u5lYAUc zgnG=DD^{2*vFXrZ%~ppICk!nBN8Z!mAvwo3t9O>|ec^snd9A0nJnkJA z;HQ17l-t>OA9eM1kps_;QT;srBIvBL74d{Sl-QH1ea$=_K6@zrl-(KaE4tt$JfKaF>i=+s z*?I7QGV(~Kjc*Cd z|BCV(IK!ek^3`B}g6?zd7sxg#`o9EDa~0c!I*RkT5uD!Y4)7i<_?Ab{*|LvQRyNZN z`ouumX*$U-(m%%F5_F;9GT!{dfcrk^;^h8JY}%~4?rCJqcjR(w>$%UdPe2)+qqnlQ ztIfLu7M}+*w}+ql{{PiKXVd;EtXKX?yMHA=u=}dD|3q%aK&wGz=4n0;V|*V*U#DFr zzxY}Y+n`6X^c~4p1-p}S$XJi+j~=c#nYCUM`RD}7-P+K;^Lo*ea!&4fP~qOr9Q-Ql z{tczA^`2BduypoTHlvgMcwJ|7nxJ{wba+iU*^1R#$2W8ynRDF5Rp_lVYn#z~#{A0i z4jBR6#r`?)P^H+~eKE_aMHDX-hs8J2sr%9|8x;d_i(IU~H7} zzs1Lf^&(w;M;K3(DrxLuua<7)^Gv;GlWk@%KLvfy5)bG7YqDd}yUewgB41ao!_W)7I*;|Ieu&Um$squyZn>%f-- ziO+uR!T^W&tKXd!yT(f4FKHpqyvY8+!+$+v@OtSARVK7@i&ndr|+HYO`XSo z%Ma2f&wnH*T6a#*v&Jk!cg~_D3dGZ<1z+(hdi!li!@g->$~7L84(*f-b#kAN?zQJ^ z8#tcem=5*Bfe!Z$Wu66=+LnBMk@4J1nfFmme?5JHc{zmXy;JfyYLc!~o71<3f@GXm7GTPjjCO+Sh{Ea0o!&+aq z9_LqEcTp!rTba_*`aJrRr@uMkIEIg#xw@yu|-{c45dP@g}X`S*}S#h+1rT;%B^CK z-%g*}h&8Sdv{Uhha$dhFx6OAdr?YQF+yT5=np^6xbH09QVgCh9AuL1RjB4=u$tjXS72Ge5@God>RKh-)3aeNBB*?lk5y`dS&%`1xG= zT?zVkEIb`d`bC{%*dra&DAMkd!jMMESl@Dw>8kpIS16BZsN9^=n8w@!<)D%JC!E`I z#Ev32(mAr*n@iy12KbByANAwYF+5c6Zg2?VfrGahrMFmwwyGWJ{=sgbv!+Aid8O0p z!a;3{)(gyyI~4D}sA~*9hl2MqW2V$eUZq-F0&aIQmuk*&8Kcq>wNfBwhf+tjK})sL zIM0APV^^{^iGGZ=-|0hzG<3v8;hEGQa96hE{XFc3gyv_)f$WWwfi>^z#A{cHYotH2 z%MWm?{xzYVk`4~j{pXL+du!+-aGwHQrNh9#N9~FymfoF18eb+3_U!Fr8tRMsaUo@z zi6?b(e}eY2>Mk_mP9NYF^$yXDXPGzE2DGv^Oo_Gw9PODV_`009o73u9^3NNOKfD;w zh&7L;qiopYfR*A?cX37OI%A?Z%l)-m3fpZ8+l}<~9N^3h=M3rB9JI7!A3IUHZ3y`p z;=Oe)&+0sPWqr%I^^CLmx?`&K#&Jva-RfA4Kkd1T9(k90$jyzfohe>`FLnmH6JJEN-xl_4)H>7*1`w;pf|oI8FDHW 
zYkw#hq=~CJv4|~5qwl|OLHNl4m=*ttE z*)#io+Khf@ckhNhyL-RMe=GmH*zf-+^ZS&GcK3epd}7;~|NphLyZ6_8RyL;he%FX` zh0gMwJo{?AVKQVpscOPeWsAv|)Y?#|}1C z*2VsE%vDy_CRN%J;?W+nX~!P+nSyu8h^6Lzvb*m|_r<}=Cb4&9Lo;N-+9mRfr%Fzb zVnOra{+v^k|2jAuD*di{lI`(fnq>3}wU6DGa-Zscfw_US8N(X=wu}5&d1%q@LX$~L zktN;cte6MYhEx z%BhWRbVRCvonqdyFnQB}z1wFSg!wX7- zre*kKz(r-#A-+ExfUk6hjf+M%8Z(D}RO+fX54z3yGctb0QLl~Nx~6^!@;ME;OI8@) zC%{km?wFi!JZMtp->W)$v(MyiAPxw-CD)mrQXay+*EX&e@-Ftb=icZR9)|rv^@Uim zQKoY}JJ=)8X^8M`MSUZnkq$0^T@=STmyG^8F6p)8Kp;ZLRgjngpNVN!#sCf#3M4@SRNZh`Hi>GvDQVYJ;cR+eta? z(4$m0q$`e3b>(MA7O_pf)-YAcLec*gc22qL%~y4QUAB`m)3Rf+8MAK1(-~zJ=d3TD zZgiP>cHLMgtZ8&2&#C-cVBgSfcJZz`M$#SOfBbtDs=dJfF8-%82)E+w-5P%Ccl6(! z1QY(R=08f?>r?gle|sb%5=#zoGVLC(PsDfj@sQ zHP;_uy>mUZSNm$SgEkZFwOE)guv6$6x9Y!tNTKeg7u1%eSl_4N8TW$SHuelZCAh3V zj!FbLe~J1!XA{Tc^C{i?fu5l3IPhnlfHdKGE~)gE9*__>@>zm6L_ibns){JBgmodW!7AYebAxr zy_d5;p1&1&oaJG&`4!^NjZ?way|C^&jGg>5jgf~odHLZ>Ih!8b`SeQo22C4wt~q3w z-^=^w_vAWjYNv;Ld}mu8_RBx#jkyDdSVAisymg;{x{>6~djr1;1@LLE)RL)6Es?9# zx(w^9%w>M>dxrTxF(xTL6u)`@_xc7jOin>>4dm(19rL(kk4BKaryJLU2lv3V-o}}Q zl>oQpSZ7LAZ{BiZu5P{=8QIQ6fR$;{%@0}Vfp}aV(z#1* z!9U?qFth7F0+$ED<(mBL`k}!50r{fiznIxeZ*ynW&q0s(GM}&H`xp3r4`qelp5wPP zJa^Wiw<33X7jwPC+X;s!e3!Q)+H%Xh@8#CjhPj40a+$Z)Y_Cl{l(`n!UqAY(?p}8r z<8pyN4nBYX5PXbfY>EJ zw3VES{^vpC+w!y5pKli4a4Ym}<8Dy#1eH29C+CbpSa6+ z@_&SLx%6iuU#KU*oB4f)^s3I=>y9q;sed|qJUj1)`^!-D^ z%oq>v7E&Iav2lEE*7}c{`8WJ7VLDzpa!cdEd@vR(6HHD0o)Ff1g?i#k8}u9}y3zhF z*QtAqrEcbtZKM}CW48=BJfF1L@bwzn@DkV^%zs_UrQR%H%_i-1@Rd&5PQ8?=`lph9 zTPQaR!TUB-0H@*&kAc&?7*1^yoSJwe-5^<&uD_H%|A;weuFBC*rAwDo#`o(HmHRm5 z9-`dnFgYR@( z_P6HTPK(1C)cG24OVD&2eY!cM@A-T$us4AI&Yw)3Ex;9Bu#ex1+)gb|4rv|YyKcgG zD|hOf@cjUA*JdWxy3()X% zzMpec)n8O*ZbiSkl>MGt_1hAqsVbj@&qt1_)Q=o8Uu)kPlIbIdu+};U8Fy!S20BQ_ zb;dG~Z{B>4$#+8XU1(qbAhwMBWKMor{Xy3IAEkZCx8t@m2JE_k`L(LMnW*?uc8nCvN~%$vx|LOevg-&XLyf*Sh^Q-vL91*_u#ggdnNmt zxre)*+=RMQez@D6$o@3+JZSuHyG?mhBT-&iPr5(rcFfPZbrv+G^}Siu5bPA|L-{1e zSw87E=NV~7<$elXSqr{$7(Dw~7yN-Yw>W0<7Un+HWpC7<3r!}KnHLIy&uY(de54iV 
zL%#e6Z#hAXShOGUi7mZ5PXJyQu!HuCJ`M-lUeZm~$IoPbDj!-;!Z(x zhUUHu@~yM20lf-d%F=E{<1y+BSA1|i7iz3xf68XSch*sGhS@x&okf3-Hwp6R*DgX{ zROcJu^l8SZ=FRhv6XC`@UDw>X99$xJCjc*rys11eFnQ+>UrjyXA~;RtxA)Nrdbq0^ zV_f*gJz$a##|_dZ?IZ1YH-T=LUz-n3f`2w}PX!l~3+v*)HMvz5KlMTF9pN_lpGl-_ zz46pbd+gJnt3COuf9X-^Mebm~^vj06EcRa`-yt5n37Vz~Cw2bySbUVGy6!Ff@OQZQ zn6a1S2Ys-}U5t6`qtY|A!{PI?E0eX)m*0Ff#rR%H`W@wJZEOCSn&xW8e(QKA;SVK` zG2vPMpaZo3N?-h0q6K>^;=RLuujo;s@9xAyJ>zbp+@?l_byt+fyP1}+k^GztFZ1iU z(*nMHWdL6eiSb2z{vv!1Z0uQ{_;(Qb`=X!VG(o>x{XJ8`L}vwL56m)UVp=6=E?yh$Yi|tEvbOdnX>g}w?w|9HB_vSdeugq?$91W!P*Kf zoUd+zXVN>-n_XjbrqP}!ZW8~c-fen`HwO-+%qM=UwPB^9`?%v}&kM^wO*yT}xc{@~ zt=ikY+v{)lE@7vq%e>uNNxt2i`qA6HmwaS*@7FJ2kHFp2d-!#GdcU$}Pw%0R?dk3K z*`D5Gp4`)0c=7Gt$xrR+o&NIMy`TTZp5FFfzujB@GWUq{@8DmREJk|-hnr%(%8bWPYGr`?sj=5ZbLJLDKKpXM_tnB<&^%*fbnjA(4foX5;8SZe z@5VeE?q4H=e#W$oP@P9ebG&)=6n9<<*1M5aeCG9`<2Q6Cb|Eha{UrWx;h%F?rpP~I z9DkVUxGsc8Oi|BN(wLk(t`5^Oq+MmdFSKxJYjiVdNy@PY*AeW`2!H$Lc9y#-UeTDb z@3rO^)_2SPwKuNAZ+APqU%=eU9yR^YKE-{+N(%p9oJ35sL! zan3uv-#Mc{CQbLwjrE2TBWYy_T7GT6wA8-M|4hrT{R1sk$I{XjEf?56PY^AMAxey2 z2QAzBXxTo1mIv>Hme75_v{Ve2Xf9eZ82*u#Infdyja^gp(~@Re?VeOl^yGXd=USVG zf2~_GSAw3)=Iri$_o=&kx1G4V_Xi)|-MjtodwPG&|1-UNdN1U6B#(yisru+o_UYE` zNV(P8V(!=0_#|&QYtx!|W6)~@)1v)`Q@9_p+y8JGRc`*5>z-?hL&MT_F z(8gFE&8PHd64=OSma}<1kJxyLwcfm!!?yF=N3munUTon=?Va2?YD;6a(Y?jUnbYRY z-bXp@zqE&NW+)&1bJbzsMgL9|{`sdy1>|7!Vb~C6pMF7@KAZHH=IAU3`)}n9z&Oj&E|Xiy{03jba(eUG<@}-Ywbm4mrV;0{ zuR3$XIx_~;X|52{(20+W7z*hM#SUJAeI&eP&!Tf%OJN=lE(P+?DSp5g_JYqPe zd{~uLO!Y6oPmSfGze(_*vtlJTD<-x|dPQ-6=e}yoS5F<0GUwJ9SCg?J>VuJQ>o=v& zCiv3aDWB+aPqC{`uIm-?aL3T0zR zC>y3&v9h844#|Sxs;*#vDU^r7$OH84v9aHR%Nrg`_??Z>CmEwp7W(L}(joiFA3}ak z^xIENB)TY zXf^_gAH_#Oh?=nl5J2~BZU-TT;pfVT>dHqg$3HHyXP*!C)Z&e?Q zt(weJPWi--IJuQ!-qN~s-#tsZBiLO=yJhq`GA;cs+r`GWW4CHw z+A&RatD9*1y~w5Ab0}#2P-afiTTrHTD`WMg^?_~;^dhlWoy=pDW9&Io*w={4e}l1- zFbDZ|%#r7qsy{Kz*S$;ZFJx1(`~>-9!+h<9ZzMkx%TJQu66R~~eGU1!SbmE9<}hD- z>dVP@$sb&*I>%WZgD;AOy-}B(4owCtCw1@pq}tny+6GZ^i6r6 
zrH*h{olICqIHSv2KS>?+MRj(Bb(D7@b<`i#QGBP`Q9o3NIS9KcsPpTvj`9{!M}369 z#7k_%r=oq~pxjeoIra5a%Bim^*AkXPA6XoJ7M9bQj)KaCG1g;Cnu{0Lu+`|x%DUEB z*znAWYcYivV=rkQr}qv_+{&ZKRAYs_Rw zd&Ash<8a=v_Y?A-au>Q87%!V!cDzjNQQDS8=5u!6+Jl~b=OwlS>#fhF&BqzzEo;-} zmd~V(`|R%C4F5&GBmX9fLDD@6cHR$Z@EPd5q=?N!Tk@@QmjgVm`ky2{!*|Jq@;=7z zW`2v``h5YvbNm)x_4_0IZsE81r{8DsJH>g$(WH&#J}Jc)eH>f(zBTE-xG9}onJau; z`SxVD>b}h2ZZplXnuC|P)~A3iDLebt#&NP0?HOX(HQrwLkz5ZQ+)L3P;hi;Cm(W${ z3)zb(h6kMDZ2V=+C(tsTSF8#?Y~Y>%ObjdRQRa3-9{3pSzt`hp`};>b={JiAjmv7V>^vxtnilo1Z)ppEhD z(|-h9+E|A)v;P1L-&7g%!`F8^yOw%o1`2-mzJK%G@VjS!3_a2AeD3_iLzgbiEpbvD`rzguN%BLqy zzI0uJPljAiiq9wTFL1U4SzYU8ZEQ4g9LP|n%GhGRhOv(hJr}rIk8$3tTB$3~ArBOK z4giLJuUfaIp*bt-{;FH=kQTY~l?j~jW_s48QeLLIv0)0!iKPni=aSD@tZ6?+@p&Ib zMw@8|UD{#gjj@?BYwOwZovf!T-ZT&<&)~l|7xB*=V%quN!2ccm-^c%h`X{dH5k6nw zKUG*)b9ndWx7cU0on;GXzh7xiA?0CFjIRcBXgztnHu=28CBHg8dlv8eJB zp4|KQ4V*WkZeRW~?#Yh-ey`qfNEO<=E~OXdct7G(I7q*0Y_4hR2IUNz>TB zm$Ead?aH=U zE`2y^kSPwILZ6*Nfwjq7d&N*|j}KUzCqf4&x1v6t-=fbXK4Db$vwY%Dwf4gx=pvZ= zgF*Qo$w=!X&38lX!;Oy>qE?~qU~|4sg52?opQlD2wgsbJ21sV#=4!$n_T?Bo22LE z4}GlLF~598bN8TP)xqb4oSiA%kQI$I_kZ>6!0tMbSSM4e)>egU`Lt8sHYdZnhfmv%>;Sc~By0nr%g&^YLBLTPK5azwqK$u` zS7Wa>`st-MqP`@;HckuMc$_x4ySc9)kB9vbPi!0D7VpOsd$qCeek8*-c=pJSoqK3w zPOKmI{F8p%yH^|g=ts$(le2c~!C@PlXk%`yADd$RKxaQL8b-XhxexwNyWLB{T zRsd%naJVb9zKHV~d+-rTz9+G#vIkpnPIx|pdrccNXG>;+dzMq8Q>ijS?W{(wCoUkb z+%L<~dmLBO_Hoch<(o{S`?w0RTENoUY6_o0JTEqC2D&Z5Y7_d2p`F-#!HjS{dO@J(<;E|3~&j~ZJce_9huT9+owhQsQ)3= z?+kF5-pM_av@KlZ$I@CRQ(A57p0tm;GeSCu1~bd?x@XySRi>~KpAB*Ts!Jb*cM7}* zf%lQr)tDTF@1)C!2W!38Cg`mL={9T!SDyUVICxRTJDcEPzxeLtsG z>6G1CNl3p1`jOZcexvLCi$eH}|KY&tnG*i~Iqg|pxR3K0k1gcxwu{onUCBEBjXk~j zrF<@D--Q1qD>#pt#~-~c&57!?x$-^u`P=CoWtx{zhR=h>I6VjQANi~`$LYSnBz?Db zdwxSBm509hYwLRZ2))l7K$`j*yD#vY;BnE%u+PIdd7albozmCX^w3merh*I)E(-2i z&Z1vfPm>Nj=Keo44x|62>MIN=}PtoSY7w8*9^_;s1z<@aMwf(DD!myWcv+^g`;rRpg^OS)`_3Djf$ zYE5{uLH=&@R3}3n8`ljS$7%B_In{mom3Uppn_W-1RUeo^>w9bzjZ15sGR3`Mfo@(EiTx9b&dTLbQeAmY+^97IS-iub`$3knkrrN 
zYts&m(**H#$UAUpkM+IoF`(a1ahhBGA3n%n3o-UmF5@tdt@j7y4S(to_^a_{I8S1V zD>xg?Sw-$m7Hy6BLhWMY!jx8cgU!iabpChx3+N2d)t+_t`tEQzg)yP^S|s!K{xja_ znugp*GH=f%BJ+~{581UMcV)(9p1ayo;k!QF3!I%-l=|1ErX-p|k20SH=3C|m*&D>- zXninQeUyHg+ZvpK)mk#LF$dNizMr}uh}CUDPqgF$-4wjnVDDbDZ$f>3GH12ElJ4^n zSUt^HG~l%x7!L6O(00b}o{yhHLJq*`Jm8+U54g%xf98-MoE_8pnX%hk^&fy1d0R*E zpNwm*8CFGRB?a#=NBbkT3izKF~v)p7-DO`hU>Y@O`v3Y!LV0gm>BNZ0p@^(1-p+epSue3EnB$w2gZ~u$3p~ZfWo)BYfgb zLHf*C-h45#(5!J&3dYVt>DxT}Bjs86d7C^_T;`oOHHZ^9o3*R<9<_eO?#(8R2fZ2B z$kgGg2Op0r1IOO+R;t_fA~?iu>E5KM4#xcCoO_FG89!~7dD2OVk1wb*%O*6==D{oM_+D5!GZJsqXOM2ASBPI3#btgw|1^UL0yH-yXN@ja*>`XV_LWbS^`8&br66dE=2S z-Lr%bZ-%S&DCHH4S53{YdF=nHuTAKlDsvudVq{tG!f{s{__3$K8c}nO>8TQ{r2OIJ z->rRY;19pNT65inbyGHoxMC5ubhn`I-TcSuZkhAxD311%wlpq#FMDOjRObi2W$r4c zOp-ki$L*{szSk+K58Ov3xWI5NUk!d}%Gx<2C_6_o10STP^22&x&8^it_QZtfJAM?m zQmIb@M&@(vUZ%LRp6U$f*3AB~>PhCB@lEUO@s?p5Yd4Nt4qfe;YKBhH-sg~}jP#Xs zts`DRgTHWgTKlx}7v-w=z*Axi@fSfS@Hw|OxKOQOw*Uh_6L3gx{cp-EUHZF)^BYm# zqt*@!?v9X8LAW&Avm!+wKaFjBR~g+6pVYny@7?=P=4auhJSUcS5ABSuU^{a!`$Tv( zI$S5K&$%VMb-qC~1*g(lPjWsCob~pRQw(s}GHgR_%$dT7Om6w8Tygmb>Jc}wT=oloyOZi^DJZ*Rz57R`mVJvoGn%{}zri`mV>9>; zo1+jL;jJmmRP22MckO#Cv=fz^r^y9jV&OOg8iT73*1lzV zZC@uJ&3w+Xo@8MydBA6<_)%_&w zV_>{6&9*OoaINe#%wA_2vgO`}_GzpO>$}gZdsFC5`a32OY;MS{#s?{8ee}+O5qa*8 zJdtk{T%HboyeD@ERNn7Dowe@{{+cgM2d@$jU;b1&DDppGO*_l`y^iB+<~{}g|I z463|7;2t#DioPMv`fl3|%$0;?`D@}<-sX&bLDwzV;SPo|Dn77cK9MM4Z{)Ao8}TNx zw+wj!F-LlsH#~|vHs)mVk-bq~&zV=*Te~lJF(>2pm4p{wTN%wf5AzM(X>3d}aU0)W z@V@=l>e`R-Y04>;i-uWbLaq`yp!My%*LONIneVa>M7Y_zn8_s=`=F#-$Te1_8P`=A z_tE~^|CBe(ug7d*#WueDhm?s{+(&u;^gXvPYOsIOzSJ;y7JoE@IoK}l0>&qix3LGU zEmdHi1RZ7D7k9VrUan>fXLJYI;MhrL5L-`E|6nbTej&S0WL^K5`lV{3Jz2Tsf@_gy zajbtVjrs>;3L6Wj-dqhW+_G)ph6P=3g@yIl;c{qq8u`V#d{-}XN6hV(SZjetY*Kpk zIvKf5|JYV1e*Ho4we*6b?S3&|E_u~X(wqFbAD7A=yiT*s)zDHa4@q1egGR(plQ{ zs3$i4{;04|KHIi2cV47|g|t;hI&S)gtxFDB(j9_}w3lg0I$%B_Gn+OG$pvi=R@uN^ z{i%G8{cVe?Zvw*#lllyMu-Ka{9}Qf9{Of>Y{X;&xiayABW>_4L`TD>M2Wv=E1e3|` 
z-%|;0d8;nFtXf~Zwp!1=ZS7OcWEMa_(j9w^+gQWxyN|ss*PV~%kzH-KP2bN&{eba8 zQ*g7-mpQN5rSB6e_mA>p^tg6!uh%?0qg(0Jw--LWnwlRHKk0kYGcfV;RCZ*~_N*H> zlsmdxin*vu}Y{TY9;u0&2#3GxZrd_i>va@7(0)uyt!6E2MMZ`OTyU*fF$O!7#^ z8-wI&%1ht>-OFB`DvE0oPYs;0)7(3AaBC$SaogGx{tQ2$y&>zEpWz&-#sW6>Nw>^h zW=iOby#kq(>~U>Fa76bHB?mGv&T48XV>8ebPUWPmb5&ouNqgN`s{<~sM;}Kt_Qrl( z99d-;TlLm|B#xwd*1==S;r4Ac`1jzp`WAa8yp3oMC>4tjUqk0z_-^_|H7wJ28CxKK zU#jcY9=%tL&^ykTe>6^%T%5o@%c)~*oS3~!v`4BOl9QEOoG1;%iM@Pzl5)l-8S?)z ziVqid+Wi0He%b)7X2wI%uZ}~1|4FW-d_?{|RiHhH4Jo(AS}Sj=qs@HogbmwzFEbBi z+mOC#@50L}*BYEd-TGFzP5yK%`O^yg9^Zkq`O}JC^{sdgwQY>pRzHdn(vRxY(h;7@ z=25=U+SE6u#Uej{&F#Uw%>N_qMs4h8cWjII&uKkX)-_PmEP&M zwy$qtFLFaFFtTy&M-K93a;qvqp#2US_w%hZ{wvqkor2LC26-Mi27SH~l&-4; z*>S)hQ%z=}l?|3RH7+ok(;!Fh)E(P`)4jpXCEZJI_f}NL+Wj8@m^S*k*IG9bKP`#=$o^`eTXP@697l z)%GRl#^`yNcwfo8a_z%QupdqIwAWi>iny%?T}R?uHSe|#-;Le=hcQJXUE zB4$MSoNdE>Z==sFtKK|y6*vdRe%1!iTZ(fD5uC<;g4@<_sh!nVQW^RP@!hv?+&2ww zU->5Wd%m$y@xDoUsALS>JPq-#=G=qYZ6xj1%-p4HSl?l2;JI1)?kX{V6SX;Z%l(*l z_N>GAGIzqhm1r(9;I5qnXZJ$RnY1t$kf39yL^m^|^85wLH9k6TQp;u(6TCfiX6SE& zc=u&-Tk*L;@odJ38yO?=ck5@AKqHZq<+*^2VP2L77)V{D2YmH75&iM^a9`)#!L zt#{hxy3;{MxlGe%Uij$xkn$v&v8wrXa@i^CVq*^3QHFI%^t4*}bNlQkHto;1wT=ak zU0Z+_GT3z{&OBE3niu7a@VZxMOyo7N@X+j;o+lyx6+sreR&GCy~FYH?=OH4tw(2XI2vJoy<$5diG{M z(ARIr7JR!&#_ixU74=>2D8b*5$NsmAW7si1XN8qTXSsGnzJPwBXP;+kExcjfn(m;a z?*rVhDag|2+BrXUJ+SUsbePkuUc|eu-9p2Pg9(V`VWC`o?i16@dxy_}2l8lWpY7^h z7N!+%s^_*goAq_m%;K?g&4!f&I6eHy4t`!bf?qT$Z5E|xn)}DjFh8yVe~jSEial%C za~91vG-mk^t+|xFI3a1?n!=gM0{j)(|Fq9s^NqviZsl*fqxV!e#PavGorjt451nhq z?L5QWQ-j`t4*uJTh`;$e*PE{fv(3#r&oy6;!K-g;4tpZ-l3V*Hq`xNK-w=cIa%X{K z9hY@0>=a%bfEyOvvZ>9{6Aq8_CE@G>j(gA4P8D<``@~08)4&Z?oib&)sGYWaCn}Gx zRX;5lXsOLqS7r6om)?F%naA}bo|zk~HuC=D7~l0lr0e5(=Y5v9Av>2v9r4_E-9Q_A zetLL!RyJ*>@84+Dc9P{!*`v)rV|MeZtGCCn(6-B}OtWceINI9Hq0sEyCvEz<7fEwt z&3kkA8}0x4)PZ*W1@%SW>Ih!8I34Xx-RTpP=GafBBbz9pvvEh8`=`#ZdtY@1>WLW7 z(z)gc#!B`AycVN26r9)5!@z63UEjNYXi#0djzQgX(4*Ir@2_9jd#E|1Jz?5*n`XY; 
z-eC&B?6WiRB}dz3%VypDBTTBjT)iX0X`jE$vpt9DzU}b%h+$?IzOTJ*P5_K;Q=)afN%A`kxWgx_``ya++vQ{1 zUhlIUltF(+Ma_Eo^chEPU{?#^mt*Z5}$TTu)A(2mjr~ z_%7rsV7DdhOTE2E`%K?>7+4a9#6J~7F>%EQo#u~BfOW4MhjYDa} z2<~*?f^1;^H7Q|o<7b+G(nr$N)0kazoX>>8oJZh&(rI;O$;Cs=sb^0!8?TyS>azHf zc3^Vw0Z$J`_G@+R{oR)Av(@`}?_uxe`Q6~zYFI9t!@29}kh&D#;*9uchmXgHwwO`w z_hCxIgwT)!zmE}OrSKfePW`l3nj?#ddne(_^uU5c!;9Z#w+zRvwOvPQVK zm38vR$hu!lR^k36JpKz=cW~{~$en=8Cp4S(scG}<-m}abF*`a{{5pR(c}9ozaZjyK z8`5vl!H$;Zw(V)-R*8F#YRK9G{G?rJzXOIC^@lK5uAVxPc06nb`Ca78XAY{Tace5C zJz#5C1GnEt>-oU)MSXB&y4Lh;$vsJq5>ua+KY`ohp+Lz*|F+aT_$ejWtm&yZS%TFG0T$#T&g-9F|?Y zr20I|CV#`T>Wk(@+-J1f>O3RdWpEe4NxAxSQ#R_e zDdf?9k|%)4bVWQ0zn5S00=frkKYY`H$f`R?L{s!dZ=dmH^GAGWs^sJ=Y%8CqcfWXm zKK*?w5B2b%HO{Zac>Rjta#1^o*7{GREUmTsH<;{h)6M4(CT6!3d{x=ILDsPVJo--F|1! z1w6kS{Jgqw1@l(;-y!sccGmFgpw*Bou?LZPQ|w_o#qkG=({=Z(QrwoGupro187!Cr=$-Gbr1fu@kY9=2c0 z?=?MrL!x_YCQd9>B(L_5zodEja=Drf?qocN9R!z}9Or1Imkj!34fB-=@Ua(qYYA3u zT8GY4;7pavW~P;6+kSssOg8Dw$DuWBDrl8_1=>VB6D3lXq%6dwW$V==f-Gyz9s8A!N%~u0bVcbyF7b7eVr2=Um<+4DB8E! z@s)IZz4&kV67IqHW_=7_YaL%6?~h}A$u0-uIl%FifUiB_OZJs$9oH>e3(9AACnoLM zEqfbJ*(V&&-fkZFH#|RDgVqio!}EELXOH)$7|-|Dz*FydPQtUz+1y?O;z7u`A+hDg zCmP;8BHX<2aOQp{r-R228dw{;n|oKdf1&5U+M3K{`r?Ca)6L66*k{tEJWV-Z^zE5s zz?xikO;6@8`QBV>xms>v?S^l-tZP|UJ%D;an5%T~ztp*`@>S|CSSFpJ1l*Qbr0BgH<= zRGziRnlxfsE6PQ!Gdx9gX7^-5oM688nosyj+Jk= zyZ|S@vzZR_zcD`2LFE5$QTylNy?{N#k7D8f)PTEQa3$c%{J{_FzmIhggDwu{c8|e7 z9GZtW>5qBS1-q833vRi-GJMx*l~gw9QT#dv{2@8sD9*5b8rx7^oiSBCCe@mP_JABc z$~7cYg&cPRd?X689lJ+rgKYX}>WZeN#k}CUb0WO6;U~xm_Bfr1e9Xe2Cyg;fy+1(n ztut5?U``a<3E5wuwZnjU-TnixQFzgL6OG5Uwjx{}j=t6=88#g4pBy6_ z<#Bt(bS^!JN6qWY4!oU-AKAj)3#=_z8}oV`6RXcysZaHMnRKkYX|Q}wXg0SU)9|?ZCNEKlao>JI`%Fz3}v6mjod?ZQ|}J6wfxhpsoZm3pgumTA-jsR zLuJ#EThp^FT;9tc_EqZH7(7$mNKUrvAUhSm*K#(v0iS7eE&P|Y?c#v%>d4^>8N;t$ zQ0iSx8_Cb5it=5nyPJUHUckq64~_OQV((@&ysVn0@9iMR*ty-M1)Y`pkneq!R`jI! 
z+l>CU1W%LSa0VP)4V+bLM2FbSV{d|Iu)gz(cj)f`e9q*8-FMu!7y7!mOE!H|ug(~Z zrvD2@`*ZfC-GVH44!XXvhuWFGqV_9JVl14ejdGj|2d{9b9{UsLc>9pQN@E|XcFtn} z^S~Ybn|~}GO4KWC>#t;Yb-?{n#Ye%eu#zYgYfpkw-5RsJ|2KM(Wd@?V@`RxV{1d+p@~jwD>LiXM z!zSp@RoI8@IEro4y4h>6#E$OlxfH*^xe4Fn(4P370x@2NKw4kkX*uEn+< zje&hW`aj}}_c}MBcWVDsrm08$Dp6oeTqK{4^*Ob(+Tt73Q_R{z3{z}Su2EM4Heast zowN-dS?RoNc}0KBj~!z!+RERu?q%?|>_6b|i~j+CugCQ5`76cvOZ8ie&bh+JI8Ht$!TELam0mE1L=6=3|*O1lR#U-Y>UH|#E4fYU_hi}P(m;EczxtRqO|J~>>(msdM4tKXiYanI! zAN$+pyYD(rs(zv|nbl93K2s`JcZ$hvby#2yk#|?|4IXn?G`|<3vxoPXX@2JPC7rgo zdcO_pn3o|RZVk-(#%X3E>me6J?PU98-)zev4Cc$^NL+9&M7&+uD#eQr6j#5`?zP0z1IC#N|?;!9aR$KJoz)KH>d zk?%Is-?pxg<}|K~WO#~tO+0ISBm4<)+cUf148SA4e&l2jUrqehnxE|h)YCi9)5E}d zycbBPIm+|Gac9qwyQT5FclIn|y>ZcQGtJYiQQjZJ>*ZhK+Cq4hyGaix@owvyAvWmg ziRW7#jw1)HlRr&c|Ly8X_rfLon*Y2mM$_Z(1J2|3Yf167TlHP_&CkW^Dh~-u#t2XoShYFr|vXO&hb$xq%g}VRX{b2oRUt^+xT-1|aNfyi1 zJr1A`l<@_uwc=~Ldypd;ykPemX7N{wNt*W_Jv3poCWn8S-hnJ{(GPzw+LVjO#L_*c z*Eh5806ur$uE~r{&M=*02G-j)*5Grg)5U}MAiM4ePglq}L)fGp}Of9KsryQ1$;)|8^}hwJ^pm$vtAyVGH)m5n;F1QuL1v(gP*{C>f&jao$Acu#(CyE;18?; zzrn#DR5#SZ|7~QQsY}f^Uj}|+4fr2B_$#=FUHH9ETo3%tz`r=L-V~=KBEMHCSAR18 z2=gy&-pliltN&JgWS1?YhnOvi8D=>3ht;5WkApum2ra#Lwo8~bP18&}@QE7mUv}`L zk_{IA=Q|EHm0|PEalrrMs)4rh{H%8Hx1=I?1OD?Hk2cGI@2dfSwS)gKOj-PowKkbY zhD~{!eC=%68$*( z9(LJuvcGPW=#;fC7sXG>EO`_^cT-tcq>GWr8kTrZxqMAMN)Bb6_N46&6 z5|59@aCtty$2-Yj$H@iOoAAMc_wt@D-n%0GslBxZ73LUQDGvZ%{$23$$;fS_qgN@D z{1zv5H1-zEVaO*O($kcftkRR}KOf62JkECDyq}h>NDlwo$G!3R?JWMz>s@?lepYQg zhBo{Gcz?NPXnwd~ow;q_xn?;wCH+Z`haJtMQ<2a3_D*T@hvxa_On7dE_Vc?Y&0ope zF);0WKYtyz5l4d_h)!#bDS>DJkhq2P4*!BDmfl(Kf@-p&!+oI@PotKq4ahs za2M{|Xg=7F*j%Cv9Cu8ZVb;gwInUv`tFhVokfVsrqf;}?Z_&pu9ZvNh;gyeA5~D3U z)_p}k1+D?wl4YH%qj^|s6WC;0Hc39AHkQ3T7qbP$uq1egVV8s3S?6eMtjZBj!}Ufu zWLv~#eS>pKSXb&!hNbQ_`=MTRcE5}GDdAosxB*bCyL9evP$ zH22&b<5}|ujeE41>S4(!pDLBBAMBko3E(`>fs^C9M6?gGGM1bzeS|p@9tuwXw!M&F z{=?^(hqOd-Wy0_Vb7p$3xsSG3SOb2OlW|chiYtrQ2YTJGoOu@fvunV=>fo4V>R`@IP;gbi&7D&GF4gVq+bqr3U=H4t}m{_dVK@W_9y4<~Zh= 
zWDWS+fVcK9yT|?_+n-_H{ro^bm3`X1ob-oAVATJw7o9xzn~UnGzspWkw#@MmZ`;FT zw&UZEhyTAAEpI!2i1F(C$8TbA;!`xA=iOoKhmak2$c}kG7y915eO=$TPNdA+t+!X- z4;MPVYuW4fW3)YwvmJabd;LZY_(L6hEqncH4fu8kU&~&*YQT^8_-okfiW=~vf!}Jc zCEBvRTKfmO77k+OyUG7~iKdL_9?M!p@w(r~+`A^xV;_2?F{k zT_{)grB8TU47zw%9`K=xy^3Ssas1Y@$(ESR9{yejU&|(wHQ;Y^@U?96oht@>fXDv@ z2Vcu3H`jo_#^bMHldso+zX^pelw0|Z<{KwH$NG!s z`BLb6JX_D@i)Ogae3-U5(-d+L)>keztF;yct?O=Kev~uy*)ip+zRk_~^VJ)GWi55} zD=o|yw=ixURW_d|hY+0Ve4aXrC!(udXq@J*_^*CAlsO2&g}^NZPUWJ<`pzrc12>g@ z>Gx@#qEN1mQk;Q5;ToHJuGi!Hs>-P+-`ynK#eqJ4Jbb83_VqT;=Zs(uvIN~Rb8Rl8 z`<7MySTZ#Ga*pFD2QOSJUD+jk7jELP8ODwU9i3|^dx$wo(K>+mF8C`P{D?-l~sWU&G&88FP`0;%#?_+spZ;+8??q8|%uvoUg~qMpHIe zPJ4$obtUJE4pwb;9&*a&hEV5B^5}a;%rJKT5}qW(FOWgDCt1~oPrJHa-t!$F!lk({ z$>!HnB$xI8+dPW3v}}4ol{GT@Q`zhg>yMI0JjC-N*@XJ-?t&{gKTw2Eey6hV(vA*d zI7DB0cIW3fW2L?iObCqU`}4ftGWUs1PfTH-D&ObUcw#(#RxOrM6X=f(4QZp?rBc>Znbi@$#zIB+)j z-2;QofBr<$Og}tf-rHxsd6QiGNXhX9zRfmo@3%7FW&I@=`K$V{XxMABT6~|PHp@h^ z*SYo)+|9D-e5A|8z)0t7oSlo`qiNgsC4;Lwhu=lcC4TJ?){BOetE(ADj&?eme?@-T3s1L2as7q&8LTnp?xB zf>@l;_XJ;uhVZj?wmPiq=>8}E|5@()kPO1*V~TBy8nm9f^rJShx9G-lsWVlth}{*K z+o8P! 
zrEaeQzn8~fBbU0N27DIyt+`YlJ(WJ1OKtCP*2<+WtATTfgRhlKol^t8-}U2Kxzs5& z;QtD|e3G4ypsjp_uz5s*B!1}c3l_KoA<42 z9egdje($n@e&FH1=-_MF^`C0MFLUsk@6yHDK4C z=OqqjExTS{1Lq#xONx5l|xn|V9f+tCva|L#WW zdV5iSk?rZ;X65)=pZ0ma^6Q@uKnm{dy-nxdA+$-)8)Q&WHTo;glY|oVN|aC0}uK?45^Oqj#*EvR7!j*Sn9g zb`BW2eq5w^U)_nQzI_ff2m6H|iVxtuKR7kUm-h$Tqu&dm>1oBsQ;Br0``}dG9IrdH zpy6TEt`oWd>=dB)vYzI9)ZqB+O}S35?r$2_2u>12v6~`R@(9C z<$XHF=Z8*qFYk|IWt#gMJO+8&!AsspV!D~<-irr2?nhQVKM0KH&)cHiZ-EXIOV%4d zw%Ebt^XwO!BK;}n{KNqt&7H46S4R@ZLhf?h1=s{O@(z7EjZPI4he&T-=$H3li9V|S zUgUQE z>rL?xj(7N7AA|EeOo{R3>mC{NbDnlPa2FlKp4~k73O4UB zDoZTceY$ku+eA5cX~+@g>=-SNV=OrQIrKkiAD8>Kn?2i|1J{Cee7C*DT+_(jjuGt7 z=Z-+~X)8Y;i`$y_e?oQwA8h*Q3&TYkf7o{0{YLZ+FQ1m}z@yr19Pgw9@u0HBlxa*j z8XV84eRv+_>`pp)$UUp?YQ5oS&=oDlsO-O7#Cm4LkLt~c(Ww_5-l<+lz3;?yAGk8% ze^2?OgS*Ai5^sOw{gIO+S}D;YzUh5w$0N)iDU0@<&_5UKHeK)ezEZdDr9TWku8WO* zd|$9*ANoTk)nUi1?4#8Ex2TMH+sq~1Ix}6znmczkO_5Lj(6xCj|M2i7^f}fXf?{A^ zy+-4U#rpQ)#N8SHz03HI{pn1FmCf!=`D&6FApR7e`mq76yZYbWM*QzDUW6|4DKQ8x-SY(RL8@1!oTky zY9FjFAe!Hd(UcD3{LHR_`?whH5Zu~3YkA^X>%E@e zqhfGL%A{-ISN^Noa zj`il!)O7Pr^0b|5@cOWWUobRj;m_D9VE_Ca^DywOHQ?`e@aG57dhahsrOdqaJo6~< z=^F5NIQV|I=V$y5DH9})G*1HmPvrr~sW$WNvc|y=9h9SM{r!(I_*4+Z;ZET9-K)-Y z=*+y+&)IuTH}_1PyLCK%xx=}Zdl*D(@12L5*Ag?$w`q%)V>mqz=Q{ZFnxcJRZ)b*> zi(BTHM}YrD4fukCUs@OKgLKU` z@UwwWkym#g12MxO-KQ7!|Ftf3OKbw+hecFV@g`D4fp~9XC#TvUeA_a|fJA1R?LGy3n zcFVhyt)1P8%uQ9@$(mq(bJuIait68gyxd`H18}XMj3ftf?=X`9N%rpORwjsX? zyXdfDPLU2{Yg3Kw9cHO;KpWh4zC?MTw_#7uI9%~Ln!8y?^X225-Pf{>Hp*lhA6Iuc z<%4+|>gsHrS5~}*NA3A3O+AABw|r-1+*7x3=IAE+$=VRTC?>5Z57xZwhv@|-VQreT zT+HEKE%^)5!!c&u1$CACPFbn>*e2oG&h`b}?{iG+uF^Al_@3?x!X|#t8rqS;JiB*p zmh!5Cf7fB(P18Hh^bp&piLTD$aPAO$Uu1R%7q;K0@5$_7dAYGW9qV80PYSm1_bz|Z z{g*@&86uh;W+J$SbLauO!|>QD=dh*>9lwnH~fKE zhPFgFzT4F?r*9*}Jo8W29&sBy5t!d|mgPwL>7DYKbpT3-8jsjc;|6zja=pT;)ncts`CBBl6BUWzD^E#2RWq1X{g4O>|H*KtDKX! 
zYo}Tt)B5RN@UjZMDhI7x9p#{$wIx5Rmt&gGzA9}?xD;$OCS0`!&7=r?Cey8RmB zUy8_#ZS7MZ<$BoR#B^QsjQvdvX&?E}In?JKEb~MD%kKR92KbSkD4)Dr&)^D5WwQr7 zrg+|s|MMMgVxwIL=m*aO4o^QhxA5<$-~8A3_pi$DcoxnZ9L@wd(M6x)nE0@BH}K(q z+p(albB(JLQinK&j}I)3Qp;R(xhqTBvdDjF+%I~{*)lxuHm1o)#w&MX@0%+dnr<aN##`EzeUmld2Pu2nl__ue5IRQ;XI~>YU)d>I1D5@X&h1UjX5O&5=7%v{Z@V(p z4TH5_A4o8V;p};9n}0Qi z^S6$s@Xw%*w~LM3MzDfEKd<2TImYJ|J{Unhi@)OB`0Cf(bDOojykWdz$3DvI*V*{C zriXZI<7T(=l%izBzn99@*Z7^zMzQh{!SWooNA0TmXKHNZ-bpvg#Wzt0S{C2)vHHU8 zaakHA?mpt3;;yAvE?1w7!Q2+3=l5OAt3hx73;Q=;%3Or_@?#mzv&z>!E&O@#7nC>Y z8-^d93y6>JHMUoq?AEM35_x_9b@l!9eSH^EYm~xyKYjNTPKRn^%_FMsYAz%}`95EO zCiEoJo;(ZYH?g+-NzATJ;yvf{ZN036J&t#(BOD7=uNdJP?aJ3VT+;WqVz`F#d@6HC zvf-(zvY9Ekd?fea@%ROd$9JqT9uJXyV6W^E)`t8!JNYTKU#gw6=lFV`r`~dAy^Sf? zoHgBKn-=Jf{5FkwrR)6u-19{}Y{Z;OTUgxal74S(P-Py^@H4nSgx~))v5C3du=~M@ zb4*uaS>>K!r9F=tmP)i?G=|k!?`XAc0o@cOWAxth_arvq?NWY9W9jL@Ke_u*Q<&6Z zj-1#raBdMChX_};T+ODUaT*^);Vj3I=0nC9UqM!nOFEa`@9W@iu8YPbZ_>w>wB&%F zVQvIIzK>h*_25>%IgWb%d1s#gcW?Hgv@;g#LXL8|`o{R9%#%|G)<#D;I=^#wsy@e= znO=O;CiuD}#+UCOjSgOY!Jm6SCkE&B)K5Q3gk9Z1;qLC_(xBVR<#~H2*3ZWDiC&3k_#cepDCnlL*bKbPiSe*?t~b_3zV7d0bp5+SsOxRS`#jAbC0e7t zdN=*EADd!6YD&uSI)|Ll*LnMR{5G@UBDMjrodlDZyFQBsD*5KhR@HPO`NO={RCSV`F9a7No z&y_)W)`GM9dL{pU+?6J~(!3h&xi(#hbf`Y$B(|JmOeu`)j>PbfUY)GnvNY+>rNlP*yQNr3G_V=yuzcolzHeR&+p%cNz?7< zF!o(t89K~-$Ngq5%*K`1fsfOJpjCapzRI@*J0q&JKFRW&2Pld_vpLU+?`*o z0=pk@(t+ZZ>_Ys=mb9j|m&3tW`PaN(zaF~39+xmzat43@eUC81p}P?rdmTV+LTW7gLgYU0<8<~ZTtYf#YkK6Y9>v?jqVVr%Qw%)vHZDOJN z+(h}D`Ha)>3sv;D+V{CmwKuwxulIf6KzOtJB(N>|yZJsaKDIP*u1h#*Z#$lqUXQJh z<|PD+ZCAemdND4jhU#cZ)7OCj{HYK4rn?fGrV&gNU|={owp<`I6taxWQ%5a)M8D%=Iz;J&r$7 z*XzUcX@AduV$<(7ZM&wqJ^K_B;X1olQT~s%pQt-FE+j`%Uyu&8zV~-xxp`0^R>rS#iJ)HadLu;BFKg@XCkIa5N?R&w%lC?El$MZ2BfTIG*@$sq&Edd%rb? 
z{it>vU6wClKPGFET6OlvgwuG z*Yi4J|2W{YNAi4b z`+D=r(0S%}!2huZd?)ad+xs=2t8~X;{rK!JG5&m;FLm^U!@v6tb-iuL-eo7qV8R1g zYnRPxuB*o0EFTM!z|*&@-#-V~E@C2a;IH(LsP8SRK7JKR9n$6<<_t~)|Czw=#CKd49@4PY z#y`2oOmp4Tf%Tbpo&Op$G_t8DcN%6E44GxV0KHGup!Wv{KQ!js!lm>QE_Nd_1pA?lXht~e{OFwYj>Y#c5R=x z)i}BY4b+?OtX+R&%SfP2fetAS3CIc4T_elg@|^I70us{wzZ zgFhn?^^1qcq|5~FS2-Q{U)O*?!@;jfM);dDN$%}A+RO+3nHumXIQZS&9YU#{L-Sp} zp|Bh9kJW%b+QAo+k)3ZhGHD(Izs_twR0Dpxga2g8oihi1XfVsH0RA4}Po3Ck-kv(l z?!&!!pQG(Q+=r$P`2PJ|{acz+wqN~u`;?hIY_@4*ZR+M4^mcdfdku@^zn3}1*~5-D zCj!5+27H@?AL;J98Aq%r(tcL}e?<-WVGjOG=cjhqF=74?PB$xnKfeZi!oe?fv1{lK z39~wVl=(dHr`CY~hx7mATzfQ+N}99T2eJhC#Wmpj9Q>-fs6Bqjy+uXNt*evZHI06W~oUP9Sn0Q{+}xp%kMTRU7z{-JTsf$anO z>T>l@bM12R=q7U){`M;LyhSl|P(NSc;NKh)`R5&ma}H?OJaaGbZ`6Rl+`*si=E5K4 zUW*$VW|?mQ|8foZa~=F>H%GmBcwp|t-`)=VFKWOS9DGwtq~C|eq|J`xP!|Kgt_J)< z2jAuJe_^|{*{N>2IUIPKGYpbzQu+`n&voCQ1IQ@lHv;7q|>#3}^{{3+0V@-gn_s%LC=g!Bzgnm>hn7brixiK&g2>9Ohm`SS~$k57k<#+r@T`7_hr9=&yS2b;9qU^a~Hd7 zjX5U7+C^hV>GE+0Uu(=UrUv|X9ek}ZN2Uh6&oOI_Ia0uH9dmR=w&cegp1)fiy;@_A zznwkM-#z?w4!+iyV^aM z_LI!a`I?q6s8^vPJ+`L3R1OEPz}kILXz{O<$rZNsDc1y*dw{$=Q#>fnzYHq0~;>vxIa zI?#(W5^v}j+M2#dJDO) z@gC%TAy(Gp${M(f^2fB*hFF>AzJx!`9Mp@{`)RD~eW#CShBlivY(rzgd)uNr1v|KV zMm+u1!HgJIXHFr$Jrb+0xi70{&bo%+{#LBa&V50*mwfvcB@T92R=DD&zu!2 zyT_H?z}=s_P?^M9xS+v&4xA^(%5HUKy96y}LO9PXjFs8BFXVePXfl)7J9bp8 zY^AGr3uWWz*Rv?QZ7lm#y13Vu{a)htDhG3AOVa!rc@K-#ztokDO{dK75*Jr8zD*MZbd1kKXCz&uM<`##<@Y=aA@JwUOJC?Fktn4sXuP@PL zvb2d^Z*VjZbY*k7tMrTL-Hrua*}ksq#i5O69(wsv>MB1Hm#Kd(679mk@B18n_1XUuesC)Hk)J%+(UZ?v3*39k`*|-P{%gvF zubuzu%O8p7pI1CT&X9+1jpsM${)~JSldk7k{AL{uwL>Q~Y%C{l|CIINi?Fv~;w^e;_~$i$=8KPFk5CnvFQ@lQpCG{e;qpU7<`YT>tpb#P7kX8 z6<6kC@QJaqTV0ut!FDVNe$`v!%6tqyih4Hg7Pwc{#oZMS#>d@`81AcFnUA}N#mX*q zWj^j67%RKLmHD{4Z>;Q0S0>$k!o62pKdI-u#$yW&n~&yPiH=!YTPuC(JnwU=2jBWF zTUhMysP6U-kJilA^ImPCTyP|9s#tC3FTt(zar%9P>bg8supZ|$hf}{34yX83%Fa$~b64;Z1RZ?;2HD69ufbk4 zhoQ0aywqJi>2{UhSh+dqdMhY14lZ={Oy<45gK#`~a0)n$Yo8#;@XtX>`NW!@;fxPP zwy*7}14ex(EP^wZ!~4GU5OrT3x54(;un_50>r2yOu#!<@?0c2VfG>om?JvL|1l-HJ 
zP#^qZF4Bi!C&lV-Ol+v$8mp^*Dw!t;ri*<%!`PFU2-pw997376JObIX&};`M=ZA`= z&TjSTAc)Qpb%IMe(w?uYh1b>dc<(C}#mW%j%dDmyF4RIcm&sNWg z@u;?!9{SIUWcG6k8VfBETt3n%>-}#v1SQt-e!%Me?LD2dN)>$vL;DG|uR!)7StXC! zYO}*99s07Pz~{#x$(^KU{l?9WNLQST z)4kZ7TMbiPqc-AO=)4=(w2-Yb{S5b`cF{h)keIWDIib;2t=m7b zX-EE_x#0c&h5VhFf4_e;e}1f@{YWW&6Q#(wLoycBPqZiL4!b8w@($UX7>i7zo67O6 z7OnZ`=^xB_a?+-n;*VxwTM{??WWcpsrD(QkgflqH*$~wn<=6*ldm@`e( zYsf~rACD}jx^pqYt-Ul`xSOEp&I^>n&WiN@@*eA=F%a9H}CaRdh(HcLxw+}KWIPTt3`BF zKu7RUUzC1s>yghAjd%A#KY8phM=sO+W^WEzwP#R%MEzxro%4v~znr{ZcD#^W`s=(u zE~GB?eHU>|rVARZeF4Y(ftAPVGRyrHZoMe3%io}9;5OfiZqmTZ=N$}A zFT2Ibw=aVXyk`gYAQ7(`c0=~&CsV<#$i0cbOWF6~=Se>jyNcqsqoTNVk>Xa4`?!(m zIvbyEtmKGQ9ej6jcDbu(Ua8CO)vbeP^|4m^jenNhidVJ|P_F)^*d$y_kU@S{@gX%m zs7RJ3VB{BMqbX?KhPRke7hTtNYkn%_;>F)TBioB&i*U&Q%V)|?q|29g zEhEhX#GTR`3ToQ9Qq+~=#Kwq96B;a)Ay180-yIUibLwlzHj+B zr1)}o3{G)~v$1o+Ssgb1NUuHywb!6=vv@}S1ingrJOfOMI*=R$t+VkrFI0kDu>TV5^W)-AYYo~LiFS?nQy?yVT>M!lJmuQ)=Uvr< z?|A$t07)b6ifa_#%eSgmT z{ge6ge$3x5#apbY> zxA&|%Xn7?^{RDD&y(iKan}lm$|E?TrJZ;Z=P87a?Soa!Z4{`yW0evjq9`C+3Fz)>p z`I5(>_Ez2Pn4|fr)-|a6Rrc%0>;8$l_jSDAe}eWQx^KBD7C)sNEXO!d?TEhjWKCRo z$ZXoOYw2A*sch7*>Wep4CWN>3?A3Z(Pj<;|J>ze>t)~I`>L_nScebs-b2hrz`mR*2 zt{bF7`TZa19kit5c5tS_aR7ML27iU0$>VP8Idsx(J;(XB?fPijz7^$;wm(aD#Vea* z=D-Vl&f)EFc>hoEdCFYdJ{ha;+xCz_Z7ZAy@@{Olfp&eAcD=3VI@)yub#Fih&5tC~ zmnv5zEu+8jJKmn!@2c4F@bC_EmTOaTD%+;Ii(mz9qH^_>$+V~Hf4{ZG*7?=&c_we` z6t}je>Rd^km#4qqe{hX)vD!~(46Yc>-a?m8>OLFm53--@*7*SWe$HS{QI0!{`|{&= zr><=0JAUvZ-#82zWrOYF7oV^970$S;4Es0NQ||3=#?A@z65}k{4P&+`53~1DzDK{a zihfMn%rLp2t1AGdX*UkKpcwKIO_c zGFL9V9X7U>?KwdCOpkMSaOoQ>{lKkZk;( zQJFe-#-pdPX3CvJmcHNy$)WzEoM)CTa0?66OEXurkfVu1FO z`+6huYkGDBUi6UDwzKpkdI{)^q-^h9o6Vz#wV1|zW|`I6Uz>x6A~`!{s&g>wSCVtX z*k` z4ixM58sv+l(|YhK$80V~<8>tRY;dgjDIsT(Kk%n>oF|1w7+hgEchZ(}^&5Z}jUxn~qYv@BGN1m* zJLyX{ehoBuZ}+|&2AuDY8Ti%RlES6_atUyr{{F%TF5&Uz`^4(1zv}(QsqC4fuJHc^ zeJKa!Ov?seuKw)HpE`8_r*tfR?hvCRev_O9I~G1XkGlVAa_o0MbB;k~b>(1m5}v?) 
zMfkeGx=0D9{0Tg1j^^tTT@wyD!R(Lj!bxQ_MLKaX@~QjKZwKRfTQE)s;!Ssc?ZrE* zTlHHN;H|5iAE)i%Q%u5#X=+0bAb zSZDu`xTxRt&^ySLX>CAbLEBgJ@a^7f+>>LD=WF^64ez(s>NjxGkLv6egI@>y&F;70 zuhuxi(c5t=-0@ly!JpypC|l{?3*T^_^-lI6xvx=KQGI~(YIrJd&*%|FSHF(+XAqwqZ()7gm*N0R$kdV$@F zgMO|%as6-U^!hK9@Kwia9>V%5@i3SAx;N#k;4D#wUvHCdc@bRKKx=2`iv*if{o8L=P<8uJ^Ow>{7j*HJ7~~ftSrL0r*P&XpK>+)s{MB7 z9WZ^;`}d$D{>Jgn(~zwQ?gFO=>2HS^9{a5T%Ju#XV1%a})6D_w>pSQa#y#$S%@F#s zAK6!;h`-<)szElo15$UKk)w0R*-v?QS$DD^UN^F~5=^Y8P91e+SF(MrQTgZkEM@KJ zjC?|Vgz@=fgUhbMZ>f&_`8~ktxgDMwI1?6v+xjl<1rT4+y+773tk!*L8mBXMT&q|t zIoqkL|MHE;AggHNL#*%8yBByT7||M!p7s1=+C{kS+;gxF`SbX%V1&*y5BNx(JInJI zy^r5fWY5mSr#(9lAKFuylMjQ&`N)Hhw=vtx{fc}xG#GC*>7F9J*O_?f4p@8EUA`FI zzVd3p*0=BWM^DJoHz|q(_T7t=r@^0U$AN1TfYVx%yWP2nRvc+-=wx_kDZDCwsH)7zvfQVo}7u#*)#F!l+MF`0u zdC@m*`-+E4gcb0Ntp3i?5sdD_d=Ys)%vIRUIVZ>Z54zvle+t|7pCtNp_XJL?L&siM zvh_skQ}n4zITL8_5u8{LY`pvd+V8%WJh3Rcvqk=*E*rJ^5zdy77aKm#PG)Iv9_=7p)z7d`snPXq$-MBIUpfe7}1feQfn5 z(nUeGgfFo)s`>sT^qC&lOs(*0sSn+r{=j!@`58weetCV z@pMZ6WdZkrF3{f2rt8t;GW7Y43szRI%O6*%%Wot{Wu9}LY;=L)zBg`^s&y1@C!A;er)c18f zOk*8>V7rql6Pil+##bv%LGWP1Eb5W3mB7D(_PY;xSL9A&F8$R?n3>is-~3L0Kl3}- zC~#r0q#AqHaAfe?&In8fJCA$RyEqyqx{} zO|+32-(>F(Y+I3)|CrWY-gj9wQNPgYIsq>3*qD--)JXra-xGT*+i?CP|0 zf4<%1zn)t{4gj9UcGj4A_GKyFc^urg{a{#Jg1$b>5g78mx{}pbm|0xeFDx(EZ+o?M zaO`0G-mk0Azp!U-ukJSq+RLVsyAYKh26^m!68Ax#!dw~eTJSlV-({V(sS|!yM(-x} z&)2#ydscEe*0O`9RC`?|@yX@9JI3awsSNj|&g|441v^K0u6#Z^)VND=V`ZTAT{{of zBwu0Q9RSbBk6B62bHX6ky&RfhaFO|!?r&))AKPCr$Cx*1m(GPeqkFZvh0jj!maS=S z(8A|W?QWf^TxW*0nJ`$Y_WmjNfv{H~vK9Jm8-28IdFJ#180ZR4v;~X*v$pYf#TeQz zkj`qN zP_p9~=|XyVlRGYyH|~V)^e)M>uZNvJ7_tw267=Kse$Lp~`*!Sm>$kc3w-4ygcC6Qw zD2V60Vcgf2rhW>%R*%>Ld5QU|@(ap`NRIX>@7WT&pKJ;?S(jzLx*s@b=zf?mSFYxY zx$dC2vd808%=~bK=;kV|b6&%jZduJ5{;+D zC;W+b@i!8_UgA&k^ls(t)}CA;*Dd&{b=Qfn^$JGi z55?lcUX05nVe1F!oI`Yj%&rH+JLRr<)XPb@$CEg_|^FC;E2jJbbw#7dARJLDI4Vy(@q}n z2_d}y1U-DX8~rvoPJMVvP(mlXZ(lKu@p^Rkx9Uqz${#mKZ+YyoFhJXv{ZwV}rF=l` z@LkR`%g%3X`IP>z_PD?Mg;_OmKk{#Ie`^r#_S7_!g})H|;xlXKfXn6!z=iOsyb->t 
zW#&R8PZGKH+kU&7@(l25Gs*TP;VvOlSCo_Jo%f;hu?heD_NRdZCIr^kZ3L(EG84G0 z?m2>1SY(U~PG9Eds6WrUY<9<<JZd`08NJU+nv zzD>W$OT|O!+*yWL(=$^1r&PY&w2~;Ivw>Di^HUWK|5GelOQ=aLxHl*}|QN z>|P&F=-JU$`h?WZ^ig?z%QO2AV&GIjvrsz@4SHD$0>3+ zSk5bdXiJmJj&CbfQ+1s-E-=T_6gDgWSitv1=CigJ%t;-neo--<&{__jd&hQGm$x5N z3EO|o*a-jM9_5-1j4w52l0O+1-GgK9M^|G{c%6MSve{el^<~<(lJv0sE{6Hvs@osk zn`6GLaW(Z+$CqdLZlmTzylg(U(Uw*w=I$6$l+B7%zRE+>ijmCo1r;k#(&aspL-~sG z7~##hoS^FHUJhNYCyM85GCBCgXE6@;d<%Z-J$1ynIF5}D$7cuet~$5JVxPTtq<~(r z>%=s7&kwp~?5@0UitIJ3w#Q~<6Kdl$eAi|9-_F=HW*4$q*~Qn@=a6U1jnNq@!Iw?| zM-dza{=$Wfr-)(Tt_P2FtGeVxtP<-f8&W0{ZoKJM|LcXA0 zI5Om~(6{{N@wdE_&rW3{ew(}2^khnFdd@ltn6kM>hng zrz76Q^Peeue&3|&{p3*d(ym9C{bP6jY;yeV(3rG&;R~%pOujj1e#|`mZ1DgN(UM-J zyH_3jTXoTWDEDtS%(T_bH4~WcIJ5@*M&MQ7?$HF-jm)Pfu(L@uV4nu|#=X<#@yU@t zk^U|oKi`ZzeBkbm#~uA|rlWf^7VeldD>xs2H~fyNf%Cf#{%>v#_1D~UaNN+N%+BzU zsR4hVgTF5k$+u}ti}^WsJBja94fxv~ykyj#>kMu3-iZTxlik>utTspfxGt}c)s-E{ z4mVTR+vc#Oy>l_zdw@M{ga_ZIS5VhKUnV?7XS=|i6f2vAt$2KXzG1xb$+ET4Y7&~s z0?*W+Y4?wUr)$85?$`|O5;6o|vH7*~1Np=>{3fQ2AFh};CQtup$KPVRKa%Wu+M9cU zUf2Fy-t)b;>L>C26M{8w30|;c^2EF@?6(}nXyL7cSM3o~-%-vmF(ya0xTa@hVNK7W zyt`ODmEp69&fr6JJ>CPPi=1fE|MJWg=r=UQhuT6qzLvH~wC=&!r>JWwT#YpmI#`Wtru=WgG12PGZ_9myHDLKCY z9$TMh)ej5s7Q=CDsJY*TX7xS^qZpl-_H%Ol-G`4Yc9`K=_7dagq?0qzU67L>3f2}b zxvQt}rMr5xpGap3h~bO@7b0JeIb6hmN|dM|aaqrd+=Bfb?BG2LPwD z4LaZ9aiP;r{8wi-K+X($g>UKaHFRd%(&fW`-Sk-GdwG|iRb?Y#ne)RjJKvc2S@ob; zUr?T;b@}IFFfWHctL{?+=E)e$MqqZU0rSHcOauF%o7qp?9^AI6tp@BvF<5U08Q?bV z@+@Pr$d){R_r_pf4xg>o)xftl2D1^E4-`MLk-d04H^pEYw2wV=2mIZ!>FpY@HYT|G zp1(~s;I0(TTr?)xGH9Q`t1)=*ujNnvKhEAfOseAgeQ)oPMtb+s&18|OMM~^|K9@6onvu|@S&HjLpub$(KpQp zG>6lCfO+5YF2}DZb-4oF$t*5(X+$U3kC;S%nYXv-eIaK<>b(=X#hlXU;Kt)tlr~-- zmJz(u!F8QN9ejs;!LE(1D6Iv~>7eQmJbh#y`ZO!*LQW%*uFge ze}vy_W2=kHBK**pX!e9eVAlDoi;K&^^ozi(1?I{!Fg+qLHN+ko5=)Wo(v26Df$b21 zb+SDhxOIJBF}7CoI!9kT0=qW$O7Y|}_%=tzp>_T%#be6Ad=Y_J3(Sl%Fds)?YWQxb zA+d}$EZaD#4D3H6u&xcMGH|kWZvz{Zm-J5dVz`CZyiNJC+p>vckz*8o6~2VZ3<6$b zoNUm3z$DN!&7Yo%(9#QB-4nN!&KKX(VgEa$zBYJ|yS~PH2L{d5TM3Q_RVGC~Fyggr 
zTe8g9(3^VPc$pgXL+k1MF6Q?;e!u7U1HX0r2JQMo>+pd;v>web!|z&t%lK{L_aeVu zz-%+#QZGtv#!kgCebIt%P2X5AKT_+7QJ?lS=oQ@>L%xR(jwbCz0lr22IWFXb*>C-M z1jhNVJ}|=J{8!=n@y9k?-&IZzcJWz1c&LLJ&t)52oueNd7t8_wEB)$&1BSN`M(=u1 z@X`UuB?H}A?m!9aF1};WBb&vza46>?%)k#@d1RZ-adzlX3;#evxcBXzojU|?RZIxp zV7>mqGVo(9{AtKix^YO?2KJkb4fa8vca(wO$HM=*E<9&yBj=r+?oSS`MVFSAfgfz) zf3WAy`8(AHAGbR=SPlM#W#D(V@V&jz@BCYr_Q7Ac*X$ASUkdzZyS59uH&zB-4*oQ8 z=fZB|f-QERqWtFew)~GV&$MUvE>*!7$b1R>IK4+}a&-*IK& z|BK$r&iAMe{njk^rM&7N94rKWW*PXF?fRS>&nHJ>4W;-f#7V&EGpq^6uW1z2ib5QQ=6oJ z0++~#F`I$Vqd1QID$Rv7e{wJvS{?~+7wmxMLmCq_9>^Ehw`@@#<^l44G7o}I(SNt4 zvD`eUD#DYH4!-%e@a5(~-yadmLUcL!>wwq$_Vb|6%D`R$?ACcu6ZB=;=0OhMg|_~3 z^Pu%*aGq)5%guxSS_b|&3tw&?^n4lknHFAc)OUrl$xlY$G{2GTUM|DS36wQ5q>OY} z_W54Q_QlRg=cI3kb>sUq@~)@}^}CUCJ6~;gQ1CDG+xeK~`sS~}v(X{gV>=CBypT60_60rSRcGc<+5@k=LuNt? zczk~bF`BmX?4d8vKKK*i@5uO!U?zDE*PWCf)tft;!2g?$lYeNmYYrbW`PQF<=q@nM{GvXMJB)nD_8@EnvYY`MGZ z+6A@LH!f24Vw?Bts*2$2nsI^Vf{vDRZQgj!*}IH$U-pjVooVx=S3{NF6xybt_N*DT zU9ku8N-&b`0KI40-v4TGNY1nM46L({k4Ne{+`X*-M*k;T zJU;rSGjWynC}}F=dyI6f71WEfw}R#WV3_qZD~}$33NUNuxi`)v`zD8 z_V%oCe3&afP-h}{Tj@Sxz7JC``OCjV&jh1!kUcP_?kEhw%Z0JqUkO?`8&vIB!dxP@ zWYEgj0n=X33x6evHif4_xHvy}= zW;Bw=LFPvNC_gKB#H$vODk@R6UT{1OuK?}c| zO_%IGweZqC#l(d3S@z(5M7sExBTe6x?rh6w-gd2?vwVNX`C97F3F6<~l$HwZ(UltG zNLRgC%Db_5P_G|aNAVlMZ;E}+@ITFOF26xsa5jqHaDEf*^9lS<^SgkbvxoY2GB#%w z`ZF`!8$i9XLv_rfwANtviI{bPCpZH|w(!sh9ntyIL*W@K!9kIH&7H8-TgOA6vC_1G z??P{8&1k>)^x#U^Z5xGay3KR`P?t#FAvRC`keLgDLuDo?9h)g1 zKxcs&f2b++hm^0_m+Eu=P^;}<${S(xoImt!ByX6_bNwpT*%}8Tn81yhY~B zMwP!ZvX0okVrJm#I6qR}rL{8Get$PF_!Kti=IU2v!JRF)%sS4vHtBI3z ztD78bBCc}8jGxl`sfC}@5S|Oaf;icPnuCK`%!#IyfiD0rdfh&lhizVG#!xsL8su2H7=8dm2R~bZm6J_AdS<7v@(IdCehnQzKcYUMMp#8 z7Bk;=YjvJ?M6ro^LaZbAkvFp5mJHXuUAdhpr@cnd5-1ik7J6&JSqCny{V8TMowRFJ zcN2Bze_Hnx@hhc17_QF^(>pjsPu!!=CGy2p@X9xB@m4mZZZmtE%-R}xabPQfeGFMB zE~IkIFyB!BP4*VHbZRjgC!T$w?vzg6MWo_^wr(&?<5X{|zQZ}MqX=UCSNe7pXU1|}VW zk~G4w4Ib z{$)hhv<@=U>SM^OP*++aFj@yWqzp_k0;6@1@nvAvMPQ=qAp4bpeLVu}WIYr(w+3hS 
zuOUOp@A(L<*5C$|!S|;KjMm_Km4SIU0ux<>>sSW%w-H!JUv(L{TLhOPmV`{UUxV9> zy$@v+#^jT|gT7T2G=+d`d$FC^$V!kTf(4IQ2 zk9DV<*5CB5^*6mo*WWfB$6Sdym0M49`d6^)Z+|CW`nN8&qO=Y;H>Yy&M*kw~Z^uGk zIlX%a-bB~mW<>ZWy?fG*eJZQ*Uvixifsx)lR0d{T1V(yycNv(G5tyjn-BJd2Xav^D zbqR13A~QyYE#Bd}V3yQ&Pno)H+Wzg=1erc(q)>u=|lfr*X@(e*dyGer5eC89&F z4Vef|@m4&Rjr$VVsJx_eitkU4z-#SIx+zDOa^E8dSEdr4$$q%J3!vr60obBZb-|Kd4h$}c?Cm-`q_cQk zO?VeoMbEn6?hX@!Z}FAhoYpq(N)P1Ao@n7aFfWxXOI<62@ipUvWAXW3Dg%F{g@4of zk}r2|7wlR!A-EU)`BNGALoEElhLE0_FumW{s#n~M=#*H_UrBj?BIdb!G5f% z`8meCCXMrpN^1sl_f*7x9AR-@$$567c_;kAyXy}OM$*P*WoQ^;;qPt;{m0pfhTzcJ ziNWK*Ut0z~Y2i<(4*kJj^{NR5G#nHRrj5BW@LeqYrrPixp>|!Xfm73vNTYtGZLBBFMpRn-d<^(&G zfxq9vmzxvRmw{hl;mgelTBrWBAKhT#qyGQb5jf46W%uI9Gs-v32V{3!fOY*lCtMl$ zlx>@jUfUsD3sYGq}kv)wEtRbQrJ8bXS?exVory(!9Z7_d5X@d zJ`|g)a{=~aoob?m7tZg%^VSZmx2HMlvh%p$Yu;7IKKMgdR!imz*`>~-a zc(QI>@L(iwgUwS+AV+NMo=9Fnd702A>F$wBb!LB-?;dsv<7De>*@HN*{Q)0)6Dj+q z&3hgje;eyqOCot|Y~JJSRTxW*Z($_w1)FyyalBg@8?K1tJ#F(e&R=5FCI40aN>@)Mov}sE*(a0Pw7wXiF{Wzynb|oMB4}KH<*gL|kzx-dZ^^--)%K)1S zZIJOBlE}4@HE{Gq=R^&#YfM^iZ`4{&$6cFcuc|Cu(qHuPV(Aq1$E2URd@&6_HRd#A z7nQDr)+4A(yj8hq9jjD-N;FX}gRbd42|d&={nDE9a0?^8%;DK*KPBHpu9W_3jdw8m z@18}2>Tk$~XZ=)Ap1lGe-?5w4tETCkh2?e3<;)s(nmq)$+e+2Ecel?DPgigXN3`q@ zYI~oTgO=m9j&*pjggIp2DYUs`pHAXe3c56g$+z#GW`3Ppgl*Tp3U7JC2`fq!&{lI- z=(pG9*>8vLS9#NlO4oYICguX0?rh61=m5lOGH>9U8flAcH`FL^nuhg<6+t!a5Fo>|Vm+vTM{N6MSn7qqE9GZ(aUei7*# ze~iGJ*cULOEN&3-|^qXwD`tMBA&6-y}l;w3cFToze!y;v_w&@L|yLHS7k^Eewt}&7H3vIe+ z+dq;%*QQG!_Ku{hpPpg!#GgHEo@jm9K5r&{5YOrtiap3La(iS~@GN_yJuR|5(x0Pb zbJC$LF>A)etF$Jj^|08yL7hfZrV?1$pT#`OhN%zvlz9eQxUbU4cXq5YfsJE~od8}p z&MvED&mq6N_&v$*cl=)C_cXr(zpLZyIpnARx7y!}_WvvVcju18?0Ok%WObs$XT1#D zv30!+n`g%DPdM*HW5?yRe-meZXwLHyY3NsQ$=0-dP_$_m*tBwMW?mz6&O@HrcnoW1 z{TUzjgg@x~!Z21p*~CDkZ>pztabzuR@q{3TtSS;!W^8|{Gv^%vCmrv_JuNR)j|-la z@4~xu@Ee;qxIr(r2P9f?;Y0n9%p>NIgtABLh`3P)~)+V<4TdYeQ zbV$fU!Jvoce3&nPZ2jbrt^U@`ZTzjXE;7v#rDVTV^gu-NJXh&2#?NG}{*C zU1Rf{zco3MH(z-<>1RHeNSmVmmdaji%Q}B+RHW>=HqZH6dq?ulw0X|o+9Q&8vdwe; 
z*1$+!#^y-|eQdhMg_ZVM{#H+(#d8z;%7ij_qf59hDVmMFBV95T-2D;!Zp?D)SL1Dc z((6j#)Hj^}HeBo8Daq=%=2&t*G5!}=1M&j-FLw-GP}F%K@)cs}g!IJO5UmO9r#MF= z=LsY!-^eH3q{aI59V;KDOXH#v-{#ca-kUYY-hB$5=NF8R0*)BCS567PN0$GkeN01z zD@QvMHx-kqp4uxD=uE^f{P3QM1F&In>XF^MKP_J@%QxCO=fDfrav!bsu^ooJsqm_} zlV@?W=P%jdZJJl$TN>|o45I(ymn86+%=9#SH^svX{)t|sLfff#&5?WbZkq#{^$OB2 z$4}eZcGm0v{dsuSui9bwLE7eTTg|y5tASPgr2}i_qG=koN#EzHok{i@&^5>7A|CB#n z^0$>%eJlRY4z7VF>I!OvCwo=1Y=vYfy?!%NuiEZ&j#)sO=&fyJoYTB42|Uk^_aBfi z8CQ~aGc;DSuc|7AT+;cX&crwi`UJaDxZ$68rgI#JAIf{i&H=L6De|kpqy3eGGWf#a z(D>LtyQuWMXpjz&Cz$<22W9-Gu#bs9N>_gWTzzR+gif`qmiz?1bp!g{jd~>~wZr8} z265`~n)v1)xVyKbwL9fRyXel)#uE={FztAK7<-q}A^*OlP9MHX$2lKvjh+*++cx&f zv0s_`wa2qEBRazG7QVvfRzibl7CqWieKqygk`^cJp#M?N`_$7br9INfj=o#fL~O3L zb&{7u7xSt=9~>+CNssX?eL70-Y3p;O^2J%a4}y;V_@Qsf_EV>1TIs1Tvj>WN$-IAK z&(c5PR~`AHvkx>mSm~wo_ivJ8p7P!V=$RGxIe44p+fcp}PUaQ!)E>IqO7~B0{bWvk zTR$B{dVkU-=~~7^#kiAs z(UT4qY)6k^2GQo4y!N1bEvnDyrtn`wU926JLU}VMfEHKxeDb6Nm_GI*U_)|-kDOf- ze)CKFn@?9*G%EOVJWG-mO4Q*Cto1KA!-`=WNI(?7sRdG%Rm8y?-sGrqEHvgnF?`C=3G zDux~{TT!O$Zz=2AqH^NnJj(LD%r<4e`uk7oU0SAWcdO@GhmfvEzX`fe{M?TAUJ+e) z_3y8`vFEbm;;-~y{Z+PaICbRn6hFQs~xDk-D>HE9#|QBHl+rNz?p z%WrHt_>?aE{nMQPL0&RbU#bB9{Yk(yg|c!m?G>O<9hzwf@^>`$Lg z(x=a)z16_$oC${47MEX1nqqAyk?-{6pYTm{ADwZe*oE^YoG+q%G%@H={hs%W=8ByD z|Ed@phEJ04f_#T7I9Hc%e>qd40)IsGN$(vV-@8D*=$cyZ2J}Yf5#)oRl1CDI)|d~T zrT#Y9!@*5@A|KpjV5c$8;)8f&o9{R*R^VImF>14X7S3q^mlxogeJwXT)E$tDfit6*e2+Y+LONcq~bxje*LKUliHTYyU6YVFFcOk z0q{w85B)D4H*1C2aGzN2GITku{y&rP)myGU==4*%r*lGNtNc`|nf@|XX&g5-M)GHm zdYzXXP`G1-SMVvf@nC4lslRC4J~v?h$(k6ka38o><++MK_Ru{jF+aJr-)*fA|MlE2 zTU&(3>0Hn6TU)eep1YSjZsKe(weLI5I8lG=XkwvRo%v_>=%4f@b<>U-VA_2zn?F53 z-YXdg0zY%583Sdk`E)_%9o%*q7S-EB;rZPt|E+?C8x}&h5CH zJ-DaRe$E$JSOb2|kyM^>`_2a7)%GnXs_oN*-=gp6H0Nhi7ru>fud#KD4o~oz&@Kwc z5269sIC~d3`)vJEbVu#2XBpuzLJz2$^*EHnm%e-sdPHm{Ap6kIU ze^hk+1$g-*Be%nIx52ZZ+1T&A;pe0{evo9Nw#*k_z$IOn$FuNVEIRowIv=CF+SZx> zqPefZAI>}5lcu_T??CVqvrHpn#&g9e$D@z^I?~4}qkecNusZwNoae1_$P&M1w&aCE 
zuiSDj^FYdr@74Ky@fguXS?S7+z$<=sJ^w|^Cg_u%wDRt!rW^gLOEcG@Zq^CVuDS?}{ zeL@**RUghwBo5$HXW3W)xO~p8#XndhyH_dOgl_#?XYeq-&ZnL>yQca6v?B5WW@^xv z|9;b$=Gzzh1;0tQ&76B}?9C2eg>6>bipH)S$vFbv%Gm8!vM)bf#QxOz=&|%y?R_(> z&*)4wZIthFB6XYg^RBi_hT0QO<@d=p2~Tx8JPQ-LO}}?G@EdHR>VBTO&DvGOcAnkC zYgx^2GrxFpMJblloq5Rj@VMvrXV%8@;te!M_%l0`R(BQjo47>BU78h_5MD2t54vOHvW)x z$*_O@W1gYchhFhW^#2~ZU7GVDK2n)RFA$zY<9H)0l`oL~usO#$*PLIGE4-Qt@nn#p z&QQ`>{FvpM-=<9w^!{_|GEgg(vk7CRjl>)Y2ILAM#tE{ zlt2%>aK0`2MQ3m6OpaIvzO|oaOl(?I+L<_ym-3AMB~#rJ?VRc`=K-1kFuX_ zpZMqMZUEPTyS5KL9bF&nu=^o_hYkz3I?-vXAAU)nK7M#L40mm@+ze~Oo^az*i zuH-ku!q2G<=Xkxk$AcT%PY7-Sepwm#Ar}4{i~rW1^+AdF|MS3KTLwO9;g9SP;$OUD zyP$K2@xeR5=gPo$vG5nvcxKLdO_!RWxoTYS7vSfXfv>UfuXy2}sFS-i1ea7C8vKzx zQKyuF{{fv#^{ENI8Oc2zX?T(4uJ!?wf`78#$MNJ#V8o}R>>i}`J5>i`>ZSw-L&M=^ zaBi^h=f=ac4O({agFWIC%%1lNW#B)s@B=%9@c-`U1)CeDn6nf1F9ZLEh5vg)2;aSj zAG}?0aIgV-hL(YU!NSkAXG`3}H^MK*4hsGYe7`dAPY6D5Y*%>SF8U$;)!6AZ<%4dK zGV-4^&J3V$iFZcV)7Uw1t4s}ii#HNe2WKVE)jU_JJg{2l^MS44EWbE?yJH1+t43nj zESPzZ^2_m?!Vka6lTDRex?}&Oud+8^;0u(qCkxb$CS;li&X(&EDfch=5YVKu@-NiB z3(Ay{{dRS|SEkHqWy;h<%B(3<=9owsr)O%5KaBZ$8Te_yA6JEqvGmIB$?iNC!)Cze zy~zJF&o9L5gU#0V4C5>ksFz@vg6h|9OY-T)Z25 z)C6a7H`d|6UtR{jn}y%lA;fLc6!RTYc~+_DQCIA6o|gZx;TvSO~vvr@G(?Y{JRF?^6bTjfKA`9-ar& zx3eEy-!Lh-3HZTf;Gebd4|E9a>XgpxWsOfUc6jG9@Q(;S4KI+r)00~Gr#R^Sv-h_NnxIFtU1n)`)1G;_a)+v5eZ%-* zGQ9e5LZ}x}`u@+tkKrr?rT>NRJ3BN?48}30yj=$VWDEazO{nWDu;*`}8HG;19F#``dfzX0We#M&*IQW8i-%0`KHB&cgqqUA4g< za8KT=-oe3o;O`W?c%BP+Ieu{QlAH>w|Tjs_-$rU}i;l_o0zB^_kc#Wra_^k^G{1{8QPrFD(v~ zV!3SAx;?9c(R8o_t?H~J|v=KvUg8rkc zgBOMz8VpBg3{O(r|7msX1)W;Qk zCyYENl!5=i!hgzlw`%u>ZdE~D^})gQ;NQOt{2La&wlX}sWgurKt!_6FpKEe36!@?D zwF`d6eTl|)WtqnfIyBfD-bf!r-=AH5UZ~Fr&hY%dhJ%AKlfAc|Y z{ZV6y#^}WsUcAz{EuH&30_WPfz{2&e4sGa~9U6j#@kv3t*W_R{dhyL}^}!jV!?RhW z-|dGSgxuTsbgnJ`d0i;S>o{X;cjk^WX~(-|>idO-A8*gVDsa|m7T@y%+VN@`_@7(& z&efr>`nw$)g1tK&66C@EOd0s87XA)vS9asxwExsh3H}NGKWqaZenWB~@X~YH-3r?e z#@2=V^#8P@AH2Xf?|-JQJ0fM}lWBkQ{q(sQyp>P#m91C&cKvD(((VK%o|6p1`LpuW 
zkL8!B9Q`LKscb&9E$CxVyIWmw&AwH^ZbK&r*8uDA^{{pHuMNke2X{snstyZYr)?KT z=yUMxEqvO3w|d@=@t`F>KKKmyGt0ooEc{vRLS7%eLp<2gKg8U-dR!U!|JpIIwOu%# z?yMMP{X}y<(#$gOCLWb6=TA%yCY6EzC-~&|B8iw$P4M-%=fnG_@Ti@;0Kj~f7-%-T@&i~0Pd97 zjXvThIJ>e8{KFRhRzLK!-s@TyJQJJ3zM1hsq73|9z)NqopX*mfV4aWH1>3N#@1Qsh z^A7kb9bN*iIM0xYZyn^*X?=pSfBSsstz;uF&B6E)RtX2b7wg5 zK4OH#uP)?!*uh)lS1PlN_59e`$!4v|DxdX=^;;@u2PbGviMUzL#HwZ!tD4>UI1`V^ z2j*-XwG;SNv9NuvPWmi!l|`l3I75WGnl*MSu2ho`&%|ui{5X>@ZvGel6U@b+;WWh} zc#iY@jq)kObMO>6T>9sv-^#o15x06MS6BlMmDSle!vC?#Ssb&#C%cvT~lFAD{CH|9lq?LoxJ<6QS8S&yrWq0;FTDl+Q%&^b- z7GtvJLEu)KOdOHte|d5I+i}78NZuv3+(*O^y0T~MfJokXHt#}WU3ZhWPbBXwn>RZi z3yvmlXe93xn>V^57W{^9KL$ne=8*Ty4(&`F^y_Znw^T=4_`kJpA54b!UJ>{iHg9@| z`d}Hfca7v7YV*!SR*lfmK9V=V=DozZk|W@IbtErk^Y$k;_=w~c$un^^cqQ3vnUN2^ z(|F0dcz(U$QuHI%-l|C313Yp5ZxT#a@r#2^{9-+_)4F&Kux=mTjdbF<{HF68&aVqU z*+R)@6*lujTaW7PF8b2a)i6I#{(ClGHZrkYevbS%BKaov6)B%0|K&)&iOG_$x)l>x z70LUKHlAC@{3{W*`A^ErhBoG-Ev;+r#*gogA1}T9N1LvX^jW8pF3)zfF~*Bl`2ha{ z=N+BHJ-}~sw`;-j`(*sFli9~>=7P4q_if(o%wKOs7jCw+iT`ieyi3^|`~~X)*GKaH zV)J~jL+~s1BwrKBd(q~F`Zz9_Pu{t`Vy5qmqVEXzQx@hx{D@}G-?=DK{!yDZysAE^ zgr>QXya#OFYWAof08M8^^6s{IU5TOo4&6N|lDERNZvB#rQjWONzZQN zU3xZCFr-WW#MkRA4%utPFGnC7wWU(>YvMj$#--P=L(a~a*aPKcGcE&uBQa6w*}FX^ z2NzjfZ?&rkj-rkGTY5ypT$^`vl^68Fe;*#nJHzJv*x&_e%IzM>JIUs;S1s75esZu& zB=1<8cRzOr--o<<+dS#y5jJn94%NZm;7xb(;?S(Nt9=@0RtW~$<@am-_-R{4^iJei z{Hg-JR(P|a{_cbR{uldNQ&AC|0bbeR>lm|NsPY5hcnW>K&*G6SW=v}7jjrEj^?U*I z%@?})nok8!;YW#<-&lC@bTH47m*^bG^S~Xk&(wYAj+29a7Vc#Byq^T00{Nl93Wu|I zTlg0CZ20u|NS?EI2UJ%CH=slBM)I7!8^w82I}mICD|z4S*xtzWp&di|Ds8Jo%lJ||3#}@8l8N~eHiAbKYca-Z|R~gLL{(qY%*?nxwNe=f# z$^kW~-*KFQ| zcJZK!@y4t-Sl*g>KhMY1)(77pb2As@U42HeFUf0n%cIxZH3Z*M{zT;N^lBHICpnqe zS2on8JAZ6lb3Aa;6Zt91mk#w1JTTIcV|rB>9XYBO@fAzs1(nsosnBtJg#KG?-Xrz# z;6ilm=t$m8Hg7{+MX-{*8Iio}Y~EL%XU^$5G?I6f&2wY0S#O}NqBBR{2VEJ1)7;P7 zGt?7j?;eU(2k#gx7#^v|*}L6qD~KzM4|b2_IeYgk@vNQ5+a;3c z?44{=Qt2t;5!f}2AxDVzOlbeqj~}(=q+ea_v*goB&skzOl$8(sGh`yU3dfV+aDLxx z_NKcv>RCLWFdT_-w{g3Nk-he4aAuUpH=Vk-$jnz8iIsy>{8>9*zG61OFI^Ms*uUA> 
zWWjscW-n1R_YEmcbT;x`ZL99sDT4<(gJ%pb_Aq7g+P}0?aW9qK#CP{9uexV5cVX|6 zY5!o|5ekgbHw~K-93GkLxH=yjYwja9e2{FV&-y-f>PYT4T?I~$`8WO~-vLZ&?Z^5% z-@Gx$7#T~BE8zEX2h+5GJC^79z$JQoxM8n2-#)O(YE&{`OmYWdNAL1t=l-N6rxk|9 z@>9)y%$9eZBfLwb;9Y{cQhalWT*NcD%pNT9tc_oZTzH0VA{Fu~>&j&ezk`*gtoC_v zXDU3B%%6vETloO5Bo`m}4)Ei}R`RG*cWG{gfmb!#@k)F-6<$3lc*;BeWufOHWaji? zOTzo)V!^;emv$uWT)kb|Qqs<`X>PxRz9sQ8$P1ZmI*WbB)Po%;KFmB`axh~DC3=ck~Pd&3Ix#B+t}wpV3pXIU0TZ=mHom#gn^*w*H%=xl z-lJwgbH%(|v*y6Py|*`bi31Dksy^B8G_SX}VNUS3e2X&S`&sUvPI&4Y$zJm1Rm zr93xf=Qm%=9Ut1Kd~5c`lD-vqLVQoc>qgp<;eQqzsI(N%v`w;6UyB1%2~09oU(z1i zdx6pZYd=pv)Ok%7PUkwU?e}W&_6Xi}u~&<6+Ux33|CMjE3>b5MQEJ<|u8)@GY@)Bn zFxDr-`oG{=c(eysG%H4UrKM$J9pfzX;nP$;&0b*6i1M3gm&f;>i6*nZqk;cgUr6v@ z`_E16djQ{^Ha3-{FWTdB4&?_W!~On$qrNKYtEMh551lFY;E?9$cqd&kD)s9c@?&}G zgwGECW#Dv9k$9=|hNRD`Lvco}GQH@&=$^Hy1ZE2bbcRO3><8L4Mq^`g>vo_q0M zyzQ#;8S2Tk)gwNt9p_Syz5@`yCG-Bs&C&7-2MT;AKcOzM{hOd z;^0qYS?@?KD!m6xe=npr3Ee7x^l>52)(u=;d_y=Wqk7j?uP*LMeQO7;E~=f9NwpX1 z-%AlV(IveY2yB1$-wRgXn+bj$WnGyk%arL6X}5!W5V*CwtS(k%)#k9RiaUwcdm=d2 zRjw}9QC{a$O9vX#q0X<{Sw(R51$UOboH|CrJmQh1)%_dILwzuKFW{wG7(ETY`H z{=&0u{Z~cs$tLJr_>aLMUFy!c$FgUtSN&-1fK`T$3nOKmZ2umi;eDRBum23nxbN5B zB;VPGzw+$*Sj>NkG2muot$lxr=S>J2DCZ~C_jbj$H^F^ky;Pf+8?j%OvQweEt=~3sSFULjeCKu^-wSVp8+_k_Pvg=qz`6SR zzrDG2)0>-H^J_P^u6lQK>j-{#y|cOX0)88nXYW?mK5prZWGVlGMoki~0o;j8JLhnDgHb!z!N7~R0+KkO$9-{jo_hFBSIX5c7 z`T%L#Gp2N(dhi$6<2~KnEi76#F|N3F$=CT*;|#Kw+*Ob0aW?9Q$Qb10eh&KT$_rL| zCywh-iG56&F`k%j;Wvpov(EStbo8$KaD(iopU4%2AN>qWA84Y^y!eD)NxY>tNuO`b z7Y{-|q{GrD$wuSVj7T4IYm)CqXw%pw`qz@SVwZ-Xc4TdELGqB`1^E~m@^biJ7XG1{ zaIN4a_8ni>ZcOkRH0~OyQ|;7#;%6*;+}`nW^NzK_uepQeec*R01OEr$$M>xaIt>f& zQ_%TaKO1^T@H{j)oWHX;|5*{P1;NWF&)|A0{tA#(3GmgcN{;cElW$o`R z;C~q1#wUlr$->{T-P(5v_)QV`O4=xUK40*Rfs*$|@{R7IL((m`H^Ir}m%zH`^LSPn z`8Q5?)R*1YEnRw1Lwb^Q2eT8;uAWsPegorW!I@$F zC+*$c>w*!(YlG#3rvy*o2OmbAs!!{5s#|Mzed=aTCc^PUe5j!Q;c6exl8x&-ODXU66E&a%(j~!H z0W(POIgJU>maGao4`&^(DPLTXJTQ10{L=N_z|;V*G1c*Io@mNYPqwWd$D?;5@aGGT zvZBYWRh~ndgFTC9*LNMgj=#@`ZJ`h6gI}VH4$j#&v(A&kXR9>p3L~|*n>ma**I^6m 
z2F#y~J#*!_mlZqLMe{0b*<5|MV)o8=?wk*PtMx5#{Tlm&t!vrB-B6*O$_FQM7K_6n z`!q*qv!ugz%Wqt&viz^~*n5EQ=wQ_z<+q0U!KaZt-8Cy&w_`ujB<$3eUC?oFKK6+3 z(dip2>Ob7zd%1#Q6`!d*^|^SW)>mccE>LAeYF6XR9>aO!h(y>6eSr>6>z ze97-*o_E=qIZg_EI}`7)d7axg1g{bQ=x58REk~30AH}WmtnC4-v{@GZKH`Iei8<_G z;nn}sEoAJR8#Ea9MMc#E>&}eadA-^$OW!m`9OWV$cGwZA1Ry#fx z40OmQ$PV?Ptmsr;GkLiLc*$Q*yG84ck^J@KJDDdLzny;So>8AN;+MwWy%~Fd8LJ7_ z!zaeS;xP2HZTy{HoIniNXY3im*dqTk-iUAZGw5jI3>rJ0`KK#ZIj^8{hgFQ{%-+Mm zHLEzvmi2S^BpRF2=$*drC!VAHO7QvM^E1Y`od@oDYO8p?(T-(N__}m{Nj#HZqB62E zs#on+eSZgD_CvPu2Kq1G7~-EWmo{sk(~I@YebrCo2YSE>hkR$r^CYFwH&^2WxU^R& zuX~~912cNZ31+TL+Cs-0pEXSN+rQFpT^Yx3jnxHksxJ+!yRxAB8-L@~1-&o)aKlI9 zbLuL6cV^aD{}!P~Z5KWFs_l7bOi4!JxbJvBQM^xXOaH8hJ=2^)^M&^!cy(`uc<(nc z$0YqHW54Rk@ST?}Q*P`Zyj{KG|Fagp+}OW!8Tdzl-#YeZvAvP8Upy4fdo9j#V}ETK zoVQu{a%2CG2W-nL;a_6m%Z>e?N8qJb>f;N5*BrY#Nq;0?d@}nG(rshE#v%2KhTNi( z#sv3%9?#NojcMvHex7+!qz>&b&_06GMN^hO!n>c#7pL-m-jIsmv;8W9Uk(@-v_#@o za}54)PV-U(mu#2%%_`E=59A~6fF9|5Skbo9zTQ(;73AjG5|}mtj{%$8#ocPiuQ_;rtf$ zsim3=q**VwW1E*F#h z57H`u>xr%9tO;Xp!o3!4_U0mcevJ5O;!oI_ld&^;r;eZ@VyEA!->~5%@=t;u*)GlV zWMfXl#t7c=M0`9^cyo;H#9Z&W}RWANoR&yB%jZ8`DbBJ#`_oDIidrJZNt-59)&g%>Z+B2T(C zOljbk4lcVMm@N zpBF9OuDt6zO7UVC{*36HBAmd87wyP*Jl4Btc`VEi?ug{|;NF6pSTmCjp4b}dx7U;p zJS&sC&}&zIxXMf4!RK)I+-^MfGVF)lyS^>%n`2#zeknN{J|y!&fAx?3*=LwVjw$XQ z*)IS2QPH^X5!=LlA1o91&A7O4pIo!fT8TlQi~UZkE^X*Ob495#t=R7Z&K@H6+n?C) z@Ec1p=vc<{tvt6f?zq@*uWT6m?ZW>N;do@`Vt!NDkMwQ8m!v0>HbGW%N5sUGPk0gim<8 zs!X2wlC`ZVV!7YRR$&))X5d`P%bp3|)SE#+Ni%1FknWSN*sto7ES?9SgFg|tI55?~ z9LKZHim&Baq}uJ235Pa5mKIR1>}S-yb}UWdz#Q?IFP8sA*;;Y^56 zajkXzRvDl8MT@(;@*BltVqO)iiUT6_IsW{~!HVC&%Gd50fpc{~6r~qf?Fa7&oQpTN z=h^j_Fvh&B_&#<}aUS7_#+ZK|(F?=hJaZ=SoB3r*b6nc3_7O8yS<$4}@w)!2i;FBB zI7DRO{?fbi$(L?a@-7?Y zXE`$jJlpG)bX>M@2yJliZ>YX>s3(qI^dQifu0AlAdR;v~ryj)*RfqIh^|&%KZQCRV zSI?8+mQKV;dyHqbS@7~1MSrjB!OdFBx?Nr^9uz6Rw(8a5y}&0jnv;jR9mTspu#dEKB_p*N&KK7nW$6fINpYVjK2Tt9H_sG^=~>e)&4 z4*cnT@jFCuRKHrh5FAy|t-gA`@MS`oIC(mnGMMo-*3bpH@E)%kvVpPKDxQJJHH=* 
zasHZOyjzHY97Ugvbxk#27M~g%?RkS=@1AR}=rgZ*6TXkm0=r{^Ux?AJ-ig0$@L4;a zNu77yzM`bFaqF_^Y4V8;6TM%5+>rfsNp}<1WEYk4z!=#;qjYV7-jm3)5qQg|BO)?$ zvcBGy^L);(MdwsD&a>pLFN0G3>64M*ZvsF3(m7hv6`iG} zzI_RBwY=BxTm;VP(fPJ6)zRJ7(F?q)Bdfej7<*EGUB&qv)0GAuwZWWI3;x$tuT67$ z|4Jmywe79q340>{N>tB8(zQDSE;^9T+E-ybtJG=rsV*YLF zN)(BK^{DjVAN5qyCfT1oXtVYkh~6%d_B*(o#trCWKQpoULQrq{CZ0JMhpT6#yyMjp z`kC-M*pvU!`pNg;RGRi{)QDba-Rf)GHIWMIpNrNPnL{;N8n3qJcZlyRG)IRvzFRP~ z%~M`l`VqGAaphB9b$7Nnly?a*UEC;<004JVp-?wK(`u5E2`gTp3zI`lsuKgc#WD~Ajcw!d|;{q08by3?0?(U-s75ByEY92jE*a>z^1N$CN8USpdL zxRP?NZ?{lRFv>T2nbH`?cuRfizb61|&MsmMzCg49tG?{w%j+rc#+CO(gUxqsdW*E( zY1h_1OZ#;;&er$;AAPthdZ#|TY`Z@E!ynuFa0mMH4=-(QJ^CALzTKPijm|fq?&v<2 zft-(F?gK#&z5<_v9YcD%?O<;u?T+oz-Y2cXrn$bSeMYV0Shq`$E9^C%|LiZKKaJev zJzwTX&S(E~eroSYA8v>@-e6+x@+Up-Xx4oWV~-SYslwrYI$r2Z znZtpVyuDPsAiR>l=2^lo8l$v0e&l!KZp_9)a85G#Xzl(RHZ0{l*BTRhoL5pj@v=mU zSYpDoDaZaPcw7_ny=L+HQeb0hOCR2o^GZr<9Zi|W6lYj=#)k|2-<)Xe>lFkOOE6x@ z)&Q&TNE|N5;{@aI@HG2)Qs{1)?^2k5>HF7qw1w{vIP2@Sjhm7$7EL<3tG!w^YbVFV z>w#OmPqOXUiDmUdc5)Rli8JKWnEJx^=)NuobAp9Aq1tO%#_tDo#@V|XX!5e`3xlQ@ z{YPiyc#YZSO`Pc!YkaSOJV(Zcy%_kJ8O_z%w^@JSo^SSy~({Ta+>D;B{VRMj@7bn>|a451p?uZM~mvLO*Wj4sEZcB6(ABXTgt&YaJSS z@ZC&0P5yJu(+9Kn&7Q;4y07-LP172557%Gyjg5Q0RBg_09S4^h4y}_^AAsBKc>@Ut*j_hk8g38b98S zz{{5CEHKfpviGVjz$51#eukJ1XLD7}$&ah{x)m7c>qH+iF!4LdA!m6c+)*0tiqLQ< z&%WIU>ed7O%=~8IY~hSHXlqgWcRD{NGv1{C%cO@oa;IoYg?dy)ohvNAMQ_IPJ6iu8 z;)RFUM-O$)rUm6Op zonLe|^bg^@%6ZhC?>!HgrG@!P>F9IKcdod+ctvt)!E1aKyBzK_oEh^5yRyeoR^=8n zhGo5EA;@{x7k)ST3RC{--k)w!_&2b$?w|AbdM|Q;6#>C}^(h!#nje7xRkw%kW)q zH+vu}!9P)bJI}Y1?1SuoZ5S8P-440f)rAq@oS1DYO=)T>9g%1%9ql!l^?@XKMQeZb zUwWlE`nj~_`ULZ+G#WWv2e&yemeayBivL*4+-e^Fgt9?Ma+w@KU$MJA$-o@M_LInY^|AxqGiE zq|2orDEck@x{6ndzf=A8-K|%RQrUi#O;a|bJ{;oQCt8+!`);OeC1tCiacLPmgM~-; z|80k-e-sb*_FWahGhcYNgO|@H9qC3nS6^qI6%SG@NM{Et4)_gYiQ+u6$>Rry=aA_x zy&7N@htj#sr-NT|5nr9mPF9^6+L{Y(p5#&mjOKtVDeK^W4xDsBdNCw*TWL_5ePqfe|fggLHXk;KY0Nfu20O^R~P5JS0mDsVT%?1H5RJPN?tJq*#wv z9cl7&AwH!Gm-s7t;@~TR``HbZ%|;fKtMuA3Dt~aJI 
znz02(ZE&OZWP)>T^$Nq+QF;2ix41AOU0)jOHGM3+4Jq~%=bB0n1LJusHXH{|N7HKR zcF#kw6}_ObOC$I3fYZrtrfA?@a#3FuAD^V$*7z~BIeATvz=|(|eE`@|=*I|b|9xt! z#bsoZ;f_ektm{iG8a#e3iBVG^DUd$Bn)b{KTNrrmh z$2?<7UHX#-w;R7&J8N8?T|AJncrW(i=$UoR z<&+6lR5CZ}_!493tYAO0#+nX>C+o5K_;AoRhPd7{M(1YBH(aRk*iRnOtTW9$@6E#A z;1u0qj3!)b8WyQrV^$3`MdOCgi{6}kw&#L&Gg8fisH308YGC~=xEbFn5^pju!Ea)| z>Ls={jw$FHk!0fdX6y~$=}#-{(-GNq%1`aq z58O%baDUabX89jp=DFs0rm9fU*ifiq{#H+0MPns=b2z7gbNhD9K!%Q|!V^#Ug^FaX z;3uvx`U9agnXicQv-D%@;QR2QzUVMDIL-Q^qF?(()n{b~KiV5!q*zlhGM8?0f5+Ja zJ2|~^^~oncf%sYn&LWcBE5_uSC2PrF@;;-C4!j}Uz)Ig#?`6c}jt6clPJAHh4niMj zfboHbcs;x3GWm|R-qMaw|JYi$fI5EnP>%9mxQf9 z$NcHd!lT$K$7Aue5&qo+eU28FC%a?X)I`5fn^Mq^|JKBJJgr6#Cl!Xnqif;kUdX$1 z+(Xu5p&?yJHXhfEoeX53Ycrh1x)*D@jrplN#MFM|pJZ;9eWAG$nbl^i3zY-|tCPqs zD|=f}fbX#wd!Tvu&&8Ln1-JG#O9$`T(rV^_mof*EtPI`g-nxfcTedivjNMiyHDzRS zs$`O*O?kdk-izlZ>PRAk73c(VngQ?R;~zqM#S_^H<$I0!;85}!u~Dv%i&wH;4^fZo z*pL*qKZPzaCd4QwIO&S`SrT6U4^N=~q)ERfSlyFt@iW*o(xrdW73dl#+?!TR9w(bQ zO0ZezP75Yn8G{B2O`hmgU)%w^C7i-HD8(87S^Bs~UZQO-5S9Hg$X+^sHTebYu}BUs z#1c!2|M{V{u2Fo(&Kh2(WV5hc=%$}26mRd4E|!21y#|i@#iNTObe{0+=GOU7Z*IMD z5&Pcx<*wb_+Q`q%$A&2OkrAH?*i8Hh_EW~X9@MOFruuNs$EEDadx1W(g;-JR&f1%v zF1};zcRonsOTJ!nWphQxT=OP)pn1&g6T&mhdwYLl+&QRF1ucVq`_uK4FuwZh(p2)p zzDb+X+|4Q4!RzF$d4hN+n_3;|2cp@}<%?s0H@cM4JcIktlVQA7UqS}(Mp^CM|) zZSFGC{EHVh%eR-`t2%Gs`!vANfS z_mgbd{wkZs)}n*LtMcFS{N}9BHfWE5?sia{JkEPl8;|8Y1?kpjz}#Lvqj)0r_2Mg7 zbES@WBJ@wjJal!lclNSUCcb>5>#LKf&pm5Sk&n>l%HEJmLw;FAqbY|4la9(P=eC&wkG7=1Xc46aqOx!^jF^UtHWhJow7o*On?hON+D8cX@lxx&UT z?8o?bC4A6YUTZvk1F@y$j_2=2{2;BzzF^k>^xbp;`C;c$`9L=A8J$bxg|d&sGslmm zi8;+a{N=tfzTdb4-=VW&VRgdq+PV3G)|O8AZ~h?gdL1Q?ide_hfs@BA;Px~5;%oSh zDkqtl_0^Pg=sbOEyyhPI|D}mSkWCandXGJEJJ7!Bq~@%)=#F47uW5?v> z(9x+Vo#g(sSVwP%Vl0*C+~xCwYv7xY442!pR)#T0(tTk1W=*>3+T}gJxpm^@n_Gu% zZf$XKSjEkU5H~k5^As`j6gDfhsM+H`yhfi%qi;v!eA z{&-iEV#8iR&#o^XvAku&)RvYFYEviVp%|H$Img7)D~P8P%N!ZTvX3$Q3}VBc3o06K zEbcGZd;$GCM&E_iVGqontsO?PK9Xj?Il9zm3u` zFP7u}W1W4q3fr5#uG!1s_psmmFv_Ssj`kYp?0f1}#TfKgS=wWwy(u4D3EfwL`<}C| 
zG_)()$i4D(;*u1nsl43{Uo&m7l7;acXOIhJJhYz_Fr_{@o*y(i!oL$q8trzBsr` z=w1!*e(vgK>Av*R_pYN&=_31Xd&Ho>p^5eYCt5T<@4&NSPQtMSeXXUOY$|dx(>{5s& zXBX5j{RA>;JpK-~G4|Y$v0mc>VvlXQ7sVm`+#$HN&zrr@dC`lVPrxw+B{Ny}ro$8-f98JFHb^S)qRkY<2#YXu50e+&(Ti74E5~dvV zc$56ZnC8oq(vNe3&9cYIIn6!J31v})msbvV?L0h$=NnDg4d44e3!iE3wx?jG7S(qu zk`oHT70c$IizN%(Q6J_J3s`?u@_6R-W15wJ$FcO^Wd6S`R{vO zxWo9Yj_4rWHOu4^tiFuHpQXBk&VnWHz(GiSoHM;ecBJJY4_!U;G{=T71$0Dk4T3Rcrkj4i z*f1`}x}1X-U2g0a{pfD-vbfir>k+oY*SaZf(AvHJ%O4cI(6)Xtex}|>!pj)6Dt@W= zU&)`gbpA@`$|OQQSN;BWcc-CKb^latH@r0g<4+~Pc)BO4WFD1 zThH@WoZwMe;kpUl2}ZoXUVOB9I;R=^E&4guyC^HamAzIgtEsz&x?P#wth{{clnuI9 zvH_nDKF9xy!OORcEyAtxh0&84uS5FV3%&_^z8&7K4lg*(_)Z^C9ZSmKuiFN8y6$ip z>*{uGj_xm5d)kH#(t(@klgM~^JRQ>N)@TjSNmKujtc9x`^1eiU7#?fgTRg5u?wXg4 zMdq%o;1#=3KYoD!I#bM}4ZH9x9x{%ZvuBjAxuEnw?fTZ9Kcu==01K~;5Bz7!*Yhq} zRFS80Ew-F&6n4q*#oSXyp2|0G&4(AXL1~V@qos>>Kc3M&@T!l;GPJiNdo7aZ69*30 z-2KE_;~y%PthIKJ_$~Agu4GQ&;lDG!Z(TR{lC+cY{h2+(b|P=1E86BgNN zZ-#5fzqYik)n7|F^&8r4<_v15EB9$c=651%;fM_zU}B{X|7|wiJD?sJac5Oy+j#}c zEKRb}(leFWxI5qYPkd(M4Cy6hbYGpysI1y@jV(i;U)jB{7qkvNOuAxZBHG+N4Gy1h zVc56M5&SfGQsU{VK>Q8A*O|)uOE+Ni!@U~RC0vi7Te1^gqOzdzfPQTJ2#uA?$dlfD z$-P(ZE<^udhBN^2**;lI|m4F2xz$!BcdVSFHX;iXM0W%D-f z)FIG0Qzwfbz{UIl&4RnY!Wlkvf)BlQ*CBo#Wg4x{s=Ue`K{~dyh5deJPmdc%VkvYy z4L>scub-yxl~>9Zh}Q*t6VWQ3|3NwcOlviEQSf|2v(Nv4z1zb6$Qgiw6FhYMVapD( zsj0&5qJ?uDbcTcSccq^g+eTkYVfTDuiK_EP;Yo(}%IW1|i+coT>X|bjEG_7CSsYiC z!Evd@k>$)e;rLVgxu#9f_}uk|MxEurcchAcr^e&Acr9ifFg3rRG{X~benB*PBg4M` z3vfoqGCu{6pe=|0fv>VsGSPZ~)>oC@%Gl8-lCJgDV>@tf6M2`6@m|=H8gI_LxQwxm zffU~dJ5ZSqay`cfvUhGwtF!n@z~L`$16L7&^DNxs?c4aS_P)mcBI(R!)JI=vISHRc z=d;c4nJb9qFRXrY?p0g=v2C08mgJLZ7M%L6^z0Pi+&o%+>>bn<0*Z&enRks z#+amZl5wt)b8eUu4Wyr{PRZ($M!rj@ZGxBm#5T)5j&%8P%E|`*2^peCz%i~+&h3XX za>jnj?z2DEv_<*v-D`X#_{aS<@)`Lr-%)*Ceq&AJBC|f?Wi{`;VdIkzL1!xLZ(YFU z>K`*SjInBugQ;V?a`#b=v8_e;glEd{T0hae!@W=BUH6{}x8WVJaA-k?^FDMkR7{Rx zmuvcb<$W^$ql#_rYI~Y}yUY>*iH`pk;8uCgU8$se)U8SBMg6?9HtlH_ehstlMLs8vyj6kZ*_5#_fm0;fe+U| 
zBoDzld7J=W1mnuP^f>%bdDdKwyj=ci$csD7Iipwc15}KD@ZOdf4?FDmGnSAkIaYhOL$SGW-&;1?(wZt$Bj^9z*SXeUDI{(j&=KeOGIc zn{qGFrqyQd9ph~4*sxuxYXRT+E+{EoGFH0Ul!#rqQT$3 z=b*Fn8MA*+bmw|bFz5Olg#2WC)6BCEgq}AmDuav6IRG8*MgBF&%E3Rudw0q&BL7{#B51bssXc;!i2M(CZy!9A9MAYTqd1uKx5mVR zi+|ZJ*z2$fL8mP9A?i`gL9`wVosUCPh5D7B#4d((-3A^9_aOPwP2oR7zB#%nyXj?q zRXUCI?ax1VzKHPe4gTLyemHRAeJi|Yj2J0gnmbgDNH|j9SOgB{ z_$}>JhO+xT2+b+4xq`jH^U5UUQ_OYI=T+c{@?ti;P<|YqbtHcv^=Vvh&vSpCqp*ibKHJ@2s5ZoC z!_l-x^=-*#llNjVwA*3dmc7Q8 zsCkUKAN{Zmzhc{)rrPmx&WnMGaeKtG(Y>}lJyZKD7V7R`^=WvyO`pD-^%eBrjEzTG zy_Jr&_3N2M_{uzr@1lmbeiH0PpEmtwW-&>6RF2XU&Bf{G>s#PEaauE`y-m5XJh%15 znYy>s;Czj|w!S#CDEi*&LtosFzBrV=X!Luo?y_C~E;zw$aNekS2JPH~_b3gz3sW@g z4E(FWW0TCi$?8+^!Hn_Jc(81%esCgWf{9oUsBsodZ2A=r*E3Eov{jY z8IJ9e8}qG}Y3P#1@-+YDGakx+;9I8fADwFvoNW5B^v!U5MqXCOsK<=WlJ!VrE!dCl z<_;h9O3wk$u`x-bXGfq1ZY?@IQvu%;-?DI67z^-H-ncQGxf`4M9BGrsU1@EgWa($Z zIXu2VvFoeQ(IoqfFP_7$GmiJ@mV<{gG!~;8Gm5+LUv@yY$i)X^j0ra*JM6L9$NPYu zQ`pN0ZeRU1Nm`@v_f7dbfpa)~a9kbYFfdwsQ>?*EybO7ik$-tgnfi<1b#=*(icgy&G}Vz-P1-PY#KCEdlwO?% zKG!}sch`8YI^7v#15_?0p3e-ran>^TmyV5{y*l_qY&I@0eD@lI=zsfxy zt^Q=w6&G-2|7+s_s%ND=C*yO$W$ym9?>)2;uqc1L?`7$+dnP- z$>{hGKeV=7-`E!O(0rgq^MN$Ji{=9dM6Rs8V#A#e8qNj8ALUmj`A@mWkt=f!;6gde_6P>~n^;OUd9vrO+&l0;^!GjX zJca)LBM;E-G<>2RvJDRQK=e&}DvWQawpo9jd>FB=V!{!Lfm~U?v9pj$uUZg8w-%z`h&uLu5%%`jt6eeCpkZW z9%`-?E$_wy-KqX}>XfV{H~Ckhxp=?m2>q*jA0!{SdK!Pr*z?t!yibmJe>+J_jEO=9zmBO{}Nj}Y1zvnFdIF_;)v<7m_vRuWW1V>DKGQ4ATf z(M`Uao&D-M-}mQM71PY>ZeG9d_xDG=s_H(Td+s^so_p@O_evfts2*njEB=YXVM%GA zW$OAQ|DsRXgP*UHS>iXf3B9uCp{BII-RkxWs>e}|`1r`Wd{g;e_sbA=#YrZPQ9YC^ zeo;Hc;~T>E?-!=NRGx$D%2V0ps5~bsZ-~n4gopKCK~CwvNcpAy@*Aj6`u5VgdL%0( zXK$%%o8-CtVP2vPjn7WQ0?9-0<@wSBV|)xE{(=GR`j#eDLHcKrWpBOedmuVx)A z)`iA0W|+MC4f`0nk6jO|=zqS()yWE%efQ%7XhAgPkMAG5bq>{9?%Gg3=Hcza%lYI< zxzJyZbzbNu9_O7>NH;3ub?H;25lxAf6xJnrVq8M6Y?D6={rLL9ly@9!5(Cpd5~ani zyfw~$gw8wqNa#afc9wWeomqUJv%$6YzF*^C8tHawpUR)cJz%n*D!**Df`NMc8NeUI ztn~;}HvY+%JMwp)UcT{k|DYoCUh=4)n`ndN%D2FY`orlA+fyI+17OAW;EyP1k5XvU zl)XbZ5-Gy!ZB0p 
zUoqTj=g7{EU@K|lH;Ao=J4!o(KMAkE759tUah}2oqK%5bTi-?E%l+`v_x>+feElnN z=_3;UZ6_N4ug6S7-<{zv1TS$rRrcH?lYWu=`yM~?RR8nD7wY0wc52)K{(~np`3uM8 z{U?c^Sr`9%;`biW7Pb@9I@{?4PC z{p%(T@z4L*S^hVO&(+00NBsC>(tb1NPW=q{en9-Vy7-?HKXuFyf5gN_|K-EZ@?Rm| zt&9H&@q0#P{HM4N_5LxZ`+p^VL|yz3iEleR;Xjfa?C%*p-OnC7!_U^mf7iwj_K%&A z@!Lnu^j+ec>f*me{5eOZ{L+*m{=Xe|sy~K!Qx|_b@!uMo@#joF(0^(S_puS*`!V{f zlm4RrBqKF%i2I|R^r8>Zg7z}}3u)uHWi}2UPT~sm!|uAcuPbh%W8=O>+*@^V8WYMV za6T#d-XiX`y0|Nedpvi5jT;VbUaX6|l(@g28kRYOxUF?@MdGfS(C8mHy5e6>+%M|l z&L!?ApFYqZMBEDE9C#i#di=tkRGqCi(f(fKzhESF8&+D59Ei%b@5*&ejq*>&;f6F*QM%&3c>Py9f6GNms54B`jMqlv_S7(YxSecXTJ>&p2Q;q`q! zrY>$GarLw^vM%mu;_7K^cwOA##MRU0fpu|*5?4>tjdgL7C-uBw>f&06tLLHr7}w9w z8RF`B@6UB{N#g2xc5hwWdz4?#>$~dW{!MYYei`v*UEH5+T)%ADNgTMfzaOG4@(05HZr3%# zu^pQ^aq8PMl&28tY^v{A{a3>AEUxFr(ubpR<1iEQwuN7I4aw2T%3HSMz3))Qq5Ml9 zklu3~-$~A?a7?*Y`BK??jyhG=LCO=!T#d&c(?5AM-m2YdLqJ&l{7UmPm94rQ6V45u z!&hk2c1ZG^wu&TD_R`bIO?=#vHwN&=Yh8rwp;xd z$Nf=pB5X^PX9Ra< zsvfP!%t)QgIgPXZao|&U$A_ghqP#H>J!99>dS}&Cz^Os6~!j zf28YpNMpHp=q~C~or2?bz8eeRq=cP3hD|)ix8R8R?k`7#`#R!25bo5kDbi?6h|>!% z>YIOfo3Q-;eHpj?tE7*=uj4!JkCFGh+ZUgs@=?OQ_qZEYG>m=iP3%|u%>fJS8P4={ zJJfNk-S=6r_U*q6!#4vzFejV$SSzkN?5{M&^IdmNEq76+TDl6H70ubXt1EL-xdtz7 z8oea@;s#rLGxHbLxZkg9f138BXwQ#ne`Ddg|uIn%|kaH=kFH$W9QI+bG+kHW_UHNdb8ntLhFE!pj)R^N5nhv`5;f$&4EpJ zd%(PrexKvhxXgBZ@TFhttW3fC{U75y4_?wH@5zUY?-u+%GER1<@}ZN@@tLP)ly(zo z)8I<{;+(X+C6V39G;_`{Y(zHxj{5jhmbWCMcx*j3er^WiM=ky!c^TzBlG6)@zi1t*7&M_UuWTu{U!jAMbj?GS(&wRnALkYmV~bYh6pa+7mR_=S-DM(p};?xsz%McP?j{ z%=OuC(B>4NQ}fwc+FefF^Q+kBGVEoc4fw9)UZ(uBec;c)@5q>Gc0YyS8csX*-qhP? 
z;Ww2e_;zwb`gY4VTzT#JN5aR4#5tT{700QJB)A@!e?nBAOL=jbv28+k!;Q^#SlOsE zWD{lZUJ*}dEq;BVJuQ!OHmmygS6|Xz$kS_fPKq4+?yu=zGgGz=>IFu4YhCN6_3W$f zG}CUmBhh?_KN)!33VY?UjBimnzq=aQMEd9O;VzA(Z0c(^_XL-<>YNPjJu9&vh4^uK zdv>rhZ$@ZmkbNMLgI2LWBZKMdbNsQ$-&B6Br}Hlo`3hZbeXDsd^b>`OGqT(fR7}Sn z&AT)7C;M#JTZ3&tvS{L_z-3%`Z`h_FnG?+{MEZ}pgklPs~ZMkRrd*Qvw@ZcYK|Bf+1d4bLUjq;K%5v60_fKB%n z@8_d*!pY-N9al$nP;c_5sv|0=k$S&M*u#W1QJxc(w=F8~pLzddU3tKpzJm1ZS!+v^ z-pU2w1IA%pq+L(i0_!Nqb-RY{R?yj@EqvG7PN40gq1nzRY+T&C<%(b68|~vlZo(h% z##~=#uJG?@FK>!;^>GSM5T-F19973H@2!Q=a{)>*u5~w(HvApEMON2i|ymT8IPn`A$phpbTd5uoC)q5bfDR@BRQl;XE-w+Jw8DUuo!#M8=!|&L}IBObfX~L~M(jMxPmn5CXc;}!4Ss$a@df-)H_!;;SjhMXU zv*aV-vGR+gS<%UO(Umku+}hjM%KOdRd;5Nj+fXND zYkT`{y}7sVE9?8mnOSe0QCk!H0v3QB_`z+h-XBo%KxYBrlg9Xhu7kX~kMjuy6Kevq zHWb43xY<`3&Iu7`q~M>|<-Lq1~z;(Meyy_Ptbwb9*SUh$C5*g1L^=O^-}`Tlsm zwZB++I!SrRFFcBt_V5q;_b6Sxzg-%f#l9P-fU~dV!}9Tc;JAcvpLDCO<0HXP-XrM; zWMwTynIG*A`-k!TG3JCd%BrfajVGDCS>*5Kj1Q(zwy-H*B7&85d;ev-CvSQ168$0g z-}S??|AV*M^O!xJ$1nM4ZZV?Uwca6DhU1Qj^aSb}hTMW)B%3S#smOMdT_3c%z|P*u*eCJ5 zKbXCD3J28P^yLs=_|>!(DOc-mDmxK{BWK#=7jdV%G1!o!Z_H@V$&T?HbY-2pR-~=H z#4U#3-Ww4A2Jx!n<{z28eD`>dC(UWgdi(C@iS_%1@Xx{ENPN>|Zb_r#H)Y*hpn+R0 z4@%ccxl5s|Ia*uDqzG@y*?&LjhB{s!ctA$bX8KBPM`tvK{JckMJuup9a>(z`s9j$| z1{I;xJoFBIG?t_n+`(B>g9GVSHQnh>|7wgl*)hH;uJjcC4sC#E=`S;0ZPJ|d5W-X+ z{oC_NK-&H-aV9`OV$GqD*)R z7*t*v9z>r6ZuOBvy>R$sLs12fnAAM8pvOWg0GzYi^JeP9d ziT3H=p^cKavzCVG2Gdvbh}WD`dKvswRs3|~b>1Tow@spa^d3uRt*eyJ?sdtGjo^9b zj3@T^*Wvrw!I`c9oHQQKGdvr3zRt6lr@#~Inc8do{B&R{QfGyB7r?*vE}M0XVI9Hd zq0;d-bw9WixFSDLwJ8CtM)b{i#@r#(nV<~Xqcf}QIi_wIUsL_B*0?~LLiHBy&uzJz zvgi0W+x#yEN0Z;`AMT5`tkr}oEaASGrtIVw_@4rN;KGmLQvH;jG}$E{ca^2_VXK|k z0xsM;sk4N>yDs?8Awym!{1wu@oKT&)Rg8_xgJkYS>bLt2+ZZc-M?6Wmae>o)C9wOw zJE@Dd3ywzaY&?=Vj^@WsVMU^;Q};ZqkbiVlFda(08XMK8^x4aUI+u9%em%nM{#A2M z%b%f52Vb4h^q+Jnmo^PDuXIl)O^&c@p9*o8&fe%XcL{%M0w)hXILB`hy2ND7d4*T* z?FIIv=DtSwWxuAQwgG!mxX7*v#%OGz9fJLM%6`wK|1`fSRluqGm3C&AT3@(5(9BfA zZP`}lyb`>YSAEcUepB~$vk7%{6Yj~KybHrdsb2huABP4--#SP3n7;5HoCnFLJeIeq 
zPxX+g2O6(#Ag+NurHaq8p4mygw$4&Hm~88R#nvggcs+6ZV2b!H8P!{i>ScdmkEPFu zx8bFI`o>onf2RB_X^K3Hc=Y|9pTY-^Vin;%iRXHrMs$o6Z$tPl-n)L1G$ZxTbLo